Merge "defconfig: sdxpoorwills: Enable generic serial driver"
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 4987417..dfd56ec 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -350,3 +350,19 @@
 Description:	AArch64 CPU registers
 		'identification' directory exposes the CPU ID registers for
 		 identifying model and revision of the CPU.
+
+What:		/sys/devices/system/cpu/vulnerabilities
+		/sys/devices/system/cpu/vulnerabilities/meltdown
+		/sys/devices/system/cpu/vulnerabilities/spectre_v1
+		/sys/devices/system/cpu/vulnerabilities/spectre_v2
+Date:		January 2018
+Contact:	Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description:	Information about CPU vulnerabilities
+
+		The files are named after the code names of CPU
+		vulnerabilities. The output of those files reflects the
+		state of the CPUs in the system. Possible output values:
+
+		"Not affected"	  CPU is not affected by the vulnerability
+		"Vulnerable"	  CPU is affected and no mitigation in effect
+		"Mitigation: $M"  CPU is affected and mitigation $M is in effect
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
index 59c3356..9a9a6d0 100644
--- a/Documentation/devicetree/bindings/arm/coresight.txt
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -154,6 +154,7 @@
 	* qcom,msr-fix-req: boolean, indicating if MSRs need to be programmed
 	  after enabling the subunit.
 
+	* qcom,dump-enable: boolean, specifies to dump MCMB data.
 * Optional properties for CTI:
 
 	* qcom,cti-gpio-trigin: cti trigger input driven by gpio.
diff --git a/Documentation/devicetree/bindings/arm/msm/mdm-modem.txt b/Documentation/devicetree/bindings/arm/msm/mdm-modem.txt
index 105dcac..30961be 100644
--- a/Documentation/devicetree/bindings/arm/msm/mdm-modem.txt
+++ b/Documentation/devicetree/bindings/arm/msm/mdm-modem.txt
@@ -7,7 +7,7 @@
 Required Properties:
 - compatible:	The bus devices need to be compatible with
 		"qcom,mdm2-modem", "qcom,ext-mdm9x25", "qcom,ext-mdm9x35", "qcom, ext-mdm9x45",
-		"qcom,ext-mdm9x55".
+		"qcom,ext-mdm9x55", "qcom,ext-sdxpoorwills".
 
 Required named gpio properties:
 - qcom,mdm2ap-errfatal-gpio: gpio for the external modem to indicate to the apps processor
diff --git a/Documentation/devicetree/bindings/arm/msm/msm-machine-name.txt b/Documentation/devicetree/bindings/arm/msm/msm-machine-name.txt
new file mode 100644
index 0000000..28f6e7d
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm-machine-name.txt
@@ -0,0 +1,63 @@
+Msm Machine Name
+
+Machine name is used to:
+	1. show up in the beginning of kernel message.
+	Example:
+		[    0.000000] Machine: Qualcomm Technologies, Inc. MSM8953 PMI8950 MTP
+	2. show up as arch description when do dump stack.
+	Example:
+		[    1.222319] WARNING: CPU: 2 PID: 1 at kernel/lib/debugobjects.c:263 debug_print_object+0xa8/0xb0
+		[    1.222334] Modules linked in:
+		[    1.222362] CPU: 2 PID: 1 Comm: swapper/0 Not tainted 4.9.65+ #71
+		[    1.222376] Hardware name: Qualcomm Technologies, Inc. MSM8953 PMI8950 MTP (DT)
+		[    1.222392] task: ffffffc0ed1b0080 task.stack: ffffffc0ed1b8000
+		[    1.222408] PC is at debug_print_object+0xa8/0xb0
+		[    1.222424] LR is at debug_print_object+0xa8/0xb0
+
+Msm machine name is a string concatenated from:
+	1. constant string contain msm information: "Qualcomm Technologies, Inc.".
+	2. string of device tree property "qcom,msm-name".
+	3. string of device tree property "qcom,pmic-name".
+	4. string of device tree property "model".
+
+The reason for using msm machine name is that a single board overlay device tree
+may be applied to multiple soc device trees. The "model" property in soc device
+tree is overwritten with board overlay device tree. So the final string in
+"model" property can only contain Board information. And "qcom,msm-name"
+and "qcom,pmic-name" properties are introduced.
+
+Optional properties:
+- qcom,msm-name: The name string of MSM SoC chip
+- qcom,pmic-name: The name string of MSM Pmic chip
+
+Required properties:
+- model: in soc device tree
+	Contain the soc and pmic information.
+	Will be overwritten by model string in board overlay device tree.
+	It will be used in bootloader for debug purpose.
+- model: in board overlay device tree
+	Contain the board information. It is the final model string that
+	kernel can see.
+
+Note:
+When device tree properties qcom,msm-name and qcom,pmic-name exist, it will
+use concatenated msm machine name string for final machine name.
+When device tree properties qcom,msm-name and qcom,pmic-name don't exist,
+it will use the model property string for the final machine name.
+
+Example:
+* In soc device tree:
+	/ {
+		model = "Qualcomm Technologies, Inc. APQ 8953 + PMI8950 SOC";
+		compatible = "qcom,apq8053";
+		qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
+		qcom,pmic-name = "PMI8950";
+		qcom,msm-id = <293 0x0>;
+		qcom,msm-name = "APQ8053";
+	};
+* In board overlay device tree:
+	/ {
+		model = "MTP";
+		compatible = "qcom,mtp";
+	};
+
diff --git a/Documentation/devicetree/bindings/arm/msm/msm.txt b/Documentation/devicetree/bindings/arm/msm/msm.txt
index b3d4d44..d150116 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm.txt
@@ -110,6 +110,12 @@
 - SDM450
   compatible = "qcom,sdm450"
 
+- SDM632
+  compatible = "qcom,sdm632"
+
+- SDA632
+  compatible = "qcom,sda632"
+
 - MSM8937
   compatible = "qcom,msm8937"
 
@@ -321,6 +327,10 @@
 compatible = "qcom,sdm450-mtp"
 compatible = "qcom,sdm450-cdp"
 compatible = "qcom,sdm450-qrd"
+compatible = "qcom,sdm632-rumi"
+compatible = "qcom,sdm632-cdp"
+compatible = "qcom,sdm632-mtp"
+compatible = "qcom,sdm632-qrd"
 compatible = "qcom,mdm9640-cdp"
 compatible = "qcom,mdm9640-mtp"
 compatible = "qcom,mdm9640-rumi"
diff --git a/Documentation/devicetree/bindings/arm/msm/rpmh-master-stat.txt b/Documentation/devicetree/bindings/arm/msm/rpmh-master-stat.txt
new file mode 100644
index 0000000..36e1a69
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/rpmh-master-stat.txt
@@ -0,0 +1,18 @@
+* RPMH Master Stats
+
+Different subsystems maintain master data in SMEM.
+It tells about the individual masters information at any given
+time like "system sleep counts", "system sleep last entered at"
+and "system sleep accumulated duration" etc. These stats can be
+shown to the user using the debugfs interface of the kernel.
+To achieve this, device tree node has been added.
+
+The required properties for rpmh-master-stats are:
+
+- compatible: "qcom,rpmh-master-stats".
+
+Example:
+
+qcom,rpmh-master-stats {
+	compatible = "qcom,rpmh-master-stats";
+};
diff --git a/Documentation/devicetree/bindings/cnss/cnss-wlan.txt b/Documentation/devicetree/bindings/cnss/cnss-wlan.txt
new file mode 100644
index 0000000..9dff08d
--- /dev/null
+++ b/Documentation/devicetree/bindings/cnss/cnss-wlan.txt
@@ -0,0 +1,75 @@
+* Qualcomm Technologies, Inc. ConNectivity SubSystem Platform Driver
+
+This platform driver adds support for the CNSS subsystem used for PCIe
+based Wi-Fi devices. It also adds support to integrate PCIe WLAN module
+to subsystem restart framework. Apart from that, it also manages the
+3.3V voltage regulator, WLAN Enable GPIO signal and PCIe link dynamically
+with support for suspend and resume by retaining the PCI config space
+states when PCIe link is shutdown. The main purpose of this device tree
+entry below is to invoke the CNSS platform driver and provide handle to
+the WLAN enable GPIO, 3.3V fixed voltage regulator resources. It also
+provides the reserved RAM dump memory location and size.
+
+Required properties:
+  - compatible: "qcom,cnss" for QCA6174 device
+                "qcom,cnss-qca6290" for QCA6290 device
+  - wlan-en-gpio: WLAN_EN GPIO signal specified by the chip specifications
+  - vdd-wlan-supply: phandle to the regulator device tree node
+  - pinctrl-names: Names corresponding to the numbered pinctrl states
+  - pinctrl-<n>: Pinctrl states as described in
+                 bindings/pinctrl/pinctrl-bindings.txt
+  - qcom,wlan-rc-num: PCIe root complex number which WLAN chip is attached to
+
+Optional properties:
+  - qcom,notify-modem-status: Boolean property to decide whether modem
+                              notification should be enabled or not in this
+                              platform
+  - wlan-soc-swreg-supply: phandle to the external 1.15V regulator for QCA6174
+  - wlan-ant-switch-supply: phandle to the 2.7V regulator for the antenna
+                            switch of QCA6174
+  - qcom,wlan-uart-access: Boolean property to decide whether QCA6174
+                           has exclusive access to UART.
+  - vdd-wlan-io-supply: phandle to the 1.8V IO regulator for QCA6174
+  - vdd-wlan-xtal-supply: phandle to the 1.8V XTAL regulator for QCA6174
+  - vdd-wlan-xtal-aon-supply: phandle to the LDO-4 regulator. This is needed
+                              on platforms where XTAL regulator depends on
+                              always on regulator in VDDmin.
+  - vdd-wlan-core-supply: phandle to the 1.3V CORE regulator for QCA6174
+  - vdd-wlan-sp2t-supply: phandle to the 2.7V SP2T regulator for QCA6174
+  - qcom,wlan-smmu-iova-address: I/O virtual address range as <start length>
+                                 format to be used for allocations associated
+                                 between WLAN/PCIe and SMMU
+  - qcom,wlan-ramdump-dynamic: To enable CNSS RAMDUMP collection
+                               by providing the size of CNSS DUMP
+  - reg: Memory regions defined as starting address and size
+  - reg-names: Names of the memory regions defined in reg entry
+  - wlan-bootstrap-gpio: WLAN_BOOTSTRAP GPIO signal specified by QCA6174
+                         which should be driven depending on platforms
+  - qcom,is-dual-wifi-enabled: Boolean property to control wlan enable(wlan-en)
+                               gpio on dual-wifi platforms.
+  - vdd-wlan-en-supply: WLAN_EN fixed regulator specified by QCA6174
+                        specifications.
+  - qcom,wlan-en-vreg-support: Boolean property to decide whether the
+                               WLAN_EN pin is a gpio or fixed regulator.
+  - qcom,mhi: phandle to indicate the device which needs MHI support.
+  - qcom,cap-tsf-gpio: WLAN_TSF_CAPTURED GPIO signal specified by the chip
+                       specifications, should be driven depending on products
+
+Example:
+
+    qcom,cnss@0d400000 {
+        compatible = "qcom,cnss";
+        reg = <0x0d400000 0x200000>;
+        reg-names = "ramdump";
+        qcom,wlan-ramdump-dynamic = <0x200000>;
+        wlan-en-gpio = <&msmgpio 82 0>;
+        vdd-wlan-supply = <&wlan_vreg>;
+        qcom,notify-modem-status;
+        wlan-soc-swreg-supply = <&pma8084_l27>;
+        pinctrl-names = "default";
+        pinctrl-0 = <&cnss_default>;
+        qcom,wlan-rc-num = <0>;
+        qcom,wlan-smmu-iova-address = <0 0x10000000>;
+        qcom,mhi = <&mhi_wlan>;
+        qcom,cap-tsf-gpio = <&tlmm 126 1>;
+    };
diff --git a/Documentation/devicetree/bindings/fb/adv7533.txt b/Documentation/devicetree/bindings/fb/adv7533.txt
new file mode 100644
index 0000000..b198f37
--- /dev/null
+++ b/Documentation/devicetree/bindings/fb/adv7533.txt
@@ -0,0 +1,54 @@
+ADV7533 DSI to HDMI bridge
+
+
+Required properties:
+- compatible:				Must be "adv7533"
+- reg:					Main I2C slave ID (for I2C host driver)
+- adi,video-mode:			Expects a number; possible inputs are 0 to 3, where:
+					3 = 1080p
+					2 = 720p
+					1 = 480p
+					0 = 1080p pattern
+- adi,main-addr:			Main I2C slave ID
+- adi,cec-dsi-addr:			CEC DSI I2C slave ID
+
+Optional properties:
+- adi,enable-audio:
+- adi,disable-gpios:
+- adi,irq-gpio:				Main IRQ gpio mapping
+- adi,hpd-irq-gpio:			HPD IRQ gpio mapping
+- adi,switch-gpio:			DSI switch gpio mapping
+- qcom,supply-names:			Regulator names that supply 5v to bridge chip
+- qcom,min-voltage-level		Minimum voltage level to be supplied to bridge chip
+- qcom,max-voltage-level		Maximum voltage level to be supplied to bridge chip
+- qcom,enable-load			Load current to bridge chip when enabled
+- qcom,disable-load			Load current to bridge chip when disabled
+- qcom,post-on-sleep			Sleep time (ms) to indicate the sleep
+					time after the vreg is enabled
+
+Example:
+&soc {
+	i2c@78b8000 {
+		adv7533@39 {
+			compatible = "adv7533";
+			reg = <0x39>;
+			adi,video-mode = <3>; /* 3 = 1080p */
+			adi,main-addr = <0x39>;
+			adi,cec-dsi-addr = <0x3C>;
+			adi,enable-audio;
+			pinctrl-names = "pmx_adv7533_active","pmx_adv7533_suspend";
+			pinctrl-0 = <&adv7533_int_active &adv7533_hpd_int_active &adv7533_switch_active>;
+			pinctrl-1 = <&adv7533_int_suspend &adv7533_hpd_int_suspend &adv7533_switch_suspend>;
+			adi,irq-gpio = <&msm_gpio 31 0x2002>;
+			adi,hpd-irq-gpio = <&msm_gpio 20 0x2003>;
+			adi,switch-gpio = <&msm_gpio 32 0x0>;
+			hpd-5v-en-supply = <&adv_vreg>;
+			qcom,supply-names = "hpd-5v-en";
+			qcom,min-voltage-level = <0>;
+			qcom,max-voltage-level = <0>;
+			qcom,enable-load = <0>;
+			qcom,disable-load = <0>;
+			qcom,post-on-sleep = <10>;
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/fb/lt8912.txt b/Documentation/devicetree/bindings/fb/lt8912.txt
new file mode 100644
index 0000000..daeb15f
--- /dev/null
+++ b/Documentation/devicetree/bindings/fb/lt8912.txt
@@ -0,0 +1,20 @@
+LT8912 DSI to HDMI bridge
+
+
+Required properties:
+- compatible:				Must be "lontium,lt8912"
+- reg:					Main I2C slave ID (for I2C host driver)
+
+Optional properties:
+- qcom,hdmi-reset:				Main reset gpio mapping
+
+Example:
+&soc {
+	i2c@78b8000 {
+		lt8912@48 {
+			compatible = "lontium,lt8912";
+			reg = <0x48>;
+			qcom,hdmi-reset = <&tlmm 64 0x0>;
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
index 608b426..493a1aa 100644
--- a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
@@ -12,6 +12,7 @@
 					This property specifies the version
 					for DSI HW that this panel will work with
 					"qcom,dsi-panel-v2" = DSI V2.0
+					"qcom,msm-dsi-v2" = DSI V2.0
 - status:        			This property applies to DSI V2 panels only.
 					This property should not be added for panels
 					that work based on version "V6.0"
@@ -37,8 +38,8 @@
 					"display_2" = DISPLAY_2
 - qcom,mdss-dsi-panel-timings:		An array of length 12 that specifies the PHY
 					timing settings for the panel.
-- qcom,mdss-dsi-panel-timings-8996:		An array of length 40 char that specifies the 8996 PHY lane
-					timing settings for the panel.
+- qcom,mdss-dsi-panel-timings-phy-v2:	An array of length 40 char that specifies the PHY version 2
+					lane timing settings for the panel.
 - qcom,mdss-dsi-on-command:		A byte stream formed by multiple dcs packets base on
 					qcom dsi controller protocol.
 					byte 0: dcs data type
@@ -61,9 +62,39 @@
 						 transmitted
 					byte 5, 6: 16 bits length in network byte order
 					byte 7 and beyond: number byte of payload
+- qcom,mdss-dsi-lp-mode-on:		This is used to enable display low persistence mode.
+					A byte stream formed by multiple dcs packets base on
+					qcom dsi controller protocol.
+					byte 0: dcs data type
+					byte 1: set to indicate this is an individual packet
+						 (no chain)
+					byte 2: virtual channel number
+					byte 3: expect ack from client (dcs read command)
+					byte 4: wait number of specified ms after dcs command
+						 transmitted
+					byte 5, 6: 16 bits length in network byte order
+					byte 7 and beyond: number byte of payload
+- qcom,mdss-dsi-lp-mode-off:		This is used to disable display low persistence mode.
+					A byte stream formed by multiple dcs packets base on
+					qcom dsi controller protocol.
+					byte 0: dcs data type
+					byte 1: set to indicate this is an individual packet
+						 (no chain)
+					byte 2: virtual channel number
+					byte 3: expect ack from client (dcs read command)
+					byte 4: wait number of specified ms after dcs command
+						 transmitted
+					byte 5, 6: 16 bits length in network byte order
+					byte 7 and beyond: number byte of payload
 - qcom,mdss-dsi-post-panel-on-command:	same as "qcom,mdss-dsi-on-command" except commands are
 					sent after displaying an image.
 
+- qcom,mdss-dsi-idle-on-command:	same as "qcom,mdss-dsi-on-command". Set of DCS command
+					used for idle mode entry.
+
+- qcom,mdss-dsi-idle-off-command:	same as "qcom,mdss-dsi-on-command". Set of DCS command
+					used for idle mode exit.
+
 Note, if a short DCS packet(i.e packet with Byte 0:dcs data type as 05) mentioned in
 qcom,mdss-dsi-on-command/qcom,mdss-dsi-off-command stream fails to transmit,
 then 3 options can be tried.
@@ -275,14 +306,10 @@
 					to the physical width in the framebuffer information.
 - qcom,mdss-pan-physical-height-dimension:	Specifies panel physical height in mm which corresponds
 					to the physical height in the framebuffer information.
-- qcom,mdss-dsi-mode-sel-gpio-state:	String that specifies the lcd mode for panel
-					(such as single-port/dual-port), if qcom,panel-mode-gpio
-					binding is defined in dsi controller.
-					"dual_port" = Set GPIO to LOW
-					"single_port" = Set GPIO to HIGH
+- qcom,mdss-dsi-panel-mode-gpio-state:	String that specifies the mode state for panel if it is defined
+					in dsi controller.
 					"high" = Set GPIO to HIGH
 					"low" = Set GPIO to LOW
-					The default value is "dual_port".
 - qcom,mdss-tear-check-disable:		Boolean to disable mdp tear check. Tear check is enabled by default to avoid
 					tearing. Other tear-check properties are ignored if this property is present.
 					The below tear check configuration properties can be individually tuned if
@@ -330,6 +357,28 @@
 					2A/2B command.
 - qcom,dcs-cmd-by-left:			Boolean to indicate that dcs command are sent
 					through the left DSI controller only in a dual-dsi configuration
+- qcom,mdss-dsi-panel-hdr-enabled:	Boolean to indicate HDR support in panel.
+- qcom,mdss-dsi-panel-hdr-color-primaries:
+					Array of 8 unsigned integers denoting chromaticity of panel. These
+					values are specified in nits units. The value range is 0 through 50000.
+					To obtain real chromacity, these values should be divided by factor of
+					50000. The structure of array is defined in below order
+						value 1: x value of white chromaticity of display panel
+						value 2: y value of white chromaticity of display panel
+						value 3: x value of red chromaticity of display panel
+						value 4: y value of red chromaticity of display panel
+						value 5: x value of green chromaticity of display panel
+						value 6: y value of green chromaticity of display panel
+						value 7: x value of blue chromaticity of display panel
+						value 8: y value of blue chromaticity of display panel
+- qcom,mdss-dsi-panel-peak-brightness:	Maximum brightness supported by panel. In absence of maximum value
+					typical value becomes peak brightness. Value is specified in nits units.
+					To obtain real peak brightness, this value should be divided by factor of
+					10000.
+- qcom,mdss-dsi-panel-blackness-level:	Blackness level supported by panel. Blackness level is defined as
+					ratio of peak brightness to contrast. Value is specified in nits units.
+					To obtain real blackness level, this value should be divided by factor of
+					10000.
 - qcom,mdss-dsi-lp11-init:		Boolean used to enable the DSI clocks and data lanes (low power 11)
 					before issuing hardware reset line.
 - qcom,mdss-dsi-init-delay-us:		Delay in microseconds(us) before performing any DSI activity in lp11
@@ -424,7 +473,11 @@
 					fields in the supply entry, refer to the qcom,ctrl-supply-entries
 					binding above.
 - qcom,config-select:			Optional property to select default configuration.
-
+- qcom,panel-allow-phy-poweroff:	A boolean property indicates that panel allows to turn off the phy power
+					supply during idle screen. A panel should able to handle the dsi lanes
+					in floating state(not LP00 or LP11) to turn on this property. Software
+					turns off PHY pmic power supply, phy ldo and DSI Lane ldo during
+					idle screen (footswitch control off) when this property is enabled.
 [[Optional config sub-nodes]]		These subnodes provide different configurations for a given same panel.
 					Default configuration can be chosen by specifying phandle of the
 					selected subnode in the qcom,config-select.
@@ -471,6 +524,7 @@
 					to a non-DSI interface.
 - qcom,bridge-name:			A string to indicate the name of the bridge chip connected to DSI. qcom,bridge-name
 					is required if qcom,dba-panel is defined for the panel.
+- qcom,hdmi-mode:			Indicates whether the current panel is in HDMI mode; otherwise, it will be DVI mode.
 - qcom,adjust-timer-wakeup-ms:		An integer value to indicate the timer delay(in ms) to accommodate
 					s/w delay while configuring the event timer wakeup logic.
 
@@ -493,6 +547,8 @@
 Note, if a given optional qcom,* binding is not present, then the driver will configure
 the default values specified.
 
+Note, all the "qcom,supply-*" properties have their definitions in mdss-dsi.txt.
+
 Example:
 &mdss_mdp {
 	dsi_sim_vid: qcom,mdss_dsi_sim_video {
@@ -538,7 +594,6 @@
 		qcom,mdss-dsi-underflow-color = <0xff>;
 		qcom,mdss-dsi-bl-min-level = <1>;
 		qcom,mdss-dsi-bl-max-level = < 15>;
-		qcom,mdss-brightness-max-level = <255>;
 		qcom,mdss-dsi-interleave-mode = <0>;
 		qcom,mdss-dsi-panel-type = "dsi_video_mode";
 		qcom,mdss-dsi-te-check-enable;
@@ -571,7 +626,7 @@
 		qcom,mdss-mdp-transfer-time-us = <12500>;
 		qcom,mdss-dsi-panel-timings = [7d 25 1d 00 37 33
 					22 27 1e 03 04 00];
-                qcom,mdss-dsi-panel-timings-8996 = [23 20 06 09 05 03 04 a0
+                qcom,mdss-dsi-panel-timings-phy-v2 = [23 20 06 09 05 03 04 a0
                                 23 20 06 09 05 03 04 a0
                                 23 20 06 09 05 03 04 a0
                                 23 20 06 09 05 03 04 a0
@@ -581,6 +636,9 @@
 		qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
 		qcom,mdss-dsi-off-command = [22 01 00 00 00 00 00];
 		qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+		qcom,mdss-dsi-lp-mode-on = [32 01 00 00 00 00 02 00 00
+					29 01 00 00 10 00 02 FF 99];
+		qcom,mdss-dsi-lp-mode-off = [22 01 00 00 00 00 00];
 		qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
 		qcom,mdss-dsi-pan-enable-dynamic-fps;
 		qcom,mdss-dsi-pan-fps-update = "dfps_suspend_resume_mode";
@@ -592,7 +650,7 @@
 		qcom,5v-boost-gpio = <&pm8994_gpios 14 0>;
 		qcom,mdss-pan-physical-width-dimension = <60>;
 		qcom,mdss-pan-physical-height-dimension = <140>;
-		qcom,mdss-dsi-mode-sel-gpio-state = "dsc_mode";
+		qcom,mdss-dsi-panel-mode-gpio-state = "low";
 		qcom,mdss-tear-check-sync-cfg-height = <0xfff0>;
 		qcom,mdss-tear-check-sync-init-val = <1280>;
 		qcom,mdss-tear-check-sync-threshold-start = <4>;
@@ -611,6 +669,7 @@
 		qcom,suspend-ulps-enabled;
 		qcom,panel-roi-alignment = <4 4 2 2 20 20>;
 		qcom,esd-check-enabled;
+		qcom,panel-allow-phy-poweroff;
 		qcom,mdss-dsi-panel-status-command = [06 01 00 01 05 00 02 0A 08];
 		qcom,mdss-dsi-panel-status-command-state = "dsi_lp_mode";
 		qcom,mdss-dsi-panel-status-check-mode = "reg_read";
@@ -682,6 +741,7 @@
 				qcom,supply-max-voltage = <2800000>;
 				qcom,supply-enable-load = <100000>;
 				qcom,supply-disable-load = <100>;
+				qcom,supply-ulp-load = <100>;
 				qcom,supply-pre-on-sleep = <0>;
 				qcom,supply-post-on-sleep = <0>;
 				qcom,supply-pre-off-sleep = <0>;
@@ -695,6 +755,7 @@
 				qcom,supply-max-voltage = <1800000>;
 				qcom,supply-enable-load = <100000>;
 				qcom,supply-disable-load = <100>;
+				qcom,supply-ulp-load = <100>;
 				qcom,supply-pre-on-sleep = <0>;
 				qcom,supply-post-on-sleep = <0>;
 				qcom,supply-pre-off-sleep = <0>;
diff --git a/Documentation/devicetree/bindings/fb/mdss-dsi.txt b/Documentation/devicetree/bindings/fb/mdss-dsi.txt
new file mode 100644
index 0000000..2f74f7f
--- /dev/null
+++ b/Documentation/devicetree/bindings/fb/mdss-dsi.txt
@@ -0,0 +1,261 @@
+Qualcomm mdss-dsi
+
+mdss-dsi is the master DSI device which supports multiple DSI host controllers that
+are compatible with MIPI display serial interface specification.
+
+Required properties:
+- compatible:				Must be "qcom,mdss-dsi"
+- hw-config:				Specifies the DSI host setup configuration
+					"hw-config" = "single_dsi"
+					"hw-config" = "dual_dsi"
+					"hw-config" = "split_dsi"
+- ranges:				The standard property which specifies the child address
+					space, parent address space and the length.
+- vdda-supply:				Phandle for vreg regulator device node.
+
+Bus Scaling Data:
+- qcom,msm-bus,name:		String property describing MDSS client.
+- qcom,msm-bus,num-cases:	This is the number of bus scaling use cases
+				defined in the vectors property. This must be
+				set to <2> for MDSS DSI driver where use-case 0
+				is used to remove BW votes from the system. Use
+				case 1 is used to generate bandwidth requests
+				when sending command packets.
+- qcom,msm-bus,num-paths:	This represents number of paths in each bus
+				scaling usecase. This value depends on number of
+				AXI master ports dedicated to MDSS for
+				particular chipset.
+- qcom,msm-bus,vectors-KBps:	A series of 4 cell properties, with a format
+				of (src, dst, ab, ib) which is defined at
+				Documentation/devicetree/bindings/arm/msm/msm_bus.txt.
+				DSI driver should always set average bandwidth
+				(ab) to 0 and always use instantaneous
+				bandwidth(ib) values.
+
+Optional properties:
+- vcca-supply:				Phandle for vcca regulator device node.
+- qcom,<type>-supply-entries:		A node that lists the elements of the supply used by
+					a particular "type" of DSI module. The module "types"
+					can be "core", "ctrl", and "phy". Within the same type,
+					there can be more than one instance of this binding,
+					in which case the entry would be appended with the
+					supply entry index.
+					e.g. qcom,ctrl-supply-entry@0
+					-- qcom,supply-name: name of the supply (vdd/vdda/vddio)
+					-- qcom,supply-min-voltage: minimum voltage level (uV)
+					-- qcom,supply-max-voltage: maximum voltage level (uV)
+					-- qcom,supply-enable-load: load drawn (uA) from enabled supply
+					-- qcom,supply-disable-load: load drawn (uA) from disabled supply
+					-- qcom,supply-ulp-load: load drawn (uA) from supply in ultra-low power mode
+					-- qcom,supply-pre-on-sleep: time to sleep (ms) before turning on
+					-- qcom,supply-post-on-sleep: time to sleep (ms) after turning on
+					-- qcom,supply-pre-off-sleep: time to sleep (ms) before turning off
+					-- qcom,supply-post-off-sleep: time to sleep (ms) after turning off
+- pll-src-config			Specifies the source PLL for the DSI
+					link clocks:
+					"PLL0" - Clocks sourced out of DSI PLL0
+					"PLL1" - Clocks sourced out of DSI PLL1
+					This property is only valid for
+					certain DSI hardware configurations
+					mentioned in the "hw-config" binding above.
+					For example, in split_dsi config, the clocks can
+					only be sourced out of PLL0. For
+					dual_dsi, both PLL would be active.
+					For single DSI, it is possible to
+					select either PLL. If no value is specified,
+					the default value for single DSI is set as PLL0.
+- qcom,mmss-ulp-clamp-ctrl-offset:	Specifies the offset for dsi ulps clamp control register.
+- qcom,mmss-phyreset-ctrl-offset:	Specifies the offset for dsi phy reset control register.
+- qcom,dsi-clk-ln-recovery:		Boolean which enables the clk lane recovery
+
+mdss-dsi-ctrl is a dsi controller device which is treated as a subnode of the mdss-dsi device.
+
+Required properties:
+- compatible:				Must be "qcom,mdss-dsi-ctrl"
+- cell-index:				Specifies the controller used among the two controllers.
+- reg: 					Base address and length of the different register
+					regions(s) required for DSI device functionality.
+- reg-names: 				A list of strings that map in order to the list of regs.
+					"dsi_ctrl" - MDSS DSI controller register region
+					"dsi_phy" - MDSS DSI PHY register region
+					"dsi_phy_regulator" - MDSS DSI PHY REGULATOR region
+					"mmss_misc_phys" - Register region for MMSS DSI clamps
+- vdd-supply:				Phandle for vdd regulator device node.
+- vddio-supply:				Phandle for vdd-io regulator device node.
+- qcom,mdss-fb-map-prim:		pHandle that specifies the framebuffer to which the
+					primary interface is mapped.
+- qcom,mdss-mdp:			pHandle that specifies the mdss-mdp device.
+- qcom,platform-regulator-settings:	An array of length 7 or 5 that specifies the PHY
+					regulator settings. It use 5 bytes for 8996 pll.
+- qcom,platform-strength-ctrl:		An array of length 2 or 10 that specifies the PHY
+					strengthCtrl settings. It use 10 bytes for 8996 pll.
+- qcom,platform-lane-config:		An array of length 45 or 20 that specifies the PHY
+					lane configuration settings. It use 20 bytes for 8996 pll.
+- qcom,platform-bist-ctrl:		An array of length 6 that specifies the PHY
+					BIST ctrl settings.
+- qcom,dsi-pref-prim-pan:		phandle that specifies the primary panel to be used
+					with the controller.
+
+Optional properties:
+- label:		        	A string used to describe the controller used.
+- qcom,mdss-fb-map:			pHandle that specifies the framebuffer to which the
+					interface is mapped.
+- qcom,mdss-fb-map-sec:			pHandle that specifies the framebuffer to which the
+					secondary interface is mapped.
+- qcom,platform-enable-gpio:		Specifies the panel lcd/display enable gpio.
+- qcom,platform-reset-gpio:		Specifies the panel reset gpio.
+- qcom,platform-te-gpio:		Specifies the gpio used for TE.
+- qcom,platform-bklight-en-gpio:	Specifies the gpio used to enable display back-light
+- qcom,platform-mode-gpio:		Select video/command mode of panel through gpio when it supports
+					both modes.
+- qcom,platform-intf-mux-gpio:		Select dsi/external(hdmi) interface through gpio when it supports
+					either dsi or external interface.
+- pinctrl-names:			List of names to assign mdss pin states defined in pinctrl device node
+					Refer to pinctrl-bindings.txt
+- pinctrl-<0..n>:			Lists phandles each pointing to the pin configuration node within a pin
+					controller. These pin configurations are installed in the pinctrl
+					device node. Refer to pinctrl-bindings.txt
+- qcom,regulator-ldo-mode:		Boolean to enable ldo mode for the dsi phy regulator
+- qcom,null-insertion-enabled:		Boolean to enable NULL packet insertion
+					feature for DSI controller.
+- qcom,dsi-irq-line:			Boolean specifies if DSI has a different irq line than mdp.
+- qcom,lane-map:			Specifies the data lane swap configuration.
+					"lane_map_0123" = <0 1 2 3> (default value)
+					"lane_map_3012" = <3 0 1 2>
+					"lane_map_2301" = <2 3 0 1>
+					"lane_map_1230" = <1 2 3 0>
+					"lane_map_0321" = <0 3 2 1>
+					"lane_map_1032" = <1 0 3 2>
+					"lane_map_2103" = <2 1 0 3>
+					"lane_map_3210" = <3 2 1 0>
+- qcom,pluggable			Boolean to enable hotplug feature.
+- qcom,timing-db-mode:			Boolean specifies dsi timing mode registers are supported or not.
+- qcom,display-id			A string indicates the display ID for the controller.
+					The possible values are:
+					- "primary"
+					- "secondary"
+					- "tertiary"
+- qcom,bridge-index:			Instance id of the bridge chip connected to DSI. qcom,bridge-index is
+					required if a bridge chip panel is used.
+
+Example:
+	mdss_dsi: qcom,mdss_dsi@0 {
+		compatible = "qcom,mdss-dsi";
+		hw-config = "single_dsi";
+		pll-src-config = "PLL0";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		vdda-supply = <&pm8226_l4>;
+		vcca-supply = <&pm8226_l28>;
+		reg = <0x1a98000 0x1a98000 0x25c
+		      0x1a98500 0x1a98500 0x280
+		      0x1a98780 0x1a98780 0x30
+		      0x193e000 0x193e000 0x30>;
+
+		qcom,dsi-clk-ln-recovery;
+
+		qcom,core-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,core-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "gdsc";
+				qcom,supply-min-voltage = <0>;
+				qcom,supply-max-voltage = <0>;
+				qcom,supply-enable-load = <0>;
+				qcom,supply-disable-load = <0>;
+				qcom,supply-ulp-load = <0>;
+				qcom,supply-pre-on-sleep = <0>;
+				qcom,supply-post-on-sleep = <0>;
+				qcom,supply-pre-off-sleep = <0>;
+				qcom,supply-post-off-sleep = <0>;
+			};
+		};
+
+		qcom,phy-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,phy-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "vddio";
+				qcom,supply-min-voltage = <1800000>;
+				qcom,supply-max-voltage = <1800000>;
+				qcom,supply-enable-load = <100000>;
+				qcom,supply-disable-load = <100>;
+				qcom,supply-ulp-load = <100>;
+				qcom,supply-pre-on-sleep = <0>;
+				qcom,supply-post-on-sleep = <20>;
+				qcom,supply-pre-off-sleep = <0>;
+				qcom,supply-post-off-sleep = <0>;
+			};
+		};
+
+		qcom,ctrl-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,ctrl-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "vdda";
+				qcom,supply-min-voltage = <1200000>;
+				qcom,supply-max-voltage = <1200000>;
+				qcom,supply-enable-load = <100000>;
+				qcom,supply-disable-load = <100>;
+				qcom,supply-ulp-load = <1000>;
+				qcom,supply-pre-on-sleep = <0>;
+				qcom,supply-post-on-sleep = <20>;
+				qcom,supply-pre-off-sleep = <0>;
+				qcom,supply-post-off-sleep = <0>;
+			};
+		};
+
+		mdss_dsi0: mdss_dsi_ctrl0@fd922800 {
+			compatible = "qcom,mdss-dsi-ctrl";
+			label = "MDSS DSI CTRL->0";
+			cell-index = <0>;
+			reg = 	<0xfd922800 0x1f8>,
+				<0xfd922b00 0x2b0>,
+				<0xfd998780 0x30>,
+				<0xfd828000 0x108>;
+			reg-names = "dsi_ctrl", "dsi_phy",
+				"dsi_phy_regulator", "mmss_misc_phys";
+
+			vdd-supply = <&pm8226_l15>;
+			vddio-supply = <&pm8226_l8>;
+			qcom,mdss-fb-map-prim = <&mdss_fb0>;
+			qcom,mdss-mdp = <&mdss_mdp>;
+
+			qcom,dsi-pref-prim-pan = <&dsi_tosh_720_vid>;
+
+			qcom,platform-strength-ctrl = [ff 06];
+			qcom,platform-bist-ctrl = [00 00 b1 ff 00 00];
+			qcom,platform-regulator-settings = [07 09 03 00 20 00 01];
+			qcom,platform-lane-config = [00 00 00 00 00 00 00 01 97
+				00 00 00 00 05 00 00 01 97
+				00 00 00 00 0a 00 00 01 97
+				00 00 00 00 0f 00 00 01 97
+				00 c0 00 00 00 00 00 01 bb];
+
+			qcom,mmss-ulp-clamp-ctrl-offset = <0x20>;
+			qcom,mmss-phyreset-ctrl-offset = <0x24>;
+			qcom,regulator-ldo-mode;
+			qcom,null-insertion-enabled;
+			qcom,timing-db-mode;
+
+			pinctrl-names = "mdss_default", "mdss_sleep";
+			pinctrl-0 = <&mdss_dsi_active>;
+			pinctrl-1 = <&mdss_dsi_suspend>;
+			qcom,platform-reset-gpio = <&msmgpio 25 1>;
+			qcom,platform-te-gpio = <&msmgpio 24 0>;
+			qcom,platform-enable-gpio = <&msmgpio 58 1>;
+			qcom,platform-bklight-en-gpio = <&msmgpio 86 0>;
+			qcom,platform-mode-gpio = <&msmgpio 7 0>;
+			qcom,platform-intf-mux-gpio = <&tlmm 115 0>;
+			qcom,dsi-irq-line;
+			qcom,lane-map = "lane_map_3012";
+			qcom,display-id = "primary";
+			qcom,bridge-index = <00>;
+	        };
+	};
diff --git a/Documentation/devicetree/bindings/fb/mdss-edp.txt b/Documentation/devicetree/bindings/fb/mdss-edp.txt
new file mode 100644
index 0000000..c474b88
--- /dev/null
+++ b/Documentation/devicetree/bindings/fb/mdss-edp.txt
@@ -0,0 +1,52 @@
+Qualcomm MDSS EDP
+
+MDSS EDP is an eDP driver which supports panels that are compatible with
+VESA EDP display interface specification.
+
+When configuring the optional properties for external backlight, one should also
+configure the gpio that drives the pwm to it.
+
+Required properties
+- compatible :				Must be "qcom,mdss-edp".
+- reg :						Offset and length of the register set for the
+							device.
+- reg-names :				Names to refer to register sets related to this
+							device
+- vdda-supply :				Phandle for vdd regulator device node.
+- gpio-panel-en	:			GPIO for supplying power to panel and backlight
+							driver.
+- gpio-lvl-en	:			GPIO to enable HPD to be received by the host.
+- status :				A string that has to be set to "okay/ok" to enable
+						the driver. By default this property will be set to
+						"disable". Will be set to "ok/okay" status for
+						specific platforms.
+- qcom,mdss-fb-map:			pHandle that specifies the framebuffer to which the
+					interface is mapped.
+- gpio-panel-hpd :			gpio pin use for edp hpd
+
+Optional properties
+- qcom,panel-lpg-channel :		LPG channel for backlight.
+- qcom,panel-pwm-period :		PWM period in microseconds.
+
+
+Optional properties:
+- qcom,mdss-brightness-max-level:	Specifies the max brightness level supported.
+					255 = default value.
+
+Example:
+	mdss_edp: qcom,mdss_edp@fd923400 {
+		compatible = "qcom,mdss-edp";
+		reg = <0xfd923400 0x700>,
+			<0xfd8c2000 0x1000>;
+		reg-names = "edp_base", "mmss_cc_base";
+		vdda-supply = <&pm8941_l12>;
+		gpio-panel-en = <&msmgpio 58 0>;
+		gpio-lvl-en = <&msmgpio 91 0>;
+		qcom,panel-lpg-channel = <7>; /* LPG Channel 8 */
+		qcom,panel-pwm-period = <53>;
+		status = "disable";
+		qcom,mdss-fb-map = <&mdss_fb0>;
+		gpio-panel-hpd = <&msmgpio 102 0>;
+	};
+
+
diff --git a/Documentation/devicetree/bindings/fb/mdss-mdp.txt b/Documentation/devicetree/bindings/fb/mdss-mdp.txt
new file mode 100644
index 0000000..e33d358
--- /dev/null
+++ b/Documentation/devicetree/bindings/fb/mdss-mdp.txt
@@ -0,0 +1,898 @@
+Qualcomm MDSS MDP
+
+MDSS is Mobile Display SubSystem which implements Linux framebuffer APIs to
+drive user interface to different panel interfaces. MDP driver is the core of
+MDSS which manage all data paths to different panel interfaces.
+
+Required properties
+- compatible :		Must be "qcom,mdss_mdp"
+			- "qcom,mdss_mdp3" for mdp3
+- reg :			offset and length of the register set for the device.
+- reg-names :		names to refer to register sets related to this device
+- interrupts :		Interrupt associated with MDSS.
+- interrupt-controller:	Mark the device node as an interrupt controller.
+			This is an empty, boolean property.
+- #interrupt-cells: 	Should be one. The first cell is interrupt number.
+- vdd-supply :		Phandle for vdd regulator device node.
+- qcom,max-clk-rate:	Specify maximum MDP core clock rate in hz that this
+			device supports.
+- qcom,mdss-pipe-vig-off:	Array of offset for MDP source surface pipes of
+				type VIG, the offsets are calculated from
+				register "mdp_phys" defined in reg property.
+				The number of offsets defined here should
+				reflect the amount of VIG pipes that can be
+				active in MDP for this configuration.
+- qcom,mdss-pipe-vig-fetch-id:	Array of shared memory pool fetch ids
+				corresponding to the VIG pipe offsets defined in
+				previous property, the amount of fetch ids
+				defined should match the number of offsets
+				defined in property: qcom,mdss-pipe-vig-off
+- qcom,mdss-pipe-vig-xin-id:	Array of VBIF clients ids (xins) corresponding
+				to the respective VIG pipes. Number of xin ids
+				defined should match the number of offsets
+				defined in property: qcom,mdss-pipe-vig-off
+- qcom,mdss-pipe-vig-clk-ctrl-off: Array of offsets describing clk control
+				offsets for dynamic clock gating. 1st value
+				in the array represents offset of the control
+				register. 2nd value represents bit offset within
+				control register and 3rd value represents bit
+				offset within status register. Number of tuples
+				defined should match the number of offsets
+				defined in property: qcom,mdss-pipe-vig-off
+- qcom,mdss-pipe-rgb-off:	Array of offsets for MDP source surface pipes of
+				type RGB, the offsets are calculated from
+				register "mdp_phys" defined in reg property.
+				The number of offsets defined here should
+				reflect the amount of RGB pipes that can be
+				active in MDP for this configuration.
+- qcom,mdss-pipe-rgb-fetch-id:	Array of shared memory pool fetch ids
+				corresponding to the RGB pipe offsets defined in
+				previous property, the amount of fetch ids
+				defined should match the number of offsets
+				defined in property: qcom,mdss-pipe-rgb-off
+- qcom,mdss-pipe-rgb-xin-id:	Array of VBIF clients ids (xins) corresponding
+				to the respective RGB pipes. Number of xin ids
+				defined should match the number of offsets
+				defined in property: qcom,mdss-pipe-rgb-off
+- qcom,mdss-pipe-rgb-clk-ctrl-off: Array of offsets describing clk control
+				offsets for dynamic clock gating. 1st value
+				in the array represents offset of the control
+				register. 2nd value represents bit offset within
+				control register and 3rd value represents bit
+				offset within status register. Number of tuples
+				defined should match the number of offsets
+				defined in property: qcom,mdss-pipe-rgb-off
+- qcom,mdss-pipe-dma-off:	Array of offsets for MDP source surface pipes of
+				type DMA, the offsets are calculated from
+				register "mdp_phys" defined in reg property.
+				The number of offsets defined here should
+				reflect the amount of DMA pipes that can be
+				active in MDP for this configuration.
+- qcom,mdss-pipe-dma-fetch-id:	Array of shared memory pool fetch ids
+				corresponding to the DMA pipe offsets defined in
+				previous property, the amount of fetch ids
+				defined should match the number of offsets
+				defined in property: qcom,mdss-pipe-dma-off
+- qcom,mdss-pipe-dma-xin-id:	Array of VBIF clients ids (xins) corresponding
+				to the respective DMA pipes. Number of xin ids
+				defined should match the number of offsets
+				defined in property: qcom,mdss-pipe-dma-off
+- qcom,mdss-pipe-dma-clk-ctrl-off: Array of offsets describing clk control
+				offsets for dynamic clock gating. 1st value
+				in the array represents offset of the control
+				register. 2nd value represents bit offset within
+				control register and 3rd value represents bit
+				offset within status register. Number of tuples
+				defined should match the number of offsets
+				defined in property: qcom,mdss-pipe-dma-off
+- qcom,mdss-pipe-cursor-off:	Array of offsets for MDP source surface pipes of
+				type cursor, the offsets are calculated from
+				register "mdp_phys" defined in reg property.
+				The number of offsets defined here should
+				reflect the amount of cursor pipes that can be
+				active in MDP for this configuration. Meant for
+				hardware that has hw cursors support as a
+				source pipe.
+- qcom,mdss-pipe-cursor-xin-id:	Array of VBIF clients ids (xins) corresponding
+				to the respective cursor pipes. Number of xin ids
+				defined should match the number of offsets
+				defined in property: qcom,mdss-pipe-cursor-off
+- qcom,mdss-pipe-cursor-clk-ctrl-off: Array of offsets describing clk control
+				offsets for dynamic clock gating. 1st value
+				in the array represents offset of the control
+				register. 2nd value represents bit offset within
+				control register and 3rd value represents bit
+				offset within status register. Number of tuples
+				defined should match the number of offsets
+				defined in property: qcom,mdss-pipe-cursor-off
+- qcom,mdss-ctl-off:		Array of offset addresses for the available ctl
+				hw blocks within MDP, these offsets are
+				calculated from register "mdp_phys" defined in
+				reg property.  The number of ctl offsets defined
+				here should reflect the number of control paths
+				that can be configured concurrently on MDP for
+				this configuration.
+- qcom,mdss-wb-off:		Array of offset addresses for the programmable
+				writeback blocks within MDP. The number of
+				offsets defined should match the number of ctl
+				blocks defined in property: qcom,mdss-ctl-off
+- qcom,mdss-mixer-intf-off: 	Array of offset addresses for the available
+				mixer blocks that can drive data to panel
+				interfaces.
+				These offsets are calculated from register
+				"mdp_phys" defined in reg property.
+				The number of offsets defined should reflect the
+				amount of mixers that can drive data to a panel
+				interface.
+- qcom,mdss-dspp-off: 		Array of offset addresses for the available dspp
+				blocks. These offsets are calculated from
+				register "mdp_phys" defined in reg property.
+				The number of dspp blocks should match the
+				number of mixers driving data to interface
+				defined in property: qcom,mdss-mixer-intf-off
+- qcom,mdss-pingpong-off:	Array of offset addresses for the available
+				pingpong blocks. These offsets are calculated
+				from register "mdp_phys" defined in reg property.
+				The number of pingpong blocks should match the
+				number of mixers driving data to interface
+				defined in property: qcom,mdss-mixer-intf-off
+- qcom,mdss-mixer-wb-off: 	Array of offset addresses for the available
+				mixer blocks that can drive data to a writeback
+				block.  These offsets will be calculated from
+				register "mdp_phys" defined in reg property.
+				The number of writeback mixer offsets defined
+				should reflect the number of mixers that can
+				drive data to a writeback block.
+- qcom,mdss-intf-off:		Array of offset addresses for the available MDP
+				video interface blocks that can drive data to a
+				panel controller through timing engine.
+				The offsets are calculated from "mdp_phys"
+				defined in reg property. The number of offsets
+				defined should reflect the number of programmable
+				interface blocks available in hardware.
+- qcom,mdss-pref-prim-intf:	A string which indicates the configured hardware
+				interface between MDP and the primary panel.
+				Individual panel controller drivers initialize
+				hardware based on this property.
+				Based on the interfaces supported at present,
+				possible values are:
+				- "dsi"
+				- "edp"
+				- "hdmi"
+
+Bus Scaling Data:
+- qcom,msm-bus,name:		String property describing MDSS client.
+- qcom,msm-bus,num-cases:	This is the number of Bus Scaling use cases
+				defined in the vectors property. This must be
+				set to <3> for MDSS driver where use-case 0 is
+				used to take off MDSS BW votes from the system.
+				And use-case 1 & 2 are used in ping-pong fashion
+				to generate run-time BW requests.
+- qcom,msm-bus,active-only:	A boolean flag indicating if it is active only.
+- qcom,msm-bus,num-paths:	This represents the number of paths in each
+				Bus Scaling Usecase. This value depends on
+				how many AXI master ports are
+				dedicated to MDSS for particular chipset. This
+				value represents the RT + NRT AXI master ports.
+- qcom,msm-bus,vectors-KBps:	* A series of 4 cell properties, with a format
+				of (src, dst, ab, ib) which is defined at
+				Documentation/devicetree/bindings/arm/msm/msm_bus.txt
+				* Current values of src & dst are defined at
+				include/linux/msm-bus-board.h
+				src values allowed for MDSS are:
+					22 = MSM_BUS_MASTER_MDP_PORT0
+					23 = MSM_BUS_MASTER_MDP_PORT1
+					25 = MSM_BUS_MASTER_ROTATOR
+				dst values allowed for MDSS are:
+					512 = MSM_BUS_SLAVE_EBI_CH0
+				ab: Represents aggregated bandwidth.
+				ib: Represents instantaneous bandwidth.
+				* Total number of 4 cell properties will be
+				(number of use-cases * number of paths).
+				* These values will be overridden by the driver
+				based on the run-time requirements. So initial
+				ab and ib values defined here are random and
+				bear no logic except for the use-case 0 where ab
+				and ib values needs to be 0.
+				* Define realtime vector properties followed by
+				non-realtime vector properties.
+
+- qcom,mdss-prefill-outstanding-buffer-bytes: The size of mdp outstanding buffer
+				in bytes. The buffer is filled during prefill
+				time and the buffer size shall be included in
+				prefill bandwidth calculation.
+- qcom,mdss-prefill-y-buffer-bytes: The size of mdp y plane buffer in bytes. The
+				buffer is filled during prefill time when format
+				is YUV and the buffer size shall be included in
+				prefill bandwidth calculation.
+- qcom,mdss-prefill-scaler-buffer-lines-bilinear: The value indicates how many lines
+				of scaler line buffer need to be filled during
+				prefill time. If bilinear scalar is enabled, then this
+				number of lines is used to determine how many bytes
+				of scaler buffer to be included in prefill bandwidth
+				calculation.
+- qcom,mdss-prefill-scaler-buffer-lines-caf: The value indicates how many lines
+				of scaler line buffer need to be filled during
+				prefill time. If CAF mode filter is enabled, then
+				this number of lines is used to determine how many
+				bytes of scaler buffer to be included in prefill
+				bandwidth calculation.
+- qcom,mdss-prefill-post-scaler-buffer: The size of post scaler buffer in bytes.
+				The buffer is used to smooth the output of the
+				scaler. If the buffer is present in h/w, it is
+				filled during prefill time and the number of bytes
+				shall be included in prefill bandwidth calculation.
+- qcom,mdss-prefill-pingpong-buffer-pixels: The size of pingpong buffer in pixels.
+				The buffer is used to keep pixels flowing to the
+				panel interface. If the vertical start position of a
+				layer is in the beginning of the active area, pingpong
+				buffer must be filled during prefill time to generate
+				starting lines. The number of bytes to be filled is
+				determined by the line width, starting position,
+				byte per pixel and scaling ratio, this number shall be
+				included in prefill bandwidth calculation.
+- qcom,mdss-prefill-fbc-lines:  The value indicates how many lines are required to fill
+				fbc buffer during prefill time if FBC (Frame Buffer
+				Compressor) is enabled. The number of bytes to be filled
+				is determined by the line width, bytes per pixel and
+				scaling ratio, this number shall be included in prefill bandwidth
+				calculation.
+- qcom,max-mixer-width:	Specify maximum MDP mixer width that the device supports.
+				This is a mandatory property, if not specified then
+				mdp probe will fail.
+
+Optional properties:
+- batfet-supply :	Phandle for battery FET regulator device node.
+- vdd-cx-supply :	Phandle for vdd CX regulator device node.
+- qcom,vbif-settings :	Array with key-value pairs of constant VBIF register
+			settings used to setup MDSS QoS for optimum performance.
+			The key used should be offset from "vbif_phys" register
+			defined in reg property.
+- qcom,vbif-nrt-settings : The key used should be offset from "vbif_nrt_phys"
+			register defined in reg property. Refer qcom,vbif-settings
+			for a detailed description of this binding.
+- qcom,mdp-settings :	Array with key-value pairs of constant MDP register
+			settings used to setup MDSS QoS for best performance.
+			The key used should be offset from "mdp_phys" register
+			defined in reg property.
+- qcom,mdss-smp-data:	Array of shared memory pool data for dynamic SMP. There
+			should be only two values in this property. The first
+			value corresponds to the number of smp blocks and the
+			second is the size of each block present in the mdss
+			hardware. This property is optional for MDP hardware
+			with fixed pixel latency RAM.
+- qcom,mdss-rot-block-size:	The size of a memory block (in pixels) to be used
+				by the rotator. If this property is not specified,
+				then a default value of 128 pixels would be used.
+- qcom,mdss-has-bwc: Boolean property to indicate the presence of bandwidth
+		      compression feature in the rotator.
+- qcom,mdss-has-non-scalar-rgb: Boolean property to indicate the presence of RGB
+				pipes which have no scaling support.
+- qcom,mdss-has-decimation: Boolean property to indicate the presence of
+			    decimation feature in fetch.
+- qcom,mdss-has-fixed-qos-arbiter-enabled: Boolean property to indicate the
+			    presence of rt/nrt feature. This feature enables
+			    increased performance by prioritizing the real time
+			    (rt) traffic over non real time (nrt) traffic to
+			    access the memory.
+- qcom,mdss-num-nrt-paths: Integer property represents the number of non-realtime
+			    paths in each Bus Scaling Usecase. This value depends on
+			    the number of AXI ports dedicated to non-realtime VBIF for a
+			    particular chipset. This property is mandatory when
+			    "qcom,mdss-has-fixed-qos-arbiter-enabled" is enabled.
+			    These paths must be defined after rt-paths in
+			    "qcom,msm-bus,vectors-KBps" vector request.
+- qcom,mdss-has-source-split: Boolean property to indicate if source split
+			      feature is available or not.
+- qcom,mdss-has-rotator-downscale: Boolean property to indicate if rotator
+				   downscale feature is available or not.
+- qcom,mdss-rot-downscale-min: This integer value indicates the Minimum
+				downscale factor supported by rotator.
+- qcom,mdss-rot-downscale-max: This integer value indicates the Maximum
+				downscale factor supported by rotator.
+- qcom,mdss-ad-off:		Array of offset addresses for the available
+				Assertive Display (AD) blocks. These offsets
+				are calculated from the register "mdp_phys"
+				defined in reg property. The number of AD
+				offsets should be less than or equal to the
+				number of mixers driving interfaces defined in
+				property: qcom,mdss-mixer-intf-off. Assumes
+				that AD blocks are aligned with the mixer
+				offsets as well (i.e. the first mixer offset
+				corresponds to the same pathway as the first
+				AD offset).
+- qcom,mdss-has-wb-ad: Boolean property to indicate assertive display feature
+				support on write back framebuffer.
+- qcom,mdss-no-lut-read: 	Boolean property to indicate reading of LUT is
+				not supported.
+- qcom,mdss-no-hist-vote:	Boolean property to indicate histogram reads
+				and histogram LUT writes do not need additional
+				bandwidth voting.
+- qcom,mdss-mdp-wfd-mode:	A string that specifies what is the mode of
+				   writeback wfd block.
+				"intf" = Writeback wfd block is
+				   connected to the interface mixer.
+				"shared" = Writeback block shared
+				   between wfd and rotator.
+				"dedicated" = Dedicated writeback
+				   block for wfd using writeback mixer.
+- qcom,mdss-smp-mb-per-pipe:	Maximum number of shared memory pool blocks
+				restricted for a source surface pipe. If this
+				property is not specified, no such restriction
+				would be applied.
+- qcom,mdss-highest-bank-bit: Property to indicate tile format as opposed to usual
+				linear format. The value tells the GPU highest memory
+				 bank bit used.
+- qcom,mdss-pipe-rgb-fixed-mmb: Array of indexes describing fixed Memory Macro
+				Blocks (MMBs) for rgb pipes. First value denotes
+				total numbers of MMBs per pipe while values, if
+				any, following first one denotes indexes of MMBs
+				to that RGB pipe.
+- qcom,mdss-pipe-vig-fixed-mmb: Array of indexes describing fixed Memory Macro
+				Blocks (MMBs) for vig pipes. First value denotes
+				total numbers of MMBs per pipe while values, if
+				any, following first one denotes indexes of MMBs
+				to that VIG pipe.
+- qcom,mdss-pipe-sw-reset-off: Property to indicate offset to the register which
+			       holds sw_reset bitmap for different MDSS
+			       components.
+- qcom,mdss-pipe-vig-sw-reset-map: Array of bit offsets for vig pipes within
+				   sw_reset register bitmap. Number of offsets
+				   defined should match the number of offsets
+				   defined in property: qcom,mdss-pipe-vig-off
+- qcom,mdss-pipe-rgb-sw-reset-map: Array of bit offsets for rgb pipes within
+				   sw_reset register bitmap. Number of offsets
+				   defined should match the number of offsets
+				   defined in property: qcom,mdss-pipe-rgb-off
+- qcom,mdss-pipe-dma-sw-reset-map: Array of bit offsets for dma pipes within
+				   sw_reset register bitmap. Number of offsets
+				   defined should match the number of offsets
+				   defined in property: qcom,mdss-pipe-dma-off
+- qcom,mdss-default-ot-wr-limit: This integer value indicates maximum number of pending
+				writes that can be allowed on each WR xin.
+				This value can be used to reduce the pending writes
+				limit and can be tuned to match performance
+				requirements depending upon system state.
+				Some platforms require a dynamic ot limiting in
+				some cases. Setting this default ot write limit
+				will enable this dynamic limiting for the write
+				operations in the platforms that require these
+				limits.
+- qcom,mdss-default-ot-rd-limit: This integer value indicates the default number of pending
+				reads that can be allowed on each RD xin.
+				Some platforms require a dynamic ot limiting in
+				some cases. Setting this default ot read limit
+				will enable this dynamic limiting for the read
+				operations in the platforms that require these
+				limits.
+- qcom,mdss-clk-levels:		This array indicates the mdp core clock level selection
+				array. Core clock is calculated for each frame and
+				hence depending upon calculated value, clock rate
+				will be rounded up to the next level according to
+				this table. Order of entries need to be ordered in
+				ascending order.
+- qcom,mdss-vbif-qos-rt-setting: This array is used to program vbif qos remapper register
+				 priority for real time clients.
+- qcom,mdss-vbif-qos-nrt-setting: This array is used to program vbif qos remapper register
+				  priority for non real time clients.
+- qcom,mdss-traffic-shaper-enabled: This boolean property enables traffic shaper functionality
+				    for MDSS rotator which spread out rotator bandwidth request
+				    so that rotator don't compete with other real time read
+				    clients.
+- qcom,mdss-dram-channels:	This represents the number of channels in the
+				Bus memory controller.
+- qcom,regs-dump-mdp:		This array represents the registers offsets that
+				will be dumped from the mdp when the debug logging
+				is enabled; each entry in the table is an start and
+				end offset from the MDP address "mdp_phys", the
+				format of each entry is as follows:
+				<start_offset end_offset>
+				Ex:
+				<0x01000 0x01404>
+				Will dump the MDP registers
+				from the address: "mdp_phys + 0x01000"
+				to the address: "mdp_phys + 0x01404"
+- qcom,regs-dump-names-mdp:	This array represents the tag that will be used
+				for each of the entries defined within regs-dump.
+				Note that each tag matches with one of the
+				regs-dump entries in the same order as they
+				are defined.
+- qcom,regs-dump-xin-id-mdp:	Array of VBIF clients ids (xins) corresponding
+				to mdp block. Xin id property is not valid for mdp
+				internal blocks like ctl, lm, dspp. It should set
+				to 0xff for such blocks.
+
+Fudge Factors:			Fudge factors are used to boost demand for
+				resources like bus bandwidth, clk rate etc. to
+				overcome system inefficiencies and avoid any
+				glitches. These fudge factors are expressed in
+				terms of numerator and denominator. First value
+				is numerator followed by denominator. They all
+				are optional but highly recommended.
+				Ex:
+				x = value to be fudged
+				a = numerator, default value is 1
+				b = denominator, default value is 1
+				FUDGE(x, a, b) = ((x * a) / b)
+- qcom,mdss-ib-factor:		This fudge factor is applied to calculated ib
+				values in default conditions.
+- qcom,mdss-ib-factor-overlap:	This fudge factor is applied to calculated ib
+				values when the overlap bandwidth is the
+				predominant value compared to prefill bandwidth
+				value.
+- qcom,mdss-clk-factor:		This fudge factor is applied to calculated mdp
+				clk rate in default conditions.
+
+- qcom,max-bandwidth-low-kbps:	This value indicates the max bandwidth in KB
+				that can be supported without underflow.
+				This is a low bandwidth threshold which should
+				be applied in most scenarios to be safe from
+				underflows when unable to satisfy bandwidth
+				requirements.
+- qcom,max-bandwidth-high-kbps:	This value indicates the max bandwidth in KB
+				that can be supported without underflow.
+				This is a high bandwidth threshold which can be
+				applied in scenarios where panel interface can
+				be more tolerant to memory latency such as
+				command mode panels.
+- qcom,max-bandwidth-per-pipe-kbps: A two dimensional array indicating the max
+				bandwidth in KB that a single pipe can support
+				without underflow for various usecases. The
+				first parameter indicates the usecase and the
+				second parameter gives the max bw allowed for
+				the usecase. Following are the enum values for
+				modes in different cases:
+				For default case, mode = 1
+				camera usecase, mode = 2
+				hflip usecase, mode = 4
+				vflip usecase, mode = 8
+				First parameter/mode value need to match enum,
+				mdss_mdp_max_bw_mode, present in
+				include/uapi/linux/msm_mdp.h.
+- qcom,max-bw-settings:         This two dimension array indicates the max bandwidth
+				in KB that has to be supported when particular
+				scenarios are involved such as camera, flip.
+				The first parameter indicate the
+				scenario/usecase and second parameter indicate
+				the maximum bandwidth for that usecase.
+				Following are the enum values for modes in different
+				cases:
+				For default case, mode = 1
+				camera usecase, mode = 2
+				hflip usecase, mode = 4
+				vflip usecase, mode = 8
+				First parameter/mode value need to match enum,
+				mdss_mdp_max_bw_mode, present in
+				include/uapi/linux/msm_mdp.h.
+
+- qcom,mdss-has-panic-ctrl: Boolean property to indicate if panic/robust signal
+				control feature is available or not.
+- qcom,mdss-en-svs-high: Boolean property to indicate if this target needs to
+				enable the svs high voltage level for CX rail.
+- qcom,mdss-pipe-vig-panic-ctrl-offsets: Array of panic/robust signal offsets
+				corresponding to the respective VIG pipes.
+				Number of signal offsets should match the
+				number of offsets defined in property:
+				qcom,mdss-pipe-vig-off
+- qcom,mdss-pipe-rgb-panic-ctrl-offsets: Array of panic/robust signal offsets
+				corresponding to the respective RGB pipes.
+				Number of signal offsets should match the
+				number of offsets defined in property:
+				qcom,mdss-pipe-rgb-off
+- qcom,mdss-pipe-dma-panic-ctrl-offsets: Array of panic/robust signal offsets
+				corresponding to the respective DMA pipes.
+				Number of signal offsets should match the
+				number of offsets defined in property:
+				qcom,mdss-pipe-dma-off
+- qcom,mdss-per-pipe-panic-luts: Array to configure the panic/robust luts for
+				each rt and nrt clients. This property is
+				for the MDPv1.7 and above, which configures
+				the panic independently on each client.
+				Each element of the array corresponds to:
+				First element - panic for linear formats
+				Second element - panic for tile formats
+				Third element - robust for linear formats
+				Fourth element - robust for tile formats
+- qcom,mdss-has-pingpong-split:	Boolean property to indicate if destination
+				split feature is available or not in the target.
+- qcom,mdss-slave-pingpong-off:	Offset address for the extra TE block which needs
+				to be programmed when pingpong split feature is enabled.
+				Offset is calculated from the "mdp_phys"
+				register value. Mandatory when qcom,mdss-has-pingpong-split
+				is enabled.
+- qcom,mdss-ppb-ctl-off:	Array of offset addresses of ping pong buffer control registers.
+				The offsets are calculated from the "mdp_phys" base address
+				specified. The number of offsets should match the
+				number of ping pong buffers available in the hardware.
+				Mandatory when qcom,mdss-has-pingpong-split is enabled.
+- qcom,mdss-ppb-cfg-off:	Array of offset addresses of ping pong buffer config registers.
+				The offsets are calculated from the "mdp_phys" base address
+				specified. The number of offsets should match the
+				number of ping pong buffers available in the hardware.
+				Mandatory when qcom,mdss-has-pingpong-split is enabled.
+- qcom,mdss-cdm-off:            Array of offset addresses for the available
+				chroma down modules that can convert RGB data
+				to YUV before sending it to the interface
+				block. These offsets will be calculated from
+				register "mdp_phys" define in reg property. The
+				number of cdm offsets should reflect the number
+				of cdm blocks present in hardware.
+- qcom,mdss-dsc-off:            Array of offset addresses for the available
+				display stream compression module block.
+				These offsets will be calculated from
+				register "mdp_phys" define in reg property. The
+				number of dsc offsets should reflect the number
+				of dsc blocks present in hardware.
+- qcom,max-pipe-width:		This value specifies the maximum MDP SSPP width
+				the device supports. If not specified, a default value
+				of 2048 will be applied.
+- qcom,mdss-reg-bus:		Property to provide Bus scaling for register access for
+				MDP and DSI Blocks.
+
+- qcom,mdss-rot-reg-bus:	Property to provide Bus scaling for register access for
+				Rotator Block.
+
+- qcom,mdss-hw-rt:	        Optional Property to request min vote on the bus.
+			        Few Low tier targets expect min vote on the bus during SMMU
+			        and TZ operations. use this handle to request the vote needed.
+
+Optional subnodes:
+- mdss_fb:		Child nodes representing the frame buffer virtual devices.
+
+Subnode properties:
+- compatible :		Must be "qcom,mdss-fb"
+- cell-index :		Index representing frame buffer
+- qcom,mdss-mixer-swap: A boolean property that indicates if the mixer muxes
+			 need to be swapped based on the target panel.
+			 By default the property is not defined.
+- qcom,memblock-reserve: Specifies the memory location and the size reserved
+			 for the framebuffer used to display the splash screen.
+			 This property is required whenever the continuous splash
+			 screen feature is enabled for the corresponding
+			 framebuffer device. It should be used for only 32bit
+			 kernel.
+- qcom,cont-splash-memory: Specifies the memory block region reserved for
+			 continuous splash screen feature. This property should be
+			 defined for corresponding framebuffer device if
+			 "qcom,memblock-reserve" is not defined when continuous
+			 splash screen feature is enabled.
+- linux,contiguous-region: Phandle to the continuous memory region reserved for
+			 frame-buffer or continuous splash screen. Size of this
+			 region is dependent on the display panel resolution and
+			 buffering scheme for frame-buffer node. Currently driver
+			 uses double buffering.
+
+			 Example: Width = 1920, Height = 1080, BytesPerPixel = 4,
+			 Number of frame-buffers reserved = 2.
+			 Size = 1920*1080*4*2 = ROUND_1MB(15.8MB) = 16MB.
+- qcom,mdss-fb-splash-logo-enabled:    The boolean entry enables the framebuffer
+					driver to display the splash logo image.
+					It is independent of continuous splash
+					screen feature and has no relation with
+					qcom,cont-splash-enabled entry present in
+					panel configuration.
+- qcom,mdss-idle-power-collapse-enabled: Boolean property that enables support
+					for mdss power collapse in idle
+					screen use cases with smart panels.
+- qcom,boot-indication-enabled: Boolean property that enables turning on the blue
+				LED for notifying that the device is in boot
+				process.
+
+- qcom,mdss-pp-offets: A node that lists the offsets of post processing blocks
+		       from base module.
+		       -- qcom,mdss-mdss-sspp-igc-lut-off: This 32 bit value provides the
+		              offset to the IGC lut rams from mdp_phys base.
+		       -- qcom,mdss-sspp-vig-pcc-off: This 32 bit value provides the offset
+		              to PCC block from the VIG pipe base address.
+		       -- qcom,mdss-sspp-rgb-pcc-off: This 32 bit value provides the offset
+		              to PCC block from the RGB pipe base address.
+		       -- qcom,mdss-sspp-dma-pcc-off: This 32 bit value provides the offset
+		              to PCC block from the DMA pipe base address.
+		       -- qcom,mdss-dspp-pcc-off: This 32 bit value provides the offset
+		              to PCC block from the DSPP pipe base address.
+		       -- qcom,mdss-lm-pgc-off: This 32 bit value provides the offset
+		               to PGC block from the layer mixer base address.
+		       -- qcom,mdss-dspp-gamut-off: This 32 bit value provides the offset
+		               to gamut block from DSPP base address.
+		       -- qcom,mdss-dspp-pgc-off: This 32 bit value provides the offset to
+		               PGC block from the DSPP base address.
+
+- qcom,mdss-scaler-offsets: A node that lists the offsets of scaler blocks
+		            from base module.
+		            -- qcom,mdss-vig-scaler-off: This 32 bit value provides the
+		                   offset to vig scaler from vig pipe base.
+		            -- qcom,mdss-vig-scaler-lut-off: This 32 bit value provides the
+		                   offset to vig scaler lut from vig pipe base.
+		            -- qcom,mdss-has-dest-scaler: Boolean property to indicate the
+		                   presence of destination scaler block.
+		            -- qcom,mdss-dest-block-off: This 32 bit value provides the
+		                   offset from mdp base to destination scaler block.
+		            -- qcom,mdss-dest-scaler-off: Array containing offsets of
+		                   destination scaler modules from the scaler block.
+		            -- qcom,mdss-dest-scaler-lut-off: Array containing offsets of destination
+		                   scaler lut tables from the scaler block.
+
+- qcom,mdss-has-separate-rotator: Boolean property to indicate support of
+				  independent rotator. Independent rotator has
+				  separate DMA pipe working in block mode only.
+
+- smmu_mdp_***:	Child nodes representing the mdss smmu virtual devices.
+		Mandatory smmu v2 and not required for smmu v1.
+
+Subnode properties:
+- compatible :		Compatible name used in smmu v2.
+			smmu_v2 names should be:
+			"qcom,smmu_mdp_unsec" 	- smmu context bank device for
+						unsecure mdp domain.
+			"qcom,smmu_rot_unsec"	- smmu context bank device for
+						unsecure rotation domain.
+			"qcom,smmu_mdp_sec" 	- smmu context bank device for
+						secure mdp domain.
+			"qcom,smmu_rot_sec"	- smmu context bank device for
+						secure rotation domain.
+			"qcom,smmu_kms_unsec" 	- smmu context bank device for
+						unsecure mdp domain for KMS driver.
+			"qcom,smmu_nrt_unsec"	- smmu context bank device for
+						unsecure rotation domain for KMS driver.
+			"qcom,smmu_kms_sec" 	- smmu context bank device for
+						secure mdp domain for KMS driver.
+			"qcom,smmu_nrt_sec"	- smmu context bank device for
+						secure rotation domain for KMS driver.
+			"qcom,smmu_arm_mdp_unsec"	- arm smmu context bank device for
+						unsecure mdp domain.
+			"qcom,smmu_arm_mdp_sec"	- arm smmu context bank device for
+						secure mdp domain.
+- gdsc-mmagic-mdss-supply: Phandle for mmagic mdss supply regulator device node.
+- reg :			offset and length of the register set for the device.
+- reg-names :		names to refer to register sets related to this device
+- clocks:		List of Phandles for clock device nodes
+			needed by the device.
+- clock-names:		List of clock names needed by the device.
+
+Subnode properties:
+Required properties:
+- compatible:		Must be "qcom,mdss_wb"
+- qcom,mdss_pan_res:	Array containing two elements, width and height which
+			specifies size of writeback buffer.
+- qcom,mdss_pan_bpp:	Specifies bits per pixel for writeback buffer.
+- qcom,mdss-fb-map:	Specifies the handle for frame buffer.
+
+Example:
+	mdss_mdp: qcom,mdss_mdp@fd900000 {
+		compatible = "qcom,mdss_mdp";
+		reg = <0xfd900000 0x22100>,
+			<0xfd924000 0x1000>,
+			<0xfd925000 0x1000>;
+		reg-names = "mdp_phys", "vbif_phys", "vbif_nrt_phys";
+		interrupts = <0 72 0>;
+		interrupt-controller;
+		#interrupt-cells = <1>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		vdd-supply = <&gdsc_mdss>;
+		batfet-supply = <&pm8941_chg_batif>;
+		vdd-cx-supply = <&pm8841_s2_corner>;
+
+		/* Bus Scale Settings */
+		qcom,msm-bus,name = "mdss_mdp";
+		qcom,msm-bus,num-cases = <3>;
+		qcom,msm-bus,num-paths = <2>;
+		qcom,mdss-dram-channels = <2>;
+		qcom,mdss-num-nrt-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<22 512 0 0>, <23 512 0 0>,
+			<22 512 0 6400000>, <23 512 0 6400000>,
+			<22 512 0 6400000>, <23 512 0 6400000>;
+
+		/* Fudge factors */
+		qcom,mdss-ab-factor = <2 1>;		/* 2 times    */
+		qcom,mdss-ib-factor = <3 2>;		/* 1.5 times  */
+		qcom,mdss-high-ib-factor = <2 1>;	/* 2 times    */
+		qcom,mdss-clk-factor = <5 4>;		/* 1.25 times */
+
+		/* Clock levels */
+		qcom,mdss-clk-levels = <92310000 177780000 200000000>;
+
+		/* VBIF QoS remapper settings*/
+		qcom,mdss-vbif-qos-rt-setting = <2 2 2 2>;
+		qcom,mdss-vbif-qos-nrt-setting = <1 1 1 1>;
+
+		qcom,max-bandwidth-low-kbps = <2300000>;
+		qcom,max-bandwidth-high-kbps = <3000000>;
+		qcom,max-bandwidth-per-pipe-kbps = <4 2100000>,
+						   <8 1800000>;
+		qcom,max-bw-settings = <1 2300000>,
+				       <2 1700000>,
+				       <4 2300000>,
+				       <8 2000000>;
+
+		qcom,max-mixer-width = <2048>;
+		qcom,max-pipe-width = <2048>;
+		qcom,max-clk-rate = <320000000>;
+		qcom,vbif-settings = <0x0004 0x00000001>,
+				     <0x00D8 0x00000707>;
+		qcom,vbif-nrt-settings = <0x0004 0x00000001>,
+				     <0x00D8 0x00000707>;
+		qcom,mdp-settings = <0x02E0 0x000000AA>,
+				    <0x02E4 0x00000055>;
+		qcom,mdss-pipe-vig-off = <0x00001200 0x00001600
+					  0x00001A00>;
+		qcom,mdss-pipe-rgb-off = <0x00001E00 0x00002200
+					  0x00002600>;
+		qcom,mdss-pipe-dma-off = <0x00002A00 0x00002E00>;
+		qcom,mdss-pipe-cursor-off = <0x00035000 0x00037000>;
+		qcom,mdss-dsc-off = <0x00081000 0x00081400>;
+		qcom,mdss-pipe-vig-fetch-id = <1 4 7>;
+		qcom,mdss-pipe-rgb-fetch-id = <16 17 18>;
+		qcom,mdss-pipe-dma-fetch-id = <10 13>;
+		qcom,mdss-pipe-rgb-fixed-mmb =	<2 0 1>,
+						<2 2 3>,
+						<2 4 5>,
+						<2 6 7>;
+		qcom,mdss-pipe-vig-fixed-mmb =	<1 8>,
+						<1 9>,
+						<1 10>,
+						<1 11>;
+		qcom,mdss-smp-data = <22 4096>;
+		qcom,mdss-rot-block-size = <64>;
+		qcom,mdss-rotator-ot-limit = <2>;
+		qcom,mdss-smp-mb-per-pipe = <2>;
+		qcom,mdss-pref-prim-intf = "dsi";
+		qcom,mdss-has-non-scalar-rgb;
+		qcom,mdss-has-bwc;
+		qcom,mdss-has-decimation;
+		qcom,mdss-has-fixed-qos-arbiter-enabled;
+		qcom,mdss-has-source-split;
+		qcom,mdss-wfd-mode = "intf";
+		qcom,mdss-no-lut-read;
+		qcom,mdss-no-hist-vote;
+		qcom,mdss-traffic-shaper-enabled;
+		qcom,mdss-has-rotator-downscale;
+		qcom,mdss-rot-downscale-min = <2>;
+		qcom,mdss-rot-downscale-max = <16>;
+
+		qcom,mdss-has-pingpong-split;
+		qcom,mdss-pipe-vig-xin-id = <0 4 8>;
+		qcom,mdss-pipe-rgb-xin-id = <1 5 9>;
+		qcom,mdss-pipe-dma-xin-id = <2 10>;
+		qcom,mdss-pipe-cursor-xin-id = <7 7>;
+
+		qcom,mdss-pipe-vig-clk-ctrl-offsets = <0x3AC 0 0>,
+						      <0x3B4 0 0>,
+						      <0x3BC 0 0>,
+						      <0x3C4 0 0>;
+
+		qcom,mdss-pipe-rgb-clk-ctrl-offsets = <0x3AC 4 8>,
+						      <0x3B4 4 8>,
+						      <0x3BC 4 8>,
+						      <0x3C4 4 8>;
+
+		qcom,mdss-pipe-dma-clk-ctrl-offsets = <0x3AC 8 12>,
+						      <0x3B4 8 12>;
+
+		qcom,mdss-per-pipe-panic-luts = <0x000f>,
+						<0xffff>,
+						<0xfffc>,
+						<0xff00>;
+
+		qcom,mdss-has-panic-ctrl;
+		qcom,mdss-pipe-vig-panic-ctrl-offsets = <0 1 2 3>;
+		qcom,mdss-pipe-rgb-panic-ctrl-offsets = <4 5 6 7>;
+		qcom,mdss-pipe-dma-panic-ctrl-offsets = <8 9>;
+
+		qcom,mdss-pipe-sw-reset-off = <0x0128>;
+		qcom,mdss-pipe-vig-sw-reset-map = <5 6 7 8>;
+		qcom,mdss-pipe-rgb-sw-reset-map = <9 10 11 12>;
+		qcom,mdss-pipe-dma-sw-reset-map = <13 14>;
+
+		qcom,mdss-ctl-off = <0x00000600 0x00000700 0x00000800
+				     0x00000900 0x0000A00>;
+		qcom,mdss-mixer-intf-off = <0x00003200 0x00003600
+					    0x00003A00>;
+		qcom,mdss-mixer-wb-off = <0x00003E00 0x00004200>;
+		qcom,mdss-dspp-off = <0x00004600 0x00004A00 0x00004E00>;
+		qcom,mdss-pingpong-off = <0x00012D00 0x00012E00 0x00012F00>;
+		qcom,mdss-wb-off = <0x00011100 0x00013100 0x00015100
+				    0x00017100 0x00019100>;
+		qcom,mdss-intf-off = <0x00021100 0x00021300
+					   0x00021500 0x00021700>;
+		qcom,mdss-cdm-off = <0x0007A200>;
+		qcom,mdss-ppb-ctl-off = <0x0000420>;
+		qcom,mdss-ppb-cfg-off = <0x0000424>;
+		qcom,mdss-slave-pingpong-off = <0x00073000>;
+
+		/* buffer parameters to calculate prefill bandwidth */
+		qcom,mdss-prefill-outstanding-buffer-bytes = <1024>;
+		qcom,mdss-prefill-y-buffer-bytes = <4096>;
+		qcom,mdss-prefill-scaler-buffer-lines-bilinear = <2>;
+		qcom,mdss-prefill-scaler-buffer-lines-caf = <4>;
+		qcom,mdss-prefill-post-scaler-buffer-pixels = <2048>;
+		qcom,mdss-prefill-pingpong-buffer-pixels = <5120>;
+		qcom,mdss-prefill-fbc-lines = <2>;
+		qcom,mdss-idle-power-collapse-enabled;
+
+		qcom,regs-dump-xin-id-mdp = <0xff 0xff 0xff 0xff 0x0 0x0>;
+		mdss_fb0: qcom,mdss_fb_primary {
+			cell-index = <0>;
+			compatible = "qcom,mdss-fb";
+			qcom,mdss-mixer-swap;
+			linux,contiguous-region = <&fb_mem>;
+			qcom,mdss-fb-splash-logo-enabled;
+			qcom,cont-splash-memory {
+				linux,contiguous-region = <&cont_splash_mem>;
+			};
+		};
+
+		qcom,mdss-pp-offsets {
+			qcom,mdss-sspp-mdss-igc-lut-off = <0x3000>;
+			qcom,mdss-sspp-vig-pcc-off = <0x1580>;
+			qcom,mdss-sspp-rgb-pcc-off = <0x180>;
+			qcom,mdss-sspp-dma-pcc-off = <0x180>;
+			qcom,mdss-lm-pgc-off = <0x3C0>;
+			qcom,mdss-dspp-gamut-off = <0x1600>;
+			qcom,mdss-dspp-pcc-off = <0x1700>;
+			qcom,mdss-dspp-pgc-off = <0x17C0>;
+		};
+
+		qcom,mdss-scaler-offsets {
+			qcom,mdss-vig-scaler-off = <0xA00>;
+			qcom,mdss-vig-scaler-lut-off = <0xB00>;
+			qcom,mdss-has-dest-scaler;
+			qcom,mdss-dest-block-off = <0x00061000>;
+			qcom,mdss-dest-scaler-off = <0x800 0x1000>;
+			qcom,mdss-dest-scaler-lut-off = <0x900 0x1100>;
+		};
+
+		qcom,mdss-reg-bus {
+		    /* Reg Bus Scale Settings */
+		    qcom,msm-bus,name = "mdss_reg";
+		    qcom,msm-bus,num-cases = <4>;
+		    qcom,msm-bus,num-paths = <1>;
+		    qcom,msm-bus,active-only;
+		    qcom,msm-bus,vectors-KBps =
+			    <1 590 0 0>,
+			    <1 590 0 76800>,
+			    <1 590 0 160000>,
+			    <1 590 0 320000>;
+		};
+
+		qcom,mdss-hw-rt-bus {
+		    /* hw-rt Bus Scale Settings */
+		    qcom,msm-bus,name = "mdss_hw_rt";
+		    qcom,msm-bus,num-cases = <2>;
+		    qcom,msm-bus,num-paths = <1>;
+		    qcom,msm-bus,vectors-KBps =
+			    <22 512 0 0>,
+			    <22 512 0 1000>;
+		};
+
+		smmu_mdp_sec: qcom,smmu_mdp_sec_cb {
+			compatible = "qcom,smmu_mdp_sec";
+			iommus = <&mdp_smmu 1>;
+			reg = <0xd09000 0x000d00>;
+			reg-names = "mmu_cb";
+			gdsc-mmagic-mdss-supply = <&gdsc_mmagic_mdss>;
+			clocks = <&clock_mmss clk_smmu_mdp_ahb_clk>,
+				<&clock_mmss clk_smmu_mdp_axi_clk>;
+			clock-names = "dummy_clk", "dummy_clk";
+		};
+
+		qcom,mdss_wb_panel {
+			compatible = "qcom,mdss_wb";
+			qcom,mdss_pan_res = <1280 720>;
+			qcom,mdss_pan_bpp = <24>;
+			qcom,mdss-fb-map = <&mdss_fb1>;
+		};
+
+		qcom,mdss-rot-reg-bus {
+		    /* Reg Bus Scale Settings */
+		    qcom,msm-bus,name = "mdss_rot_reg";
+		    qcom,msm-bus,num-cases = <2>;
+		    qcom,msm-bus,num-paths = <1>;
+		    qcom,msm-bus,active-only;
+		    qcom,msm-bus,vectors-KBps =
+			    <1 590 0 0>,
+			    <1 590 0 76800>;
+		};
+	};
+
diff --git a/Documentation/devicetree/bindings/fb/mdss-pll.txt b/Documentation/devicetree/bindings/fb/mdss-pll.txt
index 59fa6a0..d746a52 100644
--- a/Documentation/devicetree/bindings/fb/mdss-pll.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-pll.txt
@@ -1,22 +1,21 @@
-Qualcomm Technologies MDSS pll for DSI/EDP/HDMI
+Qualcomm MDSS pll for DSI/EDP/HDMI
 
 mdss-pll is a pll controller device which supports pll devices that
-are compatible with MIPI display serial interface specification,
+are compatible with MIPI display serial interface specification,
 HDMI and edp.
 
 Required properties:
-- compatible:		Compatible name used in the driver
-                        "qcom,mdss_dsi_pll_8916", "qcom,mdss_dsi_pll_8939",
-                        "qcom,mdss_dsi_pll_8974", "qcom,mdss_dsi_pll_8994",
-                        "qcom,mdss_dsi_pll_8994", "qcom,mdss_dsi_pll_8909",
-                        "qcom,mdss_hdmi_pll", "qcom,mdss_hdmi_pll_8994",
-                        "qcom,mdss_dsi_pll_8992", "qcom,mdss_hdmi_pll_8992",
-                        "qcom,mdss_dsi_pll_8996", "qcom,mdss_hdmi_pll_8996",
-                        "qcom,mdss_hdmi_pll_8996_v2", "qcom,mdss_dsi_pll_8996_v2",
-                        "qcom,mdss_hdmi_pll_8996_v3", "qcom,mdss_hdmi_pll_8996_v3_1p8",
-                        "qcom,mdss_edp_pll_8996_v3", "qcom,mdss_edp_pll_8996_v3_1p8",
-                        "qcom,mdss_dsi_pll_10nm", "qcom,mdss_dp_pll_8998",
-                        "qcom,mdss_hdmi_pll_8998", "qcom,mdss_dp_pll_10nm".
+- compatible:		Compatible name used in the driver. Should be one of:
+			"qcom,mdss_dsi_pll_8916", "qcom,mdss_dsi_pll_8939",
+			"qcom,mdss_dsi_pll_8974", "qcom,mdss_dsi_pll_8994",
+			"qcom,mdss_dsi_pll_8994", "qcom,mdss_dsi_pll_8909",
+			"qcom,mdss_hdmi_pll", "qcom,mdss_hdmi_pll_8994",
+			"qcom,mdss_dsi_pll_8992", "qcom,mdss_hdmi_pll_8992",
+			"qcom,mdss_dsi_pll_8996", "qcom,mdss_hdmi_pll_8996",
+			"qcom,mdss_hdmi_pll_8996_v2", "qcom,mdss_dsi_pll_8996_v2",
+			"qcom,mdss_hdmi_pll_8996_v3", "qcom,mdss_dsi_pll_8952",
+			"qcom,mdss_dsi_pll_8937", "qcom,mdss_hdmi_pll_8996_v3_1p8",
+			"qcom,mdss_dsi_pll_8953"
 - cell-index:		Specifies the controller used
 - reg:			offset and length of the register set for the device.
 - reg-names :		names to refer to register sets related to this device
diff --git a/Documentation/devicetree/bindings/fb/mdss-qpic-panel.txt b/Documentation/devicetree/bindings/fb/mdss-qpic-panel.txt
new file mode 100644
index 0000000..8c11a43
--- /dev/null
+++ b/Documentation/devicetree/bindings/fb/mdss-qpic-panel.txt
@@ -0,0 +1,25 @@
+Qualcomm Technologies, Inc. mdss-qpic-panel
+
+mdss-qpic-panel is a panel device which can be driven by qpic.
+
+Required properties:
+- compatible:				Must be "qcom,mdss-qpic-panel"
+- qcom,mdss-pan-res:		A two dimensional array that specifies the panel
+							resolution.
+- qcom,mdss-pan-bpp:		Specifies the panel bits per pixel.
+- qcom,refresh_rate:		Panel refresh rate
+
+Optional properties:
+- label:					A string used as a descriptive name of the panel
+
+
+Example:
+/ {
+	qcom,mdss_lcdc_ili9341_qvga {
+		compatible = "qcom,mdss-qpic-panel";
+		label = "ili qvga lcdc panel";
+		qcom,mdss-pan-res = <240 320>;
+		qcom,mdss-pan-bpp = <18>;
+		qcom,refresh_rate = <60>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/fb/mdss-qpic.txt b/Documentation/devicetree/bindings/fb/mdss-qpic.txt
new file mode 100644
index 0000000..16d5b35
--- /dev/null
+++ b/Documentation/devicetree/bindings/fb/mdss-qpic.txt
@@ -0,0 +1,49 @@
+Qualcomm Technologies, Inc.  mdss-qpic
+
+mdss-qpic is a qpic controller device which supports dma transmission to MIPI
+and LCDC panel.
+
+Required properties:
+- compatible:			must be "qcom,mdss_qpic"
+- reg:					offset and length of the register set for the device.
+- reg-names :			names to refer to register sets related to this device
+- interrupts:			IRQ line
+- vdd-supply:			Phandle for vdd regulator device node.
+- avdd-supply:			Phandle for avdd regulator device node.
+- qcom,cs-gpio:			Phandle for cs gpio device node.
+- qcom,te-gpio:			Phandle for te gpio device node.
+- qcom,rst-gpio:		Phandle for rst gpio device node.
+- qcom,ad8-gpio:		Phandle for ad8 gpio device node.
+- qcom,bl-gpio:			Phandle for backlight gpio device node.
+
+Optional properties:
+- Refer to "Documentation/devicetree/bindings/arm/msm/msm_bus.txt" for
+below Bus Scaling properties:
+	- qcom,msm-bus,name
+	- qcom,msm-bus,num-cases
+	- qcom,msm-bus,num-paths
+	- qcom,msm-bus,vectors-KBps
+
+Example:
+	qcom,msm_qpic@f9ac0000 {
+		compatible = "qcom,mdss_qpic";
+		reg = <0xf9ac0000 0x24000>;
+		reg-names = "qpic_base";
+		interrupts = <0 251 0>;
+
+		qcom,msm-bus,name = "mdss_qpic";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+
+		qcom,msm-bus,vectors-KBps =
+			<91 512 0 0>,
+			<91 512 400000 800000>;
+
+		vdd-supply = <&pm8019_l11>;
+		avdd-supply = <&pm8019_l14>;
+		qcom,cs-gpio = <&msmgpio 21 0>;
+		qcom,te-gpio = <&msmgpio 22 0>;
+		qcom,rst-gpio = <&msmgpio 23 0>;
+		qcom,ad8-gpio = <&msmgpio 20 0>;
+		qcom,bl-gpio = <&msmgpio 84 0>;
+	};
diff --git a/Documentation/devicetree/bindings/fb/mdss-rotator.txt b/Documentation/devicetree/bindings/fb/mdss-rotator.txt
new file mode 100644
index 0000000..5e077ac
--- /dev/null
+++ b/Documentation/devicetree/bindings/fb/mdss-rotator.txt
@@ -0,0 +1,78 @@
+QTI MDSS Rotator
+
+MDSS rotator is a rotator driver, which manages the rotator hw
+block inside the Mobile Display Subsystem.
+
+Required properties
+- compatible :			Must be "qcom,mdss_rotator".
+- qcom,mdss-wb-count:		The number of writeback block
+				in the hardware
+- <name>-supply:		Phandle for <name> regulator device node.
+
+Bus Scaling Data:
+- qcom,msm-bus,name:		String property describing MDSS client.
+- qcom,msm-bus,num-cases:	This is the number of Bus Scaling use cases
+				defined in the vectors property. This must be
+				set to <3> for MDSS driver where use-case 0 is
+				used to take off MDSS BW votes from the system.
+				And use-case 1 & 2 are used in ping-pong fashion
+				to generate run-time BW requests.
+- qcom,msm-bus,num-paths:	This represents the number of paths in each
+				Bus Scaling Usecase. This value depends on
+				how many number of AXI master ports are
+				dedicated to MDSS for particular chipset.
+- qcom,msm-bus,vectors-KBps:	* A series of 4 cell properties, with a format
+				of (src, dst, ab, ib) which is defined at
+				Documentation/devicetree/bindings/arm/msm/msm_bus.txt
+				* Current values of src & dst are defined at
+				include/linux/msm-bus-board.h
+				src values allowed for MDSS are:
+					22 = MSM_BUS_MASTER_MDP_PORT0
+					23 = MSM_BUS_MASTER_MDP_PORT1
+					25 = MSM_BUS_MASTER_ROTATOR
+				dst values allowed for MDSS are:
+					512 = MSM_BUS_SLAVE_EBI_CH0
+				ab: Represents aggregated bandwidth.
+				ib: Represents instantaneous bandwidth.
+				* Total number of 4 cell properties will be
+				(number of use-cases * number of paths).
+				* These values will be overridden by the driver
+				based on the run-time requirements. So initial
+				ab and ib values defined here are random and
+				bear no logic except for the use-case 0 where ab
+				and ib values need to be 0.
+				* Define realtime vector properties followed by
+				non-realtime vector properties.
+
+Optional properties
+- qcom,mdss-has-reg-bus:	Boolean property to indicate
+				if rotator needs to vote for register bus. This
+				property is needed starting 8996
+- qcom,mdss-has-ubwc:		Boolean property to indicate
+				if the hw supports universal
+				bandwidth compression (ubwc)
+- qcom,mdss-has-downscale:	Boolean property to indicate
+				if the hw supports downscale
+
+Example:
+	mdss_rotator: qcom,mdss_rotator {
+		compatible = "qcom,mdss_rotator";
+		qcom,mdss-has-downscale;
+		qcom,mdss-has-ubwc;
+		qcom,mdss-wb-count = <2>;
+
+		qcom,mdss-has-reg-bus;
+		/* Bus Scale Settings */
+		qcom,msm-bus,name = "mdss_rotator";
+		qcom,msm-bus,num-cases = <3>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,mdss-num-nrt-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<25 512 0 0>,
+			<25 512 0 6400000>,
+			<25 512 0 6400000>;
+
+		vdd-supply = <&gdsc_mdss>;
+		gdsc-mmagic-mdss-supply = <&gdsc_mmagic_mdss>;
+		qcom,supply-names = "vdd", "gdsc-mmagic-mdss";
+	};
diff --git a/Documentation/devicetree/bindings/fb/msm-hdmi-tx.txt b/Documentation/devicetree/bindings/fb/msm-hdmi-tx.txt
new file mode 100644
index 0000000..7f95ed4
--- /dev/null
+++ b/Documentation/devicetree/bindings/fb/msm-hdmi-tx.txt
@@ -0,0 +1,116 @@
+* Qualcomm HDMI Tx
+
+Required properties:
+- cell-index: hdmi tx controller index
+- compatible: must be "qcom,hdmi-tx"
+- reg: offset and length of the register regions(s) for the device.
+- reg-names: a list of strings that map in order to the list of regs.
+
+- hpd-gdsc-supply: phandle to the mdss gdsc regulator device tree node.
+- hpd-5v-supply: phandle to the 5V regulator device tree node.
+- core-vdda-supply: phandle to the HDMI vdda regulator device tree node.
+- core-vcc-supply: phandle to the HDMI vcc regulator device tree node.
+- qcom,supply-names: a list of strings that map in order
+  to the list of supplies.
+- qcom,min-voltage-level: specifies minimum voltage (uV) level
+  of supply(ies) mentioned above.
+- qcom,max-voltage-level: specifies maximum voltage (uV) level
+  of supply(ies) mentioned above.
+- qcom,enable-load: specifies the current (uA) that will be
+  drawn from the enabled supply(ies) mentioned above.
+- qcom,disable-load: specifies the current (uA) that will be
+  drawn from the disabled supply(ies) mentioned above.
+
+- qcom,hdmi-tx-cec: gpio for Consumer Electronics Control (cec) line.
+- qcom,hdmi-tx-ddc-clk: gpio for Display Data Channel (ddc) clock line.
+- qcom,hdmi-tx-ddc-data: gpio for ddc data line.
+
+Optional properties:
+- hpd-5v-en-supply: phandle to the 5V boost enable regulator device tree node.
+- qcom,hdmi-tx-mux-sel: gpio required to toggle HDMI output between
+  docking station, type A, and liquid device, type D, ports. Required
+  property for liquid devices.
+- qcom,hdmi-tx-ddc-mux-sel: gpio for ddc mux select.
+- qcom,hdmi-tx-mux-en: gpio required to enable mux for HDMI output
+  on liquid devices. Required property for liquid devices.
+- qcom,hdmi-tx-mux-lpm: gpio required for hdmi mux configuration
+  selection on liquid devices. Required property for liquid devices.
+- qcom,conditional-power-on: Enables HPD conditionally on MTP targets.
+  Required property for MTP devices which are reworked to expose HDMI port.
+- qcom,hdmi-tx-hpd: gpio required for HDMI hot-plug detect. Required on
+  platforms where companion chip is not used.
+- pinctrl-names: a list of strings that map to the pinctrl states.
+- pinctrl-0: list of phandles, each pointing at a pin configuration node.
+...
+- pinctrl-n: list of phandles, each pointing at a pin configuration node.
+- qcom,conti-splash-enabled: Enables the hdmi continuous splash screen feature.
+  HDMI interface will remain powered on from LK to kernel with continuous
+  display of bootup logo.
+- qcom,pluggable: boolean to enable hotplug feature.
+- qcom,display-id: A string indicates the display ID for the controller.
+		   The possible values are:
+		   - "primary"
+		   - "secondary"
+		   - "tertiary"
+
+[Optional child nodes]: These nodes are for devices which are
+dependent on HDMI Tx controller. If HDMI Tx controller is disabled then
+these devices will be disabled as well. Ex. HDMI Audio Codec device.
+
+- qcom,msm-hdmi-audio-rx: Node for HDMI audio codec.
+Required properties:
+- compatible : "msm-hdmi-audio-codec-rx";
+
+Example:
+	mdss_hdmi_tx: qcom,hdmi_tx@fd922100 {
+		cell-index = <0>;
+		compatible = "qcom,hdmi-tx";
+		reg =	<0xfd922100 0x35C>,
+			<0xfd922500 0x7C>,
+			<0xfc4b8000 0x60F0>,
+			<0xfe2a0000 0xFFF>;
+		reg-names = "core_physical", "phy_physical", "qfprom_physical",
+			"hdcp_physical";
+
+		hpd-gdsc-supply = <&gdsc_mdss>;
+		hpd-5v-supply = <&pm8941_mvs2>;
+		hpd-5v-en-supply = <&hdmi_vreg>;
+		core-vdda-supply = <&pm8941_l12>;
+		core-vcc-supply = <&pm8941_s3>;
+		qcom,supply-names = "hpd-gdsc", "hpd-5v", "hpd-5v-en", "core-vdda", "core-vcc";
+		qcom,min-voltage-level = <0 0 0 1800000 1800000>;
+		qcom,max-voltage-level = <0 0 0 1800000 1800000>;
+		qcom,enable-load = <0 0 0 1800000 0>;
+		qcom,disable-load = <0 0 0 0 0>;
+
+		qcom,hdmi-tx-ddc-mux-sel = <&pma8084_gpios 6 0>;
+		qcom,hdmi-tx-cec = <&msmgpio 31 0>;
+		qcom,hdmi-tx-ddc-clk = <&msmgpio 32 0>;
+		qcom,hdmi-tx-ddc-data = <&msmgpio 33 0>;
+		qcom,hdmi-tx-hpd = <&msmgpio 34 0>;
+
+		qcom,hdmi-tx-mux-lpm = <&msmgpio 27 0>;
+		qcom,hdmi-tx-mux-en = <&msmgpio 83 0>;
+		qcom,hdmi-tx-mux-sel = <&msmgpio 85 0>;
+
+		qcom,conditional-power-on;
+		qcom,pluggable;
+		qcom,display-id = "secondary";
+
+		qcom,msm-hdmi-audio-rx {
+			compatible = "qcom,msm-hdmi-audio-codec-rx";
+		};
+		pinctrl-names = "hdmi_hpd_active", "hdmi_ddc_active",
+					"hdmi_cec_active", "hdmi_active",
+					"hdmi_sleep";
+		pinctrl-0 = <&mdss_hdmi_hpd_active &mdss_hdmi_ddc_suspend
+							&mdss_hdmi_cec_suspend>;
+		pinctrl-1 = <&mdss_hdmi_hpd_active &mdss_hdmi_ddc_active
+							&mdss_hdmi_cec_suspend>;
+		pinctrl-2 = <&mdss_hdmi_hpd_active &mdss_hdmi_cec_active
+							&mdss_hdmi_ddc_suspend>;
+		pinctrl-3 = <&mdss_hdmi_hpd_active &mdss_hdmi_ddc_active
+							&mdss_hdmi_cec_active>;
+		pinctrl-4 = <&mdss_hdmi_hpd_suspend &mdss_hdmi_ddc_suspend
+							&mdss_hdmi_cec_suspend>;
+	};
diff --git a/Documentation/devicetree/bindings/fb/mxsfb.txt b/Documentation/devicetree/bindings/fb/mxsfb.txt
new file mode 100644
index 0000000..96ec517
--- /dev/null
+++ b/Documentation/devicetree/bindings/fb/mxsfb.txt
@@ -0,0 +1,49 @@
+* Freescale MXS LCD Interface (LCDIF)
+
+Required properties:
+- compatible: Should be "fsl,<chip>-lcdif".  Supported chips include
+  imx23 and imx28.
+- reg: Address and length of the register set for lcdif
+- interrupts: Should contain lcdif interrupts
+- display : phandle to display node (see below for details)
+
+* display node
+
+Required properties:
+- bits-per-pixel : <16> for RGB565, <32> for RGB888/666.
+- bus-width : number of data lines.  Could be <8>, <16>, <18> or <24>.
+
+Required sub-node:
+- display-timings : Refer to binding doc display-timing.txt for details.
+
+Examples:
+
+lcdif@80030000 {
+	compatible = "fsl,imx28-lcdif";
+	reg = <0x80030000 2000>;
+	interrupts = <38 86>;
+
+	display: display {
+		bits-per-pixel = <32>;
+		bus-width = <24>;
+
+		display-timings {
+			native-mode = <&timing0>;
+			timing0: timing0 {
+				clock-frequency = <33500000>;
+				hactive = <800>;
+				vactive = <480>;
+				hfront-porch = <164>;
+				hback-porch = <89>;
+				hsync-len = <10>;
+				vback-porch = <23>;
+				vfront-porch = <10>;
+				vsync-len = <10>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <0>;
+			};
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/fb/sm501fb.txt b/Documentation/devicetree/bindings/fb/sm501fb.txt
new file mode 100644
index 0000000..9d9f009
--- /dev/null
+++ b/Documentation/devicetree/bindings/fb/sm501fb.txt
@@ -0,0 +1,34 @@
+* SM SM501
+
+The SM SM501 is an LCD controller; with the proper hardware, it can also
+drive DVI monitors.
+
+Required properties:
+- compatible : should be "smi,sm501".
+- reg : contain two entries:
+    - First entry: System Configuration register
+    - Second entry: IO space (Display Controller register)
+- interrupts : SMI interrupt to the cpu should be described here.
+- interrupt-parent : the phandle for the interrupt controller that
+  services interrupts for this device.
+
+Optional properties:
+- mode : select a video mode:
+    <xres>x<yres>[-<bpp>][@<refresh>]
+- edid : verbatim EDID data block describing attached display.
+  Data from the detailed timing descriptor will be used to
+  program the display controller.
+- little-endian: available on big endian systems, to
+  set different foreign endian.
+- big-endian: available on little endian systems, to
+  set different foreign endian.
+
+Example for MPC5200:
+	display@1,0 {
+		compatible = "smi,sm501";
+		reg = <1 0x00000000 0x00800000
+		       1 0x03e00000 0x00200000>;
+		interrupts = <1 1 3>;
+		mode = "640x480-32@60";
+		edid = [edid-data];
+	};
diff --git a/Documentation/devicetree/bindings/firmware/qcom,scm.txt b/Documentation/devicetree/bindings/firmware/qcom,scm.txt
index bdba526..7cfc44b 100644
--- a/Documentation/devicetree/bindings/firmware/qcom,scm.txt
+++ b/Documentation/devicetree/bindings/firmware/qcom,scm.txt
@@ -13,6 +13,7 @@
  * "qcom,scm" for later processors (MSM8916, APQ8084, MSM8974, etc)
  * "android,firmware" for firmware image
  * "android,vbmeta" for setting system properties for verified boot.
+ * "android,system" for system partition properties.
 - clocks: One to three clocks may be required based on compatible.
  * Only core clock required for "qcom,scm-apq8064", "qcom,scm-msm8660", and "qcom,scm-msm8960"
  * Core, iface, and bus clocks required for "qcom,scm"
diff --git a/Documentation/devicetree/bindings/gpu/adreno.txt b/Documentation/devicetree/bindings/gpu/adreno.txt
index 69174ca..55cd383 100644
--- a/Documentation/devicetree/bindings/gpu/adreno.txt
+++ b/Documentation/devicetree/bindings/gpu/adreno.txt
@@ -124,6 +124,12 @@
 				mask   - mask for the relevant bits in the efuse register.
 				shift  - number of bits to right shift to get the speed bin
 				value.
+- qcom,gpu-disable-fuse:	GPU disable fuse
+				<offset mask shift>
+				offset - offset of the efuse register from the base.
+				mask   - mask for the relevant bits in the efuse register.
+				shift  - number of bits to right shift to get the disable_gpu
+				fuse bit value.
 - qcom,highest-bank-bit:
 				Specify the bit of the highest DDR bank. This
 				is programmed into protected registers and also
@@ -191,6 +197,9 @@
 - qcom,gpu-quirk-hfi-use-reg:
 				Use registers to replace DCVS HFI message to avoid GMU failure
 				to access system memory during IFPC
+- qcom,gpu-quirk-limit-uche-gbif-rw:
+				Limit number of read and write transactions from UCHE block to
+				GBIF to avoid possible deadlock between GBIF, SMMU and MEMNOC.
 
 KGSL Memory Pools:
 - qcom,gpu-mempools:		Container for sets of GPU mempools.Multiple sets
@@ -209,6 +218,26 @@
 - qcom,mempool-allocate:	Allocate memory from the system memory when the
 				reserved pool exhausted.
 
+SOC Hardware revisions:
+- qcom,soc-hw-revisions:
+		Container of sets of SOC hardware revisions specified by
+		qcom,soc-hw-revision.
+Properties:
+- compatible:
+		Must be qcom,soc-hw-revisions.
+
+- qcom,soc-hw-revision:
+		Defines a SOC hardware revision.
+
+Properties:
+- reg:
+		Identifier for the hardware revision - must match the value read
+		from the hardware.
+- qcom,chipid:
+		GPU Chip ID to be used for this hardware revision.
+- qcom,gpu-quirk-*:
+		GPU quirks applicable for this hardware revision.
+
 GPU LLC slice info:
 - cache-slice-names:		List of LLC cache slices for GPU transactions
 				and pagetable walk.
@@ -284,6 +313,28 @@
 		coresight-child-list = <&funnel_in0>;
 		coresight-child-ports = <5>;
 
+		qcom,soc-hw-revisions {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			compatible="qcom,soc-hw-revisions";
+
+			qcom,soc-hw-revision@0 {
+				reg = <0>;
+
+				qcom,chipid = <0x06010500>;
+				qcom,gpu-quirk-hfi-use-reg;
+				qcom,gpu-quirk-limit-uche-gbif-rw;
+			};
+
+			qcom,soc-hw-revision@1 {
+				reg = <1>;
+
+				qcom,chipid = <0x06010501>;
+				qcom,gpu-quirk-hfi-use-reg;
+			};
+		};
+
 		/* GPU Mempools */
 		qcom,gpu-mempools {
 			#address-cells= <1>;
diff --git a/Documentation/devicetree/bindings/hwmon/jc42.txt b/Documentation/devicetree/bindings/hwmon/jc42.txt
index 07a2504..f569db5 100644
--- a/Documentation/devicetree/bindings/hwmon/jc42.txt
+++ b/Documentation/devicetree/bindings/hwmon/jc42.txt
@@ -34,6 +34,10 @@
 
 - reg: I2C address
 
+Optional properties:
+- smbus-timeout-disable: When set, the smbus timeout function will be disabled.
+			 This is not supported on all chips.
+
 Example:
 
 temp-sensor@1a {
diff --git a/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt b/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt
index b0c5b57..d9d3470 100644
--- a/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt
+++ b/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt
@@ -51,6 +51,8 @@
   device (see pinctrl binding [0]).
 
 [0]: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+- #thermal-sensor-cells : To register ADC sensors with of_thermal. Should be 1.
+  See ./thermal.txt for a description.
 
 Client required property:
 - qcom,<consumer name>-vadc : The phandle to the corresponding vadc device.
diff --git a/Documentation/devicetree/bindings/input/qpnp-power-on.txt b/Documentation/devicetree/bindings/input/qpnp-power-on.txt
index c2550e6..33d0236 100644
--- a/Documentation/devicetree/bindings/input/qpnp-power-on.txt
+++ b/Documentation/devicetree/bindings/input/qpnp-power-on.txt
@@ -84,7 +84,34 @@
 				case.
 - qcom,kpdpwr-sw-debounce	Boolean property to enable the debounce logic
 				on the KPDPWR_N rising edge.
-
+- qcom,resin-pon-reset		Boolean property which indicates that resin
+				needs to be configured during reset in addition
+				to the primary PON device that is configured
+				for system reset through qcom,system-reset
+				property.
+- qcom,resin-warm-reset-type 	Poweroff type required to be configured on
+				RESIN reset control register when the system
+				goes for warm reset. If this property is not
+				specified, then the default type, warm reset
+				will be configured to RESIN reset control
+				register. This property is effective only if
+				qcom,resin-pon-reset is defined.
+- qcom,resin-hard-reset-type 	Same description as qcom,resin-warm-reset-type
+				but this applies for the system hard reset case.
+- qcom,resin-shutdown-type   	Same description as qcom,resin-warm-reset-type
+				but this applies for the system shutdown case.
+- qcom,resin-shutdown-disable	Boolean property to disable RESIN POFF
+				trigger during system shutdown case.
+				This property is effective only if
+				qcom,resin-pon-reset is defined.
+- qcom,resin-hard-reset-disable	Boolean property to disable RESIN POFF
+	                        trigger during system hard reset case.
+				This property is effective only if
+				qcom,resin-pon-reset is defined.
+- qcom,ps-hold-shutdown-disable	Boolean property to disable PS_HOLD POFF
+				trigger during system shutdown case.
+- qcom,ps-hold-hard-reset-disable	Boolean property to disable PS_HOLD
+				POFF trigger during system hard reset case.
 
 All the below properties are in the sub-node section (properties of the child
 node).
diff --git a/Documentation/devicetree/bindings/input/touchscreen/synaptics_dsx_i2c.txt b/Documentation/devicetree/bindings/input/touchscreen/synaptics_dsx_i2c.txt
new file mode 100644
index 0000000..131942d
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/synaptics_dsx_i2c.txt
@@ -0,0 +1,62 @@
+Synaptics DSXV27 touch controller
+
+The Synaptics Touch controller is connected to the host processor via I2C. The
+controller generates interrupts when the user touches the panel. The host
+processor is expected to read the touch coordinates over I2C and pass the
+coordinates to the rest of the system.
+
+Required properties:
+
+ - compatible		           : should be "synaptics,dsx-i2c".
+ - reg			               : i2c slave address of the device.
+ - interrupt-parent	           : parent of interrupt.
+ - synaptics,irq-gpio	       : irq gpio.
+ - synaptics,reset-gpio	       : reset gpio.
+ - vdd_supply			   : digital voltage power supply needed to power device.
+ - avdd_supply			   : analog voltage power supply needed to power device.
+ - synaptics,pwr-reg-name	   : power reg name of digital voltage.
+ - synaptics,bus-reg-name	   : bus reg name of analog voltage.
+
+Optional property:
+ - synaptics,ub-i2c-addr       : addr of ub-i2c.
+ - synaptics,irq-on-state      : status of irq gpio.
+ - synaptics,cap-button-codes  : virtual key code mappings to be used.
+ - synaptics,vir-button-codes  : virtual key code and the response region on panel.
+ - synaptics,x-flip		       : modify orientation of the x axis.
+ - synaptics,y-flip		       : modify orientation of the y axis.
+ - synaptics,reset-delay-ms	   : reset delay for controller (ms), default 100.
+ - synaptics,power-delay-ms	   : power delay for controller (ms), default 100.
+ - synaptics,reset-active-ms	   : reset active time for controller (ms), default 20.
+ - synaptics,max-y-for-2d	   : maximal y value of the panel.
+ - clock-names			: Clock names used for secure touch. They are: "iface_clk", "core_clk"
+ - clocks			: Defined if 'clock-names' DT property is defined. These clocks
+				  are associated with the underlying I2C bus.
+
+Example:
+	i2c@78b7000 {
+		status = "ok";
+		synaptics@4b {
+			compatible = "synaptics,dsx-i2c";
+			reg = <0x4b>;
+			interrupt-parent = <&tlmm>;
+			interrupts = <65 0x2008>;
+			vdd_supply = <&pmtitanium_l17>;
+			avdd_supply = <&pmtitanium_l6>;
+			synaptics,pwr-reg-name = "vdd";
+			synaptics,bus-reg-name = "avdd";
+			synaptics,ub-i2c-addr = <0x2c>;
+			synaptics,irq-gpio = <&tlmm 65 0x2008>;
+			synaptics,reset-gpio = <&tlmm 99 0x2008>;
+			synaptics,irq-on-state = <0>;
+			synaptics,power-delay-ms = <200>;
+			synaptics,reset-delay-ms = <200>;
+			synaptics,reset-active-ms = <20>;
+			synaptics,max-y-for-2d = <1919>; /* remove if no virtual buttons */
+			synaptics,cap-button-codes = <139 172 158>;
+			synaptics,vir-button-codes = <139 180 2000 320 160 172 540 2000 320 160 158 900 2000 320 160>;
+			/* Underlying clocks used by secure touch */
+			clock-names = "iface_clk", "core_clk";
+			clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+				<&clock_gcc clk_gcc_blsp1_qup3_i2c_apps_clk>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/input/touchscreen/synaptics_dsxv26_i2c.txt b/Documentation/devicetree/bindings/input/touchscreen/synaptics_dsxv26_i2c.txt
new file mode 100644
index 0000000..7dece8e
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/synaptics_dsxv26_i2c.txt
@@ -0,0 +1,50 @@
+Synaptics DSXV26 touch controller
+
+The Synaptics Touch controller is connected to the host processor via I2C. The
+controller generates interrupts when the user touches the panel. The host
+processor is expected to read the touch coordinates over I2C and pass the
+coordinates to the rest of the system.
+
+Required properties:
+
+ - compatible		           : should be "synaptics,dsx-i2c".
+ - reg			               : i2c slave address of the device.
+ - interrupt-parent	           : parent of interrupt.
+ - synaptics,irq-gpio	       : irq gpio.
+ - synaptics,irq-flags         : irq flags.
+
+Optional property:
+ - vdd_ana-supply			   : digital voltage power supply needed to power device.
+ - vcc_i2c-supply			   : analog voltage power supply needed to power device.
+ - synaptics,pwr-reg-name	   : power reg name of digital voltage.
+ - synaptics,bus-reg-name	   : bus reg name of analog voltage.
+ - synaptics,irq-on-state      : status of irq gpio.
+ - synaptics,cap-button-codes  : virtual key code mappings to be used.
+ - synaptics,vir-button-codes  : virtual key code and the response region on panel.
+ - synaptics,x-flip		       : modify orientation of the x axis.
+ - synaptics,y-flip		       : modify orientation of the y axis.
+ - synaptics,reset-delay-ms	   : reset delay for controller (ms), default 100.
+ - synaptics,max-y-for-2d	   : maximal y value of the panel.
+
+Example:
+	i2c@78b7000 {
+		status = "ok";
+		synaptics@4b {
+			compatible = "synaptics,dsx-i2c";
+			reg = <0x4b>;
+			interrupt-parent = <&tlmm>;
+			interrupts = <65 0x2008>;
+			vdd_ana-supply = <&pmtitanium_l17>;
+			vcc_i2c-supply = <&pmtitanium_l6>;
+			synaptics,pwr-reg-name = "vdd_ana";
+			synaptics,bus-reg-name = "vcc_i2c";
+			synaptics,irq-gpio = <&tlmm 65 0x2008>;
+			synaptics,irq-on-state = <0>;
+			synaptics,irq-flags = <0x2008>; /* IRQF_ONESHOT | IRQF_TRIGGER_LOW */
+			synaptics,power-delay-ms = <200>;
+			synaptics,reset-delay-ms = <200>;
+			synaptics,max-y-for-2d = <1919>; /* remove if no virtual buttons */
+			synaptics,cap-button-codes = <139 172 158>;
+			synaptics,vir-button-codes = <139 180 2000 320 160 172 540 2000 320 160 158 900 2000 320 160>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/qti,pdc.txt b/Documentation/devicetree/bindings/interrupt-controller/qti,pdc.txt
index 07667a4..8d6fad0 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/qti,pdc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/qti,pdc.txt
@@ -31,6 +31,7 @@
 	* "qcom,pdc-sdm845": For sdm845 pin data
 	* "qcom,pdc-sdm845-v2": For sdm845 v2 pin data
 	* "qcom,pdc-sdm670": For sdm670 pin data
+	* "qcom,pdc-sdxpoorwills": For sdxpoorwills pin data
 
 - reg:
 	Usage: required
diff --git a/Documentation/devicetree/bindings/leds/leds-qpnp-vibrator-ldo.txt b/Documentation/devicetree/bindings/leds/leds-qpnp-vibrator-ldo.txt
new file mode 100644
index 0000000..2865019
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-qpnp-vibrator-ldo.txt
@@ -0,0 +1,50 @@
+Qualcomm Technologies, Inc. Vibrator-LDO
+
+QPNP (Qualcomm Technologies, Inc. Plug N Play) Vibrator-LDO is a peripheral
+on some QTI PMICs. It can be interfaced with the host processor via SPMI.
+
+Vibrator-LDO peripheral supports Eccentric Rotation Mass (ERM) vibrator.
+
+Properties:
+
+- compatible
+	Usage:      required
+	Value type: <string>
+	Definition: "qcom,qpnp-vibrator-ldo".
+
+- reg
+	Usage:      required
+	Value type: <u32>
+	Definition: Base address of vibrator-ldo peripheral.
+
+- qcom,vib-ldo-volt-uv
+	Usage:      required
+	Value type: <u32>
+	Definition: The optimal voltage requirement of the vibrator motor for
+		    a normal vibration. Value is specified in microvolts.
+
+- qcom,disable-overdrive
+	Usage:      optional
+	Value type: <empty>
+	Definition: Do not apply overdrive voltage.
+
+- qcom,vib-overdrive-volt-uv
+	Usage:      optional and not required if qcom,disable-overdrive present
+	Value type: <u32>
+	Definition: The voltage in microvolts used as overdrive factor for
+		    improving motor reactivity at the start of vibration.
+		    If this property not specified, a default value of
+		    2 times the value specified in qcom,vib-ldo-volt-uv
+		    property is used.
+
+=======
+Example
+=======
+
+pmi632_vib: qcom,vibrator@5700 {
+	compatible = "qcom,qpnp-vibrator-ldo";
+	reg = <0x5700 0x100>;
+	qcom,vib-ldo-volt-uv = <1504000>;
+	qcom,disable-overdrive;
+	qcom,vib-overdrive-volt-uv = <3544000>;
+};
diff --git a/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt b/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt
index 42e97f7..c7268ef 100644
--- a/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt
+++ b/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt
@@ -80,6 +80,13 @@
 			  or disabled.
 - qcom,auto-calibration-enable : A boolean property which enables auto-calibration
 				 of the WLED sink configuration.
+- qcom,wled-brightness-map	: Array of brightness map codes of size 256.
+				  These codes will be mapped to the brightness
+				  level requested in the scale of 0-4095. Code
+				  entry is of 16 bit size.
+- qcom,wled-stepper-en	: A boolean property to specify if stepper algorithm
+			  needs to be enabled. This needs the brightness map
+			  table to be specified.
 
 Optional properties if 'qcom,disp-type-amoled' is mentioned in DT:
 - qcom,loop-comp-res-kohm	: control to select the compensation resistor in kohm. default is 320.
@@ -123,4 +130,5 @@
 		qcom,en-phase-stag;
 		qcom,led-strings-list = [00 01 02 03];
 		qcom,en-ext-pfet-sc-pro;
+		qcom,wled-brightness-map = /bits/ 16  <0 . . 4095>;
 	};
diff --git a/Documentation/devicetree/bindings/leds/leds-qti-tri-led.txt b/Documentation/devicetree/bindings/leds/leds-qti-tri-led.txt
new file mode 100644
index 0000000..c088d42
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-qti-tri-led.txt
@@ -0,0 +1,60 @@
+Qualcomm Technologies, Inc. TRI_LED driver specific bindings
+
+This binding document describes the properties of TRI_LED module in
+Qualcomm Technologies, Inc. PMIC chips.
+
+- compatible:
+	Usage: required
+	Value type: <string>
+	Definition: Must be "qcom,tri-led".
+
+- reg:
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: Register base of the TRI_LED module and length.
+
+Properties for child nodes:
+- pwms:
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: The PWM device (phandle) used for controlling LED.
+
+- led-sources:
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: see Documentation/devicetree/bindings/leds/common.txt;
+		Device current output identifiers are: 0 - LED1_EN,
+		1 - LED2_EN, 2 - LED3_EN.
+
+- label:
+	Usage: optional
+	Value type: <string>
+	Definition: see Documentation/devicetree/bindings/leds/common.txt;
+
+- linux,default-trigger:
+	Usage: optional
+	Value_type: <string>
+	Definition: see Documentation/devicetree/bindings/leds/common.txt;
+
+Example:
+
+	pmi8998_rgb: tri-led@d000{
+		compatible = "qcom,tri-led";
+		reg = <0xd000 0x100>;
+
+		red {
+			label = "red";
+			pwms = <&pmi8998_lpg 4 1000000>;
+			led-sources = <0>;
+		};
+		green {
+			label = "green";
+			pwms = <&pmi8998_lpg 3 1000000>;
+			led-sources = <1>;
+		};
+		blue {
+			label = "blue";
+			pwms = <&pmi8998_lpg 2 1000000>;
+			led-sources = <2>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt b/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
index db34047..5a92bf6 100644
--- a/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
@@ -108,6 +108,10 @@
 				<rd_lut, wr_lut> indicating the safe lut
 				settings for the inline rotator sspp and
 				writeback client.
+- qcom,mdss-rot-qos-cpu-mask: A u32 value indicating desired PM QoS CPU
+				affine mask.
+- qcom,mdss-rot-qos-cpu-dma-latency: A u32 value indicating desired PM QoS CPU DMA
+				latency in usec.
 - qcom,mdss-rot-mode:		This is integer value indicates operation mode
 				of the rotator device
 - qcom,mdss-sbuf-headroom:	This integer value indicates stream buffer headroom in lines.
@@ -188,6 +192,9 @@
 		qcom,mdss-rot-danger-lut = <0x0 0x0>;
 		qcom,mdss-rot-safe-lut = <0x0000ffff 0x0>;
 
+		qcom,mdss-rot-qos-cpu-mask = <0xf>;
+		qcom,mdss-rot-qos-cpu-dma-latency = <75>;
+
 		qcom,mdss-inline-rot-qos-lut = <0x0 0x0 0x00112233 0x44556677>;
 		qcom,mdss-inline-rot-danger-lut = <0x0 0x0000ffff>;
 		qcom,mdss-inline-rot-safe-lut = <0x0 0x0000ff00>;
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
index 001f74f3..6c61ada 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
+++ b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
@@ -121,6 +121,10 @@
 
 	- qcom,wakeup-on-idle: if configured, the mmcqd thread will call
 	  set_wake_up_idle(), thereby voting for it to be called on idle CPUs.
+	- nvmem-cells: specifies the handle to represent the SoC revision.
+	  usually it is defined by qfprom device node.
+	- nvmem-cell-names: specifies the given nvmem cell name as defined in
+	  qfprom node.
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/net/qcom,emac-dwc-eqos.txt b/Documentation/devicetree/bindings/net/qcom,emac-dwc-eqos.txt
index 8e56180..eff3d82 100644
--- a/Documentation/devicetree/bindings/net/qcom,emac-dwc-eqos.txt
+++ b/Documentation/devicetree/bindings/net/qcom,emac-dwc-eqos.txt
@@ -11,6 +11,16 @@
 - interrupts: Interrupt number used by this controller
 - io-macro-info: Internal io-macro-info
 
+Optional:
+- qcom,msm-bus,name: String representing the client-name
+- qcom,msm-bus,num-cases: Total number of usecases
+- qcom,msm-bus,num-paths: Total number of master-slave pairs
+- qcom,msm-bus,vectors-KBps: Arrays of unsigned integers representing:
+                             master-id, slave-id, arbitrated bandwidth
+                             in KBps, instantaneous bandwidth in KBps
+- qcom,bus-vector-names: specifies string IDs for the corresponding bus vectors
+                         in the same order as the qcom,msm-bus,vectors-KBps property.
+
 Internal io-macro-info:
 - io-macro-bypass-mode: <0 or 1> internal or external delay configuration
 - io-interface: <rgmii/mii/rmii> PHY interface used
@@ -35,6 +45,14 @@
 				"tx-ch4-intr", "rx-ch0-intr",
 				"rx-ch1-intr", "rx-ch2-intr",
 				"rx-ch3-intr";
+			qcom,msm-bus,name = "emac";
+			qcom,msm-bus,num-cases = <3>;
+			qcom,msm-bus,num-paths = <2>;
+			qcom,msm-bus,vectors-KBps =
+				<98 512 1250 0>, <1 781 0 40000>,  /* 10Mbps vote */
+				<98 512 12500 0>, <1 781 0 40000>,  /* 100Mbps vote */
+				<98 512 125000 0>, <1 781 0 40000>; /* 1000Mbps vote */
+			qcom,bus-vector-names = "10", "100", "1000";
 			io-macro-info {
 				io-macro-bypass-mode = <0>;
 				io-interface = "rgmii";
diff --git a/Documentation/devicetree/bindings/pci/msm_pcie.txt b/Documentation/devicetree/bindings/pci/msm_pcie.txt
index 6af2bac..dfe5852 100644
--- a/Documentation/devicetree/bindings/pci/msm_pcie.txt
+++ b/Documentation/devicetree/bindings/pci/msm_pcie.txt
@@ -37,6 +37,7 @@
 		     MSIs, virtual IRQ's (INT#), link state notifications.
   - perst-gpio: PERST GPIO specified by PCIe spec.
   - wake-gpio: WAKE GPIO specified by PCIe spec.
+  - phy-status-offset: Offset from PCIe PHY base to check if PCIe PHY is up.
   - <supply-name>-supply: phandle to the regulator device tree node.
     Refer to the schematics for the corresponding voltage regulators.
     vreg-1.8-supply: phandle to the analog supply for the PCIe controller.
@@ -274,6 +275,7 @@
 		qcom,switch-latency = <100>;
 		qcom,wr-halt-size = <0xa>; /* 1KB */
 		qcom,slv-addr-space-size = <0x1000000>; /* 16MB */
+		qcom,phy-status-offset = <0x800>;
 		qcom,cpl-timeout = <0x2>;
 
 		iommus = <&anoc0_smmu>;
diff --git a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
index abbc560..793a965 100644
--- a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
+++ b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
@@ -95,6 +95,7 @@
 - qcom,qdsp6v56-1-10: Boolean- Present if the qdsp version is v56 1.10
 - qcom,override-acc-1: Override the default ACC settings with this value if present.
 - qcom,minidump-id: Unique id for each subsystem
+- qcom,reset-clk: Enable clock after MSS restart
 
 One child node to represent the MBA image may be specified, when the MBA image
 needs to be loaded in a specifically carved out memory region.
diff --git a/Documentation/devicetree/bindings/platform/msm/gpio-usbdetect.txt b/Documentation/devicetree/bindings/platform/msm/gpio-usbdetect.txt
new file mode 100644
index 0000000..5bb85a4
--- /dev/null
+++ b/Documentation/devicetree/bindings/platform/msm/gpio-usbdetect.txt
@@ -0,0 +1,25 @@
+GPIO USB VBUS Detection
+
+Discrete USB VBUS detection circuitry can be connected to the AP or PMICs.
+Such circuits can be used to detect when a USB cable is connected to
+an upstream port such as a standard host or a wall charger by detecting
+the presence of VBUS voltage. The GPIO can be configured to trigger an
+interrupt, and allow the software driver to in turn notify the USB
+subsystem using the power_supply framework.
+
+Required Properties:
+ - compatible: must be "qcom,gpio-usbdetect"
+ - qcom,vbus-det-gpio: GPIO from which VBUS detection can be read from.
+ - interrupts: an interrupt triggered by the output of the detection circuit
+ - interrupt-names: must be "vbus_det_irq"
+
+Optional Properties:
+ - vin-supply: phandle to a regulator that powers this circuit, if needed
+
+Example:
+
+	usb_detect {
+		compatible = "qcom,gpio-usbdetect";
+		qcom,vbus-det-gpio = <&pm8084 2 0>;
+                vin-supply = <&vbus_det_reg>;
+	};
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
index f50fd88..75996a5 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
@@ -104,6 +104,13 @@
 		    this property is not specified, then the default value used
 		    will be 75mA.
 
+- qcom,fg-cutoff-current
+	Usage:      optional
+	Value type: <u32>
+	Definition: Minimum Battery current (in mA) used for cutoff SOC
+		    estimate. If this property is not specified, then a default
+		    value of 500 mA will be applied.
+
 - qcom,fg-delta-soc-thr
 	Usage:      optional
 	Value type: <u32>
diff --git a/Documentation/devicetree/bindings/pwm/pwm-qti-lpg.txt b/Documentation/devicetree/bindings/pwm/pwm-qti-lpg.txt
new file mode 100644
index 0000000..3174ccb
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/pwm-qti-lpg.txt
@@ -0,0 +1,36 @@
+Qualcomm Technologies, Inc. LPG driver specific bindings
+
+This binding document describes the properties of LPG (Light Pulse Generator)
+device module in Qualcomm Technologies, Inc. PMIC chips.
+
+- compatible:
+	Usage: required
+	Value type: <string>
+	Definition: Must be "qcom,pwm-lpg".
+
+- reg:
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: Register base and length for LPG modules. The length
+		      varies based on the number of channels available in
+		      the PMIC chips.
+
+- reg-names:
+	Usage: required
+	Value type: <string>
+	Definition: The name of the register defined in the reg property.
+		      It must be "lpg-base".
+
+- #pwm-cells:
+	Usage: required
+	Value type: <u32>
+	Definition: See Documentation/devicetree/bindings/pwm/pwm.txt;
+
+Example:
+
+	pmi8998_lpg: lpg@b100 {
+		compatible = "qcom,pwm-lpg";
+		reg = <0xb100 0x600>;
+		reg-names = "lpg-base";
+		#pwm-cells = <2>;
+	};
diff --git a/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt b/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt
index 0c5f696..e64599c 100644
--- a/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt
+++ b/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt
@@ -13,7 +13,8 @@
 Optional properties:
 - qcom,fastrpc-glink:	Flag to use glink instead of smd for IPC
 - qcom,rpc-latency-us:	FastRPC QoS latency vote
-- qcom,adsp-remoteheap-vmid:  FastRPC remote heap VMID number
+- qcom,adsp-remoteheap-vmid:  FastRPC remote heap VMID list
+- qcom,fastrpc-adsp-audio-pdr:  Flag to enable ADSP Audio PDR
 
 Optional subnodes:
 - qcom,msm_fastrpc_compute_cb :	Child nodes representing the compute context
@@ -29,7 +30,7 @@
 		compatible = "qcom,msm-fastrpc-adsp";
 		qcom,fastrpc-glink;
 		qcom,rpc-latency-us = <2343>;
-		qcom,adsp-remoteheap-vmid = <37>;
+		qcom,adsp-remoteheap-vmid = <22 37>;
 
 		qcom,msm_fastrpc_compute_cb_1 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
@@ -73,9 +74,11 @@
 Required properties:
 - compatible : Must be "qcom,msm-adsprpc-mem-region"
 - memory-region : CMA region which is owned by this device
+- restrict-access : Blocking vote for hyp_assign_phys function call
 
 Example:
        qcom,adsprpc-mem {
                compatible = "qcom,msm-adsprpc-mem-region";
                memory-region = <&adsp_mem>;
+               restrict-access;
        };
diff --git a/Documentation/devicetree/bindings/regulator/cpr-regulator.txt b/Documentation/devicetree/bindings/regulator/cpr-regulator.txt
new file mode 100644
index 0000000..1c4dfbf
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/cpr-regulator.txt
@@ -0,0 +1,978 @@
+QTI CPR (Core Power Reduction) Regulator
+
+CPR regulator device is for QTI RBCPR (RapidBridge CPR) on
+	application processor core. It takes voltage corner level
+	as input and converts it to actual voltage based on the
+	suggestions from factory production process. When CPR is
+	enabled for application processor core, it will suggest
+	scaling the voltage up or down for best performance and
+	power of the core. The scaling based on factory production
+	process is called PVS (Process Voltage Scaling) with efuse
+	bits to indicate what bin (and voltage range) a chip is in.
+
+Required properties:
+- compatible:			Must be "qcom,cpr-regulator"
+- reg:				Register addresses for RBCPR, RBCPR clock
+				select, PVS and CPR eFuse address
+- reg-names:			Register names. Must be "rbcpr" and "efuse_addr".
+				"rbcpr_clk" is optional.
+- regulator-name:		A string used to describe the regulator
+- interrupts:			Interrupt line from RBCPR to interrupt controller.
+- qcom,cpr-fuse-corners:	Number of fuse corners present.  Many other properties
+				are sized based upon this value.
+- regulator-min-microvolt:	Minimum corner value which should be 1 to
+				represent the lowest supported corner.
+- regulator-max-microvolt:	Maximum corner value which should be equal to
+				qcom,cpr-fuse-corners if consumers request fuse
+				corners or the length of qcom,cpr-corner-map if
+				consumers request virtual corners.
+- qcom,cpr-voltage-ceiling:	Array of ceiling voltages in microvolts for fuse
+				corners ordered from lowest voltage corner to highest
+				voltage corner.  This property must be of length
+				defined by qcom,cpr-fuse-corners.
+- qcom,cpr-voltage-floor:	Array of floor voltages in microvolts for fuse
+				corners ordered from lowest voltage corner to highest
+				voltage corner.  This property must be of length
+				defined by qcom,cpr-fuse-corners.
+- vdd-apc-supply:		Regulator to supply VDD APC power
+- qcom,vdd-apc-step-up-limit:	Limit of vdd-apc-supply steps for scaling up.
+- qcom,vdd-apc-step-down-limit:	Limit of vdd-apc-supply steps for scaling down.
+- qcom,cpr-ref-clk:		The reference clock in kHz.
+- qcom,cpr-timer-delay:		The delay in microseconds for the timer interval.
+- qcom,cpr-timer-cons-up:	Consecutive number of timer interval (qcom,cpr-timer-delay)
+				occurred before issuing UP interrupt.
+- qcom,cpr-timer-cons-down:	Consecutive number of timer interval (qcom,cpr-timer-delay)
+				occurred before issuing DOWN interrupt.
+- qcom,cpr-irq-line:		Internal interrupt route signal of RBCPR, one of 0, 1 or 2.
+- qcom,cpr-step-quotient:	Defines the number of CPR quotient (i.e. Ring Oscillator(RO)
+				count) per vdd-apc-supply output voltage step.  A single
+				integer value may be specified which is to be used for all
+				RO's.  Alternatively, 8 integer values may be specified which
+				define the step quotients for RO0 to RO7 in order.
+- qcom,cpr-up-threshold:	The threshold for CPR to issue interrupt when
+				error_steps is greater than it when stepping up.
+- qcom,cpr-down-threshold:	The threshold for CPR to issue interrupt when
+				error_steps is greater than it when stepping down.
+- qcom,cpr-idle-clocks:		Idle clock cycles RO can be in.
+- qcom,cpr-gcnt-time:		The time for gate count in microseconds.
+- qcom,cpr-apc-volt-step:	The voltage in microvolt per CPR step, such as 5000uV.
+- qcom,cpr-fuse-row:		Array of row number of CPR fuse and method to read that row. It should have
+				index and value like this:
+				 [0] => the fuse row number
+				 [1] => fuse reading method, 0 for direct reading or 1 for SCM reading
+- qcom,cpr-fuse-target-quot:	Array of bit positions in the primary CPR fuse row defined
+				by qcom,cpr-fuse-row for the target quotients of each
+				fuse corner.  Each bit position corresponds to the LSB
+				of the quotient parameter.  The elements in the array
+				are ordered from lowest voltage corner to highest voltage
+				corner.  This property must be of length defined by
+				qcom,cpr-fuse-corners.
+- qcom,cpr-fuse-ro-sel:		Array of bit positions in the primary CPR fuse row defined
+				by qcom,cpr-fuse-row for the ring oscillator selection for each
+				fuse corner.  Each bit position corresponds to the LSB
+				of the RO select parameter.  The elements in the array
+				are ordered from lowest voltage corner to highest voltage
+				corner.  This property must be of length defined by
+				qcom,cpr-fuse-corners.
+
+Optional properties:
+- vdd-mx-supply:		Regulator to supply memory power as dependency
+				of VDD APC.
+- qcom,vdd-mx-vmax:		The maximum voltage in uV for vdd-mx-supply. This
+				is required when vdd-mx-supply is present.
+- qcom,vdd-mx-vmin-method:	The method to determine the minimum voltage for
+				vdd-mx-supply, which can be one of following
+				choices compared with VDD APC:
+				  0 => equal to the voltage(vmin) of VDD APC
+				  1 => equal to PVS corner ceiling voltage
+				  2 => equal to slow speed corner ceiling
+				  3 => equal to qcom,vdd-mx-vmax
+				  4 => equal to VDD_APC fuse corner mapped vdd-mx voltage
+				  5 => equal to VDD_APC virtual corner mapped vdd-mx voltage
+				This is required when vdd-mx-supply is present.
+- qcom,vdd-mx-corner-map:	Array of integers which defines the mapping from VDD_APC
+				voltage corners to vdd-mx-supply voltages.
+				Each element is a voltage to request from vdd-mx for the
+				corresponding fuse corner or virtual corner. The elements
+				in the array are ordered from lowest voltage corner
+				to highest voltage corner.  The length of this property
+				depends on the value of qcom,vdd-mx-vmin-method property.
+				When qcom,vdd-mx-vmin-method property has a value of 4, the length
+				of this property must be equal to the value defined by qcom,cpr-fuse-corners.
+				When qcom,vdd-mx-vmin-method property has a value of 5, the length of
+				this property must be equal to the number of elements in the qcom,cpr-corner-map
+				property.
+- qcom,pvs-voltage-table: 	Array of N-tuples in which each tuple specifies the
+				initial voltage in microvolts of the PVS bin for each
+				fuse voltage corner.  The location or 0-based index
+				of a tuple in the list corresponds to the PVS bin number.
+				Each tuple must be of length defined by qcom,cpr-fuse-corners.
+				A given cpr-regulator device must have either
+				qcom,pvs-voltage-table specified or
+				qcom,cpr-fuse-init-voltage (and its associated properties).
+- qcom,pvs-fuse-redun-sel:	Array of 5 elements to indicate where to read the bits, what value to
+				compare with in order to decide if the redundant PVS fuse bits would be
+				used instead of the original bits and method to read fuse row, reading
+				register through SCM or directly. The 5 elements with index [0..4] are:
+				  [0] => the fuse row number of the selector
+				  [1] => LSB bit position of the bits
+				  [2] => number of bits
+				  [3] => the value to indicate redundant selection
+				  [4] => fuse reading method, 0 for direct reading or 1 for SCM reading
+				When the value of the fuse bits specified by first 3 elements equals to
+				the value in 4th element, redundant PVS fuse bits should be selected.
+				Otherwise, the original PVS bits should be selected. If the 5th
+				element is 0, read the fuse row from register directly. Otherwise,
+				read it through SCM.
+				This property is required if qcom,pvs-voltage-table is present.
+- qcom,pvs-fuse:		Array of 4 elements to indicate the bits for PVS fuse and read method.
+				The array should have index and value like this:
+				  [0] => the PVS fuse row number
+				  [1] => LSB bit position of the bits
+				  [2] => number of bits
+				  [3] => fuse reading method, 0 for direct reading or 1 for SCM reading
+				This property is required if qcom,pvs-voltage-table is present.
+- qcom,pvs-fuse-redun:		Array of 4 elements to indicate the bits for redundant PVS fuse.
+				The array should have index and value like this:
+				  [0] => the redundant PVS fuse row number
+				  [1] => LSB bit position of the bits
+				  [2] => number of bits
+				  [3] => fuse reading method, 0 for direct reading or 1 for SCM reading
+				This property is required if qcom,pvs-voltage-table is present.
+- qcom,cpr-fuse-redun-sel:	Array of 5 elements to indicate where to read the bits, what value to
+				compare with in order to decide if the redundant CPR fuse bits would be
+				used instead of the original bits and method to read fuse row, using SCM
+				to read or read register directly. The 5 elements with index [0..4] are:
+				  [0] => the fuse row number of the selector
+				  [1] => LSB bit position of the bits
+				  [2] => number of bits
+				  [3] => the value to indicate redundant selection
+				  [4] => fuse reading method, 0 for direct reading or 1 for SCM reading
+				When the value of the fuse bits specified by first 3 elements equals to
+				the value in 4th element, redundant CPR fuse bits should be selected.
+				Otherwise, the original CPR bits should be selected. If the 5th element
+				is 0, read the fuse row from register directly. Otherwise, read it through
+				SCM.
+- qcom,cpr-fuse-redun-row:	Array of row number of redundant CPR fuse and method to read that
+				row. It should have index and value like this:
+				 [0] => the redundant fuse row number
+				 [1] => the value to indicate reading the fuse row directly or using SCM
+				This property is required if qcom,cpr-fuse-redun-sel is present.
+- qcom,cpr-fuse-redun-target-quot:	Array of bit positions in the redundant CPR fuse row defined
+				by qcom,cpr-fuse-redun-row for the target quotients of each
+				fuse corner.  Each bit position corresponds to the LSB
+				of the quotient parameter.  The elements in the array
+				are ordered from lowest voltage corner to highest voltage corner.
+				This property must be of length defined by qcom,cpr-fuse-corners.
+				This property is required if qcom,cpr-fuse-redun-sel is present.
+- qcom,cpr-fuse-redun-ro-sel:	Array of bit positions in the redundant CPR fuse row defined
+				by qcom,cpr-fuse-redun-row for the ring oscillator select of each
+				fuse corner.  Each bit position corresponds to the LSB of the RO
+				select parameter.  The elements in the array are ordered from
+				lowest voltage corner to highest voltage corner.
+				This property must be of length defined by qcom,cpr-fuse-corners.
+				This property is required if qcom,cpr-fuse-redun-sel is present.
+- qcom,cpr-fuse-redun-bp-cpr-disable:	Redundant bit position of the bit to indicate if CPR should be disabled
+- qcom,cpr-fuse-redun-bp-scheme:	Redundant bit position of the bit to indicate if it's a global/local scheme
+					This property is required if cpr-fuse-redun-bp-cpr-disable
+					is present, and vice versa.
+- qcom,cpr-fuse-bp-cpr-disable:	Bit position of the bit to indicate if CPR should be disabled
+- qcom,cpr-fuse-bp-scheme:     Bit position of the bit to indicate if it's a global/local scheme
+- qcom,cpr-fuse-revision:	Array of 4 integer elements which define the location of the bits for
+				the CPR fusing revision fuse parameter.  The 4 elements are:
+				[0]: => the fuse row number of the bits
+				[1]: => LSB bit position of the bits
+				[2]: => the number of bits
+				[3]: => fuse reading method, 0 for direct reading or 1 for SCM reading
+				The fusing revision value is used to determine which specific adjustments
+				are required on some chips.
+- qcom,cpr-fuse-target-quot-size:	Array of target quotient parameter bit sizes in the primary
+				or redundant CPR fuse row for each fuse corner.  The elements in the
+				array are ordered from lowest voltage corner to highest voltage corner.
+				If this property is not present, then all target quotient fuse values
+				are assumed to be the default length of 12 bits.
+- qcom,cpr-fuse-target-quot-scale:	Array of doubles which defines the scaling coefficients to decode
+				the target quotients of each fuse corner.  The first element in each
+				double represents the offset to add to the scaled quotient.  The second
+				element represents the multiplier to scale the quotient by.  For example,
+				given a tuple <A B>, quot_decoded = A + (B * quot_raw).
+				The doubles in the array are ordered from lowest voltage corner to highest
+				voltage corner.  This property must contain a number of doubles equal to
+				the value of qcom,cpr-fuse-corners.  If this property is not present,
+				then all target quotient parameters are assumed to have an offset of 0
+				and a multiplier of 1 (i.e. no decoding needed).
+- qcom,cpr-enable:		Present: CPR enabled by default.
+				Not Present: CPR disabled by default.
+- qcom,cpr-fuse-cond-min-volt-sel:	Array of 5 elements to indicate where to read the bits,  what value to
+				compare with in order to decide if the conditional minimum apc voltage needs
+				to be applied and the fuse reading method.
+				The 5 elements with index[0..4] are:
+				[0] => the fuse row number;
+				[1] => LSB bit position of the bits;
+				[2] => number of the bits;
+				[3] => the expected data to read;
+				[4] => fuse reading method, 0 for direct reading or 1 for SCM reading;
+				When the value of the fuse bits specified by first 3 elements is not equal to
+				the value in 4th element, then set the apc voltage for all parts running
+				at each voltage corner to be not lower than the voltage defined
+				using "qcom,cpr-cond-min-voltage".
+- qcom,cpr-cond-min-voltage:	Minimum voltage in microvolts allowed for cpr-regulator output if the fuse bits
+				defined in qcom,cpr-fuse-cond-min-volt-sel have not been programmed with the
+				expected data. This is required if cpr-fuse-cond-min-volt-sel is present.
+- qcom,cpr-fuse-uplift-sel: 	Array of 5 elements to indicate where to read the bits, what value to
+				compare with in order to enable or disable the pvs voltage uplift workaround,
+				and the fuse reading method.
+				The 5 elements with index[0..4] are:
+				[0]: => the fuse row number of the selector;
+				[1]: => LSB bit position of the bits;
+				[2]: => number of the bits;
+				[3]: => the value to indicate if the apc pvs voltage uplift workaround will
+					be enabled;
+				[4]: => fuse reading method, 0 for direct reading or 1 for SCM reading.
+				When the value of the fuse bits specified by first 3 elements equals to the
+				value in 4th element, the pvs voltage uplift workaround will be enabled.
+- qcom,speed-bin-fuse-sel:	Array of 4 elements to indicate where to read the speed bin of the processor,
+				and the fuse reading method.
+				The 4 elements with index[0..3] are:
+				[0]: => the fuse row number of the selector;
+				[1]: => LSB bit position of the bits;
+				[2]: => number of the bits;
+				[3]: => fuse reading method, 0 for direct reading or 1 for SCM reading.
+				This is required if cpr-fuse-uplift-disable-sel is present.
+- qcom,cpr-uplift-voltage:	Uplift in microvolts used for increasing pvs init voltage.
+				This is required if cpr-fuse-uplift-disable-sel is present.
+- qcom,cpr-uplift-max-volt:	Maximum voltage in microvolts used for pvs voltage uplift workaround to limit
+				the maximum pvs voltage.
+				This is required if cpr-fuse-uplift-disable-sel is present.
+- qcom,cpr-uplift-quotient:	Array of target quotient increments to add to the fused quotients of each
+				fuse corner as part of the PVS voltage uplift workaround.
+				The elements in the array are ordered from lowest voltage
+				corner to highest voltage corner.  This property must be of
+				length defined by qcom,cpr-fuse-corners.  This is required
+				if cpr-fuse-uplift-disable-sel is present.
+- qcom,cpr-uplift-speed-bin:	The speed bin value corresponding to one type of processor which needs to apply the
+				pvs voltage uplift workaround.
+				This is required if cpr-fuse-uplift-disable-sel is present.
+- qcom,cpr-fuse-version-map:	Array of integer tuples which each match to a given combination of CPR
+				fuse parameter values.  Each tuple consists of N + 3 elements.  Where
+				N is the number of fuse corners defined by the qcom,cpr-fuse-corners
+				property.  The elements in one tuple are:
+				[0]: =>		the speed bin of the CPU
+				[1]: =>		the PVS version of the CPU
+				[2]: =>		the CPR fuse revision
+				[3 - N+2]: =>	the ring oscillator select value of each fuse corner
+						ordered from lowest to highest
+				Any element in a tuple may use the value 0xffffffff as a wildcard
+				which will match against any fuse parameter value.  The first tuple
+				that matches against the fuse values read from hardware will be used.
+				This property is used by several properties to provide an index into
+				their lists.
+- qcom,cpr-allowed:		Integer values that specify whether the closed loop CPR is allowed or
+				not for a particular fuse revision. If the qcom,cpr-fuse-version-map
+				property is specified, then qcom,cpr-allowed must contain the same number
+				of integers as that of the number of tuples in qcom,cpr-fuse-version-map.
+				If the integer value has a value 0 for a particular fuse revision, then it
+				is treated as if the closed loop operation is disabled in the fuse. If the
+				integer value has a value 1 for a particular fuse revision, then the closed
+				loop operation is enabled for that fuse revision. If nothing is specified
+				for a particular fuse revision, then the closed loop operation is enabled
+				for that fuse revision by default.
+- qcom,cpr-quotient-adjustment:	Array of integer tuples of target quotient adjustments to add to the fused
+				quotients of each fuse corner.  The elements in a tuple are ordered from
+				lowest voltage corner to highest voltage corner.  Each tuple must be of
+				length defined by qcom,cpr-fuse-corners.  If the qcom,cpr-fuse-version-map
+				property is specified, then qcom,cpr-quotient-adjustment must contain the
+				same number of tuples as qcom,cpr-fuse-version-map.  These tuples are then
+				mapped one-to-one in the order specified.  E.g. if the second
+				qcom,cpr-fuse-version-map tuple matches for a given device, then the quotient
+				adjustments defined in the second qcom,cpr-quotient-adjustment tuple will
+				be applied.  If the qcom,cpr-fuse-version-map property is not specified,
+				then qcom,cpr-quotient-adjustment must contain a single tuple which is then
+				applied unconditionally.  If this property is specified, then the quotient
+				adjustment values are added to the target quotient values read from fuses
+				before writing them into the CPR GCNT target control registers.
+				This property can be used to add or subtract static voltage margin from the
+				regulator managed by the CPR controller.
+- qcom,cpr-init-voltage-adjustment:  Array of integer tuples of initial voltage adjustments in microvolts to
+				add to the fused initial voltage values of each fuse corner.  The elements
+				in a tuple are ordered from lowest voltage corner to highest voltage corner.
+				Each tuple must be of the length defined by qcom,cpr-fuse-corners.  If the
+				qcom,cpr-fuse-version-map property is specified, then
+				qcom,cpr-init-voltage-adjustment must contain the same number of tuples as
+				qcom,cpr-fuse-version-map.  These tuples are then mapped one-to-one in the
+				order specified.  E.g. if the second qcom,cpr-fuse-version-map tuple matches
+				for a given device, then the initial voltage adjustments defined in the
+				second qcom,cpr-init-voltage-adjustment tuple will be applied.  If the
+				qcom,cpr-fuse-version-map property is not specified, then
+				qcom,cpr-init-voltage-adjustment must contain a single tuple which is then
+				applied unconditionally.  This property can be used to add or subtract
+				static initial voltage margin from the regulator managed by the CPR
+				controller.
+- qcom,cpr-quot-offset-adjustment:	Array of integer tuples of target quotient offset adjustments to add
+				to the fused quotient offsets of each fuse corner. The elements in a tuple
+				are ordered from lowest voltage corner to highest voltage corner. Each tuple
+				must be of length defined by qcom,cpr-fuse-corners. If the qcom,cpr-fuse-version-map
+				property is specified, then qcom,cpr-quot-offset-adjustment must contain the
+				same number of tuples as qcom,cpr-fuse-version-map.  These tuples are then
+				mapped one-to-one in the order specified.  E.g. if the second
+				qcom,cpr-fuse-version-map tuple matches for a given device, then the quotient
+				offset adjustments defined in the second qcom,cpr-quot-offset-adjustment tuple
+				will be applied. If the qcom,cpr-fuse-version-map property is not specified,
+				then qcom,cpr-quot-offset-adjustment must contain a single tuple which is then
+				applied unconditionally.  If this property is specified, then the quotient
+				offset adjustment values are added to the target quotient offset values read
+				from fuses.
+				This property can be used to add or subtract static quotient offset margin from
+				the regulator managed by the CPR controller.
+- qcom,cpr-clamp-timer-interval:	The number of 64 reference clock cycle blocks to delay for whenever
+					the clamp signal, sensor mask registers or sensor bypass registers
+					change.  The CPR controller loop is disabled during this delay.
+					Supported values are 0 to 255.  If this property is not specified,
+					then a value of 0 is assumed.  Note that if this property has a
+					value greater than 0, then software cannot accurately determine the
+					error_steps value that corresponds to a given CPR measurement
+					unless processor power collapsing is disabled.  If this property
+					has a value of 0, then the CPR controller loop is not disabled and
+					re-enabled while idle if the clamp signal changes.  Instead, it
+					will remain idle until software issues an ACK or NACK command.
+					This ensures that software can read the error_steps value which
+					resulted in the CPR up or down interrupt.  Setting this property to
+					a value greater than 0 is useful for resetting the CPR sensors of a
+					processor that uses BHS type voltage switches in order to avoid
+					anomalous CPR up interrupts when exiting from power collapse.
+- vdd-apc-optional-prim-supply:	Present: Regulator of highest priority to supply VDD APC power
+				Not Present: No such regulator.
+- vdd-apc-optional-sec-supply:	Present: Regulator of second highest priority to supply VDD APC power.
+				Not Present: No such regulator.
+- qcom,cpr-speed-bin-max-corners: Array of (N+2)-tuples in which each tuple maps a CPU speed bin and PVS version to
+				the maximum virtual voltage corner corresponding to each fuse corner.  The value N
+				corresponds to the number of fuse corners specified by qcom,cpr-fuse-corners.
+				The elements in one tuple are:
+				[0]: =>		the speed bin of the CPU. It may use the value 0xffffffff as a
+						wildcard to match any speed bin values.
+				[1]: =>		the PVS version of the CPU. It may use the value 0xffffffff as
+						a wildcard to match any PVS version values.
+				[2 - N+1]: =>	the max virtual voltage corner value corresponding to each fuse corner
+						for this speed bin, ordered from lowest voltage corner to highest
+						voltage corner.
+				No CPR target quotient scaling is applied on chips which have a speed bin + PVS version
+				pair that does not appear in one of the tuples in this property. If the property is
+				specified, then quotient scaling is enabled for the highest voltage corner. If this property is
+				not specified, then no quotient scaling can take place.
+- qcom,cpr-corner-map:		Array of elements of fuse corner value for each virtual corner.
+				The location or 1-based index of an element in the list corresponds to
+				the virtual corner value. For example, the first element in the list is the fuse corner
+				value that virtual corner 1 maps to.
+				This property is required if qcom,cpr-speed-bin-max-corners is present.
+- qcom,cpr-corner-frequency-map: Array of tuples in which a tuple describes a corner to application processor frequency
+				mapping.
+				The 2 elements in one tuple are:
+				[0]: => a virtual voltage corner.
+				[1]: => the application processor frequency in Hz corresponding to the virtual corner.
+				This property is required if qcom,cpr-speed-bin-max-corners is present.
+- qcom,pvs-version-fuse-sel:	Array of 4 elements to indicate where to read the pvs version of the processor,
+				and the fuse reading method.
+				The 4 elements with index[0..3] are:
+				[0]: => the fuse row number of the selector;
+				[1]: => LSB bit position of the bits;
+				[2]: => the number of bits;
+				[3]: => fuse reading method, 0 for direct reading or 1 for SCM reading.
+- qcom,cpr-voltage-ceiling-override: Array of (N+2)-tuples in which each tuple maps a CPU speed bin and PVS version
+				to the ceiling voltage to apply for each virtual voltage corner.  The value N
+				corresponds to the number of virtual corners as specified by the number of elements
+				in the qcom,cpr-corner-map property.
+				The elements in one tuple are:
+				[0]: =>		the speed bin of the CPU. It may use the value 0xffffffff as a
+						wildcard to match any speed bin values.
+				[1]: =>		the PVS version of the CPU. It may use the value 0xffffffff as a
+						wildcard to match any PVS version values.
+				[2 - N+1]: =>	the ceiling voltage value in microvolts corresponding to each virtual
+						corner for this speed bin, ordered from lowest voltage corner to
+						highest voltage corner.
+				No ceiling override is applied on chips which have a speed bin + PVS version
+				pair that does not appear in one of the tuples in this property.  If the property is
+				specified and the speed bin + PVS version matches, then the per-virtual-corner ceiling
+				voltages will be used in place of the per-fuse-corner ceiling voltages defined in the
+				qcom,cpr-voltage-ceiling property.  If this property is not specified, then the
+				per-fuse-corner ceiling voltages will always be used.
+- qcom,cpr-voltage-floor-override: Array of (N+2)-tuples in which each tuple maps a CPU speed bin and PVS version
+				to the floor voltage to apply for each virtual voltage corner.  The value N
+				corresponds to the number of virtual corners as specified by the number of elements
+				in the qcom,cpr-corner-map property.
+				The elements in one tuple are:
+				[0]: =>		the speed bin of the CPU. It may use the value 0xffffffff as a
+						wildcard to match any speed bin values.
+				[1]: =>		the PVS version of the CPU. It may use the value 0xffffffff as a
+						wildcard to match any PVS version values.
+				[2 - N+1]: =>	the floor voltage value in microvolts corresponding to each virtual
+						corner for this speed bin, ordered from lowest voltage corner to
+						highest voltage corner.
+				No floor override is applied on chips which have a speed bin + PVS version
+				pair that does not appear in one of the tuples in this property.  If the property is
+				specified and the speed bin + PVS version matches, then the per-virtual-corner floor
+				voltages will be used in place of the per-fuse-corner floor voltages defined in the
+				qcom,cpr-voltage-floor property.  If this property is not specified, then the
+				per-fuse-corner floor voltages will always be used.
+- qcom,cpr-floor-to-ceiling-max-range:	Array of integer tuples of floor-to-ceiling max range values in microvolts
+				to be subtracted from the ceiling voltage values of each virtual corner.
+				Supported values are those greater than or equal to 0, or (-1). The value 0 for a corner
+				implies that the floor value for that corner has to be equal to its ceiling value.
+				The value (-1) for a corner implies that no modification to the default floor voltage
+				is required. The elements in a tuple are ordered from lowest voltage corner to highest
+				voltage corner. Each tuple must be of the length equal to the number of virtual corners
+				as specified by the number of elements in the qcom,cpr-corner-map property. If the
+				qcom,cpr-fuse-version-map property is specified, then
+				qcom,cpr-dynamic-floor-override-adjustment must contain the same number of
+				tuples as qcom,cpr-fuse-version-map.  These tuples are then mapped one-to-one in the
+				order specified.  E.g. if the second qcom,cpr-fuse-version-map tuple matches
+				for a given device, then voltage adjustments defined in the second
+				qcom,cpr-dynamic-floor-override-adjustment tuple will be applied.  If the
+				qcom,cpr-fuse-version-map property is not specified, then
+				qcom,cpr-dynamic-floor-override-adjustment must contain a single tuple which
+				is then applied unconditionally.
+- qcom,cpr-virtual-corner-init-voltage-adjustment: Array of integer tuples of voltage adjustments in microvolts to be
+				added to the initial voltage values of each virtual corner.  The elements
+				in a tuple are ordered from lowest voltage corner to highest voltage corner.
+				Each tuple must be of the length equal to the number of virtual corners as
+				specified by the number of elements in the qcom,cpr-corner-map property. If the
+				qcom,cpr-fuse-version-map property is specified, then
+				qcom,cpr-virtual-corner-init-voltage-adjustment must contain the same number of
+				tuples as qcom,cpr-fuse-version-map.  These tuples are then mapped one-to-one in the
+				order specified.  E.g. if the second qcom,cpr-fuse-version-map tuple matches
+				for a given device, then voltage adjustments defined in the second
+				qcom,cpr-virtual-corner-init-voltage-adjustment tuple will be applied.  If the
+				qcom,cpr-fuse-version-map property is not specified, then
+				qcom,cpr-virtual-corner-init-voltage-adjustment must contain a single tuple which
+				is then applied unconditionally.
+- qcom,cpr-virtual-corner-quotient-adjustment: Array of integer tuples of quotient offsets to be added to
+				the scaled target quotient of each virtual corner. The elements
+				in a tuple are ordered from lowest voltage corner to highest voltage corner.
+				Each tuple must be of the length equal to the number of virtual corners as
+				specified by the number of elements in the qcom,cpr-corner-map property.
+				If the qcom,cpr-fuse-version-map property is specified, then
+				qcom,cpr-virtual-corner-quotient-adjustment must contain the same number of tuples as
+				qcom,cpr-fuse-version-map.  These tuples are then mapped one-to-one in the
+				order specified.  E.g. if the second qcom,cpr-fuse-version-map tuple matches
+				for a given device, then quotient adjustments defined in the second
+				qcom,cpr-virtual-corner-quotient-adjustment tuple will be applied.  If the
+				qcom,cpr-fuse-version-map property is not specified, then
+				qcom,cpr-virtual-corner-quotient-adjustment must contain a single tuple which is then
+				applied unconditionally.
+- qcom,cpr-cpus:		Array of CPU phandles which correspond to the cores that this cpr-regulator
+				device must monitor when adjusting the voltage and/or target quotient based
+				upon the number of online cores or make sure that one of them must be online
+				when performing de-aging measurements. This property must be specified in order to
+				utilize the qcom,cpr-online-cpu-virtual-corner-init-voltage-adjustment or
+				qcom,cpr-online-cpu-virtual-corner-quotient-adjustment or qcom,cpr-aging-sensor-id properties.
+- qcom,cpr-online-cpu-virtual-corner-init-voltage-adjustment:	Array of tuples where each tuple specifies
+				the voltage adjustment for each corner. These adjustments apply to the
+				initial voltage of each corner. The size of each tuple must be equal
+				to qcom,cpr-fuse-corners if consumers request fuse corners or the length of
+				qcom,cpr-corner-map if consumers request virtual corners. In each tuple, the
+				value corresponds to the voltage adjustment when running at that corner at
+				init, from lowest to highest. The tuples must be organized into 1 group if
+				qcom,cpr-fuse-version-map is not specified or the same number of groups as
+				the number of tuples in qcom,cpr-fuse-version-map. The i-th group of tuples
+				corresponds to the voltage adjustments for i-th fuse version map tuple. In
+				each group, there are 1 plus length of qcom,cpr-cpus tuples, each tuple
+				corresponds to the number of cores online, from 0 to the number of elements
+				in qcom,cpr-cpus.
+- qcom,cpr-online-cpu-init-voltage-as-ceiling:	Boolean which indicates that the ceiling voltage used for a
+				given virtual corner may be reduced to the per number of cores online,
+				per-virtual corner ceiling voltage value. This property takes precedence
+				over qcom,cpr-scaled-init-voltage-as-ceiling if both are specified.
+- qcom,cpr-online-cpu-virtual-corner-quotient-adjustment:	Array of tuples where each tuple specifies
+				the quotient adjustment for each corner. These adjustments will be applied
+				to each corner at run time. The size of each tuple must be equal to
+				qcom,cpr-fuse-corners if consumers request fuse corners or the length of
+				qcom,cpr-corner-map if consumers request virtual corners. In each tuple,
+				the value corresponds to the quotient adjustment when running at that corner,
+				from lowest to highest. The tuples must be organized into 1 group if
+				qcom,cpr-fuse-version-map is not specified or the same number of groups
+				as the number of tuples in qcom,cpr-fuse-version-map. The i-th group of
+				tuples corresponds to the quotient adjustments for i-th fuse version map
+				tuple. In each group, there are 1 plus length of qcom,cpr-cpus tuples,
+				each tuple corresponds to the number of cores online, from 0 to the
+				number of elements in qcom,cpr-cpus.
+- qcom,cpr-init-voltage-as-ceiling: Boolean which indicates that the ceiling voltage used for a given virtual
+				corner may be reduced to the per-fuse-corner initial voltage fuse value.
+- qcom,cpr-scaled-init-voltage-as-ceiling: Boolean which indicates that the ceiling voltage used for a given
+				virtual corner may be reduced to the interpolated, per-virtual-corner initial
+				voltage value.  Note that if both qcom,cpr-init-voltage-as-ceiling and
+				qcom,cpr-scaled-init-voltage-as-ceiling are specified, then
+				qcom,cpr-scaled-init-voltage-as-ceiling will take precedence since the interpolated
+				voltages are necessarily less than or equal to the fused initial voltage values.
+- qcom,cpr-voltage-scaling-factor-max: Array of values which define the maximum allowed scaling factor to apply
+				when calculating per-corner initial voltage values for each fuse corner.  The
+				array must be of length equal to the value of the qcom,cpr-fuse-corners property.
+				Each element in the array maps to the fuse corners in increasing order.
+				The elements have units of uV/MHz.  Each element corresponds to 'max_factor' in
+				the following equation:
+				init_voltage_min(f) = fuse_init_voltage(f) - (fuse_f_max - f) * max_factor
+				If this property is not specified, then the initial voltage for each virtual
+				corner will be set to the initial voltage of the associated fuse corner.
+- qcom,cpr-quot-adjust-scaling-factor-max: Array of values which define the maximum allowed scaling factor to
+				apply when calculating per-virtual-corner target quotients for each fuse
+				corner.  Two data formats are allowed for this property.  The primary one
+				requires that the array be of length equal to the value of the
+				qcom,cpr-fuse-corners property.  When using this format, each element in the
+				array maps to the fuse corners in increasing order.  The second deprecated
+				format allows for only a single element to be specified which defines the
+				maximum scaling factor for the highest fuse corner.  In this case, a value of
+				0 is assumed for the lower fuse corners.  The elements of this property have
+				units of QUOT/GHz.  Each element corresponds to 'max_factor' in the following
+				equation:
+				quot_min(f) = fuse_quot(f) - (fuse_f_max - f) * max_factor / 1000
+				where f and fuse_f_max have units of MHz.
+				This property is required if qcom,cpr-speed-bin-max-corners is present.
+- qcom,cpr-fuse-init-voltage:	Array of quadruples in which each quadruple specifies a fuse location to
+				read in order to get an initial voltage for a fuse corner. The fuse values
+				are encoded as voltage steps higher or lower than the voltages defined in
+				qcom,cpr-voltage-ceiling. Each step corresponds to the voltage defined by
+				the qcom,cpr-init-voltage-step property.
+				The 4 elements in one quadruple are:
+				[0]: => the fuse row number of the bits
+				[1]: => LSB bit position of the bits
+				[2]: => number of the bits
+				[3]: => fuse reading method, 0 for direct reading or 1 for SCM reading
+				The quadruples are ordered from the lowest voltage fuse corner to the
+				highest voltage fuse corner.
+				A given cpr-regulator device must have either qcom,cpr-fuse-init-voltage
+				specified or qcom,pvs-voltage-table (and its associated properties).
+- qcom,cpr-fuse-redun-init-voltage: Array of quadruples in which each quadruple specifies a fuse location
+				to read in order to get the redundant initial voltage for a fuse corner.
+				This property is the same as qcom,cpr-fuse-init-voltage except that it is
+				only utilized if a chip is configured to use the redundant set of fuse
+				values.  This property is required if qcom,cpr-fuse-redun-sel and
+				qcom,cpr-fuse-init-voltage are specified.
+- qcom,cpr-init-voltage-ref:	Array of reference voltages in microvolts used when decoding the initial
+				voltage fuse values.  The elements in the array are ordered from lowest
+				voltage corner to highest voltage corner.  This property must be of length
+				defined by qcom,cpr-fuse-corners.
+				This property is required if qcom,cpr-fuse-init-voltage is present.
+- qcom,cpr-init-voltage-step:	The voltage step size in microvolts of the CPR initial voltage fuses described by the
+				qcom,cpr-fuse-init-voltage property.
+				This property is required if qcom,cpr-fuse-init-voltage is present.
+- mem-acc-supply:		Regulator to vote for the memory accelerator configuration.
+				Not Present: memory accelerator configuration not supported.
+- qcom,mem-acc-corner-map:	Array of integers which define the mapping from mem-acc corner value for each
+				virtual corner. Each element is a mem-acc state for the corresponding virtual corner.
+				The elements in the array are ordered from lowest voltage corner to highest voltage corner.
+- qcom,fuse-remap-source:	Array of quadruples in which each quadruple specifies a fuse location to
+				remap.  The 4 elements in one quadruple are:
+				[0]: => the fuse row number of the bits
+				[1]: => LSB bit position of the bits
+				[2]: => the number of bits
+				[3]: => fuse reading method, 0 for direct reading or 1 for SCM reading
+				The fuse bits for all quadruples are packed together in the order specified
+				into 64-bit virtual fuse rows beginning at the row number defined in the
+				qcom,fuse-remap-base-row property.  The remapped rows may be used by any
+				other properties.
+				Example:
+					qcom,fuse-remap-base-row = <1000>;
+					qcom,fuse-remap-source =
+							<13 57 2 0>,
+							<14 30 3 0>,
+							<20 1 7 0>,
+							<40 47 120 0>;
+
+					This results in the following bit remapping:
+
+					Row   Bits       Remap Row  Remap Bits
+					13    57..58  -->  1000      0..1
+					14    30..32  -->  1000      2..4
+					20     1..7   -->  1000      5..11
+					40    47..63  -->  1000     12..28
+					41     0..34  -->  1000     29..63
+					41    35..63  -->  1001      0..28
+					42     0..34  -->  1001     29..63
+					42    35..38  -->  1002      0..3
+
+					A tuple like this could then be used to reference some of the
+					concatenated bits from rows 13, 14, and 20:
+
+					qcom,cpr-fuse-init-voltage = <1000 0 6 0>;
+- qcom,fuse-remap-base-row:	Integer which defines the virtual row number to use as a base when remapping
+				fuse bits.  The remap base row number can be any value as long as it is
+				greater than all of the real row numbers addressed in other properties of
+				the cpr-regulator device node.  This property is required if
+				qcom,fuse-remap-source is specified.
+- qcom,cpr-quot-min-diff:	Integer which defines the minimum target-quotient difference between
+				the highest and (highest - 1) fuse corner to keep CPR enabled. If this
+				property is not specified a default value of 50 is used.
+- qcom,cpr-fuse-quot-offset:	Array of quadruples in which each quadruple specifies a fuse location to
+				read in order to get the quotient offset for a fuse corner. The fuse values
+				are encoded as the difference between quotients of that fuse corner and its
+				adjacent lower fuse corner divided by an unpacking multiplier value defined
+				under qcom,cpr-fuse-quot-offset-scale property.
+				The 4 elements in one quadruple are:
+				[0]: => the fuse row number of the bits
+				[1]: => LSB bit position of the bits
+				[2]: => number of the bits
+				[3]: => fuse reading method, 0 for direct reading or 1 for SCM reading
+				The quadruples are ordered from the lowest fuse corner to the highest
+				fuse corner.
+				Quotient offset read from the fuse locations above can be overridden with
+				the property qcom,cpr-quot-adjust-scaling-factor-max.
+- qcom,cpr-fuse-quot-offset-scale:	Array of integer values which defines the multipliers to decode the quotient offsets
+				of each fuse corner. The elements in the array are ordered from the lowest voltage fuse corner
+				to the highest voltage fuse corner. If this property is not present, then all target quotient
+				parameters are assumed to have a multiplier of 1 (i.e. no decoding needed).
+- qcom,cpr-redun-fuse-quot-offset: Array of quadruples in which each quadruple specifies a fuse location to
+				read in order to get the redundant quotient offset for a fuse corner. This
+				property is the same as qcom,cpr-fuse-quot-offset except that it is only
+				utilized if a chip is configured to use the redundant set of fuse values.
+- qcom,cpr-fuse-min-quot-diff:	Array of values which define the minimum difference allowed between the adjusted
+				quotients of the fuse corners. The length of the array should be equal to the value
+				of the qcom,cpr-fuse-corners property. Where each element in the array maps to the
+				fuse corners in increasing order.
+- qcom,cpr-min-quot-diff-adjustment:	Array of integer tuples of target quotient offsets to be added to
+				the adjusted target quotients of each fuse corner. When the quotient difference
+				between two adjacent fuse corners is insufficient, the quotient for the higher fuse corner is
+				replaced with that of the lower fuse corner plus the adjustment value.
+				The elements in a tuple are ordered from lowest voltage corner to highest voltage corner.
+				Each tuple must be of the length defined by qcom,cpr-fuse-corners.
+				If the qcom,cpr-fuse-version-map property is specified, then qcom,cpr-min-quot-diff-adjustment
+				must contain the same number of tuples as qcom,cpr-fuse-version-map.  These tuples are then mapped
+				one-to-one in the order specified.  E.g. if the second qcom,cpr-fuse-version-map tuple matches
+				for a given device, then the quotient adjustments defined in the
+				second qcom,cpr-min-quot-diff-adjustment tuple will be applied.  If the
+				qcom,cpr-fuse-version-map property is not specified, then
+				qcom,cpr-min-quot-diff-adjustment must contain a single tuple which is then
+				applied unconditionally. The qcom,cpr-min-quot-diff-adjustment property must be specified
+				if the qcom,cpr-fuse-min-quot-diff property is specified.
+- qcom,cpr-skip-voltage-change-during-suspend: Boolean property which indicates that the CPR voltage
+				should not be adjusted based upon the number of online cores while
+				entering or exiting system suspend.
+- rpm-apc-supply:		Regulator to notify RPM of the APC operating
+				corner
+- qcom,rpm-apc-corner-map:	Array of integers which define the mapping of
+				the RPM corner to the corresponding APC virtual
+				corner. This property must be defined if
+				'rpm-apc-supply' is present.
+- qcom,vsens-corner-map:	Array of integers which define the mapping of the VSENS corner to the
+				corresponding APC fuse corner. The qcom,vsens-corner-map and
+				vdd-vsens-corner-supply properties must both be specified for a given
+				cpr-regulator device or neither must be specified.
+- vdd-vsens-corner-supply:	Regulator to specify the current operating fuse corner to the Voltage Sensor.
+- vdd-vsens-voltage-supply:	Regulator to specify the corner floor/ceiling voltages to the Voltage Sensor.
+- qcom,cpr-aging-sensor-id:	Array of CPR sensor IDs to be used in the CPR de-aging algorithm. The number
+				of values should be equal to number of sensors selected for age calibration.
+				If this property is not specified, then the de-aging procedure is not enabled.
+- qcom,cpr-de-aging-allowed:	Integer values that specify whether the CPR de-aging procedure is allowed or
+				not for a particular fuse revision. If the qcom,cpr-fuse-version-map
+				property is specified, then qcom,cpr-de-aging-allowed must contain the same number
+				of elements as there are tuples in qcom,cpr-fuse-version-map. If qcom,cpr-fuse-version-map
+				is not specified, then qcom,cpr-de-aging-allowed must contain a single value that
+				is used unconditionally. An element value of 1 means that the CPR de-aging procedure
+				can be performed for parts with the corresponding fuse revision. An element value of 0
+				means that CPR de-aging cannot be performed.
+				This property is required if the qcom,cpr-aging-sensor-id property has been specified.
+- qcom,cpr-aging-ref-corner:	The vdd-apc-supply reference virtual voltage corner to be set during the CPR de-aging
+				measurements. This corner value is needed to set appropriate voltage on
+				the dependent voltage rails such as vdd-mx and mem-acc.
+				This property is required if the qcom,cpr-aging-sensor-id property has been specified.
+- qcom,cpr-aging-ref-voltage:	The vdd-apc-supply reference voltage in microvolts to be set during the
+				CPR de-aging measurements.
+				This property is required if the qcom,cpr-aging-sensor-id property has been specified.
+- qcom,cpr-max-aging-margin:	The maximum allowed aging voltage margin in microvolts. This is used to limit
+				the calculated aging voltage margin.
+				This property is required if the qcom,cpr-aging-sensor-id property has been specified.
+- qcom,cpr-non-collapsible-sensors: Array of CPR sensor IDs which are in non-collapsible domain. The sensor IDs not
+				specified in the array should be bypassed for the de-aging procedure. The number of
+				elements should be less than or equal to 32. The values of the array elements should
+				be greater than or equal to 0 and less than or equal to 31.
+				This property is required for power-domains with bypass mux present in HW.
+				This property can be required if the qcom,cpr-aging-sensor-id property has been specified.
+- qcom,cpr-aging-ro-scaling-factor:	The aging ring oscillator (RO) scaling factor with units of QUOT/V.
+				This value is used for calculating a voltage margin from RO measurements.
+				This property is required if the qcom,cpr-aging-sensor-id property has been specified.
+- qcom,cpr-ro-scaling-factor:	Array of scaling factors with units of QUOT/V for each ring oscillator ordered
+				from the lowest to the highest RO. These values are used to calculate
+				the aging voltage margin adjustment for all of the ROs. Since CPR2 supports
+				exactly 8 ROs, the array must contain 8 elements corresponding to RO0 through RO7 in order.
+				If a given RO is unused for a fuse corner, then its scaling factor may be specified as 0.
+				This property is required if the qcom,cpr-aging-sensor-id property has been specified.
+- qcom,cpr-aging-derate:	Array of scaling factors which define the amount of derating to apply to the reference
+				aging voltage margin adjustment for each of the fuse corners. Each element has units
+				of uV/mV. This property must be of length defined by qcom,cpr-fuse-corners.
+				The elements are ordered from the lowest to the highest fuse corner.
+				This property is required if the qcom,cpr-aging-sensor-id property has been specified.
+- qcom,cpr-fuse-aging-init-quot-diff:	Array of quadruples in which each quadruple specifies a fuse location to read in
+				order to get an initial quotient difference. The difference between quot min and quot max
+				is fused as the initial quotient difference.
+				The 4 elements in one quadruple are:
+				[0]: => the fuse row number of the bits
+				[1]: => LSB bit position of the bits
+				[2]: => number of the bits
+				[3]: => fuse reading method, 0 for direct reading or 1 for SCM reading
+				The number of quadruples should be equal to the number of values specified in
+				the qcom,cpr-aging-sensor-id property. This property is required if
+				the qcom,cpr-aging-sensor-id property has been specified.
+- qcom,cpr-thermal-sensor-id:	TSENS hardware sensor-id of the sensor which
+				needs to be monitored.
+- qcom,cpr-disable-temp-threshold:	The TSENS temperature threshold in degrees Celsius at which CPR
+				closed-loop is disabled. CPR closed-loop will stay disabled as long as the
+				temperature is below this threshold. This property is required
+				only if 'qcom,cpr-thermal-sensor-id' is present.
+- qcom,cpr-enable-temp-threshold:	The TSENS temperature threshold in degrees Celsius at which CPR
+				closed-loop is enabled. CPR closed-loop will stay enabled above this
+				temperature threshold. This property is required only if
+				'qcom,cpr-thermal-sensor-id' is present.
+- qcom,disable-closed-loop-in-pc:	Bool property to disable closed-loop CPR during
+				power-collapse. This can be enabled only for single core
+				designs. The property 'qcom,cpr-cpus' is required to enable this logic.
+Example:
+	apc_vreg_corner: regulator@f9018000 {
+		status = "okay";
+		compatible = "qcom,cpr-regulator";
+		reg = <0xf9018000 0x1000>, <0xfc4b8000 0x1000>;
+		reg-names = "rbcpr", "efuse_addr";
+		interrupts = <0 15 0>;
+		regulator-name = "apc_corner";
+		qcom,cpr-fuse-corners = <3>;
+		regulator-min-microvolt = <1>;
+		regulator-max-microvolt = <12>;
+
+		qcom,pvs-fuse = <22 6 5 1>;
+		qcom,pvs-fuse-redun-sel = <22 24 3 2 1>;
+		qcom,pvs-fuse-redun = <22 27 5 1>;
+
+		qcom,pvs-voltage-table =
+			<1050000 1150000 1350000>,
+			<1050000 1150000 1340000>,
+			<1050000 1150000 1330000>,
+			<1050000 1150000 1320000>,
+			<1050000 1150000 1310000>,
+			<1050000 1150000 1300000>,
+			<1050000 1150000 1290000>,
+			<1050000 1150000 1280000>,
+			<1050000 1150000 1270000>,
+			<1050000 1140000 1260000>,
+			<1050000 1130000 1250000>,
+			<1050000 1120000 1240000>,
+			<1050000 1110000 1230000>,
+			<1050000 1100000 1220000>,
+			<1050000 1090000 1210000>,
+			<1050000 1080000 1200000>,
+			<1050000 1070000 1190000>,
+			<1050000 1060000 1180000>,
+			<1050000 1050000 1170000>,
+			<1050000 1050000 1160000>,
+			<1050000 1050000 1150000>,
+			<1050000 1050000 1140000>,
+			<1050000 1050000 1140000>,
+			<1050000 1050000 1140000>,
+			<1050000 1050000 1140000>,
+			<1050000 1050000 1140000>,
+			<1050000 1050000 1140000>,
+			<1050000 1050000 1140000>,
+			<1050000 1050000 1140000>,
+			<1050000 1050000 1140000>,
+			<1050000 1050000 1140000>,
+			<1050000 1050000 1140000>;
+		qcom,cpr-voltage-ceiling = <1050000 1150000 1280000>;
+		qcom,cpr-voltage-floor = <1050000 1050000 1100000>;
+		vdd-apc-supply = <&pm8226_s2>;
+		vdd-apc-optional-prim-supply = <&ncp6335d>;
+		vdd-apc-optional-sec-supply = <&fan53555>;
+		vdd-mx-supply = <&pm8226_l3_ao>;
+		qcom,vdd-mx-vmax = <1350000>;
+		qcom,vdd-mx-vmin-method = <1>;
+		qcom,vdd-apc-step-up-limit = <1>;
+		qcom,vdd-apc-step-down-limit = <1>;
+		qcom,cpr-ref-clk = <19200>;
+		qcom,cpr-timer-delay = <5000>;
+		qcom,cpr-timer-cons-up = <1>;
+		qcom,cpr-timer-cons-down = <2>;
+		qcom,cpr-irq-line = <0>;
+		qcom,cpr-step-quotient = <15>;
+		qcom,cpr-up-threshold = <1>;
+		qcom,cpr-down-threshold = <2>;
+		qcom,cpr-idle-clocks = <5>;
+		qcom,cpr-gcnt-time = <1>;
+		qcom,cpr-clamp-timer-interval = <1>;
+		qcom,cpr-apc-volt-step = <5000>;
+
+		qcom,vsens-corner-map = <1 2 2>;
+		vdd-vsens-corner-supply = <&vsens_apc0_corner>;
+		vdd-vsens-voltage-supply = <&vsens_apc0_voltage>;
+
+		rpm-apc-supply = <&rpm_apc_vreg>;
+		qcom,rpm-apc-corner-map = <4 4 5 5 7 7 7 7 7 7 7 7>;
+
+		qcom,cpr-fuse-row = <138 1>;
+		qcom,cpr-fuse-bp-cpr-disable = <36>;
+		qcom,cpr-fuse-bp-scheme = <37>;
+		qcom,cpr-fuse-target-quot = <24 12 0>;
+		qcom,cpr-fuse-target-quot-size = <12 12 12>;
+		qcom,cpr-fuse-ro-sel = <54 38 41>;
+		qcom,cpr-fuse-revision = <140 26 2 0>;
+		qcom,cpr-fuse-redun-sel = <138 57 1 1 1>;
+		qcom,cpr-fuse-redun-row = <139 1>;
+		qcom,cpr-fuse-redun-target-quot = <24 12 0>;
+		qcom,cpr-fuse-redun-ro-sel = <46 36 39>;
+		qcom,cpr-fuse-cond-min-volt-sel = <54 42 6 7 1>;
+		qcom,cpr-cond-min-voltage = <1140000>;
+		qcom,cpr-fuse-uplift-sel = <22 53 1 0 0>;
+		qcom,cpr-uplift-voltage = <50000>;
+		qcom,cpr-uplift-quotient = <0 0 120>;
+		qcom,cpr-uplift-max-volt = <1350000>;
+		qcom,cpr-uplift-speed-bin = <1>;
+		qcom,speed-bin-fuse-sel = <22 0 3 0>;
+		qcom,cpr-corner-map = <1 1 2 2 3 3 3 3 3 3 3 3>;
+		qcom,cpr-corner-frequency-map =
+				<1 300000000>,
+				<2 384000000>,
+				<3 600000000>,
+				<4 787200000>,
+				<5 998400000>,
+				<6 1094400000>,
+				<7 1190400000>,
+				<8 1305600000>,
+				<9 1344000000>,
+				<10 1401600000>,
+				<11 1497600000>,
+				<12 1593600000>;
+		qcom,pvs-version-fuse-sel = <22 4 2 0>;
+		qcom,cpr-speed-bin-max-corners =
+				<0 1 2 4 7>,
+				<1 1 2 4 12>,
+				<2 1 2 4 10>,
+				<5 1 2 4 14>;
+		qcom,cpr-fuse-target-quot-scale =
+				<0 1>,
+				<0 1>,
+				<0 1>;
+		qcom,cpr-quot-adjust-scaling-factor-max = <0 650 650>;
+		qcom,cpr-fuse-quot-offset =
+				<138 53 5 0>,
+				<138 53 5 0>,
+				<138 48 5 0>,
+				<138 58 5 0>;
+		qcom,cpr-fuse-redun-quot-offset =
+				<200 53 5 0>,
+				<200 53 5 0>,
+				<200 48 5 0>,
+				<200 58 5 0>;
+		qcom,cpr-fuse-init-voltage =
+				<27 36 6 0>,
+				<27 18 6 0>,
+				<27 0 6 0>;
+		qcom,cpr-fuse-redun-init-voltage =
+				<140 36 6 0>,
+				<140 18 6 0>,
+				<140 0 6 0>;
+		qcom,cpr-init-voltage-ref = <1050000 1150000 1280000>;
+		qcom,cpr-init-voltage-step = <10000>;
+		qcom,cpr-voltage-ceiling-override =
+				<1 1 1050000 1050000 1150000 1150000 1280000
+				     1280000 1280000 1280000 1280000 1280000
+				     1280000 1280000>;
+		qcom,cpr-voltage-floor-override =
+				<1 1 1050000 1050000 1050000 1050000 1060000
+				     1070000 1080000 1090000 1100000 1100000
+				     1100000 1100000>;
+		qcom,cpr-scaled-init-voltage-as-ceiling;
+
+		qcom,cpr-fuse-version-map =
+				<0xffffffff 0xffffffff 2 4 4 4>,
+				<0xffffffff 0xffffffff 2 6 6 6>,
+				<0xffffffff 0xffffffff 3 4 4 4>;
+		qcom,cpr-quotient-adjustment =
+				<0 0 (-210)>,
+				<0 0 (-60)>,
+				<0 0 (-94)>;
+		qcom,cpr-quot-offset-adjustment =
+				<0 0 (-5)>;
+		qcom,cpr-init-voltage-adjustment =
+				<0 0 (-100000)>,
+				<0 0 (-100000)>,
+				<0 0 (-45000)>;
+		qcom,cpr-fuse-min-quot-diff = <0 0 40>;
+		qcom,cpr-min-quot-diff-adjustment =
+					<0 0 0>,
+					<0 0 72>,
+					<0 0 104>;
+		qcom,cpr-floor-to-ceiling-max-range =
+			<(-1) (-1) (-1) (-1) (-1) (-1) (-1) (-1) (-1) (-1) (-1) (-1)>,
+			<(-1) (-1) (-1) (-1) (-1) (-1) (-1) (-1) (-1) (-1) (-1) (-1)>,
+			<(-1) (-1) (-1) (-1) (-1) (-1) (-1) 50000 50000 50000 50000 50000>;
+		qcom,cpr-virtual-corner-init-voltage-adjustment =
+			<0 0 0 (-10000) 0 0 0 0 0 0 0 0>,
+			<0 0 0 0 0 0 0 0 0 0 0 (-20000)>,
+			<0 0 0 0 0 0 0 0 0 0 0 (-30000)>;
+		qcom,cpr-virtual-corner-quotient-adjustment =
+			<0 0 0 100 0 0 0 0 0 0 0 0>,
+			<0 0 0 0 0 0 0 0 0 0 0 (-300)>,
+			<0 0 0 (-60) 0 0 0 0 0 0 0 0>;
+		qcom,cpr-cpus = <&CPU0 &CPU1 &CPU2 &CPU3>;
+		qcom,cpr-online-cpu-virtual-corner-init-voltage-adjustment =
+			/* 1st fuse version tuple matched */
+			<0 0 0 (-10000) (-10000) (-10000) (-15000) (-15000) (-20000) 0 (-20000) (-30000) >, /* 0 CPUs online */
+			<0 0 0 (-10000) (-10000) (-10000) (-15000) (-15000) (-20000) 0 (-20000) (-30000) >, /* 1 CPUs online */
+			<0 0 0 (-5000) (-5000) (-5000) (-5000) (-5000) (-10000) 0 (-10000) (-10000) >, /* 2 CPUs online */
+			<0 0 0 0 0 0 0 0 0 0 0 0>, /* 3 CPUs online */
+			<0 0 0 0 0 0 0 0 0 0 0 0>, /* 4 CPUs online */
+			/* 2nd fuse version tuple matched */
+			<0 0 0 (-10000) (-10000) (-10000) (-15000) (-15000) (-20000) 0 (-20000) (-30000) >, /* 0 CPUs online */
+			<0 0 0 (-10000) (-10000) (-10000) (-15000) (-15000) (-20000) 0 (-20000) (-30000) >, /* 1 CPUs online */
+			<0 0 0 (-5000) (-5000) (-5000) (-5000) (-5000) (-10000) 0 (-10000) (-10000) >, /* 2 CPUs online */
+			<0 0 0 0 0 0 0 0 0 0 0 0>, /* 3 CPUs online */
+			<0 0 0 0 0 0 0 0 0 0 0 0>, /* 4 CPUs online */
+			/* 3rd fuse version tuple matched */
+			<0 0 0 (-10000) (-10000) (-10000) (-15000) (-15000) (-20000) 0 (-20000) (-30000) >, /* 0 CPUs online */
+			<0 0 0 (-10000) (-10000) (-10000) (-15000) (-15000) (-20000) 0 (-20000) (-30000) >, /* 1 CPUs online */
+			<0 0 0 (-5000) (-5000) (-5000) (-5000) (-5000) (-10000) 0 (-10000) (-10000) >, /* 2 CPUs online */
+			<0 0 0 0 0 0 0 0 0 0 0 0>, /* 3 CPUs online */
+			<0 0 0 0 0 0 0 0 0 0 0 0>; /* 4 CPUs online */
+		qcom,cpr-online-cpu-virtual-corner-quotient-adjustment =
+			/* 1st fuse version tuple matched */
+			<0 0 0 (-6) (-6) (-6) (-9) (-9) (-12) 0 (-12) (-18)>, /* 0 CPUs online */
+			<0 0 0 (-6) (-6) (-6) (-9) (-9) (-12) 0 (-12) (-18)>, /* 1 CPUs online */
+			<0 0 0 (-3) (-3) (-3) (-3) (-3) (-6) 0 (-6) (-6)>, /* 2 CPUs online */
+			<0 0 0 0 0 0 0 0 0 0 0 0>, /* 3 CPUs online */
+			<0 0 0 0 0 0 0 0 0 0 0 0>, /* 4 CPUs online */
+			/* 2nd fuse version tuple matched */
+			<0 0 0 (-6) (-6) (-6) (-9) (-9) (-12) 0 (-12) (-18)>, /* 0 CPUs online */
+			<0 0 0 (-6) (-6) (-6) (-9) (-9) (-12) 0 (-12) (-18)>, /* 1 CPUs online */
+			<0 0 0 (-3) (-3) (-3) (-3) (-3) (-6) 0 (-6) (-6)>, /* 2 CPUs online */
+			<0 0 0 0 0 0 0 0 0 0 0 0>, /* 3 CPUs online */
+			<0 0 0 0 0 0 0 0 0 0 0 0>, /* 4 CPUs online */
+			/* 3rd fuse version tuple matched */
+			<0 0 0 (-21) (-21) (-21) (-32) (-32) (-42) 0 (-42) (-63)>, /* 0 CPUs online */
+			<0 0 0 (-21) (-21) (-21) (-32) (-32) (-42) 0 (-42) (-63)>, /* 1 CPUs online */
+			<0 0 0 (-11) (-11) (-11) (-11) (-11) (-21) 0 (-21) (-21)>, /* 2 CPUs online */
+			<0 0 0 0 0 0 0 0 0 0 0 0>, /* 3 CPUs online */
+			<0 0 0 0 0 0 0 0 0 0 0 0>; /* 4 CPUs online */
+		qcom,cpr-allowed =
+			<0>,
+			<1>,
+			<1>;
+
+		qcom,fuse-remap-base-row = <1000>;
+		qcom,fuse-remap-source =
+				<140 7 3 0>,
+				<138 45 5 0>;
+		qcom,cpr-fuse-quot-offset-scale = <5 5 5>;
+
+		qcom,cpr-aging-sensor-id = <17 18>;
+		qcom,cpr-aging-ref-corner = <4>;
+		qcom,cpr-aging-ref-voltage = <1050000>;
+		qcom,cpr-max-aging-margin = <15000>;
+		qcom,cpr-de-aging-allowed =
+				<0>,
+				<0>,
+				<1>;
+		qcom,cpr-non-collapsible-sensors = <7 12 17 22>;
+		qcom,cpr-aging-ro-scaling-factor = <3500>;
+		qcom,cpr-ro-scaling-factor = <0 2500 2500 2500 0 0 0 0>;
+		qcom,cpr-aging-derate = <1000 1000 1250>;
+		qcom,cpr-fuse-aging-init-quot-diff =
+				<101 0 8 0>,
+				<101 8 8 0>;
+
+		qcom,cpr-thermal-sensor-id = <9>;
+		qcom,cpr-disable-temp-threshold = <5>;
+		qcom,cpr-enable-temp-threshold = <10>;
+	};
diff --git a/Documentation/devicetree/bindings/regulator/rpmh-regulator.txt b/Documentation/devicetree/bindings/regulator/rpmh-regulator.txt
index 7de891e..b760758 100644
--- a/Documentation/devicetree/bindings/regulator/rpmh-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/rpmh-regulator.txt
@@ -44,6 +44,16 @@
 		    a particular PMIC found in the system.  This name must match
 		    to one that is defined by the bootloader.
 
+- qcom,regulator-type
+	Usage:      required if qcom,supported-modes is specified or if
+		    qcom,init-mode is specified in any subnodes
+	Value type: <string>
+	Definition: The physical type of the regulator including the PMIC
+		    family.  This is used for mode control.  Supported values:
+		    "pmic4-ldo", "pmic4-hfsmps", "pmic4-ftsmps", "pmic4-bob",
+		    "pmic5-ldo", "pmic5-hfsmps", "pmic5-ftsmps", and
+		    "pmic5-bob".
+
 - qcom,use-awake-state
 	Usage:      optional
 	Value type: <empty>
@@ -72,7 +82,7 @@
 	Value type: <prop-encoded-array>
 	Definition: A list of integers specifying the PMIC regulator modes
 		    supported by this regulator.  Supported values are
-		    RPMH_REGULATOR_MODE_* (i.e. 0 to 7).  Elements must be
+		    RPMH_REGULATOR_MODE_* (i.e. 0 to 4).  Elements must be
 		    specified in order from lowest to highest.
 
 - qcom,mode-threshold-currents
@@ -148,7 +158,7 @@
 	Usage:      optional; VRM regulators only
 	Value type: <u32>
 	Definition: Specifies the initial mode to request for a VRM regulator.
-		    Supported values are RPMH_REGULATOR_MODE_* (i.e. 0 to 7).
+		    Supported values are RPMH_REGULATOR_MODE_* (i.e. 0 to 4).
 
 - qcom,init-headroom-voltage
 	Usage:      optional; VRM regulators only
@@ -212,9 +222,10 @@
 	compatible = "qcom,rpmh-vrm-regulator";
 	mboxes = <&apps_rsc 0>;
 	qcom,resource-name = "smpa2";
+	qcom,regulator-type = "pmic4-smps";
 	qcom,supported-modes =
-		<RPMH_REGULATOR_MODE_SMPS_AUTO
-		 RPMH_REGULATOR_MODE_SMPS_PWM>;
+		<RPMH_REGULATOR_MODE_AUTO
+		 RPMH_REGULATOR_MODE_HPM>;
 	qcom,mode-threshold-currents = <0 2000000>;
 	pm8998_s2: regulator-s2 {
 		regulator-name = "pm8998_s2";
@@ -222,7 +233,7 @@
 		regulator-min-microvolt = <1100000>;
 		regulator-max-microvolt = <1200000>;
 		regulator-enable-ramp-delay = <200>;
-		qcom,init-mode = <RPMH_REGULATOR_MODE_SMPS_AUTO>;
+		qcom,init-mode = <RPMH_REGULATOR_MODE_AUTO>;
 		qcom,init-voltage = <1150000>;
 	};
 };
@@ -232,9 +243,10 @@
 	mboxes = <&disp_rsc 0>;
 	qcom,use-awake-state;
 	qcom,resource-name = "ldoa3";
+	qcom,regulator-type = "pmic4-ldo";
 	qcom,supported-modes =
-		<RPMH_REGULATOR_MODE_LDO_LPM
-		 RPMH_REGULATOR_MODE_LDO_HPM>;
+		<RPMH_REGULATOR_MODE_LPM
+		 RPMH_REGULATOR_MODE_HPM>;
 	qcom,mode-threshold-currents = <0 10000>;
 	qcom,always-wait-for-ack;
 	pm8998_l3_disp_ao: regulator-l3-ao {
@@ -250,7 +262,7 @@
 		qcom,set = <RPMH_REGULATOR_SET_SLEEP>;
 		regulator-min-microvolt = <1000000>;
 		regulator-max-microvolt = <1200000>;
-		qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+		qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		qcom,init-voltage = <1000000>;
 		qcom,init-enable = <0>;
 	};
@@ -260,6 +272,7 @@
 	compatible = "qcom,rpmh-vrm-regulator";
 	mboxes = <&apps_rsc 0>;
 	qcom,resource-name = "ldoa4";
+	qcom,regulator-type = "pmic4-ldo";
 	pm8998_l4-parent-supply = <&pm8998_s2>;
 	pm8998_l4: regulator-l4 {
 		regulator-name = "pm8998_l4";
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index 34c2963..58c9bf8 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -293,6 +293,19 @@
 
  - compatible : "qcom,msm-pcm-hostless"
 
+* audio-load-mod
+
+Required properties:
+
+ - compatible : "qcom,audio-load-mod"
+
+Optional properties:
+
+ - compatible : "qcom,audio-test-mod"
+		Add this compatible as child device to load-module device.
+		This child device is added after lpass is up to invoke
+		deferred probe devices.
+
 * msm-ocmem-audio
 
 Required properties:
@@ -355,6 +368,8 @@
 
  - qcom,mclk-clk-reg:                       Indicate the register address for mclk.
 
+ - qcom,lpass-mclk-id:                      Property to update LPASS MCLK Id.
+
 * audio_slimslave
 
 Required properties:
@@ -640,6 +655,13 @@
                 compatible = "qcom,msm-pcm-hostless";
         };
 
+	audio_load_mod {
+		compatible = "qcom,audio-load-mod";
+		audio_test_mod {
+			compatible = "qcom,audio-test-mod";
+		};
+	};
+
 	qcom,msm-ocmem-audio {
 		compatible = "qcom,msm-ocmem-audio";
 		qcom,msm_bus,name = "audio-ocmem";
diff --git a/Documentation/devicetree/bindings/thermal/qpnp-adc-tm.txt b/Documentation/devicetree/bindings/thermal/qpnp-adc-tm.txt
index 28ab2dd..97b71a7 100644
--- a/Documentation/devicetree/bindings/thermal/qpnp-adc-tm.txt
+++ b/Documentation/devicetree/bindings/thermal/qpnp-adc-tm.txt
@@ -61,6 +61,8 @@
 - qcom,adc-tm-recalib-check: Add this property to check if recalibration required due to inaccuracy.
 - hkadc_ldo-supply : Add this property if VADC needs to perform a Software Vote for the HKADC.
 - hkadc_ok-supply : Add this property if the VADC needs to perform a Software vote for the HKADC VREG_OK.
+- #thermal-sensor-cells : To register ADC sensors with of_thermal. Should be 1.
+  See ./thermal.txt for a description.
 
 Client required property:
 - qcom,<consumer name>-adc_tm : The phandle to the corresponding adc_tm device.
diff --git a/Documentation/devicetree/bindings/usb/msm-hsusb.txt b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
index 89c817e..8654a3e 100644
--- a/Documentation/devicetree/bindings/usb/msm-hsusb.txt
+++ b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
@@ -1,315 +1,110 @@
 MSM SoC HSUSB controllers
 
-OTG:
+EHCI
 
-Required properties :
-- compatible : should be "qcom,hsusb-otg"
-- regs : Array of offset and length of the register sets in the memory map
-- reg-names : indicates various iomem resources passed by name. The possible
-	strings in this field are:
-	"core": USB controller register space. (Required)
-	"tcsr": TCSR register for routing USB Controller signals to
-	either picoPHY0 or picoPHY1. (Optional)
-	"phy_csr": PHY Wrapper CSR register space. Provides register level
-	interface through AHB2PHY for performing PHY related operations
-	like retention and HV interrupts management.
-- interrupts: IRQ line
-- interrupt-names: OTG interrupt name(s) referenced in interrupts above
-            HSUSB OTG expects "core_irq" which is IRQ line from CORE and
-            "async_irq" from HSPHY for asynchronous wakeup events in LPM.
-            optional ones are described in next section.
-- qcom,hsusb-otg-phy-type: PHY type can be one of
-	    1 - Chipidea PHY (obsolete)
-	    2 - Synopsis Pico PHY
-	    3 - Synopsis Femto PHY
-	    4 - QUSB ULPI PHY
-- qcom,hsusb-otg-mode: Operational mode. Can be one of
-            1 - Peripheral only mode
-	    2 - Host only mode
-	    3 - OTG mode
-	    Based on the mode, OTG driver registers platform devices for
-	    gadget and host.
-- qcom,hsusb-otg-otg-control: OTG control (VBUS and ID notifications)
-  can be one of
-            1 - PHY control
-	    2 - PMIC control
-	    3 - User control (via debugfs)
-- <supply-name>-supply: handle to the regulator device tree node
-         Required "supply-name" is "HSUSB_VDDCX" (when voting for VDDCX) or
-         "hsusb_vdd_dig" (when voting for VDDCX Corner voltage),
-         "HSUSB_1p8-supply" and "HSUSB_3p3-supply".
-- qcom,vdd-voltage-level: This property must be a list of three integer
-	values (none, min, max) where each value represents either a voltage
-	in microvolts or a value corresponding to voltage corner. If usb core
-	supports svs, min value will have absolute SVS or SVS corner otherwise
-	min value will have absolute nominal or nominal corner.
-- clocks: a list of phandles to the USB clocks. Usage is as per
-	Documentation/devicetree/bindings/clock/clock-bindings.txt
-- clock-names: Names of the clocks in 1-1 correspondence with the "clocks"
-	property.
+Required properties:
+- compatible:	Should contain "qcom,ehci-host"
+- reg:			offset and length of the register set in the memory map
+- usb-phy:		phandle for the PHY device
 
-	Required clocks:
-	"core_clk": USB core clock that is required for data transfers.
-	"iface_clk": USB core clock that is required for register access.
+Example EHCI controller device node:
 
-	Optional clocks:
-	"sleep_clk": PHY sleep clock. Required for interrupts.
-	"phy_reset_clk": PHY blocks asynchronous reset clock. Required
-		for the USB block reset. It is a reset only clock.
-	"phy_por_clk": Reset only clock for asserting/de-asserting
-		PHY POR signal. Required for overriding PHY parameters.
-	"phy_csr_clk": Required for accessing PHY CSR registers through
-		AHB2PHY interface.
-	"phy_ref_clk": Required when PHY have referance clock,
-	"xo": XO clock. The source clock that is used as a reference clock
-		to the PHY.
-	"bimc_clk", "snoc_clk", "pcnoc_clk": bus voting clocks. Used to
-		keep buses at a nominal frequency during USB peripheral
-		mode for achieving max throughput.
-- qcom,max-nominal-sysclk-rate: Indicates maximum nominal frequency for which
-	system clock should be voted whenever streaming mode is enabled.
-- resets: reset specifier pair consists of phandle for the reset provider
-	and reset lines used by this controller.
-- reset-names: reset signal name strings sorted in the same order as the resets
-	property.
-
-Optional properties :
-- interrupt-names : Optional interrupt resource entries are:
-    "pmic_id_irq" : Interrupt from PMIC for external ID pin notification.
-    "phy_irq" : Interrupt from PHY. Used for ID detection.
-- qcom,hsusb-otg-disable-reset: If present then core is RESET only during
-	    init, otherwise core is RESET for every cable disconnect as well
-- qcom,hsusb-otg-pnoc-errata-fix: If present then workaround for PNOC
-	    performance issue is applied which requires changing the mem-type
-	    attribute via VMIDMT.
-- qcom,hsusb-otg-default-mode: The default USB mode after boot-up.
-  Applicable only when OTG is controlled by user. Can be one of
-            0 - None. Low power mode
-            1 - Peripheral
-	    2 - Host
-- qcom,hsusb-otg-phy-init-seq: PHY configuration sequence. val, reg pairs
-  terminate with -1
-- qcom,hsusb-otg-power-budget: VBUS power budget in mA
-  0 will be treated as 500mA
-- qcom,hsusb-otg-pclk-src-name: The source of pclk
-- Refer to "Documentation/devicetree/bindings/arm/msm/msm-bus.txt" for
-  below optional properties:
-    - qcom,msm-bus,name
-    - qcom,msm-bus,num_cases - There are three valid cases for this: NONE, MAX
-		and MIN bandwidth votes. Minimum two cases must be defined for
-		both NONE and MAX votes. If MIN vote is different from NONE VOTE
-		then specify third case for MIN VOTE. If explicit NOC clock rates
-		are not specified then MAX value should be large enough to get
-		desired BUS frequencies. In case explicit NOC clock rates are
-		specified, peripheral mode bus bandwidth vote should be defined
-		to vote for arbitrated bandwidth so that 60MHz frequency is met.
-
-    - qcom,msm-bus,num_paths
-    - qcom,msm-bus,vectors
-- qcom,hsusb-otg-lpm-on-dev-suspend: If present then USB enter to
-	    low power mode upon receiving bus suspend.
-- qcom,hsusb-otg-clk-always-on-workaround: If present then USB core clocks
-	    remain active upon receiving bus suspend and USB cable is connected.
-	    Used for allowing USB to respond for remote wakup.
-- qcom,hsusb-otg-delay-lpm: If present then USB core will wait one second
-	after disconnect before entering low power mode.
-- <supply-name>-supply: handle to the regulator device tree node.
-         Optional "supply-name" is "vbus_otg" to supply vbus in host mode.
-- qcom,dp-manual-pullup: If present, vbus is not routed to USB controller/phy
-	and controller driver therefore enables pull-up explicitly before
-	starting controller using usbcmd run/stop bit.
-- qcom,usb2-enable-hsphy2: If present then USB2 controller is connected to 2nd
-	HSPHY.
-- qcom,hsusb-log2-itc: value of 2^(log2_itc-1) will be used as the
-	interrupt threshold (ITC), when log2_itc is between 1 to 7.
-- qcom,hsusb-l1-supported: If present, the device supports l1 (Link power
-	management).
-- qcom,no-selective-suspend: If present selective suspend is disabled on hub ports.
-- qcom,hsusb-otg-mpm-dpsehv-int: If present, indicates mpm interrupt to be
-	configured for detection of dp line transition during VDD minimization.
-- qcom,hsusb-otg-mpm-dmsehv-int: If present, indicates mpm interrupt to be
-	configured for detection of dm line transition during VDD minimization.
-- pinctrl-names : This should be defined if a target uses gpio and pinctrl framework.
-  See "pinctrl" in Documentation/devicetree/bindings/pinctrl/msm-pinctrl.txt.
-  It should specify the names of the configs that pinctrl can install in driver
-	Following are the pinctrl config that can be installed
-	"hsusb_active" : Active configuration of pins, this should specify active
-	config of vddmin gpio (if used) defined in their pin groups.
-	"hsusb_sleep" : Disabled configuration of pins, this should specify sleep
-	config of vddmin gpio (if used) defined in their pin groups.
-- qcom,hsusb-otg-vddmin-gpio = If present, indicates a gpio that will be used
-	to supply voltage to the D+ line during VDD minimization and peripheral
-	bus suspend. If not exists, then VDD minimization will not be allowed
-	during peripheral bus suspend.
-- qcom,ahb-async-bridge-bypass: If present, indicates that enable AHB2AHB By Pass
-	mode with device controller for better throughput. With this mode, USB Core
-	runs using PNOC clock and synchronous to it. Hence it is must to have proper
-	"qcom,msm-bus,vectors" to have high bus frequency. User shouldn't try to
-	enable this feature without proper bus voting. When this feature is enabled,
-	it is required to do HW reset during cable disconnect for host mode functionality
-	working and hence need to disable qcom,hsusb-otg-disable-reset. With this feature
-	enabled, USB HW has to vote for maximum PNOC frequency as USB HW cannot tolerate
-	changes in PNOC frequency which results in USB functionality failure.
-- qcom,disable-retention-with-vdd-min: If present don't allow phy retention but allow
-	vdd min.
-- qcom,usbin-vadc: Corresponding vadc device's phandle to read usbin voltage using VADC.
-	This will be used to get value of usb power supply's VOLTAGE_NOW property.
-- qcom,usbid-gpio: This corresponds to gpio which is used for USB ID detection.
-- qcom,hub-reset-gpio: This corresponds to gpio which is used for HUB reset.
-- qcom,sw-sel-gpio: This corresponds to gpio which is used for switch select routing
-	of D+/D- between the USB HUB and type B USB jack for peripheral mode.
-- qcom,bus-clk-rate: If present, indicates nominal bus frequency to be voted for
-	bimc/snoc/pcnoc clock with usb cable connected. If AHB2AHB bypass is enabled,
-	pcnoc value should be defined to very large number so that PNOC runs at max
-	frequency. If 'qcom,default-mode-svs' is also set then two set of frequencies
-	must be specified for SVS and NOM modes which user can change using sysfs node.
-- qcom,phy-dvdd-always-on: If present PHY DVDD is supplied by a always-on
-	regulator unlike vddcx/vddmx. PHY can keep D+ pull-up and D+/D-
-	pull-down resistors during peripheral and host bus suspend without
-	any re-work.
-- qcom,emulation: Indicates that we are running on emulation platform.
-- qcom,boost-sysclk-with-streaming: If present, enable controller specific
-	streaming feature. Also this flag can bump up usb system clock to max in streaming
-	mode. This flag enables streaming mode for all compositions and is different from
-	streaming-func property defined in android device node. Please refer Doumentation/
-	devicetree/bindings/usb/android-dev.txt for details about "streaming-func" property.
-- qcom,axi-prefetch-enable: If present, AXI64 interface will be used for transferring data
-       to/from DDR by controller.
-- qcom,enable-sdp-typec-current-limit: Indicates whether type-c current for SDP CHARGER to
-	be limited.
-- qcom,enable-phy-id-pullup: If present, PHY can keep D+ pull-up resistor on USB ID line
-	during cable disconnect.
-- qcom,max-svs-sysclk-rate: Indicates system clock frequency voted by driver in
-	non-perf mode. In perf mode driver uses qcom,max-nominal-sysclk-rate.
-- qcom,pm-qos-latency: This represents max tolerable CPU latency in microsecs,
-	which is used as a vote by driver to get max performance in perf mode.
-- qcom,default-mode-svs: Indicates USB system clock should run at SVS frequency.
-	User can bump it up using 'perf_mode' sysfs attribute for gadget.
-- qcom,vbus-low-as-hostmode: If present, specifies USB_VBUS to switch to host mode
-	if USB_VBUS is low or device mode if USB_VBUS is high.
-- qcom,usbeth-reset-gpio: If present then an external usb-to-eth is connected to
-	the USB host controller and its RESET_N signal is connected to this
-	usbeth-reset-gpio GPIO. It should be driven LOW to RESET the usb-to-eth.
-- extcon: phandles to external connector devices. First phandle should point to
-	external connector, which provide "USB" cable events, the second should
-	point to external connector device, which provide "USB-HOST" cable events.
-	A single phandle may be specified if a single connector device provides
-	both "USB" and "USB-HOST" events.
-
-Example HSUSB OTG controller device node :
-	usb@f9690000 {
-		compatible = "qcom,hsusb-otg";
-		reg = <0xf9690000 0x400>;
-		reg-names = "core";
-		interrupts = <134>;
-		interrupt-names = "core_irq";
-
-		qcom,hsusb-otg-phy-type = <2>;
-		qcom,hsusb-otg-mode = <1>;
-		qcom,hsusb-otg-otg-control = <1>;
-		qcom,hsusb-otg-disable-reset;
-		qcom,hsusb-otg-pnoc-errata-fix;
-		qcom,hsusb-otg-default-mode = <2>;
-		qcom,hsusb-otg-phy-init-seq = <0x01 0x90 0xffffffff>;
-		qcom,hsusb-otg-power-budget = <500>;
-		qcom,hsusb-otg-pclk-src-name = "dfab_usb_clk";
-		qcom,hsusb-otg-lpm-on-dev-suspend;
-		qcom,hsusb-otg-clk-always-on-workaround;
-		hsusb_vdd_dig-supply = <&pm8226_s1_corner>;
-                HSUSB_1p8-supply = <&pm8226_l10>;
-                HSUSB_3p3-supply = <&pm8226_l20>;
-		qcom,vdd-voltage-level = <1 5 7>;
-		qcom,dp-manual-pullup;
-		qcom,hsusb-otg-mpm-dpsehv-int = <49>;
-		qcom,hsusb-otg-mpm-dmsehv-int = <58>;
-		qcom,max-nominal-sysclk-rate = <133330000>;
-		qcom,max-svs-sysclk-rate = <100000000>;
-		qcom,pm-qos-latency = <59>;
-
-		qcom,msm-bus,name = "usb2";
-		qcom,msm-bus,num_cases = <2>;
-		qcom,msm-bus,num_paths = <1>;
-		qcom,msm-bus,vectors =
-				<87 512 0 0>,
-				<87 512 60000000 960000000>;
-		pinctrl-names = "hsusb_active","hsusb_sleep";
-		pinctrl-0 = <&vddmin_act>;
-		pinctrl-0 = <&vddmin_sus>;
-		qcom,hsusb-otg-vddmin-gpio = <&pm8019_mpps 6 0>;
-		qcom,disable-retention-with-vdd-min;
-		qcom,usbin-vadc = <&pm8226_vadc>;
-		qcom,usbid-gpio = <&msm_gpio 110 0>;
-	};
-
-MSM HSUSB EHCI controller
-
-Required properties :
-- compatible : should be "qcom,ehci-host"
-- reg : offset and length of the register set in the memory map
-- interrupts: IRQ lines used by this controller
-- interrupt-names : Required interrupt resource entries are:
-            HSUSB EHCI expects "core_irq" and optionally "async_irq".
-- <supply-name>-supply: handle to the regulator device tree node
-  Required "supply-name" is either "hsusb_vdd_dig" or "HSUSB_VDDCX"
-  "HSUSB_1p8-supply" "HSUSB_3p3-supply".
-- qcom,usb2-power-budget: maximum vbus power (in mA) that can be provided.
-- qcom,vdd-voltage-level: This property must be a list of five integer
-  values (no, 0.5vsuspend, 0.75suspend, min, max) where each value respresents
-  either a voltage in microvolts or a value corresponding to voltage corner.
-  First value represents value to vote when USB is not at all active, second
-  value represents value to vote when target is not connected to dock during low
-  power mode, third value represents vlaue to vote when target is connected to dock
-  and no peripheral connected over dock during low power mode, fourth value represents
-  minimum value to vote when USB is operational, fifth item represents maximum value
-  to vote for USB is operational.
-
-Optional properties :
-- qcom,usb2-enable-hsphy2: If present, select second PHY for USB operation.
-- pinctrl-names : This should be defined if a target uses pinctrl framework.
-  See "pinctrl" in Documentation/devicetree/bindings/pinctrl/msm-pinctrl.txt.
-  It should specify the names of the configs that pinctrl can install in driver
-  Following are the pinctrl configs that can be installed
-	"ehci_active" : Active configuration of pins, this should specify active
-	config defined in pin groups of used gpio's from resume and
-	ext-hub-reset.
-	"ehci_sleep" : Disabled configuration of pins, this should specify sleep
-	config defined in pin groups of used gpio's from resume and
-	ext-hub-reset.
-- qcom,resume-gpio: if present then peripheral connected to usb controller
-  cannot wakeup from XO shutdown using in-band usb bus resume. Use resume
-  gpio to wakeup peripheral.
-- qcom,ext-hub-reset-gpio: If present then an external HUB is connected to
-  the USB host controller and its RESET_N signal is connected to this
-  ext-hub-reset-gpio GPIO. It should be driven LOW to RESET the HUB.
-- qcom,usb2-enable-uicc: If present, usb2 port will be used for uicc card connection.
-- usb-phy: phandle for the PHY device, if described as a separate device tree node
-- qcom,pm-qos-latency: This property represents the maximum tolerable CPU latency in
-  microsecs, which is used as a vote to keep the CPUs in a high enough power state when
-  USB bus is in use (not suspended).
-- Refer to "Documentation/devicetree/bindings/arm/msm/msm-bus.txt" for
-  below optional properties:
-    - qcom,msm-bus,name
-    - qcom,msm-bus,num_cases - Two cases (NONE and MAX) for voting are supported.
-    - qcom,msm-bus,num_paths
-    - qcom,msm-bus,vectors
-
-Example MSM HSUSB EHCI controller device node :
-	ehci: qcom,ehci-host@f9a55000 {
+	ehci: ehci@f9a55000 {
 		compatible = "qcom,ehci-host";
 		reg = <0xf9a55000 0x400>;
-		interrupts = <0 134 0>, <0 140 0>;
-		interrupt-names = "core_irq", "async_irq";
-		/* If pinctrl is used and ext-hub-reset and resume gpio's are present*/
-		pinctrl-names = "ehci_active","ehci_sleep";
-		pinctrl-0 = <&ehci_reset_act &resume_act>;
-		pinctrl-1 = <&ehci_reset_sus &resume_sus>;
-		qcom,resume-gpio = <&msm_gpio 80 0>;
-		qcom,ext-hub-reset-gpio = <&msm_gpio 0 0>;
-		hsusb_vdd_dig-supply = <&pm8841_s2_corner>;
-		HSUSB_1p8-supply = <&pm8941_l6>;
-		HSUSB_3p3-supply = <&pm8941_l24>;
-		qcom,usb2-enable-hsphy2;
-		qcom,usb2-power-budget = <500>;
-		qcom,vdd-voltage-level = <1 2 3 5 7>;
-		qcom,usb2-enable-uicc;
+		usb-phy = <&usb_otg>;
+	};
+
+USB PHY with optional OTG:
+
+Required properties:
+- compatible:   Should contain:
+  "qcom,usb-otg-ci" for chipsets with ChipIdea 45nm PHY
+  "qcom,usb-otg-snps" for chipsets with Synopsys 28nm PHY
+
+- reg:          Offset and length of the register set in the memory map
+- interrupts:   interrupt-specifier for the OTG interrupt.
+
+- clocks:       A list of phandle + clock-specifier pairs for the
+                clocks listed in clock-names
+- clock-names:  Should contain the following:
+  "phy"         USB PHY reference clock
+  "core"        Protocol engine clock
+  "iface"       Interface bus clock
+  "alt_core"    Protocol engine clock for targets with asynchronous
+                reset methodology. (optional)
+
+- vddcx-supply: phandle to the regulator for the vdd supply for
+                digital circuit operation.
+- v1p8-supply:  phandle to the regulator for the 1.8V supply
+- v3p3-supply:  phandle to the regulator for the 3.3V supply
+
+- resets:       A list of phandle + reset-specifier pairs for the
+                resets listed in reset-names
+- reset-names:  Should contain the following:
+  "phy"         USB PHY controller reset
+  "link"        USB LINK controller reset
+
+- qcom,otg-control: OTG control (VBUS and ID notifications) can be one of
+                1 - PHY control
+                2 - PMIC control
+
+Optional properties:
+- dr_mode:      One of "host", "peripheral" or "otg". Defaults to "otg"
+
+- switch-gpio:  A phandle + gpio-specifier pair. Some boards are using Dual
+                SPDT USB Switch, which is controlled by GPIO to de/multiplex
+                D+/D- USB lines between connectors.
+
+- qcom,phy-init-sequence: PHY configuration sequence values. This is related to Device
+                Mode Eye Diagram test. Start address at which these values will be
+                written is ULPI_EXT_VENDOR_SPECIFIC. Value of -1 is reserved as
+                "do not overwrite default value at this address".
+                For example: qcom,phy-init-sequence = < -1 0x63 >;
+                Will update only value at address ULPI_EXT_VENDOR_SPECIFIC + 1.
+
+- qcom,phy-num: Select number of pico-phy to use, can be one of
+                0 - PHY one, default
+                1 - Second PHY
+                Some platforms may have configuration to allow USB
+                controller work with any of the two HSPHYs present.
+
+- qcom,vdd-levels: This property must be a list of three integer values
+                (no, min, max) where each value represents either a voltage
+                in microvolts or a value corresponding to voltage corner.
+
+- qcom,manual-pullup: If present, vbus is not routed to USB controller/phy
+                and controller driver therefore enables pull-up explicitly
+                before starting controller using usbcmd run/stop bit.
+
+- extcon:       phandles to external connector devices. First phandle
+                should point to external connector, which provide "USB"
+                cable events, the second should point to external connector
+                device, which provide "USB-HOST" cable events. If one of
+                the external connector devices is not required, an empty <0>
+                phandle should be specified.
+
+Example HSUSB OTG controller device node:
+
+    usb@f9a55000 {
+        compatible = "qcom,usb-otg-snps";
+        reg = <0xf9a55000 0x400>;
+        interrupts = <0 134 0>;
+        dr_mode = "peripheral";
+
+        clocks = <&gcc GCC_XO_CLK>, <&gcc GCC_USB_HS_SYSTEM_CLK>,
+                <&gcc GCC_USB_HS_AHB_CLK>;
+
+        clock-names = "phy", "core", "iface";
+
+        vddcx-supply = <&pm8841_s2_corner>;
+        v1p8-supply = <&pm8941_l6>;
+        v3p3-supply = <&pm8941_l24>;
+
+        resets = <&gcc GCC_USB2A_PHY_BCR>, <&gcc GCC_USB_HS_BCR>;
+        reset-names = "phy", "link";
+
+        qcom,otg-control = <1>;
+        qcom,phy-init-sequence = < -1 0x63 >;
+        qcom,vdd-levels = <1 5 7>;
 	};
diff --git a/Documentation/devicetree/bindings/usb/msm-phy.txt b/Documentation/devicetree/bindings/usb/msm-phy.txt
index f8c8a69..f7f4ced 100644
--- a/Documentation/devicetree/bindings/usb/msm-phy.txt
+++ b/Documentation/devicetree/bindings/usb/msm-phy.txt
@@ -184,6 +184,10 @@
    state when attached in host mode and "suspend" state when detached.
  - qcom,tune2-efuse-correction: The value to be adjusted from fused value for
    improved rise/fall times.
+ - nvmem-cells: specifies the handle to represent the SoC revision.
+   usually it is defined by qfprom device node.
+ - nvmem-cell-names: specifies the given nvmem cell name as defined in
+   qfprom node.
 
 Example:
 	qusb_phy: qusb@f9b39000 {
diff --git a/Documentation/devicetree/bindings/usb/usb-device.txt b/Documentation/devicetree/bindings/usb/usb-device.txt
index 1c35e7b..03ab8f5 100644
--- a/Documentation/devicetree/bindings/usb/usb-device.txt
+++ b/Documentation/devicetree/bindings/usb/usb-device.txt
@@ -11,7 +11,7 @@
   be used, but a device adhering to this binding may leave out all except
   for usbVID,PID.
 - reg: the port number which this device is connecting to, the range
-  is 1-31.
+  is 1-255.
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index a37e441..e996ba5 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -272,6 +272,7 @@
 SUNW	Sun Microsystems, Inc
 swir	Sierra Wireless
 syna	Synaptics Inc.
+synaptics	Synaptics Inc.
 synology	Synology, Inc.
 tbs	TBS Technologies
 tcg	Trusted Computing Group
diff --git a/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt b/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt
new file mode 100644
index 0000000..fbe1bca
--- /dev/null
+++ b/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt
@@ -0,0 +1,110 @@
+* Qualcomm Technologies Inc. WCNSS Platform Driver
+
+WCNSS driver is the platform driver. It is used for performing the cold
+boot-up of the wireless device. It is responsible for adjusting
+the necessary I/O rails and enabling appropriate gpios for wireless
+connectivity subsystem.
+
+Required properties:
+- compatible: "wcnss_wlan"
+- reg: physical address and length of the register set for the device.
+- reg-names: "wcnss_mmio", "wcnss_fiq", "pronto_phy_base", "riva_phy_base",
+	"riva_ccu_base", "pronto_a2xb_base", "pronto_ccpu_base",
+	"pronto_saw2_base", "wlan_tx_phy_aborts","wlan_brdg_err_source",
+	"wlan_tx_status", "alarms_txctl", "alarms_tactl",
+	"pronto_mcu_base", "pronto_qfuse".
+- interrupts: Pronto to Apps interrupts for tx done and rx pending.
+- qcom,pronto-vddmx-supply: regulator to supply pronto pll.
+- qcom,pronto-vddcx-supply: voltage corner regulator to supply WLAN/BT/FM
+digital module.
+- qcom,pronto-vddpx-supply: regulator to supply WLAN DAC.
+- qcom,iris-vddxo-supply  : regulator to supply RF XO.
+- qcom,iris-vddrfa-supply : regulator to supply RFA digital.
+- qcom,iris-vddpa-supply  : regulator to supply RF PA.
+- qcom,iris-vdddig-supply : regulator to supply RF digital(BT/FM).
+- gpios: gpio numbers to configure 5-wire interface of WLAN connectivity
+- qcom,has-48mhz-xo: boolean flag to determine the usage of 24MHz XO from RF
+- qcom,has-pronto-hw: boolean flag to determine the revId of the WLAN subsystem
+- qcom,wcnss-adc_tm: ADC handle for vbatt notification APIs.
+- qcom,wcnss-vadc: VADC handle for battery voltage notification APIs.
+- pinctrl-<n> : Pinctrl states as described in bindings/pinctrl/pinctrl-bindings.txt
+- pinctrl-names : Names corresponding to the numbered pinctrl states
+- clocks: from common clock binding: handle to xo, rf_clk and wcnss snoc clocks.
+- clock-names: Names of all the clocks that are accessed by the subsystem
+- qcom,vdd-voltage-level: This property represents (nominal, min, max) voltage
+for iris and pronto regulators in milli-volts.
+- qcom,vdd-current: This property represents current value for
+iris and pronto regulators in micro-amps.
+
+Optional properties:
+- qcom,has-autodetect-xo: boolean flag to determine whether Iris XO auto detect
+should be performed during boot up.
+- qcom,snoc-wcnss-clock-freq: indicates the wcnss snoc clock frequency in Hz.
+If wcnss_snoc clock is specified in the list of clocks, this property needs
+to be set to make it functional.
+- qcom,wlan-rx-buff-count: WLAN RX buffer count is a configurable value,
+using a smaller count for this buffer will reduce the memory usage.
+- qcom,is-pronto-v3: boolean flag to determine the pronto hardware version
+in use. Subsequently, the correct workqueue will be used by the DXE engine to
+push frames in the TX data path.
+- qcom,is-dual-band-disable: boolean flag to determine the WLAN dual band
+capability.
+- qcom,is-pronto-vadc: boolean flag to determine Battery voltage feature
+support for pronto hardware.
+- qcom,wcnss-pm : <Core rail LDO#, PA rail LDO#, XO settling time,
+RPM power collapse enabled, standalone power collapse enabled>
+Power manager related parameter for LDO configuration.
+	11     -  WCN CORE rail LDO number
+	21     -  WCN PA rail LDO number
+	1200   -  WCN XO settling time (usec)
+	1      -  WCN RPM power collapse enabled
+	1      -  WCN standalone power collapse enabled
+	6      -  GPIO strength value
+- qcom,has-vsys-adc-channel: boolean flag to determine which ADC HW channel
+needs to be used for the VBATT feature.
+- qcom,has-a2xb-split-reg: boolean flag to determine A2xb split timeout limit
+register is available or not.
+
+Example:
+
+qcom,wcnss-wlan@fb000000 {
+	compatible = "qcom,wcnss_wlan";
+	reg = <0xfb000000 0x280000>,
+	    <0xf9011008 0x04>;
+	reg-names = "wcnss_mmio", "wcnss_fiq";
+	interrupts = <0 145 0 0 146 0>;
+	interrupt-names = "wcnss_wlantx_irq", "wcnss_wlanrx_irq";
+
+	qcom,pronto-vddmx-supply = <&pm8841_s1>;
+	qcom,pronto-vddcx-supply = <&pm8841_s2_corner>;
+	qcom,pronto-vddpx-supply = <&pm8941_s3>;
+	qcom,iris-vddxo-supply = <&pm8941_l6>;
+	qcom,iris-vddrfa-supply = <&pm8941_l11>;
+	qcom,iris-vddpa-supply = <&pm8941_l19>;
+	qcom,iris-vdddig-supply = <&pm8941_l3>;
+
+	gpios = <&msmgpio 36 0>, <&msmgpio 37 0>, <&msmgpio 38 0>,
+	      <&msmgpio 39 0>, <&msmgpio 40 0>;
+	qcom,has-48mhz-xo;
+	qcom,is-pronto-v3;
+	qcom,wlan-rx-buff-count = <512>;
+	qcom,has-pronto-hw;
+	qcom,wcnss-adc_tm = <&pm8226_adc_tm>;
+
+	pinctrl-names = "wcnss_default", "wcnss_sleep";
+	pinctrl-0 = <&wcnss_default>;
+	pinctrl-1 = <&wcnss_sleep>;
+	pinctrl-2 = <&wcnss_gpio_default>;
+
+	clocks = <&clock_rpm clk_xo_wlan_clk>,
+	       <&clock_rpm clk_rf_clk2>,
+	       <&clock_debug clk_gcc_debug_mux>,
+	       <&clock_gcc clk_wcnss_m_clk>,
+	       <&clock_gcc clk_snoc_wcnss_a_clk>;
+
+	clock-names = "xo", "rf_clk", "measure", "wcnss_debug",
+		"snoc_wcnss";
+
+	qcom,snoc-wcnss-clock-freq = <200000000>;
+	qcom,wcnss-pm = <11 21 1200 1 1 6>;
+};
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 2b576cc..9f5bfd6 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2707,6 +2707,11 @@
 	nosmt		[KNL,S390] Disable symmetric multithreading (SMT).
 			Equivalent to smt=1.
 
+	nospectre_v2	[X86] Disable all mitigations for the Spectre variant 2
+			(indirect branch prediction) vulnerability. System may
+			allow data leaks with this option, which is equivalent
+			to spectre_v2=off.
+
 	noxsave		[BUGS=X86] Disables x86 extended register state save
 			and restore using xsave. The kernel will fallback to
 			enabling legacy floating-point and sse state.
@@ -2811,6 +2816,8 @@
 	nopat		[X86] Disable PAT (page attribute table extension of
 			pagetables) support.
 
+	nopcid		[X86-64] Disable the PCID cpu feature.
+
 	norandmaps	Don't use address space randomization.  Equivalent to
 			echo 0 > /proc/sys/kernel/randomize_va_space
 
@@ -3339,6 +3346,21 @@
 	pt.		[PARIDE]
 			See Documentation/blockdev/paride.txt.
 
+	pti=		[X86_64] Control Page Table Isolation of user and
+			kernel address spaces.  Disabling this feature
+			removes hardening, but improves performance of
+			system calls and interrupts.
+
+			on   - unconditionally enable
+			off  - unconditionally disable
+			auto - kernel detects whether your CPU model is
+			       vulnerable to issues that PTI mitigates
+
+			Not specifying this option is equivalent to pti=auto.
+
+	nopti		[X86_64]
+			Equivalent to pti=off
+
 	pty.legacy_count=
 			[KNL] Number of legacy pty's. Overwrites compiled-in
 			default number.
@@ -3943,6 +3965,29 @@
 	sonypi.*=	[HW] Sony Programmable I/O Control Device driver
 			See Documentation/laptops/sonypi.txt
 
+	spectre_v2=	[X86] Control mitigation of Spectre variant 2
+			(indirect branch speculation) vulnerability.
+
+			on   - unconditionally enable
+			off  - unconditionally disable
+			auto - kernel detects whether your CPU model is
+			       vulnerable
+
+			Selecting 'on' will, and 'auto' may, choose a
+			mitigation method at run time according to the
+			CPU, the available microcode, the setting of the
+			CONFIG_RETPOLINE configuration option, and the
+			compiler with which the kernel was built.
+
+			Specific mitigations can also be selected manually:
+
+			retpoline	  - replace indirect branches
+			retpoline,generic - google's original retpoline
+			retpoline,amd     - AMD-specific minimal thunk
+
+			Not specifying this option is equivalent to
+			spectre_v2=auto.
+
 	spia_io_base=	[HW,MTD]
 	spia_fio_base=
 	spia_pedr=
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 9d2908d..7058d43 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1288,6 +1288,10 @@
 	224.0.0.X range.
 	Default TRUE
 
+nf_ipv4_defrag_skip - BOOLEAN
+	Skip defragmentation per interface if set.
+	Default: 0 (always defrag)
+
 Alexey Kuznetsov.
 kuznet@ms2.inr.ac.ru
 
diff --git a/Documentation/x86/pti.txt b/Documentation/x86/pti.txt
new file mode 100644
index 0000000..d11eff6
--- /dev/null
+++ b/Documentation/x86/pti.txt
@@ -0,0 +1,186 @@
+Overview
+========
+
+Page Table Isolation (pti, previously known as KAISER[1]) is a
+countermeasure against attacks on the shared user/kernel address
+space such as the "Meltdown" approach[2].
+
+To mitigate this class of attacks, we create an independent set of
+page tables for use only when running userspace applications.  When
+the kernel is entered via syscalls, interrupts or exceptions, the
+page tables are switched to the full "kernel" copy.  When the system
+switches back to user mode, the user copy is used again.
+
+The userspace page tables contain only a minimal amount of kernel
+data: only what is needed to enter/exit the kernel such as the
+entry/exit functions themselves and the interrupt descriptor table
+(IDT).  There are a few strictly unnecessary things that get mapped
+such as the first C function when entering an interrupt (see
+comments in pti.c).
+
+This approach helps to ensure that side-channel attacks leveraging
+the paging structures do not function when PTI is enabled.  It can be
+enabled by setting CONFIG_PAGE_TABLE_ISOLATION=y at compile time.
+Once enabled at compile-time, it can be disabled at boot with the
+'nopti' or 'pti=' kernel parameters (see kernel-parameters.txt).
+
+Page Table Management
+=====================
+
+When PTI is enabled, the kernel manages two sets of page tables.
+The first set is very similar to the single set which is present in
+kernels without PTI.  This includes a complete mapping of userspace
+that the kernel can use for things like copy_to_user().
+
+Although _complete_, the user portion of the kernel page tables is
+crippled by setting the NX bit in the top level.  This ensures
+that any missed kernel->user CR3 switch will immediately crash
+userspace upon executing its first instruction.
+
+The userspace page tables map only the kernel data needed to enter
+and exit the kernel.  This data is entirely contained in the 'struct
+cpu_entry_area' structure which is placed in the fixmap which gives
+each CPU's copy of the area a compile-time-fixed virtual address.
+
+For new userspace mappings, the kernel makes the entries in its
+page tables like normal.  The only difference is when the kernel
+makes entries in the top (PGD) level.  In addition to setting the
+entry in the main kernel PGD, a copy of the entry is made in the
+userspace page tables' PGD.
+
+This sharing at the PGD level also inherently shares all the lower
+layers of the page tables.  This leaves a single, shared set of
+userspace page tables to manage.  One PTE to lock, one set of
+accessed bits, dirty bits, etc...
+
+Overhead
+========
+
+Protection against side-channel attacks is important.  But,
+this protection comes at a cost:
+
+1. Increased Memory Use
+  a. Each process now needs an order-1 PGD instead of order-0.
+     (Consumes an additional 4k per process).
+  b. The 'cpu_entry_area' structure must be 2MB in size and 2MB
+     aligned so that it can be mapped by setting a single PMD
+     entry.  This consumes nearly 2MB of RAM once the kernel
+     is decompressed, but no space in the kernel image itself.
+
+2. Runtime Cost
+  a. CR3 manipulation to switch between the page table copies
+     must be done at interrupt, syscall, and exception entry
+     and exit (it can be skipped when the kernel is interrupted,
+     though.)  Moves to CR3 are on the order of a hundred
+     cycles, and are required at every entry and exit.
+  b. A "trampoline" must be used for SYSCALL entry.  This
+     trampoline depends on a smaller set of resources than the
+     non-PTI SYSCALL entry code, so requires mapping fewer
+     things into the userspace page tables.  The downside is
+     that stacks must be switched at entry time.
+  c. Global pages are disabled for all kernel structures not
+     mapped into both kernel and userspace page tables.  This
+     feature of the MMU allows different processes to share TLB
+     entries mapping the kernel.  Losing the feature means more
+     TLB misses after a context switch.  The actual loss of
+     performance is very small, however, never exceeding 1%.
+  d. Process Context IDentifiers (PCID) is a CPU feature that
+     allows us to skip flushing the entire TLB when switching page
+     tables by setting a special bit in CR3 when the page tables
+     are changed.  This makes switching the page tables (at context
+     switch, or kernel entry/exit) cheaper.  But, on systems with
+     PCID support, the context switch code must flush both the user
+     and kernel entries out of the TLB.  The user PCID TLB flush is
+     deferred until the exit to userspace, minimizing the cost.
+     See intel.com/sdm for the gory PCID/INVPCID details.
+  e. The userspace page tables must be populated for each new
+     process.  Even without PTI, the shared kernel mappings
+     are created by copying top-level (PGD) entries into each
+     new process.  But, with PTI, there are now *two* kernel
+     mappings: one in the kernel page tables that maps everything
+     and one for the entry/exit structures.  At fork(), we need to
+     copy both.
+  f. In addition to the fork()-time copying, there must also
+     be an update to the userspace PGD any time a set_pgd() is done
+     on a PGD used to map userspace.  This ensures that the kernel
+     and userspace copies always map the same userspace
+     memory.
+  g. On systems without PCID support, each CR3 write flushes
+     the entire TLB.  That means that each syscall, interrupt
+     or exception flushes the TLB.
+  h. INVPCID is a TLB-flushing instruction which allows flushing
+     of TLB entries for non-current PCIDs.  Some systems support
+     PCIDs, but do not support INVPCID.  On these systems, addresses
+     can only be flushed from the TLB for the current PCID.  When
+     flushing a kernel address, we need to flush all PCIDs, so a
+     single kernel address flush will require a TLB-flushing CR3
+     write upon the next use of every PCID.
+
+Possible Future Work
+====================
+1. We can be more careful about not actually writing to CR3
+   unless its value is actually changed.
+2. Allow PTI to be enabled/disabled at runtime in addition to the
+   boot-time switching.
+
+Testing
+========
+
+To test stability of PTI, the following test procedure is recommended,
+ideally doing all of these in parallel:
+
+1. Set CONFIG_DEBUG_ENTRY=y
+2. Run several copies of all of the tools/testing/selftests/x86/ tests
+   (excluding MPX and protection_keys) in a loop on multiple CPUs for
+   several minutes.  These tests frequently uncover corner cases in the
+   kernel entry code.  In general, old kernels might cause these tests
+   themselves to crash, but they should never crash the kernel.
+3. Run the 'perf' tool in a mode (top or record) that generates many
+   frequent performance monitoring non-maskable interrupts (see "NMI"
+   in /proc/interrupts).  This exercises the NMI entry/exit code which
+   is known to trigger bugs in code paths that did not expect to be
+   interrupted, including nested NMIs.  Using "-c" boosts the rate of
+   NMIs, and using two -c with separate counters encourages nested NMIs
+   and less deterministic behavior.
+
+	while true; do perf record -c 10000 -e instructions,cycles -a sleep 10; done
+
+4. Launch a KVM virtual machine.
+5. Run 32-bit binaries on systems supporting the SYSCALL instruction.
+   This has been a lightly-tested code path and needs extra scrutiny.
+
+Debugging
+=========
+
+Bugs in PTI cause a few different signatures of crashes
+that are worth noting here.
+
+ * Failures of the selftests/x86 code.  Usually a bug in one of the
+   more obscure corners of entry_64.S
+ * Crashes in early boot, especially around CPU bringup.  Bugs
+   in the trampoline code or mappings cause these.
+ * Crashes at the first interrupt.  Caused by bugs in entry_64.S,
+   like screwing up a page table switch.  Also caused by
+   incorrectly mapping the IRQ handler entry code.
+ * Crashes at the first NMI.  The NMI code is separate from main
+   interrupt handlers and can have bugs that do not affect
+   normal interrupts.  Also caused by incorrectly mapping NMI
+   code.  NMIs that interrupt the entry code must be very
+   careful and can be the cause of crashes that show up when
+   running perf.
+ * Kernel crashes at the first exit to userspace.  entry_64.S
+   bugs, or failing to map some of the exit code.
+ * Crashes at first interrupt that interrupts userspace. The paths
+   in entry_64.S that return to userspace are sometimes separate
+   from the ones that return to the kernel.
+ * Double faults: overflowing the kernel stack because of page
+   faults upon page faults.  Caused by touching non-pti-mapped
+   data in the entry code, or forgetting to switch to kernel
+   CR3 before calling into C functions which are not pti-mapped.
+ * Userspace segfaults early in boot, sometimes manifesting
+   as mount(8) failing to mount the rootfs.  These have
+   tended to be TLB invalidation issues.  Usually invalidating
+   the wrong PCID, or otherwise missing an invalidation.
+
+1. https://gruss.cc/files/kaiser.pdf
+2. https://meltdownattack.com/meltdown.pdf
diff --git a/Makefile b/Makefile
index 061197a..cf9657e 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 65
+SUBLEVEL = 77
 EXTRAVERSION =
 NAME = Roaring Lionus
 
@@ -374,9 +374,6 @@
 CFLAGS_KERNEL	=
 AFLAGS_KERNEL	=
 LDFLAGS_vmlinux =
-CFLAGS_GCOV	:= -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,)
-CFLAGS_KCOV	:= $(call cc-option,-fsanitize-coverage=trace-pc,)
-
 
 # Use USERINCLUDE when you must reference the UAPI directories only.
 USERINCLUDE    := \
@@ -397,21 +394,19 @@
 
 LINUXINCLUDE	+= $(filter-out $(LINUXINCLUDE),$(USERINCLUDE))
 
-KBUILD_CPPFLAGS := -D__KERNEL__
-
+KBUILD_AFLAGS   := -D__ASSEMBLY__
 KBUILD_CFLAGS   := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
 		   -fno-strict-aliasing -fno-common \
 		   -Werror-implicit-function-declaration \
 		   -Wno-format-security \
-		   -std=gnu89 $(call cc-option,-fno-PIE)
-
-
+		   -std=gnu89
+KBUILD_CPPFLAGS := -D__KERNEL__
 KBUILD_AFLAGS_KERNEL :=
 KBUILD_CFLAGS_KERNEL :=
-KBUILD_AFLAGS   := -D__ASSEMBLY__ $(call cc-option,-fno-PIE)
 KBUILD_AFLAGS_MODULE  := -DMODULE
 KBUILD_CFLAGS_MODULE  := -DMODULE
 KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
+GCC_PLUGINS_CFLAGS :=
 
 # Read KERNELRELEASE from include/config/kernel.release (if it exists)
 KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)
@@ -424,7 +419,7 @@
 export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
 
 export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS
-export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV CFLAGS_KCOV CFLAGS_KASAN CFLAGS_UBSAN
+export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_KASAN CFLAGS_UBSAN
 export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
 export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
 export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
@@ -624,6 +619,12 @@
 # Defaults to vmlinux, but the arch makefile usually adds further targets
 all: vmlinux
 
+KBUILD_CFLAGS	+= $(call cc-option,-fno-PIE)
+KBUILD_AFLAGS	+= $(call cc-option,-fno-PIE)
+CFLAGS_GCOV	:= -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,)
+CFLAGS_KCOV	:= $(call cc-option,-fsanitize-coverage=trace-pc,)
+export CFLAGS_GCOV CFLAGS_KCOV
+
 # The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default
 # values of the respective KBUILD_* variables
 ARCH_CPPFLAGS :=
@@ -791,6 +792,9 @@
 # disable invalid "can't wrap" optimizations for signed / pointers
 KBUILD_CFLAGS	+= $(call cc-option,-fno-strict-overflow)
 
+# Make sure -fstack-check isn't enabled (like gentoo apparently did)
+KBUILD_CFLAGS  += $(call cc-option,-fno-stack-check,)
+
 # conserve stack if available
 KBUILD_CFLAGS   += $(call cc-option,-fconserve-stack)
 
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index 41faf17..0684fd2 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -673,6 +673,7 @@
 		return 0;
 
 	__asm__ __volatile__(
+	"	mov	lp_count, %5		\n"
 	"	lp	3f			\n"
 	"1:	ldb.ab  %3, [%2, 1]		\n"
 	"	breq.d	%3, 0, 3f               \n"
@@ -689,8 +690,8 @@
 	"	.word   1b, 4b			\n"
 	"	.previous			\n"
 	: "+r"(res), "+r"(dst), "+r"(src), "=r"(val)
-	: "g"(-EFAULT), "l"(count)
-	: "memory");
+	: "g"(-EFAULT), "r"(count)
+	: "lp_count", "lp_start", "lp_end", "memory");
 
 	return res;
 }
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index d8d8b82..41245ce 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -12,6 +12,7 @@
 	select ARCH_USE_BUILTIN_BSWAP
 	select ARCH_USE_CMPXCHG_LOCKREF
 	select ARCH_WANT_IPC_PARSE_VERSION
+	select ARM_PSCI_FW if PM
 	select BUILDTIME_EXTABLE_SORT if MMU
 	select CLONE_BACKWARDS
 	select CPU_PM if (SUSPEND || CPU_IDLE)
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index 975c36e..8e6b393 100644
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
@@ -668,6 +668,7 @@
 	ti,non-removable;
 	bus-width = <4>;
 	cap-power-off-card;
+	keep-power-in-suspend;
 	pinctrl-names = "default";
 	pinctrl-0 = <&mmc2_pins>;
 
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 064d84f..ce54a70 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -282,6 +282,7 @@
 				device_type = "pci";
 				ranges = <0x81000000 0 0          0x03000 0 0x00010000
 					  0x82000000 0 0x20013000 0x13000 0 0xffed000>;
+				bus-range = <0x00 0xff>;
 				#interrupt-cells = <1>;
 				num-lanes = <1>;
 				linux,pci-domain = <0>;
@@ -318,6 +319,7 @@
 				device_type = "pci";
 				ranges = <0x81000000 0 0          0x03000 0 0x00010000
 					  0x82000000 0 0x30013000 0x13000 0 0xffed000>;
+				bus-range = <0x00 0xff>;
 				#interrupt-cells = <1>;
 				num-lanes = <1>;
 				linux,pci-domain = <1>;
diff --git a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
index 08cce17..b4575bb 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
+++ b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
@@ -192,7 +192,7 @@
 	interrupts-extended = <&intc 83 &omap3_pmx_core 0x11a>;
 	pinctrl-names = "default";
 	pinctrl-0 = <&mmc1_pins &mmc1_cd>;
-	cd-gpios = <&gpio4 31 IRQ_TYPE_LEVEL_LOW>;		/* gpio127 */
+	cd-gpios = <&gpio4 31 GPIO_ACTIVE_LOW>;		/* gpio127 */
 	vmmc-supply = <&vmmc1>;
 	bus-width = <4>;
 	cap-power-off-card;
@@ -249,9 +249,9 @@
 			OMAP3_CORE1_IOPAD(0x2110, PIN_INPUT | MUX_MODE0)   /* cam_xclka.cam_xclka */
 			OMAP3_CORE1_IOPAD(0x2112, PIN_INPUT | MUX_MODE0)   /* cam_pclk.cam_pclk */
 
-			OMAP3_CORE1_IOPAD(0x2114, PIN_INPUT | MUX_MODE0)   /* cam_d0.cam_d0 */
-			OMAP3_CORE1_IOPAD(0x2116, PIN_INPUT | MUX_MODE0)   /* cam_d1.cam_d1 */
-			OMAP3_CORE1_IOPAD(0x2118, PIN_INPUT | MUX_MODE0)   /* cam_d2.cam_d2 */
+			OMAP3_CORE1_IOPAD(0x2116, PIN_INPUT | MUX_MODE0)   /* cam_d0.cam_d0 */
+			OMAP3_CORE1_IOPAD(0x2118, PIN_INPUT | MUX_MODE0)   /* cam_d1.cam_d1 */
+			OMAP3_CORE1_IOPAD(0x211a, PIN_INPUT | MUX_MODE0)   /* cam_d2.cam_d2 */
 			OMAP3_CORE1_IOPAD(0x211c, PIN_INPUT | MUX_MODE0)   /* cam_d3.cam_d3 */
 			OMAP3_CORE1_IOPAD(0x211e, PIN_INPUT | MUX_MODE0)   /* cam_d4.cam_d4 */
 			OMAP3_CORE1_IOPAD(0x2120, PIN_INPUT | MUX_MODE0)   /* cam_d5.cam_d5 */
diff --git a/arch/arm/boot/dts/qcom/pmxpoorwills.dtsi b/arch/arm/boot/dts/qcom/pmxpoorwills.dtsi
index 2106759..8f7edab 100644
--- a/arch/arm/boot/dts/qcom/pmxpoorwills.dtsi
+++ b/arch/arm/boot/dts/qcom/pmxpoorwills.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -97,6 +97,7 @@
 			qcom,adc-vdd-reference = <1875>;
 			pinctrl-names = "default";
 			pinctrl-0 = <&ambient_therm_default>;
+			#thermal-sensor-cells = <1>;
 
 			chan@6 {
 				label = "die_temp";
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-cdp.dts b/arch/arm/boot/dts/qcom/sdxpoorwills-cdp.dts
index 94ccf9c..261829f 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-cdp.dts
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-cdp.dts
@@ -34,6 +34,26 @@
 	status = "ok";
 };
 
+&sdhc_1 {
+	vdd-supply = <&vreg_sd_mmc>;
+
+	vdd-io-supply = <&pmxpoorwills_l7>;
+	qcom,vdd-io-voltage-level = <1800000 2950000>;
+	qcom,vdd-io-current-level = <200 10000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on &sdc1_cd_on>;
+	pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off &sdc1_cd_off>;
+
+	qcom,clk-rates = <400000 20000000 25000000 50000000 100000000
+							200000000>;
+	qcom,devfreq,freq-table = <50000000 200000000>;
+
+	cd-gpios = <&tlmm 93 0x1>;
+
+	status = "ok";
+};
+
 &pmxpoorwills_vadc {
 	chan@83 {
 		label = "vph_pwr";
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-coresight.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-coresight.dtsi
new file mode 100644
index 0000000..f8baa04
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-coresight.dtsi
@@ -0,0 +1,1070 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	csr: csr@6001000 {
+		compatible = "qcom,coresight-csr";
+		reg = <0x6001000 0x1000>;
+		reg-names = "csr-base";
+
+		coresight-name = "coresight-csr";
+
+		qcom,blk-size = <1>;
+	};
+
+	tmc_etr: tmc@6048000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b961>;
+
+		reg = <0x6048000 0x1000>,
+		      <0x6064000 0x15000>;
+		reg-names = "tmc-base", "bam-base";
+
+		arm,buffer-size = <0x400000>;
+		arm,sg-enable;
+
+		coresight-name = "coresight-tmc-etr";
+		coresight-ctis = <&cti0 &cti8>;
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+		interrupts = <GIC_SPI 251 IRQ_TYPE_EDGE_RISING>;
+		interrupt-names = "byte-cntr-irq";
+
+		port {
+			tmc_etr_in_replicator: endpoint {
+				slave-mode;
+				remote-endpoint = <&replicator_out_tmc_etr>;
+			};
+		};
+	};
+
+	replicator_qdss: replicator@6046000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b909>;
+
+		reg = <0x6046000 0x1000>;
+		reg-names = "replicator-base";
+
+		coresight-name = "coresight-replicator";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				replicator_out_tmc_etr: endpoint {
+					remote-endpoint=
+						<&tmc_etr_in_replicator>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				replicator_in_tmc_etf: endpoint {
+					slave-mode;
+					remote-endpoint=
+						<&tmc_etf_out_replicator>;
+				};
+			};
+		};
+	};
+
+	tmc_etf: tmc@6047000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b961>;
+
+		reg = <0x6047000 0x1000>;
+		reg-names = "tmc-base";
+
+		coresight-name = "coresight-tmc-etf";
+		coresight-ctis = <&cti0 &cti8>;
+		arm,default-sink;
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				tmc_etf_out_replicator: endpoint {
+					remote-endpoint =
+						<&replicator_in_tmc_etf>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				tmc_etf_in_funnel_merg: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_merg_out_tmc_etf>;
+				};
+			};
+		};
+	};
+
+	funnel_merg: funnel@6045000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x6045000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-merg";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				funnel_merg_out_tmc_etf: endpoint {
+					remote-endpoint =
+						<&tmc_etf_in_funnel_merg>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				funnel_merg_in_funnel_in0: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_in0_out_funnel_merg>;
+				};
+			};
+
+			port@2 {
+				reg = <1>;
+				funnel_merg_in_funnel_in1: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_in1_out_funnel_merg>;
+				};
+			};
+		};
+	};
+
+	funnel_in0: funnel@6041000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x6041000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-in0";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				funnel_in0_out_funnel_merg: endpoint {
+					remote-endpoint =
+						<&funnel_merg_in_funnel_in0>;
+				};
+			};
+
+			port@1 {
+				reg = <6>;
+				funnel_in0_in_funnel_qatb: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_qatb_out_funnel_in0>;
+				};
+			};
+
+			port@2 {
+				reg = <7>;
+				funnel_in0_in_stm: endpoint {
+					slave-mode;
+					remote-endpoint = <&stm_out_funnel_in0>;
+				};
+			};
+		};
+	};
+
+	stm: stm@6002000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b962>;
+
+		reg = <0x6002000 0x1000>,
+		      <0x16280000 0x180000>;
+		reg-names = "stm-base", "stm-stimulus-base";
+
+		coresight-name = "coresight-stm";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+		port {
+			stm_out_funnel_in0: endpoint {
+				remote-endpoint = <&funnel_in0_in_stm>;
+			};
+		};
+
+	};
+
+	funnel_qatb: funnel@6005000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x6005000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-qatb";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				funnel_qatb_out_funnel_in0: endpoint {
+					remote-endpoint =
+						<&funnel_in0_in_funnel_qatb>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				funnel_qatb_in_tpda: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpda_out_funnel_qatb>;
+				};
+			};
+		};
+	};
+
+	tpda: tpda@6004000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b969>;
+		reg = <0x6004000 0x1000>;
+		reg-names = "tpda-base";
+
+		coresight-name = "coresight-tpda";
+
+		qcom,tpda-atid = <65>;
+		qcom,bc-elem-size = <10 32>,
+				    <13 32>;
+		qcom,tc-elem-size = <13 32>;
+		qcom,dsb-elem-size = <0 32>,
+				     <2 32>,
+				     <3 32>,
+				     <5 32>,
+				     <6 32>,
+				     <10 32>,
+				     <11 32>,
+				     <13 32>;
+		qcom,cmb-elem-size = <3 64>,
+				     <7 64>,
+				     <9 64>,
+				     <13 64>;
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				tpda_out_funnel_qatb: endpoint {
+					remote-endpoint =
+						<&funnel_qatb_in_tpda>;
+				};
+
+			};
+
+			port@1 {
+				reg = <0>;
+				tpda_in_funnel_ddr_0: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_ddr_0_out_tpda>;
+				};
+			};
+
+			port@2 {
+				reg = <1>;
+				tpda_in_tpdm_vsense: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_vsense_out_tpda>;
+				};
+			};
+
+			port@3 {
+				reg = <2>;
+				tpda_in_tpdm_dcc: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_dcc_out_tpda>;
+				};
+			};
+
+			port@4 {
+				reg = <5>;
+				tpda_in_tpdm_center: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_center_out_tpda>;
+				};
+			};
+		};
+	};
+
+	funnel_ddr_0: funnel@69e2000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x69e2000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-ddr-0";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				funnel_ddr_0_out_tpda: endpoint {
+					remote-endpoint =
+					    <&tpda_in_funnel_ddr_0>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				funnel_ddr_0_in_tpdm_ddr: endpoint {
+					slave-mode;
+					remote-endpoint =
+					    <&tpdm_ddr_out_funnel_ddr_0>;
+				};
+			};
+		};
+	};
+
+	tpdm_dcc: tpdm@6870280 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b968>;
+		reg = <0x6870280 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-dcc";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+		port{
+			tpdm_dcc_out_tpda: endpoint {
+				remote-endpoint = <&tpda_in_tpdm_dcc>;
+			};
+		};
+	};
+
+	tpdm_vsense: tpdm@6840000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b968>;
+		reg = <0x6840000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-vsense";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+		port{
+			tpdm_vsense_out_tpda: endpoint {
+				remote-endpoint = <&tpda_in_tpdm_vsense>;
+			};
+		};
+	};
+
+	tpdm_center: tpdm@6c28000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b968>;
+		reg = <0x6c28000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-center";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+		port{
+			tpdm_center_out_tpda: endpoint {
+				remote-endpoint = <&tpda_in_tpdm_center>;
+			};
+		};
+	};
+
+	tpdm_ddr: tpdm@69e0000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b968>;
+		reg = <0x69e0000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-ddr";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+		qcom,msr-fix-req;
+
+		port {
+			tpdm_ddr_out_funnel_ddr_0: endpoint {
+				remote-endpoint = <&funnel_ddr_0_in_tpdm_ddr>;
+			};
+		};
+	};
+
+	funnel_in1: funnel@6042000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x6042000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-in1";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				funnel_in1_out_funnel_merg: endpoint {
+					remote-endpoint =
+						<&funnel_merg_in_funnel_in1>;
+				};
+			};
+
+			port@1 {
+				reg = <2>;
+				funnel_in1_in_funnel_swao: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_swao_out_funnel_in1>;
+				};
+			};
+
+			port@2 {
+				reg = <3>;
+				funnel_in1_in_modem_etm0: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&modem_etm0_out_funnel_in1>;
+				};
+			};
+
+			port@3 {
+				reg = <7>;
+				funnel_in1_in_tpda_modem: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpda_modem_out_funnel_in1>;
+				};
+			};
+		};
+	};
+
+	modem_etm0 {
+		compatible = "qcom,coresight-remote-etm";
+
+		coresight-name = "coresight-modem-etm0";
+		qcom,inst-id = <2>;
+
+		port {
+			modem_etm0_out_funnel_in1: endpoint {
+				remote-endpoint =
+					<&funnel_in1_in_modem_etm0>;
+			};
+		};
+	};
+
+	funnel_swao:funnel@6b08000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x6b08000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-swao";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				funnel_swao_out_funnel_in1: endpoint {
+					remote-endpoint =
+						<&funnel_in1_in_funnel_swao>;
+				};
+			};
+
+			port@1 {
+				reg = <7>;
+				funnel_swao_in_tpda_swao: endpoint {
+					slave-mode;
+					remote-endpoint=
+						<&tpda_swao_out_funnel_swao>;
+				};
+			};
+		};
+	};
+
+	tpda_modem: tpda@6832000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b969>;
+		reg = <0x6832000 0x1000>;
+		reg-names = "tpda-base";
+
+		coresight-name = "coresight-tpda-modem";
+
+		qcom,tpda-atid = <67>;
+		qcom,dsb-elem-size = <0 32>;
+		qcom,cmb-elem-size = <0 64>;
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				tpda_modem_out_funnel_in1: endpoint {
+					remote-endpoint =
+						<&funnel_in1_in_tpda_modem>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				tpda_modem_in_tpdm_modem: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_modem_out_tpda_modem>;
+				};
+			};
+		};
+	};
+
+	tpdm_modem: tpdm@6830000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b968>;
+		reg = <0x6830000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-modem";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+		port {
+			tpdm_modem_out_tpda_modem: endpoint {
+				remote-endpoint = <&tpda_modem_in_tpdm_modem>;
+			};
+		};
+	};
+
+	tpda_swao: tpda@6b01000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b969>;
+		reg = <0x6b01000 0x1000>;
+		reg-names = "tpda-base";
+
+		coresight-name = "coresight-tpda-swao";
+
+		qcom,tpda-atid = <71>;
+		qcom,dsb-elem-size = <1 32>;
+		qcom,cmb-elem-size = <0 64>;
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				tpda_swao_out_funnel_swao: endpoint {
+					remote-endpoint =
+						<&funnel_swao_in_tpda_swao>;
+				};
+
+			};
+
+			port@1 {
+				reg = <0>;
+				tpda_swao_in_tpdm_swao0: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_swao0_out_tpda_swao>;
+				};
+			};
+
+			port@2 {
+				reg = <1>;
+				tpda_swao_in_tpdm_swao1: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_swao1_out_tpda_swao>;
+				};
+
+			};
+		};
+	};
+
+	tpdm_swao0: tpdm@6b02000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b968>;
+
+		reg = <0x6b02000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-swao-0";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+		port {
+			tpdm_swao0_out_tpda_swao: endpoint {
+				remote-endpoint = <&tpda_swao_in_tpdm_swao0>;
+			};
+		};
+	};
+
+	tpdm_swao1: tpdm@6b03000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b968>;
+		reg = <0x6b03000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name="coresight-tpdm-swao-1";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+		qcom,msr-fix-req;
+
+		port {
+			tpdm_swao1_out_tpda_swao: endpoint {
+				remote-endpoint = <&tpda_swao_in_tpdm_swao1>;
+			};
+		};
+	};
+
+	ipcb_tgu: tgu@6b0c000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b999>;
+		reg = <0x6b0c000 0x1000>;
+		reg-names = "tgu-base";
+		tgu-steps = <3>;
+		tgu-conditions = <4>;
+		tgu-regs = <4>;
+		tgu-timer-counters = <8>;
+
+		coresight-name = "coresight-tgu-ipcb";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti0: cti@6010000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x6010000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti0";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+	};
+
+	cti1: cti@6011000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x6011000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti1";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+	};
+
+	cti2: cti@6012000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x6012000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti2";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti3: cti@6013000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x6013000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti3";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+	};
+
+	cti4: cti@6014000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x6014000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti4";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+	};
+
+	cti5: cti@6015000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x6015000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti5";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+	};
+
+	cti6: cti@6016000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x6016000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti6";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+	};
+
+	cti7: cti@6017000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x6017000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti7";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+	};
+
+	cti8: cti@6018000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x6018000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti8";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+	};
+
+	cti9: cti@6019000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x6019000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti9";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+	};
+
+	cti10: cti@601a000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x601a000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti10";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+	};
+
+	cti11: cti@601b000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x601b000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti11";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+	};
+
+	cti12: cti@601c000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x601c000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti12";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+	};
+
+	cti13: cti@601d000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x601d000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti13";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+	};
+
+	cti14: cti@601e000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x601e000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti14";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+	};
+
+	cti15: cti@601f000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x601f000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti15";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+	};
+
+	cti_cpu0: cti@7003000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x7003000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-cpu0";
+		cpu = <&CPU0>;
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+	};
+
+	cti_modem_cpu0: cti@6837000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x6837000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-modem-cpu0";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_modem_cpu1: cti@683b000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x683b000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-modem-cpu1";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti0_swao: cti@6b04000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x6b04000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-swao_cti0";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti1_swao: cti@6b05000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x6b05000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-swao_cti1";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti2_swao: cti@6b06000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x6b06000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-swao_cti2";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti3_swao: cti@6b07000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x6b07000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-swao_cti3";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti0_ddr0: cti@69e1000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x69e1000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-ddr_dl_0_cti";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti0_ddr1: cti@69e4000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x69e4000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-ddr_dl_1_cti0";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti1_ddr1: cti@69e5000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x69e5000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-ddr_dl_1_cti1";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti2_ddr1: cti@69e6000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x69e6000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-ddr_dl_1_cti2";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	hwevent: hwevent@14066f0 {
+		compatible = "qcom,coresight-hwevent";
+		reg = <0x14066f0 0x4>,
+		      <0x14166f0 0x4>,
+		      <0x1406038 0x4>,
+		      <0x1416038 0x4>;
+		reg-names = "ddr-ch0-cfg", "ddr-ch23-cfg", "ddr-ch0-ctrl",
+			    "ddr-ch23-ctrl";
+
+		coresight-name = "coresight-hwevent";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-mtp.dts b/arch/arm/boot/dts/qcom/sdxpoorwills-mtp.dts
index f580901..575febe 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-mtp.dts
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-mtp.dts
@@ -33,6 +33,26 @@
 	status = "ok";
 };
 
+&sdhc_1 {
+	vdd-supply = <&vreg_sd_mmc>;
+
+	vdd-io-supply = <&pmxpoorwills_l7>;
+	qcom,vdd-io-voltage-level = <1800000 2950000>;
+	qcom,vdd-io-current-level = <200 10000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on &sdc1_cd_on>;
+	pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off &sdc1_cd_off>;
+
+	qcom,clk-rates = <400000 20000000 25000000 50000000 100000000
+							200000000>;
+	qcom,devfreq,freq-table = <50000000 200000000>;
+
+	cd-gpios = <&tlmm 93 0x1>;
+
+	status = "ok";
+};
+
 &pmxpoorwills_vadc {
 	chan@83 {
 		label = "vph_pwr";
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie.dtsi
new file mode 100644
index 0000000..e939bd2
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie.dtsi
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <dt-bindings/clock/qcom,gcc-sdxpoorwills.h>
+
+&soc {
+	pcie0: qcom,pcie@1c00000 {
+		compatible = "qcom,pci-msm";
+		cell-index = <0>;
+
+		reg = <0x01c00000 0x2000>,
+		      <0x01c02000 0x1000>,
+		      <0x40000000 0xf1d>,
+		      <0x40000f20 0xa8>,
+		      <0x40001000 0x1000>,
+		      <0x40100000 0x100000>,
+		      <0x40200000 0x100000>,
+		      <0x40300000 0x1d00000>,
+		      <0x01fce008 0x4>;
+
+		reg-names = "parf", "phy", "dm_core", "elbi", "iatu",
+				"conf", "io", "bars", "tcsr";
+
+		#address-cells = <3>;
+		#size-cells = <2>;
+		ranges = <0x01000000 0x0 0x40200000 0x40200000 0x0 0x100000>,
+			<0x02000000 0x0 0x40300000 0x40300000 0x0 0x1d00000>;
+		interrupt-parent = <&pcie0>;
+		interrupts = <0 1 2 3 4 5>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0 0 0 0xffffffff>;
+		interrupt-map = <0 0 0 0 &intc 0 119 0
+				0 0 0 1 &intc 0 141 0
+				0 0 0 2 &intc 0 142 0
+				0 0 0 3 &intc 0 143 0
+				0 0 0 4 &intc 0 144 0
+				0 0 0 5 &intc 0 140 0>;
+
+		interrupt-names = "int_msi", "int_a", "int_b", "int_c",
+				"int_d", "int_global_int";
+
+		qcom,phy-sequence = <0x840 0x03 0x0
+				0x094 0x08 0x0
+				0x154 0x33 0x0
+				0x058 0x0f 0x0
+				0x0a4 0x42 0x0
+				0x1bc 0x11 0x0
+				0x0bc 0x82 0x0
+				0x0d4 0x03 0x0
+				0x0d0 0x55 0x0
+				0x0cc 0x55 0x0
+				0x0b0 0x1a 0x0
+				0x0ac 0x0a 0x0
+				0x158 0x01 0x0
+				0x074 0x06 0x0
+				0x07c 0x16 0x0
+				0x084 0x36 0x0
+				0x1b0 0x1e 0x0
+				0x1ac 0xb9 0x0
+				0x050 0x07 0x0
+				0x29c 0x12 0x0
+				0x284 0x05 0x0
+				0x234 0xd9 0x0
+				0x238 0xcc 0x0
+				0x51c 0x03 0x0
+				0x518 0x1c 0x0
+				0x524 0x14 0x0
+				0x4ec 0x0e 0x0
+				0x4f0 0x4a 0x0
+				0x4f4 0x0f 0x0
+				0x5b4 0x04 0x0
+				0x434 0x7f 0x0
+				0x444 0x70 0x0
+				0x510 0x17 0x0
+				0x4d8 0x01 0x0
+				0x598 0xe0 0x0
+				0x59c 0xc8 0x0
+				0x5a0 0xc8 0x0
+				0x5a4 0x09 0x0
+				0x5a8 0xb1 0x0
+				0x584 0x24 0x0
+				0x588 0xe4 0x0
+				0x58c 0xec 0x0
+				0x590 0x39 0x0
+				0x594 0x36 0x0
+				0x570 0xef 0x0
+				0x574 0xef 0x0
+				0x578 0x2f 0x0
+				0x57c 0xd3 0x0
+				0x580 0x40 0x0
+				0x4fc 0x00 0x0
+				0x4f8 0xc0 0x0
+				0x9a4 0x01 0x0
+				0xc90 0x00 0x0
+				0xc40 0x01 0x0
+				0xc48 0x01 0x0
+				0xca0 0x11 0x0
+				0x048 0x90 0x0
+				0xc1c 0xc1 0x0
+				0x988 0x88 0x0
+				0x998 0x08 0x0
+				0x8dc 0x0d 0x0
+				0x800 0x00 0x0
+				0x844 0x03 0x0>;
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&pcie0_clkreq_default
+			&pcie0_perst_default
+			&pcie0_wake_default>;
+
+		perst-gpio = <&tlmm 57 0>;
+		wake-gpio = <&tlmm 53 0>;
+
+		gdsc-vdd-supply = <&gdsc_pcie>;
+		vreg-1.8-supply = <&pmxpoorwills_l1>;
+		vreg-0.9-supply = <&pmxpoorwills_l4>;
+		vreg-cx-supply = <&pmxpoorwills_s5_level>;
+
+		qcom,vreg-1.8-voltage-level = <1200000 1200000 24000>;
+		qcom,vreg-0.9-voltage-level = <872000 872000 24000>;
+		qcom,vreg-cx-voltage-level = <RPMH_REGULATOR_LEVEL_MAX
+						RPMH_REGULATOR_LEVEL_SVS 0>;
+
+		qcom,l0s-supported;
+		qcom,l1-supported;
+		qcom,l1ss-supported;
+		qcom,aux-clk-sync;
+
+		qcom,ep-latency = <10>;
+
+		qcom,slv-addr-space-size = <0x40000000>;
+
+		qcom,phy-status-offset = <0x814>;
+
+		qcom,cpl-timeout = <0x2>;
+
+		qcom,boot-option = <0x1>;
+
+		linux,pci-domain = <0>;
+
+		qcom,use-19p2mhz-aux-clk;
+
+		qcom,msm-bus,name = "pcie0";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<45 512 0 0>,
+				<45 512 500 800>;
+
+		clocks = <&clock_gcc GCC_PCIE_PIPE_CLK>,
+			<&clock_rpmh RPMH_CXO_CLK>,
+			<&clock_gcc GCC_PCIE_AUX_CLK>,
+			<&clock_gcc GCC_PCIE_CFG_AHB_CLK>,
+			<&clock_gcc GCC_PCIE_MSTR_AXI_CLK>,
+			<&clock_gcc GCC_PCIE_SLV_AXI_CLK>,
+			<&clock_gcc GCC_PCIE_0_CLKREF_CLK>,
+			<&clock_gcc GCC_PCIE_SLV_Q2A_AXI_CLK>,
+			<&clock_gcc GCC_PCIE_SLEEP_CLK>,
+			<&clock_gcc GCC_PCIE_PHY_REFGEN_CLK>;
+
+		clock-names = "pcie_0_pipe_clk", "pcie_0_ref_clk_src",
+				"pcie_0_aux_clk", "pcie_0_cfg_ahb_clk",
+				"pcie_0_mstr_axi_clk", "pcie_0_slv_axi_clk",
+				"pcie_0_ldo", "pcie_0_slv_q2a_axi_clk",
+				"pcie_0_sleep_clk", "pcie_phy_refgen_clk";
+
+		max-clock-frequency-hz = <0>, <0>, <0>, <0>, <0>, <0>,
+					<0>, <0>, <0>, <0>, <100000000>;
+
+		resets = <&clock_gcc GCC_PCIE_BCR>,
+			<&clock_gcc GCC_PCIE_PHY_BCR>;
+
+		reset-names = "pcie_0_core_reset",
+				"pcie_0_phy_reset";
+	};
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi
index 82b65e2..deed94d 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -13,11 +13,13 @@
 &soc {
 	tlmm: pinctrl@3900000 {
 		compatible = "qcom,sdxpoorwills-pinctrl";
-		reg = <0x3900000 0x300000>;
+		reg = <0x3900000 0x300000>,
+			<0xb204900 0x280>;
 		interrupts = <0 212 0>;
 		gpio-controller;
 		#gpio-cells = <2>;
 		interrupt-controller;
+		interrupt-parent = <&pdc>;
 		#interrupt-cells = <2>;
 
 		uart2_console_active: uart2_console_active {
@@ -382,6 +384,44 @@
 			};
 		};
 
+		pcie0 {
+			pcie0_clkreq_default: pcie0_clkreq_default {
+				mux {
+					pins = "gpio56";
+					function = "pcie_clkreq";
+				};
+				config {
+					pins = "gpio56";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+
+			pcie0_perst_default: pcie0_perst_default {
+				mux {
+					pins = "gpio57";
+					function = "gpio";
+				};
+				config {
+					pins = "gpio57";
+					drive-strength = <2>;
+					bias-pull-down;
+				};
+			};
+
+			pcie0_wake_default: pcie0_wake_default {
+				mux {
+					pins = "gpio53";
+					function = "gpio";
+				};
+				config {
+					pins = "gpio53";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+		};
+
 		/* HS UART CONFIGURATION */
 
 		blsp1_uart1a: blsp1_uart1a {
@@ -1299,6 +1339,82 @@
 			};
 		};
 
+		/* SDC pin type */
+		sdc1_clk_on: sdc1_clk_on {
+			config {
+				pins = "sdc1_clk";
+				bias-disable;		/* NO pull */
+				drive-strength = <16>;	/* 16 MA */
+			};
+		};
+
+		sdc1_clk_off: sdc1_clk_off {
+			config {
+				pins = "sdc1_clk";
+				bias-disable;		/* NO pull */
+				drive-strength = <2>;	/* 2 MA */
+			};
+		};
+
+		sdc1_cmd_on: sdc1_cmd_on {
+			config {
+				pins = "sdc1_cmd";
+				bias-pull-up;		/* pull up */
+				drive-strength = <10>;	/* 10 MA */
+			};
+		};
+
+		sdc1_cmd_off: sdc1_cmd_off {
+			config {
+				pins = "sdc1_cmd";
+				num-grp-pins = <1>;
+				bias-pull-up;		/* pull up */
+				drive-strength = <2>;	/* 2 MA */
+			};
+		};
+
+		sdc1_data_on: sdc1_data_on {
+			config {
+				pins = "sdc1_data";
+				bias-pull-up;		/* pull up */
+				drive-strength = <10>;	/* 10 MA */
+			};
+		};
+
+		sdc1_data_off: sdc1_data_off {
+			config {
+				pins = "sdc1_data";
+				bias-pull-up;		/* pull up */
+				drive-strength = <2>;	/* 2 MA */
+			};
+		};
+
+		sdc1_cd_on: cd_on {
+			mux {
+				pins = "gpio93";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio93";
+				drive-strength = <2>;
+				bias-pull-up;
+			};
+		};
+
+		sdc1_cd_off: cd_off {
+			mux {
+				pins = "gpio93";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio93";
+				drive-strength = <2>;
+				bias-disable;
+			};
+		};
+
 		smb_int_default: smb_int_default {
 			mux {
 				pins = "gpio42";
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pm.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pm.dtsi
new file mode 100644
index 0000000..eab887c
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pm.dtsi
@@ -0,0 +1,95 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+&soc {
+
+	qcom,lpm-levels {
+		compatible = "qcom,lpm-levels";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		qcom,pm-cluster@0{
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0>;
+			label = "system";
+			qcom,psci-mode-shift = <0>;
+			qcom,psci-mode-mask = <0xf>;
+
+			qcom,pm-cluster-level@0 {
+				reg = <0>;
+				label = "cx_active";
+				qcom,psci-mode = <0x0>;
+				qcom,latency-us = <270>;
+				qcom,ss-power = <455>;
+				qcom,energy-overhead = <270621>;
+				qcom,time-overhead = <500>;
+			};
+
+			qcom,pm-cluster-level@1 {
+				reg = <1>;
+				label = "cx_min";
+				qcom,psci-mode = <0x0>;
+				qcom,latency-us = <285>;
+				qcom,ss-power = <442>;
+				qcom,energy-overhead = <306621>;
+				qcom,time-overhead = <540>;
+				qcom,min-child-idx = <2>;
+				qcom,notify-rpm;
+				qcom,is-reset;
+			};
+
+			qcom,pm-cpu@0 {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				qcom,psci-mode-shift = <0>;
+				qcom,psci-mode-mask = <0xf>;
+				qcom,cpu = <&CPU0>;
+
+				qcom,pm-cpu-level@0{
+					reg = <0>;
+					label = "wfi";
+					qcom,psci-cpu-mode = <0x1>;
+					qcom,latency-us = <1>;
+					qcom,ss-power = <473>;
+					qcom,energy-overhead = <100000>;
+					qcom,time-overhead = <25>;
+				};
+
+				qcom,pm-cpu-level@1 {
+					reg = <1>;
+					label = "standalone_pc";
+					qcom,psci-cpu-mode = <0x4>;
+					qcom,latency-us = <240>;
+					qcom,ss-power = <467>;
+					qcom,energy-overhead = <202781>;
+					qcom,time-overhead = <420>;
+					qcom,use-broadcast-timer;
+					qcom,is-reset;
+				};
+
+				qcom,pm-cpu-level@2 {
+					reg = <2>;
+					label = "system-pc";
+					qcom,psci-cpu-mode = <0x8>;
+					qcom,latency-us = <270>;
+					qcom,ss-power = <455>;
+					qcom,energy-overhead = <270621>;
+					qcom,time-overhead = <500>;
+					qcom,use-broadcast-timer;
+					qcom,is-reset;
+				};
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-regulator.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-regulator.dtsi
index e62c4a3..37903b9 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-regulator.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-regulator.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -11,6 +11,7 @@
  */
 
 #include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include <dt-bindings/gpio/gpio.h>
 
 &soc {
 	/* RPMh regulators */
@@ -77,9 +78,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa1";
+		qcom,regulator-type = "pmic5-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pmxpoorwills_l1: regualtor-pmxpoorwills-11 {
 			regulator-name = "pmxpoorwills_l1";
@@ -87,7 +89,7 @@
 			regulator-min-microvolt = <1200000>;
 			regulator-max-microvolt = <1200000>;
 			qcom,init-voltage = <1200000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -95,9 +97,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa2";
+		qcom,regulator-type = "pmic5-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pmxpoorwills_l2: regualtor-pmxpoorwills-12 {
 			 regulator-name = "pmxpoorwills_l2";
@@ -105,7 +108,7 @@
 			 regulator-min-microvolt = <1128000>;
 			 regulator-max-microvolt = <1128000>;
 			 qcom,init-voltage = <1128000>;
-			 qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			 qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		 };
 	};
 
@@ -113,9 +116,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa3";
+		qcom,regulator-type = "pmic5-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pmxpoorwills_l3: regualtor-pmxpoorwills-l3 {
 			regulator-name = "pmxpoorwills_l3";
@@ -123,7 +127,7 @@
 			regulator-min-microvolt = <800000>;
 			regulator-max-microvolt = <800000>;
 			qcom,init-voltage = <800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -131,9 +135,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa4";
+		qcom,regulator-type = "pmic5-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pmxpoorwills_l4: regualtor-pmxpoorwills-l4 {
 			regulator-name = "pmxpoorwills_l4";
@@ -141,7 +146,7 @@
 			regulator-min-microvolt = <872000>;
 			regulator-max-microvolt = <872000>;
 			qcom,init-voltage = <872000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -149,9 +154,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa5";
+		qcom,regulator-type = "pmic5-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pmxpoorwills_l5: regualtor-pmxpoorwills-l5 {
 			regulator-name = "pmxpoorwills_l5";
@@ -159,7 +165,7 @@
 			regulator-min-microvolt = <1704000>;
 			regulator-max-microvolt = <1704000>;
 			qcom,init-voltage = <1704000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -167,9 +173,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa7";
+		qcom,regulator-type = "pmic5-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pmxpoorwills_l7: regualtor-pmxpoorwills-l7 {
 			regulator-name = "pmxpoorwills_l7";
@@ -177,7 +184,7 @@
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <2952000>;
 			qcom,init-voltage = <1800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -185,9 +192,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa8";
+		qcom,regulator-type = "pmic5-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pmxpoorwills_l8: regualtor-pmxpoorwills-l8 {
 			regulator-name = "pmxpoorwills_l8";
@@ -195,7 +203,7 @@
 			regulator-min-microvolt = <480000>;
 			regulator-max-microvolt = <900000>;
 			qcom,init-voltage = <480000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -231,9 +239,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa10";
+		qcom,regulator-type = "pmic5-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pmxpoorwills_l10: regualtor-pmxpoorwills-l10 {
 			regulator-name = "pmxpoorwills_l10";
@@ -241,7 +250,7 @@
 			regulator-min-microvolt = <3088000>;
 			regulator-max-microvolt = <3088000>;
 			qcom,init-voltage = <3088000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -249,9 +258,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa11";
+		qcom,regulator-type = "pmic5-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pmxpoorwills_l11: regualtor-pmxpoorwills-l11 {
 			  regulator-name = "pmxpoorwills_l11";
@@ -259,7 +269,7 @@
 			  regulator-min-microvolt = <1704000>;
 			  regulator-max-microvolt = <3000000>;
 			  qcom,init-voltage = <1704000>;
-			  qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			  qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		  };
 	};
 
@@ -267,9 +277,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa12";
+		qcom,regulator-type = "pmic5-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pmxpoorwills_l12: regualtor-pmxpoorwills-l12 {
 			  regulator-name = "pmxpoorwills_l12";
@@ -277,7 +288,7 @@
 			  regulator-min-microvolt = <2704000>;
 			  regulator-max-microvolt = <2704000>;
 			  qcom,init-voltage = <2704000>;
-			  qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			  qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		  };
 	};
 
@@ -285,9 +296,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa13";
+		qcom,regulator-type = "pmic5-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pmxpoorwills_l13: regualtor-pmxpoorwills-l13 {
 			  regulator-name = "pmxpoorwills_l13";
@@ -295,7 +307,7 @@
 			  regulator-min-microvolt = <1704000>;
 			  regulator-max-microvolt = <3000000>;
 			  qcom,init-voltage = <1704000>;
-			  qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			  qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		  };
 	};
 
@@ -303,9 +315,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa14";
+		qcom,regulator-type = "pmic5-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pmxpoorwills_l14: regualtor-pmxpoorwills-l14 {
 			  regulator-name = "pmxpoorwills_l14";
@@ -313,7 +326,7 @@
 			  regulator-min-microvolt = <600000>;
 			  regulator-max-microvolt = <800000>;
 			  qcom,init-voltage = <600000>;
-			  qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			  qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		  };
 	};
 
@@ -321,9 +334,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa16";
+		qcom,regulator-type = "pmic5-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pmxpoorwills_l16: regualtor-pmxpoorwills-l16 {
 			  regulator-name = "pmxpoorwills_l16";
@@ -331,7 +345,7 @@
 			  regulator-min-microvolt = <304000>;
 			  regulator-max-microvolt = <880000>;
 			  qcom,init-voltage = <304000>;
-			  qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			  qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		  };
 	};
 
@@ -359,4 +373,32 @@
 		regulator-max-microvolt = <1800000>;
 		regulator-always-on;
 	};
+
+	vreg_sd_mmc: vreg_sd_mmc {
+		compatible = "regulator-fixed";
+		regulator-name = "vreg_sd_mmc";
+		startup-delay-us = <4000>;
+		enable-active-high;
+		gpio = <&tlmm 92 GPIO_ACTIVE_HIGH>;
+	};
+
+	vreg_emac_phy: emac_phy_regulator {
+		compatible = "regulator-fixed";
+		regulator-name = "emac_phy";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-enable-ramp-delay = <100>;
+		gpio = <&tlmm 96 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+
+	vreg_rgmii_io_pads: rgmii_io_pads_regulator {
+		compatible = "regulator-fixed";
+		regulator-name = "rgmii_io_pads";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		regulator-enable-ramp-delay = <100>;
+		gpio = <&tlmm 83 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
 };
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-usb.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-usb.dtsi
index 926044a..77e1763 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-usb.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-usb.dtsi
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -74,6 +74,37 @@
 			snps,has-lpm-erratum;
 			snps,hird-threshold = /bits/ 8 <0x10>;
 		};
+
+		qcom,usbbam@a704000 {
+			compatible = "qcom,usb-bam-msm";
+			reg = <0xa704000 0x17000>;
+			interrupts = <0 132 0>;
+
+			qcom,bam-type = <0>;
+			qcom,usb-bam-fifo-baseaddr = <0x14689000>;
+			qcom,usb-bam-num-pipes = <8>;
+			qcom,ignore-core-reset-ack;
+			qcom,disable-clk-gating;
+			qcom,usb-bam-override-threshold = <0x4001>;
+			qcom,usb-bam-max-mbps-highspeed = <400>;
+			qcom,usb-bam-max-mbps-superspeed = <3600>;
+			qcom,reset-bam-on-connect;
+
+			qcom,pipe0 {
+				label = "ssusb-qdss-in-0";
+				qcom,usb-bam-mem-type = <2>;
+				qcom,dir = <1>;
+				qcom,pipe-num = <0>;
+				qcom,peer-bam = <0>;
+				qcom,peer-bam-physical-address = <0x6064000>;
+				qcom,src-bam-pipe-index = <0>;
+				qcom,dst-bam-pipe-index = <0>;
+				qcom,data-fifo-offset = <0x0>;
+				qcom,data-fifo-size = <0x1800>;
+				qcom,descriptor-fifo-offset = <0x1800>;
+				qcom,descriptor-fifo-size = <0x800>;
+			};
+		};
 	};
 
 	/* USB port for High Speed PHY */
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
index f5351de..13d50f8 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -17,12 +17,13 @@
 #include <dt-bindings/clock/qcom,gcc-sdxpoorwills.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include <dt-bindings/clock/qcom,aop-qmp.h>
 
 / {
 	model = "Qualcomm Technologies, Inc. SDX POORWILLS";
 	compatible = "qcom,sdxpoorwills";
 	qcom,msm-id = <334 0x0>, <335 0x0>;
-	interrupt-parent = <&intc>;
+	interrupt-parent = <&pdc>;
 
 	reserved-memory {
 		#address-cells = <1>;
@@ -67,8 +68,9 @@
 		#address-cells = <1>;
 
 		CPU0: cpu@0 {
-			device-type = "cpu";
+			device_type = "cpu";
 			compatible = "arm,cortex-a7";
+			enable-method = "psci";
 			reg = <0x0>;
 			#cooling-cells = <2>;
 		};
@@ -76,6 +78,13 @@
 
 	aliases {
 		qpic_nand1 = &qnand_1;
+		pci-domain0 = &pcie0;
+		sdhc1 = &sdhc_1; /* SDC1 eMMC/SD/SDIO slot */
+	};
+
+	psci {
+		compatible = "arm,psci-1.0";
+		method = "smc";
 	};
 
 	soc: soc { };
@@ -93,6 +102,15 @@
 		#interrupt-cells = <3>;
 		reg = <0x17800000 0x1000>,
 		      <0x17802000 0x1000>;
+		interrupt-parent = <&intc>;
+	};
+
+	pdc: interrupt-controller@b210000{
+		compatible = "qcom,pdc-sdxpoorwills";
+		reg = <0xb210000 0x30000>;
+		#interrupt-cells = <3>;
+		interrupt-parent = <&intc>;
+		interrupt-controller;
 	};
 
 	timer {
@@ -221,6 +239,13 @@
 		mbox-names = "apps";
 	};
 
+	clock_aop: qcom,aopclk {
+		compatible = "qcom,aop-qmp-clk-v1";
+		#clock-cells = <1>;
+		mboxes = <&qmp_aop 0>;
+		mbox-names = "qdss_clk";
+	};
+
 	snoc_cnoc_keepalive: qcom,snoc_cnoc_keepalive {
 		compatible = "qcom,devbw";
 		governor = "powersave";
@@ -288,6 +313,45 @@
 		status = "disabled";
 	};
 
+	sdhc_1: sdhci@8804000 {
+		compatible = "qcom,sdhci-msm-v5";
+		reg = <0x8804000 0x1000>;
+		reg-names = "hc_mem";
+
+		interrupts = <0 210 0>, <0 227 0>;
+		interrupt-names = "hc_irq", "pwr_irq";
+
+		qcom,bus-width = <4>;
+
+		qcom,msm-bus,name = "sdhc1";
+		qcom,msm-bus,num-cases = <8>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =  <78 512 0 0>, /* No vote */
+				<78 512 1600 3200>,    /* 400 KB/s*/
+				<78 512 80000 160000>, /* 20 MB/s */
+				<78 512 100000 200000>, /* 25 MB/s */
+				<78 512 200000 400000>, /* 50 MB/s */
+				<78 512 400000 800000>, /* 100 MB/s */
+				<78 512 400000 800000>, /* 200 MB/s */
+				<78 512 2048000 4096000>; /* Max. bandwidth */
+		qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000
+						100000000 200000000 4294967295>;
+
+		/* PM QoS */
+		qcom,pm-qos-cpu-groups = <0x0>;
+		qcom,pm-qos-cmdq-latency-us = <70>;
+		qcom,pm-qos-legacy-latency-us = <70>;
+		qcom,pm-qos-irq-type = "affine_cores";
+		qcom,pm-qos-irq-cpu = <0>;
+		qcom,pm-qos-irq-latency = <70>;
+
+		clocks = <&clock_gcc GCC_SDCC1_AHB_CLK>,
+			<&clock_gcc GCC_SDCC1_APPS_CLK>;
+		clock-names = "iface_clk", "core_clk";
+
+		status = "disabled";
+	};
+
 	qcom,msm-imem@1468B000 {
 		compatible = "qcom,msm-imem";
 		reg = <0x1468B000 0x1000>; /* Address and size of IMEM */
@@ -345,6 +409,7 @@
 		compatible = "qcom,pil-tz-generic";
 		qcom,pas-id = <0xf>;
 		qcom,firmware-name = "ipa_fws";
+		qcom,pil-force-shutdown;
 	};
 
 	spmi_bus: qcom,spmi@c440000 {
@@ -472,7 +537,7 @@
 		vdd_cx-supply = <&pmxpoorwills_s5_level>;
 		qcom,proxy-reg-names = "vdd_cx";
 
-		qcom,pas-id = <0>;
+		qcom,pas-id = <4>;
 		qcom,smem-id = <421>;
 		qcom,proxy-timeout-ms = <10000>;
 		qcom,sysmon-id = <0>;
@@ -541,6 +606,8 @@
 		qcom,mhi-event-ring-id-limits = <9 10>; /* start and end */
 		qcom,modem-cfg-emb-pipe-flt;
 		qcom,use-ipa-pm;
+		qcom,arm-smmu;
+		qcom,smmu-fast-map;
 		qcom,bandwidth-vote-for-ipa;
 		qcom,msm-bus,name = "ipa";
 		qcom,msm-bus,num-cases = <5>;
@@ -679,8 +746,67 @@
 			compatible = "qcom,smp2pgpio-map-ipa-1-in";
 			gpios = <&smp2pgpio_ipa_1_in 0 0>;
 		};
+
+		ipa_smmu_ap: ipa_smmu_ap {
+			compatible = "qcom,ipa-smmu-ap-cb";
+			iommus = <&apps_smmu 0x5E0 0x0>;
+			qcom,iova-mapping = <0x20000000 0x40000000>;
+			qcom,additional-mapping =
+			/* modem tables in IMEM */
+			<0x14686000 0x14686000 0x3000>;
+		};
+
+		ipa_smmu_wlan: ipa_smmu_wlan {
+			compatible = "qcom,ipa-smmu-wlan-cb";
+			iommus = <&apps_smmu 0x5E1 0x0>;
+			qcom,additional-mapping =
+			/* ipa-uc ram */
+			<0x1E60000 0x1E60000 0xA000>;
+		};
+
+		ipa_smmu_uc: ipa_smmu_uc {
+			compatible = "qcom,ipa-smmu-uc-cb";
+			iommus = <&apps_smmu 0x5E2 0x0>;
+			qcom,iova-mapping = <0x40000000 0x20000000>;
+		};
 	};
 
+	qmp_aop: qcom,qmp-aop@c300000 {
+		compatible = "qcom,qmp-mbox";
+		label = "aop";
+		reg = <0xc300000 0x400>,
+			<0x17811008 0x4>;
+		reg-names = "msgram", "irq-reg-base";
+		qcom,irq-mask = <0x2>;
+		interrupts = <GIC_SPI 221 IRQ_TYPE_EDGE_RISING>;
+		priority = <0>;
+		mbox-desc-offset = <0x0>;
+		#mbox-cells = <1>;
+	};
+
+	usb_detect: qcom,gpio-usbdetect {
+		compatible = "qcom,gpio-usbdetect";
+		interrupt-parent = <&spmi_bus>;
+		interrupts = <0x0 0x0d 0x0 IRQ_TYPE_NONE>;
+		interrupt-names = "vbus_det_irq";
+		status = "disabled";
+	};
+};
+
+#include "pmxpoorwills.dtsi"
+#include "sdxpoorwills-blsp.dtsi"
+#include "sdxpoorwills-regulator.dtsi"
+#include "sdxpoorwills-smp2p.dtsi"
+#include "sdxpoorwills-usb.dtsi"
+#include "sdxpoorwills-pcie.dtsi"
+#include "sdxpoorwills-bus.dtsi"
+#include "sdxpoorwills-thermal.dtsi"
+#include "sdxpoorwills-audio.dtsi"
+#include "sdxpoorwills-ion.dtsi"
+#include "msm-arm-smmu-sdxpoorwills.dtsi"
+#include "sdxpoorwills-coresight.dtsi"
+
+&soc {
 	emac_hw: qcom,emac@00020000 {
 		compatible = "qcom,emac-dwc-eqos";
 		reg = <0x20000 0x10000>,
@@ -698,24 +824,31 @@
 			"tx-ch3-intr", "tx-ch4-intr",
 			"rx-ch0-intr", "rx-ch1-intr",
 			"rx-ch2-intr", "rx-ch3-intr";
+		qcom,msm-bus,name = "emac";
+		qcom,msm-bus,num-cases = <3>;
+		qcom,msm-bus,num-paths = <2>;
+		qcom,msm-bus,vectors-KBps =
+			<98 512 1250 0>, <1 781 0 40000>,  /* 10Mbps vote */
+			<98 512 12500 0>, <1 781 0 40000>,  /* 100Mbps vote */
+			<98 512 125000 0>, <1 781 0 40000>; /* 1000Mbps vote */
+		qcom,bus-vector-names = "10", "100", "1000";
+		clocks = <&clock_gcc GCC_ETH_AXI_CLK>,
+			<&clock_gcc GCC_ETH_PTP_CLK>,
+			<&clock_gcc GCC_ETH_RGMII_CLK>,
+			<&clock_gcc GCC_ETH_SLAVE_AHB_CLK>;
+		clock-names = "eth_axi_clk", "eth_ptp_clk",
+			"eth_rgmii_clk", "eth_slave_ahb_clk";
+		qcom,phy-intr-redirect = <&tlmm 84 GPIO_ACTIVE_LOW>;
+		qcom,phy-reset = <&tlmm 85 GPIO_ACTIVE_LOW>;
+		vreg_rgmii-supply = <&vreg_rgmii>;
+		vreg_emac_phy-supply =  <&vreg_emac_phy>;
+		vreg_rgmii_io_pads-supply = <&vreg_rgmii_io_pads>;
+		gdsc_emac-supply = <&gdsc_emac>;
 		io-macro-info {
 			io-macro-bypass-mode = <0>;
 			io-interface = "rgmii";
 		};
 	};
-
-	qmp_aop: qcom,qmp-aop@c300000 {
-		compatible = "qcom,qmp-mbox";
-		label = "aop";
-		reg = <0xc300000 0x400>,
-			<0x17811008 0x4>;
-		reg-names = "msgram", "irq-reg-base";
-		qcom,irq-mask = <0x2>;
-		interrupts = <GIC_SPI 221 IRQ_TYPE_EDGE_RISING>;
-		priority = <0>;
-		mbox-desc-offset = <0x0>;
-		#mbox-cells = <1>;
-	};
 };
 
 #include "pmxpoorwills.dtsi"
@@ -723,8 +856,10 @@
 #include "sdxpoorwills-regulator.dtsi"
 #include "sdxpoorwills-smp2p.dtsi"
 #include "sdxpoorwills-usb.dtsi"
+#include "sdxpoorwills-pcie.dtsi"
 #include "sdxpoorwills-bus.dtsi"
 #include "sdxpoorwills-thermal.dtsi"
 #include "sdxpoorwills-audio.dtsi"
 #include "sdxpoorwills-ion.dtsi"
 #include "msm-arm-smmu-sdxpoorwills.dtsi"
+#include "sdxpoorwills-pm.dtsi"
diff --git a/arch/arm/configs/msm8953-perf_defconfig b/arch/arm/configs/msm8953-perf_defconfig
index fd1cac3..9d710e3 100644
--- a/arch/arm/configs/msm8953-perf_defconfig
+++ b/arch/arm/configs/msm8953-perf_defconfig
@@ -73,6 +73,7 @@
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
 CONFIG_CPU_BOOST=y
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_CPU_FREQ_MSM=y
 CONFIG_CPU_IDLE=y
 CONFIG_VFP=y
 CONFIG_NEON=y
@@ -230,6 +231,7 @@
 CONFIG_IPC_ROUTER=y
 CONFIG_IPC_ROUTER_SECURITY=y
 CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
 CONFIG_DMA_CMA=y
 CONFIG_ZRAM=y
 CONFIG_BLK_DEV_LOOP=y
@@ -280,6 +282,7 @@
 CONFIG_INPUT_JOYSTICK=y
 CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_INPUT_MISC=y
+CONFIG_INPUT_QPNP_POWER_ON=y
 CONFIG_INPUT_UINPUT=y
 # CONFIG_SERIO_SERPORT is not set
 # CONFIG_VT is not set
@@ -300,9 +303,17 @@
 CONFIG_PINCTRL_MSM8953=y
 CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
 CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_QPNP_PIN=y
+CONFIG_GPIO_QPNP_PIN_DEBUG=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_QCOM_DLOAD_MODE=y
 CONFIG_POWER_SUPPLY=y
+CONFIG_QPNP_FG=y
 CONFIG_SMB135X_CHARGER=y
 CONFIG_SMB1351_USB_CHARGER=y
+CONFIG_QPNP_SMBCHARGER=y
+CONFIG_QPNP_TYPEC=y
 CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
 CONFIG_THERMAL=y
 CONFIG_THERMAL_QPNP=y
@@ -310,6 +321,7 @@
 CONFIG_THERMAL_TSENS=y
 CONFIG_MSM_BCL_PERIPHERAL_CTL=y
 CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_MFD_SPMI_PMIC=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_PROXY_CONSUMER=y
@@ -317,6 +329,7 @@
 CONFIG_REGULATOR_MEM_ACC=y
 CONFIG_REGULATOR_MSM_GFX_LDO=y
 CONFIG_REGULATOR_QPNP_LABIBB=y
+CONFIG_REGULATOR_QPNP_LCDB=y
 CONFIG_REGULATOR_QPNP=y
 CONFIG_MEDIA_SUPPORT=y
 CONFIG_MEDIA_CAMERA_SUPPORT=y
@@ -363,7 +376,10 @@
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_QPNP=y
+CONFIG_LEDS_QPNP_FLASH=y
 CONFIG_LEDS_QPNP_WLED=y
+CONFIG_LEDS_QPNP_HAPTICS=y
+CONFIG_LEDS_QPNP_VIBRATOR_LDO=y
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_EDAC=y
 CONFIG_EDAC_MM_EDAC=y
@@ -425,6 +441,7 @@
 CONFIG_EXT4_FS_SECURITY=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
 CONFIG_FUSE_FS=y
 CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
diff --git a/arch/arm/configs/msm8953_defconfig b/arch/arm/configs/msm8953_defconfig
index c126ccd..03f297f 100644
--- a/arch/arm/configs/msm8953_defconfig
+++ b/arch/arm/configs/msm8953_defconfig
@@ -79,6 +79,7 @@
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
 CONFIG_CPU_BOOST=y
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_CPU_FREQ_MSM=y
 CONFIG_CPU_IDLE=y
 CONFIG_VFP=y
 CONFIG_NEON=y
@@ -238,6 +239,7 @@
 CONFIG_IPC_ROUTER=y
 CONFIG_IPC_ROUTER_SECURITY=y
 CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
 CONFIG_DMA_CMA=y
 CONFIG_ZRAM=y
 CONFIG_BLK_DEV_LOOP=y
@@ -290,6 +292,7 @@
 CONFIG_INPUT_JOYSTICK=y
 CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_INPUT_MISC=y
+CONFIG_INPUT_QPNP_POWER_ON=y
 CONFIG_INPUT_UINPUT=y
 # CONFIG_SERIO_SERPORT is not set
 # CONFIG_VT is not set
@@ -310,9 +313,17 @@
 CONFIG_PINCTRL_MSM8953=y
 CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
 CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_QPNP_PIN=y
+CONFIG_GPIO_QPNP_PIN_DEBUG=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_QCOM_DLOAD_MODE=y
 CONFIG_POWER_SUPPLY=y
+CONFIG_QPNP_FG=y
 CONFIG_SMB135X_CHARGER=y
 CONFIG_SMB1351_USB_CHARGER=y
+CONFIG_QPNP_SMBCHARGER=y
+CONFIG_QPNP_TYPEC=y
 CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
 CONFIG_THERMAL=y
 CONFIG_THERMAL_QPNP=y
@@ -320,6 +331,7 @@
 CONFIG_THERMAL_TSENS=y
 CONFIG_MSM_BCL_PERIPHERAL_CTL=y
 CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_MFD_SPMI_PMIC=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_PROXY_CONSUMER=y
@@ -327,6 +339,7 @@
 CONFIG_REGULATOR_MEM_ACC=y
 CONFIG_REGULATOR_MSM_GFX_LDO=y
 CONFIG_REGULATOR_QPNP_LABIBB=y
+CONFIG_REGULATOR_QPNP_LCDB=y
 CONFIG_REGULATOR_QPNP=y
 CONFIG_MEDIA_SUPPORT=y
 CONFIG_MEDIA_CAMERA_SUPPORT=y
@@ -375,7 +388,10 @@
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_QPNP=y
+CONFIG_LEDS_QPNP_FLASH=y
 CONFIG_LEDS_QPNP_WLED=y
+CONFIG_LEDS_QPNP_HAPTICS=y
+CONFIG_LEDS_QPNP_VIBRATOR_LDO=y
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_EDAC=y
 CONFIG_EDAC_MM_EDAC=y
@@ -442,6 +458,7 @@
 CONFIG_EXT4_FS_SECURITY=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
 CONFIG_FUSE_FS=y
 CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
diff --git a/arch/arm/configs/sdxpoorwills-perf_defconfig b/arch/arm/configs/sdxpoorwills-perf_defconfig
index 507d0ad..a515e00 100644
--- a/arch/arm/configs/sdxpoorwills-perf_defconfig
+++ b/arch/arm/configs/sdxpoorwills-perf_defconfig
@@ -25,6 +25,7 @@
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_SDXPOORWILLS=y
+CONFIG_PCI_MSM=y
 CONFIG_PREEMPT=y
 CONFIG_AEABI=y
 CONFIG_CMA=y
@@ -44,9 +45,13 @@
 CONFIG_IP_MULTICAST=y
 CONFIG_IP_ADVANCED_ROUTER=y
 CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_NET_IPGRE=y
 CONFIG_IP_MROUTE=y
 CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
 CONFIG_IP_PIMSM_V2=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_GRE=y
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_IPV6_SUBTREES=y
 CONFIG_IPV6_MROUTE=y
@@ -70,6 +75,8 @@
 CONFIG_NF_CONNTRACK_TFTP=y
 CONFIG_NF_CT_NETLINK=y
 CONFIG_NF_CT_NETLINK_TIMEOUT=y
+CONFIG_NF_CT_NETLINK_HELPER=y
+CONFIG_NETFILTER_NETLINK_GLUE_CT=y
 CONFIG_NETFILTER_XT_TARGET_LOG=y
 CONFIG_NETFILTER_XT_TARGET_MARK=y
 CONFIG_NETFILTER_XT_TARGET_NFLOG=y
@@ -77,6 +84,7 @@
 CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
 CONFIG_NETFILTER_XT_TARGET_TPROXY=y
 CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
 CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y
 CONFIG_NETFILTER_XT_MATCH_CONNLABEL=y
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
@@ -100,6 +108,7 @@
 CONFIG_IP_NF_TARGET_REJECT=y
 CONFIG_IP_NF_NAT=y
 CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NATTYPE_MODULE=y
 CONFIG_IP_NF_TARGET_NETMAP=y
 CONFIG_IP_NF_TARGET_REDIRECT=y
 CONFIG_IP_NF_MANGLE=y
@@ -310,8 +319,10 @@
 CONFIG_SPS=y
 CONFIG_SPS_SUPPORT_NDP_BAM=y
 CONFIG_QPNP_REVID=y
+CONFIG_GPIO_USB_DETECT=y
 CONFIG_USB_BAM=y
 CONFIG_MSM_CLK_RPMH=y
+CONFIG_MSM_CLK_AOP_QMP=y
 CONFIG_MDM_GCC_SDXPOORWILLS=y
 CONFIG_MDM_CLOCK_CPU_SDXPOORWILLS=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
@@ -362,4 +373,13 @@
 # CONFIG_DEBUG_PREEMPT is not set
 CONFIG_IPC_LOGGING=y
 CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_REMOTE_ETM=y
+CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_EVENT=y
+CONFIG_CORESIGHT_HWEVENT=y
 CONFIG_QMI_ENCDEC=y
diff --git a/arch/arm/configs/sdxpoorwills_defconfig b/arch/arm/configs/sdxpoorwills_defconfig
index c21c8d5..ba1acda 100644
--- a/arch/arm/configs/sdxpoorwills_defconfig
+++ b/arch/arm/configs/sdxpoorwills_defconfig
@@ -27,6 +27,7 @@
 CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_SDXPOORWILLS=y
 # CONFIG_VDSO is not set
+CONFIG_PCI_MSM=y
 CONFIG_PREEMPT=y
 CONFIG_AEABI=y
 CONFIG_CMA=y
@@ -46,9 +47,13 @@
 CONFIG_IP_MULTICAST=y
 CONFIG_IP_ADVANCED_ROUTER=y
 CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_NET_IPGRE=y
 CONFIG_IP_MROUTE=y
 CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
 CONFIG_IP_PIMSM_V2=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_GRE=y
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_IPV6_SUBTREES=y
 CONFIG_IPV6_MROUTE=y
@@ -72,6 +77,8 @@
 CONFIG_NF_CONNTRACK_TFTP=y
 CONFIG_NF_CT_NETLINK=y
 CONFIG_NF_CT_NETLINK_TIMEOUT=y
+CONFIG_NF_CT_NETLINK_HELPER=y
+CONFIG_NETFILTER_NETLINK_GLUE_CT=y
 CONFIG_NETFILTER_XT_TARGET_LOG=y
 CONFIG_NETFILTER_XT_TARGET_MARK=y
 CONFIG_NETFILTER_XT_TARGET_NFLOG=y
@@ -79,6 +86,7 @@
 CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
 CONFIG_NETFILTER_XT_TARGET_TPROXY=y
 CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
 CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y
 CONFIG_NETFILTER_XT_MATCH_CONNLABEL=y
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
@@ -102,6 +110,7 @@
 CONFIG_IP_NF_TARGET_REJECT=y
 CONFIG_IP_NF_NAT=y
 CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NATTYPE_MODULE=y
 CONFIG_IP_NF_TARGET_NETMAP=y
 CONFIG_IP_NF_TARGET_REDIRECT=y
 CONFIG_IP_NF_MANGLE=y
@@ -291,6 +300,7 @@
 CONFIG_MMC_TEST=m
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_QPNP=y
 CONFIG_DMADEVICES=y
@@ -308,7 +318,10 @@
 CONFIG_SPS=y
 CONFIG_SPS_SUPPORT_NDP_BAM=y
 CONFIG_QPNP_REVID=y
+CONFIG_GPIO_USB_DETECT=y
+CONFIG_USB_BAM=y
 CONFIG_MSM_CLK_RPMH=y
+CONFIG_MSM_CLK_AOP_QMP=y
 CONFIG_MDM_GCC_SDXPOORWILLS=y
 CONFIG_MDM_CLOCK_CPU_SDXPOORWILLS=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
@@ -338,15 +351,12 @@
 CONFIG_MSM_PIL_SSR_GENERIC=y
 CONFIG_QCOM_COMMAND_DB=y
 CONFIG_MSM_PM=y
-CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
 CONFIG_QCOM_DEVFREQ_DEVBW=y
-CONFIG_EXTCON=y
 CONFIG_IIO=y
 CONFIG_PWM=y
 CONFIG_PWM_QPNP=y
 CONFIG_QCOM_SHOW_RESUME_IRQ=y
 CONFIG_ANDROID=y
-CONFIG_STM=y
 CONFIG_EXT3_FS=y
 CONFIG_EXT4_FS_SECURITY=y
 CONFIG_VFAT_FS=y
@@ -378,6 +388,19 @@
 CONFIG_IPC_LOGGING=y
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_DEBUG_USER=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_REMOTE_ETM=y
+CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_EVENT=y
+CONFIG_CORESIGHT_TGU=y
+CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_DUMMY=y
 CONFIG_CRYPTO_ECB=y
 CONFIG_CRYPTO_CMAC=y
 CONFIG_CRYPTO_SHA256=y
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 68b06f9..12f99fd 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -516,4 +516,22 @@
 #endif
 	.endm
 
+	.macro	bug, msg, line
+#ifdef CONFIG_THUMB2_KERNEL
+1:	.inst	0xde02
+#else
+1:	.inst	0xe7f001f2
+#endif
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+	.pushsection .rodata.str, "aMS", %progbits, 1
+2:	.asciz	"\msg"
+	.popsection
+	.pushsection __bug_table, "aw"
+	.align	2
+	.word	1b, 2b
+	.hword	\line
+	.popsection
+#endif
+	.endm
+
 #endif /* __ASM_ASSEMBLER_H__ */
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index f13ae15..d2315ff 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -112,8 +112,12 @@
 #define CORE_DUMP_USE_REGSET
 #define ELF_EXEC_PAGESIZE	4096
 
-/* This is the base location for PIE (ET_DYN with INTERP) loads. */
-#define ELF_ET_DYN_BASE		0x400000UL
+/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
+   use of this is to invoke "./ld.so someprog" to test out a new version of
+   the loader.  We need to make sure that it is out of the way of the program
+   that it will "exec", and that there is sufficient room for the brk.  */
+
+#define ELF_ET_DYN_BASE	(TASK_SIZE / 3 * 2)
 
 /* When the program starts, a1 contains a pointer to a function to be 
    registered with atexit, as per the SVR4 ABI.  A value of 0 means we 
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index e22089f..98d6de1 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -161,8 +161,7 @@
 #else
 #define VTTBR_X		(5 - KVM_T0SZ)
 #endif
-#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
-#define VTTBR_BADDR_MASK  (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_BADDR_MASK  (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_X)
 #define VTTBR_VMID_SHIFT  _AC(48, ULL)
 #define VTTBR_VMID_MASK(size)	(_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
 
@@ -209,6 +208,7 @@
 #define HSR_EC_IABT_HYP	(0x21)
 #define HSR_EC_DABT	(0x24)
 #define HSR_EC_DABT_HYP	(0x25)
+#define HSR_EC_MAX	(0x3f)
 
 #define HSR_WFI_IS_WFE		(_AC(1, UL) << 0)
 
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index a58bbaa..d10e362 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -223,6 +223,16 @@
 	return 8;
 }
 
+static inline void *kvm_get_hyp_vector(void)
+{
+	return kvm_ksym_ref(__kvm_hyp_vector);
+}
+
+static inline int kvm_map_vectors(void)
+{
+	return 0;
+}
+
 #endif	/* !__ASSEMBLY__ */
 
 #endif /* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h
index 062c484..906623e 100644
--- a/arch/arm/include/asm/system_misc.h
+++ b/arch/arm/include/asm/system_misc.h
@@ -22,6 +22,7 @@
 
 extern unsigned int user_debug;
 extern char* (*arch_read_hardware_id)(void);
+const char * __init arch_read_machine_name(void);
 
 #endif /* !__ASSEMBLY__ */
 
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 1f59ea05..b7e0125 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -478,11 +478,10 @@
 arm_copy_from_user(void *to, const void __user *from, unsigned long n);
 
 static inline unsigned long __must_check
-__copy_from_user(void *to, const void __user *from, unsigned long n)
+__arch_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	unsigned int __ua_flags;
 
-	check_object_size(to, n, false);
 	__ua_flags = uaccess_save_and_enable();
 	n = arm_copy_from_user(to, from, n);
 	uaccess_restore(__ua_flags);
@@ -495,18 +494,15 @@
 __copy_to_user_std(void __user *to, const void *from, unsigned long n);
 
 static inline unsigned long __must_check
-__copy_to_user(void __user *to, const void *from, unsigned long n)
+__arch_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 #ifndef CONFIG_UACCESS_WITH_MEMCPY
 	unsigned int __ua_flags;
-
-	check_object_size(from, n, true);
 	__ua_flags = uaccess_save_and_enable();
 	n = arm_copy_to_user(to, from, n);
 	uaccess_restore(__ua_flags);
 	return n;
 #else
-	check_object_size(from, n, true);
 	return arm_copy_to_user(to, from, n);
 #endif
 }
@@ -526,25 +522,49 @@
 }
 
 #else
-#define __copy_from_user(to, from, n)	(memcpy(to, (void __force *)from, n), 0)
-#define __copy_to_user(to, from, n)	(memcpy((void __force *)to, from, n), 0)
+#define __arch_copy_from_user(to, from, n)	\
+					(memcpy(to, (void __force *)from, n), 0)
+#define __arch_copy_to_user(to, from, n)	\
+					(memcpy((void __force *)to, from, n), 0)
 #define __clear_user(addr, n)		(memset((void __force *)addr, 0, n), 0)
 #endif
 
-static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
+static inline unsigned long __must_check
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	check_object_size(to, n, false);
+	return __arch_copy_from_user(to, from, n);
+}
+
+static inline unsigned long __must_check
+copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	unsigned long res = n;
+
+	check_object_size(to, n, false);
+
 	if (likely(access_ok(VERIFY_READ, from, n)))
-		res = __copy_from_user(to, from, n);
+		res = __arch_copy_from_user(to, from, n);
 	if (unlikely(res))
 		memset(to + (n - res), 0, res);
 	return res;
 }
 
-static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
+static inline unsigned long __must_check
+__copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+	check_object_size(from, n, true);
+
+	return __arch_copy_to_user(to, from, n);
+}
+
+static inline unsigned long __must_check
+copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	check_object_size(from, n, true);
+
 	if (access_ok(VERIFY_WRITE, to, n))
-		n = __copy_to_user(to, from, n);
+		n = __arch_copy_to_user(to, from, n);
 	return n;
 }
 
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 6391728..e056c9a 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -299,6 +299,8 @@
 	mov	r2, sp
 	ldr	r1, [r2, #\offset + S_PSR]	@ get calling cpsr
 	ldr	lr, [r2, #\offset + S_PC]!	@ get pc
+	tst	r1, #PSR_I_BIT | 0x0f
+	bne	1f
 	msr	spsr_cxsf, r1			@ save in spsr_svc
 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
 	@ We must avoid clrex due to Cortex-A15 erratum #830321
@@ -313,6 +315,7 @@
 						@ after ldm {}^
 	add	sp, sp, #\offset + PT_REGS_SIZE
 	movs	pc, lr				@ return & move spsr_svc into cpsr
+1:	bug	"Returning to usermode but unexpected PSR bits set?", \@
 #elif defined(CONFIG_CPU_V7M)
 	@ V7M restore.
 	@ Note that we don't need to do clrex here as clearing the local
@@ -328,6 +331,8 @@
 	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
 	ldr	lr, [sp, #\offset + S_PC]	@ get pc
 	add	sp, sp, #\offset + S_SP
+	tst	r1, #PSR_I_BIT | 0x0f
+	bne	1f
 	msr	spsr_cxsf, r1			@ save in spsr_svc
 
 	@ We must avoid clrex due to Cortex-A15 erratum #830321
@@ -340,6 +345,7 @@
 	.endif
 	add	sp, sp, #PT_REGS_SIZE - S_SP
 	movs	pc, lr				@ return & move spsr_svc into cpsr
+1:	bug	"Returning to usermode but unexpected PSR bits set?", \@
 #endif	/* !CONFIG_THUMB2_KERNEL */
 	.endm
 
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 877f461..09dd8ff 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -1174,7 +1174,7 @@
 
 	return 0;
 }
-subsys_initcall(topology_init);
+postcore_initcall(topology_init);
 
 #ifdef CONFIG_HAVE_PROC_CPU
 static int __init proc_cpu_init(void)
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index 066b6d4..42f5daf 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -79,7 +79,19 @@
 	return 1;
 }
 
+static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+
+	kvm_pr_unimpl("Unknown exception class: hsr: %#08x\n",
+		      hsr);
+
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
 static exit_handle_fn arm_exit_handlers[] = {
+	[0 ... HSR_EC_MAX]	= kvm_handle_unknown_ec,
 	[HSR_EC_WFI]		= kvm_handle_wfx,
 	[HSR_EC_CP15_32]	= kvm_handle_cp15_32,
 	[HSR_EC_CP15_64]	= kvm_handle_cp15_64,
@@ -98,13 +110,6 @@
 {
 	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
 
-	if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
-	    !arm_exit_handlers[hsr_ec]) {
-		kvm_err("Unknown exception class: hsr: %#08x\n",
-			(unsigned int)kvm_vcpu_get_hsr(vcpu));
-		BUG();
-	}
-
 	return arm_exit_handlers[hsr_ec];
 }
 
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index b6e715f..dac7ceb 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -112,7 +112,7 @@
 		}
 
 		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
-			       data);
+			       &data);
 		data = vcpu_data_host_to_guest(vcpu, data, len);
 		vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
 	}
@@ -182,14 +182,14 @@
 		data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
 					       len);
 
-		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
+		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, &data);
 		kvm_mmio_write_buf(data_buf, len, data);
 
 		ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
 				       data_buf);
 	} else {
 		trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
-			       fault_ipa, 0);
+			       fault_ipa, NULL);
 
 		ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
 				      data_buf);
diff --git a/arch/arm/mach-omap1/dma.c b/arch/arm/mach-omap1/dma.c
index f6ba589..c821c1d 100644
--- a/arch/arm/mach-omap1/dma.c
+++ b/arch/arm/mach-omap1/dma.c
@@ -32,7 +32,6 @@
 #include "soc.h"
 
 #define OMAP1_DMA_BASE			(0xfffed800)
-#define OMAP1_LOGICAL_DMA_CH_COUNT	17
 
 static u32 enable_1510_mode;
 
@@ -348,8 +347,6 @@
 		goto exit_iounmap;
 	}
 
-	d->lch_count		= OMAP1_LOGICAL_DMA_CH_COUNT;
-
 	/* Valid attributes for omap1 plus processors */
 	if (cpu_is_omap15xx())
 		d->dev_caps = ENABLE_1510_MODE;
@@ -366,13 +363,14 @@
 	d->dev_caps		|= CLEAR_CSR_ON_READ;
 	d->dev_caps		|= IS_WORD_16;
 
-	if (cpu_is_omap15xx())
-		d->chan_count = 9;
-	else if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
-		if (!(d->dev_caps & ENABLE_1510_MODE))
-			d->chan_count = 16;
+	/* available logical channels */
+	if (cpu_is_omap15xx()) {
+		d->lch_count = 9;
+	} else {
+		if (d->dev_caps & ENABLE_1510_MODE)
+			d->lch_count = 9;
 		else
-			d->chan_count = 9;
+			d->lch_count = 16;
 	}
 
 	p = dma_plat_info;
diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
index 8633c70..2944af8 100644
--- a/arch/arm/mach-omap2/gpmc-onenand.c
+++ b/arch/arm/mach-omap2/gpmc-onenand.c
@@ -367,7 +367,7 @@
 	return ret;
 }
 
-void gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
+int gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
 {
 	int err;
 	struct device *dev = &gpmc_onenand_device.dev;
@@ -393,15 +393,17 @@
 	if (err < 0) {
 		dev_err(dev, "Cannot request GPMC CS %d, error %d\n",
 			gpmc_onenand_data->cs, err);
-		return;
+		return err;
 	}
 
 	gpmc_onenand_resource.end = gpmc_onenand_resource.start +
 							ONENAND_IO_SIZE - 1;
 
-	if (platform_device_register(&gpmc_onenand_device) < 0) {
+	err = platform_device_register(&gpmc_onenand_device);
+	if (err) {
 		dev_err(dev, "Unable to register OneNAND device\n");
 		gpmc_cs_free(gpmc_onenand_data->cs);
-		return;
 	}
+
+	return err;
 }
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 1cc4a6f..bca5415 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -3828,16 +3828,20 @@
  * Return: 0 if device named @dev_name is not likely to be accessible,
  * or 1 if it is likely to be accessible.
  */
-static int __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus,
-						       const char *dev_name)
+static bool __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus,
+							const char *dev_name)
 {
+	struct device_node *node;
+	bool available;
+
 	if (!bus)
-		return (omap_type() == OMAP2_DEVICE_TYPE_GP) ? 1 : 0;
+		return omap_type() == OMAP2_DEVICE_TYPE_GP;
 
-	if (of_device_is_available(of_find_node_by_name(bus, dev_name)))
-		return 1;
+	node = of_get_child_by_name(bus, dev_name);
+	available = of_device_is_available(node);
+	of_node_put(node);
 
-	return 0;
+	return available;
 }
 
 int __init omap3xxx_hwmod_init(void)
@@ -3906,15 +3910,20 @@
 
 	if (h_sham && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "sham")) {
 		r = omap_hwmod_register_links(h_sham);
-		if (r < 0)
+		if (r < 0) {
+			of_node_put(bus);
 			return r;
+		}
 	}
 
 	if (h_aes && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "aes")) {
 		r = omap_hwmod_register_links(h_aes);
-		if (r < 0)
+		if (r < 0) {
+			of_node_put(bus);
 			return r;
+		}
 	}
+	of_node_put(bus);
 
 	/*
 	 * Register hwmod links specific to certain ES levels of a
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index 770216b..88676fe 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -147,7 +147,7 @@
 	.nshutdown_gpio = 137,
 	.dev_name = "/dev/ttyO1",
 	.flow_cntrl = 1,
-	.baud_rate = 300000,
+	.baud_rate = 3000000,
 };
 
 static struct platform_device wl18xx_device = {
@@ -162,7 +162,7 @@
 	.nshutdown_gpio = 162,
 	.dev_name = "/dev/ttyO1",
 	.flow_cntrl = 1,
-	.baud_rate = 300000,
+	.baud_rate = 3000000,
 };
 
 static struct platform_device wl128x_device = {
diff --git a/arch/arm/mach-qcom/Kconfig b/arch/arm/mach-qcom/Kconfig
index f9dfe80..405e34d 100644
--- a/arch/arm/mach-qcom/Kconfig
+++ b/arch/arm/mach-qcom/Kconfig
@@ -44,12 +44,14 @@
 	select HAVE_ARM_ARCH_TIMER
 	select MSM_CORTEX_A7
 	select PINCTRL
+	select PCI
 	select QCOM_SCM if SMP
 	select MSM_JTAG_MM if CORESIGHT_ETM
 	select PM_DEVFREQ
 	select COMMON_CLK
 	select COMMON_CLK_QCOM
 	select QCOM_GDSC
+	select GENERIC_CLOCKEVENTS_BROADCAST
 
 config ARCH_MSM8953
 	bool "Enable support for MSM8953"
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index bfff16a..0a05c0a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1045,13 +1045,31 @@
 	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
 }
 
+/*
+ * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
+ * that the intention is to allow exporting memory allocated via the
+ * coherent DMA APIs through the dma_buf API, which only accepts a
+ * scattertable.  This presents a couple of problems:
+ * 1. Not all memory allocated via the coherent DMA APIs is backed by
+ *    a struct page
+ * 2. Passing coherent DMA memory into the streaming APIs is not allowed
+ *    as we will try to flush the memory through a different alias to that
+ *    actually being used (and the flushes are redundant.)
+ */
 int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
 		 void *cpu_addr, dma_addr_t handle, size_t size,
 		 unsigned long attrs)
 {
-	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
+	unsigned long pfn = dma_to_pfn(dev, handle);
+	struct page *page;
 	int ret;
 
+	/* If the PFN is not valid, we do not have a struct page */
+	if (!pfn_valid(pfn))
+		return -ENXIO;
+
+	page = pfn_to_page(pfn);
+
 	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
 	if (unlikely(ret))
 		return ret;
diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
index 9fe8e24..e1f6f0d 100644
--- a/arch/arm/mm/dump.c
+++ b/arch/arm/mm/dump.c
@@ -126,8 +126,8 @@
 		.val	= PMD_SECT_USER,
 		.set	= "USR",
 	}, {
-		.mask	= L_PMD_SECT_RDONLY,
-		.val	= L_PMD_SECT_RDONLY,
+		.mask	= L_PMD_SECT_RDONLY | PMD_SECT_AP2,
+		.val	= L_PMD_SECT_RDONLY | PMD_SECT_AP2,
 		.set	= "ro",
 		.clear	= "RW",
 #elif __LINUX_ARM_ARCH__ >= 6
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index b46d914..cae69148a 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -678,8 +678,8 @@
 		.start  = (unsigned long)_stext,
 		.end    = (unsigned long)__init_begin,
 #ifdef CONFIG_ARM_LPAE
-		.mask   = ~L_PMD_SECT_RDONLY,
-		.prot   = L_PMD_SECT_RDONLY,
+		.mask   = ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
+		.prot   = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
 #else
 		.mask   = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
 		.prot   = PMD_SECT_APX | PMD_SECT_AP_WRITE,
diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
index a4ec240..3eb018f 100644
--- a/arch/arm/probes/kprobes/core.c
+++ b/arch/arm/probes/kprobes/core.c
@@ -433,6 +433,7 @@
 	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+	kprobe_opcode_t *correct_ret_addr = NULL;
 
 	INIT_HLIST_HEAD(&empty_rp);
 	kretprobe_hash_lock(current, &head, &flags);
@@ -455,15 +456,7 @@
 			/* another task is sharing our hash bucket */
 			continue;
 
-		if (ri->rp && ri->rp->handler) {
-			__this_cpu_write(current_kprobe, &ri->rp->kp);
-			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
-			ri->rp->handler(ri, regs);
-			__this_cpu_write(current_kprobe, NULL);
-		}
-
 		orig_ret_address = (unsigned long)ri->ret_addr;
-		recycle_rp_inst(ri, &empty_rp);
 
 		if (orig_ret_address != trampoline_address)
 			/*
@@ -475,6 +468,33 @@
 	}
 
 	kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
+	correct_ret_addr = ri->ret_addr;
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+		if (ri->task != current)
+			/* another task is sharing our hash bucket */
+			continue;
+
+		orig_ret_address = (unsigned long)ri->ret_addr;
+		if (ri->rp && ri->rp->handler) {
+			__this_cpu_write(current_kprobe, &ri->rp->kp);
+			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
+			ri->ret_addr = correct_ret_addr;
+			ri->rp->handler(ri, regs);
+			__this_cpu_write(current_kprobe, NULL);
+		}
+
+		recycle_rp_inst(ri, &empty_rp);
+
+		if (orig_ret_address != trampoline_address)
+			/*
+			 * This is the real return address. Any other
+			 * instances associated with this task are for
+			 * other calls deeper on the call stack
+			 */
+			break;
+	}
+
 	kretprobe_hash_unlock(current, &flags);
 
 	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
diff --git a/arch/arm/probes/kprobes/test-core.c b/arch/arm/probes/kprobes/test-core.c
index 9775de2..a48354d 100644
--- a/arch/arm/probes/kprobes/test-core.c
+++ b/arch/arm/probes/kprobes/test-core.c
@@ -976,7 +976,10 @@
 void __naked __kprobes_test_case_start(void)
 {
 	__asm__ __volatile__ (
-		"stmdb	sp!, {r4-r11}				\n\t"
+		"mov	r2, sp					\n\t"
+		"bic	r3, r2, #7				\n\t"
+		"mov	sp, r3					\n\t"
+		"stmdb	sp!, {r2-r11}				\n\t"
 		"sub	sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
 		"bic	r0, lr, #1  @ r0 = inline data		\n\t"
 		"mov	r1, sp					\n\t"
@@ -996,7 +999,8 @@
 		"movne	pc, r0					\n\t"
 		"mov	r0, r4					\n\t"
 		"add	sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
-		"ldmia	sp!, {r4-r11}				\n\t"
+		"ldmia	sp!, {r2-r11}				\n\t"
+		"mov	sp, r2					\n\t"
 		"mov	pc, r0					\n\t"
 	);
 }
@@ -1012,7 +1016,8 @@
 		"bxne	r0					\n\t"
 		"mov	r0, r4					\n\t"
 		"add	sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
-		"ldmia	sp!, {r4-r11}				\n\t"
+		"ldmia	sp!, {r2-r11}				\n\t"
+		"mov	sp, r2					\n\t"
 		"bx	r0					\n\t"
 	);
 }
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 0848993..32a80d6 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -781,6 +781,34 @@
 	  However for 4K, we choose a higher default value, 11 as opposed to 10, giving us
 	  4M allocations matching the default size used by generic code.
 
+config UNMAP_KERNEL_AT_EL0
+	bool "Unmap kernel when running in userspace (aka \"KAISER\")" if EXPERT
+	default y
+	help
+	  Speculation attacks against some high-performance processors can
+	  be used to bypass MMU permission checks and leak kernel data to
+	  userspace. This can be defended against by unmapping the kernel
+	  when running in userspace, mapping it back in on exception entry
+	  via a trampoline page in the vector table.
+
+	  If unsure, say Y.
+
+config HARDEN_BRANCH_PREDICTOR
+	bool "Harden the branch predictor against aliasing attacks" if EXPERT
+	help
+	  Speculation attacks against some high-performance processors rely on
+	  being able to manipulate the branch predictor for a victim context by
+	  executing aliasing branches in the attacker context.  Such attacks
+	  can be partially mitigated against by clearing internal branch
+	  predictor state and limiting the prediction logic in some situations.
+
+	  This config option will take CPU-specific actions to harden the
+	  branch predictor against aliasing attacks and may rely on specific
+	  instruction sequences or control bits being set by the system
+	  firmware.
+
+	  If unsure, say Y.
+
 menuconfig ARMV8_DEPRECATED
 	bool "Emulate deprecated/obsolete ARMv8 instructions"
 	depends on COMPAT
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index d5a7418..d35cecb 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -121,6 +121,7 @@
 	select PM_OPP
 	select MFD_CORE
 	select SND_SOC_COMPRESS
+	select SND_HWDEP
 	help
 	  This enables support for the ARMv8 based Qualcomm chipsets.
 
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 1570602..b5e154e 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -14,8 +14,12 @@
 CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
 GZFLAGS		:=-9
 
-ifneq ($(CONFIG_RELOCATABLE),)
-LDFLAGS_vmlinux		+= -pie -shared -Bsymbolic
+ifeq ($(CONFIG_RELOCATABLE), y)
+# Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
+# for relative relocs, since this leads to better Image compression
+# with the relocation offsets always being zero.
+LDFLAGS_vmlinux		+= -pie -shared -Bsymbolic \
+			$(call ld-option, --no-apply-dynamic-relocs)
 endif
 
 ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index eaa15ce..31386bd 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -10,7 +10,8 @@
 		sdm845-4k-panel-mtp-overlay.dtbo \
 		sdm845-4k-panel-cdp-overlay.dtbo \
 		sdm845-4k-panel-qrd-overlay.dtbo \
-		sdm845-v2-qvr-overlay.dtbo \
+		sdm845-v2-qvr-evt-overlay.dtbo \
+		sdm845-v2-qvr-dvt-overlay.dtbo \
 		sdm845-v2-cdp-overlay.dtbo \
 		sdm845-v2-mtp-overlay.dtbo \
 		sdm845-v2-qrd-overlay.dtbo \
@@ -41,7 +42,10 @@
 		sda845-v2.1-qrd-overlay.dtbo \
 		sda845-v2.1-4k-panel-cdp-overlay.dtbo \
 		sda845-v2.1-4k-panel-mtp-overlay.dtbo \
-		sda845-v2.1-4k-panel-qrd-overlay.dtbo
+		sda845-v2.1-4k-panel-qrd-overlay.dtbo \
+		sda845-v2.1-cdp-sdxpoorwills-overlay.dtbo \
+		sda845-v2.1-mtp-sdxpoorwills-overlay.dtbo \
+		sda845-v2-mtp-sdxpoorwills-overlay.dtbo
 
 sdm845-cdp-overlay.dtbo-base := sdm845.dtb
 sdm845-mtp-overlay.dtbo-base := sdm845.dtb
@@ -49,7 +53,8 @@
 sdm845-4k-panel-mtp-overlay.dtbo-base := sdm845.dtb
 sdm845-4k-panel-cdp-overlay.dtbo-base := sdm845.dtb
 sdm845-4k-panel-qrd-overlay.dtbo-base := sdm845.dtb
-sdm845-v2-qvr-overlay.dtbo-base := sdm845-v2.dtb
+sdm845-v2-qvr-evt-overlay.dtbo-base := sdm845-v2.dtb
+sdm845-v2-qvr-dvt-overlay.dtbo-base := sdm845-v2.dtb
 sdm845-v2-cdp-overlay.dtbo-base := sdm845-v2.dtb
 sdm845-v2-mtp-overlay.dtbo-base := sdm845-v2.dtb
 sdm845-v2-qrd-overlay.dtbo-base := sdm845-v2.dtb
@@ -81,6 +86,9 @@
 sda845-v2.1-4k-panel-cdp-overlay.dtbo-base := sda845-v2.1.dtb
 sda845-v2.1-4k-panel-mtp-overlay.dtbo-base := sda845-v2.1.dtb
 sda845-v2.1-4k-panel-qrd-overlay.dtbo-base := sda845-v2.1.dtb
+sda845-v2.1-cdp-sdxpoorwills-overlay.dtbo-base := sda845-v2.1.dtb
+sda845-v2.1-mtp-sdxpoorwills-overlay.dtbo-base := sda845-v2.1.dtb
+sda845-v2-mtp-sdxpoorwills-overlay.dtbo-base := sda845-v2.dtb
 else
 dtb-$(CONFIG_ARCH_SDM845) += sdm845-sim.dtb \
 	sdm845-rumi.dtb \
@@ -91,7 +99,8 @@
 	sdm845-v2-cdp.dtb \
 	sdm845-qrd.dtb \
 	sdm845-v2-qrd.dtb \
-	sdm845-v2-qvr.dtb \
+	sdm845-v2-qvr-evt.dtb \
+	sdm845-v2-qvr-dvt.dtb \
 	sdm845-4k-panel-mtp.dtb \
 	sdm845-4k-panel-cdp.dtb \
 	sdm845-4k-panel-qrd.dtb \
@@ -124,6 +133,8 @@
 		sda670-mtp-overlay.dtbo \
 		sda670-pm660a-cdp-overlay.dtbo \
 		sda670-pm660a-mtp-overlay.dtbo \
+		sdm670-tasha-codec-cdp-overlay.dtbo \
+		sdm670-pm660a-tasha-codec-cdp-overlay.dtbo \
 		qcs605-cdp-overlay.dtbo \
 		qcs605-mtp-overlay.dtbo \
 		qcs605-360camera-overlay.dtbo \
@@ -149,6 +160,8 @@
 sdm670-usbc-external-codec-mtp-overlay.dtbo-base := sdm670.dtb
 sdm670-usbc-external-codec-pm660a-cdp-overlay.dtbo-base := sdm670.dtb
 sdm670-usbc-external-codec-pm660a-mtp-overlay.dtbo-base := sdm670.dtb
+sdm670-tasha-codec-cdp-overlay.dtbo-base := sdm670.dtb
+sdm670-pm660a-tasha-codec-cdp-overlay.dtbo-base := sdm670.dtb
 sda670-cdp-overlay.dtbo-base := sda670.dtb
 sda670-mtp-overlay.dtbo-base := sda670.dtb
 sda670-pm660a-cdp-overlay.dtbo-base := sda670.dtb
@@ -181,6 +194,8 @@
 	sdm670-usbc-pm660a-mtp.dtb \
 	sda670-mtp.dtb \
 	sda670-cdp.dtb \
+	sdm670-tasha-codec-cdp.dtb \
+	sdm670-pm660a-tasha-codec-cdp.dtb \
 	sda670-pm660a-mtp.dtb \
 	sda670-pm660a-cdp.dtb \
 	qcs605-360camera.dtb \
@@ -191,6 +206,52 @@
 endif
 
 ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y)
+dtbo-$(CONFIG_ARCH_MSM8953) += msm8953-mtp-overlay.dtbo \
+	msm8953-cdp-overlay.dtbo \
+	msm8953-rcm-overlay.dtbo \
+	msm8953-ipc-overlay.dtbo \
+	msm8953-qrd-overlay.dtbo \
+	msm8953-iot-mtp-overlay.dtbo \
+	msm8953-ext-codec-mtp-overlay.dtbo \
+	msm8953-ext-codec-rcm-overlay.dtbo \
+	msm8953-cdp-1200p-overlay.dtbo
+
+dtbo-$(CONFIG_ARCH_SDM450) += msm8953-mtp-overlay.dtbo \
+	msm8953-cdp-overlay.dtbo \
+	msm8953-rcm-overlay.dtbo \
+	msm8953-qrd-overlay.dtbo \
+	msm8953-iot-mtp-overlay.dtbo
+
+msm8953-mtp-overlay.dtbo-base := sdm450.dtb \
+	msm8953.dtb \
+	apq8053.dtb \
+	msm8953-pmi8940.dtb \
+	msm8953-pmi8937.dtb \
+	sdm450-pmi8940.dtb \
+	sdm450-pmi8937.dtb
+msm8953-cdp-overlay.dtbo-base := sdm450.dtb \
+	msm8953.dtb \
+	apq8053.dtb \
+	msm8953-pmi8940.dtb \
+	msm8953-pmi8937.dtb
+msm8953-rcm-overlay.dtbo-base := sdm450.dtb \
+	msm8953.dtb \
+	apq8053.dtb
+msm8953-ipc-overlay.dtbo-base := msm8953.dtb \
+	apq8053.dtb
+msm8953-qrd-overlay.dtbo-base := sdm450.dtb \
+	msm8953.dtb
+msm8953-iot-mtp-overlay.dtbo-base := sdm450.dtb \
+	msm8953.dtb \
+	apq8053.dtb
+msm8953-ext-codec-mtp-overlay.dtbo-base := msm8953.dtb \
+	apq8053.dtb \
+	msm8953-pmi8940.dtb \
+	msm8953-pmi8937.dtb
+msm8953-ext-codec-rcm-overlay.dtbo-base := msm8953.dtb \
+	apq8053.dtb
+msm8953-cdp-1200p-overlay.dtbo-base := msm8953.dtb
+
 else
 dtb-$(CONFIG_ARCH_MSM8953) += msm8953-cdp.dtb \
 	msm8953-mtp.dtb \
@@ -221,7 +282,16 @@
 	sdm450-qrd.dtb \
 	sdm450-pmi8940-mtp.dtb \
 	sdm450-pmi8937-mtp.dtb \
-	sdm450-iot-mtp.dtb
+	sdm450-iot-mtp.dtb \
+	sdm450-qrd-sku4.dtb \
+	sdm450-pmi632-cdp-s2.dtb \
+	sdm450-pmi632-mtp-s3.dtb
+
+dtb-$(CONFIG_ARCH_SDM632) += sdm632-rumi.dtb \
+	sdm632-cdp-s2.dtb	\
+	sdm632-mtp-s3.dtb	\
+	sdm632-qrd-sku4.dtb
+
 endif
 
 always		:= $(dtb-y)
diff --git a/arch/arm64/boot/dts/qcom/apq8053-cdp.dts b/arch/arm64/boot/dts/qcom/apq8053-cdp.dts
index 5e89e4f..57401d8 100644
--- a/arch/arm64/boot/dts/qcom/apq8053-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-cdp.dts
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
 /dts-v1/;
 
 #include "apq8053.dtsi"
+#include "pmi8950.dtsi"
 #include "msm8953-cdp.dtsi"
+#include "msm8953-pmi8950.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. APQ8053 + PMI8950 CDP";
diff --git a/arch/arm64/boot/dts/qcom/apq8053-ext-audio-mtp.dts b/arch/arm64/boot/dts/qcom/apq8053-ext-audio-mtp.dts
index 2c7b228..2d5e761a 100644
--- a/arch/arm64/boot/dts/qcom/apq8053-ext-audio-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-ext-audio-mtp.dts
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
 /dts-v1/;
 
 #include "apq8053.dtsi"
+#include "pmi8950.dtsi"
 #include "msm8953-mtp.dtsi"
+#include "msm8953-pmi8950.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. APQ8053 + PMI8950 Ext Codec MTP";
diff --git a/arch/arm64/boot/dts/qcom/apq8053-ext-codec-rcm.dts b/arch/arm64/boot/dts/qcom/apq8053-ext-codec-rcm.dts
index d026734..96e1d53 100644
--- a/arch/arm64/boot/dts/qcom/apq8053-ext-codec-rcm.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-ext-codec-rcm.dts
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
 /dts-v1/;
 
 #include "apq8053.dtsi"
+#include "pmi8950.dtsi"
 #include "msm8953-cdp.dtsi"
+#include "msm8953-pmi8950.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. APQ8053 + PMI8950 Ext Codec RCM";
diff --git a/arch/arm64/boot/dts/qcom/apq8053-iot-mtp.dts b/arch/arm64/boot/dts/qcom/apq8053-iot-mtp.dts
index 177e105..44b4792 100644
--- a/arch/arm64/boot/dts/qcom/apq8053-iot-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-iot-mtp.dts
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
 /dts-v1/;
 
 #include "apq8053.dtsi"
+#include "pmi8950.dtsi"
 #include "msm8953-mtp.dtsi"
+#include "msm8953-pmi8950.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. APQ8053 + PMI8950 IOT MTP";
diff --git a/arch/arm64/boot/dts/qcom/apq8053-mtp.dts b/arch/arm64/boot/dts/qcom/apq8053-mtp.dts
index be544af..89b7624 100644
--- a/arch/arm64/boot/dts/qcom/apq8053-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-mtp.dts
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
 /dts-v1/;
 
 #include "apq8053.dtsi"
+#include "pmi8950.dtsi"
 #include "msm8953-mtp.dtsi"
+#include "msm8953-pmi8950.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. APQ8053 + PMI8950 MTP";
diff --git a/arch/arm64/boot/dts/qcom/apq8053-rcm.dts b/arch/arm64/boot/dts/qcom/apq8053-rcm.dts
index cc5bdaa..d70b99f 100644
--- a/arch/arm64/boot/dts/qcom/apq8053-rcm.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-rcm.dts
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
 /dts-v1/;
 
 #include "apq8053.dtsi"
+#include "pmi8950.dtsi"
 #include "msm8953-cdp.dtsi"
+#include "msm8953-pmi8950.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. APQ8053 + PMI8950 RCM";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/apq8053.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/apq8053.dts
index 0a56c79..bf9e2f2 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,13 @@
  * GNU General Public License for more details.
  */
 
-
 /dts-v1/;
 
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "apq8053.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
-	compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
-	qcom,board-id = <0x01000B 0x20>;
+	model = "Qualcomm Technologies, Inc. APQ8053 + PMI8950 SOC";
+	compatible = "qcom,apq8053";
+	qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
+	qcom,pmic-name = "PMI8950";
 };
diff --git a/arch/arm64/boot/dts/qcom/apq8053.dtsi b/arch/arm64/boot/dts/qcom/apq8053.dtsi
index 15a1595..4600dc1 100644
--- a/arch/arm64/boot/dts/qcom/apq8053.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8053.dtsi
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -12,9 +12,10 @@
  */
 #include "msm8953.dtsi"
 / {
-	model = "Qualcomm Technologies, Inc. APQ 8953";
+	model = "Qualcomm Technologies, Inc. APQ8053";
 	compatible = "qcom,apq8053";
 	qcom,msm-id = <304 0x0>;
+	qcom,msm-name = "APQ8053";
 };
 
 &secure_mem {
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-hx8399-truly-singlemipi-fhd-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-hx8399-truly-singlemipi-fhd-video.dtsi
new file mode 100644
index 0000000..3af01c1f
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-hx8399-truly-singlemipi-fhd-video.dtsi
@@ -0,0 +1,114 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+	dsi_hx8399_truly_cmd: qcom,mdss_dsi_hx8399_truly_cmd {
+		qcom,mdss-dsi-panel-name =
+			"hx8399 video mode dsi truly panel";
+		qcom,mdss-dsi-panel-type = "dsi_video_mode";
+
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-h-left-border = <0>;
+		qcom,mdss-dsi-h-right-border = <0>;
+		qcom,mdss-dsi-v-top-border = <0>;
+		qcom,mdss-dsi-v-bottom-border = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-color-order = "rgb_swap_rgb";
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-h-sync-pulse = <0>;
+		qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+		qcom,mdss-dsi-lane-map = "lane_map_0123";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-tx-eot-append;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-t-clk-pre = <0x30>;
+		qcom,mdss-dsi-t-clk-post = <0x0e>;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-lp11-init;
+		qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+		qcom,mdss-dsi-bl-min-level = <1>;
+		qcom,mdss-dsi-bl-max-level = <4095>;
+		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+
+		qcom,mdss-dsi-display-timings {
+			timing@0 {
+				qcom,mdss-dsi-panel-width = <1080>;
+				qcom,mdss-dsi-panel-height = <2160>;
+				qcom,mdss-dsi-h-front-porch = <24>;
+				qcom,mdss-dsi-h-back-porch = <24>;
+				qcom,mdss-dsi-h-pulse-width = <16>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-v-back-porch = <40>;
+				qcom,mdss-dsi-v-front-porch = <36>;
+				qcom,mdss-dsi-v-pulse-width = <2>;
+				qcom,mdss-dsi-panel-framerate = <60>;
+				qcom,mdss-dsi-on-command = [
+					39 01 00 00 00 00 04 B9 FF 83 99
+					39 01 00 00 00 00 02 D2 88
+					39 01 00 00 00 00 10 B1 02 04 74 94 01
+					   32 33 11 11 E6 5D 56 73 02 02
+					39 01 00 00 00 00 10 B2 00 80 80 CC 05
+					   07 5A 11 10 10 00 1E 70 03 D4
+					39 01 00 00 00 00 2D B4 00 FF 59 59 0C
+					   AC 00 00 0C 00 07 0A 00 28 07 08 0C
+					   21 03 00 00 00 AE 87 59 59 0C AC 00
+					   00 0C 00 07 0A 00 28 07 08 0C 01 00
+					   00 AE 01
+					39 01 00 00 05 00 22 D3 00 00 01 01 00
+					   00 10 10 00 00 03 00 03 00 08 78 08
+					   78 00 00 00 00 00 24 02 05 05 03 00
+					   00 00 05 40
+					39 01 00 00 05 00 21 D5 20 20 19 19 18
+					   18 02 03 00 01 24 24 18 18 18 18 24
+					   24 00 00 00 00 00 00 00 00 2F 2F 30
+					   30 31 31
+					39 01 00 00 05 00 21 D6 24 24 18 18 19
+					   19 01 00 03 02 24 24 18 18 18 18 20
+					   20 40 40 40 40 40 40 40 40 2F 2F 30
+					   30 31 31
+					39 01 00 00 00 00 02 BD 00
+					39 01 00 00 00 00 11 D8 AA AA AA AA AA
+					   AA AA AA AA BA AA AA AA BA AA AA
+					39 01 00 00 00 00 02 BD 01
+					39 01 00 00 00 00 11 D8 82 EA AA AA 82
+					   EA AA AA 82 EA AA AA 82 EA AA AA
+					39 01 00 00 00 00 02 BD 02
+					39 01 00 00 00 00 09 D8 FF FF C0 3F FF
+					   FF C0 3F
+					39 01 00 00 00 00 02 BD 00
+					39 01 00 00 05 00 37 E0 08 2A 39 35 74
+					   7C 87 7F 84 8A 8E 91 93 96 9B 9C 9E
+					   A5 A6 AE A1 AF B2 5C 58 63 74 08 2A
+					   39 35 74 7C 87 7F 84 8A 8E 91 93 96
+					   9B 9C 9E A5 A6 AE A1 AF B2 5C 58 63
+					   74
+					39 01 00 00 00 00 03 B6 7E 7E
+					39 01 00 00 00 00 02 CC 08
+					39 01 00 00 00 00 06 C7 00 08 00 01 08
+					39 01 00 00 00 00 03 C0 25 5A
+					05 01 00 00 78 00 02 11 00
+					05 01 00 00 14 00 02 29 00];
+				qcom,mdss-dsi-off-command = [05 01 00 00 14 00
+				  02 28 00 05 01 00 00 78 00 02 10 00];
+				qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+				qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35695b-truly-fhd-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35695b-truly-fhd-cmd.dtsi
index 5529ed1..32892a7 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-nt35695b-truly-fhd-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35695b-truly-fhd-cmd.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -181,7 +181,7 @@
 					15 01 00 00 00 00 02 ec 00
 					15 01 00 00 00 00 02 ff 10
 					15 01 00 00 00 00 02 bb 10
-					15 01 00 00 00 00 02 35 02
+					15 01 00 00 00 00 02 35 00
 					05 01 00 00 78 00 02 11 00
 					05 01 00 00 78 00 02 29 00];
 				qcom,mdss-dsi-off-command = [05 01 00 00 14
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt36850-truly-dualmipi-wqhd-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt36850-truly-dualmipi-wqhd-cmd.dtsi
index c059443..1b38d06 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-nt36850-truly-dualmipi-wqhd-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt36850-truly-dualmipi-wqhd-cmd.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -38,17 +38,13 @@
 		qcom,mdss-dsi-te-dcs-command = <1>;
 		qcom,mdss-dsi-te-check-enable;
 		qcom,mdss-dsi-te-using-te-pin;
-		qcom,mdss-dsi-panel-timings =
-			[da 34 24 00 64 68 28 38 2a 03 04 00];
-		qcom,mdss-dsi-t-clk-pre = <0x29>;
-		qcom,mdss-dsi-t-clk-post = <0x03>;
 		qcom,mdss-dsi-dma-trigger = "trigger_sw";
 		qcom,mdss-dsi-mdp-trigger = "none";
 		qcom,mdss-dsi-lp11-init;
 		qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
 		qcom,mdss-dsi-bl-min-level = <1>;
 		qcom,mdss-dsi-bl-max-level = <4095>;
-		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 50>;
 		qcom,mdss-dsi-display-timings {
 			timing@0 {
 				qcom,mdss-dsi-panel-framerate = <60>;
@@ -77,11 +73,11 @@
 					05 01 00 00 0a 00 02 20 00
 					15 01 00 00 00 00 02 bb 10
 					05 01 00 00 78 00 02 11 00
-					05 01 00 00 14 00 02 29 00
+					05 01 00 00 78 00 02 29 00
 				];
 				qcom,mdss-dsi-off-command = [
-					05 01 00 00 14 00 02
-					28 00 05 01 00 00 78 00 02 10 00
+					05 01 00 00 78 00 02 28 00
+					05 01 00 00 78 00 02 10 00
 				];
 				qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
 				qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
diff --git a/arch/arm64/boot/dts/qcom/external-soc.dtsi b/arch/arm64/boot/dts/qcom/external-soc.dtsi
new file mode 100644
index 0000000..e6609c0
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/external-soc.dtsi
@@ -0,0 +1,37 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	mdm3: qcom,mdm3 {
+		compatible = "qcom,ext-sdxpoorwills";
+		cell-index = <0>;
+		#address-cells = <0>;
+		interrupt-parent = <&mdm3>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0xffffffff>;
+		interrupt-names =
+			"err_fatal_irq",
+			"status_irq",
+			"mdm2ap_vddmin_irq";
+		/* modem attributes */
+		qcom,ramdump-delays-ms = <2000>;
+		qcom,ramdump-timeout-ms = <120000>;
+		qcom,vddmin-modes = "normal";
+		qcom,vddmin-drive-strength = <8>;
+		qcom,sfr-query;
+		qcom,sysmon-id = <20>;
+		qcom,ssctl-instance-id = <0x10>;
+		qcom,support-shutdown;
+		qcom,pil-force-shutdown;
+		status = "disabled";
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-8953.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-8953.dtsi
index e794472..49e840be 100644
--- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-8953.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-8953.dtsi
@@ -1,5 +1,5 @@
 /*
- *Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -39,6 +39,7 @@
 
 	/* A test device to test the SMMU operation */
 	kgsl_iommu_test_device0 {
+		status = "disabled";
 		compatible = "iommu-debug-test";
 		/* The SID should be valid one to get the proper
 		 *SMR,S2CR indices.
@@ -47,7 +48,7 @@
 	};
 
 	apps_iommu: qcom,iommu@1e00000 {
-		status = "disabled";
+		status = "okay";
 		compatible = "qcom,qsmmu-v500";
 		reg = <0x1e00000 0x40000>,
 			<0x1ee2000 0x20>;
diff --git a/arch/arm64/boot/dts/qcom/msm8937-regulator.dtsi b/arch/arm64/boot/dts/qcom/msm8937-regulator.dtsi
new file mode 100644
index 0000000..57272a4
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8937-regulator.dtsi
@@ -0,0 +1,449 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&rpm_bus {
+	rpm-regulator-smpa1 {
+		status = "okay";
+		pm8937_s1: regulator-s1 {
+			regulator-min-microvolt = <1000000>;
+			regulator-max-microvolt = <1225000>;
+			qcom,init-voltage = <1000000>;
+			status = "okay";
+		};
+	};
+
+	/* VDD_CX supply */
+	rpm-regulator-smpa2 {
+		status = "okay";
+		pm8937_s2_level: regulator-s2-level {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_s2_level";
+			qcom,set = <3>;
+			regulator-min-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_RETENTION>;
+			regulator-max-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_BINNING>;
+			qcom,use-voltage-level;
+		};
+
+		pm8937_s2_floor_level: regulator-s2-floor-level {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_s2_floor_level";
+			qcom,set = <3>;
+			regulator-min-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_RETENTION>;
+			regulator-max-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_BINNING>;
+			qcom,use-voltage-floor-level;
+			qcom,always-send-voltage;
+		};
+
+		pm8937_s2_level_ao: regulator-s2-level-ao {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_s2_level_ao";
+			qcom,set = <1>;
+			regulator-min-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_RETENTION>;
+			regulator-max-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_BINNING>;
+			qcom,use-voltage-level;
+		};
+	};
+
+	rpm-regulator-smpa3 {
+		status = "okay";
+		pm8937_s3: regulator-s3 {
+			regulator-min-microvolt = <1300000>;
+			regulator-max-microvolt = <1300000>;
+			qcom,init-voltage = <1300000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-smpa4 {
+		status = "okay";
+		pm8937_s4: regulator-s4 {
+			regulator-min-microvolt = <2050000>;
+			regulator-max-microvolt = <2050000>;
+			qcom,init-voltage = <2050000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa2 {
+		status = "okay";
+		pm8937_l2: regulator-l2 {
+			regulator-min-microvolt = <1200000>;
+			regulator-max-microvolt = <1200000>;
+			qcom,init-voltage = <1200000>;
+			status = "okay";
+		};
+	};
+
+	/* VDD_MX supply */
+	rpm-regulator-ldoa3 {
+		status = "okay";
+		pm8937_l3_level_ao: regulator-l3-level-ao {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l3_level_ao";
+			qcom,set = <1>;
+			regulator-min-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_RETENTION>;
+			regulator-max-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_TURBO>;
+			qcom,use-voltage-level;
+			qcom,always-send-voltage;
+		};
+
+		pm8937_l3_level_so: regulator-l3-level-so {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l3_level_so";
+			qcom,set = <2>;
+			regulator-min-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_RETENTION>;
+			regulator-max-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_TURBO>;
+			qcom,init-voltage-level =
+				<RPM_SMD_REGULATOR_LEVEL_RETENTION>;
+			qcom,use-voltage-level;
+		};
+	};
+
+	rpm-regulator-ldoa5 {
+		status = "okay";
+		pm8937_l5: regulator-l5 {
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			qcom,init-voltage = <1800000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa6 {
+		status = "okay";
+		pm8937_l6: regulator-l6 {
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			qcom,init-voltage = <1800000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa7 {
+		status = "okay";
+		pm8937_l7: regulator-l7 {
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			qcom,init-voltage = <1800000>;
+			status = "okay";
+		};
+
+		pm8937_l7_ao: regulator-l7-ao {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l7_ao";
+			qcom,set = <1>;
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			qcom,init-voltage = <1800000>;
+		};
+	};
+
+	rpm-regulator-ldoa8 {
+		status = "okay";
+		pm8937_l8: regulator-l8 {
+			regulator-min-microvolt = <2850000>;
+			regulator-max-microvolt = <2900000>;
+			qcom,init-voltage = <2900000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa9 {
+		status = "okay";
+		pm8937_l9: regulator-l9 {
+			regulator-min-microvolt = <3000000>;
+			regulator-max-microvolt = <3300000>;
+			qcom,init-voltage = <3000000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa10 {
+		status = "okay";
+		pm8937_l10: regulator-l10 {
+			regulator-min-microvolt = <2800000>;
+			regulator-max-microvolt = <3000000>;
+			qcom,init-voltage = <2800000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa11 {
+		status = "okay";
+		pm8937_l11: regulator-l11 {
+			regulator-min-microvolt = <2950000>;
+			regulator-max-microvolt = <2950000>;
+			qcom,init-voltage = <2950000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa12 {
+		status = "okay";
+		pm8937_l12: regulator-l12 {
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <2950000>;
+			qcom,init-voltage = <1800000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa13 {
+		status = "okay";
+		pm8937_l13: regulator-l13 {
+			regulator-min-microvolt = <3075000>;
+			regulator-max-microvolt = <3075000>;
+			qcom,init-voltage = <3075000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa14 {
+		status = "okay";
+		pm8937_l14: regulator-l14 {
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <3300000>;
+			qcom,init-voltage = <1800000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa15 {
+		status = "okay";
+		pm8937_l15: regulator-l15 {
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <3300000>;
+			qcom,init-voltage = <1800000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa16 {
+		status = "okay";
+		pm8937_l16: regulator-l16 {
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			qcom,init-voltage = <1800000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa17 {
+		status = "okay";
+		pm8937_l17: regulator-l17 {
+			regulator-min-microvolt = <2800000>;
+			regulator-max-microvolt = <2900000>;
+			qcom,init-voltage = <2800000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa19 {
+		status = "okay";
+		pm8937_l19: regulator-l19 {
+			regulator-min-microvolt = <1225000>;
+			regulator-max-microvolt = <1350000>;
+			qcom,init-voltage = <1225000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa22 {
+		status = "okay";
+		pm8937_l22: regulator-l22 {
+			regulator-min-microvolt = <2800000>;
+			regulator-max-microvolt = <2800000>;
+			qcom,init-voltage = <2800000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa23 {
+		status = "okay";
+		pm8937_l23: regulator-l23 {
+			regulator-min-microvolt = <1200000>;
+			regulator-max-microvolt = <1200000>;
+			qcom,init-voltage = <1200000>;
+			status = "okay";
+		};
+	};
+};
+
+/* SPM controlled regulators */
+&spmi_bus {
+	qcom,pm8937@1 {
+		/* PM8937 S5 + S6 = VDD_APC supply */
+		pm8937_s5: spm-regulator@2000 {
+			compatible = "qcom,spm-regulator";
+			reg = <0x2000 0x100>;
+			regulator-name = "pm8937_s5";
+			regulator-min-microvolt = <1050000>;
+			regulator-max-microvolt = <1350000>;
+		};
+	};
+};
+
+&soc {
+	mem_acc_vreg_corner: regulator@01946004 {
+		compatible = "qcom,mem-acc-regulator";
+		regulator-name = "mem_acc_corner";
+		regulator-min-microvolt = <1>;
+		regulator-max-microvolt = <3>;
+
+		qcom,acc-reg-addr-list =
+			<0x01942138 0x01942130 0x01942120
+			 0x01942124 0x01946000 0x01946004>;
+
+		qcom,acc-init-reg-config = <1 0xff>, <2 0x5555>, <6 0x55>;
+
+		qcom,num-acc-corners = <3>;
+		qcom,boot-acc-corner = <2>;
+		qcom,corner1-reg-config =
+			/* SVS+ => SVS+ */
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			/* SVS+ => NOM */
+			<  3 0x1041041>, <  4  0x1041>, <  5  0x2020202>,
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			/* SVS+ => TURBO/NOM+ */
+			<  3 0x1041041>, <  4  0x1041>, <  5  0x2020202>,
+			<  3 0x0>,       <  4  0x0>,    <  5  0x0>;
+
+		qcom,corner2-reg-config =
+			/* NOM => SVS+ */
+			<  3 0x30c30c3>, <  4  0x30c3>, <  5  0x6060606>,
+			/* NOM => NOM */
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			/* NOM => TURBO/NOM+ */
+			<  3 0x0>,       <  4  0x0>,    <  5  0x0>;
+
+		qcom,corner3-reg-config =
+			/* TURBO/NOM+ => SVS+ */
+			<  3 0x1041041>, <  4  0x1041>, <  5  0x2020202>,
+			<  3 0x30c30c3>, <  4  0x30c3>, <  5  0x6060606>,
+			/* TURBO/NOM+ => NOM */
+			<  3 0x1041041>, <  4  0x1041>, <  5  0x2020202>,
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			/* TURBO/NOM+ => TURBO/NOM+ */
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>;
+	};
+
+	apc_vreg_corner: regulator@b018000 {
+		compatible = "qcom,cpr-regulator";
+		reg = <0xb018000 0x1000>, <0xb011064 4>, <0xa4000 0x1000>;
+		reg-names = "rbcpr", "rbcpr_clk", "efuse_addr";
+		interrupts = <0 15 0>;
+		regulator-name = "apc_corner";
+		regulator-min-microvolt = <1>;
+		regulator-max-microvolt = <7>;
+
+		qcom,cpr-fuse-corners = <3>;
+		qcom,cpr-voltage-ceiling = <1155000 1225000 1350000>;
+		qcom,cpr-voltage-floor =   <1050000 1050000 1090000>;
+		vdd-apc-supply = <&pm8937_s5>;
+
+		mem-acc-supply = <&mem_acc_vreg_corner>;
+
+		qcom,cpr-ref-clk = <19200>;
+		qcom,cpr-timer-delay = <5000>;
+		qcom,cpr-timer-cons-up = <0>;
+		qcom,cpr-timer-cons-down = <2>;
+		qcom,cpr-irq-line = <0>;
+		qcom,cpr-step-quotient = <10>;
+		qcom,cpr-up-threshold = <2>;
+		qcom,cpr-down-threshold = <4>;
+		qcom,cpr-idle-clocks = <15>;
+		qcom,cpr-gcnt-time = <1>;
+		qcom,vdd-apc-step-up-limit = <1>;
+		qcom,vdd-apc-step-down-limit = <1>;
+		qcom,cpr-apc-volt-step = <5000>;
+
+		qcom,cpr-fuse-row = <67 0>;
+		qcom,cpr-fuse-target-quot = <42 24 6>;
+		qcom,cpr-fuse-ro-sel = <60 57 54>;
+		qcom,cpr-init-voltage-ref = <1155000 1225000 1350000>;
+		qcom,cpr-fuse-init-voltage =
+					<67 36 6 0>,
+					<67 18 6 0>,
+					<67  0 6 0>;
+		qcom,cpr-fuse-quot-offset =
+					<71 26 6 0>,
+					<71 20 6 0>,
+					<70 54 7 0>;
+		qcom,cpr-fuse-quot-offset-scale = <5 5 5>;
+		qcom,cpr-init-voltage-step = <10000>;
+		qcom,cpr-corner-map = <1 2 3 3 3 3 3>;
+		qcom,cpr-corner-frequency-map =
+				<1 960000000>,
+				<2 1094400000>,
+				<3 1209600000>,
+				<4 1248000000>,
+				<5 1344000000>,
+				<6 1401000000>,
+				<7 1497600000>;
+		qcom,speed-bin-fuse-sel = <37 34 3 0>;
+		qcom,cpr-speed-bin-max-corners =
+					<0 0 1 2 6>,
+					<1 0 1 2 7>,
+					<2 0 1 2 3>;
+		qcom,cpr-fuse-revision = <69 39 3 0>;
+		qcom,cpr-quot-adjust-scaling-factor-max = <0 1400 1400>;
+		qcom,cpr-voltage-scaling-factor-max = <0 2000 2000>;
+		qcom,cpr-scaled-init-voltage-as-ceiling;
+		qcom,cpr-fuse-version-map =
+			<0	(-1)	1	(-1)	(-1)	(-1)>,
+			<(-1)	(-1)	2	(-1)	(-1)	(-1)>,
+			<(-1)	(-1)	3	(-1)	(-1)	(-1)>,
+			<(-1)	(-1)  (-1)	(-1)	(-1)	(-1)>;
+		qcom,cpr-quotient-adjustment =
+				<(-20)	(-40)	(-20)>,
+				<0	(-40)	 (20)>,
+				<0	  0	 (20)>,
+				<0	  0	    0>;
+		qcom,cpr-init-voltage-adjustment =
+				<0		0	      0>,
+				<(10000)     (15000)	(20000)>,
+				<0		0	      0>,
+				<0		0	      0>;
+		qcom,cpr-enable;
+	};
+
+	eldo2_pm8937: eldo2 {
+		compatible = "regulator-fixed";
+		regulator-name = "eldo2_pm8937";
+		startup-delay-us = <0>;
+		enable-active-high;
+		gpio = <&pm8937_gpios 7 0>;
+		regulator-always-on;
+	};
+
+	adv_vreg: adv_vreg {
+		compatible = "regulator-fixed";
+		regulator-name = "adv_vreg";
+		startup-delay-us = <400>;
+		enable-active-high;
+		gpio = <&pm8937_gpios 8 0>;
+	};
+
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-cdp-1200p-overlay.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-cdp-1200p-overlay.dts
index 0a56c79..03ec7b5 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-cdp-1200p-overlay.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,12 @@
  * GNU General Public License for more details.
  */
 
-
 /dts-v1/;
+/plugin/;
 
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "msm8953-cdp.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
-	compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
-	qcom,board-id = <0x01000B 0x20>;
+	model = "CDP 1200P";
+	qcom,board-id = <1 1>;
 };
diff --git a/arch/arm64/boot/dts/qcom/msm8953-cdp-1200p.dts b/arch/arm64/boot/dts/qcom/msm8953-cdp-1200p.dts
index a685380..96e364f 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-cdp-1200p.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-cdp-1200p.dts
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
 /dts-v1/;
 
 #include "msm8953.dtsi"
+#include "pmi8950.dtsi"
 #include "msm8953-cdp.dtsi"
+#include "msm8953-pmi8950.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. MSM8953 + PMI8950 CDP 1200P";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-cdp-overlay.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-cdp-overlay.dts
index 0a56c79..145a40c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-cdp-overlay.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,12 @@
  * GNU General Public License for more details.
  */
 
-
 /dts-v1/;
+/plugin/;
 
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "msm8953-cdp.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
-	compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
-	qcom,board-id = <0x01000B 0x20>;
+	model = "CDP";
+	qcom,board-id = <1 0>;
 };
diff --git a/arch/arm64/boot/dts/qcom/msm8953-cdp.dts b/arch/arm64/boot/dts/qcom/msm8953-cdp.dts
index 1f78902..34c5f8f 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-cdp.dts
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
 /dts-v1/;
 
 #include "msm8953.dtsi"
+#include "pmi8950.dtsi"
 #include "msm8953-cdp.dtsi"
+#include "msm8953-pmi8950.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. MSM8953 + PMI8950 CDP";
diff --git a/arch/arm64/boot/dts/qcom/msm8953-cpu.dtsi b/arch/arm64/boot/dts/qcom/msm8953-cpu.dtsi
index 8d80a40..42d21f4 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-cpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-cpu.dtsi
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -60,6 +60,7 @@
 			efficiency = <1024>;
 			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
 			next-level-cache = <&L2_0>;
+			#cooling-cells = <2>;
 			L2_0: l2-cache {
 			      compatible = "arm,arch-cache";
 			      cache-level = <2>;
@@ -84,6 +85,7 @@
 			efficiency = <1024>;
 			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
 			next-level-cache = <&L2_0>;
+			#cooling-cells = <2>;
 			L1_I_1: l1-icache {
 			      compatible = "arm,arch-cache";
 			      qcom,dump-size = <0x8800>;
@@ -102,6 +104,7 @@
 			efficiency = <1024>;
 			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
 			next-level-cache = <&L2_0>;
+			#cooling-cells = <2>;
 			L1_I_2: l1-icache {
 			      compatible = "arm,arch-cache";
 			      qcom,dump-size = <0x8800>;
@@ -120,6 +123,7 @@
 			efficiency = <1024>;
 			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
 			next-level-cache = <&L2_0>;
+			#cooling-cells = <2>;
 			L1_I_3: l1-icache {
 			      compatible = "arm,arch-cache";
 			      qcom,dump-size = <0x8800>;
@@ -138,6 +142,7 @@
 			efficiency = <1126>;
 			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_1>;
 			next-level-cache = <&L2_1>;
+			#cooling-cells = <2>;
 			L2_1: l2-cache {
 			      compatible = "arm,arch-cache";
 			      cache-level = <2>;
@@ -162,6 +167,7 @@
 			efficiency = <1126>;
 			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_1>;
 			next-level-cache = <&L2_1>;
+			#cooling-cells = <2>;
 			L1_I_101: l1-icache {
 			      compatible = "arm,arch-cache";
 			      qcom,dump-size = <0x8800>;
@@ -180,6 +186,7 @@
 			efficiency = <1126>;
 			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_1>;
 			next-level-cache = <&L2_1>;
+			#cooling-cells = <2>;
 			L1_I_102: l1-icache {
 			      compatible = "arm,arch-cache";
 			      qcom,dump-size = <0x8800>;
@@ -198,6 +205,7 @@
 			efficiency = <1126>;
 			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_1>;
 			next-level-cache = <&L2_1>;
+			#cooling-cells = <2>;
 			L1_I_103: l1-icache {
 			      compatible = "arm,arch-cache";
 			      qcom,dump-size = <0x8800>;
@@ -282,7 +290,7 @@
 };
 
 &soc {
-	cpuss_dump {
+	cpuss_dump: cpuss_dump {
 		compatible = "qcom,cpuss-dump";
 		qcom,l2_dump0 {
 			/* L2 cache dump for A53 cluster */
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp-overlay.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp-overlay.dts
index 0a56c79..08a343e 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp-overlay.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,13 @@
  * GNU General Public License for more details.
  */
 
-
 /dts-v1/;
+/plugin/;
 
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "msm8953-mtp.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
-	compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
-	qcom,board-id = <0x01000B 0x20>;
+	model = "Ext Codec MTP";
+	qcom,board-id= <8 1>;
 };
+
diff --git a/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp.dts b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp.dts
index 3dfd848..b80583e 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp.dts
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
 /dts-v1/;
 
 #include "msm8953.dtsi"
+#include "pmi8950.dtsi"
 #include "msm8953-mtp.dtsi"
+#include "msm8953-pmi8950.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. MSM8953 + PMI8950 Ext Codec MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-rcm-overlay.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-ext-codec-rcm-overlay.dts
index 0a56c79..45fdf06 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-rcm-overlay.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,12 @@
  * GNU General Public License for more details.
  */
 
-
 /dts-v1/;
+/plugin/;
 
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "msm8953-cdp.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
-	compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
-	qcom,board-id = <0x01000B 0x20>;
+	model = "Ext Codec RCM";
+	qcom,board-id = <21 1>;
 };
diff --git a/arch/arm64/boot/dts/qcom/msm8953-ext-codec-rcm.dts b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-rcm.dts
index a81e212..d4224a4 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-ext-codec-rcm.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-rcm.dts
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
 /dts-v1/;
 
 #include "msm8953.dtsi"
+#include "pmi8950.dtsi"
 #include "msm8953-cdp.dtsi"
+#include "msm8953-pmi8950.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. MSM8953 + PMI8950 Ext Codec RCM";
diff --git a/arch/arm64/boot/dts/qcom/msm8953-gpu.dtsi b/arch/arm64/boot/dts/qcom/msm8953-gpu.dtsi
new file mode 100644
index 0000000..5cf6eb2
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953-gpu.dtsi
@@ -0,0 +1,273 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	pil_gpu: qcom,kgsl-hyp {
+		compatible = "qcom,pil-tz-generic";
+		qcom,pas-id = <13>;
+		qcom,firmware-name = "a506_zap";
+		memory-region = <&gpu_mem>;
+		qcom,mas-crypto = <&mas_crypto>;
+		clocks = <&clock_gcc clk_gcc_crypto_clk>,
+		<&clock_gcc clk_gcc_crypto_ahb_clk>,
+		<&clock_gcc clk_gcc_crypto_axi_clk>,
+		<&clock_gcc clk_crypto_clk_src>;
+		clock-names = "scm_core_clk", "scm_iface_clk",
+				"scm_bus_clk", "scm_core_clk_src";
+		qcom,proxy-clock-names = "scm_core_clk", "scm_iface_clk",
+				"scm_bus_clk", "scm_core_clk_src";
+		qcom,scm_core_clk_src-freq = <80000000>;
+	};
+
+	msm_bus: qcom,kgsl-busmon {
+		label = "kgsl-busmon";
+		compatible = "qcom,kgsl-busmon";
+	};
+
+	gpubw: qcom,gpubw {
+		compatible = "qcom,devbw";
+		governor = "bw_vbif";
+		qcom,src-dst-ports = <26 512>;
+		/*
+		 * active-only flag is used while registering the bus
+		 * governor. It helps release the bus vote when the CPU
+		 * subsystem is inactive.
+		 */
+		qcom,active-only;
+		qcom,bw-tbl =
+			< 0    >, /*  off */
+			< 1611 >, /* 1. DDR:211.20 MHz BIMC: 105.60 MHz */
+			< 2124 >, /* 2. DDR:278.40 MHz BIMC: 139.20 MHz */
+			< 2929 >, /* 3. DDR:384.00 MHz BIMC: 192.00 MHz */
+			< 3222 >, /* 4. DDR:422.40 MHz BIMC: 211.20 MHz */
+			< 4248 >, /* 5. DDR:556.80 MHz BIMC: 278.40 MHz */
+			< 5126 >, /* 6. DDR:672.00 MHz BIMC: 336.00 MHz */
+			< 5859 >, /* 7. DDR:768.00 MHz BIMC: 384.00 MHz */
+			< 6152 >, /* 8. DDR:806.40 MHz BIMC: 403.20 MHz */
+			< 6445 >, /* 9. DDR:844.80 MHz BIMC: 422.40 MHz */
+			< 7104 >; /*10. DDR:931.20 MHz BIMC: 465.60 MHz */
+	};
+
+	msm_gpu: qcom,kgsl-3d0@1c00000 {
+		label = "kgsl-3d0";
+		compatible = "qcom,kgsl-3d0", "qcom,kgsl-3d";
+		status = "ok";
+		reg = <0x1c00000 0x40000>;
+		reg-names = "kgsl_3d0_reg_memory";
+		interrupts = <0 33 0>;
+		interrupt-names = "kgsl_3d0_irq";
+		qcom,id = <0>;
+		qcom,chipid = <0x05000600>;
+
+		qcom,initial-pwrlevel = <4>;
+
+		qcom,idle-timeout = <80>; //msecs
+		qcom,deep-nap-timeout = <100>; //msecs
+		qcom,strtstp-sleepwake;
+
+		qcom,highest-bank-bit = <14>;
+
+		qcom,snapshot-size = <1048576>; //bytes
+
+		clocks = <&clock_gcc_gfx clk_gcc_oxili_gfx3d_clk>,
+			<&clock_gcc_gfx clk_gcc_oxili_ahb_clk>,
+			<&clock_gcc_gfx clk_gcc_bimc_gfx_clk>,
+			<&clock_gcc_gfx clk_gcc_bimc_gpu_clk>,
+			<&clock_gcc_gfx clk_gcc_oxili_timer_clk>,
+			<&clock_gcc_gfx clk_gcc_oxili_aon_clk>;
+
+		clock-names = "core_clk", "iface_clk",
+			      "mem_iface_clk", "alt_mem_iface_clk",
+			      "rbbmtimer_clk", "alwayson_clk";
+
+		/* Bus Scale Settings */
+		qcom,gpubw-dev = <&gpubw>;
+		qcom,bus-control;
+		qcom,bus-width = <16>;
+		qcom,msm-bus,name = "grp3d";
+		qcom,msm-bus,num-cases = <11>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<26 512 0 0>,	     /*  off          */
+				<26 512 0 1689600>, /* 1. 211.20 MHz */
+				<26 512 0 2227200>, /* 2. 278.40 MHz */
+				<26 512 0 3072000>, /* 3. 384.00 MHz */
+				<26 512 0 3379200>, /* 4.  422.40 MHz */
+				<26 512 0 4454400>, /* 5. 556.80 MHz */
+				<26 512 0 5376000>, /* 6. 672.00 MHz */
+				<26 512 0 6144000>, /* 7. 768.00 MHz */
+				<26 512 0 6451200>, /* 8. 806.40 MHz */
+				<26 512 0 6758400>, /* 9. 844.80 MHz */
+				<26 512 0 7449600>; /*10. 931.20 MHz */
+
+		/* GDSC regulator names */
+		regulator-names = "vddcx", "vdd";
+		/* GDSC oxili regulators */
+		vddcx-supply = <&gdsc_oxili_cx>;
+		vdd-supply = <&gdsc_oxili_gx>;
+
+		/* CPU latency parameter */
+		qcom,pm-qos-active-latency = <213>;
+		qcom,pm-qos-wakeup-latency = <213>;
+
+		/* Quirks */
+		qcom,gpu-quirk-two-pass-use-wfi;
+		qcom,gpu-quirk-dp2clockgating-disable;
+		qcom,gpu-quirk-lmloadkill-disable;
+
+		/* Trace bus */
+		coresight-id = <67>;
+		coresight-name = "coresight-gfx";
+		coresight-nr-inports = <0>;
+		coresight-outports = <0>;
+		coresight-child-list = <&funnel_mm>;
+		coresight-child-ports = <6>;
+
+		/* Enable context aware freq. scaling */
+		qcom,enable-ca-jump;
+
+		/* Context aware jump busy penalty in us */
+		qcom,ca-busy-penalty = <12000>;
+
+		/* Context aware jump target power level */
+		qcom,ca-target-pwrlevel = <3>;
+
+		/* Enable gpu cooling device */
+		#cooling-cells = <2>;
+
+		/* GPU Mempools */
+		qcom,gpu-mempools {
+			#address-cells= <1>;
+			#size-cells = <0>;
+			compatible = "qcom,gpu-mempools";
+
+			qcom,mempool-max-pages = <32768>;
+
+			/* 4K Page Pool configuration */
+			qcom,gpu-mempool@0 {
+				reg = <0>;
+				qcom,mempool-page-size = <4096>;
+			};
+			/* 64K Page Pool configuration */
+			qcom,gpu-mempool@1 {
+				reg = <1>;
+				qcom,mempool-page-size  = <65536>;
+			};
+		};
+
+		/* Power levels */
+		qcom,gpu-pwrlevels {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			compatible = "qcom,gpu-pwrlevels";
+
+			/* TURBO */
+			qcom,gpu-pwrlevel@0 {
+				reg = <0>;
+				qcom,gpu-freq = <650000000>;
+				qcom,bus-freq = <10>;
+				qcom,bus-min = <10>;
+				qcom,bus-max = <10>;
+			};
+
+			/* NOM+ */
+			qcom,gpu-pwrlevel@1 {
+				reg = <1>;
+				qcom,gpu-freq = <560000000>;
+				qcom,bus-freq = <10>;
+				qcom,bus-min = <8>;
+				qcom,bus-max = <10>;
+			};
+
+			/* NOM */
+			qcom,gpu-pwrlevel@2 {
+				reg = <2>;
+				qcom,gpu-freq = <510000000>;
+				qcom,bus-freq = <9>;
+				qcom,bus-min = <6>;
+				qcom,bus-max = <10>;
+			};
+
+			/* SVS+ */
+			qcom,gpu-pwrlevel@3 {
+				reg = <3>;
+				qcom,gpu-freq = <400000000>;
+				qcom,bus-freq = <7>;
+				qcom,bus-min = <5>;
+				qcom,bus-max = <8>;
+			};
+
+			/* SVS */
+			qcom,gpu-pwrlevel@4 {
+				reg = <4>;
+				qcom,gpu-freq = <320000000>;
+				qcom,bus-freq = <4>;
+				qcom,bus-min = <2>;
+				qcom,bus-max = <6>;
+			};
+
+		       /* Low SVS */
+			qcom,gpu-pwrlevel@5 {
+				reg = <5>;
+				qcom,gpu-freq = <216000000>;
+				qcom,bus-freq = <1>;
+				qcom,bus-min = <1>;
+				qcom,bus-max = <4>;
+			};
+
+		       /* Min SVS */
+			qcom,gpu-pwrlevel@6 {
+				reg = <6>;
+				qcom,gpu-freq = <133300000>;
+				qcom,bus-freq = <1>;
+				qcom,bus-min = <1>;
+				qcom,bus-max = <4>;
+			};
+			/* XO */
+			qcom,gpu-pwrlevel@7 {
+				reg = <7>;
+				qcom,gpu-freq = <19200000>;
+				qcom,bus-freq = <0>;
+				qcom,bus-min = <0>;
+				qcom,bus-max = <0>;
+			};
+		};
+	};
+
+	kgsl_msm_iommu: qcom,kgsl-iommu@1c40000 {
+		compatible = "qcom,kgsl-smmu-v2";
+
+		reg = <0x1c40000 0x10000>;
+		qcom,protect = <0x40000 0x10000>;
+		qcom,micro-mmu-control = <0x6000>;
+
+		clocks = <&clock_gcc_gfx clk_gcc_oxili_ahb_clk>,
+			 <&clock_gcc_gfx clk_gcc_bimc_gfx_clk>;
+
+		clock-names = "gpu_ahb_clk", "gcc_bimc_gfx_clk";
+
+		qcom,secure_align_mask = <0xfff>;
+		qcom,retention;
+		gfx3d_user: gfx3d_user {
+			compatible = "qcom,smmu-kgsl-cb";
+			label = "gfx3d_user";
+			iommus = <&kgsl_smmu 0>;
+			qcom,gpu-offset = <0x48000>;
+		};
+		gfx3d_secure: gfx3d_secure {
+			compatible = "qcom,smmu-kgsl-cb";
+			iommus = <&kgsl_smmu 2>;
+			memory-region = <&secure_mem>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-iot-mtp-overlay.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-iot-mtp-overlay.dts
index 0a56c79..fec135d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-iot-mtp-overlay.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,12 @@
  * GNU General Public License for more details.
  */
 
-
 /dts-v1/;
+/plugin/;
 
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "msm8953-mtp.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
-	compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
-	qcom,board-id = <0x01000B 0x20>;
+	model = "IOT MTP";
+	qcom,board-id = <8 2>;
 };
diff --git a/arch/arm64/boot/dts/qcom/msm8953-iot-mtp.dts b/arch/arm64/boot/dts/qcom/msm8953-iot-mtp.dts
index 524e7ca..39c76cc 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-iot-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-iot-mtp.dts
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
 /dts-v1/;
 
 #include "msm8953.dtsi"
+#include "pmi8950.dtsi"
 #include "msm8953-mtp.dtsi"
+#include "msm8953-pmi8950.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. MSM8953 + PMI8950 IOT MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-ipc-overlay.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-ipc-overlay.dts
index 0a56c79..3f957da 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-ipc-overlay.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,12 @@
  * GNU General Public License for more details.
  */
 
-
 /dts-v1/;
+/plugin/;
 
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "msm8953-ipc.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
-	compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
-	qcom,board-id = <0x01000B 0x20>;
+	model = "IPC";
+	qcom,board-id = <12 0>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-mtp-overlay.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-mtp-overlay.dts
index 0a56c79..49956df 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-mtp-overlay.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,12 @@
  * GNU General Public License for more details.
  */
 
-
 /dts-v1/;
+/plugin/;
 
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "msm8953-mtp.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
-	compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
-	qcom,board-id = <0x01000B 0x20>;
+	model = "MTP";
+	qcom,board-id = <8 0>;
 };
diff --git a/arch/arm64/boot/dts/qcom/msm8953-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/msm8953-pinctrl.dtsi
index a45bb66..eec350d 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-pinctrl.dtsi
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -48,6 +48,31 @@
 				};
 			};
 
+			uart1_console_active: uart1_console_active {
+				mux {
+					pins = "gpio20", "gpio21";
+					function = "blsp_uart6";
+				};
+
+				config {
+					pins = "gpio20", "gpio21";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			uart1_console_sleep: uart1_console_sleep {
+				mux {
+					pins = "gpio20", "gpio21";
+					function = "blsp_uart6";
+				};
+
+				config {
+					pins = "gpio20", "gpio21";
+					drive-strength = <2>;
+					bias-pull-down;
+				};
+			};
 		};
 		cci {
 			cci0_active: cci0_active {
diff --git a/arch/arm64/boot/dts/qcom/msm8953-pm.dtsi b/arch/arm64/boot/dts/qcom/msm8953-pm.dtsi
index 0cbb0f2..da4f4df 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-pm.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-pm.dtsi
@@ -264,4 +264,19 @@
 			};
 		};
 	};
+
+	qcom,rpm-stats@200000 {
+		compatible = "qcom,rpm-stats";
+		reg = <0x200000 0x1000>, <0x290014 0x4>, <0x29001c 0x4>;
+		reg-names = "phys_addr_base", "offset_addr";
+	};
+
+	qcom,rpm-master-stats@60150 {
+		compatible = "qcom,rpm-master-stats";
+		reg = <0x60150 0x5000>;
+		qcom,masters = "APSS", "MPSS", "PRONTO", "TZ", "LPASS";
+		qcom,master-stats-version = <2>;
+		qcom,master-offset = <4096>;
+	};
+
 };
diff --git a/arch/arm64/boot/dts/qcom/msm8953-pmi8937-cdp.dts b/arch/arm64/boot/dts/qcom/msm8953-pmi8937-cdp.dts
index a751d5d..ad3d3ed 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-pmi8937-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-pmi8937-cdp.dts
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
 /dts-v1/;
 
 #include "msm8953.dtsi"
+#include "pmi8937.dtsi"
 #include "msm8953-cdp.dtsi"
+#include "msm8953-pmi8937.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. MSM8953 + PMI8937 CDP";
diff --git a/arch/arm64/boot/dts/qcom/msm8953-pmi8937-ext-codec-mtp.dts b/arch/arm64/boot/dts/qcom/msm8953-pmi8937-ext-codec-mtp.dts
index 13aba62..5abf198 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-pmi8937-ext-codec-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-pmi8937-ext-codec-mtp.dts
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
 /dts-v1/;
 
 #include "msm8953.dtsi"
+#include "pmi8937.dtsi"
 #include "msm8953-mtp.dtsi"
+#include "msm8953-pmi8937.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. MSM8953 + PMI8937 Ext Codec MTP";
diff --git a/arch/arm64/boot/dts/qcom/msm8953-pmi8937-mtp.dts b/arch/arm64/boot/dts/qcom/msm8953-pmi8937-mtp.dts
index 9d6be47..ee1c2a0 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-pmi8937-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-pmi8937-mtp.dts
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
 /dts-v1/;
 
 #include "msm8953.dtsi"
+#include "pmi8937.dtsi"
 #include "msm8953-mtp.dtsi"
+#include "msm8953-pmi8937.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. MSM8953 + PMI8937 MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-pmi8937.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-pmi8937.dts
index 0a56c79..a9f64a4 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-pmi8937.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,13 @@
  * GNU General Public License for more details.
  */
 
-
 /dts-v1/;
 
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "msm8953.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
-	compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
-	qcom,board-id = <0x01000B 0x20>;
+	model = "Qualcomm Technologies, Inc. MSM8953 + PMI8937 SOC";
+	compatible = "qcom,msm8953";
+	qcom,pmic-id = <0x010016 0x020037 0x0 0x0>;
+	qcom,pmic-name = "PMI8937";
 };
diff --git a/arch/arm64/boot/dts/qcom/msm8953-pmi8937.dtsi b/arch/arm64/boot/dts/qcom/msm8953-pmi8937.dtsi
new file mode 100644
index 0000000..a208e1a
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953-pmi8937.dtsi
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	led_flash0: qcom,camera-flash {
+		cell-index = <0>;
+		compatible = "qcom,camera-flash";
+		qcom,flash-type = <1>;
+		qcom,flash-source = <&pmi8937_flash0 &pmi8937_flash1>;
+		qcom,torch-source = <&pmi8937_torch0 &pmi8937_torch1>;
+		qcom,switch-source = <&pmi8937_switch>;
+	};
+};
+
+&usb3 {
+	vbus_dwc3-supply = <&smbcharger_charger_otg>;
+	extcon = <&pmi8937_charger>;
+};
+
+&pmi8937_charger {
+	qcom,external-typec;
+	qcom,typec-psy-name = "typec";
+};
+
diff --git a/arch/arm64/boot/dts/qcom/msm8953-pmi8940-cdp.dts b/arch/arm64/boot/dts/qcom/msm8953-pmi8940-cdp.dts
index d2bb465..51622e0 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-pmi8940-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-pmi8940-cdp.dts
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
 /dts-v1/;
 
 #include "msm8953.dtsi"
+#include "pmi8940.dtsi"
 #include "msm8953-cdp.dtsi"
+#include "msm8953-pmi8940.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. MSM8953 + PMI8940 CDP";
diff --git a/arch/arm64/boot/dts/qcom/msm8953-pmi8940-ext-codec-mtp.dts b/arch/arm64/boot/dts/qcom/msm8953-pmi8940-ext-codec-mtp.dts
index dbbb6b8..92c67fa 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-pmi8940-ext-codec-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-pmi8940-ext-codec-mtp.dts
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
 /dts-v1/;
 
 #include "msm8953.dtsi"
+#include "pmi8940.dtsi"
 #include "msm8953-mtp.dtsi"
+#include "msm8953-pmi8940.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. MSM8953 + PMI8940 Ext Codec MTP";
diff --git a/arch/arm64/boot/dts/qcom/msm8953-pmi8940-mtp.dts b/arch/arm64/boot/dts/qcom/msm8953-pmi8940-mtp.dts
index 0fb793b..cb379b9 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-pmi8940-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-pmi8940-mtp.dts
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
 /dts-v1/;
 
 #include "msm8953.dtsi"
+#include "pmi8940.dtsi"
 #include "msm8953-mtp.dtsi"
+#include "msm8953-pmi8940.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. MSM8953 + PMI8940 MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-pmi8940.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-pmi8940.dts
index 0a56c79..e9c80a0d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-pmi8940.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,13 @@
  * GNU General Public License for more details.
  */
 
-
 /dts-v1/;
 
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "msm8953.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
-	compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
-	qcom,board-id = <0x01000B 0x20>;
+	model = "Qualcomm Technologies, Inc. MSM8953 + PMI8940 SOC";
+	compatible = "qcom,msm8953";
+	qcom,pmic-id = <0x010016 0x020040 0x0 0x0>;
+	qcom,pmic-name = "PMI8940";
 };
diff --git a/arch/arm64/boot/dts/qcom/msm8953-pmi8940.dtsi b/arch/arm64/boot/dts/qcom/msm8953-pmi8940.dtsi
new file mode 100644
index 0000000..28fc0d7
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953-pmi8940.dtsi
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	led_flash0: qcom,camera-flash {
+		cell-index = <0>;
+		compatible = "qcom,camera-flash";
+		qcom,flash-type = <1>;
+		qcom,flash-source = <&pmi8940_flash0 &pmi8940_flash1>;
+		qcom,torch-source = <&pmi8940_torch0 &pmi8940_torch1>;
+		qcom,switch-source = <&pmi8940_switch>;
+	};
+};
+
+&usb3 {
+	vbus_dwc3-supply = <&smbcharger_charger_otg>;
+	extcon = <&pmi8940_charger>;
+};
+
+&labibb {
+	status = "ok";
+	qpnp,qpnp-labibb-mode = "lcd";
+};
+
+&ibb_regulator {
+	qcom,qpnp-ibb-discharge-resistor = <32>;
+};
+
+&pmi8940_charger {
+	qcom,external-typec;
+	qcom,typec-psy-name = "typec";
+};
+
diff --git a/arch/arm64/boot/dts/qcom/msm8953-pmi8950.dtsi b/arch/arm64/boot/dts/qcom/msm8953-pmi8950.dtsi
index 016baf2..944868b 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-pmi8950.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-pmi8950.dtsi
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -29,4 +29,11 @@
 
 &usb3 {
 	vbus_dwc3-supply = <&smbcharger_charger_otg>;
+	extcon = <&pmi8950_charger>;
 };
+
+&pmi8950_charger {
+	qcom,external-typec;
+	qcom,typec-psy-name = "typec";
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-qrd-overlay.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-qrd-overlay.dts
index 0a56c79..7f5fc4e 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-qrd-overlay.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,12 @@
  * GNU General Public License for more details.
  */
 
-
 /dts-v1/;
+/plugin/;
 
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "msm8953-qrd-sku3.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
-	compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
-	qcom,board-id = <0x01000B 0x20>;
+	model = "QRD SKU3";
+	qcom,board-id = <0xb 0>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-rcm-overlay.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-rcm-overlay.dts
index 0a56c79..dbb7f57 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-rcm-overlay.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,12 @@
  * GNU General Public License for more details.
  */
 
-
 /dts-v1/;
+/plugin/;
 
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "msm8953-cdp.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
-	compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
-	qcom,board-id = <0x01000B 0x20>;
+	model = "RCM";
+	qcom,board-id = <21 0>;
 };
diff --git a/arch/arm64/boot/dts/qcom/msm8953-rcm.dts b/arch/arm64/boot/dts/qcom/msm8953-rcm.dts
index a3117ed..625a4d6 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-rcm.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-rcm.dts
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
 /dts-v1/;
 
 #include "msm8953.dtsi"
+#include "pmi8950.dtsi"
 #include "msm8953-cdp.dtsi"
+#include "msm8953-pmi8950.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. MSM8953 + PMI8950 RCM";
diff --git a/arch/arm64/boot/dts/qcom/msm8953-regulator.dtsi b/arch/arm64/boot/dts/qcom/msm8953-regulator.dtsi
index e4634c4..9468181 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-regulator.dtsi
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -60,6 +60,14 @@
 					<RPM_SMD_REGULATOR_LEVEL_TURBO>;
 			qcom,use-voltage-level;
 		};
+
+		cx_cdev: regulator-cx-cdev {
+			compatible = "qcom,regulator-cooling-device";
+			regulator-cdev-supply = <&pm8953_s2_floor_level>;
+			regulator-levels = <RPM_SMD_REGULATOR_LEVEL_NOM
+					RPM_SMD_REGULATOR_LEVEL_RETENTION>;
+			#cooling-cells = <2>;
+		};
 	};
 
 	rpm-regulator-smpa3 {
diff --git a/arch/arm64/boot/dts/qcom/msm8953-thermal.dtsi b/arch/arm64/boot/dts/qcom/msm8953-thermal.dtsi
new file mode 100644
index 0000000..d5a6f52
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953-thermal.dtsi
@@ -0,0 +1,1090 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/thermal/thermal.h>
+
+&soc {
+	qmi-tmd-devices {
+		compatible = "qcom,qmi_cooling_devices";
+
+		modem {
+			qcom,instance-id = <0x0>;
+
+			modem_pa: modem_pa {
+				qcom,qmi-dev-name = "pa";
+				#cooling-cells = <2>;
+			};
+
+			modem_proc: modem_proc {
+				qcom,qmi-dev-name = "modem";
+				#cooling-cells = <2>;
+			};
+
+			modem_current: modem_current {
+				qcom,qmi-dev-name = "modem_current";
+				#cooling-cells = <2>;
+			};
+
+			modem_vdd: modem_vdd {
+				qcom,qmi-dev-name = "cpuv_restriction_cold";
+				#cooling-cells = <2>;
+			};
+		};
+	};
+};
+
+&thermal_zones {
+	mdm-core-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&tsens0 1>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	qdsp-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&tsens0 2>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	camera-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&tsens0 3>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	apc1-cpu0-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 4>;
+		thermal-governor = "user_space";
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	apc1-cpu1-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 5>;
+		thermal-governor = "user_space";
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	apc1-cpu2-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 6>;
+		thermal-governor = "user_space";
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	apc1-cpu3-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 7>;
+		thermal-governor = "user_space";
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	apc1-l2-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 8>;
+		thermal-governor = "user_space";
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	apc0-cpu0-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 9>;
+		thermal-governor = "user_space";
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	apc0-cpu1-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 10>;
+		thermal-governor = "user_space";
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	apc0-cpu2-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 11>;
+		thermal-governor = "user_space";
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	apc0-cpu3-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 12>;
+		thermal-governor = "user_space";
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	apc0-l2-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 13>;
+		thermal-governor = "user_space";
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	gpu0-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 14>;
+		thermal-governor = "user_space";
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	gpu1-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 15>;
+		thermal-governor = "user_space";
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	gpu1-step {
+		polling-delay-passive = <250>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 15>;
+		thermal-governor = "step_wise";
+		trips {
+			gpu_trip0: gpu-trip0 {
+				temperature = <95000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			gpu_cdev0 {
+				trip = <&gpu_trip0>;
+				cooling-device =
+					<&msm_gpu THERMAL_NO_LIMIT
+						THERMAL_NO_LIMIT>;
+			};
+		};
+	};
+
+	deca-cpu-max-step {
+		polling-delay-passive = <50>;
+		polling-delay = <100>;
+		thermal-governor = "step_wise";
+		trips {
+			cpu_trip: cpu-trip {
+				temperature = <95000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_cdev {
+				trip = <&cpu_trip>;
+				cooling-device =
+					<&CPU0 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			cpu1_cdev {
+				trip = <&cpu_trip>;
+				cooling-device =
+					<&CPU1 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			cpu2_cdev {
+				trip = <&cpu_trip>;
+				cooling-device =
+					<&CPU2 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			cpu3_cdev {
+				trip = <&cpu_trip>;
+				cooling-device =
+					<&CPU3 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			cpu4_cdev {
+				trip = <&cpu_trip>;
+				cooling-device =
+					<&CPU4 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			cpu5_cdev {
+				trip = <&cpu_trip>;
+				cooling-device =
+					<&CPU5 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			cpu6_cdev {
+				trip = <&cpu_trip>;
+				cooling-device =
+					<&CPU6 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			cpu7_cdev {
+				trip = <&cpu_trip>;
+				cooling-device =
+					<&CPU7 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+		};
+	};
+
+	pop-mem-step {
+		polling-delay-passive = <250>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 2>;
+		thermal-governor = "step_wise";
+		trips {
+			pop_trip: pop-trip {
+				temperature = <70000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			pop_cdev0 {
+				trip = <&pop_trip>;
+				cooling-device =
+					<&CPU0 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			pop_cdev1 {
+				trip = <&pop_trip>;
+				cooling-device =
+					<&CPU1 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			pop_cdev2 {
+				trip = <&pop_trip>;
+				cooling-device =
+					<&CPU2 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			pop_cdev3 {
+				trip = <&pop_trip>;
+				cooling-device =
+					<&CPU3 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			pop_cdev4 {
+				trip = <&pop_trip>;
+				cooling-device =
+					<&CPU4 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			pop_cdev5 {
+				trip = <&pop_trip>;
+				cooling-device =
+					<&CPU5 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			pop_cdev6 {
+				trip = <&pop_trip>;
+				cooling-device =
+					<&CPU6 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			pop_cdev7 {
+				trip = <&pop_trip>;
+				cooling-device =
+					<&CPU7 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+		};
+	};
+
+	apc1-cpu0-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 4>;
+		thermal-governor = "step_wise";
+		trips {
+			apc1_cpu0_trip: apc1-cpu0-trip {
+				temperature = <105000>;
+				hysteresis = <15000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu4_cdev {
+				trip = <&apc1_cpu0_trip>;
+				cooling-device =
+					<&CPU4 THERMAL_MAX_LIMIT
+						THERMAL_MAX_LIMIT>;
+			};
+		};
+	};
+
+	apc1-cpu1-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 5>;
+		thermal-governor = "step_wise";
+		trips {
+			apc1_cpu1_trip: apc1-cpu1-trip {
+				temperature = <105000>;
+				hysteresis = <15000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu5_cdev {
+				trip = <&apc1_cpu1_trip>;
+				cooling-device =
+					<&CPU5 THERMAL_MAX_LIMIT
+						THERMAL_MAX_LIMIT>;
+			};
+		};
+	};
+
+	apc1-cpu2-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 6>;
+		thermal-governor = "step_wise";
+		trips {
+			apc1_cpu2_trip: apc1-cpu2-trip {
+				temperature = <105000>;
+				hysteresis = <15000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu6_cdev {
+				trip = <&apc1_cpu2_trip>;
+				cooling-device =
+					<&CPU6 THERMAL_MAX_LIMIT
+						THERMAL_MAX_LIMIT>;
+			};
+		};
+	};
+
+	apc1-cpu3-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 7>;
+		thermal-governor = "step_wise";
+		trips {
+			apc1_cpu3_trip: apc1-cpu3-trip {
+				temperature = <105000>;
+				hysteresis = <15000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu7_cdev {
+				trip = <&apc1_cpu3_trip>;
+				cooling-device =
+					<&CPU7 THERMAL_MAX_LIMIT
+						THERMAL_MAX_LIMIT>;
+			};
+		};
+	};
+
+	apc0-cpu0-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 9>;
+		thermal-governor = "step_wise";
+		trips {
+			apc0_cpu0_trip: apc0-cpu0-trip {
+				temperature = <105000>;
+				hysteresis = <15000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_cdev {
+				trip = <&apc0_cpu0_trip>;
+				cooling-device =
+					<&CPU0 THERMAL_MAX_LIMIT
+						THERMAL_MAX_LIMIT>;
+			};
+		};
+	};
+
+	apc0-cpu1-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 10>;
+		thermal-governor = "step_wise";
+		trips {
+			apc0_cpu1_trip: apc0-cpu1-trip {
+				temperature = <105000>;
+				hysteresis = <15000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu1_cdev {
+				trip = <&apc0_cpu1_trip>;
+				cooling-device =
+					<&CPU1 THERMAL_MAX_LIMIT
+						THERMAL_MAX_LIMIT>;
+			};
+		};
+	};
+
+	apc0-cpu2-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 11>;
+		thermal-governor = "step_wise";
+		trips {
+			apc0_cpu2_trip: apc0-cpu2-trip {
+				temperature = <105000>;
+				hysteresis = <15000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu2_cdev {
+				trip = <&apc0_cpu2_trip>;
+				cooling-device =
+					<&CPU2 THERMAL_MAX_LIMIT
+						THERMAL_MAX_LIMIT>;
+			};
+		};
+	};
+
+	apc0-cpu3-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 12>;
+		thermal-governor = "step_wise";
+		trips {
+			apc0_cpu3_trip: apc0-cpu3-trip {
+				temperature = <105000>;
+				hysteresis = <15000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu3_cdev {
+				trip = <&apc0_cpu3_trip>;
+				cooling-device =
+					<&CPU3 THERMAL_MAX_LIMIT
+						THERMAL_MAX_LIMIT>;
+			};
+		};
+	};
+
+	mdm-core-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 1>;
+		tracks-low;
+		trips {
+			mdm_core_trip: mdm-core-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&mdm_core_trip>;
+				cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
+						(THERMAL_MAX_LIMIT - 4)>;
+			};
+			gpu_vdd_cdev {
+				trip = <&mdm_core_trip>;
+				cooling-device = <&msm_gpu 2 2>;
+			};
+			cx_vdd_cdev {
+				trip = <&mdm_core_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&mdm_core_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+		};
+	};
+
+	qdsp-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 2>;
+		tracks-low;
+		trips {
+			qdsp_trip: qdsp-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&qdsp_trip>;
+				cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
+						(THERMAL_MAX_LIMIT - 4)>;
+			};
+			gpu_vdd_cdev {
+				trip = <&qdsp_trip>;
+				cooling-device = <&msm_gpu 2 2>;
+			};
+			cx_vdd_cdev {
+				trip = <&qdsp_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&qdsp_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+		};
+	};
+
+	camera-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 3>;
+		tracks-low;
+		trips {
+			camera_trip: camera-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&camera_trip>;
+				cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
+						(THERMAL_MAX_LIMIT - 4)>;
+			};
+			gpu_vdd_cdev {
+				trip = <&camera_trip>;
+				cooling-device = <&msm_gpu 2 2>;
+			};
+			cx_vdd_cdev {
+				trip = <&camera_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&camera_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+		};
+	};
+
+	apc1-cpu0-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 4>;
+		tracks-low;
+		trips {
+			cpu4_trip: apc1-cpu0-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&cpu4_trip>;
+				cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
+						(THERMAL_MAX_LIMIT - 4)>;
+			};
+			gpu_vdd_cdev {
+				trip = <&cpu4_trip>;
+				cooling-device = <&msm_gpu 2 2>;
+			};
+			cx_vdd_cdev {
+				trip = <&cpu4_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&cpu4_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+		};
+	};
+
+	apc1-cpu1-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 5>;
+		tracks-low;
+		trips {
+			cpu5_trip: apc1-cpu0-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&cpu5_trip>;
+				cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
+						(THERMAL_MAX_LIMIT - 4)>;
+			};
+			gpu_vdd_cdev {
+				trip = <&cpu5_trip>;
+				cooling-device = <&msm_gpu 2 2>;
+			};
+			cx_vdd_cdev {
+				trip = <&cpu5_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&cpu5_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+		};
+	};
+
+	apc1-cpu2-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 6>;
+		tracks-low;
+		trips {
+			cpu6_trip: apc1-cpu2-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&cpu6_trip>;
+				cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
+						(THERMAL_MAX_LIMIT - 4)>;
+			};
+			gpu_vdd_cdev {
+				trip = <&cpu6_trip>;
+				cooling-device = <&msm_gpu 2 2>;
+			};
+			cx_vdd_cdev {
+				trip = <&cpu6_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&cpu6_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+		};
+	};
+
+	apc1-cpu3-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 7>;
+		tracks-low;
+		trips {
+			cpu7_trip: apc1-cpu3-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&cpu7_trip>;
+				cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
+						(THERMAL_MAX_LIMIT - 4)>;
+			};
+			gpu_vdd_cdev {
+				trip = <&cpu7_trip>;
+				cooling-device = <&msm_gpu 2 2>;
+			};
+			cx_vdd_cdev {
+				trip = <&cpu7_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&cpu7_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+		};
+	};
+
+	apc1-l2-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 8>;
+		tracks-low;
+		trips {
+			apc1_l2_trip: apc1-l2-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&apc1_l2_trip>;
+				cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
+						(THERMAL_MAX_LIMIT - 4)>;
+			};
+			gpu_vdd_cdev {
+				trip = <&apc1_l2_trip>;
+				cooling-device = <&msm_gpu 2 2>;
+			};
+			cx_vdd_cdev {
+				trip = <&apc1_l2_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&apc1_l2_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+		};
+	};
+
+	apc0-cpu0-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 9>;
+		tracks-low;
+		trips {
+			cpu0_trip: apc0-cpu0-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&cpu0_trip>;
+				cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
+						(THERMAL_MAX_LIMIT - 4)>;
+			};
+			gpu_vdd_cdev {
+				trip = <&cpu0_trip>;
+				cooling-device = <&msm_gpu 2 2>;
+			};
+			cx_vdd_cdev {
+				trip = <&cpu0_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&cpu0_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+		};
+	};
+
+	apc0-cpu1-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 10>;
+		tracks-low;
+		trips {
+			cpu1_trip: apc0-cpu1-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&cpu1_trip>;
+				cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
+						(THERMAL_MAX_LIMIT - 4)>;
+			};
+			gpu_vdd_cdev {
+				trip = <&cpu1_trip>;
+				cooling-device = <&msm_gpu 2 2>;
+			};
+			cx_vdd_cdev {
+				trip = <&cpu1_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&cpu1_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+		};
+	};
+
+	apc0-cpu2-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 11>;
+		tracks-low;
+		trips {
+			cpu2_trip: apc0-cpu2-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&cpu2_trip>;
+				cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
+						(THERMAL_MAX_LIMIT - 4)>;
+			};
+			gpu_vdd_cdev {
+				trip = <&cpu2_trip>;
+				cooling-device = <&msm_gpu 2 2>;
+			};
+			cx_vdd_cdev {
+				trip = <&cpu2_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&cpu2_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+		};
+	};
+
+	apc0-cpu3-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 12>;
+		tracks-low;
+		trips {
+			cpu3_trip: apc0-cpu3-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&cpu3_trip>;
+				cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
+						(THERMAL_MAX_LIMIT - 4)>;
+			};
+			gpu_vdd_cdev {
+				trip = <&cpu3_trip>;
+				cooling-device = <&msm_gpu 2 2>;
+			};
+			cx_vdd_cdev {
+				trip = <&cpu3_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&cpu3_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+		};
+	};
+
+	apc0-l2-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 13>;
+		tracks-low;
+		trips {
+			apc0_l2_trip: apc0-l2-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&apc0_l2_trip>;
+				cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
+						(THERMAL_MAX_LIMIT - 4)>;
+			};
+			gpu_vdd_cdev {
+				trip = <&apc0_l2_trip>;
+				cooling-device = <&msm_gpu 2 2>;
+			};
+			cx_vdd_cdev {
+				trip = <&apc0_l2_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&apc0_l2_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+		};
+	};
+
+	gpu0-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 14>;
+		tracks-low;
+		trips {
+			gpu0_trip: gpu0-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&gpu0_trip>;
+				cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
+						(THERMAL_MAX_LIMIT - 4)>;
+			};
+			gpu_vdd_cdev {
+				trip = <&gpu0_trip>;
+				cooling-device = <&msm_gpu 2 2>;
+			};
+			cx_vdd_cdev {
+				trip = <&gpu0_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&gpu0_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+		};
+	};
+
+	gpu1-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 15>;
+		tracks-low;
+		trips {
+			gpu1_trip: gpu1-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&gpu1_trip>;
+				cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
+						(THERMAL_MAX_LIMIT - 4)>;
+			};
+			gpu_vdd_cdev {
+				trip = <&gpu1_trip>;
+				cooling-device = <&msm_gpu 2 2>;
+			};
+			cx_vdd_cdev {
+				trip = <&gpu1_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&gpu1_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953.dts
index 0a56c79..ddf2218 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,13 @@
  * GNU General Public License for more details.
  */
 
-
 /dts-v1/;
 
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "msm8953.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
-	compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
-	qcom,board-id = <0x01000B 0x20>;
+	model = "Qualcomm Technologies, Inc. MSM8953 + PMI8950 SOC";
+	compatible = "qcom,msm8953";
+	qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
+	qcom,pmic-name = "PMI8950";
 };
diff --git a/arch/arm64/boot/dts/qcom/msm8953.dtsi b/arch/arm64/boot/dts/qcom/msm8953.dtsi
index b4631e9..69cd4fc 100644
--- a/arch/arm64/boot/dts/qcom/msm8953.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953.dtsi
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -19,15 +19,42 @@
 #include <dt-bindings/clock/msm-clocks-8953.h>
 
 / {
-	model = "Qualcomm Technologies, Inc. MSM 8953";
+	model = "Qualcomm Technologies, Inc. MSM8953";
 	compatible = "qcom,msm8953";
 	qcom,msm-id = <293 0x0>;
+	qcom,msm-name = "MSM8953";
 	interrupt-parent = <&intc>;
 
 	chosen {
 		bootargs = "sched_enable_hmp=1 sched_enable_power_aware=1";
 	};
 
+	firmware: firmware {
+		android {
+			compatible = "android,firmware";
+			fstab {
+				compatible = "android,fstab";
+				vendor {
+					compatible = "android,vendor";
+					dev = "/dev/block/platform/soc/7824900.sdhci/by-name/vendor";
+					type = "ext4";
+					mnt_flags = "ro,barrier=1,discard";
+					fsmgr_flags = "wait";
+					status = "ok";
+				};
+				system {
+					compatible = "android,system";
+					dev = "/dev/block/platform/soc/7824900.sdhci/by-name/system";
+					type = "ext4";
+					mnt_flags = "ro,barrier=1,discard";
+					fsmgr_flags = "wait";
+					status = "ok";
+				};
+
+			};
+		};
+	};
+
 	reserved-memory {
 		#address-cells = <2>;
 		#size-cells = <2>;
@@ -136,6 +163,7 @@
 #include "msm8953-coresight.dtsi"
 #include "msm8953-ion.dtsi"
 #include "msm-arm-smmu-8953.dtsi"
+#include "msm8953-gpu.dtsi"
 
 &soc {
 	#address-cells = <1>;
@@ -276,217 +304,7 @@
 		qcom,pipe-attr-ee;
 	};
 
-	thermal_zones: thermal-zones {
-		mdm-core-usr {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-governor = "user_space";
-			thermal-sensors = <&tsens0 1>;
-			trips {
-				active-config0 {
-					temperature = <125000>;
-					hysteresis = <1000>;
-					type = "passive";
-				};
-			};
-		};
-
-		qdsp-usr {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-governor = "user_space";
-			thermal-sensors = <&tsens0 2>;
-			trips {
-				active-config0 {
-					temperature = <125000>;
-					hysteresis = <1000>;
-					type = "passive";
-				};
-			};
-		};
-
-		camera-usr {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-governor = "user_space";
-			thermal-sensors = <&tsens0 3>;
-			trips {
-				active-config0 {
-					temperature = <125000>;
-					hysteresis = <1000>;
-					type = "passive";
-				};
-			};
-		};
-
-		apc1_cpu0-usr {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-sensors = <&tsens0 4>;
-			thermal-governor = "user_space";
-			trips {
-				active-config0 {
-					temperature = <125000>;
-					hysteresis = <1000>;
-					type = "passive";
-				};
-			};
-		};
-
-		apc1_cpu1-usr {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-sensors = <&tsens0 5>;
-			thermal-governor = "user_space";
-			trips {
-				active-config0 {
-					temperature = <125000>;
-					hysteresis = <1000>;
-					type = "passive";
-				};
-			};
-		};
-
-		apc1_cpu2-usr {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-sensors = <&tsens0 6>;
-			thermal-governor = "user_space";
-			trips {
-				active-config0 {
-					temperature = <125000>;
-					hysteresis = <1000>;
-					type = "passive";
-				};
-			};
-		};
-
-		apc1_cpu3-usr {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-sensors = <&tsens0 7>;
-			thermal-governor = "user_space";
-			trips {
-				active-config0 {
-					temperature = <125000>;
-					hysteresis = <1000>;
-					type = "passive";
-				};
-			};
-		};
-
-		apc1_l2-usr {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-sensors = <&tsens0 8>;
-			thermal-governor = "user_space";
-			trips {
-				active-config0 {
-					temperature = <125000>;
-					hysteresis = <1000>;
-					type = "passive";
-				};
-			};
-		};
-
-		apc0_cpu0-usr {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-sensors = <&tsens0 9>;
-			thermal-governor = "user_space";
-			trips {
-				active-config0 {
-					temperature = <125000>;
-					hysteresis = <1000>;
-					type = "passive";
-				};
-			};
-		};
-
-		apc0_cpu1-usr {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-sensors = <&tsens0 10>;
-			thermal-governor = "user_space";
-			trips {
-				active-config0 {
-					temperature = <125000>;
-					hysteresis = <1000>;
-					type = "passive";
-				};
-			};
-		};
-
-		apc0_cpu2-usr {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-sensors = <&tsens0 11>;
-			thermal-governor = "user_space";
-			trips {
-				active-config0 {
-					temperature = <125000>;
-					hysteresis = <1000>;
-					type = "passive";
-				};
-			};
-		};
-
-		apc0_cpu3-usr {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-sensors = <&tsens0 12>;
-			thermal-governor = "user_space";
-			trips {
-				active-config0 {
-					temperature = <125000>;
-					hysteresis = <1000>;
-					type = "passive";
-				};
-			};
-		};
-
-		apc0_l2-usr {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-sensors = <&tsens0 13>;
-			thermal-governor = "user_space";
-			trips {
-				active-config0 {
-					temperature = <125000>;
-					hysteresis = <1000>;
-					type = "passive";
-				};
-			};
-		};
-
-		gpu0-usr {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-sensors = <&tsens0 14>;
-			thermal-governor = "user_space";
-			trips {
-				active-config0 {
-					temperature = <125000>;
-					hysteresis = <1000>;
-					type = "passive";
-				};
-			};
-		};
-
-		gpu1-usr {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-sensors = <&tsens0 15>;
-			thermal-governor = "user_space";
-			trips {
-				active-config0 {
-					temperature = <125000>;
-					hysteresis = <1000>;
-					type = "passive";
-				};
-			};
-		};
-	};
+	thermal_zones: thermal-zones {};
 
 	tsens0: tsens@4a8000 {
 		compatible = "qcom,msm8953-tsens";
@@ -690,6 +508,16 @@
 		status = "disabled";
 	};
 
+	blsp1_serial1: serial@78b0000 {
+		compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+		reg = <0x78b0000 0x200>;
+		interrupts = <0 108 0>;
+		clocks = <&clock_gcc clk_gcc_blsp1_uart2_apps_clk>,
+			<&clock_gcc clk_gcc_blsp1_ahb_clk>;
+		clock-names = "core", "iface";
+		status = "disabled";
+	};
+
 	dma_blsp1: qcom,sps-dma@7884000 { /* BLSP1 */
 		#dma-cells = <4>;
 		compatible = "qcom,sps-dma";
@@ -847,6 +675,8 @@
 		reg = <0x1800000 0x80000>;
 		reg-names = "cc_base";
 		vdd_gfx-supply = <&gfx_vreg_corner>;
+		clocks = <&clock_gcc clk_xo_clk_src>;
+		clock-names = "xo";
 		qcom,gfxfreq-corner =
 			 <         0   0 >,
 			 < 133330000   1 >,  /* Min SVS   */
@@ -1253,7 +1083,7 @@
 		rpm-channel-type = <15>; /* SMD_APPS_RPM */
 		};
 
-	qcom,wdt@b017000 {
+	wdog: qcom,wdt@b017000 {
 		compatible = "qcom,msm-watchdog";
 		reg = <0xb017000 0x1000>;
 		reg-names = "wdt-base";
@@ -1788,6 +1618,47 @@
 		qcom,reset-ep-after-lpm-resume;
 	};
 
+	qcom,mss@4080000 {
+		compatible = "qcom,pil-q6v55-mss";
+		reg = <0x04080000 0x100>,
+		      <0x0194f000 0x010>,
+		      <0x01950000 0x008>,
+		      <0x01951000 0x008>,
+		      <0x04020000 0x040>,
+		      <0x01871000 0x004>;
+		reg-names = "qdsp6_base", "halt_q6", "halt_modem", "halt_nc",
+				 "rmb_base", "restart_reg";
+
+		interrupts = <GIC_SPI 24 IRQ_TYPE_EDGE_RISING>;
+		vdd_mss-supply = <&pm8953_s1>;
+		vdd_cx-supply = <&pm8953_s2_level>;
+		vdd_cx-voltage = <RPM_SMD_REGULATOR_LEVEL_TURBO>;
+		vdd_mx-supply = <&pm8953_s7_level_ao>;
+		vdd_mx-uV = <RPM_SMD_REGULATOR_LEVEL_TURBO>;
+		vdd_pll-supply = <&pm8953_l7>;
+		qcom,vdd_pll = <1800000>;
+		vdd_mss-uV = <RPM_SMD_REGULATOR_LEVEL_TURBO>;
+
+		clocks = <&clock_gcc clk_xo_pil_mss_clk>,
+			 <&clock_gcc clk_gcc_mss_cfg_ahb_clk>,
+			 <&clock_gcc clk_gcc_mss_q6_bimc_axi_clk>,
+			 <&clock_gcc clk_gcc_boot_rom_ahb_clk>;
+		clock-names = "xo", "iface_clk", "bus_clk", "mem_clk";
+		qcom,proxy-clock-names = "xo";
+		qcom,active-clock-names = "iface_clk", "bus_clk", "mem_clk";
+
+		qcom,pas-id = <5>;
+		qcom,pil-mss-memsetup;
+		qcom,firmware-name = "modem";
+		qcom,pil-self-auth;
+		qcom,sysmon-id = <0>;
+		qcom,ssctl-instance-id = <0x12>;
+		qcom,qdsp6v56-1-10;
+		qcom,reset-clk;
+
+		memory-region = <&modem_mem>;
+	};
+
 	qcom,lpass@c200000 {
 		compatible = "qcom,pil-tz-generic";
 		reg = <0xc200000 0x00100>;
@@ -1858,6 +1729,7 @@
 
 		vdd-supply = <&gdsc_venus>;
 		qcom,proxy-reg-names = "vdd";
+		qcom,mas-crypto = <&mas_crypto>;
 
 		clocks = <&clock_gcc clk_gcc_venus0_vcodec0_clk>,
 			<&clock_gcc clk_gcc_venus0_ahb_clk>,
@@ -1888,12 +1760,96 @@
 		qcom,firmware-name = "venus";
 		memory-region = <&venus_mem>;
 	};
+
+	qcom,wcnss-wlan@0a000000 {
+		compatible = "qcom,wcnss_wlan";
+		reg = <0x0a000000 0x280000>,
+		      <0x0b011008 0x04>,
+		      <0x0a21b000 0x3000>,
+		      <0x03204000 0x00000100>,
+		      <0x03200800 0x00000200>,
+		      <0x0a100400 0x00000200>,
+		      <0x0a205050 0x00000200>,
+		      <0x0a219000 0x00000020>,
+		      <0x0a080488 0x00000008>,
+		      <0x0a080fb0 0x00000008>,
+		      <0x0a08040c 0x00000008>,
+		      <0x0a0120a8 0x00000008>,
+		      <0x0a012448 0x00000008>,
+		      <0x0a080c00 0x00000001>;
+
+		reg-names = "wcnss_mmio", "wcnss_fiq",
+			    "pronto_phy_base", "riva_phy_base",
+			    "riva_ccu_base", "pronto_a2xb_base",
+			    "pronto_ccpu_base", "pronto_saw2_base",
+			    "wlan_tx_phy_aborts","wlan_brdg_err_source",
+			    "wlan_tx_status", "alarms_txctl",
+			    "alarms_tactl", "pronto_mcu_base";
+
+		interrupts = <0 145 0 0 146 0>;
+		interrupt-names = "wcnss_wlantx_irq", "wcnss_wlanrx_irq";
+
+		qcom,pronto-vddmx-supply = <&pm8953_s7_level_ao>;
+		qcom,pronto-vddcx-supply = <&pm8953_s2_level>;
+		qcom,pronto-vddpx-supply = <&pm8953_l5>;
+		qcom,iris-vddxo-supply   = <&pm8953_l7>;
+		qcom,iris-vddrfa-supply  = <&pm8953_l19>;
+		qcom,iris-vddpa-supply   = <&pm8953_l9>;
+		qcom,iris-vdddig-supply  = <&pm8953_l5>;
+
+		qcom,iris-vddxo-voltage-level = <1800000 0 1800000>;
+		qcom,iris-vddrfa-voltage-level = <1300000 0 1300000>;
+		qcom,iris-vddpa-voltage-level = <3300000 0 3300000>;
+		qcom,iris-vdddig-voltage-level = <1800000 0 1800000>;
+
+		qcom,vddmx-voltage-level = <RPM_SMD_REGULATOR_LEVEL_TURBO
+					RPM_SMD_REGULATOR_LEVEL_NONE
+					RPM_SMD_REGULATOR_LEVEL_TURBO>;
+		qcom,vddcx-voltage-level = <RPM_SMD_REGULATOR_LEVEL_NOM
+					RPM_SMD_REGULATOR_LEVEL_NONE
+					RPM_SMD_REGULATOR_LEVEL_TURBO>;
+		qcom,vddpx-voltage-level = <1800000 0 1800000>;
+
+		qcom,iris-vddxo-current = <10000>;
+		qcom,iris-vddrfa-current = <100000>;
+		qcom,iris-vddpa-current = <515000>;
+		qcom,iris-vdddig-current = <10000>;
+
+		qcom,pronto-vddmx-current = <0>;
+		qcom,pronto-vddcx-current = <0>;
+		qcom,pronto-vddpx-current = <0>;
+
+		pinctrl-names = "wcnss_default", "wcnss_sleep",
+				"wcnss_gpio_default";
+		pinctrl-0 = <&wcnss_default>;
+		pinctrl-1 = <&wcnss_sleep>;
+		pinctrl-2 = <&wcnss_gpio_default>;
+
+		gpios = <&tlmm 76 0>, <&tlmm 77 0>, <&tlmm 78 0>,
+			<&tlmm 79 0>, <&tlmm 80 0>;
+
+		clocks = <&clock_gcc clk_xo_wlan_clk>,
+			 <&clock_gcc clk_rf_clk2>,
+			 <&clock_debug clk_gcc_debug_mux>,
+			 <&clock_gcc clk_wcnss_m_clk>;
+
+		clock-names = "xo", "rf_clk", "measure", "wcnss_debug";
+
+		qcom,has-autodetect-xo;
+		qcom,is-pronto-v3;
+		qcom,has-pronto-hw;
+		qcom,has-vsys-adc-channel;
+		qcom,has-a2xb-split-reg;
+		qcom,wcnss-adc_tm = <&pm8953_adc_tm>;
+	};
+
 };
 
 #include "pm8953-rpm-regulator.dtsi"
 #include "pm8953.dtsi"
 #include "msm8953-regulator.dtsi"
 #include "msm-gdsc-8916.dtsi"
+#include "msm8953-thermal.dtsi"
 
 &gdsc_venus {
 	clock-names = "bus_clk", "core_clk";
diff --git a/arch/arm64/boot/dts/qcom/pm660.dtsi b/arch/arm64/boot/dts/qcom/pm660.dtsi
index 502b2fe..903f170a 100644
--- a/arch/arm64/boot/dts/qcom/pm660.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm660.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -218,7 +218,6 @@
 				qcom,scale-function = <2>;
 				qcom,hw-settle-time = <2>;
 				qcom,fast-avg-setup = <0>;
-				qcom,vadc-thermal-node;
 			};
 
 			chan@4f {
@@ -230,7 +229,6 @@
 				qcom,scale-function = <2>;
 				qcom,hw-settle-time = <2>;
 				qcom,fast-avg-setup = <0>;
-				qcom,vadc-thermal-node;
 			};
 
 			chan@1d {
@@ -376,7 +374,7 @@
 			#clock-cells = <1>;
 			qcom,cxo-freq = <19200000>;
 			qcom,clkdiv-id = <1>;
-			qcom,clkdiv-init-freq = <19200000>;
+			qcom,clkdiv-init-freq = <9600000>;
 			status = "disabled";
 		};
 	};
diff --git a/arch/arm64/boot/dts/qcom/pm8005.dtsi b/arch/arm64/boot/dts/qcom/pm8005.dtsi
index 1f8d20e..aff92a8 100644
--- a/arch/arm64/boot/dts/qcom/pm8005.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8005.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -25,11 +25,12 @@
 			reg = <0x100 0x100>;
 		};
 
-		qcom,temp-alarm@2400 {
+		pm8005_tz: qcom,temp-alarm@2400 {
 			compatible = "qcom,qpnp-temp-alarm";
 			reg = <0x2400 0x100>;
 			interrupts = <0x4 0x24 0x0 IRQ_TYPE_EDGE_RISING>;
 			label = "pm8005_tz";
+			#thermal-sensor-cells = <0>;
 		};
 
 		pm8005_gpios: pinctrl@c000 {
@@ -79,3 +80,28 @@
 		};
 	};
 };
+
+&thermal_zones {
+	pm8005_tz {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&pm8005_tz>;
+		trips {
+			pm8005-trip0 {
+				temperature = <105000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+			pm8005-trip1 {
+				temperature = <125000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+			pm8005-trip2 {
+				temperature = <145000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/pm8937-rpm-regulator.dtsi b/arch/arm64/boot/dts/qcom/pm8937-rpm-regulator.dtsi
new file mode 100644
index 0000000..33a5e16
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/pm8937-rpm-regulator.dtsi
@@ -0,0 +1,366 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&rpm_bus {
+	rpm-regulator-smpa1 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "smpa";
+		qcom,resource-id = <1>;
+		qcom,regulator-type = <1>;
+		qcom,hpm-min-load = <100000>;
+		status = "disabled";
+
+		regulator-s1 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_s1";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-smpa2 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "smpa";
+		qcom,resource-id = <2>;
+		qcom,regulator-type = <1>;
+		qcom,hpm-min-load = <100000>;
+		status = "disabled";
+
+		regulator-s2 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_s2";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-smpa3 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "smpa";
+		qcom,resource-id = <3>;
+		qcom,regulator-type = <1>;
+		qcom,hpm-min-load = <100000>;
+		status = "disabled";
+
+		regulator-s3 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_s3";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-smpa4 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "smpa";
+		qcom,resource-id = <4>;
+		qcom,regulator-type = <1>;
+		qcom,hpm-min-load = <100000>;
+		status = "disabled";
+
+		regulator-s4 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_s4";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa2 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <2>;
+		qcom,regulator-type = <0>;
+		qcom,hpm-min-load = <10000>;
+		status = "disabled";
+
+		regulator-l2 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l2";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa3 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <3>;
+		qcom,regulator-type = <0>;
+		qcom,hpm-min-load = <10000>;
+		status = "disabled";
+
+		regulator-l3 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l3";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa5 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <5>;
+		qcom,regulator-type = <0>;
+		qcom,hpm-min-load = <10000>;
+		status = "disabled";
+
+		regulator-l5 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l5";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa6 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <6>;
+		qcom,regulator-type = <0>;
+		qcom,hpm-min-load = <10000>;
+		status = "disabled";
+
+		regulator-l6 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l6";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa7 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <7>;
+		qcom,regulator-type = <0>;
+		qcom,hpm-min-load = <10000>;
+		status = "disabled";
+
+		regulator-l7 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l7";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa8 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <8>;
+		qcom,regulator-type = <0>;
+		qcom,hpm-min-load = <10000>;
+		status = "disabled";
+
+		regulator-l8 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l8";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa9 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <9>;
+		qcom,regulator-type = <0>;
+		qcom,hpm-min-load = <10000>;
+		status = "disabled";
+
+		regulator-l9 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l9";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa10 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <10>;
+		qcom,regulator-type = <0>;
+		qcom,hpm-min-load = <10000>;
+		status = "disabled";
+
+		regulator-l10 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l10";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa11 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <11>;
+		qcom,regulator-type = <0>;
+		qcom,hpm-min-load = <10000>;
+		status = "disabled";
+
+		regulator-l11 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l11";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa12 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <12>;
+		qcom,regulator-type = <0>;
+		qcom,hpm-min-load = <10000>;
+		status = "disabled";
+
+		regulator-l12 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l12";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa13 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <13>;
+		qcom,regulator-type = <0>;
+		qcom,hpm-min-load = <5000>;
+		status = "disabled";
+
+		regulator-l13 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l13";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa14 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <14>;
+		qcom,regulator-type = <0>;
+		qcom,hpm-min-load = <5000>;
+		status = "disabled";
+
+		regulator-l14 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l14";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa15 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <15>;
+		qcom,regulator-type = <0>;
+		qcom,hpm-min-load = <5000>;
+		status = "disabled";
+
+		regulator-l15 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l15";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa16 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <16>;
+		qcom,regulator-type = <0>;
+		qcom,hpm-min-load = <5000>;
+		status = "disabled";
+
+		regulator-l16 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l16";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa17 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <17>;
+		qcom,regulator-type = <0>;
+		qcom,hpm-min-load = <10000>;
+		status = "disabled";
+
+		regulator-l17 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l17";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa19 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <19>;
+		qcom,regulator-type = <0>;
+		qcom,hpm-min-load = <10000>;
+		status = "disabled";
+
+		regulator-l19 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l19";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa22 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <22>;
+		qcom,regulator-type = <0>;
+		qcom,hpm-min-load = <10000>;
+		status = "disabled";
+
+		regulator-l22 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l22";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa23 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <23>;
+		qcom,regulator-type = <0>;
+		qcom,hpm-min-load = <10000>;
+		status = "disabled";
+
+		regulator-l23 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l23";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/pm8937.dtsi b/arch/arm64/boot/dts/qcom/pm8937.dtsi
new file mode 100644
index 0000000..6a61445
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/pm8937.dtsi
@@ -0,0 +1,309 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&spmi_bus {
+
+	qcom,pm8937@0 {
+		compatible = "qcom,spmi-pmic";
+		reg = <0x0 SPMI_USID>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		pm8937_revid: qcom,revid@100 {
+			compatible = "qcom,qpnp-revid";
+			reg = <0x100 0x100>;
+		};
+
+		qcom,power-on@800 {
+			compatible = "qcom,qpnp-power-on";
+			reg = <0x800 0x100>;
+			interrupts = <0x0 0x8 0x0 IRQ_TYPE_NONE>,
+				     <0x0 0x8 0x1 IRQ_TYPE_NONE>,
+				     <0x0 0x8 0x4 IRQ_TYPE_NONE>,
+				     <0x0 0x8 0x5 IRQ_TYPE_NONE>;
+			interrupt-names = "kpdpwr", "resin",
+				"resin-bark", "kpdpwr-resin-bark";
+			qcom,pon-dbc-delay = <15625>;
+			qcom,system-reset;
+
+			qcom,pon_1 {
+				qcom,pon-type = <0>;
+				qcom,pull-up = <1>;
+				linux,code = <116>;
+			};
+
+			qcom,pon_2 {
+				qcom,pon-type = <1>;
+				qcom,pull-up = <1>;
+				linux,code = <114>;
+			};
+		};
+
+		pm8937_temp_alarm: qcom,temp-alarm@2400 {
+			compatible = "qcom,qpnp-temp-alarm";
+			reg = <0x2400 0x100>;
+			interrupts = <0x0 0x24 0x0 IRQ_TYPE_EDGE_RISING>;
+			label = "pm8937_tz";
+			qcom,channel-num = <8>;
+			qcom,threshold-set = <0>;
+			qcom,temp_alarm-vadc = <&pm8937_vadc>;
+		};
+
+		pm8937_coincell: qcom,coincell@2800 {
+			compatible = "qcom,qpnp-coincell";
+			reg = <0x2800 0x100>;
+		};
+
+		pm8937_rtc: qcom,pm8937_rtc {
+			compatible = "qcom,qpnp-rtc";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			qcom,qpnp-rtc-write = <0>;
+			qcom,qpnp-rtc-alarm-pwrup = <0>;
+
+			qcom,pm8937_rtc_rw@6000 {
+				reg = <0x6000 0x100>;
+			};
+
+			qcom,pm8937_rtc_alarm@6100 {
+				reg = <0x6100 0x100>;
+				interrupts = <0x0 0x61 0x1 IRQ_TYPE_NONE>;
+			};
+		};
+
+		pm8937_mpps: mpps {
+			compatible = "qcom,spmi-mpp";
+			reg = <0xa000 0x400>;
+			interrupts = <0x0 0xa0 0 IRQ_TYPE_NONE>,
+				     <0x0 0xa1 0 IRQ_TYPE_NONE>,
+				     <0x0 0xa2 0 IRQ_TYPE_NONE>,
+				     <0x0 0xa3 0 IRQ_TYPE_NONE>;
+			interrupt-names = "pm8937_mpp1", "pm8937_mpp2",
+					  "pm8937_mpp3", "pm8937_mpp4";
+			gpio-controller;
+			#gpio-cells = <2>;
+		};
+
+		pm8937_gpios: gpios {
+			compatible = "qcom,spmi-gpio";
+			reg = <0xc000 0x800>;
+			interrupts = <0x0 0xc0 0 IRQ_TYPE_NONE>,
+				     <0x0 0xc1 0 IRQ_TYPE_NONE>,
+				     <0x0 0xc4 0 IRQ_TYPE_NONE>,
+				     <0x0 0xc6 0 IRQ_TYPE_NONE>,
+				     <0x0 0xc7 0 IRQ_TYPE_NONE>;
+			interrupt-names = "pm8937_gpio1", "pm8937_gpio2",
+					  "pm8937_gpio5", "pm8937_gpio7",
+					  "pm8937_gpio8";
+			gpio-controller;
+			#gpio-cells = <2>;
+			qcom,gpios-disallowed = <3 4 6>;
+		};
+
+		pm8937_vadc: vadc@3100 {
+			compatible = "qcom,qpnp-vadc";
+			reg = <0x3100 0x100>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0x0 0x31 0x0 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "eoc-int-en-set";
+			qcom,adc-bit-resolution = <15>;
+			qcom,adc-vdd-reference = <1800>;
+			qcom,vadc-poll-eoc;
+
+			chan@5 {
+				label = "vcoin";
+				reg = <5>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <1>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@7 {
+				label = "vph_pwr";
+				reg = <7>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <1>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@8 {
+				label = "die_temp";
+				reg = <8>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <3>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@9 {
+				label = "ref_625mv";
+				reg = <9>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@a {
+				label = "ref_1250v";
+				reg = <0xa>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@c {
+				label = "ref_buf_625mv";
+				reg = <0xc>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@36 {
+				label = "pa_therm0";
+				reg = <0x36>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "ratiometric";
+				qcom,scale-function = <2>;
+				qcom,hw-settle-time = <2>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@11 {
+				label = "pa_therm1";
+				reg = <0x11>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "ratiometric";
+				qcom,scale-function = <2>;
+				qcom,hw-settle-time = <2>;
+				qcom,fast-avg-setup = <0>;
+				qcom,vadc-thermal-node;
+			};
+
+			chan@32 {
+				label = "xo_therm";
+				reg = <0x32>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "ratiometric";
+				qcom,scale-function = <4>;
+				qcom,hw-settle-time = <2>;
+				qcom,fast-avg-setup = <0>;
+				qcom,vadc-thermal-node;
+			};
+
+			chan@3c {
+				label = "xo_therm_buf";
+				reg = <0x3c>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "ratiometric";
+				qcom,scale-function = <4>;
+				qcom,hw-settle-time = <2>;
+				qcom,fast-avg-setup = <0>;
+				qcom,vadc-thermal-node;
+			};
+
+			chan@13 {
+				label = "case_therm";
+				reg = <0x13>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "ratiometric";
+				qcom,scale-function = <2>;
+				qcom,hw-settle-time = <2>;
+				qcom,fast-avg-setup = <0>;
+				qcom,vadc-thermal-node;
+			};
+		};
+
+		pm8937_adc_tm: vadc@3400 {
+			compatible = "qcom,qpnp-adc-tm";
+			reg = <0x3400 0x100>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts =	<0x0 0x34 0x0 IRQ_TYPE_EDGE_RISING>,
+					<0x0 0x34 0x3 IRQ_TYPE_EDGE_RISING>,
+					<0x0 0x34 0x4 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names =	"eoc-int-en-set",
+						"high-thr-en-set",
+						"low-thr-en-set";
+			qcom,adc-bit-resolution = <15>;
+			qcom,adc-vdd-reference = <1800>;
+			qcom,adc_tm-vadc = <&pm8937_vadc>;
+
+			chan@36 {
+				label = "pa_therm0";
+				reg = <0x36>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "ratiometric";
+				qcom,scale-function = <2>;
+				qcom,hw-settle-time = <2>;
+				qcom,fast-avg-setup = <0>;
+				qcom,btm-channel-number = <0x48>;
+				qcom,thermal-node;
+			};
+
+			chan@7 {
+				label = "vph_pwr";
+				reg = <0x7>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <1>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+				qcom,btm-channel-number = <0x68>;
+			};
+		};
+
+	};
+
+	pm8937_1: qcom,pm8937@1 {
+		compatible = "qcom,spmi-pmic";
+		reg = <0x1 SPMI_USID>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		pm8937_pwm: pwm@bc00 {
+			status = "disabled";
+			compatible = "qcom,qpnp-pwm";
+			reg = <0xbc00 0x100>;
+			reg-names = "qpnp-lpg-channel-base";
+			qcom,channel-id = <0>;
+			qcom,supported-sizes = <6>, <9>;
+			#pwm-cells = <2>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/pm8953.dtsi b/arch/arm64/boot/dts/qcom/pm8953.dtsi
index 0ddb9f5..d77de72 100644
--- a/arch/arm64/boot/dts/qcom/pm8953.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8953.dtsi
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -49,7 +49,7 @@
 			};
 		};
 
-		pm8953_temp_alarm: qcom,temp-alarm@2400 {
+		pm8953_tz: qcom,temp-alarm@2400 {
 			compatible = "qcom,qpnp-temp-alarm";
 			reg = <0x2400 0x100>;
 			interrupts = <0x0 0x24 0x0 IRQ_TYPE_EDGE_RISING>;
@@ -57,6 +57,7 @@
 			qcom,channel-num = <8>;
 			qcom,threshold-set = <0>;
 			qcom,temp_alarm-vadc = <&pm8953_vadc>;
+			#thermal-sensor-cells = <0>;
 		};
 
 		pm8953_coincell: qcom,coincell@2800 {
@@ -65,105 +66,34 @@
 		};
 
 		pm8953_mpps: mpps {
-			compatible = "qcom,qpnp-pin";
-			spmi-dev-container;
+			compatible = "qcom,spmi-mpp";
+			reg = <0xa000 0x400>;
+
+			interrupts = <0x0 0xa0 0 IRQ_TYPE_NONE>,
+				<0x0 0xa1 0 IRQ_TYPE_NONE>,
+				<0x0 0xa2 0 IRQ_TYPE_NONE>,
+				<0x0 0xa3 0 IRQ_TYPE_NONE>;
+			interrupt-names = "pm8953_mpp1", "pm8953_mpp2",
+					"pm8953_mpp3", "pm8953_mpp4";
+
 			gpio-controller;
 			#gpio-cells = <2>;
-			#address-cells = <1>;
-			#size-cells = <1>;
-			label = "pm8953-mpp";
-
-			mpp@a000 {
-				reg = <0xa000 0x100>;
-				qcom,pin-num = <1>;
-				status = "disabled";
-			};
-
-			mpp@a100 {
-				reg = <0xa100 0x100>;
-				qcom,pin-num = <2>;
-				/* MPP2 - PA_THERM config */
-				qcom,mode = <4>; /* AIN input */
-				qcom,invert = <1>; /* Enable MPP */
-				qcom,ain-route = <1>; /* AMUX 6 */
-				qcom,master-en = <1>;
-				qcom,src-sel = <0>; /* Function constant */
-			};
-
-			mpp@a200 {
-				reg = <0xa200 0x100>;
-				qcom,pin-num = <3>;
-				status = "disabled";
-			};
-
-			mpp@a300 {
-				reg = <0xa300 0x100>;
-				qcom,pin-num = <4>;
-				/* MPP4 - CASE_THERM config */
-				qcom,mode = <4>; /* AIN input */
-				qcom,invert = <1>; /* Enable MPP */
-				qcom,ain-route = <3>; /* AMUX 8 */
-				qcom,master-en = <1>;
-				qcom,src-sel = <0>; /* Function constant */
-			};
 		};
 
 		pm8953_gpios: gpios {
-			spmi-dev-container;
-			compatible = "qcom,qpnp-pin";
+			compatible = "qcom,spmi-gpio";
+			reg = <0xc000 0x800>;
+
+			interrupts = <0x0 0xc0 0 IRQ_TYPE_NONE>,
+				<0x0 0xc3 0 IRQ_TYPE_NONE>,
+				<0x0 0xc6 0 IRQ_TYPE_NONE>,
+				<0x0 0xc7 0 IRQ_TYPE_NONE>;
+			interrupt-names = "pm8953_gpio1", "pm8953_gpio4",
+					"pm8953_gpio7", "pm8953_gpio8";
+
 			gpio-controller;
 			#gpio-cells = <2>;
-			#address-cells = <1>;
-			#size-cells = <1>;
-			label = "pm8953-gpio";
-
-			gpio@c000 {
-				reg = <0xc000 0x100>;
-				qcom,pin-num = <1>;
-				status = "disabled";
-			};
-
-			gpio@c100 {
-				reg = <0xc100 0x100>;
-				qcom,pin-num = <2>;
-				status = "disabled";
-			};
-
-			gpio@c200 {
-				reg = <0xc200 0x100>;
-				qcom,pin-num = <3>;
-				status = "disabled";
-			};
-
-			gpio@c300 {
-				reg = <0xc300 0x100>;
-				qcom,pin-num = <4>;
-				status = "disabled";
-			};
-
-			gpio@c400 {
-				reg = <0xc400 0x100>;
-				qcom,pin-num = <5>;
-				status = "disabled";
-			};
-
-			gpio@c500 {
-				reg = <0xc500 0x100>;
-				qcom,pin-num = <6>;
-				status = "disabled";
-			};
-
-			gpio@c600 {
-				reg = <0xc600 0x100>;
-				qcom,pin-num = <7>;
-				status = "disabled";
-			};
-
-			gpio@c700 {
-				reg = <0xc700 0x100>;
-				qcom,pin-num = <8>;
-				status = "disabled";
-			};
+			qcom,gpios-disallowed = <2 3 5 6>;
 		};
 
 		pm8953_vadc: vadc@3100 {
@@ -373,3 +303,29 @@
 		};
 	};
 };
+
+&thermal_zones {
+	pm8953_tz {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&pm8953_tz>;
+
+		trips {
+			pm8953_trip0: pm8953-trip0 {
+				temperature = <105000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+			pm8953_trip1: pm8953-trip1 {
+				temperature = <125000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+			pm8953_trip2: pm8953-trip2 {
+				temperature = <145000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/pmi632.dtsi b/arch/arm64/boot/dts/qcom/pmi632.dtsi
index b0fb23c..074b7da 100644
--- a/arch/arm64/boot/dts/qcom/pmi632.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi632.dtsi
@@ -32,6 +32,162 @@
 			qcom,secondary-pon-reset;
 		};
 
+		pmi632_vadc: vadc@3100 {
+			compatible = "qcom,qpnp-vadc-hc";
+			reg = <0x3100 0x100>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0x0 0x31 0x0 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "eoc-int-en-set";
+			qcom,adc-vdd-reference = <1875>;
+			qcom,adc-full-scale-code = <0x70e4>;
+
+			chan@0 {
+				label = "ref_gnd";
+				reg = <0>;
+				qcom,decimation = <2>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+				qcom,cal-val = <0>;
+			};
+
+			chan@1 {
+				label = "ref_1250v";
+				reg = <1>;
+				qcom,decimation = <2>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+				qcom,cal-val = <0>;
+			};
+
+			chan@83 {
+				label = "vph_pwr";
+				reg = <0x83>;
+				qcom,decimation = <2>;
+				qcom,pre-div-channel-scaling = <1>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+				qcom,cal-val = <0>;
+			};
+
+			chan@84 {
+				label = "vbat_sns";
+				reg = <0x84>;
+				qcom,decimation = <2>;
+				qcom,pre-div-channel-scaling = <1>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+				qcom,cal-val = <0>;
+			};
+
+			chan@6 {
+				label = "die_temp";
+				reg = <6>;
+				qcom,decimation = <2>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <19>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+				qcom,cal-val = <0>;
+			};
+
+			chan@7 {
+				label = "usb_in_i";
+				reg = <7>;
+				qcom,decimation = <2>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <21>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+				qcom,cal-val = <0>;
+			};
+
+			chan@8 {
+				label = "usb_in_v";
+				reg = <8>;
+				qcom,decimation = <2>;
+				qcom,pre-div-channel-scaling = <8>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+				qcom,cal-val = <0>;
+			};
+
+			chan@9 {
+				label = "chg_temp";
+				reg = <9>;
+				qcom,decimation = <2>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <18>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+				qcom,cal-val = <0>;
+			};
+
+			chan@4a {
+				label = "bat_therm";
+				reg = <0x4a>;
+				qcom,decimation = <2>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "ratiometric";
+				qcom,scale-function = <17>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+				qcom,cal-val = <0>;
+			};
+
+			chan@4b {
+				label = "bat_id";
+				reg = <0x4b>;
+				qcom,decimation = <2>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "ratiometric";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+				qcom,cal-val = <0>;
+			};
+
+			chan@4c {
+				label = "xo_therm";
+				reg = <0x4c>;
+				qcom,decimation = <2>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "ratiometric";
+				qcom,scale-function = <4>;
+				qcom,hw-settle-time = <8>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+
+			chan@1e {
+				label = "mid_chg";
+				reg = <0x1e>;
+				qcom,decimation = <2>;
+				qcom,pre-div-channel-scaling = <3>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+				qcom,cal-val = <0>;
+			};
+
+		};
+
 		pmi632_tz: qcom,temp-alarm@2400 {
 			compatible = "qcom,qpnp-temp-alarm";
 			reg = <0x2400 0x100>;
@@ -49,7 +205,7 @@
 					<0x2 0xc4 0 IRQ_TYPE_NONE>,
 					<0x2 0xc5 0 IRQ_TYPE_NONE>,
 					<0x2 0xc6 0 IRQ_TYPE_NONE>,
-					<0x2 0xc7 0 IRQ_TYPE_NONE>,
+					<0x2 0xc7 0 IRQ_TYPE_NONE>;
 			interrupt-names = "pmi632_gpio2", "pmi632_gpio3",
 					"pmi632_gpio4", "pmi632_gpio5",
 					"pmi632_gpio6", "pmi632_gpio7",
@@ -66,6 +222,14 @@
 		#address-cells = <2>;
 		#size-cells = <0>;
 
+		pmi632_vib: qcom,vibrator@5700 {
+			compatible = "qcom,qpnp-vibrator-ldo";
+			reg = <0x5700 0x100>;
+			qcom,vib-ldo-volt-uv = <1504000>;
+			qcom,vib-overdrive-volt-uv = <3544000>;
+			status = "disabled";
+		};
+
 		pmi632_pwm_1: pwm@b300 {
 			compatible = "qcom,qpnp-pwm";
 			reg = <0xb300 0x100>;
@@ -115,5 +279,30 @@
 			#pwm-cells = <2>;
 			status = "disabled";
 		};
+
+		pmi632_lcdb: qpnp-lcdb@ec00 {
+			compatible = "qcom,qpnp-lcdb-regulator";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			reg = <0xec00 0x100>;
+			interrupts = <0x3 0xec 0x1 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "sc-irq";
+
+			qcom,pmic-revid = <&pmi632_revid>;
+
+			lcdb_ldo_vreg: ldo {
+				label = "ldo";
+				regulator-name = "lcdb_ldo";
+				regulator-min-microvolt = <4000000>;
+				regulator-max-microvolt = <6000000>;
+			};
+
+			lcdb_ncp_vreg: ncp {
+				label = "ncp";
+				regulator-name = "lcdb_ncp";
+				regulator-min-microvolt = <4000000>;
+				regulator-max-microvolt = <6000000>;
+			};
+		};
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/pmi8937.dtsi b/arch/arm64/boot/dts/qcom/pmi8937.dtsi
new file mode 100644
index 0000000..a7aa08a
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/pmi8937.dtsi
@@ -0,0 +1,524 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/msm/power-on.h>
+
+&spmi_bus {
+
+	qcom,pmi8937@2 {
+		compatible = "qcom,spmi-pmic";
+		reg = <0x2 SPMI_USID>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		pmi8937_revid: qcom,revid@100 {
+			compatible = "qcom,qpnp-revid";
+			reg = <0x100 0x100>;
+		};
+
+		qcom,power-on@800 {
+			compatible = "qcom,qpnp-power-on";
+			reg = <0x800 0x100>;
+			qcom,secondary-pon-reset;
+			qcom,hard-reset-poweroff-type =
+				<PON_POWER_OFF_SHUTDOWN>;
+
+			pon_perph_reg: qcom,pon_perph_reg {
+				regulator-name = "pon_spare_reg";
+				qcom,pon-spare-reg-addr = <0x8c>;
+				qcom,pon-spare-reg-bit = <1>;
+			};
+		};
+
+		pmi8937_vadc: vadc@3100 {
+			compatible = "qcom,qpnp-vadc";
+			reg = <0x3100 0x100>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0x2 0x31 0x0 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "eoc-int-en-set";
+			qcom,adc-bit-resolution = <15>;
+			qcom,adc-vdd-reference = <1800>;
+			qcom,vadc-poll-eoc;
+
+			chan@0 {
+				label = "usbin";
+				reg = <0>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <4>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@1 {
+				label = "dcin";
+				reg = <1>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <4>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@3 {
+				label = "vchg_sns";
+				reg = <3>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <1>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@9 {
+				label = "ref_625mv";
+				reg = <9>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@a {
+				label = "ref_1250v";
+				reg = <0xa>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@d {
+				label = "chg_temp";
+				reg = <0xd>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <16>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@43 {
+				label = "usb_dp";
+				reg = <0x43>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <1>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@44 {
+				label = "usb_dm";
+				reg = <0x44>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <1>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+		};
+
+		pmi8937_mpps: mpps {
+			compatible = "qcom,spmi-mpp";
+			reg = <0xa000 0x400>;
+			interrupts = <0x2 0xa0 0 IRQ_TYPE_NONE>,
+				     <0x2 0xa1 0 IRQ_TYPE_NONE>,
+				     <0x2 0xa2 0 IRQ_TYPE_NONE>,
+				     <0x2 0xa3 0 IRQ_TYPE_NONE>;
+			interrupt-names = "pmi8937_mpp1", "pmi8937_mpp2",
+					  "pmi8937_mpp3", "pmi8937_mpp4";
+			gpio-controller;
+			#gpio-cells = <2>;
+		};
+
+		pmi8937_charger: qcom,qpnp-smbcharger {
+			compatible = "qcom,qpnp-smbcharger";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			qcom,iterm-ma = <100>;
+			qcom,float-voltage-mv = <4200>;
+			qcom,resume-delta-mv = <200>;
+			qcom,chg-inhibit-fg;
+			qcom,rparasitic-uohm = <100000>;
+			qcom,bms-psy-name = "bms";
+			qcom,thermal-mitigation = <1500 700 600 0>;
+			qcom,parallel-usb-min-current-ma = <1400>;
+			qcom,parallel-usb-9v-min-current-ma = <900>;
+			qcom,parallel-allowed-lowering-ma = <500>;
+			qcom,pmic-revid = <&pmi8937_revid>;
+			qcom,force-aicl-rerun;
+			qcom,aicl-rerun-period-s = <180>;
+			qcom,autoadjust-vfloat;
+
+			qcom,chgr@1000 {
+				reg = <0x1000 0x100>;
+				interrupts =	<0x2 0x10 0x0 IRQ_TYPE_NONE>,
+						<0x2 0x10 0x1 IRQ_TYPE_NONE>,
+						<0x2 0x10 0x2 IRQ_TYPE_NONE>,
+						<0x2 0x10 0x3 IRQ_TYPE_NONE>,
+						<0x2 0x10 0x4 IRQ_TYPE_NONE>,
+						<0x2 0x10 0x5 IRQ_TYPE_NONE>,
+						<0x2 0x10 0x6 IRQ_TYPE_NONE>,
+						<0x2 0x10 0x7 IRQ_TYPE_NONE>;
+
+				interrupt-names =	"chg-error",
+							"chg-inhibit",
+							"chg-prechg-sft",
+							"chg-complete-chg-sft",
+							"chg-p2f-thr",
+							"chg-rechg-thr",
+							"chg-taper-thr",
+							"chg-tcc-thr";
+			};
+
+			qcom,otg@1100 {
+				reg = <0x1100 0x100>;
+				interrupts =	<0x2 0x11 0x0 IRQ_TYPE_NONE>,
+						<0x2 0x11 0x1 IRQ_TYPE_NONE>,
+						<0x2 0x11 0x3 IRQ_TYPE_NONE>;
+				interrupt-names =	"otg-fail",
+							"otg-oc",
+							"usbid-change";
+			};
+
+			qcom,bat-if@1200 {
+				reg = <0x1200 0x100>;
+				interrupts =	<0x2 0x12 0x0 IRQ_TYPE_NONE>,
+						<0x2 0x12 0x1 IRQ_TYPE_NONE>,
+						<0x2 0x12 0x2 IRQ_TYPE_NONE>,
+						<0x2 0x12 0x3 IRQ_TYPE_NONE>,
+						<0x2 0x12 0x4 IRQ_TYPE_NONE>,
+						<0x2 0x12 0x5 IRQ_TYPE_NONE>,
+						<0x2 0x12 0x6 IRQ_TYPE_NONE>,
+						<0x2 0x12 0x7 IRQ_TYPE_NONE>;
+
+				interrupt-names =	"batt-hot",
+							"batt-warm",
+							"batt-cold",
+							"batt-cool",
+							"batt-ov",
+							"batt-low",
+							"batt-missing",
+							"batt-term-missing";
+			};
+
+			qcom,usb-chgpth@1300 {
+				reg = <0x1300 0x100>;
+				interrupts =	<0x2 0x13 0x0 IRQ_TYPE_NONE>,
+						<0x2 0x13 0x1 IRQ_TYPE_NONE>,
+						<0x2 0x13 0x2 IRQ_TYPE_NONE>,
+						<0x2 0x13 0x5 IRQ_TYPE_NONE>;
+
+				interrupt-names =	"usbin-uv",
+							"usbin-ov",
+							"usbin-src-det",
+							"aicl-done";
+			};
+
+			qcom,dc-chgpth@1400 {
+				reg = <0x1400 0x100>;
+				interrupts =	<0x2 0x14 0x0 IRQ_TYPE_NONE>,
+						<0x2 0x14 0x1 IRQ_TYPE_NONE>;
+				interrupt-names =	"dcin-uv",
+							"dcin-ov";
+			};
+
+			qcom,chgr-misc@1600 {
+				reg = <0x1600 0x100>;
+				interrupts =	<0x2 0x16 0x0 IRQ_TYPE_NONE>,
+						<0x2 0x16 0x1 IRQ_TYPE_NONE>,
+						<0x2 0x16 0x2 IRQ_TYPE_NONE>,
+						<0x2 0x16 0x3 IRQ_TYPE_NONE>,
+						<0x2 0x16 0x4 IRQ_TYPE_NONE>,
+						<0x2 0x16 0x5 IRQ_TYPE_NONE>;
+
+				interrupt-names =	"power-ok",
+							"temp-shutdown",
+							"wdog-timeout",
+							"flash-fail",
+							"otst2",
+							"otst3";
+			};
+
+			smbcharger_charger_otg: qcom,smbcharger-boost-otg {
+				regulator-name = "smbcharger_charger_otg";
+			};
+		};
+
+		pmi8937_fg: qcom,fg {
+			compatible = "qcom,qpnp-fg";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			qcom,resume-soc = <95>;
+			status = "okay";
+			qcom,bcl-lm-threshold-ma = <127>;
+			qcom,bcl-mh-threshold-ma = <405>;
+			qcom,fg-iterm-ma = <150>;
+			qcom,fg-chg-iterm-ma = <100>;
+			qcom,pmic-revid = <&pmi8937_revid>;
+			qcom,fg-cutoff-voltage-mv = <3500>;
+			qcom,cycle-counter-en;
+			qcom,capacity-learning-on;
+
+			qcom,fg-soc@4000 {
+				status = "okay";
+				reg = <0x4000 0x100>;
+				interrupts =	<0x2 0x40 0x0 IRQ_TYPE_NONE>,
+						<0x2 0x40 0x1 IRQ_TYPE_NONE>,
+						<0x2 0x40 0x2 IRQ_TYPE_NONE>,
+						<0x2 0x40 0x3 IRQ_TYPE_NONE>,
+						<0x2 0x40 0x4 IRQ_TYPE_NONE>,
+						<0x2 0x40 0x5 IRQ_TYPE_NONE>,
+						<0x2 0x40 0x6 IRQ_TYPE_NONE>;
+
+				interrupt-names =	"high-soc",
+							"low-soc",
+							"full-soc",
+							"empty-soc",
+							"delta-soc",
+							"first-est-done",
+							"update-soc";
+			};
+
+			qcom,fg-batt@4100 {
+				reg = <0x4100 0x100>;
+				interrupts =	<0x2 0x41 0x0 IRQ_TYPE_NONE>,
+						<0x2 0x41 0x1 IRQ_TYPE_NONE>,
+						<0x2 0x41 0x2 IRQ_TYPE_NONE>,
+						<0x2 0x41 0x3 IRQ_TYPE_NONE>,
+						<0x2 0x41 0x4 IRQ_TYPE_NONE>,
+						<0x2 0x41 0x5 IRQ_TYPE_NONE>,
+						<0x2 0x41 0x6 IRQ_TYPE_NONE>,
+						<0x2 0x41 0x7 IRQ_TYPE_NONE>;
+
+				interrupt-names =	"soft-cold",
+							"soft-hot",
+							"vbatt-low",
+							"batt-ided",
+							"batt-id-req",
+							"batt-unknown",
+							"batt-missing",
+							"batt-match";
+			};
+
+			qcom,revid-tp-rev@1f1 {
+				reg = <0x1f1 0x1>;
+			};
+
+			qcom,fg-memif@4400 {
+				status = "okay";
+				reg = <0x4400 0x100>;
+				interrupts =	<0x2 0x44 0x0 IRQ_TYPE_NONE>,
+						<0x2 0x44 0x2 IRQ_TYPE_NONE>;
+
+				interrupt-names =	"mem-avail",
+							"data-rcvry-sug";
+			};
+		};
+
+		bcl@4200 {
+			compatible = "qcom,msm-bcl";
+			reg = <0x4200 0xff>;
+			reg-names = "fg_user_adc";
+			interrupts = <0x2 0x42 0x0 IRQ_TYPE_NONE>,
+				     <0x2 0x42 0x1 IRQ_TYPE_NONE>;
+			interrupt-names = "bcl-high-ibat-int",
+					"bcl-low-vbat-int";
+			qcom,vbat-scaling-factor = <39000>;
+			qcom,vbat-gain-numerator = <1>;
+			qcom,vbat-gain-denominator = <128>;
+			qcom,vbat-polling-delay-ms = <100>;
+			qcom,ibat-scaling-factor = <39000>;
+			qcom,ibat-gain-numerator = <1>;
+			qcom,ibat-gain-denominator = <128>;
+			qcom,ibat-offset-numerator = <1200>;
+			qcom,ibat-offset-denominator = <1>;
+			qcom,ibat-polling-delay-ms = <100>;
+		};
+
+		qcom,leds@a100 {
+			compatible = "qcom,leds-qpnp";
+			reg = <0xa100 0x100>;
+			label = "mpp";
+		};
+	};
+
+	qcom,pmi8937@3 {
+		compatible = "qcom,spmi-pmic";
+		reg = <0x3 SPMI_USID>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		pmi8937_pwm: pwm@b000 {
+			status = "disabled";
+			compatible = "qcom,qpnp-pwm";
+			reg = <0xb000 0x100>;
+			reg-names = "qpnp-lpg-channel-base";
+			qcom,channel-id = <0>;
+			qcom,supported-sizes = <6>, <9>;
+			#pwm-cells = <2>;
+		};
+
+		wled: qcom,leds@d800 {
+			compatible = "qcom,qpnp-wled";
+			reg = <0xd800 0x100>,
+				<0xd900 0x100>;
+			reg-names = "qpnp-wled-ctrl-base",
+					"qpnp-wled-sink-base";
+			interrupts = <0x3 0xd8 0x2 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "sc-irq";
+			status = "okay";
+			linux,name = "wled";
+			linux,default-trigger = "bkl-trigger";
+			qcom,fdbk-output = "auto";
+			qcom,vref-mv = <350>;
+			qcom,switch-freq-khz = <800>;
+			qcom,ovp-mv = <29500>;
+			qcom,ilim-ma = <980>;
+			qcom,boost-duty-ns = <26>;
+			qcom,mod-freq-khz = <9600>;
+			qcom,dim-mode = "hybrid";
+			qcom,dim-method = "linear";
+			qcom,hyb-thres = <625>;
+			qcom,sync-dly-us = <800>;
+			qcom,fs-curr-ua = <20000>;
+			qcom,led-strings-list = [00 01];
+			qcom,en-ext-pfet-sc-pro;
+			qcom,pmic-revid = <&pmi8937_revid>;
+			qcom,cons-sync-write-delay-us = <1000>;
+		};
+
+		flash_led: qcom,leds@d300 {
+			compatible = "qcom,qpnp-flash-led";
+			status = "okay";
+			reg = <0xd300 0x100>;
+			label = "flash";
+			qcom,headroom = <500>;
+			qcom,startup-dly = <128>;
+			qcom,clamp-curr = <200>;
+			qcom,pmic-charger-support;
+			qcom,self-check-enabled;
+			qcom,thermal-derate-enabled;
+			qcom,thermal-derate-threshold = <100>;
+			qcom,thermal-derate-rate = "5_PERCENT";
+			qcom,current-ramp-enabled;
+			qcom,ramp_up_step = "6P7_US";
+			qcom,ramp_dn_step = "6P7_US";
+			qcom,vph-pwr-droop-enabled;
+			qcom,vph-pwr-droop-threshold = <3000>;
+			qcom,vph-pwr-droop-debounce-time = <10>;
+			qcom,headroom-sense-ch0-enabled;
+			qcom,headroom-sense-ch1-enabled;
+			qcom,pmic-revid = <&pmi8937_revid>;
+
+			pmi8937_flash0: qcom,flash_0 {
+				label = "flash";
+				qcom,led-name = "led:flash_0";
+				qcom,default-led-trigger =
+						"flash0_trigger";
+				qcom,max-current = <1000>;
+				qcom,duration = <1280>;
+				qcom,id = <0>;
+				qcom,current = <625>;
+			};
+
+			pmi8937_flash1: qcom,flash_1 {
+				label = "flash";
+				qcom,led-name = "led:flash_1";
+				qcom,default-led-trigger =
+						"flash1_trigger";
+				qcom,max-current = <1000>;
+				qcom,duration = <1280>;
+				qcom,id = <1>;
+				qcom,current = <625>;
+			};
+
+			pmi8937_torch0: qcom,torch_0 {
+				label = "torch";
+				qcom,led-name = "led:torch_0";
+				qcom,default-led-trigger =
+						"torch0_trigger";
+				qcom,max-current = <200>;
+				qcom,id = <0>;
+				qcom,current = <120>;
+			};
+
+			pmi8937_torch1: qcom,torch_1 {
+				label = "torch";
+				qcom,led-name = "led:torch_1";
+				qcom,default-led-trigger =
+						"torch1_trigger";
+				qcom,max-current = <200>;
+				qcom,id = <1>;
+				qcom,current = <120>;
+			};
+
+			pmi8937_switch: qcom,switch {
+				label = "switch";
+				qcom,led-name = "led:switch";
+				qcom,default-led-trigger =
+						"switch_trigger";
+				qcom,max-current = <1000>;
+				qcom,duration = <1280>;
+				qcom,id = <2>;
+				qcom,current = <625>;
+				reg0 {
+					regulator-name = "pon_spare_reg";
+				};
+			};
+		};
+
+		pmi_haptic: qcom,haptic@c000 {
+			compatible = "qcom,qpnp-haptic";
+			reg = <0xc000 0x100>;
+			interrupts = <0x3 0xc0 0x0 IRQ_TYPE_EDGE_BOTH>,
+				     <0x3 0xc0 0x1 IRQ_TYPE_EDGE_BOTH>;
+			interrupt-names = "sc-irq", "play-irq";
+			qcom,pmic-revid = <&pmi8937_revid>;
+			vcc_pon-supply = <&pon_perph_reg>;
+			qcom,play-mode = "direct";
+			qcom,wave-play-rate-us = <5263>;
+			qcom,actuator-type = "lra";
+			qcom,wave-shape = "square";
+			qcom,vmax-mv = <2000>;
+			qcom,ilim-ma = <800>;
+			qcom,sc-deb-cycles = <8>;
+			qcom,int-pwm-freq-khz = <505>;
+			qcom,en-brake;
+			qcom,brake-pattern = [03 03 00 00];
+			qcom,use-play-irq;
+			qcom,use-sc-irq;
+			qcom,wave-samples = [3e 3e 3e 3e 3e 3e 3e 3e];
+			qcom,wave-rep-cnt = <1>;
+			qcom,wave-samp-rep-cnt = <1>;
+			qcom,lra-auto-res-mode = "qwd";
+			qcom,lra-high-z = "opt1";
+			qcom,lra-res-cal-period = <4>;
+			qcom,correct-lra-drive-freq;
+			qcom,misc-trim-error-rc19p2-clk-reg-present;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/pmi8940.dtsi b/arch/arm64/boot/dts/qcom/pmi8940.dtsi
new file mode 100644
index 0000000..c6d5c87
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/pmi8940.dtsi
@@ -0,0 +1,594 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/msm/power-on.h>
+
+&spmi_bus {
+	qcom,pmi8940@2 {
+		compatible = "qcom,spmi-pmic";
+		reg = <0x2 SPMI_USID>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		pmi8940_revid: qcom,revid@100 {
+			compatible = "qcom,qpnp-revid";
+			reg = <0x100 0x100>;
+		};
+
+		qcom,power-on@800 {
+			compatible = "qcom,qpnp-power-on";
+			reg = <0x800 0x100>;
+			qcom,secondary-pon-reset;
+			qcom,hard-reset-poweroff-type =
+				<PON_POWER_OFF_SHUTDOWN>;
+
+			pon_perph_reg: qcom,pon_perph_reg {
+				regulator-name = "pon_spare_reg";
+				qcom,pon-spare-reg-addr = <0x8c>;
+				qcom,pon-spare-reg-bit = <1>;
+			};
+		};
+
+		pmi8940_vadc: vadc@3100 {
+			compatible = "qcom,qpnp-vadc";
+			reg = <0x3100 0x100>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0x2 0x31 0x0 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "eoc-int-en-set";
+			qcom,adc-bit-resolution = <15>;
+			qcom,adc-vdd-reference = <1800>;
+			qcom,vadc-poll-eoc;
+
+			chan@0 {
+				label = "usbin";
+				reg = <0>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <4>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@1 {
+				label = "dcin";
+				reg = <1>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <4>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@3 {
+				label = "vchg_sns";
+				reg = <3>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <1>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@9 {
+				label = "ref_625mv";
+				reg = <9>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@a {
+				label = "ref_1250v";
+				reg = <0xa>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@d {
+				label = "chg_temp";
+				reg = <0xd>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <16>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@43 {
+				label = "usb_dp";
+				reg = <0x43>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <1>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@44 {
+				label = "usb_dm";
+				reg = <0x44>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <1>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+		};
+
+		pmi8940_mpps: mpps {
+			compatible = "qcom,spmi-mpp";
+			reg = <0xa000 0x400>;
+			interrupts = <0x2 0xa0 0 IRQ_TYPE_NONE>,
+				     <0x2 0xa1 0 IRQ_TYPE_NONE>,
+				     <0x2 0xa2 0 IRQ_TYPE_NONE>,
+				     <0x2 0xa3 0 IRQ_TYPE_NONE>;
+			interrupt-names = "pmi8940_mpp1", "pmi8940_mpp2",
+					  "pmi8940_mpp3", "pmi8940_mpp4";
+			gpio-controller;
+			#gpio-cells = <2>;
+		};
+
+		pmi8940_charger: qcom,qpnp-smbcharger {
+			compatible = "qcom,qpnp-smbcharger";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			qcom,iterm-ma = <100>;
+			qcom,float-voltage-mv = <4200>;
+			qcom,resume-delta-mv = <200>;
+			qcom,chg-inhibit-fg;
+			qcom,rparasitic-uohm = <100000>;
+			qcom,bms-psy-name = "bms";
+			qcom,thermal-mitigation = <1500 700 600 0>;
+			qcom,pmic-revid = <&pmi8940_revid>;
+			qcom,force-aicl-rerun;
+			qcom,aicl-rerun-period-s = <180>;
+			qcom,autoadjust-vfloat;
+
+			qcom,chgr@1000 {
+				reg = <0x1000 0x100>;
+				interrupts =	<0x2 0x10 0x0 IRQ_TYPE_NONE>,
+						<0x2 0x10 0x1 IRQ_TYPE_NONE>,
+						<0x2 0x10 0x2 IRQ_TYPE_NONE>,
+						<0x2 0x10 0x3 IRQ_TYPE_NONE>,
+						<0x2 0x10 0x4 IRQ_TYPE_NONE>,
+						<0x2 0x10 0x5 IRQ_TYPE_NONE>,
+						<0x2 0x10 0x6 IRQ_TYPE_NONE>,
+						<0x2 0x10 0x7 IRQ_TYPE_NONE>;
+
+				interrupt-names =	"chg-error",
+							"chg-inhibit",
+							"chg-prechg-sft",
+							"chg-complete-chg-sft",
+							"chg-p2f-thr",
+							"chg-rechg-thr",
+							"chg-taper-thr",
+							"chg-tcc-thr";
+			};
+
+			qcom,otg@1100 {
+				reg = <0x1100 0x100>;
+				interrupts =	<0x2 0x11 0x0 IRQ_TYPE_NONE>,
+						<0x2 0x11 0x1 IRQ_TYPE_NONE>,
+						<0x2 0x11 0x3 IRQ_TYPE_NONE>;
+				interrupt-names =	"otg-fail",
+							"otg-oc",
+							"usbid-change";
+			};
+
+			qcom,bat-if@1200 {
+				reg = <0x1200 0x100>;
+				interrupts =	<0x2 0x12 0x0 IRQ_TYPE_NONE>,
+						<0x2 0x12 0x1 IRQ_TYPE_NONE>,
+						<0x2 0x12 0x2 IRQ_TYPE_NONE>,
+						<0x2 0x12 0x3 IRQ_TYPE_NONE>,
+						<0x2 0x12 0x4 IRQ_TYPE_NONE>,
+						<0x2 0x12 0x5 IRQ_TYPE_NONE>,
+						<0x2 0x12 0x6 IRQ_TYPE_NONE>,
+						<0x2 0x12 0x7 IRQ_TYPE_NONE>;
+
+				interrupt-names =	"batt-hot",
+							"batt-warm",
+							"batt-cold",
+							"batt-cool",
+							"batt-ov",
+							"batt-low",
+							"batt-missing",
+							"batt-term-missing";
+			};
+
+			qcom,usb-chgpth@1300 {
+				reg = <0x1300 0x100>;
+				interrupts =	<0x2 0x13 0x0 IRQ_TYPE_NONE>,
+						<0x2 0x13 0x1 IRQ_TYPE_NONE>,
+						<0x2 0x13 0x2 IRQ_TYPE_NONE>,
+						<0x2 0x13 0x5 IRQ_TYPE_NONE>;
+
+				interrupt-names =	"usbin-uv",
+							"usbin-ov",
+							"usbin-src-det",
+							"aicl-done";
+			};
+
+			qcom,dc-chgpth@1400 {
+				reg = <0x1400 0x100>;
+				interrupts =	<0x2 0x14 0x0 IRQ_TYPE_NONE>,
+						<0x2 0x14 0x1 IRQ_TYPE_NONE>;
+				interrupt-names =	"dcin-uv",
+							"dcin-ov";
+			};
+
+			qcom,chgr-misc@1600 {
+				reg = <0x1600 0x100>;
+				interrupts =	<0x2 0x16 0x0 IRQ_TYPE_NONE>,
+						<0x2 0x16 0x1 IRQ_TYPE_NONE>,
+						<0x2 0x16 0x2 IRQ_TYPE_NONE>,
+						<0x2 0x16 0x3 IRQ_TYPE_NONE>,
+						<0x2 0x16 0x4 IRQ_TYPE_NONE>,
+						<0x2 0x16 0x5 IRQ_TYPE_NONE>;
+
+				interrupt-names =	"power-ok",
+							"temp-shutdown",
+							"wdog-timeout",
+							"flash-fail",
+							"otst2",
+							"otst3";
+			};
+
+			smbcharger_charger_otg: qcom,smbcharger-boost-otg {
+				regulator-name = "smbcharger_charger_otg";
+			};
+		};
+
+		pmi8940_fg: qcom,fg {
+			compatible = "qcom,qpnp-fg";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			qcom,resume-soc = <95>;
+			status = "okay";
+			qcom,bcl-lm-threshold-ma = <127>;
+			qcom,bcl-mh-threshold-ma = <405>;
+			qcom,fg-iterm-ma = <150>;
+			qcom,fg-chg-iterm-ma = <100>;
+			qcom,pmic-revid = <&pmi8940_revid>;
+			qcom,fg-cutoff-voltage-mv = <3500>;
+			qcom,cycle-counter-en;
+			qcom,capacity-learning-on;
+
+			qcom,fg-soc@4000 {
+				status = "okay";
+				reg = <0x4000 0x100>;
+				interrupts =	<0x2 0x40 0x0 IRQ_TYPE_NONE>,
+						<0x2 0x40 0x1 IRQ_TYPE_NONE>,
+						<0x2 0x40 0x2 IRQ_TYPE_NONE>,
+						<0x2 0x40 0x3 IRQ_TYPE_NONE>,
+						<0x2 0x40 0x4 IRQ_TYPE_NONE>,
+						<0x2 0x40 0x5 IRQ_TYPE_NONE>,
+						<0x2 0x40 0x6 IRQ_TYPE_NONE>;
+
+				interrupt-names =	"high-soc",
+							"low-soc",
+							"full-soc",
+							"empty-soc",
+							"delta-soc",
+							"first-est-done",
+							"update-soc";
+			};
+
+			qcom,fg-batt@4100 {
+				reg = <0x4100 0x100>;
+				interrupts =	<0x2 0x41 0x0 IRQ_TYPE_NONE>,
+						<0x2 0x41 0x1 IRQ_TYPE_NONE>,
+						<0x2 0x41 0x2 IRQ_TYPE_NONE>,
+						<0x2 0x41 0x3 IRQ_TYPE_NONE>,
+						<0x2 0x41 0x4 IRQ_TYPE_NONE>,
+						<0x2 0x41 0x5 IRQ_TYPE_NONE>,
+						<0x2 0x41 0x6 IRQ_TYPE_NONE>,
+						<0x2 0x41 0x7 IRQ_TYPE_NONE>;
+
+				interrupt-names =	"soft-cold",
+							"soft-hot",
+							"vbatt-low",
+							"batt-ided",
+							"batt-id-req",
+							"batt-unknown",
+							"batt-missing",
+							"batt-match";
+			};
+
+			qcom,revid-tp-rev@1f1 {
+				reg = <0x1f1 0x1>;
+			};
+
+			qcom,fg-memif@4400 {
+				status = "okay";
+				reg = <0x4400 0x100>;
+				interrupts =	<0x2 0x44 0x0 IRQ_TYPE_NONE>,
+						<0x2 0x44 0x2 IRQ_TYPE_NONE>;
+
+				interrupt-names =	"mem-avail",
+							"data-rcvry-sug";
+			};
+		};
+
+		bcl@4200 {
+			compatible = "qcom,msm-bcl";
+			reg = <0x4200 0xff>;
+			reg-names = "fg_user_adc";
+			interrupts = <0x2 0x42 0x0 IRQ_TYPE_NONE>,
+				     <0x2 0x42 0x1 IRQ_TYPE_NONE>;
+			interrupt-names = "bcl-high-ibat-int",
+					"bcl-low-vbat-int";
+			qcom,vbat-scaling-factor = <39000>;
+			qcom,vbat-gain-numerator = <1>;
+			qcom,vbat-gain-denominator = <128>;
+			qcom,vbat-polling-delay-ms = <100>;
+			qcom,ibat-scaling-factor = <39000>;
+			qcom,ibat-gain-numerator = <1>;
+			qcom,ibat-gain-denominator = <128>;
+			qcom,ibat-offset-numerator = <1200>;
+			qcom,ibat-offset-denominator = <1>;
+			qcom,ibat-polling-delay-ms = <100>;
+		};
+
+		qcom,leds@a100 {
+			compatible = "qcom,leds-qpnp";
+			reg = <0xa100 0x100>;
+			label = "mpp";
+		};
+	};
+
+	qcom,pmi8940@3 {
+		compatible = "qcom,spmi-pmic";
+		reg = <0x3 SPMI_USID>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		pmi8940_pwm: pwm@b000 {
+			status = "disabled";
+			compatible = "qcom,qpnp-pwm";
+			reg = <0xb000 0x100>;
+			reg-names = "qpnp-lpg-channel-base";
+			qcom,channel-id = <0>;
+			qcom,supported-sizes = <6>, <9>;
+			#pwm-cells = <2>;
+		};
+
+		labibb: qpnp-labibb-regulator {
+			status = "disabled";
+			compatible = "qcom,qpnp-labibb-regulator";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			qcom,qpnp-labibb-mode = "lcd";
+			qcom,pmic-revid = <&pmi8940_revid>;
+
+			ibb_regulator: qcom,ibb@dc00 {
+				reg = <0xdc00 0x100>;
+				reg-names = "ibb_reg";
+				regulator-name = "ibb_reg";
+
+				regulator-min-microvolt = <4600000>;
+				regulator-max-microvolt = <6000000>;
+
+				qcom,qpnp-ibb-min-voltage = <1400000>;
+				qcom,qpnp-ibb-step-size = <100000>;
+				qcom,qpnp-ibb-slew-rate = <2000000>;
+				qcom,qpnp-ibb-use-default-voltage;
+				qcom,qpnp-ibb-init-voltage = <5500000>;
+				qcom,qpnp-ibb-init-amoled-voltage = <4000000>;
+				qcom,qpnp-ibb-init-lcd-voltage = <5500000>;
+
+				qcom,qpnp-ibb-soft-start = <1000>;
+
+				qcom,qpnp-ibb-discharge-resistor = <32>;
+				qcom,qpnp-ibb-lab-pwrup-delay = <8000>;
+				qcom,qpnp-ibb-lab-pwrdn-delay = <8000>;
+				qcom,qpnp-ibb-en-discharge;
+
+				qcom,qpnp-ibb-full-pull-down;
+				qcom,qpnp-ibb-pull-down-enable;
+				qcom,qpnp-ibb-switching-clock-frequency =
+									<1480>;
+				qcom,qpnp-ibb-limit-maximum-current = <1550>;
+				qcom,qpnp-ibb-debounce-cycle = <16>;
+				qcom,qpnp-ibb-limit-max-current-enable;
+				qcom,qpnp-ibb-ps-enable;
+			};
+
+			lab_regulator: qcom,lab@de00 {
+				reg = <0xde00 0x100>;
+				reg-names = "lab";
+				regulator-name = "lab_reg";
+
+				regulator-min-microvolt = <4600000>;
+				regulator-max-microvolt = <6000000>;
+
+				qcom,qpnp-lab-min-voltage = <4600000>;
+				qcom,qpnp-lab-step-size = <100000>;
+				qcom,qpnp-lab-slew-rate = <5000>;
+				qcom,qpnp-lab-use-default-voltage;
+				qcom,qpnp-lab-init-voltage = <5500000>;
+				qcom,qpnp-lab-init-amoled-voltage = <4600000>;
+				qcom,qpnp-lab-init-lcd-voltage = <5500000>;
+
+				qcom,qpnp-lab-soft-start = <800>;
+
+				qcom,qpnp-lab-full-pull-down;
+				qcom,qpnp-lab-pull-down-enable;
+				qcom,qpnp-lab-switching-clock-frequency =
+									<1600>;
+				qcom,qpnp-lab-limit-maximum-current = <800>;
+				qcom,qpnp-lab-limit-max-current-enable;
+				qcom,qpnp-lab-ps-threshold = <40>;
+				qcom,qpnp-lab-ps-enable;
+				qcom,qpnp-lab-nfet-size = <100>;
+				qcom,qpnp-lab-pfet-size = <100>;
+				qcom,qpnp-lab-max-precharge-time = <500>;
+			};
+		};
+
+		wled: qcom,leds@d800 {
+			compatible = "qcom,qpnp-wled";
+			reg = <0xd800 0x100>,
+				<0xd900 0x100>;
+			reg-names = "qpnp-wled-ctrl-base",
+					"qpnp-wled-sink-base";
+			interrupts = <0x3 0xd8 0x2 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "sc-irq";
+			status = "okay";
+			linux,name = "wled";
+			linux,default-trigger = "bkl-trigger";
+			qcom,fdbk-output = "auto";
+			qcom,vref-mv = <350>;
+			qcom,switch-freq-khz = <800>;
+			qcom,ovp-mv = <29500>;
+			qcom,ilim-ma = <980>;
+			qcom,boost-duty-ns = <26>;
+			qcom,mod-freq-khz = <9600>;
+			qcom,dim-mode = "hybrid";
+			qcom,dim-method = "linear";
+			qcom,hyb-thres = <625>;
+			qcom,sync-dly-us = <800>;
+			qcom,fs-curr-ua = <20000>;
+			qcom,en-phase-stag;
+			qcom,led-strings-list = [00 01];
+			qcom,en-ext-pfet-sc-pro;
+			qcom,pmic-revid = <&pmi8940_revid>;
+			qcom,cons-sync-write-delay-us = <1000>;
+		};
+
+		flash_led: qcom,leds@d300 {
+			compatible = "qcom,qpnp-flash-led";
+			status = "okay";
+			reg = <0xd300 0x100>;
+			label = "flash";
+			qcom,headroom = <500>;
+			qcom,startup-dly = <128>;
+			qcom,clamp-curr = <200>;
+			qcom,pmic-charger-support;
+			qcom,self-check-enabled;
+			qcom,thermal-derate-enabled;
+			qcom,thermal-derate-threshold = <100>;
+			qcom,thermal-derate-rate = "5_PERCENT";
+			qcom,current-ramp-enabled;
+			qcom,ramp_up_step = "6P7_US";
+			qcom,ramp_dn_step = "6P7_US";
+			qcom,vph-pwr-droop-enabled;
+			qcom,vph-pwr-droop-threshold = <3000>;
+			qcom,vph-pwr-droop-debounce-time = <10>;
+			qcom,headroom-sense-ch0-enabled;
+			qcom,headroom-sense-ch1-enabled;
+			qcom,pmic-revid = <&pmi8940_revid>;
+
+			pmi8940_flash0: qcom,flash_0 {
+				label = "flash";
+				qcom,led-name = "led:flash_0";
+				qcom,default-led-trigger =
+						"flash0_trigger";
+				qcom,max-current = <1000>;
+				qcom,duration = <1280>;
+				qcom,id = <0>;
+				qcom,current = <625>;
+			};
+
+			pmi8940_flash1: qcom,flash_1 {
+				label = "flash";
+				qcom,led-name = "led:flash_1";
+				qcom,default-led-trigger =
+						"flash1_trigger";
+				qcom,max-current = <1000>;
+				qcom,duration = <1280>;
+				qcom,id = <1>;
+				qcom,current = <625>;
+			};
+
+			pmi8940_torch0: qcom,torch_0 {
+				label = "torch";
+				qcom,led-name = "led:torch_0";
+				qcom,default-led-trigger =
+						"torch0_trigger";
+				qcom,max-current = <200>;
+				qcom,id = <0>;
+				qcom,current = <120>;
+			};
+
+			pmi8940_torch1: qcom,torch_1 {
+				label = "torch";
+				qcom,led-name = "led:torch_1";
+				qcom,default-led-trigger =
+						"torch1_trigger";
+				qcom,max-current = <200>;
+				qcom,id = <1>;
+				qcom,current = <120>;
+			};
+
+			pmi8940_switch: qcom,switch {
+				label = "switch";
+				qcom,led-name = "led:switch";
+				qcom,default-led-trigger =
+						"switch_trigger";
+				qcom,max-current = <1000>;
+				qcom,duration = <1280>;
+				qcom,id = <2>;
+				qcom,current = <625>;
+				reg0 {
+					regulator-name = "pon_spare_reg";
+				};
+			};
+		};
+
+		pmi_haptic: qcom,haptic@c000 {
+			compatible = "qcom,qpnp-haptic";
+			reg = <0xc000 0x100>;
+			interrupts = <0x3 0xc0 0x0 IRQ_TYPE_EDGE_BOTH>,
+				     <0x3 0xc0 0x1 IRQ_TYPE_EDGE_BOTH>;
+			interrupt-names = "sc-irq", "play-irq";
+			qcom,pmic-revid = <&pmi8940_revid>;
+			vcc_pon-supply = <&pon_perph_reg>;
+			qcom,play-mode = "direct";
+			qcom,wave-play-rate-us = <5263>;
+			qcom,actuator-type = "lra";
+			qcom,wave-shape = "square";
+			qcom,vmax-mv = <2000>;
+			qcom,ilim-ma = <800>;
+			qcom,sc-deb-cycles = <8>;
+			qcom,int-pwm-freq-khz = <505>;
+			qcom,en-brake;
+			qcom,brake-pattern = [03 03 00 00];
+			qcom,use-play-irq;
+			qcom,use-sc-irq;
+			qcom,wave-samples = [3e 3e 3e 3e 3e 3e 3e 3e];
+			qcom,wave-rep-cnt = <1>;
+			qcom,wave-samp-rep-cnt = <1>;
+			qcom,lra-auto-res-mode = "qwd";
+			qcom,lra-high-z = "opt1";
+			qcom,lra-res-cal-period = <4>;
+			qcom,correct-lra-drive-freq;
+			qcom,misc-trim-error-rc19p2-clk-reg-present;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/pmi8950.dtsi b/arch/arm64/boot/dts/qcom/pmi8950.dtsi
index e731f5b..bab5774 100644
--- a/arch/arm64/boot/dts/qcom/pmi8950.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8950.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -11,6 +11,8 @@
  */
 
 #include <dt-bindings/msm/power-on.h>
+#include <dt-bindings/spmi/spmi.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
 
 &spmi_bus {
 	qcom,pmi8950@2 {
@@ -140,57 +142,30 @@
 		};
 
 		pmi8950_gpios: gpios {
-			compatible = "qcom,qpnp-pin";
+			compatible = "qcom,spmi-gpio";
+			reg = <0xc000 0x200>;
+
+			interrupts = <0x2 0xc0 0 IRQ_TYPE_NONE>,
+				<0x2 0xc1 0 IRQ_TYPE_NONE>;
+			interrupt-names = "pmi8950_gpio1", "pmi8950_gpio2";
+
 			gpio-controller;
 			#gpio-cells = <2>;
-			#address-cells = <1>;
-			#size-cells = <1>;
-			label = "pmi8950-gpio";
-
-			gpio@c000 {
-				reg = <0xc000 0x100>;
-				qcom,pin-num = <1>;
-				status = "disabled";
-			};
-
-			gpio@c100 {
-				reg = <0xc100 0x100>;
-				qcom,pin-num = <2>;
-				status = "disabled";
-			};
 		};
 
 		pmi8950_mpps: mpps {
-			compatible = "qcom,qpnp-pin";
+			compatible = "qcom,spmi-mpp";
+			reg = <0xa000 0x400>;
+
+			interrupts = <0x2 0xa0 0 IRQ_TYPE_NONE>,
+				<0x2 0xa1 0 IRQ_TYPE_NONE>,
+				<0x2 0xa2 0 IRQ_TYPE_NONE>,
+				<0x2 0xa3 0 IRQ_TYPE_NONE>;
+			interrupt-names = "pmi8950_mpp1", "pmi8950_mpp2",
+					  "pmi8950_mpp3", "pmi8950_mpp4";
+
 			gpio-controller;
 			#gpio-cells = <2>;
-			#address-cells = <1>;
-			#size-cells = <1>;
-			label = "pmi8950-mpp";
-
-			mpp@a000 {
-				reg = <0xa000 0x100>;
-				qcom,pin-num = <1>;
-				status = "disabled";
-			};
-
-			mpp@a100 {
-				reg = <0xa100 0x100>;
-				qcom,pin-num = <2>;
-				status = "disabled";
-			};
-
-			mpp@a200 {
-				reg = <0xa200 0x100>;
-				qcom,pin-num = <3>;
-				status = "disabled";
-			};
-
-			mpp@a300 {
-				reg = <0xa300 0x100>;
-				qcom,pin-num = <4>;
-				status = "disabled";
-			};
 		};
 
 		pmi8950_charger: qcom,qpnp-smbcharger {
diff --git a/arch/arm64/boot/dts/qcom/qcs605-360camera-overlay.dts b/arch/arm64/boot/dts/qcom/qcs605-360camera-overlay.dts
index e7a2197..820f877 100644
--- a/arch/arm64/boot/dts/qcom/qcs605-360camera-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/qcs605-360camera-overlay.dts
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -23,10 +23,10 @@
 #include "qcs605-360camera.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. QCS605 PM660+PM660L 360camera";
+	model = "Qualcomm Technologies, Inc. QCS605 PM660+PM660L VRcamera";
 	compatible = "qcom,qcs605-mtp", "qcom,qcs605", "qcom,mtp";
 	qcom,msm-id = <347 0x0>;
-	qcom,board-id = <0x0000000b 1>;
+	qcom,board-id = <8 5>;
 	qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
 		       <0x0001001b 0x0102001a 0x0 0x0>,
 		       <0x0001001b 0x0201011a 0x0 0x0>;
diff --git a/arch/arm64/boot/dts/qcom/qcs605-360camera.dts b/arch/arm64/boot/dts/qcom/qcs605-360camera.dts
index 8caad4b..c62f39d 100644
--- a/arch/arm64/boot/dts/qcom/qcs605-360camera.dts
+++ b/arch/arm64/boot/dts/qcom/qcs605-360camera.dts
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -20,7 +20,7 @@
 / {
 	model = "Qualcomm Technologies, Inc. QCS605 PM660 + PM660L 360camera";
 	compatible = "qcom,qcs605-mtp", "qcom,qcs605", "qcom,mtp";
-	qcom,board-id = <0x0000000b 1>;
+	qcom,board-id = <8 5>;
 	qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
 		       <0x0001001b 0x0102001a 0x0 0x0>,
 		       <0x0001001b 0x0201011a 0x0 0x0>;
diff --git a/arch/arm64/boot/dts/qcom/qcs605-360camera.dtsi b/arch/arm64/boot/dts/qcom/qcs605-360camera.dtsi
index 6670edd..0983acf 100644
--- a/arch/arm64/boot/dts/qcom/qcs605-360camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs605-360camera.dtsi
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -39,6 +39,14 @@
 	status = "disabled";
 };
 
+&dsi_dual_nt36850_truly_cmd_display {
+	status = "disabled";
+};
+
+&dsi_dual_nt35597_truly_video {
+	status = "disabled";
+};
+
 &int_codec {
 	qcom,model = "sdm670-360cam-snd-card";
 	qcom,audio-routing =
diff --git a/arch/arm64/boot/dts/qcom/qcs605-pm660-pm8005-regulator.dtsi b/arch/arm64/boot/dts/qcom/qcs605-pm660-pm8005-regulator.dtsi
index a881ec4..382ba65 100644
--- a/arch/arm64/boot/dts/qcom/qcs605-pm660-pm8005-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs605-pm660-pm8005-regulator.dtsi
@@ -157,9 +157,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa1";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l1: regulator-pm660-l1 {
 			regulator-name = "pm660_l1";
@@ -167,7 +168,7 @@
 			regulator-min-microvolt = <800000>;
 			regulator-max-microvolt = <800000>;
 			qcom,init-voltage = <800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -175,9 +176,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa2";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l2: regulator-pm660-l2 {
 			regulator-name = "pm660_l2";
@@ -185,7 +187,7 @@
 			regulator-min-microvolt = <1144000>;
 			regulator-max-microvolt = <1256000>;
 			qcom,init-voltage = <1144000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -193,9 +195,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa3";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l3: regulator-pm660-l3 {
 			regulator-name = "pm660_l3";
@@ -203,7 +206,7 @@
 			regulator-min-microvolt = <1200000>;
 			regulator-max-microvolt = <1352000>;
 			qcom,init-voltage = <1200000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -211,9 +214,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa5";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l5: regulator-pm660-l5 {
 			regulator-name = "pm660_l5";
@@ -221,7 +225,7 @@
 			regulator-min-microvolt = <1200000>;
 			regulator-max-microvolt = <1304000>;
 			qcom,init-voltage = <1200000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -229,9 +233,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa6";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l6: regulator-pm660-l6 {
 			regulator-name = "pm660_l6";
@@ -239,7 +244,7 @@
 			regulator-min-microvolt = <880000>;
 			regulator-max-microvolt = <880000>;
 			qcom,init-voltage = <880000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -260,9 +265,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa8";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l8: regulator-pm660-l8 {
 			regulator-name = "pm660_l8";
@@ -270,7 +276,7 @@
 			regulator-min-microvolt = <1696000>;
 			regulator-max-microvolt = <1952000>;
 			qcom,init-voltage = <1696000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -278,9 +284,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa9";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l9: regulator-pm660-l9 {
 			regulator-name = "pm660_l9";
@@ -288,7 +295,7 @@
 			regulator-min-microvolt = <1616000>;
 			regulator-max-microvolt = <1984000>;
 			qcom,init-voltage = <1616000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -296,9 +303,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa10";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l10: regulator-pm660-l10 {
 			regulator-name = "pm660_l10";
@@ -306,7 +314,7 @@
 			regulator-min-microvolt = <1696000>;
 			regulator-max-microvolt = <1952000>;
 			qcom,init-voltage = <1696000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -314,9 +322,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa11";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l11: regulator-pm660-l11 {
 			regulator-name = "pm660_l11";
@@ -324,7 +333,7 @@
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1904000>;
 			qcom,init-voltage = <1800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -332,9 +341,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa12";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l12: regulator-pm660-l12 {
 			regulator-name = "pm660_l12";
@@ -342,7 +352,7 @@
 			regulator-min-microvolt = <1616000>;
 			regulator-max-microvolt = <1984000>;
 			qcom,init-voltage = <1616000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -350,9 +360,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa13";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l13: regulator-pm660-l13 {
 			regulator-name = "pm660_l13";
@@ -360,7 +371,7 @@
 			regulator-min-microvolt = <1696000>;
 			regulator-max-microvolt = <1904000>;
 			qcom,init-voltage = <1696000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -368,9 +379,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa14";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l14: regulator-pm660-l14 {
 			regulator-name = "pm660_l14";
@@ -378,7 +390,7 @@
 			regulator-min-microvolt = <1696000>;
 			regulator-max-microvolt = <1904000>;
 			qcom,init-voltage = <1696000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -386,9 +398,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa15";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l15: regulator-pm660-l15 {
 			regulator-name = "pm660_l15";
@@ -396,7 +409,7 @@
 			regulator-min-microvolt = <2896000>;
 			regulator-max-microvolt = <3000000>;
 			qcom,init-voltage = <2896000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -404,9 +417,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa16";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l16: regulator-pm660-l16 {
 		regulator-name = "pm660_l16";
@@ -414,7 +428,7 @@
 			regulator-min-microvolt = <2896000>;
 			regulator-max-microvolt = <3104000>;
 			qcom,init-voltage = <2896000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -422,9 +436,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa17";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l17: regulator-pm660-l17 {
 			regulator-name = "pm660_l17";
@@ -432,7 +447,7 @@
 			regulator-min-microvolt = <2920000>;
 			regulator-max-microvolt = <3232000>;
 			qcom,init-voltage = <2920000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -440,9 +455,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa18";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l18: regulator-pm660-l18 {
 			regulator-name = "pm660_l18";
@@ -450,7 +466,7 @@
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <3000000>;
 			qcom,init-voltage = <1800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -458,9 +474,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa19";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l19: regulator-pm660-l19 {
 			regulator-name = "pm660_l19";
@@ -468,7 +485,7 @@
 			regulator-min-microvolt = <2944000>;
 			regulator-max-microvolt = <3304000>;
 			qcom,init-voltage = <2944000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/qcs605.dtsi b/arch/arm64/boot/dts/qcom/qcs605.dtsi
index 6cf9a82..fcde397 100644
--- a/arch/arm64/boot/dts/qcom/qcs605.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs605.dtsi
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -18,6 +18,30 @@
 	qcom,msm-id = <347 0x0>;
 };
 
+&pil_modem_mem {
+	reg = <0 0x8b000000 0 0x3c00000>;
+};
+
+&pil_video_mem {
+	reg = <0 0x8ec00000 0 0x500000>;
+};
+
+&wlan_msa_mem {
+	reg = <0 0x8f100000 0 0x100000>;
+};
+
+&pil_cdsp_mem {
+	reg = <0 0x8f200000 0 0x800000>;
+};
+
+&pil_mba_mem {
+	reg = <0 0x8fa00000 0 0x200000>;
+};
+
+&pil_adsp_mem {
+	reg = <0 0x8fc00000 0 0x1e00000>;
+};
+
 &soc {
 	qcom,rmnet-ipa {
 		status = "disabled";
diff --git a/arch/arm64/boot/dts/qcom/sda632.dtsi b/arch/arm64/boot/dts/qcom/sda632.dtsi
new file mode 100644
index 0000000..8a71b19
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda632.dtsi
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "sdm632.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDA632";
+	compatible = "qcom,sda632";
+	qcom,msm-id = <350 0x0>;
+};
+
+&secure_mem {
+	status = "disabled";
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sda845-sdxpoorwills.dtsi b/arch/arm64/boot/dts/qcom/sda845-sdxpoorwills.dtsi
new file mode 100644
index 0000000..944ca3b
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda845-sdxpoorwills.dtsi
@@ -0,0 +1,271 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&mdm3 {
+	pinctrl-names = "default", "mdm_active", "mdm_suspend";
+	pinctrl-0 = <&ap2mdm_pon_reset_default>;
+	pinctrl-1 = <&ap2mdm_active &mdm2ap_active>;
+	pinctrl-2 = <&ap2mdm_sleep &mdm2ap_sleep>;
+	interrupt-map = <0 &tlmm 24 0x3
+			1 &tlmm 21 0x3>;
+	qcom,mdm2ap-errfatal-gpio = <&tlmm 24 0x00>;
+	qcom,ap2mdm-errfatal-gpio = <&tlmm 23 0x00>;
+	qcom,mdm2ap-status-gpio   = <&tlmm 22 0x00>;
+	qcom,ap2mdm-status-gpio   = <&tlmm 21 0x00>;
+	qcom,ap2mdm-soft-reset-gpio = <&pm8998_gpios 10 0>;
+	qcom,mdm-link-info = "0304_00.01.00";
+	status = "ok";
+};
+
+&pm8998_gpios {
+	ap2mdm_pon_reset {
+		ap2mdm_pon_reset_default: ap2mdm_pon_reset_default {
+			/* MDM PON control */
+			pins = "gpio10";
+			function = "normal";
+			output-low;
+			power-source = <0>;
+		};
+	};
+};
+
+&pil_modem {
+	status = "disabled";
+};
+
+&pcie0_wake_default {
+	config {
+	       /delete-property/ bias-pull-down;
+	};
+};
+
+&led_flash_rear {
+	status = "disabled";
+};
+
+&led_flash_front {
+	status = "disabled";
+};
+
+&ois_rear {
+	status = "disabled";
+};
+
+&eeprom_rear {
+	status = "disabled";
+};
+
+&eeprom_rear_aux {
+	status = "disabled";
+};
+
+&eeprom_front {
+	status = "disabled";
+};
+
+&soc {
+	qcom,cam-req-mgr {
+		status = "disabled";
+	};
+
+	cam_csiphy0: qcom,csiphy@ac65000 {
+		status = "disabled";
+	};
+
+	cam_csiphy1: qcom,csiphy@ac66000 {
+		status = "disabled";
+	};
+
+	cam_csiphy2: qcom,csiphy@ac67000 {
+		status = "disabled";
+	};
+
+	cam_cci: qcom,cci@ac4a000 {
+		status = "disabled";
+
+		i2c_freq_100Khz: qcom,i2c_standard_mode {
+			status = "disabled";
+		};
+
+		i2c_freq_400Khz: qcom,i2c_fast_mode {
+			status = "disabled";
+		};
+
+		i2c_freq_custom: qcom,i2c_custom_mode {
+			status = "disabled";
+		};
+
+		i2c_freq_1Mhz: qcom,i2c_fast_plus_mode {
+			status = "disabled";
+		};
+	};
+
+	qcom,cam_smmu {
+		status = "disabled";
+
+		msm_cam_smmu_ife {
+			ife_iova_mem_map: iova-mem-map {
+				iova-mem-region-io {
+					status = "disabled";
+				};
+			};
+		};
+
+		msm_cam_smmu_jpeg {
+			jpeg_iova_mem_map: iova-mem-map {
+				iova-mem-region-io {
+					status = "disabled";
+				};
+			};
+		};
+
+		msm_cam_smmu_icp {
+			icp_iova_mem_map: iova-mem-map {
+				iova-mem-region-firmware {
+					status = "disabled";
+				};
+
+				iova-mem-region-shared {
+					status = "disabled";
+				};
+
+				iova-mem-region-io {
+					status = "disabled";
+				};
+			};
+		};
+
+		msm_cam_smmu_cpas_cdm {
+			cpas_cdm_iova_mem_map: iova-mem-map {
+				iova-mem-region-io {
+					status = "disabled";
+				};
+			};
+		};
+
+		msm_cam_smmu_fd {
+			fd_iova_mem_map: iova-mem-map {
+				iova-mem-region-io {
+					status = "disabled";
+				};
+			};
+		};
+	};
+
+	qcom,cam-cpas@ac40000 {
+		status = "disabled";
+	};
+
+	qcom,cam-cdm-intf {
+		status = "disabled";
+	};
+
+	qcom,cpas-cdm0@ac48000 {
+		status = "disabled";
+	};
+
+	qcom,cam-isp {
+		status = "disabled";
+	};
+
+	cam_csid0: qcom,csid0@acb3000 {
+		status = "disabled";
+	};
+
+	cam_vfe0: qcom,vfe0@acaf000 {
+		status = "disabled";
+	};
+
+	cam_csid1: qcom,csid1@acba000 {
+		status = "disabled";
+	};
+
+	cam_vfe1: qcom,vfe1@acb6000 {
+		status = "disabled";
+	};
+
+	cam_csid_lite: qcom,csid-lite@acc8000 {
+		status = "disabled";
+	};
+
+	cam_vfe_lite: qcom,vfe-lite@acc4000 {
+		status = "disabled";
+	};
+
+	qcom,cam-icp {
+		status = "disabled";
+	};
+
+	cam_a5: qcom,a5@ac00000 {
+		status = "disabled";
+	};
+
+	cam_ipe0: qcom,ipe0 {
+		status = "disabled";
+	};
+
+	cam_ipe1: qcom,ipe1 {
+		status = "disabled";
+	};
+
+	cam_bps: qcom,bps {
+		status = "disabled";
+	};
+
+	clock_camcc: qcom,camcc@ad00000 {
+		status = "disabled";
+	};
+
+	qcom,cam-jpeg {
+		status = "disabled";
+	};
+
+	cam_jpeg_enc: qcom,jpegenc@ac4e000 {
+		status = "disabled";
+	};
+
+	cam_jpeg_dma: qcom,jpegdma@0xac52000 {
+		status = "disabled";
+	};
+
+	qcom,cam-fd {
+		status = "disabled";
+	};
+
+	cam_fd: qcom,fd@ac5a000 {
+		status = "disabled";
+	};
+
+	qcom,cam-sensor@0 {
+		status = "disabled";
+	};
+
+	qcom,cam-sensor@1 {
+		status = "disabled";
+	};
+
+	qcom,cam-sensor@2 {
+		status = "disabled";
+	};
+
+	qcom,cam-sensor@3 {
+		status = "disabled";
+	};
+
+	cam_csiphy3: qcom,csiphy@ac68000 {
+		status = "disabled";
+	};
+};
+
+&wil6210 {
+	status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/qcom/sda845-v2-hdk-overlay.dts b/arch/arm64/boot/dts/qcom/sda845-v2-hdk-overlay.dts
index 6357886..bae7ee1 100644
--- a/arch/arm64/boot/dts/qcom/sda845-v2-hdk-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sda845-v2-hdk-overlay.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -13,14 +13,8 @@
 /dts-v1/;
 /plugin/;
 
-#include <dt-bindings/clock/qcom,gcc-sdm845.h>
-#include <dt-bindings/clock/qcom,camcc-sdm845.h>
-#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
-#include <dt-bindings/clock/qcom,rpmh.h>
-#include <dt-bindings/interrupt-controller/arm-gic.h>
-
-#include "sdm845-sde-display.dtsi"
 #include "sda845-v2-hdk.dtsi"
+#include "sdm845-sde-display.dtsi"
 #include "sdm845-hdk-audio-overlay.dtsi"
 
 / {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-overlay.dts b/arch/arm64/boot/dts/qcom/sda845-v2-mtp-sdxpoorwills-overlay.dts
similarity index 66%
rename from arch/arm64/boot/dts/qcom/sdm845-v2-qvr-overlay.dts
rename to arch/arm64/boot/dts/qcom/sda845-v2-mtp-sdxpoorwills-overlay.dts
index e1ec364..5377813 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sda845-v2-mtp-sdxpoorwills-overlay.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,6 +10,7 @@
  * GNU General Public License for more details.
  */
 
+
 /dts-v1/;
 /plugin/;
 
@@ -20,13 +21,14 @@
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 
 #include "sdm845-sde-display.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-qvr-audio-overlay.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "sdm845-mtp.dtsi"
+#include "sdm845-audio-overlay.dtsi"
+#include "external-soc.dtsi"
+#include "sda845-sdxpoorwills.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
-	compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
-	qcom,msm-id = <321 0x20000>;
-	qcom,board-id = <0x01000B 0x20>;
+	model = "Qualcomm Technologies, Inc. SDA845 v2 + SDXPOORWILLS MTP";
+	compatible = "qcom,sda845-mtp", "qcom,sda845", "qcom,mtp";
+	qcom,msm-id = <341 0x20000>;
+	qcom,board-id = <8 5>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-overlay.dts b/arch/arm64/boot/dts/qcom/sda845-v2.1-cdp-sdxpoorwills-overlay.dts
similarity index 66%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr-overlay.dts
copy to arch/arm64/boot/dts/qcom/sda845-v2.1-cdp-sdxpoorwills-overlay.dts
index e1ec364..10e4a32 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sda845-v2.1-cdp-sdxpoorwills-overlay.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,6 +10,7 @@
  * GNU General Public License for more details.
  */
 
+
 /dts-v1/;
 /plugin/;
 
@@ -20,13 +21,14 @@
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 
 #include "sdm845-sde-display.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-qvr-audio-overlay.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "sdm845-cdp.dtsi"
+#include "sdm845-audio-overlay.dtsi"
+#include "external-soc.dtsi"
+#include "sda845-sdxpoorwills.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
-	compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
-	qcom,msm-id = <321 0x20000>;
-	qcom,board-id = <0x01000B 0x20>;
+	model = "Qualcomm Technologies, Inc. SDA845 v2.1 + SDXPOORWILLS CDP";
+	compatible = "qcom,sda845-cdp", "qcom,sda845", "qcom,cdp";
+	qcom,msm-id = <341 0x20001>;
+	qcom,board-id = <1 2>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-overlay.dts b/arch/arm64/boot/dts/qcom/sda845-v2.1-mtp-sdxpoorwills-overlay.dts
similarity index 66%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr-overlay.dts
copy to arch/arm64/boot/dts/qcom/sda845-v2.1-mtp-sdxpoorwills-overlay.dts
index e1ec364..09fa20f 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sda845-v2.1-mtp-sdxpoorwills-overlay.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,6 +10,7 @@
  * GNU General Public License for more details.
  */
 
+
 /dts-v1/;
 /plugin/;
 
@@ -20,13 +21,14 @@
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 
 #include "sdm845-sde-display.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-qvr-audio-overlay.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "sdm845-mtp.dtsi"
+#include "sdm845-audio-overlay.dtsi"
+#include "external-soc.dtsi"
+#include "sda845-sdxpoorwills.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
-	compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
-	qcom,msm-id = <321 0x20000>;
-	qcom,board-id = <0x01000B 0x20>;
+	model = "Qualcomm Technologies, Inc. SDA845 v2.1 + SDXPOORWILLS MTP";
+	compatible = "qcom,sda845-mtp", "qcom,sda845", "qcom,mtp";
+	qcom,msm-id = <341 0x20001>;
+	qcom,board-id = <8 5>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm450-cdp.dts b/arch/arm64/boot/dts/qcom/sdm450-cdp.dts
index 3e06872..c55622a 100644
--- a/arch/arm64/boot/dts/qcom/sdm450-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-cdp.dts
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
 /dts-v1/;
 
 #include "sdm450.dtsi"
+#include "pmi8950.dtsi"
 #include "msm8953-cdp.dtsi"
+#include "msm8953-pmi8950.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM450 + PMI8950 CDP";
diff --git a/arch/arm64/boot/dts/qcom/sdm450-iot-mtp.dts b/arch/arm64/boot/dts/qcom/sdm450-iot-mtp.dts
index 7fac030..9c8cd38 100644
--- a/arch/arm64/boot/dts/qcom/sdm450-iot-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-iot-mtp.dts
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
 /dts-v1/;
 
 #include "sdm450.dtsi"
+#include "pmi8950.dtsi"
 #include "msm8953-mtp.dtsi"
+#include "msm8953-pmi8950.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM450 + PMI8950 IOT MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm450-mtp.dts b/arch/arm64/boot/dts/qcom/sdm450-mtp.dts
index 2524b80..040b4ba 100644
--- a/arch/arm64/boot/dts/qcom/sdm450-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-mtp.dts
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
 /dts-v1/;
 
 #include "sdm450.dtsi"
+#include "pmi8950.dtsi"
 #include "msm8953-mtp.dtsi"
+#include "msm8953-pmi8950.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM450 + PMI8950 MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm450-pmi632-cdp-s2.dts b/arch/arm64/boot/dts/qcom/sdm450-pmi632-cdp-s2.dts
new file mode 100644
index 0000000..68f02a8
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm450-pmi632-cdp-s2.dts
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "sdm450.dtsi"
+#include "sdm450-pmi632-cdp-s2.dtsi"
+#include "sdm450-pmi632.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM450 + PMI632 CDP S2";
+	compatible = "qcom,sdm450-cdp", "qcom,sdm450", "qcom,cdp";
+	qcom,board-id = <1 2>;
+	qcom,pmic-id = <0x010016 0x25 0x0 0x0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm450-pmi632-cdp-s2.dtsi b/arch/arm64/boot/dts/qcom/sdm450-pmi632-cdp-s2.dtsi
new file mode 100644
index 0000000..220ec20
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm450-pmi632-cdp-s2.dtsi
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8953-cdp.dtsi"
+
diff --git a/arch/arm64/boot/dts/qcom/sdm450-pmi632-mtp-s3.dts b/arch/arm64/boot/dts/qcom/sdm450-pmi632-mtp-s3.dts
new file mode 100644
index 0000000..b9aadc1
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm450-pmi632-mtp-s3.dts
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "sdm450.dtsi"
+#include "sdm450-pmi632-mtp-s3.dtsi"
+#include "sdm450-pmi632.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM450 + PMI632 MTP S3";
+	compatible = "qcom,sdm450-mtp", "qcom,sdm450", "qcom,mtp";
+	qcom,board-id = <8 3>;
+	qcom,pmic-id = <0x010016 0x25 0x0 0x0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm450-pmi632-mtp-s3.dtsi b/arch/arm64/boot/dts/qcom/sdm450-pmi632-mtp-s3.dtsi
new file mode 100644
index 0000000..adb7f47
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm450-pmi632-mtp-s3.dtsi
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8953-mtp.dtsi"
+
diff --git a/arch/arm64/boot/dts/qcom/sdm450-pmi632.dtsi b/arch/arm64/boot/dts/qcom/sdm450-pmi632.dtsi
new file mode 100644
index 0000000..413612d
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm450-pmi632.dtsi
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "pmi632.dtsi"
+
+&spmi_bus {
+	/delete-node/ qcom,pmi8950@2;
+	/delete-node/ qcom,pmi8950@3;
+};
+
+&pm8953_typec {
+	status = "disabled";
+};
+
+&pmi632_pon {
+	qcom,ps-hold-hard-reset-disable;
+	qcom,ps-hold-shutdown-disable;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm450-pmi8937-mtp.dts b/arch/arm64/boot/dts/qcom/sdm450-pmi8937-mtp.dts
index 6a6a09e..4964a5f 100644
--- a/arch/arm64/boot/dts/qcom/sdm450-pmi8937-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-pmi8937-mtp.dts
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
 /dts-v1/;
 
 #include "sdm450.dtsi"
+#include "pmi8937.dtsi"
 #include "msm8953-mtp.dtsi"
+#include "msm8953-pmi8937.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM450 + PMI8937 MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/sdm450-pmi8937.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/sdm450-pmi8937.dts
index 0a56c79..700e950 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-pmi8937.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,13 @@
  * GNU General Public License for more details.
  */
 
-
 /dts-v1/;
 
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "sdm450.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
-	compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
-	qcom,board-id = <0x01000B 0x20>;
+	model = "Qualcomm Technologies, Inc. SDM450 + PMI8937 SOC";
+	compatible = "qcom,sdm450";
+	qcom,pmic-id = <0x010016 0x020037 0x0 0x0>;
+	qcom,pmic-name = "PMI8937";
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm450-pmi8940-mtp.dts b/arch/arm64/boot/dts/qcom/sdm450-pmi8940-mtp.dts
index 3c4e802..9bed8d3 100644
--- a/arch/arm64/boot/dts/qcom/sdm450-pmi8940-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-pmi8940-mtp.dts
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
 /dts-v1/;
 
 #include "sdm450.dtsi"
+#include "pmi8940.dtsi"
 #include "msm8953-mtp.dtsi"
+#include "msm8953-pmi8940.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM450 + PMI8940 MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/sdm450-pmi8940.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/sdm450-pmi8940.dts
index 0a56c79..f50d177 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-pmi8940.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,13 @@
  * GNU General Public License for more details.
  */
 
-
 /dts-v1/;
 
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "sdm450.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
-	compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
-	qcom,board-id = <0x01000B 0x20>;
+	model = "Qualcomm Technologies, Inc. SDM450 + PMI8940 SOC";
+	compatible = "qcom,sdm450";
+	qcom,pmic-id = <0x010016 0x020040 0x0 0x0>;
+	qcom,pmic-name = "PMI8940";
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm450-qrd-sku4.dts b/arch/arm64/boot/dts/qcom/sdm450-qrd-sku4.dts
new file mode 100644
index 0000000..977a978
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm450-qrd-sku4.dts
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "sdm450.dtsi"
+#include "sdm450-qrd-sku4.dtsi"
+#include "sdm450-pmi632.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM450 + PMI632 QRD SKU4";
+	compatible = "qcom,sdm450-qrd", "qcom,sdm450", "qcom,qrd";
+	qcom,board-id = <0xb 1>;
+	qcom,pmic-id = <0x010016 0x25 0x0 0x0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm450-qrd-sku4.dtsi b/arch/arm64/boot/dts/qcom/sdm450-qrd-sku4.dtsi
new file mode 100644
index 0000000..0a98528
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm450-qrd-sku4.dtsi
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8953-qrd.dtsi"
+
+&i2c_3 {
+	status = "disabled";
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm450-rcm.dts b/arch/arm64/boot/dts/qcom/sdm450-rcm.dts
index 4ab131a..1b7831b 100644
--- a/arch/arm64/boot/dts/qcom/sdm450-rcm.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-rcm.dts
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
 /dts-v1/;
 
 #include "sdm450.dtsi"
+#include "pmi8950.dtsi"
 #include "msm8953-cdp.dtsi"
+#include "msm8953-pmi8950.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM450 + PMI8950 RCM";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/sdm450.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/sdm450.dts
index 0a56c79..b829b81 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,13 @@
  * GNU General Public License for more details.
  */
 
-
 /dts-v1/;
 
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "sdm450.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
-	compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
-	qcom,board-id = <0x01000B 0x20>;
+	model = "Qualcomm Technologies, Inc. SDM450 + PMI8950 SOC";
+	compatible = "qcom,sdm450";
+	qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
+	qcom,pmic-name = "PMI8950";
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm450.dtsi b/arch/arm64/boot/dts/qcom/sdm450.dtsi
index b080ff7..3e24714 100644
--- a/arch/arm64/boot/dts/qcom/sdm450.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm450.dtsi
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -17,6 +17,7 @@
 	model = "Qualcomm Technologies, Inc. SDM450";
 	compatible = "qcom,sdm450";
 	qcom,msm-id = <338 0x0>;
+	qcom,msm-name = "SDM450";
 };
 
 &CPU4 {
@@ -34,3 +35,16 @@
 &CPU7 {
 	efficiency = <1024>;
 };
+
+&clock_gcc_gfx {
+	compatible = "qcom,gcc-gfx-sdm450";
+	qcom,gfxfreq-corner =
+		<         0   0 >,
+		< 133330000   1 >,  /* Min SVS   */
+		< 216000000   2 >,  /* Low SVS   */
+		< 320000000   3 >,  /* SVS       */
+		< 400000000   4 >,  /* SVS Plus  */
+		< 510000000   5 >,  /* NOM       */
+		< 560000000   6 >,  /* Nom Plus  */
+		< 600000000   7 >;  /* Turbo     */
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm632-cdp-s2.dts b/arch/arm64/boot/dts/qcom/sdm632-cdp-s2.dts
new file mode 100644
index 0000000..903b432
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm632-cdp-s2.dts
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "sdm632.dtsi"
+#include "sdm450-pmi632-cdp-s2.dtsi"
+#include "sdm450-pmi632.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM632 + PMI632 + PMI8004 CDP S2";
+	compatible = "qcom,sdm632-cdp", "qcom,sdm632", "qcom,cdp";
+	qcom,board-id = <1 2>;
+	qcom,pmic-id = <0x010016 0x25 0xC 0x0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm632-cpu.dtsi b/arch/arm64/boot/dts/qcom/sdm632-cpu.dtsi
new file mode 100644
index 0000000..031fd7e
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm632-cpu.dtsi
@@ -0,0 +1,267 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/ {
+	/delete-node/ cpus;
+
+	cpus {
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		cpu-map {
+			cluster0 {
+				core0 {
+					cpu = <&CPU0>;
+				};
+				core1 {
+					cpu = <&CPU1>;
+				};
+				core2 {
+					cpu = <&CPU2>;
+				};
+				core3 {
+					cpu = <&CPU3>;
+				};
+			};
+
+			cluster1 {
+				core0 {
+					cpu = <&CPU4>;
+				};
+				core1 {
+					cpu = <&CPU5>;
+				};
+				core2 {
+					cpu = <&CPU6>;
+				};
+				core3 {
+					cpu = <&CPU7>;
+				};
+			};
+		};
+
+		CPU0: cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x0>;
+			enable-method = "psci";
+			efficiency = <1024>;
+			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
+			next-level-cache = <&L2_0>;
+			L2_0: l2-cache {
+			      compatible = "arm,arch-cache";
+			      cache-level = <2>;
+			      /* A53 L2 dump not supported */
+			      qcom,dump-size = <0x0>;
+			};
+			L1_I_0: l1-icache {
+			      compatible = "arm,arch-cache";
+			      qcom,dump-size = <0x9040>;
+			};
+			L1_D_0: l1-dcache {
+			      compatible = "arm,arch-cache";
+			      qcom,dump-size = <0x9040>;
+			};
+			L1_TLB_0: l1-tlb {
+				qcom,dump-size = <0x2800>;
+			};
+		};
+
+		CPU1: cpu@1 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			enable-method = "psci";
+			reg = <0x0 0x1>;
+			efficiency = <1024>;
+			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
+			next-level-cache = <&L2_0>;
+			L1_I_1: l1-icache {
+			      compatible = "arm,arch-cache";
+			      qcom,dump-size = <0x9040>;
+			};
+			L1_D_1: l1-dcache {
+			      compatible = "arm,arch-cache";
+			      qcom,dump-size = <0x9040>;
+			};
+			L1_TLB_1: l1-tlb {
+				qcom,dump-size = <0x2800>;
+			};
+		};
+
+		CPU2: cpu@2 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			enable-method = "psci";
+			reg = <0x0 0x2>;
+			efficiency = <1024>;
+			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
+			next-level-cache = <&L2_0>;
+			L1_I_2: l1-icache {
+			      compatible = "arm,arch-cache";
+			      qcom,dump-size = <0x9040>;
+			};
+			L1_D_2: l1-dcache {
+			      compatible = "arm,arch-cache";
+			      qcom,dump-size = <0x9040>;
+			};
+			L1_TLB_2: l1-tlb {
+				qcom,dump-size = <0x2800>;
+			};
+		};
+
+		CPU3: cpu@3 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			enable-method = "psci";
+			reg = <0x0 0x3>;
+			efficiency = <1024>;
+			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
+			next-level-cache = <&L2_0>;
+			L1_I_3: l1-icache {
+			      compatible = "arm,arch-cache";
+			      qcom,dump-size = <0x9040>;
+			};
+			L1_D_3: l1-dcache {
+			      compatible = "arm,arch-cache";
+			      qcom,dump-size = <0x9040>;
+			};
+			L1_TLB_3: l1-tlb {
+				qcom,dump-size = <0x2800>;
+			};
+		};
+
+		CPU4: cpu@100 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			enable-method = "psci";
+			reg = <0x0 0x100>;
+			efficiency = <1638>;
+			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_1>;
+			next-level-cache = <&L2_1>;
+			L2_1: l2-cache {
+			      compatible = "arm,arch-cache";
+			      cache-level = <2>;
+			};
+			L1_I_100: l1-icache {
+			      compatible = "arm,arch-cache";
+			      qcom,dump-size = <0x12000>;
+			};
+			L1_D_100: l1-dcache {
+			      compatible = "arm,arch-cache";
+			      qcom,dump-size = <0x9040>;
+			};
+			L1_TLB_100: l1-tlb {
+				qcom,dump-size = <0x4800>;
+			};
+		};
+
+		CPU5: cpu@101 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53","arm,armv8";
+			enable-method = "psci";
+			reg = <0x0 0x101>;
+			efficiency = <1638>;
+			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_1>;
+			next-level-cache = <&L2_1>;
+			L1_I_101: l1-icache {
+			      compatible = "arm,arch-cache";
+			      qcom,dump-size = <0x12000>;
+			};
+			L1_D_101: l1-dcache {
+			      compatible = "arm,arch-cache";
+			      qcom,dump-size = <0x9040>;
+			};
+			L1_TLB_101: l1-tlb {
+				qcom,dump-size = <0x4800>;
+			};
+		};
+
+		CPU6: cpu@102 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53","arm,armv8";
+			enable-method = "psci";
+			reg = <0x0 0x102>;
+			efficiency = <1638>;
+			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_1>;
+			next-level-cache = <&L2_1>;
+			L1_I_102: l1-icache {
+			      compatible = "arm,arch-cache";
+			      qcom,dump-size = <0x12000>;
+			};
+			L1_D_102: l1-dcache {
+			      compatible = "arm,arch-cache";
+			      qcom,dump-size = <0x9040>;
+			};
+			L1_TLB_102: l1-tlb {
+				qcom,dump-size = <0x4800>;
+			};
+		};
+
+		CPU7: cpu@103 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53","arm,armv8";
+			enable-method = "psci";
+			reg = <0x0 0x103>;
+			efficiency = <1638>;
+			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_1>;
+			next-level-cache = <&L2_1>;
+			L1_I_103: l1-icache {
+			      compatible = "arm,arch-cache";
+			      qcom,dump-size = <0x12000>;
+			};
+			L1_D_103: l1-dcache {
+			      compatible = "arm,arch-cache";
+			      qcom,dump-size = <0x9040>;
+			};
+			L1_TLB_103: l1-tlb {
+				qcom,dump-size = <0x4800>;
+			};
+		};
+	};
+};
+
+&cpuss_dump {
+	qcom,l1_tlb_dump0 {
+		qcom,dump-node = <&L1_TLB_0>;
+		qcom,dump-id = <0x20>;
+	};
+	qcom,l1_tlb_dump1 {
+		qcom,dump-node = <&L1_TLB_1>;
+		qcom,dump-id = <0x21>;
+	};
+	qcom,l1_tlb_dump2 {
+		qcom,dump-node = <&L1_TLB_2>;
+		qcom,dump-id = <0x22>;
+	};
+	qcom,l1_tlb_dump3 {
+		qcom,dump-node = <&L1_TLB_3>;
+		qcom,dump-id = <0x23>;
+	};
+	qcom,l1_tlb_dump100 {
+		qcom,dump-node = <&L1_TLB_100>;
+		qcom,dump-id = <0x24>;
+	};
+	qcom,l1_tlb_dump101 {
+		qcom,dump-node = <&L1_TLB_101>;
+		qcom,dump-id = <0x25>;
+	};
+	qcom,l1_tlb_dump102 {
+		qcom,dump-node = <&L1_TLB_102>;
+		qcom,dump-id = <0x26>;
+	};
+	qcom,l1_tlb_dump103 {
+		qcom,dump-node = <&L1_TLB_103>;
+		qcom,dump-id = <0x27>;
+	};
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm632-mtp-s3.dts b/arch/arm64/boot/dts/qcom/sdm632-mtp-s3.dts
new file mode 100644
index 0000000..6339c3c
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm632-mtp-s3.dts
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "sdm632.dtsi"
+#include "sdm450-pmi632-mtp-s3.dtsi"
+#include "sdm450-pmi632.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM632 + PMI632 + PMI8004 MTP S3";
+	compatible = "qcom,sdm632-mtp", "qcom,sdm632", "qcom,mtp";
+	qcom,board-id = <8 3>;
+	qcom,pmic-id = <0x010016 0x25 0xC 0x0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm632-qrd-sku4.dts b/arch/arm64/boot/dts/qcom/sdm632-qrd-sku4.dts
new file mode 100644
index 0000000..9f33721
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm632-qrd-sku4.dts
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "sdm632.dtsi"
+#include "sdm450-qrd-sku4.dtsi"
+#include "sdm450-pmi632.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM632 + PMI632 + PMI8004 QRD SKU4";
+	compatible = "qcom,sdm632-qrd", "qcom,sdm632", "qcom,qrd";
+	qcom,board-id = <0xb 1>;
+	qcom,pmic-id = <0x010016 0x25 0xC 0x0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/sdm632-rumi.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/sdm632-rumi.dts
index 0a56c79..5a6f88a 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sdm632-rumi.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,15 +11,14 @@
  * GNU General Public License for more details.
  */
 
-
 /dts-v1/;
 
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "sdm632.dtsi"
+#include "sdm632-rumi.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
-	compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
-	qcom,board-id = <0x01000B 0x20>;
+	model = "Qualcomm Technologies, Inc. SDM632 RUMI";
+	compatible = "qcom,sdm632-rumi", "qcom,sdm632", "qcom,rumi";
+	qcom,board-id = <15 0>;
+	qcom,pmic-id = <0 0 0 0>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm632-rumi.dtsi b/arch/arm64/boot/dts/qcom/sdm632-rumi.dtsi
new file mode 100644
index 0000000..3ba8a4d
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm632-rumi.dtsi
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&blsp1_serial1 {
+	status = "ok";
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart1_console_active>;
+};
+
+&wdog {
+	status = "disabled";
+};
+
+&modem_mem {
+	status = "disabled";
+};
+
+&adsp_fw_mem {
+	status = "disabled";
+};
+
+&wcnss_fw_mem {
+	status = "disabled";
+};
+
+&venus_mem {
+	status = "disabled";
+};
+
+&secure_mem {
+	status = "disabled";
+};
+
+&qseecom_mem {
+	status = "disabled";
+};
+
+&adsp_mem {
+	status = "disabled";
+};
+
+&dfps_data_mem {
+	status = "disabled";
+};
+
+&cont_splash_mem {
+	status = "disabled";
+};
+
+&gpu_mem {
+	status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm632.dtsi b/arch/arm64/boot/dts/qcom/sdm632.dtsi
new file mode 100644
index 0000000..3ebd50e
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm632.dtsi
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8953.dtsi"
+#include "sdm632-cpu.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM632";
+	compatible = "qcom,sdm632";
+	qcom,msm-id = <349 0x0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm670-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm670-audio-overlay.dtsi
index 6510fa2..2b3cb39 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-audio-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-audio-overlay.dtsi
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -12,6 +12,7 @@
  */
 #include "sdm670-wcd.dtsi"
 #include "sdm670-wsa881x.dtsi"
+#include "sdm670-lpi.dtsi"
 #include <dt-bindings/clock/qcom,audio-ext-clk.h>
 
 &tavil_snd {
@@ -59,6 +60,49 @@
 		"SpkrLeft", "SpkrRight";
 };
 
+&tasha_snd {
+	qcom,msm-mi2s-master = <1>, <1>, <1>, <1>, <1>;
+	qcom,audio-routing =
+		"AIF4 VI", "MCLK",
+		"RX_BIAS", "MCLK",
+		"MADINPUT", "MCLK",
+		"hifi amp", "LINEOUT1",
+		"hifi amp", "LINEOUT2",
+		"AMIC2", "MIC BIAS2",
+		"MIC BIAS2", "Headset Mic",
+		"AMIC3", "MIC BIAS2",
+		"MIC BIAS2", "ANCRight Headset Mic",
+		"AMIC4", "MIC BIAS2",
+		"MIC BIAS2", "ANCLeft Headset Mic",
+		"AMIC5", "MIC BIAS3",
+		"MIC BIAS3", "Handset Mic",
+		"DMIC0", "MIC BIAS1",
+		"MIC BIAS1", "Digital Mic0",
+		"DMIC1", "MIC BIAS1",
+		"MIC BIAS1", "Digital Mic1",
+		"DMIC2", "MIC BIAS3",
+		"MIC BIAS3", "Digital Mic2",
+		"DMIC3", "MIC BIAS3",
+		"MIC BIAS3", "Digital Mic3",
+		"DMIC4", "MIC BIAS4",
+		"MIC BIAS4", "Digital Mic4",
+		"DMIC5", "MIC BIAS4",
+		"MIC BIAS4", "Digital Mic5",
+		"SpkrLeft IN", "SPK1 OUT",
+		"SpkrRight IN", "SPK2 OUT";
+
+	qcom,msm-mbhc-hphl-swh = <0>;
+	qcom,msm-mbhc-gnd-swh = <0>;
+	qcom,msm-mclk-freq = <9600000>;
+	asoc-codec = <&stub_codec>;
+	asoc-codec-names = "msm-stub-codec.1";
+	qcom,wsa-max-devs = <2>;
+	qcom,wsa-devs = <&wsa881x_211>, <&wsa881x_212>,
+		<&wsa881x_213>, <&wsa881x_214>;
+	qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight",
+		"SpkrLeft", "SpkrRight";
+};
+
 &int_codec {
 	qcom,audio-routing =
 		"RX_BIAS", "INT_MCLK0",
@@ -121,13 +165,6 @@
 		pinctrl-1 = <&wcd_usbc_analog_en1_idle>;
 	};
 
-	wcd_gnd_mic_swap_gpio: msm_cdc_pinctrl_gnd_mic_swap {
-		compatible = "qcom,msm-cdc-pinctrl";
-		pinctrl-names = "aud_active", "aud_sleep";
-		pinctrl-0 = <&wcd_gnd_mic_swap_active>;
-		pinctrl-1 = <&wcd_gnd_mic_swap_idle>;
-	};
-
 	cdc_pdm_gpios: cdc_pdm_pinctrl {
 		compatible = "qcom,msm-cdc-pinctrl";
 		pinctrl-names = "aud_active", "aud_sleep";
@@ -229,6 +266,31 @@
 		pinctrl-0 = <&wcd_intr_default>;
 	};
 
+	clock_audio_native: audio_ext_clk_native {
+		status = "disabled";
+		compatible = "qcom,audio-ref-clk";
+		#clock-cells = <1>;
+		qcom,lpass-mclk-id = <0x116>;
+		qcom,codec-mclk-clk-freq = <11289600>;
+		qcom,audio-ref-clk-gpio = <&lpi_tlmm 19 0>;
+		pinctrl-names = "sleep", "active";
+		pinctrl-0 = <&lpi_mclk0_sleep>;
+		pinctrl-1 = <&lpi_mclk0_active>;
+	};
+
+	clock_audio: audio_ext_clk {
+		status = "disabled";
+		compatible = "qcom,audio-ref-clk";
+		pinctrl-names = "active", "sleep";
+		pinctrl-0 = <&tasha_mclk_default>;
+		pinctrl-1 = <&tasha_mclk_default>;
+		qcom,audio-ref-clk-gpio = <&pm660_gpios 3 0>;
+		clock-names = "osr_clk";
+		clocks = <&pm660_div_clk>;
+		qcom,node_has_rpm_clock;
+		#clock-cells = <1>;
+	};
+
 	clock_audio_lnbb: audio_ext_clk_lnbb {
 		status = "disabled";
 		compatible = "qcom,audio-ref-clk";
@@ -261,6 +323,40 @@
 };
 
 &slim_aud {
+	wcd9335: tasha_codec {
+		status = "disabled";
+		compatible = "qcom,tasha-slim-pgd";
+		elemental-addr = [00 01 a0 01 17 02];
+
+		interrupt-parent = <&wcd9xxx_intc>;
+		interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
+		      17 18 19 20 21 22 23 24 25 26 27 28 29
+		      30>;
+
+		qcom,wcd-rst-gpio-node = <&wcd_rst_gpio>;
+
+		clock-names = "wcd_clk", "wcd_native_clk";
+		clocks = <&clock_audio AUDIO_PMI_CLK>,
+			 <&clock_audio_native AUDIO_LPASS_MCLK>;
+
+		cdc-vdd-mic-bias-supply = <&pm660l_bob>;
+		qcom,cdc-vdd-mic-bias-voltage = <3312000 3312000>;
+		qcom,cdc-vdd-mic-bias-current = <30400>;
+
+		qcom,cdc-static-supplies = "cdc-vdd-mic-bias";
+
+		qcom,cdc-micbias1-mv = <1800>;
+		qcom,cdc-micbias2-mv = <1800>;
+		qcom,cdc-micbias3-mv = <1800>;
+		qcom,cdc-micbias4-mv = <1800>;
+
+		qcom,cdc-mclk-clk-rate = <9600000>;
+		qcom,cdc-slim-ifd = "tasha-slim-ifd";
+		qcom,cdc-slim-ifd-elemental-addr = [00 00 a0 01 17 02];
+		qcom,cdc-dmic-sample-rate = <4800000>;
+		qcom,cdc-mad-dmic-rate = <600000>;
+	};
+
 	wcd934x_cdc: tavil_codec {
 		status = "disabled";
 		compatible = "qcom,tavil-slim-pgd";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi b/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi
index bda44cc..faaf644 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -12,10 +12,6 @@
  */
 
 #include "msm-audio-lpass.dtsi"
-#include "sdm670-wcd.dtsi"
-#include "sdm670-wsa881x.dtsi"
-#include "sdm670-lpi.dtsi"
-#include <dt-bindings/clock/qcom,audio-ext-clk.h>
 
 &msm_audio_ion {
 	iommus = <&apps_smmu 0x1801 0x0>;
@@ -23,6 +19,13 @@
 };
 
 &soc {
+	audio_load_mod {
+		compatible = "qcom,audio-load-mod";
+		audio_test_mod {
+			compatible = "qcom,audio-test-mod";
+		};
+	};
+
 	qcom,avtimer@62cf700c {
 		compatible = "qcom,avtimer";
 		reg = <0x62cf700c 0x4>,
@@ -99,6 +102,72 @@
 			"msm-dai-q6-tdm.36928", "msm-dai-q6-tdm.36929";
 	};
 
+	tasha_snd: sound-tasha {
+		status = "disabled";
+		compatible = "qcom,sdm670-asoc-snd-tasha";
+		qcom,model = "sdm670-tasha-snd-card";
+		qcom,wcn-btfm;
+		qcom,mi2s-audio-intf;
+		qcom,auxpcm-audio-intf;
+		asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
+			<&loopback>, <&compress>, <&hostless>,
+			<&afe>, <&lsm>, <&routing>, <&cpe>, <&compr>,
+			<&pcm_noirq>, <&cpe3>;
+		asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1",
+			"msm-pcm-dsp.2", "msm-voip-dsp",
+			"msm-pcm-voice", "msm-pcm-loopback",
+			"msm-compress-dsp", "msm-pcm-hostless",
+			"msm-pcm-afe", "msm-lsm-client",
+			"msm-pcm-routing", "msm-cpe-lsm",
+			"msm-compr-dsp", "msm-pcm-dsp-noirq",
+			"msm-cpe-lsm.3";
+		asoc-cpu = <&dai_mi2s0>, <&dai_mi2s1>,
+			<&dai_mi2s2>, <&dai_mi2s3>, <&dai_mi2s4>,
+			<&dai_pri_auxpcm>, <&dai_sec_auxpcm>,
+			<&dai_tert_auxpcm>, <&dai_quat_auxpcm>,
+			<&dai_quin_auxpcm>,
+			<&sb_0_rx>, <&sb_0_tx>, <&sb_1_rx>, <&sb_1_tx>,
+			<&sb_2_rx>, <&sb_2_tx>, <&sb_3_rx>, <&sb_3_tx>,
+			<&sb_4_rx>, <&sb_4_tx>, <&sb_5_rx>, <&sb_5_tx>,
+			<&sb_6_rx>, <&sb_7_rx>, <&sb_7_tx>,
+			<&sb_8_rx>, <&sb_8_tx>,
+			<&afe_pcm_rx>, <&afe_pcm_tx>, <&afe_proxy_rx>,
+			<&afe_proxy_tx>, <&incall_record_rx>,
+			<&incall_record_tx>, <&incall_music_rx>,
+			<&incall_music_2_rx>,
+			<&usb_audio_rx>, <&usb_audio_tx>,
+			<&dai_pri_tdm_rx_0>, <&dai_pri_tdm_tx_0>,
+			<&dai_sec_tdm_rx_0>, <&dai_sec_tdm_tx_0>,
+			<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>,
+			<&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>,
+			<&dai_quin_tdm_rx_0>, <&dai_quin_tdm_tx_0>;
+		asoc-cpu-names = "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
+			"msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
+			"msm-dai-q6-mi2s.4",
+			"msm-dai-q6-auxpcm.1", "msm-dai-q6-auxpcm.2",
+			"msm-dai-q6-auxpcm.3", "msm-dai-q6-auxpcm.4",
+			"msm-dai-q6-auxpcm.5",
+			"msm-dai-q6-dev.16384", "msm-dai-q6-dev.16385",
+			"msm-dai-q6-dev.16386", "msm-dai-q6-dev.16387",
+			"msm-dai-q6-dev.16388", "msm-dai-q6-dev.16389",
+			"msm-dai-q6-dev.16390", "msm-dai-q6-dev.16391",
+			"msm-dai-q6-dev.16392", "msm-dai-q6-dev.16393",
+			"msm-dai-q6-dev.16394", "msm-dai-q6-dev.16395",
+			"msm-dai-q6-dev.16396",
+			"msm-dai-q6-dev.16398", "msm-dai-q6-dev.16399",
+			"msm-dai-q6-dev.16400", "msm-dai-q6-dev.16401",
+			"msm-dai-q6-dev.224", "msm-dai-q6-dev.225",
+			"msm-dai-q6-dev.241", "msm-dai-q6-dev.240",
+			"msm-dai-q6-dev.32771", "msm-dai-q6-dev.32772",
+			"msm-dai-q6-dev.32773", "msm-dai-q6-dev.32770",
+			"msm-dai-q6-dev.28672", "msm-dai-q6-dev.28673",
+			"msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36865",
+			"msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36881",
+			"msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36897",
+			"msm-dai-q6-tdm.36912", "msm-dai-q6-tdm.36913",
+			"msm-dai-q6-tdm.36928", "msm-dai-q6-tdm.36929";
+	};
+
 	int_codec: sound {
 		status = "okay";
 		compatible = "qcom,sdm670-asoc-snd";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-bus.dtsi b/arch/arm64/boot/dts/qcom/sdm670-bus.dtsi
index 4f5a9b1..aa7cc97 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-bus.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-bus.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -669,35 +669,6 @@
 			qcom,bcms = <&bcm_cn0>;
 		};
 
-		mas_qhm_tic: mas-qhm-tic {
-			cell-id = <MSM_BUS_MASTER_TIC>;
-			label = "mas-qhm-tic";
-			qcom,buswidth = <4>;
-			qcom,agg-ports = <1>;
-			qcom,connections = <&slv_qhs_tlmm_south
-				&slv_qhs_camera_cfg &slv_qhs_sdc4
-				&slv_qhs_sdc2 &slv_qhs_mnoc_cfg
-				&slv_qhs_ufs_mem_cfg &slv_qhs_glm
-				&slv_qhs_pdm &slv_qhs_a2_noc_cfg
-				&slv_qhs_qdss_cfg &slv_qhs_display_cfg
-				&slv_qhs_tcsr &slv_qhs_dcc_cfg
-				&slv_qhs_ddrss_cfg &slv_qns_cnoc_a2noc
-				&slv_qhs_snoc_cfg &slv_qhs_phy_refgen_south
-				&slv_qhs_gpuss_cfg &slv_qhs_venus_cfg
-				&slv_qhs_tsif &slv_qhs_compute_dsp_cfg
-				&slv_qhs_aop &slv_qhs_qupv3_north
-				&slv_srvc_cnoc &slv_qhs_usb3_0
-				&slv_qhs_ipa &slv_qhs_cpr_cx
-				&slv_qhs_a1_noc_cfg &slv_qhs_aoss
-				&slv_qhs_prng &slv_qhs_vsense_ctrl_cfg
-				&slv_qhs_emmc_cfg &slv_qhs_qupv3_south
-				&slv_qhs_spdm &slv_qhs_crypto0_cfg
-				&slv_qhs_pimem_cfg &slv_qhs_tlmm_north
-				&slv_qhs_clk_ctl &slv_qhs_imem_cfg>;
-			qcom,bus-dev = <&fab_config_noc>;
-			qcom,bcms = <&bcm_cn0>;
-		};
-
 		mas_qnm_snoc: mas-qnm-snoc {
 			cell-id = <MSM_BUS_SNOC_CNOC_MAS>;
 			label = "mas-qnm-snoc";
@@ -727,36 +698,6 @@
 			qcom,bcms = <&bcm_cn0>;
 		};
 
-		mas_xm_qdss_dap: mas-xm-qdss-dap {
-			cell-id = <MSM_BUS_MASTER_QDSS_DAP>;
-			label = "mas-xm-qdss-dap";
-			qcom,buswidth = <8>;
-			qcom,agg-ports = <1>;
-			qcom,connections = <&slv_qhs_tlmm_south
-				&slv_qhs_camera_cfg
-				&slv_qhs_sdc4
-				&slv_qhs_sdc2 &slv_qhs_mnoc_cfg
-				&slv_qhs_ufs_mem_cfg &slv_qhs_glm
-				&slv_qhs_pdm &slv_qhs_a2_noc_cfg
-				&slv_qhs_qdss_cfg &slv_qhs_display_cfg
-				&slv_qhs_tcsr &slv_qhs_dcc_cfg
-				&slv_qhs_ddrss_cfg &slv_qns_cnoc_a2noc
-				&slv_qhs_snoc_cfg &slv_qhs_phy_refgen_south
-				&slv_qhs_gpuss_cfg &slv_qhs_venus_cfg
-				&slv_qhs_tsif &slv_qhs_compute_dsp_cfg
-				&slv_qhs_aop &slv_qhs_qupv3_north
-				&slv_srvc_cnoc &slv_qhs_usb3_0
-				&slv_qhs_ipa &slv_qhs_cpr_cx
-				&slv_qhs_a1_noc_cfg &slv_qhs_aoss
-				&slv_qhs_prng &slv_qhs_vsense_ctrl_cfg
-				&slv_qhs_qupv3_south &slv_qhs_spdm
-				&slv_qhs_crypto0_cfg &slv_qhs_pimem_cfg
-				&slv_qhs_tlmm_north &slv_qhs_clk_ctl
-				&slv_qhs_imem_cfg>;
-			qcom,bus-dev = <&fab_config_noc>;
-			qcom,bcms = <&bcm_cn0>;
-		};
-
 		mas_qhm_cnoc: mas-qhm-cnoc {
 			cell-id = <MSM_BUS_MASTER_CNOC_DC_NOC>;
 			label = "mas-qhm-cnoc";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm670-camera.dtsi
index 1f40e20..715affd 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-camera.dtsi
@@ -229,6 +229,7 @@
 	qcom,cam_smmu {
 		compatible = "qcom,msm-cam-smmu";
 		status = "ok";
+		non-fatal-fault-disabled;
 
 		msm_cam_smmu_lrme {
 			compatible = "qcom,msm-cam-smmu-cb";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi
index 7928ab5..6dc5c2c 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -169,6 +169,15 @@
 			};
 
 			port@1 {
+				reg = <6>;
+				funnel_swao_in_sensor_etm0: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&sensor_etm0_out_funnel_swao>;
+				};
+			};
+
+			port@2 {
 				reg = <7>;
 				funnel_swao_in_tpda_swao: endpoint {
 					slave-mode;
@@ -562,6 +571,8 @@
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
 
+		status = "disabled";
+
 		ports {
 			#address-cells = <1>;
 			#size-cells = <0>;
@@ -2057,6 +2068,20 @@
 		};
 	};
 
+	sensor_etm0 {
+		compatible = "qcom,coresight-remote-etm";
+
+		coresight-name = "coresight-sensor-etm0";
+		qcom,inst-id = <8>;
+
+		port {
+			sensor_etm0_out_funnel_swao: endpoint {
+				remote-endpoint =
+					<&funnel_swao_in_sensor_etm0>;
+			};
+		};
+	};
+
 	audio_etm0 {
 		compatible = "qcom,coresight-remote-etm";
 
diff --git a/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi
index 9e75ee0..75a2762 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -16,6 +16,7 @@
 		compatible = "qcom,pil-tz-generic";
 		qcom,pas-id = <13>;
 		qcom,firmware-name = "a615_zap";
+		memory-region = <&pil_gpu_mem>;
 	};
 
 	msm_bus: qcom,kgsl-busmon{
@@ -46,9 +47,12 @@
 		label = "kgsl-3d0";
 		compatible = "qcom,kgsl-3d0", "qcom,kgsl-3d";
 		status = "ok";
-		reg = <0x5000000 0x40000
-			0x780000 0x6300>;
-		reg-names = "kgsl_3d0_reg_memory", "qfprom_memory";
+		reg =   <0x5000000 0x40000>,
+			<0x5061000 0x800>,
+			<0x780000 0x6300>;
+		reg-names =     "kgsl_3d0_reg_memory",
+				"kgsl_3d0_cx_dbgc_memory",
+				"qfprom_memory";
 		interrupts = <0 300 0>;
 		interrupt-names = "kgsl_3d0_irq";
 		qcom,id = <0>;
@@ -58,6 +62,7 @@
 		qcom,initial-pwrlevel = <3>;
 
 		qcom,gpu-quirk-hfi-use-reg;
+		qcom,gpu-quirk-limit-uche-gbif-rw;
 
 		/* <HZ/12> */
 		qcom,idle-timeout = <80>;
@@ -117,7 +122,7 @@
 		cache-slices = <&llcc 12>, <&llcc 11>;
 
 		/* CPU latency parameter */
-		qcom,pm-qos-active-latency = <914>;
+		qcom,pm-qos-active-latency = <899>;
 		qcom,pm-qos-wakeup-latency = <899>;
 
 		/* Enable context aware freq. scaling */
@@ -134,6 +139,8 @@
 			#size-cells = <0>;
 			compatible = "qcom,gpu-coresight";
 
+			status = "disabled";
+
 			qcom,gpu-coresight@0 {
 				reg = <0>;
 				coresight-name = "coresight-gfx";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-int-cdc-usbc-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm670-int-cdc-usbc-audio-overlay.dtsi
index cb0a386..22e9a7a 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-int-cdc-usbc-audio-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-int-cdc-usbc-audio-overlay.dtsi
@@ -14,5 +14,8 @@
 &int_codec {
 	qcom,msm-mbhc-usbc-audio-supported = <1>;
 	qcom,usbc-analog-en1-gpio = <&wcd_usbc_analog_en1_gpio>;
-	qcom,us-euro-gpios = <&wcd_gnd_mic_swap_gpio>;
+	qcom,usbc-analog-en2-gpio = <&tlmm 40 0>;
+	pinctrl-names = "aud_active", "aud_sleep";
+	pinctrl-0 = <&wcd_usbc_analog_en2_active>;
+	pinctrl-1 = <&wcd_usbc_analog_en2_idle>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm670-ion.dtsi b/arch/arm64/boot/dts/qcom/sdm670-ion.dtsi
index 46de412..3fd1229 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-ion.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-ion.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -27,6 +27,12 @@
 			qcom,ion-heap-type = "DMA";
 		};
 
+		qcom,ion-heap@19 { /* QSEECOM TA HEAP */
+			reg = <19>;
+			memory-region = <&qseecom_ta_mem>;
+			qcom,ion-heap-type = "DMA";
+		};
+
 		qcom,ion-heap@13 { /* SPSS HEAP */
 			reg = <13>;
 			memory-region = <&sp_mem>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-lpi.dtsi b/arch/arm64/boot/dts/qcom/sdm670-lpi.dtsi
index c76fbce..948c51d 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-lpi.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-lpi.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -18,6 +18,34 @@
 		gpio-controller;
 		#gpio-cells = <2>;
 
+		lpi_mclk0_active: lpi_mclk0_active {
+			mux {
+				pins = "gpio19";
+				function = "func1";
+			};
+
+			config {
+				pins = "gpio19";
+				drive-strength = <8>;
+				bias-disable;
+				output-low;
+			};
+		};
+
+		lpi_mclk0_sleep: lpi_mclk0_sleep {
+			mux {
+				pins = "gpio19";
+				function = "func1";
+			};
+
+			config {
+				pins = "gpio19";
+				drive-strength = <2>;
+				bias-disable;
+				bias-pull-down;
+			};
+		};
+
 		cdc_pdm_clk_active: cdc_pdm_clk_active {
 			mux {
 				pins = "gpio18";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
index 9025d6b..5684e19 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1505,6 +1505,67 @@
 			};
 		};
 
+		/* Tasha WSA speaker reset pins */
+		tasha_spkr_1_sd_n {
+			tasha_spkr_1_sd_n_sleep: tasha_spkr_1_sd_n_sleep {
+				mux {
+					pins = "gpio66";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio66";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;
+					input-enable;
+				};
+			};
+
+			tasha_spkr_1_sd_n_active: tasha_spkr_1_sd_n_active {
+				mux {
+					pins = "gpio66";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio66";
+					drive-strength = <16>;   /* 16 mA */
+					bias-disable;
+					output-high;
+				};
+			};
+		};
+
+		tasha_spkr_2_sd_n {
+			tasha_spkr_2_sd_n_sleep: tasha_spkr_2_sd_n_sleep {
+				mux {
+					pins = "gpio65";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio65";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;
+					input-enable;
+				};
+			};
+
+			tasha_spkr_2_sd_n_active: tasha_spkr_2_sd_n_active {
+				mux {
+					pins = "gpio65";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio65";
+					drive-strength = <16>;   /* 16 mA */
+					bias-disable;
+					output-high;
+				};
+			};
+		};
+
 		wcd_buck_vsel {
 			wcd_buck_vsel_default: wcd_buck_vsel_default{
 				mux {
@@ -1521,8 +1582,8 @@
 			};
 		};
 
-		wcd_gnd_mic_swap {
-			wcd_gnd_mic_swap_idle: wcd_gnd_mic_swap_idle {
+		wcd_usbc_analog_en2 {
+			wcd_usbc_analog_en2_idle: wcd_usbc_ana_en2_idle {
 				mux {
 					pins = "gpio40";
 					function = "gpio";
@@ -1536,7 +1597,7 @@
 				};
 			};
 
-			wcd_gnd_mic_swap_active: wcd_gnd_mic_swap_active {
+			wcd_usbc_analog_en2_active: wcd_usbc_ana_en2_active {
 				mux {
 					pins = "gpio40";
 					function = "gpio";
@@ -1995,6 +2056,19 @@
 	};
 };
 
+&pm660_gpios {
+	tasha_mclk {
+		tasha_mclk_default: tasha_mclk_default{
+			pins = "gpio3";
+			function = "func1";
+			qcom,drive-strength = <2>;
+			power-source = <0>;
+			bias-disable;
+			output-low;
+		};
+	};
+};
+
 &pm660l_gpios {
 	camera_rear_dvdd_en {
 		camera_rear_dvdd_en_default: camera_rear_dvdd_en_default {
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi
index b330cf5..5bf8df7 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi
@@ -185,4 +185,8 @@
 		reg = <0xc300000 0x1000>, <0xc3f0004 0x4>;
 		reg-names = "phys_addr_base", "offset_addr";
 	};
+
+	qcom,rpmh-master-stats {
+		compatible = "qcom,rpmh-master-stats";
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pm660a-tasha-codec-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-pm660a-tasha-codec-cdp-overlay.dts
new file mode 100644
index 0000000..b7cb820
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-pm660a-tasha-codec-cdp-overlay.dts
@@ -0,0 +1,74 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sdm670-cdp.dtsi"
+#include "pm660a.dtsi"
+#include "sdm670-tasha-codec.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660A + Tasha Codec CDP";
+	compatible = "qcom,sdm670-cdp", "qcom,sdm670", "qcom,cdp";
+	qcom,msm-id = <336 0x0>;
+	qcom,board-id = <1 5>;
+	qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>,
+		       <0x0001001b 0x0002001a 0x0 0x0>,
+		       <0x0001001b 0x0202001a 0x0 0x0>;
+};
+
+&dsi_dual_nt35597_truly_video_display {
+	/delete-property/ qcom,dsi-display-active;
+};
+
+&dsi_panel_pwr_supply_labibb_amoled {
+	qcom,panel-supply-entry@2 {
+		reg = <2>;
+		qcom,supply-name = "lab";
+		qcom,supply-min-voltage = <4600000>;
+		qcom,supply-max-voltage = <6100000>;
+		qcom,supply-enable-load = <100000>;
+		qcom,supply-disable-load = <100>;
+	};
+
+	qcom,panel-supply-entry@3 {
+		reg = <3>;
+		qcom,supply-name = "ibb";
+		qcom,supply-min-voltage = <4000000>;
+		qcom,supply-max-voltage = <6300000>;
+		qcom,supply-enable-load = <100000>;
+		qcom,supply-disable-load = <100>;
+	};
+
+	qcom,panel-supply-entry@4 {
+		reg = <4>;
+		qcom,supply-name = "oledb";
+		qcom,supply-min-voltage = <5000000>;
+		qcom,supply-max-voltage = <8100000>;
+		qcom,supply-enable-load = <100000>;
+		qcom,supply-disable-load = <100>;
+	};
+};
+
+&dsi_rm67195_amoled_fhd_cmd_display {
+	qcom,dsi-display-active;
+	lab-supply = <&lab_regulator>;
+	ibb-supply = <&ibb_regulator>;
+	oledb-supply = <&pm660a_oledb>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pm660a-tasha-codec-cdp.dts b/arch/arm64/boot/dts/qcom/sdm670-pm660a-tasha-codec-cdp.dts
new file mode 100644
index 0000000..1922b38
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-pm660a-tasha-codec-cdp.dts
@@ -0,0 +1,68 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sdm670.dtsi"
+#include "sdm670-cdp.dtsi"
+#include "pm660a.dtsi"
+#include "sdm670-tasha-codec.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660A Tasha Codec CDP";
+	compatible = "qcom,sdm670-cdp", "qcom,sdm670", "qcom,cdp";
+	qcom,board-id = <1 5>;
+	qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>,
+		       <0x0001001b 0x0002001a 0x0 0x0>,
+		       <0x0001001b 0x0202001a 0x0 0x0>;
+};
+
+&dsi_dual_nt35597_truly_video_display {
+	/delete-property/ qcom,dsi-display-active;
+};
+
+&dsi_panel_pwr_supply_labibb_amoled {
+	qcom,panel-supply-entry@2 {
+		reg = <2>;
+		qcom,supply-name = "lab";
+		qcom,supply-min-voltage = <4600000>;
+		qcom,supply-max-voltage = <6100000>;
+		qcom,supply-enable-load = <100000>;
+		qcom,supply-disable-load = <100>;
+	};
+
+	qcom,panel-supply-entry@3 {
+		reg = <3>;
+		qcom,supply-name = "ibb";
+		qcom,supply-min-voltage = <4000000>;
+		qcom,supply-max-voltage = <6300000>;
+		qcom,supply-enable-load = <100000>;
+		qcom,supply-disable-load = <100>;
+	};
+
+	qcom,panel-supply-entry@4 {
+		reg = <4>;
+		qcom,supply-name = "oledb";
+		qcom,supply-min-voltage = <5000000>;
+		qcom,supply-max-voltage = <8100000>;
+		qcom,supply-enable-load = <100000>;
+		qcom,supply-disable-load = <100>;
+	};
+};
+
+&dsi_rm67195_amoled_fhd_cmd_display {
+	qcom,dsi-display-active;
+	lab-supply = <&lab_regulator>;
+	ibb-supply = <&ibb_regulator>;
+	oledb-supply = <&pm660a_oledb>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2-overlay.dts
index 37eb4cd..73d1909 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2-overlay.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -30,3 +30,22 @@
 		       <0x0001001b 0x0102001a 0x0 0x0>,
 		       <0x0001001b 0x0201011a 0x0 0x0>;
 };
+
+&dsi_dual_nt36850_truly_cmd_display {
+	/delete-property/ qcom,dsi-display-active;
+};
+
+&dsi_hx8399_truly_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,panel-mode-gpio = <&tlmm 76 0>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
+};
+
+&dsi_hx8399_truly_cmd_display {
+	qcom,dsi-display-active;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2.dts b/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2.dts
index dada4c6..680bc17 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2.dts
@@ -24,3 +24,22 @@
 		       <0x0001001b 0x0102001a 0x0 0x0>,
 		       <0x0001001b 0x0201011a 0x0 0x0>;
 };
+
+&dsi_dual_nt36850_truly_cmd_display {
+	/delete-property/ qcom,dsi-display-active;
+};
+
+&dsi_hx8399_truly_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,panel-mode-gpio = <&tlmm 76 0>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
+};
+
+&dsi_hx8399_truly_cmd_display {
+	qcom,dsi-display-active;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
index cc4645f..9a7e742 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -160,9 +160,9 @@
 		    0x40 0x194 /* PLL_BIAS_CONTROL_1 */
 		    0x20 0x198 /* PLL_BIAS_CONTROL_2 */
 		    0x21 0x214 /* PWR_CTRL2 */
-		    0x07 0x220 /* IMP_CTRL1 */
-		    0x58 0x224 /* IMP_CTRL2 */
-		    0x77 0x240 /* TUNE1 */
+		    0x00 0x220 /* IMP_CTRL1 */
+		    0x1a 0x224 /* IMP_CTRL2 */
+		    0x47 0x240 /* TUNE1 */
 		    0x29 0x244 /* TUNE2 */
 		    0xca 0x248 /* TUNE3 */
 		    0x04 0x24c /* TUNE4 */
@@ -307,6 +307,12 @@
 	 qcom,dsi-display-active;
 };
 
+&dsi_panel_pwr_supply {
+	qcom,panel-supply-entry@2 {
+		qcom,supply-post-off-sleep = <5>;
+	};
+};
+
 &pm660l_wled {
 	status = "okay";
 	qcom,led-strings-list = [00 01];
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi b/arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi
index 225a6e6..6c143e4 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -32,7 +32,7 @@
 	 * instances only.
 	 */
 	qupv3_se6_4uart: qcom,qup_uart@0x898000 {
-		compatible = "qcom,msm-geni-serial-hs", "qcom,msm-geni-uart";
+		compatible = "qcom,msm-geni-serial-hs";
 		reg = <0x898000 0x4000>;
 		reg-names = "se_phys";
 		clock-names = "se-clk", "m-ahb", "s-ahb";
@@ -52,7 +52,7 @@
 	};
 
 	qupv3_se7_4uart: qcom,qup_uart@0x89c000 {
-		compatible = "qcom,msm-geni-serial-hs", "qcom,msm-geni-uart";
+		compatible = "qcom,msm-geni-serial-hs";
 		reg = <0x89c000 0x4000>;
 		reg-names = "se_phys";
 		clock-names = "se-clk", "m-ahb", "s-ahb";
@@ -425,7 +425,7 @@
 
 	/* Debug UART Instance for CDP/MTP platform */
 	qupv3_se9_2uart: qcom,qup_uart@0xa84000 {
-		compatible = "qcom,msm-geni-console", "qcom,msm-geni-uart";
+		compatible = "qcom,msm-geni-console";
 		reg = <0xa84000 0x4000>;
 		reg-names = "se_phys";
 		clock-names = "se-clk", "m-ahb", "s-ahb";
@@ -442,7 +442,7 @@
 
 	/* Debug UART Instance for RUMI platform */
 	qupv3_se10_2uart: qcom,qup_uart@0xa88000 {
-		compatible = "qcom,msm-geni-console", "qcom,msm-geni-uart";
+		compatible = "qcom,msm-geni-console";
 		reg = <0xa88000 0x4000>;
 		reg-names = "se_phys";
 		clock-names = "se-clk", "m-ahb", "s-ahb";
@@ -459,7 +459,7 @@
 
 	/* Debug UART Instance for CDP/MTP platform on SDM670 */
 	qupv3_se12_2uart: qcom,qup_uart@0xa90000 {
-		compatible = "qcom,msm-geni-console", "qcom,msm-geni-uart";
+		compatible = "qcom,msm-geni-console";
 		reg = <0xa90000 0x4000>;
 		reg-names = "se_phys";
 		clock-names = "se-clk", "m-ahb", "s-ahb";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi
index 9898ada..6b24593 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017,2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -158,9 +158,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa1";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		proxy-supply = <&pm660_l1>;
 		pm660_l1: regulator-pm660-l1 {
@@ -171,7 +172,7 @@
 			qcom,proxy-consumer-enable;
 			qcom,proxy-consumer-current = <43600>;
 			qcom,init-voltage = <1200000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -179,9 +180,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa2";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l2: regulator-pm660-l2 {
 			regulator-name = "pm660_l2";
@@ -189,7 +191,7 @@
 			regulator-min-microvolt = <1000000>;
 			regulator-max-microvolt = <1000000>;
 			qcom,init-voltage = <1000000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -197,9 +199,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa3";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l3: regulator-pm660-l3 {
 			regulator-name = "pm660_l3";
@@ -207,7 +210,7 @@
 			regulator-min-microvolt = <1000000>;
 			regulator-max-microvolt = <1000000>;
 			qcom,init-voltage = <1000000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -215,9 +218,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa5";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l5: regulator-pm660-l5 {
 			regulator-name = "pm660_l5";
@@ -225,7 +229,7 @@
 			regulator-min-microvolt = <800000>;
 			regulator-max-microvolt = <800000>;
 			qcom,init-voltage = <800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -233,17 +237,18 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa6";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
-		qcom,mode-threshold-currents = <0 1>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm660_l6: regulator-pm660-l6 {
 			regulator-name = "pm660_l6";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1248000>;
 			regulator-max-microvolt = <1304000>;
 			qcom,init-voltage = <1248000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -251,9 +256,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa7";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l7: regulator-pm660-l7 {
 			regulator-name = "pm660_l7";
@@ -261,7 +267,7 @@
 			regulator-min-microvolt = <1200000>;
 			regulator-max-microvolt = <1200000>;
 			qcom,init-voltage = <1200000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -269,9 +275,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa8";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l8: regulator-pm660-l8 {
 			regulator-name = "pm660_l8";
@@ -279,7 +286,7 @@
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1800000>;
 			qcom,init-voltage = <1800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -287,17 +294,18 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa9";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
-		qcom,mode-threshold-currents = <0 1>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm660_l9: regulator-pm660-l9 {
 			regulator-name = "pm660_l9";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1800000>;
 			qcom,init-voltage = <1800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -305,9 +313,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa10";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l10: regulator-pm660-l10 {
 			regulator-name = "pm660_l10";
@@ -315,7 +324,7 @@
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1800000>;
 			qcom,init-voltage = <1800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -323,9 +332,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa11";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		proxy-supply = <&pm660_l11>;
 		pm660_l11: regulator-pm660-l11 {
@@ -336,7 +346,7 @@
 			qcom,proxy-consumer-enable;
 			qcom,proxy-consumer-current = <115000>;
 			qcom,init-voltage = <1800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -344,9 +354,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa12";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l12: regulator-pm660-l12 {
 			regulator-name = "pm660_l12";
@@ -354,7 +365,7 @@
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1800000>;
 			qcom,init-voltage = <1800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -362,9 +373,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa13";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l13: regulator-pm660-l13 {
 			regulator-name = "pm660_l13";
@@ -372,7 +384,7 @@
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1800000>;
 			qcom,init-voltage = <1800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -380,9 +392,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa14";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l14: regulator-pm660-l14 {
 			regulator-name = "pm660_l14";
@@ -390,7 +403,7 @@
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1800000>;
 			qcom,init-voltage = <1800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -398,9 +411,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa15";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l15: regulator-pm660-l15 {
 			regulator-name = "pm660_l15";
@@ -408,7 +422,7 @@
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <2950000>;
 			qcom,init-voltage = <1800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -416,9 +430,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa16";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l16: regulator-pm660-l16 {
 		regulator-name = "pm660_l16";
@@ -426,7 +441,7 @@
 			regulator-min-microvolt = <2700000>;
 			regulator-max-microvolt = <2700000>;
 			qcom,init-voltage = <2700000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -434,9 +449,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa17";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l17: regulator-pm660-l17 {
 			regulator-name = "pm660_l17";
@@ -444,7 +460,7 @@
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <2950000>;
 			qcom,init-voltage = <1800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -452,17 +468,18 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa19";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
-		qcom,mode-threshold-currents = <0 1>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm660_l19: regulator-pm660-l19 {
 			regulator-name = "pm660_l19";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <3000000>;
 			regulator-max-microvolt = <3312000>;
 			qcom,init-voltage = <3000000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -470,9 +487,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldob1";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		proxy-supply = <&pm660l_l1>;
 		pm660l_l1: regulator-pm660l-l1 {
@@ -483,7 +501,7 @@
 			qcom,proxy-consumer-enable;
 			qcom,proxy-consumer-current = <72000>;
 			qcom,init-voltage = <880000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -491,9 +509,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldob2";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660l_l2: regulator-pm660l-l2 {
 			regulator-name = "pm660l_l2";
@@ -501,7 +520,7 @@
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <2960000>;
 			qcom,init-voltage = <1800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -509,9 +528,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldob3";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660l_l3: regulator-pm660l-l3 {
 			regulator-name = "pm660l_l3";
@@ -519,7 +539,7 @@
 			regulator-min-microvolt = <2850000>;
 			regulator-max-microvolt = <3008000>;
 			qcom,init-voltage = <2850000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -527,9 +547,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldob4";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660l_l4: regulator-pm660l-l4 {
 			regulator-name = "pm660l_l4";
@@ -537,7 +558,7 @@
 			regulator-min-microvolt = <2960000>;
 			regulator-max-microvolt = <2960000>;
 			qcom,init-voltage = <2960000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -545,9 +566,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldob5";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660l_l5: regulator-pm660l-l5 {
 			regulator-name = "pm660l_l5";
@@ -555,7 +577,7 @@
 			regulator-min-microvolt = <2960000>;
 			regulator-max-microvolt = <2960000>;
 			qcom,init-voltage = <2960000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -563,9 +585,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldob6";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660l_l6: regulator-pm660l-l6 {
 			regulator-name = "pm660l_l6";
@@ -573,7 +596,7 @@
 			regulator-min-microvolt = <3008000>;
 			regulator-max-microvolt = <3300000>;
 			qcom,init-voltage = <3008000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -581,9 +604,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldob7";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660l_l7: regulator-pm660l-l7 {
 			regulator-name = "pm660l_l7";
@@ -591,7 +615,7 @@
 			regulator-min-microvolt = <3088000>;
 			regulator-max-microvolt = <3100000>;
 			qcom,init-voltage = <3088000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -599,9 +623,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldob8";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660l_l8: regulator-pm660l-l8 {
 			regulator-name = "pm660l_l8";
@@ -609,7 +634,7 @@
 			regulator-min-microvolt = <3300000>;
 			regulator-max-microvolt = <3312000>;
 			qcom,init-voltage = <3300000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
diff --git a/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
index 7e426cf..007f937 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -26,6 +26,7 @@
 #include "dsi-panel-nt35695b-truly-fhd-cmd.dtsi"
 #include "dsi-panel-rm67195-amoled-fhd-cmd.dtsi"
 #include "dsi-panel-nt36850-truly-dualmipi-wqhd-cmd.dtsi"
+#include "dsi-panel-hx8399-truly-singlemipi-fhd-video.dtsi"
 #include <dt-bindings/clock/mdss-10nm-pll-clk.h>
 
 &soc {
@@ -465,6 +466,29 @@
 		ibb-supply = <&lcdb_ncp_vreg>;
 	};
 
+	dsi_hx8399_truly_cmd_display: qcom,dsi-display@16 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_hx8399_truly_cmd_display";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0>;
+		qcom,dsi-phy = <&mdss_dsi_phy0>;
+		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+		qcom,platform-te-gpio = <&tlmm 10 0>;
+		qcom,platform-reset-gpio = <&tlmm 75 0>;
+
+		qcom,dsi-panel = <&dsi_hx8399_truly_cmd>;
+		vddio-supply = <&pm660_l11>;
+		lab-supply = <&lcdb_ldo_vreg>;
+		ibb-supply = <&lcdb_ncp_vreg>;
+	};
+
 	sde_wb: qcom,wb-display@0 {
 		compatible = "qcom,wb-display";
 		cell-index = <0>;
@@ -787,13 +811,14 @@
 };
 
 &dsi_nt35695b_truly_fhd_cmd {
-	qcom,mdss-dsi-t-clk-post = <0x07>;
-	qcom,mdss-dsi-t-clk-pre = <0x1c>;
+	qcom,mdss-dsi-t-clk-post = <0x0d>;
+	qcom,mdss-dsi-t-clk-pre = <0x2d>;
 	qcom,ulps-enabled;
+	qcom,mdss-mdp-transfer-time-us = <14500>;
 	qcom,mdss-dsi-display-timings {
 		timing@0 {
-			qcom,mdss-dsi-panel-phy-timings = [00 1c 05 06 0b 0c
-				05 07 05 03 04 00];
+			qcom,mdss-dsi-panel-phy-timings = [00 1c 08 07 23 22
+				07 07 05 03 04 00];
 			qcom,display-topology = <1 0 1>;
 			qcom,default-topology-index = <0>;
 		};
@@ -801,8 +826,15 @@
 };
 
 &dsi_dual_nt36850_truly_cmd {
-	qcom,mdss-dsi-t-clk-post = <0x0E>;
+	qcom,mdss-dsi-t-clk-post = <0x28>;
 	qcom,mdss-dsi-t-clk-pre = <0x30>;
+	qcom,esd-check-enabled;
+	qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+	qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+	qcom,mdss-dsi-panel-status-command-state = "dsi_hs_mode";
+	qcom,mdss-dsi-panel-status-value = <0x9c>;
+	qcom,mdss-dsi-panel-on-check-value = <0x9c>;
+	qcom,mdss-dsi-panel-status-read-length = <1>;
 	qcom,mdss-dsi-display-timings {
 		timing@0{
 			qcom,mdss-dsi-panel-phy-timings = [00 1f 08 08 24 23 08
@@ -813,3 +845,16 @@
 		};
 	};
 };
+
+&dsi_hx8399_truly_cmd {
+	qcom,mdss-dsi-t-clk-post = <0x0E>;
+	qcom,mdss-dsi-t-clk-pre = <0x30>;
+	qcom,mdss-dsi-display-timings {
+		timing@0 {
+			qcom,mdss-dsi-panel-phy-timings = [00 1f 08 08 24 22 08
+				08 05 03 04 00];
+			qcom,display-topology = <1 0 1>;
+			qcom,default-topology-index = <0>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm670-sde.dtsi
index a918687..7c4e682 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-sde.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -397,6 +397,9 @@
 		qcom,mdss-inline-rot-danger-lut = <0x0055aaff 0x0000ffff>;
 		qcom,mdss-inline-rot-safe-lut = <0x0000f000 0x0000ff00>;
 
+		qcom,mdss-rot-qos-cpu-mask = <0xf>;
+		qcom,mdss-rot-qos-cpu-dma-latency = <75>;
+
 		qcom,mdss-default-ot-rd-limit = <32>;
 		qcom,mdss-default-ot-wr-limit = <32>;
 
diff --git a/arch/arm64/boot/dts/qcom/sdm670-tasha-codec-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm670-tasha-codec-audio-overlay.dtsi
new file mode 100644
index 0000000..80d3879
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-tasha-codec-audio-overlay.dtsi
@@ -0,0 +1,80 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "sdm670-audio-overlay.dtsi"
+
+&pmic_analog_codec {
+	status = "disabled";
+};
+
+&msm_sdw_codec {
+	status = "disabled";
+};
+
+&cdc_pdm_gpios {
+	status = "disabled";
+};
+
+&cdc_comp_gpios {
+	status = "disabled";
+};
+
+&cdc_dmic_gpios {
+	status = "disabled";
+};
+
+&cdc_sdw_gpios {
+	status = "disabled";
+};
+
+&wsa_spkr_en1 {
+	status = "disabled";
+};
+
+&wsa_spkr_en2 {
+	status = "disabled";
+};
+
+&qupv3_se8_spi {
+	status = "disabled";
+};
+
+&wcd9xxx_intc {
+	status = "okay";
+};
+
+&slim_aud {
+	status = "okay";
+};
+
+&dai_slim {
+	status = "okay";
+};
+
+&wcd9335 {
+	status = "okay";
+};
+
+&clock_audio {
+	status = "okay";
+};
+
+&clock_audio_native {
+	status = "okay";
+};
+
+&wcd_rst_gpio {
+	status = "okay";
+};
+
+&wcd9xxx_intc {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-tasha-codec-cdp-overlay.dts
similarity index 62%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr-overlay.dts
copy to arch/arm64/boot/dts/qcom/sdm670-tasha-codec-cdp-overlay.dts
index e1ec364..af8244a 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-tasha-codec-cdp-overlay.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -19,14 +19,15 @@
 #include <dt-bindings/clock/qcom,rpmh.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 
-#include "sdm845-sde-display.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-qvr-audio-overlay.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "sdm670-cdp.dtsi"
+#include "sdm670-tasha-codec.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
-	compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
-	qcom,msm-id = <321 0x20000>;
-	qcom,board-id = <0x01000B 0x20>;
+	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660L Tasha Codec CDP";
+	compatible = "qcom,sdm670-cdp", "qcom,sdm670", "qcom,cdp";
+	qcom,msm-id = <336 0x0>;
+	qcom,board-id = <1 5>;
+	qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
+		       <0x0001001b 0x0102001a 0x0 0x0>,
+		       <0x0001001b 0x0201011a 0x0 0x0>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm670-tasha-codec-cdp.dts b/arch/arm64/boot/dts/qcom/sdm670-tasha-codec-cdp.dts
new file mode 100644
index 0000000..55d2fc2
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-tasha-codec-cdp.dts
@@ -0,0 +1,27 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sdm670.dtsi"
+#include "sdm670-cdp.dtsi"
+#include "sdm670-tasha-codec.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660L Tasha Codec CDP";
+	compatible = "qcom,sdm670-cdp", "qcom,sdm670", "qcom,cdp";
+	qcom,board-id = <1 5>;
+	qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
+		       <0x0001001b 0x0102001a 0x0 0x0>,
+		       <0x0001001b 0x0201011a 0x0 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-tasha-codec.dtsi b/arch/arm64/boot/dts/qcom/sdm670-tasha-codec.dtsi
new file mode 100644
index 0000000..1fc0fd5
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-tasha-codec.dtsi
@@ -0,0 +1,33 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "sdm670-tasha-codec-audio-overlay.dtsi"
+
+&int_codec {
+	status = "disabled";
+};
+
+&pm660_div_clk {
+	status = "okay";
+};
+
+&tasha_snd {
+	status = "okay";
+};
+
+&slim_aud {
+	status = "okay";
+};
+
+&dai_slim {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-thermal.dtsi b/arch/arm64/boot/dts/qcom/sdm670-thermal.dtsi
index 8cbc84f..a0fa9cf 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-thermal.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-thermal.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -471,7 +471,7 @@
 			};
 			gpu_vdd_cdev {
 				trip = <&aoss0_trip>;
-				cooling-device = <&msm_gpu 4 4>;
+				cooling-device = <&msm_gpu 0 0>;
 			};
 			cx_vdd_cdev {
 				trip = <&aoss0_trip>;
@@ -496,606 +496,6 @@
 		};
 	};
 
-	cpu0-silver-lowf {
-		polling-delay-passive = <0>;
-		polling-delay = <0>;
-		thermal-governor = "low_limits_floor";
-		thermal-sensors = <&tsens0 1>;
-		tracks-low;
-		trips {
-			cpu0_trip: cpu0-trip {
-				temperature = <5000>;
-				hysteresis = <5000>;
-				type = "passive";
-			};
-		};
-		cooling-maps {
-			cpu0_vdd_cdev {
-				trip = <&cpu0_trip>;
-				cooling-device = <&CPU0 2 2>;
-			};
-			cpu6_vdd_cdev {
-				trip = <&cpu0_trip>;
-				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
-							(THERMAL_MAX_LIMIT-8)>;
-			};
-			gpu_vdd_cdev {
-				trip = <&cpu0_trip>;
-				cooling-device = <&msm_gpu 4 4>;
-			};
-			cx_vdd_cdev {
-				trip = <&cpu0_trip>;
-				cooling-device = <&cx_cdev 0 0>;
-			};
-			mx_vdd_cdev {
-				trip = <&cpu0_trip>;
-				cooling-device = <&mx_cdev 0 0>;
-			};
-			modem_vdd_cdev {
-				trip = <&cpu0_trip>;
-				cooling-device = <&modem_vdd 0 0>;
-			};
-			adsp_vdd_cdev {
-				trip = <&cpu0_trip>;
-				cooling-device = <&adsp_vdd 0 0>;
-			};
-			cdsp_vdd_cdev {
-				trip = <&cpu0_trip>;
-				cooling-device = <&cdsp_vdd 0 0>;
-			};
-		};
-	};
-
-	cpu1-silver-lowf {
-		polling-delay-passive = <0>;
-		polling-delay = <0>;
-		thermal-governor = "low_limits_floor";
-		thermal-sensors = <&tsens0 2>;
-		tracks-low;
-		trips {
-			cpu1_trip: cpu1-trip {
-				temperature = <5000>;
-				hysteresis = <5000>;
-				type = "passive";
-			};
-		};
-		cooling-maps {
-			cpu0_vdd_cdev {
-				trip = <&cpu1_trip>;
-				cooling-device = <&CPU0 2 2>;
-			};
-			cpu6_vdd_cdev {
-				trip = <&cpu1_trip>;
-				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
-							(THERMAL_MAX_LIMIT-8)>;
-			};
-			gpu_vdd_cdev {
-				trip = <&cpu1_trip>;
-				cooling-device = <&msm_gpu 4 4>;
-			};
-			cx_vdd_cdev {
-				trip = <&cpu1_trip>;
-				cooling-device = <&cx_cdev 0 0>;
-			};
-			mx_vdd_cdev {
-				trip = <&cpu1_trip>;
-				cooling-device = <&mx_cdev 0 0>;
-			};
-			modem_vdd_cdev {
-				trip = <&cpu1_trip>;
-				cooling-device = <&modem_vdd 0 0>;
-			};
-			adsp_vdd_cdev {
-				trip = <&cpu1_trip>;
-				cooling-device = <&adsp_vdd 0 0>;
-			};
-			cdsp_vdd_cdev {
-				trip = <&cpu1_trip>;
-				cooling-device = <&cdsp_vdd 0 0>;
-			};
-		};
-	};
-
-	cpu2-silver-lowf {
-		polling-delay-passive = <0>;
-		polling-delay = <0>;
-		thermal-governor = "low_limits_floor";
-		thermal-sensors = <&tsens0 3>;
-		tracks-low;
-		trips {
-			cpu2_trip: cpu2-trip {
-				temperature = <5000>;
-				hysteresis = <5000>;
-				type = "passive";
-			};
-		};
-		cooling-maps {
-			cpu0_vdd_cdev {
-				trip = <&cpu2_trip>;
-				cooling-device = <&CPU0 2 2>;
-			};
-			cpu6_vdd_cdev {
-				trip = <&cpu2_trip>;
-				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
-							(THERMAL_MAX_LIMIT-8)>;
-			};
-			gpu_vdd_cdev {
-				trip = <&cpu2_trip>;
-				cooling-device = <&msm_gpu 4 4>;
-			};
-			cx_vdd_cdev {
-				trip = <&cpu2_trip>;
-				cooling-device = <&cx_cdev 0 0>;
-			};
-			mx_vdd_cdev {
-				trip = <&cpu2_trip>;
-				cooling-device = <&mx_cdev 0 0>;
-			};
-			modem_vdd_cdev {
-				trip = <&cpu2_trip>;
-				cooling-device = <&modem_vdd 0 0>;
-			};
-			adsp_vdd_cdev {
-				trip = <&cpu2_trip>;
-				cooling-device = <&adsp_vdd 0 0>;
-			};
-			cdsp_vdd_cdev {
-				trip = <&cpu2_trip>;
-				cooling-device = <&cdsp_vdd 0 0>;
-			};
-		};
-	};
-
-	cpu3-silver-lowf {
-		polling-delay-passive = <0>;
-		polling-delay = <0>;
-		thermal-governor = "low_limits_floor";
-		thermal-sensors = <&tsens0 4>;
-		tracks-low;
-		trips {
-			cpu3_trip: cpu3-trip {
-				temperature = <5000>;
-				hysteresis = <5000>;
-				type = "passive";
-			};
-		};
-		cooling-maps {
-			cpu0_vdd_cdev {
-				trip = <&cpu3_trip>;
-				cooling-device = <&CPU0 2 2>;
-			};
-			cpu6_vdd_cdev {
-				trip = <&cpu3_trip>;
-				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
-							(THERMAL_MAX_LIMIT-8)>;
-			};
-			gpu_vdd_cdev {
-				trip = <&cpu3_trip>;
-				cooling-device = <&msm_gpu 4 4>;
-			};
-			cx_vdd_cdev {
-				trip = <&cpu3_trip>;
-				cooling-device = <&cx_cdev 0 0>;
-			};
-			mx_vdd_cdev {
-				trip = <&cpu3_trip>;
-				cooling-device = <&mx_cdev 0 0>;
-			};
-			modem_vdd_cdev {
-				trip = <&cpu3_trip>;
-				cooling-device = <&modem_vdd 0 0>;
-			};
-			adsp_vdd_cdev {
-				trip = <&cpu3_trip>;
-				cooling-device = <&adsp_vdd 0 0>;
-			};
-			cdsp_vdd_cdev {
-				trip = <&cpu3_trip>;
-				cooling-device = <&cdsp_vdd 0 0>;
-			};
-		};
-	};
-
-	cpuss-0-lowf {
-		polling-delay-passive = <0>;
-		polling-delay = <0>;
-		thermal-governor = "low_limits_floor";
-		thermal-sensors = <&tsens0 5>;
-		tracks-low;
-		trips {
-			l3_0_trip: l3-0-trip {
-				temperature = <5000>;
-				hysteresis = <5000>;
-				type = "passive";
-			};
-		};
-		cooling-maps {
-			cpu0_vdd_cdev {
-				trip = <&l3_0_trip>;
-				cooling-device = <&CPU0 2 2>;
-			};
-			cpu6_vdd_cdev {
-				trip = <&l3_0_trip>;
-				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
-							(THERMAL_MAX_LIMIT-8)>;
-			};
-			gpu_vdd_cdev {
-				trip = <&l3_0_trip>;
-				cooling-device = <&msm_gpu 4 4>;
-			};
-			cx_vdd_cdev {
-				trip = <&l3_0_trip>;
-				cooling-device = <&cx_cdev 0 0>;
-			};
-			mx_vdd_cdev {
-				trip = <&l3_0_trip>;
-				cooling-device = <&mx_cdev 0 0>;
-			};
-			modem_vdd_cdev {
-				trip = <&l3_0_trip>;
-				cooling-device = <&modem_vdd 0 0>;
-			};
-			adsp_vdd_cdev {
-				trip = <&l3_0_trip>;
-				cooling-device = <&adsp_vdd 0 0>;
-			};
-			cdsp_vdd_cdev {
-				trip = <&l3_0_trip>;
-				cooling-device = <&cdsp_vdd 0 0>;
-			};
-		};
-	};
-
-	cpuss-1-lowf {
-		polling-delay-passive = <0>;
-		polling-delay = <0>;
-		thermal-governor = "low_limits_floor";
-		thermal-sensors = <&tsens0 6>;
-		tracks-low;
-		trips {
-			l3_1_trip: l3-1-trip {
-				temperature = <5000>;
-				hysteresis = <5000>;
-				type = "passive";
-			};
-		};
-		cooling-maps {
-			cpu0_vdd_cdev {
-				trip = <&l3_1_trip>;
-				cooling-device = <&CPU0 2 2>;
-			};
-			cpu6_vdd_cdev {
-				trip = <&l3_1_trip>;
-				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
-							(THERMAL_MAX_LIMIT-8)>;
-			};
-			gpu_vdd_cdev {
-				trip = <&l3_1_trip>;
-				cooling-device = <&msm_gpu 4 4>;
-			};
-			cx_vdd_cdev {
-				trip = <&l3_1_trip>;
-				cooling-device = <&cx_cdev 0 0>;
-			};
-			mx_vdd_cdev {
-				trip = <&l3_1_trip>;
-				cooling-device = <&mx_cdev 0 0>;
-			};
-			modem_vdd_cdev {
-				trip = <&l3_1_trip>;
-				cooling-device = <&modem_vdd 0 0>;
-			};
-			adsp_vdd_cdev {
-				trip = <&l3_1_trip>;
-				cooling-device = <&adsp_vdd 0 0>;
-			};
-			cdsp_vdd_cdev {
-				trip = <&l3_1_trip>;
-				cooling-device = <&cdsp_vdd 0 0>;
-			};
-		};
-	};
-
-	cpu4-silver-lowf {
-		polling-delay-passive = <0>;
-		polling-delay = <0>;
-		thermal-governor = "low_limits_floor";
-		thermal-sensors = <&tsens0 7>;
-		tracks-low;
-		trips {
-			cpu4_trip: cpu4-trip {
-				temperature = <5000>;
-				hysteresis = <5000>;
-				type = "passive";
-			};
-		};
-		cooling-maps {
-			cpu0_vdd_cdev {
-				trip = <&cpu4_trip>;
-				cooling-device = <&CPU0 2 2>;
-			};
-			cpu6_vdd_cdev {
-				trip = <&cpu4_trip>;
-				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
-							(THERMAL_MAX_LIMIT-8)>;
-			};
-			gpu_vdd_cdev {
-				trip = <&cpu4_trip>;
-				cooling-device = <&msm_gpu 4 4>;
-			};
-			cx_vdd_cdev {
-				trip = <&cpu4_trip>;
-				cooling-device = <&cx_cdev 0 0>;
-			};
-			mx_vdd_cdev {
-				trip = <&cpu4_trip>;
-				cooling-device = <&mx_cdev 0 0>;
-			};
-			modem_vdd_cdev {
-				trip = <&cpu4_trip>;
-				cooling-device = <&modem_vdd 0 0>;
-			};
-			adsp_vdd_cdev {
-				trip = <&cpu4_trip>;
-				cooling-device = <&adsp_vdd 0 0>;
-			};
-			cdsp_vdd_cdev {
-				trip = <&cpu4_trip>;
-				cooling-device = <&cdsp_vdd 0 0>;
-			};
-		};
-	};
-
-	cpu5-silver-lowf {
-		polling-delay-passive = <0>;
-		polling-delay = <0>;
-		thermal-governor = "low_limits_floor";
-		thermal-sensors = <&tsens0 8>;
-		tracks-low;
-		trips {
-			cpu5_trip: cpu5-trip {
-				temperature = <5000>;
-				hysteresis = <5000>;
-				type = "passive";
-			};
-		};
-		cooling-maps {
-			cpu0_vdd_cdev {
-				trip = <&cpu5_trip>;
-				cooling-device = <&CPU0 2 2>;
-			};
-			cpu6_vdd_cdev {
-				trip = <&cpu5_trip>;
-				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
-							(THERMAL_MAX_LIMIT-8)>;
-			};
-			gpu_vdd_cdev {
-				trip = <&cpu5_trip>;
-				cooling-device = <&msm_gpu 4 4>;
-			};
-			cx_vdd_cdev {
-				trip = <&cpu5_trip>;
-				cooling-device = <&cx_cdev 0 0>;
-			};
-			mx_vdd_cdev {
-				trip = <&cpu5_trip>;
-				cooling-device = <&mx_cdev 0 0>;
-			};
-			modem_vdd_cdev {
-				trip = <&cpu5_trip>;
-				cooling-device = <&modem_vdd 0 0>;
-			};
-			adsp_vdd_cdev {
-				trip = <&cpu5_trip>;
-				cooling-device = <&adsp_vdd 0 0>;
-			};
-			cdsp_vdd_cdev {
-				trip = <&cpu5_trip>;
-				cooling-device = <&cdsp_vdd 0 0>;
-			};
-		};
-	};
-
-	cpu0-gold-lowf {
-		polling-delay-passive = <0>;
-		polling-delay = <0>;
-		thermal-governor = "low_limits_floor";
-		thermal-sensors = <&tsens0 9>;
-		tracks-low;
-		trips {
-			cpug0_trip: cpug0-trip {
-				temperature = <5000>;
-				hysteresis = <5000>;
-				type = "passive";
-			};
-		};
-		cooling-maps {
-			cpu0_vdd_cdev {
-				trip = <&cpug0_trip>;
-				cooling-device = <&CPU0 2 2>;
-			};
-			cpu6_vdd_cdev {
-				trip = <&cpug0_trip>;
-				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
-							(THERMAL_MAX_LIMIT-8)>;
-			};
-			gpu_vdd_cdev {
-				trip = <&cpug0_trip>;
-				cooling-device = <&msm_gpu 4 4>;
-			};
-			cx_vdd_cdev {
-				trip = <&cpug0_trip>;
-				cooling-device = <&cx_cdev 0 0>;
-			};
-			mx_vdd_cdev {
-				trip = <&cpug0_trip>;
-				cooling-device = <&mx_cdev 0 0>;
-			};
-			modem_vdd_cdev {
-				trip = <&cpug0_trip>;
-				cooling-device = <&modem_vdd 0 0>;
-			};
-			adsp_vdd_cdev {
-				trip = <&cpug0_trip>;
-				cooling-device = <&adsp_vdd 0 0>;
-			};
-			cdsp_vdd_cdev {
-				trip = <&cpug0_trip>;
-				cooling-device = <&cdsp_vdd 0 0>;
-			};
-		};
-	};
-
-	cpu1-gold-lowf {
-		polling-delay-passive = <0>;
-		polling-delay = <0>;
-		thermal-governor = "low_limits_floor";
-		thermal-sensors = <&tsens0 10>;
-		tracks-low;
-		trips {
-			cpug1_trip: cpug1-trip {
-				temperature = <5000>;
-				hysteresis = <5000>;
-				type = "passive";
-			};
-		};
-		cooling-maps {
-			cpu0_vdd_cdev {
-				trip = <&cpug1_trip>;
-				cooling-device = <&CPU0 2 2>;
-			};
-			cpu6_vdd_cdev {
-				trip = <&cpug1_trip>;
-				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
-							(THERMAL_MAX_LIMIT-8)>;
-			};
-			gpu_vdd_cdev {
-				trip = <&cpug1_trip>;
-				cooling-device = <&msm_gpu 4 4>;
-			};
-			cx_vdd_cdev {
-				trip = <&cpug1_trip>;
-				cooling-device = <&cx_cdev 0 0>;
-			};
-			mx_vdd_cdev {
-				trip = <&cpug1_trip>;
-				cooling-device = <&mx_cdev 0 0>;
-			};
-			modem_vdd_cdev {
-				trip = <&cpug1_trip>;
-				cooling-device = <&modem_vdd 0 0>;
-			};
-			adsp_vdd_cdev {
-				trip = <&cpug1_trip>;
-				cooling-device = <&adsp_vdd 0 0>;
-			};
-			cdsp_vdd_cdev {
-				trip = <&cpug1_trip>;
-				cooling-device = <&cdsp_vdd 0 0>;
-			};
-		};
-	};
-
-	gpu0-lowf {
-		polling-delay-passive = <0>;
-		polling-delay = <0>;
-		thermal-governor = "low_limits_floor";
-		thermal-sensors = <&tsens0 11>;
-		tracks-low;
-		trips {
-			gpu0_trip_l: gpu0-trip {
-				temperature = <5000>;
-				hysteresis = <5000>;
-				type = "passive";
-			};
-		};
-		cooling-maps {
-			cpu0_vdd_cdev {
-				trip = <&gpu0_trip_l>;
-				cooling-device = <&CPU0 2 2>;
-			};
-			cpu6_vdd_cdev {
-				trip = <&gpu0_trip_l>;
-				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
-							(THERMAL_MAX_LIMIT-8)>;
-			};
-			gpu_vdd_cdev {
-				trip = <&gpu0_trip_l>;
-				cooling-device = <&msm_gpu 4 4>;
-			};
-			cx_vdd_cdev {
-				trip = <&gpu0_trip_l>;
-				cooling-device = <&cx_cdev 0 0>;
-			};
-			mx_vdd_cdev {
-				trip = <&gpu0_trip_l>;
-				cooling-device = <&mx_cdev 0 0>;
-			};
-			modem_vdd_cdev {
-				trip = <&gpu0_trip_l>;
-				cooling-device = <&modem_vdd 0 0>;
-			};
-			adsp_vdd_cdev {
-				trip = <&gpu0_trip_l>;
-				cooling-device = <&adsp_vdd 0 0>;
-			};
-			cdsp_vdd_cdev {
-				trip = <&gpu0_trip_l>;
-				cooling-device = <&cdsp_vdd 0 0>;
-			};
-		};
-	};
-
-	gpu1-lowf {
-		polling-delay-passive = <0>;
-		polling-delay = <0>;
-		thermal-governor = "low_limits_floor";
-		thermal-sensors = <&tsens0 12>;
-		tracks-low;
-		trips {
-			gpu1_trip_l: gpu1-trip_l {
-				temperature = <5000>;
-				hysteresis = <5000>;
-				type = "passive";
-			};
-		};
-		cooling-maps {
-			cpu0_vdd_cdev {
-				trip = <&gpu1_trip_l>;
-				cooling-device = <&CPU0 2 2>;
-			};
-			cpu6_vdd_cdev {
-				trip = <&gpu1_trip_l>;
-				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
-							(THERMAL_MAX_LIMIT-8)>;
-			};
-			gpu_vdd_cdev {
-				trip = <&gpu1_trip_l>;
-				cooling-device = <&msm_gpu 4 4>;
-			};
-			cx_vdd_cdev {
-				trip = <&gpu1_trip_l>;
-				cooling-device = <&cx_cdev 0 0>;
-			};
-			mx_vdd_cdev {
-				trip = <&gpu1_trip_l>;
-				cooling-device = <&mx_cdev 0 0>;
-			};
-			modem_vdd_cdev {
-				trip = <&gpu1_trip_l>;
-				cooling-device = <&modem_vdd 0 0>;
-			};
-			adsp_vdd_cdev {
-				trip = <&gpu1_trip_l>;
-				cooling-device = <&adsp_vdd 0 0>;
-			};
-			cdsp_vdd_cdev {
-				trip = <&gpu1_trip_l>;
-				cooling-device = <&cdsp_vdd 0 0>;
-			};
-		};
-	};
-
 	aoss1-lowf {
 		polling-delay-passive = <0>;
 		polling-delay = <0>;
@@ -1121,7 +521,7 @@
 			};
 			gpu_vdd_cdev {
 				trip = <&aoss1_trip>;
-				cooling-device = <&msm_gpu 4 4>;
+				cooling-device = <&msm_gpu 0 0>;
 			};
 			cx_vdd_cdev {
 				trip = <&aoss1_trip>;
@@ -1146,356 +546,6 @@
 		};
 	};
 
-	mdm-dsp-lowf {
-		polling-delay-passive = <0>;
-		polling-delay = <0>;
-		thermal-governor = "low_limits_floor";
-		thermal-sensors = <&tsens1 1>;
-		tracks-low;
-		trips {
-			dsp_trip: dsp-trip {
-				temperature = <5000>;
-				hysteresis = <5000>;
-				type = "passive";
-			};
-		};
-		cooling-maps {
-			cpu0_vdd_cdev {
-				trip = <&dsp_trip>;
-				cooling-device = <&CPU0 2 2>;
-			};
-			cpu6_vdd_cdev {
-				trip = <&dsp_trip>;
-				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
-							(THERMAL_MAX_LIMIT-8)>;
-			};
-			gpu_vdd_cdev {
-				trip = <&dsp_trip>;
-				cooling-device = <&msm_gpu 4 4>;
-			};
-			cx_vdd_cdev {
-				trip = <&dsp_trip>;
-				cooling-device = <&cx_cdev 0 0>;
-			};
-			mx_vdd_cdev {
-				trip = <&dsp_trip>;
-				cooling-device = <&mx_cdev 0 0>;
-			};
-			modem_vdd_cdev {
-				trip = <&dsp_trip>;
-				cooling-device = <&modem_vdd 0 0>;
-			};
-			adsp_vdd_cdev {
-				trip = <&dsp_trip>;
-				cooling-device = <&adsp_vdd 0 0>;
-			};
-			cdsp_vdd_cdev {
-				trip = <&dsp_trip>;
-				cooling-device = <&cdsp_vdd 0 0>;
-			};
-		};
-	};
-
-	ddr-lowf {
-		polling-delay-passive = <0>;
-		polling-delay = <0>;
-		thermal-governor = "low_limits_floor";
-		thermal-sensors = <&tsens1 2>;
-		tracks-low;
-		trips {
-			ddr_trip: ddr-trip {
-				temperature = <5000>;
-				hysteresis = <5000>;
-				type = "passive";
-			};
-		};
-		cooling-maps {
-			cpu0_vdd_cdev {
-				trip = <&ddr_trip>;
-				cooling-device = <&CPU0 2 2>;
-			};
-			cpu6_vdd_cdev {
-				trip = <&ddr_trip>;
-				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
-							(THERMAL_MAX_LIMIT-8)>;
-			};
-			gpu_vdd_cdev {
-				trip = <&ddr_trip>;
-				cooling-device = <&msm_gpu 4 4>;
-			};
-			cx_vdd_cdev {
-				trip = <&ddr_trip>;
-				cooling-device = <&cx_cdev 0 0>;
-			};
-			mx_vdd_cdev {
-				trip = <&ddr_trip>;
-				cooling-device = <&mx_cdev 0 0>;
-			};
-			modem_vdd_cdev {
-				trip = <&ddr_trip>;
-				cooling-device = <&modem_vdd 0 0>;
-			};
-			adsp_vdd_cdev {
-				trip = <&ddr_trip>;
-				cooling-device = <&adsp_vdd 0 0>;
-			};
-			cdsp_vdd_cdev {
-				trip = <&ddr_trip>;
-				cooling-device = <&cdsp_vdd 0 0>;
-			};
-		};
-	};
-
-	wlan-lowf {
-		polling-delay-passive = <0>;
-		polling-delay = <0>;
-		thermal-governor = "low_limits_floor";
-		thermal-sensors = <&tsens1 3>;
-		tracks-low;
-		trips {
-			wlan_trip: wlan-trip {
-				temperature = <5000>;
-				hysteresis = <5000>;
-				type = "passive";
-			};
-		};
-		cooling-maps {
-			cpu0_vdd_cdev {
-				trip = <&wlan_trip>;
-				cooling-device = <&CPU0 2 2>;
-			};
-			cpu6_vdd_cdev {
-				trip = <&wlan_trip>;
-				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
-							(THERMAL_MAX_LIMIT-8)>;
-			};
-			gpu_vdd_cdev {
-				trip = <&wlan_trip>;
-				cooling-device = <&msm_gpu 4 4>;
-			};
-			cx_vdd_cdev {
-				trip = <&wlan_trip>;
-				cooling-device = <&cx_cdev 0 0>;
-			};
-			mx_vdd_cdev {
-				trip = <&wlan_trip>;
-				cooling-device = <&mx_cdev 0 0>;
-			};
-			modem_vdd_cdev {
-				trip = <&wlan_trip>;
-				cooling-device = <&modem_vdd 0 0>;
-			};
-			adsp_vdd_cdev {
-				trip = <&wlan_trip>;
-				cooling-device = <&adsp_vdd 0 0>;
-			};
-			cdsp_vdd_cdev {
-				trip = <&wlan_trip>;
-				cooling-device = <&cdsp_vdd 0 0>;
-			};
-		};
-	};
-
-	compute-hvx-lowf {
-		polling-delay-passive = <0>;
-		polling-delay = <0>;
-		thermal-governor = "low_limits_floor";
-		thermal-sensors = <&tsens1 4>;
-		tracks-low;
-		trips {
-			hvx_trip: hvx-trip {
-				temperature = <5000>;
-				hysteresis = <5000>;
-				type = "passive";
-			};
-		};
-		cooling-maps {
-			cpu0_vdd_cdev {
-				trip = <&hvx_trip>;
-				cooling-device = <&CPU0 2 2>;
-			};
-			cpu6_vdd_cdev {
-				trip = <&hvx_trip>;
-				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
-							(THERMAL_MAX_LIMIT-8)>;
-			};
-			gpu_vdd_cdev {
-				trip = <&hvx_trip>;
-				cooling-device = <&msm_gpu 4 4>;
-			};
-			cx_vdd_cdev {
-				trip = <&hvx_trip>;
-				cooling-device = <&cx_cdev 0 0>;
-			};
-			mx_vdd_cdev {
-				trip = <&hvx_trip>;
-				cooling-device = <&mx_cdev 0 0>;
-			};
-			modem_vdd_cdev {
-				trip = <&hvx_trip>;
-				cooling-device = <&modem_vdd 0 0>;
-			};
-			adsp_vdd_cdev {
-				trip = <&hvx_trip>;
-				cooling-device = <&adsp_vdd 0 0>;
-			};
-			cdsp_vdd_cdev {
-				trip = <&hvx_trip>;
-				cooling-device = <&cdsp_vdd 0 0>;
-			};
-		};
-	};
-
-	camera-lowf {
-		polling-delay-passive = <0>;
-		polling-delay = <0>;
-		thermal-governor = "low_limits_floor";
-		thermal-sensors = <&tsens1 5>;
-		tracks-low;
-		trips {
-			camera_trip: camera-trip {
-				temperature = <5000>;
-				hysteresis = <5000>;
-				type = "passive";
-			};
-		};
-		cooling-maps {
-			cpu0_vdd_cdev {
-				trip = <&camera_trip>;
-				cooling-device = <&CPU0 2 2>;
-			};
-			cpu6_vdd_cdev {
-				trip = <&camera_trip>;
-				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
-							(THERMAL_MAX_LIMIT-8)>;
-			};
-			gpu_vdd_cdev {
-				trip = <&camera_trip>;
-				cooling-device = <&msm_gpu 4 4>;
-			};
-			cx_vdd_cdev {
-				trip = <&camera_trip>;
-				cooling-device = <&cx_cdev 0 0>;
-			};
-			mx_vdd_cdev {
-				trip = <&camera_trip>;
-				cooling-device = <&mx_cdev 0 0>;
-			};
-			modem_vdd_cdev {
-				trip = <&camera_trip>;
-				cooling-device = <&modem_vdd 0 0>;
-			};
-			adsp_vdd_cdev {
-				trip = <&camera_trip>;
-				cooling-device = <&adsp_vdd 0 0>;
-			};
-			cdsp_vdd_cdev {
-				trip = <&camera_trip>;
-				cooling-device = <&cdsp_vdd 0 0>;
-			};
-		};
-	};
-
-	mmss-lowf {
-		polling-delay-passive = <0>;
-		polling-delay = <0>;
-		thermal-governor = "low_limits_floor";
-		thermal-sensors = <&tsens1 6>;
-		tracks-low;
-		trips {
-			mmss_trip: mmss-trip {
-				temperature = <5000>;
-				hysteresis = <5000>;
-				type = "passive";
-			};
-		};
-		cooling-maps {
-			cpu0_vdd_cdev {
-				trip = <&mmss_trip>;
-				cooling-device = <&CPU0 2 2>;
-			};
-			cpu6_vdd_cdev {
-				trip = <&mmss_trip>;
-				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
-							(THERMAL_MAX_LIMIT-8)>;
-			};
-			gpu_vdd_cdev {
-				trip = <&mmss_trip>;
-				cooling-device = <&msm_gpu 4 4>;
-			};
-			cx_vdd_cdev {
-				trip = <&mmss_trip>;
-				cooling-device = <&cx_cdev 0 0>;
-			};
-			mx_vdd_cdev {
-				trip = <&mmss_trip>;
-				cooling-device = <&mx_cdev 0 0>;
-			};
-			modem_vdd_cdev {
-				trip = <&mmss_trip>;
-				cooling-device = <&modem_vdd 0 0>;
-			};
-			adsp_vdd_cdev {
-				trip = <&mmss_trip>;
-				cooling-device = <&adsp_vdd 0 0>;
-			};
-			cdsp_vdd_cdev {
-				trip = <&mmss_trip>;
-				cooling-device = <&cdsp_vdd 0 0>;
-			};
-		};
-	};
-
-	mdm-core-lowf {
-		polling-delay-passive = <0>;
-		polling-delay = <0>;
-		thermal-governor = "low_limits_floor";
-		thermal-sensors = <&tsens1 7>;
-		tracks-low;
-		trips {
-			mdm_trip: mdm-trip {
-				temperature = <5000>;
-				hysteresis = <5000>;
-				type = "passive";
-			};
-		};
-		cooling-maps {
-			cpu0_vdd_cdev {
-				trip = <&mdm_trip>;
-				cooling-device = <&CPU0 2 2>;
-			};
-			cpu6_vdd_cdev {
-				trip = <&mdm_trip>;
-				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
-							(THERMAL_MAX_LIMIT-8)>;
-			};
-			gpu_vdd_cdev {
-				trip = <&mdm_trip>;
-				cooling-device = <&msm_gpu 4 4>;
-			};
-			cx_vdd_cdev {
-				trip = <&mdm_trip>;
-				cooling-device = <&cx_cdev 0 0>;
-			};
-			mx_vdd_cdev {
-				trip = <&mdm_trip>;
-				cooling-device = <&mx_cdev 0 0>;
-			};
-			modem_vdd_cdev {
-				trip = <&mdm_trip>;
-				cooling-device = <&modem_vdd 0 0>;
-			};
-			adsp_vdd_cdev {
-				trip = <&mdm_trip>;
-				cooling-device = <&adsp_vdd 0 0>;
-			};
-			cdsp_vdd_cdev {
-				trip = <&mdm_trip>;
-				cooling-device = <&cdsp_vdd 0 0>;
-			};
-		};
-	};
-
 	lmh-dcvs-01 {
 		polling-delay-passive = <0>;
 		polling-delay = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-usb.dtsi b/arch/arm64/boot/dts/qcom/sdm670-usb.dtsi
index 84c7459..3df6d09 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-usb.dtsi
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -27,8 +27,7 @@
 };
 
 &usb0 {
-	/delete-property/ iommus;
-	/delete-property/ qcom,smmu-s1-bypass;
+	qcom,pm-qos-latency = <601>; /* CPU-CLUSTER-WFI-LVL latency +1 */
 	extcon = <0>, <0>, <&eud>, <0>, <0>;
 };
 
@@ -36,6 +35,28 @@
 	vdd-supply = <&pm660l_l1>;
 	vdda18-supply = <&pm660_l10>;
 	vdda33-supply = <&pm660l_l7>;
+	qcom,qusb-phy-init-seq =
+			/* <value reg_offset> */
+			   <0x23 0x210 /* PWR_CTRL1 */
+			    0x03 0x04  /* PLL_ANALOG_CONTROLS_TWO */
+			    0x7c 0x18c /* PLL_CLOCK_INVERTERS */
+			    0x80 0x2c  /* PLL_CMODE */
+			    0x0a 0x184 /* PLL_LOCK_DELAY */
+			    0x19 0xb4  /* PLL_DIGITAL_TIMERS_TWO */
+			    0x40 0x194 /* PLL_BIAS_CONTROL_1 */
+			    0x20 0x198 /* PLL_BIAS_CONTROL_2 */
+			    0x21 0x214 /* PWR_CTRL2 */
+			    0x08 0x220 /* IMP_CTRL1 */
+			    0x58 0x224 /* IMP_CTRL2 */
+			    0x45 0x240 /* TUNE1 */
+			    0x29 0x244 /* TUNE2 */
+			    0xca 0x248 /* TUNE3 */
+			    0x04 0x24c /* TUNE4 */
+			    0x03 0x250 /* TUNE5 */
+			    0x00 0x23c /* CHG_CTRL2 */
+			    0x22 0x210>; /* PWR_CTRL1 */
+	nvmem-cells = <&minor_rev>;
+	nvmem-cell-names = "minor_rev";
 };
 
 &usb_qmp_dp_phy {
diff --git a/arch/arm64/boot/dts/qcom/sdm670-wcd.dtsi b/arch/arm64/boot/dts/qcom/sdm670-wcd.dtsi
index f8d2a04..d7120d0 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-wcd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-wcd.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -164,4 +164,20 @@
 			pinctrl-1 = <&hph_en1_wcd_sleep>;
 		};
 	};
+
+	tasha_codec {
+		wsa_spkr_sd1: msm_cdc_pinctrll {
+			compatible = "qcom,msm-cdc-pinctrl";
+			pinctrl-names = "aud_active", "aud_sleep";
+			pinctrl-0 = <&tasha_spkr_1_sd_n_active>;
+			pinctrl-1 = <&tasha_spkr_1_sd_n_sleep>;
+		};
+
+		wsa_spkr_sd2: msm_cdc_pinctrlr {
+			compatible = "qcom,msm-cdc-pinctrl";
+			pinctrl-names = "aud_active", "aud_sleep";
+			pinctrl-0 = <&tasha_spkr_2_sd_n_active>;
+			pinctrl-1 = <&tasha_spkr_2_sd_n_sleep>;
+		};
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm670-wsa881x.dtsi b/arch/arm64/boot/dts/qcom/sdm670-wsa881x.dtsi
index c35850d..5dfe244 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-wsa881x.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-wsa881x.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -42,4 +42,36 @@
 			};
 		};
 	};
+
+	tasha_codec {
+		swr_master {
+			compatible = "qcom,swr-wcd";
+			#address-cells = <2>;
+			#size-cells = <0>;
+
+			wsa881x_211: wsa881x@20170211 {
+				compatible = "qcom,wsa881x";
+				reg = <0x0 0x20170211>;
+				qcom,spkr-sd-n-node = <&wsa_spkr_sd1>;
+			};
+
+			wsa881x_212: wsa881x@20170212 {
+				compatible = "qcom,wsa881x";
+				reg = <0x0 0x20170212>;
+				qcom,spkr-sd-n-node = <&wsa_spkr_sd2>;
+			};
+
+			wsa881x_213: wsa881x@21170213 {
+				compatible = "qcom,wsa881x";
+				reg = <0x0 0x21170213>;
+				qcom,spkr-sd-n-node = <&wsa_spkr_sd1>;
+			};
+
+			wsa881x_214: wsa881x@21170214 {
+				compatible = "qcom,wsa881x";
+				reg = <0x0 0x21170214>;
+				qcom,spkr-sd-n-node = <&wsa_spkr_sd2>;
+			};
+		};
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index 4d38f954..3cc0aa3 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -529,6 +529,30 @@
 			reg = <0 0x93e00000 0 0x1e00000>;
 		};
 
+		pil_ipa_fw_mem: ips_fw_region@0x95c00000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x95c00000 0 0x10000>;
+		};
+
+		pil_ipa_gsi_mem: ipa_gsi_region@0x95c10000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x95c10000 0 0x5000>;
+		};
+
+		pil_gpu_mem: gpu_region@0x95c15000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x95c15000 0 0x2000>;
+		};
+
+		qseecom_mem: qseecom_region@0x9e400000 {
+			compatible = "shared-dma-pool";
+			no-map;
+			reg = <0 0x9e400000 0 0x1400000>;
+		};
+
 		adsp_mem: adsp_region {
 			compatible = "shared-dma-pool";
 			alloc-ranges = <0 0x00000000 0 0xffffffff>;
@@ -537,12 +561,12 @@
 			size = <0 0xc00000>;
 		};
 
-		qseecom_mem: qseecom_region {
+		qseecom_ta_mem: qseecom_ta_region {
 			compatible = "shared-dma-pool";
 			alloc-ranges = <0 0x00000000 0 0xffffffff>;
-			no-map;
+			reusable;
 			alignment = <0 0x400000>;
-			size = <0 0x1400000>;
+			size = <0 0x1000000>;
 		};
 
 		sp_mem: sp_region {  /* SPSS-HLOS ION shared mem */
@@ -745,6 +769,7 @@
 			compatible = "qcom,memshare-peripheral";
 			qcom,peripheral-size = <0x500000>;
 			qcom,client-id = <1>;
+			qcom,allocate-boot-time;
 			label = "modem";
 		};
 	};
@@ -1073,7 +1098,7 @@
 		vdd_pwrcl_mx_ao-supply = <&pm660l_s1_level_ao>;
 
 		qcom,mx-turbo-freq = <1440000000 1708000000 3300000001>;
-		l3-devs = <&l3_cpu0 &l3_cpu6>;
+		l3-devs = <&l3_cpu0 &l3_cpu6 &l3_cdsp>;
 
 		clock-names = "xo_ao";
 		clocks = <&clock_rpmh RPMH_CXO_CLK_A>;
@@ -1772,6 +1797,29 @@
 		cell-index = <0>;
 	};
 
+	ufs_ice: ufsice@1d90000 {
+		compatible = "qcom,ice";
+		reg = <0x1d90000 0x8000>;
+		qcom,enable-ice-clk;
+		clock-names = "ufs_core_clk", "bus_clk",
+				"iface_clk", "ice_core_clk";
+		clocks = <&clock_gcc GCC_UFS_PHY_AXI_CLK>,
+			 <&clock_gcc GCC_UFS_MEM_CLKREF_CLK>,
+			 <&clock_gcc GCC_UFS_PHY_AHB_CLK>,
+			 <&clock_gcc GCC_UFS_PHY_ICE_CORE_CLK>;
+		qcom,op-freq-hz = <0>, <0>, <0>, <300000000>;
+		vdd-hba-supply = <&ufs_phy_gdsc>;
+		qcom,msm-bus,name = "ufs_ice_noc";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<1 650 0 0>,    /* No vote */
+				<1 650 1000 0>; /* Max. bandwidth */
+		qcom,bus-vector-names = "MIN",
+					"MAX";
+		qcom,instance-type = "ufs";
+	};
+
 	ufsphy_mem: ufsphy_mem@1d87000 {
 		reg = <0x1d87000 0xe00>; /* PHY regs */
 		reg-names = "phy_mem";
@@ -1795,6 +1843,7 @@
 		interrupts = <0 265 0>;
 		phys = <&ufsphy_mem>;
 		phy-names = "ufsphy";
+		ufs-qcom-crypto = <&ufs_ice>;
 
 		lanes-per-direction = <1>;
 		dev-ref-clk-freq = <0>; /* 19.2 MHz */
@@ -1947,7 +1996,7 @@
 		qcom,arm-smmu;
 		qcom,bandwidth-vote-for-ipa;
 		qcom,msm-bus,name = "ipa";
-		qcom,msm-bus,num-cases = <4>;
+		qcom,msm-bus,num-cases = <5>;
 		qcom,msm-bus,num-paths = <4>;
 		qcom,msm-bus,vectors-KBps =
 		/* No vote */
@@ -1955,22 +2004,28 @@
 			<90 585 0 0>,
 			<1 676 0 0>,
 			<143 777 0 0>,
+		/* SVS2 */
+			<90 512 80000 600000>,
+			<90 585 80000 350000>,
+			<1 676 40000 40000>, /*gcc_config_noc_clk_src */
+			<143 777 0 75>, /* IB defined for IPA2X_clk in MHz*/
 		/* SVS */
 			<90 512 80000 640000>,
 			<90 585 80000 640000>,
 			<1 676 80000 80000>,
-			<143 777 0 150>, /* IB defined for IPA clk in MHz*/
+			<143 777 0 150>, /* IB defined for IPA2X_clk in MHz*/
 		/* NOMINAL */
 			<90 512 206000 960000>,
 			<90 585 206000 960000>,
 			<1 676 206000 160000>,
-			<143 777 0 300>, /* IB defined for IPA clk in MHz*/
+			<143 777 0 300>, /* IB defined for IPA2X_clk in MHz*/
 		/* TURBO */
 			<90 512 206000 3600000>,
 			<90 585 206000 3600000>,
 			<1 676 206000 300000>,
 			<143 777 0 355>; /* IB defined for IPA clk in MHz*/
-		qcom,bus-vector-names = "MIN", "SVS", "NOMINAL", "TURBO";
+		qcom,bus-vector-names =
+			"MIN", "SVS2", "SVS", "NOMINAL", "TURBO";
 
 		/* IPA RAM mmap */
 		qcom,ipa-ram-mmap = <
@@ -2084,6 +2139,7 @@
 		compatible = "qcom,pil-tz-generic";
 		qcom,pas-id = <0xf>;
 		qcom,firmware-name = "ipa_fws";
+		memory-region = <&pil_ipa_fw_mem>;
 	};
 
 	pil_modem: qcom,mss@4080000 {
@@ -2126,6 +2182,7 @@
 		qcom,firmware-name = "modem";
 		qcom,pil-self-auth;
 		qcom,sysmon-id = <0>;
+		qcom,minidump-id = <3>;
 		qcom,ssctl-instance-id = <0x12>;
 		qcom,override-acc;
 		qcom,signal-aop;
@@ -2309,6 +2366,8 @@
 		qcom,ddr-config = <0xC3040873>;
 
 		qcom,nonremovable;
+		nvmem-cells = <&minor_rev>;
+		nvmem-cell-names = "minor_rev";
 
 		status = "disabled";
 	};
@@ -2382,11 +2441,13 @@
 	qcom,msm-adsprpc-mem {
 		compatible = "qcom,msm-adsprpc-mem-region";
 		memory-region = <&adsp_mem>;
+		restrict-access;
 	};
 
 	qcom,msm_fastrpc {
 		compatible = "qcom,msm-fastrpc-compute";
-		qcom,adsp-remoteheap-vmid = <37>;
+		qcom,adsp-remoteheap-vmid = <22 37>;
+		qcom,fastrpc-adsp-audio-pdr;
 
 		qcom,msm_fastrpc_compute_cb1 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
@@ -2537,8 +2598,6 @@
 		qcom,count-unit = <0x10000>;
 		qcom,hw-timer-hz = <19200000>;
 		qcom,target-dev = <&cpubw>;
-		qcom,byte-mid-mask = <0xe000>;
-		qcom,byte-mid-match = <0xe000>;
 	};
 
 	memlat_cpu0: qcom,memlat-cpu0 {
@@ -2637,10 +2696,9 @@
 		qcom,cachemiss-ev = <0x17>;
 		qcom,core-dev-table =
 			<  576000  300000000 >,
-			<  748800  556800000 >,
-			<  998400  806400000 >,
-			< 1209660  940800000 >,
-			< 1516800 1190400000 >,
+			<  998400  556800000 >,
+			< 1209660  844800000 >,
+			< 1516800  940800000 >,
 			< 1612800 1382400000 >,
 			< 1708000 1440000000 >;
 	};
@@ -2755,6 +2813,13 @@
 				< 2457600 MHZ_TO_MBPS(1804, 4) >;
 	};
 
+	l3_cdsp: qcom,l3-cdsp {
+		compatible = "devfreq-simple-dev";
+		clock-names = "devfreq_clk";
+		clocks = <&clock_cpucc L3_MISC_VOTE_CLK>;
+		governor = "powersave";
+	};
+
 	cpu_pmu: cpu-pmu {
 		compatible = "arm,armv8-pmuv3";
 		qcom,irq-is-percpu;
@@ -2770,6 +2835,20 @@
 		compatible = "syscon";
 		reg = <0x5091008 0x4>;
 	};
+
+	qfprom: qfprom@0x780000 {
+		compatible	= "qcom,qfprom";
+		reg		= <0x00780000 0x1000>;
+		#address-cells	= <1>;
+		#size-cells	= <1>;
+		ranges;
+
+		minor_rev: minor_rev@0x78014c {
+			reg = <0x14c 0x4>;
+			bits = <0x1c 0x2>;
+		};
+	};
+
 };
 
 #include "pm660.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
index a3a48af..d708a12 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -91,16 +91,26 @@
 		pinctrl-0 = <&camera_dvdd_en_default>;
 		vin-supply = <&pm8998_s3>;
 	};
+
+	camera_vana_ldo: gpio-regulator@4 {
+		compatible = "regulator-fixed";
+		reg = <0x04 0x00>;
+		regulator-name = "camera_vana_ldo";
+		regulator-min-microvolt = <2850000>;
+		regulator-max-microvolt = <2850000>;
+		regulator-enable-ramp-delay = <233>;
+		enable-active-high;
+		gpio = <&tlmm 8 0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&cam_sensor_rear_vana>;
+		vin-supply = <&pmi8998_bob>;
+	};
 };
 
 &cam_cci {
 	qcom,cam-res-mgr {
 		compatible = "qcom,cam-res-mgr";
 		status = "ok";
-		shared-gpios = <8>;
-		pinctrl-names = "cam_res_mgr_default", "cam_res_mgr_suspend";
-		pinctrl-0 = <&cam_res_mgr_active>;
-		pinctrl-1 = <&cam_res_mgr_suspend>;
 	};
 
 	actuator_rear: qcom,actuator@0 {
@@ -339,13 +349,13 @@
 		eeprom-src = <&eeprom_rear_aux>;
 		cam_vdig-supply = <&camera_ldo>;
 		cam_vio-supply = <&pm8998_lvs1>;
-		cam_vana-supply = <&pmi8998_bob>;
+		cam_vana-supply = <&camera_vana_ldo>;
 		cam_clk-supply = <&titan_top_gdsc>;
 		regulator-names = "cam_vdig", "cam_vio", "cam_vana",
 			"cam_clk";
 		rgltr-cntrl-support;
-		rgltr-min-voltage = <1050000 0 3312000 0>;
-		rgltr-max-voltage = <1050000 0 3600000 0>;
+		rgltr-min-voltage = <1050000 0 2850000 0>;
+		rgltr-max-voltage = <1050000 0 2850000 0>;
 		rgltr-load-current = <105000 0 80000 0>;
 		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
@@ -354,15 +364,12 @@
 		pinctrl-1 = <&cam_sensor_mclk2_suspend
 				&cam_sensor_rear2_suspend>;
 		gpios = <&tlmm 15 0>,
-			<&tlmm 9 0>,
-			<&tlmm 8 0>;
+			<&tlmm 9 0>;
 		gpio-reset = <1>;
-		gpio-vana = <2>;
-		gpio-req-tbl-num = <0 1 2>;
-		gpio-req-tbl-flags = <1 0 0>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
 		gpio-req-tbl-label = "CAMIF_MCLK1",
-					"CAM_RESET1",
-					"CAM_VANA1";
+					"CAM_RESET1";
 		sensor-mode = <0>;
 		cci-master = <1>;
 		status = "ok";
@@ -384,14 +391,14 @@
 		actuator-src = <&actuator_front>;
 		led-flash-src = <&led_flash_front>;
 		cam_vio-supply = <&pm8998_lvs1>;
-		cam_vana-supply = <&pmi8998_bob>;
+		cam_vana-supply = <&camera_vana_ldo>;
 		cam_vdig-supply = <&camera_ldo>;
 		cam_clk-supply = <&titan_top_gdsc>;
 		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
 			"cam_clk";
 		rgltr-cntrl-support;
-		rgltr-min-voltage = <0 3312000 1050000 0>;
-		rgltr-max-voltage = <0 3600000 1050000 0>;
+		rgltr-min-voltage = <0 2850000 1050000 0>;
+		rgltr-max-voltage = <0 2850000 1050000 0>;
 		rgltr-load-current = <0 80000 105000 0>;
 		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
@@ -400,15 +407,12 @@
 		pinctrl-1 = <&cam_sensor_mclk1_suspend
 				 &cam_sensor_front_suspend>;
 		gpios = <&tlmm 14 0>,
-			<&tlmm 28 0>,
-			<&tlmm 8 0>;
+			<&tlmm 28 0>;
 		gpio-reset = <1>;
-		gpio-vana = <2>;
-		gpio-req-tbl-num = <0 1 2>;
-		gpio-req-tbl-flags = <1 0 0>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
 		gpio-req-tbl-label = "CAMIF_MCLK2",
-					"CAM_RESET2",
-					"CAM_VANA1";
+					"CAM_RESET2";
 		sensor-mode = <0>;
 		cci-master = <1>;
 		status = "ok";
@@ -428,14 +432,14 @@
 		sensor-position-yaw = <0>;
 		led-flash-src = <&led_flash_iris>;
 		cam_vio-supply = <&pm8998_lvs1>;
-		cam_vana-supply = <&pmi8998_bob>;
+		cam_vana-supply = <&camera_vana_ldo>;
 		cam_vdig-supply = <&camera_ldo>;
 		cam_clk-supply = <&titan_top_gdsc>;
 		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
 			"cam_clk";
 		rgltr-cntrl-support;
-		rgltr-min-voltage = <0 3312000 1050000 0>;
-		rgltr-max-voltage = <0 3600000 1050000 0>;
+		rgltr-min-voltage = <0 2850000 1050000 0>;
+		rgltr-max-voltage = <0 2850000 1050000 0>;
 		rgltr-load-current = <0 80000 105000 0>;
 		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
@@ -444,15 +448,12 @@
 		pinctrl-1 = <&cam_sensor_mclk3_suspend
 				 &cam_sensor_iris_suspend>;
 		gpios = <&tlmm 16 0>,
-			<&tlmm 9 0>,
-			<&tlmm 8 0>;
+			<&tlmm 9 0>;
 		gpio-reset = <1>;
-		gpio-vana = <2>;
-		gpio-req-tbl-num = <0 1 2>;
-		gpio-req-tbl-flags = <1 0 0>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
 		gpio-req-tbl-label = "CAMIF_MCLK3",
-					"CAM_RESET3",
-					"CAM_VANA1";
+					"CAM_RESET3";
 		sensor-mode = <0>;
 		cci-master = <1>;
 		status = "ok";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
index dffb5e0..5a88dc2 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
@@ -488,8 +488,8 @@
 
 		trips {
 			active-config0 {
-				temperature = <65000>;
-				hysteresis = <1000>;
+				temperature = <125000>;
+				hysteresis = <10000>;
 				type = "passive";
 			};
 		};
@@ -503,8 +503,8 @@
 
 		trips {
 			active-config0 {
-				temperature = <65000>;
-				hysteresis = <1000>;
+				temperature = <125000>;
+				hysteresis = <10000>;
 				type = "passive";
 			};
 		};
@@ -518,8 +518,8 @@
 
 		trips {
 			active-config0 {
-				temperature = <65000>;
-				hysteresis = <1000>;
+				temperature = <125000>;
+				hysteresis = <10000>;
 				type = "passive";
 			};
 		};
@@ -533,8 +533,8 @@
 
 		trips {
 			active-config0 {
-				temperature = <65000>;
-				hysteresis = <1000>;
+				temperature = <125000>;
+				hysteresis = <10000>;
 				type = "passive";
 			};
 		};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
index fcfab09..a094f65 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -18,6 +18,23 @@
 		reg-names = "csr-base";
 
 		coresight-name = "coresight-csr";
+		qcom,usb-bam-support;
+		qcom,hwctrl-set-support;
+		qcom,set-byte-cntr-support;
+
+		qcom,blk-size = <1>;
+	};
+
+	swao_csr: csr@6b0e000 {
+		compatible = "qcom,coresight-csr";
+		reg = <0x6b0e000 0x1000>;
+		reg-names = "csr-base";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+		coresight-name = "coresight-swao-csr";
+		qcom,timestamp-support;
 
 		qcom,blk-size = <1>;
 	};
@@ -113,6 +130,7 @@
 		reg-names = "tmc-base";
 
 		coresight-name = "coresight-tmc-etf-swao";
+		coresight-csr = <&csr>;
 
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
@@ -166,6 +184,15 @@
 			};
 
 			port@1 {
+				reg = <6>;
+				funnel_swao_in_sensor_etm0: endpoint {
+					slave-mode;
+					remote-endpoint=
+						<&sensor_etm0_out_funnel_swao>;
+				};
+			};
+
+			port@2 {
 				reg = <7>;
 				funnel_swao_in_tpda_swao: endpoint {
 					slave-mode;
@@ -277,6 +304,7 @@
 
 		coresight-name = "coresight-tmc-etr";
 		coresight-ctis = <&cti0 &cti8>;
+		coresight-csr = <&csr>;
 
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
@@ -301,6 +329,7 @@
 
 		coresight-name = "coresight-tmc-etf";
 		coresight-ctis = <&cti0 &cti8>;
+		coresight-csr = <&csr>;
 		arm,default-sink;
 
 		clocks = <&clock_aop QDSS_CLK>;
@@ -405,6 +434,7 @@
 			    "ddr-ch23-ctrl";
 
 		coresight-name = "coresight-hwevent";
+		coresight-csr = <&csr>;
 
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
@@ -548,6 +578,8 @@
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
 
+		status = "disabled";
+
 		ports {
 			#address-cells = <1>;
 			#size-cells = <0>;
@@ -2047,6 +2079,20 @@
 		};
 	};
 
+	sensor_etm0 {
+		compatible = "qcom,coresight-remote-etm";
+
+		coresight-name = "coresight-sensor-etm0";
+		qcom,inst-id = <8>;
+
+		port {
+			sensor_etm0_out_funnel_swao: endpoint {
+				remote-endpoint =
+					<&funnel_swao_in_sensor_etm0>;
+			};
+		};
+	};
+
 	modem_etm0 {
 		compatible = "qcom,coresight-remote-etm";
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
index ee0ad1f..000f5d3 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
@@ -16,6 +16,7 @@
 		compatible = "qcom,pil-tz-generic";
 		qcom,pas-id = <13>;
 		qcom,firmware-name = "a630_zap";
+		memory-region = <&pil_gpu_mem>;
 	};
 
 	msm_bus: qcom,kgsl-busmon{
@@ -127,6 +128,8 @@
 			#size-cells = <0>;
 			compatible = "qcom,gpu-coresight";
 
+			status = "disabled";
+
 			qcom,gpu-coresight@0 {
 				reg = <0>;
 				coresight-name = "coresight-gfx";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi b/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi
index c16e1d8..1c7269a 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -328,6 +328,13 @@
 		/delete-property/ pinctrl-0;
 	};
 
+	gpio-regulator@4 {
+		/delete-property/ gpio;
+		/delete-property/ vin-supply;
+		/delete-property/ pinctrl-names;
+		/delete-property/ pinctrl-0;
+	};
+
 	/delete-node/ qcom,spmi-debug@6b22000;
 
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
index 825f121..fc4b674 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
@@ -490,8 +490,8 @@
 
 		trips {
 			active-config0 {
-				temperature = <65000>;
-				hysteresis = <1000>;
+				temperature = <125000>;
+				hysteresis = <10000>;
 				type = "passive";
 			};
 		};
@@ -505,8 +505,8 @@
 
 		trips {
 			active-config0 {
-				temperature = <65000>;
-				hysteresis = <1000>;
+				temperature = <125000>;
+				hysteresis = <10000>;
 				type = "passive";
 			};
 		};
@@ -520,8 +520,8 @@
 
 		trips {
 			active-config0 {
-				temperature = <65000>;
-				hysteresis = <1000>;
+				temperature = <125000>;
+				hysteresis = <10000>;
 				type = "passive";
 			};
 		};
@@ -535,8 +535,8 @@
 
 		trips {
 			active-config0 {
-				temperature = <65000>;
-				hysteresis = <1000>;
+				temperature = <125000>;
+				hysteresis = <10000>;
 				type = "passive";
 			};
 		};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pcie.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pcie.dtsi
index daf5687..af7feb5 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pcie.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pcie.dtsi
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -202,6 +202,8 @@
 
 		qcom,ep-latency = <10>;
 
+		qcom,phy-status-offset = <0x974>;
+
 		qcom,boot-option = <0x1>;
 
 		linux,pci-domain = <0>;
@@ -535,6 +537,8 @@
 
 		qcom,slv-addr-space-size = <0x20000000>;
 
+		qcom,phy-status-offset = <0x1aac>;
+
 		qcom,boot-option = <0x1>;
 
 		linux,pci-domain = <1>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
index 191e76d..78be790 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -749,6 +749,123 @@
 			};
 		};
 
+		/* add pingrp for touchscreen */
+		pmx_ts_int_active {
+			ts_int_active: ts_int_active {
+				mux {
+					pins = "gpio122";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio122";
+					drive-strength = <8>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		pmx_ts_int_suspend {
+			ts_int_suspend1: ts_int_suspend1 {
+				mux {
+					pins = "gpio122";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio122";
+					drive-strength = <2>;
+					bias-pull-down;
+				};
+			};
+		};
+
+		pmx_ts_reset_active {
+			ts_reset_active: ts_reset_active {
+				mux {
+					pins = "gpio99";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio99";
+					drive-strength = <8>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		pmx_ts_reset_suspend {
+			ts_reset_suspend1: ts_reset_suspend1 {
+				mux {
+					pins = "gpio99";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio99";
+					drive-strength = <2>;
+					bias-pull-down;
+				};
+			};
+		};
+
+		pmx_ts_release {
+			ts_release: ts_release {
+				mux {
+					pins = "gpio122", "gpio99";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio122", "gpio99";
+					drive-strength = <2>;
+					bias-pull-down;
+				};
+			};
+		};
+
+		ts_mux {
+			ts_active: ts_active {
+				mux {
+					pins = "gpio99", "gpio122";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio99", "gpio122";
+					drive-strength = <16>;
+					bias-pull-up;
+				};
+			};
+
+			ts_reset_suspend: ts_reset_suspend {
+				mux {
+					pins = "gpio99";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio99";
+					drive-strength = <2>;
+					bias-pull-down;
+				};
+			};
+
+			ts_int_suspend: ts_int_suspend {
+				mux {
+					pins = "gpio122";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio122";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+		};
+
 		sec_aux_pcm {
 			sec_aux_pcm_sleep: sec_aux_pcm_sleep {
 				mux {
@@ -3099,6 +3216,20 @@
 			};
 		};
 
+		cam_sensor_rear_vana: cam_sensor_rear_vana {
+			/*  AVDD LDO */
+			mux {
+				pins = "gpio8";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio8";
+				bias-disable; /* No PULL */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
 		cam_res_mgr_active: cam_res_mgr_active {
 			/* AVDD_LDO*/
 			mux {
@@ -3200,6 +3331,77 @@
 				bias-pull-down;		/* pull down */
 			};
 		};
+
+		ap2mdm {
+			ap2mdm_active: ap2mdm_active {
+				mux {
+					/* ap2mdm-status
+					 * ap2mdm-errfatal
+					 * ap2mdm-vddmin
+					 */
+					pins = "gpio21", "gpio23";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio21", "gpio23";
+					drive-strength = <16>;
+					bias-disable;
+				};
+			};
+			ap2mdm_sleep: ap2mdm_sleep {
+				mux {
+					/* ap2mdm-status
+					 * ap2mdm-errfatal
+					 * ap2mdm-vddmin
+					 */
+					pins = "gpio21", "gpio23";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio21", "gpio23";
+					drive-strength = <8>;
+					bias-disable;
+				};
+
+			};
+		};
+
+		mdm2ap {
+			mdm2ap_active: mdm2ap_active {
+				mux {
+				/* mdm2ap-status
+				 * mdm2ap-errfatal
+				 * mdm2ap-vddmin
+				 */
+					pins = "gpio22", "gpio20";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio22", "gpio20";
+					drive-strength = <8>;
+					bias-disable;
+				};
+			};
+			mdm2ap_sleep: mdm2ap_sleep {
+				mux {
+					/* mdm2ap-status
+					 * mdm2ap-errfatal
+					 * mdm2ap-vddmin
+					 */
+					pins = "gpio22", "gpio20";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio22", "gpio20";
+					drive-strength = <8>;
+					bias-disable;
+				};
+			};
+		};
 	};
 };
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
index b24ef1d..ee10cfc 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
@@ -139,4 +139,8 @@
 		reg = <0xC300000 0x1000>, <0xC3F0004 0x4>;
 		reg-names = "phys_addr_base", "offset_addr";
 	};
+
+	qcom,rpmh-master-stats {
+		compatible = "qcom,rpmh-master-stats";
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
index 810afde..5fce5ff 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -32,7 +32,7 @@
 	 * instances only.
 	 */
 	qupv3_se6_4uart: qcom,qup_uart@0x898000 {
-		compatible = "qcom,msm-geni-serial-hs", "qcom,msm-geni-uart";
+		compatible = "qcom,msm-geni-serial-hs";
 		reg = <0x898000 0x4000>;
 		reg-names = "se_phys";
 		clock-names = "se-clk", "m-ahb", "s-ahb";
@@ -50,7 +50,7 @@
 	};
 
 	qupv3_se7_4uart: qcom,qup_uart@0x89c000 {
-		compatible = "qcom,msm-geni-serial-hs", "qcom,msm-geni-uart";
+		compatible = "qcom,msm-geni-serial-hs";
 		reg = <0x89c000 0x4000>;
 		reg-names = "se_phys";
 		clock-names = "se-clk", "m-ahb", "s-ahb";
@@ -423,7 +423,7 @@
 
 	/* Debug UART Instance for CDP/MTP platform */
 	qupv3_se9_2uart: qcom,qup_uart@0xa84000 {
-		compatible = "qcom,msm-geni-console", "qcom,msm-geni-uart";
+		compatible = "qcom,msm-geni-console";
 		reg = <0xa84000 0x4000>;
 		reg-names = "se_phys";
 		clock-names = "se-clk", "m-ahb", "s-ahb";
@@ -440,7 +440,7 @@
 
 	/* Debug UART Instance for RUMI platform */
 	qupv3_se10_2uart: qcom,qup_uart@0xa88000 {
-		compatible = "qcom,msm-geni-console", "qcom,msm-geni-uart";
+		compatible = "qcom,msm-geni-console";
 		reg = <0xa88000 0x4000>;
 		reg-names = "se_phys";
 		clock-names = "se-clk", "m-ahb", "s-ahb";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
index 0c1f097..a5c6ab5 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,6 +10,12 @@
  * GNU General Public License for more details.
  */
 
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
 #include "sdm845-pmic-overlay.dtsi"
 #include "sdm845-pinctrl-overlay.dtsi"
 #include "smb1355.dtsi"
@@ -60,6 +66,30 @@
 	qcom,sw-jeita-enable;
 };
 
+&qupv3_se3_i2c {
+	status = "ok";
+	nq@28 {
+		compatible = "qcom,nq-nci";
+		reg = <0x28>;
+		qcom,nq-irq = <&tlmm 63 0x00>;
+		qcom,nq-ven = <&tlmm 12 0x00>;
+		qcom,nq-firm = <&tlmm 62 0x00>;
+		qcom,nq-clkreq = <&pm8998_gpios 21 0x00>;
+		qcom,nq-esepwr = <&tlmm 116 0x00>;
+		interrupt-parent = <&tlmm>;
+		qcom,clk-src = "BBCLK3";
+		interrupts = <63 0>;
+		interrupt-names = "nfc_irq";
+		pinctrl-names = "nfc_active", "nfc_suspend";
+		pinctrl-0 = <&nfc_int_active
+			     &nfc_enable_active
+			     &nfc_clk_default>;
+		pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>;
+		clocks = <&clock_rpmh RPMH_LN_BB_CLK3>;
+		clock-names = "ref_clk";
+	};
+};
+
 &qupv3_se10_i2c {
 	status = "ok";
 };
@@ -106,6 +136,29 @@
 	};
 };
 
+&qusb_phy0 {
+		qcom,qusb-phy-init-seq =
+			/* <value reg_offset> */
+			   <0x23 0x210 /* PWR_CTRL1 */
+			    0x03 0x04  /* PLL_ANALOG_CONTROLS_TWO */
+			    0x7c 0x18c /* PLL_CLOCK_INVERTERS */
+			    0x80 0x2c  /* PLL_CMODE */
+			    0x0a 0x184 /* PLL_LOCK_DELAY */
+			    0x19 0xb4  /* PLL_DIGITAL_TIMERS_TWO */
+			    0x40 0x194 /* PLL_BIAS_CONTROL_1 */
+			    0x20 0x198 /* PLL_BIAS_CONTROL_2 */
+			    0x21 0x214 /* PWR_CTRL2 */
+			    0x00 0x220 /* IMP_CTRL1 */
+			    0x58 0x224 /* IMP_CTRL2 */
+			    0x27 0x240 /* TUNE1 */
+			    0x29 0x244 /* TUNE2 */
+			    0xca 0x248 /* TUNE3 */
+			    0x04 0x24c /* TUNE4 */
+			    0x03 0x250 /* TUNE5 */
+			    0x00 0x23c /* CHG_CTRL2 */
+			    0x22 0x210>; /* PWR_CTRL1 */
+};
+
 &pmi8998_haptics {
 	qcom,vmax-mv = <1800>;
 	qcom,wave-play-rate-us = <4255>;
@@ -153,8 +206,8 @@
 	qcom,vdd-io-current-level = <200 22000>;
 
 	pinctrl-names = "active", "sleep";
-	pinctrl-0 = <&sdc2_clk_on  &sdc2_cmd_on &storage_cd>;
-	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &storage_cd>;
+	pinctrl-0 = <&sdc2_clk_on  &sdc2_cmd_on &sdc2_data_on &storage_cd>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &storage_cd>;
 
 	cd-gpios = <&tlmm 126 GPIO_ACTIVE_HIGH>;
 
@@ -164,3 +217,30 @@
 &wil6210 {
 	status = "ok";
 };
+
+&qupv3_se5_i2c {
+	status = "ok";
+	synaptics_dsx@20 {
+		compatible = "synaptics,dsx-i2c";
+		reg = <0x20>;
+		interrupt-parent = <&tlmm>;
+		interrupts = <122 0x2008>;
+		vdd-supply = <&pm8998_l14>;
+		avdd-supply = <&pm8998_l28>;
+		pinctrl-names = "pmx_ts_active", "pmx_ts_suspend",
+				"pmx_ts_release";
+		pinctrl-0 = <&ts_int_active &ts_reset_active>;
+		pinctrl-1 = <&ts_int_suspend &ts_reset_suspend>;
+		pinctrl-2 = <&ts_release>;
+		synaptics,pwr-reg-name = "avdd";
+		synaptics,bus-reg-name = "vdd";
+		synaptics,ub-i2c-addr = <0x2c>;
+		synaptics,irq-gpio = <&tlmm 122 0x2008>;
+		synaptics,reset-gpio = <&tlmm 99 0x0>;
+		synaptics,irq-on-state = <0>;
+		synaptics,power-delay-ms = <200>;
+		synaptics,reset-delay-ms = <200>;
+		synaptics,reset-on-state = <0>;
+		synaptics,reset-active-ms = <20>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
index 9672b94..ec8665b 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -167,9 +167,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa1";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		proxy-supply = <&pm8998_l1>;
 		pm8998_l1: regulator-l1 {
@@ -180,7 +181,7 @@
 			qcom,proxy-consumer-enable;
 			qcom,proxy-consumer-current = <72000>;
 			qcom,init-voltage = <880000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 
 		pm8998_l1_ao: regulator-l1-ao {
@@ -189,7 +190,7 @@
 			regulator-min-microvolt = <880000>;
 			regulator-max-microvolt = <880000>;
 			qcom,init-voltage = <880000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 
 		regulator-l1-so {
@@ -198,7 +199,7 @@
 			regulator-min-microvolt = <880000>;
 			regulator-max-microvolt = <880000>;
 			qcom,init-voltage = <880000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 			qcom,init-enable = <0>;
 		};
 	};
@@ -207,9 +208,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa2";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 30000>;
 		pm8998_l2: regulator-l2 {
 			regulator-name = "pm8998_l2";
@@ -217,7 +219,7 @@
 			regulator-min-microvolt = <1200000>;
 			regulator-max-microvolt = <1200000>;
 			qcom,init-voltage = <1200000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 			regulator-always-on;
 		};
 	};
@@ -226,9 +228,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa3";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l3: regulator-l3 {
 			regulator-name = "pm8998_l3";
@@ -236,7 +239,7 @@
 			regulator-min-microvolt = <1000000>;
 			regulator-max-microvolt = <1000000>;
 			qcom,init-voltage = <1000000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -257,9 +260,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa5";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l5: regulator-l5 {
 			regulator-name = "pm8998_l5";
@@ -267,7 +271,7 @@
 			regulator-min-microvolt = <800000>;
 			regulator-max-microvolt = <800000>;
 			qcom,init-voltage = <800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -275,9 +279,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa6";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l6: regulator-l6 {
 			regulator-name = "pm8998_l6";
@@ -285,7 +290,7 @@
 			regulator-min-microvolt = <1856000>;
 			regulator-max-microvolt = <1856000>;
 			qcom,init-voltage = <1856000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -293,9 +298,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa7";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l7: regulator-l7 {
 			regulator-name = "pm8998_l7";
@@ -303,7 +309,7 @@
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1800000>;
 			qcom,init-voltage = <1800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -311,9 +317,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa8";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l8: regulator-l8 {
 			regulator-name = "pm8998_l8";
@@ -321,7 +328,7 @@
 			regulator-min-microvolt = <1200000>;
 			regulator-max-microvolt = <1248000>;
 			qcom,init-voltage = <1200000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -329,9 +336,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa9";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l9: regulator-l9 {
 			regulator-name = "pm8998_l9";
@@ -339,7 +347,7 @@
 			regulator-min-microvolt = <1704000>;
 			regulator-max-microvolt = <2928000>;
 			qcom,init-voltage = <1704000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -347,9 +355,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa10";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l10: regulator-l10 {
 			regulator-name = "pm8998_l10";
@@ -357,7 +366,7 @@
 			regulator-min-microvolt = <1704000>;
 			regulator-max-microvolt = <2928000>;
 			qcom,init-voltage = <1704000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -365,9 +374,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa11";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l11: regulator-l11 {
 			regulator-name = "pm8998_l11";
@@ -375,7 +385,7 @@
 			regulator-min-microvolt = <1000000>;
 			regulator-max-microvolt = <1048000>;
 			qcom,init-voltage = <1000000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -383,9 +393,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa12";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l12: regulator-l12 {
 			regulator-name = "pm8998_l12";
@@ -393,7 +404,7 @@
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1800000>;
 			qcom,init-voltage = <1800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -401,9 +412,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa13";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l13: regulator-l13 {
 			regulator-name = "pm8998_l13";
@@ -411,7 +423,7 @@
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <2960000>;
 			qcom,init-voltage = <1800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -419,9 +431,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa14";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 10000>;
 		proxy-supply = <&pm8998_l14>;
 		pm8998_l14: regulator-l14 {
@@ -432,7 +445,7 @@
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1880000>;
 			qcom,init-voltage = <1800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -440,9 +453,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa15";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l15: regulator-l15 {
 			regulator-name = "pm8998_l15";
@@ -450,7 +464,7 @@
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1800000>;
 			qcom,init-voltage = <1800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -458,9 +472,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa16";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l16: regulator-l16 {
 			regulator-name = "pm8998_l16";
@@ -468,7 +483,7 @@
 			regulator-min-microvolt = <2704000>;
 			regulator-max-microvolt = <2704000>;
 			qcom,init-voltage = <2704000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -476,9 +491,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa17";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l17: regulator-l17 {
 			regulator-name = "pm8998_l17";
@@ -486,7 +502,7 @@
 			regulator-min-microvolt = <1304000>;
 			regulator-max-microvolt = <1304000>;
 			qcom,init-voltage = <1304000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -494,9 +510,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa18";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l18: regulator-l18 {
 			regulator-name = "pm8998_l18";
@@ -504,7 +521,7 @@
 			regulator-min-microvolt = <2704000>;
 			regulator-max-microvolt = <2960000>;
 			qcom,init-voltage = <2704000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -512,9 +529,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa19";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l19: regulator-l19 {
 			regulator-name = "pm8998_l19";
@@ -522,7 +540,7 @@
 			regulator-min-microvolt = <2856000>;
 			regulator-max-microvolt = <3104000>;
 			qcom,init-voltage = <2856000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -530,9 +548,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa20";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l20: regulator-l20 {
 			regulator-name = "pm8998_l20";
@@ -540,7 +559,7 @@
 			regulator-min-microvolt = <2704000>;
 			regulator-max-microvolt = <2960000>;
 			qcom,init-voltage = <2704000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_HPM>;
 		};
 	};
 
@@ -548,9 +567,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa21";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l21: regulator-l21 {
 			regulator-name = "pm8998_l21";
@@ -558,7 +578,7 @@
 			regulator-min-microvolt = <2704000>;
 			regulator-max-microvolt = <2960000>;
 			qcom,init-voltage = <2704000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -566,9 +586,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa22";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l22: regulator-l22 {
 			regulator-name = "pm8998_l22";
@@ -576,7 +597,7 @@
 			regulator-min-microvolt = <2864000>;
 			regulator-max-microvolt = <3312000>;
 			qcom,init-voltage = <2864000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -584,9 +605,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa23";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l23: regulator-l23 {
 			regulator-name = "pm8998_l23";
@@ -594,7 +616,7 @@
 			regulator-min-microvolt = <3000000>;
 			regulator-max-microvolt = <3312000>;
 			qcom,init-voltage = <3000000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -602,9 +624,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa24";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l24-parent-supply = <&pm8998_l12>;
 		pm8998_l24: regulator-l24 {
@@ -613,7 +636,7 @@
 			regulator-min-microvolt = <3088000>;
 			regulator-max-microvolt = <3088000>;
 			qcom,init-voltage = <3088000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -621,9 +644,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa25";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l25: regulator-l25 {
 			regulator-name = "pm8998_l25";
@@ -631,7 +655,7 @@
 			regulator-min-microvolt = <3000000>;
 			regulator-max-microvolt = <3312000>;
 			qcom,init-voltage = <3000000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -639,9 +663,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa26";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		proxy-supply = <&pm8998_l26>;
 		pm8998_l26: regulator-l26 {
@@ -652,7 +677,7 @@
 			qcom,proxy-consumer-enable;
 			qcom,proxy-consumer-current = <43600>;
 			qcom,init-voltage = <1200000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -673,9 +698,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa28";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l28: regulator-l28 {
 			regulator-name = "pm8998_l28";
@@ -683,7 +709,7 @@
 			regulator-min-microvolt = <2856000>;
 			regulator-max-microvolt = <3008000>;
 			qcom,init-voltage = <2856000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -715,6 +741,7 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "bobb1";
+		qcom,regulator-type = "pmic4-bob";
 		qcom,send-defaults;
 
 		pmi8998_bob: regulator-bob {
@@ -723,7 +750,7 @@
 			regulator-min-microvolt = <3312000>;
 			regulator-max-microvolt = <3600000>;
 			qcom,init-voltage = <3312000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_BOB_PASS>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_PASS>;
 		};
 
 		pmi8998_bob_ao: regulator-bob-ao {
@@ -732,7 +759,7 @@
 			regulator-min-microvolt = <3312000>;
 			regulator-max-microvolt = <3600000>;
 			qcom,init-voltage = <3312000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_BOB_AUTO>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_AUTO>;
 		};
 	};
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi
index d2ee9eb..05d77d3 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi
@@ -156,6 +156,7 @@
 	qcom,cam_smmu {
 		compatible = "qcom,msm-cam-smmu";
 		status = "ok";
+		non-fatal-fault-disabled;
 
 		msm_cam_smmu_lrme {
 			compatible = "qcom,msm-cam-smmu-cb";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-dvt-overlay.dts
similarity index 65%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/sdm845-v2-qvr-dvt-overlay.dts
index 0a56c79..d26c975 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-dvt-overlay.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,15 +10,15 @@
  * GNU General Public License for more details.
  */
 
-
 /dts-v1/;
+/plugin/;
 
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "sdm845-v2-qvr-dvt.dtsi"
+#include "sdm845-qvr-audio-overlay.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
+	model = "Qualcomm Technologies, Inc. SDM845 V2 DVT QVR";
 	compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
-	qcom,board-id = <0x01000B 0x20>;
+	qcom,msm-id = <321 0x20000>;
+	qcom,board-id = <0x02000B 0x20>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-dvt.dts
similarity index 71%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/sdm845-v2-qvr-dvt.dts
index 0a56c79..9110954 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-dvt.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,11 +14,10 @@
 /dts-v1/;
 
 #include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "sdm845-v2-qvr-dvt.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
+	model = "Qualcomm Technologies, Inc. SDM845 V2 DVT QVR";
 	compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
-	qcom,board-id = <0x01000B 0x20>;
+	qcom,board-id = <0x02000B 0x20>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-dvt.dtsi
similarity index 66%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/sdm845-v2-qvr-dvt.dtsi
index 0a56c79..c629c53 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-dvt.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -9,16 +9,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
-
-
-/dts-v1/;
-
-#include "sdm845-v2.dtsi"
 #include "sdm845-qvr.dtsi"
+#include "sdm845-sde-display.dtsi"
 #include "sdm845-camera-sensor-qvr.dtsi"
 
-/ {
-	model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
-	compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
-	qcom,board-id = <0x01000B 0x20>;
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-evt-overlay.dts
similarity index 70%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/sdm845-v2-qvr-evt-overlay.dts
index 0a56c79..5172098 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-evt-overlay.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,15 +10,15 @@
  * GNU General Public License for more details.
  */
 
-
 /dts-v1/;
+/plugin/;
 
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "sdm845-v2-qvr-evt.dtsi"
+#include "sdm845-qvr-audio-overlay.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
+	model = "Qualcomm Technologies, Inc. SDM845 V2 EVT QVR";
 	compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
+	qcom,msm-id = <321 0x20000>;
 	qcom,board-id = <0x01000B 0x20>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-evt.dts
similarity index 76%
rename from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
rename to arch/arm64/boot/dts/qcom/sdm845-v2-qvr-evt.dts
index 0a56c79..19b12e2 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-evt.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,11 +14,10 @@
 /dts-v1/;
 
 #include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-#include "sdm845-camera-sensor-qvr.dtsi"
+#include "sdm845-v2-qvr-evt.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
+	model = "Qualcomm Technologies, Inc. SDM845 V2 EVT QVR";
 	compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
 	qcom,board-id = <0x01000B 0x20>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts b/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-evt.dtsi
similarity index 66%
copy from arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
copy to arch/arm64/boot/dts/qcom/sdm845-v2-qvr-evt.dtsi
index 0a56c79..c629c53 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-evt.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -9,16 +9,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
-
-
-/dts-v1/;
-
-#include "sdm845-v2.dtsi"
 #include "sdm845-qvr.dtsi"
+#include "sdm845-sde-display.dtsi"
 #include "sdm845-camera-sensor-qvr.dtsi"
 
-/ {
-	model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
-	compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
-	qcom,board-id = <0x01000B 0x20>;
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
index 947d28b..1551952 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -51,6 +51,7 @@
 			compatible = "qcom,memshare-peripheral";
 			qcom,peripheral-size = <0x500000>;
 			qcom,client-id = <1>;
+			qcom,allocate-boot-time;
 			label = "modem";
 		};
 	};
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 213dfdb..7832165 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -2648,6 +2648,7 @@
 		qcom,ipa-wdi2;
 		qcom,use-64-bit-dma-mask;
 		qcom,arm-smmu;
+		qcom,smmu-fast-map;
 		qcom,bandwidth-vote-for-ipa;
 		qcom,msm-bus,name = "ipa";
 		qcom,msm-bus,num-cases = <5>;
@@ -2770,7 +2771,6 @@
 
 		ipa_smmu_ap: ipa_smmu_ap {
 			compatible = "qcom,ipa-smmu-ap-cb";
-			qcom,smmu-s1-bypass;
 			iommus = <&apps_smmu 0x720 0x0>;
 			qcom,iova-mapping = <0x20000000 0x40000000>;
 			qcom,additional-mapping =
@@ -2780,7 +2780,6 @@
 
 		ipa_smmu_wlan: ipa_smmu_wlan {
 			compatible = "qcom,ipa-smmu-wlan-cb";
-			qcom,smmu-s1-bypass;
 			iommus = <&apps_smmu 0x721 0x0>;
 			qcom,additional-mapping =
 			/* ipa-uc ram */
@@ -2789,7 +2788,6 @@
 
 		ipa_smmu_uc: ipa_smmu_uc {
 			compatible = "qcom,ipa-smmu-uc-cb";
-			qcom,smmu-s1-bypass;
 			iommus = <&apps_smmu 0x722 0x0>;
 			qcom,iova-mapping = <0x40000000 0x20000000>;
 		};
@@ -3089,7 +3087,6 @@
 		vdd-3.3-ch0-supply = <&pm8998_l25>;
 		qcom,vdd-0.8-cx-mx-config = <800000 800000>;
 		qcom,vdd-3.3-ch0-config = <3104000 3312000>;
-		qcom,smmu-s1-bypass;
 	};
 
 	qmi-tmd-devices {
diff --git a/arch/arm64/configs/msm8953-perf_defconfig b/arch/arm64/configs/msm8953-perf_defconfig
index de90d43..698059c 100644
--- a/arch/arm64/configs/msm8953-perf_defconfig
+++ b/arch/arm64/configs/msm8953-perf_defconfig
@@ -83,6 +83,7 @@
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
 CONFIG_CPU_BOOST=y
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_CPU_FREQ_MSM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -230,6 +231,7 @@
 CONFIG_IPC_ROUTER=y
 CONFIG_IPC_ROUTER_SECURITY=y
 CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
 CONFIG_DMA_CMA=y
 CONFIG_ZRAM=y
 CONFIG_BLK_DEV_LOOP=y
@@ -293,6 +295,7 @@
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_MSM_LEGACY=y
 CONFIG_MSM_SMD_PKT=y
+CONFIG_MSM_ADSPRPC=y
 CONFIG_MSM_RDBG=m
 CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_MSM_V2=y
@@ -320,11 +323,17 @@
 CONFIG_MSM_APM=y
 CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
 CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
 CONFIG_THERMAL_QPNP=y
 CONFIG_THERMAL_QPNP_ADC_TM=y
 CONFIG_THERMAL_TSENS=y
-CONFIG_MSM_BCL_PERIPHERAL_CTL=y
-CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_CPR4_APSS=y
@@ -332,6 +341,7 @@
 CONFIG_REGULATOR_MEM_ACC=y
 CONFIG_REGULATOR_MSM_GFX_LDO=y
 CONFIG_REGULATOR_QPNP_LABIBB=y
+CONFIG_REGULATOR_QPNP_LCDB=y
 CONFIG_REGULATOR_QPNP=y
 CONFIG_REGULATOR_RPM_SMD=y
 CONFIG_REGULATOR_SPM=y
@@ -432,6 +442,7 @@
 CONFIG_LEDS_QPNP_FLASH=y
 CONFIG_LEDS_QPNP_WLED=y
 CONFIG_LEDS_QPNP_HAPTICS=y
+CONFIG_LEDS_QPNP_VIBRATOR_LDO=y
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_EDAC=y
 CONFIG_EDAC_MM_EDAC=y
@@ -457,6 +468,7 @@
 CONFIG_REMOTE_SPINLOCK_MSM=y
 CONFIG_MAILBOX=y
 CONFIG_ARM_SMMU=y
+CONFIG_QCOM_LAZY_MAPPING=y
 CONFIG_MSM_SPM=y
 CONFIG_MSM_L2_SPM=y
 CONFIG_MSM_BOOT_STATS=y
@@ -495,6 +507,7 @@
 CONFIG_MSM_TZ_LOG=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
 CONFIG_FUSE_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS_POSIX_ACL=y
diff --git a/arch/arm64/configs/msm8953_defconfig b/arch/arm64/configs/msm8953_defconfig
index 8145f47..a9eae76 100644
--- a/arch/arm64/configs/msm8953_defconfig
+++ b/arch/arm64/configs/msm8953_defconfig
@@ -90,6 +90,7 @@
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
 CONFIG_CPU_BOOST=y
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_CPU_FREQ_MSM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -239,6 +240,7 @@
 CONFIG_IPC_ROUTER=y
 CONFIG_IPC_ROUTER_SECURITY=y
 CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
 CONFIG_DMA_CMA=y
 CONFIG_ZRAM=y
 CONFIG_BLK_DEV_LOOP=y
@@ -303,6 +305,7 @@
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_MSM_LEGACY=y
 CONFIG_MSM_SMD_PKT=y
+CONFIG_MSM_ADSPRPC=y
 CONFIG_MSM_RDBG=m
 CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_MSM_V2=y
@@ -330,11 +333,17 @@
 CONFIG_MSM_APM=y
 CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
 CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
 CONFIG_THERMAL_QPNP=y
 CONFIG_THERMAL_QPNP_ADC_TM=y
 CONFIG_THERMAL_TSENS=y
-CONFIG_MSM_BCL_PERIPHERAL_CTL=y
-CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_CPR4_APSS=y
@@ -342,6 +351,7 @@
 CONFIG_REGULATOR_MEM_ACC=y
 CONFIG_REGULATOR_MSM_GFX_LDO=y
 CONFIG_REGULATOR_QPNP_LABIBB=y
+CONFIG_REGULATOR_QPNP_LCDB=y
 CONFIG_REGULATOR_QPNP=y
 CONFIG_REGULATOR_RPM_SMD=y
 CONFIG_REGULATOR_SPM=y
@@ -444,6 +454,7 @@
 CONFIG_LEDS_QPNP_FLASH=y
 CONFIG_LEDS_QPNP_WLED=y
 CONFIG_LEDS_QPNP_HAPTICS=y
+CONFIG_LEDS_QPNP_VIBRATOR_LDO=y
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_EDAC=y
 CONFIG_EDAC_MM_EDAC=y
@@ -469,6 +480,7 @@
 CONFIG_REMOTE_SPINLOCK_MSM=y
 CONFIG_MAILBOX=y
 CONFIG_ARM_SMMU=y
+CONFIG_QCOM_LAZY_MAPPING=y
 CONFIG_IOMMU_DEBUG=y
 CONFIG_IOMMU_DEBUG_TRACKING=y
 CONFIG_IOMMU_TESTS=y
@@ -503,6 +515,9 @@
 CONFIG_QCOM_DCC=y
 CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_WCNSS_CORE=y
+CONFIG_WCNSS_CORE_PRONTO=y
+CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
 CONFIG_SPDM_SCM=y
 CONFIG_DEVFREQ_SPDM=y
 CONFIG_PWM=y
@@ -518,6 +533,7 @@
 CONFIG_EXT4_FS_SECURITY=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
 CONFIG_FUSE_FS=y
 CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
diff --git a/arch/arm64/configs/sdm670-perf_defconfig b/arch/arm64/configs/sdm670-perf_defconfig
index 1904209..8aa1e7d 100644
--- a/arch/arm64/configs/sdm670-perf_defconfig
+++ b/arch/arm64/configs/sdm670-perf_defconfig
@@ -564,6 +564,8 @@
 CONFIG_ARM_GIC_V3_ACL=y
 CONFIG_ANDROID=y
 CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_NVMEM=y
+CONFIG_QCOM_QFPROM=y
 CONFIG_SENSORS_SSC=y
 CONFIG_MSM_TZ_LOG=y
 CONFIG_EXT2_FS=y
diff --git a/arch/arm64/configs/sdm670_defconfig b/arch/arm64/configs/sdm670_defconfig
index 670627d..667377f 100644
--- a/arch/arm64/configs/sdm670_defconfig
+++ b/arch/arm64/configs/sdm670_defconfig
@@ -582,6 +582,8 @@
 CONFIG_PHY_XGENE=y
 CONFIG_ANDROID=y
 CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_NVMEM=y
+CONFIG_QCOM_QFPROM=y
 CONFIG_SENSORS_SSC=y
 CONFIG_MSM_TZ_LOG=y
 CONFIG_EXT2_FS=y
@@ -593,6 +595,7 @@
 CONFIG_EXT4_FS_ICE_ENCRYPTION=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
 CONFIG_FUSE_FS=y
 CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
@@ -618,7 +621,6 @@
 CONFIG_DEBUG_OBJECTS_FREE=y
 CONFIG_DEBUG_OBJECTS_TIMERS=y
 CONFIG_DEBUG_OBJECTS_WORK=y
-CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
 CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
 CONFIG_SLUB_DEBUG_ON=y
 CONFIG_DEBUG_KMEMLEAK=y
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index 0ff77bd..f7ef61e 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -64,6 +64,7 @@
 CONFIG_ZSMALLOC=y
 CONFIG_BALANCE_ANON_FILE_RECLAIM=y
 CONFIG_SECCOMP=y
+CONFIG_HARDEN_BRANCH_PREDICTOR=y
 CONFIG_ARMV8_DEPRECATED=y
 CONFIG_SWP_EMULATION=y
 CONFIG_CP15_BARRIER_EMULATION=y
@@ -450,6 +451,12 @@
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_QPNP=y
+CONFIG_ESOC=y
+CONFIG_ESOC_DEV=y
+CONFIG_ESOC_CLIENT=y
+CONFIG_ESOC_MDM_4x=y
+CONFIG_ESOC_MDM_DRV=y
+CONFIG_ESOC_MDM_DBG_ENG=y
 CONFIG_DMADEVICES=y
 CONFIG_QCOM_GPI_DMA=y
 CONFIG_UIO=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index c9c1f28..a4f0ffa 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -68,6 +68,7 @@
 CONFIG_ZSMALLOC=y
 CONFIG_BALANCE_ANON_FILE_RECLAIM=y
 CONFIG_SECCOMP=y
+CONFIG_HARDEN_BRANCH_PREDICTOR=y
 CONFIG_ARMV8_DEPRECATED=y
 CONFIG_SWP_EMULATION=y
 CONFIG_CP15_BARRIER_EMULATION=y
@@ -458,6 +459,13 @@
 CONFIG_EDAC_KRYO3XX_ARM64_PANIC_ON_UE=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_QPNP=y
+CONFIG_ESOC=y
+CONFIG_ESOC_DEV=y
+CONFIG_ESOC_CLIENT=y
+CONFIG_ESOC_DEBUG=y
+CONFIG_ESOC_MDM_4x=y
+CONFIG_ESOC_MDM_DRV=y
+CONFIG_ESOC_MDM_DBG_ENG=y
 CONFIG_DMADEVICES=y
 CONFIG_QCOM_GPI_DMA=y
 CONFIG_QCOM_GPI_DMA_DEBUG=y
@@ -605,7 +613,6 @@
 CONFIG_DEBUG_OBJECTS_FREE=y
 CONFIG_DEBUG_OBJECTS_TIMERS=y
 CONFIG_DEBUG_OBJECTS_WORK=y
-CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
 CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
 CONFIG_SLUB_DEBUG_ON=y
 CONFIG_DEBUG_KMEMLEAK=y
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index ef5970e..46d0448 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -452,17 +452,4 @@
 	mrs	\rd, sp_el0
 	.endm
 
-/*
- * Errata workaround post TTBR0_EL1 update.
- */
-	.macro	post_ttbr0_update_workaround
-#ifdef CONFIG_CAVIUM_ERRATUM_27456
-alternative_if ARM64_WORKAROUND_CAVIUM_27456
-	ic	iallu
-	dsb	nsh
-	isb
-alternative_else_nop_endif
-#endif
-	.endm
-
 #endif	/* __ASM_ASSEMBLER_H */
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 87b4465..d64bf94 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -35,6 +35,10 @@
 #define ARM64_HYP_OFFSET_LOW			14
 #define ARM64_MISMATCHED_CACHE_LINE_SIZE	15
 
-#define ARM64_NCAPS				16
+#define ARM64_UNMAP_KERNEL_AT_EL0		16
+
+#define ARM64_HARDEN_BRANCH_PREDICTOR		17
+
+#define ARM64_NCAPS				18
 
 #endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index f8682a3..ddbf3b1 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -63,7 +63,6 @@
 ({									\
 	u32 _model = (midr) & MIDR_CPU_MODEL_MASK;			\
 	u32 rv = (midr) & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK);	\
-									\
 	_model == (model) && rv >= (rv_min) && rv <= (rv_max);		\
  })
 
@@ -76,7 +75,11 @@
 #define ARM_CPU_PART_AEM_V8		0xD0F
 #define ARM_CPU_PART_FOUNDATION		0xD00
 #define ARM_CPU_PART_CORTEX_A57		0xD07
+#define ARM_CPU_PART_CORTEX_A72		0xD08
 #define ARM_CPU_PART_CORTEX_A53		0xD03
+#define ARM_CPU_PART_CORTEX_A73		0xD09
+#define ARM_CPU_PART_CORTEX_A75		0xD0A
+#define ARM_CPU_PART_KRYO3G		0x802
 
 #define APM_CPU_PART_POTENZA		0x000
 
@@ -87,6 +90,10 @@
 
 #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
 #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
+#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
+#define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73)
+#define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75)
+#define MIDR_KRYO3G	MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, ARM_CPU_PART_KRYO3G)
 #define MIDR_THUNDERX	MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
 #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
 
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 1fb0230..40a8a94 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -169,7 +169,7 @@
 #ifdef CONFIG_COMPAT
 
 /* PIE load location for compat arm. Must match ARM ELF_ET_DYN_BASE. */
-#define COMPAT_ELF_ET_DYN_BASE		0x000400000UL
+#define COMPAT_ELF_ET_DYN_BASE		(2 * TASK_SIZE_32 / 3)
 
 /* AArch32 registers. */
 #define COMPAT_ELF_NGREG		18
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index d14c478..85997c0 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -175,6 +175,12 @@
 #define ESR_ELx_SYS64_ISS_SYS_CTR_READ	(ESR_ELx_SYS64_ISS_SYS_CTR | \
 					 ESR_ELx_SYS64_ISS_DIR_READ)
 
+#define ESR_ELx_SYS64_ISS_SYS_CNTVCT	(ESR_ELx_SYS64_ISS_SYS_VAL(3, 3, 2, 14, 0) | \
+					 ESR_ELx_SYS64_ISS_DIR_READ)
+
+#define ESR_ELx_SYS64_ISS_SYS_CNTFRQ	(ESR_ELx_SYS64_ISS_SYS_VAL(3, 3, 0, 14, 0) | \
+					 ESR_ELx_SYS64_ISS_DIR_READ)
+
 #ifndef __ASSEMBLY__
 #include <asm/types.h>
 
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index caf86be..d8e5805 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -51,6 +51,12 @@
 
 	FIX_EARLYCON_MEM_BASE,
 	FIX_TEXT_POKE0,
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+	FIX_ENTRY_TRAMP_DATA,
+	FIX_ENTRY_TRAMP_TEXT,
+#define TRAMP_VALIAS		(__fix_to_virt(FIX_ENTRY_TRAMP_TEXT))
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
 	__end_of_permanent_fixed_addresses,
 
 	/*
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 7803343..77a27af 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -78,8 +78,16 @@
 /*
  * Initial memory map attributes.
  */
-#define SWAPPER_PTE_FLAGS	(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
-#define SWAPPER_PMD_FLAGS	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+#define _SWAPPER_PTE_FLAGS	(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
+#define _SWAPPER_PMD_FLAGS	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+#define SWAPPER_PTE_FLAGS	(_SWAPPER_PTE_FLAGS | PTE_NG)
+#define SWAPPER_PMD_FLAGS	(_SWAPPER_PMD_FLAGS | PMD_SECT_NG)
+#else
+#define SWAPPER_PTE_FLAGS	_SWAPPER_PTE_FLAGS
+#define SWAPPER_PMD_FLAGS	_SWAPPER_PMD_FLAGS
+#endif
 
 #if ARM64_SWAPPER_USES_SECTION_MAPS
 #define SWAPPER_MM_MMUFLAGS	(PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS)
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 2a2752b..0dbc1c6 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -170,8 +170,7 @@
 #define VTCR_EL2_FLAGS			(VTCR_EL2_COMMON_BITS | VTCR_EL2_TGRAN_FLAGS)
 #define VTTBR_X				(VTTBR_X_TGRAN_MAGIC - VTCR_EL2_T0SZ_IPA)
 
-#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
-#define VTTBR_BADDR_MASK  (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_BADDR_MASK  (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_X)
 #define VTTBR_VMID_SHIFT  (UL(48))
 #define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
 
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index ef305f8..35ea9c1 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -313,5 +313,43 @@
 	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
 }
 
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+#include <asm/mmu.h>
+
+static inline void *kvm_get_hyp_vector(void)
+{
+	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
+	void *vect = kvm_ksym_ref(__kvm_hyp_vector);
+
+	if (data->fn) {
+		vect = __bp_harden_hyp_vecs_start +
+		       data->hyp_vectors_slot * SZ_2K;
+
+		if (!has_vhe())
+			vect = lm_alias(vect);
+	}
+
+	return vect;
+}
+
+static inline int kvm_map_vectors(void)
+{
+	return create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start),
+				   kvm_ksym_ref(__bp_harden_hyp_vecs_end),
+				   PAGE_HYP_EXEC);
+}
+
+#else
+static inline void *kvm_get_hyp_vector(void)
+{
+	return kvm_ksym_ref(__kvm_hyp_vector);
+}
+
+static inline int kvm_map_vectors(void)
+{
+	return 0;
+}
+#endif
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ARM64_KVM_MMU_H__ */
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 8d9fce0..f543df3 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -16,6 +16,13 @@
 #ifndef __ASM_MMU_H
 #define __ASM_MMU_H
 
+#define USER_ASID_FLAG	(UL(1) << 48)
+#define TTBR_ASID_MASK	(UL(0xffff) << 48)
+
+#ifndef __ASSEMBLY__
+
+#include <asm/percpu.h>
+
 typedef struct {
 	atomic64_t	id;
 	void		*vdso;
@@ -28,6 +35,49 @@
  */
 #define ASID(mm)	((mm)->context.id.counter & 0xffff)
 
+static inline bool arm64_kernel_unmapped_at_el0(void)
+{
+	return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
+	       cpus_have_cap(ARM64_UNMAP_KERNEL_AT_EL0);
+}
+
+typedef void (*bp_hardening_cb_t)(void);
+
+struct bp_hardening_data {
+	int			hyp_vectors_slot;
+	bp_hardening_cb_t	fn;
+};
+
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[];
+
+DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
+
+static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
+{
+	return raw_cpu_ptr(&bp_hardening_data);
+}
+
+static inline void arm64_apply_bp_hardening(void)
+{
+	struct bp_hardening_data *d;
+
+	if (!cpus_have_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
+		return;
+
+	d = arm64_get_bp_hardening_data();
+	if (d->fn)
+		d->fn();
+}
+#else
+static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
+{
+	return NULL;
+}
+
+static inline void arm64_apply_bp_hardening(void)	{ }
+#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */
+
 extern void paging_init(void);
 extern void bootmem_init(void);
 extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
@@ -37,4 +87,5 @@
 			       pgprot_t prot, bool allow_block_mappings);
 extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
 
+#endif	/* !__ASSEMBLY__ */
 #endif
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 8f8dde1..af0215a 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -57,6 +57,13 @@
 	isb();
 }
 
+static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
+{
+	BUG_ON(pgd == swapper_pg_dir);
+	cpu_set_reserved_ttbr0();
+	cpu_do_switch_mm(virt_to_phys(pgd),mm);
+}
+
 /*
  * TCR.T0SZ value to use when the ID map is active. Usually equals
  * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index eb0c2bd..8df4cb6 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -272,6 +272,7 @@
 #define TCR_TG1_4K		(UL(2) << TCR_TG1_SHIFT)
 #define TCR_TG1_64K		(UL(3) << TCR_TG1_SHIFT)
 
+#define TCR_A1			(UL(1) << 22)
 #define TCR_ASID16		(UL(1) << 36)
 #define TCR_TBI0		(UL(1) << 37)
 #define TCR_HA			(UL(1) << 39)
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 2142c77..84b5283 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -34,8 +34,16 @@
 
 #include <asm/pgtable-types.h>
 
-#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
-#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+#define _PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
+#define _PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+#define PROT_DEFAULT		(_PROT_DEFAULT | PTE_NG)
+#define PROT_SECT_DEFAULT	(_PROT_SECT_DEFAULT | PMD_SECT_NG)
+#else
+#define PROT_DEFAULT		_PROT_DEFAULT
+#define PROT_SECT_DEFAULT	_PROT_SECT_DEFAULT
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
 
 #define PROT_DEVICE_nGnRnE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
 #define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
@@ -48,6 +56,7 @@
 #define PROT_SECT_NORMAL_EXEC	(PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
 
 #define _PAGE_DEFAULT		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
+#define _HYP_PAGE_DEFAULT	(_PAGE_DEFAULT & ~PTE_NG)
 
 #define PAGE_KERNEL		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_RO		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
@@ -55,15 +64,15 @@
 #define PAGE_KERNEL_EXEC	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_EXEC_CONT	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
 
-#define PAGE_HYP		__pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
-#define PAGE_HYP_EXEC		__pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
-#define PAGE_HYP_RO		__pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
+#define PAGE_HYP		__pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
+#define PAGE_HYP_EXEC		__pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
+#define PAGE_HYP_RO		__pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
 #define PAGE_HYP_DEVICE		__pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
 
 #define PAGE_S2			__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
 #define PAGE_S2_DEVICE		__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
 
-#define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
+#define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_NG | PTE_PXN | PTE_UXN)
 #define PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
 #define PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
 #define PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index c05ee84..9f1bba6 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -92,6 +92,8 @@
 	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
 #define pte_valid_young(pte) \
 	((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
+#define pte_valid_user(pte) \
+	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
 
 /*
  * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
@@ -101,6 +103,18 @@
 #define pte_accessible(mm, pte)	\
 	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))
 
+/*
+ * p??_access_permitted() is true for valid user mappings (subject to the
+ * write permission check) other than user execute-only which do not have the
+ * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set.
+ */
+#define pte_access_permitted(pte, write) \
+	(pte_valid_user(pte) && (!(write) || pte_write(pte)))
+#define pmd_access_permitted(pmd, write) \
+	(pte_access_permitted(pmd_pte(pmd), (write)))
+#define pud_access_permitted(pud, write) \
+	(pte_access_permitted(pud_pte(pud), (write)))
+
 static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
 {
 	pte_val(pte) &= ~pgprot_val(prot);
@@ -707,6 +721,7 @@
 
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
+extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
 
 /*
  * Encode and decode a swap entry:
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 220633b7..9da52c2 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -39,12 +39,6 @@
 
 #include <asm/memory.h>
 
-#define cpu_switch_mm(pgd,mm)				\
-do {							\
-	BUG_ON(pgd == swapper_pg_dir);			\
-	cpu_do_switch_mm(virt_to_phys(pgd),mm);		\
-} while (0)
-
 #endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* __ASM_PROCFNS_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 7393cc7..88bbe36 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -117,6 +117,9 @@
 #define ID_AA64ISAR0_AES_SHIFT		4
 
 /* id_aa64pfr0 */
+#define ID_AA64PFR0_CSV3_SHIFT		60
+#define ID_AA64PFR0_CSV2_SHIFT		56
+#define ID_AA64PFR0_SVE_SHIFT		32
 #define ID_AA64PFR0_GIC_SHIFT		24
 #define ID_AA64PFR0_ASIMD_SHIFT		20
 #define ID_AA64PFR0_FP_SHIFT		16
diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h
index b64410c..96c11e7 100644
--- a/arch/arm64/include/asm/system_misc.h
+++ b/arch/arm64/include/asm/system_misc.h
@@ -46,6 +46,8 @@
 extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
 extern char* (*arch_read_hardware_id)(void);
 
+const char * __init arch_read_machine_name(void);
+
 #define show_unhandled_signals_ratelimited()				\
 ({									\
 	static DEFINE_RATELIMIT_STATE(_rs,				\
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index deab523..ad6bd8b 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -23,6 +23,7 @@
 
 #include <linux/sched.h>
 #include <asm/cputype.h>
+#include <asm/mmu.h>
 
 /*
  * Raw TLBI operations.
@@ -42,6 +43,11 @@
 
 #define __tlbi(op, ...)		__TLBI_N(op, ##__VA_ARGS__, 1, 0)
 
+#define __tlbi_user(op, arg) do {						\
+	if (arm64_kernel_unmapped_at_el0())					\
+		__tlbi(op, (arg) | USER_ASID_FLAG);				\
+} while (0)
+
 /*
  *	TLB Management
  *	==============
@@ -103,6 +109,7 @@
 
 	dsb(ishst);
 	__tlbi(aside1is, asid);
+	__tlbi_user(aside1is, asid);
 	dsb(ish);
 }
 
@@ -113,6 +120,7 @@
 
 	dsb(ishst);
 	__tlbi(vale1is, addr);
+	__tlbi_user(vale1is, addr);
 	dsb(ish);
 }
 
@@ -139,10 +147,13 @@
 
 	dsb(ishst);
 	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
-		if (last_level)
+		if (last_level) {
 			__tlbi(vale1is, addr);
-		else
+			__tlbi_user(vale1is, addr);
+		} else {
 			__tlbi(vae1is, addr);
+			__tlbi_user(vae1is, addr);
+		}
 	}
 	dsb(ish);
 }
@@ -182,6 +193,7 @@
 	unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);
 
 	__tlbi(vae1is, addr);
+	__tlbi_user(vae1is, addr);
 	dsb(ish);
 }
 
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 4d9222a..8b38b0d 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -20,6 +20,7 @@
 
 #include <asm/alternative.h>
 #include <asm/kernel-pgtable.h>
+#include <asm/mmu.h>
 #include <asm/sysreg.h>
 
 #ifndef __ASSEMBLY__
@@ -133,15 +134,19 @@
 {
 	unsigned long ttbr;
 
+	ttbr = read_sysreg(ttbr1_el1);
 	/* reserved_ttbr0 placed at the end of swapper_pg_dir */
-	ttbr = read_sysreg(ttbr1_el1) + SWAPPER_DIR_SIZE;
-	write_sysreg(ttbr, ttbr0_el1);
+	write_sysreg(ttbr + SWAPPER_DIR_SIZE, ttbr0_el1);
+	isb();
+	/* Set reserved ASID */
+	ttbr &= ~TTBR_ASID_MASK;
+	write_sysreg(ttbr, ttbr1_el1);
 	isb();
 }
 
 static inline void __uaccess_ttbr0_enable(void)
 {
-	unsigned long flags;
+	unsigned long flags, ttbr0, ttbr1;
 
 	/*
 	 * Disable interrupts to avoid preemption between reading the 'ttbr0'
@@ -149,7 +154,16 @@
 	 * roll-over and an update of 'ttbr0'.
 	 */
 	local_irq_save(flags);
-	write_sysreg(current_thread_info()->ttbr0, ttbr0_el1);
+	ttbr0 = current_thread_info()->ttbr0;
+
+	/* Restore active ASID */
+	ttbr1 = read_sysreg(ttbr1_el1);
+	ttbr1 |= ttbr0 & TTBR_ASID_MASK;
+	write_sysreg(ttbr1, ttbr1_el1);
+	isb();
+
+	/* Restore user page table */
+	write_sysreg(ttbr0, ttbr0_el1);
 	isb();
 	local_irq_restore(flags);
 }
@@ -439,11 +453,20 @@
 	add	\tmp1, \tmp1, #SWAPPER_DIR_SIZE	// reserved_ttbr0 at the end of swapper_pg_dir
 	msr	ttbr0_el1, \tmp1		// set reserved TTBR0_EL1
 	isb
+	sub     \tmp1, \tmp1, #SWAPPER_DIR_SIZE
+	bic     \tmp1, \tmp1, #TTBR_ASID_MASK
+	msr     ttbr1_el1, \tmp1                // set reserved ASID
+	isb
 	.endm
 
-	.macro	__uaccess_ttbr0_enable, tmp1
+	.macro	__uaccess_ttbr0_enable, tmp1, tmp2
 	get_thread_info \tmp1
 	ldr	\tmp1, [\tmp1, #TSK_TI_TTBR0]	// load saved TTBR0_EL1
+	mrs     \tmp2, ttbr1_el1
+	extr    \tmp2, \tmp2, \tmp1, #48
+	ror     \tmp2, \tmp2, #16
+	msr     ttbr1_el1, \tmp2                // set the active ASID
+	isb
 	msr	ttbr0_el1, \tmp1		// set the non-PAN TTBR0_EL1
 	isb
 	.endm
@@ -454,18 +477,18 @@
 alternative_else_nop_endif
 	.endm
 
-	.macro	uaccess_ttbr0_enable, tmp1, tmp2
+	.macro	uaccess_ttbr0_enable, tmp1, tmp2, tmp3
 alternative_if_not ARM64_HAS_PAN
-	save_and_disable_irq \tmp2		// avoid preemption
-	__uaccess_ttbr0_enable \tmp1
-	restore_irq \tmp2
+	save_and_disable_irq \tmp3		// avoid preemption
+	__uaccess_ttbr0_enable \tmp1, \tmp2
+	restore_irq \tmp3
 alternative_else_nop_endif
 	.endm
 #else
 	.macro	uaccess_ttbr0_disable, tmp1
 	.endm
 
-	.macro	uaccess_ttbr0_enable, tmp1, tmp2
+	.macro	uaccess_ttbr0_enable, tmp1, tmp2, tmp3
 	.endm
 #endif
 
@@ -479,8 +502,8 @@
 alternative_else_nop_endif
 	.endm
 
-	.macro	uaccess_enable_not_uao, tmp1, tmp2
-	uaccess_ttbr0_enable \tmp1, \tmp2
+	.macro	uaccess_enable_not_uao, tmp1, tmp2, tmp3
+	uaccess_ttbr0_enable \tmp1, \tmp2, \tmp3
 alternative_if ARM64_ALT_PAN_NOT_UAO
 	SET_PSTATE_PAN(0)
 alternative_else_nop_endif
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 2c03b01..446eabd 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -52,6 +52,10 @@
 arm64-obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o	\
 					   cpu-reset.o
 
+ifeq ($(CONFIG_KVM),y)
+arm64-obj-$(CONFIG_HARDEN_BRANCH_PREDICTOR)	+= bpi.o
+endif
+
 obj-y					+= $(arm64-obj-y) vdso/ probes/
 obj-m					+= $(arm64-obj-m)
 head-y					:= head.o
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index b3bb7ef..5d2d356 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -24,6 +24,7 @@
 #include <linux/kvm_host.h>
 #include <linux/suspend.h>
 #include <asm/cpufeature.h>
+#include <asm/fixmap.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
 #include <asm/smp_plat.h>
@@ -147,11 +148,14 @@
   DEFINE(ARM_SMCCC_RES_X2_OFFS,		offsetof(struct arm_smccc_res, a2));
   DEFINE(ARM_SMCCC_QUIRK_ID_OFFS,	offsetof(struct arm_smccc_quirk, id));
   DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS,	offsetof(struct arm_smccc_quirk, state));
-
   BLANK();
   DEFINE(HIBERN_PBE_ORIG,	offsetof(struct pbe, orig_address));
   DEFINE(HIBERN_PBE_ADDR,	offsetof(struct pbe, address));
   DEFINE(HIBERN_PBE_NEXT,	offsetof(struct pbe, next));
   DEFINE(ARM64_FTR_SYSVAL,	offsetof(struct arm64_ftr_reg, sys_val));
+  BLANK();
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+  DEFINE(TRAMP_VALIAS,		TRAMP_VALIAS);
+#endif
   return 0;
 }
diff --git a/arch/arm64/kernel/bpi.S b/arch/arm64/kernel/bpi.S
new file mode 100644
index 0000000..dec95bd
--- /dev/null
+++ b/arch/arm64/kernel/bpi.S
@@ -0,0 +1,79 @@
+/*
+ * Contains CPU specific branch predictor invalidation sequences
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+
+.macro ventry target
+	.rept 31
+	nop
+	.endr
+	b	\target
+.endm
+
+.macro vectors target
+	ventry \target + 0x000
+	ventry \target + 0x080
+	ventry \target + 0x100
+	ventry \target + 0x180
+
+	ventry \target + 0x200
+	ventry \target + 0x280
+	ventry \target + 0x300
+	ventry \target + 0x380
+
+	ventry \target + 0x400
+	ventry \target + 0x480
+	ventry \target + 0x500
+	ventry \target + 0x580
+
+	ventry \target + 0x600
+	ventry \target + 0x680
+	ventry \target + 0x700
+	ventry \target + 0x780
+.endm
+
+	.align	11
+ENTRY(__bp_harden_hyp_vecs_start)
+	.rept 4
+	vectors __kvm_hyp_vector
+	.endr
+ENTRY(__bp_harden_hyp_vecs_end)
+ENTRY(__psci_hyp_bp_inval_start)
+	sub	sp, sp, #(8 * 18)
+	stp	x16, x17, [sp, #(16 * 0)]
+	stp	x14, x15, [sp, #(16 * 1)]
+	stp	x12, x13, [sp, #(16 * 2)]
+	stp	x10, x11, [sp, #(16 * 3)]
+	stp	x8, x9, [sp, #(16 * 4)]
+	stp	x6, x7, [sp, #(16 * 5)]
+	stp	x4, x5, [sp, #(16 * 6)]
+	stp	x2, x3, [sp, #(16 * 7)]
+	stp	x0, x1, [sp, #(16 * 8)]
+	mov	x0, #0x84000000
+	smc	#0
+	ldp	x16, x17, [sp, #(16 * 0)]
+	ldp	x14, x15, [sp, #(16 * 1)]
+	ldp	x12, x13, [sp, #(16 * 2)]
+	ldp	x10, x11, [sp, #(16 * 3)]
+	ldp	x8, x9, [sp, #(16 * 4)]
+	ldp	x6, x7, [sp, #(16 * 5)]
+	ldp	x4, x5, [sp, #(16 * 6)]
+	ldp	x2, x3, [sp, #(16 * 7)]
+	ldp	x0, x1, [sp, #(16 * 8)]
+	add	sp, sp, #(8 * 18)
+ENTRY(__psci_hyp_bp_inval_end)
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index b75e917..653359b 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -46,6 +46,100 @@
 	return 0;
 }
 
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
+
+DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
+
+#ifdef CONFIG_KVM
+extern char __psci_hyp_bp_inval_start[], __psci_hyp_bp_inval_end[];
+
+static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
+				const char *hyp_vecs_end)
+{
+	void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
+	int i;
+
+	for (i = 0; i < SZ_2K; i += 0x80)
+		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
+
+	flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
+}
+
+static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+				      const char *hyp_vecs_start,
+				      const char *hyp_vecs_end)
+{
+	static int last_slot = -1;
+	static DEFINE_SPINLOCK(bp_lock);
+	int cpu, slot = -1;
+
+	spin_lock(&bp_lock);
+	for_each_possible_cpu(cpu) {
+		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
+			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
+			break;
+		}
+	}
+
+	if (slot == -1) {
+		last_slot++;
+		BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
+			/ SZ_2K) <= last_slot);
+		slot = last_slot;
+		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
+	}
+
+	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
+	__this_cpu_write(bp_hardening_data.fn, fn);
+	spin_unlock(&bp_lock);
+}
+#else
+#define __psci_hyp_bp_inval_start	NULL
+#define __psci_hyp_bp_inval_end		NULL
+
+static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+				      const char *hyp_vecs_start,
+				      const char *hyp_vecs_end)
+{
+	__this_cpu_write(bp_hardening_data.fn, fn);
+}
+#endif	/* CONFIG_KVM */
+
+static void  install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
+				     bp_hardening_cb_t fn,
+				     const char *hyp_vecs_start,
+				     const char *hyp_vecs_end)
+{
+	u64 pfr0;
+
+	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
+		return;
+
+	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
+	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
+		return;
+
+	__install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
+}
+
+#include <linux/psci.h>
+
+static int enable_psci_bp_hardening(void *data)
+{
+	const struct arm64_cpu_capabilities *entry = data;
+
+	if (psci_ops.get_version)
+		install_bp_hardening_cb(entry,
+				       (bp_hardening_cb_t)psci_ops.get_version,
+				       __psci_hyp_bp_inval_start,
+				       __psci_hyp_bp_inval_end);
+
+	return 0;
+}
+#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */
+
 #define MIDR_RANGE(model, min, max) \
 	.def_scope = SCOPE_LOCAL_CPU, \
 	.matches = is_affected_midr_range, \
@@ -53,6 +147,13 @@
 	.midr_range_min = min, \
 	.midr_range_max = max
 
+#define MIDR_ALL_VERSIONS(model) \
+	.def_scope = SCOPE_LOCAL_CPU, \
+	.matches = is_affected_midr_range, \
+	.midr_model = model, \
+	.midr_range_min = 0, \
+	.midr_range_max = (MIDR_VARIANT_MASK | MIDR_REVISION_MASK)
+
 const struct arm64_cpu_capabilities arm64_errata[] = {
 #if	defined(CONFIG_ARM64_ERRATUM_826319) || \
 	defined(CONFIG_ARM64_ERRATUM_827319) || \
@@ -130,6 +231,33 @@
 		.def_scope = SCOPE_LOCAL_CPU,
 		.enable = cpu_enable_trap_ctr_access,
 	},
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+	{
+		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+		.enable = enable_psci_bp_hardening,
+	},
+	{
+		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+		.enable = enable_psci_bp_hardening,
+	},
+	{
+		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+		.enable = enable_psci_bp_hardening,
+	},
+	{
+		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
+		.enable = enable_psci_bp_hardening,
+	},
+	{
+		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		MIDR_ALL_VERSIONS(MIDR_KRYO3G),
+		.enable = enable_psci_bp_hardening,
+	},
+#endif
 	{
 	}
 };
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 0127e1b..80ff3df5 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -100,6 +100,7 @@
 	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0),
 	S_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
 	S_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
+	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
 	/* Linux doesn't care about the EL3 */
 	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, ID_AA64PFR0_EL3_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL2_SHIFT, 4, 0),
@@ -748,6 +749,44 @@
 	return idmap_addr > GENMASK(VA_BITS - 2, 0) && !is_kernel_in_hyp_mode();
 }
 
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
+
+static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
+				int __unused)
+{
+	u64 pfr0 = read_system_reg(SYS_ID_AA64PFR0_EL1);
+
+	/* Forced on command line? */
+	if (__kpti_forced) {
+		pr_info_once("kernel page table isolation forced %s by command line option\n",
+			     __kpti_forced > 0 ? "ON" : "OFF");
+		return __kpti_forced > 0;
+	}
+
+	/* Useful for KASLR robustness */
+	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+		return true;
+
+	/* Defer to CPU feature registers */
+	return !cpuid_feature_extract_unsigned_field(pfr0,
+						     ID_AA64PFR0_CSV3_SHIFT);
+}
+
+static int __init parse_kpti(char *str)
+{
+	bool enabled;
+	int ret = strtobool(str, &enabled);
+
+	if (ret)
+		return ret;
+
+	__kpti_forced = enabled ? 1 : -1;
+	return 0;
+}
+__setup("kpti=", parse_kpti);
+#endif	/* CONFIG_UNMAP_KERNEL_AT_EL0 */
+
 static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.desc = "GIC system register CPU interface",
@@ -831,6 +870,14 @@
 		.def_scope = SCOPE_SYSTEM,
 		.matches = hyp_offset_low,
 	},
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+	{
+		.desc = "Kernel page table isolation (KPTI)",
+		.capability = ARM64_UNMAP_KERNEL_AT_EL0,
+		.def_scope = SCOPE_SYSTEM,
+		.matches = unmap_kernel_at_el0,
+	},
+#endif
 	{},
 };
 
@@ -951,7 +998,7 @@
 			 * uses an IPI, giving us a PSTATE that disappears when
 			 * we return.
 			 */
-			stop_machine(caps->enable, NULL, cpu_online_mask);
+			stop_machine(caps->enable, (void *)caps, cpu_online_mask);
 }
 
 /*
@@ -1007,7 +1054,7 @@
 			cpu_die_early();
 		}
 		if (caps->enable)
-			caps->enable(NULL);
+			caps->enable((void *)caps);
 	}
 }
 
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 718c4c8..8030583 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -29,6 +29,7 @@
 #include <asm/esr.h>
 #include <asm/irq.h>
 #include <asm/memory.h>
+#include <asm/mmu.h>
 #include <asm/ptrace.h>
 #include <asm/thread_info.h>
 #include <asm/uaccess.h>
@@ -70,8 +71,31 @@
 #define BAD_FIQ		2
 #define BAD_ERROR	3
 
-	.macro	kernel_entry, el, regsize = 64
+	.macro kernel_ventry, el, label, regsize = 64
+	.align 7
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+alternative_if ARM64_UNMAP_KERNEL_AT_EL0
+	.if	\el == 0
+	.if	\regsize == 64
+	mrs	x30, tpidrro_el0
+	msr	tpidrro_el0, xzr
+	.else
+	mov	x30, xzr
+	.endif
+	.endif
+alternative_else_nop_endif
+#endif
+
 	sub	sp, sp, #S_FRAME_SIZE
+	b	el\()\el\()_\label
+	.endm
+
+	.macro tramp_alias, dst, sym
+	mov_q	\dst, TRAMP_VALIAS
+	add	\dst, \dst, #(\sym - .entry.tramp.text)
+	.endm
+
+	.macro	kernel_entry, el, regsize = 64
 	.if	\regsize == 32
 	mov	w0, w0				// zero upper 32 bits of x0
 	.endif
@@ -126,8 +150,8 @@
 alternative_else_nop_endif
 
 	.if	\el != 0
-	mrs	x21, ttbr0_el1
-	tst	x21, #0xffff << 48		// Check for the reserved ASID
+	mrs	x21, ttbr1_el1
+	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
 	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
 	b.eq	1f				// TTBR0 access already disabled
 	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
@@ -190,7 +214,7 @@
 	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
 	.endif
 
-	__uaccess_ttbr0_enable x0
+	__uaccess_ttbr0_enable x0, x1
 
 	.if	\el == 0
 	/*
@@ -199,7 +223,7 @@
 	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
 	 * corruption).
 	 */
-	post_ttbr0_update_workaround
+	bl	post_ttbr_update_workaround
 	.endif
 1:
 	.if	\el != 0
@@ -211,18 +235,20 @@
 	.if	\el == 0
 	ldr	x23, [sp, #S_SP]		// load return stack pointer
 	msr	sp_el0, x23
+	tst	x22, #PSR_MODE32_BIT		// native task?
+	b.eq	3f
+
 #ifdef CONFIG_ARM64_ERRATUM_845719
 alternative_if ARM64_WORKAROUND_845719
-	tbz	x22, #4, 1f
 #ifdef CONFIG_PID_IN_CONTEXTIDR
 	mrs	x29, contextidr_el1
 	msr	contextidr_el1, x29
 #else
 	msr contextidr_el1, xzr
 #endif
-1:
 alternative_else_nop_endif
 #endif
+3:
 	.endif
 
 	msr	elr_el1, x21			// set up the return data
@@ -244,7 +270,21 @@
 	ldp	x28, x29, [sp, #16 * 14]
 	ldr	lr, [sp, #S_LR]
 	add	sp, sp, #S_FRAME_SIZE		// restore sp
-	eret					// return to kernel
+
+	.if	\el == 0
+alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+	bne	4f
+	msr	far_el1, x30
+	tramp_alias	x30, tramp_exit_native
+	br	x30
+4:
+	tramp_alias	x30, tramp_exit_compat
+	br	x30
+#endif
+	.else
+	eret
+	.endif
 	.endm
 
 	.macro	irq_stack_entry
@@ -316,31 +356,31 @@
 
 	.align	11
 ENTRY(vectors)
-	ventry	el1_sync_invalid		// Synchronous EL1t
-	ventry	el1_irq_invalid			// IRQ EL1t
-	ventry	el1_fiq_invalid			// FIQ EL1t
-	ventry	el1_error_invalid		// Error EL1t
+	kernel_ventry	1, sync_invalid			// Synchronous EL1t
+	kernel_ventry	1, irq_invalid			// IRQ EL1t
+	kernel_ventry	1, fiq_invalid			// FIQ EL1t
+	kernel_ventry	1, error_invalid		// Error EL1t
 
-	ventry	el1_sync			// Synchronous EL1h
-	ventry	el1_irq				// IRQ EL1h
-	ventry	el1_fiq_invalid			// FIQ EL1h
-	ventry	el1_error_invalid		// Error EL1h
+	kernel_ventry	1, sync				// Synchronous EL1h
+	kernel_ventry	1, irq				// IRQ EL1h
+	kernel_ventry	1, fiq_invalid			// FIQ EL1h
+	kernel_ventry	1, error_invalid		// Error EL1h
 
-	ventry	el0_sync			// Synchronous 64-bit EL0
-	ventry	el0_irq				// IRQ 64-bit EL0
-	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
-	ventry	el0_error_invalid		// Error 64-bit EL0
+	kernel_ventry	0, sync				// Synchronous 64-bit EL0
+	kernel_ventry	0, irq				// IRQ 64-bit EL0
+	kernel_ventry	0, fiq_invalid			// FIQ 64-bit EL0
+	kernel_ventry	0, error_invalid		// Error 64-bit EL0
 
 #ifdef CONFIG_COMPAT
-	ventry	el0_sync_compat			// Synchronous 32-bit EL0
-	ventry	el0_irq_compat			// IRQ 32-bit EL0
-	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
-	ventry	el0_error_invalid_compat	// Error 32-bit EL0
+	kernel_ventry	0, sync_compat, 32		// Synchronous 32-bit EL0
+	kernel_ventry	0, irq_compat, 32		// IRQ 32-bit EL0
+	kernel_ventry	0, fiq_invalid_compat, 32	// FIQ 32-bit EL0
+	kernel_ventry	0, error_invalid_compat, 32	// Error 32-bit EL0
 #else
-	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
-	ventry	el0_irq_invalid			// IRQ 32-bit EL0
-	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
-	ventry	el0_error_invalid		// Error 32-bit EL0
+	kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
+	kernel_ventry	0, irq_invalid, 32		// IRQ 32-bit EL0
+	kernel_ventry	0, fiq_invalid, 32		// FIQ 32-bit EL0
+	kernel_ventry	0, error_invalid, 32		// Error 32-bit EL0
 #endif
 END(vectors)
 
@@ -608,11 +648,14 @@
 	mrs	x26, far_el1
 	// enable interrupts before calling the main handler
 	enable_dbg_and_irq
+#ifdef CONFIG_TRACE_IRQFLAGS
+	bl	trace_hardirqs_off
+#endif
 	ct_user_exit
 	mov	x0, x26
 	mov	x1, x25
 	mov	x2, sp
-	bl	do_mem_abort
+	bl	do_el0_ia_bp_hardening
 	b	ret_to_user
 el0_fpsimd_acc:
 	/*
@@ -859,6 +902,119 @@
 
 	.popsection				// .entry.text
 
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+/*
+ * Exception vectors trampoline.
+ */
+	.pushsection ".entry.tramp.text", "ax"
+
+	.macro tramp_map_kernel, tmp
+	mrs	\tmp, ttbr1_el1
+	sub	\tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
+	bic	\tmp, \tmp, #USER_ASID_FLAG
+	msr	ttbr1_el1, \tmp
+#ifdef CONFIG_ARCH_MSM8996
+	/* ASID already in \tmp[63:48] */
+	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
+	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
+	/* 2MB boundary containing the vectors, so we nobble the walk cache */
+	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
+	isb
+	tlbi	vae1, \tmp
+	dsb	nsh
+#endif /* CONFIG_ARCH_MSM8996 */
+	.endm
+
+	.macro tramp_unmap_kernel, tmp
+	mrs	\tmp, ttbr1_el1
+	add	\tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
+	orr	\tmp, \tmp, #USER_ASID_FLAG
+	msr	ttbr1_el1, \tmp
+	/*
+	 * We avoid running the post_ttbr_update_workaround here because the
+	 * user and kernel ASIDs don't have conflicting mappings, so any
+	 * "blessing" as described in:
+	 *
+	 *   http://lkml.kernel.org/r/56BB848A.6060603@caviumnetworks.com
+	 *
+	 * will not hurt correctness. Whilst this may partially defeat the
+	 * point of using split ASIDs in the first place, it avoids
+	 * the hit of invalidating the entire I-cache on every return to
+	 * userspace.
+	 */
+	.endm
+
+	.macro tramp_ventry, regsize = 64
+	.align	7
+1:
+	.if	\regsize == 64
+	msr	tpidrro_el0, x30	// Restored in kernel_ventry
+	.endif
+	bl	2f
+	b	.
+2:
+	tramp_map_kernel	x30
+#ifdef CONFIG_RANDOMIZE_BASE
+	adr	x30, tramp_vectors + PAGE_SIZE
+#ifndef CONFIG_ARCH_MSM8996
+	isb
+#endif
+	ldr	x30, [x30]
+#else
+	ldr	x30, =vectors
+#endif
+	prfm	plil1strm, [x30, #(1b - tramp_vectors)]
+	msr	vbar_el1, x30
+	add	x30, x30, #(1b - tramp_vectors)
+	isb
+	ret
+	.endm
+
+	.macro tramp_exit, regsize = 64
+	adr	x30, tramp_vectors
+	msr	vbar_el1, x30
+	tramp_unmap_kernel	x30
+	.if	\regsize == 64
+	mrs	x30, far_el1
+	.endif
+	eret
+	.endm
+
+	.align	11
+ENTRY(tramp_vectors)
+	.space	0x400
+
+	tramp_ventry
+	tramp_ventry
+	tramp_ventry
+	tramp_ventry
+
+	tramp_ventry	32
+	tramp_ventry	32
+	tramp_ventry	32
+	tramp_ventry	32
+END(tramp_vectors)
+
+ENTRY(tramp_exit_native)
+	tramp_exit
+END(tramp_exit_native)
+
+ENTRY(tramp_exit_compat)
+	tramp_exit	32
+END(tramp_exit_compat)
+
+	.ltorg
+	.popsection				// .entry.tramp.text
+#ifdef CONFIG_RANDOMIZE_BASE
+	.pushsection ".rodata", "a"
+	.align PAGE_SHIFT
+	.globl	__entry_tramp_data_start
+__entry_tramp_data_start:
+	.quad	vectors
+	.popsection				// .rodata
+#endif /* CONFIG_RANDOMIZE_BASE */
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+
 /*
  * Special system call wrappers.
  */
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 5fe594e..ee0ea17 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -324,6 +324,15 @@
 
 	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
 
+	/*
+	 * In case p was allocated the same task_struct pointer as some
+	 * other recently-exited task, make sure p is disassociated from
+	 * any cpu that may have run that now-exited task recently.
+	 * Otherwise we could erroneously skip reloading the FPSIMD
+	 * registers for p.
+	 */
+	fpsimd_flush_task_state(p);
+
 	if (likely(!(p->flags & PF_KTHREAD))) {
 		*childregs = *current_pt_regs();
 		childregs->regs[0] = 0;
@@ -366,17 +375,17 @@
 
 static void tls_thread_switch(struct task_struct *next)
 {
-	unsigned long tpidr, tpidrro;
+	unsigned long tpidr;
 
 	tpidr = read_sysreg(tpidr_el0);
 	*task_user_tls(current) = tpidr;
 
-	tpidr = *task_user_tls(next);
-	tpidrro = is_compat_thread(task_thread_info(next)) ?
-		  next->thread.tp_value : 0;
+	if (is_compat_thread(task_thread_info(next)))
+		write_sysreg(next->thread.tp_value, tpidrro_el0);
+	else if (!arm64_kernel_unmapped_at_el0())
+		write_sysreg(0, tpidrro_el0);
 
-	write_sysreg(tpidr, tpidr_el0);
-	write_sysreg(tpidrro, tpidrro_el0);
+	write_sysreg(*task_user_tls(next), tpidr_el0);
 }
 
 /* Restore the UAO state depending on next's addr_limit */
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index a58fb92..f58539f 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -65,6 +65,7 @@
 #include <asm/efi.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/mmu_context.h>
+#include <asm/system_misc.h>
 
 phys_addr_t __fdt_pointer __initdata;
 
@@ -186,6 +187,11 @@
 		pr_warn("Large number of MPIDR hash buckets detected\n");
 }
 
+const char * __init __weak arch_read_machine_name(void)
+{
+	return of_flat_dt_get_machine_name();
+}
+
 static void __init setup_machine_fdt(phys_addr_t dt_phys)
 {
 	void *dt_virt = fixmap_remap_fdt(dt_phys);
@@ -201,7 +207,7 @@
 			cpu_relax();
 	}
 
-	machine_name = of_flat_dt_get_machine_name();
+	machine_name = arch_read_machine_name();
 	if (machine_name) {
 		dump_stack_set_arch_desc("%s (DT)", machine_name);
 		pr_info("Machine: %s\n", machine_name);
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 19f3515..cd53836 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -33,6 +33,7 @@
 #include <linux/syscalls.h>
 
 #include <asm/atomic.h>
+#include <asm/barrier.h>
 #include <asm/bug.h>
 #include <asm/debug-monitors.h>
 #include <asm/esr.h>
@@ -540,6 +541,25 @@
 	regs->pc += 4;
 }
 
+static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
+{
+	int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+
+	isb();
+	if (rt != 31)
+		regs->regs[rt] = arch_counter_get_cntvct();
+	regs->pc += 4;
+}
+
+static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
+{
+	int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+
+	if (rt != 31)
+		regs->regs[rt] = read_sysreg(cntfrq_el0);
+	regs->pc += 4;
+}
+
 struct sys64_hook {
 	unsigned int esr_mask;
 	unsigned int esr_val;
@@ -558,6 +578,18 @@
 		.esr_val = ESR_ELx_SYS64_ISS_SYS_CTR_READ,
 		.handler = ctr_read_handler,
 	},
+	{
+		/* Trap read access to CNTVCT_EL0 */
+		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
+		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCT,
+		.handler = cntvct_read_handler,
+	},
+	{
+		/* Trap read access to CNTFRQ_EL0 */
+		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
+		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTFRQ,
+		.handler = cntfrq_read_handler,
+	},
 	{},
 };
 
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index b8deffa..34d3ed6 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -56,6 +56,17 @@
 #define HIBERNATE_TEXT
 #endif
 
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+#define TRAMP_TEXT					\
+	. = ALIGN(PAGE_SIZE);				\
+	VMLINUX_SYMBOL(__entry_tramp_text_start) = .;	\
+	*(.entry.tramp.text)				\
+	. = ALIGN(PAGE_SIZE);				\
+	VMLINUX_SYMBOL(__entry_tramp_text_end) = .;
+#else
+#define TRAMP_TEXT
+#endif
+
 /*
  * The size of the PE/COFF section that covers the kernel image, which
  * runs from stext to _edata, must be a round multiple of the PE/COFF
@@ -128,6 +139,7 @@
 			HYPERVISOR_TEXT
 			IDMAP_TEXT
 			HIBERNATE_TEXT
+			TRAMP_TEXT
 			*(.fixup)
 			*(.gnu.warning)
 		. = ALIGN(16);
@@ -221,6 +233,11 @@
 	. += RESERVED_TTBR0_SIZE;
 #endif
 
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+	tramp_pg_dir = .;
+	. += PAGE_SIZE;
+#endif
+
 	_end = .;
 
 	STABS_DEBUG
@@ -240,7 +257,10 @@
 ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
 	<= SZ_4K, "Hibernate exit text too big or misaligned")
 #endif
-
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE,
+	"Entry trampoline text too big")
+#endif
 /*
  * If padding is applied before .head.text, virt<->phys conversions will fail.
  */
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index a204adf..85baada 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -125,7 +125,19 @@
 	return ret;
 }
 
+static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+
+	kvm_pr_unimpl("Unknown exception class: hsr: %#08x -- %s\n",
+		      hsr, esr_get_class_string(hsr));
+
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
 static exit_handle_fn arm_exit_handlers[] = {
+	[0 ... ESR_ELx_EC_MAX]	= kvm_handle_unknown_ec,
 	[ESR_ELx_EC_WFx]	= kvm_handle_wfx,
 	[ESR_ELx_EC_CP15_32]	= kvm_handle_cp15_32,
 	[ESR_ELx_EC_CP15_64]	= kvm_handle_cp15_64,
@@ -151,13 +163,6 @@
 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
 	u8 hsr_ec = ESR_ELx_EC(hsr);
 
-	if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
-	    !arm_exit_handlers[hsr_ec]) {
-		kvm_err("Unknown exception class: hsr: %#08x -- %s\n",
-			hsr, esr_get_class_string(hsr));
-		BUG();
-	}
-
 	return arm_exit_handlers[hsr_ec];
 }
 
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 0c848c1..3eab6ac 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -17,6 +17,7 @@
 
 #include <linux/types.h>
 #include <linux/jump_label.h>
+#include <uapi/linux/psci.h>
 
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
@@ -50,7 +51,7 @@
 	val &= ~CPACR_EL1_FPEN;
 	write_sysreg(val, cpacr_el1);
 
-	write_sysreg(__kvm_hyp_vector, vbar_el1);
+	write_sysreg(kvm_get_hyp_vector(), vbar_el1);
 }
 
 static void __hyp_text __activate_traps_nvhe(void)
@@ -308,6 +309,18 @@
 	if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
 		goto again;
 
+	if (exit_code == ARM_EXCEPTION_TRAP &&
+	    (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC64 ||
+	     kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC32) &&
+	    vcpu_get_reg(vcpu, 0) == PSCI_0_2_FN_PSCI_VERSION) {
+		u64 val = PSCI_RET_NOT_SUPPORTED;
+		if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
+			val = 2;
+
+		vcpu_set_reg(vcpu, 0, val);
+		goto again;
+	}
+
 	if (static_branch_unlikely(&vgic_v2_cpuif_trap) &&
 	    exit_code == ARM_EXCEPTION_TRAP) {
 		bool valid;
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index d7150e3..dd65ca2 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -30,7 +30,7 @@
  * Alignment fixed up by hardware.
  */
 ENTRY(__clear_user)
-	uaccess_enable_not_uao x2, x3
+	uaccess_enable_not_uao x2, x3, x4
 	mov	x2, x1			// save the size for fixup return
 	subs	x1, x1, #8
 	b.mi	2f
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index cfe1339..7e7e687 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -64,7 +64,7 @@
 
 end	.req	x5
 ENTRY(__arch_copy_from_user)
-	uaccess_enable_not_uao x3, x4
+	uaccess_enable_not_uao x3, x4, x5
 	add	end, x0, x2
 #include "copy_template.S"
 	uaccess_disable_not_uao x3
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index 718b1c4..074d52f 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -65,7 +65,7 @@
 
 end	.req	x5
 ENTRY(__copy_in_user)
-	uaccess_enable_not_uao x3, x4
+	uaccess_enable_not_uao x3, x4, x5
 	add	end, x0, x2
 #include "copy_template.S"
 	uaccess_disable_not_uao x3
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index e99e31c..6711844 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -63,7 +63,7 @@
 
 end	.req	x5
 ENTRY(__arch_copy_to_user)
-	uaccess_enable_not_uao x3, x4
+	uaccess_enable_not_uao x3, x4, x5
 	add	end, x0, x2
 #include "copy_template.S"
 	uaccess_disable_not_uao x3
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 97de0eb..9dd6d32 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -122,7 +122,7 @@
  *	- end     - virtual end address of region
  */
 ENTRY(__flush_cache_user_range)
-	uaccess_ttbr0_enable x2, x3
+	uaccess_ttbr0_enable x2, x3, x4
 	dcache_line_size x2, x3
 	sub	x3, x2, #1
 	bic	x4, x0, x3
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 4c63cb1..da5add9 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -39,7 +39,16 @@
 
 #define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
 #define ASID_FIRST_VERSION	(1UL << asid_bits)
-#define NUM_USER_ASIDS		ASID_FIRST_VERSION
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+#define NUM_USER_ASIDS		(ASID_FIRST_VERSION >> 1)
+#define asid2idx(asid)		(((asid) & ~ASID_MASK) >> 1)
+#define idx2asid(idx)		(((idx) << 1) & ~ASID_MASK)
+#else
+#define NUM_USER_ASIDS		(ASID_FIRST_VERSION)
+#define asid2idx(asid)		((asid) & ~ASID_MASK)
+#define idx2asid(idx)		asid2idx(idx)
+#endif
 
 /* Get the ASIDBits supported by the current CPU */
 static u32 get_cpu_asid_bits(void)
@@ -104,7 +113,7 @@
 		 */
 		if (asid == 0)
 			asid = per_cpu(reserved_asids, i);
-		__set_bit(asid & ~ASID_MASK, asid_map);
+		__set_bit(asid2idx(asid), asid_map);
 		per_cpu(reserved_asids, i) = asid;
 	}
 
@@ -159,16 +168,16 @@
 		 * We had a valid ASID in a previous life, so try to re-use
 		 * it if possible.
 		 */
-		asid &= ~ASID_MASK;
-		if (!__test_and_set_bit(asid, asid_map))
+		if (!__test_and_set_bit(asid2idx(asid), asid_map))
 			return newasid;
 	}
 
 	/*
 	 * Allocate a free ASID. If we can't find one, take a note of the
-	 * currently active ASIDs and mark the TLBs as requiring flushes.
-	 * We always count from ASID #1, as we use ASID #0 when setting a
-	 * reserved TTBR0 for the init_mm.
+	 * currently active ASIDs and mark the TLBs as requiring flushes.  We
+	 * always count from ASID #2 (index 1), as we use ASID #0 when setting
+	 * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
+	 * pairs.
 	 */
 	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
 	if (asid != NUM_USER_ASIDS)
@@ -185,7 +194,7 @@
 set_asid:
 	__set_bit(asid, asid_map);
 	cur_idx = asid;
-	return asid | generation;
+	return idx2asid(asid) | generation;
 }
 
 void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
@@ -229,6 +238,17 @@
 		cpu_switch_mm(mm->pgd, mm);
 }
 
+/* Errata workaround post TTBRx_EL1 update. */
+asmlinkage void post_ttbr_update_workaround(void)
+{
+	asm(ALTERNATIVE("nop; nop; nop",
+			"ic iallu; dsb nsh; isb",
+			ARM64_WORKAROUND_CAVIUM_27456,
+			CONFIG_CAVIUM_ERRATUM_27456));
+
+	arm64_apply_bp_hardening();
+}
+
 static int asids_init(void)
 {
 	asid_bits = get_cpu_asid_bits();
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index b5d88f8..2b8950e 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -473,7 +473,7 @@
 				unsigned int esr,
 				struct pt_regs *regs)
 {
-#define SCM_TLB_CONFLICT_CMD	0x1B
+#define SCM_TLB_CONFLICT_CMD	0x1F
 	struct scm_desc desc = {
 		.args[0] = addr,
 		.arginfo = SCM_ARGS(1),
@@ -618,6 +618,22 @@
 	arm64_notify_die("", regs, &info, esr);
 }
 
+asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr,
+						   unsigned int esr,
+						   struct pt_regs *regs)
+{
+	/*
+	 * We've taken an instruction abort from userspace and not yet
+	 * re-enabled IRQs. If the address is a kernel address, apply
+	 * BP hardening prior to enabling IRQs and pre-emption.
+	 */
+	if (addr > TASK_SIZE)
+		arm64_apply_bp_hardening();
+
+	local_irq_enable();
+	do_mem_abort(addr, esr, regs);
+}
+
 /*
  * Handle stack alignment exceptions.
  */
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index f8ef496..2b35b67 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -299,6 +299,7 @@
 		arm64_dma_phys_limit = max_zone_dma_phys();
 	else
 		arm64_dma_phys_limit = PHYS_MASK + 1;
+	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
 	dma_contiguous_reserve(arm64_dma_phys_limit);
 
 	memblock_allow_resize();
@@ -325,7 +326,6 @@
 	sparse_init();
 	zone_sizes_init(min, max);
 
-	high_memory = __va((max << PAGE_SHIFT) - 1) + 1;
 	memblock_dump_all();
 }
 
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 41efd5e..c66fa93 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -458,6 +458,37 @@
 	vm_area_add_early(vma);
 }
 
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+static int __init map_entry_trampoline(void)
+{
+	extern char __entry_tramp_text_start[];
+
+	pgprot_t prot = PAGE_KERNEL_EXEC;
+	phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);
+
+	/* The trampoline is always mapped and can therefore be global */
+	pgprot_val(prot) &= ~PTE_NG;
+
+	/* Map only the text into the trampoline page table */
+	memset(tramp_pg_dir, 0, PGD_SIZE);
+	__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
+			     prot, pgd_pgtable_alloc, 0);
+
+	/* Map both the text and data into the kernel page table */
+	__set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
+	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+		extern char __entry_tramp_data_start[];
+
+		__set_fixmap(FIX_ENTRY_TRAMP_DATA,
+			     __pa_symbol(__entry_tramp_data_start),
+			     PAGE_KERNEL_RO);
+	}
+
+	return 0;
+}
+core_initcall(map_entry_trampoline);
+#endif
+
 /*
  * Create fine-grained mappings for the kernel.
  */
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 8d21250..fa20d13 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -184,12 +184,14 @@
  *	- pgd_phys - physical address of new TTB
  */
 ENTRY(cpu_do_switch_mm)
+	mrs	x2, ttbr1_el1
 	mmid	x1, x1				// get mm->context.id
-	bfi	x0, x1, #48, #16		// set the ASID
-	msr	ttbr0_el1, x0			// set TTBR0
+	bfi	x2, x1, #48, #16		// set the ASID
+	msr	ttbr1_el1, x2			// in TTBR1 (since TCR.A1 is set)
 	isb
-	post_ttbr0_update_workaround
-	ret
+	msr	ttbr0_el1, x0			// now update TTBR0
+	isb
+	b	post_ttbr_update_workaround	// Back to C code...
 ENDPROC(cpu_do_switch_mm)
 
 	.pushsection ".idmap.text", "ax"
@@ -270,7 +272,7 @@
 	 * both user and kernel.
 	 */
 	ldr	x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
-			TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0
+			TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 | TCR_A1
 	tcr_set_idmap_t0sz	x10, x9
 
 	/*
diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S
index b41aff2..f542252 100644
--- a/arch/arm64/xen/hypercall.S
+++ b/arch/arm64/xen/hypercall.S
@@ -100,7 +100,7 @@
 	 * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation
 	 * is enabled (it implies that hardware UAO and PAN disabled).
 	 */
-	uaccess_ttbr0_enable x6, x7
+	uaccess_ttbr0_enable x6, x7, x8
 	hvc XEN_IMM
 
 	/*
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 3c1bd64..88c4b77 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -319,11 +319,14 @@
 
 config GPIO_ADI
 	def_bool y
+	depends on !PINCTRL
 	depends on (BF51x || BF52x || BF53x || BF538 || BF539 || BF561)
 
-config PINCTRL
+config PINCTRL_BLACKFIN_ADI2
 	def_bool y
-	depends on BF54x || BF60x
+	depends on (BF54x || BF60x)
+	select PINCTRL
+	select PINCTRL_ADI2
 
 config MEM_MT48LC64M4A2FB_7E
 	bool
diff --git a/arch/blackfin/Kconfig.debug b/arch/blackfin/Kconfig.debug
index f3337ee..a93cf06 100644
--- a/arch/blackfin/Kconfig.debug
+++ b/arch/blackfin/Kconfig.debug
@@ -17,6 +17,7 @@
 
 config DEBUG_MMRS
 	tristate "Generate Blackfin MMR tree"
+	depends on !PINCTRL
 	select DEBUG_FS
 	help
 	  Create a tree of Blackfin MMRs via the debugfs tree.  If
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
index 87131cd..6d3a504 100644
--- a/arch/m68k/mm/mcfmmu.c
+++ b/arch/m68k/mm/mcfmmu.c
@@ -169,7 +169,7 @@
 	max_pfn = max_low_pfn = PFN_DOWN(_ramend);
 	high_memory = (void *)_ramend;
 
-	m68k_virt_to_node_shift = fls(_ramend - _rambase - 1) - 6;
+	m68k_virt_to_node_shift = fls(_ramend - 1) - 6;
 	module_fixup(NULL, __start_fixup, __stop_fixup);
 
 	/* setup bootmem data */
diff --git a/arch/mips/bcm47xx/leds.c b/arch/mips/bcm47xx/leds.c
index d20ae63..46abe9e 100644
--- a/arch/mips/bcm47xx/leds.c
+++ b/arch/mips/bcm47xx/leds.c
@@ -330,7 +330,7 @@
 /* Verified on: WRT54GS V1.0 */
 static const struct gpio_led
 bcm47xx_leds_linksys_wrt54g_type_0101[] __initconst = {
-	BCM47XX_GPIO_LED(0, "green", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(0, "green", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
 	BCM47XX_GPIO_LED(1, "green", "power", 0, LEDS_GPIO_DEFSTATE_ON),
 	BCM47XX_GPIO_LED(7, "green", "dmz", 1, LEDS_GPIO_DEFSTATE_OFF),
 };
diff --git a/arch/mips/boot/dts/brcm/Makefile b/arch/mips/boot/dts/brcm/Makefile
index d61bc2a..7d90a87 100644
--- a/arch/mips/boot/dts/brcm/Makefile
+++ b/arch/mips/boot/dts/brcm/Makefile
@@ -22,7 +22,6 @@
 	bcm63268-comtrend-vr-3032u.dtb \
 	bcm93384wvg.dtb \
 	bcm93384wvg_viper.dtb \
-	bcm96358nb4ser.dtb \
 	bcm96368mvwg.dtb \
 	bcm9ejtagprb.dtb \
 	bcm97125cbmb.dtb \
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index 83054f7..8333ce9 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -19,6 +19,9 @@
 #include <asm/asmmacro-64.h>
 #endif
 
+/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
+#undef fp
+
 /*
  * Helper macros for generating raw instruction encodings.
  */
@@ -105,6 +108,7 @@
 	.macro	fpu_save_16odd thread
 	.set	push
 	.set	mips64r2
+	.set	fp=64
 	SET_HARDFLOAT
 	sdc1	$f1,  THREAD_FPR1(\thread)
 	sdc1	$f3,  THREAD_FPR3(\thread)
@@ -163,6 +167,7 @@
 	.macro	fpu_restore_16odd thread
 	.set	push
 	.set	mips64r2
+	.set	fp=64
 	SET_HARDFLOAT
 	ldc1	$f1,  THREAD_FPR1(\thread)
 	ldc1	$f3,  THREAD_FPR3(\thread)
@@ -234,9 +239,6 @@
 	.endm
 
 #ifdef TOOLCHAIN_SUPPORTS_MSA
-/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
-#undef fp
-
 	.macro	_cfcmsa	rd, cs
 	.set	push
 	.set	mips32r2
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index c558bce..6e716a5 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -683,6 +683,18 @@
 	struct task_struct *t;
 	int max_users;
 
+	/* If nothing to change, return right away, successfully.  */
+	if (value == mips_get_process_fp_mode(task))
+		return 0;
+
+	/* Only accept a mode change if 64-bit FP enabled for o32.  */
+	if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
+		return -EOPNOTSUPP;
+
+	/* And only for o32 tasks.  */
+	if (IS_ENABLED(CONFIG_64BIT) && !test_thread_flag(TIF_32BIT_REGS))
+		return -EOPNOTSUPP;
+
 	/* Check the value is valid */
 	if (value & ~known_bits)
 		return -EOPNOTSUPP;
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 3de0260..0c8ae2c 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -439,25 +439,38 @@
 
 #endif /* CONFIG_64BIT */
 
-static int fpr_get(struct task_struct *target,
-		   const struct user_regset *regset,
-		   unsigned int pos, unsigned int count,
-		   void *kbuf, void __user *ubuf)
+/*
+ * Copy the floating-point context to the supplied NT_PRFPREG buffer,
+ * !CONFIG_CPU_HAS_MSA variant.  FP context's general register slots
+ * correspond 1:1 to buffer slots.  Only general registers are copied.
+ */
+static int fpr_get_fpa(struct task_struct *target,
+		       unsigned int *pos, unsigned int *count,
+		       void **kbuf, void __user **ubuf)
 {
-	unsigned i;
-	int err;
+	return user_regset_copyout(pos, count, kbuf, ubuf,
+				   &target->thread.fpu,
+				   0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
+}
+
+/*
+ * Copy the floating-point context to the supplied NT_PRFPREG buffer,
+ * CONFIG_CPU_HAS_MSA variant.  Only lower 64 bits of FP context's
+ * general register slots are copied to buffer slots.  Only general
+ * registers are copied.
+ */
+static int fpr_get_msa(struct task_struct *target,
+		       unsigned int *pos, unsigned int *count,
+		       void **kbuf, void __user **ubuf)
+{
+	unsigned int i;
 	u64 fpr_val;
+	int err;
 
-	/* XXX fcr31  */
-
-	if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
-		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-					   &target->thread.fpu,
-					   0, sizeof(elf_fpregset_t));
-
+	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
 	for (i = 0; i < NUM_FPU_REGS; i++) {
 		fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
-		err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+		err = user_regset_copyout(pos, count, kbuf, ubuf,
 					  &fpr_val, i * sizeof(elf_fpreg_t),
 					  (i + 1) * sizeof(elf_fpreg_t));
 		if (err)
@@ -467,27 +480,64 @@
 	return 0;
 }
 
-static int fpr_set(struct task_struct *target,
+/*
+ * Copy the floating-point context to the supplied NT_PRFPREG buffer.
+ * Choose the appropriate helper for general registers, and then copy
+ * the FCSR register separately.
+ */
+static int fpr_get(struct task_struct *target,
 		   const struct user_regset *regset,
 		   unsigned int pos, unsigned int count,
-		   const void *kbuf, const void __user *ubuf)
+		   void *kbuf, void __user *ubuf)
 {
-	unsigned i;
+	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
 	int err;
+
+	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
+		err = fpr_get_fpa(target, &pos, &count, &kbuf, &ubuf);
+	else
+		err = fpr_get_msa(target, &pos, &count, &kbuf, &ubuf);
+	if (err)
+		return err;
+
+	err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+				  &target->thread.fpu.fcr31,
+				  fcr31_pos, fcr31_pos + sizeof(u32));
+
+	return err;
+}
+
+/*
+ * Copy the supplied NT_PRFPREG buffer to the floating-point context,
+ * !CONFIG_CPU_HAS_MSA variant.   Buffer slots correspond 1:1 to FP
+ * context's general register slots.  Only general registers are copied.
+ */
+static int fpr_set_fpa(struct task_struct *target,
+		       unsigned int *pos, unsigned int *count,
+		       const void **kbuf, const void __user **ubuf)
+{
+	return user_regset_copyin(pos, count, kbuf, ubuf,
+				  &target->thread.fpu,
+				  0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
+}
+
+/*
+ * Copy the supplied NT_PRFPREG buffer to the floating-point context,
+ * CONFIG_CPU_HAS_MSA variant.  Buffer slots are copied to lower 64
+ * bits only of FP context's general register slots.  Only general
+ * registers are copied.
+ */
+static int fpr_set_msa(struct task_struct *target,
+		       unsigned int *pos, unsigned int *count,
+		       const void **kbuf, const void __user **ubuf)
+{
+	unsigned int i;
 	u64 fpr_val;
-
-	/* XXX fcr31  */
-
-	init_fp_ctx(target);
-
-	if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
-		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-					  &target->thread.fpu,
-					  0, sizeof(elf_fpregset_t));
+	int err;
 
 	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
-	for (i = 0; i < NUM_FPU_REGS && count >= sizeof(elf_fpreg_t); i++) {
-		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+	for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
+		err = user_regset_copyin(pos, count, kbuf, ubuf,
 					 &fpr_val, i * sizeof(elf_fpreg_t),
 					 (i + 1) * sizeof(elf_fpreg_t));
 		if (err)
@@ -498,6 +548,53 @@
 	return 0;
 }
 
+/*
+ * Copy the supplied NT_PRFPREG buffer to the floating-point context.
+ * Choose the appropriate helper for general registers, and then copy
+ * the FCSR register separately.
+ *
+ * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
+ * which is supposed to have been guaranteed by the kernel before
+ * calling us, e.g. in `ptrace_regset'.  We enforce that requirement,
+ * so that we can safely avoid preinitializing temporaries for
+ * partial register writes.
+ */
+static int fpr_set(struct task_struct *target,
+		   const struct user_regset *regset,
+		   unsigned int pos, unsigned int count,
+		   const void *kbuf, const void __user *ubuf)
+{
+	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
+	u32 fcr31;
+	int err;
+
+	BUG_ON(count % sizeof(elf_fpreg_t));
+
+	if (pos + count > sizeof(elf_fpregset_t))
+		return -EIO;
+
+	init_fp_ctx(target);
+
+	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
+		err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf);
+	else
+		err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf);
+	if (err)
+		return err;
+
+	if (count > 0) {
+		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+					 &fcr31,
+					 fcr31_pos, fcr31_pos + sizeof(u32));
+		if (err)
+			return err;
+
+		ptrace_setfcr31(target, fcr31);
+	}
+
+	return err;
+}
+
 enum mips_regset {
 	REGSET_GPR,
 	REGSET_FPR,
@@ -647,6 +744,19 @@
 	.n		= ARRAY_SIZE(mips64_regsets),
 };
 
+#ifdef CONFIG_MIPS32_N32
+
+static const struct user_regset_view user_mipsn32_view = {
+	.name		= "mipsn32",
+	.e_flags	= EF_MIPS_ABI2,
+	.e_machine	= ELF_ARCH,
+	.ei_osabi	= ELF_OSABI,
+	.regsets	= mips64_regsets,
+	.n		= ARRAY_SIZE(mips64_regsets),
+};
+
+#endif /* CONFIG_MIPS32_N32 */
+
 #endif /* CONFIG_64BIT */
 
 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
@@ -658,6 +768,10 @@
 	if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
 		return &user_mips_view;
 #endif
+#ifdef CONFIG_MIPS32_N32
+	if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
+		return &user_mipsn32_view;
+#endif
 	return &user_mips64_view;
 #endif
 }
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 9ade60c..7f2519c 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -1781,7 +1781,7 @@
 			SPFROMREG(fs, MIPSInst_FS(ir));
 			SPFROMREG(fd, MIPSInst_FD(ir));
 			rv.s = ieee754sp_maddf(fd, fs, ft);
-			break;
+			goto copcsr;
 		}
 
 		case fmsubf_op: {
@@ -1794,7 +1794,7 @@
 			SPFROMREG(fs, MIPSInst_FS(ir));
 			SPFROMREG(fd, MIPSInst_FD(ir));
 			rv.s = ieee754sp_msubf(fd, fs, ft);
-			break;
+			goto copcsr;
 		}
 
 		case frint_op: {
@@ -1818,7 +1818,7 @@
 			SPFROMREG(fs, MIPSInst_FS(ir));
 			rv.w = ieee754sp_2008class(fs);
 			rfmt = w_fmt;
-			break;
+			goto copcsr;
 		}
 
 		case fmin_op: {
@@ -1830,7 +1830,7 @@
 			SPFROMREG(ft, MIPSInst_FT(ir));
 			SPFROMREG(fs, MIPSInst_FS(ir));
 			rv.s = ieee754sp_fmin(fs, ft);
-			break;
+			goto copcsr;
 		}
 
 		case fmina_op: {
@@ -1842,7 +1842,7 @@
 			SPFROMREG(ft, MIPSInst_FT(ir));
 			SPFROMREG(fs, MIPSInst_FS(ir));
 			rv.s = ieee754sp_fmina(fs, ft);
-			break;
+			goto copcsr;
 		}
 
 		case fmax_op: {
@@ -1854,7 +1854,7 @@
 			SPFROMREG(ft, MIPSInst_FT(ir));
 			SPFROMREG(fs, MIPSInst_FS(ir));
 			rv.s = ieee754sp_fmax(fs, ft);
-			break;
+			goto copcsr;
 		}
 
 		case fmaxa_op: {
@@ -1866,7 +1866,7 @@
 			SPFROMREG(ft, MIPSInst_FT(ir));
 			SPFROMREG(fs, MIPSInst_FS(ir));
 			rv.s = ieee754sp_fmaxa(fs, ft);
-			break;
+			goto copcsr;
 		}
 
 		case fabs_op:
@@ -2110,7 +2110,7 @@
 			DPFROMREG(fs, MIPSInst_FS(ir));
 			DPFROMREG(fd, MIPSInst_FD(ir));
 			rv.d = ieee754dp_maddf(fd, fs, ft);
-			break;
+			goto copcsr;
 		}
 
 		case fmsubf_op: {
@@ -2123,7 +2123,7 @@
 			DPFROMREG(fs, MIPSInst_FS(ir));
 			DPFROMREG(fd, MIPSInst_FD(ir));
 			rv.d = ieee754dp_msubf(fd, fs, ft);
-			break;
+			goto copcsr;
 		}
 
 		case frint_op: {
@@ -2147,7 +2147,7 @@
 			DPFROMREG(fs, MIPSInst_FS(ir));
 			rv.w = ieee754dp_2008class(fs);
 			rfmt = w_fmt;
-			break;
+			goto copcsr;
 		}
 
 		case fmin_op: {
@@ -2159,7 +2159,7 @@
 			DPFROMREG(ft, MIPSInst_FT(ir));
 			DPFROMREG(fs, MIPSInst_FS(ir));
 			rv.d = ieee754dp_fmin(fs, ft);
-			break;
+			goto copcsr;
 		}
 
 		case fmina_op: {
@@ -2171,7 +2171,7 @@
 			DPFROMREG(ft, MIPSInst_FT(ir));
 			DPFROMREG(fs, MIPSInst_FS(ir));
 			rv.d = ieee754dp_fmina(fs, ft);
-			break;
+			goto copcsr;
 		}
 
 		case fmax_op: {
@@ -2183,7 +2183,7 @@
 			DPFROMREG(ft, MIPSInst_FT(ir));
 			DPFROMREG(fs, MIPSInst_FS(ir));
 			rv.d = ieee754dp_fmax(fs, ft);
-			break;
+			goto copcsr;
 		}
 
 		case fmaxa_op: {
@@ -2195,7 +2195,7 @@
 			DPFROMREG(ft, MIPSInst_FT(ir));
 			DPFROMREG(fs, MIPSInst_FS(ir));
 			rv.d = ieee754dp_fmaxa(fs, ft);
-			break;
+			goto copcsr;
 		}
 
 		case fabs_op:
diff --git a/arch/mips/pci/pci-mt7620.c b/arch/mips/pci/pci-mt7620.c
index 628c513..a7962f7 100644
--- a/arch/mips/pci/pci-mt7620.c
+++ b/arch/mips/pci/pci-mt7620.c
@@ -121,7 +121,7 @@
 		else
 			break;
 		if (retry++ > WAITRETRY_MAX) {
-			printk(KERN_WARN "PCIE-PHY retry failed.\n");
+			pr_warn("PCIE-PHY retry failed.\n");
 			return -1;
 		}
 	}
diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
index 6f892c1..0696142 100644
--- a/arch/mips/ralink/mt7620.c
+++ b/arch/mips/ralink/mt7620.c
@@ -141,8 +141,8 @@
 	FUNC("i2c", 0, 4, 2),
 };
 
-static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("reclk", 0, 36, 1) };
-static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 37, 1) };
+static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("refclk", 0, 37, 1) };
+static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 36, 1) };
 static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 38, 1) };
 static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) };
 
diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h
index 140faa1..1311e6b 100644
--- a/arch/openrisc/include/asm/uaccess.h
+++ b/arch/openrisc/include/asm/uaccess.h
@@ -211,7 +211,7 @@
 	case 1: __get_user_asm(x, ptr, retval, "l.lbz"); break;		\
 	case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break;		\
 	case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break;		\
-	case 8: __get_user_asm2(x, ptr, retval);			\
+	case 8: __get_user_asm2(x, ptr, retval); break;			\
 	default: (x) = __get_user_bad();				\
 	}								\
 } while (0)
diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h
index 8be707e..82dea14 100644
--- a/arch/parisc/include/asm/ldcw.h
+++ b/arch/parisc/include/asm/ldcw.h
@@ -11,6 +11,7 @@
    for the semaphore.  */
 
 #define __PA_LDCW_ALIGNMENT	16
+#define __PA_LDCW_ALIGN_ORDER	4
 #define __ldcw_align(a) ({					\
 	unsigned long __ret = (unsigned long) &(a)->lock[0];	\
 	__ret = (__ret + __PA_LDCW_ALIGNMENT - 1)		\
@@ -28,6 +29,7 @@
    ldcd). */
 
 #define __PA_LDCW_ALIGNMENT	4
+#define __PA_LDCW_ALIGN_ORDER	2
 #define __ldcw_align(a) (&(a)->slock)
 #define __LDCW	"ldcw,co"
 
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 4fcff2d..e3d3e8e 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -35,6 +35,7 @@
 #include <asm/pgtable.h>
 #include <asm/signal.h>
 #include <asm/unistd.h>
+#include <asm/ldcw.h>
 #include <asm/thread_info.h>
 
 #include <linux/linkage.h>
@@ -46,6 +47,14 @@
 #endif
 
 	.import		pa_tlb_lock,data
+	.macro  load_pa_tlb_lock reg
+#if __PA_LDCW_ALIGNMENT > 4
+	load32	PA(pa_tlb_lock) + __PA_LDCW_ALIGNMENT-1, \reg
+	depi	0,31,__PA_LDCW_ALIGN_ORDER, \reg
+#else
+	load32	PA(pa_tlb_lock), \reg
+#endif
+	.endm
 
 	/* space_to_prot macro creates a prot id from a space id */
 
@@ -457,7 +466,7 @@
 	.macro		tlb_lock	spc,ptp,pte,tmp,tmp1,fault
 #ifdef CONFIG_SMP
 	cmpib,COND(=),n	0,\spc,2f
-	load32		PA(pa_tlb_lock),\tmp
+	load_pa_tlb_lock \tmp
 1:	LDCW		0(\tmp),\tmp1
 	cmpib,COND(=)	0,\tmp1,1b
 	nop
@@ -480,7 +489,7 @@
 	/* Release pa_tlb_lock lock. */
 	.macro		tlb_unlock1	spc,tmp
 #ifdef CONFIG_SMP
-	load32		PA(pa_tlb_lock),\tmp
+	load_pa_tlb_lock \tmp
 	tlb_unlock0	\spc,\tmp
 #endif
 	.endm
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index adf7187..2d40c4f 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -36,6 +36,7 @@
 #include <asm/assembly.h>
 #include <asm/pgtable.h>
 #include <asm/cache.h>
+#include <asm/ldcw.h>
 #include <linux/linkage.h>
 
 	.text
@@ -333,8 +334,12 @@
 
 	.macro	tlb_lock	la,flags,tmp
 #ifdef CONFIG_SMP
-	ldil		L%pa_tlb_lock,%r1
-	ldo		R%pa_tlb_lock(%r1),\la
+#if __PA_LDCW_ALIGNMENT > 4
+	load32		pa_tlb_lock + __PA_LDCW_ALIGNMENT-1, \la
+	depi		0,31,__PA_LDCW_ALIGN_ORDER, \la
+#else
+	load32		pa_tlb_lock, \la
+#endif
 	rsm		PSW_SM_I,\flags
 1:	LDCW		0(\la),\tmp
 	cmpib,<>,n	0,\tmp,3f
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index 7593787..c3a532a 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -39,6 +39,7 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/fs.h>
+#include <linux/cpu.h>
 #include <linux/module.h>
 #include <linux/personality.h>
 #include <linux/ptrace.h>
@@ -181,6 +182,44 @@
 }
 
 /*
+ * Idle thread support
+ *
+ * Detect when running on QEMU with SeaBIOS PDC Firmware and let
+ * QEMU idle the host too.
+ */
+
+int running_on_qemu __read_mostly;
+
+void __cpuidle arch_cpu_idle_dead(void)
+{
+	/* nop on real hardware, qemu will offline CPU. */
+	asm volatile("or %%r31,%%r31,%%r31\n":::);
+}
+
+void __cpuidle arch_cpu_idle(void)
+{
+	local_irq_enable();
+
+	/* nop on real hardware, qemu will idle sleep. */
+	asm volatile("or %%r10,%%r10,%%r10\n":::);
+}
+
+static int __init parisc_idle_init(void)
+{
+	const char *marker;
+
+	/* check QEMU/SeaBIOS marker in PAGE0 */
+	marker = (char *) &PAGE0->pad0;
+	running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0);
+
+	if (!running_on_qemu)
+		cpu_idle_poll_ctrl(1);
+
+	return 0;
+}
+arch_initcall(parisc_idle_init);
+
+/*
  * Copy architecture-specific thread state
  */
 int
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 41e60a9..e775f80 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -690,15 +690,15 @@
 	/* ELF32 Process entry path */
 lws_compare_and_swap_2:
 #ifdef CONFIG_64BIT
-	/* Clip the input registers */
+	/* Clip the input registers. We don't need to clip %r23 as we
+	   only use it for word operations */
 	depdi	0, 31, 32, %r26
 	depdi	0, 31, 32, %r25
 	depdi	0, 31, 32, %r24
-	depdi	0, 31, 32, %r23
 #endif
 
 	/* Check the validity of the size pointer */
-	subi,>>= 4, %r23, %r0
+	subi,>>= 3, %r23, %r0
 	b,n	lws_exit_nosys
 
 	/* Jump to the functions which will load the old and new values into
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 617dece..a60c9c6 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -72,8 +72,15 @@
 MULTIPLEWORD	:= -mmultiple
 endif
 
-cflags-$(CONFIG_CPU_BIG_ENDIAN)		+= $(call cc-option,-mbig-endian)
+ifdef CONFIG_PPC64
+cflags-$(CONFIG_CPU_BIG_ENDIAN)		+= $(call cc-option,-mabi=elfv1)
+cflags-$(CONFIG_CPU_BIG_ENDIAN)		+= $(call cc-option,-mcall-aixdesc)
+aflags-$(CONFIG_CPU_BIG_ENDIAN)		+= $(call cc-option,-mabi=elfv1)
+aflags-$(CONFIG_CPU_LITTLE_ENDIAN)	+= -mabi=elfv2
+endif
+
 cflags-$(CONFIG_CPU_LITTLE_ENDIAN)	+= -mlittle-endian
+cflags-$(CONFIG_CPU_BIG_ENDIAN)		+= $(call cc-option,-mbig-endian)
 ifneq ($(cc-name),clang)
   cflags-$(CONFIG_CPU_LITTLE_ENDIAN)	+= -mno-strict-align
 endif
@@ -113,7 +120,9 @@
 CFLAGS-$(CONFIG_PPC64)	+= $(call cc-option,-mabi=elfv2,$(call cc-option,-mcall-aixdesc))
 AFLAGS-$(CONFIG_PPC64)	+= $(call cc-option,-mabi=elfv2)
 else
+CFLAGS-$(CONFIG_PPC64)	+= $(call cc-option,-mabi=elfv1)
 CFLAGS-$(CONFIG_PPC64)	+= $(call cc-option,-mcall-aixdesc)
+AFLAGS-$(CONFIG_PPC64)	+= $(call cc-option,-mabi=elfv1)
 endif
 CFLAGS-$(CONFIG_PPC64)	+= $(call cc-option,-mcmodel=medium,$(call cc-option,-mminimal-toc))
 CFLAGS-$(CONFIG_PPC64)	+= $(call cc-option,-mno-pointers-to-nested-functions)
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index f61cad3..4c935f7 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -201,6 +201,10 @@
 					      unsigned long phys);
 extern void hash__vmemmap_remove_mapping(unsigned long start,
 				     unsigned long page_size);
+
+int hash__create_section_mapping(unsigned long start, unsigned long end);
+int hash__remove_section_mapping(unsigned long start, unsigned long end);
+
 #endif /* !__ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index 1e8fceb..430d038 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -53,17 +53,25 @@
 	return (__force __sum16)(~((__force u32)sum + tmp) >> 16);
 }
 
+static inline u32 from64to32(u64 x)
+{
+	/* add up 32-bit and 32-bit for 32+c bit */
+	x = (x & 0xffffffff) + (x >> 32);
+	/* add up carry.. */
+	x = (x & 0xffffffff) + (x >> 32);
+	return (u32)x;
+}
+
 static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
 					__u8 proto, __wsum sum)
 {
 #ifdef __powerpc64__
-	unsigned long s = (__force u32)sum;
+	u64 s = (__force u32)sum;
 
 	s += (__force u32)saddr;
 	s += (__force u32)daddr;
 	s += proto + len;
-	s += (s >> 32);
-	return (__force __wsum) s;
+	return (__force __wsum) from64to32(s);
 #else
     __asm__("\n\
 	addc %0,%0,%1 \n\
@@ -100,7 +108,7 @@
 
 #ifdef __powerpc64__
 	res += (__force u64)addend;
-	return (__force __wsum)((u32)res + (res >> 32));
+	return (__force __wsum) from64to32(res);
 #else
 	asm("addc %0,%0,%1;"
 	    "addze %0,%0;"
@@ -123,8 +131,7 @@
 
 	for (i = 0; i < ihl - 1; i++, ptr++)
 		s += *ptr;
-	s += (s >> 32);
-	return (__force __wsum)s;
+	return (__force __wsum)from64to32(s);
 #else
 	__wsum sum, tmp;
 
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index 7803756..9e05c88 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -97,6 +97,7 @@
 	beqlr
 	li	r0,0
 	mtspr	SPRN_LPID,r0
+	mtspr	SPRN_PID,r0
 	mfspr	r3,SPRN_LPCR
 	LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
 	or	r3, r3, r4
@@ -119,6 +120,7 @@
 	beqlr
 	li	r0,0
 	mtspr	SPRN_LPID,r0
+	mtspr	SPRN_PID,r0
 	mfspr   r3,SPRN_LPCR
 	LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
 	or	r3, r3, r4
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index bbe77ae..3600c0d 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -102,7 +102,7 @@
 static void do_signal(struct task_struct *tsk)
 {
 	sigset_t *oldset = sigmask_to_save();
-	struct ksignal ksig;
+	struct ksignal ksig = { .sig = 0 };
 	int ret;
 	int is32 = is_32bit_task();
 
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 78dabf06..bd66628 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -747,7 +747,7 @@
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-int create_section_mapping(unsigned long start, unsigned long end)
+int hash__create_section_mapping(unsigned long start, unsigned long end)
 {
 	int rc = htab_bolt_mapping(start, end, __pa(start),
 				   pgprot_val(PAGE_KERNEL), mmu_linear_psize,
@@ -761,7 +761,7 @@
 	return rc;
 }
 
-int remove_section_mapping(unsigned long start, unsigned long end)
+int hash__remove_section_mapping(unsigned long start, unsigned long end)
 {
 	int rc = htab_remove_mapping(start, end, mmu_linear_psize,
 				     mmu_kernel_ssize);
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index f4f437c..0fad7f6 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -125,3 +125,21 @@
 	else if (mmu_hash_ops.hpte_clear_all)
 		mmu_hash_ops.hpte_clear_all();
 }
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+int create_section_mapping(unsigned long start, unsigned long end)
+{
+	if (radix_enabled())
+		return -ENODEV;
+
+	return hash__create_section_mapping(start, end);
+}
+
+int remove_section_mapping(unsigned long start, unsigned long end)
+{
+	if (radix_enabled())
+		return -ENODEV;
+
+	return hash__remove_section_mapping(start, end);
+}
+#endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 9a25dce..44c33ee 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -173,6 +173,10 @@
 	 */
 	register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
 	pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
+	asm volatile("ptesync" : : : "memory");
+	asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
+		     "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
+	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
 }
 
 static void __init radix_init_partition_table(void)
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 72c27b8..083f9274 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -401,8 +401,12 @@
 	int ret;
 	__u64 target;
 
-	if (is_kernel_addr(addr))
-		return branch_target((unsigned int *)addr);
+	if (is_kernel_addr(addr)) {
+		if (probe_kernel_read(&instr, (void *)addr, sizeof(instr)))
+			return 0;
+
+		return branch_target(&instr);
+	}
 
 	/* Userspace: need copy instruction here then translate it */
 	pagefault_disable();
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index 7b2ca16..991c6a5 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -516,7 +516,7 @@
 {
 	if (s1 < s2)
 		return 1;
-	if (s2 > s1)
+	if (s1 > s2)
 		return -1;
 
 	return memcmp(d1, d2, s1);
diff --git a/arch/powerpc/platforms/powernv/opal-async.c b/arch/powerpc/platforms/powernv/opal-async.c
index 83bebee..0f7b16e 100644
--- a/arch/powerpc/platforms/powernv/opal-async.c
+++ b/arch/powerpc/platforms/powernv/opal-async.c
@@ -39,18 +39,18 @@
 	int token;
 
 	spin_lock_irqsave(&opal_async_comp_lock, flags);
-	token = find_first_bit(opal_async_complete_map, opal_max_async_tokens);
+	token = find_first_zero_bit(opal_async_token_map, opal_max_async_tokens);
 	if (token >= opal_max_async_tokens) {
 		token = -EBUSY;
 		goto out;
 	}
 
-	if (__test_and_set_bit(token, opal_async_token_map)) {
+	if (!__test_and_clear_bit(token, opal_async_complete_map)) {
 		token = -EBUSY;
 		goto out;
 	}
 
-	__clear_bit(token, opal_async_complete_map);
+	__set_bit(token, opal_async_token_map);
 
 out:
 	spin_unlock_irqrestore(&opal_async_comp_lock, flags);
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index dcdfee0..f602307 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -2623,6 +2623,9 @@
 	level_shift = entries_shift + 3;
 	level_shift = max_t(unsigned, level_shift, PAGE_SHIFT);
 
+	if ((level_shift - 3) * levels + page_shift >= 60)
+		return -EINVAL;
+
 	/* Allocate TCE table */
 	addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
 			levels, tce_table_size, &offset, &total_allocated);
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index efe8b6b..b33faa0 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -289,7 +289,7 @@
 {
 	unsigned long ret_freq;
 
-	ret_freq = cpufreq_quick_get(cpu) * 1000ul;
+	ret_freq = cpufreq_get(cpu) * 1000ul;
 
 	/*
 	 * If the backend cpufreq driver does not exist,
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index ada29ea..f523ac8 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -274,7 +274,9 @@
 			if (bank->disk->major > 0)
 				unregister_blkdev(bank->disk->major,
 						bank->disk->disk_name);
-			del_gendisk(bank->disk);
+			if (bank->disk->flags & GENHD_FL_UP)
+				del_gendisk(bank->disk);
+			put_disk(bank->disk);
 		}
 		device->dev.platform_data = NULL;
 		if (bank->io_addr != 0)
@@ -299,6 +301,7 @@
 	device_remove_file(&device->dev, &dev_attr_ecc);
 	free_irq(bank->irq_id, device);
 	del_gendisk(bank->disk);
+	put_disk(bank->disk);
 	iounmap((void __iomem *) bank->io_addr);
 	kfree(bank);
 
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
index f267ee0..716353b 100644
--- a/arch/powerpc/sysdev/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
@@ -845,12 +845,12 @@
 
 u32 ipic_get_mcp_status(void)
 {
-	return ipic_read(primary_ipic->regs, IPIC_SERMR);
+	return ipic_read(primary_ipic->regs, IPIC_SERSR);
 }
 
 void ipic_clear_mcp_status(u32 mask)
 {
-	ipic_write(primary_ipic->regs, IPIC_SERMR, mask);
+	ipic_write(primary_ipic->regs, IPIC_SERSR, mask);
 }
 
 /* Return an interrupt vector or 0 if no interrupt is pending. */
diff --git a/arch/s390/include/asm/asm-prototypes.h b/arch/s390/include/asm/asm-prototypes.h
new file mode 100644
index 0000000..2c3413b
--- /dev/null
+++ b/arch/s390/include/asm/asm-prototypes.h
@@ -0,0 +1,8 @@
+#ifndef _ASM_S390_PROTOTYPES_H
+
+#include <linux/kvm_host.h>
+#include <linux/ftrace.h>
+#include <asm/fpu/api.h>
+#include <asm-generic/asm-prototypes.h>
+
+#endif /* _ASM_S390_PROTOTYPES_H */
diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h
index 649eb62..9e02cb7 100644
--- a/arch/s390/include/asm/pci_insn.h
+++ b/arch/s390/include/asm/pci_insn.h
@@ -81,6 +81,6 @@
 int zpci_load(u64 *data, u64 req, u64 offset);
 int zpci_store(u64 data, u64 req, u64 offset);
 int zpci_store_block(const u64 *data, u64 req, u64 offset);
-void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc);
+int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc);
 
 #endif
diff --git a/arch/s390/include/asm/runtime_instr.h b/arch/s390/include/asm/runtime_instr.h
index 402ad6d..c54a931 100644
--- a/arch/s390/include/asm/runtime_instr.h
+++ b/arch/s390/include/asm/runtime_instr.h
@@ -85,6 +85,8 @@
 		load_runtime_instr_cb(&runtime_instr_empty_cb);
 }
 
-void exit_thread_runtime_instr(void);
+struct task_struct;
+
+void runtime_instr_release(struct task_struct *tsk);
 
 #endif /* _RUNTIME_INSTR_H */
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index 12d45f0..ff2fbda 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -29,17 +29,16 @@
 }
 
 #define switch_to(prev,next,last) do {					\
-	if (prev->mm) {							\
-		save_fpu_regs();					\
-		save_access_regs(&prev->thread.acrs[0]);		\
-		save_ri_cb(prev->thread.ri_cb);				\
-	}								\
-	if (next->mm) {							\
-		update_cr_regs(next);					\
-		set_cpu_flag(CIF_FPU);					\
-		restore_access_regs(&next->thread.acrs[0]);		\
-		restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb);	\
-	}								\
+	/* save_fpu_regs() sets the CIF_FPU flag, which enforces	\
+	 * a restore of the floating point / vector registers as	\
+	 * soon as the next task returns to user space			\
+	 */								\
+	save_fpu_regs();						\
+	save_access_regs(&prev->thread.acrs[0]);			\
+	save_ri_cb(prev->thread.ri_cb);					\
+	update_cr_regs(next);						\
+	restore_access_regs(&next->thread.acrs[0]);			\
+	restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb);		\
 	prev = __switch_to(prev,next);					\
 } while (0)
 
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 0f9cd90..f06a9a0 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -263,6 +263,7 @@
 		return retval;
 	}
 
+	groups_sort(group_info);
 	retval = set_current_groups(group_info);
 	put_group_info(group_info);
 
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index c74c592..aaf9dab 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -1548,6 +1548,7 @@
 	{ "vfsq", 0xce, INSTR_VRR_VV000MM },
 	{ "vfs", 0xe2, INSTR_VRR_VVV00MM },
 	{ "vftci", 0x4a, INSTR_VRI_VVIMM },
+	{ "", 0, INSTR_INVALID }
 };
 
 static struct s390_insn opcode_eb[] = {
@@ -1953,7 +1954,7 @@
 {
 	char *mode = user_mode(regs) ? "User" : "Krnl";
 	unsigned char code[64];
-	char buffer[64], *ptr;
+	char buffer[128], *ptr;
 	mm_segment_t old_fs;
 	unsigned long addr;
 	int start, end, opsize, hops, i;
@@ -2016,7 +2017,7 @@
 		start += opsize;
 		pr_cont("%s", buffer);
 		ptr = buffer;
-		ptr += sprintf(ptr, "\n          ");
+		ptr += sprintf(ptr, "\n\t  ");
 		hops++;
 	}
 	pr_cont("\n");
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 0c19686..29d8744 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -345,8 +345,10 @@
 		S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
 	if (test_facility(40))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_LPP;
-	if (test_facility(50) && test_facility(73))
+	if (test_facility(50) && test_facility(73)) {
 		S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
+		__ctl_set_bit(0, 55);
+	}
 	if (test_facility(51))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
 	if (test_facility(129)) {
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index bba4fa7..8382fc6 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -70,8 +70,6 @@
  */
 void exit_thread(struct task_struct *tsk)
 {
-	if (tsk == current)
-		exit_thread_runtime_instr();
 }
 
 void flush_thread(void)
@@ -84,6 +82,7 @@
 
 void arch_release_task_struct(struct task_struct *tsk)
 {
+	runtime_instr_release(tsk);
 }
 
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
@@ -120,6 +119,7 @@
 	memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
 	memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
 	clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
+	p->thread.per_flags = 0;
 	/* Initialize per thread user and system timer values */
 	ti = task_thread_info(p);
 	ti->user_timer = 0;
diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
index fffa0e5..fd03a75 100644
--- a/arch/s390/kernel/runtime_instr.c
+++ b/arch/s390/kernel/runtime_instr.c
@@ -18,11 +18,24 @@
 /* empty control block to disable RI by loading it */
 struct runtime_instr_cb runtime_instr_empty_cb;
 
+void runtime_instr_release(struct task_struct *tsk)
+{
+	kfree(tsk->thread.ri_cb);
+}
+
 static void disable_runtime_instr(void)
 {
-	struct pt_regs *regs = task_pt_regs(current);
+	struct task_struct *task = current;
+	struct pt_regs *regs;
 
+	if (!task->thread.ri_cb)
+		return;
+	regs = task_pt_regs(task);
+	preempt_disable();
 	load_runtime_instr_cb(&runtime_instr_empty_cb);
+	kfree(task->thread.ri_cb);
+	task->thread.ri_cb = NULL;
+	preempt_enable();
 
 	/*
 	 * Make sure the RI bit is deleted from the PSW. If the user did not
@@ -43,17 +56,6 @@
 	cb->valid = 1;
 }
 
-void exit_thread_runtime_instr(void)
-{
-	struct task_struct *task = current;
-
-	if (!task->thread.ri_cb)
-		return;
-	disable_runtime_instr();
-	kfree(task->thread.ri_cb);
-	task->thread.ri_cb = NULL;
-}
-
 SYSCALL_DEFINE1(s390_runtime_instr, int, command)
 {
 	struct runtime_instr_cb *cb;
@@ -62,9 +64,7 @@
 		return -EOPNOTSUPP;
 
 	if (command == S390_RUNTIME_INSTR_STOP) {
-		preempt_disable();
-		exit_thread_runtime_instr();
-		preempt_enable();
+		disable_runtime_instr();
 		return 0;
 	}
 
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 9b59e62..709da45 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -369,10 +369,10 @@
 SYSCALL(sys_sendmmsg,compat_sys_sendmmsg)
 SYSCALL(sys_socket,sys_socket)
 SYSCALL(sys_socketpair,compat_sys_socketpair)		/* 360 */
-SYSCALL(sys_bind,sys_bind)
-SYSCALL(sys_connect,sys_connect)
+SYSCALL(sys_bind,compat_sys_bind)
+SYSCALL(sys_connect,compat_sys_connect)
 SYSCALL(sys_listen,sys_listen)
-SYSCALL(sys_accept4,sys_accept4)
+SYSCALL(sys_accept4,compat_sys_accept4)
 SYSCALL(sys_getsockopt,compat_sys_getsockopt)		/* 365 */
 SYSCALL(sys_setsockopt,compat_sys_setsockopt)
 SYSCALL(sys_getsockname,compat_sys_getsockname)
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index e184353..c2905a1 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -197,8 +197,6 @@
 		VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
 		return -EAGAIN;
 	}
-	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
-		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 	return 0;
 }
 
@@ -209,6 +207,9 @@
 	int reg1, reg2;
 	int rc;
 
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
 	rc = try_handle_skey(vcpu);
 	if (rc)
 		return rc != -EAGAIN ? rc : 0;
@@ -238,6 +239,9 @@
 	int reg1, reg2;
 	int rc;
 
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
 	rc = try_handle_skey(vcpu);
 	if (rc)
 		return rc != -EAGAIN ? rc : 0;
@@ -273,6 +277,9 @@
 	int reg1, reg2;
 	int rc;
 
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
 	rc = try_handle_skey(vcpu);
 	if (rc)
 		return rc != -EAGAIN ? rc : 0;
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 15ffc19..03a1d59 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -354,7 +354,8 @@
 				/* End of second scan with interrupts on. */
 				break;
 			/* First scan complete, reenable interrupts. */
-			zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
+			if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC))
+				break;
 			si = 0;
 			continue;
 		}
@@ -928,7 +929,7 @@
 	if (!s390_pci_probe)
 		return 0;
 
-	if (!test_facility(69) || !test_facility(71) || !test_facility(72))
+	if (!test_facility(69) || !test_facility(71))
 		return 0;
 
 	rc = zpci_debug_init();
diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c
index fa8d7d4..248146d 100644
--- a/arch/s390/pci/pci_insn.c
+++ b/arch/s390/pci/pci_insn.c
@@ -7,6 +7,7 @@
 #include <linux/export.h>
 #include <linux/errno.h>
 #include <linux/delay.h>
+#include <asm/facility.h>
 #include <asm/pci_insn.h>
 #include <asm/pci_debug.h>
 #include <asm/processor.h>
@@ -91,11 +92,14 @@
 }
 
 /* Set Interruption Controls */
-void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc)
+int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc)
 {
+	if (!test_facility(72))
+		return -EIO;
 	asm volatile (
 		"	.insn	rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
 		: : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused));
+	return 0;
 }
 
 /* PCI Load */
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 57154c6..0f183ff 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2391,10 +2391,17 @@
 {
 	high_memory = __va(last_valid_pfn << PAGE_SHIFT);
 
-	register_page_bootmem_info();
 	free_all_bootmem();
 
 	/*
+	 * Must be done after boot memory is put on freelist, because here we
+	 * might set fields in deferred struct pages that have not yet been
+	 * initialized, and free_all_bootmem() initializes all the reserved
+	 * deferred pages for us.
+	 */
+	register_page_bootmem_info();
+
+	/*
 	 * Set up the zero page, mark it reserved, so that page count
 	 * is not manipulated when freeing the page from user ptes.
 	 */
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index c7f2a52..83a73cf 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -54,6 +54,7 @@
 enum mbus_module srmmu_modtype;
 static unsigned int hwbug_bitmask;
 int vac_cache_size;
+EXPORT_SYMBOL(vac_cache_size);
 int vac_line_size;
 
 extern struct resource sparc_iomap;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 64e9609d..d2f5372 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -46,7 +46,7 @@
 	select ARCH_USE_CMPXCHG_LOCKREF		if X86_64
 	select ARCH_USE_QUEUED_RWLOCKS
 	select ARCH_USE_QUEUED_SPINLOCKS
-	select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if SMP
+	select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
 	select ARCH_WANTS_DYNAMIC_TASK_STRUCT
 	select ARCH_WANT_FRAME_POINTERS
 	select ARCH_WANT_IPC_PARSE_VERSION	if X86_32
@@ -65,6 +65,7 @@
 	select GENERIC_CLOCKEVENTS_MIN_ADJUST
 	select GENERIC_CMOS_UPDATE
 	select GENERIC_CPU_AUTOPROBE
+	select GENERIC_CPU_VULNERABILITIES
 	select GENERIC_EARLY_IOREMAP
 	select GENERIC_FIND_FIRST_BIT
 	select GENERIC_IOMAP
@@ -408,6 +409,19 @@
        def_bool y
        depends on X86_GOLDFISH
 
+config RETPOLINE
+	bool "Avoid speculative indirect branches in kernel"
+	default y
+	---help---
+	  Compile kernel with the retpoline compiler options to guard against
+	  kernel-to-user data leaks by avoiding speculative indirect
+	  branches. Requires a compiler with -mindirect-branch=thunk-extern
+	  support for full protection. The kernel may run slower.
+
+	  Without compiler support, at least indirect branches in assembler
+	  code are eliminated. Since this includes the syscall entry path,
+	  it is not entirely pointless.
+
 if X86_32
 config X86_EXTENDED_PLATFORM
 	bool "Support for extended (non-PC) x86 platforms"
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 75725dc..10a7d0f 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -184,6 +184,14 @@
 KBUILD_CFLAGS += $(mflags-y)
 KBUILD_AFLAGS += $(mflags-y)
 
+# Avoid indirect branches in kernel to deal with Spectre
+ifdef CONFIG_RETPOLINE
+    RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
+    ifneq ($(RETPOLINE_CFLAGS),)
+        KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
+    endif
+endif
+
 archscripts: scripts_basic
 	$(Q)$(MAKE) $(build)=arch/x86/tools relocs
 
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index 766a521..2728e1b 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -9,6 +9,7 @@
  */
 #undef CONFIG_PARAVIRT
 #undef CONFIG_PARAVIRT_SPINLOCKS
+#undef CONFIG_PAGE_TABLE_ISOLATION
 #undef CONFIG_KASAN
 
 #include <linux/linkage.h>
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 383a6f8..fa8801b 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -32,6 +32,7 @@
 #include <linux/linkage.h>
 #include <asm/inst.h>
 #include <asm/frame.h>
+#include <asm/nospec-branch.h>
 
 /*
  * The following macros are used to move an (un)aligned 16 byte value to/from
@@ -2734,7 +2735,7 @@
 	pxor INC, STATE4
 	movdqu IV, 0x30(OUTP)
 
-	call *%r11
+	CALL_NOSPEC %r11
 
 	movdqu 0x00(OUTP), INC
 	pxor INC, STATE1
@@ -2779,7 +2780,7 @@
 	_aesni_gf128mul_x_ble()
 	movups IV, (IVP)
 
-	call *%r11
+	CALL_NOSPEC %r11
 
 	movdqu 0x40(OUTP), INC
 	pxor INC, STATE1
diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
index aa9e8bd..77ff4de 100644
--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
@@ -17,6 +17,7 @@
 
 #include <linux/linkage.h>
 #include <asm/frame.h>
+#include <asm/nospec-branch.h>
 
 #define CAMELLIA_TABLE_BYTE_LEN 272
 
@@ -1224,7 +1225,7 @@
 	vpxor 14 * 16(%rax), %xmm15, %xmm14;
 	vpxor 15 * 16(%rax), %xmm15, %xmm15;
 
-	call *%r9;
+	CALL_NOSPEC %r9;
 
 	addq $(16 * 16), %rsp;
 
diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
index 16186c1..7384342 100644
--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
@@ -12,6 +12,7 @@
 
 #include <linux/linkage.h>
 #include <asm/frame.h>
+#include <asm/nospec-branch.h>
 
 #define CAMELLIA_TABLE_BYTE_LEN 272
 
@@ -1337,7 +1338,7 @@
 	vpxor 14 * 32(%rax), %ymm15, %ymm14;
 	vpxor 15 * 32(%rax), %ymm15, %ymm15;
 
-	call *%r9;
+	CALL_NOSPEC %r9;
 
 	addq $(16 * 32), %rsp;
 
diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
index dc05f01..174fd41 100644
--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
@@ -45,6 +45,7 @@
 
 #include <asm/inst.h>
 #include <linux/linkage.h>
+#include <asm/nospec-branch.h>
 
 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
 
@@ -172,7 +173,7 @@
 	movzxw  (bufp, %rax, 2), len
 	lea	crc_array(%rip), bufp
 	lea     (bufp, len, 1), bufp
-	jmp     *bufp
+	JMP_NOSPEC bufp
 
 	################################################################
 	## 2a) PROCESS FULL BLOCKS:
diff --git a/arch/x86/crypto/salsa20_glue.c b/arch/x86/crypto/salsa20_glue.c
index 399a29d..cb91a64 100644
--- a/arch/x86/crypto/salsa20_glue.c
+++ b/arch/x86/crypto/salsa20_glue.c
@@ -59,13 +59,6 @@
 
 	salsa20_ivsetup(ctx, walk.iv);
 
-	if (likely(walk.nbytes == nbytes))
-	{
-		salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
-				      walk.dst.virt.addr, nbytes);
-		return blkcipher_walk_done(desc, &walk, 0);
-	}
-
 	while (walk.nbytes >= 64) {
 		salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
 				      walk.dst.virt.addr,
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index edba860..bdc9aea 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -45,6 +45,7 @@
 #include <asm/asm.h>
 #include <asm/smap.h>
 #include <asm/export.h>
+#include <asm/nospec-branch.h>
 
 	.section .entry.text, "ax"
 
@@ -260,7 +261,7 @@
 
 	/* kernel thread */
 1:	movl	%edi, %eax
-	call	*%ebx
+	CALL_NOSPEC %ebx
 	/*
 	 * A kernel thread is allowed to return here after successfully
 	 * calling do_execve().  Exit to userspace to complete the execve()
@@ -984,7 +985,8 @@
 	movl	0x4(%ebp), %edx
 	subl	$MCOUNT_INSN_SIZE, %eax
 
-	call	*ftrace_trace_function
+	movl    ftrace_trace_function, %ecx
+	CALL_NOSPEC %ecx
 
 	popl	%edx
 	popl	%ecx
@@ -1020,7 +1022,7 @@
 	movl	%eax, %ecx
 	popl	%edx
 	popl	%eax
-	jmp	*%ecx
+	JMP_NOSPEC %ecx
 #endif
 
 #ifdef CONFIG_TRACING
@@ -1062,7 +1064,7 @@
 	movl	%ecx, %es
 	TRACE_IRQS_OFF
 	movl	%esp, %eax			# pt_regs pointer
-	call	*%edi
+	CALL_NOSPEC %edi
 	jmp	ret_from_exception
 END(page_fault)
 
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index e7b0e7f..b9c901ce 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -36,6 +36,8 @@
 #include <asm/smap.h>
 #include <asm/pgtable_types.h>
 #include <asm/export.h>
+#include <asm/kaiser.h>
+#include <asm/nospec-branch.h>
 #include <linux/err.h>
 
 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
@@ -146,6 +148,7 @@
 	 * it is too small to ever cause noticeable irq latency.
 	 */
 	SWAPGS_UNSAFE_STACK
+	SWITCH_KERNEL_CR3_NO_STACK
 	/*
 	 * A hypervisor implementation might want to use a label
 	 * after the swapgs, so that it can do the swapgs
@@ -206,7 +209,12 @@
 	 * It might end up jumping to the slow path.  If it jumps, RAX
 	 * and all argument registers are clobbered.
 	 */
+#ifdef CONFIG_RETPOLINE
+	movq	sys_call_table(, %rax, 8), %rax
+	call	__x86_indirect_thunk_rax
+#else
 	call	*sys_call_table(, %rax, 8)
+#endif
 .Lentry_SYSCALL_64_after_fastpath_call:
 
 	movq	%rax, RAX(%rsp)
@@ -228,6 +236,14 @@
 	movq	RIP(%rsp), %rcx
 	movq	EFLAGS(%rsp), %r11
 	RESTORE_C_REGS_EXCEPT_RCX_R11
+	/*
+	 * This opens a window where we have a user CR3, but are
+	 * running in the kernel.  This makes using the CS
+	 * register useless for telling whether or not we need to
+	 * switch CR3 in NMIs.  Normal interrupts are OK because
+	 * they are off here.
+	 */
+	SWITCH_USER_CR3
 	movq	RSP(%rsp), %rsp
 	USERGS_SYSRET64
 
@@ -323,10 +339,26 @@
 syscall_return_via_sysret:
 	/* rcx and r11 are already restored (see code above) */
 	RESTORE_C_REGS_EXCEPT_RCX_R11
+	/*
+	 * This opens a window where we have a user CR3, but are
+	 * running in the kernel.  This makes using the CS
+	 * register useless for telling whether or not we need to
+	 * switch CR3 in NMIs.  Normal interrupts are OK because
+	 * they are off here.
+	 */
+	SWITCH_USER_CR3
 	movq	RSP(%rsp), %rsp
 	USERGS_SYSRET64
 
 opportunistic_sysret_failed:
+	/*
+	 * This opens a window where we have a user CR3, but are
+	 * running in the kernel.  This makes using the CS
+	 * register useless for telling whether or not we need to
+	 * switch CR3 in NMIs.  Normal interrupts are OK because
+	 * they are off here.
+	 */
+	SWITCH_USER_CR3
 	SWAPGS
 	jmp	restore_c_regs_and_iret
 END(entry_SYSCALL_64)
@@ -354,7 +386,7 @@
 	jmp	entry_SYSCALL64_slow_path
 
 1:
-	jmp	*%rax				/* Called from C */
+	JMP_NOSPEC %rax				/* Called from C */
 END(stub_ptregs_64)
 
 .macro ptregs_stub func
@@ -424,13 +456,14 @@
 	movq	%rsp, %rdi
 	call	syscall_return_slowpath	/* returns with IRQs disabled */
 	TRACE_IRQS_ON			/* user mode is traced as IRQS on */
+	SWITCH_USER_CR3
 	SWAPGS
 	jmp	restore_regs_and_iret
 
 1:
 	/* kernel thread */
 	movq	%r12, %rdi
-	call	*%rbx
+	CALL_NOSPEC %rbx
 	/*
 	 * A kernel thread is allowed to return here after successfully
 	 * calling do_execve().  Exit to userspace to complete the execve()
@@ -478,6 +511,7 @@
 	 * tracking that we're in kernel mode.
 	 */
 	SWAPGS
+	SWITCH_KERNEL_CR3
 
 	/*
 	 * We need to tell lockdep that IRQs are off.  We can't do this until
@@ -535,6 +569,7 @@
 	mov	%rsp,%rdi
 	call	prepare_exit_to_usermode
 	TRACE_IRQS_IRETQ
+	SWITCH_USER_CR3
 	SWAPGS
 	jmp	restore_regs_and_iret
 
@@ -612,6 +647,7 @@
 
 	pushq	%rdi				/* Stash user RDI */
 	SWAPGS
+	SWITCH_KERNEL_CR3
 	movq	PER_CPU_VAR(espfix_waddr), %rdi
 	movq	%rax, (0*8)(%rdi)		/* user RAX */
 	movq	(1*8)(%rsp), %rax		/* user RIP */
@@ -638,6 +674,7 @@
 	 * still points to an RO alias of the ESPFIX stack.
 	 */
 	orq	PER_CPU_VAR(espfix_stack), %rax
+	SWITCH_USER_CR3
 	SWAPGS
 	movq	%rax, %rsp
 
@@ -1022,7 +1059,11 @@
 /*
  * Save all registers in pt_regs, and switch gs if needed.
  * Use slow, but surefire "are we in kernel?" check.
- * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
+ *
+ * Return: ebx=0: needs swapgs but not SWITCH_USER_CR3 in paranoid_exit
+ *         ebx=1: needs neither swapgs nor SWITCH_USER_CR3 in paranoid_exit
+ *         ebx=2: needs both swapgs and SWITCH_USER_CR3 in paranoid_exit
+ *         ebx=3: needs SWITCH_USER_CR3 but not swapgs in paranoid_exit
  */
 ENTRY(paranoid_entry)
 	cld
@@ -1035,7 +1076,26 @@
 	js	1f				/* negative -> in kernel */
 	SWAPGS
 	xorl	%ebx, %ebx
-1:	ret
+1:
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+	/*
+	 * We might have come in between a swapgs and a SWITCH_KERNEL_CR3
+	 * on entry, or between a SWITCH_USER_CR3 and a swapgs on exit.
+	 * Do a conditional SWITCH_KERNEL_CR3: this could safely be done
+	 * unconditionally, but we need to find out whether the reverse
+	 * should be done on return (conveyed to paranoid_exit in %ebx).
+	 */
+	ALTERNATIVE "jmp 2f", "movq %cr3, %rax", X86_FEATURE_KAISER
+	testl	$KAISER_SHADOW_PGD_OFFSET, %eax
+	jz	2f
+	orl	$2, %ebx
+	andq	$(~(X86_CR3_PCID_ASID_MASK | KAISER_SHADOW_PGD_OFFSET)), %rax
+	/* If PCID enabled, set X86_CR3_PCID_NOFLUSH_BIT */
+	ALTERNATIVE "", "bts $63, %rax", X86_FEATURE_PCID
+	movq	%rax, %cr3
+2:
+#endif
+	ret
 END(paranoid_entry)
 
 /*
@@ -1048,19 +1108,26 @@
  * be complicated.  Fortunately, we there's no good reason
  * to try to handle preemption here.
  *
- * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
+ * On entry: ebx=0: needs swapgs but not SWITCH_USER_CR3
+ *           ebx=1: needs neither swapgs nor SWITCH_USER_CR3
+ *           ebx=2: needs both swapgs and SWITCH_USER_CR3
+ *           ebx=3: needs SWITCH_USER_CR3 but not swapgs
  */
 ENTRY(paranoid_exit)
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF_DEBUG
-	testl	%ebx, %ebx			/* swapgs needed? */
-	jnz	paranoid_exit_no_swapgs
-	TRACE_IRQS_IRETQ
-	SWAPGS_UNSAFE_STACK
-	jmp	paranoid_exit_restore
-paranoid_exit_no_swapgs:
 	TRACE_IRQS_IRETQ_DEBUG
-paranoid_exit_restore:
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+	/* No ALTERNATIVE for X86_FEATURE_KAISER: paranoid_entry sets %ebx */
+	testl	$2, %ebx			/* SWITCH_USER_CR3 needed? */
+	jz	paranoid_exit_no_switch
+	SWITCH_USER_CR3
+paranoid_exit_no_switch:
+#endif
+	testl	$1, %ebx			/* swapgs needed? */
+	jnz	paranoid_exit_no_swapgs
+	SWAPGS_UNSAFE_STACK
+paranoid_exit_no_swapgs:
 	RESTORE_EXTRA_REGS
 	RESTORE_C_REGS
 	REMOVE_PT_GPREGS_FROM_STACK 8
@@ -1075,6 +1142,13 @@
 	cld
 	SAVE_C_REGS 8
 	SAVE_EXTRA_REGS 8
+	/*
+	 * error_entry() always returns with a kernel gsbase and
+	 * CR3.  We must also have a kernel CR3/gsbase before
+	 * calling TRACE_IRQS_*.  Just unconditionally switch to
+	 * the kernel CR3 here.
+	 */
+	SWITCH_KERNEL_CR3
 	xorl	%ebx, %ebx
 	testb	$3, CS+8(%rsp)
 	jz	.Lerror_kernelspace
@@ -1235,6 +1309,10 @@
 	 */
 
 	SWAPGS_UNSAFE_STACK
+	/*
+	 * percpu variables are mapped with user CR3, so no need
+	 * to switch CR3 here.
+	 */
 	cld
 	movq	%rsp, %rdx
 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
@@ -1268,12 +1346,34 @@
 
 	movq	%rsp, %rdi
 	movq	$-1, %rsi
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+	/* Unconditionally use kernel CR3 for do_nmi() */
+	/* %rax is saved above, so OK to clobber here */
+	ALTERNATIVE "jmp 2f", "movq %cr3, %rax", X86_FEATURE_KAISER
+	/* If PCID enabled, NOFLUSH now and NOFLUSH on return */
+	ALTERNATIVE "", "bts $63, %rax", X86_FEATURE_PCID
+	pushq	%rax
+	/* mask off "user" bit of pgd address and 12 PCID bits: */
+	andq	$(~(X86_CR3_PCID_ASID_MASK | KAISER_SHADOW_PGD_OFFSET)), %rax
+	movq	%rax, %cr3
+2:
+#endif
 	call	do_nmi
 
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+	/*
+	 * Unconditionally restore CR3.  I know we return to
+	 * kernel code that needs user CR3, but do we ever return
+	 * to "user mode" where we need the kernel CR3?
+	 */
+	ALTERNATIVE "", "popq %rax; movq %rax, %cr3", X86_FEATURE_KAISER
+#endif
+
 	/*
 	 * Return back to user mode.  We must *not* do the normal exit
-	 * work, because we don't want to enable interrupts.  Fortunately,
-	 * do_nmi doesn't modify pt_regs.
+	 * work, because we don't want to enable interrupts.  Do not
+	 * switch to user CR3: we might be going back to kernel code
+	 * that had a user CR3 set.
 	 */
 	SWAPGS
 	jmp	restore_c_regs_and_iret
@@ -1470,22 +1570,55 @@
 	ALLOC_PT_GPREGS_ON_STACK
 
 	/*
-	 * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
-	 * as we should not be calling schedule in NMI context.
-	 * Even with normal interrupts enabled. An NMI should not be
-	 * setting NEED_RESCHED or anything that normal interrupts and
-	 * exceptions might do.
+	 * Use the same approach as paranoid_entry to handle SWAPGS, but
+	 * without CR3 handling since we do that differently in NMIs.  No
+	 * need to use paranoid_exit as we should not be calling schedule
+	 * in NMI context.  Even with normal interrupts enabled. An NMI
+	 * should not be setting NEED_RESCHED or anything that normal
+	 * interrupts and exceptions might do.
 	 */
-	call	paranoid_entry
-
-	/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
+	cld
+	SAVE_C_REGS
+	SAVE_EXTRA_REGS
+	movl	$1, %ebx
+	movl	$MSR_GS_BASE, %ecx
+	rdmsr
+	testl	%edx, %edx
+	js	1f				/* negative -> in kernel */
+	SWAPGS
+	xorl	%ebx, %ebx
+1:
 	movq	%rsp, %rdi
 	movq	$-1, %rsi
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+	/* Unconditionally use kernel CR3 for do_nmi() */
+	/* %rax is saved above, so OK to clobber here */
+	ALTERNATIVE "jmp 2f", "movq %cr3, %rax", X86_FEATURE_KAISER
+	/* If PCID enabled, NOFLUSH now and NOFLUSH on return */
+	ALTERNATIVE "", "bts $63, %rax", X86_FEATURE_PCID
+	pushq	%rax
+	/* mask off "user" bit of pgd address and 12 PCID bits: */
+	andq	$(~(X86_CR3_PCID_ASID_MASK | KAISER_SHADOW_PGD_OFFSET)), %rax
+	movq	%rax, %cr3
+2:
+#endif
+
+	/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
 	call	do_nmi
 
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+	/*
+	 * Unconditionally restore CR3.  We might be returning to
+	 * kernel code that needs user CR3, like just before
+	 * a sysret.
+	 */
+	ALTERNATIVE "", "popq %rax; movq %rax, %cr3", X86_FEATURE_KAISER
+#endif
+
 	testl	%ebx, %ebx			/* swapgs needed? */
 	jnz	nmi_restore
 nmi_swapgs:
+	/* We fixed up CR3 above, so no need to switch it here */
 	SWAPGS_UNSAFE_STACK
 nmi_restore:
 	RESTORE_EXTRA_REGS
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index e1721da..d76a976 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -13,6 +13,8 @@
 #include <asm/irqflags.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
+#include <asm/pgtable_types.h>
+#include <asm/kaiser.h>
 #include <linux/linkage.h>
 #include <linux/err.h>
 
@@ -48,6 +50,7 @@
 ENTRY(entry_SYSENTER_compat)
 	/* Interrupts are off on entry. */
 	SWAPGS_UNSAFE_STACK
+	SWITCH_KERNEL_CR3_NO_STACK
 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 
 	/*
@@ -184,6 +187,7 @@
 ENTRY(entry_SYSCALL_compat)
 	/* Interrupts are off on entry. */
 	SWAPGS_UNSAFE_STACK
+	SWITCH_KERNEL_CR3_NO_STACK
 
 	/* Stash user ESP and switch to the kernel stack. */
 	movl	%esp, %r8d
@@ -259,6 +263,7 @@
 	xorq	%r8, %r8
 	xorq	%r9, %r9
 	xorq	%r10, %r10
+	SWITCH_USER_CR3
 	movq	RSP-ORIG_RAX(%rsp), %rsp
 	swapgs
 	sysretl
@@ -297,7 +302,7 @@
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	ASM_CLAC			/* Do this early to minimize exposure */
 	SWAPGS
-
+	SWITCH_KERNEL_CR3_NO_STACK
 	/*
 	 * User tracing code (ptrace or signal handlers) might assume that
 	 * the saved RAX contains a 32-bit number when we're invoking a 32-bit
@@ -338,6 +343,7 @@
 
 	/* Go back to user mode. */
 	TRACE_IRQS_ON
+	SWITCH_USER_CR3
 	SWAPGS
 	jmp	restore_regs_and_iret
 END(entry_INT80_compat)
diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
index 636c4b3..6bb7e92 100644
--- a/arch/x86/entry/vsyscall/vsyscall_64.c
+++ b/arch/x86/entry/vsyscall/vsyscall_64.c
@@ -66,6 +66,11 @@
 }
 early_param("vsyscall", vsyscall_setup);
 
+bool vsyscall_enabled(void)
+{
+	return vsyscall_mode != NONE;
+}
+
 static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
 			      const char *message)
 {
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index be20239..8e7a3f1 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -2,11 +2,15 @@
 #include <linux/types.h>
 #include <linux/slab.h>
 
+#include <asm/kaiser.h>
 #include <asm/perf_event.h>
 #include <asm/insn.h>
 
 #include "../perf_event.h"
 
+static
+DEFINE_PER_CPU_SHARED_ALIGNED_USER_MAPPED(struct debug_store, cpu_debug_store);
+
 /* The size of a BTS record in bytes: */
 #define BTS_RECORD_SIZE		24
 
@@ -268,6 +272,39 @@
 
 static DEFINE_PER_CPU(void *, insn_buffer);
 
+static void *dsalloc(size_t size, gfp_t flags, int node)
+{
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+	unsigned int order = get_order(size);
+	struct page *page;
+	unsigned long addr;
+
+	page = __alloc_pages_node(node, flags | __GFP_ZERO, order);
+	if (!page)
+		return NULL;
+	addr = (unsigned long)page_address(page);
+	if (kaiser_add_mapping(addr, size, __PAGE_KERNEL) < 0) {
+		__free_pages(page, order);
+		addr = 0;
+	}
+	return (void *)addr;
+#else
+	return kmalloc_node(size, flags | __GFP_ZERO, node);
+#endif
+}
+
+static void dsfree(const void *buffer, size_t size)
+{
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+	if (!buffer)
+		return;
+	kaiser_remove_mapping((unsigned long)buffer, size);
+	free_pages((unsigned long)buffer, get_order(size));
+#else
+	kfree(buffer);
+#endif
+}
+
 static int alloc_pebs_buffer(int cpu)
 {
 	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
@@ -278,7 +315,7 @@
 	if (!x86_pmu.pebs)
 		return 0;
 
-	buffer = kzalloc_node(x86_pmu.pebs_buffer_size, GFP_KERNEL, node);
+	buffer = dsalloc(x86_pmu.pebs_buffer_size, GFP_KERNEL, node);
 	if (unlikely(!buffer))
 		return -ENOMEM;
 
@@ -289,7 +326,7 @@
 	if (x86_pmu.intel_cap.pebs_format < 2) {
 		ibuffer = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node);
 		if (!ibuffer) {
-			kfree(buffer);
+			dsfree(buffer, x86_pmu.pebs_buffer_size);
 			return -ENOMEM;
 		}
 		per_cpu(insn_buffer, cpu) = ibuffer;
@@ -315,7 +352,8 @@
 	kfree(per_cpu(insn_buffer, cpu));
 	per_cpu(insn_buffer, cpu) = NULL;
 
-	kfree((void *)(unsigned long)ds->pebs_buffer_base);
+	dsfree((void *)(unsigned long)ds->pebs_buffer_base,
+			x86_pmu.pebs_buffer_size);
 	ds->pebs_buffer_base = 0;
 }
 
@@ -329,7 +367,7 @@
 	if (!x86_pmu.bts)
 		return 0;
 
-	buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, node);
+	buffer = dsalloc(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, node);
 	if (unlikely(!buffer)) {
 		WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__);
 		return -ENOMEM;
@@ -355,19 +393,15 @@
 	if (!ds || !x86_pmu.bts)
 		return;
 
-	kfree((void *)(unsigned long)ds->bts_buffer_base);
+	dsfree((void *)(unsigned long)ds->bts_buffer_base, BTS_BUFFER_SIZE);
 	ds->bts_buffer_base = 0;
 }
 
 static int alloc_ds_buffer(int cpu)
 {
-	int node = cpu_to_node(cpu);
-	struct debug_store *ds;
+	struct debug_store *ds = per_cpu_ptr(&cpu_debug_store, cpu);
 
-	ds = kzalloc_node(sizeof(*ds), GFP_KERNEL, node);
-	if (unlikely(!ds))
-		return -ENOMEM;
-
+	memset(ds, 0, sizeof(*ds));
 	per_cpu(cpu_hw_events, cpu).ds = ds;
 
 	return 0;
@@ -381,7 +415,6 @@
 		return;
 
 	per_cpu(cpu_hw_events, cpu).ds = NULL;
-	kfree(ds);
 }
 
 void release_ds_buffers(void)
@@ -1389,9 +1422,13 @@
 			continue;
 
 		/* log dropped samples number */
-		if (error[bit])
+		if (error[bit]) {
 			perf_log_lost_samples(event, error[bit]);
 
+			if (perf_event_account_interrupt(event))
+				x86_pmu_stop(event, 0);
+		}
+
 		if (counts[bit]) {
 			__intel_pmu_pebs_event(event, iregs, base,
 					       top, bit, counts[bit]);
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index d4aea31..deca9b9 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -139,7 +139,7 @@
 	".popsection\n"							\
 	".pushsection .altinstr_replacement, \"ax\"\n"			\
 	ALTINSTR_REPLACEMENT(newinstr, feature, 1)			\
-	".popsection"
+	".popsection\n"
 
 #define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
 	OLDINSTR_2(oldinstr, 1, 2)					\
@@ -150,7 +150,7 @@
 	".pushsection .altinstr_replacement, \"ax\"\n"			\
 	ALTINSTR_REPLACEMENT(newinstr1, feature1, 1)			\
 	ALTINSTR_REPLACEMENT(newinstr2, feature2, 2)			\
-	".popsection"
+	".popsection\n"
 
 /*
  * Alternative instructions for different CPU types or capabilities.
diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h
index 44b8762..b15aa40 100644
--- a/arch/x86/include/asm/asm-prototypes.h
+++ b/arch/x86/include/asm/asm-prototypes.h
@@ -10,7 +10,32 @@
 #include <asm/pgtable.h>
 #include <asm/special_insns.h>
 #include <asm/preempt.h>
+#include <asm/asm.h>
 
 #ifndef CONFIG_X86_CMPXCHG64
 extern void cmpxchg8b_emu(void);
 #endif
+
+#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_X86_32
+#define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_e ## reg(void);
+#else
+#define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_r ## reg(void);
+INDIRECT_THUNK(8)
+INDIRECT_THUNK(9)
+INDIRECT_THUNK(10)
+INDIRECT_THUNK(11)
+INDIRECT_THUNK(12)
+INDIRECT_THUNK(13)
+INDIRECT_THUNK(14)
+INDIRECT_THUNK(15)
+#endif
+INDIRECT_THUNK(ax)
+INDIRECT_THUNK(bx)
+INDIRECT_THUNK(cx)
+INDIRECT_THUNK(dx)
+INDIRECT_THUNK(si)
+INDIRECT_THUNK(di)
+INDIRECT_THUNK(bp)
+INDIRECT_THUNK(sp)
+#endif /* CONFIG_RETPOLINE */
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 7acb51c..0052352 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -125,4 +125,15 @@
 /* For C file, we already have NOKPROBE_SYMBOL macro */
 #endif
 
+#ifndef __ASSEMBLY__
+/*
+ * This output constraint should be used for any inline asm which has a "call"
+ * instruction.  Otherwise the asm may be inserted before the frame pointer
+ * gets set up by the containing function.  If you forget to do this, objtool
+ * may print a "call without frame pointer save/setup" warning.
+ */
+register unsigned long current_stack_pointer asm(_ASM_SP);
+#define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer)
+#endif
+
 #endif /* _ASM_X86_ASM_H */
diff --git a/arch/x86/include/asm/cmdline.h b/arch/x86/include/asm/cmdline.h
index e01f7f7..84ae170 100644
--- a/arch/x86/include/asm/cmdline.h
+++ b/arch/x86/include/asm/cmdline.h
@@ -2,5 +2,7 @@
 #define _ASM_X86_CMDLINE_H
 
 int cmdline_find_option_bool(const char *cmdline_ptr, const char *option);
+int cmdline_find_option(const char *cmdline_ptr, const char *option,
+			char *buffer, int bufsize);
 
 #endif /* _ASM_X86_CMDLINE_H */
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 1d2b69f..9ea67a0 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -135,6 +135,8 @@
 	set_bit(bit, (unsigned long *)cpu_caps_set);	\
 } while (0)
 
+#define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit)
+
 #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_X86_FAST_FEATURE_TESTS)
 /*
  * Static testing of CPU features.  Used the same as boot_cpu_has().
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index ed10b5b..4467568 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -189,14 +189,21 @@
 
 #define X86_FEATURE_CPB		( 7*32+ 2) /* AMD Core Performance Boost */
 #define X86_FEATURE_EPB		( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
+#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 4) /* Effectively INVPCID && CR4.PCIDE=1 */
 
 #define X86_FEATURE_HW_PSTATE	( 7*32+ 8) /* AMD HW-PState */
 #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
 
+#define X86_FEATURE_RETPOLINE	( 7*32+12) /* Generic Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* AMD Retpoline mitigation for Spectre variant 2 */
+
 #define X86_FEATURE_INTEL_PT	( 7*32+15) /* Intel Processor Trace */
 #define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */
 #define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */
 
+/* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */
+#define X86_FEATURE_KAISER	( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */
+
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW  ( 8*32+ 0) /* Intel TPR Shadow */
 #define X86_FEATURE_VNMI        ( 8*32+ 1) /* Intel Virtual NMI */
@@ -312,5 +319,8 @@
 #define X86_BUG_SWAPGS_FENCE	X86_BUG(11) /* SWAPGS without input dep on GS */
 #define X86_BUG_MONITOR		X86_BUG(12) /* IPI required to wake up remote CPU */
 #define X86_BUG_AMD_E400	X86_BUG(13) /* CPU is among the affected by Erratum 400 */
+#define X86_BUG_CPU_MELTDOWN	X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
+#define X86_BUG_SPECTRE_V1	X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
+#define X86_BUG_SPECTRE_V2	X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index 12080d8..2ed5a2b 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -43,7 +43,7 @@
 	struct desc_struct gdt[GDT_ENTRIES];
 } __attribute__((aligned(PAGE_SIZE)));
 
-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
+DECLARE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(struct gdt_page, gdt_page);
 
 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
 {
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index 85599ad..21c5ac1 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -21,11 +21,13 @@
 # define DISABLE_K6_MTRR	(1<<(X86_FEATURE_K6_MTRR & 31))
 # define DISABLE_CYRIX_ARR	(1<<(X86_FEATURE_CYRIX_ARR & 31))
 # define DISABLE_CENTAUR_MCR	(1<<(X86_FEATURE_CENTAUR_MCR & 31))
+# define DISABLE_PCID		0
 #else
 # define DISABLE_VME		0
 # define DISABLE_K6_MTRR	0
 # define DISABLE_CYRIX_ARR	0
 # define DISABLE_CENTAUR_MCR	0
+# define DISABLE_PCID		(1<<(X86_FEATURE_PCID & 31))
 #endif /* CONFIG_X86_64 */
 
 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
@@ -43,7 +45,7 @@
 #define DISABLED_MASK1	0
 #define DISABLED_MASK2	0
 #define DISABLED_MASK3	(DISABLE_CYRIX_ARR|DISABLE_CENTAUR_MCR|DISABLE_K6_MTRR)
-#define DISABLED_MASK4	0
+#define DISABLED_MASK4	(DISABLE_PCID)
 #define DISABLED_MASK5	0
 #define DISABLED_MASK6	0
 #define DISABLED_MASK7	0
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 59405a2..9b76cd3 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -22,8 +22,8 @@
 #ifdef CONFIG_SMP
 	unsigned int irq_resched_count;
 	unsigned int irq_call_count;
-	unsigned int irq_tlb_count;
 #endif
+	unsigned int irq_tlb_count;
 #ifdef CONFIG_X86_THERMAL_VECTOR
 	unsigned int irq_thermal_count;
 #endif
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index b90e105..0817d63 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -178,7 +178,7 @@
 #define VECTOR_RETRIGGERED	((void *)~0UL)
 
 typedef struct irq_desc* vector_irq_t[NR_VECTORS];
-DECLARE_PER_CPU(vector_irq_t, vector_irq);
+DECLARE_PER_CPU_USER_MAPPED(vector_irq_t, vector_irq);
 
 #endif /* !ASSEMBLY_ */
 
diff --git a/arch/x86/include/asm/kaiser.h b/arch/x86/include/asm/kaiser.h
new file mode 100644
index 0000000..802bbbd
--- /dev/null
+++ b/arch/x86/include/asm/kaiser.h
@@ -0,0 +1,141 @@
+#ifndef _ASM_X86_KAISER_H
+#define _ASM_X86_KAISER_H
+
+#include <uapi/asm/processor-flags.h> /* For PCID constants */
+
+/*
+ * This file includes the definitions for the KAISER feature.
+ * KAISER is a counter measure against x86_64 side channel attacks on
+ * the kernel virtual memory.  It has a shadow pgd for every process: the
+ * shadow pgd has a minimalistic kernel-set mapped, but includes the whole
+ * user memory. Within a kernel context switch, or when an interrupt is handled,
+ * the pgd is switched to the normal one. When the system switches to user mode,
+ * the shadow pgd is enabled. By this, the virtual memory caches are freed,
+ * and the user may not attack the whole kernel memory.
+ *
+ * A minimalistic kernel mapping holds the parts needed to be mapped in user
+ * mode, such as the entry/exit functions of the user space, or the stacks.
+ */
+
+#define KAISER_SHADOW_PGD_OFFSET 0x1000
+
+#ifdef __ASSEMBLY__
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+
+.macro _SWITCH_TO_KERNEL_CR3 reg
+movq %cr3, \reg
+andq $(~(X86_CR3_PCID_ASID_MASK | KAISER_SHADOW_PGD_OFFSET)), \reg
+/* If PCID enabled, set X86_CR3_PCID_NOFLUSH_BIT */
+ALTERNATIVE "", "bts $63, \reg", X86_FEATURE_PCID
+movq \reg, %cr3
+.endm
+
+.macro _SWITCH_TO_USER_CR3 reg regb
+/*
+ * regb must be the low byte portion of reg: because we have arranged
+ * for the low byte of the user PCID to serve as the high byte of NOFLUSH
+ * (0x80 for each when PCID is enabled, or 0x00 when PCID and NOFLUSH are
+ * not enabled): so that the one register can update both memory and cr3.
+ */
+movq %cr3, \reg
+orq  PER_CPU_VAR(x86_cr3_pcid_user), \reg
+js   9f
+/* If PCID enabled, FLUSH this time, reset to NOFLUSH for next time */
+movb \regb, PER_CPU_VAR(x86_cr3_pcid_user+7)
+9:
+movq \reg, %cr3
+.endm
+
+.macro SWITCH_KERNEL_CR3
+ALTERNATIVE "jmp 8f", "pushq %rax", X86_FEATURE_KAISER
+_SWITCH_TO_KERNEL_CR3 %rax
+popq %rax
+8:
+.endm
+
+.macro SWITCH_USER_CR3
+ALTERNATIVE "jmp 8f", "pushq %rax", X86_FEATURE_KAISER
+_SWITCH_TO_USER_CR3 %rax %al
+popq %rax
+8:
+.endm
+
+.macro SWITCH_KERNEL_CR3_NO_STACK
+ALTERNATIVE "jmp 8f", \
+	__stringify(movq %rax, PER_CPU_VAR(unsafe_stack_register_backup)), \
+	X86_FEATURE_KAISER
+_SWITCH_TO_KERNEL_CR3 %rax
+movq PER_CPU_VAR(unsafe_stack_register_backup), %rax
+8:
+.endm
+
+#else /* CONFIG_PAGE_TABLE_ISOLATION */
+
+.macro SWITCH_KERNEL_CR3
+.endm
+.macro SWITCH_USER_CR3
+.endm
+.macro SWITCH_KERNEL_CR3_NO_STACK
+.endm
+
+#endif /* CONFIG_PAGE_TABLE_ISOLATION */
+
+#else /* __ASSEMBLY__ */
+
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+/*
+ * Upon kernel/user mode switch, it may happen that the address
+ * space has to be switched before the registers have been
+ * stored.  To change the address space, another register is
+ * needed.  A register therefore has to be stored/restored.
+*/
+DECLARE_PER_CPU_USER_MAPPED(unsigned long, unsafe_stack_register_backup);
+
+DECLARE_PER_CPU(unsigned long, x86_cr3_pcid_user);
+
+extern char __per_cpu_user_mapped_start[], __per_cpu_user_mapped_end[];
+
+extern int kaiser_enabled;
+extern void __init kaiser_check_boottime_disable(void);
+#else
+#define kaiser_enabled	0
+static inline void __init kaiser_check_boottime_disable(void) {}
+#endif /* CONFIG_PAGE_TABLE_ISOLATION */
+
+/*
+ * Kaiser function prototypes are needed even when CONFIG_PAGE_TABLE_ISOLATION is not set,
+ * so as to build with tests on kaiser_enabled instead of #ifdefs.
+ */
+
+/**
+ *  kaiser_add_mapping - map a virtual memory part to the shadow (user) mapping
+ *  @addr: the start address of the range
+ *  @size: the size of the range
+ *  @flags: The mapping flags of the pages
+ *
+ *  The mapping is done on a global scope, so no bigger
+ * synchronization has to be done.  The pages have to be
+ *  manually unmapped again when they are not needed any longer.
+ */
+extern int kaiser_add_mapping(unsigned long addr, unsigned long size, unsigned long flags);
+
+/**
+ *  kaiser_remove_mapping - unmap a virtual memory part of the shadow mapping
+ *  @start: the start address of the range
+ *  @size: the size of the range
+ */
+extern void kaiser_remove_mapping(unsigned long start, unsigned long size);
+
+/**
+ *  kaiser_init - Initialize the shadow mapping
+ *
+ *  Most parts of the shadow mapping can be mapped upon boot
+ *  time.  Only per-process things like the thread stacks
+ *  or a new LDT have to be mapped at runtime.  These boot-
+ *  time mappings are permanent and never unmapped.
+ */
+extern void kaiser_init(void);
+
+#endif /* __ASSEMBLY */
+
+#endif /* _ASM_X86_KAISER_H */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index bdde807..cbd1d44 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1397,4 +1397,7 @@
 #endif
 }
 
+void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+		unsigned long start, unsigned long end);
+
 #endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 72198c6..8b272a0 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -33,12 +33,6 @@
 #endif
 } mm_context_t;
 
-#ifdef CONFIG_SMP
 void leave_mm(int cpu);
-#else
-static inline void leave_mm(int cpu)
-{
-}
-#endif
 
 #endif /* _ASM_X86_MMU_H */
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index f9dd224..d23e355 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -99,10 +99,8 @@
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
-#ifdef CONFIG_SMP
 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
 		this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
-#endif
 }
 
 static inline int init_new_context(struct task_struct *tsk,
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index b601dda..b11c4c0 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -330,6 +330,9 @@
 #define FAM10H_MMIO_CONF_BASE_MASK	0xfffffffULL
 #define FAM10H_MMIO_CONF_BASE_SHIFT	20
 #define MSR_FAM10H_NODE_ID		0xc001100c
+#define MSR_F10H_DECFG			0xc0011029
+#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT	1
+#define MSR_F10H_DECFG_LFENCE_SERIALIZE		BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT)
 
 /* K8 MSRs */
 #define MSR_K8_TOP_MEM1			0xc001001a
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
new file mode 100644
index 0000000..402a11c
--- /dev/null
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -0,0 +1,214 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __NOSPEC_BRANCH_H__
+#define __NOSPEC_BRANCH_H__
+
+#include <asm/alternative.h>
+#include <asm/alternative-asm.h>
+#include <asm/cpufeatures.h>
+
+/*
+ * Fill the CPU return stack buffer.
+ *
+ * Each entry in the RSB, if used for a speculative 'ret', contains an
+ * infinite 'pause; jmp' loop to capture speculative execution.
+ *
+ * This is required in various cases for retpoline and IBRS-based
+ * mitigations for the Spectre variant 2 vulnerability. Sometimes to
+ * eliminate potentially bogus entries from the RSB, and sometimes
+ * purely to ensure that it doesn't get empty, which on some CPUs would
+ * allow predictions from other (unwanted!) sources to be used.
+ *
+ * We define a CPP macro such that it can be used from both .S files and
+ * inline assembly. It's possible to do a .macro and then include that
+ * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
+ */
+
+#define RSB_CLEAR_LOOPS		32	/* To forcibly overwrite all entries */
+#define RSB_FILL_LOOPS		16	/* To avoid underflow */
+
+/*
+ * Google experimented with loop-unrolling and this turned out to be
+ * the optimal version — two calls, each with their own speculation
+ * trap should their return address end up getting used, in a loop.
+ */
+#define __FILL_RETURN_BUFFER(reg, nr, sp)	\
+	mov	$(nr/2), reg;			\
+771:						\
+	call	772f;				\
+773:	/* speculation trap */			\
+	pause;					\
+	jmp	773b;				\
+772:						\
+	call	774f;				\
+775:	/* speculation trap */			\
+	pause;					\
+	jmp	775b;				\
+774:						\
+	dec	reg;				\
+	jnz	771b;				\
+	add	$(BITS_PER_LONG/8) * nr, sp;
+
+#ifdef __ASSEMBLY__
+
+/*
+ * This should be used immediately before a retpoline alternative.  It tells
+ * objtool where the retpolines are so that it can make sense of the control
+ * flow by just reading the original instruction(s) and ignoring the
+ * alternatives.
+ */
+.macro ANNOTATE_NOSPEC_ALTERNATIVE
+	.Lannotate_\@:
+	.pushsection .discard.nospec
+	.long .Lannotate_\@ - .
+	.popsection
+.endm
+
+/*
+ * These are the bare retpoline primitives for indirect jmp and call.
+ * Do not use these directly; they only exist to make the ALTERNATIVE
+ * invocation below less ugly.
+ */
+.macro RETPOLINE_JMP reg:req
+	call	.Ldo_rop_\@
+.Lspec_trap_\@:
+	pause
+	jmp	.Lspec_trap_\@
+.Ldo_rop_\@:
+	mov	\reg, (%_ASM_SP)
+	ret
+.endm
+
+/*
+ * This is a wrapper around RETPOLINE_JMP so the called function in reg
+ * returns to the instruction after the macro.
+ */
+.macro RETPOLINE_CALL reg:req
+	jmp	.Ldo_call_\@
+.Ldo_retpoline_jmp_\@:
+	RETPOLINE_JMP \reg
+.Ldo_call_\@:
+	call	.Ldo_retpoline_jmp_\@
+.endm
+
+/*
+ * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
+ * indirect jmp/call which may be susceptible to the Spectre variant 2
+ * attack.
+ */
+.macro JMP_NOSPEC reg:req
+#ifdef CONFIG_RETPOLINE
+	ANNOTATE_NOSPEC_ALTERNATIVE
+	ALTERNATIVE_2 __stringify(jmp *\reg),				\
+		__stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE,	\
+		__stringify(lfence; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
+#else
+	jmp	*\reg
+#endif
+.endm
+
+.macro CALL_NOSPEC reg:req
+#ifdef CONFIG_RETPOLINE
+	ANNOTATE_NOSPEC_ALTERNATIVE
+	ALTERNATIVE_2 __stringify(call *\reg),				\
+		__stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\
+		__stringify(lfence; call *\reg), X86_FEATURE_RETPOLINE_AMD
+#else
+	call	*\reg
+#endif
+.endm
+
+ /*
+  * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
+  * monstrosity above, manually.
+  */
+.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
+#ifdef CONFIG_RETPOLINE
+	ANNOTATE_NOSPEC_ALTERNATIVE
+	ALTERNATIVE "jmp .Lskip_rsb_\@",				\
+		__stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP))	\
+		\ftr
+.Lskip_rsb_\@:
+#endif
+.endm
+
+#else /* __ASSEMBLY__ */
+
+#define ANNOTATE_NOSPEC_ALTERNATIVE				\
+	"999:\n\t"						\
+	".pushsection .discard.nospec\n\t"			\
+	".long 999b - .\n\t"					\
+	".popsection\n\t"
+
+#if defined(CONFIG_X86_64) && defined(RETPOLINE)
+
+/*
+ * Since the inline asm uses the %V modifier which is only in newer GCC,
+ * the 64-bit one is dependent on RETPOLINE not CONFIG_RETPOLINE.
+ */
+# define CALL_NOSPEC						\
+	ANNOTATE_NOSPEC_ALTERNATIVE				\
+	ALTERNATIVE(						\
+	"call *%[thunk_target]\n",				\
+	"call __x86_indirect_thunk_%V[thunk_target]\n",		\
+	X86_FEATURE_RETPOLINE)
+# define THUNK_TARGET(addr) [thunk_target] "r" (addr)
+
+#elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE)
+/*
+ * For i386 we use the original ret-equivalent retpoline, because
+ * otherwise we'll run out of registers. We don't care about CET
+ * here, anyway.
+ */
+# define CALL_NOSPEC ALTERNATIVE("call *%[thunk_target]\n",	\
+	"       jmp    904f;\n"					\
+	"       .align 16\n"					\
+	"901:	call   903f;\n"					\
+	"902:	pause;\n"					\
+	"       jmp    902b;\n"					\
+	"       .align 16\n"					\
+	"903:	addl   $4, %%esp;\n"				\
+	"       pushl  %[thunk_target];\n"			\
+	"       ret;\n"						\
+	"       .align 16\n"					\
+	"904:	call   901b;\n",				\
+	X86_FEATURE_RETPOLINE)
+
+# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
+#else /* No retpoline for C / inline asm */
+# define CALL_NOSPEC "call *%[thunk_target]\n"
+# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
+#endif
+
+/* The Spectre V2 mitigation variants */
+enum spectre_v2_mitigation {
+	SPECTRE_V2_NONE,
+	SPECTRE_V2_RETPOLINE_MINIMAL,
+	SPECTRE_V2_RETPOLINE_MINIMAL_AMD,
+	SPECTRE_V2_RETPOLINE_GENERIC,
+	SPECTRE_V2_RETPOLINE_AMD,
+	SPECTRE_V2_IBRS,
+};
+
+/*
+ * On VMEXIT we must ensure that no RSB predictions learned in the guest
+ * can be followed in the host, by overwriting the RSB completely. Both
+ * retpoline and IBRS mitigations for Spectre v2 need this; only on future
+ * CPUs with IBRS_ATT *might* it be avoided.
+ */
+static inline void vmexit_fill_RSB(void)
+{
+#ifdef CONFIG_RETPOLINE
+	unsigned long loops = RSB_CLEAR_LOOPS / 2;
+
+	asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
+		      ALTERNATIVE("jmp 910f",
+				  __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
+				  X86_FEATURE_RETPOLINE)
+		      "910:"
+		      : "=&r" (loops), ASM_CALL_CONSTRAINT
+		      : "r" (loops) : "memory" );
+#endif
+}
+#endif /* __ASSEMBLY__ */
+#endif /* __NOSPEC_BRANCH_H__ */
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index b6d4259..1178a51 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -27,6 +27,17 @@
  */
 extern gfp_t __userpte_alloc_gfp;
 
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+/*
+ * Instead of one PGD, we acquire two PGDs.  Being order-1, it is
+ * both 8k in size and 8k-aligned.  That lets us just flip bit 12
+ * in a pointer to swap between the two 4k halves.
+ */
+#define PGD_ALLOCATION_ORDER 1
+#else
+#define PGD_ALLOCATION_ORDER 0
+#endif
+
 /*
  * Allocate and free page tables.
  */
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 437feb4..2536f90 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -18,6 +18,12 @@
 #ifndef __ASSEMBLY__
 #include <asm/x86_init.h>
 
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+extern int kaiser_enabled;
+#else
+#define kaiser_enabled 0
+#endif
+
 void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
 void ptdump_walk_pgd_level_checkwx(void);
 
@@ -690,7 +696,17 @@
 
 static inline int pgd_bad(pgd_t pgd)
 {
-	return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
+	pgdval_t ignore_flags = _PAGE_USER;
+	/*
+	 * We set NX on KAISER pgds that map userspace memory so
+	 * that userspace can not meaningfully use the kernel
+	 * page table by accident; it will fault on the first
+	 * instruction it tries to run.  See native_set_pgd().
+	 */
+	if (kaiser_enabled)
+		ignore_flags |= _PAGE_NX;
+
+	return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE;
 }
 
 static inline int pgd_none(pgd_t pgd)
@@ -903,7 +919,15 @@
  */
 static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
 {
-       memcpy(dst, src, count * sizeof(pgd_t));
+	memcpy(dst, src, count * sizeof(pgd_t));
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+	if (kaiser_enabled) {
+		/* Clone the shadow pgd part as well */
+		memcpy(native_get_shadow_pgd(dst),
+			native_get_shadow_pgd(src),
+			count * sizeof(pgd_t));
+	}
+#endif
 }
 
 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 1cc82ec..ce97c8c6 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -106,9 +106,32 @@
 	native_set_pud(pud, native_make_pud(0));
 }
 
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+extern pgd_t kaiser_set_shadow_pgd(pgd_t *pgdp, pgd_t pgd);
+
+static inline pgd_t *native_get_shadow_pgd(pgd_t *pgdp)
+{
+#ifdef CONFIG_DEBUG_VM
+	/* linux/mmdebug.h may not have been included at this point */
+	BUG_ON(!kaiser_enabled);
+#endif
+	return (pgd_t *)((unsigned long)pgdp | (unsigned long)PAGE_SIZE);
+}
+#else
+static inline pgd_t kaiser_set_shadow_pgd(pgd_t *pgdp, pgd_t pgd)
+{
+	return pgd;
+}
+static inline pgd_t *native_get_shadow_pgd(pgd_t *pgdp)
+{
+	BUILD_BUG_ON(1);
+	return NULL;
+}
+#endif /* CONFIG_PAGE_TABLE_ISOLATION */
+
 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
 {
-	*pgdp = pgd;
+	*pgdp = kaiser_set_shadow_pgd(pgdp, pgd);
 }
 
 static inline void native_pgd_clear(pgd_t *pgd)
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 8b4de22..f1c8ac4 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -119,7 +119,7 @@
 #define _PAGE_DEVMAP	(_AT(pteval_t, 0))
 #endif
 
-#define _PAGE_PROTNONE	(_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
+#define _PAGE_PROTNONE  (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
 
 #define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
 			 _PAGE_ACCESSED | _PAGE_DIRTY)
@@ -137,6 +137,33 @@
 			 _PAGE_SOFT_DIRTY)
 #define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
 
+/* The ASID is the lower 12 bits of CR3 */
+#define X86_CR3_PCID_ASID_MASK  (_AC((1<<12)-1,UL))
+
+/* Mask for all the PCID-related bits in CR3: */
+#define X86_CR3_PCID_MASK       (X86_CR3_PCID_NOFLUSH | X86_CR3_PCID_ASID_MASK)
+#define X86_CR3_PCID_ASID_KERN  (_AC(0x0,UL))
+
+#if defined(CONFIG_PAGE_TABLE_ISOLATION) && defined(CONFIG_X86_64)
+/* Let X86_CR3_PCID_ASID_USER be usable for the X86_CR3_PCID_NOFLUSH bit */
+#define X86_CR3_PCID_ASID_USER	(_AC(0x80,UL))
+
+#define X86_CR3_PCID_KERN_FLUSH		(X86_CR3_PCID_ASID_KERN)
+#define X86_CR3_PCID_USER_FLUSH		(X86_CR3_PCID_ASID_USER)
+#define X86_CR3_PCID_KERN_NOFLUSH	(X86_CR3_PCID_NOFLUSH | X86_CR3_PCID_ASID_KERN)
+#define X86_CR3_PCID_USER_NOFLUSH	(X86_CR3_PCID_NOFLUSH | X86_CR3_PCID_ASID_USER)
+#else
+#define X86_CR3_PCID_ASID_USER  (_AC(0x0,UL))
+/*
+ * PCIDs are unsupported on 32-bit and none of these bits can be
+ * set in CR3:
+ */
+#define X86_CR3_PCID_KERN_FLUSH		(0)
+#define X86_CR3_PCID_USER_FLUSH		(0)
+#define X86_CR3_PCID_KERN_NOFLUSH	(0)
+#define X86_CR3_PCID_USER_NOFLUSH	(0)
+#endif
+
 /*
  * The cache modes defined here are used to translate between pure SW usage
  * and the HW defined cache mode bits and/or PAT entries.
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 83db0ea..e40b19c 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -156,8 +156,8 @@
 extern struct cpuinfo_x86	new_cpu_data;
 
 extern struct tss_struct	doublefault_tss;
-extern __u32			cpu_caps_cleared[NCAPINTS];
-extern __u32			cpu_caps_set[NCAPINTS];
+extern __u32			cpu_caps_cleared[NCAPINTS + NBUGINTS];
+extern __u32			cpu_caps_set[NCAPINTS + NBUGINTS];
 
 #ifdef CONFIG_SMP
 DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
@@ -308,7 +308,7 @@
 
 } ____cacheline_aligned;
 
-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss);
+DECLARE_PER_CPU_SHARED_ALIGNED_USER_MAPPED(struct tss_struct, cpu_tss);
 
 #ifdef CONFIG_X86_32
 DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index 91dfcaf..bad25bb 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -21,7 +21,7 @@
 asmlinkage long sys_iopl(unsigned int);
 
 /* kernel/ldt.c */
-asmlinkage int sys_modify_ldt(int, void __user *, unsigned long);
+asmlinkage long sys_modify_ldt(int, void __user *, unsigned long);
 
 /* kernel/signal.c */
 asmlinkage long sys_rt_sigreturn(void);
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index ad6f5eb0..bdf9c4c 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -152,17 +152,6 @@
  */
 #ifndef __ASSEMBLY__
 
-static inline unsigned long current_stack_pointer(void)
-{
-	unsigned long sp;
-#ifdef CONFIG_X86_64
-	asm("mov %%rsp,%0" : "=g" (sp));
-#else
-	asm("mov %%esp,%0" : "=g" (sp));
-#endif
-	return sp;
-}
-
 /*
  * Walks up the stack frames to make sure that the specified object is
  * entirely contained by a single stack frame.
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index fc5abff..94146f6 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -7,6 +7,7 @@
 #include <asm/processor.h>
 #include <asm/cpufeature.h>
 #include <asm/special_insns.h>
+#include <asm/smp.h>
 
 static inline void __invpcid(unsigned long pcid, unsigned long addr,
 			     unsigned long type)
@@ -65,10 +66,8 @@
 #endif
 
 struct tlb_state {
-#ifdef CONFIG_SMP
 	struct mm_struct *active_mm;
 	int state;
-#endif
 
 	/*
 	 * Access to this CR4 shadow and to H/W CR4 is protected by
@@ -133,6 +132,24 @@
 	cr4_set_bits(mask);
 }
 
+/*
+ * Declare a couple of kaiser interfaces here for convenience,
+ * to avoid the need for asm/kaiser.h in unexpected places.
+ */
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+extern int kaiser_enabled;
+extern void kaiser_setup_pcid(void);
+extern void kaiser_flush_tlb_on_return_to_user(void);
+#else
+#define kaiser_enabled 0
+static inline void kaiser_setup_pcid(void)
+{
+}
+static inline void kaiser_flush_tlb_on_return_to_user(void)
+{
+}
+#endif
+
 static inline void __native_flush_tlb(void)
 {
 	/*
@@ -141,6 +158,8 @@
 	 * back:
 	 */
 	preempt_disable();
+	if (kaiser_enabled)
+		kaiser_flush_tlb_on_return_to_user();
 	native_write_cr3(native_read_cr3());
 	preempt_enable();
 }
@@ -150,20 +169,27 @@
 	unsigned long cr4;
 
 	cr4 = this_cpu_read(cpu_tlbstate.cr4);
-	/* clear PGE */
-	native_write_cr4(cr4 & ~X86_CR4_PGE);
-	/* write old PGE again and flush TLBs */
-	native_write_cr4(cr4);
+	if (cr4 & X86_CR4_PGE) {
+		/* clear PGE and flush TLB of all entries */
+		native_write_cr4(cr4 & ~X86_CR4_PGE);
+		/* restore PGE as it was before */
+		native_write_cr4(cr4);
+	} else {
+		/* do it with cr3, letting kaiser flush user PCID */
+		__native_flush_tlb();
+	}
 }
 
 static inline void __native_flush_tlb_global(void)
 {
 	unsigned long flags;
 
-	if (static_cpu_has(X86_FEATURE_INVPCID)) {
+	if (this_cpu_has(X86_FEATURE_INVPCID)) {
 		/*
 		 * Using INVPCID is considerably faster than a pair of writes
 		 * to CR4 sandwiched inside an IRQ flag save/restore.
+		 *
+		 * Note, this works with CR4.PCIDE=0 or 1.
 		 */
 		invpcid_flush_all();
 		return;
@@ -175,23 +201,52 @@
 	 * be called from deep inside debugging code.)
 	 */
 	raw_local_irq_save(flags);
-
 	__native_flush_tlb_global_irq_disabled();
-
 	raw_local_irq_restore(flags);
 }
 
 static inline void __native_flush_tlb_single(unsigned long addr)
 {
-	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
+	/*
+	 * SIMICS #GP's if you run INVPCID with type 2/3
+	 * and X86_CR4_PCIDE clear.  Shame!
+	 *
+	 * The ASIDs used below are hard-coded.  But, we must not
+	 * call invpcid(type=1/2) before CR4.PCIDE=1.  Just call
+	 * invlpg in the case we are called early.
+	 */
+
+	if (!this_cpu_has(X86_FEATURE_INVPCID_SINGLE)) {
+		if (kaiser_enabled)
+			kaiser_flush_tlb_on_return_to_user();
+		asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
+		return;
+	}
+	/* Flush the address out of both PCIDs. */
+	/*
+	 * An optimization here might be to determine addresses
+	 * that are only kernel-mapped and only flush the kernel
+	 * ASID.  But, userspace flushes are probably much more
+	 * important performance-wise.
+	 *
+	 * Make sure to do only a single invpcid when KAISER is
+	 * disabled and we have only a single ASID.
+	 */
+	if (kaiser_enabled)
+		invpcid_flush_one(X86_CR3_PCID_ASID_USER, addr);
+	invpcid_flush_one(X86_CR3_PCID_ASID_KERN, addr);
 }
 
 static inline void __flush_tlb_all(void)
 {
-	if (boot_cpu_has(X86_FEATURE_PGE))
-		__flush_tlb_global();
-	else
-		__flush_tlb();
+	__flush_tlb_global();
+	/*
+	 * Note: if we somehow had PCID but not PGE, then this wouldn't work --
+	 * we'd end up flushing kernel translations for the current ASID but
+	 * we might fail to flush kernel translations for other cached ASIDs.
+	 *
+	 * To avoid this issue, we force PCID off if PGE is off.
+	 */
 }
 
 static inline void __flush_tlb_one(unsigned long addr)
@@ -205,7 +260,6 @@
 /*
  * TLB flushing:
  *
- *  - flush_tlb() flushes the current mm struct TLBs
  *  - flush_tlb_all() flushes all processes TLBs
  *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
  *  - flush_tlb_page(vma, vmaddr) flushes one page
@@ -217,84 +271,6 @@
  * and page-granular flushes are available only on i486 and up.
  */
 
-#ifndef CONFIG_SMP
-
-/* "_up" is for UniProcessor.
- *
- * This is a helper for other header functions.  *Not* intended to be called
- * directly.  All global TLB flushes need to either call this, or to bump the
- * vm statistics themselves.
- */
-static inline void __flush_tlb_up(void)
-{
-	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-	__flush_tlb();
-}
-
-static inline void flush_tlb_all(void)
-{
-	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-	__flush_tlb_all();
-}
-
-static inline void flush_tlb(void)
-{
-	__flush_tlb_up();
-}
-
-static inline void local_flush_tlb(void)
-{
-	__flush_tlb_up();
-}
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
-	if (mm == current->active_mm)
-		__flush_tlb_up();
-}
-
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-				  unsigned long addr)
-{
-	if (vma->vm_mm == current->active_mm)
-		__flush_tlb_one(addr);
-}
-
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-				   unsigned long start, unsigned long end)
-{
-	if (vma->vm_mm == current->active_mm)
-		__flush_tlb_up();
-}
-
-static inline void flush_tlb_mm_range(struct mm_struct *mm,
-	   unsigned long start, unsigned long end, unsigned long vmflag)
-{
-	if (mm == current->active_mm)
-		__flush_tlb_up();
-}
-
-static inline void native_flush_tlb_others(const struct cpumask *cpumask,
-					   struct mm_struct *mm,
-					   unsigned long start,
-					   unsigned long end)
-{
-}
-
-static inline void reset_lazy_tlbstate(void)
-{
-}
-
-static inline void flush_tlb_kernel_range(unsigned long start,
-					  unsigned long end)
-{
-	flush_tlb_all();
-}
-
-#else  /* SMP */
-
-#include <asm/smp.h>
-
 #define local_flush_tlb() __flush_tlb()
 
 #define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)
@@ -303,13 +279,14 @@
 		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
 
 extern void flush_tlb_all(void);
-extern void flush_tlb_current_task(void);
-extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
 extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 				unsigned long end, unsigned long vmflag);
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
-#define flush_tlb()	flush_tlb_current_task()
+static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
+{
+	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
+}
 
 void native_flush_tlb_others(const struct cpumask *cpumask,
 				struct mm_struct *mm,
@@ -324,8 +301,6 @@
 	this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
 }
 
-#endif	/* SMP */
-
 #ifndef CONFIG_PARAVIRT
 #define flush_tlb_others(mask, mm, start, end)	\
 	native_flush_tlb_others(mask, mm, start, end)
diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
index 6ba66ee..4865e10 100644
--- a/arch/x86/include/asm/vsyscall.h
+++ b/arch/x86/include/asm/vsyscall.h
@@ -12,12 +12,14 @@
  * Returns true if handled.
  */
 extern bool emulate_vsyscall(struct pt_regs *regs, unsigned long address);
+extern bool vsyscall_enabled(void);
 #else
 static inline void map_vsyscall(void) {}
 static inline bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
 {
 	return false;
 }
+static inline bool vsyscall_enabled(void) { return false; }
 #endif
 
 #endif /* _ASM_X86_VSYSCALL_H */
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 8b678af..ccdc23d 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -44,6 +44,7 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/smap.h>
+#include <asm/nospec-branch.h>
 
 #include <xen/interface/xen.h>
 #include <xen/interface/sched.h>
@@ -216,9 +217,9 @@
 	__HYPERCALL_5ARG(a1, a2, a3, a4, a5);
 
 	stac();
-	asm volatile("call *%[call]"
+	asm volatile(CALL_NOSPEC
 		     : __HYPERCALL_5PARAM
-		     : [call] "a" (&hypercall_page[call])
+		     : [thunk_target] "a" (&hypercall_page[call])
 		     : __HYPERCALL_CLOBBER5);
 	clac();
 
diff --git a/arch/x86/include/uapi/asm/processor-flags.h b/arch/x86/include/uapi/asm/processor-flags.h
index 567de50..6768d13 100644
--- a/arch/x86/include/uapi/asm/processor-flags.h
+++ b/arch/x86/include/uapi/asm/processor-flags.h
@@ -77,7 +77,8 @@
 #define X86_CR3_PWT		_BITUL(X86_CR3_PWT_BIT)
 #define X86_CR3_PCD_BIT		4 /* Page Cache Disable */
 #define X86_CR3_PCD		_BITUL(X86_CR3_PCD_BIT)
-#define X86_CR3_PCID_MASK	_AC(0x00000fff,UL) /* PCID Mask */
+#define X86_CR3_PCID_NOFLUSH_BIT 63 /* Preserve old PCID */
+#define X86_CR3_PCID_NOFLUSH    _BITULL(X86_CR3_PCID_NOFLUSH_BIT)
 
 /*
  * Intel CPU features in CR4
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index b89bef9..0a1e8a6 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -335,13 +335,12 @@
 #ifdef CONFIG_X86_IO_APIC
 #define MP_ISA_BUS		0
 
+static int __init mp_register_ioapic_irq(u8 bus_irq, u8 polarity,
+						u8 trigger, u32 gsi);
+
 static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
 					  u32 gsi)
 {
-	int ioapic;
-	int pin;
-	struct mpc_intsrc mp_irq;
-
 	/*
 	 * Check bus_irq boundary.
 	 */
@@ -351,14 +350,6 @@
 	}
 
 	/*
-	 * Convert 'gsi' to 'ioapic.pin'.
-	 */
-	ioapic = mp_find_ioapic(gsi);
-	if (ioapic < 0)
-		return;
-	pin = mp_find_ioapic_pin(ioapic, gsi);
-
-	/*
 	 * TBD: This check is for faulty timer entries, where the override
 	 *      erroneously sets the trigger to level, resulting in a HUGE
 	 *      increase of timer interrupts!
@@ -366,16 +357,8 @@
 	if ((bus_irq == 0) && (trigger == 3))
 		trigger = 1;
 
-	mp_irq.type = MP_INTSRC;
-	mp_irq.irqtype = mp_INT;
-	mp_irq.irqflag = (trigger << 2) | polarity;
-	mp_irq.srcbus = MP_ISA_BUS;
-	mp_irq.srcbusirq = bus_irq;	/* IRQ */
-	mp_irq.dstapic = mpc_ioapic_id(ioapic); /* APIC ID */
-	mp_irq.dstirq = pin;	/* INTIN# */
-
-	mp_save_irq(&mp_irq);
-
+	if (mp_register_ioapic_irq(bus_irq, polarity, trigger, gsi) < 0)
+		return;
 	/*
 	 * Reset default identity mapping if gsi is also an legacy IRQ,
 	 * otherwise there will be more than one entry with the same GSI
@@ -422,6 +405,34 @@
 	return 0;
 }
 
+static int __init mp_register_ioapic_irq(u8 bus_irq, u8 polarity,
+						u8 trigger, u32 gsi)
+{
+	struct mpc_intsrc mp_irq;
+	int ioapic, pin;
+
+	/* Convert 'gsi' to 'ioapic.pin'(INTIN#) */
+	ioapic = mp_find_ioapic(gsi);
+	if (ioapic < 0) {
+		pr_warn("Failed to find ioapic for gsi : %u\n", gsi);
+		return ioapic;
+	}
+
+	pin = mp_find_ioapic_pin(ioapic, gsi);
+
+	mp_irq.type = MP_INTSRC;
+	mp_irq.irqtype = mp_INT;
+	mp_irq.irqflag = (trigger << 2) | polarity;
+	mp_irq.srcbus = MP_ISA_BUS;
+	mp_irq.srcbusirq = bus_irq;
+	mp_irq.dstapic = mpc_ioapic_id(ioapic);
+	mp_irq.dstirq = pin;
+
+	mp_save_irq(&mp_irq);
+
+	return 0;
+}
+
 static int __init
 acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end)
 {
@@ -466,7 +477,11 @@
 	if (acpi_sci_flags & ACPI_MADT_POLARITY_MASK)
 		polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;
 
-	mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
+	if (bus_irq < NR_IRQS_LEGACY)
+		mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
+	else
+		mp_register_ioapic_irq(bus_irq, polarity, trigger, gsi);
+
 	acpi_penalize_sci_irq(bus_irq, trigger, polarity);
 
 	/*
@@ -720,7 +735,7 @@
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 #include <acpi/processor.h>
 
-int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
+static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 {
 #ifdef CONFIG_ACPI_NUMA
 	int nid;
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 5cb272a..10d5a3d 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -340,9 +340,12 @@
 static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr)
 {
 	unsigned long flags;
+	int i;
 
-	if (instr[0] != 0x90)
-		return;
+	for (i = 0; i < a->padlen; i++) {
+		if (instr[i] != 0x90)
+			return;
+	}
 
 	local_irq_save(flags);
 	add_nops(instr + (a->instrlen - a->padlen), a->padlen);
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 4a8697f..33b6367 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -20,13 +20,11 @@
 obj-y			+= common.o
 obj-y			+= rdrand.o
 obj-y			+= match.o
+obj-y			+= bugs.o
 
 obj-$(CONFIG_PROC_FS)	+= proc.o
 obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o
 
-obj-$(CONFIG_X86_32)	+= bugs.o
-obj-$(CONFIG_X86_64)	+= bugs_64.o
-
 obj-$(CONFIG_CPU_SUP_INTEL)		+= intel.o
 obj-$(CONFIG_CPU_SUP_AMD)		+= amd.o
 obj-$(CONFIG_CPU_SUP_CYRIX_32)		+= cyrix.o
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 2b4cf04..1b89f0c 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -782,8 +782,32 @@
 		set_cpu_cap(c, X86_FEATURE_K8);
 
 	if (cpu_has(c, X86_FEATURE_XMM2)) {
-		/* MFENCE stops RDTSC speculation */
-		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
+		unsigned long long val;
+		int ret;
+
+		/*
+		 * A serializing LFENCE has less overhead than MFENCE, so
+		 * use it for execution serialization.  On families which
+		 * don't have that MSR, LFENCE is already serializing.
+		 * msr_set_bit() uses the safe accessors, too, even if the MSR
+		 * is not present.
+		 */
+		msr_set_bit(MSR_F10H_DECFG,
+			    MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
+
+		/*
+		 * Verify that the MSR write was successful (could be running
+		 * under a hypervisor) and only then assume that LFENCE is
+		 * serializing.
+		 */
+		ret = rdmsrl_safe(MSR_F10H_DECFG, &val);
+		if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) {
+			/* A serializing LFENCE stops RDTSC speculation */
+			set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
+		} else {
+			/* MFENCE stops RDTSC speculation */
+			set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
+		}
 	}
 
 	/*
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index bd17db1..49d25dd 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -9,6 +9,10 @@
  */
 #include <linux/init.h>
 #include <linux/utsname.h>
+#include <linux/cpu.h>
+
+#include <asm/nospec-branch.h>
+#include <asm/cmdline.h>
 #include <asm/bugs.h>
 #include <asm/processor.h>
 #include <asm/processor-flags.h>
@@ -16,15 +20,24 @@
 #include <asm/msr.h>
 #include <asm/paravirt.h>
 #include <asm/alternative.h>
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
+
+static void __init spectre_v2_select_mitigation(void);
 
 void __init check_bugs(void)
 {
 	identify_boot_cpu();
-#ifndef CONFIG_SMP
-	pr_info("CPU: ");
-	print_cpu_info(&boot_cpu_data);
-#endif
 
+	if (!IS_ENABLED(CONFIG_SMP)) {
+		pr_info("CPU: ");
+		print_cpu_info(&boot_cpu_data);
+	}
+
+	/* Select the proper spectre mitigation before patching alternatives */
+	spectre_v2_select_mitigation();
+
+#ifdef CONFIG_X86_32
 	/*
 	 * Check whether we are able to run this kernel safely on SMP.
 	 *
@@ -40,4 +53,194 @@
 	alternative_instructions();
 
 	fpu__init_check_bugs();
+#else /* CONFIG_X86_64 */
+	alternative_instructions();
+
+	/*
+	 * Make sure the first 2MB area is not mapped by huge pages
+	 * There are typically fixed size MTRRs in there and overlapping
+	 * MTRRs into large pages causes slow downs.
+	 *
+	 * Right now we don't do that with gbpages because there seems
+	 * very little benefit for that case.
+	 */
+	if (!direct_gbpages)
+		set_memory_4k((unsigned long)__va(0), 1);
+#endif
 }
+
+/* The kernel command line selection */
+enum spectre_v2_mitigation_cmd {
+	SPECTRE_V2_CMD_NONE,
+	SPECTRE_V2_CMD_AUTO,
+	SPECTRE_V2_CMD_FORCE,
+	SPECTRE_V2_CMD_RETPOLINE,
+	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
+	SPECTRE_V2_CMD_RETPOLINE_AMD,
+};
+
+static const char *spectre_v2_strings[] = {
+	[SPECTRE_V2_NONE]			= "Vulnerable",
+	[SPECTRE_V2_RETPOLINE_MINIMAL]		= "Vulnerable: Minimal generic ASM retpoline",
+	[SPECTRE_V2_RETPOLINE_MINIMAL_AMD]	= "Vulnerable: Minimal AMD ASM retpoline",
+	[SPECTRE_V2_RETPOLINE_GENERIC]		= "Mitigation: Full generic retpoline",
+	[SPECTRE_V2_RETPOLINE_AMD]		= "Mitigation: Full AMD retpoline",
+};
+
+#undef pr_fmt
+#define pr_fmt(fmt)     "Spectre V2 mitigation: " fmt
+
+static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
+
+static void __init spec2_print_if_insecure(const char *reason)
+{
+	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+		pr_info("%s\n", reason);
+}
+
+static void __init spec2_print_if_secure(const char *reason)
+{
+	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+		pr_info("%s\n", reason);
+}
+
+static inline bool retp_compiler(void)
+{
+	return __is_defined(RETPOLINE);
+}
+
+static inline bool match_option(const char *arg, int arglen, const char *opt)
+{
+	int len = strlen(opt);
+
+	return len == arglen && !strncmp(arg, opt, len);
+}
+
+static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
+{
+	char arg[20];
+	int ret;
+
+	ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
+				  sizeof(arg));
+	if (ret > 0)  {
+		if (match_option(arg, ret, "off")) {
+			goto disable;
+		} else if (match_option(arg, ret, "on")) {
+			spec2_print_if_secure("force enabled on command line.");
+			return SPECTRE_V2_CMD_FORCE;
+		} else if (match_option(arg, ret, "retpoline")) {
+			spec2_print_if_insecure("retpoline selected on command line.");
+			return SPECTRE_V2_CMD_RETPOLINE;
+		} else if (match_option(arg, ret, "retpoline,amd")) {
+			if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
+				pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
+				return SPECTRE_V2_CMD_AUTO;
+			}
+			spec2_print_if_insecure("AMD retpoline selected on command line.");
+			return SPECTRE_V2_CMD_RETPOLINE_AMD;
+		} else if (match_option(arg, ret, "retpoline,generic")) {
+			spec2_print_if_insecure("generic retpoline selected on command line.");
+			return SPECTRE_V2_CMD_RETPOLINE_GENERIC;
+		} else if (match_option(arg, ret, "auto")) {
+			return SPECTRE_V2_CMD_AUTO;
+		}
+	}
+
+	if (!cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
+		return SPECTRE_V2_CMD_AUTO;
+disable:
+	spec2_print_if_insecure("disabled on command line.");
+	return SPECTRE_V2_CMD_NONE;
+}
+
+static void __init spectre_v2_select_mitigation(void)
+{
+	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
+	enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;
+
+	/*
+	 * If the CPU is not affected and the command line mode is NONE or AUTO
+	 * then nothing to do.
+	 */
+	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
+	    (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
+		return;
+
+	switch (cmd) {
+	case SPECTRE_V2_CMD_NONE:
+		return;
+
+	case SPECTRE_V2_CMD_FORCE:
+		/* FALLTHRU */
+	case SPECTRE_V2_CMD_AUTO:
+		goto retpoline_auto;
+
+	case SPECTRE_V2_CMD_RETPOLINE_AMD:
+		if (IS_ENABLED(CONFIG_RETPOLINE))
+			goto retpoline_amd;
+		break;
+	case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
+		if (IS_ENABLED(CONFIG_RETPOLINE))
+			goto retpoline_generic;
+		break;
+	case SPECTRE_V2_CMD_RETPOLINE:
+		if (IS_ENABLED(CONFIG_RETPOLINE))
+			goto retpoline_auto;
+		break;
+	}
+	pr_err("kernel not compiled with retpoline; no mitigation available!");
+	return;
+
+retpoline_auto:
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+	retpoline_amd:
+		if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
+			pr_err("LFENCE not serializing. Switching to generic retpoline\n");
+			goto retpoline_generic;
+		}
+		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
+					 SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
+		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
+		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
+	} else {
+	retpoline_generic:
+		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
+					 SPECTRE_V2_RETPOLINE_MINIMAL;
+		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
+	}
+
+	spectre_v2_enabled = mode;
+	pr_info("%s\n", spectre_v2_strings[mode]);
+}
+
+#undef pr_fmt
+
+#ifdef CONFIG_SYSFS
+ssize_t cpu_show_meltdown(struct device *dev,
+			  struct device_attribute *attr, char *buf)
+{
+	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
+		return sprintf(buf, "Not affected\n");
+	if (boot_cpu_has(X86_FEATURE_KAISER))
+		return sprintf(buf, "Mitigation: PTI\n");
+	return sprintf(buf, "Vulnerable\n");
+}
+
+ssize_t cpu_show_spectre_v1(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
+		return sprintf(buf, "Not affected\n");
+	return sprintf(buf, "Vulnerable\n");
+}
+
+ssize_t cpu_show_spectre_v2(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+		return sprintf(buf, "Not affected\n");
+
+	return sprintf(buf, "%s\n", spectre_v2_strings[spectre_v2_enabled]);
+}
+#endif
diff --git a/arch/x86/kernel/cpu/bugs_64.c b/arch/x86/kernel/cpu/bugs_64.c
deleted file mode 100644
index a972ac4..0000000
--- a/arch/x86/kernel/cpu/bugs_64.c
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- *  Copyright (C) 1994  Linus Torvalds
- *  Copyright (C) 2000  SuSE
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <asm/alternative.h>
-#include <asm/bugs.h>
-#include <asm/processor.h>
-#include <asm/mtrr.h>
-#include <asm/cacheflush.h>
-
-void __init check_bugs(void)
-{
-	identify_boot_cpu();
-#if !defined(CONFIG_SMP)
-	pr_info("CPU: ");
-	print_cpu_info(&boot_cpu_data);
-#endif
-	alternative_instructions();
-
-	/*
-	 * Make sure the first 2MB area is not mapped by huge pages
-	 * There are typically fixed size MTRRs in there and overlapping
-	 * MTRRs into large pages causes slow downs.
-	 *
-	 * Right now we don't do that with gbpages because there seems
-	 * very little benefit for that case.
-	 */
-	if (!direct_gbpages)
-		set_memory_4k((unsigned long)__va(0), 1);
-}
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 4eece91..7b9ae04 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -93,7 +93,7 @@
 
 static const struct cpu_dev *this_cpu = &default_cpu;
 
-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
+DEFINE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(struct gdt_page, gdt_page) = { .gdt = {
 #ifdef CONFIG_X86_64
 	/*
 	 * We need valid kernel segments for data and code in long mode too
@@ -163,6 +163,24 @@
 }
 __setup("nompx", x86_mpx_setup);
 
+#ifdef CONFIG_X86_64
+static int __init x86_pcid_setup(char *s)
+{
+	/* require an exact match without trailing characters */
+	if (strlen(s))
+		return 0;
+
+	/* do not emit a message if the feature is not present */
+	if (!boot_cpu_has(X86_FEATURE_PCID))
+		return 1;
+
+	setup_clear_cpu_cap(X86_FEATURE_PCID);
+	pr_info("nopcid: PCID feature disabled\n");
+	return 1;
+}
+__setup("nopcid", x86_pcid_setup);
+#endif
+
 static int __init x86_noinvpcid_setup(char *s)
 {
 	/* noinvpcid doesn't accept parameters */
@@ -306,6 +324,39 @@
 	}
 }
 
+static void setup_pcid(struct cpuinfo_x86 *c)
+{
+	if (cpu_has(c, X86_FEATURE_PCID)) {
+		if (cpu_has(c, X86_FEATURE_PGE) || kaiser_enabled) {
+			cr4_set_bits(X86_CR4_PCIDE);
+			/*
+			 * INVPCID has two "groups" of types:
+			 * 1/2: Invalidate an individual address
+			 * 3/4: Invalidate all contexts
+			 *
+			 * 1/2 take a PCID, but 3/4 do not.  So, 3/4
+			 * ignore the PCID argument in the descriptor.
+			 * But, we have to be careful not to call 1/2
+			 * with an actual non-zero PCID in them before
+			 * we do the above cr4_set_bits().
+			 */
+			if (cpu_has(c, X86_FEATURE_INVPCID))
+				set_cpu_cap(c, X86_FEATURE_INVPCID_SINGLE);
+		} else {
+			/*
+			 * flush_tlb_all(), as currently implemented, won't
+			 * work if PCID is on but PGE is not.  Since that
+			 * combination doesn't exist on real hardware, there's
+			 * no reason to try to fully support it, but it's
+			 * polite to avoid corrupting data if we're on
+			 * an improperly configured VM.
+			 */
+			clear_cpu_cap(c, X86_FEATURE_PCID);
+		}
+	}
+	kaiser_setup_pcid();
+}
+
 /*
  * Protection Keys are not available in 32-bit mode.
  */
@@ -429,8 +480,8 @@
 	return NULL;		/* Not found */
 }
 
-__u32 cpu_caps_cleared[NCAPINTS];
-__u32 cpu_caps_set[NCAPINTS];
+__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS];
+__u32 cpu_caps_set[NCAPINTS + NBUGINTS];
 
 void load_percpu_segment(int cpu)
 {
@@ -655,6 +706,16 @@
 	}
 }
 
+static void apply_forced_caps(struct cpuinfo_x86 *c)
+{
+	int i;
+
+	for (i = 0; i < NCAPINTS + NBUGINTS; i++) {
+		c->x86_capability[i] &= ~cpu_caps_cleared[i];
+		c->x86_capability[i] |= cpu_caps_set[i];
+	}
+}
+
 void get_cpu_cap(struct cpuinfo_x86 *c)
 {
 	u32 eax, ebx, ecx, edx;
@@ -821,7 +882,22 @@
 	}
 
 	setup_force_cpu_cap(X86_FEATURE_ALWAYS);
+
+	/* Assume for now that ALL x86 CPUs are insecure */
+	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+
+	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
+	setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+
 	fpu__init_system(c);
+
+#ifdef CONFIG_X86_32
+	/*
+	 * Regardless of whether PCID is enumerated, the SDM says
+	 * that it can't be enabled in 32-bit mode.
+	 */
+	setup_clear_cpu_cap(X86_FEATURE_PCID);
+#endif
 }
 
 void __init early_cpu_init(void)
@@ -1035,10 +1111,7 @@
 		this_cpu->c_identify(c);
 
 	/* Clear/Set all flags overridden by options, after probe */
-	for (i = 0; i < NCAPINTS; i++) {
-		c->x86_capability[i] &= ~cpu_caps_cleared[i];
-		c->x86_capability[i] |= cpu_caps_set[i];
-	}
+	apply_forced_caps(c);
 
 #ifdef CONFIG_X86_64
 	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
@@ -1064,6 +1137,9 @@
 	setup_smep(c);
 	setup_smap(c);
 
+	/* Set up PCID */
+	setup_pcid(c);
+
 	/*
 	 * The vendor-specific functions might have changed features.
 	 * Now we do "generic changes."
@@ -1097,10 +1173,7 @@
 	 * Clear/Set all flags overridden by options, need do it
 	 * before following smp all cpus cap AND.
 	 */
-	for (i = 0; i < NCAPINTS; i++) {
-		c->x86_capability[i] &= ~cpu_caps_cleared[i];
-		c->x86_capability[i] |= cpu_caps_set[i];
-	}
+	apply_forced_caps(c);
 
 	/*
 	 * On SMP, boot_cpu_data holds the common feature set between
@@ -1325,7 +1398,7 @@
 	  [DEBUG_STACK - 1]			= DEBUG_STKSZ
 };
 
-static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
+DEFINE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(char, exception_stacks
 	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
 
 /* May not be marked __init: used by software suspend */
@@ -1483,6 +1556,14 @@
 	 * try to read it.
 	 */
 	cr4_init_shadow();
+	if (!kaiser_enabled) {
+		/*
+		 * secondary_startup_64() deferred setting PGE in cr4:
+		 * probe_page_size_mask() sets it on the boot cpu,
+		 * but it needs to be set on each secondary cpu.
+		 */
+		cr4_set_bits(X86_CR4_PGE);
+	}
 
 	/*
 	 * Load microcode on this cpu if a valid microcode is available.
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 017bda1..b74bb29 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -592,6 +592,7 @@
 #define F14H_MPB_MAX_SIZE 1824
 #define F15H_MPB_MAX_SIZE 4096
 #define F16H_MPB_MAX_SIZE 3458
+#define F17H_MPB_MAX_SIZE 3200
 
 	switch (family) {
 	case 0x14:
@@ -603,6 +604,9 @@
 	case 0x16:
 		max_size = F16H_MPB_MAX_SIZE;
 		break;
+	case 0x17:
+		max_size = F17H_MPB_MAX_SIZE;
+		break;
 	default:
 		max_size = F1XH_MPB_MAX_SIZE;
 		break;
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 13dbcc0..ac3e636 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -1051,8 +1051,17 @@
 {
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
-	if (c->x86 == 6 && c->x86_model == INTEL_FAM6_BROADWELL_X) {
-		pr_err_once("late loading on model 79 is disabled.\n");
+	/*
+	 * Late loading on model 79 with microcode revision less than 0x0b000021
+	 * may result in a system hang. This behavior is documented in item
+	 * BDF90, #334165 (Intel Xeon Processor E7-8800/4800 v4 Product Family).
+	 */
+	if (c->x86 == 6 &&
+	    c->x86_model == INTEL_FAM6_BROADWELL_X &&
+	    c->x86_mask == 0x01 &&
+	    c->microcode < 0x0b000021) {
+		pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
+		pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
 		return true;
 	}
 
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index 04f89ca..e33b385 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -41,6 +41,7 @@
 #include <asm/pgalloc.h>
 #include <asm/setup.h>
 #include <asm/espfix.h>
+#include <asm/kaiser.h>
 
 /*
  * Note: we only need 6*8 = 48 bytes for the espfix stack, but round
@@ -126,6 +127,15 @@
 	/* Install the espfix pud into the kernel page directory */
 	pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)];
 	pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page);
+	/*
+	 * Just copy the top-level PGD that is mapping the espfix
+	 * area to ensure it is mapped into the shadow user page
+	 * tables.
+	 */
+	if (kaiser_enabled) {
+		set_pgd(native_get_shadow_pgd(pgd_p),
+			__pgd(_KERNPG_TABLE | __pa((pud_t *)espfix_pud_page)));
+	}
 
 	/* Randomize the locations */
 	init_espfix_random();
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 095ef7d..abfbb61b 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -1077,6 +1077,7 @@
 	 * Add back in the features that came in from userspace:
 	 */
 	xsave->header.xfeatures |= xfeatures;
+	xsave->header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT | xsave->header.xfeatures;
 
 	return 0;
 }
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index b4421cc..67cd7c1 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -190,8 +190,8 @@
 	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
 1:
 
-	/* Enable PAE mode and PGE */
-	movl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
+	/* Enable PAE and PSE, but defer PGE until kaiser_enabled is decided */
+	movl	$(X86_CR4_PAE | X86_CR4_PSE), %ecx
 	movq	%rcx, %cr4
 
 	/* Setup early boot stage 4 level pagetables. */
@@ -405,6 +405,27 @@
 	.balign	PAGE_SIZE; \
 GLOBAL(name)
 
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+/*
+ * Each PGD needs to be 8k long and 8k aligned.  We do not
+ * ever go out to userspace with these, so we do not
+ * strictly *need* the second page, but this allows us to
+ * have a single set_pgd() implementation that does not
+ * need to worry about whether it has 4k or 8k to work
+ * with.
+ *
+ * This ensures PGDs are 8k long:
+ */
+#define KAISER_USER_PGD_FILL	512
+/* This ensures they are 8k-aligned: */
+#define NEXT_PGD_PAGE(name) \
+	.balign 2 * PAGE_SIZE; \
+GLOBAL(name)
+#else
+#define NEXT_PGD_PAGE(name) NEXT_PAGE(name)
+#define KAISER_USER_PGD_FILL	0
+#endif
+
 /* Automate the creation of 1 to 1 mapping pmd entries */
 #define PMDS(START, PERM, COUNT)			\
 	i = 0 ;						\
@@ -414,9 +435,10 @@
 	.endr
 
 	__INITDATA
-NEXT_PAGE(early_level4_pgt)
+NEXT_PGD_PAGE(early_level4_pgt)
 	.fill	511,8,0
 	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
+	.fill	KAISER_USER_PGD_FILL,8,0
 
 NEXT_PAGE(early_dynamic_pgts)
 	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0
@@ -424,16 +446,18 @@
 	.data
 
 #ifndef CONFIG_XEN
-NEXT_PAGE(init_level4_pgt)
+NEXT_PGD_PAGE(init_level4_pgt)
 	.fill	512,8,0
+	.fill	KAISER_USER_PGD_FILL,8,0
 #else
-NEXT_PAGE(init_level4_pgt)
+NEXT_PGD_PAGE(init_level4_pgt)
 	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
 	.org    init_level4_pgt + L4_PAGE_OFFSET*8, 0
 	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
 	.org    init_level4_pgt + L4_START_KERNEL*8, 0
 	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
 	.quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
+	.fill	KAISER_USER_PGD_FILL,8,0
 
 NEXT_PAGE(level3_ident_pgt)
 	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
@@ -444,6 +468,7 @@
 	 */
 	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
 #endif
+	.fill	KAISER_USER_PGD_FILL,8,0
 
 NEXT_PAGE(level3_kernel_pgt)
 	.fill	L3_START_KERNEL,8,0
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 932348fb..9512529 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -354,7 +354,7 @@
 
 		irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq));
 		irq_domain_activate_irq(irq_get_irq_data(hdev->irq));
-		disable_irq(hdev->irq);
+		disable_hardirq(hdev->irq);
 		irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
 		enable_irq(hdev->irq);
 	}
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 1f38d9a..2763573 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -19,6 +19,7 @@
 #include <linux/mm.h>
 
 #include <asm/apic.h>
+#include <asm/nospec-branch.h>
 
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
 
@@ -54,17 +55,17 @@
 static void call_on_stack(void *func, void *stack)
 {
 	asm volatile("xchgl	%%ebx,%%esp	\n"
-		     "call	*%%edi		\n"
+		     CALL_NOSPEC
 		     "movl	%%ebx,%%esp	\n"
 		     : "=b" (stack)
 		     : "0" (stack),
-		       "D"(func)
+		       [thunk_target] "D"(func)
 		     : "memory", "cc", "edx", "ecx", "eax");
 }
 
 static inline void *current_stack(void)
 {
-	return (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1));
+	return (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
 }
 
 static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
@@ -88,17 +89,17 @@
 
 	/* Save the next esp at the bottom of the stack */
 	prev_esp = (u32 *)irqstk;
-	*prev_esp = current_stack_pointer();
+	*prev_esp = current_stack_pointer;
 
 	if (unlikely(overflow))
 		call_on_stack(print_stack_overflow, isp);
 
 	asm volatile("xchgl	%%ebx,%%esp	\n"
-		     "call	*%%edi		\n"
+		     CALL_NOSPEC
 		     "movl	%%ebx,%%esp	\n"
 		     : "=a" (arg1), "=b" (isp)
 		     :  "0" (desc),   "1" (isp),
-			"D" (desc->handle_irq)
+			[thunk_target] "D" (desc->handle_irq)
 		     : "memory", "cc", "ecx");
 	return 1;
 }
@@ -139,7 +140,7 @@
 
 	/* Push the previous esp onto the stack */
 	prev_esp = (u32 *)irqstk;
-	*prev_esp = current_stack_pointer();
+	*prev_esp = current_stack_pointer;
 
 	call_on_stack(__do_softirq, isp);
 }
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 1423ab1..f480b38 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -51,7 +51,7 @@
 	.flags = IRQF_NO_THREAD,
 };
 
-DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
+DEFINE_PER_CPU_USER_MAPPED(vector_irq_t, vector_irq) = {
 	[0 ... NR_VECTORS - 1] = VECTOR_UNUSED,
 };
 
diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
index 5f8f0b3..2c0b0b6 100644
--- a/arch/x86/kernel/kprobes/ftrace.c
+++ b/arch/x86/kernel/kprobes/ftrace.c
@@ -26,7 +26,7 @@
 #include "common.h"
 
 static nokprobe_inline
-int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+void __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
 		      struct kprobe_ctlblk *kcb, unsigned long orig_ip)
 {
 	/*
@@ -41,20 +41,21 @@
 	__this_cpu_write(current_kprobe, NULL);
 	if (orig_ip)
 		regs->ip = orig_ip;
-	return 1;
 }
 
 int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
 		    struct kprobe_ctlblk *kcb)
 {
-	if (kprobe_ftrace(p))
-		return __skip_singlestep(p, regs, kcb, 0);
-	else
-		return 0;
+	if (kprobe_ftrace(p)) {
+		__skip_singlestep(p, regs, kcb, 0);
+		preempt_enable_no_resched();
+		return 1;
+	}
+	return 0;
 }
 NOKPROBE_SYMBOL(skip_singlestep);
 
-/* Ftrace callback handler for kprobes */
+/* Ftrace callback handler for kprobes -- called with preemption disabled */
 void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
 			   struct ftrace_ops *ops, struct pt_regs *regs)
 {
@@ -77,13 +78,17 @@
 		/* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
 		regs->ip = ip + sizeof(kprobe_opcode_t);
 
+		/* To emulate trap based kprobes, preempt_disable here */
+		preempt_disable();
 		__this_cpu_write(current_kprobe, p);
 		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
-		if (!p->pre_handler || !p->pre_handler(p, regs))
+		if (!p->pre_handler || !p->pre_handler(p, regs)) {
 			__skip_singlestep(p, regs, kcb, orig_ip);
+			preempt_enable_no_resched();
+		}
 		/*
 		 * If pre_handler returns !0, it sets regs->ip and
-		 * resets current kprobe.
+		 * resets current kprobe, and keep preempt count +1.
 		 */
 	}
 end:
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 6707039..8bc68cf 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -12,9 +12,11 @@
 #include <linux/string.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
+#include <linux/syscalls.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/uaccess.h>
+#include <linux/kaiser.h>
 
 #include <asm/ldt.h>
 #include <asm/desc.h>
@@ -33,11 +35,21 @@
 	set_ldt(pc->ldt->entries, pc->ldt->size);
 }
 
+static void __free_ldt_struct(struct ldt_struct *ldt)
+{
+	if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
+		vfree(ldt->entries);
+	else
+		free_page((unsigned long)ldt->entries);
+	kfree(ldt);
+}
+
 /* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
 static struct ldt_struct *alloc_ldt_struct(int size)
 {
 	struct ldt_struct *new_ldt;
 	int alloc_size;
+	int ret;
 
 	if (size > LDT_ENTRIES)
 		return NULL;
@@ -65,7 +77,13 @@
 		return NULL;
 	}
 
+	ret = kaiser_add_mapping((unsigned long)new_ldt->entries, alloc_size,
+				 __PAGE_KERNEL);
 	new_ldt->size = size;
+	if (ret) {
+		__free_ldt_struct(new_ldt);
+		return NULL;
+	}
 	return new_ldt;
 }
 
@@ -91,12 +109,10 @@
 	if (likely(!ldt))
 		return;
 
+	kaiser_remove_mapping((unsigned long)ldt->entries,
+			      ldt->size * LDT_ENTRY_SIZE);
 	paravirt_free_ldt(ldt->entries, ldt->size);
-	if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
-		vfree(ldt->entries);
-	else
-		free_page((unsigned long)ldt->entries);
-	kfree(ldt);
+	__free_ldt_struct(ldt);
 }
 
 /*
@@ -271,8 +287,8 @@
 	return error;
 }
 
-asmlinkage int sys_modify_ldt(int func, void __user *ptr,
-			      unsigned long bytecount)
+SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
+		unsigned long , bytecount)
 {
 	int ret = -ENOSYS;
 
@@ -290,5 +306,14 @@
 		ret = write_ldt(ptr, bytecount, 0);
 		break;
 	}
-	return ret;
+	/*
+	 * The SYSCALL_DEFINE() macros give us an 'unsigned long'
+	 * return type, but the ABI for sys_modify_ldt() expects
+	 * 'int'.  This cast gives us an int-sized value in %rax
+	 * for the return code.  The 'unsigned' is necessary so
+	 * the compiler does not try to sign-extend the negative
+	 * return codes into the high half of the register when
+	 * taking the value from int->long.
+	 */
+	return (unsigned int)ret;
 }
diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
index 7b0d3da..287ec3b 100644
--- a/arch/x86/kernel/mcount_64.S
+++ b/arch/x86/kernel/mcount_64.S
@@ -8,7 +8,7 @@
 #include <asm/ptrace.h>
 #include <asm/ftrace.h>
 #include <asm/export.h>
-
+#include <asm/nospec-branch.h>
 
 	.code64
 	.section .entry.text, "ax"
@@ -290,8 +290,9 @@
 	 * ip and parent ip are used and the list function is called when
 	 * function tracing is enabled.
 	 */
-	call   *ftrace_trace_function
 
+	movq ftrace_trace_function, %r8
+	CALL_NOSPEC %r8
 	restore_mcount_regs
 
 	jmp fgraph_trace
@@ -334,5 +335,5 @@
 	movq 8(%rsp), %rdx
 	movq (%rsp), %rax
 	addq $24, %rsp
-	jmp *%rdi
+	JMP_NOSPEC %rdi
 #endif
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index bb3840c..ee43b36 100644
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -9,7 +9,6 @@
 DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
 DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
 DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
-DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
 DEF_NATIVE(pv_cpu_ops, clts, "clts");
 DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
 
@@ -59,7 +58,6 @@
 		PATCH_SITE(pv_mmu_ops, read_cr3);
 		PATCH_SITE(pv_mmu_ops, write_cr3);
 		PATCH_SITE(pv_cpu_ops, clts);
-		PATCH_SITE(pv_mmu_ops, flush_tlb_single);
 		PATCH_SITE(pv_cpu_ops, wbinvd);
 #if defined(CONFIG_PARAVIRT_SPINLOCKS)
 		case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index fc7cf64..54b2711 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -41,7 +41,7 @@
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
+__visible DEFINE_PER_CPU_SHARED_ALIGNED_USER_MAPPED(struct tss_struct, cpu_tss) = {
 	.x86_tss = {
 		.sp0 = TOP_OF_INIT_STACK,
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 067f981..ce020a6 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -106,6 +106,10 @@
 	load_cr3(initial_page_table);
 #else
 	write_cr3(real_mode_header->trampoline_pgd);
+
+	/* Exiting long mode will fail if CR4.PCIDE is set. */
+	if (static_cpu_has(X86_FEATURE_PCID))
+		cr4_clear_bits(X86_CR4_PCIDE);
 #endif
 
 	/* Jump to the identity-mapped low memory code */
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index feaab07..6b55012 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -114,6 +114,7 @@
 #include <asm/microcode.h>
 #include <asm/mmu_context.h>
 #include <asm/kaslr.h>
+#include <asm/kaiser.h>
 
 /*
  * max_low_pfn_mapped: highest direct mapped pfn under 4GB
@@ -1019,6 +1020,12 @@
 	 */
 	init_hypervisor_platform();
 
+	/*
+	 * This needs to happen right after XENPV is set on xen and
+	 * kaiser_enabled is checked below in cleanup_highmap().
+	 */
+	kaiser_check_boottime_disable();
+
 	x86_init.resources.probe_roms();
 
 	/* after parse_early_param, so could debug it */
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 9fe7b9e..e803d72 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -115,14 +115,10 @@
 	spin_lock_irqsave(&rtc_lock, flags);
 	CMOS_WRITE(0xa, 0xf);
 	spin_unlock_irqrestore(&rtc_lock, flags);
-	local_flush_tlb();
-	pr_debug("1.\n");
 	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
 							start_eip >> 4;
-	pr_debug("2.\n");
 	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
 							start_eip & 0xf;
-	pr_debug("3.\n");
 }
 
 static inline void smpboot_restore_warm_reset_vector(void)
@@ -130,11 +126,6 @@
 	unsigned long flags;
 
 	/*
-	 * Install writable page 0 entry to set BIOS data area.
-	 */
-	local_flush_tlb();
-
-	/*
 	 * Paranoid:  Set warm reset code and vector here back
 	 * to default values.
 	 */
diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
index 1c113db..2bb5ee4 100644
--- a/arch/x86/kernel/tracepoint.c
+++ b/arch/x86/kernel/tracepoint.c
@@ -9,10 +9,12 @@
 #include <linux/atomic.h>
 
 atomic_t trace_idt_ctr = ATOMIC_INIT(0);
+__aligned(PAGE_SIZE)
 struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
 				(unsigned long) trace_idt_table };
 
 /* No need to be aligned, but done to keep all IDTs defined the same way. */
+__aligned(PAGE_SIZE)
 gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
 
 static int trace_irq_vector_refcount;
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index bd4e3d4..322f433 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -153,7 +153,7 @@
 	 * from double_fault.
 	 */
 	BUG_ON((unsigned long)(current_top_of_stack() -
-			       current_stack_pointer()) >= THREAD_SIZE);
+			       current_stack_pointer) >= THREAD_SIZE);
 
 	preempt_enable_no_resched();
 }
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 01f30e5..4b30128 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -191,7 +191,7 @@
 	pte_unmap_unlock(pte, ptl);
 out:
 	up_write(&mm->mmap_sem);
-	flush_tlb();
+	flush_tlb_mm_range(mm, 0xA0000, 0xA0000 + 32*PAGE_SIZE, 0UL);
 }
 
 
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 72b737b..c8f8dd8 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2395,9 +2395,21 @@
 }
 
 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
-				     u64 cr0, u64 cr4)
+				    u64 cr0, u64 cr3, u64 cr4)
 {
 	int bad;
+	u64 pcid;
+
+	/* In order to later set CR4.PCIDE, CR3[11:0] must be zero.  */
+	pcid = 0;
+	if (cr4 & X86_CR4_PCIDE) {
+		pcid = cr3 & 0xfff;
+		cr3 &= ~0xfff;
+	}
+
+	bad = ctxt->ops->set_cr(ctxt, 3, cr3);
+	if (bad)
+		return X86EMUL_UNHANDLEABLE;
 
 	/*
 	 * First enable PAE, long mode needs it before CR0.PG = 1 is set.
@@ -2416,6 +2428,12 @@
 		bad = ctxt->ops->set_cr(ctxt, 4, cr4);
 		if (bad)
 			return X86EMUL_UNHANDLEABLE;
+		if (pcid) {
+			bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
+			if (bad)
+				return X86EMUL_UNHANDLEABLE;
+		}
+
 	}
 
 	return X86EMUL_CONTINUE;
@@ -2426,11 +2444,11 @@
 	struct desc_struct desc;
 	struct desc_ptr dt;
 	u16 selector;
-	u32 val, cr0, cr4;
+	u32 val, cr0, cr3, cr4;
 	int i;
 
 	cr0 =                      GET_SMSTATE(u32, smbase, 0x7ffc);
-	ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u32, smbase, 0x7ff8));
+	cr3 =                      GET_SMSTATE(u32, smbase, 0x7ff8);
 	ctxt->eflags =             GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
 	ctxt->_eip =               GET_SMSTATE(u32, smbase, 0x7ff0);
 
@@ -2472,14 +2490,14 @@
 
 	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
 
-	return rsm_enter_protected_mode(ctxt, cr0, cr4);
+	return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
 }
 
 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 {
 	struct desc_struct desc;
 	struct desc_ptr dt;
-	u64 val, cr0, cr4;
+	u64 val, cr0, cr3, cr4;
 	u32 base3;
 	u16 selector;
 	int i, r;
@@ -2496,7 +2514,7 @@
 	ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
 
 	cr0 =                       GET_SMSTATE(u64, smbase, 0x7f58);
-	ctxt->ops->set_cr(ctxt, 3,  GET_SMSTATE(u64, smbase, 0x7f50));
+	cr3 =                       GET_SMSTATE(u64, smbase, 0x7f50);
 	cr4 =                       GET_SMSTATE(u64, smbase, 0x7f48);
 	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
 	val =                       GET_SMSTATE(u64, smbase, 0x7ed0);
@@ -2524,7 +2542,7 @@
 	dt.address =                GET_SMSTATE(u64, smbase, 0x7e68);
 	ctxt->ops->set_gdt(ctxt, &dt);
 
-	r = rsm_enter_protected_mode(ctxt, cr0, cr4);
+	r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
 	if (r != X86EMUL_CONTINUE)
 		return r;
 
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 3f05c04..b24b3c6 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -246,9 +246,14 @@
 	recalculate_apic_map(apic->vcpu->kvm);
 }
 
+static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
+{
+	return ((id >> 4) << 16) | (1 << (id & 0xf));
+}
+
 static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
 {
-	u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf));
+	u32 ldr = kvm_apic_calc_x2apic_ldr(id);
 
 	kvm_lapic_set_reg(apic, APIC_ID, id);
 	kvm_lapic_set_reg(apic, APIC_LDR, ldr);
@@ -2029,6 +2034,7 @@
 {
 	if (apic_x2apic_mode(vcpu->arch.apic)) {
 		u32 *id = (u32 *)(s->regs + APIC_ID);
+		u32 *ldr = (u32 *)(s->regs + APIC_LDR);
 
 		if (vcpu->kvm->arch.x2apic_format) {
 			if (*id != vcpu->vcpu_id)
@@ -2039,6 +2045,10 @@
 			else
 				*id <<= 24;
 		}
+
+		/* In x2APIC mode, the LDR is fixed and based on the id */
+		if (set)
+			*ldr = kvm_apic_calc_x2apic_ldr(*id);
 	}
 
 	return 0;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d29c745..0a324e1 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5052,13 +5052,13 @@
 {
 	pte_list_desc_cache = kmem_cache_create("pte_list_desc",
 					    sizeof(struct pte_list_desc),
-					    0, 0, NULL);
+					    0, SLAB_ACCOUNT, NULL);
 	if (!pte_list_desc_cache)
 		goto nomem;
 
 	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
 						  sizeof(struct kvm_mmu_page),
-						  0, 0, NULL);
+						  0, SLAB_ACCOUNT, NULL);
 	if (!mmu_page_header_cache)
 		goto nomem;
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 8ca1eca..24af898 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -44,6 +44,7 @@
 #include <asm/debugreg.h>
 #include <asm/kvm_para.h>
 #include <asm/irq_remapping.h>
+#include <asm/nospec-branch.h>
 
 #include <asm/virtext.h>
 #include "trace.h"
@@ -1382,6 +1383,9 @@
 	unsigned long flags;
 	struct kvm_arch *vm_data = &kvm->arch;
 
+	if (!avic)
+		return;
+
 	avic_free_vm_id(vm_data->avic_vm_id);
 
 	if (vm_data->avic_logical_id_table_page)
@@ -2149,6 +2153,8 @@
 	int er;
 
 	er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
+	if (er == EMULATE_USER_EXIT)
+		return 0;
 	if (er != EMULATE_DONE)
 		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
 	return 1;
@@ -3583,6 +3589,13 @@
 	u32 ecx = msr->index;
 	u64 data = msr->data;
 	switch (ecx) {
+	case MSR_IA32_CR_PAT:
+		if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
+			return 1;
+		vcpu->arch.pat = data;
+		svm->vmcb->save.g_pat = data;
+		mark_dirty(svm->vmcb, VMCB_NPT);
+		break;
 	case MSR_IA32_TSC:
 		kvm_write_tsc(vcpu, msr);
 		break;
@@ -4857,6 +4870,25 @@
 		"mov %%r14, %c[r14](%[svm]) \n\t"
 		"mov %%r15, %c[r15](%[svm]) \n\t"
 #endif
+		/*
+		* Clear host registers marked as clobbered to prevent
+		* speculative use.
+		*/
+		"xor %%" _ASM_BX ", %%" _ASM_BX " \n\t"
+		"xor %%" _ASM_CX ", %%" _ASM_CX " \n\t"
+		"xor %%" _ASM_DX ", %%" _ASM_DX " \n\t"
+		"xor %%" _ASM_SI ", %%" _ASM_SI " \n\t"
+		"xor %%" _ASM_DI ", %%" _ASM_DI " \n\t"
+#ifdef CONFIG_X86_64
+		"xor %%r8, %%r8 \n\t"
+		"xor %%r9, %%r9 \n\t"
+		"xor %%r10, %%r10 \n\t"
+		"xor %%r11, %%r11 \n\t"
+		"xor %%r12, %%r12 \n\t"
+		"xor %%r13, %%r13 \n\t"
+		"xor %%r14, %%r14 \n\t"
+		"xor %%r15, %%r15 \n\t"
+#endif
 		"pop %%" _ASM_BP
 		:
 		: [svm]"a"(svm),
@@ -4886,6 +4918,9 @@
 #endif
 		);
 
+	/* Eliminate branch target predictions from guest mode */
+	vmexit_fill_RSB();
+
 #ifdef CONFIG_X86_64
 	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
 #else
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a8ae57a..3ca6d15 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -48,6 +48,7 @@
 #include <asm/kexec.h>
 #include <asm/apic.h>
 #include <asm/irq_remapping.h>
+#include <asm/nospec-branch.h>
 
 #include "trace.h"
 #include "pmu.h"
@@ -857,8 +858,16 @@
 {
 	BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX);
 
-	if (field >= ARRAY_SIZE(vmcs_field_to_offset_table) ||
-	    vmcs_field_to_offset_table[field] == 0)
+	if (field >= ARRAY_SIZE(vmcs_field_to_offset_table))
+		return -ENOENT;
+
+	/*
+	 * FIXME: Mitigation for CVE-2017-5753.  To be replaced with a
+	 * generic mechanism.
+	 */
+	asm("lfence");
+
+	if (vmcs_field_to_offset_table[field] == 0)
 		return -ENOENT;
 
 	return vmcs_field_to_offset_table[field];
@@ -1199,6 +1208,11 @@
 	return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
 }
 
+static inline bool cpu_has_vmx_invvpid(void)
+{
+	return vmx_capability.vpid & VMX_VPID_INVVPID_BIT;
+}
+
 static inline bool cpu_has_vmx_ept(void)
 {
 	return vmcs_config.cpu_based_2nd_exec_ctrl &
@@ -3816,6 +3830,12 @@
 	__vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid);
 }
 
+static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu)
+{
+	if (enable_ept)
+		vmx_flush_tlb(vcpu);
+}
+
 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
 {
 	ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
@@ -5502,6 +5522,8 @@
 			return 1;
 		}
 		er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
+		if (er == EMULATE_USER_EXIT)
+			return 0;
 		if (er != EMULATE_DONE)
 			kvm_queue_exception(vcpu, UD_VECTOR);
 		return 1;
@@ -6411,12 +6433,7 @@
 	memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
 	memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
 
-	/*
-	 * Allow direct access to the PC debug port (it is often used for I/O
-	 * delays, but the vmexits simply slow things down).
-	 */
 	memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
-	clear_bit(0x80, vmx_io_bitmap_a);
 
 	memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
 
@@ -6431,8 +6448,10 @@
 	if (boot_cpu_has(X86_FEATURE_NX))
 		kvm_enable_efer_bits(EFER_NX);
 
-	if (!cpu_has_vmx_vpid())
+	if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
+		!(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
 		enable_vpid = 0;
+
 	if (!cpu_has_vmx_shadow_vmcs())
 		enable_shadow_vmcs = 0;
 	if (enable_shadow_vmcs)
@@ -7206,9 +7225,8 @@
 static int handle_vmclear(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	u32 zero = 0;
 	gpa_t vmptr;
-	struct vmcs12 *vmcs12;
-	struct page *page;
 
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
@@ -7219,22 +7237,9 @@
 	if (vmptr == vmx->nested.current_vmptr)
 		nested_release_vmcs12(vmx);
 
-	page = nested_get_page(vcpu, vmptr);
-	if (page == NULL) {
-		/*
-		 * For accurate processor emulation, VMCLEAR beyond available
-		 * physical memory should do nothing at all. However, it is
-		 * possible that a nested vmx bug, not a guest hypervisor bug,
-		 * resulted in this case, so let's shut down before doing any
-		 * more damage:
-		 */
-		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
-		return 1;
-	}
-	vmcs12 = kmap(page);
-	vmcs12->launch_state = 0;
-	kunmap(page);
-	nested_release_page(page);
+	kvm_vcpu_write_guest(vcpu,
+			vmptr + offsetof(struct vmcs12, launch_state),
+			&zero, sizeof(zero));
 
 	nested_free_vmcs02(vmx, vmptr);
 
@@ -8511,6 +8516,7 @@
 	} else {
 		sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
 		sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+		vmx_flush_tlb_ept_only(vcpu);
 	}
 	vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
 
@@ -8536,8 +8542,10 @@
 	 */
 	if (!is_guest_mode(vcpu) ||
 	    !nested_cpu_has2(get_vmcs12(&vmx->vcpu),
-			     SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
+			     SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
 		vmcs_write64(APIC_ACCESS_ADDR, hpa);
+		vmx_flush_tlb_ept_only(vcpu);
+	}
 }
 
 static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
@@ -8949,6 +8957,7 @@
 		/* Save guest registers, load host registers, keep flags */
 		"mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
 		"pop %0 \n\t"
+		"setbe %c[fail](%0)\n\t"
 		"mov %%" _ASM_AX ", %c[rax](%0) \n\t"
 		"mov %%" _ASM_BX ", %c[rbx](%0) \n\t"
 		__ASM_SIZE(pop) " %c[rcx](%0) \n\t"
@@ -8965,12 +8974,23 @@
 		"mov %%r13, %c[r13](%0) \n\t"
 		"mov %%r14, %c[r14](%0) \n\t"
 		"mov %%r15, %c[r15](%0) \n\t"
+		"xor %%r8d,  %%r8d \n\t"
+		"xor %%r9d,  %%r9d \n\t"
+		"xor %%r10d, %%r10d \n\t"
+		"xor %%r11d, %%r11d \n\t"
+		"xor %%r12d, %%r12d \n\t"
+		"xor %%r13d, %%r13d \n\t"
+		"xor %%r14d, %%r14d \n\t"
+		"xor %%r15d, %%r15d \n\t"
 #endif
 		"mov %%cr2, %%" _ASM_AX "   \n\t"
 		"mov %%" _ASM_AX ", %c[cr2](%0) \n\t"
 
+		"xor %%eax, %%eax \n\t"
+		"xor %%ebx, %%ebx \n\t"
+		"xor %%esi, %%esi \n\t"
+		"xor %%edi, %%edi \n\t"
 		"pop  %%" _ASM_BP "; pop  %%" _ASM_DX " \n\t"
-		"setbe %c[fail](%0) \n\t"
 		".pushsection .rodata \n\t"
 		".global vmx_return \n\t"
 		"vmx_return: " _ASM_PTR " 2b \n\t"
@@ -9007,6 +9027,9 @@
 #endif
 	      );
 
+	/* Eliminate branch target predictions from guest mode */
+	vmexit_fill_RSB();
+
 	/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
 	if (debugctlmsr)
 		update_debugctlmsr(debugctlmsr);
@@ -9560,10 +9583,8 @@
 		return false;
 
 	page = nested_get_page(vcpu, vmcs12->msr_bitmap);
-	if (!page) {
-		WARN_ON(1);
+	if (!page)
 		return false;
-	}
 	msr_bitmap_l1 = (unsigned long *)kmap(page);
 	if (!msr_bitmap_l1) {
 		nested_release_page_clean(page);
@@ -10112,6 +10133,9 @@
 	if (nested_cpu_has_ept(vmcs12)) {
 		kvm_mmu_unload(vcpu);
 		nested_ept_init_mmu_context(vcpu);
+	} else if (nested_cpu_has2(vmcs12,
+				   SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+		vmx_flush_tlb_ept_only(vcpu);
 	}
 
 	if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)
@@ -10715,6 +10739,8 @@
 	vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
 	vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
 	vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
+	vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
+	vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);
 
 	/* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1.  */
 	if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
@@ -10850,6 +10876,10 @@
 		vmx->nested.change_vmcs01_virtual_x2apic_mode = false;
 		vmx_set_virtual_x2apic_mode(vcpu,
 				vcpu->arch.apic_base & X2APIC_ENABLE);
+	} else if (!nested_cpu_has_ept(vmcs12) &&
+		   nested_cpu_has2(vmcs12,
+				   SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+		vmx_flush_tlb_ept_only(vcpu);
 	}
 
 	/* This is needed for same reason as it was needed in prepare_vmcs02 */
@@ -10899,8 +10929,10 @@
  */
 static void vmx_leave_nested(struct kvm_vcpu *vcpu)
 {
-	if (is_guest_mode(vcpu))
+	if (is_guest_mode(vcpu)) {
+		to_vmx(vcpu)->nested.nested_run_pending = 0;
 		nested_vmx_vmexit(vcpu, -1, 0, 0);
+	}
 	free_nested(to_vmx(vcpu));
 }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 595f814..d3f80cc 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -773,7 +773,8 @@
 			return 1;
 
 		/* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */
-		if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
+		if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_ASID_MASK) ||
+		    !is_long_mode(vcpu))
 			return 1;
 	}
 
@@ -1797,6 +1798,9 @@
 	 */
 	BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
 
+	if (guest_hv_clock.version & 1)
+		++guest_hv_clock.version;  /* first time write, random junk */
+
 	vcpu->hv_clock.version = guest_hv_clock.version + 1;
 	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
 				&vcpu->hv_clock,
@@ -4260,7 +4264,7 @@
 					 addr, n, v))
 		    && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v))
 			break;
-		trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v);
+		trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, v);
 		handled += n;
 		addr += n;
 		len -= n;
@@ -4513,7 +4517,7 @@
 {
 	if (vcpu->mmio_read_completed) {
 		trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
-			       vcpu->mmio_fragments[0].gpa, *(u64 *)val);
+			       vcpu->mmio_fragments[0].gpa, val);
 		vcpu->mmio_read_completed = 0;
 		return 1;
 	}
@@ -4535,14 +4539,14 @@
 
 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
 {
-	trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
+	trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, val);
 	return vcpu_mmio_write(vcpu, gpa, bytes, val);
 }
 
 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
 			  void *val, int bytes)
 {
-	trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
+	trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, NULL);
 	return X86EMUL_IO_NEEDED;
 }
 
@@ -5576,6 +5580,8 @@
 			if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
 						emulation_type))
 				return EMULATE_DONE;
+			if (ctxt->have_exception && inject_emulated_exception(vcpu))
+				return EMULATE_DONE;
 			if (emulation_type & EMULTYPE_SKIP)
 				return EMULATE_FAIL;
 			return handle_emulation_failure(vcpu);
@@ -6521,6 +6527,20 @@
 	kvm_x86_ops->tlb_flush(vcpu);
 }
 
+void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+		unsigned long start, unsigned long end)
+{
+	unsigned long apic_address;
+
+	/*
+	 * The physical address of apic access page is stored in the VMCS.
+	 * Update it when it becomes invalid.
+	 */
+	apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
+	if (start <= apic_address && apic_address < end)
+		kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
+}
+
 void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
 {
 	struct page *page = NULL;
@@ -7113,7 +7133,7 @@
 #endif
 
 	kvm_rip_write(vcpu, regs->rip);
-	kvm_set_rflags(vcpu, regs->rflags);
+	kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED);
 
 	vcpu->arch.exception.pending = false;
 
@@ -8424,11 +8444,11 @@
 {
 	struct x86_exception fault;
 
-	trace_kvm_async_pf_ready(work->arch.token, work->gva);
 	if (work->wakeup_all)
 		work->arch.token = ~0; /* broadcast wakeup */
 	else
 		kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
+	trace_kvm_async_pf_ready(work->arch.token, work->gva);
 
 	if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
 	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 34a7413..6bf1898 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -25,6 +25,7 @@
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
 lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o
 lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
+lib-$(CONFIG_RETPOLINE) += retpoline.o
 
 obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
 
diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
index 4d34bb5..46e71a7 100644
--- a/arch/x86/lib/checksum_32.S
+++ b/arch/x86/lib/checksum_32.S
@@ -29,7 +29,8 @@
 #include <asm/errno.h>
 #include <asm/asm.h>
 #include <asm/export.h>
-				
+#include <asm/nospec-branch.h>
+
 /*
  * computes a partial checksum, e.g. for TCP/UDP fragments
  */
@@ -156,7 +157,7 @@
 	negl %ebx
 	lea 45f(%ebx,%ebx,2), %ebx
 	testl %esi, %esi
-	jmp *%ebx
+	JMP_NOSPEC %ebx
 
 	# Handle 2-byte-aligned regions
 20:	addw (%esi), %ax
@@ -439,7 +440,7 @@
 	andl $-32,%edx
 	lea 3f(%ebx,%ebx), %ebx
 	testl %esi, %esi 
-	jmp *%ebx
+	JMP_NOSPEC %ebx
 1:	addl $64,%esi
 	addl $64,%edi 
 	SRC(movb -32(%edx),%bl)	; SRC(movb (%edx),%bl)
diff --git a/arch/x86/lib/cmdline.c b/arch/x86/lib/cmdline.c
index 5cc78bf..3261abb 100644
--- a/arch/x86/lib/cmdline.c
+++ b/arch/x86/lib/cmdline.c
@@ -104,7 +104,112 @@
 	return 0;	/* Buffer overrun */
 }
 
+/*
+ * Find a non-boolean option (i.e. option=argument). In accordance with
+ * standard Linux practice, if this option is repeated, this returns the
+ * last instance on the command line.
+ *
+ * @cmdline: the cmdline string
+ * @max_cmdline_size: the maximum size of cmdline
+ * @option: option string to look for
+ * @buffer: memory buffer to return the option argument
+ * @bufsize: size of the supplied memory buffer
+ *
+ * Returns the length of the argument (regardless of if it was
+ * truncated to fit in the buffer), or -1 on not found.
+ */
+static int
+__cmdline_find_option(const char *cmdline, int max_cmdline_size,
+		      const char *option, char *buffer, int bufsize)
+{
+	char c;
+	int pos = 0, len = -1;
+	const char *opptr = NULL;
+	char *bufptr = buffer;
+	enum {
+		st_wordstart = 0,	/* Start of word/after whitespace */
+		st_wordcmp,	/* Comparing this word */
+		st_wordskip,	/* Miscompare, skip */
+		st_bufcpy,	/* Copying this to buffer */
+	} state = st_wordstart;
+
+	if (!cmdline)
+		return -1;      /* No command line */
+
+	/*
+	 * This 'pos' check ensures we do not overrun
+	 * a non-NULL-terminated 'cmdline'
+	 */
+	while (pos++ < max_cmdline_size) {
+		c = *(char *)cmdline++;
+		if (!c)
+			break;
+
+		switch (state) {
+		case st_wordstart:
+			if (myisspace(c))
+				break;
+
+			state = st_wordcmp;
+			opptr = option;
+			/* fall through */
+
+		case st_wordcmp:
+			if ((c == '=') && !*opptr) {
+				/*
+				 * We matched all the way to the end of the
+				 * option we were looking for, prepare to
+				 * copy the argument.
+				 */
+				len = 0;
+				bufptr = buffer;
+				state = st_bufcpy;
+				break;
+			} else if (c == *opptr++) {
+				/*
+				 * We are currently matching, so continue
+				 * to the next character on the cmdline.
+				 */
+				break;
+			}
+			state = st_wordskip;
+			/* fall through */
+
+		case st_wordskip:
+			if (myisspace(c))
+				state = st_wordstart;
+			break;
+
+		case st_bufcpy:
+			if (myisspace(c)) {
+				state = st_wordstart;
+			} else {
+				/*
+				 * Increment len, but don't overrun the
+				 * supplied buffer and leave room for the
+				 * NULL terminator.
+				 */
+				if (++len < bufsize)
+					*bufptr++ = c;
+			}
+			break;
+		}
+	}
+
+	if (bufsize)
+		*bufptr = '\0';
+
+	return len;
+}
+
 int cmdline_find_option_bool(const char *cmdline, const char *option)
 {
 	return __cmdline_find_option_bool(cmdline, COMMAND_LINE_SIZE, option);
 }
+
+int cmdline_find_option(const char *cmdline, const char *option, char *buffer,
+			int bufsize)
+{
+	return __cmdline_find_option(cmdline, COMMAND_LINE_SIZE, option,
+				     buffer, bufsize);
+}
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
new file mode 100644
index 0000000..cb45c6c
--- /dev/null
+++ b/arch/x86/lib/retpoline.S
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/stringify.h>
+#include <linux/linkage.h>
+#include <asm/dwarf2.h>
+#include <asm/cpufeatures.h>
+#include <asm/alternative-asm.h>
+#include <asm/export.h>
+#include <asm/nospec-branch.h>
+
+.macro THUNK reg
+	.section .text.__x86.indirect_thunk.\reg
+
+ENTRY(__x86_indirect_thunk_\reg)
+	CFI_STARTPROC
+	JMP_NOSPEC %\reg
+	CFI_ENDPROC
+ENDPROC(__x86_indirect_thunk_\reg)
+.endm
+
+/*
+ * Despite being an assembler file we can't just use .irp here
+ * because __KSYM_DEPS__ only uses the C preprocessor and would
+ * only see one instance of "__x86_indirect_thunk_\reg" rather
+ * than one per register with the correct names. So we do it
+ * the simple and nasty way...
+ */
+#define EXPORT_THUNK(reg) EXPORT_SYMBOL(__x86_indirect_thunk_ ## reg)
+#define GENERATE_THUNK(reg) THUNK reg ; EXPORT_THUNK(reg)
+
+GENERATE_THUNK(_ASM_AX)
+GENERATE_THUNK(_ASM_BX)
+GENERATE_THUNK(_ASM_CX)
+GENERATE_THUNK(_ASM_DX)
+GENERATE_THUNK(_ASM_SI)
+GENERATE_THUNK(_ASM_DI)
+GENERATE_THUNK(_ASM_BP)
+GENERATE_THUNK(_ASM_SP)
+#ifdef CONFIG_64BIT
+GENERATE_THUNK(r8)
+GENERATE_THUNK(r9)
+GENERATE_THUNK(r10)
+GENERATE_THUNK(r11)
+GENERATE_THUNK(r12)
+GENERATE_THUNK(r13)
+GENERATE_THUNK(r14)
+GENERATE_THUNK(r15)
+#endif
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
index 767be7c..1754e09 100644
--- a/arch/x86/lib/x86-opcode-map.txt
+++ b/arch/x86/lib/x86-opcode-map.txt
@@ -896,7 +896,7 @@
 
 GrpTable: Grp3_1
 0: TEST Eb,Ib
-1:
+1: TEST Eb,Ib
 2: NOT Eb
 3: NEG Eb
 4: MUL AL,Eb
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 96d2b84..c548b46 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -37,5 +37,5 @@
 
 obj-$(CONFIG_X86_INTEL_MPX)	+= mpx.o
 obj-$(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS) += pkeys.o
-obj-$(CONFIG_RANDOMIZE_MEMORY) += kaslr.o
-
+obj-$(CONFIG_RANDOMIZE_MEMORY)	+= kaslr.o
+obj-$(CONFIG_PAGE_TABLE_ISOLATION)		+= kaiser.o
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 1dd7960..8b5ff88 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1393,7 +1393,17 @@
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.  Since we never set FAULT_FLAG_RETRY_NOWAIT, if
 	 * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
+	 *
+	 * Note that handle_userfault() may also release and reacquire mmap_sem
+	 * (and not return with VM_FAULT_RETRY), when returning to userland to
+	 * repeat the page fault later with a VM_FAULT_NOPAGE retval
+	 * (potentially after handling any pending signal during the return to
+	 * userland). The return to userland is identified whenever
+	 * FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags.
+	 * Thus we have to be careful about not touching vma after handling the
+	 * fault, so we read the pkey beforehand.
 	 */
+	pkey = vma_pkey(vma);
 	fault = handle_mm_fault(vma, address, flags);
 	major |= fault & VM_FAULT_MAJOR;
 
@@ -1420,7 +1430,6 @@
 		return;
 	}
 
-	pkey = vma_pkey(vma);
 	up_read(&mm->mmap_sem);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		mm_fault_error(regs, error_code, address, &pkey, fault);
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 889e761..f92bdb9 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -177,7 +177,7 @@
 		cr4_set_bits_and_update_boot(X86_CR4_PSE);
 
 	/* Enable PGE if available */
-	if (boot_cpu_has(X86_FEATURE_PGE)) {
+	if (boot_cpu_has(X86_FEATURE_PGE) && !kaiser_enabled) {
 		cr4_set_bits_and_update_boot(X86_CR4_PGE);
 		__supported_pte_mask |= _PAGE_GLOBAL;
 	} else
@@ -764,13 +764,11 @@
 }
 
 DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
-#ifdef CONFIG_SMP
 	.active_mm = &init_mm,
 	.state = 0,
-#endif
 	.cr4 = ~0UL,	/* fail hard if we screw up cr4 shadow initialization */
 };
-EXPORT_SYMBOL_GPL(cpu_tlbstate);
+EXPORT_PER_CPU_SYMBOL(cpu_tlbstate);
 
 void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
 {
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 3e27ded..7df8e3a 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -324,6 +324,16 @@
 			continue;
 		if (vaddr < (unsigned long) _text || vaddr > end)
 			set_pmd(pmd, __pmd(0));
+		else if (kaiser_enabled) {
+			/*
+			 * level2_kernel_pgt is initialized with _PAGE_GLOBAL:
+			 * clear that now.  This is not important, so long as
+			 * CR4.PGE remains clear, but it removes an anomaly.
+			 * Physical mapping setup below avoids _PAGE_GLOBAL
+			 * by use of massage_pgprot() inside pfn_pte() etc.
+			 */
+			set_pmd(pmd, pmd_clear_flags(*pmd, _PAGE_GLOBAL));
+		}
 	}
 }
 
diff --git a/arch/x86/mm/kaiser.c b/arch/x86/mm/kaiser.c
new file mode 100644
index 0000000..a8ade08
--- /dev/null
+++ b/arch/x86/mm/kaiser.c
@@ -0,0 +1,483 @@
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt)     "Kernel/User page tables isolation: " fmt
+
+#include <asm/kaiser.h>
+#include <asm/tlbflush.h>	/* to verify its kaiser declarations */
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/desc.h>
+#include <asm/cmdline.h>
+#include <asm/vsyscall.h>
+
+int kaiser_enabled __read_mostly = 1;
+EXPORT_SYMBOL(kaiser_enabled);	/* for inlined TLB flush functions */
+
+__visible
+DEFINE_PER_CPU_USER_MAPPED(unsigned long, unsafe_stack_register_backup);
+
+/*
+ * These can have bit 63 set, so we can not just use a plain "or"
+ * instruction to get their value or'd into CR3.  It would take
+ * another register.  So, we use a memory reference to these instead.
+ *
+ * This is also handy because systems that do not support PCIDs
+ * just end up or'ing a 0 into their CR3, which does no harm.
+ */
+DEFINE_PER_CPU(unsigned long, x86_cr3_pcid_user);
+
+/*
+ * At runtime, the only things we map are some things for CPU
+ * hotplug, and stacks for new processes.  No two CPUs will ever
+ * be populating the same addresses, so we only need to ensure
+ * that we protect between two CPUs trying to allocate and
+ * populate the same page table page.
+ *
+ * Only take this lock when doing a set_p[4um]d(), but it is not
+ * needed for doing a set_pte().  We assume that only the *owner*
+ * of a given allocation will be doing this for _their_
+ * allocation.
+ *
+ * This ensures that once a system has been running for a while
+ * and there have been stacks all over and these page tables
+ * are fully populated, there will be no further acquisitions of
+ * this lock.
+ */
+static DEFINE_SPINLOCK(shadow_table_allocation_lock);
+
+/*
+ * Returns -1 on error.
+ */
+static inline unsigned long get_pa_from_mapping(unsigned long vaddr)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+
+	pgd = pgd_offset_k(vaddr);
+	/*
+	 * We made all the kernel PGDs present in kaiser_init().
+	 * We expect them to stay that way.
+	 */
+	BUG_ON(pgd_none(*pgd));
+	/*
+	 * PGDs are either 512GB or 128TB on all x86_64
+	 * configurations.  We don't handle these.
+	 */
+	BUG_ON(pgd_large(*pgd));
+
+	pud = pud_offset(pgd, vaddr);
+	if (pud_none(*pud)) {
+		WARN_ON_ONCE(1);
+		return -1;
+	}
+
+	if (pud_large(*pud))
+		return (pud_pfn(*pud) << PAGE_SHIFT) | (vaddr & ~PUD_PAGE_MASK);
+
+	pmd = pmd_offset(pud, vaddr);
+	if (pmd_none(*pmd)) {
+		WARN_ON_ONCE(1);
+		return -1;
+	}
+
+	if (pmd_large(*pmd))
+		return (pmd_pfn(*pmd) << PAGE_SHIFT) | (vaddr & ~PMD_PAGE_MASK);
+
+	pte = pte_offset_kernel(pmd, vaddr);
+	if (pte_none(*pte)) {
+		WARN_ON_ONCE(1);
+		return -1;
+	}
+
+	return (pte_pfn(*pte) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK);
+}
+
+/*
+ * This is a relatively normal page table walk, except that it
+ * also tries to allocate page tables pages along the way.
+ *
+ * Returns a pointer to a PTE on success, or NULL on failure.
+ */
+static pte_t *kaiser_pagetable_walk(unsigned long address, bool user)
+{
+	pmd_t *pmd;
+	pud_t *pud;
+	pgd_t *pgd = native_get_shadow_pgd(pgd_offset_k(address));
+	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
+	unsigned long prot = _KERNPG_TABLE;
+
+	if (pgd_none(*pgd)) {
+		WARN_ONCE(1, "All shadow pgds should have been populated");
+		return NULL;
+	}
+	BUILD_BUG_ON(pgd_large(*pgd) != 0);
+
+	if (user) {
+		/*
+		 * The vsyscall page is the only page that will have
+		 *  _PAGE_USER set. Catch everything else.
+		 */
+		BUG_ON(address != VSYSCALL_ADDR);
+
+		set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER));
+		prot = _PAGE_TABLE;
+	}
+
+	pud = pud_offset(pgd, address);
+	/* The shadow page tables do not use large mappings: */
+	if (pud_large(*pud)) {
+		WARN_ON(1);
+		return NULL;
+	}
+	if (pud_none(*pud)) {
+		unsigned long new_pmd_page = __get_free_page(gfp);
+		if (!new_pmd_page)
+			return NULL;
+		spin_lock(&shadow_table_allocation_lock);
+		if (pud_none(*pud)) {
+			set_pud(pud, __pud(prot | __pa(new_pmd_page)));
+			__inc_zone_page_state(virt_to_page((void *)
+						new_pmd_page), NR_KAISERTABLE);
+		} else
+			free_page(new_pmd_page);
+		spin_unlock(&shadow_table_allocation_lock);
+	}
+
+	pmd = pmd_offset(pud, address);
+	/* The shadow page tables do not use large mappings: */
+	if (pmd_large(*pmd)) {
+		WARN_ON(1);
+		return NULL;
+	}
+	if (pmd_none(*pmd)) {
+		unsigned long new_pte_page = __get_free_page(gfp);
+		if (!new_pte_page)
+			return NULL;
+		spin_lock(&shadow_table_allocation_lock);
+		if (pmd_none(*pmd)) {
+			set_pmd(pmd, __pmd(prot | __pa(new_pte_page)));
+			__inc_zone_page_state(virt_to_page((void *)
+						new_pte_page), NR_KAISERTABLE);
+		} else
+			free_page(new_pte_page);
+		spin_unlock(&shadow_table_allocation_lock);
+	}
+
+	return pte_offset_kernel(pmd, address);
+}
+
+static int kaiser_add_user_map(const void *__start_addr, unsigned long size,
+			       unsigned long flags)
+{
+	int ret = 0;
+	pte_t *pte;
+	unsigned long start_addr = (unsigned long )__start_addr;
+	unsigned long address = start_addr & PAGE_MASK;
+	unsigned long end_addr = PAGE_ALIGN(start_addr + size);
+	unsigned long target_address;
+
+	/*
+	 * It is convenient for callers to pass in __PAGE_KERNEL etc,
+	 * and there is no actual harm from setting _PAGE_GLOBAL, so
+	 * long as CR4.PGE is not set.  But it is nonetheless troubling
+	 * to see Kaiser itself setting _PAGE_GLOBAL (now that "nokaiser"
+	 * requires that not to be #defined to 0): so mask it off here.
+	 */
+	flags &= ~_PAGE_GLOBAL;
+	if (!(__supported_pte_mask & _PAGE_NX))
+		flags &= ~_PAGE_NX;
+
+	for (; address < end_addr; address += PAGE_SIZE) {
+		target_address = get_pa_from_mapping(address);
+		if (target_address == -1) {
+			ret = -EIO;
+			break;
+		}
+		pte = kaiser_pagetable_walk(address, flags & _PAGE_USER);
+		if (!pte) {
+			ret = -ENOMEM;
+			break;
+		}
+		if (pte_none(*pte)) {
+			set_pte(pte, __pte(flags | target_address));
+		} else {
+			pte_t tmp;
+			set_pte(&tmp, __pte(flags | target_address));
+			WARN_ON_ONCE(!pte_same(*pte, tmp));
+		}
+	}
+	return ret;
+}
+
+static int kaiser_add_user_map_ptrs(const void *start, const void *end, unsigned long flags)
+{
+	unsigned long size = end - start;
+
+	return kaiser_add_user_map(start, size, flags);
+}
+
+/*
+ * Ensure that the top level of the (shadow) page tables are
+ * entirely populated.  This ensures that all processes that get
+ * forked have the same entries.  This way, we do not have to
+ * ever go set up new entries in older processes.
+ *
+ * Note: we never free these, so there are no updates to them
+ * after this.
+ */
+static void __init kaiser_init_all_pgds(void)
+{
+	pgd_t *pgd;
+	int i = 0;
+
+	pgd = native_get_shadow_pgd(pgd_offset_k((unsigned long )0));
+	for (i = PTRS_PER_PGD / 2; i < PTRS_PER_PGD; i++) {
+		pgd_t new_pgd;
+		pud_t *pud = pud_alloc_one(&init_mm,
+					   PAGE_OFFSET + i * PGDIR_SIZE);
+		if (!pud) {
+			WARN_ON(1);
+			break;
+		}
+		inc_zone_page_state(virt_to_page(pud), NR_KAISERTABLE);
+		new_pgd = __pgd(_KERNPG_TABLE |__pa(pud));
+		/*
+		 * Make sure not to stomp on some other pgd entry.
+		 */
+		if (!pgd_none(pgd[i])) {
+			WARN_ON(1);
+			continue;
+		}
+		set_pgd(pgd + i, new_pgd);
+	}
+}
+
+#define kaiser_add_user_map_early(start, size, flags) do {	\
+	int __ret = kaiser_add_user_map(start, size, flags);	\
+	WARN_ON(__ret);						\
+} while (0)
+
+#define kaiser_add_user_map_ptrs_early(start, end, flags) do {		\
+	int __ret = kaiser_add_user_map_ptrs(start, end, flags);	\
+	WARN_ON(__ret);							\
+} while (0)
+
+void __init kaiser_check_boottime_disable(void)
+{
+	bool enable = true;
+	char arg[5];
+	int ret;
+
+	if (boot_cpu_has(X86_FEATURE_XENPV))
+		goto silent_disable;
+
+	ret = cmdline_find_option(boot_command_line, "pti", arg, sizeof(arg));
+	if (ret > 0) {
+		if (!strncmp(arg, "on", 2))
+			goto enable;
+
+		if (!strncmp(arg, "off", 3))
+			goto disable;
+
+		if (!strncmp(arg, "auto", 4))
+			goto skip;
+	}
+
+	if (cmdline_find_option_bool(boot_command_line, "nopti"))
+		goto disable;
+
+skip:
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+		goto disable;
+
+enable:
+	if (enable)
+		setup_force_cpu_cap(X86_FEATURE_KAISER);
+
+	return;
+
+disable:
+	pr_info("disabled\n");
+
+silent_disable:
+	kaiser_enabled = 0;
+	setup_clear_cpu_cap(X86_FEATURE_KAISER);
+}
+
+/*
+ * If anything in here fails, we will likely die on one of the
+ * first kernel->user transitions and init will die.  But, we
+ * will have most of the kernel up by then and should be able to
+ * get a clean warning out of it.  If we BUG_ON() here, we run
+ * the risk of being before we have good console output.
+ */
+void __init kaiser_init(void)
+{
+	int cpu;
+
+	if (!kaiser_enabled)
+		return;
+
+	kaiser_init_all_pgds();
+
+	/*
+	 * Note that this sets _PAGE_USER and it needs to happen when the
+	 * pagetable hierarchy gets created, i.e., early. Otherwise
+	 * kaiser_pagetable_walk() will encounter initialized PTEs in the
+	 * hierarchy and not set the proper permissions, leading to the
+	 * pagefaults with page-protection violations when trying to read the
+	 * vsyscall page. For example.
+	 */
+	if (vsyscall_enabled())
+		kaiser_add_user_map_early((void *)VSYSCALL_ADDR,
+					  PAGE_SIZE,
+					   __PAGE_KERNEL_VSYSCALL);
+
+	for_each_possible_cpu(cpu) {
+		void *percpu_vaddr = __per_cpu_user_mapped_start +
+				     per_cpu_offset(cpu);
+		unsigned long percpu_sz = __per_cpu_user_mapped_end -
+					  __per_cpu_user_mapped_start;
+		kaiser_add_user_map_early(percpu_vaddr, percpu_sz,
+					  __PAGE_KERNEL);
+	}
+
+	/*
+	 * Map the entry/exit text section, which is needed at
+	 * switches from user to and from kernel.
+	 */
+	kaiser_add_user_map_ptrs_early(__entry_text_start, __entry_text_end,
+				       __PAGE_KERNEL_RX);
+
+#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
+	kaiser_add_user_map_ptrs_early(__irqentry_text_start,
+				       __irqentry_text_end,
+				       __PAGE_KERNEL_RX);
+#endif
+	kaiser_add_user_map_early((void *)idt_descr.address,
+				  sizeof(gate_desc) * NR_VECTORS,
+				  __PAGE_KERNEL_RO);
+#ifdef CONFIG_TRACING
+	kaiser_add_user_map_early(&trace_idt_descr,
+				  sizeof(trace_idt_descr),
+				  __PAGE_KERNEL);
+	kaiser_add_user_map_early(&trace_idt_table,
+				  sizeof(gate_desc) * NR_VECTORS,
+				  __PAGE_KERNEL);
+#endif
+	kaiser_add_user_map_early(&debug_idt_descr, sizeof(debug_idt_descr),
+				  __PAGE_KERNEL);
+	kaiser_add_user_map_early(&debug_idt_table,
+				  sizeof(gate_desc) * NR_VECTORS,
+				  __PAGE_KERNEL);
+
+	pr_info("enabled\n");
+}
+
+/* Add a mapping to the shadow mapping, and synchronize the mappings */
+int kaiser_add_mapping(unsigned long addr, unsigned long size, unsigned long flags)
+{
+	if (!kaiser_enabled)
+		return 0;
+	return kaiser_add_user_map((const void *)addr, size, flags);
+}
+
+void kaiser_remove_mapping(unsigned long start, unsigned long size)
+{
+	extern void unmap_pud_range_nofree(pgd_t *pgd,
+				unsigned long start, unsigned long end);
+	unsigned long end = start + size;
+	unsigned long addr, next;
+	pgd_t *pgd;
+
+	if (!kaiser_enabled)
+		return;
+	pgd = native_get_shadow_pgd(pgd_offset_k(start));
+	for (addr = start; addr < end; pgd++, addr = next) {
+		next = pgd_addr_end(addr, end);
+		unmap_pud_range_nofree(pgd, addr, next);
+	}
+}
+
+/*
+ * Page table pages are page-aligned.  The lower half of the top
+ * level is used for userspace and the top half for the kernel.
+ * This returns true for user pages that need to get copied into
+ * both the user and kernel copies of the page tables, and false
+ * for kernel pages that should only be in the kernel copy.
+ */
+static inline bool is_userspace_pgd(pgd_t *pgdp)
+{
+	return ((unsigned long)pgdp % PAGE_SIZE) < (PAGE_SIZE / 2);
+}
+
+pgd_t kaiser_set_shadow_pgd(pgd_t *pgdp, pgd_t pgd)
+{
+	if (!kaiser_enabled)
+		return pgd;
+	/*
+	 * Do we need to also populate the shadow pgd?  Check _PAGE_USER to
+	 * skip cases like kexec and EFI which make temporary low mappings.
+	 */
+	if (pgd.pgd & _PAGE_USER) {
+		if (is_userspace_pgd(pgdp)) {
+			native_get_shadow_pgd(pgdp)->pgd = pgd.pgd;
+			/*
+			 * Even if the entry is *mapping* userspace, ensure
+			 * that userspace can not use it.  This way, if we
+			 * get out to userspace running on the kernel CR3,
+			 * userspace will crash instead of running.
+			 */
+			if (__supported_pte_mask & _PAGE_NX)
+				pgd.pgd |= _PAGE_NX;
+		}
+	} else if (!pgd.pgd) {
+		/*
+		 * pgd_clear() cannot check _PAGE_USER, and is even used to
+		 * clear corrupted pgd entries: so just rely on cases like
+		 * kexec and EFI never to be using pgd_clear().
+		 */
+		if (!WARN_ON_ONCE((unsigned long)pgdp & PAGE_SIZE) &&
+		    is_userspace_pgd(pgdp))
+			native_get_shadow_pgd(pgdp)->pgd = pgd.pgd;
+	}
+	return pgd;
+}
+
+void kaiser_setup_pcid(void)
+{
+	unsigned long user_cr3 = KAISER_SHADOW_PGD_OFFSET;
+
+	if (this_cpu_has(X86_FEATURE_PCID))
+		user_cr3 |= X86_CR3_PCID_USER_NOFLUSH;
+	/*
+	 * These variables are used by the entry/exit
+	 * code to change PCID and pgd and TLB flushing.
+	 */
+	this_cpu_write(x86_cr3_pcid_user, user_cr3);
+}
+
+/*
+ * Make a note that this cpu will need to flush USER tlb on return to user.
+ * If cpu does not have PCID, then the NOFLUSH bit will never have been set.
+ */
+void kaiser_flush_tlb_on_return_to_user(void)
+{
+	if (this_cpu_has(X86_FEATURE_PCID))
+		this_cpu_write(x86_cr3_pcid_user,
+			X86_CR3_PCID_USER_FLUSH | KAISER_SHADOW_PGD_OFFSET);
+}
+EXPORT_SYMBOL(kaiser_flush_tlb_on_return_to_user);
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index aed2064..319183d 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -189,6 +189,6 @@
 		*pud_tramp = *pud;
 	}
 
-	set_pgd(&trampoline_pgd_entry,
-		__pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
+	/* Avoid set_pgd(), in case it's complicated by CONFIG_PAGE_TABLE_ISOLATION */
+	trampoline_pgd_entry = __pgd(_KERNPG_TABLE | __pa(pud_page_tramp));
 }
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index e3353c9..73dcb0e1 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -52,6 +52,7 @@
 #define CPA_FLUSHTLB 1
 #define CPA_ARRAY 2
 #define CPA_PAGES_ARRAY 4
+#define CPA_FREE_PAGETABLES 8
 
 #ifdef CONFIG_PROC_FS
 static unsigned long direct_pages_count[PG_LEVEL_NUM];
@@ -729,10 +730,13 @@
 	return 0;
 }
 
-static bool try_to_free_pte_page(pte_t *pte)
+static bool try_to_free_pte_page(struct cpa_data *cpa, pte_t *pte)
 {
 	int i;
 
+	if (!(cpa->flags & CPA_FREE_PAGETABLES))
+		return false;
+
 	for (i = 0; i < PTRS_PER_PTE; i++)
 		if (!pte_none(pte[i]))
 			return false;
@@ -741,10 +745,13 @@
 	return true;
 }
 
-static bool try_to_free_pmd_page(pmd_t *pmd)
+static bool try_to_free_pmd_page(struct cpa_data *cpa, pmd_t *pmd)
 {
 	int i;
 
+	if (!(cpa->flags & CPA_FREE_PAGETABLES))
+		return false;
+
 	for (i = 0; i < PTRS_PER_PMD; i++)
 		if (!pmd_none(pmd[i]))
 			return false;
@@ -753,7 +760,9 @@
 	return true;
 }
 
-static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end)
+static bool unmap_pte_range(struct cpa_data *cpa, pmd_t *pmd,
+			    unsigned long start,
+			    unsigned long end)
 {
 	pte_t *pte = pte_offset_kernel(pmd, start);
 
@@ -764,22 +773,23 @@
 		pte++;
 	}
 
-	if (try_to_free_pte_page((pte_t *)pmd_page_vaddr(*pmd))) {
+	if (try_to_free_pte_page(cpa, (pte_t *)pmd_page_vaddr(*pmd))) {
 		pmd_clear(pmd);
 		return true;
 	}
 	return false;
 }
 
-static void __unmap_pmd_range(pud_t *pud, pmd_t *pmd,
+static void __unmap_pmd_range(struct cpa_data *cpa, pud_t *pud, pmd_t *pmd,
 			      unsigned long start, unsigned long end)
 {
-	if (unmap_pte_range(pmd, start, end))
-		if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
+	if (unmap_pte_range(cpa, pmd, start, end))
+		if (try_to_free_pmd_page(cpa, (pmd_t *)pud_page_vaddr(*pud)))
 			pud_clear(pud);
 }
 
-static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end)
+static void unmap_pmd_range(struct cpa_data *cpa, pud_t *pud,
+			    unsigned long start, unsigned long end)
 {
 	pmd_t *pmd = pmd_offset(pud, start);
 
@@ -790,7 +800,7 @@
 		unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
 		unsigned long pre_end = min_t(unsigned long, end, next_page);
 
-		__unmap_pmd_range(pud, pmd, start, pre_end);
+		__unmap_pmd_range(cpa, pud, pmd, start, pre_end);
 
 		start = pre_end;
 		pmd++;
@@ -803,7 +813,8 @@
 		if (pmd_large(*pmd))
 			pmd_clear(pmd);
 		else
-			__unmap_pmd_range(pud, pmd, start, start + PMD_SIZE);
+			__unmap_pmd_range(cpa, pud, pmd,
+					  start, start + PMD_SIZE);
 
 		start += PMD_SIZE;
 		pmd++;
@@ -813,17 +824,19 @@
 	 * 4K leftovers?
 	 */
 	if (start < end)
-		return __unmap_pmd_range(pud, pmd, start, end);
+		return __unmap_pmd_range(cpa, pud, pmd, start, end);
 
 	/*
 	 * Try again to free the PMD page if haven't succeeded above.
 	 */
 	if (!pud_none(*pud))
-		if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
+		if (try_to_free_pmd_page(cpa, (pmd_t *)pud_page_vaddr(*pud)))
 			pud_clear(pud);
 }
 
-static void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end)
+static void __unmap_pud_range(struct cpa_data *cpa, pgd_t *pgd,
+			      unsigned long start,
+			      unsigned long end)
 {
 	pud_t *pud = pud_offset(pgd, start);
 
@@ -834,7 +847,7 @@
 		unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;
 		unsigned long pre_end	= min_t(unsigned long, end, next_page);
 
-		unmap_pmd_range(pud, start, pre_end);
+		unmap_pmd_range(cpa, pud, start, pre_end);
 
 		start = pre_end;
 		pud++;
@@ -848,7 +861,7 @@
 		if (pud_large(*pud))
 			pud_clear(pud);
 		else
-			unmap_pmd_range(pud, start, start + PUD_SIZE);
+			unmap_pmd_range(cpa, pud, start, start + PUD_SIZE);
 
 		start += PUD_SIZE;
 		pud++;
@@ -858,7 +871,7 @@
 	 * 2M leftovers?
 	 */
 	if (start < end)
-		unmap_pmd_range(pud, start, end);
+		unmap_pmd_range(cpa, pud, start, end);
 
 	/*
 	 * No need to try to free the PUD page because we'll free it in
@@ -866,6 +879,24 @@
 	 */
 }
 
+static void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end)
+{
+	struct cpa_data cpa = {
+		.flags = CPA_FREE_PAGETABLES,
+	};
+
+	__unmap_pud_range(&cpa, pgd, start, end);
+}
+
+void unmap_pud_range_nofree(pgd_t *pgd, unsigned long start, unsigned long end)
+{
+	struct cpa_data cpa = {
+		.flags = 0,
+	};
+
+	__unmap_pud_range(&cpa, pgd, start, end);
+}
+
 static int alloc_pte_page(pmd_t *pmd)
 {
 	pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 3feec5a..209b946 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -344,14 +344,15 @@
 		kmem_cache_free(pgd_cache, pgd);
 }
 #else
+
 static inline pgd_t *_pgd_alloc(void)
 {
-	return (pgd_t *)__get_free_page(PGALLOC_GFP);
+	return (pgd_t *)__get_free_pages(PGALLOC_GFP, PGD_ALLOCATION_ORDER);
 }
 
 static inline void _pgd_free(pgd_t *pgd)
 {
-	free_page((unsigned long)pgd);
+	free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
 }
 #endif /* CONFIG_X86_PAE */
 
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 75fb011..578973a 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -6,16 +6,17 @@
 #include <linux/interrupt.h>
 #include <linux/export.h>
 #include <linux/cpu.h>
+#include <linux/debugfs.h>
 
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
 #include <asm/cache.h>
 #include <asm/apic.h>
 #include <asm/uv/uv.h>
-#include <linux/debugfs.h>
+#include <asm/kaiser.h>
 
 /*
- *	Smarter SMP flushing macros.
+ *	TLB flushing, formerly SMP-only
  *		c/o Linus Torvalds.
  *
  *	These mean you can really definitely utterly forget about
@@ -28,14 +29,42 @@
  *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
  */
 
-#ifdef CONFIG_SMP
-
 struct flush_tlb_info {
 	struct mm_struct *flush_mm;
 	unsigned long flush_start;
 	unsigned long flush_end;
 };
 
+static void load_new_mm_cr3(pgd_t *pgdir)
+{
+	unsigned long new_mm_cr3 = __pa(pgdir);
+
+	if (kaiser_enabled) {
+		/*
+		 * We reuse the same PCID for different tasks, so we must
+		 * flush all the entries for the PCID out when we change tasks.
+		 * Flush KERN below, flush USER when returning to userspace in
+		 * kaiser's SWITCH_USER_CR3 (_SWITCH_TO_USER_CR3) macro.
+		 *
+		 * invpcid_flush_single_context(X86_CR3_PCID_ASID_USER) could
+		 * do it here, but can only be used if X86_FEATURE_INVPCID is
+		 * available - and many machines support pcid without invpcid.
+		 *
+		 * If X86_CR3_PCID_KERN_FLUSH actually added something, then it
+		 * would be needed in the write_cr3() below - if PCIDs enabled.
+		 */
+		BUILD_BUG_ON(X86_CR3_PCID_KERN_FLUSH);
+		kaiser_flush_tlb_on_return_to_user();
+	}
+
+	/*
+	 * Caution: many callers of this function expect
+	 * that load_cr3() is serializing and orders TLB
+	 * fills with respect to the mm_cpumask writes.
+	 */
+	write_cr3(new_mm_cr3);
+}
+
 /*
  * We cannot call mmdrop() because we are in interrupt context,
  * instead update mm->cpu_vm_mask.
@@ -47,7 +76,7 @@
 		BUG();
 	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
 		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
-		load_cr3(swapper_pg_dir);
+		load_new_mm_cr3(swapper_pg_dir);
 		/*
 		 * This gets called in the idle path where RCU
 		 * functions differently.  Tracing normally
@@ -59,8 +88,6 @@
 }
 EXPORT_SYMBOL_GPL(leave_mm);
 
-#endif /* CONFIG_SMP */
-
 void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	       struct task_struct *tsk)
 {
@@ -83,7 +110,7 @@
 			 * mapped in the new pgd, we'll double-fault.  Forcibly
 			 * map it.
 			 */
-			unsigned int stack_pgd_index = pgd_index(current_stack_pointer());
+			unsigned int stack_pgd_index = pgd_index(current_stack_pointer);
 
 			pgd_t *pgd = next->pgd + stack_pgd_index;
 
@@ -91,10 +118,8 @@
 				set_pgd(pgd, init_mm.pgd[stack_pgd_index]);
 		}
 
-#ifdef CONFIG_SMP
 		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
 		this_cpu_write(cpu_tlbstate.active_mm, next);
-#endif
 
 		cpumask_set_cpu(cpu, mm_cpumask(next));
 
@@ -126,7 +151,7 @@
 		 * ordering guarantee we need.
 		 *
 		 */
-		load_cr3(next->pgd);
+		load_new_mm_cr3(next->pgd);
 
 		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
 
@@ -152,9 +177,7 @@
 		if (unlikely(prev->context.ldt != next->context.ldt))
 			load_mm_ldt(next);
 #endif
-	}
-#ifdef CONFIG_SMP
-	  else {
+	} else {
 		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
 		BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
 
@@ -175,17 +198,14 @@
 			 * As above, load_cr3() is serializing and orders TLB
 			 * fills with respect to the mm_cpumask write.
 			 */
-			load_cr3(next->pgd);
+			load_new_mm_cr3(next->pgd);
 			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
 			load_mm_cr4(next);
 			load_mm_ldt(next);
 		}
 	}
-#endif
 }
 
-#ifdef CONFIG_SMP
-
 /*
  * The flush IPI assumes that a thread switch happens in this order:
  * [cpu0: the cpu that switches]
@@ -287,23 +307,6 @@
 	smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
 }
 
-void flush_tlb_current_task(void)
-{
-	struct mm_struct *mm = current->mm;
-
-	preempt_disable();
-
-	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-
-	/* This is an implicit full barrier that synchronizes with switch_mm. */
-	local_flush_tlb();
-
-	trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
-	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
-	preempt_enable();
-}
-
 /*
  * See Documentation/x86/tlb.txt for details.  We choose 33
  * because it is large enough to cover the vast majority (at
@@ -324,6 +327,12 @@
 	unsigned long base_pages_to_flush = TLB_FLUSH_ALL;
 
 	preempt_disable();
+
+	if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
+		base_pages_to_flush = (end - start) >> PAGE_SHIFT;
+	if (base_pages_to_flush > tlb_single_page_flush_ceiling)
+		base_pages_to_flush = TLB_FLUSH_ALL;
+
 	if (current->active_mm != mm) {
 		/* Synchronize with switch_mm. */
 		smp_mb();
@@ -340,15 +349,11 @@
 		goto out;
 	}
 
-	if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
-		base_pages_to_flush = (end - start) >> PAGE_SHIFT;
-
 	/*
 	 * Both branches below are implicit full barriers (MOV to CR or
 	 * INVLPG) that synchronize with switch_mm.
 	 */
-	if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
-		base_pages_to_flush = TLB_FLUSH_ALL;
+	if (base_pages_to_flush == TLB_FLUSH_ALL) {
 		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
 		local_flush_tlb();
 	} else {
@@ -369,33 +374,6 @@
 	preempt_enable();
 }
 
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
-{
-	struct mm_struct *mm = vma->vm_mm;
-
-	preempt_disable();
-
-	if (current->active_mm == mm) {
-		if (current->mm) {
-			/*
-			 * Implicit full barrier (INVLPG) that synchronizes
-			 * with switch_mm.
-			 */
-			__flush_tlb_one(start);
-		} else {
-			leave_mm(smp_processor_id());
-
-			/* Synchronize with switch_mm. */
-			smp_mb();
-		}
-	}
-
-	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(mm_cpumask(mm), mm, start, start + PAGE_SIZE);
-
-	preempt_enable();
-}
-
 static void do_flush_tlb_all(void *info)
 {
 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
@@ -480,5 +458,3 @@
 	return 0;
 }
 late_initcall(create_tlb_single_page_flush_ceiling);
-
-#endif /* CONFIG_SMP */
diff --git a/arch/x86/pci/broadcom_bus.c b/arch/x86/pci/broadcom_bus.c
index bb461cf..526536c 100644
--- a/arch/x86/pci/broadcom_bus.c
+++ b/arch/x86/pci/broadcom_bus.c
@@ -97,7 +97,7 @@
 	 * We should get host bridge information from ACPI unless the BIOS
 	 * doesn't support it.
 	 */
-	if (acpi_os_get_root_pointer())
+	if (!acpi_disabled && acpi_os_get_root_pointer())
 		return 0;
 #endif
 
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 2f25a36..dcb2d9d 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -142,7 +142,7 @@
 		return 0;
 
 	gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO;
-	efi_pgd = (pgd_t *)__get_free_page(gfp_mask);
+	efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, PGD_ALLOCATION_ORDER);
 	if (!efi_pgd)
 		return -ENOMEM;
 
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 9e42842..0f017518 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1848,7 +1848,6 @@
 
 	ops.write_payload_first(pnode, first);
 	ops.write_payload_last(pnode, last);
-	ops.write_g_sw_ack(pnode, 0xffffUL);
 
 	/* in effect, all msg_type's are set to MSG_NOOP */
 	memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
diff --git a/arch/x86/um/ldt.c b/arch/x86/um/ldt.c
index 836a1eb..3ee234b6 100644
--- a/arch/x86/um/ldt.c
+++ b/arch/x86/um/ldt.c
@@ -6,6 +6,7 @@
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/syscalls.h>
 #include <linux/uaccess.h>
 #include <asm/unistd.h>
 #include <os.h>
@@ -369,7 +370,9 @@
 	mm->arch.ldt.entry_count = 0;
 }
 
-int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
+SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
+		unsigned long , bytecount)
 {
-	return do_modify_ldt_skas(func, ptr, bytecount);
+	/* See non-um modify_ldt() for why we do this cast */
+	return (unsigned int)do_modify_ldt_skas(func, ptr, bytecount);
 }
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 8f1f7ef..2bea87c 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -444,6 +444,12 @@
 		~((1 << X86_FEATURE_MTRR) |  /* disable MTRR */
 		  (1 << X86_FEATURE_ACC));   /* thermal monitoring */
 
+	/*
+	 * Xen PV would need some work to support PCID: CR3 handling as well
+	 * as xen_flush_tlb_others() would need updating.
+	 */
+	cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_PCID % 32));  /* disable PCID */
+
 	if (!xen_initial_domain())
 		cpuid_leaf1_edx_mask &=
 			~((1 << X86_FEATURE_ACPI));  /* disable ACPI */
diff --git a/block/badblocks.c b/block/badblocks.c
index 6ebcef2..2fe6c11 100644
--- a/block/badblocks.c
+++ b/block/badblocks.c
@@ -178,7 +178,7 @@
 
 	if (bb->shift < 0)
 		/* badblocks are disabled */
-		return 0;
+		return 1;
 
 	if (bb->shift) {
 		/* round the start down, and the end up */
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b08ccbb..8ba0af7 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -185,7 +185,8 @@
 	}
 
 	wb_congested = wb_congested_get_create(&q->backing_dev_info,
-					       blkcg->css.id, GFP_NOWAIT);
+					       blkcg->css.id,
+					       GFP_NOWAIT | __GFP_NOWARN);
 	if (!wb_congested) {
 		ret = -ENOMEM;
 		goto err_put_css;
@@ -193,7 +194,7 @@
 
 	/* allocate */
 	if (!new_blkg) {
-		new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT);
+		new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
 		if (unlikely(!new_blkg)) {
 			ret = -ENOMEM;
 			goto err_put_congested;
@@ -1022,7 +1023,7 @@
 	}
 
 	spin_lock_init(&blkcg->lock);
-	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT);
+	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
 	INIT_HLIST_HEAD(&blkcg->blkg_list);
 #ifdef CONFIG_CGROUP_WRITEBACK
 	INIT_LIST_HEAD(&blkcg->cgwb_list);
@@ -1240,7 +1241,7 @@
 		if (blkg->pd[pol->plid])
 			continue;
 
-		pd = pol->pd_alloc_fn(GFP_NOWAIT, q->node);
+		pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q->node);
 		if (!pd)
 			swap(pd, pd_prealloc);
 		if (!pd) {
diff --git a/block/blk-core.c b/block/blk-core.c
index 9fc567c..37b814a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -284,6 +284,7 @@
 void blk_sync_queue(struct request_queue *q)
 {
 	del_timer_sync(&q->timeout);
+	cancel_work_sync(&q->timeout_work);
 
 	if (q->mq_ops) {
 		struct blk_mq_hw_ctx *hctx;
@@ -528,8 +529,8 @@
 
 		blk_queue_for_each_rl(rl, q) {
 			if (rl->rq_pool) {
-				wake_up(&rl->wait[BLK_RW_SYNC]);
-				wake_up(&rl->wait[BLK_RW_ASYNC]);
+				wake_up_all(&rl->wait[BLK_RW_SYNC]);
+				wake_up_all(&rl->wait[BLK_RW_ASYNC]);
 			}
 		}
 	}
@@ -722,6 +723,7 @@
 	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
 		    laptop_mode_timer_fn, (unsigned long) q);
 	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
+	INIT_WORK(&q->timeout_work, NULL);
 	INIT_LIST_HEAD(&q->queue_head);
 	INIT_LIST_HEAD(&q->timeout_list);
 	INIT_LIST_HEAD(&q->icq_list);
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 01fb455..8c0894e 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -429,7 +429,7 @@
 	kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
 }
 
-static void blk_mq_sysfs_init(struct request_queue *q)
+void blk_mq_sysfs_init(struct request_queue *q)
 {
 	struct blk_mq_ctx *ctx;
 	int cpu;
@@ -449,8 +449,6 @@
 
 	blk_mq_disable_hotplug();
 
-	blk_mq_sysfs_init(q);
-
 	ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
 	if (ret < 0)
 		goto out;
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index dcf5ce3..4bc701b 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -311,6 +311,9 @@
 	for (i = 0; i < set->nr_hw_queues; i++) {
 		struct blk_mq_tags *tags = set->tags[i];
 
+		if (!tags)
+			continue;
+
 		for (j = 0; j < tags->nr_tags; j++) {
 			if (!tags->rqs[j])
 				continue;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index a7db634..74ff73f 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1707,7 +1707,6 @@
 		struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
 		struct blk_mq_hw_ctx *hctx;
 
-		memset(__ctx, 0, sizeof(*__ctx));
 		__ctx->cpu = i;
 		spin_lock_init(&__ctx->lock);
 		INIT_LIST_HEAD(&__ctx->rq_list);
@@ -1970,6 +1969,9 @@
 	if (!q->queue_ctx)
 		goto err_exit;
 
+	/* init q->mq_kobj and sw queues' kobjects */
+	blk_mq_sysfs_init(q);
+
 	q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
 						GFP_KERNEL, set->numa_node);
 	if (!q->queue_hw_ctx)
diff --git a/block/blk-mq.h b/block/blk-mq.h
index e5d2524..c55bcf6 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -50,6 +50,7 @@
 /*
  * sysfs helpers
  */
+extern void blk_mq_sysfs_init(struct request_queue *q);
 extern int blk_mq_sysfs_register(struct request_queue *q);
 extern void blk_mq_sysfs_unregister(struct request_queue *q);
 extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index a30441a..220661a 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -135,8 +135,6 @@
 	struct request *rq, *tmp;
 	int next_set = 0;
 
-	if (blk_queue_enter(q, true))
-		return;
 	spin_lock_irqsave(q->queue_lock, flags);
 
 	list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
@@ -146,7 +144,6 @@
 		mod_timer(&q->timeout, round_jiffies_up(next));
 
 	spin_unlock_irqrestore(q->queue_lock, flags);
-	blk_queue_exit(q);
 }
 
 /**
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 4ac4910..6a90155 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3868,7 +3868,8 @@
 			goto out;
 	}
 
-	cfqq = kmem_cache_alloc_node(cfq_pool, GFP_NOWAIT | __GFP_ZERO,
+	cfqq = kmem_cache_alloc_node(cfq_pool,
+				     GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
 				     cfqd->queue->node);
 	if (!cfqq) {
 		cfqq = &cfqd->oom_cfqq;
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 1fad2a6..5c098ff 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -167,6 +167,18 @@
 
 			spawn->alg = NULL;
 			spawns = &inst->alg.cra_users;
+
+			/*
+			 * We may encounter an unregistered instance here, since
+			 * an instance's spawns are set up prior to the instance
+			 * being registered.  An unregistered instance will have
+			 * NULL ->cra_users.next, since ->cra_users isn't
+			 * properly initialized until registration.  But an
+			 * unregistered instance cannot have any users, so treat
+			 * it the same as ->cra_users being empty.
+			 */
+			if (spawns->next == NULL)
+				break;
 		}
 	} while ((spawns = crypto_more_spawns(alg, &stack, &top,
 					      &secondary_spawns)));
diff --git a/crypto/asymmetric_keys/pkcs7_verify.c b/crypto/asymmetric_keys/pkcs7_verify.c
index 2ffd697..5a37962 100644
--- a/crypto/asymmetric_keys/pkcs7_verify.c
+++ b/crypto/asymmetric_keys/pkcs7_verify.c
@@ -150,7 +150,7 @@
 		pr_devel("Sig %u: Found cert serial match X.509[%u]\n",
 			 sinfo->index, certix);
 
-		if (x509->pub->pkey_algo != sinfo->sig->pkey_algo) {
+		if (strcmp(x509->pub->pkey_algo, sinfo->sig->pkey_algo) != 0) {
 			pr_warn("Sig %u: X.509 algo and PKCS#7 sig algo don't match\n",
 				sinfo->index);
 			continue;
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index c80765b..029f705 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -408,6 +408,8 @@
 	ctx->cert->pub->pkey_algo = "rsa";
 
 	/* Discard the BIT STRING metadata */
+	if (vlen < 1 || *(const u8 *)value != 0)
+		return -EBADMSG;
 	ctx->key = value + 1;
 	ctx->key_size = vlen - 1;
 	return 0;
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
index fb73229..e16009a 100644
--- a/crypto/asymmetric_keys/x509_public_key.c
+++ b/crypto/asymmetric_keys/x509_public_key.c
@@ -125,7 +125,7 @@
 	}
 
 	ret = -EKEYREJECTED;
-	if (cert->pub->pkey_algo != cert->sig->pkey_algo)
+	if (strcmp(cert->pub->pkey_algo, cert->sig->pkey_algo) != 0)
 		goto out;
 
 	ret = public_key_verify_signature(cert->pub, cert->sig);
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index e899ef5..cb1c3a3 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -610,6 +610,11 @@
 						    algt->mask));
 	if (IS_ERR(poly))
 		return PTR_ERR(poly);
+	poly_hash = __crypto_hash_alg_common(poly);
+
+	err = -EINVAL;
+	if (poly_hash->digestsize != POLY1305_DIGEST_SIZE)
+		goto out_put_poly;
 
 	err = -ENOMEM;
 	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
@@ -618,7 +623,6 @@
 
 	ctx = aead_instance_ctx(inst);
 	ctx->saltlen = CHACHAPOLY_IV_SIZE - ivsize;
-	poly_hash = __crypto_hash_alg_common(poly);
 	err = crypto_init_ahash_spawn(&ctx->poly, poly_hash,
 				      aead_crypto_instance(inst));
 	if (err)
diff --git a/crypto/hmac.c b/crypto/hmac.c
index 72e38c0..ba07fb6 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -194,11 +194,15 @@
 	salg = shash_attr_alg(tb[1], 0, 0);
 	if (IS_ERR(salg))
 		return PTR_ERR(salg);
+	alg = &salg->base;
 
+	/* The underlying hash algorithm must be unkeyed */
 	err = -EINVAL;
+	if (crypto_shash_alg_has_setkey(salg))
+		goto out_put_alg;
+
 	ds = salg->digestsize;
 	ss = salg->statesize;
-	alg = &salg->base;
 	if (ds > alg->cra_blocksize ||
 	    ss < alg->cra_blocksize)
 		goto out_put_alg;
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
index c207458..a14100e 100644
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -80,6 +80,7 @@
 		pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
 		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
 		INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
+		spin_lock_init(&cpu_queue->q_lock);
 	}
 	return 0;
 }
@@ -103,15 +104,16 @@
 	int cpu, err;
 	struct mcryptd_cpu_queue *cpu_queue;
 
-	cpu = get_cpu();
-	cpu_queue = this_cpu_ptr(queue->cpu_queue);
-	rctx->tag.cpu = cpu;
+	cpu_queue = raw_cpu_ptr(queue->cpu_queue);
+	spin_lock(&cpu_queue->q_lock);
+	cpu = smp_processor_id();
+	rctx->tag.cpu = smp_processor_id();
 
 	err = crypto_enqueue_request(&cpu_queue->queue, request);
 	pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
 		 cpu, cpu_queue, request);
+	spin_unlock(&cpu_queue->q_lock);
 	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
-	put_cpu();
 
 	return err;
 }
@@ -160,16 +162,11 @@
 	cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
 	i = 0;
 	while (i < MCRYPTD_BATCH || single_task_running()) {
-		/*
-		 * preempt_disable/enable is used to prevent
-		 * being preempted by mcryptd_enqueue_request()
-		 */
-		local_bh_disable();
-		preempt_disable();
+
+		spin_lock_bh(&cpu_queue->q_lock);
 		backlog = crypto_get_backlog(&cpu_queue->queue);
 		req = crypto_dequeue_request(&cpu_queue->queue);
-		preempt_enable();
-		local_bh_enable();
+		spin_unlock_bh(&cpu_queue->q_lock);
 
 		if (!req) {
 			mcryptd_opportunistic_flush();
@@ -184,7 +181,7 @@
 		++i;
 	}
 	if (cpu_queue->queue.qlen)
-		queue_work(kcrypto_wq, &cpu_queue->work);
+		queue_work_on(smp_processor_id(), kcrypto_wq, &cpu_queue->work);
 }
 
 void mcryptd_flusher(struct work_struct *__work)
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index ee9cfb9..f8ec3d4 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -254,6 +254,14 @@
 	crypto_free_aead(ctx->child);
 }
 
+static void pcrypt_free(struct aead_instance *inst)
+{
+	struct pcrypt_instance_ctx *ctx = aead_instance_ctx(inst);
+
+	crypto_drop_aead(&ctx->spawn);
+	kfree(inst);
+}
+
 static int pcrypt_init_instance(struct crypto_instance *inst,
 				struct crypto_alg *alg)
 {
@@ -319,6 +327,8 @@
 	inst->alg.encrypt = pcrypt_aead_encrypt;
 	inst->alg.decrypt = pcrypt_aead_decrypt;
 
+	inst->free = pcrypt_free;
+
 	err = aead_register_instance(tmpl, inst);
 	if (err)
 		goto out_drop_aead;
@@ -349,14 +359,6 @@
 	return -EINVAL;
 }
 
-static void pcrypt_free(struct crypto_instance *inst)
-{
-	struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst);
-
-	crypto_drop_aead(&ctx->spawn);
-	kfree(inst);
-}
-
 static int pcrypt_cpumask_change_notify(struct notifier_block *self,
 					unsigned long val, void *data)
 {
@@ -469,7 +471,6 @@
 static struct crypto_template pcrypt_tmpl = {
 	.name = "pcrypt",
 	.create = pcrypt_create,
-	.free = pcrypt_free,
 	.module = THIS_MODULE,
 };
 
diff --git a/crypto/rsa_helper.c b/crypto/rsa_helper.c
index 0b66dc8..cad395d 100644
--- a/crypto/rsa_helper.c
+++ b/crypto/rsa_helper.c
@@ -30,7 +30,7 @@
 		return -EINVAL;
 
 	if (fips_enabled) {
-		while (!*ptr && n_sz) {
+		while (n_sz && !*ptr) {
 			ptr++;
 			n_sz--;
 		}
diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
index f550b5d..d7da0ee 100644
--- a/crypto/salsa20_generic.c
+++ b/crypto/salsa20_generic.c
@@ -188,13 +188,6 @@
 
 	salsa20_ivsetup(ctx, walk.iv);
 
-	if (likely(walk.nbytes == nbytes))
-	{
-		salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
-				      walk.src.virt.addr, nbytes);
-		return blkcipher_walk_done(desc, &walk, 0);
-	}
-
 	while (walk.nbytes >= 64) {
 		salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
 				      walk.src.virt.addr,
diff --git a/crypto/shash.c b/crypto/shash.c
index 4d8a671..9bd5044 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -24,11 +24,12 @@
 
 static const struct crypto_type crypto_shash_type;
 
-static int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
-			   unsigned int keylen)
+int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
+		    unsigned int keylen)
 {
 	return -ENOSYS;
 }
+EXPORT_SYMBOL_GPL(shash_no_setkey);
 
 static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
 				  unsigned int keylen)
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index ae22f05..e3af318 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -342,7 +342,7 @@
 			}
 
 			sg_init_aead(sg, xbuf,
-				    *b_size + (enc ? authsize : 0));
+				    *b_size + (enc ? 0 : authsize));
 
 			sg_init_aead(sgout, xoutbuf,
 				    *b_size + (enc ? authsize : 0));
@@ -350,7 +350,9 @@
 			sg_set_buf(&sg[0], assoc, aad_size);
 			sg_set_buf(&sgout[0], assoc, aad_size);
 
-			aead_request_set_crypt(req, sg, sgout, *b_size, iv);
+			aead_request_set_crypt(req, sg, sgout,
+					       *b_size + (enc ? 0 : authsize),
+					       iv);
 			aead_request_set_ad(req, aad_size);
 
 			if (secs)
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 3de3b6b..f43a586 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -182,11 +182,6 @@
 
 void __weak arch_unregister_cpu(int cpu) {}
 
-int __weak acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
-{
-	return -ENODEV;
-}
-
 static int acpi_processor_hotadd_init(struct acpi_processor *pr)
 {
 	unsigned long long sta;
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index ec4f507..4558cc7 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -1020,7 +1020,7 @@
 	/* The record may be cleared by others, try read next record */
 	if (len == -ENOENT)
 		goto skip;
-	else if (len < sizeof(*rcd)) {
+	else if (len < 0 || len < sizeof(*rcd)) {
 		rc = -EIO;
 		goto out;
 	}
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 56190d0..0a3ca20 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1197,7 +1197,6 @@
 	acpi_wakeup_device_init();
 	acpi_debugger_init();
 	acpi_setup_sb_notify_handler();
-	acpi_set_processor_mapping();
 	return 0;
 }
 
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 5187469..c3bcb7f 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -482,8 +482,11 @@
 {
 	if (!test_and_set_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
 		ec_log_drv("event unblocked");
-	if (!test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
-		advance_transaction(ec);
+	/*
+	 * Unconditionally invoke this once after enabling the event
+	 * handling mechanism to detect the pending events.
+	 */
+	advance_transaction(ec);
 }
 
 static inline void __acpi_ec_disable_event(struct acpi_ec *ec)
@@ -1458,11 +1461,10 @@
 			if (test_bit(EC_FLAGS_STARTED, &ec->flags) &&
 			    ec->reference_count >= 1)
 				acpi_ec_enable_gpe(ec, true);
-
-			/* EC is fully operational, allow queries */
-			acpi_ec_enable_event(ec);
 		}
 	}
+	/* EC is fully operational, allow queries */
+	acpi_ec_enable_event(ec);
 
 	return 0;
 }
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index f3bc901..fe03d00 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -1390,6 +1390,11 @@
 				dev_name(&adev_dimm->dev));
 		return -ENXIO;
 	}
+	/*
+	 * Record nfit_mem for the notification path to track back to
+	 * the nfit sysfs attributes for this dimm device object.
+	 */
+	dev_set_drvdata(&adev_dimm->dev, nfit_mem);
 
 	/*
 	 * Until standardization materializes we need to consider 4
@@ -1446,9 +1451,11 @@
 			sysfs_put(nfit_mem->flags_attr);
 			nfit_mem->flags_attr = NULL;
 		}
-		if (adev_dimm)
+		if (adev_dimm) {
 			acpi_remove_notify_handler(adev_dimm->handle,
 					ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify);
+			dev_set_drvdata(&adev_dimm->dev, NULL);
+		}
 	}
 	mutex_unlock(&acpi_desc->init_mutex);
 }
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 5c78ee1..fd59ae8 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -280,79 +280,6 @@
 }
 EXPORT_SYMBOL_GPL(acpi_get_cpuid);
 
-#ifdef CONFIG_ACPI_HOTPLUG_CPU
-static bool __init
-map_processor(acpi_handle handle, phys_cpuid_t *phys_id, int *cpuid)
-{
-	int type, id;
-	u32 acpi_id;
-	acpi_status status;
-	acpi_object_type acpi_type;
-	unsigned long long tmp;
-	union acpi_object object = { 0 };
-	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
-
-	status = acpi_get_type(handle, &acpi_type);
-	if (ACPI_FAILURE(status))
-		return false;
-
-	switch (acpi_type) {
-	case ACPI_TYPE_PROCESSOR:
-		status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
-		if (ACPI_FAILURE(status))
-			return false;
-		acpi_id = object.processor.proc_id;
-
-		/* validate the acpi_id */
-		if(acpi_processor_validate_proc_id(acpi_id))
-			return false;
-		break;
-	case ACPI_TYPE_DEVICE:
-		status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp);
-		if (ACPI_FAILURE(status))
-			return false;
-		acpi_id = tmp;
-		break;
-	default:
-		return false;
-	}
-
-	type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
-
-	*phys_id = __acpi_get_phys_id(handle, type, acpi_id, false);
-	id = acpi_map_cpuid(*phys_id, acpi_id);
-
-	if (id < 0)
-		return false;
-	*cpuid = id;
-	return true;
-}
-
-static acpi_status __init
-set_processor_node_mapping(acpi_handle handle, u32 lvl, void *context,
-			   void **rv)
-{
-	phys_cpuid_t phys_id;
-	int cpu_id;
-
-	if (!map_processor(handle, &phys_id, &cpu_id))
-		return AE_ERROR;
-
-	acpi_map_cpu2node(handle, cpu_id, phys_id);
-	return AE_OK;
-}
-
-void __init acpi_set_processor_mapping(void)
-{
-	/* Set persistent cpu <-> node mapping for all processors. */
-	acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
-			    ACPI_UINT32_MAX, set_processor_node_mapping,
-			    NULL, NULL, NULL);
-}
-#else
-void __init acpi_set_processor_mapping(void) {}
-#endif /* CONFIG_ACPI_HOTPLUG_CPU */
-
 #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
 static int get_ioapic_id(struct acpi_subtable_header *entry, u32 gsi_base,
 			 u64 *phys_addr, int *ioapic_id)
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 0e1ec37..6475a13 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2329,8 +2329,8 @@
 		if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
 			eflags |= ATA_EFLAG_DUBIOUS_XFER;
 		ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
+		trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
 	}
-	trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
 	DPRINTK("EXIT\n");
 }
 
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 051b615..8d22acd 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -1481,7 +1481,6 @@
 		break;
 
 	default:
-		WARN_ON_ONCE(1);
 		return AC_ERR_SYSTEM;
 	}
 
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index 5fc81e2..e55f418 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -2802,7 +2802,7 @@
 	return err;
 
 out_free_irq:
-	free_irq(dev->irq, dev);
+	free_irq(irq, dev);
 out_free:
 	kfree(dev);
 out_release:
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index d02e7c0..0651010 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -235,6 +235,9 @@
 config GENERIC_CPU_AUTOPROBE
 	bool
 
+config GENERIC_CPU_VULNERABILITIES
+	bool
+
 config SOC_BUS
 	bool
 
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index e17ad53..4fe86f7 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -590,10 +590,58 @@
 #endif
 }
 
+#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES
+
+ssize_t __weak cpu_show_meltdown(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_spectre_v1(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_spectre_v2(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "Not affected\n");
+}
+
+static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
+static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
+static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
+
+static struct attribute *cpu_root_vulnerabilities_attrs[] = {
+	&dev_attr_meltdown.attr,
+	&dev_attr_spectre_v1.attr,
+	&dev_attr_spectre_v2.attr,
+	NULL
+};
+
+static const struct attribute_group cpu_root_vulnerabilities_group = {
+	.name  = "vulnerabilities",
+	.attrs = cpu_root_vulnerabilities_attrs,
+};
+
+static void __init cpu_register_vulnerabilities(void)
+{
+	if (sysfs_create_group(&cpu_subsys.dev_root->kobj,
+			       &cpu_root_vulnerabilities_group))
+		pr_err("Unable to register CPU vulnerabilities\n");
+}
+
+#else
+static inline void cpu_register_vulnerabilities(void) { }
+#endif
+
 void __init cpu_dev_init(void)
 {
 	if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups))
 		panic("Failed to register CPU subsystem");
 
 	cpu_dev_register_generic();
+	cpu_register_vulnerabilities();
 }
diff --git a/drivers/base/isa.c b/drivers/base/isa.c
index cd6ccdc..372d10a 100644
--- a/drivers/base/isa.c
+++ b/drivers/base/isa.c
@@ -39,7 +39,7 @@
 {
 	struct isa_driver *isa_driver = dev->platform_data;
 
-	if (isa_driver->probe)
+	if (isa_driver && isa_driver->probe)
 		return isa_driver->probe(dev, to_isa_dev(dev)->id);
 
 	return 0;
@@ -49,7 +49,7 @@
 {
 	struct isa_driver *isa_driver = dev->platform_data;
 
-	if (isa_driver->remove)
+	if (isa_driver && isa_driver->remove)
 		return isa_driver->remove(dev, to_isa_dev(dev)->id);
 
 	return 0;
@@ -59,7 +59,7 @@
 {
 	struct isa_driver *isa_driver = dev->platform_data;
 
-	if (isa_driver->shutdown)
+	if (isa_driver && isa_driver->shutdown)
 		isa_driver->shutdown(dev, to_isa_dev(dev)->id);
 }
 
@@ -67,7 +67,7 @@
 {
 	struct isa_driver *isa_driver = dev->platform_data;
 
-	if (isa_driver->suspend)
+	if (isa_driver && isa_driver->suspend)
 		return isa_driver->suspend(dev, to_isa_dev(dev)->id, state);
 
 	return 0;
@@ -77,7 +77,7 @@
 {
 	struct isa_driver *isa_driver = dev->platform_data;
 
-	if (isa_driver->resume)
+	if (isa_driver && isa_driver->resume)
 		return isa_driver->resume(dev, to_isa_dev(dev)->id);
 
 	return 0;
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index 6441dfd..a7c5b79 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -331,7 +331,7 @@
 	opp_table = _find_opp_table(dev);
 	if (IS_ERR(opp_table)) {
 		count = PTR_ERR(opp_table);
-		dev_err(dev, "%s: OPP table not found (%d)\n",
+		dev_dbg(dev, "%s: OPP table not found (%d)\n",
 			__func__, count);
 		goto out_unlock;
 	}
diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c
index b52c617..6937944 100644
--- a/drivers/base/power/opp/of.c
+++ b/drivers/base/power/opp/of.c
@@ -348,6 +348,7 @@
 		if (ret) {
 			dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
 				ret);
+			of_node_put(np);
 			goto free_table;
 		}
 	}
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 98b767d..4d30da2 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -272,6 +272,7 @@
 	int result, flags;
 	struct nbd_request request;
 	unsigned long size = blk_rq_bytes(req);
+	struct bio *bio;
 	u32 type;
 
 	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
@@ -305,16 +306,20 @@
 		return -EIO;
 	}
 
-	if (type == NBD_CMD_WRITE) {
-		struct req_iterator iter;
+	if (type != NBD_CMD_WRITE)
+		return 0;
+
+	flags = 0;
+	bio = req->bio;
+	while (bio) {
+		struct bio *next = bio->bi_next;
+		struct bvec_iter iter;
 		struct bio_vec bvec;
-		/*
-		 * we are really probing at internals to determine
-		 * whether to set MSG_MORE or not...
-		 */
-		rq_for_each_segment(bvec, req, iter) {
-			flags = 0;
-			if (!rq_iter_last(bvec, iter))
+
+		bio_for_each_segment(bvec, bio, iter) {
+			bool is_last = !next && bio_iter_last(bvec, iter);
+
+			if (is_last)
 				flags = MSG_MORE;
 			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
 				cmd, bvec.bv_len);
@@ -325,7 +330,16 @@
 					result);
 				return -EIO;
 			}
+			/*
+			 * The completion might already have come in,
+			 * so break for the last one instead of letting
+			 * the iterator do it. This prevents use-after-free
+			 * of the bio.
+			 */
+			if (is_last)
+				break;
 		}
+		bio = next;
 	}
 	return 0;
 }
@@ -654,7 +668,10 @@
 		return nbd_size_set(nbd, bdev, nbd->blksize, arg);
 
 	case NBD_SET_TIMEOUT:
-		nbd->tag_set.timeout = arg * HZ;
+		if (arg) {
+			nbd->tag_set.timeout = arg * HZ;
+			blk_queue_rq_timeout(nbd->disk->queue, arg * HZ);
+		}
 		return 0;
 
 	case NBD_SET_FLAGS:
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 24f4b54..e32badd 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -4511,7 +4511,7 @@
 	segment_size = rbd_obj_bytes(&rbd_dev->header);
 	blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
 	q->limits.max_sectors = queue_max_hw_sectors(q);
-	blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
+	blk_queue_max_segments(q, USHRT_MAX);
 	blk_queue_max_segment_size(q, segment_size);
 	blk_queue_io_min(q, segment_size);
 	blk_queue_io_opt(q, segment_size);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 9701cc2..ed9de1b 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1296,6 +1296,8 @@
 	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
 	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
 	zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
+	zram->disk->queue->limits.max_sectors = SECTORS_PER_PAGE;
+	zram->disk->queue->limits.chunk_sectors = 0;
 	blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
 	/*
 	 * zram_bio_discard() will clear all logical blocks if logical block
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index 8900823..10f5613 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -1755,14 +1755,17 @@
 	raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock);
 	mutex_init(&cci_pmu->reserve_mutex);
 	atomic_set(&cci_pmu->active_events, 0);
-	cpumask_set_cpu(smp_processor_id(), &cci_pmu->cpus);
+	cpumask_set_cpu(get_cpu(), &cci_pmu->cpus);
 
 	ret = cci_pmu_init(cci_pmu, pdev);
-	if (ret)
+	if (ret) {
+		put_cpu();
 		return ret;
+	}
 
 	cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
 					 &cci_pmu->node);
+	put_cpu();
 	pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
 	return 0;
 }
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
index aee8346..45d7ecc 100644
--- a/drivers/bus/arm-ccn.c
+++ b/drivers/bus/arm-ccn.c
@@ -1271,11 +1271,16 @@
 		int len = snprintf(NULL, 0, "ccn_%d", ccn->dt.id);
 
 		name = devm_kzalloc(ccn->dev, len + 1, GFP_KERNEL);
+		if (!name) {
+			err = -ENOMEM;
+			goto error_choose_name;
+		}
 		snprintf(name, len + 1, "ccn_%d", ccn->dt.id);
 	}
 
 	/* Perf driver registration */
 	ccn->dt.pmu = (struct pmu) {
+		.module = THIS_MODULE,
 		.attr_groups = arm_ccn_pmu_attr_groups,
 		.task_ctx_nr = perf_invalid_context,
 		.event_init = arm_ccn_pmu_event_init,
@@ -1297,7 +1302,7 @@
 	}
 
 	/* Pick one CPU which we will use to collect data from CCN... */
-	cpumask_set_cpu(smp_processor_id(), &ccn->dt.cpu);
+	cpumask_set_cpu(get_cpu(), &ccn->dt.cpu);
 
 	/* Also make sure that the overflow interrupt is handled by this CPU */
 	if (ccn->irq) {
@@ -1314,10 +1319,13 @@
 
 	cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
 					 &ccn->dt.node);
+	put_cpu();
 	return 0;
 
 error_pmu_register:
 error_set_affinity:
+	put_cpu();
+error_choose_name:
 	ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
 	for (i = 0; i < ccn->num_xps; i++)
 		writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
@@ -1578,8 +1586,8 @@
 
 static void __exit arm_ccn_exit(void)
 {
-	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
 	platform_driver_unregister(&arm_ccn_driver);
+	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
 }
 
 module_init(arm_ccn_init);
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 795c9d9..2051d92 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -178,6 +178,7 @@
 	.match		= sunxi_rsb_device_match,
 	.probe		= sunxi_rsb_device_probe,
 	.remove		= sunxi_rsb_device_remove,
+	.uevent		= of_device_uevent_modalias,
 };
 
 static void sunxi_rsb_dev_release(struct device *dev)
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 1ea2053..b0d0181 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -607,7 +607,7 @@
 
 config MSM_ADSPRPC
         tristate "QTI ADSP RPC driver"
-        depends on MSM_GLINK
+        depends on MSM_GLINK || MSM_SMD
         help
           Provides a communication mechanism that allows for clients to
           make remote method invocations across processor boundary to
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 3058ce3..9a0424a 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -28,6 +28,8 @@
 #include <soc/qcom/glink.h>
 #include <soc/qcom/subsystem_notif.h>
 #include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/service-notifier.h>
+#include <soc/qcom/service-locator.h>
 #include <linux/scatterlist.h>
 #include <linux/fs.h>
 #include <linux/uaccess.h>
@@ -58,6 +60,9 @@
 #define VMID_ADSP_Q6    6
 #define DEBUGFS_SIZE 1024
 
+#define AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME   "audio_pdr_adsprpc"
+#define AUDIO_PDR_ADSP_SERVICE_NAME              "avs/audio"
+
 #define RPC_TIMEOUT	(5 * HZ)
 #define BALIGN		128
 #define NUM_CHANNELS	4	/* adsp, mdsp, slpi, cdsp*/
@@ -78,7 +83,8 @@
 #define FASTRPC_LINK_REMOTE_DISCONNECTING (0x8)
 #define FASTRPC_GLINK_INTENT_LEN  (64)
 
-#define PERF_KEYS "count:flush:map:copy:glink:getargs:putargs:invalidate:invoke"
+#define PERF_KEYS \
+	"count:flush:map:copy:glink:getargs:putargs:invalidate:invoke:tid:ptr"
 #define FASTRPC_STATIC_HANDLE_LISTENER (3)
 #define FASTRPC_STATIC_HANDLE_MAX (20)
 #define FASTRPC_LATENCY_CTRL_ENB  (1)
@@ -91,17 +97,27 @@
 #define PERF(enb, cnt, ff) \
 	{\
 		struct timespec startT = {0};\
-		if (enb) {\
+		int64_t *counter = cnt;\
+		if (enb && counter) {\
 			getnstimeofday(&startT);\
 		} \
 		ff ;\
-		if (enb) {\
-			cnt += getnstimediff(&startT);\
+		if (enb && counter) {\
+			*counter += getnstimediff(&startT);\
 		} \
 	}
 
+#define GET_COUNTER(perf_ptr, offset)  \
+	(perf_ptr != NULL ?\
+		(((offset >= 0) && (offset < PERF_KEY_MAX)) ?\
+			(int64_t *)(perf_ptr + offset)\
+				: (int64_t *)NULL) : (int64_t *)NULL)
+
 static int fastrpc_glink_open(int cid);
 static void fastrpc_glink_close(void *chan, int cid);
+static int fastrpc_audio_pdr_notifier_cb(struct notifier_block *nb,
+					unsigned long code,
+					void *data);
 static struct dentry *debugfs_root;
 static struct dentry *debugfs_global_file;
 
@@ -146,6 +162,12 @@
 	return addr;
 }
 
+struct secure_vm {
+	int *vmid;
+	int *vmperm;
+	int vmcount;
+};
+
 struct fastrpc_file;
 
 struct fastrpc_buf {
@@ -210,6 +232,16 @@
 	int used;
 };
 
+struct fastrpc_static_pd {
+	char *spdname;
+	struct notifier_block pdrnb;
+	struct notifier_block get_service_nb;
+	void *pdrhandle;
+	int pdrcount;
+	int prevpdrcount;
+	int ispdup;
+};
+
 struct fastrpc_glink_info {
 	int link_state;
 	int port_state;
@@ -224,6 +256,7 @@
 	void *chan;
 	struct device *dev;
 	struct fastrpc_session_ctx session[NUM_SESSIONS];
+	struct fastrpc_static_pd spd[NUM_SESSIONS];
 	struct completion work;
 	struct completion workport;
 	struct notifier_block nb;
@@ -234,7 +267,7 @@
 	int prevssrcount;
 	int issubsystemup;
 	int vmid;
-	int rhvmid;
+	struct secure_vm rhvm;
 	int ramdumpenabled;
 	void *remoteheap_ramdump_dev;
 	struct fastrpc_glink_info link;
@@ -278,6 +311,19 @@
 	uintptr_t attr;
 };
 
+enum fastrpc_perfkeys {
+	PERF_COUNT = 0,
+	PERF_FLUSH = 1,
+	PERF_MAP = 2,
+	PERF_COPY = 3,
+	PERF_LINK = 4,
+	PERF_GETARGS = 5,
+	PERF_PUTARGS = 6,
+	PERF_INVARGS = 7,
+	PERF_INVOKE = 8,
+	PERF_KEY_MAX = 9,
+};
+
 struct fastrpc_perf {
 	int64_t count;
 	int64_t flush;
@@ -288,6 +334,8 @@
 	int64_t putargs;
 	int64_t invargs;
 	int64_t invoke;
+	int64_t tid;
+	struct hlist_node hn;
 };
 
 struct fastrpc_file {
@@ -305,12 +353,16 @@
 	int cid;
 	int ssrcount;
 	int pd;
+	char *spdname;
 	int file_close;
 	struct fastrpc_apps *apps;
-	struct fastrpc_perf perf;
+	struct hlist_head perf;
 	struct dentry *debugfs_file;
+	struct mutex perf_mutex;
 	struct pm_qos_request pm_qos_req;
 	int qos_request;
+	struct mutex map_mutex;
+	struct mutex fl_map_mutex;
 };
 
 static struct fastrpc_apps gfa;
@@ -321,6 +373,14 @@
 		.subsys = "adsp",
 		.link.link_info.edge = "lpass",
 		.link.link_info.transport = "smem",
+		.spd = {
+			{
+				.spdname =
+					AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME,
+				.pdrnb.notifier_call =
+						fastrpc_audio_pdr_notifier_cb,
+			}
+		},
 	},
 	{
 		.name = "mdsprpc-smd",
@@ -342,6 +402,9 @@
 	},
 };
 
+static int hlosvm[1] = {VMID_HLOS};
+static int hlosvmperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
+
 static inline int64_t getnstimediff(struct timespec *start)
 {
 	int64_t ns;
@@ -353,6 +416,46 @@
 	return ns;
 }
 
+static inline int64_t *getperfcounter(struct fastrpc_file *fl, int key)
+{
+	int err = 0;
+	int64_t *val = NULL;
+	struct fastrpc_perf *perf = NULL, *fperf = NULL;
+	struct hlist_node *n = NULL;
+
+	VERIFY(err, !IS_ERR_OR_NULL(fl));
+	if (err)
+		goto bail;
+
+	mutex_lock(&fl->perf_mutex);
+	hlist_for_each_entry_safe(perf, n, &fl->perf, hn) {
+		if (perf->tid == current->pid) {
+			fperf = perf;
+			break;
+		}
+	}
+
+	if (IS_ERR_OR_NULL(fperf)) {
+		fperf = kzalloc(sizeof(*fperf), GFP_KERNEL);
+
+		VERIFY(err, !IS_ERR_OR_NULL(fperf));
+		if (err) {
+			mutex_unlock(&fl->perf_mutex);
+			kfree(fperf);
+			goto bail;
+		}
+
+		fperf->tid = current->pid;
+		hlist_add_head(&fperf->hn, &fl->perf);
+	}
+
+	val = ((int64_t *)fperf) + key;
+	mutex_unlock(&fl->perf_mutex);
+bail:
+	return val;
+}
+
+
 static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
 {
 	struct fastrpc_file *fl = buf == NULL ? NULL : buf->fl;
@@ -417,9 +520,7 @@
 	} else {
 		struct fastrpc_file *fl = map->fl;
 
-		spin_lock(&fl->hlock);
 		hlist_add_head(&map->hn, &fl->maps);
-		spin_unlock(&fl->hlock);
 	}
 }
 
@@ -448,7 +549,6 @@
 		}
 		spin_unlock(&me->hlock);
 	} else {
-		spin_lock(&fl->hlock);
 		hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
 			if (va >= map->va &&
 				va + len <= map->va + map->len &&
@@ -459,7 +559,6 @@
 				break;
 			}
 		}
-		spin_unlock(&fl->hlock);
 	}
 	if (match) {
 		*ppmap = match;
@@ -507,7 +606,6 @@
 		*ppmap = match;
 		return 0;
 	}
-	spin_lock(&fl->hlock);
 	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
 		if (map->raddr == va &&
 			map->raddr + map->len == va + len &&
@@ -517,7 +615,6 @@
 			break;
 		}
 	}
-	spin_unlock(&fl->hlock);
 	if (match) {
 		*ppmap = match;
 		return 0;
@@ -545,11 +642,9 @@
 		if (map->refs > 0)
 			return;
 	} else {
-		spin_lock(&fl->hlock);
 		map->refs--;
 		if (!map->refs)
 			hlist_del_init(&map->hn);
-		spin_unlock(&fl->hlock);
 		if (map->refs > 0 && !flags)
 			return;
 	}
@@ -910,7 +1005,7 @@
 #define K_COPY_TO_USER(err, kernel, dst, src, size) \
 	do {\
 		if (!(kernel))\
-			VERIFY(err, 0 == copy_to_user((void __user *)(dst), \
+			VERIFY(err, 0 == copy_to_user((void __user *)(dst),\
 						(src), (size)));\
 		else\
 			memmove((dst), (src), (size));\
@@ -1010,8 +1105,11 @@
 	spin_lock(&ctx->fl->hlock);
 	hlist_del_init(&ctx->hn);
 	spin_unlock(&ctx->fl->hlock);
+	mutex_lock(&ctx->fl->fl_map_mutex);
 	for (i = 0; i < nbufs; ++i)
 		fastrpc_mmap_free(ctx->maps[i], 0);
+
+	mutex_unlock(&ctx->fl->fl_map_mutex);
 	fastrpc_buf_free(ctx->buf, 1);
 	ctx->magic = 0;
 	kfree(ctx);
@@ -1053,6 +1151,21 @@
 	spin_unlock(&me->hlock);
 
 }
+
+static void fastrpc_notify_pdr_drivers(struct fastrpc_apps *me, char *spdname)
+{
+	struct fastrpc_file *fl;
+	struct hlist_node *n;
+
+	spin_lock(&me->hlock);
+	hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
+		if (fl->spdname && !strcmp(spdname, fl->spdname))
+			fastrpc_notify_users(fl);
+	}
+	spin_unlock(&me->hlock);
+
+}
+
 static void context_list_ctor(struct fastrpc_ctx_lst *me)
 {
 	INIT_HLIST_HEAD(&me->interrupted);
@@ -1128,6 +1241,7 @@
 	int mflags = 0;
 	uint64_t *fdlist;
 	uint32_t *crclist;
+	int64_t *perf_counter = getperfcounter(ctx->fl, PERF_COUNT);
 
 	/* calculate size of the metadata */
 	rpra = NULL;
@@ -1135,24 +1249,32 @@
 	pages = smq_phy_page_start(sc, list);
 	ipage = pages;
 
+	PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
 	for (i = 0; i < bufs; ++i) {
 		uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
 		size_t len = lpra[i].buf.len;
 
+		mutex_lock(&ctx->fl->fl_map_mutex);
 		if (ctx->fds[i] && (ctx->fds[i] != -1))
 			fastrpc_mmap_create(ctx->fl, ctx->fds[i],
 					ctx->attrs[i], buf, len,
 					mflags, &ctx->maps[i]);
+		mutex_unlock(&ctx->fl->fl_map_mutex);
 		ipage += 1;
 	}
+	PERF_END);
 	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
+	mutex_lock(&ctx->fl->fl_map_mutex);
 	for (i = bufs; i < bufs + handles; i++) {
 		VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
 				FASTRPC_ATTR_NOVA, 0, 0, 0, &ctx->maps[i]));
-		if (err)
+		if (err) {
+			mutex_unlock(&ctx->fl->fl_map_mutex);
 			goto bail;
+		}
 		ipage += 1;
 	}
+	mutex_unlock(&ctx->fl->fl_map_mutex);
 	metalen = copylen = (size_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST) +
 				 (sizeof(uint32_t) * M_CRCLIST);
 
@@ -1206,7 +1328,7 @@
 	}
 
 	/* map ion buffers */
-	PERF(ctx->fl->profile, ctx->fl->perf.map,
+	PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
 	for (i = 0; rpra && i < inbufs + outbufs; ++i) {
 		struct fastrpc_mmap *map = ctx->maps[i];
 		uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
@@ -1257,9 +1379,9 @@
 	memset(crclist, 0, sizeof(uint32_t)*M_CRCLIST);
 
 	/* copy non ion buffers */
-	PERF(ctx->fl->profile, ctx->fl->perf.copy,
+	PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_COPY),
 	rlen = copylen - metalen;
-	for (oix = 0; oix < inbufs + outbufs; ++oix) {
+	for (oix = 0; rpra && oix < inbufs + outbufs; ++oix) {
 		int i = ctx->overps[oix]->raix;
 		struct fastrpc_mmap *map = ctx->maps[i];
 		size_t mlen;
@@ -1297,7 +1419,7 @@
 	}
 	PERF_END);
 
-	PERF(ctx->fl->profile, ctx->fl->perf.flush,
+	PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_FLUSH),
 	for (oix = 0; oix < inbufs + outbufs; ++oix) {
 		int i = ctx->overps[oix]->raix;
 		struct fastrpc_mmap *map = ctx->maps[i];
@@ -1310,7 +1432,7 @@
 		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
 			continue;
 
-		if (rpra[i].buf.len && ctx->overps[oix]->mstart)
+		if (rpra && rpra[i].buf.len && ctx->overps[oix]->mstart)
 			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
 			uint64_to_ptr(rpra[i].buf.pv + rpra[i].buf.len));
 	}
@@ -1322,7 +1444,7 @@
 	}
 
 	if (!ctx->fl->sctx->smmu.coherent) {
-		PERF(ctx->fl->profile, ctx->fl->perf.flush,
+		PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_FLUSH),
 		dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
 		PERF_END);
 	}
@@ -1361,10 +1483,13 @@
 			if (err)
 				goto bail;
 		} else {
+			mutex_lock(&ctx->fl->fl_map_mutex);
 			fastrpc_mmap_free(ctx->maps[i], 0);
+			mutex_unlock(&ctx->fl->fl_map_mutex);
 			ctx->maps[i] = NULL;
 		}
 	}
+	mutex_lock(&ctx->fl->fl_map_mutex);
 	if (inbufs + outbufs + handles) {
 		for (i = 0; i < M_FDLIST; i++) {
 			if (!fdlist[i])
@@ -1374,6 +1499,7 @@
 				fastrpc_mmap_free(mmap, 0);
 		}
 	}
+	mutex_unlock(&ctx->fl->fl_map_mutex);
 	if (ctx->crc && crclist && rpra)
 		K_COPY_TO_USER(err, kernel, ctx->crc,
 			crclist, M_CRCLIST*sizeof(uint32_t));
@@ -1501,6 +1627,7 @@
 	int i;
 
 	INIT_HLIST_HEAD(&me->drivers);
+	INIT_HLIST_HEAD(&me->maps);
 	spin_lock_init(&me->hlock);
 	mutex_init(&me->smd_mutex);
 	me->channel = &gcinfo[0];
@@ -1523,6 +1650,7 @@
 	int interrupted = 0;
 	int err = 0;
 	struct timespec invoket = {0};
+	int64_t *perf_counter = getperfcounter(fl, PERF_COUNT);
 
 	if (fl->profile)
 		getnstimeofday(&invoket);
@@ -1553,16 +1681,20 @@
 		goto bail;
 
 	if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
-		PERF(fl->profile, fl->perf.getargs,
+		PERF(fl->profile, GET_COUNTER(perf_counter, PERF_GETARGS),
 		VERIFY(err, 0 == get_args(kernel, ctx));
 		PERF_END);
 		if (err)
 			goto bail;
 	}
 
-	if (!fl->sctx->smmu.coherent)
+	if (!fl->sctx->smmu.coherent) {
+		PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
 		inv_args_pre(ctx);
-	PERF(fl->profile, fl->perf.link,
+		PERF_END);
+	}
+
+	PERF(fl->profile, GET_COUNTER(perf_counter, PERF_LINK),
 	VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
 	PERF_END);
 
@@ -1578,7 +1710,7 @@
 			goto bail;
 	}
 
-	PERF(fl->profile, fl->perf.invargs,
+	PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
 	if (!fl->sctx->smmu.coherent)
 		inv_args(ctx);
 	PERF_END);
@@ -1587,7 +1719,7 @@
 	if (err)
 		goto bail;
 
-	PERF(fl->profile, fl->perf.putargs,
+	PERF(fl->profile, GET_COUNTER(perf_counter, PERF_PUTARGS),
 	VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
 	PERF_END);
 	if (err)
@@ -1601,15 +1733,44 @@
 		err = ECONNRESET;
 
 	if (fl->profile && !interrupted) {
-		if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER)
-			fl->perf.invoke += getnstimediff(&invoket);
-		if (invoke->handle > FASTRPC_STATIC_HANDLE_MAX)
-			fl->perf.count++;
+		if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER) {
+			int64_t *count = GET_COUNTER(perf_counter, PERF_INVOKE);
+
+			if (count)
+				*count += getnstimediff(&invoket);
+		}
+		if (invoke->handle > FASTRPC_STATIC_HANDLE_MAX) {
+			int64_t *count = GET_COUNTER(perf_counter, PERF_COUNT);
+
+			if (count)
+				*count = *count+1;
+		}
 	}
 	return err;
 }
 
+static int fastrpc_get_adsp_session(char *name, int *session)
+{
+	struct fastrpc_apps *me = &gfa;
+	int err = 0, i;
+
+	for (i = 0; i < NUM_SESSIONS; i++) {
+		if (!me->channel[0].spd[i].spdname)
+			continue;
+		if (!strcmp(name, me->channel[0].spd[i].spdname))
+			break;
+	}
+	VERIFY(err, i < NUM_SESSIONS);
+	if (err)
+		goto bail;
+	*session = i;
+bail:
+	return err;
+}
+
+static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl);
 static int fastrpc_channel_open(struct fastrpc_file *fl);
+static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl);
 static int fastrpc_init_process(struct fastrpc_file *fl,
 				struct fastrpc_ioctl_init_attrs *uproc)
 {
@@ -1620,10 +1781,6 @@
 	struct smq_phy_page pages[1];
 	struct fastrpc_mmap *file = NULL, *mem = NULL;
 	char *proc_name = NULL;
-	int srcVM[1] = {VMID_HLOS};
-	int destVM[1] = {me->channel[fl->cid].rhvmid};
-	int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
-	int hlosVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
 
 	VERIFY(err, 0 == (err = fastrpc_channel_open(fl)));
 	if (err)
@@ -1668,8 +1825,10 @@
 		if (err)
 			goto bail;
 		if (init->filelen) {
+			mutex_lock(&fl->fl_map_mutex);
 			VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
 				init->file, init->filelen, mflags, &file));
+			mutex_unlock(&fl->fl_map_mutex);
 			if (err)
 				goto bail;
 		}
@@ -1678,8 +1837,10 @@
 			init->memlen));
 		if (err)
 			goto bail;
+		mutex_lock(&fl->fl_map_mutex);
 		VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0,
 				init->mem, init->memlen, mflags, &mem));
+		mutex_unlock(&fl->fl_map_mutex);
 		if (err)
 			goto bail;
 		inbuf.pageslen = 1;
@@ -1749,17 +1910,27 @@
 		inbuf.pgid = current->tgid;
 		inbuf.namelen = init->filelen;
 		inbuf.pageslen = 0;
+
+		if (!strcmp(proc_name, "audiopd")) {
+			fl->spdname = AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME;
+			VERIFY(err, !fastrpc_mmap_remove_pdr(fl));
+		}
+
 		if (!me->staticpd_flags) {
 			inbuf.pageslen = 1;
+			mutex_lock(&fl->fl_map_mutex);
 			VERIFY(err, !fastrpc_mmap_create(fl, -1, 0, init->mem,
 				 init->memlen, ADSP_MMAP_REMOTE_HEAP_ADDR,
 				 &mem));
+			mutex_unlock(&fl->fl_map_mutex);
 			if (err)
 				goto bail;
 			phys = mem->phys;
 			size = mem->size;
 			VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
-					srcVM, 1, destVM, destVMperm, 1));
+				hlosvm, 1, me->channel[fl->cid].rhvm.vmid,
+				me->channel[fl->cid].rhvm.vmperm,
+				me->channel[fl->cid].rhvm.vmcount));
 			if (err) {
 				pr_err("ADSPRPC: hyp_assign_phys fail err %d",
 							 err);
@@ -1805,11 +1976,18 @@
 	if (mem && err) {
 		if (mem->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
 			hyp_assign_phys(mem->phys, (uint64_t)mem->size,
-					destVM, 1, srcVM, hlosVMperm, 1);
+					me->channel[fl->cid].rhvm.vmid,
+					me->channel[fl->cid].rhvm.vmcount,
+					hlosvm, hlosvmperm, 1);
+		mutex_lock(&fl->fl_map_mutex);
 		fastrpc_mmap_free(mem, 0);
+		mutex_unlock(&fl->fl_map_mutex);
 	}
-	if (file)
+	if (file) {
+		mutex_lock(&fl->fl_map_mutex);
 		fastrpc_mmap_free(file, 0);
+		mutex_unlock(&fl->fl_map_mutex);
+	}
 	return err;
 }
 
@@ -1898,13 +2076,10 @@
 		err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
 			TZ_PIL_PROTECT_MEM_SUBSYS_ID), &desc);
 	} else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
-
-		int srcVM[1] = {VMID_HLOS};
-		int destVM[1] = {me->channel[fl->cid].rhvmid};
-		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
-
 		VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
-				srcVM, 1, destVM, destVMperm, 1));
+				hlosvm, 1, me->channel[fl->cid].rhvm.vmid,
+				me->channel[fl->cid].rhvm.vmperm,
+				me->channel[fl->cid].rhvm.vmcount));
 		if (err)
 			goto bail;
 	}
@@ -1917,7 +2092,6 @@
 {
 	int err = 0;
 	struct fastrpc_apps *me = &gfa;
-	int srcVM[1] = {me->channel[fl->cid].rhvmid};
 	int destVM[1] = {VMID_HLOS};
 	int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
 
@@ -1955,7 +2129,9 @@
 			TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID), &desc);
 	} else if (map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
 		VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
-					srcVM, 1, destVM, destVMperm, 1));
+					me->channel[fl->cid].rhvm.vmid,
+					me->channel[fl->cid].rhvm.vmcount,
+					destVM, destVMperm, 1));
 		if (err)
 			goto bail;
 	}
@@ -2051,6 +2227,33 @@
 	return err;
 }
 
+static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl)
+{
+	struct fastrpc_apps *me = &gfa;
+	int session = 0, err = 0;
+
+	VERIFY(err, !fastrpc_get_adsp_session(
+			AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
+	if (err)
+		goto bail;
+	if (me->channel[fl->cid].spd[session].pdrcount !=
+		me->channel[fl->cid].spd[session].prevpdrcount) {
+		if (fastrpc_mmap_remove_ssr(fl))
+			pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
+		me->channel[fl->cid].spd[session].prevpdrcount =
+				me->channel[fl->cid].spd[session].pdrcount;
+	}
+	if (!me->channel[fl->cid].spd[session].ispdup) {
+		VERIFY(err, 0);
+		if (err) {
+			err = -ENOTCONN;
+			goto bail;
+		}
+	}
+bail:
+	return err;
+}
+
 static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
 			     size_t len, struct fastrpc_mmap **ppmap);
 
@@ -2062,16 +2265,25 @@
 	int err = 0;
 	struct fastrpc_mmap *map = NULL;
 
+	mutex_lock(&fl->map_mutex);
+	mutex_lock(&fl->fl_map_mutex);
 	VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
+	mutex_unlock(&fl->fl_map_mutex);
 	if (err)
 		goto bail;
 	VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
 	if (err)
 		goto bail;
+	mutex_lock(&fl->fl_map_mutex);
 	fastrpc_mmap_free(map, 0);
+	mutex_unlock(&fl->fl_map_mutex);
 bail:
-	if (err && map)
+	if (err && map) {
+		mutex_lock(&fl->fl_map_mutex);
 		fastrpc_mmap_add(map);
+		mutex_unlock(&fl->fl_map_mutex);
+	}
+	mutex_unlock(&fl->map_mutex);
 	return err;
 }
 
@@ -2083,16 +2295,18 @@
 	VERIFY(err, (fl && ud));
 	if (err)
 		goto bail;
-
+	mutex_lock(&fl->fl_map_mutex);
 	if (!fastrpc_mmap_find(fl, ud->fd, ud->va, ud->len, 0, 0, &map)) {
 		pr_err("mapping not found to unamp %x va %llx %x\n",
 			ud->fd, (unsigned long long)ud->va,
 			(unsigned int)ud->len);
 		err = -1;
+		mutex_unlock(&fl->fl_map_mutex);
 		goto bail;
 	}
 	if (map)
-	fastrpc_mmap_free(map, 0);
+		fastrpc_mmap_free(map, 0);
+	mutex_unlock(&fl->fl_map_mutex);
 bail:
 	return err;
 }
@@ -2105,13 +2319,18 @@
 	struct fastrpc_mmap *map = NULL;
 	int err = 0;
 
+	mutex_lock(&fl->map_mutex);
+	mutex_lock(&fl->fl_map_mutex);
 	if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin,
-			 ud->size, ud->flags, 1, &map))
+			 ud->size, ud->flags, 1, &map)) {
+		mutex_unlock(&fl->fl_map_mutex);
+		mutex_unlock(&fl->map_mutex);
 		return 0;
-
+	}
 	VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
 			(uintptr_t)ud->vaddrin, ud->size,
 			 ud->flags, &map));
+	mutex_unlock(&fl->fl_map_mutex);
 	if (err)
 		goto bail;
 	VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map));
@@ -2119,8 +2338,12 @@
 		goto bail;
 	ud->vaddrout = map->raddr;
  bail:
-	if (err && map)
+	if (err && map) {
+		mutex_lock(&fl->fl_map_mutex);
 		fastrpc_mmap_free(map, 0);
+		mutex_unlock(&fl->fl_map_mutex);
+	}
+	mutex_unlock(&fl->map_mutex);
 	return err;
 }
 
@@ -2260,8 +2483,9 @@
 
 static int fastrpc_file_free(struct fastrpc_file *fl)
 {
-	struct hlist_node *n;
+	struct hlist_node *n = NULL;
 	struct fastrpc_mmap *map = NULL;
+	struct fastrpc_perf *perf = NULL, *fperf = NULL;
 	int cid;
 
 	if (!fl)
@@ -2283,9 +2507,11 @@
 	spin_unlock(&fl->hlock);
 	fastrpc_context_list_dtor(fl);
 	fastrpc_buf_list_free(fl);
+	mutex_lock(&fl->fl_map_mutex);
 	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
 		fastrpc_mmap_free(map, 1);
 	}
+	mutex_unlock(&fl->fl_map_mutex);
 	if (fl->ssrcount == fl->apps->channel[cid].ssrcount)
 		kref_put_mutex(&fl->apps->channel[cid].kref,
 				fastrpc_channel_close, &fl->apps->smd_mutex);
@@ -2293,6 +2519,22 @@
 		fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
 	if (fl->secsctx)
 		fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
+
+	mutex_lock(&fl->perf_mutex);
+	do {
+		struct hlist_node *pn = NULL;
+
+		fperf = NULL;
+		hlist_for_each_entry_safe(perf, pn, &fl->perf, hn) {
+			hlist_del_init(&perf->hn);
+			fperf = perf;
+			break;
+		}
+		kfree(fperf);
+	} while (fperf);
+	mutex_unlock(&fl->perf_mutex);
+	mutex_destroy(&fl->perf_mutex);
+	mutex_destroy(&fl->fl_map_mutex);
 	kfree(fl);
 	return 0;
 }
@@ -2306,6 +2548,7 @@
 			pm_qos_remove_request(&fl->pm_qos_req);
 		if (fl->debugfs_file != NULL)
 			debugfs_remove(fl->debugfs_file);
+		mutex_destroy(&fl->map_mutex);
 		fastrpc_file_free(fl);
 		file->private_data = NULL;
 	}
@@ -2588,7 +2831,7 @@
 		if (err)
 			pr_warn("adsprpc: initial intent fail for %d err %d\n",
 					 cid, err);
-		if (me->channel[cid].ssrcount !=
+		if (cid == 0 && me->channel[cid].ssrcount !=
 				 me->channel[cid].prevssrcount) {
 			if (fastrpc_mmap_remove_ssr(fl))
 				pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
@@ -2617,6 +2860,7 @@
 	context_list_ctor(&fl->clst);
 	spin_lock_init(&fl->hlock);
 	INIT_HLIST_HEAD(&fl->maps);
+	INIT_HLIST_HEAD(&fl->perf);
 	INIT_HLIST_HEAD(&fl->bufs);
 	INIT_HLIST_NODE(&fl->hn);
 	fl->sessionid = 0;
@@ -2629,9 +2873,12 @@
 	memset(&fl->perf, 0, sizeof(fl->perf));
 	fl->qos_request = 0;
 	filp->private_data = fl;
+	mutex_init(&fl->map_mutex);
+	mutex_init(&fl->fl_map_mutex);
 	spin_lock(&me->hlock);
 	hlist_add_head(&fl->hn, &me->drivers);
 	spin_unlock(&me->hlock);
+	mutex_init(&fl->perf_mutex);
 	return 0;
 }
 
@@ -2815,8 +3062,23 @@
 				goto bail;
 		}
 		if (p.perf.data) {
-			K_COPY_TO_USER(err, 0, (void *)p.perf.data,
-						 &fl->perf, sizeof(fl->perf));
+			struct fastrpc_perf *perf = NULL, *fperf = NULL;
+			struct hlist_node *n = NULL;
+
+			mutex_lock(&fl->perf_mutex);
+			hlist_for_each_entry_safe(perf, n, &fl->perf, hn) {
+				if (perf->tid == current->pid) {
+					fperf = perf;
+					break;
+				}
+			}
+
+			mutex_unlock(&fl->perf_mutex);
+
+			if (fperf) {
+				K_COPY_TO_USER(err, 0, (void *)p.perf.data,
+					fperf, sizeof(*fperf));
+			}
 		}
 		K_COPY_TO_USER(err, 0, param, &p.perf, sizeof(p.perf));
 		if (err)
@@ -2912,6 +3174,64 @@
 	return NOTIFY_DONE;
 }
 
+static int fastrpc_audio_pdr_notifier_cb(struct notifier_block *pdrnb,
+					unsigned long code,
+					void *data)
+{
+	struct fastrpc_apps *me = &gfa;
+	struct fastrpc_static_pd *spd;
+	struct notif_data *notifdata = data;
+
+	spd = container_of(pdrnb, struct fastrpc_static_pd, pdrnb);
+	if (code == SERVREG_NOTIF_SERVICE_STATE_DOWN_V01) {
+		mutex_lock(&me->smd_mutex);
+		spd->pdrcount++;
+		spd->ispdup = 0;
+		pr_info("ADSPRPC: Audio PDR notifier %d %s\n",
+					MAJOR(me->dev_no), spd->spdname);
+		mutex_unlock(&me->smd_mutex);
+		if (!strcmp(spd->spdname,
+				AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME))
+			me->staticpd_flags = 0;
+		fastrpc_notify_pdr_drivers(me, spd->spdname);
+	} else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
+		if (me->channel[0].remoteheap_ramdump_dev &&
+				notifdata->enable_ramdump) {
+			me->channel[0].ramdumpenabled = 1;
+		}
+	} else if (code == SERVREG_NOTIF_SERVICE_STATE_UP_V01) {
+		spd->ispdup = 1;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static int fastrpc_get_service_location_notify(struct notifier_block *nb,
+					     unsigned long opcode, void *data)
+{
+	struct fastrpc_static_pd *spd;
+	struct pd_qmi_client_data *pdr = data;
+	int curr_state = 0;
+
+	spd = container_of(nb, struct fastrpc_static_pd, get_service_nb);
+	if (opcode == LOCATOR_DOWN) {
+		pr_err("ADSPRPC: Audio PD restart notifier locator down\n");
+		return NOTIFY_DONE;
+	}
+
+	if (pdr->total_domains == 1) {
+		spd->pdrhandle = service_notif_register_notifier(
+				pdr->domain_list[0].name,
+				pdr->domain_list[0].instance_id,
+				&spd->pdrnb, &curr_state);
+		if (IS_ERR(spd->pdrhandle))
+			pr_err("ADSPRPC: Unable to register notifier\n");
+	} else
+		pr_err("ADSPRPC: Service returned invalid domains\n");
+
+	return NOTIFY_DONE;
+}
+
 static const struct file_operations fops = {
 	.open = fastrpc_device_open,
 	.release = fastrpc_device_release,
@@ -2991,6 +3311,46 @@
 	return err;
 }
 
+static void init_secure_vmid_list(struct device *dev, char *prop_name,
+						struct secure_vm *destvm)
+{
+	int err = 0;
+	u32 len = 0, i = 0;
+	u32 *rhvmlist = NULL;
+	u32 *rhvmpermlist = NULL;
+
+	if (!of_find_property(dev->of_node, prop_name, &len))
+		goto bail;
+	if (len == 0)
+		goto bail;
+	len /= sizeof(u32);
+	VERIFY(err, NULL != (rhvmlist = kcalloc(len, sizeof(u32), GFP_KERNEL)));
+	if (err)
+		goto bail;
+	VERIFY(err, NULL != (rhvmpermlist = kcalloc(len, sizeof(u32),
+					 GFP_KERNEL)));
+	if (err)
+		goto bail;
+	for (i = 0; i < len; i++) {
+		err = of_property_read_u32_index(dev->of_node, prop_name, i,
+								&rhvmlist[i]);
+		rhvmpermlist[i] = PERM_READ | PERM_WRITE | PERM_EXEC;
+		pr_info("ADSPRPC: Secure VMID = %d", rhvmlist[i]);
+		if (err) {
+			pr_err("ADSPRPC: Failed to read VMID\n");
+			goto bail;
+		}
+	}
+	destvm->vmid = rhvmlist;
+	destvm->vmperm = rhvmpermlist;
+	destvm->vmcount = len;
+bail:
+	if (err) {
+		kfree(rhvmlist);
+		kfree(rhvmpermlist);
+	}
+}
+
 static int fastrpc_probe(struct platform_device *pdev)
 {
 	int err = 0;
@@ -3001,14 +3361,14 @@
 	struct platform_device *ion_pdev;
 	struct cma *cma;
 	uint32_t val;
+	int ret = 0;
 
 
 	if (of_device_is_compatible(dev->of_node,
 					"qcom,msm-fastrpc-compute")) {
-		of_property_read_u32(dev->of_node, "qcom,adsp-remoteheap-vmid",
-			&gcinfo[0].rhvmid);
+		init_secure_vmid_list(dev, "qcom,adsp-remoteheap-vmid",
+							&gcinfo[0].rhvm);
 
-		pr_info("ADSPRPC : vmids adsp=%d\n", gcinfo[0].rhvmid);
 
 		of_property_read_u32(dev->of_node, "qcom,rpc-latency-us",
 			&me->latency);
@@ -3039,7 +3399,8 @@
 				break;
 			}
 		}
-		if (range.addr) {
+		if (range.addr && !of_property_read_bool(dev->of_node,
+							 "restrict-access")) {
 			int srcVM[1] = {VMID_HLOS};
 			int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
 						VMID_ADSP_Q6};
@@ -3056,7 +3417,26 @@
 		}
 		return 0;
 	}
+	if (of_property_read_bool(dev->of_node,
+					"qcom,fastrpc-adsp-audio-pdr")) {
+		int session;
 
+		VERIFY(err, !fastrpc_get_adsp_session(
+			AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
+		if (err)
+			goto spdbail;
+		me->channel[0].spd[session].get_service_nb.notifier_call =
+					fastrpc_get_service_location_notify;
+		ret = get_service_location(
+				AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME,
+				AUDIO_PDR_ADSP_SERVICE_NAME,
+				&me->channel[0].spd[session].get_service_nb);
+		if (ret)
+			pr_err("ADSPRPC: Get service location failed: %d\n",
+								ret);
+	}
+spdbail:
+	err = 0;
 	VERIFY(err, !of_platform_populate(pdev->dev.of_node,
 					  fastrpc_match_table,
 					  NULL, &pdev->dev));
@@ -3089,6 +3469,8 @@
 				sess->smmu.mapping = NULL;
 			}
 		}
+		kfree(chan->rhvm.vmid);
+		kfree(chan->rhvm.vmperm);
 	}
 }
 
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index 83f44ce..166bd16 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -512,7 +512,7 @@
 				if (driver->client_map[j].pid != 0 &&
 					driver->client_map[j].pid ==
 					driver->md_session_map[i]->pid) {
-					if (!(driver->data_ready[i] & type)) {
+					if (!(driver->data_ready[j] & type)) {
 						driver->data_ready[j] |= type;
 						atomic_inc(
 						&driver->data_ready_notif[j]);
@@ -594,14 +594,15 @@
 	 * Check if command is valid. If the command is asking for
 	 * status, then the processor mask field is to be ignored.
 	 */
-	if ((version != 2) || (cmd > STATUS_STM) ||
-		((cmd != STATUS_STM) && ((mask == 0) || (0 != (mask >> 4))))) {
+	if ((version != 2) || (cmd > STM_AUTO_QUERY) ||
+		((cmd != STATUS_STM && cmd != STM_AUTO_QUERY) &&
+		((mask == 0) || (0 != (mask >> 4))))) {
 		/* Command is invalid. Send bad param message response */
 		dest_buf[0] = BAD_PARAM_RESPONSE_MESSAGE;
 		for (i = 0; i < STM_CMD_NUM_BYTES; i++)
 			dest_buf[i+1] = *(buf + i);
 		return STM_CMD_NUM_BYTES+1;
-	} else if (cmd != STATUS_STM) {
+	} else if (cmd != STATUS_STM && cmd != STM_AUTO_QUERY) {
 		if (mask & DIAG_STM_MODEM)
 			diag_process_stm_mask(cmd, DIAG_STM_MODEM,
 					      PERIPHERAL_MODEM);
diff --git a/drivers/char/diag/diagfwd_cntl.h b/drivers/char/diag/diagfwd_cntl.h
index 848ad87..4a0ee11 100644
--- a/drivers/char/diag/diagfwd_cntl.h
+++ b/drivers/char/diag/diagfwd_cntl.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -77,7 +77,7 @@
 #define DISABLE_STM	0
 #define ENABLE_STM	1
 #define STATUS_STM	2
-
+#define STM_AUTO_QUERY  3
 #define UPDATE_PERIPHERAL_STM_STATE	1
 #define CLEAR_PERIPHERAL_STM_STATE	2
 
diff --git a/drivers/char/diag/diagfwd_mhi.c b/drivers/char/diag/diagfwd_mhi.c
index f27f358..f8c3fde 100644
--- a/drivers/char/diag/diagfwd_mhi.c
+++ b/drivers/char/diag/diagfwd_mhi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -552,6 +552,8 @@
 	struct mhi_result *result = NULL;
 	struct diag_mhi_ch_t *ch = NULL;
 	void *buf = NULL;
+	struct diag_mhi_info *mhi_info = NULL;
+	unsigned long flags;
 
 	if (!cb_info)
 		return;
@@ -603,13 +605,6 @@
 		queue_work(diag_mhi[index].mhi_wq,
 			   &(diag_mhi[index].open_work));
 		break;
-	case MHI_CB_MHI_DISABLED:
-		DIAG_LOG(DIAG_DEBUG_BRIDGE,
-			 "received mhi disabled notifiation port: %d ch: %d\n",
-			 index, ch->type);
-		atomic_set(&(ch->opened), 0);
-		__mhi_close(&diag_mhi[index], CHANNELS_CLOSED);
-		break;
 	case MHI_CB_XFER:
 		/*
 		 * If the channel is a read channel, this is a read
@@ -636,6 +631,24 @@
 					   result->bytes_xferd,
 					   diag_mhi[index].id);
 		break;
+	case MHI_CB_MHI_DISABLED:
+	case MHI_CB_SYS_ERROR:
+	case MHI_CB_MHI_SHUTDOWN:
+		DIAG_LOG(DIAG_DEBUG_BRIDGE,
+			 "received mhi link down cb: %d port: %d ch: %d\n",
+			 cb_info->cb_reason, index, ch->type);
+		mhi_info = &diag_mhi[index];
+		if (!mhi_info->enabled)
+			return;
+		spin_lock_irqsave(&mhi_info->lock, flags);
+		mhi_info->enabled = 0;
+		spin_unlock_irqrestore(&mhi_info->lock, flags);
+		atomic_set(&(mhi_info->read_ch.opened), 0);
+		atomic_set(&(mhi_info->write_ch.opened), 0);
+		flush_workqueue(mhi_info->mhi_wq);
+		mhi_buf_tbl_clear(mhi_info);
+		diag_remote_dev_close(mhi_info->dev_id);
+		break;
 	default:
 		pr_err("diag: In %s, invalid cb reason 0x%x\n", __func__,
 		       cb_info->cb_reason);
diff --git a/drivers/char/diag/diagfwd_socket.c b/drivers/char/diag/diagfwd_socket.c
index af8bf00..f3c587d 100644
--- a/drivers/char/diag/diagfwd_socket.c
+++ b/drivers/char/diag/diagfwd_socket.c
@@ -513,8 +513,10 @@
 		info->hdl->sk->sk_user_data = NULL;
 		info->hdl->sk->sk_data_ready = NULL;
 		write_unlock_bh(&info->hdl->sk->sk_callback_lock);
+		mutex_lock(&info->socket_info_mutex);
 		sock_release(info->hdl);
 		info->hdl = NULL;
+		mutex_unlock(&info->socket_info_mutex);
 		wake_up_interruptible(&info->read_wait_q);
 	}
 	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s exiting\n", info->name);
@@ -820,6 +822,8 @@
 		break;
 	}
 
+	if (info->port_type == PORT_TYPE_CLIENT)
+		mutex_init(&info->socket_info_mutex);
 	info->svc_id = DIAG_SVC_ID;
 	info->ins_id = ins_base + ins_offset;
 	info->inited = 1;
@@ -1031,6 +1035,8 @@
 	diagfwd_deregister(info->peripheral, info->type, (void *)info);
 	info->fwd_ctxt = NULL;
 	info->hdl = NULL;
+	if (info->port_type == PORT_TYPE_CLIENT)
+		mutex_destroy(&info->socket_info_mutex);
 	if (info->wq)
 		destroy_workqueue(info->wq);
 
@@ -1119,13 +1125,28 @@
 		read_msg.msg_name = &src_addr;
 		read_msg.msg_namelen = sizeof(src_addr);
 
+		if (info->port_type != PORT_TYPE_SERVER) {
+			mutex_lock(&info->socket_info_mutex);
+			if (!info->hdl) {
+				DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+				"%s closing read thread\n",
+					info->name);
+				mutex_unlock(&info->socket_info_mutex);
+				goto fail;
+			}
+		}
 		pkt_len = kernel_recvmsg(info->hdl, &read_msg, &iov, 1, 0,
 					 MSG_PEEK);
-		if (pkt_len <= 0)
+		if (pkt_len <= 0) {
+			if (info->port_type != PORT_TYPE_SERVER)
+				mutex_unlock(&info->socket_info_mutex);
 			break;
+		}
 
 		if (pkt_len > bytes_remaining) {
 			buf_full = 1;
+			if (info->port_type != PORT_TYPE_SERVER)
+				mutex_unlock(&info->socket_info_mutex);
 			break;
 		}
 
@@ -1135,6 +1156,8 @@
 
 		read_len = kernel_recvmsg(info->hdl, &read_msg, &iov, 1,
 					  pkt_len, 0);
+		if (info->port_type != PORT_TYPE_SERVER)
+			mutex_unlock(&info->socket_info_mutex);
 		if (read_len <= 0)
 			goto fail;
 
@@ -1211,7 +1234,16 @@
 	write_msg.msg_name = &info->remote_addr;
 	write_msg.msg_namelen = sizeof(info->remote_addr);
 	write_msg.msg_flags |= MSG_DONTWAIT;
+	if (info->port_type != PORT_TYPE_SERVER) {
+		mutex_lock(&info->socket_info_mutex);
+		if (!info->hdl) {
+			mutex_unlock(&info->socket_info_mutex);
+			return -ENODEV;
+		}
+	}
 	write_len = kernel_sendmsg(info->hdl, &write_msg, &iov, 1, len);
+	if (info->port_type != PORT_TYPE_SERVER)
+		mutex_unlock(&info->socket_info_mutex);
 	if (write_len < 0) {
 		err = write_len;
 		/*
diff --git a/drivers/char/diag/diagfwd_socket.h b/drivers/char/diag/diagfwd_socket.h
index a9487b1..c42be06 100644
--- a/drivers/char/diag/diagfwd_socket.h
+++ b/drivers/char/diag/diagfwd_socket.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -65,6 +65,7 @@
 	struct work_struct read_work;
 	struct diagfwd_info *fwd_ctxt;
 	wait_queue_head_t read_wait_q;
+	struct mutex socket_info_mutex;
 };
 
 union cntl_port_msg {
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index a112c01..e0a5315 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -241,6 +241,9 @@
 	/* The timer for this si. */
 	struct timer_list   si_timer;
 
+	/* This flag is set, if the timer can be set */
+	bool		    timer_can_start;
+
 	/* This flag is set, if the timer is running (timer_pending() isn't enough) */
 	bool		    timer_running;
 
@@ -416,6 +419,8 @@
 
 static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
 {
+	if (!smi_info->timer_can_start)
+		return;
 	smi_info->last_timeout_jiffies = jiffies;
 	mod_timer(&smi_info->si_timer, new_val);
 	smi_info->timer_running = true;
@@ -435,21 +440,18 @@
 	smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
 }
 
-static void start_check_enables(struct smi_info *smi_info, bool start_timer)
+static void start_check_enables(struct smi_info *smi_info)
 {
 	unsigned char msg[2];
 
 	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
 	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
 
-	if (start_timer)
-		start_new_msg(smi_info, msg, 2);
-	else
-		smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+	start_new_msg(smi_info, msg, 2);
 	smi_info->si_state = SI_CHECKING_ENABLES;
 }
 
-static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
+static void start_clear_flags(struct smi_info *smi_info)
 {
 	unsigned char msg[3];
 
@@ -458,10 +460,7 @@
 	msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
 	msg[2] = WDT_PRE_TIMEOUT_INT;
 
-	if (start_timer)
-		start_new_msg(smi_info, msg, 3);
-	else
-		smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
+	start_new_msg(smi_info, msg, 3);
 	smi_info->si_state = SI_CLEARING_FLAGS;
 }
 
@@ -496,11 +495,11 @@
  * Note that we cannot just use disable_irq(), since the interrupt may
  * be shared.
  */
-static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer)
+static inline bool disable_si_irq(struct smi_info *smi_info)
 {
 	if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
 		smi_info->interrupt_disabled = true;
-		start_check_enables(smi_info, start_timer);
+		start_check_enables(smi_info);
 		return true;
 	}
 	return false;
@@ -510,7 +509,7 @@
 {
 	if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
 		smi_info->interrupt_disabled = false;
-		start_check_enables(smi_info, true);
+		start_check_enables(smi_info);
 		return true;
 	}
 	return false;
@@ -528,7 +527,7 @@
 
 	msg = ipmi_alloc_smi_msg();
 	if (!msg) {
-		if (!disable_si_irq(smi_info, true))
+		if (!disable_si_irq(smi_info))
 			smi_info->si_state = SI_NORMAL;
 	} else if (enable_si_irq(smi_info)) {
 		ipmi_free_smi_msg(msg);
@@ -544,7 +543,7 @@
 		/* Watchdog pre-timeout */
 		smi_inc_stat(smi_info, watchdog_pretimeouts);
 
-		start_clear_flags(smi_info, true);
+		start_clear_flags(smi_info);
 		smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
 		if (smi_info->intf)
 			ipmi_smi_watchdog_pretimeout(smi_info->intf);
@@ -927,7 +926,7 @@
 		 * disable and messages disabled.
 		 */
 		if (smi_info->supports_event_msg_buff || smi_info->irq) {
-			start_check_enables(smi_info, true);
+			start_check_enables(smi_info);
 		} else {
 			smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
 			if (!smi_info->curr_msg)
@@ -1234,6 +1233,7 @@
 
 	/* Set up the timer that drives the interface. */
 	setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
+	new_smi->timer_can_start = true;
 	smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
 
 	/* Try to claim any interrupts. */
@@ -3448,10 +3448,12 @@
 	check_set_rcv_irq(smi_info);
 }
 
-static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
+static inline void stop_timer_and_thread(struct smi_info *smi_info)
 {
 	if (smi_info->thread != NULL)
 		kthread_stop(smi_info->thread);
+
+	smi_info->timer_can_start = false;
 	if (smi_info->timer_running)
 		del_timer_sync(&smi_info->si_timer);
 }
@@ -3593,7 +3595,7 @@
 	 * Start clearing the flags before we enable interrupts or the
 	 * timer to avoid racing with the timer.
 	 */
-	start_clear_flags(new_smi, false);
+	start_clear_flags(new_smi);
 
 	/*
 	 * IRQ is defined to be set when non-zero.  req_events will
@@ -3671,7 +3673,7 @@
 	return 0;
 
 out_err_stop_timer:
-	wait_for_timer_and_thread(new_smi);
+	stop_timer_and_thread(new_smi);
 
 out_err:
 	new_smi->interrupt_disabled = true;
@@ -3865,7 +3867,7 @@
 	 */
 	if (to_clean->irq_cleanup)
 		to_clean->irq_cleanup(to_clean);
-	wait_for_timer_and_thread(to_clean);
+	stop_timer_and_thread(to_clean);
 
 	/*
 	 * Timeouts are stopped, now make sure the interrupts are off
@@ -3876,7 +3878,7 @@
 		poll(to_clean);
 		schedule_timeout_uninterruptible(1);
 	}
-	disable_si_irq(to_clean, false);
+	disable_si_irq(to_clean);
 	while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
 		poll(to_clean);
 		schedule_timeout_uninterruptible(1);
diff --git a/drivers/clk/hisilicon/clk-hi6220.c b/drivers/clk/hisilicon/clk-hi6220.c
index c0e8e1f..2bfaf22 100644
--- a/drivers/clk/hisilicon/clk-hi6220.c
+++ b/drivers/clk/hisilicon/clk-hi6220.c
@@ -144,7 +144,7 @@
 	{ HI6220_BBPPLL_SEL,    "bbppll_sel",    "pll0_bbp_gate",  CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 9,  0, },
 	{ HI6220_MEDIA_PLL_SRC, "media_pll_src", "pll_media_gate", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 10, 0, },
 	{ HI6220_MMC2_SEL,      "mmc2_sel",      "mmc2_mux1",      CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 11, 0, },
-	{ HI6220_CS_ATB_SYSPLL, "cs_atb_syspll", "syspll",         CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 12, 0, },
+	{ HI6220_CS_ATB_SYSPLL, "cs_atb_syspll", "syspll",         CLK_SET_RATE_PARENT|CLK_IS_CRITICAL,   0x270, 12, 0, },
 };
 
 static struct hisi_mux_clock hi6220_mux_clks_sys[] __initdata = {
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index ce8ea10..93a1966 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -487,7 +487,7 @@
 	clk[IMX6QDL_CLK_GPU2D_CORE] = imx_clk_gate2("gpu2d_core", "gpu2d_core_podf", base + 0x6c, 24);
 	clk[IMX6QDL_CLK_GPU3D_CORE]   = imx_clk_gate2("gpu3d_core",    "gpu3d_core_podf",   base + 0x6c, 26);
 	clk[IMX6QDL_CLK_HDMI_IAHB]    = imx_clk_gate2("hdmi_iahb",     "ahb",               base + 0x70, 0);
-	clk[IMX6QDL_CLK_HDMI_ISFR]    = imx_clk_gate2("hdmi_isfr",     "video_27m",         base + 0x70, 4);
+	clk[IMX6QDL_CLK_HDMI_ISFR]    = imx_clk_gate2("hdmi_isfr",     "mipi_core_cfg",     base + 0x70, 4);
 	clk[IMX6QDL_CLK_I2C1]         = imx_clk_gate2("i2c1",          "ipg_per",           base + 0x70, 6);
 	clk[IMX6QDL_CLK_I2C2]         = imx_clk_gate2("i2c2",          "ipg_per",           base + 0x70, 8);
 	clk[IMX6QDL_CLK_I2C3]         = imx_clk_gate2("i2c3",          "ipg_per",           base + 0x70, 10);
diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
index 9f24fcf..e425e50 100644
--- a/drivers/clk/mediatek/clk-mtk.h
+++ b/drivers/clk/mediatek/clk-mtk.h
@@ -185,6 +185,7 @@
 	uint32_t pcw_reg;
 	int pcw_shift;
 	const struct mtk_pll_div_table *div_table;
+	const char *parent_name;
 };
 
 void mtk_clk_register_plls(struct device_node *node,
diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c
index 0c2deac..1502384 100644
--- a/drivers/clk/mediatek/clk-pll.c
+++ b/drivers/clk/mediatek/clk-pll.c
@@ -302,7 +302,10 @@
 
 	init.name = data->name;
 	init.ops = &mtk_pll_ops;
-	init.parent_names = &parent_name;
+	if (data->parent_name)
+		init.parent_names = &data->parent_name;
+	else
+		init.parent_names = &parent_name;
 	init.num_parents = 1;
 
 	clk = clk_register(NULL, &pll->hw);
diff --git a/drivers/clk/msm/clock-gcc-8953.c b/drivers/clk/msm/clock-gcc-8953.c
index 797f851..b2dc3d26 100644
--- a/drivers/clk/msm/clock-gcc-8953.c
+++ b/drivers/clk/msm/clock-gcc-8953.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -2786,6 +2786,7 @@
 	.base = &virt_bases[GFX_BASE],
 	.c = {
 		.dbg_name = "gcc_oxili_timer_clk",
+		.parent = &xo_clk_src.c,
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_oxili_timer_clk.c),
 	},
@@ -4071,8 +4072,17 @@
 	struct resource *res;
 	int ret;
 	u32 regval;
+	struct clk *xo_clk;
 	bool compat_bin = false;
 
+	/* Require the GCC-RPM-XO clock to be registered first */
+	xo_clk = devm_clk_get(&pdev->dev, "xo");
+	if (IS_ERR(xo_clk)) {
+		if (PTR_ERR(xo_clk) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get xo clock\n");
+		return PTR_ERR(xo_clk);
+	}
+
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cc_base");
 	if (!res) {
 		dev_err(&pdev->dev, "Register base not defined\n");
diff --git a/drivers/clk/qcom/gcc-ipq4019.c b/drivers/clk/qcom/gcc-ipq4019.c
index b593065..8ab6ce4 100644
--- a/drivers/clk/qcom/gcc-ipq4019.c
+++ b/drivers/clk/qcom/gcc-ipq4019.c
@@ -525,10 +525,20 @@
 };
 
 static const struct freq_tbl ftbl_gcc_apps_clk[] = {
-	F(48000000, P_XO,	   1, 0, 0),
+	F(48000000,  P_XO,         1, 0, 0),
 	F(200000000, P_FEPLL200,   1, 0, 0),
+	F(384000000, P_DDRPLLAPSS, 1, 0, 0),
+	F(413000000, P_DDRPLLAPSS, 1, 0, 0),
+	F(448000000, P_DDRPLLAPSS, 1, 0, 0),
+	F(488000000, P_DDRPLLAPSS, 1, 0, 0),
 	F(500000000, P_FEPLL500,   1, 0, 0),
-	F(626000000, P_DDRPLLAPSS, 1, 0, 0),
+	F(512000000, P_DDRPLLAPSS, 1, 0, 0),
+	F(537000000, P_DDRPLLAPSS, 1, 0, 0),
+	F(565000000, P_DDRPLLAPSS, 1, 0, 0),
+	F(597000000, P_DDRPLLAPSS, 1, 0, 0),
+	F(632000000, P_DDRPLLAPSS, 1, 0, 0),
+	F(672000000, P_DDRPLLAPSS, 1, 0, 0),
+	F(716000000, P_DDRPLLAPSS, 1, 0, 0),
 	{ }
 };
 
diff --git a/drivers/clk/qcom/gcc-sdxpoorwills.c b/drivers/clk/qcom/gcc-sdxpoorwills.c
index 696d7fb..c6e8faa 100644
--- a/drivers/clk/qcom/gcc-sdxpoorwills.c
+++ b/drivers/clk/qcom/gcc-sdxpoorwills.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1316,33 +1316,6 @@
 	},
 };
 
-static struct clk_branch gcc_mss_cfg_ahb_clk = {
-	.halt_reg = 0x40000,
-	.halt_check = BRANCH_HALT,
-	.hwcg_reg = 0x40000,
-	.hwcg_bit = 1,
-	.clkr = {
-		.enable_reg = 0x40000,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "gcc_mss_cfg_ahb_clk",
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
-static struct clk_gate2 gcc_mss_gpll0_div_clk_src = {
-	.udelay = 500,
-	.clkr = {
-		.enable_reg = 0x6d004,
-		.enable_mask = BIT(17),
-		.hw.init = &(struct clk_init_data){
-			.name = "gcc_mss_gpll0_div_clk_src",
-			.ops = &clk_gate2_ops,
-		},
-	},
-};
-
 static struct clk_branch gcc_pcie_0_clkref_clk = {
 	.halt_reg = 0x88004,
 	.halt_check = BRANCH_HALT,
@@ -1794,8 +1767,6 @@
 	[GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
 	[GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
 	[GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
-	[GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
-	[GCC_MSS_GPLL0_DIV_CLK_SRC] = &gcc_mss_gpll0_div_clk_src.clkr,
 	[GCC_PCIE_0_CLKREF_CLK] = &gcc_pcie_0_clkref_clk.clkr,
 	[GCC_PCIE_AUX_CLK] = &gcc_pcie_aux_clk.clkr,
 	[GCC_PCIE_AUX_PHY_CLK_SRC] = &gcc_pcie_aux_phy_clk_src.clkr,
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
index 3f9fcd9..7290205 100644
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -20,6 +20,8 @@
 #include "mdss-dsi-pll.h"
 #include "mdss-pll.h"
 #include <dt-bindings/clock/mdss-10nm-pll-clk.h>
+#define CREATE_TRACE_POINTS
+#include "mdss_pll_trace.h"
 
 #define VCO_DELAY_USEC 1
 
@@ -890,8 +892,13 @@
 		MDSS_PLL_REG_W(pll->pll_base, PLL_PLL_OUTDIV_RATE,
 					pll->cached_outdiv);
 	}
-
+	MDSS_PLL_ATRACE_BEGIN("pll_lock");
+	trace_mdss_pll_lock_start((u64)pll->vco_cached_rate,
+			pll->vco_current_rate,
+			pll->cached_cfg0, pll->cached_cfg1,
+			pll->cached_outdiv, pll->resource_ref_cnt);
 	rc = dsi_pll_enable(vco);
+	MDSS_PLL_ATRACE_END("pll_lock");
 	if (rc) {
 		mdss_pll_resource_enable(pll, false);
 		pr_err("pll(%d) enable failed, rc=%d\n", pll->index, rc);
diff --git a/drivers/clk/qcom/mdss/mdss_pll_trace.h b/drivers/clk/qcom/mdss/mdss_pll_trace.h
new file mode 100644
index 0000000..cd4fda6
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss_pll_trace.h
@@ -0,0 +1,116 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#if !defined(_MDSS_PLL_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _MDSS_PLL_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mdss_pll
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE mdss_pll_trace
+
+
+TRACE_EVENT(mdss_pll_lock_start,
+	TP_PROTO(
+			u64 vco_cached_rate,
+			s64 vco_current_rate,
+			u32 cached_cfg0,
+			u32 cached_cfg1,
+			u32 cached_outdiv,
+			u32 resource_ref_cnt),
+	TP_ARGS(
+			vco_cached_rate,
+			vco_current_rate,
+			cached_cfg0,
+			cached_cfg1,
+			cached_outdiv,
+			resource_ref_cnt),
+	TP_STRUCT__entry(
+			__field(u64, vco_cached_rate)
+			__field(s64, vco_current_rate)
+			__field(u32, cached_cfg0)
+			__field(u32, cached_cfg1)
+			__field(u32, cached_outdiv)
+			__field(u32, resource_ref_cnt)
+
+	),
+	TP_fast_assign(
+			__entry->vco_cached_rate = vco_cached_rate;
+			__entry->vco_current_rate = vco_current_rate;
+			__entry->cached_cfg0 = cached_cfg0;
+			__entry->cached_cfg1 = cached_cfg1;
+			__entry->cached_outdiv = cached_outdiv;
+			__entry->resource_ref_cnt = resource_ref_cnt;
+	),
+	 TP_printk(
+		"vco_cached_rate=%llu vco_current_rate=%lld cached_cfg0=%d cached_cfg1=%d cached_outdiv=%d resource_ref_cnt=%d",
+			__entry->vco_cached_rate,
+			__entry->vco_current_rate,
+			__entry->cached_cfg0,
+			__entry->cached_cfg1,
+			__entry->cached_outdiv,
+			__entry->resource_ref_cnt)
+);
+
+TRACE_EVENT(pll_tracing_mark_write,
+	TP_PROTO(int pid, const char *name, bool trace_begin),
+	TP_ARGS(pid, name, trace_begin),
+	TP_STRUCT__entry(
+			__field(int, pid)
+			__string(trace_name, name)
+			__field(bool, trace_begin)
+	),
+	TP_fast_assign(
+			__entry->pid = pid;
+			__assign_str(trace_name, name);
+			__entry->trace_begin = trace_begin;
+	),
+	TP_printk("%s|%d|%s", __entry->trace_begin ? "B" : "E",
+		__entry->pid, __get_str(trace_name))
+)
+
+TRACE_EVENT(mdss_pll_trace_counter,
+	TP_PROTO(int pid, char *name, int value),
+	TP_ARGS(pid, name, value),
+	TP_STRUCT__entry(
+			__field(int, pid)
+			__string(counter_name, name)
+			__field(int, value)
+	),
+	TP_fast_assign(
+			__entry->pid = current->tgid;
+			__assign_str(counter_name, name);
+			__entry->value = value;
+	),
+	TP_printk("%d|%s|%d", __entry->pid,
+			__get_str(counter_name), __entry->value)
+)
+
+#define MDSS_PLL_ATRACE_END(name) trace_pll_tracing_mark_write(current->tgid,\
+		name, 0)
+#define MDSS_PLL_ATRACE_BEGIN(name) trace_pll_tracing_mark_write(current->tgid,\
+		name, 1)
+#define MDSS_PLL_ATRACE_FUNC() MDSS_PLL_ATRACE_BEGIN(__func__)
+#define MDSS_PLL_ATRACE_INT(name, value) \
+	trace_mdss_pll_trace_counter(current->tgid, name, value)
+
+
+#endif /* _MDSS_PLL_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 0cca360..9fe0939 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -468,8 +468,8 @@
 static SUNXI_CCU_MUX_WITH_GATE(daudio1_clk, "daudio1", daudio_parents,
 			       0x0b4, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
 
-static SUNXI_CCU_M_WITH_GATE(spdif_clk, "spdif", "pll-audio",
-			     0x0c0, 0, 4, BIT(31), CLK_SET_RATE_PARENT);
+static SUNXI_CCU_MUX_WITH_GATE(spdif_clk, "spdif", daudio_parents,
+			       0x0c0, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
 
 static SUNXI_CCU_GATE(usb_phy0_clk,	"usb-phy0",	"osc24M",
 		      0x0cc, BIT(8), 0);
@@ -608,7 +608,7 @@
 				 0x150, 0, 4, 24, 2, BIT(31),
 				 CLK_SET_RATE_PARENT);
 
-static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", 0x150, BIT(30), 0);
+static SUNXI_CCU_GATE(hdmi_ddc_clk, "ddc", "osc24M", 0x150, BIT(30), 0);
 
 static SUNXI_CCU_GATE(ps_clk, "ps", "lcd1-ch1", 0x140, BIT(31), 0);
 
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
index 9bd1f78..e1dc4e5 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
@@ -752,6 +752,13 @@
 	.num_resets	= ARRAY_SIZE(sun8i_a33_ccu_resets),
 };
 
+static struct ccu_mux_nb sun8i_a33_cpu_nb = {
+	.common		= &cpux_clk.common,
+	.cm		= &cpux_clk.mux,
+	.delay_us	= 1, /* > 8 clock cycles at 24 MHz */
+	.bypass_index	= 1, /* index of 24 MHz oscillator */
+};
+
 static void __init sun8i_a33_ccu_setup(struct device_node *node)
 {
 	void __iomem *reg;
@@ -775,6 +782,9 @@
 	writel(val, reg + SUN8I_A33_PLL_MIPI_REG);
 
 	sunxi_ccu_probe(node, reg, &sun8i_a33_ccu_desc);
+
+	ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk,
+				  &sun8i_a33_cpu_nb);
 }
 CLK_OF_DECLARE(sun8i_a33_ccu, "allwinner,sun8i-a33-ccu",
 	       sun8i_a33_ccu_setup);
diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c
index 6041bdb..f69f9e8 100644
--- a/drivers/clk/sunxi/clk-sun9i-mmc.c
+++ b/drivers/clk/sunxi/clk-sun9i-mmc.c
@@ -16,6 +16,7 @@
 
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
+#include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
@@ -83,9 +84,20 @@
 	return 0;
 }
 
+static int sun9i_mmc_reset_reset(struct reset_controller_dev *rcdev,
+				 unsigned long id)
+{
+	sun9i_mmc_reset_assert(rcdev, id);
+	udelay(10);
+	sun9i_mmc_reset_deassert(rcdev, id);
+
+	return 0;
+}
+
 static const struct reset_control_ops sun9i_mmc_reset_ops = {
 	.assert		= sun9i_mmc_reset_assert,
 	.deassert	= sun9i_mmc_reset_deassert,
+	.reset		= sun9i_mmc_reset_reset,
 };
 
 static int sun9i_a80_mmc_config_clk_probe(struct platform_device *pdev)
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index 8e2db5e..af520d8 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -963,7 +963,7 @@
 	 * U71 divider of cclk_lp.
 	 */
 	clk = tegra_clk_register_divider("pll_p_out3_cclklp", "pll_p_out3",
-				clk_base + SUPER_CCLKG_DIVIDER, 0,
+				clk_base + SUPER_CCLKLP_DIVIDER, 0,
 				TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
 	clk_register_clkdev(clk, "pll_p_out3_cclklp", NULL);
 
diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
index c773332..7d060ff 100644
--- a/drivers/clk/ti/clk-dra7-atl.c
+++ b/drivers/clk/ti/clk-dra7-atl.c
@@ -265,8 +265,7 @@
 
 		/* Get configuration for the ATL instances */
 		snprintf(prop, sizeof(prop), "atl%u", i);
-		of_node_get(node);
-		cfg_node = of_find_node_by_name(node, prop);
+		cfg_node = of_get_child_by_name(node, prop);
 		if (cfg_node) {
 			ret = of_property_read_u32(cfg_node, "bws",
 						   &cdesc->bws);
diff --git a/drivers/clk/uniphier/clk-uniphier-sys.c b/drivers/clk/uniphier/clk-uniphier-sys.c
index 5d029991..481225a 100644
--- a/drivers/clk/uniphier/clk-uniphier-sys.c
+++ b/drivers/clk/uniphier/clk-uniphier-sys.c
@@ -98,7 +98,7 @@
 const struct uniphier_clk_data uniphier_pro5_sys_clk_data[] = {
 	UNIPHIER_CLK_FACTOR("spll", -1, "ref", 120, 1),		/* 2400 MHz */
 	UNIPHIER_CLK_FACTOR("dapll1", -1, "ref", 128, 1),	/* 2560 MHz */
-	UNIPHIER_CLK_FACTOR("dapll2", -1, "ref", 144, 125),	/* 2949.12 MHz */
+	UNIPHIER_CLK_FACTOR("dapll2", -1, "dapll1", 144, 125),	/* 2949.12 MHz */
 	UNIPHIER_CLK_FACTOR("uart", 0, "dapll2", 1, 40),
 	UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 48),
 	UNIPHIER_PRO5_SYS_CLK_SD,
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index e2c6e43..cd6d307 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -305,6 +305,14 @@
 	  This must be disabled for hardware validation purposes to detect any
 	  hardware anomalies of missing events.
 
+config ARM_ARCH_TIMER_VCT_ACCESS
+	bool "Support for ARM architected timer virtual counter access in userspace"
+	default !ARM64
+	depends on ARM_ARCH_TIMER
+	help
+	  This option enables support for reading the ARM architected timer's
+	  virtual counter in userspace.
+
 config FSL_ERRATUM_A008585
 	bool "Workaround for Freescale/NXP Erratum A-008585"
 	default y
@@ -315,6 +323,14 @@
 	  value").  The workaround will only be active if the
 	  fsl,erratum-a008585 property is found in the timer node.
 
+config ARM_ARCH_TIMER_VCT_ACCESS
+	bool "Support for ARM architected timer virtual counter access in userspace"
+	default !ARM64
+	depends on ARM_ARCH_TIMER
+	help
+	  This option enables support for reading the ARM architected timer's
+	  virtual counter in userspace.
+
 config ARM_GLOBAL_TIMER
 	bool "Support for the ARM global timer" if COMPILE_TEST
 	select CLKSRC_OF if OF
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 5db1897..5aa9914 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -48,6 +48,8 @@
 #define CNTFRQ		0x10
 #define CNTP_TVAL	0x28
 #define CNTP_CTL	0x2c
+#define CNTCVAL_LO	0x30
+#define CNTCVAL_HI	0x34
 #define CNTV_TVAL	0x38
 #define CNTV_CTL	0x3c
 
@@ -441,14 +443,18 @@
 {
 	u32 cntkctl = arch_timer_get_cntkctl();
 
-	/* Disable user access to the timers */
+	/* Disable user access to the timers and the physical counter */
 	/* Also disable virtual event stream */
 	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
 			| ARCH_TIMER_USR_VT_ACCESS_EN
-			| ARCH_TIMER_VIRT_EVT_EN);
+			| ARCH_TIMER_VIRT_EVT_EN
+			| ARCH_TIMER_USR_PCT_ACCESS_EN);
 
-	/* Enable user access to the virtual and physical counters */
-	cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN | ARCH_TIMER_USR_PCT_ACCESS_EN;
+	/* Enable user access to the virtual counter */
+	if (IS_ENABLED(CONFIG_ARM_ARCH_TIMER_VCT_ACCESS))
+		cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;
+	else
+		cntkctl &= ~ARCH_TIMER_USR_VCT_ACCESS_EN;
 
 	arch_timer_set_cntkctl(cntkctl);
 }
@@ -541,6 +547,23 @@
 	return arch_timer_rate;
 }
 
+void arch_timer_mem_get_cval(u32 *lo, u32 *hi)
+{
+	u32 ctrl;
+
+	*lo = *hi = ~0U;
+
+	if (!arch_counter_base)
+		return;
+
+	ctrl = readl_relaxed_no_log(arch_counter_base + CNTV_CTL);
+
+	if (ctrl & ARCH_TIMER_CTRL_ENABLE) {
+		*lo = readl_relaxed_no_log(arch_counter_base + CNTCVAL_LO);
+		*hi = readl_relaxed_no_log(arch_counter_base + CNTCVAL_HI);
+	}
+}
+
 static u64 arch_counter_get_cntvct_mem(void)
 {
 	u32 vct_lo, vct_hi, tmp_hi;
@@ -873,7 +896,7 @@
 		return ret;
 
 	arch_timer_kvm_info.virtual_irq = arch_timer_ppi[VIRT_PPI];
-	
+
 	return 0;
 }
 
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index e8c7af52..1d5dba9 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -990,11 +990,19 @@
 	.release	= cpufreq_sysfs_release,
 };
 
-static int add_cpu_dev_symlink(struct cpufreq_policy *policy,
-			       struct device *dev)
+static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
 {
+	struct device *dev = get_cpu_device(cpu);
+
+	if (!dev)
+		return;
+
+	if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
+		return;
+
 	dev_dbg(dev, "%s: Adding symlink\n", __func__);
-	return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
+	if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
+		dev_err(dev, "cpufreq symlink creation failed\n");
 }
 
 static void remove_cpu_dev_symlink(struct cpufreq_policy *policy,
@@ -1257,10 +1265,10 @@
 		policy->user_policy.min = policy->min;
 		policy->user_policy.max = policy->max;
 
-		write_lock_irqsave(&cpufreq_driver_lock, flags);
-		for_each_cpu(j, policy->related_cpus)
+		for_each_cpu(j, policy->related_cpus) {
 			per_cpu(cpufreq_cpu_data, j) = policy;
-		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+			add_cpu_dev_symlink(policy, j);
+		}
 	} else {
 		policy->min = policy->user_policy.min;
 		policy->max = policy->user_policy.max;
@@ -1357,13 +1365,15 @@
 
 	if (cpufreq_driver->exit)
 		cpufreq_driver->exit(policy);
+
+	for_each_cpu(j, policy->real_cpus)
+		remove_cpu_dev_symlink(policy, get_cpu_device(j));
+
 out_free_policy:
 	cpufreq_policy_free(policy, !new_policy);
 	return ret;
 }
 
-static int cpufreq_offline(unsigned int cpu);
-
 /**
  * cpufreq_add_dev - the cpufreq interface for a CPU device.
  * @dev: CPU device.
@@ -1385,16 +1395,10 @@
 
 	/* Create sysfs link on CPU registration */
 	policy = per_cpu(cpufreq_cpu_data, cpu);
-	if (!policy || cpumask_test_and_set_cpu(cpu, policy->real_cpus))
-		return 0;
+	if (policy)
+		add_cpu_dev_symlink(policy, cpu);
 
-	ret = add_cpu_dev_symlink(policy, dev);
-	if (ret) {
-		cpumask_clear_cpu(cpu, policy->real_cpus);
-		cpufreq_offline(cpu);
-	}
-
-	return ret;
+	return 0;
 }
 
 static int cpufreq_offline(unsigned int cpu)
diff --git a/drivers/cpufreq/qcom-cpufreq.c b/drivers/cpufreq/qcom-cpufreq.c
index 90fac32..8a5ad70 100644
--- a/drivers/cpufreq/qcom-cpufreq.c
+++ b/drivers/cpufreq/qcom-cpufreq.c
@@ -27,10 +27,13 @@
 #include <linux/err.h>
 #include <linux/platform_device.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/cpu_cooling.h>
 #include <trace/events/power.h>
 
 static DEFINE_MUTEX(l2bw_lock);
 
+static struct thermal_cooling_device *cdev[NR_CPUS];
 static struct clk *cpu_clk[NR_CPUS];
 static struct clk *l2_clk;
 static DEFINE_PER_CPU(struct cpufreq_frequency_table *, freq_table);
@@ -308,6 +311,52 @@
 	NULL,
 };
 
+static void msm_cpufreq_ready(struct cpufreq_policy *policy)
+{
+	struct device_node *np, *lmh_node;
+	unsigned int cpu = 0;
+
+	if (cdev[policy->cpu])
+		return;
+
+	np = of_cpu_device_node_get(policy->cpu);
+	if (WARN_ON(!np))
+		return;
+
+	/*
+	 * For now, just loading the cooling device;
+	 * thermal DT code takes care of matching them.
+	 */
+	if (of_find_property(np, "#cooling-cells", NULL)) {
+		lmh_node = of_parse_phandle(np, "qcom,lmh-dcvs", 0);
+		if (lmh_node) {
+			of_node_put(lmh_node);
+			goto ready_exit;
+		}
+
+		for_each_cpu(cpu, policy->related_cpus) {
+			cpumask_t cpu_mask  = CPU_MASK_NONE;
+
+			of_node_put(np);
+			np = of_cpu_device_node_get(cpu);
+			if (WARN_ON(!np))
+				return;
+
+			cpumask_set_cpu(cpu, &cpu_mask);
+			cdev[cpu] = of_cpufreq_cooling_register(np, &cpu_mask);
+			if (IS_ERR(cdev[cpu])) {
+				pr_err(
+				"running cpufreq for CPU%d without cooling dev: %ld\n",
+				cpu, PTR_ERR(cdev[cpu]));
+				cdev[cpu] = NULL;
+			}
+		}
+	}
+
+ready_exit:
+	of_node_put(np);
+}
+
 static struct cpufreq_driver msm_cpufreq_driver = {
 	/* lps calculations are handled here. */
 	.flags		= CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
@@ -318,6 +367,7 @@
 	.get		= msm_cpufreq_get_freq,
 	.name		= "msm",
 	.attr		= msm_freq_attr,
+	.ready		= msm_cpufreq_ready,
 };
 
 static struct cpufreq_frequency_table *cpufreq_parse_dt(struct device *dev,
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index 7fe442c..854a567 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -164,6 +164,24 @@
 		drv->state_count += 1;
 	}
 
+	/*
+	 * On the PowerNV platform cpu_present may be less than cpu_possible in
+	 * cases when firmware detects the CPU, but it is not available to the
+	 * OS.  If CONFIG_HOTPLUG_CPU=n, then such CPUs are not hotpluggable at
+	 * run time and hence cpu_devices are not created for those CPUs by the
+	 * generic topology_init().
+	 *
+	 * drv->cpumask defaults to cpu_possible_mask in
+	 * __cpuidle_driver_init().  This breaks cpuidle on PowerNV where
+	 * cpu_devices are not created for CPUs in cpu_possible_mask that
+	 * cannot be hot-added later at run time.
+	 *
+	 * Trying cpuidle_register_device() on a CPU without a cpu_device is
+	 * incorrect, so pass a correct CPU mask to the generic cpuidle driver.
+	 */
+
+	drv->cpumask = (struct cpumask *)cpu_present_mask;
+
 	return 0;
 }
 
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index a3e1de0..3eddf43 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -189,6 +189,7 @@
 			return -EBUSY;
 		}
 		target_state = &drv->states[index];
+		broadcast = false;
 	}
 
 	/* Take note of the planned idle state. */
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index 5452ad8..6c66b7f 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
  * Copyright (C) 2009 Intel Corporation
  *
@@ -619,7 +619,7 @@
 
 	next_event_us = (uint32_t)(ktime_to_us(get_next_event_time(dev->cpu)));
 
-	if (is_cpu_biased(dev->cpu))
+	if (is_cpu_biased(dev->cpu) && (!cpu_isolated(dev->cpu)))
 		goto done_select;
 
 	for (i = 0; i < cpu->nlevels; i++) {
@@ -1043,18 +1043,9 @@
 	}
 
 	if (level->notify_rpm) {
-		uint64_t us;
-		uint32_t pred_us;
-
-		us = get_cluster_sleep_time(cluster, NULL, from_idle,
-								&pred_us);
-
-		us = us + 1;
-
 		clear_predict_history();
 		clear_cl_predict_history();
-
-		if (system_sleep_enter(us))
+		if (system_sleep_enter())
 			return -EBUSY;
 	}
 	/* Notify cluster enter event after successfully config completion */
@@ -1260,7 +1251,15 @@
 
 		state_id |= (level->psci_id & cluster->psci_mode_mask)
 					<< cluster->psci_mode_shift;
-		(*aff_lvl)++;
+
+		/*
+		 * We may have updated the broadcast timers, update
+		 * the wakeup value by reading the bc timer directly.
+		 */
+		if (level->notify_rpm)
+			system_sleep_update_wakeup();
+		if (level->psci_id)
+			(*aff_lvl)++;
 	}
 unlock_and_return:
 	spin_unlock(&cluster->sync_lock);
@@ -1733,6 +1732,18 @@
 {
 	int rc;
 
+#ifdef CONFIG_ARM
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		rc = arm_cpuidle_init(smp_processor_id());
+		if (rc) {
+			pr_err("CPU%d ARM CPUidle init failed (%d)\n", cpu, rc);
+			return rc;
+		}
+	}
+#endif
+
 	rc = platform_driver_register(&lpm_driver);
 	if (rc) {
 		pr_info("Error registering %s\n", lpm_driver.driver.name);
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 832a2c3..9e98a5f 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -613,6 +613,18 @@
 	struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
 	int error;
 
+	/*
+	 * Return if cpu_device is not setup for this CPU.
+	 *
+	 * This could happen if the arch did not set up cpu_device
+	 * since this CPU is not in cpu_present mask and the
+	 * driver did not send a correct CPU mask during registration.
+	 * Without this check we would end up passing bogus
+	 * value for &cpu_dev->kobj in kobject_init_and_add()
+	 */
+	if (!cpu_dev)
+		return -ENODEV;
+
 	kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
 	if (!kdev)
 		return -ENOMEM;
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index ecfdcfe..4f41d6d 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -34,12 +34,12 @@
 #define PPC405EX_CE_RESET                       0x00000008
 
 #define CRYPTO4XX_CRYPTO_PRIORITY		300
-#define PPC4XX_LAST_PD				63
-#define PPC4XX_NUM_PD				64
-#define PPC4XX_LAST_GD				1023
+#define PPC4XX_NUM_PD				256
+#define PPC4XX_LAST_PD				(PPC4XX_NUM_PD - 1)
 #define PPC4XX_NUM_GD				1024
-#define PPC4XX_LAST_SD				63
-#define PPC4XX_NUM_SD				64
+#define PPC4XX_LAST_GD				(PPC4XX_NUM_GD - 1)
+#define PPC4XX_NUM_SD				256
+#define PPC4XX_LAST_SD				(PPC4XX_NUM_SD - 1)
 #define PPC4XX_SD_BUFFER_SIZE			2048
 
 #define PD_ENTRY_INUSE				1
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 5d4c050..e2bcacc 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -41,6 +41,7 @@
 	struct device		*dev;
 	int ridx;
 	struct caam_job_ring __iomem *rregs;	/* JobR's register space */
+	struct tasklet_struct irqtask;
 	int irq;			/* One per queue */
 
 	/* Number of scatterlist crypt transforms active on the JobR */
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 757c27f..9e7f281 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -73,6 +73,8 @@
 
 	ret = caam_reset_hw_jr(dev);
 
+	tasklet_kill(&jrp->irqtask);
+
 	/* Release interrupt */
 	free_irq(jrp->irq, dev);
 
@@ -128,7 +130,7 @@
 
 	/*
 	 * Check the output ring for ready responses, kick
-	 * the threaded irq if jobs done.
+	 * tasklet if jobs done.
 	 */
 	irqstate = rd_reg32(&jrp->rregs->jrintstatus);
 	if (!irqstate)
@@ -150,13 +152,18 @@
 	/* Have valid interrupt at this point, just ACK and trigger */
 	wr_reg32(&jrp->rregs->jrintstatus, irqstate);
 
-	return IRQ_WAKE_THREAD;
+	preempt_disable();
+	tasklet_schedule(&jrp->irqtask);
+	preempt_enable();
+
+	return IRQ_HANDLED;
 }
 
-static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
+/* Deferred service handler, run as interrupt-fired tasklet */
+static void caam_jr_dequeue(unsigned long devarg)
 {
 	int hw_idx, sw_idx, i, head, tail;
-	struct device *dev = st_dev;
+	struct device *dev = (struct device *)devarg;
 	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
 	void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
 	u32 *userdesc, userstatus;
@@ -230,8 +237,6 @@
 
 	/* reenable / unmask IRQs */
 	clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
-
-	return IRQ_HANDLED;
 }
 
 /**
@@ -389,10 +394,11 @@
 
 	jrp = dev_get_drvdata(dev);
 
+	tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
+
 	/* Connect job ring interrupt handler. */
-	error = request_threaded_irq(jrp->irq, caam_jr_interrupt,
-				     caam_jr_threadirq, IRQF_SHARED,
-				     dev_name(dev), dev);
+	error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
+			    dev_name(dev), dev);
 	if (error) {
 		dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
 			jrp->ridx, jrp->irq);
@@ -454,6 +460,7 @@
 out_free_irq:
 	free_irq(jrp->irq, dev);
 out_kill_deq:
+	tasklet_kill(&jrp->irqtask);
 	return error;
 }
 
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index e423d33..3629184 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -273,7 +273,8 @@
 #define CESA_TDMA_SRC_IN_SRAM			BIT(30)
 #define CESA_TDMA_END_OF_REQ			BIT(29)
 #define CESA_TDMA_BREAK_CHAIN			BIT(28)
-#define CESA_TDMA_TYPE_MSK			GENMASK(27, 0)
+#define CESA_TDMA_SET_STATE			BIT(27)
+#define CESA_TDMA_TYPE_MSK			GENMASK(26, 0)
 #define CESA_TDMA_DUMMY				0
 #define CESA_TDMA_DATA				1
 #define CESA_TDMA_OP				2
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index 77712b3..662cf4d 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -280,13 +280,32 @@
 	sreq->offset = 0;
 }
 
+static void mv_cesa_ahash_dma_step(struct ahash_request *req)
+{
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+	struct mv_cesa_req *base = &creq->base;
+
+	/* We must explicitly set the digest state. */
+	if (base->chain.first->flags & CESA_TDMA_SET_STATE) {
+		struct mv_cesa_engine *engine = base->engine;
+		int i;
+
+		/* Set the hash state in the IVDIG regs. */
+		for (i = 0; i < ARRAY_SIZE(creq->state); i++)
+			writel_relaxed(creq->state[i], engine->regs +
+				       CESA_IVDIG(i));
+	}
+
+	mv_cesa_dma_step(base);
+}
+
 static void mv_cesa_ahash_step(struct crypto_async_request *req)
 {
 	struct ahash_request *ahashreq = ahash_request_cast(req);
 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
 
 	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
-		mv_cesa_dma_step(&creq->base);
+		mv_cesa_ahash_dma_step(ahashreq);
 	else
 		mv_cesa_ahash_std_step(ahashreq);
 }
@@ -562,11 +581,15 @@
 	struct mv_cesa_ahash_dma_iter iter;
 	struct mv_cesa_op_ctx *op = NULL;
 	unsigned int frag_len;
+	bool set_state = false;
 	int ret;
 
 	basereq->chain.first = NULL;
 	basereq->chain.last = NULL;
 
+	if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
+		set_state = true;
+
 	if (creq->src_nents) {
 		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
 				 DMA_TO_DEVICE);
@@ -650,6 +673,15 @@
 	basereq->chain.last->flags |= (CESA_TDMA_END_OF_REQ |
 				       CESA_TDMA_BREAK_CHAIN);
 
+	if (set_state) {
+		/*
+		 * Put the CESA_TDMA_SET_STATE flag on the first tdma desc to
+		 * let the step logic know that the IVDIG registers should be
+		 * explicitly set before launching a TDMA chain.
+		 */
+		basereq->chain.first->flags |= CESA_TDMA_SET_STATE;
+	}
+
 	return 0;
 
 err_free_tdma:
diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c
index 9fd7a5f..0cda6e3 100644
--- a/drivers/crypto/marvell/tdma.c
+++ b/drivers/crypto/marvell/tdma.c
@@ -112,7 +112,14 @@
 		last->next = dreq->chain.first;
 		engine->chain.last = dreq->chain.last;
 
-		if (!(last->flags & CESA_TDMA_BREAK_CHAIN))
+		/*
+		 * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on
+		 * the last element of the current chain, or if the request
+		 * being queued needs the IV regs to be set before launching
+		 * the request.
+		 */
+		if (!(last->flags & CESA_TDMA_BREAK_CHAIN) &&
+		    !(dreq->chain.first->flags & CESA_TDMA_SET_STATE))
 			last->next_dma = dreq->chain.first->cur_dma;
 	}
 }
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index c5aac25..b365ad7 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1620,6 +1620,7 @@
 					  CWQ_ENTRY_SIZE, 0, NULL);
 	if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
 		kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
+		queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
 		return -ENOMEM;
 	}
 	return 0;
@@ -1629,6 +1630,8 @@
 {
 	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
 	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
+	queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
+	queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
 }
 
 static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index dce1af0..a668286 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -805,8 +805,9 @@
 		dev_warn(dev, "feed control interrupt is not available.\n");
 		goto err_irq;
 	}
-	err = devm_request_irq(dev, pdata->irq_fc, s5p_aes_interrupt,
-			       IRQF_SHARED, pdev->name, pdev);
+	err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
+					s5p_aes_interrupt, IRQF_ONESHOT,
+					pdev->name, pdev);
 	if (err < 0) {
 		dev_warn(dev, "feed control interrupt is not available.\n");
 		goto err_irq;
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index e2d323f..1c8d79d 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1232,12 +1232,11 @@
 			sg_link_tbl_len += authsize;
 	}
 
-	sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
-				  &desc->ptr[4], sg_count, areq->assoclen,
-				  tbl_off);
+	ret = talitos_sg_map(dev, areq->src, sg_link_tbl_len, edesc,
+			     &desc->ptr[4], sg_count, areq->assoclen, tbl_off);
 
-	if (sg_count > 1) {
-		tbl_off += sg_count;
+	if (ret > 1) {
+		tbl_off += ret;
 		sync_needed = true;
 	}
 
@@ -1248,14 +1247,15 @@
 			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
 	}
 
-	sg_count = talitos_sg_map(dev, areq->dst, cryptlen, edesc,
-				  &desc->ptr[5], sg_count, areq->assoclen,
-				  tbl_off);
+	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
+			     sg_count, areq->assoclen, tbl_off);
 
 	if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
 		to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
 
-	if (sg_count > 1) {
+	/* ICV data */
+	if (ret > 1) {
+		tbl_off += ret;
 		edesc->icv_ool = true;
 		sync_needed = true;
 
@@ -1265,9 +1265,7 @@
 				     sizeof(struct talitos_ptr) + authsize;
 
 			/* Add an entry to the link table for ICV data */
-			tbl_ptr += sg_count - 1;
-			to_talitos_ptr_ext_set(tbl_ptr, 0, is_sec1);
-			tbl_ptr++;
+			to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
 			to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
 					       is_sec1);
 			to_talitos_ptr_len(tbl_ptr, authsize, is_sec1);
@@ -1275,18 +1273,33 @@
 			/* icv data follows link tables */
 			to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
 				       is_sec1);
+		} else {
+			dma_addr_t addr = edesc->dma_link_tbl;
+
+			if (is_sec1)
+				addr += areq->assoclen + cryptlen;
+			else
+				addr += sizeof(struct talitos_ptr) * tbl_off;
+
+			to_talitos_ptr(&desc->ptr[6], addr, is_sec1);
+			to_talitos_ptr_len(&desc->ptr[6], authsize, is_sec1);
+		}
+	} else if (!(desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
+		ret = talitos_sg_map(dev, areq->dst, authsize, edesc,
+				     &desc->ptr[6], sg_count, areq->assoclen +
+							      cryptlen,
+				     tbl_off);
+		if (ret > 1) {
+			tbl_off += ret;
+			edesc->icv_ool = true;
+			sync_needed = true;
+		} else {
+			edesc->icv_ool = false;
 		}
 	} else {
 		edesc->icv_ool = false;
 	}
 
-	/* ICV data */
-	if (!(desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
-		to_talitos_ptr_len(&desc->ptr[6], authsize, is_sec1);
-		to_talitos_ptr(&desc->ptr[6], edesc->dma_link_tbl +
-			       areq->assoclen + cryptlen, is_sec1);
-	}
-
 	/* iv out */
 	if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
 		map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
@@ -1494,12 +1507,20 @@
 			     const u8 *key, unsigned int keylen)
 {
 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+	u32 tmp[DES_EXPKEY_WORDS];
 
 	if (keylen > TALITOS_MAX_KEY_SIZE) {
 		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
 		return -EINVAL;
 	}
 
+	if (unlikely(crypto_ablkcipher_get_flags(cipher) &
+		     CRYPTO_TFM_REQ_WEAK_KEY) &&
+	    !des_ekey(tmp, key)) {
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
+		return -EINVAL;
+	}
+
 	memcpy(&ctx->key, key, keylen);
 	ctx->keylen = keylen;
 
@@ -2614,7 +2635,7 @@
 				.ivsize = AES_BLOCK_SIZE,
 			}
 		},
-		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+		.desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
 				     DESC_HDR_SEL0_AESU |
 				     DESC_HDR_MODE0_AESU_CTR,
 	},
@@ -3047,6 +3068,11 @@
 		t_alg->algt.alg.aead.setkey = aead_setkey;
 		t_alg->algt.alg.aead.encrypt = aead_encrypt;
 		t_alg->algt.alg.aead.decrypt = aead_decrypt;
+		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
+		    !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
+			kfree(t_alg);
+			return ERR_PTR(-ENOTSUPP);
+		}
 		break;
 	case CRYPTO_ALG_TYPE_AHASH:
 		alg = &t_alg->algt.alg.hash.halg.base;
diff --git a/drivers/devfreq/bimc-bwmon.c b/drivers/devfreq/bimc-bwmon.c
index f9b758f..33e16261 100644
--- a/drivers/devfreq/bimc-bwmon.c
+++ b/drivers/devfreq/bimc-bwmon.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -35,7 +35,8 @@
 #define	GLB_INT_EN(m)		((m)->global_base + 0x10C)
 #define MON_INT_STATUS(m)	((m)->base + 0x100)
 #define MON_INT_STATUS_MASK	0x03
-#define MON2_INT_STATUS_MASK	0xF0
+#define MON2_INT_STATUS_MASK	0xA0
+#define MON2_INT_DISABLE_MASK	0xF0
 #define MON2_INT_STATUS_SHIFT	4
 #define MON_INT_CLR(m)		((m)->base + 0x108)
 #define	MON_INT_EN(m)		((m)->base + 0x10C)
@@ -63,7 +64,8 @@
 #define MON3_INT_STATUS(m)	((m)->base + 0x00)
 #define MON3_INT_CLR(m)		((m)->base + 0x08)
 #define MON3_INT_EN(m)		((m)->base + 0x0C)
-#define MON3_INT_STATUS_MASK	0x0F
+#define MON3_INT_STATUS_MASK	0x0A
+#define MON3_INT_DISABLE_MASK	0x0F
 #define MON3_EN(m)		((m)->base + 0x10)
 #define MON3_CLEAR(m)		((m)->base + 0x14)
 #define MON3_MASK(m)		((m)->base + 0x18)
@@ -283,12 +285,12 @@
 	case MON2:
 		mon_glb_irq_disable(m);
 		val = readl_relaxed(MON_INT_EN(m));
-		val &= ~MON2_INT_STATUS_MASK;
+		val &= ~MON2_INT_DISABLE_MASK;
 		writel_relaxed(val, MON_INT_EN(m));
 		break;
 	case MON3:
 		val = readl_relaxed(MON3_INT_EN(m));
-		val &= ~MON3_INT_STATUS_MASK;
+		val &= ~MON3_INT_DISABLE_MASK;
 		writel_relaxed(val, MON3_INT_EN(m));
 		break;
 	}
diff --git a/drivers/devfreq/governor_spdm_bw_hyp.c b/drivers/devfreq/governor_spdm_bw_hyp.c
index 5751ab6..7e7e0ee 100644
--- a/drivers/devfreq/governor_spdm_bw_hyp.c
+++ b/drivers/devfreq/governor_spdm_bw_hyp.c
@@ -1,5 +1,5 @@
 /*
- *Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  *This program is free software; you can redistribute it and/or modify
  *it under the terms of the GNU General Public License version 2 and
@@ -42,7 +42,7 @@
 
 	rpm_req = msm_rpm_create_request(MSM_RPM_CTX_ACTIVE_SET, SPDM_RES_TYPE,
 					 SPDM_RES_ID, 1);
-	if (!rpm_req)
+	if (IS_ERR_OR_NULL(rpm_req))
 		return -ENODEV;
 	msm_rpm_add_kvp_data(rpm_req, SPDM_KEY, (const uint8_t *)&one,
 			     sizeof(int));
@@ -61,7 +61,7 @@
 
 	rpm_req = msm_rpm_create_request(MSM_RPM_CTX_ACTIVE_SET, SPDM_RES_TYPE,
 					 SPDM_RES_ID, 1);
-	if (!rpm_req)
+	if (IS_ERR_OR_NULL(rpm_req))
 		return -ENODEV;
 	msm_rpm_add_kvp_data(rpm_req, SPDM_KEY, (const uint8_t *)&zero,
 			     sizeof(int));
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c
index 094548b..883b3be 100644
--- a/drivers/dma-buf/fence.c
+++ b/drivers/dma-buf/fence.c
@@ -280,6 +280,31 @@
 EXPORT_SYMBOL(fence_add_callback);
 
 /**
+ * fence_get_status - returns the status upon completion
+ * @fence: [in]	the fence to query
+ *
+ * This wraps fence_get_status_locked() to return the error status
+ * condition on a signaled fence. See fence_get_status_locked() for more
+ * details.
+ *
+ * Returns 0 if the fence has not yet been signaled, 1 if the fence has
+ * been signaled without an error condition, or a negative error code
+ * if the fence has been completed in err.
+ */
+int fence_get_status(struct fence *fence)
+{
+	unsigned long flags;
+	int status;
+
+	spin_lock_irqsave(fence->lock, flags);
+	status = fence_get_status_locked(fence);
+	spin_unlock_irqrestore(fence->lock, flags);
+
+	return status;
+}
+EXPORT_SYMBOL(fence_get_status);
+
+/**
  * fence_remove_callback - remove a callback from the signaling list
  * @fence:	[in]	the fence to wait on
  * @cb:		[in]	the callback to remove
@@ -529,6 +554,7 @@
 	fence->context = context;
 	fence->seqno = seqno;
 	fence->flags = 0UL;
+	fence->error = 0;
 
 	trace_fence_init(fence);
 }
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 0cb8d9d..9dc86d3 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -96,9 +96,9 @@
 	obj->context = fence_context_alloc(1);
 	strlcpy(obj->name, name, sizeof(obj->name));
 
-	INIT_LIST_HEAD(&obj->child_list_head);
-	INIT_LIST_HEAD(&obj->active_list_head);
-	spin_lock_init(&obj->child_list_lock);
+	obj->pt_tree = RB_ROOT;
+	INIT_LIST_HEAD(&obj->pt_list);
+	spin_lock_init(&obj->lock);
 
 	sync_timeline_debug_add(obj);
 
@@ -125,68 +125,6 @@
 	kref_put(&obj->kref, sync_timeline_free);
 }
 
-/**
- * sync_timeline_signal() - signal a status change on a sync_timeline
- * @obj:	sync_timeline to signal
- * @inc:	num to increment on timeline->value
- *
- * A sync implementation should call this any time one of it's fences
- * has signaled or has an error condition.
- */
-static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
-{
-	unsigned long flags;
-	struct sync_pt *pt, *next;
-
-	trace_sync_timeline(obj);
-
-	spin_lock_irqsave(&obj->child_list_lock, flags);
-
-	obj->value += inc;
-
-	list_for_each_entry_safe(pt, next, &obj->active_list_head,
-				 active_list) {
-		if (fence_is_signaled_locked(&pt->base))
-			list_del_init(&pt->active_list);
-	}
-
-	spin_unlock_irqrestore(&obj->child_list_lock, flags);
-}
-
-/**
- * sync_pt_create() - creates a sync pt
- * @parent:	fence's parent sync_timeline
- * @size:	size to allocate for this pt
- * @inc:	value of the fence
- *
- * Creates a new sync_pt as a child of @parent.  @size bytes will be
- * allocated allowing for implementation specific data to be kept after
- * the generic sync_timeline struct. Returns the sync_pt object or
- * NULL in case of error.
- */
-static struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size,
-			     unsigned int value)
-{
-	unsigned long flags;
-	struct sync_pt *pt;
-
-	if (size < sizeof(*pt))
-		return NULL;
-
-	pt = kzalloc(size, GFP_KERNEL);
-	if (!pt)
-		return NULL;
-
-	spin_lock_irqsave(&obj->child_list_lock, flags);
-	sync_timeline_get(obj);
-	fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock,
-		   obj->context, value);
-	list_add_tail(&pt->child_list, &obj->child_list_head);
-	INIT_LIST_HEAD(&pt->active_list);
-	spin_unlock_irqrestore(&obj->child_list_lock, flags);
-	return pt;
-}
-
 static const char *timeline_fence_get_driver_name(struct fence *fence)
 {
 	return "sw_sync";
@@ -203,13 +141,17 @@
 {
 	struct sync_pt *pt = fence_to_sync_pt(fence);
 	struct sync_timeline *parent = fence_parent(fence);
-	unsigned long flags;
 
-	spin_lock_irqsave(fence->lock, flags);
-	list_del(&pt->child_list);
-	if (!list_empty(&pt->active_list))
-		list_del(&pt->active_list);
-	spin_unlock_irqrestore(fence->lock, flags);
+	if (!list_empty(&pt->link)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(fence->lock, flags);
+		if (!list_empty(&pt->link)) {
+			list_del(&pt->link);
+			rb_erase(&pt->node, &parent->pt_tree);
+		}
+		spin_unlock_irqrestore(fence->lock, flags);
+	}
 
 	sync_timeline_put(parent);
 	fence_free(fence);
@@ -219,18 +161,11 @@
 {
 	struct sync_timeline *parent = fence_parent(fence);
 
-	return (fence->seqno > parent->value) ? false : true;
+	return !__fence_is_later(fence->seqno, parent->value);
 }
 
 static bool timeline_fence_enable_signaling(struct fence *fence)
 {
-	struct sync_pt *pt = fence_to_sync_pt(fence);
-	struct sync_timeline *parent = fence_parent(fence);
-
-	if (timeline_fence_signaled(fence))
-		return false;
-
-	list_add_tail(&pt->active_list, &parent->active_list_head);
 	return true;
 }
 
@@ -238,7 +173,7 @@
 {
 	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
 
-	list_del_init(&pt->active_list);
+	list_del_init(&pt->link);
 }
 
 static void timeline_fence_value_str(struct fence *fence,
@@ -267,6 +202,107 @@
 	.timeline_value_str = timeline_fence_timeline_value_str,
 };
 
+/**
+ * sync_timeline_signal() - signal a status change on a sync_timeline
+ * @obj:	sync_timeline to signal
+ * @inc:	num to increment on timeline->value
+ *
+ * A sync implementation should call this any time one of it's fences
+ * has signaled or has an error condition.
+ */
+static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
+{
+	struct sync_pt *pt, *next;
+
+	trace_sync_timeline(obj);
+
+	spin_lock_irq(&obj->lock);
+
+	obj->value += inc;
+
+	list_for_each_entry_safe(pt, next, &obj->pt_list, link) {
+		if (!timeline_fence_signaled(&pt->base))
+			break;
+
+		list_del_init(&pt->link);
+		rb_erase(&pt->node, &obj->pt_tree);
+
+		/*
+		 * A signal callback may release the last reference to this
+		 * fence, causing it to be freed. That operation has to be
+		 * last to avoid a use after free inside this loop, and must
+		 * be after we remove the fence from the timeline in order to
+		 * prevent deadlocking on timeline->lock inside
+		 * timeline_fence_release().
+		 */
+		fence_signal_locked(&pt->base);
+	}
+
+	spin_unlock_irq(&obj->lock);
+}
+
+/**
+ * sync_pt_create() - creates a sync pt
+ * @parent:	fence's parent sync_timeline
+ * @inc:	value of the fence
+ *
+ * Creates a new sync_pt as a child of @parent.  @size bytes will be
+ * allocated allowing for implementation specific data to be kept after
+ * the generic sync_timeline struct. Returns the sync_pt object or
+ * NULL in case of error.
+ */
+static struct sync_pt *sync_pt_create(struct sync_timeline *obj,
+				      unsigned int value)
+{
+	struct sync_pt *pt;
+
+	pt = kzalloc(sizeof(*pt), GFP_KERNEL);
+	if (!pt)
+		return NULL;
+
+	sync_timeline_get(obj);
+	fence_init(&pt->base, &timeline_fence_ops, &obj->lock,
+		   obj->context, value);
+	INIT_LIST_HEAD(&pt->link);
+
+	spin_lock_irq(&obj->lock);
+	if (!fence_is_signaled_locked(&pt->base)) {
+		struct rb_node **p = &obj->pt_tree.rb_node;
+		struct rb_node *parent = NULL;
+
+		while (*p) {
+			struct sync_pt *other;
+			int cmp;
+
+			parent = *p;
+			other = rb_entry(parent, typeof(*pt), node);
+			cmp = value - other->base.seqno;
+			if (cmp > 0) {
+				p = &parent->rb_right;
+			} else if (cmp < 0) {
+				p = &parent->rb_left;
+			} else {
+				if (fence_get_rcu(&other->base)) {
+					fence_put(&pt->base);
+					pt = other;
+					goto unlock;
+				}
+				p = &parent->rb_left;
+			}
+		}
+		rb_link_node(&pt->node, parent, p);
+		rb_insert_color(&pt->node, &obj->pt_tree);
+
+		parent = rb_next(&pt->node);
+		list_add_tail(&pt->link,
+			      parent ? &rb_entry(parent, typeof(*pt), node)->link : &obj->pt_list);
+	}
+unlock:
+	spin_unlock_irq(&obj->lock);
+
+	return pt;
+}
+
 /*
  * *WARNING*
  *
@@ -293,8 +329,16 @@
 static int sw_sync_debugfs_release(struct inode *inode, struct file *file)
 {
 	struct sync_timeline *obj = file->private_data;
+	struct sync_pt *pt, *next;
 
-	smp_wmb();
+	spin_lock_irq(&obj->lock);
+
+	list_for_each_entry_safe(pt, next, &obj->pt_list, link) {
+		fence_set_error(&pt->base, -ENOENT);
+		fence_signal_locked(&pt->base);
+	}
+
+	spin_unlock_irq(&obj->lock);
 
 	sync_timeline_put(obj);
 	return 0;
@@ -317,7 +361,7 @@
 		goto err;
 	}
 
-	pt = sync_pt_create(obj, sizeof(*pt), data.value);
+	pt = sync_pt_create(obj, data.value);
 	if (!pt) {
 		err = -ENOMEM;
 		goto err;
@@ -353,6 +397,11 @@
 	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
 		return -EFAULT;
 
+	while (value > INT_MAX)  {
+		sync_timeline_signal(obj, INT_MAX);
+		value -= INT_MAX;
+	}
+
 	sync_timeline_signal(obj, value);
 
 	return 0;
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
index 2dd4c3d..858263d 100644
--- a/drivers/dma-buf/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -62,29 +62,29 @@
 
 static const char *sync_status_str(int status)
 {
-	if (status == 0)
-		return "signaled";
+	if (status < 0)
+		return "error";
 
 	if (status > 0)
-		return "active";
+		return "signaled";
 
-	return "error";
+	return "active";
 }
 
-static void sync_print_fence(struct seq_file *s, struct fence *fence, bool show)
+static void sync_print_fence(struct seq_file *s,
+			     struct fence *fence, bool show)
 {
-	int status = 1;
 	struct sync_timeline *parent = fence_parent(fence);
+	int status;
 
-	if (fence_is_signaled_locked(fence))
-		status = fence->status;
+	status = fence_get_status_locked(fence);
 
 	seq_printf(s, "  %s%sfence %s",
 		   show ? parent->name : "",
 		   show ? "_" : "",
 		   sync_status_str(status));
 
-	if (status <= 0) {
+	if (status) {
 		struct timespec64 ts64 =
 			ktime_to_timespec64(fence->timestamp);
 
@@ -116,17 +116,15 @@
 static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
 {
 	struct list_head *pos;
-	unsigned long flags;
 
 	seq_printf(s, "%s: %d\n", obj->name, obj->value);
 
-	spin_lock_irqsave(&obj->child_list_lock, flags);
-	list_for_each(pos, &obj->child_list_head) {
-		struct sync_pt *pt =
-			container_of(pos, struct sync_pt, child_list);
+	spin_lock_irq(&obj->lock);
+	list_for_each(pos, &obj->pt_list) {
+		struct sync_pt *pt = container_of(pos, struct sync_pt, link);
 		sync_print_fence(s, &pt->base, false);
 	}
-	spin_unlock_irqrestore(&obj->child_list_lock, flags);
+	spin_unlock_irq(&obj->lock);
 }
 
 static void sync_print_sync_file(struct seq_file *s,
@@ -135,7 +133,7 @@
 	int i;
 
 	seq_printf(s, "[%p] %s: %s\n", sync_file, sync_file->name,
-		   sync_status_str(!fence_is_signaled(sync_file->fence)));
+		   sync_status_str(fence_get_status(sync_file->fence)));
 
 	if (fence_is_array(sync_file->fence)) {
 		struct fence_array *array = to_fence_array(sync_file->fence);
@@ -149,12 +147,11 @@
 
 static int sync_debugfs_show(struct seq_file *s, void *unused)
 {
-	unsigned long flags;
 	struct list_head *pos;
 
 	seq_puts(s, "objs:\n--------------\n");
 
-	spin_lock_irqsave(&sync_timeline_list_lock, flags);
+	spin_lock_irq(&sync_timeline_list_lock);
 	list_for_each(pos, &sync_timeline_list_head) {
 		struct sync_timeline *obj =
 			container_of(pos, struct sync_timeline,
@@ -163,11 +160,11 @@
 		sync_print_obj(s, obj);
 		seq_puts(s, "\n");
 	}
-	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+	spin_unlock_irq(&sync_timeline_list_lock);
 
 	seq_puts(s, "fences:\n--------------\n");
 
-	spin_lock_irqsave(&sync_file_list_lock, flags);
+	spin_lock_irq(&sync_file_list_lock);
 	list_for_each(pos, &sync_file_list_head) {
 		struct sync_file *sync_file =
 			container_of(pos, struct sync_file, sync_file_list);
@@ -175,7 +172,7 @@
 		sync_print_sync_file(s, sync_file);
 		seq_puts(s, "\n");
 	}
-	spin_unlock_irqrestore(&sync_file_list_lock, flags);
+	spin_unlock_irq(&sync_file_list_lock);
 	return 0;
 }
 
diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h
index d269aa6..9615dc0 100644
--- a/drivers/dma-buf/sync_debug.h
+++ b/drivers/dma-buf/sync_debug.h
@@ -14,6 +14,7 @@
 #define _LINUX_SYNC_H
 
 #include <linux/list.h>
+#include <linux/rbtree.h>
 #include <linux/spinlock.h>
 #include <linux/fence.h>
 
@@ -24,43 +25,41 @@
  * struct sync_timeline - sync object
  * @kref:		reference count on fence.
  * @name:		name of the sync_timeline. Useful for debugging
- * @child_list_head:	list of children sync_pts for this sync_timeline
- * @child_list_lock:	lock protecting @child_list_head and fence.status
- * @active_list_head:	list of active (unsignaled/errored) sync_pts
+ * @lock:		lock protecting @pt_list and @value
+ * @pt_tree:		rbtree of active (unsignaled/errored) sync_pts
+ * @pt_list:		list of active (unsignaled/errored) sync_pts
  * @sync_timeline_list:	membership in global sync_timeline_list
  */
 struct sync_timeline {
 	struct kref		kref;
 	char			name[32];
 
-	/* protected by child_list_lock */
+	/* protected by lock */
 	u64			context;
 	int			value;
 
-	struct list_head	child_list_head;
-	spinlock_t		child_list_lock;
-
-	struct list_head	active_list_head;
+	struct rb_root		pt_tree;
+	struct list_head	pt_list;
+	spinlock_t		lock;
 
 	struct list_head	sync_timeline_list;
 };
 
 static inline struct sync_timeline *fence_parent(struct fence *fence)
 {
-	return container_of(fence->lock, struct sync_timeline,
-			    child_list_lock);
+	return container_of(fence->lock, struct sync_timeline, lock);
 }
 
 /**
  * struct sync_pt - sync_pt object
  * @base: base fence object
- * @child_list: sync timeline child's list
- * @active_list: sync timeline active child's list
+ * @link: link on the sync timeline's list
+ * @node: node in the sync timeline's tree
  */
 struct sync_pt {
 	struct fence base;
-	struct list_head child_list;
-	struct list_head active_list;
+	struct list_head link;
+	struct rb_node node;
 };
 
 #ifdef CONFIG_SW_SYNC
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 2f34a01..7053bb4 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -376,10 +376,8 @@
 		sizeof(info->obj_name));
 	strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
 		sizeof(info->driver_name));
-	if (fence_is_signaled(fence))
-		info->status = fence->status >= 0 ? 1 : fence->status;
-	else
-		info->status = 0;
+
+	info->status = fence_get_status(fence);
 	info->timestamp_ns = ktime_to_ns(fence->timestamp);
 }
 
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 6b53526..3db94e8 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1107,12 +1107,14 @@
 	switch (order) {
 	case 0 ... 1:
 		return &unmap_pool[0];
+#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
 	case 2 ... 4:
 		return &unmap_pool[1];
 	case 5 ... 7:
 		return &unmap_pool[2];
 	case 8:
 		return &unmap_pool[3];
+#endif
 	default:
 		BUG();
 		return NULL;
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index fbb7551..e0bd578 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -158,6 +158,12 @@
 #define PATTERN_OVERWRITE	0x20
 #define PATTERN_COUNT_MASK	0x1f
 
+/* poor man's completion - we want to use wait_event_freezable() on it */
+struct dmatest_done {
+	bool			done;
+	wait_queue_head_t	*wait;
+};
+
 struct dmatest_thread {
 	struct list_head	node;
 	struct dmatest_info	*info;
@@ -166,6 +172,8 @@
 	u8			**srcs;
 	u8			**dsts;
 	enum dma_transaction_type type;
+	wait_queue_head_t done_wait;
+	struct dmatest_done test_done;
 	bool			done;
 };
 
@@ -326,18 +334,25 @@
 	return error_count;
 }
 
-/* poor man's completion - we want to use wait_event_freezable() on it */
-struct dmatest_done {
-	bool			done;
-	wait_queue_head_t	*wait;
-};
 
 static void dmatest_callback(void *arg)
 {
 	struct dmatest_done *done = arg;
-
-	done->done = true;
-	wake_up_all(done->wait);
+	struct dmatest_thread *thread =
+		container_of(arg, struct dmatest_thread, done_wait);
+	if (!thread->done) {
+		done->done = true;
+		wake_up_all(done->wait);
+	} else {
+		/*
+		 * If thread->done, it means that this callback occurred
+		 * after the parent thread has cleaned up. This can
+		 * happen in the case that driver doesn't implement
+		 * the terminate_all() functionality and a dma operation
+		 * did not occur within the timeout period
+		 */
+		WARN(1, "dmatest: Kernel memory may be corrupted!!\n");
+	}
 }
 
 static unsigned int min_odd(unsigned int x, unsigned int y)
@@ -408,9 +423,8 @@
  */
 static int dmatest_func(void *data)
 {
-	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
 	struct dmatest_thread	*thread = data;
-	struct dmatest_done	done = { .wait = &done_wait };
+	struct dmatest_done	*done = &thread->test_done;
 	struct dmatest_info	*info;
 	struct dmatest_params	*params;
 	struct dma_chan		*chan;
@@ -637,9 +651,9 @@
 			continue;
 		}
 
-		done.done = false;
+		done->done = false;
 		tx->callback = dmatest_callback;
-		tx->callback_param = &done;
+		tx->callback_param = done;
 		cookie = tx->tx_submit(tx);
 
 		if (dma_submit_error(cookie)) {
@@ -652,21 +666,12 @@
 		}
 		dma_async_issue_pending(chan);
 
-		wait_event_freezable_timeout(done_wait, done.done,
+		wait_event_freezable_timeout(thread->done_wait, done->done,
 					     msecs_to_jiffies(params->timeout));
 
 		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 
-		if (!done.done) {
-			/*
-			 * We're leaving the timed out dma operation with
-			 * dangling pointer to done_wait.  To make this
-			 * correct, we'll need to allocate wait_done for
-			 * each test iteration and perform "who's gonna
-			 * free it this time?" dancing.  For now, just
-			 * leave it dangling.
-			 */
-			WARN(1, "dmatest: Kernel stack may be corrupted!!\n");
+		if (!done->done) {
 			dmaengine_unmap_put(um);
 			result("test timed out", total_tests, src_off, dst_off,
 			       len, 0);
@@ -747,7 +752,7 @@
 		dmatest_KBs(runtime, total_len), ret);
 
 	/* terminate all transfers on specified channels */
-	if (ret)
+	if (ret || failed_tests)
 		dmaengine_terminate_all(chan);
 
 	thread->done = true;
@@ -807,6 +812,8 @@
 		thread->info = info;
 		thread->chan = dtc->chan;
 		thread->type = type;
+		thread->test_done.wait = &thread->done_wait;
+		init_waitqueue_head(&thread->done_wait);
 		smp_wmb();
 		thread->task = kthread_create(dmatest_func, thread, "%s-%s%u",
 				dma_chan_name(chan), op, i);
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 9f3dbc8..fb2e747 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1694,7 +1694,6 @@
 static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
 {
 	struct pl330_thread *thrd = NULL;
-	unsigned long flags;
 	int chans, i;
 
 	if (pl330->state == DYING)
@@ -1702,8 +1701,6 @@
 
 	chans = pl330->pcfg.num_chan;
 
-	spin_lock_irqsave(&pl330->lock, flags);
-
 	for (i = 0; i < chans; i++) {
 		thrd = &pl330->channels[i];
 		if ((thrd->free) && (!_manager_ns(thrd) ||
@@ -1721,8 +1718,6 @@
 		thrd = NULL;
 	}
 
-	spin_unlock_irqrestore(&pl330->lock, flags);
-
 	return thrd;
 }
 
@@ -1740,7 +1735,6 @@
 static void pl330_release_channel(struct pl330_thread *thrd)
 {
 	struct pl330_dmac *pl330;
-	unsigned long flags;
 
 	if (!thrd || thrd->free)
 		return;
@@ -1752,10 +1746,8 @@
 
 	pl330 = thrd->dmac;
 
-	spin_lock_irqsave(&pl330->lock, flags);
 	_free_event(thrd, thrd->ev);
 	thrd->free = true;
-	spin_unlock_irqrestore(&pl330->lock, flags);
 }
 
 /* Initialize the structure for PL330 configuration, that can be used
@@ -2120,20 +2112,20 @@
 	struct pl330_dmac *pl330 = pch->dmac;
 	unsigned long flags;
 
-	spin_lock_irqsave(&pch->lock, flags);
+	spin_lock_irqsave(&pl330->lock, flags);
 
 	dma_cookie_init(chan);
 	pch->cyclic = false;
 
 	pch->thread = pl330_request_channel(pl330);
 	if (!pch->thread) {
-		spin_unlock_irqrestore(&pch->lock, flags);
+		spin_unlock_irqrestore(&pl330->lock, flags);
 		return -ENOMEM;
 	}
 
 	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
 
-	spin_unlock_irqrestore(&pch->lock, flags);
+	spin_unlock_irqrestore(&pl330->lock, flags);
 
 	return 1;
 }
@@ -2236,12 +2228,13 @@
 static void pl330_free_chan_resources(struct dma_chan *chan)
 {
 	struct dma_pl330_chan *pch = to_pchan(chan);
+	struct pl330_dmac *pl330 = pch->dmac;
 	unsigned long flags;
 
 	tasklet_kill(&pch->task);
 
 	pm_runtime_get_sync(pch->dmac->ddma.dev);
-	spin_lock_irqsave(&pch->lock, flags);
+	spin_lock_irqsave(&pl330->lock, flags);
 
 	pl330_release_channel(pch->thread);
 	pch->thread = NULL;
@@ -2249,7 +2242,7 @@
 	if (pch->cyclic)
 		list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
 
-	spin_unlock_irqrestore(&pch->lock, flags);
+	spin_unlock_irqrestore(&pl330->lock, flags);
 	pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
 	pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
 }
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index da0e81d..065b765 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -56,24 +56,6 @@
 	} while (0)
 
 /* gpii specific logging macros */
-#define GPII_REG(gpii, ch, fmt, ...) do { \
-	if (gpii->klog_lvl >= LOG_LVL_REG_ACCESS) \
-		pr_info("%s:%u:%s: " fmt, gpii->label, \
-			ch, __func__, ##__VA_ARGS__); \
-	if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_REG_ACCESS) \
-		ipc_log_string(gpii->ilctxt, \
-			       "ch:%u %s: " fmt, ch, \
-			       __func__, ##__VA_ARGS__); \
-	} while (0)
-#define GPII_VERB(gpii, ch, fmt, ...) do { \
-	if (gpii->klog_lvl >= LOG_LVL_VERBOSE) \
-		pr_info("%s:%u:%s: " fmt, gpii->label, \
-			ch, __func__, ##__VA_ARGS__); \
-	if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_VERBOSE) \
-		ipc_log_string(gpii->ilctxt, \
-			       "ch:%u %s: " fmt, ch, \
-			       __func__, ##__VA_ARGS__); \
-	} while (0)
 #define GPII_INFO(gpii, ch, fmt, ...) do { \
 	if (gpii->klog_lvl >= LOG_LVL_INFO) \
 		pr_info("%s:%u:%s: " fmt, gpii->label, ch, \
@@ -123,11 +105,33 @@
 #define IPC_LOG_PAGES (40)
 #define GPI_DBG_LOG_SIZE (SZ_1K) /* size must be power of 2 */
 #define CMD_TIMEOUT_MS (1000)
+#define GPII_REG(gpii, ch, fmt, ...) do { \
+	if (gpii->klog_lvl >= LOG_LVL_REG_ACCESS) \
+		pr_info("%s:%u:%s: " fmt, gpii->label, \
+			ch, __func__, ##__VA_ARGS__); \
+	if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_REG_ACCESS) \
+		ipc_log_string(gpii->ilctxt, \
+			       "ch:%u %s: " fmt, ch, \
+			       __func__, ##__VA_ARGS__); \
+	} while (0)
+#define GPII_VERB(gpii, ch, fmt, ...) do { \
+	if (gpii->klog_lvl >= LOG_LVL_VERBOSE) \
+		pr_info("%s:%u:%s: " fmt, gpii->label, \
+			ch, __func__, ##__VA_ARGS__); \
+	if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_VERBOSE) \
+		ipc_log_string(gpii->ilctxt, \
+			       "ch:%u %s: " fmt, ch, \
+			       __func__, ##__VA_ARGS__); \
+	} while (0)
+
 #else
 #define IPC_LOG_PAGES (2)
 #define GPI_DBG_LOG_SIZE (0) /* size must be power of 2 */
 #define DEFAULT_IPC_LOG_LVL (LOG_LVL_ERROR)
 #define CMD_TIMEOUT_MS (250)
+/* verbose and register logging are disabled if !debug */
+#define GPII_REG(gpii, ch, fmt, ...)
+#define GPII_VERB(gpii, ch, fmt, ...)
 #endif
 
 #define GPI_LABEL_SIZE (256)
@@ -468,7 +472,6 @@
 	u32 req_tres; /* # of tre's client requested */
 	u32 dir;
 	struct gpi_ring ch_ring;
-	struct gpi_ring sg_ring; /* points to client scatterlist */
 	struct gpi_client_info client_info;
 };
 
@@ -510,7 +513,6 @@
 struct gpi_desc {
 	struct virt_dma_desc vd;
 	void *wp; /* points to TRE last queued during issue_pending */
-	struct sg_tre *sg_tre; /* points to last scatterlist */
 	void *db; /* DB register to program */
 	struct gpii_chan *gpii_chan;
 };
@@ -936,11 +938,8 @@
 /* process transfer completion interrupt */
 static void gpi_process_ieob(struct gpii *gpii)
 {
-	u32 ieob_irq;
 
-	ieob_irq = gpi_read_reg(gpii, gpii->ieob_src_reg);
-	gpi_write_reg(gpii, gpii->ieob_clr_reg, ieob_irq);
-	GPII_VERB(gpii, GPI_DBG_COMMON, "IEOB_IRQ:0x%x\n", ieob_irq);
+	gpi_write_reg(gpii, gpii->ieob_clr_reg, BIT(0));
 
 	/* process events based on priority */
 	if (likely(gpii->ev_priority >= EV_PRIORITY_TASKLET)) {
@@ -1106,6 +1105,14 @@
 			type &= ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB);
 		}
 
+		/* transfer complete interrupt */
+		if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB) {
+			GPII_VERB(gpii, GPI_DBG_COMMON,
+				  "process IEOB interrupts\n");
+			gpi_process_ieob(gpii);
+			type &= ~GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
+		}
+
 		/* event control irq */
 		if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL) {
 			u32 ev_state;
@@ -1148,14 +1155,6 @@
 			type &= ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL);
 		}
 
-		/* transfer complete interrupt */
-		if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB) {
-			GPII_VERB(gpii, GPI_DBG_COMMON,
-				  "process IEOB interrupts\n");
-			gpi_process_ieob(gpii);
-			type &= ~GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
-		}
-
 		if (type) {
 			GPII_CRITIC(gpii, GPI_DBG_COMMON,
 				 "Unhandled interrupt status:0x%x\n", type);
@@ -1176,11 +1175,10 @@
 static void gpi_process_qup_notif_event(struct gpii_chan *gpii_chan,
 					struct qup_notif_event *notif_event)
 {
-	struct gpii *gpii = gpii_chan->gpii;
 	struct gpi_client_info *client_info = &gpii_chan->client_info;
 	struct msm_gpi_cb msm_gpi_cb;
 
-	GPII_VERB(gpii, gpii_chan->chid,
+	GPII_VERB(gpii_chan->gpii, gpii_chan->chid,
 		  "status:0x%x time:0x%x count:0x%x\n",
 		  notif_event->status, notif_event->time, notif_event->count);
 
@@ -1188,7 +1186,7 @@
 	msm_gpi_cb.status = notif_event->status;
 	msm_gpi_cb.timestamp = notif_event->time;
 	msm_gpi_cb.count = notif_event->count;
-	GPII_VERB(gpii, gpii_chan->chid, "sending CB event:%s\n",
+	GPII_VERB(gpii_chan->gpii, gpii_chan->chid, "sending CB event:%s\n",
 		  TO_GPI_CB_EVENT_STR(msm_gpi_cb.cb_event));
 	client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb,
 			      client_info->cb_param);
@@ -1200,11 +1198,8 @@
 {
 	struct gpii *gpii = gpii_chan->gpii;
 	struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
-	struct gpi_ring *sg_ring = &gpii_chan->sg_ring;
 	struct virt_dma_desc *vd;
 	struct gpi_desc *gpi_desc;
-	struct msm_gpi_tre *client_tre;
-	void *sg_tre;
 	void *tre = ch_ring->base +
 		(ch_ring->el_size * imed_event->tre_index);
 	struct msm_gpi_dma_async_tx_cb_param *tx_cb_param;
@@ -1262,8 +1257,6 @@
 	list_del(&vd->node);
 	spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
 
-	sg_tre = gpi_desc->sg_tre;
-	client_tre = ((struct sg_tre *)sg_tre)->ptr;
 
 	/*
 	 * RP pointed by Event is to last TRE processed,
@@ -1273,38 +1266,26 @@
 	if (tre >= (ch_ring->base + ch_ring->len))
 		tre = ch_ring->base;
 	ch_ring->rp = tre;
-	sg_tre += sg_ring->el_size;
-	if (sg_tre >= (sg_ring->base + sg_ring->len))
-		sg_tre = sg_ring->base;
-	sg_ring->rp = sg_tre;
 
 	/* make sure rp updates are immediately visible to all cores */
 	smp_wmb();
 
-	/* update Immediate data from Event back in to TRE if it's RX channel */
-	if (gpii_chan->dir == GPI_CHTYPE_DIR_IN) {
-		client_tre->dword[0] =
-			((struct msm_gpi_tre *)imed_event)->dword[0];
-		client_tre->dword[1] =
-			((struct msm_gpi_tre *)imed_event)->dword[1];
-		client_tre->dword[2] = MSM_GPI_DMA_IMMEDIATE_TRE_DWORD2(
-						      imed_event->length);
-	}
-
 	tx_cb_param = vd->tx.callback_param;
-	if (tx_cb_param) {
+	if (vd->tx.callback && tx_cb_param) {
+		struct msm_gpi_tre *imed_tre = &tx_cb_param->imed_tre;
+
 		GPII_VERB(gpii, gpii_chan->chid,
 			  "cb_length:%u compl_code:0x%x status:0x%x\n",
 			  imed_event->length, imed_event->code,
 			  imed_event->status);
+		/* Update immediate data if any from event */
+		*imed_tre = *((struct msm_gpi_tre *)imed_event);
 		tx_cb_param->length = imed_event->length;
 		tx_cb_param->completion_code = imed_event->code;
 		tx_cb_param->status = imed_event->status;
+		vd->tx.callback(tx_cb_param);
 	}
-
-	spin_lock_irqsave(&gpii_chan->vc.lock, flags);
-	vchan_cookie_complete(vd);
-	spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
+	kfree(gpi_desc);
 }
 
 /* processing transfer completion events */
@@ -1313,13 +1294,10 @@
 {
 	struct gpii *gpii = gpii_chan->gpii;
 	struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
-	struct gpi_ring *sg_ring = &gpii_chan->sg_ring;
 	void *ev_rp = to_virtual(ch_ring, compl_event->ptr);
-	struct msm_gpi_tre *client_tre;
 	struct virt_dma_desc *vd;
 	struct msm_gpi_dma_async_tx_cb_param *tx_cb_param;
 	struct gpi_desc *gpi_desc;
-	void *sg_tre = NULL;
 	unsigned long flags;
 
 	/* only process events on active channel */
@@ -1366,8 +1344,6 @@
 	list_del(&vd->node);
 	spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
 
-	sg_tre = gpi_desc->sg_tre;
-	client_tre = ((struct sg_tre *)sg_tre)->ptr;
 
 	/*
 	 * RP pointed by Event is to last TRE processed,
@@ -1377,16 +1353,12 @@
 	if (ev_rp >= (ch_ring->base + ch_ring->len))
 		ev_rp = ch_ring->base;
 	ch_ring->rp = ev_rp;
-	sg_tre += sg_ring->el_size;
-	if (sg_tre >= (sg_ring->base + sg_ring->len))
-		sg_tre = sg_ring->base;
-	sg_ring->rp = sg_tre;
 
 	/* update must be visible to other cores */
 	smp_wmb();
 
 	tx_cb_param = vd->tx.callback_param;
-	if (tx_cb_param) {
+	if (vd->tx.callback && tx_cb_param) {
 		GPII_VERB(gpii, gpii_chan->chid,
 			  "cb_length:%u compl_code:0x%x status:0x%x\n",
 			  compl_event->length, compl_event->code,
@@ -1394,37 +1366,36 @@
 		tx_cb_param->length = compl_event->length;
 		tx_cb_param->completion_code = compl_event->code;
 		tx_cb_param->status = compl_event->status;
+		vd->tx.callback(tx_cb_param);
 	}
-
-	spin_lock_irqsave(&gpii_chan->vc.lock, flags);
-	vchan_cookie_complete(vd);
-	spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
+	kfree(gpi_desc);
 }
 
 /* process all events */
 static void gpi_process_events(struct gpii *gpii)
 {
 	struct gpi_ring *ev_ring = &gpii->ev_ring;
-	u32 cntxt_rp, local_rp;
+	phys_addr_t cntxt_rp, local_rp;
+	void *rp;
 	union gpi_event *gpi_event;
 	struct gpii_chan *gpii_chan;
 	u32 chid, type;
-	u32 ieob_irq;
 
 	cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
-	local_rp = (u32)to_physical(ev_ring, (void *)ev_ring->rp);
+	rp = to_virtual(ev_ring, cntxt_rp);
+	local_rp = to_physical(ev_ring, ev_ring->rp);
 
-	GPII_VERB(gpii, GPI_DBG_COMMON, "cntxt_rp: 0x08%x local_rp:0x08%x\n",
-		  cntxt_rp, local_rp);
+	GPII_VERB(gpii, GPI_DBG_COMMON, "cntxt_rp:%pa local_rp:%pa\n",
+		  &cntxt_rp, &local_rp);
 
 	do {
-		while (local_rp != cntxt_rp) {
+		while (rp != ev_ring->rp) {
 			gpi_event = ev_ring->rp;
 			chid = gpi_event->xfer_compl_event.chid;
 			type = gpi_event->xfer_compl_event.type;
 			GPII_VERB(gpii, GPI_DBG_COMMON,
-				  "rp:0x%08x chid:%u type:0x%x %08x %08x %08x %08x\n",
-				  local_rp, chid, type,
+				  "chid:%u type:0x%x %08x %08x %08x %08x\n",
+				  chid, type,
 				  gpi_event->gpi_ere.dword[0],
 				  gpi_event->gpi_ere.dword[1],
 				  gpi_event->gpi_ere.dword[2],
@@ -1456,22 +1427,18 @@
 					  type);
 			}
 			gpi_ring_recycle_ev_element(ev_ring);
-			local_rp = (u32)to_physical(ev_ring,
-						    (void *)ev_ring->rp);
 		}
 		gpi_write_ev_db(gpii, ev_ring, ev_ring->wp);
 
 		/* clear pending IEOB events */
-		ieob_irq = gpi_read_reg(gpii, gpii->ieob_src_reg);
-		gpi_write_reg(gpii, gpii->ieob_clr_reg, ieob_irq);
+		gpi_write_reg(gpii, gpii->ieob_clr_reg, BIT(0));
 
 		cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
-		local_rp = (u32)to_physical(ev_ring, (void *)ev_ring->rp);
+		rp = to_virtual(ev_ring, cntxt_rp);
 
-	} while (cntxt_rp != local_rp);
+	} while (rp != ev_ring->rp);
 
-	GPII_VERB(gpii, GPI_DBG_COMMON, "exit: c_rp:0x%x l_rp:0x%x\n", cntxt_rp,
-		  local_rp);
+	GPII_VERB(gpii, GPI_DBG_COMMON, "exit: c_rp:%pa\n", &cntxt_rp);
 }
 
 /* processing events using tasklet */
@@ -1532,7 +1499,6 @@
 {
 	struct gpii *gpii = gpii_chan->gpii;
 	struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
-	struct gpi_ring *sg_ring = &gpii_chan->sg_ring;
 	unsigned long flags;
 	LIST_HEAD(list);
 	int ret;
@@ -1549,8 +1515,6 @@
 	/* initialize the local ring ptrs */
 	ch_ring->rp = ch_ring->base;
 	ch_ring->wp = ch_ring->base;
-	sg_ring->rp = sg_ring->base;
-	sg_ring->wp = sg_ring->base;
 
 	/* visible to other cores */
 	smp_wmb();
@@ -1840,11 +1804,8 @@
 static void gpi_free_ring(struct gpi_ring *ring,
 			  struct gpii *gpii)
 {
-	if (ring->dma_handle)
-		dma_free_coherent(gpii->gpi_dev->dev, ring->alloc_size,
-				  ring->pre_aligned, ring->dma_handle);
-	else
-		vfree(ring->pre_aligned);
+	dma_free_coherent(gpii->gpi_dev->dev, ring->alloc_size,
+			  ring->pre_aligned, ring->dma_handle);
 	memset(ring, 0, sizeof(*ring));
 }
 
@@ -1852,51 +1813,34 @@
 static int gpi_alloc_ring(struct gpi_ring *ring,
 			  u32 elements,
 			  u32 el_size,
-			  struct gpii *gpii,
-			  bool alloc_coherent)
+			  struct gpii *gpii)
 {
 	u64 len = elements * el_size;
 	int bit;
 
-	if (alloc_coherent) {
-		/* ring len must be power of 2 */
-		bit = find_last_bit((unsigned long *)&len, 32);
-		if (((1 << bit) - 1) & len)
-			bit++;
-		len = 1 << bit;
-		ring->alloc_size = (len + (len - 1));
-		GPII_INFO(gpii, GPI_DBG_COMMON,
-			  "#el:%u el_size:%u len:%u actual_len:%llu alloc_size:%lu\n",
-			  elements, el_size, (elements * el_size), len,
-			  ring->alloc_size);
-		ring->pre_aligned = dma_alloc_coherent(gpii->gpi_dev->dev,
-						       ring->alloc_size,
-						       &ring->dma_handle,
-						       GFP_KERNEL);
-		if (!ring->pre_aligned) {
-			GPII_CRITIC(gpii, GPI_DBG_COMMON,
-				    "could not alloc size:%lu mem for ring\n",
-				    ring->alloc_size);
-			return -ENOMEM;
-		}
-
-		/* align the physical mem */
-		ring->phys_addr = (ring->dma_handle + (len - 1)) & ~(len - 1);
-		ring->base = ring->pre_aligned +
-			(ring->phys_addr - ring->dma_handle);
-	} else {
-		ring->pre_aligned = vmalloc(len);
-		if (!ring->pre_aligned) {
-			GPII_CRITIC(gpii, GPI_DBG_COMMON,
-				    "could not allocsize:%llu mem for ring\n",
-				    len);
-			return -ENOMEM;
-		}
-		ring->phys_addr = 0;
-		ring->dma_handle = 0;
-		ring->base = ring->pre_aligned;
+	/* ring len must be power of 2 */
+	bit = find_last_bit((unsigned long *)&len, 32);
+	if (((1 << bit) - 1) & len)
+		bit++;
+	len = 1 << bit;
+	ring->alloc_size = (len + (len - 1));
+	GPII_INFO(gpii, GPI_DBG_COMMON,
+		  "#el:%u el_size:%u len:%u actual_len:%llu alloc_size:%lu\n",
+		  elements, el_size, (elements * el_size), len,
+		  ring->alloc_size);
+	ring->pre_aligned = dma_alloc_coherent(gpii->gpi_dev->dev,
+					       ring->alloc_size,
+					       &ring->dma_handle, GFP_KERNEL);
+	if (!ring->pre_aligned) {
+		GPII_CRITIC(gpii, GPI_DBG_COMMON,
+			    "could not alloc size:%lu mem for ring\n",
+			    ring->alloc_size);
+		return -ENOMEM;
 	}
 
+	/* align the physical mem */
+	ring->phys_addr = (ring->dma_handle + (len - 1)) & ~(len - 1);
+	ring->base = ring->pre_aligned + (ring->phys_addr - ring->dma_handle);
 	ring->rp = ring->base;
 	ring->wp = ring->base;
 	ring->len = len;
@@ -1920,8 +1864,7 @@
 static void gpi_queue_xfer(struct gpii *gpii,
 			   struct gpii_chan *gpii_chan,
 			   struct msm_gpi_tre *gpi_tre,
-			   void **wp,
-			   struct sg_tre **sg_tre)
+			   void **wp)
 {
 	struct msm_gpi_tre *ch_tre;
 	int ret;
@@ -1933,18 +1876,9 @@
 			    "Error adding ring element to xfer ring\n");
 		return;
 	}
-	/* get next sg tre location we can use */
-	ret = gpi_ring_add_element(&gpii_chan->sg_ring, (void **)sg_tre);
-	if (unlikely(ret)) {
-		GPII_CRITIC(gpii, gpii_chan->chid,
-			    "Error adding ring element to sg ring\n");
-		return;
-	}
 
 	/* copy the tre info */
 	memcpy(ch_tre, gpi_tre, sizeof(*ch_tre));
-	(*sg_tre)->ptr = gpi_tre;
-	(*sg_tre)->wp = ch_tre;
 	*wp = ch_tre;
 }
 
@@ -2122,14 +2056,12 @@
 {
 	struct gpii_chan *gpii_chan = to_gpii_chan(chan);
 	struct gpii *gpii = gpii_chan->gpii;
-	u32 nr, sg_nr;
+	u32 nr;
 	u32 nr_req = 0;
 	int i, j;
 	struct scatterlist *sg;
 	struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
-	struct gpi_ring *sg_ring = &gpii_chan->sg_ring;
 	void *tre, *wp = NULL;
-	struct sg_tre *sg_tre = NULL;
 	const gfp_t gfp = GFP_ATOMIC;
 	struct gpi_desc *gpi_desc;
 
@@ -2143,20 +2075,17 @@
 
 	/* calculate # of elements required & available */
 	nr = gpi_ring_num_elements_avail(ch_ring);
-	sg_nr = gpi_ring_num_elements_avail(sg_ring);
 	for_each_sg(sgl, sg, sg_len, i) {
 		GPII_VERB(gpii, gpii_chan->chid,
 			  "%d of %u len:%u\n", i, sg_len, sg->length);
 		nr_req += (sg->length / ch_ring->el_size);
 	}
-	GPII_VERB(gpii, gpii_chan->chid,
-		  "nr_elements_avail:%u sg_avail:%u required:%u\n",
-		  nr, sg_nr, nr_req);
+	GPII_VERB(gpii, gpii_chan->chid, "el avail:%u req:%u\n", nr, nr_req);
 
-	if (nr < nr_req || sg_nr < nr_req) {
+	if (nr < nr_req) {
 		GPII_ERR(gpii, gpii_chan->chid,
-			 "not enough space in ring, avail:%u,%u required:%u\n",
-			 nr, sg_nr, nr_req);
+			 "not enough space in ring, avail:%u required:%u\n",
+			 nr, nr_req);
 		return NULL;
 	}
 
@@ -2171,12 +2100,11 @@
 	for_each_sg(sgl, sg, sg_len, i)
 		for (j = 0, tre = sg_virt(sg); j < sg->length;
 		     j += ch_ring->el_size, tre += ch_ring->el_size)
-			gpi_queue_xfer(gpii, gpii_chan, tre, &wp, &sg_tre);
+			gpi_queue_xfer(gpii, gpii_chan, tre, &wp);
 
 	/* set up the descriptor */
 	gpi_desc->db = ch_ring->wp;
 	gpi_desc->wp = wp;
-	gpi_desc->sg_tre = sg_tre;
 	gpi_desc->gpii_chan = gpii_chan;
 	GPII_VERB(gpii, gpii_chan->chid, "exit wp:0x%0llx rp:0x%0llx\n",
 		  to_physical(ch_ring, ch_ring->wp),
@@ -2271,7 +2199,7 @@
 		elements = max(gpii->gpii_chan[0].req_tres,
 			       gpii->gpii_chan[1].req_tres);
 		ret = gpi_alloc_ring(&gpii->ev_ring, elements << ev_factor,
-				     sizeof(union gpi_event), gpii, true);
+				     sizeof(union gpi_event), gpii);
 		if (ret) {
 			GPII_ERR(gpii, gpii_chan->chid,
 				 "error allocating mem for ev ring\n");
@@ -2396,7 +2324,6 @@
 
 	/* free all allocated memory */
 	gpi_free_ring(&gpii_chan->ch_ring, gpii);
-	gpi_free_ring(&gpii_chan->sg_ring, gpii);
 	vchan_free_chan_resources(&gpii_chan->vc);
 
 	write_lock_irq(&gpii->pm_lock);
@@ -2451,26 +2378,15 @@
 
 	/* allocate memory for transfer ring */
 	ret = gpi_alloc_ring(&gpii_chan->ch_ring, gpii_chan->req_tres,
-			     sizeof(struct msm_gpi_tre), gpii, true);
+			     sizeof(struct msm_gpi_tre), gpii);
 	if (ret) {
 		GPII_ERR(gpii, gpii_chan->chid,
 			 "error allocating xfer ring, ret:%d\n", ret);
 		goto xfer_alloc_err;
 	}
-
-	ret = gpi_alloc_ring(&gpii_chan->sg_ring, gpii_chan->ch_ring.elements,
-			     sizeof(struct sg_tre), gpii, false);
-	if (ret) {
-		GPII_ERR(gpii, gpii_chan->chid,
-			 "error allocating sg ring, ret:%d\n", ret);
-		goto sg_alloc_error;
-	}
 	mutex_unlock(&gpii->ctrl_lock);
 
 	return 0;
-
-sg_alloc_error:
-	gpi_free_ring(&gpii_chan->ch_ring, gpii);
 xfer_alloc_err:
 	mutex_unlock(&gpii->ctrl_lock);
 
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 307547f..ae3f60b 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -884,7 +884,7 @@
 	struct virt_dma_desc *vdesc;
 	enum dma_status status;
 	unsigned long flags;
-	u32 residue;
+	u32 residue = 0;
 
 	status = dma_cookie_status(c, cookie, state);
 	if ((status == DMA_COMPLETE) || (!state))
@@ -892,16 +892,12 @@
 
 	spin_lock_irqsave(&chan->vchan.lock, flags);
 	vdesc = vchan_find_desc(&chan->vchan, cookie);
-	if (cookie == chan->desc->vdesc.tx.cookie) {
+	if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
 		residue = stm32_dma_desc_residue(chan, chan->desc,
 						 chan->next_sg);
-	} else if (vdesc) {
+	else if (vdesc)
 		residue = stm32_dma_desc_residue(chan,
 						 to_stm32_dma_desc(vdesc), 0);
-	} else {
-		residue = 0;
-	}
-
 	dma_set_residue(state, residue);
 
 	spin_unlock_irqrestore(&chan->vchan.lock, flags);
@@ -976,21 +972,18 @@
 	struct stm32_dma_chan *chan;
 	struct dma_chan *c;
 
-	if (dma_spec->args_count < 3)
+	if (dma_spec->args_count < 4)
 		return NULL;
 
 	cfg.channel_id = dma_spec->args[0];
 	cfg.request_line = dma_spec->args[1];
 	cfg.stream_config = dma_spec->args[2];
-	cfg.threshold = 0;
+	cfg.threshold = dma_spec->args[3];
 
 	if ((cfg.channel_id >= STM32_DMA_MAX_CHANNELS) || (cfg.request_line >=
 				STM32_DMA_MAX_REQUEST_ID))
 		return NULL;
 
-	if (dma_spec->args_count > 3)
-		cfg.threshold = dma_spec->args[3];
-
 	chan = &dmadev->chan[cfg.channel_id];
 
 	c = dma_get_slave_channel(&chan->vchan.chan);
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index 88a00d0..43e88d8 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -49,12 +49,12 @@
 
 struct ti_am335x_xbar_map {
 	u16 dma_line;
-	u16 mux_val;
+	u8 mux_val;
 };
 
-static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u16 val)
+static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val)
 {
-	writeb_relaxed(val & 0x1f, iomem + event);
+	writeb_relaxed(val, iomem + event);
 }
 
 static void ti_am335x_xbar_free(struct device *dev, void *route_data)
@@ -105,7 +105,7 @@
 	}
 
 	map->dma_line = (u16)dma_spec->args[0];
-	map->mux_val = (u16)dma_spec->args[2];
+	map->mux_val = (u8)dma_spec->args[2];
 
 	dma_spec->args[2] = 0;
 	dma_spec->args_count = 2;
diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c
index 245d759..6059d81 100644
--- a/drivers/dma/zx296702_dma.c
+++ b/drivers/dma/zx296702_dma.c
@@ -813,6 +813,7 @@
 	INIT_LIST_HEAD(&d->slave.channels);
 	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
 	dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
+	dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
 	dma_cap_set(DMA_PRIVATE, d->slave.cap_mask);
 	d->slave.dev = &op->dev;
 	d->slave.device_free_chan_resources = zx_dma_free_chan_resources;
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 72e07e3..16e0eb5 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -227,7 +227,7 @@
 #define			NREC_RDWR(x)		(((x)>>11) & 1)
 #define			NREC_RANK(x)		(((x)>>8) & 0x7)
 #define		NRECMEMB		0xC0
-#define			NREC_CAS(x)		(((x)>>16) & 0xFFFFFF)
+#define			NREC_CAS(x)		(((x)>>16) & 0xFFF)
 #define			NREC_RAS(x)		((x) & 0x7FFF)
 #define		NRECFGLOG		0xC4
 #define		NREEECFBDA		0xC8
@@ -371,7 +371,7 @@
 	/* These registers are input ONLY if there was a
 	 * Non-Recoverable Error */
 	u16 nrecmema;		/* Non-Recoverable Mem log A */
-	u16 nrecmemb;		/* Non-Recoverable Mem log B */
+	u32 nrecmemb;		/* Non-Recoverable Mem log B */
 
 };
 
@@ -407,7 +407,7 @@
 				NERR_FAT_FBD, &info->nerr_fat_fbd);
 		pci_read_config_word(pvt->branchmap_werrors,
 				NRECMEMA, &info->nrecmema);
-		pci_read_config_word(pvt->branchmap_werrors,
+		pci_read_config_dword(pvt->branchmap_werrors,
 				NRECMEMB, &info->nrecmemb);
 
 		/* Clear the error bits, by writing them back */
@@ -1293,7 +1293,7 @@
 			dimm->mtype = MEM_FB_DDR2;
 
 			/* ask what device type on this row */
-			if (MTR_DRAM_WIDTH(mtr))
+			if (MTR_DRAM_WIDTH(mtr) == 8)
 				dimm->dtype = DEV_X8;
 			else
 				dimm->dtype = DEV_X4;
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index 6ef6ad1..2ea2f32 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -368,7 +368,7 @@
 
 	/* These registers are input ONLY if there was a Non-Rec Error */
 	u16 nrecmema;		/* Non-Recoverable Mem log A */
-	u16 nrecmemb;		/* Non-Recoverable Mem log B */
+	u32 nrecmemb;		/* Non-Recoverable Mem log B */
 
 };
 
@@ -458,7 +458,7 @@
 				NERR_FAT_FBD, &info->nerr_fat_fbd);
 		pci_read_config_word(pvt->branchmap_werrors,
 				NRECMEMA, &info->nrecmema);
-		pci_read_config_word(pvt->branchmap_werrors,
+		pci_read_config_dword(pvt->branchmap_werrors,
 				NRECMEMB, &info->nrecmemb);
 
 		/* Clear the error bits, by writing them back */
@@ -1207,13 +1207,14 @@
 
 			dimm->nr_pages = size_mb << 8;
 			dimm->grain = 8;
-			dimm->dtype = MTR_DRAM_WIDTH(mtr) ? DEV_X8 : DEV_X4;
+			dimm->dtype = MTR_DRAM_WIDTH(mtr) == 8 ?
+				      DEV_X8 : DEV_X4;
 			dimm->mtype = MEM_FB_DDR2;
 			/*
 			 * The eccc mechanism is SDDC (aka SECC), with
 			 * is similar to Chipkill.
 			 */
-			dimm->edac_mode = MTR_DRAM_WIDTH(mtr) ?
+			dimm->edac_mode = MTR_DRAM_WIDTH(mtr) == 8 ?
 					  EDAC_S8ECD8ED : EDAC_S4ECD4ED;
 			ndimms++;
 		}
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 5477522..3c47e63 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -2510,6 +2510,7 @@
 			break;
 		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
 			pvt->pci_ta = pdev;
+			break;
 		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS:
 			pvt->pci_ras = pdev;
 			break;
diff --git a/drivers/esoc/Kconfig b/drivers/esoc/Kconfig
index 3c65f69..4613150 100644
--- a/drivers/esoc/Kconfig
+++ b/drivers/esoc/Kconfig
@@ -38,7 +38,7 @@
 	  allow logging of different esoc driver traces.
 
 config ESOC_MDM_4x
-	bool "Add support for external mdm9x25/mdm9x35/mdm9x55"
+	bool "Add support for external modem"
 	help
 	  In some Qualcomm Technologies, Inc. boards, an external modem such as
 	  mdm9x25 or mdm9x35 is connected to a primary msm. The primary soc can
@@ -49,7 +49,7 @@
 	tristate "Command engine for 4x series external modems"
 	help
 	  Provides a command engine to control the behavior of an external modem
-	  such as mdm9x25/mdm9x35/mdm9x55/QSC. Allows the primary soc to put the
+	  such as mdm9x25/mdm9x35/mdm9x55/sdxpoorwills/QSC. Allows the primary soc to put the
 	  external modem in a specific mode. Also listens for events on the
 	  external modem.
 
diff --git a/drivers/esoc/esoc-mdm-4x.c b/drivers/esoc/esoc-mdm-4x.c
index 677e21d..bbec9d3 100644
--- a/drivers/esoc/esoc-mdm-4x.c
+++ b/drivers/esoc/esoc-mdm-4x.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2015, 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -794,6 +794,28 @@
 	mdm->gpio_state_running = NULL;
 	return retval;
 }
+
+static void mdm_release_ipc_gpio(struct mdm_ctrl *mdm)
+{
+	int i;
+
+	if (!mdm)
+		return;
+
+	for (i = 0; i < NUM_GPIOS; ++i)
+		if (gpio_is_valid(MDM_GPIO(mdm, i)))
+			gpio_free(MDM_GPIO(mdm, i));
+}
+
+static void mdm_free_irq(struct mdm_ctrl *mdm)
+{
+	if (!mdm)
+		return;
+
+	free_irq(mdm->errfatal_irq, mdm);
+	free_irq(mdm->status_irq, mdm);
+}
+
 static int mdm9x25_setup_hw(struct mdm_ctrl *mdm,
 					const struct mdm_ops *ops,
 					struct platform_device *pdev)
@@ -1028,6 +1050,108 @@
 	return 0;
 }
 
+static int sdxpoorwills_setup_hw(struct mdm_ctrl *mdm,
+				const struct mdm_ops *ops,
+				struct platform_device *pdev)
+{
+	int ret;
+	struct device_node *node;
+	struct esoc_clink *esoc;
+	const struct esoc_clink_ops *clink_ops = ops->clink_ops;
+	const struct mdm_pon_ops *pon_ops = ops->pon_ops;
+
+	mdm->dev = &pdev->dev;
+	mdm->pon_ops = pon_ops;
+	node = pdev->dev.of_node;
+
+	esoc = devm_kzalloc(mdm->dev, sizeof(*esoc), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(esoc)) {
+		dev_err(mdm->dev, "cannot allocate esoc device\n");
+		return PTR_ERR(esoc);
+	}
+
+	esoc->pdev = pdev;
+
+	mdm->mdm_queue = alloc_workqueue("mdm_queue", 0, 0);
+	if (!mdm->mdm_queue) {
+		dev_err(mdm->dev, "could not create mdm_queue\n");
+		return -ENOMEM;
+	}
+
+	mdm->irq_mask = 0;
+	mdm->ready = false;
+
+	ret = mdm_dt_parse_gpios(mdm);
+	if (ret) {
+		dev_err(mdm->dev, "Failed to parse DT gpios\n");
+		goto err_destroy_wrkq;
+	}
+
+	ret = mdm_pon_dt_init(mdm);
+	if (ret) {
+		dev_err(mdm->dev, "Failed to parse PON DT gpio\n");
+		goto err_destroy_wrkq;
+	}
+
+	ret = mdm_pinctrl_init(mdm);
+	if (ret) {
+		dev_err(mdm->dev, "Failed to init pinctrl\n");
+		goto err_destroy_wrkq;
+	}
+
+	ret = mdm_pon_setup(mdm);
+	if (ret) {
+		dev_err(mdm->dev, "Failed to setup PON\n");
+		goto err_destroy_wrkq;
+	}
+
+	ret = mdm_configure_ipc(mdm, pdev);
+	if (ret) {
+		dev_err(mdm->dev, "Failed to configure the ipc\n");
+		goto err_release_ipc;
+	}
+
+	esoc->name = SDXPOORWILLS_LABEL;
+	esoc->link_name = SDXPOORWILLS_PCIE;
+
+	ret = of_property_read_string(node, "qcom,mdm-link-info",
+					&esoc->link_info);
+	if (ret)
+		dev_info(mdm->dev, "esoc link info missing\n");
+
+	esoc->clink_ops = clink_ops;
+	esoc->parent = mdm->dev;
+	esoc->owner = THIS_MODULE;
+	esoc->np = pdev->dev.of_node;
+	set_esoc_clink_data(esoc, mdm);
+
+	ret = esoc_clink_register(esoc);
+	if (ret) {
+		dev_err(mdm->dev, "esoc registration failed\n");
+		goto err_free_irq;
+	}
+	dev_dbg(mdm->dev, "esoc registration done\n");
+
+	init_completion(&mdm->debug_done);
+	INIT_WORK(&mdm->mdm_status_work, mdm_status_fn);
+	INIT_WORK(&mdm->restart_reason_work, mdm_get_restart_reason);
+	INIT_DELAYED_WORK(&mdm->mdm2ap_status_check_work, mdm2ap_status_check);
+	mdm->get_restart_reason = false;
+	mdm->debug_fail = false;
+	mdm->esoc = esoc;
+	mdm->init = 0;
+
+	return 0;
+
+err_free_irq:
+	mdm_free_irq(mdm);
+err_release_ipc:
+	mdm_release_ipc_gpio(mdm);
+err_destroy_wrkq:
+	destroy_workqueue(mdm->mdm_queue);
+	return ret;
+}
+
 static struct esoc_clink_ops mdm_cops = {
 	.cmd_exe = mdm_cmd_exe,
 	.get_status = mdm_get_status,
@@ -1053,6 +1177,12 @@
 	.pon_ops = &mdm9x55_pon_ops,
 };
 
+static struct mdm_ops sdxpoorwills_ops = {
+	.clink_ops = &mdm_cops,
+	.config_hw = sdxpoorwills_setup_hw,
+	.pon_ops = &sdxpoorwills_pon_ops,
+};
+
 static const struct of_device_id mdm_dt_match[] = {
 	{ .compatible = "qcom,ext-mdm9x25",
 		.data = &mdm9x25_ops, },
@@ -1060,6 +1190,8 @@
 		.data = &mdm9x35_ops, },
 	{ .compatible = "qcom,ext-mdm9x55",
 		.data = &mdm9x55_ops, },
+	{ .compatible = "qcom,ext-sdxpoorwills",
+		.data = &sdxpoorwills_ops, },
 	{},
 };
 MODULE_DEVICE_TABLE(of, mdm_dt_match);
diff --git a/drivers/esoc/esoc-mdm-drv.c b/drivers/esoc/esoc-mdm-drv.c
index 77ae84b..4291bbc 100644
--- a/drivers/esoc/esoc-mdm-drv.c
+++ b/drivers/esoc/esoc-mdm-drv.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -309,6 +309,10 @@
 		.name = "MDM9x55",
 		.data = NULL,
 	},
+	{
+		.name = "SDXPOORWILLS",
+		.data = NULL,
+	},
 };
 
 static struct esoc_drv esoc_ssr_drv = {
diff --git a/drivers/esoc/esoc-mdm-pon.c b/drivers/esoc/esoc-mdm-pon.c
index 0e85776..9624275 100644
--- a/drivers/esoc/esoc-mdm-pon.c
+++ b/drivers/esoc/esoc-mdm-pon.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2015, 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -60,6 +60,24 @@
 	return 0;
 }
 
+/* This function can be called from atomic context. */
+static int sdxpoorwills_toggle_soft_reset(struct mdm_ctrl *mdm, bool atomic)
+{
+	int soft_reset_direction_assert = mdm->soft_reset_inverted;
+
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+			soft_reset_direction_assert);
+	/*
+	 * Allow PS hold assert to be detected
+	 */
+	if (!atomic)
+		usleep_range(80000, 180000);
+	else
+		mdelay(100);
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+			!soft_reset_direction_assert);
+	return 0;
+}
 
 static int mdm4x_do_first_power_on(struct mdm_ctrl *mdm)
 {
@@ -99,6 +117,7 @@
 {
 	struct device *dev = mdm->dev;
 	int soft_reset_direction = mdm->soft_reset_inverted ? 1 : 0;
+
 	/* Assert the soft reset line whether mdm2ap_status went low or not */
 	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
 					soft_reset_direction);
@@ -135,6 +154,27 @@
 	return 0;
 }
 
+static int sdxpoorwills_power_down(struct mdm_ctrl *mdm)
+{
+	struct device *dev = mdm->dev;
+	int soft_reset_direction = mdm->soft_reset_inverted ? 1 : 0;
+
+	/* Assert the soft reset line whether mdm2ap_status went low or not */
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+					soft_reset_direction);
+	dev_info(dev, "Doing a hard reset\n");
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+						soft_reset_direction);
+	/*
+	 * Currently, there is a debounce timer on the charm PMIC. It is
+	 * necessary to hold the PMIC RESET low for 325ms
+	 * for the reset to fully take place. Sleep here to ensure the
+	 * reset has occurred before the function exits.
+	 */
+	mdelay(325);
+	return 0;
+}
+
 static void mdm4x_cold_reset(struct mdm_ctrl *mdm)
 {
 	if (!gpio_is_valid(MDM_GPIO(mdm, AP2MDM_SOFT_RESET)))
@@ -158,6 +198,16 @@
 			!mdm->soft_reset_inverted);
 }
 
+static void sdxpoorwills_cold_reset(struct mdm_ctrl *mdm)
+{
+	dev_info(mdm->dev, "Triggering mdm cold reset");
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+			!!mdm->soft_reset_inverted);
+	mdelay(600);
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+			!mdm->soft_reset_inverted);
+}
+
 static int mdm4x_pon_dt_init(struct mdm_ctrl *mdm)
 {
 	int val;
@@ -215,3 +265,12 @@
 	.dt_init = mdm4x_pon_dt_init,
 	.setup = mdm4x_pon_setup,
 };
+
+struct mdm_pon_ops sdxpoorwills_pon_ops = {
+	.pon = mdm4x_do_first_power_on,
+	.soft_reset = sdxpoorwills_toggle_soft_reset,
+	.poff_force = sdxpoorwills_power_down,
+	.cold_reset = sdxpoorwills_cold_reset,
+	.dt_init = mdm4x_pon_dt_init,
+	.setup = mdm4x_pon_setup,
+};
diff --git a/drivers/esoc/esoc-mdm.h b/drivers/esoc/esoc-mdm.h
index 621d913..baf4e0b 100644
--- a/drivers/esoc/esoc-mdm.h
+++ b/drivers/esoc/esoc-mdm.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2015, 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -35,6 +35,8 @@
 #define MDM9x35_HSIC			"HSIC"
 #define MDM9x55_LABEL			"MDM9x55"
 #define MDM9x55_PCIE			"PCIe"
+#define SDXPOORWILLS_LABEL		"SDXPOORWILLS"
+#define SDXPOORWILLS_PCIE		"PCIe"
 #define MDM2AP_STATUS_TIMEOUT_MS	120000L
 #define MDM_MODEM_TIMEOUT		3000
 #define DEF_RAMDUMP_TIMEOUT		120000
@@ -150,4 +152,5 @@
 extern struct mdm_pon_ops mdm9x25_pon_ops;
 extern struct mdm_pon_ops mdm9x35_pon_ops;
 extern struct mdm_pon_ops mdm9x55_pon_ops;
+extern struct mdm_pon_ops sdxpoorwills_pon_ops;
 #endif
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index a4944e2..2f47c5b 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -120,8 +120,7 @@
 	return str - buf;
 }
 
-static struct kobj_attribute efi_attr_systab =
-			__ATTR(systab, 0400, systab_show, NULL);
+static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);
 
 #define EFI_FIELD(var) efi.var
 
@@ -385,7 +384,6 @@
 			return 0;
 		}
 	}
-	pr_err_once("requested map not found.\n");
 	return -ENOENT;
 }
 
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index 1491407..311c9d0 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -106,7 +106,7 @@
 };
 
 /* Generic ESRT Entry ("ESRE") support. */
-static ssize_t esre_fw_class_show(struct esre_entry *entry, char *buf)
+static ssize_t fw_class_show(struct esre_entry *entry, char *buf)
 {
 	char *str = buf;
 
@@ -117,18 +117,16 @@
 	return str - buf;
 }
 
-static struct esre_attribute esre_fw_class = __ATTR(fw_class, 0400,
-	esre_fw_class_show, NULL);
+static struct esre_attribute esre_fw_class = __ATTR_RO_MODE(fw_class, 0400);
 
 #define esre_attr_decl(name, size, fmt) \
-static ssize_t esre_##name##_show(struct esre_entry *entry, char *buf) \
+static ssize_t name##_show(struct esre_entry *entry, char *buf) \
 { \
 	return sprintf(buf, fmt "\n", \
 		       le##size##_to_cpu(entry->esre.esre1->name)); \
 } \
 \
-static struct esre_attribute esre_##name = __ATTR(name, 0400, \
-	esre_##name##_show, NULL)
+static struct esre_attribute esre_##name = __ATTR_RO_MODE(name, 0400)
 
 esre_attr_decl(fw_type, 32, "%u");
 esre_attr_decl(fw_version, 32, "%u");
@@ -193,14 +191,13 @@
 
 /* support for displaying ESRT fields at the top level */
 #define esrt_attr_decl(name, size, fmt) \
-static ssize_t esrt_##name##_show(struct kobject *kobj, \
+static ssize_t name##_show(struct kobject *kobj, \
 				  struct kobj_attribute *attr, char *buf)\
 { \
 	return sprintf(buf, fmt "\n", le##size##_to_cpu(esrt->name)); \
 } \
 \
-static struct kobj_attribute esrt_##name = __ATTR(name, 0400, \
-	esrt_##name##_show, NULL)
+static struct kobj_attribute esrt_##name = __ATTR_RO_MODE(name, 0400)
 
 esrt_attr_decl(fw_resource_count, 32, "%u");
 esrt_attr_decl(fw_resource_count_max, 32, "%u");
@@ -254,7 +251,7 @@
 
 	rc = efi_mem_desc_lookup(efi.esrt, &md);
 	if (rc < 0) {
-		pr_err("ESRT header is not in the memory map.\n");
+		pr_warn("ESRT header is not in the memory map.\n");
 		return;
 	}
 
@@ -431,7 +428,7 @@
 err_remove_esrt:
 	kobject_put(esrt_kobj);
 err:
-	kfree(esrt);
+	memunmap(esrt);
 	esrt = NULL;
 	return error;
 }
diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
index 8e64b77..f377609 100644
--- a/drivers/firmware/efi/runtime-map.c
+++ b/drivers/firmware/efi/runtime-map.c
@@ -63,11 +63,11 @@
 	return map_attr->show(entry, buf);
 }
 
-static struct map_attribute map_type_attr = __ATTR_RO(type);
-static struct map_attribute map_phys_addr_attr   = __ATTR_RO(phys_addr);
-static struct map_attribute map_virt_addr_attr  = __ATTR_RO(virt_addr);
-static struct map_attribute map_num_pages_attr  = __ATTR_RO(num_pages);
-static struct map_attribute map_attribute_attr  = __ATTR_RO(attribute);
+static struct map_attribute map_type_attr = __ATTR_RO_MODE(type, 0400);
+static struct map_attribute map_phys_addr_attr = __ATTR_RO_MODE(phys_addr, 0400);
+static struct map_attribute map_virt_addr_attr = __ATTR_RO_MODE(virt_addr, 0400);
+static struct map_attribute map_num_pages_attr = __ATTR_RO_MODE(num_pages, 0400);
+static struct map_attribute map_attribute_attr = __ATTR_RO_MODE(attribute, 0400);
 
 /*
  * These are default attributes that are added for every memmap entry.
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
index 8fe8805..3d50bae 100644
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci.c
@@ -249,8 +249,9 @@
 }
 
 #ifdef CONFIG_CPU_IDLE
-static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
+static __maybe_unused DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
 
+#ifdef CONFIG_DT_IDLE_STATES
 static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
 {
 	int i, ret, count = 0;
@@ -303,6 +304,10 @@
 	kfree(psci_states);
 	return ret;
 }
+#else
+static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
+{ return 0; }
+#endif
 
 #ifdef CONFIG_ACPI
 #include <acpi/processor.h>
@@ -493,6 +498,8 @@
 static void __init psci_0_2_set_functions(void)
 {
 	pr_info("Using standard PSCI v0.2 function IDs\n");
+	psci_ops.get_version = psci_get_version;
+
 	psci_function_id[PSCI_FN_CPU_SUSPEND] =
 					PSCI_FN_NATIVE(0_2, CPU_SUSPEND);
 	psci_ops.cpu_suspend = psci_cpu_suspend;
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 5bddbd5..3fe6a21 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -90,21 +90,18 @@
 
 	altera_gc = gpiochip_get_data(irq_data_get_irq_chip_data(d));
 
-	if (type == IRQ_TYPE_NONE)
+	if (type == IRQ_TYPE_NONE) {
+		irq_set_handler_locked(d, handle_bad_irq);
 		return 0;
-	if (type == IRQ_TYPE_LEVEL_HIGH &&
-		altera_gc->interrupt_trigger == IRQ_TYPE_LEVEL_HIGH)
+	}
+	if (type == altera_gc->interrupt_trigger) {
+		if (type == IRQ_TYPE_LEVEL_HIGH)
+			irq_set_handler_locked(d, handle_level_irq);
+		else
+			irq_set_handler_locked(d, handle_simple_irq);
 		return 0;
-	if (type == IRQ_TYPE_EDGE_RISING &&
-		altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_RISING)
-		return 0;
-	if (type == IRQ_TYPE_EDGE_FALLING &&
-		altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_FALLING)
-		return 0;
-	if (type == IRQ_TYPE_EDGE_BOTH &&
-		altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_BOTH)
-		return 0;
-
+	}
+	irq_set_handler_locked(d, handle_bad_irq);
 	return -EINVAL;
 }
 
@@ -230,7 +227,6 @@
 	chained_irq_exit(chip, desc);
 }
 
-
 static void altera_gpio_irq_leveL_high_handler(struct irq_desc *desc)
 {
 	struct altera_gpio_chip *altera_gc;
@@ -310,7 +306,7 @@
 	altera_gc->interrupt_trigger = reg;
 
 	ret = gpiochip_irqchip_add(&altera_gc->mmchip.gc, &altera_irq_chip, 0,
-		handle_simple_irq, IRQ_TYPE_NONE);
+		handle_bad_irq, IRQ_TYPE_NONE);
 
 	if (ret) {
 		dev_err(&pdev->dev, "could not add irqchip\n");
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 1ef85b0..d27e936 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -126,7 +126,7 @@
 	int i;
 	int base;
 	int ngpio;
-	char chip_name[sizeof(GPIO_NAME) + 3];
+	char *chip_name;
 
 	if (gpio_mockup_params_nr < 2)
 		return -EINVAL;
@@ -146,8 +146,12 @@
 			ngpio = gpio_mockup_ranges[i * 2 + 1] - base;
 
 		if (ngpio >= 0) {
-			sprintf(chip_name, "%s-%c", GPIO_NAME,
-				pins_name_start + i);
+			chip_name = devm_kasprintf(dev, GFP_KERNEL,
+						   "%s-%c", GPIO_NAME,
+						   pins_name_start + i);
+			if (!chip_name)
+				return -ENOMEM;
+
 			ret = mockup_gpio_add(dev, &cntr[i],
 					      chip_name, base, ngpio);
 		} else {
diff --git a/drivers/gpu/drm/amd/acp/Makefile b/drivers/gpu/drm/amd/acp/Makefile
index 8363cb5..8a08e81 100644
--- a/drivers/gpu/drm/amd/acp/Makefile
+++ b/drivers/gpu/drm/amd/acp/Makefile
@@ -3,6 +3,4 @@
 # of AMDSOC/AMDGPU drm driver.
 # It provides the HW control for ACP related functionalities.
 
-subdir-ccflags-y += -I$(AMDACPPATH)/ -I$(AMDACPPATH)/include
-
 AMD_ACP_FILES := $(AMDACPPATH)/acp_hw.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index f8fdbd1..26afdff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1788,34 +1788,32 @@
 		WREG32(mmBIOS_SCRATCH_0 + i, adev->bios_scratch[i]);
 }
 
-/* Atom needs data in little endian format
- * so swap as appropriate when copying data to
- * or from atom. Note that atom operates on
- * dw units.
+/* Atom needs data in little endian format so swap as appropriate when copying
+ * data to or from atom. Note that atom operates on dw units.
+ *
+ * Use to_le=true when sending data to atom and provide at least
+ * ALIGN(num_bytes,4) bytes in the dst buffer.
+ *
+ * Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4)
+ * byes in the src buffer.
  */
 void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
 {
 #ifdef __BIG_ENDIAN
-	u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
-	u32 *dst32, *src32;
+	u32 src_tmp[5], dst_tmp[5];
 	int i;
+	u8 align_num_bytes = ALIGN(num_bytes, 4);
 
-	memcpy(src_tmp, src, num_bytes);
-	src32 = (u32 *)src_tmp;
-	dst32 = (u32 *)dst_tmp;
 	if (to_le) {
-		for (i = 0; i < ((num_bytes + 3) / 4); i++)
-			dst32[i] = cpu_to_le32(src32[i]);
-		memcpy(dst, dst_tmp, num_bytes);
+		memcpy(src_tmp, src, num_bytes);
+		for (i = 0; i < align_num_bytes / 4; i++)
+			dst_tmp[i] = cpu_to_le32(src_tmp[i]);
+		memcpy(dst, dst_tmp, align_num_bytes);
 	} else {
-		u8 dws = num_bytes & ~3;
-		for (i = 0; i < ((num_bytes + 3) / 4); i++)
-			dst32[i] = le32_to_cpu(src32[i]);
-		memcpy(dst, dst_tmp, dws);
-		if (num_bytes % 4) {
-			for (i = 0; i < (num_bytes % 4); i++)
-				dst[dws+i] = dst_tmp[dws+i];
-		}
+		memcpy(src_tmp, src, align_num_bytes);
+		for (i = 0; i < align_num_bytes / 4; i++)
+			dst_tmp[i] = le32_to_cpu(src_tmp[i]);
+		memcpy(dst, dst_tmp, num_bytes);
 	}
 #else
 	memcpy(dst, src, num_bytes);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index bfb4b91..f26d1fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -240,6 +240,8 @@
 	for (; i >= 0; i--)
 		drm_free_large(p->chunks[i].kdata);
 	kfree(p->chunks);
+	p->chunks = NULL;
+	p->nchunks = 0;
 put_ctx:
 	amdgpu_ctx_put(p->ctx);
 free_chunk:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index e41d4ba..ce9797b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2020,8 +2020,11 @@
 	}
 
 	r = amdgpu_late_init(adev);
-	if (r)
+	if (r) {
+		if (fbcon)
+			console_unlock();
 		return r;
+	}
 
 	/* pin cursors */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 743a12d..3bb2b9b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -648,7 +648,7 @@
 	uint32_t allocated = 0;
 	uint32_t tmp, handle = 0;
 	uint32_t *size = &tmp;
-	int i, r, idx = 0;
+	int i, r = 0, idx = 0;
 
 	r = amdgpu_cs_sysvm_access_required(p);
 	if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 968c426..4750375 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -744,7 +744,7 @@
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
                                    struct amdgpu_vm *vm)
 {
-	int r;
+	int r = 0;
 
 	r = amdgpu_vm_update_pd_or_shadow(adev, vm, true);
 	if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index c2bd9f0..6d75fd0 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -565,11 +565,8 @@
 
 static void dce_virtual_encoder_destroy(struct drm_encoder *encoder)
 {
-	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
-
-	kfree(amdgpu_encoder->enc_priv);
 	drm_encoder_cleanup(encoder);
-	kfree(amdgpu_encoder);
+	kfree(encoder);
 }
 
 static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 50f0cf2..7522f79 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -182,7 +182,7 @@
 		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
 
 		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
-		data &= ~0xffc00000;
+		data &= ~0x3ff;
 		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
 
 		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index 4477c55..a8b59b3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -850,9 +850,9 @@
 		const ATOM_Tonga_POWERPLAYTABLE *powerplay_table)
 {
 	hwmgr->platform_descriptor.overdriveLimit.engineClock =
-		le16_to_cpu(powerplay_table->ulMaxODEngineClock);
+		le32_to_cpu(powerplay_table->ulMaxODEngineClock);
 	hwmgr->platform_descriptor.overdriveLimit.memoryClock =
-		le16_to_cpu(powerplay_table->ulMaxODMemoryClock);
+		le32_to_cpu(powerplay_table->ulMaxODMemoryClock);
 
 	hwmgr->platform_descriptor.minOverdriveVDDC = 0;
 	hwmgr->platform_descriptor.maxOverdriveVDDC = 0;
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index ee07bb4..11f54df 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -348,14 +348,12 @@
 
 	BUG_ON(!hole_node->hole_follows || node->allocated);
 
-	if (adj_start < start)
-		adj_start = start;
-	if (adj_end > end)
-		adj_end = end;
-
 	if (mm->color_adjust)
 		mm->color_adjust(hole_node, color, &adj_start, &adj_end);
 
+	adj_start = max(adj_start, start);
+	adj_end = min(adj_end, end);
+
 	if (flags & DRM_MM_CREATE_TOP)
 		adj_start = adj_end - size;
 
@@ -566,17 +564,15 @@
 			       flags & DRM_MM_SEARCH_BELOW) {
 		u64 hole_size = adj_end - adj_start;
 
-		if (adj_start < start)
-			adj_start = start;
-		if (adj_end > end)
-			adj_end = end;
-
 		if (mm->color_adjust) {
 			mm->color_adjust(entry, color, &adj_start, &adj_end);
 			if (adj_end <= adj_start)
 				continue;
 		}
 
+		adj_start = max(adj_start, start);
+		adj_end = min(adj_end, end);
+
 		if (!check_free_hole(adj_start, adj_end, size, alignment))
 			continue;
 
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 6ca1f31..6dd09c3 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -46,7 +46,8 @@
 	BIT_CLKS_ENABLED,
 	BIT_IRQS_ENABLED,
 	BIT_WIN_UPDATED,
-	BIT_SUSPENDED
+	BIT_SUSPENDED,
+	BIT_REQUEST_UPDATE
 };
 
 struct decon_context {
@@ -315,6 +316,7 @@
 
 	/* window enable */
 	decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0);
+	set_bit(BIT_REQUEST_UPDATE, &ctx->flags);
 }
 
 static void decon_disable_plane(struct exynos_drm_crtc *crtc,
@@ -327,6 +329,7 @@
 		return;
 
 	decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0);
+	set_bit(BIT_REQUEST_UPDATE, &ctx->flags);
 }
 
 static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
@@ -340,8 +343,8 @@
 	for (i = ctx->first_win; i < WINDOWS_NR; i++)
 		decon_shadow_protect_win(ctx, i, false);
 
-	/* standalone update */
-	decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
+	if (test_and_clear_bit(BIT_REQUEST_UPDATE, &ctx->flags))
+		decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
 
 	if (ctx->out_type & IFTYPE_I80)
 		set_bit(BIT_WIN_UPDATED, &ctx->flags);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index f2ae72b..2abc47b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -246,6 +246,15 @@
 	if (IS_ERR(exynos_gem))
 		return exynos_gem;
 
+	if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) {
+		/*
+		 * when no IOMMU is available, all allocated buffers are
+		 * contiguous anyway, so drop EXYNOS_BO_NONCONTIG flag
+		 */
+		flags &= ~EXYNOS_BO_NONCONTIG;
+		DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
+	}
+
 	/* set memory type and cache attribute from user side. */
 	exynos_gem->flags = flags;
 
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index cc2fde2..c9eef0f 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -243,7 +243,6 @@
 		return PTR_ERR(fsl_dev->state);
 	}
 
-	clk_disable_unprepare(fsl_dev->pix_clk);
 	clk_disable_unprepare(fsl_dev->clk);
 
 	return 0;
@@ -266,6 +265,7 @@
 	if (fsl_dev->tcon)
 		fsl_tcon_bypass_enable(fsl_dev->tcon);
 	fsl_dcu_drm_init_planes(fsl_dev->drm);
+	enable_irq(fsl_dev->irq);
 	drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state);
 
 	console_lock();
@@ -273,7 +273,6 @@
 	console_unlock();
 
 	drm_kms_helper_poll_enable(fsl_dev->drm);
-	enable_irq(fsl_dev->irq);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index 7e7a4d4..0f563c9 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -521,9 +521,12 @@
 {
 	struct ade_crtc *acrtc = to_ade_crtc(crtc);
 	struct ade_hw_ctx *ctx = acrtc->ctx;
+	struct drm_display_mode *mode = &crtc->state->mode;
+	struct drm_display_mode *adj_mode = &crtc->state->adjusted_mode;
 
 	if (!ctx->power_on)
 		(void)ade_power_up(ctx);
+	ade_ldi_set_mode(acrtc, mode, adj_mode);
 }
 
 static void ade_crtc_atomic_flush(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 3ce9ba3..a19ec06 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -457,6 +457,7 @@
 
 struct intel_pipe_wm {
 	struct intel_wm_level wm[5];
+	struct intel_wm_level raw_wm[5];
 	uint32_t linetime;
 	bool fbc_wm_enabled;
 	bool pipe_enabled;
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 79aab9a..6769aa1 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -430,7 +430,9 @@
 gmbus_is_index_read(struct i2c_msg *msgs, int i, int num)
 {
 	return (i + 1 < num &&
-		!(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 &&
+		msgs[i].addr == msgs[i + 1].addr &&
+		!(msgs[i].flags & I2C_M_RD) &&
+		(msgs[i].len == 1 || msgs[i].len == 2) &&
 		(msgs[i + 1].flags & I2C_M_RD));
 }
 
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 277a802..49de476 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -27,7 +27,6 @@
 
 #include <linux/cpufreq.h>
 #include <drm/drm_plane_helper.h>
-#include <drm/drm_atomic_helper.h>
 #include "i915_drv.h"
 #include "intel_drv.h"
 #include "../../../platform/x86/intel_ips.h"
@@ -2018,9 +2017,9 @@
 				 const struct intel_crtc *intel_crtc,
 				 int level,
 				 struct intel_crtc_state *cstate,
-				 const struct intel_plane_state *pristate,
-				 const struct intel_plane_state *sprstate,
-				 const struct intel_plane_state *curstate,
+				 struct intel_plane_state *pristate,
+				 struct intel_plane_state *sprstate,
+				 struct intel_plane_state *curstate,
 				 struct intel_wm_level *result)
 {
 	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
@@ -2342,24 +2341,28 @@
 	struct intel_pipe_wm *pipe_wm;
 	struct drm_device *dev = state->dev;
 	const struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_plane *plane;
-	const struct drm_plane_state *plane_state;
-	const struct intel_plane_state *pristate = NULL;
-	const struct intel_plane_state *sprstate = NULL;
-	const struct intel_plane_state *curstate = NULL;
+	struct intel_plane *intel_plane;
+	struct intel_plane_state *pristate = NULL;
+	struct intel_plane_state *sprstate = NULL;
+	struct intel_plane_state *curstate = NULL;
 	int level, max_level = ilk_wm_max_level(dev), usable_level;
 	struct ilk_wm_maximums max;
 
 	pipe_wm = &cstate->wm.ilk.optimal;
 
-	drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, &cstate->base) {
-		const struct intel_plane_state *ps = to_intel_plane_state(plane_state);
+	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+		struct intel_plane_state *ps;
 
-		if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+		ps = intel_atomic_get_existing_plane_state(state,
+							   intel_plane);
+		if (!ps)
+			continue;
+
+		if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
 			pristate = ps;
-		else if (plane->type == DRM_PLANE_TYPE_OVERLAY)
+		else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
 			sprstate = ps;
-		else if (plane->type == DRM_PLANE_TYPE_CURSOR)
+		else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
 			curstate = ps;
 	}
 
@@ -2381,9 +2384,11 @@
 	if (pipe_wm->sprites_scaled)
 		usable_level = 0;
 
-	memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
 	ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
-			     pristate, sprstate, curstate, &pipe_wm->wm[0]);
+			     pristate, sprstate, curstate, &pipe_wm->raw_wm[0]);
+
+	memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
+	pipe_wm->wm[0] = pipe_wm->raw_wm[0];
 
 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 		pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
@@ -2393,8 +2398,8 @@
 
 	ilk_compute_wm_reg_maximums(dev, 1, &max);
 
-	for (level = 1; level <= usable_level; level++) {
-		struct intel_wm_level *wm = &pipe_wm->wm[level];
+	for (level = 1; level <= max_level; level++) {
+		struct intel_wm_level *wm = &pipe_wm->raw_wm[level];
 
 		ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
 				     pristate, sprstate, curstate, wm);
@@ -2404,10 +2409,13 @@
 		 * register maximums since such watermarks are
 		 * always invalid.
 		 */
-		if (!ilk_validate_wm_level(level, &max, wm)) {
-			memset(wm, 0, sizeof(*wm));
-			break;
-		}
+		if (level > usable_level)
+			continue;
+
+		if (ilk_validate_wm_level(level, &max, wm))
+			pipe_wm->wm[level] = *wm;
+		else
+			usable_level = level;
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index cf83f65..48dfc16 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -321,7 +321,8 @@
 {
 	struct mtk_drm_private *private = dev_get_drvdata(dev);
 
-	drm_put_dev(private->drm);
+	drm_dev_unregister(private->drm);
+	drm_dev_unref(private->drm);
 	private->drm = NULL;
 }
 
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index 79f2ec9..0a3fb24 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -29,22 +29,25 @@
 	struct dp_aux dp_aux;
 	struct dp_catalog_aux *catalog;
 	struct dp_aux_cfg *cfg;
-
 	struct mutex mutex;
 	struct completion comp;
+	struct drm_dp_aux drm_aux;
 
-	u32 aux_error_num;
-	u32 retry_cnt;
 	bool cmd_busy;
 	bool native;
 	bool read;
 	bool no_send_addr;
 	bool no_send_stop;
+
 	u32 offset;
 	u32 segment;
+	u32 aux_error_num;
+	u32 retry_cnt;
+
 	atomic_t aborted;
 
-	struct drm_dp_aux drm_aux;
+	u8 *dpcd;
+	u8 *edid;
 };
 
 static char *dp_aux_get_error(u32 aux_error)
@@ -320,6 +323,7 @@
  *
  * @aux: DP AUX private structure
  * @input_msg: input message from DRM upstream APIs
+ * @send_seg: true to send the EDID segment address to the sink
  *
  * return: void
  *
@@ -327,7 +331,7 @@
  * sinks that do not handle the i2c middle-of-transaction flag correctly.
  */
 static void dp_aux_transfer_helper(struct dp_aux_private *aux,
-		struct drm_dp_aux_msg *input_msg)
+		struct drm_dp_aux_msg *input_msg, bool send_seg)
 {
 	struct drm_dp_aux_msg helper_msg;
 	u32 const message_size = 0x10;
@@ -346,7 +350,7 @@
 	 * duplicate AUX transactions related to this while reading the
 	 * first 16 bytes of each block.
 	 */
-	if (!(aux->offset % edid_block_length))
+	if (!(aux->offset % edid_block_length) || !send_seg)
 		goto end;
 
 	aux->read = false;
@@ -388,26 +392,16 @@
 		aux->segment = 0x0; /* reset segment at end of block */
 }
 
-/*
- * This function does the real job to process an AUX transaction.
- * It will call aux_reset() function to reset the AUX channel,
- * if the waiting is timeout.
- */
-static ssize_t dp_aux_transfer(struct drm_dp_aux *drm_aux,
-		struct drm_dp_aux_msg *msg)
+static int dp_aux_transfer_ready(struct dp_aux_private *aux,
+		struct drm_dp_aux_msg *msg, bool send_seg)
 {
-	ssize_t ret;
+	int ret = 0;
 	int const aux_cmd_native_max = 16;
 	int const aux_cmd_i2c_max = 128;
-	int const retry_count = 5;
-	struct dp_aux_private *aux = container_of(drm_aux,
-		struct dp_aux_private, drm_aux);
-
-	mutex_lock(&aux->mutex);
 
 	if (atomic_read(&aux->aborted)) {
 		ret = -ETIMEDOUT;
-		goto unlock_exit;
+		goto error;
 	}
 
 	aux->native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ);
@@ -416,8 +410,7 @@
 	if ((msg->size == 0) || (msg->buffer == NULL)) {
 		msg->reply = aux->native ?
 			DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
-		ret = msg->size;
-		goto unlock_exit;
+		goto error;
 	}
 
 	/* msg sanity check */
@@ -426,14 +419,14 @@
 		pr_err("%s: invalid msg: size(%zu), request(%x)\n",
 			__func__, msg->size, msg->request);
 		ret = -EINVAL;
-		goto unlock_exit;
+		goto error;
 	}
 
 	dp_aux_update_offset_and_segment(aux, msg);
-	dp_aux_transfer_helper(aux, msg);
+
+	dp_aux_transfer_helper(aux, msg, send_seg);
 
 	aux->read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
-	aux->cmd_busy = true;
 
 	if (aux->read) {
 		aux->no_send_addr = true;
@@ -443,6 +436,98 @@
 		aux->no_send_stop = true;
 	}
 
+	aux->cmd_busy = true;
+error:
+	return ret;
+}
+
+static ssize_t dp_aux_transfer_debug(struct drm_dp_aux *drm_aux,
+		struct drm_dp_aux_msg *msg)
+{
+	u8 buf[SZ_64];
+	u32 timeout;
+	ssize_t ret;
+	struct dp_aux_private *aux = container_of(drm_aux,
+		struct dp_aux_private, drm_aux);
+
+	ret = dp_aux_transfer_ready(aux, msg, false);
+	if (ret)
+		goto end;
+
+	aux->aux_error_num = DP_AUX_ERR_NONE;
+
+	if (aux->native) {
+		if (aux->read && ((msg->address + msg->size) < SZ_1K)) {
+			aux->dp_aux.reg = msg->address;
+
+			reinit_completion(&aux->comp);
+			timeout = wait_for_completion_timeout(&aux->comp, HZ);
+			if (!timeout)
+				pr_err("aux timeout for 0x%x\n", msg->address);
+
+			aux->dp_aux.reg = 0xFFFF;
+
+			memcpy(msg->buffer, aux->dpcd + msg->address,
+				msg->size);
+			aux->aux_error_num = DP_AUX_ERR_NONE;
+		} else {
+			memset(msg->buffer, 0, msg->size);
+		}
+	} else {
+		if (aux->read && msg->address == 0x50) {
+			memcpy(msg->buffer,
+				aux->edid + aux->offset - 16,
+				msg->size);
+		}
+	}
+
+	if (aux->aux_error_num == DP_AUX_ERR_NONE) {
+		snprintf(buf, SZ_64, "[drm-dp] dbg: %5s %5s %5xh(%2zu): ",
+			aux->native ? "NATIVE" : "I2C",
+			aux->read ? "READ" : "WRITE",
+			msg->address, msg->size);
+
+		print_hex_dump(KERN_DEBUG, buf,
+			DUMP_PREFIX_NONE, 8, 1, msg->buffer, msg->size, false);
+
+		msg->reply = aux->native ?
+			DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
+	} else {
+		/* Reply defer to retry */
+		msg->reply = aux->native ?
+			DP_AUX_NATIVE_REPLY_DEFER : DP_AUX_I2C_REPLY_DEFER;
+	}
+
+	ret = msg->size;
+end:
+	return ret;
+}
+
+/*
+ * This function does the real job to process an AUX transaction.
+ * It will call aux_reset() function to reset the AUX channel,
+ * if the waiting is timeout.
+ */
+static ssize_t dp_aux_transfer(struct drm_dp_aux *drm_aux,
+		struct drm_dp_aux_msg *msg)
+{
+	u8 buf[SZ_64];
+	ssize_t ret;
+	int const retry_count = 5;
+	struct dp_aux_private *aux = container_of(drm_aux,
+		struct dp_aux_private, drm_aux);
+
+	mutex_lock(&aux->mutex);
+
+	ret = dp_aux_transfer_ready(aux, msg, true);
+	if (ret)
+		goto unlock_exit;
+
+	if (!aux->cmd_busy) {
+		ret = msg->size;
+		goto unlock_exit;
+	}
+
 	ret = dp_aux_cmd_fifo_tx(aux, msg);
 	if ((ret < 0) && aux->native && !atomic_read(&aux->aborted)) {
 		aux->retry_cnt++;
@@ -459,6 +544,14 @@
 		if (aux->read)
 			dp_aux_cmd_fifo_rx(aux, msg);
 
+		snprintf(buf, SZ_64, "[drm-dp] %5s %5s %5xh(%2zu): ",
+			aux->native ? "NATIVE" : "I2C",
+			aux->read ? "READ" : "WRITE",
+			msg->address, msg->size);
+
+		print_hex_dump(KERN_DEBUG, buf,
+			DUMP_PREFIX_NONE, 8, 1, msg->buffer, msg->size, false);
+
 		msg->reply = aux->native ?
 			DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
 	} else {
@@ -558,6 +651,41 @@
 	drm_dp_aux_unregister(&aux->drm_aux);
 }
 
+static void dp_aux_dpcd_updated(struct dp_aux *dp_aux)
+{
+	struct dp_aux_private *aux;
+
+	if (!dp_aux) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+	complete(&aux->comp);
+}
+
+static void dp_aux_set_sim_mode(struct dp_aux *dp_aux, bool en,
+		u8 *edid, u8 *dpcd)
+{
+	struct dp_aux_private *aux;
+
+	if (!dp_aux) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+	aux->edid = edid;
+	aux->dpcd = dpcd;
+
+	if (en)
+		aux->drm_aux.transfer = dp_aux_transfer_debug;
+	else
+		aux->drm_aux.transfer = dp_aux_transfer;
+}
+
 struct dp_aux *dp_aux_get(struct device *dev, struct dp_catalog_aux *catalog,
 		struct dp_aux_cfg *aux_cfg)
 {
@@ -586,6 +714,7 @@
 	aux->cfg = aux_cfg;
 	dp_aux = &aux->dp_aux;
 	aux->retry_cnt = 0;
+	aux->dp_aux.reg = 0xFFFF;
 
 	dp_aux->isr     = dp_aux_isr;
 	dp_aux->init    = dp_aux_init;
@@ -594,6 +723,8 @@
 	dp_aux->drm_aux_deregister = dp_aux_deregister;
 	dp_aux->reconfig = dp_aux_reconfig;
 	dp_aux->abort = dp_aux_abort_transaction;
+	dp_aux->dpcd_updated = dp_aux_dpcd_updated;
+	dp_aux->set_sim_mode = dp_aux_set_sim_mode;
 
 	return dp_aux;
 error:
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h
index e8cb1cc..bf52d57 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.h
+++ b/drivers/gpu/drm/msm/dp/dp_aux.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -18,6 +18,19 @@
 #include "dp_catalog.h"
 #include "drm_dp_helper.h"
 
+#define DP_STATE_NOTIFICATION_SENT          BIT(0)
+#define DP_STATE_TRAIN_1_STARTED            BIT(1)
+#define DP_STATE_TRAIN_1_SUCCEEDED          BIT(2)
+#define DP_STATE_TRAIN_1_FAILED             BIT(3)
+#define DP_STATE_TRAIN_2_STARTED            BIT(4)
+#define DP_STATE_TRAIN_2_SUCCEEDED          BIT(5)
+#define DP_STATE_TRAIN_2_FAILED             BIT(6)
+#define DP_STATE_CTRL_POWERED_ON            BIT(7)
+#define DP_STATE_CTRL_POWERED_OFF           BIT(8)
+#define DP_STATE_LINK_MAINTENANCE_STARTED   BIT(9)
+#define DP_STATE_LINK_MAINTENANCE_COMPLETED BIT(10)
+#define DP_STATE_LINK_MAINTENANCE_FAILED    BIT(11)
+
 enum dp_aux_error {
 	DP_AUX_ERR_NONE	= 0,
 	DP_AUX_ERR_ADDR	= -1,
@@ -29,6 +42,9 @@
 };
 
 struct dp_aux {
+	u32 reg;
+	u32 state;
+
 	struct drm_dp_aux *drm_aux;
 	int (*drm_aux_register)(struct dp_aux *aux);
 	void (*drm_aux_deregister)(struct dp_aux *aux);
@@ -37,6 +53,8 @@
 	void (*deinit)(struct dp_aux *aux);
 	void (*reconfig)(struct dp_aux *aux);
 	void (*abort)(struct dp_aux *aux);
+	void (*dpcd_updated)(struct dp_aux *aux);
+	void (*set_sim_mode)(struct dp_aux *aux, bool en, u8 *edid, u8 *dpcd);
 };
 
 struct dp_aux *dp_aux_get(struct device *dev, struct dp_catalog_aux *catalog,
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index 006f723..2e2887e 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -71,6 +71,8 @@
 	struct completion video_comp;
 
 	bool orientation;
+	bool power_on;
+
 	atomic_t aborted;
 
 	u32 pixel_rate;
@@ -128,6 +130,11 @@
 
 	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
 
+	if (!ctrl->power_on || atomic_read(&ctrl->aborted)) {
+		pr_err("CTRL off, return\n");
+		return;
+	}
+
 	reinit_completion(&ctrl->idle_comp);
 	dp_ctrl_state_ctrl(ctrl, ST_PUSH_IDLE);
 
@@ -813,6 +820,10 @@
 	u8 link_status[DP_LINK_STATUS_SIZE];
 	int const maximum_retries = 5;
 
+	ctrl->aux->state &= ~DP_STATE_TRAIN_1_FAILED;
+	ctrl->aux->state &= ~DP_STATE_TRAIN_1_SUCCEEDED;
+	ctrl->aux->state |= DP_STATE_TRAIN_1_STARTED;
+
 	dp_ctrl_state_ctrl(ctrl, 0);
 	/* Make sure to clear the current pattern before starting a new one */
 	wmb();
@@ -822,18 +833,18 @@
 		DP_LINK_SCRAMBLING_DISABLE); /* train_1 */
 	if (ret <= 0) {
 		ret = -EINVAL;
-		return ret;
+		goto end;
 	}
 
 	ret = dp_ctrl_update_vx_px(ctrl);
 	if (ret <= 0) {
 		ret = -EINVAL;
-		return ret;
+		goto end;
 	}
 
 	tries = 0;
 	old_v_level = ctrl->link->phy_params.v_level;
-	while (1) {
+	while (!atomic_read(&ctrl->aborted)) {
 		drm_dp_link_train_clock_recovery_delay(ctrl->panel->dpcd);
 
 		ret = dp_ctrl_read_link_status(ctrl, link_status);
@@ -872,6 +883,13 @@
 			break;
 		}
 	}
+end:
+	ctrl->aux->state &= ~DP_STATE_TRAIN_1_STARTED;
+
+	if (ret)
+		ctrl->aux->state |= DP_STATE_TRAIN_1_FAILED;
+	else
+		ctrl->aux->state |= DP_STATE_TRAIN_1_SUCCEEDED;
 
 	return ret;
 }
@@ -915,6 +933,10 @@
 	int const maximum_retries = 5;
 	u8 link_status[DP_LINK_STATUS_SIZE];
 
+	ctrl->aux->state &= ~DP_STATE_TRAIN_2_FAILED;
+	ctrl->aux->state &= ~DP_STATE_TRAIN_2_SUCCEEDED;
+	ctrl->aux->state |= DP_STATE_TRAIN_2_STARTED;
+
 	dp_ctrl_state_ctrl(ctrl, 0);
 	/* Make sure to clear the current pattern before starting a new one */
 	wmb();
@@ -927,14 +949,14 @@
 	ret = dp_ctrl_update_vx_px(ctrl);
 	if (ret <= 0) {
 		ret = -EINVAL;
-		return ret;
+		goto end;
 	}
 	ctrl->catalog->set_pattern(ctrl->catalog, pattern);
 	ret = dp_ctrl_train_pattern_set(ctrl,
 		pattern | DP_RECOVERED_CLOCK_OUT_EN);
 	if (ret <= 0) {
 		ret = -EINVAL;
-		return ret;
+		goto end;
 	}
 
 	do  {
@@ -960,8 +982,14 @@
 			ret = -EINVAL;
 			break;
 		}
-	} while (1);
+	} while (!atomic_read(&ctrl->aborted));
+end:
+	ctrl->aux->state &= ~DP_STATE_TRAIN_2_STARTED;
 
+	if (ret)
+		ctrl->aux->state |= DP_STATE_TRAIN_2_FAILED;
+	else
+		ctrl->aux->state |= DP_STATE_TRAIN_2_SUCCEEDED;
 	return ret;
 }
 
@@ -1102,8 +1130,7 @@
 	return ctrl->power->clk_enable(ctrl->power, DP_CTRL_PM, false);
 }
 
-static int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl,
-	bool flip, bool multi_func)
+static int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip, bool reset)
 {
 	struct dp_ctrl_private *ctrl;
 	struct dp_catalog_ctrl *catalog;
@@ -1118,7 +1145,7 @@
 	ctrl->orientation = flip;
 	catalog = ctrl->catalog;
 
-	if (!multi_func) {
+	if (reset) {
 		catalog->usb_reset(ctrl->catalog, flip);
 		catalog->phy_reset(ctrl->catalog);
 	}
@@ -1180,6 +1207,15 @@
 
 	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
 
+	if (!ctrl->power_on || atomic_read(&ctrl->aborted)) {
+		pr_err("CTRL off, return\n");
+		return -EINVAL;
+	}
+
+	ctrl->aux->state &= ~DP_STATE_LINK_MAINTENANCE_COMPLETED;
+	ctrl->aux->state &= ~DP_STATE_LINK_MAINTENANCE_FAILED;
+	ctrl->aux->state |= DP_STATE_LINK_MAINTENANCE_STARTED;
+
 	ctrl->dp_ctrl.push_idle(&ctrl->dp_ctrl);
 	ctrl->dp_ctrl.reset(&ctrl->dp_ctrl);
 
@@ -1219,6 +1255,13 @@
 		ret = dp_ctrl_setup_main_link(ctrl, true);
 	} while (ret == -EAGAIN);
 
+	ctrl->aux->state &= ~DP_STATE_LINK_MAINTENANCE_STARTED;
+
+	if (ret)
+		ctrl->aux->state |= DP_STATE_LINK_MAINTENANCE_FAILED;
+	else
+		ctrl->aux->state |= DP_STATE_LINK_MAINTENANCE_COMPLETED;
+
 	return ret;
 }
 
@@ -1341,7 +1384,6 @@
 	atomic_set(&ctrl->aborted, 0);
 	rate = ctrl->panel->link_info.rate;
 
-	ctrl->power->clk_enable(ctrl->power, DP_CORE_PM, true);
 	ctrl->catalog->hpd_config(ctrl->catalog, true);
 
 	if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
@@ -1396,6 +1438,7 @@
 	if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN)
 		dp_ctrl_send_phy_test_pattern(ctrl);
 
+	ctrl->power_on = true;
 	pr_debug("End-\n");
 
 end:
@@ -1419,6 +1462,7 @@
 
 	dp_ctrl_disable_mainlink_clocks(ctrl);
 
+	ctrl->power_on = false;
 	pr_debug("DP off done\n");
 }
 
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
index 229c779..31d8f07 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.h
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -23,7 +23,7 @@
 #include "dp_catalog.h"
 
 struct dp_ctrl {
-	int (*init)(struct dp_ctrl *dp_ctrl, bool flip, bool multi_func);
+	int (*init)(struct dp_ctrl *dp_ctrl, bool flip, bool reset);
 	void (*deinit)(struct dp_ctrl *dp_ctrl);
 	int (*on)(struct dp_ctrl *dp_ctrl);
 	void (*off)(struct dp_ctrl *dp_ctrl);
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c
index 0b3d903..a63b2c5 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.c
+++ b/drivers/gpu/drm/msm/dp/dp_debug.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -36,15 +36,52 @@
 	u8 *dpcd;
 	u32 dpcd_size;
 
+	int vdo;
+
 	struct dp_usbpd *usbpd;
 	struct dp_link *link;
 	struct dp_panel *panel;
+	struct dp_aux *aux;
 	struct drm_connector **connector;
 	struct device *dev;
-
+	struct work_struct sim_work;
 	struct dp_debug dp_debug;
 };
 
+static int dp_debug_get_edid_buf(struct dp_debug_private *debug)
+{
+	int rc = 0;
+
+	if (!debug->edid) {
+		debug->edid = devm_kzalloc(debug->dev, SZ_256, GFP_KERNEL);
+		if (!debug->edid) {
+			rc = -ENOMEM;
+			goto end;
+		}
+
+		debug->edid_size = SZ_256;
+	}
+end:
+	return rc;
+}
+
+static int dp_debug_get_dpcd_buf(struct dp_debug_private *debug)
+{
+	int rc = 0;
+
+	if (!debug->dpcd) {
+		debug->dpcd = devm_kzalloc(debug->dev, SZ_1K, GFP_KERNEL);
+		if (!debug->dpcd) {
+			rc = -ENOMEM;
+			goto end;
+		}
+
+		debug->dpcd_size = SZ_1K;
+	}
+end:
+	return rc;
+}
+
 static ssize_t dp_debug_write_edid(struct file *file,
 		const char __user *user_buff, size_t count, loff_t *ppos)
 {
@@ -75,7 +112,8 @@
 	edid_size = size / char_to_nib;
 	buf_t = buf;
 
-	memset(debug->edid, 0, debug->edid_size);
+	if (dp_debug_get_edid_buf(debug))
+		goto bail;
 
 	if (edid_size != debug->edid_size) {
 		pr_debug("clearing debug edid\n");
@@ -100,13 +138,13 @@
 		buf_t += char_to_nib;
 	}
 
-	print_hex_dump(KERN_DEBUG, "DEBUG EDID: ", DUMP_PREFIX_NONE,
-		16, 1, debug->edid, debug->edid_size, false);
-
 	edid = debug->edid;
 bail:
 	kfree(buf);
-	debug->panel->set_edid(debug->panel, edid);
+
+	if (!debug->dp_debug.sim_mode)
+		debug->panel->set_edid(debug->panel, edid);
+
 	return rc;
 }
 
@@ -119,8 +157,8 @@
 	size_t dpcd_size = 0;
 	size_t size = 0, dpcd_buf_index = 0;
 	ssize_t rc = count;
-
-	pr_debug("count=%zu\n", count);
+	char offset_ch[5];
+	u32 offset;
 
 	if (!debug)
 		return -ENODEV;
@@ -128,7 +166,7 @@
 	if (*ppos)
 		goto bail;
 
-	size = min_t(size_t, count, SZ_32);
+	size = min_t(size_t, count, SZ_2K);
 
 	buf = kzalloc(size, GFP_KERNEL);
 	if (!buf) {
@@ -139,16 +177,30 @@
 	if (copy_from_user(buf, user_buff, size))
 		goto bail;
 
-	dpcd_size = size / char_to_nib;
-	buf_t = buf;
+	memcpy(offset_ch, buf, 4);
+	offset_ch[4] = '\0';
 
-	memset(debug->dpcd, 0, debug->dpcd_size);
-
-	if (dpcd_size != debug->dpcd_size) {
-		pr_debug("clearing debug dpcd\n");
+	if (kstrtoint(offset_ch, 16, &offset)) {
+		pr_err("offset kstrtoint error\n");
 		goto bail;
 	}
 
+	if (dp_debug_get_dpcd_buf(debug))
+		goto bail;
+
+	if (offset == 0xFFFF) {
+		pr_err("clearing dpcd\n");
+		memset(debug->dpcd, 0, debug->dpcd_size);
+		goto bail;
+	}
+
+	size -= 4;
+
+	dpcd_size = size / char_to_nib;
+	buf_t = buf + 4;
+
+	dpcd_buf_index = offset;
+
 	while (dpcd_size--) {
 		char t[3];
 		int d;
@@ -167,16 +219,39 @@
 		buf_t += char_to_nib;
 	}
 
-	print_hex_dump(KERN_DEBUG, "DEBUG DPCD: ", DUMP_PREFIX_NONE,
-		8, 1, debug->dpcd, debug->dpcd_size, false);
-
 	dpcd = debug->dpcd;
 bail:
 	kfree(buf);
-	debug->panel->set_dpcd(debug->panel, dpcd);
+	if (debug->dp_debug.sim_mode)
+		debug->aux->dpcd_updated(debug->aux);
+	else
+		debug->panel->set_dpcd(debug->panel, dpcd);
+
 	return rc;
 }
 
+static ssize_t dp_debug_read_dpcd(struct file *file,
+		char __user *user_buff, size_t count, loff_t *ppos)
+{
+	struct dp_debug_private *debug = file->private_data;
+	char buf[SZ_8];
+	u32 len = 0;
+
+	if (!debug)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;
+
+	len += snprintf(buf, SZ_8, "0x%x\n", debug->aux->reg);
+
+	if (copy_to_user(user_buff, buf, len))
+		return -EFAULT;
+
+	*ppos += len;
+	return len;
+}
+
 static ssize_t dp_debug_write_hpd(struct file *file,
 		const char __user *user_buff, size_t count, loff_t *ppos)
 {
@@ -421,7 +496,6 @@
 	struct dp_debug_private *debug = file->private_data;
 	char *buf;
 	u32 len = 0, rc = 0;
-	u64 lclk = 0;
 	u32 max_size = SZ_4K;
 
 	if (!debug)
@@ -434,124 +508,60 @@
 	if (!buf)
 		return -ENOMEM;
 
-	rc = snprintf(buf + len, max_size, "\tname = %s\n", DEBUG_NAME);
+	rc = snprintf(buf + len, max_size, "\tstate=0x%x\n", debug->aux->state);
 	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
 		goto error;
 
-	rc = snprintf(buf + len, max_size,
-		"\tdp_panel\n\t\tmax_pclk_khz = %d\n",
-		debug->panel->max_pclk_khz);
-	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-		goto error;
-
-	rc = snprintf(buf + len, max_size,
-		"\tdrm_dp_link\n\t\trate = %u\n",
+	rc = snprintf(buf + len, max_size, "\tlink_rate=%u\n",
 		debug->panel->link_info.rate);
 	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
 		goto error;
 
-	rc = snprintf(buf + len, max_size,
-		"\t\tnum_lanes = %u\n",
+	rc = snprintf(buf + len, max_size, "\tnum_lanes=%u\n",
 		debug->panel->link_info.num_lanes);
 	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
 		goto error;
 
-	rc = snprintf(buf + len, max_size,
-		"\t\tcapabilities = %lu\n",
-		debug->panel->link_info.capabilities);
-	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-		goto error;
-
-	rc = snprintf(buf + len, max_size,
-		"\tdp_panel_info:\n\t\tactive = %dx%d\n",
+	rc = snprintf(buf + len, max_size, "\tresolution=%dx%d@%dHz\n",
 		debug->panel->pinfo.h_active,
-		debug->panel->pinfo.v_active);
-	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-		goto error;
-
-	rc = snprintf(buf + len, max_size,
-		"\t\tback_porch = %dx%d\n",
-		debug->panel->pinfo.h_back_porch,
-		debug->panel->pinfo.v_back_porch);
-	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-		goto error;
-
-	rc = snprintf(buf + len, max_size,
-		"\t\tfront_porch = %dx%d\n",
-		debug->panel->pinfo.h_front_porch,
-		debug->panel->pinfo.v_front_porch);
-	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-		goto error;
-
-	rc = snprintf(buf + len, max_size,
-		"\t\tsync_width = %dx%d\n",
-		debug->panel->pinfo.h_sync_width,
-		debug->panel->pinfo.v_sync_width);
-	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-		goto error;
-
-	rc = snprintf(buf + len, max_size,
-		"\t\tactive_low = %dx%d\n",
-		debug->panel->pinfo.h_active_low,
-		debug->panel->pinfo.v_active_low);
-	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-		goto error;
-
-	rc = snprintf(buf + len, max_size,
-		"\t\th_skew = %d\n",
-		debug->panel->pinfo.h_skew);
-	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-		goto error;
-
-	rc = snprintf(buf + len, max_size,
-		"\t\trefresh rate = %d\n",
+		debug->panel->pinfo.v_active,
 		debug->panel->pinfo.refresh_rate);
 	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
 		goto error;
 
-	rc = snprintf(buf + len, max_size,
-		"\t\tpixel clock khz = %d\n",
+	rc = snprintf(buf + len, max_size, "\tpclock=%dKHz\n",
 		debug->panel->pinfo.pixel_clk_khz);
 	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
 		goto error;
 
-	rc = snprintf(buf + len, max_size,
-		"\t\tbpp = %d\n",
+	rc = snprintf(buf + len, max_size, "\tbpp=%d\n",
 		debug->panel->pinfo.bpp);
 	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
 		goto error;
 
 	/* Link Information */
-	rc = snprintf(buf + len, max_size,
-		"\tdp_link:\n\t\ttest_requested = %d\n",
-		debug->link->sink_request);
+	rc = snprintf(buf + len, max_size, "\ttest_req=%s\n",
+		dp_link_get_test_name(debug->link->sink_request));
 	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
 		goto error;
 
 	rc = snprintf(buf + len, max_size,
-		"\t\tlane_count = %d\n", debug->link->link_params.lane_count);
+		"\tlane_count=%d\n", debug->link->link_params.lane_count);
 	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
 		goto error;
 
 	rc = snprintf(buf + len, max_size,
-		"\t\tbw_code = %d\n", debug->link->link_params.bw_code);
-	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
-		goto error;
-
-	lclk = drm_dp_bw_code_to_link_rate(
-			debug->link->link_params.bw_code) * 1000;
-	rc = snprintf(buf + len, max_size,
-		"\t\tlclk = %lld\n", lclk);
+		"\tbw_code=%d\n", debug->link->link_params.bw_code);
 	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
 		goto error;
 
 	rc = snprintf(buf + len, max_size,
-		"\t\tv_level = %d\n", debug->link->phy_params.v_level);
+		"\tv_level=%d\n", debug->link->phy_params.v_level);
 	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
 		goto error;
 
 	rc = snprintf(buf + len, max_size,
-		"\t\tp_level = %d\n", debug->link->phy_params.p_level);
+		"\tp_level=%d\n", debug->link->phy_params.p_level);
 	if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
 		goto error;
 
@@ -665,6 +675,8 @@
 		pr_err("invalid input\n");
 		len = -EINVAL;
 	}
+
+	debug->panel->setup_hdr(debug->panel, &c_state->hdr_meta);
 end:
 	return len;
 }
@@ -814,6 +826,90 @@
 	return rc;
 }
 
+static ssize_t dp_debug_write_sim(struct file *file,
+		const char __user *user_buff, size_t count, loff_t *ppos)
+{
+	struct dp_debug_private *debug = file->private_data;
+	char buf[SZ_8];
+	size_t len = 0;
+	int sim;
+
+	if (!debug)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;
+
+	/* Leave room for termination char */
+	len = min_t(size_t, count, SZ_8 - 1);
+	if (copy_from_user(buf, user_buff, len))
+		goto end;
+
+	buf[len] = '\0';
+
+	if (kstrtoint(buf, 10, &sim) != 0)
+		goto end;
+
+	if (sim) {
+		if (dp_debug_get_edid_buf(debug))
+			goto end;
+
+		if (dp_debug_get_dpcd_buf(debug))
+			goto error;
+	} else {
+		if (debug->edid) {
+			devm_kfree(debug->dev, debug->edid);
+			debug->edid = NULL;
+		}
+
+		if (debug->dpcd) {
+			devm_kfree(debug->dev, debug->dpcd);
+			debug->dpcd = NULL;
+		}
+	}
+
+	debug->dp_debug.sim_mode = !!sim;
+
+	debug->aux->set_sim_mode(debug->aux, debug->dp_debug.sim_mode,
+			debug->edid, debug->dpcd);
+end:
+	return len;
+error:
+	devm_kfree(debug->dev, debug->edid);
+	return len;
+}
+
+static ssize_t dp_debug_write_attention(struct file *file,
+		const char __user *user_buff, size_t count, loff_t *ppos)
+{
+	struct dp_debug_private *debug = file->private_data;
+	char buf[SZ_8];
+	size_t len = 0;
+	int vdo;
+
+	if (!debug)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;
+
+	/* Leave room for termination char */
+	len = min_t(size_t, count, SZ_8 - 1);
+	if (copy_from_user(buf, user_buff, len))
+		goto end;
+
+	buf[len] = '\0';
+
+	if (kstrtoint(buf, 10, &vdo) != 0)
+		goto end;
+
+	debug->vdo = vdo;
+
+	schedule_work(&debug->sim_work);
+end:
+	return len;
+}
+
 static const struct file_operations dp_debug_fops = {
 	.open = simple_open,
 	.read = dp_debug_read_info,
@@ -838,6 +934,7 @@
 static const struct file_operations dpcd_fops = {
 	.open = simple_open,
 	.write = dp_debug_write_dpcd,
+	.read = dp_debug_read_dpcd,
 };
 
 static const struct file_operations connected_fops = {
@@ -863,6 +960,16 @@
 	.read = dp_debug_read_hdr,
 };
 
+static const struct file_operations sim_fops = {
+	.open = simple_open,
+	.write = dp_debug_write_sim,
+};
+
+static const struct file_operations attention_fops = {
+	.open = simple_open,
+	.write = dp_debug_write_attention,
+};
+
 static int dp_debug_init(struct dp_debug *dp_debug)
 {
 	int rc = 0;
@@ -965,6 +1072,26 @@
 		goto error_remove_dir;
 	}
 
+	file = debugfs_create_file("sim", 0644, dir,
+		debug, &sim_fops);
+
+	if (IS_ERR_OR_NULL(file)) {
+		rc = PTR_ERR(file);
+		pr_err("[%s] debugfs sim failed, rc=%d\n",
+			DEBUG_NAME, rc);
+		goto error_remove_dir;
+	}
+
+	file = debugfs_create_file("attention", 0644, dir,
+		debug, &attention_fops);
+
+	if (IS_ERR_OR_NULL(file)) {
+		rc = PTR_ERR(file);
+		pr_err("[%s] debugfs attention failed, rc=%d\n",
+			DEBUG_NAME, rc);
+		goto error_remove_dir;
+	}
+
 	return 0;
 
 error_remove_dir:
@@ -975,9 +1102,17 @@
 	return rc;
 }
 
+static void dp_debug_sim_work(struct work_struct *work)
+{
+	struct dp_debug_private *debug =
+		container_of(work, typeof(*debug), sim_work);
+
+	debug->usbpd->simulate_attention(debug->usbpd, debug->vdo);
+}
+
 struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
 			struct dp_usbpd *usbpd, struct dp_link *link,
-			struct drm_connector **connector)
+			struct dp_aux *aux, struct drm_connector **connector)
 {
 	int rc = 0;
 	struct dp_debug_private *debug;
@@ -995,28 +1130,13 @@
 		goto error;
 	}
 
-	debug->edid = devm_kzalloc(dev, SZ_256, GFP_KERNEL);
-	if (!debug->edid) {
-		rc = -ENOMEM;
-		kfree(debug);
-		goto error;
-	}
-
-	debug->edid_size = SZ_256;
-
-	debug->dpcd = devm_kzalloc(dev, SZ_16, GFP_KERNEL);
-	if (!debug->dpcd) {
-		rc = -ENOMEM;
-		kfree(debug);
-		goto error;
-	}
-
-	debug->dpcd_size = SZ_16;
+	INIT_WORK(&debug->sim_work, dp_debug_sim_work);
 
 	debug->dp_debug.debug_en = false;
 	debug->usbpd = usbpd;
 	debug->link = link;
 	debug->panel = panel;
+	debug->aux = aux;
 	debug->dev = dev;
 	debug->connector = connector;
 
@@ -1061,7 +1181,11 @@
 
 	dp_debug_deinit(dp_debug);
 
-	devm_kfree(debug->dev, debug->edid);
-	devm_kfree(debug->dev, debug->dpcd);
+	if (debug->edid)
+		devm_kfree(debug->dev, debug->edid);
+
+	if (debug->dpcd)
+		devm_kfree(debug->dev, debug->dpcd);
+
 	devm_kfree(debug->dev, debug);
 }
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.h b/drivers/gpu/drm/msm/dp/dp_debug.h
index 3b2d23e..5a5a786 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.h
+++ b/drivers/gpu/drm/msm/dp/dp_debug.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -18,6 +18,7 @@
 #include "dp_panel.h"
 #include "dp_link.h"
 #include "dp_usbpd.h"
+#include "dp_aux.h"
 
 /**
  * struct dp_debug
@@ -29,6 +30,7 @@
  */
 struct dp_debug {
 	bool debug_en;
+	bool sim_mode;
 	bool psm_enabled;
 	int aspect_ratio;
 	int vdisplay;
@@ -52,7 +54,7 @@
  */
 struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
 			struct dp_usbpd *usbpd, struct dp_link *link,
-			struct drm_connector **connector);
+			struct dp_aux *aux, struct drm_connector **connector);
 /**
  * dp_debug_put()
  *
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 51cc57b..053ee20 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -22,6 +22,8 @@
 #include <linux/of_irq.h>
 #include <linux/hdcp_qseecom.h>
 
+#include "sde_connector.h"
+
 #include "msm_drv.h"
 #include "dp_usbpd.h"
 #include "dp_parser.h"
@@ -87,11 +89,12 @@
 
 	struct workqueue_struct *wq;
 	struct delayed_work hdcp_cb_work;
-	struct work_struct connect_work;
+	struct delayed_work connect_work;
 	struct work_struct attention_work;
 	struct mutex hdcp_mutex;
 	struct mutex session_lock;
 	int hdcp_status;
+	unsigned long audio_status;
 };
 
 static const struct of_device_id dp_dt_match[] = {
@@ -99,6 +102,11 @@
 	{}
 };
 
+static bool dp_display_framework_ready(struct dp_display_private *dp)
+{
+	return dp->dp_display.post_open ? false : true;
+}
+
 static inline bool dp_display_is_hdcp_enabled(struct dp_display_private *dp)
 {
 	return dp->hdcp.feature_enabled &&
@@ -448,35 +456,39 @@
 	}
 
 	/* if cable is already connected, send notification */
-	if (dp_display->is_connected)
-		dp_display_send_hpd_event(dp);
+	if (dp->usbpd->hpd_high)
+		queue_delayed_work(dp->wq, &dp->connect_work, HZ * 10);
 	else
 		dp_display->post_open = NULL;
-
 }
 
 static int dp_display_send_hpd_notification(struct dp_display_private *dp,
 		bool hpd)
 {
+	u32 timeout_sec;
+	int ret = 0;
+
 	dp->dp_display.is_connected = hpd;
 
-	/* in case, framework is not yet up, don't notify hpd */
-	if (dp->dp_display.post_open)
-		return 0;
+	if  (dp_display_framework_ready(dp))
+		timeout_sec = 5;
+	else
+		timeout_sec = 10;
+
+	dp->aux->state |= DP_STATE_NOTIFICATION_SENT;
 
 	reinit_completion(&dp->notification_comp);
 	dp_display_send_hpd_event(dp);
 
-	if (!wait_for_completion_timeout(&dp->notification_comp, HZ * 5)) {
+	if (!wait_for_completion_timeout(&dp->notification_comp,
+						HZ * timeout_sec)) {
 		pr_warn("%s timeout\n", hpd ? "connect" : "disconnect");
-		/* cancel any pending request */
-		dp->ctrl->abort(dp->ctrl);
-		dp->aux->abort(dp->aux);
-
-		return -EINVAL;
+		ret = -EINVAL;
 	}
 
-	return 0;
+	dp->aux->state &= ~DP_STATE_NOTIFICATION_SENT;
+
+	return ret;
 }
 
 static int dp_display_process_hpd_high(struct dp_display_private *dp)
@@ -494,28 +506,24 @@
 	if (!dp->dp_display.connector)
 		return 0;
 
-	rc = dp->panel->read_sink_caps(dp->panel, dp->dp_display.connector);
+	rc = dp->panel->read_sink_caps(dp->panel,
+		dp->dp_display.connector, dp->usbpd->multi_func);
 	if (rc) {
-		if (rc == -ETIMEDOUT) {
-			pr_err("Sink cap read failed, skip notification\n");
+		/*
+		 * ETIMEDOUT --> cable may have been removed
+		 * ENOTCONN --> no downstream device connected
+		 */
+		if (rc == -ETIMEDOUT || rc == -ENOTCONN)
 			goto end;
-		} else {
+		else
 			goto notify;
-		}
-	}
-
-	dp->link->process_request(dp->link);
-
-	if (dp_display_is_sink_count_zero(dp)) {
-		pr_debug("no downstream devices connected\n");
-		rc = -EINVAL;
-		goto end;
 	}
 
 	edid = dp->panel->edid_ctrl->edid;
 
 	dp->audio_supported = drm_detect_monitor_audio(edid);
 
+	dp->link->process_request(dp->link);
 	dp->panel->handle_sink_request(dp->panel);
 
 	dp->dp_display.max_pclk_khz = dp->parser->max_pclk_khz;
@@ -529,6 +537,7 @@
 static void dp_display_host_init(struct dp_display_private *dp)
 {
 	bool flip = false;
+	bool reset;
 
 	if (dp->core_initialized) {
 		pr_debug("DP core already initialized\n");
@@ -538,8 +547,10 @@
 	if (dp->usbpd->orientation == ORIENTATION_CC2)
 		flip = true;
 
+	reset = dp->debug->sim_mode ? false : !dp->usbpd->multi_func;
+
 	dp->power->init(dp->power, flip);
-	dp->ctrl->init(dp->ctrl, flip, dp->usbpd->multi_func);
+	dp->ctrl->init(dp->ctrl, flip, reset);
 	enable_irq(dp->irq);
 	dp->core_initialized = true;
 }
@@ -555,6 +566,7 @@
 	dp->power->deinit(dp->power);
 	disable_irq(dp->irq);
 	dp->core_initialized = false;
+	dp->aux->state = 0;
 }
 
 static int dp_display_process_hpd_low(struct dp_display_private *dp)
@@ -572,9 +584,9 @@
 	if (dp->audio_supported)
 		dp->audio->off(dp->audio);
 
-	rc = dp_display_send_hpd_notification(dp, false);
+	dp->audio_status = -ENODEV;
 
-	dp->aux->deinit(dp->aux);
+	rc = dp_display_send_hpd_notification(dp, false);
 
 	dp->panel->video_test = false;
 
@@ -601,8 +613,9 @@
 
 	dp_display_host_init(dp);
 
-	if (dp->usbpd->hpd_high)
-		queue_work(dp->wq, &dp->connect_work);
+	/* check for hpd high and framework ready */
+	if  (dp->usbpd->hpd_high && dp_display_framework_ready(dp))
+		queue_delayed_work(dp->wq, &dp->connect_work, 0);
 end:
 	return rc;
 }
@@ -619,6 +632,8 @@
 
 	dp->ctrl->push_idle(dp->ctrl);
 	dp->ctrl->off(dp->ctrl);
+	dp->panel->deinit(dp->panel);
+	dp->aux->deinit(dp->aux);
 	dp->power_on = false;
 }
 
@@ -658,6 +673,13 @@
 		goto end;
 	}
 
+	/*
+	 * In case cable/dongle is disconnected during adb shell stop,
+	 * reset psm_enabled flag to false since it is no more needed
+	 */
+	if (dp->dp_display.post_open)
+		dp->debug->psm_enabled = false;
+
 	if (dp->debug->psm_enabled)
 		dp->link->psm_config(dp->link, &dp->panel->link_info, true);
 
@@ -666,6 +688,7 @@
 	dp->aux->abort(dp->aux);
 
 	/* wait for idle state */
+	cancel_delayed_work(&dp->connect_work);
 	flush_workqueue(dp->wq);
 
 	dp_display_handle_disconnect(dp);
@@ -677,13 +700,13 @@
 {
 	mutex_lock(&dp->audio->ops_lock);
 
-	if (dp->audio_supported)
+	if (dp->audio_supported && !IS_ERR_VALUE(dp->audio_status))
 		dp->audio->off(dp->audio);
 
 	dp->ctrl->link_maintenance(dp->ctrl);
 
-	if (dp->audio_supported)
-		dp->audio->on(dp->audio);
+	if (dp->audio_supported && !IS_ERR_VALUE(dp->audio_status))
+		dp->audio_status = dp->audio->on(dp->audio);
 
 	mutex_unlock(&dp->audio->ops_lock);
 }
@@ -706,7 +729,7 @@
 			return;
 		}
 
-		queue_work(dp->wq, &dp->connect_work);
+		queue_delayed_work(dp->wq, &dp->connect_work, 0);
 		return;
 	}
 
@@ -752,17 +775,19 @@
 		return -ENODEV;
 	}
 
-	if (dp->usbpd->hpd_irq && dp->usbpd->hpd_high) {
+	if (dp->usbpd->hpd_irq && dp->usbpd->hpd_high &&
+	    dp->power_on) {
 		dp->link->process_request(dp->link);
 		queue_work(dp->wq, &dp->attention_work);
 	} else if (dp->usbpd->hpd_high) {
-		queue_work(dp->wq, &dp->connect_work);
+		queue_delayed_work(dp->wq, &dp->connect_work, 0);
 	} else {
 		/* cancel any pending request */
 		dp->ctrl->abort(dp->ctrl);
 		dp->aux->abort(dp->aux);
 
 		/* wait for idle state */
+		cancel_delayed_work(&dp->connect_work);
 		flush_workqueue(dp->wq);
 
 		dp_display_handle_disconnect(dp);
@@ -773,7 +798,8 @@
 
 static void dp_display_connect_work(struct work_struct *work)
 {
-	struct dp_display_private *dp = container_of(work,
+	struct delayed_work *dw = to_delayed_work(work);
+	struct dp_display_private *dp = container_of(dw,
 			struct dp_display_private, connect_work);
 
 	if (dp->dp_display.is_connected) {
@@ -919,7 +945,7 @@
 	}
 
 	dp->debug = dp_debug_get(dev, dp->panel, dp->usbpd,
-				dp->link, &dp->dp_display.connector);
+				dp->link, dp->aux, &dp->dp_display.connector);
 	if (IS_ERR(dp->debug)) {
 		rc = PTR_ERR(dp->debug);
 		pr_err("failed to initialize debug, rc = %d\n", rc);
@@ -1069,7 +1095,7 @@
 	if (dp->audio_supported) {
 		dp->audio->bw_code = dp->link->link_params.bw_code;
 		dp->audio->lane_count = dp->link->link_params.lane_count;
-		dp->audio->on(dp->audio);
+		dp->audio_status = dp->audio->on(dp->audio);
 	}
 
 	dp_display_update_hdcp_info(dp);
@@ -1080,9 +1106,12 @@
 		dp->hdcp_status = HDCP_STATE_AUTHENTICATING;
 		queue_delayed_work(dp->wq, &dp->hdcp_cb_work, HZ / 2);
 	}
+
+	dp->panel->setup_hdr(dp->panel, NULL);
 end:
 	/* clear framework event notifier */
 	dp_display->post_open = NULL;
+	dp->aux->state |= DP_STATE_CTRL_POWERED_ON;
 
 	complete_all(&dp->notification_comp);
 	mutex_unlock(&dp->session_lock);
@@ -1115,6 +1144,14 @@
 			dp->hdcp.ops->off(dp->hdcp.data);
 	}
 
+	if (dp->usbpd->hpd_high && dp->usbpd->alt_mode_cfg_done) {
+		if (dp->audio_supported)
+			dp->audio->off(dp->audio);
+
+		dp->link->psm_config(dp->link, &dp->panel->link_info, true);
+		dp->debug->psm_enabled = true;
+	}
+
 	dp->ctrl->push_idle(dp->ctrl);
 end:
 	mutex_unlock(&dp->session_lock);
@@ -1124,6 +1161,8 @@
 static int dp_display_disable(struct dp_display *dp_display)
 {
 	struct dp_display_private *dp;
+	struct drm_connector *connector;
+	struct sde_connector_state *c_state;
 
 	if (!dp_display) {
 		pr_err("invalid input\n");
@@ -1131,6 +1170,8 @@
 	}
 
 	dp = container_of(dp_display, struct dp_display_private, dp_display);
+	connector = dp->dp_display.connector;
+	c_state = to_sde_connector_state(connector->state);
 
 	mutex_lock(&dp->session_lock);
 
@@ -1141,9 +1182,27 @@
 
 	dp->ctrl->off(dp->ctrl);
 	dp->panel->deinit(dp->panel);
+	dp->aux->deinit(dp->aux);
 
+	connector->hdr_eotf = 0;
+	connector->hdr_metadata_type_one = 0;
+	connector->hdr_max_luminance = 0;
+	connector->hdr_avg_luminance = 0;
+	connector->hdr_min_luminance = 0;
+
+	memset(&c_state->hdr_meta, 0, sizeof(c_state->hdr_meta));
+
+	/*
+	 * In case of framework reboot, the DP off sequence is executed without
+	 * any notification from driver. Initialize post_open callback to notify
+	 * DP connection once framework restarts.
+	 */
+	if (dp->usbpd->hpd_high && dp->usbpd->alt_mode_cfg_done) {
+		dp_display->post_open = dp_display_post_open;
+		dp->dp_display.is_connected = false;
+	}
 	dp->power_on = false;
-
+	dp->aux->state = DP_STATE_CTRL_POWERED_OFF;
 end:
 	complete_all(&dp->notification_comp);
 	mutex_unlock(&dp->session_lock);
@@ -1251,8 +1310,7 @@
 	return ret;
 }
 
-
-static int dp_display_pre_kickoff(struct dp_display *dp_display,
+static int dp_display_config_hdr(struct dp_display *dp_display,
 			struct drm_msm_ext_hdr_metadata *hdr)
 {
 	int rc = 0;
@@ -1265,8 +1323,7 @@
 
 	dp = container_of(dp_display, struct dp_display_private, dp_display);
 
-	if (hdr->hdr_supported && dp->panel->hdr_supported(dp->panel))
-		rc = dp->panel->setup_hdr(dp->panel, hdr);
+	rc = dp->panel->setup_hdr(dp->panel, hdr);
 
 	return rc;
 }
@@ -1280,7 +1337,7 @@
 	}
 
 	INIT_DELAYED_WORK(&dp->hdcp_cb_work, dp_display_hdcp_cb_work);
-	INIT_WORK(&dp->connect_work, dp_display_connect_work);
+	INIT_DELAYED_WORK(&dp->connect_work, dp_display_connect_work);
 	INIT_WORK(&dp->attention_work, dp_display_attention_work);
 
 	return 0;
@@ -1307,6 +1364,7 @@
 
 	dp->pdev = pdev;
 	dp->name = "drm_dp";
+	dp->audio_status = -ENODEV;
 
 	rc = dp_display_create_workqueue(dp);
 	if (rc) {
@@ -1331,7 +1389,7 @@
 	g_dp_display->get_debug     = dp_get_debug;
 	g_dp_display->post_open     = dp_display_post_open;
 	g_dp_display->post_init     = dp_display_post_init;
-	g_dp_display->pre_kickoff   = dp_display_pre_kickoff;
+	g_dp_display->config_hdr    = dp_display_config_hdr;
 
 	rc = component_add(&pdev->dev, &dp_display_comp_ops);
 	if (rc) {
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
index c55e6c8..266de5f 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -43,7 +43,7 @@
 	int (*request_irq)(struct dp_display *dp_display);
 	struct dp_debug *(*get_debug)(struct dp_display *dp_display);
 	void (*post_open)(struct dp_display *dp_display);
-	int (*pre_kickoff)(struct dp_display *dp_display,
+	int (*config_hdr)(struct dp_display *dp_display,
 				struct drm_msm_ext_hdr_metadata *hdr_meta);
 	void (*post_init)(struct dp_display *dp_display);
 };
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index 7746b8e..b834230 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -276,18 +276,17 @@
 	.mode_set     = dp_bridge_mode_set,
 };
 
-int dp_connector_pre_kickoff(struct drm_connector *connector,
-		void *display,
-		struct msm_display_kickoff_params *params)
+int dp_connector_config_hdr(void *display,
+	struct sde_connector_state *c_state)
 {
 	struct dp_display *dp = display;
 
-	if (!connector || !display || !params) {
+	if (!display || !c_state) {
 		pr_err("invalid params\n");
 		return -EINVAL;
 	}
 
-	return dp->pre_kickoff(dp, params->hdr_meta);
+	return dp->config_hdr(dp, &c_state->hdr_meta);
 }
 
 int dp_connector_post_init(struct drm_connector *connector, void *display)
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.h b/drivers/gpu/drm/msm/dp/dp_drm.h
index 89b0a7e..3ca10c2 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.h
+++ b/drivers/gpu/drm/msm/dp/dp_drm.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -32,15 +32,13 @@
 };
 
 /**
- * dp_connector_pre_kickoff - callback to perform pre kickoff initialization
- * @connector: Pointer to drm connector structure
+ * dp_connector_config_hdr - callback to configure HDR
  * @display: Pointer to private display handle
- * @params: Pointer to kickoff parameters
+ * @c_state: connect state data
  * Returns: Zero on success
  */
-int dp_connector_pre_kickoff(struct drm_connector *connector,
-		void *display,
-		struct msm_display_kickoff_params *params);
+int dp_connector_config_hdr(void *display,
+		struct sde_connector_state *c_state);
 
 /**
  * dp_connector_post_init - callback to perform additional initialization steps
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index 3ca247c..05629dd 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -724,24 +724,6 @@
 	return ret;
 }
 
-static char *dp_link_get_test_name(u32 test_requested)
-{
-	switch (test_requested) {
-	case DP_TEST_LINK_TRAINING:
-		return DP_LINK_ENUM_STR(DP_TEST_LINK_TRAINING);
-	case DP_TEST_LINK_VIDEO_PATTERN:
-		return DP_LINK_ENUM_STR(DP_TEST_LINK_VIDEO_PATTERN);
-	case DP_TEST_LINK_EDID_READ:
-		return DP_LINK_ENUM_STR(DP_TEST_LINK_EDID_READ);
-	case DP_TEST_LINK_PHY_TEST_PATTERN:
-		return DP_LINK_ENUM_STR(DP_TEST_LINK_PHY_TEST_PATTERN);
-	case DP_TEST_LINK_AUDIO_PATTERN:
-		return DP_LINK_ENUM_STR(DP_TEST_LINK_AUDIO_PATTERN);
-	default:
-		return "unknown";
-	}
-}
-
 /**
  * dp_link_is_video_audio_test_requested() - checks for audio/video link request
  * @link: link requested by the sink
diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h
index 6f79b6a..46d30a7 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.h
+++ b/drivers/gpu/drm/msm/dp/dp_link.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -83,6 +83,24 @@
 	u32 bw_code;
 };
 
+static inline char *dp_link_get_test_name(u32 test_requested)
+{
+	switch (test_requested) {
+	case DP_TEST_LINK_TRAINING:
+		return DP_LINK_ENUM_STR(DP_TEST_LINK_TRAINING);
+	case DP_TEST_LINK_VIDEO_PATTERN:
+		return DP_LINK_ENUM_STR(DP_TEST_LINK_VIDEO_PATTERN);
+	case DP_TEST_LINK_EDID_READ:
+		return DP_LINK_ENUM_STR(DP_TEST_LINK_EDID_READ);
+	case DP_TEST_LINK_PHY_TEST_PATTERN:
+		return DP_LINK_ENUM_STR(DP_TEST_LINK_PHY_TEST_PATTERN);
+	case DP_TEST_LINK_AUDIO_PATTERN:
+		return DP_LINK_ENUM_STR(DP_TEST_LINK_AUDIO_PATTERN);
+	default:
+		return "unknown";
+	}
+}
+
 struct dp_link {
 	u32 sink_request;
 	u32 test_response;
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index b5dd9bc..7132699 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -104,7 +104,7 @@
 static const u8 product_desc[16] = {83, 110, 97, 112, 100, 114, 97, 103,
 	111, 110, 0, 0, 0, 0, 0, 0};
 
-static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
+static int dp_panel_read_dpcd(struct dp_panel *dp_panel, bool multi_func)
 {
 	int rlen, rc = 0;
 	struct dp_panel_private *panel;
@@ -136,9 +136,6 @@
 
 			goto end;
 		}
-
-		print_hex_dump(KERN_DEBUG, "[drm-dp] SINK DPCD: ",
-			DUMP_PREFIX_NONE, 8, 1, dp_panel->dpcd, rlen, false);
 	}
 
 	rlen = drm_dp_dpcd_read(panel->aux->drm_aux,
@@ -176,6 +173,10 @@
 	link_info->num_lanes = dp_panel->dpcd[DP_MAX_LANE_COUNT] &
 				DP_MAX_LANE_COUNT_MASK;
 
+	if (multi_func)
+		link_info->num_lanes = min_t(unsigned int,
+			link_info->num_lanes, 2);
+
 	pr_debug("lane_count=%d\n", link_info->num_lanes);
 
 	if (drm_dp_enhanced_frame_cap(dpcd))
@@ -271,51 +272,49 @@
 static int dp_panel_read_edid(struct dp_panel *dp_panel,
 	struct drm_connector *connector)
 {
+	int ret = 0;
 	struct dp_panel_private *panel;
 
 	if (!dp_panel) {
 		pr_err("invalid input\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto end;
 	}
 
 	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
 
 	if (panel->custom_edid) {
 		pr_debug("skip edid read in debug mode\n");
-		return 0;
+		goto end;
 	}
 
 	sde_get_edid(connector, &panel->aux->drm_aux->ddc,
 		(void **)&dp_panel->edid_ctrl);
 	if (!dp_panel->edid_ctrl->edid) {
 		pr_err("EDID read failed\n");
-	} else {
-		u8 *buf = (u8 *)dp_panel->edid_ctrl->edid;
-		u32 size = buf[0x7E] ? 256 : 128;
-
-		print_hex_dump(KERN_DEBUG, "[drm-dp] SINK EDID: ",
-			DUMP_PREFIX_NONE, 16, 1, buf, size, false);
-
-		return 0;
+		ret = -EINVAL;
+		goto end;
 	}
-
-	return -EINVAL;
+end:
+	return ret;
 }
 
 static int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
-	struct drm_connector *connector)
+	struct drm_connector *connector, bool multi_func)
 {
-	int rc = 0;
+	int rc = 0, rlen, count, downstream_ports;
+	const int count_len = 1;
 	struct dp_panel_private *panel;
 
 	if (!dp_panel || !connector) {
 		pr_err("invalid input\n");
-		return -EINVAL;
+		rc = -EINVAL;
+		goto end;
 	}
 
 	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
 
-	rc = dp_panel_read_dpcd(dp_panel);
+	rc = dp_panel_read_dpcd(dp_panel, multi_func);
 	if (rc || !is_link_rate_valid(drm_dp_link_rate_to_bw_code(
 		dp_panel->link_info.rate)) || !is_lane_count_valid(
 		dp_panel->link_info.num_lanes) ||
@@ -323,19 +322,35 @@
 		dp_panel->max_bw_code)) {
 		if ((rc == -ETIMEDOUT) || (rc == -ENODEV)) {
 			pr_err("DPCD read failed, return early\n");
-			return rc;
+			goto end;
 		}
 		pr_err("panel dpcd read failed/incorrect, set default params\n");
 		dp_panel_set_default_link_params(dp_panel);
 	}
 
+	downstream_ports = dp_panel->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+				DP_DWN_STRM_PORT_PRESENT;
+
+	if (downstream_ports) {
+		rlen = drm_dp_dpcd_read(panel->aux->drm_aux, DP_SINK_COUNT,
+				&count, count_len);
+		if (rlen == count_len) {
+			count = DP_GET_SINK_COUNT(count);
+			if (!count) {
+				pr_err("no downstream ports connected\n");
+				rc = -ENOTCONN;
+				goto end;
+			}
+		}
+	}
+
 	rc = dp_panel_read_edid(dp_panel, connector);
 	if (rc) {
 		pr_err("panel edid read failed, set failsafe mode\n");
 		return rc;
 	}
-
-	return 0;
+end:
+	return rc;
 }
 
 static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel,
@@ -644,6 +659,7 @@
 {
 	int rc = 0;
 	struct dp_panel_private *panel;
+	struct dp_catalog_hdr_data *hdr;
 
 	if (!dp_panel) {
 		pr_err("invalid input\n");
@@ -651,11 +667,13 @@
 	}
 
 	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+	hdr = &panel->catalog->hdr_data;
 
 	if (!panel->custom_edid)
 		sde_free_edid((void **)&dp_panel->edid_ctrl);
 
 	memset(&dp_panel->pinfo, 0, sizeof(dp_panel->pinfo));
+	memset(&hdr->hdr_meta, 0, sizeof(hdr->hdr_meta));
 	panel->panel_on = false;
 
 	return rc;
@@ -702,30 +720,6 @@
 		(panel->minor >= 4 || panel->vscext_supported);
 }
 
-static bool dp_panel_is_validate_hdr_state(struct dp_panel_private *panel,
-		struct drm_msm_ext_hdr_metadata *hdr_meta)
-{
-	struct drm_msm_ext_hdr_metadata *panel_hdr_meta =
-			&panel->catalog->hdr_data.hdr_meta;
-
-	if (!hdr_meta)
-		goto end;
-
-	/* bail out if HDR not active */
-	if (hdr_meta->hdr_state == HDR_DISABLED &&
-	    panel->hdr_state == HDR_DISABLED)
-		goto end;
-
-	/* bail out if same meta data is received */
-	if (hdr_meta->hdr_state == HDR_ENABLED &&
-		panel_hdr_meta->eotf == hdr_meta->eotf)
-		goto end;
-
-	return true;
-end:
-	return false;
-}
-
 static int dp_panel_setup_hdr(struct dp_panel *dp_panel,
 		struct drm_msm_ext_hdr_metadata *hdr_meta)
 {
@@ -740,14 +734,18 @@
 	}
 
 	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+	hdr = &panel->catalog->hdr_data;
 
-	if (!dp_panel_is_validate_hdr_state(panel, hdr_meta))
-		goto end;
+	/* use cached meta data in case meta data not provided */
+	if (!hdr_meta) {
+		if (hdr->hdr_meta.hdr_state)
+			goto cached;
+		else
+			goto end;
+	}
 
 	panel->hdr_state = hdr_meta->hdr_state;
 
-	hdr = &panel->catalog->hdr_data;
-
 	hdr->ext_header_byte0 = 0x00;
 	hdr->ext_header_byte1 = 0x04;
 	hdr->ext_header_byte2 = 0x1F;
@@ -782,8 +780,9 @@
 		memcpy(&hdr->hdr_meta, hdr_meta, sizeof(hdr->hdr_meta));
 	else
 		memset(&hdr->hdr_meta, 0, sizeof(hdr->hdr_meta));
-
-	panel->catalog->config_hdr(panel->catalog, panel->hdr_state);
+cached:
+	if (panel->panel_on)
+		panel->catalog->config_hdr(panel->catalog, panel->hdr_state);
 end:
 	return rc;
 }
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index 2583f61..6c2e186 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -80,7 +80,7 @@
 	int (*deinit)(struct dp_panel *dp_panel);
 	int (*timing_cfg)(struct dp_panel *dp_panel);
 	int (*read_sink_caps)(struct dp_panel *dp_panel,
-		struct drm_connector *connector);
+		struct drm_connector *connector, bool multi_func);
 	u32 (*get_min_req_link_rate)(struct dp_panel *dp_panel);
 	u32 (*get_mode_bpp)(struct dp_panel *dp_panel, u32 mode_max_bpp,
 			u32 mode_pclk_khz);
diff --git a/drivers/gpu/drm/msm/dp/dp_usbpd.c b/drivers/gpu/drm/msm/dp/dp_usbpd.c
index 2bd3bd4..42eb9b0 100644
--- a/drivers/gpu/drm/msm/dp/dp_usbpd.c
+++ b/drivers/gpu/drm/msm/dp/dp_usbpd.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -426,6 +426,28 @@
 	return rc;
 }
 
+static int dp_usbpd_simulate_attention(struct dp_usbpd *dp_usbpd, int vdo)
+{
+	int rc = 0;
+	struct dp_usbpd_private *pd;
+
+	if (!dp_usbpd) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	pd = container_of(dp_usbpd, struct dp_usbpd_private, dp_usbpd);
+
+	pd->vdo = vdo;
+	dp_usbpd_get_status(pd);
+
+	if (pd->dp_cb && pd->dp_cb->attention)
+		pd->dp_cb->attention(pd->dev);
+error:
+	return rc;
+}
+
 struct dp_usbpd *dp_usbpd_get(struct device *dev, struct dp_usbpd_cb *cb)
 {
 	int rc = 0;
@@ -475,6 +497,7 @@
 
 	dp_usbpd = &usbpd->dp_usbpd;
 	dp_usbpd->simulate_connect = dp_usbpd_simulate_connect;
+	dp_usbpd->simulate_attention = dp_usbpd_simulate_attention;
 
 	return dp_usbpd;
 error:
diff --git a/drivers/gpu/drm/msm/dp/dp_usbpd.h b/drivers/gpu/drm/msm/dp/dp_usbpd.h
index e70ad7d..0a7efd9 100644
--- a/drivers/gpu/drm/msm/dp/dp_usbpd.h
+++ b/drivers/gpu/drm/msm/dp/dp_usbpd.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -50,6 +50,7 @@
  * @alt_mode_cfg_done: bool to specify alt mode status
  * @debug_en: bool to specify debug mode
  * @simulate_connect: simulate disconnect or connect for debug mode
+ * @simulate_attention: simulate attention messages for debug mode
  */
 struct dp_usbpd {
 	enum dp_usbpd_port port;
@@ -65,6 +66,7 @@
 	bool debug_en;
 
 	int (*simulate_connect)(struct dp_usbpd *dp_usbpd, bool hpd);
+	int (*simulate_attention)(struct dp_usbpd *dp_usbpd, int vdo);
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c b/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
index 61406fe..38eba8d 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
@@ -285,13 +285,17 @@
 	return rc;
 }
 
-static int dsi_link_clk_set_rate(struct dsi_link_clks *l_clks)
+static int dsi_link_clk_set_rate(struct dsi_link_clks *l_clks, int index)
 {
 	int rc = 0;
 	struct dsi_clk_mngr *mngr;
 
-	mngr = container_of(l_clks, struct dsi_clk_mngr, link_clks[0]);
+	if (index >= MAX_DSI_CTRL) {
+		pr_err("Invalid DSI ctrl index\n");
+		return -EINVAL;
+	}
 
+	mngr = container_of(l_clks, struct dsi_clk_mngr, link_clks[index]);
 	if (mngr->is_cont_splash_enabled)
 		return 0;
 	/*
@@ -443,11 +447,16 @@
 /**
  * dsi_link_clk_start() - enable dsi link clocks
  */
-int dsi_link_clk_start(struct dsi_link_clks *clks)
+static int dsi_link_clk_start(struct dsi_link_clks *clks, int index)
 {
 	int rc = 0;
 
-	rc = dsi_link_clk_set_rate(clks);
+	if (index >= MAX_DSI_CTRL) {
+		pr_err("Invalid DSI ctrl index\n");
+		return -EINVAL;
+	}
+
+	rc = dsi_link_clk_set_rate(clks, index);
 	if (rc) {
 		pr_err("failed to set clk rates, rc = %d\n", rc);
 		goto error;
@@ -561,7 +570,7 @@
 
 	m_clks = &clks[master_ndx];
 
-	rc = dsi_link_clk_start(m_clks);
+	rc = dsi_link_clk_start(m_clks, master_ndx);
 	if (rc) {
 		pr_err("failed to turn on master clocks, rc=%d\n", rc);
 		goto error;
@@ -573,7 +582,7 @@
 		if (!clk || (clk == m_clks))
 			continue;
 
-		rc = dsi_link_clk_start(clk);
+		rc = dsi_link_clk_start(clk, i);
 		if (rc) {
 			pr_err("failed to turn on clocks, rc=%d\n", rc);
 			goto error_disable_master;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
index 77df585..ca58896 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -247,7 +247,7 @@
 	u32 cmd_buffer_iova;
 	u32 cmd_len;
 	void *vaddr;
-	u32 secure_mode;
+	bool secure_mode;
 
 	/* Debug Information */
 	struct dentry *debugfs_root;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index 3d99172..c8edb09 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -262,6 +262,7 @@
 		display_ctrl->ctrl->cmd_buffer_size = display->cmd_buffer_size;
 		display_ctrl->ctrl->cmd_buffer_iova = display->cmd_buffer_iova;
 		display_ctrl->ctrl->vaddr = display->vaddr;
+		display_ctrl->ctrl->secure_mode = is_detach;
 	}
 
 end:
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
index fd50256..a1e4685 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
@@ -320,9 +320,11 @@
 
 		cur_mode = crtc_state->crtc->mode;
 
+		/* No DMS/VRR when drm pipeline is changing */
 		if (!drm_mode_equal(&cur_mode, adjusted_mode) &&
-			(!(dsi_mode.dsi_mode_flags &
-				DSI_MODE_FLAG_VRR)))
+			(!(dsi_mode.dsi_mode_flags & DSI_MODE_FLAG_VRR)) &&
+			(!crtc_state->active_changed ||
+			 display->is_cont_splash_enabled))
 			dsi_mode.dsi_mode_flags |= DSI_MODE_FLAG_DMS;
 	}
 
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index 7671496..0ffece3 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -2143,7 +2143,7 @@
 		priv_info->phy_timing_len = len;
 	};
 
-	mode->pixel_clk_khz = (DSI_H_TOTAL(&mode->timing) *
+	mode->pixel_clk_khz = (mode->timing.h_active *
 			DSI_V_TOTAL(&mode->timing) *
 			mode->timing.refresh_rate) / 1000;
 	return rc;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 00cf225..c1a670d 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -1436,42 +1436,6 @@
 }
 
 /**
- * msm_drv_framebuffer_remove - remove and unreference a framebuffer object
- * @fb: framebuffer to remove
- */
-void msm_drv_framebuffer_remove(struct drm_framebuffer *fb)
-{
-	struct drm_device *dev;
-
-	if (!fb)
-		return;
-
-	dev = fb->dev;
-
-	WARN_ON(!list_empty(&fb->filp_head));
-
-	drm_framebuffer_unreference(fb);
-}
-
-struct msm_drv_rmfb2_work {
-	struct work_struct work;
-	struct list_head fbs;
-};
-
-static void msm_drv_rmfb2_work_fn(struct work_struct *w)
-{
-	struct msm_drv_rmfb2_work *arg = container_of(w, typeof(*arg), work);
-
-	while (!list_empty(&arg->fbs)) {
-		struct drm_framebuffer *fb =
-			list_first_entry(&arg->fbs, typeof(*fb), filp_head);
-
-		list_del_init(&fb->filp_head);
-		msm_drv_framebuffer_remove(fb);
-	}
-}
-
-/**
  * msm_ioctl_rmfb2 - remove an FB from the configuration
  * @dev: drm device for the ioctl
  * @data: data pointer for the ioctl
@@ -1514,25 +1478,7 @@
 	list_del_init(&fb->filp_head);
 	mutex_unlock(&file_priv->fbs_lock);
 
-	/*
-	 * we now own the reference that was stored in the fbs list
-	 *
-	 * drm_framebuffer_remove may fail with -EINTR on pending signals,
-	 * so run this in a separate stack as there's no way to correctly
-	 * handle this after the fb is already removed from the lookup table.
-	 */
-	if (drm_framebuffer_read_refcount(fb) > 1) {
-		struct msm_drv_rmfb2_work arg;
-
-		INIT_WORK_ONSTACK(&arg.work, msm_drv_rmfb2_work_fn);
-		INIT_LIST_HEAD(&arg.fbs);
-		list_add_tail(&fb->filp_head, &arg.fbs);
-
-		schedule_work(&arg.work);
-		flush_work(&arg.work);
-		destroy_work_on_stack(&arg.work);
-	} else
-		drm_framebuffer_unreference(fb);
+	drm_framebuffer_unreference(fb);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c
index 42aea7e..3e084d5 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.c
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published by
@@ -613,6 +613,7 @@
 	if (!node)
 		return;
 
+	spin_lock_irqsave(&node->state_lock, flags);
 	if (node->state == IRQ_DISABLED) {
 		ret = sde_core_irq_enable(kms, &irq_idx, 1);
 		if (ret)
@@ -620,6 +621,7 @@
 		else
 			node->state = IRQ_ENABLED;
 	}
+	spin_unlock_irqrestore(&node->state_lock, flags);
 }
 
 static void sde_cp_crtc_setfeature(struct sde_cp_node *prop_node,
@@ -1623,6 +1625,7 @@
 	struct sde_crtc *crtc;
 	int i;
 	int irq_idx, ret;
+	unsigned long flags;
 	struct sde_cp_node prop_node;
 	struct sde_crtc_irq_info *node = NULL;
 
@@ -1673,6 +1676,7 @@
 
 	if (!en) {
 		if (node) {
+			spin_lock_irqsave(&node->state_lock, flags);
 			if (node->state == IRQ_ENABLED) {
 				ret = sde_core_irq_disable(kms, &irq_idx, 1);
 				if (ret)
@@ -1683,6 +1687,7 @@
 			} else {
 				node->state = IRQ_NOINIT;
 			}
+			spin_unlock_irqrestore(&node->state_lock, flags);
 		} else {
 			DRM_ERROR("failed to get node from crtc event list\n");
 		}
@@ -1701,6 +1706,7 @@
 
 	if (node) {
 		/* device resume or resume from IPC cases */
+		spin_lock_irqsave(&node->state_lock, flags);
 		if (node->state == IRQ_DISABLED || node->state == IRQ_NOINIT) {
 			ret = sde_core_irq_enable(kms, &irq_idx, 1);
 			if (ret) {
@@ -1712,6 +1718,7 @@
 				node->state = IRQ_ENABLED;
 			}
 		}
+		spin_unlock_irqrestore(&node->state_lock, flags);
 	} else {
 		/* request from userspace to register the event
 		 * in this case, node has not been added into the event list
@@ -1807,14 +1814,17 @@
 		return;
 	}
 
+	spin_lock_irqsave(&node->state_lock, flags);
 	if (node->state == IRQ_ENABLED) {
 		if (sde_core_irq_disable_nolock(kms, irq_idx)) {
 			DRM_ERROR("failed to disable irq %d, ret %d\n",
 				irq_idx, ret);
+			spin_unlock_irqrestore(&node->state_lock, flags);
 			return;
 		}
 		node->state = IRQ_DISABLED;
 	}
+	spin_unlock_irqrestore(&node->state_lock, flags);
 
 	/* lock histogram buffer */
 	for (i = 0; i < crtc->num_mixers; i++) {
@@ -1886,6 +1896,7 @@
 	struct sde_crtc *crtc;
 	struct sde_crtc_irq_info *node = NULL;
 	int i, irq_idx, ret = 0;
+	unsigned long flags;
 
 	if (!crtc_drm || !hist_irq) {
 		DRM_ERROR("invalid crtc %pK irq %pK\n", crtc_drm, hist_irq);
@@ -1928,6 +1939,7 @@
 	if (!en) {
 		if (node) {
 			/* device suspend case or suspend to IPC cases */
+			spin_lock_irqsave(&node->state_lock, flags);
 			if (node->state == IRQ_ENABLED) {
 				ret = sde_core_irq_disable(kms, &irq_idx, 1);
 				if (ret)
@@ -1938,6 +1950,7 @@
 			} else {
 				node->state = IRQ_NOINIT;
 			}
+			spin_unlock_irqrestore(&node->state_lock, flags);
 		} else {
 			DRM_ERROR("failed to get node from crtc event list\n");
 		}
@@ -1957,6 +1970,7 @@
 
 	if (node) {
 		/* device resume or resume from IPC cases */
+		spin_lock_irqsave(&node->state_lock, flags);
 		if (node->state == IRQ_DISABLED || node->state == IRQ_NOINIT) {
 			ret = sde_core_irq_enable(kms, &irq_idx, 1);
 			if (ret) {
@@ -1968,6 +1982,7 @@
 				node->state = IRQ_ENABLED;
 			}
 		}
+		spin_unlock_irqrestore(&node->state_lock, flags);
 	} else {
 		/* request from userspace to register the event
 		 * in this case, node has not been added into the event list
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index cfe4419..655390b 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -471,8 +471,11 @@
 	}
 	c_conn->last_panel_power_mode = mode;
 
-	if (mode != SDE_MODE_DPMS_ON)
+	if (mode != SDE_MODE_DPMS_ON) {
+		mutex_unlock(&c_conn->lock);
 		sde_connector_schedule_status_work(connector, false);
+		mutex_lock(&c_conn->lock);
+	}
 
 	return rc;
 }
@@ -939,34 +942,38 @@
 	struct sde_connector_state *c_state,
 	void *usr_ptr)
 {
+	int rc = 0;
 	struct drm_connector *connector;
 	struct drm_msm_ext_hdr_metadata *hdr_meta;
 	int i;
 
 	if (!c_conn || !c_state) {
 		SDE_ERROR_CONN(c_conn, "invalid args\n");
-		return -EINVAL;
+		rc = -EINVAL;
+		goto end;
 	}
 
 	connector = &c_conn->base;
 
 	if (!connector->hdr_supported) {
 		SDE_ERROR_CONN(c_conn, "sink doesn't support HDR\n");
-		return -ENOTSUPP;
+		rc = -ENOTSUPP;
+		goto end;
 	}
 
 	memset(&c_state->hdr_meta, 0, sizeof(c_state->hdr_meta));
 
 	if (!usr_ptr) {
 		SDE_DEBUG_CONN(c_conn, "hdr metadata cleared\n");
-		return 0;
+		goto end;
 	}
 
 	if (copy_from_user(&c_state->hdr_meta,
 		(void __user *)usr_ptr,
 			sizeof(*hdr_meta))) {
 		SDE_ERROR_CONN(c_conn, "failed to copy hdr metadata\n");
-		return -EFAULT;
+		rc = -EFAULT;
+		goto end;
 	}
 
 	hdr_meta = &c_state->hdr_meta;
@@ -989,7 +996,10 @@
 				   hdr_meta->display_primaries_y[i]);
 	}
 
-	return 0;
+	if (c_conn->ops.config_hdr)
+		rc = c_conn->ops.config_hdr(c_conn->display, c_state);
+end:
+	return rc;
 }
 
 static int sde_connector_atomic_set_property(struct drm_connector *connector,
@@ -1722,7 +1732,7 @@
 			conn->base.base.id, conn->encoder->base.id);
 	panel_dead = true;
 	event.type = DRM_EVENT_PANEL_DEAD;
-	event.length = sizeof(u32);
+	event.length = sizeof(bool);
 	msm_mode_object_event_notify(&conn->base.base,
 		conn->base.dev, &event, (u8 *)&panel_dead);
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index 7cf09b7..9c37869 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -241,6 +241,16 @@
 	 */
 	int (*cmd_transfer)(void *display, const char *cmd_buf,
 			u32 cmd_buf_len);
+
+	/**
+	 * config_hdr - configure HDR
+	 * @display: Pointer to private display handle
+	 * @c_state: Pointer to connector state
+	 * Returns: Zero on success, negative error code for failures
+	 */
+	int (*config_hdr)(void *display,
+		struct sde_connector_state *c_state);
+
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.c b/drivers/gpu/drm/msm/sde/sde_core_irq.c
index a6f22c9..442104b 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_irq.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_irq.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -513,6 +513,7 @@
 	struct msm_drm_private *priv;
 	int i;
 	int rc;
+	unsigned long irq_flags;
 
 	if (!sde_kms) {
 		SDE_ERROR("invalid sde_kms\n");
@@ -543,6 +544,7 @@
 	sde_disable_all_irqs(sde_kms);
 	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
 
+	spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
 	kfree(sde_kms->irq_obj.irq_cb_tbl);
 	kfree(sde_kms->irq_obj.enable_counts);
 	kfree(sde_kms->irq_obj.irq_counts);
@@ -550,6 +552,7 @@
 	sde_kms->irq_obj.enable_counts = NULL;
 	sde_kms->irq_obj.irq_counts = NULL;
 	sde_kms->irq_obj.total_irqs = 0;
+	spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
 }
 
 static void sde_core_irq_mask(struct irq_data *irqd)
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 52e291d..1ee75c4 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -1403,6 +1403,58 @@
 	}
 }
 
+/**
+ * _sde_crtc_calc_inline_prefill - calculate rotator start prefill
+ * @crtc: Pointer to drm crtc
+ * return: prefill time in lines
+ */
+static u32 _sde_crtc_calc_inline_prefill(struct drm_crtc *crtc)
+{
+	struct sde_kms *sde_kms;
+
+	if (!crtc) {
+		SDE_ERROR("invalid parameters\n");
+		return 0;
+	}
+
+	sde_kms = _sde_crtc_get_kms(crtc);
+	if (!sde_kms || !sde_kms->catalog) {
+		SDE_ERROR("invalid kms\n");
+		return 0;
+	}
+
+	return sde_kms->catalog->sbuf_prefill + sde_kms->catalog->sbuf_headroom;
+}
+
+uint64_t sde_crtc_get_sbuf_clk(struct drm_crtc_state *state)
+{
+	struct sde_crtc_state *cstate;
+	u64 tmp;
+
+	if (!state) {
+		SDE_ERROR("invalid crtc state\n");
+		return 0;
+	}
+	cstate = to_sde_crtc_state(state);
+
+	/*
+	 * Select the max of the current and previous frame's user mode
+	 * clock setting so that reductions in clock voting don't take effect
+	 * until the current frame has completed.
+	 *
+	 * If the sbuf_clk_rate[] FIFO hasn't yet been updated in this commit
+	 * cycle (as part of the CRTC's atomic check), compare the current
+	 * clock value against sbuf_clk_rate[1] instead of comparing the
+	 * sbuf_clk_rate[0]/sbuf_clk_rate[1] values.
+	 */
+	if (cstate->sbuf_clk_shifted)
+		tmp = cstate->sbuf_clk_rate[0];
+	else
+		tmp = sde_crtc_get_property(cstate, CRTC_PROP_ROT_CLK);
+
+	return max_t(u64, cstate->sbuf_clk_rate[1], tmp);
+}
+
 static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
 		struct drm_crtc_state *old_state, struct sde_crtc *sde_crtc,
 		struct sde_crtc_mixer *mixer)
@@ -1418,12 +1470,11 @@
 	struct sde_hw_stage_cfg *stage_cfg;
 	struct sde_rect plane_crtc_roi;
 
-	u32 flush_mask, flush_sbuf;
+	u32 flush_mask, flush_sbuf, prefill;
 	uint32_t stage_idx, lm_idx;
 	int zpos_cnt[SDE_STAGE_MAX + 1] = { 0 };
 	int i;
 	bool bg_alpha_enable = false;
-	u32 prefill = 0;
 
 	if (!sde_crtc || !crtc->state || !mixer) {
 		SDE_ERROR("invalid sde_crtc or mixer\n");
@@ -1435,7 +1486,7 @@
 	stage_cfg = &sde_crtc->stage_cfg;
 	cstate = to_sde_crtc_state(crtc->state);
 
-	cstate->sbuf_prefill_line = 0;
+	cstate->sbuf_prefill_line = _sde_crtc_calc_inline_prefill(crtc);
 	sde_crtc->sbuf_flush_mask_old = sde_crtc->sbuf_flush_mask_all;
 	sde_crtc->sbuf_flush_mask_all = 0x0;
 	sde_crtc->sbuf_flush_mask_delta = 0x0;
@@ -1453,8 +1504,9 @@
 		pstate = to_sde_plane_state(state);
 		fb = state->fb;
 
-		prefill = sde_plane_rot_calc_prefill(plane);
-		if (prefill > cstate->sbuf_prefill_line)
+		/* assume all rotated planes report the same prefill amount */
+		prefill = sde_plane_rot_get_prefill(plane);
+		if (prefill)
 			cstate->sbuf_prefill_line = prefill;
 
 		sde_plane_get_ctl_flush(plane, ctl, &flush_mask, &flush_sbuf);
@@ -2459,9 +2511,6 @@
 							SDE_EVTLOG_FUNC_CASE3);
 		}
 
-		if (fevent->event & SDE_ENCODER_FRAME_EVENT_DONE)
-			sde_core_perf_crtc_update(crtc, 0, false);
-
 		if (fevent->event & (SDE_ENCODER_FRAME_EVENT_DONE
 					| SDE_ENCODER_FRAME_EVENT_ERROR))
 			frame_done = true;
@@ -2545,6 +2594,8 @@
 	SDE_EVT32_VERBOSE(DRMID(crtc));
 	smmu_state = &sde_crtc->smmu_state;
 
+	sde_core_perf_crtc_update(crtc, 0, false);
+
 	/* complete secure transitions if any */
 	if (smmu_state->transition_type == POST_COMMIT)
 		sde_crtc_secure_ctrl(crtc, true);
@@ -3224,7 +3275,8 @@
 	 * smmu state is attached,
 	 */
 	if ((smmu_state->state != DETACHED) &&
-			(smmu_state->state != DETACH_ALL_REQ))
+			(smmu_state->state != DETACH_ALL_REQ) &&
+			sde_crtc->enabled)
 		sde_cp_crtc_apply_properties(crtc);
 
 	/*
@@ -4003,6 +4055,9 @@
 	/* clear destination scaler dirty bit */
 	cstate->ds_dirty = false;
 
+	/* record whether or not the sbuf_clk_rate fifo has been shifted */
+	cstate->sbuf_clk_shifted = false;
+
 	/* duplicate base helper */
 	__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);
 
@@ -4432,28 +4487,41 @@
 }
 
 static int _sde_crtc_excl_rect_overlap_check(struct plane_state pstates[],
-	int cnt, int curr_cnt, struct sde_rect *excl_rect, int z_pos)
+	int cnt, int curr_cnt, struct sde_rect *excl_rect)
 {
 	struct sde_rect dst_rect, intersect;
 	int i, rc = -EINVAL;
 	const struct drm_plane_state *pstate;
 
-	/* start checking from next plane */
-	for (i = curr_cnt; i < cnt; i++) {
+	for (i = 0; i < cnt; i++) {
+		if (i == curr_cnt)
+			continue;
+
 		pstate = pstates[i].drm_pstate;
 		POPULATE_RECT(&dst_rect, pstate->crtc_x, pstate->crtc_y,
 				pstate->crtc_w, pstate->crtc_h, false);
 		sde_kms_rect_intersect(&dst_rect, excl_rect, &intersect);
 
+		/* complete intersection of excl_rect is required */
 		if (intersect.w == excl_rect->w && intersect.h == excl_rect->h
-				/* next plane may be on same z-order */
-				&& z_pos != pstates[i].stage) {
+			    /* intersecting rect should be in another z_order */
+			    && pstates[curr_cnt].stage != pstates[i].stage) {
 			rc = 0;
 			goto end;
 		}
 	}
 
-	SDE_ERROR("excl rect does not find top overlapping rect\n");
+	SDE_ERROR(
+	    "no overlapping rect for [%d] z_pos:%d, excl_rect:{%d,%d,%d,%d}\n",
+			i, pstates[curr_cnt].stage,
+			excl_rect->x, excl_rect->y, excl_rect->w, excl_rect->h);
+	for (i = 0; i < cnt; i++) {
+		pstate = pstates[i].drm_pstate;
+		SDE_ERROR("[%d] p:%d, z_pos:%d, src:{%d,%d,%d,%d}\n",
+				i, pstate->plane->base.id, pstates[i].stage,
+				pstate->crtc_x, pstate->crtc_y,
+				pstate->crtc_w, pstate->crtc_h);
+	}
 end:
 	return rc;
 }
@@ -4495,9 +4563,9 @@
 		pstate = pstates[i].drm_pstate;
 		sde_pstate = to_sde_plane_state(pstate);
 		if (sde_pstate->excl_rect.w && sde_pstate->excl_rect.h) {
-			/* check overlap on all top z-order */
+			/* check overlap on any other z-order */
 			rc = _sde_crtc_excl_rect_overlap_check(pstates, cnt,
-			     i + 1, &sde_pstate->excl_rect, pstates[i].stage);
+			     i, &sde_pstate->excl_rect);
 			if (rc)
 				goto end;
 		}
@@ -4643,6 +4711,12 @@
 	_sde_crtc_setup_is_ppsplit(state);
 	_sde_crtc_setup_lm_bounds(crtc, state);
 
+	/* record current/previous sbuf clock rate for later */
+	cstate->sbuf_clk_rate[0] = cstate->sbuf_clk_rate[1];
+	cstate->sbuf_clk_rate[1] = sde_crtc_get_property(
+			cstate, CRTC_PROP_ROT_CLK);
+	cstate->sbuf_clk_shifted = true;
+
 	 /* get plane state for all drm planes associated with crtc state */
 	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
 		if (IS_ERR_OR_NULL(pstate)) {
@@ -4654,7 +4728,7 @@
 
 		/* identify attached planes that are not in the delta state */
 		if (!drm_atomic_get_existing_plane_state(state->state, plane)) {
-			rc = sde_plane_confirm_hw_rsvps(plane, pstate);
+			rc = sde_plane_confirm_hw_rsvps(plane, pstate, state);
 			if (rc) {
 				SDE_ERROR("crtc%d confirmation hw failed %d\n",
 						crtc->base.id, rc);
@@ -6059,6 +6133,7 @@
 			INIT_LIST_HEAD(&node->list);
 			node->func = custom_events[i].func;
 			node->event = event;
+			spin_lock_init(&node->state_lock);
 			break;
 		}
 	}
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 1de3675..78f15ec 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -393,6 +393,9 @@
  * @new_perf: new performance state being requested
  * @sbuf_cfg: stream buffer configuration
  * @sbuf_prefill_line: number of line for inline rotator prefetch
+ * @sbuf_clk_rate : previous and current user specified inline rotator clock
+ * @sbuf_clk_shifted : whether or not sbuf_clk_rate has been shifted as part
+ *	of crtc atomic check
  */
 struct sde_crtc_state {
 	struct drm_crtc_state base;
@@ -424,6 +427,8 @@
 	struct sde_core_perf_params new_perf;
 	struct sde_ctl_sbuf_cfg sbuf_cfg;
 	u32 sbuf_prefill_line;
+	u64 sbuf_clk_rate[2];
+	bool sbuf_clk_shifted;
 
 	struct sde_crtc_respool rp;
 };
@@ -440,7 +445,8 @@
  * @event: event type of the interrupt
  * @func: function pointer to enable/disable the interrupt
  * @list: list of user customized event in crtc
- * @ref_count: reference count for the interrupt
+ * @state: state of the interrupt
+ * @state_lock: spin lock for interrupt state
  */
 struct sde_crtc_irq_info {
 	struct sde_irq_callback irq;
@@ -449,6 +455,7 @@
 			struct sde_irq_callback *irq);
 	struct list_head list;
 	enum sde_crtc_irq_state state;
+	spinlock_t state_lock;
 };
 
 #define to_sde_crtc_state(x) \
@@ -771,4 +778,11 @@
 void sde_crtc_update_cont_splash_mixer_settings(
 		struct drm_crtc *crtc);
 
+/**
+ * sde_crtc_get_sbuf_clk - get user specified sbuf clock settings
+ * @state: Pointer to DRM crtc state object
+ * Returns: Filtered sbuf clock setting from user space
+ */
+uint64_t sde_crtc_get_sbuf_clk(struct drm_crtc_state *state);
+
 #endif /* _SDE_CRTC_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 7162b06..92ab669 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -121,6 +121,13 @@
  *	Event signals that there were no frame updates for
  *	IDLE_POWERCOLLAPSE_DURATION time. This would disable MDP/DSI core clocks
  *      and request RSC with IDLE state and change the resource state to IDLE.
+ * @SDE_ENC_RC_EVENT_EARLY_WAKEUP:
+ *	This event is triggered from the input event thread when touch event is
+ *	received from the input device. On receiving this event,
+ *      - If the device is in SDE_ENC_RC_STATE_IDLE state, it turns ON the
+ *        clocks and enables RSC.
+ *      - If the device is in SDE_ENC_RC_STATE_ON state, it resets the delayed
+ *        off work since a new commit is imminent.
  */
 enum sde_enc_rc_events {
 	SDE_ENC_RC_EVENT_KICKOFF = 1,
@@ -129,7 +136,8 @@
 	SDE_ENC_RC_EVENT_STOP,
 	SDE_ENC_RC_EVENT_PRE_MODESET,
 	SDE_ENC_RC_EVENT_POST_MODESET,
-	SDE_ENC_RC_EVENT_ENTER_IDLE
+	SDE_ENC_RC_EVENT_ENTER_IDLE,
+	SDE_ENC_RC_EVENT_EARLY_WAKEUP,
 };
 
 /*
@@ -194,6 +202,8 @@
  * @delayed_off_work:		delayed worker to schedule disabling of
  *				clks and resources after IDLE_TIMEOUT time.
  * @vsync_event_work:		worker to handle vsync event for autorefresh
+ * @input_event_work:		worker to handle input device touch events
+ * @input_handler:			handler for input device events
  * @topology:                   topology of the display
  * @vblank_enabled:		boolean to track userspace vblank vote
  * @rsc_config:			rsc configuration for display vtotal, fps, etc.
@@ -238,6 +248,8 @@
 	enum sde_enc_rc_states rc_state;
 	struct kthread_delayed_work delayed_off_work;
 	struct kthread_work vsync_event_work;
+	struct kthread_work input_event_work;
+	struct input_handler *input_handler;
 	struct msm_display_topology topology;
 	bool vblank_enabled;
 
@@ -709,6 +721,11 @@
 	drm_encoder_cleanup(drm_enc);
 	mutex_destroy(&sde_enc->enc_lock);
 
+	if (sde_enc->input_handler) {
+		input_unregister_handler(sde_enc->input_handler);
+		kfree(sde_enc->input_handler);
+	}
+
 	kfree(sde_enc);
 }
 
@@ -1816,6 +1833,45 @@
 	return 0;
 }
 
+static void sde_encoder_input_event_handler(struct input_handle *handle,
+	unsigned int type, unsigned int code, int value)
+{
+	struct drm_encoder *drm_enc = NULL;
+	struct sde_encoder_virt *sde_enc = NULL;
+	struct msm_drm_thread *disp_thread = NULL;
+	struct msm_drm_private *priv = NULL;
+
+	if (!handle || !handle->handler || !handle->handler->private) {
+		SDE_ERROR("invalid encoder for the input event\n");
+		return;
+	}
+
+	drm_enc = (struct drm_encoder *)handle->handler->private;
+	if (!drm_enc->dev || !drm_enc->dev->dev_private) {
+		SDE_ERROR("invalid parameters\n");
+		return;
+	}
+
+	priv = drm_enc->dev->dev_private;
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	if (!sde_enc->crtc || (sde_enc->crtc->index
+			>= ARRAY_SIZE(priv->disp_thread))) {
+		SDE_DEBUG_ENC(sde_enc,
+			"invalid cached CRTC: %d or crtc index: %d\n",
+			sde_enc->crtc == NULL,
+			sde_enc->crtc ? sde_enc->crtc->index : -EINVAL);
+		return;
+	}
+
+	SDE_EVT32_VERBOSE(DRMID(drm_enc));
+
+	disp_thread = &priv->disp_thread[sde_enc->crtc->index];
+
+	kthread_queue_work(&disp_thread->worker,
+				&sde_enc->input_event_work);
+}
+
+
 static int sde_encoder_resource_control(struct drm_encoder *drm_enc,
 		u32 sw_event)
 {
@@ -1967,7 +2023,7 @@
 			idle_pc_duration = IDLE_POWERCOLLAPSE_DURATION;
 
 		if (!autorefresh_enabled)
-			kthread_queue_delayed_work(
+			kthread_mod_delayed_work(
 				&disp_thread->worker,
 				&sde_enc->delayed_off_work,
 				msecs_to_jiffies(idle_pc_duration));
@@ -2177,7 +2233,57 @@
 
 		mutex_unlock(&sde_enc->rc_lock);
 		break;
+	case SDE_ENC_RC_EVENT_EARLY_WAKEUP:
+		if (!sde_enc->crtc ||
+			sde_enc->crtc->index >= ARRAY_SIZE(priv->disp_thread)) {
+			SDE_DEBUG_ENC(sde_enc,
+				"invalid crtc:%d or crtc index:%d , sw_event:%u\n",
+				sde_enc->crtc == NULL,
+				sde_enc->crtc ? sde_enc->crtc->index : -EINVAL,
+				sw_event);
+			return -EINVAL;
+		}
 
+		disp_thread = &priv->disp_thread[sde_enc->crtc->index];
+
+		mutex_lock(&sde_enc->rc_lock);
+
+		if (sde_enc->rc_state == SDE_ENC_RC_STATE_ON) {
+			if (sde_enc->cur_master &&
+				sde_enc->cur_master->ops.is_autorefresh_enabled)
+				autorefresh_enabled =
+				sde_enc->cur_master->ops.is_autorefresh_enabled(
+							sde_enc->cur_master);
+			if (autorefresh_enabled) {
+				SDE_DEBUG_ENC(sde_enc,
+					"not handling early wakeup since auto refresh is enabled\n");
+				mutex_unlock(&sde_enc->rc_lock);
+				return 0;
+			}
+
+			if (!sde_crtc_frame_pending(sde_enc->crtc))
+				kthread_mod_delayed_work(&disp_thread->worker,
+						&sde_enc->delayed_off_work,
+						msecs_to_jiffies(
+						IDLE_POWERCOLLAPSE_DURATION));
+		} else if (sde_enc->rc_state == SDE_ENC_RC_STATE_IDLE) {
+			/* enable all the clks and resources */
+			_sde_encoder_resource_control_rsc_update(drm_enc, true);
+			_sde_encoder_resource_control_helper(drm_enc, true);
+
+			kthread_mod_delayed_work(&disp_thread->worker,
+						&sde_enc->delayed_off_work,
+						msecs_to_jiffies(
+						IDLE_POWERCOLLAPSE_DURATION));
+
+			sde_enc->rc_state = SDE_ENC_RC_STATE_ON;
+		}
+
+		SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
+				SDE_ENC_RC_STATE_ON, SDE_EVTLOG_FUNC_CASE8);
+
+		mutex_unlock(&sde_enc->rc_lock);
+		break;
 	default:
 		SDE_EVT32(DRMID(drm_enc), sw_event, SDE_EVTLOG_ERROR);
 		SDE_ERROR("unexpected sw_event: %d\n", sw_event);
@@ -3402,6 +3508,116 @@
 				&sde_enc->vsync_event_work);
 }
 
+static void sde_encoder_input_event_work_handler(struct kthread_work *work)
+{
+	struct sde_encoder_virt *sde_enc = container_of(work,
+				struct sde_encoder_virt, input_event_work);
+
+	if (!sde_enc) {
+		SDE_ERROR("invalid sde encoder\n");
+		return;
+	}
+
+	sde_encoder_resource_control(&sde_enc->base,
+			SDE_ENC_RC_EVENT_EARLY_WAKEUP);
+}
+
+static int _sde_encoder_input_connect(struct input_handler *handler,
+	struct input_dev *dev, const struct input_device_id *id)
+{
+	struct input_handle *handle;
+	int rc = 0;
+
+	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+	if (!handle)
+		return -ENOMEM;
+
+	handle->dev = dev;
+	handle->handler = handler;
+	handle->name = handler->name;
+
+	rc = input_register_handle(handle);
+	if (rc) {
+		pr_err("failed to register input handle\n");
+		goto error;
+	}
+
+	rc = input_open_device(handle);
+	if (rc) {
+		pr_err("failed to open input device\n");
+		goto error_unregister;
+	}
+
+	return 0;
+
+error_unregister:
+	input_unregister_handle(handle);
+
+error:
+	kfree(handle);
+
+	return rc;
+}
+
+static void _sde_encoder_input_disconnect(struct input_handle *handle)
+{
+	 input_close_device(handle);
+	 input_unregister_handle(handle);
+	 kfree(handle);
+}
+
+/**
+ * Structure for specifying event parameters on which to receive callbacks.
+ * This structure will trigger a callback in case of a touch event (specified by
+ * EV_ABS) where there is a change in X and Y coordinates,
+ */
+static const struct input_device_id sde_input_ids[] = {
+	{
+		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+		.evbit = { BIT_MASK(EV_ABS) },
+		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
+					BIT_MASK(ABS_MT_POSITION_X) |
+					BIT_MASK(ABS_MT_POSITION_Y) },
+	},
+	{ },
+};
+
+static int _sde_encoder_input_handler(
+		struct sde_encoder_virt *sde_enc)
+{
+	struct input_handler *input_handler = NULL;
+	int rc = 0;
+
+	if (sde_enc->input_handler) {
+		SDE_ERROR_ENC(sde_enc,
+				"input_handle is active. unexpected\n");
+		return -EINVAL;
+	}
+
+	input_handler = kzalloc(sizeof(*sde_enc->input_handler), GFP_KERNEL);
+	if (!input_handler)
+		return -ENOMEM;
+
+	input_handler->event = sde_encoder_input_event_handler;
+	input_handler->connect = _sde_encoder_input_connect;
+	input_handler->disconnect = _sde_encoder_input_disconnect;
+	input_handler->name = "sde";
+	input_handler->id_table = sde_input_ids;
+	input_handler->private = sde_enc;
+
+	rc = input_register_handler(input_handler);
+	if (rc) {
+		SDE_ERROR_ENC(sde_enc,
+			"input_register_handler failed, rc= %d\n", rc);
+		kfree(input_handler);
+		return rc;
+	}
+
+	sde_enc->input_handler = input_handler;
+
+	return rc;
+}
+
 static void sde_encoder_vsync_event_work_handler(struct kthread_work *work)
 {
 	struct sde_encoder_virt *sde_enc = container_of(work,
@@ -3523,6 +3739,7 @@
 	SDE_ATRACE_BEGIN("enc_prepare_for_kickoff");
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
 		phys = sde_enc->phys_encs[i];
+		params->is_primary = sde_enc->disp_info.is_primary;
 		if (phys) {
 			if (phys->ops.prepare_for_kickoff) {
 				rc = phys->ops.prepare_for_kickoff(
@@ -3878,6 +4095,7 @@
 
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
 		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
 		if (!phys || !phys->ops.collect_misr)
 			continue;
 
@@ -4261,6 +4479,13 @@
 		sde_enc->rsc_client = NULL;
 	}
 
+	if (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) {
+		ret = _sde_encoder_input_handler(sde_enc);
+		if (ret)
+			SDE_ERROR(
+			"input handler registration failed, rc = %d\n", ret);
+	}
+
 	mutex_init(&sde_enc->rc_lock);
 	kthread_init_delayed_work(&sde_enc->delayed_off_work,
 			sde_encoder_off_work);
@@ -4269,6 +4494,9 @@
 	kthread_init_work(&sde_enc->vsync_event_work,
 			sde_encoder_vsync_event_work_handler);
 
+	kthread_init_work(&sde_enc->input_event_work,
+			sde_encoder_input_event_work_handler);
+
 	memcpy(&sde_enc->disp_info, disp_info, sizeof(*disp_info));
 
 	SDE_DEBUG_ENC(sde_enc, "created\n");
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
index 8038eb6..2c84e20 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -54,11 +54,13 @@
 /**
  * sde_encoder_kickoff_params - info encoder requires at kickoff
  * @inline_rotate_prefill: number of lines to prefill for inline rotation
+ * @is_primary: set to true if the display is primary display
  * @affected_displays:  bitmask, bit set means the ROI of the commit lies within
  *                      the bounds of the physical display at the bit index
  */
 struct sde_encoder_kickoff_params {
 	u32 inline_rotate_prefill;
+	u32 is_primary;
 	unsigned long affected_displays;
 };
 
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
index ad27b7f..a3f09b6 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -245,9 +245,10 @@
  *	HW layer requires VSYNC counter of first pixel of tgt VFP line.
  * @phys_enc: Pointer to physical encoder
  * @rot_fetch_lines: number of line to prefill, or 0 to disable
+ * @is_primary: set true if the display is primary display
  */
 static void programmable_rot_fetch_config(struct sde_encoder_phys *phys_enc,
-		u32 rot_fetch_lines)
+		u32 rot_fetch_lines, u32 is_primary)
 {
 	struct sde_encoder_phys_vid *vid_enc =
 		to_sde_encoder_phys_vid(phys_enc);
@@ -264,7 +265,8 @@
 			!phys_enc->hw_ctl->ops.get_bitmask_intf ||
 			!phys_enc->hw_ctl->ops.update_pending_flush ||
 			!vid_enc->hw_intf->ops.setup_rot_start ||
-			!phys_enc->sde_kms)
+			!phys_enc->sde_kms ||
+			!is_primary)
 		return;
 
 	timing = &vid_enc->timing_params;
@@ -299,6 +301,8 @@
 		rot_fetch_start_vsync_counter);
 
 	if (!phys_enc->sde_kms->splash_data.cont_splash_en) {
+		SDE_EVT32(DRMID(phys_enc->parent), f.enable, f.fetch_start);
+
 		phys_enc->hw_ctl->ops.get_bitmask_intf(
 				phys_enc->hw_ctl, &flush_mask,
 				vid_enc->hw_intf->idx);
@@ -308,10 +312,10 @@
 		spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
 		vid_enc->hw_intf->ops.setup_rot_start(vid_enc->hw_intf, &f);
 		spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
-	}
 
-	vid_enc->rot_fetch = f;
-	vid_enc->rot_fetch_valid = true;
+		vid_enc->rot_fetch = f;
+		vid_enc->rot_fetch_valid = true;
+	}
 }
 
 static bool sde_encoder_phys_vid_mode_fixup(
@@ -401,7 +405,8 @@
 			to_sde_encoder_phys_vid(phys_enc);
 	struct sde_hw_ctl *hw_ctl;
 	unsigned long lock_flags;
-	u32 flush_register = 0;
+	u32 flush_register = ~0;
+	u32 reset_status = 0;
 	int new_cnt = -1, old_cnt = -1;
 	u32 event = 0;
 
@@ -414,40 +419,48 @@
 
 	SDE_ATRACE_BEGIN("vblank_irq");
 
-	/* signal only for master, where there is a pending kickoff */
-	if (sde_encoder_phys_vid_is_master(phys_enc)
-			&& atomic_add_unless(
-				&phys_enc->pending_retire_fence_cnt, -1, 0)) {
-		event = SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE
-				| SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE;
-
-		if (phys_enc->parent_ops.handle_frame_done)
-			phys_enc->parent_ops.handle_frame_done(phys_enc->parent,
-				phys_enc, event);
-	}
-
-	if (phys_enc->parent_ops.handle_vblank_virt)
-		phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
-				phys_enc);
-
-	old_cnt  = atomic_read(&phys_enc->pending_kickoff_cnt);
-
 	/*
 	 * only decrement the pending flush count if we've actually flushed
 	 * hardware. due to sw irq latency, vblank may have already happened
 	 * so we need to double-check with hw that it accepted the flush bits
 	 */
 	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+
+	old_cnt = atomic_read(&phys_enc->pending_kickoff_cnt);
+
 	if (hw_ctl && hw_ctl->ops.get_flush_register)
 		flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
 
-	if (flush_register == 0)
-		new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt,
-				-1, 0);
+	if (flush_register)
+		goto not_flushed;
+
+	new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+
+	/* signal only for master, where there is a pending kickoff */
+	if (sde_encoder_phys_vid_is_master(phys_enc)) {
+		if (atomic_add_unless(&phys_enc->pending_retire_fence_cnt,
+					-1, 0))
+			event |= SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE |
+				SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE;
+	}
+
+not_flushed:
+	if (hw_ctl && hw_ctl->ops.get_reset)
+		reset_status = hw_ctl->ops.get_reset(hw_ctl);
+
 	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
 
+	if (event && phys_enc->parent_ops.handle_frame_done)
+		phys_enc->parent_ops.handle_frame_done(phys_enc->parent,
+			phys_enc, event);
+
+	if (phys_enc->parent_ops.handle_vblank_virt)
+		phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
+				phys_enc);
+
 	SDE_EVT32_IRQ(DRMID(phys_enc->parent), vid_enc->hw_intf->idx - INTF_0,
-			old_cnt, new_cnt, flush_register, event);
+			old_cnt, new_cnt, reset_status ? SDE_EVTLOG_ERROR : 0,
+			flush_register, event);
 
 	/* Signal any waiting atomic commit thread */
 	wake_up_all(&phys_enc->pending_kickoff_wq);
@@ -865,7 +878,8 @@
 		vid_enc->error_count = 0;
 	}
 
-	programmable_rot_fetch_config(phys_enc, params->inline_rotate_prefill);
+	programmable_rot_fetch_config(phys_enc,
+			params->inline_rotate_prefill, params->is_primary);
 
 	return rc;
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index c23afc5..cdc6a9c 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -95,6 +95,7 @@
 #define PROP_BITVALUE_ACCESS(p, i, j, k)	((p + i)->bit_value[j][k])
 
 #define DEFAULT_SBUF_HEADROOM		(20)
+#define DEFAULT_SBUF_PREFILL		(128)
 
 /*
  * Default parameter values
@@ -1680,6 +1681,8 @@
 		if (sde_cfg->has_wb_ubwc)
 			set_bit(SDE_WB_UBWC, &wb->features);
 
+		set_bit(SDE_WB_XY_ROI_OFFSET, &wb->features);
+
 		for (j = 0; j < sde_cfg->mdp_count; j++) {
 			sde_cfg->mdp[j].clk_ctrls[wb->clk_ctrl].reg_off =
 				PROP_BITVALUE_ACCESS(prop_value,
@@ -1953,6 +1956,7 @@
 	if (sde_cfg->rot_count) {
 		sde_cfg->has_sbuf = true;
 		sde_cfg->sbuf_headroom = DEFAULT_SBUF_HEADROOM;
+		sde_cfg->sbuf_prefill = DEFAULT_SBUF_PREFILL;
 	}
 
 end:
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index 1cd65ea..aa6c482 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -918,6 +918,7 @@
  * @ubwc_version       UBWC feature version (0x0 for not supported)
  * @has_sbuf           indicate if stream buffer is available
  * @sbuf_headroom      stream buffer headroom in lines
+ * @sbuf_prefill       stream buffer prefill default in lines
  * @has_idle_pc        indicate if idle power collapse feature is supported
  * @has_hdr            HDR feature support
  * @dma_formats        Supported formats for dma pipe
@@ -944,6 +945,7 @@
 	u32 ubwc_version;
 	bool has_sbuf;
 	u32 sbuf_headroom;
+	u32 sbuf_prefill;
 	bool has_idle_pc;
 	u32 vbif_qos_nlvl;
 	u32 ts_prefill_rev;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
index b8c790f..303d96e 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
@@ -347,6 +347,13 @@
 	return status;
 }
 
+static u32 sde_hw_ctl_get_reset_status(struct sde_hw_ctl *ctx)
+{
+	if (!ctx)
+		return 0;
+	return (u32)SDE_REG_READ(&ctx->hw, CTL_SW_RESET);
+}
+
 static int sde_hw_ctl_reset_control(struct sde_hw_ctl *ctx)
 {
 	struct sde_hw_blk_reg_map *c = &ctx->hw;
@@ -632,6 +639,7 @@
 	ops->read_ctl_layers = sde_hw_ctl_read_ctl_layers;
 	ops->setup_intf_cfg = sde_hw_ctl_intf_cfg;
 	ops->reset = sde_hw_ctl_reset_control;
+	ops->get_reset = sde_hw_ctl_get_reset_status;
 	ops->hard_reset = sde_hw_ctl_hard_reset;
 	ops->wait_reset_status = sde_hw_ctl_wait_reset_status;
 	ops->clear_all_blendstages = sde_hw_ctl_clear_all_blendstages;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
index 435fc21..9eb31f1 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
@@ -153,6 +153,13 @@
 	int (*reset)(struct sde_hw_ctl *c);
 
 	/**
+	 * get_reset - check ctl reset status bit
+	 * @ctx    : ctl path ctx pointer
+	 * Returns: current value of ctl reset status
+	 */
+	u32 (*get_reset)(struct sde_hw_ctl *ctx);
+
+	/**
 	 * hard_reset - force reset on ctl_path
 	 * @ctx    : ctl path ctx pointer
 	 * @enable : whether to enable/disable hard reset
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_rot.c b/drivers/gpu/drm/msm/sde/sde_hw_rot.c
index 8d386a8..facec3d 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_rot.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_rot.c
@@ -552,6 +552,28 @@
 }
 
 /**
+ * sde_hw_rot_adjust_prefill_bw - update prefill bw based on pipe config
+ * @hw: Pointer to rotator hardware driver
+ * @data: Pointer to command descriptor
+ * @prefill_bw: adjusted prefill bw (output)
+ * return: 0 if success; error code otherwise
+ */
+static int sde_hw_rot_adjust_prefill_bw(struct sde_hw_rot *hw,
+		struct sde_hw_rot_cmd *data, u64 *prefill_bw)
+{
+	if (!hw || !data || !prefill_bw) {
+		SDE_ERROR("invalid parameter(s)\n");
+		return -EINVAL;
+	}
+
+	/* adjust bw for scaling */
+	if (data->dst_rect_h)
+		*prefill_bw = mult_frac(data->prefill_bw, data->crtc_h,
+				data->dst_rect_h);
+	return 0;
+}
+
+/**
  * sde_hw_rot_commit - commit/execute given rotator command
  * @hw: Pointer to rotator hardware driver
  * @data: Pointer to command descriptor
@@ -683,6 +705,8 @@
 				&rot_cmd.dst_planes);
 	}
 
+	sde_hw_rot_adjust_prefill_bw(hw, data, &rot_cmd.prefill_bw);
+
 	/* only process any command if client is master or for validation */
 	if (data->master || hw_cmd == SDE_HW_ROT_CMD_VALIDATE) {
 		SDE_DEBUG("dispatch seq:%d cmd:%d\n", data->sequence_id,
@@ -918,6 +942,7 @@
 	/* Assign ops */
 	c->idx = idx;
 	c->caps = cfg;
+	c->catalog = m;
 	_setup_rot_ops(&c->ops, c->caps->features);
 	snprintf(c->name, ARRAY_SIZE(c->name), "sde_rot_%d", idx - ROT_0);
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_rot.h b/drivers/gpu/drm/msm/sde/sde_hw_rot.h
index ea88d05..59f30ed 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_rot.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_rot.h
@@ -73,6 +73,7 @@
  * @dst_rect_y: destination rectangle y coordinate
  * @dst_rect_w: destination rectangle width
  * @dst_rect_h: destination rectangle height
+ * @crtc_h: sspp output height
  * @priv_handle: private handle of rotator driver (output)
  */
 struct sde_hw_rot_cmd {
@@ -110,6 +111,7 @@
 	u32 dst_rect_y;
 	u32 dst_rect_w;
 	u32 dst_rect_h;
+	u32 crtc_h;
 	void *priv_handle;
 };
 
@@ -133,6 +135,7 @@
  * @hw: hardware address map
  * @idx: instance index
  * @caps: capabilities bitmask
+ * @catalog: pointer to hardware catalog
  * @ops: operation table
  * @rot_ctx: pointer to private rotator context
  * @format_caps: pointer to pixel format capability  array
@@ -144,6 +147,7 @@
 	char name[SDE_HW_ROT_NAME_SIZE];
 	int idx;
 	const struct sde_rot_cfg *caps;
+	struct sde_mdss_cfg *catalog;
 	struct sde_hw_rot_ops ops;
 	void *rot_ctx;
 	struct sde_format_extended *format_caps;
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index d4d6998..5d3835c 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -949,7 +949,7 @@
 		.get_mode_info  = dp_connector_get_mode_info,
 		.post_open  = dp_connector_post_open,
 		.check_status = NULL,
-		.pre_kickoff  = dp_connector_pre_kickoff,
+		.config_hdr = dp_connector_config_hdr,
 		.cmd_transfer = NULL,
 	};
 	struct msm_display_info info;
@@ -2999,12 +2999,19 @@
 		SDE_DEBUG("added genpd provider %s\n", sde_kms->genpd.name);
 	}
 
-	if (sde_kms->splash_data.cont_splash_en)
+	if (sde_kms->splash_data.cont_splash_en) {
 		SDE_DEBUG("Skipping MDP Resources disable\n");
-	else
+	} else {
+		for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
+			sde_power_data_bus_set_quota(&priv->phandle,
+				sde_kms->core_client,
+				SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT, i,
+				SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA,
+				SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA);
+
 		sde_power_resource_enable(&priv->phandle,
 						sde_kms->core_client, false);
-
+	}
 	return 0;
 
 genpd_err:
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index f2f870f..a1898ac 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014-2017 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2014-2018 The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -182,7 +182,7 @@
 	return cstate;
 }
 
-static bool sde_plane_enabled(struct drm_plane_state *state)
+static bool sde_plane_enabled(const struct drm_plane_state *state)
 {
 	return state && state->fb && state->crtc;
 }
@@ -1558,50 +1558,38 @@
 	.get = _sde_plane_fbo_get,
 };
 
-/**
- * sde_plane_rot_calc_prefill - calculate rotator start prefill
- * @plane: Pointer to drm plane
- * return: prefill time in line
- */
-u32 sde_plane_rot_calc_prefill(struct drm_plane *plane)
+u32 sde_plane_rot_get_prefill(struct drm_plane *plane)
 {
 	struct drm_plane_state *state;
 	struct sde_plane_state *pstate;
 	struct sde_plane_rot_state *rstate;
 	struct sde_kms *sde_kms;
-	u32 blocksize = 128;
-	u32 prefill_line = 0;
+	u32 blocksize = 0;
 
 	if (!plane || !plane->state || !plane->state->fb) {
 		SDE_ERROR("invalid parameters\n");
 		return 0;
 	}
 
-	sde_kms = _sde_plane_get_kms(plane);
 	state = plane->state;
 	pstate = to_sde_plane_state(state);
 	rstate = &pstate->rot;
 
+	if (!rstate->out_fb_format)
+		return 0;
+
+	sde_kms = _sde_plane_get_kms(plane);
 	if (!sde_kms || !sde_kms->catalog) {
 		SDE_ERROR("invalid kms\n");
 		return 0;
 	}
 
-	if (rstate->out_fb_format)
-		sde_format_get_block_size(rstate->out_fb_format,
-				&blocksize, &blocksize);
+	/* return zero if out_fb_format isn't valid */
+	if (sde_format_get_block_size(rstate->out_fb_format,
+			&blocksize, &blocksize))
+		return 0;
 
-	prefill_line = blocksize + sde_kms->catalog->sbuf_headroom;
-	prefill_line = mult_frac(prefill_line, rstate->out_src_h >> 16,
-			state->crtc_h);
-	SDE_DEBUG(
-		"plane%d.%d blk:%u head:%u vdst/vsrc:%u/%u prefill:%u\n",
-			plane->base.id, rstate->sequence_id,
-			blocksize, sde_kms->catalog->sbuf_headroom,
-			state->crtc_h, rstate->out_src_h >> 16,
-			prefill_line);
-
-	return prefill_line;
+	return blocksize + sde_kms->catalog->sbuf_headroom;
 }
 
 /**
@@ -1908,8 +1896,7 @@
 
 	rot_cmd->prefill_bw = sde_crtc_get_property(sde_cstate,
 			CRTC_PROP_ROT_PREFILL_BW);
-	rot_cmd->clkrate = sde_crtc_get_property(sde_cstate,
-			CRTC_PROP_ROT_CLK);
+	rot_cmd->clkrate = sde_crtc_get_sbuf_clk(cstate);
 	rot_cmd->dst_writeback = psde->sbuf_writeback;
 
 	if (sde_crtc_get_intf_mode(state->crtc) == INTF_MODE_VIDEO)
@@ -1937,6 +1924,7 @@
 	rot_cmd->dst_rect_y = 0;
 	rot_cmd->dst_rect_w = drm_rect_width(&rstate->out_rot_rect) >> 16;
 	rot_cmd->dst_rect_h = drm_rect_height(&rstate->out_rot_rect) >> 16;
+	rot_cmd->crtc_h = state->crtc_h;
 
 	if (hw_cmd == SDE_HW_ROT_CMD_COMMIT) {
 		struct sde_hw_fmt_layout layout;
@@ -2966,30 +2954,22 @@
 }
 
 int sde_plane_confirm_hw_rsvps(struct drm_plane *plane,
-		const struct drm_plane_state *state)
+		const struct drm_plane_state *state,
+		struct drm_crtc_state *cstate)
 {
-	struct drm_crtc_state *cstate;
 	struct sde_plane_state *pstate;
 	struct sde_plane_rot_state *rstate;
 	struct sde_hw_blk *hw_blk;
 
-	if (!plane || !state) {
-		SDE_ERROR("invalid plane/state\n");
+	if (!plane || !state || !cstate) {
+		SDE_ERROR("invalid parameters\n");
 		return -EINVAL;
 	}
 
 	pstate = to_sde_plane_state(state);
 	rstate = &pstate->rot;
 
-	/* cstate will be null if crtc is disconnected from plane */
-	cstate = _sde_plane_get_crtc_state((struct drm_plane_state *)state);
-	if (IS_ERR_OR_NULL(cstate)) {
-		SDE_ERROR("invalid crtc state\n");
-		return -EINVAL;
-	}
-
-	if (sde_plane_enabled((struct drm_plane_state *)state) &&
-			rstate->out_sbuf) {
+	if (sde_plane_enabled(state) && rstate->out_sbuf) {
 		SDE_DEBUG("plane%d.%d acquire rotator, fb %d\n",
 				plane->base.id, rstate->sequence_id,
 				state->fb ? state->fb->base.id : -1);
@@ -3005,7 +2985,15 @@
 					SDE_EVTLOG_ERROR);
 			return -EINVAL;
 		}
+
+		_sde_plane_rot_get_fb(plane, cstate, rstate);
+
+		SDE_EVT32(DRMID(plane), rstate->sequence_id,
+				state->fb ? state->fb->base.id : -1,
+				rstate->out_fb ? rstate->out_fb->base.id : -1,
+				hw_blk->id);
 	}
+
 	return 0;
 }
 
@@ -3269,7 +3257,8 @@
 
 	if (!fb || !old_fb) {
 		SDE_DEBUG_PLANE(psde, "can't compare fb handles\n");
-	} else if (fb->pixel_format != old_fb->pixel_format) {
+	} else if ((fb->pixel_format != old_fb->pixel_format) ||
+			pstate->const_alpha_en != old_pstate->const_alpha_en) {
 		SDE_DEBUG_PLANE(psde, "format change\n");
 		pstate->dirty |= SDE_PLANE_DIRTY_FORMAT | SDE_PLANE_DIRTY_RECTS;
 	} else {
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h
index e8b621c..ad58097 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.h
+++ b/drivers/gpu/drm/msm/sde/sde_plane.h
@@ -204,10 +204,12 @@
  * sde_plane_confirm_hw_rsvps - reserve an sbuf resource, if needed
  * @plane: Pointer to DRM plane object
  * @state: Pointer to plane state
+ * @cstate: Pointer to crtc state containing the resource pool
  * Returns: Zero on success
  */
 int sde_plane_confirm_hw_rsvps(struct drm_plane *plane,
-		const struct drm_plane_state *state);
+		const struct drm_plane_state *state,
+		struct drm_crtc_state *cstate);
 
 /**
  * sde_plane_get_ctl_flush - get control flush mask
@@ -220,11 +222,11 @@
 		u32 *flush_sspp, u32 *flush_rot);
 
 /**
- * sde_plane_rot_calc_prefill - calculate rotator start prefill
+ * sde_plane_rot_get_prefill - calculate rotator start prefill
  * @plane: Pointer to drm plane
- * return: prefill time in line
+ * return: prefill time in lines
  */
-u32 sde_plane_rot_calc_prefill(struct drm_plane *plane);
+u32 sde_plane_rot_get_prefill(struct drm_plane *plane);
 
 /**
  * sde_plane_restore - restore hw state if previously power collapsed
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.c b/drivers/gpu/drm/msm/sde/sde_rm.c
index f9092e2..93304e16 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.c
+++ b/drivers/gpu/drm/msm/sde/sde_rm.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1129,17 +1129,29 @@
 	return ret;
 }
 
-/**
- * poll_intr_status - Gets HW interrupt status based on
- *			given lookup IRQ index.
- * @intr:	HW interrupt handle
- * @irq_idx:	Lookup irq index return from irq_idx_lookup
- * @msec:	Maximum delay allowed to check intr status
- * return:	return zero on success.
- */
-static u32 _sde_rm_poll_intr_status_for_cont_splash
-			(struct sde_hw_intr *intr,
-			int irq_idx, u32 const msec)
+static void _sde_rm_clear_irq_status(struct sde_hw_intr *hw_intr,
+	int irq_idx_pp_done, int irq_idx_autorefresh)
+{
+	u32 intr_value = 0;
+
+	if ((irq_idx_pp_done >= 0) && (hw_intr->ops.get_intr_status_nomask)) {
+		intr_value = hw_intr->ops.get_intr_status_nomask(hw_intr,
+			irq_idx_pp_done, false);
+		hw_intr->ops.clear_intr_status_force_mask(hw_intr,
+			irq_idx_pp_done, intr_value);
+	}
+
+	if ((irq_idx_autorefresh >= 0) &&
+			(hw_intr->ops.get_intr_status_nomask)) {
+		intr_value = hw_intr->ops.get_intr_status_nomask(hw_intr,
+			irq_idx_autorefresh, false);
+		hw_intr->ops.clear_intr_status_force_mask(hw_intr,
+			irq_idx_autorefresh, intr_value);
+	}
+}
+
+static u32 _sde_rm_poll_intr_status_for_cont_splash(struct sde_hw_intr *intr,
+	int irq_idx_pp_done, int irq_idx_autorefresh, u32 const msec)
 {
 	int i;
 	u32 status = 0;
@@ -1153,19 +1165,112 @@
 
 	for (i = 0; i < loop; i++) {
 		status = intr->ops.get_intr_status_nomask
-				(intr, irq_idx, false);
+				(intr, irq_idx_pp_done, false);
 
-		if (status & BIT(irq_idx)) {
-			SDE_DEBUG(" Poll success. i=%d, status=0x%x\n",
+		if (status & BIT(irq_idx_pp_done)) {
+			SDE_DEBUG("pp_done received i=%d, status=0x%x\n",
 							i, status);
-			return 0;
+			SDE_EVT32(status, i, irq_idx_pp_done);
+
+			if (status & BIT(irq_idx_autorefresh))
+				_sde_rm_clear_irq_status(intr,
+					irq_idx_pp_done, irq_idx_autorefresh);
+			else
+				return 0;
 		}
 		usleep_range(delay_us, delay_us + 10);
 	}
+
+	SDE_EVT32(status, irq_idx_pp_done, SDE_EVTLOG_ERROR);
 	SDE_ERROR("polling timed out. status = 0x%x\n", status);
 	return -ETIMEDOUT;
 }
 
+static int _sde_rm_autorefresh_disable(struct sde_hw_pingpong *pp,
+		struct sde_hw_intr *hw_intr)
+{
+	u32 const timeout_ms = 35; /* Max two vsyncs delay */
+	int rc = 0, i, loop = 3;
+	struct sde_hw_pp_vsync_info info;
+	int irq_idx_pp_done = -1, irq_idx_autorefresh = -1;
+	struct sde_hw_autorefresh cfg = {0};
+
+	if (!pp->ops.get_autorefresh || !pp->ops.setup_autorefresh ||
+		!pp->ops.connect_external_te || !pp->ops.get_vsync_info) {
+		SDE_ERROR("autorefresh update api not supported\n");
+		return 0;
+	}
+
+	/* read default autorefresh configuration */
+	pp->ops.get_autorefresh(pp, &cfg);
+	if (!cfg.enable) {
+		SDE_DEBUG("autorefresh already disabled\n");
+		SDE_EVT32(pp->idx - PINGPONG_0, SDE_EVTLOG_FUNC_CASE1);
+		return 0;
+	}
+
+	/* disable external TE first */
+	pp->ops.connect_external_te(pp, false);
+
+	/* get all IRQ indexes */
+	if (hw_intr->ops.irq_idx_lookup) {
+		irq_idx_pp_done = hw_intr->ops.irq_idx_lookup(
+				SDE_IRQ_TYPE_PING_PONG_COMP, pp->idx);
+		irq_idx_autorefresh = hw_intr->ops.irq_idx_lookup(
+				SDE_IRQ_TYPE_PING_PONG_AUTO_REF, pp->idx);
+		SDE_DEBUG("pp_done itr_idx = %d autorefresh irq_idx:%d\n",
+				irq_idx_pp_done, irq_idx_autorefresh);
+	}
+
+	/* disable autorefresh */
+	cfg.enable = false;
+	pp->ops.setup_autorefresh(pp, &cfg);
+
+	SDE_EVT32(pp->idx - PINGPONG_0, irq_idx_pp_done, irq_idx_autorefresh);
+	_sde_rm_clear_irq_status(hw_intr, irq_idx_pp_done, irq_idx_autorefresh);
+
+	/*
+	 * Check the line count again if
+	 * the line count is equal to the active
+	 * height to make sure their is no
+	 * additional frame updates
+	 */
+	for (i = 0; i < loop; i++) {
+		info.wr_ptr_line_count = 0;
+		info.rd_ptr_init_val = 0;
+		pp->ops.get_vsync_info(pp, &info);
+
+		SDE_EVT32(pp->idx - PINGPONG_0, info.wr_ptr_line_count,
+			info.rd_ptr_init_val, SDE_EVTLOG_FUNC_CASE1);
+
+		/* wait for read ptr intr */
+		rc = _sde_rm_poll_intr_status_for_cont_splash(hw_intr,
+			irq_idx_pp_done, irq_idx_autorefresh, timeout_ms);
+
+		info.wr_ptr_line_count = 0;
+		info.rd_ptr_init_val = 0;
+		pp->ops.get_vsync_info(pp, &info);
+		SDE_DEBUG("i=%d, line count=%d\n", i, info.wr_ptr_line_count);
+
+		SDE_EVT32(pp->idx - PINGPONG_0, info.wr_ptr_line_count,
+			info.rd_ptr_init_val, SDE_EVTLOG_FUNC_CASE2);
+
+		/* log line count and return */
+		if (!rc)
+			break;
+		/*
+		 * Wait for few milli seconds for line count
+		 * to increase if any frame transfer is
+		 * pending.
+		 */
+		usleep_range(3000, 4000);
+	}
+
+	pp->ops.connect_external_te(pp, true);
+
+	return rc;
+}
+
 /**
  * sde_rm_get_pp_dsc_for_cont_splash - retrieve the current dsc enabled blocks
  *	and disable autorefresh if enabled.
@@ -1181,9 +1286,7 @@
 {
 	int index = 0;
 	int value, dsc_cnt = 0;
-	struct sde_hw_autorefresh cfg;
 	struct sde_rm_hw_iter iter_pp;
-	int irq_idx_pp_done = -1;
 
 	if (!rm || !sde_kms || !dsc_ids) {
 		SDE_ERROR("invalid input parameters\n");
@@ -1195,11 +1298,7 @@
 	while (_sde_rm_get_hw_locked(rm, &iter_pp)) {
 		struct sde_hw_pingpong *pp =
 				to_sde_hw_pingpong(iter_pp.blk->hw);
-		u32 intr_value = 0;
-		u32 const timeout_ms = 35; /* Max two vsyncs delay */
-		int rc = 0, i, loop = 2;
 		struct sde_hw_intr *hw_intr = NULL;
-		struct sde_hw_pp_vsync_info info;
 
 		if (!pp->ops.get_dsc_status) {
 			SDE_ERROR("get_dsc_status ops not initialized\n");
@@ -1219,70 +1318,7 @@
 		}
 		index++;
 
-		if (!pp->ops.get_autorefresh) {
-			SDE_ERROR("get_autorefresh api not supported\n");
-			return 0;
-		}
-		memset(&cfg, 0, sizeof(cfg));
-		if (!pp->ops.get_autorefresh(pp, &cfg)
-				&& (cfg.enable)
-				&& (pp->ops.setup_autorefresh)) {
-			if (hw_intr->ops.irq_idx_lookup) {
-				irq_idx_pp_done = hw_intr->ops.irq_idx_lookup
-					(SDE_IRQ_TYPE_PING_PONG_COMP,
-								pp->idx);
-				SDE_DEBUG(" itr_idx = %d\n", irq_idx_pp_done);
-			}
-
-			if ((irq_idx_pp_done >= 0) &&
-					(hw_intr->ops.get_intr_status_nomask)) {
-				intr_value = hw_intr->ops.get_intr_status_nomask
-					(hw_intr, irq_idx_pp_done, false);
-				hw_intr->ops.clear_intr_status_force_mask
-					(hw_intr, irq_idx_pp_done, intr_value);
-			}
-			cfg.enable = false;
-			SDE_DEBUG("Disabling autorefresh\n");
-			pp->ops.setup_autorefresh(pp, &cfg);
-
-			/*
-			 * Check the line count again if
-			 * the line count is equal to the active
-			 * height to make sure their is no
-			 * additional frame updates
-			 */
-			for (i = 0; i < loop; i++) {
-				info.wr_ptr_line_count = 0;
-				info.rd_ptr_init_val = 0;
-				if (pp->ops.get_vsync_info)
-					pp->ops.get_vsync_info(pp, &info);
-				/*
-				 * For cmd-mode using external-TE logic,
-				 * the rd_ptr_init_val is equal to
-				 * active-height. Use this init_val to
-				 * compare that with lane count. Need
-				 * to implement a different check
-				 * if external-TE is not used.
-				 */
-				if (info.wr_ptr_line_count
-						< info.rd_ptr_init_val) {
-					/* wait for read ptr intr */
-					rc =
-					_sde_rm_poll_intr_status_for_cont_splash
-					(hw_intr, irq_idx_pp_done, timeout_ms);
-					if (!rc)
-						break;
-				}
-				SDE_DEBUG("i=%d, line count=%d\n",
-						i, info.wr_ptr_line_count);
-				/*
-				 * Wait for few milli seconds for line count
-				 * to increase if any frame transfer is
-				 * pending.
-				 */
-				usleep_range(3000, 4000);
-			}
-		}
+		_sde_rm_autorefresh_disable(pp, hw_intr);
 	}
 
 	return dsc_cnt;
diff --git a/drivers/gpu/drm/msm/sde_hdcp_1x.c b/drivers/gpu/drm/msm/sde_hdcp_1x.c
index c012f9d..2f900ece 100644
--- a/drivers/gpu/drm/msm/sde_hdcp_1x.c
+++ b/drivers/gpu/drm/msm/sde_hdcp_1x.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -251,21 +251,17 @@
 static int sde_hdcp_1x_load_keys(void *input)
 {
 	int rc = 0;
-	bool use_sw_keys = false;
-	u32 reg_val;
-	u32 ksv_lsb_addr, ksv_msb_addr;
 	u32 aksv_lsb, aksv_msb;
 	u8 aksv[5];
 	struct dss_io_data *dp_ahb;
 	struct dss_io_data *dp_aux;
 	struct dss_io_data *dp_link;
-	struct dss_io_data *qfprom_io;
 	struct sde_hdcp_1x *hdcp = input;
 	struct sde_hdcp_reg_set *reg_set;
 
 	if (!hdcp || !hdcp->init_data.dp_ahb ||
 		!hdcp->init_data.dp_aux ||
-		!hdcp->init_data.qfprom_io) {
+		!hdcp->init_data.dp_link) {
 		pr_err("invalid input\n");
 		rc = -EINVAL;
 		goto end;
@@ -282,38 +278,12 @@
 	dp_ahb = hdcp->init_data.dp_ahb;
 	dp_aux = hdcp->init_data.dp_aux;
 	dp_link = hdcp->init_data.dp_link;
-	qfprom_io = hdcp->init_data.qfprom_io;
 	reg_set = &hdcp->reg_set;
 
-	/* On compatible hardware, use SW keys */
-	reg_val = DSS_REG_R(qfprom_io, SEC_CTRL_HW_VERSION);
-	if (reg_val >= HDCP_SEL_MIN_SEC_VERSION) {
-		reg_val = DSS_REG_R(qfprom_io,
-			QFPROM_RAW_FEAT_CONFIG_ROW0_MSB +
-			QFPROM_RAW_VERSION_4);
-
-		if (!(reg_val & BIT(23)))
-			use_sw_keys = true;
-	}
-
-	if (use_sw_keys) {
-		if (hdcp1_set_keys(&aksv_msb, &aksv_lsb)) {
-			pr_err("setting hdcp SW keys failed\n");
-			rc = -EINVAL;
-			goto end;
-		}
-	} else {
-		/* Fetch aksv from QFPROM, this info should be public. */
-		ksv_lsb_addr = HDCP_KSV_LSB;
-		ksv_msb_addr = HDCP_KSV_MSB;
-
-		if (hdcp->init_data.sec_access) {
-			ksv_lsb_addr += HDCP_KSV_VERSION_4_OFFSET;
-			ksv_msb_addr += HDCP_KSV_VERSION_4_OFFSET;
-		}
-
-		aksv_lsb = DSS_REG_R(qfprom_io, ksv_lsb_addr);
-		aksv_msb = DSS_REG_R(qfprom_io, ksv_msb_addr);
+	if (hdcp1_set_keys(&aksv_msb, &aksv_lsb)) {
+		pr_err("setting hdcp SW keys failed\n");
+		rc = -EINVAL;
+		goto end;
 	}
 
 	pr_debug("%s: AKSV=%02x%08x\n", SDE_HDCP_STATE_NAME,
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index af267c3..ee5883f 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -147,9 +147,6 @@
 	struct drm_gem_object *obj = buffer->priv;
 	int ret = 0;
 
-	if (WARN_ON(!obj->filp))
-		return -EINVAL;
-
 	ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
 	if (ret < 0)
 		return ret;
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 27cb424..6f65846 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -369,6 +369,7 @@
 	drm_panel_remove(&panel->base);
 
 	panel_simple_disable(&panel->base);
+	panel_simple_unprepare(&panel->base);
 
 	if (panel->ddc)
 		put_device(&panel->ddc->dev);
@@ -384,6 +385,7 @@
 	struct panel_simple *panel = dev_get_drvdata(dev);
 
 	panel_simple_disable(&panel->base);
+	panel_simple_unprepare(&panel->base);
 }
 
 static const struct drm_display_mode ampire_am800480r3tmqwa1h_mode = {
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 432cb46..fd7682b 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -45,34 +45,32 @@
 
 /***** radeon AUX functions *****/
 
-/* Atom needs data in little endian format
- * so swap as appropriate when copying data to
- * or from atom. Note that atom operates on
- * dw units.
+/* Atom needs data in little endian format so swap as appropriate when copying
+ * data to or from atom. Note that atom operates on dw units.
+ *
+ * Use to_le=true when sending data to atom and provide at least
+ * ALIGN(num_bytes,4) bytes in the dst buffer.
+ *
+ * Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4)
+ * bytes in the src buffer.
  */
 void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
 {
 #ifdef __BIG_ENDIAN
-	u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
-	u32 *dst32, *src32;
+	u32 src_tmp[5], dst_tmp[5];
 	int i;
+	u8 align_num_bytes = ALIGN(num_bytes, 4);
 
-	memcpy(src_tmp, src, num_bytes);
-	src32 = (u32 *)src_tmp;
-	dst32 = (u32 *)dst_tmp;
 	if (to_le) {
-		for (i = 0; i < ((num_bytes + 3) / 4); i++)
-			dst32[i] = cpu_to_le32(src32[i]);
-		memcpy(dst, dst_tmp, num_bytes);
+		memcpy(src_tmp, src, num_bytes);
+		for (i = 0; i < align_num_bytes / 4; i++)
+			dst_tmp[i] = cpu_to_le32(src_tmp[i]);
+		memcpy(dst, dst_tmp, align_num_bytes);
 	} else {
-		u8 dws = num_bytes & ~3;
-		for (i = 0; i < ((num_bytes + 3) / 4); i++)
-			dst32[i] = le32_to_cpu(src32[i]);
-		memcpy(dst, dst_tmp, dws);
-		if (num_bytes % 4) {
-			for (i = 0; i < (num_bytes % 4); i++)
-				dst[dws+i] = dst_tmp[dws+i];
-		}
+		memcpy(src_tmp, src, align_num_bytes);
+		for (i = 0; i < align_num_bytes / 4; i++)
+			dst_tmp[i] = le32_to_cpu(src_tmp[i]);
+		memcpy(dst, dst_tmp, num_bytes);
 	}
 #else
 	memcpy(dst, src, num_bytes);
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 0daad44..af84705 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -252,7 +252,6 @@
 	}
 
 	info->par = rfbdev;
-	info->skip_vt_switch = true;
 
 	ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
 	if (ret) {
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 13ba73f..8bd9e6c 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -3029,6 +3029,16 @@
 			max_sclk = 75000;
 			max_mclk = 80000;
 		}
+	} else if (rdev->family == CHIP_OLAND) {
+		if ((rdev->pdev->revision == 0xC7) ||
+		    (rdev->pdev->revision == 0x80) ||
+		    (rdev->pdev->revision == 0x81) ||
+		    (rdev->pdev->revision == 0x83) ||
+		    (rdev->pdev->revision == 0x87) ||
+		    (rdev->pdev->device == 0x6604) ||
+		    (rdev->pdev->device == 0x6605)) {
+			max_sclk = 75000;
+		}
 	}
 	/* Apply dpm quirks */
 	while (p && p->chip_device != 0) {
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 6e6c59a..223944a 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -172,7 +172,7 @@
 	ret = sun4i_backend_drm_format_to_layer(plane, fb->pixel_format, &val);
 	if (ret) {
 		DRM_DEBUG_DRIVER("Invalid format\n");
-		return val;
+		return ret;
 	}
 
 	regmap_update_bits(backend->regs, SUN4I_BACKEND_ATTCTL_REG1(layer),
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index bf6e216..7d22f98 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -473,6 +473,7 @@
 	INIT_LIST_HEAD(&fbo->lru);
 	INIT_LIST_HEAD(&fbo->swap);
 	INIT_LIST_HEAD(&fbo->io_reserve_lru);
+	mutex_init(&fbo->wu_mutex);
 	fbo->moving = NULL;
 	drm_vma_node_reset(&fbo->vma_node);
 	atomic_set(&fbo->cpu_writers, 0);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index fefb9d9..81f5a55 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2729,6 +2729,8 @@
 	}
 
 	view_type = vmw_view_cmd_to_type(header->id);
+	if (view_type == vmw_view_max)
+		return -EINVAL;
 	cmd = container_of(header, typeof(*cmd), header);
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 				user_surface_converter,
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index 5bfed6f..fef45ec 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -941,6 +941,8 @@
 #define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_H	0x1F84D
 #define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_L	0x1F84E
 #define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_H	0x1F84F
+#define A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_L	0x1F888
+#define A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_H	0x1F889
 #define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL	0x1F8C0
 #define A6XX_GMU_PWR_COL_INTER_FRAME_HYST	0x1F8C1
 #define A6XX_GMU_PWR_COL_SPTPRAC_HYST		0x1F8C2
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index 26c1c39..da37baf 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -335,8 +335,8 @@
 		.num_protected_regs = 0x20,
 		.busy_mask = 0xFFFFFFFE,
 		.gpmufw_name = "a630_gmu.bin",
-		.gpmu_major = 0x0,
-		.gpmu_minor = 0x005,
+		.gpmu_major = 0x1,
+		.gpmu_minor = 0x003,
 		.gpmu_tsens = 0x000C000D,
 		.max_power = 5448,
 	},
@@ -357,7 +357,7 @@
 		.busy_mask = 0xFFFFFFFE,
 		.gpmufw_name = "a630_gmu.bin",
 		.gpmu_major = 0x1,
-		.gpmu_minor = 0x001,
+		.gpmu_minor = 0x003,
 		.gpmu_tsens = 0x000C000D,
 		.max_power = 5448,
 	},
@@ -368,7 +368,7 @@
 		.minor = 5,
 		.patchid = ANY_ID,
 		.features = ADRENO_64BIT | ADRENO_RPMH |
-			ADRENO_GPMU | ADRENO_CONTENT_PROTECTION,
+			ADRENO_GPMU | ADRENO_CONTENT_PROTECTION | ADRENO_IFPC,
 		.sqefw_name = "a630_sqe.fw",
 		.zap_name = "a615_zap",
 		.gpudev = &adreno_a6xx_gpudev,
@@ -377,6 +377,6 @@
 		.busy_mask = 0xFFFFFFFE,
 		.gpmufw_name = "a630_gmu.bin",
 		.gpmu_major = 0x1,
-		.gpmu_minor = 0x001,
+		.gpmu_minor = 0x003,
 	},
 };
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 8d18fc2..942621e 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -19,6 +19,7 @@
 #include <linux/input.h>
 #include <linux/io.h>
 #include <soc/qcom/scm.h>
+#include <linux/nvmem-consumer.h>
 
 #include <linux/msm-bus-board.h>
 #include <linux/msm-bus.h>
@@ -755,6 +756,107 @@
 	return NULL;
 }
 
+static struct {
+	unsigned int quirk;
+	const char *prop;
+} adreno_quirks[] = {
+	 { ADRENO_QUIRK_TWO_PASS_USE_WFI, "qcom,gpu-quirk-two-pass-use-wfi" },
+	 { ADRENO_QUIRK_IOMMU_SYNC, "qcom,gpu-quirk-iommu-sync" },
+	 { ADRENO_QUIRK_CRITICAL_PACKETS, "qcom,gpu-quirk-critical-packets" },
+	 { ADRENO_QUIRK_FAULT_DETECT_MASK, "qcom,gpu-quirk-fault-detect-mask" },
+	 { ADRENO_QUIRK_DISABLE_RB_DP2CLOCKGATING,
+			"qcom,gpu-quirk-dp2clockgating-disable" },
+	 { ADRENO_QUIRK_DISABLE_LMLOADKILL,
+			"qcom,gpu-quirk-lmloadkill-disable" },
+	{ ADRENO_QUIRK_HFI_USE_REG, "qcom,gpu-quirk-hfi-use-reg" },
+	{ ADRENO_QUIRK_SECVID_SET_ONCE, "qcom,gpu-quirk-secvid-set-once" },
+	{ ADRENO_QUIRK_LIMIT_UCHE_GBIF_RW,
+			"qcom,gpu-quirk-limit-uche-gbif-rw" },
+};
+
+#if defined(CONFIG_NVMEM) && defined(CONFIG_QCOM_QFPROM)
+static struct device_node *
+adreno_get_soc_hw_revision_node(struct platform_device *pdev)
+{
+	struct device_node *node, *child;
+	struct nvmem_cell *cell;
+	ssize_t len;
+	u32 *buf, hw_rev, rev;
+
+	node = of_find_node_by_name(pdev->dev.of_node, "qcom,soc-hw-revisions");
+	if (node == NULL)
+		goto err;
+
+	/* read the soc hw revision and select revision node */
+	cell = nvmem_cell_get(&pdev->dev, "minor_rev");
+	if (IS_ERR_OR_NULL(cell)) {
+		if (PTR_ERR(cell) == -EPROBE_DEFER)
+			return (void *)cell;
+
+		KGSL_CORE_ERR("Unable to get nvmem cell: ret=%ld\n",
+				PTR_ERR(cell));
+		goto err;
+	}
+
+	buf = nvmem_cell_read(cell, &len);
+	nvmem_cell_put(cell);
+
+	if (IS_ERR_OR_NULL(buf)) {
+		KGSL_CORE_ERR("Unable to read nvmem cell: ret=%ld\n",
+				PTR_ERR(buf));
+		goto err;
+	}
+
+	hw_rev = *buf;
+	kfree(buf);
+
+	for_each_child_of_node(node, child) {
+		if (of_property_read_u32(child, "reg", &rev))
+			continue;
+
+		if (rev == hw_rev)
+			return child;
+	}
+
+err:
+	/* fall back to parent node */
+	return pdev->dev.of_node;
+}
+#else
+static struct device_node *
+adreno_get_soc_hw_revision_node(struct platform_device *pdev)
+{
+	return pdev->dev.of_node;
+}
+#endif
+
+
+static int adreno_update_soc_hw_revision_quirks(
+		struct adreno_device *adreno_dev, struct platform_device *pdev)
+{
+	struct device_node *node;
+	int i;
+
+	node = adreno_get_soc_hw_revision_node(pdev);
+	if (IS_ERR(node))
+		return PTR_ERR(node);
+
+	/* get chip id, fall back to parent if revision node does not have it */
+	if (of_property_read_u32(node, "qcom,chipid", &adreno_dev->chipid))
+		if (of_property_read_u32(pdev->dev.of_node,
+				"qcom,chipid", &adreno_dev->chipid))
+			KGSL_DRV_FATAL(KGSL_DEVICE(adreno_dev),
+			"No GPU chip ID was specified\n");
+
+	/* update quirk */
+	for (i = 0; i < ARRAY_SIZE(adreno_quirks); i++) {
+		if (of_property_read_bool(node, adreno_quirks[i].prop))
+			adreno_dev->quirks |= adreno_quirks[i].quirk;
+	}
+
+	return 0;
+}
+
 static void
 adreno_identify_gpu(struct adreno_device *adreno_dev)
 {
@@ -762,11 +864,6 @@
 	struct adreno_gpudev *gpudev;
 	int i;
 
-	if (kgsl_property_read_u32(KGSL_DEVICE(adreno_dev), "qcom,chipid",
-		&adreno_dev->chipid))
-		KGSL_DRV_FATAL(KGSL_DEVICE(adreno_dev),
-			"No GPU chip ID was specified\n");
-
 	adreno_dev->gpucore = _get_gpu_core(adreno_dev->chipid);
 
 	if (adreno_dev->gpucore == NULL)
@@ -932,29 +1029,12 @@
 	return of_id ? (struct adreno_device *) of_id->data : NULL;
 }
 
-static struct {
-	unsigned int quirk;
-	const char *prop;
-} adreno_quirks[] = {
-	 { ADRENO_QUIRK_TWO_PASS_USE_WFI, "qcom,gpu-quirk-two-pass-use-wfi" },
-	 { ADRENO_QUIRK_IOMMU_SYNC, "qcom,gpu-quirk-iommu-sync" },
-	 { ADRENO_QUIRK_CRITICAL_PACKETS, "qcom,gpu-quirk-critical-packets" },
-	 { ADRENO_QUIRK_FAULT_DETECT_MASK, "qcom,gpu-quirk-fault-detect-mask" },
-	 { ADRENO_QUIRK_DISABLE_RB_DP2CLOCKGATING,
-			"qcom,gpu-quirk-dp2clockgating-disable" },
-	 { ADRENO_QUIRK_DISABLE_LMLOADKILL,
-			"qcom,gpu-quirk-lmloadkill-disable" },
-	{ ADRENO_QUIRK_HFI_USE_REG, "qcom,gpu-quirk-hfi-use-reg" },
-	{ ADRENO_QUIRK_SECVID_SET_ONCE, "qcom,gpu-quirk-secvid-set-once" },
-};
-
 static int adreno_of_get_power(struct adreno_device *adreno_dev,
 		struct platform_device *pdev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct device_node *node = pdev->dev.of_node;
 	struct resource *res;
-	int i;
 	unsigned int timeout;
 
 	if (of_property_read_string(node, "label", &pdev->name)) {
@@ -965,12 +1045,6 @@
 	if (adreno_of_read_property(node, "qcom,id", &pdev->id))
 		return -EINVAL;
 
-	/* Set up quirks and other boolean options */
-	for (i = 0; i < ARRAY_SIZE(adreno_quirks); i++) {
-		if (of_property_read_bool(node, adreno_quirks[i].prop))
-			adreno_dev->quirks |= adreno_quirks[i].quirk;
-	}
-
 	/* Get starting physical address of device registers */
 	res = platform_get_resource_byname(device->pdev, IORESOURCE_MEM,
 					   device->iomemname);
@@ -1084,6 +1158,33 @@
 		KGSL_DRV_WARN(device, "cx_dbgc ioremap failed\n");
 }
 
+static bool adreno_is_gpu_disabled(struct adreno_device *adreno_dev)
+{
+	unsigned int row0;
+	unsigned int pte_row0_msb[3];
+	int ret;
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+
+	if (of_property_read_u32_array(device->pdev->dev.of_node,
+		"qcom,gpu-disable-fuse", pte_row0_msb, 3))
+		return false;
+	/*
+	 * Read the fuse value to disable GPU driver if fuse
+	 * is blown. By default(fuse value is 0) GPU is enabled.
+	 */
+	if (adreno_efuse_map(adreno_dev))
+		return false;
+
+	ret = adreno_efuse_read_u32(adreno_dev, pte_row0_msb[0], &row0);
+	adreno_efuse_unmap(adreno_dev);
+
+	if (ret)
+		return false;
+
+	return (row0 >> pte_row0_msb[2]) &
+			pte_row0_msb[1] ? true : false;
+}
+
 static int adreno_probe(struct platform_device *pdev)
 {
 	struct kgsl_device *device;
@@ -1100,6 +1201,17 @@
 	device = KGSL_DEVICE(adreno_dev);
 	device->pdev = pdev;
 
+	if (adreno_is_gpu_disabled(adreno_dev)) {
+		pr_err("adreno: GPU is disabled on this device");
+		return -ENODEV;
+	}
+
+	status = adreno_update_soc_hw_revision_quirks(adreno_dev, pdev);
+	if (status) {
+		device->pdev = NULL;
+		return status;
+	}
+
 	/* Get the chip ID from the DT and set up target specific parameters */
 	adreno_identify_gpu(adreno_dev);
 
@@ -1483,7 +1595,7 @@
 		adreno_writereg64(adreno_dev,
 			ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
 			ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE_HI,
-			KGSL_IOMMU_SECURE_BASE);
+			KGSL_IOMMU_SECURE_BASE(&device->mmu));
 		adreno_writereg(adreno_dev,
 			ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_SIZE,
 			KGSL_IOMMU_SECURE_SIZE);
@@ -1814,11 +1926,6 @@
 
 error_mmu_off:
 	kgsl_mmu_stop(&device->mmu);
-	if (gpudev->oob_clear &&
-			ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) {
-		gpudev->oob_clear(adreno_dev,
-				OOB_BOOT_SLUMBER_CLEAR_MASK);
-	}
 
 error_pwr_off:
 	/* set the state back to original state */
@@ -2072,7 +2179,7 @@
 				 * anything to mmap().
 				 */
 				shadowprop.gpuaddr =
-					(unsigned int) device->memstore.gpuaddr;
+					(unsigned long)device->memstore.gpuaddr;
 				shadowprop.size = device->memstore.size;
 				/* GSL needs this to be set, even if it
 				 * appears to be meaningless
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 458a0fa..4fb0089 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -144,6 +144,12 @@
 #define ADRENO_QUIRK_HFI_USE_REG BIT(6)
 /* Only set protected SECVID registers once */
 #define ADRENO_QUIRK_SECVID_SET_ONCE BIT(7)
+/*
+ * Limit number of read and write transactions from
+ * UCHE block to GBIF to avoid possible deadlock
+ * between GBIF, SMMU and MEMNOC.
+ */
+#define ADRENO_QUIRK_LIMIT_UCHE_GBIF_RW BIT(8)
 
 /* Flags to control command packet settings */
 #define KGSL_CMD_FLAGS_NONE             0
@@ -1949,7 +1955,7 @@
 	return ret;
 }
 
-void adreno_gmu_fenced_write(struct adreno_device *adreno_dev,
+int adreno_gmu_fenced_write(struct adreno_device *adreno_dev,
 	enum adreno_regs offset, unsigned int val,
 	unsigned int fence_mask);
 #endif /*__ADRENO_H */
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index baf366e..a615dca 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -2424,8 +2424,8 @@
 	adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_CNTL,
 		A5XX_CP_RB_CNTL_DEFAULT);
 
-	adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_BASE,
-			rb->buffer_desc.gpuaddr);
+	adreno_writereg64(adreno_dev, ADRENO_REG_CP_RB_BASE,
+			ADRENO_REG_CP_RB_BASE_HI, rb->buffer_desc.gpuaddr);
 
 	ret = a5xx_microcode_load(adreno_dev);
 	if (ret)
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index b682abe..d8b347d 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -55,7 +55,6 @@
 
 static const struct adreno_vbif_data a615_gbif[] = {
 	{A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3},
-	{A6XX_UCHE_GBIF_GX_CONFIG, 0x10200F9},
 	{0, 0},
 };
 
@@ -64,6 +63,9 @@
 	{ adreno_is_a615, a615_gbif },
 };
 
+
+static unsigned long a6xx_oob_state_bitmask;
+
 struct kgsl_hwcg_reg {
 	unsigned int off;
 	unsigned int val;
@@ -325,7 +327,6 @@
 static struct reg_list_pair a6xx_ifpc_pwrup_reglist[] = {
 	{ A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x0 },
 	{ A6XX_CP_CHICKEN_DBG, 0x0 },
-	{ A6XX_CP_ADDR_MODE_CNTL, 0x0 },
 	{ A6XX_CP_DBG_ECO_CNTL, 0x0 },
 	{ A6XX_CP_PROTECT_CNTL, 0x0 },
 	{ A6XX_CP_PROTECT_REG, 0x0 },
@@ -363,7 +364,7 @@
 	{ A6XX_CP_AHB_CNTL, 0x0 },
 };
 
-static struct reg_list_pair a615_ifpc_pwrup_reglist[] = {
+static struct reg_list_pair a615_pwrup_reglist[] = {
 	{ A6XX_UCHE_GBIF_GX_CONFIG, 0x0 },
 };
 
@@ -378,6 +379,21 @@
 		A6XX_CP_ALWAYS_ON_COUNTER_HI;
 }
 
+static uint64_t read_AO_counter(struct kgsl_device *device)
+{
+	unsigned int l, h, h1;
+
+	kgsl_gmu_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_H, &h);
+	kgsl_gmu_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_L, &l);
+	kgsl_gmu_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_H, &h1);
+
+	if (h == h1)
+		return (uint64_t) l | ((uint64_t) h << 32);
+
+	kgsl_gmu_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_L, &l);
+	return (uint64_t) l | ((uint64_t) h1 << 32);
+}
+
 static void a6xx_pwrup_reglist_init(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -588,7 +604,6 @@
 	uint32_t i;
 	struct cpu_gpu_lock *lock;
 	struct reg_list_pair *r;
-	uint16_t a615_list_size = 0;
 
 	/* Set up the register values */
 	for (i = 0; i < ARRAY_SIZE(a6xx_ifpc_pwrup_reglist); i++) {
@@ -601,19 +616,6 @@
 		kgsl_regread(KGSL_DEVICE(adreno_dev), r->offset, &r->val);
 	}
 
-	if (adreno_is_a615(adreno_dev)) {
-		for (i = 0; i < ARRAY_SIZE(a615_ifpc_pwrup_reglist); i++) {
-			r = &a615_ifpc_pwrup_reglist[i];
-			kgsl_regread(KGSL_DEVICE(adreno_dev),
-				r->offset, &r->val);
-		}
-
-		a615_list_size = sizeof(a615_ifpc_pwrup_reglist);
-
-		memcpy(adreno_dev->pwrup_reglist.hostptr + sizeof(*lock),
-			a615_ifpc_pwrup_reglist, a615_list_size);
-	}
-
 	lock = (struct cpu_gpu_lock *) adreno_dev->pwrup_reglist.hostptr;
 	lock->flag_ucode = 0;
 	lock->flag_kmd = 0;
@@ -632,16 +634,29 @@
 	 * of the static IFPC-only register list.
 	 */
 	lock->list_length = (sizeof(a6xx_ifpc_pwrup_reglist) +
-			sizeof(a6xx_pwrup_reglist) + a615_list_size) >> 2;
-	lock->list_offset = (sizeof(a6xx_ifpc_pwrup_reglist) +
-			a615_list_size) >> 2;
+			sizeof(a6xx_pwrup_reglist)) >> 2;
+	lock->list_offset = sizeof(a6xx_ifpc_pwrup_reglist) >> 2;
 
-	memcpy(adreno_dev->pwrup_reglist.hostptr + sizeof(*lock)
-		+ a615_list_size,
+	memcpy(adreno_dev->pwrup_reglist.hostptr + sizeof(*lock),
 		a6xx_ifpc_pwrup_reglist, sizeof(a6xx_ifpc_pwrup_reglist));
 	memcpy(adreno_dev->pwrup_reglist.hostptr + sizeof(*lock)
-		+ sizeof(a6xx_ifpc_pwrup_reglist) + a615_list_size,
-		a6xx_pwrup_reglist, sizeof(a6xx_pwrup_reglist));
+		+ sizeof(a6xx_ifpc_pwrup_reglist), a6xx_pwrup_reglist,
+		sizeof(a6xx_pwrup_reglist));
+
+	if (adreno_is_a615(adreno_dev)) {
+		for (i = 0; i < ARRAY_SIZE(a615_pwrup_reglist); i++) {
+			r = &a615_pwrup_reglist[i];
+			kgsl_regread(KGSL_DEVICE(adreno_dev),
+				r->offset, &r->val);
+		}
+
+		memcpy(adreno_dev->pwrup_reglist.hostptr + sizeof(*lock)
+			+ sizeof(a6xx_ifpc_pwrup_reglist)
+			+ sizeof(a6xx_pwrup_reglist), a615_pwrup_reglist,
+			sizeof(a615_pwrup_reglist));
+
+		lock->list_length += sizeof(a615_pwrup_reglist);
+	}
 }
 
 /*
@@ -670,6 +685,9 @@
 	adreno_vbif_start(adreno_dev, a6xx_vbif_platforms,
 			ARRAY_SIZE(a6xx_vbif_platforms));
 
+	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_LIMIT_UCHE_GBIF_RW))
+		kgsl_regwrite(device, A6XX_UCHE_GBIF_GX_CONFIG, 0x10200F9);
+
 	/* Make all blocks contribute to the GPU BUSY perf counter */
 	kgsl_regwrite(device, A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);
 
@@ -1066,8 +1084,8 @@
 	adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_CNTL,
 					A6XX_CP_RB_CNTL_DEFAULT);
 
-	adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_BASE,
-			rb->buffer_desc.gpuaddr);
+	adreno_writereg64(adreno_dev, ADRENO_REG_CP_RB_BASE,
+			ADRENO_REG_CP_RB_BASE_HI, rb->buffer_desc.gpuaddr);
 
 	ret = a6xx_microcode_load(adreno_dev);
 	if (ret)
@@ -1434,7 +1452,7 @@
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	int ret = 0;
 
-	if (!kgsl_gmu_isenabled(device))
+	if (!kgsl_gmu_isenabled(device) || !clear_mask)
 		return 0;
 
 	kgsl_gmu_regwrite(device, A6XX_GMU_HOST2GMU_INTR_SET, set_mask);
@@ -1450,6 +1468,8 @@
 
 	kgsl_gmu_regwrite(device, A6XX_GMU_GMU2HOST_INTR_CLR, clear_mask);
 
+	set_bit((fls(clear_mask) - 1), &a6xx_oob_state_bitmask);
+
 	trace_kgsl_gmu_oob_set(set_mask);
 	return ret;
 }
@@ -1464,10 +1484,15 @@
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 
-	if (!kgsl_gmu_isenabled(device))
+	if (!kgsl_gmu_isenabled(device) || !clear_mask)
 		return;
 
-	kgsl_gmu_regwrite(device, A6XX_GMU_HOST2GMU_INTR_SET, clear_mask);
+	if (test_and_clear_bit(fls(clear_mask) - 1,
+				&a6xx_oob_state_bitmask))
+		kgsl_gmu_regwrite(device,
+			A6XX_GMU_HOST2GMU_INTR_SET,
+			clear_mask);
+
 	trace_kgsl_gmu_oob_clear(clear_mask);
 }
 
@@ -1548,7 +1573,7 @@
 #define SP_CLK_OFF		BIT(4)
 #define GX_GDSC_POWER_OFF	BIT(6)
 #define GX_CLK_OFF		BIT(7)
-
+#define is_on(val)		(!(val & (GX_GDSC_POWER_OFF | GX_CLK_OFF)))
 /*
  * a6xx_gx_is_on() - Check if GX is on using pwr status register
  * @adreno_dev - Pointer to adreno_device
@@ -1564,7 +1589,7 @@
 		return true;
 
 	kgsl_gmu_regread(device, A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, &val);
-	return !(val & (GX_GDSC_POWER_OFF | GX_CLK_OFF));
+	return is_on(val);
 }
 
 /*
@@ -1940,48 +1965,65 @@
 	return true;
 }
 
+static bool idle_trandition_complete(unsigned int idle_level,
+	unsigned int gmu_power_reg,
+	unsigned int sptprac_clk_reg)
+{
+	if (idle_level != gmu_power_reg)
+		return false;
+
+	switch (idle_level) {
+	case GPU_HW_IFPC:
+		if (is_on(sptprac_clk_reg))
+			return false;
+		break;
+	/* other GMU idle levels can be added here */
+	case GPU_HW_ACTIVE:
+	default:
+		break;
+	}
+	return true;
+}
+
 static int a6xx_wait_for_lowest_idle(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct gmu_device *gmu = &device->gmu;
-	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
-	unsigned int reg;
+	unsigned int reg, reg1;
 	unsigned long t;
+	uint64_t ts1, ts2, ts3;
 
 	if (!kgsl_gmu_isenabled(device))
 		return 0;
 
+	ts1 = read_AO_counter(device);
+
 	t = jiffies + msecs_to_jiffies(GMU_IDLE_TIMEOUT);
-	while (!time_after(jiffies, t)) {
-		adreno_read_gmureg(ADRENO_DEVICE(device),
-				ADRENO_REG_GMU_RPMH_POWER_STATE, &reg);
+	do {
+		kgsl_gmu_regread(device,
+			A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE, &reg);
+		kgsl_gmu_regread(device,
+			A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, &reg1);
 
-		/* SPTPRAC PC has the same idle level as IFPC */
-		if ((reg == gmu->idle_level) ||
-				(gmu->idle_level == GPU_HW_SPTP_PC &&
-				reg == GPU_HW_IFPC)) {
-			/* IFPC is not complete until GX is off */
-			if (gmu->idle_level != GPU_HW_IFPC ||
-					!gpudev->gx_is_on(adreno_dev))
-				return 0;
-		}
-
+		if (idle_trandition_complete(gmu->idle_level, reg, reg1))
+			return 0;
 		/* Wait 100us to reduce unnecessary AHB bus traffic */
 		usleep_range(10, 100);
-	}
+	} while (!time_after(jiffies, t));
 
+	ts2 = read_AO_counter(device);
 	/* Check one last time */
-	adreno_read_gmureg(ADRENO_DEVICE(device),
-			ADRENO_REG_GMU_RPMH_POWER_STATE, &reg);
-	if ((reg == gmu->idle_level) ||
-			(gmu->idle_level == GPU_HW_SPTP_PC &&
-			reg == GPU_HW_IFPC)) {
-		if (gmu->idle_level != GPU_HW_IFPC ||
-				!gpudev->gx_is_on(adreno_dev))
-			return 0;
-	}
 
-	WARN(1, "Timeout waiting for lowest idle level: %d\n", reg);
+	kgsl_gmu_regread(device, A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE, &reg);
+	kgsl_gmu_regread(device, A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, &reg1);
+
+	if (idle_trandition_complete(gmu->idle_level, reg, reg1))
+		return 0;
+
+	ts3 = read_AO_counter(device);
+	WARN(1, "Timeout waiting for lowest idle: %08x %llx %llx %llx %x\n",
+		reg, ts1, ts2, ts3, reg1);
+
 	return -ETIMEDOUT;
 }
 
@@ -2274,6 +2316,9 @@
 		ret = a6xx_gmu_suspend(device);
 		break;
 	case GMU_FW_STOP:
+		if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
+			a6xx_oob_clear(adreno_dev,
+					OOB_BOOT_SLUMBER_CLEAR_MASK);
 		ret = a6xx_rpmh_power_off_gpu(device);
 		break;
 	case GMU_DCVS_NOHFI:
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
index 5572cd7..afd1be5 100644
--- a/drivers/gpu/msm/adreno_a6xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -376,6 +376,7 @@
 	A6XX_DBGBUS_CX           = 0x17,
 	A6XX_DBGBUS_GMU_GX       = 0x18,
 	A6XX_DBGBUS_TPFCHE       = 0x19,
+	A6XX_DBGBUS_GBIF_GX      = 0x1a,
 	A6XX_DBGBUS_GPC          = 0x1d,
 	A6XX_DBGBUS_LARC         = 0x1e,
 	A6XX_DBGBUS_HLSQ_SPTP    = 0x1f,
@@ -1161,11 +1162,14 @@
 	}
 
 	header->id = block->block_id;
+	if ((block->block_id == A6XX_DBGBUS_VBIF) &&
+		adreno_has_gbif(adreno_dev))
+		header->id = A6XX_DBGBUS_GBIF_GX;
 	header->count = dwords * 2;
 
 	block_id = block->block_id;
 	/* GMU_GX data is read using the GMU_CX block id on A630 */
-	if (adreno_is_a630(adreno_dev) &&
+	if ((adreno_is_a630(adreno_dev) || adreno_is_a615(adreno_dev)) &&
 		(block_id == A6XX_DBGBUS_GMU_GX))
 		block_id = A6XX_DBGBUS_GMU_CX;
 
@@ -1428,18 +1432,18 @@
 				KGSL_SNAPSHOT_SECTION_DEBUGBUS,
 				snapshot, a6xx_snapshot_cx_dbgc_debugbus_block,
 				(void *) &a6xx_cx_dbgc_debugbus_blocks[i]);
-			/*
-			 * Get debugbus for GBIF CX part if GPU has GBIF block
-			 * GBIF uses exactly same ID as of VBIF so use
-			 * it as it is.
-			 */
-			if (adreno_has_gbif(adreno_dev))
-				kgsl_snapshot_add_section(device,
-					KGSL_SNAPSHOT_SECTION_DEBUGBUS,
-					snapshot,
-					a6xx_snapshot_cx_dbgc_debugbus_block,
-					(void *) &a6xx_vbif_debugbus_blocks);
 		}
+		/*
+		 * Get debugbus for GBIF CX part if GPU has GBIF block
+		 * GBIF uses exactly same ID as of VBIF so use
+		 * it as it is.
+		 */
+		if (adreno_has_gbif(adreno_dev))
+			kgsl_snapshot_add_section(device,
+				KGSL_SNAPSHOT_SECTION_DEBUGBUS,
+				snapshot,
+				a6xx_snapshot_cx_dbgc_debugbus_block,
+				(void *) &a6xx_vbif_debugbus_blocks);
 	}
 }
 
@@ -1474,6 +1478,8 @@
 				a6xx_gmu_gx_registers,
 				ARRAY_SIZE(a6xx_gmu_gx_registers) / 2);
 	}
+
+	a6xx_snapshot_debugbus(device, snapshot);
 }
 
 /* a6xx_snapshot_sqe() - Dump SQE data in snapshot */
@@ -1631,8 +1637,6 @@
 		a6xx_snapshot_dbgahb_regs(device, snapshot);
 	}
 
-	a6xx_snapshot_debugbus(device, snapshot);
-
 }
 
 static int _a6xx_crashdump_init_mvc(uint64_t *ptr, uint64_t *offset)
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 52a35c4..5168d9e 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -89,6 +89,7 @@
 		struct adreno_ringbuffer *rb)
 {
 	unsigned long flags;
+	int ret = 0;
 
 	spin_lock_irqsave(&rb->preempt_lock, flags);
 	if (adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE)) {
@@ -104,7 +105,7 @@
 			 * Ensure the write posted after a possible
 			 * GMU wakeup (write could have dropped during wakeup)
 			 */
-			adreno_gmu_fenced_write(adreno_dev,
+			ret = adreno_gmu_fenced_write(adreno_dev,
 				ADRENO_REG_CP_RB_WPTR, rb->_wptr,
 				FENCE_STATUS_WRITEDROPPED0_MASK);
 
@@ -113,6 +114,10 @@
 
 	rb->wptr = rb->_wptr;
 	spin_unlock_irqrestore(&rb->preempt_lock, flags);
+
+	if (ret)
+		kgsl_device_snapshot(KGSL_DEVICE(adreno_dev), NULL, false);
+
 }
 
 void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb,
diff --git a/drivers/gpu/msm/adreno_sysfs.c b/drivers/gpu/msm/adreno_sysfs.c
index 2d2c9e5..022aa9f 100644
--- a/drivers/gpu/msm/adreno_sysfs.c
+++ b/drivers/gpu/msm/adreno_sysfs.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -31,7 +31,7 @@
 
 #define _ADRENO_SYSFS_ATTR_RO(_name, __show) \
 struct adreno_sysfs_attribute adreno_attr_##_name = { \
-	.attr = __ATTR(_name, 0644, __show, NULL), \
+	.attr = __ATTR(_name, 0444, __show, NULL), \
 	.show = _ ## _name ## _show, \
 	.store = NULL, \
 }
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index df06a0d..52d45bb 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1307,10 +1307,13 @@
 	do {
 		if (!regulator_is_enabled(gmu->cx_gdsc))
 			return 0;
-		cond_resched();
+		usleep_range(10, 100);
 
 	} while (!(time_after(jiffies, t)));
 
+	if (!regulator_is_enabled(gmu->cx_gdsc))
+		return 0;
+
 	dev_err(&gmu->pdev->dev, "GMU CX gdsc off timeout");
 	return -ETIMEDOUT;
 }
@@ -1630,7 +1633,7 @@
  * the write to the fenced register went through. If it didn't then we retry
  * the write until it goes through or we time out.
  */
-void adreno_gmu_fenced_write(struct adreno_device *adreno_dev,
+int adreno_gmu_fenced_write(struct adreno_device *adreno_dev,
 		enum adreno_regs offset, unsigned int val,
 		unsigned int fence_mask)
 {
@@ -1639,7 +1642,7 @@
 	adreno_writereg(adreno_dev, offset, val);
 
 	if (!kgsl_gmu_isenabled(KGSL_DEVICE(adreno_dev)))
-		return;
+		return 0;
 
 	for (i = 0; i < GMU_WAKEUP_RETRY_MAX; i++) {
 		adreno_read_gmureg(adreno_dev, ADRENO_REG_GMU_AHB_FENCE_STATUS,
@@ -1650,7 +1653,7 @@
 		 * was successful
 		 */
 		if (!(status & fence_mask))
-			return;
+			return 0;
 		/* Wait a small amount of time before trying again */
 		udelay(GMU_WAKEUP_DELAY_US);
 
@@ -1660,4 +1663,5 @@
 
 	dev_err(adreno_dev->dev.dev,
 		"GMU fenced register write timed out: reg %x\n", offset);
+	return -ETIMEDOUT;
 }
diff --git a/drivers/gpu/msm/kgsl_gmu.h b/drivers/gpu/msm/kgsl_gmu.h
index 90e87e4..19fa972 100644
--- a/drivers/gpu/msm/kgsl_gmu.h
+++ b/drivers/gpu/msm/kgsl_gmu.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -62,7 +62,7 @@
 #define CXGXCPUBUSYIGNAHB	BIT(30)
 
 /* GMU timeouts */
-#define GMU_IDLE_TIMEOUT        10 /* ms */
+#define GMU_IDLE_TIMEOUT        100 /* ms */
 
 /* Constants for GMU OOBs */
 #define OOB_BOOT_OPTION         0
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 0338c5fd..4b11bbe 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -35,12 +35,14 @@
 #include "kgsl_pwrctrl.h"
 
 #define CP_APERTURE_REG	0
+#define CP_SMMU_APERTURE_ID 0x1B
 
 #define _IOMMU_PRIV(_mmu) (&((_mmu)->priv.iommu))
 
-#define ADDR_IN_GLOBAL(_a) \
-	(((_a) >= KGSL_IOMMU_GLOBAL_MEM_BASE) && \
-	 ((_a) < (KGSL_IOMMU_GLOBAL_MEM_BASE + KGSL_IOMMU_GLOBAL_MEM_SIZE)))
+#define ADDR_IN_GLOBAL(_mmu, _a) \
+	(((_a) >= KGSL_IOMMU_GLOBAL_MEM_BASE(_mmu)) && \
+	 ((_a) < (KGSL_IOMMU_GLOBAL_MEM_BASE(_mmu) + \
+	 KGSL_IOMMU_GLOBAL_MEM_SIZE)))
 
 /*
  * Flag to set SMMU memory attributes required to
@@ -184,7 +186,8 @@
 	if (entry != NULL) {
 		struct kgsl_pagetable *pagetable = device->mmu.securepagetable;
 		entry->pagetable = pagetable;
-		entry->gpuaddr = KGSL_IOMMU_SECURE_BASE + secure_global_size;
+		entry->gpuaddr = KGSL_IOMMU_SECURE_BASE(&device->mmu) +
+			secure_global_size;
 
 		ret = kgsl_mmu_map(pagetable, entry);
 		if (ret == 0)
@@ -223,7 +226,8 @@
 			KGSL_IOMMU_GLOBAL_MEM_SIZE))
 		return;
 
-	memdesc->gpuaddr = KGSL_IOMMU_GLOBAL_MEM_BASE + global_pt_alloc;
+	memdesc->gpuaddr = KGSL_IOMMU_GLOBAL_MEM_BASE(mmu) + global_pt_alloc;
+
 	memdesc->priv |= KGSL_MEMDESC_GLOBAL;
 	global_pt_alloc += memdesc->size;
 
@@ -641,7 +645,7 @@
 	/* Set the maximum possible size as an initial value */
 	nextentry->gpuaddr = (uint64_t) -1;
 
-	if (ADDR_IN_GLOBAL(faultaddr)) {
+	if (ADDR_IN_GLOBAL(mmu, faultaddr)) {
 		_get_global_entries(faultaddr, preventry, nextentry);
 	} else if (context) {
 		private = context->proc_priv;
@@ -1030,13 +1034,13 @@
 		struct kgsl_iommu_pt *pt)
 {
 	if (mmu->secured && pagetable->name == KGSL_MMU_SECURE_PT) {
-		pt->compat_va_start = KGSL_IOMMU_SECURE_BASE;
-		pt->compat_va_end = KGSL_IOMMU_SECURE_END;
-		pt->va_start = KGSL_IOMMU_SECURE_BASE;
-		pt->va_end = KGSL_IOMMU_SECURE_END;
+		pt->compat_va_start = KGSL_IOMMU_SECURE_BASE(mmu);
+		pt->compat_va_end = KGSL_IOMMU_SECURE_END(mmu);
+		pt->va_start = KGSL_IOMMU_SECURE_BASE(mmu);
+		pt->va_end = KGSL_IOMMU_SECURE_END(mmu);
 	} else {
 		pt->compat_va_start = KGSL_IOMMU_SVM_BASE32;
-		pt->compat_va_end = KGSL_IOMMU_SVM_END32;
+		pt->compat_va_end = KGSL_IOMMU_SECURE_BASE(mmu);
 		pt->va_start = KGSL_IOMMU_VA_BASE64;
 		pt->va_end = KGSL_IOMMU_VA_END64;
 	}
@@ -1045,7 +1049,7 @@
 		pagetable->name != KGSL_MMU_SECURE_PT) {
 		if ((BITS_PER_LONG == 32) || is_compat_task()) {
 			pt->svm_start = KGSL_IOMMU_SVM_BASE32;
-			pt->svm_end = KGSL_IOMMU_SVM_END32;
+			pt->svm_end = KGSL_IOMMU_SECURE_BASE(mmu);
 		} else {
 			pt->svm_start = KGSL_IOMMU_SVM_BASE64;
 			pt->svm_end = KGSL_IOMMU_SVM_END64;
@@ -1059,19 +1063,19 @@
 {
 	if (mmu->secured) {
 		if (pagetable->name == KGSL_MMU_SECURE_PT) {
-			pt->compat_va_start = KGSL_IOMMU_SECURE_BASE;
-			pt->compat_va_end = KGSL_IOMMU_SECURE_END;
-			pt->va_start = KGSL_IOMMU_SECURE_BASE;
-			pt->va_end = KGSL_IOMMU_SECURE_END;
+			pt->compat_va_start = KGSL_IOMMU_SECURE_BASE(mmu);
+			pt->compat_va_end = KGSL_IOMMU_SECURE_END(mmu);
+			pt->va_start = KGSL_IOMMU_SECURE_BASE(mmu);
+			pt->va_end = KGSL_IOMMU_SECURE_END(mmu);
 		} else {
 			pt->va_start = KGSL_IOMMU_SVM_BASE32;
-			pt->va_end = KGSL_IOMMU_SECURE_BASE;
+			pt->va_end = KGSL_IOMMU_SECURE_BASE(mmu);
 			pt->compat_va_start = pt->va_start;
 			pt->compat_va_end = pt->va_end;
 		}
 	} else {
 		pt->va_start = KGSL_IOMMU_SVM_BASE32;
-		pt->va_end = KGSL_IOMMU_GLOBAL_MEM_BASE;
+		pt->va_end = KGSL_IOMMU_GLOBAL_MEM_BASE(mmu);
 		pt->compat_va_start = pt->va_start;
 		pt->compat_va_end = pt->va_end;
 	}
@@ -1166,7 +1170,7 @@
 	desc.args[3] = 0xFFFFFFFF;
 	desc.arginfo = SCM_ARGS(4);
 
-	return scm_call2(SCM_SIP_FNID(SCM_SVC_MP, 0x1B), &desc);
+	return scm_call2(SCM_SIP_FNID(SCM_SVC_MP, CP_SMMU_APERTURE_ID), &desc);
 }
 
 static int _init_global_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
@@ -1209,7 +1213,8 @@
 		goto done;
 	}
 
-	if (!MMU_FEATURE(mmu, KGSL_MMU_GLOBAL_PAGETABLE)) {
+	if (!MMU_FEATURE(mmu, KGSL_MMU_GLOBAL_PAGETABLE) &&
+		scm_is_call_available(SCM_SVC_MP, CP_SMMU_APERTURE_ID)) {
 		ret = program_smmu_aperture(cb_num, CP_APERTURE_REG);
 		if (ret) {
 			pr_err("SMMU aperture programming call failed with error %d\n",
@@ -2381,7 +2386,8 @@
 	struct rb_node *node;
 
 	/* Make sure the requested address doesn't fall in the global range */
-	if (ADDR_IN_GLOBAL(gpuaddr) || ADDR_IN_GLOBAL(gpuaddr + size))
+	if (ADDR_IN_GLOBAL(pagetable->mmu, gpuaddr) ||
+			ADDR_IN_GLOBAL(pagetable->mmu, gpuaddr + size))
 		return -ENOMEM;
 
 	spin_lock(&pagetable->lock);
diff --git a/drivers/gpu/msm/kgsl_iommu.h b/drivers/gpu/msm/kgsl_iommu.h
index acf8ae4..462ff3b 100644
--- a/drivers/gpu/msm/kgsl_iommu.h
+++ b/drivers/gpu/msm/kgsl_iommu.h
@@ -24,12 +24,17 @@
  * are mapped into all pagetables.
  */
 #define KGSL_IOMMU_GLOBAL_MEM_SIZE	(20 * SZ_1M)
-#define KGSL_IOMMU_GLOBAL_MEM_BASE	0xf8000000
+#define KGSL_IOMMU_GLOBAL_MEM_BASE32	0xf8000000
+#define KGSL_IOMMU_GLOBAL_MEM_BASE64	TASK_SIZE_32
+
+#define KGSL_IOMMU_GLOBAL_MEM_BASE(__mmu)	\
+	(MMU_FEATURE(__mmu, KGSL_MMU_64BIT) ? \
+		KGSL_IOMMU_GLOBAL_MEM_BASE64 : KGSL_IOMMU_GLOBAL_MEM_BASE32)
 
 #define KGSL_IOMMU_SECURE_SIZE SZ_256M
-#define KGSL_IOMMU_SECURE_END KGSL_IOMMU_GLOBAL_MEM_BASE
-#define KGSL_IOMMU_SECURE_BASE	\
-	(KGSL_IOMMU_GLOBAL_MEM_BASE - KGSL_IOMMU_SECURE_SIZE)
+#define KGSL_IOMMU_SECURE_END(_mmu) KGSL_IOMMU_GLOBAL_MEM_BASE(_mmu)
+#define KGSL_IOMMU_SECURE_BASE(_mmu)	\
+	(KGSL_IOMMU_GLOBAL_MEM_BASE(_mmu) - KGSL_IOMMU_SECURE_SIZE)
 
 #define KGSL_IOMMU_SVM_BASE32		0x300000
 #define KGSL_IOMMU_SVM_END32		(0xC0000000 - SZ_16M)
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index cda7a5b..7509ceb 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -77,6 +77,12 @@
 static void kgsl_pwrctrl_request_state(struct kgsl_device *device,
 				unsigned int state);
 static int _isense_clk_set_rate(struct kgsl_pwrctrl *pwr, int level);
+static int kgsl_pwrctrl_clk_set_rate(struct clk *grp_clk, unsigned int freq,
+				const char *name);
+static void _gpu_clk_prepare_enable(struct kgsl_device *device,
+				struct clk *clk, const char *name);
+static void _bimc_clk_prepare_enable(struct kgsl_device *device,
+				struct clk *clk, const char *name);
 
 /**
  * _record_pwrevent() - Record the history of the new event
@@ -165,9 +171,10 @@
 					int popp)
 {
 	unsigned int max_pwrlevel = max_t(unsigned int, pwr->thermal_pwrlevel,
-		pwr->max_pwrlevel);
-	unsigned int min_pwrlevel = max_t(unsigned int, pwr->thermal_pwrlevel,
-		pwr->min_pwrlevel);
+					pwr->max_pwrlevel);
+	unsigned int min_pwrlevel = min_t(unsigned int,
+					pwr->thermal_pwrlevel_floor,
+					pwr->min_pwrlevel);
 
 	switch (pwrc->type) {
 	case KGSL_CONSTRAINT_PWRLEVEL: {
@@ -260,7 +267,8 @@
 		clear_bit(GMU_DCVS_REPLAY, &gmu->flags);
 	} else
 		/* Linux clock driver scales GPU freq */
-		ret = clk_set_rate(pwr->grp_clks[0], pl->gpu_freq);
+		ret = kgsl_pwrctrl_clk_set_rate(pwr->grp_clks[0],
+			pl->gpu_freq, clocks[0]);
 
 	if (ret)
 		KGSL_PWR_ERR(device, "GPU clk freq set failure: %d\n", ret);
@@ -477,9 +485,12 @@
 	if (pwr->gpu_bimc_int_clk) {
 		if (pwr->active_pwrlevel == 0 &&
 				!pwr->gpu_bimc_interface_enabled) {
-			clk_set_rate(pwr->gpu_bimc_int_clk,
-					pwr->gpu_bimc_int_clk_freq);
-			clk_prepare_enable(pwr->gpu_bimc_int_clk);
+			kgsl_pwrctrl_clk_set_rate(pwr->gpu_bimc_int_clk,
+					pwr->gpu_bimc_int_clk_freq,
+					"bimc_gpu_clk");
+			_bimc_clk_prepare_enable(device,
+					pwr->gpu_bimc_int_clk,
+					"bimc_gpu_clk");
 			pwr->gpu_bimc_interface_enabled = 1;
 		} else if (pwr->previous_pwrlevel == 0
 				&& pwr->gpu_bimc_interface_enabled) {
@@ -1740,24 +1751,23 @@
 					_isense_clk_set_rate(pwr,
 						pwr->active_pwrlevel);
 				}
-
-				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
-					clk_prepare(pwr->grp_clks[i]);
 			}
-			/*
-			 * as last step, enable grp_clk
-			 * this is to let GPU interrupt to come
-			 */
+
 			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
-				clk_enable(pwr->grp_clks[i]);
+				_gpu_clk_prepare_enable(device,
+						pwr->grp_clks[i], clocks[i]);
+
 			/* Enable the gpu-bimc-interface clocks */
 			if (pwr->gpu_bimc_int_clk) {
 				if (pwr->active_pwrlevel == 0 &&
 					!pwr->gpu_bimc_interface_enabled) {
-					clk_set_rate(pwr->gpu_bimc_int_clk,
-						pwr->gpu_bimc_int_clk_freq);
-					clk_prepare_enable(
-						pwr->gpu_bimc_int_clk);
+					kgsl_pwrctrl_clk_set_rate(
+						pwr->gpu_bimc_int_clk,
+						pwr->gpu_bimc_int_clk_freq,
+						"bimc_gpu_clk");
+					_bimc_clk_prepare_enable(device,
+						pwr->gpu_bimc_int_clk,
+						"bimc_gpu_clk");
 					pwr->gpu_bimc_interface_enabled = 1;
 				}
 			}
@@ -2085,7 +2095,54 @@
 	rate = clk_round_rate(pwr->grp_clks[pwr->isense_clk_indx],
 		level > pwr->isense_clk_on_level ?
 		KGSL_XO_CLK_FREQ : KGSL_ISENSE_CLK_FREQ);
-	return clk_set_rate(pwr->grp_clks[pwr->isense_clk_indx], rate);
+	return kgsl_pwrctrl_clk_set_rate(pwr->grp_clks[pwr->isense_clk_indx],
+			rate, clocks[pwr->isense_clk_indx]);
+}
+
+/*
+ * _gpu_clk_prepare_enable - Enable the specified GPU clock
+ * Try once to enable it and then BUG() for debug
+ */
+static void _gpu_clk_prepare_enable(struct kgsl_device *device,
+		struct clk *clk, const char *name)
+{
+	int ret;
+
+	if (device->state == KGSL_STATE_NAP) {
+		ret = clk_enable(clk);
+		if (ret)
+			goto err;
+		return;
+	}
+
+	ret = clk_prepare_enable(clk);
+	if (!ret)
+		return;
+err:
+	/* Failure is fatal so BUG() to facilitate debug */
+	KGSL_DRV_FATAL(device, "KGSL:%s enable error:%d\n", name, ret);
+}
+
+/*
+ * _bimc_clk_prepare_enable - Enable the specified GPU-BIMC interface clock
+ *  Try once to enable it and then BUG() for debug
+ */
+static void _bimc_clk_prepare_enable(struct kgsl_device *device,
+		struct clk *clk, const char *name)
+{
+	int ret = clk_prepare_enable(clk);
+	/* Failure is fatal so BUG() to facilitate debug */
+	if (ret)
+		KGSL_DRV_FATAL(device, "KGSL:%s enable error:%d\n", name, ret);
+}
+
+static int kgsl_pwrctrl_clk_set_rate(struct clk *grp_clk, unsigned int freq,
+		const char *name)
+{
+	int ret = clk_set_rate(grp_clk, freq);
+
+	WARN(ret, "KGSL:%s set freq %d failed:%d\n", name, freq, ret);
+	return ret;
 }
 
 static inline void _close_pcl(struct kgsl_pwrctrl *pwr)
@@ -2207,6 +2264,7 @@
 	pwr->max_pwrlevel = 0;
 	pwr->min_pwrlevel = pwr->num_pwrlevels - 2;
 	pwr->thermal_pwrlevel = 0;
+	pwr->thermal_pwrlevel_floor = pwr->min_pwrlevel;
 
 	pwr->wakeup_maxpwrlevel = 0;
 
@@ -2224,8 +2282,11 @@
 
 	kgsl_clk_set_rate(device, pwr->num_pwrlevels - 1);
 
-	clk_set_rate(pwr->grp_clks[6],
-		clk_round_rate(pwr->grp_clks[6], KGSL_RBBMTIMER_CLK_FREQ));
+	if (pwr->grp_clks[6] != NULL)
+		kgsl_pwrctrl_clk_set_rate(pwr->grp_clks[6],
+			clk_round_rate(pwr->grp_clks[6],
+			KGSL_RBBMTIMER_CLK_FREQ),
+			clocks[6]);
 
 	_isense_clk_set_rate(pwr, pwr->num_pwrlevels - 1);
 
@@ -2732,9 +2793,11 @@
 				 * GPU will not be powered on
 				 */
 				WARN_ONCE(1, "Failed to recover GMU\n");
-				device->snapshot->recovered = false;
+				if (device->snapshot)
+					device->snapshot->recovered = false;
 			} else {
-				device->snapshot->recovered = true;
+				if (device->snapshot)
+					device->snapshot->recovered = true;
 			}
 
 			clear_bit(GMU_FAULT, &gmu->flags);
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
index 2a45de7..0780737 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.h
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -125,9 +125,11 @@
  * @grp_clks - Array of clocks structures that we control
  * @power_flags - Control flags for power
  * @pwrlevels - List of supported power levels
+ * @nb - Notifier block to receive GPU OPP change event
  * @active_pwrlevel - The currently active power level
  * @previous_pwrlevel - The power level before transition
  * @thermal_pwrlevel - maximum powerlevel constraint from thermal
+ * @thermal_pwrlevel_floor - minimum powerlevel constraint from thermal
  * @default_pwrlevel - device wake up power level
  * @max_pwrlevel - maximum allowable powerlevel per the user
  * @min_pwrlevel - minimum allowable powerlevel per the user
@@ -179,9 +181,11 @@
 	unsigned long power_flags;
 	unsigned long ctrl_flags;
 	struct kgsl_pwrlevel pwrlevels[KGSL_MAX_PWRLEVELS];
+	struct notifier_block nb;
 	unsigned int active_pwrlevel;
 	unsigned int previous_pwrlevel;
 	unsigned int thermal_pwrlevel;
+	unsigned int thermal_pwrlevel_floor;
 	unsigned int default_pwrlevel;
 	unsigned int wakeup_maxpwrlevel;
 	unsigned int max_pwrlevel;
diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c
index 6825c2b..c4ff22f 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.c
+++ b/drivers/gpu/msm/kgsl_pwrscale.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -15,6 +15,7 @@
 #include <linux/kernel.h>
 #include <linux/hrtimer.h>
 #include <linux/devfreq_cooling.h>
+#include <linux/pm_opp.h>
 
 #include "kgsl.h"
 #include "kgsl_pwrscale.h"
@@ -533,7 +534,6 @@
 	int level;
 	unsigned int i;
 	unsigned long cur_freq, rec_freq;
-	struct dev_pm_opp *opp;
 
 	if (device == NULL)
 		return -ENODEV;
@@ -552,20 +552,7 @@
 		return 0;
 	}
 
-	/*
-	 * Thermal framework might have disabled/enabled OPP entries
-	 * for mitigation. So find the recommended frequency matching
-	 * the available opp entries
-	 */
-	rcu_read_lock();
 	rec_freq = *freq;
-	opp = devfreq_recommended_opp(dev, &rec_freq, flags);
-	if (IS_ERR(opp)) {
-		rcu_read_unlock();
-		return PTR_ERR(opp);
-	}
-	rec_freq = dev_pm_opp_get_freq(opp);
-	rcu_read_unlock();
 
 	mutex_lock(&device->mutex);
 	cur_freq = kgsl_pwrctrl_active_freq(pwr);
@@ -870,6 +857,125 @@
 	return 0;
 }
 
+/*
+ * opp_notify - Callback function registered to receive OPP events.
+ * @nb: The notifier block
+ * @type: The event type. Two OPP events are expected in this function:
+ *      - OPP_EVENT_ENABLE: a GPU OPP is enabled. The in_opp parameter
+ *	contains the OPP that is enabled
+ *	- OPP_EVENT_DISABLE: a GPU OPP is disabled. The in_opp parameter
+ *	contains the OPP that is disabled.
+ * @in_opp: the GPU OPP whose status is changed and triggered the event
+ *
+ * GPU OPP event callback function. The function subscribes to GPU OPP
+ * status changes and updates the thermal power levels accordingly.
+ */
+
+static int opp_notify(struct notifier_block *nb,
+	unsigned long type, void *in_opp)
+{
+	int result = -EINVAL, level, min_level, max_level;
+	struct kgsl_pwrctrl *pwr = container_of(nb, struct kgsl_pwrctrl, nb);
+	struct kgsl_device *device = container_of(pwr,
+			struct kgsl_device, pwrctrl);
+	struct device *dev = &device->pdev->dev;
+	struct dev_pm_opp *opp;
+	unsigned long min_freq = 0, max_freq = pwr->pwrlevels[0].gpu_freq;
+
+	if (type != OPP_EVENT_ENABLE && type != OPP_EVENT_DISABLE)
+		return result;
+
+	rcu_read_lock();
+	opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
+	if (IS_ERR(opp)) {
+		rcu_read_unlock();
+		return PTR_ERR(opp);
+	}
+
+	max_freq = dev_pm_opp_get_freq(opp);
+	if (!max_freq) {
+		rcu_read_unlock();
+		return result;
+	}
+
+	opp = dev_pm_opp_find_freq_ceil(dev, &min_freq);
+	if (IS_ERR(opp))
+		min_freq = pwr->pwrlevels[pwr->min_pwrlevel].gpu_freq;
+
+	rcu_read_unlock();
+
+	mutex_lock(&device->mutex);
+
+	max_level = pwr->thermal_pwrlevel;
+	min_level = pwr->thermal_pwrlevel_floor;
+
+	/* Thermal limit cannot be lower than lowest non-zero operating freq */
+	for (level = 0; level < (pwr->num_pwrlevels - 1); level++) {
+		if (pwr->pwrlevels[level].gpu_freq == max_freq)
+			max_level = level;
+		if (pwr->pwrlevels[level].gpu_freq == min_freq)
+			min_level = level;
+	}
+
+	pwr->thermal_pwrlevel = max_level;
+	pwr->thermal_pwrlevel_floor = min_level;
+
+	/* Update the current level using the new limit */
+	kgsl_pwrctrl_pwrlevel_change(device, pwr->active_pwrlevel);
+	mutex_unlock(&device->mutex);
+
+	return 0;
+}
+
+/*
+ * kgsl_opp_add_notifier - add a fine grained notifier.
+ * @dev: The device
+ * @nb: Notifier block that will receive updates.
+ *
+ * Add a notifier to receive GPU OPP_EVENT_* events
+ * from the OPP framework.
+ */
+static int kgsl_opp_add_notifier(struct device *dev,
+		struct notifier_block *nb)
+{
+	struct srcu_notifier_head *nh;
+	int ret = 0;
+
+	rcu_read_lock();
+	nh = dev_pm_opp_get_notifier(dev);
+	if (IS_ERR(nh))
+		ret = PTR_ERR(nh);
+	rcu_read_unlock();
+	if (!ret)
+		ret = srcu_notifier_chain_register(nh, nb);
+
+	return ret;
+}
+
+/*
+ * kgsl_opp_remove_notifier - remove registered opp event notifier.
+ * @dev: The device
+ * @nb: Notifier block that will receive updates.
+ *
+ * Remove gpu notifier that receives GPU OPP_EVENT_* events
+ * from the OPP framework.
+ */
+static int kgsl_opp_remove_notifier(struct device *dev,
+		struct notifier_block *nb)
+{
+	struct srcu_notifier_head *nh;
+	int ret = 0;
+
+	rcu_read_lock();
+	nh = dev_pm_opp_get_notifier(dev);
+	if (IS_ERR(nh))
+		ret = PTR_ERR(nh);
+	rcu_read_unlock();
+	if (!ret)
+		ret = srcu_notifier_chain_unregister(nh, nb);
+
+	return ret;
+}
 
 /*
  * kgsl_pwrscale_init - Initialize pwrscale.
@@ -900,6 +1006,10 @@
 	gpu_profile = &pwrscale->gpu_profile;
 	profile = &pwrscale->gpu_profile.profile;
 
+	pwr->nb.notifier_call = opp_notify;
+
+	kgsl_opp_add_notifier(dev, &pwr->nb);
+
 	srcu_init_notifier_head(&pwrscale->nh);
 
 	profile->initial_freq =
@@ -1053,7 +1163,9 @@
 {
 	int i;
 	struct kgsl_pwrscale *pwrscale;
+	struct kgsl_pwrctrl *pwr;
 
+	pwr = &device->pwrctrl;
 	pwrscale = &device->pwrscale;
 	if (!pwrscale->devfreqptr)
 		return;
@@ -1068,6 +1180,7 @@
 	kgsl_midframe = NULL;
 	device->pwrscale.devfreqptr = NULL;
 	srcu_cleanup_notifier_head(&device->pwrscale.nh);
+	kgsl_opp_remove_notifier(&device->pdev->dev, &pwr->nb);
 	for (i = 0; i < KGSL_PWREVENT_MAX; i++)
 		kfree(pwrscale->history[i].events);
 }
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index cd4599c..8eed456 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -175,11 +175,11 @@
 	Support for Cherry Cymotion keyboard.
 
 config HID_CHICONY
-	tristate "Chicony Tactical pad"
+	tristate "Chicony devices"
 	depends on HID
 	default !EXPERT
 	---help---
-	Support for Chicony Tactical pad.
+	Support for Chicony Tactical pad and special keys on Chicony keyboards.
 
 config HID_CORSAIR
 	tristate "Corsair devices"
@@ -190,6 +190,7 @@
 
 	Supported devices:
 	- Vengeance K90
+	- Scimitar PRO RGB
 
 config HID_PRODIKEYS
 	tristate "Prodikeys PC-MIDI Keyboard support"
diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c
index bc3cec1..f04ed9a 100644
--- a/drivers/hid/hid-chicony.c
+++ b/drivers/hid/hid-chicony.c
@@ -86,6 +86,7 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) },
 	{ }
 };
 MODULE_DEVICE_TABLE(hid, ch_devices);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index d42ace8..f2a7483 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1874,6 +1874,7 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
@@ -1908,6 +1909,7 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
@@ -2106,6 +2108,7 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
diff --git a/drivers/hid/hid-corsair.c b/drivers/hid/hid-corsair.c
index c0303f6..9ba5d98 100644
--- a/drivers/hid/hid-corsair.c
+++ b/drivers/hid/hid-corsair.c
@@ -3,8 +3,10 @@
  *
  * Supported devices:
  *  - Vengeance K90 Keyboard
+ *  - Scimitar PRO RGB Gaming Mouse
  *
  * Copyright (c) 2015 Clement Vuchener
+ * Copyright (c) 2017 Oscar Campos
  */
 
 /*
@@ -670,10 +672,51 @@
 	return 0;
 }
 
+/*
+ * The report descriptor of the Corsair Scimitar RGB Pro gaming mouse is
+ * non-parseable as it defines two consecutive Logical Minimums for
+ * the Usage Page (Consumer): in rdesc bytes 75 and 77, byte 77 is 0x16
+ * where it should obviously be 0x26 for a Logical Maximum of 16 bits.
+ * This prevents proper parsing of the report descriptor due to the
+ * Logical Minimum being larger than the Logical Maximum.
+ *
+ * This driver fixes the report descriptor for:
+ * - USB ID b1c:1b3e, sold as Scimitar RGB Pro Gaming mouse
+ */
+
+static __u8 *corsair_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+        unsigned int *rsize)
+{
+	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+
+	if (intf->cur_altsetting->desc.bInterfaceNumber == 1) {
+		/*
+		 * Corsair Scimitar RGB Pro report descriptor is broken and
+		 * defines two different Logical Minimum for the Consumer
+		 * Application. The byte 77 should be a 0x26 defining a 16
+		 * bits integer for the Logical Maximum but it is a 0x16
+		 * instead (Logical Minimum)
+		 */
+		switch (hdev->product) {
+		case USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB:
+			if (*rsize >= 172 && rdesc[75] == 0x15 && rdesc[77] == 0x16
+			&& rdesc[78] == 0xff && rdesc[79] == 0x0f) {
+				hid_info(hdev, "Fixing up report descriptor\n");
+				rdesc[77] = 0x26;
+			}
+			break;
+		}
+
+	}
+	return rdesc;
+}
+
 static const struct hid_device_id corsair_devices[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90),
 		.driver_data = CORSAIR_USE_K90_MACRO |
 			       CORSAIR_USE_K90_BACKLIGHT },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR,
+            USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) },
 	{}
 };
 
@@ -686,10 +729,14 @@
 	.event = corsair_event,
 	.remove = corsair_remove,
 	.input_mapping = corsair_input_mapping,
+	.report_fixup = corsair_mouse_report_fixup,
 };
 
 module_hid_driver(corsair_driver);
 
 MODULE_LICENSE("GPL");
+/* Original K90 driver author */
 MODULE_AUTHOR("Clement Vuchener");
+/* Scimitar PRO RGB driver author */
+MODULE_AUTHOR("Oscar Campos");
 MODULE_DESCRIPTION("HID driver for Corsair devices");
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index e06c134..7af7781 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -188,6 +188,8 @@
 				 HID_REQ_GET_REPORT);
 	if (ret != CP2112_GPIO_CONFIG_LENGTH) {
 		hid_err(hdev, "error requesting GPIO config: %d\n", ret);
+		if (ret >= 0)
+			ret = -EIO;
 		goto exit;
 	}
 
@@ -197,8 +199,10 @@
 	ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
 				 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
 				 HID_REQ_SET_REPORT);
-	if (ret < 0) {
+	if (ret != CP2112_GPIO_CONFIG_LENGTH) {
 		hid_err(hdev, "error setting GPIO config: %d\n", ret);
+		if (ret >= 0)
+			ret = -EIO;
 		goto exit;
 	}
 
@@ -206,7 +210,7 @@
 
 exit:
 	mutex_unlock(&dev->lock);
-	return ret < 0 ? ret : -EIO;
+	return ret;
 }
 
 static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 08fd3f8..244b97c 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -277,6 +277,9 @@
 #define USB_DEVICE_ID_CORSAIR_K70RGB    0x1b13
 #define USB_DEVICE_ID_CORSAIR_STRAFE    0x1b15
 #define USB_DEVICE_ID_CORSAIR_K65RGB    0x1b17
+#define USB_DEVICE_ID_CORSAIR_K70RGB_RAPIDFIRE  0x1b38
+#define USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE  0x1b39
+#define USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB  0x1b3e
 
 #define USB_VENDOR_ID_CREATIVELABS	0x041e
 #define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51	0x322c
@@ -558,6 +561,7 @@
 
 #define USB_VENDOR_ID_JESS		0x0c45
 #define USB_DEVICE_ID_JESS_YUREX	0x1010
+#define USB_DEVICE_ID_JESS_ZEN_AIO_KBD	0x5112
 
 #define USB_VENDOR_ID_JESS2		0x0f30
 #define USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD 0x0111
@@ -1076,6 +1080,7 @@
 
 #define USB_VENDOR_ID_XIN_MO			0x16c0
 #define USB_DEVICE_ID_XIN_MO_DUAL_ARCADE	0x05e1
+#define USB_DEVICE_ID_THT_2P_ARCADE		0x75e1
 
 #define USB_VENDOR_ID_XIROKU		0x1477
 #define USB_DEVICE_ID_XIROKU_SPX	0x1006
diff --git a/drivers/hid/hid-xinmo.c b/drivers/hid/hid-xinmo.c
index 7df5227..9ad7731 100644
--- a/drivers/hid/hid-xinmo.c
+++ b/drivers/hid/hid-xinmo.c
@@ -46,6 +46,7 @@
 
 static const struct hid_device_id xinmo_devices[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
 	{ }
 };
 
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 2b16207..1916f80 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -80,6 +80,9 @@
 	{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB, HID_QUIRK_NO_INIT_REPORTS },
 	{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB, HID_QUIRK_NO_INIT_REPORTS },
 	{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+	{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+	{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+	{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
 	{ USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
 	{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index e0a8216..13c32eb4 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -31,6 +31,7 @@
 #include <linux/clockchips.h>
 #include <asm/hyperv.h>
 #include <asm/mshyperv.h>
+#include <asm/nospec-branch.h>
 #include "hyperv_vmbus.h"
 
 /* The one and only */
@@ -103,9 +104,10 @@
 		return (u64)ULLONG_MAX;
 
 	__asm__ __volatile__("mov %0, %%r8" : : "r" (output_address) : "r8");
-	__asm__ __volatile__("call *%3" : "=a" (hv_status) :
+	__asm__ __volatile__(CALL_NOSPEC :
+			     "=a" (hv_status) :
 			     "c" (control), "d" (input_address),
-			     "m" (hypercall_page));
+			     THUNK_TARGET(hypercall_page));
 
 	return hv_status;
 
@@ -123,11 +125,12 @@
 	if (!hypercall_page)
 		return (u64)ULLONG_MAX;
 
-	__asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
+	__asm__ __volatile__ (CALL_NOSPEC : "=d"(hv_status_hi),
 			      "=a"(hv_status_lo) : "d" (control_hi),
 			      "a" (control_lo), "b" (input_address_hi),
 			      "c" (input_address_lo), "D"(output_address_hi),
-			      "S"(output_address_lo), "m" (hypercall_page));
+			      "S"(output_address_lo),
+			      THUNK_TARGET(hypercall_page));
 
 	return hv_status_lo | ((u64)hv_status_hi << 32);
 #endif /* !x86_64 */
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index 75126e4..4442007 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -61,7 +61,6 @@
 static const char fcopy_devname[] = "vmbus/hv_fcopy";
 static u8 *recv_buffer;
 static struct hvutil_transport *hvt;
-static struct completion release_event;
 /*
  * This state maintains the version number registered by the daemon.
  */
@@ -322,7 +321,6 @@
 
 	if (cancel_delayed_work_sync(&fcopy_timeout_work))
 		fcopy_respond_to_host(HV_E_FAIL);
-	complete(&release_event);
 }
 
 int hv_fcopy_init(struct hv_util_service *srv)
@@ -330,7 +328,6 @@
 	recv_buffer = srv->recv_buffer;
 	fcopy_transaction.recv_channel = srv->channel;
 
-	init_completion(&release_event);
 	/*
 	 * When this driver loads, the user level daemon that
 	 * processes the host requests may not yet be running.
@@ -352,5 +349,4 @@
 	fcopy_transaction.state = HVUTIL_DEVICE_DYING;
 	cancel_delayed_work_sync(&fcopy_timeout_work);
 	hvutil_transport_destroy(hvt);
-	wait_for_completion(&release_event);
 }
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 3abfc59..5e1fdc8 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -88,7 +88,6 @@
 static const char kvp_devname[] = "vmbus/hv_kvp";
 static u8 *recv_buffer;
 static struct hvutil_transport *hvt;
-static struct completion release_event;
 /*
  * Register the kernel component with the user-level daemon.
  * As part of this registration, pass the LIC version number.
@@ -717,7 +716,6 @@
 	if (cancel_delayed_work_sync(&kvp_timeout_work))
 		kvp_respond_to_host(NULL, HV_E_FAIL);
 	kvp_transaction.state = HVUTIL_DEVICE_INIT;
-	complete(&release_event);
 }
 
 int
@@ -726,7 +724,6 @@
 	recv_buffer = srv->recv_buffer;
 	kvp_transaction.recv_channel = srv->channel;
 
-	init_completion(&release_event);
 	/*
 	 * When this driver loads, the user level daemon that
 	 * processes the host requests may not yet be running.
@@ -750,5 +747,4 @@
 	cancel_delayed_work_sync(&kvp_timeout_work);
 	cancel_work_sync(&kvp_sendkey_work);
 	hvutil_transport_destroy(hvt);
-	wait_for_completion(&release_event);
 }
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index a76e3db..a670713 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -66,7 +66,6 @@
 static const char vss_devname[] = "vmbus/hv_vss";
 static __u8 *recv_buffer;
 static struct hvutil_transport *hvt;
-static struct completion release_event;
 
 static void vss_timeout_func(struct work_struct *dummy);
 static void vss_handle_request(struct work_struct *dummy);
@@ -331,13 +330,11 @@
 	if (cancel_delayed_work_sync(&vss_timeout_work))
 		vss_respond_to_host(HV_E_FAIL);
 	vss_transaction.state = HVUTIL_DEVICE_INIT;
-	complete(&release_event);
 }
 
 int
 hv_vss_init(struct hv_util_service *srv)
 {
-	init_completion(&release_event);
 	if (vmbus_proto_version < VERSION_WIN8_1) {
 		pr_warn("Integration service 'Backup (volume snapshot)'"
 			" not supported on this host version.\n");
@@ -368,5 +365,4 @@
 	cancel_delayed_work_sync(&vss_timeout_work);
 	cancel_work_sync(&vss_handle_request_work);
 	hvutil_transport_destroy(hvt);
-	wait_for_completion(&release_event);
 }
diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c
index c235a95..4402a71 100644
--- a/drivers/hv/hv_utils_transport.c
+++ b/drivers/hv/hv_utils_transport.c
@@ -182,10 +182,11 @@
 	 * connects back.
 	 */
 	hvt_reset(hvt);
-	mutex_unlock(&hvt->lock);
 
 	if (mode_old == HVUTIL_TRANSPORT_DESTROY)
-		hvt_transport_free(hvt);
+		complete(&hvt->release);
+
+	mutex_unlock(&hvt->lock);
 
 	return 0;
 }
@@ -304,6 +305,7 @@
 
 	init_waitqueue_head(&hvt->outmsg_q);
 	mutex_init(&hvt->lock);
+	init_completion(&hvt->release);
 
 	spin_lock(&hvt_list_lock);
 	list_add(&hvt->list, &hvt_list);
@@ -351,6 +353,8 @@
 	if (hvt->cn_id.idx > 0 && hvt->cn_id.val > 0)
 		cn_del_callback(&hvt->cn_id);
 
-	if (mode_old != HVUTIL_TRANSPORT_CHARDEV)
-		hvt_transport_free(hvt);
+	if (mode_old == HVUTIL_TRANSPORT_CHARDEV)
+		wait_for_completion(&hvt->release);
+
+	hvt_transport_free(hvt);
 }
diff --git a/drivers/hv/hv_utils_transport.h b/drivers/hv/hv_utils_transport.h
index d98f522..79afb62 100644
--- a/drivers/hv/hv_utils_transport.h
+++ b/drivers/hv/hv_utils_transport.h
@@ -41,6 +41,7 @@
 	int outmsg_len;                     /* its length */
 	wait_queue_head_t outmsg_q;         /* poll/read wait queue */
 	struct mutex lock;                  /* protects struct members */
+	struct completion release;          /* synchronize with fd release */
 };
 
 struct hvutil_transport *hvutil_transport_init(const char *name,
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index cccef87..975c43d 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -646,6 +646,9 @@
 		else
 			err = atk_read_value_new(sensor, value);
 
+		if (err)
+			return err;
+
 		sensor->is_valid = true;
 		sensor->last_updated = jiffies;
 		sensor->cached_value = *value;
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 1bf22ef..0f1f642 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -22,6 +22,7 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include <linux/bitops.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -45,6 +46,7 @@
 #define JC42_REG_TEMP		0x05
 #define JC42_REG_MANID		0x06
 #define JC42_REG_DEVICEID	0x07
+#define JC42_REG_SMBUS		0x22 /* NXP and Atmel, possibly others? */
 
 /* Status bits in temperature register */
 #define JC42_ALARM_CRIT_BIT	15
@@ -73,6 +75,9 @@
 #define ONS_MANID		0x1b09  /* ON Semiconductor */
 #define STM_MANID		0x104a  /* ST Microelectronics */
 
+/* SMBUS register */
+#define SMBUS_STMOUT		BIT(7)  /* SMBus time-out, active low */
+
 /* Supported chips */
 
 /* Analog Devices */
@@ -476,6 +481,22 @@
 
 	data->extended = !!(cap & JC42_CAP_RANGE);
 
+	if (device_property_read_bool(dev, "smbus-timeout-disable")) {
+		int smbus;
+
+		/*
+		 * Not all chips support this register, but from a
+		 * quick read of various datasheets no chip appears
+		 * incompatible with the below attempt to disable
+		 * the timeout. And the whole thing is opt-in...
+		 */
+		smbus = i2c_smbus_read_word_swapped(client, JC42_REG_SMBUS);
+		if (smbus < 0)
+			return smbus;
+		i2c_smbus_write_word_swapped(client, JC42_REG_SMBUS,
+					     smbus | SMBUS_STMOUT);
+	}
+
 	config = i2c_smbus_read_word_swapped(client, JC42_REG_CONFIG);
 	if (config < 0)
 		return config;
diff --git a/drivers/hwmon/max31790.c b/drivers/hwmon/max31790.c
index c1b9275..281491c 100644
--- a/drivers/hwmon/max31790.c
+++ b/drivers/hwmon/max31790.c
@@ -311,7 +311,7 @@
 		data->pwm[channel] = val << 8;
 		err = i2c_smbus_write_word_swapped(client,
 						   MAX31790_REG_PWMOUT(channel),
-						   val);
+						   data->pwm[channel]);
 		break;
 	case hwmon_pwm_enable:
 		fan_config = data->fan_config[channel];
diff --git a/drivers/hwmon/qpnp-adc-common.c b/drivers/hwmon/qpnp-adc-common.c
index 16a3e7d..f3e16b3 100644
--- a/drivers/hwmon/qpnp-adc-common.c
+++ b/drivers/hwmon/qpnp-adc-common.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -39,9 +39,22 @@
 #define PMI_CHG_SCALE_2		391750000000
 #define QPNP_VADC_HC_VREF_CODE		0x4000
 #define QPNP_VADC_HC_VDD_REFERENCE_MV	1875
+#define CHRG_SCALE_1 -250
+#define CHRG_SCALE_2 377500000
+#define DIE_SCALE_1 500
+#define DIE_SCALE_2 -273150000
+
 /* Clamp negative ADC code to 0 */
 #define QPNP_VADC_HC_MAX_CODE		0x7FFF
 
+/*Invalid current reading*/
+#define QPNP_IADC_INV		0x8000
+
+#define IADC_SCALE_1 0xffff
+#define IADC_SCALE_2 152593
+
+#define USBIN_I_SCALE 25
+
 /*
  * Units for temperature below (on x axis) is in 0.1DegC as
  * required by the battery driver. Note the resolution used
@@ -590,6 +603,80 @@
 	{30,	125}
 };
 
+/* Voltage to temperature */
+static const struct qpnp_vadc_map_pt adcmap_batt_therm[] = {
+	{1770,	-400},
+	{1757,	-380},
+	{1743,	-360},
+	{1727,	-340},
+	{1710,	-320},
+	{1691,	-300},
+	{1671,	-280},
+	{1650,	-260},
+	{1627,	-240},
+	{1602,	-220},
+	{1576,	-200},
+	{1548,	-180},
+	{1519,	-160},
+	{1488,	-140},
+	{1456,	-120},
+	{1423,	-100},
+	{1388,	-80},
+	{1353,	-60},
+	{1316,	-40},
+	{1278,	-20},
+	{1240,	0},
+	{1201,	20},
+	{1162,	40},
+	{1122,	60},
+	{1082,	80},
+	{1042,	100},
+	{1003,	120},
+	{964,	140},
+	{925,	160},
+	{887,	180},
+	{849,	200},
+	{812,	220},
+	{777,	240},
+	{742,	260},
+	{708,	280},
+	{675,	300},
+	{643,	320},
+	{613,	340},
+	{583,	360},
+	{555,	380},
+	{528,	400},
+	{502,	420},
+	{477,	440},
+	{453,	460},
+	{430,	480},
+	{409,	500},
+	{388,	520},
+	{369,	540},
+	{350,	560},
+	{333,	580},
+	{316,	600},
+	{300,	620},
+	{285,	640},
+	{271,	660},
+	{257,	680},
+	{245,	700},
+	{233,	720},
+	{221,	740},
+	{210,	760},
+	{200,	780},
+	{190,	800},
+	{181,	820},
+	{173,	840},
+	{164,	860},
+	{157,	880},
+	{149,	900},
+	{142,	920},
+	{136,	940},
+	{129,	960},
+	{124,	980}
+};
+
 /*
  * Voltage to temperature table for 100k pull up for NTCG104EF104 with
  * 1.875V reference.
@@ -899,6 +986,36 @@
 }
 EXPORT_SYMBOL(qpnp_adc_tdkntcg_therm);
 
+int32_t qpnp_adc_batt_therm(struct qpnp_vadc_chip *chip,
+		int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		struct qpnp_vadc_result *adc_chan_result)
+{
+	int64_t batt_thm_voltage = 0;
+
+	if (!chan_properties || !chan_properties->offset_gain_numerator ||
+		!chan_properties->offset_gain_denominator || !adc_properties
+		|| !adc_chan_result)
+		return -EINVAL;
+
+	if (adc_properties->adc_hc) {
+		/* (code * vref_vadc (1.875V) * 1000) / (scale_code * 1000) */
+		if (adc_code > QPNP_VADC_HC_MAX_CODE)
+			adc_code = 0;
+		batt_thm_voltage = (int64_t) adc_code;
+		batt_thm_voltage *= (adc_properties->adc_vdd_reference
+							* 1000);
+		batt_thm_voltage = div64_s64(batt_thm_voltage,
+				adc_properties->full_scale_code * 1000);
+		qpnp_adc_map_voltage_temp(adcmap_batt_therm,
+			ARRAY_SIZE(adcmap_batt_therm),
+			batt_thm_voltage, &adc_chan_result->physical);
+	}
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_batt_therm);
+
 int32_t qpnp_adc_scale_batt_therm(struct qpnp_vadc_chip *chip,
 		int32_t adc_code,
 		const struct qpnp_adc_properties *adc_properties,
@@ -920,6 +1037,70 @@
 }
 EXPORT_SYMBOL(qpnp_adc_scale_batt_therm);
 
+int32_t qpnp_adc_scale_chrg_temp(struct qpnp_vadc_chip *vadc,
+		int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		struct qpnp_vadc_result *adc_chan_result)
+{
+	int rc = 0;
+
+	if (!chan_properties || !chan_properties->offset_gain_numerator ||
+		!chan_properties->offset_gain_denominator || !adc_properties
+		|| !adc_chan_result)
+		return -EINVAL;
+
+	rc = qpnp_adc_scale_default(vadc, adc_code, adc_properties,
+			chan_properties, adc_chan_result);
+	if (rc < 0)
+		return rc;
+
+	pr_debug("raw_code:%x, v_adc:%lld\n", adc_code,
+						adc_chan_result->physical);
+	adc_chan_result->physical = (int64_t) ((CHRG_SCALE_1) *
+					(adc_chan_result->physical));
+	adc_chan_result->physical = (int64_t) (adc_chan_result->physical +
+							CHRG_SCALE_2);
+	adc_chan_result->physical = (int64_t) adc_chan_result->physical;
+	adc_chan_result->physical = div64_s64(adc_chan_result->physical,
+								1000000);
+
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_scale_chrg_temp);
+
+int32_t qpnp_adc_scale_die_temp(struct qpnp_vadc_chip *vadc,
+		int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		struct qpnp_vadc_result *adc_chan_result)
+{
+	int rc = 0;
+
+	if (!chan_properties || !chan_properties->offset_gain_numerator ||
+		!chan_properties->offset_gain_denominator || !adc_properties
+		|| !adc_chan_result)
+		return -EINVAL;
+
+	rc = qpnp_adc_scale_default(vadc, adc_code, adc_properties,
+			chan_properties, adc_chan_result);
+	if (rc < 0)
+		return rc;
+
+	pr_debug("raw_code:%x, v_adc:%lld\n", adc_code,
+						adc_chan_result->physical);
+	adc_chan_result->physical = (int64_t) ((DIE_SCALE_1) *
+					(adc_chan_result->physical));
+	adc_chan_result->physical = (int64_t) (adc_chan_result->physical +
+							DIE_SCALE_2);
+	adc_chan_result->physical = (int64_t) adc_chan_result->physical;
+	adc_chan_result->physical = div64_s64(adc_chan_result->physical,
+								1000000);
+
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_scale_die_temp);
+
 int32_t qpnp_adc_scale_qrd_batt_therm(struct qpnp_vadc_chip *chip,
 		int32_t adc_code,
 		const struct qpnp_adc_properties *adc_properties,
@@ -1279,6 +1460,73 @@
 }
 EXPORT_SYMBOL(qpnp_adc_scale_default);
 
+int32_t qpnp_iadc_scale_default(struct qpnp_vadc_chip *vadc,
+		int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		struct qpnp_vadc_result *adc_chan_result)
+{
+	int64_t scale_current = 0;
+
+	if (!chan_properties || !chan_properties->offset_gain_numerator ||
+		!chan_properties->offset_gain_denominator || !adc_properties
+		|| !adc_chan_result)
+		return -EINVAL;
+
+	if (adc_properties->adc_hc) {
+
+		if (adc_code == QPNP_IADC_INV)
+			return -EINVAL;
+
+		scale_current = (int64_t) adc_code;
+
+		if (adc_code > QPNP_IADC_INV) {
+		scale_current = ((~scale_current) & IADC_SCALE_1);
+		scale_current++;
+		scale_current = -scale_current;
+		}
+	}
+
+	scale_current *= IADC_SCALE_2;
+	scale_current = div64_s64(scale_current,
+				1000);
+	scale_current *= chan_properties->offset_gain_denominator;
+	scale_current = div64_s64(scale_current,
+				chan_properties->offset_gain_numerator);
+	adc_chan_result->measurement = scale_current;
+	/*
+	 * Note: adc_chan_result->measurement is in uA.
+	 */
+	adc_chan_result->physical = adc_chan_result->measurement;
+
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_iadc_scale_default);
+
+int qpnp_adc_scale_usbin_curr(struct qpnp_vadc_chip *vadc,
+		int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		struct qpnp_vadc_result *adc_chan_result)
+{
+	int rc = 0;
+
+	rc = qpnp_adc_scale_default(vadc, adc_code, adc_properties,
+			chan_properties, adc_chan_result);
+	if (rc < 0)
+		return rc;
+
+	pr_debug("raw_code:%x, v_adc:%lld\n", adc_code,
+						adc_chan_result->physical);
+	adc_chan_result->physical = (int64_t) ((USBIN_I_SCALE) *
+					adc_chan_result->physical);
+	adc_chan_result->physical = div64_s64(adc_chan_result->physical,
+								10);
+
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_scale_usbin_curr);
+
 int32_t qpnp_adc_usb_scaler(struct qpnp_vadc_chip *chip,
 		struct qpnp_adc_tm_btm_param *param,
 		uint32_t *low_threshold, uint32_t *high_threshold)
diff --git a/drivers/hwmon/qpnp-adc-voltage.c b/drivers/hwmon/qpnp-adc-voltage.c
index 4b5e206..8b44c0f 100644
--- a/drivers/hwmon/qpnp-adc-voltage.c
+++ b/drivers/hwmon/qpnp-adc-voltage.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -219,6 +219,11 @@
 	[SCALE_NCP_03WF683_THERM] = {qpnp_adc_scale_therm_ncp03},
 	[SCALE_QRD_SKUT1_BATT_THERM] = {qpnp_adc_scale_qrd_skut1_batt_therm},
 	[SCALE_PMI_CHG_TEMP] = {qpnp_adc_scale_pmi_chg_temp},
+	[SCALE_BATT_THERM_TEMP] = {qpnp_adc_batt_therm},
+	[SCALE_CHRG_TEMP] = {qpnp_adc_scale_chrg_temp},
+	[SCALE_DIE_TEMP] = {qpnp_adc_scale_die_temp},
+	[SCALE_I_DEFAULT] = {qpnp_iadc_scale_default},
+	[SCALE_USBIN_I] = {qpnp_adc_scale_usbin_curr},
 };
 
 static struct qpnp_vadc_rscale_fn adc_vadc_rscale_fn[] = {
@@ -2618,6 +2623,7 @@
 				&qpnp_vadc_thermal_ops);
 			if (IS_ERR(vadc->vadc_therm_chan[i].tz_dev)) {
 				pr_err("thermal device register failed.\n");
+				rc = PTR_ERR(vadc->vadc_therm_chan[i].tz_dev);
 				goto thermal_err_sens;
 			}
 		}
diff --git a/drivers/hwtracing/coresight/coresight-tpdm.c b/drivers/hwtracing/coresight/coresight-tpdm.c
index 36e3db2..0bc9439 100644
--- a/drivers/hwtracing/coresight/coresight-tpdm.c
+++ b/drivers/hwtracing/coresight/coresight-tpdm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -20,6 +20,7 @@
 #include <linux/bitmap.h>
 #include <linux/of.h>
 #include <linux/coresight.h>
+#include <soc/qcom/memory_dump.h>
 
 #include "coresight-priv.h"
 
@@ -225,6 +226,7 @@
 struct mcmb_dataset {
 	uint8_t		mcmb_trig_lane;
 	uint8_t		mcmb_lane_select;
+	uint32_t		*mcmb_msr_dump_ptr;
 };
 
 struct cmb_dataset {
@@ -609,6 +611,11 @@
 	val = val & ~BM(10, 17);
 	val = val | (BMVAL(mcmb->mcmb_lane_select, 0, 7) << 10);
 
+	if (mcmb->mcmb_msr_dump_ptr) {
+		for (i = 0; i < TPDM_CMB_MAX_MSR; i++)
+			mcmb->mcmb_msr_dump_ptr[i] = drvdata->cmb->msr[i];
+	}
+
 	tpdm_writel(drvdata, val, TPDM_CMB_CR);
 	/* Set the enable bit */
 	val = val | BIT(0);
@@ -3593,6 +3600,11 @@
 		return -EPERM;
 
 	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
 	TPDM_UNLOCK(drvdata);
 	val = tpdm_readl(drvdata, TPDM_CMB_READVAL);
 	TPDM_LOCK(drvdata);
@@ -3615,6 +3627,11 @@
 		return -EPERM;
 
 	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
 	TPDM_UNLOCK(drvdata);
 	val = tpdm_readl(drvdata, TPDM_CMB_READCTL);
 	TPDM_LOCK(drvdata);
@@ -3643,6 +3660,11 @@
 		return -EPERM;
 
 	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
 	TPDM_UNLOCK(drvdata);
 	tpdm_writel(drvdata, val, TPDM_CMB_READCTL);
 	TPDM_LOCK(drvdata);
@@ -3741,6 +3763,11 @@
 		return -EPERM;
 
 	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
 	TPDM_UNLOCK(drvdata);
 	tpdm_writel(drvdata, val, TPDM_CMB_MARKR);
 	TPDM_LOCK(drvdata);
@@ -3910,6 +3937,12 @@
 						  GFP_KERNEL);
 		if (!drvdata->cmb->mcmb)
 			return -ENOMEM;
+
+		if (of_property_read_bool(drvdata->dev->of_node,
+						    "qcom,dump-enable"))
+			drvdata->cmb->mcmb->mcmb_msr_dump_ptr =
+				(uint32_t *)get_msm_dump_ptr(
+						MSM_DUMP_DATA_TPDM_SWAO_MCMB);
 	}
 	return 0;
 }
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 63b5db4..e0f3244 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -95,6 +95,11 @@
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9da6),
 		.driver_data = (kernel_ulong_t)0,
 	},
+	{
+		/* Gemini Lake */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e),
+		.driver_data = (kernel_ulong_t)0,
+	},
 	{ 0 },
 };
 
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 6869712..45d6771 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -962,10 +962,6 @@
 		goto err_clk_dis;
 	}
 
-	ret = i2c_add_adapter(&id->adap);
-	if (ret < 0)
-		goto err_clk_dis;
-
 	/*
 	 * Cadence I2C controller has a bug wherein it generates
 	 * invalid read transaction after HW timeout in master receiver mode.
@@ -975,6 +971,10 @@
 	 */
 	cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET);
 
+	ret = i2c_add_adapter(&id->adap);
+	if (ret < 0)
+		goto err_clk_dis;
+
 	dev_info(&pdev->dev, "%u kHz mmio %08lx irq %d\n",
 		 id->i2c_clk / 1000, (unsigned long)r_mem->start, id->irq);
 
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index eb3627f..e6fe21a 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1592,6 +1592,9 @@
 	/* Default timeout in interrupt mode: 200 ms */
 	priv->adapter.timeout = HZ / 5;
 
+	if (dev->irq == IRQ_NOTCONNECTED)
+		priv->features &= ~FEATURE_IRQ;
+
 	if (priv->features & FEATURE_IRQ) {
 		u16 pcictl, pcists;
 
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index 8f11d34..c811af4 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -218,8 +218,12 @@
 	}
 
 	if (riic->is_last || riic->err) {
-		riic_clear_set_bit(riic, 0, ICIER_SPIE, RIIC_ICIER);
+		riic_clear_set_bit(riic, ICIER_TEIE, ICIER_SPIE, RIIC_ICIER);
 		writeb(ICCR2_SP, riic->base + RIIC_ICCR2);
+	} else {
+		/* Transfer is complete, but do not send STOP */
+		riic_clear_set_bit(riic, ICIER_TEIE, 0, RIIC_ICIER);
+		complete(&riic->msg_done);
 	}
 
 	return IRQ_HANDLED;
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index 472641f..af05e20 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -269,6 +269,7 @@
 
 		conv_time = DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr_old]);
 		conv_time += DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr]);
+		conv_time += conv_time / 10; /* 10% internal clock inaccuracy */
 		usleep_range(conv_time, conv_time + 1);
 		data->conv_invalid = false;
 	}
diff --git a/drivers/iio/light/cm3232.c b/drivers/iio/light/cm3232.c
index fe89b68..263e972 100644
--- a/drivers/iio/light/cm3232.c
+++ b/drivers/iio/light/cm3232.c
@@ -119,7 +119,7 @@
 	if (ret < 0)
 		dev_err(&chip->client->dev, "Error writing reg_cmd\n");
 
-	return 0;
+	return ret;
 }
 
 /**
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 809a028..a09d6ee 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1482,7 +1482,7 @@
 	return id_priv;
 }
 
-static inline int cma_user_data_offset(struct rdma_id_private *id_priv)
+static inline u8 cma_user_data_offset(struct rdma_id_private *id_priv)
 {
 	return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
 }
@@ -1877,7 +1877,8 @@
 	struct rdma_id_private *listen_id, *conn_id = NULL;
 	struct rdma_cm_event event;
 	struct net_device *net_dev;
-	int offset, ret;
+	u8 offset;
+	int ret;
 
 	listen_id = cma_id_from_event(cm_id, ib_event, &net_dev);
 	if (IS_ERR(listen_id))
@@ -3309,7 +3310,8 @@
 	struct ib_cm_sidr_req_param req;
 	struct ib_cm_id	*id;
 	void *private_data;
-	int offset, ret;
+	u8 offset;
+	int ret;
 
 	memset(&req, 0, sizeof req);
 	offset = cma_user_data_offset(id_priv);
@@ -3366,7 +3368,8 @@
 	struct rdma_route *route;
 	void *private_data;
 	struct ib_cm_id	*id;
-	int offset, ret;
+	u8 offset;
+	int ret;
 
 	memset(&req, 0, sizeof req);
 	offset = cma_user_data_offset(id_priv);
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index a754fc7..ff12b8d 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -196,7 +196,7 @@
 		irq_poll_disable(&cq->iop);
 		break;
 	case IB_POLL_WORKQUEUE:
-		flush_work(&cq->work);
+		cancel_work_sync(&cq->work);
 		break;
 	default:
 		WARN_ON_ONCE(1);
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 19c6477..a856371 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -575,10 +575,10 @@
 			ret = -EAGAIN;
 			goto skip_cqe;
 		}
-		if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
+		if (unlikely(!CQE_STATUS(hw_cqe) &&
+			     CQE_WRID_MSN(hw_cqe) != wq->rq.msn)) {
 			t4_set_wq_in_error(wq);
-			hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN));
-			goto proc_cqe;
+			hw_cqe->header |= cpu_to_be32(CQE_STATUS_V(T4_ERR_MSN));
 		}
 		goto proc_cqe;
 	}
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 862381a..b55adf5 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -171,7 +171,7 @@
 			__be32 msn;
 		} rcqe;
 		struct {
-			u32 stag;
+			__be32 stag;
 			u16 nada2;
 			u16 cidx;
 		} scqe;
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index 010c709..58c531d 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -675,8 +675,8 @@
 	__u16  wrid;
 	__u8   r1[3];
 	__u8   len16;
-	__u32  r2;
-	__u32  stag;
+	__be32  r2;
+	__be32  stag;
 	struct fw_ri_tpte tpte;
 	__u64  pbl[2];
 };
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 24d0820..4682909 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -9769,7 +9769,7 @@
 		goto unimplemented;
 
 	case HFI1_IB_CFG_OP_VLS:
-		val = ppd->vls_operational;
+		val = ppd->actual_vls_operational;
 		break;
 	case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
 		val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 6fd043b..7db2001 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -159,6 +159,9 @@
 		return NOTIFY_DONE;
 
 	iwdev = &hdl->device;
+	if (iwdev->init_state < INET_NOTIFIER)
+		return NOTIFY_DONE;
+
 	netdev = iwdev->ldev->netdev;
 	upper_dev = netdev_master_upper_dev_get(netdev);
 	if (netdev != event_netdev)
@@ -231,6 +234,9 @@
 		return NOTIFY_DONE;
 
 	iwdev = &hdl->device;
+	if (iwdev->init_state < INET_NOTIFIER)
+		return NOTIFY_DONE;
+
 	netdev = iwdev->ldev->netdev;
 	if (netdev != event_netdev)
 		return NOTIFY_DONE;
@@ -280,6 +286,8 @@
 		if (!iwhdl)
 			return NOTIFY_DONE;
 		iwdev = &iwhdl->device;
+		if (iwdev->init_state < INET_NOTIFIER)
+			return NOTIFY_DONE;
 		p = (__be32 *)neigh->primary_key;
 		i40iw_copy_ip_ntohl(local_ipaddr, p);
 		if (neigh->nud_state & NUD_VALID) {
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index c224543..709d649 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1669,7 +1669,7 @@
 			context->mtu_msgmax = (IB_MTU_4096 << 5) |
 					      ilog2(dev->dev->caps.max_gso_sz);
 		else
-			context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
+			context->mtu_msgmax = (IB_MTU_4096 << 5) | 13;
 	} else if (attr_mask & IB_QP_PATH_MTU) {
 		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
 			pr_err("path MTU (%u) is invalid\n",
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 786f640..a2120ff 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2514,6 +2514,8 @@
 	qp->real_qp    = qp;
 	qp->uobject    = NULL;
 	qp->qp_type    = MLX5_IB_QPT_REG_UMR;
+	qp->send_cq    = init_attr->send_cq;
+	qp->recv_cq    = init_attr->recv_cq;
 
 	attr->qp_state = IB_QPS_INIT;
 	attr->port_num = 1;
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c
index 63890eb..eccf703 100644
--- a/drivers/infiniband/hw/qedr/qedr_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_cm.c
@@ -404,9 +404,9 @@
 	}
 
 	if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h))
-		packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
-	else
 		packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB;
+	else
+		packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
 
 	packet->roce_mode = roce_mode;
 	memcpy(packet->header.vaddr, ud_header_buffer, header_size);
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 4ba019e..35d5b89 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1653,7 +1653,7 @@
 	int status = 0;
 
 	if (new_state == qp->state)
-		return 1;
+		return 0;
 
 	switch (qp->state) {
 	case QED_ROCE_QP_STATE_RESET:
diff --git a/drivers/infiniband/sw/rdmavt/mmap.c b/drivers/infiniband/sw/rdmavt/mmap.c
index e202b81..6b712ee 100644
--- a/drivers/infiniband/sw/rdmavt/mmap.c
+++ b/drivers/infiniband/sw/rdmavt/mmap.c
@@ -170,9 +170,9 @@
 
 	spin_lock_irq(&rdi->mmap_offset_lock);
 	if (rdi->mmap_offset == 0)
-		rdi->mmap_offset = PAGE_SIZE;
+		rdi->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);
 	ip->offset = rdi->mmap_offset;
-	rdi->mmap_offset += size;
+	rdi->mmap_offset += ALIGN(size, SHMLBA);
 	spin_unlock_irq(&rdi->mmap_offset_lock);
 
 	INIT_LIST_HEAD(&ip->pending_mmaps);
diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c
index c572a4c..bd812e0 100644
--- a/drivers/infiniband/sw/rxe/rxe_mmap.c
+++ b/drivers/infiniband/sw/rxe/rxe_mmap.c
@@ -156,10 +156,10 @@
 	spin_lock_bh(&rxe->mmap_offset_lock);
 
 	if (rxe->mmap_offset == 0)
-		rxe->mmap_offset = PAGE_SIZE;
+		rxe->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);
 
 	ip->info.offset = rxe->mmap_offset;
-	rxe->mmap_offset += size;
+	rxe->mmap_offset += ALIGN(size, SHMLBA);
 
 	spin_unlock_bh(&rxe->mmap_offset_lock);
 
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index ee26a1b..1c4e5b2 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -412,6 +412,8 @@
 	elem = kmem_cache_zalloc(pool_cache(pool),
 				 (pool->flags & RXE_POOL_ATOMIC) ?
 				 GFP_ATOMIC : GFP_KERNEL);
+	if (!elem)
+		return NULL;
 
 	elem->pool = pool;
 	kref_init(&elem->ref_cnt);
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 9d08478..5b0ca35 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -726,11 +726,11 @@
 	ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb);
 	if (ret) {
 		qp->need_req_skb = 1;
-		kfree_skb(skb);
 
 		rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
 
 		if (ret == -EAGAIN) {
+			kfree_skb(skb);
 			rxe_run_task(&qp->req.task, 1);
 			goto exit;
 		}
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 39101b1..39e31b2 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -799,18 +799,17 @@
 		/* Unreachable */
 		WARN_ON(1);
 
-	/* We successfully processed this new request. */
-	qp->resp.msn++;
-
 	/* next expected psn, read handles this separately */
 	qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
 
 	qp->resp.opcode = pkt->opcode;
 	qp->resp.status = IB_WC_SUCCESS;
 
-	if (pkt->mask & RXE_COMP_MASK)
+	if (pkt->mask & RXE_COMP_MASK) {
+		/* We successfully processed this new request. */
+		qp->resp.msn++;
 		return RESPST_COMPLETE;
-	else if (qp_type(qp) == IB_QPT_RC)
+	} else if (qp_type(qp) == IB_QPT_RC)
 		return RESPST_ACKNOWLEDGE;
 	else
 		return RESPST_CLEANUP;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 830fecb..335bd2c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -1177,10 +1177,15 @@
 		ipoib_ib_dev_down(dev);
 
 	if (level == IPOIB_FLUSH_HEAVY) {
+		rtnl_lock();
 		if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
 			ipoib_ib_dev_stop(dev);
-		if (ipoib_ib_dev_open(dev) != 0)
+
+		result = ipoib_ib_dev_open(dev);
+		rtnl_unlock();
+		if (result)
 			return;
+
 		if (netif_queue_stopped(dev))
 			netif_start_queue(dev);
 	}
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 0be6a7c..cb48e22 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -430,6 +430,7 @@
 	struct list_head		  list;
 	struct iser_reg_resources	  rsc;
 	struct iser_pi_context		 *pi_ctx;
+	struct list_head                  all_list;
 };
 
 /**
@@ -443,6 +444,7 @@
 	struct list_head        list;
 	spinlock_t              lock;
 	int                     size;
+	struct list_head        all_list;
 };
 
 /**
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index a4b791d..bc6f5bb 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -362,6 +362,7 @@
 	int i, ret;
 
 	INIT_LIST_HEAD(&fr_pool->list);
+	INIT_LIST_HEAD(&fr_pool->all_list);
 	spin_lock_init(&fr_pool->lock);
 	fr_pool->size = 0;
 	for (i = 0; i < cmds_max; i++) {
@@ -373,6 +374,7 @@
 		}
 
 		list_add_tail(&desc->list, &fr_pool->list);
+		list_add_tail(&desc->all_list, &fr_pool->all_list);
 		fr_pool->size++;
 	}
 
@@ -392,13 +394,13 @@
 	struct iser_fr_desc *desc, *tmp;
 	int i = 0;
 
-	if (list_empty(&fr_pool->list))
+	if (list_empty(&fr_pool->all_list))
 		return;
 
 	iser_info("freeing conn %p fr pool\n", ib_conn);
 
-	list_for_each_entry_safe(desc, tmp, &fr_pool->list, list) {
-		list_del(&desc->list);
+	list_for_each_entry_safe(desc, tmp, &fr_pool->all_list, all_list) {
+		list_del(&desc->all_list);
 		iser_free_reg_res(&desc->rsc);
 		if (desc->pi_ctx)
 			iser_free_pi_ctx(desc->pi_ctx);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 1eee8f7..84f9185 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -648,12 +648,19 @@
 static int srp_lookup_path(struct srp_rdma_ch *ch)
 {
 	struct srp_target_port *target = ch->target;
-	int ret;
+	int ret = -ENODEV;
 
 	ch->path.numb_path = 1;
 
 	init_completion(&ch->done);
 
+	/*
+	 * Avoid that the SCSI host can be removed by srp_remove_target()
+	 * before srp_path_rec_completion() is called.
+	 */
+	if (!scsi_host_get(target->scsi_host))
+		goto out;
+
 	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
 					       target->srp_host->srp_dev->dev,
 					       target->srp_host->port,
@@ -667,18 +674,24 @@
 					       GFP_KERNEL,
 					       srp_path_rec_completion,
 					       ch, &ch->path_query);
-	if (ch->path_query_id < 0)
-		return ch->path_query_id;
+	ret = ch->path_query_id;
+	if (ret < 0)
+		goto put;
 
 	ret = wait_for_completion_interruptible(&ch->done);
 	if (ret < 0)
-		return ret;
+		goto put;
 
-	if (ch->status < 0)
+	ret = ch->status;
+	if (ret < 0)
 		shost_printk(KERN_WARNING, target->scsi_host,
 			     PFX "Path record query failed\n");
 
-	return ch->status;
+put:
+	scsi_host_put(target->scsi_host);
+
+out:
+	return ret;
 }
 
 static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 0b1f69e..29ab814 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -992,8 +992,7 @@
 		return -ENOMEM;
 
 	attr->qp_state = IB_QPS_INIT;
-	attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
-	    IB_ACCESS_REMOTE_WRITE;
+	attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
 	attr->port_num = ch->sport->port;
 	attr->pkey_index = 0;
 
@@ -2750,7 +2749,7 @@
 {
 	const char *p;
 	unsigned len, count, leading_zero_bytes;
-	int ret, rc;
+	int ret;
 
 	p = name;
 	if (strncasecmp(p, "0x", 2) == 0)
@@ -2762,10 +2761,9 @@
 	count = min(len / 2, 16U);
 	leading_zero_bytes = 16 - count;
 	memset(i_port_id, 0, leading_zero_bytes);
-	rc = hex2bin(i_port_id + leading_zero_bytes, p, count);
-	if (rc < 0)
-		pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", rc);
-	ret = 0;
+	ret = hex2bin(i_port_id + leading_zero_bytes, p, count);
+	if (ret < 0)
+		pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", ret);
 out:
 	return ret;
 }
diff --git a/drivers/input/misc/hbtp_input.c b/drivers/input/misc/hbtp_input.c
index 108ed032..90493b1 100644
--- a/drivers/input/misc/hbtp_input.c
+++ b/drivers/input/misc/hbtp_input.c
@@ -1181,12 +1181,6 @@
 			pr_err("%s: failed to disable GPIO pins\n", __func__);
 			goto err_pin_disable;
 		}
-
-		rc = hbtp_pdev_power_on(ts, false);
-		if (rc) {
-			pr_err("%s: failed to disable power\n", __func__);
-			goto err_power_disable;
-		}
 		ts->power_suspended = true;
 		if (ts->input_dev) {
 			kobject_uevent_env(&ts->input_dev->dev.kobj,
@@ -1214,8 +1208,7 @@
 	}
 	mutex_unlock(&hbtp->mutex);
 	return 0;
-err_power_disable:
-	hbtp_pinctrl_enable(ts, true);
+
 err_pin_disable:
 	mutex_unlock(&hbtp->mutex);
 	return rc;
@@ -1234,12 +1227,6 @@
 			mutex_unlock(&hbtp->mutex);
 			return 0;
 		}
-		rc = hbtp_pdev_power_on(ts, true);
-		if (rc) {
-			pr_err("%s: failed to enable panel power\n", __func__);
-			goto err_power_on;
-		}
-
 		rc = hbtp_pinctrl_enable(ts, true);
 
 		if (rc) {
@@ -1287,8 +1274,6 @@
 
 err_pin_enable:
 	hbtp_pdev_power_on(ts, false);
-err_power_on:
-	mutex_unlock(&hbtp->mutex);
 	return rc;
 }
 
@@ -1359,6 +1344,12 @@
 		hbtp->vcc_dig = vcc_dig;
 	}
 
+	error = hbtp_pdev_power_on(hbtp, true);
+	if (error) {
+		pr_err("%s: failed to power on\n", __func__);
+		return error;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/input/misc/qpnp-power-on.c b/drivers/input/misc/qpnp-power-on.c
index 339f94c..5fa0d4b 100644
--- a/drivers/input/misc/qpnp-power-on.c
+++ b/drivers/input/misc/qpnp-power-on.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -212,6 +212,9 @@
 	int			warm_reset_poff_type;
 	int			hard_reset_poff_type;
 	int			shutdown_poff_type;
+	int			resin_warm_reset_type;
+	int			resin_hard_reset_type;
+	int			resin_shutdown_type;
 	u16			base;
 	u8			subtype;
 	u8			pon_ver;
@@ -219,7 +222,12 @@
 	u8			warm_reset_reason2;
 	bool			is_spon;
 	bool			store_hard_reset_reason;
+	bool			resin_hard_reset_disable;
+	bool			resin_shutdown_disable;
+	bool			ps_hold_hard_reset_disable;
+	bool			ps_hold_shutdown_disable;
 	bool			kpdpwr_dbc_enable;
+	bool			resin_pon_reset;
 	ktime_t			kpdpwr_last_release_time;
 };
 
@@ -478,6 +486,7 @@
 		enum pon_power_off_type type)
 {
 	int rc;
+	bool disable = false;
 	u16 rst_en_reg;
 
 	if (pon->pon_ver == QPNP_PON_GEN1_V1)
@@ -497,10 +506,12 @@
 	case PON_POWER_OFF_HARD_RESET:
 		if (pon->hard_reset_poff_type != -EINVAL)
 			type = pon->hard_reset_poff_type;
+		disable = pon->ps_hold_hard_reset_disable;
 		break;
 	case PON_POWER_OFF_SHUTDOWN:
 		if (pon->shutdown_poff_type != -EINVAL)
 			type = pon->shutdown_poff_type;
+		disable = pon->ps_hold_shutdown_disable;
 		break;
 	default:
 		break;
@@ -513,6 +524,13 @@
 			rst_en_reg, rc);
 
 	/*
+	 * Check if ps-hold power off configuration needs to be disabled.
+	 * If yes, then return without configuring.
+	 */
+	if (disable)
+		return rc;
+
+	/*
 	 * We need 10 sleep clock cycles here. But since the clock is
 	 * internally generated, we need to add 50% tolerance to be
 	 * conservative.
@@ -533,7 +551,80 @@
 			"Unable to write to addr=%hx, rc(%d)\n",
 			rst_en_reg, rc);
 
-	dev_dbg(&pon->pdev->dev, "power off type = 0x%02X\n", type);
+	dev_dbg(&pon->pdev->dev, "ps_hold power off type = 0x%02X\n", type);
+	return rc;
+}
+
+static int qpnp_resin_pon_reset_config(struct qpnp_pon *pon,
+		enum pon_power_off_type type)
+{
+	int rc;
+	bool disable = false;
+	u16 rst_en_reg;
+
+	if (pon->pon_ver == QPNP_PON_GEN1_V1)
+		rst_en_reg = QPNP_PON_RESIN_S2_CNTL(pon);
+	else
+		rst_en_reg = QPNP_PON_RESIN_S2_CNTL2(pon);
+
+	/*
+	 * Based on the poweroff type set for a PON device through device tree
+	 * change the type being configured into PON_RESIN_S2_CTL.
+	 */
+	switch (type) {
+	case PON_POWER_OFF_WARM_RESET:
+		if (pon->resin_warm_reset_type != -EINVAL)
+			type = pon->resin_warm_reset_type;
+		break;
+	case PON_POWER_OFF_HARD_RESET:
+		if (pon->resin_hard_reset_type != -EINVAL)
+			type = pon->resin_hard_reset_type;
+		disable = pon->resin_hard_reset_disable;
+		break;
+	case PON_POWER_OFF_SHUTDOWN:
+		if (pon->resin_shutdown_type != -EINVAL)
+			type = pon->resin_shutdown_type;
+		disable = pon->resin_shutdown_disable;
+		break;
+	default:
+		break;
+	}
+
+	rc = qpnp_pon_masked_write(pon, rst_en_reg, QPNP_PON_S2_CNTL_EN, 0);
+	if (rc)
+		dev_err(&pon->pdev->dev,
+			"Unable to write to addr=%hx, rc(%d)\n",
+			rst_en_reg, rc);
+
+	/*
+	 * Check if resin power off configuration needs to be disabled.
+	 * If yes, then return without configuring.
+	 */
+	if (disable)
+		return rc;
+
+	/*
+	 * We need 10 sleep clock cycles here. But since the clock is
+	 * internally generated, we need to add 50% tolerance to be
+	 * conservative.
+	 */
+	udelay(500);
+
+	rc = qpnp_pon_masked_write(pon, QPNP_PON_RESIN_S2_CNTL(pon),
+				   QPNP_PON_S2_CNTL_TYPE_MASK, type);
+	if (rc)
+		dev_err(&pon->pdev->dev,
+			"Unable to write to addr=%x, rc(%d)\n",
+				QPNP_PON_RESIN_S2_CNTL(pon), rc);
+
+	rc = qpnp_pon_masked_write(pon, rst_en_reg, QPNP_PON_S2_CNTL_EN,
+						    QPNP_PON_S2_CNTL_EN);
+	if (rc)
+		dev_err(&pon->pdev->dev,
+			"Unable to write to addr=%hx, rc(%d)\n",
+			rst_en_reg, rc);
+
+	dev_dbg(&pon->pdev->dev, "resin power off type = 0x%02X\n", type);
 	return rc;
 }
 
@@ -588,6 +679,15 @@
 				rc);
 			goto out;
 		}
+		if (pon->resin_pon_reset) {
+			rc = qpnp_resin_pon_reset_config(pon, type);
+			if (rc) {
+				dev_err(&pon->pdev->dev,
+					"Error configuring secondary PON resin rc: %d\n",
+					rc);
+				goto out;
+			}
+		}
 	}
 	/* Set ship mode here if it has been requested */
 	if (!!pon_ship_mode_en) {
@@ -2029,7 +2129,7 @@
 		dev_err(&pdev->dev,
 			"Couldn't find reg in node = %s rc = %d\n",
 			pdev->dev.of_node->full_name, rc);
-		return rc;
+		goto err_out;
 	}
 	pon->base = base;
 
@@ -2041,7 +2141,8 @@
 			pon->num_pon_config++;
 		} else {
 			pr_err("Unknown sub-node\n");
-			return -EINVAL;
+			rc = -EINVAL;
+			goto err_out;
 		}
 	}
 
@@ -2053,7 +2154,7 @@
 	if (rc) {
 		dev_err(&pdev->dev, "Error in pon_regulator_init rc: %d\n",
 			rc);
-		return rc;
+		goto err_out;
 	}
 
 	if (!pon->num_pon_config)
@@ -2072,7 +2173,7 @@
 		dev_err(&pon->pdev->dev,
 			"Unable to read PON_PERPH_SUBTYPE register rc: %d\n",
 			rc);
-		return rc;
+		goto err_out;
 	}
 	pon->subtype = temp;
 
@@ -2083,7 +2184,7 @@
 		dev_err(&pon->pdev->dev,
 			"Unable to read addr=%x, rc(%d)\n",
 			QPNP_PON_REVISION2(pon), rc);
-		return rc;
+		goto err_out;
 	}
 
 	pon->pon_ver = temp;
@@ -2100,7 +2201,7 @@
 		dev_err(&pon->pdev->dev,
 			"Invalid PON_PERPH_SUBTYPE value %x\n",
 			pon->subtype);
-		return -EINVAL;
+		goto err_out;
 	}
 
 	pr_debug("%s: pon_subtype=%x, pon_version=%x\n", __func__,
@@ -2111,7 +2212,7 @@
 		dev_err(&pon->pdev->dev,
 			"Unable to store/clear WARM_RESET_REASONx registers rc: %d\n",
 			rc);
-		return rc;
+		goto err_out;
 	}
 
 	/* PON reason */
@@ -2120,7 +2221,7 @@
 		dev_err(&pon->pdev->dev,
 			"Unable to read PON_RESASON1 reg rc: %d\n",
 			rc);
-		return rc;
+		goto err_out;
 	}
 
 	if (sys_reset)
@@ -2147,14 +2248,14 @@
 		rc = read_gen2_pon_off_reason(pon, &poff_sts,
 						&reason_index_offset);
 		if (rc)
-			return rc;
+			goto err_out;
 	} else {
 		rc = regmap_bulk_read(pon->regmap, QPNP_POFF_REASON1(pon),
 			buf, 2);
 		if (rc) {
 			dev_err(&pon->pdev->dev, "Unable to read POFF_REASON regs rc:%d\n",
 				rc);
-			return rc;
+			goto err_out;
 		}
 		poff_sts = buf[0] | (buf[1] << 8);
 	}
@@ -2186,7 +2287,7 @@
 			dev_err(&pon->pdev->dev,
 				"Unable to read s3 timer rc:%d\n",
 				rc);
-			return rc;
+			goto err_out;
 		}
 	} else {
 		if (s3_debounce > QPNP_PON_S3_TIMER_SECS_MAX) {
@@ -2205,7 +2306,7 @@
 		if (rc) {
 			dev_err(&pdev->dev, "Unable to do SEC_ACCESS rc:%d\n",
 				rc);
-			return rc;
+			goto err_out;
 		}
 
 		rc = qpnp_pon_masked_write(pon, QPNP_PON_S3_DBC_CTL(pon),
@@ -2214,7 +2315,7 @@
 			dev_err(&pdev->dev,
 				"Unable to set S3 debounce rc:%d\n",
 				rc);
-			return rc;
+			goto err_out;
 		}
 	}
 
@@ -2225,7 +2326,7 @@
 	if (rc && rc != -EINVAL) {
 		dev_err(&pon->pdev->dev, "Unable to read s3 timer rc: %d\n",
 			rc);
-		return rc;
+		goto err_out;
 	}
 
 	if (!strcmp(s3_src, "kpdpwr"))
@@ -2247,7 +2348,7 @@
 	if (rc) {
 		dev_err(&pdev->dev, "Unable to program s3 source rc: %d\n",
 			rc);
-		return rc;
+		goto err_out;
 	}
 
 	dev_set_drvdata(&pdev->dev, pon);
@@ -2259,7 +2360,7 @@
 	if (rc) {
 		dev_err(&pdev->dev,
 			"Unable to initialize PON configurations rc: %d\n", rc);
-		return rc;
+		goto err_out;
 	}
 
 	rc = of_property_read_u32(pon->pdev->dev.of_node,
@@ -2268,21 +2369,21 @@
 		if (rc != -EINVAL) {
 			dev_err(&pdev->dev,
 				"Unable to read debounce delay rc: %d\n", rc);
-			return rc;
+			goto err_out;
 		}
 	} else {
 		rc = qpnp_pon_set_dbc(pon, delay);
 		if (rc) {
 			dev_err(&pdev->dev,
 				"Unable to set PON debounce delay rc=%d\n", rc);
-			return rc;
+			goto err_out;
 		}
 	}
 	rc = qpnp_pon_get_dbc(pon, &pon->dbc_time_us);
 	if (rc) {
 		dev_err(&pdev->dev,
 			"Unable to get PON debounce delay rc=%d\n", rc);
-		return rc;
+		goto err_out;
 	}
 
 	pon->kpdpwr_dbc_enable = of_property_read_bool(pon->pdev->dev.of_node,
@@ -2295,7 +2396,7 @@
 		if (rc != -EINVAL) {
 			dev_err(&pdev->dev, "Unable to read warm reset poweroff type rc: %d\n",
 				rc);
-			return rc;
+			goto err_out;
 		}
 		pon->warm_reset_poff_type = -EINVAL;
 	} else if (pon->warm_reset_poff_type <= PON_POWER_OFF_RESERVED ||
@@ -2311,7 +2412,7 @@
 		if (rc != -EINVAL) {
 			dev_err(&pdev->dev, "Unable to read hard reset poweroff type rc: %d\n",
 				rc);
-			return rc;
+			goto err_out;
 		}
 		pon->hard_reset_poff_type = -EINVAL;
 	} else if (pon->hard_reset_poff_type <= PON_POWER_OFF_RESERVED ||
@@ -2327,7 +2428,7 @@
 		if (rc != -EINVAL) {
 			dev_err(&pdev->dev, "Unable to read shutdown poweroff type rc: %d\n",
 				rc);
-			return rc;
+			goto err_out;
 		}
 		pon->shutdown_poff_type = -EINVAL;
 	} else if (pon->shutdown_poff_type <= PON_POWER_OFF_RESERVED ||
@@ -2336,10 +2437,73 @@
 		pon->shutdown_poff_type = -EINVAL;
 	}
 
+	pon->ps_hold_hard_reset_disable =
+					of_property_read_bool(pdev->dev.of_node,
+					"qcom,ps-hold-hard-reset-disable");
+	pon->ps_hold_shutdown_disable = of_property_read_bool(pdev->dev.of_node,
+					"qcom,ps-hold-shutdown-disable");
+
+
+	pon->resin_pon_reset = of_property_read_bool(pdev->dev.of_node,
+					"qcom,resin-pon-reset");
+
+	rc = of_property_read_u32(pon->pdev->dev.of_node,
+				"qcom,resin-warm-reset-type",
+				&pon->resin_warm_reset_type);
+	if (rc) {
+		if (rc != -EINVAL) {
+			dev_err(&pdev->dev, "Unable to read resin warm reset poweroff type rc: %d\n",
+				rc);
+			goto err_out;
+		}
+		pon->resin_warm_reset_type = -EINVAL;
+	} else if (pon->resin_warm_reset_type <= PON_POWER_OFF_RESERVED ||
+			pon->resin_warm_reset_type >= PON_POWER_OFF_MAX_TYPE) {
+		dev_err(&pdev->dev, "Invalid resin-warm-reset-type\n");
+		pon->resin_warm_reset_type = -EINVAL;
+	}
+
+	rc = of_property_read_u32(pon->pdev->dev.of_node,
+				"qcom,resin-hard-reset-type",
+				&pon->resin_hard_reset_type);
+	if (rc) {
+		if (rc != -EINVAL) {
+			dev_err(&pdev->dev, "Unable to read resin hard reset poweroff type rc: %d\n",
+				rc);
+			goto err_out;
+		}
+		pon->resin_hard_reset_type = -EINVAL;
+	} else if (pon->resin_hard_reset_type <= PON_POWER_OFF_RESERVED ||
+			pon->resin_hard_reset_type >= PON_POWER_OFF_MAX_TYPE) {
+		dev_err(&pdev->dev, "Invalid resin-hard-reset-type\n");
+		pon->resin_hard_reset_type = -EINVAL;
+	}
+
+	rc = of_property_read_u32(pon->pdev->dev.of_node,
+				"qcom,resin-shutdown-type",
+				&pon->resin_shutdown_type);
+	if (rc) {
+		if (rc != -EINVAL) {
+			dev_err(&pdev->dev, "Unable to read resin shutdown poweroff type rc: %d\n",
+				rc);
+			goto err_out;
+		}
+		pon->resin_shutdown_type = -EINVAL;
+	} else if (pon->resin_shutdown_type <= PON_POWER_OFF_RESERVED ||
+			pon->resin_shutdown_type >= PON_POWER_OFF_MAX_TYPE) {
+		dev_err(&pdev->dev, "Invalid resin-shutdown-type\n");
+		pon->resin_shutdown_type = -EINVAL;
+	}
+
+	pon->resin_hard_reset_disable = of_property_read_bool(pdev->dev.of_node,
+					"qcom,resin-hard-reset-disable");
+	pon->resin_shutdown_disable = of_property_read_bool(pdev->dev.of_node,
+					"qcom,resin-shutdown-disable");
+
 	rc = device_create_file(&pdev->dev, &dev_attr_debounce_us);
 	if (rc) {
 		dev_err(&pdev->dev, "sys file creation failed rc: %d\n", rc);
-		return rc;
+		goto err_out;
 	}
 
 	if (of_property_read_bool(pdev->dev.of_node,
@@ -2347,7 +2511,8 @@
 		if (sys_reset) {
 			dev_err(&pdev->dev,
 				"qcom,system-reset property shouldn't be used along with qcom,secondary-pon-reset property\n");
-			return -EINVAL;
+			rc = -EINVAL;
+			goto err_out;
 		}
 		spin_lock_irqsave(&spon_list_slock, flags);
 		list_add(&pon->list, &spon_dev_list);
@@ -2361,6 +2526,11 @@
 
 	qpnp_pon_debugfs_init(pdev);
 	return 0;
+
+err_out:
+	if (sys_reset)
+		sys_reset_dev = NULL;
+	return rc;
 }
 
 static int qpnp_pon_remove(struct platform_device *pdev)
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index cd834da..59603a5 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1609,7 +1609,7 @@
 		case 5:
 			etd->hw_version = 3;
 			break;
-		case 6 ... 14:
+		case 6 ... 15:
 			etd->hw_version = 4;
 			break;
 		default:
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index dbf0983..d1051e3 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -520,6 +520,13 @@
 			DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"),
 		},
 	},
+	{
+		/* TUXEDO BU1406 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
+		},
+	},
 	{ }
 };
 
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index efca013..86168b9 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -11,6 +11,8 @@
 
 if INPUT_TOUCHSCREEN
 
+source "drivers/input/touchscreen/synaptics_dsx_2.6/Kconfig"
+
 config TOUCHSCREEN_PROPERTIES
 	def_tristate INPUT
 	depends on INPUT
@@ -1214,4 +1216,14 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called bu21023_ts.
 
+config TOUCHSCREEN_SYNAPTICS_DSX
+        bool "Synaptics Touchscreen Driver"
+        depends on I2C
+        help
+          Say Y here if you have a Synaptics Touchscreen.
+
+          If unsure, say N.
+
+source "drivers/input/touchscreen/synaptics_dsx/Kconfig"
+
 endif
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 81b8645..7ac5a98 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -71,6 +71,8 @@
 obj-$(CONFIG_TOUCHSCREEN_SUN4I)		+= sun4i-ts.o
 obj-$(CONFIG_TOUCHSCREEN_SUR40)		+= sur40.o
 obj-$(CONFIG_TOUCHSCREEN_SURFACE3_SPI)	+= surface3_spi.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX) += synaptics_dsx/
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_v26)	+= synaptics_dsx_2.6/
 obj-$(CONFIG_TOUCHSCREEN_TI_AM335X_TSC)	+= ti_am335x_tsc.o
 obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213)	+= touchit213.o
 obj-$(CONFIG_TOUCHSCREEN_TOUCHRIGHT)	+= touchright.o
diff --git a/drivers/input/touchscreen/synaptics_dsx/Kconfig b/drivers/input/touchscreen/synaptics_dsx/Kconfig
new file mode 100644
index 0000000..b54e792
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/Kconfig
@@ -0,0 +1,138 @@
+#
+# Synaptics DSX touchscreen driver configuration
+#
+menuconfig TOUCHSCREEN_SYNAPTICS_DSX
+	bool "Synaptics DSX touchscreen"
+	default y
+	help
+	  Say Y here if you have a Synaptics DSX touchscreen connected
+	  to your system.
+
+	  If unsure, say N.
+
+if TOUCHSCREEN_SYNAPTICS_DSX
+
+choice
+	default TOUCHSCREEN_SYNAPTICS_DSX_I2C
+	prompt "Synaptics DSX bus interface"
+config TOUCHSCREEN_SYNAPTICS_DSX_I2C
+	bool "RMI over I2C"
+	depends on I2C
+config TOUCHSCREEN_SYNAPTICS_DSX_SPI
+	bool "RMI over SPI"
+	depends on SPI_MASTER
+config TOUCHSCREEN_SYNAPTICS_DSX_RMI_HID_I2C
+	bool "HID over I2C"
+	depends on I2C
+endchoice
+
+config TOUCHSCREEN_SYNAPTICS_DSX_CORE
+	tristate "Synaptics DSX core driver module"
+	depends on I2C || SPI_MASTER
+	help
+	  Say Y here to enable basic touch reporting functionality.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx_core.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV
+	tristate "Synaptics DSX RMI device module"
+	depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE
+	help
+	  Say Y here to enable support for direct RMI register access.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx_rmi_dev.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE
+	tristate "Synaptics DSX firmware update module"
+	depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE
+	help
+	  Say Y here to enable support for doing firmware update.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx_fw_update.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+	bool "Synaptics DSX firmware update sysfs attributes"
+	depends on TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE
+	help
+	  Say Y here to enable support for sysfs attributes for
+	  performing firmware update in a development environment.
+	  This does not affect the core or other subsystem attributes.
+
+	  If unsure, say N.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_TEST_REPORTING
+	tristate "Synaptics DSX test reporting module"
+	depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE
+	help
+	  Say Y here to enable support for retrieving production test reports.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx_test_reporting.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_PROXIMITY
+	tristate "Synaptics DSX proximity module"
+	depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE
+	help
+	  Say Y here to enable support for proximity functionality.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx_proximity.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_ACTIVE_PEN
+	tristate "Synaptics DSX active pen module"
+	depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE
+	help
+	  Say Y here to enable support for active pen functionality.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx_active_pen.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_GESTURE
+	tristate "Synaptics DSX user defined gesture module"
+	depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE
+	help
+	  Say Y here to enable support for user defined gesture functionality.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx_gesture.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_VIDEO
+	tristate "Synaptics DSX video module"
+	depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE
+	help
+	  Say Y here to enable support for video communication functionality.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx_video.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_DEBUG
+	tristate "Synaptics DSX debug module"
+	depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE
+	help
+	  Say Y here to enable support for firmware debug functionality.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx_debug.
+
+endif
diff --git a/drivers/input/touchscreen/synaptics_dsx/Makefile b/drivers/input/touchscreen/synaptics_dsx/Makefile
new file mode 100644
index 0000000..191dcdc
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/Makefile
@@ -0,0 +1,18 @@
+#
+# Makefile for the Synaptics DSX touchscreen driver.
+#
+
+# Each configuration option enables a list of files.
+
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_I2C) += synaptics_dsx_i2c.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_SPI) += synaptics_dsx_spi.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_HID_I2C) += synaptics_dsx_rmi_hid_i2c.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE) += synaptics_dsx_core.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV) += synaptics_dsx_rmi_dev.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE) += synaptics_dsx_fw_update.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_TEST_REPORTING) += synaptics_dsx_test_reporting.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_PROXIMITY) += synaptics_dsx_proximity.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_ACTIVE_PEN) += synaptics_dsx_active_pen.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_GESTURE) += synaptics_dsx_gesture.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_VIDEO) += synaptics_dsx_video.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_DEBUG) += synaptics_dsx_debug.o
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_active_pen.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_active_pen.c
new file mode 100644
index 0000000..3666e87
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_active_pen.c
@@ -0,0 +1,607 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+#define APEN_PHYS_NAME "synaptics_dsx/active_pen"
+
+#define ACTIVE_PEN_MAX_PRESSURE_16BIT 65535
+#define ACTIVE_PEN_MAX_PRESSURE_8BIT 255
+
+/*
+ * F12 query register 8: byte 0 gives the size of query 9, byte 1 holds
+ * presence flags for data registers 0-7.  apen_reg_init() uses the
+ * flags to compute the offset of the active pen packet (data 6).
+ */
+struct synaptics_rmi4_f12_query_8 {
+	union {
+		struct {
+			unsigned char size_of_query9;
+			struct {
+				unsigned char data0_is_present:1;
+				unsigned char data1_is_present:1;
+				unsigned char data2_is_present:1;
+				unsigned char data3_is_present:1;
+				unsigned char data4_is_present:1;
+				unsigned char data5_is_present:1;
+				unsigned char data6_is_present:1;
+				unsigned char data7_is_present:1;
+			} __packed;
+		};
+		unsigned char data[2];
+	};
+};
+
+/*
+ * Active pen data packet layout used when the firmware reports 8-bit
+ * pressure (a single pressure_msb byte); see apen_pressure().
+ */
+struct apen_data_8b_pressure {
+	union {
+		struct {
+			unsigned char status_pen:1;
+			unsigned char status_invert:1;
+			unsigned char status_barrel:1;
+			unsigned char status_reserved:5;
+			unsigned char x_lsb;
+			unsigned char x_msb;
+			unsigned char y_lsb;
+			unsigned char y_msb;
+			unsigned char pressure_msb;
+			unsigned char battery_state;
+			unsigned char pen_id_0_7;
+			unsigned char pen_id_8_15;
+			unsigned char pen_id_16_23;
+			unsigned char pen_id_24_31;
+		} __packed;
+		unsigned char data[11];
+	};
+};
+
+/*
+ * Active pen data packet layout used when the firmware reports 16-bit
+ * pressure (pressure_lsb + pressure_msb).  This is the layout read
+ * from the device; apen_report() reinterprets the buffer as
+ * struct apen_data_8b_pressure when only 8-bit pressure is supported.
+ */
+struct apen_data {
+	union {
+		struct {
+			unsigned char status_pen:1;
+			unsigned char status_invert:1;
+			unsigned char status_barrel:1;
+			unsigned char status_reserved:5;
+			unsigned char x_lsb;
+			unsigned char x_msb;
+			unsigned char y_lsb;
+			unsigned char y_msb;
+			unsigned char pressure_lsb;
+			unsigned char pressure_msb;
+			unsigned char battery_state;
+			unsigned char pen_id_0_7;
+			unsigned char pen_id_8_15;
+			unsigned char pen_id_16_23;
+			unsigned char pen_id_24_31;
+		} __packed;
+		unsigned char data[12];
+	};
+};
+
+/*
+ * Module-wide state for the active pen handler.  Populated by
+ * apen_scan_pdt()/apen_reg_init() and torn down in
+ * synaptics_rmi4_apen_remove().
+ */
+struct synaptics_rmi4_apen_handle {
+	bool apen_present;
+	unsigned char intr_mask;
+	unsigned char battery_state;
+	unsigned short query_base_addr;
+	unsigned short control_base_addr;
+	unsigned short data_base_addr;
+	unsigned short command_base_addr;
+	unsigned short apen_data_addr; /* F12 data base + offset of data 6 */
+	unsigned short max_pressure;
+	unsigned int pen_id;
+	struct input_dev *apen_dev;
+	struct apen_data *apen_data;
+	struct synaptics_rmi4_data *rmi4_data;
+};
+
+static struct synaptics_rmi4_apen_handle *apen;
+
+DECLARE_COMPLETION(apen_remove_complete);
+
+/* Report release of all pen tools and mark the pen as gone. */
+static void apen_lift(void)
+{
+	struct input_dev *dev = apen->apen_dev;
+
+	input_report_key(dev, BTN_TOUCH, 0);
+	input_report_key(dev, BTN_TOOL_PEN, 0);
+	input_report_key(dev, BTN_TOOL_RUBBER, 0);
+	input_sync(dev);
+
+	apen->apen_present = false;
+}
+
+/*
+ * Read the active pen data registers and forward the result to the
+ * input subsystem.
+ *
+ * Handles pen absence (lift), the firmware's invalid-coordinate
+ * sentinel, inversion (eraser) transitions, and both the 8-bit and
+ * 16-bit pressure packet formats selected by apen_pressure().
+ */
+static void apen_report(void)
+{
+	int retval;
+	int x;
+	int y;
+	int pressure;
+	static int invert = -1; /* last reported invert state, -1 = unknown */
+	struct apen_data_8b_pressure *apen_data_8b;
+	struct synaptics_rmi4_data *rmi4_data = apen->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			apen->apen_data_addr,
+			apen->apen_data->data,
+			sizeof(apen->apen_data->data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read active pen data\n",
+				__func__);
+		return;
+	}
+
+	if (apen->apen_data->status_pen == 0) {
+		if (apen->apen_present)
+			apen_lift();
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: No active pen data\n",
+				__func__);
+
+		return;
+	}
+
+	x = (apen->apen_data->x_msb << 8) | (apen->apen_data->x_lsb);
+	y = (apen->apen_data->y_msb << 8) | (apen->apen_data->y_lsb);
+
+	/*
+	 * The pen-in-range-but-no-position sentinel is 0xffff in both
+	 * coordinates.  x and y are assembled from unsigned bytes
+	 * (range 0..0xffff), so the original comparison against -1
+	 * could never be true; compare against the 16-bit sentinel.
+	 */
+	if ((x == 0xffff) && (y == 0xffff)) {
+		if (apen->apen_present)
+			apen_lift();
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Active pen in range but no valid x & y\n",
+				__func__);
+
+		return;
+	}
+
+	if (!apen->apen_present)
+		invert = -1;
+
+	/* Tool changed between pen and eraser: lift the old tool first. */
+	if (invert != -1 && invert != apen->apen_data->status_invert)
+		apen_lift();
+
+	invert = apen->apen_data->status_invert;
+
+	if (apen->max_pressure == ACTIVE_PEN_MAX_PRESSURE_16BIT) {
+		pressure = (apen->apen_data->pressure_msb << 8) |
+				apen->apen_data->pressure_lsb;
+		apen->battery_state = apen->apen_data->battery_state;
+		apen->pen_id = (apen->apen_data->pen_id_24_31 << 24) |
+				(apen->apen_data->pen_id_16_23 << 16) |
+				(apen->apen_data->pen_id_8_15 << 8) |
+				apen->apen_data->pen_id_0_7;
+	} else {
+		/* 8-bit pressure firmware: same buffer, shorter layout. */
+		apen_data_8b = (struct apen_data_8b_pressure *)apen->apen_data;
+		pressure = apen_data_8b->pressure_msb;
+		apen->battery_state = apen_data_8b->battery_state;
+		apen->pen_id = (apen_data_8b->pen_id_24_31 << 24) |
+				(apen_data_8b->pen_id_16_23 << 16) |
+				(apen_data_8b->pen_id_8_15 << 8) |
+				apen_data_8b->pen_id_0_7;
+	}
+
+	input_report_key(apen->apen_dev, BTN_TOUCH, pressure > 0 ? 1 : 0);
+	input_report_key(apen->apen_dev,
+			apen->apen_data->status_invert > 0 ?
+			BTN_TOOL_RUBBER : BTN_TOOL_PEN, 1);
+	input_report_key(apen->apen_dev,
+			BTN_STYLUS, apen->apen_data->status_barrel > 0 ?
+			1 : 0);
+	input_report_abs(apen->apen_dev, ABS_X, x);
+	input_report_abs(apen->apen_dev, ABS_Y, y);
+	input_report_abs(apen->apen_dev, ABS_PRESSURE, pressure);
+
+	input_sync(apen->apen_dev);
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Active pen: status = %d, invert = %d, barrel = %d, x = %d, y = %d, pressure = %d\n",
+			__func__,
+			apen->apen_data->status_pen,
+			apen->apen_data->status_invert,
+			apen->apen_data->status_barrel,
+			x, y, pressure);
+
+	apen->apen_present = true;
+}
+
+/* Advertise coordinate and pressure ranges on the pen input device. */
+static void apen_set_params(void)
+{
+	struct input_dev *dev = apen->apen_dev;
+	struct synaptics_rmi4_data *rmi4_data = apen->rmi4_data;
+
+	input_set_abs_params(dev, ABS_X, 0, rmi4_data->sensor_max_x, 0, 0);
+	input_set_abs_params(dev, ABS_Y, 0, rmi4_data->sensor_max_y, 0, 0);
+	input_set_abs_params(dev, ABS_PRESSURE, 0, apen->max_pressure, 0, 0);
+}
+
+/*
+ * Determine the pressure resolution of the active pen packet.
+ *
+ * F12 query 9 describes the subpackets of every data register flagged
+ * present in query 8.  After skipping the descriptors of data
+ * registers 0-5, the descriptor of the pen packet (data 6) is
+ * examined: if subpackets 1 and 2 are both present, pressure is
+ * reported in 16 bits, otherwise in 8 bits.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure, or a negative
+ * error code from the register read.
+ */
+static int apen_pressure(struct synaptics_rmi4_f12_query_8 *query_8)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char data_reg_presence;
+	unsigned char size_of_query_9;
+	unsigned char *query_9;
+	unsigned char *data_desc;
+	struct synaptics_rmi4_data *rmi4_data = apen->rmi4_data;
+
+	data_reg_presence = query_8->data[1];
+
+	size_of_query_9 = query_8->size_of_query9;
+	query_9 = kmalloc(size_of_query_9, GFP_KERNEL);
+	if (!query_9)
+		return -ENOMEM; /* original code dereferenced a NULL buffer here */
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			apen->query_base_addr + 9,
+			query_9,
+			size_of_query_9);
+	if (retval < 0)
+		goto exit;
+
+	data_desc = query_9;
+
+	/* Skip the descriptors of data registers 0 through 5. */
+	for (ii = 0; ii < 6; ii++) {
+		if (!(data_reg_presence & (1 << ii)))
+			continue; /* The data register is not present */
+		data_desc++; /* Jump over the size entry */
+		while (*data_desc & (1 << 7))
+			data_desc++;
+		data_desc++; /* Go to the next descriptor */
+	}
+
+	data_desc++; /* Jump over the size entry */
+	/* Check for the presence of subpackets 1 and 2 */
+	if ((*data_desc & (3 << 1)) == (3 << 1))
+		apen->max_pressure = ACTIVE_PEN_MAX_PRESSURE_16BIT;
+	else
+		apen->max_pressure = ACTIVE_PEN_MAX_PRESSURE_8BIT;
+
+exit:
+	kfree(query_9);
+
+	return retval;
+}
+
+/*
+ * Locate the active pen data register within F12 and determine the
+ * pressure resolution.
+ *
+ * Returns 0 on success, -ENODEV when the firmware does not expose the
+ * pen packet (F12 data 6), or a negative error code from the reads.
+ */
+static int apen_reg_init(void)
+{
+	int retval;
+	unsigned char data_offset;
+	unsigned char size_of_query8;
+	struct synaptics_rmi4_f12_query_8 query_8;
+	struct synaptics_rmi4_data *rmi4_data = apen->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			apen->query_base_addr + 7,
+			&size_of_query8,
+			sizeof(size_of_query8));
+	if (retval < 0)
+		return retval;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			apen->query_base_addr + 8,
+			query_8.data,
+			sizeof(query_8.data));
+	if (retval < 0)
+		return retval;
+
+	/* Pen data is in data 6; query 8 must be wide enough to say so. */
+	if ((size_of_query8 >= 2) && (query_8.data6_is_present)) {
+		/* Data 6 sits after whichever of data 0-5 are present. */
+		data_offset = query_8.data0_is_present +
+				query_8.data1_is_present +
+				query_8.data2_is_present +
+				query_8.data3_is_present +
+				query_8.data4_is_present +
+				query_8.data5_is_present;
+		apen->apen_data_addr = apen->data_base_addr + data_offset;
+		retval = apen_pressure(&query_8);
+		if (retval < 0)
+			return retval;
+	} else {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Active pen support unavailable\n",
+				__func__);
+		retval = -ENODEV;
+	}
+
+	return retval;
+}
+
+/*
+ * Walk the Page Description Table looking for F12, then initialize the
+ * active pen registers and enable the pen's interrupt sources in F01.
+ *
+ * Returns 0 on success, -EINVAL if F12 is not found, or a negative
+ * error code from register access.
+ */
+static int apen_scan_pdt(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char page;
+	unsigned char intr_count = 0;
+	unsigned char intr_off;
+	unsigned char intr_src;
+	unsigned short addr;
+	struct synaptics_rmi4_fn_desc fd;
+	struct synaptics_rmi4_data *rmi4_data = apen->rmi4_data;
+
+	/* PDT entries are laid out downward from PDT_START on each page. */
+	for (page = 0; page < PAGES_TO_SERVICE; page++) {
+		for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
+			addr |= (page << 8); /* fold the page into the address */
+
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					addr,
+					(unsigned char *)&fd,
+					sizeof(fd));
+			if (retval < 0)
+				return retval;
+
+			addr &= ~(MASK_8BIT << 8); /* strip the page again */
+
+			if (fd.fn_number) {
+				dev_dbg(rmi4_data->pdev->dev.parent,
+						"%s: Found F%02x\n",
+						__func__, fd.fn_number);
+				switch (fd.fn_number) {
+				case SYNAPTICS_RMI4_F12:
+					goto f12_found;
+				}
+			} else {
+				break; /* empty entry: no more functions here */
+			}
+
+			/* Count interrupt sources of the functions before F12. */
+			intr_count += fd.intr_src_count;
+		}
+	}
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Failed to find F12\n",
+			__func__);
+	return -EINVAL;
+
+f12_found:
+	apen->query_base_addr = fd.query_base_addr | (page << 8);
+	apen->control_base_addr = fd.ctrl_base_addr | (page << 8);
+	apen->data_base_addr = fd.data_base_addr | (page << 8);
+	apen->command_base_addr = fd.cmd_base_addr | (page << 8);
+
+	retval = apen_reg_init();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to initialize active pen registers\n",
+				__func__);
+		return retval;
+	}
+
+	/* Build the interrupt mask from F12's sources within byte 0. */
+	apen->intr_mask = 0;
+	intr_src = fd.intr_src_count;
+	intr_off = intr_count % 8;
+	for (ii = intr_off;
+			ii < (intr_src + intr_off);
+			ii++) {
+		apen->intr_mask |= 1 << ii;
+	}
+
+	rmi4_data->intr_mask[0] |= apen->intr_mask;
+
+	/* F01 control 1 is the interrupt enable register. */
+	addr = rmi4_data->f01_ctrl_base_addr + 1;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			addr,
+			&(rmi4_data->intr_mask[0]),
+			sizeof(rmi4_data->intr_mask[0]));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set interrupt enable bit\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Attention callback from the core driver: report pen data whenever
+ * one of the pen's interrupt sources fired.
+ */
+static void synaptics_rmi4_apen_attn(struct synaptics_rmi4_data *rmi4_data,
+		unsigned char intr_mask)
+{
+	if (apen && (apen->intr_mask & intr_mask))
+		apen_report();
+}
+
+/*
+ * Set up the active pen handler: allocate the handle and data buffer,
+ * locate F12 via the PDT, and register a dedicated input device.
+ *
+ * Returns 0 on success (or if the handle already exists), -ENOMEM on
+ * allocation failure, or a negative error code from PDT scanning or
+ * input registration.  On failure everything allocated so far is
+ * released via the goto-cleanup chain below.
+ */
+static int synaptics_rmi4_apen_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+
+	if (apen) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	apen = kzalloc(sizeof(*apen), GFP_KERNEL);
+	if (!apen) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for apen\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	apen->apen_data = kzalloc(sizeof(*(apen->apen_data)), GFP_KERNEL);
+	if (!apen->apen_data) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for apen_data\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_apen;
+	}
+
+	apen->rmi4_data = rmi4_data;
+
+	retval = apen_scan_pdt();
+	if (retval < 0)
+		goto exit_free_apen_data;
+
+	apen->apen_dev = input_allocate_device();
+	if (apen->apen_dev == NULL) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to allocate active pen device\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_apen_data;
+	}
+
+	apen->apen_dev->name = ACTIVE_PEN_DRIVER_NAME;
+	apen->apen_dev->phys = APEN_PHYS_NAME;
+	apen->apen_dev->id.product = SYNAPTICS_DSX_DRIVER_PRODUCT;
+	apen->apen_dev->id.version = SYNAPTICS_DSX_DRIVER_VERSION;
+	apen->apen_dev->dev.parent = rmi4_data->pdev->dev.parent;
+	input_set_drvdata(apen->apen_dev, rmi4_data);
+
+	/* Declare the events the pen can generate. */
+	set_bit(EV_KEY, apen->apen_dev->evbit);
+	set_bit(EV_ABS, apen->apen_dev->evbit);
+	set_bit(BTN_TOUCH, apen->apen_dev->keybit);
+	set_bit(BTN_TOOL_PEN, apen->apen_dev->keybit);
+	set_bit(BTN_TOOL_RUBBER, apen->apen_dev->keybit);
+	set_bit(BTN_STYLUS, apen->apen_dev->keybit);
+#ifdef INPUT_PROP_DIRECT
+	set_bit(INPUT_PROP_DIRECT, apen->apen_dev->propbit);
+#endif
+
+	apen_set_params();
+
+	retval = input_register_device(apen->apen_dev);
+	if (retval) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to register active pen device\n",
+				__func__);
+		goto exit_free_input_device;
+	}
+
+	return 0;
+
+exit_free_input_device:
+	input_free_device(apen->apen_dev);
+
+exit_free_apen_data:
+	kfree(apen->apen_data);
+
+exit_free_apen:
+	kfree(apen);
+	apen = NULL;
+
+exit:
+	return retval;
+}
+
+/*
+ * Tear down the active pen handler and signal module exit that removal
+ * is complete.  Safe to call when the handler was never initialized.
+ */
+static void synaptics_rmi4_apen_remove(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (apen) {
+		input_unregister_device(apen->apen_dev);
+		kfree(apen->apen_data);
+		kfree(apen);
+		apen = NULL;
+	}
+
+	complete(&apen_remove_complete);
+}
+
+/*
+ * Reset hook: after a device reset, release any in-flight pen contact
+ * and rediscover the F12 registers.  If the handler was never set up,
+ * initialize it instead.
+ */
+static void synaptics_rmi4_apen_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (apen) {
+		apen_lift();
+		apen_scan_pdt();
+	} else {
+		synaptics_rmi4_apen_init(rmi4_data);
+	}
+}
+
+/* Reinitialization hook: just release any in-flight pen contact. */
+static void synaptics_rmi4_apen_reinit(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (apen)
+		apen_lift();
+}
+
+/* Early-suspend hook: release any in-flight pen contact. */
+static void synaptics_rmi4_apen_e_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (apen)
+		apen_lift();
+}
+
+/* Suspend hook: release any in-flight pen contact. */
+static void synaptics_rmi4_apen_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (apen)
+		apen_lift();
+}
+
+/*
+ * Expansion-function descriptor registered with the DSX core; the core
+ * invokes these callbacks on init/remove/reset/suspend and on every
+ * attention interrupt.  Resume handling is not needed (NULL).
+ */
+static struct synaptics_rmi4_exp_fn active_pen_module = {
+	.fn_type = RMI_ACTIVE_PEN,
+	.init = synaptics_rmi4_apen_init,
+	.remove = synaptics_rmi4_apen_remove,
+	.reset = synaptics_rmi4_apen_reset,
+	.reinit = synaptics_rmi4_apen_reinit,
+	.early_suspend = synaptics_rmi4_apen_e_suspend,
+	.suspend = synaptics_rmi4_apen_suspend,
+	.resume = NULL,
+	.late_resume = NULL,
+	.attn = synaptics_rmi4_apen_attn,
+};
+
+/* Register the active pen expansion function with the DSX core. */
+static int __init rmi4_active_pen_module_init(void)
+{
+	synaptics_rmi4_new_function(&active_pen_module, true);
+
+	return 0;
+}
+
+/*
+ * Unregister from the DSX core and wait until the remove callback has
+ * finished tearing the handler down before the module text goes away.
+ */
+static void __exit rmi4_active_pen_module_exit(void)
+{
+	synaptics_rmi4_new_function(&active_pen_module, false);
+
+	wait_for_completion(&apen_remove_complete);
+}
+
+module_init(rmi4_active_pen_module_init);
+module_exit(rmi4_active_pen_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX Active Pen Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c
new file mode 100644
index 0000000..3d808fb
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c
@@ -0,0 +1,4888 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+#ifdef KERNEL_ABOVE_2_6_38
+#include <linux/input/mt.h>
+#endif
+
+#include <linux/msm_drm_notify.h>
+
+#define INPUT_PHYS_NAME "synaptics_dsx/touch_input"
+#define STYLUS_PHYS_NAME "synaptics_dsx/stylus"
+
+#define VIRTUAL_KEY_MAP_FILE_NAME "virtualkeys." PLATFORM_DRIVER_NAME
+
+#ifdef KERNEL_ABOVE_2_6_38
+#define TYPE_B_PROTOCOL
+#endif
+
+/*
+#define USE_DATA_SERVER
+*/
+
+#define WAKEUP_GESTURE false
+
+#define NO_0D_WHILE_2D
+#define REPORT_2D_Z
+#define REPORT_2D_W
+/*
+#define REPORT_2D_PRESSURE
+*/
+
+#define F12_DATA_15_WORKAROUND
+
+#define IGNORE_FN_INIT_FAILURE
+#define FB_READY_RESET
+#define FB_READY_WAIT_MS 100
+#define FB_READY_TIMEOUT_S 30
+#ifdef SYNA_TDDI
+#define TDDI_LPWG_WAIT_US 10
+#endif
+#define RPT_TYPE (1 << 0)
+#define RPT_X_LSB (1 << 1)
+#define RPT_X_MSB (1 << 2)
+#define RPT_Y_LSB (1 << 3)
+#define RPT_Y_MSB (1 << 4)
+#define RPT_Z (1 << 5)
+#define RPT_WX (1 << 6)
+#define RPT_WY (1 << 7)
+#define RPT_DEFAULT (RPT_TYPE | RPT_X_LSB | RPT_X_MSB | RPT_Y_LSB | RPT_Y_MSB)
+
+#define REBUILD_WORK_DELAY_MS 500 /* ms */
+
+#define EXP_FN_WORK_DELAY_MS 500 /* ms */
+#define MAX_F11_TOUCH_WIDTH 15
+#define MAX_F12_TOUCH_WIDTH 255
+
+#define CHECK_STATUS_TIMEOUT_MS 100
+
+#define F01_STD_QUERY_LEN 21
+#define F01_BUID_ID_OFFSET 18
+
+#define STATUS_NO_ERROR 0x00
+#define STATUS_RESET_OCCURRED 0x01
+#define STATUS_INVALID_CONFIG 0x02
+#define STATUS_DEVICE_FAILURE 0x03
+#define STATUS_CONFIG_CRC_FAILURE 0x04
+#define STATUS_FIRMWARE_CRC_FAILURE 0x05
+#define STATUS_CRC_IN_PROGRESS 0x06
+
+#define NORMAL_OPERATION (0 << 0)
+#define SENSOR_SLEEP (1 << 0)
+#define NO_SLEEP_OFF (0 << 2)
+#define NO_SLEEP_ON (1 << 2)
+#define CONFIGURED (1 << 7)
+
+#define F11_CONTINUOUS_MODE 0x00
+#define F11_WAKEUP_GESTURE_MODE 0x04
+#define F12_CONTINUOUS_MODE 0x00
+#define F12_WAKEUP_GESTURE_MODE 0x02
+#define F12_UDG_DETECT 0x0f
+
+static int synaptics_rmi4_check_status(struct synaptics_rmi4_data *rmi4_data,
+		bool *was_in_bl_mode);
+static int synaptics_rmi4_free_fingers(struct synaptics_rmi4_data *rmi4_data);
+static int synaptics_rmi4_reinit_device(struct synaptics_rmi4_data *rmi4_data);
+static int synaptics_rmi4_reset_device(struct synaptics_rmi4_data *rmi4_data,
+		bool rebuild);
+#ifdef CONFIG_FB
+static int synaptics_rmi4_dsi_panel_notifier_cb(struct notifier_block *self,
+		unsigned long event, void *data);
+#endif
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#ifndef CONFIG_FB
+#define USE_EARLYSUSPEND
+#endif
+#endif
+
+#ifdef USE_EARLYSUSPEND
+static int synaptics_rmi4_early_suspend(struct early_suspend *h);
+
+static int synaptics_rmi4_late_resume(struct early_suspend *h);
+#endif
+
+static int synaptics_rmi4_suspend(struct device *dev);
+
+static int synaptics_rmi4_resume(struct device *dev);
+
+static ssize_t synaptics_rmi4_f01_reset_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t synaptics_rmi4_f01_productinfo_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_f01_buildid_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_f01_flashprog_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_0dbutton_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_0dbutton_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t synaptics_rmi4_suspend_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t synaptics_rmi4_wake_gesture_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_wake_gesture_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+#ifdef USE_DATA_SERVER
+static ssize_t synaptics_rmi4_synad_pid_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+#endif
+
+static ssize_t synaptics_rmi4_virtual_key_map_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf);
+
+/* F01 device status register: status code plus flash/config flags. */
+struct synaptics_rmi4_f01_device_status {
+	union {
+		struct {
+			unsigned char status_code:4;
+			unsigned char reserved:2;
+			unsigned char flash_prog:1;
+			unsigned char unconfigured:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* F11 query registers 0-5: general 2D sensor capabilities. */
+struct synaptics_rmi4_f11_query_0_5 {
+	union {
+		struct {
+			/* query 0 */
+			unsigned char f11_query0_b0__2:3;
+			unsigned char has_query_9:1;
+			unsigned char has_query_11:1;
+			unsigned char has_query_12:1;
+			unsigned char has_query_27:1;
+			unsigned char has_query_28:1;
+
+			/* query 1 */
+			unsigned char num_of_fingers:3;
+			unsigned char has_rel:1;
+			unsigned char has_abs:1;
+			unsigned char has_gestures:1;
+			unsigned char has_sensitibity_adjust:1;
+			unsigned char f11_query1_b7:1;
+
+			/* query 2 */
+			unsigned char num_of_x_electrodes;
+
+			/* query 3 */
+			unsigned char num_of_y_electrodes;
+
+			/* query 4 */
+			unsigned char max_electrodes:7;
+			unsigned char f11_query4_b7:1;
+
+			/* query 5 */
+			unsigned char abs_data_size:2;
+			unsigned char has_anchored_finger:1;
+			unsigned char has_adj_hyst:1;
+			unsigned char has_dribble:1;
+			unsigned char has_bending_correction:1;
+			unsigned char has_large_object_suppression:1;
+			unsigned char has_jitter_filter:1;
+		} __packed;
+		unsigned char data[6];
+	};
+};
+
+/* F11 query registers 7-8: supported gesture capability flags. */
+struct synaptics_rmi4_f11_query_7_8 {
+	union {
+		struct {
+			/* query 7 */
+			unsigned char has_single_tap:1;
+			unsigned char has_tap_and_hold:1;
+			unsigned char has_double_tap:1;
+			unsigned char has_early_tap:1;
+			unsigned char has_flick:1;
+			unsigned char has_press:1;
+			unsigned char has_pinch:1;
+			unsigned char has_chiral_scroll:1;
+
+			/* query 8 */
+			unsigned char has_palm_detect:1;
+			unsigned char has_rotate:1;
+			unsigned char has_touch_shapes:1;
+			unsigned char has_scroll_zones:1;
+			unsigned char individual_scroll_zones:1;
+			unsigned char has_multi_finger_scroll:1;
+			unsigned char has_multi_finger_scroll_edge_motion:1;
+			unsigned char has_multi_finger_scroll_inertia:1;
+		} __packed;
+		unsigned char data[2];
+	};
+};
+
+/* F11 query register 9: pen and proximity capability flags. */
+struct synaptics_rmi4_f11_query_9 {
+	union {
+		struct {
+			unsigned char has_pen:1;
+			unsigned char has_proximity:1;
+			unsigned char has_large_object_sensitivity:1;
+			unsigned char has_suppress_on_large_object_detect:1;
+			unsigned char has_two_pen_thresholds:1;
+			unsigned char has_contact_geometry:1;
+			unsigned char has_pen_hover_discrimination:1;
+			unsigned char has_pen_hover_and_edge_filters:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* F11 query register 12: small-object detection and misc capabilities. */
+struct synaptics_rmi4_f11_query_12 {
+	union {
+		struct {
+			unsigned char has_small_object_detection:1;
+			unsigned char has_small_object_detection_tuning:1;
+			unsigned char has_8bit_w:1;
+			unsigned char has_2d_adjustable_mapping:1;
+			unsigned char has_general_information_2:1;
+			unsigned char has_physical_properties:1;
+			unsigned char has_finger_limit:1;
+			unsigned char has_linear_cofficient_2:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* F11 query register 27: wakeup gesture and pen-filter capabilities. */
+struct synaptics_rmi4_f11_query_27 {
+	union {
+		struct {
+			unsigned char f11_query27_b0:1;
+			unsigned char has_pen_position_correction:1;
+			unsigned char has_pen_jitter_filter_coefficient:1;
+			unsigned char has_group_decomposition:1;
+			unsigned char has_wakeup_gesture:1;
+			unsigned char has_small_finger_correction:1;
+			unsigned char has_data_37:1;
+			unsigned char f11_query27_b7:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* F11 control registers 6-9: 12-bit maximum X/Y sensor coordinates. */
+struct synaptics_rmi4_f11_ctrl_6_9 {
+	union {
+		struct {
+			unsigned char sensor_max_x_pos_7_0;
+			unsigned char sensor_max_x_pos_11_8:4;
+			unsigned char f11_ctrl7_b4__7:4;
+			unsigned char sensor_max_y_pos_7_0;
+			unsigned char sensor_max_y_pos_11_8:4;
+			unsigned char f11_ctrl9_b4__7:4;
+		} __packed;
+		unsigned char data[4];
+	};
+};
+
+/* F11 per-finger data: 12-bit X/Y position, width (wx/wy) and Z. */
+struct synaptics_rmi4_f11_data_1_5 {
+	union {
+		struct {
+			unsigned char x_position_11_4;
+			unsigned char y_position_11_4;
+			unsigned char x_position_3_0:4;
+			unsigned char y_position_3_0:4;
+			unsigned char wx:4;
+			unsigned char wy:4;
+			unsigned char z;
+		} __packed;
+		unsigned char data[5];
+	};
+};
+
+/*
+ * F12 query register 5: size of query 6 plus presence flags for
+ * control registers 0-63 (one bit per register, one byte per group
+ * of eight).
+ */
+struct synaptics_rmi4_f12_query_5 {
+	union {
+		struct {
+			unsigned char size_of_query6;
+			struct {
+				unsigned char ctrl0_is_present:1;
+				unsigned char ctrl1_is_present:1;
+				unsigned char ctrl2_is_present:1;
+				unsigned char ctrl3_is_present:1;
+				unsigned char ctrl4_is_present:1;
+				unsigned char ctrl5_is_present:1;
+				unsigned char ctrl6_is_present:1;
+				unsigned char ctrl7_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl8_is_present:1;
+				unsigned char ctrl9_is_present:1;
+				unsigned char ctrl10_is_present:1;
+				unsigned char ctrl11_is_present:1;
+				unsigned char ctrl12_is_present:1;
+				unsigned char ctrl13_is_present:1;
+				unsigned char ctrl14_is_present:1;
+				unsigned char ctrl15_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl16_is_present:1;
+				unsigned char ctrl17_is_present:1;
+				unsigned char ctrl18_is_present:1;
+				unsigned char ctrl19_is_present:1;
+				unsigned char ctrl20_is_present:1;
+				unsigned char ctrl21_is_present:1;
+				unsigned char ctrl22_is_present:1;
+				unsigned char ctrl23_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl24_is_present:1;
+				unsigned char ctrl25_is_present:1;
+				unsigned char ctrl26_is_present:1;
+				unsigned char ctrl27_is_present:1;
+				unsigned char ctrl28_is_present:1;
+				unsigned char ctrl29_is_present:1;
+				unsigned char ctrl30_is_present:1;
+				unsigned char ctrl31_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl32_is_present:1;
+				unsigned char ctrl33_is_present:1;
+				unsigned char ctrl34_is_present:1;
+				unsigned char ctrl35_is_present:1;
+				unsigned char ctrl36_is_present:1;
+				unsigned char ctrl37_is_present:1;
+				unsigned char ctrl38_is_present:1;
+				unsigned char ctrl39_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl40_is_present:1;
+				unsigned char ctrl41_is_present:1;
+				unsigned char ctrl42_is_present:1;
+				unsigned char ctrl43_is_present:1;
+				unsigned char ctrl44_is_present:1;
+				unsigned char ctrl45_is_present:1;
+				unsigned char ctrl46_is_present:1;
+				unsigned char ctrl47_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl48_is_present:1;
+				unsigned char ctrl49_is_present:1;
+				unsigned char ctrl50_is_present:1;
+				unsigned char ctrl51_is_present:1;
+				unsigned char ctrl52_is_present:1;
+				unsigned char ctrl53_is_present:1;
+				unsigned char ctrl54_is_present:1;
+				unsigned char ctrl55_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl56_is_present:1;
+				unsigned char ctrl57_is_present:1;
+				unsigned char ctrl58_is_present:1;
+				unsigned char ctrl59_is_present:1;
+				unsigned char ctrl60_is_present:1;
+				unsigned char ctrl61_is_present:1;
+				unsigned char ctrl62_is_present:1;
+				unsigned char ctrl63_is_present:1;
+			} __packed;
+		};
+		unsigned char data[9];
+	};
+};
+
+/*
+ * F12 query register 8: size of query 9 plus presence flags for data
+ * registers 0-31 (wider than the 2-byte variant used by the active
+ * pen module).
+ */
+struct synaptics_rmi4_f12_query_8 {
+	union {
+		struct {
+			unsigned char size_of_query9;
+			struct {
+				unsigned char data0_is_present:1;
+				unsigned char data1_is_present:1;
+				unsigned char data2_is_present:1;
+				unsigned char data3_is_present:1;
+				unsigned char data4_is_present:1;
+				unsigned char data5_is_present:1;
+				unsigned char data6_is_present:1;
+				unsigned char data7_is_present:1;
+			} __packed;
+			struct {
+				unsigned char data8_is_present:1;
+				unsigned char data9_is_present:1;
+				unsigned char data10_is_present:1;
+				unsigned char data11_is_present:1;
+				unsigned char data12_is_present:1;
+				unsigned char data13_is_present:1;
+				unsigned char data14_is_present:1;
+				unsigned char data15_is_present:1;
+			} __packed;
+			struct {
+				unsigned char data16_is_present:1;
+				unsigned char data17_is_present:1;
+				unsigned char data18_is_present:1;
+				unsigned char data19_is_present:1;
+				unsigned char data20_is_present:1;
+				unsigned char data21_is_present:1;
+				unsigned char data22_is_present:1;
+				unsigned char data23_is_present:1;
+			} __packed;
+			struct {
+				unsigned char data24_is_present:1;
+				unsigned char data25_is_present:1;
+				unsigned char data26_is_present:1;
+				unsigned char data27_is_present:1;
+				unsigned char data28_is_present:1;
+				unsigned char data29_is_present:1;
+				unsigned char data30_is_present:1;
+				unsigned char data31_is_present:1;
+			} __packed;
+		};
+		unsigned char data[5];
+	};
+};
+
+/* F12 control register 8: sensor geometry (max coords, pitch, clip). */
+struct synaptics_rmi4_f12_ctrl_8 {
+	union {
+		struct {
+			unsigned char max_x_coord_lsb;
+			unsigned char max_x_coord_msb;
+			unsigned char max_y_coord_lsb;
+			unsigned char max_y_coord_msb;
+			unsigned char rx_pitch_lsb;
+			unsigned char rx_pitch_msb;
+			unsigned char tx_pitch_lsb;
+			unsigned char tx_pitch_msb;
+			unsigned char low_rx_clip;
+			unsigned char high_rx_clip;
+			unsigned char low_tx_clip;
+			unsigned char high_tx_clip;
+			unsigned char num_of_rx;
+			unsigned char num_of_tx;
+		};
+		unsigned char data[14];
+	};
+};
+
+/*
+ * F12 control register 23: object type enable bits and per-type
+ * report-as-finger settings.
+ */
+struct synaptics_rmi4_f12_ctrl_23 {
+	union {
+		struct {
+			unsigned char finger_enable:1;
+			unsigned char active_stylus_enable:1;
+			unsigned char palm_enable:1;
+			unsigned char unclassified_object_enable:1;
+			unsigned char hovering_finger_enable:1;
+			unsigned char gloved_finger_enable:1;
+			unsigned char f12_ctr23_00_b6__7:2;
+			unsigned char max_reported_objects;
+			unsigned char f12_ctr23_02_b0:1;
+			unsigned char report_active_stylus_as_finger:1;
+			unsigned char report_palm_as_finger:1;
+			unsigned char report_unclassified_object_as_finger:1;
+			unsigned char report_hovering_finger_as_finger:1;
+			unsigned char report_gloved_finger_as_finger:1;
+			unsigned char report_narrow_object_swipe_as_finger:1;
+			unsigned char report_handedge_as_finger:1;
+			unsigned char cover_enable:1;
+			unsigned char stylus_enable:1;
+			unsigned char eraser_enable:1;
+			unsigned char small_object_enable:1;
+			unsigned char f12_ctr23_03_b4__7:4;
+			unsigned char report_cover_as_finger:1;
+			unsigned char report_stylus_as_finger:1;
+			unsigned char report_eraser_as_finger:1;
+			unsigned char report_small_object_as_finger:1;
+			unsigned char f12_ctr23_04_b4__7:4;
+		};
+		unsigned char data[5];
+	};
+};
+
+/*
+ * F12_2D_CTRL31 register block: geometry variant used by wedge sensors
+ * (coordinate maxima, RX pitch, clip, and p/q counts); 12-byte image.
+ */
+struct synaptics_rmi4_f12_ctrl_31 {
+	union {
+		struct {
+			unsigned char max_x_coord_lsb;
+			unsigned char max_x_coord_msb;
+			unsigned char max_y_coord_lsb;
+			unsigned char max_y_coord_msb;
+			unsigned char rx_pitch_lsb;
+			unsigned char rx_pitch_msb;
+			unsigned char rx_clip_low;
+			unsigned char rx_clip_high;
+			unsigned char wedge_clip_low;
+			unsigned char wedge_clip_high;
+			unsigned char num_of_p;
+			unsigned char num_of_q;
+		};
+		unsigned char data[12];
+	};
+};
+
+/*
+ * F12_2D_CTRL58 register block: force/pressure reporting configuration
+ * (format, min/max force, light/hard press thresholds and hysteresis).
+ */
+struct synaptics_rmi4_f12_ctrl_58 {
+	union {
+		struct {
+			unsigned char reporting_format;
+			unsigned char f12_ctr58_00_reserved;
+			unsigned char min_force_lsb;
+			unsigned char min_force_msb;
+			unsigned char max_force_lsb;
+			unsigned char max_force_msb;
+			unsigned char light_press_threshold_lsb;
+			unsigned char light_press_threshold_msb;
+			unsigned char light_press_hysteresis_lsb;
+			unsigned char light_press_hysteresis_msb;
+			unsigned char hard_press_threshold_lsb;
+			unsigned char hard_press_threshold_msb;
+			unsigned char hard_press_hysteresis_lsb;
+			unsigned char hard_press_hysteresis_msb;
+		};
+		unsigned char data[14];
+	};
+};
+
+/*
+ * Per-object F12 data record: type/status byte plus 16-bit X/Y split
+ * into LSB/MSB. Z and W fields exist only when the corresponding
+ * REPORT_2D_* option is compiled in, so sizeof() this struct must be
+ * used for register reads rather than a hard-coded size.
+ */
+struct synaptics_rmi4_f12_finger_data {
+	unsigned char object_type_and_status;
+	unsigned char x_lsb;
+	unsigned char x_msb;
+	unsigned char y_lsb;
+	unsigned char y_msb;
+#ifdef REPORT_2D_Z
+	unsigned char z;
+#endif
+#ifdef REPORT_2D_W
+	unsigned char wx;
+	unsigned char wy;
+#endif
+};
+
+/*
+ * F1A (0D capacitive buttons) base query registers: maximum button
+ * count plus capability flag bits, packed into a 2-byte image.
+ */
+struct synaptics_rmi4_f1a_query {
+	union {
+		struct {
+			unsigned char max_button_count:3;
+			unsigned char f1a_query0_b3__4:2;
+			unsigned char has_query4:1;
+			unsigned char has_query3:1;
+			unsigned char has_query2:1;
+			unsigned char has_general_control:1;
+			unsigned char has_interrupt_enable:1;
+			unsigned char has_multibutton_select:1;
+			unsigned char has_tx_rx_map:1;
+			unsigned char has_perbutton_threshold:1;
+			unsigned char has_release_threshold:1;
+			unsigned char has_strongestbtn_hysteresis:1;
+			unsigned char has_filter_strength:1;
+		} __packed;
+		unsigned char data[2];
+	};
+};
+
+/* F1A query 4: presence bits for optional control registers 19 and 24. */
+struct synaptics_rmi4_f1a_query_4 {
+	union {
+		struct {
+			unsigned char has_ctrl19:1;
+			unsigned char f1a_query4_b1__4:4;
+			unsigned char has_ctrl24:1;
+			unsigned char f1a_query4_b6__7:2;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* F1A control 0: multi-button reporting mode and filter mode fields. */
+struct synaptics_rmi4_f1a_control_0 {
+	union {
+		struct {
+			unsigned char multibutton_report:2;
+			unsigned char filter_mode:2;
+			unsigned char reserved:4;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/*
+ * Aggregated F1A control state. txrx_map and button_threshold point to
+ * per-button arrays allocated elsewhere (size follows the button count).
+ */
+struct synaptics_rmi4_f1a_control {
+	struct synaptics_rmi4_f1a_control_0 general_control;
+	unsigned char button_int_enable;
+	unsigned char multi_button;
+	unsigned char *txrx_map;
+	unsigned char *button_threshold;
+	unsigned char button_release_threshold;
+	unsigned char strongest_button_hysteresis;
+	unsigned char filter_strength;
+};
+
+/*
+ * Runtime handle for an F1A function instance: button counts, the raw
+ * data read buffer, the button-to-keycode map, and cached query/control
+ * register images.
+ */
+struct synaptics_rmi4_f1a_handle {
+	int button_bitmask_size;
+	unsigned char max_count;
+	unsigned char valid_button_count;
+	unsigned char *button_data_buffer;
+	unsigned char *button_map;
+	struct synaptics_rmi4_f1a_query button_query;
+	struct synaptics_rmi4_f1a_control button_control;
+};
+
+/*
+ * List node for a registered expansion function handler; insert/remove
+ * flags mark pending state transitions processed by the exp work queue.
+ */
+struct synaptics_rmi4_exp_fhandler {
+	struct synaptics_rmi4_exp_fn *exp_fn;
+	bool insert;
+	bool remove;
+	struct list_head link;
+};
+
+/*
+ * Shared bookkeeping for all expansion function handlers: the handler
+ * list (guarded by mutex), the deferred-work machinery, and a back
+ * pointer to the driver data.
+ */
+struct synaptics_rmi4_exp_fn_data {
+	bool initialized;
+	bool queue_work;
+	struct mutex mutex;
+	struct list_head list;
+	struct delayed_work work;
+	struct workqueue_struct *workqueue;
+	struct synaptics_rmi4_data *rmi4_data;
+};
+
+/* Singleton bookkeeping for dynamically registered expansion handlers. */
+static struct synaptics_rmi4_exp_fn_data exp_data;
+
+/* Virtual key map published to userspace via virtual_key_map_attr. */
+static struct synaptics_dsx_button_map *vir_button_map;
+
+#ifdef USE_DATA_SERVER
+/* Userspace data-server task signalled (SIGIO) on F21 interrupts. */
+static pid_t synad_pid;
+static struct task_struct *synad_task;
+static struct siginfo interrupt_signal;
+#endif
+
+/*
+ * sysfs device attributes. Write-only entries (0220) pair the store
+ * handler with synaptics_rmi4_show_error; read-only entries (0444) pair
+ * the show handler with synaptics_rmi4_store_error.
+ */
+static struct device_attribute attrs[] = {
+	__ATTR(reset, 0220,
+			synaptics_rmi4_show_error,
+			synaptics_rmi4_f01_reset_store),
+	__ATTR(productinfo, 0444,
+			synaptics_rmi4_f01_productinfo_show,
+			synaptics_rmi4_store_error),
+	__ATTR(buildid, 0444,
+			synaptics_rmi4_f01_buildid_show,
+			synaptics_rmi4_store_error),
+	__ATTR(flashprog, 0444,
+			synaptics_rmi4_f01_flashprog_show,
+			synaptics_rmi4_store_error),
+	__ATTR(0dbutton, 0664,
+			synaptics_rmi4_0dbutton_show,
+			synaptics_rmi4_0dbutton_store),
+	__ATTR(suspend, 0220,
+			synaptics_rmi4_show_error,
+			synaptics_rmi4_suspend_store),
+	__ATTR(wake_gesture, 0664,
+			synaptics_rmi4_wake_gesture_show,
+			synaptics_rmi4_wake_gesture_store),
+#ifdef USE_DATA_SERVER
+	__ATTR(synad_pid, 0220,
+			synaptics_rmi4_show_error,
+			synaptics_rmi4_synad_pid_store),
+#endif
+};
+
+/* Read-only kobject attribute exposing the virtual key map to userspace. */
+static struct kobj_attribute virtual_key_map_attr = {
+	.attr = {
+		.name = VIRTUAL_KEY_MAP_FILE_NAME,
+		.mode = 0444,
+	},
+	.show = synaptics_rmi4_virtual_key_map_show,
+};
+
+/*
+ * sysfs "reset" store handler: writing "1" issues a soft reset to the
+ * controller; any other value is rejected with -EINVAL.
+ *
+ * Note: kstrtouint() returns 0 on success and a negative errno on
+ * failure (unlike sscanf(), which returns the conversion count), so the
+ * result must not be compared against 1 — doing so rejects every valid
+ * input.
+ */
+static ssize_t synaptics_rmi4_f01_reset_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int reset;
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	if (kstrtouint(buf, 10, &reset) < 0)
+		return -EINVAL;
+
+	if (reset != 1)
+		return -EINVAL;
+
+	retval = synaptics_rmi4_reset_device(rmi4_data, false);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to issue reset command, error = %d\n",
+				__func__, retval);
+		return retval;
+	}
+
+	return count;
+}
+
+/*
+ * sysfs "productinfo" show handler: print the first two F01 product
+ * info bytes as "0xNN 0xNN".
+ */
+static ssize_t synaptics_rmi4_f01_productinfo_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct synaptics_rmi4_data *data = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "0x%02x 0x%02x\n",
+			data->rmi4_mod_info.product_info[0],
+			data->rmi4_mod_info.product_info[1]);
+}
+
+/* sysfs "buildid" show handler: print the cached firmware build ID. */
+static ssize_t synaptics_rmi4_f01_buildid_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct synaptics_rmi4_data *data = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", data->firmware_id);
+}
+
+/*
+ * sysfs "flashprog" show handler: read the F01 device status register
+ * and print the flash-programming (bootloader mode) bit.
+ */
+static ssize_t synaptics_rmi4_f01_flashprog_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	struct synaptics_rmi4_f01_device_status device_status;
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_data_base_addr,
+			device_status.data,
+			sizeof(device_status.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read device status, error = %d\n",
+				__func__, retval);
+		return retval;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+			device_status.flash_prog);
+}
+
+/* sysfs "0dbutton" show handler: print whether 0D buttons are enabled. */
+static ssize_t synaptics_rmi4_0dbutton_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct synaptics_rmi4_data *data = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", data->button_0d_enabled);
+}
+
+/*
+ * sysfs "0dbutton" store handler: enable (non-zero) or disable (0) the
+ * 0D capacitive buttons by toggling the F1A interrupt enable bits in
+ * the F01 control registers.
+ *
+ * Fix: kstrtouint() returns 0 on success (not a conversion count like
+ * sscanf()), so comparing its result against 1 rejected every valid
+ * input.
+ */
+static ssize_t synaptics_rmi4_0dbutton_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	unsigned char ii;
+	unsigned char intr_enable;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	if (kstrtouint(buf, 10, &input) < 0)
+		return -EINVAL;
+
+	input = input > 0 ? 1 : 0;
+
+	/* Nothing to do if the requested state is already in effect */
+	if (rmi4_data->button_0d_enabled == input)
+		return count;
+
+	if (list_empty(&rmi->support_fn_list))
+		return -ENODEV;
+
+	list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+		if (fhandler->fn_number == SYNAPTICS_RMI4_F1A) {
+			ii = fhandler->intr_reg_num;
+
+			/* Read-modify-write the interrupt enable register
+			 * covering this F1A instance (F01 ctrl base + 1 + n).
+			 */
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					rmi4_data->f01_ctrl_base_addr + 1 + ii,
+					&intr_enable,
+					sizeof(intr_enable));
+			if (retval < 0)
+				return retval;
+
+			if (input == 1)
+				intr_enable |= fhandler->intr_mask;
+			else
+				intr_enable &= ~fhandler->intr_mask;
+
+			retval = synaptics_rmi4_reg_write(rmi4_data,
+					rmi4_data->f01_ctrl_base_addr + 1 + ii,
+					&intr_enable,
+					sizeof(intr_enable));
+			if (retval < 0)
+				return retval;
+		}
+	}
+
+	rmi4_data->button_0d_enabled = input;
+
+	return count;
+}
+
+/*
+ * sysfs "suspend" store handler: "1" suspends the device, "0" resumes
+ * it, anything else is rejected.
+ *
+ * Fix: kstrtouint() returns 0 on success, so its result must be tested
+ * for failure (negative), not compared against a conversion count of 1.
+ */
+static ssize_t synaptics_rmi4_suspend_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+
+	if (kstrtouint(buf, 10, &input) < 0)
+		return -EINVAL;
+
+	if (input == 1)
+		synaptics_rmi4_suspend(dev);
+	else if (input == 0)
+		synaptics_rmi4_resume(dev);
+	else
+		return -EINVAL;
+
+	return count;
+}
+
+/* sysfs "wake_gesture" show handler: print the wakeup gesture enable flag. */
+static ssize_t synaptics_rmi4_wake_gesture_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct synaptics_rmi4_data *data = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", data->enable_wakeup_gesture);
+}
+
+/*
+ * sysfs "wake_gesture" store handler: set the wakeup gesture enable
+ * flag, honored only if the firmware exposes F11 or F12 gesture support.
+ *
+ * Fix: kstrtouint() returns 0 on success, so comparing its result
+ * against 1 (sscanf semantics) rejected every valid input.
+ */
+static ssize_t synaptics_rmi4_wake_gesture_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	if (kstrtouint(buf, 10, &input) < 0)
+		return -EINVAL;
+
+	input = input > 0 ? 1 : 0;
+
+	if (rmi4_data->f11_wakeup_gesture || rmi4_data->f12_wakeup_gesture)
+		rmi4_data->enable_wakeup_gesture = input;
+
+	return count;
+}
+
+#ifdef USE_DATA_SERVER
+/*
+ * sysfs "synad_pid" store handler: record the PID of the userspace data
+ * server and resolve it to a task for later SIGIO delivery; a PID of 0
+ * disables signalling.
+ *
+ * Fix: kstrtouint() returns 0 on success, so its result must be tested
+ * for failure rather than compared against a conversion count of 1.
+ */
+static ssize_t synaptics_rmi4_synad_pid_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+
+	if (kstrtouint(buf, 10, &input) < 0)
+		return -EINVAL;
+
+	synad_pid = input;
+
+	if (synad_pid) {
+		synad_task = pid_task(find_vpid(synad_pid), PIDTYPE_PID);
+		if (!synad_task)
+			return -EINVAL;
+	}
+
+	return count;
+}
+#endif
+
+/*
+ * Show handler for the virtual key map sysfs node: emit one
+ * "0x01:code:x:y:w:h" line per virtual button.
+ *
+ * Fix: snprintf() returns the length that *would* have been written,
+ * so on truncation the old code advanced buf/count past the end of the
+ * page. scnprintf() returns the number of characters actually stored,
+ * keeping the cursor and the returned byte count within bounds.
+ */
+static ssize_t synaptics_rmi4_virtual_key_map_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	int ii;
+	int cnt;
+	int count = 0;
+
+	for (ii = 0; ii < vir_button_map->nbuttons; ii++) {
+		cnt = scnprintf(buf, PAGE_SIZE - count, "0x01:%d:%d:%d:%d:%d\n",
+				vir_button_map->map[ii * 5 + 0],
+				vir_button_map->map[ii * 5 + 1],
+				vir_button_map->map[ii * 5 + 2],
+				vir_button_map->map[ii * 5 + 3],
+				vir_button_map->map[ii * 5 + 4]);
+		buf += cnt;
+		count += cnt;
+	}
+
+	return count;
+}
+
+/*
+ * Switch the F11 2D reporting mode between wakeup-gesture mode and
+ * continuous reporting via a read-modify-write of the low 3 bits of the
+ * F11 control base register.
+ *
+ * NOTE(review): the list walk assumes an F11 handler is always present
+ * when this is called; if none matched, list_for_each_entry() would
+ * leave fhandler pointing at the list head container — confirm callers
+ * only reach here when f11_wakeup_gesture is set.
+ */
+static int synaptics_rmi4_f11_wg(struct synaptics_rmi4_data *rmi4_data,
+		bool enable)
+{
+	int retval;
+	unsigned char reporting_control;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+		if (fhandler->fn_number == SYNAPTICS_RMI4_F11)
+			break;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fhandler->full_addr.ctrl_base,
+			&reporting_control,
+			sizeof(reporting_control));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to change reporting mode\n",
+				__func__);
+		return retval;
+	}
+
+	/* Replace only the 3-bit reporting mode field */
+	reporting_control = (reporting_control & ~MASK_3BIT);
+	if (enable)
+		reporting_control |= F11_WAKEUP_GESTURE_MODE;
+	else
+		reporting_control |= F11_CONTINUOUS_MODE;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			fhandler->full_addr.ctrl_base,
+			&reporting_control,
+			sizeof(reporting_control));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to change reporting mode\n",
+				__func__);
+		return retval;
+	}
+
+	return retval;
+}
+
+/*
+ * Switch the F12 reporting mode between wakeup-gesture mode and
+ * continuous reporting by rewriting one byte of the ctrl-20 register
+ * block (located at ctrl_base + ctrl20_offset).
+ *
+ * NOTE(review): as with the F11 variant, the list walk assumes an F12
+ * handler exists; set_wakeup_gesture presumably indexes the byte of
+ * ctrl-20 that holds the report-mode field for this firmware — verify
+ * against the register map.
+ */
+static int synaptics_rmi4_f12_wg(struct synaptics_rmi4_data *rmi4_data,
+		bool enable)
+{
+	int retval;
+	unsigned char offset;
+	unsigned char reporting_control[3];
+	struct synaptics_rmi4_f12_extra_data *extra_data;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+		if (fhandler->fn_number == SYNAPTICS_RMI4_F12)
+			break;
+	}
+
+	extra_data = (struct synaptics_rmi4_f12_extra_data *)fhandler->extra;
+	offset = extra_data->ctrl20_offset;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fhandler->full_addr.ctrl_base + offset,
+			reporting_control,
+			sizeof(reporting_control));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to change reporting mode\n",
+				__func__);
+		return retval;
+	}
+
+	if (enable)
+		reporting_control[rmi4_data->set_wakeup_gesture] = F12_WAKEUP_GESTURE_MODE;
+	else
+		reporting_control[rmi4_data->set_wakeup_gesture] = F12_CONTINUOUS_MODE;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			fhandler->full_addr.ctrl_base + offset,
+			reporting_control,
+			sizeof(reporting_control));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to change reporting mode\n",
+				__func__);
+		return retval;
+	}
+
+	return retval;
+}
+
+/*
+ * Route wakeup-gesture (re)configuration to whichever 2D function the
+ * firmware exposes; F11 takes precedence over F12.
+ */
+static void synaptics_rmi4_wakeup_gesture(struct synaptics_rmi4_data *rmi4_data,
+		bool enable)
+{
+	if (rmi4_data->f11_wakeup_gesture) {
+		synaptics_rmi4_f11_wg(rmi4_data, enable);
+		return;
+	}
+
+	if (rmi4_data->f12_wakeup_gesture)
+		synaptics_rmi4_f12_wg(rmi4_data, enable);
+}
+
+/*
+ * Read and report 2D touch data from an F11 function.
+ *
+ * While suspended with wakeup gestures enabled, only the gesture
+ * detection register is read and a KEY_WAKEUP press/release is emitted.
+ * Otherwise the per-finger status bits and position records are decoded
+ * and forwarded to the input subsystem.
+ *
+ * Returns the number of active touch points (0 on read failure or in
+ * the gesture path).
+ */
+static int synaptics_rmi4_f11_abs_report(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler)
+{
+	int retval;
+	unsigned char touch_count = 0; /* number of touch points */
+	unsigned char reg_index;
+	unsigned char finger;
+	unsigned char fingers_supported;
+	unsigned char num_of_finger_status_regs;
+	unsigned char finger_shift;
+	unsigned char finger_status;
+	unsigned char finger_status_reg[3];
+	unsigned char detected_gestures;
+	unsigned short data_addr;
+	unsigned short data_offset;
+	int x;
+	int y;
+	int wx;
+	int wy;
+	int temp;
+	struct synaptics_rmi4_f11_data_1_5 data;
+	struct synaptics_rmi4_f11_extra_data *extra_data;
+
+	/*
+	 * The number of finger status registers is determined by the
+	 * maximum number of fingers supported - 2 bits per finger. So
+	 * the number of finger status registers to read is:
+	 * register_count = ceil(max_num_of_fingers / 4)
+	 */
+	fingers_supported = fhandler->num_of_data_points;
+	num_of_finger_status_regs = (fingers_supported + 3) / 4;
+	data_addr = fhandler->full_addr.data_base;
+
+	extra_data = (struct synaptics_rmi4_f11_extra_data *)fhandler->extra;
+
+	/* Suspended with gestures armed: only service the gesture event */
+	if (rmi4_data->suspend && rmi4_data->enable_wakeup_gesture) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				data_addr + extra_data->data38_offset,
+				&detected_gestures,
+				sizeof(detected_gestures));
+		if (retval < 0)
+			return 0;
+
+		if (detected_gestures) {
+			/* Emit a full press/release so userspace wakes up */
+			input_report_key(rmi4_data->input_dev, KEY_WAKEUP, 1);
+			input_sync(rmi4_data->input_dev);
+			input_report_key(rmi4_data->input_dev, KEY_WAKEUP, 0);
+			input_sync(rmi4_data->input_dev);
+			rmi4_data->suspend = false;
+		}
+/*		synaptics_rmi4_wakeup_gesture(rmi4_data, false); */
+		return 0;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			data_addr,
+			finger_status_reg,
+			num_of_finger_status_regs);
+	if (retval < 0)
+		return 0;
+
+	mutex_lock(&(rmi4_data->rmi4_report_mutex));
+
+	for (finger = 0; finger < fingers_supported; finger++) {
+		reg_index = finger / 4;
+		finger_shift = (finger % 4) * 2;
+		finger_status = (finger_status_reg[reg_index] >> finger_shift)
+				& MASK_2BIT;
+
+		/*
+		 * Each 2-bit finger status field represents the following:
+		 * 00 = finger not present
+		 * 01 = finger present and data accurate
+		 * 10 = finger present but data may be inaccurate
+		 * 11 = reserved
+		 */
+#ifdef TYPE_B_PROTOCOL
+		input_mt_slot(rmi4_data->input_dev, finger);
+		input_mt_report_slot_state(rmi4_data->input_dev,
+				MT_TOOL_FINGER, finger_status);
+#endif
+
+		if (finger_status) {
+			/* Position records follow the status registers */
+			data_offset = data_addr +
+					num_of_finger_status_regs +
+					(finger * sizeof(data.data));
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					data_offset,
+					data.data,
+					sizeof(data.data));
+			if (retval < 0) {
+				touch_count = 0;
+				goto exit;
+			}
+
+			/* 12-bit coordinates split across registers */
+			x = (data.x_position_11_4 << 4) | data.x_position_3_0;
+			y = (data.y_position_11_4 << 4) | data.y_position_3_0;
+			wx = data.wx;
+			wy = data.wy;
+
+			/* Apply board-specific axis swap/flip corrections */
+			if (rmi4_data->hw_if->board_data->swap_axes) {
+				temp = x;
+				x = y;
+				y = temp;
+				temp = wx;
+				wx = wy;
+				wy = temp;
+			}
+
+			if (rmi4_data->hw_if->board_data->x_flip)
+				x = rmi4_data->sensor_max_x - x;
+			if (rmi4_data->hw_if->board_data->y_flip)
+				y = rmi4_data->sensor_max_y - y;
+
+			input_report_key(rmi4_data->input_dev,
+					BTN_TOUCH, 1);
+			input_report_key(rmi4_data->input_dev,
+					BTN_TOOL_FINGER, 1);
+			input_report_abs(rmi4_data->input_dev,
+					ABS_MT_POSITION_X, x);
+			input_report_abs(rmi4_data->input_dev,
+					ABS_MT_POSITION_Y, y);
+#ifdef REPORT_2D_W
+			input_report_abs(rmi4_data->input_dev,
+					ABS_MT_TOUCH_MAJOR, max(wx, wy));
+			input_report_abs(rmi4_data->input_dev,
+					ABS_MT_TOUCH_MINOR, min(wx, wy));
+#endif
+#ifndef TYPE_B_PROTOCOL
+			input_mt_sync(rmi4_data->input_dev);
+#endif
+
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Finger %d: status = 0x%02x, x = %d, y = %d, wx = %d, wy = %d\n",
+					__func__, finger,
+					finger_status,
+					x, y, wx, wy);
+
+			touch_count++;
+		}
+	}
+
+	/* All fingers lifted: release BTN_TOUCH/BTN_TOOL_FINGER */
+	if (touch_count == 0) {
+		input_report_key(rmi4_data->input_dev,
+				BTN_TOUCH, 0);
+		input_report_key(rmi4_data->input_dev,
+				BTN_TOOL_FINGER, 0);
+#ifndef TYPE_B_PROTOCOL
+		input_mt_sync(rmi4_data->input_dev);
+#endif
+	}
+
+	input_sync(rmi4_data->input_dev);
+
+exit:
+	mutex_unlock(&(rmi4_data->rmi4_report_mutex));
+
+	return touch_count;
+}
+
+/*
+ * Read and report 2D touch data from an F12 function.
+ *
+ * Handles the wakeup-gesture path while suspended, trims the number of
+ * object records to read using the data-15 object-attention bitmap,
+ * then decodes each record and routes it to the touch or stylus input
+ * device according to its object type.
+ *
+ * NOTE(review): finger_presence/stylus_presence (and the data-15
+ * workaround counter) are function-local statics, so this state is
+ * shared across all devices driven by this module — presumably a
+ * single-instance assumption; confirm if multiple panels are possible.
+ *
+ * Returns the number of active touch points (0 on read failure or in
+ * the gesture path).
+ */
+static int synaptics_rmi4_f12_abs_report(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler)
+{
+	int retval;
+	unsigned char touch_count = 0; /* number of touch points */
+	unsigned char index;
+	unsigned char finger;
+	unsigned char fingers_to_process;
+	unsigned char finger_status;
+	unsigned char size_of_2d_data;
+	unsigned char gesture_type;
+	unsigned short data_addr;
+	int x;
+	int y;
+	int wx;
+	int wy;
+	int temp;
+#if defined(REPORT_2D_PRESSURE) || defined(F51_DISCRETE_FORCE)
+	int pressure;
+#endif
+#ifdef REPORT_2D_PRESSURE
+	unsigned char f_fingers;
+	unsigned char f_lsb;
+	unsigned char f_msb;
+	unsigned char *f_data;
+#endif
+#ifdef F51_DISCRETE_FORCE
+	unsigned char force_level;
+#endif
+	struct synaptics_rmi4_f12_extra_data *extra_data;
+	struct synaptics_rmi4_f12_finger_data *data;
+	struct synaptics_rmi4_f12_finger_data *finger_data;
+	static unsigned char finger_presence;
+	static unsigned char stylus_presence;
+#ifdef F12_DATA_15_WORKAROUND
+	static unsigned char objects_already_present;
+#endif
+
+	fingers_to_process = fhandler->num_of_data_points;
+	data_addr = fhandler->full_addr.data_base;
+	extra_data = (struct synaptics_rmi4_f12_extra_data *)fhandler->extra;
+	size_of_2d_data = sizeof(struct synaptics_rmi4_f12_finger_data);
+
+	/* Suspended with gestures armed: only service the gesture event */
+	if (rmi4_data->suspend && rmi4_data->enable_wakeup_gesture) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				data_addr + extra_data->data4_offset,
+				rmi4_data->gesture_detection,
+				sizeof(rmi4_data->gesture_detection));
+		if (retval < 0)
+			return 0;
+
+		gesture_type = rmi4_data->gesture_detection[0];
+
+		if (gesture_type && gesture_type != F12_UDG_DETECT) {
+			input_report_key(rmi4_data->input_dev, KEY_WAKEUP, 1);
+			input_sync(rmi4_data->input_dev);
+			input_report_key(rmi4_data->input_dev, KEY_WAKEUP, 0);
+			input_sync(rmi4_data->input_dev);
+			/* synaptics_rmi4_wakeup_gesture(rmi4_data, false); */
+			/* rmi4_data->suspend = false; */
+		}
+
+		return 0;
+	}
+
+	/* Determine the total number of fingers to process */
+	if (extra_data->data15_size) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				data_addr + extra_data->data15_offset,
+				extra_data->data15_data,
+				extra_data->data15_size);
+		if (retval < 0)
+			return 0;
+
+		/* Start checking from the highest bit */
+		index = extra_data->data15_size - 1; /* Highest byte */
+		finger = (fingers_to_process - 1) % 8; /* Highest bit */
+		do {
+			if (extra_data->data15_data[index] & (1 << finger))
+				break;
+
+			if (finger) {
+				finger--;
+			} else if (index > 0) {
+				index--; /* Move to the next lower byte */
+				finger = 7;
+			}
+
+			fingers_to_process--;
+		} while (fingers_to_process);
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Number of fingers to process = %d\n",
+			__func__, fingers_to_process);
+	}
+
+#ifdef F12_DATA_15_WORKAROUND
+	/* Keep reading records for objects seen in the previous pass */
+	fingers_to_process = max(fingers_to_process, objects_already_present);
+#endif
+
+	if (!fingers_to_process) {
+		synaptics_rmi4_free_fingers(rmi4_data);
+		finger_presence = 0;
+		stylus_presence = 0;
+		return 0;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			data_addr + extra_data->data1_offset,
+			(unsigned char *)fhandler->data,
+			fingers_to_process * size_of_2d_data);
+	if (retval < 0)
+		return 0;
+
+	data = (struct synaptics_rmi4_f12_finger_data *)fhandler->data;
+
+#ifdef REPORT_2D_PRESSURE
+	if (rmi4_data->report_pressure) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				data_addr + extra_data->data29_offset,
+				extra_data->data29_data,
+				extra_data->data29_size);
+		if (retval < 0)
+			return 0;
+	}
+#endif
+
+	mutex_lock(&(rmi4_data->rmi4_report_mutex));
+
+	for (finger = 0; finger < fingers_to_process; finger++) {
+		finger_data = data + finger;
+		finger_status = finger_data->object_type_and_status;
+
+#ifdef F12_DATA_15_WORKAROUND
+		objects_already_present = finger + 1;
+#endif
+
+		x = (finger_data->x_msb << 8) | (finger_data->x_lsb);
+		y = (finger_data->y_msb << 8) | (finger_data->y_lsb);
+#ifdef REPORT_2D_W
+		wx = finger_data->wx;
+		wy = finger_data->wy;
+#endif
+
+		/* Apply board-specific axis swap/flip corrections */
+		if (rmi4_data->hw_if->board_data->swap_axes) {
+			temp = x;
+			x = y;
+			y = temp;
+			temp = wx;
+			wx = wy;
+			wy = temp;
+		}
+
+		if (rmi4_data->hw_if->board_data->x_flip)
+			x = rmi4_data->sensor_max_x - x;
+		if (rmi4_data->hw_if->board_data->y_flip)
+			y = rmi4_data->sensor_max_y - y;
+
+		switch (finger_status) {
+		case F12_FINGER_STATUS:
+		case F12_GLOVED_FINGER_STATUS:
+			/* Stylus has priority over fingers */
+			if (stylus_presence)
+				break;
+#ifdef TYPE_B_PROTOCOL
+			input_mt_slot(rmi4_data->input_dev, finger);
+			input_mt_report_slot_state(rmi4_data->input_dev,
+					MT_TOOL_FINGER, 1);
+#endif
+
+			input_report_key(rmi4_data->input_dev,
+					BTN_TOUCH, 1);
+			input_report_key(rmi4_data->input_dev,
+					BTN_TOOL_FINGER, 1);
+			input_report_abs(rmi4_data->input_dev,
+					ABS_MT_POSITION_X, x);
+			input_report_abs(rmi4_data->input_dev,
+					ABS_MT_POSITION_Y, y);
+#ifdef REPORT_2D_W
+			/* Wedge sensors report a single width value */
+			if (rmi4_data->wedge_sensor) {
+				input_report_abs(rmi4_data->input_dev,
+						ABS_MT_TOUCH_MAJOR, wx);
+				input_report_abs(rmi4_data->input_dev,
+						ABS_MT_TOUCH_MINOR, wx);
+			} else {
+				input_report_abs(rmi4_data->input_dev,
+						ABS_MT_TOUCH_MAJOR,
+						max(wx, wy));
+				input_report_abs(rmi4_data->input_dev,
+						ABS_MT_TOUCH_MINOR,
+						min(wx, wy));
+			}
+#endif
+#ifdef REPORT_2D_PRESSURE
+			/* data-29 holds one 16-bit force value per finger */
+			if (rmi4_data->report_pressure) {
+				f_fingers = extra_data->data29_size / 2;
+				f_data = extra_data->data29_data;
+				if (finger + 1 > f_fingers) {
+					pressure = 1;
+				} else {
+					f_lsb = finger * 2;
+					f_msb = finger * 2 + 1;
+					pressure = (int)f_data[f_lsb] << 0 |
+							(int)f_data[f_msb] << 8;
+				}
+				pressure = pressure > 0 ? pressure : 1;
+				if (pressure > rmi4_data->force_max)
+					pressure = rmi4_data->force_max;
+				input_report_abs(rmi4_data->input_dev,
+						ABS_MT_PRESSURE, pressure);
+			}
+#elif defined(F51_DISCRETE_FORCE)
+			if (finger == 0) {
+				retval = synaptics_rmi4_reg_read(rmi4_data,
+						FORCE_LEVEL_ADDR,
+						&force_level,
+						sizeof(force_level));
+				if (retval < 0)
+					return 0;
+				pressure = force_level > 0 ? force_level : 1;
+			} else {
+				pressure = 1;
+			}
+			input_report_abs(rmi4_data->input_dev,
+					ABS_MT_PRESSURE, pressure);
+#endif
+#ifndef TYPE_B_PROTOCOL
+			input_mt_sync(rmi4_data->input_dev);
+#endif
+
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Finger %d: status = 0x%02x, x = %d, y = %d, wx = %d, wy = %d\n",
+					__func__, finger,
+					finger_status,
+					x, y, wx, wy);
+
+			finger_presence = 1;
+			touch_count++;
+			break;
+		case F12_PALM_STATUS:
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Finger %d: x = %d, y = %d, wx = %d, wy = %d\n",
+					__func__, finger,
+					x, y, wx, wy);
+			break;
+		case F12_STYLUS_STATUS:
+		case F12_ERASER_STATUS:
+			if (!rmi4_data->stylus_enable)
+				break;
+			/* Stylus has priority over fingers */
+			if (finger_presence) {
+				mutex_unlock(&(rmi4_data->rmi4_report_mutex));
+				synaptics_rmi4_free_fingers(rmi4_data);
+				mutex_lock(&(rmi4_data->rmi4_report_mutex));
+				finger_presence = 0;
+			}
+			if (stylus_presence) {/* Allow one stylus at a timee */
+				if (finger + 1 != stylus_presence)
+					break;
+			}
+			input_report_key(rmi4_data->stylus_dev,
+					BTN_TOUCH, 1);
+			if (finger_status == F12_STYLUS_STATUS) {
+				input_report_key(rmi4_data->stylus_dev,
+						BTN_TOOL_PEN, 1);
+			} else {
+				input_report_key(rmi4_data->stylus_dev,
+						BTN_TOOL_RUBBER, 1);
+			}
+			input_report_abs(rmi4_data->stylus_dev,
+					ABS_X, x);
+			input_report_abs(rmi4_data->stylus_dev,
+					ABS_Y, y);
+			input_sync(rmi4_data->stylus_dev);
+
+			stylus_presence = finger + 1;
+			touch_count++;
+			break;
+		default:
+#ifdef TYPE_B_PROTOCOL
+			input_mt_slot(rmi4_data->input_dev, finger);
+			input_mt_report_slot_state(rmi4_data->input_dev,
+					MT_TOOL_FINGER, 0);
+#endif
+			break;
+		}
+	}
+
+	/* No objects left: release all buttons/tools on both devices */
+	if (touch_count == 0) {
+		finger_presence = 0;
+#ifdef F12_DATA_15_WORKAROUND
+		objects_already_present = 0;
+#endif
+		input_report_key(rmi4_data->input_dev,
+				BTN_TOUCH, 0);
+		input_report_key(rmi4_data->input_dev,
+				BTN_TOOL_FINGER, 0);
+#ifndef TYPE_B_PROTOCOL
+		input_mt_sync(rmi4_data->input_dev);
+#endif
+
+		if (rmi4_data->stylus_enable) {
+			stylus_presence = 0;
+			input_report_key(rmi4_data->stylus_dev,
+					BTN_TOUCH, 0);
+			input_report_key(rmi4_data->stylus_dev,
+					BTN_TOOL_PEN, 0);
+			if (rmi4_data->eraser_enable) {
+				input_report_key(rmi4_data->stylus_dev,
+						BTN_TOOL_RUBBER, 0);
+			}
+			input_sync(rmi4_data->stylus_dev);
+		}
+	}
+
+	input_sync(rmi4_data->input_dev);
+
+	mutex_unlock(&(rmi4_data->rmi4_report_mutex));
+
+	return touch_count;
+}
+
+/*
+ * Read and report 0D capacitive button state from an F1A function.
+ *
+ * Each button is one bit in the data registers; only state changes are
+ * forwarded. With NO_0D_WHILE_2D, button events that begin while
+ * fingers are on the 2D surface are swallowed (tracked in
+ * while_2d_status) and presses that started before 2D contact are
+ * released normally (before_2d_status).
+ *
+ * NOTE(review): the static bookkeeping arrays are shared across all
+ * device instances served by this module — presumably a single-panel
+ * assumption; confirm before supporting multiple devices.
+ *
+ * Returns the last register-read status (0 on success).
+ */
+static int synaptics_rmi4_f1a_report(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler)
+{
+	int retval;
+	unsigned char touch_count = 0;
+	unsigned char button;
+	unsigned char index;
+	unsigned char shift;
+	unsigned char status;
+	unsigned char *data;
+	unsigned short data_addr = fhandler->full_addr.data_base;
+	struct synaptics_rmi4_f1a_handle *f1a = fhandler->data;
+	static unsigned char do_once = 1;
+	static bool current_status[MAX_NUMBER_OF_BUTTONS];
+#ifdef NO_0D_WHILE_2D
+	static bool before_2d_status[MAX_NUMBER_OF_BUTTONS];
+	static bool while_2d_status[MAX_NUMBER_OF_BUTTONS];
+#endif
+
+	/* One-time zeroing of the static tracking arrays */
+	if (do_once) {
+		memset(current_status, 0, sizeof(current_status));
+#ifdef NO_0D_WHILE_2D
+		memset(before_2d_status, 0, sizeof(before_2d_status));
+		memset(while_2d_status, 0, sizeof(while_2d_status));
+#endif
+		do_once = 0;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			data_addr,
+			f1a->button_data_buffer,
+			f1a->button_bitmask_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read button data registers\n",
+				__func__);
+		return retval;
+	}
+
+	data = f1a->button_data_buffer;
+
+	mutex_lock(&(rmi4_data->rmi4_report_mutex));
+
+	for (button = 0; button < f1a->valid_button_count; button++) {
+		index = button / 8;
+		shift = button % 8;
+		status = ((data[index] >> shift) & MASK_1BIT);
+
+		/* Report only transitions, not steady state */
+		if (current_status[button] == status)
+			continue;
+		else
+			current_status[button] = status;
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Button %d (code %d) ->%d\n",
+				__func__, button,
+				f1a->button_map[button],
+				status);
+#ifdef NO_0D_WHILE_2D
+		if (rmi4_data->fingers_on_2d == false) {
+			if (status == 1) {
+				before_2d_status[button] = 1;
+			} else {
+				if (while_2d_status[button] == 1) {
+					while_2d_status[button] = 0;
+					continue;
+				} else {
+					before_2d_status[button] = 0;
+				}
+			}
+			touch_count++;
+			input_report_key(rmi4_data->input_dev,
+					f1a->button_map[button],
+					status);
+		} else {
+			if (before_2d_status[button] == 1) {
+				before_2d_status[button] = 0;
+				touch_count++;
+				input_report_key(rmi4_data->input_dev,
+						f1a->button_map[button],
+						status);
+			} else {
+				if (status == 1)
+					while_2d_status[button] = 1;
+				else
+					while_2d_status[button] = 0;
+			}
+		}
+#else
+		touch_count++;
+		input_report_key(rmi4_data->input_dev,
+				f1a->button_map[button],
+				status);
+#endif
+	}
+
+	if (touch_count)
+		input_sync(rmi4_data->input_dev);
+
+	mutex_unlock(&(rmi4_data->rmi4_report_mutex));
+
+	return retval;
+}
+
+/*
+ * Dispatch an interrupt for one RMI function to its report handler and
+ * track whether any 2D contact is active (consumed by the F1A
+ * NO_0D_WHILE_2D filtering).
+ */
+static void synaptics_rmi4_report_touch(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler)
+{
+	unsigned char touch_count_2d;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Function %02x reporting\n",
+			__func__, fhandler->fn_number);
+
+	switch (fhandler->fn_number) {
+	case SYNAPTICS_RMI4_F11:
+		touch_count_2d = synaptics_rmi4_f11_abs_report(rmi4_data,
+				fhandler);
+
+		if (touch_count_2d)
+			rmi4_data->fingers_on_2d = true;
+		else
+			rmi4_data->fingers_on_2d = false;
+		break;
+	case SYNAPTICS_RMI4_F12:
+		touch_count_2d = synaptics_rmi4_f12_abs_report(rmi4_data,
+				fhandler);
+
+		if (touch_count_2d)
+			rmi4_data->fingers_on_2d = true;
+		else
+			rmi4_data->fingers_on_2d = false;
+		break;
+	case SYNAPTICS_RMI4_F1A:
+		synaptics_rmi4_f1a_report(rmi4_data, fhandler);
+		break;
+#ifdef USE_DATA_SERVER
+	case SYNAPTICS_RMI4_F21:
+		/* F21 data is consumed by the userspace server via SIGIO */
+	case SYNAPTICS_RMI4_F21:
+		if (synad_pid)
+			send_sig_info(SIGIO, &interrupt_signal, synad_task);
+		break;
+#endif
+	default:
+		break;
+	}
+}
+
+/*
+ * Service one interrupt cycle: read F01 device status plus the
+ * interrupt status registers, recover from in-progress CRC checks and
+ * spontaneous resets, and (when @report is true) dispatch each flagged
+ * interrupt source to its function handler and to any registered
+ * expansion-function attn callbacks.
+ */
+static int synaptics_rmi4_sensor_report(struct synaptics_rmi4_data *rmi4_data,
+		bool report)
+{
+	int retval;
+	unsigned char data[MAX_INTR_REGISTERS + 1];
+	unsigned char *intr = &data[1]; /* intr regs follow the status byte */
+	bool was_in_bl_mode;
+	struct synaptics_rmi4_f01_device_status status;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	/*
+	 * Get interrupt status information from F01 Data1 register to
+	 * determine the source(s) that are flagging the interrupt.
+	 */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_data_base_addr,
+			data,
+			rmi4_data->num_of_intr_regs + 1);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read interrupt status\n",
+				__func__);
+		return retval;
+	}
+
+	status.data[0] = data[0];
+	if (status.status_code == STATUS_CRC_IN_PROGRESS) {
+		/* Wait out the firmware CRC check, then re-read status */
+		retval = synaptics_rmi4_check_status(rmi4_data,
+				&was_in_bl_mode);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to check status\n",
+					__func__);
+			return retval;
+		}
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				rmi4_data->f01_data_base_addr,
+				status.data,
+				sizeof(status.data));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read device status\n",
+					__func__);
+			return retval;
+		}
+	}
+	/* Unconfigured outside of flash programming = spontaneous reset */
+	if (status.unconfigured && !status.flash_prog) {
+		pr_notice("%s: spontaneous reset detected\n", __func__);
+		retval = synaptics_rmi4_reinit_device(rmi4_data);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to reinit device\n",
+					__func__);
+		}
+	}
+
+	if (!report)
+		return retval;
+
+	/*
+	 * Traverse the function handler list and service the source(s)
+	 * of the interrupt accordingly.
+	 */
+	if (!list_empty(&rmi->support_fn_list)) {
+		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+			if (fhandler->num_of_data_sources) {
+				if (fhandler->intr_mask &
+						intr[fhandler->intr_reg_num]) {
+					synaptics_rmi4_report_touch(rmi4_data,
+							fhandler);
+				}
+			}
+		}
+	}
+
+	/* Notify expansion handlers that are fully registered (not in
+	 * a pending insert/remove state) and have an attn callback.
+	 */
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link) {
+			if (!exp_fhandler->insert &&
+					!exp_fhandler->remove &&
+					(exp_fhandler->exp_fn->attn != NULL))
+				exp_fhandler->exp_fn->attn(rmi4_data, intr[0]);
+		}
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	return retval;
+}
+
+static irqreturn_t synaptics_rmi4_irq(int irq, void *data)
+{
+	struct synaptics_rmi4_data *rmi4_data = data;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	if (gpio_get_value(bdata->irq_gpio) != bdata->irq_on_state)
+		goto exit;
+
+	synaptics_rmi4_sensor_report(rmi4_data, true);
+
+exit:
+	return IRQ_HANDLED;
+}
+
+static int synaptics_rmi4_int_enable(struct synaptics_rmi4_data *rmi4_data,
+		bool enable)
+{
+	int retval = 0;
+	unsigned char ii;
+	unsigned char zero = 0x00;
+	unsigned char *intr_mask;
+	unsigned short intr_addr;
+
+	intr_mask = rmi4_data->intr_mask;
+
+	for (ii = 0; ii < rmi4_data->num_of_intr_regs; ii++) {
+		if (intr_mask[ii] != 0x00) {
+			intr_addr = rmi4_data->f01_ctrl_base_addr + 1 + ii;
+			if (enable) {
+				retval = synaptics_rmi4_reg_write(rmi4_data,
+						intr_addr,
+						&(intr_mask[ii]),
+						sizeof(intr_mask[ii]));
+				if (retval < 0)
+					return retval;
+			} else {
+				retval = synaptics_rmi4_reg_write(rmi4_data,
+						intr_addr,
+						&zero,
+						sizeof(zero));
+				if (retval < 0)
+					return retval;
+			}
+		}
+	}
+
+	return retval;
+}
+
+static int synaptics_rmi4_irq_enable(struct synaptics_rmi4_data *rmi4_data,
+		bool enable, bool attn_only)
+{
+	int retval = 0;
+	unsigned char data[MAX_INTR_REGISTERS];
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	mutex_lock(&(rmi4_data->rmi4_irq_enable_mutex));
+
+	if (attn_only) {
+		retval = synaptics_rmi4_int_enable(rmi4_data, enable);
+		goto exit;
+	}
+
+	if (enable) {
+		if (rmi4_data->irq_enabled) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Interrupt already enabled\n",
+					__func__);
+			goto exit;
+		}
+
+		retval = synaptics_rmi4_int_enable(rmi4_data, false);
+		if (retval < 0)
+			goto exit;
+
+		/* Clear interrupts */
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				rmi4_data->f01_data_base_addr + 1,
+				data,
+				rmi4_data->num_of_intr_regs);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read interrupt status\n",
+					__func__);
+			goto exit;
+		}
+
+		retval = request_threaded_irq(rmi4_data->irq, NULL,
+				synaptics_rmi4_irq, bdata->irq_flags,
+				PLATFORM_DRIVER_NAME, rmi4_data);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create irq thread\n",
+					__func__);
+			goto exit;
+		}
+
+		retval = synaptics_rmi4_int_enable(rmi4_data, true);
+		if (retval < 0)
+			goto exit;
+
+		rmi4_data->irq_enabled = true;
+	} else {
+		if (rmi4_data->irq_enabled) {
+			disable_irq(rmi4_data->irq);
+			free_irq(rmi4_data->irq, rmi4_data);
+			rmi4_data->irq_enabled = false;
+		}
+	}
+
+exit:
+	mutex_unlock(&(rmi4_data->rmi4_irq_enable_mutex));
+
+	return retval;
+}
+
+static void synaptics_rmi4_set_intr_mask(struct synaptics_rmi4_fn *fhandler,
+		struct synaptics_rmi4_fn_desc *fd,
+		unsigned int intr_count)
+{
+	unsigned char ii;
+	unsigned char intr_offset;
+
+	fhandler->intr_reg_num = (intr_count + 7) / 8;
+	if (fhandler->intr_reg_num != 0)
+		fhandler->intr_reg_num -= 1;
+
+	/* Set an enable bit for each data source */
+	intr_offset = intr_count % 8;
+	fhandler->intr_mask = 0;
+	for (ii = intr_offset;
+			ii < (fd->intr_src_count + intr_offset);
+			ii++)
+		fhandler->intr_mask |= 1 << ii;
+}
+
+static int synaptics_rmi4_f01_init(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler,
+		struct synaptics_rmi4_fn_desc *fd,
+		unsigned int intr_count)
+{
+	fhandler->fn_number = fd->fn_number;
+	fhandler->num_of_data_sources = fd->intr_src_count;
+	fhandler->data = NULL;
+	fhandler->extra = NULL;
+
+	synaptics_rmi4_set_intr_mask(fhandler, fd, intr_count);
+
+	rmi4_data->f01_query_base_addr = fd->query_base_addr;
+	rmi4_data->f01_ctrl_base_addr = fd->ctrl_base_addr;
+	rmi4_data->f01_data_base_addr = fd->data_base_addr;
+	rmi4_data->f01_cmd_base_addr = fd->cmd_base_addr;
+
+	return 0;
+}
+
/*
 * Set up the handler for F11 (legacy 2D touch sensing).
 *
 * Reads the F11 query registers to determine the number of supported
 * fingers and the sensor's maximum x/y coordinates, then walks the
 * optional query and data registers to compute the offset of the wakeup
 * gesture data register (data 38) when that feature is present.
 *
 * Returns a non-negative value on success (the result of the last
 * register read), negative errno on bus or allocation failure.
 */
static int synaptics_rmi4_f11_init(struct synaptics_rmi4_data *rmi4_data,
		struct synaptics_rmi4_fn *fhandler,
		struct synaptics_rmi4_fn_desc *fd,
		unsigned int intr_count)
{
	int retval;
	int temp;
	unsigned char offset;
	unsigned char fingers_supported;
	struct synaptics_rmi4_f11_extra_data *extra_data;
	struct synaptics_rmi4_f11_query_0_5 query_0_5;
	struct synaptics_rmi4_f11_query_7_8 query_7_8;
	struct synaptics_rmi4_f11_query_9 query_9;
	struct synaptics_rmi4_f11_query_12 query_12;
	struct synaptics_rmi4_f11_query_27 query_27;
	struct synaptics_rmi4_f11_ctrl_6_9 control_6_9;
	const struct synaptics_dsx_board_data *bdata =
				rmi4_data->hw_if->board_data;

	fhandler->fn_number = fd->fn_number;
	fhandler->num_of_data_sources = fd->intr_src_count;
	/* NOTE(review): on the early error returns below fhandler->extra is
	 * not freed here; presumably the caller's teardown path (see
	 * synaptics_rmi4_empty_fn_list()) releases it -- confirm.
	 */
	fhandler->extra = kmalloc(sizeof(*extra_data), GFP_KERNEL);
	if (!fhandler->extra) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to alloc mem for fhandler->extra\n",
				__func__);
		return -ENOMEM;
	}
	extra_data = (struct synaptics_rmi4_f11_extra_data *)fhandler->extra;

	retval = synaptics_rmi4_reg_read(rmi4_data,
			fhandler->full_addr.query_base,
			query_0_5.data,
			sizeof(query_0_5.data));
	if (retval < 0)
		return retval;

	/* Maximum number of fingers supported */
	/* Encoded field: values 0..4 mean N+1 fingers, 5 means 10 */
	if (query_0_5.num_of_fingers <= 4)
		fhandler->num_of_data_points = query_0_5.num_of_fingers + 1;
	else if (query_0_5.num_of_fingers == 5)
		fhandler->num_of_data_points = 10;

	rmi4_data->num_of_fingers = fhandler->num_of_data_points;

	retval = synaptics_rmi4_reg_read(rmi4_data,
			fhandler->full_addr.ctrl_base + 6,
			control_6_9.data,
			sizeof(control_6_9.data));
	if (retval < 0)
		return retval;

	/* Maximum x and y */
	rmi4_data->sensor_max_x = control_6_9.sensor_max_x_pos_7_0 |
			(control_6_9.sensor_max_x_pos_11_8 << 8);
	rmi4_data->sensor_max_y = control_6_9.sensor_max_y_pos_7_0 |
			(control_6_9.sensor_max_y_pos_11_8 << 8);
	dev_dbg(rmi4_data->pdev->dev.parent,
			"%s: Function %02x max x = %d max y = %d\n",
			__func__, fhandler->fn_number,
			rmi4_data->sensor_max_x,
			rmi4_data->sensor_max_y);

	rmi4_data->max_touch_width = MAX_F11_TOUCH_WIDTH;

	if (bdata->swap_axes) {
		temp = rmi4_data->sensor_max_x;
		rmi4_data->sensor_max_x = rmi4_data->sensor_max_y;
		rmi4_data->sensor_max_y = temp;
	}

	synaptics_rmi4_set_intr_mask(fhandler, fd, intr_count);

	fhandler->data = NULL;

	/*
	 * Walk the remaining (optional) query registers; "offset" tracks
	 * the running distance from the query base as each register that
	 * is present is accounted for.
	 */
	offset = sizeof(query_0_5.data);

	/* query 6 */
	if (query_0_5.has_rel)
		offset += 1;

	/* queries 7 8 */
	if (query_0_5.has_gestures) {
		retval = synaptics_rmi4_reg_read(rmi4_data,
				fhandler->full_addr.query_base + offset,
				query_7_8.data,
				sizeof(query_7_8.data));
		if (retval < 0)
			return retval;

		offset += sizeof(query_7_8.data);
	}

	/* query 9 */
	if (query_0_5.has_query_9) {
		retval = synaptics_rmi4_reg_read(rmi4_data,
				fhandler->full_addr.query_base + offset,
				query_9.data,
				sizeof(query_9.data));
		if (retval < 0)
			return retval;

		offset += sizeof(query_9.data);
	}

	/* query 10 */
	if (query_0_5.has_gestures && query_7_8.has_touch_shapes)
		offset += 1;

	/* query 11 */
	if (query_0_5.has_query_11)
		offset += 1;

	/* query 12 */
	if (query_0_5.has_query_12) {
		retval = synaptics_rmi4_reg_read(rmi4_data,
				fhandler->full_addr.query_base + offset,
				query_12.data,
				sizeof(query_12.data));
		if (retval < 0)
			return retval;

		offset += sizeof(query_12.data);
	}

	/* query 13 */
	if (query_0_5.has_jitter_filter)
		offset += 1;

	/* query 14 */
	if (query_0_5.has_query_12 && query_12.has_general_information_2)
		offset += 1;

	/* queries 15 16 17 18 19 20 21 22 23 24 25 26*/
	if (query_0_5.has_query_12 && query_12.has_physical_properties)
		offset += 12;

	/* query 27 */
	if (query_0_5.has_query_27) {
		retval = synaptics_rmi4_reg_read(rmi4_data,
				fhandler->full_addr.query_base + offset,
				query_27.data,
				sizeof(query_27.data));
		if (retval < 0)
			return retval;

		rmi4_data->f11_wakeup_gesture = query_27.has_wakeup_gesture;
	}

	/* Without wakeup gesture support the data register offsets below
	 * are not needed.
	 */
	if (!rmi4_data->f11_wakeup_gesture)
		return retval;

	/*
	 * Compute the offset of data register 38 (wakeup gesture) by
	 * summing the sizes of every preceding data register present.
	 */
	/* data 0 */
	fingers_supported = fhandler->num_of_data_points;
	offset = (fingers_supported + 3) / 4;

	/* data 1 2 3 4 5 */
	offset += 5 * fingers_supported;

	/* data 6 7 */
	if (query_0_5.has_rel)
		offset += 2 * fingers_supported;

	/* data 8 */
	if (query_0_5.has_gestures && query_7_8.data[0])
		offset += 1;

	/* data 9 */
	if (query_0_5.has_gestures && (query_7_8.data[0] || query_7_8.data[1]))
		offset += 1;

	/* data 10 */
	if (query_0_5.has_gestures &&
			(query_7_8.has_pinch || query_7_8.has_flick))
		offset += 1;

	/* data 11 12 */
	if (query_0_5.has_gestures &&
			(query_7_8.has_flick || query_7_8.has_rotate))
		offset += 2;

	/* data 13 */
	if (query_0_5.has_gestures && query_7_8.has_touch_shapes)
		offset += (fingers_supported + 3) / 4;

	/* data 14 15 */
	if (query_0_5.has_gestures &&
			(query_7_8.has_scroll_zones ||
			query_7_8.has_multi_finger_scroll ||
			query_7_8.has_chiral_scroll))
		offset += 2;

	/* data 16 17 */
	if (query_0_5.has_gestures &&
			(query_7_8.has_scroll_zones &&
			query_7_8.individual_scroll_zones))
		offset += 2;

	/* data 18 19 20 21 22 23 24 25 26 27 */
	if (query_0_5.has_query_9 && query_9.has_contact_geometry)
		offset += 10 * fingers_supported;

	/* data 28 */
	if (query_0_5.has_bending_correction ||
			query_0_5.has_large_object_suppression)
		offset += 1;

	/* data 29 30 31 */
	if (query_0_5.has_query_9 && query_9.has_pen_hover_discrimination)
		offset += 3;

	/* data 32 */
	if (query_0_5.has_query_12 &&
			query_12.has_small_object_detection_tuning)
		offset += 1;

	/* data 33 34 */
	if (query_0_5.has_query_27 && query_27.f11_query27_b0)
		offset += 2;

	/* data 35 */
	if (query_0_5.has_query_12 && query_12.has_8bit_w)
		offset += fingers_supported;

	/* data 36 */
	if (query_0_5.has_bending_correction)
		offset += 1;

	/* data 37 */
	if (query_0_5.has_query_27 && query_27.has_data_37)
		offset += 1;

	/* data 38 */
	if (query_0_5.has_query_27 && query_27.has_wakeup_gesture)
		extra_data->data38_offset = offset;

	return retval;
}
+
+static int synaptics_rmi4_f12_set_enables(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short ctrl28)
+{
+	int retval;
+	static unsigned short ctrl_28_address;
+
+	if (ctrl28)
+		ctrl_28_address = ctrl28;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			ctrl_28_address,
+			&rmi4_data->report_enable,
+			sizeof(rmi4_data->report_enable));
+	if (retval < 0)
+		return retval;
+
+	return retval;
+}
+
/*
 * Determine whether subpacket @sub of packet register @reg of an F12
 * register set is present on this device.
 *
 * @presence: contents of the size/presence query register
 *            (presence[0] = size of the structure register;
 *            presence[1..] = per-register presence bits)
 * @presence_size: size of @presence in bytes
 * @structure_offset: offset of the structure query register from the
 *            function's query base
 * @reg: packet register number to inspect
 * @sub: subpacket number within that register
 *
 * Returns 1 if the subpacket is present, 0 if not, negative errno on
 * bus, allocation, or argument failure.
 */
static int synaptics_rmi4_f12_find_sub(struct synaptics_rmi4_data *rmi4_data,
		struct synaptics_rmi4_fn *fhandler,
		unsigned char *presence, unsigned char presence_size,
		unsigned char structure_offset, unsigned char reg,
		unsigned char sub)
{
	int retval;
	unsigned char cnt;
	unsigned char regnum;
	unsigned char bitnum;
	unsigned char p_index;
	unsigned char s_index;
	unsigned char offset;
	unsigned char max_reg;
	unsigned char *structure;

	/* Highest register number representable by the presence bits */
	max_reg = (presence_size - 1) * 8 - 1;

	if (reg > max_reg) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Register number (%d) over limit\n",
				__func__, reg);
		return -EINVAL;
	}

	p_index = reg / 8 + 1;
	bitnum = reg % 8;
	if ((presence[p_index] & (1 << bitnum)) == 0x00) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Register %d is not present\n",
				__func__, reg);
		return -EINVAL;
	}

	/* presence[0] holds the size of the structure register */
	structure = kmalloc(presence[0], GFP_KERNEL);
	if (!structure) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to alloc mem for structure register\n",
				__func__);
		return -ENOMEM;
	}

	retval = synaptics_rmi4_reg_read(rmi4_data,
			fhandler->full_addr.query_base + structure_offset,
			structure,
			presence[0]);
	if (retval < 0)
		goto exit;

	s_index = 0;

	/* Skip over the structure entries of every present register that
	 * precedes @reg.
	 */
	for (regnum = 0; regnum < reg; regnum++) {
		p_index = regnum / 8 + 1;
		bitnum = regnum % 8;
		if ((presence[p_index] & (1 << bitnum)) == 0x00)
			continue;

		/* A size byte of 0x00 means the actual size is held in the
		 * following two bytes -- skip them as well.
		 */
		if (structure[s_index] == 0x00)
			s_index += 3;
		else
			s_index++;

		/* Subpacket presence bytes: bit 7 set means more follow */
		while (structure[s_index] & ~MASK_7BIT)
			s_index++;

		s_index++;
	}

	/* s_index now points at @reg's size byte; step to its first
	 * subpacket presence byte. Each presence byte carries 7 bits.
	 */
	cnt = 0;
	s_index++;
	offset = sub / 7;
	bitnum = sub % 7;

	do {
		if (cnt == offset) {
			if (structure[s_index + cnt] & (1 << bitnum))
				retval = 1;
			else
				retval = 0;
			goto exit;
		}
		cnt++;
	} while (structure[s_index + cnt - 1] & ~MASK_7BIT);

	/* Ran out of presence bytes before reaching @sub: not present */
	retval = 0;

exit:
	kfree(structure);

	return retval;
}
+
/*
 * Set up the handler for F12 (2D touch sensing).
 *
 * Reads F12 query 5 to discover which control registers are present and
 * computes their offsets from the control base, reads control 23 for the
 * number of supported fingers and stylus/eraser support, reads query 8
 * to locate the data registers used at report time (data 1/4/15/29),
 * programs the report enable mask via F12_CTRL28, and reads the sensor's
 * maximum coordinates from control 8 (or control 31 on wedge sensors).
 *
 * Returns 0 on success, negative errno on failure. The temporary
 * query/control buffers are released on every path via the exit label.
 */
static int synaptics_rmi4_f12_init(struct synaptics_rmi4_data *rmi4_data,
		struct synaptics_rmi4_fn *fhandler,
		struct synaptics_rmi4_fn_desc *fd,
		unsigned int intr_count)
{
	int retval = 0;
	int temp;
	unsigned char subpacket;
	unsigned char ctrl_23_size;
	unsigned char size_of_2d_data;
	unsigned char size_of_query5;
	unsigned char size_of_query8;
	unsigned char ctrl_8_offset;
	unsigned char ctrl_20_offset;
	unsigned char ctrl_23_offset;
	unsigned char ctrl_28_offset;
	unsigned char ctrl_31_offset;
	unsigned char ctrl_58_offset;
	unsigned char num_of_fingers;
	struct synaptics_rmi4_f12_extra_data *extra_data;
	struct synaptics_rmi4_f12_query_5 *query_5 = NULL;
	struct synaptics_rmi4_f12_query_8 *query_8 = NULL;
	struct synaptics_rmi4_f12_ctrl_8 *ctrl_8 = NULL;
	struct synaptics_rmi4_f12_ctrl_23 *ctrl_23 = NULL;
	struct synaptics_rmi4_f12_ctrl_31 *ctrl_31 = NULL;
	struct synaptics_rmi4_f12_ctrl_58 *ctrl_58 = NULL;
	const struct synaptics_dsx_board_data *bdata =
				rmi4_data->hw_if->board_data;

	fhandler->fn_number = fd->fn_number;
	fhandler->num_of_data_sources = fd->intr_src_count;
	/* NOTE(review): fhandler->extra is not freed on the error paths
	 * here; presumably synaptics_rmi4_empty_fn_list() releases it in
	 * the caller's teardown -- confirm.
	 */
	fhandler->extra = kmalloc(sizeof(*extra_data), GFP_KERNEL);
	if (!fhandler->extra) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to alloc mem for fhandler->extra\n",
				__func__);
		return -ENOMEM;
	}
	extra_data = (struct synaptics_rmi4_f12_extra_data *)fhandler->extra;
	size_of_2d_data = sizeof(struct synaptics_rmi4_f12_finger_data);

	query_5 = kzalloc(sizeof(*query_5), GFP_KERNEL);
	if (!query_5) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to alloc mem for query_5\n",
				__func__);
		retval = -ENOMEM;
		goto exit;
	}

	query_8 = kzalloc(sizeof(*query_8), GFP_KERNEL);
	if (!query_8) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to alloc mem for query_8\n",
				__func__);
		retval = -ENOMEM;
		goto exit;
	}

	ctrl_8 = kzalloc(sizeof(*ctrl_8), GFP_KERNEL);
	if (!ctrl_8) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to alloc mem for ctrl_8\n",
				__func__);
		retval = -ENOMEM;
		goto exit;
	}

	ctrl_23 = kzalloc(sizeof(*ctrl_23), GFP_KERNEL);
	if (!ctrl_23) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to alloc mem for ctrl_23\n",
				__func__);
		retval = -ENOMEM;
		goto exit;
	}

	ctrl_31 = kzalloc(sizeof(*ctrl_31), GFP_KERNEL);
	if (!ctrl_31) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to alloc mem for ctrl_31\n",
				__func__);
		retval = -ENOMEM;
		goto exit;
	}

	ctrl_58 = kzalloc(sizeof(*ctrl_58), GFP_KERNEL);
	if (!ctrl_58) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to alloc mem for ctrl_58\n",
				__func__);
		retval = -ENOMEM;
		goto exit;
	}

	/* Query 4 holds the size of query 5; clamp to our buffer */
	retval = synaptics_rmi4_reg_read(rmi4_data,
			fhandler->full_addr.query_base + 4,
			&size_of_query5,
			sizeof(size_of_query5));
	if (retval < 0)
		goto exit;

	if (size_of_query5 > sizeof(query_5->data))
		size_of_query5 = sizeof(query_5->data);
	memset(query_5->data, 0x00, sizeof(query_5->data));

	retval = synaptics_rmi4_reg_read(rmi4_data,
			fhandler->full_addr.query_base + 5,
			query_5->data,
			size_of_query5);
	if (retval < 0)
		goto exit;

	/*
	 * Each ctrlN_is_present bit contributes one register to the
	 * offset (from the control base) of the next control register
	 * of interest.
	 */
	ctrl_8_offset = query_5->ctrl0_is_present +
			query_5->ctrl1_is_present +
			query_5->ctrl2_is_present +
			query_5->ctrl3_is_present +
			query_5->ctrl4_is_present +
			query_5->ctrl5_is_present +
			query_5->ctrl6_is_present +
			query_5->ctrl7_is_present;

	ctrl_20_offset = ctrl_8_offset +
			query_5->ctrl8_is_present +
			query_5->ctrl9_is_present +
			query_5->ctrl10_is_present +
			query_5->ctrl11_is_present +
			query_5->ctrl12_is_present +
			query_5->ctrl13_is_present +
			query_5->ctrl14_is_present +
			query_5->ctrl15_is_present +
			query_5->ctrl16_is_present +
			query_5->ctrl17_is_present +
			query_5->ctrl18_is_present +
			query_5->ctrl19_is_present;

	ctrl_23_offset = ctrl_20_offset +
			query_5->ctrl20_is_present +
			query_5->ctrl21_is_present +
			query_5->ctrl22_is_present;

	ctrl_28_offset = ctrl_23_offset +
			query_5->ctrl23_is_present +
			query_5->ctrl24_is_present +
			query_5->ctrl25_is_present +
			query_5->ctrl26_is_present +
			query_5->ctrl27_is_present;

	ctrl_31_offset = ctrl_28_offset +
			query_5->ctrl28_is_present +
			query_5->ctrl29_is_present +
			query_5->ctrl30_is_present;

	ctrl_58_offset = ctrl_31_offset +
			query_5->ctrl31_is_present +
			query_5->ctrl32_is_present +
			query_5->ctrl33_is_present +
			query_5->ctrl34_is_present +
			query_5->ctrl35_is_present +
			query_5->ctrl36_is_present +
			query_5->ctrl37_is_present +
			query_5->ctrl38_is_present +
			query_5->ctrl39_is_present +
			query_5->ctrl40_is_present +
			query_5->ctrl41_is_present +
			query_5->ctrl42_is_present +
			query_5->ctrl43_is_present +
			query_5->ctrl44_is_present +
			query_5->ctrl45_is_present +
			query_5->ctrl46_is_present +
			query_5->ctrl47_is_present +
			query_5->ctrl48_is_present +
			query_5->ctrl49_is_present +
			query_5->ctrl50_is_present +
			query_5->ctrl51_is_present +
			query_5->ctrl52_is_present +
			query_5->ctrl53_is_present +
			query_5->ctrl54_is_present +
			query_5->ctrl55_is_present +
			query_5->ctrl56_is_present +
			query_5->ctrl57_is_present;

	/* Size of ctrl 23: 2 mandatory bytes plus subpackets 2..4 if present */
	ctrl_23_size = 2;
	for (subpacket = 2; subpacket <= 4; subpacket++) {
		retval = synaptics_rmi4_f12_find_sub(rmi4_data,
				fhandler, query_5->data, sizeof(query_5->data),
				6, 23, subpacket);
		if (retval == 1)
			ctrl_23_size++;
		else if (retval < 0)
			goto exit;

	}

	/* Ctrl 20 subpacket 0 indicates wakeup gesture configurability */
	retval = synaptics_rmi4_f12_find_sub(rmi4_data,
			fhandler, query_5->data, sizeof(query_5->data),
			6, 20, 0);
	if (retval == 1)
		rmi4_data->set_wakeup_gesture = 2;
	else if (retval == 0)
		rmi4_data->set_wakeup_gesture = 0;
	else if (retval < 0)
		goto exit;

	retval = synaptics_rmi4_reg_read(rmi4_data,
			fhandler->full_addr.ctrl_base + ctrl_23_offset,
			ctrl_23->data,
			ctrl_23_size);
	if (retval < 0)
		goto exit;

	/* Maximum number of fingers supported */
	fhandler->num_of_data_points = min_t(unsigned char,
			ctrl_23->max_reported_objects,
			(unsigned char)F12_FINGERS_TO_SUPPORT);

	num_of_fingers = fhandler->num_of_data_points;
	rmi4_data->num_of_fingers = num_of_fingers;

	rmi4_data->stylus_enable = ctrl_23->stylus_enable;
	rmi4_data->eraser_enable = ctrl_23->eraser_enable;

	/* Query 7 holds the size of query 8; clamp to our buffer */
	retval = synaptics_rmi4_reg_read(rmi4_data,
			fhandler->full_addr.query_base + 7,
			&size_of_query8,
			sizeof(size_of_query8));
	if (retval < 0)
		goto exit;

	if (size_of_query8 > sizeof(query_8->data))
		size_of_query8 = sizeof(query_8->data);
	memset(query_8->data, 0x00, sizeof(query_8->data));

	retval = synaptics_rmi4_reg_read(rmi4_data,
			fhandler->full_addr.query_base + 8,
			query_8->data,
			size_of_query8);
	if (retval < 0)
		goto exit;

	/* Determine the presence of the Data0 register */
	extra_data->data1_offset = query_8->data0_is_present;

	/* Data 15 (finger presence bitmap), if present */
	if ((size_of_query8 >= 3) && (query_8->data15_is_present)) {
		extra_data->data15_offset = query_8->data0_is_present +
				query_8->data1_is_present +
				query_8->data2_is_present +
				query_8->data3_is_present +
				query_8->data4_is_present +
				query_8->data5_is_present +
				query_8->data6_is_present +
				query_8->data7_is_present +
				query_8->data8_is_present +
				query_8->data9_is_present +
				query_8->data10_is_present +
				query_8->data11_is_present +
				query_8->data12_is_present +
				query_8->data13_is_present +
				query_8->data14_is_present;
		extra_data->data15_size = (num_of_fingers + 7) / 8;
	} else {
		extra_data->data15_size = 0;
	}

#ifdef REPORT_2D_PRESSURE
	/* Data 29 (per-finger force), if present */
	if ((size_of_query8 >= 5) && (query_8->data29_is_present)) {
		extra_data->data29_offset = query_8->data0_is_present +
				query_8->data1_is_present +
				query_8->data2_is_present +
				query_8->data3_is_present +
				query_8->data4_is_present +
				query_8->data5_is_present +
				query_8->data6_is_present +
				query_8->data7_is_present +
				query_8->data8_is_present +
				query_8->data9_is_present +
				query_8->data10_is_present +
				query_8->data11_is_present +
				query_8->data12_is_present +
				query_8->data13_is_present +
				query_8->data14_is_present +
				query_8->data15_is_present +
				query_8->data16_is_present +
				query_8->data17_is_present +
				query_8->data18_is_present +
				query_8->data19_is_present +
				query_8->data20_is_present +
				query_8->data21_is_present +
				query_8->data22_is_present +
				query_8->data23_is_present +
				query_8->data24_is_present +
				query_8->data25_is_present +
				query_8->data26_is_present +
				query_8->data27_is_present +
				query_8->data28_is_present;
		extra_data->data29_size = 0;
		/* NOTE(review): inclusive bound scans subpackets
		 * 0..num_of_fingers -- verify against the F12 data 29
		 * subpacket layout.
		 */
		for (subpacket = 0; subpacket <= num_of_fingers; subpacket++) {
			retval = synaptics_rmi4_f12_find_sub(rmi4_data,
					fhandler, query_8->data,
					sizeof(query_8->data),
					9, 29, subpacket);
			if (retval == 1)
				extra_data->data29_size += 2;
			else if (retval < 0)
				goto exit;
		}
		retval = synaptics_rmi4_reg_read(rmi4_data,
				fhandler->full_addr.ctrl_base + ctrl_58_offset,
				ctrl_58->data,
				sizeof(ctrl_58->data));
		if (retval < 0)
			goto exit;
		rmi4_data->force_min =
				(int)(ctrl_58->min_force_lsb << 0) |
				(int)(ctrl_58->min_force_msb << 8);
		rmi4_data->force_max =
				(int)(ctrl_58->max_force_lsb << 0) |
				(int)(ctrl_58->max_force_msb << 8);
		rmi4_data->report_pressure = true;
	} else {
		extra_data->data29_size = 0;
		rmi4_data->report_pressure = false;
	}
#endif

	rmi4_data->report_enable = RPT_DEFAULT;
#ifdef REPORT_2D_Z
	rmi4_data->report_enable |= RPT_Z;
#endif
#ifdef REPORT_2D_W
	rmi4_data->report_enable |= (RPT_WX | RPT_WY);
#endif

	/* Program the report enable mask; this also latches the ctrl 28
	 * address for later re-init calls.
	 */
	retval = synaptics_rmi4_f12_set_enables(rmi4_data,
			fhandler->full_addr.ctrl_base + ctrl_28_offset);
	if (retval < 0)
		goto exit;

	if (query_5->ctrl8_is_present) {
		rmi4_data->wedge_sensor = false;

		retval = synaptics_rmi4_reg_read(rmi4_data,
				fhandler->full_addr.ctrl_base + ctrl_8_offset,
				ctrl_8->data,
				sizeof(ctrl_8->data));
		if (retval < 0)
			goto exit;

		/* Maximum x and y */
		rmi4_data->sensor_max_x =
				((unsigned int)ctrl_8->max_x_coord_lsb << 0) |
				((unsigned int)ctrl_8->max_x_coord_msb << 8);
		rmi4_data->sensor_max_y =
				((unsigned int)ctrl_8->max_y_coord_lsb << 0) |
				((unsigned int)ctrl_8->max_y_coord_msb << 8);

		rmi4_data->max_touch_width = MAX_F12_TOUCH_WIDTH;
	} else {
		rmi4_data->wedge_sensor = true;

		retval = synaptics_rmi4_reg_read(rmi4_data,
				fhandler->full_addr.ctrl_base + ctrl_31_offset,
				ctrl_31->data,
				sizeof(ctrl_31->data));
		if (retval < 0)
			goto exit;

		/* Maximum x and y */
		rmi4_data->sensor_max_x =
				((unsigned int)ctrl_31->max_x_coord_lsb << 0) |
				((unsigned int)ctrl_31->max_x_coord_msb << 8);
		rmi4_data->sensor_max_y =
				((unsigned int)ctrl_31->max_y_coord_lsb << 0) |
				((unsigned int)ctrl_31->max_y_coord_msb << 8);

		rmi4_data->max_touch_width = MAX_F12_TOUCH_WIDTH;
	}

	dev_dbg(rmi4_data->pdev->dev.parent,
			"%s: Function %02x max x = %d max y = %d\n",
			__func__, fhandler->fn_number,
			rmi4_data->sensor_max_x,
			rmi4_data->sensor_max_y);

	if (bdata->swap_axes) {
		temp = rmi4_data->sensor_max_x;
		rmi4_data->sensor_max_x = rmi4_data->sensor_max_y;
		rmi4_data->sensor_max_y = temp;
	}

	rmi4_data->f12_wakeup_gesture = query_5->ctrl27_is_present;
	if (rmi4_data->f12_wakeup_gesture) {
		extra_data->ctrl20_offset = ctrl_20_offset;
		extra_data->data4_offset = query_8->data0_is_present +
				query_8->data1_is_present +
				query_8->data2_is_present +
				query_8->data3_is_present;
	}

	synaptics_rmi4_set_intr_mask(fhandler, fd, intr_count);

	/* Allocate memory for finger data storage space */
	fhandler->data_size = num_of_fingers * size_of_2d_data;
	fhandler->data = kmalloc(fhandler->data_size, GFP_KERNEL);
	if (!fhandler->data) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to alloc mem for fhandler->data\n",
				__func__);
		retval = -ENOMEM;
		goto exit;
	}

exit:
	kfree(query_5);
	kfree(query_8);
	kfree(ctrl_8);
	kfree(ctrl_23);
	kfree(ctrl_31);
	kfree(ctrl_58);

	return retval;
}
+
/*
 * Allocate and partially populate the F1A (0D capacitive buttons)
 * handle: reads the button query registers and allocates the tx/rx map,
 * the button data buffer, and the button map, all sized from the
 * reported maximum button count.
 *
 * On failure the partially-allocated state is left attached to
 * fhandler->data; the caller releases it via synaptics_rmi4_f1a_kfree()
 * (see the error path of synaptics_rmi4_f1a_init()).
 *
 * Returns 0 on success, negative errno on failure.
 */
static int synaptics_rmi4_f1a_alloc_mem(struct synaptics_rmi4_data *rmi4_data,
		struct synaptics_rmi4_fn *fhandler)
{
	int retval;
	struct synaptics_rmi4_f1a_handle *f1a;

	f1a = kzalloc(sizeof(*f1a), GFP_KERNEL);
	if (!f1a) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to alloc mem for function handle\n",
				__func__);
		return -ENOMEM;
	}

	/* Attach immediately so cleanup can find it on error paths */
	fhandler->data = (void *)f1a;
	fhandler->extra = NULL;

	retval = synaptics_rmi4_reg_read(rmi4_data,
			fhandler->full_addr.query_base,
			f1a->button_query.data,
			sizeof(f1a->button_query.data));
	if (retval < 0) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to read query registers\n",
				__func__);
		return retval;
	}

	/* max_button_count is zero-based */
	f1a->max_count = f1a->button_query.max_button_count + 1;

	/* Two bytes (tx, rx) per button */
	f1a->button_control.txrx_map = kzalloc(f1a->max_count * 2, GFP_KERNEL);
	if (!f1a->button_control.txrx_map) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to alloc mem for tx rx mapping\n",
				__func__);
		return -ENOMEM;
	}

	/* One status bit per button, rounded up to whole bytes */
	f1a->button_bitmask_size = (f1a->max_count + 7) / 8;

	f1a->button_data_buffer = kcalloc(f1a->button_bitmask_size,
			sizeof(*(f1a->button_data_buffer)), GFP_KERNEL);
	if (!f1a->button_data_buffer) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to alloc mem for data buffer\n",
				__func__);
		return -ENOMEM;
	}

	f1a->button_map = kcalloc(f1a->max_count,
			sizeof(*(f1a->button_map)), GFP_KERNEL);
	if (!f1a->button_map) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Failed to alloc mem for button map\n",
				__func__);
		return -ENOMEM;
	}

	return 0;
}
+
/*
 * Read the F1A tx/rx mapping and optional query 4 features, then copy
 * the platform-supplied key code map into the F1A handle, clamping to
 * the smaller of the platform map size and the device's button count.
 *
 * Returns 0 on success, negative errno on bus failure or when the board
 * data provides no button map.
 */
static int synaptics_rmi4_f1a_button_map(struct synaptics_rmi4_data *rmi4_data,
		struct synaptics_rmi4_fn *fhandler)
{
	int retval;
	unsigned char ii;
	unsigned char offset = 0;
	struct synaptics_rmi4_f1a_query_4 query_4;
	struct synaptics_rmi4_f1a_handle *f1a = fhandler->data;
	const struct synaptics_dsx_board_data *bdata =
			rmi4_data->hw_if->board_data;

	/* NOTE(review): f1a->valid_button_count has not been computed yet
	 * at this point (it is set near the end of this function), so this
	 * publishes the zero-initialized value on the early error paths --
	 * verify whether that is intentional.
	 */
	rmi4_data->valid_button_count = f1a->valid_button_count;

	/* Offset of the tx/rx map within the control registers */
	offset = f1a->button_query.has_general_control +
			f1a->button_query.has_interrupt_enable +
			f1a->button_query.has_multibutton_select;

	if (f1a->button_query.has_tx_rx_map) {
		retval = synaptics_rmi4_reg_read(rmi4_data,
				fhandler->full_addr.ctrl_base + offset,
				f1a->button_control.txrx_map,
				f1a->max_count * 2);
		if (retval < 0) {
			dev_err(rmi4_data->pdev->dev.parent,
					"%s: Failed to read tx rx mapping\n",
					__func__);
			return retval;
		}

		rmi4_data->button_txrx_mapping = f1a->button_control.txrx_map;
	}

	if (f1a->button_query.has_query4) {
		/* Query 4 follows queries 0, 1 and the optional 2 and 3 */
		offset = 2 + f1a->button_query.has_query2 +
				f1a->button_query.has_query3;

		retval = synaptics_rmi4_reg_read(rmi4_data,
				fhandler->full_addr.query_base + offset,
				query_4.data,
				sizeof(query_4.data));
		if (retval < 0) {
			dev_err(rmi4_data->pdev->dev.parent,
					"%s: Failed to read button features 4\n",
					__func__);
			return retval;
		}

		if (query_4.has_ctrl24)
			rmi4_data->external_afe_buttons = true;
		else
			rmi4_data->external_afe_buttons = false;
	}

	if (!bdata->cap_button_map) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: cap_button_map is NULL in board file\n",
				__func__);
		return -ENODEV;
	} else if (!bdata->cap_button_map->map) {
		dev_err(rmi4_data->pdev->dev.parent,
				"%s: Button map is missing in board file\n",
				__func__);
		return -ENODEV;
	} else {
		/* Use the smaller of the platform and device button counts */
		if (bdata->cap_button_map->nbuttons != f1a->max_count) {
			f1a->valid_button_count = min(f1a->max_count,
					bdata->cap_button_map->nbuttons);
		} else {
			f1a->valid_button_count = f1a->max_count;
		}

		for (ii = 0; ii < f1a->valid_button_count; ii++)
			f1a->button_map[ii] = bdata->cap_button_map->map[ii];

		rmi4_data->valid_button_count = f1a->valid_button_count;
	}

	return 0;
}
+
+static void synaptics_rmi4_f1a_kfree(struct synaptics_rmi4_fn *fhandler)
+{
+	struct synaptics_rmi4_f1a_handle *f1a = fhandler->data;
+
+	if (f1a) {
+		kfree(f1a->button_control.txrx_map);
+		kfree(f1a->button_data_buffer);
+		kfree(f1a->button_map);
+		kfree(f1a);
+		fhandler->data = NULL;
+	}
+}
+
+static int synaptics_rmi4_f1a_init(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler,
+		struct synaptics_rmi4_fn_desc *fd,
+		unsigned int intr_count)
+{
+	int retval;
+
+	fhandler->fn_number = fd->fn_number;
+	fhandler->num_of_data_sources = fd->intr_src_count;
+
+	synaptics_rmi4_set_intr_mask(fhandler, fd, intr_count);
+
+	retval = synaptics_rmi4_f1a_alloc_mem(rmi4_data, fhandler);
+	if (retval < 0)
+		goto error_exit;
+
+	retval = synaptics_rmi4_f1a_button_map(rmi4_data, fhandler);
+	if (retval < 0)
+		goto error_exit;
+
+	rmi4_data->button_0d_enabled = 1;
+
+	return 0;
+
+error_exit:
+	synaptics_rmi4_f1a_kfree(fhandler);
+
+	return retval;
+}
+
+static void synaptics_rmi4_empty_fn_list(struct synaptics_rmi4_data *rmi4_data)
+{
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_fn *fhandler_temp;
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	if (!list_empty(&rmi->support_fn_list)) {
+		list_for_each_entry_safe(fhandler,
+				fhandler_temp,
+				&rmi->support_fn_list,
+				link) {
+			if (fhandler->fn_number == SYNAPTICS_RMI4_F1A) {
+				synaptics_rmi4_f1a_kfree(fhandler);
+			} else {
+				kfree(fhandler->extra);
+				kfree(fhandler->data);
+			}
+			list_del(&fhandler->link);
+			kfree(fhandler);
+		}
+	}
+	INIT_LIST_HEAD(&rmi->support_fn_list);
+}
+
/*
 * Poll the F01 device status register until the firmware CRC check
 * completes (up to CHECK_STATUS_TIMEOUT_MS, in 20 ms steps) and record
 * whether the device is in flash programming (bootloader) mode.
 *
 * @was_in_bl_mode: set to true if any polling was required; never
 *                  cleared here -- callers are expected to initialize it.
 *
 * Returns 0 on success, -EINVAL if the CRC check did not finish within
 * the timeout, negative errno on bus failure.
 */
static int synaptics_rmi4_check_status(struct synaptics_rmi4_data *rmi4_data,
		bool *was_in_bl_mode)
{
	int retval;
	int timeout = CHECK_STATUS_TIMEOUT_MS;
	struct synaptics_rmi4_f01_device_status status;

	retval = synaptics_rmi4_reg_read(rmi4_data,
			rmi4_data->f01_data_base_addr,
			status.data,
			sizeof(status.data));
	if (retval < 0)
		return retval;

	while (status.status_code == STATUS_CRC_IN_PROGRESS) {
		if (timeout > 0)
			msleep(20);
		else
			return -EINVAL;	/* CRC never completed */

		retval = synaptics_rmi4_reg_read(rmi4_data,
				rmi4_data->f01_data_base_addr,
				status.data,
				sizeof(status.data));
		if (retval < 0)
			return retval;

		timeout -= 20;
	}

	/* Any decrement of the budget means at least one poll happened */
	if (timeout != CHECK_STATUS_TIMEOUT_MS)
		*was_in_bl_mode = true;

	if (status.flash_prog == 1) {
		rmi4_data->flash_prog_mode = true;
		pr_notice("%s: In flash prog mode, status = 0x%02x\n",
				__func__,
				status.status_code);
	} else {
		rmi4_data->flash_prog_mode = false;
	}

	return 0;
}
+
+/*
+ * Latch the CONFIGURED bit in the F01 device control register so the
+ * firmware knows host configuration is complete.  The current no-sleep
+ * setting is captured on the way for later restore on resume.
+ */
+static int synaptics_rmi4_set_configured(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char ctrl;
+	int rc;
+
+	rc = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&ctrl,
+			sizeof(ctrl));
+	if (rc < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set configured\n",
+				__func__);
+		return rc;
+	}
+
+	/* remember the device's no-sleep preference before modifying ctrl */
+	rmi4_data->no_sleep_setting = ctrl & NO_SLEEP_ON;
+	ctrl |= CONFIGURED;
+
+	rc = synaptics_rmi4_reg_write(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&ctrl,
+			sizeof(ctrl));
+	if (rc < 0)
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set configured\n",
+				__func__);
+
+	return rc;
+}
+
+/*
+ * Allocate a zeroed function handler and fill in its data/ctrl/cmd/query
+ * base addresses, folding the PDT page number into the upper byte.
+ * Returns 0 on success or -ENOMEM.
+ */
+static int synaptics_rmi4_alloc_fh(struct synaptics_rmi4_fn **fhandler,
+		struct synaptics_rmi4_fn_desc *rmi_fd, int page_number)
+{
+	struct synaptics_rmi4_fn *fh;
+	unsigned int page_bits = page_number << 8;
+
+	*fhandler = kzalloc(sizeof(**fhandler), GFP_KERNEL);
+	if (*fhandler == NULL)
+		return -ENOMEM;
+
+	fh = *fhandler;
+	fh->full_addr.data_base = rmi_fd->data_base_addr | page_bits;
+	fh->full_addr.ctrl_base = rmi_fd->ctrl_base_addr | page_bits;
+	fh->full_addr.cmd_base = rmi_fd->cmd_base_addr | page_bits;
+	fh->full_addr.query_base = rmi_fd->query_base_addr | page_bits;
+
+	return 0;
+}
+
+/*
+ * Walk the RMI4 Page Description Tables, build a handler for every
+ * supported function (F01/F11/F12/F1A/F21/F35/F51), then read the F01
+ * query block to populate manufacturer/product/build-id data and the
+ * per-handler interrupt masks.  If F01 reports the device was in
+ * bootloader mode the whole scan is restarted, since the PDT may have
+ * changed.  Returns 0 on success or a negative errno.
+ */
+static int synaptics_rmi4_query_device(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char page_number;
+	unsigned char intr_count;
+	unsigned char *f01_query;
+	unsigned short pdt_entry_addr;
+	bool f01found;
+	bool f35found;
+	bool was_in_bl_mode;
+	struct synaptics_rmi4_fn_desc rmi_fd;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+/* Restart point: taken again after leaving bootloader mode */
+rescan_pdt:
+	f01found = false;
+	f35found = false;
+	was_in_bl_mode = false;
+	intr_count = 0;
+	INIT_LIST_HEAD(&rmi->support_fn_list);
+
+	/* Scan the page description tables of the pages to service */
+	for (page_number = 0; page_number < PAGES_TO_SERVICE; page_number++) {
+		/* PDT entries are laid out top-down from PDT_START */
+		for (pdt_entry_addr = PDT_START; pdt_entry_addr > PDT_END;
+				pdt_entry_addr -= PDT_ENTRY_SIZE) {
+			pdt_entry_addr |= (page_number << 8);
+
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					pdt_entry_addr,
+					(unsigned char *)&rmi_fd,
+					sizeof(rmi_fd));
+			if (retval < 0)
+				return retval;
+
+			/* strip the page bits again for the next iteration */
+			pdt_entry_addr &= ~(MASK_8BIT << 8);
+
+			fhandler = NULL;
+
+			if (rmi_fd.fn_number == 0) {
+				dev_dbg(rmi4_data->pdev->dev.parent,
+						"%s: Reached end of PDT\n",
+						__func__);
+				break;
+			}
+
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: F%02x found (page %d)\n",
+					__func__, rmi_fd.fn_number,
+					page_number);
+
+			switch (rmi_fd.fn_number) {
+			case SYNAPTICS_RMI4_F01:
+				if (rmi_fd.intr_src_count == 0)
+					break;
+
+				f01found = true;
+
+				retval = synaptics_rmi4_alloc_fh(&fhandler,
+						&rmi_fd, page_number);
+				if (retval < 0) {
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Failed to alloc for F%d\n",
+							__func__,
+							rmi_fd.fn_number);
+					return retval;
+				}
+
+				retval = synaptics_rmi4_f01_init(rmi4_data,
+						fhandler, &rmi_fd, intr_count);
+				if (retval < 0)
+					return retval;
+
+				retval = synaptics_rmi4_check_status(rmi4_data,
+						&was_in_bl_mode);
+				if (retval < 0) {
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Failed to check status\n",
+							__func__);
+					return retval;
+				}
+
+				/* PDT may differ after bootloader exit */
+				if (was_in_bl_mode) {
+					kfree(fhandler);
+					fhandler = NULL;
+					goto rescan_pdt;
+				}
+
+				/* in flash programming mode only F01 matters */
+				if (rmi4_data->flash_prog_mode)
+					goto flash_prog_mode;
+
+				break;
+			case SYNAPTICS_RMI4_F11:
+				if (rmi_fd.intr_src_count == 0)
+					break;
+
+				retval = synaptics_rmi4_alloc_fh(&fhandler,
+						&rmi_fd, page_number);
+				if (retval < 0) {
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Failed to alloc for F%d\n",
+							__func__,
+							rmi_fd.fn_number);
+					return retval;
+				}
+
+				retval = synaptics_rmi4_f11_init(rmi4_data,
+						fhandler, &rmi_fd, intr_count);
+				if (retval < 0)
+					return retval;
+				break;
+			case SYNAPTICS_RMI4_F12:
+				if (rmi_fd.intr_src_count == 0)
+					break;
+
+				retval = synaptics_rmi4_alloc_fh(&fhandler,
+						&rmi_fd, page_number);
+				if (retval < 0) {
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Failed to alloc for F%d\n",
+							__func__,
+							rmi_fd.fn_number);
+					return retval;
+				}
+
+				retval = synaptics_rmi4_f12_init(rmi4_data,
+						fhandler, &rmi_fd, intr_count);
+				if (retval < 0)
+					return retval;
+				break;
+			case SYNAPTICS_RMI4_F1A:
+				if (rmi_fd.intr_src_count == 0)
+					break;
+
+				retval = synaptics_rmi4_alloc_fh(&fhandler,
+						&rmi_fd, page_number);
+				if (retval < 0) {
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Failed to alloc for F%d\n",
+							__func__,
+							rmi_fd.fn_number);
+					return retval;
+				}
+
+				retval = synaptics_rmi4_f1a_init(rmi4_data,
+						fhandler, &rmi_fd, intr_count);
+				if (retval < 0) {
+#ifdef IGNORE_FN_INIT_FAILURE
+					kfree(fhandler);
+					fhandler = NULL;
+#else
+					return retval;
+#endif
+				}
+				break;
+#ifdef USE_DATA_SERVER
+			case SYNAPTICS_RMI4_F21:
+				if (rmi_fd.intr_src_count == 0)
+					break;
+
+				retval = synaptics_rmi4_alloc_fh(&fhandler,
+						&rmi_fd, page_number);
+				if (retval < 0) {
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Failed to alloc for F%d\n",
+							__func__,
+							rmi_fd.fn_number);
+					return retval;
+				}
+
+				fhandler->fn_number = rmi_fd.fn_number;
+				fhandler->num_of_data_sources =
+						rmi_fd.intr_src_count;
+
+				synaptics_rmi4_set_intr_mask(fhandler, &rmi_fd,
+						intr_count);
+				break;
+#endif
+			case SYNAPTICS_RMI4_F35:
+				/* F35 alone means microbootloader mode */
+				f35found = true;
+				break;
+#ifdef F51_DISCRETE_FORCE
+			case SYNAPTICS_RMI4_F51:
+				rmi4_data->f51_query_base_addr =
+						rmi_fd.query_base_addr |
+						(page_number << 8);
+				break;
+#endif
+			}
+
+			/* Accumulate the interrupt count */
+			intr_count += rmi_fd.intr_src_count;
+
+			if (fhandler && rmi_fd.intr_src_count) {
+				list_add_tail(&fhandler->link,
+						&rmi->support_fn_list);
+			}
+		}
+	}
+
+	if (!f01found) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to find F01\n",
+				__func__);
+		if (!f35found) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to find F35\n",
+					__func__);
+			return -EINVAL;
+		} else {
+			pr_notice("%s: In microbootloader mode\n",
+					__func__);
+			return 0;
+		}
+	}
+
+/* Jumped to directly when the device is in flash programming mode */
+flash_prog_mode:
+	/* one interrupt status register covers 8 interrupt sources */
+	rmi4_data->num_of_intr_regs = (intr_count + 7) / 8;
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Number of interrupt registers = %d\n",
+			__func__, rmi4_data->num_of_intr_regs);
+
+	f01_query = kmalloc(F01_STD_QUERY_LEN, GFP_KERNEL);
+	if (!f01_query) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for f01_query\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_query_base_addr,
+			f01_query,
+			F01_STD_QUERY_LEN);
+	if (retval < 0) {
+		kfree(f01_query);
+		return retval;
+	}
+
+	/* RMI Version 4.0 currently supported */
+	rmi->version_major = 4;
+	rmi->version_minor = 0;
+
+	/* fixed offsets within the F01 standard query block */
+	rmi->manufacturer_id = f01_query[0];
+	rmi->product_props = f01_query[1];
+	rmi->product_info[0] = f01_query[2];
+	rmi->product_info[1] = f01_query[3];
+	retval = secure_memcpy(rmi->product_id_string,
+			sizeof(rmi->product_id_string),
+			&f01_query[11],
+			F01_STD_QUERY_LEN - 11,
+			PRODUCT_ID_SIZE);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy product ID string\n",
+				__func__);
+	}
+
+	kfree(f01_query);
+
+	/* manufacturer ID 1 == Synaptics; anything else is only warned */
+	if (rmi->manufacturer_id != 1) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Non-Synaptics device found, manufacturer ID = %d\n",
+				__func__, rmi->manufacturer_id);
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_query_base_addr + F01_BUID_ID_OFFSET,
+			rmi->build_id,
+			sizeof(rmi->build_id));
+	if (retval < 0)
+		return retval;
+
+	/* build ID is a 3-byte little-endian value */
+	rmi4_data->firmware_id = (unsigned int)rmi->build_id[0] +
+			(unsigned int)rmi->build_id[1] * 0x100 +
+			(unsigned int)rmi->build_id[2] * 0x10000;
+
+	memset(rmi4_data->intr_mask, 0x00, sizeof(rmi4_data->intr_mask));
+
+	/*
+	 * Map out the interrupt bit masks for the interrupt sources
+	 * from the registered function handlers.
+	 */
+	if (!list_empty(&rmi->support_fn_list)) {
+		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+			if (fhandler->num_of_data_sources) {
+				rmi4_data->intr_mask[fhandler->intr_reg_num] |=
+						fhandler->intr_mask;
+			}
+		}
+	}
+
+	if (rmi4_data->f11_wakeup_gesture || rmi4_data->f12_wakeup_gesture)
+		rmi4_data->enable_wakeup_gesture = WAKEUP_GESTURE;
+	else
+		rmi4_data->enable_wakeup_gesture = false;
+
+	synaptics_rmi4_set_configured(rmi4_data);
+
+	return 0;
+}
+
+/*
+ * Request and configure (config == true) or release (config == false)
+ * a GPIO.  @dir selects input (0) or output (non-zero); @state is the
+ * initial level for an output line.  Returns 0 on success or a
+ * negative errno from the gpiolib calls.
+ */
+static int synaptics_rmi4_gpio_setup(int gpio, bool config, int dir, int state)
+{
+	int retval = 0;
+	unsigned char buf[16];
+
+	if (config) {
+		/*
+		 * Bound the label by the real buffer size; the original
+		 * passed PAGE_SIZE, permitting writes past buf[16].  The
+		 * label also no longer carries a stray trailing newline.
+		 */
+		snprintf(buf, sizeof(buf), "dsx_gpio_%u", gpio);
+
+		retval = gpio_request(gpio, buf);
+		if (retval) {
+			pr_err("%s: Failed to get gpio %d (code: %d)",
+					__func__, gpio, retval);
+			return retval;
+		}
+
+		if (dir == 0)
+			retval = gpio_direction_input(gpio);
+		else
+			retval = gpio_direction_output(gpio, state);
+		if (retval) {
+			pr_err("%s: Failed to set gpio %d direction",
+					__func__, gpio);
+			/* don't leak the line we just requested */
+			gpio_free(gpio);
+			return retval;
+		}
+	} else {
+		gpio_free(gpio);
+	}
+
+	return retval;
+}
+
+/*
+ * Configure the input device's axes, multitouch slots and key bits from
+ * the parameters queried off the device, and cache those parameters in
+ * rmi4_data->input_settings so a later synaptics_rmi4_do_rebuild() can
+ * detect configuration changes.
+ */
+static void synaptics_rmi4_set_params(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char ii;
+	struct synaptics_rmi4_f1a_handle *f1a;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	input_set_abs_params(rmi4_data->input_dev,
+			ABS_MT_POSITION_X, 0,
+			rmi4_data->sensor_max_x, 0, 0);
+	input_set_abs_params(rmi4_data->input_dev,
+			ABS_MT_POSITION_Y, 0,
+			rmi4_data->sensor_max_y, 0, 0);
+#ifdef REPORT_2D_W
+	input_set_abs_params(rmi4_data->input_dev,
+			ABS_MT_TOUCH_MAJOR, 0,
+			rmi4_data->max_touch_width, 0, 0);
+	input_set_abs_params(rmi4_data->input_dev,
+			ABS_MT_TOUCH_MINOR, 0,
+			rmi4_data->max_touch_width, 0, 0);
+#endif
+
+	/* snapshot current geometry for rebuild-detection */
+	rmi4_data->input_settings.sensor_max_x = rmi4_data->sensor_max_x;
+	rmi4_data->input_settings.sensor_max_y = rmi4_data->sensor_max_y;
+	rmi4_data->input_settings.max_touch_width = rmi4_data->max_touch_width;
+
+#ifdef REPORT_2D_PRESSURE
+	if (rmi4_data->report_pressure) {
+		input_set_abs_params(rmi4_data->input_dev,
+				ABS_MT_PRESSURE, rmi4_data->force_min,
+				rmi4_data->force_max, 0, 0);
+
+		rmi4_data->input_settings.force_min = rmi4_data->force_min;
+		rmi4_data->input_settings.force_max = rmi4_data->force_max;
+	}
+#elif defined(F51_DISCRETE_FORCE)
+	input_set_abs_params(rmi4_data->input_dev,
+			ABS_MT_PRESSURE, 0,
+			FORCE_LEVEL_MAX, 0, 0);
+#endif
+
+#ifdef TYPE_B_PROTOCOL
+#ifdef KERNEL_ABOVE_3_6
+	input_mt_init_slots(rmi4_data->input_dev,
+			rmi4_data->num_of_fingers, INPUT_MT_DIRECT);
+#else
+	input_mt_init_slots(rmi4_data->input_dev,
+			rmi4_data->num_of_fingers);
+#endif
+#endif
+
+	rmi4_data->input_settings.num_of_fingers = rmi4_data->num_of_fingers;
+
+	/* locate the F1A handler, if any, to register its 0D buttons */
+	f1a = NULL;
+	if (!list_empty(&rmi->support_fn_list)) {
+		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+			if (fhandler->fn_number == SYNAPTICS_RMI4_F1A)
+				f1a = fhandler->data;
+		}
+	}
+
+	if (f1a) {
+		for (ii = 0; ii < f1a->valid_button_count; ii++) {
+			set_bit(f1a->button_map[ii],
+					rmi4_data->input_dev->keybit);
+			input_set_capability(rmi4_data->input_dev,
+					EV_KEY, f1a->button_map[ii]);
+		}
+
+		rmi4_data->input_settings.valid_button_count =
+				f1a->valid_button_count;
+	}
+
+	/*
+	 * NOTE(review): map[] appears to hold 5 values per virtual button
+	 * with the keycode first (stride 5) — confirm against the code
+	 * that parses vir_button_map.
+	 */
+	if (vir_button_map->nbuttons) {
+		for (ii = 0; ii < vir_button_map->nbuttons; ii++) {
+			set_bit(vir_button_map->map[ii * 5],
+					rmi4_data->input_dev->keybit);
+			input_set_capability(rmi4_data->input_dev,
+					EV_KEY, vir_button_map->map[ii * 5]);
+		}
+	}
+
+	if (rmi4_data->f11_wakeup_gesture || rmi4_data->f12_wakeup_gesture) {
+		set_bit(KEY_WAKEUP, rmi4_data->input_dev->keybit);
+		input_set_capability(rmi4_data->input_dev, EV_KEY, KEY_WAKEUP);
+	}
+}
+
+/*
+ * Allocate, configure and register the main touch input device (and,
+ * when stylus support is enabled, a second stylus input device).  Also
+ * queries the device to discover its functions before configuring.
+ * Returns 0 on success or a negative errno; on failure everything set
+ * up so far is unwound.
+ */
+static int synaptics_rmi4_set_input_dev(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	const struct synaptics_dsx_board_data *bdata =
+				rmi4_data->hw_if->board_data;
+
+	rmi4_data->input_dev = input_allocate_device();
+	if (rmi4_data->input_dev == NULL) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to allocate input device\n",
+				__func__);
+		retval = -ENOMEM;
+		goto err_input_device;
+	}
+
+	retval = synaptics_rmi4_query_device(rmi4_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to query device\n",
+				__func__);
+		goto err_query_device;
+	}
+
+	rmi4_data->input_dev->name = PLATFORM_DRIVER_NAME;
+	rmi4_data->input_dev->phys = INPUT_PHYS_NAME;
+	rmi4_data->input_dev->id.product = SYNAPTICS_DSX_DRIVER_PRODUCT;
+	rmi4_data->input_dev->id.version = SYNAPTICS_DSX_DRIVER_VERSION;
+	rmi4_data->input_dev->dev.parent = rmi4_data->pdev->dev.parent;
+	input_set_drvdata(rmi4_data->input_dev, rmi4_data);
+
+	set_bit(EV_SYN, rmi4_data->input_dev->evbit);
+	set_bit(EV_KEY, rmi4_data->input_dev->evbit);
+	set_bit(EV_ABS, rmi4_data->input_dev->evbit);
+	set_bit(BTN_TOUCH, rmi4_data->input_dev->keybit);
+	set_bit(BTN_TOOL_FINGER, rmi4_data->input_dev->keybit);
+#ifdef INPUT_PROP_DIRECT
+	set_bit(INPUT_PROP_DIRECT, rmi4_data->input_dev->propbit);
+#endif
+
+	/* board data may clip the 2D area below the physical maximum */
+	if (bdata->max_y_for_2d >= 0)
+		rmi4_data->sensor_max_y = bdata->max_y_for_2d;
+
+	synaptics_rmi4_set_params(rmi4_data);
+
+	retval = input_register_device(rmi4_data->input_dev);
+	if (retval) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to register input device\n",
+				__func__);
+		goto err_register_input;
+	}
+
+	rmi4_data->input_settings.stylus_enable = rmi4_data->stylus_enable;
+	rmi4_data->input_settings.eraser_enable = rmi4_data->eraser_enable;
+
+	if (!rmi4_data->stylus_enable)
+		return 0;
+
+	rmi4_data->stylus_dev = input_allocate_device();
+	if (rmi4_data->stylus_dev == NULL) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to allocate stylus device\n",
+				__func__);
+		retval = -ENOMEM;
+		goto err_stylus_device;
+	}
+
+	rmi4_data->stylus_dev->name = STYLUS_DRIVER_NAME;
+	rmi4_data->stylus_dev->phys = STYLUS_PHYS_NAME;
+	rmi4_data->stylus_dev->id.product = SYNAPTICS_DSX_DRIVER_PRODUCT;
+	rmi4_data->stylus_dev->id.version = SYNAPTICS_DSX_DRIVER_VERSION;
+	rmi4_data->stylus_dev->dev.parent = rmi4_data->pdev->dev.parent;
+	input_set_drvdata(rmi4_data->stylus_dev, rmi4_data);
+
+	set_bit(EV_KEY, rmi4_data->stylus_dev->evbit);
+	set_bit(EV_ABS, rmi4_data->stylus_dev->evbit);
+	set_bit(BTN_TOUCH, rmi4_data->stylus_dev->keybit);
+	set_bit(BTN_TOOL_PEN, rmi4_data->stylus_dev->keybit);
+	if (rmi4_data->eraser_enable)
+		set_bit(BTN_TOOL_RUBBER, rmi4_data->stylus_dev->keybit);
+#ifdef INPUT_PROP_DIRECT
+	set_bit(INPUT_PROP_DIRECT, rmi4_data->stylus_dev->propbit);
+#endif
+
+	input_set_abs_params(rmi4_data->stylus_dev, ABS_X, 0,
+			rmi4_data->sensor_max_x, 0, 0);
+	input_set_abs_params(rmi4_data->stylus_dev, ABS_Y, 0,
+			rmi4_data->sensor_max_y, 0, 0);
+
+	retval = input_register_device(rmi4_data->stylus_dev);
+	if (retval) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to register stylus device\n",
+				__func__);
+		goto err_register_stylus;
+	}
+
+	return 0;
+
+err_register_stylus:
+	rmi4_data->stylus_dev = NULL;
+
+err_stylus_device:
+	/*
+	 * input_dev is NULLed before falling through, so the
+	 * input_free_device() below becomes a harmless no-op;
+	 * unregister already released the device.
+	 */
+	input_unregister_device(rmi4_data->input_dev);
+	rmi4_data->input_dev = NULL;
+
+err_register_input:
+err_query_device:
+	synaptics_rmi4_empty_fn_list(rmi4_data);
+	input_free_device(rmi4_data->input_dev);
+
+err_input_device:
+	return retval;
+}
+
+/*
+ * Claim and configure the attention (IRQ), power and reset GPIOs from
+ * board data, then power the controller on and pulse its reset line.
+ * Power/reset GPIOs are optional (negative number == not present).
+ * Returns 0 on success; on failure, GPIOs claimed so far are released.
+ */
+static int synaptics_rmi4_set_gpio(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	/* attention line: input, used as the interrupt source */
+	retval = synaptics_rmi4_gpio_setup(
+			bdata->irq_gpio,
+			true, 0, 0);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to configure attention GPIO\n",
+				__func__);
+		goto err_gpio_irq;
+	}
+
+	/* power line starts in the inactive state */
+	if (bdata->power_gpio >= 0) {
+		retval = synaptics_rmi4_gpio_setup(
+				bdata->power_gpio,
+				true, 1, !bdata->power_on_state);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to configure power GPIO\n",
+					__func__);
+			goto err_gpio_power;
+		}
+	}
+
+	/* reset line starts deasserted */
+	if (bdata->reset_gpio >= 0) {
+		retval = synaptics_rmi4_gpio_setup(
+				bdata->reset_gpio,
+				true, 1, !bdata->reset_on_state);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to configure reset GPIO\n",
+					__func__);
+			goto err_gpio_reset;
+		}
+	}
+
+	/* power on, then wait for the controller's power-up delay */
+	if (bdata->power_gpio >= 0) {
+		gpio_set_value(bdata->power_gpio, bdata->power_on_state);
+		msleep(bdata->power_delay_ms);
+	}
+
+	/* assert reset, hold, deassert, then wait for reset recovery */
+	if (bdata->reset_gpio >= 0) {
+		gpio_set_value(bdata->reset_gpio, bdata->reset_on_state);
+		msleep(bdata->reset_active_ms);
+		gpio_set_value(bdata->reset_gpio, !bdata->reset_on_state);
+		msleep(bdata->reset_delay_ms);
+	}
+
+	return 0;
+
+err_gpio_reset:
+	if (bdata->power_gpio >= 0)
+		synaptics_rmi4_gpio_setup(bdata->power_gpio, false, 0, 0);
+
+err_gpio_power:
+	synaptics_rmi4_gpio_setup(bdata->irq_gpio, false, 0, 0);
+
+err_gpio_irq:
+	return retval;
+}
+
+/*
+ * Look up the pinctrl handle and the active/suspend/release pin states.
+ * The release state is optional: failing to find it is reported but not
+ * fatal.  Returns 0 on success or a negative errno.
+ */
+static int synaptics_dsx_pinctrl_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+
+	/* Get pinctrl if target uses pinctrl */
+	rmi4_data->ts_pinctrl = devm_pinctrl_get((rmi4_data->pdev->dev.parent));
+	if (IS_ERR_OR_NULL(rmi4_data->ts_pinctrl)) {
+		/*
+		 * PTR_ERR(NULL) evaluates to 0, which the caller would
+		 * mistake for success; substitute a real error code.
+		 */
+		retval = rmi4_data->ts_pinctrl ?
+				PTR_ERR(rmi4_data->ts_pinctrl) : -ENODEV;
+		dev_err(rmi4_data->pdev->dev.parent,
+			"Target does not use pinctrl %d\n", retval);
+		goto err_pinctrl_get;
+	}
+
+	rmi4_data->pinctrl_state_active
+		= pinctrl_lookup_state(rmi4_data->ts_pinctrl, "pmx_ts_active");
+	if (IS_ERR_OR_NULL(rmi4_data->pinctrl_state_active)) {
+		retval = rmi4_data->pinctrl_state_active ?
+				PTR_ERR(rmi4_data->pinctrl_state_active) :
+				-EINVAL;
+		dev_err(rmi4_data->pdev->dev.parent,
+			"Can not lookup %s pinstate %d\n",
+			PINCTRL_STATE_ACTIVE, retval);
+		goto err_pinctrl_lookup;
+	}
+
+	rmi4_data->pinctrl_state_suspend
+		= pinctrl_lookup_state(rmi4_data->ts_pinctrl, "pmx_ts_suspend");
+	if (IS_ERR_OR_NULL(rmi4_data->pinctrl_state_suspend)) {
+		retval = rmi4_data->pinctrl_state_suspend ?
+				PTR_ERR(rmi4_data->pinctrl_state_suspend) :
+				-EINVAL;
+		dev_err(rmi4_data->pdev->dev.parent,
+			"Can not lookup %s pinstate %d\n",
+			PINCTRL_STATE_SUSPEND, retval);
+		goto err_pinctrl_lookup;
+	}
+
+	rmi4_data->pinctrl_state_release
+		= pinctrl_lookup_state(rmi4_data->ts_pinctrl, "pmx_ts_release");
+	if (IS_ERR_OR_NULL(rmi4_data->pinctrl_state_release)) {
+		/* optional state: log the failure but continue */
+		retval = PTR_ERR(rmi4_data->pinctrl_state_release);
+		dev_err(rmi4_data->pdev->dev.parent,
+			"Can not lookup %s pinstate %d\n",
+			PINCTRL_STATE_RELEASE, retval);
+	}
+
+	return 0;
+
+err_pinctrl_lookup:
+	devm_pinctrl_put(rmi4_data->ts_pinctrl);
+err_pinctrl_get:
+	rmi4_data->ts_pinctrl = NULL;
+	return retval;
+}
+
+
+/*
+ * Acquire (get == true) or release (get == false) the power (avdd) and
+ * bus pull-up (vdd) regulators named in the board data, programming
+ * their load currents and voltages.  Regulators with no configured name
+ * are skipped entirely.  Returns 0 on success or a negative errno; on
+ * failure everything acquired so far is released again.
+ */
+static int synaptics_rmi4_get_reg(struct synaptics_rmi4_data *rmi4_data,
+		bool get)
+{
+	int retval;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	if (!get) {
+		retval = 0;
+		goto regulator_put;
+	}
+
+	if ((bdata->pwr_reg_name != NULL) && (*bdata->pwr_reg_name != 0)) {
+		rmi4_data->pwr_reg = regulator_get(rmi4_data->pdev->dev.parent,
+				bdata->pwr_reg_name);
+		if (IS_ERR(rmi4_data->pwr_reg)) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to get power regulator\n",
+					__func__);
+			retval = PTR_ERR(rmi4_data->pwr_reg);
+			/* never leave an ERR_PTR behind for cleanup code */
+			rmi4_data->pwr_reg = NULL;
+			goto regulator_put;
+		}
+
+		/*
+		 * Only program a regulator we actually obtained; the
+		 * original called set_load/set_voltage unconditionally,
+		 * passing NULL when no power regulator was configured.
+		 */
+		retval = regulator_set_load(rmi4_data->pwr_reg,
+			20000);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set regulator current avdd\n",
+					__func__);
+			goto regulator_put;
+		}
+
+		retval = regulator_set_voltage(rmi4_data->pwr_reg,
+				3000000,
+				3000000);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to set regulator voltage avdd\n",
+					__func__);
+			goto regulator_put;
+		}
+	}
+
+	if ((bdata->bus_reg_name != NULL) && (*bdata->bus_reg_name != 0)) {
+		rmi4_data->bus_reg = regulator_get(rmi4_data->pdev->dev.parent,
+				bdata->bus_reg_name);
+		if (IS_ERR(rmi4_data->bus_reg)) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to get bus pullup regulator\n",
+					__func__);
+			retval = PTR_ERR(rmi4_data->bus_reg);
+			rmi4_data->bus_reg = NULL;
+			goto regulator_put;
+		}
+
+		retval = regulator_set_load(rmi4_data->bus_reg,
+			62000);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to set regulator current vdd\n",
+					__func__);
+			goto regulator_put;
+		}
+
+		/* message fixed: this is the vdd (bus) rail, not avdd */
+		retval = regulator_set_voltage(rmi4_data->bus_reg,
+				1800000,
+				1800000);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to set regulator voltage vdd\n",
+					__func__);
+			goto regulator_put;
+		}
+	}
+
+	return 0;
+
+regulator_put:
+	if (rmi4_data->pwr_reg) {
+		regulator_put(rmi4_data->pwr_reg);
+		rmi4_data->pwr_reg = NULL;
+	}
+
+	if (rmi4_data->bus_reg) {
+		regulator_put(rmi4_data->bus_reg);
+		rmi4_data->bus_reg = NULL;
+	}
+
+	return retval;
+}
+
+/*
+ * Enable (enable == true) or disable the supplies in sequence: the bus
+ * pull-up (vdd) comes up first, then the power rail (avdd) followed by
+ * the board's power-up delay.  Disable runs in reverse.  vdd_status /
+ * avdd_status keep enable/disable calls balanced across invocations.
+ * Returns 0 on success or the regulator_enable() error.
+ */
+static int synaptics_rmi4_enable_reg(struct synaptics_rmi4_data *rmi4_data,
+		bool enable)
+{
+	int retval;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	if (!enable) {
+		retval = 0;
+		goto disable_pwr_reg;
+	}
+
+	if (rmi4_data->bus_reg && rmi4_data->vdd_status == 0) {
+		retval = regulator_enable(rmi4_data->bus_reg);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to enable bus pullup regulator\n",
+					__func__);
+			goto exit;
+		}
+		rmi4_data->vdd_status = 1;
+	}
+
+	if (rmi4_data->pwr_reg && rmi4_data->avdd_status == 0) {
+		retval = regulator_enable(rmi4_data->pwr_reg);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to enable power regulator\n",
+					__func__);
+			/* roll back the bus supply we just enabled */
+			goto disable_bus_reg;
+		}
+		rmi4_data->avdd_status = 1;
+		msleep(bdata->power_delay_ms);
+	}
+
+	return 0;
+
+disable_pwr_reg:
+	if (rmi4_data->pwr_reg && rmi4_data->avdd_status == 1) {
+		regulator_disable(rmi4_data->pwr_reg);
+		rmi4_data->avdd_status = 0;
+	}
+
+disable_bus_reg:
+	if (rmi4_data->bus_reg && rmi4_data->vdd_status == 1) {
+		regulator_disable(rmi4_data->bus_reg);
+		rmi4_data->vdd_status = 0;
+	}
+
+exit:
+	return retval;
+}
+
+/*
+ * Report every touch contact and tool as released on both the finger
+ * and (if enabled) stylus input devices, under the report mutex so the
+ * release events cannot interleave with a live interrupt report.
+ * Always returns 0.
+ */
+static int synaptics_rmi4_free_fingers(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char ii;
+
+	mutex_lock(&(rmi4_data->rmi4_report_mutex));
+
+#ifdef TYPE_B_PROTOCOL
+	/* type B: each slot must be released individually */
+	for (ii = 0; ii < rmi4_data->num_of_fingers; ii++) {
+		input_mt_slot(rmi4_data->input_dev, ii);
+		input_mt_report_slot_state(rmi4_data->input_dev,
+				MT_TOOL_FINGER, 0);
+	}
+#endif
+	input_report_key(rmi4_data->input_dev,
+			BTN_TOUCH, 0);
+	input_report_key(rmi4_data->input_dev,
+			BTN_TOOL_FINGER, 0);
+#ifndef TYPE_B_PROTOCOL
+	input_mt_sync(rmi4_data->input_dev);
+#endif
+	input_sync(rmi4_data->input_dev);
+
+	if (rmi4_data->stylus_enable) {
+		input_report_key(rmi4_data->stylus_dev,
+				BTN_TOUCH, 0);
+		input_report_key(rmi4_data->stylus_dev,
+				BTN_TOOL_PEN, 0);
+		if (rmi4_data->eraser_enable) {
+			input_report_key(rmi4_data->stylus_dev,
+					BTN_TOOL_RUBBER, 0);
+		}
+		input_sync(rmi4_data->stylus_dev);
+	}
+
+	mutex_unlock(&(rmi4_data->rmi4_report_mutex));
+
+	rmi4_data->fingers_on_2d = false;
+
+	return 0;
+}
+
+/*
+ * Issue a software reset through the F01 command register, wait out the
+ * board's reset delay, then rerun the bus interface's hardware init
+ * hook if one is registered.  Returns 0 or a negative errno.
+ */
+static int synaptics_rmi4_sw_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char reset_cmd = 0x01;
+	int rc;
+
+	rc = synaptics_rmi4_reg_write(rmi4_data,
+			rmi4_data->f01_cmd_base_addr,
+			&reset_cmd,
+			sizeof(reset_cmd));
+	if (rc < 0)
+		return rc;
+
+	msleep(rmi4_data->hw_if->board_data->reset_delay_ms);
+
+	if (!rmi4_data->hw_if->ui_hw_init)
+		return 0;
+
+	rc = rmi4_data->hw_if->ui_hw_init(rmi4_data);
+	if (rc < 0)
+		return rc;
+
+	return 0;
+}
+
+/*
+ * Decide whether the input device must be rebuilt: returns 1 when any
+ * cached input setting no longer matches the live device configuration,
+ * 0 when everything still agrees.
+ */
+static int synaptics_rmi4_do_rebuild(struct synaptics_rmi4_data *rmi4_data)
+{
+	const struct synaptics_rmi4_input_settings *s =
+			&(rmi4_data->input_settings);
+
+	if (s->num_of_fingers != rmi4_data->num_of_fingers ||
+			s->valid_button_count != rmi4_data->valid_button_count ||
+			s->max_touch_width != rmi4_data->max_touch_width ||
+			s->sensor_max_x != rmi4_data->sensor_max_x ||
+			s->sensor_max_y != rmi4_data->sensor_max_y ||
+			s->force_min != rmi4_data->force_min ||
+			s->force_max != rmi4_data->force_max ||
+			s->stylus_enable != rmi4_data->stylus_enable ||
+			s->eraser_enable != rmi4_data->eraser_enable)
+		return 1;
+
+	return 0;
+}
+
+/*
+ * Deferred work that tears down and rebuilds the input device(s) after
+ * a configuration change: with the IRQ masked and both the reset and
+ * expansion-data mutexes held, it removes expansion modules and sysfs
+ * attributes, unregisters the input devices, re-runs device setup, then
+ * recreates the attributes and re-initializes the expansion modules.
+ */
+static void synaptics_rmi4_rebuild_work(struct work_struct *work)
+{
+	int retval;
+	unsigned char attr_count;
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct delayed_work *delayed_work =
+			container_of(work, struct delayed_work, work);
+	struct synaptics_rmi4_data *rmi4_data =
+			container_of(delayed_work, struct synaptics_rmi4_data,
+			rb_work);
+
+	mutex_lock(&(rmi4_data->rmi4_reset_mutex));
+
+	mutex_lock(&exp_data.mutex);
+
+	/* keep the interrupt quiet for the whole rebuild */
+	synaptics_rmi4_irq_enable(rmi4_data, false, false);
+
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->remove != NULL)
+				exp_fhandler->exp_fn->remove(rmi4_data);
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+	synaptics_rmi4_free_fingers(rmi4_data);
+	synaptics_rmi4_empty_fn_list(rmi4_data);
+	input_unregister_device(rmi4_data->input_dev);
+	rmi4_data->input_dev = NULL;
+	if (rmi4_data->stylus_enable) {
+		input_unregister_device(rmi4_data->stylus_dev);
+		rmi4_data->stylus_dev = NULL;
+	}
+
+	retval = synaptics_rmi4_set_input_dev(rmi4_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set up input device\n",
+				__func__);
+		goto exit;
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			goto exit;
+		}
+	}
+
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->init != NULL)
+				exp_fhandler->exp_fn->init(rmi4_data);
+	}
+
+exit:
+	synaptics_rmi4_irq_enable(rmi4_data, true, false);
+
+	mutex_unlock(&exp_data.mutex);
+
+	mutex_unlock(&(rmi4_data->rmi4_reset_mutex));
+}
+
+/*
+ * Re-initialize the device after e.g. resume without a full reset:
+ * release stale touches, re-apply F12 report enables, unmask the
+ * interrupt sources, give expansion modules their reinit callback, and
+ * re-assert the CONFIGURED bit.  Returns 0 or a negative errno.
+ */
+static int synaptics_rmi4_reinit_device(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	mutex_lock(&(rmi4_data->rmi4_reset_mutex));
+
+	synaptics_rmi4_free_fingers(rmi4_data);
+
+	/* only F12 needs its report enables reprogrammed */
+	if (!list_empty(&rmi->support_fn_list)) {
+		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+			if (fhandler->fn_number == SYNAPTICS_RMI4_F12) {
+				synaptics_rmi4_f12_set_enables(rmi4_data, 0);
+				break;
+			}
+		}
+	}
+
+	retval = synaptics_rmi4_int_enable(rmi4_data, true);
+	if (retval < 0)
+		goto exit;
+
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->reinit != NULL)
+				exp_fhandler->exp_fn->reinit(rmi4_data);
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	synaptics_rmi4_set_configured(rmi4_data);
+
+	retval = 0;
+
+exit:
+	mutex_unlock(&(rmi4_data->rmi4_reset_mutex));
+	return retval;
+}
+
+/*
+ * Full device reset: with the IRQ masked and the reset mutex held,
+ * issue a software reset, release stale touches, rebuild the function
+ * handler list by re-querying the device, and notify expansion modules.
+ * When @rebuild is set and the queried configuration differs from the
+ * cached one, a deferred input-device rebuild is scheduled afterwards.
+ * Returns 0 or a negative errno.
+ */
+static int synaptics_rmi4_reset_device(struct synaptics_rmi4_data *rmi4_data,
+		bool rebuild)
+{
+	int retval;
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+
+	mutex_lock(&(rmi4_data->rmi4_reset_mutex));
+
+	synaptics_rmi4_irq_enable(rmi4_data, false, false);
+
+	retval = synaptics_rmi4_sw_reset(rmi4_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to issue reset command\n",
+				__func__);
+		goto exit;
+	}
+
+	synaptics_rmi4_free_fingers(rmi4_data);
+
+	synaptics_rmi4_empty_fn_list(rmi4_data);
+
+	retval = synaptics_rmi4_query_device(rmi4_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to query device\n",
+				__func__);
+		goto exit;
+	}
+
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->reset != NULL)
+				exp_fhandler->exp_fn->reset(rmi4_data);
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	retval = 0;
+
+exit:
+	synaptics_rmi4_irq_enable(rmi4_data, true, false);
+
+	mutex_unlock(&(rmi4_data->rmi4_reset_mutex));
+
+	/* schedule the rebuild outside the reset mutex to avoid deadlock */
+	if (rebuild && synaptics_rmi4_do_rebuild(rmi4_data)) {
+		queue_delayed_work(rmi4_data->rb_workqueue,
+				&rmi4_data->rb_work,
+				msecs_to_jiffies(REBUILD_WORK_DELAY_MS));
+	}
+
+	return retval;
+}
+
+#ifdef FB_READY_RESET
+/*
+ * Deferred reset: wait (up to FB_READY_TIMEOUT_S) for the framebuffer
+ * to signal readiness, then issue a device reset under the expansion-
+ * function init mutex.
+ */
+static void synaptics_rmi4_reset_work(struct work_struct *work)
+{
+	int retval = 0;
+	unsigned int timeout;
+	struct synaptics_rmi4_data *rmi4_data =
+			container_of(work, struct synaptics_rmi4_data,
+			reset_work);
+
+	timeout = FB_READY_TIMEOUT_S * 1000 / FB_READY_WAIT_MS + 1;
+
+	while (!rmi4_data->fb_ready) {
+		msleep(FB_READY_WAIT_MS);
+		timeout--;
+		if (timeout == 0) {
+			/*
+			 * The original fell through an err: label and
+			 * logged this timeout unconditionally, even after
+			 * a successful reset.  Report it once, here, and
+			 * bail out.
+			 */
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Timed out waiting for FB ready\n",
+					__func__);
+			return;
+		}
+	}
+
+	mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+
+	retval = synaptics_rmi4_reset_device(rmi4_data, false);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to issue reset command\n",
+				__func__);
+	}
+
+	mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+}
+#endif
+
+/*
+ * Put the sensor to sleep (enable == true) or wake it back to normal
+ * operation, restoring the previously captured no-sleep preference on
+ * wake.  Returns 0 on success or a bus error code.
+ */
+static int synaptics_rmi4_sleep_enable(struct synaptics_rmi4_data *rmi4_data,
+		bool enable)
+{
+	unsigned char ctrl;
+	unsigned char no_sleep_setting = rmi4_data->no_sleep_setting;
+	int rc;
+
+	rc = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&ctrl,
+			sizeof(ctrl));
+	if (rc < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read device control\n",
+				__func__);
+		return rc;
+	}
+
+	/* clear the sleep-mode field, then set the requested mode */
+	ctrl &= ~MASK_3BIT;
+	if (enable)
+		ctrl |= SENSOR_SLEEP;
+	else
+		ctrl |= no_sleep_setting | NORMAL_OPERATION;
+
+	rc = synaptics_rmi4_reg_write(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&ctrl,
+			sizeof(ctrl));
+	if (rc < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write device control\n",
+				__func__);
+		return rc;
+	}
+
+	rmi4_data->sensor_sleep = enable;
+
+	return rc;
+}
+
+/*
+ * Deferred worker that services the expansion-function list: runs init()
+ * for newly inserted modules and remove() (plus list removal and free)
+ * for modules flagged for removal by synaptics_rmi4_new_function().
+ *
+ * Lock order is exp_init -> reset -> exp_data.mutex; do not change it,
+ * as other paths (e.g. reset) take these mutexes in the same order.
+ */
+static void synaptics_rmi4_exp_fn_work(struct work_struct *work)
+{
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler_temp;
+	struct synaptics_rmi4_data *rmi4_data = exp_data.rmi4_data;
+
+	mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+	mutex_lock(&rmi4_data->rmi4_reset_mutex);
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		/* _safe variant: remove() entries are deleted while iterating. */
+		list_for_each_entry_safe(exp_fhandler,
+				exp_fhandler_temp,
+				&exp_data.list,
+				link) {
+			if ((exp_fhandler->exp_fn->init != NULL) &&
+					exp_fhandler->insert) {
+				exp_fhandler->exp_fn->init(rmi4_data);
+				exp_fhandler->insert = false;
+			} else if ((exp_fhandler->exp_fn->remove != NULL) &&
+					exp_fhandler->remove) {
+				exp_fhandler->exp_fn->remove(rmi4_data);
+				list_del(&exp_fhandler->link);
+				kfree(exp_fhandler);
+			}
+		}
+	}
+	mutex_unlock(&exp_data.mutex);
+	mutex_unlock(&rmi4_data->rmi4_reset_mutex);
+	mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+}
+
+/*
+ * Register (insert = true) or flag for removal (insert = false) an
+ * expansion-function module. Actual init/remove happens later in
+ * synaptics_rmi4_exp_fn_work(), which this function queues when the
+ * worker infrastructure is up. Exported for the fw-update/test modules.
+ */
+void synaptics_rmi4_new_function(struct synaptics_rmi4_exp_fn *exp_fn,
+		bool insert)
+{
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+
+	/* Callers may run before probe(); lazily set up the shared state. */
+	if (!exp_data.initialized) {
+		mutex_init(&exp_data.mutex);
+		INIT_LIST_HEAD(&exp_data.list);
+		exp_data.initialized = true;
+	}
+
+	mutex_lock(&exp_data.mutex);
+	if (insert) {
+		exp_fhandler = kzalloc(sizeof(*exp_fhandler), GFP_KERNEL);
+		if (!exp_fhandler) {
+			pr_err("%s: Failed to alloc mem for expansion function\n",
+					__func__);
+			goto exit;
+		}
+		exp_fhandler->exp_fn = exp_fn;
+		exp_fhandler->insert = true;
+		exp_fhandler->remove = false;
+		list_add_tail(&exp_fhandler->link, &exp_data.list);
+	} else if (!list_empty(&exp_data.list)) {
+		/* Mark the matching entry; the worker frees it later. */
+		list_for_each_entry(exp_fhandler, &exp_data.list, link) {
+			if (exp_fhandler->exp_fn->fn_type == exp_fn->fn_type) {
+				exp_fhandler->insert = false;
+				exp_fhandler->remove = true;
+				goto exit;
+			}
+		}
+	}
+
+exit:
+	mutex_unlock(&exp_data.mutex);
+
+	/* queue_work is only set once probe() created the workqueue. */
+	if (exp_data.queue_work) {
+		queue_delayed_work(exp_data.workqueue,
+				&exp_data.work,
+				msecs_to_jiffies(EXP_FN_WORK_DELAY_MS));
+	}
+}
+EXPORT_SYMBOL(synaptics_rmi4_new_function);
+
+/*
+ * Platform-driver probe: allocates the per-device instance, brings up
+ * regulators, GPIOs and pinctrl, registers the input device, enables the
+ * attention IRQ, creates sysfs nodes and starts the worker threads.
+ *
+ * Fixes over the original:
+ *  - attr_count was unsigned char, so the err_sysfs unwind loop
+ *    (attr_count >= 0) never terminated and indexed attrs[] out of
+ *    bounds;
+ *  - kobject_create_and_add() failure jumped to the error path with
+ *    retval still 0, making probe report success after freeing state;
+ *  - the error path overwrote retval with pinctrl_select_state()'s
+ *    result, which could also turn a failure into a 0 return;
+ *  - err_ui_hw_init was placed after the GPIO teardown, leaking GPIOs
+ *    when ui_hw_init() failed;
+ *  - the pinctrl-release failure message was a copy-paste of the sysfs
+ *    message.
+ */
+static int synaptics_rmi4_probe(struct platform_device *pdev)
+{
+	int retval;
+	int attr_count;	/* signed: the unwind loop counts below zero */
+	struct synaptics_rmi4_data *rmi4_data;
+	const struct synaptics_dsx_hw_interface *hw_if;
+	const struct synaptics_dsx_board_data *bdata;
+
+	hw_if = pdev->dev.platform_data;
+	if (!hw_if) {
+		dev_err(&pdev->dev,
+				"%s: No hardware interface found\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	bdata = hw_if->board_data;
+	if (!bdata) {
+		dev_err(&pdev->dev,
+				"%s: No board data found\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	rmi4_data = kzalloc(sizeof(*rmi4_data), GFP_KERNEL);
+	if (!rmi4_data) {
+		dev_err(&pdev->dev,
+				"%s: Failed to alloc mem for rmi4_data\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	rmi4_data->pdev = pdev;
+	rmi4_data->current_page = MASK_8BIT;
+	rmi4_data->hw_if = hw_if;
+	rmi4_data->suspend = false;
+	rmi4_data->irq_enabled = false;
+	rmi4_data->fingers_on_2d = false;
+
+	rmi4_data->reset_device = synaptics_rmi4_reset_device;
+	rmi4_data->irq_enable = synaptics_rmi4_irq_enable;
+	rmi4_data->sleep_enable = synaptics_rmi4_sleep_enable;
+	rmi4_data->report_touch = synaptics_rmi4_report_touch;
+
+	mutex_init(&(rmi4_data->rmi4_reset_mutex));
+	mutex_init(&(rmi4_data->rmi4_report_mutex));
+	mutex_init(&(rmi4_data->rmi4_io_ctrl_mutex));
+	mutex_init(&(rmi4_data->rmi4_exp_init_mutex));
+	mutex_init(&(rmi4_data->rmi4_irq_enable_mutex));
+
+	platform_set_drvdata(pdev, rmi4_data);
+
+	vir_button_map = bdata->vir_button_map;
+
+	retval = synaptics_rmi4_get_reg(rmi4_data, true);
+	if (retval < 0) {
+		dev_err(&pdev->dev,
+				"%s: Failed to get regulators\n",
+				__func__);
+		goto err_get_reg;
+	}
+
+	retval = synaptics_rmi4_enable_reg(rmi4_data, true);
+	if (retval < 0) {
+		dev_err(&pdev->dev,
+				"%s: Failed to enable regulators\n",
+				__func__);
+		goto err_enable_reg;
+	}
+
+	retval = synaptics_rmi4_set_gpio(rmi4_data);
+	if (retval < 0) {
+		dev_err(&pdev->dev,
+				"%s: Failed to set up GPIO's\n",
+				__func__);
+		goto err_set_gpio;
+	}
+
+	retval = synaptics_dsx_pinctrl_init(rmi4_data);
+	if (!retval && rmi4_data->ts_pinctrl) {
+		/*
+		 * Pinctrl handle is optional. If pinctrl handle is found
+		 * let pins to be configured in active state. If not
+		 * found continue further without error.
+		 */
+		retval = pinctrl_select_state(rmi4_data->ts_pinctrl,
+				rmi4_data->pinctrl_state_active);
+		if (retval < 0) {
+			dev_err(&pdev->dev,
+				"%s: Failed to select %s pinstate %d\n",
+				__func__, PINCTRL_STATE_ACTIVE, retval);
+		}
+	}
+
+	if (hw_if->ui_hw_init) {
+		retval = hw_if->ui_hw_init(rmi4_data);
+		if (retval < 0) {
+			dev_err(&pdev->dev,
+					"%s: Failed to initialize hardware interface\n",
+					__func__);
+			goto err_ui_hw_init;
+		}
+	}
+
+	retval = synaptics_rmi4_set_input_dev(rmi4_data);
+	if (retval < 0) {
+		dev_err(&pdev->dev,
+				"%s: Failed to set up input device\n",
+				__func__);
+		goto err_set_input_dev;
+	}
+
+#ifdef CONFIG_FB
+	/* Suspend/resume is driven by DRM panel blank/unblank events. */
+	rmi4_data->fb_notifier.notifier_call = synaptics_rmi4_dsi_panel_notifier_cb;
+	retval = msm_drm_register_client(&rmi4_data->fb_notifier);
+	if (retval < 0) {
+		dev_err(&pdev->dev,
+				"%s: Failed to register fb notifier client\n",
+				__func__);
+	}
+#endif
+
+#ifdef USE_EARLYSUSPEND
+	rmi4_data->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+	rmi4_data->early_suspend.suspend = synaptics_rmi4_early_suspend;
+	rmi4_data->early_suspend.resume = synaptics_rmi4_late_resume;
+	register_early_suspend(&rmi4_data->early_suspend);
+#endif
+
+	/* May already be initialized by synaptics_rmi4_new_function(). */
+	if (!exp_data.initialized) {
+		mutex_init(&exp_data.mutex);
+		INIT_LIST_HEAD(&exp_data.list);
+		exp_data.initialized = true;
+	}
+
+	rmi4_data->irq = gpio_to_irq(bdata->irq_gpio);
+
+	retval = synaptics_rmi4_irq_enable(rmi4_data, true, false);
+	if (retval < 0) {
+		dev_err(&pdev->dev,
+				"%s: Failed to enable attention interrupt\n",
+				__func__);
+		goto err_enable_irq;
+	}
+
+	if (vir_button_map->nbuttons) {
+		rmi4_data->board_prop_dir = kobject_create_and_add(
+				"board_properties", NULL);
+		if (!rmi4_data->board_prop_dir) {
+			dev_err(&pdev->dev,
+					"%s: Failed to create board_properties directory\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_virtual_buttons;
+		} else {
+			retval = sysfs_create_file(rmi4_data->board_prop_dir,
+					&virtual_key_map_attr.attr);
+			if (retval < 0) {
+				dev_err(&pdev->dev,
+						"%s: Failed to create virtual key map file\n",
+						__func__);
+				goto err_virtual_buttons;
+			}
+		}
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(&pdev->dev,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			goto err_sysfs;
+		}
+	}
+
+#ifdef USE_DATA_SERVER
+	memset(&interrupt_signal, 0, sizeof(interrupt_signal));
+	interrupt_signal.si_signo = SIGIO;
+	interrupt_signal.si_code = SI_USER;
+#endif
+
+	rmi4_data->rb_workqueue =
+			create_singlethread_workqueue("dsx_rebuild_workqueue");
+	INIT_DELAYED_WORK(&rmi4_data->rb_work, synaptics_rmi4_rebuild_work);
+
+	exp_data.workqueue = create_singlethread_workqueue("dsx_exp_workqueue");
+	INIT_DELAYED_WORK(&exp_data.work, synaptics_rmi4_exp_fn_work);
+	exp_data.rmi4_data = rmi4_data;
+	exp_data.queue_work = true;
+	queue_delayed_work(exp_data.workqueue,
+			&exp_data.work,
+			0);
+
+#ifdef FB_READY_RESET
+	rmi4_data->reset_workqueue =
+			create_singlethread_workqueue("dsx_reset_workqueue");
+	INIT_WORK(&rmi4_data->reset_work, synaptics_rmi4_reset_work);
+	queue_work(rmi4_data->reset_workqueue, &rmi4_data->reset_work);
+#endif
+
+	return retval;
+
+err_sysfs:
+	/* attr_count is the index that failed; remove the earlier ones. */
+	for (attr_count--; attr_count >= 0; attr_count--) {
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+err_virtual_buttons:
+	if (rmi4_data->board_prop_dir) {
+		sysfs_remove_file(rmi4_data->board_prop_dir,
+				&virtual_key_map_attr.attr);
+		kobject_put(rmi4_data->board_prop_dir);
+	}
+
+	synaptics_rmi4_irq_enable(rmi4_data, false, false);
+
+err_enable_irq:
+#ifdef CONFIG_FB
+	msm_drm_unregister_client(&rmi4_data->fb_notifier);
+#endif
+
+#ifdef USE_EARLYSUSPEND
+	unregister_early_suspend(&rmi4_data->early_suspend);
+#endif
+
+	synaptics_rmi4_empty_fn_list(rmi4_data);
+	input_unregister_device(rmi4_data->input_dev);
+	rmi4_data->input_dev = NULL;
+	if (rmi4_data->stylus_enable) {
+		input_unregister_device(rmi4_data->stylus_dev);
+		rmi4_data->stylus_dev = NULL;
+	}
+
+err_set_input_dev:
+err_ui_hw_init:
+	synaptics_rmi4_gpio_setup(bdata->irq_gpio, false, 0, 0);
+
+	if (bdata->reset_gpio >= 0)
+		synaptics_rmi4_gpio_setup(bdata->reset_gpio, false, 0, 0);
+
+	if (bdata->power_gpio >= 0)
+		synaptics_rmi4_gpio_setup(bdata->power_gpio, false, 0, 0);
+
+err_set_gpio:
+	synaptics_rmi4_enable_reg(rmi4_data, false);
+
+	if (rmi4_data->ts_pinctrl) {
+		if (IS_ERR_OR_NULL(rmi4_data->pinctrl_state_release)) {
+			devm_pinctrl_put(rmi4_data->ts_pinctrl);
+			rmi4_data->ts_pinctrl = NULL;
+		} else {
+			/* Do not clobber retval: the original error must
+			 * be what probe returns.
+			 */
+			if (pinctrl_select_state(
+					rmi4_data->ts_pinctrl,
+					rmi4_data->pinctrl_state_release))
+				dev_err(&pdev->dev,
+					"%s: Failed to select %s pinstate\n",
+					__func__, PINCTRL_STATE_RELEASE);
+		}
+	}
+
+err_enable_reg:
+	synaptics_rmi4_get_reg(rmi4_data, false);
+
+err_get_reg:
+	kfree(rmi4_data);
+
+	return retval;
+}
+
+/*
+ * Platform-driver remove: tears down everything probe set up, in reverse
+ * order — workqueues first (so no work can touch freed state), then sysfs,
+ * IRQ, notifiers, input devices, GPIOs, pinctrl and regulators.
+ */
+static int synaptics_rmi4_remove(struct platform_device *pdev)
+{
+	unsigned char attr_count;
+	struct synaptics_rmi4_data *rmi4_data = platform_get_drvdata(pdev);
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+#ifdef FB_READY_RESET
+	cancel_work_sync(&rmi4_data->reset_work);
+	flush_workqueue(rmi4_data->reset_workqueue);
+	destroy_workqueue(rmi4_data->reset_workqueue);
+#endif
+
+	cancel_delayed_work_sync(&exp_data.work);
+	flush_workqueue(exp_data.workqueue);
+	destroy_workqueue(exp_data.workqueue);
+
+	cancel_delayed_work_sync(&rmi4_data->rb_work);
+	flush_workqueue(rmi4_data->rb_workqueue);
+	destroy_workqueue(rmi4_data->rb_workqueue);
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+	if (rmi4_data->board_prop_dir) {
+		sysfs_remove_file(rmi4_data->board_prop_dir,
+				&virtual_key_map_attr.attr);
+		kobject_put(rmi4_data->board_prop_dir);
+	}
+
+	synaptics_rmi4_irq_enable(rmi4_data, false, false);
+
+#ifdef CONFIG_FB
+	msm_drm_unregister_client(&rmi4_data->fb_notifier);
+#endif
+
+#ifdef USE_EARLYSUSPEND
+	unregister_early_suspend(&rmi4_data->early_suspend);
+#endif
+
+	synaptics_rmi4_empty_fn_list(rmi4_data);
+	input_unregister_device(rmi4_data->input_dev);
+	rmi4_data->input_dev = NULL;
+	if (rmi4_data->stylus_enable) {
+		input_unregister_device(rmi4_data->stylus_dev);
+		rmi4_data->stylus_dev = NULL;
+	}
+
+	synaptics_rmi4_gpio_setup(bdata->irq_gpio, false, 0, 0);
+
+	if (bdata->reset_gpio >= 0)
+		synaptics_rmi4_gpio_setup(bdata->reset_gpio, false, 0, 0);
+
+	if (bdata->power_gpio >= 0)
+		synaptics_rmi4_gpio_setup(bdata->power_gpio, false, 0, 0);
+
+	/* Release pinctrl: devm_put if no release state, else select it. */
+	if (rmi4_data->ts_pinctrl) {
+			if (IS_ERR_OR_NULL(rmi4_data->pinctrl_state_release)) {
+				devm_pinctrl_put(rmi4_data->ts_pinctrl);
+				rmi4_data->ts_pinctrl = NULL;
+			} else {
+				pinctrl_select_state(
+					rmi4_data->ts_pinctrl,
+					rmi4_data->pinctrl_state_release);
+			}
+		}
+
+	synaptics_rmi4_enable_reg(rmi4_data, false);
+	synaptics_rmi4_get_reg(rmi4_data, false);
+
+	kfree(rmi4_data);
+
+	return 0;
+}
+
+#ifdef CONFIG_FB
+/*
+ * DRM panel notifier callback: suspend the touch controller when the
+ * primary panel blanks (powerdown) and resume it on unblank. Always
+ * returns 0 so other notifier clients keep running.
+ */
+static int synaptics_rmi4_dsi_panel_notifier_cb(struct notifier_block *self,
+		unsigned long event, void *data)
+{
+	int blank;
+	struct msm_drm_notifier *evdata = data;
+	struct synaptics_rmi4_data *rmi4_data =
+			container_of(self, struct synaptics_rmi4_data,
+			fb_notifier);
+
+	/* Only the primary display (id 0) is of interest. */
+	if (!evdata || evdata->id != 0)
+		return 0;
+
+	if (!evdata->data || !rmi4_data || event != MSM_DRM_EVENT_BLANK)
+		return 0;
+
+	blank = *(int *)evdata->data;
+	if (blank == MSM_DRM_BLANK_POWERDOWN) {
+		synaptics_rmi4_suspend(&rmi4_data->pdev->dev);
+		rmi4_data->fb_ready = false;
+	} else if (blank == MSM_DRM_BLANK_UNBLANK) {
+		synaptics_rmi4_resume(&rmi4_data->pdev->dev);
+		rmi4_data->fb_ready = true;
+	}
+
+	return 0;
+}
+
+#ifdef USE_EARLYSUSPEND
+/*
+ * Early-suspend handler (USE_EARLYSUSPEND builds): either arm the wakeup
+ * gesture and keep the IRQ as a wake source, or disable the IRQ, put the
+ * sensor to sleep and release any tracked fingers. Notifies expansion
+ * modules afterwards.
+ *
+ * Bug fix: the original returned an undeclared variable `retval`, which
+ * fails to compile when USE_EARLYSUSPEND is defined. Declare and
+ * initialize it here. NOTE(review): early_suspend callbacks are
+ * conventionally void; the int return is kept for interface
+ * compatibility — confirm against the registration site.
+ */
+static int synaptics_rmi4_early_suspend(struct early_suspend *h)
+{
+	int retval = 0;
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_data *rmi4_data =
+			container_of(h, struct synaptics_rmi4_data,
+			early_suspend);
+	unsigned char device_ctrl;
+
+	if (rmi4_data->stay_awake)
+		return retval;
+
+	if (rmi4_data->enable_wakeup_gesture) {
+		/* Clear NO_SLEEP so the controller can enter low power. */
+		if (rmi4_data->no_sleep_setting) {
+			synaptics_rmi4_reg_read(rmi4_data,
+					rmi4_data->f01_ctrl_base_addr,
+					&device_ctrl,
+					sizeof(device_ctrl));
+			device_ctrl = device_ctrl & ~NO_SLEEP_ON;
+			synaptics_rmi4_reg_write(rmi4_data,
+					rmi4_data->f01_ctrl_base_addr,
+					&device_ctrl,
+					sizeof(device_ctrl));
+		}
+		synaptics_rmi4_wakeup_gesture(rmi4_data, true);
+		enable_irq_wake(rmi4_data->irq);
+		goto exit;
+	}
+
+#ifdef SYNA_TDDI
+	if (rmi4_data->no_sleep_setting) {
+		synaptics_rmi4_reg_read(rmi4_data,
+				rmi4_data->f01_ctrl_base_addr,
+				&device_ctrl,
+				sizeof(device_ctrl));
+		device_ctrl = device_ctrl & ~NO_SLEEP_ON;
+		synaptics_rmi4_reg_write(rmi4_data,
+				rmi4_data->f01_ctrl_base_addr,
+				&device_ctrl,
+				sizeof(device_ctrl));
+	}
+	synaptics_rmi4_wakeup_gesture(rmi4_data, true);
+	usleep(TDDI_LPWG_WAIT_US);
+#endif
+	synaptics_rmi4_irq_enable(rmi4_data, false, false);
+	synaptics_rmi4_sleep_enable(rmi4_data, true);
+	synaptics_rmi4_free_fingers(rmi4_data);
+
+exit:
+	/* Let expansion modules react to the suspend transition. */
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->early_suspend != NULL)
+				exp_fhandler->exp_fn->early_suspend(rmi4_data);
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	rmi4_data->suspend = true;
+
+	return retval;
+}
+
+/*
+ * Late-resume handler (USE_EARLYSUSPEND builds): undoes early suspend —
+ * disables the IRQ wake source, or wakes the sensor and re-enables the
+ * IRQ, optionally issuing a reset (FB_READY_RESET). Notifies expansion
+ * modules afterwards.
+ *
+ * Bug fix: `retval` was declared only under #ifdef FB_READY_RESET but
+ * returned unconditionally, so the function failed to compile without
+ * that macro. Declare it unconditionally, initialized to 0.
+ */
+static int synaptics_rmi4_late_resume(struct early_suspend *h)
+{
+	int retval = 0;
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_data *rmi4_data =
+			container_of(h, struct synaptics_rmi4_data,
+			early_suspend);
+
+	if (rmi4_data->stay_awake)
+		return retval;
+
+	if (rmi4_data->enable_wakeup_gesture) {
+		disable_irq_wake(rmi4_data->irq);
+		goto exit;
+	}
+
+	/* Page select may have changed while asleep; force re-sync. */
+	rmi4_data->current_page = MASK_8BIT;
+
+	if (rmi4_data->suspend) {
+		synaptics_rmi4_sleep_enable(rmi4_data, false);
+		synaptics_rmi4_irq_enable(rmi4_data, true, false);
+	}
+
+exit:
+#ifdef FB_READY_RESET
+	if (rmi4_data->suspend) {
+		retval = synaptics_rmi4_reset_device(rmi4_data, false);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to issue reset command\n",
+					__func__);
+		}
+	}
+#endif
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->late_resume != NULL)
+				exp_fhandler->exp_fn->late_resume(rmi4_data);
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	rmi4_data->suspend = false;
+
+	return retval;
+}
+#endif
+
+/*
+ * Device suspend path (called from the panel notifier, or dev_pm_ops when
+ * CONFIG_FB is off): arms the wakeup gesture as a wake source, or disables
+ * the IRQ, sleeps the sensor, asserts reset and cuts the regulators.
+ * Expansion modules are notified at the end. Idempotent via the
+ * rmi4_data->suspend flag checks.
+ */
+static int synaptics_rmi4_suspend(struct device *dev)
+{
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+	unsigned char device_ctrl;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	if (rmi4_data->stay_awake)
+		return 0;
+
+	if (rmi4_data->enable_wakeup_gesture) {
+		/* Clear NO_SLEEP so low-power wakeup gesture mode works. */
+		if (rmi4_data->no_sleep_setting) {
+			synaptics_rmi4_reg_read(rmi4_data,
+					rmi4_data->f01_ctrl_base_addr,
+					&device_ctrl,
+					sizeof(device_ctrl));
+			device_ctrl = device_ctrl & ~NO_SLEEP_ON;
+			synaptics_rmi4_reg_write(rmi4_data,
+					rmi4_data->f01_ctrl_base_addr,
+					&device_ctrl,
+					sizeof(device_ctrl));
+		}
+		synaptics_rmi4_wakeup_gesture(rmi4_data, true);
+		enable_irq_wake(rmi4_data->irq);
+		goto exit;
+	}
+
+	if (!rmi4_data->suspend) {
+#ifdef SYNA_TDDI
+		if (rmi4_data->no_sleep_setting) {
+			synaptics_rmi4_reg_read(rmi4_data,
+					rmi4_data->f01_ctrl_base_addr,
+					&device_ctrl,
+					sizeof(device_ctrl));
+			device_ctrl = device_ctrl & ~NO_SLEEP_ON;
+			synaptics_rmi4_reg_write(rmi4_data,
+					rmi4_data->f01_ctrl_base_addr,
+					&device_ctrl,
+					sizeof(device_ctrl));
+		}
+		synaptics_rmi4_wakeup_gesture(rmi4_data, true);
+		usleep(TDDI_LPWG_WAIT_US);
+#endif
+		synaptics_rmi4_irq_enable(rmi4_data, false, false);
+		synaptics_rmi4_sleep_enable(rmi4_data, true);
+		synaptics_rmi4_free_fingers(rmi4_data);
+	}
+
+	/* Hold the controller in reset while power is removed. */
+	if (bdata->reset_gpio >= 0) {
+		gpio_set_value(bdata->reset_gpio, bdata->reset_on_state);
+		msleep(bdata->reset_active_ms);
+	}
+
+	synaptics_rmi4_enable_reg(rmi4_data, false);
+
+exit:
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->suspend != NULL)
+				exp_fhandler->exp_fn->suspend(rmi4_data);
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	rmi4_data->suspend = true;
+
+	return 0;
+}
+
+/*
+ * Device resume path: disarms the wakeup gesture, or re-enables the
+ * regulators, toggles the reset GPIO through a full reset cycle, wakes
+ * the sensor and re-enables the IRQ. With FB_READY_RESET a soft reset is
+ * also issued. Expansion modules are notified at the end.
+ */
+static int synaptics_rmi4_resume(struct device *dev)
+{
+#ifdef FB_READY_RESET
+	int retval;
+#endif
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+	if (rmi4_data->stay_awake)
+		return 0;
+
+	if (rmi4_data->enable_wakeup_gesture) {
+		disable_irq_wake(rmi4_data->irq);
+		synaptics_rmi4_wakeup_gesture(rmi4_data, false);
+		goto exit;
+	}
+
+	synaptics_rmi4_enable_reg(rmi4_data, true);
+
+	/* Full hardware reset pulse: assert, wait, deassert, settle. */
+	if (bdata->reset_gpio >= 0) {
+		gpio_set_value(bdata->reset_gpio, bdata->reset_on_state);
+		msleep(bdata->reset_active_ms);
+		gpio_set_value(bdata->reset_gpio, !bdata->reset_on_state);
+		msleep(bdata->reset_delay_ms);
+	}
+
+
+	/* Page select is unknown after power cycle; force re-sync. */
+	rmi4_data->current_page = MASK_8BIT;
+
+	synaptics_rmi4_sleep_enable(rmi4_data, false);
+	synaptics_rmi4_irq_enable(rmi4_data, true, false);
+
+exit:
+#ifdef FB_READY_RESET
+	retval = synaptics_rmi4_reset_device(rmi4_data, false);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to issue reset command\n",
+				__func__);
+	}
+#endif
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->resume != NULL)
+				exp_fhandler->exp_fn->resume(rmi4_data);
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	rmi4_data->suspend = false;
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+/*
+ * PM ops: suspend/resume are wired here only when CONFIG_FB is off;
+ * with CONFIG_FB they are driven by the DRM panel notifier instead.
+ */
+static const struct dev_pm_ops synaptics_rmi4_dev_pm_ops = {
+#ifndef CONFIG_FB
+	.suspend = synaptics_rmi4_suspend,
+	.resume = synaptics_rmi4_resume,
+#endif
+};
+#endif
+
+/* Platform driver binding probe/remove and (optionally) PM ops. */
+static struct platform_driver synaptics_rmi4_driver = {
+	.driver = {
+		.name = PLATFORM_DRIVER_NAME,
+		.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm = &synaptics_rmi4_dev_pm_ops,
+#endif
+	},
+	.probe = synaptics_rmi4_probe,
+	.remove = synaptics_rmi4_remove,
+};
+
+/*
+ * Module init: bring up the RMI4 bus layer first, then register the
+ * platform driver that depends on it.
+ */
+static int __init synaptics_rmi4_init(void)
+{
+	int retval = synaptics_rmi4_bus_init();
+
+	if (retval)
+		return retval;
+
+	return platform_driver_register(&synaptics_rmi4_driver);
+}
+
+/* Module exit: unregister the driver before tearing down the bus layer. */
+static void __exit synaptics_rmi4_exit(void)
+{
+	platform_driver_unregister(&synaptics_rmi4_driver);
+
+	synaptics_rmi4_bus_exit();
+}
+
+module_init(synaptics_rmi4_init);
+module_exit(synaptics_rmi4_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX Touch Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.h b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.h
new file mode 100644
index 0000000..3e0c0db
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.h
@@ -0,0 +1,535 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#ifndef _SYNAPTICS_DSX_RMI4_H_
+#define _SYNAPTICS_DSX_RMI4_H_
+
+#define SYNAPTICS_DS4 (1 << 0)
+#define SYNAPTICS_DS5 (1 << 1)
+#define SYNAPTICS_DSX_DRIVER_PRODUCT (SYNAPTICS_DS4 | SYNAPTICS_DS5)
+#define SYNAPTICS_DSX_DRIVER_VERSION 0x2070
+
+#include <linux/version.h>
+#ifdef CONFIG_FB
+#include <linux/notifier.h>
+#include <linux/fb.h>
+#endif
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38))
+#define KERNEL_ABOVE_2_6_38
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+#define KERNEL_ABOVE_3_6
+#endif
+
+#ifdef KERNEL_ABOVE_2_6_38
+#define sstrtoul(...) kstrtoul(__VA_ARGS__)
+#else
+#define sstrtoul(...) strict_strtoul(__VA_ARGS__)
+#endif
+/*
+*#define F51_DISCRETE_FORCE
+*#ifdef F51_DISCRETE_FORCE
+*#define FORCE_LEVEL_ADDR 0x0419
+*#define FORCE_LEVEL_MAX 255
+*#define CAL_DATA_SIZE 144
+*#endif
+*#define SYNA_TDDI
+*/
+#define PDT_PROPS (0X00EF)
+#define PDT_START (0x00E9)
+#define PDT_END (0x00D0)
+#define PDT_ENTRY_SIZE (0x0006)
+#define PAGES_TO_SERVICE (10)
+#define PAGE_SELECT_LEN (2)
+#define ADDRESS_LEN (2)
+
+#define SYNAPTICS_RMI4_F01 (0x01)
+#define SYNAPTICS_RMI4_F11 (0x11)
+#define SYNAPTICS_RMI4_F12 (0x12)
+#define SYNAPTICS_RMI4_F1A (0x1A)
+#define SYNAPTICS_RMI4_F21 (0x21)
+#define SYNAPTICS_RMI4_F34 (0x34)
+#define SYNAPTICS_RMI4_F35 (0x35)
+#define SYNAPTICS_RMI4_F38 (0x38)
+#define SYNAPTICS_RMI4_F51 (0x51)
+#define SYNAPTICS_RMI4_F54 (0x54)
+#define SYNAPTICS_RMI4_F55 (0x55)
+#define SYNAPTICS_RMI4_FDB (0xDB)
+
+#define PRODUCT_INFO_SIZE 2
+#define PRODUCT_ID_SIZE 10
+#define BUILD_ID_SIZE 3
+
+#define F12_FINGERS_TO_SUPPORT 10
+#define F12_NO_OBJECT_STATUS 0x00
+#define F12_FINGER_STATUS 0x01
+#define F12_ACTIVE_STYLUS_STATUS 0x02
+#define F12_PALM_STATUS 0x03
+#define F12_HOVERING_FINGER_STATUS 0x05
+#define F12_GLOVED_FINGER_STATUS 0x06
+#define F12_NARROW_OBJECT_STATUS 0x07
+#define F12_HAND_EDGE_STATUS 0x08
+#define F12_COVER_STATUS 0x0A
+#define F12_STYLUS_STATUS 0x0B
+#define F12_ERASER_STATUS 0x0C
+#define F12_SMALL_OBJECT_STATUS 0x0D
+
+#define F12_GESTURE_DETECTION_LEN 5
+
+#define MAX_NUMBER_OF_BUTTONS 4
+#define MAX_INTR_REGISTERS 4
+
+#define MASK_16BIT 0xFFFF
+#define MASK_8BIT 0xFF
+#define MASK_7BIT 0x7F
+#define MASK_6BIT 0x3F
+#define MASK_5BIT 0x1F
+#define MASK_4BIT 0x0F
+#define MASK_3BIT 0x07
+#define MASK_2BIT 0x03
+#define MASK_1BIT 0x01
+
+#define PINCTRL_STATE_ACTIVE    "pmx_ts_active"
+#define PINCTRL_STATE_SUSPEND   "pmx_ts_suspend"
+#define PINCTRL_STATE_RELEASE   "pmx_ts_release"
+
+/* Identifiers for the pluggable expansion-function modules. */
+enum exp_fn {
+	RMI_DEV = 0,
+	RMI_FW_UPDATER,
+	RMI_TEST_REPORTING,
+	RMI_PROXIMITY,
+	RMI_ACTIVE_PEN,
+	RMI_GESTURE,
+	RMI_VIDEO,
+	RMI_DEBUG,
+	RMI_LAST,	/* sentinel: number of module types */
+};
+
+/*
+ * struct synaptics_rmi4_fn_desc - function descriptor fields in PDT entry
+ * @query_base_addr: base address for query registers
+ * @cmd_base_addr: base address for command registers
+ * @ctrl_base_addr: base address for control registers
+ * @data_base_addr: base address for data registers
+ * @intr_src_count: number of interrupt sources
+ * @fn_version: version of function
+ * @fn_number: function number
+ */
+struct synaptics_rmi4_fn_desc {
+	/* Union mirrors the raw 6-byte PDT entry (data[6]) so it can be
+	 * read from the device in one transfer and accessed by field.
+	 */
+	union {
+		struct {
+			unsigned char query_base_addr;
+			unsigned char cmd_base_addr;
+			unsigned char ctrl_base_addr;
+			unsigned char data_base_addr;
+			unsigned char intr_src_count:3;
+			unsigned char reserved_1:2;
+			unsigned char fn_version:2;
+			unsigned char reserved_2:1;
+			unsigned char fn_number;
+		} __packed;
+		unsigned char data[6];
+	};
+};
+
+/*
+ * synaptics_rmi4_fn_full_addr - full 16-bit base addresses
+ * @query_base: 16-bit base address for query registers
+ * @cmd_base: 16-bit base address for command registers
+ * @ctrl_base: 16-bit base address for control registers
+ * @data_base: 16-bit base address for data registers
+ */
+struct synaptics_rmi4_fn_full_addr {
+	/* 16-bit register base addresses (page in the high byte). */
+	unsigned short query_base;
+	unsigned short cmd_base;
+	unsigned short ctrl_base;
+	unsigned short data_base;
+};
+
+/*
+ * struct synaptics_rmi4_f11_extra_data - extra data of F$11
+ * @data38_offset: offset to F11_2D_DATA38 register
+ */
+struct synaptics_rmi4_f11_extra_data {
+	/* Offset of F11_2D_DATA38 relative to the F11 data base. */
+	unsigned char data38_offset;
+};
+
+/*
+ * struct synaptics_rmi4_f12_extra_data - extra data of F$12
+ * @data1_offset: offset to F12_2D_DATA01 register
+ * @data4_offset: offset to F12_2D_DATA04 register
+ * @data15_offset: offset to F12_2D_DATA15 register
+ * @data15_size: size of F12_2D_DATA15 register
+ * @data15_data: buffer for reading F12_2D_DATA15 register
+ * @data29_offset: offset to F12_2D_DATA29 register
+ * @data29_size: size of F12_2D_DATA29 register
+ * @data29_data: buffer for reading F12_2D_DATA29 register
+ * @ctrl20_offset: offset to F12_2D_CTRL20 register
+ */
+struct synaptics_rmi4_f12_extra_data {
+	/* Offsets are relative to the F12 data/control base addresses. */
+	unsigned char data1_offset;
+	unsigned char data4_offset;
+	unsigned char data15_offset;
+	unsigned char data15_size;
+	/* One bit per supported finger, rounded up to whole bytes. */
+	unsigned char data15_data[(F12_FINGERS_TO_SUPPORT + 7) / 8];
+	unsigned char data29_offset;
+	unsigned char data29_size;
+	unsigned char data29_data[F12_FINGERS_TO_SUPPORT * 2];
+	unsigned char ctrl20_offset;
+};
+
+/*
+ * struct synaptics_rmi4_fn - RMI function handler
+ * @fn_number: function number
+ * @num_of_data_sources: number of data sources
+ * @num_of_data_points: maximum number of fingers supported
+ * @intr_reg_num: index to associated interrupt register
+ * @intr_mask: interrupt mask
+ * @full_addr: full 16-bit base addresses of function registers
+ * @link: linked list for function handlers
+ * @data_size: size of private data
+ * @data: pointer to private data
+ * @extra: pointer to extra data
+ */
+struct synaptics_rmi4_fn {
+	unsigned char fn_number;
+	unsigned char num_of_data_sources;
+	unsigned char num_of_data_points;
+	unsigned char intr_reg_num;
+	unsigned char intr_mask;
+	struct synaptics_rmi4_fn_full_addr full_addr;
+	/* Linked into synaptics_rmi4_device_info.support_fn_list. */
+	struct list_head link;
+	int data_size;
+	void *data;	/* function-private data, data_size bytes */
+	void *extra;	/* e.g. synaptics_rmi4_f1{1,2}_extra_data */
+};
+
+/*
+ * struct synaptics_rmi4_input_settings - current input settings
+ * @num_of_fingers: maximum number of fingers for 2D touch
+ * @valid_button_count: number of valid 0D buttons
+ * @max_touch_width: maximum touch width
+ * @sensor_max_x: maximum x coordinate for 2D touch
+ * @sensor_max_y: maximum y coordinate for 2D touch
+ * @force_min: minimum force value
+ * @force_max: maximum force value
+ * @stylus_enable: flag to indicate reporting of stylus data
+ * @eraser_enable: flag to indicate reporting of eraser data
+ */
+struct synaptics_rmi4_input_settings {
+	/* Snapshot of the parameters the input device was set up with. */
+	unsigned char num_of_fingers;
+	unsigned char valid_button_count;
+	unsigned char max_touch_width;
+	int sensor_max_x;
+	int sensor_max_y;
+	int force_min;
+	int force_max;
+	bool stylus_enable;
+	bool eraser_enable;
+};
+
+/*
+ * struct synaptics_rmi4_device_info - device information
+ * @version_major: RMI protocol major version number
+ * @version_minor: RMI protocol minor version number
+ * @manufacturer_id: manufacturer ID
+ * @product_props: product properties
+ * @product_info: product information
+ * @product_id_string: product ID
+ * @build_id: firmware build ID
+ * @support_fn_list: linked list for function handlers
+ */
+struct synaptics_rmi4_device_info {
+	unsigned int version_major;
+	unsigned int version_minor;
+	unsigned char manufacturer_id;
+	unsigned char product_props;
+	unsigned char product_info[PRODUCT_INFO_SIZE];
+	/* +1 for the NUL terminator. */
+	unsigned char product_id_string[PRODUCT_ID_SIZE + 1];
+	unsigned char build_id[BUILD_ID_SIZE];
+	/* List of synaptics_rmi4_fn entries discovered via PDT scan. */
+	struct list_head support_fn_list;
+};
+
+/*
+ * struct synaptics_rmi4_data - RMI4 device instance data
+ * @pdev: pointer to platform device
+ * @input_dev: pointer to associated input device
+ * @stylus_dev: pointer to associated stylus device
+ * @hw_if: pointer to hardware interface data
+ * @rmi4_mod_info: device information
+ * @board_prop_dir: /sys/board_properties directory for virtual key map file
+ * @pwr_reg: pointer to regulator for power control
+ * @bus_reg: pointer to regulator for bus pullup control
+ * @rmi4_reset_mutex: mutex for software reset
+ * @rmi4_report_mutex: mutex for input event reporting
+ * @rmi4_io_ctrl_mutex: mutex for communication interface I/O
+ * @rmi4_exp_init_mutex: mutex for expansion function module initialization
+ * @rmi4_irq_enable_mutex: mutex for enabling/disabling interrupt
+ * @rb_work: work for rebuilding input device
+ * @rb_workqueue: workqueue for rebuilding input device
+ * @fb_notifier: framebuffer notifier client
+ * @reset_work: work for issuing reset after display framebuffer ready
+ * @reset_workqueue: workqueue for issuing reset after display framebuffer ready
+ * @early_suspend: early suspend power management
+ * @current_page: current RMI page for register access
+ * @button_0d_enabled: switch for enabling 0d button support
+ * @num_of_tx: number of Tx channels for 2D touch
+ * @num_of_rx: number of Rx channels for 2D touch
+ * @num_of_fingers: maximum number of fingers for 2D touch
+ * @max_touch_width: maximum touch width
+ * @valid_button_count: number of valid 0D buttons
+ * @report_enable: input data to report for F$12
+ * @no_sleep_setting: default setting of NoSleep in F01_RMI_CTRL00 register
+ * @gesture_detection: detected gesture type and properties
+ * @intr_mask: interrupt enable mask
+ * @button_txrx_mapping: Tx Rx mapping of 0D buttons
+ * @num_of_intr_regs: number of interrupt registers
+ * @f01_query_base_addr: query base address for f$01
+ * @f01_cmd_base_addr: command base address for f$01
+ * @f01_ctrl_base_addr: control base address for f$01
+ * @f01_data_base_addr: data base address for f$01
+ * @f51_query_base_addr: query base address for f$51
+ * @firmware_id: firmware build ID
+ * @irq: attention interrupt
+ * @sensor_max_x: maximum x coordinate for 2D touch
+ * @sensor_max_y: maximum y coordinate for 2D touch
+ * @force_min: minimum force value
+ * @force_max: maximum force value
+ * @set_wakeup_gesture: location of set wakeup gesture
+ * @flash_prog_mode: flag to indicate flash programming mode status
+ * @irq_enabled: flag to indicate attention interrupt enable status
+ * @fingers_on_2d: flag to indicate presence of fingers in 2D area
+ * @suspend: flag to indicate whether in suspend state
+ * @sensor_sleep: flag to indicate sleep state of sensor
+ * @stay_awake: flag to indicate whether to stay awake during suspend
+ * @fb_ready: flag to indicate whether display framebuffer in ready state
+ * @f11_wakeup_gesture: flag to indicate support for wakeup gestures in F$11
+ * @f12_wakeup_gesture: flag to indicate support for wakeup gestures in F$12
+ * @enable_wakeup_gesture: flag to indicate usage of wakeup gestures
+ * @wedge_sensor: flag to indicate use of wedge sensor
+ * @report_pressure: flag to indicate reporting of pressure data
+ * @stylus_enable: flag to indicate reporting of stylus data
+ * @eraser_enable: flag to indicate reporting of eraser data
+ * @external_afe_buttons: flag to indicate presence of external AFE buttons
+ * @reset_device: pointer to device reset function
+ * @irq_enable: pointer to interrupt enable function
+ * @sleep_enable: pointer to sleep enable function
+ * @report_touch: pointer to touch reporting function
+ */
+struct synaptics_rmi4_data {
+	struct platform_device *pdev;
+	struct input_dev *input_dev;
+	struct input_dev *stylus_dev;
+	const struct synaptics_dsx_hw_interface *hw_if;
+	struct synaptics_rmi4_device_info rmi4_mod_info;
+	struct synaptics_rmi4_input_settings input_settings;
+	struct kobject *board_prop_dir;
+	struct regulator *pwr_reg;
+	struct regulator *bus_reg;
+	struct mutex rmi4_reset_mutex;
+	struct mutex rmi4_report_mutex;
+	struct mutex rmi4_io_ctrl_mutex;
+	struct mutex rmi4_exp_init_mutex;
+	struct mutex rmi4_irq_enable_mutex;
+	struct delayed_work rb_work;
+	struct workqueue_struct *rb_workqueue;
+	struct pinctrl *ts_pinctrl;
+	struct pinctrl_state *pinctrl_state_active;
+	struct pinctrl_state *pinctrl_state_suspend;
+	struct pinctrl_state *pinctrl_state_release;
+#ifdef CONFIG_FB
+	/* used to defer device reset until the display framebuffer is ready */
+	struct notifier_block fb_notifier;
+	struct work_struct reset_work;
+	struct workqueue_struct *reset_workqueue;
+#endif
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	struct early_suspend early_suspend;
+#endif
+	unsigned char current_page;
+	unsigned char button_0d_enabled;
+	unsigned char num_of_tx;
+	unsigned char num_of_rx;
+	unsigned char num_of_fingers;
+	unsigned char max_touch_width;
+	unsigned char valid_button_count;
+	unsigned char report_enable;
+	unsigned char no_sleep_setting;
+	unsigned char gesture_detection[F12_GESTURE_DETECTION_LEN];
+	unsigned char intr_mask[MAX_INTR_REGISTERS];
+	unsigned char *button_txrx_mapping;
+	unsigned short num_of_intr_regs;
+	unsigned short f01_query_base_addr;
+	unsigned short f01_cmd_base_addr;
+	unsigned short f01_ctrl_base_addr;
+	unsigned short f01_data_base_addr;
+#ifdef F51_DISCRETE_FORCE
+	unsigned short f51_query_base_addr;
+#endif
+	unsigned int firmware_id;
+	int irq;
+	int sensor_max_x;
+	int sensor_max_y;
+	int force_min;
+	int force_max;
+	int set_wakeup_gesture;
+	/* regulator enable state trackers (not covered by the kernel-doc) */
+	int avdd_status;
+	int vdd_status;
+	bool flash_prog_mode;
+	bool irq_enabled;
+	bool fingers_on_2d;
+	bool suspend;
+	bool sensor_sleep;
+	bool stay_awake;
+	bool fb_ready;
+	bool f11_wakeup_gesture;
+	bool f12_wakeup_gesture;
+	bool enable_wakeup_gesture;
+	bool wedge_sensor;
+	bool report_pressure;
+	bool stylus_enable;
+	bool eraser_enable;
+	bool external_afe_buttons;
+	/* core operations injected at probe time; callable by expansion modules */
+	int (*reset_device)(struct synaptics_rmi4_data *rmi4_data,
+			bool rebuild);
+	int (*irq_enable)(struct synaptics_rmi4_data *rmi4_data, bool enable,
+			bool attn_only);
+	int (*sleep_enable)(struct synaptics_rmi4_data *rmi4_data,
+			bool enable);
+	void (*report_touch)(struct synaptics_rmi4_data *rmi4_data,
+			struct synaptics_rmi4_fn *fhandler);
+};
+
+/*
+ * Low-level register access operations for the underlying bus.
+ * type identifies the bus variant (presumably I2C/SPI/RMI - confirm against
+ * the bus modules that populate this structure).
+ */
+struct synaptics_dsx_bus_access {
+	unsigned char type;
+	int (*read)(struct synaptics_rmi4_data *rmi4_data, unsigned short addr,
+		unsigned char *data, unsigned int length);
+	int (*write)(struct synaptics_rmi4_data *rmi4_data, unsigned short addr,
+		unsigned char *data, unsigned int length);
+};
+
+/* Hardware interface: board data plus bus ops and optional init hooks. */
+struct synaptics_dsx_hw_interface {
+	struct synaptics_dsx_board_data *board_data;
+	const struct synaptics_dsx_bus_access *bus_access;
+	int (*bl_hw_init)(struct synaptics_rmi4_data *rmi4_data);
+	int (*ui_hw_init)(struct synaptics_rmi4_data *rmi4_data);
+};
+
+/*
+ * Expansion function module descriptor. Each optional module (e.g. firmware
+ * update) registers one of these; all callbacks other than init are optional.
+ */
+struct synaptics_rmi4_exp_fn {
+	enum exp_fn fn_type;
+	int (*init)(struct synaptics_rmi4_data *rmi4_data);
+	void (*remove)(struct synaptics_rmi4_data *rmi4_data);
+	void (*reset)(struct synaptics_rmi4_data *rmi4_data);
+	void (*reinit)(struct synaptics_rmi4_data *rmi4_data);
+	void (*early_suspend)(struct synaptics_rmi4_data *rmi4_data);
+	void (*suspend)(struct synaptics_rmi4_data *rmi4_data);
+	void (*resume)(struct synaptics_rmi4_data *rmi4_data);
+	void (*late_resume)(struct synaptics_rmi4_data *rmi4_data);
+	/* called with the interrupt status bits relevant to this module */
+	void (*attn)(struct synaptics_rmi4_data *rmi4_data,
+			unsigned char intr_mask);
+};
+
+/* Entry points exported by the core/bus modules for expansion modules. */
+int synaptics_rmi4_bus_init(void);
+
+void synaptics_rmi4_bus_exit(void);
+
+/* insert == true registers the module, false unregisters it */
+void synaptics_rmi4_new_function(struct synaptics_rmi4_exp_fn *exp_fn_module,
+		bool insert);
+
+int synaptics_fw_updater(const unsigned char *fw_data);
+
+/* Read len bytes from RMI register address addr via the configured bus. */
+static inline int synaptics_rmi4_reg_read(
+		struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr,
+		unsigned char *data,
+		unsigned int len)
+{
+	return rmi4_data->hw_if->bus_access->read(rmi4_data, addr, data, len);
+}
+
+/* Write len bytes to RMI register address addr via the configured bus. */
+static inline int synaptics_rmi4_reg_write(
+		struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr,
+		unsigned char *data,
+		unsigned int len)
+{
+	return rmi4_data->hw_if->bus_access->write(rmi4_data, addr, data, len);
+}
+
+/* Stub "show" handler for write-only sysfs attributes; always -EPERM. */
+static inline ssize_t synaptics_rmi4_show_error(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	dev_warn(dev, "%s Attempted to read from write-only attribute %s\n",
+			__func__, attr->attr.name);
+	return -EPERM;
+}
+
+/* Stub "store" handler for read-only sysfs attributes; always -EPERM. */
+static inline ssize_t synaptics_rmi4_store_error(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	dev_warn(dev, "%s Attempted to write to read-only attribute %s\n",
+			__func__, attr->attr.name);
+	return -EPERM;
+}
+
+/*
+ * Bounds-checked memcpy: refuses to copy if either pointer is NULL or if
+ * count would overrun the stated size of either buffer.
+ * Returns 0 on success, -EINVAL on rejected input.
+ */
+static inline int secure_memcpy(unsigned char *dest, unsigned int dest_size,
+		const unsigned char *src, unsigned int src_size,
+		unsigned int count)
+{
+	if (dest == NULL || src == NULL)
+		return -EINVAL;
+
+	if (count > dest_size || count > src_size)
+		return -EINVAL;
+
+	memcpy((void *)dest, (const void *)src, count);
+
+	return 0;
+}
+
+/* Byte array to host short: assemble src[1]:src[0] (LSB first) into *dest. */
+static inline void batohs(unsigned short *dest, unsigned char *src)
+{
+	*dest = src[1] * 0x100 + src[0];
+}
+
+/* Host short to byte array: store src LSB-first into dest[0..1]. */
+static inline void hstoba(unsigned char *dest, unsigned short src)
+{
+	dest[0] = src % 0x100;
+	dest[1] = src / 0x100;
+}
+
+#endif
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c
new file mode 100644
index 0000000..395def9
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c
@@ -0,0 +1,5834 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+#define FW_IHEX_NAME "synaptics/startup_fw_update.bin"
+#define FW_IMAGE_NAME "synaptics/startup_fw_update.img"
+/*
+*#define DO_STARTUP_FW_UPDATE
+*/
+/*
+*#ifdef DO_STARTUP_FW_UPDATE
+*#ifdef CONFIG_FB
+*#define WAIT_FOR_FB_READY
+*#define FB_READY_WAIT_MS 100
+*#define FB_READY_TIMEOUT_S 30
+*#endif
+*#endif
+*/
+/*
+*#define MAX_WRITE_SIZE 4096
+*/
+
+#define ENABLE_SYS_REFLASH false
+#define FORCE_UPDATE false
+#define DO_LOCKDOWN false
+
+#define MAX_IMAGE_NAME_LEN 256
+#define MAX_FIRMWARE_ID_LEN 10
+
+/* supported firmware image header format revisions */
+#define IMAGE_HEADER_VERSION_05 0x05
+#define IMAGE_HEADER_VERSION_06 0x06
+#define IMAGE_HEADER_VERSION_10 0x10
+
+#define IMAGE_AREA_OFFSET 0x100
+#define LOCKDOWN_SIZE 0x50
+
+#define MAX_UTILITY_PARAMS 20
+
+/* F34 v5/v6 bootloader register layout (byte offsets) */
+#define V5V6_BOOTLOADER_ID_OFFSET 0
+#define V5V6_CONFIG_ID_SIZE 4
+
+#define V5_PROPERTIES_OFFSET 2
+#define V5_BLOCK_SIZE_OFFSET 3
+#define V5_BLOCK_COUNT_OFFSET 5
+#define V5_BLOCK_NUMBER_OFFSET 0
+#define V5_BLOCK_DATA_OFFSET 2
+
+#define V6_PROPERTIES_OFFSET 1
+#define V6_BLOCK_SIZE_OFFSET 2
+#define V6_BLOCK_COUNT_OFFSET 3
+#define V6_PROPERTIES_2_OFFSET 4
+#define V6_GUEST_CODE_BLOCK_COUNT_OFFSET 5
+#define V6_BLOCK_NUMBER_OFFSET 0
+#define V6_BLOCK_DATA_OFFSET 1
+#define V6_FLASH_COMMAND_OFFSET 2
+#define V6_FLASH_STATUS_OFFSET 3
+
+/* F34 v7/v8 bootloader register layout (byte offsets) */
+#define V7_CONFIG_ID_SIZE 32
+
+#define V7_FLASH_STATUS_OFFSET 0
+#define V7_PARTITION_ID_OFFSET 1
+#define V7_BLOCK_NUMBER_OFFSET 2
+#define V7_TRANSFER_LENGTH_OFFSET 3
+#define V7_COMMAND_OFFSET 4
+#define V7_PAYLOAD_OFFSET 5
+
+#define V7_PARTITION_SUPPORT_BYTES 4
+
+/* F35 microbootloader (recovery mode) register layout */
+#define F35_ERROR_CODE_OFFSET 0
+#define F35_FLASH_STATUS_OFFSET 5
+#define F35_CHUNK_NUM_LSB_OFFSET 0
+#define F35_CHUNK_NUM_MSB_OFFSET 1
+#define F35_CHUNK_DATA_OFFSET 2
+#define F35_CHUNK_COMMAND_OFFSET 18
+
+#define F35_CHUNK_SIZE 16
+#define F35_ERASE_ALL_WAIT_MS 5000
+#define F35_RESET_WAIT_MS 250
+
+/* F01 device control sleep-mode field values */
+#define SLEEP_MODE_NORMAL (0x00)
+#define SLEEP_MODE_SENSOR_SLEEP (0x01)
+#define SLEEP_MODE_RESERVED0 (0x02)
+#define SLEEP_MODE_RESERVED1 (0x03)
+
+/* command-completion timeouts */
+#define ENABLE_WAIT_MS (1 * 1000)
+#define WRITE_WAIT_MS (3 * 1000)
+#define ERASE_WAIT_MS (5 * 1000)
+
+#define MIN_SLEEP_TIME_US 50
+#define MAX_SLEEP_TIME_US 100
+
+#define INT_DISABLE_WAIT_MS 20
+#define ENTER_FLASH_PROG_WAIT_MS 20
+#define READ_CONFIG_WAIT_MS 20
+
+/* forward declarations: reflash entry points and optional sysfs interface */
+static int fwu_do_reflash(void);
+
+static int fwu_recovery_check_status(void);
+
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+static ssize_t fwu_sysfs_show_image(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+static ssize_t fwu_sysfs_store_image(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+static ssize_t fwu_sysfs_do_recovery_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_do_reflash_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_write_config_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_read_config_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_config_area_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_image_name_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_image_size_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_block_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_firmware_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_configuration_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_disp_config_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_perm_config_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_bl_config_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_utility_parameter_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_guest_code_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_write_guest_code_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+#ifdef SYNA_TDDI
+static ssize_t fwu_sysfs_write_lockdown_code_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_read_lockdown_code_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+#endif
+
+#endif
+
+/* F34 (flash programming) function register-map revision */
+enum f34_version {
+	F34_V0 = 0,
+	F34_V1,
+	F34_V2,
+};
+
+/* bootloader generation reported by the device */
+enum bl_version {
+	BL_V5 = 5,
+	BL_V6 = 6,
+	BL_V7 = 7,
+	BL_V8 = 8,
+};
+
+/* which flash area needs updating after comparing device vs image */
+enum flash_area {
+	NONE = 0,
+	UI_FIRMWARE,
+	UI_CONFIG,
+};
+
+/* bit flags controlling a reflash request */
+enum update_mode {
+	NORMAL = 1,
+	FORCE = 2,
+	LOCKDOWN = 8,
+};
+
+/* selectable configuration areas for read/write operations */
+enum config_area {
+	UI_CONFIG_AREA = 0,
+	PM_CONFIG_AREA,
+	BL_CONFIG_AREA,
+	DP_CONFIG_AREA,
+	FLASH_CONFIG_AREA,
+#ifdef SYNA_TDDI
+	TDDI_FORCE_CONFIG_AREA,
+	TDDI_LCM_DATA_AREA,
+	TDDI_OEM_DATA_AREA,
+#endif
+	UPP_AREA,
+};
+
+/* v7 bootloader flash status codes (operation_status field) */
+enum v7_status {
+	SUCCESS = 0x00,
+	DEVICE_NOT_IN_BOOTLOADER_MODE,
+	INVALID_PARTITION,
+	INVALID_COMMAND,
+	INVALID_BLOCK_OFFSET,
+	INVALID_TRANSFER,
+	NOT_ERASED,
+	FLASH_PROGRAMMING_KEY_INCORRECT,
+	BAD_PARTITION_TABLE,
+	CHECKSUM_FAILED,
+	FLASH_HARDWARE_FAILURE = 0x1f,
+};
+
+/* v7 bootloader flash partition identifiers */
+enum v7_partition_id {
+	BOOTLOADER_PARTITION = 0x01,
+	DEVICE_CONFIG_PARTITION,
+	FLASH_CONFIG_PARTITION,
+	MANUFACTURING_BLOCK_PARTITION,
+	GUEST_SERIALIZATION_PARTITION,
+	GLOBAL_PARAMETERS_PARTITION,
+	CORE_CODE_PARTITION,
+	CORE_CONFIG_PARTITION,
+	GUEST_CODE_PARTITION,
+	DISPLAY_CONFIG_PARTITION,
+	EXTERNAL_TOUCH_AFE_CONFIG_PARTITION,
+	UTILITY_PARAMETER_PARTITION,
+};
+
+/* raw command values written to the v7 bootloader command register */
+enum v7_flash_command {
+	CMD_V7_IDLE = 0x00,
+	CMD_V7_ENTER_BL,
+	CMD_V7_READ,
+	CMD_V7_WRITE,
+	CMD_V7_ERASE,
+	CMD_V7_ERASE_AP,
+	CMD_V7_SENSOR_ID,
+};
+
+/* raw command values written to the v5/v6 bootloader command register */
+enum v5v6_flash_command {
+	CMD_V5V6_IDLE = 0x0,
+	CMD_V5V6_WRITE_FW = 0x2,
+	CMD_V5V6_ERASE_ALL = 0x3,
+	CMD_V5V6_WRITE_LOCKDOWN = 0x4,
+	CMD_V5V6_READ_CONFIG = 0x5,
+	CMD_V5V6_WRITE_CONFIG = 0x6,
+	CMD_V5V6_ERASE_UI_CONFIG = 0x7,
+	CMD_V5V6_ERASE_BL_CONFIG = 0x9,
+	CMD_V5V6_ERASE_DISP_CONFIG = 0xa,
+	CMD_V5V6_ERASE_GUEST_CODE = 0xb,
+	CMD_V5V6_WRITE_GUEST_CODE = 0xc,
+	CMD_V5V6_ERASE_CHIP = 0x0d,
+	CMD_V5V6_ENABLE_FLASH_PROG = 0xf,
+#ifdef SYNA_TDDI
+	CMD_V5V6_ERASE_FORCE_CONFIG = 0x11,
+	CMD_V5V6_READ_FORCE_CONFIG = 0x12,
+	CMD_V5V6_WRITE_FORCE_CONFIG = 0x13,
+	CMD_V5V6_ERASE_LOCKDOWN_DATA = 0x1a,
+	CMD_V5V6_READ_LOCKDOWN_DATA = 0x1b,
+	CMD_V5V6_WRITE_LOCKDOWN_DATA = 0x1c,
+	CMD_V5V6_ERASE_LCM_DATA = 0x1d,
+	CMD_V5V6_ERASE_OEM_DATA = 0x1e,
+#endif
+};
+
+/* bootloader-version-independent command set used by the driver logic */
+enum flash_command {
+	CMD_IDLE = 0,
+	CMD_WRITE_FW,
+	CMD_WRITE_CONFIG,
+	CMD_WRITE_LOCKDOWN,
+	CMD_WRITE_GUEST_CODE,
+	CMD_WRITE_BOOTLOADER,
+	CMD_WRITE_UTILITY_PARAM,
+	CMD_READ_CONFIG,
+	CMD_ERASE_ALL,
+	CMD_ERASE_UI_FIRMWARE,
+	CMD_ERASE_UI_CONFIG,
+	CMD_ERASE_BL_CONFIG,
+	CMD_ERASE_DISP_CONFIG,
+	CMD_ERASE_FLASH_CONFIG,
+	CMD_ERASE_GUEST_CODE,
+	CMD_ERASE_BOOTLOADER,
+	CMD_ERASE_UTILITY_PARAMETER,
+	CMD_ENABLE_FLASH_PROG,
+#ifdef SYNA_TDDI
+	CMD_ERASE_CHIP,
+	CMD_ERASE_FORCE_CONFIG,
+	CMD_READ_FORCE_CONFIG,
+	CMD_WRITE_FORCE_CONFIG,
+	CMD_ERASE_LOCKDOWN_DATA,
+	CMD_READ_LOCKDOWN_DATA,
+	CMD_WRITE_LOCKDOWN_DATA,
+	CMD_ERASE_LCM_DATA,
+	CMD_READ_LCM_DATA,
+	CMD_WRITE_LCM_DATA,
+	CMD_ERASE_OEM_DATA,
+	CMD_READ_OEM_DATA,
+	CMD_WRITE_OEM_DATA,
+#endif
+};
+
+/* F35 microbootloader (recovery) commands */
+enum f35_flash_command {
+	CMD_F35_IDLE = 0x0,
+	CMD_F35_RESERVED = 0x1,
+	CMD_F35_WRITE_CHUNK = 0x2,
+	CMD_F35_ERASE_ALL = 0x3,
+	CMD_F35_RESET = 0x10,
+};
+
+/* container identifiers found in version-0x10 image files */
+enum container_id {
+	TOP_LEVEL_CONTAINER = 0,
+	UI_CONTAINER,
+	UI_CONFIG_CONTAINER,
+	BL_CONTAINER,
+	BL_IMAGE_CONTAINER,
+	BL_CONFIG_CONTAINER,
+	BL_LOCKDOWN_INFO_CONTAINER,
+	PERMANENT_CONFIG_CONTAINER,
+	GUEST_CODE_CONTAINER,
+	BL_PROTOCOL_DESCRIPTOR_CONTAINER,
+	UI_PROTOCOL_DESCRIPTOR_CONTAINER,
+	RMI_SELF_DISCOVERY_CONTAINER,
+	RMI_PAGE_CONTENT_CONTAINER,
+	GENERAL_INFORMATION_CONTAINER,
+	DEVICE_CONFIG_CONTAINER,
+	FLASH_CONFIG_CONTAINER,
+	GUEST_SERIALIZATION_CONTAINER,
+	GLOBAL_PARAMETERS_CONTAINER,
+	CORE_CODE_CONTAINER,
+	CORE_CONFIG_CONTAINER,
+	DISPLAY_CONFIG_CONTAINER,
+	EXTERNAL_TOUCH_AFE_CONFIG_CONTAINER,
+	UTILITY_CONTAINER,
+	UTILITY_PARAMETER_CONTAINER,
+};
+
+/* identifiers for utility-parameter partitions */
+enum utility_parameter_id {
+	UNUSED = 0,
+	FORCE_PARAMETER,
+	ANTI_BENDING_PARAMETER,
+};
+
+/* page description table properties register (1 byte) */
+struct pdt_properties {
+	union {
+		struct {
+			unsigned char reserved_1:6;
+			unsigned char has_bsr:1;
+			unsigned char reserved_2:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* one entry of the v7 bootloader flash partition table */
+struct partition_table {
+	unsigned char partition_id:5;
+	unsigned char byte_0_reserved:3;
+	unsigned char byte_1_reserved;
+	unsigned char partition_length_7_0;
+	unsigned char partition_length_15_8;
+	unsigned char start_physical_address_7_0;
+	unsigned char start_physical_address_15_8;
+	unsigned char partition_properties_7_0;
+	unsigned char partition_properties_15_8;
+} __packed;
+
+/* F01_RMI_CTRL00 device control register layout */
+struct f01_device_control {
+	union {
+		struct {
+			unsigned char sleep_mode:2;
+			unsigned char nosleep:1;
+			unsigned char reserved:2;
+			unsigned char charger_connected:1;
+			unsigned char report_rate:1;
+			unsigned char configured:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* F34 v7 query register 0 */
+struct f34_v7_query_0 {
+	union {
+		struct {
+			unsigned char subpacket_1_size:3;
+			unsigned char has_config_id:1;
+			unsigned char f34_query0_b4:1;
+			unsigned char has_thqa:1;
+			unsigned char f34_query0_b6__7:2;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* F34 v7 query registers 1-7, read as one 21-byte packet */
+struct f34_v7_query_1_7 {
+	union {
+		struct {
+			/* query 1 */
+			unsigned char bl_minor_revision;
+			unsigned char bl_major_revision;
+
+			/* query 2 */
+			unsigned char bl_fw_id_7_0;
+			unsigned char bl_fw_id_15_8;
+			unsigned char bl_fw_id_23_16;
+			unsigned char bl_fw_id_31_24;
+
+			/* query 3 */
+			unsigned char minimum_write_size;
+			unsigned char block_size_7_0;
+			unsigned char block_size_15_8;
+			unsigned char flash_page_size_7_0;
+			unsigned char flash_page_size_15_8;
+
+			/* query 4 */
+			unsigned char adjustable_partition_area_size_7_0;
+			unsigned char adjustable_partition_area_size_15_8;
+
+			/* query 5 */
+			unsigned char flash_config_length_7_0;
+			unsigned char flash_config_length_15_8;
+
+			/* query 6 */
+			unsigned char payload_length_7_0;
+			unsigned char payload_length_15_8;
+
+			/* query 7: partition-support flags */
+			unsigned char f34_query7_b0:1;
+			unsigned char has_bootloader:1;
+			unsigned char has_device_config:1;
+			unsigned char has_flash_config:1;
+			unsigned char has_manufacturing_block:1;
+			unsigned char has_guest_serialization:1;
+			unsigned char has_global_parameters:1;
+			unsigned char has_core_code:1;
+			unsigned char has_core_config:1;
+			unsigned char has_guest_code:1;
+			unsigned char has_display_config:1;
+			unsigned char f34_query7_b11__15:5;
+			unsigned char f34_query7_b16__23;
+			unsigned char f34_query7_b24__31;
+		} __packed;
+		unsigned char data[21];
+	};
+};
+
+/* F34 v7 data register 0: flash/bootloader status */
+struct f34_v7_data0 {
+	union {
+		struct {
+			unsigned char operation_status:5;
+			unsigned char device_cfg_status:2;
+			unsigned char bl_mode:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* F34 v7 data registers 1-5: command packet written to start a flash op */
+struct f34_v7_data_1_5 {
+	union {
+		struct {
+			unsigned char partition_id:5;
+			unsigned char f34_data1_b5__7:3;
+			unsigned char block_offset_7_0;
+			unsigned char block_offset_15_8;
+			unsigned char transfer_length_7_0;
+			unsigned char transfer_length_15_8;
+			unsigned char command;
+			unsigned char payload_0;
+			unsigned char payload_1;
+		} __packed;
+		unsigned char data[8];
+	};
+};
+
+/* F34 v5/v6 flash properties query register */
+struct f34_v5v6_flash_properties {
+	union {
+		struct {
+			unsigned char reg_map:1;
+			unsigned char unlocked:1;
+			unsigned char has_config_id:1;
+			unsigned char has_pm_config:1;
+			unsigned char has_bl_config:1;
+			unsigned char has_disp_config:1;
+			unsigned char has_ctrl1:1;
+			unsigned char has_query4:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* F34 v5/v6 extended flash properties (query 4) */
+struct f34_v5v6_flash_properties_2 {
+	union {
+		struct {
+			unsigned char has_guest_code:1;
+			unsigned char f34_query4_b1:1;
+			unsigned char has_gesture_config:1;
+			unsigned char has_force_config:1;
+			unsigned char has_lockdown_data:1;
+			unsigned char has_lcm_data:1;
+			unsigned char has_oem_data:1;
+			unsigned char f34_query4_b7:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* per-bootloader-version byte offsets of the F34 registers in use */
+struct register_offset {
+	unsigned char properties;
+	unsigned char properties_2;
+	unsigned char block_size;
+	unsigned char block_count;
+	unsigned char gc_block_count;
+	unsigned char flash_status;
+	unsigned char partition_id;
+	unsigned char block_number;
+	unsigned char transfer_length;
+	unsigned char flash_cmd;
+	unsigned char payload;
+};
+
+/* flash block counts per partition/area */
+struct block_count {
+	unsigned short ui_firmware;
+	unsigned short ui_config;
+	unsigned short dp_config;
+	unsigned short pm_config;
+	unsigned short fl_config;
+	unsigned short bl_image;
+	unsigned short bl_config;
+	unsigned short utility_param;
+	unsigned short lockdown;
+	unsigned short guest_code;
+#ifdef SYNA_TDDI
+	unsigned short tddi_force_config;
+	unsigned short tddi_lockdown_data;
+	unsigned short tddi_lcm_data;
+	unsigned short tddi_oem_data;
+#endif
+	unsigned short total_count;
+};
+
+/* physical flash start addresses per partition/area */
+struct physical_address {
+	unsigned short ui_firmware;
+	unsigned short ui_config;
+	unsigned short dp_config;
+	unsigned short pm_config;
+	unsigned short fl_config;
+	unsigned short bl_image;
+	unsigned short bl_config;
+	unsigned short utility_param;
+	unsigned short lockdown;
+	unsigned short guest_code;
+};
+
+/* descriptor for one container inside a version-0x10 image file */
+struct container_descriptor {
+	unsigned char content_checksum[4];
+	unsigned char container_id[2];
+	unsigned char minor_version;
+	unsigned char major_version;
+	unsigned char reserved_08;
+	unsigned char reserved_09;
+	unsigned char reserved_0a;
+	unsigned char reserved_0b;
+	unsigned char container_option_flags[4];
+	unsigned char content_options_length[4];
+	unsigned char content_options_address[4];
+	unsigned char content_length[4];
+	unsigned char content_address[4];
+};
+
+/* on-disk header of a version-0x10 image file */
+struct image_header_10 {
+	unsigned char checksum[4];
+	unsigned char reserved_04;
+	unsigned char reserved_05;
+	unsigned char minor_header_version;
+	unsigned char major_header_version;
+	unsigned char reserved_08;
+	unsigned char reserved_09;
+	unsigned char reserved_0a;
+	unsigned char reserved_0b;
+	unsigned char top_level_container_start_addr[4];
+};
+
+/* on-disk header of a version-0x05/0x06 image file */
+struct image_header_05_06 {
+	/* 0x00 - 0x0f */
+	unsigned char checksum[4];
+	unsigned char reserved_04;
+	unsigned char reserved_05;
+	unsigned char options_firmware_id:1;
+	unsigned char options_bootloader:1;
+	unsigned char options_guest_code:1;
+	unsigned char options_tddi:1;
+	unsigned char options_reserved:4;
+	unsigned char header_version;
+	unsigned char firmware_size[4];
+	unsigned char config_size[4];
+	/* 0x10 - 0x1f */
+	unsigned char product_id[PRODUCT_ID_SIZE];
+	unsigned char package_id[2];
+	unsigned char package_id_revision[2];
+	unsigned char product_info[PRODUCT_INFO_SIZE];
+	/* 0x20 - 0x2f */
+	unsigned char bootloader_addr[4];
+	unsigned char bootloader_size[4];
+	unsigned char ui_addr[4];
+	unsigned char ui_size[4];
+	/* 0x30 - 0x3f */
+	unsigned char ds_id[16];
+	/* 0x40 - 0x4f: meaning depends on options_tddi */
+	union {
+		struct {
+			unsigned char cstmr_product_id[PRODUCT_ID_SIZE];
+			unsigned char reserved_4a_4f[6];
+		};
+		struct {
+			unsigned char dsp_cfg_addr[4];
+			unsigned char dsp_cfg_size[4];
+			unsigned char reserved_48_4f[8];
+		};
+	};
+	/* 0x50 - 0x53 */
+	unsigned char firmware_id[4];
+};
+
+/* pointer + length pair referencing a region inside the loaded image */
+struct block_data {
+	unsigned int size;
+	const unsigned char *data;
+};
+
+/* everything parsed out of a firmware image file */
+struct image_metadata {
+	bool contains_firmware_id;
+	bool contains_bootloader;
+	bool contains_guest_code;
+	bool contains_disp_config;
+	bool contains_perm_config;
+	bool contains_flash_config;
+	bool contains_utility_param;
+	unsigned int firmware_id;
+	unsigned int checksum;
+	unsigned int bootloader_size;
+	unsigned int disp_config_offset;
+	unsigned char bl_version;
+	unsigned char product_id[PRODUCT_ID_SIZE + 1];
+	unsigned char cstmr_product_id[PRODUCT_ID_SIZE + 1];
+	unsigned char utility_param_id[MAX_UTILITY_PARAMS];
+	struct block_data bootloader;
+	struct block_data utility;
+	struct block_data ui_firmware;
+	struct block_data ui_config;
+	struct block_data dp_config;
+	struct block_data pm_config;
+	struct block_data fl_config;
+	struct block_data bl_image;
+	struct block_data bl_config;
+	struct block_data utility_param[MAX_UTILITY_PARAMS];
+	struct block_data lockdown;
+	struct block_data guest_code;
+	struct block_count blkcount;
+	struct physical_address phyaddr;
+};
+
+/* complete state of the firmware updater: device capabilities discovered at
+ * probe time, parsed image metadata, scratch buffers and work item */
+struct synaptics_rmi4_fwu_handle {
+	enum bl_version bl_version;
+	bool initialized;
+	bool in_bl_mode;
+	bool in_ub_mode;
+	bool bl_mode_device;
+	bool force_update;
+	bool do_lockdown;
+	bool has_guest_code;
+#ifdef SYNA_TDDI
+	bool has_force_config;
+	bool has_lockdown_data;
+	bool has_lcm_data;
+	bool has_oem_data;
+#endif
+	bool has_utility_param;
+	bool new_partition_table;
+	bool incompatible_partition_tables;
+	bool write_bootloader;
+	unsigned int data_pos;
+	unsigned char *ext_data_source;
+	unsigned char *read_config_buf;
+	unsigned char intr_mask;
+	unsigned char command;
+	unsigned char bootloader_id[2];
+	unsigned char config_id[32];
+	unsigned char flash_status;
+	unsigned char partitions;
+#ifdef F51_DISCRETE_FORCE
+	/* force-sensor calibration data cache, sized by fwu_f51_force_data_init */
+	unsigned char *cal_data;
+	unsigned short cal_data_off;
+	unsigned short cal_data_size;
+	unsigned short cal_data_buf_size;
+	unsigned short cal_packet_data_size;
+#endif
+	unsigned short block_size;
+	unsigned short config_size;
+	unsigned short config_area;
+	unsigned short config_block_count;
+	unsigned short flash_config_length;
+	unsigned short payload_length;
+	unsigned short partition_table_bytes;
+	unsigned short read_config_buf_size;
+	const unsigned char *config_data;
+	const unsigned char *image;
+	unsigned char *image_name;
+	unsigned int image_size;
+	struct image_metadata img;
+	struct register_offset off;
+	struct block_count blkcount;
+	struct physical_address phyaddr;
+	struct f34_v5v6_flash_properties flash_properties;
+	struct synaptics_rmi4_fn_desc f34_fd;
+	struct synaptics_rmi4_fn_desc f35_fd;
+	struct synaptics_rmi4_data *rmi4_data;
+	struct workqueue_struct *fwu_workqueue;
+	struct work_struct fwu_work;
+};
+
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+/* binary sysfs attribute "data": image upload / config readback channel */
+static struct bin_attribute dev_attr_data = {
+	.attr = {
+		.name = "data",
+		.mode = 0664,
+	},
+	.size = 0,
+	.read = fwu_sysfs_show_image,
+	.write = fwu_sysfs_store_image,
+};
+#endif
+
+/*
+ * sysfs control attributes. Write-only (0220) controls reject reads via
+ * synaptics_rmi4_show_error; read-only (0444) stats reject writes via
+ * synaptics_rmi4_store_error. Both stubs return -EPERM.
+ */
+static struct device_attribute attrs[] = {
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+	__ATTR(dorecovery, 0220,
+			synaptics_rmi4_show_error,
+			fwu_sysfs_do_recovery_store),
+	__ATTR(doreflash, 0220,
+			synaptics_rmi4_show_error,
+			fwu_sysfs_do_reflash_store),
+	__ATTR(writeconfig, 0220,
+			synaptics_rmi4_show_error,
+			fwu_sysfs_write_config_store),
+	__ATTR(readconfig, 0220,
+			synaptics_rmi4_show_error,
+			fwu_sysfs_read_config_store),
+	__ATTR(configarea, 0220,
+			synaptics_rmi4_show_error,
+			fwu_sysfs_config_area_store),
+	__ATTR(imagename, 0220,
+			synaptics_rmi4_show_error,
+			fwu_sysfs_image_name_store),
+	__ATTR(imagesize, 0220,
+			synaptics_rmi4_show_error,
+			fwu_sysfs_image_size_store),
+	__ATTR(blocksize, 0444,
+			fwu_sysfs_block_size_show,
+			synaptics_rmi4_store_error),
+	__ATTR(fwblockcount, 0444,
+			fwu_sysfs_firmware_block_count_show,
+			synaptics_rmi4_store_error),
+	__ATTR(configblockcount, 0444,
+			fwu_sysfs_configuration_block_count_show,
+			synaptics_rmi4_store_error),
+	__ATTR(dispconfigblockcount, 0444,
+			fwu_sysfs_disp_config_block_count_show,
+			synaptics_rmi4_store_error),
+	__ATTR(permconfigblockcount, 0444,
+			fwu_sysfs_perm_config_block_count_show,
+			synaptics_rmi4_store_error),
+	__ATTR(blconfigblockcount, 0444,
+			fwu_sysfs_bl_config_block_count_show,
+			synaptics_rmi4_store_error),
+	__ATTR(uppblockcount, 0444,
+			fwu_sysfs_utility_parameter_block_count_show,
+			synaptics_rmi4_store_error),
+	__ATTR(guestcodeblockcount, 0444,
+			fwu_sysfs_guest_code_block_count_show,
+			synaptics_rmi4_store_error),
+	__ATTR(writeguestcode, 0220,
+			synaptics_rmi4_show_error,
+			fwu_sysfs_write_guest_code_store),
+#ifdef SYNA_TDDI
+	__ATTR(lockdowncode, 0664,
+			fwu_sysfs_read_lockdown_code_show,
+			fwu_sysfs_write_lockdown_code_store),
+#endif
+#endif
+};
+
+/* singleton updater state, allocated at module init */
+static struct synaptics_rmi4_fwu_handle *fwu;
+
+/* NOTE(review): name suggests this synchronizes module removal - confirm */
+DECLARE_COMPLETION(fwu_remove_complete);
+
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+/* serializes the sysfs handlers' access to the shared fwu state */
+DEFINE_MUTEX(fwu_sysfs_mutex);
+#endif
+
+/*
+ * Compute a Fletcher-style 32-bit checksum over len 16-bit words:
+ * two running sums, each folded back into 16 bits every step, combined
+ * as (sum2 << 16) | sum1 in *result.
+ */
+static void calculate_checksum(unsigned short *data, unsigned long len,
+		unsigned long *result)
+{
+	unsigned long temp;
+	unsigned long sum1 = 0xffff;
+	unsigned long sum2 = 0xffff;
+
+	*result = 0xffffffff;
+
+	while (len--) {
+		temp = *data;
+		sum1 += temp;
+		sum2 += sum1;
+		/* fold carries back into the low 16 bits */
+		sum1 = (sum1 & 0xffff) + (sum1 >> 16);
+		sum2 = (sum2 & 0xffff) + (sum2 >> 16);
+		data++;
+	}
+
+	*result = sum2 << 16 | sum1;
+
+	return;
+}
+
+/* Store the low 32 bits of src into dest[0..3], least significant byte first. */
+static void convert_to_little_endian(unsigned char *dest, unsigned long src)
+{
+	dest[0] = (unsigned char)(src & 0xff);
+	dest[1] = (unsigned char)((src >> 8) & 0xff);
+	dest[2] = (unsigned char)((src >> 16) & 0xff);
+	dest[3] = (unsigned char)((src >> 24) & 0xff);
+
+	return;
+}
+
+/* Assemble a 32-bit value from 4 little-endian bytes at ptr. */
+static unsigned int le_to_uint(const unsigned char *ptr)
+{
+	return (unsigned int)ptr[0] +
+			(unsigned int)ptr[1] * 0x100 +
+			(unsigned int)ptr[2] * 0x10000 +
+			(unsigned int)ptr[3] * 0x1000000;
+}
+
+#ifdef F51_DISCRETE_FORCE
+/*
+ * Discover the F51 force-calibration data layout and size the cal_data
+ * buffer accordingly.
+ *
+ * Reads the data offset from F51 query 7/8, then - if at least 10 query
+ * registers exist - the packet register info from query 9 to determine the
+ * extra per-packet data size. Grows fwu->cal_data only when the required
+ * size exceeds the current buffer.
+ *
+ * Returns 0 on success, a negative errno on register-read or allocation
+ * failure.
+ */
+static int fwu_f51_force_data_init(void)
+{
+	int retval;
+	unsigned char query_count;
+	unsigned char packet_info;
+	unsigned char offset[2];
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f51_query_base_addr + 7,
+			offset,
+			sizeof(offset));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read force data offset\n",
+				__func__);
+		return retval;
+	}
+
+	/* 16-bit little-endian offset of the calibration data */
+	fwu->cal_data_off = offset[0] | offset[1] << 8;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f51_query_base_addr,
+			&query_count,
+			sizeof(query_count));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read number of F51 query registers\n",
+				__func__);
+		return retval;
+	}
+
+	if (query_count >= 10) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				rmi4_data->f51_query_base_addr + 9,
+				&packet_info,
+				sizeof(packet_info));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read F51 packet register info\n",
+					__func__);
+			return retval;
+		}
+
+		/* bit 0 set: bits 7:1 hold the packet size in 16-bit words */
+		if (packet_info & MASK_1BIT) {
+			fwu->cal_packet_data_size = packet_info >> 1;
+			fwu->cal_packet_data_size *= 2;
+		} else {
+			fwu->cal_packet_data_size = 0;
+		}
+	} else {
+		fwu->cal_packet_data_size = 0;
+	}
+
+	fwu->cal_data_size = CAL_DATA_SIZE + fwu->cal_packet_data_size;
+	if (fwu->cal_data_size > fwu->cal_data_buf_size) {
+		kfree(fwu->cal_data);
+		fwu->cal_data_buf_size = fwu->cal_data_size;
+		fwu->cal_data = kmalloc(fwu->cal_data_buf_size, GFP_KERNEL);
+		if (!fwu->cal_data) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for fwu->cal_data\n",
+					__func__);
+			fwu->cal_data_buf_size = 0;
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+#endif
+
+/*
+ * Ensure fwu->read_config_buf can hold @count bytes, replacing it with a
+ * zero-initialized buffer when the current one is too small.
+ *
+ * Returns 0 on success or -ENOMEM on allocation failure (the buffer size
+ * is reset to 0 in that case).
+ */
+static int fwu_allocate_read_config_buf(unsigned int count)
+{
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (count <= fwu->read_config_buf_size)
+		return 0;
+
+	kfree(fwu->read_config_buf);
+	fwu->read_config_buf = kzalloc(count, GFP_KERNEL);
+	if (!fwu->read_config_buf) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for fwu->read_config_buf\n",
+				__func__);
+		fwu->read_config_buf_size = 0;
+		return -ENOMEM;
+	}
+	fwu->read_config_buf_size = count;
+
+	return 0;
+}
+
+/*
+ * Compare the device's partition table against the one carried by the
+ * firmware image and record the outcome in
+ * fwu->incompatible_partition_tables (bootloader-side areas moved) and
+ * fwu->new_partition_table (firmware/config areas moved).
+ */
+static void fwu_compare_partition_tables(void)
+{
+	bool incompatible;
+	bool new_table;
+
+	incompatible =
+		(fwu->phyaddr.bl_image != fwu->img.phyaddr.bl_image) ||
+		(fwu->phyaddr.lockdown != fwu->img.phyaddr.lockdown) ||
+		(fwu->phyaddr.bl_config != fwu->img.phyaddr.bl_config) ||
+		(fwu->phyaddr.utility_param != fwu->img.phyaddr.utility_param);
+
+	/* the flash config location only matters on BL v7 */
+	if (fwu->bl_version == BL_V7 &&
+			fwu->phyaddr.fl_config != fwu->img.phyaddr.fl_config)
+		incompatible = true;
+
+	new_table =
+		(fwu->phyaddr.ui_firmware != fwu->img.phyaddr.ui_firmware) ||
+		(fwu->phyaddr.ui_config != fwu->img.phyaddr.ui_config);
+
+	if (fwu->flash_properties.has_disp_config &&
+			fwu->phyaddr.dp_config != fwu->img.phyaddr.dp_config)
+		new_table = true;
+
+	if (fwu->has_guest_code &&
+			fwu->phyaddr.guest_code != fwu->img.phyaddr.guest_code)
+		new_table = true;
+
+	fwu->incompatible_partition_tables = incompatible;
+	fwu->new_partition_table = new_table;
+}
+
+/*
+ * Decode a V7/V8 flash partition table.
+ *
+ * @partition_table: raw table bytes — a 2-byte header followed by one
+ *                   8-byte entry per partition (fwu->partitions entries)
+ * @blkcount: receives the block count of each recognized partition
+ * @phyaddr: receives the start physical address of each partition
+ *
+ * blkcount->total_count accumulates the block counts of all recognized
+ * partitions.  Unknown partition IDs are silently skipped.
+ */
+static void fwu_parse_partition_table(const unsigned char *partition_table,
+		struct block_count *blkcount, struct physical_address *phyaddr)
+{
+	unsigned char ii;
+	unsigned char index;
+	unsigned char offset;
+	unsigned short partition_length;
+	unsigned short physical_address;
+	struct partition_table *ptable;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	for (ii = 0; ii < fwu->partitions; ii++) {
+		/* entries start after the 2-byte header */
+		index = ii * 8 + 2;
+		ptable = (struct partition_table *)&partition_table[index];
+		partition_length = ptable->partition_length_15_8 << 8 |
+				ptable->partition_length_7_0;
+		physical_address = ptable->start_physical_address_15_8 << 8 |
+				ptable->start_physical_address_7_0;
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Partition entry %d:\n",
+				__func__, ii);
+		for (offset = 0; offset < 8; offset++) {
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: 0x%02x\n",
+					__func__,
+					partition_table[index + offset]);
+		}
+		switch (ptable->partition_id) {
+		case CORE_CODE_PARTITION:
+			blkcount->ui_firmware = partition_length;
+			phyaddr->ui_firmware = physical_address;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Core code block count: %d\n",
+					__func__, blkcount->ui_firmware);
+			blkcount->total_count += partition_length;
+			break;
+		case CORE_CONFIG_PARTITION:
+			blkcount->ui_config = partition_length;
+			phyaddr->ui_config = physical_address;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Core config block count: %d\n",
+					__func__, blkcount->ui_config);
+			blkcount->total_count += partition_length;
+			break;
+		case BOOTLOADER_PARTITION:
+			blkcount->bl_image = partition_length;
+			phyaddr->bl_image = physical_address;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Bootloader block count: %d\n",
+					__func__, blkcount->bl_image);
+			blkcount->total_count += partition_length;
+			break;
+		case UTILITY_PARAMETER_PARTITION:
+			blkcount->utility_param = partition_length;
+			phyaddr->utility_param = physical_address;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Utility parameter block count: %d\n",
+					__func__, blkcount->utility_param);
+			blkcount->total_count += partition_length;
+			break;
+		case DISPLAY_CONFIG_PARTITION:
+			blkcount->dp_config = partition_length;
+			phyaddr->dp_config = physical_address;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Display config block count: %d\n",
+					__func__, blkcount->dp_config);
+			blkcount->total_count += partition_length;
+			break;
+		case FLASH_CONFIG_PARTITION:
+			blkcount->fl_config = partition_length;
+			phyaddr->fl_config = physical_address;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Flash config block count: %d\n",
+					__func__, blkcount->fl_config);
+			blkcount->total_count += partition_length;
+			break;
+		case GUEST_CODE_PARTITION:
+			blkcount->guest_code = partition_length;
+			phyaddr->guest_code = physical_address;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Guest code block count: %d\n",
+					__func__, blkcount->guest_code);
+			blkcount->total_count += partition_length;
+			break;
+		case GUEST_SERIALIZATION_PARTITION:
+			blkcount->pm_config = partition_length;
+			phyaddr->pm_config = physical_address;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Guest serialization block count: %d\n",
+					__func__, blkcount->pm_config);
+			blkcount->total_count += partition_length;
+			break;
+		case GLOBAL_PARAMETERS_PARTITION:
+			blkcount->bl_config = partition_length;
+			phyaddr->bl_config = physical_address;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Global parameters block count: %d\n",
+					__func__, blkcount->bl_config);
+			blkcount->total_count += partition_length;
+			break;
+		case DEVICE_CONFIG_PARTITION:
+			blkcount->lockdown = partition_length;
+			phyaddr->lockdown = physical_address;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Device config block count: %d\n",
+					__func__, blkcount->lockdown);
+			blkcount->total_count += partition_length;
+			break;
+		};
+	}
+
+	return;
+}
+
+/*
+ * Parse the utility container of an image header v10 image.
+ *
+ * fwu->img.utility holds a list of 4-byte little-endian container
+ * addresses relative to @image; each addressed descriptor that carries a
+ * UTILITY_PARAMETER_CONTAINER is recorded in fwu->img.utility_param[],
+ * with its first content byte taken as the parameter ID.
+ */
+static void fwu_parse_image_header_10_utility(const unsigned char *image)
+{
+	unsigned char ii;
+	unsigned char num_of_containers;
+	unsigned int addr;
+	unsigned int container_id;
+	unsigned int length;
+	const unsigned char *content;
+	struct container_descriptor *descriptor;
+
+	num_of_containers = fwu->img.utility.size / 4;
+
+	for (ii = 0; ii < num_of_containers; ii++) {
+		/* only MAX_UTILITY_PARAMS slots exist; entries beyond that
+		 * are ignored (ii only grows, so the rest are skipped too)
+		 */
+		if (ii >= MAX_UTILITY_PARAMS)
+			continue;
+		addr = le_to_uint(fwu->img.utility.data + (ii * 4));
+		descriptor = (struct container_descriptor *)(image + addr);
+		container_id = descriptor->container_id[0] |
+				descriptor->container_id[1] << 8;
+		content = image + le_to_uint(descriptor->content_address);
+		length = le_to_uint(descriptor->content_length);
+		switch (container_id) {
+		case UTILITY_PARAMETER_CONTAINER:
+			fwu->img.utility_param[ii].data = content;
+			fwu->img.utility_param[ii].size = length;
+			fwu->img.utility_param_id[ii] = content[0];
+			break;
+		default:
+			break;
+		};
+	}
+
+	return;
+}
+
+/*
+ * Parse the bootloader container of an image header v10 image and record
+ * the bootloader image, bootloader/global-parameter config, and lockdown
+ * data areas in fwu->img.
+ *
+ * The first 4 bytes of the container are skipped (ii starts at 1), hence
+ * (size - 4) / 4 address entries; each entry is a 4-byte little-endian
+ * container address relative to @image.
+ */
+static void fwu_parse_image_header_10_bootloader(const unsigned char *image)
+{
+	unsigned char ii;
+	unsigned char num_of_containers;
+	unsigned int addr;
+	unsigned int container_id;
+	unsigned int length;
+	const unsigned char *content;
+	struct container_descriptor *descriptor;
+
+	num_of_containers = (fwu->img.bootloader.size - 4) / 4;
+
+	for (ii = 1; ii <= num_of_containers; ii++) {
+		addr = le_to_uint(fwu->img.bootloader.data + (ii * 4));
+		descriptor = (struct container_descriptor *)(image + addr);
+		container_id = descriptor->container_id[0] |
+				descriptor->container_id[1] << 8;
+		content = image + le_to_uint(descriptor->content_address);
+		length = le_to_uint(descriptor->content_length);
+		switch (container_id) {
+		case BL_IMAGE_CONTAINER:
+			fwu->img.bl_image.data = content;
+			fwu->img.bl_image.size = length;
+			break;
+		case BL_CONFIG_CONTAINER:
+		case GLOBAL_PARAMETERS_CONTAINER:
+			fwu->img.bl_config.data = content;
+			fwu->img.bl_config.size = length;
+			break;
+		case BL_LOCKDOWN_INFO_CONTAINER:
+		case DEVICE_CONFIG_CONTAINER:
+			fwu->img.lockdown.data = content;
+			fwu->img.lockdown.size = length;
+			break;
+		default:
+			break;
+		};
+	}
+
+	return;
+}
+
+/*
+ * Parse an image header v10 firmware image (fwu->image) into fwu->img.
+ *
+ * Walks the top-level container list (4-byte little-endian addresses) and
+ * records the data/size of each recognized container; bootloader and
+ * utility containers are handed to their dedicated sub-parsers.  Unknown
+ * container IDs are ignored.
+ */
+static void fwu_parse_image_header_10(void)
+{
+	unsigned char ii;
+	unsigned char num_of_containers;
+	unsigned int addr;
+	unsigned int offset;
+	unsigned int container_id;
+	unsigned int length;
+	const unsigned char *image;
+	const unsigned char *content;
+	struct container_descriptor *descriptor;
+	struct image_header_10 *header;
+
+	image = fwu->image;
+	header = (struct image_header_10 *)image;
+
+	fwu->img.checksum = le_to_uint(header->checksum);
+
+	/* address of top level container */
+	offset = le_to_uint(header->top_level_container_start_addr);
+	descriptor = (struct container_descriptor *)(image + offset);
+
+	/* address of top level container content */
+	offset = le_to_uint(descriptor->content_address);
+	num_of_containers = le_to_uint(descriptor->content_length) / 4;
+
+	for (ii = 0; ii < num_of_containers; ii++) {
+		addr = le_to_uint(image + offset);
+		offset += 4;
+		descriptor = (struct container_descriptor *)(image + addr);
+		container_id = descriptor->container_id[0] |
+				descriptor->container_id[1] << 8;
+		content = image + le_to_uint(descriptor->content_address);
+		length = le_to_uint(descriptor->content_length);
+		switch (container_id) {
+		case UI_CONTAINER:
+		case CORE_CODE_CONTAINER:
+			fwu->img.ui_firmware.data = content;
+			fwu->img.ui_firmware.size = length;
+			break;
+		case UI_CONFIG_CONTAINER:
+		case CORE_CONFIG_CONTAINER:
+			fwu->img.ui_config.data = content;
+			fwu->img.ui_config.size = length;
+			break;
+		case BL_CONTAINER:
+			/* first content byte is the bootloader version */
+			fwu->img.bl_version = *content;
+			fwu->img.bootloader.data = content;
+			fwu->img.bootloader.size = length;
+			fwu_parse_image_header_10_bootloader(image);
+			break;
+		case UTILITY_CONTAINER:
+			fwu->img.utility.data = content;
+			fwu->img.utility.size = length;
+			fwu_parse_image_header_10_utility(image);
+			break;
+		case GUEST_CODE_CONTAINER:
+			fwu->img.contains_guest_code = true;
+			fwu->img.guest_code.data = content;
+			fwu->img.guest_code.size = length;
+			break;
+		case DISPLAY_CONFIG_CONTAINER:
+			fwu->img.contains_disp_config = true;
+			fwu->img.dp_config.data = content;
+			fwu->img.dp_config.size = length;
+			break;
+		case PERMANENT_CONFIG_CONTAINER:
+		case GUEST_SERIALIZATION_CONTAINER:
+			fwu->img.contains_perm_config = true;
+			fwu->img.pm_config.data = content;
+			fwu->img.pm_config.size = length;
+			break;
+		case FLASH_CONFIG_CONTAINER:
+			fwu->img.contains_flash_config = true;
+			fwu->img.fl_config.data = content;
+			fwu->img.fl_config.size = length;
+			break;
+		case GENERAL_INFORMATION_CONTAINER:
+			fwu->img.contains_firmware_id = true;
+			/* firmware ID sits 4 bytes into the container */
+			fwu->img.firmware_id = le_to_uint(content + 4);
+			break;
+		default:
+			break;
+		}
+	}
+
+	return;
+}
+
+/*
+ * Parse an image header v5/v6 firmware image (fwu->image) into fwu->img.
+ *
+ * Fixed-layout header: firmware data follows IMAGE_AREA_OFFSET (after an
+ * optional embedded bootloader), UI config follows the firmware, and the
+ * lockdown block sits immediately before IMAGE_AREA_OFFSET.
+ */
+static void fwu_parse_image_header_05_06(void)
+{
+	int retval;
+	const unsigned char *image;
+	struct image_header_05_06 *header;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	image = fwu->image;
+	header = (struct image_header_05_06 *)image;
+
+	fwu->img.checksum = le_to_uint(header->checksum);
+
+	fwu->img.bl_version = header->header_version;
+
+	fwu->img.contains_bootloader = header->options_bootloader;
+	if (fwu->img.contains_bootloader)
+		fwu->img.bootloader_size = le_to_uint(header->bootloader_size);
+
+	fwu->img.ui_firmware.size = le_to_uint(header->firmware_size);
+	if (fwu->img.ui_firmware.size) {
+		fwu->img.ui_firmware.data = image + IMAGE_AREA_OFFSET;
+		/* embedded bootloader precedes the UI firmware */
+		if (fwu->img.contains_bootloader)
+			fwu->img.ui_firmware.data += fwu->img.bootloader_size;
+	}
+
+	/* TDDI v6 images keep the firmware at the image area start */
+	if ((fwu->img.bl_version == BL_V6) && header->options_tddi)
+		fwu->img.ui_firmware.data = image + IMAGE_AREA_OFFSET;
+
+	fwu->img.ui_config.size = le_to_uint(header->config_size);
+	if (fwu->img.ui_config.size) {
+		fwu->img.ui_config.data = fwu->img.ui_firmware.data +
+				fwu->img.ui_firmware.size;
+	}
+
+	if (fwu->img.contains_bootloader || header->options_tddi)
+		fwu->img.contains_disp_config = true;
+	else
+		fwu->img.contains_disp_config = false;
+
+	if (fwu->img.contains_disp_config) {
+		fwu->img.disp_config_offset = le_to_uint(header->dsp_cfg_addr);
+		fwu->img.dp_config.size = le_to_uint(header->dsp_cfg_size);
+		fwu->img.dp_config.data = image + fwu->img.disp_config_offset;
+	} else {
+		/* without display config the header carries the customer
+		 * product ID instead
+		 */
+		retval = secure_memcpy(fwu->img.cstmr_product_id,
+				sizeof(fwu->img.cstmr_product_id),
+				header->cstmr_product_id,
+				sizeof(header->cstmr_product_id),
+				PRODUCT_ID_SIZE);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to copy custom product ID string\n",
+					__func__);
+		}
+		fwu->img.cstmr_product_id[PRODUCT_ID_SIZE] = 0;
+	}
+
+	fwu->img.contains_firmware_id = header->options_firmware_id;
+	if (fwu->img.contains_firmware_id)
+		fwu->img.firmware_id = le_to_uint(header->firmware_id);
+
+	retval = secure_memcpy(fwu->img.product_id,
+			sizeof(fwu->img.product_id),
+			header->product_id,
+			sizeof(header->product_id),
+			PRODUCT_ID_SIZE);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy product ID string\n",
+				__func__);
+	}
+	fwu->img.product_id[PRODUCT_ID_SIZE] = 0;
+
+	/* lockdown data sits right before the image area */
+	fwu->img.lockdown.size = LOCKDOWN_SIZE;
+	fwu->img.lockdown.data = image + IMAGE_AREA_OFFSET - LOCKDOWN_SIZE;
+
+	return;
+}
+
+/*
+ * Parse fwu->image into fwu->img, dispatching on the header version, and
+ * (for BL v7/v8) validate/compare the image's partition table against the
+ * device's.
+ *
+ * Returns 0 on success or -EINVAL for an unsupported header format or a
+ * v7/v8 image without a flash config.
+ */
+static int fwu_parse_image_info(void)
+{
+	struct image_header_10 *header;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	header = (struct image_header_10 *)fwu->image;
+
+	memset(&fwu->img, 0x00, sizeof(fwu->img));
+
+	switch (header->major_header_version) {
+	case IMAGE_HEADER_VERSION_10:
+		fwu_parse_image_header_10();
+		break;
+	case IMAGE_HEADER_VERSION_05:
+	case IMAGE_HEADER_VERSION_06:
+		fwu_parse_image_header_05_06();
+		break;
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Unsupported image file format (0x%02x)\n",
+				__func__, header->major_header_version);
+		return -EINVAL;
+	}
+
+	if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8) {
+		/* v7/v8 images must carry their own flash config */
+		if (!fwu->img.contains_flash_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: No flash config found in firmware image\n",
+					__func__);
+			return -EINVAL;
+		}
+
+		fwu_parse_partition_table(fwu->img.fl_config.data,
+				&fwu->img.blkcount, &fwu->img.phyaddr);
+
+		if (fwu->img.blkcount.utility_param)
+			fwu->img.contains_utility_param = true;
+
+		fwu_compare_partition_tables();
+	} else {
+		fwu->new_partition_table = false;
+		fwu->incompatible_partition_tables = false;
+	}
+
+	return 0;
+}
+
+/*
+ * Read the F34 flash status and command registers and cache the decoded
+ * values in fwu->in_bl_mode, fwu->flash_status and fwu->command.
+ *
+ * The status/command bit layout depends on the bootloader generation
+ * (v5, v6, v7/v8).  While writing the bootloader itself both cached
+ * values are forced to 0 (status reads are not meaningful then).
+ *
+ * Returns 0 on success or a negative errno on register-read failure.
+ */
+static int fwu_read_flash_status(void)
+{
+	int retval;
+	unsigned char status;
+	unsigned char command;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fwu->f34_fd.data_base_addr + fwu->off.flash_status,
+			&status,
+			sizeof(status));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read flash status\n",
+				__func__);
+		return retval;
+	}
+
+	/* bit 7 flags bootloader mode */
+	fwu->in_bl_mode = status >> 7;
+
+	if (fwu->bl_version == BL_V5)
+		fwu->flash_status = (status >> 4) & MASK_3BIT;
+	else if (fwu->bl_version == BL_V6)
+		fwu->flash_status = status & MASK_3BIT;
+	else if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+		fwu->flash_status = status & MASK_5BIT;
+
+	if (fwu->write_bootloader)
+		fwu->flash_status = 0x00;
+
+	if (fwu->flash_status != 0x00) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Flash status = %d, command = 0x%02x\n",
+				__func__, fwu->flash_status, fwu->command);
+	}
+
+	/* NOTE(review): status 0x08 on v7/v8 is treated as success here -
+	 * confirm against the F34 v7 flash spec
+	 */
+	if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8) {
+		if (fwu->flash_status == 0x08)
+			fwu->flash_status = 0x00;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fwu->f34_fd.data_base_addr + fwu->off.flash_cmd,
+			&command,
+			sizeof(command));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read flash command\n",
+				__func__);
+		return retval;
+	}
+
+	if (fwu->bl_version == BL_V5)
+		fwu->command = command & MASK_4BIT;
+	else if (fwu->bl_version == BL_V6)
+		fwu->command = command & MASK_6BIT;
+	else if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+		fwu->command = command;
+
+	if (fwu->write_bootloader)
+		fwu->command = 0x00;
+
+	return 0;
+}
+
+/*
+ * Wait up to @timeout_ms for the flash controller to report an idle
+ * command with a clean status.  When @poll is true the status is re-read
+ * every sleep interval; otherwise it is read only on the final attempt.
+ *
+ * Returns 0 when idle, -ETIMEDOUT otherwise.
+ */
+static int fwu_wait_for_idle(int timeout_ms, bool poll)
+{
+	int attempt;
+	int max_attempts = ((timeout_ms * 1000) / MAX_SLEEP_TIME_US) + 1;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	for (attempt = 1; attempt <= max_attempts; attempt++) {
+		usleep_range(MIN_SLEEP_TIME_US, MAX_SLEEP_TIME_US);
+
+		if (poll || (attempt == max_attempts))
+			fwu_read_flash_status();
+
+		if ((fwu->command == CMD_IDLE) && (fwu->flash_status == 0x00))
+			return 0;
+	}
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Timed out waiting for idle status\n",
+			__func__);
+
+	return -ETIMEDOUT;
+}
+
+/*
+ * Issue a V7 erase/enter-bootloader command as one register transaction.
+ *
+ * Fills the F34 data-1..5 block (partition ID, command, bootloader ID as
+ * payload) and writes it in a single burst starting at the partition ID
+ * offset, as required for erase and enter-bootloader operations.
+ *
+ * Returns 0 on success or a negative errno on register-write failure.
+ */
+static int fwu_write_f34_v7_command_single_transaction(unsigned char cmd)
+{
+	int retval;
+	unsigned char data_base;
+	struct f34_v7_data_1_5 data_1_5;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f34_fd.data_base_addr;
+
+	memset(data_1_5.data, 0x00, sizeof(data_1_5.data));
+
+	switch (cmd) {
+	case CMD_ERASE_ALL:
+		data_1_5.partition_id = CORE_CODE_PARTITION;
+		data_1_5.command = CMD_V7_ERASE_AP;
+		break;
+	case CMD_ERASE_UI_FIRMWARE:
+		data_1_5.partition_id = CORE_CODE_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ERASE_BL_CONFIG:
+		data_1_5.partition_id = GLOBAL_PARAMETERS_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ERASE_UI_CONFIG:
+		data_1_5.partition_id = CORE_CONFIG_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ERASE_DISP_CONFIG:
+		data_1_5.partition_id = DISPLAY_CONFIG_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ERASE_FLASH_CONFIG:
+		data_1_5.partition_id = FLASH_CONFIG_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ERASE_GUEST_CODE:
+		data_1_5.partition_id = GUEST_CODE_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ERASE_BOOTLOADER:
+		data_1_5.partition_id = BOOTLOADER_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ERASE_UTILITY_PARAMETER:
+		data_1_5.partition_id = UTILITY_PARAMETER_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ENABLE_FLASH_PROG:
+		data_1_5.partition_id = BOOTLOADER_PARTITION;
+		data_1_5.command = CMD_V7_ENTER_BL;
+		break;
+	};
+
+	/* bootloader ID doubles as the command payload */
+	data_1_5.payload_0 = fwu->bootloader_id[0];
+	data_1_5.payload_1 = fwu->bootloader_id[1];
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.partition_id,
+			data_1_5.data,
+			sizeof(data_1_5.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write single transaction command\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Translate a generic flash command (CMD_*) to its V7 opcode and issue it.
+ *
+ * Erase and enter-bootloader commands are routed through the
+ * single-transaction path; everything else writes the opcode to the flash
+ * command register.  fwu->command caches the opcode for the idle poll.
+ *
+ * Returns 0 on success, -EINVAL for an unknown command, or a negative
+ * errno on register-write failure.
+ */
+static int fwu_write_f34_v7_command(unsigned char cmd)
+{
+	int retval;
+	unsigned char data_base;
+	unsigned char command;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f34_fd.data_base_addr;
+
+	switch (cmd) {
+	case CMD_WRITE_FW:
+	case CMD_WRITE_CONFIG:
+	case CMD_WRITE_LOCKDOWN:
+	case CMD_WRITE_GUEST_CODE:
+	case CMD_WRITE_BOOTLOADER:
+	case CMD_WRITE_UTILITY_PARAM:
+		command = CMD_V7_WRITE;
+		break;
+	case CMD_READ_CONFIG:
+		command = CMD_V7_READ;
+		break;
+	case CMD_ERASE_ALL:
+		command = CMD_V7_ERASE_AP;
+		break;
+	case CMD_ERASE_UI_FIRMWARE:
+	case CMD_ERASE_BL_CONFIG:
+	case CMD_ERASE_UI_CONFIG:
+	case CMD_ERASE_DISP_CONFIG:
+	case CMD_ERASE_FLASH_CONFIG:
+	case CMD_ERASE_GUEST_CODE:
+	case CMD_ERASE_BOOTLOADER:
+	case CMD_ERASE_UTILITY_PARAMETER:
+		command = CMD_V7_ERASE;
+		break;
+	case CMD_ENABLE_FLASH_PROG:
+		command = CMD_V7_ENTER_BL;
+		break;
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid command 0x%02x\n",
+				__func__, cmd);
+		return -EINVAL;
+	};
+
+	fwu->command = command;
+
+	/* erase and enter-bootloader go out as one burst transaction */
+	switch (cmd) {
+	case CMD_ERASE_ALL:
+	case CMD_ERASE_UI_FIRMWARE:
+	case CMD_ERASE_BL_CONFIG:
+	case CMD_ERASE_UI_CONFIG:
+	case CMD_ERASE_DISP_CONFIG:
+	case CMD_ERASE_FLASH_CONFIG:
+	case CMD_ERASE_GUEST_CODE:
+	case CMD_ERASE_BOOTLOADER:
+	case CMD_ERASE_UTILITY_PARAMETER:
+	case CMD_ENABLE_FLASH_PROG:
+		retval = fwu_write_f34_v7_command_single_transaction(cmd);
+		if (retval < 0)
+			return retval;
+		else
+			return 0;
+	default:
+		break;
+	};
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.flash_cmd,
+			&command,
+			sizeof(command));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write flash command\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Translate a generic flash command (CMD_*) to its V5/V6 opcode and issue
+ * it.  Erase and enter-bootloader commands first unlock the operation by
+ * writing the bootloader ID to the payload registers.  fwu->command
+ * caches the opcode for the idle poll.
+ *
+ * Returns 0 on success, -EINVAL for an unknown command, or a negative
+ * errno on register-write failure.
+ */
+static int fwu_write_f34_v5v6_command(unsigned char cmd)
+{
+	int retval;
+	unsigned char data_base;
+	unsigned char command;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f34_fd.data_base_addr;
+
+	switch (cmd) {
+	case CMD_IDLE:
+		command = CMD_V5V6_IDLE;
+		break;
+	case CMD_WRITE_FW:
+		command = CMD_V5V6_WRITE_FW;
+		break;
+	case CMD_WRITE_CONFIG:
+		command = CMD_V5V6_WRITE_CONFIG;
+		break;
+	case CMD_WRITE_LOCKDOWN:
+		command = CMD_V5V6_WRITE_LOCKDOWN;
+		break;
+	case CMD_WRITE_GUEST_CODE:
+		command = CMD_V5V6_WRITE_GUEST_CODE;
+		break;
+	case CMD_READ_CONFIG:
+		command = CMD_V5V6_READ_CONFIG;
+		break;
+	case CMD_ERASE_ALL:
+		command = CMD_V5V6_ERASE_ALL;
+		break;
+	case CMD_ERASE_UI_CONFIG:
+		command = CMD_V5V6_ERASE_UI_CONFIG;
+		break;
+	case CMD_ERASE_DISP_CONFIG:
+		command = CMD_V5V6_ERASE_DISP_CONFIG;
+		break;
+	case CMD_ERASE_GUEST_CODE:
+		command = CMD_V5V6_ERASE_GUEST_CODE;
+		break;
+	case CMD_ENABLE_FLASH_PROG:
+		command = CMD_V5V6_ENABLE_FLASH_PROG;
+		break;
+#ifdef SYNA_TDDI
+	case CMD_ERASE_CHIP:
+		command = CMD_V5V6_ERASE_CHIP;
+		break;
+	case CMD_ERASE_FORCE_CONFIG:
+		command = CMD_V5V6_ERASE_FORCE_CONFIG;
+		break;
+	case CMD_READ_FORCE_CONFIG:
+		command = CMD_V5V6_READ_FORCE_CONFIG;
+		break;
+	case CMD_WRITE_FORCE_CONFIG:
+		command = CMD_V5V6_WRITE_CONFIG;
+		break;
+	case CMD_ERASE_LOCKDOWN_DATA:
+		command = CMD_V5V6_ERASE_LOCKDOWN_DATA;
+		break;
+	case CMD_READ_LOCKDOWN_DATA:
+		command = CMD_V5V6_READ_LOCKDOWN_DATA;
+		break;
+	case CMD_WRITE_LOCKDOWN_DATA:
+		command = CMD_V5V6_WRITE_LOCKDOWN_DATA;
+		break;
+	case CMD_ERASE_LCM_DATA:
+		command = CMD_V5V6_ERASE_LCM_DATA;
+		break;
+	case CMD_ERASE_OEM_DATA:
+		command = CMD_V5V6_ERASE_OEM_DATA;
+		break;
+#endif
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid command 0x%02x\n",
+				__func__, cmd);
+		return -EINVAL;
+	}
+
+	/* destructive commands are gated by writing the bootloader ID */
+	switch (cmd) {
+	case CMD_ERASE_ALL:
+	case CMD_ERASE_UI_CONFIG:
+	case CMD_ERASE_DISP_CONFIG:
+	case CMD_ERASE_GUEST_CODE:
+#ifdef SYNA_TDDI
+	case CMD_ERASE_CHIP:
+	case CMD_ERASE_FORCE_CONFIG:
+	case CMD_ERASE_LOCKDOWN_DATA:
+	case CMD_ERASE_LCM_DATA:
+	case CMD_ERASE_OEM_DATA:
+#endif
+	case CMD_ENABLE_FLASH_PROG:
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				data_base + fwu->off.payload,
+				fwu->bootloader_id,
+				sizeof(fwu->bootloader_id));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write bootloader ID\n",
+					__func__);
+			return retval;
+		}
+		break;
+	default:
+		break;
+	};
+
+	fwu->command = command;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.flash_cmd,
+			&command,
+			sizeof(command));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write command 0x%02x\n",
+				__func__, command);
+		return retval;
+	}
+
+	return 0;
+}
+
+/* Dispatch @cmd to the writer matching the bootloader generation. */
+static int fwu_write_f34_command(unsigned char cmd)
+{
+	bool bl_v7_v8 = (fwu->bl_version == BL_V7) ||
+			(fwu->bl_version == BL_V8);
+
+	return bl_v7_v8 ?
+			fwu_write_f34_v7_command(cmd) :
+			fwu_write_f34_v5v6_command(cmd);
+}
+
+/*
+ * Write the target partition ID for a V7/V8 flash command to the F34
+ * partition ID register.
+ *
+ * @cmd: generic flash command (CMD_*) whose target partition is looked up
+ *
+ * Returns 0 on success, -EINVAL for an unrecognized command or config
+ * area, or a negative errno on register-write failure.
+ */
+static int fwu_write_f34_v7_partition_id(unsigned char cmd)
+{
+	int retval;
+	unsigned char data_base;
+	unsigned char partition;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f34_fd.data_base_addr;
+
+	switch (cmd) {
+	case CMD_WRITE_FW:
+		partition = CORE_CODE_PARTITION;
+		break;
+	case CMD_WRITE_CONFIG:
+	case CMD_READ_CONFIG:
+		if (fwu->config_area == UI_CONFIG_AREA)
+			partition = CORE_CONFIG_PARTITION;
+		else if (fwu->config_area == DP_CONFIG_AREA)
+			partition = DISPLAY_CONFIG_PARTITION;
+		else if (fwu->config_area == PM_CONFIG_AREA)
+			partition = GUEST_SERIALIZATION_PARTITION;
+		else if (fwu->config_area == BL_CONFIG_AREA)
+			partition = GLOBAL_PARAMETERS_PARTITION;
+		else if (fwu->config_area == FLASH_CONFIG_AREA)
+			partition = FLASH_CONFIG_PARTITION;
+		else if (fwu->config_area == UPP_AREA)
+			partition = UTILITY_PARAMETER_PARTITION;
+		else {
+			/* Fix: previously an unknown config area left
+			 * partition uninitialized and wrote a garbage ID
+			 * to the device (CERT EXP33-C); fail instead.
+			 */
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Invalid config area 0x%02x\n",
+					__func__, fwu->config_area);
+			return -EINVAL;
+		}
+		break;
+	case CMD_WRITE_LOCKDOWN:
+		partition = DEVICE_CONFIG_PARTITION;
+		break;
+	case CMD_WRITE_GUEST_CODE:
+		partition = GUEST_CODE_PARTITION;
+		break;
+	case CMD_WRITE_BOOTLOADER:
+		partition = BOOTLOADER_PARTITION;
+		break;
+	case CMD_WRITE_UTILITY_PARAM:
+		partition = UTILITY_PARAMETER_PARTITION;
+		break;
+	case CMD_ERASE_ALL:
+		partition = CORE_CODE_PARTITION;
+		break;
+	case CMD_ERASE_BL_CONFIG:
+		partition = GLOBAL_PARAMETERS_PARTITION;
+		break;
+	case CMD_ERASE_UI_CONFIG:
+		partition = CORE_CONFIG_PARTITION;
+		break;
+	case CMD_ERASE_DISP_CONFIG:
+		partition = DISPLAY_CONFIG_PARTITION;
+		break;
+	case CMD_ERASE_FLASH_CONFIG:
+		partition = FLASH_CONFIG_PARTITION;
+		break;
+	case CMD_ERASE_GUEST_CODE:
+		partition = GUEST_CODE_PARTITION;
+		break;
+	case CMD_ERASE_BOOTLOADER:
+		partition = BOOTLOADER_PARTITION;
+		break;
+	case CMD_ENABLE_FLASH_PROG:
+		partition = BOOTLOADER_PARTITION;
+		break;
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid command 0x%02x\n",
+				__func__, cmd);
+		return -EINVAL;
+	}
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.partition_id,
+			&partition,
+			sizeof(partition));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write partition ID\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/* Partition IDs only exist on BL v7/v8; older bootloaders need nothing. */
+static int fwu_write_f34_partition_id(unsigned char cmd)
+{
+	if (fwu->bl_version != BL_V7 && fwu->bl_version != BL_V8)
+		return 0;
+
+	return fwu_write_f34_v7_partition_id(cmd);
+}
+
+/*
+ * Read the V7/V8 partition table out of the flash config area into
+ * @partition_table (caller-allocated, fwu->partition_table_bytes bytes).
+ *
+ * Programs partition ID, block number 0 and the flash-config transfer
+ * length, issues CMD_READ_CONFIG, waits for idle, then reads the payload.
+ * Note: fwu->config_area is left set to FLASH_CONFIG_AREA.
+ *
+ * Returns 0 on success or a negative errno on any step's failure.
+ */
+static int fwu_read_f34_v7_partition_table(unsigned char *partition_table)
+{
+	int retval;
+	unsigned char data_base;
+	unsigned char length[2];
+	unsigned short block_number = 0;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f34_fd.data_base_addr;
+
+	fwu->config_area = FLASH_CONFIG_AREA;
+
+	retval = fwu_write_f34_partition_id(CMD_READ_CONFIG);
+	if (retval < 0)
+		return retval;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.block_number,
+			(unsigned char *)&block_number,
+			sizeof(block_number));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write block number\n",
+				__func__);
+		return retval;
+	}
+
+	/* transfer length in blocks, little-endian */
+	length[0] = (unsigned char)(fwu->flash_config_length & MASK_8BIT);
+	length[1] = (unsigned char)(fwu->flash_config_length >> 8);
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.transfer_length,
+			length,
+			sizeof(length));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write transfer length\n",
+				__func__);
+		return retval;
+	}
+
+	retval = fwu_write_f34_command(CMD_READ_CONFIG);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write command\n",
+				__func__);
+		return retval;
+	}
+
+	msleep(READ_CONFIG_WAIT_MS);
+
+	retval = fwu_wait_for_idle(WRITE_WAIT_MS, true);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to wait for idle status\n",
+				__func__);
+		return retval;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			data_base + fwu->off.payload,
+			partition_table,
+			fwu->partition_table_bytes);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read block data\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Read and decode the F34 V7/V8 query registers.
+ *
+ * Caches the bootloader ID (promoting fwu->bl_version to BL_V8 when the
+ * major revision says so), block/flash-config/payload sizes, the fixed V7
+ * data register offsets, counts the supported partitions from the bitmap
+ * at the end of queries 1-7, then reads and parses the device's partition
+ * table to derive the has_* capability flags.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure, or a negative
+ * errno on register-read failure.
+ */
+static int fwu_read_f34_v7_queries(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char query_base;
+	unsigned char index;
+	unsigned char offset;
+	unsigned char *ptable;
+	struct f34_v7_query_0 query_0;
+	struct f34_v7_query_1_7 query_1_7;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	query_base = fwu->f34_fd.query_base_addr;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			query_base,
+			query_0.data,
+			sizeof(query_0.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read query 0\n",
+				__func__);
+		return retval;
+	}
+
+	/* queries 1-7 follow query 0's variable-size subpacket */
+	offset = query_0.subpacket_1_size + 1;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			query_base + offset,
+			query_1_7.data,
+			sizeof(query_1_7.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read queries 1 to 7\n",
+				__func__);
+		return retval;
+	}
+
+	fwu->bootloader_id[0] = query_1_7.bl_minor_revision;
+	fwu->bootloader_id[1] = query_1_7.bl_major_revision;
+
+	if (fwu->bootloader_id[1] == BL_V8)
+		fwu->bl_version = BL_V8;
+
+	fwu->block_size = query_1_7.block_size_15_8 << 8 |
+			query_1_7.block_size_7_0;
+
+	fwu->flash_config_length = query_1_7.flash_config_length_15_8 << 8 |
+			query_1_7.flash_config_length_7_0;
+
+	fwu->payload_length = query_1_7.payload_length_15_8 << 8 |
+			query_1_7.payload_length_7_0;
+
+	/* V7 data register layout is fixed */
+	fwu->off.flash_status = V7_FLASH_STATUS_OFFSET;
+	fwu->off.partition_id = V7_PARTITION_ID_OFFSET;
+	fwu->off.block_number = V7_BLOCK_NUMBER_OFFSET;
+	fwu->off.transfer_length = V7_TRANSFER_LENGTH_OFFSET;
+	fwu->off.flash_cmd = V7_COMMAND_OFFSET;
+	fwu->off.payload = V7_PAYLOAD_OFFSET;
+
+	/* the supported-partition bitmap ends queries 1-7 */
+	index = sizeof(query_1_7.data) - V7_PARTITION_SUPPORT_BYTES;
+
+	fwu->partitions = 0;
+	for (offset = 0; offset < V7_PARTITION_SUPPORT_BYTES; offset++) {
+		for (ii = 0; ii < 8; ii++) {
+			if (query_1_7.data[index + offset] & (1 << ii))
+				fwu->partitions++;
+		}
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Supported partitions: 0x%02x\n",
+				__func__, query_1_7.data[index + offset]);
+	}
+
+	/* 2-byte header + 8 bytes per partition entry */
+	fwu->partition_table_bytes = fwu->partitions * 8 + 2;
+
+	ptable = kzalloc(fwu->partition_table_bytes, GFP_KERNEL);
+	if (!ptable) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for partition table\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	retval = fwu_read_f34_v7_partition_table(ptable);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read partition table\n",
+				__func__);
+		kfree(ptable);
+		return retval;
+	}
+
+	fwu_parse_partition_table(ptable, &fwu->blkcount, &fwu->phyaddr);
+
+	if (fwu->blkcount.dp_config)
+		fwu->flash_properties.has_disp_config = 1;
+	else
+		fwu->flash_properties.has_disp_config = 0;
+
+	if (fwu->blkcount.pm_config)
+		fwu->flash_properties.has_pm_config = 1;
+	else
+		fwu->flash_properties.has_pm_config = 0;
+
+	if (fwu->blkcount.bl_config)
+		fwu->flash_properties.has_bl_config = 1;
+	else
+		fwu->flash_properties.has_bl_config = 0;
+
+	if (fwu->blkcount.guest_code)
+		fwu->has_guest_code = 1;
+	else
+		fwu->has_guest_code = 0;
+
+	if (fwu->blkcount.utility_param)
+		fwu->has_utility_param = 1;
+	else
+		fwu->has_utility_param = 0;
+
+	kfree(ptable);
+
+	return 0;
+}
+
+/*
+ * fwu_read_f34_v5v6_queries() - read F34 query registers (bootloader v5/v6)
+ *
+ * Reads the bootloader ID, flash block size, flash properties, and the
+ * per-partition block counts from the F34 query register space, and fills
+ * in the register offset table (fwu->off) for the detected bootloader
+ * generation.
+ *
+ * Returns 0 on success or a negative error code from the register reads.
+ */
+static int fwu_read_f34_v5v6_queries(void)
+{
+	int retval;
+	unsigned char count;
+	unsigned char base;
+	unsigned char offset;
+	unsigned char buf[10];
+	struct f34_v5v6_flash_properties_2 properties_2;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	base = fwu->f34_fd.query_base_addr;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			base + V5V6_BOOTLOADER_ID_OFFSET,
+			fwu->bootloader_id,
+			sizeof(fwu->bootloader_id));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read bootloader ID\n",
+				__func__);
+		return retval;
+	}
+
+	/* The query/data register layout differs between v5 and v6. */
+	if (fwu->bl_version == BL_V5) {
+		fwu->off.properties = V5_PROPERTIES_OFFSET;
+		fwu->off.block_size = V5_BLOCK_SIZE_OFFSET;
+		fwu->off.block_count = V5_BLOCK_COUNT_OFFSET;
+		fwu->off.block_number = V5_BLOCK_NUMBER_OFFSET;
+		fwu->off.payload = V5_BLOCK_DATA_OFFSET;
+	} else if (fwu->bl_version == BL_V6) {
+		fwu->off.properties = V6_PROPERTIES_OFFSET;
+		fwu->off.properties_2 = V6_PROPERTIES_2_OFFSET;
+		fwu->off.block_size = V6_BLOCK_SIZE_OFFSET;
+		fwu->off.block_count = V6_BLOCK_COUNT_OFFSET;
+		fwu->off.gc_block_count = V6_GUEST_CODE_BLOCK_COUNT_OFFSET;
+		fwu->off.block_number = V6_BLOCK_NUMBER_OFFSET;
+		fwu->off.payload = V6_BLOCK_DATA_OFFSET;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			base + fwu->off.block_size,
+			buf,
+			2);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read block size info\n",
+				__func__);
+		return retval;
+	}
+
+	/* Assemble the two-byte block size into fwu->block_size. */
+	batohs(&fwu->block_size, &(buf[0]));
+
+	/* v5 keeps the flash command right after the payload area. */
+	if (fwu->bl_version == BL_V5) {
+		fwu->off.flash_cmd = fwu->off.payload + fwu->block_size;
+		fwu->off.flash_status = fwu->off.flash_cmd;
+	} else if (fwu->bl_version == BL_V6) {
+		fwu->off.flash_cmd = V6_FLASH_COMMAND_OFFSET;
+		fwu->off.flash_status = V6_FLASH_STATUS_OFFSET;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			base + fwu->off.properties,
+			fwu->flash_properties.data,
+			sizeof(fwu->flash_properties.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read flash properties\n",
+				__func__);
+		return retval;
+	}
+
+	/*
+	 * Block counts: UI firmware and UI config (2 bytes each) are always
+	 * present; permanent, bootloader and display config counts follow
+	 * in that order when advertised by the flash properties.
+	 */
+	count = 4;
+
+	if (fwu->flash_properties.has_pm_config)
+		count += 2;
+
+	if (fwu->flash_properties.has_bl_config)
+		count += 2;
+
+	if (fwu->flash_properties.has_disp_config)
+		count += 2;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			base + fwu->off.block_count,
+			buf,
+			count);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read block count info\n",
+				__func__);
+		return retval;
+	}
+
+	batohs(&fwu->blkcount.ui_firmware, &(buf[0]));
+	batohs(&fwu->blkcount.ui_config, &(buf[2]));
+
+	count = 4;
+
+	if (fwu->flash_properties.has_pm_config) {
+		batohs(&fwu->blkcount.pm_config, &(buf[count]));
+		count += 2;
+	}
+
+	if (fwu->flash_properties.has_bl_config) {
+		batohs(&fwu->blkcount.bl_config, &(buf[count]));
+		count += 2;
+	}
+
+	if (fwu->flash_properties.has_disp_config)
+		batohs(&fwu->blkcount.dp_config, &(buf[count]));
+
+	fwu->has_guest_code = false;
+#ifdef SYNA_TDDI
+	fwu->has_force_config = false;
+	fwu->has_lockdown_data = false;
+	fwu->has_lcm_data = false;
+	fwu->has_oem_data = false;
+#endif
+
+	/* Query 4 (properties 2) advertises additional optional partitions. */
+	if (fwu->flash_properties.has_query4) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				base + fwu->off.properties_2,
+				properties_2.data,
+				sizeof(properties_2.data));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read flash properties 2\n",
+					__func__);
+			return retval;
+		}
+		/*
+		 * Optional block-count registers follow properties 2; each
+		 * present feature occupies one register address (count is
+		 * incremented by 1 per feature, each read is 2 bytes).
+		 */
+		offset = fwu->off.properties_2 + 1;
+		count = 0;
+		if (properties_2.has_guest_code) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					base + offset + count,
+					buf,
+					2);
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to read guest code block count\n",
+						__func__);
+				return retval;
+			}
+
+			batohs(&fwu->blkcount.guest_code, &(buf[0]));
+			count++;
+			fwu->has_guest_code = true;
+		}
+#ifdef SYNA_TDDI
+		if (properties_2.has_force_config) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					base + offset + count,
+					buf,
+					2);
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read tddi force block count\n",
+					__func__);
+				return retval;
+			}
+			batohs(&fwu->blkcount.tddi_force_config, &(buf[0]));
+			count++;
+			fwu->has_force_config = true;
+		}
+		if (properties_2.has_lockdown_data) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					base + offset + count,
+					buf,
+					2);
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read tddi lockdown block count\n",
+					__func__);
+				return retval;
+			}
+			batohs(&fwu->blkcount.tddi_lockdown_data, &(buf[0]));
+			count++;
+			fwu->has_lockdown_data = true;
+		}
+		if (properties_2.has_lcm_data) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					base + offset + count,
+					buf,
+					2);
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read tddi lcm block count\n",
+					__func__);
+				return retval;
+			}
+			batohs(&fwu->blkcount.tddi_lcm_data, &(buf[0]));
+			count++;
+			fwu->has_lcm_data = true;
+		}
+		if (properties_2.has_oem_data) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					base + offset + count,
+					buf,
+					2);
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read tddi oem block count\n",
+					__func__);
+				return retval;
+			}
+			batohs(&fwu->blkcount.tddi_oem_data, &(buf[0]));
+			fwu->has_oem_data = true;
+		}
+#endif
+	}
+
+	/* Utility parameters exist only on the v7/v8 bootloaders. */
+	fwu->has_utility_param = false;
+
+	return 0;
+}
+
+/*
+ * fwu_read_f34_queries() - read the F34 query registers
+ *
+ * Clears the cached block counts and physical addresses, then dispatches
+ * to the query reader matching the bootloader generation detected during
+ * the PDT scan.  Returns 0 on success or a negative error code.
+ */
+static int fwu_read_f34_queries(void)
+{
+	memset(&fwu->blkcount, 0x00, sizeof(fwu->blkcount));
+	memset(&fwu->phyaddr, 0x00, sizeof(fwu->phyaddr));
+
+	if (fwu->bl_version == BL_V7)
+		return fwu_read_f34_v7_queries();
+
+	return fwu_read_f34_v5v6_queries();
+}
+
+/*
+ * fwu_write_f34_v7_blocks() - write data blocks to flash (bootloader v7/v8)
+ * @block_ptr: source buffer holding block_cnt * fwu->block_size bytes
+ * @block_cnt: number of flash blocks to write
+ * @command: F34 flash command selecting the operation/partition
+ *
+ * Programs the partition ID and starting block number, then streams the
+ * data in chunks of at most fwu->payload_length blocks per command,
+ * waiting for the controller to return to idle after each chunk.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int fwu_write_f34_v7_blocks(unsigned char *block_ptr,
+		unsigned short block_cnt, unsigned char command)
+{
+	int retval;
+	unsigned char data_base;
+	unsigned char length[2];
+	unsigned short transfer;
+	unsigned short remaining = block_cnt;
+	unsigned short block_number = 0;
+	unsigned short left_bytes;
+	unsigned short write_size;
+	unsigned short max_write_size;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f34_fd.data_base_addr;
+
+	retval = fwu_write_f34_partition_id(command);
+	if (retval < 0)
+		return retval;
+
+	/* Always start writing at block 0 of the selected partition. */
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.block_number,
+			(unsigned char *)&block_number,
+			sizeof(block_number));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write block number\n",
+				__func__);
+		return retval;
+	}
+
+	do {
+		/* transfer = min(remaining, payload_length) */
+		if (remaining / fwu->payload_length)
+			transfer = fwu->payload_length;
+		else
+			transfer = remaining;
+
+		/* Transfer length is written little endian. */
+		length[0] = (unsigned char)(transfer & MASK_8BIT);
+		length[1] = (unsigned char)(transfer >> 8);
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				data_base + fwu->off.transfer_length,
+				length,
+				sizeof(length));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write transfer length (remaining = %d)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		retval = fwu_write_f34_command(command);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write command (remaining = %d)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		/*
+		 * Optionally cap the size of an individual payload register
+		 * write (bus limitation), keeping writes block aligned so a
+		 * block is never split across commands.
+		 */
+#ifdef MAX_WRITE_SIZE
+		max_write_size = MAX_WRITE_SIZE;
+		if (max_write_size >= transfer * fwu->block_size)
+			max_write_size = transfer * fwu->block_size;
+		else if (max_write_size > fwu->block_size)
+			max_write_size -= max_write_size % fwu->block_size;
+		else
+			max_write_size = fwu->block_size;
+#else
+		max_write_size = transfer * fwu->block_size;
+#endif
+		left_bytes = transfer * fwu->block_size;
+
+		do {
+			/* write_size = min(left_bytes, max_write_size) */
+			if (left_bytes / max_write_size)
+				write_size = max_write_size;
+			else
+				write_size = left_bytes;
+
+			retval = synaptics_rmi4_reg_write(rmi4_data,
+					data_base + fwu->off.payload,
+					block_ptr,
+					write_size);
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to write block data (remaining = %d)\n",
+						__func__, remaining);
+				return retval;
+			}
+
+			block_ptr += write_size;
+			left_bytes -= write_size;
+		} while (left_bytes);
+
+		retval = fwu_wait_for_idle(WRITE_WAIT_MS, false);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to wait for idle status (remaining = %d)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		remaining -= transfer;
+	} while (remaining);
+
+	return 0;
+}
+
+/*
+ * fwu_write_f34_v5v6_blocks() - write data blocks to flash (bootloader v5/v6)
+ * @block_ptr: source buffer holding block_cnt * fwu->block_size bytes
+ * @block_cnt: number of flash blocks to write
+ * @command: F34 flash command to issue per block
+ *
+ * Writes one block at a time: payload write, command write, then wait
+ * for idle.  The block number register auto-increments in hardware, so
+ * it is only programmed once up front.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int fwu_write_f34_v5v6_blocks(unsigned char *block_ptr,
+		unsigned short block_cnt, unsigned char command)
+{
+	int retval;
+	unsigned char data_base;
+	unsigned char block_number[] = {0, 0};
+	unsigned short blk;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f34_fd.data_base_addr;
+
+	/* The config area selector lives in bits 5-7 of the high byte. */
+	block_number[1] |= (fwu->config_area << 5);
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.block_number,
+			block_number,
+			sizeof(block_number));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write block number\n",
+				__func__);
+		return retval;
+	}
+
+	for (blk = 0; blk < block_cnt; blk++) {
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				data_base + fwu->off.payload,
+				block_ptr,
+				fwu->block_size);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write block data (block %d)\n",
+					__func__, blk);
+			return retval;
+		}
+
+		retval = fwu_write_f34_command(command);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write command for block %d\n",
+					__func__, blk);
+			return retval;
+		}
+
+		retval = fwu_wait_for_idle(WRITE_WAIT_MS, false);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to wait for idle status (block %d)\n",
+					__func__, blk);
+			return retval;
+		}
+
+		block_ptr += fwu->block_size;
+	}
+
+	return 0;
+}
+
+/*
+ * fwu_write_f34_blocks() - write flash blocks via the appropriate protocol
+ * @block_ptr: source buffer
+ * @block_cnt: number of flash blocks to write
+ * @cmd: F34 flash command
+ *
+ * Routes to the v7/v8 or v5/v6 writer based on the detected bootloader.
+ */
+static int fwu_write_f34_blocks(unsigned char *block_ptr,
+		unsigned short block_cnt, unsigned char cmd)
+{
+	if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+		return fwu_write_f34_v7_blocks(block_ptr, block_cnt, cmd);
+
+	return fwu_write_f34_v5v6_blocks(block_ptr, block_cnt, cmd);
+}
+
+/*
+ * fwu_read_f34_v7_blocks() - read data blocks from flash (bootloader v7/v8)
+ * @block_cnt: number of flash blocks to read
+ * @command: F34 flash command selecting the source partition
+ *
+ * Reads into fwu->read_config_buf, which the caller must have sized to
+ * hold block_cnt * fwu->block_size bytes.  Data is fetched in chunks of
+ * at most fwu->payload_length blocks per command.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int fwu_read_f34_v7_blocks(unsigned short block_cnt,
+		unsigned char command)
+{
+	int retval;
+	unsigned char data_base;
+	unsigned char length[2];
+	unsigned short transfer;
+	unsigned short remaining = block_cnt;
+	unsigned short block_number = 0;
+	unsigned short index = 0;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f34_fd.data_base_addr;
+
+	retval = fwu_write_f34_partition_id(command);
+	if (retval < 0)
+		return retval;
+
+	/* Always start reading from block 0 of the selected partition. */
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.block_number,
+			(unsigned char *)&block_number,
+			sizeof(block_number));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write block number\n",
+				__func__);
+		return retval;
+	}
+
+	do {
+		/* transfer = min(remaining, payload_length) */
+		if (remaining / fwu->payload_length)
+			transfer = fwu->payload_length;
+		else
+			transfer = remaining;
+
+		/* Transfer length is written little endian. */
+		length[0] = (unsigned char)(transfer & MASK_8BIT);
+		length[1] = (unsigned char)(transfer >> 8);
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				data_base + fwu->off.transfer_length,
+				length,
+				sizeof(length));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write transfer length (remaining = %d)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		retval = fwu_write_f34_command(command);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write command (remaining = %d)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		retval = fwu_wait_for_idle(WRITE_WAIT_MS, false);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to wait for idle status (remaining = %d)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				data_base + fwu->off.payload,
+				&fwu->read_config_buf[index],
+				transfer * fwu->block_size);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read block data (remaining = %d)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		index += (transfer * fwu->block_size);
+		remaining -= transfer;
+	} while (remaining);
+
+	return 0;
+}
+
+/*
+ * fwu_read_f34_v5v6_blocks() - read data blocks from flash (bootloader v5/v6)
+ * @block_cnt: number of flash blocks to read
+ * @command: F34 flash command to issue per block
+ *
+ * Reads one block per command into fwu->read_config_buf, which the
+ * caller must have sized to hold block_cnt * fwu->block_size bytes.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int fwu_read_f34_v5v6_blocks(unsigned short block_cnt,
+		unsigned char command)
+{
+	int retval;
+	unsigned char data_base;
+	unsigned char block_number[] = {0, 0};
+	unsigned short blk;
+	unsigned short index = 0;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f34_fd.data_base_addr;
+
+	/* The config area selector lives in bits 5-7 of the high byte. */
+	block_number[1] |= (fwu->config_area << 5);
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.block_number,
+			block_number,
+			sizeof(block_number));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write block number\n",
+				__func__);
+		return retval;
+	}
+
+	for (blk = 0; blk < block_cnt; blk++) {
+		retval = fwu_write_f34_command(command);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write read config command\n",
+					__func__);
+			return retval;
+		}
+
+		retval = fwu_wait_for_idle(WRITE_WAIT_MS, false);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to wait for idle status\n",
+					__func__);
+			return retval;
+		}
+
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				data_base + fwu->off.payload,
+				&fwu->read_config_buf[index],
+				fwu->block_size);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read block data (block %d)\n",
+					__func__, blk);
+			return retval;
+		}
+
+		index += fwu->block_size;
+	}
+
+	return 0;
+}
+
+/*
+ * fwu_read_f34_blocks() - read flash blocks via the appropriate protocol
+ * @block_cnt: number of flash blocks to read
+ * @cmd: F34 flash command
+ *
+ * Routes to the v7/v8 or v5/v6 reader based on the detected bootloader.
+ */
+static int fwu_read_f34_blocks(unsigned short block_cnt, unsigned char cmd)
+{
+	if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+		return fwu_read_f34_v7_blocks(block_cnt, cmd);
+
+	return fwu_read_f34_v5v6_blocks(block_cnt, cmd);
+}
+
+/*
+ * fwu_get_image_firmware_id() - obtain the firmware build ID of the image
+ * @fw_id: output, receives the firmware build ID
+ *
+ * Uses the ID embedded in the image header when present; otherwise parses
+ * the digits following "PR" in the image file name.
+ *
+ * Returns 0 on success, -EINVAL if no parsable ID is found, or -ENOMEM.
+ */
+static int fwu_get_image_firmware_id(unsigned int *fw_id)
+{
+	int retval;
+	unsigned char index = 0;
+	char *strptr;
+	char *firmware_id;
+	unsigned long fw_id_long;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (fwu->img.contains_firmware_id) {
+		*fw_id = fwu->img.firmware_id;
+	} else {
+		strptr = strnstr(fwu->image_name, "PR", MAX_IMAGE_NAME_LEN);
+		if (!strptr) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: No valid PR number (PRxxxxxxx) found in image file name (%s)\n",
+					__func__, fwu->image_name);
+			return -EINVAL;
+		}
+
+		/* Copy the digits following "PR" into a bounded scratch buffer. */
+		strptr += 2;
+		firmware_id = kzalloc(MAX_FIRMWARE_ID_LEN, GFP_KERNEL);
+		if (!firmware_id) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for firmware_id\n",
+					__func__);
+			return -ENOMEM;
+		}
+		while (strptr[index] >= '0' && strptr[index] <= '9') {
+			firmware_id[index] = strptr[index];
+			index++;
+			if (index == MAX_FIRMWARE_ID_LEN - 1)
+				break;
+		}
+
+		/*
+		 * Parse into a local unsigned long.  The previous code cast
+		 * fw_id to (unsigned long *), making sstrtoul store a full
+		 * long (8 bytes on 64-bit kernels) into a 4-byte object and
+		 * corrupting the caller's adjacent stack memory.
+		 */
+		retval = sstrtoul(firmware_id, 10, &fw_id_long);
+		kfree(firmware_id);
+		if (retval) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to obtain image firmware ID\n",
+					__func__);
+			return -EINVAL;
+		}
+		*fw_id = (unsigned int)fw_id_long;
+	}
+
+	return 0;
+}
+
+/*
+ * fwu_get_device_config_id() - read the device config ID into fwu->config_id
+ *
+ * The config ID register width depends on the bootloader generation.
+ * Returns 0 on success or a negative error code from the register read.
+ */
+static int fwu_get_device_config_id(void)
+{
+	int retval;
+	unsigned char id_size;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+		id_size = V7_CONFIG_ID_SIZE;
+	else
+		id_size = V5V6_CONFIG_ID_SIZE;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+				fwu->f34_fd.ctrl_base_addr,
+				fwu->config_id,
+				id_size);
+
+	return (retval < 0) ? retval : 0;
+}
+
+/*
+ * fwu_go_nogo() - decide whether a reflash is needed and what to flash
+ *
+ * Returns UI_FIRMWARE (flash firmware + config), UI_CONFIG (config only),
+ * or NONE.  Flashing is forced when fwu->force_update is set or when the
+ * device is stuck in bootloader mode; otherwise the image firmware ID and
+ * then the config ID are compared against the device, and flashing only
+ * proceeds when the image is newer.
+ */
+static enum flash_area fwu_go_nogo(void)
+{
+	int retval;
+	enum flash_area flash_area = NONE;
+	unsigned char ii;
+	unsigned char config_id_size;
+	unsigned int device_fw_id;
+	unsigned int image_fw_id;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (fwu->force_update) {
+		flash_area = UI_FIRMWARE;
+		goto exit;
+	}
+
+	/* Update both UI and config if device is in bootloader mode */
+	if (fwu->bl_mode_device) {
+		flash_area = UI_FIRMWARE;
+		goto exit;
+	}
+
+	/* Get device firmware ID */
+	device_fw_id = rmi4_data->firmware_id;
+	dev_info(rmi4_data->pdev->dev.parent,
+			"%s: Device firmware ID = %d\n",
+			__func__, device_fw_id);
+
+	/* Get image firmware ID */
+	retval = fwu_get_image_firmware_id(&image_fw_id);
+	if (retval < 0) {
+		flash_area = NONE;
+		goto exit;
+	}
+	dev_info(rmi4_data->pdev->dev.parent,
+			"%s: Image firmware ID = %d\n",
+			__func__, image_fw_id);
+
+	if (image_fw_id > device_fw_id) {
+		flash_area = UI_FIRMWARE;
+		goto exit;
+	} else if (image_fw_id < device_fw_id) {
+		dev_info(rmi4_data->pdev->dev.parent,
+				"%s: Image firmware ID older than device firmware ID\n",
+				__func__);
+		flash_area = NONE;
+		goto exit;
+	}
+
+	/* Get device config ID */
+	retval = fwu_get_device_config_id();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read device config ID\n",
+				__func__);
+		flash_area = NONE;
+		goto exit;
+	}
+
+	if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+		config_id_size = V7_CONFIG_ID_SIZE;
+	else
+		config_id_size = V5V6_CONFIG_ID_SIZE;
+
+	/*
+	 * Firmware IDs match: compare config IDs byte by byte, most
+	 * significant first; update config only if the image's is newer.
+	 */
+	for (ii = 0; ii < config_id_size; ii++) {
+		if (fwu->img.ui_config.data[ii] > fwu->config_id[ii]) {
+			flash_area = UI_CONFIG;
+			goto exit;
+		} else if (fwu->img.ui_config.data[ii] < fwu->config_id[ii]) {
+			flash_area = NONE;
+			goto exit;
+		}
+	}
+
+	flash_area = NONE;
+
+exit:
+	if (flash_area == NONE) {
+		dev_info(rmi4_data->pdev->dev.parent,
+				"%s: No need to do reflash\n",
+				__func__);
+	} else {
+		dev_info(rmi4_data->pdev->dev.parent,
+				"%s: Updating %s\n",
+				__func__,
+				flash_area == UI_FIRMWARE ?
+				"UI firmware and config" :
+				"UI config only");
+	}
+
+	return flash_area;
+}
+
+/*
+ * fwu_scan_pdt() - scan the Page Description Table for F01/F34/F35
+ *
+ * Walks the PDT from PDT_START downward, caching the register bases of
+ * F01 (device control), F34 (flash) and F35 (recovery), determining the
+ * bootloader generation from the F34 function version, and building the
+ * F34 interrupt mask.  If F01/F34 are absent but F35 is found, the device
+ * is flagged as being in microbootloader (recovery) mode.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int fwu_scan_pdt(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char intr_count = 0;
+	unsigned char intr_off;
+	unsigned char intr_src;
+	unsigned short addr;
+	bool f01found = false;
+	bool f34found = false;
+	bool f35found = false;
+	struct synaptics_rmi4_fn_desc rmi_fd;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	fwu->in_ub_mode = false;
+
+	/* PDT entries are laid out top-down; an empty entry ends the scan. */
+	for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				addr,
+				(unsigned char *)&rmi_fd,
+				sizeof(rmi_fd));
+		if (retval < 0)
+			return retval;
+
+		if (rmi_fd.fn_number) {
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Found F%02x\n",
+					__func__, rmi_fd.fn_number);
+			switch (rmi_fd.fn_number) {
+			case SYNAPTICS_RMI4_F01:
+				f01found = true;
+
+				rmi4_data->f01_query_base_addr =
+						rmi_fd.query_base_addr;
+				rmi4_data->f01_ctrl_base_addr =
+						rmi_fd.ctrl_base_addr;
+				rmi4_data->f01_data_base_addr =
+						rmi_fd.data_base_addr;
+				rmi4_data->f01_cmd_base_addr =
+						rmi_fd.cmd_base_addr;
+				break;
+			case SYNAPTICS_RMI4_F34:
+				f34found = true;
+				fwu->f34_fd.query_base_addr =
+						rmi_fd.query_base_addr;
+				fwu->f34_fd.ctrl_base_addr =
+						rmi_fd.ctrl_base_addr;
+				fwu->f34_fd.data_base_addr =
+						rmi_fd.data_base_addr;
+
+				/* F34 function version maps to BL generation. */
+				switch (rmi_fd.fn_version) {
+				case F34_V0:
+					fwu->bl_version = BL_V5;
+					break;
+				case F34_V1:
+					fwu->bl_version = BL_V6;
+					break;
+				case F34_V2:
+					fwu->bl_version = BL_V7;
+					break;
+				default:
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Unrecognized F34 version\n",
+							__func__);
+					return -EINVAL;
+				}
+
+				/* Build the interrupt mask for F34's sources. */
+				fwu->intr_mask = 0;
+				intr_src = rmi_fd.intr_src_count;
+				intr_off = intr_count % 8;
+				for (ii = intr_off;
+						ii < (intr_src + intr_off);
+						ii++) {
+					fwu->intr_mask |= 1 << ii;
+				}
+				break;
+			case SYNAPTICS_RMI4_F35:
+				f35found = true;
+				fwu->f35_fd.query_base_addr =
+						rmi_fd.query_base_addr;
+				fwu->f35_fd.ctrl_base_addr =
+						rmi_fd.ctrl_base_addr;
+				fwu->f35_fd.data_base_addr =
+						rmi_fd.data_base_addr;
+				fwu->f35_fd.cmd_base_addr =
+						rmi_fd.cmd_base_addr;
+				break;
+			}
+		} else {
+			break;
+		}
+
+		intr_count += rmi_fd.intr_src_count;
+	}
+
+	if (!f01found || !f34found) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to find both F01 and F34\n",
+				__func__);
+		if (!f35found) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to find F35\n",
+					__func__);
+			return -EINVAL;
+		} else {
+			/* F35 only: device is in microbootloader mode. */
+			fwu->in_ub_mode = true;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: In microbootloader mode\n",
+					__func__);
+			fwu_recovery_check_status();
+			return 0;
+		}
+	}
+
+	rmi4_data->intr_mask[0] |= fwu->intr_mask;
+
+	/* F01 control register 1 holds the interrupt enable bits. */
+	addr = rmi4_data->f01_ctrl_base_addr + 1;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			addr,
+			&(rmi4_data->intr_mask[0]),
+			sizeof(rmi4_data->intr_mask[0]));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set interrupt enable bit\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * fwu_enter_flash_prog() - put the controller into flash programming mode
+ *
+ * No-op if the device is already in bootloader mode.  Otherwise disables
+ * the attention interrupt, issues the enable-flash-programming command,
+ * rescans the PDT and re-reads the F34 queries (register bases change in
+ * BL mode), and forces F01 nosleep/normal mode so the device cannot doze
+ * during flashing.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int fwu_enter_flash_prog(void)
+{
+	int retval;
+	struct f01_device_control f01_device_control;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = fwu_read_flash_status();
+	if (retval < 0)
+		return retval;
+
+	if (fwu->in_bl_mode)
+		return 0;
+
+	retval = rmi4_data->irq_enable(rmi4_data, false, true);
+	if (retval < 0)
+		return retval;
+
+	msleep(INT_DISABLE_WAIT_MS);
+
+	retval = fwu_write_f34_command(CMD_ENABLE_FLASH_PROG);
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_wait_for_idle(ENABLE_WAIT_MS, false);
+	if (retval < 0)
+		return retval;
+
+	/* fwu->in_bl_mode is refreshed by the idle wait above. */
+	if (!fwu->in_bl_mode) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: BL mode not entered\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (rmi4_data->hw_if->bl_hw_init) {
+		retval = rmi4_data->hw_if->bl_hw_init(rmi4_data);
+		if (retval < 0)
+			return retval;
+	}
+
+	/* Register bases differ in BL mode; rescan before touching F34. */
+	retval = fwu_scan_pdt();
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_read_f34_queries();
+	if (retval < 0)
+		return retval;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			f01_device_control.data,
+			sizeof(f01_device_control.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read F01 device control\n",
+				__func__);
+		return retval;
+	}
+
+	/* Keep the device awake for the whole flash sequence. */
+	f01_device_control.nosleep = true;
+	f01_device_control.sleep_mode = SLEEP_MODE_NORMAL;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			f01_device_control.data,
+			sizeof(f01_device_control.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write F01 device control\n",
+				__func__);
+		return retval;
+	}
+
+	msleep(ENTER_FLASH_PROG_WAIT_MS);
+
+	return retval;
+}
+
+/*
+ * fwu_check_ui_firmware_size() - validate image UI firmware size
+ *
+ * The image's UI firmware partition must occupy exactly the number of
+ * flash blocks reported by the device.  Returns 0 if so, -EINVAL if not.
+ */
+static int fwu_check_ui_firmware_size(void)
+{
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+	unsigned short image_blocks =
+			fwu->img.ui_firmware.size / fwu->block_size;
+
+	if (image_blocks == fwu->blkcount.ui_firmware)
+		return 0;
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: UI firmware size mismatch\n",
+			__func__);
+	return -EINVAL;
+}
+
+/*
+ * fwu_check_ui_configuration_size() - validate image UI config size
+ *
+ * The image's UI config partition must occupy exactly the number of
+ * flash blocks reported by the device.  Returns 0 if so, -EINVAL if not.
+ */
+static int fwu_check_ui_configuration_size(void)
+{
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+	unsigned short image_blocks =
+			fwu->img.ui_config.size / fwu->block_size;
+
+	if (image_blocks == fwu->blkcount.ui_config)
+		return 0;
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: UI configuration size mismatch\n",
+			__func__);
+	return -EINVAL;
+}
+
+/*
+ * fwu_check_dp_configuration_size() - validate image display config size
+ *
+ * The image's display config partition must occupy exactly the number of
+ * flash blocks reported by the device.  Returns 0 if so, -EINVAL if not.
+ */
+static int fwu_check_dp_configuration_size(void)
+{
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+	unsigned short image_blocks =
+			fwu->img.dp_config.size / fwu->block_size;
+
+	if (image_blocks == fwu->blkcount.dp_config)
+		return 0;
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Display configuration size mismatch\n",
+			__func__);
+	return -EINVAL;
+}
+
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+/*
+ * fwu_check_pm_configuration_size() - validate image permanent config size
+ *
+ * The image's permanent config partition must occupy exactly the number
+ * of flash blocks reported by the device.  Returns 0 if so, -EINVAL if
+ * not.
+ */
+static int fwu_check_pm_configuration_size(void)
+{
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+	unsigned short image_blocks =
+			fwu->img.pm_config.size / fwu->block_size;
+
+	if (image_blocks == fwu->blkcount.pm_config)
+		return 0;
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Permanent configuration size mismatch\n",
+			__func__);
+	return -EINVAL;
+}
+#endif
+
+/*
+ * fwu_check_bl_configuration_size() - validate image bootloader config size
+ *
+ * The image's bootloader config partition must occupy exactly the number
+ * of flash blocks reported by the device.  Returns 0 if so, -EINVAL if
+ * not.
+ */
+static int fwu_check_bl_configuration_size(void)
+{
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+	unsigned short image_blocks =
+			fwu->img.bl_config.size / fwu->block_size;
+
+	if (image_blocks == fwu->blkcount.bl_config)
+		return 0;
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Bootloader configuration size mismatch\n",
+			__func__);
+	return -EINVAL;
+}
+
+/*
+ * fwu_check_guest_code_size() - validate image guest code size
+ *
+ * The image's guest code partition must occupy exactly the number of
+ * flash blocks reported by the device.  Returns 0 if so, -EINVAL if not.
+ */
+static int fwu_check_guest_code_size(void)
+{
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+	unsigned short image_blocks =
+			fwu->img.guest_code.size / fwu->block_size;
+
+	if (image_blocks == fwu->blkcount.guest_code)
+		return 0;
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Guest code size mismatch\n",
+			__func__);
+	return -EINVAL;
+}
+
+/*
+ * fwu_erase_configuration() - erase the config area selected by
+ * fwu->config_area
+ *
+ * Issues the matching F34 erase command and waits for the controller to
+ * return to idle.  Returns 0 on success or a negative error code.
+ */
+static int fwu_erase_configuration(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	switch (fwu->config_area) {
+	case UI_CONFIG_AREA:
+		retval = fwu_write_f34_command(CMD_ERASE_UI_CONFIG);
+		if (retval < 0)
+			return retval;
+		break;
+	case DP_CONFIG_AREA:
+		retval = fwu_write_f34_command(CMD_ERASE_DISP_CONFIG);
+		if (retval < 0)
+			return retval;
+		break;
+	case BL_CONFIG_AREA:
+		retval = fwu_write_f34_command(CMD_ERASE_BL_CONFIG);
+		if (retval < 0)
+			return retval;
+		break;
+	case FLASH_CONFIG_AREA:
+		retval = fwu_write_f34_command(CMD_ERASE_FLASH_CONFIG);
+		if (retval < 0)
+			return retval;
+		break;
+	case UPP_AREA:
+		retval = fwu_write_f34_command(CMD_ERASE_UTILITY_PARAMETER);
+		if (retval < 0)
+			return retval;
+		/*
+		 * break was missing here: a successful utility parameter
+		 * erase fell through to default and returned -EINVAL.
+		 */
+		break;
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid config area\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Erase command written\n",
+			__func__);
+
+	retval = fwu_wait_for_idle(ERASE_WAIT_MS, false);
+	if (retval < 0)
+		return retval;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Idle status detected\n",
+			__func__);
+
+	return retval;
+}
+
+/*
+ * fwu_erase_bootloader() - erase the bootloader partition
+ *
+ * Issues the erase-bootloader command and waits for the controller to
+ * return to idle.  Returns 0 on success or a negative error code.
+ */
+static int fwu_erase_bootloader(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = fwu_write_f34_command(CMD_ERASE_BOOTLOADER);
+	if (retval < 0)
+		return retval;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Erase command written\n",
+			__func__);
+
+	retval = fwu_wait_for_idle(ERASE_WAIT_MS, false);
+	if (retval < 0)
+		return retval;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Idle status detected\n",
+			__func__);
+
+	return 0;
+}
+
+#ifdef SYNA_TDDI
+/*
+ * fwu_erase_lockdown_data() - erase the TDDI lockdown data partition
+ *
+ * Issues the erase-lockdown-data command, allows the erase extra settle
+ * time, then waits for the controller to return to idle.  Returns 0 on
+ * success or a negative error code.
+ */
+static int fwu_erase_lockdown_data(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = fwu_write_f34_command(CMD_ERASE_LOCKDOWN_DATA);
+	if (retval < 0)
+		return retval;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Erase command written\n",
+			__func__);
+
+	/* Lockdown erase needs extra settle time before polling status. */
+	msleep(100);
+
+	retval = fwu_wait_for_idle(ERASE_WAIT_MS, false);
+	if (retval < 0)
+		return retval;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Idle status detected\n",
+			__func__);
+
+	return 0;
+}
+
+#endif
+
+/*
+ * fwu_erase_guest_code() - erase the guest code partition
+ *
+ * Issues the erase-guest-code command and waits for the controller to
+ * return to idle.  Returns 0 on success or a negative error code.
+ */
+static int fwu_erase_guest_code(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = fwu_write_f34_command(CMD_ERASE_GUEST_CODE);
+	if (retval < 0)
+		return retval;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Erase command written\n",
+			__func__);
+
+	retval = fwu_wait_for_idle(ERASE_WAIT_MS, false);
+	if (retval < 0)
+		return retval;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Idle status detected\n",
+			__func__);
+
+	return 0;
+}
+
+/*
+ * fwu_erase_all() - erase all updatable flash partitions
+ *
+ * On BL v7: erases UI firmware then UI config individually.  On other
+ * bootloaders: issues the single erase-all command (on BL v8 a
+ * BAD_PARTITION_TABLE status after erase-all is tolerated, and the
+ * optional partitions below are skipped since erase-all covered them).
+ * Then erases display config and guest code when present.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int fwu_erase_all(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (fwu->bl_version == BL_V7) {
+		retval = fwu_write_f34_command(CMD_ERASE_UI_FIRMWARE);
+		if (retval < 0)
+			return retval;
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Erase command written\n",
+				__func__);
+
+		retval = fwu_wait_for_idle(ERASE_WAIT_MS, false);
+		if (retval < 0)
+			return retval;
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Idle status detected\n",
+				__func__);
+
+		fwu->config_area = UI_CONFIG_AREA;
+		retval = fwu_erase_configuration();
+		if (retval < 0)
+			return retval;
+	} else {
+		retval = fwu_write_f34_command(CMD_ERASE_ALL);
+		if (retval < 0)
+			return retval;
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Erase all command written\n",
+				__func__);
+
+		retval = fwu_wait_for_idle(ERASE_WAIT_MS, false);
+		/* BL v8 may legitimately report a bad partition table here. */
+		if (!(fwu->bl_version == BL_V8 &&
+				fwu->flash_status == BAD_PARTITION_TABLE)) {
+			if (retval < 0)
+				return retval;
+		}
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Idle status detected\n",
+				__func__);
+
+		/* Erase-all on v8 already covers the partitions below. */
+		if (fwu->bl_version == BL_V8)
+			return 0;
+	}
+
+	if (fwu->flash_properties.has_disp_config) {
+		fwu->config_area = DP_CONFIG_AREA;
+		retval = fwu_erase_configuration();
+		if (retval < 0)
+			return retval;
+	}
+
+	if (fwu->has_guest_code) {
+		retval = fwu_erase_guest_code();
+		if (retval < 0)
+			return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * fwu_write_firmware() - flash the UI firmware partition from the image
+ *
+ * Returns 0 on success or a negative error code from the block writer.
+ */
+static int fwu_write_firmware(void)
+{
+	unsigned short blocks = fwu->img.ui_firmware.size / fwu->block_size;
+	unsigned char *data = (unsigned char *)fwu->img.ui_firmware.data;
+
+	return fwu_write_f34_blocks(data, blocks, CMD_WRITE_FW);
+}
+
+/*
+ * fwu_write_bootloader() - flash the bootloader partition from the image
+ *
+ * Sets fwu->write_bootloader for the duration of the block write
+ * (presumably consumed by the lower write path — verify against the rest
+ * of the driver), then clears it.  Returns 0 on success or a negative
+ * error code.
+ */
+static int fwu_write_bootloader(void)
+{
+	int retval;
+	unsigned short bootloader_block_count;
+
+	bootloader_block_count = fwu->img.bl_image.size / fwu->block_size;
+
+	fwu->write_bootloader = true;
+	retval = fwu_write_f34_blocks((unsigned char *)fwu->img.bl_image.data,
+			bootloader_block_count, CMD_WRITE_BOOTLOADER);
+	fwu->write_bootloader = false;
+
+	return retval;
+}
+
+/*
+ * fwu_write_utility_parameter() - build and flash the utility parameter
+ * partition
+ *
+ * Assembles the utility parameters from the image (and, with
+ * F51_DISCRETE_FORCE, the preserved force calibration data) into
+ * fwu->read_config_buf, appends a 4-byte checksum at the end of the
+ * partition, and writes the whole partition to flash.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int fwu_write_utility_parameter(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char checksum_array[4];
+	unsigned char *pbuf;
+	unsigned short remaining_size;
+	unsigned short utility_param_size;
+	unsigned long checksum;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	utility_param_size = fwu->blkcount.utility_param * fwu->block_size;
+	retval = fwu_allocate_read_config_buf(utility_param_size);
+	if (retval < 0)
+		return retval;
+	memset(fwu->read_config_buf, 0x00, utility_param_size);
+
+	/* Last 4 bytes of the partition are reserved for the checksum. */
+	pbuf = fwu->read_config_buf;
+	remaining_size = utility_param_size - 4;
+
+	for (ii = 0; ii < MAX_UTILITY_PARAMS; ii++) {
+		if (fwu->img.utility_param_id[ii] == UNUSED)
+			continue;
+
+#ifdef F51_DISCRETE_FORCE
+		/*
+		 * The force parameter is rebuilt from the calibration data
+		 * saved off the device instead of taken from the image.
+		 */
+		if (fwu->img.utility_param_id[ii] == FORCE_PARAMETER) {
+			if (fwu->bl_mode_device) {
+				dev_info(rmi4_data->pdev->dev.parent,
+						"%s: Device in bootloader mode, skipping calibration data restoration\n",
+						__func__);
+				goto image_param;
+			}
+			retval = secure_memcpy(&(pbuf[4]),
+					remaining_size - 4,
+					fwu->cal_data,
+					fwu->cal_data_buf_size,
+					fwu->cal_data_size);
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to copy force calibration data\n",
+						__func__);
+				return retval;
+			}
+			/* 4-byte parameter header: id, 0, length in words. */
+			pbuf[0] = FORCE_PARAMETER;
+			pbuf[1] = 0x00;
+			pbuf[2] = (4 + fwu->cal_data_size) / 2;
+			pbuf += (fwu->cal_data_size + 4);
+			remaining_size -= (fwu->cal_data_size + 4);
+			continue;
+		}
+image_param:
+#endif
+
+		retval = secure_memcpy(pbuf,
+				remaining_size,
+				fwu->img.utility_param[ii].data,
+				fwu->img.utility_param[ii].size,
+				fwu->img.utility_param[ii].size);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to copy utility parameter data\n",
+					__func__);
+			return retval;
+		}
+		pbuf += fwu->img.utility_param[ii].size;
+		remaining_size -= fwu->img.utility_param[ii].size;
+	}
+
+	/* Checksum covers everything except its own 4 trailing bytes. */
+	calculate_checksum((unsigned short *)fwu->read_config_buf,
+			((utility_param_size - 4) / 2),
+			&checksum);
+
+	convert_to_little_endian(checksum_array, checksum);
+
+	fwu->read_config_buf[utility_param_size - 4] = checksum_array[0];
+	fwu->read_config_buf[utility_param_size - 3] = checksum_array[1];
+	fwu->read_config_buf[utility_param_size - 2] = checksum_array[2];
+	fwu->read_config_buf[utility_param_size - 1] = checksum_array[3];
+
+	retval = fwu_write_f34_blocks((unsigned char *)fwu->read_config_buf,
+			fwu->blkcount.utility_param, CMD_WRITE_UTILITY_PARAM);
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/*
+ * Program fwu->config_data into the configuration partition currently
+ * selected by fwu->config_area (callers set area/data/size beforehand).
+ */
+static int fwu_write_configuration(void)
+{
+	return fwu_write_f34_blocks((unsigned char *)fwu->config_data,
+			fwu->config_block_count, CMD_WRITE_CONFIG);
+}
+
+/* Select the UI config area and program it from the firmware image. */
+static int fwu_write_ui_configuration(void)
+{
+	fwu->config_area = UI_CONFIG_AREA;
+	fwu->config_data = fwu->img.ui_config.data;
+	fwu->config_size = fwu->img.ui_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+
+	return fwu_write_configuration();
+}
+
+/* Select the display config area and program it from the firmware image. */
+static int fwu_write_dp_configuration(void)
+{
+	fwu->config_area = DP_CONFIG_AREA;
+	fwu->config_data = fwu->img.dp_config.data;
+	fwu->config_size = fwu->img.dp_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+
+	return fwu_write_configuration();
+}
+
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+/* Select the permanent config area and program it from the firmware image. */
+static int fwu_write_pm_configuration(void)
+{
+	fwu->config_area = PM_CONFIG_AREA;
+	fwu->config_data = fwu->img.pm_config.data;
+	fwu->config_size = fwu->img.pm_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+
+	return fwu_write_configuration();
+}
+
+#ifdef SYNA_TDDI
+/*
+ * Program the TDDI lockdown data partition from the image previously
+ * staged in fwu->read_config_buf, then reset the device to apply it.
+ */
+static int fwu_write_tddi_lockdown_data(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = fwu_write_f34_blocks(fwu->read_config_buf,
+			fwu->blkcount.tddi_lockdown_data,
+			CMD_WRITE_LOCKDOWN_DATA);
+	if (retval < 0)
+		return retval;
+	rmi4_data->reset_device(rmi4_data, false);
+	return 0;
+}
+#endif
+#endif
+
+/*
+ * Erase and reprogram the flash configuration partition from the
+ * firmware image, then reset the device so the new partition layout
+ * takes effect.  Fails if the image's flash config block count does not
+ * match the device's.
+ */
+static int fwu_write_flash_configuration(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	fwu->config_area = FLASH_CONFIG_AREA;
+	fwu->config_data = fwu->img.fl_config.data;
+	fwu->config_size = fwu->img.fl_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+
+	if (fwu->config_block_count != fwu->blkcount.fl_config) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Flash configuration size mismatch\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	retval = fwu_erase_configuration();
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_write_configuration();
+	if (retval < 0)
+		return retval;
+
+	rmi4_data->reset_device(rmi4_data, false);
+
+	return 0;
+}
+
+/* Program the guest code partition from the parsed firmware image. */
+static int fwu_write_guest_code(void)
+{
+	int retval;
+	unsigned short guest_code_block_count;
+
+	guest_code_block_count = fwu->img.guest_code.size / fwu->block_size;
+
+	retval = fwu_write_f34_blocks((unsigned char *)fwu->img.guest_code.data,
+			guest_code_block_count, CMD_WRITE_GUEST_CODE);
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/* Program the lockdown partition from the parsed firmware image. */
+static int fwu_write_lockdown(void)
+{
+	unsigned short lockdown_block_count;
+
+	lockdown_block_count = fwu->img.lockdown.size / fwu->block_size;
+
+	return fwu_write_f34_blocks((unsigned char *)fwu->img.lockdown.data,
+			lockdown_block_count, CMD_WRITE_LOCKDOWN);
+}
+
+/*
+ * Write a new partition table (flash configuration) for bootloader v8
+ * devices and reset so it takes effect.  Unlike v7, no explicit erase
+ * step is performed here before the write.
+ */
+static int fwu_write_partition_table_v8(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	fwu->config_area = FLASH_CONFIG_AREA;
+	fwu->config_data = fwu->img.fl_config.data;
+	fwu->config_size = fwu->img.fl_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+
+	if (fwu->config_block_count != fwu->blkcount.fl_config) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Flash configuration size mismatch\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	retval = fwu_write_configuration();
+	if (retval < 0)
+		return retval;
+
+	rmi4_data->reset_device(rmi4_data, false);
+
+	return 0;
+}
+
+/*
+ * Write a new partition table for bootloader v7 devices.  The current
+ * bootloader configuration is read back first so it can be restored
+ * after the flash configuration (partition table) is erased and
+ * rewritten from the image.
+ */
+static int fwu_write_partition_table_v7(void)
+{
+	int retval;
+	unsigned short block_count;
+
+	/* Save the device's current bootloader config. */
+	block_count = fwu->blkcount.bl_config;
+	fwu->config_area = BL_CONFIG_AREA;
+	fwu->config_size = fwu->block_size * block_count;
+
+	retval = fwu_allocate_read_config_buf(fwu->config_size);
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_read_f34_blocks(block_count, CMD_READ_CONFIG);
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_erase_configuration();
+	if (retval < 0)
+		return retval;
+
+	/* Reprogram the partition table (resets the device internally). */
+	retval = fwu_write_flash_configuration();
+	if (retval < 0)
+		return retval;
+
+	/*
+	 * Restore the saved bootloader config.  NOTE(review): the write
+	 * size is taken from the image's bl_config, which is assumed to
+	 * equal the size read back above -- confirm for mismatched images.
+	 */
+	fwu->config_area = BL_CONFIG_AREA;
+	fwu->config_data = fwu->read_config_buf;
+	fwu->config_size = fwu->img.bl_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+
+	retval = fwu_write_configuration();
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/*
+ * Rewrite the whole bootloader area for bootloader v7: erase the utility
+ * parameter (if present), bootloader config and flash config partitions
+ * plus the bootloader itself, then program the bootloader, the flash
+ * config and the bootloader config from the firmware image, resetting
+ * the device between stages.  The utility parameter partition is only
+ * programmed when the image carries one.
+ *
+ * Fix: dropped the local has_utility_param snapshot -- it was assigned
+ * but never read (the condition below reads fwu->has_utility_param
+ * directly), i.e. a dead variable.
+ */
+static int fwu_write_bl_area_v7(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (fwu->has_utility_param) {
+		fwu->config_area = UPP_AREA;
+		retval = fwu_erase_configuration();
+		if (retval < 0)
+			return retval;
+	}
+
+	fwu->config_area = BL_CONFIG_AREA;
+	retval = fwu_erase_configuration();
+	if (retval < 0)
+		return retval;
+
+	fwu->config_area = FLASH_CONFIG_AREA;
+	retval = fwu_erase_configuration();
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_erase_bootloader();
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_write_bootloader();
+	if (retval < 0)
+		return retval;
+
+	/* Let the new bootloader come up before talking to it again. */
+	msleep(rmi4_data->hw_if->board_data->reset_delay_ms);
+	rmi4_data->reset_device(rmi4_data, false);
+
+	fwu->config_area = FLASH_CONFIG_AREA;
+	fwu->config_data = fwu->img.fl_config.data;
+	fwu->config_size = fwu->img.fl_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+	retval = fwu_write_configuration();
+	if (retval < 0)
+		return retval;
+	rmi4_data->reset_device(rmi4_data, false);
+
+	fwu->config_area = BL_CONFIG_AREA;
+	fwu->config_data = fwu->img.bl_config.data;
+	fwu->config_size = fwu->img.bl_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+	retval = fwu_write_configuration();
+	if (retval < 0)
+		return retval;
+
+	if (fwu->img.contains_utility_param) {
+		retval = fwu_write_utility_parameter();
+		if (retval < 0)
+			return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Top-level reflash sequence: validate partition sizes, decide whether
+ * the bootloader area itself must be rewritten (utility parameter
+ * presence changed between device and image), erase, program the
+ * bootloader area or the new partition table as needed, then program
+ * display config, UI config, guest code and finally the UI firmware.
+ */
+static int fwu_do_reflash(void)
+{
+	int retval;
+	bool do_bl_update = false;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	/* Size checks only apply when the partition layout is unchanged. */
+	if (!fwu->new_partition_table) {
+		retval = fwu_check_ui_firmware_size();
+		if (retval < 0)
+			return retval;
+
+		retval = fwu_check_ui_configuration_size();
+		if (retval < 0)
+			return retval;
+
+		if (fwu->flash_properties.has_disp_config &&
+				fwu->img.contains_disp_config) {
+			retval = fwu_check_dp_configuration_size();
+			if (retval < 0)
+				return retval;
+		}
+
+		if (fwu->has_guest_code && fwu->img.contains_guest_code) {
+			retval = fwu_check_guest_code_size();
+			if (retval < 0)
+				return retval;
+		}
+	} else if (fwu->bl_version == BL_V7) {
+		retval = fwu_check_bl_configuration_size();
+		if (retval < 0)
+			return retval;
+	}
+
+	/*
+	 * A full bootloader-area rewrite is needed when the device and the
+	 * image disagree about having a utility parameter (either way).
+	 */
+	if (!fwu->has_utility_param && fwu->img.contains_utility_param) {
+		if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+			do_bl_update = true;
+	}
+
+	if (fwu->has_utility_param && !fwu->img.contains_utility_param) {
+		if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+			do_bl_update = true;
+	}
+
+	if (!do_bl_update && fwu->incompatible_partition_tables) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Incompatible partition tables\n",
+				__func__);
+		return -EINVAL;
+	} else if (!do_bl_update && fwu->new_partition_table) {
+		/* A layout change is only allowed under force update. */
+		if (!fwu->force_update) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Partition table mismatch\n",
+					__func__);
+			return -EINVAL;
+		}
+	}
+
+	retval = fwu_erase_all();
+	if (retval < 0)
+		return retval;
+
+	if (do_bl_update) {
+		retval = fwu_write_bl_area_v7();
+		if (retval < 0)
+			return retval;
+		pr_notice("%s: Bootloader area programmed\n", __func__);
+	} else if (fwu->bl_version == BL_V7 && fwu->new_partition_table) {
+		retval = fwu_write_partition_table_v7();
+		if (retval < 0)
+			return retval;
+		pr_notice("%s: Partition table programmed\n", __func__);
+	} else if (fwu->bl_version == BL_V8) {
+		retval = fwu_write_partition_table_v8();
+		if (retval < 0)
+			return retval;
+		pr_notice("%s: Partition table programmed\n", __func__);
+	}
+
+	fwu->config_area = UI_CONFIG_AREA;
+	if (fwu->flash_properties.has_disp_config &&
+			fwu->img.contains_disp_config) {
+		retval = fwu_write_dp_configuration();
+		if (retval < 0)
+			return retval;
+		pr_notice("%s: Display configuration programmed\n", __func__);
+	}
+
+	retval = fwu_write_ui_configuration();
+	if (retval < 0)
+		return retval;
+	pr_notice("%s: Configuration programmed\n", __func__);
+
+	if (fwu->has_guest_code && fwu->img.contains_guest_code) {
+		retval = fwu_write_guest_code();
+		if (retval < 0)
+			return retval;
+		pr_notice("%s: Guest code programmed\n", __func__);
+	}
+
+	retval = fwu_write_firmware();
+	if (retval < 0)
+		return retval;
+	pr_notice("%s: Firmware programmed\n", __func__);
+
+	return retval;
+}
+
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+/*
+ * Read the configuration partition selected by fwu->config_area into
+ * fwu->read_config_buf.  Validates that the device actually supports
+ * the requested area and that its block count is non-zero.  On BL v5/v6
+ * the device must be put into flash programming mode first and is reset
+ * afterwards to return to normal operation.
+ */
+static int fwu_do_read_config(void)
+{
+	int retval;
+	unsigned short block_count;
+	unsigned short config_area;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	switch (fwu->config_area) {
+	case UI_CONFIG_AREA:
+		block_count = fwu->blkcount.ui_config;
+		break;
+	case DP_CONFIG_AREA:
+		if (!fwu->flash_properties.has_disp_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Display configuration not supported\n",
+					__func__);
+			return -EINVAL;
+		}
+		block_count = fwu->blkcount.dp_config;
+		break;
+	case PM_CONFIG_AREA:
+		if (!fwu->flash_properties.has_pm_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Permanent configuration not supported\n",
+					__func__);
+			return -EINVAL;
+		}
+		block_count = fwu->blkcount.pm_config;
+		break;
+	case BL_CONFIG_AREA:
+		if (!fwu->flash_properties.has_bl_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Bootloader configuration not supported\n",
+					__func__);
+			return -EINVAL;
+		}
+		block_count = fwu->blkcount.bl_config;
+		break;
+	case UPP_AREA:
+		if (!fwu->has_utility_param) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Utility parameter not supported\n",
+					__func__);
+			return -EINVAL;
+		}
+		block_count = fwu->blkcount.utility_param;
+		break;
+#ifdef SYNA_TDDI
+	case TDDI_FORCE_CONFIG_AREA:
+		if (!fwu->has_force_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: force configuration not supported\n",
+					__func__);
+			return -EINVAL;
+		}
+		block_count = fwu->blkcount.tddi_force_config;
+		break;
+	case TDDI_OEM_DATA_AREA:
+		if (!fwu->has_oem_data) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: oem data not supported\n",
+					__func__);
+			return -EINVAL;
+		}
+		block_count = fwu->blkcount.tddi_oem_data;
+		break;
+	case TDDI_LCM_DATA_AREA:
+		if (!fwu->has_lcm_data) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: lcm data not supported\n",
+					__func__);
+			return -EINVAL;
+		}
+		block_count = fwu->blkcount.tddi_lcm_data;
+		break;
+#endif
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid config area\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (block_count == 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid block count\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+
+	if (fwu->bl_version == BL_V5 || fwu->bl_version == BL_V6) {
+		/*
+		 * fwu_enter_flash_prog may clobber fwu->config_area;
+		 * save and restore the caller's selection.
+		 */
+		config_area = fwu->config_area;
+		retval = fwu_enter_flash_prog();
+		fwu->config_area = config_area;
+		if (retval < 0)
+			goto exit;
+	}
+
+	fwu->config_size = fwu->block_size * block_count;
+
+	retval = fwu_allocate_read_config_buf(fwu->config_size);
+	if (retval < 0)
+		goto exit;
+
+	retval = fwu_read_f34_blocks(block_count, CMD_READ_CONFIG);
+
+exit:
+	/* Leave flash programming mode on BL v5/v6 via a device reset. */
+	if (fwu->bl_version == BL_V5 || fwu->bl_version == BL_V6)
+		rmi4_data->reset_device(rmi4_data, false);
+
+	mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+
+	return retval;
+}
+
+#ifdef SYNA_TDDI
+/*
+ * Read the TDDI lockdown data partition into fwu->read_config_buf.
+ * Only supported on bootloader v6 devices that report lockdown data.
+ * The read buffer is reallocated to exactly the partition size.
+ */
+static int fwu_do_read_tddi_lockdown_data(void)
+{
+	int retval = -EINVAL;
+	unsigned short block_count;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	block_count = fwu->blkcount.tddi_lockdown_data;
+	fwu->config_size = fwu->block_size * block_count;
+
+	if (fwu->bl_version != BL_V6) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Not support lockdown data in bl v.%d\n",
+				__func__,
+				fwu->bl_version);
+		goto exit;
+	} else if (!fwu->has_lockdown_data) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Not support lockdown data\n", __func__);
+		goto exit;
+	}
+
+	/* Replace any previous buffer with a zeroed, exact-size one. */
+	kfree(fwu->read_config_buf);
+
+	fwu->read_config_buf = kzalloc(fwu->config_size, GFP_KERNEL);
+
+	if (!fwu->read_config_buf) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for fwu->read_config_buf\n",
+				__func__);
+		fwu->read_config_buf_size = 0;
+		retval = -ENOMEM;
+		goto exit;
+	}
+	fwu->read_config_buf_size = fwu->config_size;
+	retval = fwu_read_f34_blocks(block_count, CMD_READ_LOCKDOWN_DATA);
+exit:
+	return retval;
+}
+
+/*
+ * Read the TDDI lockdown data partition and copy up to @leng bytes of it
+ * into @lockdown_data.  Returns 0 (or a positive read result) on
+ * success, negative errno on failure.
+ *
+ * Fix: the raw memcpy trusted the caller-supplied length; use the
+ * driver's bounds-checked secure_memcpy (as the rest of this file does)
+ * so a @leng larger than the partition read-back cannot over-read
+ * fwu->read_config_buf.
+ */
+int get_tddi_lockdown_data(unsigned char *lockdown_data, unsigned short leng)
+{
+	int retval;
+
+	retval = fwu_do_read_tddi_lockdown_data();
+	if (retval < 0)
+		return retval;
+
+	retval = secure_memcpy(lockdown_data, leng,
+			fwu->read_config_buf, fwu->read_config_buf_size,
+			leng);
+	if (retval < 0)
+		return retval;
+
+	return retval;
+}
+
+/*
+ * Write @leng bytes of caller-supplied lockdown data to the TDDI
+ * lockdown partition.  Only supported on bootloader v6 devices that
+ * report lockdown data.  The partition image is staged in
+ * fwu->read_config_buf, padded with zeros, and terminated with a 4-byte
+ * checksum in the last four bytes before being programmed.
+ *
+ * Fix: corrected the error-message typo "lockdwon" -> "lockdown".
+ */
+int set_tddi_lockdown_data(unsigned char *lockdown_data, unsigned short leng)
+{
+	int retval = -EINVAL;
+	unsigned long checksum;
+	unsigned char checksum_array[4];
+	unsigned short blk_cnt;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (fwu->bl_version != BL_V6) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Not support lockdown data in bl v.%d\n",
+				__func__,
+				fwu->bl_version);
+		goto exit;
+	} else if (!fwu->has_lockdown_data) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Not support lockdown data\n", __func__);
+		goto exit;
+	}
+
+	retval = fwu_enter_flash_prog();
+	if (retval < 0)
+		goto exit;
+
+	retval = fwu_erase_lockdown_data();
+	if (retval < 0)
+		goto exit;
+
+	blk_cnt = fwu->blkcount.tddi_lockdown_data;
+
+	/* Stage the full partition image: data, zero padding, checksum. */
+	fwu->config_size = fwu->blkcount.tddi_lockdown_data * fwu->block_size;
+	retval = fwu_allocate_read_config_buf(fwu->config_size);
+	if (retval < 0)
+		goto exit;
+	memset(fwu->read_config_buf, 0x00, fwu->config_size);
+	retval = secure_memcpy(fwu->read_config_buf, fwu->config_size,
+			lockdown_data, leng, leng);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy tddi lockdown data\n",
+				__func__);
+		goto exit;
+	}
+
+	/* Checksum covers everything except the last four bytes. */
+	calculate_checksum((unsigned short *)fwu->read_config_buf,
+			((fwu->config_size - 4) / 2),
+			&checksum);
+
+	convert_to_little_endian(checksum_array, checksum);
+
+	fwu->read_config_buf[blk_cnt * fwu->block_size - 4] = checksum_array[0];
+	fwu->read_config_buf[blk_cnt * fwu->block_size - 3] = checksum_array[1];
+	fwu->read_config_buf[blk_cnt * fwu->block_size - 2] = checksum_array[2];
+	fwu->read_config_buf[blk_cnt * fwu->block_size - 1] = checksum_array[3];
+	retval = fwu_write_tddi_lockdown_data();
+exit:
+	return retval;
+}
+#endif
+#endif
+
+/*
+ * Lock down a bootloader v7/v8 device: enter flash programming mode,
+ * check the device configuration status, and program the lockdown
+ * partition unless the device reports it is already locked (status 2).
+ */
+static int fwu_do_lockdown_v7(void)
+{
+	int retval;
+	struct f34_v7_data0 status;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = fwu_enter_flash_prog();
+	if (retval < 0)
+		return retval;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fwu->f34_fd.data_base_addr + fwu->off.flash_status,
+			status.data,
+			sizeof(status.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read flash status\n",
+				__func__);
+		return retval;
+	}
+
+	/* device_cfg_status == 2 indicates the device is already locked. */
+	if (status.device_cfg_status == 2) {
+		dev_info(rmi4_data->pdev->dev.parent,
+				"%s: Device already locked down\n",
+				__func__);
+		return 0;
+	}
+
+	retval = fwu_write_lockdown();
+	if (retval < 0)
+		return retval;
+
+	pr_notice("%s: Lockdown programmed\n", __func__);
+
+	return retval;
+}
+
+/*
+ * Lock down a bootloader v5/v6 device.  On TDDI parts with lockdown
+ * data support, the lockdown image is written via the TDDI path
+ * instead.  Otherwise the flash properties are checked and the lockdown
+ * partition is programmed unless the device is already locked
+ * (flash_properties.unlocked == 0).
+ */
+static int fwu_do_lockdown_v5v6(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+#ifdef SYNA_TDDI
+	unsigned char *img_ld;
+
+	img_ld = (unsigned char *)fwu->img.lockdown.data;
+	if (fwu->has_lockdown_data) {
+		retval = set_tddi_lockdown_data(img_ld,
+				LOCKDOWN_SIZE);
+		if (retval < 0)
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write lockdown data\n",
+					__func__);
+		return retval;
+	}
+#endif
+
+	retval = fwu_enter_flash_prog();
+	if (retval < 0)
+		return retval;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fwu->f34_fd.query_base_addr + fwu->off.properties,
+			fwu->flash_properties.data,
+			sizeof(fwu->flash_properties.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read flash properties\n",
+				__func__);
+		return retval;
+	}
+
+	if (fwu->flash_properties.unlocked == 0) {
+		dev_info(rmi4_data->pdev->dev.parent,
+				"%s: Device already locked down\n",
+				__func__);
+		return 0;
+	}
+
+	retval = fwu_write_lockdown();
+	if (retval < 0)
+		return retval;
+
+	pr_notice("%s: Lockdown programmed\n", __func__);
+
+	return retval;
+}
+
+#ifdef F51_DISCRETE_FORCE
+/*
+ * Restore previously saved F51 force calibration data: read back the UI
+ * configuration, patch the calibration bytes in at fwu->cal_data_off,
+ * recompute the trailing 4-byte checksum, and erase/rewrite the UI
+ * configuration partition with the patched image.
+ */
+static int fwu_do_restore_f51_cal_data(void)
+{
+	int retval;
+	unsigned char checksum_array[4];
+	unsigned short block_count;
+	unsigned long checksum;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	block_count = fwu->blkcount.ui_config;
+	fwu->config_size = fwu->block_size * block_count;
+	fwu->config_area = UI_CONFIG_AREA;
+
+	retval = fwu_allocate_read_config_buf(fwu->config_size);
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_read_f34_blocks(block_count, CMD_READ_CONFIG);
+	if (retval < 0)
+		return retval;
+
+	/* Splice the saved calibration data into the read-back config. */
+	retval = secure_memcpy(&fwu->read_config_buf[fwu->cal_data_off],
+			fwu->cal_data_size, fwu->cal_data,
+			fwu->cal_data_buf_size, fwu->cal_data_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to restore calibration data\n",
+				__func__);
+		return retval;
+	}
+
+	/* Checksum covers everything except the last four bytes. */
+	calculate_checksum((unsigned short *)fwu->read_config_buf,
+			((fwu->config_size - 4) / 2),
+			&checksum);
+
+	convert_to_little_endian(checksum_array, checksum);
+
+	fwu->read_config_buf[fwu->config_size - 4] = checksum_array[0];
+	fwu->read_config_buf[fwu->config_size - 3] = checksum_array[1];
+	fwu->read_config_buf[fwu->config_size - 2] = checksum_array[2];
+	fwu->read_config_buf[fwu->config_size - 1] = checksum_array[3];
+
+	retval = fwu_enter_flash_prog();
+	if (retval < 0)
+		return retval;
+
+	fwu->config_area = UI_CONFIG_AREA;
+	fwu->config_data = fwu->read_config_buf;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+
+	retval = fwu_erase_configuration();
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_write_configuration();
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+/*
+ * Sysfs-triggered entry point: parse the staged firmware image and
+ * reflash only the guest code partition.  Holds the exp_init mutex and
+ * keeps the device awake for the duration; always resets the device on
+ * the way out to leave flash programming mode.
+ */
+static int fwu_start_write_guest_code(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = fwu_parse_image_info();
+	if (retval < 0)
+		return -EINVAL;
+
+	if (!fwu->has_guest_code) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Guest code not supported\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (!fwu->img.contains_guest_code) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: No guest code in firmware image\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (rmi4_data->sensor_sleep) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Sensor sleeping\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	rmi4_data->stay_awake = true;
+
+	mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+
+	pr_notice("%s: Start of write guest code process\n", __func__);
+
+	retval = fwu_enter_flash_prog();
+	if (retval < 0)
+		goto exit;
+
+	retval = fwu_check_guest_code_size();
+	if (retval < 0)
+		goto exit;
+
+	retval = fwu_erase_guest_code();
+	if (retval < 0)
+		goto exit;
+
+	retval = fwu_write_guest_code();
+	if (retval < 0)
+		goto exit;
+
+	pr_notice("%s: Guest code programmed\n", __func__);
+
+exit:
+	/* Reset unconditionally to leave flash programming mode. */
+	rmi4_data->reset_device(rmi4_data, false);
+
+	pr_notice("%s: End of write guest code process\n", __func__);
+
+	mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+
+	rmi4_data->stay_awake = false;
+
+	return retval;
+}
+
+/*
+ * Sysfs-triggered entry point: rewrite a single configuration partition
+ * (UI, display, or permanent) from the staged firmware image.  For the
+ * UI config the image firmware ID must match the device's.  Holds the
+ * exp_init mutex and keeps the device awake; resets on the way out (a
+ * rebuild reset for UI config, a plain reset otherwise).
+ */
+static int fwu_start_write_config(void)
+{
+	int retval;
+	unsigned short config_area;
+	unsigned int device_fw_id;
+	unsigned int image_fw_id;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = fwu_parse_image_info();
+	if (retval < 0)
+		return -EINVAL;
+
+	switch (fwu->config_area) {
+	case UI_CONFIG_AREA:
+		device_fw_id = rmi4_data->firmware_id;
+		retval = fwu_get_image_firmware_id(&image_fw_id);
+		if (retval < 0)
+			return retval;
+		if (device_fw_id != image_fw_id) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Device and image firmware IDs don't match\n",
+					__func__);
+			return -EINVAL;
+		}
+		retval = fwu_check_ui_configuration_size();
+		if (retval < 0)
+			return retval;
+		break;
+	case DP_CONFIG_AREA:
+		if (!fwu->flash_properties.has_disp_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Display configuration not supported\n",
+					__func__);
+			return -EINVAL;
+		}
+		if (!fwu->img.contains_disp_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: No display configuration in firmware image\n",
+					__func__);
+			return -EINVAL;
+		}
+		retval = fwu_check_dp_configuration_size();
+		if (retval < 0)
+			return retval;
+		break;
+	case PM_CONFIG_AREA:
+		if (!fwu->flash_properties.has_pm_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Permanent configuration not supported\n",
+					__func__);
+			return -EINVAL;
+		}
+		if (!fwu->img.contains_perm_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: No permanent configuration in firmware image\n",
+					__func__);
+			return -EINVAL;
+		}
+		retval = fwu_check_pm_configuration_size();
+		if (retval < 0)
+			return retval;
+		break;
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Configuration not supported\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (rmi4_data->sensor_sleep) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Sensor sleeping\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	rmi4_data->stay_awake = true;
+
+	mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+
+	pr_notice("%s: Start of write config process\n", __func__);
+
+	/*
+	 * fwu_enter_flash_prog may clobber fwu->config_area; save and
+	 * restore the caller's selection around it.
+	 */
+	config_area = fwu->config_area;
+
+	retval = fwu_enter_flash_prog();
+	if (retval < 0)
+		goto exit;
+
+	fwu->config_area = config_area;
+
+	/* The permanent config area is not erasable; write in place. */
+	if (fwu->config_area != PM_CONFIG_AREA) {
+		retval = fwu_erase_configuration();
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to erase config\n",
+					__func__);
+			goto exit;
+		}
+	}
+
+	switch (fwu->config_area) {
+	case UI_CONFIG_AREA:
+		retval = fwu_write_ui_configuration();
+		if (retval < 0)
+			goto exit;
+		break;
+	case DP_CONFIG_AREA:
+		retval = fwu_write_dp_configuration();
+		if (retval < 0)
+			goto exit;
+		break;
+	case PM_CONFIG_AREA:
+		retval = fwu_write_pm_configuration();
+		if (retval < 0)
+			goto exit;
+		break;
+	}
+
+	pr_notice("%s: Config written\n", __func__);
+
+exit:
+	switch (fwu->config_area) {
+	case UI_CONFIG_AREA:
+		/* UI config changed: rebuild the input device on reset. */
+		rmi4_data->reset_device(rmi4_data, true);
+		break;
+	case DP_CONFIG_AREA:
+	case PM_CONFIG_AREA:
+		rmi4_data->reset_device(rmi4_data, false);
+		break;
+	}
+
+	pr_notice("%s: End of write config process\n", __func__);
+
+	mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+
+	rmi4_data->stay_awake = false;
+
+	return retval;
+}
+#endif
+
+/*
+ * Main reflash entry point.  Loads the default firmware image via
+ * request_firmware if none was staged, validates flash size and
+ * bootloader version against the device, decides via fwu_go_nogo
+ * whether to reflash firmware, config only, or nothing, and performs
+ * the chosen update followed by optional device lockdown.  Holds the
+ * exp_init mutex and keeps the device awake for the whole sequence.
+ */
+static int fwu_start_reflash(void)
+{
+	int retval = 0;
+	enum flash_area flash_area;
+	bool do_rebuild = false;
+	const struct firmware *fw_entry = NULL;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (rmi4_data->sensor_sleep) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Sensor sleeping\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	rmi4_data->stay_awake = true;
+
+	mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+
+	pr_notice("%s: Start of reflash process\n", __func__);
+
+	/* No image staged by sysfs: load the built-in default by name. */
+	if (fwu->image == NULL) {
+		retval = secure_memcpy(fwu->image_name, MAX_IMAGE_NAME_LEN,
+				FW_IMAGE_NAME, sizeof(FW_IMAGE_NAME),
+				sizeof(FW_IMAGE_NAME));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to copy image file name\n",
+					__func__);
+			goto exit;
+		}
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Requesting firmware image %s\n",
+				__func__, fwu->image_name);
+
+		retval = request_firmware(&fw_entry, fwu->image_name,
+				rmi4_data->pdev->dev.parent);
+		if (retval != 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Firmware image %s not available\n",
+					__func__, fwu->image_name);
+			retval = -EINVAL;
+			goto exit;
+		}
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Firmware image size = %d\n",
+				__func__, (unsigned int)fw_entry->size);
+
+		fwu->image = fw_entry->data;
+	}
+
+	retval = fwu_parse_image_info();
+	if (retval < 0)
+		goto exit;
+
+	/* The image must target the same flash size and BL version. */
+	if (fwu->blkcount.total_count != fwu->img.blkcount.total_count) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Flash size mismatch\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (fwu->bl_version != fwu->img.bl_version) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Bootloader version mismatch\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	retval = fwu_read_flash_status();
+	if (retval < 0)
+		goto exit;
+
+	if (fwu->in_bl_mode) {
+		fwu->bl_mode_device = true;
+		dev_info(rmi4_data->pdev->dev.parent,
+				"%s: Device in bootloader mode\n",
+				__func__);
+	} else {
+		fwu->bl_mode_device = false;
+	}
+
+	/* Decide what (if anything) needs reflashing. */
+	flash_area = fwu_go_nogo();
+
+	if (flash_area != NONE) {
+		retval = fwu_enter_flash_prog();
+		if (retval < 0) {
+			rmi4_data->reset_device(rmi4_data, false);
+			goto exit;
+		}
+	}
+
+#ifdef F51_DISCRETE_FORCE
+	/*
+	 * Save the force calibration data out of the current UI config
+	 * before it is erased, so it can be restored afterwards.  Skipped
+	 * when the device is stuck in bootloader mode (config unreadable).
+	 */
+	if (flash_area != NONE && !fwu->bl_mode_device) {
+		fwu->config_size = fwu->block_size * fwu->blkcount.ui_config;
+		fwu->config_area = UI_CONFIG_AREA;
+
+		retval = fwu_allocate_read_config_buf(fwu->config_size);
+		if (retval < 0) {
+			rmi4_data->reset_device(rmi4_data, false);
+			goto exit;
+		}
+
+		retval = fwu_read_f34_blocks(fwu->blkcount.ui_config,
+				CMD_READ_CONFIG);
+		if (retval < 0) {
+			rmi4_data->reset_device(rmi4_data, false);
+			goto exit;
+		}
+
+		retval = secure_memcpy(fwu->cal_data, fwu->cal_data_buf_size,
+				&fwu->read_config_buf[fwu->cal_data_off],
+				fwu->cal_data_size, fwu->cal_data_size);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to save calibration data\n",
+					__func__);
+			rmi4_data->reset_device(rmi4_data, false);
+			goto exit;
+		}
+	}
+#endif
+
+	switch (flash_area) {
+	case UI_FIRMWARE:
+		do_rebuild = true;
+		retval = fwu_do_reflash();
+#ifdef F51_DISCRETE_FORCE
+		if (retval < 0)
+			break;
+
+		if (fwu->has_utility_param || fwu->img.contains_utility_param)
+			break;
+
+		rmi4_data->reset_device(rmi4_data, false);
+
+		if (fwu->bl_mode_device || fwu->in_bl_mode) {
+			dev_info(rmi4_data->pdev->dev.parent,
+					"%s: Device in bootloader mode, skipping calibration data restoration\n",
+					__func__);
+			break;
+		}
+
+		retval = fwu_do_restore_f51_cal_data();
+#endif
+		break;
+	case UI_CONFIG:
+		do_rebuild = true;
+		retval = fwu_check_ui_configuration_size();
+		if (retval < 0)
+			break;
+		fwu->config_area = UI_CONFIG_AREA;
+		retval = fwu_erase_configuration();
+		if (retval < 0)
+			break;
+		retval = fwu_write_ui_configuration();
+#ifdef F51_DISCRETE_FORCE
+		if (retval < 0)
+			break;
+
+		if (fwu->has_utility_param)
+			break;
+
+		retval = fwu_do_restore_f51_cal_data();
+#endif
+		break;
+	case NONE:
+	default:
+		break;
+	}
+
+	if (retval < 0) {
+		do_rebuild = false;
+		rmi4_data->reset_device(rmi4_data, false);
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to do reflash\n",
+				__func__);
+		goto exit;
+	}
+
+	/* Optional lockdown after a successful update. */
+	if (fwu->do_lockdown && (fwu->img.lockdown.data != NULL)) {
+		switch (fwu->bl_version) {
+		case BL_V5:
+		case BL_V6:
+			retval = fwu_do_lockdown_v5v6();
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to do lockdown\n",
+						__func__);
+			}
+			rmi4_data->reset_device(rmi4_data, false);
+			break;
+		case BL_V7:
+		case BL_V8:
+			retval = fwu_do_lockdown_v7();
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to do lockdown\n",
+						__func__);
+			}
+			rmi4_data->reset_device(rmi4_data, false);
+			break;
+		default:
+			break;
+		}
+	}
+
+exit:
+	if (fw_entry)
+		release_firmware(fw_entry);
+
+	/* Rebuild reset re-registers the input device after a reflash. */
+	if (do_rebuild)
+		rmi4_data->reset_device(rmi4_data, true);
+
+	pr_notice("%s: End of reflash process\n", __func__);
+
+	mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+
+	rmi4_data->stay_awake = false;
+
+	return retval;
+}
+
+/*
+ * Check the F35 recovery-mode error code register.  Returns 0 when the
+ * masked error code is zero, a negative errno on read failure or when
+ * the device reports a non-zero recovery status.
+ */
+static int fwu_recovery_check_status(void)
+{
+	int retval;
+	unsigned char error_code;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fwu->f35_fd.data_base_addr + F35_ERROR_CODE_OFFSET,
+			&error_code,
+			1);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read status\n",
+				__func__);
+		return retval;
+	}
+
+	/* Only the low five bits carry the error code. */
+	error_code &= MASK_5BIT;
+	if (error_code == 0x00)
+		return 0;
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Recovery mode status = %d\n",
+			__func__, error_code);
+
+	return -EINVAL;
+}
+
+/*
+ * Wait for an F35 recovery-mode erase to finish.  Polls at 20 ms
+ * intervals against a shared timeout budget (F35_ERASE_ALL_WAIT_MS):
+ * the inner loop waits for the issued command bit to self-clear, the
+ * outer loop then checks the flash status busy bit and, while still
+ * busy, re-issues the command and waits again.
+ */
+static int fwu_recovery_erase_completion(void)
+{
+	int retval;
+	unsigned char data_base;
+	unsigned char command;
+	unsigned char status;
+	unsigned int timeout = F35_ERASE_ALL_WAIT_MS / 20;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f35_fd.data_base_addr;
+
+	do {
+		command = 0x01;
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				fwu->f35_fd.cmd_base_addr,
+				&command,
+				sizeof(command));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to issue command\n",
+					__func__);
+			return retval;
+		}
+
+		/* Wait for the command bit to self-clear. */
+		do {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					fwu->f35_fd.cmd_base_addr,
+					&command,
+					sizeof(command));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to read command status\n",
+						__func__);
+				return retval;
+			}
+
+			if ((command & 0x01) == 0x00)
+				break;
+
+			msleep(20);
+			timeout--;
+		} while (timeout > 0);
+
+		if (timeout == 0)
+			goto exit;
+
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				data_base + F35_FLASH_STATUS_OFFSET,
+				&status,
+				sizeof(status));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read flash status\n",
+					__func__);
+			return retval;
+		}
+
+		/* Bit 0 clear means the erase has completed. */
+		if ((status & 0x01) == 0x00)
+			break;
+
+		msleep(20);
+		timeout--;
+	} while (timeout > 0);
+
+exit:
+	if (timeout == 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Timed out waiting for flash erase completion\n",
+				__func__);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+/*
+ * Issue the F$35 "erase all" command and wait for it to finish.  When the
+ * function exposes a command register, completion is polled; otherwise a
+ * fixed worst-case delay is used.  The device error code is checked
+ * afterwards.  Returns 0 on success or a negative errno.
+ */
+static int fwu_recovery_erase_all(void)
+{
+	int retval;
+	unsigned char ctrl_base;
+	unsigned char command = CMD_F35_ERASE_ALL;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	ctrl_base = fwu->f35_fd.ctrl_base_addr;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			ctrl_base + F35_CHUNK_COMMAND_OFFSET,
+			&command,
+			sizeof(command));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to issue erase all command\n",
+				__func__);
+		return retval;
+	}
+
+	if (fwu->f35_fd.cmd_base_addr) {
+		retval = fwu_recovery_erase_completion();
+		if (retval < 0)
+			return retval;
+	} else {
+		/* no command register to poll: wait the worst-case time */
+		msleep(F35_ERASE_ALL_WAIT_MS);
+	}
+
+	retval = fwu_recovery_check_status();
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/*
+ * Stream the staged image (fwu->image, fwu->image_size bytes) into the
+ * F$35 microbootloader in F35_CHUNK_SIZE pieces.  The chunk index is
+ * reset to zero once up front; each register write carries one chunk of
+ * payload followed by the "write chunk" command byte in a single
+ * transfer.  Returns 0 on success or a negative errno.
+ */
+static int fwu_recovery_write_chunk(void)
+{
+	int retval;
+	unsigned char ctrl_base;
+	unsigned char chunk_number[] = {0, 0};
+	unsigned char chunk_spare;
+	unsigned char chunk_size;
+	unsigned char buf[F35_CHUNK_SIZE + 1];
+	unsigned short chunk;
+	unsigned short chunk_total;
+	unsigned short bytes_written = 0;
+	unsigned char *chunk_ptr = (unsigned char *)fwu->image;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	ctrl_base = fwu->f35_fd.ctrl_base_addr;
+
+	/* start writing from chunk 0 (LSB/MSB pair) */
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			ctrl_base + F35_CHUNK_NUM_LSB_OFFSET,
+			chunk_number,
+			sizeof(chunk_number));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write chunk number\n",
+				__func__);
+		return retval;
+	}
+
+	/* last byte of every transfer is the write-chunk command */
+	buf[sizeof(buf) - 1] = CMD_F35_WRITE_CHUNK;
+
+	chunk_total = fwu->image_size / F35_CHUNK_SIZE;
+	chunk_spare = fwu->image_size % F35_CHUNK_SIZE;
+	if (chunk_spare)
+		chunk_total++;
+
+	for (chunk = 0; chunk < chunk_total; chunk++) {
+		if (chunk_spare && chunk == chunk_total - 1)
+			chunk_size = chunk_spare;
+		else
+			chunk_size = F35_CHUNK_SIZE;
+
+		/* clear only the payload area; keep the command byte */
+		memset(buf, 0x00, F35_CHUNK_SIZE);
+		secure_memcpy(buf, sizeof(buf), chunk_ptr,
+					fwu->image_size - bytes_written,
+					chunk_size);
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				ctrl_base + F35_CHUNK_DATA_OFFSET,
+				buf,
+				sizeof(buf));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write chunk data (chunk %d)\n",
+					__func__, chunk);
+			return retval;
+		}
+		chunk_ptr += chunk_size;
+		bytes_written += chunk_size;
+	}
+
+	retval = fwu_recovery_check_status();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write chunk data\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Issue the F$35 reset command to leave microbootloader mode, then wait
+ * the fixed reset settle time.  Returns 0 on success or a negative errno
+ * from the register write.
+ */
+static int fwu_recovery_reset(void)
+{
+	int retval;
+	unsigned char ctrl_base;
+	unsigned char command = CMD_F35_RESET;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	ctrl_base = fwu->f35_fd.ctrl_base_addr;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			ctrl_base + F35_CHUNK_COMMAND_OFFSET,
+			&command,
+			sizeof(command));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to issue reset command\n",
+				__func__);
+		return retval;
+	}
+
+	msleep(F35_RESET_WAIT_MS);
+
+	return 0;
+}
+
+/*
+ * Run the full microbootloader recovery sequence: optionally load the
+ * built-in ihex image (when fwu->image is NULL), disable the touch IRQ,
+ * erase external flash, program it chunk by chunk, then reset the device
+ * out of recovery mode.  Holds rmi4_exp_init_mutex and keeps the device
+ * awake for the duration.  Returns 0 on success or a negative errno.
+ */
+static int fwu_start_recovery(void)
+{
+	int retval;
+	const struct firmware *fw_entry = NULL;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (rmi4_data->sensor_sleep) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Sensor sleeping\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	rmi4_data->stay_awake = true;
+
+	mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+
+	pr_notice("%s: Start of recovery process\n", __func__);
+
+	if (fwu->image == NULL) {
+		retval = secure_memcpy(fwu->image_name, MAX_IMAGE_NAME_LEN,
+				FW_IHEX_NAME, sizeof(FW_IHEX_NAME),
+				sizeof(FW_IHEX_NAME));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to copy ihex file name\n",
+					__func__);
+			goto exit;
+		}
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Requesting firmware ihex %s\n",
+				__func__, fwu->image_name);
+
+		retval = request_firmware(&fw_entry, fwu->image_name,
+				rmi4_data->pdev->dev.parent);
+		if (retval != 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Firmware ihex %s not available\n",
+					__func__, fwu->image_name);
+			retval = -EINVAL;
+			goto exit;
+		}
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Firmware image size = %d\n",
+				__func__, (unsigned int)fw_entry->size);
+
+		/*
+		 * NOTE(review): fwu->image aliases fw_entry->data, which is
+		 * released at exit; callers must not reuse fwu->image after
+		 * this function returns (synaptics_fw_updater reassigns it).
+		 */
+		fwu->image = fw_entry->data;
+		fwu->image_size = fw_entry->size;
+	}
+
+	/* keep ATTN interrupts away while the bootloader owns the bus */
+	retval = rmi4_data->irq_enable(rmi4_data, false, false);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to disable interrupt\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = fwu_recovery_erase_all();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to do erase all in recovery mode\n",
+				__func__);
+		goto exit;
+	}
+
+	pr_notice("%s: External flash erased\n", __func__);
+
+	retval = fwu_recovery_write_chunk();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write chunk data in recovery mode\n",
+				__func__);
+		goto exit;
+	}
+
+	pr_notice("%s: Chunk data programmed\n", __func__);
+
+	retval = fwu_recovery_reset();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to reset device in recovery mode\n",
+				__func__);
+		goto exit;
+	}
+
+	pr_notice("%s: Recovery mode reset issued\n", __func__);
+
+	rmi4_data->reset_device(rmi4_data, true);
+
+	retval = 0;
+
+exit:
+	if (fw_entry)
+		release_firmware(fw_entry);
+
+	pr_notice("%s: End of recovery process\n", __func__);
+
+	mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+
+	rmi4_data->stay_awake = false;
+
+	return retval;
+}
+
+/*
+ * Public firmware-update entry point.  If the controller is stuck in
+ * microbootloader mode, run the F$35 recovery flow (with the built-in
+ * ihex image) first, then reflash with the supplied image (or the
+ * default image when fw_data is NULL).  Returns 0 on success or a
+ * negative errno.
+ */
+int synaptics_fw_updater(const unsigned char *fw_data)
+{
+	int retval;
+
+	if (!fwu || !fwu->initialized)
+		return -ENODEV;
+
+	if (fwu->in_ub_mode) {
+		/* recovery pulls in its own built-in image */
+		fwu->image = NULL;
+		retval = fwu_start_recovery();
+		if (retval < 0)
+			return retval;
+	}
+
+	fwu->image = fw_data;
+	retval = fwu_start_reflash();
+	fwu->image = NULL;
+
+	return retval;
+}
+EXPORT_SYMBOL(synaptics_fw_updater);
+
+#ifdef DO_STARTUP_FW_UPDATE
+/*
+ * One-shot work item queued at init time to run a firmware update at
+ * boot.  Optionally waits for the framebuffer to come up first so the
+ * update does not race display bring-up.
+ */
+static void fwu_startup_fw_update_work(struct work_struct *work)
+{
+	/*
+	 * NOTE(review): plain static flag, no locking; assumes this work
+	 * item is only ever queued once (single-threaded workqueue).
+	 */
+	static unsigned char do_once = 1;
+#ifdef WAIT_FOR_FB_READY
+	unsigned int timeout;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+#endif
+
+	if (!do_once)
+		return;
+	do_once = 0;
+
+#ifdef WAIT_FOR_FB_READY
+	timeout = FB_READY_TIMEOUT_S * 1000 / FB_READY_WAIT_MS + 1;
+
+	while (!rmi4_data->fb_ready) {
+		msleep(FB_READY_WAIT_MS);
+		timeout--;
+		if (timeout == 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Timed out waiting for FB ready\n",
+					__func__);
+			return;
+		}
+	}
+#endif
+
+	/* NULL image: use the default firmware selection path */
+	synaptics_fw_updater(NULL);
+
+	return;
+}
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+/*
+ * sysfs bin read handler: copy the most recently read config data
+ * (filled by the readconfig attribute) into the caller's buffer.
+ * Returns the config size, or a negative errno.
+ */
+static ssize_t fwu_sysfs_show_image(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	if (count < fwu->config_size) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Not enough space (%d bytes) in buffer\n",
+				__func__, (unsigned int)count);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	retval = secure_memcpy(buf, count, fwu->read_config_buf,
+			fwu->read_config_buf_size, fwu->config_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy config data\n",
+				__func__);
+		goto exit;
+	} else {
+		retval = fwu->config_size;
+	}
+
+exit:
+	mutex_unlock(&fwu_sysfs_mutex);
+	return retval;
+}
+
+/*
+ * sysfs bin write handler: append a chunk of image data at the current
+ * staging offset in ext_data_source (allocated by the imagesize
+ * attribute).  secure_memcpy() bounds the copy against the remaining
+ * space.  Returns the number of bytes consumed or a negative errno.
+ */
+static ssize_t fwu_sysfs_store_image(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	retval = secure_memcpy(&fwu->ext_data_source[fwu->data_pos],
+			fwu->image_size - fwu->data_pos, buf, count, count);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy image data\n",
+				__func__);
+		goto exit;
+	} else {
+		retval = count;
+	}
+
+	fwu->data_pos += count;
+
+exit:
+	mutex_unlock(&fwu_sysfs_mutex);
+	return retval;
+}
+
+/*
+ * sysfs "dorecovery" handler: flash the staged image through the F$35
+ * microbootloader.  Only valid while the controller is in
+ * microbootloader mode and after image data has been staged via the
+ * imagesize/data attributes.  Returns count on success.
+ */
+static ssize_t fwu_sysfs_do_recovery_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	/*
+	 * kstrtouint() returns 0 on success, not an sscanf()-style match
+	 * count; testing "!= 1" rejected every input.
+	 */
+	if (kstrtouint(buf, 10, &input)) {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (!fwu->in_ub_mode) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Not in microbootloader mode\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (!fwu->ext_data_source) {
+		retval = -EINVAL;
+		goto exit;
+	} else {
+		fwu->image = fwu->ext_data_source;
+	}
+
+	retval = fwu_start_recovery();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to do recovery\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	/* staged data is single-use; always release it */
+	kfree(fwu->ext_data_source);
+	fwu->ext_data_source = NULL;
+	fwu->image = NULL;
+	mutex_unlock(&fwu_sysfs_mutex);
+	return retval;
+}
+
+/*
+ * sysfs "doreflash" handler: reflash with the staged image.  The input
+ * value may carry the LOCKDOWN flag and must otherwise be NORMAL or
+ * FORCE.  Returns count on success.
+ */
+static ssize_t fwu_sysfs_do_reflash_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	/* kstrtouint() returns 0 on success; "!= 1" rejected every input */
+	if (kstrtouint(buf, 10, &input)) {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (fwu->in_ub_mode) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: In microbootloader mode\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (!fwu->ext_data_source) {
+		retval = -EINVAL;
+		goto exit;
+	} else {
+		fwu->image = fwu->ext_data_source;
+	}
+
+	if (input & LOCKDOWN) {
+		fwu->do_lockdown = true;
+		input &= ~LOCKDOWN;
+	}
+
+	if ((input != NORMAL) && (input != FORCE)) {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (input == FORCE)
+		fwu->force_update = true;
+
+	retval = synaptics_fw_updater(fwu->image);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to do reflash\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	/* release staged data and restore the compile-time defaults */
+	kfree(fwu->ext_data_source);
+	fwu->ext_data_source = NULL;
+	fwu->image = NULL;
+	fwu->force_update = FORCE_UPDATE;
+	fwu->do_lockdown = DO_LOCKDOWN;
+	mutex_unlock(&fwu_sysfs_mutex);
+	return retval;
+}
+
+/*
+ * sysfs "writeconfig" handler: write the staged data as a config block.
+ * Accepts only the value 1.  Returns count on success.
+ */
+static ssize_t fwu_sysfs_write_config_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	/* kstrtouint() returns 0 on success; "!= 1" rejected every input */
+	if (kstrtouint(buf, 10, &input)) {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (input != 1) {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (fwu->in_ub_mode) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: In microbootloader mode\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (!fwu->ext_data_source) {
+		retval = -EINVAL;
+		goto exit;
+	} else {
+		fwu->image = fwu->ext_data_source;
+	}
+
+	retval = fwu_start_write_config();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write config\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	/* staged data is single-use; always release it */
+	kfree(fwu->ext_data_source);
+	fwu->ext_data_source = NULL;
+	fwu->image = NULL;
+	mutex_unlock(&fwu_sysfs_mutex);
+	return retval;
+}
+
+/*
+ * sysfs "readconfig" handler: read the active config block into
+ * read_config_buf (retrieved afterwards via the data bin attribute).
+ * Accepts only the value 1.  Returns count on success.
+ */
+static ssize_t fwu_sysfs_read_config_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	/* kstrtouint() returns 0 on success; "!= 1" rejected every input */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input != 1)
+		return -EINVAL;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	if (fwu->in_ub_mode) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: In microbootloader mode\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	retval = fwu_do_read_config();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read config\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	mutex_unlock(&fwu_sysfs_mutex);
+	return retval;
+}
+
+/*
+ * sysfs "configarea" handler: select which config area (UI, PM, BL, ...)
+ * subsequent config operations target.
+ */
+static ssize_t fwu_sysfs_config_area_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long area;
+	int rc;
+
+	rc = sstrtoul(buf, 10, &area);
+	if (rc)
+		return rc;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	fwu->config_area = area;
+	mutex_unlock(&fwu_sysfs_mutex);
+
+	return count;
+}
+
+/*
+ * sysfs "imagename" handler: record the firmware file name to request
+ * on the next reflash.  NOTE(review): the raw sysfs buffer (including
+ * any trailing newline) is copied as-is — confirm consumers strip it.
+ */
+static ssize_t fwu_sysfs_image_name_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	retval = secure_memcpy(fwu->image_name, MAX_IMAGE_NAME_LEN,
+			buf, count, count);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy image file name\n",
+				__func__);
+	} else {
+		retval = count;
+	}
+
+	mutex_unlock(&fwu_sysfs_mutex);
+
+	return retval;
+}
+
+/*
+ * sysfs "imagesize" handler: (re)allocate the staging buffer for image
+ * data subsequently written through the data bin attribute, and reset
+ * the staging offset.
+ */
+static ssize_t fwu_sysfs_image_size_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long size;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &size);
+	if (retval)
+		return retval;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	fwu->image_size = size;
+	fwu->data_pos = 0;
+
+	/* free(NULL) is a no-op, so no guard needed for the first call */
+	kfree(fwu->ext_data_source);
+	fwu->ext_data_source = kzalloc(fwu->image_size, GFP_KERNEL);
+	if (!fwu->ext_data_source) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for image data\n",
+				__func__);
+		/* NOTE(review): image_size stays set on failure — confirm
+		 * later paths always check ext_data_source first.
+		 */
+		retval = -ENOMEM;
+	} else {
+		retval = count;
+	}
+
+	mutex_unlock(&fwu_sysfs_mutex);
+
+	return retval;
+}
+
+/* Report the flash block size in bytes. */
+static ssize_t fwu_sysfs_block_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t len;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	len = snprintf(buf, PAGE_SIZE, "%u\n", fwu->block_size);
+	mutex_unlock(&fwu_sysfs_mutex);
+
+	return len;
+}
+
+/* Report the number of UI firmware blocks. */
+static ssize_t fwu_sysfs_firmware_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t len;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	len = snprintf(buf, PAGE_SIZE, "%u\n", fwu->blkcount.ui_firmware);
+	mutex_unlock(&fwu_sysfs_mutex);
+
+	return len;
+}
+
+/* Report the number of UI configuration blocks. */
+static ssize_t fwu_sysfs_configuration_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t len;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	len = snprintf(buf, PAGE_SIZE, "%u\n", fwu->blkcount.ui_config);
+	mutex_unlock(&fwu_sysfs_mutex);
+
+	return len;
+}
+
+/* Report the number of display configuration blocks. */
+static ssize_t fwu_sysfs_disp_config_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t len;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	len = snprintf(buf, PAGE_SIZE, "%u\n", fwu->blkcount.dp_config);
+	mutex_unlock(&fwu_sysfs_mutex);
+
+	return len;
+}
+
+/* Report the number of permanent configuration blocks. */
+static ssize_t fwu_sysfs_perm_config_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t len;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	len = snprintf(buf, PAGE_SIZE, "%u\n", fwu->blkcount.pm_config);
+	mutex_unlock(&fwu_sysfs_mutex);
+
+	return len;
+}
+
+/* Report the number of bootloader configuration blocks. */
+static ssize_t fwu_sysfs_bl_config_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t len;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	len = snprintf(buf, PAGE_SIZE, "%u\n", fwu->blkcount.bl_config);
+	mutex_unlock(&fwu_sysfs_mutex);
+
+	return len;
+}
+
+/* Report the number of utility parameter blocks. */
+static ssize_t fwu_sysfs_utility_parameter_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t len;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	len = snprintf(buf, PAGE_SIZE, "%u\n", fwu->blkcount.utility_param);
+	mutex_unlock(&fwu_sysfs_mutex);
+
+	return len;
+}
+
+/* Report the number of guest code blocks. */
+static ssize_t fwu_sysfs_guest_code_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t len;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	len = snprintf(buf, PAGE_SIZE, "%u\n", fwu->blkcount.guest_code);
+	mutex_unlock(&fwu_sysfs_mutex);
+
+	return len;
+}
+
+/*
+ * sysfs "writeguestcode" handler: write the staged data as guest code.
+ * Accepts only the value 1.  Returns count on success.
+ */
+static ssize_t fwu_sysfs_write_guest_code_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	/* kstrtouint() returns 0 on success; "!= 1" rejected every input */
+	if (kstrtouint(buf, 10, &input)) {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (input != 1) {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (fwu->in_ub_mode) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: In microbootloader mode\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (!fwu->ext_data_source) {
+		retval = -EINVAL;
+		goto exit;
+	} else {
+		fwu->image = fwu->ext_data_source;
+	}
+
+	retval = fwu_start_write_guest_code();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write guest code\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	/* staged data is single-use; always release it */
+	kfree(fwu->ext_data_source);
+	fwu->ext_data_source = NULL;
+	fwu->image = NULL;
+	mutex_unlock(&fwu_sysfs_mutex);
+	return retval;
+}
+
+#ifdef SYNA_TDDI
+/*
+ * sysfs handler: dump the TDDI lockdown data as a hex string followed
+ * by a newline.  Returns the number of characters written.
+ */
+static ssize_t fwu_sysfs_read_lockdown_code_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned short lockdown_data_size;
+	unsigned char *lockdown_data;
+	char ld_val[3];	/* two hex digits plus NUL terminator */
+	int retval = 0;
+	int i = 0;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	lockdown_data_size = fwu->blkcount.tddi_lockdown_data * fwu->block_size;
+	lockdown_data = kzalloc(lockdown_data_size, GFP_KERNEL);
+	if (!lockdown_data) {
+		mutex_unlock(&fwu_sysfs_mutex);
+		return -ENOMEM;
+	}
+
+	if (get_tddi_lockdown_data(lockdown_data, lockdown_data_size) < 0) {
+		kfree(lockdown_data);
+		mutex_unlock(&fwu_sysfs_mutex);
+		return -EINVAL;
+	}
+
+	/*
+	 * Each byte expands to two hex characters.  Bound snprintf() by
+	 * the local scratch buffer (the original passed PAGE_SIZE and
+	 * overran ld_val[2]) and strlcat() by the sysfs page size rather
+	 * than the raw data size.
+	 */
+	for (i = 0; i < lockdown_data_size; i++) {
+		retval += snprintf(ld_val, sizeof(ld_val), "%02x",
+				*(lockdown_data + i));
+		strlcat(buf, ld_val, PAGE_SIZE);
+	}
+	*(buf + retval) = '\n';
+	kfree(lockdown_data);
+	mutex_unlock(&fwu_sysfs_mutex);
+	return retval + 1;
+}
+
+/*
+ * sysfs handler: program TDDI lockdown data supplied as a hex string
+ * ("aabbcc...\n").  Returns count on success.
+ */
+static ssize_t fwu_sysfs_write_lockdown_code_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned short lockdown_data_size = (count - 1) / 2;
+	unsigned char *lockdown_data;
+	char temp[3];
+	int ld_val;
+	int i = 0;
+
+	/* expect pairs of hex digits plus the trailing newline */
+	if (count % 2 != 1)
+		return -EINVAL;
+
+	/*
+	 * Validate every character.  The original loop compared *buf with
+	 * inverted range checks ('a' < *buf && *buf > 'f') and never
+	 * advanced the pointer; index the buffer instead.
+	 */
+	for (i = 0; i < (count - 1); i++) {
+		if (!((buf[i] >= '0' && buf[i] <= '9') ||
+				(buf[i] >= 'a' && buf[i] <= 'f') ||
+				(buf[i] >= 'A' && buf[i] <= 'F')))
+			return -EINVAL;
+	}
+
+	lockdown_data = kzalloc(lockdown_data_size, GFP_KERNEL);
+	if (!lockdown_data)
+		return -ENOMEM;
+
+	temp[2] = '\0';	/* kstrtoint() requires a NUL-terminated string */
+	for (i = 0; i < lockdown_data_size; i++) {
+		memcpy(temp, buf + 2 * i, 2);
+		/* kstrtoint() returns 0 on success, not a match count */
+		if (kstrtoint(temp, 16, &ld_val) == 0)
+			*(lockdown_data + i) = ld_val & 0xff;
+	}
+
+	if (!mutex_trylock(&fwu_sysfs_mutex)) {
+		kfree(lockdown_data);	/* don't leak on lock contention */
+		return -EBUSY;
+	}
+
+	if (set_tddi_lockdown_data(lockdown_data, lockdown_data_size) < 0) {
+		kfree(lockdown_data);
+		mutex_unlock(&fwu_sysfs_mutex);
+		return -EINVAL;
+	}
+	kfree(lockdown_data);
+	mutex_unlock(&fwu_sysfs_mutex);
+	return count;
+}
+#endif
+#endif
+/*
+ * ATTN interrupt callback: refresh the cached flash status when the
+ * interrupt belongs to the flash function.
+ */
+static void synaptics_rmi4_fwu_attn(struct synaptics_rmi4_data *rmi4_data,
+		unsigned char intr_mask)
+{
+	if (fwu && (fwu->intr_mask & intr_mask))
+		fwu_read_flash_status();
+}
+
+/*
+ * Module init callback: allocate the fwu handle, scan the PDT, cache
+ * the F$34 queries and config ID, optionally queue a boot-time update,
+ * and create the sysfs interface.  Returns 0 on success.
+ */
+static int synaptics_rmi4_fwu_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char attr_count;
+	struct pdt_properties pdt_props;
+
+	if (fwu) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	fwu = kzalloc(sizeof(*fwu), GFP_KERNEL);
+	if (!fwu) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for fwu\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	fwu->image_name = kzalloc(MAX_IMAGE_NAME_LEN, GFP_KERNEL);
+	if (!fwu->image_name) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for image name\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_fwu;
+	}
+
+	fwu->rmi4_data = rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			PDT_PROPS,
+			pdt_props.data,
+			sizeof(pdt_props.data));
+	if (retval < 0) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read PDT properties, assuming 0x00\n",
+				__func__);
+	} else if (pdt_props.has_bsr) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Reflash for LTS not currently supported\n",
+				__func__);
+		retval = -ENODEV;
+		goto exit_free_mem;
+	}
+
+	retval = fwu_scan_pdt();
+	if (retval < 0)
+		goto exit_free_mem;
+
+	if (!fwu->in_ub_mode) {
+		retval = fwu_read_f34_queries();
+		if (retval < 0)
+			goto exit_free_mem;
+
+		retval = fwu_get_device_config_id();
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read device config ID\n",
+					__func__);
+			goto exit_free_mem;
+		}
+	}
+
+	fwu->force_update = FORCE_UPDATE;
+	fwu->do_lockdown = DO_LOCKDOWN;
+	fwu->initialized = true;
+
+#ifdef DO_STARTUP_FW_UPDATE
+	fwu->fwu_workqueue = create_singlethread_workqueue("fwu_workqueue");
+	INIT_WORK(&fwu->fwu_work, fwu_startup_fw_update_work);
+	queue_work(fwu->fwu_workqueue,
+			&fwu->fwu_work);
+#endif
+
+#ifdef F51_DISCRETE_FORCE
+	fwu_read_flash_status();
+	if (!fwu->in_bl_mode) {
+		retval = fwu_f51_force_data_init();
+		if (retval < 0)
+			goto exit_free_mem;
+	}
+#endif
+
+	if (ENABLE_SYS_REFLASH == false)
+		return 0;
+
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+	retval = sysfs_create_bin_file(&rmi4_data->input_dev->dev.kobj,
+			&dev_attr_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs bin file\n",
+				__func__);
+		goto exit_free_mem;
+	}
+#endif
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			retval = -ENODEV;
+			goto exit_remove_attrs;
+		}
+	}
+
+	return 0;
+
+exit_remove_attrs:
+	/*
+	 * attr_count is unsigned, so the original countdown loop
+	 * (attr_count >= 0) could never terminate and wrapped past zero;
+	 * count back down explicitly instead.
+	 */
+	while (attr_count > 0) {
+		attr_count--;
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+	sysfs_remove_bin_file(&rmi4_data->input_dev->dev.kobj, &dev_attr_data);
+#endif
+
+exit_free_mem:
+	kfree(fwu->image_name);
+
+exit_free_fwu:
+	kfree(fwu);
+	fwu = NULL;
+
+exit:
+	return retval;
+}
+
+/*
+ * Module remove callback: tear down the boot-update work, free all fwu
+ * allocations, and remove the sysfs interface.  Always signals
+ * fwu_remove_complete so module exit can proceed.
+ */
+static void synaptics_rmi4_fwu_remove(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char attr_count;
+
+	if (!fwu)
+		goto exit;
+
+#ifdef DO_STARTUP_FW_UPDATE
+	cancel_work_sync(&fwu->fwu_work);
+	flush_workqueue(fwu->fwu_workqueue);
+	destroy_workqueue(fwu->fwu_workqueue);
+#endif
+
+#ifdef F51_DISCRETE_FORCE
+	kfree(fwu->cal_data);
+#endif
+	kfree(fwu->read_config_buf);
+	kfree(fwu->image_name);
+	kfree(fwu);
+	fwu = NULL;
+
+	/* attrs and dev_attr_data are file-scope, so removing the sysfs
+	 * entries after freeing fwu is safe
+	 */
+	if (ENABLE_SYS_REFLASH == false)
+		goto exit;
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+	sysfs_remove_bin_file(&rmi4_data->input_dev->dev.kobj, &dev_attr_data);
+#endif
+
+exit:
+	complete(&fwu_remove_complete);
+
+	return;
+}
+
+/*
+ * Device reset callback: re-initialize the handle if it does not exist
+ * yet, otherwise re-scan the PDT and refresh the cached F$34 state.
+ */
+static void synaptics_rmi4_fwu_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!fwu) {
+		synaptics_rmi4_fwu_init(rmi4_data);
+		return;
+	}
+
+	if (fwu_scan_pdt() < 0)
+		return;
+
+	if (!fwu->in_ub_mode)
+		fwu_read_f34_queries();
+
+#ifdef F51_DISCRETE_FORCE
+	fwu_read_flash_status();
+	if (!fwu->in_bl_mode)
+		fwu_f51_force_data_init();
+#endif
+}
+
+/* Expansion-function descriptor registering the firmware updater with
+ * the DSX core; unused lifecycle hooks are left NULL.
+ */
+static struct synaptics_rmi4_exp_fn fwu_module = {
+	.fn_type = RMI_FW_UPDATER,
+	.init = synaptics_rmi4_fwu_init,
+	.remove = synaptics_rmi4_fwu_remove,
+	.reset = synaptics_rmi4_fwu_reset,
+	.reinit = NULL,
+	.early_suspend = NULL,
+	.suspend = NULL,
+	.resume = NULL,
+	.late_resume = NULL,
+	.attn = synaptics_rmi4_fwu_attn,
+};
+
+/* Register the updater with the DSX core at module load. */
+static int __init rmi4_fw_update_module_init(void)
+{
+	synaptics_rmi4_new_function(&fwu_module, true);
+
+	return 0;
+}
+
+/* Unregister from the DSX core and wait for remove() to finish. */
+static void __exit rmi4_fw_update_module_exit(void)
+{
+	synaptics_rmi4_new_function(&fwu_module, false);
+
+	wait_for_completion(&fwu_remove_complete);
+
+	return;
+}
+
+/* Module entry points and metadata. */
+module_init(rmi4_fw_update_module_init);
+module_exit(rmi4_fw_update_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX FW Update Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_gesture.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_gesture.c
new file mode 100644
index 0000000..875670b
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_gesture.c
@@ -0,0 +1,2308 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+#define GESTURE_PHYS_NAME "synaptics_dsx/gesture"
+
+#define TUNING_SYSFS_DIR_NAME "tuning"
+
+#define STORE_GESTURES
+#ifdef STORE_GESTURES
+#define GESTURES_TO_STORE 10
+#endif
+
+#define CTRL23_FINGER_REPORT_ENABLE_BIT 0
+#define CTRL27_UDG_ENABLE_BIT 4
+#define WAKEUP_GESTURE_MODE 0x02
+
+static ssize_t udg_sysfs_engine_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_detection_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_detection_score_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_detection_index_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_registration_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_registration_begin_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_registration_status_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_max_index_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_detection_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_index_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_template_valid_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_valid_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_template_clear_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_trace_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_data_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+static ssize_t udg_sysfs_template_data_store(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+static ssize_t udg_sysfs_trace_data_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+static ssize_t udg_sysfs_template_displacement_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_displacement_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_rotation_invariance_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_rotation_invariance_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_scale_invariance_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_scale_invariance_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_threshold_factor_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_threshold_factor_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_match_metric_threshold_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_match_metric_threshold_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_max_inter_stroke_time_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_max_inter_stroke_time_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static int udg_read_tuning_params(void);
+
+static int udg_write_tuning_params(void);
+
+static int udg_detection_enable(bool enable);
+
+static int udg_engine_enable(bool enable);
+
+static int udg_set_index(unsigned char index);
+
+#ifdef STORE_GESTURES
+static int udg_read_valid_data(void);
+static int udg_write_valid_data(void);
+static int udg_read_template_data(unsigned char index);
+static int udg_write_template_data(void);
+#endif
+
+/*
+ * Gesture event codes reported in the first byte of the F12 data_4
+ * register (consumed in udg_sysfs_template_detection_show()).
+ */
+enum gesture_type {
+	DETECTION = 0x0f,
+	REGISTRATION = 0x10,
+};
+
+/*
+ * UDG tuning parameters: named LSB/MSB byte pairs overlaid on a 14-byte
+ * register image so it can be read/written as one block via data[].
+ */
+struct udg_tuning {
+	union {
+		struct {
+			unsigned char maximum_number_of_templates;
+			unsigned char template_size;
+			unsigned char template_disp_lsb;
+			unsigned char template_disp_msb;
+			unsigned char rotation_inv_lsb;
+			unsigned char rotation_inv_msb;
+			unsigned char scale_inv_lsb;
+			unsigned char scale_inv_msb;
+			unsigned char thres_factor_lsb;
+			unsigned char thres_factor_msb;
+			unsigned char metric_thres_lsb;
+			unsigned char metric_thres_msb;
+			unsigned char inter_stroke_lsb;
+			unsigned char inter_stroke_msb;
+		} __packed;
+		unsigned char data[14];
+	};
+};
+
+/*
+ * Cached register addresses used by the UDG module (resolved at init,
+ * presumably from the F12 register descriptors — confirm in init code).
+ */
+struct udg_addr {
+	unsigned short data_4;
+	unsigned short ctrl_18;
+	unsigned short ctrl_20;
+	unsigned short ctrl_23;
+	unsigned short ctrl_27;
+	unsigned short ctrl_41;
+	unsigned short trace_x;
+	unsigned short trace_y;
+	unsigned short trace_segment;
+	unsigned short template_helper;
+	unsigned short template_data;
+	unsigned short template_flags;
+};
+
+/*
+ * F12 query 0 layout (4 bytes): capability bits plus the maximum number
+ * of gesture templates and the 16-bit template size (LSB/MSB).
+ */
+struct synaptics_rmi4_f12_query_0 {
+	union {
+		struct {
+			struct {
+				unsigned char has_register_descriptors:1;
+				unsigned char has_closed_cover:1;
+				unsigned char has_fast_glove_detect:1;
+				unsigned char has_dribble:1;
+				unsigned char has_4p4_jitter_filter_strength:1;
+				unsigned char f12_query0_s0_b5__7:3;
+			} __packed;
+			struct {
+				unsigned char max_num_templates:4;
+				unsigned char f12_query0_s1_b4__7:4;
+				unsigned char template_size_lsb;
+				unsigned char template_size_msb;
+			} __packed;
+		};
+		unsigned char data[4];
+	};
+};
+
+/*
+ * F12 query 5 layout (7 bytes): presence bitmap indicating which F12
+ * control registers (ctrl0..ctrl47) exist on this device.
+ */
+struct synaptics_rmi4_f12_query_5 {
+	union {
+		struct {
+			unsigned char size_of_query6;
+			struct {
+				unsigned char ctrl0_is_present:1;
+				unsigned char ctrl1_is_present:1;
+				unsigned char ctrl2_is_present:1;
+				unsigned char ctrl3_is_present:1;
+				unsigned char ctrl4_is_present:1;
+				unsigned char ctrl5_is_present:1;
+				unsigned char ctrl6_is_present:1;
+				unsigned char ctrl7_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl8_is_present:1;
+				unsigned char ctrl9_is_present:1;
+				unsigned char ctrl10_is_present:1;
+				unsigned char ctrl11_is_present:1;
+				unsigned char ctrl12_is_present:1;
+				unsigned char ctrl13_is_present:1;
+				unsigned char ctrl14_is_present:1;
+				unsigned char ctrl15_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl16_is_present:1;
+				unsigned char ctrl17_is_present:1;
+				unsigned char ctrl18_is_present:1;
+				unsigned char ctrl19_is_present:1;
+				unsigned char ctrl20_is_present:1;
+				unsigned char ctrl21_is_present:1;
+				unsigned char ctrl22_is_present:1;
+				unsigned char ctrl23_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl24_is_present:1;
+				unsigned char ctrl25_is_present:1;
+				unsigned char ctrl26_is_present:1;
+				unsigned char ctrl27_is_present:1;
+				unsigned char ctrl28_is_present:1;
+				unsigned char ctrl29_is_present:1;
+				unsigned char ctrl30_is_present:1;
+				unsigned char ctrl31_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl32_is_present:1;
+				unsigned char ctrl33_is_present:1;
+				unsigned char ctrl34_is_present:1;
+				unsigned char ctrl35_is_present:1;
+				unsigned char ctrl36_is_present:1;
+				unsigned char ctrl37_is_present:1;
+				unsigned char ctrl38_is_present:1;
+				unsigned char ctrl39_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl40_is_present:1;
+				unsigned char ctrl41_is_present:1;
+				unsigned char ctrl42_is_present:1;
+				unsigned char ctrl43_is_present:1;
+				unsigned char ctrl44_is_present:1;
+				unsigned char ctrl45_is_present:1;
+				unsigned char ctrl46_is_present:1;
+				unsigned char ctrl47_is_present:1;
+			} __packed;
+		};
+		unsigned char data[7];
+	};
+};
+
+/*
+ * F12 query 8 layout (4 bytes): presence bitmap indicating which F12
+ * data registers (data0..data23) exist on this device.
+ */
+struct synaptics_rmi4_f12_query_8 {
+	union {
+		struct {
+			unsigned char size_of_query9;
+			struct {
+				unsigned char data0_is_present:1;
+				unsigned char data1_is_present:1;
+				unsigned char data2_is_present:1;
+				unsigned char data3_is_present:1;
+				unsigned char data4_is_present:1;
+				unsigned char data5_is_present:1;
+				unsigned char data6_is_present:1;
+				unsigned char data7_is_present:1;
+			} __packed;
+			struct {
+				unsigned char data8_is_present:1;
+				unsigned char data9_is_present:1;
+				unsigned char data10_is_present:1;
+				unsigned char data11_is_present:1;
+				unsigned char data12_is_present:1;
+				unsigned char data13_is_present:1;
+				unsigned char data14_is_present:1;
+				unsigned char data15_is_present:1;
+			} __packed;
+			struct {
+				unsigned char data16_is_present:1;
+				unsigned char data17_is_present:1;
+				unsigned char data18_is_present:1;
+				unsigned char data19_is_present:1;
+				unsigned char data20_is_present:1;
+				unsigned char data21_is_present:1;
+				unsigned char data22_is_present:1;
+				unsigned char data23_is_present:1;
+			} __packed;
+		};
+		unsigned char data[4];
+	};
+};
+
+/*
+ * F12 control 41 (1 byte): gesture registration control — enable bit,
+ * target template index, and the begin-registration trigger bit.
+ */
+struct synaptics_rmi4_f12_control_41 {
+	union {
+		struct {
+			unsigned char enable_registration:1;
+			unsigned char template_index:4;
+			unsigned char begin:1;
+			unsigned char f12_ctrl41_b6__7:2;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/*
+ * Runtime state of the user-defined-gesture (UDG) module: cached register
+ * addresses/offsets, status bytes from the last attention event, and the
+ * dynamically sized transfer buffers for control, trace and template data.
+ */
+struct synaptics_rmi4_udg_handle {
+	atomic_t attn_event;		/* set by attn handler, consumed by sysfs */
+	unsigned char intr_mask;
+	unsigned char report_flags;
+	unsigned char object_type_enable1;	/* saved ctrl_23 values, restored */
+	unsigned char object_type_enable2;	/* when registration is disabled */
+	unsigned char trace_size;		/* number of trace points */
+	unsigned char template_index;
+	unsigned char max_num_templates;
+	unsigned char detection_score;
+	unsigned char detection_index;
+	unsigned char detection_status;
+	unsigned char registration_status;
+	unsigned char *ctrl_buf;
+	unsigned char *trace_data_buf;
+	unsigned char *template_data_buf;
+#ifdef STORE_GESTURES
+	unsigned char gestures_to_store;
+	unsigned char *storage_buf;
+	unsigned char valid_buf[2];
+#endif
+	unsigned short trace_data_buf_size;
+	unsigned short template_size;
+	unsigned short template_data_size;
+	unsigned short query_base_addr;
+	unsigned short control_base_addr;
+	unsigned short data_base_addr;
+	unsigned short command_base_addr;
+	unsigned short ctrl_18_sub10_off;
+	unsigned short ctrl_20_sub1_off;
+	unsigned short ctrl_23_sub3_off;
+	unsigned short ctrl_27_sub5_off;
+	struct input_dev *udg_dev;
+	struct kobject *tuning_dir;
+	struct udg_addr addr;
+	struct udg_tuning tuning;
+	struct synaptics_rmi4_data *rmi4_data;
+};
+
+/*
+ * Sysfs attributes for the UDG device.  Write-only entries (0220) pair a
+ * real store handler with synaptics_rmi4_show_error; read-only entries
+ * (0444) do the reverse with synaptics_rmi4_store_error.
+ */
+static struct device_attribute attrs[] = {
+	__ATTR(engine_enable, 0220,
+			synaptics_rmi4_show_error,
+			udg_sysfs_engine_enable_store),
+	__ATTR(detection_enable, 0220,
+			synaptics_rmi4_show_error,
+			udg_sysfs_detection_enable_store),
+	__ATTR(detection_score, 0444,
+			udg_sysfs_detection_score_show,
+			synaptics_rmi4_store_error),
+	__ATTR(detection_index, 0444,
+			udg_sysfs_detection_index_show,
+			synaptics_rmi4_store_error),
+	__ATTR(registration_enable, 0220,
+			synaptics_rmi4_show_error,
+			udg_sysfs_registration_enable_store),
+	__ATTR(registration_begin, 0220,
+			synaptics_rmi4_show_error,
+			udg_sysfs_registration_begin_store),
+	__ATTR(registration_status, 0444,
+			udg_sysfs_registration_status_show,
+			synaptics_rmi4_store_error),
+	__ATTR(template_size, 0444,
+			udg_sysfs_template_size_show,
+			synaptics_rmi4_store_error),
+	__ATTR(template_max_index, 0444,
+			udg_sysfs_template_max_index_show,
+			synaptics_rmi4_store_error),
+	__ATTR(template_detection, 0444,
+			udg_sysfs_template_detection_show,
+			synaptics_rmi4_store_error),
+	__ATTR(template_index, 0220,
+			synaptics_rmi4_show_error,
+			udg_sysfs_template_index_store),
+	__ATTR(template_valid, 0664,
+			udg_sysfs_template_valid_show,
+			udg_sysfs_template_valid_store),
+	__ATTR(template_clear, 0220,
+			synaptics_rmi4_show_error,
+			udg_sysfs_template_clear_store),
+	__ATTR(trace_size, 0444,
+			udg_sysfs_trace_size_show,
+			synaptics_rmi4_store_error),
+};
+
+/* Binary sysfs node for reading/writing raw gesture template data. */
+static struct bin_attribute template_data = {
+	.attr = {
+		.name = "template_data",
+		.mode = 0664,
+	},
+	.size = 0,
+	.read = udg_sysfs_template_data_show,
+	.write = udg_sysfs_template_data_store,
+};
+
+/* Binary sysfs node exposing the recorded gesture trace (read-only). */
+static struct bin_attribute trace_data = {
+	.attr = {
+		.name = "trace_data",
+		.mode = 0444,
+	},
+	.size = 0,
+	.read = udg_sysfs_trace_data_show,
+	.write = NULL,
+};
+
+/* Tuning parameters exposed under the "tuning" sysfs directory. */
+static struct device_attribute params[] = {
+	__ATTR(template_displacement, 0664,
+			udg_sysfs_template_displacement_show,
+			udg_sysfs_template_displacement_store),
+	__ATTR(rotation_invariance, 0664,
+			udg_sysfs_rotation_invariance_show,
+			udg_sysfs_rotation_invariance_store),
+	__ATTR(scale_invariance, 0664,
+			udg_sysfs_scale_invariance_show,
+			udg_sysfs_scale_invariance_store),
+	__ATTR(threshold_factor, 0664,
+			udg_sysfs_threshold_factor_show,
+			udg_sysfs_threshold_factor_store),
+	__ATTR(match_metric_threshold, 0664,
+			udg_sysfs_match_metric_threshold_show,
+			udg_sysfs_match_metric_threshold_store),
+	__ATTR(max_inter_stroke_time, 0664,
+			udg_sysfs_max_inter_stroke_time_show,
+			udg_sysfs_max_inter_stroke_time_store),
+};
+
+/* Singleton handle for the UDG module (one device supported). */
+static struct synaptics_rmi4_udg_handle *udg;
+
+/* Subpacket sizes of ctrl_18/20/23/27 — presumably used to compute the
+ * ctrl_*_off offsets in the handle; confirm against the init code. */
+static unsigned char ctrl_18_sub_size[] = {10, 10, 10, 2, 3, 4, 3, 3, 1, 1};
+static unsigned char ctrl_20_sub_size[] = {2};
+static unsigned char ctrl_23_sub_size[] = {1, 1, 1};
+static unsigned char ctrl_27_sub_size[] = {1, 5, 2, 1, 7};
+
+/* Signaled by the remove callback; module exit waits on it. */
+DECLARE_COMPLETION(udg_remove_complete);
+
+/*
+ * Sysfs store: enable (1) or disable (0) the user-defined-gesture engine.
+ * Returns count on success, negative errno on failure.
+ *
+ * Note: kstrtouint() returns 0 on success and a negative errno on error
+ * (unlike sscanf(), which returns the number of conversions), so the
+ * result must be tested for non-zero, not compared against 1 — comparing
+ * against 1 would make this store always fail with -EINVAL.
+ */
+static ssize_t udg_sysfs_engine_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	bool enable;
+	unsigned int input;
+
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input == 1)
+		enable = true;
+	else if (input == 0)
+		enable = false;
+	else
+		return -EINVAL;
+
+	retval = udg_engine_enable(enable);
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/*
+ * Sysfs store: enable (1) or disable (0) gesture detection.  Clears any
+ * stale detection status before changing the state.
+ *
+ * kstrtouint() returns 0 on success (not a conversion count), so the
+ * result is tested for non-zero failure rather than compared with 1.
+ */
+static ssize_t udg_sysfs_detection_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	bool enable;
+	unsigned int input;
+
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input == 1)
+		enable = true;
+	else if (input == 0)
+		enable = false;
+	else
+		return -EINVAL;
+
+	udg->detection_status = 0;
+
+	retval = udg_detection_enable(enable);
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/* Sysfs show: score of the most recent gesture detection event. */
+static ssize_t udg_sysfs_detection_score_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", udg->detection_score);
+}
+
+/* Sysfs show: template index of the most recent detection event. */
+static ssize_t udg_sysfs_detection_index_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", udg->detection_index);
+}
+
+/*
+ * Sysfs store: enter (1) or leave (0) gesture registration mode.
+ *
+ * On enable, ctrl_23 is rewritten so that only finger reporting is active
+ * (saving nothing — the previously captured object_type_enable1/2 values
+ * are restored on disable).  In both cases the registration enable bit in
+ * ctrl_41 is updated to match.
+ *
+ * kstrtouint() returns 0 on success (not a conversion count), so the
+ * result is tested for non-zero failure rather than compared with 1.
+ */
+static ssize_t udg_sysfs_registration_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	bool enable;
+	unsigned int input;
+	struct synaptics_rmi4_f12_control_41 control_41;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input == 1)
+		enable = true;
+	else if (input == 0)
+		enable = false;
+	else
+		return -EINVAL;
+
+	if (enable) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				udg->addr.ctrl_23,
+				udg->ctrl_buf,
+				udg->ctrl_23_sub3_off + 1);
+		if (retval < 0)
+			return retval;
+
+		/* report fingers only while registering a gesture */
+		udg->ctrl_buf[0] = 0;
+		udg->ctrl_buf[0] |= (1 << CTRL23_FINGER_REPORT_ENABLE_BIT);
+		if (udg->ctrl_23_sub3_off)
+			udg->ctrl_buf[udg->ctrl_23_sub3_off] = 0;
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				udg->addr.ctrl_23,
+				udg->ctrl_buf,
+				udg->ctrl_23_sub3_off + 1);
+		if (retval < 0)
+			return retval;
+	} else {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				udg->addr.ctrl_23,
+				udg->ctrl_buf,
+				udg->ctrl_23_sub3_off + 1);
+		if (retval < 0)
+			return retval;
+
+		/* restore the object reporting configuration saved at init */
+		udg->ctrl_buf[0] = udg->object_type_enable1;
+		if (udg->ctrl_23_sub3_off) {
+			udg->ctrl_buf[udg->ctrl_23_sub3_off] =
+					udg->object_type_enable2;
+		}
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				udg->addr.ctrl_23,
+				udg->ctrl_buf,
+				udg->ctrl_23_sub3_off + 1);
+		if (retval < 0)
+			return retval;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_41,
+			control_41.data,
+			sizeof(control_41.data));
+	if (retval < 0)
+		return retval;
+
+	control_41.enable_registration = enable ? 1 : 0;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.ctrl_41,
+			control_41.data,
+			sizeof(control_41.data));
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/*
+ * Sysfs store: set (1) or clear (0) the begin-registration bit in F12
+ * ctrl_41 via read-modify-write.
+ *
+ * kstrtouint() returns 0 on success (not a conversion count), so the
+ * result is tested for non-zero failure rather than compared with 1.
+ */
+static ssize_t udg_sysfs_registration_begin_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	bool begin;
+	unsigned int input;
+	struct synaptics_rmi4_f12_control_41 control_41;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input == 1)
+		begin = true;
+	else if (input == 0)
+		begin = false;
+	else
+		return -EINVAL;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_41,
+			control_41.data,
+			sizeof(control_41.data));
+	if (retval < 0)
+		return retval;
+
+	control_41.begin = begin ? 1 : 0;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.ctrl_41,
+			control_41.data,
+			sizeof(control_41.data));
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/* Sysfs show: status byte from the last registration event. */
+static ssize_t udg_sysfs_registration_status_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%02x\n", udg->registration_status);
+}
+
+/* Sysfs show: size in bytes of one gesture template. */
+static ssize_t udg_sysfs_template_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", udg->template_size);
+}
+
+/* Sysfs show: highest valid template index (count - 1). */
+static ssize_t udg_sysfs_template_max_index_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", udg->max_num_templates - 1);
+}
+
+/*
+ * Sysfs show: report and consume the latest gesture event.
+ *
+ * The attention handler sets attn_event; this handler consumes it (and
+ * clears it) so each event is reported once.  If no status byte was
+ * cached yet, data_4 is read here to obtain it.  Depending on the status
+ * (DETECTION or REGISTRATION), the score/index/trace-size fields of the
+ * handle are refreshed from the gesture_detection bytes before the status
+ * is printed; "0" is printed when there is nothing to report.
+ */
+static ssize_t udg_sysfs_template_detection_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	int attn_event;
+	unsigned char detection_status;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	/* consume the pending attention event, if any */
+	attn_event = atomic_read(&udg->attn_event);
+	atomic_set(&udg->attn_event, 0);
+
+	if (attn_event == 0)
+		return snprintf(buf, PAGE_SIZE, "0\n");
+
+	if (udg->detection_status == 0) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				udg->addr.data_4,
+				rmi4_data->gesture_detection,
+				sizeof(rmi4_data->gesture_detection));
+		if (retval < 0)
+			return retval;
+
+		udg->detection_status = rmi4_data->gesture_detection[0];
+	}
+
+	/* one-shot: clear the cached status once it is being reported */
+	detection_status = udg->detection_status;
+	udg->detection_status = 0;
+
+	switch (detection_status) {
+	case DETECTION:
+		udg->detection_score = rmi4_data->gesture_detection[1];
+		udg->detection_index = rmi4_data->gesture_detection[4];
+		udg->trace_size = rmi4_data->gesture_detection[3];
+		break;
+	case REGISTRATION:
+		udg->registration_status = rmi4_data->gesture_detection[1];
+		udg->trace_size = rmi4_data->gesture_detection[3];
+		break;
+	default:
+		return snprintf(buf, PAGE_SIZE, "0\n");
+	}
+
+	return snprintf(buf, PAGE_SIZE, "0x%02x\n", detection_status);
+}
+
+/*
+ * Sysfs store: select the template index that subsequent template_data /
+ * template_valid / template_clear operations act upon.
+ */
+static ssize_t udg_sysfs_template_index_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long index;
+
+	retval = sstrtoul(buf, 10, &index);
+	if (retval)
+		return retval;
+
+	retval = udg_set_index((unsigned char)index);
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/*
+ * Sysfs show: report whether the currently selected template is valid.
+ * The valid bits live in a 2-byte flags register, one bit per template
+ * (byte = index / 8, bit = index % 8).
+ */
+static ssize_t udg_sysfs_template_valid_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned char valid;
+	unsigned char offset;
+	unsigned char byte_num;
+	unsigned char template_flags[2];
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	byte_num = udg->template_index / 8;
+	offset = udg->template_index % 8;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.template_flags,
+			template_flags,
+			sizeof(template_flags));
+	if (retval < 0)
+		return retval;
+
+	valid = (template_flags[byte_num] & (1 << offset)) >> offset;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", valid);
+}
+
+/*
+ * Sysfs store: set or clear the valid bit of the currently selected
+ * template via read-modify-write of the 2-byte template flags register.
+ * Any non-zero input is treated as 1.
+ */
+static ssize_t udg_sysfs_template_valid_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long valid;
+	unsigned char offset;
+	unsigned char byte_num;
+	unsigned char template_flags[2];
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &valid);
+	if (retval)
+		return retval;
+
+	if (valid > 0)
+		valid = 1;
+
+	byte_num = udg->template_index / 8;
+	offset = udg->template_index % 8;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.template_flags,
+			template_flags,
+			sizeof(template_flags));
+	if (retval < 0)
+		return retval;
+
+	if (valid)
+		template_flags[byte_num] |= (1 << offset);
+	else
+		template_flags[byte_num] &= ~(1 << offset);
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.template_flags,
+			template_flags,
+			sizeof(template_flags));
+	if (retval < 0)
+		return retval;
+
+#ifdef STORE_GESTURES
+	/* keep the locally stored copy of the valid bits in sync */
+	udg_read_valid_data();
+#endif
+
+	return count;
+}
+
+/*
+ * Sysfs store: clear the currently selected template.  Writing "1" zeroes
+ * the template data register block and then clears the template's valid
+ * bit by reusing udg_sysfs_template_valid_store() with input "0".
+ *
+ * kstrtouint() returns 0 on success (not a conversion count), so the
+ * result is tested for non-zero failure rather than compared with 1.
+ */
+static ssize_t udg_sysfs_template_clear_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	const char cmd[] = {'0', 0};
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input != 1)
+		return -EINVAL;
+
+	memset(udg->template_data_buf, 0x00, udg->template_data_size);
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.template_data,
+			udg->template_data_buf,
+			udg->template_data_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to clear template data\n",
+				__func__);
+		return retval;
+	}
+
+	retval = udg_sysfs_template_valid_store(dev, attr, cmd, 1);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to clear valid bit\n",
+				__func__);
+		return retval;
+	}
+
+#ifdef STORE_GESTURES
+	/* refresh the locally stored template and valid-bit copies */
+	udg_read_template_data(udg->template_index);
+	udg_read_valid_data();
+#endif
+
+	return count;
+}
+
+/* Sysfs show: number of points in the most recent gesture trace. */
+static ssize_t udg_sysfs_trace_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", udg->trace_size);
+}
+
+/*
+ * Binary sysfs read: return the recorded gesture trace.
+ *
+ * Each trace point occupies 5 bytes (2 X + 2 Y + 1 segment); the output
+ * is laid out as all X words, then all Y words, then the segment bytes.
+ * The staging buffer is grown on demand and reused across reads.
+ * Returns the trace size in bytes, or a negative errno.
+ */
+static ssize_t udg_sysfs_trace_data_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	unsigned short index = 0;
+	unsigned short trace_data_size;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	trace_data_size = udg->trace_size * 5;
+
+	if (trace_data_size == 0)
+		return -EINVAL;
+
+	if (count < trace_data_size) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Not enough space (%d bytes) in buffer\n",
+				__func__, (unsigned int)count);
+		return -EINVAL;
+	}
+
+	/* grow the staging buffer if the current one is too small */
+	if (udg->trace_data_buf_size < trace_data_size) {
+		if (udg->trace_data_buf_size)
+			kfree(udg->trace_data_buf);
+		udg->trace_data_buf = kzalloc(trace_data_size, GFP_KERNEL);
+		if (!udg->trace_data_buf) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for trace data buffer\n",
+					__func__);
+			udg->trace_data_buf_size = 0;
+			return -ENOMEM;
+		}
+		udg->trace_data_buf_size = trace_data_size;
+	}
+
+	/* X coordinates: 2 bytes per trace point */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.trace_x,
+			&udg->trace_data_buf[index],
+			udg->trace_size * 2);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read trace X data\n",
+				__func__);
+		return retval;
+	} else {
+		index += udg->trace_size * 2;
+	}
+
+	/* Y coordinates: 2 bytes per trace point */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.trace_y,
+			&udg->trace_data_buf[index],
+			udg->trace_size * 2);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read trace Y data\n",
+				__func__);
+		return retval;
+	} else {
+		index += udg->trace_size * 2;
+	}
+
+	/* segment data: 1 byte per trace point */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.trace_segment,
+			&udg->trace_data_buf[index],
+			udg->trace_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read trace segment data\n",
+				__func__);
+		return retval;
+	}
+
+	retval = secure_memcpy(buf, count, udg->trace_data_buf,
+			udg->trace_data_buf_size, trace_data_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy trace data\n",
+				__func__);
+		return retval;
+	}
+
+	return trace_data_size;
+}
+
+/*
+ * Binary sysfs read: read the currently selected template's data from the
+ * device into the staging buffer and copy it to the caller.  Returns the
+ * template data size on success, negative errno on failure.
+ */
+static ssize_t udg_sysfs_template_data_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	if (count < udg->template_data_size) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Not enough space (%d bytes) in buffer\n",
+				__func__, (unsigned int)count);
+		return -EINVAL;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.template_data,
+			udg->template_data_buf,
+			udg->template_data_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read template data\n",
+				__func__);
+		return retval;
+	}
+
+	retval = secure_memcpy(buf, count, udg->template_data_buf,
+			udg->template_data_size, udg->template_data_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy template data\n",
+				__func__);
+		return retval;
+	}
+
+#ifdef STORE_GESTURES
+	/* refresh the locally stored template and valid-bit copies */
+	udg_read_template_data(udg->template_index);
+	udg_read_valid_data();
+#endif
+
+	return udg->template_data_size;
+}
+
+/*
+ * Binary sysfs write: copy caller-supplied template data into the staging
+ * buffer (bounded by template_data_size) and write it to the device.
+ * Returns count on success, negative errno on failure.
+ */
+static ssize_t udg_sysfs_template_data_store(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = secure_memcpy(udg->template_data_buf, udg->template_data_size,
+			buf, count, count);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy template data\n",
+				__func__);
+		return retval;
+	}
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.template_data,
+			udg->template_data_buf,
+			count);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write template data\n",
+				__func__);
+		return retval;
+	}
+
+#ifdef STORE_GESTURES
+	/* refresh the locally stored template and valid-bit copies */
+	udg_read_template_data(udg->template_index);
+	udg_read_valid_data();
+#endif
+
+	return count;
+}
+
+/*
+ * Tuning show: template displacement, assembled from its LSB/MSB pair
+ * after refreshing the tuning block from the device.
+ */
+static ssize_t udg_sysfs_template_displacement_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned short template_displacement;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	template_displacement =
+			((unsigned short)udg->tuning.template_disp_lsb << 0) |
+			((unsigned short)udg->tuning.template_disp_msb << 8);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", template_displacement);
+}
+
+/*
+ * Tuning store: update template displacement via read-modify-write of
+ * the tuning block (split into LSB/MSB bytes before writing back).
+ */
+static ssize_t udg_sysfs_template_displacement_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long input;
+
+	retval = sstrtoul(buf, 10, &input);
+	if (retval)
+		return retval;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	udg->tuning.template_disp_lsb = (unsigned char)(input >> 0);
+	udg->tuning.template_disp_msb = (unsigned char)(input >> 8);
+
+	retval = udg_write_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/* Tuning show: rotation invariance (16-bit LSB/MSB pair). */
+static ssize_t udg_sysfs_rotation_invariance_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned short rotation_invariance;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	rotation_invariance =
+			((unsigned short)udg->tuning.rotation_inv_lsb << 0) |
+			((unsigned short)udg->tuning.rotation_inv_msb << 8);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", rotation_invariance);
+}
+
+/* Tuning store: rotation invariance via tuning-block read-modify-write. */
+static ssize_t udg_sysfs_rotation_invariance_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long input;
+
+	retval = sstrtoul(buf, 10, &input);
+	if (retval)
+		return retval;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	udg->tuning.rotation_inv_lsb = (unsigned char)(input >> 0);
+	udg->tuning.rotation_inv_msb = (unsigned char)(input >> 8);
+
+	retval = udg_write_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/* Tuning show: scale invariance (16-bit LSB/MSB pair). */
+static ssize_t udg_sysfs_scale_invariance_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned short scale_invariance;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	scale_invariance =
+			((unsigned short)udg->tuning.scale_inv_lsb << 0) |
+			((unsigned short)udg->tuning.scale_inv_msb << 8);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", scale_invariance);
+}
+
+/* Tuning store: scale invariance via tuning-block read-modify-write. */
+static ssize_t udg_sysfs_scale_invariance_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long input;
+
+	retval = sstrtoul(buf, 10, &input);
+	if (retval)
+		return retval;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	udg->tuning.scale_inv_lsb = (unsigned char)(input >> 0);
+	udg->tuning.scale_inv_msb = (unsigned char)(input >> 8);
+
+	retval = udg_write_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/* sysfs show: report the 16-bit threshold factor tuning parameter after
+ * refreshing the tuning block from the device.
+ */
+static ssize_t udg_sysfs_threshold_factor_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned short threshold_factor;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	threshold_factor =
+			((unsigned short)udg->tuning.thres_factor_lsb << 0) |
+			((unsigned short)udg->tuning.thres_factor_msb << 8);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", threshold_factor);
+}
+
+/* sysfs store: parse a decimal value and write its low 16 bits into the
+ * threshold factor LSB/MSB tuning bytes, then flush to the device.
+ */
+static ssize_t udg_sysfs_threshold_factor_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long input;
+
+	retval = sstrtoul(buf, 10, &input);
+	if (retval)
+		return retval;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	udg->tuning.thres_factor_lsb = (unsigned char)(input >> 0);
+	udg->tuning.thres_factor_msb = (unsigned char)(input >> 8);
+
+	retval = udg_write_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/* sysfs show: report the 16-bit match metric threshold tuning parameter
+ * after refreshing the tuning block from the device.
+ */
+static ssize_t udg_sysfs_match_metric_threshold_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned short match_metric_threshold;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	match_metric_threshold =
+			((unsigned short)udg->tuning.metric_thres_lsb << 0) |
+			((unsigned short)udg->tuning.metric_thres_msb << 8);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", match_metric_threshold);
+}
+
+/* sysfs store: parse a decimal value and write its low 16 bits into the
+ * match metric threshold LSB/MSB tuning bytes, then flush to the device.
+ */
+static ssize_t udg_sysfs_match_metric_threshold_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long input;
+
+	retval = sstrtoul(buf, 10, &input);
+	if (retval)
+		return retval;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	udg->tuning.metric_thres_lsb = (unsigned char)(input >> 0);
+	udg->tuning.metric_thres_msb = (unsigned char)(input >> 8);
+
+	retval = udg_write_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/* sysfs show: report the 16-bit maximum inter-stroke time tuning
+ * parameter after refreshing the tuning block from the device.
+ */
+static ssize_t udg_sysfs_max_inter_stroke_time_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned short max_inter_stroke_time;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	max_inter_stroke_time =
+			((unsigned short)udg->tuning.inter_stroke_lsb << 0) |
+			((unsigned short)udg->tuning.inter_stroke_msb << 8);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", max_inter_stroke_time);
+}
+
+/* sysfs store: parse a decimal value and write its low 16 bits into the
+ * maximum inter-stroke time LSB/MSB tuning bytes, then flush to the
+ * device.
+ */
+static ssize_t udg_sysfs_max_inter_stroke_time_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long input;
+
+	retval = sstrtoul(buf, 10, &input);
+	if (retval)
+		return retval;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	udg->tuning.inter_stroke_lsb = (unsigned char)(input >> 0);
+	udg->tuning.inter_stroke_msb = (unsigned char)(input >> 8);
+
+	retval = udg_write_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/* Determine whether subpacket @subpacket of F12 control register
+ * @ctrlreg is present on this device.
+ *
+ * Query 5 holds one presence bit per control register; query 6 holds a
+ * variable-length size descriptor per present register, where bit 7 of
+ * each size byte indicates that another size byte follows.
+ *
+ * Returns 1 if the subpacket is present, 0 if not, negative errno on
+ * communication/allocation failure or an invalid register number.
+ */
+static int udg_ctrl_subpacket(unsigned char ctrlreg,
+		unsigned char subpacket,
+		struct synaptics_rmi4_f12_query_5 *query_5)
+{
+	int retval;
+	unsigned char cnt;
+	unsigned char regnum;
+	unsigned char bitnum;
+	unsigned char q5_index;
+	unsigned char q6_index;
+	unsigned char offset;
+	unsigned char max_ctrlreg;
+	unsigned char *query_6;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	/* highest register number describable by query 5's presence map
+	 * (first byte of query_5->data is not part of the map) */
+	max_ctrlreg = (sizeof(query_5->data) - 1) * 8 - 1;
+
+	if (ctrlreg > max_ctrlreg) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Control register number (%d) over limit\n",
+				__func__, ctrlreg);
+		return -EINVAL;
+	}
+
+	/* bail out early if the register itself is absent */
+	q5_index = ctrlreg / 8 + 1;
+	bitnum = ctrlreg % 8;
+	if ((query_5->data[q5_index] & (1 << bitnum)) == 0x00) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Control %d is not present\n",
+				__func__, ctrlreg);
+		return -EINVAL;
+	}
+
+	query_6 = kmalloc(query_5->size_of_query6, GFP_KERNEL);
+	if (!query_6) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for query 6\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->query_base_addr + 6,
+			query_6,
+			query_5->size_of_query6);
+	if (retval < 0)
+		goto exit;
+
+	q6_index = 0;
+
+	/* walk past the size descriptors of every present register that
+	 * precedes @ctrlreg to locate @ctrlreg's own descriptor */
+	for (regnum = 0; regnum < ctrlreg; regnum++) {
+		q5_index = regnum / 8 + 1;
+		bitnum = regnum % 8;
+		if ((query_5->data[q5_index] & (1 << bitnum)) == 0x00)
+			continue;
+
+		/* a zero size byte appears to denote an extended 3-byte
+		 * size encoding — NOTE(review): confirm against the F12
+		 * register map spec */
+		if (query_6[q6_index] == 0x00)
+			q6_index += 3;
+		else
+			q6_index++;
+
+		/* bit 7 set on a subpacket byte means more bytes follow */
+		while (query_6[q6_index] & ~MASK_7BIT)
+			q6_index++;
+
+		q6_index++;
+	}
+
+	/* scan @ctrlreg's subpacket presence bytes: 7 presence bits per
+	 * byte, chained via bit 7 as a continuation flag */
+	cnt = 0;
+	q6_index++;
+	offset = subpacket / 7;
+	bitnum = subpacket % 7;
+
+	do {
+		if (cnt == offset) {
+			if (query_6[q6_index + cnt] & (1 << bitnum))
+				retval = 1;
+			else
+				retval = 0;
+			goto exit;
+		}
+		cnt++;
+	} while (query_6[q6_index + cnt - 1] & ~MASK_7BIT);
+
+	/* ran out of presence bytes before reaching @subpacket */
+	retval = 0;
+
+exit:
+	kfree(query_6);
+
+	return retval;
+}
+
+/* Read F12 control 18 (up to and including the tuning subpacket at
+ * ctrl_18_sub10_off) into ctrl_buf and copy the tuning structure into
+ * udg->tuning. Returns 0 on success, negative errno on failure.
+ */
+static int udg_read_tuning_params(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_18,
+			udg->ctrl_buf,
+			udg->ctrl_18_sub10_off + sizeof(struct udg_tuning));
+	if (retval < 0)
+		return retval;
+
+	secure_memcpy(udg->tuning.data,
+			sizeof(udg->tuning.data),
+			(unsigned char *)&udg->ctrl_buf[udg->ctrl_18_sub10_off],
+			sizeof(struct udg_tuning),
+			sizeof(struct udg_tuning));
+
+	return 0;
+}
+
+/* Copy udg->tuning into the cached ctrl_buf image at the tuning
+ * subpacket offset and write the whole prefix of F12 control 18 back to
+ * the device. Callers are expected to have populated ctrl_buf via
+ * udg_read_tuning_params() first so earlier subpackets are preserved.
+ * Returns 0 on success, negative errno on failure.
+ */
+static int udg_write_tuning_params(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	secure_memcpy((unsigned char *)&udg->ctrl_buf[udg->ctrl_18_sub10_off],
+			sizeof(struct udg_tuning),
+			udg->tuning.data,
+			sizeof(udg->tuning.data),
+			sizeof(struct udg_tuning));
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.ctrl_18,
+			udg->ctrl_buf,
+			udg->ctrl_18_sub10_off + sizeof(struct udg_tuning));
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/* Toggle wakeup gesture detection via the report-flags byte of F12
+ * control 20. When enabling, the byte is forced to WAKEUP_GESTURE_MODE;
+ * when disabling, the flags captured at init time (udg->report_flags)
+ * are restored. Returns 0 on success, negative errno on failure.
+ */
+static int udg_detection_enable(bool enable)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	/* read-modify-write so the preceding ctrl_20 bytes are preserved */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_20,
+			udg->ctrl_buf,
+			udg->ctrl_20_sub1_off + 1);
+	if (retval < 0)
+		return retval;
+
+	if (enable)
+		udg->ctrl_buf[udg->ctrl_20_sub1_off] = WAKEUP_GESTURE_MODE;
+	else
+		udg->ctrl_buf[udg->ctrl_20_sub1_off] = udg->report_flags;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.ctrl_20,
+			udg->ctrl_buf,
+			udg->ctrl_20_sub1_off + 1);
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/* Set or clear the user-defined-gesture enable bit
+ * (CTRL27_UDG_ENABLE_BIT) in F12 control 27 using a read-modify-write of
+ * the register prefix. Returns 0 on success, negative errno on failure.
+ */
+static int udg_engine_enable(bool enable)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	if (enable) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				udg->addr.ctrl_27,
+				udg->ctrl_buf,
+				udg->ctrl_27_sub5_off + 1);
+		if (retval < 0)
+			return retval;
+
+		udg->ctrl_buf[udg->ctrl_27_sub5_off] |=
+				(1 << CTRL27_UDG_ENABLE_BIT);
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				udg->addr.ctrl_27,
+				udg->ctrl_buf,
+				udg->ctrl_27_sub5_off + 1);
+		if (retval < 0)
+			return retval;
+	} else {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				udg->addr.ctrl_27,
+				udg->ctrl_buf,
+				udg->ctrl_27_sub5_off + 1);
+		if (retval < 0)
+			return retval;
+
+		udg->ctrl_buf[udg->ctrl_27_sub5_off] &=
+				~(1 << CTRL27_UDG_ENABLE_BIT);
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				udg->addr.ctrl_27,
+				udg->ctrl_buf,
+				udg->ctrl_27_sub5_off + 1);
+		if (retval < 0)
+			return retval;
+	}
+
+	return 0;
+}
+
+/* Attention handler body: record that an attention event occurred and,
+ * if the controller is suspended, read the gesture detection status
+ * (F12 data 4, unless already captured by the core into
+ * gesture_detection[]) and emit a KEY_WAKEUP press/release pair when a
+ * gesture was detected, marking the device as resumed.
+ */
+static void udg_report(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	atomic_set(&udg->attn_event, 1);
+
+	if (rmi4_data->suspend) {
+		/* a zero first byte means the core has not pre-fetched the
+		 * detection data, so read it here */
+		if (rmi4_data->gesture_detection[0] == 0) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					udg->addr.data_4,
+					rmi4_data->gesture_detection,
+					sizeof(rmi4_data->gesture_detection));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to read gesture detection\n",
+						__func__);
+				return;
+			}
+		}
+
+		udg->detection_status = rmi4_data->gesture_detection[0];
+		rmi4_data->gesture_detection[0] = 0;
+
+		if (udg->detection_status == DETECTION) {
+			input_report_key(udg->udg_dev, KEY_WAKEUP, 1);
+			input_sync(udg->udg_dev);
+			input_report_key(udg->udg_dev, KEY_WAKEUP, 0);
+			input_sync(udg->udg_dev);
+			rmi4_data->suspend = false;
+		}
+	}
+
+	return;
+}
+
+/* Select the active gesture template by writing @index into the
+ * template_index field of F12 control 41 (read-modify-write). Returns 0
+ * on success, -EINVAL if @index exceeds max_num_templates, or a negative
+ * errno on register access failure.
+ */
+static int udg_set_index(unsigned char index)
+{
+	int retval;
+	struct synaptics_rmi4_f12_control_41 control_41;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	if (index >= udg->max_num_templates)
+		return -EINVAL;
+
+	udg->template_index = index;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_41,
+			control_41.data,
+			sizeof(control_41.data));
+	if (retval < 0)
+		return retval;
+
+	control_41.template_index = udg->template_index;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.ctrl_41,
+			control_41.data,
+			sizeof(control_41.data));
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+#ifdef STORE_GESTURES
+/* Read the per-template validity flags from the device into valid_buf
+ * (used to persist gestures across resets). Returns 0 or negative errno.
+ */
+static int udg_read_valid_data(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.template_flags,
+			udg->valid_buf,
+			sizeof(udg->valid_buf));
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/* Write the cached per-template validity flags back to the device
+ * (counterpart of udg_read_valid_data). Returns 0 or negative errno.
+ */
+static int udg_write_valid_data(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.template_flags,
+			udg->valid_buf,
+			sizeof(udg->valid_buf));
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/* Read the template data for gesture @index into its slot of
+ * storage_buf, selecting the template first via udg_set_index().
+ * Returns 0 on success, negative errno on failure.
+ */
+static int udg_read_template_data(unsigned char index)
+{
+	int retval;
+	unsigned char *storage;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	udg_set_index(index);
+	storage = &(udg->storage_buf[index * udg->template_data_size]);
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.template_data,
+			storage,
+			udg->template_data_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read template data\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/* Restore all stored gesture templates to the device, one template at a
+ * time, selecting each index before writing its slot of storage_buf.
+ * Returns 0 on success or the first negative errno encountered.
+ */
+static int udg_write_template_data(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char *storage;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	for (ii = 0; ii < udg->gestures_to_store; ii++) {
+		udg_set_index(ii);
+		storage = &(udg->storage_buf[ii * udg->template_data_size]);
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				udg->addr.template_data,
+				storage,
+				udg->template_data_size);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write template data\n",
+					__func__);
+			return retval;
+		}
+	}
+
+	return 0;
+}
+#endif
+
+/* Initialize all F12 register addresses and subpacket offsets needed by
+ * the gesture module.
+ *
+ * F12 registers are packed: a register's address is the base address
+ * plus the count of present lower-numbered registers, so the ctrl_*
+ * offsets below are computed by summing presence bits from query 5
+ * (controls) and query 8 (data). Also caches the template geometry from
+ * query 0 and the default report flags / object type enables.
+ *
+ * Returns 0 on success, -ENODEV when required registers are missing, or
+ * a negative errno on register access failure.
+ */
+static int udg_reg_init(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char data_offset;
+	unsigned char size_of_query;
+	unsigned char ctrl_18_offset;
+	unsigned char ctrl_20_offset;
+	unsigned char ctrl_23_offset;
+	unsigned char ctrl_27_offset;
+	unsigned char ctrl_41_offset;
+	struct synaptics_rmi4_f12_query_0 query_0;
+	struct synaptics_rmi4_f12_query_5 query_5;
+	struct synaptics_rmi4_f12_query_8 query_8;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	/* query 7 reports the size of query 8; need at least 4 bytes to
+	 * cover the presence bits of data registers 16..21 */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->query_base_addr + 7,
+			&size_of_query,
+			sizeof(size_of_query));
+	if (retval < 0)
+		return retval;
+
+	if (size_of_query < 4) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: User defined gesture support unavailable (missing data registers)\n",
+				__func__);
+		retval = -ENODEV;
+		return retval;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->query_base_addr + 8,
+			query_8.data,
+			sizeof(query_8.data));
+	if (retval < 0)
+		return retval;
+
+	/* data registers 16..21 (trace/template registers) must all be
+	 * present for UDG support */
+	if ((query_8.data16_is_present) &&
+			(query_8.data17_is_present) &&
+			(query_8.data18_is_present) &&
+			(query_8.data19_is_present) &&
+			(query_8.data20_is_present) &&
+			(query_8.data21_is_present)) {
+		data_offset = query_8.data0_is_present +
+				query_8.data1_is_present +
+				query_8.data2_is_present +
+				query_8.data3_is_present;
+		udg->addr.data_4 = udg->data_base_addr + data_offset;
+		data_offset = data_offset +
+				query_8.data4_is_present +
+				query_8.data5_is_present +
+				query_8.data6_is_present +
+				query_8.data7_is_present +
+				query_8.data8_is_present +
+				query_8.data9_is_present +
+				query_8.data10_is_present +
+				query_8.data11_is_present +
+				query_8.data12_is_present +
+				query_8.data13_is_present +
+				query_8.data14_is_present +
+				query_8.data15_is_present;
+		/* data 16..21 are consecutive once data 15's offset is known */
+		udg->addr.trace_x = udg->data_base_addr + data_offset;
+		udg->addr.trace_y = udg->addr.trace_x + 1;
+		udg->addr.trace_segment = udg->addr.trace_y + 1;
+		udg->addr.template_helper = udg->addr.trace_segment + 1;
+		udg->addr.template_data = udg->addr.template_helper + 1;
+		udg->addr.template_flags = udg->addr.template_data + 1;
+	} else {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: User defined gesture support unavailable (missing data registers)\n",
+				__func__);
+		retval = -ENODEV;
+		return retval;
+	}
+
+	/* query 4 reports the size of query 5; need at least 7 bytes to
+	 * cover the presence bits of control registers up to ctrl 41 */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->query_base_addr + 4,
+			&size_of_query,
+			sizeof(size_of_query));
+	if (retval < 0)
+		return retval;
+
+	if (size_of_query < 7) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: User defined gesture support unavailable (missing control registers)\n",
+				__func__);
+		retval = -ENODEV;
+		return retval;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->query_base_addr + 5,
+			query_5.data,
+			sizeof(query_5.data));
+	if (retval < 0)
+		return retval;
+
+	/* each control register's offset is the number of present
+	 * lower-numbered control registers */
+	ctrl_18_offset = query_5.ctrl0_is_present +
+			query_5.ctrl1_is_present +
+			query_5.ctrl2_is_present +
+			query_5.ctrl3_is_present +
+			query_5.ctrl4_is_present +
+			query_5.ctrl5_is_present +
+			query_5.ctrl6_is_present +
+			query_5.ctrl7_is_present +
+			query_5.ctrl8_is_present +
+			query_5.ctrl9_is_present +
+			query_5.ctrl10_is_present +
+			query_5.ctrl11_is_present +
+			query_5.ctrl12_is_present +
+			query_5.ctrl13_is_present +
+			query_5.ctrl14_is_present +
+			query_5.ctrl15_is_present +
+			query_5.ctrl16_is_present +
+			query_5.ctrl17_is_present;
+
+	ctrl_20_offset = ctrl_18_offset +
+			query_5.ctrl18_is_present +
+			query_5.ctrl19_is_present;
+
+	ctrl_23_offset = ctrl_20_offset +
+			query_5.ctrl20_is_present +
+			query_5.ctrl21_is_present +
+			query_5.ctrl22_is_present;
+
+	ctrl_27_offset = ctrl_23_offset+
+			query_5.ctrl23_is_present +
+			query_5.ctrl24_is_present +
+			query_5.ctrl25_is_present +
+			query_5.ctrl26_is_present;
+
+	ctrl_41_offset = ctrl_27_offset+
+			query_5.ctrl27_is_present +
+			query_5.ctrl28_is_present +
+			query_5.ctrl29_is_present +
+			query_5.ctrl30_is_present +
+			query_5.ctrl31_is_present +
+			query_5.ctrl32_is_present +
+			query_5.ctrl33_is_present +
+			query_5.ctrl34_is_present +
+			query_5.ctrl35_is_present +
+			query_5.ctrl36_is_present +
+			query_5.ctrl37_is_present +
+			query_5.ctrl38_is_present +
+			query_5.ctrl39_is_present +
+			query_5.ctrl40_is_present;
+
+	udg->addr.ctrl_18 = udg->control_base_addr + ctrl_18_offset;
+	udg->addr.ctrl_20 = udg->control_base_addr + ctrl_20_offset;
+	udg->addr.ctrl_23 = udg->control_base_addr + ctrl_23_offset;
+	udg->addr.ctrl_27 = udg->control_base_addr + ctrl_27_offset;
+	udg->addr.ctrl_41 = udg->control_base_addr + ctrl_41_offset;
+
+	/* byte offset of subpacket 10 within ctrl 18 = sum of the sizes
+	 * of the present subpackets 0..9 */
+	udg->ctrl_18_sub10_off = 0;
+	for (ii = 0; ii < 10; ii++) {
+		retval = udg_ctrl_subpacket(18, ii, &query_5);
+		if (retval == 1)
+			udg->ctrl_18_sub10_off += ctrl_18_sub_size[ii];
+		else if (retval < 0)
+			return retval;
+	}
+
+	udg->ctrl_20_sub1_off = 0;
+	for (ii = 0; ii < 1; ii++) {
+		retval = udg_ctrl_subpacket(20, ii, &query_5);
+		if (retval == 1)
+			udg->ctrl_20_sub1_off += ctrl_20_sub_size[ii];
+		else if (retval < 0)
+			return retval;
+	}
+
+	udg->ctrl_23_sub3_off = 0;
+	for (ii = 0; ii < 3; ii++) {
+		retval = udg_ctrl_subpacket(23, ii, &query_5);
+		if (retval == 1)
+			udg->ctrl_23_sub3_off += ctrl_23_sub_size[ii];
+		else if (retval < 0)
+			return retval;
+	}
+
+	/* if subpacket 3 itself is absent, the offset is meaningless —
+	 * reset it to 0 (used as a flag below and in the enable path) */
+	retval = udg_ctrl_subpacket(23, 3, &query_5);
+	if (retval == 0)
+		udg->ctrl_23_sub3_off = 0;
+	else if (retval < 0)
+		return retval;
+
+	udg->ctrl_27_sub5_off = 0;
+	for (ii = 0; ii < 5; ii++) {
+		retval = udg_ctrl_subpacket(27, ii, &query_5);
+		if (retval == 1)
+			udg->ctrl_27_sub5_off += ctrl_27_sub_size[ii];
+		else if (retval < 0)
+			return retval;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->query_base_addr + 0,
+			query_0.data,
+			sizeof(query_0.data));
+	if (retval < 0)
+		return retval;
+
+	udg->max_num_templates = query_0.max_num_templates;
+	udg->template_size =
+			((unsigned short)query_0.template_size_lsb << 0) |
+			((unsigned short)query_0.template_size_msb << 8);
+	/* per-template buffer: template_size points, 4 bytes per point for
+	 * x and y, plus header bytes — NOTE(review): confirm layout against
+	 * the template data register spec */
+	udg->template_data_size = udg->template_size * 4 * 2 + 4 + 1;
+
+#ifdef STORE_GESTURES
+	udg->gestures_to_store = udg->max_num_templates;
+	if (GESTURES_TO_STORE < udg->gestures_to_store)
+		udg->gestures_to_store = GESTURES_TO_STORE;
+#endif
+
+	/* remember the default report flags so detection disable can
+	 * restore them later */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_20,
+			udg->ctrl_buf,
+			udg->ctrl_20_sub1_off + 1);
+	if (retval < 0)
+		return retval;
+
+	udg->report_flags = udg->ctrl_buf[udg->ctrl_20_sub1_off];
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_23,
+			udg->ctrl_buf,
+			udg->ctrl_23_sub3_off + 1);
+	if (retval < 0)
+		return retval;
+
+	udg->object_type_enable1 = udg->ctrl_buf[0];
+	if (udg->ctrl_23_sub3_off)
+		udg->object_type_enable2 = udg->ctrl_buf[udg->ctrl_23_sub3_off];
+
+	return retval;
+}
+
+/* Scan the Page Description Table for function F12, record its base
+ * addresses, initialize the gesture registers, and enable this module's
+ * interrupt sources in F01 interrupt enable. Returns 0 on success,
+ * -EINVAL if F12 is not found, or a negative errno on access failure.
+ */
+static int udg_scan_pdt(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char page;
+	unsigned char intr_count = 0;
+	unsigned char intr_off;
+	unsigned char intr_src;
+	unsigned short addr;
+	struct synaptics_rmi4_fn_desc fd;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	/* PDT entries are laid out top-down from PDT_START within each page */
+	for (page = 0; page < PAGES_TO_SERVICE; page++) {
+		for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
+			addr |= (page << 8);
+
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					addr,
+					(unsigned char *)&fd,
+					sizeof(fd));
+			if (retval < 0)
+				return retval;
+
+			/* strip the page number again for the next iteration */
+			addr &= ~(MASK_8BIT << 8);
+
+			if (fd.fn_number) {
+				dev_dbg(rmi4_data->pdev->dev.parent,
+						"%s: Found F%02x\n",
+						__func__, fd.fn_number);
+				switch (fd.fn_number) {
+				case SYNAPTICS_RMI4_F12:
+					goto f12_found;
+					break;
+				}
+			} else {
+				/* fn_number 0 terminates the page's PDT */
+				break;
+			}
+
+			/* track interrupt sources of functions preceding F12
+			 * to compute F12's bit offset in the interrupt mask */
+			intr_count += fd.intr_src_count;
+		}
+	}
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Failed to find F12\n",
+			__func__);
+	return -EINVAL;
+
+f12_found:
+	udg->query_base_addr = fd.query_base_addr | (page << 8);
+	udg->control_base_addr = fd.ctrl_base_addr | (page << 8);
+	udg->data_base_addr = fd.data_base_addr | (page << 8);
+	udg->command_base_addr = fd.cmd_base_addr | (page << 8);
+
+	retval = udg_reg_init();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to initialize user defined gesture registers\n",
+				__func__);
+		return retval;
+	}
+
+	/* build the interrupt mask covering F12's interrupt sources */
+	udg->intr_mask = 0;
+	intr_src = fd.intr_src_count;
+	intr_off = intr_count % 8;
+	for (ii = intr_off;
+			ii < (intr_src + intr_off);
+			ii++) {
+		udg->intr_mask |= 1 << ii;
+	}
+
+	rmi4_data->intr_mask[0] |= udg->intr_mask;
+
+	/* F01 control 1 is the interrupt enable register */
+	addr = rmi4_data->f01_ctrl_base_addr + 1;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			addr,
+			&rmi4_data->intr_mask[0],
+			sizeof(rmi4_data->intr_mask[0]));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set interrupt enable bit\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/* Attention callback from the DSX core: dispatch to udg_report() when
+ * the asserted interrupt sources overlap this module's mask. No-op if
+ * the module is not initialized.
+ */
+static void synaptics_rmi4_udg_attn(struct synaptics_rmi4_data *rmi4_data,
+		unsigned char intr_mask)
+{
+	if (!udg)
+		return;
+
+	if (udg->intr_mask & intr_mask)
+		udg_report();
+
+	return;
+}
+
+/* Initialize the user-defined gesture module: allocate the udg handle
+ * and buffers, locate F12 via the PDT, register a wakeup-capable input
+ * device, create the sysfs attributes and tuning directory, and enable
+ * the gesture engine. Returns 0 on success (or if already initialized),
+ * negative errno on failure with all partial state torn down.
+ */
+static int synaptics_rmi4_udg_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char size;
+	unsigned char attr_count;
+	unsigned char param_count;
+
+	if (udg) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	udg = kzalloc(sizeof(*udg), GFP_KERNEL);
+	if (!udg) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for udg\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	/* ctrl_buf must hold all ctrl 18 subpackets plus the tuning block */
+	size = 0;
+	for (ii = 0; ii < sizeof(ctrl_18_sub_size); ii++)
+		size += ctrl_18_sub_size[ii];
+	size += sizeof(struct udg_tuning);
+	udg->ctrl_buf = kzalloc(size, GFP_KERNEL);
+	if (!udg->ctrl_buf) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for ctrl_buf\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_udg;
+	}
+
+	udg->rmi4_data = rmi4_data;
+
+	/* locates F12 and fills in udg->template_data_size */
+	retval = udg_scan_pdt();
+	if (retval < 0)
+		goto exit_free_ctrl_buf;
+
+	udg->template_data_buf = kzalloc(udg->template_data_size, GFP_KERNEL);
+	if (!udg->template_data_buf) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for template_data_buf\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_ctrl_buf;
+	}
+
+#ifdef STORE_GESTURES
+	udg->storage_buf = kzalloc(
+			udg->template_data_size * udg->gestures_to_store,
+			GFP_KERNEL);
+	if (!udg->storage_buf) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for storage_buf\n",
+				__func__);
+		kfree(udg->template_data_buf);
+		retval = -ENOMEM;
+		goto exit_free_ctrl_buf;
+	}
+#endif
+
+	udg->udg_dev = input_allocate_device();
+	if (udg->udg_dev == NULL) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to allocate gesture device\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_template_data_buf;
+	}
+
+	udg->udg_dev->name = GESTURE_DRIVER_NAME;
+	udg->udg_dev->phys = GESTURE_PHYS_NAME;
+	udg->udg_dev->id.product = SYNAPTICS_DSX_DRIVER_PRODUCT;
+	udg->udg_dev->id.version = SYNAPTICS_DSX_DRIVER_VERSION;
+	udg->udg_dev->dev.parent = rmi4_data->pdev->dev.parent;
+	input_set_drvdata(udg->udg_dev, rmi4_data);
+
+	set_bit(EV_KEY, udg->udg_dev->evbit);
+	set_bit(KEY_WAKEUP, udg->udg_dev->keybit);
+	input_set_capability(udg->udg_dev, EV_KEY, KEY_WAKEUP);
+
+	retval = input_register_device(udg->udg_dev);
+	if (retval) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to register gesture device\n",
+				__func__);
+		input_free_device(udg->udg_dev);
+		goto exit_free_template_data_buf;
+	}
+
+	udg->tuning_dir = kobject_create_and_add(TUNING_SYSFS_DIR_NAME,
+			&udg->udg_dev->dev.kobj);
+	if (!udg->tuning_dir) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create tuning sysfs directory\n",
+				__func__);
+		/* previously fell through with retval == 0 and reported
+		 * success despite tearing everything down */
+		retval = -ENODEV;
+		goto exit_unregister_input_device;
+	}
+
+	retval = sysfs_create_bin_file(&udg->udg_dev->dev.kobj, &template_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create template data bin file\n",
+				__func__);
+		goto exit_remove_sysfs_directory;
+	}
+
+	retval = sysfs_create_bin_file(&udg->udg_dev->dev.kobj, &trace_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create trace data bin file\n",
+				__func__);
+		goto exit_remove_bin_file;
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(&udg->udg_dev->dev.kobj,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			retval = -ENODEV;
+			goto exit_remove_attrs;
+		}
+	}
+
+	for (param_count = 0; param_count < ARRAY_SIZE(params); param_count++) {
+		retval = sysfs_create_file(udg->tuning_dir,
+				&params[param_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create tuning parameters\n",
+					__func__);
+			retval = -ENODEV;
+			goto exit_remove_params;
+		}
+	}
+
+	retval = udg_engine_enable(true);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to enable gesture engine\n",
+				__func__);
+		goto exit_remove_params;
+	}
+
+	return 0;
+
+exit_remove_params:
+	/* counters are unsigned char: a decrementing "count >= 0" loop
+	 * would underflow and never terminate, so count down with an
+	 * explicit > 0 guard instead */
+	while (param_count > 0) {
+		param_count--;
+		sysfs_remove_file(udg->tuning_dir,
+				&params[param_count].attr);
+	}
+
+exit_remove_attrs:
+	while (attr_count > 0) {
+		attr_count--;
+		sysfs_remove_file(&udg->udg_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+	sysfs_remove_bin_file(&udg->udg_dev->dev.kobj, &trace_data);
+
+exit_remove_bin_file:
+	sysfs_remove_bin_file(&udg->udg_dev->dev.kobj, &template_data);
+
+exit_remove_sysfs_directory:
+	kobject_put(udg->tuning_dir);
+
+exit_unregister_input_device:
+	input_unregister_device(udg->udg_dev);
+
+exit_free_template_data_buf:
+#ifdef STORE_GESTURES
+	kfree(udg->storage_buf);
+#endif
+	kfree(udg->template_data_buf);
+
+exit_free_ctrl_buf:
+	kfree(udg->ctrl_buf);
+
+exit_free_udg:
+	kfree(udg);
+	udg = NULL;
+
+exit:
+	return retval;
+}
+
+/* Tear down the gesture module: remove sysfs attributes and bin files,
+ * drop the tuning kobject, unregister the input device, free all
+ * buffers, and signal udg_remove_complete so module exit can proceed.
+ */
+static void synaptics_rmi4_udg_remove(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char count;
+
+	if (!udg)
+		goto exit;
+
+	for (count = 0; count < ARRAY_SIZE(params); count++) {
+		sysfs_remove_file(udg->tuning_dir,
+				&params[count].attr);
+	}
+
+	for (count = 0; count < ARRAY_SIZE(attrs); count++) {
+		sysfs_remove_file(&udg->udg_dev->dev.kobj,
+				&attrs[count].attr);
+	}
+
+	sysfs_remove_bin_file(&udg->udg_dev->dev.kobj, &trace_data);
+	sysfs_remove_bin_file(&udg->udg_dev->dev.kobj, &template_data);
+	kobject_put(udg->tuning_dir);
+
+	input_unregister_device(udg->udg_dev);
+#ifdef STORE_GESTURES
+	kfree(udg->storage_buf);
+#endif
+	kfree(udg->template_data_buf);
+	kfree(udg->trace_data_buf);
+	kfree(udg->ctrl_buf);
+	kfree(udg);
+	udg = NULL;
+
+exit:
+	/* always signal, even if never initialized, so module exit does
+	 * not block in wait_for_completion() */
+	complete(&udg_remove_complete);
+
+	return;
+}
+
+/* Reset callback: after a firmware reset, register addresses may have
+ * changed, so re-scan the PDT and re-enable the gesture engine; with
+ * STORE_GESTURES, restore the saved templates and validity flags.
+ * Falls back to full init if the module was never initialized.
+ */
+static void synaptics_rmi4_udg_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!udg) {
+		synaptics_rmi4_udg_init(rmi4_data);
+		return;
+	}
+
+	udg_scan_pdt();
+	udg_engine_enable(true);
+#ifdef STORE_GESTURES
+	udg_write_template_data();
+	udg_write_valid_data();
+#endif
+
+	return;
+}
+
+/* Reinit callback: re-enable the gesture engine (register addresses are
+ * unchanged, so no PDT rescan); with STORE_GESTURES, restore the saved
+ * templates and validity flags.
+ */
+static void synaptics_rmi4_udg_reinit(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!udg)
+		return;
+
+	udg_engine_enable(true);
+#ifdef STORE_GESTURES
+	udg_write_template_data();
+	udg_write_valid_data();
+#endif
+
+	return;
+}
+
+/* Early-suspend callback: keep the controller awake and its IRQ armed
+ * as a wakeup source, and switch reporting to wakeup gesture detection
+ * so a drawn gesture can wake the system.
+ */
+static void synaptics_rmi4_udg_e_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!udg)
+		return;
+
+	rmi4_data->sleep_enable(rmi4_data, false);
+	rmi4_data->irq_enable(rmi4_data, true, false);
+	enable_irq_wake(rmi4_data->irq);
+
+	udg_engine_enable(true);
+	udg_detection_enable(true);
+
+	return;
+}
+
+/* Suspend callback: identical to the early-suspend path — keep the
+ * controller awake with its IRQ as a wakeup source and enable wakeup
+ * gesture detection.
+ */
+static void synaptics_rmi4_udg_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!udg)
+		return;
+
+	rmi4_data->sleep_enable(rmi4_data, false);
+	rmi4_data->irq_enable(rmi4_data, true, false);
+	enable_irq_wake(rmi4_data->irq);
+
+	udg_engine_enable(true);
+	udg_detection_enable(true);
+
+	return;
+}
+
+/* Resume callback: stop the IRQ acting as a wakeup source and restore
+ * normal (non-gesture) reporting flags.
+ */
+static void synaptics_rmi4_udg_resume(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!udg)
+		return;
+
+	disable_irq_wake(rmi4_data->irq);
+	udg_detection_enable(false);
+
+	return;
+}
+
+/* Late-resume callback: identical to the resume path — disable IRQ wake
+ * and restore normal reporting flags.
+ */
+static void synaptics_rmi4_udg_l_resume(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!udg)
+		return;
+
+	disable_irq_wake(rmi4_data->irq);
+	udg_detection_enable(false);
+
+	return;
+}
+
+/* Expansion-function descriptor registered with the DSX core driver;
+ * the core invokes these callbacks on its lifecycle and attention
+ * events.
+ */
+static struct synaptics_rmi4_exp_fn gesture_module = {
+	.fn_type = RMI_GESTURE,
+	.init = synaptics_rmi4_udg_init,
+	.remove = synaptics_rmi4_udg_remove,
+	.reset = synaptics_rmi4_udg_reset,
+	.reinit = synaptics_rmi4_udg_reinit,
+	.early_suspend = synaptics_rmi4_udg_e_suspend,
+	.suspend = synaptics_rmi4_udg_suspend,
+	.resume = synaptics_rmi4_udg_resume,
+	.late_resume = synaptics_rmi4_udg_l_resume,
+	.attn = synaptics_rmi4_udg_attn,
+};
+
+/* Module entry: register the gesture expansion function with the DSX
+ * core (the core calls .init when the controller is ready).
+ */
+static int __init rmi4_gesture_module_init(void)
+{
+	synaptics_rmi4_new_function(&gesture_module, true);
+
+	return 0;
+}
+
+/* Module exit: unregister the expansion function and block until the
+ * core has invoked synaptics_rmi4_udg_remove().
+ */
+static void __exit rmi4_gesture_module_exit(void)
+{
+	synaptics_rmi4_new_function(&gesture_module, false);
+
+	wait_for_completion(&udg_remove_complete);
+
+	return;
+}
+
+/* Standard kernel module registration and metadata. */
+module_init(rmi4_gesture_module_init);
+module_exit(rmi4_gesture_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX User Defined Gesture Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_i2c.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_i2c.c
new file mode 100644
index 0000000..7725cd3
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_i2c.c
@@ -0,0 +1,606 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/types.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+#define SYN_I2C_RETRY_TIMES 10
+#define rd_msgs  1
+
+static unsigned char *wr_buf;
+
+static struct synaptics_dsx_hw_interface hw_if;
+
+static struct platform_device *synaptics_dsx_i2c_device;
+
+#ifdef CONFIG_OF
+/*
+ * parse_dt() - fill @bdata from the "synaptics,*" properties of @dev's
+ * device-tree node.
+ *
+ * Optional properties fall back to defaults (0, NULL or -1 meaning
+ * "not provided").  A present GPIO property whose companion state/timing
+ * sub-property is missing is treated as an error.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int parse_dt(struct device *dev, struct synaptics_dsx_board_data *bdata)
+{
+	int retval;
+	u32 value;
+	const char *name;
+	struct property *prop;
+	struct device_node *np = dev->of_node;
+
+	/* IRQ GPIO is mandatory in practice; its DT flags are stored in
+	 * bdata->irq_flags via the cast output pointer. */
+	bdata->irq_gpio = of_get_named_gpio_flags(np,
+			"synaptics,irq-gpio", 0,
+			(enum of_gpio_flags *)&bdata->irq_flags);
+
+	retval = of_property_read_u32(np, "synaptics,irq-on-state",
+			&value);
+	if (retval < 0)
+		bdata->irq_on_state = 0;
+	else
+		bdata->irq_on_state = value;
+
+	/* Optional regulator names; NULL means "no such regulator". */
+	retval = of_property_read_string(np, "synaptics,pwr-reg-name", &name);
+	if (retval < 0)
+		bdata->pwr_reg_name = NULL;
+	else
+		bdata->pwr_reg_name = name;
+
+	retval = of_property_read_string(np, "synaptics,bus-reg-name", &name);
+	if (retval < 0)
+		bdata->bus_reg_name = NULL;
+	else
+		bdata->bus_reg_name = name;
+
+	/* Optional power GPIO; if present its active state is required. */
+	prop = of_find_property(np, "synaptics,power-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->power_gpio = of_get_named_gpio_flags(np,
+				"synaptics,power-gpio", 0, NULL);
+		retval = of_property_read_u32(np, "synaptics,power-on-state",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,power-on-state property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->power_on_state = value;
+		}
+	} else {
+		bdata->power_gpio = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,power-delay-ms", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,power-delay-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,power-delay-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->power_delay_ms = value;
+		}
+	} else {
+		bdata->power_delay_ms = 0;
+	}
+
+	/* Optional reset GPIO; if present both its active state and its
+	 * active-pulse duration are required. */
+	prop = of_find_property(np, "synaptics,reset-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->reset_gpio = of_get_named_gpio_flags(np,
+				"synaptics,reset-gpio", 0, NULL);
+		retval = of_property_read_u32(np, "synaptics,reset-on-state",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-on-state property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_on_state = value;
+		}
+		retval = of_property_read_u32(np, "synaptics,reset-active-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-active-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_active_ms = value;
+		}
+	} else {
+		bdata->reset_gpio = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,reset-delay-ms", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,reset-delay-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-delay-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_delay_ms = value;
+		}
+	} else {
+		bdata->reset_delay_ms = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,max-y-for-2d", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,max-y-for-2d",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,max-y-for-2d property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->max_y_for_2d = value;
+		}
+	} else {
+		bdata->max_y_for_2d = -1;
+	}
+
+	bdata->swap_axes = of_property_read_bool(np, "synaptics,swap-axes");
+	bdata->x_flip = of_property_read_bool(np, "synaptics,x-flip");
+	bdata->y_flip = of_property_read_bool(np, "synaptics,y-flip");
+
+	/* Alternate ("ub", presumably micro/bootloader - TODO confirm)
+	 * I2C address; -1 disables the address-toggling recovery logic. */
+	prop = of_find_property(np, "synaptics,ub-i2c-addr", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,ub-i2c-addr",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,ub-i2c-addr property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->ub_i2c_addr = (unsigned short)value;
+		}
+	} else {
+		bdata->ub_i2c_addr = -1;
+	}
+
+	/* Capacitive (0D) button key codes; one u32 per button. */
+	prop = of_find_property(np, "synaptics,cap-button-codes", NULL);
+	if (prop && prop->length) {
+		bdata->cap_button_map->map = devm_kzalloc(dev,
+				prop->length,
+				GFP_KERNEL);
+		if (!bdata->cap_button_map->map)
+			return -ENOMEM;
+		bdata->cap_button_map->nbuttons = prop->length / sizeof(u32);
+		retval = of_property_read_u32_array(np,
+				"synaptics,cap-button-codes",
+				bdata->cap_button_map->map,
+				bdata->cap_button_map->nbuttons);
+		if (retval < 0) {
+			bdata->cap_button_map->nbuttons = 0;
+			bdata->cap_button_map->map = NULL;
+		}
+	} else {
+		bdata->cap_button_map->nbuttons = 0;
+		bdata->cap_button_map->map = NULL;
+	}
+
+	/* Virtual button entries are 5 u32s each (code plus geometry). */
+	prop = of_find_property(np, "synaptics,vir-button-codes", NULL);
+	if (prop && prop->length) {
+		bdata->vir_button_map->map = devm_kzalloc(dev,
+				prop->length,
+				GFP_KERNEL);
+		if (!bdata->vir_button_map->map)
+			return -ENOMEM;
+		bdata->vir_button_map->nbuttons = prop->length / sizeof(u32);
+		bdata->vir_button_map->nbuttons /= 5;
+		retval = of_property_read_u32_array(np,
+				"synaptics,vir-button-codes",
+				bdata->vir_button_map->map,
+				bdata->vir_button_map->nbuttons * 5);
+		if (retval < 0) {
+			bdata->vir_button_map->nbuttons = 0;
+			bdata->vir_button_map->map = NULL;
+		}
+	} else {
+		bdata->vir_button_map->nbuttons = 0;
+		bdata->vir_button_map->map = NULL;
+	}
+
+	return 0;
+}
+#endif
+
+/*
+ * synaptics_rmi4_i2c_alloc_buf() - make sure the shared write buffer
+ * wr_buf can hold at least @count bytes.
+ *
+ * The buffer only ever grows.  The reallocation condition also covers
+ * wr_buf being NULL while buf_size is still nonzero (wr_buf is freed by
+ * synaptics_rmi4_bus_exit() without this function's static buf_size
+ * being reset), which previously could reuse a dangling pointer.
+ *
+ * Callers hold rmi4_io_ctrl_mutex, which serializes access to wr_buf.
+ *
+ * Returns 0 on success or -ENOMEM.
+ */
+static int synaptics_rmi4_i2c_alloc_buf(struct synaptics_rmi4_data *rmi4_data,
+		unsigned int count)
+{
+	static unsigned int buf_size;
+
+	if (!wr_buf || count > buf_size) {
+		kfree(wr_buf);	/* kfree(NULL) is a no-op; no guard needed */
+		wr_buf = kzalloc(count, GFP_KERNEL);
+		if (!wr_buf) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for buffer\n",
+					__func__);
+			buf_size = 0;
+			return -ENOMEM;
+		}
+		buf_size = count;
+	}
+
+	return 0;
+}
+
+/*
+ * Toggle the slave address between the normal address and the alternate
+ * "ub" address from DT.  Called midway through a retry loop when the
+ * controller stops responding on the current address.  A ub address of
+ * -1 (not provided in DT) disables the toggling.
+ */
+static void synaptics_rmi4_i2c_check_addr(struct synaptics_rmi4_data *rmi4_data,
+		struct i2c_client *i2c)
+{
+	if (hw_if.board_data->ub_i2c_addr == -1)
+		return;
+
+	if (hw_if.board_data->i2c_addr == i2c->addr)
+		hw_if.board_data->i2c_addr = hw_if.board_data->ub_i2c_addr;
+	else
+		hw_if.board_data->i2c_addr = i2c->addr;
+
+	return;
+}
+
+/*
+ * Select the RMI register page containing @addr (high byte of the
+ * 16-bit register address) by writing it to the page-select register
+ * at 0xFF.  Skipped when the cached current_page already matches.
+ *
+ * Returns PAGE_SELECT_LEN on success; 0 if all retries failed.
+ */
+static int synaptics_rmi4_i2c_set_page(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr)
+{
+	int retval = 0;
+	unsigned char retry;
+	unsigned char buf[PAGE_SELECT_LEN];
+	unsigned char page;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+	struct i2c_msg msg[2];
+
+	msg[0].addr = hw_if.board_data->i2c_addr;
+	msg[0].flags = 0;
+	msg[0].len = PAGE_SELECT_LEN;
+	msg[0].buf = buf;
+
+	page = ((addr >> 8) & MASK_8BIT);
+	buf[0] = MASK_8BIT;	/* page-select register address (0xFF) */
+	buf[1] = page;
+
+	if (page != rmi4_data->current_page) {
+		for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) {
+			if (i2c_transfer(i2c->adapter, &msg[0], 1) == 1) {
+				rmi4_data->current_page = page;
+				retval = PAGE_SELECT_LEN;
+				break;
+			}
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: I2C retry %d\n",
+					__func__, retry + 1);
+			msleep(20);
+
+			/* Halfway through the retries, try the alternate
+			 * slave address in case the chip fell back to it. */
+			if (retry == SYN_I2C_RETRY_TIMES / 2) {
+				synaptics_rmi4_i2c_check_addr(rmi4_data, i2c);
+				msg[0].addr = hw_if.board_data->i2c_addr;
+			}
+		}
+	} else {
+		retval = PAGE_SELECT_LEN;
+	}
+
+	return retval;
+}
+
+/*
+ * Read @length bytes from RMI register @addr into @data.
+ *
+ * Performs a write of the low address byte followed by a read, as a
+ * two-message i2c_transfer, after selecting the correct register page.
+ * Each batch of messages is retried up to SYN_I2C_RETRY_TIMES, with the
+ * slave address toggled to the alternate address halfway through.
+ *
+ * Returns @length on success or a negative errno.
+ */
+static int synaptics_rmi4_i2c_read(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr, unsigned char *data, unsigned int length)
+{
+	int retval = 0;
+	unsigned char retry;
+	unsigned char buf;
+	unsigned char index = 0;
+	unsigned char xfer_msgs;
+	unsigned char remaining_msgs;
+	unsigned short i2c_addr;
+	unsigned short data_offset = 0;
+	unsigned int remaining_length = length;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+	struct i2c_adapter *adap = i2c->adapter;
+	struct i2c_msg msg[rd_msgs + 1];
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	retval = synaptics_rmi4_i2c_set_page(rmi4_data, addr);
+	if (retval != PAGE_SELECT_LEN) {
+		retval = -EIO;
+		goto exit;
+	}
+
+	/* msg[0]: write the register address (low byte only; the page was
+	 * selected above).  msg[1]: read the payload. */
+	msg[0].addr = hw_if.board_data->i2c_addr;
+	msg[0].flags = 0;
+	msg[0].len = 1;
+	msg[0].buf = &buf;
+	msg[rd_msgs].addr = hw_if.board_data->i2c_addr;
+	msg[rd_msgs].flags = I2C_M_RD;
+	msg[rd_msgs].len = (unsigned short)remaining_length;
+	msg[rd_msgs].buf = &data[data_offset];
+
+	buf = addr & MASK_8BIT;
+
+	remaining_msgs = rd_msgs + 1;
+
+	while (remaining_msgs) {
+		xfer_msgs = remaining_msgs;
+		for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) {
+			retval = i2c_transfer(adap, &msg[index], xfer_msgs);
+			if (retval == xfer_msgs)
+				break;
+
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: I2C retry %d\n",
+					__func__, retry + 1);
+			msleep(20);
+
+			/* Halfway through the retries, switch to the
+			 * alternate slave address. */
+			if (retry == SYN_I2C_RETRY_TIMES / 2) {
+				synaptics_rmi4_i2c_check_addr(rmi4_data, i2c);
+				i2c_addr = hw_if.board_data->i2c_addr;
+				msg[0].addr = i2c_addr;
+				msg[rd_msgs].addr = i2c_addr;
+			}
+		}
+
+		if (retry == SYN_I2C_RETRY_TIMES) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: I2C read over retry limit\n",
+					__func__);
+			retval = -EIO;
+			goto exit;
+		}
+
+		remaining_msgs -= xfer_msgs;
+		index += xfer_msgs;
+	}
+
+	retval = length;
+
+exit:
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	return retval;
+}
+
+/*
+ * Write @length bytes from @data to RMI register @addr.
+ *
+ * Stages the low address byte plus payload in the shared wr_buf (grown
+ * on demand), selects the register page, then sends one write message
+ * with the usual retry/alternate-address handling.
+ *
+ * Returns @length on success or a negative errno.
+ */
+static int synaptics_rmi4_i2c_write(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr, unsigned char *data, unsigned int length)
+{
+	int retval;
+	unsigned char retry;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+	struct i2c_msg msg[2];
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	/* +1 byte for the register address prepended to the payload */
+	retval = synaptics_rmi4_i2c_alloc_buf(rmi4_data, length + 1);
+	if (retval < 0)
+		goto exit;
+
+	retval = synaptics_rmi4_i2c_set_page(rmi4_data, addr);
+	if (retval != PAGE_SELECT_LEN) {
+		retval = -EIO;
+		goto exit;
+	}
+
+	msg[0].addr = hw_if.board_data->i2c_addr;
+	msg[0].flags = 0;
+	msg[0].len = (unsigned short)(length + 1);
+	msg[0].buf = wr_buf;
+
+	wr_buf[0] = addr & MASK_8BIT;
+	retval = secure_memcpy(&wr_buf[1], length, &data[0], length, length);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy data\n",
+				__func__);
+		goto exit;
+	}
+
+	for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) {
+		if (i2c_transfer(i2c->adapter, &msg[0], 1) == 1) {
+			retval = length;
+			break;
+		}
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: I2C retry %d\n",
+				__func__, retry + 1);
+		msleep(20);
+
+		/* Halfway through the retries, switch to the alternate
+		 * slave address. */
+		if (retry == SYN_I2C_RETRY_TIMES / 2) {
+			synaptics_rmi4_i2c_check_addr(rmi4_data, i2c);
+			msg[0].addr = hw_if.board_data->i2c_addr;
+		}
+	}
+
+	if (retry == SYN_I2C_RETRY_TIMES) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: I2C write over retry limit\n",
+				__func__);
+		retval = -EIO;
+	}
+
+exit:
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	return retval;
+}
+
+/* Bus-access vtable handed to the DSX core through hw_if. */
+static struct synaptics_dsx_bus_access bus_access = {
+	.type = BUS_I2C,
+	.read = synaptics_rmi4_i2c_read,
+	.write = synaptics_rmi4_i2c_write,
+};
+
+/*
+ * Device-model release callback: frees the platform device allocated in
+ * probe once its last reference is dropped.
+ */
+static void synaptics_rmi4_i2c_dev_release(struct device *dev)
+{
+	kfree(synaptics_dsx_i2c_device);
+}
+
+/*
+ * I2C probe: gather board data (from DT or platform data) and register
+ * a platform device that carries hw_if to the DSX core driver.
+ *
+ * Fixes over the original: the kzalloc'd platform device is no longer
+ * leaked on the error paths, parse_dt() failures are propagated, and a
+ * missing board_data (no DT node and no platform data) is rejected
+ * instead of being dereferenced.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int synaptics_rmi4_i2c_probe(struct i2c_client *client,
+		const struct i2c_device_id *dev_id)
+{
+	int retval;
+
+	if (!i2c_check_functionality(client->adapter,
+			I2C_FUNC_SMBUS_BYTE_DATA)) {
+		dev_err(&client->dev,
+				"%s: SMBus byte data commands not supported by host\n",
+				__func__);
+		return -EIO;
+	}
+
+	synaptics_dsx_i2c_device = kzalloc(
+			sizeof(struct platform_device),
+			GFP_KERNEL);
+	if (!synaptics_dsx_i2c_device) {
+		dev_err(&client->dev,
+				"%s: Failed to allocate memory for synaptics_dsx_i2c_device\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+#ifdef CONFIG_OF
+	if (client->dev.of_node) {
+		/* devm allocations are released automatically with the
+		 * client device; only the platform device needs manual
+		 * cleanup on error. */
+		hw_if.board_data = devm_kzalloc(&client->dev,
+				sizeof(struct synaptics_dsx_board_data),
+				GFP_KERNEL);
+		if (!hw_if.board_data) {
+			dev_err(&client->dev,
+					"%s: Failed to allocate memory for board data\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_free_pdev;
+		}
+		hw_if.board_data->cap_button_map = devm_kzalloc(&client->dev,
+				sizeof(struct synaptics_dsx_button_map),
+				GFP_KERNEL);
+		if (!hw_if.board_data->cap_button_map) {
+			dev_err(&client->dev,
+					"%s: Failed to allocate memory for 0D button map\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_free_pdev;
+		}
+		hw_if.board_data->vir_button_map = devm_kzalloc(&client->dev,
+				sizeof(struct synaptics_dsx_button_map),
+				GFP_KERNEL);
+		if (!hw_if.board_data->vir_button_map) {
+			dev_err(&client->dev,
+					"%s: Failed to allocate memory for virtual button map\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_free_pdev;
+		}
+		retval = parse_dt(&client->dev, hw_if.board_data);
+		if (retval < 0)
+			goto err_free_pdev;
+	}
+#else
+	hw_if.board_data = client->dev.platform_data;
+#endif
+
+	if (!hw_if.board_data) {
+		dev_err(&client->dev,
+				"%s: No board data available\n",
+				__func__);
+		retval = -ENODEV;
+		goto err_free_pdev;
+	}
+
+	hw_if.bus_access = &bus_access;
+	hw_if.board_data->i2c_addr = client->addr;
+
+	synaptics_dsx_i2c_device->name = PLATFORM_DRIVER_NAME;
+	synaptics_dsx_i2c_device->id = 0;
+	synaptics_dsx_i2c_device->num_resources = 0;
+	synaptics_dsx_i2c_device->dev.parent = &client->dev;
+	synaptics_dsx_i2c_device->dev.platform_data = &hw_if;
+	synaptics_dsx_i2c_device->dev.release = synaptics_rmi4_i2c_dev_release;
+
+	retval = platform_device_register(synaptics_dsx_i2c_device);
+	if (retval) {
+		dev_err(&client->dev,
+				"%s: Failed to register platform device\n",
+				__func__);
+		/* The device was initialized by register; drop the reference
+		 * so the release callback frees it. */
+		platform_device_put(synaptics_dsx_i2c_device);
+		synaptics_dsx_i2c_device = NULL;
+		return -ENODEV;
+	}
+
+	return 0;
+
+err_free_pdev:
+	kfree(synaptics_dsx_i2c_device);
+	synaptics_dsx_i2c_device = NULL;
+	return retval;
+}
+
+/*
+ * I2C remove: unregister the platform device; its release callback
+ * frees the allocation made in probe.
+ */
+static int synaptics_rmi4_i2c_remove(struct i2c_client *client)
+{
+	platform_device_unregister(synaptics_dsx_i2c_device);
+
+	return 0;
+}
+
+static const struct i2c_device_id synaptics_rmi4_id_table[] = {
+	{I2C_DRIVER_NAME, 0},
+	{},
+};
+MODULE_DEVICE_TABLE(i2c, synaptics_rmi4_id_table);
+
+#ifdef CONFIG_OF
+/* OF match table; const to match the type of .of_match_table. */
+static const struct of_device_id synaptics_rmi4_of_match_table[] = {
+	{
+		.compatible = "synaptics,dsx-i2c",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, synaptics_rmi4_of_match_table);
+#else
+#define synaptics_rmi4_of_match_table NULL
+#endif
+
+/* I2C driver glue; matches by OF compatible or by I2C device id. */
+static struct i2c_driver synaptics_rmi4_i2c_driver = {
+	.driver = {
+		.name = I2C_DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = synaptics_rmi4_of_match_table,
+	},
+	.probe = synaptics_rmi4_i2c_probe,
+	.remove = synaptics_rmi4_i2c_remove,
+	.id_table = synaptics_rmi4_id_table,
+};
+
+/* Register the I2C driver; called by the DSX core module. */
+int synaptics_rmi4_bus_init(void)
+{
+	return i2c_add_driver(&synaptics_rmi4_i2c_driver);
+}
+EXPORT_SYMBOL(synaptics_rmi4_bus_init);
+
+/*
+ * Unregister the I2C driver and release the shared write buffer.
+ * wr_buf is NULLed after freeing so a later re-initialization of the
+ * bus cannot observe a dangling pointer.
+ */
+void synaptics_rmi4_bus_exit(void)
+{
+	kfree(wr_buf);
+	wr_buf = NULL;
+
+	i2c_del_driver(&synaptics_rmi4_i2c_driver);
+}
+EXPORT_SYMBOL(synaptics_rmi4_bus_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX I2C Bus Support Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_proximity.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_proximity.c
new file mode 100644
index 0000000..518b805
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_proximity.c
@@ -0,0 +1,692 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+#define PROX_PHYS_NAME "synaptics_dsx/proximity"
+
+#define HOVER_Z_MAX (255)
+
+#define HOVERING_FINGER_EN (1 << 4)
+
+static ssize_t synaptics_rmi4_hover_finger_en_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_hover_finger_en_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+/* Sysfs attributes created on the core input device's kobject. */
+static struct device_attribute attrs[] = {
+	__ATTR(hover_finger_en, 0664,
+			synaptics_rmi4_hover_finger_en_show,
+			synaptics_rmi4_hover_finger_en_store),
+};
+
+/*
+ * F12 query register 5: presence bitmap of control registers 0-23,
+ * used to compute the offset of control register 23 (object report
+ * enable).  The union allows raw byte access for register reads.
+ */
+struct synaptics_rmi4_f12_query_5 {
+	union {
+		struct {
+			unsigned char size_of_query6;
+			struct {
+				unsigned char ctrl0_is_present:1;
+				unsigned char ctrl1_is_present:1;
+				unsigned char ctrl2_is_present:1;
+				unsigned char ctrl3_is_present:1;
+				unsigned char ctrl4_is_present:1;
+				unsigned char ctrl5_is_present:1;
+				unsigned char ctrl6_is_present:1;
+				unsigned char ctrl7_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl8_is_present:1;
+				unsigned char ctrl9_is_present:1;
+				unsigned char ctrl10_is_present:1;
+				unsigned char ctrl11_is_present:1;
+				unsigned char ctrl12_is_present:1;
+				unsigned char ctrl13_is_present:1;
+				unsigned char ctrl14_is_present:1;
+				unsigned char ctrl15_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl16_is_present:1;
+				unsigned char ctrl17_is_present:1;
+				unsigned char ctrl18_is_present:1;
+				unsigned char ctrl19_is_present:1;
+				unsigned char ctrl20_is_present:1;
+				unsigned char ctrl21_is_present:1;
+				unsigned char ctrl22_is_present:1;
+				unsigned char ctrl23_is_present:1;
+			} __packed;
+		};
+		unsigned char data[4];
+	};
+};
+
+/*
+ * F12 query register 8: presence bitmap of data registers, used to
+ * compute the offset of data register 1 (hovering finger data).
+ */
+struct synaptics_rmi4_f12_query_8 {
+	union {
+		struct {
+			unsigned char size_of_query9;
+			struct {
+				unsigned char data0_is_present:1;
+				unsigned char data1_is_present:1;
+				unsigned char data2_is_present:1;
+				unsigned char data3_is_present:1;
+				unsigned char data4_is_present:1;
+				unsigned char data5_is_present:1;
+				unsigned char data6_is_present:1;
+				unsigned char data7_is_present:1;
+			} __packed;
+		};
+		unsigned char data[2];
+	};
+};
+
+/*
+ * Raw F12 hovering finger data packet: status byte, 16-bit little-endian
+ * x/y coordinates and a z (distance) value.
+ */
+struct prox_finger_data {
+	union {
+		struct {
+			unsigned char object_type_and_status;
+			unsigned char x_lsb;
+			unsigned char x_msb;
+			unsigned char y_lsb;
+			unsigned char y_msb;
+			unsigned char z;
+		} __packed;
+		unsigned char proximity_data[6];
+	};
+};
+
+/*
+ * State of the proximity module: cached F12 register base addresses and
+ * derived register addresses, the dedicated input device for hover
+ * events, and a scratch finger-data buffer.  A single instance is held
+ * in the file-scope "prox" pointer.
+ */
+struct synaptics_rmi4_prox_handle {
+	bool hover_finger_present;
+	bool hover_finger_en;
+	unsigned char intr_mask;
+	unsigned short query_base_addr;
+	unsigned short control_base_addr;
+	unsigned short data_base_addr;
+	unsigned short command_base_addr;
+	unsigned short hover_finger_en_addr;
+	unsigned short hover_finger_data_addr;
+	struct input_dev *prox_dev;
+	struct prox_finger_data *finger_data;
+	struct synaptics_rmi4_data *rmi4_data;
+};
+
+static struct synaptics_rmi4_prox_handle *prox;
+
+DECLARE_COMPLETION(prox_remove_complete);
+
+/*
+ * Report the hovering finger as lifted and flush the event packet,
+ * then clear the presence flag.
+ */
+static void prox_hover_finger_lift(void)
+{
+	struct input_dev *dev = prox->prox_dev;
+
+	input_report_key(dev, BTN_TOUCH, 0);
+	input_report_key(dev, BTN_TOOL_FINGER, 0);
+	input_sync(dev);
+	prox->hover_finger_present = false;
+}
+
+/*
+ * Read the current F12 hovering finger packet and forward it to the
+ * proximity input device.  A status byte other than
+ * F12_HOVERING_FINGER_STATUS means no finger is hovering; if one was
+ * previously reported, emit a lift event.
+ */
+static void prox_hover_finger_report(void)
+{
+	int retval;
+	int x;
+	int y;
+	int z;
+	struct prox_finger_data *data;
+	struct synaptics_rmi4_data *rmi4_data = prox->rmi4_data;
+
+	data = prox->finger_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			prox->hover_finger_data_addr,
+			data->proximity_data,
+			sizeof(data->proximity_data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read hovering finger data\n",
+				__func__);
+		return;
+	}
+
+	if (data->object_type_and_status != F12_HOVERING_FINGER_STATUS) {
+		if (prox->hover_finger_present)
+			prox_hover_finger_lift();
+
+		return;
+	}
+
+	x = (data->x_msb << 8) | (data->x_lsb);
+	y = (data->y_msb << 8) | (data->y_lsb);
+	/* Firmware reports z decreasing with distance; invert so larger
+	 * ABS_DISTANCE means farther away. */
+	z = HOVER_Z_MAX - data->z;
+
+	input_report_key(prox->prox_dev, BTN_TOUCH, 0);
+	input_report_key(prox->prox_dev, BTN_TOOL_FINGER, 1);
+	input_report_abs(prox->prox_dev, ABS_X, x);
+	input_report_abs(prox->prox_dev, ABS_Y, y);
+	input_report_abs(prox->prox_dev, ABS_DISTANCE, z);
+
+	input_sync(prox->prox_dev);
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: x = %d y = %d z = %d\n",
+			__func__, x, y, z);
+
+	prox->hover_finger_present = true;
+
+	return;
+}
+
+/*
+ * Push prox->hover_finger_en to the hardware: read-modify-write the
+ * HOVERING_FINGER_EN bit in the F12 object report enable register.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int prox_set_hover_finger_en(void)
+{
+	int retval;
+	unsigned char object_report_enable;
+	struct synaptics_rmi4_data *rmi4_data = prox->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			prox->hover_finger_en_addr,
+			&object_report_enable,
+			sizeof(object_report_enable));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read from object report enable register\n",
+				__func__);
+		return retval;
+	}
+
+	if (prox->hover_finger_en)
+		object_report_enable |= HOVERING_FINGER_EN;
+	else
+		object_report_enable &= ~HOVERING_FINGER_EN;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			prox->hover_finger_en_addr,
+			&object_report_enable,
+			sizeof(object_report_enable));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write to object report enable register\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Set the absolute-axis ranges of the proximity input device from the
+ * sensor dimensions reported by the core driver.
+ */
+static void prox_set_params(void)
+{
+	input_set_abs_params(prox->prox_dev, ABS_X, 0,
+			prox->rmi4_data->sensor_max_x, 0, 0);
+	input_set_abs_params(prox->prox_dev, ABS_Y, 0,
+			prox->rmi4_data->sensor_max_y, 0, 0);
+	input_set_abs_params(prox->prox_dev, ABS_DISTANCE, 0,
+			HOVER_Z_MAX, 0, 0);
+
+	return;
+}
+
+/*
+ * Derive the F12 register addresses this module needs: control 23
+ * (object report enable) from the query-5 presence bits, and data 1
+ * (hovering finger data) from the query-8 presence bits.  Register
+ * offsets are the count of lower-numbered registers that are present.
+ *
+ * Returns 0/positive on success or a negative errno.
+ */
+static int prox_reg_init(void)
+{
+	int retval;
+	unsigned char ctrl_23_offset;
+	unsigned char data_1_offset;
+	struct synaptics_rmi4_f12_query_5 query_5;
+	struct synaptics_rmi4_f12_query_8 query_8;
+	struct synaptics_rmi4_data *rmi4_data = prox->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			prox->query_base_addr + 5,
+			query_5.data,
+			sizeof(query_5.data));
+	if (retval < 0)
+		return retval;
+
+	/* Control 23's offset = number of present controls 0..22. */
+	ctrl_23_offset = query_5.ctrl0_is_present +
+			query_5.ctrl1_is_present +
+			query_5.ctrl2_is_present +
+			query_5.ctrl3_is_present +
+			query_5.ctrl4_is_present +
+			query_5.ctrl5_is_present +
+			query_5.ctrl6_is_present +
+			query_5.ctrl7_is_present +
+			query_5.ctrl8_is_present +
+			query_5.ctrl9_is_present +
+			query_5.ctrl10_is_present +
+			query_5.ctrl11_is_present +
+			query_5.ctrl12_is_present +
+			query_5.ctrl13_is_present +
+			query_5.ctrl14_is_present +
+			query_5.ctrl15_is_present +
+			query_5.ctrl16_is_present +
+			query_5.ctrl17_is_present +
+			query_5.ctrl18_is_present +
+			query_5.ctrl19_is_present +
+			query_5.ctrl20_is_present +
+			query_5.ctrl21_is_present +
+			query_5.ctrl22_is_present;
+
+	prox->hover_finger_en_addr = prox->control_base_addr + ctrl_23_offset;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			prox->query_base_addr + 8,
+			query_8.data,
+			sizeof(query_8.data));
+	if (retval < 0)
+		return retval;
+
+	/* Data 1's offset = 1 if data register 0 is present. */
+	data_1_offset = query_8.data0_is_present;
+	prox->hover_finger_data_addr = prox->data_base_addr + data_1_offset;
+
+	return retval;
+}
+
+/*
+ * Walk the RMI Page Description Table looking for function F12.  When
+ * found, record its register base addresses, compute this module's
+ * interrupt mask from the accumulated interrupt-source count, and
+ * enable those interrupt bits in the F01 control register.
+ *
+ * Returns 0 on success or a negative errno (-EINVAL if F12 absent).
+ */
+static int prox_scan_pdt(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char page;
+	unsigned char intr_count = 0;
+	unsigned char intr_off;
+	unsigned char intr_src;
+	unsigned short addr;
+	struct synaptics_rmi4_fn_desc fd;
+	struct synaptics_rmi4_data *rmi4_data = prox->rmi4_data;
+
+	for (page = 0; page < PAGES_TO_SERVICE; page++) {
+		/* PDT entries grow downward from PDT_START. */
+		for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
+			addr |= (page << 8);
+
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					addr,
+					(unsigned char *)&fd,
+					sizeof(fd));
+			if (retval < 0)
+				return retval;
+
+			addr &= ~(MASK_8BIT << 8);
+
+			if (fd.fn_number) {
+				dev_dbg(rmi4_data->pdev->dev.parent,
+						"%s: Found F%02x\n",
+						__func__, fd.fn_number);
+				switch (fd.fn_number) {
+				case SYNAPTICS_RMI4_F12:
+					goto f12_found;
+					break;
+				}
+			} else {
+				/* fn_number 0 terminates the page's table. */
+				break;
+			}
+
+			intr_count += fd.intr_src_count;
+		}
+	}
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Failed to find F12\n",
+			__func__);
+	return -EINVAL;
+
+f12_found:
+	prox->query_base_addr = fd.query_base_addr | (page << 8);
+	prox->control_base_addr = fd.ctrl_base_addr | (page << 8);
+	prox->data_base_addr = fd.data_base_addr | (page << 8);
+	prox->command_base_addr = fd.cmd_base_addr | (page << 8);
+
+	retval = prox_reg_init();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to initialize proximity registers\n",
+				__func__);
+		return retval;
+	}
+
+	/* F12's interrupt bits start after all preceding functions'
+	 * interrupt sources. */
+	prox->intr_mask = 0;
+	intr_src = fd.intr_src_count;
+	intr_off = intr_count % 8;
+	for (ii = intr_off;
+			ii < (intr_src + intr_off);
+			ii++) {
+		prox->intr_mask |= 1 << ii;
+	}
+
+	rmi4_data->intr_mask[0] |= prox->intr_mask;
+
+	addr = rmi4_data->f01_ctrl_base_addr + 1;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			addr,
+			&(rmi4_data->intr_mask[0]),
+			sizeof(rmi4_data->intr_mask[0]));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set interrupt enable bit\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/* Sysfs show: report the cached hovering-finger enable state (0/1). */
+static ssize_t synaptics_rmi4_hover_finger_en_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	if (!prox)
+		return -ENODEV;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+			prox->hover_finger_en);
+}
+
+/*
+ * Sysfs store: accept "1"/"0" (parsed base 16, matching the show
+ * format's 0/1) to enable/disable hovering finger reporting and push
+ * the new state to the hardware.
+ *
+ * Fixes over the original: prox was dereferenced before the NULL
+ * check, and kstrtouint() was compared against 1 — it returns 0 on
+ * success (unlike sscanf), so every write was rejected with -EINVAL.
+ */
+static ssize_t synaptics_rmi4_hover_finger_en_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data;
+
+	if (!prox)
+		return -ENODEV;
+
+	rmi4_data = prox->rmi4_data;
+
+	if (kstrtouint(buf, 16, &input))
+		return -EINVAL;
+
+	if (input == 1)
+		prox->hover_finger_en = true;
+	else if (input == 0)
+		prox->hover_finger_en = false;
+	else
+		return -EINVAL;
+
+	retval = prox_set_hover_finger_en();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to change hovering finger enable setting\n",
+				__func__);
+		return retval;
+	}
+
+	return count;
+}
+
+/*
+ * Exported helper for other kernel code to enable/disable hovering
+ * finger reporting.  Returns 0 on success, -ENODEV if the module is
+ * not initialized, or a negative errno from the register access.
+ */
+int synaptics_rmi4_prox_hover_finger_en(bool enable)
+{
+	int retval;
+
+	if (!prox)
+		return -ENODEV;
+
+	prox->hover_finger_en = enable;
+
+	retval = prox_set_hover_finger_en();
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+EXPORT_SYMBOL(synaptics_rmi4_prox_hover_finger_en);
+
+/*
+ * Attention (interrupt) callback from the core: process hovering
+ * finger data when one of this module's interrupt bits fired.
+ */
+static void synaptics_rmi4_prox_attn(struct synaptics_rmi4_data *rmi4_data,
+		unsigned char intr_mask)
+{
+	if (!prox)
+		return;
+
+	if (prox->intr_mask & intr_mask)
+		prox_hover_finger_report();
+
+	return;
+}
+
+/*
+ * Initialize the proximity module: allocate the handle, locate F12 in
+ * the PDT, enable hovering finger reporting, and register a dedicated
+ * input device plus sysfs attributes.
+ *
+ * Fixes over the original: a prox_set_hover_finger_en() failure now
+ * unwinds through the cleanup labels instead of returning directly
+ * (which leaked prox and finger_data and left a stale global handle),
+ * and the sysfs unwind loop no longer relies on "attr_count >= 0" —
+ * always true for an unsigned type, causing an out-of-bounds walk.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int synaptics_rmi4_prox_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char attr_count;
+
+	if (prox) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	prox = kzalloc(sizeof(*prox), GFP_KERNEL);
+	if (!prox) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for prox\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	prox->finger_data = kzalloc(sizeof(*(prox->finger_data)), GFP_KERNEL);
+	if (!prox->finger_data) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for finger_data\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_prox;
+	}
+
+	prox->rmi4_data = rmi4_data;
+
+	retval = prox_scan_pdt();
+	if (retval < 0)
+		goto exit_free_finger_data;
+
+	prox->hover_finger_en = true;
+
+	retval = prox_set_hover_finger_en();
+	if (retval < 0)
+		goto exit_free_finger_data;
+
+	prox->prox_dev = input_allocate_device();
+	if (prox->prox_dev == NULL) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to allocate proximity device\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_finger_data;
+	}
+
+	prox->prox_dev->name = PROXIMITY_DRIVER_NAME;
+	prox->prox_dev->phys = PROX_PHYS_NAME;
+	prox->prox_dev->id.product = SYNAPTICS_DSX_DRIVER_PRODUCT;
+	prox->prox_dev->id.version = SYNAPTICS_DSX_DRIVER_VERSION;
+	prox->prox_dev->dev.parent = rmi4_data->pdev->dev.parent;
+	input_set_drvdata(prox->prox_dev, rmi4_data);
+
+	set_bit(EV_KEY, prox->prox_dev->evbit);
+	set_bit(EV_ABS, prox->prox_dev->evbit);
+	set_bit(BTN_TOUCH, prox->prox_dev->keybit);
+	set_bit(BTN_TOOL_FINGER, prox->prox_dev->keybit);
+#ifdef INPUT_PROP_DIRECT
+	set_bit(INPUT_PROP_DIRECT, prox->prox_dev->propbit);
+#endif
+
+	prox_set_params();
+
+	retval = input_register_device(prox->prox_dev);
+	if (retval) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to register proximity device\n",
+				__func__);
+		goto exit_free_input_device;
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			goto exit_free_sysfs;
+		}
+	}
+
+	return 0;
+
+exit_free_sysfs:
+	/* Remove only the attributes created so far; attr_count is
+	 * unsigned, so count down with an explicit > 0 test. */
+	while (attr_count > 0) {
+		attr_count--;
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+	input_unregister_device(prox->prox_dev);
+	prox->prox_dev = NULL;
+
+exit_free_input_device:
+	if (prox->prox_dev)
+		input_free_device(prox->prox_dev);
+
+exit_free_finger_data:
+	kfree(prox->finger_data);
+
+exit_free_prox:
+	kfree(prox);
+	prox = NULL;
+
+exit:
+	return retval;
+}
+
+/*
+ * Remove callback: tear down sysfs attributes, the input device and
+ * the handle, then signal the module-exit waiter via
+ * prox_remove_complete (always, even if init never ran).
+ */
+static void synaptics_rmi4_prox_remove(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char attr_count;
+
+	if (!prox)
+		goto exit;
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+	input_unregister_device(prox->prox_dev);
+	kfree(prox->finger_data);
+	kfree(prox);
+	prox = NULL;
+
+exit:
+	complete(&prox_remove_complete);
+
+	return;
+}
+
+/*
+ * Reset callback: after a controller reset the PDT may have moved, so
+ * rescan it and re-apply the hover enable setting.  If init never ran,
+ * attempt it now.
+ */
+static void synaptics_rmi4_prox_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!prox) {
+		synaptics_rmi4_prox_init(rmi4_data);
+		return;
+	}
+
+	prox_hover_finger_lift();
+
+	prox_scan_pdt();
+
+	prox_set_hover_finger_en();
+
+	return;
+}
+
+/*
+ * Reinit callback: registers survived (no reset), so just drop any
+ * reported hover state and re-apply the enable setting.
+ */
+static void synaptics_rmi4_prox_reinit(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!prox)
+		return;
+
+	prox_hover_finger_lift();
+
+	prox_set_hover_finger_en();
+
+	return;
+}
+
+/*
+ * Early-suspend callback: release any reported hovering finger before
+ * the system goes down; nothing to do if the module never initialized.
+ */
+static void synaptics_rmi4_prox_e_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (prox)
+		prox_hover_finger_lift();
+}
+
+/*
+ * Suspend callback: release any reported hovering finger; nothing to
+ * do if the module never initialized.
+ */
+static void synaptics_rmi4_prox_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (prox)
+		prox_hover_finger_lift();
+}
+
+/*
+ * Expansion-function descriptor registered with the DSX core; resume
+ * hooks are intentionally absent (nothing to restore beyond what
+ * reinit/reset handle).
+ */
+static struct synaptics_rmi4_exp_fn proximity_module = {
+	.fn_type = RMI_PROXIMITY,
+	.init = synaptics_rmi4_prox_init,
+	.remove = synaptics_rmi4_prox_remove,
+	.reset = synaptics_rmi4_prox_reset,
+	.reinit = synaptics_rmi4_prox_reinit,
+	.early_suspend = synaptics_rmi4_prox_e_suspend,
+	.suspend = synaptics_rmi4_prox_suspend,
+	.resume = NULL,
+	.late_resume = NULL,
+	.attn = synaptics_rmi4_prox_attn,
+};
+
+/* Module entry: register the proximity expansion function with the core. */
+static int __init rmi4_proximity_module_init(void)
+{
+	synaptics_rmi4_new_function(&proximity_module, true);
+
+	return 0;
+}
+
+/*
+ * Module exit: deregister from the core, then block until the core has
+ * called our remove() callback (which completes prox_remove_complete).
+ */
+static void __exit rmi4_proximity_module_exit(void)
+{
+	synaptics_rmi4_new_function(&proximity_module, false);
+
+	wait_for_completion(&prox_remove_complete);
+
+	return;
+}
+
+module_init(rmi4_proximity_module_init);
+module_exit(rmi4_proximity_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX Proximity Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_dev.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_dev.c
new file mode 100644
index 0000000..331274e
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_dev.c
@@ -0,0 +1,1079 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/gpio.h>
+#include <linux/uaccess.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+#define CHAR_DEVICE_NAME "rmi"
+#define DEVICE_CLASS_NAME "rmidev"
+#define SYSFS_FOLDER_NAME "rmidev"
+#define DEV_NUMBER 1
+#define REG_ADDR_LIMIT 0xFFFF
+
+#define RMIDEV_MAJOR_NUM 0
+
+static ssize_t rmidev_sysfs_data_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+static ssize_t rmidev_sysfs_data_store(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+static ssize_t rmidev_sysfs_open_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t rmidev_sysfs_release_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t rmidev_sysfs_attn_state_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t rmidev_sysfs_pid_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t rmidev_sysfs_pid_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t rmidev_sysfs_term_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t rmidev_sysfs_intr_mask_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t rmidev_sysfs_intr_mask_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t rmidev_sysfs_concurrent_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t rmidev_sysfs_concurrent_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+/* Per-driver handle for the RMI char-device module (one global
+ * instance, see "rmidev" below).
+ */
+struct rmidev_handle {
+	dev_t dev_no;			/* char device number */
+	pid_t pid;			/* PID of userspace logging tool (0 = none) */
+	unsigned char intr_mask;	/* interrupts forwarded as SIGIO */
+	unsigned char *tmpbuf;		/* bounce buffer for read/write */
+	unsigned int tmpbuf_size;	/* current bounce buffer size */
+	struct device dev;
+	struct synaptics_rmi4_data *rmi4_data;
+	struct kobject *sysfs_dir;	/* sysfs folder under input device */
+	struct siginfo interrupt_signal;	/* SIGIO template */
+	struct siginfo terminate_signal;	/* SIGTERM template */
+	struct task_struct *task;	/* task resolved from pid */
+	void *data;			/* back-pointer to rmidev_data */
+	bool concurrent;		/* also report touch while reading F01 data */
+};
+
+/* Char-device bookkeeping; file_mutex serializes all file operations
+ * and ref_count limits the device to a single opener.
+ */
+struct rmidev_data {
+	int ref_count;
+	struct cdev main_dev;
+	struct class *device_class;
+	struct mutex file_mutex;
+	struct rmidev_handle *rmi_dev;
+};
+
+/* sysfs binary attribute "data": raw register access keyed by file
+ * offset (offset = RMI register address).
+ */
+static struct bin_attribute attr_data = {
+	.attr = {
+		.name = "data",
+		.mode = 0664,
+	},
+	.size = 0,
+	.read = rmidev_sysfs_data_show,
+	.write = rmidev_sysfs_data_store,
+};
+
+/* Plain sysfs attributes; write-only ones pair with the core driver's
+ * shared error stubs for the unsupported direction.
+ */
+static struct device_attribute attrs[] = {
+	__ATTR(open, 0220,
+			synaptics_rmi4_show_error,
+			rmidev_sysfs_open_store),
+	__ATTR(release, 0220,
+			synaptics_rmi4_show_error,
+			rmidev_sysfs_release_store),
+	__ATTR(attn_state, 0444,
+			rmidev_sysfs_attn_state_show,
+			synaptics_rmi4_store_error),
+	__ATTR(pid, 0664,
+			rmidev_sysfs_pid_show,
+			rmidev_sysfs_pid_store),
+	__ATTR(term, 0220,
+			synaptics_rmi4_show_error,
+			rmidev_sysfs_term_store),
+	__ATTR(intr_mask, 0664,
+			rmidev_sysfs_intr_mask_show,
+			rmidev_sysfs_intr_mask_store),
+	__ATTR(concurrent, 0664,
+			rmidev_sysfs_concurrent_show,
+			rmidev_sysfs_concurrent_store),
+};
+
+/* 0 requests dynamic major allocation in rmidev_init_device() */
+static int rmidev_major_num = RMIDEV_MAJOR_NUM;
+
+static struct class *rmidev_device_class;
+
+/* Single global handle; NULL until rmidev_init_device() runs */
+static struct rmidev_handle *rmidev;
+
+/* Signalled by rmidev_remove_device() so module_exit can wait */
+DECLARE_COMPLETION(rmidev_remove_complete);
+
+/* Threaded IRQ handler used while the sysfs interface owns the
+ * attention interrupt: just poke readers of the attn_state attribute.
+ */
+static irqreturn_t rmidev_sysfs_irq(int irq, void *data)
+{
+	struct synaptics_rmi4_data *rmi4_data = data;
+
+	sysfs_notify(&rmi4_data->input_dev->dev.kobj,
+			SYSFS_FOLDER_NAME, "attn_state");
+
+	return IRQ_HANDLED;
+}
+
+/* Take over (enable=true) or give back (enable=false) the attention
+ * IRQ for the sysfs interface.  Serialized against the core driver's
+ * own IRQ management via rmi4_irq_enable_mutex.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int rmidev_sysfs_irq_enable(struct synaptics_rmi4_data *rmi4_data,
+		bool enable)
+{
+	int retval = 0;
+	unsigned char intr_status[MAX_INTR_REGISTERS];
+	unsigned long irq_flags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING |
+			IRQF_ONESHOT;
+
+	mutex_lock(&(rmi4_data->rmi4_irq_enable_mutex));
+
+	if (enable) {
+		if (rmi4_data->irq_enabled) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Interrupt already enabled\n",
+					__func__);
+			goto exit;
+		}
+
+		/* Clear interrupts first */
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				rmi4_data->f01_data_base_addr + 1,
+				intr_status,
+				rmi4_data->num_of_intr_regs);
+		if (retval < 0)
+			goto exit;
+
+		retval = request_threaded_irq(rmi4_data->irq, NULL,
+				rmidev_sysfs_irq, irq_flags,
+				PLATFORM_DRIVER_NAME, rmi4_data);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create irq thread\n",
+					__func__);
+			goto exit;
+		}
+
+		rmi4_data->irq_enabled = true;
+	} else {
+		if (rmi4_data->irq_enabled) {
+			disable_irq(rmi4_data->irq);
+			free_irq(rmi4_data->irq, rmi4_data);
+			rmi4_data->irq_enabled = false;
+		}
+	}
+
+exit:
+	mutex_unlock(&(rmi4_data->rmi4_irq_enable_mutex));
+
+	return retval;
+}
+
+/* sysfs "data" read: read "count" bytes of the register map starting
+ * at register address "pos" into buf.  In concurrent mode, a read of
+ * the F01 data registers additionally dispatches any pending touch
+ * reports so normal input keeps flowing while a tool polls.
+ *
+ * Returns bytes read or a negative errno.
+ */
+static ssize_t rmidev_sysfs_data_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	unsigned char intr_status = 0;
+	unsigned int length = (unsigned int)count;
+	unsigned short address = (unsigned short)pos;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	if (length > (REG_ADDR_LIMIT - address)) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Out of register map limit\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (length) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				address,
+				(unsigned char *)buf,
+				length);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read data\n",
+					__func__);
+			return retval;
+		}
+	} else {
+		return -EINVAL;
+	}
+
+	if (!rmidev->concurrent)
+		goto exit;
+
+	/* Touch dispatch only applies to an F01 data-register read that
+	 * actually covers the interrupt status byte (offset 1) */
+	if (address != rmi4_data->f01_data_base_addr)
+		goto exit;
+
+	if (length <= 1)
+		goto exit;
+
+	intr_status = buf[1];
+
+	if (!list_empty(&rmi->support_fn_list)) {
+		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+			if (fhandler->num_of_data_sources) {
+				if (fhandler->intr_mask & intr_status) {
+					rmi4_data->report_touch(rmi4_data,
+							fhandler);
+				}
+			}
+		}
+	}
+
+exit:
+	return length;
+}
+
+/* sysfs "data" write: write "count" bytes from buf to the register
+ * map starting at register address "pos".
+ *
+ * Returns bytes written or a negative errno.
+ */
+static ssize_t rmidev_sysfs_data_store(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	unsigned int length = (unsigned int)count;
+	unsigned short address = (unsigned short)pos;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	if (length > (REG_ADDR_LIMIT - address)) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Out of register map limit\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (length) {
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				address,
+				(unsigned char *)buf,
+				length);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write data\n",
+					__func__);
+			return retval;
+		}
+	} else {
+		return -EINVAL;
+	}
+
+	return length;
+}
+
+/* sysfs "open": writing "1" hands the attention IRQ over to the sysfs
+ * interface (core IRQ handling disabled, rmidev_sysfs_irq installed)
+ * and keeps the device awake.  Refused while the sensor sleeps.
+ */
+static ssize_t rmidev_sysfs_open_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	/* kstrtouint() returns 0 on success (not a conversion count like
+	 * sscanf); the original "!= 1" test rejected every valid input.
+	 */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input != 1)
+		return -EINVAL;
+
+	if (rmi4_data->sensor_sleep) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Sensor sleeping\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	rmi4_data->stay_awake = true;
+
+	rmi4_data->irq_enable(rmi4_data, false, false);
+	rmidev_sysfs_irq_enable(rmi4_data, true);
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Attention interrupt disabled\n",
+			__func__);
+
+	return count;
+}
+
+/* sysfs "release": writing "1" returns the attention IRQ to the core
+ * driver via a device reset and drops the stay-awake hold.
+ */
+static ssize_t rmidev_sysfs_release_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	/* kstrtouint() returns 0 on success; "!= 1" rejected all input */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input != 1)
+		return -EINVAL;
+
+	rmidev_sysfs_irq_enable(rmi4_data, false);
+
+	/* Reset re-registers the core's IRQ handling */
+	rmi4_data->reset_device(rmi4_data, false);
+
+	rmi4_data->stay_awake = false;
+
+	return count;
+}
+
+/* sysfs "attn_state": report the current raw level of the attention
+ * GPIO (the line the touch controller asserts on interrupt).
+ */
+static ssize_t rmidev_sysfs_attn_state_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int attn_state;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	attn_state = gpio_get_value(bdata->irq_gpio);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", attn_state);
+}
+
+/* sysfs "pid" read: PID of the registered logging tool (0 = none) */
+static ssize_t rmidev_sysfs_pid_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", rmidev->pid);
+}
+
+/* sysfs "pid" write: register the PID of a userspace tool that will
+ * receive SIGIO on masked interrupts (see rmidev_attn) and SIGTERM via
+ * the "term" attribute.  Writing 0 deregisters.
+ */
+static ssize_t rmidev_sysfs_pid_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	/* kstrtouint() returns 0 on success; "!= 1" rejected all input */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	rmidev->pid = input;
+
+	if (rmidev->pid) {
+		/* NOTE(review): task looked up once and cached without a
+		 * reference; it can go stale if the process exits — confirm
+		 * the tool's lifetime covers usage.
+		 */
+		rmidev->task = pid_task(find_vpid(rmidev->pid), PIDTYPE_PID);
+		if (!rmidev->task) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to locate PID of data logging tool\n",
+					__func__);
+			return -EINVAL;
+		}
+	}
+
+	return count;
+}
+
+/* sysfs "term": writing "1" sends SIGTERM to the registered logging
+ * tool (if a PID was set via the "pid" attribute).
+ */
+static ssize_t rmidev_sysfs_term_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+
+	/* kstrtouint() returns 0 on success; "!= 1" rejected all input */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input != 1)
+		return -EINVAL;
+
+	if (rmidev->pid)
+		send_sig_info(SIGTERM, &rmidev->terminate_signal, rmidev->task);
+
+	return count;
+}
+
+/* sysfs "intr_mask" read: current mask of interrupts forwarded as
+ * SIGIO to the registered tool.
+ */
+static ssize_t rmidev_sysfs_intr_mask_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%02x\n", rmidev->intr_mask);
+}
+
+/* sysfs "intr_mask" write: set which interrupt bits trigger SIGIO
+ * delivery in rmidev_attn().  Value is parsed as decimal and truncated
+ * to one byte.
+ */
+static ssize_t rmidev_sysfs_intr_mask_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+
+	/* kstrtouint() returns 0 on success; "!= 1" rejected all input */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	rmidev->intr_mask = (unsigned char)input;
+
+	return count;
+}
+
+/* sysfs "concurrent" read: whether F01 data reads also dispatch touch
+ * reports (see rmidev_sysfs_data_show / rmidev_read).
+ */
+static ssize_t rmidev_sysfs_concurrent_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", rmidev->concurrent);
+}
+
+/* sysfs "concurrent" write: any value > 0 enables concurrent touch
+ * reporting during F01 data reads; 0 disables it.
+ */
+static ssize_t rmidev_sysfs_concurrent_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+
+	/* kstrtouint() returns 0 on success; "!= 1" rejected all input */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	rmidev->concurrent = input > 0 ? true : false;
+
+	return count;
+}
+
+/* Grow the shared bounce buffer to hold at least count + 1 bytes.
+ * Keeps the existing buffer if already large enough; on allocation
+ * failure the buffer is left freed with tmpbuf_size reset to 0.
+ *
+ * Returns 0 on success, -ENOMEM on failure.  Callers must check the
+ * return value before touching rmidev->tmpbuf.
+ */
+static int rmidev_allocate_buffer(int count)
+{
+	if (count + 1 > rmidev->tmpbuf_size) {
+		if (rmidev->tmpbuf_size)
+			kfree(rmidev->tmpbuf);
+		rmidev->tmpbuf = kzalloc(count + 1, GFP_KERNEL);
+		if (!rmidev->tmpbuf) {
+			dev_err(rmidev->rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for buffer\n",
+					__func__);
+			rmidev->tmpbuf_size = 0;
+			return -ENOMEM;
+		}
+		rmidev->tmpbuf_size = count + 1;
+	}
+
+	return 0;
+}
+
+/*
+ * rmidev_llseek - set register address to access for RMI device
+ *
+ * @filp: pointer to file structure
+ * @off:
+ *	if whence == SEEK_SET,
+ *		off: 16-bit RMI register address
+ *	if whence == SEEK_CUR,
+ *		off: offset from current position
+ *	if whence == SEEK_END,
+ *		off: offset from end position (0xFFFF)
+ * @whence: SEEK_SET, SEEK_CUR, or SEEK_END
+ *
+ * Returns the new position (== register address) or -EINVAL/-EBADF.
+ */
+static loff_t rmidev_llseek(struct file *filp, loff_t off, int whence)
+{
+	loff_t newpos;
+	struct rmidev_data *dev_data = filp->private_data;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	if (IS_ERR(dev_data)) {
+		pr_err("%s: Pointer of char device data is invalid", __func__);
+		return -EBADF;
+	}
+
+	mutex_lock(&(dev_data->file_mutex));
+
+	switch (whence) {
+	case SEEK_SET:
+		newpos = off;
+		break;
+	case SEEK_CUR:
+		newpos = filp->f_pos + off;
+		break;
+	case SEEK_END:
+		newpos = REG_ADDR_LIMIT + off;
+		break;
+	default:
+		newpos = -EINVAL;
+		goto clean_up;
+	}
+
+	/* Positions map 1:1 to 16-bit register addresses */
+	if (newpos < 0 || newpos > REG_ADDR_LIMIT) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: New position 0x%04x is invalid\n",
+				__func__, (unsigned int)newpos);
+		newpos = -EINVAL;
+		goto clean_up;
+	}
+
+	filp->f_pos = newpos;
+
+clean_up:
+	mutex_unlock(&(dev_data->file_mutex));
+
+	return newpos;
+}
+
+/*
+ * rmidev_read: read register data from RMI device
+ *
+ * @filp: pointer to file structure
+ * @buf: pointer to user space buffer
+ * @count: number of bytes to read
+ * @f_pos: starting RMI register address
+ *
+ * Returns bytes read or a negative errno.  In concurrent mode a read
+ * of the F01 data registers also dispatches pending touch reports.
+ */
+static ssize_t rmidev_read(struct file *filp, char __user *buf,
+		size_t count, loff_t *f_pos)
+{
+	ssize_t retval;
+	unsigned char intr_status = 0;
+	unsigned short address;
+	struct rmidev_data *dev_data = filp->private_data;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	if (IS_ERR(dev_data)) {
+		pr_err("%s: Pointer of char device data is invalid", __func__);
+		return -EBADF;
+	}
+
+	mutex_lock(&(dev_data->file_mutex));
+
+	if (*f_pos > REG_ADDR_LIMIT) {
+		retval = -EFAULT;
+		goto clean_up;
+	}
+
+	if (count > (REG_ADDR_LIMIT - *f_pos))
+		count = REG_ADDR_LIMIT - *f_pos;
+
+	if (count == 0) {
+		retval = 0;
+		goto clean_up;
+	}
+	address = (unsigned short)(*f_pos);
+
+	/* Original ignored the result; a failed allocation left tmpbuf
+	 * NULL and was then dereferenced below.
+	 */
+	retval = rmidev_allocate_buffer(count);
+	if (retval < 0)
+		goto clean_up;
+
+	retval = synaptics_rmi4_reg_read(rmidev->rmi4_data,
+			*f_pos,
+			rmidev->tmpbuf,
+			count);
+	if (retval < 0)
+		goto clean_up;
+
+	if (copy_to_user(buf, rmidev->tmpbuf, count))
+		retval = -EFAULT;
+	else
+		*f_pos += retval;
+
+	if (!rmidev->concurrent)
+		goto clean_up;
+
+	if (address != rmi4_data->f01_data_base_addr)
+		goto clean_up;
+
+	if (count <= 1)
+		goto clean_up;
+
+	/* Interrupt status lives at F01 data base + 1 */
+	intr_status = rmidev->tmpbuf[1];
+
+	if (!list_empty(&rmi->support_fn_list)) {
+		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+			if (fhandler->num_of_data_sources) {
+				if (fhandler->intr_mask & intr_status) {
+					rmi4_data->report_touch(rmi4_data,
+							fhandler);
+				}
+			}
+		}
+	}
+
+clean_up:
+	mutex_unlock(&(dev_data->file_mutex));
+
+	return retval;
+}
+
+/*
+ * rmidev_write: write register data to RMI device
+ *
+ * @filp: pointer to file structure
+ * @buf: pointer to user space buffer
+ * @count: number of bytes to write
+ * @f_pos: starting RMI register address
+ *
+ * Returns bytes written or a negative errno.
+ */
+static ssize_t rmidev_write(struct file *filp, const char __user *buf,
+		size_t count, loff_t *f_pos)
+{
+	ssize_t retval;
+	struct rmidev_data *dev_data = filp->private_data;
+
+	if (IS_ERR(dev_data)) {
+		pr_err("%s: Pointer of char device data is invalid", __func__);
+		return -EBADF;
+	}
+
+	mutex_lock(&(dev_data->file_mutex));
+
+	if (*f_pos > REG_ADDR_LIMIT) {
+		retval = -EFAULT;
+		goto unlock;
+	}
+
+	if (count > (REG_ADDR_LIMIT - *f_pos))
+		count = REG_ADDR_LIMIT - *f_pos;
+
+	if (count == 0) {
+		retval = 0;
+		goto unlock;
+	}
+
+	/* Original ignored the result; a failed allocation left tmpbuf
+	 * NULL for the copy below.
+	 */
+	retval = rmidev_allocate_buffer(count);
+	if (retval < 0)
+		goto unlock;
+
+	if (copy_from_user(rmidev->tmpbuf, buf, count)) {
+		/* Original did "return -EFAULT;" here, leaking file_mutex
+		 * (the goto after it was dead code).
+		 */
+		retval = -EFAULT;
+		goto unlock;
+	}
+
+	retval = synaptics_rmi4_reg_write(rmidev->rmi4_data,
+			*f_pos,
+			rmidev->tmpbuf,
+			count);
+	if (retval >= 0)
+		*f_pos += retval;
+
+unlock:
+	mutex_unlock(&(dev_data->file_mutex));
+
+	return retval;
+}
+
+/* Char device open: single-opener policy (ref_count).  Disables the
+ * core driver's attention interrupt and keeps the device awake for
+ * the duration of the session.
+ */
+static int rmidev_open(struct inode *inp, struct file *filp)
+{
+	int retval = 0;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+	struct rmidev_data *dev_data =
+			container_of(inp->i_cdev, struct rmidev_data, main_dev);
+
+	if (!dev_data)
+		return -EACCES;
+
+	if (rmi4_data->sensor_sleep) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Sensor sleeping\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	rmi4_data->stay_awake = true;
+
+	filp->private_data = dev_data;
+
+	mutex_lock(&(dev_data->file_mutex));
+
+	rmi4_data->irq_enable(rmi4_data, false, false);
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Attention interrupt disabled\n",
+			__func__);
+
+	/* Only one concurrent opener allowed */
+	if (dev_data->ref_count < 1)
+		dev_data->ref_count++;
+	else
+		retval = -EACCES;
+
+	mutex_unlock(&(dev_data->file_mutex));
+
+	return retval;
+}
+
+/* Char device release: drop the opener reference, reset the device so
+ * the core driver reclaims interrupt handling, and allow sleep again.
+ */
+static int rmidev_release(struct inode *inp, struct file *filp)
+{
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+	struct rmidev_data *dev_data =
+			container_of(inp->i_cdev, struct rmidev_data, main_dev);
+
+	if (!dev_data)
+		return -EACCES;
+
+	mutex_lock(&(dev_data->file_mutex));
+
+	dev_data->ref_count--;
+	if (dev_data->ref_count < 0)
+		dev_data->ref_count = 0;
+
+	rmi4_data->reset_device(rmi4_data, false);
+
+	rmi4_data->stay_awake = false;
+
+	mutex_unlock(&(dev_data->file_mutex));
+
+	return 0;
+}
+
+/* File operations for the /dev/rmi* char device; f_pos doubles as the
+ * RMI register address (see rmidev_llseek).
+ */
+static const struct file_operations rmidev_fops = {
+	.owner = THIS_MODULE,
+	.llseek = rmidev_llseek,
+	.read = rmidev_read,
+	.write = rmidev_write,
+	.open = rmidev_open,
+	.release = rmidev_release,
+};
+
+/* Tear down the char device: destroy the device node, delete the cdev
+ * and release its device-number region.  Safe to call with NULL.
+ */
+static void rmidev_device_cleanup(struct rmidev_data *dev_data)
+{
+	dev_t devno;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	if (dev_data) {
+		devno = dev_data->main_dev.dev;
+
+		if (dev_data->device_class)
+			device_destroy(dev_data->device_class, devno);
+
+		cdev_del(&dev_data->main_dev);
+
+		unregister_chrdev_region(devno, 1);
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: rmidev device removed\n",
+				__func__);
+	}
+
+	return;
+}
+
+/* devnode callback for the rmidev class: place nodes under /dev/rmi/
+ * with world read/write permissions.
+ */
+static char *rmi_char_devnode(struct device *dev, umode_t *mode)
+{
+	if (!mode)
+		return NULL;
+
+	/* 0666: accessible to unprivileged tooling */
+	*mode = (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
+
+	return kasprintf(GFP_KERNEL, "rmi/%s", dev_name(dev));
+}
+
+/* Create the "rmidev" device class once; subsequent calls are no-ops.
+ * Returns 0 on success or -ENODEV.
+ */
+static int rmidev_create_device_class(void)
+{
+	if (rmidev_device_class != NULL)
+		return 0;
+
+	rmidev_device_class = class_create(THIS_MODULE, DEVICE_CLASS_NAME);
+
+	if (IS_ERR(rmidev_device_class)) {
+		pr_err("%s: Failed to create /dev/%s\n",
+				__func__, CHAR_DEVICE_NAME);
+		return -ENODEV;
+	}
+
+	rmidev_device_class->devnode = rmi_char_devnode;
+
+	return 0;
+}
+
+/* Attention callback from the core driver: forward masked interrupts
+ * to the registered userspace tool as SIGIO.
+ */
+static void rmidev_attn(struct synaptics_rmi4_data *rmi4_data,
+		unsigned char intr_mask)
+{
+	if (!rmidev)
+		return;
+
+	if (rmidev->pid && (rmidev->intr_mask & intr_mask))
+		send_sig_info(SIGIO, &rmidev->interrupt_signal, rmidev->task);
+
+	return;
+}
+
+/* Set up the rmidev expansion module: allocate the handle, create the
+ * device class, char device region/node, export the attention GPIO and
+ * create the sysfs interface.  Returns 0 on success or negative errno;
+ * all partially created resources are unwound on failure.
+ */
+static int rmidev_init_device(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	dev_t dev_no;
+	unsigned char attr_count;
+	struct rmidev_data *dev_data;
+	struct device *device_ptr;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	if (rmidev) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	rmidev = kzalloc(sizeof(*rmidev), GFP_KERNEL);
+	if (!rmidev) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for rmidev\n",
+				__func__);
+		retval = -ENOMEM;
+		goto err_rmidev;
+	}
+
+	rmidev->rmi4_data = rmi4_data;
+
+	/* Signal templates reused by rmidev_attn()/term_store() */
+	memset(&rmidev->interrupt_signal, 0, sizeof(rmidev->interrupt_signal));
+	rmidev->interrupt_signal.si_signo = SIGIO;
+	rmidev->interrupt_signal.si_code = SI_USER;
+
+	memset(&rmidev->terminate_signal, 0, sizeof(rmidev->terminate_signal));
+	rmidev->terminate_signal.si_signo = SIGTERM;
+	rmidev->terminate_signal.si_code = SI_USER;
+
+	retval = rmidev_create_device_class();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create device class\n",
+				__func__);
+		goto err_device_class;
+	}
+
+	if (rmidev_major_num) {
+		dev_no = MKDEV(rmidev_major_num, DEV_NUMBER);
+		/* Original never checked this result */
+		retval = register_chrdev_region(dev_no, 1, CHAR_DEVICE_NAME);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to register char device region\n",
+					__func__);
+			goto err_device_region;
+		}
+	} else {
+		retval = alloc_chrdev_region(&dev_no, 0, 1, CHAR_DEVICE_NAME);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to allocate char device region\n",
+					__func__);
+			goto err_device_region;
+		}
+
+		rmidev_major_num = MAJOR(dev_no);
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Major number of rmidev = %d\n",
+				__func__, rmidev_major_num);
+	}
+
+	/* Record the device number: rmidev_remove_device() unregisters
+	 * rmidev->dev_no, which the original left zero-initialized.
+	 */
+	rmidev->dev_no = dev_no;
+
+	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
+	if (!dev_data) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for dev_data\n",
+				__func__);
+		retval = -ENOMEM;
+		goto err_dev_data;
+	}
+
+	mutex_init(&dev_data->file_mutex);
+	dev_data->rmi_dev = rmidev;
+	rmidev->data = dev_data;
+
+	cdev_init(&dev_data->main_dev, &rmidev_fops);
+
+	retval = cdev_add(&dev_data->main_dev, dev_no, 1);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to add rmi char device\n",
+				__func__);
+		goto err_char_device;
+	}
+
+	dev_set_name(&rmidev->dev, "rmidev%d", MINOR(dev_no));
+	dev_data->device_class = rmidev_device_class;
+
+	device_ptr = device_create(dev_data->device_class, NULL, dev_no,
+			NULL, CHAR_DEVICE_NAME"%d", MINOR(dev_no));
+	if (IS_ERR(device_ptr)) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create rmi char device\n",
+				__func__);
+		retval = -ENODEV;
+		goto err_char_device;
+	}
+
+	/* GPIO export is best-effort: failures are logged, not fatal */
+	retval = gpio_export(bdata->irq_gpio, false);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to export attention gpio\n",
+				__func__);
+	} else {
+		retval = gpio_export_link(&(rmi4_data->input_dev->dev),
+				"attn", bdata->irq_gpio);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s Failed to create gpio symlink\n",
+					__func__);
+		} else {
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Exported attention gpio %d\n",
+					__func__, bdata->irq_gpio);
+		}
+	}
+
+	rmidev->sysfs_dir = kobject_create_and_add(SYSFS_FOLDER_NAME,
+			&rmi4_data->input_dev->dev.kobj);
+	if (!rmidev->sysfs_dir) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs directory\n",
+				__func__);
+		retval = -ENODEV;
+		goto err_sysfs_dir;
+	}
+
+	retval = sysfs_create_bin_file(rmidev->sysfs_dir,
+			&attr_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs bin file\n",
+				__func__);
+		goto err_sysfs_bin;
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(rmidev->sysfs_dir,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			retval = -ENODEV;
+			goto err_sysfs_attrs;
+		}
+	}
+
+	return 0;
+
+err_sysfs_attrs:
+	/* attr_count is unsigned char: the original
+	 * "for (attr_count--; attr_count >= 0; attr_count--)" condition
+	 * was always true, underflowing and looping forever.
+	 */
+	while (attr_count > 0)
+		sysfs_remove_file(rmidev->sysfs_dir, &attrs[--attr_count].attr);
+
+	sysfs_remove_bin_file(rmidev->sysfs_dir, &attr_data);
+
+err_sysfs_bin:
+	kobject_put(rmidev->sysfs_dir);
+
+err_sysfs_dir:
+	sysfs_remove_link(&(rmi4_data->input_dev->dev.kobj), "attn");
+	gpio_unexport(bdata->irq_gpio);
+
+err_char_device:
+	/* NOTE(review): cleanup also calls unregister_chrdev_region()
+	 * (via dev_data->main_dev.dev), so falling through to
+	 * err_dev_data unregisters the region a second time — confirm
+	 * against upstream whether one of the calls should be dropped.
+	 */
+	rmidev_device_cleanup(dev_data);
+	kfree(dev_data);
+
+err_dev_data:
+	unregister_chrdev_region(dev_no, 1);
+
+err_device_region:
+	if (rmidev_device_class != NULL) {
+		class_destroy(rmidev_device_class);
+		rmidev_device_class = NULL;
+	}
+
+err_device_class:
+	kfree(rmidev);
+	rmidev = NULL;
+
+err_rmidev:
+	return retval;
+}
+
+/* Tear down everything rmidev_init_device() created, then signal
+ * rmidev_remove_complete so module_exit can return.
+ */
+static void rmidev_remove_device(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char attr_count;
+	struct rmidev_data *dev_data;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	if (!rmidev)
+		goto exit;
+
+	/* Restore default so a re-init re-allocates a major number */
+	rmidev_major_num = RMIDEV_MAJOR_NUM;
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++)
+		sysfs_remove_file(rmidev->sysfs_dir, &attrs[attr_count].attr);
+
+	sysfs_remove_bin_file(rmidev->sysfs_dir, &attr_data);
+
+	kobject_put(rmidev->sysfs_dir);
+
+	sysfs_remove_link(&(rmi4_data->input_dev->dev.kobj), "attn");
+	gpio_unexport(bdata->irq_gpio);
+
+	dev_data = rmidev->data;
+	if (dev_data) {
+		rmidev_device_cleanup(dev_data);
+		kfree(dev_data);
+	}
+
+	/* NOTE(review): rmidev->dev_no is never assigned in
+	 * rmidev_init_device as written, so this unregisters region 0;
+	 * the region was also already released in rmidev_device_cleanup()
+	 * above — confirm against upstream which call should remain.
+	 */
+	unregister_chrdev_region(rmidev->dev_no, 1);
+
+	if (rmidev_device_class != NULL) {
+		class_destroy(rmidev_device_class);
+		rmidev_device_class = NULL;
+	}
+
+	kfree(rmidev->tmpbuf);
+
+	kfree(rmidev);
+	rmidev = NULL;
+
+exit:
+	complete(&rmidev_remove_complete);
+
+	return;
+}
+
+/* Expansion-function descriptor for the RMI char-device module; only
+ * init/remove/attn are implemented.
+ */
+static struct synaptics_rmi4_exp_fn rmidev_module = {
+	.fn_type = RMI_DEV,
+	.init = rmidev_init_device,
+	.remove = rmidev_remove_device,
+	.reset = NULL,
+	.reinit = NULL,
+	.early_suspend = NULL,
+	.suspend = NULL,
+	.resume = NULL,
+	.late_resume = NULL,
+	.attn = rmidev_attn,
+};
+
+/* Module entry: register the rmidev expansion function with the DSX
+ * core (insert = true).
+ */
+static int __init rmidev_module_init(void)
+{
+	synaptics_rmi4_new_function(&rmidev_module, true);
+
+	return 0;
+}
+
+/* Module exit: deregister from the core, then wait until the core has
+ * run rmidev_remove_device() (which completes rmidev_remove_complete)
+ * before the module text goes away.
+ */
+static void __exit rmidev_module_exit(void)
+{
+	synaptics_rmi4_new_function(&rmidev_module, false);
+
+	wait_for_completion(&rmidev_remove_complete);
+
+	return;
+}
+
+module_init(rmidev_module_init);
+module_exit(rmidev_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX RMI Dev Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_hid_i2c.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_hid_i2c.c
new file mode 100644
index 0000000..244e97e
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_hid_i2c.c
@@ -0,0 +1,1006 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/gpio.h>
+#include <linux/types.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+#define SYN_I2C_RETRY_TIMES 10
+
+#define REPORT_ID_GET_BLOB 0x07
+#define REPORT_ID_WRITE 0x09
+#define REPORT_ID_READ_ADDRESS 0x0a
+#define REPORT_ID_READ_DATA 0x0b
+#define REPORT_ID_SET_RMI_MODE 0x0f
+
+#define PREFIX_USAGE_PAGE_1BYTE 0x05
+#define PREFIX_USAGE_PAGE_2BYTES 0x06
+#define PREFIX_USAGE 0x09
+#define PREFIX_REPORT_ID 0x85
+#define PREFIX_REPORT_COUNT_1BYTE 0x95
+#define PREFIX_REPORT_COUNT_2BYTES 0x96
+
+#define USAGE_GET_BLOB 0xc5
+#define USAGE_WRITE 0x02
+#define USAGE_READ_ADDRESS 0x03
+#define USAGE_READ_DATA 0x04
+#define USAGE_SET_MODE 0x06
+
+#define FEATURE_REPORT_TYPE 0x03
+
+#define VENDOR_DEFINED_PAGE 0xff00
+
+#define BLOB_REPORT_SIZE 256
+
+#define RESET_COMMAND 0x01
+#define GET_REPORT_COMMAND 0x02
+#define SET_REPORT_COMMAND 0x03
+#define SET_POWER_COMMAND 0x08
+
+#define FINGER_MODE 0x00
+#define RMI_MODE 0x02
+
+/* Report IDs discovered from (or defaulted for) the HID report descriptor. */
+struct hid_report_info {
+	unsigned char get_blob_id;	/* feature report: retrieve blob */
+	unsigned char write_id;		/* output report: RMI register write */
+	unsigned char read_addr_id;	/* output report: set RMI read address */
+	unsigned char read_data_id;	/* input report: RMI read data */
+	unsigned char set_mode_id;	/* feature report: finger/RMI mode */
+	unsigned int blob_size;		/* payload size of the blob report */
+};
+
+static struct hid_report_info hid_report;
+
+/*
+ * Device descriptor read from the register at synaptics,dev-dscrptr-addr.
+ * Field layout appears to follow the HID over I2C device descriptor --
+ * TODO confirm against the HID/I2C specification.
+ */
+struct hid_device_descriptor {
+	unsigned short device_descriptor_length;
+	unsigned short format_version;
+	unsigned short report_descriptor_length;
+	unsigned short report_descriptor_index;
+	unsigned short input_register_index;
+	unsigned short input_report_max_length;
+	unsigned short output_register_index;
+	unsigned short output_report_max_length;
+	unsigned short command_register_index;
+	unsigned short data_register_index;
+	unsigned short vendor_id;
+	unsigned short product_id;
+	unsigned short version_id;
+	unsigned int reserved;
+};
+
+static struct hid_device_descriptor hid_dd;
+
+/*
+ * Lazily grown scratch buffers shared by all bus transfers.  A size of 0
+ * means the corresponding pointer has never been allocated.  Callers
+ * serialize access through rmi4_io_ctrl_mutex.
+ */
+struct i2c_rw_buffer {
+	unsigned char *read;
+	unsigned char *write;
+	unsigned int read_size;		/* current capacity of read buffer */
+	unsigned int write_size;	/* current capacity of write buffer */
+};
+
+static struct i2c_rw_buffer buffer;
+
+#ifdef CONFIG_OF
+/*
+ * parse_dt() - populate board data from the client's device-tree node
+ * @dev: device carrying the of_node
+ * @bdata: board data structure to fill in
+ *
+ * Optional properties fall back to defaults (0 or -1).  A property that
+ * is present but unreadable is treated as a hard error.
+ *
+ * Return: 0 on success or a negative errno.
+ */
+static int parse_dt(struct device *dev, struct synaptics_dsx_board_data *bdata)
+{
+	int retval;
+	u32 value;
+	const char *name;
+	struct property *prop;
+	struct device_node *np = dev->of_node;
+
+	bdata->irq_gpio = of_get_named_gpio_flags(np,
+			"synaptics,irq-gpio", 0,
+			(enum of_gpio_flags *)&bdata->irq_flags);
+
+	retval = of_property_read_u32(np, "synaptics,irq-on-state",
+			&value);
+	if (retval < 0)
+		bdata->irq_on_state = 0;
+	else
+		bdata->irq_on_state = value;
+
+	retval = of_property_read_string(np, "synaptics,pwr-reg-name", &name);
+	if (retval < 0)
+		bdata->pwr_reg_name = NULL;
+	else
+		bdata->pwr_reg_name = name;
+
+	retval = of_property_read_string(np, "synaptics,bus-reg-name", &name);
+	if (retval < 0)
+		bdata->bus_reg_name = NULL;
+	else
+		bdata->bus_reg_name = name;
+
+	prop = of_find_property(np, "synaptics,power-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->power_gpio = of_get_named_gpio_flags(np,
+				"synaptics,power-gpio", 0, NULL);
+		retval = of_property_read_u32(np, "synaptics,power-on-state",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,power-on-state property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->power_on_state = value;
+		}
+	} else {
+		bdata->power_gpio = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,power-delay-ms", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,power-delay-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,power-delay-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->power_delay_ms = value;
+		}
+	} else {
+		bdata->power_delay_ms = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,reset-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->reset_gpio = of_get_named_gpio_flags(np,
+				"synaptics,reset-gpio", 0, NULL);
+		retval = of_property_read_u32(np, "synaptics,reset-on-state",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-on-state property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_on_state = value;
+		}
+		retval = of_property_read_u32(np, "synaptics,reset-active-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-active-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_active_ms = value;
+		}
+	} else {
+		bdata->reset_gpio = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,reset-delay-ms", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,reset-delay-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-delay-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_delay_ms = value;
+		}
+	} else {
+		bdata->reset_delay_ms = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,dev-dscrptr-addr", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,dev-dscrptr-addr",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,dev-dscrptr-addr property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->device_descriptor_addr = (unsigned short)value;
+		}
+	} else {
+		bdata->device_descriptor_addr = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,max-y-for-2d", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,max-y-for-2d",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,max-y-for-2d property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->max_y_for_2d = value;
+		}
+	} else {
+		bdata->max_y_for_2d = -1;
+	}
+
+	/*
+	 * Presence-only boolean flags.  Compare the property pointer
+	 * against NULL; an ordered comparison of a pointer with an
+	 * integer (prop > 0) is invalid C.
+	 */
+	prop = of_find_property(np, "synaptics,swap-axes", NULL);
+	bdata->swap_axes = (prop != NULL);
+
+	prop = of_find_property(np, "synaptics,x-flip", NULL);
+	bdata->x_flip = (prop != NULL);
+
+	prop = of_find_property(np, "synaptics,y-flip", NULL);
+	bdata->y_flip = (prop != NULL);
+
+	prop = of_find_property(np, "synaptics,ub-i2c-addr", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,ub-i2c-addr",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,ub-i2c-addr property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->ub_i2c_addr = (unsigned short)value;
+		}
+	} else {
+		bdata->ub_i2c_addr = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,cap-button-codes", NULL);
+	if (prop && prop->length) {
+		bdata->cap_button_map->map = devm_kzalloc(dev,
+				prop->length,
+				GFP_KERNEL);
+		if (!bdata->cap_button_map->map)
+			return -ENOMEM;
+		bdata->cap_button_map->nbuttons = prop->length / sizeof(u32);
+		retval = of_property_read_u32_array(np,
+				"synaptics,cap-button-codes",
+				bdata->cap_button_map->map,
+				bdata->cap_button_map->nbuttons);
+		if (retval < 0) {
+			bdata->cap_button_map->nbuttons = 0;
+			bdata->cap_button_map->map = NULL;
+		}
+	} else {
+		bdata->cap_button_map->nbuttons = 0;
+		bdata->cap_button_map->map = NULL;
+	}
+
+	prop = of_find_property(np, "synaptics,vir-button-codes", NULL);
+	if (prop && prop->length) {
+		bdata->vir_button_map->map = devm_kzalloc(dev,
+				prop->length,
+				GFP_KERNEL);
+		if (!bdata->vir_button_map->map)
+			return -ENOMEM;
+		/* five u32 values describe each virtual button */
+		bdata->vir_button_map->nbuttons = prop->length / sizeof(u32);
+		bdata->vir_button_map->nbuttons /= 5;
+		retval = of_property_read_u32_array(np,
+				"synaptics,vir-button-codes",
+				bdata->vir_button_map->map,
+				bdata->vir_button_map->nbuttons * 5);
+		if (retval < 0) {
+			bdata->vir_button_map->nbuttons = 0;
+			bdata->vir_button_map->map = NULL;
+		}
+	} else {
+		bdata->vir_button_map->nbuttons = 0;
+		bdata->vir_button_map->map = NULL;
+	}
+
+	return 0;
+}
+#endif
+
+/*
+ * Perform a single-message i2c_transfer() with up to SYN_I2C_RETRY_TIMES
+ * attempts, sleeping 20 ms between retries.
+ *
+ * Return: 0 on success, -EIO if every attempt failed.
+ */
+static int do_i2c_transfer(struct i2c_client *client, struct i2c_msg *msg)
+{
+	unsigned char retry;
+
+	for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) {
+		if (i2c_transfer(client->adapter, msg, 1) == 1)
+			break;
+		dev_err(&client->dev,
+				"%s: I2C retry %d\n",
+				__func__, retry + 1);
+		msleep(20);
+	}
+
+	/* loop ran to completion without a successful transfer */
+	if (retry == SYN_I2C_RETRY_TIMES) {
+		dev_err(&client->dev,
+				"%s: I2C transfer over retry limit\n",
+				__func__);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/*
+ * Ensure *buffer can hold at least @length bytes, reallocating if needed.
+ *
+ * On allocation failure the tracked size is reset to 0; otherwise a
+ * later call with a smaller @length would see a stale non-zero size
+ * next to a NULL pointer, skip the allocation, and let the caller
+ * dereference NULL.
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure.
+ */
+static int check_buffer(unsigned char **buffer, unsigned int *buffer_size,
+		unsigned int length)
+{
+	if (*buffer_size < length) {
+		if (*buffer_size)
+			kfree(*buffer);
+		*buffer = kzalloc(length, GFP_KERNEL);
+		if (!(*buffer)) {
+			*buffer_size = 0;
+			return -ENOMEM;
+		}
+		*buffer_size = length;
+	}
+
+	return 0;
+}
+
+/*
+ * Read @length bytes from the device into the shared read buffer.
+ * Callers hold rmi4_io_ctrl_mutex (the buffer is shared state).
+ *
+ * Return: 0 on success or a negative errno.
+ */
+static int generic_read(struct i2c_client *client, unsigned short length)
+{
+	int retval;
+	struct i2c_msg msg[] = {
+		{
+			.addr = client->addr,
+			.flags = I2C_M_RD,
+			.len = length,
+		}
+	};
+
+	/* bail out on OOM instead of handing a NULL buffer to the bus */
+	retval = check_buffer(&buffer.read, &buffer.read_size, length);
+	if (retval < 0)
+		return retval;
+	msg[0].buf = buffer.read;
+
+	retval = do_i2c_transfer(client, msg);
+
+	return retval;
+}
+
+/*
+ * Write @length bytes from the shared write buffer to the device.
+ * The caller must have sized (via check_buffer) and filled
+ * buffer.write beforehand, and must hold rmi4_io_ctrl_mutex.
+ *
+ * Return: 0 on success or a negative errno.
+ */
+static int generic_write(struct i2c_client *client, unsigned short length)
+{
+	int retval;
+	struct i2c_msg msg[] = {
+		{
+			.addr = client->addr,
+			.flags = 0,
+			.len = length,
+			.buf = buffer.write,
+		}
+	};
+
+	retval = do_i2c_transfer(client, msg);
+
+	return retval;
+}
+
+/*
+ * Advance *index past one short item of the HID report descriptor.
+ * The low two bits of the item's prefix byte encode how many data
+ * bytes follow: 0, 1, 2 or 4.
+ */
+static void traverse_report_descriptor(unsigned int *index)
+{
+	/* prefix byte plus 0, 1, 2 or 4 payload bytes respectively */
+	static const unsigned char item_span[] = {1, 2, 3, 5};
+	unsigned char size_code = buffer.read[*index] & MASK_2BIT;
+
+	*index += item_span[size_code];
+}
+
+/*
+ * Starting at the report-ID item of the blob report, scan forward for
+ * the first report-count prefix and record its value as the blob size.
+ * Leaves hid_report.blob_size untouched if no count item is found.
+ */
+static void find_blob_size(unsigned int index)
+{
+	unsigned int ii = index;
+	unsigned char *buf = buffer.read;
+
+	while (ii < hid_dd.report_descriptor_length) {
+		if (buf[ii] == PREFIX_REPORT_COUNT_1BYTE) {
+			hid_report.blob_size = buf[ii + 1];
+			return;
+		} else if (buf[ii] == PREFIX_REPORT_COUNT_2BYTES) {
+			/* two-byte count, little endian */
+			hid_report.blob_size = buf[ii + 1] | (buf[ii + 2] << 8);
+			return;
+		}
+		traverse_report_descriptor(&ii);
+	}
+
+	return;
+}
+
+/*
+ * Examine one item of the report descriptor and record the report IDs
+ * of the vendor-defined RMI usages.
+ *
+ * The static locals deliberately carry the most recently seen report
+ * ID, its position and the current usage page across successive calls
+ * while one descriptor is being scanned item by item.
+ */
+static void find_reports(unsigned int index)
+{
+	unsigned int ii = index;
+	unsigned char *buf = buffer.read;
+	static unsigned int report_id_index;
+	static unsigned char report_id;
+	static unsigned short usage_page;
+
+	if (buf[ii] == PREFIX_REPORT_ID) {
+		report_id = buf[ii + 1];
+		report_id_index = ii;
+		return;
+	}
+
+	if (buf[ii] == PREFIX_USAGE_PAGE_1BYTE) {
+		usage_page = buf[ii + 1];
+		return;
+	} else if (buf[ii] == PREFIX_USAGE_PAGE_2BYTES) {
+		/* two-byte usage page, little endian */
+		usage_page = buf[ii + 1] | (buf[ii + 2] << 8);
+		return;
+	}
+
+	/* only usages on the vendor-defined page are of interest */
+	if ((usage_page == VENDOR_DEFINED_PAGE) && (buf[ii] == PREFIX_USAGE)) {
+		switch (buf[ii + 1]) {
+		case USAGE_GET_BLOB:
+			hid_report.get_blob_id = report_id;
+			find_blob_size(report_id_index);
+			break;
+		case USAGE_WRITE:
+			hid_report.write_id = report_id;
+			break;
+		case USAGE_READ_ADDRESS:
+			hid_report.read_addr_id = report_id;
+			break;
+		case USAGE_READ_DATA:
+			hid_report.read_data_id = report_id;
+			break;
+		case USAGE_SET_MODE:
+			hid_report.set_mode_id = report_id;
+			break;
+		default:
+			break;
+		}
+	}
+
+	return;
+}
+
+/*
+ * Fetch the HID report descriptor and extract the report IDs used for
+ * RMI register access.  Sensible defaults are installed first so a
+ * descriptor that omits a usage still leaves a workable configuration.
+ *
+ * Called from hid_i2c_init() with rmi4_io_ctrl_mutex held; buffer.write
+ * has already been sized by the caller.
+ *
+ * Return: 0 on success or a negative errno.
+ */
+static int parse_report_descriptor(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned int ii = 0;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+
+	/* request the descriptor by writing its register index */
+	buffer.write[0] = hid_dd.report_descriptor_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.report_descriptor_index >> 8;
+	retval = generic_write(i2c, 2);
+	if (retval < 0)
+		return retval;
+	retval = generic_read(i2c, hid_dd.report_descriptor_length);
+	if (retval < 0)
+		return retval;
+
+	/* defaults in case the descriptor lacks a vendor usage */
+	hid_report.get_blob_id = REPORT_ID_GET_BLOB;
+	hid_report.write_id = REPORT_ID_WRITE;
+	hid_report.read_addr_id = REPORT_ID_READ_ADDRESS;
+	hid_report.read_data_id = REPORT_ID_READ_DATA;
+	hid_report.set_mode_id = REPORT_ID_SET_RMI_MODE;
+	hid_report.blob_size = BLOB_REPORT_SIZE;
+
+	while (ii < hid_dd.report_descriptor_length) {
+		find_reports(ii);
+		traverse_report_descriptor(&ii);
+	}
+
+	return 0;
+}
+
+/*
+ * Send the SET_REPORT feature request that switches the device from
+ * HID finger reporting to raw RMI register-access mode.
+ *
+ * Return: 0 on success or a negative errno.
+ */
+static int switch_to_rmi(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	/* propagate OOM instead of writing through a NULL buffer */
+	retval = check_buffer(&buffer.write, &buffer.write_size, 11);
+	if (retval < 0)
+		goto exit;
+
+	/* set rmi mode */
+	buffer.write[0] = hid_dd.command_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.command_register_index >> 8;
+	buffer.write[2] = (FEATURE_REPORT_TYPE << 4) | hid_report.set_mode_id;
+	buffer.write[3] = SET_REPORT_COMMAND;
+	buffer.write[4] = hid_report.set_mode_id;
+	buffer.write[5] = hid_dd.data_register_index & MASK_8BIT;
+	buffer.write[6] = hid_dd.data_register_index >> 8;
+	buffer.write[7] = 0x04;
+	buffer.write[8] = 0x00;
+	buffer.write[9] = hid_report.set_mode_id;
+	buffer.write[10] = RMI_MODE;
+
+	retval = generic_write(i2c, 11);
+
+exit:
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	return retval;
+}
+
+/*
+ * Query the set-mode feature report and return the current report mode.
+ *
+ * Return: the mode byte (e.g. RMI_MODE) on success or a negative errno.
+ * NOTE(review): the mode value and errno share the return channel;
+ * callers only compare against RMI_MODE, so this is tolerated.
+ */
+static int check_report_mode(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned short report_size;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	/* propagate OOM instead of writing through a NULL buffer */
+	retval = check_buffer(&buffer.write, &buffer.write_size, 7);
+	if (retval < 0)
+		goto exit;
+
+	buffer.write[0] = hid_dd.command_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.command_register_index >> 8;
+	buffer.write[2] = (FEATURE_REPORT_TYPE << 4) | hid_report.set_mode_id;
+	buffer.write[3] = GET_REPORT_COMMAND;
+	buffer.write[4] = hid_report.set_mode_id;
+	buffer.write[5] = hid_dd.data_register_index & MASK_8BIT;
+	buffer.write[6] = hid_dd.data_register_index >> 8;
+
+	retval = generic_write(i2c, 7);
+	if (retval < 0)
+		goto exit;
+
+	/* first read returns the 16-bit size of the feature report */
+	retval = generic_read(i2c, 2);
+	if (retval < 0)
+		goto exit;
+
+	report_size = (buffer.read[1] << 8) | buffer.read[0];
+
+	/* re-issue the request, then read the full report */
+	retval = generic_write(i2c, 7);
+	if (retval < 0)
+		goto exit;
+
+	retval = generic_read(i2c, report_size);
+	if (retval < 0)
+		goto exit;
+
+	/* byte 3 holds the mode -- assumes report_size >= 4, TODO confirm */
+	retval = buffer.read[3];
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Report mode = %d\n",
+			__func__, retval);
+
+exit:
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	return retval;
+}
+
+/*
+ * Bring up the HID/I2C interface: read the device descriptor, parse the
+ * report descriptor, power on and reset the device, drain the first
+ * input report, fetch the blob, then switch the device into RMI mode.
+ *
+ * Return: 0 on success or a negative errno.
+ */
+static int hid_i2c_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	/* propagate OOM instead of writing through a NULL buffer */
+	retval = check_buffer(&buffer.write, &buffer.write_size, 6);
+	if (retval < 0)
+		goto exit;
+
+	/* read device descriptor */
+	buffer.write[0] = bdata->device_descriptor_addr & MASK_8BIT;
+	buffer.write[1] = bdata->device_descriptor_addr >> 8;
+	retval = generic_write(i2c, 2);
+	if (retval < 0)
+		goto exit;
+	retval = generic_read(i2c, sizeof(hid_dd));
+	if (retval < 0)
+		goto exit;
+	retval = secure_memcpy((unsigned char *)&hid_dd,
+			sizeof(struct hid_device_descriptor),
+			buffer.read,
+			buffer.read_size,
+			sizeof(hid_dd));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy device descriptor data\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = parse_report_descriptor(rmi4_data);
+	if (retval < 0)
+		goto exit;
+
+	/* set power */
+	buffer.write[0] = hid_dd.command_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.command_register_index >> 8;
+	buffer.write[2] = 0x00;
+	buffer.write[3] = SET_POWER_COMMAND;
+	retval = generic_write(i2c, 4);
+	if (retval < 0)
+		goto exit;
+
+	/* reset */
+	buffer.write[0] = hid_dd.command_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.command_register_index >> 8;
+	buffer.write[2] = 0x00;
+	buffer.write[3] = RESET_COMMAND;
+	retval = generic_write(i2c, 4);
+	if (retval < 0)
+		goto exit;
+
+	/*
+	 * NOTE(review): unbounded wait for the attention line to assert
+	 * after reset; a wedged device would hang here -- consider a
+	 * timeout.
+	 */
+	while (gpio_get_value(bdata->irq_gpio))
+		msleep(20);
+
+	/* drain the reset-generated input report */
+	retval = generic_read(i2c, hid_dd.input_report_max_length);
+	if (retval < 0)
+		goto exit;
+
+	/* get blob */
+	buffer.write[0] = hid_dd.command_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.command_register_index >> 8;
+	buffer.write[2] = (FEATURE_REPORT_TYPE << 4) | hid_report.get_blob_id;
+	buffer.write[3] = 0x02;
+	buffer.write[4] = hid_dd.data_register_index & MASK_8BIT;
+	buffer.write[5] = hid_dd.data_register_index >> 8;
+
+	retval = generic_write(i2c, 6);
+	if (retval < 0)
+		goto exit;
+
+	msleep(20);
+
+	retval = generic_read(i2c, hid_report.blob_size + 3);
+	if (retval < 0)
+		goto exit;
+
+exit:
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to initialize HID/I2C interface\n",
+				__func__);
+		return retval;
+	}
+
+	retval = switch_to_rmi(rmi4_data);
+
+	return retval;
+}
+
+/*
+ * Read @length bytes from RMI register @addr over the HID/I2C transport.
+ *
+ * Issues a read-address output report, then polls for the matching
+ * read-data input report.  If the transfer fails once, the function
+ * checks whether the device fell out of RMI mode, re-initializes the
+ * interface and retries the whole operation exactly one more time.
+ *
+ * Return: number of bytes read (== @length) on success or a negative
+ * errno.
+ */
+static int synaptics_rmi4_i2c_read(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr, unsigned char *data, unsigned int length)
+{
+	int retval;
+	unsigned char retry;
+	unsigned char recover = 1;	/* one-shot recovery budget */
+	unsigned short report_length;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+	struct i2c_msg msg[] = {
+		{
+			.addr = i2c->addr,
+			.flags = 0,
+			.len = hid_dd.output_report_max_length + 2,
+		},
+		{
+			.addr = i2c->addr,
+			.flags = I2C_M_RD,
+			.len = (unsigned short)(length + 4),
+		},
+	};
+
+recover:
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	/* build the read-address output report */
+	check_buffer(&buffer.write, &buffer.write_size,
+			hid_dd.output_report_max_length + 2);
+	msg[0].buf = buffer.write;
+	buffer.write[0] = hid_dd.output_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.output_register_index >> 8;
+	buffer.write[2] = hid_dd.output_report_max_length & MASK_8BIT;
+	buffer.write[3] = hid_dd.output_report_max_length >> 8;
+	buffer.write[4] = hid_report.read_addr_id;
+	buffer.write[5] = 0x00;
+	buffer.write[6] = addr & MASK_8BIT;
+	buffer.write[7] = addr >> 8;
+	buffer.write[8] = (unsigned char)length;
+	buffer.write[9] = (unsigned char)(length >> 8);
+
+	check_buffer(&buffer.read, &buffer.read_size, length + 4);
+	msg[1].buf = buffer.read;
+
+	retval = do_i2c_transfer(i2c, &msg[0]);
+	if (retval != 0)
+		goto exit;
+
+	retry = 0;
+	do {
+		retval = do_i2c_transfer(i2c, &msg[1]);
+		if (retval == 0)
+			retval = length;
+		else
+			goto exit;
+
+		/*
+		 * NOTE(review): a report whose 16-bit length equals
+		 * input_report_max_length is taken to be the read-data
+		 * report; anything else is polled past -- confirm
+		 * against the HID/I2C framing rules.
+		 */
+		report_length = (buffer.read[1] << 8) | buffer.read[0];
+		if (report_length == hid_dd.input_report_max_length) {
+			retval = secure_memcpy(&data[0], length,
+					&buffer.read[4], buffer.read_size - 4,
+					length);
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to copy data\n",
+						__func__);
+			} else {
+				retval = length;
+			}
+			goto exit;
+		}
+
+		msleep(20);
+		retry++;
+	} while (retry < SYN_I2C_RETRY_TIMES);
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Failed to receive read report\n",
+			__func__);
+	retval = -EIO;
+
+exit:
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	/* single recovery attempt: re-init HID/I2C if we lost RMI mode */
+	if ((retval != length) && (recover == 1)) {
+		recover = 0;
+		if (check_report_mode(rmi4_data) != RMI_MODE) {
+			retval = hid_i2c_init(rmi4_data);
+			if (retval == 0)
+				goto recover;
+		}
+	}
+
+	return retval;
+}
+
+/*
+ * Write @length bytes of @data to RMI register @addr over the HID/I2C
+ * transport via a write output report.  Mirrors the one-shot recovery
+ * scheme of synaptics_rmi4_i2c_read().
+ *
+ * Return: number of bytes written (== @length) on success or a negative
+ * errno.
+ */
+static int synaptics_rmi4_i2c_write(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr, unsigned char *data, unsigned int length)
+{
+	int retval;
+	unsigned char recover = 1;	/* one-shot recovery budget */
+	unsigned int msg_length;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+	struct i2c_msg msg[] = {
+		{
+			.addr = i2c->addr,
+			.flags = 0,
+		}
+	};
+
+	/* report header (10 bytes) + payload, but at least the max report */
+	if ((length + 10) < (hid_dd.output_report_max_length + 2))
+		msg_length = hid_dd.output_report_max_length + 2;
+	else
+		msg_length = length + 10;
+
+recover:
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	check_buffer(&buffer.write, &buffer.write_size, msg_length);
+	msg[0].len = (unsigned short)msg_length;
+	msg[0].buf = buffer.write;
+	buffer.write[0] = hid_dd.output_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.output_register_index >> 8;
+	buffer.write[2] = hid_dd.output_report_max_length & MASK_8BIT;
+	buffer.write[3] = hid_dd.output_report_max_length >> 8;
+	buffer.write[4] = hid_report.write_id;
+	buffer.write[5] = 0x00;
+	buffer.write[6] = addr & MASK_8BIT;
+	buffer.write[7] = addr >> 8;
+	buffer.write[8] = (unsigned char)length;
+	buffer.write[9] = (unsigned char)(length >> 8);
+	retval = secure_memcpy(&buffer.write[10], buffer.write_size - 10,
+			&data[0], length, length);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy data\n",
+				__func__);
+	} else {
+		retval = do_i2c_transfer(i2c, msg);
+		if (retval == 0)
+			retval = length;
+	}
+
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	/* single recovery attempt: re-init HID/I2C if we lost RMI mode */
+	if ((retval != length) && (recover == 1)) {
+		recover = 0;
+		if (check_report_mode(rmi4_data) != RMI_MODE) {
+			retval = hid_i2c_init(rmi4_data);
+			if (retval == 0)
+				goto recover;
+		}
+	}
+
+	return retval;
+}
+
+/* Bus operations handed to the DSX core via hw_if.bus_access. */
+static struct synaptics_dsx_bus_access bus_access = {
+	.type = BUS_I2C,
+	.read = synaptics_rmi4_i2c_read,
+	.write = synaptics_rmi4_i2c_write,
+};
+
+static struct synaptics_dsx_hw_interface hw_if;
+
+static struct platform_device *synaptics_dsx_i2c_device;
+
+/* Release callback: frees the platform device once it is unregistered. */
+static void synaptics_rmi4_i2c_dev_release(struct device *dev)
+{
+	kfree(synaptics_dsx_i2c_device);
+
+	return;
+}
+
+/*
+ * Probe: allocate the DSX platform device, gather board data (from the
+ * device tree when available) and register the platform device that the
+ * DSX core driver binds against.
+ *
+ * Since the platform device is freed by its release callback only after
+ * a successful registration, every error path after the allocation must
+ * free it by hand.
+ *
+ * Return: 0 on success or a negative errno.
+ */
+static int synaptics_rmi4_i2c_probe(struct i2c_client *client,
+		const struct i2c_device_id *dev_id)
+{
+	int retval;
+
+	if (!i2c_check_functionality(client->adapter,
+			I2C_FUNC_SMBUS_BYTE_DATA)) {
+		dev_err(&client->dev,
+				"%s: SMBus byte data commands not supported by host\n",
+				__func__);
+		return -EIO;
+	}
+
+	synaptics_dsx_i2c_device = kzalloc(
+			sizeof(struct platform_device),
+			GFP_KERNEL);
+	if (!synaptics_dsx_i2c_device) {
+		dev_err(&client->dev,
+				"%s: Failed to allocate memory for synaptics_dsx_i2c_device\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+#ifdef CONFIG_OF
+	if (client->dev.of_node) {
+		hw_if.board_data = devm_kzalloc(&client->dev,
+				sizeof(struct synaptics_dsx_board_data),
+				GFP_KERNEL);
+		if (!hw_if.board_data) {
+			dev_err(&client->dev,
+					"%s: Failed to allocate memory for board data\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_free_device;
+		}
+		hw_if.board_data->cap_button_map = devm_kzalloc(&client->dev,
+				sizeof(struct synaptics_dsx_button_map),
+				GFP_KERNEL);
+		if (!hw_if.board_data->cap_button_map) {
+			dev_err(&client->dev,
+					"%s: Failed to allocate memory for 0D button map\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_free_device;
+		}
+		hw_if.board_data->vir_button_map = devm_kzalloc(&client->dev,
+				sizeof(struct synaptics_dsx_button_map),
+				GFP_KERNEL);
+		if (!hw_if.board_data->vir_button_map) {
+			dev_err(&client->dev,
+					"%s: Failed to allocate memory for virtual button map\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_free_device;
+		}
+		/* do not continue with half-parsed board data */
+		retval = parse_dt(&client->dev, hw_if.board_data);
+		if (retval < 0)
+			goto err_free_device;
+	}
+#else
+	hw_if.board_data = client->dev.platform_data;
+#endif
+
+	hw_if.bus_access = &bus_access;
+	hw_if.bl_hw_init = switch_to_rmi;
+	hw_if.ui_hw_init = hid_i2c_init;
+
+	synaptics_dsx_i2c_device->name = PLATFORM_DRIVER_NAME;
+	synaptics_dsx_i2c_device->id = 0;
+	synaptics_dsx_i2c_device->num_resources = 0;
+	synaptics_dsx_i2c_device->dev.parent = &client->dev;
+	synaptics_dsx_i2c_device->dev.platform_data = &hw_if;
+	synaptics_dsx_i2c_device->dev.release = synaptics_rmi4_i2c_dev_release;
+
+	retval = platform_device_register(synaptics_dsx_i2c_device);
+	if (retval) {
+		dev_err(&client->dev,
+				"%s: Failed to register platform device\n",
+				__func__);
+		retval = -ENODEV;
+		goto err_free_device;
+	}
+
+	return 0;
+
+err_free_device:
+	/* never registered, so the release callback will not run */
+	kfree(synaptics_dsx_i2c_device);
+	synaptics_dsx_i2c_device = NULL;
+	return retval;
+}
+
+/*
+ * Remove: release the shared transfer buffers and unregister the
+ * platform device (whose release callback frees it).  The stale
+ * buffer sizes are harmless here since the module is going away.
+ */
+static int synaptics_rmi4_i2c_remove(struct i2c_client *client)
+{
+	if (buffer.read_size)
+		kfree(buffer.read);
+
+	if (buffer.write_size)
+		kfree(buffer.write);
+
+	platform_device_unregister(synaptics_dsx_i2c_device);
+
+	return 0;
+}
+
+/* Legacy (non-DT) I2C device ID table. */
+static const struct i2c_device_id synaptics_rmi4_id_table[] = {
+	{I2C_DRIVER_NAME, 0},
+	{},
+};
+MODULE_DEVICE_TABLE(i2c, synaptics_rmi4_id_table);
+
+#ifdef CONFIG_OF
+/* Device-tree match table; const, as the driver core only reads it. */
+static const struct of_device_id synaptics_rmi4_of_match_table[] = {
+	{
+		.compatible = "synaptics,dsx-rmi-hid-i2c",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, synaptics_rmi4_of_match_table);
+#else
+#define synaptics_rmi4_of_match_table NULL
+#endif
+
+/* I2C driver glue for the HID/I2C transport. */
+static struct i2c_driver synaptics_rmi4_i2c_driver = {
+	.driver = {
+		.name = I2C_DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = synaptics_rmi4_of_match_table,
+	},
+	.probe = synaptics_rmi4_i2c_probe,
+	.remove = synaptics_rmi4_i2c_remove,
+	.id_table = synaptics_rmi4_id_table,
+};
+
+/*
+ * Register the I2C transport with the I2C core; called by the DSX
+ * core driver.  Returns 0 on success or a negative errno.
+ */
+int synaptics_rmi4_bus_init(void)
+{
+	return i2c_add_driver(&synaptics_rmi4_i2c_driver);
+}
+
+/* Unregister the I2C transport from the I2C core. */
+void synaptics_rmi4_bus_exit(void)
+{
+	i2c_del_driver(&synaptics_rmi4_i2c_driver);
+}
+EXPORT_SYMBOL(synaptics_rmi4_bus_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX I2C Bus Support Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_spi.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_spi.c
new file mode 100644
index 0000000..e2dafbb
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_spi.c
@@ -0,0 +1,712 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/types.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+#define SPI_READ 0x80
+#define SPI_WRITE 0x00
+
+static unsigned char *buf;
+
+static struct spi_transfer *xfer;
+
+#ifdef CONFIG_OF
+/*
+ * parse_dt() - populate synaptics_dsx_board_data from the device tree node.
+ *
+ * Standalone optional properties fall back to defaults (0 or -1) when
+ * absent; sub-properties that are mandatory once their group property is
+ * present (e.g. power-on-state when power-gpio exists) cause a negative
+ * errno return.  Boolean flags (swap-axes, x-flip, y-flip) are true when
+ * the property exists at all.
+ *
+ * Returns 0 on success or a negative errno on failure.
+ */
+static int parse_dt(struct device *dev, struct synaptics_dsx_board_data *bdata)
+{
+	int retval;
+	u32 value;
+	const char *name;
+	struct property *prop;
+	struct device_node *np = dev->of_node;
+
+	bdata->irq_gpio = of_get_named_gpio_flags(np,
+			"synaptics,irq-gpio", 0,
+			(enum of_gpio_flags *)&bdata->irq_flags);
+
+	retval = of_property_read_u32(np, "synaptics,irq-on-state",
+			&value);
+	if (retval < 0)
+		bdata->irq_on_state = 0;
+	else
+		bdata->irq_on_state = value;
+
+	retval = of_property_read_string(np, "synaptics,pwr-reg-name", &name);
+	if (retval < 0)
+		bdata->pwr_reg_name = NULL;
+	else
+		bdata->pwr_reg_name = name;
+
+	retval = of_property_read_string(np, "synaptics,bus-reg-name", &name);
+	if (retval < 0)
+		bdata->bus_reg_name = NULL;
+	else
+		bdata->bus_reg_name = name;
+
+	prop = of_find_property(np, "synaptics,power-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->power_gpio = of_get_named_gpio_flags(np,
+				"synaptics,power-gpio", 0, NULL);
+		retval = of_property_read_u32(np, "synaptics,power-on-state",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,power-on-state property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->power_on_state = value;
+		}
+	} else {
+		bdata->power_gpio = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,power-delay-ms", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,power-delay-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,power-delay-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->power_delay_ms = value;
+		}
+	} else {
+		bdata->power_delay_ms = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,reset-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->reset_gpio = of_get_named_gpio_flags(np,
+				"synaptics,reset-gpio", 0, NULL);
+		retval = of_property_read_u32(np, "synaptics,reset-on-state",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-on-state property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_on_state = value;
+		}
+		retval = of_property_read_u32(np, "synaptics,reset-active-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-active-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_active_ms = value;
+		}
+	} else {
+		bdata->reset_gpio = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,reset-delay-ms", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,reset-delay-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-delay-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_delay_ms = value;
+		}
+	} else {
+		bdata->reset_delay_ms = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,byte-delay-us", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,byte-delay-us",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,byte-delay-us property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->byte_delay_us = value;
+		}
+	} else {
+		bdata->byte_delay_us = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,block-delay-us", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,block-delay-us",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,block-delay-us property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->block_delay_us = value;
+		}
+	} else {
+		bdata->block_delay_us = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,address-delay-us", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,address-delay-us",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,address-delay-us property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->addr_delay_us = value;
+		}
+	} else {
+		bdata->addr_delay_us = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,max-y-for-2d", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,max-y-for-2d",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,max-y-for-2d property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->max_y_for_2d = value;
+		}
+	} else {
+		bdata->max_y_for_2d = -1;
+	}
+
+	/*
+	 * Presence-only boolean flags.  Use an explicit NULL test instead of
+	 * the invalid relational comparison of a pointer against 0
+	 * (prop > 0), which is non-portable C and flagged by checkpatch.
+	 */
+	prop = of_find_property(np, "synaptics,swap-axes", NULL);
+	bdata->swap_axes = prop != NULL;
+
+	prop = of_find_property(np, "synaptics,x-flip", NULL);
+	bdata->x_flip = prop != NULL;
+
+	prop = of_find_property(np, "synaptics,y-flip", NULL);
+	bdata->y_flip = prop != NULL;
+
+	prop = of_find_property(np, "synaptics,ub-i2c-addr", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,ub-i2c-addr",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,ub-i2c-addr property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->ub_i2c_addr = (unsigned short)value;
+		}
+	} else {
+		bdata->ub_i2c_addr = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,cap-button-codes", NULL);
+	if (prop && prop->length) {
+		bdata->cap_button_map->map = devm_kzalloc(dev,
+				prop->length,
+				GFP_KERNEL);
+		if (!bdata->cap_button_map->map)
+			return -ENOMEM;
+		bdata->cap_button_map->nbuttons = prop->length / sizeof(u32);
+		retval = of_property_read_u32_array(np,
+				"synaptics,cap-button-codes",
+				bdata->cap_button_map->map,
+				bdata->cap_button_map->nbuttons);
+		if (retval < 0) {
+			bdata->cap_button_map->nbuttons = 0;
+			bdata->cap_button_map->map = NULL;
+		}
+	} else {
+		bdata->cap_button_map->nbuttons = 0;
+		bdata->cap_button_map->map = NULL;
+	}
+
+	prop = of_find_property(np, "synaptics,vir-button-codes", NULL);
+	if (prop && prop->length) {
+		bdata->vir_button_map->map = devm_kzalloc(dev,
+				prop->length,
+				GFP_KERNEL);
+		if (!bdata->vir_button_map->map)
+			return -ENOMEM;
+		/* each virtual button is described by 5 consecutive u32s */
+		bdata->vir_button_map->nbuttons = prop->length / sizeof(u32);
+		bdata->vir_button_map->nbuttons /= 5;
+		retval = of_property_read_u32_array(np,
+				"synaptics,vir-button-codes",
+				bdata->vir_button_map->map,
+				bdata->vir_button_map->nbuttons * 5);
+		if (retval < 0) {
+			bdata->vir_button_map->nbuttons = 0;
+			bdata->vir_button_map->map = NULL;
+		}
+	} else {
+		bdata->vir_button_map->nbuttons = 0;
+		bdata->vir_button_map->map = NULL;
+	}
+
+	return 0;
+}
+#endif
+
+/*
+ * Lazily grow the file-scope SPI scratch buffer (buf) and spi_transfer
+ * array (xfer).  Static high-water marks persist across calls so buffers
+ * are only reallocated when a larger request arrives.  Callers serialize
+ * through rmi4_io_ctrl_mutex, which makes the unlocked statics safe --
+ * NOTE(review): confirm no path reaches here without that mutex held.
+ */
+static int synaptics_rmi4_spi_alloc_buf(struct synaptics_rmi4_data *rmi4_data,
+		unsigned int size, unsigned int count)
+{
+	static unsigned int buf_size;
+	static unsigned int xfer_count;
+
+	if (size > buf_size) {
+		if (buf_size)
+			kfree(buf);
+		buf = kmalloc(size, GFP_KERNEL);
+		if (!buf) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for buf\n",
+					__func__);
+			/* reset so the next call retries the allocation */
+			buf_size = 0;
+			return -ENOMEM;
+		}
+		buf_size = size;
+	}
+
+	if (count > xfer_count) {
+		if (xfer_count)
+			kfree(xfer);
+		xfer = kcalloc(count, sizeof(struct spi_transfer), GFP_KERNEL);
+		if (!xfer) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for xfer\n",
+					__func__);
+			xfer_count = 0;
+			return -ENOMEM;
+		}
+		xfer_count = count;
+	} else {
+		/* reused transfers must be zeroed before being re-queued */
+		memset(xfer, 0, count * sizeof(struct spi_transfer));
+	}
+
+	return 0;
+}
+
+/*
+ * Select the RMI register page containing @addr by writing the page-select
+ * register over SPI.  The bus write is skipped when the target page matches
+ * the cached current_page (only bit 7 of the page is compared here).
+ * Returns PAGE_SELECT_LEN on success (including the skipped case) or a
+ * negative error code.  Called with rmi4_io_ctrl_mutex held by the I/O
+ * paths below.
+ */
+static int synaptics_rmi4_spi_set_page(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr)
+{
+	int retval;
+	unsigned int index;
+	unsigned int byte_count = PAGE_SELECT_LEN + 1;
+	unsigned char page;
+	struct spi_message msg;
+	struct spi_device *spi = to_spi_device(rmi4_data->pdev->dev.parent);
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	page = ((addr >> 8) & MASK_8BIT);
+	if ((page >> 7) == (rmi4_data->current_page >> 7))
+		return PAGE_SELECT_LEN;
+
+	spi_message_init(&msg);
+
+	retval = synaptics_rmi4_spi_alloc_buf(rmi4_data, byte_count,
+			byte_count);
+	if (retval < 0)
+		return retval;
+
+	/* write opcode, page-select register address (0xFF), new page */
+	buf[0] = SPI_WRITE;
+	buf[1] = MASK_8BIT;
+	buf[2] = page;
+
+	if (bdata->byte_delay_us == 0) {
+		/* single burst transfer */
+		xfer[0].len = byte_count;
+		xfer[0].tx_buf = &buf[0];
+		if (bdata->block_delay_us)
+			xfer[0].delay_usecs = bdata->block_delay_us;
+		spi_message_add_tail(&xfer[0], &msg);
+	} else {
+		/* one transfer per byte with inter-byte delays */
+		for (index = 0; index < byte_count; index++) {
+			xfer[index].len = 1;
+			xfer[index].tx_buf = &buf[index];
+			if (index == 1)
+				xfer[index].delay_usecs = bdata->addr_delay_us;
+			else
+				xfer[index].delay_usecs = bdata->byte_delay_us;
+			spi_message_add_tail(&xfer[index], &msg);
+		}
+		if (bdata->block_delay_us)
+			xfer[index - 1].delay_usecs = bdata->block_delay_us;
+	}
+
+	retval = spi_sync(spi, &msg);
+	if (retval == 0) {
+		rmi4_data->current_page = page;
+		retval = PAGE_SELECT_LEN;
+	} else {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to complete SPI transfer, error = %d\n",
+				__func__, retval);
+	}
+
+	return retval;
+}
+
+/*
+ * Read @length bytes from register @addr into @data.  Selects the register
+ * page, then issues either a two-transfer message (2-byte address out,
+ * data in) or, when byte_delay_us is configured, one transfer per byte
+ * with inter-byte delays.  rmi4_io_ctrl_mutex is held across page select
+ * and transfer.  Returns the number of bytes read or a negative error.
+ */
+static int synaptics_rmi4_spi_read(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr, unsigned char *data, unsigned int length)
+{
+	int retval;
+	unsigned int index;
+	unsigned int byte_count = length + ADDRESS_LEN;
+	unsigned char txbuf[ADDRESS_LEN];
+	struct spi_message msg;
+	struct spi_device *spi = to_spi_device(rmi4_data->pdev->dev.parent);
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	spi_message_init(&msg);
+
+	/* address high byte carries the read opcode bit */
+	txbuf[0] = (addr >> 8) | SPI_READ;
+	txbuf[1] = addr & MASK_8BIT;
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	retval = synaptics_rmi4_spi_set_page(rmi4_data, addr);
+	if (retval != PAGE_SELECT_LEN) {
+		mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+		return -EIO;
+	}
+
+	if (bdata->byte_delay_us == 0) {
+		retval = synaptics_rmi4_spi_alloc_buf(rmi4_data, length,
+				2);
+	} else {
+		retval = synaptics_rmi4_spi_alloc_buf(rmi4_data, length,
+				byte_count);
+	}
+	if (retval < 0) {
+		mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+		return retval;
+	}
+
+	if (bdata->byte_delay_us == 0) {
+		/* transfer 0: address out; transfer 1: payload in */
+		xfer[0].len = ADDRESS_LEN;
+		xfer[0].tx_buf = &txbuf[0];
+		spi_message_add_tail(&xfer[0], &msg);
+		xfer[1].len = length;
+		xfer[1].rx_buf = &buf[0];
+		if (bdata->block_delay_us)
+			xfer[1].delay_usecs = bdata->block_delay_us;
+		spi_message_add_tail(&xfer[1], &msg);
+	} else {
+		/* per-byte transfers: first 2 bytes tx address, rest rx data */
+		for (index = 0; index < byte_count; index++) {
+			xfer[index].len = 1;
+			if (index < ADDRESS_LEN)
+				xfer[index].tx_buf = &txbuf[index];
+			else
+				xfer[index].rx_buf = &buf[index - ADDRESS_LEN];
+			if (index == 1)
+				xfer[index].delay_usecs = bdata->addr_delay_us;
+			else
+				xfer[index].delay_usecs = bdata->byte_delay_us;
+			spi_message_add_tail(&xfer[index], &msg);
+		}
+		if (bdata->block_delay_us)
+			xfer[index - 1].delay_usecs = bdata->block_delay_us;
+	}
+
+	retval = spi_sync(spi, &msg);
+	if (retval == 0) {
+		/* copy out of the shared rx buffer into the caller's buffer */
+		retval = secure_memcpy(data, length, buf, length, length);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to copy data\n",
+					__func__);
+		} else {
+			retval = length;
+		}
+	} else {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to complete SPI transfer, error = %d\n",
+				__func__, retval);
+	}
+
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	return retval;
+}
+
+/*
+ * Write @length bytes from @data to register @addr.  Builds a single
+ * tx buffer of [address(2) | payload] and sends it either as one burst or,
+ * when byte_delay_us is configured, as per-byte transfers with delays.
+ * rmi4_io_ctrl_mutex is held across page select and transfer.  Returns
+ * the number of bytes written or a negative error.
+ */
+static int synaptics_rmi4_spi_write(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr, unsigned char *data, unsigned int length)
+{
+	int retval;
+	unsigned int index;
+	unsigned int byte_count = length + ADDRESS_LEN;
+	struct spi_message msg;
+	struct spi_device *spi = to_spi_device(rmi4_data->pdev->dev.parent);
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	spi_message_init(&msg);
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	retval = synaptics_rmi4_spi_set_page(rmi4_data, addr);
+	if (retval != PAGE_SELECT_LEN) {
+		mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+		return -EIO;
+	}
+
+	if (bdata->byte_delay_us == 0) {
+		retval = synaptics_rmi4_spi_alloc_buf(rmi4_data, byte_count,
+				1);
+	} else {
+		retval = synaptics_rmi4_spi_alloc_buf(rmi4_data, byte_count,
+				byte_count);
+	}
+	if (retval < 0) {
+		mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+		return retval;
+	}
+
+	/* address high byte with the read bit cleared (write opcode) */
+	buf[0] = (addr >> 8) & ~SPI_READ;
+	buf[1] = addr & MASK_8BIT;
+	retval = secure_memcpy(&buf[ADDRESS_LEN],
+			byte_count - ADDRESS_LEN, data, length, length);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy data\n",
+				__func__);
+		mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+		return retval;
+	}
+
+	if (bdata->byte_delay_us == 0) {
+		/* single burst transfer */
+		xfer[0].len = byte_count;
+		xfer[0].tx_buf = &buf[0];
+		if (bdata->block_delay_us)
+			xfer[0].delay_usecs = bdata->block_delay_us;
+		/* NOTE(review): 'xfer' here == '&xfer[0]' used elsewhere */
+		spi_message_add_tail(xfer, &msg);
+	} else {
+		/* one transfer per byte with inter-byte delays */
+		for (index = 0; index < byte_count; index++) {
+			xfer[index].len = 1;
+			xfer[index].tx_buf = &buf[index];
+			if (index == 1)
+				xfer[index].delay_usecs = bdata->addr_delay_us;
+			else
+				xfer[index].delay_usecs = bdata->byte_delay_us;
+			spi_message_add_tail(&xfer[index], &msg);
+		}
+		if (bdata->block_delay_us)
+			xfer[index - 1].delay_usecs = bdata->block_delay_us;
+	}
+
+	retval = spi_sync(spi, &msg);
+	if (retval == 0) {
+		retval = length;
+	} else {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to complete SPI transfer, error = %d\n",
+				__func__, retval);
+	}
+
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	return retval;
+}
+
+/* SPI read/write ops handed to the DSX core via hw_if.bus_access. */
+static struct synaptics_dsx_bus_access bus_access = {
+	.type = BUS_SPI,
+	.read = synaptics_rmi4_spi_read,
+	.write = synaptics_rmi4_spi_write,
+};
+
+/* Hardware interface passed to the core as platform data. */
+static struct synaptics_dsx_hw_interface hw_if;
+
+static struct platform_device *synaptics_dsx_spi_device;
+
+/* Device release callback: frees the platform device allocated in probe. */
+static void synaptics_rmi4_spi_dev_release(struct device *dev)
+{
+	kfree(synaptics_dsx_spi_device);
+
+	return;
+}
+
+/*
+ * SPI probe: validate the controller, gather board data (from DT or
+ * platform data), configure the SPI device (8 bits/word, mode 3), and
+ * register the platform device that the DSX core driver binds to.
+ */
+static int synaptics_rmi4_spi_probe(struct spi_device *spi)
+{
+	int retval;
+
+	/* this driver requires full-duplex SPI transfers */
+	if (spi->master->flags & SPI_MASTER_HALF_DUPLEX) {
+		dev_err(&spi->dev,
+				"%s: Full duplex not supported by host\n",
+				__func__);
+		return -EIO;
+	}
+
+	synaptics_dsx_spi_device = kzalloc(
+			sizeof(struct platform_device),
+			GFP_KERNEL);
+	if (!synaptics_dsx_spi_device) {
+		dev_err(&spi->dev,
+				"%s: Failed to allocate memory for synaptics_dsx_spi_device\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	/*
+	 * NOTE(review): the error returns below leak synaptics_dsx_spi_device
+	 * (kfree only happens via the release callback after a successful
+	 * platform_device_register); parse_dt()'s return value is also
+	 * ignored -- consider handling both.
+	 */
+#ifdef CONFIG_OF
+	if (spi->dev.of_node) {
+		hw_if.board_data = devm_kzalloc(&spi->dev,
+				sizeof(struct synaptics_dsx_board_data),
+				GFP_KERNEL);
+		if (!hw_if.board_data) {
+			dev_err(&spi->dev,
+					"%s: Failed to allocate memory for board data\n",
+					__func__);
+			return -ENOMEM;
+		}
+		hw_if.board_data->cap_button_map = devm_kzalloc(&spi->dev,
+				sizeof(struct synaptics_dsx_button_map),
+				GFP_KERNEL);
+		if (!hw_if.board_data->cap_button_map) {
+			dev_err(&spi->dev,
+					"%s: Failed to allocate memory for 0D button map\n",
+					__func__);
+			return -ENOMEM;
+		}
+		hw_if.board_data->vir_button_map = devm_kzalloc(&spi->dev,
+				sizeof(struct synaptics_dsx_button_map),
+				GFP_KERNEL);
+		if (!hw_if.board_data->vir_button_map) {
+			dev_err(&spi->dev,
+					"%s: Failed to allocate memory for virtual button map\n",
+					__func__);
+			return -ENOMEM;
+		}
+		parse_dt(&spi->dev, hw_if.board_data);
+	}
+#else
+	hw_if.board_data = spi->dev.platform_data;
+#endif
+
+	hw_if.bus_access = &bus_access;
+
+	spi->bits_per_word = 8;
+	spi->mode = SPI_MODE_3;
+
+	retval = spi_setup(spi);
+	if (retval < 0) {
+		dev_err(&spi->dev,
+				"%s: Failed to perform SPI setup\n",
+				__func__);
+		return retval;
+	}
+
+	synaptics_dsx_spi_device->name = PLATFORM_DRIVER_NAME;
+	synaptics_dsx_spi_device->id = 0;
+	synaptics_dsx_spi_device->num_resources = 0;
+	synaptics_dsx_spi_device->dev.parent = &spi->dev;
+	synaptics_dsx_spi_device->dev.platform_data = &hw_if;
+	synaptics_dsx_spi_device->dev.release = synaptics_rmi4_spi_dev_release;
+
+	retval = platform_device_register(synaptics_dsx_spi_device);
+	if (retval) {
+		dev_err(&spi->dev,
+				"%s: Failed to register platform device\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/*
+ * SPI teardown: unregister the platform device (its release callback frees
+ * it).  The shared buf/xfer buffers are freed in synaptics_rmi4_bus_exit().
+ */
+static int synaptics_rmi4_spi_remove(struct spi_device *spi)
+{
+	platform_device_unregister(synaptics_dsx_spi_device);
+
+	return 0;
+}
+
+static const struct spi_device_id synaptics_rmi4_id_table[] = {
+	{SPI_DRIVER_NAME, 0},
+	{},
+};
+MODULE_DEVICE_TABLE(spi, synaptics_rmi4_id_table);
+
+#ifdef CONFIG_OF
+static struct of_device_id synaptics_rmi4_of_match_table[] = {
+	{
+		.compatible = "synaptics,dsx-spi",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, synaptics_rmi4_of_match_table);
+#else
+#define synaptics_rmi4_of_match_table NULL
+#endif
+
+static struct spi_driver synaptics_rmi4_spi_driver = {
+	.driver = {
+		.name = SPI_DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = synaptics_rmi4_of_match_table,
+	},
+	.probe = synaptics_rmi4_spi_probe,
+	.remove = synaptics_rmi4_spi_remove,
+	.id_table = synaptics_rmi4_id_table,
+};
+
+
+/* Register the SPI bus driver; called by the DSX core at module init. */
+int synaptics_rmi4_bus_init(void)
+{
+	return spi_register_driver(&synaptics_rmi4_spi_driver);
+}
+EXPORT_SYMBOL(synaptics_rmi4_bus_init);
+
+/*
+ * Unregister the SPI bus driver and release the shared scratch buffers
+ * grown by synaptics_rmi4_spi_alloc_buf() (kfree(NULL) is a no-op).
+ */
+void synaptics_rmi4_bus_exit(void)
+{
+	kfree(buf);
+
+	kfree(xfer);
+
+	spi_unregister_driver(&synaptics_rmi4_spi_driver);
+
+	return;
+}
+EXPORT_SYMBOL(synaptics_rmi4_bus_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX SPI Bus Support Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_test_reporting.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_test_reporting.c
new file mode 100644
index 0000000..606e737
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_test_reporting.c
@@ -0,0 +1,5356 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/ctype.h>
+#include <linux/hrtimer.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+#define SYSFS_FOLDER_NAME "f54"
+
+#define GET_REPORT_TIMEOUT_S 3
+#define CALIBRATION_TIMEOUT_S 10
+#define COMMAND_TIMEOUT_100MS 20
+
+#define NO_SLEEP_OFF (0 << 2)
+#define NO_SLEEP_ON (1 << 2)
+
+#define STATUS_IDLE 0
+#define STATUS_BUSY 1
+#define STATUS_ERROR 2
+
+#define REPORT_INDEX_OFFSET 1
+#define REPORT_DATA_OFFSET 3
+
+#define SENSOR_RX_MAPPING_OFFSET 1
+#define SENSOR_TX_MAPPING_OFFSET 2
+
+#define COMMAND_GET_REPORT 1
+#define COMMAND_FORCE_CAL 2
+#define COMMAND_FORCE_UPDATE 4
+
+#define CONTROL_NO_AUTO_CAL 1
+
+#define CONTROL_0_SIZE 1
+#define CONTROL_1_SIZE 1
+#define CONTROL_2_SIZE 2
+#define CONTROL_3_SIZE 1
+#define CONTROL_4_6_SIZE 3
+#define CONTROL_7_SIZE 1
+#define CONTROL_8_9_SIZE 3
+#define CONTROL_10_SIZE 1
+#define CONTROL_11_SIZE 2
+#define CONTROL_12_13_SIZE 2
+#define CONTROL_14_SIZE 1
+#define CONTROL_15_SIZE 1
+#define CONTROL_16_SIZE 1
+#define CONTROL_17_SIZE 1
+#define CONTROL_18_SIZE 1
+#define CONTROL_19_SIZE 1
+#define CONTROL_20_SIZE 1
+#define CONTROL_21_SIZE 2
+#define CONTROL_22_26_SIZE 7
+#define CONTROL_27_SIZE 1
+#define CONTROL_28_SIZE 2
+#define CONTROL_29_SIZE 1
+#define CONTROL_30_SIZE 1
+#define CONTROL_31_SIZE 1
+#define CONTROL_32_35_SIZE 8
+#define CONTROL_36_SIZE 1
+#define CONTROL_37_SIZE 1
+#define CONTROL_38_SIZE 1
+#define CONTROL_39_SIZE 1
+#define CONTROL_40_SIZE 1
+#define CONTROL_41_SIZE 1
+#define CONTROL_42_SIZE 2
+#define CONTROL_43_54_SIZE 13
+#define CONTROL_55_56_SIZE 2
+#define CONTROL_57_SIZE 1
+#define CONTROL_58_SIZE 1
+#define CONTROL_59_SIZE 2
+#define CONTROL_60_62_SIZE 3
+#define CONTROL_63_SIZE 1
+#define CONTROL_64_67_SIZE 4
+#define CONTROL_68_73_SIZE 8
+#define CONTROL_70_73_SIZE 6
+#define CONTROL_74_SIZE 2
+#define CONTROL_75_SIZE 1
+#define CONTROL_76_SIZE 1
+#define CONTROL_77_78_SIZE 2
+#define CONTROL_79_83_SIZE 5
+#define CONTROL_84_85_SIZE 2
+#define CONTROL_86_SIZE 1
+#define CONTROL_87_SIZE 1
+#define CONTROL_88_SIZE 1
+#define CONTROL_89_SIZE 1
+#define CONTROL_90_SIZE 1
+#define CONTROL_91_SIZE 1
+#define CONTROL_92_SIZE 1
+#define CONTROL_93_SIZE 1
+#define CONTROL_94_SIZE 1
+#define CONTROL_95_SIZE 1
+#define CONTROL_96_SIZE 1
+#define CONTROL_97_SIZE 1
+#define CONTROL_98_SIZE 1
+#define CONTROL_99_SIZE 1
+#define CONTROL_100_SIZE 1
+#define CONTROL_101_SIZE 1
+#define CONTROL_102_SIZE 1
+#define CONTROL_103_SIZE 1
+#define CONTROL_104_SIZE 1
+#define CONTROL_105_SIZE 1
+#define CONTROL_106_SIZE 1
+#define CONTROL_107_SIZE 1
+#define CONTROL_108_SIZE 1
+#define CONTROL_109_SIZE 1
+#define CONTROL_110_SIZE 1
+#define CONTROL_111_SIZE 1
+#define CONTROL_112_SIZE 1
+#define CONTROL_113_SIZE 1
+#define CONTROL_114_SIZE 1
+#define CONTROL_115_SIZE 1
+#define CONTROL_116_SIZE 1
+#define CONTROL_117_SIZE 1
+#define CONTROL_118_SIZE 1
+#define CONTROL_119_SIZE 1
+#define CONTROL_120_SIZE 1
+#define CONTROL_121_SIZE 1
+#define CONTROL_122_SIZE 1
+#define CONTROL_123_SIZE 1
+#define CONTROL_124_SIZE 1
+#define CONTROL_125_SIZE 1
+#define CONTROL_126_SIZE 1
+#define CONTROL_127_SIZE 1
+#define CONTROL_128_SIZE 1
+#define CONTROL_129_SIZE 1
+#define CONTROL_130_SIZE 1
+#define CONTROL_131_SIZE 1
+#define CONTROL_132_SIZE 1
+#define CONTROL_133_SIZE 1
+#define CONTROL_134_SIZE 1
+#define CONTROL_135_SIZE 1
+#define CONTROL_136_SIZE 1
+#define CONTROL_137_SIZE 1
+#define CONTROL_138_SIZE 1
+#define CONTROL_139_SIZE 1
+#define CONTROL_140_SIZE 1
+#define CONTROL_141_SIZE 1
+#define CONTROL_142_SIZE 1
+#define CONTROL_143_SIZE 1
+#define CONTROL_144_SIZE 1
+#define CONTROL_145_SIZE 1
+#define CONTROL_146_SIZE 1
+#define CONTROL_147_SIZE 1
+#define CONTROL_148_SIZE 1
+#define CONTROL_149_SIZE 1
+#define CONTROL_150_SIZE 1
+#define CONTROL_151_SIZE 1
+#define CONTROL_152_SIZE 1
+#define CONTROL_153_SIZE 1
+#define CONTROL_154_SIZE 1
+#define CONTROL_155_SIZE 1
+#define CONTROL_156_SIZE 1
+#define CONTROL_157_158_SIZE 2
+#define CONTROL_163_SIZE 1
+#define CONTROL_165_SIZE 1
+#define CONTROL_166_SIZE 1
+#define CONTROL_167_SIZE 1
+#define CONTROL_168_SIZE 1
+#define CONTROL_169_SIZE 1
+#define CONTROL_171_SIZE 1
+#define CONTROL_172_SIZE 1
+#define CONTROL_173_SIZE 1
+#define CONTROL_174_SIZE 1
+#define CONTROL_175_SIZE 1
+#define CONTROL_176_SIZE 1
+#define CONTROL_177_178_SIZE 2
+#define CONTROL_179_SIZE 1
+#define CONTROL_182_SIZE 1
+#define CONTROL_183_SIZE 1
+#define CONTROL_185_SIZE 1
+#define CONTROL_186_SIZE 1
+#define CONTROL_187_SIZE 1
+#define CONTROL_188_SIZE 1
+
+#define HIGH_RESISTANCE_DATA_SIZE 6
+#define FULL_RAW_CAP_MIN_MAX_DATA_SIZE 4
+#define TRX_OPEN_SHORT_DATA_SIZE 7
+
+#define concat(a, b) a##b
+
+#define attrify(propname) (&dev_attr_##propname.attr)
+
+#define show_prototype(propname)\
+static ssize_t concat(test_sysfs, _##propname##_show)(\
+		struct device *dev,\
+		struct device_attribute *attr,\
+		char *buf);\
+\
+static struct device_attribute dev_attr_##propname =\
+		__ATTR(propname, 0444,\
+		concat(test_sysfs, _##propname##_show),\
+		synaptics_rmi4_store_error);
+
+#define store_prototype(propname)\
+static ssize_t concat(test_sysfs, _##propname##_store)(\
+		struct device *dev,\
+		struct device_attribute *attr,\
+		const char *buf, size_t count);\
+\
+static struct device_attribute dev_attr_##propname =\
+		__ATTR(propname, 0220,\
+		synaptics_rmi4_show_error,\
+		concat(test_sysfs, _##propname##_store));
+
+#define show_store_prototype(propname)\
+static ssize_t concat(test_sysfs, _##propname##_show)(\
+		struct device *dev,\
+		struct device_attribute *attr,\
+		char *buf);\
+\
+static ssize_t concat(test_sysfs, _##propname##_store)(\
+		struct device *dev,\
+		struct device_attribute *attr,\
+		const char *buf, size_t count);\
+\
+static struct device_attribute dev_attr_##propname =\
+		__ATTR(propname, 0664,\
+		concat(test_sysfs, _##propname##_show),\
+		concat(test_sysfs, _##propname##_store));
+
+#define disable_cbc(ctrl_num)\
+do {\
+	retval = synaptics_rmi4_reg_read(rmi4_data,\
+			f54->control.ctrl_num->address,\
+			f54->control.ctrl_num->data,\
+			sizeof(f54->control.ctrl_num->data));\
+	if (retval < 0) {\
+		dev_err(rmi4_data->pdev->dev.parent,\
+				"%s: Failed to disable CBC (" #ctrl_num ")\n",\
+				__func__);\
+		return retval;\
+	} \
+	f54->control.ctrl_num->cbc_tx_carrier_selection = 0;\
+	retval = synaptics_rmi4_reg_write(rmi4_data,\
+			f54->control.ctrl_num->address,\
+			f54->control.ctrl_num->data,\
+			sizeof(f54->control.ctrl_num->data));\
+	if (retval < 0) {\
+		dev_err(rmi4_data->pdev->dev.parent,\
+				"%s: Failed to disable CBC (" #ctrl_num ")\n",\
+				__func__);\
+		return retval;\
+	} \
+} while (0)
+
+enum f54_report_types {
+	F54_8BIT_IMAGE = 1,
+	F54_16BIT_IMAGE = 2,
+	F54_RAW_16BIT_IMAGE = 3,
+	F54_HIGH_RESISTANCE = 4,
+	F54_TX_TO_TX_SHORTS = 5,
+	F54_RX_TO_RX_SHORTS_1 = 7,
+	F54_TRUE_BASELINE = 9,
+	F54_FULL_RAW_CAP_MIN_MAX = 13,
+	F54_RX_OPENS_1 = 14,
+	F54_TX_OPENS = 15,
+	F54_TX_TO_GND_SHORTS = 16,
+	F54_RX_TO_RX_SHORTS_2 = 17,
+	F54_RX_OPENS_2 = 18,
+	F54_FULL_RAW_CAP = 19,
+	F54_FULL_RAW_CAP_NO_RX_COUPLING = 20,
+	F54_SENSOR_SPEED = 22,
+	F54_ADC_RANGE = 23,
+	F54_TRX_OPENS = 24,
+	F54_TRX_TO_GND_SHORTS = 25,
+	F54_TRX_SHORTS = 26,
+	F54_ABS_RAW_CAP = 38,
+	F54_ABS_DELTA_CAP = 40,
+	F54_ABS_HYBRID_DELTA_CAP = 59,
+	F54_ABS_HYBRID_RAW_CAP = 63,
+	F54_AMP_FULL_RAW_CAP = 78,
+	F54_AMP_RAW_ADC = 83,
+	F54_FULL_RAW_CAP_TDDI = 92,
+	INVALID_REPORT_TYPE = -1,
+};
+
+enum f54_afe_cal {
+	F54_AFE_CAL,
+	F54_AFE_IS_CAL,
+};
+
+struct f54_query {
+	union {
+		struct {
+			/* query 0 */
+			unsigned char num_of_rx_electrodes;
+
+			/* query 1 */
+			unsigned char num_of_tx_electrodes;
+
+			/* query 2 */
+			unsigned char f54_query2_b0__1:2;
+			unsigned char has_baseline:1;
+			unsigned char has_image8:1;
+			unsigned char f54_query2_b4__5:2;
+			unsigned char has_image16:1;
+			unsigned char f54_query2_b7:1;
+
+			/* queries 3.0 and 3.1 */
+			unsigned short clock_rate;
+
+			/* query 4 */
+			unsigned char touch_controller_family;
+
+			/* query 5 */
+			unsigned char has_pixel_touch_threshold_adjustment:1;
+			unsigned char f54_query5_b1__7:7;
+
+			/* query 6 */
+			unsigned char has_sensor_assignment:1;
+			unsigned char has_interference_metric:1;
+			unsigned char has_sense_frequency_control:1;
+			unsigned char has_firmware_noise_mitigation:1;
+			unsigned char has_ctrl11:1;
+			unsigned char has_two_byte_report_rate:1;
+			unsigned char has_one_byte_report_rate:1;
+			unsigned char has_relaxation_control:1;
+
+			/* query 7 */
+			unsigned char curve_compensation_mode:2;
+			unsigned char f54_query7_b2__7:6;
+
+			/* query 8 */
+			unsigned char f54_query8_b0:1;
+			unsigned char has_iir_filter:1;
+			unsigned char has_cmn_removal:1;
+			unsigned char has_cmn_maximum:1;
+			unsigned char has_touch_hysteresis:1;
+			unsigned char has_edge_compensation:1;
+			unsigned char has_per_frequency_noise_control:1;
+			unsigned char has_enhanced_stretch:1;
+
+			/* query 9 */
+			unsigned char has_force_fast_relaxation:1;
+			unsigned char has_multi_metric_state_machine:1;
+			unsigned char has_signal_clarity:1;
+			unsigned char has_variance_metric:1;
+			unsigned char has_0d_relaxation_control:1;
+			unsigned char has_0d_acquisition_control:1;
+			unsigned char has_status:1;
+			unsigned char has_slew_metric:1;
+
+			/* query 10 */
+			unsigned char has_h_blank:1;
+			unsigned char has_v_blank:1;
+			unsigned char has_long_h_blank:1;
+			unsigned char has_startup_fast_relaxation:1;
+			unsigned char has_esd_control:1;
+			unsigned char has_noise_mitigation2:1;
+			unsigned char has_noise_state:1;
+			unsigned char has_energy_ratio_relaxation:1;
+
+			/* query 11 */
+			unsigned char has_excessive_noise_reporting:1;
+			unsigned char has_slew_option:1;
+			unsigned char has_two_overhead_bursts:1;
+			unsigned char has_query13:1;
+			unsigned char has_one_overhead_burst:1;
+			unsigned char f54_query11_b5:1;
+			unsigned char has_ctrl88:1;
+			unsigned char has_query15:1;
+
+			/* query 12 */
+			unsigned char number_of_sensing_frequencies:4;
+			unsigned char f54_query12_b4__7:4;
+		} __packed;
+		unsigned char data[14];
+	};
+};
+
+/*
+ * F54 (analog test) query register maps.  Each struct below overlays one
+ * F54 query register as a union of a __packed bitfield view and a raw
+ * byte view (data[]).  The has_* bits advertise which optional
+ * control/data/query registers the touch controller implements; each map
+ * also carries a has_queryNN bit chaining to the next query register, so
+ * the set forms a presence chain (query_13 -> query_15 -> ... ->
+ * query_69) that is walked when register offsets are computed.
+ */
+struct f54_query_13 {
+	union {
+		struct {
+			unsigned char has_ctrl86:1;
+			unsigned char has_ctrl87:1;
+			unsigned char has_ctrl87_sub0:1;
+			unsigned char has_ctrl87_sub1:1;
+			unsigned char has_ctrl87_sub2:1;
+			unsigned char has_cidim:1;
+			unsigned char has_noise_mitigation_enhancement:1;
+			unsigned char has_rail_im:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_15 {
+	union {
+		struct {
+			unsigned char has_ctrl90:1;
+			unsigned char has_transmit_strength:1;
+			unsigned char has_ctrl87_sub3:1;
+			unsigned char has_query16:1;
+			unsigned char has_query20:1;
+			unsigned char has_query21:1;
+			unsigned char has_query22:1;
+			unsigned char has_query25:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_16 {
+	union {
+		struct {
+			unsigned char has_query17:1;
+			unsigned char has_data17:1;
+			unsigned char has_ctrl92:1;
+			unsigned char has_ctrl93:1;
+			unsigned char has_ctrl94_query18:1;
+			unsigned char has_ctrl95_query19:1;
+			unsigned char has_ctrl99:1;
+			unsigned char has_ctrl100:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_21 {
+	union {
+		struct {
+			unsigned char has_abs_rx:1;
+			unsigned char has_abs_tx:1;
+			unsigned char has_ctrl91:1;
+			unsigned char has_ctrl96:1;
+			unsigned char has_ctrl97:1;
+			unsigned char has_ctrl98:1;
+			unsigned char has_data19:1;
+			unsigned char has_query24_data18:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_22 {
+	union {
+		struct {
+			unsigned char has_packed_image:1;
+			unsigned char has_ctrl101:1;
+			unsigned char has_dynamic_sense_display_ratio:1;
+			unsigned char has_query23:1;
+			unsigned char has_ctrl103_query26:1;
+			unsigned char has_ctrl104:1;
+			unsigned char has_ctrl105:1;
+			unsigned char has_query28:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_23 {
+	union {
+		struct {
+			unsigned char has_ctrl102:1;
+			unsigned char has_ctrl102_sub1:1;
+			unsigned char has_ctrl102_sub2:1;
+			unsigned char has_ctrl102_sub4:1;
+			unsigned char has_ctrl102_sub5:1;
+			unsigned char has_ctrl102_sub9:1;
+			unsigned char has_ctrl102_sub10:1;
+			unsigned char has_ctrl102_sub11:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_25 {
+	union {
+		struct {
+			unsigned char has_ctrl106:1;
+			unsigned char has_ctrl102_sub12:1;
+			unsigned char has_ctrl107:1;
+			unsigned char has_ctrl108:1;
+			unsigned char has_ctrl109:1;
+			unsigned char has_data20:1;
+			unsigned char f54_query25_b6:1;
+			unsigned char has_query27:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_27 {
+	union {
+		struct {
+			unsigned char has_ctrl110:1;
+			unsigned char has_data21:1;
+			unsigned char has_ctrl111:1;
+			unsigned char has_ctrl112:1;
+			unsigned char has_ctrl113:1;
+			unsigned char has_data22:1;
+			unsigned char has_ctrl114:1;
+			unsigned char has_query29:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_29 {
+	union {
+		struct {
+			unsigned char has_ctrl115:1;
+			unsigned char has_ground_ring_options:1;
+			unsigned char has_lost_bursts_tuning:1;
+			unsigned char has_aux_exvcom2_select:1;
+			unsigned char has_ctrl116:1;
+			unsigned char has_data23:1;
+			unsigned char has_ctrl117:1;
+			unsigned char has_query30:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_30 {
+	union {
+		struct {
+			unsigned char has_ctrl118:1;
+			unsigned char has_ctrl119:1;
+			unsigned char has_ctrl120:1;
+			unsigned char has_ctrl121:1;
+			unsigned char has_ctrl122_query31:1;
+			unsigned char has_ctrl123:1;
+			unsigned char has_ctrl124:1;
+			unsigned char has_query32:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_32 {
+	union {
+		struct {
+			unsigned char has_ctrl125:1;
+			unsigned char has_ctrl126:1;
+			unsigned char has_ctrl127:1;
+			unsigned char has_abs_charge_pump_disable:1;
+			unsigned char has_query33:1;
+			unsigned char has_data24:1;
+			unsigned char has_query34:1;
+			unsigned char has_query35:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_33 {
+	union {
+		struct {
+			unsigned char has_ctrl128:1;
+			unsigned char has_ctrl129:1;
+			unsigned char has_ctrl130:1;
+			unsigned char has_ctrl131:1;
+			unsigned char has_ctrl132:1;
+			unsigned char has_ctrl133:1;
+			unsigned char has_ctrl134:1;
+			unsigned char has_query36:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_35 {
+	union {
+		struct {
+			unsigned char has_data25:1;
+			unsigned char has_ctrl135:1;
+			unsigned char has_ctrl136:1;
+			unsigned char has_ctrl137:1;
+			unsigned char has_ctrl138:1;
+			unsigned char has_ctrl139:1;
+			unsigned char has_data26:1;
+			unsigned char has_ctrl140:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_36 {
+	union {
+		struct {
+			unsigned char has_ctrl141:1;
+			unsigned char has_ctrl142:1;
+			unsigned char has_query37:1;
+			unsigned char has_ctrl143:1;
+			unsigned char has_ctrl144:1;
+			unsigned char has_ctrl145:1;
+			unsigned char has_ctrl146:1;
+			unsigned char has_query38:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_38 {
+	union {
+		struct {
+			unsigned char has_ctrl147:1;
+			unsigned char has_ctrl148:1;
+			unsigned char has_ctrl149:1;
+			unsigned char has_ctrl150:1;
+			unsigned char has_ctrl151:1;
+			unsigned char has_ctrl152:1;
+			unsigned char has_ctrl153:1;
+			unsigned char has_query39:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_39 {
+	union {
+		struct {
+			unsigned char has_ctrl154:1;
+			unsigned char has_ctrl155:1;
+			unsigned char has_ctrl156:1;
+			unsigned char has_ctrl160:1;
+			unsigned char has_ctrl157_ctrl158:1;
+			unsigned char f54_query39_b5__6:2;
+			unsigned char has_query40:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_40 {
+	union {
+		struct {
+			unsigned char has_ctrl169:1;
+			unsigned char has_ctrl163_query41:1;
+			unsigned char f54_query40_b2:1;
+			unsigned char has_ctrl165_query42:1;
+			unsigned char has_ctrl166:1;
+			unsigned char has_ctrl167:1;
+			unsigned char has_ctrl168:1;
+			unsigned char has_query43:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_43 {
+	union {
+		struct {
+			unsigned char f54_query43_b0__1:2;
+			unsigned char has_ctrl171:1;
+			unsigned char has_ctrl172_query44_query45:1;
+			unsigned char has_ctrl173:1;
+			unsigned char has_ctrl174:1;
+			unsigned char has_ctrl175:1;
+			unsigned char has_query46:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_46 {
+	union {
+		struct {
+			unsigned char has_ctrl176:1;
+			unsigned char has_ctrl177_ctrl178:1;
+			unsigned char has_ctrl179:1;
+			unsigned char f54_query46_b3:1;
+			unsigned char has_data27:1;
+			unsigned char has_data28:1;
+			unsigned char f54_query46_b6:1;
+			unsigned char has_query47:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_47 {
+	union {
+		struct {
+			unsigned char f54_query47_b0:1;
+			unsigned char has_ctrl182:1;
+			unsigned char has_ctrl183:1;
+			unsigned char f54_query47_b3:1;
+			unsigned char has_ctrl185:1;
+			unsigned char has_ctrl186:1;
+			unsigned char has_ctrl187:1;
+			unsigned char has_query49:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/*
+ * Query 49: has_ctrl188 gates the production-test trigger register used
+ * when preparing an F54_AMP_RAW_ADC report (see test_do_preparation()).
+ */
+struct f54_query_49 {
+	union {
+		struct {
+			unsigned char f54_query49_b0__1:2;
+			unsigned char has_ctrl188:1;
+			unsigned char has_data31:1;
+			unsigned char f54_query49_b4__6:3;
+			unsigned char has_query50:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_50 {
+	union {
+		struct {
+			unsigned char f54_query50_b0__6:7;
+			unsigned char has_query51:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_51 {
+	union {
+		struct {
+			unsigned char f54_query51_b0__4:5;
+			unsigned char has_query53_query54_ctrl198:1;
+			unsigned char has_ctrl199:1;
+			unsigned char has_query55:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_55 {
+	union {
+		struct {
+			unsigned char has_query56:1;
+			unsigned char has_data33_data34:1;
+			unsigned char has_alt_report_rate:1;
+			unsigned char has_ctrl200:1;
+			unsigned char has_ctrl201_ctrl202:1;
+			unsigned char has_ctrl203:1;
+			unsigned char has_ctrl204:1;
+			unsigned char has_query57:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_57 {
+	union {
+		struct {
+			unsigned char has_ctrl205:1;
+			unsigned char has_ctrl206:1;
+			unsigned char has_usb_bulk_read:1;
+			unsigned char has_ctrl207:1;
+			unsigned char has_ctrl208:1;
+			unsigned char has_ctrl209:1;
+			unsigned char has_ctrl210:1;
+			unsigned char has_query58:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_58 {
+	union {
+		struct {
+			unsigned char has_query59:1;
+			unsigned char has_query60:1;
+			unsigned char has_ctrl211:1;
+			unsigned char has_ctrl212:1;
+			unsigned char has_hybrid_abs_tx_axis_filtering:1;
+			unsigned char has_hybrid_abs_tx_interpolation:1;
+			unsigned char has_ctrl213:1;
+			unsigned char has_query61:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_61 {
+	union {
+		struct {
+			unsigned char has_ctrl214:1;
+			unsigned char has_ctrl215_query62_query63:1;
+			unsigned char f54_query_61_b2:1;
+			unsigned char has_ctrl216:1;
+			unsigned char has_ctrl217:1;
+			unsigned char has_misc_host_ctrl:1;
+			unsigned char hybrid_abs_buttons:1;
+			unsigned char has_query64:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_64 {
+	union {
+		struct {
+			unsigned char has_ctrl101_sub1:1;
+			unsigned char has_ctrl220:1;
+			unsigned char has_ctrl221:1;
+			unsigned char has_ctrl222:1;
+			unsigned char has_ctrl219_sub1:1;
+			unsigned char has_ctrl103_sub3:1;
+			unsigned char has_ctrl224_ctrl226_ctrl227:1;
+			unsigned char has_query65:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_65 {
+	union {
+		struct {
+			unsigned char f54_query_65_b0__1:2;
+			unsigned char has_ctrl101_sub2:1;
+			unsigned char f54_query_65_b3__4:2;
+			unsigned char has_query66_ctrl231:1;
+			unsigned char has_ctrl232:1;
+			unsigned char has_query67:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_67 {
+	union {
+		struct {
+			unsigned char has_abs_doze_spatial_filter_en:1;
+			unsigned char has_abs_doze_avg_filter_enhancement_en:1;
+			unsigned char has_single_display_pulse:1;
+			unsigned char f54_query_67_b3__4:2;
+			unsigned char has_ctrl235_ctrl236:1;
+			unsigned char f54_query_67_b6:1;
+			unsigned char has_query68:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_68 {
+	union {
+		struct {
+			unsigned char f54_query_68_b0:1;
+			unsigned char has_ctrl238:1;
+			unsigned char has_ctrl238_sub1:1;
+			unsigned char has_ctrl238_sub2:1;
+			unsigned char has_ctrl239:1;
+			unsigned char has_freq_filter_bw_ext:1;
+			unsigned char is_tddi_hic:1;
+			unsigned char has_query69:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* Query 69: last register in the F54 query presence chain. */
+struct f54_query_69 {
+	union {
+		struct {
+			unsigned char has_ctrl240_sub0:1;
+			unsigned char has_ctrl240_sub1_sub2:1;
+			unsigned char has_ctrl240_sub3:1;
+			unsigned char has_ctrl240_sub4:1;
+			unsigned char f54_query_69_b4__7:4;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/*
+ * F54 data register 31: calibration-CRC / short-test status bits.  The
+ * second union member mirrors the register image in data[] and caches
+ * the register's location in 'address' (the address field is driver
+ * bookkeeping, not part of the on-chip register).
+ * NOTE(review): only 7 of 8 bits are declared in the bitfield view;
+ * confirm against the F54 register spec whether bit 7 is reserved.
+ */
+struct f54_data_31 {
+	union {
+		struct {
+			unsigned char is_calibration_crc:1;
+			unsigned char calibration_crc:1;
+			unsigned char short_test_row_number:5;
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+/*
+ * F54 control register maps.  Same union pattern as the data registers:
+ * a __packed bitfield view of the register contents, overlaid with a raw
+ * data[] image plus a cached register 'address' used by the driver when
+ * reading/writing the register (see e.g. reg_41 in
+ * test_set_report_size() and reg_188 in test_do_preparation()).
+ */
+struct f54_control_7 {
+	union {
+		struct {
+			unsigned char cbc_cap:3;
+			unsigned char cbc_polarity:1;
+			unsigned char cbc_tx_carrier_selection:1;
+			unsigned char f54_ctrl7_b5__7:3;
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+/* Control 41: no_signal_clarity influences ADC-range report sizing. */
+struct f54_control_41 {
+	union {
+		struct {
+			unsigned char no_signal_clarity:1;
+			unsigned char f54_ctrl41_b1__7:7;
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+struct f54_control_57 {
+	union {
+		struct {
+			unsigned char cbc_cap:3;
+			unsigned char cbc_polarity:1;
+			unsigned char cbc_tx_carrier_selection:1;
+			unsigned char f54_ctrl57_b5__7:3;
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+struct f54_control_86 {
+	union {
+		struct {
+			unsigned char enable_high_noise_state:1;
+			unsigned char dynamic_sense_display_ratio:2;
+			unsigned char f54_ctrl86_b3__7:5;
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+struct f54_control_88 {
+	union {
+		struct {
+			unsigned char tx_low_reference_polarity:1;
+			unsigned char tx_high_reference_polarity:1;
+			unsigned char abs_low_reference_polarity:1;
+			unsigned char abs_polarity:1;
+			unsigned char cbc_polarity:1;
+			unsigned char cbc_tx_carrier_selection:1;
+			unsigned char charge_pump_enable:1;
+			unsigned char cbc_abs_auto_servo:1;
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+/* Control 110: active-stylus tuning block (13 consecutive bytes). */
+struct f54_control_110 {
+	union {
+		struct {
+			unsigned char active_stylus_rx_feedback_cap;
+			unsigned char active_stylus_rx_feedback_cap_reference;
+			unsigned char active_stylus_low_reference;
+			unsigned char active_stylus_high_reference;
+			unsigned char active_stylus_gain_control;
+			unsigned char active_stylus_gain_control_reference;
+			unsigned char active_stylus_timing_mode;
+			unsigned char active_stylus_discovery_bursts;
+			unsigned char active_stylus_detection_bursts;
+			unsigned char active_stylus_discovery_noise_multiplier;
+			unsigned char active_stylus_detection_envelope_min;
+			unsigned char active_stylus_detection_envelope_max;
+			unsigned char active_stylus_lose_count;
+		} __packed;
+		struct {
+			unsigned char data[13];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+struct f54_control_149 {
+	union {
+		struct {
+			unsigned char trans_cbc_global_cap_enable:1;
+			unsigned char f54_ctrl149_b1__7:7;
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+/*
+ * Control 188: calibration / production-test trigger register.
+ * start_production_test is set in test_do_preparation() for
+ * F54_AMP_RAW_ADC reports.
+ * NOTE(review): only 7 bits are declared, so the field named
+ * f54_ctrl188_b7 actually lands at bit 6 — confirm against the F54
+ * register spec whether a bit-6 field is missing here.
+ */
+struct f54_control_188 {
+	union {
+		struct {
+			unsigned char start_calibration:1;
+			unsigned char start_is_calibration:1;
+			unsigned char frequency:2;
+			unsigned char start_production_test:1;
+			unsigned char short_test_calibration:1;
+			unsigned char f54_ctrl188_b7:1;
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+/*
+ * Aggregate of pointers to the F54 control register maps the firmware
+ * actually implements; unimplemented registers presumably stay NULL
+ * (allocated elsewhere at probe time — TODO confirm in the setup code).
+ */
+struct f54_control {
+	struct f54_control_7 *reg_7;
+	struct f54_control_41 *reg_41;
+	struct f54_control_57 *reg_57;
+	struct f54_control_86 *reg_86;
+	struct f54_control_88 *reg_88;
+	struct f54_control_110 *reg_110;
+	struct f54_control_149 *reg_149;
+	struct f54_control_188 *reg_188;
+};
+
+/*
+ * Per-device F54 (analog test reporting) state.  Holds the discovered
+ * register base addresses, cached query/control register images, the
+ * report capture buffer and the sysfs/workqueue plumbing used to fetch
+ * test reports from the controller.
+ */
+struct synaptics_rmi4_f54_handle {
+	bool no_auto_cal;
+	bool skip_preparation;
+	unsigned char status;
+	unsigned char intr_mask;	/* F54's bits in its interrupt enable reg */
+	unsigned char intr_reg_num;	/* index of the F01 intr reg carrying F54 */
+	unsigned char tx_assigned;	/* sensing channel counts used for */
+	unsigned char rx_assigned;	/* report sizing (test_set_report_size) */
+	unsigned char *report_data;	/* captured report bytes */
+	unsigned short query_base_addr;
+	unsigned short control_base_addr;
+	unsigned short data_base_addr;
+	unsigned short command_base_addr;
+	unsigned short fifoindex;
+	unsigned int report_size;	/* bytes expected for report_type */
+	unsigned int data_buffer_size;	/* allocated size of report_data */
+	unsigned int data_pos;
+	enum f54_report_types report_type;
+	struct f54_query query;
+	struct f54_query_13 query_13;
+	struct f54_query_15 query_15;
+	struct f54_query_16 query_16;
+	struct f54_query_21 query_21;
+	struct f54_query_22 query_22;
+	struct f54_query_23 query_23;
+	struct f54_query_25 query_25;
+	struct f54_query_27 query_27;
+	struct f54_query_29 query_29;
+	struct f54_query_30 query_30;
+	struct f54_query_32 query_32;
+	struct f54_query_33 query_33;
+	struct f54_query_35 query_35;
+	struct f54_query_36 query_36;
+	struct f54_query_38 query_38;
+	struct f54_query_39 query_39;
+	struct f54_query_40 query_40;
+	struct f54_query_43 query_43;
+	struct f54_query_46 query_46;
+	struct f54_query_47 query_47;
+	struct f54_query_49 query_49;
+	struct f54_query_50 query_50;
+	struct f54_query_51 query_51;
+	struct f54_query_55 query_55;
+	struct f54_query_57 query_57;
+	struct f54_query_58 query_58;
+	struct f54_query_61 query_61;
+	struct f54_query_64 query_64;
+	struct f54_query_65 query_65;
+	struct f54_query_67 query_67;
+	struct f54_query_68 query_68;
+	struct f54_query_69 query_69;
+	struct f54_data_31 data_31;
+	struct f54_control control;
+	struct mutex status_mutex;	/* serializes 'status' transitions */
+	struct kobject *sysfs_dir;
+	struct hrtimer watchdog;
+	struct work_struct timeout_work;
+	struct work_struct test_report_work;
+	struct workqueue_struct *test_report_workqueue;
+	struct synaptics_rmi4_data *rmi4_data;
+};
+
+/*
+ * F55 (sensor configuration) query/control register maps, following the
+ * same bitfield/byte-union pattern as the F54 maps above.  The has_*
+ * bits chain query_3 -> query_5 -> query_17 -> ... -> query_33.
+ */
+struct f55_query {
+	union {
+		struct {
+			/* query 0 */
+			unsigned char num_of_rx_electrodes;
+
+			/* query 1 */
+			unsigned char num_of_tx_electrodes;
+
+			/* query 2 */
+			unsigned char has_sensor_assignment:1;
+			unsigned char has_edge_compensation:1;
+			unsigned char curve_compensation_mode:2;
+			unsigned char has_ctrl6:1;
+			unsigned char has_alternate_transmitter_assignment:1;
+			unsigned char has_single_layer_multi_touch:1;
+			unsigned char has_query5:1;
+		} __packed;
+		unsigned char data[3];
+	};
+};
+
+struct f55_query_3 {
+	union {
+		struct {
+			unsigned char has_ctrl8:1;
+			unsigned char has_ctrl9:1;
+			unsigned char has_oncell_pattern_support:1;
+			unsigned char has_data0:1;
+			unsigned char has_single_wide_pattern_support:1;
+			unsigned char has_mirrored_tx_pattern_support:1;
+			unsigned char has_discrete_pattern_support:1;
+			unsigned char has_query9:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f55_query_5 {
+	union {
+		struct {
+			unsigned char has_corner_compensation:1;
+			unsigned char has_ctrl12:1;
+			unsigned char has_trx_configuration:1;
+			unsigned char has_ctrl13:1;
+			unsigned char f55_query5_b4:1;
+			unsigned char has_ctrl14:1;
+			unsigned char has_basis_function:1;
+			unsigned char has_query17:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f55_query_17 {
+	union {
+		struct {
+			unsigned char f55_query17_b0:1;
+			unsigned char has_ctrl16:1;
+			unsigned char has_ctrl18_ctrl19:1;
+			unsigned char has_ctrl17:1;
+			unsigned char has_ctrl20:1;
+			unsigned char has_ctrl21:1;
+			unsigned char has_ctrl22:1;
+			unsigned char has_query18:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f55_query_18 {
+	union {
+		struct {
+			unsigned char has_ctrl23:1;
+			unsigned char has_ctrl24:1;
+			unsigned char has_query19:1;
+			unsigned char has_ctrl25:1;
+			unsigned char has_ctrl26:1;
+			unsigned char has_ctrl27_query20:1;
+			unsigned char has_ctrl28_query21:1;
+			unsigned char has_query22:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f55_query_22 {
+	union {
+		struct {
+			unsigned char has_ctrl29:1;
+			unsigned char has_query23:1;
+			unsigned char has_guard_disable:1;
+			unsigned char has_ctrl30:1;
+			unsigned char has_ctrl31:1;
+			unsigned char has_ctrl32:1;
+			unsigned char has_query24_through_query27:1;
+			unsigned char has_query28:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* Query 23: amp-sensor geometry flags (mirrored into the F55 handle). */
+struct f55_query_23 {
+	union {
+		struct {
+			unsigned char amp_sensor_enabled:1;
+			unsigned char image_transposed:1;
+			unsigned char first_column_at_left_side:1;
+			unsigned char size_of_column2mux:5;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f55_query_28 {
+	union {
+		struct {
+			unsigned char f55_query28_b0__4:5;
+			unsigned char has_ctrl37:1;
+			unsigned char has_query29:1;
+			unsigned char has_query30:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f55_query_30 {
+	union {
+		struct {
+			unsigned char has_ctrl38:1;
+			unsigned char has_query31_query32:1;
+			unsigned char has_ctrl39:1;
+			unsigned char has_ctrl40:1;
+			unsigned char has_ctrl41:1;
+			unsigned char has_ctrl42:1;
+			unsigned char has_ctrl43_ctrl44:1;
+			unsigned char has_query33:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f55_query_33 {
+	union {
+		struct {
+			unsigned char has_extended_amp_pad:1;
+			unsigned char has_extended_amp_btn:1;
+			unsigned char has_ctrl45_ctrl46:1;
+			unsigned char f55_query33_b3:1;
+			unsigned char has_ctrl47_sub0_sub1:1;
+			unsigned char f55_query33_b5__7:3;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* Control 43: AFE mux sizing — a two-byte register image. */
+struct f55_control_43 {
+	union {
+		struct {
+			unsigned char swap_sensor_side:1;
+			unsigned char f55_ctrl43_b1__7:7;
+			unsigned char afe_l_mux_size:4;
+			unsigned char afe_r_mux_size:4;
+		} __packed;
+		unsigned char data[2];
+	};
+};
+
+/*
+ * Per-device F55 (sensor configuration) state: register base addresses,
+ * cached query register images, and the TX/RX (and force TX/RX)
+ * electrode assignment tables read from the controller.
+ */
+struct synaptics_rmi4_f55_handle {
+	bool amp_sensor;
+	bool extended_amp;
+	bool has_force;
+	unsigned char size_of_column2mux;
+	unsigned char afe_mux_offset;
+	unsigned char force_tx_offset;
+	unsigned char force_rx_offset;
+	unsigned char *tx_assignment;	/* electrode maps; allocation/ownership */
+	unsigned char *rx_assignment;	/* handled outside this chunk */
+	unsigned char *force_tx_assignment;
+	unsigned char *force_rx_assignment;
+	unsigned short query_base_addr;
+	unsigned short control_base_addr;
+	unsigned short data_base_addr;
+	unsigned short command_base_addr;
+	struct f55_query query;
+	struct f55_query_3 query_3;
+	struct f55_query_5 query_5;
+	struct f55_query_17 query_17;
+	struct f55_query_18 query_18;
+	struct f55_query_22 query_22;
+	struct f55_query_23 query_23;
+	struct f55_query_28 query_28;
+	struct f55_query_30 query_30;
+	struct f55_query_33 query_33;
+};
+
+/*
+ * F21 (force sensing) query register maps and per-device state.  The
+ * queryN_is_present / ctrlN_is_present bitmaps advertise which F21
+ * registers the firmware implements.
+ */
+struct f21_query_2 {
+	union {
+		struct {
+			unsigned char size_of_query3;
+			struct {
+				unsigned char query0_is_present:1;
+				unsigned char query1_is_present:1;
+				unsigned char query2_is_present:1;
+				unsigned char query3_is_present:1;
+				unsigned char query4_is_present:1;
+				unsigned char query5_is_present:1;
+				unsigned char query6_is_present:1;
+				unsigned char query7_is_present:1;
+			} __packed;
+			struct {
+				unsigned char query8_is_present:1;
+				unsigned char query9_is_present:1;
+				unsigned char query10_is_present:1;
+				unsigned char query11_is_present:1;
+				unsigned char query12_is_present:1;
+				unsigned char query13_is_present:1;
+				unsigned char query14_is_present:1;
+				unsigned char query15_is_present:1;
+			} __packed;
+		};
+		unsigned char data[3];
+	};
+};
+
+struct f21_query_5 {
+	union {
+		struct {
+			unsigned char size_of_query6;
+			struct {
+				unsigned char ctrl0_is_present:1;
+				unsigned char ctrl1_is_present:1;
+				unsigned char ctrl2_is_present:1;
+				unsigned char ctrl3_is_present:1;
+				unsigned char ctrl4_is_present:1;
+				unsigned char ctrl5_is_present:1;
+				unsigned char ctrl6_is_present:1;
+				unsigned char ctrl7_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl8_is_present:1;
+				unsigned char ctrl9_is_present:1;
+				unsigned char ctrl10_is_present:1;
+				unsigned char ctrl11_is_present:1;
+				unsigned char ctrl12_is_present:1;
+				unsigned char ctrl13_is_present:1;
+				unsigned char ctrl14_is_present:1;
+				unsigned char ctrl15_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl16_is_present:1;
+				unsigned char ctrl17_is_present:1;
+				unsigned char ctrl18_is_present:1;
+				unsigned char ctrl19_is_present:1;
+				unsigned char ctrl20_is_present:1;
+				unsigned char ctrl21_is_present:1;
+				unsigned char ctrl22_is_present:1;
+				unsigned char ctrl23_is_present:1;
+			} __packed;
+		};
+		unsigned char data[4];
+	};
+};
+
+/* Query 11: force-sensor capability limits (6-byte register block). */
+struct f21_query_11 {
+	union {
+		struct {
+			unsigned char has_high_resolution_force:1;
+			unsigned char has_force_sensing_txrx_mapping:1;
+			unsigned char f21_query11_00_b2__7:6;
+			unsigned char f21_query11_00_reserved;
+			unsigned char max_number_of_force_sensors;
+			unsigned char max_number_of_force_txs;
+			unsigned char max_number_of_force_rxs;
+			unsigned char f21_query11_01_reserved;
+		} __packed;
+		unsigned char data[6];
+	};
+};
+
+/*
+ * Per-device F21 state.  tx_assigned/rx_assigned feed the absolute-cap
+ * report sizing in test_set_report_size().
+ */
+struct synaptics_rmi4_f21_handle {
+	bool has_force;
+	unsigned char tx_assigned;
+	unsigned char rx_assigned;
+	unsigned char max_num_of_tx;
+	unsigned char max_num_of_rx;
+	unsigned char max_num_of_txrx;
+	unsigned char *force_txrx_assignment;
+	unsigned short query_base_addr;
+	unsigned short control_base_addr;
+	unsigned short data_base_addr;
+	unsigned short command_base_addr;
+};
+
+/*
+ * sysfs handler prototypes, generated by the show_prototype() /
+ * store_prototype() / show_store_prototype() macros (defined earlier in
+ * this file); one read-only, write-only or read-write attribute each.
+ */
+show_prototype(num_of_mapped_tx)
+show_prototype(num_of_mapped_rx)
+show_prototype(tx_mapping)
+show_prototype(rx_mapping)
+show_prototype(num_of_mapped_force_tx)
+show_prototype(num_of_mapped_force_rx)
+show_prototype(force_tx_mapping)
+show_prototype(force_rx_mapping)
+show_prototype(report_size)
+show_prototype(status)
+store_prototype(do_preparation)
+store_prototype(force_cal)
+store_prototype(get_report)
+store_prototype(resume_touch)
+store_prototype(do_afe_calibration)
+show_store_prototype(report_type)
+show_store_prototype(fifoindex)
+show_store_prototype(no_auto_cal)
+show_store_prototype(read_report)
+
+/* NULL-terminated attribute list for the F54 test sysfs directory. */
+static struct attribute *attrs[] = {
+	attrify(num_of_mapped_tx),
+	attrify(num_of_mapped_rx),
+	attrify(tx_mapping),
+	attrify(rx_mapping),
+	attrify(num_of_mapped_force_tx),
+	attrify(num_of_mapped_force_rx),
+	attrify(force_tx_mapping),
+	attrify(force_rx_mapping),
+	attrify(report_size),
+	attrify(status),
+	attrify(do_preparation),
+	attrify(force_cal),
+	attrify(get_report),
+	attrify(resume_touch),
+	attrify(do_afe_calibration),
+	attrify(report_type),
+	attrify(fifoindex),
+	attrify(no_auto_cal),
+	attrify(read_report),
+	NULL,
+};
+
+/* Attribute group registered via sysfs_create_group(); never modified. */
+static const struct attribute_group attr_group = {
+	.attrs = attrs,
+};
+
+/* Read handler for the binary report_data sysfs file (defined later). */
+static ssize_t test_sysfs_data_read(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+/*
+ * Binary sysfs attribute exposing the captured test report; read-only
+ * (0444), size left 0 since the report size varies by report type.
+ */
+static struct bin_attribute test_report_data = {
+	.attr = {
+		.name = "report_data",
+		.mode = 0444,
+	},
+	.size = 0,
+	.read = test_sysfs_data_read,
+};
+
+/*
+ * Module-wide handles for F54/F55/F21 (allocated at init, outside this
+ * chunk).  Single-instance by design — only one device is supported.
+ */
+static struct synaptics_rmi4_f54_handle *f54;
+static struct synaptics_rmi4_f55_handle *f55;
+static struct synaptics_rmi4_f21_handle *f21;
+
+/* Signals completion of module teardown to the remove path. */
+DECLARE_COMPLETION(test_remove_complete);
+
+/*
+ * Check whether report_type is one of the report types this driver can
+ * capture.  On an unsupported type, resets f54->report_type/report_size
+ * as a side effect before returning false.
+ */
+static bool test_report_type_valid(enum f54_report_types report_type)
+{
+	switch (report_type) {
+	case F54_8BIT_IMAGE:
+	case F54_16BIT_IMAGE:
+	case F54_RAW_16BIT_IMAGE:
+	case F54_HIGH_RESISTANCE:
+	case F54_TX_TO_TX_SHORTS:
+	case F54_RX_TO_RX_SHORTS_1:
+	case F54_TRUE_BASELINE:
+	case F54_FULL_RAW_CAP_MIN_MAX:
+	case F54_RX_OPENS_1:
+	case F54_TX_OPENS:
+	case F54_TX_TO_GND_SHORTS:
+	case F54_RX_TO_RX_SHORTS_2:
+	case F54_RX_OPENS_2:
+	case F54_FULL_RAW_CAP:
+	case F54_FULL_RAW_CAP_NO_RX_COUPLING:
+	case F54_SENSOR_SPEED:
+	case F54_ADC_RANGE:
+	case F54_TRX_OPENS:
+	case F54_TRX_TO_GND_SHORTS:
+	case F54_TRX_SHORTS:
+	case F54_ABS_RAW_CAP:
+	case F54_ABS_DELTA_CAP:
+	case F54_ABS_HYBRID_DELTA_CAP:
+	case F54_ABS_HYBRID_RAW_CAP:
+	case F54_AMP_FULL_RAW_CAP:
+	case F54_AMP_RAW_ADC:
+	case F54_FULL_RAW_CAP_TDDI:
+		return true;
+		break;	/* NOTE(review): unreachable after return */
+	default:
+		f54->report_type = INVALID_REPORT_TYPE;
+		f54->report_size = 0;
+		return false;
+	}
+}
+
+/*
+ * Compute f54->report_size (bytes) for the currently selected report
+ * type from the assigned TX/RX channel counts.  Sets report_size to 0
+ * for unknown types or when a required register read fails.
+ */
+static void test_set_report_size(void)
+{
+	int retval;
+	unsigned char tx = f54->tx_assigned;
+	unsigned char rx = f54->rx_assigned;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	switch (f54->report_type) {
+	case F54_8BIT_IMAGE:
+		/* one byte per tx/rx intersection */
+		f54->report_size = tx * rx;
+		break;
+	case F54_16BIT_IMAGE:
+	case F54_RAW_16BIT_IMAGE:
+	case F54_TRUE_BASELINE:
+	case F54_FULL_RAW_CAP:
+	case F54_FULL_RAW_CAP_NO_RX_COUPLING:
+	case F54_SENSOR_SPEED:
+	case F54_AMP_FULL_RAW_CAP:
+	case F54_AMP_RAW_ADC:
+	case F54_FULL_RAW_CAP_TDDI:
+		/* two bytes per intersection */
+		f54->report_size = 2 * tx * rx;
+		break;
+	case F54_HIGH_RESISTANCE:
+		f54->report_size = HIGH_RESISTANCE_DATA_SIZE;
+		break;
+	case F54_TX_TO_TX_SHORTS:
+	case F54_TX_OPENS:
+	case F54_TX_TO_GND_SHORTS:
+		/* one bit per tx channel, rounded up to whole bytes */
+		f54->report_size = (tx + 7) / 8;
+		break;
+	case F54_RX_TO_RX_SHORTS_1:
+	case F54_RX_OPENS_1:
+		if (rx < tx)
+			f54->report_size = 2 * rx * rx;
+		else
+			f54->report_size = 2 * tx * rx;
+		break;
+	case F54_FULL_RAW_CAP_MIN_MAX:
+		f54->report_size = FULL_RAW_CAP_MIN_MAX_DATA_SIZE;
+		break;
+	case F54_RX_TO_RX_SHORTS_2:
+	case F54_RX_OPENS_2:
+		/* only the rx channels beyond tx are reported here */
+		if (rx <= tx)
+			f54->report_size = 0;
+		else
+			f54->report_size = 2 * rx * (rx - tx);
+		break;
+	case F54_ADC_RANGE:
+		if (f54->query.has_signal_clarity) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					f54->control.reg_41->address,
+					f54->control.reg_41->data,
+					sizeof(f54->control.reg_41->data));
+			if (retval < 0) {
+				dev_dbg(rmi4_data->pdev->dev.parent,
+						"%s: Failed to read control reg_41\n",
+						__func__);
+				f54->report_size = 0;
+				break;
+			}
+			/* with signal clarity on, tx is padded to a multiple of 4 */
+			if (!f54->control.reg_41->no_signal_clarity) {
+				if (tx % 4)
+					tx += 4 - (tx % 4);
+			}
+		}
+		f54->report_size = 2 * tx * rx;
+		break;
+	case F54_TRX_OPENS:
+	case F54_TRX_TO_GND_SHORTS:
+	case F54_TRX_SHORTS:
+		f54->report_size = TRX_OPEN_SHORT_DATA_SIZE;
+		break;
+	case F54_ABS_RAW_CAP:
+	case F54_ABS_DELTA_CAP:
+	case F54_ABS_HYBRID_DELTA_CAP:
+	case F54_ABS_HYBRID_RAW_CAP:
+		/*
+		 * Four bytes per channel, including F21 force channels.
+		 * NOTE(review): assumes f21 is always allocated by init —
+		 * confirm, otherwise this dereferences NULL.
+		 */
+		tx += f21->tx_assigned;
+		rx += f21->rx_assigned;
+		f54->report_size = 4 * (tx + rx);
+		break;
+	default:
+		f54->report_size = 0;
+	}
+
+	return;	/* NOTE(review): redundant in a void function */
+}
+
+/*
+ * Route interrupts for test reporting.  set=true: mask off every other
+ * function's interrupt enable register, then enable only F54's bits.
+ * set=false: mask off F54's register first, then restore all saved
+ * interrupt masks.  The write ordering avoids a window where both F54
+ * and normal touch interrupts are enabled.  Returns 0 or a negative
+ * errno from the failing register write.
+ */
+static int test_set_interrupt(bool set)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char zero = 0x00;
+	unsigned char *intr_mask;
+	unsigned short f01_ctrl_reg;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	intr_mask = rmi4_data->intr_mask;
+	/* F01 interrupt enable registers start at f01_ctrl_base + 1 */
+	f01_ctrl_reg = rmi4_data->f01_ctrl_base_addr + 1 + f54->intr_reg_num;
+
+	if (!set) {
+		/* disable F54's interrupt before restoring the others */
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				f01_ctrl_reg,
+				&zero,
+				sizeof(zero));
+		if (retval < 0)
+			return retval;
+	}
+
+	for (ii = 0; ii < rmi4_data->num_of_intr_regs; ii++) {
+		if (intr_mask[ii] != 0x00) {
+			f01_ctrl_reg = rmi4_data->f01_ctrl_base_addr + 1 + ii;
+			if (set) {
+				/* silence all normally-enabled sources */
+				retval = synaptics_rmi4_reg_write(rmi4_data,
+						f01_ctrl_reg,
+						&zero,
+						sizeof(zero));
+				if (retval < 0)
+					return retval;
+			} else {
+				/* restore the saved interrupt mask */
+				retval = synaptics_rmi4_reg_write(rmi4_data,
+						f01_ctrl_reg,
+						&(intr_mask[ii]),
+						sizeof(intr_mask[ii]));
+				if (retval < 0)
+					return retval;
+			}
+		}
+	}
+
+	f01_ctrl_reg = rmi4_data->f01_ctrl_base_addr + 1 + f54->intr_reg_num;
+
+	if (set) {
+		/* finally enable only F54's interrupt bits */
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				f01_ctrl_reg,
+				&f54->intr_mask,
+				1);
+		if (retval < 0)
+			return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Poll the F54 command register every 100 ms until the controller
+ * clears it (command complete), giving up after COMMAND_TIMEOUT_100MS
+ * polls.  Returns 0 on completion, -ETIMEDOUT on timeout, or a negative
+ * errno if the register read fails.
+ */
+static int test_wait_for_command_completion(void)
+{
+	int retval;
+	unsigned char value;
+	unsigned char timeout_count;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	timeout_count = 0;
+	do {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->command_base_addr,
+				&value,
+				sizeof(value));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read command register\n",
+					__func__);
+			return retval;
+		}
+
+		/* controller clears the register when the command is done */
+		if (value == 0x00)
+			break;
+
+		msleep(100);
+		timeout_count++;
+	} while (timeout_count < COMMAND_TIMEOUT_100MS);
+
+	if (timeout_count == COMMAND_TIMEOUT_100MS) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Timed out waiting for command completion\n",
+				__func__);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+/*
+ * Write 'command' to the F54 command register and block until the
+ * controller signals completion.  Returns 0 on success or a negative
+ * errno from the write or the completion wait.
+ */
+static int test_do_command(unsigned char command)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			f54->command_base_addr,
+			&command,
+			sizeof(command));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write command\n",
+				__func__);
+		return retval;
+	}
+
+	retval = test_wait_for_command_completion();
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/*
+ * Prepare the controller for capturing the currently selected F54
+ * report type: force the NO_SLEEP bit on in F01 device control, then
+ * apply per-report-type setup (start production test for AMP raw ADC,
+ * or disable CBC / signal clarity and force update+cal for the
+ * default case).  Returns 0 on success or a negative errno.
+ */
+static int test_do_preparation(void)
+{
+	int retval;
+	unsigned char value;
+	unsigned char zero = 0x00;
+	unsigned char device_ctrl;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	/* Read-modify-write F01 device control to keep the chip awake
+	 * for the duration of the test.
+	 */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&device_ctrl,
+			sizeof(device_ctrl));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set no sleep\n",
+				__func__);
+		return retval;
+	}
+
+	device_ctrl |= NO_SLEEP_ON;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&device_ctrl,
+			sizeof(device_ctrl));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set no sleep\n",
+				__func__);
+		return retval;
+	}
+
+	if (f54->skip_preparation)
+		return 0;
+
+	switch (f54->report_type) {
+	/* These report types need no extra preparation. */
+	case F54_16BIT_IMAGE:
+	case F54_RAW_16BIT_IMAGE:
+	case F54_SENSOR_SPEED:
+	case F54_ADC_RANGE:
+	case F54_ABS_RAW_CAP:
+	case F54_ABS_DELTA_CAP:
+	case F54_ABS_HYBRID_DELTA_CAP:
+	case F54_ABS_HYBRID_RAW_CAP:
+	case F54_FULL_RAW_CAP_TDDI:
+		break;
+	case F54_AMP_RAW_ADC:
+		/* Set the start_production_test bit in Ctrl188 (read,
+		 * modify cached copy, write back).
+		 */
+		if (f54->query_49.has_ctrl188) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					f54->control.reg_188->address,
+					f54->control.reg_188->data,
+					sizeof(f54->control.reg_188->data));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to set start production test\n",
+						__func__);
+				return retval;
+			}
+			f54->control.reg_188->start_production_test = 1;
+			retval = synaptics_rmi4_reg_write(rmi4_data,
+					f54->control.reg_188->address,
+					f54->control.reg_188->data,
+					sizeof(f54->control.reg_188->data));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to set start production test\n",
+						__func__);
+				return retval;
+			}
+		}
+		break;
+	default:
+		/* Disable capacitive baseline correction (CBC) via
+		 * whichever control register this controller family
+		 * exposes.
+		 */
+		if (f54->query.touch_controller_family == 1)
+			disable_cbc(reg_7);
+		else if (f54->query.has_ctrl88)
+			disable_cbc(reg_88);
+
+		if (f54->query.has_0d_acquisition_control)
+			disable_cbc(reg_57);
+
+		if ((f54->query.has_query15) &&
+				(f54->query_15.has_query25) &&
+				(f54->query_25.has_query27) &&
+				(f54->query_27.has_query29) &&
+				(f54->query_29.has_query30) &&
+				(f54->query_30.has_query32) &&
+				(f54->query_32.has_query33) &&
+				(f54->query_33.has_query36) &&
+				(f54->query_36.has_query38) &&
+				(f54->query_38.has_ctrl149)) {
+			/* NOTE(review): writes sizeof(reg_149->data) bytes
+			 * sourced from a single-byte 'zero' variable — safe
+			 * only if reg_149->data is 1 byte; confirm.
+			 */
+			retval = synaptics_rmi4_reg_write(rmi4_data,
+					f54->control.reg_149->address,
+					&zero,
+					sizeof(f54->control.reg_149->data));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to disable global CBC\n",
+						__func__);
+				return retval;
+			}
+		}
+
+		if (f54->query.has_signal_clarity) {
+			/* NOTE(review): reads sizeof(reg_41->data) bytes
+			 * into a single-byte stack variable — safe only if
+			 * reg_41->data is 1 byte; confirm.
+			 */
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					f54->control.reg_41->address,
+					&value,
+					sizeof(f54->control.reg_41->data));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to disable signal clarity\n",
+						__func__);
+				return retval;
+			}
+			/* Bit 0 set disables signal clarity. */
+			value |= 0x01;
+			retval = synaptics_rmi4_reg_write(rmi4_data,
+					f54->control.reg_41->address,
+					&value,
+					sizeof(f54->control.reg_41->data));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to disable signal clarity\n",
+						__func__);
+				return retval;
+			}
+		}
+
+		/* Latch the control changes into the sensing firmware. */
+		retval = test_do_command(COMMAND_FORCE_UPDATE);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to do force update\n",
+					__func__);
+			return retval;
+		}
+
+		retval = test_do_command(COMMAND_FORCE_CAL);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to do force cal\n",
+					__func__);
+			return retval;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Run an AFE calibration (normal or image-sensing, selected by mode)
+ * via Ctrl188: set the corresponding start bit, poll until the
+ * firmware clears it (1 s intervals, CALIBRATION_TIMEOUT_S tries),
+ * then verify the resulting calibration CRC in Data31 is zero.
+ *
+ * Returns 0 on success, -EBUSY on poll timeout, -EINVAL on non-zero
+ * CRC, or a negative errno from register I/O.
+ */
+static int test_do_afe_calibration(enum f54_afe_cal mode)
+{
+	int retval;
+	unsigned char timeout = CALIBRATION_TIMEOUT_S;
+	unsigned char timeout_count = 0;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f54->control.reg_188->address,
+			f54->control.reg_188->data,
+			sizeof(f54->control.reg_188->data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to start calibration\n",
+				__func__);
+		return retval;
+	}
+
+	if (mode == F54_AFE_CAL)
+		f54->control.reg_188->start_calibration = 1;
+	else if (mode == F54_AFE_IS_CAL)
+		f54->control.reg_188->start_is_calibration = 1;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			f54->control.reg_188->address,
+			f54->control.reg_188->data,
+			sizeof(f54->control.reg_188->data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to start calibration\n",
+				__func__);
+		return retval;
+	}
+
+	/* Firmware clears the start bit when calibration finishes. */
+	do {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->control.reg_188->address,
+				f54->control.reg_188->data,
+				sizeof(f54->control.reg_188->data));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to complete calibration\n",
+					__func__);
+			return retval;
+		}
+
+		if (mode == F54_AFE_CAL) {
+			if (!f54->control.reg_188->start_calibration)
+				break;
+		} else if (mode == F54_AFE_IS_CAL) {
+			if (!f54->control.reg_188->start_is_calibration)
+				break;
+		}
+
+		if (timeout_count == timeout) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Timed out waiting for calibration completion\n",
+					__func__);
+			return -EBUSY;
+		}
+
+		timeout_count++;
+		msleep(1000);
+	} while (true);
+
+	/* check CRC */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f54->data_31.address,
+			f54->data_31.data,
+			sizeof(f54->data_31.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read calibration CRC\n",
+				__func__);
+		return retval;
+	}
+
+	/* A zero CRC indicates a successful calibration. */
+	if (mode == F54_AFE_CAL) {
+		if (f54->data_31.calibration_crc == 0)
+			return 0;
+	} else if (mode == F54_AFE_IS_CAL) {
+		if (f54->data_31.is_calibration_crc == 0)
+			return 0;
+	}
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Failed to read calibration CRC\n",
+			__func__);
+
+	return -EINVAL;
+}
+
+/*
+ * Verify the test module is idle and ready to accept a new operation.
+ * Returns 0 only when the status is STATUS_IDLE; otherwise logs the
+ * current state and returns -EINVAL.
+ */
+static int test_check_for_idle_status(void)
+{
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	if (f54->status == STATUS_IDLE)
+		return 0;
+
+	if (f54->status == STATUS_BUSY)
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Status busy\n",
+				__func__);
+	else if (f54->status == STATUS_ERROR)
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Status error\n",
+				__func__);
+	else
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid status (%d)\n",
+				__func__, f54->status);
+
+	return -EINVAL;
+}
+
+/*
+ * Watchdog work: runs when a get-report command has not been observed
+ * to complete before the watchdog timer fired.  If the command
+ * register still has COMMAND_GET_REPORT set, the firmware does not
+ * support the requested report type; if the bit has cleared, the
+ * report is actually ready and fetching it is handed off to
+ * test_report_work.  Failure paths mark the module status as error.
+ */
+static void test_timeout_work(struct work_struct *work)
+{
+	int retval;
+	unsigned char command;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	mutex_lock(&f54->status_mutex);
+
+	if (f54->status == STATUS_BUSY) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->command_base_addr,
+				&command,
+				sizeof(command));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read command register\n",
+					__func__);
+		} else if (command & COMMAND_GET_REPORT) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Report type not supported by FW\n",
+					__func__);
+		} else {
+			/* Command completed after all; collect the report
+			 * instead of flagging an error.
+			 */
+			queue_work(f54->test_report_workqueue,
+					&f54->test_report_work);
+			goto exit;
+		}
+		f54->status = STATUS_ERROR;
+		f54->report_size = 0;
+	}
+
+exit:
+	mutex_unlock(&f54->status_mutex);
+
+	return;
+}
+
+/*
+ * hrtimer callback for the get-report watchdog.  Defers the actual
+ * handling to test_timeout_work, since register I/O may sleep and
+ * hrtimer callbacks must not.
+ */
+static enum hrtimer_restart test_get_report_timeout(struct hrtimer *timer)
+{
+	schedule_work(&(f54->timeout_work));
+
+	return HRTIMER_NORESTART;
+}
+
+/* sysfs show: number of TX electrodes assigned to the 2D sensor. */
+static ssize_t test_sysfs_num_of_mapped_tx_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", f54->tx_assigned);
+}
+
+/* sysfs show: number of RX electrodes assigned to the 2D sensor. */
+static ssize_t test_sysfs_num_of_mapped_rx_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", f54->rx_assigned);
+}
+
+/*
+ * sysfs show: F55 TX electrode-to-pin mapping, one two-digit entry
+ * per electrode ("xx" for 0xff, i.e. unassigned).  Returns -EINVAL if
+ * F55 is not present.
+ */
+static ssize_t test_sysfs_tx_mapping_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int cnt;
+	int count = 0;
+	unsigned char ii;
+	unsigned char tx_num;
+	unsigned char tx_electrodes;
+
+	if (!f55)
+		return -EINVAL;
+
+	tx_electrodes = f55->query.num_of_tx_electrodes;
+
+	for (ii = 0; ii < tx_electrodes; ii++) {
+		tx_num = f55->tx_assignment[ii];
+		if (tx_num == 0xff)
+			cnt = snprintf(buf, PAGE_SIZE - count, "xx ");
+		else
+			cnt = snprintf(buf, PAGE_SIZE - count, "%02u ", tx_num);
+		buf += cnt;
+		count += cnt;
+	}
+
+	/* Trailing newline; count only the single '\n' byte. */
+	snprintf(buf, PAGE_SIZE - count, "\n");
+	count++;
+
+	return count;
+}
+
+/*
+ * sysfs show: F55 RX electrode-to-pin mapping, one two-digit entry
+ * per electrode ("xx" for 0xff, i.e. unassigned).  Returns -EINVAL if
+ * F55 is not present.
+ */
+static ssize_t test_sysfs_rx_mapping_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int cnt;
+	int count = 0;
+	unsigned char ii;
+	unsigned char rx_num;
+	unsigned char rx_electrodes;
+
+	if (!f55)
+		return -EINVAL;
+
+	rx_electrodes = f55->query.num_of_rx_electrodes;
+
+	for (ii = 0; ii < rx_electrodes; ii++) {
+		rx_num = f55->rx_assignment[ii];
+		if (rx_num == 0xff)
+			cnt = snprintf(buf, PAGE_SIZE - count, "xx ");
+		else
+			cnt = snprintf(buf, PAGE_SIZE - count, "%02u ", rx_num);
+		buf += cnt;
+		count += cnt;
+	}
+
+	/* Trailing newline; count only the single '\n' byte. */
+	snprintf(buf, PAGE_SIZE - count, "\n");
+	count++;
+
+	return count;
+}
+
+/*
+ * sysfs show: number of force-sensing TX electrodes (from F21).
+ * Returns -EINVAL when F21 is absent — sibling handlers treat f21 as
+ * optional (see the "!f21 ||" guards below), so dereferencing it
+ * unconditionally would be a NULL pointer dereference.
+ */
+static ssize_t test_sysfs_num_of_mapped_force_tx_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	if (!f21)
+		return -EINVAL;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", f21->tx_assigned);
+}
+
+/*
+ * sysfs show: number of force-sensing RX electrodes (from F21).
+ * Returns -EINVAL when F21 is absent — sibling handlers treat f21 as
+ * optional (see the "!f21 ||" guards below), so dereferencing it
+ * unconditionally would be a NULL pointer dereference.
+ */
+static ssize_t test_sysfs_num_of_mapped_force_rx_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	if (!f21)
+		return -EINVAL;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", f21->rx_assigned);
+}
+
+/*
+ * sysfs show: force-sensing TX electrode mapping, taken from F55 when
+ * it provides force support, otherwise from F21.  Entries of 0xff are
+ * printed as "xx".  Returns -EINVAL when neither function provides
+ * force support.
+ *
+ * Fix: the original dereferenced f55->has_force without a NULL check.
+ * The entry guard can pass with f55 == NULL when only F21 has force
+ * support, so both branches must verify the pointer first.
+ */
+static ssize_t test_sysfs_force_tx_mapping_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int cnt;
+	int count = 0;
+	unsigned char ii;
+	unsigned char tx_num;
+	unsigned char tx_electrodes;
+
+	if ((!f55 || !f55->has_force) && (!f21 || !f21->has_force))
+		return -EINVAL;
+
+	if (f55 && f55->has_force) {
+		tx_electrodes = f55->query.num_of_tx_electrodes;
+
+		for (ii = 0; ii < tx_electrodes; ii++) {
+			tx_num = f55->force_tx_assignment[ii];
+			if (tx_num == 0xff) {
+				cnt = snprintf(buf, PAGE_SIZE - count, "xx ");
+			} else {
+				cnt = snprintf(buf, PAGE_SIZE - count, "%02u ",
+						tx_num);
+			}
+			buf += cnt;
+			count += cnt;
+		}
+	} else if (f21 && f21->has_force) {
+		tx_electrodes = f21->max_num_of_tx;
+
+		for (ii = 0; ii < tx_electrodes; ii++) {
+			tx_num = f21->force_txrx_assignment[ii];
+			if (tx_num == 0xff) {
+				cnt = snprintf(buf, PAGE_SIZE - count, "xx ");
+			} else {
+				cnt = snprintf(buf, PAGE_SIZE - count, "%02u ",
+						tx_num);
+			}
+			buf += cnt;
+			count += cnt;
+		}
+	}
+
+	/* Trailing newline; count only the single '\n' byte. */
+	snprintf(buf, PAGE_SIZE - count, "\n");
+	count++;
+
+	return count;
+}
+
+/*
+ * sysfs show: force-sensing RX electrode mapping, taken from F55 when
+ * it provides force support, otherwise from F21 (whose RX entries
+ * follow the TX entries in force_txrx_assignment).  Entries of 0xff
+ * are printed as "xx".  Returns -EINVAL when neither function
+ * provides force support.
+ *
+ * Fix: the original dereferenced f55->has_force without a NULL check.
+ * The entry guard can pass with f55 == NULL when only F21 has force
+ * support, so both branches must verify the pointer first.
+ */
+static ssize_t test_sysfs_force_rx_mapping_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int cnt;
+	int count = 0;
+	unsigned char ii;
+	unsigned char offset;
+	unsigned char rx_num;
+	unsigned char rx_electrodes;
+
+	if ((!f55 || !f55->has_force) && (!f21 || !f21->has_force))
+		return -EINVAL;
+
+	if (f55 && f55->has_force) {
+		rx_electrodes = f55->query.num_of_rx_electrodes;
+
+		for (ii = 0; ii < rx_electrodes; ii++) {
+			rx_num = f55->force_rx_assignment[ii];
+			if (rx_num == 0xff)
+				cnt = snprintf(buf, PAGE_SIZE - count, "xx ");
+			else
+				cnt = snprintf(buf, PAGE_SIZE - count, "%02u ",
+						rx_num);
+			buf += cnt;
+			count += cnt;
+		}
+	} else if (f21 && f21->has_force) {
+		/* RX assignments start after the TX block. */
+		offset = f21->max_num_of_tx;
+		rx_electrodes = f21->max_num_of_rx;
+
+		for (ii = offset; ii < (rx_electrodes + offset); ii++) {
+			rx_num = f21->force_txrx_assignment[ii];
+			if (rx_num == 0xff)
+				cnt = snprintf(buf, PAGE_SIZE - count, "xx ");
+			else
+				cnt = snprintf(buf, PAGE_SIZE - count, "%02u ",
+						rx_num);
+			buf += cnt;
+			count += cnt;
+		}
+	}
+
+	/* Trailing newline; count only the single '\n' byte. */
+	snprintf(buf, PAGE_SIZE - count, "\n");
+	count++;
+
+	return count;
+}
+
+/* sysfs show: size in bytes of the most recently captured report. */
+static ssize_t test_sysfs_report_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", f54->report_size);
+}
+
+/*
+ * sysfs show: current test module status (idle/busy/error), read
+ * under status_mutex for a consistent snapshot.
+ */
+static ssize_t test_sysfs_status_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+
+	mutex_lock(&f54->status_mutex);
+
+	retval = snprintf(buf, PAGE_SIZE, "%u\n", f54->status);
+
+	mutex_unlock(&f54->status_mutex);
+
+	return retval;
+}
+
+/*
+ * sysfs store: writing "1" runs test_do_preparation() under
+ * status_mutex, after verifying the module is idle.  Any other value
+ * is rejected with -EINVAL.
+ */
+static ssize_t test_sysfs_do_preparation_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	if (setting != 1)
+		return -EINVAL;
+
+	mutex_lock(&f54->status_mutex);
+
+	retval = test_check_for_idle_status();
+	if (retval < 0)
+		goto exit;
+
+	retval = test_do_preparation();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to do preparation\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	mutex_unlock(&f54->status_mutex);
+
+	return retval;
+}
+
+/*
+ * sysfs store: writing "1" issues a force-calibration command under
+ * status_mutex, after verifying the module is idle.  Any other value
+ * is rejected with -EINVAL.
+ */
+static ssize_t test_sysfs_force_cal_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	if (setting != 1)
+		return -EINVAL;
+
+	mutex_lock(&f54->status_mutex);
+
+	retval = test_check_for_idle_status();
+	if (retval < 0)
+		goto exit;
+
+	retval = test_do_command(COMMAND_FORCE_CAL);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to do force cal\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	mutex_unlock(&f54->status_mutex);
+
+	return retval;
+}
+
+/*
+ * sysfs store: writing "1" kicks off capture of the currently selected
+ * report type — validates the type, routes the F54 interrupt to the
+ * test handler, writes COMMAND_GET_REPORT, marks the module busy, and
+ * arms the watchdog hrtimer that fires test_timeout_work if the
+ * command never completes.
+ */
+static ssize_t test_sysfs_get_report_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned char command;
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	if (setting != 1)
+		return -EINVAL;
+
+	mutex_lock(&f54->status_mutex);
+
+	retval = test_check_for_idle_status();
+	if (retval < 0)
+		goto exit;
+
+	if (!test_report_type_valid(f54->report_type)) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid report type\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	/* Route interrupts to the F54 test handler only. */
+	test_set_interrupt(true);
+
+	command = (unsigned char)COMMAND_GET_REPORT;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			f54->command_base_addr,
+			&command,
+			sizeof(command));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write get report command\n",
+				__func__);
+		goto exit;
+	}
+
+	f54->status = STATUS_BUSY;
+	f54->report_size = 0;
+	f54->data_pos = 0;
+
+	/* Watchdog: fail the capture if it takes too long. */
+	hrtimer_start(&f54->watchdog,
+			ktime_set(GET_REPORT_TIMEOUT_S, 0),
+			HRTIMER_MODE_REL);
+
+	retval = count;
+
+exit:
+	mutex_unlock(&f54->status_mutex);
+
+	return retval;
+}
+
+/*
+ * sysfs store: writing "1" restores normal touch operation after a
+ * test — re-applies the saved no-sleep setting, restores normal
+ * interrupt routing, then undoes per-report-type preparation (clears
+ * the production test bit for AMP raw ADC, or resets the device for
+ * report types that modified CBC/signal-clarity controls).
+ */
+static ssize_t test_sysfs_resume_touch_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned char device_ctrl;
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	if (setting != 1)
+		return -EINVAL;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&device_ctrl,
+			sizeof(device_ctrl));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to restore no sleep setting\n",
+				__func__);
+		return retval;
+	}
+
+	/* Drop the forced NO_SLEEP bit and restore the saved value. */
+	device_ctrl = device_ctrl & ~NO_SLEEP_ON;
+	device_ctrl |= rmi4_data->no_sleep_setting;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&device_ctrl,
+			sizeof(device_ctrl));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to restore no sleep setting\n",
+				__func__);
+		return retval;
+	}
+
+	test_set_interrupt(false);
+
+	if (f54->skip_preparation)
+		return count;
+
+	switch (f54->report_type) {
+	/* These report types required no preparation to undo. */
+	case F54_16BIT_IMAGE:
+	case F54_RAW_16BIT_IMAGE:
+	case F54_SENSOR_SPEED:
+	case F54_ADC_RANGE:
+	case F54_ABS_RAW_CAP:
+	case F54_ABS_DELTA_CAP:
+	case F54_ABS_HYBRID_DELTA_CAP:
+	case F54_ABS_HYBRID_RAW_CAP:
+	case F54_FULL_RAW_CAP_TDDI:
+		break;
+	case F54_AMP_RAW_ADC:
+		/* Clear the start_production_test bit set during
+		 * preparation.
+		 */
+		if (f54->query_49.has_ctrl188) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					f54->control.reg_188->address,
+					f54->control.reg_188->data,
+					sizeof(f54->control.reg_188->data));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to set start production test\n",
+						__func__);
+				return retval;
+			}
+			f54->control.reg_188->start_production_test = 0;
+			retval = synaptics_rmi4_reg_write(rmi4_data,
+					f54->control.reg_188->address,
+					f54->control.reg_188->data,
+					sizeof(f54->control.reg_188->data));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to set start production test\n",
+						__func__);
+				return retval;
+			}
+		}
+		break;
+	default:
+		/* Preparation changed volatile controls; a reset is the
+		 * simplest way to restore them.
+		 */
+		rmi4_data->reset_device(rmi4_data, false);
+	}
+
+	return count;
+}
+
+/*
+ * sysfs store: writing "0" runs a normal AFE calibration, "1" an
+ * image-sensing calibration (values map to enum f54_afe_cal).
+ * Requires Ctrl188 support; other values are rejected with -EINVAL.
+ */
+static ssize_t test_sysfs_do_afe_calibration_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	if (!f54->query_49.has_ctrl188) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: F54_ANALOG_Ctrl188 not found\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (setting == 0 || setting == 1)
+		retval = test_do_afe_calibration((enum f54_afe_cal)setting);
+	else
+		return -EINVAL;
+
+	if (retval)
+		return retval;
+	else
+		return count;
+}
+
+/* sysfs show: currently selected F54 report type. */
+static ssize_t test_sysfs_report_type_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", f54->report_type);
+}
+
+/*
+ * sysfs store: select the F54 report type.  Validates the value
+ * against the driver's supported set, then writes it to the first
+ * F54 data register.  Runs under status_mutex and requires idle.
+ */
+static ssize_t test_sysfs_report_type_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned char data;
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	mutex_lock(&f54->status_mutex);
+
+	retval = test_check_for_idle_status();
+	if (retval < 0)
+		goto exit;
+
+	if (!test_report_type_valid((enum f54_report_types)setting)) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Report type not supported by driver\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	f54->report_type = (enum f54_report_types)setting;
+	data = (unsigned char)setting;
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			f54->data_base_addr,
+			&data,
+			sizeof(data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write report type\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	mutex_unlock(&f54->status_mutex);
+
+	return retval;
+}
+
+/*
+ * sysfs show: read the 16-bit report data FIFO index from the device
+ * (little-endian byte pair converted via batohs) and print it.
+ */
+static ssize_t test_sysfs_fifoindex_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned char data[2];
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f54->data_base_addr + REPORT_INDEX_OFFSET,
+			data,
+			sizeof(data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read report index\n",
+				__func__);
+		return retval;
+	}
+
+	batohs(&f54->fifoindex, data);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", f54->fifoindex);
+}
+
+/*
+ * sysfs store: set the 16-bit report data FIFO index, converting the
+ * value to a little-endian byte pair via hstoba before writing it to
+ * the device.
+ */
+static ssize_t test_sysfs_fifoindex_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned char data[2];
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	f54->fifoindex = setting;
+
+	hstoba(data, (unsigned short)setting);
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			f54->data_base_addr + REPORT_INDEX_OFFSET,
+			data,
+			sizeof(data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write report index\n",
+				__func__);
+		return retval;
+	}
+
+	return count;
+}
+
+/* sysfs show: cached state of the no-auto-calibration flag. */
+static ssize_t test_sysfs_no_auto_cal_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", f54->no_auto_cal);
+}
+
+/*
+ * sysfs store: enable (1) or disable (0) the no-auto-calibration bit
+ * in the first F54 control register via read-modify-write, then cache
+ * the new state.  Values other than 0/1 are rejected with -EINVAL.
+ */
+static ssize_t test_sysfs_no_auto_cal_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned char data;
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	if (setting > 1)
+		return -EINVAL;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f54->control_base_addr,
+			&data,
+			sizeof(data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read no auto cal setting\n",
+				__func__);
+		return retval;
+	}
+
+	if (setting)
+		data |= CONTROL_NO_AUTO_CAL;
+	else
+		data &= ~CONTROL_NO_AUTO_CAL;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			f54->control_base_addr,
+			&data,
+			sizeof(data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write no auto cal setting\n",
+				__func__);
+		return retval;
+	}
+
+	f54->no_auto_cal = (setting == 1);
+
+	return count;
+}
+
+/*
+ * sysfs show: format the captured report buffer as human-readable
+ * text.  The layout depends on the report type: flat indexed lists
+ * for 8-bit and min/max reports, a tx-by-rx matrix for image reports,
+ * and labelled rx/tx rows for absolute-capacitance reports.  Unknown
+ * types fall back to a hex dump.
+ */
+static ssize_t test_sysfs_read_report_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned int ii;
+	unsigned int jj;
+	int cnt;
+	int count = 0;
+	int tx_num = f54->tx_assigned;
+	int rx_num = f54->rx_assigned;
+	char *report_data_8;
+	short *report_data_16;
+	int *report_data_32;
+	unsigned short *report_data_u16;
+	unsigned int *report_data_u32;
+
+	switch (f54->report_type) {
+	case F54_8BIT_IMAGE:
+		/* One signed byte per line: "index: value". */
+		report_data_8 = (char *)f54->report_data;
+		for (ii = 0; ii < f54->report_size; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "%03d: %d\n",
+					ii, *report_data_8);
+			report_data_8++;
+			buf += cnt;
+			count += cnt;
+		}
+		break;
+	case F54_AMP_RAW_ADC:
+		/* Unsigned 16-bit matrix, tx rows by rx columns. */
+		report_data_u16 = (unsigned short *)f54->report_data;
+		cnt = snprintf(buf, PAGE_SIZE - count, "tx = %d\nrx = %d\n",
+				tx_num, rx_num);
+		buf += cnt;
+		count += cnt;
+
+		for (ii = 0; ii < tx_num; ii++) {
+			for (jj = 0; jj < (rx_num - 1); jj++) {
+				cnt = snprintf(buf, PAGE_SIZE - count, "%-4d ",
+						*report_data_u16);
+				report_data_u16++;
+				buf += cnt;
+				count += cnt;
+			}
+			cnt = snprintf(buf, PAGE_SIZE - count, "%-4d\n",
+					*report_data_u16);
+			report_data_u16++;
+			buf += cnt;
+			count += cnt;
+		}
+		break;
+	case F54_16BIT_IMAGE:
+	case F54_RAW_16BIT_IMAGE:
+	case F54_TRUE_BASELINE:
+	case F54_FULL_RAW_CAP:
+	case F54_FULL_RAW_CAP_NO_RX_COUPLING:
+	case F54_SENSOR_SPEED:
+	case F54_AMP_FULL_RAW_CAP:
+	case F54_FULL_RAW_CAP_TDDI:
+		/* Signed 16-bit matrix, tx rows by rx columns. */
+		report_data_16 = (short *)f54->report_data;
+		cnt = snprintf(buf, PAGE_SIZE - count, "tx = %d\nrx = %d\n",
+				tx_num, rx_num);
+		buf += cnt;
+		count += cnt;
+
+		for (ii = 0; ii < tx_num; ii++) {
+			for (jj = 0; jj < (rx_num - 1); jj++) {
+				cnt = snprintf(buf, PAGE_SIZE - count, "%-4d ",
+						*report_data_16);
+				report_data_16++;
+				buf += cnt;
+				count += cnt;
+			}
+			cnt = snprintf(buf, PAGE_SIZE - count, "%-4d\n",
+					*report_data_16);
+			report_data_16++;
+			buf += cnt;
+			count += cnt;
+		}
+		break;
+	case F54_HIGH_RESISTANCE:
+	case F54_FULL_RAW_CAP_MIN_MAX:
+		/* One signed 16-bit value per line. */
+		report_data_16 = (short *)f54->report_data;
+		for (ii = 0; ii < f54->report_size; ii += 2) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "%03d: %d\n",
+					ii / 2, *report_data_16);
+			report_data_16++;
+			buf += cnt;
+			count += cnt;
+		}
+		break;
+	case F54_ABS_RAW_CAP:
+	case F54_ABS_HYBRID_RAW_CAP:
+		/* NOTE(review): f21 is dereferenced without a NULL check
+		 * here; assumes f21 is always allocated by this point —
+		 * confirm against the init path.
+		 */
+		tx_num += f21->tx_assigned;
+		rx_num += f21->rx_assigned;
+		report_data_u32 = (unsigned int *)f54->report_data;
+		/* Header row of rx indices, then the rx values. */
+		cnt = snprintf(buf, PAGE_SIZE - count, "rx ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < rx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "     %2d", ii);
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+
+		cnt = snprintf(buf, PAGE_SIZE - count, "   ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < rx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "  %5u",
+					*report_data_u32);
+			report_data_u32++;
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+
+		/* Header row of tx indices, then the tx values. */
+		cnt = snprintf(buf, PAGE_SIZE - count, "tx ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < tx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "     %2d", ii);
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+
+		cnt = snprintf(buf, PAGE_SIZE - count, "   ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < tx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "  %5u",
+					*report_data_u32);
+			report_data_u32++;
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+		break;
+	case F54_ABS_DELTA_CAP:
+	case F54_ABS_HYBRID_DELTA_CAP:
+		/* Same layout as the raw-cap case, but signed values. */
+		tx_num += f21->tx_assigned;
+		rx_num += f21->rx_assigned;
+		report_data_32 = (int *)f54->report_data;
+		cnt = snprintf(buf, PAGE_SIZE - count, "rx ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < rx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "     %2d", ii);
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+
+		cnt = snprintf(buf, PAGE_SIZE - count, "   ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < rx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "  %5d",
+					*report_data_32);
+			report_data_32++;
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+
+		cnt = snprintf(buf, PAGE_SIZE - count, "tx ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < tx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "     %2d", ii);
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+
+		cnt = snprintf(buf, PAGE_SIZE - count, "   ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < tx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "  %5d",
+					*report_data_32);
+			report_data_32++;
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+		break;
+	default:
+		/* Unknown report type: raw hex dump. */
+		for (ii = 0; ii < f54->report_size; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "%03d: 0x%02x\n",
+					ii, f54->report_data[ii]);
+			buf += cnt;
+			count += cnt;
+		}
+	}
+
+	/* Trailing newline; count only the single '\n' byte. */
+	snprintf(buf, PAGE_SIZE - count, "\n");
+	count++;
+
+	return count;
+}
+
+/*
+ * sysfs store: one-shot capture — chains the report_type,
+ * do_preparation, and get_report stores, polls until the capture
+ * completes (100 ms steps up to GET_REPORT_TIMEOUT_S), then resumes
+ * normal touch operation.  On any failure the device is reset.
+ */
+static ssize_t test_sysfs_read_report_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned char timeout = GET_REPORT_TIMEOUT_S * 10;
+	unsigned char timeout_count;
+	const char cmd[] = {'1', 0};
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = test_sysfs_report_type_store(dev, attr, buf, count);
+	if (retval < 0)
+		goto exit;
+
+	retval = test_sysfs_do_preparation_store(dev, attr, cmd, 1);
+	if (retval < 0)
+		goto exit;
+
+	retval = test_sysfs_get_report_store(dev, attr, cmd, 1);
+	if (retval < 0)
+		goto exit;
+
+	/* Wait for test_report_work to move status off BUSY. */
+	timeout_count = 0;
+	do {
+		if (f54->status != STATUS_BUSY)
+			break;
+		msleep(100);
+		timeout_count++;
+	} while (timeout_count < timeout);
+
+	if ((f54->status != STATUS_IDLE) || (f54->report_size == 0)) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read report\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	retval = test_sysfs_resume_touch_store(dev, attr, cmd, 1);
+	if (retval < 0)
+		goto exit;
+
+	return count;
+
+exit:
+	/* Recover the device from a half-finished test sequence. */
+	rmi4_data->reset_device(rmi4_data, false);
+
+	return retval;
+}
+
+/*
+ * sysfs bin read: stream the captured raw report buffer to userspace.
+ * Tracks the read offset in f54->data_pos so successive reads walk
+ * through the buffer; clamps the final chunk to the report size.
+ * Returns the number of bytes copied or a negative errno.
+ */
+static ssize_t test_sysfs_data_read(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	unsigned int read_size;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	mutex_lock(&f54->status_mutex);
+
+	retval = test_check_for_idle_status();
+	if (retval < 0)
+		goto exit;
+
+	if (!f54->report_data) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Report type %d data not available\n",
+				__func__, f54->report_type);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	/* Clamp the request to the remaining bytes in the report. */
+	if ((f54->data_pos + count) > f54->report_size)
+		read_size = f54->report_size - f54->data_pos;
+	else
+		read_size = min_t(unsigned int, count, f54->report_size);
+
+	retval = secure_memcpy(buf, count, f54->report_data + f54->data_pos,
+			f54->data_buffer_size - f54->data_pos, read_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy report data\n",
+				__func__);
+		goto exit;
+	}
+	f54->data_pos += read_size;
+	retval = read_size;
+
+exit:
+	mutex_unlock(&f54->status_mutex);
+
+	return retval;
+}
+
+/*
+ * Worker that retrieves a completed report: waits for the command
+ * register to clear, computes the report size, (re)allocates the data
+ * buffer if needed, rewinds the report data index to 0, and reads the
+ * whole report into f54->report_data.  The final module status
+ * (STATUS_IDLE on success, STATUS_ERROR otherwise) is published via
+ * f54->status after the mutex is released.
+ */
+static void test_report_work(struct work_struct *work)
+{
+	int retval;
+	unsigned char report_index[2];
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	mutex_lock(&f54->status_mutex);
+
+	if (f54->status != STATUS_BUSY) {
+		retval = f54->status;
+		goto exit;
+	}
+
+	retval = test_wait_for_command_completion();
+	if (retval < 0) {
+		retval = STATUS_ERROR;
+		goto exit;
+	}
+
+	test_set_report_size();
+	if (f54->report_size == 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Report data size = 0\n",
+				__func__);
+		retval = STATUS_ERROR;
+		goto exit;
+	}
+
+	/* Grow the data buffer only when the report does not fit. */
+	if (f54->data_buffer_size < f54->report_size) {
+		if (f54->data_buffer_size)
+			kfree(f54->report_data);
+		f54->report_data = kzalloc(f54->report_size, GFP_KERNEL);
+		if (!f54->report_data) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for data buffer\n",
+					__func__);
+			f54->data_buffer_size = 0;
+			retval = STATUS_ERROR;
+			goto exit;
+		}
+		f54->data_buffer_size = f54->report_size;
+	}
+
+	/* Rewind the report data FIFO to the beginning. */
+	report_index[0] = 0;
+	report_index[1] = 0;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			f54->data_base_addr + REPORT_INDEX_OFFSET,
+			report_index,
+			sizeof(report_index));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write report data index\n",
+				__func__);
+		retval = STATUS_ERROR;
+		goto exit;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f54->data_base_addr + REPORT_DATA_OFFSET,
+			f54->report_data,
+			f54->report_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read report data\n",
+				__func__);
+		retval = STATUS_ERROR;
+		goto exit;
+	}
+
+	retval = STATUS_IDLE;
+
+exit:
+	mutex_unlock(&f54->status_mutex);
+
+	if (retval == STATUS_ERROR)
+		f54->report_size = 0;
+
+	f54->status = retval;
+
+	return;
+}
+
+/*
+ * Tear down the test reporting sysfs interface: attribute group, raw
+ * data bin file, and finally the containing kobject.
+ */
+static void test_remove_sysfs(void)
+{
+	sysfs_remove_group(f54->sysfs_dir, &attr_group);
+	sysfs_remove_bin_file(f54->sysfs_dir, &test_report_data);
+	kobject_put(f54->sysfs_dir);
+
+	return;
+}
+
+/*
+ * Create the test reporting sysfs interface: a folder under the input
+ * device's kobject, the raw report bin file, and the attribute group.
+ * Returns 0 on success, -ENODEV on any failure (partially created
+ * entries are removed).
+ *
+ * Fix: on attribute-group creation failure, the original also called
+ * sysfs_remove_group() for the group that was never added, which
+ * triggers a kernel warning; only the bin file and kobject need to be
+ * undone on that path.
+ */
+static int test_set_sysfs(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	f54->sysfs_dir = kobject_create_and_add(SYSFS_FOLDER_NAME,
+			&rmi4_data->input_dev->dev.kobj);
+	if (!f54->sysfs_dir) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs directory\n",
+				__func__);
+		goto exit_directory;
+	}
+
+	retval = sysfs_create_bin_file(f54->sysfs_dir, &test_report_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs bin file\n",
+				__func__);
+		goto exit_bin_file;
+	}
+
+	retval = sysfs_create_group(f54->sysfs_dir, &attr_group);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs attributes\n",
+				__func__);
+		goto exit_attributes;
+	}
+
+	return 0;
+
+exit_attributes:
+	/* The group was never created; only the bin file needs undoing. */
+	sysfs_remove_bin_file(f54->sysfs_dir, &test_report_data);
+
+exit_bin_file:
+	kobject_put(f54->sysfs_dir);
+
+exit_directory:
+	return -ENODEV;
+}
+
+/*
+ * Release every dynamically allocated F54 control register structure.
+ * kfree(NULL) is a no-op, so registers that were never allocated are
+ * handled without any presence checks.
+ */
+static void test_free_control_mem(void)
+{
+	struct f54_control *control = &f54->control;
+
+	kfree(control->reg_7);
+	kfree(control->reg_41);
+	kfree(control->reg_57);
+	kfree(control->reg_86);
+	kfree(control->reg_88);
+	kfree(control->reg_110);
+	kfree(control->reg_149);
+	kfree(control->reg_188);
+}
+
+/*
+ * Compute the address of F54 data register 31 (f54->data_31.address)
+ * by walking the data register map.  The walk starts one byte past the
+ * report data window and advances the running address for every
+ * optional data register the query registers report as present.
+ * NOTE(review): presence and size of each data register follow the
+ * RMI4 F54 register map -- verify against the matching firmware spec
+ * revision.
+ */
+static void test_set_data(void)
+{
+	unsigned short reg_addr;
+
+	reg_addr = f54->data_base_addr + REPORT_DATA_OFFSET + 1;
+
+	/* data 4 */
+	if (f54->query.has_sense_frequency_control)
+		reg_addr++;
+
+	/* data 5 reserved */
+
+	/* data 6 */
+	if (f54->query.has_interference_metric)
+		reg_addr += 2;
+
+	/* data 7 (bitwise | of one-bit flags is equivalent to || here) */
+	if (f54->query.has_one_byte_report_rate |
+			f54->query.has_two_byte_report_rate)
+		reg_addr++;
+	if (f54->query.has_two_byte_report_rate)
+		reg_addr++;
+
+	/* data 8 */
+	if (f54->query.has_variance_metric)
+		reg_addr += 2;
+
+	/* data 9 */
+	if (f54->query.has_multi_metric_state_machine)
+		reg_addr += 2;
+
+	/* data 10 */
+	if (f54->query.has_multi_metric_state_machine |
+			f54->query.has_noise_state)
+		reg_addr++;
+
+	/* data 11 */
+	if (f54->query.has_status)
+		reg_addr++;
+
+	/* data 12 */
+	if (f54->query.has_slew_metric)
+		reg_addr += 2;
+
+	/* data 13 */
+	if (f54->query.has_multi_metric_state_machine)
+		reg_addr += 2;
+
+	/* data 14 */
+	if (f54->query_13.has_cidim)
+		reg_addr++;
+
+	/* data 15 */
+	if (f54->query_13.has_rail_im)
+		reg_addr++;
+
+	/* data 16 */
+	if (f54->query_13.has_noise_mitigation_enhancement)
+		reg_addr++;
+
+	/* data 17 */
+	if (f54->query_16.has_data17)
+		reg_addr++;
+
+	/* data 18 */
+	if (f54->query_21.has_query24_data18)
+		reg_addr++;
+
+	/* data 19 */
+	if (f54->query_21.has_data19)
+		reg_addr++;
+
+	/* data_20 */
+	if (f54->query_25.has_ctrl109)
+		reg_addr++;
+
+	/* data 21 */
+	if (f54->query_27.has_data21)
+		reg_addr++;
+
+	/* data 22 */
+	if (f54->query_27.has_data22)
+		reg_addr++;
+
+	/* data 23 */
+	if (f54->query_29.has_data23)
+		reg_addr++;
+
+	/* data 24 */
+	if (f54->query_32.has_data24)
+		reg_addr++;
+
+	/* data 25 */
+	if (f54->query_35.has_data25)
+		reg_addr++;
+
+	/* data 26 */
+	if (f54->query_35.has_data26)
+		reg_addr++;
+
+	/* data 27 */
+	if (f54->query_46.has_data27)
+		reg_addr++;
+
+	/* data 28 */
+	if (f54->query_46.has_data28)
+		reg_addr++;
+
+	/* data 29 30 reserved */
+
+	/* data 31: the only data register whose address is recorded */
+	if (f54->query_49.has_data31) {
+		f54->data_31.address = reg_addr;
+		reg_addr++;
+	}
+
+	return;
+}
+
+/*
+ * Walk the F54 control register map, using the query registers to
+ * determine which optional control registers are present, in order to
+ * compute the addresses of the control registers this driver accesses
+ * directly (reg_7, reg_41, reg_57, reg_86, reg_88, reg_110, reg_149,
+ * reg_188).  A structure is allocated for each such register that is
+ * present; all other control registers are only skipped over so the
+ * running register address stays in step with the hardware map.
+ *
+ * Returns 0 on success, a negative errno if reading control 86 fails,
+ * or -ENOMEM on allocation failure.
+ * NOTE(review): reg_* structs allocated before a failure are not freed
+ * here; presumably the caller cleans up via test_free_control_mem()
+ * -- confirm.
+ */
+static int test_set_controls(void)
+{
+	int retval;
+	unsigned char length;
+	unsigned char num_of_sensing_freqs;
+	unsigned short reg_addr = f54->control_base_addr;
+	struct f54_control *control = &f54->control;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	num_of_sensing_freqs = f54->query.number_of_sensing_frequencies;
+
+	/* control 0 */
+	reg_addr += CONTROL_0_SIZE;
+
+	/* control 1 */
+	if ((f54->query.touch_controller_family == 0) ||
+			(f54->query.touch_controller_family == 1))
+		reg_addr += CONTROL_1_SIZE;
+
+	/* control 2 */
+	reg_addr += CONTROL_2_SIZE;
+
+	/* control 3 */
+	if (f54->query.has_pixel_touch_threshold_adjustment)
+		reg_addr += CONTROL_3_SIZE;
+
+	/* controls 4 5 6 */
+	if ((f54->query.touch_controller_family == 0) ||
+			(f54->query.touch_controller_family == 1))
+		reg_addr += CONTROL_4_6_SIZE;
+
+	/* control 7: address recorded for later direct access */
+	if (f54->query.touch_controller_family == 1) {
+		control->reg_7 = kzalloc(sizeof(*(control->reg_7)),
+				GFP_KERNEL);
+		if (!control->reg_7)
+			goto exit_no_mem;
+		control->reg_7->address = reg_addr;
+		reg_addr += CONTROL_7_SIZE;
+	}
+
+	/* controls 8 9 */
+	if ((f54->query.touch_controller_family == 0) ||
+			(f54->query.touch_controller_family == 1))
+		reg_addr += CONTROL_8_9_SIZE;
+
+	/* control 10 */
+	if (f54->query.has_interference_metric)
+		reg_addr += CONTROL_10_SIZE;
+
+	/* control 11 */
+	if (f54->query.has_ctrl11)
+		reg_addr += CONTROL_11_SIZE;
+
+	/* controls 12 13 */
+	if (f54->query.has_relaxation_control)
+		reg_addr += CONTROL_12_13_SIZE;
+
+	/* controls 14 15 16 (15 and 16 repeat per rx/tx electrode) */
+	if (f54->query.has_sensor_assignment) {
+		reg_addr += CONTROL_14_SIZE;
+		reg_addr += CONTROL_15_SIZE * f54->query.num_of_rx_electrodes;
+		reg_addr += CONTROL_16_SIZE * f54->query.num_of_tx_electrodes;
+	}
+
+	/* controls 17 18 19 (repeat per sensing frequency) */
+	if (f54->query.has_sense_frequency_control) {
+		reg_addr += CONTROL_17_SIZE * num_of_sensing_freqs;
+		reg_addr += CONTROL_18_SIZE * num_of_sensing_freqs;
+		reg_addr += CONTROL_19_SIZE * num_of_sensing_freqs;
+	}
+
+	/* control 20 */
+	reg_addr += CONTROL_20_SIZE;
+
+	/* control 21 */
+	if (f54->query.has_sense_frequency_control)
+		reg_addr += CONTROL_21_SIZE;
+
+	/* controls 22 23 24 25 26 */
+	if (f54->query.has_firmware_noise_mitigation)
+		reg_addr += CONTROL_22_26_SIZE;
+
+	/* control 27 */
+	if (f54->query.has_iir_filter)
+		reg_addr += CONTROL_27_SIZE;
+
+	/* control 28 */
+	if (f54->query.has_firmware_noise_mitigation)
+		reg_addr += CONTROL_28_SIZE;
+
+	/* control 29 */
+	if (f54->query.has_cmn_removal)
+		reg_addr += CONTROL_29_SIZE;
+
+	/* control 30 */
+	if (f54->query.has_cmn_maximum)
+		reg_addr += CONTROL_30_SIZE;
+
+	/* control 31 */
+	if (f54->query.has_touch_hysteresis)
+		reg_addr += CONTROL_31_SIZE;
+
+	/* controls 32 33 34 35 */
+	if (f54->query.has_edge_compensation)
+		reg_addr += CONTROL_32_35_SIZE;
+
+	/* control 36 (length depends on curve compensation mode) */
+	if ((f54->query.curve_compensation_mode == 1) ||
+			(f54->query.curve_compensation_mode == 2)) {
+		if (f54->query.curve_compensation_mode == 1) {
+			length = max(f54->query.num_of_rx_electrodes,
+					f54->query.num_of_tx_electrodes);
+		} else if (f54->query.curve_compensation_mode == 2) {
+			length = f54->query.num_of_rx_electrodes;
+		}
+		/* length is set in both reachable branches (mode 1 or 2) */
+		reg_addr += CONTROL_36_SIZE * length;
+	}
+
+	/* control 37 */
+	if (f54->query.curve_compensation_mode == 2)
+		reg_addr += CONTROL_37_SIZE * f54->query.num_of_tx_electrodes;
+
+	/* controls 38 39 40 (repeat per sensing frequency) */
+	if (f54->query.has_per_frequency_noise_control) {
+		reg_addr += CONTROL_38_SIZE * num_of_sensing_freqs;
+		reg_addr += CONTROL_39_SIZE * num_of_sensing_freqs;
+		reg_addr += CONTROL_40_SIZE * num_of_sensing_freqs;
+	}
+
+	/* control 41: address recorded for later direct access */
+	if (f54->query.has_signal_clarity) {
+		control->reg_41 = kzalloc(sizeof(*(control->reg_41)),
+				GFP_KERNEL);
+		if (!control->reg_41)
+			goto exit_no_mem;
+		control->reg_41->address = reg_addr;
+		reg_addr += CONTROL_41_SIZE;
+	}
+
+	/* control 42 */
+	if (f54->query.has_variance_metric)
+		reg_addr += CONTROL_42_SIZE;
+
+	/* controls 43 44 45 46 47 48 49 50 51 52 53 54 */
+	if (f54->query.has_multi_metric_state_machine)
+		reg_addr += CONTROL_43_54_SIZE;
+
+	/* controls 55 56 */
+	if (f54->query.has_0d_relaxation_control)
+		reg_addr += CONTROL_55_56_SIZE;
+
+	/* control 57: address recorded for later direct access */
+	if (f54->query.has_0d_acquisition_control) {
+		control->reg_57 = kzalloc(sizeof(*(control->reg_57)),
+				GFP_KERNEL);
+		if (!control->reg_57)
+			goto exit_no_mem;
+		control->reg_57->address = reg_addr;
+		reg_addr += CONTROL_57_SIZE;
+	}
+
+	/* control 58 */
+	if (f54->query.has_0d_acquisition_control)
+		reg_addr += CONTROL_58_SIZE;
+
+	/* control 59 */
+	if (f54->query.has_h_blank)
+		reg_addr += CONTROL_59_SIZE;
+
+	/* controls 60 61 62 */
+	if ((f54->query.has_h_blank) ||
+			(f54->query.has_v_blank) ||
+			(f54->query.has_long_h_blank))
+		reg_addr += CONTROL_60_62_SIZE;
+
+	/* control 63 */
+	if ((f54->query.has_h_blank) ||
+			(f54->query.has_v_blank) ||
+			(f54->query.has_long_h_blank) ||
+			(f54->query.has_slew_metric) ||
+			(f54->query.has_slew_option) ||
+			(f54->query.has_noise_mitigation2))
+		reg_addr += CONTROL_63_SIZE;
+
+	/* controls 64 65 66 67 (7 instances when h-blank is present) */
+	if (f54->query.has_h_blank)
+		reg_addr += CONTROL_64_67_SIZE * 7;
+	else if ((f54->query.has_v_blank) ||
+			(f54->query.has_long_h_blank))
+		reg_addr += CONTROL_64_67_SIZE;
+
+	/* controls 68 69 70 71 72 73 (TDDI HIC parts skip 68 and 69) */
+	if ((f54->query.has_h_blank) ||
+			(f54->query.has_v_blank) ||
+			(f54->query.has_long_h_blank)) {
+		if (f54->query_68.is_tddi_hic)
+			reg_addr += CONTROL_70_73_SIZE;
+		else
+			reg_addr += CONTROL_68_73_SIZE;
+	}
+
+	/* control 74 */
+	if (f54->query.has_slew_metric)
+		reg_addr += CONTROL_74_SIZE;
+
+	/* control 75 (repeats per sensing frequency) */
+	if (f54->query.has_enhanced_stretch)
+		reg_addr += CONTROL_75_SIZE * num_of_sensing_freqs;
+
+	/* control 76 */
+	if (f54->query.has_startup_fast_relaxation)
+		reg_addr += CONTROL_76_SIZE;
+
+	/* controls 77 78 */
+	if (f54->query.has_esd_control)
+		reg_addr += CONTROL_77_78_SIZE;
+
+	/* controls 79 80 81 82 83 */
+	if (f54->query.has_noise_mitigation2)
+		reg_addr += CONTROL_79_83_SIZE;
+
+	/* controls 84 85 */
+	if (f54->query.has_energy_ratio_relaxation)
+		reg_addr += CONTROL_84_85_SIZE;
+
+	/* control 86: address recorded and current value read back */
+	if (f54->query_13.has_ctrl86) {
+		control->reg_86 = kzalloc(sizeof(*(control->reg_86)),
+				GFP_KERNEL);
+		if (!control->reg_86)
+			goto exit_no_mem;
+		control->reg_86->address = reg_addr;
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->control.reg_86->address,
+				f54->control.reg_86->data,
+				sizeof(f54->control.reg_86->data));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read sense display ratio\n",
+					__func__);
+			return retval;
+		}
+		reg_addr += CONTROL_86_SIZE;
+	}
+
+	/* control 87 */
+	if (f54->query_13.has_ctrl87)
+		reg_addr += CONTROL_87_SIZE;
+
+	/* control 88: address recorded for later direct access */
+	if (f54->query.has_ctrl88) {
+		control->reg_88 = kzalloc(sizeof(*(control->reg_88)),
+				GFP_KERNEL);
+		if (!control->reg_88)
+			goto exit_no_mem;
+		control->reg_88->address = reg_addr;
+		reg_addr += CONTROL_88_SIZE;
+	}
+
+	/* control 89 */
+	if (f54->query_13.has_cidim ||
+			f54->query_13.has_noise_mitigation_enhancement ||
+			f54->query_13.has_rail_im)
+		reg_addr += CONTROL_89_SIZE;
+
+	/* control 90 */
+	if (f54->query_15.has_ctrl90)
+		reg_addr += CONTROL_90_SIZE;
+
+	/* control 91 */
+	if (f54->query_21.has_ctrl91)
+		reg_addr += CONTROL_91_SIZE;
+
+	/* control 92 */
+	if (f54->query_16.has_ctrl92)
+		reg_addr += CONTROL_92_SIZE;
+
+	/* control 93 */
+	if (f54->query_16.has_ctrl93)
+		reg_addr += CONTROL_93_SIZE;
+
+	/* control 94 */
+	if (f54->query_16.has_ctrl94_query18)
+		reg_addr += CONTROL_94_SIZE;
+
+	/* control 95 */
+	if (f54->query_16.has_ctrl95_query19)
+		reg_addr += CONTROL_95_SIZE;
+
+	/* control 96 */
+	if (f54->query_21.has_ctrl96)
+		reg_addr += CONTROL_96_SIZE;
+
+	/* control 97 */
+	if (f54->query_21.has_ctrl97)
+		reg_addr += CONTROL_97_SIZE;
+
+	/* control 98 */
+	if (f54->query_21.has_ctrl98)
+		reg_addr += CONTROL_98_SIZE;
+
+	/* control 99 */
+	if (f54->query.touch_controller_family == 2)
+		reg_addr += CONTROL_99_SIZE;
+
+	/* control 100 */
+	if (f54->query_16.has_ctrl100)
+		reg_addr += CONTROL_100_SIZE;
+
+	/* control 101 */
+	if (f54->query_22.has_ctrl101)
+		reg_addr += CONTROL_101_SIZE;
+
+	/* control 102 */
+	if (f54->query_23.has_ctrl102)
+		reg_addr += CONTROL_102_SIZE;
+
+	/* control 103 (presence also disables report preparation) */
+	if (f54->query_22.has_ctrl103_query26) {
+		f54->skip_preparation = true;
+		reg_addr += CONTROL_103_SIZE;
+	}
+
+	/* control 104 */
+	if (f54->query_22.has_ctrl104)
+		reg_addr += CONTROL_104_SIZE;
+
+	/* control 105 */
+	if (f54->query_22.has_ctrl105)
+		reg_addr += CONTROL_105_SIZE;
+
+	/* control 106 */
+	if (f54->query_25.has_ctrl106)
+		reg_addr += CONTROL_106_SIZE;
+
+	/* control 107 */
+	if (f54->query_25.has_ctrl107)
+		reg_addr += CONTROL_107_SIZE;
+
+	/* control 108 */
+	if (f54->query_25.has_ctrl108)
+		reg_addr += CONTROL_108_SIZE;
+
+	/* control 109 */
+	if (f54->query_25.has_ctrl109)
+		reg_addr += CONTROL_109_SIZE;
+
+	/* control 110: address recorded for later direct access */
+	if (f54->query_27.has_ctrl110) {
+		control->reg_110 = kzalloc(sizeof(*(control->reg_110)),
+				GFP_KERNEL);
+		if (!control->reg_110)
+			goto exit_no_mem;
+		control->reg_110->address = reg_addr;
+		reg_addr += CONTROL_110_SIZE;
+	}
+
+	/* control 111 */
+	if (f54->query_27.has_ctrl111)
+		reg_addr += CONTROL_111_SIZE;
+
+	/* control 112 */
+	if (f54->query_27.has_ctrl112)
+		reg_addr += CONTROL_112_SIZE;
+
+	/* control 113 */
+	if (f54->query_27.has_ctrl113)
+		reg_addr += CONTROL_113_SIZE;
+
+	/* control 114 */
+	if (f54->query_27.has_ctrl114)
+		reg_addr += CONTROL_114_SIZE;
+
+	/* control 115 */
+	if (f54->query_29.has_ctrl115)
+		reg_addr += CONTROL_115_SIZE;
+
+	/* control 116 */
+	if (f54->query_29.has_ctrl116)
+		reg_addr += CONTROL_116_SIZE;
+
+	/* control 117 */
+	if (f54->query_29.has_ctrl117)
+		reg_addr += CONTROL_117_SIZE;
+
+	/* control 118 */
+	if (f54->query_30.has_ctrl118)
+		reg_addr += CONTROL_118_SIZE;
+
+	/* control 119 */
+	if (f54->query_30.has_ctrl119)
+		reg_addr += CONTROL_119_SIZE;
+
+	/* control 120 */
+	if (f54->query_30.has_ctrl120)
+		reg_addr += CONTROL_120_SIZE;
+
+	/* control 121 */
+	if (f54->query_30.has_ctrl121)
+		reg_addr += CONTROL_121_SIZE;
+
+	/* control 122 */
+	if (f54->query_30.has_ctrl122_query31)
+		reg_addr += CONTROL_122_SIZE;
+
+	/* control 123 */
+	if (f54->query_30.has_ctrl123)
+		reg_addr += CONTROL_123_SIZE;
+
+	/* control 124 */
+	if (f54->query_30.has_ctrl124)
+		reg_addr += CONTROL_124_SIZE;
+
+	/* control 125 */
+	if (f54->query_32.has_ctrl125)
+		reg_addr += CONTROL_125_SIZE;
+
+	/* control 126 */
+	if (f54->query_32.has_ctrl126)
+		reg_addr += CONTROL_126_SIZE;
+
+	/* control 127 */
+	if (f54->query_32.has_ctrl127)
+		reg_addr += CONTROL_127_SIZE;
+
+	/* control 128 */
+	if (f54->query_33.has_ctrl128)
+		reg_addr += CONTROL_128_SIZE;
+
+	/* control 129 */
+	if (f54->query_33.has_ctrl129)
+		reg_addr += CONTROL_129_SIZE;
+
+	/* control 130 */
+	if (f54->query_33.has_ctrl130)
+		reg_addr += CONTROL_130_SIZE;
+
+	/* control 131 */
+	if (f54->query_33.has_ctrl131)
+		reg_addr += CONTROL_131_SIZE;
+
+	/* control 132 */
+	if (f54->query_33.has_ctrl132)
+		reg_addr += CONTROL_132_SIZE;
+
+	/* control 133 */
+	if (f54->query_33.has_ctrl133)
+		reg_addr += CONTROL_133_SIZE;
+
+	/* control 134 */
+	if (f54->query_33.has_ctrl134)
+		reg_addr += CONTROL_134_SIZE;
+
+	/* control 135 */
+	if (f54->query_35.has_ctrl135)
+		reg_addr += CONTROL_135_SIZE;
+
+	/* control 136 */
+	if (f54->query_35.has_ctrl136)
+		reg_addr += CONTROL_136_SIZE;
+
+	/* control 137 */
+	if (f54->query_35.has_ctrl137)
+		reg_addr += CONTROL_137_SIZE;
+
+	/* control 138 */
+	if (f54->query_35.has_ctrl138)
+		reg_addr += CONTROL_138_SIZE;
+
+	/* control 139 */
+	if (f54->query_35.has_ctrl139)
+		reg_addr += CONTROL_139_SIZE;
+
+	/* control 140 */
+	if (f54->query_35.has_ctrl140)
+		reg_addr += CONTROL_140_SIZE;
+
+	/* control 141 */
+	if (f54->query_36.has_ctrl141)
+		reg_addr += CONTROL_141_SIZE;
+
+	/* control 142 */
+	if (f54->query_36.has_ctrl142)
+		reg_addr += CONTROL_142_SIZE;
+
+	/* control 143 */
+	if (f54->query_36.has_ctrl143)
+		reg_addr += CONTROL_143_SIZE;
+
+	/* control 144 */
+	if (f54->query_36.has_ctrl144)
+		reg_addr += CONTROL_144_SIZE;
+
+	/* control 145 */
+	if (f54->query_36.has_ctrl145)
+		reg_addr += CONTROL_145_SIZE;
+
+	/* control 146 */
+	if (f54->query_36.has_ctrl146)
+		reg_addr += CONTROL_146_SIZE;
+
+	/* control 147 */
+	if (f54->query_38.has_ctrl147)
+		reg_addr += CONTROL_147_SIZE;
+
+	/* control 148 */
+	if (f54->query_38.has_ctrl148)
+		reg_addr += CONTROL_148_SIZE;
+
+	/* control 149: address recorded for later direct access */
+	if (f54->query_38.has_ctrl149) {
+		control->reg_149 = kzalloc(sizeof(*(control->reg_149)),
+				GFP_KERNEL);
+		if (!control->reg_149)
+			goto exit_no_mem;
+		control->reg_149->address = reg_addr;
+		reg_addr += CONTROL_149_SIZE;
+	}
+
+	/* control 150 */
+	if (f54->query_38.has_ctrl150)
+		reg_addr += CONTROL_150_SIZE;
+
+	/* control 151 */
+	if (f54->query_38.has_ctrl151)
+		reg_addr += CONTROL_151_SIZE;
+
+	/* control 152 */
+	if (f54->query_38.has_ctrl152)
+		reg_addr += CONTROL_152_SIZE;
+
+	/* control 153 */
+	if (f54->query_38.has_ctrl153)
+		reg_addr += CONTROL_153_SIZE;
+
+	/* control 154 */
+	if (f54->query_39.has_ctrl154)
+		reg_addr += CONTROL_154_SIZE;
+
+	/* control 155 */
+	if (f54->query_39.has_ctrl155)
+		reg_addr += CONTROL_155_SIZE;
+
+	/* control 156 */
+	if (f54->query_39.has_ctrl156)
+		reg_addr += CONTROL_156_SIZE;
+
+	/* controls 157 158 */
+	if (f54->query_39.has_ctrl157_ctrl158)
+		reg_addr += CONTROL_157_158_SIZE;
+
+	/* controls 159 to 162 reserved */
+
+	/* control 163 */
+	if (f54->query_40.has_ctrl163_query41)
+		reg_addr += CONTROL_163_SIZE;
+
+	/* control 164 reserved */
+
+	/* control 165 */
+	if (f54->query_40.has_ctrl165_query42)
+		reg_addr += CONTROL_165_SIZE;
+
+	/* control 166 */
+	if (f54->query_40.has_ctrl166)
+		reg_addr += CONTROL_166_SIZE;
+
+	/* control 167 */
+	if (f54->query_40.has_ctrl167)
+		reg_addr += CONTROL_167_SIZE;
+
+	/* control 168 */
+	if (f54->query_40.has_ctrl168)
+		reg_addr += CONTROL_168_SIZE;
+
+	/* control 169 */
+	if (f54->query_40.has_ctrl169)
+		reg_addr += CONTROL_169_SIZE;
+
+	/* control 170 reserved */
+
+	/* control 171 */
+	if (f54->query_43.has_ctrl171)
+		reg_addr += CONTROL_171_SIZE;
+
+	/* control 172 */
+	if (f54->query_43.has_ctrl172_query44_query45)
+		reg_addr += CONTROL_172_SIZE;
+
+	/* control 173 */
+	if (f54->query_43.has_ctrl173)
+		reg_addr += CONTROL_173_SIZE;
+
+	/* control 174 */
+	if (f54->query_43.has_ctrl174)
+		reg_addr += CONTROL_174_SIZE;
+
+	/* control 175 */
+	if (f54->query_43.has_ctrl175)
+		reg_addr += CONTROL_175_SIZE;
+
+	/* control 176 */
+	if (f54->query_46.has_ctrl176)
+		reg_addr += CONTROL_176_SIZE;
+
+	/* controls 177 178 */
+	if (f54->query_46.has_ctrl177_ctrl178)
+		reg_addr += CONTROL_177_178_SIZE;
+
+	/* control 179 */
+	if (f54->query_46.has_ctrl179)
+		reg_addr += CONTROL_179_SIZE;
+
+	/* controls 180 to 181 reserved */
+
+	/* control 182 */
+	if (f54->query_47.has_ctrl182)
+		reg_addr += CONTROL_182_SIZE;
+
+	/* control 183 */
+	if (f54->query_47.has_ctrl183)
+		reg_addr += CONTROL_183_SIZE;
+
+	/* control 184 reserved */
+
+	/* control 185 */
+	if (f54->query_47.has_ctrl185)
+		reg_addr += CONTROL_185_SIZE;
+
+	/* control 186 */
+	if (f54->query_47.has_ctrl186)
+		reg_addr += CONTROL_186_SIZE;
+
+	/* control 187 */
+	if (f54->query_47.has_ctrl187)
+		reg_addr += CONTROL_187_SIZE;
+
+	/* control 188: address recorded for later direct access */
+	if (f54->query_49.has_ctrl188) {
+		control->reg_188 = kzalloc(sizeof(*(control->reg_188)),
+				GFP_KERNEL);
+		if (!control->reg_188)
+			goto exit_no_mem;
+		control->reg_188->address = reg_addr;
+		reg_addr += CONTROL_188_SIZE;
+	}
+
+	return 0;
+
+exit_no_mem:
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Failed to alloc mem for control registers\n",
+			__func__);
+	return -ENOMEM;
+}
+
+/*
+ * Read the chain of F54 query registers.  The presence of each
+ * optional query register is indicated by bits in earlier query
+ * registers, so the register offset is advanced conditionally as the
+ * chain is walked.  Registers the driver does not consume are only
+ * skipped over to keep the offset in step with the hardware map.
+ *
+ * Returns 0 on success or a negative errno on a register read failure.
+ */
+static int test_set_queries(void)
+{
+	int retval;
+	unsigned char offset;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f54->query_base_addr,
+			f54->query.data,
+			sizeof(f54->query.data));
+	if (retval < 0)
+		return retval;
+
+	offset = sizeof(f54->query.data);
+
+	/* query 12 */
+	if (f54->query.has_sense_frequency_control == 0)
+		offset -= 1;
+
+	/* query 13 */
+	if (f54->query.has_query13) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_13.data,
+				sizeof(f54->query_13.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 14 */
+	if (f54->query_13.has_ctrl87)
+		offset += 1;
+
+	/* query 15 */
+	if (f54->query.has_query15) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_15.data,
+				sizeof(f54->query_15.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 16 */
+	if (f54->query_15.has_query16) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_16.data,
+				sizeof(f54->query_16.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 17 */
+	if (f54->query_16.has_query17)
+		offset += 1;
+
+	/* query 18 */
+	if (f54->query_16.has_ctrl94_query18)
+		offset += 1;
+
+	/* query 19 */
+	if (f54->query_16.has_ctrl95_query19)
+		offset += 1;
+
+	/* query 20 */
+	if (f54->query_15.has_query20)
+		offset += 1;
+
+	/* query 21 */
+	if (f54->query_15.has_query21) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_21.data,
+				sizeof(f54->query_21.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 22 */
+	if (f54->query_15.has_query22) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_22.data,
+				sizeof(f54->query_22.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 23 */
+	if (f54->query_22.has_query23) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_23.data,
+				sizeof(f54->query_23.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 24 */
+	if (f54->query_21.has_query24_data18)
+		offset += 1;
+
+	/* query 25 */
+	if (f54->query_15.has_query25) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_25.data,
+				sizeof(f54->query_25.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 26 */
+	if (f54->query_22.has_ctrl103_query26)
+		offset += 1;
+
+	/* query 27 */
+	if (f54->query_25.has_query27) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_27.data,
+				sizeof(f54->query_27.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 28 */
+	if (f54->query_22.has_query28)
+		offset += 1;
+
+	/* query 29 */
+	if (f54->query_27.has_query29) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_29.data,
+				sizeof(f54->query_29.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 30 */
+	if (f54->query_29.has_query30) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_30.data,
+				sizeof(f54->query_30.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 31 */
+	if (f54->query_30.has_ctrl122_query31)
+		offset += 1;
+
+	/* query 32 */
+	if (f54->query_30.has_query32) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_32.data,
+				sizeof(f54->query_32.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 33 */
+	if (f54->query_32.has_query33) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_33.data,
+				sizeof(f54->query_33.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 34 */
+	if (f54->query_32.has_query34)
+		offset += 1;
+
+	/* query 35 */
+	if (f54->query_32.has_query35) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_35.data,
+				sizeof(f54->query_35.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 36 */
+	if (f54->query_33.has_query36) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_36.data,
+				sizeof(f54->query_36.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 37 */
+	if (f54->query_36.has_query37)
+		offset += 1;
+
+	/* query 38 */
+	if (f54->query_36.has_query38) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_38.data,
+				sizeof(f54->query_38.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 39 */
+	if (f54->query_38.has_query39) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_39.data,
+				sizeof(f54->query_39.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 40 */
+	if (f54->query_39.has_query40) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_40.data,
+				sizeof(f54->query_40.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 41 */
+	if (f54->query_40.has_ctrl163_query41)
+		offset += 1;
+
+	/* query 42 */
+	if (f54->query_40.has_ctrl165_query42)
+		offset += 1;
+
+	/* query 43 */
+	if (f54->query_40.has_query43) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_43.data,
+				sizeof(f54->query_43.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* queries 44 45 */
+	if (f54->query_43.has_ctrl172_query44_query45)
+		offset += 2;
+
+	/* query 46 */
+	if (f54->query_43.has_query46) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_46.data,
+				sizeof(f54->query_46.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 47 */
+	if (f54->query_46.has_query47) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_47.data,
+				sizeof(f54->query_47.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 48 reserved */
+
+	/* query 49 */
+	if (f54->query_47.has_query49) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_49.data,
+				sizeof(f54->query_49.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 50 */
+	if (f54->query_49.has_query50) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_50.data,
+				sizeof(f54->query_50.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 51 */
+	if (f54->query_50.has_query51) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_51.data,
+				sizeof(f54->query_51.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 53 54 */
+	if (f54->query_51.has_query53_query54_ctrl198)
+		offset += 2;
+
+	/* query 55 */
+	if (f54->query_51.has_query55) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_55.data,
+				sizeof(f54->query_55.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 56 */
+	if (f54->query_55.has_query56)
+		offset += 1;
+
+	/* query 57 */
+	if (f54->query_55.has_query57) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_57.data,
+				sizeof(f54->query_57.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 58 */
+	if (f54->query_57.has_query58) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_58.data,
+				sizeof(f54->query_58.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 59 */
+	if (f54->query_58.has_query59)
+		offset += 1;
+
+	/* query 60 */
+	if (f54->query_58.has_query60)
+		offset += 1;
+
+	/* query 61 */
+	if (f54->query_58.has_query61) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_61.data,
+				sizeof(f54->query_61.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 62 63 */
+	if (f54->query_61.has_ctrl215_query62_query63)
+		offset += 2;
+
+	/* query 64 */
+	if (f54->query_61.has_query64) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_64.data,
+				sizeof(f54->query_64.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 65 */
+	if (f54->query_64.has_query65) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_65.data,
+				sizeof(f54->query_65.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 66 */
+	if (f54->query_65.has_query66_ctrl231)
+		offset += 1;
+
+	/* query 67 */
+	if (f54->query_65.has_query67) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_67.data,
+				sizeof(f54->query_67.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 68 */
+	if (f54->query_67.has_query68) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_68.data,
+				sizeof(f54->query_68.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 69 */
+	if (f54->query_68.has_query69) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_69.data,
+				sizeof(f54->query_69.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Record the page-extended F54 register base addresses from the PDT
+ * entry and compute the interrupt register index and interrupt source
+ * mask for this function.
+ */
+static void test_f54_set_regs(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn_desc *fd,
+		unsigned int intr_count,
+		unsigned char page)
+{
+	unsigned char bit;
+	unsigned char first_bit;
+
+	f54->query_base_addr = fd->query_base_addr | (page << 8);
+	f54->control_base_addr = fd->ctrl_base_addr | (page << 8);
+	f54->data_base_addr = fd->data_base_addr | (page << 8);
+	f54->command_base_addr = fd->cmd_base_addr | (page << 8);
+
+	/* Interrupt register index: one register covers 8 sources */
+	f54->intr_reg_num = (intr_count + 7) / 8;
+	if (f54->intr_reg_num != 0)
+		f54->intr_reg_num -= 1;
+
+	/* Set one mask bit per interrupt source owned by this function */
+	first_bit = intr_count % 8;
+	f54->intr_mask = 0;
+	for (bit = first_bit; bit < (fd->intr_src_count + first_bit); bit++)
+		f54->intr_mask |= 1 << bit;
+}
+
+/*
+ * Walk the F55 control register map to compute the offsets (relative
+ * to the F55 control base address) of the AFE mux controls (ctrl
+ * 43/44) and the force sensing tx/rx assignment controls (ctrl 45/46).
+ * Registers reported absent by the query registers are skipped.
+ * Always returns 0.
+ */
+static int test_f55_set_controls(void)
+{
+	unsigned char offset = 0;
+
+	/* controls 0 1 2 */
+	if (f55->query.has_sensor_assignment)
+		offset += 3;
+
+	/* control 3 */
+	if (f55->query.has_edge_compensation)
+		offset++;
+
+	/* control 4 */
+	if (f55->query.curve_compensation_mode == 0x1 ||
+			f55->query.curve_compensation_mode == 0x2)
+		offset++;
+
+	/* control 5 */
+	if (f55->query.curve_compensation_mode == 0x2)
+		offset++;
+
+	/* control 6 */
+	if (f55->query.has_ctrl6)
+		offset++;
+
+	/* control 7 */
+	if (f55->query.has_alternate_transmitter_assignment)
+		offset++;
+
+	/* control 8 */
+	if (f55->query_3.has_ctrl8)
+		offset++;
+
+	/* control 9 */
+	if (f55->query_3.has_ctrl9)
+		offset++;
+
+	/* control 10 */
+	if (f55->query_5.has_corner_compensation)
+		offset++;
+
+	/* control 11 */
+	if (f55->query.curve_compensation_mode == 0x3)
+		offset++;
+
+	/* control 12 */
+	if (f55->query_5.has_ctrl12)
+		offset++;
+
+	/* control 13 */
+	if (f55->query_5.has_ctrl13)
+		offset++;
+
+	/* control 14 */
+	if (f55->query_5.has_ctrl14)
+		offset++;
+
+	/* control 15 */
+	if (f55->query_5.has_basis_function)
+		offset++;
+
+	/* control 16 */
+	if (f55->query_17.has_ctrl16)
+		offset++;
+
+	/* control 17 */
+	if (f55->query_17.has_ctrl17)
+		offset++;
+
+	/* controls 18 19 */
+	if (f55->query_17.has_ctrl18_ctrl19)
+		offset += 2;
+
+	/* control 20 */
+	if (f55->query_17.has_ctrl20)
+		offset++;
+
+	/* control 21 */
+	if (f55->query_17.has_ctrl21)
+		offset++;
+
+	/* control 22 */
+	if (f55->query_17.has_ctrl22)
+		offset++;
+
+	/* control 23 */
+	if (f55->query_18.has_ctrl23)
+		offset++;
+
+	/* control 24 */
+	if (f55->query_18.has_ctrl24)
+		offset++;
+
+	/* control 25 */
+	if (f55->query_18.has_ctrl25)
+		offset++;
+
+	/* control 26 */
+	if (f55->query_18.has_ctrl26)
+		offset++;
+
+	/* control 27 */
+	if (f55->query_18.has_ctrl27_query20)
+		offset++;
+
+	/* control 28 */
+	if (f55->query_18.has_ctrl28_query21)
+		offset++;
+
+	/* control 29 */
+	if (f55->query_22.has_ctrl29)
+		offset++;
+
+	/* control 30 */
+	if (f55->query_22.has_ctrl30)
+		offset++;
+
+	/* control 31 */
+	if (f55->query_22.has_ctrl31)
+		offset++;
+
+	/* control 32 */
+	if (f55->query_22.has_ctrl32)
+		offset++;
+
+	/* controls 33 34 35 36 reserved */
+
+	/* control 37 */
+	if (f55->query_28.has_ctrl37)
+		offset++;
+
+	/* control 38 */
+	if (f55->query_30.has_ctrl38)
+		offset++;
+
+	/* control 39 */
+	if (f55->query_30.has_ctrl39)
+		offset++;
+
+	/* control 40 */
+	if (f55->query_30.has_ctrl40)
+		offset++;
+
+	/* control 41 */
+	if (f55->query_30.has_ctrl41)
+		offset++;
+
+	/* control 42 */
+	if (f55->query_30.has_ctrl42)
+		offset++;
+
+	/* controls 43 44: AFE mux controls, offset recorded */
+	if (f55->query_30.has_ctrl43_ctrl44) {
+		f55->afe_mux_offset = offset;
+		offset += 2;
+	}
+
+	/* controls 45 46: force sensing tx/rx assignment, offsets recorded */
+	if (f55->query_33.has_ctrl45_ctrl46) {
+		f55->has_force = true;
+		f55->force_tx_offset = offset;
+		f55->force_rx_offset = offset + 1;
+		offset += 2;
+	}
+
+	return 0;
+}
+
+static int test_f55_set_queries(void)
+{
+	int retval;
+	unsigned char offset;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f55->query_base_addr,
+			f55->query.data,
+			sizeof(f55->query.data));
+	if (retval < 0)
+		return retval;
+
+	offset = sizeof(f55->query.data);
+
+	/* query 3 */
+	if (f55->query.has_single_layer_multi_touch) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_3.data,
+				sizeof(f55->query_3.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 4 */
+	if (f55->query_3.has_ctrl9)
+		offset += 1;
+
+	/* query 5 */
+	if (f55->query.has_query5) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_5.data,
+				sizeof(f55->query_5.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* queries 6 7 */
+	if (f55->query.curve_compensation_mode == 0x3)
+		offset += 2;
+
+	/* query 8 */
+	if (f55->query_3.has_ctrl8)
+		offset += 1;
+
+	/* query 9 */
+	if (f55->query_3.has_query9)
+		offset += 1;
+
+	/* queries 10 11 12 13 14 15 16 */
+	if (f55->query_5.has_basis_function)
+		offset += 7;
+
+	/* query 17 */
+	if (f55->query_5.has_query17) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_17.data,
+				sizeof(f55->query_17.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 18 */
+	if (f55->query_17.has_query18) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_18.data,
+				sizeof(f55->query_18.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 19 */
+	if (f55->query_18.has_query19)
+		offset += 1;
+
+	/* query 20 */
+	if (f55->query_18.has_ctrl27_query20)
+		offset += 1;
+
+	/* query 21 */
+	if (f55->query_18.has_ctrl28_query21)
+		offset += 1;
+
+	/* query 22 */
+	if (f55->query_18.has_query22) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_22.data,
+				sizeof(f55->query_22.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 23 */
+	if (f55->query_22.has_query23) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_23.data,
+				sizeof(f55->query_23.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+
+		f55->amp_sensor = f55->query_23.amp_sensor_enabled;
+		f55->size_of_column2mux = f55->query_23.size_of_column2mux;
+	}
+
+	/* queries 24 25 26 27 reserved */
+
+	/* query 28 */
+	if (f55->query_22.has_query28) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_28.data,
+				sizeof(f55->query_28.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 29 */
+	if (f55->query_28.has_query29)
+		offset += 1;
+
+	/* query 30 */
+	if (f55->query_28.has_query30) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_30.data,
+				sizeof(f55->query_30.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* queries 31 32 */
+	if (f55->query_30.has_query31_query32)
+		offset += 2;
+
+	/* query 33 */
+	if (f55->query_30.has_query33) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_33.data,
+				sizeof(f55->query_33.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+
+		f55->extended_amp = f55->query_33.has_extended_amp_pad;
+	}
+
+	return 0;
+}
+
+static void test_f55_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char rx_electrodes;
+	unsigned char tx_electrodes;
+	struct f55_control_43 ctrl_43;
+
+	retval = test_f55_set_queries();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read F55 query registers\n",
+				__func__);
+		return;
+	}
+
+	if (!f55->query.has_sensor_assignment)
+		return;
+
+	retval = test_f55_set_controls();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set up F55 control registers\n",
+				__func__);
+		return;
+	}
+
+	tx_electrodes = f55->query.num_of_tx_electrodes;
+	rx_electrodes = f55->query.num_of_rx_electrodes;
+
+	f55->tx_assignment = kzalloc(tx_electrodes, GFP_KERNEL);
+	f55->rx_assignment = kzalloc(rx_electrodes, GFP_KERNEL);
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f55->control_base_addr + SENSOR_TX_MAPPING_OFFSET,
+			f55->tx_assignment,
+			tx_electrodes);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read F55 tx assignment\n",
+				__func__);
+		return;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f55->control_base_addr + SENSOR_RX_MAPPING_OFFSET,
+			f55->rx_assignment,
+			rx_electrodes);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read F55 rx assignment\n",
+				__func__);
+		return;
+	}
+
+	f54->tx_assigned = 0;
+	for (ii = 0; ii < tx_electrodes; ii++) {
+		if (f55->tx_assignment[ii] != 0xff)
+			f54->tx_assigned++;
+	}
+
+	f54->rx_assigned = 0;
+	for (ii = 0; ii < rx_electrodes; ii++) {
+		if (f55->rx_assignment[ii] != 0xff)
+			f54->rx_assigned++;
+	}
+
+	if (f55->amp_sensor) {
+		f54->tx_assigned = f55->size_of_column2mux;
+		f54->rx_assigned /= 2;
+	}
+
+	if (f55->extended_amp) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->control_base_addr + f55->afe_mux_offset,
+				ctrl_43.data,
+				sizeof(ctrl_43.data));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read F55 AFE mux sizes\n",
+					__func__);
+			return;
+		}
+
+		f54->tx_assigned = ctrl_43.afe_l_mux_size +
+				ctrl_43.afe_r_mux_size;
+	}
+
+	/* force mapping */
+	if (f55->has_force) {
+		f55->force_tx_assignment = kzalloc(tx_electrodes, GFP_KERNEL);
+		f55->force_rx_assignment = kzalloc(rx_electrodes, GFP_KERNEL);
+
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->control_base_addr + f55->force_tx_offset,
+				f55->force_tx_assignment,
+				tx_electrodes);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read F55 force tx assignment\n",
+					__func__);
+			return;
+		}
+
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->control_base_addr + f55->force_rx_offset,
+				f55->force_rx_assignment,
+				rx_electrodes);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read F55 force rx assignment\n",
+					__func__);
+			return;
+		}
+
+		for (ii = 0; ii < tx_electrodes; ii++) {
+			if (f55->force_tx_assignment[ii] != 0xff)
+				f54->tx_assigned++;
+		}
+
+		for (ii = 0; ii < rx_electrodes; ii++) {
+			if (f55->force_rx_assignment[ii] != 0xff)
+				f54->rx_assigned++;
+		}
+	}
+
+	return;
+}
+
+static void test_f55_set_regs(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn_desc *fd,
+		unsigned char page)
+{
+	f55 = kzalloc(sizeof(*f55), GFP_KERNEL);
+	if (!f55) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for F55\n",
+				__func__);
+		return;
+	}
+
+	f55->query_base_addr = fd->query_base_addr | (page << 8);
+	f55->control_base_addr = fd->ctrl_base_addr | (page << 8);
+	f55->data_base_addr = fd->data_base_addr | (page << 8);
+	f55->command_base_addr = fd->cmd_base_addr | (page << 8);
+
+	return;
+}
+
+static void test_f21_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char size_of_query2;
+	unsigned char size_of_query5;
+	unsigned char query_11_offset;
+	unsigned char ctrl_4_offset;
+	struct f21_query_2 *query_2 = NULL;
+	struct f21_query_5 *query_5 = NULL;
+	struct f21_query_11 *query_11 = NULL;
+
+	query_2 = kzalloc(sizeof(*query_2), GFP_KERNEL);
+	if (!query_2) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for query_2\n",
+				__func__);
+		goto exit;
+	}
+
+	query_5 = kzalloc(sizeof(*query_5), GFP_KERNEL);
+	if (!query_5) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for query_5\n",
+				__func__);
+		goto exit;
+	}
+
+	query_11 = kzalloc(sizeof(*query_11), GFP_KERNEL);
+	if (!query_11) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for query_11\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f21->query_base_addr + 1,
+			&size_of_query2,
+			sizeof(size_of_query2));
+	if (retval < 0)
+		goto exit;
+
+	if (size_of_query2 > sizeof(query_2->data))
+		size_of_query2 = sizeof(query_2->data);
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f21->query_base_addr + 2,
+			query_2->data,
+			size_of_query2);
+	if (retval < 0)
+		goto exit;
+
+	if (!query_2->query11_is_present) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: No F21 force capabilities\n",
+				__func__);
+		goto exit;
+	}
+
+	query_11_offset = query_2->query0_is_present +
+			query_2->query1_is_present +
+			query_2->query2_is_present +
+			query_2->query3_is_present +
+			query_2->query4_is_present +
+			query_2->query5_is_present +
+			query_2->query6_is_present +
+			query_2->query7_is_present +
+			query_2->query8_is_present +
+			query_2->query9_is_present +
+			query_2->query10_is_present;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f21->query_base_addr + 11,
+			query_11->data,
+			sizeof(query_11->data));
+	if (retval < 0)
+		goto exit;
+
+	if (!query_11->has_force_sensing_txrx_mapping) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: No F21 force mapping\n",
+				__func__);
+		goto exit;
+	}
+
+	f21->max_num_of_tx = query_11->max_number_of_force_txs;
+	f21->max_num_of_rx = query_11->max_number_of_force_rxs;
+	f21->max_num_of_txrx = f21->max_num_of_tx + f21->max_num_of_rx;
+
+	f21->force_txrx_assignment = kzalloc(f21->max_num_of_txrx, GFP_KERNEL);
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f21->query_base_addr + 4,
+			&size_of_query5,
+			sizeof(size_of_query5));
+	if (retval < 0)
+		goto exit;
+
+	if (size_of_query5 > sizeof(query_5->data))
+		size_of_query5 = sizeof(query_5->data);
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f21->query_base_addr + 5,
+			query_5->data,
+			size_of_query5);
+	if (retval < 0)
+		goto exit;
+
+	ctrl_4_offset = query_5->ctrl0_is_present +
+			query_5->ctrl1_is_present +
+			query_5->ctrl2_is_present +
+			query_5->ctrl3_is_present;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f21->control_base_addr + ctrl_4_offset,
+			f21->force_txrx_assignment,
+			f21->max_num_of_txrx);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read F21 force txrx assignment\n",
+				__func__);
+		goto exit;
+	}
+
+	f21->has_force = true;
+
+	for (ii = 0; ii < f21->max_num_of_tx; ii++) {
+		if (f21->force_txrx_assignment[ii] != 0xff)
+			f21->tx_assigned++;
+	}
+
+	for (ii = f21->max_num_of_tx; ii < f21->max_num_of_txrx; ii++) {
+		if (f21->force_txrx_assignment[ii] != 0xff)
+			f21->rx_assigned++;
+	}
+
+exit:
+	kfree(query_2);
+	kfree(query_5);
+	kfree(query_11);
+
+	return;
+}
+
+static void test_f21_set_regs(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn_desc *fd,
+		unsigned char page)
+{
+	f21 = kzalloc(sizeof(*f21), GFP_KERNEL);
+	if (!f21) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for F21\n",
+				__func__);
+		return;
+	}
+
+	f21->query_base_addr = fd->query_base_addr | (page << 8);
+	f21->control_base_addr = fd->ctrl_base_addr | (page << 8);
+	f21->data_base_addr = fd->data_base_addr | (page << 8);
+	f21->command_base_addr = fd->cmd_base_addr | (page << 8);
+
+	return;
+}
+
+static int test_scan_pdt(void)
+{
+	int retval;
+	unsigned char intr_count = 0;
+	unsigned char page;
+	unsigned short addr;
+	bool f54found = false;
+	bool f55found = false;
+	struct synaptics_rmi4_fn_desc rmi_fd;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	for (page = 0; page < PAGES_TO_SERVICE; page++) {
+		for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
+			addr |= (page << 8);
+
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					addr,
+					(unsigned char *)&rmi_fd,
+					sizeof(rmi_fd));
+			if (retval < 0)
+				return retval;
+
+			addr &= ~(MASK_8BIT << 8);
+
+			if (!rmi_fd.fn_number)
+				break;
+
+			switch (rmi_fd.fn_number) {
+			case SYNAPTICS_RMI4_F54:
+				test_f54_set_regs(rmi4_data,
+						&rmi_fd, intr_count, page);
+				f54found = true;
+				break;
+			case SYNAPTICS_RMI4_F55:
+				test_f55_set_regs(rmi4_data,
+						&rmi_fd, page);
+				f55found = true;
+				break;
+			case SYNAPTICS_RMI4_F21:
+				test_f21_set_regs(rmi4_data,
+						&rmi_fd, page);
+				break;
+			default:
+				break;
+			}
+
+			if (f54found && f55found)
+				goto pdt_done;
+
+			intr_count += rmi_fd.intr_src_count;
+		}
+	}
+
+	if (!f54found) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to find F54\n",
+				__func__);
+		return -EINVAL;
+	}
+
+pdt_done:
+	return 0;
+}
+
+static void synaptics_rmi4_test_attn(struct synaptics_rmi4_data *rmi4_data,
+		unsigned char intr_mask)
+{
+	if (!f54)
+		return;
+
+	if (f54->intr_mask & intr_mask)
+		queue_work(f54->test_report_workqueue, &f54->test_report_work);
+
+	return;
+}
+
+static int synaptics_rmi4_test_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+
+	if (f54) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	f54 = kzalloc(sizeof(*f54), GFP_KERNEL);
+	if (!f54) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for F54\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	f54->rmi4_data = rmi4_data;
+
+	f55 = NULL;
+
+	f21 = NULL;
+
+	retval = test_scan_pdt();
+	if (retval < 0)
+		goto exit_free_mem;
+
+	retval = test_set_queries();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read F54 query registers\n",
+				__func__);
+		goto exit_free_mem;
+	}
+
+	f54->tx_assigned = f54->query.num_of_tx_electrodes;
+	f54->rx_assigned = f54->query.num_of_rx_electrodes;
+
+	retval = test_set_controls();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set up F54 control registers\n",
+				__func__);
+		goto exit_free_control;
+	}
+
+	test_set_data();
+
+	if (f55)
+		test_f55_init(rmi4_data);
+
+	if (f21)
+		test_f21_init(rmi4_data);
+
+	if (rmi4_data->external_afe_buttons)
+		f54->tx_assigned++;
+
+	retval = test_set_sysfs();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs entries\n",
+				__func__);
+		goto exit_sysfs;
+	}
+
+	f54->test_report_workqueue =
+			create_singlethread_workqueue("test_report_workqueue");
+	INIT_WORK(&f54->test_report_work, test_report_work);
+
+	hrtimer_init(&f54->watchdog, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	f54->watchdog.function = test_get_report_timeout;
+	INIT_WORK(&f54->timeout_work, test_timeout_work);
+
+	mutex_init(&f54->status_mutex);
+	f54->status = STATUS_IDLE;
+
+	return 0;
+
+exit_sysfs:
+	if (f21)
+		kfree(f21->force_txrx_assignment);
+
+	if (f55) {
+		kfree(f55->tx_assignment);
+		kfree(f55->rx_assignment);
+		kfree(f55->force_tx_assignment);
+		kfree(f55->force_rx_assignment);
+	}
+
+exit_free_control:
+	test_free_control_mem();
+
+exit_free_mem:
+	kfree(f21);
+	f21 = NULL;
+	kfree(f55);
+	f55 = NULL;
+	kfree(f54);
+	f54 = NULL;
+
+exit:
+	return retval;
+}
+
+static void synaptics_rmi4_test_remove(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!f54)
+		goto exit;
+
+	hrtimer_cancel(&f54->watchdog);
+
+	cancel_work_sync(&f54->test_report_work);
+	flush_workqueue(f54->test_report_workqueue);
+	destroy_workqueue(f54->test_report_workqueue);
+
+	test_remove_sysfs();
+
+	if (f21)
+		kfree(f21->force_txrx_assignment);
+
+	if (f55) {
+		kfree(f55->tx_assignment);
+		kfree(f55->rx_assignment);
+		kfree(f55->force_tx_assignment);
+		kfree(f55->force_rx_assignment);
+	}
+
+	test_free_control_mem();
+
+	if (f54->data_buffer_size)
+		kfree(f54->report_data);
+
+	kfree(f21);
+	f21 = NULL;
+
+	kfree(f55);
+	f55 = NULL;
+
+	kfree(f54);
+	f54 = NULL;
+
+exit:
+	complete(&test_remove_complete);
+
+	return;
+}
+
+static void synaptics_rmi4_test_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+
+	if (!f54) {
+		synaptics_rmi4_test_init(rmi4_data);
+		return;
+	}
+
+	if (f21)
+		kfree(f21->force_txrx_assignment);
+
+	if (f55) {
+		kfree(f55->tx_assignment);
+		kfree(f55->rx_assignment);
+		kfree(f55->force_tx_assignment);
+		kfree(f55->force_rx_assignment);
+	}
+
+	test_free_control_mem();
+
+	kfree(f55);
+	f55 = NULL;
+
+	kfree(f21);
+	f21 = NULL;
+
+	retval = test_scan_pdt();
+	if (retval < 0)
+		goto exit_free_mem;
+
+	retval = test_set_queries();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read F54 query registers\n",
+				__func__);
+		goto exit_free_mem;
+	}
+
+	f54->tx_assigned = f54->query.num_of_tx_electrodes;
+	f54->rx_assigned = f54->query.num_of_rx_electrodes;
+
+	retval = test_set_controls();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set up F54 control registers\n",
+				__func__);
+		goto exit_free_control;
+	}
+
+	test_set_data();
+
+	if (f55)
+		test_f55_init(rmi4_data);
+
+	if (f21)
+		test_f21_init(rmi4_data);
+
+	if (rmi4_data->external_afe_buttons)
+		f54->tx_assigned++;
+
+	f54->status = STATUS_IDLE;
+
+	return;
+
+exit_free_control:
+	test_free_control_mem();
+
+exit_free_mem:
+	hrtimer_cancel(&f54->watchdog);
+
+	cancel_work_sync(&f54->test_report_work);
+	flush_workqueue(f54->test_report_workqueue);
+	destroy_workqueue(f54->test_report_workqueue);
+
+	test_remove_sysfs();
+
+	if (f54->data_buffer_size)
+		kfree(f54->report_data);
+
+	kfree(f21);
+	f21 = NULL;
+
+	kfree(f55);
+	f55 = NULL;
+
+	kfree(f54);
+	f54 = NULL;
+
+	return;
+}
+
+static struct synaptics_rmi4_exp_fn test_module = {
+	.fn_type = RMI_TEST_REPORTING,
+	.init = synaptics_rmi4_test_init,
+	.remove = synaptics_rmi4_test_remove,
+	.reset = synaptics_rmi4_test_reset,
+	.reinit = NULL,
+	.early_suspend = NULL,
+	.suspend = NULL,
+	.resume = NULL,
+	.late_resume = NULL,
+	.attn = synaptics_rmi4_test_attn,
+};
+
+static int __init rmi4_test_module_init(void)
+{
+	synaptics_rmi4_new_function(&test_module, true);
+
+	return 0;
+}
+
+static void __exit rmi4_test_module_exit(void)
+{
+	synaptics_rmi4_new_function(&test_module, false);
+
+	wait_for_completion(&test_remove_complete);
+
+	return;
+}
+
+module_init(rmi4_test_module_init);
+module_exit(rmi4_test_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX Test Reporting Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_video.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_video.c
new file mode 100644
index 0000000..b9ae0ac
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_video.c
@@ -0,0 +1,416 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+#define SYSFS_FOLDER_NAME "video"
+
+/*
+*#define RMI_DCS_SUSPEND_RESUME
+*/
+
+static ssize_t video_sysfs_dcs_write_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t video_sysfs_param_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static int video_send_dcs_command(unsigned char command_opcode);
+
+struct f38_command {
+	union {
+		struct {
+			unsigned char command_opcode;
+			unsigned char register_access:1;
+			unsigned char gamma_page:1;
+			unsigned char f38_control1_b2__7:6;
+			unsigned char parameter_field_1;
+			unsigned char parameter_field_2;
+			unsigned char parameter_field_3;
+			unsigned char parameter_field_4;
+			unsigned char send_to_dcs:1;
+			unsigned char f38_command6_b1__7:7;
+		} __packed;
+		unsigned char data[7];
+	};
+};
+
+struct synaptics_rmi4_video_handle {
+	unsigned char param;
+	unsigned short query_base_addr;
+	unsigned short control_base_addr;
+	unsigned short data_base_addr;
+	unsigned short command_base_addr;
+	struct synaptics_rmi4_data *rmi4_data;
+	struct kobject *sysfs_dir;
+};
+
+#ifdef RMI_DCS_SUSPEND_RESUME
+struct dcs_command {
+	unsigned char command;
+	unsigned int wait_time;
+};
+
+static struct dcs_command suspend_sequence[] = {
+	{
+		.command = 0x28,
+		.wait_time = 200,
+	},
+	{
+		.command = 0x10,
+		.wait_time = 200,
+	},
+};
+
+static struct dcs_command resume_sequence[] = {
+	{
+		.command = 0x11,
+		.wait_time = 200,
+	},
+	{
+		.command = 0x29,
+		.wait_time = 200,
+	},
+};
+#endif
+
+static struct device_attribute attrs[] = {
+	__ATTR(dcs_write, 0220,
+			synaptics_rmi4_show_error,
+			video_sysfs_dcs_write_store),
+	__ATTR(param, 0220,
+			synaptics_rmi4_show_error,
+			video_sysfs_param_store),
+};
+
+static struct synaptics_rmi4_video_handle *video;
+
+DECLARE_COMPLETION(video_remove_complete);
+
+static ssize_t video_sysfs_dcs_write_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+
+	if (kstrtouint(buf, 16, &input)) /* kstrtouint() returns 0 on success, not a conversion count */
+		return -EINVAL;
+
+	retval = video_send_dcs_command((unsigned char)input);
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+static ssize_t video_sysfs_param_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+
+	if (kstrtouint(buf, 16, &input)) /* kstrtouint() returns 0 on success, not a conversion count */
+		return -EINVAL;
+
+	video->param = (unsigned char)input;
+
+	return count;
+}
+
+static int video_send_dcs_command(unsigned char command_opcode)
+{
+	int retval;
+	struct f38_command command;
+	struct synaptics_rmi4_data *rmi4_data = video->rmi4_data;
+
+	memset(&command, 0x00, sizeof(command));
+
+	command.command_opcode = command_opcode;
+	command.parameter_field_1 = video->param;
+	command.send_to_dcs = 1;
+
+	video->param = 0;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			video->command_base_addr,
+			command.data,
+			sizeof(command.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to send DCS command\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+static int video_scan_pdt(void)
+{
+	int retval;
+	unsigned char page;
+	unsigned short addr;
+	bool f38_found = false;
+	struct synaptics_rmi4_fn_desc rmi_fd;
+	struct synaptics_rmi4_data *rmi4_data = video->rmi4_data;
+
+	for (page = 0; page < PAGES_TO_SERVICE; page++) {
+		for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
+			addr |= (page << 8);
+
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					addr,
+					(unsigned char *)&rmi_fd,
+					sizeof(rmi_fd));
+			if (retval < 0)
+				return retval;
+
+			addr &= ~(MASK_8BIT << 8);
+
+			if (!rmi_fd.fn_number)
+				break;
+
+			if (rmi_fd.fn_number == SYNAPTICS_RMI4_F38) {
+				f38_found = true;
+				goto f38_found;
+			}
+		}
+	}
+
+	if (!f38_found) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to find F38\n",
+				__func__);
+		return -EINVAL;
+	}
+
+f38_found:
+	video->query_base_addr = rmi_fd.query_base_addr | (page << 8);
+	video->control_base_addr = rmi_fd.ctrl_base_addr | (page << 8);
+	video->data_base_addr = rmi_fd.data_base_addr | (page << 8);
+	video->command_base_addr = rmi_fd.cmd_base_addr | (page << 8);
+
+	return 0;
+}
+
+static int synaptics_rmi4_video_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char attr_count;
+
+	if (video) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	video = kzalloc(sizeof(*video), GFP_KERNEL);
+	if (!video) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for video\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	video->rmi4_data = rmi4_data;
+
+	retval = video_scan_pdt();
+	if (retval < 0) {
+		retval = 0;
+		goto exit_scan_pdt;
+	}
+
+	video->sysfs_dir = kobject_create_and_add(SYSFS_FOLDER_NAME,
+			&rmi4_data->input_dev->dev.kobj);
+	if (!video->sysfs_dir) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs directory\n",
+				__func__);
+		retval = -ENODEV;
+		goto exit_sysfs_dir;
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(video->sysfs_dir,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			retval = -ENODEV;
+			goto exit_sysfs_attrs;
+		}
+	}
+
+	return 0;
+
+exit_sysfs_attrs:
+	for (; attr_count > 0; attr_count--) /* unsigned: ">= 0" form looped forever */
+		sysfs_remove_file(video->sysfs_dir, &attrs[attr_count - 1].attr);
+
+	kobject_put(video->sysfs_dir);
+
+exit_sysfs_dir:
+exit_scan_pdt:
+	kfree(video);
+	video = NULL;
+
+exit:
+	return retval;
+}
+
+static void synaptics_rmi4_video_remove(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char attr_count;
+
+	if (!video)
+		goto exit;
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++)
+		sysfs_remove_file(video->sysfs_dir, &attrs[attr_count].attr);
+
+	kobject_put(video->sysfs_dir);
+
+	kfree(video);
+	video = NULL;
+
+exit:
+	complete(&video_remove_complete);
+
+	return;
+}
+
+static void synaptics_rmi4_video_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!video)
+		synaptics_rmi4_video_init(rmi4_data);
+
+	return;
+}
+
+#ifdef RMI_DCS_SUSPEND_RESUME
+static void synaptics_rmi4_video_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char command;
+	unsigned char num_of_cmds;
+
+	if (!video)
+		return;
+
+	num_of_cmds = ARRAY_SIZE(suspend_sequence);
+
+	for (ii = 0; ii < num_of_cmds; ii++) {
+		command = suspend_sequence[ii].command;
+		retval = video_send_dcs_command(command);
+		if (retval < 0)
+			return;
+		msleep(suspend_sequence[ii].wait_time);
+	}
+
+	return;
+}
+
+static void synaptics_rmi4_video_resume(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char command;
+	unsigned char num_of_cmds;
+
+	if (!video)
+		return;
+
+	num_of_cmds = ARRAY_SIZE(resume_sequence);
+
+	for (ii = 0; ii < num_of_cmds; ii++) {
+		command = resume_sequence[ii].command;
+		retval = video_send_dcs_command(command);
+		if (retval < 0)
+			return;
+		msleep(resume_sequence[ii].wait_time);
+	}
+
+	return;
+}
+#endif
+
+static struct synaptics_rmi4_exp_fn video_module = {
+	.fn_type = RMI_VIDEO,
+	.init = synaptics_rmi4_video_init,
+	.remove = synaptics_rmi4_video_remove,
+	.reset = synaptics_rmi4_video_reset,
+	.reinit = NULL,
+	.early_suspend = NULL,
+#ifdef RMI_DCS_SUSPEND_RESUME
+	.suspend = synaptics_rmi4_video_suspend,
+	.resume = synaptics_rmi4_video_resume,
+#else
+	.suspend = NULL,
+	.resume = NULL,
+#endif
+	.late_resume = NULL,
+	.attn = NULL,
+};
+
+static int __init rmi4_video_module_init(void)
+{
+	synaptics_rmi4_new_function(&video_module, true);
+
+	return 0;
+}
+
+static void __exit rmi4_video_module_exit(void)
+{
+	synaptics_rmi4_new_function(&video_module, false);
+
+	wait_for_completion(&video_remove_complete);
+
+	return;
+}
+
+module_init(rmi4_video_module_init);
+module_exit(rmi4_video_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX Video Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/Kconfig b/drivers/input/touchscreen/synaptics_dsx_2.6/Kconfig
new file mode 100644
index 0000000..78b995e
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/Kconfig
@@ -0,0 +1,117 @@
+#
+# Synaptics DSX v2.6 touchscreen driver configuration
+#
+menuconfig TOUCHSCREEN_SYNAPTICS_DSX_v26
+	bool "Synaptics DSX v2.6 touchscreen"
+	default y
+	help
+	  Say Y here if you have a Synaptics DSX touchscreen connected
+	  to your system.
+
+	  If unsure, say N.
+
+if TOUCHSCREEN_SYNAPTICS_DSX_v26
+
+choice
+	default TOUCHSCREEN_SYNAPTICS_DSX_I2C_v26
+	prompt "Synaptics DSX v2.6 bus interface"
+config TOUCHSCREEN_SYNAPTICS_DSX_I2C_v26
+	bool "RMI over I2C"
+	depends on I2C
+config TOUCHSCREEN_SYNAPTICS_DSX_SPI_v26
+	bool "RMI over SPI"
+	depends on SPI_MASTER
+config TOUCHSCREEN_SYNAPTICS_DSX_RMI_HID_I2C_v26
+	bool "HID over I2C"
+	depends on I2C
+endchoice
+
+config TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26
+	tristate "Synaptics DSX v2.6 core driver module"
+	depends on I2C || SPI_MASTER
+	help
+	  Say Y here to enable basic touch reporting functionality.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx_core.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v26
+	tristate "Synaptics DSX v2.6 RMI device module"
+	depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26
+	help
+	  Say Y here to enable support for direct RMI register access.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx_rmi_dev.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v26
+	tristate "Synaptics DSX v2.6 firmware update module"
+	depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26
+	help
+	  Say Y here to enable support for doing firmware update.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx_fw_update.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_TEST_REPORTING_v26
+	tristate "Synaptics DSX v2.6 test reporting module"
+	depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26
+	help
+	  Say Y here to enable support for retrieving production test reports.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx_test_reporting.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_PROXIMITY_v26
+	tristate "Synaptics DSX v2.6 proximity module"
+	depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26
+	help
+	  Say Y here to enable support for proximity functionality.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx_proximity.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_ACTIVE_PEN_v26
+	tristate "Synaptics DSX v2.6 active pen module"
+	depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26
+	help
+	  Say Y here to enable support for active pen functionality.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx_active_pen.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_GESTURE_v26
+	tristate "Synaptics DSX v2.6 user defined gesture module"
+	depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26
+	help
+	  Say Y here to enable support for user defined gesture functionality.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx_gesture.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_VIDEO_v26
+	tristate "Synaptics DSX v2.6 video module"
+	depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26
+	help
+	  Say Y here to enable support for video communication functionality.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx_video.
+
+endif
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/Makefile b/drivers/input/touchscreen/synaptics_dsx_2.6/Makefile
new file mode 100644
index 0000000..e5e7215
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/Makefile
@@ -0,0 +1,17 @@
+#
+# Makefile for the Synaptics DSX touchscreen driver.
+#
+
+# Each configuration option enables a list of files.
+
+# Bus interface modules (mutually exclusive via the Kconfig choice).
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_I2C_v26) += synaptics_dsx_i2c.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_SPI_v26) += synaptics_dsx_spi.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_HID_I2C_v26) += synaptics_dsx_rmi_hid_i2c.o
+# Core driver; all expansion modules below depend on it in Kconfig.
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26) += synaptics_dsx_core.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v26) += synaptics_dsx_rmi_dev.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v26) += synaptics_dsx_fw_update.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_TEST_REPORTING_v26) += synaptics_dsx_test_reporting.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_PROXIMITY_v26) += synaptics_dsx_proximity.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_ACTIVE_PEN_v26) += synaptics_dsx_active_pen.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_GESTURE_v26) += synaptics_dsx_gesture.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_VIDEO_v26) += synaptics_dsx_video.o
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_active_pen.c b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_active_pen.c
new file mode 100644
index 0000000..db5324a
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_active_pen.c
@@ -0,0 +1,624 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2015 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx_v2_6.h>
+#include "synaptics_dsx_core.h"
+
+#define APEN_PHYS_NAME "synaptics_dsx/active_pen"
+
+#define ACTIVE_PEN_MAX_PRESSURE_16BIT 65535
+#define ACTIVE_PEN_MAX_PRESSURE_8BIT 255
+
+/*
+ * F12 query 8: size of query 9 plus presence flags for F12 data
+ * registers 0-7.  The active pen data lives in data register 6.
+ */
+struct synaptics_rmi4_f12_query_8 {
+	union {
+		struct {
+			unsigned char size_of_query9;
+			struct {
+				unsigned char data0_is_present:1;
+				unsigned char data1_is_present:1;
+				unsigned char data2_is_present:1;
+				unsigned char data3_is_present:1;
+				unsigned char data4_is_present:1;
+				unsigned char data5_is_present:1;
+				unsigned char data6_is_present:1;
+				unsigned char data7_is_present:1;
+			} __packed;
+		};
+		unsigned char data[2];
+	};
+};
+
+/* Pen data layout when pressure is 8 bits (no pressure_lsb byte). */
+struct apen_data_8b_pressure {
+	union {
+		struct {
+			unsigned char status_pen:1;
+			unsigned char status_invert:1;
+			unsigned char status_barrel:1;
+			unsigned char status_reserved:5;
+			unsigned char x_lsb;
+			unsigned char x_msb;
+			unsigned char y_lsb;
+			unsigned char y_msb;
+			unsigned char pressure_msb;
+			unsigned char battery_state;
+			unsigned char pen_id_0_7;
+			unsigned char pen_id_8_15;
+			unsigned char pen_id_16_23;
+			unsigned char pen_id_24_31;
+		} __packed;
+		unsigned char data[11];
+	};
+};
+
+/* Pen data layout when pressure is reported as a full 16-bit value. */
+struct apen_data {
+	union {
+		struct {
+			unsigned char status_pen:1;
+			unsigned char status_invert:1;
+			unsigned char status_barrel:1;
+			unsigned char status_reserved:5;
+			unsigned char x_lsb;
+			unsigned char x_msb;
+			unsigned char y_lsb;
+			unsigned char y_msb;
+			unsigned char pressure_lsb;
+			unsigned char pressure_msb;
+			unsigned char battery_state;
+			unsigned char pen_id_0_7;
+			unsigned char pen_id_8_15;
+			unsigned char pen_id_16_23;
+			unsigned char pen_id_24_31;
+		} __packed;
+		unsigned char data[12];
+	};
+};
+
+/* Per-device state of the active pen expansion module. */
+struct synaptics_rmi4_apen_handle {
+	bool apen_present;	/* a pen contact is currently reported */
+	unsigned char intr_mask;	/* F12 interrupt enable bit(s) */
+	unsigned char battery_state;
+	unsigned short query_base_addr;
+	unsigned short control_base_addr;
+	unsigned short data_base_addr;
+	unsigned short command_base_addr;
+	unsigned short apen_data_addr;	/* address of F12 data register 6 */
+	unsigned short max_pressure;
+	unsigned int pen_id;
+	struct input_dev *apen_dev;
+	struct apen_data *apen_data;
+	struct synaptics_rmi4_data *rmi4_data;
+};
+
+/* Singleton handle; NULL until synaptics_rmi4_apen_init() succeeds. */
+static struct synaptics_rmi4_apen_handle *apen;
+
+/* Signaled by the remove handler so module exit can wait for teardown. */
+DECLARE_COMPLETION(apen_remove_complete);
+
+/* Release touch and pen/rubber tool keys and mark the pen as lifted. */
+static void apen_lift(void)
+{
+	input_report_key(apen->apen_dev, BTN_TOUCH, 0);
+	input_report_key(apen->apen_dev, BTN_TOOL_PEN, 0);
+	input_report_key(apen->apen_dev, BTN_TOOL_RUBBER, 0);
+	input_sync(apen->apen_dev);
+	apen->apen_present = false;
+
+	return;
+}
+
+/*
+ * Read the pen data register and report pen state to the input
+ * subsystem: BTN_TOOL_PEN or BTN_TOOL_RUBBER depending on the invert
+ * flag, BTN_STYLUS for the barrel button, and ABS_X/ABS_Y/ABS_PRESSURE.
+ * Battery state and pen ID are cached in the handle as a side effect.
+ */
+static void apen_report(void)
+{
+	int retval;
+	int x;
+	int y;
+	int pressure;
+	static int invert = -1; /* last reported invert state; -1 = none */
+	struct apen_data_8b_pressure *apen_data_8b;
+	struct synaptics_rmi4_data *rmi4_data = apen->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			apen->apen_data_addr,
+			apen->apen_data->data,
+			sizeof(apen->apen_data->data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read active pen data\n",
+				__func__);
+		return;
+	}
+
+	/* Pen left sensing range: release the tool if one was active. */
+	if (apen->apen_data->status_pen == 0) {
+		if (apen->apen_present)
+			apen_lift();
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: No active pen data\n",
+				__func__);
+
+		return;
+	}
+
+	x = (apen->apen_data->x_msb << 8) | (apen->apen_data->x_lsb);
+	y = (apen->apen_data->y_msb << 8) | (apen->apen_data->y_lsb);
+
+	/*
+	 * NOTE(review): x and y are assembled from unsigned bytes, so
+	 * they can never equal -1 here (maximum is 0xffff) and this
+	 * branch is dead.  The invalid-position sentinel is presumably
+	 * meant to be 0xffff -- confirm against the F12 data register
+	 * specification before changing.
+	 */
+	if ((x == -1) && (y == -1)) {
+		if (apen->apen_present)
+			apen_lift();
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Active pen in range but no valid x & y\n",
+				__func__);
+
+		return;
+	}
+
+	if (!apen->apen_present)
+		invert = -1;
+
+	/* Lift first when the pen flips between tip and eraser ends. */
+	if (invert != -1 && invert != apen->apen_data->status_invert)
+		apen_lift();
+
+	invert = apen->apen_data->status_invert;
+
+	if (apen->max_pressure == ACTIVE_PEN_MAX_PRESSURE_16BIT) {
+		pressure = (apen->apen_data->pressure_msb << 8) |
+				apen->apen_data->pressure_lsb;
+		apen->battery_state = apen->apen_data->battery_state;
+		apen->pen_id = (apen->apen_data->pen_id_24_31 << 24) |
+				(apen->apen_data->pen_id_16_23 << 16) |
+				(apen->apen_data->pen_id_8_15 << 8) |
+				apen->apen_data->pen_id_0_7;
+	} else {
+		/* 8-bit pressure: reinterpret buffer without pressure_lsb. */
+		apen_data_8b = (struct apen_data_8b_pressure *)apen->apen_data;
+		pressure = apen_data_8b->pressure_msb;
+		apen->battery_state = apen_data_8b->battery_state;
+		apen->pen_id = (apen_data_8b->pen_id_24_31 << 24) |
+				(apen_data_8b->pen_id_16_23 << 16) |
+				(apen_data_8b->pen_id_8_15 << 8) |
+				apen_data_8b->pen_id_0_7;
+	}
+
+	input_report_key(apen->apen_dev, BTN_TOUCH, pressure > 0 ? 1 : 0);
+	input_report_key(apen->apen_dev,
+			apen->apen_data->status_invert > 0 ?
+			BTN_TOOL_RUBBER : BTN_TOOL_PEN, 1);
+	input_report_key(apen->apen_dev,
+			BTN_STYLUS, apen->apen_data->status_barrel > 0 ?
+			1 : 0);
+	input_report_abs(apen->apen_dev, ABS_X, x);
+	input_report_abs(apen->apen_dev, ABS_Y, y);
+	input_report_abs(apen->apen_dev, ABS_PRESSURE, pressure);
+
+	input_sync(apen->apen_dev);
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Active pen: status = %d, invert = %d, barrel = %d, x = %d, y = %d, pressure = %d\n",
+			__func__,
+			apen->apen_data->status_pen,
+			apen->apen_data->status_invert,
+			apen->apen_data->status_barrel,
+			x, y, pressure);
+
+	apen->apen_present = true;
+
+	return;
+}
+
+/*
+ * Set input absolute-axis ranges from the sensor dimensions cached in
+ * the core driver data and the pressure resolution probed earlier.
+ */
+static void apen_set_params(void)
+{
+	input_set_abs_params(apen->apen_dev, ABS_X, 0,
+			apen->rmi4_data->sensor_max_x, 0, 0);
+	input_set_abs_params(apen->apen_dev, ABS_Y, 0,
+			apen->rmi4_data->sensor_max_y, 0, 0);
+	input_set_abs_params(apen->apen_dev, ABS_PRESSURE, 0,
+			apen->max_pressure, 0, 0);
+
+	return;
+}
+
+/*
+ * Determine the pen pressure resolution by walking the F12 query 9 data
+ * register descriptors.  If data register 6 contains both subpackets 1
+ * and 2, pressure is a 16-bit value; otherwise only the 8-bit MSB
+ * exists.  Stores the result in apen->max_pressure.
+ *
+ * Returns 0 on success or a negative errno on failure.
+ */
+static int apen_pressure(struct synaptics_rmi4_f12_query_8 *query_8)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char data_reg_presence;
+	unsigned char size_of_query_9;
+	unsigned char *query_9;
+	unsigned char *data_desc;
+	struct synaptics_rmi4_data *rmi4_data = apen->rmi4_data;
+
+	data_reg_presence = query_8->data[1];
+
+	size_of_query_9 = query_8->size_of_query9;
+	query_9 = kmalloc(size_of_query_9, GFP_KERNEL);
+	if (!query_9)
+		return -ENOMEM; /* was dereferenced unchecked before */
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			apen->query_base_addr + 9,
+			query_9,
+			size_of_query_9);
+	if (retval < 0)
+		goto exit;
+
+	data_desc = query_9;
+
+	/* Skip the descriptors of present data registers 0 to 5. */
+	for (ii = 0; ii < 6; ii++) {
+		if (!(data_reg_presence & (1 << ii)))
+			continue; /* The data register is not present */
+		data_desc++; /* Jump over the size entry */
+		while (*data_desc & (1 << 7))
+			data_desc++;
+		data_desc++; /* Go to the next descriptor */
+	}
+
+	data_desc++; /* Jump over the size entry */
+	/* Check for the presence of subpackets 1 and 2 */
+	if ((*data_desc & (3 << 1)) == (3 << 1))
+		apen->max_pressure = ACTIVE_PEN_MAX_PRESSURE_16BIT;
+	else
+		apen->max_pressure = ACTIVE_PEN_MAX_PRESSURE_8BIT;
+
+exit:
+	kfree(query_9);
+
+	return retval;
+}
+
+/*
+ * Locate the pen data register within F12 and probe the pressure
+ * resolution.  Query 8 gives the presence map of data registers 0-5,
+ * whose count is the offset of data register 6 (pen data) from the F12
+ * data base address.
+ *
+ * Returns 0 on success, -ENODEV when the firmware lacks pen support,
+ * or another negative errno on bus failure.
+ */
+static int apen_reg_init(void)
+{
+	int retval;
+	unsigned char data_offset;
+	unsigned char size_of_query8;
+	struct synaptics_rmi4_f12_query_8 query_8;
+	struct synaptics_rmi4_data *rmi4_data = apen->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			apen->query_base_addr + 7,
+			&size_of_query8,
+			sizeof(size_of_query8));
+	if (retval < 0)
+		return retval;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			apen->query_base_addr + 8,
+			query_8.data,
+			sizeof(query_8.data));
+	if (retval < 0)
+		return retval;
+
+	/* Pen data lives in data register 6; require its presence bit. */
+	if ((size_of_query8 >= 2) && (query_8.data6_is_present)) {
+		/* Offset = number of present data registers before 6. */
+		data_offset = query_8.data0_is_present +
+				query_8.data1_is_present +
+				query_8.data2_is_present +
+				query_8.data3_is_present +
+				query_8.data4_is_present +
+				query_8.data5_is_present;
+		apen->apen_data_addr = apen->data_base_addr + data_offset;
+		retval = apen_pressure(&query_8);
+		if (retval < 0)
+			return retval;
+	} else {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Active pen support unavailable\n",
+				__func__);
+		retval = -ENODEV;
+	}
+
+	return retval;
+}
+
+/*
+ * Walk the Page Description Table looking for F12.  On success, cache
+ * the F12 register base addresses, initialize the pen registers, build
+ * the F12 interrupt mask from the interrupt sources counted before F12
+ * was found, and enable those bits in the F01 interrupt enable register.
+ *
+ * Returns 0 on success, -EINVAL if F12 is absent, or a negative errno
+ * on bus failure.
+ */
+static int apen_scan_pdt(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char page;
+	unsigned char intr_count = 0;
+	unsigned char intr_off;
+	unsigned char intr_src;
+	unsigned short addr;
+	struct synaptics_rmi4_fn_desc fd;
+	struct synaptics_rmi4_data *rmi4_data = apen->rmi4_data;
+
+	for (page = 0; page < PAGES_TO_SERVICE; page++) {
+		for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
+			addr |= (page << 8);
+
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					addr,
+					(unsigned char *)&fd,
+					sizeof(fd));
+			if (retval < 0)
+				return retval;
+
+			/* Strip the page number again for the next step. */
+			addr &= ~(MASK_8BIT << 8);
+
+			if (fd.fn_number) {
+				dev_dbg(rmi4_data->pdev->dev.parent,
+						"%s: Found F%02x\n",
+						__func__, fd.fn_number);
+				switch (fd.fn_number) {
+				case SYNAPTICS_RMI4_F12:
+					goto f12_found;
+					break; /* unreachable after goto */
+				}
+			} else {
+				break; /* empty entry: end of this page */
+			}
+
+			/* Count interrupt sources of functions before F12. */
+			intr_count += fd.intr_src_count;
+		}
+	}
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Failed to find F12\n",
+			__func__);
+	return -EINVAL;
+
+f12_found:
+	apen->query_base_addr = fd.query_base_addr | (page << 8);
+	apen->control_base_addr = fd.ctrl_base_addr | (page << 8);
+	apen->data_base_addr = fd.data_base_addr | (page << 8);
+	apen->command_base_addr = fd.cmd_base_addr | (page << 8);
+
+	retval = apen_reg_init();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to initialize active pen registers\n",
+				__func__);
+		return retval;
+	}
+
+	/* F12's interrupt bits start after those counted above. */
+	apen->intr_mask = 0;
+	intr_src = fd.intr_src_count;
+	intr_off = intr_count % 8;
+	for (ii = intr_off;
+			ii < (intr_src + intr_off);
+			ii++) {
+		apen->intr_mask |= 1 << ii;
+	}
+
+	rmi4_data->intr_mask[0] |= apen->intr_mask;
+
+	/* F01 control 1 is the interrupt enable register. */
+	addr = rmi4_data->f01_ctrl_base_addr + 1;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			addr,
+			&(rmi4_data->intr_mask[0]),
+			sizeof(rmi4_data->intr_mask[0]));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set interrupt enable bit\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Attention handler invoked by the DSX core on touch interrupts.
+ * Reads and reports pen data whenever an F12 interrupt bit is set.
+ */
+static void synaptics_rmi4_apen_attn(struct synaptics_rmi4_data *rmi4_data,
+		unsigned char intr_mask)
+{
+	if (apen && (apen->intr_mask & intr_mask))
+		apen_report();
+}
+
+/*
+ * Create the active pen handle: allocate the handle and data buffer,
+ * locate F12 in the PDT, then allocate, configure, and register the
+ * pen input device.  Uses goto-based cleanup so each failure path
+ * releases exactly what was acquired before it.
+ *
+ * Returns 0 on success (or if the handle already exists) or a negative
+ * errno on failure.
+ */
+static int synaptics_rmi4_apen_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+
+	if (apen) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	apen = kzalloc(sizeof(*apen), GFP_KERNEL);
+	if (!apen) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for apen\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	apen->apen_data = kzalloc(sizeof(*(apen->apen_data)), GFP_KERNEL);
+	if (!apen->apen_data) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for apen_data\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_apen;
+	}
+
+	apen->rmi4_data = rmi4_data;
+
+	retval = apen_scan_pdt();
+	if (retval < 0)
+		goto exit_free_apen_data;
+
+	apen->apen_dev = input_allocate_device();
+	if (apen->apen_dev == NULL) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to allocate active pen device\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_apen_data;
+	}
+
+	apen->apen_dev->name = ACTIVE_PEN_DRIVER_NAME;
+	apen->apen_dev->phys = APEN_PHYS_NAME;
+	apen->apen_dev->id.product = SYNAPTICS_DSX_DRIVER_PRODUCT;
+	apen->apen_dev->id.version = SYNAPTICS_DSX_DRIVER_VERSION;
+	apen->apen_dev->dev.parent = rmi4_data->pdev->dev.parent;
+	input_set_drvdata(apen->apen_dev, rmi4_data);
+
+	/* Declare the event types and codes apen_report() will emit. */
+	set_bit(EV_KEY, apen->apen_dev->evbit);
+	set_bit(EV_ABS, apen->apen_dev->evbit);
+	set_bit(BTN_TOUCH, apen->apen_dev->keybit);
+	set_bit(BTN_TOOL_PEN, apen->apen_dev->keybit);
+	set_bit(BTN_TOOL_RUBBER, apen->apen_dev->keybit);
+	set_bit(BTN_STYLUS, apen->apen_dev->keybit);
+#ifdef INPUT_PROP_DIRECT
+	set_bit(INPUT_PROP_DIRECT, apen->apen_dev->propbit);
+#endif
+
+	apen_set_params();
+
+	retval = input_register_device(apen->apen_dev);
+	if (retval) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to register active pen device\n",
+				__func__);
+		goto exit_free_input_device;
+	}
+
+	return 0;
+
+exit_free_input_device:
+	input_free_device(apen->apen_dev);
+
+exit_free_apen_data:
+	kfree(apen->apen_data);
+
+exit_free_apen:
+	kfree(apen);
+	apen = NULL;
+
+exit:
+	return retval;
+}
+
+/*
+ * Tear down the pen input device and free the handle, then signal
+ * module exit that removal has completed.
+ */
+static void synaptics_rmi4_apen_remove(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (apen) {
+		input_unregister_device(apen->apen_dev);
+		kfree(apen->apen_data);
+		kfree(apen);
+		apen = NULL;
+	}
+
+	complete(&apen_remove_complete);
+}
+
+/*
+ * Reset handler: perform first-time initialization if the handle does
+ * not exist yet; otherwise lift any reported pen and rescan the PDT to
+ * refresh the cached register addresses.
+ */
+static void synaptics_rmi4_apen_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (apen) {
+		apen_lift();
+		apen_scan_pdt();
+	} else {
+		synaptics_rmi4_apen_init(rmi4_data);
+	}
+}
+
+/* Reinit handler: release any currently reported pen tool. */
+static void synaptics_rmi4_apen_reinit(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (apen)
+		apen_lift();
+}
+
+/* Early-suspend handler: release any currently reported pen tool. */
+static void synaptics_rmi4_apen_e_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (apen)
+		apen_lift();
+}
+
+/* Suspend handler: release any currently reported pen tool. */
+static void synaptics_rmi4_apen_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (apen)
+		apen_lift();
+}
+
+/*
+ * Expansion-function hooks registered with the DSX core.  The pen is
+ * lifted on reinit/early-suspend/suspend, a reset triggers a PDT rescan
+ * (or first-time init), and attention events report pen data.
+ */
+static struct synaptics_rmi4_exp_fn active_pen_module = {
+	.fn_type = RMI_ACTIVE_PEN,
+	.init = synaptics_rmi4_apen_init,
+	.remove = synaptics_rmi4_apen_remove,
+	.reset = synaptics_rmi4_apen_reset,
+	.reinit = synaptics_rmi4_apen_reinit,
+	.early_suspend = synaptics_rmi4_apen_e_suspend,
+	.suspend = synaptics_rmi4_apen_suspend,
+	.resume = NULL,
+	.late_resume = NULL,
+	.attn = synaptics_rmi4_apen_attn,
+};
+
+/* Register the active pen module with the DSX core at module load. */
+static int __init rmi4_active_pen_module_init(void)
+{
+	synaptics_rmi4_new_function(&active_pen_module, true);
+
+	return 0;
+}
+
+/*
+ * Unregister from the DSX core, then block until the remove handler has
+ * signaled apen_remove_complete so teardown finishes before the module
+ * text is unloaded.
+ */
+static void __exit rmi4_active_pen_module_exit(void)
+{
+	synaptics_rmi4_new_function(&active_pen_module, false);
+
+	wait_for_completion(&apen_remove_complete);
+
+	return;
+}
+
+module_init(rmi4_active_pen_module_init);
+module_exit(rmi4_active_pen_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX Active Pen Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_core.c b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_core.c
new file mode 100644
index 0000000..b2f3bf5
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_core.c
@@ -0,0 +1,4335 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2015 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/input/synaptics_dsx_v2_6.h>
+#include "synaptics_dsx_core.h"
+#ifdef KERNEL_ABOVE_2_6_38
+#include <linux/input/mt.h>
+#endif
+
+#define INPUT_PHYS_NAME "synaptics_dsx/touch_input"
+#define STYLUS_PHYS_NAME "synaptics_dsx/stylus"
+
+#define VIRTUAL_KEY_MAP_FILE_NAME "virtualkeys." PLATFORM_DRIVER_NAME
+
+/* Multi-touch protocol type B is used on kernels above 2.6.38. */
+#ifdef KERNEL_ABOVE_2_6_38
+#define TYPE_B_PROTOCOL
+#endif
+
+/* Build-time default for the wakeup gesture feature. */
+#define WAKEUP_GESTURE false
+
+/* Feature switches for reported touch data. */
+#define NO_0D_WHILE_2D
+#define REPORT_2D_Z
+#define REPORT_2D_W
+/*
+#define REPORT_2D_PRESSURE
+*/
+
+#define F12_DATA_15_WORKAROUND
+
+#define IGNORE_FN_INIT_FAILURE
+
+#define FB_READY_RESET
+#define FB_READY_WAIT_MS 100
+#define FB_READY_TIMEOUT_S 30
+
+/* Touch report content bits; default reports type and X/Y position. */
+#define RPT_TYPE (1 << 0)
+#define RPT_X_LSB (1 << 1)
+#define RPT_X_MSB (1 << 2)
+#define RPT_Y_LSB (1 << 3)
+#define RPT_Y_MSB (1 << 4)
+#define RPT_Z (1 << 5)
+#define RPT_WX (1 << 6)
+#define RPT_WY (1 << 7)
+#define RPT_DEFAULT (RPT_TYPE | RPT_X_LSB | RPT_X_MSB | RPT_Y_LSB | RPT_Y_MSB)
+
+#define REBUILD_WORK_DELAY_MS 500 /* ms */
+
+#define EXP_FN_WORK_DELAY_MS 500 /* ms */
+#define MAX_F11_TOUCH_WIDTH 15
+#define MAX_F12_TOUCH_WIDTH 255
+#define MAX_F12_TOUCH_PRESSURE 255
+
+#define CHECK_STATUS_TIMEOUT_MS 100
+
+#define F01_STD_QUERY_LEN 21
+#define F01_BUID_ID_OFFSET 18
+
+/* F01 device status codes (status_code field below). */
+#define STATUS_NO_ERROR 0x00
+#define STATUS_RESET_OCCURRED 0x01
+#define STATUS_INVALID_CONFIG 0x02
+#define STATUS_DEVICE_FAILURE 0x03
+#define STATUS_CONFIG_CRC_FAILURE 0x04
+#define STATUS_FIRMWARE_CRC_FAILURE 0x05
+#define STATUS_CRC_IN_PROGRESS 0x06
+
+/* F01 device control bits. */
+#define NORMAL_OPERATION (0 << 0)
+#define SENSOR_SLEEP (1 << 0)
+#define NO_SLEEP_OFF (0 << 2)
+#define NO_SLEEP_ON (1 << 2)
+#define CONFIGURED (1 << 7)
+
+/* F11/F12 reporting mode values. */
+#define F11_CONTINUOUS_MODE 0x00
+#define F11_WAKEUP_GESTURE_MODE 0x04
+#define F12_CONTINUOUS_MODE 0x00
+#define F12_WAKEUP_GESTURE_MODE 0x02
+#define F12_UDG_DETECT 0x0f
+
+static int synaptics_rmi4_check_status(struct synaptics_rmi4_data *rmi4_data,
+		bool *was_in_bl_mode);
+static int synaptics_rmi4_free_fingers(struct synaptics_rmi4_data *rmi4_data);
+static int synaptics_rmi4_reinit_device(struct synaptics_rmi4_data *rmi4_data);
+static int synaptics_rmi4_reset_device(struct synaptics_rmi4_data *rmi4_data,
+		bool rebuild);
+
+#ifdef CONFIG_FB
+static int synaptics_rmi4_fb_notifier_cb(struct notifier_block *self,
+		unsigned long event, void *data);
+#endif
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#ifndef CONFIG_FB
+#define USE_EARLYSUSPEND
+#endif
+#endif
+
+#ifdef USE_EARLYSUSPEND
+static void synaptics_rmi4_early_suspend(struct early_suspend *h);
+
+static void synaptics_rmi4_late_resume(struct early_suspend *h);
+#endif
+
+static int synaptics_rmi4_suspend(struct device *dev);
+
+static int synaptics_rmi4_resume(struct device *dev);
+
+static ssize_t synaptics_rmi4_f01_reset_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t synaptics_rmi4_f01_productinfo_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_f01_buildid_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_f01_flashprog_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_0dbutton_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_0dbutton_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t synaptics_rmi4_suspend_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t synaptics_rmi4_wake_gesture_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_wake_gesture_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t synaptics_rmi4_virtual_key_map_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf);
+
+/*
+ * F01 device status: status code (see STATUS_* above) plus flash
+ * programming and unconfigured flags.
+ */
+struct synaptics_rmi4_f01_device_status {
+	union {
+		struct {
+			unsigned char status_code:4;
+			unsigned char reserved:2;
+			unsigned char flash_prog:1;
+			unsigned char unconfigured:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* F11 queries 0-5: feature presence flags and sensor geometry. */
+struct synaptics_rmi4_f11_query_0_5 {
+	union {
+		struct {
+			/* query 0 */
+			unsigned char f11_query0_b0__2:3;
+			unsigned char has_query_9:1;
+			unsigned char has_query_11:1;
+			unsigned char has_query_12:1;
+			unsigned char has_query_27:1;
+			unsigned char has_query_28:1;
+
+			/* query 1 */
+			unsigned char num_of_fingers:3;
+			unsigned char has_rel:1;
+			unsigned char has_abs:1;
+			unsigned char has_gestures:1;
+			unsigned char has_sensitibity_adjust:1;
+			unsigned char f11_query1_b7:1;
+
+			/* query 2 */
+			unsigned char num_of_x_electrodes;
+
+			/* query 3 */
+			unsigned char num_of_y_electrodes;
+
+			/* query 4 */
+			unsigned char max_electrodes:7;
+			unsigned char f11_query4_b7:1;
+
+			/* query 5 */
+			unsigned char abs_data_size:2;
+			unsigned char has_anchored_finger:1;
+			unsigned char has_adj_hyst:1;
+			unsigned char has_dribble:1;
+			unsigned char has_bending_correction:1;
+			unsigned char has_large_object_suppression:1;
+			unsigned char has_jitter_filter:1;
+		} __packed;
+		unsigned char data[6];
+	};
+};
+
+/* F11 queries 7-8: gesture capability flags. */
+struct synaptics_rmi4_f11_query_7_8 {
+	union {
+		struct {
+			/* query 7 */
+			unsigned char has_single_tap:1;
+			unsigned char has_tap_and_hold:1;
+			unsigned char has_double_tap:1;
+			unsigned char has_early_tap:1;
+			unsigned char has_flick:1;
+			unsigned char has_press:1;
+			unsigned char has_pinch:1;
+			unsigned char has_chiral_scroll:1;
+
+			/* query 8 */
+			unsigned char has_palm_detect:1;
+			unsigned char has_rotate:1;
+			unsigned char has_touch_shapes:1;
+			unsigned char has_scroll_zones:1;
+			unsigned char individual_scroll_zones:1;
+			unsigned char has_multi_finger_scroll:1;
+			unsigned char has_multi_finger_scroll_edge_motion:1;
+			unsigned char has_multi_finger_scroll_inertia:1;
+		} __packed;
+		unsigned char data[2];
+	};
+};
+
+/* F11 query 9: pen and proximity capability flags. */
+struct synaptics_rmi4_f11_query_9 {
+	union {
+		struct {
+			unsigned char has_pen:1;
+			unsigned char has_proximity:1;
+			unsigned char has_large_object_sensitivity:1;
+			unsigned char has_suppress_on_large_object_detect:1;
+			unsigned char has_two_pen_thresholds:1;
+			unsigned char has_contact_geometry:1;
+			unsigned char has_pen_hover_discrimination:1;
+			unsigned char has_pen_hover_and_edge_filters:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* F11 query 12: additional capability flags. */
+struct synaptics_rmi4_f11_query_12 {
+	union {
+		struct {
+			unsigned char has_small_object_detection:1;
+			unsigned char has_small_object_detection_tuning:1;
+			unsigned char has_8bit_w:1;
+			unsigned char has_2d_adjustable_mapping:1;
+			unsigned char has_general_information_2:1;
+			unsigned char has_physical_properties:1;
+			unsigned char has_finger_limit:1;
+			unsigned char has_linear_cofficient_2:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* F11 query 27: wakeup gesture and pen correction capability flags. */
+struct synaptics_rmi4_f11_query_27 {
+	union {
+		struct {
+			unsigned char f11_query27_b0:1;
+			unsigned char has_pen_position_correction:1;
+			unsigned char has_pen_jitter_filter_coefficient:1;
+			unsigned char has_group_decomposition:1;
+			unsigned char has_wakeup_gesture:1;
+			unsigned char has_small_finger_correction:1;
+			unsigned char has_data_37:1;
+			unsigned char f11_query27_b7:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* F11 controls 6-9: 12-bit sensor maximum X/Y coordinates. */
+struct synaptics_rmi4_f11_ctrl_6_9 {
+	union {
+		struct {
+			unsigned char sensor_max_x_pos_7_0;
+			unsigned char sensor_max_x_pos_11_8:4;
+			unsigned char f11_ctrl7_b4__7:4;
+			unsigned char sensor_max_y_pos_7_0;
+			unsigned char sensor_max_y_pos_11_8:4;
+			unsigned char f11_ctrl9_b4__7:4;
+		} __packed;
+		unsigned char data[4];
+	};
+};
+
+/* F11 per-finger data: 12-bit X/Y position, W components, and Z. */
+struct synaptics_rmi4_f11_data_1_5 {
+	union {
+		struct {
+			unsigned char x_position_11_4;
+			unsigned char y_position_11_4;
+			unsigned char x_position_3_0:4;
+			unsigned char y_position_3_0:4;
+			unsigned char wx:4;
+			unsigned char wy:4;
+			unsigned char z;
+		} __packed;
+		unsigned char data[5];
+	};
+};
+
+/*
+ * F12 query 5: size of query 6 plus presence flags for F12 control
+ * registers 0-31.
+ */
+struct synaptics_rmi4_f12_query_5 {
+	union {
+		struct {
+			unsigned char size_of_query6;
+			struct {
+				unsigned char ctrl0_is_present:1;
+				unsigned char ctrl1_is_present:1;
+				unsigned char ctrl2_is_present:1;
+				unsigned char ctrl3_is_present:1;
+				unsigned char ctrl4_is_present:1;
+				unsigned char ctrl5_is_present:1;
+				unsigned char ctrl6_is_present:1;
+				unsigned char ctrl7_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl8_is_present:1;
+				unsigned char ctrl9_is_present:1;
+				unsigned char ctrl10_is_present:1;
+				unsigned char ctrl11_is_present:1;
+				unsigned char ctrl12_is_present:1;
+				unsigned char ctrl13_is_present:1;
+				unsigned char ctrl14_is_present:1;
+				unsigned char ctrl15_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl16_is_present:1;
+				unsigned char ctrl17_is_present:1;
+				unsigned char ctrl18_is_present:1;
+				unsigned char ctrl19_is_present:1;
+				unsigned char ctrl20_is_present:1;
+				unsigned char ctrl21_is_present:1;
+				unsigned char ctrl22_is_present:1;
+				unsigned char ctrl23_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl24_is_present:1;
+				unsigned char ctrl25_is_present:1;
+				unsigned char ctrl26_is_present:1;
+				unsigned char ctrl27_is_present:1;
+				unsigned char ctrl28_is_present:1;
+				unsigned char ctrl29_is_present:1;
+				unsigned char ctrl30_is_present:1;
+				unsigned char ctrl31_is_present:1;
+			} __packed;
+		};
+		unsigned char data[5];
+	};
+};
+
+/*
+ * F12 query 8: presence map for data registers 0-23 (one bit per
+ * register) preceded by the size of query 9.  Used to compute the offsets
+ * (data1/data4/data15/data23, ...) within the F12 data block.  data[]
+ * covers the full 4-byte register image.
+ */
+struct synaptics_rmi4_f12_query_8 {
+	union {
+		struct {
+			unsigned char size_of_query9;
+			struct {
+				unsigned char data0_is_present:1;
+				unsigned char data1_is_present:1;
+				unsigned char data2_is_present:1;
+				unsigned char data3_is_present:1;
+				unsigned char data4_is_present:1;
+				unsigned char data5_is_present:1;
+				unsigned char data6_is_present:1;
+				unsigned char data7_is_present:1;
+			} __packed;
+			struct {
+				unsigned char data8_is_present:1;
+				unsigned char data9_is_present:1;
+				unsigned char data10_is_present:1;
+				unsigned char data11_is_present:1;
+				unsigned char data12_is_present:1;
+				unsigned char data13_is_present:1;
+				unsigned char data14_is_present:1;
+				unsigned char data15_is_present:1;
+			} __packed;
+			struct {
+				unsigned char data16_is_present:1;
+				unsigned char data17_is_present:1;
+				unsigned char data18_is_present:1;
+				unsigned char data19_is_present:1;
+				unsigned char data20_is_present:1;
+				unsigned char data21_is_present:1;
+				unsigned char data22_is_present:1;
+				unsigned char data23_is_present:1;
+			} __packed;
+		};
+		unsigned char data[4];
+	};
+};
+
+/*
+ * F12 control 8: sensor geometry — maximum X/Y coordinates (LSB/MSB
+ * pairs), RX/TX pitch, clip values, and the RX/TX electrode counts.
+ * data[] covers the 14-byte register image.
+ */
+struct synaptics_rmi4_f12_ctrl_8 {
+	union {
+		struct {
+			unsigned char max_x_coord_lsb;
+			unsigned char max_x_coord_msb;
+			unsigned char max_y_coord_lsb;
+			unsigned char max_y_coord_msb;
+			unsigned char rx_pitch_lsb;
+			unsigned char rx_pitch_msb;
+			unsigned char tx_pitch_lsb;
+			unsigned char tx_pitch_msb;
+			unsigned char low_rx_clip;
+			unsigned char high_rx_clip;
+			unsigned char low_tx_clip;
+			unsigned char high_tx_clip;
+			unsigned char num_of_rx;
+			unsigned char num_of_tx;
+		};
+		unsigned char data[14];
+	};
+};
+
+/*
+ * F12 control 23: object-type reporting configuration — which object
+ * classes (finger, stylus, palm, glove, cover, eraser, ...) are reported,
+ * whether each is reported as a finger, and the maximum object count.
+ * data[] covers the 5-byte register image.
+ */
+struct synaptics_rmi4_f12_ctrl_23 {
+	union {
+		struct {
+			unsigned char finger_enable:1;
+			unsigned char active_stylus_enable:1;
+			unsigned char palm_enable:1;
+			unsigned char unclassified_object_enable:1;
+			unsigned char hovering_finger_enable:1;
+			unsigned char gloved_finger_enable:1;
+			unsigned char f12_ctr23_00_b6__7:2;
+			unsigned char max_reported_objects;
+			unsigned char f12_ctr23_02_b0:1;
+			unsigned char report_active_stylus_as_finger:1;
+			unsigned char report_palm_as_finger:1;
+			unsigned char report_unclassified_object_as_finger:1;
+			unsigned char report_hovering_finger_as_finger:1;
+			unsigned char report_gloved_finger_as_finger:1;
+			unsigned char report_narrow_object_swipe_as_finger:1;
+			unsigned char report_handedge_as_finger:1;
+			unsigned char cover_enable:1;
+			unsigned char stylus_enable:1;
+			unsigned char eraser_enable:1;
+			unsigned char small_object_enable:1;
+			unsigned char f12_ctr23_03_b4__7:4;
+			unsigned char report_cover_as_finger:1;
+			unsigned char report_stylus_as_finger:1;
+			unsigned char report_eraser_as_finger:1;
+			unsigned char report_small_object_as_finger:1;
+			unsigned char f12_ctr23_04_b4__7:4;
+		};
+		unsigned char data[5];
+	};
+};
+
+/*
+ * F12 control 31: geometry for wedge sensors (see rmi4_data->wedge_sensor
+ * in synaptics_rmi4_f12_abs_report) — max coordinates, RX pitch and clip,
+ * and P/Q electrode counts.  data[] covers the 12-byte register image.
+ */
+struct synaptics_rmi4_f12_ctrl_31 {
+	union {
+		struct {
+			unsigned char max_x_coord_lsb;
+			unsigned char max_x_coord_msb;
+			unsigned char max_y_coord_lsb;
+			unsigned char max_y_coord_msb;
+			unsigned char rx_pitch_lsb;
+			unsigned char rx_pitch_msb;
+			unsigned char rx_clip_low;
+			unsigned char rx_clip_high;
+			unsigned char wedge_clip_low;
+			unsigned char wedge_clip_high;
+			unsigned char num_of_p;
+			unsigned char num_of_q;
+		};
+		unsigned char data[12];
+	};
+};
+
+/*
+ * F12 per-object touch packet as laid out in the data1 register block.
+ * NOTE: sizeof() this struct varies with the REPORT_2D_Z / REPORT_2D_W
+ * compile options; synaptics_rmi4_f12_abs_report() uses sizeof() to size
+ * its register reads, so these options must match the firmware's
+ * configured packet size.
+ */
+struct synaptics_rmi4_f12_finger_data {
+	unsigned char object_type_and_status;
+	unsigned char x_lsb;
+	unsigned char x_msb;
+	unsigned char y_lsb;
+	unsigned char y_msb;
+#ifdef REPORT_2D_Z
+	unsigned char z;
+#endif
+#ifdef REPORT_2D_W
+	unsigned char wx;
+	unsigned char wy;
+#endif
+};
+
+/*
+ * F1A (0-D capacitive buttons) base query: maximum button count plus
+ * capability flags.  data[] covers the 2-byte register image.
+ */
+struct synaptics_rmi4_f1a_query {
+	union {
+		struct {
+			unsigned char max_button_count:3;
+			unsigned char f1a_query0_b3__4:2;
+			unsigned char has_query4:1;
+			unsigned char has_query3:1;
+			unsigned char has_query2:1;
+			unsigned char has_general_control:1;
+			unsigned char has_interrupt_enable:1;
+			unsigned char has_multibutton_select:1;
+			unsigned char has_tx_rx_map:1;
+			unsigned char has_perbutton_threshold:1;
+			unsigned char has_release_threshold:1;
+			unsigned char has_strongestbtn_hysteresis:1;
+			unsigned char has_filter_strength:1;
+		} __packed;
+		unsigned char data[2];
+	};
+};
+
+/*
+ * F1A query 4: extended control-register presence flags (valid only when
+ * has_query4 is set in the base query).  data[] is the raw byte.
+ */
+struct synaptics_rmi4_f1a_query_4 {
+	union {
+		struct {
+			unsigned char has_ctrl19:1;
+			unsigned char f1a_query4_b1__4:4;
+			unsigned char has_ctrl24:1;
+			unsigned char f1a_query4_b6__7:2;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/*
+ * F1A control 0: general button control — multi-button reporting mode
+ * and filter mode.  data[] is the raw byte.
+ */
+struct synaptics_rmi4_f1a_control_0 {
+	union {
+		struct {
+			unsigned char multibutton_report:2;
+			unsigned char filter_mode:2;
+			unsigned char reserved:4;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/*
+ * Aggregated F1A control register values.  txrx_map and button_threshold
+ * are heap-allocated arrays; presumably sized from the query results and
+ * freed with the F1A handle — confirm against the F1A init/free code.
+ */
+struct synaptics_rmi4_f1a_control {
+	struct synaptics_rmi4_f1a_control_0 general_control;
+	unsigned char button_int_enable;
+	unsigned char multi_button;
+	unsigned char *txrx_map;
+	unsigned char *button_threshold;
+	unsigned char button_release_threshold;
+	unsigned char strongest_button_hysteresis;
+	unsigned char filter_strength;
+};
+
+/*
+ * Per-device F1A state, stored in fhandler->data for the F1A function
+ * handler.  button_data_buffer receives the raw button bitmask read in
+ * synaptics_rmi4_f1a_report(); button_map translates button index to
+ * input key code.
+ */
+struct synaptics_rmi4_f1a_handle {
+	int button_bitmask_size;
+	unsigned char max_count;
+	unsigned char valid_button_count;
+	unsigned char *button_data_buffer;
+	unsigned char *button_map;
+	struct synaptics_rmi4_f1a_query button_query;
+	struct synaptics_rmi4_f1a_control button_control;
+};
+
+/*
+ * List node for a registered expansion function module.  The insert and
+ * remove flags mark pending state transitions; entries with either flag
+ * set are skipped by the attention dispatch in
+ * synaptics_rmi4_sensor_report().
+ */
+struct synaptics_rmi4_exp_fhandler {
+	struct synaptics_rmi4_exp_fn *exp_fn;
+	bool insert;
+	bool remove;
+	struct list_head link;
+};
+
+/*
+ * Container for all expansion-module bookkeeping: the handler list, the
+ * mutex guarding it, and the workqueue used to process insert/remove
+ * requests outside interrupt context.
+ */
+struct synaptics_rmi4_exp_fn_data {
+	bool initialized;
+	bool queue_work;
+	struct mutex mutex;
+	struct list_head list;
+	struct delayed_work work;
+	struct workqueue_struct *workqueue;
+	struct synaptics_rmi4_data *rmi4_data;
+};
+
+/* Shared state for dynamically registered expansion function modules */
+static struct synaptics_rmi4_exp_fn_data exp_data;
+
+/* Virtual (on-screen) key map exported via virtual_key_map_attr */
+static struct synaptics_dsx_button_map *vir_button_map;
+
+/*
+ * sysfs attributes for the touch device.  Write-only controls (reset,
+ * suspend) use mode 0220 with an error stub for show; read-only status
+ * uses 0444 with an error stub for store; 0dbutton and wake_gesture are
+ * read-write (0664).
+ */
+static struct device_attribute attrs[] = {
+	__ATTR(reset, 0220,
+			synaptics_rmi4_show_error,
+			synaptics_rmi4_f01_reset_store),
+	__ATTR(productinfo, 0444,
+			synaptics_rmi4_f01_productinfo_show,
+			synaptics_rmi4_store_error),
+	__ATTR(buildid, 0444,
+			synaptics_rmi4_f01_buildid_show,
+			synaptics_rmi4_store_error),
+	__ATTR(flashprog, 0444,
+			synaptics_rmi4_f01_flashprog_show,
+			synaptics_rmi4_store_error),
+	__ATTR(0dbutton, 0664,
+			synaptics_rmi4_0dbutton_show,
+			synaptics_rmi4_0dbutton_store),
+	__ATTR(suspend, 0220,
+			synaptics_rmi4_show_error,
+			synaptics_rmi4_suspend_store),
+	__ATTR(wake_gesture, 0664,
+			synaptics_rmi4_wake_gesture_show,
+			synaptics_rmi4_wake_gesture_store),
+};
+
+/*
+ * Read-only kobject attribute exporting the virtual key map to userspace;
+ * presumably the standard Android board_properties/virtualkeys file —
+ * confirm against the VIRTUAL_KEY_MAP_FILE_NAME definition.
+ */
+static struct kobj_attribute virtual_key_map_attr = {
+	.attr = {
+		.name = VIRTUAL_KEY_MAP_FILE_NAME,
+		.mode = S_IRUGO,
+	},
+	.show = synaptics_rmi4_virtual_key_map_show,
+};
+
+/*
+ * sysfs "reset" store handler: writing "1" issues a soft reset to the
+ * controller via synaptics_rmi4_reset_device().  Any other value is
+ * rejected with -EINVAL.  Returns count on success or a negative errno.
+ */
+static ssize_t synaptics_rmi4_f01_reset_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int reset;
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	/* kstrtouint rejects trailing garbage that sscanf("%u") accepts */
+	if (kstrtouint(buf, 10, &reset))
+		return -EINVAL;
+
+	if (reset != 1)
+		return -EINVAL;
+
+	retval = synaptics_rmi4_reset_device(rmi4_data, false);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to issue reset command, error = %d\n",
+				__func__, retval);
+		return retval;
+	}
+
+	return count;
+}
+
+/*
+ * sysfs "productinfo" show handler: reports the two cached F01 product
+ * info bytes in hex, e.g. "0x12 0x34".
+ */
+static ssize_t synaptics_rmi4_f01_productinfo_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+	unsigned char *info = rmi4_data->rmi4_mod_info.product_info;
+
+	return snprintf(buf, PAGE_SIZE, "0x%02x 0x%02x\n", info[0], info[1]);
+}
+
+/*
+ * sysfs "buildid" show handler: reports the firmware build ID cached in
+ * rmi4_data (populated elsewhere, e.g. at probe/reinit).
+ */
+static ssize_t synaptics_rmi4_f01_buildid_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+			rmi4_data->firmware_id);
+}
+
+/*
+ * sysfs "flashprog" show handler: reads the F01 device status register
+ * and reports whether the controller is currently in flash programming
+ * (bootloader) mode.  Returns the formatted length or a negative errno
+ * if the register read fails.
+ */
+static ssize_t synaptics_rmi4_f01_flashprog_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	struct synaptics_rmi4_f01_device_status device_status;
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_data_base_addr,
+			device_status.data,
+			sizeof(device_status.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read device status, error = %d\n",
+				__func__, retval);
+		return retval;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+			device_status.flash_prog);
+}
+
+/* sysfs "0dbutton" show handler: current 0-D button reporting state */
+static ssize_t synaptics_rmi4_0dbutton_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+			rmi4_data->button_0d_enabled);
+}
+
+/*
+ * sysfs "0dbutton" store handler: enable (nonzero) or disable (0)
+ * reporting of 0-D capacitive buttons by setting or clearing the F1A
+ * bits in the corresponding F01 interrupt enable register.  Returns
+ * count on success, -ENODEV if no functions are registered, or a
+ * negative errno from the register access.
+ */
+static ssize_t synaptics_rmi4_0dbutton_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	unsigned char ii;
+	unsigned char intr_enable;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	/* kstrtouint rejects trailing garbage that sscanf("%u") accepts */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	input = input > 0 ? 1 : 0;
+
+	if (rmi4_data->button_0d_enabled == input)
+		return count;
+
+	if (list_empty(&rmi->support_fn_list))
+		return -ENODEV;
+
+	list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+		if (fhandler->fn_number != SYNAPTICS_RMI4_F1A)
+			continue;
+
+		ii = fhandler->intr_reg_num;
+
+		/* Read-modify-write the F01 interrupt enable register */
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				rmi4_data->f01_ctrl_base_addr + 1 + ii,
+				&intr_enable,
+				sizeof(intr_enable));
+		if (retval < 0)
+			return retval;
+
+		if (input == 1)
+			intr_enable |= fhandler->intr_mask;
+		else
+			intr_enable &= ~fhandler->intr_mask;
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				rmi4_data->f01_ctrl_base_addr + 1 + ii,
+				&intr_enable,
+				sizeof(intr_enable));
+		if (retval < 0)
+			return retval;
+	}
+
+	rmi4_data->button_0d_enabled = input;
+
+	return count;
+}
+
+/*
+ * sysfs "suspend" store handler: "1" suspends the touch controller,
+ * "0" resumes it; any other input is rejected with -EINVAL.
+ */
+static ssize_t synaptics_rmi4_suspend_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+
+	/* kstrtouint rejects trailing garbage that sscanf("%u") accepts */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input == 1)
+		synaptics_rmi4_suspend(dev);
+	else if (input == 0)
+		synaptics_rmi4_resume(dev);
+	else
+		return -EINVAL;
+
+	return count;
+}
+
+/* sysfs "wake_gesture" show handler: current wakeup gesture state */
+static ssize_t synaptics_rmi4_wake_gesture_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+			rmi4_data->enable_wakeup_gesture);
+}
+
+/*
+ * sysfs "wake_gesture" store handler: enable (nonzero) or disable (0)
+ * the wakeup gesture.  The setting is only applied when the firmware
+ * reports wakeup gesture support on F11 or F12; otherwise the write is
+ * silently accepted without effect (preserved legacy behavior).
+ */
+static ssize_t synaptics_rmi4_wake_gesture_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	/* kstrtouint rejects trailing garbage that sscanf("%u") accepts */
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	input = input > 0 ? 1 : 0;
+
+	if (rmi4_data->f11_wakeup_gesture || rmi4_data->f12_wakeup_gesture)
+		rmi4_data->enable_wakeup_gesture = input;
+
+	return count;
+}
+
+/*
+ * sysfs show handler for the virtual key map: emits one
+ * "0x01:code:x:y:w:h" line per virtual button in the Android
+ * board_properties format.
+ *
+ * Fix: snprintf() returns the length that WOULD have been written, so on
+ * truncation the old code advanced count past PAGE_SIZE, making the next
+ * size argument (PAGE_SIZE - count) negative — which wraps to a huge
+ * size_t and lets subsequent iterations write past the sysfs page.  Stop
+ * cleanly when the page is full instead.
+ */
+static ssize_t synaptics_rmi4_virtual_key_map_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	int ii;
+	int cnt;
+	int count = 0;
+
+	for (ii = 0; ii < vir_button_map->nbuttons; ii++) {
+		cnt = snprintf(buf, PAGE_SIZE - count, "0x01:%d:%d:%d:%d:%d\n",
+				vir_button_map->map[ii * 5 + 0],
+				vir_button_map->map[ii * 5 + 1],
+				vir_button_map->map[ii * 5 + 2],
+				vir_button_map->map[ii * 5 + 3],
+				vir_button_map->map[ii * 5 + 4]);
+		if (cnt < 0 || cnt >= PAGE_SIZE - count)
+			break; /* page full (or error); stop, don't overrun */
+		buf += cnt;
+		count += cnt;
+	}
+
+	return count;
+}
+
+/*
+ * Read and report 2-D touch data for RMI function $11.
+ *
+ * When suspended with wakeup gestures enabled, only the gesture data
+ * register (data38) is read and a KEY_WAKEUP press/release is injected
+ * on detection.  Otherwise the finger status bits are read, followed by
+ * the per-finger position packets for every active finger.
+ *
+ * Returns the number of active fingers reported.  Register read errors
+ * are swallowed (returns 0) so the IRQ thread keeps running.
+ */
+static int synaptics_rmi4_f11_abs_report(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler)
+{
+	int retval;
+	unsigned char touch_count = 0; /* number of touch points */
+	unsigned char reg_index;
+	unsigned char finger;
+	unsigned char fingers_supported;
+	unsigned char num_of_finger_status_regs;
+	unsigned char finger_shift;
+	unsigned char finger_status;
+	unsigned char finger_status_reg[3];
+	unsigned char detected_gestures;
+	unsigned short data_addr;
+	unsigned short data_offset;
+	int x;
+	int y;
+	int wx;
+	int wy;
+	int temp;
+	struct synaptics_rmi4_f11_data_1_5 data;
+	struct synaptics_rmi4_f11_extra_data *extra_data;
+
+	/*
+	 * The number of finger status registers is determined by the
+	 * maximum number of fingers supported - 2 bits per finger. So
+	 * the number of finger status registers to read is:
+	 * register_count = ceil(max_num_of_fingers / 4)
+	 */
+	fingers_supported = fhandler->num_of_data_points;
+	num_of_finger_status_regs = (fingers_supported + 3) / 4;
+	data_addr = fhandler->full_addr.data_base;
+
+	extra_data = (struct synaptics_rmi4_f11_extra_data *)fhandler->extra;
+
+	/* Suspended with gestures armed: only check the gesture register */
+	if (rmi4_data->suspend && rmi4_data->enable_wakeup_gesture) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				data_addr + extra_data->data38_offset,
+				&detected_gestures,
+				sizeof(detected_gestures));
+		if (retval < 0)
+			return 0;
+
+		if (detected_gestures) {
+			input_report_key(rmi4_data->input_dev, KEY_WAKEUP, 1);
+			input_sync(rmi4_data->input_dev);
+			input_report_key(rmi4_data->input_dev, KEY_WAKEUP, 0);
+			input_sync(rmi4_data->input_dev);
+			rmi4_data->suspend = false;
+		}
+
+		return 0;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			data_addr,
+			finger_status_reg,
+			num_of_finger_status_regs);
+	if (retval < 0)
+		return 0;
+
+	mutex_lock(&(rmi4_data->rmi4_report_mutex));
+
+	for (finger = 0; finger < fingers_supported; finger++) {
+		reg_index = finger / 4;
+		finger_shift = (finger % 4) * 2;
+		finger_status = (finger_status_reg[reg_index] >> finger_shift)
+				& MASK_2BIT;
+
+		/*
+		 * Each 2-bit finger status field represents the following:
+		 * 00 = finger not present
+		 * 01 = finger present and data accurate
+		 * 10 = finger present but data may be inaccurate
+		 * 11 = reserved
+		 */
+#ifdef TYPE_B_PROTOCOL
+		input_mt_slot(rmi4_data->input_dev, finger);
+		input_mt_report_slot_state(rmi4_data->input_dev,
+				MT_TOOL_FINGER, finger_status);
+#endif
+
+		if (finger_status) {
+			/* Per-finger packets follow the status registers */
+			data_offset = data_addr +
+					num_of_finger_status_regs +
+					(finger * sizeof(data.data));
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					data_offset,
+					data.data,
+					sizeof(data.data));
+			if (retval < 0) {
+				touch_count = 0;
+				goto exit;
+			}
+
+			x = (data.x_position_11_4 << 4) | data.x_position_3_0;
+			y = (data.y_position_11_4 << 4) | data.y_position_3_0;
+			wx = data.wx;
+			wy = data.wy;
+
+			if (rmi4_data->hw_if->board_data->swap_axes) {
+				temp = x;
+				x = y;
+				y = temp;
+				temp = wx;
+				wx = wy;
+				wy = temp;
+			}
+
+			if (rmi4_data->hw_if->board_data->x_flip)
+				x = rmi4_data->sensor_max_x - x;
+			if (rmi4_data->hw_if->board_data->y_flip)
+				y = rmi4_data->sensor_max_y - y;
+
+			input_report_key(rmi4_data->input_dev,
+					BTN_TOUCH, 1);
+			input_report_key(rmi4_data->input_dev,
+					BTN_TOOL_FINGER, 1);
+			input_report_abs(rmi4_data->input_dev,
+					ABS_MT_POSITION_X, x);
+			input_report_abs(rmi4_data->input_dev,
+					ABS_MT_POSITION_Y, y);
+#ifdef REPORT_2D_W
+			input_report_abs(rmi4_data->input_dev,
+					ABS_MT_TOUCH_MAJOR, max(wx, wy));
+			input_report_abs(rmi4_data->input_dev,
+					ABS_MT_TOUCH_MINOR, min(wx, wy));
+#endif
+#ifndef TYPE_B_PROTOCOL
+			input_mt_sync(rmi4_data->input_dev);
+#endif
+
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Finger %d: status = 0x%02x, x = %d, y = %d, wx = %d, wy = %d\n",
+					__func__, finger,
+					finger_status,
+					x, y, wx, wy);
+
+			touch_count++;
+		}
+	}
+
+	/* No active fingers: release BTN_TOUCH/BTN_TOOL_FINGER */
+	if (touch_count == 0) {
+		input_report_key(rmi4_data->input_dev,
+				BTN_TOUCH, 0);
+		input_report_key(rmi4_data->input_dev,
+				BTN_TOOL_FINGER, 0);
+#ifndef TYPE_B_PROTOCOL
+		input_mt_sync(rmi4_data->input_dev);
+#endif
+	}
+
+	input_sync(rmi4_data->input_dev);
+
+exit:
+	mutex_unlock(&(rmi4_data->rmi4_report_mutex));
+
+	return touch_count;
+}
+
+/*
+ * Read and report 2-D touch data for RMI function $12.
+ *
+ * When suspended with wakeup gestures enabled, only the gesture data
+ * register (data4) is checked and a KEY_WAKEUP press/release is injected.
+ * Otherwise the object-attention bitmap (data15, if present) trims the
+ * number of per-object packets to read, then each object is dispatched by
+ * its type: fingers/gloves to the touch input device, styli/erasers to
+ * the separate stylus device (stylus has priority over fingers).
+ *
+ * Note: finger_presence/stylus_presence/objects_already_present are
+ * function-static, so this tracking assumes a single device instance.
+ *
+ * Returns the number of objects reported; register read errors are
+ * swallowed (returns 0) so the IRQ thread keeps running.
+ */
+static int synaptics_rmi4_f12_abs_report(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler)
+{
+	int retval;
+	unsigned char touch_count = 0; /* number of touch points */
+	unsigned char index;
+	unsigned char finger;
+	unsigned char fingers_to_process;
+	unsigned char finger_status;
+	unsigned char size_of_2d_data;
+	unsigned char gesture_type;
+	unsigned short data_addr;
+	int x;
+	int y;
+	int wx;
+	int wy;
+	int temp;
+#ifdef REPORT_2D_PRESSURE
+	int pressure;
+#endif
+	struct synaptics_rmi4_f12_extra_data *extra_data;
+	struct synaptics_rmi4_f12_finger_data *data;
+	struct synaptics_rmi4_f12_finger_data *finger_data;
+	static unsigned char finger_presence;
+	static unsigned char stylus_presence;
+#ifdef F12_DATA_15_WORKAROUND
+	static unsigned char objects_already_present;
+#endif
+
+	fingers_to_process = fhandler->num_of_data_points;
+	data_addr = fhandler->full_addr.data_base;
+	extra_data = (struct synaptics_rmi4_f12_extra_data *)fhandler->extra;
+	size_of_2d_data = sizeof(struct synaptics_rmi4_f12_finger_data);
+
+	/* Suspended with gestures armed: only check the gesture register */
+	if (rmi4_data->suspend && rmi4_data->enable_wakeup_gesture) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				data_addr + extra_data->data4_offset,
+				rmi4_data->gesture_detection,
+				sizeof(rmi4_data->gesture_detection));
+		if (retval < 0)
+			return 0;
+
+		gesture_type = rmi4_data->gesture_detection[0];
+
+		if (gesture_type && gesture_type != F12_UDG_DETECT) {
+			input_report_key(rmi4_data->input_dev, KEY_WAKEUP, 1);
+			input_sync(rmi4_data->input_dev);
+			input_report_key(rmi4_data->input_dev, KEY_WAKEUP, 0);
+			input_sync(rmi4_data->input_dev);
+			rmi4_data->suspend = false;
+		}
+
+		return 0;
+	}
+
+	/* Determine the total number of fingers to process */
+	if (extra_data->data15_size) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				data_addr + extra_data->data15_offset,
+				extra_data->data15_data,
+				extra_data->data15_size);
+		if (retval < 0)
+			return 0;
+
+		/* Start checking from the highest bit */
+		index = extra_data->data15_size - 1; /* Highest byte */
+		finger = (fingers_to_process - 1) % 8; /* Highest bit */
+		do {
+			if (extra_data->data15_data[index] & (1 << finger))
+				break;
+
+			if (finger) {
+				finger--;
+			} else if (index > 0) {
+				index--; /* Move to the next lower byte */
+				finger = 7;
+			}
+
+			fingers_to_process--;
+		} while (fingers_to_process);
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Number of fingers to process = %d\n",
+			__func__, fingers_to_process);
+	}
+
+#ifdef F12_DATA_15_WORKAROUND
+	/* Keep processing objects seen last pass even if data15 says none */
+	fingers_to_process = max(fingers_to_process, objects_already_present);
+#endif
+
+	if (!fingers_to_process) {
+		synaptics_rmi4_free_fingers(rmi4_data);
+		finger_presence = 0;
+		stylus_presence = 0;
+		return 0;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			data_addr + extra_data->data1_offset,
+			(unsigned char *)fhandler->data,
+			fingers_to_process * size_of_2d_data);
+	if (retval < 0)
+		return 0;
+
+	data = (struct synaptics_rmi4_f12_finger_data *)fhandler->data;
+
+#ifdef REPORT_2D_PRESSURE
+	if (rmi4_data->report_pressure) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				data_addr + extra_data->data23_offset,
+				extra_data->data23_data,
+				fingers_to_process);
+		if (retval < 0)
+			return 0;
+	}
+#endif
+
+	mutex_lock(&(rmi4_data->rmi4_report_mutex));
+
+	for (finger = 0; finger < fingers_to_process; finger++) {
+		finger_data = data + finger;
+		finger_status = finger_data->object_type_and_status;
+
+#ifdef F12_DATA_15_WORKAROUND
+		objects_already_present = finger + 1;
+#endif
+
+		x = (finger_data->x_msb << 8) | (finger_data->x_lsb);
+		y = (finger_data->y_msb << 8) | (finger_data->y_lsb);
+#ifdef REPORT_2D_W
+		wx = finger_data->wx;
+		wy = finger_data->wy;
+#endif
+
+		if (rmi4_data->hw_if->board_data->swap_axes) {
+			temp = x;
+			x = y;
+			y = temp;
+			temp = wx;
+			wx = wy;
+			wy = temp;
+		}
+
+		if (rmi4_data->hw_if->board_data->x_flip)
+			x = rmi4_data->sensor_max_x - x;
+		if (rmi4_data->hw_if->board_data->y_flip)
+			y = rmi4_data->sensor_max_y - y;
+
+		switch (finger_status) {
+		case F12_FINGER_STATUS:
+		case F12_GLOVED_FINGER_STATUS:
+			/* Stylus has priority over fingers */
+			if (stylus_presence)
+				break;
+#ifdef TYPE_B_PROTOCOL
+			input_mt_slot(rmi4_data->input_dev, finger);
+			input_mt_report_slot_state(rmi4_data->input_dev,
+					MT_TOOL_FINGER, 1);
+#endif
+
+			input_report_key(rmi4_data->input_dev,
+					BTN_TOUCH, 1);
+			input_report_key(rmi4_data->input_dev,
+					BTN_TOOL_FINGER, 1);
+			input_report_abs(rmi4_data->input_dev,
+					ABS_MT_POSITION_X, x);
+			input_report_abs(rmi4_data->input_dev,
+					ABS_MT_POSITION_Y, y);
+#ifdef REPORT_2D_W
+			if (rmi4_data->wedge_sensor) {
+				input_report_abs(rmi4_data->input_dev,
+						ABS_MT_TOUCH_MAJOR, wx);
+				input_report_abs(rmi4_data->input_dev,
+						ABS_MT_TOUCH_MINOR, wx);
+			} else {
+				input_report_abs(rmi4_data->input_dev,
+						ABS_MT_TOUCH_MAJOR,
+						max(wx, wy));
+				input_report_abs(rmi4_data->input_dev,
+						ABS_MT_TOUCH_MINOR,
+						min(wx, wy));
+			}
+#endif
+#ifdef REPORT_2D_PRESSURE
+			if (rmi4_data->report_pressure) {
+				pressure = extra_data->data23_data[finger];
+				input_report_abs(rmi4_data->input_dev,
+						ABS_MT_PRESSURE, pressure);
+			}
+#endif
+#ifndef TYPE_B_PROTOCOL
+			input_mt_sync(rmi4_data->input_dev);
+#endif
+			input_sync(rmi4_data->input_dev);
+
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Finger %d: status = 0x%02x, x = %d, y = %d, wx = %d, wy = %d\n",
+					__func__, finger,
+					finger_status,
+					x, y, wx, wy);
+
+			finger_presence = 1;
+			touch_count++;
+			break;
+		case F12_PALM_STATUS:
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Finger %d: x = %d, y = %d, wx = %d, wy = %d\n",
+					__func__, finger,
+					x, y, wx, wy);
+			break;
+		case F12_STYLUS_STATUS:
+		case F12_ERASER_STATUS:
+			if (!rmi4_data->stylus_enable)
+				break;
+			/* Stylus has priority over fingers */
+			if (finger_presence) {
+				mutex_unlock(&(rmi4_data->rmi4_report_mutex));
+				synaptics_rmi4_free_fingers(rmi4_data);
+				mutex_lock(&(rmi4_data->rmi4_report_mutex));
+				finger_presence = 0;
+			}
+			if (stylus_presence) {/* Allow one stylus at a time */
+				if (finger + 1 != stylus_presence)
+					break;
+			}
+			input_report_key(rmi4_data->stylus_dev,
+					BTN_TOUCH, 1);
+			if (finger_status == F12_STYLUS_STATUS) {
+				input_report_key(rmi4_data->stylus_dev,
+						BTN_TOOL_PEN, 1);
+			} else {
+				input_report_key(rmi4_data->stylus_dev,
+						BTN_TOOL_RUBBER, 1);
+			}
+			input_report_abs(rmi4_data->stylus_dev,
+					ABS_X, x);
+			input_report_abs(rmi4_data->stylus_dev,
+					ABS_Y, y);
+			input_sync(rmi4_data->stylus_dev);
+
+			stylus_presence = finger + 1;
+			touch_count++;
+			break;
+		default:
+#ifdef TYPE_B_PROTOCOL
+			input_mt_slot(rmi4_data->input_dev, finger);
+			input_mt_report_slot_state(rmi4_data->input_dev,
+					MT_TOOL_FINGER, 0);
+#endif
+			break;
+		}
+	}
+
+	/* No objects reported: release everything on both input devices */
+	if (touch_count == 0) {
+		finger_presence = 0;
+#ifdef F12_DATA_15_WORKAROUND
+		objects_already_present = 0;
+#endif
+		input_report_key(rmi4_data->input_dev,
+				BTN_TOUCH, 0);
+		input_report_key(rmi4_data->input_dev,
+				BTN_TOOL_FINGER, 0);
+#ifndef TYPE_B_PROTOCOL
+		input_mt_sync(rmi4_data->input_dev);
+#endif
+		input_sync(rmi4_data->input_dev);
+
+		if (rmi4_data->stylus_enable) {
+			stylus_presence = 0;
+			input_report_key(rmi4_data->stylus_dev,
+					BTN_TOUCH, 0);
+			input_report_key(rmi4_data->stylus_dev,
+					BTN_TOOL_PEN, 0);
+			if (rmi4_data->eraser_enable) {
+				input_report_key(rmi4_data->stylus_dev,
+						BTN_TOOL_RUBBER, 0);
+			}
+			input_sync(rmi4_data->stylus_dev);
+		}
+	}
+
+	mutex_unlock(&(rmi4_data->rmi4_report_mutex));
+
+	return touch_count;
+}
+
+/*
+ * Read and report 0-D capacitive button state for RMI function $1A.
+ *
+ * Reads the button bitmask, then reports only buttons whose state changed
+ * since the previous pass (tracked in the function-static current_status,
+ * which assumes a single device instance).  With NO_0D_WHILE_2D, button
+ * presses that begin while fingers are on the 2-D surface are withheld
+ * and their releases suppressed, so accidental palm/edge presses during
+ * touch interaction do not generate key events.
+ */
+static void synaptics_rmi4_f1a_report(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler)
+{
+	int retval;
+	unsigned char touch_count = 0;
+	unsigned char button;
+	unsigned char index;
+	unsigned char shift;
+	unsigned char status;
+	unsigned char *data;
+	unsigned short data_addr = fhandler->full_addr.data_base;
+	struct synaptics_rmi4_f1a_handle *f1a = fhandler->data;
+	static unsigned char do_once = 1;
+	static bool current_status[MAX_NUMBER_OF_BUTTONS];
+#ifdef NO_0D_WHILE_2D
+	static bool before_2d_status[MAX_NUMBER_OF_BUTTONS];
+	static bool while_2d_status[MAX_NUMBER_OF_BUTTONS];
+#endif
+
+	/* One-time zeroing of the static tracking arrays */
+	if (do_once) {
+		memset(current_status, 0, sizeof(current_status));
+#ifdef NO_0D_WHILE_2D
+		memset(before_2d_status, 0, sizeof(before_2d_status));
+		memset(while_2d_status, 0, sizeof(while_2d_status));
+#endif
+		do_once = 0;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			data_addr,
+			f1a->button_data_buffer,
+			f1a->button_bitmask_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read button data registers\n",
+				__func__);
+		return;
+	}
+
+	data = f1a->button_data_buffer;
+
+	mutex_lock(&(rmi4_data->rmi4_report_mutex));
+
+	for (button = 0; button < f1a->valid_button_count; button++) {
+		index = button / 8;
+		shift = button % 8;
+		status = ((data[index] >> shift) & MASK_1BIT);
+
+		/* Only report transitions */
+		if (current_status[button] == status)
+			continue;
+		else
+			current_status[button] = status;
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Button %d (code %d) ->%d\n",
+				__func__, button,
+				f1a->button_map[button],
+				status);
+#ifdef NO_0D_WHILE_2D
+		if (rmi4_data->fingers_on_2d == false) {
+			if (status == 1) {
+				before_2d_status[button] = 1;
+			} else {
+				if (while_2d_status[button] == 1) {
+					while_2d_status[button] = 0;
+					continue;
+				} else {
+					before_2d_status[button] = 0;
+				}
+			}
+			touch_count++;
+			input_report_key(rmi4_data->input_dev,
+					f1a->button_map[button],
+					status);
+		} else {
+			if (before_2d_status[button] == 1) {
+				before_2d_status[button] = 0;
+				touch_count++;
+				input_report_key(rmi4_data->input_dev,
+						f1a->button_map[button],
+						status);
+			} else {
+				if (status == 1)
+					while_2d_status[button] = 1;
+				else
+					while_2d_status[button] = 0;
+			}
+		}
+#else
+		touch_count++;
+		input_report_key(rmi4_data->input_dev,
+				f1a->button_map[button],
+				status);
+#endif
+	}
+
+	if (touch_count)
+		input_sync(rmi4_data->input_dev);
+
+	mutex_unlock(&(rmi4_data->rmi4_report_mutex));
+
+	return;
+}
+
+/*
+ * Dispatch an interrupt from a single RMI function handler to its report
+ * routine.  For the 2-D functions (F11/F12) the fingers_on_2d flag is
+ * updated from the reported touch count so the 0-D button logic can
+ * suppress presses that occur during 2-D interaction.
+ */
+static void synaptics_rmi4_report_touch(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler)
+{
+	unsigned char touch_count_2d;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Function %02x reporting\n",
+			__func__, fhandler->fn_number);
+
+	switch (fhandler->fn_number) {
+	case SYNAPTICS_RMI4_F11:
+		touch_count_2d = synaptics_rmi4_f11_abs_report(rmi4_data,
+				fhandler);
+		rmi4_data->fingers_on_2d = (touch_count_2d > 0);
+		break;
+	case SYNAPTICS_RMI4_F12:
+		touch_count_2d = synaptics_rmi4_f12_abs_report(rmi4_data,
+				fhandler);
+		rmi4_data->fingers_on_2d = (touch_count_2d > 0);
+		break;
+	case SYNAPTICS_RMI4_F1A:
+		synaptics_rmi4_f1a_report(rmi4_data, fhandler);
+		break;
+	default:
+		break;
+	}
+}
+
+/*
+ * Top-level interrupt service: read the F01 device status and interrupt
+ * status registers in one transaction (data[0] = device status, data[1..]
+ * = interrupt status), recover from CRC-in-progress or spontaneous reset
+ * conditions, then (when @report is true) dispatch each flagged interrupt
+ * source to its function handler and notify registered expansion modules.
+ * With @report false only the status handling/clearing is performed.
+ */
+static void synaptics_rmi4_sensor_report(struct synaptics_rmi4_data *rmi4_data,
+		bool report)
+{
+	int retval;
+	unsigned char data[MAX_INTR_REGISTERS + 1];
+	unsigned char *intr = &data[1]; /* interrupt status follows F01 Data0 */
+	bool was_in_bl_mode;
+	struct synaptics_rmi4_f01_device_status status;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	/* During firmware update, do not touch the device */
+	if (rmi4_data->stay_awake) {
+		msleep(30);
+		return;
+	}
+
+	/*
+	 * Get interrupt status information from F01 Data1 register to
+	 * determine the source(s) that are flagging the interrupt.
+	 */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_data_base_addr,
+			data,
+			rmi4_data->num_of_intr_regs + 1);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read interrupt status\n",
+				__func__);
+		return;
+	}
+
+	status.data[0] = data[0];
+	if (status.status_code == STATUS_CRC_IN_PROGRESS) {
+		retval = synaptics_rmi4_check_status(rmi4_data,
+				&was_in_bl_mode);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to check status\n",
+					__func__);
+			return;
+		}
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				rmi4_data->f01_data_base_addr,
+				status.data,
+				sizeof(status.data));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read device status\n",
+					__func__);
+			return;
+		}
+	}
+	/* Unconfigured outside flash programming => device reset itself */
+	if (status.unconfigured && !status.flash_prog) {
+		pr_notice("%s: spontaneous reset detected\n", __func__);
+		retval = synaptics_rmi4_reinit_device(rmi4_data);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to reinit device\n",
+					__func__);
+		}
+	}
+
+	if (!report)
+		return;
+
+	/*
+	 * Traverse the function handler list and service the source(s)
+	 * of the interrupt accordingly.
+	 */
+	if (!list_empty(&rmi->support_fn_list)) {
+		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+			if (fhandler->num_of_data_sources) {
+				if (fhandler->intr_mask &
+						intr[fhandler->intr_reg_num]) {
+					synaptics_rmi4_report_touch(rmi4_data,
+							fhandler);
+				}
+			}
+		}
+	}
+
+	/* Forward the first interrupt status byte to expansion modules */
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link) {
+			if (!exp_fhandler->insert &&
+					!exp_fhandler->remove &&
+					(exp_fhandler->exp_fn->attn != NULL))
+				exp_fhandler->exp_fn->attn(rmi4_data, intr[0]);
+		}
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	return;
+}
+
+/*
+ * Threaded IRQ handler.  The sensor is serviced only while the ATTN GPIO
+ * is still at its asserted level; a spurious or already-cleared interrupt
+ * is acknowledged without touching the device.
+ */
+static irqreturn_t synaptics_rmi4_irq(int irq, void *data)
+{
+	struct synaptics_rmi4_data *rmi4_data = data;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	if (gpio_get_value(bdata->irq_gpio) == bdata->irq_on_state)
+		synaptics_rmi4_sensor_report(rmi4_data, true);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Program the F01 interrupt enable registers: write the cached interrupt
+ * mask to enable, or zero to disable.  Registers whose cached mask is
+ * zero are skipped.  Returns 0 on success or the first negative errno
+ * from a register write.
+ */
+static int synaptics_rmi4_int_enable(struct synaptics_rmi4_data *rmi4_data,
+		bool enable)
+{
+	int retval = 0;
+	unsigned char reg;
+	unsigned char zero = 0x00;
+	unsigned char *mask = rmi4_data->intr_mask;
+	unsigned short intr_addr;
+
+	for (reg = 0; reg < rmi4_data->num_of_intr_regs; reg++) {
+		if (mask[reg] == 0x00)
+			continue;
+
+		/* Interrupt enable registers follow F01 Ctrl0 */
+		intr_addr = rmi4_data->f01_ctrl_base_addr + 1 + reg;
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				intr_addr,
+				enable ? &mask[reg] : &zero,
+				sizeof(zero));
+		if (retval < 0)
+			return retval;
+	}
+
+	return retval;
+}
+
+/*
+ * Enable or disable servicing of the touch controller interrupt.
+ *
+ * @enable:    true to drain stale events, request the IRQ and unmask
+ *             the interrupt sources; false to tear the IRQ down.
+ * @attn_only: when true only the F01 interrupt enable registers are
+ *             touched and the host-side IRQ registration is left alone.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int synaptics_rmi4_irq_enable(struct synaptics_rmi4_data *rmi4_data,
+		bool enable, bool attn_only)
+{
+	int retval = 0;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	if (attn_only) {
+		retval = synaptics_rmi4_int_enable(rmi4_data, enable);
+		return retval;
+	}
+
+	if (enable) {
+		if (rmi4_data->irq_enabled)
+			return retval;
+
+		/* Mask all sources while stale events are drained */
+		retval = synaptics_rmi4_int_enable(rmi4_data, false);
+		if (retval < 0)
+			return retval;
+
+		/* Process and clear interrupts */
+		synaptics_rmi4_sensor_report(rmi4_data, false);
+
+		retval = request_threaded_irq(rmi4_data->irq, NULL,
+				synaptics_rmi4_irq, bdata->irq_flags,
+				PLATFORM_DRIVER_NAME, rmi4_data);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create irq thread\n",
+					__func__);
+			return retval;
+		}
+
+		retval = synaptics_rmi4_int_enable(rmi4_data, true);
+		if (retval < 0) {
+			/*
+			 * Do not leave the IRQ registered with irq_enabled
+			 * still false: the disable path would never free it
+			 * and a retry would double-request the line.
+			 */
+			free_irq(rmi4_data->irq, rmi4_data);
+			return retval;
+		}
+
+		rmi4_data->irq_enabled = true;
+	} else {
+		if (rmi4_data->irq_enabled) {
+			disable_irq(rmi4_data->irq);
+			free_irq(rmi4_data->irq, rmi4_data);
+			rmi4_data->irq_enabled = false;
+		}
+	}
+
+	return retval;
+}
+
+/*
+ * Compute which interrupt enable register, and which bits within it,
+ * belong to this function handler's interrupt source(s).
+ *
+ * @intr_count is the number of interrupt sources claimed by functions
+ * discovered before this one, i.e. the absolute bit position where
+ * this function's sources begin.
+ */
+static void synaptics_rmi4_set_intr_mask(struct synaptics_rmi4_fn *fhandler,
+		struct synaptics_rmi4_fn_desc *fd,
+		unsigned int intr_count)
+{
+	unsigned char bit;
+	unsigned char first_bit = intr_count % 8;
+
+	/* Index of the interrupt register byte holding these sources */
+	fhandler->intr_reg_num = intr_count ? (intr_count - 1) / 8 : 0;
+
+	/* Set an enable bit for each data source */
+	fhandler->intr_mask = 0;
+	for (bit = first_bit; bit < first_bit + fd->intr_src_count; bit++)
+		fhandler->intr_mask |= 1 << bit;
+}
+
+/*
+ * Initialize the handler for F01 (RMI device control) and cache its
+ * register base addresses for later device-wide accesses (status
+ * polling, sleep control, reset).  Always succeeds.
+ */
+static int synaptics_rmi4_f01_init(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler,
+		struct synaptics_rmi4_fn_desc *fd,
+		unsigned int intr_count)
+{
+	/* F01 carries no per-function storage */
+	fhandler->fn_number = fd->fn_number;
+	fhandler->num_of_data_sources = fd->intr_src_count;
+	fhandler->extra = NULL;
+	fhandler->data = NULL;
+
+	synaptics_rmi4_set_intr_mask(fhandler, fd, intr_count);
+
+	/* Remember where F01 lives on the register map */
+	rmi4_data->f01_query_base_addr = fd->query_base_addr;
+	rmi4_data->f01_ctrl_base_addr = fd->ctrl_base_addr;
+	rmi4_data->f01_data_base_addr = fd->data_base_addr;
+	rmi4_data->f01_cmd_base_addr = fd->cmd_base_addr;
+
+	return 0;
+}
+
+/*
+ * Initialize the handler for F11 (legacy 2D touch reporting).
+ *
+ * Reads the F11 query registers to obtain the supported finger count
+ * and the sensor's maximum coordinates, then walks the optional query
+ * registers to discover which features the firmware implements.  When
+ * the wakeup gesture feature is present, the sizes of the implemented
+ * data registers are accumulated to locate data register 38 for
+ * gesture reporting.
+ *
+ * Returns a non-negative value on success or a negative error code on
+ * register read failure.
+ *
+ * NOTE(review): fhandler->extra allocated here is not freed on the
+ * early error returns below; confirm the caller's error path releases
+ * the handler in that case.
+ */
+static int synaptics_rmi4_f11_init(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler,
+		struct synaptics_rmi4_fn_desc *fd,
+		unsigned int intr_count)
+{
+	int retval;
+	int temp;
+	unsigned char offset;
+	unsigned char fingers_supported;
+	struct synaptics_rmi4_f11_extra_data *extra_data;
+	struct synaptics_rmi4_f11_query_0_5 query_0_5;
+	struct synaptics_rmi4_f11_query_7_8 query_7_8;
+	struct synaptics_rmi4_f11_query_9 query_9;
+	struct synaptics_rmi4_f11_query_12 query_12;
+	struct synaptics_rmi4_f11_query_27 query_27;
+	struct synaptics_rmi4_f11_ctrl_6_9 control_6_9;
+	const struct synaptics_dsx_board_data *bdata =
+				rmi4_data->hw_if->board_data;
+
+	fhandler->fn_number = fd->fn_number;
+	fhandler->num_of_data_sources = fd->intr_src_count;
+	fhandler->extra = kmalloc(sizeof(*extra_data), GFP_KERNEL);
+	if (!fhandler->extra) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for fhandler->extra\n",
+				__func__);
+		return -ENOMEM;
+	}
+	extra_data = (struct synaptics_rmi4_f11_extra_data *)fhandler->extra;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fhandler->full_addr.query_base,
+			query_0_5.data,
+			sizeof(query_0_5.data));
+	if (retval < 0)
+		return retval;
+
+	/* Maximum number of fingers supported */
+	if (query_0_5.num_of_fingers <= 4)
+		fhandler->num_of_data_points = query_0_5.num_of_fingers + 1;
+	else if (query_0_5.num_of_fingers == 5)
+		fhandler->num_of_data_points = 10;
+
+	rmi4_data->num_of_fingers = fhandler->num_of_data_points;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fhandler->full_addr.ctrl_base + 6,
+			control_6_9.data,
+			sizeof(control_6_9.data));
+	if (retval < 0)
+		return retval;
+
+	/* Maximum x and y */
+	rmi4_data->sensor_max_x = control_6_9.sensor_max_x_pos_7_0 |
+			(control_6_9.sensor_max_x_pos_11_8 << 8);
+	rmi4_data->sensor_max_y = control_6_9.sensor_max_y_pos_7_0 |
+			(control_6_9.sensor_max_y_pos_11_8 << 8);
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Function %02x max x = %d max y = %d\n",
+			__func__, fhandler->fn_number,
+			rmi4_data->sensor_max_x,
+			rmi4_data->sensor_max_y);
+
+	rmi4_data->max_touch_width = MAX_F11_TOUCH_WIDTH;
+
+	if (bdata->swap_axes) {
+		temp = rmi4_data->sensor_max_x;
+		rmi4_data->sensor_max_x = rmi4_data->sensor_max_y;
+		rmi4_data->sensor_max_y = temp;
+	}
+
+	synaptics_rmi4_set_intr_mask(fhandler, fd, intr_count);
+
+	fhandler->data = NULL;
+
+	/* Walk the query registers; offset is relative to the query base */
+	offset = sizeof(query_0_5.data);
+
+	/* query 6 */
+	if (query_0_5.has_rel)
+		offset += 1;
+
+	/* queries 7 8 */
+	if (query_0_5.has_gestures) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				fhandler->full_addr.query_base + offset,
+				query_7_8.data,
+				sizeof(query_7_8.data));
+		if (retval < 0)
+			return retval;
+
+		offset += sizeof(query_7_8.data);
+	}
+
+	/* query 9 */
+	if (query_0_5.has_query_9) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				fhandler->full_addr.query_base + offset,
+				query_9.data,
+				sizeof(query_9.data));
+		if (retval < 0)
+			return retval;
+
+		offset += sizeof(query_9.data);
+	}
+
+	/* query 10 */
+	if (query_0_5.has_gestures && query_7_8.has_touch_shapes)
+		offset += 1;
+
+	/* query 11 */
+	if (query_0_5.has_query_11)
+		offset += 1;
+
+	/* query 12 */
+	if (query_0_5.has_query_12) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				fhandler->full_addr.query_base + offset,
+				query_12.data,
+				sizeof(query_12.data));
+		if (retval < 0)
+			return retval;
+
+		offset += sizeof(query_12.data);
+	}
+
+	/* query 13 */
+	if (query_0_5.has_jitter_filter)
+		offset += 1;
+
+	/* query 14 */
+	if (query_0_5.has_query_12 && query_12.has_general_information_2)
+		offset += 1;
+
+	/* queries 15 16 17 18 19 20 21 22 23 24 25 26*/
+	if (query_0_5.has_query_12 && query_12.has_physical_properties)
+		offset += 12;
+
+	/* query 27 */
+	if (query_0_5.has_query_27) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				fhandler->full_addr.query_base + offset,
+				query_27.data,
+				sizeof(query_27.data));
+		if (retval < 0)
+			return retval;
+
+		rmi4_data->f11_wakeup_gesture = query_27.has_wakeup_gesture;
+	}
+
+	if (!rmi4_data->f11_wakeup_gesture)
+		return retval;
+
+	/* Accumulate data register sizes to locate data 38 (gestures) */
+	/* data 0 */
+	fingers_supported = fhandler->num_of_data_points;
+	offset = (fingers_supported + 3) / 4;
+
+	/* data 1 2 3 4 5 */
+	offset += 5 * fingers_supported;
+
+	/* data 6 7 */
+	if (query_0_5.has_rel)
+		offset += 2 * fingers_supported;
+
+	/* data 8 */
+	if (query_0_5.has_gestures && query_7_8.data[0])
+		offset += 1;
+
+	/* data 9 */
+	if (query_0_5.has_gestures && (query_7_8.data[0] || query_7_8.data[1]))
+		offset += 1;
+
+	/* data 10 */
+	if (query_0_5.has_gestures &&
+			(query_7_8.has_pinch || query_7_8.has_flick))
+		offset += 1;
+
+	/* data 11 12 */
+	if (query_0_5.has_gestures &&
+			(query_7_8.has_flick || query_7_8.has_rotate))
+		offset += 2;
+
+	/* data 13 */
+	if (query_0_5.has_gestures && query_7_8.has_touch_shapes)
+		offset += (fingers_supported + 3) / 4;
+
+	/* data 14 15 */
+	if (query_0_5.has_gestures &&
+			(query_7_8.has_scroll_zones ||
+			query_7_8.has_multi_finger_scroll ||
+			query_7_8.has_chiral_scroll))
+		offset += 2;
+
+	/* data 16 17 */
+	if (query_0_5.has_gestures &&
+			(query_7_8.has_scroll_zones &&
+			query_7_8.individual_scroll_zones))
+		offset += 2;
+
+	/* data 18 19 20 21 22 23 24 25 26 27 */
+	if (query_0_5.has_query_9 && query_9.has_contact_geometry)
+		offset += 10 * fingers_supported;
+
+	/* data 28 */
+	if (query_0_5.has_bending_correction ||
+			query_0_5.has_large_object_suppression)
+		offset += 1;
+
+	/* data 29 30 31 */
+	if (query_0_5.has_query_9 && query_9.has_pen_hover_discrimination)
+		offset += 3;
+
+	/* data 32 */
+	if (query_0_5.has_query_12 &&
+			query_12.has_small_object_detection_tuning)
+		offset += 1;
+
+	/* data 33 34 */
+	if (query_0_5.has_query_27 && query_27.f11_query27_b0)
+		offset += 2;
+
+	/* data 35 */
+	if (query_0_5.has_query_12 && query_12.has_8bit_w)
+		offset += fingers_supported;
+
+	/* data 36 */
+	if (query_0_5.has_bending_correction)
+		offset += 1;
+
+	/* data 37 */
+	if (query_0_5.has_query_27 && query_27.has_data_37)
+		offset += 1;
+
+	/* data 38 */
+	if (query_0_5.has_query_27 && query_27.has_wakeup_gesture)
+		extra_data->data38_offset = offset;
+
+	return retval;
+}
+
+/*
+ * Write the cached report-enable bits to F12 control register 28.
+ *
+ * A non-zero @ctrl28 latches that register address into a static
+ * local, so later callers may pass 0 to reuse the address recorded
+ * during initialization.
+ *
+ * Returns the result of the register write (negative on error).
+ */
+static int synaptics_rmi4_f12_set_enables(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short ctrl28)
+{
+	static unsigned short ctrl_28_address;
+
+	if (ctrl28)
+		ctrl_28_address = ctrl28;
+
+	return synaptics_rmi4_reg_write(rmi4_data,
+			ctrl_28_address,
+			&rmi4_data->report_enable,
+			sizeof(rmi4_data->report_enable));
+}
+
+/*
+ * Determine whether @subpacket of F12 control register @ctrlreg is
+ * implemented, by parsing the packet register descriptors in query 6.
+ *
+ * Query 5 holds one presence bit per control register; query 6 holds,
+ * for each register present, a variable-length descriptor (a size
+ * byte followed by subpacket-presence bytes, where a set top bit
+ * continues the subpacket map into the next byte).
+ *
+ * Returns 1 if the subpacket is present, 0 if not, or a negative
+ * error code on failure.
+ */
+static int synaptics_rmi4_f12_ctrl_sub(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler,
+		struct synaptics_rmi4_f12_query_5 *query_5,
+		unsigned char ctrlreg, unsigned char subpacket)
+{
+	int retval;
+	unsigned char cnt;
+	unsigned char regnum;
+	unsigned char bitnum;
+	unsigned char q5_index;
+	unsigned char q6_index;
+	unsigned char offset;
+	unsigned char max_ctrlreg;
+	unsigned char *query_6;
+
+	/* Highest register number representable by query 5's bitmap */
+	max_ctrlreg = (sizeof(query_5->data) - 1) * 8 - 1;
+
+	if (ctrlreg > max_ctrlreg) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Control register number (%d) over limit\n",
+				__func__, ctrlreg);
+		return -EINVAL;
+	}
+
+	/* data[0] of query 5 is its size byte; presence bits follow */
+	q5_index = ctrlreg / 8 + 1;
+	bitnum = ctrlreg % 8;
+	if ((query_5->data[q5_index] & (1 << bitnum)) == 0x00) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Control %d is not present\n",
+				__func__, ctrlreg);
+		return -EINVAL;
+	}
+
+	query_6 = kmalloc(query_5->size_of_query6, GFP_KERNEL);
+	if (!query_6) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for query 6\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fhandler->full_addr.query_base + 6,
+			query_6,
+			query_5->size_of_query6);
+	if (retval < 0)
+		goto exit;
+
+	q6_index = 0;
+
+	/* Skip the descriptors of every control register before @ctrlreg */
+	for (regnum = 0; regnum < ctrlreg; regnum++) {
+		q5_index = regnum / 8 + 1;
+		bitnum = regnum % 8;
+		if ((query_5->data[q5_index] & (1 << bitnum)) == 0x00)
+			continue;
+
+		/* zero size byte presumably marks the 3-byte extended
+		 * size form -- TODO confirm against the RMI4 spec */
+		if (query_6[q6_index] == 0x00)
+			q6_index += 3;
+		else
+			q6_index++;
+
+		/* Continuation bytes have their top bit set */
+		while (query_6[q6_index] & ~MASK_7BIT)
+			q6_index++;
+
+		q6_index++;
+	}
+
+	/* Scan this register's subpacket presence map */
+	cnt = 0;
+	q6_index++;
+	offset = subpacket / 7;
+	bitnum = subpacket % 7;
+
+	do {
+		if (cnt == offset) {
+			if (query_6[q6_index + cnt] & (1 << bitnum))
+				retval = 1;
+			else
+				retval = 0;
+			goto exit;
+		}
+		cnt++;
+	} while (query_6[q6_index + cnt - 1] & ~MASK_7BIT);
+
+	/* Map ended before reaching @subpacket: not present */
+	retval = 0;
+
+exit:
+	kfree(query_6);
+
+	return retval;
+}
+
+/*
+ * Initialize the handler for F12 (2D touch, packet register based).
+ *
+ * Discovers which control and data registers the firmware implements
+ * from query 5 and query 8, computes the register offsets, reads the
+ * maximum coordinates (control 8 for regular sensors, control 31 for
+ * wedge sensors), programs the report-enable bits via control 28, and
+ * allocates the finger data buffer.  All temporary query/control
+ * register images are heap allocated and released at exit.
+ *
+ * Returns a non-negative value on success or a negative error code.
+ *
+ * NOTE(review): fhandler->extra allocated here is not freed on the
+ * error paths (only the temporary buffers are); confirm the caller's
+ * error handling releases the handler.
+ */
+static int synaptics_rmi4_f12_init(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler,
+		struct synaptics_rmi4_fn_desc *fd,
+		unsigned int intr_count)
+{
+	int retval = 0;
+	int temp;
+	unsigned char subpacket;
+	unsigned char ctrl_23_size;
+	unsigned char size_of_2d_data;
+	unsigned char size_of_query8;
+	unsigned char ctrl_8_offset;
+	unsigned char ctrl_20_offset;
+	unsigned char ctrl_23_offset;
+	unsigned char ctrl_28_offset;
+	unsigned char ctrl_31_offset;
+	unsigned char num_of_fingers;
+	struct synaptics_rmi4_f12_extra_data *extra_data;
+	struct synaptics_rmi4_f12_query_5 *query_5 = NULL;
+	struct synaptics_rmi4_f12_query_8 *query_8 = NULL;
+	struct synaptics_rmi4_f12_ctrl_8 *ctrl_8 = NULL;
+	struct synaptics_rmi4_f12_ctrl_23 *ctrl_23 = NULL;
+	struct synaptics_rmi4_f12_ctrl_31 *ctrl_31 = NULL;
+	const struct synaptics_dsx_board_data *bdata =
+				rmi4_data->hw_if->board_data;
+
+	fhandler->fn_number = fd->fn_number;
+	fhandler->num_of_data_sources = fd->intr_src_count;
+	fhandler->extra = kmalloc(sizeof(*extra_data), GFP_KERNEL);
+	if (!fhandler->extra) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for fhandler->extra\n",
+				__func__);
+		return -ENOMEM;
+	}
+	extra_data = (struct synaptics_rmi4_f12_extra_data *)fhandler->extra;
+	size_of_2d_data = sizeof(struct synaptics_rmi4_f12_finger_data);
+
+	query_5 = kmalloc(sizeof(*query_5), GFP_KERNEL);
+	if (!query_5) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for query_5\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	query_8 = kmalloc(sizeof(*query_8), GFP_KERNEL);
+	if (!query_8) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for query_8\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	ctrl_8 = kmalloc(sizeof(*ctrl_8), GFP_KERNEL);
+	if (!ctrl_8) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for ctrl_8\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	ctrl_23 = kmalloc(sizeof(*ctrl_23), GFP_KERNEL);
+	if (!ctrl_23) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for ctrl_23\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	ctrl_31 = kmalloc(sizeof(*ctrl_31), GFP_KERNEL);
+	if (!ctrl_31) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for ctrl_31\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fhandler->full_addr.query_base + 5,
+			query_5->data,
+			sizeof(query_5->data));
+	if (retval < 0)
+		goto exit;
+
+	/*
+	 * Each set presence bit in query 5 adds one register to the
+	 * running offset; sum them to locate controls 8/20/23/28/31.
+	 */
+	ctrl_8_offset = query_5->ctrl0_is_present +
+			query_5->ctrl1_is_present +
+			query_5->ctrl2_is_present +
+			query_5->ctrl3_is_present +
+			query_5->ctrl4_is_present +
+			query_5->ctrl5_is_present +
+			query_5->ctrl6_is_present +
+			query_5->ctrl7_is_present;
+
+	ctrl_20_offset = ctrl_8_offset +
+			query_5->ctrl8_is_present +
+			query_5->ctrl9_is_present +
+			query_5->ctrl10_is_present +
+			query_5->ctrl11_is_present +
+			query_5->ctrl12_is_present +
+			query_5->ctrl13_is_present +
+			query_5->ctrl14_is_present +
+			query_5->ctrl15_is_present +
+			query_5->ctrl16_is_present +
+			query_5->ctrl17_is_present +
+			query_5->ctrl18_is_present +
+			query_5->ctrl19_is_present;
+
+	ctrl_23_offset = ctrl_20_offset +
+			query_5->ctrl20_is_present +
+			query_5->ctrl21_is_present +
+			query_5->ctrl22_is_present;
+
+	ctrl_28_offset = ctrl_23_offset +
+			query_5->ctrl23_is_present +
+			query_5->ctrl24_is_present +
+			query_5->ctrl25_is_present +
+			query_5->ctrl26_is_present +
+			query_5->ctrl27_is_present;
+
+	ctrl_31_offset = ctrl_28_offset +
+			query_5->ctrl28_is_present +
+			query_5->ctrl29_is_present +
+			query_5->ctrl30_is_present;
+
+	/* Control 23 has 2 mandatory bytes plus optional subpackets 2-4 */
+	ctrl_23_size = 2;
+	for (subpacket = 2; subpacket <= 4; subpacket++) {
+		retval = synaptics_rmi4_f12_ctrl_sub(rmi4_data,
+				fhandler, query_5, 23, subpacket);
+		if (retval == 1)
+			ctrl_23_size++;
+		else if (retval < 0)
+			goto exit;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fhandler->full_addr.ctrl_base + ctrl_23_offset,
+			ctrl_23->data,
+			ctrl_23_size);
+	if (retval < 0)
+		goto exit;
+
+	/* Maximum number of fingers supported */
+	fhandler->num_of_data_points = min_t(unsigned char,
+			ctrl_23->max_reported_objects,
+			(unsigned char)F12_FINGERS_TO_SUPPORT);
+
+	num_of_fingers = fhandler->num_of_data_points;
+	rmi4_data->num_of_fingers = num_of_fingers;
+
+	rmi4_data->stylus_enable = ctrl_23->stylus_enable;
+	rmi4_data->eraser_enable = ctrl_23->eraser_enable;
+
+	/* Query 7 holds the size of query 8 */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fhandler->full_addr.query_base + 7,
+			&size_of_query8,
+			sizeof(size_of_query8));
+	if (retval < 0)
+		goto exit;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fhandler->full_addr.query_base + 8,
+			query_8->data,
+			size_of_query8);
+	if (retval < 0)
+		goto exit;
+
+	/* Determine the presence of the Data0 register */
+	extra_data->data1_offset = query_8->data0_is_present;
+
+	/* Data 15 (finger presence bitmap), if implemented */
+	if ((size_of_query8 >= 3) && (query_8->data15_is_present)) {
+		extra_data->data15_offset = query_8->data0_is_present +
+				query_8->data1_is_present +
+				query_8->data2_is_present +
+				query_8->data3_is_present +
+				query_8->data4_is_present +
+				query_8->data5_is_present +
+				query_8->data6_is_present +
+				query_8->data7_is_present +
+				query_8->data8_is_present +
+				query_8->data9_is_present +
+				query_8->data10_is_present +
+				query_8->data11_is_present +
+				query_8->data12_is_present +
+				query_8->data13_is_present +
+				query_8->data14_is_present;
+		extra_data->data15_size = (num_of_fingers + 7) / 8;
+	} else {
+		extra_data->data15_size = 0;
+	}
+
+#ifdef REPORT_2D_PRESSURE
+	/* Data 23 (per-finger pressure), if implemented */
+	if ((size_of_query8 >= 4) && (query_8->data23_is_present)) {
+		extra_data->data23_offset = query_8->data0_is_present +
+				query_8->data1_is_present +
+				query_8->data2_is_present +
+				query_8->data3_is_present +
+				query_8->data4_is_present +
+				query_8->data5_is_present +
+				query_8->data6_is_present +
+				query_8->data7_is_present +
+				query_8->data8_is_present +
+				query_8->data9_is_present +
+				query_8->data10_is_present +
+				query_8->data11_is_present +
+				query_8->data12_is_present +
+				query_8->data13_is_present +
+				query_8->data14_is_present +
+				query_8->data15_is_present +
+				query_8->data16_is_present +
+				query_8->data17_is_present +
+				query_8->data18_is_present +
+				query_8->data19_is_present +
+				query_8->data20_is_present +
+				query_8->data21_is_present +
+				query_8->data22_is_present;
+		extra_data->data23_size = num_of_fingers;
+		rmi4_data->report_pressure = true;
+	} else {
+		extra_data->data23_size = 0;
+		rmi4_data->report_pressure = false;
+	}
+#endif
+
+	rmi4_data->report_enable = RPT_DEFAULT;
+#ifdef REPORT_2D_Z
+	rmi4_data->report_enable |= RPT_Z;
+#endif
+#ifdef REPORT_2D_W
+	rmi4_data->report_enable |= (RPT_WX | RPT_WY);
+#endif
+
+	/* Non-zero address latches control 28 for later callers */
+	retval = synaptics_rmi4_f12_set_enables(rmi4_data,
+			fhandler->full_addr.ctrl_base + ctrl_28_offset);
+	if (retval < 0)
+		goto exit;
+
+	if (query_5->ctrl8_is_present) {
+		rmi4_data->wedge_sensor = false;
+
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				fhandler->full_addr.ctrl_base + ctrl_8_offset,
+				ctrl_8->data,
+				sizeof(ctrl_8->data));
+		if (retval < 0)
+			goto exit;
+
+		/* Maximum x and y */
+		rmi4_data->sensor_max_x =
+				((unsigned int)ctrl_8->max_x_coord_lsb << 0) |
+				((unsigned int)ctrl_8->max_x_coord_msb << 8);
+		rmi4_data->sensor_max_y =
+				((unsigned int)ctrl_8->max_y_coord_lsb << 0) |
+				((unsigned int)ctrl_8->max_y_coord_msb << 8);
+
+		rmi4_data->max_touch_width = MAX_F12_TOUCH_WIDTH;
+	} else {
+		rmi4_data->wedge_sensor = true;
+
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				fhandler->full_addr.ctrl_base + ctrl_31_offset,
+				ctrl_31->data,
+				sizeof(ctrl_31->data));
+		if (retval < 0)
+			goto exit;
+
+		/* Maximum x and y */
+		rmi4_data->sensor_max_x =
+				((unsigned int)ctrl_31->max_x_coord_lsb << 0) |
+				((unsigned int)ctrl_31->max_x_coord_msb << 8);
+		rmi4_data->sensor_max_y =
+				((unsigned int)ctrl_31->max_y_coord_lsb << 0) |
+				((unsigned int)ctrl_31->max_y_coord_msb << 8);
+
+		rmi4_data->max_touch_width = MAX_F12_TOUCH_WIDTH;
+	}
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Function %02x max x = %d max y = %d\n",
+			__func__, fhandler->fn_number,
+			rmi4_data->sensor_max_x,
+			rmi4_data->sensor_max_y);
+
+	if (bdata->swap_axes) {
+		temp = rmi4_data->sensor_max_x;
+		rmi4_data->sensor_max_x = rmi4_data->sensor_max_y;
+		rmi4_data->sensor_max_y = temp;
+	}
+
+	rmi4_data->f12_wakeup_gesture = query_5->ctrl27_is_present;
+	if (rmi4_data->f12_wakeup_gesture) {
+		extra_data->ctrl20_offset = ctrl_20_offset;
+		extra_data->data4_offset = query_8->data0_is_present +
+				query_8->data1_is_present +
+				query_8->data2_is_present +
+				query_8->data3_is_present;
+	}
+
+	synaptics_rmi4_set_intr_mask(fhandler, fd, intr_count);
+
+	/* Allocate memory for finger data storage space */
+	fhandler->data_size = num_of_fingers * size_of_2d_data;
+	fhandler->data = kmalloc(fhandler->data_size, GFP_KERNEL);
+	if (!fhandler->data) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for fhandler->data\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+exit:
+	kfree(query_5);
+	kfree(query_8);
+	kfree(ctrl_8);
+	kfree(ctrl_23);
+	kfree(ctrl_31);
+
+	return retval;
+}
+
+/*
+ * Allocate and populate the F1A (0D capacitive buttons) handler data.
+ *
+ * Reads the button query registers, then allocates the tx/rx map, the
+ * raw button-state buffer, and the logical button map sized from the
+ * reported maximum button count.
+ *
+ * On failure the partially allocated members remain attached to
+ * fhandler->data; the caller releases them through
+ * synaptics_rmi4_f1a_kfree() (see synaptics_rmi4_f1a_init()).
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int synaptics_rmi4_f1a_alloc_mem(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler)
+{
+	int retval;
+	struct synaptics_rmi4_f1a_handle *f1a;
+
+	f1a = kzalloc(sizeof(*f1a), GFP_KERNEL);
+	if (!f1a) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for function handle\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	fhandler->data = (void *)f1a;
+	fhandler->extra = NULL;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fhandler->full_addr.query_base,
+			f1a->button_query.data,
+			sizeof(f1a->button_query.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read query registers\n",
+				__func__);
+		return retval;
+	}
+
+	f1a->max_count = f1a->button_query.max_button_count + 1;
+
+	/*
+	 * Two bytes (tx, rx) per button; kcalloc checks the multiply
+	 * and matches the allocation style of the buffers below.
+	 */
+	f1a->button_control.txrx_map = kcalloc(f1a->max_count, 2, GFP_KERNEL);
+	if (!f1a->button_control.txrx_map) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for tx rx mapping\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	/* One bit of raw state per button, rounded up to whole bytes */
+	f1a->button_bitmask_size = (f1a->max_count + 7) / 8;
+
+	f1a->button_data_buffer = kcalloc(f1a->button_bitmask_size,
+			sizeof(*(f1a->button_data_buffer)), GFP_KERNEL);
+	if (!f1a->button_data_buffer) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for data buffer\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	f1a->button_map = kcalloc(f1a->max_count,
+			sizeof(*(f1a->button_map)), GFP_KERNEL);
+	if (!f1a->button_map) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for button map\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/*
+ * Build the 0D capacitive button map for F1A from the board data, and
+ * read the optional tx/rx electrode mapping from the controller.
+ *
+ * Returns 0 on success, a negative error code on read failure or when
+ * the board data provides no button map.
+ */
+static int synaptics_rmi4_f1a_button_map(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler)
+{
+	int retval;
+	unsigned char idx;
+	unsigned char reg_offset = 0;
+	struct synaptics_rmi4_f1a_query_4 query_4;
+	struct synaptics_rmi4_f1a_handle *f1a = fhandler->data;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	/* Controls preceding the tx/rx map, one register each */
+	reg_offset = f1a->button_query.has_general_control +
+			f1a->button_query.has_interrupt_enable +
+			f1a->button_query.has_multibutton_select;
+
+	if (f1a->button_query.has_tx_rx_map) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				fhandler->full_addr.ctrl_base + reg_offset,
+				f1a->button_control.txrx_map,
+				f1a->max_count * 2);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read tx rx mapping\n",
+					__func__);
+			return retval;
+		}
+
+		rmi4_data->button_txrx_mapping = f1a->button_control.txrx_map;
+	}
+
+	if (f1a->button_query.has_query4) {
+		reg_offset = 2 + f1a->button_query.has_query2 +
+				f1a->button_query.has_query3;
+
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				fhandler->full_addr.query_base + reg_offset,
+				query_4.data,
+				sizeof(query_4.data));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read button features 4\n",
+					__func__);
+			return retval;
+		}
+
+		rmi4_data->external_afe_buttons = (query_4.has_ctrl24 != 0);
+	}
+
+	if (!bdata->cap_button_map) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: cap_button_map is NULL in board file\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	if (!bdata->cap_button_map->map) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Button map is missing in board file\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	/* min() also yields max_count when the two counts agree */
+	f1a->valid_button_count = min(f1a->max_count,
+			bdata->cap_button_map->nbuttons);
+
+	for (idx = 0; idx < f1a->valid_button_count; idx++)
+		f1a->button_map[idx] = bdata->cap_button_map->map[idx];
+
+	return 0;
+}
+
+/*
+ * Release all memory owned by an F1A handler and detach it from the
+ * function handler.  Safe to call when no F1A data was allocated.
+ */
+static void synaptics_rmi4_f1a_kfree(struct synaptics_rmi4_fn *fhandler)
+{
+	struct synaptics_rmi4_f1a_handle *f1a = fhandler->data;
+
+	if (!f1a)
+		return;
+
+	kfree(f1a->button_control.txrx_map);
+	kfree(f1a->button_data_buffer);
+	kfree(f1a->button_map);
+	kfree(f1a);
+	fhandler->data = NULL;
+}
+
+/*
+ * Initialize the handler for F1A (0D capacitive buttons).
+ *
+ * Allocates the button storage and builds the button map; on any
+ * failure the partially built handler memory is released again.
+ * Returns 0 on success or a negative error code.
+ */
+static int synaptics_rmi4_f1a_init(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler,
+		struct synaptics_rmi4_fn_desc *fd,
+		unsigned int intr_count)
+{
+	int retval;
+
+	fhandler->fn_number = fd->fn_number;
+	fhandler->num_of_data_sources = fd->intr_src_count;
+
+	synaptics_rmi4_set_intr_mask(fhandler, fd, intr_count);
+
+	retval = synaptics_rmi4_f1a_alloc_mem(rmi4_data, fhandler);
+	if (retval >= 0)
+		retval = synaptics_rmi4_f1a_button_map(rmi4_data, fhandler);
+
+	if (retval < 0) {
+		synaptics_rmi4_f1a_kfree(fhandler);
+		return retval;
+	}
+
+	rmi4_data->button_0d_enabled = 1;
+
+	return 0;
+}
+
+/*
+ * Tear down every function handler on the support list, freeing each
+ * handler's per-function storage, and leave the list re-initialized
+ * and empty.  (list_for_each_entry_safe is a no-op on an empty list,
+ * so no separate emptiness check is needed.)
+ */
+static void synaptics_rmi4_empty_fn_list(struct synaptics_rmi4_data *rmi4_data)
+{
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_fn *next;
+	struct synaptics_rmi4_device_info *rmi = &(rmi4_data->rmi4_mod_info);
+
+	list_for_each_entry_safe(fhandler, next, &rmi->support_fn_list, link) {
+		if (fhandler->fn_number == SYNAPTICS_RMI4_F1A) {
+			/* F1A owns nested allocations of its own */
+			synaptics_rmi4_f1a_kfree(fhandler);
+		} else {
+			kfree(fhandler->extra);
+			kfree(fhandler->data);
+		}
+		list_del(&fhandler->link);
+		kfree(fhandler);
+	}
+	INIT_LIST_HEAD(&rmi->support_fn_list);
+}
+
+/*
+ * Read the F01 device status, wait for any in-progress firmware CRC
+ * check to finish, and record whether the controller is sitting in
+ * flash programming (bootloader) mode.
+ *
+ * @was_in_bl_mode is set to true when at least one wait iteration was
+ * needed (it is only ever set here, never cleared).
+ *
+ * Returns 0 on success, -EINVAL if the CRC check does not finish
+ * within CHECK_STATUS_TIMEOUT_MS, or a negative read error.
+ *
+ * NOTE(review): the timeout is decremented after each sleep, so the
+ * total wait can exceed CHECK_STATUS_TIMEOUT_MS by one 20 ms step.
+ */
+static int synaptics_rmi4_check_status(struct synaptics_rmi4_data *rmi4_data,
+		bool *was_in_bl_mode)
+{
+	int retval;
+	int timeout = CHECK_STATUS_TIMEOUT_MS;
+	struct synaptics_rmi4_f01_device_status status;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_data_base_addr,
+			status.data,
+			sizeof(status.data));
+	if (retval < 0)
+		return retval;
+
+	/* Poll every 20 ms until the firmware CRC check completes */
+	while (status.status_code == STATUS_CRC_IN_PROGRESS) {
+		if (timeout > 0)
+			msleep(20);
+		else
+			return -EINVAL;
+
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				rmi4_data->f01_data_base_addr,
+				status.data,
+				sizeof(status.data));
+		if (retval < 0)
+			return retval;
+
+		timeout -= 20;
+	}
+
+	/* Any change to the timeout means we caught the device busy */
+	if (timeout != CHECK_STATUS_TIMEOUT_MS)
+		*was_in_bl_mode = true;
+
+	if (status.flash_prog == 1) {
+		rmi4_data->flash_prog_mode = true;
+		pr_notice("%s: In flash prog mode, status = 0x%02x\n",
+				__func__,
+				status.status_code);
+	} else {
+		rmi4_data->flash_prog_mode = false;
+	}
+
+	return 0;
+}
+
+/*
+ * Set the "configured" bit in the F01 device control register so the
+ * firmware knows the host has finished configuring it, preserving the
+ * current no-sleep setting along the way.  Failures are only logged;
+ * there is no meaningful recovery at this point.
+ */
+static void synaptics_rmi4_set_configured(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char device_ctrl;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&device_ctrl,
+			sizeof(device_ctrl));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set configured\n",
+				__func__);
+		return;
+	}
+
+	/* Remember the firmware's no-sleep preference for later restore */
+	rmi4_data->no_sleep_setting = device_ctrl & NO_SLEEP_ON;
+	device_ctrl |= CONFIGURED;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&device_ctrl,
+			sizeof(device_ctrl));
+	if (retval < 0)
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set configured\n",
+				__func__);
+}
+
+/*
+ * Allocate a function handler and fill in its full 16-bit register
+ * addresses (page number in the high byte) from a PDT entry.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure; *fhandler is
+ * only written on success.
+ */
+static int synaptics_rmi4_alloc_fh(struct synaptics_rmi4_fn **fhandler,
+		struct synaptics_rmi4_fn_desc *rmi_fd, int page_number)
+{
+	struct synaptics_rmi4_fn *fn;
+	int page_bits = page_number << 8;
+
+	fn = kmalloc(sizeof(*fn), GFP_KERNEL);
+	if (!fn)
+		return -ENOMEM;
+
+	fn->full_addr.data_base = rmi_fd->data_base_addr | page_bits;
+	fn->full_addr.ctrl_base = rmi_fd->ctrl_base_addr | page_bits;
+	fn->full_addr.cmd_base = rmi_fd->cmd_base_addr | page_bits;
+	fn->full_addr.query_base = rmi_fd->query_base_addr | page_bits;
+
+	*fhandler = fn;
+
+	return 0;
+}
+
+static int synaptics_rmi4_query_device(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char page_number;
+	unsigned char intr_count;
+	unsigned char *f01_query;
+	unsigned short pdt_entry_addr;
+	bool f01found;
+	bool f35found;
+	bool was_in_bl_mode;
+	struct synaptics_rmi4_fn_desc rmi_fd;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+rescan_pdt:
+	f01found = false;
+	f35found = false;
+	was_in_bl_mode = false;
+	intr_count = 0;
+	INIT_LIST_HEAD(&rmi->support_fn_list);
+
+	/* Scan the page description tables of the pages to service */
+	for (page_number = 0; page_number < PAGES_TO_SERVICE; page_number++) {
+		for (pdt_entry_addr = PDT_START; pdt_entry_addr > PDT_END;
+				pdt_entry_addr -= PDT_ENTRY_SIZE) {
+			pdt_entry_addr |= (page_number << 8);
+
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					pdt_entry_addr,
+					(unsigned char *)&rmi_fd,
+					sizeof(rmi_fd));
+			if (retval < 0)
+				return retval;
+
+			pdt_entry_addr &= ~(MASK_8BIT << 8);
+
+			fhandler = NULL;
+
+			if (rmi_fd.fn_number == 0) {
+				dev_dbg(rmi4_data->pdev->dev.parent,
+						"%s: Reached end of PDT\n",
+						__func__);
+				break;
+			}
+
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: F%02x found (page %d)\n",
+					__func__, rmi_fd.fn_number,
+					page_number);
+
+			switch (rmi_fd.fn_number) {
+			case SYNAPTICS_RMI4_F01:
+				if (rmi_fd.intr_src_count == 0)
+					break;
+
+				f01found = true;
+
+				retval = synaptics_rmi4_alloc_fh(&fhandler,
+						&rmi_fd, page_number);
+				if (retval < 0) {
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Failed to alloc for F%d\n",
+							__func__,
+							rmi_fd.fn_number);
+					return retval;
+				}
+
+				retval = synaptics_rmi4_f01_init(rmi4_data,
+						fhandler, &rmi_fd, intr_count);
+				if (retval < 0)
+					return retval;
+
+				retval = synaptics_rmi4_check_status(rmi4_data,
+						&was_in_bl_mode);
+				if (retval < 0) {
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Failed to check status\n",
+							__func__);
+					return retval;
+				}
+
+				if (was_in_bl_mode) {
+					kfree(fhandler);
+					fhandler = NULL;
+					goto rescan_pdt;
+				}
+
+				if (rmi4_data->flash_prog_mode)
+					goto flash_prog_mode;
+
+				break;
+			case SYNAPTICS_RMI4_F11:
+				if (rmi_fd.intr_src_count == 0)
+					break;
+
+				retval = synaptics_rmi4_alloc_fh(&fhandler,
+						&rmi_fd, page_number);
+				if (retval < 0) {
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Failed to alloc for F%d\n",
+							__func__,
+							rmi_fd.fn_number);
+					return retval;
+				}
+
+				retval = synaptics_rmi4_f11_init(rmi4_data,
+						fhandler, &rmi_fd, intr_count);
+				if (retval < 0)
+					return retval;
+				break;
+			case SYNAPTICS_RMI4_F12:
+				if (rmi_fd.intr_src_count == 0)
+					break;
+
+				retval = synaptics_rmi4_alloc_fh(&fhandler,
+						&rmi_fd, page_number);
+				if (retval < 0) {
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Failed to alloc for F%d\n",
+							__func__,
+							rmi_fd.fn_number);
+					return retval;
+				}
+
+				retval = synaptics_rmi4_f12_init(rmi4_data,
+						fhandler, &rmi_fd, intr_count);
+				if (retval < 0)
+					return retval;
+				break;
+			case SYNAPTICS_RMI4_F1A:
+				if (rmi_fd.intr_src_count == 0)
+					break;
+
+				retval = synaptics_rmi4_alloc_fh(&fhandler,
+						&rmi_fd, page_number);
+				if (retval < 0) {
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Failed to alloc for F%d\n",
+							__func__,
+							rmi_fd.fn_number);
+					return retval;
+				}
+
+				retval = synaptics_rmi4_f1a_init(rmi4_data,
+						fhandler, &rmi_fd, intr_count);
+				if (retval < 0) {
+#ifdef IGNORE_FN_INIT_FAILURE
+					kfree(fhandler);
+					fhandler = NULL;
+#else
+					return retval;
+#endif
+				}
+				break;
+			case SYNAPTICS_RMI4_F35:
+				f35found = true;
+				break;
+			}
+
+			/* Accumulate the interrupt count */
+			intr_count += rmi_fd.intr_src_count;
+
+			if (fhandler && rmi_fd.intr_src_count) {
+				list_add_tail(&fhandler->link,
+						&rmi->support_fn_list);
+			}
+		}
+	}
+
+	if (!f01found) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to find F01\n",
+				__func__);
+		if (!f35found) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to find F35\n",
+					__func__);
+			return -EINVAL;
+		} else {
+			pr_notice("%s: In microbootloader mode\n",
+					__func__);
+			return 0;
+		}
+	}
+
+flash_prog_mode:
+	rmi4_data->num_of_intr_regs = (intr_count + 7) / 8;
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Number of interrupt registers = %d\n",
+			__func__, rmi4_data->num_of_intr_regs);
+
+	f01_query = kmalloc(F01_STD_QUERY_LEN, GFP_KERNEL);
+	if (!f01_query) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for f01_query\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_query_base_addr,
+			f01_query,
+			F01_STD_QUERY_LEN);
+	if (retval < 0) {
+		kfree(f01_query);
+		return retval;
+	}
+
+	/* RMI Version 4.0 currently supported */
+	rmi->version_major = 4;
+	rmi->version_minor = 0;
+
+	rmi->manufacturer_id = f01_query[0];
+	rmi->product_props = f01_query[1];
+	rmi->product_info[0] = f01_query[2];
+	rmi->product_info[1] = f01_query[3];
+	retval = secure_memcpy(rmi->product_id_string,
+			sizeof(rmi->product_id_string),
+			&f01_query[11],
+			F01_STD_QUERY_LEN - 11,
+			PRODUCT_ID_SIZE);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy product ID string\n",
+				__func__);
+	}
+
+	kfree(f01_query);
+
+	if (rmi->manufacturer_id != 1) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Non-Synaptics device found, manufacturer ID = %d\n",
+				__func__, rmi->manufacturer_id);
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_query_base_addr + F01_BUID_ID_OFFSET,
+			rmi->build_id,
+			sizeof(rmi->build_id));
+	if (retval < 0)
+		return retval;
+
+	rmi4_data->firmware_id = (unsigned int)rmi->build_id[0] +
+			(unsigned int)rmi->build_id[1] * 0x100 +
+			(unsigned int)rmi->build_id[2] * 0x10000;
+
+	memset(rmi4_data->intr_mask, 0x00, sizeof(rmi4_data->intr_mask));
+
+	/*
+	 * Map out the interrupt bit masks for the interrupt sources
+	 * from the registered function handlers.
+	 */
+	if (!list_empty(&rmi->support_fn_list)) {
+		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+			if (fhandler->num_of_data_sources) {
+				rmi4_data->intr_mask[fhandler->intr_reg_num] |=
+						fhandler->intr_mask;
+			}
+		}
+	}
+
+	if (rmi4_data->f11_wakeup_gesture || rmi4_data->f12_wakeup_gesture)
+		rmi4_data->enable_wakeup_gesture = WAKEUP_GESTURE;
+	else
+		rmi4_data->enable_wakeup_gesture = false;
+
+	synaptics_rmi4_set_configured(rmi4_data);
+
+	return 0;
+}
+
+/*
+ * synaptics_rmi4_gpio_setup - request/configure or free one GPIO
+ * @gpio: GPIO number
+ * @config: true to request and set direction, false to free the GPIO
+ * @dir: 0 = input, non-zero = output (only meaningful when @config is true)
+ * @state: initial output level when @dir selects output
+ *
+ * Returns 0 on success or a negative error code from the gpiolib calls.
+ *
+ * NOTE(review): gpiolib may keep the label pointer rather than copying it;
+ * passing a stack buffer as the label looks questionable — confirm against
+ * the target kernel's gpio_request() implementation.
+ */
+static int synaptics_rmi4_gpio_setup(int gpio, bool config, int dir, int state)
+{
+	int retval = 0;
+	char buf[16];
+
+	if (config) {
+		/*
+		 * Bound the write by the real buffer size. The previous
+		 * code passed PAGE_SIZE, letting snprintf scribble past
+		 * this 16-byte stack buffer for long GPIO numbers.
+		 */
+		snprintf(buf, sizeof(buf), "dsx_gpio_%d\n", gpio);
+
+		retval = gpio_request(gpio, buf);
+		if (retval) {
+			pr_err("%s: Failed to get gpio %d (code: %d)",
+					__func__, gpio, retval);
+			return retval;
+		}
+
+		if (dir == 0)
+			retval = gpio_direction_input(gpio);
+		else
+			retval = gpio_direction_output(gpio, state);
+		if (retval) {
+			pr_err("%s: Failed to set gpio %d direction",
+					__func__, gpio);
+			/* Release the request taken above; callers only
+			 * free GPIOs they believe were fully configured.
+			 */
+			gpio_free(gpio);
+			return retval;
+		}
+	} else {
+		gpio_free(gpio);
+	}
+
+	return retval;
+}
+
+/*
+ * synaptics_rmi4_set_params - declare input device capabilities
+ *
+ * Publishes the absolute-axis ranges (touch position, optionally width and
+ * pressure), initializes MT slots when the type-B protocol is compiled in,
+ * and registers key capabilities for F1A hardware buttons, virtual buttons
+ * from the board data, and the wakeup gesture key.
+ *
+ * Must run after synaptics_rmi4_query_device() has populated sensor_max_x/y,
+ * num_of_fingers and the support_fn_list.
+ */
+static void synaptics_rmi4_set_params(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char ii;
+	struct synaptics_rmi4_f1a_handle *f1a;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	input_set_abs_params(rmi4_data->input_dev,
+			ABS_MT_POSITION_X, 0,
+			rmi4_data->sensor_max_x, 0, 0);
+	input_set_abs_params(rmi4_data->input_dev,
+			ABS_MT_POSITION_Y, 0,
+			rmi4_data->sensor_max_y, 0, 0);
+#ifdef REPORT_2D_W
+	/* Touch width reported on both major and minor axes */
+	input_set_abs_params(rmi4_data->input_dev,
+			ABS_MT_TOUCH_MAJOR, 0,
+			rmi4_data->max_touch_width, 0, 0);
+	input_set_abs_params(rmi4_data->input_dev,
+			ABS_MT_TOUCH_MINOR, 0,
+			rmi4_data->max_touch_width, 0, 0);
+#endif
+
+#ifdef REPORT_2D_PRESSURE
+	if (rmi4_data->report_pressure) {
+		input_set_abs_params(rmi4_data->input_dev,
+				ABS_MT_PRESSURE, 0,
+				MAX_F12_TOUCH_PRESSURE, 0, 0);
+	}
+#endif
+
+#ifdef TYPE_B_PROTOCOL
+#ifdef KERNEL_ABOVE_3_6
+	/* Kernels >= 3.7 take an extra flags argument */
+	input_mt_init_slots(rmi4_data->input_dev,
+			rmi4_data->num_of_fingers, INPUT_MT_DIRECT);
+#else
+	input_mt_init_slots(rmi4_data->input_dev,
+			rmi4_data->num_of_fingers);
+#endif
+#endif
+
+	/* Pick up the 0D button handler (F1A) if one was registered */
+	f1a = NULL;
+	if (!list_empty(&rmi->support_fn_list)) {
+		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+			if (fhandler->fn_number == SYNAPTICS_RMI4_F1A)
+				f1a = fhandler->data;
+		}
+	}
+
+	if (f1a) {
+		for (ii = 0; ii < f1a->valid_button_count; ii++) {
+			set_bit(f1a->button_map[ii],
+					rmi4_data->input_dev->keybit);
+			input_set_capability(rmi4_data->input_dev,
+					EV_KEY, f1a->button_map[ii]);
+		}
+	}
+
+	/* Virtual buttons: map stores 5 values per button, keycode first */
+	if (vir_button_map->nbuttons) {
+		for (ii = 0; ii < vir_button_map->nbuttons; ii++) {
+			set_bit(vir_button_map->map[ii * 5],
+					rmi4_data->input_dev->keybit);
+			input_set_capability(rmi4_data->input_dev,
+					EV_KEY, vir_button_map->map[ii * 5]);
+		}
+	}
+
+	if (rmi4_data->f11_wakeup_gesture || rmi4_data->f12_wakeup_gesture) {
+		set_bit(KEY_WAKEUP, rmi4_data->input_dev->keybit);
+		input_set_capability(rmi4_data->input_dev, EV_KEY, KEY_WAKEUP);
+	}
+
+	return;
+}
+
+/*
+ * synaptics_rmi4_set_input_dev - allocate, configure and register the
+ * touch input device, and optionally a secondary stylus device
+ *
+ * Queries the RMI4 device first (populating max coordinates and the
+ * function list), then registers the 2D touch device. When stylus support
+ * is enabled in board data a second input device is registered for pen
+ * and (optionally) eraser tools.
+ *
+ * Returns 0 on success or a negative error code; on failure every
+ * resource acquired here is released again.
+ */
+static int synaptics_rmi4_set_input_dev(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	const struct synaptics_dsx_board_data *bdata =
+				rmi4_data->hw_if->board_data;
+
+	rmi4_data->input_dev = input_allocate_device();
+	if (rmi4_data->input_dev == NULL) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to allocate input device\n",
+				__func__);
+		retval = -ENOMEM;
+		goto err_input_device;
+	}
+
+	retval = synaptics_rmi4_query_device(rmi4_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to query device\n",
+				__func__);
+		goto err_query_device;
+	}
+
+	rmi4_data->input_dev->name = PLATFORM_DRIVER_NAME;
+	rmi4_data->input_dev->phys = INPUT_PHYS_NAME;
+	rmi4_data->input_dev->id.product = SYNAPTICS_DSX_DRIVER_PRODUCT;
+	rmi4_data->input_dev->id.version = SYNAPTICS_DSX_DRIVER_VERSION;
+	rmi4_data->input_dev->dev.parent = rmi4_data->pdev->dev.parent;
+	input_set_drvdata(rmi4_data->input_dev, rmi4_data);
+
+	set_bit(EV_SYN, rmi4_data->input_dev->evbit);
+	set_bit(EV_KEY, rmi4_data->input_dev->evbit);
+	set_bit(EV_ABS, rmi4_data->input_dev->evbit);
+	set_bit(BTN_TOUCH, rmi4_data->input_dev->keybit);
+	set_bit(BTN_TOOL_FINGER, rmi4_data->input_dev->keybit);
+#ifdef INPUT_PROP_DIRECT
+	set_bit(INPUT_PROP_DIRECT, rmi4_data->input_dev->propbit);
+#endif
+
+	/* Board data may clip the 2D area (e.g. to carve out a button bar) */
+	if (bdata->max_y_for_2d >= 0)
+		rmi4_data->sensor_max_y = bdata->max_y_for_2d;
+
+	synaptics_rmi4_set_params(rmi4_data);
+
+	retval = input_register_device(rmi4_data->input_dev);
+	if (retval) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to register input device\n",
+				__func__);
+		goto err_register_input;
+	}
+
+	if (!rmi4_data->stylus_enable)
+		return 0;
+
+	rmi4_data->stylus_dev = input_allocate_device();
+	if (rmi4_data->stylus_dev == NULL) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to allocate stylus device\n",
+				__func__);
+		retval = -ENOMEM;
+		goto err_stylus_device;
+	}
+
+	rmi4_data->stylus_dev->name = STYLUS_DRIVER_NAME;
+	rmi4_data->stylus_dev->phys = STYLUS_PHYS_NAME;
+	rmi4_data->stylus_dev->id.product = SYNAPTICS_DSX_DRIVER_PRODUCT;
+	rmi4_data->stylus_dev->id.version = SYNAPTICS_DSX_DRIVER_VERSION;
+	rmi4_data->stylus_dev->dev.parent = rmi4_data->pdev->dev.parent;
+	input_set_drvdata(rmi4_data->stylus_dev, rmi4_data);
+
+	set_bit(EV_KEY, rmi4_data->stylus_dev->evbit);
+	set_bit(EV_ABS, rmi4_data->stylus_dev->evbit);
+	set_bit(BTN_TOUCH, rmi4_data->stylus_dev->keybit);
+	set_bit(BTN_TOOL_PEN, rmi4_data->stylus_dev->keybit);
+	if (rmi4_data->eraser_enable)
+		set_bit(BTN_TOOL_RUBBER, rmi4_data->stylus_dev->keybit);
+#ifdef INPUT_PROP_DIRECT
+	set_bit(INPUT_PROP_DIRECT, rmi4_data->stylus_dev->propbit);
+#endif
+
+	input_set_abs_params(rmi4_data->stylus_dev, ABS_X, 0,
+			rmi4_data->sensor_max_x, 0, 0);
+	input_set_abs_params(rmi4_data->stylus_dev, ABS_Y, 0,
+			rmi4_data->sensor_max_y, 0, 0);
+
+	retval = input_register_device(rmi4_data->stylus_dev);
+	if (retval) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to register stylus device\n",
+				__func__);
+		goto err_register_stylus;
+	}
+
+	return 0;
+
+err_register_stylus:
+	/*
+	 * Registration failed, so the allocated stylus device must be
+	 * freed explicitly; the original code leaked it here.
+	 */
+	input_free_device(rmi4_data->stylus_dev);
+	rmi4_data->stylus_dev = NULL;
+
+err_stylus_device:
+	/* input_dev was registered above: unregister drops its reference */
+	input_unregister_device(rmi4_data->input_dev);
+	rmi4_data->input_dev = NULL;
+
+err_register_input:
+err_query_device:
+	synaptics_rmi4_empty_fn_list(rmi4_data);
+	/* NULL-safe; input_dev is NULL when arriving from err_stylus_device */
+	input_free_device(rmi4_data->input_dev);
+
+err_input_device:
+	return retval;
+}
+
+/*
+ * synaptics_rmi4_set_gpio - claim the IRQ/power/reset GPIOs and run the
+ * power-on / reset sequence
+ *
+ * The attention (IRQ) GPIO is mandatory; power and reset GPIOs are only
+ * configured when the board data provides a non-negative number. After
+ * configuration the controller is powered up and pulsed through its
+ * reset line using the delays from board data.
+ *
+ * Returns 0 on success or a negative error code, releasing any GPIO
+ * already claimed (cleanup runs in reverse acquisition order).
+ */
+static int synaptics_rmi4_set_gpio(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	/* Attention line: input, no initial state */
+	retval = synaptics_rmi4_gpio_setup(
+			bdata->irq_gpio,
+			true, 0, 0);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to configure attention GPIO\n",
+				__func__);
+		goto err_gpio_irq;
+	}
+
+	/* Power line: output, start in the powered-off state */
+	if (bdata->power_gpio >= 0) {
+		retval = synaptics_rmi4_gpio_setup(
+				bdata->power_gpio,
+				true, 1, !bdata->power_on_state);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to configure power GPIO\n",
+					__func__);
+			goto err_gpio_power;
+		}
+	}
+
+	/* Reset line: output, start deasserted */
+	if (bdata->reset_gpio >= 0) {
+		retval = synaptics_rmi4_gpio_setup(
+				bdata->reset_gpio,
+				true, 1, !bdata->reset_on_state);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to configure reset GPIO\n",
+					__func__);
+			goto err_gpio_reset;
+		}
+	}
+
+	/* Power up, then pulse reset, honoring board-specified delays */
+	if (bdata->power_gpio >= 0) {
+		gpio_set_value(bdata->power_gpio, bdata->power_on_state);
+		msleep(bdata->power_delay_ms);
+	}
+
+	if (bdata->reset_gpio >= 0) {
+		gpio_set_value(bdata->reset_gpio, bdata->reset_on_state);
+		msleep(bdata->reset_active_ms);
+		gpio_set_value(bdata->reset_gpio, !bdata->reset_on_state);
+		msleep(bdata->reset_delay_ms);
+	}
+
+	return 0;
+
+err_gpio_reset:
+	if (bdata->power_gpio >= 0)
+		synaptics_rmi4_gpio_setup(bdata->power_gpio, false, 0, 0);
+
+err_gpio_power:
+	synaptics_rmi4_gpio_setup(bdata->irq_gpio, false, 0, 0);
+
+err_gpio_irq:
+	return retval;
+}
+
+/*
+ * synaptics_rmi4_get_reg - acquire (get=true) or release (get=false) the
+ * power and bus-pullup regulators named in the board data
+ *
+ * Regulators whose name is NULL or empty are skipped. On any failure,
+ * or when called with get=false, both regulator handles are put and
+ * cleared so the function is safe to call for teardown.
+ *
+ * Returns 0 on success or the PTR_ERR from regulator_get().
+ */
+static int synaptics_rmi4_get_reg(struct synaptics_rmi4_data *rmi4_data,
+		bool get)
+{
+	int retval;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	if (!get) {
+		/* Release path reuses the error-unwind label below */
+		retval = 0;
+		goto regulator_put;
+	}
+
+	if ((bdata->pwr_reg_name != NULL) && (*bdata->pwr_reg_name != 0)) {
+		rmi4_data->pwr_reg = regulator_get(rmi4_data->pdev->dev.parent,
+				bdata->pwr_reg_name);
+		if (IS_ERR(rmi4_data->pwr_reg)) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to get power regulator\n",
+					__func__);
+			retval = PTR_ERR(rmi4_data->pwr_reg);
+			goto regulator_put;
+		}
+	}
+
+	if ((bdata->bus_reg_name != NULL) && (*bdata->bus_reg_name != 0)) {
+		rmi4_data->bus_reg = regulator_get(rmi4_data->pdev->dev.parent,
+				bdata->bus_reg_name);
+		if (IS_ERR(rmi4_data->bus_reg)) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to get bus pullup regulator\n",
+					__func__);
+			retval = PTR_ERR(rmi4_data->bus_reg);
+			goto regulator_put;
+		}
+	}
+
+	return 0;
+
+regulator_put:
+	/*
+	 * NOTE(review): when pwr_reg holds an IS_ERR() value after a failed
+	 * regulator_get(), it is passed to regulator_put() here rather than
+	 * being cleared first — confirm regulator_put() tolerates ERR_PTRs
+	 * on the target kernel.
+	 */
+	if (rmi4_data->pwr_reg) {
+		regulator_put(rmi4_data->pwr_reg);
+		rmi4_data->pwr_reg = NULL;
+	}
+
+	if (rmi4_data->bus_reg) {
+		regulator_put(rmi4_data->bus_reg);
+		rmi4_data->bus_reg = NULL;
+	}
+
+	return retval;
+}
+
+/*
+ * synaptics_rmi4_enable_reg - enable (enable=true) or disable
+ * (enable=false) the previously acquired regulators
+ *
+ * Enable order is bus pullup first, then power (with the board's
+ * power-on delay); the disable path reuses the unwind labels so both
+ * regulators are switched off in reverse order.
+ *
+ * Returns 0 on success or the error from regulator_enable().
+ */
+static int synaptics_rmi4_enable_reg(struct synaptics_rmi4_data *rmi4_data,
+		bool enable)
+{
+	int retval;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	if (!enable) {
+		/* Disable path: fall through both disable labels */
+		retval = 0;
+		goto disable_pwr_reg;
+	}
+
+	if (rmi4_data->bus_reg) {
+		retval = regulator_enable(rmi4_data->bus_reg);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to enable bus pullup regulator\n",
+					__func__);
+			goto exit;
+		}
+	}
+
+	if (rmi4_data->pwr_reg) {
+		retval = regulator_enable(rmi4_data->pwr_reg);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to enable power regulator\n",
+					__func__);
+			goto disable_bus_reg;
+		}
+		/* Give the controller time to come up after power-on */
+		msleep(bdata->power_delay_ms);
+	}
+
+	return 0;
+
+disable_pwr_reg:
+	if (rmi4_data->pwr_reg)
+		regulator_disable(rmi4_data->pwr_reg);
+
+disable_bus_reg:
+	if (rmi4_data->bus_reg)
+		regulator_disable(rmi4_data->bus_reg);
+
+exit:
+	return retval;
+}
+
+/*
+ * synaptics_rmi4_free_fingers - report all touch contacts (and stylus
+ * tools) as released
+ *
+ * Used around resets/suspend so userspace does not see stuck fingers.
+ * Holds rmi4_report_mutex to serialize against the interrupt-driven
+ * touch reporting path. Always returns 0.
+ */
+static int synaptics_rmi4_free_fingers(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char ii;
+
+	mutex_lock(&(rmi4_data->rmi4_report_mutex));
+
+#ifdef TYPE_B_PROTOCOL
+	/* Release every MT slot individually under the type-B protocol */
+	for (ii = 0; ii < rmi4_data->num_of_fingers; ii++) {
+		input_mt_slot(rmi4_data->input_dev, ii);
+		input_mt_report_slot_state(rmi4_data->input_dev,
+				MT_TOOL_FINGER, 0);
+	}
+#endif
+	input_report_key(rmi4_data->input_dev,
+			BTN_TOUCH, 0);
+	input_report_key(rmi4_data->input_dev,
+			BTN_TOOL_FINGER, 0);
+#ifndef TYPE_B_PROTOCOL
+	input_mt_sync(rmi4_data->input_dev);
+#endif
+	input_sync(rmi4_data->input_dev);
+
+	if (rmi4_data->stylus_enable) {
+		input_report_key(rmi4_data->stylus_dev,
+				BTN_TOUCH, 0);
+		input_report_key(rmi4_data->stylus_dev,
+				BTN_TOOL_PEN, 0);
+		if (rmi4_data->eraser_enable) {
+			input_report_key(rmi4_data->stylus_dev,
+					BTN_TOOL_RUBBER, 0);
+		}
+		input_sync(rmi4_data->stylus_dev);
+	}
+
+	mutex_unlock(&(rmi4_data->rmi4_report_mutex));
+
+	rmi4_data->fingers_on_2d = false;
+
+	return 0;
+}
+
+/*
+ * synaptics_rmi4_sw_reset - issue a software reset via the F01 command
+ * register and wait for the controller to come back
+ *
+ * Writes the reset command (0x01) to f01_cmd_base_addr, sleeps for the
+ * board-specified reset delay, then re-runs the bus-specific UI hardware
+ * init hook if one is provided.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int synaptics_rmi4_sw_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char command = 0x01;	/* F01_CMD: device reset */
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			rmi4_data->f01_cmd_base_addr,
+			&command,
+			sizeof(command));
+	if (retval < 0)
+		return retval;
+
+	msleep(rmi4_data->hw_if->board_data->reset_delay_ms);
+
+	if (rmi4_data->hw_if->ui_hw_init) {
+		retval = rmi4_data->hw_if->ui_hw_init(rmi4_data);
+		if (retval < 0)
+			return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * synaptics_rmi4_rebuild_work - tear down and rebuild the whole driver
+ * state after a reset that requires re-enumeration
+ *
+ * Runs from rb_workqueue (queued by synaptics_rmi4_reset_device() with
+ * rebuild=true). With the IRQ masked and both the reset mutex and the
+ * expansion-function mutex held, it removes expansion functions and
+ * sysfs attributes, unregisters the input device(s), soft-resets the
+ * controller, then recreates everything in the reverse order.
+ */
+static void synaptics_rmi4_rebuild_work(struct work_struct *work)
+{
+	int retval;
+	unsigned char attr_count;
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct delayed_work *delayed_work =
+			container_of(work, struct delayed_work, work);
+	struct synaptics_rmi4_data *rmi4_data =
+			container_of(delayed_work, struct synaptics_rmi4_data,
+			rb_work);
+
+	/* Lock order: reset mutex, then exp_data mutex (as elsewhere) */
+	mutex_lock(&(rmi4_data->rmi4_reset_mutex));
+
+	mutex_lock(&exp_data.mutex);
+
+	synaptics_rmi4_irq_enable(rmi4_data, false, false);
+
+	/* Notify expansion modules before their backing state goes away */
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->remove != NULL)
+				exp_fhandler->exp_fn->remove(rmi4_data);
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+	synaptics_rmi4_free_fingers(rmi4_data);
+	synaptics_rmi4_empty_fn_list(rmi4_data);
+	input_unregister_device(rmi4_data->input_dev);
+	rmi4_data->input_dev = NULL;
+	if (rmi4_data->stylus_enable) {
+		input_unregister_device(rmi4_data->stylus_dev);
+		rmi4_data->stylus_dev = NULL;
+	}
+
+	retval = synaptics_rmi4_sw_reset(rmi4_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to issue reset command\n",
+				__func__);
+		goto exit;
+	}
+
+	/* Re-enumerate the device and recreate the input device(s) */
+	retval = synaptics_rmi4_set_input_dev(rmi4_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set up input device\n",
+				__func__);
+		goto exit;
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			goto exit;
+		}
+	}
+
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->init != NULL)
+				exp_fhandler->exp_fn->init(rmi4_data);
+	}
+
+	retval = 0;
+
+exit:
+	/* Re-enable the IRQ even when the rebuild failed part-way */
+	synaptics_rmi4_irq_enable(rmi4_data, true, false);
+
+	mutex_unlock(&exp_data.mutex);
+
+	mutex_unlock(&(rmi4_data->rmi4_reset_mutex));
+
+	return;
+}
+
+/*
+ * synaptics_rmi4_reinit_device - re-initialize the controller without
+ * re-enumerating it (e.g. after resume)
+ *
+ * Releases any reported fingers, restores F12 report enables, re-enables
+ * F01 interrupts, lets expansion modules run their reinit hooks, and
+ * re-asserts the "configured" bit in device control.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int synaptics_rmi4_reinit_device(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	mutex_lock(&(rmi4_data->rmi4_reset_mutex));
+
+	synaptics_rmi4_free_fingers(rmi4_data);
+
+	/* F12 report enables do not survive sleep; rewrite them if present */
+	if (!list_empty(&rmi->support_fn_list)) {
+		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+			if (fhandler->fn_number == SYNAPTICS_RMI4_F12) {
+				synaptics_rmi4_f12_set_enables(rmi4_data, 0);
+				break;
+			}
+		}
+	}
+
+	retval = synaptics_rmi4_int_enable(rmi4_data, true);
+	if (retval < 0)
+		goto exit;
+
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->reinit != NULL)
+				exp_fhandler->exp_fn->reinit(rmi4_data);
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	synaptics_rmi4_set_configured(rmi4_data);
+
+	retval = 0;
+
+exit:
+	mutex_unlock(&(rmi4_data->rmi4_reset_mutex));
+	return retval;
+}
+
+/*
+ * synaptics_rmi4_reset_device - reset the controller, either in-line or
+ * as a deferred full rebuild
+ *
+ * @rebuild: when true, only queues synaptics_rmi4_rebuild_work() (which
+ * additionally tears down and recreates the input devices) and returns;
+ * when false, performs the soft reset and re-enumeration here with the
+ * IRQ masked and the reset mutex held.
+ *
+ * Returns 0 on success or a negative error code (always 0 for rebuild).
+ */
+static int synaptics_rmi4_reset_device(struct synaptics_rmi4_data *rmi4_data,
+		bool rebuild)
+{
+	int retval;
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+
+	if (rebuild) {
+		queue_delayed_work(rmi4_data->rb_workqueue,
+				&rmi4_data->rb_work,
+				msecs_to_jiffies(REBUILD_WORK_DELAY_MS));
+		return 0;
+	}
+
+	mutex_lock(&(rmi4_data->rmi4_reset_mutex));
+
+	synaptics_rmi4_irq_enable(rmi4_data, false, false);
+
+	retval = synaptics_rmi4_sw_reset(rmi4_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to issue reset command\n",
+				__func__);
+		goto exit;
+	}
+
+	synaptics_rmi4_free_fingers(rmi4_data);
+
+	/* Drop stale function handlers before re-scanning the PDT */
+	synaptics_rmi4_empty_fn_list(rmi4_data);
+
+	retval = synaptics_rmi4_query_device(rmi4_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to query device\n",
+				__func__);
+		goto exit;
+	}
+
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->reset != NULL)
+				exp_fhandler->exp_fn->reset(rmi4_data);
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	retval = 0;
+
+exit:
+	/* Unmask the IRQ regardless of reset outcome */
+	synaptics_rmi4_irq_enable(rmi4_data, true, false);
+
+	mutex_unlock(&(rmi4_data->rmi4_reset_mutex));
+
+	return retval;
+}
+
+#ifdef FB_READY_RESET
+/*
+ * synaptics_rmi4_reset_work - one-shot reset issued once the framebuffer
+ * reports ready
+ *
+ * Polls rmi4_data->fb_ready every FB_READY_WAIT_MS up to
+ * FB_READY_TIMEOUT_S seconds, then performs an in-line (non-rebuild)
+ * device reset under the expansion-init mutex. Queued once from probe.
+ */
+static void synaptics_rmi4_reset_work(struct work_struct *work)
+{
+	int retval;
+	unsigned int timeout;
+	struct synaptics_rmi4_data *rmi4_data =
+			container_of(work, struct synaptics_rmi4_data,
+			reset_work);
+
+	/* Number of poll iterations within the timeout window */
+	timeout = FB_READY_TIMEOUT_S * 1000 / FB_READY_WAIT_MS + 1;
+
+	while (!rmi4_data->fb_ready) {
+		msleep(FB_READY_WAIT_MS);
+		timeout--;
+		if (timeout == 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Timed out waiting for FB ready\n",
+					__func__);
+			return;
+		}
+	}
+
+	mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+
+	retval = synaptics_rmi4_reset_device(rmi4_data, false);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to issue reset command\n",
+				__func__);
+	}
+
+	mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+
+	return;
+}
+#endif
+
+/*
+ * synaptics_rmi4_sleep_enable - put the sensor to sleep or wake it
+ *
+ * @enable: true puts the sensor INTO sleep (NO_SLEEP_OFF | SENSOR_SLEEP);
+ * false restores normal operation with the stored no-sleep setting.
+ * Note the inverted-sounding name: "enable" enables *sleep*, not the
+ * sensor.
+ *
+ * Performs a read-modify-write on the low 3 bits of the F01 device
+ * control register and records the result in rmi4_data->sensor_sleep.
+ * Errors are logged and the cached state is left unchanged.
+ */
+static void synaptics_rmi4_sleep_enable(struct synaptics_rmi4_data *rmi4_data,
+		bool enable)
+{
+	int retval;
+	unsigned char device_ctrl;
+	unsigned char no_sleep_setting = rmi4_data->no_sleep_setting;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&device_ctrl,
+			sizeof(device_ctrl));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read device control\n",
+				__func__);
+		return;
+	}
+
+	/* Clear the sleep-mode field before setting the new mode */
+	device_ctrl = device_ctrl & ~MASK_3BIT;
+	if (enable)
+		device_ctrl = device_ctrl | NO_SLEEP_OFF | SENSOR_SLEEP;
+	else
+		device_ctrl = device_ctrl | no_sleep_setting | NORMAL_OPERATION;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&device_ctrl,
+			sizeof(device_ctrl));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write device control\n",
+				__func__);
+		return;
+	}
+
+	rmi4_data->sensor_sleep = enable;
+
+	return;
+}
+
+/*
+ * synaptics_rmi4_exp_fn_work - process pending expansion-module
+ * insertions and removals
+ *
+ * Runs from exp_data.workqueue. For each queued handler: calls init()
+ * on newly inserted modules, or remove() on modules flagged for removal
+ * (then unlinks and frees the handler). Uses the _safe list iterator
+ * because entries may be deleted while walking.
+ */
+static void synaptics_rmi4_exp_fn_work(struct work_struct *work)
+{
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler_temp;
+	struct synaptics_rmi4_data *rmi4_data = exp_data.rmi4_data;
+
+	/* Lock order: exp-init, reset, then the expansion list mutex */
+	mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+	mutex_lock(&rmi4_data->rmi4_reset_mutex);
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry_safe(exp_fhandler,
+				exp_fhandler_temp,
+				&exp_data.list,
+				link) {
+			if ((exp_fhandler->exp_fn->init != NULL) &&
+					exp_fhandler->insert) {
+				exp_fhandler->exp_fn->init(rmi4_data);
+				exp_fhandler->insert = false;
+			} else if ((exp_fhandler->exp_fn->remove != NULL) &&
+					exp_fhandler->remove) {
+				exp_fhandler->exp_fn->remove(rmi4_data);
+				list_del(&exp_fhandler->link);
+				kfree(exp_fhandler);
+			}
+		}
+	}
+	mutex_unlock(&exp_data.mutex);
+	mutex_unlock(&rmi4_data->rmi4_reset_mutex);
+	mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+
+	return;
+}
+
+/*
+ * synaptics_rmi4_new_function - register or unregister an expansion
+ * module (exported API used by companion drivers such as fw_update,
+ * rmi_dev, etc.)
+ *
+ * @exp_fn: the module's callback table
+ * @insert: true to register, false to flag the matching fn_type for
+ * removal
+ *
+ * Only records the request here; the actual init()/remove() calls are
+ * deferred to synaptics_rmi4_exp_fn_work(). May be called before the
+ * core driver probes, hence the lazy initialization of exp_data and
+ * the queue_work gate.
+ */
+void synaptics_rmi4_new_function(struct synaptics_rmi4_exp_fn *exp_fn,
+		bool insert)
+{
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+
+	/* Lazy init: a module may register before probe runs */
+	if (!exp_data.initialized) {
+		mutex_init(&exp_data.mutex);
+		INIT_LIST_HEAD(&exp_data.list);
+		exp_data.initialized = true;
+	}
+
+	mutex_lock(&exp_data.mutex);
+	if (insert) {
+		exp_fhandler = kzalloc(sizeof(*exp_fhandler), GFP_KERNEL);
+		if (!exp_fhandler) {
+			pr_err("%s: Failed to alloc mem for expansion function\n",
+					__func__);
+			goto exit;
+		}
+		exp_fhandler->exp_fn = exp_fn;
+		exp_fhandler->insert = true;
+		exp_fhandler->remove = false;
+		list_add_tail(&exp_fhandler->link, &exp_data.list);
+	} else if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link) {
+			if (exp_fhandler->exp_fn->fn_type == exp_fn->fn_type) {
+				exp_fhandler->insert = false;
+				exp_fhandler->remove = true;
+				goto exit;
+			}
+		}
+	}
+
+exit:
+	mutex_unlock(&exp_data.mutex);
+
+	/* queue_work only becomes true once probe created the workqueue */
+	if (exp_data.queue_work) {
+		queue_delayed_work(exp_data.workqueue,
+				&exp_data.work,
+				msecs_to_jiffies(EXP_FN_WORK_DELAY_MS));
+	}
+
+	return;
+}
+EXPORT_SYMBOL(synaptics_rmi4_new_function);
+
+/*
+ * synaptics_rmi4_probe - platform driver probe
+ *
+ * Brings the device up in order: regulators -> GPIOs/power sequence ->
+ * bus UI init -> device query + input registration -> notifiers ->
+ * IRQ -> sysfs -> workqueues. The error unwind releases everything in
+ * reverse order.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int synaptics_rmi4_probe(struct platform_device *pdev)
+{
+	int retval;
+	/*
+	 * Must be signed: the err_sysfs unwind loop counts down and relies
+	 * on the value going negative to terminate. The original unsigned
+	 * char never satisfied "attr_count >= 0" becoming false, wrapping
+	 * to 255 and indexing far past attrs[].
+	 */
+	int attr_count;
+	struct synaptics_rmi4_data *rmi4_data;
+	const struct synaptics_dsx_hw_interface *hw_if;
+	const struct synaptics_dsx_board_data *bdata;
+
+	hw_if = pdev->dev.platform_data;
+	if (!hw_if) {
+		dev_err(&pdev->dev,
+				"%s: No hardware interface found\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	bdata = hw_if->board_data;
+	if (!bdata) {
+		dev_err(&pdev->dev,
+				"%s: No board data found\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	rmi4_data = kzalloc(sizeof(*rmi4_data), GFP_KERNEL);
+	if (!rmi4_data) {
+		dev_err(&pdev->dev,
+				"%s: Failed to alloc mem for rmi4_data\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	rmi4_data->pdev = pdev;
+	rmi4_data->current_page = MASK_8BIT;	/* force first page select */
+	rmi4_data->hw_if = hw_if;
+	rmi4_data->suspend = false;
+	rmi4_data->irq_enabled = false;
+	rmi4_data->fingers_on_2d = false;
+
+	rmi4_data->reset_device = synaptics_rmi4_reset_device;
+	rmi4_data->irq_enable = synaptics_rmi4_irq_enable;
+	rmi4_data->sleep_enable = synaptics_rmi4_sleep_enable;
+	rmi4_data->report_touch = synaptics_rmi4_report_touch;
+
+	mutex_init(&(rmi4_data->rmi4_reset_mutex));
+	mutex_init(&(rmi4_data->rmi4_report_mutex));
+	mutex_init(&(rmi4_data->rmi4_io_ctrl_mutex));
+	mutex_init(&(rmi4_data->rmi4_exp_init_mutex));
+
+	platform_set_drvdata(pdev, rmi4_data);
+
+	vir_button_map = bdata->vir_button_map;
+
+	retval = synaptics_rmi4_get_reg(rmi4_data, true);
+	if (retval < 0) {
+		dev_err(&pdev->dev,
+				"%s: Failed to get regulators\n",
+				__func__);
+		goto err_get_reg;
+	}
+
+	retval = synaptics_rmi4_enable_reg(rmi4_data, true);
+	if (retval < 0) {
+		dev_err(&pdev->dev,
+				"%s: Failed to enable regulators\n",
+				__func__);
+		goto err_enable_reg;
+	}
+
+	retval = synaptics_rmi4_set_gpio(rmi4_data);
+	if (retval < 0) {
+		dev_err(&pdev->dev,
+				"%s: Failed to set up GPIO's\n",
+				__func__);
+		goto err_set_gpio;
+	}
+
+	if (hw_if->ui_hw_init) {
+		retval = hw_if->ui_hw_init(rmi4_data);
+		if (retval < 0) {
+			dev_err(&pdev->dev,
+					"%s: Failed to initialize hardware interface\n",
+					__func__);
+			goto err_ui_hw_init;
+		}
+	}
+
+	retval = synaptics_rmi4_set_input_dev(rmi4_data);
+	if (retval < 0) {
+		dev_err(&pdev->dev,
+				"%s: Failed to set up input device\n",
+				__func__);
+		goto err_set_input_dev;
+	}
+
+#ifdef CONFIG_FB
+	/* Notifier failure is non-fatal: touch works without blank/unblank */
+	rmi4_data->fb_notifier.notifier_call = synaptics_rmi4_fb_notifier_cb;
+	retval = fb_register_client(&rmi4_data->fb_notifier);
+	if (retval < 0) {
+		dev_err(&pdev->dev,
+				"%s: Failed to register fb notifier client\n",
+				__func__);
+	}
+#endif
+
+#ifdef USE_EARLYSUSPEND
+	rmi4_data->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+	rmi4_data->early_suspend.suspend = synaptics_rmi4_early_suspend;
+	rmi4_data->early_suspend.resume = synaptics_rmi4_late_resume;
+	register_early_suspend(&rmi4_data->early_suspend);
+#endif
+
+	/* exp_data may already be initialized by an early module register */
+	if (!exp_data.initialized) {
+		mutex_init(&exp_data.mutex);
+		INIT_LIST_HEAD(&exp_data.list);
+		exp_data.initialized = true;
+	}
+
+	rmi4_data->irq = gpio_to_irq(bdata->irq_gpio);
+
+	retval = synaptics_rmi4_irq_enable(rmi4_data, true, false);
+	if (retval < 0) {
+		dev_err(&pdev->dev,
+				"%s: Failed to enable attention interrupt\n",
+				__func__);
+		goto err_enable_irq;
+	}
+
+	if (vir_button_map->nbuttons) {
+		rmi4_data->board_prop_dir = kobject_create_and_add(
+				"board_properties", NULL);
+		if (!rmi4_data->board_prop_dir) {
+			dev_err(&pdev->dev,
+					"%s: Failed to create board_properties directory\n",
+					__func__);
+			/*
+			 * Set an error code before unwinding; the original
+			 * code jumped with retval still 0, so probe
+			 * reported success after tearing everything down.
+			 */
+			retval = -ENOMEM;
+			goto err_virtual_buttons;
+		} else {
+			retval = sysfs_create_file(rmi4_data->board_prop_dir,
+					&virtual_key_map_attr.attr);
+			if (retval < 0) {
+				dev_err(&pdev->dev,
+						"%s: Failed to create virtual key map file\n",
+						__func__);
+				goto err_virtual_buttons;
+			}
+		}
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(&pdev->dev,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			goto err_sysfs;
+		}
+	}
+
+	rmi4_data->rb_workqueue =
+			create_singlethread_workqueue("dsx_rebuild_workqueue");
+	INIT_DELAYED_WORK(&rmi4_data->rb_work, synaptics_rmi4_rebuild_work);
+
+	exp_data.workqueue = create_singlethread_workqueue("dsx_exp_workqueue");
+	INIT_DELAYED_WORK(&exp_data.work, synaptics_rmi4_exp_fn_work);
+	exp_data.rmi4_data = rmi4_data;
+	exp_data.queue_work = true;
+	queue_delayed_work(exp_data.workqueue,
+			&exp_data.work,
+			0);
+
+#ifdef FB_READY_RESET
+	rmi4_data->reset_workqueue =
+			create_singlethread_workqueue("dsx_reset_workqueue");
+	INIT_WORK(&rmi4_data->reset_work, synaptics_rmi4_reset_work);
+	queue_work(rmi4_data->reset_workqueue, &rmi4_data->reset_work);
+#endif
+
+	return 0;
+
+err_sysfs:
+	/* Remove only the attributes that were successfully created */
+	for (attr_count--; attr_count >= 0; attr_count--) {
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+err_virtual_buttons:
+	if (rmi4_data->board_prop_dir) {
+		sysfs_remove_file(rmi4_data->board_prop_dir,
+				&virtual_key_map_attr.attr);
+		kobject_put(rmi4_data->board_prop_dir);
+	}
+
+	synaptics_rmi4_irq_enable(rmi4_data, false, false);
+
+err_enable_irq:
+#ifdef CONFIG_FB
+	fb_unregister_client(&rmi4_data->fb_notifier);
+#endif
+
+#ifdef USE_EARLYSUSPEND
+	unregister_early_suspend(&rmi4_data->early_suspend);
+#endif
+
+	synaptics_rmi4_empty_fn_list(rmi4_data);
+	input_unregister_device(rmi4_data->input_dev);
+	rmi4_data->input_dev = NULL;
+	if (rmi4_data->stylus_enable) {
+		input_unregister_device(rmi4_data->stylus_dev);
+		rmi4_data->stylus_dev = NULL;
+	}
+
+err_set_input_dev:
+	synaptics_rmi4_gpio_setup(bdata->irq_gpio, false, 0, 0);
+
+	if (bdata->reset_gpio >= 0)
+		synaptics_rmi4_gpio_setup(bdata->reset_gpio, false, 0, 0);
+
+	if (bdata->power_gpio >= 0)
+		synaptics_rmi4_gpio_setup(bdata->power_gpio, false, 0, 0);
+
+err_ui_hw_init:
+err_set_gpio:
+	synaptics_rmi4_enable_reg(rmi4_data, false);
+
+err_enable_reg:
+	synaptics_rmi4_get_reg(rmi4_data, false);
+
+err_get_reg:
+	kfree(rmi4_data);
+
+	return retval;
+}
+
+/*
+ * synaptics_rmi4_remove - platform driver remove; undoes probe in
+ * reverse order
+ *
+ * Flushes and destroys all workqueues first so no deferred work touches
+ * freed state, then removes sysfs entries, disables the IRQ, drops the
+ * notifiers, unregisters the input device(s), releases GPIOs and
+ * regulators, and finally frees the driver data. Always returns 0.
+ */
+static int synaptics_rmi4_remove(struct platform_device *pdev)
+{
+	unsigned char attr_count;
+	struct synaptics_rmi4_data *rmi4_data = platform_get_drvdata(pdev);
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+#ifdef FB_READY_RESET
+	cancel_work_sync(&rmi4_data->reset_work);
+	flush_workqueue(rmi4_data->reset_workqueue);
+	destroy_workqueue(rmi4_data->reset_workqueue);
+#endif
+
+	cancel_delayed_work_sync(&exp_data.work);
+	flush_workqueue(exp_data.workqueue);
+	destroy_workqueue(exp_data.workqueue);
+
+	cancel_delayed_work_sync(&rmi4_data->rb_work);
+	flush_workqueue(rmi4_data->rb_workqueue);
+	destroy_workqueue(rmi4_data->rb_workqueue);
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+	if (rmi4_data->board_prop_dir) {
+		sysfs_remove_file(rmi4_data->board_prop_dir,
+				&virtual_key_map_attr.attr);
+		kobject_put(rmi4_data->board_prop_dir);
+	}
+
+	synaptics_rmi4_irq_enable(rmi4_data, false, false);
+
+#ifdef CONFIG_FB
+	fb_unregister_client(&rmi4_data->fb_notifier);
+#endif
+
+#ifdef USE_EARLYSUSPEND
+	unregister_early_suspend(&rmi4_data->early_suspend);
+#endif
+
+	synaptics_rmi4_empty_fn_list(rmi4_data);
+	input_unregister_device(rmi4_data->input_dev);
+	rmi4_data->input_dev = NULL;
+	if (rmi4_data->stylus_enable) {
+		input_unregister_device(rmi4_data->stylus_dev);
+		rmi4_data->stylus_dev = NULL;
+	}
+
+	synaptics_rmi4_gpio_setup(bdata->irq_gpio, false, 0, 0);
+
+	if (bdata->reset_gpio >= 0)
+		synaptics_rmi4_gpio_setup(bdata->reset_gpio, false, 0, 0);
+
+	if (bdata->power_gpio >= 0)
+		synaptics_rmi4_gpio_setup(bdata->power_gpio, false, 0, 0);
+
+	synaptics_rmi4_enable_reg(rmi4_data, false);
+	synaptics_rmi4_get_reg(rmi4_data, false);
+
+	kfree(rmi4_data);
+
+	return 0;
+}
+
+/*
+ * synaptics_rmi4_f11_wg - switch F11 between wakeup-gesture and
+ * continuous reporting mode
+ *
+ * Locates the F11 handler in the support function list and rewrites the
+ * low 3 bits of its first control register. Errors are logged and the
+ * register is left unchanged.
+ */
+static void synaptics_rmi4_f11_wg(struct synaptics_rmi4_data *rmi4_data,
+		bool enable)
+{
+	int retval;
+	bool found = false;
+	unsigned char reporting_control;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+		if (fhandler->fn_number == SYNAPTICS_RMI4_F11) {
+			found = true;
+			break;
+		}
+	}
+
+	/*
+	 * If F11 is not on the list, list_for_each_entry leaves the cursor
+	 * pointing at the container of the list head; the original code
+	 * went on to dereference it. Bail out instead.
+	 */
+	if (!found)
+		return;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fhandler->full_addr.ctrl_base,
+			&reporting_control,
+			sizeof(reporting_control));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to change reporting mode\n",
+				__func__);
+		return;
+	}
+
+	/* Read-modify-write only the reporting-mode field */
+	reporting_control = (reporting_control & ~MASK_3BIT);
+	if (enable)
+		reporting_control |= F11_WAKEUP_GESTURE_MODE;
+	else
+		reporting_control |= F11_CONTINUOUS_MODE;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			fhandler->full_addr.ctrl_base,
+			&reporting_control,
+			sizeof(reporting_control));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to change reporting mode\n",
+				__func__);
+		return;
+	}
+
+	return;
+}
+
+static void synaptics_rmi4_f12_wg(struct synaptics_rmi4_data *rmi4_data,
+		bool enable)
+{
+	int retval;
+	unsigned char offset;
+	unsigned char reporting_control[3];
+	struct synaptics_rmi4_f12_extra_data *extra_data;
+	struct synaptics_rmi4_fn *fhandler = NULL;
+	struct synaptics_rmi4_fn *entry;
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	/* Locate the F$12 function handler on the support list */
+	list_for_each_entry(entry, &rmi->support_fn_list, link) {
+		if (entry->fn_number == SYNAPTICS_RMI4_F12) {
+			fhandler = entry;
+			break;
+		}
+	}
+
+	/*
+	 * Bail out if F$12 is absent or carries no extra data. The
+	 * unguarded loop cursor would otherwise alias the list head
+	 * container and the dereferences below would read invalid
+	 * memory.
+	 */
+	if (fhandler == NULL || fhandler->extra == NULL)
+		return;
+
+	extra_data = (struct synaptics_rmi4_f12_extra_data *)fhandler->extra;
+	offset = extra_data->ctrl20_offset;
+
+	/* Read CTRL20, patch byte 2 (report mode), and write it back */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fhandler->full_addr.ctrl_base + offset,
+			reporting_control,
+			sizeof(reporting_control));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to change reporting mode\n",
+				__func__);
+		return;
+	}
+
+	if (enable)
+		reporting_control[2] = F12_WAKEUP_GESTURE_MODE;
+	else
+		reporting_control[2] = F12_CONTINUOUS_MODE;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			fhandler->full_addr.ctrl_base + offset,
+			reporting_control,
+			sizeof(reporting_control));
+	if (retval < 0)
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to change reporting mode\n",
+				__func__);
+}
+
+static void synaptics_rmi4_wakeup_gesture(struct synaptics_rmi4_data *rmi4_data,
+		bool enable)
+{
+	/* Dispatch to whichever 2D function reports wakeup gestures */
+	if (rmi4_data->f11_wakeup_gesture)
+		synaptics_rmi4_f11_wg(rmi4_data, enable);
+	else if (rmi4_data->f12_wakeup_gesture)
+		synaptics_rmi4_f12_wg(rmi4_data, enable);
+}
+
+#ifdef CONFIG_FB
+static int synaptics_rmi4_fb_notifier_cb(struct notifier_block *self,
+		unsigned long event, void *data)
+{
+	int *transition;
+	struct fb_event *evdata = data;
+	struct synaptics_rmi4_data *rmi4_data =
+			container_of(self, struct synaptics_rmi4_data,
+			fb_notifier);
+
+	/* Only blank/unblank events that carry payload data are of interest */
+	if (!evdata || !evdata->data || !rmi4_data)
+		return 0;
+	if (event != FB_EVENT_BLANK)
+		return 0;
+
+	transition = evdata->data;
+	if (*transition == FB_BLANK_POWERDOWN) {
+		/* Display going dark: put the touch controller to sleep */
+		synaptics_rmi4_suspend(&rmi4_data->pdev->dev);
+		rmi4_data->fb_ready = false;
+	} else if (*transition == FB_BLANK_UNBLANK) {
+		/* Display back on: wake the touch controller */
+		synaptics_rmi4_resume(&rmi4_data->pdev->dev);
+		rmi4_data->fb_ready = true;
+	}
+
+	return 0;
+}
+#endif
+
+#ifdef USE_EARLYSUSPEND
+/* Early-suspend hook: sleep the sensor or arm wakeup gestures instead. */
+static void synaptics_rmi4_early_suspend(struct early_suspend *h)
+{
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_data *rmi4_data =
+			container_of(h, struct synaptics_rmi4_data,
+			early_suspend);
+
+	/* Device is flagged to stay awake during suspend; do nothing */
+	if (rmi4_data->stay_awake)
+		return;
+
+	if (rmi4_data->enable_wakeup_gesture) {
+		/* Keep the sensor armed so a gesture can wake the system */
+		synaptics_rmi4_wakeup_gesture(rmi4_data, true);
+		enable_irq_wake(rmi4_data->irq);
+		goto exit;
+	}
+
+	/* Normal path: mask attention IRQ, sleep sensor, release touches */
+	synaptics_rmi4_irq_enable(rmi4_data, false, false);
+	synaptics_rmi4_sleep_enable(rmi4_data, true);
+	synaptics_rmi4_free_fingers(rmi4_data);
+
+exit:
+	/* Notify registered expansion function modules of the suspend */
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->early_suspend != NULL)
+				exp_fhandler->exp_fn->early_suspend(rmi4_data);
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	rmi4_data->suspend = true;
+
+	return;
+}
+
+/* Late-resume hook: undo whatever synaptics_rmi4_early_suspend() did. */
+static void synaptics_rmi4_late_resume(struct early_suspend *h)
+{
+#ifdef FB_READY_RESET
+	int retval;
+#endif
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_data *rmi4_data =
+			container_of(h, struct synaptics_rmi4_data,
+			early_suspend);
+
+	/* Suspend was skipped for this device; nothing to undo */
+	if (rmi4_data->stay_awake)
+		return;
+
+	if (rmi4_data->enable_wakeup_gesture) {
+		/* Leave gesture mode; IRQ no longer needs to be a wake source */
+		synaptics_rmi4_wakeup_gesture(rmi4_data, false);
+		disable_irq_wake(rmi4_data->irq);
+		goto exit;
+	}
+
+	/* Invalidate the cached page so the next access re-selects it */
+	rmi4_data->current_page = MASK_8BIT;
+
+	if (rmi4_data->suspend) {
+		synaptics_rmi4_sleep_enable(rmi4_data, false);
+		synaptics_rmi4_irq_enable(rmi4_data, true, false);
+	}
+
+exit:
+#ifdef FB_READY_RESET
+	/* Optionally reset the chip after waking from a real suspend */
+	if (rmi4_data->suspend) {
+		retval = synaptics_rmi4_reset_device(rmi4_data, false);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to issue reset command\n",
+					__func__);
+		}
+	}
+#endif
+	/* Notify registered expansion function modules of the resume */
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->late_resume != NULL)
+				exp_fhandler->exp_fn->late_resume(rmi4_data);
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	rmi4_data->suspend = false;
+
+	return;
+}
+#endif
+
+/* PM suspend callback (also invoked from the FB notifier on blank). */
+static int synaptics_rmi4_suspend(struct device *dev)
+{
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	/* Device is flagged to stay awake during suspend; do nothing */
+	if (rmi4_data->stay_awake)
+		return 0;
+
+	if (rmi4_data->enable_wakeup_gesture) {
+		/* Arm wakeup gestures and keep the IRQ as a wake source */
+		synaptics_rmi4_wakeup_gesture(rmi4_data, true);
+		enable_irq_wake(rmi4_data->irq);
+		goto exit;
+	}
+
+	/* Power down only once; early suspend may have done this already */
+	if (!rmi4_data->suspend) {
+		synaptics_rmi4_irq_enable(rmi4_data, false, false);
+		synaptics_rmi4_sleep_enable(rmi4_data, true);
+		synaptics_rmi4_free_fingers(rmi4_data);
+	}
+
+exit:
+	/* Notify registered expansion function modules of the suspend */
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->suspend != NULL)
+				exp_fhandler->exp_fn->suspend(rmi4_data);
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	rmi4_data->suspend = true;
+
+	return 0;
+}
+
+/* PM resume callback (also invoked from the FB notifier on unblank). */
+static int synaptics_rmi4_resume(struct device *dev)
+{
+#ifdef FB_READY_RESET
+	int retval;
+#endif
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	/* Suspend was skipped for this device; nothing to undo */
+	if (rmi4_data->stay_awake)
+		return 0;
+
+	if (rmi4_data->enable_wakeup_gesture) {
+		/* Leave gesture mode; IRQ no longer needs to be a wake source */
+		synaptics_rmi4_wakeup_gesture(rmi4_data, false);
+		disable_irq_wake(rmi4_data->irq);
+		goto exit;
+	}
+
+	/* Invalidate the cached page so the next access re-selects it */
+	rmi4_data->current_page = MASK_8BIT;
+
+	synaptics_rmi4_sleep_enable(rmi4_data, false);
+	synaptics_rmi4_irq_enable(rmi4_data, true, false);
+
+exit:
+#ifdef FB_READY_RESET
+	/*
+	 * NOTE(review): unlike late_resume(), this reset is issued
+	 * unconditionally — even when resuming from gesture mode and
+	 * when no suspend actually took place. Confirm intentional.
+	 */
+	retval = synaptics_rmi4_reset_device(rmi4_data, false);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to issue reset command\n",
+				__func__);
+	}
+#endif
+	/* Notify registered expansion function modules of the resume */
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->resume != NULL)
+				exp_fhandler->exp_fn->resume(rmi4_data);
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	rmi4_data->suspend = false;
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static const struct dev_pm_ops synaptics_rmi4_dev_pm_ops = {
+#ifndef CONFIG_FB
+	/*
+	 * With CONFIG_FB the suspend/resume routines are driven by the
+	 * framebuffer notifier instead, so the PM callbacks stay empty.
+	 */
+	.suspend = synaptics_rmi4_suspend,
+	.resume = synaptics_rmi4_resume,
+#endif
+};
+#endif
+
+/* Platform driver glue; PM ops are attached only when CONFIG_PM is set. */
+static struct platform_driver synaptics_rmi4_driver = {
+	.driver = {
+		.name = PLATFORM_DRIVER_NAME,
+		.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm = &synaptics_rmi4_dev_pm_ops,
+#endif
+	},
+	.probe = synaptics_rmi4_probe,
+	.remove = synaptics_rmi4_remove,
+};
+
+/*
+ * Module entry point: bring up the DSX bus access layer first, then
+ * register the platform driver that binds to the touch device.
+ */
+static int __init synaptics_rmi4_init(void)
+{
+	int retval;
+
+	retval = synaptics_rmi4_bus_init_v26();
+	if (retval)
+		return retval;
+
+	return platform_driver_register(&synaptics_rmi4_driver);
+}
+
+static void __exit synaptics_rmi4_exit(void)
+{
+	/* Tear down in the reverse order of synaptics_rmi4_init() */
+	platform_driver_unregister(&synaptics_rmi4_driver);
+	synaptics_rmi4_bus_exit_v26();
+}
+
+module_init(synaptics_rmi4_init);
+module_exit(synaptics_rmi4_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX Touch Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_core.h b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_core.h
new file mode 100644
index 0000000..0de0e99
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_core.h
@@ -0,0 +1,477 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2015 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#ifndef _SYNAPTICS_DSX_RMI4_H_
+#define _SYNAPTICS_DSX_RMI4_H_
+
+#define SYNAPTICS_DS4 (1 << 0)
+#define SYNAPTICS_DS5 (1 << 1)
+#define SYNAPTICS_DSX_DRIVER_PRODUCT (SYNAPTICS_DS4 | SYNAPTICS_DS5)
+#define SYNAPTICS_DSX_DRIVER_VERSION 0x2061
+
+#include <linux/version.h>
+#ifdef CONFIG_FB
+#include <linux/notifier.h>
+#include <linux/fb.h>
+#endif
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38))
+#define KERNEL_ABOVE_2_6_38
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+#define KERNEL_ABOVE_3_6
+#endif
+
+#ifdef KERNEL_ABOVE_2_6_38
+#define sstrtoul(...) kstrtoul(__VA_ARGS__)
+#else
+#define sstrtoul(...) strict_strtoul(__VA_ARGS__)
+#endif
+
+#define PDT_PROPS (0X00EF)
+#define PDT_START (0x00E9)
+#define PDT_END (0x00D0)
+#define PDT_ENTRY_SIZE (0x0006)
+#define PAGES_TO_SERVICE (10)
+#define PAGE_SELECT_LEN (2)
+#define ADDRESS_WORD_LEN (2)
+
+#define SYNAPTICS_RMI4_F01 (0x01)
+#define SYNAPTICS_RMI4_F11 (0x11)
+#define SYNAPTICS_RMI4_F12 (0x12)
+#define SYNAPTICS_RMI4_F1A (0x1A)
+#define SYNAPTICS_RMI4_F34 (0x34)
+#define SYNAPTICS_RMI4_F35 (0x35)
+#define SYNAPTICS_RMI4_F38 (0x38)
+#define SYNAPTICS_RMI4_F51 (0x51)
+#define SYNAPTICS_RMI4_F54 (0x54)
+#define SYNAPTICS_RMI4_F55 (0x55)
+#define SYNAPTICS_RMI4_FDB (0xDB)
+
+#define PRODUCT_INFO_SIZE 2
+#define PRODUCT_ID_SIZE 10
+#define BUILD_ID_SIZE 3
+
+#define F12_FINGERS_TO_SUPPORT 10
+#define F12_NO_OBJECT_STATUS 0x00
+#define F12_FINGER_STATUS 0x01
+#define F12_ACTIVE_STYLUS_STATUS 0x02
+#define F12_PALM_STATUS 0x03
+#define F12_HOVERING_FINGER_STATUS 0x05
+#define F12_GLOVED_FINGER_STATUS 0x06
+#define F12_NARROW_OBJECT_STATUS 0x07
+#define F12_HAND_EDGE_STATUS 0x08
+#define F12_COVER_STATUS 0x0A
+#define F12_STYLUS_STATUS 0x0B
+#define F12_ERASER_STATUS 0x0C
+#define F12_SMALL_OBJECT_STATUS 0x0D
+
+#define F12_GESTURE_DETECTION_LEN 5
+
+#define MAX_NUMBER_OF_BUTTONS 4
+#define MAX_INTR_REGISTERS 4
+
+#define MASK_16BIT 0xFFFF
+#define MASK_8BIT 0xFF
+#define MASK_7BIT 0x7F
+#define MASK_6BIT 0x3F
+#define MASK_5BIT 0x1F
+#define MASK_4BIT 0x0F
+#define MASK_3BIT 0x07
+#define MASK_2BIT 0x03
+#define MASK_1BIT 0x01
+
+/* Identifiers for attachable expansion function modules (see
+ * struct synaptics_rmi4_exp_fn); RMI_LAST is a sentinel, not a module.
+ */
+enum exp_fn {
+	RMI_DEV = 0,
+	RMI_FW_UPDATER,
+	RMI_TEST_REPORTING,
+	RMI_PROXIMITY,
+	RMI_ACTIVE_PEN,
+	RMI_GESTURE,
+	RMI_VIDEO,
+	RMI_DEBUG,
+	RMI_LAST,
+};
+
+/*
+ * struct synaptics_rmi4_fn_desc - function descriptor fields in PDT entry
+ * @query_base_addr: base address for query registers
+ * @cmd_base_addr: base address for command registers
+ * @ctrl_base_addr: base address for control registers
+ * @data_base_addr: base address for data registers
+ * @intr_src_count: number of interrupt sources
+ * @fn_version: version of function
+ * @fn_number: function number
+ */
+struct synaptics_rmi4_fn_desc {
+	union {
+		struct {
+			unsigned char query_base_addr;
+			unsigned char cmd_base_addr;
+			unsigned char ctrl_base_addr;
+			unsigned char data_base_addr;
+			unsigned char intr_src_count:3;
+			unsigned char reserved_1:2;
+			unsigned char fn_version:2;
+			unsigned char reserved_2:1;
+			unsigned char fn_number;
+		} __packed;
+		unsigned char data[6];
+	};
+};
+
+/*
+ * synaptics_rmi4_fn_full_addr - full 16-bit base addresses
+ * @query_base: 16-bit base address for query registers
+ * @cmd_base: 16-bit base address for command registers
+ * @ctrl_base: 16-bit base address for control registers
+ * @data_base: 16-bit base address for data registers
+ */
+struct synaptics_rmi4_fn_full_addr {
+	unsigned short query_base;
+	unsigned short cmd_base;
+	unsigned short ctrl_base;
+	unsigned short data_base;
+};
+
+/*
+ * struct synaptics_rmi4_f11_extra_data - extra data of F$11
+ * @data38_offset: offset to F11_2D_DATA38 register
+ */
+struct synaptics_rmi4_f11_extra_data {
+	unsigned char data38_offset;
+};
+
+/*
+ * struct synaptics_rmi4_f12_extra_data - extra data of F$12
+ * @data1_offset: offset to F12_2D_DATA01 register
+ * @data4_offset: offset to F12_2D_DATA04 register
+ * @data15_offset: offset to F12_2D_DATA15 register
+ * @data15_size: size of F12_2D_DATA15 register
+ * @data15_data: buffer for reading F12_2D_DATA15 register
+ * @data23_offset: offset to F12_2D_DATA23 register
+ * @data23_size: size of F12_2D_DATA23 register
+ * @data23_data: buffer for reading F12_2D_DATA23 register
+ * @ctrl20_offset: offset to F12_2D_CTRL20 register
+ */
+struct synaptics_rmi4_f12_extra_data {
+	unsigned char data1_offset;
+	unsigned char data4_offset;
+	unsigned char data15_offset;
+	unsigned char data15_size;
+	unsigned char data15_data[(F12_FINGERS_TO_SUPPORT + 7) / 8];
+	unsigned char data23_offset;
+	unsigned char data23_size;
+	unsigned char data23_data[F12_FINGERS_TO_SUPPORT];
+	unsigned char ctrl20_offset;
+};
+
+/*
+ * struct synaptics_rmi4_fn - RMI function handler
+ * @fn_number: function number
+ * @num_of_data_sources: number of data sources
+ * @num_of_data_points: maximum number of fingers supported
+ * @intr_reg_num: index to associated interrupt register
+ * @intr_mask: interrupt mask
+ * @full_addr: full 16-bit base addresses of function registers
+ * @link: linked list for function handlers
+ * @data_size: size of private data
+ * @data: pointer to private data
+ * @extra: pointer to extra data
+ */
+struct synaptics_rmi4_fn {
+	unsigned char fn_number;
+	unsigned char num_of_data_sources;
+	unsigned char num_of_data_points;
+	unsigned char intr_reg_num;
+	unsigned char intr_mask;
+	struct synaptics_rmi4_fn_full_addr full_addr;
+	struct list_head link;
+	int data_size;
+	void *data;
+	void *extra;
+};
+
+/*
+ * struct synaptics_rmi4_device_info - device information
+ * @version_major: RMI protocol major version number
+ * @version_minor: RMI protocol minor version number
+ * @manufacturer_id: manufacturer ID
+ * @product_props: product properties
+ * @product_info: product information
+ * @product_id_string: product ID
+ * @build_id: firmware build ID
+ * @support_fn_list: linked list for function handlers
+ */
+struct synaptics_rmi4_device_info {
+	unsigned int version_major;
+	unsigned int version_minor;
+	unsigned char manufacturer_id;
+	unsigned char product_props;
+	unsigned char product_info[PRODUCT_INFO_SIZE];
+	unsigned char product_id_string[PRODUCT_ID_SIZE + 1];
+	unsigned char build_id[BUILD_ID_SIZE];
+	struct list_head support_fn_list;
+};
+
+/*
+ * struct synaptics_rmi4_data - RMI4 device instance data
+ * @pdev: pointer to platform device
+ * @input_dev: pointer to associated input device
+ * @stylus_dev: pointer to associated stylus device
+ * @hw_if: pointer to hardware interface data
+ * @rmi4_mod_info: device information
+ * @board_prop_dir: /sys/board_properties directory for virtual key map file
+ * @pwr_reg: pointer to regulator for power control
+ * @bus_reg: pointer to regulator for bus pullup control
+ * @rmi4_reset_mutex: mutex for software reset
+ * @rmi4_report_mutex: mutex for input event reporting
+ * @rmi4_io_ctrl_mutex: mutex for communication interface I/O
+ * @rmi4_exp_init_mutex: mutex for expansion function module initialization
+ * @rb_work: work for rebuilding input device
+ * @rb_workqueue: workqueue for rebuilding input device
+ * @fb_notifier: framebuffer notifier client
+ * @reset_work: work for issuing reset after display framebuffer ready
+ * @reset_workqueue: workqueue for issuing reset after display framebuffer ready
+ * @early_suspend: early suspend power management
+ * @current_page: current RMI page for register access
+ * @button_0d_enabled: switch for enabling 0d button support
+ * @num_of_tx: number of Tx channels for 2D touch
+ * @num_of_rx: number of Rx channels for 2D touch
+ * @num_of_fingers: maximum number of fingers for 2D touch
+ * @max_touch_width: maximum touch width
+ * @report_enable: input data to report for F$12
+ * @no_sleep_setting: default setting of NoSleep in F01_RMI_CTRL00 register
+ * @gesture_detection: detected gesture type and properties
+ * @intr_mask: interrupt enable mask
+ * @button_txrx_mapping: Tx Rx mapping of 0D buttons
+ * @num_of_intr_regs: number of interrupt registers
+ * @f01_query_base_addr: query base address for f$01
+ * @f01_cmd_base_addr: command base address for f$01
+ * @f01_ctrl_base_addr: control base address for f$01
+ * @f01_data_base_addr: data base address for f$01
+ * @firmware_id: firmware build ID
+ * @irq: attention interrupt
+ * @sensor_max_x: maximum x coordinate for 2D touch
+ * @sensor_max_y: maximum y coordinate for 2D touch
+ * @flash_prog_mode: flag to indicate flash programming mode status
+ * @irq_enabled: flag to indicate attention interrupt enable status
+ * @fingers_on_2d: flag to indicate presence of fingers in 2D area
+ * @suspend: flag to indicate whether in suspend state
+ * @sensor_sleep: flag to indicate sleep state of sensor
+ * @stay_awake: flag to indicate whether to stay awake during suspend
+ * @fb_ready: flag to indicate whether display framebuffer in ready state
+ * @f11_wakeup_gesture: flag to indicate support for wakeup gestures in F$11
+ * @f12_wakeup_gesture: flag to indicate support for wakeup gestures in F$12
+ * @enable_wakeup_gesture: flag to indicate usage of wakeup gestures
+ * @wedge_sensor: flag to indicate use of wedge sensor
+ * @report_pressure: flag to indicate reporting of pressure data
+ * @stylus_enable: flag to indicate reporting of stylus data
+ * @eraser_enable: flag to indicate reporting of eraser data
+ * @external_afe_buttons: flag to indicate presence of external AFE buttons
+ * @reset_device: pointer to device reset function
+ * @irq_enable: pointer to interrupt enable function
+ * @sleep_enable: pointer to sleep enable function
+ * @report_touch: pointer to touch reporting function
+ */
+struct synaptics_rmi4_data {
+	struct platform_device *pdev;
+	struct input_dev *input_dev;
+	struct input_dev *stylus_dev;
+	const struct synaptics_dsx_hw_interface *hw_if;
+	struct synaptics_rmi4_device_info rmi4_mod_info;
+	struct kobject *board_prop_dir;
+	struct regulator *pwr_reg;
+	struct regulator *bus_reg;
+	struct mutex rmi4_reset_mutex;
+	struct mutex rmi4_report_mutex;
+	struct mutex rmi4_io_ctrl_mutex;
+	struct mutex rmi4_exp_init_mutex;
+	struct delayed_work rb_work;
+	struct workqueue_struct *rb_workqueue;
+#ifdef CONFIG_FB
+	struct notifier_block fb_notifier;
+	struct work_struct reset_work;
+	struct workqueue_struct *reset_workqueue;
+#endif
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	struct early_suspend early_suspend;
+#endif
+	unsigned char current_page;
+	unsigned char button_0d_enabled;
+	unsigned char num_of_tx;
+	unsigned char num_of_rx;
+	unsigned char num_of_fingers;
+	unsigned char max_touch_width;
+	unsigned char report_enable;
+	unsigned char no_sleep_setting;
+	unsigned char gesture_detection[F12_GESTURE_DETECTION_LEN];
+	unsigned char intr_mask[MAX_INTR_REGISTERS];
+	unsigned char *button_txrx_mapping;
+	unsigned short num_of_intr_regs;
+	unsigned short f01_query_base_addr;
+	unsigned short f01_cmd_base_addr;
+	unsigned short f01_ctrl_base_addr;
+	unsigned short f01_data_base_addr;
+	unsigned int firmware_id;
+	int irq;
+	int sensor_max_x;
+	int sensor_max_y;
+	bool flash_prog_mode;
+	bool irq_enabled;
+	bool fingers_on_2d;
+	bool suspend;
+	bool sensor_sleep;
+	bool stay_awake;
+	bool fb_ready;
+	bool f11_wakeup_gesture;
+	bool f12_wakeup_gesture;
+	bool enable_wakeup_gesture;
+	bool wedge_sensor;
+	bool report_pressure;
+	bool stylus_enable;
+	bool eraser_enable;
+	bool external_afe_buttons;
+	int (*reset_device)(struct synaptics_rmi4_data *rmi4_data,
+			bool rebuild);
+	int (*irq_enable)(struct synaptics_rmi4_data *rmi4_data, bool enable,
+			bool attn_only);
+	void (*sleep_enable)(struct synaptics_rmi4_data *rmi4_data,
+			bool enable);
+	void (*report_touch)(struct synaptics_rmi4_data *rmi4_data,
+			struct synaptics_rmi4_fn *fhandler);
+};
+
+/* Register read/write ops supplied by the underlying bus layer. */
+struct synaptics_dsx_bus_access {
+	unsigned char type;
+	int (*read)(struct synaptics_rmi4_data *rmi4_data, unsigned short addr,
+		unsigned char *data, unsigned short length);
+	int (*write)(struct synaptics_rmi4_data *rmi4_data, unsigned short addr,
+		unsigned char *data, unsigned short length);
+};
+
+/* Board data, bus access ops and optional bootloader/UI init hooks. */
+struct synaptics_dsx_hw_interface {
+	struct synaptics_dsx_board_data *board_data;
+	const struct synaptics_dsx_bus_access *bus_access;
+	int (*bl_hw_init)(struct synaptics_rmi4_data *rmi4_data);
+	int (*ui_hw_init)(struct synaptics_rmi4_data *rmi4_data);
+};
+
+/* Callback table registered by an expansion function module. */
+struct synaptics_rmi4_exp_fn {
+	enum exp_fn fn_type;
+	int (*init)(struct synaptics_rmi4_data *rmi4_data);
+	void (*remove)(struct synaptics_rmi4_data *rmi4_data);
+	void (*reset)(struct synaptics_rmi4_data *rmi4_data);
+	void (*reinit)(struct synaptics_rmi4_data *rmi4_data);
+	void (*early_suspend)(struct synaptics_rmi4_data *rmi4_data);
+	void (*suspend)(struct synaptics_rmi4_data *rmi4_data);
+	void (*resume)(struct synaptics_rmi4_data *rmi4_data);
+	void (*late_resume)(struct synaptics_rmi4_data *rmi4_data);
+	void (*attn)(struct synaptics_rmi4_data *rmi4_data,
+			unsigned char intr_mask);
+};
+
+/* Register/unregister the DSX bus layer (called from module init/exit). */
+int synaptics_rmi4_bus_init_v26(void);
+
+void synaptics_rmi4_bus_exit_v26(void);
+
+/* Insert (insert = true) or remove an expansion function module. */
+void synaptics_rmi4_new_function(struct synaptics_rmi4_exp_fn *exp_fn_module,
+		bool insert);
+
+/* Trigger a firmware update from the given image data. */
+int synaptics_fw_updater(const unsigned char *fw_data);
+
+/* Read len bytes from register address addr via the bus access layer. */
+static inline int synaptics_rmi4_reg_read(
+		struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr,
+		unsigned char *data,
+		unsigned short len)
+{
+	return rmi4_data->hw_if->bus_access->read(rmi4_data, addr, data, len);
+}
+
+/* Write len bytes to register address addr via the bus access layer. */
+static inline int synaptics_rmi4_reg_write(
+		struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr,
+		unsigned char *data,
+		unsigned short len)
+{
+	return rmi4_data->hw_if->bus_access->write(rmi4_data, addr, data, len);
+}
+
+/* Stub .show handler for write-only sysfs attributes; always -EPERM. */
+static inline ssize_t synaptics_rmi4_show_error(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	dev_warn(dev, "%s Attempted to read from write-only attribute %s\n",
+			__func__, attr->attr.name);
+	return -EPERM;
+}
+
+/* Stub .store handler for read-only sysfs attributes; always -EPERM. */
+static inline ssize_t synaptics_rmi4_store_error(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	dev_warn(dev, "%s Attempted to write to read-only attribute %s\n",
+			__func__, attr->attr.name);
+	return -EPERM;
+}
+
+/*
+ * Bounds-checked memcpy: rejects NULL buffers and any copy larger
+ * than either the destination or the source. Returns 0 or -EINVAL.
+ */
+static inline int secure_memcpy(unsigned char *dest, unsigned int dest_size,
+		const unsigned char *src, unsigned int src_size,
+		unsigned int count)
+{
+	if (!dest || !src)
+		return -EINVAL;
+	if (count > dest_size || count > src_size)
+		return -EINVAL;
+
+	memcpy(dest, src, count);
+	return 0;
+}
+
+/* Assemble a little-endian byte pair into a host unsigned short. */
+static inline void batohs(unsigned short *dest, unsigned char *src)
+{
+	*dest = (unsigned short)((src[1] << 8) | src[0]);
+}
+
+/* Split a host unsigned short into a little-endian byte pair. */
+static inline void hstoba(unsigned char *dest, unsigned short src)
+{
+	dest[0] = (unsigned char)(src & 0xFF);
+	dest[1] = (unsigned char)(src >> 8);
+}
+
+#endif
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_fw_update.c b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_fw_update.c
new file mode 100644
index 0000000..9fb9beb
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_fw_update.c
@@ -0,0 +1,4438 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2015 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx_v2_6.h>
+#include "synaptics_dsx_core.h"
+
+#define FW_IMAGE_NAME "synaptics/startup_fw_update.img"
+/*
+#define DO_STARTUP_FW_UPDATE
+*/
+/*
+#ifdef DO_STARTUP_FW_UPDATE
+#ifdef CONFIG_FB
+#define WAIT_FOR_FB_READY
+#define FB_READY_WAIT_MS 100
+#define FB_READY_TIMEOUT_S 30
+#endif
+#endif
+*/
+#define FORCE_UPDATE false
+#define DO_LOCKDOWN false
+
+#define MAX_IMAGE_NAME_LEN 256
+#define MAX_FIRMWARE_ID_LEN 10
+
+#define IMAGE_HEADER_VERSION_05 0x05
+#define IMAGE_HEADER_VERSION_06 0x06
+#define IMAGE_HEADER_VERSION_10 0x10
+
+#define IMAGE_AREA_OFFSET 0x100
+#define LOCKDOWN_SIZE 0x50
+
+#define V5V6_BOOTLOADER_ID_OFFSET 0
+#define V5V6_CONFIG_ID_SIZE 4
+
+#define V5_PROPERTIES_OFFSET 2
+#define V5_BLOCK_SIZE_OFFSET 3
+#define V5_BLOCK_COUNT_OFFSET 5
+#define V5_BLOCK_NUMBER_OFFSET 0
+#define V5_BLOCK_DATA_OFFSET 2
+
+#define V6_PROPERTIES_OFFSET 1
+#define V6_BLOCK_SIZE_OFFSET 2
+#define V6_BLOCK_COUNT_OFFSET 3
+#define V6_PROPERTIES_2_OFFSET 4
+#define V6_GUEST_CODE_BLOCK_COUNT_OFFSET 5
+#define V6_BLOCK_NUMBER_OFFSET 0
+#define V6_BLOCK_DATA_OFFSET 1
+#define V6_FLASH_COMMAND_OFFSET 2
+#define V6_FLASH_STATUS_OFFSET 3
+
+#define V7_CONFIG_ID_SIZE 32
+
+#define V7_FLASH_STATUS_OFFSET 0
+#define V7_PARTITION_ID_OFFSET 1
+#define V7_BLOCK_NUMBER_OFFSET 2
+#define V7_TRANSFER_LENGTH_OFFSET 3
+#define V7_COMMAND_OFFSET 4
+#define V7_PAYLOAD_OFFSET 5
+
+#define V7_PARTITION_SUPPORT_BYTES 4
+
+#define F35_ERROR_CODE_OFFSET 0
+#define F35_CHUNK_NUM_LSB_OFFSET 0
+#define F35_CHUNK_NUM_MSB_OFFSET 1
+#define F35_CHUNK_DATA_OFFSET 2
+#define F35_CHUNK_COMMAND_OFFSET 18
+
+#define F35_CHUNK_SIZE 16
+#define F35_ERASE_ALL_WAIT_MS 3000
+#define F35_RESET_WAIT_MS 250
+
+#define SLEEP_MODE_NORMAL (0x00)
+#define SLEEP_MODE_SENSOR_SLEEP (0x01)
+#define SLEEP_MODE_RESERVED0 (0x02)
+#define SLEEP_MODE_RESERVED1 (0x03)
+
+#define ENABLE_WAIT_MS (1 * 1000)
+#define WRITE_WAIT_MS (3 * 1000)
+#define ERASE_WAIT_MS (5 * 1000)
+
+#define MIN_SLEEP_TIME_US 50
+#define MAX_SLEEP_TIME_US 100
+
+#define INT_DISABLE_WAIT_MS 20
+#define ENTER_FLASH_PROG_WAIT_MS 20
+
+static int fwu_do_reflash(void);
+
+static int fwu_recovery_check_status(void);
+
+static ssize_t fwu_sysfs_show_image(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+static ssize_t fwu_sysfs_store_image(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+static ssize_t fwu_sysfs_do_recovery_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_do_reflash_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_write_config_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_read_config_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_config_area_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_image_name_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_image_size_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_block_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_firmware_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_configuration_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_disp_config_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_perm_config_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_bl_config_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_guest_code_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_write_guest_code_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+enum f34_version {
+	F34_V0 = 0,
+	F34_V1,
+	F34_V2,
+};
+
+enum bl_version {
+	BL_V5 = 5,
+	BL_V6 = 6,
+	BL_V7 = 7,
+	BL_V8 = 8,
+};
+
+enum flash_area {
+	NONE = 0,
+	UI_FIRMWARE,
+	UI_CONFIG,
+};
+
+enum update_mode {
+	NORMAL = 1,
+	FORCE = 2,
+	LOCKDOWN = 8,
+};
+
+enum config_area {
+	UI_CONFIG_AREA = 0,
+	PM_CONFIG_AREA,
+	BL_CONFIG_AREA,
+	DP_CONFIG_AREA,
+	FLASH_CONFIG_AREA,
+};
+
+enum v7_status {
+	SUCCESS = 0x00,
+	DEVICE_NOT_IN_BOOTLOADER_MODE,
+	INVALID_PARTITION,
+	INVALID_COMMAND,
+	INVALID_BLOCK_OFFSET,
+	INVALID_TRANSFER,
+	NOT_ERASED,
+	FLASH_PROGRAMMING_KEY_INCORRECT,
+	BAD_PARTITION_TABLE,
+	CHECKSUM_FAILED,
+	FLASH_HARDWARE_FAILURE = 0x1f,
+};
+
+enum v7_partition_id {
+	BOOTLOADER_PARTITION = 0x01,
+	DEVICE_CONFIG_PARTITION,
+	FLASH_CONFIG_PARTITION,
+	MANUFACTURING_BLOCK_PARTITION,
+	GUEST_SERIALIZATION_PARTITION,
+	GLOBAL_PARAMETERS_PARTITION,
+	CORE_CODE_PARTITION,
+	CORE_CONFIG_PARTITION,
+	GUEST_CODE_PARTITION,
+	DISPLAY_CONFIG_PARTITION,
+};
+
+enum v7_flash_command {
+	CMD_V7_IDLE = 0x00,
+	CMD_V7_ENTER_BL,
+	CMD_V7_READ,
+	CMD_V7_WRITE,
+	CMD_V7_ERASE,
+	CMD_V7_ERASE_AP,
+	CMD_V7_SENSOR_ID,
+};
+
+enum v5v6_flash_command {
+	CMD_V5V6_IDLE = 0x0,
+	CMD_V5V6_WRITE_FW = 0x2,
+	CMD_V5V6_ERASE_ALL = 0x3,
+	CMD_V5V6_WRITE_LOCKDOWN = 0x4,
+	CMD_V5V6_READ_CONFIG = 0x5,
+	CMD_V5V6_WRITE_CONFIG = 0x6,
+	CMD_V5V6_ERASE_UI_CONFIG = 0x7,
+	CMD_V5V6_ERASE_BL_CONFIG = 0x9,
+	CMD_V5V6_ERASE_DISP_CONFIG = 0xa,
+	CMD_V5V6_ERASE_GUEST_CODE = 0xb,
+	CMD_V5V6_WRITE_GUEST_CODE = 0xc,
+	CMD_V5V6_ENABLE_FLASH_PROG = 0xf,
+};
+
+enum flash_command {
+	CMD_IDLE = 0,
+	CMD_WRITE_FW,
+	CMD_WRITE_CONFIG,
+	CMD_WRITE_LOCKDOWN,
+	CMD_WRITE_GUEST_CODE,
+	CMD_READ_CONFIG,
+	CMD_ERASE_ALL,
+	CMD_ERASE_UI_FIRMWARE,
+	CMD_ERASE_UI_CONFIG,
+	CMD_ERASE_BL_CONFIG,
+	CMD_ERASE_DISP_CONFIG,
+	CMD_ERASE_FLASH_CONFIG,
+	CMD_ERASE_GUEST_CODE,
+	CMD_ENABLE_FLASH_PROG,
+};
+
+enum f35_flash_command {
+	CMD_F35_IDLE = 0x0,
+	CMD_F35_RESERVED = 0x1,
+	CMD_F35_WRITE_CHUNK = 0x2,
+	CMD_F35_ERASE_ALL = 0x3,
+	CMD_F35_RESET = 0x10,
+};
+
+enum container_id {
+	TOP_LEVEL_CONTAINER = 0,
+	UI_CONTAINER,
+	UI_CONFIG_CONTAINER,
+	BL_CONTAINER,
+	BL_IMAGE_CONTAINER,
+	BL_CONFIG_CONTAINER,
+	BL_LOCKDOWN_INFO_CONTAINER,
+	PERMANENT_CONFIG_CONTAINER,
+	GUEST_CODE_CONTAINER,
+	BL_PROTOCOL_DESCRIPTOR_CONTAINER,
+	UI_PROTOCOL_DESCRIPTOR_CONTAINER,
+	RMI_SELF_DISCOVERY_CONTAINER,
+	RMI_PAGE_CONTENT_CONTAINER,
+	GENERAL_INFORMATION_CONTAINER,
+	DEVICE_CONFIG_CONTAINER,
+	FLASH_CONFIG_CONTAINER,
+	GUEST_SERIALIZATION_CONTAINER,
+	GLOBAL_PARAMETERS_CONTAINER,
+	CORE_CODE_CONTAINER,
+	CORE_CONFIG_CONTAINER,
+	DISPLAY_CONFIG_CONTAINER,
+};
+
+struct pdt_properties {
+	union {
+		struct {
+			unsigned char reserved_1:6;
+			unsigned char has_bsr:1;
+			unsigned char reserved_2:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/*
+ * One 8-byte entry of the v7/v8 bootloader partition table. Length and
+ * physical address are split into low/high byte fields; see
+ * fwu_parse_partition_table() for how they are reassembled.
+ */
+struct partition_table {
+	unsigned char partition_id:5;
+	unsigned char byte_0_reserved:3;
+	unsigned char byte_1_reserved;
+	unsigned char partition_length_7_0;
+	unsigned char partition_length_15_8;
+	unsigned char start_physical_address_7_0;
+	unsigned char start_physical_address_15_8;
+	unsigned char partition_properties_7_0;
+	unsigned char partition_properties_15_8;
+} __packed;
+
+struct f01_device_control {
+	union {
+		struct {
+			unsigned char sleep_mode:2;
+			unsigned char nosleep:1;
+			unsigned char reserved:2;
+			unsigned char charger_connected:1;
+			unsigned char report_rate:1;
+			unsigned char configured:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f34_v7_query_0 {
+	union {
+		struct {
+			unsigned char subpacket_1_size:3;
+			unsigned char has_config_id:1;
+			unsigned char f34_query0_b4:1;
+			unsigned char has_thqa:1;
+			unsigned char f34_query0_b6__7:2;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f34_v7_query_1_7 {
+	union {
+		struct {
+			/* query 1 */
+			unsigned char bl_minor_revision;
+			unsigned char bl_major_revision;
+
+			/* query 2 */
+			unsigned char bl_fw_id_7_0;
+			unsigned char bl_fw_id_15_8;
+			unsigned char bl_fw_id_23_16;
+			unsigned char bl_fw_id_31_24;
+
+			/* query 3 */
+			unsigned char minimum_write_size;
+			unsigned char block_size_7_0;
+			unsigned char block_size_15_8;
+			unsigned char flash_page_size_7_0;
+			unsigned char flash_page_size_15_8;
+
+			/* query 4 */
+			unsigned char adjustable_partition_area_size_7_0;
+			unsigned char adjustable_partition_area_size_15_8;
+
+			/* query 5 */
+			unsigned char flash_config_length_7_0;
+			unsigned char flash_config_length_15_8;
+
+			/* query 6 */
+			unsigned char payload_length_7_0;
+			unsigned char payload_length_15_8;
+
+			/* query 7 */
+			unsigned char f34_query7_b0:1;
+			unsigned char has_bootloader:1;
+			unsigned char has_device_config:1;
+			unsigned char has_flash_config:1;
+			unsigned char has_manufacturing_block:1;
+			unsigned char has_guest_serialization:1;
+			unsigned char has_global_parameters:1;
+			unsigned char has_core_code:1;
+			unsigned char has_core_config:1;
+			unsigned char has_guest_code:1;
+			unsigned char has_display_config:1;
+			unsigned char f34_query7_b11__15:5;
+			unsigned char f34_query7_b16__23;
+			unsigned char f34_query7_b24__31;
+		} __packed;
+		unsigned char data[21];
+	};
+};
+
+struct f34_v7_data0 {
+	union {
+		struct {
+			unsigned char operation_status:5;
+			unsigned char device_cfg_status:2;
+			unsigned char bl_mode:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f34_v7_data_1_5 {
+	union {
+		struct {
+			unsigned char partition_id:5;
+			unsigned char f34_data1_b5__7:3;
+			unsigned char block_offset_7_0;
+			unsigned char block_offset_15_8;
+			unsigned char transfer_length_7_0;
+			unsigned char transfer_length_15_8;
+			unsigned char command;
+			unsigned char payload_0;
+			unsigned char payload_1;
+		} __packed;
+		unsigned char data[8];
+	};
+};
+
+struct f34_v5v6_flash_properties {
+	union {
+		struct {
+			unsigned char reg_map:1;
+			unsigned char unlocked:1;
+			unsigned char has_config_id:1;
+			unsigned char has_pm_config:1;
+			unsigned char has_bl_config:1;
+			unsigned char has_disp_config:1;
+			unsigned char has_ctrl1:1;
+			unsigned char has_query4:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f34_v5v6_flash_properties_2 {
+	union {
+		struct {
+			unsigned char has_guest_code:1;
+			unsigned char reserved:7;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct register_offset {
+	unsigned char properties;
+	unsigned char properties_2;
+	unsigned char block_size;
+	unsigned char block_count;
+	unsigned char gc_block_count;
+	unsigned char flash_status;
+	unsigned char partition_id;
+	unsigned char block_number;
+	unsigned char transfer_length;
+	unsigned char flash_cmd;
+	unsigned char payload;
+};
+
+struct block_count {
+	unsigned short ui_firmware;
+	unsigned short ui_config;
+	unsigned short dp_config;
+	unsigned short pm_config;
+	unsigned short fl_config;
+	unsigned short bl_image;
+	unsigned short bl_config;
+	unsigned short lockdown;
+	unsigned short guest_code;
+	unsigned short total_count;
+};
+
+struct physical_address {
+	unsigned short ui_firmware;
+	unsigned short ui_config;
+	unsigned short dp_config;
+	unsigned short fl_config;
+	unsigned short guest_code;
+};
+
+struct container_descriptor {
+	unsigned char content_checksum[4];
+	unsigned char container_id[2];
+	unsigned char minor_version;
+	unsigned char major_version;
+	unsigned char reserved_08;
+	unsigned char reserved_09;
+	unsigned char reserved_0a;
+	unsigned char reserved_0b;
+	unsigned char container_option_flags[4];
+	unsigned char content_options_length[4];
+	unsigned char content_options_address[4];
+	unsigned char content_length[4];
+	unsigned char content_address[4];
+};
+
+struct image_header_10 {
+	unsigned char checksum[4];
+	unsigned char reserved_04;
+	unsigned char reserved_05;
+	unsigned char minor_header_version;
+	unsigned char major_header_version;
+	unsigned char reserved_08;
+	unsigned char reserved_09;
+	unsigned char reserved_0a;
+	unsigned char reserved_0b;
+	unsigned char top_level_container_start_addr[4];
+};
+
+struct image_header_05_06 {
+	/* 0x00 - 0x0f */
+	unsigned char checksum[4];
+	unsigned char reserved_04;
+	unsigned char reserved_05;
+	unsigned char options_firmware_id:1;
+	unsigned char options_bootloader:1;
+	unsigned char options_guest_code:1;
+	unsigned char options_tddi:1;
+	unsigned char options_reserved:4;
+	unsigned char header_version;
+	unsigned char firmware_size[4];
+	unsigned char config_size[4];
+	/* 0x10 - 0x1f */
+	unsigned char product_id[PRODUCT_ID_SIZE];
+	unsigned char package_id[2];
+	unsigned char package_id_revision[2];
+	unsigned char product_info[PRODUCT_INFO_SIZE];
+	/* 0x20 - 0x2f */
+	unsigned char bootloader_addr[4];
+	unsigned char bootloader_size[4];
+	unsigned char ui_addr[4];
+	unsigned char ui_size[4];
+	/* 0x30 - 0x3f */
+	unsigned char ds_id[16];
+	/* 0x40 - 0x4f */
+	union {
+		struct {
+			unsigned char cstmr_product_id[PRODUCT_ID_SIZE];
+			unsigned char reserved_4a_4f[6];
+		};
+		struct {
+			unsigned char dsp_cfg_addr[4];
+			unsigned char dsp_cfg_size[4];
+			unsigned char reserved_48_4f[8];
+		};
+	};
+	/* 0x50 - 0x53 */
+	unsigned char firmware_id[4];
+};
+
+struct block_data {
+	unsigned int size;
+	const unsigned char *data;
+};
+
+struct image_metadata {
+	bool contains_firmware_id;
+	bool contains_bootloader;
+	bool contains_guest_code;
+	bool contains_disp_config;
+	bool contains_perm_config;
+	bool contains_flash_config;
+	unsigned int firmware_id;
+	unsigned int checksum;
+	unsigned int bootloader_size;
+	unsigned int disp_config_offset;
+	unsigned char bl_version;
+	unsigned char product_id[PRODUCT_ID_SIZE + 1];
+	unsigned char cstmr_product_id[PRODUCT_ID_SIZE + 1];
+	struct block_data bootloader;
+	struct block_data ui_firmware;
+	struct block_data ui_config;
+	struct block_data dp_config;
+	struct block_data pm_config;
+	struct block_data fl_config;
+	struct block_data bl_image;
+	struct block_data bl_config;
+	struct block_data lockdown;
+	struct block_data guest_code;
+	struct block_count blkcount;
+	struct physical_address phyaddr;
+};
+
+/*
+ * Per-device firmware-updater state: bootloader version and mode flags,
+ * buffers for image/config data, decoded register offsets, and the block
+ * counts / physical addresses parsed from both the device and the image.
+ * A single instance is allocated and tracked via the file-scope fwu pointer.
+ */
+struct synaptics_rmi4_fwu_handle {
+	enum bl_version bl_version;
+	bool initialized;
+	bool in_bl_mode;
+	bool in_ub_mode;
+	bool force_update;
+	bool do_lockdown;
+	bool has_guest_code;
+	bool new_partition_table;
+	unsigned int data_pos;
+	unsigned char *ext_data_source;
+	unsigned char *read_config_buf;
+	unsigned char intr_mask;
+	unsigned char command;
+	unsigned char bootloader_id[2];
+	unsigned char config_id[32];
+	unsigned char flash_status;
+	unsigned char partitions;
+	unsigned short block_size;
+	unsigned short config_size;
+	unsigned short config_area;
+	unsigned short config_block_count;
+	unsigned short flash_config_length;
+	unsigned short payload_length;
+	unsigned short partition_table_bytes;
+	unsigned short read_config_buf_size;
+	const unsigned char *config_data;
+	const unsigned char *image;
+	unsigned char *image_name;
+	unsigned int image_size;
+	struct image_metadata img;
+	struct register_offset off;
+	struct block_count blkcount;
+	struct physical_address phyaddr;
+	struct f34_v5v6_flash_properties flash_properties;
+	struct synaptics_rmi4_fn_desc f34_fd;
+	struct synaptics_rmi4_fn_desc f35_fd;
+	struct synaptics_rmi4_data *rmi4_data;
+	struct workqueue_struct *fwu_workqueue;
+	struct work_struct fwu_work;
+};
+
+/*
+ * sysfs binary attribute "data": staging buffer for firmware image and
+ * config transfers (fwu_sysfs_show_image / fwu_sysfs_store_image).
+ *
+ * Was (S_IRUGO | S_IWUGO); a world-writable sysfs node lets any user trigger
+ * flash operations and is rejected by checkpatch as a security risk, so
+ * write access is restricted to owner and group.
+ */
+static struct bin_attribute dev_attr_data = {
+	.attr = {
+		.name = "data",
+		.mode = (S_IRUGO | S_IWUSR | S_IWGRP),
+	},
+	.size = 0,
+	.read = fwu_sysfs_show_image,
+	.write = fwu_sysfs_store_image,
+};
+
+/*
+ * sysfs device attributes: control nodes are write-only (0220) with the
+ * show-error stub, info nodes are read-only (0444) with the store-error stub.
+ */
+static struct device_attribute attrs[] = {
+	__ATTR(dorecovery, 0220,
+			synaptics_rmi4_show_error,
+			fwu_sysfs_do_recovery_store),
+	__ATTR(doreflash, 0220,
+			synaptics_rmi4_show_error,
+			fwu_sysfs_do_reflash_store),
+	__ATTR(writeconfig, 0220,
+			synaptics_rmi4_show_error,
+			fwu_sysfs_write_config_store),
+	__ATTR(readconfig, 0220,
+			synaptics_rmi4_show_error,
+			fwu_sysfs_read_config_store),
+	__ATTR(configarea, 0220,
+			synaptics_rmi4_show_error,
+			fwu_sysfs_config_area_store),
+	__ATTR(imagename, 0220,
+			synaptics_rmi4_show_error,
+			fwu_sysfs_image_name_store),
+	__ATTR(imagesize, 0220,
+			synaptics_rmi4_show_error,
+			fwu_sysfs_image_size_store),
+	__ATTR(blocksize, 0444,
+			fwu_sysfs_block_size_show,
+			synaptics_rmi4_store_error),
+	__ATTR(fwblockcount, 0444,
+			fwu_sysfs_firmware_block_count_show,
+			synaptics_rmi4_store_error),
+	__ATTR(configblockcount, 0444,
+			fwu_sysfs_configuration_block_count_show,
+			synaptics_rmi4_store_error),
+	__ATTR(dispconfigblockcount, 0444,
+			fwu_sysfs_disp_config_block_count_show,
+			synaptics_rmi4_store_error),
+	__ATTR(permconfigblockcount, 0444,
+			fwu_sysfs_perm_config_block_count_show,
+			synaptics_rmi4_store_error),
+	__ATTR(blconfigblockcount, 0444,
+			fwu_sysfs_bl_config_block_count_show,
+			synaptics_rmi4_store_error),
+	__ATTR(guestcodeblockcount, 0444,
+			fwu_sysfs_guest_code_block_count_show,
+			synaptics_rmi4_store_error),
+	__ATTR(writeguestcode, 0220,
+			synaptics_rmi4_show_error,
+			fwu_sysfs_write_guest_code_store),
+};
+
+static struct synaptics_rmi4_fwu_handle *fwu;
+
+DECLARE_COMPLETION(fwu_remove_complete);
+
+/*
+ * Decode a 4-byte little-endian field (as stored in image headers and
+ * container descriptors) into a host unsigned int.
+ */
+static unsigned int le_to_uint(const unsigned char *ptr)
+{
+	return (unsigned int)ptr[0] +
+			(unsigned int)ptr[1] * 0x100 +
+			(unsigned int)ptr[2] * 0x10000 +
+			(unsigned int)ptr[3] * 0x1000000;
+}
+
+/*
+ * Ensure fwu->read_config_buf can hold at least count bytes, reallocating
+ * (old contents discarded) only when the current buffer is too small.
+ * On allocation failure the tracked size is reset to 0 and -ENOMEM is
+ * returned; otherwise returns 0.
+ */
+static int fwu_allocate_read_config_buf(unsigned int count)
+{
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (count > fwu->read_config_buf_size) {
+		kfree(fwu->read_config_buf);
+		fwu->read_config_buf = kzalloc(count, GFP_KERNEL);
+		if (!fwu->read_config_buf) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for fwu->read_config_buf\n",
+					__func__);
+			fwu->read_config_buf_size = 0;
+			return -ENOMEM;
+		}
+		fwu->read_config_buf_size = count;
+	}
+
+	return 0;
+}
+
+/*
+ * Compare the partition layout on the device (fwu->phyaddr) with the one in
+ * the firmware image (fwu->img.phyaddr) and set fwu->new_partition_table
+ * if any relevant start address differs. Display-config and guest-code
+ * addresses are only compared when the device advertises those areas.
+ */
+static void fwu_compare_partition_tables(void)
+{
+	if (fwu->phyaddr.ui_firmware != fwu->img.phyaddr.ui_firmware) {
+		fwu->new_partition_table = true;
+		return;
+	}
+
+	if (fwu->phyaddr.ui_config != fwu->img.phyaddr.ui_config) {
+		fwu->new_partition_table = true;
+		return;
+	}
+
+	if (fwu->flash_properties.has_disp_config) {
+		if (fwu->phyaddr.dp_config != fwu->img.phyaddr.dp_config) {
+			fwu->new_partition_table = true;
+			return;
+		}
+	}
+
+	if (fwu->has_guest_code) {
+		if (fwu->phyaddr.guest_code != fwu->img.phyaddr.guest_code) {
+			fwu->new_partition_table = true;
+			return;
+		}
+	}
+
+	fwu->new_partition_table = false;
+
+	return;
+}
+
+/*
+ * Parse a v7/v8 partition table and record, per known partition, its block
+ * count and (where applicable) its start physical address.
+ *
+ * partition_table - raw table; entries are 8 bytes each starting at offset 2
+ * blkcount        - output block counts, including a running total_count
+ * phyaddr         - output start physical addresses
+ *
+ * Fix: the BOOTLOADER_PARTITION case previously logged "Core config block
+ * count" with blkcount->ui_config (copy-paste from the case above) instead
+ * of the bootloader image count it had just stored.
+ */
+static void fwu_parse_partition_table(const unsigned char *partition_table,
+		struct block_count *blkcount, struct physical_address *phyaddr)
+{
+	unsigned char ii;
+	unsigned char index;
+	unsigned char offset;
+	unsigned short partition_length;
+	unsigned short physical_address;
+	struct partition_table *ptable;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	for (ii = 0; ii < fwu->partitions; ii++) {
+		index = ii * 8 + 2;
+		ptable = (struct partition_table *)&partition_table[index];
+		partition_length = ptable->partition_length_15_8 << 8 |
+				ptable->partition_length_7_0;
+		physical_address = ptable->start_physical_address_15_8 << 8 |
+				ptable->start_physical_address_7_0;
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Partition entry %d:\n",
+				__func__, ii);
+		for (offset = 0; offset < 8; offset++) {
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: 0x%02x\n",
+					__func__,
+					partition_table[index + offset]);
+		}
+		switch (ptable->partition_id) {
+		case CORE_CODE_PARTITION:
+			blkcount->ui_firmware = partition_length;
+			phyaddr->ui_firmware = physical_address;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Core code block count: %d\n",
+					__func__, blkcount->ui_firmware);
+			blkcount->total_count += partition_length;
+			break;
+		case CORE_CONFIG_PARTITION:
+			blkcount->ui_config = partition_length;
+			phyaddr->ui_config = physical_address;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Core config block count: %d\n",
+					__func__, blkcount->ui_config);
+			blkcount->total_count += partition_length;
+			break;
+		case BOOTLOADER_PARTITION:
+			blkcount->bl_image = partition_length;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Bootloader block count: %d\n",
+					__func__, blkcount->bl_image);
+			blkcount->total_count += partition_length;
+			break;
+		case DISPLAY_CONFIG_PARTITION:
+			blkcount->dp_config = partition_length;
+			phyaddr->dp_config = physical_address;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Display config block count: %d\n",
+					__func__, blkcount->dp_config);
+			blkcount->total_count += partition_length;
+			break;
+		case FLASH_CONFIG_PARTITION:
+			blkcount->fl_config = partition_length;
+			phyaddr->fl_config = physical_address;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Flash config block count: %d\n",
+					__func__, blkcount->fl_config);
+			blkcount->total_count += partition_length;
+			break;
+		case GUEST_CODE_PARTITION:
+			blkcount->guest_code = partition_length;
+			phyaddr->guest_code = physical_address;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Guest code block count: %d\n",
+					__func__, blkcount->guest_code);
+			blkcount->total_count += partition_length;
+			break;
+		case GUEST_SERIALIZATION_PARTITION:
+			blkcount->pm_config = partition_length;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Guest serialization block count: %d\n",
+					__func__, blkcount->pm_config);
+			blkcount->total_count += partition_length;
+			break;
+		case GLOBAL_PARAMETERS_PARTITION:
+			blkcount->bl_config = partition_length;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Global parameters block count: %d\n",
+					__func__, blkcount->bl_config);
+			blkcount->total_count += partition_length;
+			break;
+		case DEVICE_CONFIG_PARTITION:
+			blkcount->lockdown = partition_length;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Device config block count: %d\n",
+					__func__, blkcount->lockdown);
+			blkcount->total_count += partition_length;
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+/*
+ * Walk the container descriptors referenced by the image's bootloader
+ * container (fwu->img.bootloader, already located by
+ * fwu_parse_image_header_10) and record the bootloader image, bootloader
+ * config, and lockdown blocks it carries. Unknown container IDs are ignored.
+ */
+static void fwu_parse_image_header_10_bl_container(const unsigned char *image)
+{
+	unsigned char ii;
+	unsigned char num_of_containers;
+	unsigned int addr;
+	unsigned int container_id;
+	unsigned int length;
+	const unsigned char *content;
+	struct container_descriptor *descriptor;
+
+	/* first 4 bytes of the container are skipped; the rest is a list of
+	 * 4-byte descriptor addresses
+	 */
+	num_of_containers = (fwu->img.bootloader.size - 4) / 4;
+
+	for (ii = 1; ii <= num_of_containers; ii++) {
+		addr = le_to_uint(fwu->img.bootloader.data + (ii * 4));
+		descriptor = (struct container_descriptor *)(image + addr);
+		container_id = descriptor->container_id[0] |
+				descriptor->container_id[1] << 8;
+		content = image + le_to_uint(descriptor->content_address);
+		length = le_to_uint(descriptor->content_length);
+		switch (container_id) {
+		case BL_IMAGE_CONTAINER:
+			fwu->img.bl_image.data = content;
+			fwu->img.bl_image.size = length;
+			break;
+		case BL_CONFIG_CONTAINER:
+		case GLOBAL_PARAMETERS_CONTAINER:
+			fwu->img.bl_config.data = content;
+			fwu->img.bl_config.size = length;
+			break;
+		case BL_LOCKDOWN_INFO_CONTAINER:
+		case DEVICE_CONFIG_CONTAINER:
+			fwu->img.lockdown.data = content;
+			fwu->img.lockdown.size = length;
+			break;
+		default:
+			break;
+		};
+	}
+
+	return;
+}
+
+/*
+ * Parse a header-version-0x10 firmware image (fwu->image): follow the top
+ * level container to its list of 4-byte descriptor addresses and record the
+ * location/size of each recognized content area (UI firmware/config,
+ * bootloader, guest code, display/permanent/flash config, firmware ID)
+ * into fwu->img. Bootloader sub-containers are handled by
+ * fwu_parse_image_header_10_bl_container().
+ */
+static void fwu_parse_image_header_10(void)
+{
+	unsigned char ii;
+	unsigned char num_of_containers;
+	unsigned int addr;
+	unsigned int offset;
+	unsigned int container_id;
+	unsigned int length;
+	const unsigned char *image;
+	const unsigned char *content;
+	struct container_descriptor *descriptor;
+	struct image_header_10 *header;
+
+	image = fwu->image;
+	header = (struct image_header_10 *)image;
+
+	fwu->img.checksum = le_to_uint(header->checksum);
+
+	/* address of top level container */
+	offset = le_to_uint(header->top_level_container_start_addr);
+	descriptor = (struct container_descriptor *)(image + offset);
+
+	/* address of top level container content */
+	offset = le_to_uint(descriptor->content_address);
+	num_of_containers = le_to_uint(descriptor->content_length) / 4;
+
+	for (ii = 0; ii < num_of_containers; ii++) {
+		addr = le_to_uint(image + offset);
+		offset += 4;
+		descriptor = (struct container_descriptor *)(image + addr);
+		container_id = descriptor->container_id[0] |
+				descriptor->container_id[1] << 8;
+		content = image + le_to_uint(descriptor->content_address);
+		length = le_to_uint(descriptor->content_length);
+		switch (container_id) {
+		case UI_CONTAINER:
+		case CORE_CODE_CONTAINER:
+			fwu->img.ui_firmware.data = content;
+			fwu->img.ui_firmware.size = length;
+			break;
+		case UI_CONFIG_CONTAINER:
+		case CORE_CONFIG_CONTAINER:
+			fwu->img.ui_config.data = content;
+			fwu->img.ui_config.size = length;
+			break;
+		case BL_CONTAINER:
+			/* first content byte is the bootloader version */
+			fwu->img.bl_version = *content;
+			fwu->img.bootloader.data = content;
+			fwu->img.bootloader.size = length;
+			fwu_parse_image_header_10_bl_container(image);
+			break;
+		case GUEST_CODE_CONTAINER:
+			fwu->img.contains_guest_code = true;
+			fwu->img.guest_code.data = content;
+			fwu->img.guest_code.size = length;
+			break;
+		case DISPLAY_CONFIG_CONTAINER:
+			fwu->img.contains_disp_config = true;
+			fwu->img.dp_config.data = content;
+			fwu->img.dp_config.size = length;
+			break;
+		case PERMANENT_CONFIG_CONTAINER:
+		case GUEST_SERIALIZATION_CONTAINER:
+			fwu->img.contains_perm_config = true;
+			fwu->img.pm_config.data = content;
+			fwu->img.pm_config.size = length;
+			break;
+		case FLASH_CONFIG_CONTAINER:
+			fwu->img.contains_flash_config = true;
+			fwu->img.fl_config.data = content;
+			fwu->img.fl_config.size = length;
+			break;
+		case GENERAL_INFORMATION_CONTAINER:
+			fwu->img.contains_firmware_id = true;
+			/* firmware ID lives 4 bytes into the content */
+			fwu->img.firmware_id = le_to_uint(content + 4);
+			break;
+		default:
+			break;
+		}
+	}
+
+	return;
+}
+
+/*
+ * Parse a header-version-0x05/0x06 firmware image (fwu->image) into fwu->img:
+ * checksum, optional bootloader area, UI firmware and config (which follow
+ * the fixed IMAGE_AREA_OFFSET header area), optional display config, product
+ * ID strings, firmware ID, and the fixed-size lockdown block preceding the
+ * image area.
+ */
+static void fwu_parse_image_header_05_06(void)
+{
+	int retval;
+	const unsigned char *image;
+	struct image_header_05_06 *header;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	image = fwu->image;
+	header = (struct image_header_05_06 *)image;
+
+	fwu->img.checksum = le_to_uint(header->checksum);
+
+	fwu->img.bl_version = header->header_version;
+
+	fwu->img.contains_bootloader = header->options_bootloader;
+	if (fwu->img.contains_bootloader)
+		fwu->img.bootloader_size = le_to_uint(header->bootloader_size);
+
+	fwu->img.ui_firmware.size = le_to_uint(header->firmware_size);
+	if (fwu->img.ui_firmware.size) {
+		/* UI firmware follows the header area and, if present, the
+		 * bootloader image
+		 */
+		fwu->img.ui_firmware.data = image + IMAGE_AREA_OFFSET;
+		if (fwu->img.contains_bootloader)
+			fwu->img.ui_firmware.data += fwu->img.bootloader_size;
+	}
+
+	if ((fwu->img.bl_version == BL_V6) && header->options_tddi)
+		fwu->img.ui_firmware.data = image + IMAGE_AREA_OFFSET;
+
+	fwu->img.ui_config.size = le_to_uint(header->config_size);
+	if (fwu->img.ui_config.size) {
+		fwu->img.ui_config.data = fwu->img.ui_firmware.data +
+				fwu->img.ui_firmware.size;
+	}
+
+	if ((fwu->img.bl_version == BL_V5 && fwu->img.contains_bootloader) ||
+			(fwu->img.bl_version == BL_V6 && header->options_tddi))
+		fwu->img.contains_disp_config = true;
+	else
+		fwu->img.contains_disp_config = false;
+
+	/* the 0x40-0x4f header bytes are a union: display config address/size
+	 * when display config is present, customer product ID otherwise
+	 */
+	if (fwu->img.contains_disp_config) {
+		fwu->img.disp_config_offset = le_to_uint(header->dsp_cfg_addr);
+		fwu->img.dp_config.size = le_to_uint(header->dsp_cfg_size);
+		fwu->img.dp_config.data = image + fwu->img.disp_config_offset;
+	} else {
+		retval = secure_memcpy(fwu->img.cstmr_product_id,
+				sizeof(fwu->img.cstmr_product_id),
+				header->cstmr_product_id,
+				sizeof(header->cstmr_product_id),
+				PRODUCT_ID_SIZE);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to copy custom product ID string\n",
+					__func__);
+		}
+		fwu->img.cstmr_product_id[PRODUCT_ID_SIZE] = 0;
+	}
+
+	fwu->img.contains_firmware_id = header->options_firmware_id;
+	if (fwu->img.contains_firmware_id)
+		fwu->img.firmware_id = le_to_uint(header->firmware_id);
+
+	retval = secure_memcpy(fwu->img.product_id,
+			sizeof(fwu->img.product_id),
+			header->product_id,
+			sizeof(header->product_id),
+			PRODUCT_ID_SIZE);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy product ID string\n",
+				__func__);
+	}
+	fwu->img.product_id[PRODUCT_ID_SIZE] = 0;
+
+	/* lockdown block immediately precedes the image area */
+	fwu->img.lockdown.size = LOCKDOWN_SIZE;
+	fwu->img.lockdown.data = image + IMAGE_AREA_OFFSET - LOCKDOWN_SIZE;
+
+	return;
+}
+
+/*
+ * Top-level image parser: reset fwu->img and dispatch to the parser matching
+ * the image's major header version. For v7/v8 bootloaders the image must
+ * carry a flash config partition table, which is parsed and compared against
+ * the device's table to decide whether a repartition is needed.
+ *
+ * Returns 0 on success, -EINVAL for an unsupported header version or a
+ * v7/v8 image lacking flash config.
+ */
+static int fwu_parse_image_info(void)
+{
+	struct image_header_10 *header;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	header = (struct image_header_10 *)fwu->image;
+
+	memset(&fwu->img, 0x00, sizeof(fwu->img));
+
+	switch (header->major_header_version) {
+	case IMAGE_HEADER_VERSION_10:
+		fwu_parse_image_header_10();
+		break;
+	case IMAGE_HEADER_VERSION_05:
+	case IMAGE_HEADER_VERSION_06:
+		fwu_parse_image_header_05_06();
+		break;
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Unsupported image file format (0x%02x)\n",
+				__func__, header->major_header_version);
+		return -EINVAL;
+	}
+
+	if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8) {
+		if (!fwu->img.contains_flash_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: No flash config found in firmware image\n",
+					__func__);
+			return -EINVAL;
+		}
+
+		fwu_parse_partition_table(fwu->img.fl_config.data,
+				&fwu->img.blkcount, &fwu->img.phyaddr);
+
+		fwu_compare_partition_tables();
+	} else {
+		fwu->new_partition_table = false;
+	}
+
+	return 0;
+}
+
+/*
+ * Read the F34 flash status and command registers and cache the decoded
+ * values in fwu->in_bl_mode, fwu->flash_status and fwu->command.
+ *
+ * The field widths within the registers depend on the bootloader
+ * version (v5/v6/v7/v8).  Returns 0 on success or a negative errno
+ * from the register reads.
+ */
+static int fwu_read_flash_status(void)
+{
+	int retval;
+	unsigned char status;
+	unsigned char command;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fwu->f34_fd.data_base_addr + fwu->off.flash_status,
+			&status,
+			sizeof(status));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read flash status\n",
+				__func__);
+		return retval;
+	}
+
+	/* Bit 7 of the status register flags bootloader mode. */
+	fwu->in_bl_mode = status >> 7;
+
+	if (fwu->bl_version == BL_V5)
+		fwu->flash_status = (status >> 4) & MASK_3BIT;
+	else if (fwu->bl_version == BL_V6)
+		fwu->flash_status = status & MASK_3BIT;
+	else if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+		fwu->flash_status = status & MASK_5BIT;
+
+	/* Non-zero flash status indicates the last operation failed;
+	 * log it together with the previously issued command.
+	 */
+	if (fwu->flash_status != 0x00) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Flash status = %d, command = 0x%02x\n",
+				__func__, fwu->flash_status, fwu->command);
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fwu->f34_fd.data_base_addr + fwu->off.flash_cmd,
+			&command,
+			sizeof(command));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read flash command\n",
+				__func__);
+		return retval;
+	}
+
+	if (fwu->bl_version == BL_V5)
+		fwu->command = command & MASK_4BIT;
+	else if (fwu->bl_version == BL_V6)
+		fwu->command = command & MASK_6BIT;
+	else if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+		fwu->command = command;
+
+	return 0;
+}
+
+/*
+ * Wait up to @timeout_ms for the flash controller to return to idle
+ * (fwu->command == CMD_IDLE with a clear flash status).
+ *
+ * When @poll is true the status registers are re-read on every
+ * iteration; when false they are only read on the final iteration,
+ * relying on fwu->command/fwu->flash_status being updated elsewhere in
+ * the meantime (presumably by the attention/interrupt path -- TODO
+ * confirm against the caller).
+ *
+ * Returns 0 once idle, -ETIMEDOUT otherwise.
+ */
+static int fwu_wait_for_idle(int timeout_ms, bool poll)
+{
+	int count = 0;
+	int timeout_count = ((timeout_ms * 1000) / MAX_SLEEP_TIME_US) + 1;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	do {
+		usleep_range(MIN_SLEEP_TIME_US, MAX_SLEEP_TIME_US);
+
+		count++;
+		if (poll || (count == timeout_count))
+			fwu_read_flash_status();
+
+		if ((fwu->command == CMD_IDLE) && (fwu->flash_status == 0x00))
+			return 0;
+	} while (count < timeout_count);
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Timed out waiting for idle status\n",
+			__func__);
+
+	return -ETIMEDOUT;
+}
+
+/*
+ * Issue a v7/v8 erase or enter-bootloader command as a single register
+ * transaction: partition ID, command and bootloader-ID payload are
+ * written together through the data_1_5 register block.
+ *
+ * Fix: the original switch had no default case, so an unrecognized
+ * @cmd fell through and wrote an all-zero partition/command packet to
+ * the device.  Unknown commands are now rejected with -EINVAL.  The
+ * stray semicolon after the switch block is also removed.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int fwu_write_f34_v7_command_single_transaction(unsigned char cmd)
+{
+	int retval;
+	unsigned char base;
+	struct f34_v7_data_1_5 data_1_5;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	base = fwu->f34_fd.data_base_addr;
+
+	memset(data_1_5.data, 0x00, sizeof(data_1_5.data));
+
+	switch (cmd) {
+	case CMD_ERASE_ALL:
+		data_1_5.partition_id = CORE_CODE_PARTITION;
+		data_1_5.command = CMD_V7_ERASE_AP;
+		break;
+	case CMD_ERASE_UI_FIRMWARE:
+		data_1_5.partition_id = CORE_CODE_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ERASE_BL_CONFIG:
+		data_1_5.partition_id = GLOBAL_PARAMETERS_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ERASE_UI_CONFIG:
+		data_1_5.partition_id = CORE_CONFIG_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ERASE_DISP_CONFIG:
+		data_1_5.partition_id = DISPLAY_CONFIG_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ERASE_FLASH_CONFIG:
+		data_1_5.partition_id = FLASH_CONFIG_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ERASE_GUEST_CODE:
+		data_1_5.partition_id = GUEST_CODE_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ENABLE_FLASH_PROG:
+		data_1_5.partition_id = BOOTLOADER_PARTITION;
+		data_1_5.command = CMD_V7_ENTER_BL;
+		break;
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid command 0x%02x\n",
+				__func__, cmd);
+		return -EINVAL;
+	}
+
+	/* Erase/enter-BL commands must carry the bootloader ID. */
+	data_1_5.payload_0 = fwu->bootloader_id[0];
+	data_1_5.payload_1 = fwu->bootloader_id[1];
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			base + fwu->off.partition_id,
+			data_1_5.data,
+			sizeof(data_1_5.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write single transaction command\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Translate a generic flash command into its v7/v8 bootloader opcode
+ * and write it to the F34 command register.
+ *
+ * Erase and enter-bootloader commands are routed through
+ * fwu_write_f34_v7_command_single_transaction() instead, which also
+ * supplies the partition ID and bootloader-ID payload.
+ *
+ * Returns 0 on success, -EINVAL for an unknown command, or a negative
+ * errno from the register write.
+ */
+static int fwu_write_f34_v7_command(unsigned char cmd)
+{
+	int retval;
+	unsigned char base;
+	unsigned char command;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	base = fwu->f34_fd.data_base_addr;
+
+	/* Map the driver-level command to the v7 opcode. */
+	switch (cmd) {
+	case CMD_WRITE_FW:
+	case CMD_WRITE_CONFIG:
+	case CMD_WRITE_LOCKDOWN:
+	case CMD_WRITE_GUEST_CODE:
+		command = CMD_V7_WRITE;
+		break;
+	case CMD_READ_CONFIG:
+		command = CMD_V7_READ;
+		break;
+	case CMD_ERASE_ALL:
+		command = CMD_V7_ERASE_AP;
+		break;
+	case CMD_ERASE_UI_FIRMWARE:
+	case CMD_ERASE_BL_CONFIG:
+	case CMD_ERASE_UI_CONFIG:
+	case CMD_ERASE_DISP_CONFIG:
+	case CMD_ERASE_FLASH_CONFIG:
+	case CMD_ERASE_GUEST_CODE:
+		command = CMD_V7_ERASE;
+		break;
+	case CMD_ENABLE_FLASH_PROG:
+		command = CMD_V7_ENTER_BL;
+		break;
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid command 0x%02x\n",
+				__func__, cmd);
+		return -EINVAL;
+	};
+
+	/* Cache the opcode; fwu_wait_for_idle() compares against it. */
+	fwu->command = command;
+
+	/* Erase/enter-BL commands need the single-transaction path. */
+	switch (cmd) {
+	case CMD_ERASE_ALL:
+	case CMD_ERASE_UI_FIRMWARE:
+	case CMD_ERASE_BL_CONFIG:
+	case CMD_ERASE_UI_CONFIG:
+	case CMD_ERASE_DISP_CONFIG:
+	case CMD_ERASE_FLASH_CONFIG:
+	case CMD_ERASE_GUEST_CODE:
+	case CMD_ENABLE_FLASH_PROG:
+		retval = fwu_write_f34_v7_command_single_transaction(cmd);
+		if (retval < 0)
+			return retval;
+		else
+			return 0;
+	default:
+		break;
+	};
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			base + fwu->off.flash_cmd,
+			&command,
+			sizeof(command));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write flash command\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Translate a generic flash command into its v5/v6 bootloader opcode
+ * and write it to the F34 command register.  Erase and
+ * enter-bootloader commands first unlock the operation by writing the
+ * bootloader ID to the payload registers.
+ *
+ * Returns 0 on success, -EINVAL for an unknown command, or a negative
+ * errno from the register writes.
+ */
+static int fwu_write_f34_v5v6_command(unsigned char cmd)
+{
+	int retval;
+	unsigned char base;
+	unsigned char command;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	base = fwu->f34_fd.data_base_addr;
+
+	switch (cmd) {
+	case CMD_IDLE:
+		command = CMD_V5V6_IDLE;
+		break;
+	case CMD_WRITE_FW:
+		command = CMD_V5V6_WRITE_FW;
+		break;
+	case CMD_WRITE_CONFIG:
+		command = CMD_V5V6_WRITE_CONFIG;
+		break;
+	case CMD_WRITE_LOCKDOWN:
+		command = CMD_V5V6_WRITE_LOCKDOWN;
+		break;
+	case CMD_WRITE_GUEST_CODE:
+		command = CMD_V5V6_WRITE_GUEST_CODE;
+		break;
+	case CMD_READ_CONFIG:
+		command = CMD_V5V6_READ_CONFIG;
+		break;
+	case CMD_ERASE_ALL:
+		command = CMD_V5V6_ERASE_ALL;
+		break;
+	case CMD_ERASE_UI_CONFIG:
+		command = CMD_V5V6_ERASE_UI_CONFIG;
+		break;
+	case CMD_ERASE_DISP_CONFIG:
+		command = CMD_V5V6_ERASE_DISP_CONFIG;
+		break;
+	case CMD_ERASE_GUEST_CODE:
+		command = CMD_V5V6_ERASE_GUEST_CODE;
+		break;
+	case CMD_ENABLE_FLASH_PROG:
+		command = CMD_V5V6_ENABLE_FLASH_PROG;
+		break;
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid command 0x%02x\n",
+				__func__, cmd);
+		return -EINVAL;
+	}
+
+	/* Destructive commands require the bootloader ID as an unlock
+	 * key before the command itself is written.
+	 */
+	switch (cmd) {
+	case CMD_ERASE_ALL:
+	case CMD_ERASE_UI_CONFIG:
+	case CMD_ERASE_DISP_CONFIG:
+	case CMD_ERASE_GUEST_CODE:
+	case CMD_ENABLE_FLASH_PROG:
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				base + fwu->off.payload,
+				fwu->bootloader_id,
+				sizeof(fwu->bootloader_id));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write bootloader ID\n",
+					__func__);
+			return retval;
+		}
+		break;
+	default:
+		break;
+	};
+
+	/* Cache the opcode; fwu_wait_for_idle() compares against it. */
+	fwu->command = command;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			base + fwu->off.flash_cmd,
+			&command,
+			sizeof(command));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write command 0x%02x\n",
+				__func__, command);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Write a flash command, dispatching to the writer that matches the
+ * detected bootloader protocol version.
+ */
+static int fwu_write_f34_command(unsigned char cmd)
+{
+	if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+		return fwu_write_f34_v7_command(cmd);
+
+	return fwu_write_f34_v5v6_command(cmd);
+}
+
+/*
+ * Write the flash partition ID corresponding to @cmd to the F34
+ * partition-ID register (v7/v8 bootloader protocol).  For config
+ * read/write commands the partition is selected by fwu->config_area.
+ *
+ * Fix: in the CMD_WRITE_CONFIG/CMD_READ_CONFIG case, an unrecognized
+ * config area previously fell through the if/else chain and wrote an
+ * *uninitialized* partition value to the device (undefined behavior).
+ * Unknown config areas are now rejected with -EINVAL.  The stray
+ * semicolon after the switch block is also removed.
+ *
+ * Returns 0 on success, -EINVAL for an unknown command or config area,
+ * or a negative errno from the register write.
+ */
+static int fwu_write_f34_v7_partition_id(unsigned char cmd)
+{
+	int retval;
+	unsigned char base;
+	unsigned char partition;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	base = fwu->f34_fd.data_base_addr;
+
+	switch (cmd) {
+	case CMD_WRITE_FW:
+		partition = CORE_CODE_PARTITION;
+		break;
+	case CMD_WRITE_CONFIG:
+	case CMD_READ_CONFIG:
+		if (fwu->config_area == UI_CONFIG_AREA)
+			partition = CORE_CONFIG_PARTITION;
+		else if (fwu->config_area == DP_CONFIG_AREA)
+			partition = DISPLAY_CONFIG_PARTITION;
+		else if (fwu->config_area == PM_CONFIG_AREA)
+			partition = GUEST_SERIALIZATION_PARTITION;
+		else if (fwu->config_area == BL_CONFIG_AREA)
+			partition = GLOBAL_PARAMETERS_PARTITION;
+		else if (fwu->config_area == FLASH_CONFIG_AREA)
+			partition = FLASH_CONFIG_PARTITION;
+		else {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Invalid config area 0x%02x\n",
+					__func__, fwu->config_area);
+			return -EINVAL;
+		}
+		break;
+	case CMD_WRITE_LOCKDOWN:
+		partition = DEVICE_CONFIG_PARTITION;
+		break;
+	case CMD_WRITE_GUEST_CODE:
+		partition = GUEST_CODE_PARTITION;
+		break;
+	case CMD_ERASE_ALL:
+		partition = CORE_CODE_PARTITION;
+		break;
+	case CMD_ERASE_BL_CONFIG:
+		partition = GLOBAL_PARAMETERS_PARTITION;
+		break;
+	case CMD_ERASE_UI_CONFIG:
+		partition = CORE_CONFIG_PARTITION;
+		break;
+	case CMD_ERASE_DISP_CONFIG:
+		partition = DISPLAY_CONFIG_PARTITION;
+		break;
+	case CMD_ERASE_FLASH_CONFIG:
+		partition = FLASH_CONFIG_PARTITION;
+		break;
+	case CMD_ERASE_GUEST_CODE:
+		partition = GUEST_CODE_PARTITION;
+		break;
+	case CMD_ENABLE_FLASH_PROG:
+		partition = BOOTLOADER_PARTITION;
+		break;
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid command 0x%02x\n",
+				__func__, cmd);
+		return -EINVAL;
+	}
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			base + fwu->off.partition_id,
+			&partition,
+			sizeof(partition));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write partition ID\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Write the partition ID for @cmd.  Partition IDs only exist in the
+ * v7/v8 bootloader protocol; for older bootloaders this is a no-op.
+ */
+static int fwu_write_f34_partition_id(unsigned char cmd)
+{
+	if (fwu->bl_version != BL_V7 && fwu->bl_version != BL_V8)
+		return 0;
+
+	return fwu_write_f34_v7_partition_id(cmd);
+}
+
+/*
+ * Read the device's flash-config partition table into
+ * @partition_table (caller-allocated, fwu->partition_table_bytes
+ * long) using the v7/v8 read-config command sequence: select the
+ * flash-config partition, write block number 0 and the transfer
+ * length, issue CMD_READ_CONFIG, wait for idle, then read the payload.
+ *
+ * Side effect: sets fwu->config_area to FLASH_CONFIG_AREA.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int fwu_read_f34_v7_partition_table(unsigned char *partition_table)
+{
+	int retval;
+	unsigned char base;
+	unsigned char length[2];
+	unsigned short block_number = 0;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	base = fwu->f34_fd.data_base_addr;
+
+	fwu->config_area = FLASH_CONFIG_AREA;
+
+	retval = fwu_write_f34_partition_id(CMD_READ_CONFIG);
+	if (retval < 0)
+		return retval;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			base + fwu->off.block_number,
+			(unsigned char *)&block_number,
+			sizeof(block_number));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write block number\n",
+				__func__);
+		return retval;
+	}
+
+	/* Transfer length is little-endian across two registers. */
+	length[0] = (unsigned char)(fwu->flash_config_length & MASK_8BIT);
+	length[1] = (unsigned char)(fwu->flash_config_length >> 8);
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			base + fwu->off.transfer_length,
+			length,
+			sizeof(length));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write transfer length\n",
+				__func__);
+		return retval;
+	}
+
+	retval = fwu_write_f34_command(CMD_READ_CONFIG);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write command\n",
+				__func__);
+		return retval;
+	}
+
+	/* Poll (no interrupts yet at this stage of setup). */
+	retval = fwu_wait_for_idle(WRITE_WAIT_MS, true);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to wait for idle status\n",
+				__func__);
+		return retval;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			base + fwu->off.payload,
+			partition_table,
+			fwu->partition_table_bytes);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read block data\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Read and decode the F34 query registers for a v7 bootloader:
+ * bootloader ID/version (promoting fwu->bl_version to BL_V8 if the
+ * major revision says so), block/payload/flash-config sizes, data
+ * register offsets, the set of supported partitions, and finally the
+ * device partition table, from which block counts and capability
+ * flags (display/PM/BL config, guest code) are derived.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int fwu_read_f34_v7_queries(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char base;
+	unsigned char index;
+	unsigned char offset;
+	unsigned char *ptable;
+	struct f34_v7_query_0 query_0;
+	struct f34_v7_query_1_7 query_1_7;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	base = fwu->f34_fd.query_base_addr;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			base,
+			query_0.data,
+			sizeof(query_0.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read query 0\n",
+				__func__);
+		return retval;
+	}
+
+	/* Query 1 starts after query 0's subpacket 1. */
+	offset = query_0.subpacket_1_size + 1;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			base + offset,
+			query_1_7.data,
+			sizeof(query_1_7.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read queries 1 to 7\n",
+				__func__);
+		return retval;
+	}
+
+	fwu->bootloader_id[0] = query_1_7.bl_minor_revision;
+	fwu->bootloader_id[1] = query_1_7.bl_major_revision;
+
+	/* F34 v2 covers both BL7 and BL8; distinguish by major rev. */
+	if (fwu->bootloader_id[1] == BL_V8)
+		fwu->bl_version = BL_V8;
+
+	fwu->block_size = query_1_7.block_size_15_8 << 8 |
+			query_1_7.block_size_7_0;
+
+	fwu->flash_config_length = query_1_7.flash_config_length_15_8 << 8 |
+			query_1_7.flash_config_length_7_0;
+
+	fwu->payload_length = query_1_7.payload_length_15_8 << 8 |
+			query_1_7.payload_length_7_0;
+
+	/* v7 data register layout is fixed, unlike v5/v6. */
+	fwu->off.flash_status = V7_FLASH_STATUS_OFFSET;
+	fwu->off.partition_id = V7_PARTITION_ID_OFFSET;
+	fwu->off.block_number = V7_BLOCK_NUMBER_OFFSET;
+	fwu->off.transfer_length = V7_TRANSFER_LENGTH_OFFSET;
+	fwu->off.flash_cmd = V7_COMMAND_OFFSET;
+	fwu->off.payload = V7_PAYLOAD_OFFSET;
+
+	/* The partition-support bitmap sits at the end of query 1-7;
+	 * each set bit is one supported partition.
+	 */
+	index = sizeof(query_1_7.data) - V7_PARTITION_SUPPORT_BYTES;
+
+	fwu->partitions = 0;
+	for (offset = 0; offset < V7_PARTITION_SUPPORT_BYTES; offset++) {
+		for (ii = 0; ii < 8; ii++) {
+			if (query_1_7.data[index + offset] & (1 << ii))
+				fwu->partitions++;
+		}
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Supported partitions: 0x%02x\n",
+				__func__, query_1_7.data[index + offset]);
+	}
+
+	/* 8 bytes per partition entry plus a 2-byte header -- TODO
+	 * confirm against fwu_parse_partition_table().
+	 */
+	fwu->partition_table_bytes = fwu->partitions * 8 + 2;
+
+	ptable = kzalloc(fwu->partition_table_bytes, GFP_KERNEL);
+	if (!ptable) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for partition table\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	retval = fwu_read_f34_v7_partition_table(ptable);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read partition table\n",
+				__func__);
+		kfree(ptable);
+		return retval;
+	}
+
+	fwu_parse_partition_table(ptable, &fwu->blkcount, &fwu->phyaddr);
+
+	/* Derive capability flags from non-zero block counts. */
+	if (fwu->blkcount.dp_config)
+		fwu->flash_properties.has_disp_config = 1;
+	else
+		fwu->flash_properties.has_disp_config = 0;
+
+	if (fwu->blkcount.pm_config)
+		fwu->flash_properties.has_pm_config = 1;
+	else
+		fwu->flash_properties.has_pm_config = 0;
+
+	if (fwu->blkcount.bl_config)
+		fwu->flash_properties.has_bl_config = 1;
+	else
+		fwu->flash_properties.has_bl_config = 0;
+
+	if (fwu->blkcount.guest_code)
+		fwu->has_guest_code = 1;
+	else
+		fwu->has_guest_code = 0;
+
+	kfree(ptable);
+
+	return 0;
+}
+
+/*
+ * Read and decode the F34 query registers for a v5/v6 bootloader:
+ * bootloader ID, the version-specific data register offsets, block
+ * size, flash properties, per-area block counts, and (on v6 with
+ * query 4) guest-code support.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int fwu_read_f34_v5v6_queries(void)
+{
+	int retval;
+	unsigned char count;
+	unsigned char base;
+	unsigned char buf[10];
+	struct f34_v5v6_flash_properties_2 properties_2;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	base = fwu->f34_fd.query_base_addr;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			base + V5V6_BOOTLOADER_ID_OFFSET,
+			fwu->bootloader_id,
+			sizeof(fwu->bootloader_id));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read bootloader ID\n",
+				__func__);
+		return retval;
+	}
+
+	/* Register offsets differ between the v5 and v6 layouts. */
+	if (fwu->bl_version == BL_V5) {
+		fwu->off.properties = V5_PROPERTIES_OFFSET;
+		fwu->off.block_size = V5_BLOCK_SIZE_OFFSET;
+		fwu->off.block_count = V5_BLOCK_COUNT_OFFSET;
+		fwu->off.block_number = V5_BLOCK_NUMBER_OFFSET;
+		fwu->off.payload = V5_BLOCK_DATA_OFFSET;
+	} else if (fwu->bl_version == BL_V6) {
+		fwu->off.properties = V6_PROPERTIES_OFFSET;
+		fwu->off.properties_2 = V6_PROPERTIES_2_OFFSET;
+		fwu->off.block_size = V6_BLOCK_SIZE_OFFSET;
+		fwu->off.block_count = V6_BLOCK_COUNT_OFFSET;
+		fwu->off.gc_block_count = V6_GUEST_CODE_BLOCK_COUNT_OFFSET;
+		fwu->off.block_number = V6_BLOCK_NUMBER_OFFSET;
+		fwu->off.payload = V6_BLOCK_DATA_OFFSET;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			base + fwu->off.block_size,
+			buf,
+			2);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read block size info\n",
+				__func__);
+		return retval;
+	}
+
+	/* Little-endian byte pair -> fwu->block_size. */
+	batohs(&fwu->block_size, &(buf[0]));
+
+	if (fwu->bl_version == BL_V5) {
+		/* On v5 the command register follows the block data and
+		 * doubles as the status register.
+		 */
+		fwu->off.flash_cmd = fwu->off.payload + fwu->block_size;
+		fwu->off.flash_status = fwu->off.flash_cmd;
+	} else if (fwu->bl_version == BL_V6) {
+		fwu->off.flash_cmd = V6_FLASH_COMMAND_OFFSET;
+		fwu->off.flash_status = V6_FLASH_STATUS_OFFSET;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			base + fwu->off.properties,
+			fwu->flash_properties.data,
+			sizeof(fwu->flash_properties.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read flash properties\n",
+				__func__);
+		return retval;
+	}
+
+	/* Block-count read size: 4 bytes (UI fw + UI config) plus 2
+	 * per optional area; max 10 bytes, matching buf[].
+	 */
+	count = 4;
+
+	if (fwu->flash_properties.has_pm_config)
+		count += 2;
+
+	if (fwu->flash_properties.has_bl_config)
+		count += 2;
+
+	if (fwu->flash_properties.has_disp_config)
+		count += 2;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			base + fwu->off.block_count,
+			buf,
+			count);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read block count info\n",
+				__func__);
+		return retval;
+	}
+
+	batohs(&fwu->blkcount.ui_firmware, &(buf[0]));
+	batohs(&fwu->blkcount.ui_config, &(buf[2]));
+
+	count = 4;
+
+	if (fwu->flash_properties.has_pm_config) {
+		batohs(&fwu->blkcount.pm_config, &(buf[count]));
+		count += 2;
+	}
+
+	if (fwu->flash_properties.has_bl_config) {
+		batohs(&fwu->blkcount.bl_config, &(buf[count]));
+		count += 2;
+	}
+
+	if (fwu->flash_properties.has_disp_config)
+		batohs(&fwu->blkcount.dp_config, &(buf[count]));
+
+	fwu->has_guest_code = false;
+
+	/* Guest-code support is advertised via properties 2 (query 4). */
+	if (fwu->flash_properties.has_query4) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				base + fwu->off.properties_2,
+				properties_2.data,
+				sizeof(properties_2.data));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read flash properties 2\n",
+					__func__);
+			return retval;
+		}
+
+		if (properties_2.has_guest_code) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					base + fwu->off.gc_block_count,
+					buf,
+					2);
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to read guest code block count\n",
+						__func__);
+				return retval;
+			}
+
+			batohs(&fwu->blkcount.guest_code, &(buf[0]));
+			fwu->has_guest_code = true;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Read the F34 queries for the detected bootloader generation.
+ * Block counts and physical addresses are cleared first so stale
+ * values never survive a re-scan.  (BL_V8 is only detected inside the
+ * v7 query path, so only BL_V7 needs checking here.)
+ */
+static int fwu_read_f34_queries(void)
+{
+	memset(&fwu->blkcount, 0x00, sizeof(fwu->blkcount));
+	memset(&fwu->phyaddr, 0x00, sizeof(fwu->phyaddr));
+
+	if (fwu->bl_version == BL_V7)
+		return fwu_read_f34_v7_queries();
+
+	return fwu_read_f34_v5v6_queries();
+}
+
+/*
+ * Write @block_cnt flash blocks from @block_ptr using the v7/v8
+ * protocol: select the partition for @command, reset the block number,
+ * then loop transferring up to max_transfer blocks per iteration
+ * (bounded by both the device payload length and PAGE_SIZE worth of
+ * data), issuing the command and waiting for idle each time.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int fwu_write_f34_v7_blocks(unsigned char *block_ptr,
+		unsigned short block_cnt, unsigned char command)
+{
+	int retval;
+	unsigned char base;
+	unsigned char length[2];
+	unsigned short transfer;
+	unsigned short max_transfer;
+	unsigned short remaining = block_cnt;
+	unsigned short block_number = 0;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	base = fwu->f34_fd.data_base_addr;
+
+	retval = fwu_write_f34_partition_id(command);
+	if (retval < 0)
+		return retval;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			base + fwu->off.block_number,
+			(unsigned char *)&block_number,
+			sizeof(block_number));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write block number\n",
+				__func__);
+		return retval;
+	}
+
+	/* Cap each transfer at one page of data. */
+	if (fwu->payload_length > (PAGE_SIZE / fwu->block_size))
+		max_transfer = PAGE_SIZE / fwu->block_size;
+	else
+		max_transfer = fwu->payload_length;
+
+	do {
+		/* Full-sized chunk while at least max_transfer remain. */
+		if (remaining / max_transfer)
+			transfer = max_transfer;
+		else
+			transfer = remaining;
+
+		/* Transfer length is little-endian across two bytes. */
+		length[0] = (unsigned char)(transfer & MASK_8BIT);
+		length[1] = (unsigned char)(transfer >> 8);
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				base + fwu->off.transfer_length,
+				length,
+				sizeof(length));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write transfer length (%d blocks remaining)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		retval = fwu_write_f34_command(command);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write command (%d blocks remaining)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				base + fwu->off.payload,
+				block_ptr,
+				transfer * fwu->block_size);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write block data (%d blocks remaining)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		retval = fwu_wait_for_idle(WRITE_WAIT_MS, false);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to wait for idle status (%d blocks remaining)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		block_ptr += (transfer * fwu->block_size);
+		remaining -= transfer;
+	} while (remaining);
+
+	return 0;
+}
+
+/*
+ * Write @block_cnt flash blocks from @block_ptr using the v5/v6
+ * protocol, one block per command: write the starting block number
+ * (high bits of the second byte encode the config area), then for each
+ * block write the payload, issue @command and wait for idle.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int fwu_write_f34_v5v6_blocks(unsigned char *block_ptr,
+		unsigned short block_cnt, unsigned char command)
+{
+	int retval;
+	unsigned char base;
+	unsigned char block_number[] = {0, 0};
+	unsigned short blk;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	base = fwu->f34_fd.data_base_addr;
+
+	/* Config area is encoded in bits 5+ of the high byte. */
+	block_number[1] |= (fwu->config_area << 5);
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			base + fwu->off.block_number,
+			block_number,
+			sizeof(block_number));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write block number\n",
+				__func__);
+		return retval;
+	}
+
+	for (blk = 0; blk < block_cnt; blk++) {
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				base + fwu->off.payload,
+				block_ptr,
+				fwu->block_size);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write block data (block %d)\n",
+					__func__, blk);
+			return retval;
+		}
+
+		retval = fwu_write_f34_command(command);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write command for block %d\n",
+					__func__, blk);
+			return retval;
+		}
+
+		retval = fwu_wait_for_idle(WRITE_WAIT_MS, false);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to wait for idle status (block %d)\n",
+					__func__, blk);
+			return retval;
+		}
+
+		block_ptr += fwu->block_size;
+	}
+
+	return 0;
+}
+
+/*
+ * Write flash blocks, dispatching to the writer that matches the
+ * detected bootloader protocol version.
+ */
+static int fwu_write_f34_blocks(unsigned char *block_ptr,
+		unsigned short block_cnt, unsigned char cmd)
+{
+	if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+		return fwu_write_f34_v7_blocks(block_ptr, block_cnt, cmd);
+
+	return fwu_write_f34_v5v6_blocks(block_ptr, block_cnt, cmd);
+}
+
+/*
+ * Read @block_cnt flash blocks into fwu->read_config_buf using the
+ * v7/v8 protocol.  Mirrors fwu_write_f34_v7_blocks(): select the
+ * partition, reset the block number, then loop issuing @command and
+ * reading up to max_transfer blocks of payload per iteration.
+ *
+ * The caller must have sized fwu->read_config_buf for
+ * block_cnt * fwu->block_size bytes -- TODO confirm at call sites.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int fwu_read_f34_v7_blocks(unsigned short block_cnt,
+		unsigned char command)
+{
+	int retval;
+	unsigned char base;
+	unsigned char length[2];
+	unsigned short transfer;
+	unsigned short max_transfer;
+	unsigned short remaining = block_cnt;
+	unsigned short block_number = 0;
+	unsigned short index = 0;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	base = fwu->f34_fd.data_base_addr;
+
+	retval = fwu_write_f34_partition_id(command);
+	if (retval < 0)
+		return retval;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			base + fwu->off.block_number,
+			(unsigned char *)&block_number,
+			sizeof(block_number));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write block number\n",
+				__func__);
+		return retval;
+	}
+
+	/* Cap each transfer at one page of data. */
+	if (fwu->payload_length > (PAGE_SIZE / fwu->block_size))
+		max_transfer = PAGE_SIZE / fwu->block_size;
+	else
+		max_transfer = fwu->payload_length;
+
+	do {
+		/* Full-sized chunk while at least max_transfer remain. */
+		if (remaining / max_transfer)
+			transfer = max_transfer;
+		else
+			transfer = remaining;
+
+		/* Transfer length is little-endian across two bytes. */
+		length[0] = (unsigned char)(transfer & MASK_8BIT);
+		length[1] = (unsigned char)(transfer >> 8);
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				base + fwu->off.transfer_length,
+				length,
+				sizeof(length));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write transfer length (%d blocks remaining)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		retval = fwu_write_f34_command(command);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write command (%d blocks remaining)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		retval = fwu_wait_for_idle(WRITE_WAIT_MS, false);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to wait for idle status (%d blocks remaining)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				base + fwu->off.payload,
+				&fwu->read_config_buf[index],
+				transfer * fwu->block_size);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read block data (%d blocks remaining)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		index += (transfer * fwu->block_size);
+		remaining -= transfer;
+	} while (remaining);
+
+	return 0;
+}
+
+/*
+ * Read @block_cnt flash blocks into fwu->read_config_buf using the
+ * v5/v6 protocol, one block per command: write the starting block
+ * number (high bits of the second byte encode the config area), then
+ * for each block issue @command, wait for idle and read the payload.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int fwu_read_f34_v5v6_blocks(unsigned short block_cnt,
+		unsigned char command)
+{
+	int retval;
+	unsigned char base;
+	unsigned char block_number[] = {0, 0};
+	unsigned short blk;
+	unsigned short index = 0;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	base = fwu->f34_fd.data_base_addr;
+
+	/* Config area is encoded in bits 5+ of the high byte. */
+	block_number[1] |= (fwu->config_area << 5);
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			base + fwu->off.block_number,
+			block_number,
+			sizeof(block_number));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write block number\n",
+				__func__);
+		return retval;
+	}
+
+	for (blk = 0; blk < block_cnt; blk++) {
+		retval = fwu_write_f34_command(command);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write read config command\n",
+					__func__);
+			return retval;
+		}
+
+		retval = fwu_wait_for_idle(WRITE_WAIT_MS, false);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to wait for idle status\n",
+					__func__);
+			return retval;
+		}
+
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				base + fwu->off.payload,
+				&fwu->read_config_buf[index],
+				fwu->block_size);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read block data (block %d)\n",
+					__func__, blk);
+			return retval;
+		}
+
+		index += fwu->block_size;
+	}
+
+	return 0;
+}
+
+/*
+ * Read flash blocks, dispatching to the reader that matches the
+ * detected bootloader protocol version.
+ */
+static int fwu_read_f34_blocks(unsigned short block_cnt, unsigned char cmd)
+{
+	if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+		return fwu_read_f34_v7_blocks(block_cnt, cmd);
+
+	return fwu_read_f34_v5v6_blocks(block_cnt, cmd);
+}
+
+/*
+ * Obtain the firmware ID of the image to be flashed.
+ *
+ * If the image header carries a firmware ID, use it directly;
+ * otherwise parse the "PRxxxxxxx" build number embedded in the image
+ * file name.
+ *
+ * Fixes vs. previous version:
+ *  - the digit-copy loop is now bounded, so an overlong PR number can
+ *    no longer overflow the MAX_FIRMWARE_ID_LEN heap buffer (kzalloc
+ *    keeps the string NUL-terminated);
+ *  - the value is parsed into a local unsigned long instead of casting
+ *    the unsigned int pointer to (unsigned long *), which wrote 8
+ *    bytes into a 4-byte object on 64-bit kernels.
+ *
+ * Returns 0 on success, -EINVAL on parse failure, -ENOMEM on
+ * allocation failure.
+ */
+static int fwu_get_image_firmware_id(unsigned int *fw_id)
+{
+	int retval;
+	unsigned char index = 0;
+	char *strptr;
+	char *firmware_id;
+	unsigned long id;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (fwu->img.contains_firmware_id) {
+		*fw_id = fwu->img.firmware_id;
+		return 0;
+	}
+
+	strptr = strnstr(fwu->image_name, "PR", MAX_IMAGE_NAME_LEN);
+	if (!strptr) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: No valid PR number (PRxxxxxxx) found in image file name (%s)\n",
+				__func__, fwu->image_name);
+		return -EINVAL;
+	}
+
+	strptr += 2;
+	firmware_id = kzalloc(MAX_FIRMWARE_ID_LEN, GFP_KERNEL);
+	if (!firmware_id) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for firmware_id\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	/* Copy the run of digits, leaving room for the terminator. */
+	while (strptr[index] >= '0' && strptr[index] <= '9' &&
+			index < MAX_FIRMWARE_ID_LEN - 1) {
+		firmware_id[index] = strptr[index];
+		index++;
+	}
+
+	retval = sstrtoul(firmware_id, 10, &id);
+	kfree(firmware_id);
+	if (retval) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to obtain image firmware ID\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	*fw_id = (unsigned int)id;
+
+	return 0;
+}
+
+/*
+ * Read the device configuration ID from the F34 control registers into
+ * fwu->config_id.  The ID is larger on v7/v8 bootloaders than on
+ * v5/v6.  Returns 0 on success or a negative errno.
+ */
+static int fwu_get_device_config_id(void)
+{
+	int retval;
+	unsigned char config_id_size;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	config_id_size =
+		(fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8) ?
+			V7_CONFIG_ID_SIZE : V5V6_CONFIG_ID_SIZE;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+				fwu->f34_fd.ctrl_base_addr,
+				fwu->config_id,
+				config_id_size);
+
+	return (retval < 0) ? retval : 0;
+}
+
+/*
+ * Decide whether (and what) to reflash.
+ *
+ * Decision order:
+ *  1. forced update or device stuck in bootloader mode -> full UI
+ *     firmware + config update;
+ *  2. image firmware ID newer than device -> full update; older ->
+ *     nothing;
+ *  3. IDs equal: compare config IDs byte-by-byte (big-endian-style,
+ *     most significant byte first); newer image config -> config-only
+ *     update, otherwise nothing.
+ *
+ * Returns the flash_area to update (UI_FIRMWARE, UI_CONFIG or NONE).
+ * Any error while fetching IDs resolves conservatively to NONE.
+ */
+static enum flash_area fwu_go_nogo(void)
+{
+	int retval;
+	enum flash_area flash_area = NONE;
+	unsigned char ii;
+	unsigned char config_id_size;
+	unsigned int device_fw_id;
+	unsigned int image_fw_id;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (fwu->force_update) {
+		flash_area = UI_FIRMWARE;
+		goto exit;
+	}
+
+	/* Update both UI and config if device is in bootloader mode */
+	if (fwu->in_bl_mode) {
+		flash_area = UI_FIRMWARE;
+		goto exit;
+	}
+
+	/* Get device firmware ID */
+	device_fw_id = rmi4_data->firmware_id;
+	dev_info(rmi4_data->pdev->dev.parent,
+			"%s: Device firmware ID = %d\n",
+			__func__, device_fw_id);
+
+	/* Get image firmware ID */
+	retval = fwu_get_image_firmware_id(&image_fw_id);
+	if (retval < 0) {
+		flash_area = NONE;
+		goto exit;
+	}
+	dev_info(rmi4_data->pdev->dev.parent,
+			"%s: Image firmware ID = %d\n",
+			__func__, image_fw_id);
+
+	if (image_fw_id > device_fw_id) {
+		flash_area = UI_FIRMWARE;
+		goto exit;
+	} else if (image_fw_id < device_fw_id) {
+		dev_info(rmi4_data->pdev->dev.parent,
+				"%s: Image firmware ID older than device firmware ID\n",
+				__func__);
+		flash_area = NONE;
+		goto exit;
+	}
+
+	/* Get device config ID */
+	retval = fwu_get_device_config_id();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read device config ID\n",
+				__func__);
+		flash_area = NONE;
+		goto exit;
+	}
+
+	if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+		config_id_size = V7_CONFIG_ID_SIZE;
+	else
+		config_id_size = V5V6_CONFIG_ID_SIZE;
+
+	/* First differing byte decides: image newer -> config update. */
+	for (ii = 0; ii < config_id_size; ii++) {
+		if (fwu->img.ui_config.data[ii] > fwu->config_id[ii]) {
+			flash_area = UI_CONFIG;
+			goto exit;
+		} else if (fwu->img.ui_config.data[ii] < fwu->config_id[ii]) {
+			flash_area = NONE;
+			goto exit;
+		}
+	}
+
+	flash_area = NONE;
+
+exit:
+	if (flash_area == NONE) {
+		dev_info(rmi4_data->pdev->dev.parent,
+				"%s: No need to do reflash\n",
+				__func__);
+	} else {
+		dev_info(rmi4_data->pdev->dev.parent,
+				"%s: Updating %s\n",
+				__func__,
+				flash_area == UI_FIRMWARE ?
+				"UI firmware and config" :
+				"UI config only");
+	}
+
+	return flash_area;
+}
+
+/*
+ * Walk the RMI Page Description Table looking for F01 (device
+ * control), F34 (flash) and F35 (microbootloader recovery).
+ *
+ * Records the discovered base addresses, derives the bootloader
+ * version from the F34 function version, builds the F34 interrupt
+ * mask and enables it in the F01 interrupt-enable register.  If F01
+ * or F34 is missing but F35 is present, the device is in
+ * microbootloader mode and recovery status is checked instead.
+ *
+ * Returns 0 on success (including microbootloader mode), -EINVAL if
+ * the required functions are absent or the F34 version is unknown, or
+ * a negative errno from register access.
+ */
+static int fwu_scan_pdt(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char intr_count = 0;
+	unsigned char intr_off;
+	unsigned char intr_src;
+	unsigned short addr;
+	bool f01found = false;
+	bool f34found = false;
+	bool f35found = false;
+	struct synaptics_rmi4_fn_desc rmi_fd;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	fwu->in_ub_mode = false;
+
+	/* PDT entries are laid out top-down from PDT_START. */
+	for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				addr,
+				(unsigned char *)&rmi_fd,
+				sizeof(rmi_fd));
+		if (retval < 0)
+			return retval;
+
+		if (rmi_fd.fn_number) {
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Found F%02x\n",
+					__func__, rmi_fd.fn_number);
+			switch (rmi_fd.fn_number) {
+			case SYNAPTICS_RMI4_F01:
+				f01found = true;
+
+				rmi4_data->f01_query_base_addr =
+						rmi_fd.query_base_addr;
+				rmi4_data->f01_ctrl_base_addr =
+						rmi_fd.ctrl_base_addr;
+				rmi4_data->f01_data_base_addr =
+						rmi_fd.data_base_addr;
+				rmi4_data->f01_cmd_base_addr =
+						rmi_fd.cmd_base_addr;
+				break;
+			case SYNAPTICS_RMI4_F34:
+				f34found = true;
+				fwu->f34_fd.query_base_addr =
+						rmi_fd.query_base_addr;
+				fwu->f34_fd.ctrl_base_addr =
+						rmi_fd.ctrl_base_addr;
+				fwu->f34_fd.data_base_addr =
+						rmi_fd.data_base_addr;
+
+				/* F34 function version maps to the
+				 * bootloader generation (v2 may later
+				 * be promoted to BL_V8 by the query
+				 * readback).
+				 */
+				switch (rmi_fd.fn_version) {
+				case F34_V0:
+					fwu->bl_version = BL_V5;
+					break;
+				case F34_V1:
+					fwu->bl_version = BL_V6;
+					break;
+				case F34_V2:
+					fwu->bl_version = BL_V7;
+					break;
+				default:
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Unrecognized F34 version\n",
+							__func__);
+					return -EINVAL;
+				}
+
+				/* Build the interrupt mask for F34's
+				 * interrupt sources at their bit offset.
+				 */
+				fwu->intr_mask = 0;
+				intr_src = rmi_fd.intr_src_count;
+				intr_off = intr_count % 8;
+				for (ii = intr_off;
+						ii < (intr_src + intr_off);
+						ii++) {
+					fwu->intr_mask |= 1 << ii;
+				}
+				break;
+			case SYNAPTICS_RMI4_F35:
+				f35found = true;
+				fwu->f35_fd.query_base_addr =
+						rmi_fd.query_base_addr;
+				fwu->f35_fd.ctrl_base_addr =
+						rmi_fd.ctrl_base_addr;
+				fwu->f35_fd.data_base_addr =
+						rmi_fd.data_base_addr;
+				break;
+			}
+		} else {
+			/* A zero function number ends the table. */
+			break;
+		}
+
+		intr_count += rmi_fd.intr_src_count;
+	}
+
+	if (!f01found || !f34found) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to find both F01 and F34\n",
+				__func__);
+		if (!f35found) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to find F35\n",
+					__func__);
+			return -EINVAL;
+		} else {
+			fwu->in_ub_mode = true;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: In microbootloader mode\n",
+					__func__);
+			fwu_recovery_check_status();
+			return 0;
+		}
+	}
+
+	rmi4_data->intr_mask[0] |= fwu->intr_mask;
+
+	/* F01 control register 1 holds the interrupt enable bits --
+	 * TODO confirm against the F01 register map.
+	 */
+	addr = rmi4_data->f01_ctrl_base_addr + 1;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			addr,
+			&(rmi4_data->intr_mask[0]),
+			sizeof(rmi4_data->intr_mask[0]));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set interrupt enable bit\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * fwu_enter_flash_prog() - put the controller into bootloader mode
+ *
+ * If the device is not already in BL mode, disables the touch interrupt,
+ * issues CMD_ENABLE_FLASH_PROG and waits for idle.  After the mode
+ * switch the PDT is rescanned (register bases change in BL mode) and
+ * F01 is forced to nosleep/normal so flashing is not interrupted.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int fwu_enter_flash_prog(void)
+{
+	int retval;
+	struct f01_device_control f01_device_control;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = fwu_read_flash_status();
+	if (retval < 0)
+		return retval;
+
+	/* Nothing to do if the device is already in bootloader mode. */
+	if (fwu->in_bl_mode)
+		return 0;
+
+	retval = rmi4_data->irq_enable(rmi4_data, false, true);
+	if (retval < 0)
+		return retval;
+
+	msleep(INT_DISABLE_WAIT_MS);
+
+	retval = fwu_write_f34_command(CMD_ENABLE_FLASH_PROG);
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_wait_for_idle(ENABLE_WAIT_MS, false);
+	if (retval < 0)
+		return retval;
+
+	if (!fwu->in_bl_mode) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: BL mode not entered\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	/* Optional board-specific hook run after entering BL mode. */
+	if (rmi4_data->hw_if->bl_hw_init) {
+		retval = rmi4_data->hw_if->bl_hw_init(rmi4_data);
+		if (retval < 0)
+			return retval;
+	}
+
+	/* Register layout differs in BL mode; rescan and requery F34. */
+	retval = fwu_scan_pdt();
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_read_f34_queries();
+	if (retval < 0)
+		return retval;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			f01_device_control.data,
+			sizeof(f01_device_control.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read F01 device control\n",
+				__func__);
+		return retval;
+	}
+
+	/* Keep the device awake for the duration of the flash. */
+	f01_device_control.nosleep = true;
+	f01_device_control.sleep_mode = SLEEP_MODE_NORMAL;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			f01_device_control.data,
+			sizeof(f01_device_control.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write F01 device control\n",
+				__func__);
+		return retval;
+	}
+
+	msleep(ENTER_FLASH_PROG_WAIT_MS);
+
+	return retval;
+}
+
+/*
+ * Verify that the UI firmware in the image spans exactly the number of
+ * flash blocks the device reports for that partition.
+ */
+static int fwu_check_ui_firmware_size(void)
+{
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if ((fwu->img.ui_firmware.size / fwu->block_size) ==
+			fwu->blkcount.ui_firmware)
+		return 0;
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: UI firmware size mismatch\n",
+			__func__);
+	return -EINVAL;
+}
+
+/*
+ * Verify that the UI configuration in the image spans exactly the
+ * number of flash blocks the device reports for that partition.
+ */
+static int fwu_check_ui_configuration_size(void)
+{
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if ((fwu->img.ui_config.size / fwu->block_size) ==
+			fwu->blkcount.ui_config)
+		return 0;
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: UI configuration size mismatch\n",
+			__func__);
+	return -EINVAL;
+}
+
+/*
+ * Verify that the display configuration in the image spans exactly the
+ * number of flash blocks the device reports for that partition.
+ */
+static int fwu_check_dp_configuration_size(void)
+{
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if ((fwu->img.dp_config.size / fwu->block_size) ==
+			fwu->blkcount.dp_config)
+		return 0;
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Display configuration size mismatch\n",
+			__func__);
+	return -EINVAL;
+}
+
+/*
+ * Verify that the permanent configuration in the image spans exactly
+ * the number of flash blocks the device reports for that partition.
+ */
+static int fwu_check_pm_configuration_size(void)
+{
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if ((fwu->img.pm_config.size / fwu->block_size) ==
+			fwu->blkcount.pm_config)
+		return 0;
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Permanent configuration size mismatch\n",
+			__func__);
+	return -EINVAL;
+}
+
+/*
+ * Verify that the bootloader configuration in the image spans exactly
+ * the number of flash blocks the device reports for that partition.
+ */
+static int fwu_check_bl_configuration_size(void)
+{
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if ((fwu->img.bl_config.size / fwu->block_size) ==
+			fwu->blkcount.bl_config)
+		return 0;
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Bootloader configuration size mismatch\n",
+			__func__);
+	return -EINVAL;
+}
+
+/*
+ * Verify that the guest code in the image spans exactly the number of
+ * flash blocks the device reports for that partition.
+ */
+static int fwu_check_guest_code_size(void)
+{
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if ((fwu->img.guest_code.size / fwu->block_size) ==
+			fwu->blkcount.guest_code)
+		return 0;
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Guest code size mismatch\n",
+			__func__);
+	return -EINVAL;
+}
+
+/*
+ * Write the UI firmware partition from the parsed image to flash.
+ * Returns the result of the block write (0 or negative errno).
+ */
+static int fwu_write_firmware(void)
+{
+	unsigned short firmware_block_count;
+
+	firmware_block_count = fwu->img.ui_firmware.size / fwu->block_size;
+
+	return fwu_write_f34_blocks((unsigned char *)fwu->img.ui_firmware.data,
+			firmware_block_count, CMD_WRITE_FW);
+}
+
+/*
+ * Erase the configuration area currently selected in fwu->config_area
+ * (UI, display or bootloader), then wait for F34 to go idle.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int fwu_erase_configuration(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	switch (fwu->config_area) {
+	case UI_CONFIG_AREA:
+		retval = fwu_write_f34_command(CMD_ERASE_UI_CONFIG);
+		if (retval < 0)
+			return retval;
+		break;
+	case DP_CONFIG_AREA:
+		retval = fwu_write_f34_command(CMD_ERASE_DISP_CONFIG);
+		if (retval < 0)
+			return retval;
+		break;
+	case BL_CONFIG_AREA:
+		retval = fwu_write_f34_command(CMD_ERASE_BL_CONFIG);
+		if (retval < 0)
+			return retval;
+		break;
+	default:
+		/*
+		 * Other areas have dedicated erase paths; reject them here
+		 * rather than silently waiting for a command that was never
+		 * issued.
+		 */
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid config area\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Erase command written\n",
+			__func__);
+
+	retval = fwu_wait_for_idle(ERASE_WAIT_MS, false);
+	if (retval < 0)
+		return retval;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Idle status detected\n",
+			__func__);
+
+	return retval;
+}
+
+/*
+ * Erase the guest code partition: issue CMD_ERASE_GUEST_CODE and wait
+ * for F34 to report idle.  Returns 0 on success or a negative errno.
+ */
+static int fwu_erase_guest_code(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = fwu_write_f34_command(CMD_ERASE_GUEST_CODE);
+	if (retval < 0)
+		return retval;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Erase command written\n",
+			__func__);
+
+	retval = fwu_wait_for_idle(ERASE_WAIT_MS, false);
+	if (retval < 0)
+		return retval;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Idle status detected\n",
+			__func__);
+
+	return 0;
+}
+
+/*
+ * Erase every partition that will be rewritten, using the erase
+ * commands appropriate for the detected bootloader version:
+ *   BL_V7     - erase UI firmware and UI config individually
+ *   otherwise - single CMD_ERASE_ALL (BL_V8 returns right after it,
+ *               since erase-all covers the remaining partitions)
+ * Returns 0 on success or a negative errno.
+ */
+static int fwu_erase_all(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (fwu->bl_version == BL_V7) {
+		retval = fwu_write_f34_command(CMD_ERASE_UI_FIRMWARE);
+		if (retval < 0)
+			return retval;
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Erase command written\n",
+				__func__);
+
+		retval = fwu_wait_for_idle(ERASE_WAIT_MS, false);
+		if (retval < 0)
+			return retval;
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Idle status detected\n",
+				__func__);
+
+		fwu->config_area = UI_CONFIG_AREA;
+		retval = fwu_erase_configuration();
+		if (retval < 0)
+			return retval;
+	} else {
+		retval = fwu_write_f34_command(CMD_ERASE_ALL);
+		if (retval < 0)
+			return retval;
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Erase all command written\n",
+				__func__);
+
+		retval = fwu_wait_for_idle(ERASE_WAIT_MS, false);
+		/*
+		 * NOTE(review): a BL_V8 device reporting BAD_PARTITION_TABLE
+		 * is deliberately tolerated here — presumably recovered by
+		 * the subsequent partition table rewrite; confirm against
+		 * the F34 v8 flow.
+		 */
+		if (!(fwu->bl_version == BL_V8 &&
+				fwu->flash_status == BAD_PARTITION_TABLE)) {
+			if (retval < 0)
+				return retval;
+		}
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Idle status detected\n",
+				__func__);
+
+		/* For BL_V8, erase-all already covers disp/guest areas. */
+		if (fwu->bl_version == BL_V8)
+			return 0;
+	}
+
+	if (fwu->flash_properties.has_disp_config &&
+			fwu->img.contains_disp_config) {
+		fwu->config_area = DP_CONFIG_AREA;
+		retval = fwu_erase_configuration();
+		if (retval < 0)
+			return retval;
+	}
+
+	if (fwu->has_guest_code && fwu->img.contains_guest_code) {
+		retval = fwu_erase_guest_code();
+		if (retval < 0)
+			return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Flash the configuration staged in fwu->config_data /
+ * fwu->config_block_count into the area selected by fwu->config_area.
+ */
+static int fwu_write_configuration(void)
+{
+	return fwu_write_f34_blocks((unsigned char *)fwu->config_data,
+			fwu->config_block_count, CMD_WRITE_CONFIG);
+}
+
+/* Stage the image's UI configuration, then flash it. */
+static int fwu_write_ui_configuration(void)
+{
+	fwu->config_area = UI_CONFIG_AREA;
+	fwu->config_data = fwu->img.ui_config.data;
+	fwu->config_size = fwu->img.ui_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+
+	return fwu_write_configuration();
+}
+
+/* Stage the image's display configuration, then flash it. */
+static int fwu_write_dp_configuration(void)
+{
+	fwu->config_area = DP_CONFIG_AREA;
+	fwu->config_data = fwu->img.dp_config.data;
+	fwu->config_size = fwu->img.dp_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+
+	return fwu_write_configuration();
+}
+
+/* Stage the image's permanent configuration, then flash it. */
+static int fwu_write_pm_configuration(void)
+{
+	fwu->config_area = PM_CONFIG_AREA;
+	fwu->config_data = fwu->img.pm_config.data;
+	fwu->config_size = fwu->img.pm_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+
+	return fwu_write_configuration();
+}
+
+/*
+ * Replace the flash configuration (partition table area): validate the
+ * image's flash-config size against the device, erase the area, write
+ * the new data and reset the device so it takes effect.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int fwu_write_flash_configuration(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	fwu->config_area = FLASH_CONFIG_AREA;
+	fwu->config_data = fwu->img.fl_config.data;
+	fwu->config_size = fwu->img.fl_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+
+	if (fwu->config_block_count != fwu->blkcount.fl_config) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Flash configuration size mismatch\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	retval = fwu_write_f34_command(CMD_ERASE_FLASH_CONFIG);
+	if (retval < 0)
+		return retval;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Erase flash configuration command written\n",
+			__func__);
+
+	retval = fwu_wait_for_idle(ERASE_WAIT_MS, false);
+	if (retval < 0)
+		return retval;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Idle status detected\n",
+			__func__);
+
+	retval = fwu_write_configuration();
+	if (retval < 0)
+		return retval;
+
+	/* Reset so the controller reloads the new flash configuration. */
+	rmi4_data->reset_device(rmi4_data, false);
+
+	return 0;
+}
+
+/*
+ * Flash the guest code partition from the parsed image.
+ * Returns 0 on success or a negative errno from the block write.
+ */
+static int fwu_write_guest_code(void)
+{
+	int retval;
+	unsigned short block_count;
+
+	/* Guest code occupies this many whole flash blocks. */
+	block_count = fwu->img.guest_code.size / fwu->block_size;
+
+	retval = fwu_write_f34_blocks((unsigned char *)fwu->img.guest_code.data,
+			block_count, CMD_WRITE_GUEST_CODE);
+
+	return retval < 0 ? retval : 0;
+}
+
+/*
+ * Write the lockdown data from the parsed image.  Returns the result
+ * of the block write (0 or negative errno).
+ */
+static int fwu_write_lockdown(void)
+{
+	unsigned short lockdown_block_count;
+
+	lockdown_block_count = fwu->img.lockdown.size / fwu->block_size;
+
+	return fwu_write_f34_blocks((unsigned char *)fwu->img.lockdown.data,
+			lockdown_block_count, CMD_WRITE_LOCKDOWN);
+}
+
+/*
+ * Program the partition table on a BL_V8 device: write the image's
+ * flash configuration (no separate erase needed after CMD_ERASE_ALL)
+ * and reset so the controller reloads it.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int fwu_write_partition_table_v8(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	fwu->config_area = FLASH_CONFIG_AREA;
+	fwu->config_data = fwu->img.fl_config.data;
+	fwu->config_size = fwu->img.fl_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+
+	if (fwu->config_block_count != fwu->blkcount.fl_config) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Flash configuration size mismatch\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	retval = fwu_write_configuration();
+	if (retval < 0)
+		return retval;
+
+	rmi4_data->reset_device(rmi4_data, false);
+
+	return 0;
+}
+
+/*
+ * Program the partition table on a BL_V7 device.  The bootloader
+ * configuration is first read out and preserved in read_config_buf,
+ * then erased, the new flash configuration is written, and finally the
+ * saved bootloader configuration is written back.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int fwu_write_partition_table_v7(void)
+{
+	int retval;
+	unsigned short block_count;
+
+	/* Save the current bootloader configuration before erasing it. */
+	block_count = fwu->blkcount.bl_config;
+	fwu->config_area = BL_CONFIG_AREA;
+	fwu->config_size = fwu->block_size * block_count;
+
+	retval = fwu_allocate_read_config_buf(fwu->config_size);
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_read_f34_blocks(block_count, CMD_READ_CONFIG);
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_erase_configuration();
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_write_flash_configuration();
+	if (retval < 0)
+		return retval;
+
+	/* Restore the saved bootloader configuration. */
+	fwu->config_area = BL_CONFIG_AREA;
+	fwu->config_data = fwu->read_config_buf;
+	fwu->config_size = fwu->img.bl_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+
+	retval = fwu_write_configuration();
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/*
+ * Perform a full reflash: validate partition sizes, erase, rewrite the
+ * partition table when it changed (V7/V8), then program firmware, UI
+ * config and, when present, display config and guest code.
+ *
+ * Must be called with the device already in bootloader mode.
+ * Returns 0 on success or a negative errno.
+ */
+static int fwu_do_reflash(void)
+{
+	int retval;
+
+	if (!fwu->new_partition_table) {
+		/* Partition layout unchanged: sizes must match the device. */
+		retval = fwu_check_ui_firmware_size();
+		if (retval < 0)
+			return retval;
+
+		retval = fwu_check_ui_configuration_size();
+		if (retval < 0)
+			return retval;
+
+		if (fwu->flash_properties.has_disp_config &&
+				fwu->img.contains_disp_config) {
+			retval = fwu_check_dp_configuration_size();
+			if (retval < 0)
+				return retval;
+		}
+
+		if (fwu->has_guest_code && fwu->img.contains_guest_code) {
+			retval = fwu_check_guest_code_size();
+			if (retval < 0)
+				return retval;
+		}
+	} else if (fwu->bl_version == BL_V7) {
+		/* V7 rewrites the table and restores the BL config. */
+		retval = fwu_check_bl_configuration_size();
+		if (retval < 0)
+			return retval;
+	}
+
+	retval = fwu_erase_all();
+	if (retval < 0)
+		return retval;
+
+	if (fwu->bl_version == BL_V7 && fwu->new_partition_table) {
+		retval = fwu_write_partition_table_v7();
+		if (retval < 0)
+			return retval;
+		pr_notice("%s: Partition table programmed\n", __func__);
+	} else if (fwu->bl_version == BL_V8) {
+		retval = fwu_write_partition_table_v8();
+		if (retval < 0)
+			return retval;
+		pr_notice("%s: Partition table programmed\n", __func__);
+	}
+
+	retval = fwu_write_firmware();
+	if (retval < 0)
+		return retval;
+	pr_notice("%s: Firmware programmed\n", __func__);
+
+	fwu->config_area = UI_CONFIG_AREA;
+	retval = fwu_write_ui_configuration();
+	if (retval < 0)
+		return retval;
+	pr_notice("%s: Configuration programmed\n", __func__);
+
+	if (fwu->flash_properties.has_disp_config &&
+			fwu->img.contains_disp_config) {
+		retval = fwu_write_dp_configuration();
+		if (retval < 0)
+			return retval;
+		pr_notice("%s: Display configuration programmed\n", __func__);
+	}
+
+	if (fwu->has_guest_code && fwu->img.contains_guest_code) {
+		retval = fwu_write_guest_code();
+		if (retval < 0)
+			return retval;
+		pr_notice("%s: Guest code programmed\n", __func__);
+	}
+
+	return retval;
+}
+
+/*
+ * Read the configuration area selected by fwu->config_area into
+ * fwu->read_config_buf.  Enters bootloader mode for the read and
+ * resets the device afterwards to return to normal operation.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int fwu_do_read_config(void)
+{
+	int retval;
+	unsigned short block_count;
+	unsigned short config_area;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	switch (fwu->config_area) {
+	case UI_CONFIG_AREA:
+		block_count = fwu->blkcount.ui_config;
+		break;
+	case DP_CONFIG_AREA:
+		if (!fwu->flash_properties.has_disp_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Display configuration not supported\n",
+					__func__);
+			return -EINVAL;
+		}
+		block_count = fwu->blkcount.dp_config;
+		break;
+	case PM_CONFIG_AREA:
+		if (!fwu->flash_properties.has_pm_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Permanent configuration not supported\n",
+					__func__);
+			return -EINVAL;
+		}
+		block_count = fwu->blkcount.pm_config;
+		break;
+	case BL_CONFIG_AREA:
+		if (!fwu->flash_properties.has_bl_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Bootloader configuration not supported\n",
+					__func__);
+			return -EINVAL;
+		}
+		block_count = fwu->blkcount.bl_config;
+		break;
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid config area\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (block_count == 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid block count\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+
+	/* Entering flash prog rescans the PDT, which may clobber the
+	 * selected area — save and restore it around the transition. */
+	config_area = fwu->config_area;
+
+	retval = fwu_enter_flash_prog();
+	if (retval < 0)
+		goto exit;
+
+	fwu->config_area = config_area;
+
+	fwu->config_size = fwu->block_size * block_count;
+
+	retval = fwu_allocate_read_config_buf(fwu->config_size);
+	if (retval < 0)
+		goto exit;
+
+	retval = fwu_read_f34_blocks(block_count, CMD_READ_CONFIG);
+
+exit:
+	/* Always reset to leave bootloader mode, even on failure. */
+	rmi4_data->reset_device(rmi4_data, false);
+
+	mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+
+	return retval;
+}
+
+/*
+ * Program the lockdown data on a V7/V8 bootloader device, skipping the
+ * write when the device reports it is already locked down.
+ *
+ * Returns 0 on success (or already locked) or a negative errno.
+ */
+static int fwu_do_lockdown_v7(void)
+{
+	int retval;
+	struct f34_v7_data0 status;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = fwu_enter_flash_prog();
+	if (retval < 0)
+		return retval;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fwu->f34_fd.data_base_addr + fwu->off.flash_status,
+			status.data,
+			sizeof(status.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read flash status\n",
+				__func__);
+		return retval;
+	}
+
+	/* NOTE(review): cfg status 2 appears to mean "locked down" —
+	 * confirm against the F34 v7 register spec. */
+	if (status.device_cfg_status == 2) {
+		dev_info(rmi4_data->pdev->dev.parent,
+				"%s: Device already locked down\n",
+				__func__);
+		return 0;
+	}
+
+	retval = fwu_write_lockdown();
+	if (retval < 0)
+		return retval;
+
+	pr_notice("%s: Lockdown programmed\n", __func__);
+
+	return retval;
+}
+
+/*
+ * Program the lockdown data on a V5/V6 bootloader device, skipping the
+ * write when the flash properties report the device is already locked
+ * (unlocked bit clear).
+ *
+ * Returns 0 on success (or already locked) or a negative errno.
+ */
+static int fwu_do_lockdown_v5v6(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = fwu_enter_flash_prog();
+	if (retval < 0)
+		return retval;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fwu->f34_fd.query_base_addr + fwu->off.properties,
+			fwu->flash_properties.data,
+			sizeof(fwu->flash_properties.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read flash properties\n",
+				__func__);
+		return retval;
+	}
+
+	if (fwu->flash_properties.unlocked == 0) {
+		dev_info(rmi4_data->pdev->dev.parent,
+				"%s: Device already locked down\n",
+				__func__);
+		return 0;
+	}
+
+	retval = fwu_write_lockdown();
+	if (retval < 0)
+		return retval;
+
+	pr_notice("%s: Lockdown programmed\n", __func__);
+
+	return retval;
+}
+
+/*
+ * Entry point for writing only the guest code partition from the
+ * currently staged image: validates support and image contents, enters
+ * bootloader mode, erases and writes guest code, then resets.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int fwu_start_write_guest_code(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = fwu_parse_image_info();
+	if (retval < 0)
+		return -EINVAL;	/* NOTE(review): discards the parse errno */
+
+	if (!fwu->has_guest_code) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Guest code not supported\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (!fwu->img.contains_guest_code) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: No guest code in firmware image\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (rmi4_data->sensor_sleep) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Sensor sleeping\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	/* Block suspend while flash operations are in progress. */
+	rmi4_data->stay_awake = true;
+
+	mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+
+	pr_notice("%s: Start of write guest code process\n", __func__);
+
+	retval = fwu_enter_flash_prog();
+	if (retval < 0)
+		goto exit;
+
+	retval = fwu_check_guest_code_size();
+	if (retval < 0)
+		goto exit;
+
+	retval = fwu_erase_guest_code();
+	if (retval < 0)
+		goto exit;
+
+	retval = fwu_write_guest_code();
+	if (retval < 0)
+		goto exit;
+
+	pr_notice("%s: Guest code programmed\n", __func__);
+
+exit:
+	/* Always reset to leave bootloader mode, even on failure. */
+	rmi4_data->reset_device(rmi4_data, false);
+
+	pr_notice("%s: End of write guest code process\n", __func__);
+
+	mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+
+	rmi4_data->stay_awake = false;
+
+	return retval;
+}
+
+/*
+ * Entry point for writing a single configuration area (UI, display or
+ * permanent) from the staged image.  Validates support, image contents
+ * and sizes, enters bootloader mode, erases (except PM, which has no
+ * erase command here), writes the area and resets the device.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int fwu_start_write_config(void)
+{
+	int retval;
+	unsigned short config_area;
+	unsigned int device_fw_id;
+	unsigned int image_fw_id;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = fwu_parse_image_info();
+	if (retval < 0)
+		return -EINVAL;	/* NOTE(review): discards the parse errno */
+
+	switch (fwu->config_area) {
+	case UI_CONFIG_AREA:
+		/* UI config must match the firmware currently on device. */
+		device_fw_id = rmi4_data->firmware_id;
+		retval = fwu_get_image_firmware_id(&image_fw_id);
+		if (retval < 0)
+			return retval;
+		if (device_fw_id != image_fw_id) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Device and image firmware IDs don't match\n",
+					__func__);
+			return -EINVAL;
+		}
+		retval = fwu_check_ui_configuration_size();
+		if (retval < 0)
+			return retval;
+		break;
+	case DP_CONFIG_AREA:
+		if (!fwu->flash_properties.has_disp_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Display configuration not supported\n",
+					__func__);
+			return -EINVAL;
+		}
+		if (!fwu->img.contains_disp_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: No display configuration in firmware image\n",
+					__func__);
+			return -EINVAL;
+		}
+		retval = fwu_check_dp_configuration_size();
+		if (retval < 0)
+			return retval;
+		break;
+	case PM_CONFIG_AREA:
+		if (!fwu->flash_properties.has_pm_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Permanent configuration not supported\n",
+					__func__);
+			return -EINVAL;
+		}
+		if (!fwu->img.contains_perm_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: No permanent configuration in firmware image\n",
+					__func__);
+			return -EINVAL;
+		}
+		retval = fwu_check_pm_configuration_size();
+		if (retval < 0)
+			return retval;
+		break;
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Configuration not supported\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (rmi4_data->sensor_sleep) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Sensor sleeping\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	/* Block suspend while flash operations are in progress. */
+	rmi4_data->stay_awake = true;
+
+	mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+
+	pr_notice("%s: Start of write config process\n", __func__);
+
+	/* Entering flash prog rescans the PDT, which may clobber the
+	 * selected area — save and restore it around the transition. */
+	config_area = fwu->config_area;
+
+	retval = fwu_enter_flash_prog();
+	if (retval < 0)
+		goto exit;
+
+	fwu->config_area = config_area;
+
+	if (fwu->config_area != PM_CONFIG_AREA) {
+		retval = fwu_erase_configuration();
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to erase config\n",
+					__func__);
+			goto exit;
+		}
+	}
+
+	switch (fwu->config_area) {
+	case UI_CONFIG_AREA:
+		retval = fwu_write_ui_configuration();
+		if (retval < 0)
+			goto exit;
+		break;
+	case DP_CONFIG_AREA:
+		retval = fwu_write_dp_configuration();
+		if (retval < 0)
+			goto exit;
+		break;
+	case PM_CONFIG_AREA:
+		retval = fwu_write_pm_configuration();
+		if (retval < 0)
+			goto exit;
+		break;
+	}
+
+	pr_notice("%s: Config written\n", __func__);
+
+exit:
+	/* UI config changes need a full (rebuild) reset; others don't. */
+	switch (fwu->config_area) {
+	case UI_CONFIG_AREA:
+		rmi4_data->reset_device(rmi4_data, true);
+		break;
+	case DP_CONFIG_AREA:
+	case PM_CONFIG_AREA:
+		rmi4_data->reset_device(rmi4_data, false);
+		break;
+	}
+
+	pr_notice("%s: End of write config process\n", __func__);
+
+	mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+
+	rmi4_data->stay_awake = false;
+
+	return retval;
+}
+
+/*
+ * Top-level reflash entry point.  Loads the default image via
+ * request_firmware() when none was staged, validates flash size,
+ * bootloader version and partition table against the device, then asks
+ * fwu_go_nogo() whether to update firmware+config, config only, or
+ * nothing, and performs the chosen update followed by lockdown when
+ * enabled and present in the image.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int fwu_start_reflash(void)
+{
+	int retval = 0;
+	enum flash_area flash_area;
+	const struct firmware *fw_entry = NULL;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (rmi4_data->sensor_sleep) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Sensor sleeping\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	/* Block suspend while flash operations are in progress. */
+	rmi4_data->stay_awake = true;
+
+	mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+
+	pr_notice("%s: Start of reflash process\n", __func__);
+
+	/* No image staged by the caller: load the built-in default. */
+	if (fwu->image == NULL) {
+		retval = secure_memcpy(fwu->image_name, MAX_IMAGE_NAME_LEN,
+				FW_IMAGE_NAME, sizeof(FW_IMAGE_NAME),
+				sizeof(FW_IMAGE_NAME));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to copy image file name\n",
+					__func__);
+			goto exit;
+		}
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Requesting firmware image %s\n",
+				__func__, fwu->image_name);
+
+		retval = request_firmware(&fw_entry, fwu->image_name,
+				rmi4_data->pdev->dev.parent);
+		if (retval != 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Firmware image %s not available\n",
+					__func__, fwu->image_name);
+			retval = -EINVAL;
+			goto exit;
+		}
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Firmware image size = %d\n",
+				__func__, (unsigned int)fw_entry->size);
+
+		fwu->image = fw_entry->data;
+	}
+
+	retval = fwu_parse_image_info();
+	if (retval < 0)
+		goto exit;
+
+	/* Sanity checks: image must match this device's flash layout. */
+	if (fwu->blkcount.total_count != fwu->img.blkcount.total_count) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Flash size mismatch\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (fwu->bl_version != fwu->img.bl_version) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Bootloader version mismatch\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	/* A changed partition table is only accepted with force_update. */
+	if (!fwu->force_update && fwu->new_partition_table) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Partition table mismatch\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	retval = fwu_read_flash_status();
+	if (retval < 0)
+		goto exit;
+
+	if (fwu->in_bl_mode) {
+		dev_info(rmi4_data->pdev->dev.parent,
+				"%s: Device in bootloader mode\n",
+				__func__);
+	}
+
+	/* Decide whether/what to flash based on IDs and config versions. */
+	flash_area = fwu_go_nogo();
+
+	if (flash_area != NONE) {
+		retval = fwu_enter_flash_prog();
+		if (retval < 0) {
+			rmi4_data->reset_device(rmi4_data, false);
+			goto exit;
+		}
+	}
+
+	switch (flash_area) {
+	case UI_FIRMWARE:
+		retval = fwu_do_reflash();
+		rmi4_data->reset_device(rmi4_data, true);
+		break;
+	case UI_CONFIG:
+		retval = fwu_check_ui_configuration_size();
+		if (retval < 0)
+			break;
+		fwu->config_area = UI_CONFIG_AREA;
+		retval = fwu_erase_configuration();
+		if (retval < 0)
+			break;
+		retval = fwu_write_ui_configuration();
+		rmi4_data->reset_device(rmi4_data, true);
+		break;
+	case NONE:
+	default:
+		rmi4_data->reset_device(rmi4_data, false);
+		break;
+	}
+
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to do reflash\n",
+				__func__);
+		goto exit;
+	}
+
+	/* Optionally program lockdown data carried in the image. */
+	if (fwu->do_lockdown && (fwu->img.lockdown.data != NULL)) {
+		switch (fwu->bl_version) {
+		case BL_V5:
+		case BL_V6:
+			retval = fwu_do_lockdown_v5v6();
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to do lockdown\n",
+						__func__);
+			}
+			rmi4_data->reset_device(rmi4_data, false);
+			break;
+		case BL_V7:
+		case BL_V8:
+			retval = fwu_do_lockdown_v7();
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to do lockdown\n",
+						__func__);
+			}
+			rmi4_data->reset_device(rmi4_data, false);
+			break;
+		default:
+			break;
+		}
+	}
+
+exit:
+	if (fw_entry)
+		release_firmware(fw_entry);
+
+	pr_notice("%s: End of reflash process\n", __func__);
+
+	mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+
+	rmi4_data->stay_awake = false;
+
+	return retval;
+}
+
+/*
+ * Read the F35 microbootloader error code and translate it: 0 means
+ * success, anything else is logged and reported as -EINVAL.
+ */
+static int fwu_recovery_check_status(void)
+{
+	int retval;
+	unsigned char status;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+	unsigned char base = fwu->f35_fd.data_base_addr;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			base + F35_ERROR_CODE_OFFSET,
+			&status,
+			1);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read status\n",
+				__func__);
+		return retval;
+	}
+
+	/* Error code lives in the low seven bits; zero means success. */
+	status &= MASK_7BIT;
+	if (status == 0x00)
+		return 0;
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Recovery mode status = %d\n",
+			__func__, status);
+	return -EINVAL;
+}
+
+/*
+ * Erase the external flash via the F35 microbootloader: issue the
+ * erase-all command, wait a fixed delay, then check the F35 status.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int fwu_recovery_erase_all(void)
+{
+	int retval;
+	unsigned char base;
+	unsigned char command = CMD_F35_ERASE_ALL;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	base = fwu->f35_fd.ctrl_base_addr;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			base + F35_CHUNK_COMMAND_OFFSET,
+			&command,
+			sizeof(command));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to issue erase all command\n",
+				__func__);
+		return retval;
+	}
+
+	/* No completion interrupt in recovery mode; just wait. */
+	msleep(F35_ERASE_ALL_WAIT_MS);
+
+	retval = fwu_recovery_check_status();
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/*
+ * Stream the staged recovery image to external flash through the F35
+ * chunk-write interface.  The image is sent in F35_CHUNK_SIZE pieces;
+ * the final byte of every transfer carries the write-chunk command.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int fwu_recovery_write_chunk(void)
+{
+	int retval;
+	unsigned char base;
+	unsigned char chunk_number[] = {0, 0};
+	unsigned char chunk_spare;
+	unsigned char chunk_size;
+	unsigned char buf[F35_CHUNK_SIZE + 1];
+	unsigned short chunk;
+	unsigned short chunk_total;
+	unsigned short bytes_written = 0;
+	unsigned char *chunk_ptr = (unsigned char *)fwu->image;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	base = fwu->f35_fd.ctrl_base_addr;
+
+	/* Start writing from chunk 0. */
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			base + F35_CHUNK_NUM_LSB_OFFSET,
+			chunk_number,
+			sizeof(chunk_number));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write chunk number\n",
+				__func__);
+		return retval;
+	}
+
+	/* Trailing byte of every transfer is the write-chunk command. */
+	buf[sizeof(buf) - 1] = CMD_F35_WRITE_CHUNK;
+
+	chunk_total = fwu->image_size / F35_CHUNK_SIZE;
+	chunk_spare = fwu->image_size % F35_CHUNK_SIZE;
+	if (chunk_spare)
+		chunk_total++;
+
+	for (chunk = 0; chunk < chunk_total; chunk++) {
+		if (chunk_spare && chunk == chunk_total - 1)
+			chunk_size = chunk_spare;
+		else
+			chunk_size = F35_CHUNK_SIZE;
+
+		/* Zero the payload only; keep the trailing command byte. */
+		memset(buf, 0x00, F35_CHUNK_SIZE);
+		retval = secure_memcpy(buf, sizeof(buf), chunk_ptr,
+					fwu->image_size - bytes_written,
+					chunk_size);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to copy chunk data (chunk %d)\n",
+					__func__, chunk);
+			return retval;
+		}
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				base + F35_CHUNK_DATA_OFFSET,
+				buf,
+				sizeof(buf));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write chunk data (chunk %d)\n",
+					__func__, chunk);
+			return retval;
+		}
+		chunk_ptr += chunk_size;
+		bytes_written += chunk_size;
+	}
+
+	retval = fwu_recovery_check_status();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write chunk data\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Issue the F35 reset command to leave microbootloader mode, then wait
+ * the required settle time.  Returns 0 on success or a negative errno.
+ */
+static int fwu_recovery_reset(void)
+{
+	int retval;
+	unsigned char base;
+	unsigned char command = CMD_F35_RESET;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	base = fwu->f35_fd.ctrl_base_addr;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			base + F35_CHUNK_COMMAND_OFFSET,
+			&command,
+			sizeof(command));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to issue reset command\n",
+				__func__);
+		return retval;
+	}
+
+	msleep(F35_RESET_WAIT_MS);
+
+	return 0;
+}
+
+/*
+ * Recover a device stuck in F35 microbootloader mode: disable the
+ * interrupt, erase external flash, stream the staged image in chunks,
+ * issue the F35 reset and finally reset the driver state.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int fwu_start_recovery(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (rmi4_data->sensor_sleep) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Sensor sleeping\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	/* Block suspend while recovery is in progress. */
+	rmi4_data->stay_awake = true;
+
+	mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+
+	pr_notice("%s: Start of recovery process\n", __func__);
+
+	retval = rmi4_data->irq_enable(rmi4_data, false, false);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to disable interrupt\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = fwu_recovery_erase_all();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to do erase all in recovery mode\n",
+				__func__);
+		goto exit;
+	}
+
+	pr_notice("%s: External flash erased\n", __func__);
+
+	retval = fwu_recovery_write_chunk();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write chunk data in recovery mode\n",
+				__func__);
+		goto exit;
+	}
+
+	pr_notice("%s: Chunk data programmed\n", __func__);
+
+	retval = fwu_recovery_reset();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to reset device in recovery mode\n",
+				__func__);
+		goto exit;
+	}
+
+	pr_notice("%s: Recovery mode reset issued\n", __func__);
+
+	/* Full reset so the driver re-enumerates the recovered device. */
+	rmi4_data->reset_device(rmi4_data, true);
+
+	retval = 0;
+
+exit:
+	pr_notice("%s: End of recovery process\n", __func__);
+
+	mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+
+	rmi4_data->stay_awake = false;
+
+	return retval;
+}
+
+/*
+ * Public entry point for reflashing the touch controller firmware.
+ * @fw_data: image to flash, or NULL to use the built-in image.
+ *
+ * Returns 0 on success, -ENODEV if the updater is unavailable or the
+ * controller is stuck in the microbootloader, or the error code from
+ * fwu_start_reflash().
+ */
+int synaptics_fw_updater(const unsigned char *fw_data)
+{
+	int ret;
+
+	/* Updater must exist, be initialized, and not be in UB mode */
+	if (!fwu || !fwu->initialized || fwu->in_ub_mode)
+		return -ENODEV;
+
+	fwu->image = fw_data;
+	ret = fwu_start_reflash();
+	fwu->image = NULL;
+
+	return ret;
+}
+EXPORT_SYMBOL(synaptics_fw_updater);
+
+#ifdef DO_STARTUP_FW_UPDATE
+/*
+ * One-shot worker queued at init time to run a firmware update with
+ * the built-in image (synaptics_fw_updater(NULL)).  When
+ * WAIT_FOR_FB_READY is defined it first polls rmi4_data->fb_ready for
+ * up to FB_READY_TIMEOUT_S seconds before flashing.
+ */
+static void fwu_startup_fw_update_work(struct work_struct *work)
+{
+	static unsigned char do_once = 1;	/* ensure a single run */
+#ifdef WAIT_FOR_FB_READY
+	unsigned int timeout;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+#endif
+
+	if (!do_once)
+		return;
+	do_once = 0;
+
+#ifdef WAIT_FOR_FB_READY
+	/* Number of FB_READY_WAIT_MS polls that make up the timeout */
+	timeout = FB_READY_TIMEOUT_S * 1000 / FB_READY_WAIT_MS + 1;
+
+	while (!rmi4_data->fb_ready) {
+		msleep(FB_READY_WAIT_MS);
+		timeout--;
+		if (timeout == 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Timed out waiting for FB ready\n",
+					__func__);
+			return;
+		}
+	}
+#endif
+
+	synaptics_fw_updater(NULL);
+
+	return;
+}
+#endif
+
+/*
+ * Sysfs bin-attribute read: copy the config data fetched by a prior
+ * "readconfig" operation (fwu->read_config_buf) into the user buffer.
+ *
+ * Returns the number of bytes copied (fwu->config_size) or a negative
+ * error code.
+ */
+static ssize_t fwu_sysfs_show_image(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (count < fwu->config_size) {
+		/* %zu is the correct specifier for size_t; the old code
+		 * passed an unsigned value to %d. */
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Not enough space (%zu bytes) in buffer\n",
+				__func__, count);
+		return -EINVAL;
+	}
+
+	retval = secure_memcpy(buf, count, fwu->read_config_buf,
+			fwu->read_config_buf_size, fwu->config_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy config data\n",
+				__func__);
+		return retval;
+	}
+
+	return fwu->config_size;
+}
+
+/*
+ * Sysfs bin-attribute write: append a chunk of the image being staged
+ * into fwu->ext_data_source at the current data position.
+ *
+ * The buffer must have been allocated by a prior "imagesize" write;
+ * reject the write otherwise instead of dereferencing NULL.
+ * Returns the number of bytes consumed or a negative error code.
+ */
+static ssize_t fwu_sysfs_store_image(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	/* Guard against writes before imagesize allocated the buffer */
+	if (!fwu->ext_data_source) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Image data buffer not allocated\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	retval = secure_memcpy(&fwu->ext_data_source[fwu->data_pos],
+			fwu->image_size - fwu->data_pos, buf, count, count);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy image data\n",
+				__func__);
+		return retval;
+	}
+
+	fwu->data_pos += count;
+
+	return count;
+}
+
+/*
+ * Sysfs "dorecovery" trigger: flash the staged image while the
+ * controller is in microbootloader mode.  The staged buffer is always
+ * released on exit (success or failure).
+ */
+static ssize_t fwu_sysfs_do_recovery_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	/* Expect a single unsigned integer from user space */
+	if (sscanf(buf, "%u", &input) != 1) {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	/* Recovery only makes sense while stuck in the microbootloader */
+	if (!fwu->in_ub_mode) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Not in microbootloader mode\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	/* Nothing staged via the data/imagesize interface */
+	if (!fwu->ext_data_source)
+		return -EINVAL;
+
+	fwu->image = fwu->ext_data_source;
+
+	retval = fwu_start_recovery();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to do recovery\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	kfree(fwu->ext_data_source);
+	fwu->ext_data_source = NULL;
+	fwu->image = NULL;
+	return retval;
+}
+
+/*
+ * Sysfs "doreflash" trigger: reflash the controller with the staged
+ * image.  The input value may carry the LOCKDOWN bit OR-ed onto
+ * NORMAL or FORCE; FORCE skips the version comparison.  The staged
+ * buffer and the force/lockdown flags are reset on every exit path.
+ */
+static ssize_t fwu_sysfs_do_reflash_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (sscanf(buf, "%u", &input) != 1) {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (fwu->in_ub_mode) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: In microbootloader mode\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (!fwu->ext_data_source)
+		return -EINVAL;
+	else
+		fwu->image = fwu->ext_data_source;
+
+	/* Strip the lockdown flag before validating the mode value */
+	if (input & LOCKDOWN) {
+		fwu->do_lockdown = true;
+		input &= ~LOCKDOWN;
+	}
+
+	if ((input != NORMAL) && (input != FORCE)) {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (input == FORCE)
+		fwu->force_update = true;
+
+	retval = synaptics_fw_updater(fwu->image);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to do reflash\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	kfree(fwu->ext_data_source);
+	fwu->ext_data_source = NULL;
+	fwu->image = NULL;
+	/* Restore compile-time defaults for the next operation */
+	fwu->force_update = FORCE_UPDATE;
+	fwu->do_lockdown = DO_LOCKDOWN;
+	return retval;
+}
+
+/*
+ * Sysfs "writeconfig" trigger: write the staged config data to the
+ * controller.  Only the value 1 is accepted.  The staged buffer is
+ * released on exit.
+ */
+static ssize_t fwu_sysfs_write_config_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (sscanf(buf, "%u", &input) != 1 || input != 1) {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (fwu->in_ub_mode) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: In microbootloader mode\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (!fwu->ext_data_source)
+		return -EINVAL;
+
+	fwu->image = fwu->ext_data_source;
+
+	retval = fwu_start_write_config();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write config\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	kfree(fwu->ext_data_source);
+	fwu->ext_data_source = NULL;
+	fwu->image = NULL;
+	return retval;
+}
+
+/*
+ * Sysfs "readconfig" trigger: read the selected config area from the
+ * controller into fwu->read_config_buf.  Only the value 1 is accepted.
+ */
+static ssize_t fwu_sysfs_read_config_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (sscanf(buf, "%u", &input) != 1 || input != 1)
+		return -EINVAL;
+
+	if (fwu->in_ub_mode) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: In microbootloader mode\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	retval = fwu_do_read_config();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read config\n",
+				__func__);
+		return retval;
+	}
+
+	return count;
+}
+
+/*
+ * Sysfs "configarea" store: select which config area subsequent
+ * read/write config operations act on.
+ *
+ * NOTE(review): the value is stored without range validation —
+ * presumably only the driver's config-area enum values are meaningful;
+ * confirm whether out-of-range values should be rejected here.
+ */
+static ssize_t fwu_sysfs_config_area_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long config_area;
+
+	retval = sstrtoul(buf, 10, &config_area);
+	if (retval)
+		return retval;
+
+	fwu->config_area = config_area;
+
+	return count;
+}
+
+/*
+ * Sysfs "imagename" store: record the firmware image file name.
+ *
+ * The name is NUL-terminated after copying; the old code never
+ * terminated the buffer, so a name of exactly MAX_IMAGE_NAME_LEN bytes
+ * was unterminated and a shorter name left residue from a previous,
+ * longer one.
+ */
+static ssize_t fwu_sysfs_image_name_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	/* Reserve one byte for the NUL terminator */
+	if (count >= MAX_IMAGE_NAME_LEN) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy image file name\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	retval = secure_memcpy(fwu->image_name, MAX_IMAGE_NAME_LEN,
+			buf, count, count);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy image file name\n",
+				__func__);
+		return retval;
+	}
+
+	/* Terminate so a shorter name cannot expose stale bytes */
+	fwu->image_name[count] = '\0';
+
+	return count;
+}
+
+/*
+ * Sysfs "imagesize" store: allocate the staging buffer for a firmware
+ * image of the given size and reset the write position.
+ *
+ * A size of zero is rejected, and image_size is cleared if the
+ * allocation fails so stale state cannot describe a NULL buffer.
+ */
+static ssize_t fwu_sysfs_image_size_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long size;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &size);
+	if (retval)
+		return retval;
+
+	/* A zero-byte image is meaningless and kzalloc(0) returns a
+	 * non-dereferenceable ZERO_SIZE_PTR */
+	if (size == 0)
+		return -EINVAL;
+
+	fwu->image_size = size;
+	fwu->data_pos = 0;
+
+	kfree(fwu->ext_data_source);
+	fwu->ext_data_source = kzalloc(fwu->image_size, GFP_KERNEL);
+	if (!fwu->ext_data_source) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for image data\n",
+				__func__);
+		fwu->image_size = 0;	/* keep state consistent */
+		return -ENOMEM;
+	}
+
+	return count;
+}
+
+/* Sysfs read: flash block size in bytes. */
+static ssize_t fwu_sysfs_block_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned int block_size = fwu->block_size;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", block_size);
+}
+
+/* Sysfs read: number of UI firmware blocks. */
+static ssize_t fwu_sysfs_firmware_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned int blocks = fwu->blkcount.ui_firmware;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", blocks);
+}
+
+/* Sysfs read: number of UI configuration blocks. */
+static ssize_t fwu_sysfs_configuration_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned int blocks = fwu->blkcount.ui_config;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", blocks);
+}
+
+/* Sysfs read: number of display configuration blocks. */
+static ssize_t fwu_sysfs_disp_config_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned int blocks = fwu->blkcount.dp_config;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", blocks);
+}
+
+/* Sysfs read: number of permanent configuration blocks. */
+static ssize_t fwu_sysfs_perm_config_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned int blocks = fwu->blkcount.pm_config;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", blocks);
+}
+
+/* Sysfs read: number of bootloader configuration blocks. */
+static ssize_t fwu_sysfs_bl_config_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned int blocks = fwu->blkcount.bl_config;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", blocks);
+}
+
+/* Sysfs read: number of guest code blocks. */
+static ssize_t fwu_sysfs_guest_code_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned int blocks = fwu->blkcount.guest_code;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", blocks);
+}
+
+/*
+ * Sysfs "writeguestcode" trigger: program the staged guest code data.
+ * Only the value 1 is accepted.  The staged buffer is released on
+ * exit.
+ */
+static ssize_t fwu_sysfs_write_guest_code_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (sscanf(buf, "%u", &input) != 1 || input != 1) {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (fwu->in_ub_mode) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: In microbootloader mode\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (!fwu->ext_data_source)
+		return -EINVAL;
+
+	fwu->image = fwu->ext_data_source;
+
+	retval = fwu_start_write_guest_code();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write guest code\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	kfree(fwu->ext_data_source);
+	fwu->ext_data_source = NULL;
+	fwu->image = NULL;
+	return retval;
+}
+
+/*
+ * Attention handler: refresh the cached flash status whenever an
+ * interrupt bit belonging to the flash function fires.
+ */
+static void synaptics_rmi4_fwu_attn(struct synaptics_rmi4_data *rmi4_data,
+		unsigned char intr_mask)
+{
+	if (!fwu || !(fwu->intr_mask & intr_mask))
+		return;
+
+	fwu_read_flash_status();
+}
+
+/*
+ * Initialize the firmware-update handler: allocate the fwu handle,
+ * scan the PDT, read the F34 queries and device config ID, create the
+ * sysfs interface, and (optionally) queue the startup update work.
+ *
+ * Returns 0 on success or a negative error code; all partially
+ * acquired resources are released on failure.
+ */
+static int synaptics_rmi4_fwu_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char attr_count;
+	struct pdt_properties pdt_props;
+
+	if (fwu) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	fwu = kzalloc(sizeof(*fwu), GFP_KERNEL);
+	if (!fwu) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for fwu\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	fwu->image_name = kzalloc(MAX_IMAGE_NAME_LEN, GFP_KERNEL);
+	if (!fwu->image_name) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for image name\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_fwu;
+	}
+
+	fwu->rmi4_data = rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			PDT_PROPS,
+			pdt_props.data,
+			sizeof(pdt_props.data));
+	if (retval < 0) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read PDT properties, assuming 0x00\n",
+				__func__);
+	} else if (pdt_props.has_bsr) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Reflash for LTS not currently supported\n",
+				__func__);
+		retval = -ENODEV;
+		goto exit_free_mem;
+	}
+
+	retval = fwu_scan_pdt();
+	if (retval < 0)
+		goto exit_free_mem;
+
+	if (!fwu->in_ub_mode) {
+		retval = fwu_read_f34_queries();
+		if (retval < 0)
+			goto exit_free_mem;
+
+		retval = fwu_get_device_config_id();
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read device config ID\n",
+					__func__);
+			goto exit_free_mem;
+		}
+	}
+
+	fwu->force_update = FORCE_UPDATE;
+	fwu->do_lockdown = DO_LOCKDOWN;
+	fwu->initialized = true;
+
+	retval = sysfs_create_bin_file(&rmi4_data->input_dev->dev.kobj,
+			&dev_attr_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs bin file\n",
+				__func__);
+		goto exit_free_mem;
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			retval = -ENODEV;
+			goto exit_remove_attrs;
+		}
+	}
+
+#ifdef DO_STARTUP_FW_UPDATE
+	fwu->fwu_workqueue = create_singlethread_workqueue("fwu_workqueue");
+	if (!fwu->fwu_workqueue) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create workqueue\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_remove_attrs;
+	}
+	INIT_WORK(&fwu->fwu_work, fwu_startup_fw_update_work);
+	queue_work(fwu->fwu_workqueue,
+			&fwu->fwu_work);
+#endif
+
+	return 0;
+
+exit_remove_attrs:
+	/* attr_count is unsigned, so the old "for (attr_count--;
+	 * attr_count >= 0; attr_count--)" condition was always true and
+	 * wrapped past zero into out-of-bounds accesses.  Count down
+	 * explicitly instead. */
+	while (attr_count > 0) {
+		attr_count--;
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+	sysfs_remove_bin_file(&rmi4_data->input_dev->dev.kobj, &dev_attr_data);
+
+exit_free_mem:
+	kfree(fwu->image_name);
+
+exit_free_fwu:
+	kfree(fwu);
+	fwu = NULL;
+
+exit:
+	return retval;
+}
+
+/*
+ * Tear down the firmware-update handler: stop the startup work,
+ * remove the sysfs interface, free all buffers, and signal
+ * fwu_remove_complete for the module exit path.
+ */
+static void synaptics_rmi4_fwu_remove(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char idx;
+
+	if (!fwu)
+		goto exit;
+
+#ifdef DO_STARTUP_FW_UPDATE
+	cancel_work_sync(&fwu->fwu_work);
+	flush_workqueue(fwu->fwu_workqueue);
+	destroy_workqueue(fwu->fwu_workqueue);
+#endif
+
+	for (idx = 0; idx < ARRAY_SIZE(attrs); idx++)
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[idx].attr);
+
+	sysfs_remove_bin_file(&rmi4_data->input_dev->dev.kobj, &dev_attr_data);
+
+	kfree(fwu->read_config_buf);
+	kfree(fwu->image_name);
+	kfree(fwu);
+	fwu = NULL;
+
+exit:
+	complete(&fwu_remove_complete);
+}
+
+/*
+ * Reset callback: re-create the handler if it is gone, otherwise
+ * rescan the PDT and, when not in microbootloader mode, refresh the
+ * F34 query data.
+ */
+static void synaptics_rmi4_fwu_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!fwu) {
+		synaptics_rmi4_fwu_init(rmi4_data);
+		return;
+	}
+
+	if (fwu_scan_pdt() < 0)
+		return;
+
+	if (!fwu->in_ub_mode)
+		fwu_read_f34_queries();
+}
+
+/* Expansion-function registration record handed to the DSX core via
+ * synaptics_rmi4_new_function(); unused callbacks are left NULL. */
+static struct synaptics_rmi4_exp_fn fwu_module = {
+	.fn_type = RMI_FW_UPDATER,
+	.init = synaptics_rmi4_fwu_init,
+	.remove = synaptics_rmi4_fwu_remove,
+	.reset = synaptics_rmi4_fwu_reset,
+	.reinit = NULL,
+	.early_suspend = NULL,
+	.suspend = NULL,
+	.resume = NULL,
+	.late_resume = NULL,
+	.attn = synaptics_rmi4_fwu_attn,
+};
+
+/* Module entry: register the updater with the DSX core (insert=true). */
+static int __init rmi4_fw_update_module_init(void)
+{
+	synaptics_rmi4_new_function(&fwu_module, true);
+
+	return 0;
+}
+
+/* Module exit: deregister from the DSX core and wait until the remove
+ * callback has completed teardown before unloading. */
+static void __exit rmi4_fw_update_module_exit(void)
+{
+	synaptics_rmi4_new_function(&fwu_module, false);
+
+	wait_for_completion(&fwu_remove_complete);
+
+	return;
+}
+
+/* Standard module registration and metadata */
+module_init(rmi4_fw_update_module_init);
+module_exit(rmi4_fw_update_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX FW Update Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_gesture.c b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_gesture.c
new file mode 100644
index 0000000..0bd342c
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_gesture.c
@@ -0,0 +1,2308 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2015 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx_v2_6.h>
+#include "synaptics_dsx_core.h"
+
+/* Physical path reported for the gesture input device */
+#define GESTURE_PHYS_NAME "synaptics_dsx/gesture"
+
+/* Name of the sysfs sub-directory holding the tuning parameters */
+#define TUNING_SYSFS_DIR_NAME "tuning"
+
+/* Persist registered gesture templates across resets */
+#define STORE_GESTURES
+#ifdef STORE_GESTURES
+#define GESTURES_TO_STORE 10
+#endif
+
+#define CTRL23_FINGER_REPORT_ENABLE_BIT 0
+#define CTRL27_UDG_ENABLE_BIT 4
+#define WAKEUP_GESTURE_MODE 0x02
+
+static ssize_t udg_sysfs_engine_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_detection_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_detection_score_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_detection_index_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_registration_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_registration_begin_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_registration_status_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_max_index_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_detection_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_index_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_template_valid_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_valid_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_template_clear_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_trace_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_data_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+static ssize_t udg_sysfs_template_data_store(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+static ssize_t udg_sysfs_trace_data_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+static ssize_t udg_sysfs_template_displacement_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_displacement_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_rotation_invariance_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_rotation_invariance_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_scale_invariance_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_scale_invariance_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_threshold_factor_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_threshold_factor_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_match_metric_threshold_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_match_metric_threshold_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_max_inter_stroke_time_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_max_inter_stroke_time_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static int udg_read_tuning_params(void);
+
+static int udg_write_tuning_params(void);
+
+static int udg_detection_enable(bool enable);
+
+static int udg_engine_enable(bool enable);
+
+static int udg_set_index(unsigned char index);
+
+#ifdef STORE_GESTURES
+static int udg_read_valid_data(void);
+static int udg_write_valid_data(void);
+static int udg_read_template_data(unsigned char index);
+static int udg_write_template_data(void);
+#endif
+
+/* Values reported in the F12 data 4 gesture-type field */
+enum gesture_type {
+	DETECTION = 0x0f,
+	REGISTRATION = 0x10,
+};
+
+/* Register image of the user-defined-gesture tuning parameters;
+ * multi-byte values are split into lsb/msb register bytes. */
+struct udg_tuning {
+	union {
+		struct {
+			unsigned char maximum_number_of_templates;
+			unsigned char template_size;
+			unsigned char template_disp_lsb;
+			unsigned char template_disp_msb;
+			unsigned char rotation_inv_lsb;
+			unsigned char rotation_inv_msb;
+			unsigned char scale_inv_lsb;
+			unsigned char scale_inv_msb;
+			unsigned char thres_factor_lsb;
+			unsigned char thres_factor_msb;
+			unsigned char metric_thres_lsb;
+			unsigned char metric_thres_msb;
+			unsigned char inter_stroke_lsb;
+			unsigned char inter_stroke_msb;
+		} __packed;
+		unsigned char data[14];
+	};
+};
+
+/* Resolved register addresses of the F12 data/control sub-registers
+ * used by the gesture engine. */
+struct udg_addr {
+	unsigned short data_4;
+	unsigned short ctrl_18;
+	unsigned short ctrl_20;
+	unsigned short ctrl_23;
+	unsigned short ctrl_27;
+	unsigned short ctrl_41;
+	unsigned short trace_x;
+	unsigned short trace_y;
+	unsigned short trace_segment;
+	unsigned short template_helper;
+	unsigned short template_data;
+	unsigned short template_flags;
+};
+
+/* F12 query 0: general capability bits plus template count/size. */
+struct synaptics_rmi4_f12_query_0 {
+	union {
+		struct {
+			struct {
+				unsigned char has_register_descriptors:1;
+				unsigned char has_closed_cover:1;
+				unsigned char has_fast_glove_detect:1;
+				unsigned char has_dribble:1;
+				unsigned char has_4p4_jitter_filter_strength:1;
+				unsigned char f12_query0_s0_b5__7:3;
+			} __packed;
+			struct {
+				unsigned char max_num_templates:4;
+				unsigned char f12_query0_s1_b4__7:4;
+				unsigned char template_size_lsb;
+				unsigned char template_size_msb;
+			} __packed;
+		};
+		unsigned char data[4];
+	};
+};
+
+/* F12 query 5: presence bits for control registers 0-47, used to
+ * compute each control register's offset within the function. */
+struct synaptics_rmi4_f12_query_5 {
+	union {
+		struct {
+			unsigned char size_of_query6;
+			struct {
+				unsigned char ctrl0_is_present:1;
+				unsigned char ctrl1_is_present:1;
+				unsigned char ctrl2_is_present:1;
+				unsigned char ctrl3_is_present:1;
+				unsigned char ctrl4_is_present:1;
+				unsigned char ctrl5_is_present:1;
+				unsigned char ctrl6_is_present:1;
+				unsigned char ctrl7_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl8_is_present:1;
+				unsigned char ctrl9_is_present:1;
+				unsigned char ctrl10_is_present:1;
+				unsigned char ctrl11_is_present:1;
+				unsigned char ctrl12_is_present:1;
+				unsigned char ctrl13_is_present:1;
+				unsigned char ctrl14_is_present:1;
+				unsigned char ctrl15_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl16_is_present:1;
+				unsigned char ctrl17_is_present:1;
+				unsigned char ctrl18_is_present:1;
+				unsigned char ctrl19_is_present:1;
+				unsigned char ctrl20_is_present:1;
+				unsigned char ctrl21_is_present:1;
+				unsigned char ctrl22_is_present:1;
+				unsigned char ctrl23_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl24_is_present:1;
+				unsigned char ctrl25_is_present:1;
+				unsigned char ctrl26_is_present:1;
+				unsigned char ctrl27_is_present:1;
+				unsigned char ctrl28_is_present:1;
+				unsigned char ctrl29_is_present:1;
+				unsigned char ctrl30_is_present:1;
+				unsigned char ctrl31_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl32_is_present:1;
+				unsigned char ctrl33_is_present:1;
+				unsigned char ctrl34_is_present:1;
+				unsigned char ctrl35_is_present:1;
+				unsigned char ctrl36_is_present:1;
+				unsigned char ctrl37_is_present:1;
+				unsigned char ctrl38_is_present:1;
+				unsigned char ctrl39_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl40_is_present:1;
+				unsigned char ctrl41_is_present:1;
+				unsigned char ctrl42_is_present:1;
+				unsigned char ctrl43_is_present:1;
+				unsigned char ctrl44_is_present:1;
+				unsigned char ctrl45_is_present:1;
+				unsigned char ctrl46_is_present:1;
+				unsigned char ctrl47_is_present:1;
+			} __packed;
+		};
+		unsigned char data[7];
+	};
+};
+
+/* F12 query 8: presence bits for data registers 0-23, used to compute
+ * each data register's offset within the function. */
+struct synaptics_rmi4_f12_query_8 {
+	union {
+		struct {
+			unsigned char size_of_query9;
+			struct {
+				unsigned char data0_is_present:1;
+				unsigned char data1_is_present:1;
+				unsigned char data2_is_present:1;
+				unsigned char data3_is_present:1;
+				unsigned char data4_is_present:1;
+				unsigned char data5_is_present:1;
+				unsigned char data6_is_present:1;
+				unsigned char data7_is_present:1;
+			} __packed;
+			struct {
+				unsigned char data8_is_present:1;
+				unsigned char data9_is_present:1;
+				unsigned char data10_is_present:1;
+				unsigned char data11_is_present:1;
+				unsigned char data12_is_present:1;
+				unsigned char data13_is_present:1;
+				unsigned char data14_is_present:1;
+				unsigned char data15_is_present:1;
+			} __packed;
+			struct {
+				unsigned char data16_is_present:1;
+				unsigned char data17_is_present:1;
+				unsigned char data18_is_present:1;
+				unsigned char data19_is_present:1;
+				unsigned char data20_is_present:1;
+				unsigned char data21_is_present:1;
+				unsigned char data22_is_present:1;
+				unsigned char data23_is_present:1;
+			} __packed;
+		};
+		unsigned char data[4];
+	};
+};
+
+/* F12 control 41: gesture registration control — enable bit, target
+ * template index, and the begin trigger. */
+struct synaptics_rmi4_f12_control_41 {
+	union {
+		struct {
+			unsigned char enable_registration:1;
+			unsigned char template_index:4;
+			unsigned char begin:1;
+			unsigned char f12_ctrl41_b6__7:2;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* Runtime state of the user-defined-gesture engine: resolved register
+ * addresses, cached ctrl/template/trace buffers, detection and
+ * registration status, and the associated input device. */
+struct synaptics_rmi4_udg_handle {
+	atomic_t attn_event;
+	unsigned char intr_mask;
+	unsigned char report_flags;
+	unsigned char object_type_enable1;
+	unsigned char object_type_enable2;
+	unsigned char trace_size;
+	unsigned char template_index;
+	unsigned char max_num_templates;
+	unsigned char detection_score;
+	unsigned char detection_index;
+	unsigned char detection_status;
+	unsigned char registration_status;
+	unsigned char *ctrl_buf;
+	unsigned char *trace_data_buf;
+	unsigned char *template_data_buf;
+#ifdef STORE_GESTURES
+	unsigned char gestures_to_store;
+	unsigned char *storage_buf;
+	unsigned char valid_buf[2];
+#endif
+	unsigned short trace_data_buf_size;
+	unsigned short template_size;
+	unsigned short template_data_size;
+	unsigned short query_base_addr;
+	unsigned short control_base_addr;
+	unsigned short data_base_addr;
+	unsigned short command_base_addr;
+	unsigned short ctrl_18_sub10_off;
+	unsigned short ctrl_20_sub1_off;
+	unsigned short ctrl_23_sub3_off;
+	unsigned short ctrl_27_sub5_off;
+	struct input_dev *udg_dev;
+	struct kobject *tuning_dir;
+	struct udg_addr addr;
+	struct udg_tuning tuning;
+	struct synaptics_rmi4_data *rmi4_data;
+};
+
+/* Sysfs attributes exposing the gesture engine; write-only entries
+ * pair with synaptics_rmi4_show_error, read-only with
+ * synaptics_rmi4_store_error. */
+static struct device_attribute attrs[] = {
+	__ATTR(engine_enable, 0220,
+			synaptics_rmi4_show_error,
+			udg_sysfs_engine_enable_store),
+	__ATTR(detection_enable, 0220,
+			synaptics_rmi4_show_error,
+			udg_sysfs_detection_enable_store),
+	__ATTR(detection_score, 0444,
+			udg_sysfs_detection_score_show,
+			synaptics_rmi4_store_error),
+	__ATTR(detection_index, 0444,
+			udg_sysfs_detection_index_show,
+			synaptics_rmi4_store_error),
+	__ATTR(registration_enable, 0220,
+			synaptics_rmi4_show_error,
+			udg_sysfs_registration_enable_store),
+	__ATTR(registration_begin, 0220,
+			synaptics_rmi4_show_error,
+			udg_sysfs_registration_begin_store),
+	__ATTR(registration_status, 0444,
+			udg_sysfs_registration_status_show,
+			synaptics_rmi4_store_error),
+	__ATTR(template_size, 0444,
+			udg_sysfs_template_size_show,
+			synaptics_rmi4_store_error),
+	__ATTR(template_max_index, 0444,
+			udg_sysfs_template_max_index_show,
+			synaptics_rmi4_store_error),
+	__ATTR(template_detection, 0444,
+			udg_sysfs_template_detection_show,
+			synaptics_rmi4_store_error),
+	__ATTR(template_index, 0220,
+			synaptics_rmi4_show_error,
+			udg_sysfs_template_index_store),
+	__ATTR(template_valid, 0664,
+			udg_sysfs_template_valid_show,
+			udg_sysfs_template_valid_store),
+	__ATTR(template_clear, 0220,
+			synaptics_rmi4_show_error,
+			udg_sysfs_template_clear_store),
+	__ATTR(trace_size, 0444,
+			udg_sysfs_trace_size_show,
+			synaptics_rmi4_store_error),
+};
+
+/* Binary sysfs attribute for reading/writing raw template data */
+static struct bin_attribute template_data = {
+	.attr = {
+		.name = "template_data",
+		.mode = 0664,
+	},
+	.size = 0,
+	.read = udg_sysfs_template_data_show,
+	.write = udg_sysfs_template_data_store,
+};
+
+/* Binary sysfs attribute for reading the recorded gesture trace
+ * (read-only: no write handler). */
+static struct bin_attribute trace_data = {
+	.attr = {
+		.name = "trace_data",
+		.mode = 0444,
+	},
+	.size = 0,
+	.read = udg_sysfs_trace_data_show,
+	.write = NULL,
+};
+
+/* Sysfs attributes in the "tuning" sub-directory exposing the
+ * udg_tuning parameters. */
+static struct device_attribute params[] = {
+	__ATTR(template_displacement, 0664,
+			udg_sysfs_template_displacement_show,
+			udg_sysfs_template_displacement_store),
+	__ATTR(rotation_invariance, 0664,
+			udg_sysfs_rotation_invariance_show,
+			udg_sysfs_rotation_invariance_store),
+	__ATTR(scale_invariance, 0664,
+			udg_sysfs_scale_invariance_show,
+			udg_sysfs_scale_invariance_store),
+	__ATTR(threshold_factor, 0664,
+			udg_sysfs_threshold_factor_show,
+			udg_sysfs_threshold_factor_store),
+	__ATTR(match_metric_threshold, 0664,
+			udg_sysfs_match_metric_threshold_show,
+			udg_sysfs_match_metric_threshold_store),
+	__ATTR(max_inter_stroke_time, 0664,
+			udg_sysfs_max_inter_stroke_time_show,
+			udg_sysfs_max_inter_stroke_time_store),
+};
+
+/* Singleton handle for the gesture engine */
+static struct synaptics_rmi4_udg_handle *udg;
+
+/* Byte sizes of the sub-packets within F12 ctrl 18/20/23/27, used to
+ * locate sub-register offsets. */
+static unsigned char ctrl_18_sub_size[] = {10, 10, 10, 2, 3, 4, 3, 3, 1, 1};
+static unsigned char ctrl_20_sub_size[] = {2};
+static unsigned char ctrl_23_sub_size[] = {1, 1, 1};
+static unsigned char ctrl_27_sub_size[] = {1, 5, 2, 1, 7};
+
+/* Signalled by the remove callback; module exit waits on it */
+DECLARE_COMPLETION(udg_remove_complete);
+
+/*
+ * Sysfs "engine_enable" store: turn the gesture engine on (1) or
+ * off (0); any other value is rejected.
+ */
+static ssize_t udg_sysfs_engine_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+
+	if (sscanf(buf, "%u", &input) != 1 || input > 1)
+		return -EINVAL;
+
+	retval = udg_engine_enable(input == 1);
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/*
+ * Sysfs "detection_enable" store: enable (1) or disable (0) gesture
+ * detection; the cached detection status is cleared first.
+ */
+static ssize_t udg_sysfs_detection_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+
+	if (sscanf(buf, "%u", &input) != 1 || input > 1)
+		return -EINVAL;
+
+	udg->detection_status = 0;
+
+	retval = udg_detection_enable(input == 1);
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/* sysfs show handler: match score of the last detected gesture. */
+static ssize_t udg_sysfs_detection_score_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned int score = udg->detection_score;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", score);
+}
+
+/* sysfs show handler: template index of the last detected gesture. */
+static ssize_t udg_sysfs_detection_index_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", udg->detection_index);
+}
+
+/* sysfs store handler: write 1/0 to enter/leave gesture registration.
+ *
+ * While registration is enabled, F12_CTRL23 is rewritten so that only
+ * finger objects are reported; on disable, the object-type enables saved
+ * at init time (object_type_enable1/2) are restored.  Finally the
+ * enable_registration bit in F12_CTRL41 is toggled via read-modify-write.
+ */
+static ssize_t udg_sysfs_registration_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	bool enable;
+	unsigned int input;
+	struct synaptics_rmi4_f12_control_41 control_41;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	if (sscanf(buf, "%u", &input) != 1)
+		return -EINVAL;
+
+	if (input == 1)
+		enable = true;
+	else if (input == 0)
+		enable = false;
+	else
+		return -EINVAL;
+
+	if (enable) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				udg->addr.ctrl_23,
+				udg->ctrl_buf,
+				udg->ctrl_23_sub3_off + 1);
+		if (retval < 0)
+			return retval;
+
+		/* Report fingers only while a gesture is being recorded. */
+		udg->ctrl_buf[0] = 0;
+		udg->ctrl_buf[0] |= (1 << CTRL23_FINGER_REPORT_ENABLE_BIT);
+		if (udg->ctrl_23_sub3_off)
+			udg->ctrl_buf[udg->ctrl_23_sub3_off] = 0;
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				udg->addr.ctrl_23,
+				udg->ctrl_buf,
+				udg->ctrl_23_sub3_off + 1);
+		if (retval < 0)
+			return retval;
+	} else {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				udg->addr.ctrl_23,
+				udg->ctrl_buf,
+				udg->ctrl_23_sub3_off + 1);
+		if (retval < 0)
+			return retval;
+
+		/* Restore the object-type enables captured in udg_reg_init. */
+		udg->ctrl_buf[0] = udg->object_type_enable1;
+		if (udg->ctrl_23_sub3_off) {
+			udg->ctrl_buf[udg->ctrl_23_sub3_off] =
+					udg->object_type_enable2;
+		}
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				udg->addr.ctrl_23,
+				udg->ctrl_buf,
+				udg->ctrl_23_sub3_off + 1);
+		if (retval < 0)
+			return retval;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_41,
+			control_41.data,
+			sizeof(control_41.data));
+	if (retval < 0)
+		return retval;
+
+	control_41.enable_registration = enable ? 1 : 0;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.ctrl_41,
+			control_41.data,
+			sizeof(control_41.data));
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/* sysfs store handler: write 1/0 to begin/end recording of the current
+ * gesture template, via the begin bit of F12_CTRL41 (read-modify-write).
+ */
+static ssize_t udg_sysfs_registration_begin_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	bool begin;
+	unsigned int input;
+	struct synaptics_rmi4_f12_control_41 control_41;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	if (sscanf(buf, "%u", &input) != 1)
+		return -EINVAL;
+
+	if (input == 1)
+		begin = true;
+	else if (input == 0)
+		begin = false;
+	else
+		return -EINVAL;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_41,
+			control_41.data,
+			sizeof(control_41.data));
+	if (retval < 0)
+		return retval;
+
+	control_41.begin = begin ? 1 : 0;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.ctrl_41,
+			control_41.data,
+			sizeof(control_41.data));
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/* sysfs show handler: status byte of the last registration attempt. */
+static ssize_t udg_sysfs_registration_status_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%02x\n", udg->registration_status);
+}
+
+/* sysfs show handler: template size reported by F12 query 0. */
+static ssize_t udg_sysfs_template_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", udg->template_size);
+}
+
+/* sysfs show handler: highest valid template index (count - 1). */
+static ssize_t udg_sysfs_template_max_index_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", udg->max_num_templates - 1);
+}
+
+/* sysfs show handler: one-shot report of the last gesture event.
+ *
+ * Returns "0" when no attention event is pending, otherwise the detection
+ * status byte in hex.  Both the attention flag and the cached status are
+ * consumed (cleared) by this read, so a second read reports "0" again.
+ * On DETECTION the score/index/trace size are latched from the F12 data 4
+ * bytes; on REGISTRATION the registration status and trace size are.
+ */
+static ssize_t udg_sysfs_template_detection_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	int attn_event;
+	unsigned char detection_status;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	attn_event = atomic_read(&udg->attn_event);
+	atomic_set(&udg->attn_event, 0);
+
+	if (attn_event == 0)
+		return snprintf(buf, PAGE_SIZE, "0\n");
+
+	/* If the attention handler did not cache the status, fetch it now. */
+	if (udg->detection_status == 0) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				udg->addr.data_4,
+				rmi4_data->gesture_detection,
+				sizeof(rmi4_data->gesture_detection));
+		if (retval < 0)
+			return retval;
+
+		udg->detection_status = rmi4_data->gesture_detection[0];
+	}
+
+	detection_status = udg->detection_status;
+	udg->detection_status = 0;
+
+	switch (detection_status) {
+	case DETECTION:
+		udg->detection_score = rmi4_data->gesture_detection[1];
+		udg->detection_index = rmi4_data->gesture_detection[4];
+		udg->trace_size = rmi4_data->gesture_detection[3];
+		break;
+	case REGISTRATION:
+		udg->registration_status = rmi4_data->gesture_detection[1];
+		udg->trace_size = rmi4_data->gesture_detection[3];
+		break;
+	default:
+		return snprintf(buf, PAGE_SIZE, "0\n");
+	}
+
+	return snprintf(buf, PAGE_SIZE, "0x%02x\n", detection_status);
+}
+
+/* sysfs store handler: select the current template index (decimal). */
+static ssize_t udg_sysfs_template_index_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long index;
+
+	retval = sstrtoul(buf, 10, &index);
+	if (retval)
+		return retval;
+
+	/* udg_set_index range-checks against max_num_templates. */
+	retval = udg_set_index((unsigned char)index);
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/* sysfs show handler: valid flag of the currently selected template.
+ *
+ * The flags register holds one bit per template.  NOTE(review): the local
+ * buffer covers 2 bytes (16 templates); assumes max_num_templates <= 16 —
+ * confirm against the firmware's query 0 value.
+ */
+static ssize_t udg_sysfs_template_valid_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned char valid;
+	unsigned char offset;
+	unsigned char byte_num;
+	unsigned char template_flags[2];
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	byte_num = udg->template_index / 8;
+	offset = udg->template_index % 8;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.template_flags,
+			template_flags,
+			sizeof(template_flags));
+	if (retval < 0)
+		return retval;
+
+	valid = (template_flags[byte_num] & (1 << offset)) >> offset;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", valid);
+}
+
+/* sysfs store handler: set/clear the valid flag of the current template.
+ *
+ * Any non-zero input is treated as 1.  The flags register is updated via
+ * read-modify-write so the other templates' bits are preserved.
+ */
+static ssize_t udg_sysfs_template_valid_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long valid;
+	unsigned char offset;
+	unsigned char byte_num;
+	unsigned char template_flags[2];
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &valid);
+	if (retval)
+		return retval;
+
+	if (valid > 0)
+		valid = 1;
+
+	byte_num = udg->template_index / 8;
+	offset = udg->template_index % 8;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.template_flags,
+			template_flags,
+			sizeof(template_flags));
+	if (retval < 0)
+		return retval;
+
+	if (valid)
+		template_flags[byte_num] |= (1 << offset);
+	else
+		template_flags[byte_num] &= ~(1 << offset);
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.template_flags,
+			template_flags,
+			sizeof(template_flags));
+	if (retval < 0)
+		return retval;
+
+#ifdef STORE_GESTURES
+	/* Keep the host-side copy of the valid flags in sync. */
+	udg_read_valid_data();
+#endif
+
+	return count;
+}
+
+/* sysfs store handler: write 1 to erase the current template.
+ *
+ * Zeroes the template data in the device, then reuses the valid-flag
+ * store handler with input "0" to clear the template's valid bit.
+ */
+static ssize_t udg_sysfs_template_clear_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	const char cmd[] = {'0', 0};
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	if (sscanf(buf, "%u", &input) != 1)
+		return -EINVAL;
+
+	if (input != 1)
+		return -EINVAL;
+
+	memset(udg->template_data_buf, 0x00, udg->template_data_size);
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.template_data,
+			udg->template_data_buf,
+			udg->template_data_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to clear template data\n",
+				__func__);
+		return retval;
+	}
+
+	retval = udg_sysfs_template_valid_store(dev, attr, cmd, 1);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to clear valid bit\n",
+				__func__);
+		return retval;
+	}
+
+#ifdef STORE_GESTURES
+	udg_read_template_data(udg->template_index);
+	udg_read_valid_data();
+#endif
+
+	return count;
+}
+
+/* sysfs show handler: number of trace points in the last gesture. */
+static ssize_t udg_sysfs_trace_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", udg->trace_size);
+}
+
+/* sysfs bin_attribute read handler for the gesture trace.
+ *
+ * Concatenates trace X (2 bytes/point), trace Y (2 bytes/point) and the
+ * segment data (1 byte/point) into a cached kernel buffer, then copies
+ * the whole blob to userspace.  @pos is ignored: the complete trace is
+ * produced on every read, so @count must hold all trace_size * 5 bytes.
+ * Returns the trace size in bytes, or a negative errno.
+ */
+static ssize_t udg_sysfs_trace_data_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	unsigned short index = 0;
+	unsigned short trace_data_size;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	trace_data_size = udg->trace_size * 5;
+
+	if (trace_data_size == 0)
+		return -EINVAL;
+
+	if (count < trace_data_size) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Not enough space (%d bytes) in buffer\n",
+				__func__, (unsigned int)count);
+		return -EINVAL;
+	}
+
+	/* Grow (never shrink) the cached buffer to fit the current trace. */
+	if (udg->trace_data_buf_size < trace_data_size) {
+		if (udg->trace_data_buf_size)
+			kfree(udg->trace_data_buf);
+		udg->trace_data_buf = kzalloc(trace_data_size, GFP_KERNEL);
+		if (!udg->trace_data_buf) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for trace data buffer\n",
+					__func__);
+			udg->trace_data_buf_size = 0;
+			return -ENOMEM;
+		}
+		udg->trace_data_buf_size = trace_data_size;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.trace_x,
+			&udg->trace_data_buf[index],
+			udg->trace_size * 2);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read trace X data\n",
+				__func__);
+		return retval;
+	}
+	index += udg->trace_size * 2;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.trace_y,
+			&udg->trace_data_buf[index],
+			udg->trace_size * 2);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read trace Y data\n",
+				__func__);
+		return retval;
+	}
+	index += udg->trace_size * 2;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.trace_segment,
+			&udg->trace_data_buf[index],
+			udg->trace_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read trace segment data\n",
+				__func__);
+		return retval;
+	}
+
+	retval = secure_memcpy(buf, count, udg->trace_data_buf,
+			udg->trace_data_buf_size, trace_data_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy trace data\n",
+				__func__);
+		return retval;
+	}
+
+	return trace_data_size;
+}
+
+/* sysfs bin_attribute read handler for the current template's data.
+ *
+ * Reads template_data_size bytes from the device into the module buffer
+ * and copies them to userspace.  @pos is ignored; the full template is
+ * returned on every read.
+ */
+static ssize_t udg_sysfs_template_data_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	if (count < udg->template_data_size) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Not enough space (%d bytes) in buffer\n",
+				__func__, (unsigned int)count);
+		return -EINVAL;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.template_data,
+			udg->template_data_buf,
+			udg->template_data_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read template data\n",
+				__func__);
+		return retval;
+	}
+
+	retval = secure_memcpy(buf, count, udg->template_data_buf,
+			udg->template_data_size, udg->template_data_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy template data\n",
+				__func__);
+		return retval;
+	}
+
+#ifdef STORE_GESTURES
+	/* Refresh the host-side template/valid-flag copies. */
+	udg_read_template_data(udg->template_index);
+	udg_read_valid_data();
+#endif
+
+	return udg->template_data_size;
+}
+
+/* sysfs bin_attribute write handler for the current template's data.
+ *
+ * Copies @count bytes from userspace into the module buffer (bounded by
+ * template_data_size via secure_memcpy) and writes them to the device.
+ * @pos is ignored; each write replaces the template from offset 0.
+ */
+static ssize_t udg_sysfs_template_data_store(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = secure_memcpy(udg->template_data_buf, udg->template_data_size,
+			buf, count, count);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy template data\n",
+				__func__);
+		return retval;
+	}
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.template_data,
+			udg->template_data_buf,
+			count);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write template data\n",
+				__func__);
+		return retval;
+	}
+
+#ifdef STORE_GESTURES
+	/* Refresh the host-side template/valid-flag copies. */
+	udg_read_template_data(udg->template_index);
+	udg_read_valid_data();
+#endif
+
+	return count;
+}
+
+/* sysfs show handler: 16-bit template displacement tuning value,
+ * assembled LSB/MSB from the F12_CTRL18 tuning block.
+ */
+static ssize_t udg_sysfs_template_displacement_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned short template_displacement;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	template_displacement =
+			((unsigned short)udg->tuning.template_disp_lsb << 0) |
+			((unsigned short)udg->tuning.template_disp_msb << 8);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", template_displacement);
+}
+
+/* sysfs store handler: set the 16-bit template displacement tuning value
+ * (decimal input, split into LSB/MSB and written back to F12_CTRL18).
+ */
+static ssize_t udg_sysfs_template_displacement_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long input;
+
+	retval = sstrtoul(buf, 10, &input);
+	if (retval)
+		return retval;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	udg->tuning.template_disp_lsb = (unsigned char)(input >> 0);
+	udg->tuning.template_disp_msb = (unsigned char)(input >> 8);
+
+	retval = udg_write_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/* sysfs show handler: 16-bit rotation invariance tuning value. */
+static ssize_t udg_sysfs_rotation_invariance_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned short rotation_invariance;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	rotation_invariance =
+			((unsigned short)udg->tuning.rotation_inv_lsb << 0) |
+			((unsigned short)udg->tuning.rotation_inv_msb << 8);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", rotation_invariance);
+}
+
+/* sysfs store handler: set the 16-bit rotation invariance tuning value. */
+static ssize_t udg_sysfs_rotation_invariance_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long input;
+
+	retval = sstrtoul(buf, 10, &input);
+	if (retval)
+		return retval;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	udg->tuning.rotation_inv_lsb = (unsigned char)(input >> 0);
+	udg->tuning.rotation_inv_msb = (unsigned char)(input >> 8);
+
+	retval = udg_write_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/* sysfs show handler: 16-bit scale invariance tuning value. */
+static ssize_t udg_sysfs_scale_invariance_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned short scale_invariance;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	scale_invariance =
+			((unsigned short)udg->tuning.scale_inv_lsb << 0) |
+			((unsigned short)udg->tuning.scale_inv_msb << 8);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", scale_invariance);
+}
+
+/* sysfs store handler: set the 16-bit scale invariance tuning value. */
+static ssize_t udg_sysfs_scale_invariance_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long input;
+
+	retval = sstrtoul(buf, 10, &input);
+	if (retval)
+		return retval;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	udg->tuning.scale_inv_lsb = (unsigned char)(input >> 0);
+	udg->tuning.scale_inv_msb = (unsigned char)(input >> 8);
+
+	retval = udg_write_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/* sysfs show handler: 16-bit threshold factor tuning value. */
+static ssize_t udg_sysfs_threshold_factor_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned short threshold_factor;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	threshold_factor =
+			((unsigned short)udg->tuning.thres_factor_lsb << 0) |
+			((unsigned short)udg->tuning.thres_factor_msb << 8);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", threshold_factor);
+}
+
+/* sysfs store handler: set the 16-bit threshold factor tuning value. */
+static ssize_t udg_sysfs_threshold_factor_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long input;
+
+	retval = sstrtoul(buf, 10, &input);
+	if (retval)
+		return retval;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	udg->tuning.thres_factor_lsb = (unsigned char)(input >> 0);
+	udg->tuning.thres_factor_msb = (unsigned char)(input >> 8);
+
+	retval = udg_write_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/* sysfs show handler: 16-bit match metric threshold tuning value. */
+static ssize_t udg_sysfs_match_metric_threshold_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned short match_metric_threshold;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	match_metric_threshold =
+			((unsigned short)udg->tuning.metric_thres_lsb << 0) |
+			((unsigned short)udg->tuning.metric_thres_msb << 8);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", match_metric_threshold);
+}
+
+/* sysfs store handler: set the 16-bit match metric threshold value. */
+static ssize_t udg_sysfs_match_metric_threshold_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long input;
+
+	retval = sstrtoul(buf, 10, &input);
+	if (retval)
+		return retval;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	udg->tuning.metric_thres_lsb = (unsigned char)(input >> 0);
+	udg->tuning.metric_thres_msb = (unsigned char)(input >> 8);
+
+	retval = udg_write_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/* sysfs show handler: 16-bit maximum inter-stroke time tuning value. */
+static ssize_t udg_sysfs_max_inter_stroke_time_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned short max_inter_stroke_time;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	max_inter_stroke_time =
+			((unsigned short)udg->tuning.inter_stroke_lsb << 0) |
+			((unsigned short)udg->tuning.inter_stroke_msb << 8);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", max_inter_stroke_time);
+}
+
+/* sysfs store handler: set the 16-bit maximum inter-stroke time value. */
+static ssize_t udg_sysfs_max_inter_stroke_time_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long input;
+
+	retval = sstrtoul(buf, 10, &input);
+	if (retval)
+		return retval;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	udg->tuning.inter_stroke_lsb = (unsigned char)(input >> 0);
+	udg->tuning.inter_stroke_msb = (unsigned char)(input >> 8);
+
+	retval = udg_write_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+/* Check whether a subpacket of an F12 control register is present.
+ *
+ * @ctrlreg:   control register number (bit position in query 5 presence map)
+ * @subpacket: subpacket number within that control register
+ * @query_5:   previously read F12 query 5 (control presence map)
+ *
+ * Walks the query 6 subpacket presence map: for each present control
+ * register before @ctrlreg, its variable-length presence entry is skipped
+ * (entries are 7-bit groups; the top bit marks continuation, a leading
+ * 0x00 means a 2-byte size prefix follows).  Returns 1 if the subpacket
+ * is present, 0 if not, or a negative errno on failure.
+ */
+static int udg_ctrl_subpacket(unsigned char ctrlreg,
+		unsigned char subpacket,
+		struct synaptics_rmi4_f12_query_5 *query_5)
+{
+	int retval;
+	unsigned char cnt;
+	unsigned char regnum;
+	unsigned char bitnum;
+	unsigned char q5_index;
+	unsigned char q6_index;
+	unsigned char offset;
+	unsigned char max_ctrlreg;
+	unsigned char *query_6;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	/* First byte of query 5 is its size, hence the -1. */
+	max_ctrlreg = (sizeof(query_5->data) - 1) * 8 - 1;
+
+	if (ctrlreg > max_ctrlreg) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Control register number (%d) over limit\n",
+				__func__, ctrlreg);
+		return -EINVAL;
+	}
+
+	q5_index = ctrlreg / 8 + 1;
+	bitnum = ctrlreg % 8;
+	if ((query_5->data[q5_index] & (1 << bitnum)) == 0x00) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Control %d is not present\n",
+				__func__, ctrlreg);
+		return -EINVAL;
+	}
+
+	query_6 = kmalloc(query_5->size_of_query6, GFP_KERNEL);
+	if (!query_6) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for query 6\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->query_base_addr + 6,
+			query_6,
+			query_5->size_of_query6);
+	if (retval < 0)
+		goto exit;
+
+	q6_index = 0;
+
+	/* Skip the query 6 entries of all present controls before ctrlreg. */
+	for (regnum = 0; regnum < ctrlreg; regnum++) {
+		q5_index = regnum / 8 + 1;
+		bitnum = regnum % 8;
+		if ((query_5->data[q5_index] & (1 << bitnum)) == 0x00)
+			continue;
+
+		if (query_6[q6_index] == 0x00)
+			q6_index += 3;
+		else
+			q6_index++;
+
+		/* Continuation bytes have the top bit set. */
+		while (query_6[q6_index] & ~MASK_7BIT)
+			q6_index++;
+
+		q6_index++;
+	}
+
+	cnt = 0;
+	q6_index++;
+	offset = subpacket / 7;
+	bitnum = subpacket % 7;
+
+	do {
+		if (cnt == offset) {
+			if (query_6[q6_index + cnt] & (1 << bitnum))
+				retval = 1;
+			else
+				retval = 0;
+			goto exit;
+		}
+		cnt++;
+	} while (query_6[q6_index + cnt - 1] & ~MASK_7BIT);
+
+	retval = 0;
+
+exit:
+	kfree(query_6);
+
+	return retval;
+}
+
+/* Read the F12_CTRL18 block and cache the tuning subpacket in udg->tuning.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int udg_read_tuning_params(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_18,
+			udg->ctrl_buf,
+			udg->ctrl_18_sub10_off + sizeof(struct udg_tuning));
+	if (retval < 0)
+		return retval;
+
+	/* Propagate copy failures instead of silently ignoring them. */
+	retval = secure_memcpy(udg->tuning.data,
+			sizeof(udg->tuning.data),
+			(unsigned char *)&udg->ctrl_buf[udg->ctrl_18_sub10_off],
+			sizeof(struct udg_tuning),
+			sizeof(struct udg_tuning));
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/* Write the cached udg->tuning values back into the F12_CTRL18 block.
+ *
+ * Assumes udg->ctrl_buf still holds the bytes preceding the tuning
+ * subpacket from a prior udg_read_tuning_params() call.
+ * Returns 0 on success or a negative errno.
+ */
+static int udg_write_tuning_params(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	/* Propagate copy failures instead of silently ignoring them. */
+	retval = secure_memcpy((unsigned char *)&udg->ctrl_buf[udg->ctrl_18_sub10_off],
+			sizeof(struct udg_tuning),
+			udg->tuning.data,
+			sizeof(udg->tuning.data),
+			sizeof(struct udg_tuning));
+	if (retval < 0)
+		return retval;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.ctrl_18,
+			udg->ctrl_buf,
+			udg->ctrl_18_sub10_off + sizeof(struct udg_tuning));
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/* Switch gesture detection on/off via the report-flags byte of
+ * F12_CTRL20 (read-modify-write).  Enabling selects wakeup-gesture
+ * mode; disabling restores the flags saved at init in udg->report_flags.
+ * Returns 0 on success or a negative errno.
+ */
+static int udg_detection_enable(bool enable)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_20,
+			udg->ctrl_buf,
+			udg->ctrl_20_sub1_off + 1);
+	if (retval < 0)
+		return retval;
+
+	if (enable)
+		udg->ctrl_buf[udg->ctrl_20_sub1_off] = WAKEUP_GESTURE_MODE;
+	else
+		udg->ctrl_buf[udg->ctrl_20_sub1_off] = udg->report_flags;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.ctrl_20,
+			udg->ctrl_buf,
+			udg->ctrl_20_sub1_off + 1);
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/* Switch the UDG engine on/off by toggling the enable bit in F12_CTRL27.
+ *
+ * The enable and disable paths performed identical read/write sequences
+ * and differed only in setting vs clearing the bit, so the register
+ * access is done once with the bit operation selected by @enable.
+ * Returns 0 on success or a negative errno.
+ */
+static int udg_engine_enable(bool enable)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_27,
+			udg->ctrl_buf,
+			udg->ctrl_27_sub5_off + 1);
+	if (retval < 0)
+		return retval;
+
+	if (enable)
+		udg->ctrl_buf[udg->ctrl_27_sub5_off] |=
+				(1 << CTRL27_UDG_ENABLE_BIT);
+	else
+		udg->ctrl_buf[udg->ctrl_27_sub5_off] &=
+				~(1 << CTRL27_UDG_ENABLE_BIT);
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.ctrl_27,
+			udg->ctrl_buf,
+			udg->ctrl_27_sub5_off + 1);
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/* Attention handler for UDG events.
+ *
+ * Flags the event for udg_sysfs_template_detection_show().  When the
+ * device is suspended, the F12 data 4 bytes are fetched (unless the IRQ
+ * path already cached them) and, on a gesture DETECTION, a KEY_WAKEUP
+ * press/release pair is injected to wake the system.
+ */
+static void udg_report(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	atomic_set(&udg->attn_event, 1);
+
+	if (rmi4_data->suspend) {
+		if (rmi4_data->gesture_detection[0] == 0) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					udg->addr.data_4,
+					rmi4_data->gesture_detection,
+					sizeof(rmi4_data->gesture_detection));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to read gesture detection\n",
+						__func__);
+				return;
+			}
+		}
+
+		udg->detection_status = rmi4_data->gesture_detection[0];
+		rmi4_data->gesture_detection[0] = 0;
+
+		if (udg->detection_status == DETECTION) {
+			input_report_key(udg->udg_dev, KEY_WAKEUP, 1);
+			input_sync(udg->udg_dev);
+			input_report_key(udg->udg_dev, KEY_WAKEUP, 0);
+			input_sync(udg->udg_dev);
+			rmi4_data->suspend = false;
+		}
+	}
+}
+
+/* Select the active template by writing @index into F12_CTRL41
+ * (read-modify-write).  Returns -EINVAL if @index is out of range,
+ * 0 on success, or a negative errno from the register access.
+ */
+static int udg_set_index(unsigned char index)
+{
+	int retval;
+	struct synaptics_rmi4_f12_control_41 control_41;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	if (index >= udg->max_num_templates)
+		return -EINVAL;
+
+	udg->template_index = index;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_41,
+			control_41.data,
+			sizeof(control_41.data));
+	if (retval < 0)
+		return retval;
+
+	control_41.template_index = udg->template_index;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.ctrl_41,
+			control_41.data,
+			sizeof(control_41.data));
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+#ifdef STORE_GESTURES
+/* Cache the device's template valid flags in udg->valid_buf so they can
+ * be restored after a reset (STORE_GESTURES support).
+ */
+static int udg_read_valid_data(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.template_flags,
+			udg->valid_buf,
+			sizeof(udg->valid_buf));
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/* Restore the cached template valid flags (udg->valid_buf) to the device
+ * (STORE_GESTURES support).
+ */
+static int udg_write_valid_data(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.template_flags,
+			udg->valid_buf,
+			sizeof(udg->valid_buf));
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/* Cache template @index's data from the device into the host-side
+ * storage buffer (STORE_GESTURES support).
+ *
+ * NOTE(review): the udg_set_index() return value is ignored here, so an
+ * out-of-range index would read whatever template is currently selected —
+ * callers appear to pass validated indices; confirm.
+ */
+static int udg_read_template_data(unsigned char index)
+{
+	int retval;
+	unsigned char *storage;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	udg_set_index(index);
+	storage = &(udg->storage_buf[index * udg->template_data_size]);
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.template_data,
+			storage,
+			udg->template_data_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read template data\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/* Restore all cached templates from the host-side storage buffer to the
+ * device, selecting each index in turn (STORE_GESTURES support).
+ */
+static int udg_write_template_data(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char *storage;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	for (ii = 0; ii < udg->gestures_to_store; ii++) {
+		udg_set_index(ii);
+		storage = &(udg->storage_buf[ii * udg->template_data_size]);
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				udg->addr.template_data,
+				storage,
+				udg->template_data_size);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write template data\n",
+					__func__);
+			return retval;
+		}
+	}
+
+	return 0;
+}
+#endif
+
+/* Discover the F12 register layout needed by the UDG module.
+ *
+ * Parses F12 queries 7/8 (data register presence) and 4/5 (control
+ * register presence) to compute the absolute addresses of the data and
+ * control registers used by this module, then the subpacket offsets
+ * within CTRL18/20/23/27, and finally caches query-0 template limits and
+ * the current CTRL20/CTRL23 values for later restore.  Returns a
+ * negative errno if a required register is missing or access fails.
+ * NOTE(review): on success this returns the last reg-read retval (>= 0),
+ * not necessarily 0 — callers must test for < 0 only.
+ */
+static int udg_reg_init(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char data_offset;
+	unsigned char size_of_query;
+	unsigned char ctrl_18_offset;
+	unsigned char ctrl_20_offset;
+	unsigned char ctrl_23_offset;
+	unsigned char ctrl_27_offset;
+	unsigned char ctrl_41_offset;
+	struct synaptics_rmi4_f12_query_0 query_0;
+	struct synaptics_rmi4_f12_query_5 query_5;
+	struct synaptics_rmi4_f12_query_8 query_8;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->query_base_addr + 7,
+			&size_of_query,
+			sizeof(size_of_query));
+	if (retval < 0)
+		return retval;
+
+	if (size_of_query < 4) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: User defined gesture support unavailable (missing data registers)\n",
+				__func__);
+		retval = -ENODEV;
+		return retval;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->query_base_addr + 8,
+			query_8.data,
+			sizeof(query_8.data));
+	if (retval < 0)
+		return retval;
+
+	/* Data registers 16-21 carry the gesture trace and template data;
+	 * all must be present for UDG to work.  Each *_is_present bit is
+	 * 0 or 1, so summing them yields the register's offset.
+	 */
+	if ((query_8.data16_is_present) &&
+			(query_8.data17_is_present) &&
+			(query_8.data18_is_present) &&
+			(query_8.data19_is_present) &&
+			(query_8.data20_is_present) &&
+			(query_8.data21_is_present)) {
+		data_offset = query_8.data0_is_present +
+				query_8.data1_is_present +
+				query_8.data2_is_present +
+				query_8.data3_is_present;
+		udg->addr.data_4 = udg->data_base_addr + data_offset;
+		data_offset = data_offset +
+				query_8.data4_is_present +
+				query_8.data5_is_present +
+				query_8.data6_is_present +
+				query_8.data7_is_present +
+				query_8.data8_is_present +
+				query_8.data9_is_present +
+				query_8.data10_is_present +
+				query_8.data11_is_present +
+				query_8.data12_is_present +
+				query_8.data13_is_present +
+				query_8.data14_is_present +
+				query_8.data15_is_present;
+		udg->addr.trace_x = udg->data_base_addr + data_offset;
+		udg->addr.trace_y = udg->addr.trace_x + 1;
+		udg->addr.trace_segment = udg->addr.trace_y + 1;
+		udg->addr.template_helper = udg->addr.trace_segment + 1;
+		udg->addr.template_data = udg->addr.template_helper + 1;
+		udg->addr.template_flags = udg->addr.template_data + 1;
+	} else {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: User defined gesture support unavailable (missing data registers)\n",
+				__func__);
+		retval = -ENODEV;
+		return retval;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->query_base_addr + 4,
+			&size_of_query,
+			sizeof(size_of_query));
+	if (retval < 0)
+		return retval;
+
+	if (size_of_query < 7) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: User defined gesture support unavailable (missing control registers)\n",
+				__func__);
+		retval = -ENODEV;
+		return retval;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->query_base_addr + 5,
+			query_5.data,
+			sizeof(query_5.data));
+	if (retval < 0)
+		return retval;
+
+	/* Control register offsets: sum of presence bits of all lower-
+	 * numbered control registers.
+	 */
+	ctrl_18_offset = query_5.ctrl0_is_present +
+			query_5.ctrl1_is_present +
+			query_5.ctrl2_is_present +
+			query_5.ctrl3_is_present +
+			query_5.ctrl4_is_present +
+			query_5.ctrl5_is_present +
+			query_5.ctrl6_is_present +
+			query_5.ctrl7_is_present +
+			query_5.ctrl8_is_present +
+			query_5.ctrl9_is_present +
+			query_5.ctrl10_is_present +
+			query_5.ctrl11_is_present +
+			query_5.ctrl12_is_present +
+			query_5.ctrl13_is_present +
+			query_5.ctrl14_is_present +
+			query_5.ctrl15_is_present +
+			query_5.ctrl16_is_present +
+			query_5.ctrl17_is_present;
+
+	ctrl_20_offset = ctrl_18_offset +
+			query_5.ctrl18_is_present +
+			query_5.ctrl19_is_present;
+
+	ctrl_23_offset = ctrl_20_offset +
+			query_5.ctrl20_is_present +
+			query_5.ctrl21_is_present +
+			query_5.ctrl22_is_present;
+
+	ctrl_27_offset = ctrl_23_offset+
+			query_5.ctrl23_is_present +
+			query_5.ctrl24_is_present +
+			query_5.ctrl25_is_present +
+			query_5.ctrl26_is_present;
+
+	ctrl_41_offset = ctrl_27_offset+
+			query_5.ctrl27_is_present +
+			query_5.ctrl28_is_present +
+			query_5.ctrl29_is_present +
+			query_5.ctrl30_is_present +
+			query_5.ctrl31_is_present +
+			query_5.ctrl32_is_present +
+			query_5.ctrl33_is_present +
+			query_5.ctrl34_is_present +
+			query_5.ctrl35_is_present +
+			query_5.ctrl36_is_present +
+			query_5.ctrl37_is_present +
+			query_5.ctrl38_is_present +
+			query_5.ctrl39_is_present +
+			query_5.ctrl40_is_present;
+
+	udg->addr.ctrl_18 = udg->control_base_addr + ctrl_18_offset;
+	udg->addr.ctrl_20 = udg->control_base_addr + ctrl_20_offset;
+	udg->addr.ctrl_23 = udg->control_base_addr + ctrl_23_offset;
+	udg->addr.ctrl_27 = udg->control_base_addr + ctrl_27_offset;
+	udg->addr.ctrl_41 = udg->control_base_addr + ctrl_41_offset;
+
+	/* Accumulate subpacket offsets from the per-subpacket size tables;
+	 * a subpacket contributes only if present in the device.
+	 */
+	udg->ctrl_18_sub10_off = 0;
+	for (ii = 0; ii < 10; ii++) {
+		retval = udg_ctrl_subpacket(18, ii, &query_5);
+		if (retval == 1)
+			udg->ctrl_18_sub10_off += ctrl_18_sub_size[ii];
+		else if (retval < 0)
+			return retval;
+	}
+
+	udg->ctrl_20_sub1_off = 0;
+	for (ii = 0; ii < 1; ii++) {
+		retval = udg_ctrl_subpacket(20, ii, &query_5);
+		if (retval == 1)
+			udg->ctrl_20_sub1_off += ctrl_20_sub_size[ii];
+		else if (retval < 0)
+			return retval;
+	}
+
+	udg->ctrl_23_sub3_off = 0;
+	for (ii = 0; ii < 3; ii++) {
+		retval = udg_ctrl_subpacket(23, ii, &query_5);
+		if (retval == 1)
+			udg->ctrl_23_sub3_off += ctrl_23_sub_size[ii];
+		else if (retval < 0)
+			return retval;
+	}
+
+	/* Subpacket 3 of CTRL23 is the second object-type-enable byte;
+	 * a zero offset means it is absent.
+	 */
+	retval = udg_ctrl_subpacket(23, 3, &query_5);
+	if (retval == 0)
+		udg->ctrl_23_sub3_off = 0;
+	else if (retval < 0)
+		return retval;
+
+	udg->ctrl_27_sub5_off = 0;
+	for (ii = 0; ii < 5; ii++) {
+		retval = udg_ctrl_subpacket(27, ii, &query_5);
+		if (retval == 1)
+			udg->ctrl_27_sub5_off += ctrl_27_sub_size[ii];
+		else if (retval < 0)
+			return retval;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->query_base_addr + 0,
+			query_0.data,
+			sizeof(query_0.data));
+	if (retval < 0)
+		return retval;
+
+	udg->max_num_templates = query_0.max_num_templates;
+	udg->template_size =
+			((unsigned short)query_0.template_size_lsb << 0) |
+			((unsigned short)query_0.template_size_msb << 8);
+	udg->template_data_size = udg->template_size * 4 * 2 + 4 + 1;
+
+#ifdef STORE_GESTURES
+	udg->gestures_to_store = udg->max_num_templates;
+	if (GESTURES_TO_STORE < udg->gestures_to_store)
+		udg->gestures_to_store = GESTURES_TO_STORE;
+#endif
+
+	/* Save the current report flags and object-type enables so they
+	 * can be restored when detection/registration is disabled.
+	 */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_20,
+			udg->ctrl_buf,
+			udg->ctrl_20_sub1_off + 1);
+	if (retval < 0)
+		return retval;
+
+	udg->report_flags = udg->ctrl_buf[udg->ctrl_20_sub1_off];
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_23,
+			udg->ctrl_buf,
+			udg->ctrl_23_sub3_off + 1);
+	if (retval < 0)
+		return retval;
+
+	udg->object_type_enable1 = udg->ctrl_buf[0];
+	if (udg->ctrl_23_sub3_off)
+		udg->object_type_enable2 = udg->ctrl_buf[udg->ctrl_23_sub3_off];
+
+	return retval;
+}
+
+/*
+ * Walk the RMI4 Page Description Table (PDT) to locate function F12,
+ * initialize its register map via udg_reg_init(), and enable its
+ * interrupt source in the F01 interrupt enable register.
+ *
+ * Returns 0 on success, -EINVAL if F12 is not present, or a negative
+ * error code from a bus access or from udg_reg_init().
+ */
+static int udg_scan_pdt(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char page;
+	unsigned char intr_count = 0;
+	unsigned char intr_off;
+	unsigned char intr_src;
+	unsigned short addr;
+	struct synaptics_rmi4_fn_desc fd;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	/* PDT entries are laid out top-down from PDT_START; a zero
+	 * fn_number terminates the table on the current page.
+	 */
+	for (page = 0; page < PAGES_TO_SERVICE; page++) {
+		for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
+			/* Fold the page number into the high byte of the
+			 * address; the bus layer uses it for page select.
+			 */
+			addr |= (page << 8);
+
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					addr,
+					(unsigned char *)&fd,
+					sizeof(fd));
+			if (retval < 0)
+				return retval;
+
+			/* Strip the page bits again for loop arithmetic. */
+			addr &= ~(MASK_8BIT << 8);
+
+			if (fd.fn_number) {
+				dev_dbg(rmi4_data->pdev->dev.parent,
+						"%s: Found F%02x\n",
+						__func__, fd.fn_number);
+				switch (fd.fn_number) {
+				case SYNAPTICS_RMI4_F12:
+					goto f12_found;
+					break;
+				}
+			} else {
+				break;
+			}
+
+			/* Interrupt sources of functions that precede F12
+			 * determine F12's bit offset in the mask below.
+			 */
+			intr_count += fd.intr_src_count;
+		}
+	}
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Failed to find F12\n",
+			__func__);
+	return -EINVAL;
+
+f12_found:
+	udg->query_base_addr = fd.query_base_addr | (page << 8);
+	udg->control_base_addr = fd.ctrl_base_addr | (page << 8);
+	udg->data_base_addr = fd.data_base_addr | (page << 8);
+	udg->command_base_addr = fd.cmd_base_addr | (page << 8);
+
+	retval = udg_reg_init();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to initialize user defined gesture registers\n",
+				__func__);
+		return retval;
+	}
+
+	/* Build the interrupt mask covering all of F12's sources. */
+	udg->intr_mask = 0;
+	intr_src = fd.intr_src_count;
+	intr_off = intr_count % 8;
+	for (ii = intr_off;
+			ii < (intr_src + intr_off);
+			ii++) {
+		udg->intr_mask |= 1 << ii;
+	}
+
+	rmi4_data->intr_mask[0] |= udg->intr_mask;
+
+	/* F01_CTRL1 is the interrupt enable register — enable our bits. */
+	addr = rmi4_data->f01_ctrl_base_addr + 1;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			addr,
+			&rmi4_data->intr_mask[0],
+			sizeof(rmi4_data->intr_mask[0]));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set interrupt enable bit\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+static void synaptics_rmi4_udg_attn(struct synaptics_rmi4_data *rmi4_data,
+		unsigned char intr_mask)
+{
+	/* Attention callback: report a gesture only when the module is
+	 * initialized and the asserted interrupt belongs to F12.
+	 */
+	if (udg && (udg->intr_mask & intr_mask))
+		udg_report();
+}
+
+/*
+ * Allocate the gesture handle, discover F12 via the PDT, create the
+ * input device and the sysfs/bin-file interface, and enable the
+ * gesture engine.
+ *
+ * Returns 0 on success (or if already initialized) and a negative
+ * error code on failure, with all partially created state torn down.
+ */
+static int synaptics_rmi4_udg_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char size;
+	unsigned char attr_count;
+	unsigned char param_count;
+
+	if (udg) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	udg = kzalloc(sizeof(*udg), GFP_KERNEL);
+	if (!udg) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for udg\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	/* Control buffer must hold all F12 ctrl18 subpackets plus the
+	 * tuning structure.
+	 */
+	size = 0;
+	for (ii = 0; ii < sizeof(ctrl_18_sub_size); ii++)
+		size += ctrl_18_sub_size[ii];
+	size += sizeof(struct udg_tuning);
+	udg->ctrl_buf = kzalloc(size, GFP_KERNEL);
+	if (!udg->ctrl_buf) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for ctrl_buf\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_udg;
+	}
+
+	udg->rmi4_data = rmi4_data;
+
+	/* Locates F12 and sets template_data_size used below. */
+	retval = udg_scan_pdt();
+	if (retval < 0)
+		goto exit_free_ctrl_buf;
+
+	udg->template_data_buf = kzalloc(udg->template_data_size, GFP_KERNEL);
+	if (!udg->template_data_buf) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for template_data_buf\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_ctrl_buf;
+	}
+
+#ifdef STORE_GESTURES
+	udg->storage_buf = kzalloc(
+			udg->template_data_size * udg->gestures_to_store,
+			GFP_KERNEL);
+	if (!udg->storage_buf) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for storage_buf\n",
+				__func__);
+		kfree(udg->template_data_buf);
+		retval = -ENOMEM;
+		goto exit_free_ctrl_buf;
+	}
+#endif
+
+	udg->udg_dev = input_allocate_device();
+	if (udg->udg_dev == NULL) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to allocate gesture device\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_template_data_buf;
+	}
+
+	udg->udg_dev->name = GESTURE_DRIVER_NAME;
+	udg->udg_dev->phys = GESTURE_PHYS_NAME;
+	udg->udg_dev->id.product = SYNAPTICS_DSX_DRIVER_PRODUCT;
+	udg->udg_dev->id.version = SYNAPTICS_DSX_DRIVER_VERSION;
+	udg->udg_dev->dev.parent = rmi4_data->pdev->dev.parent;
+	input_set_drvdata(udg->udg_dev, rmi4_data);
+
+	/* Gestures are reported as a wakeup key event. */
+	set_bit(EV_KEY, udg->udg_dev->evbit);
+	set_bit(KEY_WAKEUP, udg->udg_dev->keybit);
+	input_set_capability(udg->udg_dev, EV_KEY, KEY_WAKEUP);
+
+	retval = input_register_device(udg->udg_dev);
+	if (retval) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to register gesture device\n",
+				__func__);
+		input_free_device(udg->udg_dev);
+		goto exit_free_template_data_buf;
+	}
+
+	udg->tuning_dir = kobject_create_and_add(TUNING_SYSFS_DIR_NAME,
+			&udg->udg_dev->dev.kobj);
+	if (!udg->tuning_dir) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create tuning sysfs directory\n",
+				__func__);
+		/* Fix: without this, retval is still 0 from the call
+		 * above and the failed init would report success.
+		 */
+		retval = -ENODEV;
+		goto exit_unregister_input_device;
+	}
+
+	retval = sysfs_create_bin_file(&udg->udg_dev->dev.kobj, &template_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create template data bin file\n",
+				__func__);
+		goto exit_remove_sysfs_directory;
+	}
+
+	retval = sysfs_create_bin_file(&udg->udg_dev->dev.kobj, &trace_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create trace data bin file\n",
+				__func__);
+		goto exit_remove_bin_file;
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(&udg->udg_dev->dev.kobj,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			retval = -ENODEV;
+			goto exit_remove_attrs;
+		}
+	}
+
+	for (param_count = 0; param_count < ARRAY_SIZE(params); param_count++) {
+		retval = sysfs_create_file(udg->tuning_dir,
+				&params[param_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create tuning parameters\n",
+					__func__);
+			retval = -ENODEV;
+			goto exit_remove_params;
+		}
+	}
+
+	retval = udg_engine_enable(true);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to enable gesture engine\n",
+				__func__);
+		goto exit_remove_params;
+	}
+
+	return 0;
+
+exit_remove_params:
+	/* Remove only the entries created so far ([0, param_count-1]).
+	 * Fix: the previous "for (x--; x >= 0; x--)" on an unsigned char
+	 * never terminates, since an unsigned value is always >= 0; a
+	 * countdown while-loop removes exactly the created entries.
+	 */
+	while (param_count--) {
+		sysfs_remove_file(udg->tuning_dir,
+				&params[param_count].attr);
+	}
+
+exit_remove_attrs:
+	/* Same unsigned-countdown fix as above. */
+	while (attr_count--) {
+		sysfs_remove_file(&udg->udg_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+	sysfs_remove_bin_file(&udg->udg_dev->dev.kobj, &trace_data);
+
+exit_remove_bin_file:
+	sysfs_remove_bin_file(&udg->udg_dev->dev.kobj, &template_data);
+
+exit_remove_sysfs_directory:
+	kobject_put(udg->tuning_dir);
+
+exit_unregister_input_device:
+	input_unregister_device(udg->udg_dev);
+
+exit_free_template_data_buf:
+#ifdef STORE_GESTURES
+	kfree(udg->storage_buf);
+#endif
+	kfree(udg->template_data_buf);
+
+exit_free_ctrl_buf:
+	kfree(udg->ctrl_buf);
+
+exit_free_udg:
+	kfree(udg);
+	udg = NULL;
+
+exit:
+	return retval;
+}
+
+static void synaptics_rmi4_udg_remove(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char idx;
+
+	/* Tear down in the reverse order of creation, then signal the
+	 * module exit path that removal has finished.
+	 */
+	if (udg) {
+		for (idx = 0; idx < ARRAY_SIZE(params); idx++)
+			sysfs_remove_file(udg->tuning_dir,
+					&params[idx].attr);
+
+		for (idx = 0; idx < ARRAY_SIZE(attrs); idx++)
+			sysfs_remove_file(&udg->udg_dev->dev.kobj,
+					&attrs[idx].attr);
+
+		sysfs_remove_bin_file(&udg->udg_dev->dev.kobj, &trace_data);
+		sysfs_remove_bin_file(&udg->udg_dev->dev.kobj, &template_data);
+		kobject_put(udg->tuning_dir);
+
+		input_unregister_device(udg->udg_dev);
+#ifdef STORE_GESTURES
+		kfree(udg->storage_buf);
+#endif
+		kfree(udg->template_data_buf);
+		kfree(udg->trace_data_buf);
+		kfree(udg->ctrl_buf);
+		kfree(udg);
+		udg = NULL;
+	}
+
+	complete(&udg_remove_complete);
+}
+
+static void synaptics_rmi4_udg_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	/* First reset before init: perform full initialization instead. */
+	if (udg == NULL) {
+		synaptics_rmi4_udg_init(rmi4_data);
+		return;
+	}
+
+	/* Register addresses may have moved after a firmware reset, so
+	 * rescan the PDT before re-enabling the engine.
+	 */
+	udg_scan_pdt();
+	udg_engine_enable(true);
+#ifdef STORE_GESTURES
+	udg_write_template_data();
+	udg_write_valid_data();
+#endif
+}
+
+static void synaptics_rmi4_udg_reinit(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (udg == NULL)
+		return;
+
+	/* Reinit keeps the existing register map; just re-enable the
+	 * engine and restore any stored gesture templates.
+	 */
+	udg_engine_enable(true);
+#ifdef STORE_GESTURES
+	udg_write_template_data();
+	udg_write_valid_data();
+#endif
+}
+
+static void synaptics_rmi4_udg_e_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (udg == NULL)
+		return;
+
+	/* Keep the controller awake with its interrupt armed as a wake
+	 * source, then switch the firmware into gesture detection mode.
+	 */
+	rmi4_data->sleep_enable(rmi4_data, false);
+	rmi4_data->irq_enable(rmi4_data, true, false);
+	enable_irq_wake(rmi4_data->irq);
+
+	udg_engine_enable(true);
+	udg_detection_enable(true);
+}
+
+static void synaptics_rmi4_udg_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (udg == NULL)
+		return;
+
+	/* Same handling as early suspend: stay powered, arm the IRQ for
+	 * wakeup, and enter gesture detection mode.
+	 */
+	rmi4_data->sleep_enable(rmi4_data, false);
+	rmi4_data->irq_enable(rmi4_data, true, false);
+	enable_irq_wake(rmi4_data->irq);
+
+	udg_engine_enable(true);
+	udg_detection_enable(true);
+}
+
+static void synaptics_rmi4_udg_resume(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (udg == NULL)
+		return;
+
+	/* Back to normal operation: drop the wake source and leave
+	 * gesture detection mode.
+	 */
+	disable_irq_wake(rmi4_data->irq);
+	udg_detection_enable(false);
+}
+
+static void synaptics_rmi4_udg_l_resume(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (udg == NULL)
+		return;
+
+	/* Mirror of resume() for the late-resume callback. */
+	disable_irq_wake(rmi4_data->irq);
+	udg_detection_enable(false);
+}
+
+/*
+ * Registration record handed to the DSX core driver; the core invokes
+ * these callbacks on attach/detach, reset, attention, and power
+ * transitions.
+ */
+static struct synaptics_rmi4_exp_fn gesture_module = {
+	.fn_type = RMI_GESTURE,
+	.init = synaptics_rmi4_udg_init,
+	.remove = synaptics_rmi4_udg_remove,
+	.reset = synaptics_rmi4_udg_reset,
+	.reinit = synaptics_rmi4_udg_reinit,
+	.early_suspend = synaptics_rmi4_udg_e_suspend,
+	.suspend = synaptics_rmi4_udg_suspend,
+	.resume = synaptics_rmi4_udg_resume,
+	.late_resume = synaptics_rmi4_udg_l_resume,
+	.attn = synaptics_rmi4_udg_attn,
+};
+
+/* Register the gesture expansion module with the DSX core driver. */
+static int __init rmi4_gesture_module_init(void)
+{
+	synaptics_rmi4_new_function(&gesture_module, true);
+
+	return 0;
+}
+
+static void __exit rmi4_gesture_module_exit(void)
+{
+	synaptics_rmi4_new_function(&gesture_module, false);
+
+	/* Block until the core driver has run our remove callback so the
+	 * module text stays valid for its duration.
+	 */
+	wait_for_completion(&udg_remove_complete);
+}
+
+module_init(rmi4_gesture_module_init);
+module_exit(rmi4_gesture_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX User Defined Gesture Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_i2c.c b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_i2c.c
new file mode 100644
index 0000000..45951a4
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_i2c.c
@@ -0,0 +1,636 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2015 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/types.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx_v2_6.h>
+#include "synaptics_dsx_core.h"
+
+#define SYN_I2C_RETRY_TIMES 10
+
+/*
+#define I2C_BURST_LIMIT 255
+*/
+
+#define XFER_MSGS_LIMIT 8
+
+static unsigned char *wr_buf;
+
+static struct synaptics_dsx_hw_interface hw_if;
+
+static struct platform_device *synaptics_dsx_i2c_device;
+
+#ifdef CONFIG_OF
+/*
+ * Populate @bdata from the "synaptics,*" device tree properties of
+ * @dev.  Optional properties fall back to a disabled/default value
+ * (-1 for GPIOs and addresses, 0 for delays); a present-but-unreadable
+ * property is treated as an error.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int parse_dt(struct device *dev, struct synaptics_dsx_board_data *bdata)
+{
+	int retval;
+	u32 value;
+	const char *name;
+	struct property *prop;
+	struct device_node *np = dev->of_node;
+
+	/* Interrupt line and its polarity. */
+	bdata->irq_gpio = of_get_named_gpio_flags(np,
+			"synaptics,irq-gpio", 0,
+			(enum of_gpio_flags *)&bdata->irq_flags);
+
+	retval = of_property_read_u32(np, "synaptics,irq-on-state",
+			&value);
+	if (retval < 0)
+		bdata->irq_on_state = 0;
+	else
+		bdata->irq_on_state = value;
+
+	/* Regulator names; NULL means the rail is not software-controlled. */
+	retval = of_property_read_string(np, "synaptics,pwr-reg-name", &name);
+	if (retval < 0)
+		bdata->pwr_reg_name = NULL;
+	else
+		bdata->pwr_reg_name = name;
+
+	retval = of_property_read_string(np, "synaptics,bus-reg-name", &name);
+	if (retval < 0)
+		bdata->bus_reg_name = NULL;
+	else
+		bdata->bus_reg_name = name;
+
+	/* Optional power GPIO; when present its active state is required. */
+	prop = of_find_property(np, "synaptics,power-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->power_gpio = of_get_named_gpio_flags(np,
+				"synaptics,power-gpio", 0, NULL);
+		retval = of_property_read_u32(np, "synaptics,power-on-state",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,power-on-state property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->power_on_state = value;
+		}
+	} else {
+		bdata->power_gpio = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,power-delay-ms", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,power-delay-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,power-delay-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->power_delay_ms = value;
+		}
+	} else {
+		bdata->power_delay_ms = 0;
+	}
+
+	/* Optional reset GPIO; active state and pulse width are required
+	 * whenever the GPIO itself is specified.
+	 */
+	prop = of_find_property(np, "synaptics,reset-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->reset_gpio = of_get_named_gpio_flags(np,
+				"synaptics,reset-gpio", 0, NULL);
+		retval = of_property_read_u32(np, "synaptics,reset-on-state",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-on-state property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_on_state = value;
+		}
+		retval = of_property_read_u32(np, "synaptics,reset-active-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-active-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_active_ms = value;
+		}
+	} else {
+		bdata->reset_gpio = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,reset-delay-ms", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,reset-delay-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-delay-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_delay_ms = value;
+		}
+	} else {
+		bdata->reset_delay_ms = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,max-y-for-2d", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,max-y-for-2d",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,max-y-for-2d property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->max_y_for_2d = value;
+		}
+	} else {
+		bdata->max_y_for_2d = -1;
+	}
+
+	/* Panel orientation corrections. */
+	bdata->swap_axes = of_property_read_bool(np, "synaptics,swap-axes");
+	bdata->x_flip = of_property_read_bool(np, "synaptics,x-flip");
+	bdata->y_flip = of_property_read_bool(np, "synaptics,y-flip");
+
+	/* Alternate I2C address; presumably used by the microbootloader
+	 * ("ub") recovery mode — see synaptics_rmi4_i2c_check_addr().
+	 */
+	prop = of_find_property(np, "synaptics,ub-i2c-addr", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,ub-i2c-addr",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,ub-i2c-addr property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->ub_i2c_addr = (unsigned short)value;
+		}
+	} else {
+		bdata->ub_i2c_addr = -1;
+	}
+
+	/* Capacitive (0D) button key codes, one u32 per button. */
+	prop = of_find_property(np, "synaptics,cap-button-codes", NULL);
+	if (prop && prop->length) {
+		bdata->cap_button_map->map = devm_kzalloc(dev,
+				prop->length,
+				GFP_KERNEL);
+		if (!bdata->cap_button_map->map)
+			return -ENOMEM;
+		bdata->cap_button_map->nbuttons = prop->length / sizeof(u32);
+		retval = of_property_read_u32_array(np,
+				"synaptics,cap-button-codes",
+				bdata->cap_button_map->map,
+				bdata->cap_button_map->nbuttons);
+		if (retval < 0) {
+			bdata->cap_button_map->nbuttons = 0;
+			bdata->cap_button_map->map = NULL;
+		}
+	} else {
+		bdata->cap_button_map->nbuttons = 0;
+		bdata->cap_button_map->map = NULL;
+	}
+
+	/* Virtual button definitions: 5 u32 values per button. */
+	prop = of_find_property(np, "synaptics,vir-button-codes", NULL);
+	if (prop && prop->length) {
+		bdata->vir_button_map->map = devm_kzalloc(dev,
+				prop->length,
+				GFP_KERNEL);
+		if (!bdata->vir_button_map->map)
+			return -ENOMEM;
+		bdata->vir_button_map->nbuttons = prop->length / sizeof(u32);
+		bdata->vir_button_map->nbuttons /= 5;
+		retval = of_property_read_u32_array(np,
+				"synaptics,vir-button-codes",
+				bdata->vir_button_map->map,
+				bdata->vir_button_map->nbuttons * 5);
+		if (retval < 0) {
+			bdata->vir_button_map->nbuttons = 0;
+			bdata->vir_button_map->map = NULL;
+		}
+	} else {
+		bdata->vir_button_map->nbuttons = 0;
+		bdata->vir_button_map->map = NULL;
+	}
+
+	return 0;
+}
+#endif
+
+/*
+ * Ensure the shared write buffer wr_buf can hold @count bytes, growing
+ * it on demand.  Also reallocates when wr_buf is NULL even though the
+ * cached size is non-zero (e.g. a prior allocation failure or the
+ * buffer having been freed on module exit with the static size kept).
+ *
+ * Returns 0 on success or -ENOMEM.  Caller must hold
+ * rmi4_io_ctrl_mutex or otherwise serialize access to wr_buf.
+ */
+static int synaptics_rmi4_i2c_alloc_buf(struct synaptics_rmi4_data *rmi4_data,
+		unsigned int count)
+{
+	static unsigned int buf_size;
+
+	if (count > buf_size || !wr_buf) {
+		kfree(wr_buf); /* kfree(NULL) is a no-op; no guard needed */
+		wr_buf = kzalloc(count, GFP_KERNEL);
+		if (!wr_buf) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for buffer\n",
+					__func__);
+			buf_size = 0;
+			return -ENOMEM;
+		}
+		buf_size = count;
+	}
+
+	return 0;
+}
+
+static void synaptics_rmi4_i2c_check_addr(struct synaptics_rmi4_data *rmi4_data,
+		struct i2c_client *i2c)
+{
+	/* Toggle between the normal slave address and the alternate
+	 * ("ub") address, when one is configured; used by the retry
+	 * paths to recover a device answering on the other address.
+	 */
+	if (hw_if.board_data->ub_i2c_addr == -1)
+		return;
+
+	if (hw_if.board_data->i2c_addr == i2c->addr)
+		hw_if.board_data->i2c_addr = hw_if.board_data->ub_i2c_addr;
+	else
+		hw_if.board_data->i2c_addr = i2c->addr;
+}
+
+/*
+ * Select the RMI register page encoded in the high byte of @addr,
+ * writing it to the page select register when it differs from the
+ * cached current page.
+ *
+ * Returns PAGE_SELECT_LEN on success (including when no write was
+ * needed) or 0 after exhausting all retries.
+ */
+static int synaptics_rmi4_i2c_set_page(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr)
+{
+	int retval = 0;
+	unsigned char retry;
+	unsigned char buf[PAGE_SELECT_LEN];
+	unsigned char page;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+	struct i2c_msg msg[1];
+
+	msg[0].addr = hw_if.board_data->i2c_addr;
+	msg[0].flags = 0;
+	msg[0].len = PAGE_SELECT_LEN;
+	msg[0].buf = buf;
+
+	/* buf[0] is the register to write: MASK_8BIT (0xff) is presumably
+	 * the RMI4 page select register — confirm against the spec.
+	 */
+	page = ((addr >> 8) & MASK_8BIT);
+	buf[0] = MASK_8BIT;
+	buf[1] = page;
+
+	if (page != rmi4_data->current_page) {
+		for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) {
+			if (i2c_transfer(i2c->adapter, &msg[0], 1) == 1) {
+				rmi4_data->current_page = page;
+				retval = PAGE_SELECT_LEN;
+				break;
+			}
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: I2C retry %d\n",
+					__func__, retry + 1);
+			msleep(20);
+
+			/* Halfway through the retries, try the alternate
+			 * slave address in case the device fell back to
+			 * its recovery address.
+			 */
+			if (retry == SYN_I2C_RETRY_TIMES / 2) {
+				synaptics_rmi4_i2c_check_addr(rmi4_data, i2c);
+				msg[0].addr = hw_if.board_data->i2c_addr;
+			}
+		}
+	} else {
+		retval = PAGE_SELECT_LEN;
+	}
+
+	return retval;
+}
+
+/*
+ * Read @length bytes from register @addr into @data.
+ *
+ * Builds one address-write message followed by one or more read
+ * messages (split into I2C_BURST_LIMIT chunks when that limit is
+ * compiled in) and issues them in batches of at most XFER_MSGS_LIMIT
+ * per i2c_transfer() call.
+ *
+ * Returns @length on success or a negative error code.
+ */
+static int synaptics_rmi4_i2c_read(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr, unsigned char *data, unsigned short length)
+{
+	int retval;
+	unsigned char retry;
+	unsigned char buf;
+#ifdef I2C_BURST_LIMIT
+	unsigned char ii;
+	unsigned char rd_msgs = ((length - 1) / I2C_BURST_LIMIT) + 1;
+#else
+	unsigned char rd_msgs = 1;
+#endif
+	unsigned char index = 0;
+	unsigned char xfer_msgs;
+	unsigned char remaining_msgs;
+	unsigned short i2c_addr;
+	unsigned short data_offset = 0;
+	unsigned short remaining_length = length;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+	struct i2c_adapter *adap = i2c->adapter;
+	struct i2c_msg msg[XFER_MSGS_LIMIT + 1];
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	retval = synaptics_rmi4_i2c_set_page(rmi4_data, addr);
+	if (retval != PAGE_SELECT_LEN) {
+		retval = -EIO;
+		goto exit;
+	}
+
+	/* msg[0] writes the register address; msg[1..rd_msgs] read data. */
+	msg[0].addr = hw_if.board_data->i2c_addr;
+	msg[0].flags = 0;
+	msg[0].len = 1;
+	msg[0].buf = &buf;
+
+#ifdef I2C_BURST_LIMIT
+	/* All but the last read message carry exactly I2C_BURST_LIMIT
+	 * bytes; the remainder goes into the final message below.
+	 */
+	for (ii = 0; ii < (rd_msgs - 1); ii++) {
+		msg[ii + 1].addr = hw_if.board_data->i2c_addr;
+		msg[ii + 1].flags = I2C_M_RD;
+		msg[ii + 1].len = I2C_BURST_LIMIT;
+		msg[ii + 1].buf = &data[data_offset];
+		data_offset += I2C_BURST_LIMIT;
+		remaining_length -= I2C_BURST_LIMIT;
+	}
+#endif
+
+	msg[rd_msgs].addr = hw_if.board_data->i2c_addr;
+	msg[rd_msgs].flags = I2C_M_RD;
+	msg[rd_msgs].len = remaining_length;
+	msg[rd_msgs].buf = &data[data_offset];
+
+	buf = addr & MASK_8BIT;
+
+	remaining_msgs = rd_msgs + 1;
+
+	/* Issue the messages in batches the adapter can handle. */
+	while (remaining_msgs) {
+		if (remaining_msgs > XFER_MSGS_LIMIT)
+			xfer_msgs = XFER_MSGS_LIMIT;
+		else
+			xfer_msgs = remaining_msgs;
+		for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) {
+			retval = i2c_transfer(adap, &msg[index], xfer_msgs);
+			if (retval == xfer_msgs)
+				break;
+
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: I2C retry %d\n",
+					__func__, retry + 1);
+			msleep(20);
+
+			/* Halfway through, switch to the alternate slave
+			 * address and repoint every message at it.
+			 */
+			if (retry == SYN_I2C_RETRY_TIMES / 2) {
+				synaptics_rmi4_i2c_check_addr(rmi4_data, i2c);
+				i2c_addr = hw_if.board_data->i2c_addr;
+				msg[0].addr = i2c_addr;
+#ifdef I2C_BURST_LIMIT
+				for (ii = 0; ii < (rd_msgs - 1); ii++)
+					msg[ii + 1].addr = i2c_addr;
+#endif
+				msg[rd_msgs].addr = i2c_addr;
+			}
+		}
+
+		if (retry == SYN_I2C_RETRY_TIMES) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: I2C read over retry limit\n",
+					__func__);
+			retval = -EIO;
+			goto exit;
+		}
+
+		remaining_msgs -= xfer_msgs;
+		index += xfer_msgs;
+	}
+
+	retval = length;
+
+exit:
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	return retval;
+}
+
+/*
+ * Write @length bytes from @data to register @addr.
+ *
+ * The transfer is a single message consisting of the register address
+ * byte followed by the payload, staged in the shared wr_buf (grown on
+ * demand by synaptics_rmi4_i2c_alloc_buf()).
+ *
+ * Returns @length on success or a negative error code.
+ */
+static int synaptics_rmi4_i2c_write(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr, unsigned char *data, unsigned short length)
+{
+	int retval;
+	unsigned char retry;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+	struct i2c_msg msg[1];
+
+	/* +1 for the register address byte that precedes the payload. */
+	retval = synaptics_rmi4_i2c_alloc_buf(rmi4_data, length + 1);
+	if (retval < 0)
+		return retval;
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	retval = synaptics_rmi4_i2c_set_page(rmi4_data, addr);
+	if (retval != PAGE_SELECT_LEN) {
+		retval = -EIO;
+		goto exit;
+	}
+
+	msg[0].addr = hw_if.board_data->i2c_addr;
+	msg[0].flags = 0;
+	msg[0].len = length + 1;
+	msg[0].buf = wr_buf;
+
+	wr_buf[0] = addr & MASK_8BIT;
+	retval = secure_memcpy(&wr_buf[1], length, &data[0], length, length);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy data\n",
+				__func__);
+		goto exit;
+	}
+
+	for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) {
+		if (i2c_transfer(i2c->adapter, &msg[0], 1) == 1) {
+			retval = length;
+			break;
+		}
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: I2C retry %d\n",
+				__func__, retry + 1);
+		msleep(20);
+
+		/* Halfway through the retries, try the alternate slave
+		 * address (recovery mode) before giving up.
+		 */
+		if (retry == SYN_I2C_RETRY_TIMES / 2) {
+			synaptics_rmi4_i2c_check_addr(rmi4_data, i2c);
+			msg[0].addr = hw_if.board_data->i2c_addr;
+		}
+	}
+
+	if (retry == SYN_I2C_RETRY_TIMES) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: I2C write over retry limit\n",
+				__func__);
+		retval = -EIO;
+	}
+
+exit:
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	return retval;
+}
+
+/* Bus operations handed to the core driver through hw_if. */
+static struct synaptics_dsx_bus_access bus_access = {
+	.type = BUS_I2C,
+	.read = synaptics_rmi4_i2c_read,
+	.write = synaptics_rmi4_i2c_write,
+};
+
+static void synaptics_rmi4_i2c_dev_release(struct device *dev)
+{
+	/* The platform device is kzalloc'ed in probe(); free it when the
+	 * device core drops the last reference.
+	 */
+	kfree(synaptics_dsx_i2c_device);
+}
+
+/*
+ * I2C probe: allocate the platform device the DSX core driver binds
+ * to, populate board data (from DT when available), and register it.
+ *
+ * Fixes over the previous version: synaptics_dsx_i2c_device was
+ * leaked on every error path after its allocation, and the parse_dt()
+ * return value was ignored.
+ */
+static int synaptics_rmi4_i2c_probe(struct i2c_client *client,
+		const struct i2c_device_id *dev_id)
+{
+	int retval;
+
+	if (!i2c_check_functionality(client->adapter,
+			I2C_FUNC_SMBUS_BYTE_DATA)) {
+		dev_err(&client->dev,
+				"%s: SMBus byte data commands not supported by host\n",
+				__func__);
+		return -EIO;
+	}
+
+	synaptics_dsx_i2c_device = kzalloc(
+			sizeof(struct platform_device),
+			GFP_KERNEL);
+	if (!synaptics_dsx_i2c_device) {
+		dev_err(&client->dev,
+				"%s: Failed to allocate memory for synaptics_dsx_i2c_device\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+#ifdef CONFIG_OF
+	if (client->dev.of_node) {
+		/* Board data and button maps are devm-managed; only the
+		 * platform device needs explicit cleanup on error.
+		 */
+		hw_if.board_data = devm_kzalloc(&client->dev,
+				sizeof(struct synaptics_dsx_board_data),
+				GFP_KERNEL);
+		if (!hw_if.board_data) {
+			dev_err(&client->dev,
+					"%s: Failed to allocate memory for board data\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_free_device;
+		}
+		hw_if.board_data->cap_button_map = devm_kzalloc(&client->dev,
+				sizeof(struct synaptics_dsx_button_map),
+				GFP_KERNEL);
+		if (!hw_if.board_data->cap_button_map) {
+			dev_err(&client->dev,
+					"%s: Failed to allocate memory for 0D button map\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_free_device;
+		}
+		hw_if.board_data->vir_button_map = devm_kzalloc(&client->dev,
+				sizeof(struct synaptics_dsx_button_map),
+				GFP_KERNEL);
+		if (!hw_if.board_data->vir_button_map) {
+			dev_err(&client->dev,
+					"%s: Failed to allocate memory for virtual button map\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_free_device;
+		}
+		retval = parse_dt(&client->dev, hw_if.board_data);
+		if (retval < 0)
+			goto err_free_device;
+	}
+#else
+	hw_if.board_data = client->dev.platform_data;
+#endif
+
+	/* NOTE(review): assumes board_data is non-NULL here (OF node or
+	 * platform data present) — same assumption as before.
+	 */
+	hw_if.bus_access = &bus_access;
+	hw_if.board_data->i2c_addr = client->addr;
+
+	synaptics_dsx_i2c_device->name = PLATFORM_DRIVER_NAME;
+	synaptics_dsx_i2c_device->id = 0;
+	synaptics_dsx_i2c_device->num_resources = 0;
+	synaptics_dsx_i2c_device->dev.parent = &client->dev;
+	synaptics_dsx_i2c_device->dev.platform_data = &hw_if;
+	synaptics_dsx_i2c_device->dev.release = synaptics_rmi4_i2c_dev_release;
+
+	retval = platform_device_register(synaptics_dsx_i2c_device);
+	if (retval) {
+		dev_err(&client->dev,
+				"%s: Failed to register platform device\n",
+				__func__);
+		/* Registration initialized the device, so drop the
+		 * reference; dev.release then frees the allocation.
+		 */
+		platform_device_put(synaptics_dsx_i2c_device);
+		synaptics_dsx_i2c_device = NULL;
+		return -ENODEV;
+	}
+
+	return 0;
+
+err_free_device:
+	kfree(synaptics_dsx_i2c_device);
+	synaptics_dsx_i2c_device = NULL;
+	return retval;
+}
+
+static int synaptics_rmi4_i2c_remove(struct i2c_client *client)
+{
+	/* Unregistering the platform device runs the core driver's
+	 * remove path; dev.release frees the device structure.
+	 */
+	platform_device_unregister(synaptics_dsx_i2c_device);
+
+	return 0;
+}
+
+/* Legacy (non-devicetree) I2C device id table. */
+static const struct i2c_device_id synaptics_rmi4_id_table[] = {
+	{I2C_DRIVER_NAME, 0},
+	{},
+};
+MODULE_DEVICE_TABLE(i2c, synaptics_rmi4_id_table);
+
+#ifdef CONFIG_OF
+/* Device tree match table; const, as the driver core expects for
+ * of_match_table entries.
+ */
+static const struct of_device_id synaptics_rmi4_of_match_table[] = {
+	{
+		.compatible = "synaptics,dsx-i2c",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, synaptics_rmi4_of_match_table);
+#else
+#define synaptics_rmi4_of_match_table NULL
+#endif
+
+/* I2C driver glue; probe/remove manage the DSX core platform device. */
+static struct i2c_driver synaptics_rmi4_i2c_driver = {
+	.driver = {
+		.name = I2C_DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = synaptics_rmi4_of_match_table,
+	},
+	.probe = synaptics_rmi4_i2c_probe,
+	.remove = synaptics_rmi4_i2c_remove,
+	.id_table = synaptics_rmi4_id_table,
+};
+
+/* Called by the DSX core driver to register the I2C transport. */
+int synaptics_rmi4_bus_init_v26(void)
+{
+	return i2c_add_driver(&synaptics_rmi4_i2c_driver);
+}
+EXPORT_SYMBOL(synaptics_rmi4_bus_init_v26);
+
+/*
+ * Called by the DSX core driver to tear down the I2C transport.
+ *
+ * Fix: unregister the driver *before* freeing the shared write buffer
+ * so no in-flight write callback can touch freed memory, and NULL the
+ * pointer so a later re-init reallocates instead of using a dangling
+ * buffer.
+ */
+void synaptics_rmi4_bus_exit_v26(void)
+{
+	i2c_del_driver(&synaptics_rmi4_i2c_driver);
+
+	kfree(wr_buf);
+	wr_buf = NULL;
+}
+EXPORT_SYMBOL(synaptics_rmi4_bus_exit_v26);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX I2C Bus Support Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_proximity.c b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_proximity.c
new file mode 100644
index 0000000..ce8979c
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_proximity.c
@@ -0,0 +1,692 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2015 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx_v2_6.h>
+#include "synaptics_dsx_core.h"
+
+#define PROX_PHYS_NAME "synaptics_dsx/proximity"
+
+#define HOVER_Z_MAX (255)
+
+#define HOVERING_FINGER_EN (1 << 4)
+
+static ssize_t synaptics_rmi4_hover_finger_en_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_hover_finger_en_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+/* Sysfs attributes attached to the core input device (see prox_init). */
+static struct device_attribute attrs[] = {
+	__ATTR(hover_finger_en, 0664,
+			synaptics_rmi4_hover_finger_en_show,
+			synaptics_rmi4_hover_finger_en_store),
+};
+
+/*
+ * F12 query register 5: presence bitmap for control registers 0-23.
+ * The ctrlN_is_present bits are summed to compute the byte offset of
+ * control 23 within the F12 control block (see prox_reg_init()).
+ */
+struct synaptics_rmi4_f12_query_5 {
+	union {
+		struct {
+			unsigned char size_of_query6;
+			struct {
+				unsigned char ctrl0_is_present:1;
+				unsigned char ctrl1_is_present:1;
+				unsigned char ctrl2_is_present:1;
+				unsigned char ctrl3_is_present:1;
+				unsigned char ctrl4_is_present:1;
+				unsigned char ctrl5_is_present:1;
+				unsigned char ctrl6_is_present:1;
+				unsigned char ctrl7_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl8_is_present:1;
+				unsigned char ctrl9_is_present:1;
+				unsigned char ctrl10_is_present:1;
+				unsigned char ctrl11_is_present:1;
+				unsigned char ctrl12_is_present:1;
+				unsigned char ctrl13_is_present:1;
+				unsigned char ctrl14_is_present:1;
+				unsigned char ctrl15_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl16_is_present:1;
+				unsigned char ctrl17_is_present:1;
+				unsigned char ctrl18_is_present:1;
+				unsigned char ctrl19_is_present:1;
+				unsigned char ctrl20_is_present:1;
+				unsigned char ctrl21_is_present:1;
+				unsigned char ctrl22_is_present:1;
+				unsigned char ctrl23_is_present:1;
+			} __packed;
+		};
+		unsigned char data[4];	/* raw register image */
+	};
+};
+
+/*
+ * F12 query register 8: presence bitmap for data registers 0-7, used to
+ * locate the hovering finger data register (see prox_reg_init()).
+ */
+struct synaptics_rmi4_f12_query_8 {
+	union {
+		struct {
+			unsigned char size_of_query9;
+			struct {
+				unsigned char data0_is_present:1;
+				unsigned char data1_is_present:1;
+				unsigned char data2_is_present:1;
+				unsigned char data3_is_present:1;
+				unsigned char data4_is_present:1;
+				unsigned char data5_is_present:1;
+				unsigned char data6_is_present:1;
+				unsigned char data7_is_present:1;
+			} __packed;
+		};
+		unsigned char data[2];	/* raw register image */
+	};
+};
+
+/*
+ * Hovering finger data as laid out in the F12 data register: object
+ * status byte, 16-bit little-endian X/Y position, and Z (height).
+ */
+struct prox_finger_data {
+	union {
+		struct {
+			unsigned char object_type_and_status;
+			unsigned char x_lsb;
+			unsigned char x_msb;
+			unsigned char y_lsb;
+			unsigned char y_msb;
+			unsigned char z;
+		} __packed;
+		unsigned char proximity_data[6];	/* raw register image */
+	};
+};
+
+/*
+ * State of the proximity (hover) handler. A single instance is kept in
+ * the file-scope 'prox' pointer; register addresses are discovered at
+ * init time by prox_scan_pdt()/prox_reg_init().
+ */
+struct synaptics_rmi4_prox_handle {
+	bool hover_finger_present;	/* finger currently reported hovering */
+	bool hover_finger_en;		/* cached enable setting */
+	unsigned char intr_mask;	/* F12 interrupt source bits */
+	unsigned short query_base_addr;
+	unsigned short control_base_addr;
+	unsigned short data_base_addr;
+	unsigned short command_base_addr;
+	unsigned short hover_finger_en_addr;	/* object report enable reg */
+	unsigned short hover_finger_data_addr;	/* hovering finger data reg */
+	struct input_dev *prox_dev;
+	struct prox_finger_data *finger_data;
+	struct synaptics_rmi4_data *rmi4_data;
+};
+
+static struct synaptics_rmi4_prox_handle *prox;
+
+DECLARE_COMPLETION(prox_remove_complete);
+
+/* Report lift-off of the hovering finger and clear the presence flag. */
+static void prox_hover_finger_lift(void)
+{
+	struct input_dev *dev = prox->prox_dev;
+
+	input_report_key(dev, BTN_TOUCH, 0);
+	input_report_key(dev, BTN_TOOL_FINGER, 0);
+	input_sync(dev);
+
+	prox->hover_finger_present = false;
+}
+
+/*
+ * Read the hovering finger data register and forward position/height to
+ * the input subsystem; reports a lift-off when the finger disappears.
+ */
+static void prox_hover_finger_report(void)
+{
+	int retval;
+	int x;
+	int y;
+	int z;
+	struct prox_finger_data *data;
+	struct synaptics_rmi4_data *rmi4_data = prox->rmi4_data;
+
+	data = prox->finger_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			prox->hover_finger_data_addr,
+			data->proximity_data,
+			sizeof(data->proximity_data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read hovering finger data\n",
+				__func__);
+		return;
+	}
+
+	/* No hovering finger: emit a lift-off only if one was reported. */
+	if (data->object_type_and_status != F12_HOVERING_FINGER_STATUS) {
+		if (prox->hover_finger_present)
+			prox_hover_finger_lift();
+
+		return;
+	}
+
+	x = (data->x_msb << 8) | (data->x_lsb);
+	y = (data->y_msb << 8) | (data->y_lsb);
+	z = HOVER_Z_MAX - data->z;	/* invert so larger z = closer */
+
+	input_report_key(prox->prox_dev, BTN_TOUCH, 0);
+	input_report_key(prox->prox_dev, BTN_TOOL_FINGER, 1);
+	input_report_abs(prox->prox_dev, ABS_X, x);
+	input_report_abs(prox->prox_dev, ABS_Y, y);
+	input_report_abs(prox->prox_dev, ABS_DISTANCE, z);
+
+	input_sync(prox->prox_dev);
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: x = %d y = %d z = %d\n",
+			__func__, x, y, z);
+
+	prox->hover_finger_present = true;
+
+	return;
+}
+
+/*
+ * Push the cached hover_finger_en setting into the F12 object report
+ * enable register via read-modify-write of the HOVERING_FINGER_EN bit.
+ * Returns 0 on success or a negative error from the register access.
+ */
+static int prox_set_hover_finger_en(void)
+{
+	int retval;
+	unsigned char reg;
+	struct synaptics_rmi4_data *rmi4_data = prox->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			prox->hover_finger_en_addr, &reg, sizeof(reg));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read from object report enable register\n",
+				__func__);
+		return retval;
+	}
+
+	if (prox->hover_finger_en)
+		reg |= HOVERING_FINGER_EN;
+	else
+		reg &= ~HOVERING_FINGER_EN;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			prox->hover_finger_en_addr, &reg, sizeof(reg));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write to object report enable register\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/* Advertise the absolute axis ranges supported by the proximity device. */
+static void prox_set_params(void)
+{
+	struct input_dev *dev = prox->prox_dev;
+	struct synaptics_rmi4_data *rmi4_data = prox->rmi4_data;
+
+	input_set_abs_params(dev, ABS_X, 0, rmi4_data->sensor_max_x, 0, 0);
+	input_set_abs_params(dev, ABS_Y, 0, rmi4_data->sensor_max_y, 0, 0);
+	input_set_abs_params(dev, ABS_DISTANCE, 0, HOVER_Z_MAX, 0, 0);
+}
+
+/*
+ * Derive the addresses of the hover enable/data registers from the F12
+ * query registers. Register offsets are not fixed: each present control
+ * or data register occupies one slot, so the offset of a register is
+ * the count of lower-numbered registers that exist.
+ */
+static int prox_reg_init(void)
+{
+	int retval;
+	unsigned char ctrl_23_offset;
+	unsigned char data_1_offset;
+	struct synaptics_rmi4_f12_query_5 query_5;
+	struct synaptics_rmi4_f12_query_8 query_8;
+	struct synaptics_rmi4_data *rmi4_data = prox->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			prox->query_base_addr + 5,
+			query_5.data,
+			sizeof(query_5.data));
+	if (retval < 0)
+		return retval;
+
+	/* Offset of control 23 = number of present controls 0..22. */
+	ctrl_23_offset = query_5.ctrl0_is_present +
+			query_5.ctrl1_is_present +
+			query_5.ctrl2_is_present +
+			query_5.ctrl3_is_present +
+			query_5.ctrl4_is_present +
+			query_5.ctrl5_is_present +
+			query_5.ctrl6_is_present +
+			query_5.ctrl7_is_present +
+			query_5.ctrl8_is_present +
+			query_5.ctrl9_is_present +
+			query_5.ctrl10_is_present +
+			query_5.ctrl11_is_present +
+			query_5.ctrl12_is_present +
+			query_5.ctrl13_is_present +
+			query_5.ctrl14_is_present +
+			query_5.ctrl15_is_present +
+			query_5.ctrl16_is_present +
+			query_5.ctrl17_is_present +
+			query_5.ctrl18_is_present +
+			query_5.ctrl19_is_present +
+			query_5.ctrl20_is_present +
+			query_5.ctrl21_is_present +
+			query_5.ctrl22_is_present;
+
+	prox->hover_finger_en_addr = prox->control_base_addr + ctrl_23_offset;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			prox->query_base_addr + 8,
+			query_8.data,
+			sizeof(query_8.data));
+	if (retval < 0)
+		return retval;
+
+	/* Offset of data 1 = 1 if data 0 exists, else 0. */
+	data_1_offset = query_8.data0_is_present;
+	prox->hover_finger_data_addr = prox->data_base_addr + data_1_offset;
+
+	return retval;
+}
+
+/*
+ * Walk the Page Description Table looking for F12, record its register
+ * base addresses, compute the interrupt mask for its sources, and
+ * enable those interrupt bits in F01 control 1.
+ * Returns 0 on success, -EINVAL if F12 is absent, or a register access
+ * error code.
+ */
+static int prox_scan_pdt(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char page;
+	unsigned char intr_count = 0;
+	unsigned char intr_off;
+	unsigned char intr_src;
+	unsigned short addr;
+	struct synaptics_rmi4_fn_desc fd;
+	struct synaptics_rmi4_data *rmi4_data = prox->rmi4_data;
+
+	for (page = 0; page < PAGES_TO_SERVICE; page++) {
+		/* PDT entries are laid out top-down from PDT_START. */
+		for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
+			addr |= (page << 8);	/* select current page */
+
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					addr,
+					(unsigned char *)&fd,
+					sizeof(fd));
+			if (retval < 0)
+				return retval;
+
+			addr &= ~(MASK_8BIT << 8);	/* strip page bits */
+
+			if (fd.fn_number) {
+				dev_dbg(rmi4_data->pdev->dev.parent,
+						"%s: Found F%02x\n",
+						__func__, fd.fn_number);
+				switch (fd.fn_number) {
+				case SYNAPTICS_RMI4_F12:
+					goto f12_found;
+					break;
+				}
+			} else {
+				/* Empty entry terminates this page's PDT. */
+				break;
+			}
+
+			/* Count interrupt sources of functions before F12. */
+			intr_count += fd.intr_src_count;
+		}
+	}
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Failed to find F12\n",
+			__func__);
+	return -EINVAL;
+
+f12_found:
+	prox->query_base_addr = fd.query_base_addr | (page << 8);
+	prox->control_base_addr = fd.ctrl_base_addr | (page << 8);
+	prox->data_base_addr = fd.data_base_addr | (page << 8);
+	prox->command_base_addr = fd.cmd_base_addr | (page << 8);
+
+	retval = prox_reg_init();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to initialize proximity registers\n",
+				__func__);
+		return retval;
+	}
+
+	/* F12's interrupt bits start after those of preceding functions. */
+	prox->intr_mask = 0;
+	intr_src = fd.intr_src_count;
+	intr_off = intr_count % 8;
+	for (ii = intr_off;
+			ii < (intr_src + intr_off);
+			ii++) {
+		prox->intr_mask |= 1 << ii;
+	}
+
+	rmi4_data->intr_mask[0] |= prox->intr_mask;
+
+	/* F01 control 1 holds the interrupt enable bits. */
+	addr = rmi4_data->f01_ctrl_base_addr + 1;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			addr,
+			&(rmi4_data->intr_mask[0]),
+			sizeof(rmi4_data->intr_mask[0]));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set interrupt enable bit\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/* Sysfs read: report the cached enable flag (no hardware access). */
+static ssize_t synaptics_rmi4_hover_finger_en_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	if (!prox)
+		return -ENODEV;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", prox->hover_finger_en);
+}
+
+/*
+ * Sysfs write: accept "1"/"0" (hex parsed) to enable/disable hovering
+ * finger reporting and push the setting to the device.
+ */
+static ssize_t synaptics_rmi4_hover_finger_en_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data;
+
+	/*
+	 * Validate the handle before touching it; the original read
+	 * prox->rmi4_data ahead of this NULL check.
+	 */
+	if (!prox)
+		return -ENODEV;
+
+	rmi4_data = prox->rmi4_data;
+
+	if (sscanf(buf, "%x", &input) != 1)
+		return -EINVAL;
+
+	if (input == 1)
+		prox->hover_finger_en = true;
+	else if (input == 0)
+		prox->hover_finger_en = false;
+	else
+		return -EINVAL;
+
+	retval = prox_set_hover_finger_en();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to change hovering finger enable setting\n",
+				__func__);
+		return retval;
+	}
+
+	return count;
+}
+
+/*
+ * External hook for other modules to toggle hovering finger reporting.
+ * Returns 0 on success, -ENODEV when the handler is not initialized,
+ * or a negative error from the register update.
+ */
+int synaptics_rmi4_prox_hover_finger_en(bool enable)
+{
+	if (!prox)
+		return -ENODEV;
+
+	prox->hover_finger_en = enable;
+
+	return prox_set_hover_finger_en();
+}
+EXPORT_SYMBOL(synaptics_rmi4_prox_hover_finger_en);
+
+/* Attention callback: report hover data when one of our sources fired. */
+static void synaptics_rmi4_prox_attn(struct synaptics_rmi4_data *rmi4_data,
+		unsigned char intr_mask)
+{
+	if (prox && (prox->intr_mask & intr_mask))
+		prox_hover_finger_report();
+}
+
+/*
+ * Initialize the proximity handler: allocate state, locate F12, enable
+ * hover reporting, register the input device, and expose the sysfs
+ * attributes. Returns 0 on success or a negative error code; on any
+ * failure all resources acquired so far are released and 'prox' is
+ * reset to NULL.
+ */
+static int synaptics_rmi4_prox_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char attr_count;
+
+	if (prox) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	prox = kzalloc(sizeof(*prox), GFP_KERNEL);
+	if (!prox) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for prox\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	prox->finger_data = kzalloc(sizeof(*(prox->finger_data)), GFP_KERNEL);
+	if (!prox->finger_data) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for finger_data\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_prox;
+	}
+
+	prox->rmi4_data = rmi4_data;
+
+	retval = prox_scan_pdt();
+	if (retval < 0)
+		goto exit_free_finger_data;
+
+	prox->hover_finger_en = true;
+
+	retval = prox_set_hover_finger_en();
+	if (retval < 0) {
+		/* Was "return retval", leaking prox and finger_data and
+		 * leaving a half-initialized handle behind. */
+		goto exit_free_finger_data;
+	}
+
+	prox->prox_dev = input_allocate_device();
+	if (prox->prox_dev == NULL) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to allocate proximity device\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_finger_data;
+	}
+
+	prox->prox_dev->name = PROXIMITY_DRIVER_NAME;
+	prox->prox_dev->phys = PROX_PHYS_NAME;
+	prox->prox_dev->id.product = SYNAPTICS_DSX_DRIVER_PRODUCT;
+	prox->prox_dev->id.version = SYNAPTICS_DSX_DRIVER_VERSION;
+	prox->prox_dev->dev.parent = rmi4_data->pdev->dev.parent;
+	input_set_drvdata(prox->prox_dev, rmi4_data);
+
+	set_bit(EV_KEY, prox->prox_dev->evbit);
+	set_bit(EV_ABS, prox->prox_dev->evbit);
+	set_bit(BTN_TOUCH, prox->prox_dev->keybit);
+	set_bit(BTN_TOOL_FINGER, prox->prox_dev->keybit);
+#ifdef INPUT_PROP_DIRECT
+	set_bit(INPUT_PROP_DIRECT, prox->prox_dev->propbit);
+#endif
+
+	prox_set_params();
+
+	retval = input_register_device(prox->prox_dev);
+	if (retval) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to register proximity device\n",
+				__func__);
+		goto exit_free_input_device;
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			goto exit_free_sysfs;
+		}
+	}
+
+	return 0;
+
+exit_free_sysfs:
+	/*
+	 * attr_count is unsigned, so the original "attr_count >= 0"
+	 * countdown never terminated; count down explicitly instead,
+	 * removing only the attributes created above.
+	 */
+	while (attr_count > 0) {
+		attr_count--;
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+	input_unregister_device(prox->prox_dev);
+	prox->prox_dev = NULL;
+
+exit_free_input_device:
+	if (prox->prox_dev)
+		input_free_device(prox->prox_dev);
+
+exit_free_finger_data:
+	kfree(prox->finger_data);
+
+exit_free_prox:
+	kfree(prox);
+	prox = NULL;
+
+exit:
+	return retval;
+}
+
+/*
+ * Tear down the proximity handler and signal completion so module
+ * unload (rmi4_proximity_module_exit) can finish.
+ */
+static void synaptics_rmi4_prox_remove(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char attr_count;
+
+	if (!prox)
+		goto exit;
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+	input_unregister_device(prox->prox_dev);
+	kfree(prox->finger_data);
+	kfree(prox);
+	prox = NULL;
+
+exit:
+	complete(&prox_remove_complete);
+
+	return;
+}
+
+/*
+ * Reset callback: re-discover register addresses and restore the hover
+ * enable setting; performs a first-time init if none exists yet.
+ */
+static void synaptics_rmi4_prox_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!prox) {
+		synaptics_rmi4_prox_init(rmi4_data);
+		return;
+	}
+
+	prox_hover_finger_lift();
+	prox_scan_pdt();
+	prox_set_hover_finger_en();
+}
+
+/* Reinit callback: clear any reported finger and restore the setting. */
+static void synaptics_rmi4_prox_reinit(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (prox) {
+		prox_hover_finger_lift();
+		prox_set_hover_finger_en();
+	}
+}
+
+/* Early-suspend callback: release any finger still reported hovering. */
+static void synaptics_rmi4_prox_e_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (prox)
+		prox_hover_finger_lift();
+}
+
+/* Suspend callback: release any finger still reported hovering. */
+static void synaptics_rmi4_prox_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (prox)
+		prox_hover_finger_lift();
+}
+
+/* Expansion-function descriptor registered with the DSX core. */
+static struct synaptics_rmi4_exp_fn proximity_module = {
+	.fn_type = RMI_PROXIMITY,
+	.init = synaptics_rmi4_prox_init,
+	.remove = synaptics_rmi4_prox_remove,
+	.reset = synaptics_rmi4_prox_reset,
+	.reinit = synaptics_rmi4_prox_reinit,
+	.early_suspend = synaptics_rmi4_prox_e_suspend,
+	.suspend = synaptics_rmi4_prox_suspend,
+	.resume = NULL,
+	.late_resume = NULL,
+	.attn = synaptics_rmi4_prox_attn,
+};
+
+/* Module entry: register the proximity handler with the DSX core. */
+static int __init rmi4_proximity_module_init(void)
+{
+	synaptics_rmi4_new_function(&proximity_module, true);
+	return 0;
+}
+
+/* Module exit: deregister and block until the core removes the handler. */
+static void __exit rmi4_proximity_module_exit(void)
+{
+	synaptics_rmi4_new_function(&proximity_module, false);
+	wait_for_completion(&prox_remove_complete);
+}
+
+module_init(rmi4_proximity_module_init);
+module_exit(rmi4_proximity_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX Proximity Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_rmi_dev.c b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_rmi_dev.c
new file mode 100644
index 0000000..3f57d13
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_rmi_dev.c
@@ -0,0 +1,1058 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2015 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/gpio.h>
+#include <linux/uaccess.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx_v2_6.h>
+#include "synaptics_dsx_core.h"
+
+#define CHAR_DEVICE_NAME "rmi"
+#define DEVICE_CLASS_NAME "rmidev"
+#define SYSFS_FOLDER_NAME "rmidev"
+#define DEV_NUMBER 1
+#define REG_ADDR_LIMIT 0xFFFF
+
+static ssize_t rmidev_sysfs_data_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+static ssize_t rmidev_sysfs_data_store(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+static ssize_t rmidev_sysfs_open_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t rmidev_sysfs_release_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t rmidev_sysfs_attn_state_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t rmidev_sysfs_pid_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t rmidev_sysfs_pid_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t rmidev_sysfs_term_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t rmidev_sysfs_intr_mask_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t rmidev_sysfs_intr_mask_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t rmidev_sysfs_concurrent_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t rmidev_sysfs_concurrent_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+/*
+ * State of the RMI char-device bridge; a single instance lives in the
+ * file-scope 'rmidev' pointer.
+ */
+struct rmidev_handle {
+	dev_t dev_no;			/* allocated char device number */
+	pid_t pid;			/* PID of userspace logging tool (0 = none) */
+	unsigned char intr_mask;
+	unsigned char *tmpbuf;		/* transfer buffer, grown on demand */
+	unsigned int tmpbuf_size;
+	struct device dev;
+	struct synaptics_rmi4_data *rmi4_data;
+	struct kobject *sysfs_dir;
+	struct siginfo interrupt_signal;
+	struct siginfo terminate_signal;
+	struct task_struct *task;	/* task of the logging tool */
+	void *data;
+	bool irq_enabled;		/* our sysfs IRQ handler installed */
+	bool concurrent;		/* also dispatch touch reports on reads */
+};
+
+/* Per-char-device bookkeeping (stored as file private_data). */
+struct rmidev_data {
+	int ref_count;			/* open file references */
+	struct cdev main_dev;
+	struct class *device_class;
+	struct mutex file_mutex;	/* serializes file operations */
+	struct rmidev_handle *rmi_dev;
+};
+
+/* Binary sysfs node giving raw read/write access to the register map. */
+static struct bin_attribute attr_data = {
+	.attr = {
+		.name = "data",
+		.mode = 0664,
+	},
+	.size = 0,
+	.read = rmidev_sysfs_data_show,
+	.write = rmidev_sysfs_data_store,
+};
+
+/* Sysfs attributes under the rmidev folder. */
+static struct device_attribute attrs[] = {
+	__ATTR(open, 0220,
+			synaptics_rmi4_show_error,
+			rmidev_sysfs_open_store),
+	__ATTR(release, 0220,
+			synaptics_rmi4_show_error,
+			rmidev_sysfs_release_store),
+	__ATTR(attn_state, 0444,
+			rmidev_sysfs_attn_state_show,
+			synaptics_rmi4_store_error),
+	__ATTR(pid, 0664,
+			rmidev_sysfs_pid_show,
+			rmidev_sysfs_pid_store),
+	__ATTR(term, 0220,
+			synaptics_rmi4_show_error,
+			rmidev_sysfs_term_store),
+	/*
+	 * intr_mask and concurrent were 0444 despite wiring up store
+	 * handlers, making the stores unreachable (and tripping
+	 * VERIFY_OCTAL_PERMISSIONS on newer kernels); grant owner/group
+	 * write like the other writable attributes.
+	 */
+	__ATTR(intr_mask, 0664,
+			rmidev_sysfs_intr_mask_show,
+			rmidev_sysfs_intr_mask_store),
+	__ATTR(concurrent, 0664,
+			rmidev_sysfs_concurrent_show,
+			rmidev_sysfs_concurrent_store),
+};
+
+static int rmidev_major_num;
+
+static struct class *rmidev_device_class;
+
+static struct rmidev_handle *rmidev;
+
+DECLARE_COMPLETION(rmidev_remove_complete_v26);
+
+/*
+ * Threaded IRQ handler used while userspace owns the device: just wake
+ * pollers of the attn_state sysfs file.
+ */
+static irqreturn_t rmidev_sysfs_irq(int irq, void *data)
+{
+	struct synaptics_rmi4_data *rmi4_data = data;
+
+	sysfs_notify(&rmi4_data->input_dev->dev.kobj,
+			SYSFS_FOLDER_NAME, "attn_state");
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Install or remove the sysfs-notification IRQ handler. Idempotent in
+ * both directions (tracked via rmidev->irq_enabled). Returns 0 on
+ * success or a negative error from register read / IRQ request.
+ */
+static int rmidev_sysfs_irq_enable(struct synaptics_rmi4_data *rmi4_data,
+		bool enable)
+{
+	int retval = 0;
+	unsigned char intr_status[MAX_INTR_REGISTERS];
+	unsigned long irq_flags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING |
+			IRQF_ONESHOT;
+
+	if (enable) {
+		if (rmidev->irq_enabled)
+			return retval;
+
+		/* Clear interrupts first */
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				rmi4_data->f01_data_base_addr + 1,
+				intr_status,
+				rmi4_data->num_of_intr_regs);
+		if (retval < 0)
+			return retval;
+
+		retval = request_threaded_irq(rmi4_data->irq, NULL,
+				rmidev_sysfs_irq, irq_flags,
+				PLATFORM_DRIVER_NAME, rmi4_data);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create irq thread\n",
+					__func__);
+			return retval;
+		}
+
+		rmidev->irq_enabled = true;
+	} else {
+		if (rmidev->irq_enabled) {
+			disable_irq(rmi4_data->irq);
+			free_irq(rmi4_data->irq, rmi4_data);
+			rmidev->irq_enabled = false;
+		}
+	}
+
+	return retval;
+}
+
+/*
+ * Binary sysfs read: copy 'count' bytes of the register map starting
+ * at 'pos' into buf. In concurrent mode, a read of the F01 data block
+ * also dispatches any pending touch reports so the driver and a
+ * userspace tool can share the interrupt status.
+ */
+static ssize_t rmidev_sysfs_data_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	unsigned char intr_status = 0;
+	unsigned int length = (unsigned int)count;
+	unsigned short address = (unsigned short)pos;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	if (length > (REG_ADDR_LIMIT - address)) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Out of register map limit\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (length) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				address,
+				(unsigned char *)buf,
+				length);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read data\n",
+					__func__);
+			return retval;
+		}
+	} else {
+		return -EINVAL;
+	}
+
+	if (!rmidev->concurrent)
+		goto exit;
+
+	/* Touch dispatch only applies to reads of F01 data (intr status). */
+	if (address != rmi4_data->f01_data_base_addr)
+		goto exit;
+
+	if (length <= 1)
+		goto exit;
+
+	/* Byte 1 of the F01 data block is interrupt status register 0. */
+	intr_status = buf[1];
+
+	if (!list_empty(&rmi->support_fn_list)) {
+		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+			if (fhandler->num_of_data_sources) {
+				if (fhandler->intr_mask & intr_status) {
+					rmi4_data->report_touch(rmi4_data,
+							fhandler);
+				}
+			}
+		}
+	}
+
+exit:
+	return length;
+}
+
+/*
+ * Binary sysfs write: copy 'count' bytes from buf into the register
+ * map starting at 'pos'. Returns the byte count on success.
+ */
+static ssize_t rmidev_sysfs_data_store(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	unsigned short address = (unsigned short)pos;
+	unsigned int length = (unsigned int)count;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	/* Zero-length writes are rejected outright. */
+	if (!length)
+		return -EINVAL;
+
+	/* Refuse transfers that would run past the register map. */
+	if (length > (REG_ADDR_LIMIT - address)) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Out of register map limit\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			address,
+			(unsigned char *)buf,
+			length);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write data\n",
+				__func__);
+		return retval;
+	}
+
+	return length;
+}
+
+/*
+ * Sysfs "open": hand the attention interrupt over to userspace by
+ * disabling the core handler and installing the sysfs-notify handler.
+ */
+static ssize_t rmidev_sysfs_open_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	if (sscanf(buf, "%u", &input) != 1)
+		return -EINVAL;
+
+	if (input != 1)
+		return -EINVAL;
+
+	if (rmi4_data->sensor_sleep) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Sensor sleeping\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	rmi4_data->stay_awake = true;
+
+	rmi4_data->irq_enable(rmi4_data, false, false);
+
+	/*
+	 * The original ignored this result; a failed request left the
+	 * device with no interrupt handler at all while reporting
+	 * success. Roll the handover back on error.
+	 */
+	retval = rmidev_sysfs_irq_enable(rmi4_data, true);
+	if (retval < 0) {
+		rmi4_data->irq_enable(rmi4_data, true, false);
+		rmi4_data->stay_awake = false;
+		return retval;
+	}
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Attention interrupt disabled\n",
+			__func__);
+
+	return count;
+}
+
+/*
+ * Sysfs "release": return the attention interrupt to the core driver
+ * and reset the device so firmware state is consistent again.
+ */
+static ssize_t rmidev_sysfs_release_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	if (sscanf(buf, "%u", &input) != 1)
+		return -EINVAL;
+
+	if (input != 1)
+		return -EINVAL;
+
+	rmidev_sysfs_irq_enable(rmi4_data, false);
+	rmi4_data->irq_enable(rmi4_data, true, false);
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Attention interrupt enabled\n",
+			__func__);
+
+	rmi4_data->reset_device(rmi4_data, false);
+
+	rmi4_data->stay_awake = false;
+
+	return count;
+}
+
+/* Sysfs read: sample the attention GPIO so userspace can poll it. */
+static ssize_t rmidev_sysfs_attn_state_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+	int state = gpio_get_value(bdata->irq_gpio);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", state);
+}
+
+/* Sysfs read: PID of the registered data logging tool (0 = none). */
+static ssize_t rmidev_sysfs_pid_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", rmidev->pid);
+}
+
+/*
+ * Sysfs write: register the PID of a userspace logging tool and cache
+ * its task_struct for later signal delivery (see term_store).
+ * NOTE(review): pid_task() is called without rcu_read_lock() and the
+ * cached task pointer is not reference-counted — verify against the
+ * core driver's locking expectations.
+ */
+static ssize_t rmidev_sysfs_pid_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	if (sscanf(buf, "%u", &input) != 1)
+		return -EINVAL;
+
+	rmidev->pid = input;
+
+	if (rmidev->pid) {
+		rmidev->task = pid_task(find_vpid(rmidev->pid), PIDTYPE_PID);
+		if (!rmidev->task) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to locate PID of data logging tool\n",
+					__func__);
+			return -EINVAL;
+		}
+	}
+
+	return count;
+}
+
+/* Sysfs write: send SIGTERM to the registered logging tool, if any. */
+static ssize_t rmidev_sysfs_term_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+
+	if (sscanf(buf, "%u", &input) != 1)
+		return -EINVAL;
+
+	if (input != 1)
+		return -EINVAL;
+
+	if (rmidev->pid)
+		send_sig_info(SIGTERM, &rmidev->terminate_signal, rmidev->task);
+
+	return count;
+}
+
+/* Sysfs read: current interrupt mask, hex formatted. */
+static ssize_t rmidev_sysfs_intr_mask_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%02x\n", rmidev->intr_mask);
+}
+
+/* Sysfs write: set the interrupt mask (decimal input, low byte kept). */
+static ssize_t rmidev_sysfs_intr_mask_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+
+	if (sscanf(buf, "%u", &input) != 1)
+		return -EINVAL;
+
+	rmidev->intr_mask = (unsigned char)input;
+
+	return count;
+}
+
+/* Sysfs read: whether concurrent touch dispatch on reads is enabled. */
+static ssize_t rmidev_sysfs_concurrent_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", rmidev->concurrent);
+}
+
+/* Sysfs write: any value > 0 enables concurrent touch dispatch. */
+static ssize_t rmidev_sysfs_concurrent_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+
+	if (sscanf(buf, "%u", &input) != 1)
+		return -EINVAL;
+
+	rmidev->concurrent = input > 0 ? true : false;
+
+	return count;
+}
+
+/*
+ * Grow the shared transfer buffer to hold at least count + 1 bytes.
+ * Returns 0 on success, -ENOMEM on allocation failure (in which case
+ * tmpbuf is NULL and tmpbuf_size is 0).
+ * NOTE(review): callers in this file ignore the return value; a failed
+ * allocation would then be dereferenced as a NULL tmpbuf — verify call
+ * sites check for errors.
+ */
+static int rmidev_allocate_buffer(int count)
+{
+	if (count + 1 > rmidev->tmpbuf_size) {
+		if (rmidev->tmpbuf_size)
+			kfree(rmidev->tmpbuf);
+		rmidev->tmpbuf = kzalloc(count + 1, GFP_KERNEL);
+		if (!rmidev->tmpbuf) {
+			dev_err(rmidev->rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for buffer\n",
+					__func__);
+			rmidev->tmpbuf_size = 0;
+			return -ENOMEM;
+		}
+		rmidev->tmpbuf_size = count + 1;
+	}
+
+	return 0;
+}
+
+/*
+ * rmidev_llseek - set register address to access for RMI device
+ *
+ * @filp: pointer to file structure
+ * @off:
+ *	if whence == SEEK_SET,
+ *		off: 16-bit RMI register address
+ *	if whence == SEEK_CUR,
+ *		off: offset from current position
+ *	if whence == SEEK_END,
+ *		off: offset from end position (0xFFFF)
+ * @whence: SEEK_SET, SEEK_CUR, or SEEK_END
+ */
+static loff_t rmidev_llseek(struct file *filp, loff_t off, int whence)
+{
+	loff_t newpos;
+	struct rmidev_data *dev_data = filp->private_data;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	if (IS_ERR(dev_data)) {
+		pr_err("%s: Pointer of char device data is invalid", __func__);
+		return -EBADF;
+	}
+
+	mutex_lock(&(dev_data->file_mutex));
+
+	switch (whence) {
+	case SEEK_SET:
+		newpos = off;
+		break;
+	case SEEK_CUR:
+		newpos = filp->f_pos + off;
+		break;
+	case SEEK_END:
+		/* End of the RMI register map, not of a real file. */
+		newpos = REG_ADDR_LIMIT + off;
+		break;
+	default:
+		newpos = -EINVAL;
+		goto clean_up;
+	}
+
+	/* Positions are 16-bit RMI register addresses. */
+	if (newpos < 0 || newpos > REG_ADDR_LIMIT) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: New position 0x%04x is invalid\n",
+				__func__, (unsigned int)newpos);
+		newpos = -EINVAL;
+		goto clean_up;
+	}
+
+	filp->f_pos = newpos;
+
+clean_up:
+	mutex_unlock(&(dev_data->file_mutex));
+
+	return newpos;
+}
+
+/*
+ * rmidev_read: read register data from RMI device
+ *
+ * @filp: pointer to file structure
+ * @buf: pointer to user space buffer
+ * @count: number of bytes to read
+ * @f_pos: starting RMI register address
+ *
+ * Returns the number of bytes read or a negative errno.  When the
+ * concurrent flag is set and the read starts at the F01 data base
+ * address, pending touch reports are also dispatched from the
+ * interrupt status byte.
+ */
+static ssize_t rmidev_read(struct file *filp, char __user *buf,
+		size_t count, loff_t *f_pos)
+{
+	ssize_t retval;
+	unsigned char intr_status = 0;
+	unsigned short address;
+	struct rmidev_data *dev_data = filp->private_data;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	if (IS_ERR(dev_data)) {
+		pr_err("%s: Pointer of char device data is invalid", __func__);
+		return -EBADF;
+	}
+
+	if (count == 0)
+		return 0;
+
+	/* clamp the transfer to the end of the register address space */
+	if (count > (REG_ADDR_LIMIT - *f_pos))
+		count = REG_ADDR_LIMIT - *f_pos;
+
+	address = (unsigned short)(*f_pos);
+
+	/* bail out if the scratch buffer cannot be grown; reading into a
+	 * NULL or undersized buffer would corrupt memory */
+	retval = rmidev_allocate_buffer(count);
+	if (retval < 0)
+		return retval;
+
+	mutex_lock(&(dev_data->file_mutex));
+
+	retval = synaptics_rmi4_reg_read(rmidev->rmi4_data,
+			*f_pos,
+			rmidev->tmpbuf,
+			count);
+	if (retval < 0)
+		goto clean_up;
+
+	if (copy_to_user(buf, rmidev->tmpbuf, count))
+		retval = -EFAULT;
+	else
+		*f_pos += retval;
+
+	if (!rmidev->concurrent)
+		goto clean_up;
+
+	/* concurrent mode: when user space polls F01 data, forward the
+	 * interrupt status byte to the registered touch handlers */
+	if (address != rmi4_data->f01_data_base_addr)
+		goto clean_up;
+
+	if (count <= 1)
+		goto clean_up;
+
+	intr_status = rmidev->tmpbuf[1];
+
+	if (!list_empty(&rmi->support_fn_list)) {
+		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+			if (fhandler->num_of_data_sources) {
+				if (fhandler->intr_mask & intr_status) {
+					rmi4_data->report_touch(rmi4_data,
+							fhandler);
+				}
+			}
+		}
+	}
+
+clean_up:
+	mutex_unlock(&(dev_data->file_mutex));
+
+	return retval;
+}
+
+/*
+ * rmidev_write: write register data to RMI device
+ *
+ * @filp: pointer to file structure
+ * @buf: pointer to user space buffer
+ * @count: number of bytes to write
+ * @f_pos: starting RMI register address
+ *
+ * Returns the number of bytes written or a negative errno.
+ */
+static ssize_t rmidev_write(struct file *filp, const char __user *buf,
+		size_t count, loff_t *f_pos)
+{
+	ssize_t retval;
+	struct rmidev_data *dev_data = filp->private_data;
+
+	if (IS_ERR(dev_data)) {
+		pr_err("%s: Pointer of char device data is invalid", __func__);
+		return -EBADF;
+	}
+
+	if (count == 0)
+		return 0;
+
+	/* clamp the transfer to the end of the register address space */
+	if (count > (REG_ADDR_LIMIT - *f_pos))
+		count = REG_ADDR_LIMIT - *f_pos;
+
+	/* bail out if the scratch buffer cannot be grown; copying into a
+	 * NULL or undersized buffer would corrupt memory */
+	retval = rmidev_allocate_buffer(count);
+	if (retval < 0)
+		return retval;
+
+	if (copy_from_user(rmidev->tmpbuf, buf, count))
+		return -EFAULT;
+
+	mutex_lock(&(dev_data->file_mutex));
+
+	retval = synaptics_rmi4_reg_write(rmidev->rmi4_data,
+			*f_pos,
+			rmidev->tmpbuf,
+			count);
+	if (retval >= 0)
+		*f_pos += retval;
+
+	mutex_unlock(&(dev_data->file_mutex));
+
+	return retval;
+}
+
+/*
+ * rmidev_open: open the RMI char device
+ *
+ * Allows a single opener at a time.  On a successful open the
+ * attention interrupt is disabled so user space has exclusive access
+ * to the bus, and the controller is kept awake.  Returns -EACCES if
+ * the device is already open and -ENODEV if the sensor is asleep.
+ */
+static int rmidev_open(struct inode *inp, struct file *filp)
+{
+	int retval = 0;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+	struct rmidev_data *dev_data =
+			container_of(inp->i_cdev, struct rmidev_data, main_dev);
+
+	if (!dev_data)
+		return -EACCES;
+
+	if (rmi4_data->sensor_sleep) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Sensor sleeping\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	filp->private_data = dev_data;
+
+	mutex_lock(&(dev_data->file_mutex));
+
+	/* enforce single-open before touching interrupt/wake state so a
+	 * rejected open does not leave the IRQ disabled (and the
+	 * stay_awake flag set) for the existing user */
+	if (dev_data->ref_count >= 1) {
+		retval = -EACCES;
+		goto exit;
+	}
+
+	dev_data->ref_count++;
+
+	rmi4_data->stay_awake = true;
+
+	rmi4_data->irq_enable(rmi4_data, false, false);
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Attention interrupt disabled\n",
+			__func__);
+
+exit:
+	mutex_unlock(&(dev_data->file_mutex));
+
+	return retval;
+}
+
+/*
+ * rmidev_release: release the RMI char device
+ *
+ * Drops the single-open reference, re-enables the attention
+ * interrupt, resets the controller, and clears the stay-awake flag.
+ * Always returns 0.
+ */
+static int rmidev_release(struct inode *inp, struct file *filp)
+{
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+	struct rmidev_data *dev_data =
+			container_of(inp->i_cdev, struct rmidev_data, main_dev);
+
+	if (!dev_data)
+		return -EACCES;
+
+	mutex_lock(&(dev_data->file_mutex));
+
+	/* clamp the reference count so an unbalanced release cannot
+	 * drive it negative */
+	dev_data->ref_count--;
+	if (dev_data->ref_count < 0)
+		dev_data->ref_count = 0;
+
+	rmi4_data->irq_enable(rmi4_data, true, false);
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Attention interrupt enabled\n",
+			__func__);
+
+	mutex_unlock(&(dev_data->file_mutex));
+
+	/* reset outside the file mutex; user space may have left the
+	 * controller in an arbitrary state */
+	rmi4_data->reset_device(rmi4_data, false);
+
+	rmi4_data->stay_awake = false;
+
+	return 0;
+}
+
+/* File operations for the rmidev character device node. */
+static const struct file_operations rmidev_fops = {
+	.owner = THIS_MODULE,
+	.llseek = rmidev_llseek,
+	.read = rmidev_read,
+	.write = rmidev_write,
+	.open = rmidev_open,
+	.release = rmidev_release,
+};
+
+/*
+ * Tear down the character device tied to dev_data: destroy the device
+ * node, delete the cdev, and release the reserved device number
+ * region.  A NULL dev_data is a no-op.
+ */
+static void rmidev_device_cleanup(struct rmidev_data *dev_data)
+{
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+	dev_t devno;
+
+	if (!dev_data)
+		return;
+
+	devno = dev_data->main_dev.dev;
+
+	if (dev_data->device_class)
+		device_destroy(dev_data->device_class, devno);
+
+	cdev_del(&dev_data->main_dev);
+
+	unregister_chrdev_region(devno, 1);
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: rmidev device removed\n",
+			__func__);
+}
+
+/*
+ * Device-node callback for the rmidev class: place nodes under
+ * /dev/rmi/ and set their access mode.
+ *
+ * NOTE(review): the node is created world-readable and world-writable
+ * (0666), which gives any local user raw register access to the touch
+ * controller -- confirm this is intentional and consider dropping the
+ * S_IROTH | S_IWOTH bits.
+ */
+static char *rmi_char_devnode(struct device *dev, umode_t *mode)
+{
+	if (!mode)
+		return NULL;
+
+	*mode = (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
+
+	return kasprintf(GFP_KERNEL, "rmi/%s", dev_name(dev));
+}
+
+/*
+ * Create the rmidev device class (once).  Returns 0 on success or if
+ * the class already exists, -ENODEV on failure.
+ */
+static int rmidev_create_device_class(void)
+{
+	if (rmidev_device_class != NULL)
+		return 0;
+
+	rmidev_device_class = class_create(THIS_MODULE, DEVICE_CLASS_NAME);
+
+	if (IS_ERR(rmidev_device_class)) {
+		pr_err("%s: Failed to create /dev/%s\n",
+				__func__, CHAR_DEVICE_NAME);
+		/* do not cache the ERR_PTR: a later call would otherwise
+		 * see a non-NULL class and wrongly report success */
+		rmidev_device_class = NULL;
+		return -ENODEV;
+	}
+
+	rmidev_device_class->devnode = rmi_char_devnode;
+
+	return 0;
+}
+
+/*
+ * Attention callback: deliver SIGIO to the attached user space task
+ * when one of the interrupt sources it subscribed to fires.
+ */
+static void rmidev_attn(struct synaptics_rmi4_data *rmi4_data,
+		unsigned char intr_mask)
+{
+	if (!rmidev || !rmidev->pid)
+		return;
+
+	if (rmidev->intr_mask & intr_mask)
+		send_sig_info(SIGIO, &rmidev->interrupt_signal, rmidev->task);
+}
+
+/*
+ * rmidev_init_device: set up the rmidev char device and sysfs interface
+ *
+ * Allocates the module handle, creates the device class and char
+ * device node, exports the attention GPIO (best effort), and
+ * populates the sysfs directory.  Returns 0 on success (or if already
+ * initialized) and a negative errno on failure, unwinding everything
+ * done so far.
+ */
+static int rmidev_init_device(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	dev_t dev_no;
+	unsigned char attr_count;
+	struct rmidev_data *dev_data;
+	struct device *device_ptr;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	if (rmidev) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	rmidev = kzalloc(sizeof(*rmidev), GFP_KERNEL);
+	if (!rmidev) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for rmidev\n",
+				__func__);
+		retval = -ENOMEM;
+		goto err_rmidev;
+	}
+
+	rmidev->rmi4_data = rmi4_data;
+
+	/* prebuild the siginfo payloads delivered to the attached task */
+	memset(&rmidev->interrupt_signal, 0, sizeof(rmidev->interrupt_signal));
+	rmidev->interrupt_signal.si_signo = SIGIO;
+	rmidev->interrupt_signal.si_code = SI_USER;
+
+	memset(&rmidev->terminate_signal, 0, sizeof(rmidev->terminate_signal));
+	rmidev->terminate_signal.si_signo = SIGTERM;
+	rmidev->terminate_signal.si_code = SI_USER;
+
+	retval = rmidev_create_device_class();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create device class\n",
+				__func__);
+		goto err_device_class;
+	}
+
+	if (rmidev_major_num) {
+		dev_no = MKDEV(rmidev_major_num, DEV_NUMBER);
+		retval = register_chrdev_region(dev_no, 1, CHAR_DEVICE_NAME);
+	} else {
+		retval = alloc_chrdev_region(&dev_no, 0, 1, CHAR_DEVICE_NAME);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to allocate char device region\n",
+					__func__);
+			goto err_device_region;
+		}
+
+		rmidev_major_num = MAJOR(dev_no);
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Major number of rmidev = %d\n",
+				__func__, rmidev_major_num);
+	}
+
+	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
+	if (!dev_data) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for dev_data\n",
+				__func__);
+		retval = -ENOMEM;
+		goto err_dev_data;
+	}
+
+	mutex_init(&dev_data->file_mutex);
+	dev_data->rmi_dev = rmidev;
+	rmidev->data = dev_data;
+
+	cdev_init(&dev_data->main_dev, &rmidev_fops);
+
+	retval = cdev_add(&dev_data->main_dev, dev_no, 1);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to add rmi char device\n",
+				__func__);
+		goto err_char_device;
+	}
+
+	dev_set_name(&rmidev->dev, "rmidev%d", MINOR(dev_no));
+	dev_data->device_class = rmidev_device_class;
+
+	device_ptr = device_create(dev_data->device_class, NULL, dev_no,
+			NULL, CHAR_DEVICE_NAME"%d", MINOR(dev_no));
+	if (IS_ERR(device_ptr)) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create rmi char device\n",
+				__func__);
+		retval = -ENODEV;
+		goto err_char_device;
+	}
+
+	/* GPIO export is best effort; failure does not abort init */
+	retval = gpio_export(bdata->irq_gpio, false);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to export attention gpio\n",
+				__func__);
+	} else {
+		retval = gpio_export_link(&(rmi4_data->input_dev->dev),
+				"attn", bdata->irq_gpio);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s Failed to create gpio symlink\n",
+					__func__);
+		} else {
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Exported attention gpio %d\n",
+					__func__, bdata->irq_gpio);
+		}
+	}
+
+	rmidev->sysfs_dir = kobject_create_and_add(SYSFS_FOLDER_NAME,
+			&rmi4_data->input_dev->dev.kobj);
+	if (!rmidev->sysfs_dir) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs directory\n",
+				__func__);
+		retval = -ENODEV;
+		goto err_sysfs_dir;
+	}
+
+	retval = sysfs_create_bin_file(rmidev->sysfs_dir,
+			&attr_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs bin file\n",
+				__func__);
+		goto err_sysfs_bin;
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(rmidev->sysfs_dir,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			retval = -ENODEV;
+			goto err_sysfs_attrs;
+		}
+	}
+
+	return 0;
+
+err_sysfs_attrs:
+	/* attr_count is unsigned, so count down with a pre-decrement
+	 * test; a ">= 0" condition would never terminate */
+	while (attr_count > 0) {
+		attr_count--;
+		sysfs_remove_file(rmidev->sysfs_dir, &attrs[attr_count].attr);
+	}
+
+	sysfs_remove_bin_file(rmidev->sysfs_dir, &attr_data);
+
+err_sysfs_bin:
+	kobject_put(rmidev->sysfs_dir);
+
+err_sysfs_dir:
+err_char_device:
+	/* rmidev_device_cleanup() already releases the chrdev region, so
+	 * skip the unregister_chrdev_region() under err_dev_data to
+	 * avoid releasing it twice */
+	rmidev_device_cleanup(dev_data);
+	kfree(dev_data);
+	goto err_device_region;
+
+err_dev_data:
+	unregister_chrdev_region(dev_no, 1);
+
+err_device_region:
+	if (rmidev_device_class != NULL) {
+		class_destroy(rmidev_device_class);
+		rmidev_device_class = NULL;
+	}
+
+err_device_class:
+	kfree(rmidev);
+	rmidev = NULL;
+
+err_rmidev:
+	return retval;
+}
+
+/*
+ * rmidev_remove_device: undo rmidev_init_device -- remove sysfs
+ * entries, unexport the attention GPIO, tear down the char device,
+ * and free the module handle -- then signal completion so module
+ * unload can proceed.
+ *
+ * NOTE(review): rmidev_device_cleanup() already calls
+ * unregister_chrdev_region() for dev_data's device number; the extra
+ * unregister_chrdev_region(rmidev->dev_no, 1) below looks like a
+ * double release unless dev_no tracks a separately reserved region --
+ * confirm.
+ */
+static void rmidev_remove_device(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char attr_count;
+	struct rmidev_data *dev_data;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	if (!rmidev)
+		goto exit;
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++)
+		sysfs_remove_file(rmidev->sysfs_dir, &attrs[attr_count].attr);
+
+	sysfs_remove_bin_file(rmidev->sysfs_dir, &attr_data);
+
+	kobject_put(rmidev->sysfs_dir);
+
+	gpio_unexport(bdata->irq_gpio);
+
+	dev_data = rmidev->data;
+	if (dev_data) {
+		rmidev_device_cleanup(dev_data);
+		kfree(dev_data);
+	}
+
+	unregister_chrdev_region(rmidev->dev_no, 1);
+
+	if (rmidev_device_class != NULL) {
+		class_destroy(rmidev_device_class);
+		rmidev_device_class = NULL;
+	}
+
+	kfree(rmidev->tmpbuf);
+
+	kfree(rmidev);
+	rmidev = NULL;
+
+exit:
+	/* always signal so rmidev_module_exit() cannot block forever */
+	complete(&rmidev_remove_complete_v26);
+
+	return;
+}
+
+/* Registration record hooking rmidev into the Synaptics DSX core as
+ * an expansion function; only init/remove/attn are implemented. */
+static struct synaptics_rmi4_exp_fn rmidev_module = {
+	.fn_type = RMI_DEV,
+	.init = rmidev_init_device,
+	.remove = rmidev_remove_device,
+	.reset = NULL,
+	.reinit = NULL,
+	.early_suspend = NULL,
+	.suspend = NULL,
+	.resume = NULL,
+	.late_resume = NULL,
+	.attn = rmidev_attn,
+};
+
+/* Register the rmidev expansion function with the core driver. */
+static int __init rmidev_module_init(void)
+{
+	synaptics_rmi4_new_function(&rmidev_module, true);
+	return 0;
+}
+
+/* Unregister the expansion function and wait for removal to finish. */
+static void __exit rmidev_module_exit(void)
+{
+	synaptics_rmi4_new_function(&rmidev_module, false);
+	wait_for_completion(&rmidev_remove_complete_v26);
+}
+
+/* Module entry points and metadata. */
+module_init(rmidev_module_init);
+module_exit(rmidev_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX RMI Dev Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_rmi_hid_i2c.c b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_rmi_hid_i2c.c
new file mode 100644
index 0000000..7e02487
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_rmi_hid_i2c.c
@@ -0,0 +1,1006 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2015 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/gpio.h>
+#include <linux/types.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx_v2_6.h>
+#include "synaptics_dsx_core.h"
+
+/* Number of attempts for a single I2C transfer before giving up. */
+#define SYN_I2C_RETRY_TIMES 10
+
+/* Default HID report IDs, used when the report descriptor does not
+ * override them (see parse_report_descriptor()). */
+#define REPORT_ID_GET_BLOB 0x07
+#define REPORT_ID_WRITE 0x09
+#define REPORT_ID_READ_ADDRESS 0x0a
+#define REPORT_ID_READ_DATA 0x0b
+#define REPORT_ID_SET_RMI_MODE 0x0f
+
+/* Item prefix bytes recognized while walking the report descriptor. */
+#define PREFIX_USAGE_PAGE_1BYTE 0x05
+#define PREFIX_USAGE_PAGE_2BYTES 0x06
+#define PREFIX_USAGE 0x09
+#define PREFIX_REPORT_ID 0x85
+#define PREFIX_REPORT_COUNT_1BYTE 0x95
+#define PREFIX_REPORT_COUNT_2BYTES 0x96
+
+/* Vendor-defined usages identifying the reports used by this driver. */
+#define USAGE_GET_BLOB 0xc5
+#define USAGE_WRITE 0x02
+#define USAGE_READ_ADDRESS 0x03
+#define USAGE_READ_DATA 0x04
+#define USAGE_SET_MODE 0x06
+
+/* HID report type for feature reports. */
+#define FEATURE_REPORT_TYPE 0x03
+
+/* Usage page reserved for vendor-defined usages. */
+#define VENDOR_DEFINED_PAGE 0xff00
+
+/* Fallback blob report size when the descriptor does not specify one. */
+#define BLOB_REPORT_SIZE 256
+
+/* HID over I2C command opcodes. */
+#define RESET_COMMAND 0x01
+#define GET_REPORT_COMMAND 0x02
+#define SET_REPORT_COMMAND 0x03
+#define SET_POWER_COMMAND 0x08
+
+/* Values for the set-mode feature report. */
+#define FINGER_MODE 0x00
+#define RMI_MODE 0x02
+
+/* Report IDs and blob size discovered from the report descriptor. */
+struct hid_report_info {
+	unsigned char get_blob_id;
+	unsigned char write_id;
+	unsigned char read_addr_id;
+	unsigned char read_data_id;
+	unsigned char set_mode_id;
+	unsigned int blob_size;
+};
+
+static struct hid_report_info hid_report;
+
+/* HID over I2C device descriptor, as read from the controller. */
+struct hid_device_descriptor {
+	unsigned short device_descriptor_length;
+	unsigned short format_version;
+	unsigned short report_descriptor_length;
+	unsigned short report_descriptor_index;
+	unsigned short input_register_index;
+	unsigned short input_report_max_length;
+	unsigned short output_register_index;
+	unsigned short output_report_max_length;
+	unsigned short command_register_index;
+	unsigned short data_register_index;
+	unsigned short vendor_id;
+	unsigned short product_id;
+	unsigned short version_id;
+	unsigned int reserved;
+};
+
+static struct hid_device_descriptor hid_dd;
+
+/* Shared, lazily grown scratch buffers for raw I2C transfers. */
+struct i2c_rw_buffer {
+	unsigned char *read;
+	unsigned char *write;
+	unsigned short read_size;
+	unsigned short write_size;
+};
+
+static struct i2c_rw_buffer buffer;
+
+#ifdef CONFIG_OF
+/*
+ * Read an optional u32 property.  Returns 1 if the property is
+ * present and read successfully, 0 if it is absent, or a negative
+ * errno (after logging) if it is present but unreadable.
+ */
+static int parse_dt_opt_u32(struct device *dev, const char *name, u32 *value)
+{
+	int retval;
+	struct property *prop;
+
+	prop = of_find_property(dev->of_node, name, NULL);
+	if (!prop || !prop->length)
+		return 0;
+
+	retval = of_property_read_u32(dev->of_node, name, value);
+	if (retval < 0) {
+		dev_err(dev, "%s: Unable to read %s property\n",
+				__func__, name);
+		return retval;
+	}
+
+	return 1;
+}
+
+/*
+ * Read a required u32 property, logging an error when it cannot be
+ * read.  Returns 0 on success or a negative errno.
+ */
+static int parse_dt_req_u32(struct device *dev, const char *name, u32 *value)
+{
+	int retval;
+
+	retval = of_property_read_u32(dev->of_node, name, value);
+	if (retval < 0)
+		dev_err(dev, "%s: Unable to read %s property\n",
+				__func__, name);
+
+	return retval;
+}
+
+/*
+ * parse_dt: populate the board data from device tree properties.
+ *
+ * Optional properties fall back to their documented defaults;
+ * sub-properties that become mandatory once their parent GPIO is
+ * specified cause a negative errno return when missing.  Returns 0 on
+ * success.
+ */
+static int parse_dt(struct device *dev, struct synaptics_dsx_board_data *bdata)
+{
+	int retval;
+	u32 value;
+	const char *name;
+	struct property *prop;
+	struct device_node *np = dev->of_node;
+
+	bdata->irq_gpio = of_get_named_gpio_flags(np,
+			"synaptics,irq-gpio", 0,
+			(enum of_gpio_flags *)&bdata->irq_flags);
+
+	retval = of_property_read_u32(np, "synaptics,irq-on-state",
+			&value);
+	if (retval < 0)
+		bdata->irq_on_state = 0;
+	else
+		bdata->irq_on_state = value;
+
+	retval = of_property_read_string(np, "synaptics,pwr-reg-name", &name);
+	if (retval < 0)
+		bdata->pwr_reg_name = NULL;
+	else
+		bdata->pwr_reg_name = name;
+
+	retval = of_property_read_string(np, "synaptics,bus-reg-name", &name);
+	if (retval < 0)
+		bdata->bus_reg_name = NULL;
+	else
+		bdata->bus_reg_name = name;
+
+	prop = of_find_property(np, "synaptics,power-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->power_gpio = of_get_named_gpio_flags(np,
+				"synaptics,power-gpio", 0, NULL);
+		retval = parse_dt_req_u32(dev, "synaptics,power-on-state",
+				&value);
+		if (retval < 0)
+			return retval;
+		bdata->power_on_state = value;
+	} else {
+		bdata->power_gpio = -1;
+	}
+
+	retval = parse_dt_opt_u32(dev, "synaptics,power-delay-ms", &value);
+	if (retval < 0)
+		return retval;
+	bdata->power_delay_ms = (retval > 0) ? value : 0;
+
+	prop = of_find_property(np, "synaptics,reset-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->reset_gpio = of_get_named_gpio_flags(np,
+				"synaptics,reset-gpio", 0, NULL);
+		retval = parse_dt_req_u32(dev, "synaptics,reset-on-state",
+				&value);
+		if (retval < 0)
+			return retval;
+		bdata->reset_on_state = value;
+		retval = parse_dt_req_u32(dev, "synaptics,reset-active-ms",
+				&value);
+		if (retval < 0)
+			return retval;
+		bdata->reset_active_ms = value;
+	} else {
+		bdata->reset_gpio = -1;
+	}
+
+	retval = parse_dt_opt_u32(dev, "synaptics,reset-delay-ms", &value);
+	if (retval < 0)
+		return retval;
+	bdata->reset_delay_ms = (retval > 0) ? value : 0;
+
+	retval = parse_dt_opt_u32(dev, "synaptics,dev-dscrptr-addr", &value);
+	if (retval < 0)
+		return retval;
+	bdata->device_descriptor_addr = (retval > 0) ?
+			(unsigned short)value : 0;
+
+	retval = parse_dt_opt_u32(dev, "synaptics,max-y-for-2d", &value);
+	if (retval < 0)
+		return retval;
+	bdata->max_y_for_2d = (retval > 0) ? value : -1;
+
+	/* presence-only boolean flags; test the pointer against NULL
+	 * rather than the original ordered comparison (prop > 0) */
+	prop = of_find_property(np, "synaptics,swap-axes", NULL);
+	bdata->swap_axes = prop != NULL;
+
+	prop = of_find_property(np, "synaptics,x-flip", NULL);
+	bdata->x_flip = prop != NULL;
+
+	prop = of_find_property(np, "synaptics,y-flip", NULL);
+	bdata->y_flip = prop != NULL;
+
+	retval = parse_dt_opt_u32(dev, "synaptics,ub-i2c-addr", &value);
+	if (retval < 0)
+		return retval;
+	bdata->ub_i2c_addr = (retval > 0) ? (unsigned short)value : -1;
+
+	prop = of_find_property(np, "synaptics,cap-button-codes", NULL);
+	if (prop && prop->length) {
+		bdata->cap_button_map->map = devm_kzalloc(dev,
+				prop->length,
+				GFP_KERNEL);
+		if (!bdata->cap_button_map->map)
+			return -ENOMEM;
+		bdata->cap_button_map->nbuttons = prop->length / sizeof(u32);
+		retval = of_property_read_u32_array(np,
+				"synaptics,cap-button-codes",
+				bdata->cap_button_map->map,
+				bdata->cap_button_map->nbuttons);
+		if (retval < 0) {
+			bdata->cap_button_map->nbuttons = 0;
+			bdata->cap_button_map->map = NULL;
+		}
+	} else {
+		bdata->cap_button_map->nbuttons = 0;
+		bdata->cap_button_map->map = NULL;
+	}
+
+	prop = of_find_property(np, "synaptics,vir-button-codes", NULL);
+	if (prop && prop->length) {
+		bdata->vir_button_map->map = devm_kzalloc(dev,
+				prop->length,
+				GFP_KERNEL);
+		if (!bdata->vir_button_map->map)
+			return -ENOMEM;
+		/* each virtual button is described by 5 u32 cells */
+		bdata->vir_button_map->nbuttons = prop->length / sizeof(u32);
+		bdata->vir_button_map->nbuttons /= 5;
+		retval = of_property_read_u32_array(np,
+				"synaptics,vir-button-codes",
+				bdata->vir_button_map->map,
+				bdata->vir_button_map->nbuttons * 5);
+		if (retval < 0) {
+			bdata->vir_button_map->nbuttons = 0;
+			bdata->vir_button_map->map = NULL;
+		}
+	} else {
+		bdata->vir_button_map->nbuttons = 0;
+		bdata->vir_button_map->map = NULL;
+	}
+
+	return 0;
+}
+#endif
+
+/*
+ * Perform a single-message I2C transfer, retrying up to
+ * SYN_I2C_RETRY_TIMES with a 20 ms delay between attempts.
+ * Returns 0 on success or -EIO when all attempts fail.
+ */
+static int do_i2c_transfer(struct i2c_client *client, struct i2c_msg *msg)
+{
+	unsigned char attempt = 0;
+
+	while (attempt < SYN_I2C_RETRY_TIMES) {
+		if (i2c_transfer(client->adapter, msg, 1) == 1)
+			return 0;
+		dev_err(&client->dev,
+				"%s: I2C retry %d\n",
+				__func__, attempt + 1);
+		msleep(20);
+		attempt++;
+	}
+
+	dev_err(&client->dev,
+			"%s: I2C transfer over retry limit\n",
+			__func__);
+
+	return -EIO;
+}
+
+/*
+ * Ensure *buffer can hold at least length bytes, reallocating when
+ * necessary.  Returns 0 on success or -ENOMEM on allocation failure.
+ */
+static int check_buffer(unsigned char **buffer, unsigned short *buffer_size,
+		unsigned short length)
+{
+	if (*buffer_size < length) {
+		if (*buffer_size)
+			kfree(*buffer);
+		*buffer = kzalloc(length, GFP_KERNEL);
+		if (!(*buffer)) {
+			/* reset the cached size so a later call does not
+			 * treat the NULL buffer as already large enough */
+			*buffer_size = 0;
+			return -ENOMEM;
+		}
+		*buffer_size = length;
+	}
+
+	return 0;
+}
+
+/*
+ * Read length bytes from the device into the shared read buffer.
+ * Returns 0 on success or a negative errno.
+ */
+static int generic_read(struct i2c_client *client, unsigned short length)
+{
+	int retval;
+	struct i2c_msg msg[] = {
+		{
+			.addr = client->addr,
+			.flags = I2C_M_RD,
+			.len = length,
+		}
+	};
+
+	/* abort if the read buffer cannot be grown; transferring into a
+	 * NULL or undersized buffer would corrupt memory */
+	retval = check_buffer(&buffer.read, &buffer.read_size, length);
+	if (retval < 0)
+		return retval;
+
+	msg[0].buf = buffer.read;
+
+	retval = do_i2c_transfer(client, msg);
+
+	return retval;
+}
+
+/*
+ * Write length bytes from the shared write buffer to the device.
+ * Returns 0 on success or a negative errno.
+ */
+static int generic_write(struct i2c_client *client, unsigned short length)
+{
+	struct i2c_msg msg[] = {
+		{
+			.addr = client->addr,
+			.flags = 0,
+			.len = length,
+			.buf = buffer.write,
+		}
+	};
+
+	return do_i2c_transfer(client, msg);
+}
+
+/*
+ * Advance *index past one short item of the HID report descriptor.
+ * The low two bits of the prefix byte encode the data length
+ * (0, 1, 2, or 4 bytes); the prefix byte itself adds one.
+ */
+static void traverse_report_descriptor(unsigned int *index)
+{
+	static const unsigned char item_length[] = {1, 2, 3, 5};
+	unsigned char size_code = buffer.read[*index] & MASK_2BIT;
+
+	*index += item_length[size_code];
+}
+
+/*
+ * Scan the report descriptor starting at index for the report count
+ * item of the blob report and record it in hid_report.blob_size.
+ */
+static void find_blob_size(unsigned int index)
+{
+	unsigned char *desc = buffer.read;
+	unsigned int pos = index;
+
+	while (pos < hid_dd.report_descriptor_length) {
+		if (desc[pos] == PREFIX_REPORT_COUNT_1BYTE) {
+			hid_report.blob_size = desc[pos + 1];
+			return;
+		}
+		if (desc[pos] == PREFIX_REPORT_COUNT_2BYTES) {
+			hid_report.blob_size = desc[pos + 1] |
+					(desc[pos + 2] << 8);
+			return;
+		}
+		traverse_report_descriptor(&pos);
+	}
+}
+
+/*
+ * Inspect one item of the report descriptor at index and update the
+ * report IDs in hid_report.
+ *
+ * The most recently seen report ID and usage page are carried across
+ * calls in function-static variables, so this must be invoked in
+ * ascending index order over a single descriptor (as done by
+ * parse_report_descriptor()).
+ */
+static void find_reports(unsigned int index)
+{
+	unsigned int ii = index;
+	unsigned char *buf = buffer.read;
+	static unsigned int report_id_index;
+	static unsigned char report_id;
+	static unsigned short usage_page;
+
+	if (buf[ii] == PREFIX_REPORT_ID) {
+		report_id = buf[ii + 1];
+		report_id_index = ii;
+		return;
+	}
+
+	if (buf[ii] == PREFIX_USAGE_PAGE_1BYTE) {
+		usage_page = buf[ii + 1];
+		return;
+	} else if (buf[ii] == PREFIX_USAGE_PAGE_2BYTES) {
+		usage_page = buf[ii + 1] | (buf[ii + 2] << 8);
+		return;
+	}
+
+	/* only usages inside the vendor-defined page identify the
+	 * reports this driver cares about */
+	if ((usage_page == VENDOR_DEFINED_PAGE) && (buf[ii] == PREFIX_USAGE)) {
+		switch (buf[ii + 1]) {
+		case USAGE_GET_BLOB:
+			hid_report.get_blob_id = report_id;
+			find_blob_size(report_id_index);
+			break;
+		case USAGE_WRITE:
+			hid_report.write_id = report_id;
+			break;
+		case USAGE_READ_ADDRESS:
+			hid_report.read_addr_id = report_id;
+			break;
+		case USAGE_READ_DATA:
+			hid_report.read_data_id = report_id;
+			break;
+		case USAGE_SET_MODE:
+			hid_report.set_mode_id = report_id;
+			break;
+		default:
+			break;
+		}
+	}
+
+	return;
+}
+
+/*
+ * Fetch and walk the HID report descriptor, recording the report IDs
+ * and blob size used by this driver.  Defaults are installed first so
+ * usages missing from the descriptor fall back to the standard IDs.
+ * Returns 0 on success or a negative errno.
+ */
+static int parse_report_descriptor(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned int ii = 0;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+
+	buffer.write[0] = hid_dd.report_descriptor_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.report_descriptor_index >> 8;
+	retval = generic_write(i2c, 2);
+	if (retval < 0)
+		return retval;
+	retval = generic_read(i2c, hid_dd.report_descriptor_length);
+	if (retval < 0)
+		return retval;
+
+	hid_report.get_blob_id = REPORT_ID_GET_BLOB;
+	hid_report.write_id = REPORT_ID_WRITE;
+	hid_report.read_addr_id = REPORT_ID_READ_ADDRESS;
+	hid_report.read_data_id = REPORT_ID_READ_DATA;
+	hid_report.set_mode_id = REPORT_ID_SET_RMI_MODE;
+	hid_report.blob_size = BLOB_REPORT_SIZE;
+
+	while (ii < hid_dd.report_descriptor_length) {
+		find_reports(ii);
+		traverse_report_descriptor(&ii);
+	}
+
+	return 0;
+}
+
+/*
+ * Send the SET_REPORT feature command that switches the controller
+ * from HID finger reporting into raw RMI mode.  Returns 0 on success
+ * or a negative errno.
+ *
+ * NOTE(review): the return value of check_buffer() is not checked; an
+ * allocation failure would leave buffer.write NULL.
+ */
+static int switch_to_rmi(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	check_buffer(&buffer.write, &buffer.write_size, 11);
+
+	/* set rmi mode */
+	buffer.write[0] = hid_dd.command_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.command_register_index >> 8;
+	buffer.write[2] = (FEATURE_REPORT_TYPE << 4) | hid_report.set_mode_id;
+	buffer.write[3] = SET_REPORT_COMMAND;
+	buffer.write[4] = hid_report.set_mode_id;
+	buffer.write[5] = hid_dd.data_register_index & MASK_8BIT;
+	buffer.write[6] = hid_dd.data_register_index >> 8;
+	buffer.write[7] = 0x04; /* presumably 2-byte data payload length (4)
+				 * -- confirm against the HID/I2C spec */
+	buffer.write[8] = 0x00;
+	buffer.write[9] = hid_report.set_mode_id;
+	buffer.write[10] = RMI_MODE;
+
+	retval = generic_write(i2c, 11);
+
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	return retval;
+}
+
+/*
+ * Issue a GET_REPORT for the set-mode feature report and return the
+ * current report mode byte (e.g. RMI_MODE), or a negative errno on
+ * failure.
+ */
+static int check_report_mode(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned short report_size;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	check_buffer(&buffer.write, &buffer.write_size, 7);
+
+	buffer.write[0] = hid_dd.command_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.command_register_index >> 8;
+	buffer.write[2] = (FEATURE_REPORT_TYPE << 4) | hid_report.set_mode_id;
+	buffer.write[3] = GET_REPORT_COMMAND;
+	buffer.write[4] = hid_report.set_mode_id;
+	buffer.write[5] = hid_dd.data_register_index & MASK_8BIT;
+	buffer.write[6] = hid_dd.data_register_index >> 8;
+
+	retval = generic_write(i2c, 7);
+	if (retval < 0)
+		goto exit;
+
+	retval = generic_read(i2c, 2);
+	if (retval < 0)
+		goto exit;
+
+	report_size = (buffer.read[1] << 8) | buffer.read[0];
+
+	/* the mode byte sits at offset 3 of the report; reject truncated
+	 * reports instead of reading past the received data */
+	if (report_size < 4) {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	retval = generic_write(i2c, 7);
+	if (retval < 0)
+		goto exit;
+
+	retval = generic_read(i2c, report_size);
+	if (retval < 0)
+		goto exit;
+
+	retval = buffer.read[3];
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Report mode = %d\n",
+			__func__, retval);
+
+exit:
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	return retval;
+}
+
+/*
+ * hid_i2c_init: bring up the HID over I2C interface
+ *
+ * Reads the device descriptor, parses the report descriptor, powers
+ * the device on, resets it, drains the post-reset input report,
+ * fetches the blob report, and finally switches the controller into
+ * RMI mode.  Returns 0 on success or a negative errno.
+ *
+ * NOTE(review): the return value of check_buffer() is not checked; an
+ * allocation failure would leave buffer.write NULL.
+ */
+static int hid_i2c_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	check_buffer(&buffer.write, &buffer.write_size, 6);
+
+	/* read device descriptor */
+	buffer.write[0] = bdata->device_descriptor_addr & MASK_8BIT;
+	buffer.write[1] = bdata->device_descriptor_addr >> 8;
+	retval = generic_write(i2c, 2);
+	if (retval < 0)
+		goto exit;
+	retval = generic_read(i2c, sizeof(hid_dd));
+	if (retval < 0)
+		goto exit;
+	retval = secure_memcpy((unsigned char *)&hid_dd,
+			sizeof(struct hid_device_descriptor),
+			buffer.read,
+			buffer.read_size,
+			sizeof(hid_dd));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy device descriptor data\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = parse_report_descriptor(rmi4_data);
+	if (retval < 0)
+		goto exit;
+
+	/* set power */
+	buffer.write[0] = hid_dd.command_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.command_register_index >> 8;
+	buffer.write[2] = 0x00;
+	buffer.write[3] = SET_POWER_COMMAND;
+	retval = generic_write(i2c, 4);
+	if (retval < 0)
+		goto exit;
+
+	/* reset */
+	buffer.write[0] = hid_dd.command_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.command_register_index >> 8;
+	buffer.write[2] = 0x00;
+	buffer.write[3] = RESET_COMMAND;
+	retval = generic_write(i2c, 4);
+	if (retval < 0)
+		goto exit;
+
+	/* wait for the interrupt line to read 0 after reset (presumably
+	 * attention asserted low -- confirm against board wiring).
+	 * NOTE(review): this poll has no timeout and can spin forever if
+	 * the line never drops -- consider bounding it */
+	while (gpio_get_value(bdata->irq_gpio))
+		msleep(20);
+
+	retval = generic_read(i2c, hid_dd.input_report_max_length);
+	if (retval < 0)
+		goto exit;
+
+	/* get blob */
+	buffer.write[0] = hid_dd.command_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.command_register_index >> 8;
+	buffer.write[2] = (FEATURE_REPORT_TYPE << 4) | hid_report.get_blob_id;
+	buffer.write[3] = 0x02;
+	buffer.write[4] = hid_dd.data_register_index & MASK_8BIT;
+	buffer.write[5] = hid_dd.data_register_index >> 8;
+
+	retval = generic_write(i2c, 6);
+	if (retval < 0)
+		goto exit;
+
+	msleep(20);
+
+	retval = generic_read(i2c, hid_report.blob_size + 3);
+	if (retval < 0)
+		goto exit;
+
+exit:
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to initialize HID/I2C interface\n",
+				__func__);
+		return retval;
+	}
+
+	retval = switch_to_rmi(rmi4_data);
+
+	return retval;
+}
+
+/*
+ * synaptics_rmi4_i2c_read: read an RMI register range over HID/I2C
+ *
+ * Sends a read-address output report, then polls for a full-size
+ * input report and copies length bytes of payload into data.  If the
+ * transfer fails and the controller turns out not to be in RMI mode,
+ * the interface is re-initialized once and the whole sequence retried
+ * (the one-shot `recover` flag prevents looping).
+ *
+ * Returns length on success or a negative errno.
+ *
+ * NOTE(review): the return values of both check_buffer() calls are
+ * ignored; on allocation failure msg[].buf would be NULL.
+ */
+static int synaptics_rmi4_i2c_read(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr, unsigned char *data, unsigned short length)
+{
+	int retval;
+	unsigned char retry;
+	unsigned char recover = 1;
+	unsigned short report_length;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+	struct i2c_msg msg[] = {
+		{
+			.addr = i2c->addr,
+			.flags = 0,
+			.len = hid_dd.output_report_max_length + 2,
+		},
+		{
+			.addr = i2c->addr,
+			.flags = I2C_M_RD,
+			.len = length + 4,
+		},
+	};
+
+recover:
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	check_buffer(&buffer.write, &buffer.write_size,
+			hid_dd.output_report_max_length + 2);
+	msg[0].buf = buffer.write;
+	/* read-address output report: register index, report length,
+	 * report ID, then target address and byte count */
+	buffer.write[0] = hid_dd.output_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.output_register_index >> 8;
+	buffer.write[2] = hid_dd.output_report_max_length & MASK_8BIT;
+	buffer.write[3] = hid_dd.output_report_max_length >> 8;
+	buffer.write[4] = hid_report.read_addr_id;
+	buffer.write[5] = 0x00;
+	buffer.write[6] = addr & MASK_8BIT;
+	buffer.write[7] = addr >> 8;
+	buffer.write[8] = length & MASK_8BIT;
+	buffer.write[9] = length >> 8;
+
+	check_buffer(&buffer.read, &buffer.read_size, length + 4);
+	msg[1].buf = buffer.read;
+
+	retval = do_i2c_transfer(i2c, &msg[0]);
+	if (retval != 0)
+		goto exit;
+
+	retry = 0;
+	do {
+		retval = do_i2c_transfer(i2c, &msg[1]);
+		if (retval == 0)
+			retval = length;
+		else
+			goto exit;
+
+		/* only a full-size input report carries the read data;
+		 * anything else is discarded and the poll retried */
+		report_length = (buffer.read[1] << 8) | buffer.read[0];
+		if (report_length == hid_dd.input_report_max_length) {
+			retval = secure_memcpy(&data[0], length,
+					&buffer.read[4], buffer.read_size - 4,
+					length);
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to copy data\n",
+						__func__);
+			} else {
+				retval = length;
+			}
+			goto exit;
+		}
+
+		msleep(20);
+		retry++;
+	} while (retry < SYN_I2C_RETRY_TIMES);
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Failed to receive read report\n",
+			__func__);
+	retval = -EIO;
+
+exit:
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	/* one-shot recovery: if the device dropped out of RMI mode,
+	 * re-init the HID/I2C interface and retry the whole read */
+	if ((retval != length) && (recover == 1)) {
+		recover = 0;
+		if (check_report_mode(rmi4_data) != RMI_MODE) {
+			retval = hid_i2c_init(rmi4_data);
+			if (retval == 0)
+				goto recover;
+		}
+	}
+
+	return retval;
+}
+
+/*
+ * Write length bytes of register data starting at addr over the
+ * HID/I2C protocol by sending a single output report (10-byte header
+ * followed by the payload).
+ *
+ * Returns length on success or a negative errno.  If the transfer
+ * fails and the device is no longer in RMI mode, the HID/I2C interface
+ * is re-initialized once and the write retried (recover path).
+ *
+ * Fix: msg_length was declared unsigned char, silently truncating
+ * length + 10 (length is unsigned short) for writes larger than 245
+ * bytes; widened to unsigned int.
+ */
+static int synaptics_rmi4_i2c_write(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr, unsigned char *data, unsigned short length)
+{
+	int retval;
+	unsigned char recover = 1;	/* one recovery attempt allowed */
+	unsigned int msg_length;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+	struct i2c_msg msg[] = {
+		{
+			.addr = i2c->addr,
+			.flags = 0,
+		}
+	};
+
+	/* The transfer is at least one full output report (+2 for the
+	 * register index), or header plus payload if that is larger.
+	 */
+	if ((length + 10) < (hid_dd.output_report_max_length + 2))
+		msg_length = hid_dd.output_report_max_length + 2;
+	else
+		msg_length = length + 10;
+
+recover:
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	/* Build the output report: register index, report size, write
+	 * report ID, address, length, then the payload at offset 10.
+	 */
+	check_buffer(&buffer.write, &buffer.write_size, msg_length);
+	msg[0].len = msg_length;
+	msg[0].buf = buffer.write;
+	buffer.write[0] = hid_dd.output_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.output_register_index >> 8;
+	buffer.write[2] = hid_dd.output_report_max_length & MASK_8BIT;
+	buffer.write[3] = hid_dd.output_report_max_length >> 8;
+	buffer.write[4] = hid_report.write_id;
+	buffer.write[5] = 0x00;
+	buffer.write[6] = addr & MASK_8BIT;
+	buffer.write[7] = addr >> 8;
+	buffer.write[8] = length & MASK_8BIT;
+	buffer.write[9] = length >> 8;
+	retval = secure_memcpy(&buffer.write[10], buffer.write_size - 10,
+			&data[0], length, length);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy data\n",
+				__func__);
+	} else {
+		retval = do_i2c_transfer(i2c, msg);
+		if (retval == 0)
+			retval = length;
+	}
+
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	/* On failure, re-init the HID/I2C interface once if the device
+	 * fell out of RMI mode, then retry the write from the top.
+	 */
+	if ((retval != length) && (recover == 1)) {
+		recover = 0;
+		if (check_report_mode(rmi4_data) != RMI_MODE) {
+			retval = hid_i2c_init(rmi4_data);
+			if (retval == 0)
+				goto recover;
+		}
+	}
+
+	return retval;
+}
+
+/* I2C bus access operations handed to the DSX core driver. */
+static struct synaptics_dsx_bus_access bus_access = {
+	.type = BUS_I2C,
+	.read = synaptics_rmi4_i2c_read,
+	.write = synaptics_rmi4_i2c_write,
+};
+
+/* Hardware interface descriptor passed as platform data to the core. */
+static struct synaptics_dsx_hw_interface hw_if;
+
+/* Platform device registered on behalf of the DSX core driver. */
+static struct platform_device *synaptics_dsx_i2c_device;
+
+/* Device release callback: frees the platform device allocated in
+ * probe once the device core drops its last reference.
+ */
+static void synaptics_rmi4_i2c_dev_release(struct device *dev)
+{
+	kfree(synaptics_dsx_i2c_device);
+
+	return;
+}
+
+/*
+ * I2C probe: verify the host adapter supports SMBus byte data, build
+ * the hardware interface (board data from DT when available, platform
+ * data otherwise) and register the platform device that the DSX core
+ * driver binds to.
+ *
+ * Fixes: synaptics_dsx_i2c_device was leaked on every error path after
+ * its allocation, and the parse_dt() return value was ignored.
+ */
+static int synaptics_rmi4_i2c_probe(struct i2c_client *client,
+		const struct i2c_device_id *dev_id)
+{
+	int retval;
+
+	if (!i2c_check_functionality(client->adapter,
+			I2C_FUNC_SMBUS_BYTE_DATA)) {
+		dev_err(&client->dev,
+				"%s: SMBus byte data commands not supported by host\n",
+				__func__);
+		return -EIO;
+	}
+
+	synaptics_dsx_i2c_device = kzalloc(
+			sizeof(struct platform_device),
+			GFP_KERNEL);
+	if (!synaptics_dsx_i2c_device) {
+		dev_err(&client->dev,
+				"%s: Failed to allocate memory for synaptics_dsx_i2c_device\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+#ifdef CONFIG_OF
+	if (client->dev.of_node) {
+		/* devm_* allocations are released by the driver core. */
+		hw_if.board_data = devm_kzalloc(&client->dev,
+				sizeof(struct synaptics_dsx_board_data),
+				GFP_KERNEL);
+		if (!hw_if.board_data) {
+			dev_err(&client->dev,
+					"%s: Failed to allocate memory for board data\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_free_device;
+		}
+		hw_if.board_data->cap_button_map = devm_kzalloc(&client->dev,
+				sizeof(struct synaptics_dsx_button_map),
+				GFP_KERNEL);
+		if (!hw_if.board_data->cap_button_map) {
+			dev_err(&client->dev,
+					"%s: Failed to allocate memory for 0D button map\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_free_device;
+		}
+		hw_if.board_data->vir_button_map = devm_kzalloc(&client->dev,
+				sizeof(struct synaptics_dsx_button_map),
+				GFP_KERNEL);
+		if (!hw_if.board_data->vir_button_map) {
+			dev_err(&client->dev,
+					"%s: Failed to allocate memory for virtual button map\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_free_device;
+		}
+		retval = parse_dt(&client->dev, hw_if.board_data);
+		if (retval < 0)
+			goto err_free_device;
+	}
+#else
+	hw_if.board_data = client->dev.platform_data;
+#endif
+
+	hw_if.bus_access = &bus_access;
+	hw_if.bl_hw_init = switch_to_rmi;
+	hw_if.ui_hw_init = hid_i2c_init;
+
+	synaptics_dsx_i2c_device->name = PLATFORM_DRIVER_NAME;
+	synaptics_dsx_i2c_device->id = 0;
+	synaptics_dsx_i2c_device->num_resources = 0;
+	synaptics_dsx_i2c_device->dev.parent = &client->dev;
+	synaptics_dsx_i2c_device->dev.platform_data = &hw_if;
+	synaptics_dsx_i2c_device->dev.release = synaptics_rmi4_i2c_dev_release;
+
+	retval = platform_device_register(synaptics_dsx_i2c_device);
+	if (retval) {
+		dev_err(&client->dev,
+				"%s: Failed to register platform device\n",
+				__func__);
+		retval = -ENODEV;
+		goto err_free_device;
+	}
+
+	return 0;
+
+err_free_device:
+	kfree(synaptics_dsx_i2c_device);
+	synaptics_dsx_i2c_device = NULL;
+	return retval;
+}
+
+/*
+ * I2C remove: free the shared I/O buffers and unregister the platform
+ * device (its release callback frees the device structure itself).
+ */
+static int synaptics_rmi4_i2c_remove(struct i2c_client *client)
+{
+	if (buffer.read_size)
+		kfree(buffer.read);
+
+	if (buffer.write_size)
+		kfree(buffer.write);
+
+	platform_device_unregister(synaptics_dsx_i2c_device);
+
+	return 0;
+}
+
+/* I2C device ID table for non-device-tree matching. */
+static const struct i2c_device_id synaptics_rmi4_id_table[] = {
+	{I2C_DRIVER_NAME, 0},
+	{},
+};
+MODULE_DEVICE_TABLE(i2c, synaptics_rmi4_id_table);
+
+#ifdef CONFIG_OF
+/* Device tree match table. */
+static struct of_device_id synaptics_rmi4_of_match_table[] = {
+	{
+		.compatible = "synaptics,dsx-rmi-hid-i2c",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, synaptics_rmi4_of_match_table);
+#else
+#define synaptics_rmi4_of_match_table NULL
+#endif
+
+/* I2C driver definition binding probe/remove to the bus. */
+static struct i2c_driver synaptics_rmi4_i2c_driver = {
+	.driver = {
+		.name = I2C_DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = synaptics_rmi4_of_match_table,
+	},
+	.probe = synaptics_rmi4_i2c_probe,
+	.remove = synaptics_rmi4_i2c_remove,
+	.id_table = synaptics_rmi4_id_table,
+};
+
+/* Register the I2C bus driver; called from the DSX core module init. */
+int synaptics_rmi4_bus_init_v26(void)
+{
+	return i2c_add_driver(&synaptics_rmi4_i2c_driver);
+}
+EXPORT_SYMBOL(synaptics_rmi4_bus_init_v26);
+
+/* Unregister the I2C bus driver; called from the DSX core module exit. */
+void synaptics_rmi4_bus_exit_v26(void)
+{
+	i2c_del_driver(&synaptics_rmi4_i2c_driver);
+
+	return;
+}
+EXPORT_SYMBOL(synaptics_rmi4_bus_exit_v26);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX I2C Bus Support Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_spi.c b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_spi.c
new file mode 100644
index 0000000..382a3dd
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_spi.c
@@ -0,0 +1,634 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2015 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/types.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx_v2_6.h>
+#include "synaptics_dsx_core.h"
+
+#define SPI_READ 0x80
+#define SPI_WRITE 0x00
+
+#ifdef CONFIG_OF
+/*
+ * Parse the device tree node into the board data structure.  Optional
+ * properties fall back to documented defaults (0 or -1); a property
+ * that is present but unreadable is treated as an error.
+ *
+ * Returns 0 on success or a negative errno.
+ *
+ * Fix: the boolean flag properties used "prop > 0", an ordered
+ * comparison between a pointer and integer zero; replaced with a
+ * plain presence test.
+ */
+static int parse_dt(struct device *dev, struct synaptics_dsx_board_data *bdata)
+{
+	int retval;
+	u32 value;
+	const char *name;
+	struct property *prop;
+	struct device_node *np = dev->of_node;
+
+	bdata->irq_gpio = of_get_named_gpio_flags(np,
+			"synaptics,irq-gpio", 0,
+			(enum of_gpio_flags *)&bdata->irq_flags);
+
+	retval = of_property_read_u32(np, "synaptics,irq-on-state",
+			&value);
+	if (retval < 0)
+		bdata->irq_on_state = 0;
+	else
+		bdata->irq_on_state = value;
+
+	retval = of_property_read_string(np, "synaptics,pwr-reg-name", &name);
+	if (retval < 0)
+		bdata->pwr_reg_name = NULL;
+	else
+		bdata->pwr_reg_name = name;
+
+	retval = of_property_read_string(np, "synaptics,bus-reg-name", &name);
+	if (retval < 0)
+		bdata->bus_reg_name = NULL;
+	else
+		bdata->bus_reg_name = name;
+
+	prop = of_find_property(np, "synaptics,power-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->power_gpio = of_get_named_gpio_flags(np,
+				"synaptics,power-gpio", 0, NULL);
+		retval = of_property_read_u32(np, "synaptics,power-on-state",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,power-on-state property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->power_on_state = value;
+		}
+	} else {
+		bdata->power_gpio = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,power-delay-ms", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,power-delay-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,power-delay-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->power_delay_ms = value;
+		}
+	} else {
+		bdata->power_delay_ms = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,reset-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->reset_gpio = of_get_named_gpio_flags(np,
+				"synaptics,reset-gpio", 0, NULL);
+		retval = of_property_read_u32(np, "synaptics,reset-on-state",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-on-state property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_on_state = value;
+		}
+		retval = of_property_read_u32(np, "synaptics,reset-active-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-active-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_active_ms = value;
+		}
+	} else {
+		bdata->reset_gpio = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,reset-delay-ms", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,reset-delay-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-delay-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_delay_ms = value;
+		}
+	} else {
+		bdata->reset_delay_ms = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,byte-delay-us", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,byte-delay-us",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,byte-delay-us property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->byte_delay_us = value;
+		}
+	} else {
+		bdata->byte_delay_us = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,block-delay-us", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,block-delay-us",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,block-delay-us property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->block_delay_us = value;
+		}
+	} else {
+		bdata->block_delay_us = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,max-y-for-2d", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,max-y-for-2d",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,max-y-for-2d property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->max_y_for_2d = value;
+		}
+	} else {
+		bdata->max_y_for_2d = -1;
+	}
+
+	/* Boolean flags: presence of the property enables the flag. */
+	prop = of_find_property(np, "synaptics,swap-axes", NULL);
+	bdata->swap_axes = prop != NULL;
+
+	prop = of_find_property(np, "synaptics,x-flip", NULL);
+	bdata->x_flip = prop != NULL;
+
+	prop = of_find_property(np, "synaptics,y-flip", NULL);
+	bdata->y_flip = prop != NULL;
+
+	prop = of_find_property(np, "synaptics,ub-i2c-addr", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,ub-i2c-addr",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,ub-i2c-addr property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->ub_i2c_addr = (unsigned short)value;
+		}
+	} else {
+		bdata->ub_i2c_addr = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,cap-button-codes", NULL);
+	if (prop && prop->length) {
+		bdata->cap_button_map->map = devm_kzalloc(dev,
+				prop->length,
+				GFP_KERNEL);
+		if (!bdata->cap_button_map->map)
+			return -ENOMEM;
+		bdata->cap_button_map->nbuttons = prop->length / sizeof(u32);
+		retval = of_property_read_u32_array(np,
+				"synaptics,cap-button-codes",
+				bdata->cap_button_map->map,
+				bdata->cap_button_map->nbuttons);
+		if (retval < 0) {
+			bdata->cap_button_map->nbuttons = 0;
+			bdata->cap_button_map->map = NULL;
+		}
+	} else {
+		bdata->cap_button_map->nbuttons = 0;
+		bdata->cap_button_map->map = NULL;
+	}
+
+	/* Virtual buttons are encoded as 5 u32 cells per button. */
+	prop = of_find_property(np, "synaptics,vir-button-codes", NULL);
+	if (prop && prop->length) {
+		bdata->vir_button_map->map = devm_kzalloc(dev,
+				prop->length,
+				GFP_KERNEL);
+		if (!bdata->vir_button_map->map)
+			return -ENOMEM;
+		bdata->vir_button_map->nbuttons = prop->length / sizeof(u32);
+		bdata->vir_button_map->nbuttons /= 5;
+		retval = of_property_read_u32_array(np,
+				"synaptics,vir-button-codes",
+				bdata->vir_button_map->map,
+				bdata->vir_button_map->nbuttons * 5);
+		if (retval < 0) {
+			bdata->vir_button_map->nbuttons = 0;
+			bdata->vir_button_map->map = NULL;
+		}
+	} else {
+		bdata->vir_button_map->nbuttons = 0;
+		bdata->vir_button_map->map = NULL;
+	}
+
+	return 0;
+}
+#endif
+
+/*
+ * Select the RMI register page derived from the upper bits of addr.
+ * Performs a SPI transfer only when the page differs from the cached
+ * rmi4_data->current_page.  Returns PAGE_SELECT_LEN on success or the
+ * negative spi_sync() error.
+ */
+static int synaptics_rmi4_spi_set_page(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr)
+{
+	int retval;
+	unsigned int index;
+	unsigned int xfer_count = PAGE_SELECT_LEN + 1;
+	unsigned char txbuf[xfer_count];	/* small fixed-size VLA */
+	unsigned char page;
+	struct spi_message msg;
+	struct spi_transfer xfers[xfer_count];
+	struct spi_device *spi = to_spi_device(rmi4_data->pdev->dev.parent);
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	page = ((addr >> 8) & ~MASK_7BIT);
+	if (page != rmi4_data->current_page) {
+		spi_message_init(&msg);
+
+		/* Write opcode, page select register, page value. */
+		txbuf[0] = SPI_WRITE;
+		txbuf[1] = MASK_8BIT;
+		txbuf[2] = page;
+
+		/* One single-byte transfer per byte, with the per-byte
+		 * delay required by the board data.
+		 */
+		for (index = 0; index < xfer_count; index++) {
+			memset(&xfers[index], 0, sizeof(struct spi_transfer));
+			xfers[index].len = 1;
+			xfers[index].delay_usecs = bdata->byte_delay_us;
+			xfers[index].tx_buf = &txbuf[index];
+			spi_message_add_tail(&xfers[index], &msg);
+		}
+
+		/* Last byte carries the block delay, if configured. */
+		if (bdata->block_delay_us)
+			xfers[index - 1].delay_usecs = bdata->block_delay_us;
+
+		retval = spi_sync(spi, &msg);
+		if (retval == 0) {
+			rmi4_data->current_page = page;
+			retval = PAGE_SELECT_LEN;
+		} else {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to complete SPI transfer, error = %d\n",
+					__func__, retval);
+		}
+	} else {
+		retval = PAGE_SELECT_LEN;
+	}
+
+	return retval;
+}
+
+/*
+ * Read length bytes starting at addr over SPI: select the page, send
+ * the 2-byte address word with the read bit set, then clock in the
+ * data one byte per transfer (per-byte delay from board data).
+ * Returns length on success or a negative errno.
+ */
+static int synaptics_rmi4_spi_read(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr, unsigned char *data, unsigned short length)
+{
+	int retval;
+	unsigned int index;
+	unsigned int xfer_count = length + ADDRESS_WORD_LEN;
+	unsigned char txbuf[ADDRESS_WORD_LEN];
+	unsigned char *rxbuf = NULL;
+	struct spi_message msg;
+	struct spi_transfer *xfers = NULL;
+	struct spi_device *spi = to_spi_device(rmi4_data->pdev->dev.parent);
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	spi_message_init(&msg);
+
+	xfers = kcalloc(xfer_count, sizeof(struct spi_transfer), GFP_KERNEL);
+	if (!xfers) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to allocate memory for xfers\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	/* Address word: high byte with read bit, then low byte. */
+	txbuf[0] = (addr >> 8) | SPI_READ;
+	txbuf[1] = addr & MASK_8BIT;
+
+	rxbuf = kmalloc(length, GFP_KERNEL);
+	if (!rxbuf) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to allocate memory for rxbuf\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	retval = synaptics_rmi4_spi_set_page(rmi4_data, addr);
+	if (retval != PAGE_SELECT_LEN) {
+		mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+		retval = -EIO;
+		goto exit;
+	}
+
+	/* First ADDRESS_WORD_LEN transfers send the address, the rest
+	 * receive the data, one byte each.
+	 */
+	for (index = 0; index < xfer_count; index++) {
+		xfers[index].len = 1;
+		xfers[index].delay_usecs = bdata->byte_delay_us;
+		if (index < ADDRESS_WORD_LEN)
+			xfers[index].tx_buf = &txbuf[index];
+		else
+			xfers[index].rx_buf = &rxbuf[index - ADDRESS_WORD_LEN];
+		spi_message_add_tail(&xfers[index], &msg);
+	}
+
+	/* Last byte carries the block delay, if configured. */
+	if (bdata->block_delay_us)
+		xfers[index - 1].delay_usecs = bdata->block_delay_us;
+
+	retval = spi_sync(spi, &msg);
+	if (retval == 0) {
+		retval = secure_memcpy(data, length, rxbuf, length, length);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to copy data\n",
+					__func__);
+		} else {
+			retval = length;
+		}
+	} else {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to complete SPI transfer, error = %d\n",
+				__func__, retval);
+	}
+
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+exit:
+	kfree(rxbuf);
+	kfree(xfers);
+
+	return retval;
+}
+
+/*
+ * Write length bytes starting at addr over SPI: select the page, then
+ * send the 2-byte address word (read bit clear) followed by the data,
+ * one byte per transfer (per-byte delay from board data).
+ * Returns length on success or a negative errno.
+ */
+static int synaptics_rmi4_spi_write(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr, unsigned char *data, unsigned short length)
+{
+	int retval;
+	unsigned int index;
+	unsigned int xfer_count = length + ADDRESS_WORD_LEN;
+	unsigned char *txbuf = NULL;
+	struct spi_message msg;
+	struct spi_transfer *xfers = NULL;
+	struct spi_device *spi = to_spi_device(rmi4_data->pdev->dev.parent);
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	spi_message_init(&msg);
+
+	xfers = kcalloc(xfer_count, sizeof(struct spi_transfer), GFP_KERNEL);
+	if (!xfers) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to allocate memory for xfers\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	txbuf = kmalloc(xfer_count, GFP_KERNEL);
+	if (!txbuf) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to allocate memory for txbuf\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	/* Address word (read bit cleared), followed by the payload. */
+	txbuf[0] = (addr >> 8) & ~SPI_READ;
+	txbuf[1] = addr & MASK_8BIT;
+	retval = secure_memcpy(&txbuf[ADDRESS_WORD_LEN],
+			xfer_count - ADDRESS_WORD_LEN, data, length, length);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy data\n",
+				__func__);
+		goto exit;
+	}
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	retval = synaptics_rmi4_spi_set_page(rmi4_data, addr);
+	if (retval != PAGE_SELECT_LEN) {
+		mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+		retval = -EIO;
+		goto exit;
+	}
+
+	/* One single-byte transfer per byte of address + payload. */
+	for (index = 0; index < xfer_count; index++) {
+		xfers[index].len = 1;
+		xfers[index].delay_usecs = bdata->byte_delay_us;
+		xfers[index].tx_buf = &txbuf[index];
+		spi_message_add_tail(&xfers[index], &msg);
+	}
+
+	/* Last byte carries the block delay, if configured. */
+	if (bdata->block_delay_us)
+		xfers[index - 1].delay_usecs = bdata->block_delay_us;
+
+	retval = spi_sync(spi, &msg);
+	if (retval == 0) {
+		retval = length;
+	} else {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to complete SPI transfer, error = %d\n",
+				__func__, retval);
+	}
+
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+exit:
+	kfree(txbuf);
+	kfree(xfers);
+
+	return retval;
+}
+
+/* SPI bus access operations handed to the DSX core driver. */
+static struct synaptics_dsx_bus_access bus_access = {
+	.type = BUS_SPI,
+	.read = synaptics_rmi4_spi_read,
+	.write = synaptics_rmi4_spi_write,
+};
+
+/* Hardware interface descriptor passed as platform data to the core. */
+static struct synaptics_dsx_hw_interface hw_if;
+
+/* Platform device registered on behalf of the DSX core driver. */
+static struct platform_device *synaptics_dsx_spi_device;
+
+/* Device release callback: frees the platform device allocated in
+ * probe once the device core drops its last reference.
+ */
+static void synaptics_rmi4_spi_dev_release(struct device *dev)
+{
+	kfree(synaptics_dsx_spi_device);
+
+	return;
+}
+
+/*
+ * SPI probe: reject half-duplex masters, build the hardware interface
+ * (board data from DT when available, platform data otherwise),
+ * configure the SPI mode and register the platform device that the
+ * DSX core driver binds to.
+ *
+ * Fixes: synaptics_dsx_spi_device was leaked on every error path after
+ * its allocation, and the parse_dt() return value was ignored.
+ */
+static int synaptics_rmi4_spi_probe(struct spi_device *spi)
+{
+	int retval;
+
+	if (spi->master->flags & SPI_MASTER_HALF_DUPLEX) {
+		dev_err(&spi->dev,
+				"%s: Full duplex not supported by host\n",
+				__func__);
+		return -EIO;
+	}
+
+	synaptics_dsx_spi_device = kzalloc(
+			sizeof(struct platform_device),
+			GFP_KERNEL);
+	if (!synaptics_dsx_spi_device) {
+		dev_err(&spi->dev,
+				"%s: Failed to allocate memory for synaptics_dsx_spi_device\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+#ifdef CONFIG_OF
+	if (spi->dev.of_node) {
+		/* devm_* allocations are released by the driver core. */
+		hw_if.board_data = devm_kzalloc(&spi->dev,
+				sizeof(struct synaptics_dsx_board_data),
+				GFP_KERNEL);
+		if (!hw_if.board_data) {
+			dev_err(&spi->dev,
+					"%s: Failed to allocate memory for board data\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_free_device;
+		}
+		hw_if.board_data->cap_button_map = devm_kzalloc(&spi->dev,
+				sizeof(struct synaptics_dsx_button_map),
+				GFP_KERNEL);
+		if (!hw_if.board_data->cap_button_map) {
+			dev_err(&spi->dev,
+					"%s: Failed to allocate memory for 0D button map\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_free_device;
+		}
+		hw_if.board_data->vir_button_map = devm_kzalloc(&spi->dev,
+				sizeof(struct synaptics_dsx_button_map),
+				GFP_KERNEL);
+		if (!hw_if.board_data->vir_button_map) {
+			dev_err(&spi->dev,
+					"%s: Failed to allocate memory for virtual button map\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_free_device;
+		}
+		retval = parse_dt(&spi->dev, hw_if.board_data);
+		if (retval < 0)
+			goto err_free_device;
+	}
+#else
+	hw_if.board_data = spi->dev.platform_data;
+#endif
+
+	hw_if.bus_access = &bus_access;
+
+	spi->bits_per_word = 8;
+	spi->mode = SPI_MODE_3;
+
+	retval = spi_setup(spi);
+	if (retval < 0) {
+		dev_err(&spi->dev,
+				"%s: Failed to perform SPI setup\n",
+				__func__);
+		goto err_free_device;
+	}
+
+	synaptics_dsx_spi_device->name = PLATFORM_DRIVER_NAME;
+	synaptics_dsx_spi_device->id = 0;
+	synaptics_dsx_spi_device->num_resources = 0;
+	synaptics_dsx_spi_device->dev.parent = &spi->dev;
+	synaptics_dsx_spi_device->dev.platform_data = &hw_if;
+	synaptics_dsx_spi_device->dev.release = synaptics_rmi4_spi_dev_release;
+
+	retval = platform_device_register(synaptics_dsx_spi_device);
+	if (retval) {
+		dev_err(&spi->dev,
+				"%s: Failed to register platform device\n",
+				__func__);
+		retval = -ENODEV;
+		goto err_free_device;
+	}
+
+	return 0;
+
+err_free_device:
+	kfree(synaptics_dsx_spi_device);
+	synaptics_dsx_spi_device = NULL;
+	return retval;
+}
+
+/* SPI remove: unregister the platform device (its release callback
+ * frees the device structure).
+ */
+static int synaptics_rmi4_spi_remove(struct spi_device *spi)
+{
+	platform_device_unregister(synaptics_dsx_spi_device);
+
+	return 0;
+}
+
+#ifdef CONFIG_OF
+/* Device tree match table. */
+static struct of_device_id synaptics_rmi4_of_match_table[] = {
+	{
+		.compatible = "synaptics,dsx-spi",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, synaptics_rmi4_of_match_table);
+#else
+#define synaptics_rmi4_of_match_table NULL
+#endif
+
+/* SPI driver definition binding probe/remove to the bus. */
+static struct spi_driver synaptics_rmi4_spi_driver = {
+	.driver = {
+		.name = SPI_DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = synaptics_rmi4_of_match_table,
+	},
+	.probe = synaptics_rmi4_spi_probe,
+	.remove = synaptics_rmi4_spi_remove,
+};
+
+
+/* Register the SPI bus driver; called from the DSX core module init. */
+int synaptics_rmi4_bus_init_v26(void)
+{
+	return spi_register_driver(&synaptics_rmi4_spi_driver);
+}
+EXPORT_SYMBOL(synaptics_rmi4_bus_init_v26);
+
+/* Unregister the SPI bus driver; called from the DSX core module exit. */
+void synaptics_rmi4_bus_exit_v26(void)
+{
+	spi_unregister_driver(&synaptics_rmi4_spi_driver);
+
+	return;
+}
+EXPORT_SYMBOL(synaptics_rmi4_bus_exit_v26);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX SPI Bus Support Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_test_reporting.c b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_test_reporting.c
new file mode 100644
index 0000000..1fdd89f
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_test_reporting.c
@@ -0,0 +1,4172 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2015 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/ctype.h>
+#include <linux/hrtimer.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx_v2_6.h>
+#include "synaptics_dsx_core.h"
+
+/* Name of the sysfs directory this module creates. */
+#define SYSFS_FOLDER_NAME "f54"
+
+/* Timeouts: report retrieval (s), calibration (s), command poll (x 100 ms). */
+#define GET_REPORT_TIMEOUT_S 3
+#define CALIBRATION_TIMEOUT_S 10
+#define COMMAND_TIMEOUT_100MS 20
+
+/* Sleep-mode control values (bit 2). */
+#define NO_SLEEP_OFF (0 << 2)
+#define NO_SLEEP_ON (1 << 2)
+
+/* States reported through the "status" attribute / f54->status. */
+#define STATUS_IDLE 0
+#define STATUS_BUSY 1
+#define STATUS_ERROR 2
+
+/* Byte offsets of the report index and report data in the F54 data regs. */
+#define REPORT_INDEX_OFFSET 1
+#define REPORT_DATA_OFFSET 3
+
+/* Offsets used when reading the sensor RX/TX assignment data. */
+#define SENSOR_RX_MAPPING_OFFSET 1
+#define SENSOR_TX_MAPPING_OFFSET 2
+
+/* F54 command register bits. */
+#define COMMAND_GET_REPORT 1
+#define COMMAND_FORCE_CAL 2
+#define COMMAND_FORCE_UPDATE 4
+
+/* Control 0 bit disabling automatic calibration. */
+#define CONTROL_NO_AUTO_CAL 1
+
+#define CONTROL_0_SIZE 1
+#define CONTROL_1_SIZE 1
+#define CONTROL_2_SIZE 2
+#define CONTROL_3_SIZE 1
+#define CONTROL_4_6_SIZE 3
+#define CONTROL_7_SIZE 1
+#define CONTROL_8_9_SIZE 3
+#define CONTROL_10_SIZE 1
+#define CONTROL_11_SIZE 2
+#define CONTROL_12_13_SIZE 2
+#define CONTROL_14_SIZE 1
+#define CONTROL_15_SIZE 1
+#define CONTROL_16_SIZE 1
+#define CONTROL_17_SIZE 1
+#define CONTROL_18_SIZE 1
+#define CONTROL_19_SIZE 1
+#define CONTROL_20_SIZE 1
+#define CONTROL_21_SIZE 2
+#define CONTROL_22_26_SIZE 7
+#define CONTROL_27_SIZE 1
+#define CONTROL_28_SIZE 2
+#define CONTROL_29_SIZE 1
+#define CONTROL_30_SIZE 1
+#define CONTROL_31_SIZE 1
+#define CONTROL_32_35_SIZE 8
+#define CONTROL_36_SIZE 1
+#define CONTROL_37_SIZE 1
+#define CONTROL_38_SIZE 1
+#define CONTROL_39_SIZE 1
+#define CONTROL_40_SIZE 1
+#define CONTROL_41_SIZE 1
+#define CONTROL_42_SIZE 2
+#define CONTROL_43_54_SIZE 13
+#define CONTROL_55_56_SIZE 2
+#define CONTROL_57_SIZE 1
+#define CONTROL_58_SIZE 1
+#define CONTROL_59_SIZE 2
+#define CONTROL_60_62_SIZE 3
+#define CONTROL_63_SIZE 1
+#define CONTROL_64_67_SIZE 4
+#define CONTROL_68_73_SIZE 8
+#define CONTROL_74_SIZE 2
+#define CONTROL_75_SIZE 1
+#define CONTROL_76_SIZE 1
+#define CONTROL_77_78_SIZE 2
+#define CONTROL_79_83_SIZE 5
+#define CONTROL_84_85_SIZE 2
+#define CONTROL_86_SIZE 1
+#define CONTROL_87_SIZE 1
+#define CONTROL_88_SIZE 1
+#define CONTROL_89_SIZE 1
+#define CONTROL_90_SIZE 1
+#define CONTROL_91_SIZE 1
+#define CONTROL_92_SIZE 1
+#define CONTROL_93_SIZE 1
+#define CONTROL_94_SIZE 1
+#define CONTROL_95_SIZE 1
+#define CONTROL_96_SIZE 1
+#define CONTROL_97_SIZE 1
+#define CONTROL_98_SIZE 1
+#define CONTROL_99_SIZE 1
+#define CONTROL_100_SIZE 1
+#define CONTROL_101_SIZE 1
+#define CONTROL_102_SIZE 1
+#define CONTROL_103_SIZE 1
+#define CONTROL_104_SIZE 1
+#define CONTROL_105_SIZE 1
+#define CONTROL_106_SIZE 1
+#define CONTROL_107_SIZE 1
+#define CONTROL_108_SIZE 1
+#define CONTROL_109_SIZE 1
+#define CONTROL_110_SIZE 1
+#define CONTROL_111_SIZE 1
+#define CONTROL_112_SIZE 1
+#define CONTROL_113_SIZE 1
+#define CONTROL_114_SIZE 1
+#define CONTROL_115_SIZE 1
+#define CONTROL_116_SIZE 1
+#define CONTROL_117_SIZE 1
+#define CONTROL_118_SIZE 1
+#define CONTROL_119_SIZE 1
+#define CONTROL_120_SIZE 1
+#define CONTROL_121_SIZE 1
+#define CONTROL_122_SIZE 1
+#define CONTROL_123_SIZE 1
+#define CONTROL_124_SIZE 1
+#define CONTROL_125_SIZE 1
+#define CONTROL_126_SIZE 1
+#define CONTROL_127_SIZE 1
+#define CONTROL_128_SIZE 1
+#define CONTROL_129_SIZE 1
+#define CONTROL_130_SIZE 1
+#define CONTROL_131_SIZE 1
+#define CONTROL_132_SIZE 1
+#define CONTROL_133_SIZE 1
+#define CONTROL_134_SIZE 1
+#define CONTROL_135_SIZE 1
+#define CONTROL_136_SIZE 1
+#define CONTROL_137_SIZE 1
+#define CONTROL_138_SIZE 1
+#define CONTROL_139_SIZE 1
+#define CONTROL_140_SIZE 1
+#define CONTROL_141_SIZE 1
+#define CONTROL_142_SIZE 1
+#define CONTROL_143_SIZE 1
+#define CONTROL_144_SIZE 1
+#define CONTROL_145_SIZE 1
+#define CONTROL_146_SIZE 1
+#define CONTROL_147_SIZE 1
+#define CONTROL_148_SIZE 1
+#define CONTROL_149_SIZE 1
+#define CONTROL_163_SIZE 1
+#define CONTROL_165_SIZE 1
+#define CONTROL_167_SIZE 1
+#define CONTROL_176_SIZE 1
+#define CONTROL_179_SIZE 1
+#define CONTROL_188_SIZE 1
+
+/* Fixed report sizes (bytes) for report types with constant payloads. */
+#define HIGH_RESISTANCE_DATA_SIZE 6
+#define FULL_RAW_CAP_MIN_MAX_DATA_SIZE 4
+#define TRX_OPEN_SHORT_DATA_SIZE 7
+
+/* Token-pasting helper for building the generated handler names. */
+#define concat(a, b) a##b
+
+/* Shorthand for taking the address of a generated attribute. */
+#define attrify(propname) (&dev_attr_##propname.attr)
+
+/*
+ * Declare a read-only (0444) sysfs attribute: prototypes the show
+ * handler and defines the matching struct device_attribute.
+ */
+#define show_prototype(propname)\
+static ssize_t concat(test_sysfs, _##propname##_show)(\
+		struct device *dev,\
+		struct device_attribute *attr,\
+		char *buf);\
+\
+static struct device_attribute dev_attr_##propname =\
+		__ATTR(propname, 0444,\
+		concat(test_sysfs, _##propname##_show),\
+		synaptics_rmi4_store_error);
+
+/*
+ * Declare a write-only (0220) sysfs attribute: prototypes the store
+ * handler and defines the matching struct device_attribute.
+ */
+#define store_prototype(propname)\
+static ssize_t concat(test_sysfs, _##propname##_store)(\
+		struct device *dev,\
+		struct device_attribute *attr,\
+		const char *buf, size_t count);\
+\
+static struct device_attribute dev_attr_##propname =\
+		__ATTR(propname, 0220,\
+		synaptics_rmi4_show_error,\
+		concat(test_sysfs, _##propname##_store));
+
+/*
+ * Declare a read/write (0664) sysfs attribute: prototypes both the
+ * show and store handlers and defines the device_attribute.
+ */
+#define show_store_prototype(propname)\
+static ssize_t concat(test_sysfs, _##propname##_show)(\
+		struct device *dev,\
+		struct device_attribute *attr,\
+		char *buf);\
+\
+static ssize_t concat(test_sysfs, _##propname##_store)(\
+		struct device *dev,\
+		struct device_attribute *attr,\
+		const char *buf, size_t count);\
+\
+static struct device_attribute dev_attr_##propname =\
+		__ATTR(propname, 0664,\
+		concat(test_sysfs, _##propname##_show),\
+		concat(test_sysfs, _##propname##_store));
+
+/*
+ * Read-modify-write a CBC control register, clearing its
+ * cbc_tx_carrier_selection bit.
+ *
+ * NOTE: this macro expects 'retval' and 'rmi4_data' to be in scope at
+ * the expansion site, and it executes 'return retval;' from the
+ * ENCLOSING function on any register access failure.
+ */
+#define disable_cbc(ctrl_num)\
+do {\
+	retval = synaptics_rmi4_reg_read(rmi4_data,\
+			f54->control.ctrl_num->address,\
+			f54->control.ctrl_num->data,\
+			sizeof(f54->control.ctrl_num->data));\
+	if (retval < 0) {\
+		dev_err(rmi4_data->pdev->dev.parent,\
+				"%s: Failed to disable CBC (" #ctrl_num ")\n",\
+				__func__);\
+		return retval;\
+	} \
+	f54->control.ctrl_num->cbc_tx_carrier_selection = 0;\
+	retval = synaptics_rmi4_reg_write(rmi4_data,\
+			f54->control.ctrl_num->address,\
+			f54->control.ctrl_num->data,\
+			sizeof(f54->control.ctrl_num->data));\
+	if (retval < 0) {\
+		dev_err(rmi4_data->pdev->dev.parent,\
+				"%s: Failed to disable CBC (" #ctrl_num ")\n",\
+				__func__);\
+		return retval;\
+	} \
+} while (0)
+
+/*
+ * Report types retrievable through F54. The numeric values are the
+ * device-defined report type codes written to the data registers
+ * (gaps are report types this driver does not support).
+ */
+enum f54_report_types {
+	F54_8BIT_IMAGE = 1,
+	F54_16BIT_IMAGE = 2,
+	F54_RAW_16BIT_IMAGE = 3,
+	F54_HIGH_RESISTANCE = 4,
+	F54_TX_TO_TX_SHORTS = 5,
+	F54_RX_TO_RX_SHORTS_1 = 7,
+	F54_TRUE_BASELINE = 9,
+	F54_FULL_RAW_CAP_MIN_MAX = 13,
+	F54_RX_OPENS_1 = 14,
+	F54_TX_OPENS = 15,
+	F54_TX_TO_GND_SHORTS = 16,
+	F54_RX_TO_RX_SHORTS_2 = 17,
+	F54_RX_OPENS_2 = 18,
+	F54_FULL_RAW_CAP = 19,
+	F54_FULL_RAW_CAP_NO_RX_COUPLING = 20,
+	F54_SENSOR_SPEED = 22,
+	F54_ADC_RANGE = 23,
+	F54_TRX_OPENS = 24,
+	F54_TRX_TO_GND_SHORTS = 25,
+	F54_TRX_SHORTS = 26,
+	F54_ABS_RAW_CAP = 38,
+	F54_ABS_DELTA_CAP = 40,
+	F54_ABS_HYBRID_DELTA_CAP = 59,
+	F54_ABS_HYBRID_RAW_CAP = 63,
+	F54_AMP_FULL_RAW_CAP = 78,
+	F54_AMP_RAW_ADC = 83,
+	INVALID_REPORT_TYPE = -1,
+};
+
+/* AFE calibration operations (selected via the do_afe_calibration node). */
+enum f54_afe_cal {
+	F54_AFE_CAL,
+	F54_AFE_IS_CAL,
+};
+
+struct f54_query {
+	union {
+		struct {
+			/* query 0 */
+			unsigned char num_of_rx_electrodes;
+
+			/* query 1 */
+			unsigned char num_of_tx_electrodes;
+
+			/* query 2 */
+			unsigned char f54_query2_b0__1:2;
+			unsigned char has_baseline:1;
+			unsigned char has_image8:1;
+			unsigned char f54_query2_b4__5:2;
+			unsigned char has_image16:1;
+			unsigned char f54_query2_b7:1;
+
+			/* queries 3.0 and 3.1 */
+			unsigned short clock_rate;
+
+			/* query 4 */
+			unsigned char touch_controller_family;
+
+			/* query 5 */
+			unsigned char has_pixel_touch_threshold_adjustment:1;
+			unsigned char f54_query5_b1__7:7;
+
+			/* query 6 */
+			unsigned char has_sensor_assignment:1;
+			unsigned char has_interference_metric:1;
+			unsigned char has_sense_frequency_control:1;
+			unsigned char has_firmware_noise_mitigation:1;
+			unsigned char has_ctrl11:1;
+			unsigned char has_two_byte_report_rate:1;
+			unsigned char has_one_byte_report_rate:1;
+			unsigned char has_relaxation_control:1;
+
+			/* query 7 */
+			unsigned char curve_compensation_mode:2;
+			unsigned char f54_query7_b2__7:6;
+
+			/* query 8 */
+			unsigned char f54_query8_b0:1;
+			unsigned char has_iir_filter:1;
+			unsigned char has_cmn_removal:1;
+			unsigned char has_cmn_maximum:1;
+			unsigned char has_touch_hysteresis:1;
+			unsigned char has_edge_compensation:1;
+			unsigned char has_per_frequency_noise_control:1;
+			unsigned char has_enhanced_stretch:1;
+
+			/* query 9 */
+			unsigned char has_force_fast_relaxation:1;
+			unsigned char has_multi_metric_state_machine:1;
+			unsigned char has_signal_clarity:1;
+			unsigned char has_variance_metric:1;
+			unsigned char has_0d_relaxation_control:1;
+			unsigned char has_0d_acquisition_control:1;
+			unsigned char has_status:1;
+			unsigned char has_slew_metric:1;
+
+			/* query 10 */
+			unsigned char has_h_blank:1;
+			unsigned char has_v_blank:1;
+			unsigned char has_long_h_blank:1;
+			unsigned char has_startup_fast_relaxation:1;
+			unsigned char has_esd_control:1;
+			unsigned char has_noise_mitigation2:1;
+			unsigned char has_noise_state:1;
+			unsigned char has_energy_ratio_relaxation:1;
+
+			/* query 11 */
+			unsigned char has_excessive_noise_reporting:1;
+			unsigned char has_slew_option:1;
+			unsigned char has_two_overhead_bursts:1;
+			unsigned char has_query13:1;
+			unsigned char has_one_overhead_burst:1;
+			unsigned char f54_query11_b5:1;
+			unsigned char has_ctrl88:1;
+			unsigned char has_query15:1;
+
+			/* query 12 */
+			unsigned char number_of_sensing_frequencies:4;
+			unsigned char f54_query12_b4__7:4;
+		} __packed;
+		unsigned char data[14];
+	};
+};
+
+struct f54_query_13 {
+	union {
+		struct {
+			unsigned char has_ctrl86:1;
+			unsigned char has_ctrl87:1;
+			unsigned char has_ctrl87_sub0:1;
+			unsigned char has_ctrl87_sub1:1;
+			unsigned char has_ctrl87_sub2:1;
+			unsigned char has_cidim:1;
+			unsigned char has_noise_mitigation_enhancement:1;
+			unsigned char has_rail_im:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_15 {
+	union {
+		struct {
+			unsigned char has_ctrl90:1;
+			unsigned char has_transmit_strength:1;
+			unsigned char has_ctrl87_sub3:1;
+			unsigned char has_query16:1;
+			unsigned char has_query20:1;
+			unsigned char has_query21:1;
+			unsigned char has_query22:1;
+			unsigned char has_query25:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_16 {
+	union {
+		struct {
+			unsigned char has_query17:1;
+			unsigned char has_data17:1;
+			unsigned char has_ctrl92:1;
+			unsigned char has_ctrl93:1;
+			unsigned char has_ctrl94_query18:1;
+			unsigned char has_ctrl95_query19:1;
+			unsigned char has_ctrl99:1;
+			unsigned char has_ctrl100:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_21 {
+	union {
+		struct {
+			unsigned char has_abs_rx:1;
+			unsigned char has_abs_tx:1;
+			unsigned char has_ctrl91:1;
+			unsigned char has_ctrl96:1;
+			unsigned char has_ctrl97:1;
+			unsigned char has_ctrl98:1;
+			unsigned char has_data19:1;
+			unsigned char has_query24_data18:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_22 {
+	union {
+		struct {
+			unsigned char has_packed_image:1;
+			unsigned char has_ctrl101:1;
+			unsigned char has_dynamic_sense_display_ratio:1;
+			unsigned char has_query23:1;
+			unsigned char has_ctrl103_query26:1;
+			unsigned char has_ctrl104:1;
+			unsigned char has_ctrl105:1;
+			unsigned char has_query28:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_23 {
+	union {
+		struct {
+			unsigned char has_ctrl102:1;
+			unsigned char has_ctrl102_sub1:1;
+			unsigned char has_ctrl102_sub2:1;
+			unsigned char has_ctrl102_sub4:1;
+			unsigned char has_ctrl102_sub5:1;
+			unsigned char has_ctrl102_sub9:1;
+			unsigned char has_ctrl102_sub10:1;
+			unsigned char has_ctrl102_sub11:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_25 {
+	union {
+		struct {
+			unsigned char has_ctrl106:1;
+			unsigned char has_ctrl102_sub12:1;
+			unsigned char has_ctrl107:1;
+			unsigned char has_ctrl108:1;
+			unsigned char has_ctrl109:1;
+			unsigned char has_data20:1;
+			unsigned char f54_query25_b6:1;
+			unsigned char has_query27:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_27 {
+	union {
+		struct {
+			unsigned char has_ctrl110:1;
+			unsigned char has_data21:1;
+			unsigned char has_ctrl111:1;
+			unsigned char has_ctrl112:1;
+			unsigned char has_ctrl113:1;
+			unsigned char has_data22:1;
+			unsigned char has_ctrl114:1;
+			unsigned char has_query29:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_29 {
+	union {
+		struct {
+			unsigned char has_ctrl115:1;
+			unsigned char has_ground_ring_options:1;
+			unsigned char has_lost_bursts_tuning:1;
+			unsigned char has_aux_exvcom2_select:1;
+			unsigned char has_ctrl116:1;
+			unsigned char has_data23:1;
+			unsigned char has_ctrl117:1;
+			unsigned char has_query30:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_30 {
+	union {
+		struct {
+			unsigned char has_ctrl118:1;
+			unsigned char has_ctrl119:1;
+			unsigned char has_ctrl120:1;
+			unsigned char has_ctrl121:1;
+			unsigned char has_ctrl122_query31:1;
+			unsigned char has_ctrl123:1;
+			unsigned char f54_query30_b6:1;
+			unsigned char has_query32:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_32 {
+	union {
+		struct {
+			unsigned char has_ctrl125:1;
+			unsigned char has_ctrl126:1;
+			unsigned char has_ctrl127:1;
+			unsigned char has_abs_charge_pump_disable:1;
+			unsigned char has_query33:1;
+			unsigned char has_data24:1;
+			unsigned char has_query34:1;
+			unsigned char has_query35:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_33 {
+	union {
+		struct {
+			unsigned char f54_query33_b0:1;
+			unsigned char f54_query33_b1:1;
+			unsigned char f54_query33_b2:1;
+			unsigned char f54_query33_b3:1;
+			unsigned char has_ctrl132:1;
+			unsigned char has_ctrl133:1;
+			unsigned char has_ctrl134:1;
+			unsigned char has_query36:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_35 {
+	union {
+		struct {
+			unsigned char has_data25:1;
+			unsigned char f54_query35_b1:1;
+			unsigned char f54_query35_b2:1;
+			unsigned char has_ctrl137:1;
+			unsigned char has_ctrl138:1;
+			unsigned char has_ctrl139:1;
+			unsigned char has_data26:1;
+			unsigned char has_ctrl140:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_36 {
+	union {
+		struct {
+			unsigned char f54_query36_b0:1;
+			unsigned char has_ctrl142:1;
+			unsigned char has_query37:1;
+			unsigned char has_ctrl143:1;
+			unsigned char has_ctrl144:1;
+			unsigned char has_ctrl145:1;
+			unsigned char has_ctrl146:1;
+			unsigned char has_query38:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_38 {
+	union {
+		struct {
+			unsigned char has_ctrl147:1;
+			unsigned char has_ctrl148:1;
+			unsigned char has_ctrl149:1;
+			unsigned char f54_query38_b3__6:4;
+			unsigned char has_query39:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_39 {
+	union {
+		struct {
+			unsigned char f54_query39_b0__6:7;
+			unsigned char has_query40:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_40 {
+	union {
+		struct {
+			unsigned char f54_query40_b0:1;
+			unsigned char has_ctrl163_query41:1;
+			unsigned char f54_query40_b2:1;
+			unsigned char has_ctrl165_query42:1;
+			unsigned char f54_query40_b4:1;
+			unsigned char has_ctrl167:1;
+			unsigned char f54_query40_b6:1;
+			unsigned char has_query43:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_43 {
+	union {
+		struct {
+			unsigned char f54_query43_b0__6:7;
+			unsigned char has_query46:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_46 {
+	union {
+		struct {
+			unsigned char has_ctrl176:1;
+			unsigned char f54_query46_b1:1;
+			unsigned char has_ctrl179:1;
+			unsigned char f54_query46_b3:1;
+			unsigned char has_data27:1;
+			unsigned char has_data28:1;
+			unsigned char f54_query46_b6:1;
+			unsigned char has_query47:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_47 {
+	union {
+		struct {
+			unsigned char f54_query47_b0__6:7;
+			unsigned char has_query49:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_49 {
+	union {
+		struct {
+			unsigned char f54_query49_b0__1:2;
+			unsigned char has_ctrl188:1;
+			unsigned char has_data31:1;
+			unsigned char f54_query49_b4__6:3;
+			unsigned char has_query50:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_50 {
+	union {
+		struct {
+			unsigned char f54_query50_b0__6:7;
+			unsigned char has_query51:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_51 {
+	union {
+		struct {
+			unsigned char f54_query51_b0__4:5;
+			unsigned char has_query53_query54_ctrl198:1;
+			unsigned char f54_query51_b6__7:2;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_data_31 {
+	union {
+		struct {
+			unsigned char is_calibration_crc:1;
+			unsigned char calibration_crc:1;
+			unsigned char short_test_row_number:5;
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+struct f54_control_7 {
+	union {
+		struct {
+			unsigned char cbc_cap:3;
+			unsigned char cbc_polarity:1;
+			unsigned char cbc_tx_carrier_selection:1;
+			unsigned char f54_ctrl7_b5__7:3;
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+struct f54_control_41 {
+	union {
+		struct {
+			unsigned char no_signal_clarity:1;
+			unsigned char f54_ctrl41_b1__7:7;
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+struct f54_control_57 {
+	union {
+		struct {
+			unsigned char cbc_cap:3;
+			unsigned char cbc_polarity:1;
+			unsigned char cbc_tx_carrier_selection:1;
+			unsigned char f54_ctrl57_b5__7:3;
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+struct f54_control_86 {
+	union {
+		struct {
+			unsigned char enable_high_noise_state:1;
+			unsigned char dynamic_sense_display_ratio:2;
+			unsigned char f54_ctrl86_b3__7:5;
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+struct f54_control_88 {
+	union {
+		struct {
+			unsigned char tx_low_reference_polarity:1;
+			unsigned char tx_high_reference_polarity:1;
+			unsigned char abs_low_reference_polarity:1;
+			unsigned char abs_polarity:1;
+			unsigned char cbc_polarity:1;
+			unsigned char cbc_tx_carrier_selection:1;
+			unsigned char charge_pump_enable:1;
+			unsigned char cbc_abs_auto_servo:1;
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+struct f54_control_110 {
+	union {
+		struct {
+			unsigned char active_stylus_rx_feedback_cap;
+			unsigned char active_stylus_rx_feedback_cap_reference;
+			unsigned char active_stylus_low_reference;
+			unsigned char active_stylus_high_reference;
+			unsigned char active_stylus_gain_control;
+			unsigned char active_stylus_gain_control_reference;
+			unsigned char active_stylus_timing_mode;
+			unsigned char active_stylus_discovery_bursts;
+			unsigned char active_stylus_detection_bursts;
+			unsigned char active_stylus_discovery_noise_multiplier;
+			unsigned char active_stylus_detection_envelope_min;
+			unsigned char active_stylus_detection_envelope_max;
+			unsigned char active_stylus_lose_count;
+		} __packed;
+		struct {
+			unsigned char data[13];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+struct f54_control_149 {
+	union {
+		struct {
+			unsigned char trans_cbc_global_cap_enable:1;
+			unsigned char f54_ctrl149_b1__7:7;
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+struct f54_control_188 {
+	union {
+		struct {
+			unsigned char start_calibration:1;
+			unsigned char start_is_calibration:1;
+			unsigned char frequency:2;
+			unsigned char start_production_test:1;
+			unsigned char short_test_calibration:1;
+			unsigned char f54_ctrl188_b7:1;
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+/*
+ * Pointers to the subset of F54 control registers this module accesses.
+ * NOTE(review): allocation/assignment of these pointers happens outside
+ * this chunk — confirm lifetime at the initialization site.
+ */
+struct f54_control {
+	struct f54_control_7 *reg_7;
+	struct f54_control_41 *reg_41;
+	struct f54_control_57 *reg_57;
+	struct f54_control_86 *reg_86;
+	struct f54_control_88 *reg_88;
+	struct f54_control_110 *reg_110;
+	struct f54_control_149 *reg_149;
+	struct f54_control_188 *reg_188;
+};
+
+/* Runtime state for the F54 test-reporting function. */
+struct synaptics_rmi4_f54_handle {
+	bool no_auto_cal;
+	bool skip_preparation;
+	unsigned char status;		/* STATUS_IDLE/BUSY/ERROR */
+	unsigned char intr_mask;
+	unsigned char intr_reg_num;
+	unsigned char tx_assigned;	/* assigned TX channel count */
+	unsigned char rx_assigned;	/* assigned RX channel count */
+	unsigned char *report_data;	/* raw report buffer */
+	/* Register base addresses for this function. */
+	unsigned short query_base_addr;
+	unsigned short control_base_addr;
+	unsigned short data_base_addr;
+	unsigned short command_base_addr;
+	unsigned short fifoindex;
+	unsigned int report_size;	/* bytes; see test_set_report_size() */
+	unsigned int data_buffer_size;
+	unsigned int data_pos;
+	enum f54_report_types report_type;
+	/* Cached query register contents. */
+	struct f54_query query;
+	struct f54_query_13 query_13;
+	struct f54_query_15 query_15;
+	struct f54_query_16 query_16;
+	struct f54_query_21 query_21;
+	struct f54_query_22 query_22;
+	struct f54_query_23 query_23;
+	struct f54_query_25 query_25;
+	struct f54_query_27 query_27;
+	struct f54_query_29 query_29;
+	struct f54_query_30 query_30;
+	struct f54_query_32 query_32;
+	struct f54_query_33 query_33;
+	struct f54_query_35 query_35;
+	struct f54_query_36 query_36;
+	struct f54_query_38 query_38;
+	struct f54_query_39 query_39;
+	struct f54_query_40 query_40;
+	struct f54_query_43 query_43;
+	struct f54_query_46 query_46;
+	struct f54_query_47 query_47;
+	struct f54_query_49 query_49;
+	struct f54_query_50 query_50;
+	struct f54_query_51 query_51;
+	struct f54_data_31 data_31;
+	struct f54_control control;
+	struct mutex status_mutex;	/* guards the status field */
+	struct kobject *sysfs_dir;
+	struct hrtimer watchdog;
+	struct work_struct timeout_work;
+	struct work_struct test_report_work;
+	struct workqueue_struct *test_report_workqueue;
+	struct synaptics_rmi4_data *rmi4_data;
+};
+
+struct f55_query {
+	union {
+		struct {
+			/* query 0 */
+			unsigned char num_of_rx_electrodes;
+
+			/* query 1 */
+			unsigned char num_of_tx_electrodes;
+
+			/* query 2 */
+			unsigned char has_sensor_assignment:1;
+			unsigned char has_edge_compensation:1;
+			unsigned char curve_compensation_mode:2;
+			unsigned char has_ctrl6:1;
+			unsigned char has_alternate_transmitter_assignment:1;
+			unsigned char has_single_layer_multi_touch:1;
+			unsigned char has_query5:1;
+		} __packed;
+		unsigned char data[3];
+	};
+};
+
+struct f55_query_3 {
+	union {
+		struct {
+			unsigned char has_ctrl8:1;
+			unsigned char has_ctrl9:1;
+			unsigned char has_oncell_pattern_support:1;
+			unsigned char has_data0:1;
+			unsigned char has_single_wide_pattern_support:1;
+			unsigned char has_mirrored_tx_pattern_support:1;
+			unsigned char has_discrete_pattern_support:1;
+			unsigned char has_query9:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f55_query_5 {
+	union {
+		struct {
+			unsigned char has_corner_compensation:1;
+			unsigned char has_ctrl12:1;
+			unsigned char has_trx_configuration:1;
+			unsigned char has_ctrl13:1;
+			unsigned char f55_query5_b4:1;
+			unsigned char has_ctrl14:1;
+			unsigned char has_basis_function:1;
+			unsigned char has_query17:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f55_query_17 {
+	union {
+		struct {
+			unsigned char f55_query17_b0:1;
+			unsigned char has_ctrl16:1;
+			unsigned char f55_query17_b2:1;
+			unsigned char has_ctrl17:1;
+			unsigned char f55_query17_b4__6:3;
+			unsigned char has_query18:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f55_query_18 {
+	union {
+		struct {
+			unsigned char f55_query18_b0__6:7;
+			unsigned char has_query22:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f55_query_22 {
+	union {
+		struct {
+			unsigned char f55_query22_b0:1;
+			unsigned char has_query23:1;
+			unsigned char has_guard_disable:1;
+			unsigned char has_ctrl30:1;
+			unsigned char f55_query22_b4__7:4;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f55_query_23 {
+	union {
+		struct {
+			unsigned char amp_sensor_enabled:1;
+			unsigned char image_transposed:1;
+			unsigned char first_column_at_left_side:1;
+			unsigned char size_of_column2mux:5;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+/* Runtime state for the F55 sensor-configuration function. */
+struct synaptics_rmi4_f55_handle {
+	bool amp_sensor;
+	unsigned char size_of_column2mux;
+	unsigned char *tx_assignment;	/* per-channel TX assignment table */
+	unsigned char *rx_assignment;	/* per-channel RX assignment table */
+	/* Register base addresses for this function. */
+	unsigned short query_base_addr;
+	unsigned short control_base_addr;
+	unsigned short data_base_addr;
+	unsigned short command_base_addr;
+	/* Cached query register contents. */
+	struct f55_query query;
+	struct f55_query_3 query_3;
+	struct f55_query_5 query_5;
+	struct f55_query_17 query_17;
+	struct f55_query_18 query_18;
+	struct f55_query_22 query_22;
+	struct f55_query_23 query_23;
+};
+
+/*
+ * Generate the show/store handler prototypes and device_attribute
+ * definitions (see the *_prototype macros) for each sysfs node.
+ */
+show_prototype(num_of_mapped_tx)
+show_prototype(num_of_mapped_rx)
+show_prototype(tx_mapping)
+show_prototype(rx_mapping)
+show_prototype(report_size)
+show_prototype(status)
+store_prototype(do_preparation)
+store_prototype(force_cal)
+store_prototype(get_report)
+store_prototype(resume_touch)
+store_prototype(do_afe_calibration)
+show_store_prototype(report_type)
+show_store_prototype(fifoindex)
+show_store_prototype(no_auto_cal)
+show_store_prototype(read_report)
+
+/* NULL-terminated attribute list registered as a sysfs group. */
+static struct attribute *attrs[] = {
+	attrify(num_of_mapped_tx),
+	attrify(num_of_mapped_rx),
+	attrify(tx_mapping),
+	attrify(rx_mapping),
+	attrify(report_size),
+	attrify(status),
+	attrify(do_preparation),
+	attrify(force_cal),
+	attrify(get_report),
+	attrify(resume_touch),
+	attrify(do_afe_calibration),
+	attrify(report_type),
+	attrify(fifoindex),
+	attrify(no_auto_cal),
+	attrify(read_report),
+	NULL,
+};
+
+static struct attribute_group attr_group = {
+	.attrs = attrs,
+};
+
+static ssize_t test_sysfs_data_read(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+/* Binary sysfs node ("report_data") exposing the raw report buffer. */
+static struct bin_attribute test_report_data = {
+	.attr = {
+		.name = "report_data",
+		.mode = S_IRUGO,
+	},
+	.size = 0,
+	.read = test_sysfs_data_read,
+};
+
+/* File-scope singleton handles for the F54 and F55 function instances. */
+static struct synaptics_rmi4_f54_handle *f54;
+static struct synaptics_rmi4_f55_handle *f55;
+
+/* NOTE(review): could likely be static — confirm no external users. */
+DECLARE_COMPLETION(test_remove_complete);
+
+/*
+ * Check whether the requested report type is supported by this driver.
+ *
+ * Side effect: an unsupported type resets the cached report type and
+ * size in the f54 handle before returning false.
+ * (The unreachable break after return was removed.)
+ */
+static bool test_report_type_valid(enum f54_report_types report_type)
+{
+	switch (report_type) {
+	case F54_8BIT_IMAGE:
+	case F54_16BIT_IMAGE:
+	case F54_RAW_16BIT_IMAGE:
+	case F54_HIGH_RESISTANCE:
+	case F54_TX_TO_TX_SHORTS:
+	case F54_RX_TO_RX_SHORTS_1:
+	case F54_TRUE_BASELINE:
+	case F54_FULL_RAW_CAP_MIN_MAX:
+	case F54_RX_OPENS_1:
+	case F54_TX_OPENS:
+	case F54_TX_TO_GND_SHORTS:
+	case F54_RX_TO_RX_SHORTS_2:
+	case F54_RX_OPENS_2:
+	case F54_FULL_RAW_CAP:
+	case F54_FULL_RAW_CAP_NO_RX_COUPLING:
+	case F54_SENSOR_SPEED:
+	case F54_ADC_RANGE:
+	case F54_TRX_OPENS:
+	case F54_TRX_TO_GND_SHORTS:
+	case F54_TRX_SHORTS:
+	case F54_ABS_RAW_CAP:
+	case F54_ABS_DELTA_CAP:
+	case F54_ABS_HYBRID_DELTA_CAP:
+	case F54_ABS_HYBRID_RAW_CAP:
+	case F54_AMP_FULL_RAW_CAP:
+	case F54_AMP_RAW_ADC:
+		return true;
+	default:
+		f54->report_type = INVALID_REPORT_TYPE;
+		f54->report_size = 0;
+		return false;
+	}
+}
+
+/*
+ * Compute f54->report_size (in bytes) for the currently selected report
+ * type from the assigned TX/RX channel counts. Unknown types, and a
+ * failure to read control reg_41 for the ADC range report, yield 0.
+ * (The redundant trailing return in this void function was removed.)
+ */
+static void test_set_report_size(void)
+{
+	int retval;
+	unsigned char tx = f54->tx_assigned;
+	unsigned char rx = f54->rx_assigned;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	switch (f54->report_type) {
+	case F54_8BIT_IMAGE:
+		f54->report_size = tx * rx;
+		break;
+	case F54_16BIT_IMAGE:
+	case F54_RAW_16BIT_IMAGE:
+	case F54_TRUE_BASELINE:
+	case F54_FULL_RAW_CAP:
+	case F54_FULL_RAW_CAP_NO_RX_COUPLING:
+	case F54_SENSOR_SPEED:
+	case F54_AMP_FULL_RAW_CAP:
+	case F54_AMP_RAW_ADC:
+		/* Two bytes per cell. */
+		f54->report_size = 2 * tx * rx;
+		break;
+	case F54_HIGH_RESISTANCE:
+		f54->report_size = HIGH_RESISTANCE_DATA_SIZE;
+		break;
+	case F54_TX_TO_TX_SHORTS:
+	case F54_TX_OPENS:
+	case F54_TX_TO_GND_SHORTS:
+		/* One bit per TX channel, rounded up to whole bytes. */
+		f54->report_size = (tx + 7) / 8;
+		break;
+	case F54_RX_TO_RX_SHORTS_1:
+	case F54_RX_OPENS_1:
+		if (rx < tx)
+			f54->report_size = 2 * rx * rx;
+		else
+			f54->report_size = 2 * tx * rx;
+		break;
+	case F54_FULL_RAW_CAP_MIN_MAX:
+		f54->report_size = FULL_RAW_CAP_MIN_MAX_DATA_SIZE;
+		break;
+	case F54_RX_TO_RX_SHORTS_2:
+	case F54_RX_OPENS_2:
+		if (rx <= tx)
+			f54->report_size = 0;
+		else
+			f54->report_size = 2 * rx * (rx - tx);
+		break;
+	case F54_ADC_RANGE:
+		if (f54->query.has_signal_clarity) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					f54->control.reg_41->address,
+					f54->control.reg_41->data,
+					sizeof(f54->control.reg_41->data));
+			if (retval < 0) {
+				dev_dbg(rmi4_data->pdev->dev.parent,
+						"%s: Failed to read control reg_41\n",
+						__func__);
+				f54->report_size = 0;
+				break;
+			}
+			/*
+			 * With signal clarity enabled the TX count is
+			 * padded up to a multiple of 4.
+			 */
+			if (!f54->control.reg_41->no_signal_clarity) {
+				if (tx % 4)
+					tx += 4 - (tx % 4);
+			}
+		}
+		f54->report_size = 2 * tx * rx;
+		break;
+	case F54_TRX_OPENS:
+	case F54_TRX_TO_GND_SHORTS:
+	case F54_TRX_SHORTS:
+		f54->report_size = TRX_OPEN_SHORT_DATA_SIZE;
+		break;
+	case F54_ABS_RAW_CAP:
+	case F54_ABS_DELTA_CAP:
+	case F54_ABS_HYBRID_DELTA_CAP:
+	case F54_ABS_HYBRID_RAW_CAP:
+		/* Four bytes per channel, both axes. */
+		f54->report_size = 4 * (tx + rx);
+		break;
+	default:
+		f54->report_size = 0;
+	}
+}
+
+/*
+ * Route interrupt enables for report capture.
+ *
+ * set = true:  zero every function's interrupt enable register that has
+ *              a saved mask, then enable only F54's interrupt mask.
+ * set = false: clear F54's interrupt enable register first, then
+ *              restore the saved mask of every function.
+ *
+ * Returns 0 on success or the negative errno of the failing write.
+ * NOTE: the write ordering is deliberate — do not reorder.
+ */
+static int test_set_interrupt(bool set)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char zero = 0x00;
+	unsigned char *intr_mask;
+	unsigned short f01_ctrl_reg;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	intr_mask = rmi4_data->intr_mask;
+	/* F01 interrupt enable registers start one byte past the ctrl base. */
+	f01_ctrl_reg = rmi4_data->f01_ctrl_base_addr + 1 + f54->intr_reg_num;
+
+	if (!set) {
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				f01_ctrl_reg,
+				&zero,
+				sizeof(zero));
+		if (retval < 0)
+			return retval;
+	}
+
+	for (ii = 0; ii < rmi4_data->num_of_intr_regs; ii++) {
+		if (intr_mask[ii] != 0x00) {
+			f01_ctrl_reg = rmi4_data->f01_ctrl_base_addr + 1 + ii;
+			if (set) {
+				retval = synaptics_rmi4_reg_write(rmi4_data,
+						f01_ctrl_reg,
+						&zero,
+						sizeof(zero));
+				if (retval < 0)
+					return retval;
+			} else {
+				retval = synaptics_rmi4_reg_write(rmi4_data,
+						f01_ctrl_reg,
+						&(intr_mask[ii]),
+						sizeof(intr_mask[ii]));
+				if (retval < 0)
+					return retval;
+			}
+		}
+	}
+
+	f01_ctrl_reg = rmi4_data->f01_ctrl_base_addr + 1 + f54->intr_reg_num;
+
+	if (set) {
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				f01_ctrl_reg,
+				&f54->intr_mask,
+				1);
+		if (retval < 0)
+			return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * test_wait_for_command_completion() - poll the F54 command register until
+ * the firmware clears it (all bits zero), indicating the previously issued
+ * command has finished.
+ *
+ * Polls every 100 ms for up to COMMAND_TIMEOUT_100MS iterations.
+ *
+ * Returns 0 on completion, -ETIMEDOUT on timeout, or a negative errno if
+ * the register read fails.
+ */
+static int test_wait_for_command_completion(void)
+{
+	int retval;
+	unsigned char value;
+	unsigned char timeout_count;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	timeout_count = 0;
+	do {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->command_base_addr,
+				&value,
+				sizeof(value));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read command register\n",
+					__func__);
+			return retval;
+		}
+
+		/* firmware zeroes the command register when done */
+		if (value == 0x00)
+			break;
+
+		msleep(100);
+		timeout_count++;
+	} while (timeout_count < COMMAND_TIMEOUT_100MS);
+
+	if (timeout_count == COMMAND_TIMEOUT_100MS) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Timed out waiting for command completion\n",
+				__func__);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+/*
+ * test_do_command() - write @command to the F54 command register and block
+ * until the firmware reports completion.
+ *
+ * Returns 0 on success or a negative errno from the write or the
+ * completion wait.
+ */
+static int test_do_command(unsigned char command)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			f54->command_base_addr,
+			&command,
+			sizeof(command));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write command\n",
+				__func__);
+		return retval;
+	}
+
+	retval = test_wait_for_command_completion();
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+/*
+ * test_do_preparation() - put the controller into a state suitable for
+ * production-test report capture.
+ *
+ * Steps:
+ *  1. Force NO_SLEEP on in the F01 device-control register.
+ *  2. If ctrl 86 exists, enable the dynamic sense/display ratio.
+ *  3. Unless skip_preparation is set, apply per-report-type preparation:
+ *     - types listed explicitly need nothing further (or, for
+ *       F54_AMP_RAW_ADC, set the start_production_test bit in ctrl 188);
+ *     - all other types get CBC disabled, signal clarity disabled, then a
+ *       force-update and force-cal command.
+ *
+ * Returns 0 on success or a negative errno on any register access failure.
+ */
+static int test_do_preparation(void)
+{
+	int retval;
+	unsigned char value;
+	unsigned char zero = 0x00;
+	unsigned char device_ctrl;
+	struct f54_control_86 reg_86;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&device_ctrl,
+			sizeof(device_ctrl));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set no sleep\n",
+				__func__);
+		return retval;
+	}
+
+	/* keep the controller awake for the duration of the test */
+	device_ctrl |= NO_SLEEP_ON;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&device_ctrl,
+			sizeof(device_ctrl));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set no sleep\n",
+				__func__);
+		return retval;
+	}
+
+	if ((f54->query.has_query13) &&
+			(f54->query_13.has_ctrl86)) {
+		reg_86.data[0] = f54->control.reg_86->data[0];
+		reg_86.dynamic_sense_display_ratio = 1;
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				f54->control.reg_86->address,
+				reg_86.data,
+				sizeof(reg_86.data));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to set sense display ratio\n",
+					__func__);
+			return retval;
+		}
+	}
+
+	if (f54->skip_preparation)
+		return 0;
+
+	switch (f54->report_type) {
+	case F54_16BIT_IMAGE:
+	case F54_RAW_16BIT_IMAGE:
+	case F54_SENSOR_SPEED:
+	case F54_ADC_RANGE:
+	case F54_ABS_RAW_CAP:
+	case F54_ABS_DELTA_CAP:
+	case F54_ABS_HYBRID_DELTA_CAP:
+	case F54_ABS_HYBRID_RAW_CAP:
+		/* no extra preparation needed for these report types */
+		break;
+	case F54_AMP_RAW_ADC:
+		if (f54->query_49.has_ctrl188) {
+			/* read-modify-write ctrl 188 to start production test */
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					f54->control.reg_188->address,
+					f54->control.reg_188->data,
+					sizeof(f54->control.reg_188->data));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to set start production test\n",
+						__func__);
+				return retval;
+			}
+			f54->control.reg_188->start_production_test = 1;
+			retval = synaptics_rmi4_reg_write(rmi4_data,
+					f54->control.reg_188->address,
+					f54->control.reg_188->data,
+					sizeof(f54->control.reg_188->data));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to set start production test\n",
+						__func__);
+				return retval;
+			}
+		}
+		break;
+	default:
+		/* disable per-channel CBC depending on controller family */
+		if (f54->query.touch_controller_family == 1)
+			disable_cbc(reg_7);
+		else if (f54->query.has_ctrl88)
+			disable_cbc(reg_88);
+
+		if (f54->query.has_0d_acquisition_control)
+			disable_cbc(reg_57);
+
+		if ((f54->query.has_query15) &&
+				(f54->query_15.has_query25) &&
+				(f54->query_25.has_query27) &&
+				(f54->query_27.has_query29) &&
+				(f54->query_29.has_query30) &&
+				(f54->query_30.has_query32) &&
+				(f54->query_32.has_query33) &&
+				(f54->query_33.has_query36) &&
+				(f54->query_36.has_query38) &&
+				(f54->query_38.has_ctrl149)) {
+			/*
+			 * NOTE(review): the source is a single byte but the
+			 * length is sizeof(reg_149->data) - confirm ctrl 149
+			 * data is exactly one byte.
+			 */
+			retval = synaptics_rmi4_reg_write(rmi4_data,
+					f54->control.reg_149->address,
+					&zero,
+					sizeof(f54->control.reg_149->data));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to disable global CBC\n",
+						__func__);
+				return retval;
+			}
+		}
+
+		if (f54->query.has_signal_clarity) {
+			/*
+			 * NOTE(review): reads into a single-byte buffer with
+			 * length sizeof(reg_41->data) - confirm ctrl 41 data
+			 * is exactly one byte.
+			 */
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					f54->control.reg_41->address,
+					&value,
+					sizeof(f54->control.reg_41->data));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to disable signal clarity\n",
+						__func__);
+				return retval;
+			}
+			/* bit 0 set = no_signal_clarity (disable the feature) */
+			value |= 0x01;
+			retval = synaptics_rmi4_reg_write(rmi4_data,
+					f54->control.reg_41->address,
+					&value,
+					sizeof(f54->control.reg_41->data));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to disable signal clarity\n",
+						__func__);
+				return retval;
+			}
+		}
+
+		/* commit the control changes, then force a calibration */
+		retval = test_do_command(COMMAND_FORCE_UPDATE);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to do force update\n",
+					__func__);
+			return retval;
+		}
+
+		retval = test_do_command(COMMAND_FORCE_CAL);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to do force cal\n",
+					__func__);
+			return retval;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * test_do_afe_calibration() - run an AFE calibration and verify its CRC.
+ *
+ * @mode: F54_AFE_CAL for a normal AFE calibration, F54_AFE_IS_CAL for an
+ *        image-sensing calibration.
+ *
+ * Sets the matching start bit in ctrl 188, then polls once per second
+ * (up to CALIBRATION_TIMEOUT_S) for the firmware to clear it.  On
+ * completion, reads data 31 and checks the corresponding calibration CRC
+ * is zero.
+ *
+ * Returns 0 on success, -EBUSY on timeout, -EINVAL on a bad CRC, or a
+ * negative errno from register access.
+ */
+static int test_do_afe_calibration(enum f54_afe_cal mode)
+{
+	int retval;
+	unsigned char timeout = CALIBRATION_TIMEOUT_S;
+	unsigned char timeout_count = 0;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f54->control.reg_188->address,
+			f54->control.reg_188->data,
+			sizeof(f54->control.reg_188->data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to start calibration\n",
+				__func__);
+		return retval;
+	}
+
+	if (mode == F54_AFE_CAL)
+		f54->control.reg_188->start_calibration = 1;
+	else if (mode == F54_AFE_IS_CAL)
+		f54->control.reg_188->start_is_calibration = 1;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			f54->control.reg_188->address,
+			f54->control.reg_188->data,
+			sizeof(f54->control.reg_188->data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to start calibration\n",
+				__func__);
+		return retval;
+	}
+
+	/* firmware clears the start bit when calibration finishes */
+	do {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->control.reg_188->address,
+				f54->control.reg_188->data,
+				sizeof(f54->control.reg_188->data));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to complete calibration\n",
+					__func__);
+			return retval;
+		}
+
+		if (mode == F54_AFE_CAL) {
+			if (!f54->control.reg_188->start_calibration)
+				break;
+		} else if (mode == F54_AFE_IS_CAL) {
+			if (!f54->control.reg_188->start_is_calibration)
+				break;
+		}
+
+		if (timeout_count == timeout) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Timed out waiting for calibration completion\n",
+					__func__);
+			return -EBUSY;
+		}
+
+		timeout_count++;
+		msleep(1000);
+	} while (true);
+
+	/* check CRC */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f54->data_31.address,
+			f54->data_31.data,
+			sizeof(f54->data_31.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read calibration CRC\n",
+				__func__);
+		return retval;
+	}
+
+	/* a zero CRC field means the calibration data is valid */
+	if (mode == F54_AFE_CAL) {
+		if (f54->data_31.calibration_crc == 0)
+			return 0;
+	} else if (mode == F54_AFE_IS_CAL) {
+		if (f54->data_31.is_calibration_crc == 0)
+			return 0;
+	}
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Failed to read calibration CRC\n",
+			__func__);
+
+	return -EINVAL;
+}
+
+/*
+ * test_check_for_idle_status() - verify the F54 state machine is idle.
+ *
+ * Returns 0 when f54->status is STATUS_IDLE; logs and returns -EINVAL for
+ * STATUS_BUSY, STATUS_ERROR, or any unrecognized status value.
+ */
+static int test_check_for_idle_status(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	switch (f54->status) {
+	case STATUS_IDLE:
+		retval = 0;
+		break;
+	case STATUS_BUSY:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Status busy\n",
+				__func__);
+		retval = -EINVAL;
+		break;
+	case STATUS_ERROR:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Status error\n",
+				__func__);
+		retval = -EINVAL;
+		break;
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid status (%d)\n",
+				__func__, f54->status);
+		retval = -EINVAL;
+	}
+
+	return retval;
+}
+
+/*
+ * test_timeout_work() - watchdog work run when the get-report timer fires.
+ *
+ * If a report is still marked busy, inspect the command register:
+ *  - read failure or COMMAND_GET_REPORT still pending means the firmware
+ *    never completed (unsupported report type) -> mark STATUS_ERROR;
+ *  - otherwise the command completed, so queue the report-read work and
+ *    leave the status untouched.
+ */
+static void test_timeout_work(struct work_struct *work)
+{
+	int retval;
+	unsigned char command;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	mutex_lock(&f54->status_mutex);
+
+	if (f54->status == STATUS_BUSY) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->command_base_addr,
+				&command,
+				sizeof(command));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read command register\n",
+					__func__);
+		} else if (command & COMMAND_GET_REPORT) {
+			/* command bit still set: firmware never serviced it */
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Report type not supported by FW\n",
+					__func__);
+		} else {
+			/* command completed; go read the report data */
+			queue_work(f54->test_report_workqueue,
+					&f54->test_report_work);
+			goto exit;
+		}
+		f54->status = STATUS_ERROR;
+		f54->report_size = 0;
+	}
+
+exit:
+	mutex_unlock(&f54->status_mutex);
+
+	return;
+}
+
+/*
+ * test_get_report_timeout() - hrtimer callback for the get-report watchdog.
+ *
+ * Runs in timer (atomic) context, so it only schedules test_timeout_work()
+ * to do the register access from process context.  One-shot timer.
+ */
+static enum hrtimer_restart test_get_report_timeout(struct hrtimer *timer)
+{
+	schedule_work(&(f54->timeout_work));
+
+	return HRTIMER_NORESTART;
+}
+
+/* sysfs read: number of assigned TX electrodes. */
+static ssize_t test_sysfs_num_of_mapped_tx_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", f54->tx_assigned);
+}
+
+/* sysfs read: number of assigned RX electrodes. */
+static ssize_t test_sysfs_num_of_mapped_rx_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", f54->rx_assigned);
+}
+
+/*
+ * sysfs read: space-separated F55 TX electrode assignment table.
+ * Unassigned electrodes (0xff) are printed as "xx".  Requires F55;
+ * returns -EINVAL if it is not present.
+ */
+static ssize_t test_sysfs_tx_mapping_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int cnt;
+	int count = 0;
+	unsigned char ii;
+	unsigned char tx_num;
+	unsigned char tx_electrodes = f54->query.num_of_tx_electrodes;
+
+	if (!f55)
+		return -EINVAL;
+
+	for (ii = 0; ii < tx_electrodes; ii++) {
+		tx_num = f55->tx_assignment[ii];
+		if (tx_num == 0xff)
+			cnt = snprintf(buf, PAGE_SIZE - count, "xx ");
+		else
+			cnt = snprintf(buf, PAGE_SIZE - count, "%02u ", tx_num);
+		buf += cnt;
+		count += cnt;
+	}
+
+	snprintf(buf, PAGE_SIZE - count, "\n");
+	count++;
+
+	return count;
+}
+
+/*
+ * sysfs read: space-separated F55 RX electrode assignment table.
+ * Unassigned electrodes (0xff) are printed as "xx".  Requires F55;
+ * returns -EINVAL if it is not present.
+ */
+static ssize_t test_sysfs_rx_mapping_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int cnt;
+	int count = 0;
+	unsigned char ii;
+	unsigned char rx_num;
+	unsigned char rx_electrodes = f54->query.num_of_rx_electrodes;
+
+	if (!f55)
+		return -EINVAL;
+
+	for (ii = 0; ii < rx_electrodes; ii++) {
+		rx_num = f55->rx_assignment[ii];
+		if (rx_num == 0xff)
+			cnt = snprintf(buf, PAGE_SIZE - count, "xx ");
+		else
+			cnt = snprintf(buf, PAGE_SIZE - count, "%02u ", rx_num);
+		buf += cnt;
+		count += cnt;
+	}
+
+	snprintf(buf, PAGE_SIZE - count, "\n");
+	count++;
+
+	return count;
+}
+
+/* sysfs read: size in bytes of the most recently captured report. */
+static ssize_t test_sysfs_report_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", f54->report_size);
+}
+
+/* sysfs read: current F54 state-machine status, read under status_mutex. */
+static ssize_t test_sysfs_status_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+
+	mutex_lock(&f54->status_mutex);
+
+	retval = snprintf(buf, PAGE_SIZE, "%u\n", f54->status);
+
+	mutex_unlock(&f54->status_mutex);
+
+	return retval;
+}
+
+/*
+ * sysfs write: run test preparation.  Only the value "1" is accepted.
+ * Requires the state machine to be idle; holds status_mutex while
+ * preparing.  Returns @count on success or a negative errno.
+ */
+static ssize_t test_sysfs_do_preparation_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	if (setting != 1)
+		return -EINVAL;
+
+	mutex_lock(&f54->status_mutex);
+
+	retval = test_check_for_idle_status();
+	if (retval < 0)
+		goto exit;
+
+	retval = test_do_preparation();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to do preparation\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	mutex_unlock(&f54->status_mutex);
+
+	return retval;
+}
+
+/*
+ * sysfs write: issue a force-calibration command.  Only the value "1" is
+ * accepted and the state machine must be idle.  Returns @count on success
+ * or a negative errno.
+ */
+static ssize_t test_sysfs_force_cal_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	if (setting != 1)
+		return -EINVAL;
+
+	mutex_lock(&f54->status_mutex);
+
+	retval = test_check_for_idle_status();
+	if (retval < 0)
+		goto exit;
+
+	retval = test_do_command(COMMAND_FORCE_CAL);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to do force cal\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	mutex_unlock(&f54->status_mutex);
+
+	return retval;
+}
+
+/*
+ * sysfs write: kick off capture of the currently selected report type.
+ * Only the value "1" is accepted and the state machine must be idle.
+ *
+ * Switches interrupts to test mode, writes COMMAND_GET_REPORT, marks the
+ * state machine busy, and arms the GET_REPORT_TIMEOUT_S watchdog timer.
+ * Completion is handled by the interrupt/timeout paths.
+ *
+ * Returns @count on success or a negative errno.
+ */
+static ssize_t test_sysfs_get_report_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned char command;
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	if (setting != 1)
+		return -EINVAL;
+
+	mutex_lock(&f54->status_mutex);
+
+	retval = test_check_for_idle_status();
+	if (retval < 0)
+		goto exit;
+
+	if (!test_report_type_valid(f54->report_type)) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid report type\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	test_set_interrupt(true);
+
+	command = (unsigned char)COMMAND_GET_REPORT;
+
+	/*
+	 * NOTE(review): on write failure the test interrupt configuration is
+	 * left enabled; normal operation resumes via resume_touch.
+	 */
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			f54->command_base_addr,
+			&command,
+			sizeof(command));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write get report command\n",
+				__func__);
+		goto exit;
+	}
+
+	f54->status = STATUS_BUSY;
+	f54->report_size = 0;
+	f54->data_pos = 0;
+
+	/* watchdog catches reports the firmware never delivers */
+	hrtimer_start(&f54->watchdog,
+			ktime_set(GET_REPORT_TIMEOUT_S, 0),
+			HRTIMER_MODE_REL);
+
+	retval = count;
+
+exit:
+	mutex_unlock(&f54->status_mutex);
+
+	return retval;
+}
+
+/*
+ * sysfs write: undo test preparation and return to normal touch operation.
+ * Only the value "1" is accepted.
+ *
+ * Restores the saved F01 no-sleep setting, restores ctrl 86 if present,
+ * restores normal interrupt masks, then performs per-report-type cleanup
+ * (clear the ctrl 188 production-test bit for F54_AMP_RAW_ADC; reset the
+ * device for types that changed CBC/signal-clarity settings).
+ *
+ * Returns @count on success or a negative errno.
+ */
+static ssize_t test_sysfs_resume_touch_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned char device_ctrl;
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	if (setting != 1)
+		return -EINVAL;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&device_ctrl,
+			sizeof(device_ctrl));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to restore no sleep setting\n",
+				__func__);
+		return retval;
+	}
+
+	/* replace the forced NO_SLEEP with the pre-test setting */
+	device_ctrl = device_ctrl & ~NO_SLEEP_ON;
+	device_ctrl |= rmi4_data->no_sleep_setting;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&device_ctrl,
+			sizeof(device_ctrl));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to restore no sleep setting\n",
+				__func__);
+		return retval;
+	}
+
+	if ((f54->query.has_query13) &&
+			(f54->query_13.has_ctrl86)) {
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				f54->control.reg_86->address,
+				f54->control.reg_86->data,
+				sizeof(f54->control.reg_86->data));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to restore sense display ratio\n",
+					__func__);
+			return retval;
+		}
+	}
+
+	test_set_interrupt(false);
+
+	if (f54->skip_preparation)
+		return count;
+
+	switch (f54->report_type) {
+	case F54_16BIT_IMAGE:
+	case F54_RAW_16BIT_IMAGE:
+	case F54_SENSOR_SPEED:
+	case F54_ADC_RANGE:
+	case F54_ABS_RAW_CAP:
+	case F54_ABS_DELTA_CAP:
+	case F54_ABS_HYBRID_DELTA_CAP:
+	case F54_ABS_HYBRID_RAW_CAP:
+		/* nothing was changed for these types during preparation */
+		break;
+	case F54_AMP_RAW_ADC:
+		if (f54->query_49.has_ctrl188) {
+			/* clear the production-test bit set in preparation */
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					f54->control.reg_188->address,
+					f54->control.reg_188->data,
+					sizeof(f54->control.reg_188->data));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to set start production test\n",
+						__func__);
+				return retval;
+			}
+			f54->control.reg_188->start_production_test = 0;
+			retval = synaptics_rmi4_reg_write(rmi4_data,
+					f54->control.reg_188->address,
+					f54->control.reg_188->data,
+					sizeof(f54->control.reg_188->data));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to set start production test\n",
+						__func__);
+				return retval;
+			}
+		}
+		break;
+	default:
+		/* preparation altered CBC/signal clarity; reset to recover */
+		rmi4_data->reset_device(rmi4_data, false);
+	}
+
+	return count;
+}
+
+/*
+ * sysfs write: run an AFE calibration.  "0" selects F54_AFE_CAL and "1"
+ * selects F54_AFE_IS_CAL (the values map directly onto enum f54_afe_cal);
+ * anything else is rejected.  Requires ctrl 188 to exist.
+ *
+ * Returns @count on success or a negative errno.
+ */
+static ssize_t test_sysfs_do_afe_calibration_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	if (!f54->query_49.has_ctrl188) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: F54_ANALOG_Ctrl188 not found\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (setting == 0 || setting == 1)
+		retval = test_do_afe_calibration((enum f54_afe_cal)setting);
+	else
+		return -EINVAL;
+
+	if (retval)
+		return retval;
+	else
+		return count;
+}
+
+/* sysfs read: currently selected F54 report type. */
+static ssize_t test_sysfs_report_type_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", f54->report_type);
+}
+
+/*
+ * sysfs write: select the F54 report type.  Validates the value against
+ * the driver's supported set, records it in f54->report_type, and writes
+ * it to the first F54 data register.  Requires an idle state machine.
+ *
+ * Returns @count on success or a negative errno.
+ */
+static ssize_t test_sysfs_report_type_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned char data;
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	mutex_lock(&f54->status_mutex);
+
+	retval = test_check_for_idle_status();
+	if (retval < 0)
+		goto exit;
+
+	if (!test_report_type_valid((enum f54_report_types)setting)) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Report type not supported by driver\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	f54->report_type = (enum f54_report_types)setting;
+	data = (unsigned char)setting;
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			f54->data_base_addr,
+			&data,
+			sizeof(data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write report type\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	mutex_unlock(&f54->status_mutex);
+
+	return retval;
+}
+
+/*
+ * sysfs read: current report-data FIFO index.  Reads the two-byte
+ * little-endian index from the device and converts it with batohs().
+ */
+static ssize_t test_sysfs_fifoindex_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned char data[2];
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f54->data_base_addr + REPORT_INDEX_OFFSET,
+			data,
+			sizeof(data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read report index\n",
+				__func__);
+		return retval;
+	}
+
+	batohs(&f54->fifoindex, data);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", f54->fifoindex);
+}
+
+/*
+ * sysfs write: set the report-data FIFO index.  Converts the value to a
+ * two-byte device representation with hstoba() and writes it to the
+ * report-index register.  Returns @count on success or a negative errno.
+ */
+static ssize_t test_sysfs_fifoindex_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned char data[2];
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	f54->fifoindex = setting;
+
+	hstoba(data, (unsigned short)setting);
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			f54->data_base_addr + REPORT_INDEX_OFFSET,
+			data,
+			sizeof(data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write report index\n",
+				__func__);
+		return retval;
+	}
+
+	return count;
+}
+
+/* sysfs read: whether automatic calibration is disabled (cached flag). */
+static ssize_t test_sysfs_no_auto_cal_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", f54->no_auto_cal);
+}
+
+/*
+ * sysfs write: enable ("1") or disable ("0") the no-auto-cal control bit.
+ * Read-modify-writes CONTROL_NO_AUTO_CAL in the first F54 control
+ * register and caches the result in f54->no_auto_cal.
+ *
+ * Returns @count on success or a negative errno.
+ */
+static ssize_t test_sysfs_no_auto_cal_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned char data;
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	if (setting > 1)
+		return -EINVAL;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f54->control_base_addr,
+			&data,
+			sizeof(data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read no auto cal setting\n",
+				__func__);
+		return retval;
+	}
+
+	if (setting)
+		data |= CONTROL_NO_AUTO_CAL;
+	else
+		data &= ~CONTROL_NO_AUTO_CAL;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			f54->control_base_addr,
+			&data,
+			sizeof(data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write no auto cal setting\n",
+				__func__);
+		return retval;
+	}
+
+	f54->no_auto_cal = (setting == 1);
+
+	return count;
+}
+
+/*
+ * sysfs read: format the captured report data as human-readable text.
+ *
+ * The element width and layout depend on the report type:
+ *  - F54_8BIT_IMAGE: one signed byte per line;
+ *  - 16-bit image types: a tx x rx matrix of signed/unsigned 16-bit
+ *    values, preceded by the tx/rx counts;
+ *  - F54_HIGH_RESISTANCE / F54_FULL_RAW_CAP_MIN_MAX: 16-bit values, one
+ *    per line;
+ *  - absolute-capacitance types: 32-bit per-RX then per-TX vectors with
+ *    index headers;
+ *  - anything else: a raw hex dump of the report buffer.
+ *
+ * Assumes the caller already captured a report; reads f54->report_data
+ * without taking status_mutex.
+ */
+static ssize_t test_sysfs_read_report_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned int ii;
+	unsigned int jj;
+	int cnt;
+	int count = 0;
+	int tx_num = f54->tx_assigned;
+	int rx_num = f54->rx_assigned;
+	char *report_data_8;
+	short *report_data_16;
+	int *report_data_32;
+	unsigned short *report_data_u16;
+	unsigned int *report_data_u32;
+
+	switch (f54->report_type) {
+	case F54_8BIT_IMAGE:
+		report_data_8 = (char *)f54->report_data;
+		for (ii = 0; ii < f54->report_size; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "%03d: %d\n",
+					ii, *report_data_8);
+			report_data_8++;
+			buf += cnt;
+			count += cnt;
+		}
+		break;
+	case F54_AMP_RAW_ADC:
+		/* unsigned 16-bit tx x rx matrix */
+		report_data_u16 = (unsigned short *)f54->report_data;
+		cnt = snprintf(buf, PAGE_SIZE - count, "tx = %d\nrx = %d\n",
+				tx_num, rx_num);
+		buf += cnt;
+		count += cnt;
+
+		for (ii = 0; ii < tx_num; ii++) {
+			for (jj = 0; jj < (rx_num - 1); jj++) {
+				cnt = snprintf(buf, PAGE_SIZE - count, "%-4d ",
+						*report_data_u16);
+				report_data_u16++;
+				buf += cnt;
+				count += cnt;
+			}
+			/* last column ends the row with a newline */
+			cnt = snprintf(buf, PAGE_SIZE - count, "%-4d\n",
+					*report_data_u16);
+			report_data_u16++;
+			buf += cnt;
+			count += cnt;
+		}
+		break;
+	case F54_16BIT_IMAGE:
+	case F54_RAW_16BIT_IMAGE:
+	case F54_TRUE_BASELINE:
+	case F54_FULL_RAW_CAP:
+	case F54_FULL_RAW_CAP_NO_RX_COUPLING:
+	case F54_SENSOR_SPEED:
+	case F54_AMP_FULL_RAW_CAP:
+		/* signed 16-bit tx x rx matrix */
+		report_data_16 = (short *)f54->report_data;
+		cnt = snprintf(buf, PAGE_SIZE - count, "tx = %d\nrx = %d\n",
+				tx_num, rx_num);
+		buf += cnt;
+		count += cnt;
+
+		for (ii = 0; ii < tx_num; ii++) {
+			for (jj = 0; jj < (rx_num - 1); jj++) {
+				cnt = snprintf(buf, PAGE_SIZE - count, "%-4d ",
+						*report_data_16);
+				report_data_16++;
+				buf += cnt;
+				count += cnt;
+			}
+			cnt = snprintf(buf, PAGE_SIZE - count, "%-4d\n",
+					*report_data_16);
+			report_data_16++;
+			buf += cnt;
+			count += cnt;
+		}
+		break;
+	case F54_HIGH_RESISTANCE:
+	case F54_FULL_RAW_CAP_MIN_MAX:
+		/* flat list of 16-bit values (two bytes per entry) */
+		report_data_16 = (short *)f54->report_data;
+		for (ii = 0; ii < f54->report_size; ii += 2) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "%03d: %d\n",
+					ii / 2, *report_data_16);
+			report_data_16++;
+			buf += cnt;
+			count += cnt;
+		}
+		break;
+	case F54_ABS_RAW_CAP:
+		/* unsigned 32-bit per-RX then per-TX vectors */
+		report_data_u32 = (unsigned int *)f54->report_data;
+		cnt = snprintf(buf, PAGE_SIZE - count, "rx ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < rx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "     %2d", ii);
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+
+		cnt = snprintf(buf, PAGE_SIZE - count, "   ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < rx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "  %5u",
+					*report_data_u32);
+			report_data_u32++;
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+
+		cnt = snprintf(buf, PAGE_SIZE - count, "tx ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < tx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "     %2d", ii);
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+
+		cnt = snprintf(buf, PAGE_SIZE - count, "   ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < tx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "  %5u",
+					*report_data_u32);
+			report_data_u32++;
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+		break;
+	case F54_ABS_DELTA_CAP:
+	case F54_ABS_HYBRID_DELTA_CAP:
+	case F54_ABS_HYBRID_RAW_CAP:
+		/* signed 32-bit per-RX then per-TX vectors */
+		report_data_32 = (int *)f54->report_data;
+		cnt = snprintf(buf, PAGE_SIZE - count, "rx ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < rx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "     %2d", ii);
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+
+		cnt = snprintf(buf, PAGE_SIZE - count, "   ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < rx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "  %5d",
+					*report_data_32);
+			report_data_32++;
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+
+		cnt = snprintf(buf, PAGE_SIZE - count, "tx ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < tx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "     %2d", ii);
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+
+		cnt = snprintf(buf, PAGE_SIZE - count, "   ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < tx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "  %5d",
+					*report_data_32);
+			report_data_32++;
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+		break;
+	default:
+		/* unknown layout: raw hex dump, one byte per line */
+		for (ii = 0; ii < f54->report_size; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "%03d: 0x%02x\n",
+					ii, f54->report_data[ii]);
+			buf += cnt;
+			count += cnt;
+		}
+	}
+
+	snprintf(buf, PAGE_SIZE - count, "\n");
+	count++;
+
+	return count;
+}
+
+/*
+ * sysfs write: one-shot report capture convenience entry point.
+ *
+ * Chains the individual sysfs handlers: select report type (from @buf),
+ * do preparation, trigger get-report, poll up to GET_REPORT_TIMEOUT_S
+ * (in 100 ms steps) for the capture to finish, then resume touch.  On
+ * any failure the device is reset to recover.
+ *
+ * Returns @count on success or a negative errno.
+ */
+static ssize_t test_sysfs_read_report_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned char timeout = GET_REPORT_TIMEOUT_S * 10;
+	unsigned char timeout_count;
+	const char cmd[] = {'1', 0};
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = test_sysfs_report_type_store(dev, attr, buf, count);
+	if (retval < 0)
+		goto exit;
+
+	retval = test_sysfs_do_preparation_store(dev, attr, cmd, 1);
+	if (retval < 0)
+		goto exit;
+
+	retval = test_sysfs_get_report_store(dev, attr, cmd, 1);
+	if (retval < 0)
+		goto exit;
+
+	/* wait for the interrupt/timeout path to finish the capture */
+	timeout_count = 0;
+	do {
+		if (f54->status != STATUS_BUSY)
+			break;
+		msleep(100);
+		timeout_count++;
+	} while (timeout_count < timeout);
+
+	if ((f54->status != STATUS_IDLE) || (f54->report_size == 0)) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read report\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	retval = test_sysfs_resume_touch_store(dev, attr, cmd, 1);
+	if (retval < 0)
+		goto exit;
+
+	return count;
+
+exit:
+	/* recover the controller after a failed capture sequence */
+	rmi4_data->reset_device(rmi4_data, false);
+
+	return retval;
+}
+
+/*
+ * sysfs binary read: stream the raw captured report buffer to user space.
+ *
+ * Supports sequential reads via f54->data_pos; each call copies up to
+ * @count bytes of the remaining report data (bounds-checked with
+ * secure_memcpy against the allocated buffer size).  Requires an idle
+ * state machine and a previously captured report.
+ *
+ * Returns the number of bytes copied or a negative errno.
+ */
+static ssize_t test_sysfs_data_read(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	unsigned int read_size;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	mutex_lock(&f54->status_mutex);
+
+	retval = test_check_for_idle_status();
+	if (retval < 0)
+		goto exit;
+
+	if (!f54->report_data) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Report type %d data not available\n",
+				__func__, f54->report_type);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	/* clamp to the remaining unread portion of the report */
+	if ((f54->data_pos + count) > f54->report_size)
+		read_size = f54->report_size - f54->data_pos;
+	else
+		read_size = min_t(unsigned int, count, f54->report_size);
+
+	retval = secure_memcpy(buf, count, f54->report_data + f54->data_pos,
+			f54->data_buffer_size - f54->data_pos, read_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy report data\n",
+				__func__);
+		goto exit;
+	}
+	f54->data_pos += read_size;
+	retval = read_size;
+
+exit:
+	mutex_unlock(&f54->status_mutex);
+
+	return retval;
+}
+
+/*
+ * test_report_work() - workqueue handler that retrieves a completed
+ * report from the controller.
+ *
+ * Runs after the get-report command finishes: waits for command
+ * completion, computes the report size, (re)allocates the data buffer if
+ * it is too small, resets the report index to zero, and reads the full
+ * report into f54->report_data.  The final state-machine status
+ * (STATUS_IDLE or STATUS_ERROR) is published through f54->status after
+ * the mutex is dropped.
+ */
+static void test_report_work(struct work_struct *work)
+{
+	int retval;
+	unsigned char report_index[2];
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	mutex_lock(&f54->status_mutex);
+
+	if (f54->status != STATUS_BUSY) {
+		retval = STATUS_ERROR;
+		goto exit;
+	}
+
+	retval = test_wait_for_command_completion();
+	if (retval < 0) {
+		retval = STATUS_ERROR;
+		goto exit;
+	}
+
+	test_set_report_size();
+	if (f54->report_size == 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Report data size = 0\n",
+				__func__);
+		retval = STATUS_ERROR;
+		goto exit;
+	}
+
+	/* grow the data buffer if the new report does not fit */
+	if (f54->data_buffer_size < f54->report_size) {
+		if (f54->data_buffer_size)
+			kfree(f54->report_data);
+		f54->report_data = kzalloc(f54->report_size, GFP_KERNEL);
+		if (!f54->report_data) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for data buffer\n",
+					__func__);
+			f54->data_buffer_size = 0;
+			retval = STATUS_ERROR;
+			goto exit;
+		}
+		f54->data_buffer_size = f54->report_size;
+	}
+
+	/* start reading from the beginning of the report FIFO */
+	report_index[0] = 0;
+	report_index[1] = 0;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			f54->data_base_addr + REPORT_INDEX_OFFSET,
+			report_index,
+			sizeof(report_index));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write report data index\n",
+				__func__);
+		retval = STATUS_ERROR;
+		goto exit;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f54->data_base_addr + REPORT_DATA_OFFSET,
+			f54->report_data,
+			f54->report_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read report data\n",
+				__func__);
+		retval = STATUS_ERROR;
+		goto exit;
+	}
+
+	retval = STATUS_IDLE;
+
+exit:
+	mutex_unlock(&f54->status_mutex);
+
+	if (retval == STATUS_ERROR)
+		f54->report_size = 0;
+
+	f54->status = retval;
+
+	return;
+}
+
+/*
+ * test_remove_sysfs() - tear down the sysfs attribute group, binary
+ * report-data file, and the directory kobject created by test_set_sysfs().
+ */
+static void test_remove_sysfs(void)
+{
+	sysfs_remove_group(f54->sysfs_dir, &attr_group);
+	sysfs_remove_bin_file(f54->sysfs_dir, &test_report_data);
+	kobject_put(f54->sysfs_dir);
+
+	return;
+}
+
+/*
+ * test_set_sysfs() - create the test-reporting sysfs interface under the
+ * input device: a directory kobject, the binary report-data file, and
+ * the attribute group.
+ *
+ * Returns 0 on success or -ENODEV on any failure (individual errnos from
+ * the sysfs calls are not propagated).
+ */
+static int test_set_sysfs(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	f54->sysfs_dir = kobject_create_and_add(SYSFS_FOLDER_NAME,
+			&rmi4_data->input_dev->dev.kobj);
+	if (!f54->sysfs_dir) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs directory\n",
+				__func__);
+		goto exit_directory;
+	}
+
+	retval = sysfs_create_bin_file(f54->sysfs_dir, &test_report_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs bin file\n",
+				__func__);
+		goto exit_bin_file;
+	}
+
+	retval = sysfs_create_group(f54->sysfs_dir, &attr_group);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs attributes\n",
+				__func__);
+		goto exit_attributes;
+	}
+
+	return 0;
+
+	/*
+	 * NOTE(review): this label removes the group even though its
+	 * creation just failed - confirm sysfs_remove_group on a
+	 * partially/never-created group is intended here.
+	 */
+exit_attributes:
+	sysfs_remove_group(f54->sysfs_dir, &attr_group);
+	sysfs_remove_bin_file(f54->sysfs_dir, &test_report_data);
+
+exit_bin_file:
+	kobject_put(f54->sysfs_dir);
+
+exit_directory:
+	return -ENODEV;
+}
+
+/* Release every dynamically allocated F54 control register struct. */
+static void test_free_control_mem(void)
+{
+	/* kfree(NULL) is a no-op, so never-allocated registers are safe. */
+	kfree(f54->control.reg_7);
+	kfree(f54->control.reg_41);
+	kfree(f54->control.reg_57);
+	kfree(f54->control.reg_86);
+	kfree(f54->control.reg_88);
+	kfree(f54->control.reg_110);
+	kfree(f54->control.reg_149);
+	kfree(f54->control.reg_188);
+}
+
+/*
+ * test_set_data() - compute the addresses of optional F54 data registers.
+ *
+ * Data registers are packed back to back after the fixed report data
+ * registers; each optional register is present only when the matching
+ * query bit is set, so the address accumulator must advance in exact
+ * register-map order.  Only data register 31's address is recorded.
+ *
+ * NOTE(review): the data 7 and data 10 conditions use bitwise '|' on
+ * single-bit query flags; this behaves like logical '||' here, but
+ * '||' would be clearer.
+ */
+static void test_set_data(void)
+{
+	unsigned short reg_addr;
+
+	reg_addr = f54->data_base_addr + REPORT_DATA_OFFSET + 1;
+
+	/* data 4 */
+	if (f54->query.has_sense_frequency_control)
+		reg_addr++;
+
+	/* data 5 reserved */
+
+	/* data 6 */
+	if (f54->query.has_interference_metric)
+		reg_addr += 2;
+
+	/* data 7 */
+	if (f54->query.has_one_byte_report_rate |
+			f54->query.has_two_byte_report_rate)
+		reg_addr++;
+	if (f54->query.has_two_byte_report_rate)
+		reg_addr++;
+
+	/* data 8 */
+	if (f54->query.has_variance_metric)
+		reg_addr += 2;
+
+	/* data 9 */
+	if (f54->query.has_multi_metric_state_machine)
+		reg_addr += 2;
+
+	/* data 10 */
+	if (f54->query.has_multi_metric_state_machine |
+			f54->query.has_noise_state)
+		reg_addr++;
+
+	/* data 11 */
+	if (f54->query.has_status)
+		reg_addr++;
+
+	/* data 12 */
+	if (f54->query.has_slew_metric)
+		reg_addr += 2;
+
+	/* data 13 */
+	if (f54->query.has_multi_metric_state_machine)
+		reg_addr += 2;
+
+	/* data 14 */
+	if (f54->query_13.has_cidim)
+		reg_addr++;
+
+	/* data 15 */
+	if (f54->query_13.has_rail_im)
+		reg_addr++;
+
+	/* data 16 */
+	if (f54->query_13.has_noise_mitigation_enhancement)
+		reg_addr++;
+
+	/* data 17 */
+	if (f54->query_16.has_data17)
+		reg_addr++;
+
+	/* data 18 */
+	if (f54->query_21.has_query24_data18)
+		reg_addr++;
+
+	/* data 19 */
+	if (f54->query_21.has_data19)
+		reg_addr++;
+
+	/* data_20 */
+	if (f54->query_25.has_ctrl109)
+		reg_addr++;
+
+	/* data 21 */
+	if (f54->query_27.has_data21)
+		reg_addr++;
+
+	/* data 22 */
+	if (f54->query_27.has_data22)
+		reg_addr++;
+
+	/* data 23 */
+	if (f54->query_29.has_data23)
+		reg_addr++;
+
+	/* data 24 */
+	if (f54->query_32.has_data24)
+		reg_addr++;
+
+	/* data 25 */
+	if (f54->query_35.has_data25)
+		reg_addr++;
+
+	/* data 26 */
+	if (f54->query_35.has_data26)
+		reg_addr++;
+
+	/* data 27 */
+	if (f54->query_46.has_data27)
+		reg_addr++;
+
+	/* data 28 */
+	if (f54->query_46.has_data28)
+		reg_addr++;
+
+	/* data 29 30 reserved */
+
+	/* data 31 */
+	if (f54->query_49.has_data31) {
+		f54->data_31.address = reg_addr;
+		reg_addr++;
+	}
+
+	return;
+}
+
+/*
+ * test_set_controls() - walk the F54 control register map and record
+ * the addresses of the control registers the driver needs later.
+ *
+ * Control registers are packed back to back; each optional register is
+ * present only when the corresponding query bit is set, so the address
+ * accumulator must advance in exact register order.  Registers that are
+ * accessed later (7, 41, 57, 86, 88, 110, 149, 188) are allocated here
+ * and freed by test_free_control_mem().
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure, or a negative
+ * error code if reading control register 86 fails.
+ */
+static int test_set_controls(void)
+{
+	int retval;
+	unsigned char length;
+	unsigned char num_of_sensing_freqs;
+	unsigned short reg_addr = f54->control_base_addr;
+	struct f54_control *control = &f54->control;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	num_of_sensing_freqs = f54->query.number_of_sensing_frequencies;
+
+	/* control 0 */
+	reg_addr += CONTROL_0_SIZE;
+
+	/* control 1 */
+	if ((f54->query.touch_controller_family == 0) ||
+			(f54->query.touch_controller_family == 1))
+		reg_addr += CONTROL_1_SIZE;
+
+	/* control 2 */
+	reg_addr += CONTROL_2_SIZE;
+
+	/* control 3 */
+	if (f54->query.has_pixel_touch_threshold_adjustment)
+		reg_addr += CONTROL_3_SIZE;
+
+	/* controls 4 5 6 */
+	if ((f54->query.touch_controller_family == 0) ||
+			(f54->query.touch_controller_family == 1))
+		reg_addr += CONTROL_4_6_SIZE;
+
+	/* control 7 */
+	if (f54->query.touch_controller_family == 1) {
+		control->reg_7 = kzalloc(sizeof(*(control->reg_7)),
+				GFP_KERNEL);
+		if (!control->reg_7)
+			goto exit_no_mem;
+		control->reg_7->address = reg_addr;
+		reg_addr += CONTROL_7_SIZE;
+	}
+
+	/* controls 8 9 */
+	if ((f54->query.touch_controller_family == 0) ||
+			(f54->query.touch_controller_family == 1))
+		reg_addr += CONTROL_8_9_SIZE;
+
+	/* control 10 */
+	if (f54->query.has_interference_metric)
+		reg_addr += CONTROL_10_SIZE;
+
+	/* control 11 */
+	if (f54->query.has_ctrl11)
+		reg_addr += CONTROL_11_SIZE;
+
+	/* controls 12 13 */
+	if (f54->query.has_relaxation_control)
+		reg_addr += CONTROL_12_13_SIZE;
+
+	/* controls 14 15 16 */
+	if (f54->query.has_sensor_assignment) {
+		reg_addr += CONTROL_14_SIZE;
+		reg_addr += CONTROL_15_SIZE * f54->query.num_of_rx_electrodes;
+		reg_addr += CONTROL_16_SIZE * f54->query.num_of_tx_electrodes;
+	}
+
+	/* controls 17 18 19 */
+	if (f54->query.has_sense_frequency_control) {
+		reg_addr += CONTROL_17_SIZE * num_of_sensing_freqs;
+		reg_addr += CONTROL_18_SIZE * num_of_sensing_freqs;
+		reg_addr += CONTROL_19_SIZE * num_of_sensing_freqs;
+	}
+
+	/* control 20 */
+	reg_addr += CONTROL_20_SIZE;
+
+	/* control 21 */
+	if (f54->query.has_sense_frequency_control)
+		reg_addr += CONTROL_21_SIZE;
+
+	/* controls 22 23 24 25 26 */
+	if (f54->query.has_firmware_noise_mitigation)
+		reg_addr += CONTROL_22_26_SIZE;
+
+	/* control 27 */
+	if (f54->query.has_iir_filter)
+		reg_addr += CONTROL_27_SIZE;
+
+	/* control 28 */
+	if (f54->query.has_firmware_noise_mitigation)
+		reg_addr += CONTROL_28_SIZE;
+
+	/* control 29 */
+	if (f54->query.has_cmn_removal)
+		reg_addr += CONTROL_29_SIZE;
+
+	/* control 30 */
+	if (f54->query.has_cmn_maximum)
+		reg_addr += CONTROL_30_SIZE;
+
+	/* control 31 */
+	if (f54->query.has_touch_hysteresis)
+		reg_addr += CONTROL_31_SIZE;
+
+	/* controls 32 33 34 35 */
+	if (f54->query.has_edge_compensation)
+		reg_addr += CONTROL_32_35_SIZE;
+
+	/* control 36 */
+	if ((f54->query.curve_compensation_mode == 1) ||
+			(f54->query.curve_compensation_mode == 2)) {
+		if (f54->query.curve_compensation_mode == 1) {
+			length = max(f54->query.num_of_rx_electrodes,
+					f54->query.num_of_tx_electrodes);
+		} else if (f54->query.curve_compensation_mode == 2) {
+			length = f54->query.num_of_rx_electrodes;
+		}
+		/* length is always set here: the outer condition
+		 * guarantees one of the two branches above was taken */
+		reg_addr += CONTROL_36_SIZE * length;
+	}
+
+	/* control 37 */
+	if (f54->query.curve_compensation_mode == 2)
+		reg_addr += CONTROL_37_SIZE * f54->query.num_of_tx_electrodes;
+
+	/* controls 38 39 40 */
+	if (f54->query.has_per_frequency_noise_control) {
+		reg_addr += CONTROL_38_SIZE * num_of_sensing_freqs;
+		reg_addr += CONTROL_39_SIZE * num_of_sensing_freqs;
+		reg_addr += CONTROL_40_SIZE * num_of_sensing_freqs;
+	}
+
+	/* control 41 */
+	if (f54->query.has_signal_clarity) {
+		control->reg_41 = kzalloc(sizeof(*(control->reg_41)),
+				GFP_KERNEL);
+		if (!control->reg_41)
+			goto exit_no_mem;
+		control->reg_41->address = reg_addr;
+		reg_addr += CONTROL_41_SIZE;
+	}
+
+	/* control 42 */
+	if (f54->query.has_variance_metric)
+		reg_addr += CONTROL_42_SIZE;
+
+	/* controls 43 44 45 46 47 48 49 50 51 52 53 54 */
+	if (f54->query.has_multi_metric_state_machine)
+		reg_addr += CONTROL_43_54_SIZE;
+
+	/* controls 55 56 */
+	if (f54->query.has_0d_relaxation_control)
+		reg_addr += CONTROL_55_56_SIZE;
+
+	/* control 57 */
+	if (f54->query.has_0d_acquisition_control) {
+		control->reg_57 = kzalloc(sizeof(*(control->reg_57)),
+				GFP_KERNEL);
+		if (!control->reg_57)
+			goto exit_no_mem;
+		control->reg_57->address = reg_addr;
+		reg_addr += CONTROL_57_SIZE;
+	}
+
+	/* control 58 */
+	if (f54->query.has_0d_acquisition_control)
+		reg_addr += CONTROL_58_SIZE;
+
+	/* control 59 */
+	if (f54->query.has_h_blank)
+		reg_addr += CONTROL_59_SIZE;
+
+	/* controls 60 61 62 */
+	if ((f54->query.has_h_blank) ||
+			(f54->query.has_v_blank) ||
+			(f54->query.has_long_h_blank))
+		reg_addr += CONTROL_60_62_SIZE;
+
+	/* control 63 */
+	if ((f54->query.has_h_blank) ||
+			(f54->query.has_v_blank) ||
+			(f54->query.has_long_h_blank) ||
+			(f54->query.has_slew_metric) ||
+			(f54->query.has_slew_option) ||
+			(f54->query.has_noise_mitigation2))
+		reg_addr += CONTROL_63_SIZE;
+
+	/* controls 64 65 66 67 */
+	if (f54->query.has_h_blank)
+		reg_addr += CONTROL_64_67_SIZE * 7;
+	else if ((f54->query.has_v_blank) ||
+			(f54->query.has_long_h_blank))
+		reg_addr += CONTROL_64_67_SIZE;
+
+	/* controls 68 69 70 71 72 73 */
+	if ((f54->query.has_h_blank) ||
+			(f54->query.has_v_blank) ||
+			(f54->query.has_long_h_blank))
+		reg_addr += CONTROL_68_73_SIZE;
+
+	/* control 74 */
+	if (f54->query.has_slew_metric)
+		reg_addr += CONTROL_74_SIZE;
+
+	/* control 75 */
+	if (f54->query.has_enhanced_stretch)
+		reg_addr += CONTROL_75_SIZE * num_of_sensing_freqs;
+
+	/* control 76 */
+	if (f54->query.has_startup_fast_relaxation)
+		reg_addr += CONTROL_76_SIZE;
+
+	/* controls 77 78 */
+	if (f54->query.has_esd_control)
+		reg_addr += CONTROL_77_78_SIZE;
+
+	/* controls 79 80 81 82 83 */
+	if (f54->query.has_noise_mitigation2)
+		reg_addr += CONTROL_79_83_SIZE;
+
+	/* controls 84 85 */
+	if (f54->query.has_energy_ratio_relaxation)
+		reg_addr += CONTROL_84_85_SIZE;
+
+	/* control 86 */
+	if (f54->query_13.has_ctrl86) {
+		control->reg_86 = kzalloc(sizeof(*(control->reg_86)),
+				GFP_KERNEL);
+		if (!control->reg_86)
+			goto exit_no_mem;
+		control->reg_86->address = reg_addr;
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->control.reg_86->address,
+				f54->control.reg_86->data,
+				sizeof(f54->control.reg_86->data));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read sense display ratio\n",
+					__func__);
+			/* caller frees allocations via test_free_control_mem() */
+			return retval;
+		}
+		reg_addr += CONTROL_86_SIZE;
+	}
+
+	/* control 87 */
+	if (f54->query_13.has_ctrl87)
+		reg_addr += CONTROL_87_SIZE;
+
+	/* control 88 */
+	if (f54->query.has_ctrl88) {
+		control->reg_88 = kzalloc(sizeof(*(control->reg_88)),
+				GFP_KERNEL);
+		if (!control->reg_88)
+			goto exit_no_mem;
+		control->reg_88->address = reg_addr;
+		reg_addr += CONTROL_88_SIZE;
+	}
+
+	/* control 89 */
+	if (f54->query_13.has_cidim ||
+			f54->query_13.has_noise_mitigation_enhancement ||
+			f54->query_13.has_rail_im)
+		reg_addr += CONTROL_89_SIZE;
+
+	/* control 90 */
+	if (f54->query_15.has_ctrl90)
+		reg_addr += CONTROL_90_SIZE;
+
+	/* control 91 */
+	if (f54->query_21.has_ctrl91)
+		reg_addr += CONTROL_91_SIZE;
+
+	/* control 92 */
+	if (f54->query_16.has_ctrl92)
+		reg_addr += CONTROL_92_SIZE;
+
+	/* control 93 */
+	if (f54->query_16.has_ctrl93)
+		reg_addr += CONTROL_93_SIZE;
+
+	/* control 94 */
+	if (f54->query_16.has_ctrl94_query18)
+		reg_addr += CONTROL_94_SIZE;
+
+	/* control 95 */
+	if (f54->query_16.has_ctrl95_query19)
+		reg_addr += CONTROL_95_SIZE;
+
+	/* control 96 */
+	if (f54->query_21.has_ctrl96)
+		reg_addr += CONTROL_96_SIZE;
+
+	/* control 97 */
+	if (f54->query_21.has_ctrl97)
+		reg_addr += CONTROL_97_SIZE;
+
+	/* control 98 */
+	if (f54->query_21.has_ctrl98)
+		reg_addr += CONTROL_98_SIZE;
+
+	/* control 99 */
+	if (f54->query.touch_controller_family == 2)
+		reg_addr += CONTROL_99_SIZE;
+
+	/* control 100 */
+	if (f54->query_16.has_ctrl100)
+		reg_addr += CONTROL_100_SIZE;
+
+	/* control 101 */
+	if (f54->query_22.has_ctrl101)
+		reg_addr += CONTROL_101_SIZE;
+
+
+	/* control 102 */
+	if (f54->query_23.has_ctrl102)
+		reg_addr += CONTROL_102_SIZE;
+
+	/* control 103 */
+	if (f54->query_22.has_ctrl103_query26) {
+		f54->skip_preparation = true;
+		reg_addr += CONTROL_103_SIZE;
+	}
+
+	/* control 104 */
+	if (f54->query_22.has_ctrl104)
+		reg_addr += CONTROL_104_SIZE;
+
+	/* control 105 */
+	if (f54->query_22.has_ctrl105)
+		reg_addr += CONTROL_105_SIZE;
+
+	/* control 106 */
+	if (f54->query_25.has_ctrl106)
+		reg_addr += CONTROL_106_SIZE;
+
+	/* control 107 */
+	if (f54->query_25.has_ctrl107)
+		reg_addr += CONTROL_107_SIZE;
+
+	/* control 108 */
+	if (f54->query_25.has_ctrl108)
+		reg_addr += CONTROL_108_SIZE;
+
+	/* control 109 */
+	if (f54->query_25.has_ctrl109)
+		reg_addr += CONTROL_109_SIZE;
+
+	/* control 110 */
+	if (f54->query_27.has_ctrl110) {
+		control->reg_110 = kzalloc(sizeof(*(control->reg_110)),
+				GFP_KERNEL);
+		if (!control->reg_110)
+			goto exit_no_mem;
+		control->reg_110->address = reg_addr;
+		reg_addr += CONTROL_110_SIZE;
+	}
+
+	/* control 111 */
+	if (f54->query_27.has_ctrl111)
+		reg_addr += CONTROL_111_SIZE;
+
+	/* control 112 */
+	if (f54->query_27.has_ctrl112)
+		reg_addr += CONTROL_112_SIZE;
+
+	/* control 113 */
+	if (f54->query_27.has_ctrl113)
+		reg_addr += CONTROL_113_SIZE;
+
+	/* control 114 */
+	if (f54->query_27.has_ctrl114)
+		reg_addr += CONTROL_114_SIZE;
+
+	/* control 115 */
+	if (f54->query_29.has_ctrl115)
+		reg_addr += CONTROL_115_SIZE;
+
+	/* control 116 */
+	if (f54->query_29.has_ctrl116)
+		reg_addr += CONTROL_116_SIZE;
+
+	/* control 117 */
+	if (f54->query_29.has_ctrl117)
+		reg_addr += CONTROL_117_SIZE;
+
+	/* control 118 */
+	if (f54->query_30.has_ctrl118)
+		reg_addr += CONTROL_118_SIZE;
+
+	/* control 119 */
+	if (f54->query_30.has_ctrl119)
+		reg_addr += CONTROL_119_SIZE;
+
+	/* control 120 */
+	if (f54->query_30.has_ctrl120)
+		reg_addr += CONTROL_120_SIZE;
+
+	/* control 121 */
+	if (f54->query_30.has_ctrl121)
+		reg_addr += CONTROL_121_SIZE;
+
+	/* control 122 */
+	if (f54->query_30.has_ctrl122_query31)
+		reg_addr += CONTROL_122_SIZE;
+
+	/* control 123 */
+	if (f54->query_30.has_ctrl123)
+		reg_addr += CONTROL_123_SIZE;
+
+	/* control 124 reserved */
+
+	/* control 125 */
+	if (f54->query_32.has_ctrl125)
+		reg_addr += CONTROL_125_SIZE;
+
+	/* control 126 */
+	if (f54->query_32.has_ctrl126)
+		reg_addr += CONTROL_126_SIZE;
+
+	/* control 127 */
+	if (f54->query_32.has_ctrl127)
+		reg_addr += CONTROL_127_SIZE;
+
+	/* controls 128 129 130 131 reserved */
+
+	/* control 132 */
+	if (f54->query_33.has_ctrl132)
+		reg_addr += CONTROL_132_SIZE;
+
+	/* control 133 */
+	if (f54->query_33.has_ctrl133)
+		reg_addr += CONTROL_133_SIZE;
+
+	/* control 134 */
+	if (f54->query_33.has_ctrl134)
+		reg_addr += CONTROL_134_SIZE;
+
+	/* controls 135 136 reserved */
+
+	/* control 137 */
+	if (f54->query_35.has_ctrl137)
+		reg_addr += CONTROL_137_SIZE;
+
+	/* control 138 */
+	if (f54->query_35.has_ctrl138)
+		reg_addr += CONTROL_138_SIZE;
+
+	/* control 139 */
+	if (f54->query_35.has_ctrl139)
+		reg_addr += CONTROL_139_SIZE;
+
+	/* control 140 */
+	if (f54->query_35.has_ctrl140)
+		reg_addr += CONTROL_140_SIZE;
+
+	/* control 141 reserved */
+
+	/* control 142 */
+	if (f54->query_36.has_ctrl142)
+		reg_addr += CONTROL_142_SIZE;
+
+	/* control 143 */
+	if (f54->query_36.has_ctrl143)
+		reg_addr += CONTROL_143_SIZE;
+
+	/* control 144 */
+	if (f54->query_36.has_ctrl144)
+		reg_addr += CONTROL_144_SIZE;
+
+	/* control 145 */
+	if (f54->query_36.has_ctrl145)
+		reg_addr += CONTROL_145_SIZE;
+
+	/* control 146 */
+	if (f54->query_36.has_ctrl146)
+		reg_addr += CONTROL_146_SIZE;
+
+	/* control 147 */
+	if (f54->query_38.has_ctrl147)
+		reg_addr += CONTROL_147_SIZE;
+
+	/* control 148 */
+	if (f54->query_38.has_ctrl148)
+		reg_addr += CONTROL_148_SIZE;
+
+	/* control 149 */
+	if (f54->query_38.has_ctrl149) {
+		control->reg_149 = kzalloc(sizeof(*(control->reg_149)),
+				GFP_KERNEL);
+		if (!control->reg_149)
+			goto exit_no_mem;
+		control->reg_149->address = reg_addr;
+		reg_addr += CONTROL_149_SIZE;
+	}
+
+	/* controls 150 to 162 reserved */
+
+	/* control 163 */
+	if (f54->query_40.has_ctrl163_query41)
+		reg_addr += CONTROL_163_SIZE;
+
+	/* control 164 reserved */
+
+	/* control 165 */
+	if (f54->query_40.has_ctrl165_query42)
+		reg_addr += CONTROL_165_SIZE;
+
+	/* control 166 reserved */
+
+	/* control 167 */
+	if (f54->query_40.has_ctrl167)
+		reg_addr += CONTROL_167_SIZE;
+
+	/* controls 168 to 175 reserved */
+
+	/* control 176 */
+	if (f54->query_46.has_ctrl176)
+		reg_addr += CONTROL_176_SIZE;
+
+	/* controls 177 178 reserved */
+
+	/* control 179 */
+	if (f54->query_46.has_ctrl179)
+		reg_addr += CONTROL_179_SIZE;
+
+	/* controls 180 to 187 reserved */
+
+	/* control 188 */
+	if (f54->query_49.has_ctrl188) {
+		control->reg_188 = kzalloc(sizeof(*(control->reg_188)),
+				GFP_KERNEL);
+		if (!control->reg_188)
+			goto exit_no_mem;
+		control->reg_188->address = reg_addr;
+		reg_addr += CONTROL_188_SIZE;
+	}
+
+	return 0;
+
+exit_no_mem:
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Failed to alloc mem for control registers\n",
+			__func__);
+	return -ENOMEM;
+}
+
+/*
+ * test_set_queries() - read the F54 query registers.
+ *
+ * The fixed base query block is read first; the optional query
+ * registers that follow are then read (or skipped) one at a time, with
+ * 'offset' tracking the running distance from the query base address.
+ * Whether each register exists depends on bits in earlier query
+ * registers, so the order of these steps must match the register map
+ * exactly.
+ *
+ * Returns 0 on success or the negative error code of a failed read.
+ */
+static int test_set_queries(void)
+{
+	int retval;
+	unsigned char offset;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f54->query_base_addr,
+			f54->query.data,
+			sizeof(f54->query.data));
+	if (retval < 0)
+		return retval;
+
+	offset = sizeof(f54->query.data);
+
+	/* query 12 */
+	if (f54->query.has_sense_frequency_control == 0)
+		offset -= 1;
+
+	/* query 13 */
+	if (f54->query.has_query13) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_13.data,
+				sizeof(f54->query_13.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 14 */
+	if (f54->query_13.has_ctrl87)
+		offset += 1;
+
+	/* query 15 */
+	if (f54->query.has_query15) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_15.data,
+				sizeof(f54->query_15.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 16 */
+	if (f54->query_15.has_query16) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_16.data,
+				sizeof(f54->query_16.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 17 */
+	if (f54->query_16.has_query17)
+		offset += 1;
+
+	/* query 18 */
+	if (f54->query_16.has_ctrl94_query18)
+		offset += 1;
+
+	/* query 19 */
+	if (f54->query_16.has_ctrl95_query19)
+		offset += 1;
+
+	/* query 20 */
+	if (f54->query_15.has_query20)
+		offset += 1;
+
+	/* query 21 */
+	if (f54->query_15.has_query21) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_21.data,
+				sizeof(f54->query_21.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 22 */
+	if (f54->query_15.has_query22) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_22.data,
+				sizeof(f54->query_22.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 23 */
+	if (f54->query_22.has_query23) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_23.data,
+				sizeof(f54->query_23.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 24 */
+	if (f54->query_21.has_query24_data18)
+		offset += 1;
+
+	/* query 25 */
+	if (f54->query_15.has_query25) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_25.data,
+				sizeof(f54->query_25.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 26 */
+	if (f54->query_22.has_ctrl103_query26)
+		offset += 1;
+
+	/* query 27 */
+	if (f54->query_25.has_query27) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_27.data,
+				sizeof(f54->query_27.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 28 */
+	if (f54->query_22.has_query28)
+		offset += 1;
+
+	/* query 29 */
+	if (f54->query_27.has_query29) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_29.data,
+				sizeof(f54->query_29.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 30 */
+	if (f54->query_29.has_query30) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_30.data,
+				sizeof(f54->query_30.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 31 */
+	if (f54->query_30.has_ctrl122_query31)
+		offset += 1;
+
+	/* query 32 */
+	if (f54->query_30.has_query32) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_32.data,
+				sizeof(f54->query_32.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 33 */
+	if (f54->query_32.has_query33) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_33.data,
+				sizeof(f54->query_33.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 34 */
+	if (f54->query_32.has_query34)
+		offset += 1;
+
+	/* query 35 */
+	if (f54->query_32.has_query35) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_35.data,
+				sizeof(f54->query_35.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 36 */
+	if (f54->query_33.has_query36) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_36.data,
+				sizeof(f54->query_36.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 37 */
+	if (f54->query_36.has_query37)
+		offset += 1;
+
+	/* query 38 */
+	if (f54->query_36.has_query38) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_38.data,
+				sizeof(f54->query_38.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 39 */
+	if (f54->query_38.has_query39) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_39.data,
+				sizeof(f54->query_39.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 40 */
+	if (f54->query_39.has_query40) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_40.data,
+				sizeof(f54->query_40.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 41 */
+	if (f54->query_40.has_ctrl163_query41)
+		offset += 1;
+
+	/* query 42 */
+	if (f54->query_40.has_ctrl165_query42)
+		offset += 1;
+
+	/* query 43 */
+	if (f54->query_40.has_query43) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_43.data,
+				sizeof(f54->query_43.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* queries 44 45 reserved */
+
+	/* query 46 */
+	if (f54->query_43.has_query46) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_46.data,
+				sizeof(f54->query_46.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 47 */
+	if (f54->query_46.has_query47) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_47.data,
+				sizeof(f54->query_47.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 48 reserved */
+
+	/* query 49 */
+	if (f54->query_47.has_query49) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_49.data,
+				sizeof(f54->query_49.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 50 */
+	if (f54->query_49.has_query50) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_50.data,
+				sizeof(f54->query_50.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 51 */
+	if (f54->query_50.has_query51) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_51.data,
+				sizeof(f54->query_51.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Record the F54 register base addresses and interrupt bookkeeping
+ * derived from the PDT entry found on the given page.
+ */
+static void test_f54_set_regs(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn_desc *fd,
+		unsigned int intr_count,
+		unsigned char page)
+{
+	unsigned char src;
+	unsigned char bit;
+
+	/* Fold the page number into the 16-bit register addresses. */
+	f54->query_base_addr = fd->query_base_addr | (page << 8);
+	f54->control_base_addr = fd->ctrl_base_addr | (page << 8);
+	f54->data_base_addr = fd->data_base_addr | (page << 8);
+	f54->command_base_addr = fd->cmd_base_addr | (page << 8);
+
+	/* Index of the interrupt status register holding our sources. */
+	f54->intr_reg_num = (intr_count + 7) / 8;
+	if (f54->intr_reg_num != 0)
+		f54->intr_reg_num -= 1;
+
+	/* Build a mask covering this function's interrupt source bits. */
+	f54->intr_mask = 0;
+	bit = intr_count % 8;
+	for (src = 0; src < fd->intr_src_count; src++)
+		f54->intr_mask |= 1 << (bit + src);
+}
+
+/*
+ * test_f55_set_queries() - read the F55 query registers.
+ *
+ * Reads the fixed base query block, then walks the optional query
+ * registers with 'offset' tracking the distance from the query base
+ * address; presence of each register depends on earlier query bits,
+ * so the step order must match the register map.  When query 23 is
+ * present, the amp sensor flag and column2mux size are cached on the
+ * f55 handle for test_f55_init().
+ *
+ * Returns 0 on success or the negative error code of a failed read.
+ */
+static int test_f55_set_queries(void)
+{
+	int retval;
+	unsigned char offset;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f55->query_base_addr,
+			f55->query.data,
+			sizeof(f55->query.data));
+	if (retval < 0)
+		return retval;
+
+	offset = sizeof(f55->query.data);
+
+	/* query 3 */
+	if (f55->query.has_single_layer_multi_touch) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_3.data,
+				sizeof(f55->query_3.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 4 */
+	if ((f55->query.has_single_layer_multi_touch) &&
+			(f55->query_3.has_ctrl9))
+		offset += 1;
+
+	/* query 5 */
+	if (f55->query.has_query5) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_5.data,
+				sizeof(f55->query_5.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* queries 6 7 */
+	if (f55->query.curve_compensation_mode == 0x3)
+		offset += 2;
+
+	/* query 8 */
+	if ((f55->query.has_single_layer_multi_touch) &&
+			f55->query_3.has_ctrl8)
+		offset += 1;
+
+	/* query 9 */
+	if ((f55->query.has_single_layer_multi_touch) &&
+			f55->query_3.has_query9)
+		offset += 1;
+
+	/* queries 10 11 12 13 14 15 16 */
+	if ((f55->query.has_query5) && (f55->query_5.has_basis_function))
+		offset += 7;
+
+	/* query 17 */
+	if ((f55->query.has_query5) && (f55->query_5.has_query17)) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_17.data,
+				sizeof(f55->query_17.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 18 */
+	if ((f55->query.has_query5) &&
+			(f55->query_5.has_query17) &&
+			(f55->query_17.has_query18)) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_18.data,
+				sizeof(f55->query_18.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 22 */
+	if ((f55->query.has_query5) &&
+			(f55->query_5.has_query17) &&
+			(f55->query_17.has_query18) &&
+			(f55->query_18.has_query22)) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_22.data,
+				sizeof(f55->query_22.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 23 */
+	if ((f55->query.has_query5) &&
+			(f55->query_5.has_query17) &&
+			(f55->query_17.has_query18) &&
+			(f55->query_18.has_query22) &&
+			(f55->query_22.has_query23)) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_23.data,
+				sizeof(f55->query_23.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+
+		f55->amp_sensor = f55->query_23.amp_sensor_enabled;
+		f55->size_of_column2mux = f55->query_23.size_of_column2mux;
+	}
+
+	return 0;
+}
+
+/*
+ * test_f55_init() - read the F55 sensor query registers and derive the
+ * number of assigned tx/rx electrodes.
+ *
+ * On any failure the function logs an error and returns early, leaving
+ * the electrode counts at their F54 defaults.  The assignment buffers
+ * are owned by the f55 handle and freed in synaptics_rmi4_test_remove().
+ */
+static void test_f55_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char rx_electrodes = f54->query.num_of_rx_electrodes;
+	unsigned char tx_electrodes = f54->query.num_of_tx_electrodes;
+
+	retval = test_f55_set_queries();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read f55 query registers\n",
+				__func__);
+		return;
+	}
+
+	if (!f55->query.has_sensor_assignment)
+		return;
+
+	f55->tx_assignment = kzalloc(tx_electrodes, GFP_KERNEL);
+	f55->rx_assignment = kzalloc(rx_electrodes, GFP_KERNEL);
+	if (!f55->tx_assignment || !f55->rx_assignment) {
+		/*
+		 * Bail out before dereferencing a NULL buffer below; any
+		 * successful allocation is freed later with the f55 handle.
+		 */
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for electrode assignments\n",
+				__func__);
+		return;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f55->control_base_addr + SENSOR_TX_MAPPING_OFFSET,
+			f55->tx_assignment,
+			tx_electrodes);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read f55 tx assignment\n",
+				__func__);
+		return;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f55->control_base_addr + SENSOR_RX_MAPPING_OFFSET,
+			f55->rx_assignment,
+			rx_electrodes);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read f55 rx assignment\n",
+				__func__);
+		return;
+	}
+
+	/* An entry of 0xff marks an unassigned electrode. */
+	f54->tx_assigned = 0;
+	for (ii = 0; ii < tx_electrodes; ii++) {
+		if (f55->tx_assignment[ii] != 0xff)
+			f54->tx_assigned++;
+	}
+
+	f54->rx_assigned = 0;
+	for (ii = 0; ii < rx_electrodes; ii++) {
+		if (f55->rx_assignment[ii] != 0xff)
+			f54->rx_assigned++;
+	}
+
+	/* Amp sensors pair rx lines and mux tx through column2mux. */
+	if (f55->amp_sensor) {
+		f54->tx_assigned = f55->size_of_column2mux;
+		f54->rx_assigned /= 2;
+	}
+}
+
+/*
+ * Allocate the F55 handle and record its register base addresses from
+ * the PDT entry found on the given page.
+ */
+static void test_f55_set_regs(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn_desc *fd,
+		unsigned char page)
+{
+	unsigned short page_base = page << 8;
+
+	f55 = kzalloc(sizeof(*f55), GFP_KERNEL);
+	if (!f55) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for f55\n",
+				__func__);
+		return;
+	}
+
+	f55->query_base_addr = fd->query_base_addr | page_base;
+	f55->control_base_addr = fd->ctrl_base_addr | page_base;
+	f55->data_base_addr = fd->data_base_addr | page_base;
+	f55->command_base_addr = fd->cmd_base_addr | page_base;
+}
+
+/*
+ * test_scan_pdt() - scan the Page Description Table for F54 and F55.
+ *
+ * Walks each page's PDT entries from PDT_START downward, accumulating
+ * the interrupt source count so F54's interrupt mask can be derived
+ * from its position in the table.
+ *
+ * Returns 0 on success (F55 is optional), -EINVAL if F54 was not
+ * found, or a negative error code on a register read failure.
+ */
+static int test_scan_pdt(void)
+{
+	int retval;
+	unsigned char intr_count = 0;
+	unsigned char page;
+	unsigned short addr;
+	bool f54found = false;
+	bool f55found = false;
+	struct synaptics_rmi4_fn_desc rmi_fd;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	for (page = 0; page < PAGES_TO_SERVICE; page++) {
+		for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
+			/* fold the page number into the address for the read */
+			addr |= (page << 8);
+
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					addr,
+					(unsigned char *)&rmi_fd,
+					sizeof(rmi_fd));
+			if (retval < 0)
+				return retval;
+
+			/* strip the page bits again for the loop arithmetic */
+			addr &= ~(MASK_8BIT << 8);
+
+			/* an empty entry ends this page's table */
+			if (!rmi_fd.fn_number)
+				break;
+
+			switch (rmi_fd.fn_number) {
+			case SYNAPTICS_RMI4_F54:
+				test_f54_set_regs(rmi4_data,
+						&rmi_fd, intr_count, page);
+				f54found = true;
+				break;
+			case SYNAPTICS_RMI4_F55:
+				test_f55_set_regs(rmi4_data,
+						&rmi_fd, page);
+				f55found = true;
+				break;
+			default:
+				break;
+			}
+
+			if (f54found && f55found)
+				goto pdt_done;
+
+			intr_count += rmi_fd.intr_src_count;
+		}
+	}
+
+	if (!f54found) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to find F54\n",
+				__func__);
+		return -EINVAL;
+	}
+
+pdt_done:
+	return 0;
+}
+
+/* Attention hook: queue report processing when an F54 source fired. */
+static void synaptics_rmi4_test_attn(struct synaptics_rmi4_data *rmi4_data,
+		unsigned char intr_mask)
+{
+	if (f54 && (f54->intr_mask & intr_mask))
+		queue_work(f54->test_report_workqueue, &f54->test_report_work);
+}
+
+/*
+ * synaptics_rmi4_test_init() - set up the F54 test reporting module.
+ *
+ * Allocates the f54 handle, scans the PDT for F54/F55, reads the query
+ * registers, maps the control/data registers, creates the sysfs
+ * interface and the report workqueue, and initializes the watchdog
+ * timer and status tracking.
+ *
+ * Returns 0 on success (or if already initialized) and a negative
+ * error code on failure, with all partial setup undone.
+ */
+static int synaptics_rmi4_test_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+
+	if (f54) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	f54 = kzalloc(sizeof(*f54), GFP_KERNEL);
+	if (!f54) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for f54\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	f54->rmi4_data = rmi4_data;
+
+	f55 = NULL;
+
+	retval = test_scan_pdt();
+	if (retval < 0)
+		goto exit_free_mem;
+
+	retval = test_set_queries();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read f54 query registers\n",
+				__func__);
+		goto exit_free_mem;
+	}
+
+	/* Defaults; refined by test_f55_init() when F55 is present. */
+	f54->tx_assigned = f54->query.num_of_tx_electrodes;
+	f54->rx_assigned = f54->query.num_of_rx_electrodes;
+
+	retval = test_set_controls();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set up f54 control registers\n",
+				__func__);
+		goto exit_free_control;
+	}
+
+	test_set_data();
+
+	if (f55)
+		test_f55_init(rmi4_data);
+
+	if (rmi4_data->external_afe_buttons)
+		f54->tx_assigned++;
+
+	retval = test_set_sysfs();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs entries\n",
+				__func__);
+		goto exit_sysfs;
+	}
+
+	/*
+	 * Check the workqueue allocation: queue_work() on a NULL
+	 * workqueue from the attention handler would oops.
+	 */
+	f54->test_report_workqueue =
+			create_singlethread_workqueue("test_report_workqueue");
+	if (!f54->test_report_workqueue) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create test report workqueue\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_remove_sysfs;
+	}
+	INIT_WORK(&f54->test_report_work, test_report_work);
+
+	hrtimer_init(&f54->watchdog, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	f54->watchdog.function = test_get_report_timeout;
+	INIT_WORK(&f54->timeout_work, test_timeout_work);
+
+	mutex_init(&f54->status_mutex);
+	f54->status = STATUS_IDLE;
+
+	return 0;
+
+exit_remove_sysfs:
+	test_remove_sysfs();
+
+exit_sysfs:
+	if (f55) {
+		kfree(f55->tx_assignment);
+		kfree(f55->rx_assignment);
+	}
+
+exit_free_control:
+	test_free_control_mem();
+
+exit_free_mem:
+	kfree(f55);
+	f55 = NULL;
+	kfree(f54);
+	f54 = NULL;
+
+exit:
+	return retval;
+}
+
+static void synaptics_rmi4_test_remove(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!f54)
+		goto exit;
+
+	hrtimer_cancel(&f54->watchdog);
+
+	cancel_work_sync(&f54->test_report_work);
+	flush_workqueue(f54->test_report_workqueue);
+	destroy_workqueue(f54->test_report_workqueue);
+
+	test_remove_sysfs();
+
+	if (f55) {
+		kfree(f55->tx_assignment);
+		kfree(f55->rx_assignment);
+	}
+
+	test_free_control_mem();
+
+	if (f54->data_buffer_size)
+		kfree(f54->report_data);
+
+	kfree(f55);
+	f55 = NULL;
+
+	kfree(f54);
+	f54 = NULL;
+
+exit:
+	complete(&test_remove_complete);
+
+	return;
+}
+
+static void synaptics_rmi4_test_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+
+	if (!f54) {
+		synaptics_rmi4_test_init(rmi4_data);
+		return;
+	}
+
+	if (f55) {
+		kfree(f55->tx_assignment);
+		kfree(f55->rx_assignment);
+	}
+
+	test_free_control_mem();
+
+	kfree(f55);
+	f55 = NULL;
+
+	retval = test_scan_pdt();
+	if (retval < 0)
+		goto exit_free_mem;
+
+	retval = test_set_queries();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read f54 query registers\n",
+				__func__);
+		goto exit_free_mem;
+	}
+
+	f54->tx_assigned = f54->query.num_of_tx_electrodes;
+	f54->rx_assigned = f54->query.num_of_rx_electrodes;
+
+	retval = test_set_controls();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set up f54 control registers\n",
+				__func__);
+		goto exit_free_control;
+	}
+
+	test_set_data();
+
+	if (f55)
+		test_f55_init(rmi4_data);
+
+	if (rmi4_data->external_afe_buttons)
+		f54->tx_assigned++;
+
+	f54->status = STATUS_IDLE;
+
+	return;
+
+exit_free_control:
+	test_free_control_mem();
+
+exit_free_mem:
+	hrtimer_cancel(&f54->watchdog);
+
+	cancel_work_sync(&f54->test_report_work);
+	flush_workqueue(f54->test_report_workqueue);
+	destroy_workqueue(f54->test_report_workqueue);
+
+	test_remove_sysfs();
+
+	if (f54->data_buffer_size)
+		kfree(f54->report_data);
+
+	kfree(f55);
+	f55 = NULL;
+
+	kfree(f54);
+	f54 = NULL;
+
+	return;
+}
+
+static struct synaptics_rmi4_exp_fn test_module = {
+	.fn_type = RMI_TEST_REPORTING,
+	.init = synaptics_rmi4_test_init,
+	.remove = synaptics_rmi4_test_remove,
+	.reset = synaptics_rmi4_test_reset,
+	.reinit = NULL,
+	.early_suspend = NULL,
+	.suspend = NULL,
+	.resume = NULL,
+	.late_resume = NULL,
+	.attn = synaptics_rmi4_test_attn,
+};
+
+static int __init rmi4_test_module_init(void)
+{
+	synaptics_rmi4_new_function(&test_module, true);
+
+	return 0;
+}
+
+static void __exit rmi4_test_module_exit(void)
+{
+	synaptics_rmi4_new_function(&test_module, false);
+
+	wait_for_completion(&test_remove_complete);
+
+	return;
+}
+
+module_init(rmi4_test_module_init);
+module_exit(rmi4_test_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX Test Reporting Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_video.c b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_video.c
new file mode 100644
index 0000000..312d203
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_video.c
@@ -0,0 +1,416 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2015 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx_v2_6.h>
+#include "synaptics_dsx_core.h"
+
+#define SYSFS_FOLDER_NAME "video"
+
+/*
+#define RMI_DCS_SUSPEND_RESUME
+*/
+
+static ssize_t video_sysfs_dcs_write_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t video_sysfs_param_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static int video_send_dcs_command(unsigned char command_opcode);
+
+struct f38_command {
+	union {
+		struct {
+			unsigned char command_opcode;
+			unsigned char register_access:1;
+			unsigned char gamma_page:1;
+			unsigned char f38_control1_b2__7:6;
+			unsigned char parameter_field_1;
+			unsigned char parameter_field_2;
+			unsigned char parameter_field_3;
+			unsigned char parameter_field_4;
+			unsigned char send_to_dcs:1;
+			unsigned char f38_command6_b1__7:7;
+		} __packed;
+		unsigned char data[7];
+	};
+};
+
+struct synaptics_rmi4_video_handle {
+	unsigned char param;
+	unsigned short query_base_addr;
+	unsigned short control_base_addr;
+	unsigned short data_base_addr;
+	unsigned short command_base_addr;
+	struct synaptics_rmi4_data *rmi4_data;
+	struct kobject *sysfs_dir;
+};
+
+#ifdef RMI_DCS_SUSPEND_RESUME
+struct dcs_command {
+	unsigned char command;
+	unsigned int wait_time;
+};
+
+static struct dcs_command suspend_sequence[] = {
+	{
+		.command = 0x28,
+		.wait_time = 200,
+	},
+	{
+		.command = 0x10,
+		.wait_time = 200,
+	},
+};
+
+static struct dcs_command resume_sequence[] = {
+	{
+		.command = 0x11,
+		.wait_time = 200,
+	},
+	{
+		.command = 0x29,
+		.wait_time = 200,
+	},
+};
+#endif
+
+static struct device_attribute attrs[] = {
+	__ATTR(dcs_write, 0220,
+			synaptics_rmi4_show_error,
+			video_sysfs_dcs_write_store),
+	__ATTR(param, 0220,
+			synaptics_rmi4_show_error,
+			video_sysfs_param_store),
+};
+
+static struct synaptics_rmi4_video_handle *video;
+
+DECLARE_COMPLETION(video_remove_complete);
+
+static ssize_t video_sysfs_dcs_write_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+
+	if (sscanf(buf, "%x", &input) != 1)
+		return -EINVAL;
+
+	retval = video_send_dcs_command((unsigned char)input);
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+static ssize_t video_sysfs_param_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+
+	if (sscanf(buf, "%x", &input) != 1)
+		return -EINVAL;
+
+	video->param = (unsigned char)input;
+
+	return count;
+}
+
+static int video_send_dcs_command(unsigned char command_opcode)
+{
+	int retval;
+	struct f38_command command;
+	struct synaptics_rmi4_data *rmi4_data = video->rmi4_data;
+
+	memset(&command, 0x00, sizeof(command));
+
+	command.command_opcode = command_opcode;
+	command.parameter_field_1 = video->param;
+	command.send_to_dcs = 1;
+
+	video->param = 0;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			video->command_base_addr,
+			command.data,
+			sizeof(command.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to send DCS command\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+static int video_scan_pdt(void)
+{
+	int retval;
+	unsigned char page;
+	unsigned short addr;
+	bool f38_found = false;
+	struct synaptics_rmi4_fn_desc rmi_fd;
+	struct synaptics_rmi4_data *rmi4_data = video->rmi4_data;
+
+	for (page = 0; page < PAGES_TO_SERVICE; page++) {
+		for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
+			addr |= (page << 8);
+
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					addr,
+					(unsigned char *)&rmi_fd,
+					sizeof(rmi_fd));
+			if (retval < 0)
+				return retval;
+
+			addr &= ~(MASK_8BIT << 8);
+
+			if (!rmi_fd.fn_number)
+				break;
+
+			if (rmi_fd.fn_number == SYNAPTICS_RMI4_F38) {
+				f38_found = true;
+				goto f38_found;
+			}
+		}
+	}
+
+	if (!f38_found) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to find F38\n",
+				__func__);
+		return -EINVAL;
+	}
+
+f38_found:
+	video->query_base_addr = rmi_fd.query_base_addr | (page << 8);
+	video->control_base_addr = rmi_fd.ctrl_base_addr | (page << 8);
+	video->data_base_addr = rmi_fd.data_base_addr | (page << 8);
+	video->command_base_addr = rmi_fd.cmd_base_addr | (page << 8);
+
+	return 0;
+}
+
+static int synaptics_rmi4_video_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char attr_count;
+
+	if (video) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	video = kzalloc(sizeof(*video), GFP_KERNEL);
+	if (!video) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for video\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	video->rmi4_data = rmi4_data;
+
+	retval = video_scan_pdt();
+	if (retval < 0) {
+		retval = 0;
+		goto exit_scan_pdt;
+	}
+
+	video->sysfs_dir = kobject_create_and_add(SYSFS_FOLDER_NAME,
+			&rmi4_data->input_dev->dev.kobj);
+	if (!video->sysfs_dir) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs directory\n",
+				__func__);
+		retval = -ENODEV;
+		goto exit_sysfs_dir;
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(video->sysfs_dir,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			retval = -ENODEV;
+			goto exit_sysfs_attrs;
+		}
+	}
+
+	return 0;
+
+exit_sysfs_attrs:
+	while (attr_count--) /* unsigned char: ">= 0" test would never end */
+		sysfs_remove_file(video->sysfs_dir, &attrs[attr_count].attr);
+
+	kobject_put(video->sysfs_dir);
+
+exit_sysfs_dir:
+exit_scan_pdt:
+	kfree(video);
+	video = NULL;
+
+exit:
+	return retval;
+}
+
+static void synaptics_rmi4_video_remove(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char attr_count;
+
+	if (!video)
+		goto exit;
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++)
+		sysfs_remove_file(video->sysfs_dir, &attrs[attr_count].attr);
+
+	kobject_put(video->sysfs_dir);
+
+	kfree(video);
+	video = NULL;
+
+exit:
+	complete(&video_remove_complete);
+
+	return;
+}
+
+static void synaptics_rmi4_video_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!video)
+		synaptics_rmi4_video_init(rmi4_data);
+
+	return;
+}
+
+#ifdef RMI_DCS_SUSPEND_RESUME
+static void synaptics_rmi4_video_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char command;
+	unsigned char num_of_cmds;
+
+	if (!video)
+		return;
+
+	num_of_cmds = ARRAY_SIZE(suspend_sequence);
+
+	for (ii = 0; ii < num_of_cmds; ii++) {
+		command = suspend_sequence[ii].command;
+		retval = video_send_dcs_command(command);
+		if (retval < 0)
+			return;
+		msleep(suspend_sequence[ii].wait_time);
+	}
+
+	return;
+}
+
+static void synaptics_rmi4_video_resume(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char command;
+	unsigned char num_of_cmds;
+
+	if (!video)
+		return;
+
+	num_of_cmds = ARRAY_SIZE(resume_sequence);
+
+	for (ii = 0; ii < num_of_cmds; ii++) {
+		command = resume_sequence[ii].command;
+		retval = video_send_dcs_command(command);
+		if (retval < 0)
+			return;
+		msleep(resume_sequence[ii].wait_time);
+	}
+
+	return;
+}
+#endif
+
+static struct synaptics_rmi4_exp_fn video_module = {
+	.fn_type = RMI_VIDEO,
+	.init = synaptics_rmi4_video_init,
+	.remove = synaptics_rmi4_video_remove,
+	.reset = synaptics_rmi4_video_reset,
+	.reinit = NULL,
+	.early_suspend = NULL,
+#ifdef RMI_DCS_SUSPEND_RESUME
+	.suspend = synaptics_rmi4_video_suspend,
+	.resume = synaptics_rmi4_video_resume,
+#else
+	.suspend = NULL,
+	.resume = NULL,
+#endif
+	.late_resume = NULL,
+	.attn = NULL,
+};
+
+static int __init rmi4_video_module_init(void)
+{
+	synaptics_rmi4_new_function(&video_module, true);
+
+	return 0;
+}
+
+static void __exit rmi4_video_module_exit(void)
+{
+	synaptics_rmi4_new_function(&video_module, false);
+
+	wait_for_completion(&video_remove_complete);
+
+	return;
+}
+
+module_init(rmi4_video_module_init);
+module_exit(rmi4_video_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX Video Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 1a0b110..0c910a8 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3211,7 +3211,7 @@
 	unsigned long start, end;
 
 	start = IOVA_PFN(region->start);
-	end   = IOVA_PFN(region->start + region->length);
+	end   = IOVA_PFN(region->start + region->length - 1);
 
 	WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL);
 }
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index d3d975a..7f294f7 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1547,13 +1547,15 @@
 	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
 	domain->geometry.aperture_end = (1UL << ias) - 1;
 	domain->geometry.force_aperture = true;
-	smmu_domain->pgtbl_ops = pgtbl_ops;
 
 	ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
-	if (ret < 0)
+	if (ret < 0) {
 		free_io_pgtable_ops(pgtbl_ops);
+		return ret;
+	}
 
-	return ret;
+	smmu_domain->pgtbl_ops = pgtbl_ops;
+	return 0;
 }
 
 static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
@@ -1580,7 +1582,7 @@
 
 static int arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
 {
-	int i;
+	int i, j;
 	struct arm_smmu_master_data *master = fwspec->iommu_priv;
 	struct arm_smmu_device *smmu = master->smmu;
 
@@ -1588,6 +1590,13 @@
 		u32 sid = fwspec->ids[i];
 		__le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
 
+		/* Bridged PCI devices may end up with duplicated IDs */
+		for (j = 0; j < i; j++)
+			if (fwspec->ids[j] == sid)
+				break;
+		if (j < i)
+			continue;
+
 		arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
 	}
 
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 28ef920..85df514 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -55,11 +55,11 @@
 #include <linux/remote_spinlock.h>
 #include <linux/ktime.h>
 #include <trace/events/iommu.h>
-#include <soc/qcom/msm_tz_smmu.h>
-#include <soc/qcom/scm.h>
 #include <linux/notifier.h>
+#include <dt-bindings/arm/arm-smmu.h>
 
 #include <linux/amba/bus.h>
+#include <soc/qcom/msm_tz_smmu.h>
 
 #include "io-pgtable.h"
 
@@ -432,16 +432,6 @@
 #define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
 	u32				features;
 
-#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
-#define ARM_SMMU_OPT_FATAL_ASF		(1 << 1)
-#define ARM_SMMU_OPT_SKIP_INIT		(1 << 2)
-#define ARM_SMMU_OPT_DYNAMIC		(1 << 3)
-#define ARM_SMMU_OPT_3LVL_TABLES	(1 << 4)
-#define ARM_SMMU_OPT_NO_ASID_RETENTION	(1 << 5)
-#define ARM_SMMU_OPT_DISABLE_ATOS	(1 << 6)
-#define ARM_SMMU_OPT_MMU500_ERRATA1	(1 << 7)
-#define ARM_SMMU_OPT_STATIC_CB		(1 << 8)
-#define ARM_SMMU_OPT_HALT		(1 << 9)
 	u32				options;
 	enum arm_smmu_arch_version	version;
 	enum arm_smmu_implementation	model;
@@ -449,6 +439,7 @@
 	u32				num_context_banks;
 	u32				num_s2_context_banks;
 	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
+	DECLARE_BITMAP(secure_context_map, ARM_SMMU_MAX_CBS);
 	atomic_t			irptndx;
 
 	u32				num_mapping_groups;
@@ -628,7 +619,7 @@
 	return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
 }
 
-static int arm_smmu_restore_sec_cfg(struct arm_smmu_device *smmu)
+static int arm_smmu_restore_sec_cfg(struct arm_smmu_device *smmu, u32 cb)
 {
 	int ret;
 	int scm_ret = 0;
@@ -636,7 +627,7 @@
 	if (!arm_smmu_is_static_cb(smmu))
 		return 0;
 
-	ret = scm_restore_sec_cfg(smmu->sec_id, 0x0, &scm_ret);
+	ret = scm_restore_sec_cfg(smmu->sec_id, cb, &scm_ret);
 	if (ret || scm_ret) {
 		pr_err("scm call IOMMU_SECURE_CFG failed\n");
 		return -EINVAL;
@@ -1556,6 +1547,19 @@
 	return IRQ_HANDLED;
 }
 
+static bool arm_smmu_master_attached(struct arm_smmu_device *smmu,
+				     struct iommu_fwspec *fwspec)
+{
+	int i, idx;
+
+	for_each_cfg_sme(fwspec, i, idx) {
+		if (smmu->s2crs[idx].attach_count)
+			return true;
+	}
+
+	return false;
+}
+
 static int arm_smmu_set_pt_format(struct arm_smmu_domain *smmu_domain,
 				  struct io_pgtable_cfg *pgtbl_cfg)
 {
@@ -1966,6 +1970,10 @@
 
 	/* Publish page table ops for map/unmap */
 	smmu_domain->pgtbl_ops = pgtbl_ops;
+	if (arm_smmu_is_slave_side_secure(smmu_domain) &&
+			!arm_smmu_master_attached(smmu, dev->iommu_fwspec))
+		arm_smmu_restore_sec_cfg(smmu, cfg->cbndx);
+
 	return 0;
 
 out_clear_smmu:
@@ -2034,6 +2042,11 @@
 	arm_smmu_unassign_table(smmu_domain);
 	arm_smmu_secure_domain_unlock(smmu_domain);
 	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
+	/* The non-secure context bank index is always set to zero, so
+	 * clear the secure context bank bitmap directly.
+	 */
+	if (arm_smmu_is_slave_side_secure(smmu_domain))
+		__arm_smmu_free_bitmap(smmu->secure_context_map, cfg->cbndx);
 
 	arm_smmu_power_off(smmu->pwr);
 	arm_smmu_domain_reinit(smmu_domain);
@@ -2736,6 +2749,50 @@
 	}
 }
 
+#ifdef CONFIG_MSM_TZ_SMMU
+static struct arm_smmu_device *arm_smmu_get_by_addr(void __iomem *addr)
+{
+	struct arm_smmu_device *smmu;
+	unsigned long flags;
+
+	spin_lock_irqsave(&arm_smmu_devices_lock, flags);
+	list_for_each_entry(smmu, &arm_smmu_devices, list) {
+		unsigned long base = (unsigned long)smmu->base;
+		unsigned long mask = ~(smmu->size - 1);
+
+		if ((base & mask) == ((unsigned long)addr & mask)) {
+			spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
+			return smmu;
+		}
+	}
+	spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
+	return NULL;
+}
+
+bool arm_smmu_skip_write(void __iomem *addr)
+{
+	struct arm_smmu_device *smmu;
+	int cb;
+
+	smmu = arm_smmu_get_by_addr(addr);
+
+	/* Skip write if smmu not available by now */
+	if (!smmu)
+		return true;
+
+	/* Do not write to global space */
+	if (((unsigned long)addr & (smmu->size - 1)) < (smmu->size >> 1))
+		return true;
+
+	/* Finally skip writing to secure CB */
+	cb = ((unsigned long)addr & ((smmu->size >> 1) - 1)) >> PAGE_SHIFT;
+	if (test_bit(cb, smmu->secure_context_map))
+		return true;
+
+	return false;
+}
+#endif
+
 static struct arm_smmu_device *arm_smmu_get_by_list(struct device_node *np)
 {
 	struct arm_smmu_device *smmu;
@@ -3666,9 +3723,13 @@
 			cb = smmu->s2crs[idx].cbndx;
 	}
 
-	if (cb >= 0 && arm_smmu_is_static_cb(smmu))
+	if (cb >= 0 && arm_smmu_is_static_cb(smmu)) {
 		smmu_domain->slave_side_secure = true;
 
+		if (arm_smmu_is_slave_side_secure(smmu_domain))
+			bitmap_set(smmu->secure_context_map, cb, 1);
+	}
+
 	if (cb < 0 && !arm_smmu_is_static_cb(smmu)) {
 		mutex_unlock(&smmu->stream_map_mutex);
 		return __arm_smmu_alloc_bitmap(smmu->context_map,
@@ -3843,7 +3904,7 @@
 	if (event == REGULATOR_EVENT_PRE_DISABLE)
 		qsmmuv2_halt(smmu);
 	else if (event == REGULATOR_EVENT_ENABLE) {
-		if (arm_smmu_restore_sec_cfg(smmu))
+		if (arm_smmu_restore_sec_cfg(smmu, 0))
 			goto power_off;
 		qsmmuv2_resume(smmu);
 	}
@@ -3996,7 +4057,7 @@
 	bool cttw_dt, cttw_reg;
 	int i;
 
-	if (arm_smmu_restore_sec_cfg(smmu))
+	if (arm_smmu_restore_sec_cfg(smmu, 0))
 		return -ENODEV;
 
 	dev_dbg(smmu->dev, "probing hardware configuration...\n");
@@ -4482,7 +4543,8 @@
 	if (arm_smmu_power_on(smmu->pwr))
 		return -EINVAL;
 
-	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
+	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS) ||
+	    !bitmap_empty(smmu->secure_context_map, ARM_SMMU_MAX_CBS))
 		dev_err(&pdev->dev, "removing device with active domains!\n");
 
 	idr_destroy(&smmu->asid_idr);
@@ -5250,6 +5312,9 @@
 	data->version = readl_relaxed(data->tcu_base + TCU_HW_VERSION_HLOS1);
 	smmu->archdata = data;
 
+	if (arm_smmu_is_static_cb(smmu))
+		return 0;
+
 	ret = qsmmuv500_parse_errata1(smmu);
 	if (ret)
 		return ret;
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index c7820b3..beef59e 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -543,7 +543,10 @@
 	if (is_sysmmu_active(data) && data->version >= MAKE_MMU_VER(3, 3)) {
 		clk_enable(data->clk_master);
 		if (sysmmu_block(data)) {
-			__sysmmu_tlb_invalidate_entry(data, iova, 1);
+			if (data->version >= MAKE_MMU_VER(5, 0))
+				__sysmmu_tlb_invalidate(data);
+			else
+				__sysmmu_tlb_invalidate_entry(data, iova, 1);
 			sysmmu_unblock(data);
 		}
 		clk_disable(data->clk_master);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 002f8a4..88bbc8c 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2245,10 +2245,12 @@
 		uint64_t tmp;
 
 		if (!sg_res) {
+			unsigned int pgoff = sg->offset & ~PAGE_MASK;
+
 			sg_res = aligned_nrpages(sg->offset, sg->length);
-			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
+			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff;
 			sg->dma_length = sg->length;
-			pteval = page_to_phys(sg_page(sg)) | prot;
+			pteval = (sg_phys(sg) - pgoff) | prot;
 			phys_pfn = pteval >> VTD_PAGE_SHIFT;
 		}
 
@@ -3894,7 +3896,7 @@
 
 	for_each_sg(sglist, sg, nelems, i) {
 		BUG_ON(!sg_page(sg));
-		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
+		sg->dma_address = sg_phys(sg);
 		sg->dma_length = sg->length;
 	}
 	return nelems;
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index f50e51c..d68a552 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -418,8 +418,12 @@
 			pte |= ARM_V7S_ATTR_NS_TABLE;
 
 		__arm_v7s_set_pte(ptep, pte, 1, cfg);
-	} else {
+	} else if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) {
 		cptep = iopte_deref(pte, lvl);
+	} else {
+		/* We require an unmap first */
+		WARN_ON(!selftest_running);
+		return -EEXIST;
 	}
 
 	/* Rinse, repeat */
diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c
index 22a708e..25b85ab 100644
--- a/drivers/iommu/iommu-debug.c
+++ b/drivers/iommu/iommu-debug.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -28,7 +28,7 @@
 #include <asm/cacheflush.h>
 #include <asm/dma-iommu.h>
 
-#if defined(CONFIG_IOMMU_DEBUG_TRACKING) || defined(CONFIG_IOMMU_TESTS)
+#if defined(CONFIG_IOMMU_TESTS)
 
 static const char *iommu_debug_attr_to_string(enum iommu_attr attr)
 {
@@ -170,6 +170,8 @@
 	u64 phys;
 	size_t len;
 	struct list_head list;
+	struct mutex clk_lock;
+	unsigned int clk_count;
 };
 
 static int iommu_debug_build_phoney_sg_table(struct device *dev,
@@ -1195,6 +1197,7 @@
 		return -ENOMEM;
 	}
 
+	val = VMID_CP_CAMERA;
 	if (is_secure && iommu_domain_set_attr(ddev->domain,
 					       DOMAIN_ATTR_SECURE_VMID,
 					       &val)) {
@@ -1485,6 +1488,10 @@
 	ssize_t retval;
 	size_t buflen;
 
+	if (kptr_restrict != 0) {
+		pr_err("kptr_restrict needs to be disabled.\n");
+		return -EPERM;
+	}
 	if (!dev->archdata.mapping) {
 		pr_err("No mapping. Did you already attach?\n");
 		return -EINVAL;
@@ -1552,6 +1559,10 @@
 	ssize_t retval;
 	size_t buflen;
 
+	if (kptr_restrict != 0) {
+		pr_err("kptr_restrict needs to be disabled.\n");
+		return -EPERM;
+	}
 	if (!ddev->domain) {
 		pr_err("No domain. Did you already attach?\n");
 		return -EINVAL;
@@ -1600,6 +1611,10 @@
 	ssize_t retval;
 	size_t buflen;
 
+	if (kptr_restrict != 0) {
+		pr_err("kptr_restrict needs to be disabled.\n");
+		return -EPERM;
+	}
 	if (!dev->archdata.mapping) {
 		pr_err("No mapping. Did you already attach?\n");
 		return -EINVAL;
@@ -2046,20 +2061,34 @@
 		return -EFAULT;
 	}
 
+	mutex_lock(&ddev->clk_lock);
 	switch (buf) {
 	case '0':
+		if (ddev->clk_count == 0) {
+			dev_err(dev, "Config clocks already disabled\n");
+			break;
+		}
+
+		if (--ddev->clk_count > 0)
+			break;
+
 		dev_err(dev, "Disabling config clocks\n");
 		iommu_disable_config_clocks(ddev->domain);
 		break;
 	case '1':
+		if (ddev->clk_count++ > 0)
+			break;
+
 		dev_err(dev, "Enabling config clocks\n");
 		if (iommu_enable_config_clocks(ddev->domain))
 			dev_err(dev, "Failed!\n");
 		break;
 	default:
 		dev_err(dev, "Invalid value. Should be 0 or 1.\n");
+		mutex_unlock(&ddev->clk_lock);
 		return -EINVAL;
 	}
+	mutex_unlock(&ddev->clk_lock);
 
 	return count;
 }
@@ -2109,6 +2138,9 @@
 	if (!of_find_property(dev->of_node, "iommus", NULL))
 		return 0;
 
+	if (!of_device_is_compatible(dev->of_node, "iommu-debug-test"))
+		return 0;
+
 	/* Hold a reference count */
 	if (!iommu_group_get(dev))
 		return 0;
@@ -2116,6 +2148,7 @@
 	ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
 	if (!ddev)
 		return -ENODEV;
+	mutex_init(&ddev->clk_lock);
 	ddev->dev = dev;
 	dir = debugfs_create_dir(dev_name(dev), debugfs_tests_dir);
 	if (!dir) {
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index b8aeb07..68c6050 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -703,7 +703,7 @@
 	.probe	= mtk_iommu_probe,
 	.remove	= mtk_iommu_remove,
 	.driver	= {
-		.name = "mtk-iommu",
+		.name = "mtk-iommu-v1",
 		.of_match_table = mtk_iommu_of_ids,
 		.pm = &mtk_iommu_pm_ops,
 	}
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index ee50a61..9c8ec67 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -278,6 +278,7 @@
 
 config MVEBU_ODMI
 	bool
+	select GENERIC_MSI_IRQ_DOMAIN
 
 config MVEBU_PIC
 	bool
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
index 05bbf17..1070b7b 100644
--- a/drivers/irqchip/irq-crossbar.c
+++ b/drivers/irqchip/irq-crossbar.c
@@ -199,7 +199,7 @@
 static int __init crossbar_of_init(struct device_node *node)
 {
 	int i, size, reserved = 0;
-	u32 max = 0, entry;
+	u32 max = 0, entry, reg_size;
 	const __be32 *irqsr;
 	int ret = -ENOMEM;
 
@@ -276,9 +276,9 @@
 	if (!cb->register_offsets)
 		goto err_irq_map;
 
-	of_property_read_u32(node, "ti,reg-size", &size);
+	of_property_read_u32(node, "ti,reg-size", &reg_size);
 
-	switch (size) {
+	switch (reg_size) {
 	case 1:
 		cb->write = crossbar_writeb;
 		break;
@@ -304,7 +304,7 @@
 			continue;
 
 		cb->register_offsets[i] = reserved;
-		reserved += size;
+		reserved += reg_size;
 	}
 
 	of_property_read_u32(node, "ti,irqs-safe-map", &cb->safe_map);
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 01f9435..c67e813 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1066,18 +1066,18 @@
 	int nr_parts;
 	struct partition_affinity *parts;
 
-	parts_node = of_find_node_by_name(gic_node, "ppi-partitions");
+	parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
 	if (!parts_node)
 		return;
 
 	nr_parts = of_get_child_count(parts_node);
 
 	if (!nr_parts)
-		return;
+		goto out_put_node;
 
 	parts = kzalloc(sizeof(*parts) * nr_parts, GFP_KERNEL);
 	if (WARN_ON(!parts))
-		return;
+		goto out_put_node;
 
 	for_each_child_of_node(parts_node, child_part) {
 		struct partition_affinity *part;
@@ -1144,6 +1144,9 @@
 
 		gic_data.ppi_descs[i] = desc;
 	}
+
+out_put_node:
+	of_node_put(parts_node);
 }
 
 static void __init gic_of_setup_kvm_info(struct device_node *node)
diff --git a/drivers/irqchip/qcom/Kconfig b/drivers/irqchip/qcom/Kconfig
index b892109..0038047 100644
--- a/drivers/irqchip/qcom/Kconfig
+++ b/drivers/irqchip/qcom/Kconfig
@@ -20,3 +20,10 @@
         default y if ARCH_SDM670
         help
           QTI Power Domain Controller for SDM670
+
+config QTI_PDC_SDXPOORWILLS
+        bool "QTI PDC SDxPOORWILLS"
+        select QTI_PDC
+        default y if ARCH_SDXPOORWILLS
+        help
+          QTI Power Domain Controller for SDxPoorwills
diff --git a/drivers/irqchip/qcom/Makefile b/drivers/irqchip/qcom/Makefile
index 5e99040..c4ff9ef 100644
--- a/drivers/irqchip/qcom/Makefile
+++ b/drivers/irqchip/qcom/Makefile
@@ -1,3 +1,4 @@
 obj-$(CONFIG_QTI_PDC)			+= pdc.o
 obj-$(CONFIG_QTI_PDC_SDM845)		+= pdc-sdm845.o
 obj-$(CONFIG_QTI_PDC_SDM670)		+= pdc-sdm670.o
+obj-$(CONFIG_QTI_PDC_SDXPOORWILLS)	+= pdc-sdxpoorwills.o
diff --git a/drivers/irqchip/qcom/pdc-sdxpoorwills.c b/drivers/irqchip/qcom/pdc-sdxpoorwills.c
new file mode 100644
index 0000000..5bbca03
--- /dev/null
+++ b/drivers/irqchip/qcom/pdc-sdxpoorwills.c
@@ -0,0 +1,76 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/irqchip.h>
+#include "pdc.h"
+
+static struct pdc_pin sdxpoorwills_data[] = {
+	{0, 179}, /* rpmh_wake */
+	{1, 180}, /* ee0_apps_hlos_spmi_periph_irq */
+	{2, 181}, /* ee1_apps_trustzone_spmi_periph_irq */
+	{3, 182}, /* secure_wdog_expired */
+	{4, 183}, /* secure_wdog_bark_irq */
+	{5, 184}, /* aop_wdog_expired_irq */
+	{8, 187}, /* aoss_pmic_arb_mpu_xpu_summary_irq */
+	{9, 188}, /* rpmh_wake */
+	{12, 191}, /* pdc_apps_epcb_timeout_summary_irq	*/
+	{13, 192}, /* spmi_protocol_irq	*/
+	{14, 193}, /* tsense0_tsense_max_min_int */
+	{15, 194}, /* apps_pdc_irq_in_15 */
+	{16, 195}, /* tsense0_upper_lower_intr */
+	{17, 196}, /* apps_pdc_irq_in_17 */
+	{18, 197}, /* tsense0_critical_intr	*/
+	{19, 198}, /* apps_pdc_irq_in_19 */
+	{20, 199}, /* apps_pdc.gp_irq_mux[0] */
+	{21, 200}, /* apps_pdc.gp_irq_mux[1] */
+	{22, 201}, /* apps_pdc.gp_irq_mux[2] */
+	{23, 202}, /* apps_pdc.gp_irq_mux[3] */
+	{24, 203}, /* apps_pdc.gp_irq_mux[4] */
+	{25, 204}, /* apps_pdc.gp_irq_mux[5] */
+	{26, 205}, /* apps_pdc.gp_irq_mux[6] */
+	{27, 206}, /* apps_pdc.gp_irq_mux[7] */
+	{28, 207}, /* apps_pdc.gp_irq_mux[8] */
+	{29, 208}, /* apps_pdc.gp_irq_mux[9] */
+	{30, 209}, /* apps_pdc.gp_irq_mux[10] */
+	{31, 210}, /* apps_pdc.gp_irq_mux[11] */
+	{32, 211}, /* apps_pdc.gp_irq_mux[12] */
+	{33, 212}, /* apps_pdc.gp_irq_mux[13] */
+	{34, 213}, /* apps_pdc.gp_irq_mux[14] */
+	{35, 214}, /* apps_pdc.gp_irq_mux[15] */
+	{36, 215}, /* apps_pdc.gp_irq_mux[16] */
+	{37, 216}, /* apps_pdc.gp_irq_mux[17] */
+	{38, 217}, /* apps_pdc.gp_irq_mux[18] */
+	{39, 218}, /* apps_pdc.gp_irq_mux[19] */
+	{40, 219}, /* apps_pdc.gp_irq_mux[20] */
+	{41, 220}, /* apps_pdc.gp_irq_mux[21] */
+	{42, 221}, /* apps_pdc.gp_irq_mux[22] */
+	{43, 222}, /* apps_pdc.gp_irq_mux[23] */
+	{44, 223}, /* apps_pdc.gp_irq_mux[24] */
+	{45, 224}, /* apps_pdc.gp_irq_mux[25] */
+	{46, 225}, /* apps_pdc.gp_irq_mux[26] */
+	{47, 226}, /* apps_pdc.gp_irq_mux[27] */
+	{48, 227}, /* apps_pdc.gp_irq_mux[28] */
+	{49, 228}, /* apps_pdc.gp_irq_mux[29] */
+	{50, 229}, /* apps_pdc.gp_irq_mux[30] */
+	{51, 230}, /* apps_pdc.gp_irq_mux[31] */
+	{-1}
+};
+
+static int __init qcom_pdc_gic_init(struct device_node *node,
+		struct device_node *parent)
+{
+	pr_info("PDC sdxpoorwills initialized\n");
+	return qcom_pdc_init(node, parent, sdxpoorwills_data);
+}
+
+IRQCHIP_DECLARE(pdc_sdxpoorwills, "qcom,pdc-sdxpoorwills", qcom_pdc_gic_init);
diff --git a/drivers/irqchip/qcom/pdc.c b/drivers/irqchip/qcom/pdc.c
index 923552f..f7284bd 100644
--- a/drivers/irqchip/qcom/pdc.c
+++ b/drivers/irqchip/qcom/pdc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -16,6 +16,7 @@
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/irqchip.h>
+#include <linux/interrupt.h>
 #include <linux/irqdomain.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
@@ -95,6 +96,20 @@
 	return 0;
 }
 
+static int qcom_pdc_gic_get_irqchip_state(struct irq_data *d,
+		enum irqchip_irq_state which, bool *state)
+{
+	return d->parent_data->chip->irq_get_irqchip_state(d,
+		which, state);
+}
+
+static int qcom_pdc_gic_set_irqchip_state(struct irq_data *d,
+		enum irqchip_irq_state which, bool value)
+{
+	return d->parent_data->chip->irq_set_irqchip_state(d,
+		which, value);
+}
+
 static void qcom_pdc_gic_mask(struct irq_data *d)
 {
 	pdc_enable_intr(d, false);
@@ -220,6 +235,8 @@
 #ifdef CONFIG_SMP
 	.irq_set_affinity	= irq_chip_set_affinity_parent,
 #endif
+	.irq_get_irqchip_state	= qcom_pdc_gic_get_irqchip_state,
+	.irq_set_irqchip_state	= qcom_pdc_gic_set_irqchip_state,
 };
 
 static int qcom_pdc_translate(struct irq_domain *d,
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index 823f698..dd7e38a 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -1032,6 +1032,7 @@
 						     sizeof(avmb1_carddef))))
 				return -EFAULT;
 			cdef.cardtype = AVM_CARDTYPE_B1;
+			cdef.cardnr = 0;
 		} else {
 			if ((retval = copy_from_user(&cdef, data,
 						     sizeof(avmb1_extcarddef))))
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index b8f30cd..787bda3 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -625,6 +625,15 @@
 	  To compile this driver as a module, choose 'm' here: the module
 	  will be called leds-powernv.
 
+config LEDS_QTI_TRI_LED
+	tristate "LED support for Qualcomm Technologies, Inc. TRI_LED"
+	depends on LEDS_CLASS && MFD_SPMI_PMIC && PWM && OF
+	help
+	  This driver supports the TRI_LED module found in Qualcomm
+	  Technologies, Inc. PMIC chips. TRI_LED supports 3 LED drivers
+	  at max and each is controlled by a PWM channel used for dimming
+	  or blinking.
+
 config LEDS_SYSCON
 	bool "LED support for LEDs on system controllers"
 	depends on LEDS_CLASS=y
@@ -706,6 +715,14 @@
 	  module provides haptic feedback for user actions such as a long press
 	  on the touch screen.
 
+config LEDS_QPNP_VIBRATOR_LDO
+	tristate "Vibrator-LDO support for QPNP PMIC"
+	depends on LEDS_CLASS && MFD_SPMI_PMIC
+	help
+	  This option enables device driver support for the vibrator-ldo
+	  peripheral found on Qualcomm Technologies, Inc. QPNP PMICs.
+	  The vibrator-ldo peripheral is capable of driving ERM vibrators.
+
 comment "LED Triggers"
 source "drivers/leds/trigger/Kconfig"
 
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index ba9bb8d..e9eaa50 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -61,6 +61,7 @@
 obj-$(CONFIG_LEDS_MAX8997)		+= leds-max8997.o
 obj-$(CONFIG_LEDS_LM355x)		+= leds-lm355x.o
 obj-$(CONFIG_LEDS_BLINKM)		+= leds-blinkm.o
+obj-$(CONFIG_LEDS_QTI_TRI_LED)		+= leds-qti-tri-led.o
 obj-$(CONFIG_LEDS_SYSCON)		+= leds-syscon.o
 obj-$(CONFIG_LEDS_VERSATILE)		+= leds-versatile.o
 obj-$(CONFIG_LEDS_MENF21BMC)		+= leds-menf21bmc.o
@@ -72,10 +73,11 @@
 obj-$(CONFIG_LEDS_PM8058)		+= leds-pm8058.o
 obj-$(CONFIG_LEDS_MLXCPLD)		+= leds-mlxcpld.o
 obj-$(CONFIG_LEDS_QPNP)			+= leds-qpnp.o
-obj-$(CONFIG_LEDS_QPNP_FLASH)		+= leds-qpnp-flash.o
-obj-$(CONFIG_LEDS_QPNP_FLASH_V2)	+= leds-qpnp-flash-v2.o
+obj-$(CONFIG_LEDS_QPNP_FLASH)		+= leds-qpnp-flash.o leds-qpnp-flash-common.o
+obj-$(CONFIG_LEDS_QPNP_FLASH_V2)	+= leds-qpnp-flash-v2.o leds-qpnp-flash-common.o
 obj-$(CONFIG_LEDS_QPNP_WLED)		+= leds-qpnp-wled.o
 obj-$(CONFIG_LEDS_QPNP_HAPTICS)	+= leds-qpnp-haptics.o
+obj-$(CONFIG_LEDS_QPNP_VIBRATOR_LDO)	+= leds-qpnp-vibrator-ldo.o
 
 # LED SPI Drivers
 obj-$(CONFIG_LEDS_DAC124S085)		+= leds-dac124s085.o
diff --git a/drivers/leds/leds-qpnp-flash-common.c b/drivers/leds/leds-qpnp-flash-common.c
new file mode 100644
index 0000000..5aed910
--- /dev/null
+++ b/drivers/leds/leds-qpnp-flash-common.c
@@ -0,0 +1,16 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/leds-qpnp-flash.h>
+
+int (*qpnp_flash_led_prepare)(struct led_trigger *trig, int options,
+					int *max_current);
diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c
index 1a2aea9..f3f9a1a 100644
--- a/drivers/leds/leds-qpnp-flash-v2.c
+++ b/drivers/leds/leds-qpnp-flash-v2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -99,11 +99,8 @@
 
 #define	VPH_DROOP_DEBOUNCE_US_TO_VAL(val_us)	(val_us / 8)
 #define	VPH_DROOP_HYST_MV_TO_VAL(val_mv)	(val_mv / 25)
-#define	VPH_DROOP_THRESH_MV_TO_VAL(val_mv)	((val_mv / 100) - 25)
 #define	VPH_DROOP_THRESH_VAL_TO_UV(val)		((val + 25) * 100000)
 #define	MITIGATION_THRSH_MA_TO_VAL(val_ma)	(val_ma / 100)
-#define	CURRENT_MA_TO_REG_VAL(curr_ma, ires_ua)	((curr_ma * 1000) / ires_ua - 1)
-#define	SAFETY_TMR_TO_REG_VAL(duration_ms)	((duration_ms / 10) - 1)
 #define	THERMAL_HYST_TEMP_TO_VAL(val, divisor)	(val / divisor)
 
 #define	FLASH_LED_ISC_WARMUP_DELAY_SHIFT	6
@@ -317,6 +314,14 @@
 	FLASH_LED_IRES7P5_MAX_CURR_MA, FLASH_LED_IRES5P0_MAX_CURR_MA
 };
 
+static inline int get_current_reg_code(int target_curr_ma, int ires_ua)
+{
+	if (!ires_ua || !target_curr_ma || (target_curr_ma < (ires_ua / 1000)))
+		return 0;
+
+	return DIV_ROUND_UP(target_curr_ma * 1000, ires_ua) - 1;
+}
+
 static int qpnp_flash_led_read(struct qpnp_flash_led *led, u16 addr, u8 *data)
 {
 	int rc;
@@ -542,7 +547,7 @@
 		return rc;
 
 	if (led->pdata->led1n2_iclamp_low_ma) {
-		val = CURRENT_MA_TO_REG_VAL(led->pdata->led1n2_iclamp_low_ma,
+		val = get_current_reg_code(led->pdata->led1n2_iclamp_low_ma,
 						led->fnode[LED1].ires_ua);
 		rc = qpnp_flash_led_masked_write(led,
 				FLASH_LED_REG_LED1N2_ICLAMP_LOW(led->base),
@@ -552,7 +557,7 @@
 	}
 
 	if (led->pdata->led1n2_iclamp_mid_ma) {
-		val = CURRENT_MA_TO_REG_VAL(led->pdata->led1n2_iclamp_mid_ma,
+		val = get_current_reg_code(led->pdata->led1n2_iclamp_mid_ma,
 						led->fnode[LED1].ires_ua);
 		rc = qpnp_flash_led_masked_write(led,
 				FLASH_LED_REG_LED1N2_ICLAMP_MID(led->base),
@@ -562,7 +567,7 @@
 	}
 
 	if (led->pdata->led3_iclamp_low_ma) {
-		val = CURRENT_MA_TO_REG_VAL(led->pdata->led3_iclamp_low_ma,
+		val = get_current_reg_code(led->pdata->led3_iclamp_low_ma,
 						led->fnode[LED3].ires_ua);
 		rc = qpnp_flash_led_masked_write(led,
 				FLASH_LED_REG_LED3_ICLAMP_LOW(led->base),
@@ -572,7 +577,7 @@
 	}
 
 	if (led->pdata->led3_iclamp_mid_ma) {
-		val = CURRENT_MA_TO_REG_VAL(led->pdata->led3_iclamp_mid_ma,
+		val = get_current_reg_code(led->pdata->led3_iclamp_mid_ma,
 						led->fnode[LED3].ires_ua);
 		rc = qpnp_flash_led_masked_write(led,
 				FLASH_LED_REG_LED3_ICLAMP_MID(led->base),
@@ -992,7 +997,7 @@
 	}
 	fnode->current_ma = prgm_current_ma;
 	fnode->cdev.brightness = prgm_current_ma;
-	fnode->current_reg_val = CURRENT_MA_TO_REG_VAL(prgm_current_ma,
+	fnode->current_reg_val = get_current_reg_code(prgm_current_ma,
 					fnode->ires_ua);
 	fnode->led_on = prgm_current_ma != 0;
 
@@ -1103,10 +1108,11 @@
 		return rc;
 	}
 
-	/* Iterate over all leds for this switch node */
+	/* Iterate over all active leds for this switch node */
 	val = 0;
 	for (i = 0; i < led->num_fnodes; i++)
-		if (snode->led_mask & BIT(led->fnode[i].id))
+		if (led->fnode[i].led_on &&
+				snode->led_mask & BIT(led->fnode[i].id))
 			val |= led->fnode[i].ires_idx << (led->fnode[i].id * 2);
 
 	rc = qpnp_flash_led_masked_write(led, FLASH_LED_REG_IRES(led->base),
@@ -1210,7 +1216,7 @@
 	return 0;
 }
 
-int qpnp_flash_led_prepare(struct led_trigger *trig, int options,
+static int qpnp_flash_led_prepare_v2(struct led_trigger *trig, int options,
 					int *max_current)
 {
 	struct led_classdev *led_cdev;
@@ -1430,6 +1436,22 @@
 	return atomic_notifier_chain_unregister(&irq_notifier_list, nb);
 }
 
+static inline u8 get_safety_timer_code(u32 duration_ms)
+{
+	if (!duration_ms)
+		return 0;
+
+	return (duration_ms / 10) - 1;
+}
+
+static inline u8 get_vph_droop_thresh_code(u32 val_mv)
+{
+	if (!val_mv)
+		return 0;
+
+	return (val_mv / 100) - 25;
+}
+
 static int qpnp_flash_led_parse_each_led_dt(struct qpnp_flash_led *led,
 			struct flash_node_data *fnode, struct device_node *node)
 {
@@ -1521,8 +1543,9 @@
 	fnode->duration = FLASH_LED_SAFETY_TMR_DISABLED;
 	rc = of_property_read_u32(node, "qcom,duration-ms", &val);
 	if (!rc) {
-		fnode->duration = (u8)(SAFETY_TMR_TO_REG_VAL(val) |
-					FLASH_LED_SAFETY_TMR_ENABLE);
+		fnode->duration = get_safety_timer_code(val);
+		if (fnode->duration)
+			fnode->duration |= FLASH_LED_SAFETY_TMR_ENABLE;
 	} else if (rc == -EINVAL) {
 		if (fnode->type == FLASH_LED_TYPE_FLASH) {
 			pr_err("Timer duration is required for flash LED\n");
@@ -1968,7 +1991,7 @@
 	rc = of_property_read_u32(node, "qcom,vph-droop-threshold-mv", &val);
 	if (!rc) {
 		led->pdata->vph_droop_threshold =
-			VPH_DROOP_THRESH_MV_TO_VAL(val);
+			get_vph_droop_thresh_code(val);
 	} else if (rc != -EINVAL) {
 		pr_err("Unable to read VPH droop threshold, rc=%d\n", rc);
 		return rc;
@@ -2194,6 +2217,7 @@
 	if (!led->pdata)
 		return -ENOMEM;
 
+	qpnp_flash_led_prepare = qpnp_flash_led_prepare_v2;
 	rc = qpnp_flash_led_parse_common_dt(led, node);
 	if (rc < 0) {
 		pr_err("Failed to parse common flash LED device tree\n");
diff --git a/drivers/leds/leds-qpnp-flash.c b/drivers/leds/leds-qpnp-flash.c
index 3b07af8..ce2b055 100644
--- a/drivers/leds/leds-qpnp-flash.c
+++ b/drivers/leds/leds-qpnp-flash.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1207,7 +1207,7 @@
 	return rc;
 }
 
-int qpnp_flash_led_prepare(struct led_trigger *trig, int options,
+static int qpnp_flash_led_prepare_v1(struct led_trigger *trig, int options,
 					int *max_current)
 {
 	struct led_classdev *led_cdev = trigger_to_lcdev(trig);
@@ -2468,6 +2468,7 @@
 	led->pdev = pdev;
 	led->current_addr = FLASH_LED0_CURRENT(led->base);
 	led->current2_addr = FLASH_LED1_CURRENT(led->base);
+	qpnp_flash_led_prepare = qpnp_flash_led_prepare_v1;
 
 	led->pdata = devm_kzalloc(&pdev->dev, sizeof(*led->pdata), GFP_KERNEL);
 	if (!led->pdata)
diff --git a/drivers/leds/leds-qpnp-haptics.c b/drivers/leds/leds-qpnp-haptics.c
index 1eaa652..ebdff87 100644
--- a/drivers/leds/leds-qpnp-haptics.c
+++ b/drivers/leds/leds-qpnp-haptics.c
@@ -1,4 +1,5 @@
-/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2015, 2017-2018, The Linux Foundation.
+ * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1434,8 +1435,7 @@
 		time_us = ktime_to_us(time_rem);
 	}
 
-	return snprintf(buf, PAGE_SIZE, "%lld\n", time_us / 1000);
-	return 0;
+	return snprintf(buf, PAGE_SIZE, "%lld\n", div_s64(time_us, 1000));
 }
 
 static ssize_t qpnp_haptics_store_duration(struct device *dev,
diff --git a/drivers/leds/leds-qpnp-vibrator-ldo.c b/drivers/leds/leds-qpnp-vibrator-ldo.c
new file mode 100644
index 0000000..6a14324
--- /dev/null
+++ b/drivers/leds/leds-qpnp-vibrator-ldo.c
@@ -0,0 +1,550 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/errno.h>
+#include <linux/hrtimer.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/workqueue.h>
+
+/* Vibrator-LDO register definitions */
+#define QPNP_VIB_LDO_REG_STATUS1	0x08
+#define QPNP_VIB_LDO_VREG_READY		BIT(7)
+
+#define QPNP_VIB_LDO_REG_VSET_LB	0x40
+
+#define QPNP_VIB_LDO_REG_EN_CTL		0x46
+#define QPNP_VIB_LDO_EN			BIT(7)
+
+/* Vibrator-LDO voltage settings */
+#define QPNP_VIB_LDO_VMIN_UV		1504000
+#define QPNP_VIB_LDO_VMAX_UV		3544000
+#define QPNP_VIB_LDO_VOLT_STEP_UV	8000
+
+/*
+ * Define vibration periods: default(5sec), min(50ms), max(15sec) and
+ * overdrive(30ms).
+ */
+#define QPNP_VIB_MIN_PLAY_MS		50
+#define QPNP_VIB_PLAY_MS		5000
+#define QPNP_VIB_MAX_PLAY_MS		15000
+#define QPNP_VIB_OVERDRIVE_PLAY_MS	30
+
+struct vib_ldo_chip {
+	struct led_classdev	cdev;
+	struct regmap		*regmap;
+	struct mutex		lock;
+	struct hrtimer		stop_timer;
+	struct hrtimer		overdrive_timer;
+	struct work_struct	vib_work;
+	struct work_struct	overdrive_work;
+
+	u16			base;
+	int			vmax_uV;
+	int			overdrive_volt_uV;
+	int			ldo_uV;
+	int			state;
+	u64			vib_play_ms;
+	bool			vib_enabled;
+	bool			disable_overdrive;
+};
+
+static int qpnp_vib_ldo_set_voltage(struct vib_ldo_chip *chip, int new_uV)
+{
+	unsigned int val;
+	u32 vlevel;
+	u8 reg[2];
+	int ret;
+
+	if (chip->ldo_uV == new_uV)
+		return 0;
+
+	vlevel = roundup(new_uV, QPNP_VIB_LDO_VOLT_STEP_UV) / 1000;
+	reg[0] = vlevel & 0xff;
+	reg[1] = (vlevel & 0xff00) >> 8;
+	ret = regmap_bulk_write(chip->regmap,
+				chip->base + QPNP_VIB_LDO_REG_VSET_LB, reg, 2);
+	if (ret < 0) {
+		pr_err("regmap write failed, ret=%d\n", ret);
+		return ret;
+	}
+
+	if (chip->vib_enabled) {
+		ret = regmap_read_poll_timeout(chip->regmap,
+					chip->base + QPNP_VIB_LDO_REG_STATUS1,
+					val, val & QPNP_VIB_LDO_VREG_READY,
+					100, 1000);
+		if (ret < 0) {
+			pr_err("Vibrator LDO vreg_ready timeout, status=0x%02x, ret=%d\n",
+				val, ret);
+			return ret;
+		}
+	}
+
+	chip->ldo_uV = new_uV;
+	return ret;
+}
+
+static inline int qpnp_vib_ldo_enable(struct vib_ldo_chip *chip, bool enable)
+{
+	unsigned int val;
+	int ret;
+
+	if (chip->vib_enabled == enable)
+		return 0;
+
+	ret = regmap_update_bits(chip->regmap,
+				chip->base + QPNP_VIB_LDO_REG_EN_CTL,
+				QPNP_VIB_LDO_EN,
+				enable ? QPNP_VIB_LDO_EN : 0);
+	if (ret < 0) {
+		pr_err("Program Vibrator LDO %s is failed, ret=%d\n",
+			enable ? "enable" : "disable", ret);
+		return ret;
+	}
+
+	if (enable) {
+		ret = regmap_read_poll_timeout(chip->regmap,
+					chip->base + QPNP_VIB_LDO_REG_STATUS1,
+					val, val & QPNP_VIB_LDO_VREG_READY,
+					100, 1000);
+		if (ret < 0) {
+			pr_err("Vibrator LDO vreg_ready timeout, status=0x%02x, ret=%d\n",
+				val, ret);
+			return ret;
+		}
+	}
+
+	chip->vib_enabled = enable;
+
+	return ret;
+}
+
+static int qpnp_vibrator_play_on(struct vib_ldo_chip *chip)
+{
+	int volt_uV;
+	int ret;
+
+	volt_uV = chip->vmax_uV;
+	if (!chip->disable_overdrive)
+		volt_uV = chip->overdrive_volt_uV ? chip->overdrive_volt_uV
+				: min(chip->vmax_uV * 2, QPNP_VIB_LDO_VMAX_UV);
+
+	ret = qpnp_vib_ldo_set_voltage(chip, volt_uV);
+	if (ret < 0) {
+		pr_err("set voltage = %duV failed, ret=%d\n", volt_uV, ret);
+		return ret;
+	}
+	pr_debug("voltage set to %d uV\n", volt_uV);
+
+	ret = qpnp_vib_ldo_enable(chip, true);
+	if (ret < 0) {
+		pr_err("vibration enable failed, ret=%d\n", ret);
+		return ret;
+	}
+
+	if (!chip->disable_overdrive)
+		hrtimer_start(&chip->overdrive_timer,
+			ms_to_ktime(QPNP_VIB_OVERDRIVE_PLAY_MS),
+			HRTIMER_MODE_REL);
+
+	return ret;
+}
+
+static void qpnp_vib_work(struct work_struct *work)
+{
+	struct vib_ldo_chip *chip = container_of(work, struct vib_ldo_chip,
+						vib_work);
+	int ret = 0;
+
+	if (chip->state) {
+		if (!chip->vib_enabled)
+			ret = qpnp_vibrator_play_on(chip);
+
+		if (ret == 0)
+			hrtimer_start(&chip->stop_timer,
+				      ms_to_ktime(chip->vib_play_ms),
+				      HRTIMER_MODE_REL);
+	} else {
+		if (!chip->disable_overdrive) {
+			hrtimer_cancel(&chip->overdrive_timer);
+			cancel_work_sync(&chip->overdrive_work);
+		}
+		qpnp_vib_ldo_enable(chip, false);
+	}
+}
+
+static enum hrtimer_restart vib_stop_timer(struct hrtimer *timer)
+{
+	struct vib_ldo_chip *chip = container_of(timer, struct vib_ldo_chip,
+					     stop_timer);
+
+	chip->state = 0;
+	schedule_work(&chip->vib_work);
+	return HRTIMER_NORESTART;
+}
+
+static void qpnp_vib_overdrive_work(struct work_struct *work)
+{
+	struct vib_ldo_chip *chip = container_of(work, struct vib_ldo_chip,
+					     overdrive_work);
+	int ret;
+
+	mutex_lock(&chip->lock);
+
+	/* LDO voltage update not required if Vibration disabled */
+	if (!chip->vib_enabled)
+		goto unlock;
+
+	ret = qpnp_vib_ldo_set_voltage(chip, chip->vmax_uV);
+	if (ret < 0) {
+		pr_err("set vibration voltage = %duV failed, ret=%d\n",
+			chip->vmax_uV, ret);
+		qpnp_vib_ldo_enable(chip, false);
+		goto unlock;
+	}
+	pr_debug("voltage set to %d\n", chip->vmax_uV);
+
+unlock:
+	mutex_unlock(&chip->lock);
+}
+
+static enum hrtimer_restart vib_overdrive_timer(struct hrtimer *timer)
+{
+	struct vib_ldo_chip *chip = container_of(timer, struct vib_ldo_chip,
+					     overdrive_timer);
+	schedule_work(&chip->overdrive_work);
+	pr_debug("overdrive timer expired\n");
+	return HRTIMER_NORESTART;
+}
+
+static ssize_t qpnp_vib_show_state(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct led_classdev *cdev = dev_get_drvdata(dev);
+	struct vib_ldo_chip *chip = container_of(cdev, struct vib_ldo_chip,
+						cdev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", chip->vib_enabled);
+}
+
+static ssize_t qpnp_vib_store_state(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	/* At present, nothing to do with setting state */
+	return count;
+}
+
+static ssize_t qpnp_vib_show_duration(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct led_classdev *cdev = dev_get_drvdata(dev);
+	struct vib_ldo_chip *chip = container_of(cdev, struct vib_ldo_chip,
+						cdev);
+	ktime_t time_rem;
+	s64 time_ms = 0;
+
+	if (hrtimer_active(&chip->stop_timer)) {
+		time_rem = hrtimer_get_remaining(&chip->stop_timer);
+		time_ms = ktime_to_ms(time_rem);
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%lld\n", time_ms);
+}
+
+static ssize_t qpnp_vib_store_duration(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct led_classdev *cdev = dev_get_drvdata(dev);
+	struct vib_ldo_chip *chip = container_of(cdev, struct vib_ldo_chip,
+						cdev);
+	u32 val;
+	int ret;
+
+	ret = kstrtouint(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	/* setting 0 on duration is NOP for now */
+	if (val <= 0)
+		return count;
+
+	if (val < QPNP_VIB_MIN_PLAY_MS)
+		val = QPNP_VIB_MIN_PLAY_MS;
+
+	if (val > QPNP_VIB_MAX_PLAY_MS)
+		val = QPNP_VIB_MAX_PLAY_MS;
+
+	mutex_lock(&chip->lock);
+	chip->vib_play_ms = val;
+	mutex_unlock(&chip->lock);
+
+	return count;
+}
+
+static ssize_t qpnp_vib_show_activate(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	/* For now nothing to show */
+	return snprintf(buf, PAGE_SIZE, "%d\n", 0);
+}
+
+static ssize_t qpnp_vib_store_activate(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct led_classdev *cdev = dev_get_drvdata(dev);
+	struct vib_ldo_chip *chip = container_of(cdev, struct vib_ldo_chip,
+						cdev);
+	u32 val;
+	int ret;
+
+	ret = kstrtouint(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	if (val != 0 && val != 1)
+		return count;
+
+	mutex_lock(&chip->lock);
+	hrtimer_cancel(&chip->stop_timer);
+	chip->state = val;
+	pr_debug("state = %d, time = %llums\n", chip->state, chip->vib_play_ms);
+	mutex_unlock(&chip->lock);
+	schedule_work(&chip->vib_work);
+
+	return count;
+}
+
+static ssize_t qpnp_vib_show_vmax(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct led_classdev *cdev = dev_get_drvdata(dev);
+	struct vib_ldo_chip *chip = container_of(cdev, struct vib_ldo_chip,
+						cdev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", chip->vmax_uV / 1000);
+}
+
+static ssize_t qpnp_vib_store_vmax(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct led_classdev *cdev = dev_get_drvdata(dev);
+	struct vib_ldo_chip *chip = container_of(cdev, struct vib_ldo_chip,
+						cdev);
+	int data, ret;
+
+	ret = kstrtoint(buf, 10, &data);
+	if (ret < 0)
+		return ret;
+
+	data = data * 1000; /* Convert to microvolts */
+
+	/* check against vibrator ldo min/max voltage limits */
+	data = min(data, QPNP_VIB_LDO_VMAX_UV);
+	data = max(data, QPNP_VIB_LDO_VMIN_UV);
+
+	mutex_lock(&chip->lock);
+	chip->vmax_uV = data;
+	mutex_unlock(&chip->lock);
+	return count;
+}
+
+static struct device_attribute qpnp_vib_attrs[] = {
+	__ATTR(state, 0664, qpnp_vib_show_state, qpnp_vib_store_state),
+	__ATTR(duration, 0664, qpnp_vib_show_duration, qpnp_vib_store_duration),
+	__ATTR(activate, 0664, qpnp_vib_show_activate, qpnp_vib_store_activate),
+	__ATTR(vmax_mv, 0664, qpnp_vib_show_vmax, qpnp_vib_store_vmax),
+};
+
+static int qpnp_vib_parse_dt(struct device *dev, struct vib_ldo_chip *chip)
+{
+	int ret;
+
+	ret = of_property_read_u32(dev->of_node, "qcom,vib-ldo-volt-uv",
+				&chip->vmax_uV);
+	if (ret < 0) {
+		pr_err("qcom,vib-ldo-volt-uv property read failed, ret=%d\n",
+			ret);
+		return ret;
+	}
+
+	chip->disable_overdrive = of_property_read_bool(dev->of_node,
+					"qcom,disable-overdrive");
+
+	if (of_find_property(dev->of_node, "qcom,vib-overdrive-volt-uv",
+			     NULL)) {
+		ret = of_property_read_u32(dev->of_node,
+					   "qcom,vib-overdrive-volt-uv",
+					   &chip->overdrive_volt_uV);
+		if (ret < 0) {
+			pr_err("qcom,vib-overdrive-volt-uv property read failed, ret=%d\n",
+				ret);
+			return ret;
+		}
+
+		/* check against vibrator ldo min/max voltage limits */
+		chip->overdrive_volt_uV = min(chip->overdrive_volt_uV,
+						QPNP_VIB_LDO_VMAX_UV);
+		chip->overdrive_volt_uV = max(chip->overdrive_volt_uV,
+						QPNP_VIB_LDO_VMIN_UV);
+	}
+
+	return ret;
+}
+
+/* Dummy functions for brightness */
+static enum led_brightness qpnp_vib_brightness_get(struct led_classdev *cdev)
+{
+	return 0;
+}
+
+static void qpnp_vib_brightness_set(struct led_classdev *cdev,
+			enum led_brightness level)
+{
+}
+
+static int qpnp_vibrator_ldo_suspend(struct device *dev)
+{
+	struct vib_ldo_chip *chip = dev_get_drvdata(dev);
+
+	mutex_lock(&chip->lock);
+	if (!chip->disable_overdrive) {
+		hrtimer_cancel(&chip->overdrive_timer);
+		cancel_work_sync(&chip->overdrive_work);
+	}
+	hrtimer_cancel(&chip->stop_timer);
+	cancel_work_sync(&chip->vib_work);
+	mutex_unlock(&chip->lock);
+
+	return 0;
+}
+static SIMPLE_DEV_PM_OPS(qpnp_vibrator_ldo_pm_ops, qpnp_vibrator_ldo_suspend,
+			NULL);
+
+static int qpnp_vibrator_ldo_probe(struct platform_device *pdev)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	struct vib_ldo_chip *chip;
+	int i, ret;
+	u32 base;
+
+	ret = of_property_read_u32(of_node, "reg", &base);
+	if (ret < 0) {
+		pr_err("reg property reading failed, ret=%d\n", ret);
+		return ret;
+	}
+
+	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!chip->regmap) {
+		pr_err("couldn't get parent's regmap\n");
+		return -EINVAL;
+	}
+
+	ret = qpnp_vib_parse_dt(&pdev->dev, chip);
+	if (ret < 0) {
+		pr_err("couldn't parse device tree, ret=%d\n", ret);
+		return ret;
+	}
+
+	chip->base = (uint16_t)base;
+	chip->vib_play_ms = QPNP_VIB_PLAY_MS;
+	mutex_init(&chip->lock);
+	INIT_WORK(&chip->vib_work, qpnp_vib_work);
+	INIT_WORK(&chip->overdrive_work, qpnp_vib_overdrive_work);
+
+	hrtimer_init(&chip->stop_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	chip->stop_timer.function = vib_stop_timer;
+	hrtimer_init(&chip->overdrive_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	chip->overdrive_timer.function = vib_overdrive_timer;
+	dev_set_drvdata(&pdev->dev, chip);
+
+	chip->cdev.name = "vibrator";
+	chip->cdev.brightness_get = qpnp_vib_brightness_get;
+	chip->cdev.brightness_set = qpnp_vib_brightness_set;
+	chip->cdev.max_brightness = 100;
+	ret = devm_led_classdev_register(&pdev->dev, &chip->cdev);
+	if (ret < 0) {
+		pr_err("Error in registering led class device, ret=%d\n", ret);
+		goto fail;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(qpnp_vib_attrs); i++) {
+		ret = sysfs_create_file(&chip->cdev.dev->kobj,
+				&qpnp_vib_attrs[i].attr);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "Error in creating sysfs file, ret=%d\n",
+				ret);
+			goto sysfs_fail;
+		}
+	}
+
+	pr_info("Vibrator LDO successfully registered: uV = %d, overdrive = %s\n",
+		chip->vmax_uV,
+		chip->disable_overdrive ? "disabled" : "enabled");
+	return 0;
+
+sysfs_fail:
+	for (--i; i >= 0; i--)
+		sysfs_remove_file(&chip->cdev.dev->kobj,
+				&qpnp_vib_attrs[i].attr);
+fail:
+	mutex_destroy(&chip->lock);
+	dev_set_drvdata(&pdev->dev, NULL);
+	return ret;
+}
+
+static int qpnp_vibrator_ldo_remove(struct platform_device *pdev)
+{
+	struct vib_ldo_chip *chip = dev_get_drvdata(&pdev->dev);
+
+	if (!chip->disable_overdrive) {
+		hrtimer_cancel(&chip->overdrive_timer);
+		cancel_work_sync(&chip->overdrive_work);
+	}
+	hrtimer_cancel(&chip->stop_timer);
+	cancel_work_sync(&chip->vib_work);
+	mutex_destroy(&chip->lock);
+	dev_set_drvdata(&pdev->dev, NULL);
+
+	return 0;
+}
+
+static const struct of_device_id vibrator_ldo_match_table[] = {
+	{ .compatible = "qcom,qpnp-vibrator-ldo" },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, vibrator_ldo_match_table);
+
+static struct platform_driver qpnp_vibrator_ldo_driver = {
+	.driver	= {
+		.name		= "qcom,qpnp-vibrator-ldo",
+		.of_match_table	= vibrator_ldo_match_table,
+		.pm		= &qpnp_vibrator_ldo_pm_ops,
+	},
+	.probe	= qpnp_vibrator_ldo_probe,
+	.remove	= qpnp_vibrator_ldo_remove,
+};
+module_platform_driver(qpnp_vibrator_ldo_driver);
+
+MODULE_DESCRIPTION("QCOM QPNP Vibrator-LDO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-qpnp-wled.c b/drivers/leds/leds-qpnp-wled.c
index 29e09c9..d2e576d 100644
--- a/drivers/leds/leds-qpnp-wled.c
+++ b/drivers/leds/leds-qpnp-wled.c
@@ -210,6 +210,7 @@
 #define QPNP_WLED_SEC_ACCESS_REG(b)    (b + 0xD0)
 #define QPNP_WLED_SEC_UNLOCK           0xA5
 
+#define NUM_DDIC_CODES			256
 #define QPNP_WLED_MAX_STRINGS		4
 #define QPNP_PM660_WLED_MAX_STRINGS	3
 #define WLED_MAX_LEVEL_4095		4095
@@ -315,6 +316,7 @@
  *  @ cdev - led class device
  *  @ pdev - platform device
  *  @ work - worker for led operation
+ *  @ wq - workqueue for setting brightness level
  *  @ lock - mutex lock for exclusive access
  *  @ fdbk_op - output feedback mode
  *  @ dim_mode - dimming mode
@@ -340,6 +342,10 @@
  *  @ ramp_ms - delay between ramp steps in ms
  *  @ ramp_step - ramp step size
  *  @ cons_sync_write_delay_us - delay between two consecutive writes to SYNC
+ *  @ auto_calibration_ovp_count - OVP fault irq count to run auto calibration
+ *  @ max_strings - Number of strings supported in WLED peripheral
+ *  @ prev_level - Previous brightness level
+ *  @ brt_map_table - Brightness map table
  *  @ strings - supported list of strings
  *  @ num_strings - number of strings
  *  @ loop_auto_gm_thresh - the clamping level for auto gm
@@ -353,6 +359,13 @@
  *  @ en_cabc - enable or disable cabc
  *  @ disp_type_amoled - type of display: LCD/AMOLED
  *  @ en_ext_pfet_sc_pro - enable sc protection on external pfet
+ *  @ prev_state - previous state of WLED
+ *  @ stepper_en - Flag to enable stepper algorithm
+ *  @ ovp_irq_disabled - OVP interrupt disable status
+ *  @ auto_calib_enabled - Flag to enable auto calibration feature
+ *  @ auto_calib_done - Flag to indicate auto calibration is done
+ *  @ module_dis_perm - Flag to keep module permanently disabled
+ *  @ start_ovp_fault_time - Time when the OVP fault first occurred
  */
 struct qpnp_wled {
 	struct led_classdev	cdev;
@@ -360,6 +373,7 @@
 	struct regmap		*regmap;
 	struct pmic_revid_data	*pmic_rev_id;
 	struct work_struct	work;
+	struct workqueue_struct *wq;
 	struct mutex		lock;
 	struct mutex		bus_lock;
 	enum qpnp_wled_fdbk_op	fdbk_op;
@@ -388,6 +402,8 @@
 	u16			cons_sync_write_delay_us;
 	u16			auto_calibration_ovp_count;
 	u16			max_strings;
+	u16			prev_level;
+	u16			*brt_map_table;
 	u8			strings[QPNP_WLED_MAX_STRINGS];
 	u8			num_strings;
 	u8			loop_auto_gm_thresh;
@@ -402,6 +418,7 @@
 	bool			disp_type_amoled;
 	bool			en_ext_pfet_sc_pro;
 	bool			prev_state;
+	bool			stepper_en;
 	bool			ovp_irq_disabled;
 	bool			auto_calib_enabled;
 	bool			auto_calib_done;
@@ -409,6 +426,21 @@
 	ktime_t			start_ovp_fault_time;
 };
 
+static int qpnp_wled_step_delay_us = 52000;
+module_param_named(
+	total_step_delay_us, qpnp_wled_step_delay_us, int, 0600
+);
+
+static int qpnp_wled_step_size_threshold = 3;
+module_param_named(
+	step_size_threshold, qpnp_wled_step_size_threshold, int, 0600
+);
+
+static int qpnp_wled_step_delay_gain = 2;
+module_param_named(
+	step_delay_gain, qpnp_wled_step_delay_gain, int, 0600
+);
+
 /* helper to read a pmic register */
 static int qpnp_wled_read_reg(struct qpnp_wled *wled, u16 addr, u8 *data)
 {
@@ -570,6 +602,93 @@
 		return rc;
 	}
 
+	pr_debug("level:%d\n", level);
+	return 0;
+}
+
+static int qpnp_wled_set_map_level(struct qpnp_wled *wled, int level)
+{
+	int rc, i;
+
+	if (level < wled->prev_level) {
+		for (i = wled->prev_level; i >= level; i--) {
+			rc = qpnp_wled_set_level(wled, wled->brt_map_table[i]);
+			if (rc < 0) {
+				pr_err("set brightness level failed, rc:%d\n",
+					rc);
+				return rc;
+			}
+		}
+	} else if (level > wled->prev_level) {
+		for (i = wled->prev_level; i <= level; i++) {
+			rc = qpnp_wled_set_level(wled, wled->brt_map_table[i]);
+			if (rc < 0) {
+				pr_err("set brightness level failed, rc:%d\n",
+					rc);
+				return rc;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int qpnp_wled_set_step_level(struct qpnp_wled *wled, int new_level)
+{
+	int rc, i, num_steps, delay_us;
+	u16 level, start_level, end_level, step_size;
+	bool level_inc = false;
+
+	level = wled->prev_level;
+	start_level = wled->brt_map_table[level];
+	end_level = wled->brt_map_table[new_level];
+	level_inc = (new_level > level);
+
+	num_steps = abs(start_level - end_level);
+	if (!num_steps)
+		return 0;
+
+	delay_us = qpnp_wled_step_delay_us / num_steps;
+	pr_debug("level goes from [%d %d] num_steps: %d, delay: %d\n",
+		start_level, end_level, num_steps, delay_us);
+
+	if (delay_us < 500) {
+		step_size = 1000 / delay_us;
+		num_steps = num_steps / step_size;
+		delay_us = 1000;
+	} else {
+		if (num_steps < qpnp_wled_step_size_threshold)
+			delay_us *= qpnp_wled_step_delay_gain;
+
+		step_size = 1;
+	}
+
+	i = start_level;
+	while (num_steps--) {
+		if (level_inc)
+			i += step_size;
+		else
+			i -= step_size;
+
+		rc = qpnp_wled_set_level(wled, i);
+		if (rc < 0)
+			return rc;
+
+		if (delay_us > 0) {
+			if (delay_us < 20000)
+				usleep_range(delay_us, delay_us + 1);
+			else
+				msleep(delay_us / USEC_PER_MSEC);
+		}
+	}
+
+	if (i != end_level) {
+		i = end_level;
+		rc = qpnp_wled_set_level(wled, i);
+		if (rc < 0)
+			return rc;
+	}
+
 	return 0;
 }
 
@@ -942,15 +1061,33 @@
 static void qpnp_wled_work(struct work_struct *work)
 {
 	struct qpnp_wled *wled;
-	int level, rc;
+	int level, level_255, rc;
 
 	wled = container_of(work, struct qpnp_wled, work);
 
+	mutex_lock(&wled->lock);
 	level = wled->cdev.brightness;
 
-	mutex_lock(&wled->lock);
+	if (wled->brt_map_table) {
+		/*
+		 * Change the 12 bit level to 8 bit level and use the mapped
+		 * values for 12 bit level from brightness map table.
+		 */
+		level_255 = DIV_ROUND_CLOSEST(level, 16);
+		if (level_255 > 255)
+			level_255 = 255;
 
-	if (level) {
+		pr_debug("level: %d level_255: %d\n", level, level_255);
+		if (wled->stepper_en)
+			rc = qpnp_wled_set_step_level(wled, level_255);
+		else
+			rc = qpnp_wled_set_map_level(wled, level_255);
+		if (rc) {
+			dev_err(&wled->pdev->dev, "wled set level failed\n");
+			goto unlock_mutex;
+		}
+		wled->prev_level = level_255;
+	} else if (level) {
 		rc = qpnp_wled_set_level(wled, level);
 		if (rc) {
 			dev_err(&wled->pdev->dev, "wled set level failed\n");
@@ -1009,7 +1146,7 @@
 		level = wled->cdev.max_brightness;
 
 	wled->cdev.brightness = level;
-	schedule_work(&wled->work);
+	queue_work(wled->wq, &wled->work);
 }
 
 static int qpnp_wled_set_disp(struct qpnp_wled *wled, u16 base_addr)
@@ -2115,7 +2252,7 @@
 	struct property *prop;
 	const char *temp_str;
 	u32 temp_val;
-	int rc, i;
+	int rc, i, size;
 	u8 *strings;
 
 	wled->cdev.name = "wled";
@@ -2134,6 +2271,45 @@
 		return rc;
 	}
 
+	if (of_find_property(pdev->dev.of_node, "qcom,wled-brightness-map",
+			NULL)) {
+		size = of_property_count_elems_of_size(pdev->dev.of_node,
+				"qcom,wled-brightness-map", sizeof(u16));
+		if (size != NUM_DDIC_CODES) {
+			pr_err("Invalid WLED brightness map size:%d\n", size);
+			return -EINVAL; /* rc is 0 (stale) here; must report failure */
+		}
+
+		wled->brt_map_table = devm_kcalloc(&pdev->dev, NUM_DDIC_CODES,
+						sizeof(u16), GFP_KERNEL);
+		if (!wled->brt_map_table)
+			return -ENOMEM;
+
+		rc = of_property_read_u16_array(pdev->dev.of_node,
+			"qcom,wled-brightness-map", wled->brt_map_table,
+			NUM_DDIC_CODES);
+		if (rc < 0) {
+			pr_err("Error in reading WLED brightness map, rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		for (i = 0; i < NUM_DDIC_CODES; i++) {
+			if (wled->brt_map_table[i] > WLED_MAX_LEVEL_4095) {
+				pr_err("WLED brightness map not in range\n");
+				return -EDOM;
+			}
+
+			if ((i > 0) && wled->brt_map_table[i]
+						< wled->brt_map_table[i - 1]) {
+				pr_err("WLED brightness map not in ascending order\n");
+				return -EDOM;
+			}
+		}
+	}
+
+	wled->stepper_en = of_property_read_bool(pdev->dev.of_node,
+				"qcom,wled-stepper-en");
 	wled->disp_type_amoled = of_property_read_bool(pdev->dev.of_node,
 				"qcom,disp-type-amoled");
 	if (wled->disp_type_amoled) {
@@ -2470,6 +2646,7 @@
 	}
 
 	wled->pmic_rev_id = get_revid_data(revid_node);
+	of_node_put(revid_node);
 	if (IS_ERR_OR_NULL(wled->pmic_rev_id)) {
 		pr_err("Unable to get pmic_revid rc=%ld\n",
 			PTR_ERR(wled->pmic_rev_id));
@@ -2484,6 +2661,12 @@
 	pr_debug("PMIC subtype %d Digital major %d\n",
 		wled->pmic_rev_id->pmic_subtype, wled->pmic_rev_id->rev4);
 
+	wled->wq = alloc_ordered_workqueue("qpnp_wled_wq", WQ_HIGHPRI);
+	if (!wled->wq) {
+		pr_err("Unable to alloc workqueue for WLED\n");
+		return -ENOMEM;
+	}
+
 	prop = of_get_address_by_name(pdev->dev.of_node, QPNP_WLED_SINK_BASE,
 			NULL, NULL);
 	if (!prop) {
@@ -2549,6 +2732,7 @@
 	led_classdev_unregister(&wled->cdev);
 wled_register_fail:
 	cancel_work_sync(&wled->work);
+	destroy_workqueue(wled->wq);
 	mutex_destroy(&wled->lock);
 	return rc;
 }
@@ -2564,6 +2748,7 @@
 
 	led_classdev_unregister(&wled->cdev);
 	cancel_work_sync(&wled->work);
+	destroy_workqueue(wled->wq);
 	mutex_destroy(&wled->lock);
 
 	return 0;
diff --git a/drivers/leds/leds-qti-tri-led.c b/drivers/leds/leds-qti-tri-led.c
new file mode 100644
index 0000000..ab5e876
--- /dev/null
+++ b/drivers/leds/leds-qti-tri-led.c
@@ -0,0 +1,512 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#define TRILED_REG_TYPE			0x04
+#define TRILED_REG_SUBTYPE		0x05
+#define TRILED_REG_EN_CTL		0x46
+
+/* TRILED_REG_EN_CTL */
+#define TRILED_EN_CTL_MASK		GENMASK(7, 5)
+#define TRILED_EN_CTL_MAX_BIT		7
+
+#define TRILED_TYPE			0x19
+#define TRILED_SUBTYPE_LED3H0L12	0x02
+#define TRILED_SUBTYPE_LED2H0L12	0x03
+#define TRILED_SUBTYPE_LED1H2L12	0x04
+
+#define TRILED_NUM_MAX			3
+
+#define PWM_PERIOD_DEFAULT_NS		1000000
+#define LED_BLINK_ON_MS			125
+#define LED_BLINK_OFF_MS		875
+
+struct pwm_setting {
+	u32	pre_period_ns;
+	u32	period_ns;
+	u32	duty_ns;
+};
+
+struct led_setting {
+	u32			on_ms;
+	u32			off_ms;
+	enum led_brightness	brightness;
+	bool			blink;
+};
+
+struct qpnp_led_dev {
+	struct led_classdev	cdev;
+	struct pwm_device	*pwm_dev;
+	struct pwm_setting	pwm_setting;
+	struct led_setting	led_setting;
+	struct qpnp_tri_led_chip	*chip;
+	struct mutex		lock;
+	const char		*label;
+	const char		*default_trigger;
+	u8			id;
+	bool			blinking;
+};
+
+struct qpnp_tri_led_chip {
+	struct device		*dev;
+	struct regmap		*regmap;
+	struct qpnp_led_dev	*leds;
+	struct mutex		bus_lock;
+	int			num_leds;
+	u16			reg_base;
+	u8			subtype;
+};
+
+static int qpnp_tri_led_read(struct qpnp_tri_led_chip *chip, u16 addr, u8 *val)
+{
+	int rc;
+	unsigned int tmp;
+
+	mutex_lock(&chip->bus_lock);
+	rc = regmap_read(chip->regmap, chip->reg_base + addr, &tmp);
+	if (rc < 0)
+		dev_err(chip->dev, "Read addr 0x%x failed, rc=%d\n", addr, rc);
+	else
+		*val = (u8)tmp;
+	mutex_unlock(&chip->bus_lock);
+
+	return rc;
+}
+
+static int qpnp_tri_led_masked_write(struct qpnp_tri_led_chip *chip,
+				u16 addr, u8 mask, u8 val)
+{
+	int rc;
+
+	mutex_lock(&chip->bus_lock);
+	rc = regmap_update_bits(chip->regmap, chip->reg_base + addr, mask, val);
+	if (rc < 0)
+		dev_err(chip->dev, "Update addr 0x%x to val 0x%x with mask 0x%x failed, rc=%d\n",
+					addr, val, mask, rc);
+	mutex_unlock(&chip->bus_lock);
+
+	return rc;
+}
+
+static int __tri_led_config_pwm(struct qpnp_led_dev *led,
+				struct pwm_setting *pwm)
+{
+	struct pwm_state pstate;
+	int rc;
+
+	pwm_get_state(led->pwm_dev, &pstate);
+	pstate.enabled = !!(pwm->duty_ns != 0);
+	pstate.period = pwm->period_ns;
+	pstate.duty_cycle = pwm->duty_ns;
+	rc = pwm_apply_state(led->pwm_dev, &pstate);
+
+	if (rc < 0)
+		dev_err(led->chip->dev, "Apply PWM state for %s led failed, rc=%d\n",
+					led->cdev.name, rc);
+
+	return rc;
+}
+
+static int __tri_led_set(struct qpnp_led_dev *led)
+{
+	int rc = 0;
+	u8 val = 0, mask = 0;
+
+	rc = __tri_led_config_pwm(led, &led->pwm_setting);
+	if (rc < 0) {
+		dev_err(led->chip->dev, "Configure PWM for %s led failed, rc=%d\n",
+					led->cdev.name, rc);
+		return rc;
+	}
+
+	mask |= 1 << (TRILED_EN_CTL_MAX_BIT - led->id);
+
+	if (led->pwm_setting.duty_ns == 0)
+		val = 0;
+	else
+		val = mask;
+
+	rc = qpnp_tri_led_masked_write(led->chip, TRILED_REG_EN_CTL,
+							mask, val);
+	if (rc < 0)
+		dev_err(led->chip->dev, "Update addr 0x%x failed, rc=%d\n",
+					TRILED_REG_EN_CTL, rc);
+
+	return rc;
+}
+
+static int qpnp_tri_led_set(struct qpnp_led_dev *led)
+{
+	u32 on_ms, off_ms, period_ns, duty_ns;
+	enum led_brightness brightness = led->led_setting.brightness;
+	int rc = 0;
+
+	if (led->led_setting.blink) {
+		on_ms = led->led_setting.on_ms;
+		off_ms = led->led_setting.off_ms;
+		if (on_ms > INT_MAX / NSEC_PER_MSEC)
+			duty_ns = INT_MAX - 1;
+		else
+			duty_ns = on_ms * NSEC_PER_MSEC;
+
+		if (on_ms + off_ms > INT_MAX / NSEC_PER_MSEC) {
+			period_ns = INT_MAX;
+			duty_ns = (period_ns / (on_ms + off_ms)) * on_ms;
+		} else {
+			period_ns = (on_ms + off_ms) * NSEC_PER_MSEC;
+		}
+
+		if (period_ns < duty_ns && duty_ns != 0)
+			period_ns = duty_ns + 1;
+	} else {
+		/* Use initial period if no blinking is required */
+		period_ns = led->pwm_setting.pre_period_ns;
+
+		if (brightness && period_ns > INT_MAX / brightness)
+			duty_ns = (period_ns / LED_FULL) * brightness;
+		else
+			duty_ns = (period_ns * brightness) / LED_FULL;
+
+		if (period_ns < duty_ns && duty_ns != 0)
+			period_ns = duty_ns + 1;
+	}
+	dev_dbg(led->chip->dev, "PWM settings for %s led: period = %dns, duty = %dns\n",
+				led->cdev.name, period_ns, duty_ns);
+
+	led->pwm_setting.duty_ns = duty_ns;
+	led->pwm_setting.period_ns = period_ns;
+
+	rc = __tri_led_set(led);
+	if (rc < 0) {
+		dev_err(led->chip->dev, "__tri_led_set %s failed, rc=%d\n",
+				led->cdev.name, rc);
+		return rc;
+	}
+
+	if (led->led_setting.blink) {
+		led->cdev.brightness = LED_FULL;
+		led->blinking = true;
+	} else {
+		led->cdev.brightness = led->led_setting.brightness;
+		led->blinking = false;
+	}
+
+	return rc;
+}
+
+static int qpnp_tri_led_set_brightness(struct led_classdev *led_cdev,
+		enum led_brightness brightness)
+{
+	struct qpnp_led_dev *led =
+		container_of(led_cdev, struct qpnp_led_dev, cdev);
+	int rc = 0;
+
+	mutex_lock(&led->lock);
+	if (brightness > LED_FULL)
+		brightness = LED_FULL;
+
+	if (brightness == led->led_setting.brightness &&
+				!led->blinking) {
+		mutex_unlock(&led->lock);
+		return 0;
+	}
+
+	led->led_setting.brightness = brightness;
+	if (!!brightness)
+		led->led_setting.off_ms = 0;
+	else
+		led->led_setting.on_ms = 0;
+	led->led_setting.blink = false;
+
+	rc = qpnp_tri_led_set(led);
+	if (rc)
+		dev_err(led->chip->dev, "Set led failed for %s, rc=%d\n",
+				led->label, rc);
+
+	mutex_unlock(&led->lock);
+
+	return rc;
+}
+
+static enum led_brightness qpnp_tri_led_get_brightness(
+			struct led_classdev *led_cdev)
+{
+	return led_cdev->brightness;
+}
+
+static int qpnp_tri_led_set_blink(struct led_classdev *led_cdev,
+		unsigned long *on_ms, unsigned long *off_ms)
+{
+	struct qpnp_led_dev *led =
+		container_of(led_cdev, struct qpnp_led_dev, cdev);
+	int rc = 0;
+
+	mutex_lock(&led->lock);
+	if (led->blinking && *on_ms == led->led_setting.on_ms &&
+			*off_ms == led->led_setting.off_ms) {
+		dev_dbg(led_cdev->dev, "Ignore, on/off setting is not changed: on %lums, off %lums\n",
+						*on_ms, *off_ms);
+		mutex_unlock(&led->lock);
+		return 0;
+	}
+
+	if (*on_ms == 0) {
+		led->led_setting.blink = false;
+		led->led_setting.brightness = LED_OFF;
+	} else if (*off_ms == 0) {
+		led->led_setting.blink = false;
+		led->led_setting.brightness = led->cdev.brightness;
+	} else {
+		led->led_setting.on_ms = *on_ms;
+		led->led_setting.off_ms = *off_ms;
+		led->led_setting.blink = true;
+	}
+
+	rc = qpnp_tri_led_set(led);
+	if (rc)
+		dev_err(led->chip->dev, "Set led failed for %s, rc=%d\n",
+				led->label, rc);
+
+	mutex_unlock(&led->lock);
+	return rc;
+}
+
+static int qpnp_tri_led_register(struct qpnp_tri_led_chip *chip)
+{
+	struct qpnp_led_dev *led;
+	int rc, i, j;
+
+	for (i = 0; i < chip->num_leds; i++) {
+		led = &chip->leds[i];
+		mutex_init(&led->lock);
+		led->cdev.name = led->label;
+		led->cdev.max_brightness = LED_FULL;
+		led->cdev.brightness_set_blocking = qpnp_tri_led_set_brightness;
+		led->cdev.brightness_get = qpnp_tri_led_get_brightness;
+		led->cdev.blink_set = qpnp_tri_led_set_blink;
+		led->cdev.default_trigger = led->default_trigger;
+		led->cdev.brightness = LED_OFF;
+		led->cdev.blink_delay_on = LED_BLINK_ON_MS;
+		led->cdev.blink_delay_off = LED_BLINK_OFF_MS;
+
+		rc = devm_led_classdev_register(chip->dev, &led->cdev);
+		if (rc < 0) {
+			dev_err(chip->dev, "%s led class device registering failed, rc=%d\n",
+							led->label, rc);
+			goto destroy;
+		}
+	}
+
+	return 0;
+destroy:
+	for (j = 0; j <= i; j++)
+		mutex_destroy(&chip->leds[j].lock); /* index j, not i: destroy each init'd mutex once */
+
+	return rc;
+}
+
+static int qpnp_tri_led_hw_init(struct qpnp_tri_led_chip *chip)
+{
+	int rc = 0;
+	u8 val;
+
+	rc = qpnp_tri_led_read(chip, TRILED_REG_TYPE, &val);
+	if (rc < 0) {
+		dev_err(chip->dev, "Read REG_TYPE failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (val != TRILED_TYPE) {
+		dev_err(chip->dev, "invalid subtype(%d)\n", val);
+		return -ENODEV;
+	}
+
+	rc = qpnp_tri_led_read(chip, TRILED_REG_SUBTYPE, &val);
+	if (rc < 0) {
+		dev_err(chip->dev, "Read REG_SUBTYPE failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	chip->subtype = val;
+
+	return 0;
+}
+
+static int qpnp_tri_led_parse_dt(struct qpnp_tri_led_chip *chip)
+{
+	struct device_node *node = chip->dev->of_node, *child_node;
+	struct qpnp_led_dev *led;
+	struct pwm_args pargs;
+	const __be32 *addr;
+	int rc, id, i = 0;
+
+	addr = of_get_address(chip->dev->of_node, 0, NULL, NULL);
+	if (!addr) {
+		dev_err(chip->dev, "Getting address failed\n");
+		return -EINVAL;
+	}
+	chip->reg_base = be32_to_cpu(addr[0]);
+
+	chip->num_leds = of_get_available_child_count(node);
+	if (chip->num_leds == 0) {
+		dev_err(chip->dev, "No led child node defined\n");
+		return -ENODEV;
+	}
+
+	if (chip->num_leds > TRILED_NUM_MAX) {
+		dev_err(chip->dev, "can't support %d leds(max %d)\n",
+				chip->num_leds, TRILED_NUM_MAX);
+		return -EINVAL;
+	}
+
+	chip->leds = devm_kcalloc(chip->dev, chip->num_leds,
+			sizeof(struct qpnp_led_dev), GFP_KERNEL);
+	if (!chip->leds)
+		return -ENOMEM;
+
+	for_each_available_child_of_node(node, child_node) {
+		rc = of_property_read_u32(child_node, "led-sources", &id);
+		if (rc) {
+			dev_err(chip->dev, "Get led-sources failed, rc=%d\n", rc);
+			of_node_put(child_node); /* drop ref held by the iterator */
+			return rc;
+		}
+
+		if (id >= TRILED_NUM_MAX) {
+			dev_err(chip->dev, "only support 0~%d current source\n", TRILED_NUM_MAX - 1);
+			of_node_put(child_node);
+			return -EINVAL;
+		}
+
+		led = &chip->leds[i++];
+		led->chip = chip;
+		led->id = id;
+		led->label =
+			of_get_property(child_node, "label", NULL) ? :
+							child_node->name;
+
+		led->pwm_dev =
+			devm_of_pwm_get(chip->dev, child_node, NULL);
+		if (IS_ERR(led->pwm_dev)) {
+			rc = PTR_ERR(led->pwm_dev);
+			if (rc != -EPROBE_DEFER)
+				dev_err(chip->dev, "Get pwm device for %s led failed, rc=%d\n", led->label, rc);
+			of_node_put(child_node);
+			return rc;
+		}
+
+		pwm_get_args(led->pwm_dev, &pargs);
+		if (pargs.period == 0)
+			led->pwm_setting.pre_period_ns = PWM_PERIOD_DEFAULT_NS;
+		else
+			led->pwm_setting.pre_period_ns = pargs.period;
+
+		led->default_trigger = of_get_property(child_node,
+				"linux,default-trigger", NULL);
+	}
+
+	return 0;
+}
+
+static int qpnp_tri_led_probe(struct platform_device *pdev)
+{
+	struct qpnp_tri_led_chip *chip;
+	int rc = 0;
+
+	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->dev = &pdev->dev;
+	chip->regmap = dev_get_regmap(chip->dev->parent, NULL);
+	if (!chip->regmap) {
+		dev_err(chip->dev, "Getting regmap failed\n");
+		return -EINVAL;
+	}
+
+	rc = qpnp_tri_led_parse_dt(chip);
+	if (rc < 0) {
+		dev_err(chip->dev, "Devicetree properties parsing failed, rc=%d\n",
+								rc);
+		return rc;
+	}
+
+	mutex_init(&chip->bus_lock);
+
+	rc = qpnp_tri_led_hw_init(chip);
+	if (rc) {
+		dev_err(chip->dev, "HW initialization failed, rc=%d\n", rc);
+		goto destroy;
+	}
+
+	dev_set_drvdata(chip->dev, chip);
+	rc = qpnp_tri_led_register(chip);
+	if (rc < 0) {
+		dev_err(chip->dev, "Registering LED class devices failed, rc=%d\n",
+								rc);
+		goto destroy;
+	}
+
+	dev_dbg(chip->dev, "Tri-led module with subtype 0x%x is detected\n",
+					chip->subtype);
+	return 0;
+destroy:
+	mutex_destroy(&chip->bus_lock);
+	dev_set_drvdata(chip->dev, NULL);
+
+	return rc;
+}
+
+static int qpnp_tri_led_remove(struct platform_device *pdev)
+{
+	int i;
+	struct qpnp_tri_led_chip *chip = dev_get_drvdata(&pdev->dev);
+
+	mutex_destroy(&chip->bus_lock);
+	for (i = 0; i < chip->num_leds; i++)
+		mutex_destroy(&chip->leds[i].lock);
+	dev_set_drvdata(chip->dev, NULL);
+	return 0;
+}
+
+static const struct of_device_id qpnp_tri_led_of_match[] = {
+	{ .compatible = "qcom,tri-led",},
+	{ },
+};
+
+static struct platform_driver qpnp_tri_led_driver = {
+	.driver		= {
+		.name		= "qcom,tri-led",
+		.of_match_table	= qpnp_tri_led_of_match,
+	},
+	.probe		= qpnp_tri_led_probe,
+	.remove		= qpnp_tri_led_remove,
+};
+module_platform_driver(qpnp_tri_led_driver);
+
+MODULE_DESCRIPTION("QTI TRI_LED driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("leds:qpnp-tri-led");
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index ca4abe1..537903b 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -404,7 +404,8 @@
 
 	finish_wait(&ca->set->bucket_wait, &w);
 out:
-	wake_up_process(ca->alloc_thread);
+	if (ca->alloc_thread)
+		wake_up_process(ca->alloc_thread);
 
 	trace_bcache_alloc(ca, reserve);
 
@@ -476,7 +477,7 @@
 		if (b == -1)
 			goto err;
 
-		k->ptr[i] = PTR(ca->buckets[b].gen,
+		k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
 				bucket_to_sector(c, b),
 				ca->sb.nr_this_dev);
 
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index 243de0bf..4bf1518 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -584,7 +584,7 @@
 		return false;
 
 	for (i = 0; i < KEY_PTRS(l); i++)
-		if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
+		if (l->ptr[i] + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
 		    PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
 			return false;
 
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 6925023..08f20b7 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -508,7 +508,7 @@
 			continue;
 
 		ja->cur_idx = next;
-		k->ptr[n++] = PTR(0,
+		k->ptr[n++] = MAKE_PTR(0,
 				  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
 				  ca->sb.nr_this_dev);
 	}
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index e0f1c6d..edb8d1a 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -468,6 +468,7 @@
 	unsigned		recoverable:1;
 	unsigned		write:1;
 	unsigned		read_dirty_data:1;
+	unsigned		cache_missed:1;
 
 	unsigned long		start_time;
 
@@ -653,6 +654,7 @@
 
 	s->orig_bio		= bio;
 	s->cache_miss		= NULL;
+	s->cache_missed		= 0;
 	s->d			= d;
 	s->recoverable		= 1;
 	s->write		= op_is_write(bio_op(bio));
@@ -703,7 +705,14 @@
 	struct search *s = container_of(cl, struct search, cl);
 	struct bio *bio = &s->bio.bio;
 
-	if (s->recoverable) {
+	/*
+	 * If read request hit dirty data (s->read_dirty_data is true),
+	 * then recovery a failed read request from cached device may
+	 * get a stale data back. So read failure recovery is only
+	 * permitted when read request hit clean data in cache device,
+	 * or when cache read race happened.
+	 */
+	if (s->recoverable && !s->read_dirty_data) {
 		/* Retry from the backing device: */
 		trace_bcache_read_retry(s->orig_bio);
 
@@ -764,7 +773,7 @@
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 
 	bch_mark_cache_accounting(s->iop.c, s->d,
-				  !s->cache_miss, s->iop.bypass);
+				  !s->cache_missed, s->iop.bypass);
 	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);
 
 	if (s->iop.error)
@@ -783,6 +792,8 @@
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 	struct bio *miss, *cache_bio;
 
+	s->cache_missed = 1;
+
 	if (s->cache_miss || s->iop.bypass) {
 		miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
 		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index f4557f5..28ce342 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -2091,6 +2091,7 @@
 	if (bcache_major)
 		unregister_blkdev(bcache_major, "bcache");
 	unregister_reboot_notifier(&reboot);
+	mutex_destroy(&bch_register_lock);
 }
 
 static int __init bcache_init(void)
@@ -2109,14 +2110,15 @@
 	bcache_major = register_blkdev(0, "bcache");
 	if (bcache_major < 0) {
 		unregister_reboot_notifier(&reboot);
+		mutex_destroy(&bch_register_lock);
 		return bcache_major;
 	}
 
 	if (!(bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0)) ||
 	    !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
-	    sysfs_create_files(bcache_kobj, files) ||
 	    bch_request_init() ||
-	    bch_debug_init(bcache_kobj))
+	    bch_debug_init(bcache_kobj) ||
+	    sysfs_create_files(bcache_kobj, files))
 		goto err;
 
 	return 0;
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index fb02c39..f7ff408 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -2084,6 +2084,7 @@
 				for (k = 0; k < page; k++) {
 					kfree(new_bp[k].map);
 				}
+				kfree(new_bp);
 
 				/* restore some fields from old_counts */
 				bitmap->counts.bp = old_counts.bp;
@@ -2134,6 +2135,14 @@
 		block += old_blocks;
 	}
 
+	if (bitmap->counts.bp != old_counts.bp) {
+		unsigned long k;
+		for (k = 0; k < old_counts.pages; k++)
+			if (!old_counts.bp[k].hijacked)
+				kfree(old_counts.bp[k].map);
+		kfree(old_counts.bp);
+	}
+
 	if (!init) {
 		int i;
 		while (block < (chunks << chunkshift)) {
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 8bf9667..3ec647e 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -937,7 +937,8 @@
 		buffers = c->minimum_buffers;
 
 	*limit_buffers = buffers;
-	*threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
+	*threshold_buffers = mult_frac(buffers,
+				       DM_BUFIO_WRITEBACK_PERCENT, 100);
 }
 
 /*
@@ -1553,7 +1554,8 @@
 	int l;
 	struct dm_buffer *b, *tmp;
 	unsigned long freed = 0;
-	unsigned long count = nr_to_scan;
+	unsigned long count = c->n_buffers[LIST_CLEAN] +
+			      c->n_buffers[LIST_DIRTY];
 	unsigned long retain_target = get_retain_buffers(c);
 
 	for (l = 0; l < LIST_SIZE; l++) {
@@ -1590,6 +1592,7 @@
 {
 	struct dm_bufio_client *c;
 	unsigned long count;
+	unsigned long retain_target;
 
 	c = container_of(shrink, struct dm_bufio_client, shrinker);
 	if (sc->gfp_mask & __GFP_FS)
@@ -1598,8 +1601,9 @@
 		return 0;
 
 	count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
+	retain_target = get_retain_buffers(c);
 	dm_bufio_unlock(c);
-	return count;
+	return (count < retain_target) ? 0 : (count - retain_target);
 }
 
 /*
@@ -1856,19 +1860,15 @@
 	memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
 	memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
 
-	mem = (__u64)((totalram_pages - totalhigh_pages) *
-		      DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;
+	mem = (__u64)mult_frac(totalram_pages - totalhigh_pages,
+			       DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
 
 	if (mem > ULONG_MAX)
 		mem = ULONG_MAX;
 
 #ifdef CONFIG_MMU
-	/*
-	 * Get the size of vmalloc space the same way as VMALLOC_TOTAL
-	 * in fs/proc/internal.h
-	 */
-	if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
-		mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
+	if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
+		mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
 #endif
 
 	dm_bufio_default_cache_size = mem;
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 40ceba1..1609d49 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -29,7 +29,6 @@
  * DM targets must _not_ deference a mapped_device to directly access its members!
  */
 struct mapped_device {
-	struct srcu_struct io_barrier;
 	struct mutex suspend_lock;
 
 	/*
@@ -127,6 +126,8 @@
 	struct blk_mq_tag_set *tag_set;
 	bool use_blk_mq:1;
 	bool init_tio_pdu:1;
+
+	struct srcu_struct io_barrier;
 };
 
 void dm_init_md_queue(struct mapped_device *md);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index aac7161..73e7262 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -21,6 +21,7 @@
 #include <linux/delay.h>
 #include <linux/wait.h>
 #include <linux/pr.h>
+#include <linux/vmalloc.h>
 
 #define DM_MSG_PREFIX "core"
 
@@ -1518,7 +1519,7 @@
 	struct mapped_device *md;
 	void *old_md;
 
-	md = kzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
+	md = vzalloc_node(sizeof(*md), numa_node_id);
 	if (!md) {
 		DMWARN("unable to allocate device, out of memory.");
 		return NULL;
@@ -1612,7 +1613,7 @@
 bad_minor:
 	module_put(THIS_MODULE);
 bad_module_get:
-	kfree(md);
+	kvfree(md);
 	return NULL;
 }
 
@@ -1631,7 +1632,7 @@
 	free_minor(minor);
 
 	module_put(THIS_MODULE);
-	kfree(md);
+	kvfree(md);
 }
 
 static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
@@ -2521,11 +2522,15 @@
 
 	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
 
-	if (test_bit(DMF_FREEING, &md->flags) ||
-	    dm_deleting_md(md))
-		return NULL;
-
+	spin_lock(&_minor_lock);
+	if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
+		md = NULL;
+		goto out;
+	}
 	dm_get(md);
+out:
+	spin_unlock(&_minor_lock);
+
 	return md;
 }
 
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 2b13117..ba7edcd 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -974,6 +974,7 @@
 	lockres_free(cinfo->bitmap_lockres);
 	unlock_all_bitmaps(mddev);
 	dlm_release_lockspace(cinfo->lockspace, 2);
+	kfree(cinfo);
 	return 0;
 }
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 7aea022..475a7a1 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1689,8 +1689,11 @@
 		struct r5dev *dev = &sh->dev[i];
 
 		if (dev->written || i == pd_idx || i == qd_idx) {
-			if (!discard && !test_bit(R5_SkipCopy, &dev->flags))
+			if (!discard && !test_bit(R5_SkipCopy, &dev->flags)) {
 				set_bit(R5_UPTODATE, &dev->flags);
+				if (test_bit(STRIPE_EXPAND_READY, &sh->state))
+					set_bit(R5_Expanded, &dev->flags);
+			}
 			if (fua)
 				set_bit(R5_WantFUA, &dev->flags);
 			if (sync)
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.c b/drivers/media/platform/msm/camera/cam_core/cam_context.c
index 7e6d999..d1222aa 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -196,6 +196,30 @@
 	return rc;
 }
 
+int cam_context_handle_crm_process_evt(struct cam_context *ctx,
+	struct cam_req_mgr_link_evt_data *process_evt)
+{
+	int rc = 0;
+
+	if (!ctx->state_machine) {
+		CAM_ERR(CAM_CORE, "Context is not ready");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].crm_ops.process_evt) {
+		rc = ctx->state_machine[ctx->state].crm_ops.process_evt(ctx,
+			process_evt);
+	} else {
+		/* handling of this message is optional */
+		CAM_DBG(CAM_CORE, "No crm process evt in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
 int cam_context_handle_acquire_dev(struct cam_context *ctx,
 	struct cam_acquire_dev_cmd *cmd)
 {
@@ -257,10 +281,10 @@
 int cam_context_handle_flush_dev(struct cam_context *ctx,
 	struct cam_flush_dev_cmd *cmd)
 {
-	int rc;
+	int rc = 0;
 
 	if (!ctx->state_machine) {
-		CAM_ERR(CAM_CORE, "context is not ready");
+		CAM_ERR(CAM_CORE, "Context is not ready");
 		return -EINVAL;
 	}
 
@@ -274,9 +298,8 @@
 		rc = ctx->state_machine[ctx->state].ioctl_ops.flush_dev(
 			ctx, cmd);
 	} else {
-		CAM_ERR(CAM_CORE, "No flush device in dev %d, state %d",
+		CAM_WARN(CAM_CORE, "No flush device in dev %d, state %d",
 			ctx->dev_hdl, ctx->state);
-		rc = -EPROTO;
 	}
 	mutex_unlock(&ctx->ctx_mutex);
 
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.h b/drivers/media/platform/msm/camera/cam_core/cam_context.h
index c823b7a..af92b7e 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -110,6 +110,7 @@
  * @unlink:                Unlink the context
  * @apply_req:             Apply setting for the context
  * @flush_req:             Flush request to remove request ids
+ * @process_evt:           Handle event notification from CRM.(optional)
  *
  */
 struct cam_ctx_crm_ops {
@@ -123,6 +124,8 @@
 			struct cam_req_mgr_apply_request *apply);
 	int (*flush_req)(struct cam_context *ctx,
 			struct cam_req_mgr_flush_request *flush);
+	int (*process_evt)(struct cam_context *ctx,
+			struct cam_req_mgr_link_evt_data *evt_data);
 };
 
 
@@ -273,6 +276,18 @@
 		struct cam_req_mgr_flush_request *apply);
 
 /**
+ * cam_context_handle_crm_process_evt()
+ *
+ * @brief:        Handle process event command
+ *
+ * @ctx:          Object pointer for cam_context
+ * @process_evt:  process event command payload
+ *
+ */
+int cam_context_handle_crm_process_evt(struct cam_context *ctx,
+	struct cam_req_mgr_link_evt_data *process_evt);
+
+/**
  * cam_context_handle_acquire_dev()
  *
  * @brief:        Handle acquire device command
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
index aab1a1a..8ea920d 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -430,6 +430,8 @@
 	uint32_t i;
 	int rc = 0;
 
+	CAM_DBG(CAM_CTXT, "E: NRT flush ctx");
+
 	/*
 	 * flush pending requests, take the sync lock to synchronize with the
 	 * sync callback thread so that the sync cb thread does not try to
@@ -444,23 +446,33 @@
 	while (!list_empty(&temp_list)) {
 		req = list_first_entry(&temp_list,
 				struct cam_ctx_request, list);
+
 		list_del_init(&req->list);
 		req->flushed = 1;
+
 		flush_args.flush_req_pending[flush_args.num_req_pending++] =
 			req->req_priv;
 		for (i = 0; i < req->num_out_map_entries; i++)
-			if (req->out_map_entries[i].sync_id != -1)
-				cam_sync_signal(req->out_map_entries[i].sync_id,
+			if (req->out_map_entries[i].sync_id != -1) {
+				rc = cam_sync_signal(
+					req->out_map_entries[i].sync_id,
 					CAM_SYNC_STATE_SIGNALED_ERROR);
+				if (rc == -EALREADY) {
+					CAM_ERR(CAM_CTXT,
+						"Req: %llu already signalled, sync_id:%d",
+						req->request_id,
+						req->out_map_entries[i].
+						sync_id);
+					break;
+				}
+			}
 	}
 	mutex_unlock(&ctx->sync_mutex);
 
 	if (ctx->hw_mgr_intf->hw_flush) {
 		flush_args.num_req_active = 0;
 		spin_lock(&ctx->lock);
-		INIT_LIST_HEAD(&temp_list);
-		list_splice_init(&ctx->active_req_list, &temp_list);
-		list_for_each_entry(req, &temp_list, list) {
+		list_for_each_entry(req, &ctx->active_req_list, list) {
 			flush_args.flush_req_active[flush_args.num_req_active++]
 				= req->req_priv;
 		}
@@ -474,24 +486,42 @@
 		}
 	}
 
+	INIT_LIST_HEAD(&temp_list);
+	spin_lock(&ctx->lock);
+	list_splice_init(&ctx->active_req_list, &temp_list);
+	INIT_LIST_HEAD(&ctx->active_req_list);
+	spin_unlock(&ctx->lock);
+
 	while (!list_empty(&temp_list)) {
 		req = list_first_entry(&temp_list,
 			struct cam_ctx_request, list);
 		list_del_init(&req->list);
-		for (i = 0; i < req->num_out_map_entries; i++)
+		for (i = 0; i < req->num_out_map_entries; i++) {
 			if (req->out_map_entries[i].sync_id != -1) {
-				cam_sync_signal(req->out_map_entries[i].sync_id,
+				rc = cam_sync_signal(
+					req->out_map_entries[i].sync_id,
 					CAM_SYNC_STATE_SIGNALED_ERROR);
+				if (rc == -EALREADY) {
+					CAM_ERR(CAM_CTXT,
+						"Req: %llu already signalled ctx: %pK dev_name: %s dev_handle: %d ctx_state: %d",
+						req->request_id, req->ctx,
+						req->ctx->dev_name,
+						req->ctx->dev_hdl,
+						req->ctx->state);
+					break;
+				}
 			}
+		}
 
 		spin_lock(&ctx->lock);
 		list_add_tail(&req->list, &ctx->free_req_list);
 		spin_unlock(&ctx->lock);
 		req->ctx = NULL;
 	}
-	INIT_LIST_HEAD(&ctx->active_req_list);
 
-	return rc;
+	CAM_DBG(CAM_CTXT, "X: NRT flush ctx");
+
+	return 0;
 }
 
 int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
@@ -502,6 +532,8 @@
 	uint32_t i;
 	int rc = 0;
 
+	CAM_DBG(CAM_CTXT, "E: NRT flush req");
+
 	flush_args.num_req_pending = 0;
 	flush_args.num_req_active = 0;
 	mutex_lock(&ctx->sync_mutex);
@@ -510,7 +542,9 @@
 		if (req->request_id != cmd->req_id)
 			continue;
 
+		list_del_init(&req->list);
 		req->flushed = 1;
+
 		flush_args.flush_req_pending[flush_args.num_req_pending++] =
 			req->req_priv;
 		break;
@@ -525,6 +559,8 @@
 				if (req->request_id != cmd->req_id)
 					continue;
 
+				list_del_init(&req->list);
+
 				flush_args.flush_req_active[
 					flush_args.num_req_active++] =
 					req->req_priv;
@@ -543,20 +579,31 @@
 
 	if (req) {
 		if (flush_args.num_req_pending || flush_args.num_req_active) {
-			list_del_init(&req->list);
 			for (i = 0; i < req->num_out_map_entries; i++)
-				if (req->out_map_entries[i].sync_id != -1)
-					cam_sync_signal(
+				if (req->out_map_entries[i].sync_id != -1) {
+					rc = cam_sync_signal(
 						req->out_map_entries[i].sync_id,
 						CAM_SYNC_STATE_SIGNALED_ERROR);
-			spin_lock(&ctx->lock);
-			list_add_tail(&req->list, &ctx->free_req_list);
-			spin_unlock(&ctx->lock);
-			req->ctx = NULL;
+					if (rc == -EALREADY) {
+						CAM_ERR(CAM_CTXT,
+							"Req: %llu already signalled, sync_id:%d",
+							req->request_id,
+							req->out_map_entries[i].
+							sync_id);
+						break;
+					}
+				}
+			if (flush_args.num_req_active) {
+				spin_lock(&ctx->lock);
+				list_add_tail(&req->list, &ctx->free_req_list);
+				spin_unlock(&ctx->lock);
+				req->ctx = NULL;
+			}
 		}
 	}
+	CAM_DBG(CAM_CTXT, "X: NRT flush req");
 
-	return rc;
+	return 0;
 }
 
 int32_t cam_context_flush_dev_to_hw(struct cam_context *ctx,
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_node.c b/drivers/media/platform/msm/camera/cam_core/cam_node.c
index a5977b3..4e9034e 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_node.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_node.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -220,7 +220,7 @@
 
 	rc = cam_context_handle_flush_dev(ctx, flush);
 	if (rc)
-		CAM_ERR(CAM_CORE, "FLush failure for node %s", node->name);
+		CAM_ERR(CAM_CORE, "Flush failure for node %s", node->name);
 
 	return rc;
 }
@@ -342,6 +342,25 @@
 	return cam_context_handle_crm_flush_req(ctx, flush);
 }
 
+static int __cam_node_crm_process_evt(
+	struct cam_req_mgr_link_evt_data *evt_data)
+{
+	struct cam_context *ctx = NULL;
+
+	if (!evt_data) {
+		CAM_ERR(CAM_CORE, "Invalid process event request payload");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_context *) cam_get_device_priv(evt_data->dev_hdl);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			evt_data->dev_hdl);
+		return -EINVAL;
+	}
+	return cam_context_handle_crm_process_evt(ctx, evt_data);
+}
+
 int cam_node_deinit(struct cam_node *node)
 {
 	if (node)
@@ -394,6 +413,7 @@
 	node->crm_node_intf.get_dev_info = __cam_node_crm_get_dev_info;
 	node->crm_node_intf.link_setup = __cam_node_crm_link_setup;
 	node->crm_node_intf.flush_req = __cam_node_crm_flush_req;
+	node->crm_node_intf.process_evt = __cam_node_crm_process_evt;
 
 	mutex_init(&node->list_mutex);
 	INIT_LIST_HEAD(&node->free_ctx_list);
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
index 00ead5d..b04bc23 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -16,12 +16,19 @@
 #include <linux/msm-bus.h>
 #include <linux/pm_opp.h>
 #include <linux/slab.h>
+#include <linux/module.h>
 
 #include "cam_cpas_hw.h"
 #include "cam_cpas_hw_intf.h"
 #include "cam_cpas_soc.h"
 
-#define CAM_CPAS_AXI_MIN_BW (2048 * 1024)
+#define CAM_CPAS_AXI_MIN_MNOC_AB_BW   (2048 * 1024)
+#define CAM_CPAS_AXI_MIN_MNOC_IB_BW   (2048 * 1024)
+#define CAM_CPAS_AXI_MIN_CAMNOC_AB_BW (2048 * 1024)
+#define CAM_CPAS_AXI_MIN_CAMNOC_IB_BW (3000000000L)
+
+static uint cam_min_camnoc_ib_bw;
+module_param(cam_min_camnoc_ib_bw, uint, 0644);
 
 int cam_cpas_util_reg_update(struct cam_hw_info *cpas_hw,
 	enum cam_cpas_reg_base reg_base, struct cam_cpas_reg *reg_info)
@@ -84,11 +91,19 @@
 }
 
 static int cam_cpas_util_vote_bus_client_bw(
-	struct cam_cpas_bus_client *bus_client, uint64_t ab, uint64_t ib)
+	struct cam_cpas_bus_client *bus_client, uint64_t ab, uint64_t ib,
+	bool camnoc_bw)
 {
 	struct msm_bus_paths *path;
 	struct msm_bus_scale_pdata *pdata;
 	int idx = 0;
+	uint64_t min_camnoc_ib_bw = CAM_CPAS_AXI_MIN_CAMNOC_IB_BW;
+
+	if (cam_min_camnoc_ib_bw > 0)
+		min_camnoc_ib_bw = (uint64_t)cam_min_camnoc_ib_bw * 1000000L;
+
+	CAM_DBG(CAM_CPAS, "cam_min_camnoc_ib_bw = %u, min_camnoc_ib_bw=%llu",
+		cam_min_camnoc_ib_bw, min_camnoc_ib_bw);
 
 	if (!bus_client->valid) {
 		CAM_ERR(CAM_CPAS, "bus client not valid");
@@ -118,11 +133,19 @@
 	bus_client->curr_vote_level = idx;
 	mutex_unlock(&bus_client->lock);
 
-	if ((ab > 0) && (ab < CAM_CPAS_AXI_MIN_BW))
-		ab = CAM_CPAS_AXI_MIN_BW;
+	if (camnoc_bw == true) {
+		if ((ab > 0) && (ab < CAM_CPAS_AXI_MIN_CAMNOC_AB_BW))
+			ab = CAM_CPAS_AXI_MIN_CAMNOC_AB_BW;
 
-	if ((ib > 0) && (ib < CAM_CPAS_AXI_MIN_BW))
-		ib = CAM_CPAS_AXI_MIN_BW;
+		if ((ib > 0) && (ib < min_camnoc_ib_bw))
+			ib = min_camnoc_ib_bw;
+	} else {
+		if ((ab > 0) && (ab < CAM_CPAS_AXI_MIN_MNOC_AB_BW))
+			ab = CAM_CPAS_AXI_MIN_MNOC_AB_BW;
+
+		if ((ib > 0) && (ib < CAM_CPAS_AXI_MIN_MNOC_IB_BW))
+			ib = CAM_CPAS_AXI_MIN_MNOC_IB_BW;
+	}
 
 	pdata = bus_client->pdata;
 	path = &(pdata->usecase[idx]);
@@ -205,7 +228,7 @@
 		return -EINVAL;
 
 	if (bus_client->dyn_vote)
-		cam_cpas_util_vote_bus_client_bw(bus_client, 0, 0);
+		cam_cpas_util_vote_bus_client_bw(bus_client, 0, 0, false);
 	else
 		cam_cpas_util_vote_bus_client_level(bus_client, 0);
 
@@ -370,7 +393,7 @@
 	list_for_each_entry_safe(curr_port, temp_port,
 		&cpas_core->axi_ports_list_head, sibling_port) {
 		rc = cam_cpas_util_vote_bus_client_bw(&curr_port->mnoc_bus,
-			mnoc_bw, mnoc_bw);
+			mnoc_bw, mnoc_bw, false);
 		if (rc) {
 			CAM_ERR(CAM_CPAS,
 				"Failed in mnoc vote, enable=%d, rc=%d",
@@ -380,13 +403,13 @@
 
 		if (soc_private->axi_camnoc_based) {
 			cam_cpas_util_vote_bus_client_bw(
-				&curr_port->camnoc_bus, 0, camnoc_bw);
+				&curr_port->camnoc_bus, 0, camnoc_bw, true);
 			if (rc) {
 				CAM_ERR(CAM_CPAS,
 					"Failed in mnoc vote, enable=%d, %d",
 					enable, rc);
 				cam_cpas_util_vote_bus_client_bw(
-					&curr_port->mnoc_bus, 0, 0);
+					&curr_port->mnoc_bus, 0, 0, false);
 				goto remove_ahb_vote;
 			}
 		}
@@ -571,7 +594,7 @@
 		camnoc_bw, mnoc_bw);
 
 	rc = cam_cpas_util_vote_bus_client_bw(&axi_port->mnoc_bus,
-		mnoc_bw, mnoc_bw);
+		mnoc_bw, mnoc_bw, false);
 	if (rc) {
 		CAM_ERR(CAM_CPAS,
 			"Failed in mnoc vote ab[%llu] ib[%llu] rc=%d",
@@ -581,7 +604,7 @@
 
 	if (soc_private->axi_camnoc_based) {
 		rc = cam_cpas_util_vote_bus_client_bw(&axi_port->camnoc_bus,
-			0, camnoc_bw);
+			0, camnoc_bw, true);
 		if (rc) {
 			CAM_ERR(CAM_CPAS,
 				"Failed camnoc vote ab[%llu] ib[%llu] rc=%d",
@@ -662,9 +685,10 @@
 
 	opp = dev_pm_opp_find_freq_ceil(dev, &corner_freq);
 	if (IS_ERR(opp)) {
-		CAM_ERR(CAM_CPAS, "Error on OPP freq :%ld, %pK",
+		CAM_DBG(CAM_CPAS, "OPP Ceil not available for freq :%ld, %pK",
 			corner_freq, opp);
-		return -EINVAL;
+		*req_level = CAM_TURBO_VOTE;
+		return 0;
 	}
 
 	corner = dev_pm_opp_get_voltage(opp);
@@ -879,9 +903,11 @@
 		goto done;
 
 	if (cpas_core->streamon_clients == 0) {
+		atomic_set(&cpas_core->irq_count, 1);
 		rc = cam_cpas_soc_enable_resources(&cpas_hw->soc_info,
 			applied_level);
 		if (rc) {
+			atomic_set(&cpas_core->irq_count, 0);
 			CAM_ERR(CAM_CPAS, "enable_resorce failed, rc=%d", rc);
 			goto done;
 		}
@@ -889,14 +915,17 @@
 		if (cpas_core->internal_ops.power_on) {
 			rc = cpas_core->internal_ops.power_on(cpas_hw);
 			if (rc) {
+				atomic_set(&cpas_core->irq_count, 0);
 				cam_cpas_soc_disable_resources(
-					&cpas_hw->soc_info);
+					&cpas_hw->soc_info, true, true);
 				CAM_ERR(CAM_CPAS,
 					"failed in power_on settings rc=%d",
 					rc);
 				goto done;
 			}
 		}
+		CAM_DBG(CAM_CPAS, "irq_count=%d\n",
+			atomic_read(&cpas_core->irq_count));
 		cpas_hw->hw_state = CAM_HW_STATE_POWER_UP;
 	}
 
@@ -911,6 +940,10 @@
 	return rc;
 }
 
+static int _check_irq_count(struct cam_cpas *cpas_core)
+{
+	return (atomic_read(&cpas_core->irq_count) > 0) ? 0 : 1;
+}
 
 static int cam_cpas_hw_stop(void *hw_priv, void *stop_args,
 	uint32_t arg_size)
@@ -923,6 +956,7 @@
 	struct cam_ahb_vote ahb_vote;
 	struct cam_axi_vote axi_vote;
 	int rc = 0;
+	long result;
 
 	if (!hw_priv || !stop_args) {
 		CAM_ERR(CAM_CPAS, "Invalid arguments %pK %pK",
@@ -971,11 +1005,29 @@
 			}
 		}
 
-		rc = cam_cpas_soc_disable_resources(&cpas_hw->soc_info);
+		rc = cam_cpas_soc_disable_irq(&cpas_hw->soc_info);
+		if (rc) {
+			CAM_ERR(CAM_CPAS, "disable_irq failed, rc=%d", rc);
+			goto done;
+		}
+
+		/* Wait for any IRQs still being handled */
+		atomic_dec(&cpas_core->irq_count);
+		result = wait_event_timeout(cpas_core->irq_count_wq,
+			_check_irq_count(cpas_core), HZ);
+		if (result == 0) {
+			CAM_ERR(CAM_CPAS, "Wait failed: irq_count=%d",
+				atomic_read(&cpas_core->irq_count));
+		}
+
+		rc = cam_cpas_soc_disable_resources(&cpas_hw->soc_info,
+			true, false);
 		if (rc) {
 			CAM_ERR(CAM_CPAS, "disable_resorce failed, rc=%d", rc);
 			goto done;
 		}
+		CAM_DBG(CAM_CPAS, "Disabled all the resources: irq_count=%d\n",
+			atomic_read(&cpas_core->irq_count));
 		cpas_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
 	}
 
@@ -1426,6 +1478,8 @@
 	soc_private = (struct cam_cpas_private_soc *)
 		cpas_hw->soc_info.soc_private;
 	cpas_core->num_clients = soc_private->num_clients;
+	atomic_set(&cpas_core->irq_count, 0);
+	init_waitqueue_head(&cpas_core->irq_count_wq);
 
 	if (internal_ops->setup_regbase) {
 		rc = internal_ops->setup_regbase(&cpas_hw->soc_info,
@@ -1481,7 +1535,7 @@
 	if (rc)
 		goto disable_soc_res;
 
-	rc = cam_cpas_soc_disable_resources(&cpas_hw->soc_info);
+	rc = cam_cpas_soc_disable_resources(&cpas_hw->soc_info, true, true);
 	if (rc) {
 		CAM_ERR(CAM_CPAS, "failed in soc_disable_resources, rc=%d", rc);
 		goto remove_default_vote;
@@ -1499,7 +1553,7 @@
 	return 0;
 
 disable_soc_res:
-	cam_cpas_soc_disable_resources(&cpas_hw->soc_info);
+	cam_cpas_soc_disable_resources(&cpas_hw->soc_info, true, true);
 remove_default_vote:
 	cam_cpas_util_vote_default_ahb_axi(cpas_hw, false);
 axi_cleanup:
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
index aa3663d..05840bb 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -187,6 +187,8 @@
 	struct list_head axi_ports_list_head;
 	struct cam_cpas_internal_ops internal_ops;
 	struct workqueue_struct *work_queue;
+	atomic_t irq_count;
+	wait_queue_head_t irq_count_wq;
 };
 
 int cam_camsstop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops);
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
index f85f461..b18af0a 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -209,13 +209,26 @@
 	return rc;
 }
 
-int cam_cpas_soc_disable_resources(struct cam_hw_soc_info *soc_info)
+int cam_cpas_soc_disable_resources(struct cam_hw_soc_info *soc_info,
+	bool disable_clocks, bool disable_irq)
 {
 	int rc = 0;
 
-	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+	rc = cam_soc_util_disable_platform_resource(soc_info,
+		disable_clocks, disable_irq);
 	if (rc)
 		CAM_ERR(CAM_CPAS, "disable platform failed, rc=%d", rc);
 
 	return rc;
 }
+
+int cam_cpas_soc_disable_irq(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_irq_disable(soc_info);
+	if (rc)
+		CAM_ERR(CAM_CPAS, "disable irq failed, rc=%d", rc);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
index d4fc039..fe0187e 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -61,5 +61,7 @@
 int cam_cpas_soc_deinit_resources(struct cam_hw_soc_info *soc_info);
 int cam_cpas_soc_enable_resources(struct cam_hw_soc_info *soc_info,
 	enum cam_vote_level default_level);
-int cam_cpas_soc_disable_resources(struct cam_hw_soc_info *soc_info);
+int cam_cpas_soc_disable_resources(struct cam_hw_soc_info *soc_info,
+	bool disable_clocks, bool disable_irq);
+int cam_cpas_soc_disable_irq(struct cam_hw_soc_info *soc_info);
 #endif /* _CAM_CPAS_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
index 0e5ce85..0533ed8 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -354,6 +354,11 @@
 	cpas_core = (struct cam_cpas *) cpas_hw->core_info;
 	soc_info = &cpas_hw->soc_info;
 
+	if (!atomic_inc_not_zero(&cpas_core->irq_count)) {
+		CAM_ERR(CAM_CPAS, "CPAS off");
+		return;
+	}
+
 	for (i = 0; i < camnoc_info->irq_err_size; i++) {
 		if ((payload->irq_status & camnoc_info->irq_err[i].sbm_port) &&
 			(camnoc_info->irq_err[i].enable)) {
@@ -398,6 +403,9 @@
 				~camnoc_info->irq_err[i].sbm_port;
 		}
 	}
+	atomic_dec(&cpas_core->irq_count);
+	wake_up(&cpas_core->irq_count_wq);
+	CAM_DBG(CAM_CPAS, "irq_count=%d\n", atomic_read(&cpas_core->irq_count));
 
 	if (payload->irq_status)
 		CAM_ERR(CAM_CPAS, "IRQ not handled irq_status=0x%x",
@@ -414,9 +422,14 @@
 	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
 	struct cam_cpas_work_payload *payload;
 
+	if (!atomic_inc_not_zero(&cpas_core->irq_count)) {
+		CAM_ERR(CAM_CPAS, "CPAS off");
+		return IRQ_HANDLED;
+	}
+
 	payload = kzalloc(sizeof(struct cam_cpas_work_payload), GFP_ATOMIC);
 	if (!payload)
-		return IRQ_HANDLED;
+		goto done;
 
 	payload->irq_status = cam_io_r_mb(
 		soc_info->reg_map[camnoc_index].mem_base +
@@ -433,6 +446,9 @@
 	cam_cpastop_reset_irq(cpas_hw);
 
 	queue_work(cpas_core->work_queue, &payload->work);
+done:
+	atomic_dec(&cpas_core->irq_count);
+	wake_up(&cpas_core->irq_count_wq);
 
 	return IRQ_HANDLED;
 }
diff --git a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_reg.h b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_reg.h
index 6909972..73663b3 100644
--- a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_reg.h
+++ b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_reg.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -74,6 +74,7 @@
 #define ICP_SHARED_MEM_IN_BYTES                 (1024 * 1024)
 #define ICP_UNCACHED_HEAP_SIZE_IN_BYTES         (2 * 1024 * 1024)
 #define ICP_HFI_MAX_PKT_SIZE_IN_WORDS           25600
+#define ICP_HFI_MAX_PKT_SIZE_MSGQ_IN_WORDS      256
 
 #define ICP_HFI_QTBL_HOSTID1                    0x01000000
 #define ICP_HFI_QTBL_STATUS_ENABLED             0x00000001
diff --git a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_session_defs.h b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_session_defs.h
index 837efec..0412b8a 100644
--- a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_session_defs.h
+++ b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_session_defs.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -197,6 +197,7 @@
 } __packed;
 
 #define MAX_NUM_OF_IMAGE_PLANES	2
+#define MAX_HFR_GROUP          16
 
 enum hfi_ipe_io_images {
 	IPE_INPUT_IMAGE_FULL,
@@ -220,6 +221,40 @@
 	IPE_IO_IMAGES_MAX
 };
 
+enum bps_io_images {
+	BPS_INPUT_IMAGE,
+	BPS_OUTPUT_IMAGE_FULL,
+	BPS_OUTPUT_IMAGE_DS4,
+	BPS_OUTPUT_IMAGE_DS16,
+	BPS_OUTPUT_IMAGE_DS64,
+	BPS_OUTPUT_IMAGE_STATS_BG,
+	BPS_OUTPUT_IMAGE_STATS_BHIST,
+	BPS_OUTPUT_IMAGE_REG1,
+	BPS_OUTPUT_IMAGE_REG2,
+	BPS_OUTPUT_IMAGE_FIRST = BPS_OUTPUT_IMAGE_FULL,
+	BPS_OUTPUT_IMAGE_LAST = BPS_OUTPUT_IMAGE_REG2,
+	BPS_IO_IMAGES_MAX
+};
+
+struct frame_buffer {
+	uint32_t buffer_ptr[MAX_NUM_OF_IMAGE_PLANES];
+	uint32_t meta_buffer_ptr[MAX_NUM_OF_IMAGE_PLANES];
+} __packed;
+
+struct bps_frame_process_data {
+	struct frame_buffer buffers[BPS_IO_IMAGES_MAX];
+	uint32_t max_num_cores;
+	uint32_t target_time;
+	uint32_t ubwc_stats_buffer_addr;
+	uint32_t ubwc_stats_buffer_size;
+	uint32_t cdm_buffer_addr;
+	uint32_t cdm_buffer_size;
+	uint32_t iq_settings_addr;
+	uint32_t strip_lib_out_addr;
+	uint32_t cdm_prog_addr;
+	uint32_t request_id;
+};
+
 enum hfi_ipe_image_format {
 	IMAGE_FORMAT_INVALID,
 	IMAGE_FORMAT_MIPI_8,
@@ -361,6 +396,49 @@
 	struct buffer_layout meta_buf_layout[MAX_NUM_OF_IMAGE_PLANES];
 } __packed;
 
+struct ica_stab_coeff {
+	uint32_t coeffs[8];
+} __packed;
+
+struct ica_stab_params {
+	uint32_t mode;
+	struct ica_stab_coeff transforms[3];
+} __packed;
+
+struct frame_set {
+	struct frame_buffer buffers[IPE_IO_IMAGES_MAX];
+	struct ica_stab_params ica_params;
+	uint32_t cdm_ica1_addr;
+	uint32_t cdm_ica2_addr;
+} __packed;
+
+struct ipe_frame_process_data {
+	uint32_t strip_lib_out_addr;
+	uint32_t iq_settings_addr;
+	uint32_t scratch_buffer_addr;
+	uint32_t scratch_buffer_size;
+	uint32_t ubwc_stats_buffer_addr;
+	uint32_t ubwc_stats_buffer_size;
+	uint32_t cdm_buffer_addr;
+	uint32_t cdm_buffer_size;
+	uint32_t max_num_cores;
+	uint32_t target_time;
+	uint32_t cdm_prog_base;
+	uint32_t cdm_pre_ltm;
+	uint32_t cdm_post_ltm;
+	uint32_t cdm_anr_full_pass;
+	uint32_t cdm_anr_ds4;
+	uint32_t cdm_anr_ds16;
+	uint32_t cdm_anr_ds64;
+	uint32_t cdm_tf_full_pass;
+	uint32_t cdm_tf_ds4;
+	uint32_t cdm_tf_ds16;
+	uint32_t cdm_tf_ds64;
+	uint32_t request_id;
+	uint32_t frames_in_batch;
+	struct frame_set framesets[MAX_HFR_GROUP];
+} __packed;
+
 /**
  * struct hfi_cmd_ipe_config
  * @images: images descreptions
diff --git a/drivers/media/platform/msm/camera/cam_icp/hfi.c b/drivers/media/platform/msm/camera/cam_icp/hfi.c
index eca16d6..77f33d0 100644
--- a/drivers/media/platform/msm/camera/cam_icp/hfi.c
+++ b/drivers/media/platform/msm/camera/cam_icp/hfi.c
@@ -138,6 +138,7 @@
 	struct hfi_q_hdr *q;
 	uint32_t new_read_idx, size_in_words, word_diff, temp;
 	uint32_t *read_q, *read_ptr, *write_ptr;
+	uint32_t size_upper_bound = 0;
 	int rc = 0;
 
 	if (!pmsg) {
@@ -175,10 +176,13 @@
 		goto err;
 	}
 
-	if (q_id == Q_MSG)
+	if (q_id == Q_MSG) {
 		read_q = (uint32_t *)g_hfi->map.msg_q.kva;
-	else
+		size_upper_bound = ICP_HFI_MAX_PKT_SIZE_MSGQ_IN_WORDS;
+	} else {
 		read_q = (uint32_t *)g_hfi->map.dbg_q.kva;
+		size_upper_bound = ICP_HFI_MAX_PKT_SIZE_IN_WORDS;
+	}
 
 	read_ptr = (uint32_t *)(read_q + q->qhdr_read_idx);
 	write_ptr = (uint32_t *)(read_q + q->qhdr_write_idx);
@@ -196,7 +200,7 @@
 	}
 
 	if ((size_in_words == 0) ||
-		(size_in_words > ICP_HFI_MAX_PKT_SIZE_IN_WORDS)) {
+		(size_in_words > size_upper_bound)) {
 		CAM_ERR(CAM_HFI, "invalid HFI message packet size - 0x%08x",
 			size_in_words << BYTE_WORD_SHIFT);
 		q->qhdr_read_idx = q->qhdr_write_idx;
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
index 25e1ce7..5bd7f1c 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
@@ -276,22 +276,29 @@
 		rc = cam_bps_handle_resume(bps_dev);
 		break;
 	case CAM_ICP_BPS_CMD_UPDATE_CLK: {
-		uint32_t clk_rate = *(uint32_t *)cmd_args;
+		struct cam_a5_clk_update_cmd *clk_upd_cmd =
+			(struct cam_a5_clk_update_cmd *)cmd_args;
+		uint32_t clk_rate = clk_upd_cmd->curr_clk_rate;
 
 		CAM_DBG(CAM_ICP, "bps_src_clk rate = %d", (int)clk_rate);
+
 		if (!core_info->clk_enable) {
-			cam_bps_handle_pc(bps_dev);
-			cam_cpas_reg_write(core_info->cpas_handle,
-				CAM_CPAS_REG_CPASTOP,
-				hw_info->pwr_ctrl, true, 0x0);
+			if (clk_upd_cmd->ipe_bps_pc_enable) {
+				cam_bps_handle_pc(bps_dev);
+				cam_cpas_reg_write(core_info->cpas_handle,
+					CAM_CPAS_REG_CPASTOP,
+					hw_info->pwr_ctrl, true, 0x0);
+			}
 			rc = cam_bps_toggle_clk(soc_info, true);
 			if (rc)
 				CAM_ERR(CAM_ICP, "Enable failed");
 			else
 				core_info->clk_enable = true;
-			rc = cam_bps_handle_resume(bps_dev);
-			if (rc)
-				CAM_ERR(CAM_ICP, "handle resume failed");
+			if (clk_upd_cmd->ipe_bps_pc_enable) {
+				rc = cam_bps_handle_resume(bps_dev);
+				if (rc)
+					CAM_ERR(CAM_ICP, "BPS resume failed");
+			}
 		}
 		CAM_DBG(CAM_ICP, "clock rate %d", clk_rate);
 		rc = cam_bps_update_clk_rate(soc_info, clk_rate);
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c
index b7b636c..d2314c4 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -138,9 +138,22 @@
 int cam_bps_update_clk_rate(struct cam_hw_soc_info *soc_info,
 	uint32_t clk_rate)
 {
+	int32_t src_clk_idx;
+
 	if (!soc_info)
 		return -EINVAL;
 
+	src_clk_idx = soc_info->src_clk_idx;
+
+	if ((soc_info->clk_level_valid[CAM_TURBO_VOTE] == true) &&
+		(soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx] != 0) &&
+		(clk_rate > soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx])) {
+		CAM_DBG(CAM_ICP, "clk_rate %d greater than max, reset to %d",
+			clk_rate,
+			soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx]);
+		clk_rate = soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx];
+	}
+
 	return cam_soc_util_set_clk_rate(soc_info->clk[soc_info->src_clk_idx],
 		soc_info->clk_name[soc_info->src_clk_idx], clk_rate);
 }
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index f44fcc0..3bbb8d3 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -116,7 +116,12 @@
 		if (ctx_data->clk_info.clk_rate[i] >= base_clk)
 			return i;
 
-	return 0;
+	/*
+	 * Caller has to ensure returned index is within array
+	 * size bounds while accessing that index.
+	 */
+
+	return i;
 }
 
 static bool cam_icp_is_over_clk(struct cam_icp_hw_mgr *hw_mgr,
@@ -132,7 +137,7 @@
 	curr_clk_idx = cam_icp_get_actual_clk_rate_idx(ctx_data,
 		hw_mgr_clk_info->curr_clk);
 
-	CAM_DBG(CAM_ICP, "bc_idx = %d cc_idx = %d %lld %lld",
+	CAM_DBG(CAM_ICP, "bc_idx = %d cc_idx = %d %d %d",
 		base_clk_idx, curr_clk_idx, hw_mgr_clk_info->base_clk,
 		hw_mgr_clk_info->curr_clk);
 
@@ -192,9 +197,9 @@
 	struct cam_hw_info *dev = NULL;
 
 	if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
-		dev_intf = hw_mgr->devices[CAM_ICP_DEV_BPS][0];
+		dev_intf = hw_mgr->bps_dev_intf;
 	else
-		dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][0];
+		dev_intf = hw_mgr->ipe0_dev_intf;
 
 	if (!dev_intf) {
 		CAM_ERR(CAM_ICP, "dev_intf is invalid");
@@ -247,16 +252,16 @@
 		(struct cam_icp_clk_info *)task_data->data;
 	uint32_t id;
 	uint32_t i;
-	uint32_t curr_clk_rate;
 	struct cam_icp_hw_ctx_data *ctx_data;
 	struct cam_hw_intf *ipe0_dev_intf = NULL;
 	struct cam_hw_intf *ipe1_dev_intf = NULL;
 	struct cam_hw_intf *bps_dev_intf = NULL;
 	struct cam_hw_intf *dev_intf = NULL;
+	struct cam_a5_clk_update_cmd clk_upd_cmd;
 
-	ipe0_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][0];
-	ipe1_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][1];
-	bps_dev_intf = hw_mgr->devices[CAM_ICP_DEV_BPS][0];
+	ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+	ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+	bps_dev_intf = hw_mgr->bps_dev_intf;
 
 	clk_info->base_clk = 0;
 	clk_info->curr_clk = 0;
@@ -290,19 +295,135 @@
 
 	CAM_DBG(CAM_ICP, "Disable %d", clk_info->hw_type);
 
+	clk_upd_cmd.ipe_bps_pc_enable = icp_hw_mgr.ipe_bps_pc_flag;
+
 	dev_intf->hw_ops.process_cmd(dev_intf->hw_priv, id,
-		&curr_clk_rate, sizeof(curr_clk_rate));
+		&clk_upd_cmd, sizeof(struct cam_a5_clk_update_cmd));
 
 	if (clk_info->hw_type != ICP_CLK_HW_BPS)
 		if (ipe1_dev_intf)
 			ipe1_dev_intf->hw_ops.process_cmd(
 				ipe1_dev_intf->hw_priv, id,
-				&curr_clk_rate, sizeof(curr_clk_rate));
+				&clk_upd_cmd,
+				sizeof(struct cam_a5_clk_update_cmd));
 
 	return 0;
 }
 
-static void cam_icp_timer_cb(unsigned long data)
+static int32_t cam_icp_ctx_timer(void *priv, void *data)
+{
+	struct clk_work_data *task_data = (struct clk_work_data *)data;
+	struct cam_icp_hw_ctx_data *ctx_data =
+		(struct cam_icp_hw_ctx_data *)task_data->data;
+	struct cam_icp_hw_mgr *hw_mgr = &icp_hw_mgr;
+	uint32_t id;
+	struct cam_hw_intf *ipe0_dev_intf = NULL;
+	struct cam_hw_intf *ipe1_dev_intf = NULL;
+	struct cam_hw_intf *bps_dev_intf = NULL;
+	struct cam_hw_intf *dev_intf = NULL;
+	struct cam_icp_clk_info *clk_info;
+	struct cam_icp_cpas_vote clk_update;
+
+	if (!ctx_data) {
+		CAM_ERR(CAM_ICP, "ctx_data is NULL, failed to update clk");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx_data->ctx_mutex);
+	if ((ctx_data->state != CAM_ICP_CTX_STATE_ACQUIRED) ||
+		(ctx_data->watch_dog_reset_counter == 0)) {
+		CAM_DBG(CAM_ICP, "state %d, counter=%d",
+			ctx_data->state, ctx_data->watch_dog_reset_counter);
+		mutex_unlock(&ctx_data->ctx_mutex);
+		return 0;
+	}
+
+	CAM_DBG(CAM_ICP,
+		"E :ctx_id = %d ubw = %lld cbw = %lld curr_fc = %u bc = %u",
+		ctx_data->ctx_id,
+		ctx_data->clk_info.uncompressed_bw,
+		ctx_data->clk_info.compressed_bw,
+		ctx_data->clk_info.curr_fc, ctx_data->clk_info.base_clk);
+
+	ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+	ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+	bps_dev_intf = hw_mgr->bps_dev_intf;
+
+	if ((!ipe0_dev_intf) || (!bps_dev_intf)) {
+		CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to update clk");
+		mutex_unlock(&ctx_data->ctx_mutex);
+		return -EINVAL;
+	}
+
+	if (!ctx_data->icp_dev_acquire_info) {
+		CAM_WARN(CAM_ICP, "NULL acquire info");
+		mutex_unlock(&ctx_data->ctx_mutex);
+		return -EINVAL;
+	}
+
+	if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS) {
+		dev_intf = bps_dev_intf;
+		clk_info = &hw_mgr->clk_info[ICP_CLK_HW_BPS];
+		id = CAM_ICP_BPS_CMD_VOTE_CPAS;
+	} else {
+		dev_intf = ipe0_dev_intf;
+		clk_info = &hw_mgr->clk_info[ICP_CLK_HW_IPE];
+		id = CAM_ICP_IPE_CMD_VOTE_CPAS;
+	}
+
+	clk_info->compressed_bw -= ctx_data->clk_info.compressed_bw;
+	clk_info->uncompressed_bw -= ctx_data->clk_info.uncompressed_bw;
+	ctx_data->clk_info.uncompressed_bw = 0;
+	ctx_data->clk_info.compressed_bw = 0;
+	ctx_data->clk_info.curr_fc = 0;
+	ctx_data->clk_info.base_clk = 0;
+
+	clk_update.ahb_vote.type = CAM_VOTE_DYNAMIC;
+	clk_update.ahb_vote.vote.freq = clk_info->curr_clk;
+	clk_update.ahb_vote_valid = true;
+	clk_update.axi_vote.compressed_bw = clk_info->compressed_bw;
+	clk_update.axi_vote.uncompressed_bw = clk_info->uncompressed_bw;
+	clk_update.axi_vote_valid = true;
+	dev_intf->hw_ops.process_cmd(dev_intf->hw_priv, id,
+		&clk_update, sizeof(clk_update));
+
+	CAM_DBG(CAM_ICP,
+		"X :ctx_id = %d ubw = %lld cbw = %lld curr_fc = %u bc = %u",
+		ctx_data->ctx_id,
+		ctx_data->clk_info.uncompressed_bw,
+		ctx_data->clk_info.compressed_bw,
+		ctx_data->clk_info.curr_fc, ctx_data->clk_info.base_clk);
+
+	mutex_unlock(&ctx_data->ctx_mutex);
+
+	return 0;
+}
+
+static void cam_icp_ctx_timer_cb(unsigned long data)
+{
+	unsigned long flags;
+	struct crm_workq_task *task;
+	struct clk_work_data *task_data;
+	struct cam_req_mgr_timer *timer = (struct cam_req_mgr_timer *)data;
+
+	spin_lock_irqsave(&icp_hw_mgr.hw_mgr_lock, flags);
+	task = cam_req_mgr_workq_get_task(icp_hw_mgr.msg_work);
+	if (!task) {
+		CAM_ERR(CAM_ICP, "no empty task");
+		spin_unlock_irqrestore(&icp_hw_mgr.hw_mgr_lock, flags);
+		return;
+	}
+
+	task_data = (struct clk_work_data *)task->payload;
+	task_data->data = timer->parent;
+	task_data->type = ICP_WORKQ_TASK_MSG_TYPE;
+	task->process_cb = cam_icp_ctx_timer;
+	cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+	spin_unlock_irqrestore(&icp_hw_mgr.hw_mgr_lock, flags);
+}
+
+static void cam_icp_device_timer_cb(unsigned long data)
 {
 	unsigned long flags;
 	struct crm_workq_task *task;
@@ -339,13 +460,29 @@
 		hw_mgr->clk_info[i].uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
 		hw_mgr->clk_info[i].compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
 		hw_mgr->clk_info[i].hw_type = i;
+		hw_mgr->clk_info[i].watch_dog_reset_counter = 0;
 	}
 	hw_mgr->icp_default_clk = ICP_CLK_SVS_HZ;
 
 	return 0;
 }
 
-static int cam_icp_timer_start(struct cam_icp_hw_mgr *hw_mgr)
+static int cam_icp_ctx_timer_start(struct cam_icp_hw_ctx_data *ctx_data)
+{
+	int rc = 0;
+
+	rc = crm_timer_init(&ctx_data->watch_dog,
+		2000, ctx_data, &cam_icp_ctx_timer_cb);
+	if (rc)
+		CAM_ERR(CAM_ICP, "Failed to start timer");
+
+	ctx_data->watch_dog_reset_counter = 0;
+
+	CAM_DBG(CAM_ICP, "stop timer : ctx_id = %d", ctx_data->ctx_id);
+	return rc;
+}
+
+static int cam_icp_device_timer_start(struct cam_icp_hw_mgr *hw_mgr)
 {
 	int rc = 0;
 	int i;
@@ -353,21 +490,70 @@
 	for (i = 0; i < ICP_CLK_HW_MAX; i++)  {
 		if (!hw_mgr->clk_info[i].watch_dog) {
 			rc = crm_timer_init(&hw_mgr->clk_info[i].watch_dog,
-				3000, &hw_mgr->clk_info[i], &cam_icp_timer_cb);
+				3000, &hw_mgr->clk_info[i],
+				&cam_icp_device_timer_cb);
+
 			if (rc)
 				CAM_ERR(CAM_ICP, "Failed to start timer %d", i);
+
+			hw_mgr->clk_info[i].watch_dog_reset_counter = 0;
 		}
 	}
 
 	return rc;
 }
 
-static void cam_icp_timer_stop(struct cam_icp_hw_mgr *hw_mgr)
+static int cam_icp_ctx_timer_stop(struct cam_icp_hw_ctx_data *ctx_data)
 {
-	if (!hw_mgr->bps_ctxt_cnt)
+	if (ctx_data->watch_dog) {
+		CAM_DBG(CAM_ICP, "stop timer : ctx_id = %d", ctx_data->ctx_id);
+		ctx_data->watch_dog_reset_counter = 0;
+		crm_timer_exit(&ctx_data->watch_dog);
+		ctx_data->watch_dog = NULL;
+	}
+
+	return 0;
+}
+
+static void cam_icp_device_timer_stop(struct cam_icp_hw_mgr *hw_mgr)
+{
+	if (!hw_mgr->bps_ctxt_cnt &&
+		hw_mgr->clk_info[ICP_CLK_HW_BPS].watch_dog) {
+		hw_mgr->clk_info[ICP_CLK_HW_BPS].watch_dog_reset_counter = 0;
 		crm_timer_exit(&hw_mgr->clk_info[ICP_CLK_HW_BPS].watch_dog);
-	else if (!hw_mgr->ipe_ctxt_cnt)
+		hw_mgr->clk_info[ICP_CLK_HW_BPS].watch_dog = NULL;
+	}
+
+	if (!hw_mgr->ipe_ctxt_cnt &&
+		hw_mgr->clk_info[ICP_CLK_HW_IPE].watch_dog) {
+		hw_mgr->clk_info[ICP_CLK_HW_IPE].watch_dog_reset_counter = 0;
 		crm_timer_exit(&hw_mgr->clk_info[ICP_CLK_HW_IPE].watch_dog);
+		hw_mgr->clk_info[ICP_CLK_HW_IPE].watch_dog = NULL;
+	}
+}
+
+static int cam_icp_ctx_timer_reset(struct cam_icp_hw_ctx_data *ctx_data)
+{
+	if (ctx_data && ctx_data->watch_dog) {
+		ctx_data->watch_dog_reset_counter++;
+		CAM_DBG(CAM_ICP, "reset timer : ctx_id = %d, counter=%d",
+			ctx_data->ctx_id, ctx_data->watch_dog_reset_counter);
+		crm_timer_reset(ctx_data->watch_dog);
+	}
+
+	return 0;
+}
+
+static void cam_icp_device_timer_reset(struct cam_icp_hw_mgr *hw_mgr,
+	int device_index)
+{
+	if ((device_index >= ICP_CLK_HW_MAX) || (!hw_mgr))
+		return;
+
+	if (hw_mgr->clk_info[device_index].watch_dog) {
+		crm_timer_reset(hw_mgr->clk_info[device_index].watch_dog);
+		hw_mgr->clk_info[device_index].watch_dog_reset_counter++;
+	}
 }
 
 static uint32_t cam_icp_mgr_calc_base_clk(uint32_t frame_cycles,
@@ -417,7 +603,9 @@
 	for (i = 0; i < CAM_ICP_CTX_MAX; i++) {
 		ctx_data = &hw_mgr->ctx_data[i];
 		if (ctx_data->state == CAM_ICP_CTX_STATE_ACQUIRED &&
-			ctx_data->icp_dev_acquire_info->dev_type == dev_type)
+			ICP_DEV_TYPE_TO_CLK_TYPE(
+			ctx_data->icp_dev_acquire_info->dev_type) ==
+			ICP_DEV_TYPE_TO_CLK_TYPE(dev_type))
 			hw_mgr_clk_info->base_clk +=
 				ctx_data->clk_info.base_clk;
 	}
@@ -572,7 +760,8 @@
 		hw_mgr_clk_info->over_clked = 0;
 		rc = false;
 	}  else if (hw_mgr_clk_info->curr_clk < hw_mgr_clk_info->base_clk) {
-		hw_mgr_clk_info->curr_clk = hw_mgr_clk_info->base_clk;
+		hw_mgr_clk_info->curr_clk = cam_icp_get_actual_clk_rate(hw_mgr,
+			ctx_data, hw_mgr_clk_info->base_clk);
 		rc = true;
 	}
 	mutex_unlock(&hw_mgr->hw_mgr_mutex);
@@ -630,7 +819,13 @@
 	 * recalculate bandwidth of all contexts of same hardware and update
 	 * voting of bandwidth
 	 */
-	if (clk_info->uncompressed_bw == ctx_data->clk_info.uncompressed_bw)
+	CAM_DBG(CAM_ICP, "ubw ctx = %lld clk_info ubw = %lld busy = %d",
+		ctx_data->clk_info.uncompressed_bw,
+		clk_info->uncompressed_bw, busy);
+
+	if ((clk_info->uncompressed_bw == ctx_data->clk_info.uncompressed_bw) &&
+		(ctx_data->clk_info.uncompressed_bw ==
+		hw_mgr_clk_info->uncompressed_bw))
 		return false;
 
 	if (busy &&
@@ -644,13 +839,18 @@
 	for (i = 0; i < CAM_ICP_CTX_MAX; i++) {
 		ctx = &hw_mgr->ctx_data[i];
 		if (ctx->state == CAM_ICP_CTX_STATE_ACQUIRED &&
-			ctx->icp_dev_acquire_info->dev_type ==
-			ctx_data->icp_dev_acquire_info->dev_type) {
+			ICP_DEV_TYPE_TO_CLK_TYPE(
+			ctx->icp_dev_acquire_info->dev_type) ==
+			ICP_DEV_TYPE_TO_CLK_TYPE(
+			ctx_data->icp_dev_acquire_info->dev_type)) {
 			mutex_lock(&hw_mgr->hw_mgr_mutex);
 			hw_mgr_clk_info->uncompressed_bw +=
 				ctx->clk_info.uncompressed_bw;
 			hw_mgr_clk_info->compressed_bw +=
 				ctx->clk_info.compressed_bw;
+			CAM_DBG(CAM_ICP, "ubw = %lld, cbw = %lld",
+				hw_mgr_clk_info->uncompressed_bw,
+				hw_mgr_clk_info->compressed_bw);
 			mutex_unlock(&hw_mgr->hw_mgr_mutex);
 		}
 	}
@@ -668,12 +868,13 @@
 	uint64_t req_id;
 	struct cam_icp_clk_info *hw_mgr_clk_info;
 
+	cam_icp_ctx_timer_reset(ctx_data);
 	if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS) {
-		crm_timer_reset(hw_mgr->clk_info[ICP_CLK_HW_BPS].watch_dog);
+		cam_icp_device_timer_reset(hw_mgr, ICP_CLK_HW_BPS);
 		hw_mgr_clk_info = &hw_mgr->clk_info[ICP_CLK_HW_BPS];
 		CAM_DBG(CAM_ICP, "Reset bps timer");
 	} else {
-		crm_timer_reset(hw_mgr->clk_info[ICP_CLK_HW_IPE].watch_dog);
+		cam_icp_device_timer_reset(hw_mgr, ICP_CLK_HW_IPE);
 		hw_mgr_clk_info = &hw_mgr->clk_info[ICP_CLK_HW_IPE];
 		CAM_DBG(CAM_ICP, "Reset ipe timer");
 	}
@@ -747,6 +948,7 @@
 	struct cam_hw_intf *ipe1_dev_intf = NULL;
 	struct cam_hw_intf *bps_dev_intf = NULL;
 	struct cam_hw_intf *dev_intf = NULL;
+	struct cam_a5_clk_update_cmd clk_upd_cmd;
 
 	ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
 	ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
@@ -768,14 +970,18 @@
 		id = CAM_ICP_IPE_CMD_UPDATE_CLK;
 	}
 
+	clk_upd_cmd.curr_clk_rate = curr_clk_rate;
+	clk_upd_cmd.ipe_bps_pc_enable = icp_hw_mgr.ipe_bps_pc_flag;
+
 	dev_intf->hw_ops.process_cmd(dev_intf->hw_priv, id,
-		&curr_clk_rate, sizeof(curr_clk_rate));
+		&clk_upd_cmd, sizeof(struct cam_a5_clk_update_cmd));
 
 	if (ctx_data->icp_dev_acquire_info->dev_type != CAM_ICP_RES_TYPE_BPS)
 		if (ipe1_dev_intf)
 			ipe1_dev_intf->hw_ops.process_cmd(
 				ipe1_dev_intf->hw_priv, id,
-				&curr_clk_rate, sizeof(curr_clk_rate));
+				&clk_upd_cmd,
+				sizeof(struct cam_a5_clk_update_cmd));
 
 	return 0;
 }
@@ -819,11 +1025,13 @@
 	dev_intf->hw_ops.process_cmd(dev_intf->hw_priv, id,
 		&clk_update, sizeof(clk_update));
 
-	if (ctx_data->icp_dev_acquire_info->dev_type != CAM_ICP_RES_TYPE_BPS)
-		if (ipe1_dev_intf)
-			ipe1_dev_intf->hw_ops.process_cmd(
-				ipe1_dev_intf->hw_priv, id,
-				&clk_update, sizeof(clk_update));
+	/*
+	 * Consolidated bw needs to be voted through only one IPE client;
+	 * otherwise the total bw voted at the bus client would be doubled.
+	 * So either remove voting on IPE1, or divide the vote between the
+	 * IPE clients and vote to cpas - cpas will add them up and vote
+	 * the full bw to the sf client anyway.
+	 */
 
 	return 0;
 }
@@ -863,7 +1071,7 @@
 		if (hw_mgr->bps_ctxt_cnt++)
 			goto end;
 		bps_dev_intf->hw_ops.init(bps_dev_intf->hw_priv, NULL, 0);
-		if (icp_hw_mgr.icp_pc_flag) {
+		if (icp_hw_mgr.ipe_bps_pc_flag) {
 			bps_dev_intf->hw_ops.process_cmd(
 				bps_dev_intf->hw_priv,
 				CAM_ICP_BPS_CMD_POWER_RESUME, NULL, 0);
@@ -874,7 +1082,7 @@
 			goto end;
 
 		ipe0_dev_intf->hw_ops.init(ipe0_dev_intf->hw_priv, NULL, 0);
-		if (icp_hw_mgr.icp_pc_flag) {
+		if (icp_hw_mgr.ipe_bps_pc_flag) {
 			ipe0_dev_intf->hw_ops.process_cmd(
 				ipe0_dev_intf->hw_priv,
 				CAM_ICP_IPE_CMD_POWER_RESUME, NULL, 0);
@@ -884,21 +1092,21 @@
 			ipe1_dev_intf->hw_ops.init(ipe1_dev_intf->hw_priv,
 				NULL, 0);
 
-			if (icp_hw_mgr.icp_pc_flag) {
+			if (icp_hw_mgr.ipe_bps_pc_flag) {
 				ipe1_dev_intf->hw_ops.process_cmd(
 					ipe1_dev_intf->hw_priv,
 					CAM_ICP_IPE_CMD_POWER_RESUME,
 					NULL, 0);
 			}
 		}
-		if (icp_hw_mgr.icp_pc_flag) {
+		if (icp_hw_mgr.ipe_bps_pc_flag) {
 			hw_mgr->core_info = hw_mgr->core_info |
 				(ICP_PWR_CLP_IPE0 | ICP_PWR_CLP_IPE1);
 		}
 	}
 
 	CAM_DBG(CAM_ICP, "core_info %X",  hw_mgr->core_info);
-	if (icp_hw_mgr.icp_pc_flag)
+	if (icp_hw_mgr.ipe_bps_pc_flag)
 		rc = hfi_enable_ipe_bps_pc(true, hw_mgr->core_info);
 	else
 		rc = hfi_enable_ipe_bps_pc(false, hw_mgr->core_info);
@@ -936,7 +1144,7 @@
 		if (hw_mgr->bps_ctxt_cnt)
 			goto end;
 
-		if (icp_hw_mgr.icp_pc_flag) {
+		if (icp_hw_mgr.ipe_bps_pc_flag) {
 			rc = bps_dev_intf->hw_ops.process_cmd(
 				bps_dev_intf->hw_priv,
 				CAM_ICP_BPS_CMD_POWER_COLLAPSE,
@@ -954,7 +1162,7 @@
 		if (hw_mgr->ipe_ctxt_cnt)
 			goto end;
 
-		if (icp_hw_mgr.icp_pc_flag) {
+		if (icp_hw_mgr.ipe_bps_pc_flag) {
 			rc = ipe0_dev_intf->hw_ops.process_cmd(
 				ipe0_dev_intf->hw_priv,
 				CAM_ICP_IPE_CMD_POWER_COLLAPSE, NULL, 0);
@@ -963,7 +1171,7 @@
 		ipe0_dev_intf->hw_ops.deinit(ipe0_dev_intf->hw_priv, NULL, 0);
 
 		if (ipe1_dev_intf) {
-			if (icp_hw_mgr.icp_pc_flag) {
+			if (icp_hw_mgr.ipe_bps_pc_flag) {
 				rc = ipe1_dev_intf->hw_ops.process_cmd(
 					ipe1_dev_intf->hw_priv,
 					CAM_ICP_IPE_CMD_POWER_COLLAPSE,
@@ -973,7 +1181,7 @@
 			ipe1_dev_intf->hw_ops.deinit(ipe1_dev_intf->hw_priv,
 				NULL, 0);
 		}
-		if (icp_hw_mgr.icp_pc_flag) {
+		if (icp_hw_mgr.ipe_bps_pc_flag) {
 			hw_mgr->core_info = hw_mgr->core_info &
 				(~(ICP_PWR_CLP_IPE0 | ICP_PWR_CLP_IPE1));
 		}
@@ -1031,7 +1239,18 @@
 		rc = -ENOMEM;
 		goto err;
 	}
-	icp_hw_mgr.icp_pc_flag = 1;
+	icp_hw_mgr.icp_pc_flag = false;
+
+	if (!debugfs_create_bool("ipe_bps_pc",
+		0644,
+		icp_hw_mgr.dentry,
+		&icp_hw_mgr.ipe_bps_pc_flag)) {
+		CAM_ERR(CAM_ICP, "failed to create ipe_bps_pc entry");
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	icp_hw_mgr.ipe_bps_pc_flag = false;
 
 	if (!debugfs_create_file("icp_debug_clk",
 		0644,
@@ -1155,15 +1374,14 @@
 
 	clk_type = ICP_DEV_TYPE_TO_CLK_TYPE(ctx_data->icp_dev_acquire_info->
 		dev_type);
-	crm_timer_reset(icp_hw_mgr.clk_info[clk_type].watch_dog);
+	cam_icp_device_timer_reset(&icp_hw_mgr, clk_type);
 
 	mutex_lock(&ctx_data->ctx_mutex);
+	cam_icp_ctx_timer_reset(ctx_data);
 	if (ctx_data->state != CAM_ICP_CTX_STATE_ACQUIRED) {
+		CAM_DBG(CAM_ICP, "ctx %u is in %d state",
+			ctx_data->ctx_id, ctx_data->state);
 		mutex_unlock(&ctx_data->ctx_mutex);
-		CAM_WARN(CAM_ICP,
-			"ctx with id: %u not in the right state : %x",
-			ctx_data->ctx_id,
-			ctx_data->state);
 		return 0;
 	}
 
@@ -1814,7 +2032,10 @@
 	}
 	a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
 
-	rc = cam_icp_mgr_send_pc_prep(hw_mgr);
+	if (!hw_mgr->icp_pc_flag)
+		rc = cam_icp_mgr_hw_close(hw_mgr, NULL);
+	else
+		rc = cam_icp_mgr_send_pc_prep(hw_mgr);
 
 	cam_hfi_disable_cpu(a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
 	a5_dev_intf->hw_ops.deinit(a5_dev_intf->hw_priv, NULL, 0);
@@ -1871,33 +2092,6 @@
 		hw_mgr->a5_jtag_debug);
 }
 
-static int cam_icp_mgr_icp_resume(struct cam_icp_hw_mgr *hw_mgr)
-{
-	int rc = 0;
-	struct cam_hw_intf *a5_dev_intf = NULL;
-
-	CAM_DBG(CAM_ICP, "Enter");
-	a5_dev_intf = hw_mgr->a5_dev_intf;
-
-	if (!a5_dev_intf) {
-		CAM_ERR(CAM_ICP, "a5 dev intf is wrong");
-		return -EINVAL;
-	}
-
-	rc = a5_dev_intf->hw_ops.init(a5_dev_intf->hw_priv, NULL, 0);
-	if (rc)
-		return -EINVAL;
-
-	rc = cam_icp_mgr_hfi_resume(hw_mgr);
-	if (rc)
-		goto hfi_resume_failed;
-
-	CAM_DBG(CAM_ICP, "Exit");
-	return rc;
-hfi_resume_failed:
-	cam_icp_mgr_icp_power_collapse(hw_mgr);
-	return rc;
-}
 static int cam_icp_mgr_abort_handle(
 	struct cam_icp_hw_ctx_data *ctx_data)
 {
@@ -2021,7 +2215,10 @@
 			msecs_to_jiffies((timeout)));
 	if (!rem_jiffies) {
 		rc = -ETIMEDOUT;
-		CAM_ERR(CAM_ICP, "FW response timeout: %d", rc);
+		CAM_ERR(CAM_ICP, "FW response timeout: %d for %u",
+			rc, ctx_data->ctx_id);
+		if (icp_hw_mgr.a5_debug_q)
+			cam_icp_mgr_process_dbg_buf();
 	}
 
 	kfree(destroy_cmd);
@@ -2050,6 +2247,7 @@
 	cam_icp_mgr_ipe_bps_power_collapse(hw_mgr,
 		&hw_mgr->ctx_data[ctx_id], 0);
 	hw_mgr->ctx_data[ctx_id].state = CAM_ICP_CTX_STATE_RELEASE;
+	CAM_DBG(CAM_ICP, "E: ctx_id = %d", ctx_id);
 	cam_icp_mgr_destroy_handle(&hw_mgr->ctx_data[ctx_id]);
 	cam_icp_mgr_cleanup_ctx(&hw_mgr->ctx_data[ctx_id]);
 
@@ -2066,8 +2264,10 @@
 	kfree(hw_mgr->ctx_data[ctx_id].icp_dev_acquire_info);
 	hw_mgr->ctx_data[ctx_id].icp_dev_acquire_info = NULL;
 	hw_mgr->ctx_data[ctx_id].state = CAM_ICP_CTX_STATE_FREE;
+	cam_icp_ctx_timer_stop(&hw_mgr->ctx_data[ctx_id]);
 	mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
 
+	CAM_DBG(CAM_ICP, "X: ctx_id = %d", ctx_id);
 	return 0;
 }
 
@@ -2145,6 +2345,7 @@
 	hw_mgr->secure_mode = CAM_SECURE_MODE_NON_SECURE;
 	mutex_unlock(&hw_mgr->hw_mgr_mutex);
 
+	CAM_DBG(CAM_ICP, "Exit");
 	return rc;
 }
 
@@ -2321,11 +2522,50 @@
 	return rc;
 }
 
+static int cam_icp_mgr_icp_resume(struct cam_icp_hw_mgr *hw_mgr)
+{
+	int rc = 0;
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	bool downloadFromResume = true;
+
+	CAM_DBG(CAM_ICP, "Enter");
+	a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+
+	if (!a5_dev_intf) {
+		CAM_ERR(CAM_ICP, "a5 dev intf is wrong");
+		return -EINVAL;
+	}
+
+	if (hw_mgr->fw_download  == false) {
+		CAM_DBG(CAM_ICP, "Downloading FW");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		cam_icp_mgr_hw_open(hw_mgr, &downloadFromResume);
+		mutex_lock(&hw_mgr->hw_mgr_mutex);
+		CAM_DBG(CAM_ICP, "FW Download Done Exit");
+		return 0;
+	}
+
+	rc = a5_dev_intf->hw_ops.init(a5_dev_intf->hw_priv, NULL, 0);
+	if (rc)
+		return -EINVAL;
+
+	rc = cam_icp_mgr_hfi_resume(hw_mgr);
+	if (rc)
+		goto hfi_resume_failed;
+
+	CAM_DBG(CAM_ICP, "Exit");
+	return rc;
+hfi_resume_failed:
+	cam_icp_mgr_icp_power_collapse(hw_mgr);
+	return rc;
+}
+
 static int cam_icp_mgr_hw_open(void *hw_mgr_priv, void *download_fw_args)
 {
 	struct cam_hw_intf *a5_dev_intf = NULL;
 	struct cam_hw_info *a5_dev = NULL;
 	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+	bool icp_pc = false;
 	int rc = 0;
 
 	if (!hw_mgr) {
@@ -2348,47 +2588,43 @@
 	}
 	a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
 	rc = cam_icp_allocate_hfi_mem();
-	if (rc) {
-		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	if (rc)
 		goto alloc_hfi_mem_failed;
-	}
 
 	rc = cam_icp_mgr_device_init(hw_mgr);
-	if (rc) {
-		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	if (rc)
 		goto dev_init_fail;
-	}
 
 	rc = cam_icp_mgr_fw_download(hw_mgr);
-	if (rc) {
-		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	if (rc)
 		goto fw_download_failed;
-	}
 
 	rc = cam_icp_mgr_hfi_init(hw_mgr);
-	if (rc) {
-		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	if (rc)
 		goto hfi_init_failed;
-	}
 
 	rc = cam_icp_mgr_send_fw_init(hw_mgr);
-	if (rc) {
-		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	if (rc)
 		goto fw_init_failed;
-	}
 
 	hw_mgr->ctxt_cnt = 0;
+	hw_mgr->fw_download = true;
 
 	if (icp_hw_mgr.a5_debug_q)
 		hfi_set_debug_level(icp_hw_mgr.a5_dbg_lvl);
 
 	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	CAM_INFO(CAM_ICP, "FW download done successfully");
 
 	rc = cam_ipe_bps_deint(hw_mgr);
+	if (download_fw_args)
+		icp_pc = *((bool *)download_fw_args);
+
+	if (download_fw_args && icp_pc == true)
+		return rc;
+
 	rc = cam_icp_mgr_icp_power_collapse(hw_mgr);
 
-	hw_mgr->fw_download = true;
-	CAM_DBG(CAM_ICP, "FW download done successfully");
 	return rc;
 
 fw_init_failed:
@@ -2400,6 +2636,7 @@
 dev_init_fail:
 	cam_icp_free_hfi_mem();
 alloc_hfi_mem_failed:
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
 	return rc;
 }
 
@@ -2489,6 +2726,8 @@
 	rc = cam_icp_mgr_enqueue_config(hw_mgr, config_args);
 	if (rc)
 		goto config_err;
+	CAM_DBG(CAM_ICP, "req_id = %lld %u",
+		req_id, ctx_data->ctx_id);
 	mutex_unlock(&ctx_data->ctx_mutex);
 
 	return 0;
@@ -2539,14 +2778,17 @@
 }
 
 static int cam_icp_mgr_process_cmd_desc(struct cam_icp_hw_mgr *hw_mgr,
-	struct cam_packet *packet,
+	struct cam_packet *packet, struct cam_icp_hw_ctx_data *ctx_data,
 	uint32_t *fw_cmd_buf_iova_addr)
 {
 	int rc = 0;
-	int i;
+	int i, j, k;
 	uint64_t addr;
 	size_t len;
 	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	uint64_t cpu_addr = 0;
+	struct ipe_frame_process_data *frame_process_data = NULL;
+	struct bps_frame_process_data *bps_frame_process_data = NULL;
 
 	cmd_desc = (struct cam_cmd_buf_desc *)
 		((uint32_t *) &packet->payload + packet->cmd_buf_offset/4);
@@ -2564,6 +2806,67 @@
 			*fw_cmd_buf_iova_addr = addr;
 			*fw_cmd_buf_iova_addr =
 				(*fw_cmd_buf_iova_addr + cmd_desc[i].offset);
+			rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
+				&cpu_addr, &len);
+			if (rc) {
+				CAM_ERR(CAM_ICP, "get cmd buf failed %x",
+					hw_mgr->iommu_hdl);
+				*fw_cmd_buf_iova_addr = 0;
+				return rc;
+			}
+			cpu_addr = cpu_addr + cmd_desc[i].offset;
+		}
+	}
+
+	if (!cpu_addr) {
+		CAM_ERR(CAM_ICP, "Invalid cpu addr");
+		return -EINVAL;
+	}
+
+	if (ctx_data->icp_dev_acquire_info->dev_type !=
+		CAM_ICP_RES_TYPE_BPS) {
+		CAM_DBG(CAM_ICP, "cpu addr = %llx", cpu_addr);
+		frame_process_data = (struct ipe_frame_process_data *)cpu_addr;
+		CAM_DBG(CAM_ICP, "%u %u %u", frame_process_data->max_num_cores,
+			frame_process_data->target_time,
+			frame_process_data->frames_in_batch);
+		frame_process_data->strip_lib_out_addr = 0;
+		frame_process_data->iq_settings_addr = 0;
+		frame_process_data->scratch_buffer_addr = 0;
+		frame_process_data->ubwc_stats_buffer_addr = 0;
+		frame_process_data->cdm_buffer_addr = 0;
+		frame_process_data->cdm_prog_base = 0;
+		for (i = 0; i < frame_process_data->frames_in_batch; i++) {
+			for (j = 0; j < IPE_IO_IMAGES_MAX; j++) {
+				for (k = 0; k < MAX_NUM_OF_IMAGE_PLANES; k++) {
+					frame_process_data->
+					framesets[i].buffers[j].
+					buffer_ptr[k] = 0;
+					frame_process_data->
+					framesets[i].buffers[j].
+					meta_buffer_ptr[k] = 0;
+				}
+			}
+		}
+	} else {
+		CAM_DBG(CAM_ICP, "cpu addr = %llx", cpu_addr);
+		bps_frame_process_data =
+			(struct bps_frame_process_data *)cpu_addr;
+		CAM_DBG(CAM_ICP, "%u %u",
+			bps_frame_process_data->max_num_cores,
+			bps_frame_process_data->target_time);
+		bps_frame_process_data->ubwc_stats_buffer_addr = 0;
+		bps_frame_process_data->cdm_buffer_addr = 0;
+		bps_frame_process_data->iq_settings_addr = 0;
+		bps_frame_process_data->strip_lib_out_addr = 0;
+		bps_frame_process_data->cdm_prog_addr = 0;
+		for (i = 0; i < BPS_IO_IMAGES_MAX; i++) {
+			for (j = 0; j < MAX_NUM_OF_IMAGE_PLANES; j++) {
+				bps_frame_process_data->
+				buffers[i].buffer_ptr[j] = 0;
+				bps_frame_process_data->
+				buffers[i].meta_buffer_ptr[j] = 0;
+			}
 		}
 	}
 
@@ -2766,12 +3069,13 @@
 	}
 
 	rc = cam_icp_mgr_process_cmd_desc(hw_mgr, packet,
-		&fw_cmd_buf_iova_addr);
+		ctx_data, &fw_cmd_buf_iova_addr);
 	if (rc) {
 		mutex_unlock(&ctx_data->ctx_mutex);
 		return rc;
 	}
 
+	CAM_DBG(CAM_ICP, "E: req id = %lld", packet->header.request_id);
 	/* Update Buffer Address from handles and patch information */
 	rc = cam_packet_util_process_patches(packet, hw_mgr->iommu_hdl,
 		hw_mgr->iommu_sec_hdl);
@@ -2809,6 +3113,8 @@
 	prepare_args->hw_update_entries[0].addr = (uint64_t)hfi_cmd;
 	prepare_args->priv = &ctx_data->hfi_frame_process.request_id[idx];
 
+	CAM_DBG(CAM_ICP, "X: req id = %lld ctx_id = %u",
+		packet->header.request_id, ctx_data->ctx_id);
 	mutex_unlock(&ctx_data->ctx_mutex);
 	return rc;
 }
@@ -3025,6 +3331,7 @@
 		return -EINVAL;
 	}
 
+	CAM_DBG(CAM_ICP, "Enter");
 	ctx_data = release_hw->ctxt_to_hw_map;
 	if (!ctx_data) {
 		CAM_ERR(CAM_ICP, "NULL ctx data");
@@ -3054,15 +3361,18 @@
 	rc = cam_icp_mgr_release_ctx(hw_mgr, ctx_id);
 	if (!hw_mgr->ctxt_cnt) {
 		CAM_DBG(CAM_ICP, "Last Release");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
 		cam_icp_mgr_icp_power_collapse(hw_mgr);
+		mutex_lock(&hw_mgr->hw_mgr_mutex);
 		cam_icp_hw_mgr_reset_clk_info(hw_mgr);
 		hw_mgr->secure_mode = CAM_SECURE_MODE_NON_SECURE;
 	}
 	mutex_unlock(&hw_mgr->hw_mgr_mutex);
 
 	if (!hw_mgr->bps_ctxt_cnt || !hw_mgr->ipe_ctxt_cnt)
-		cam_icp_timer_stop(hw_mgr);
+		cam_icp_device_timer_stop(hw_mgr);
 
+	CAM_DBG(CAM_ICP, "Exit");
 	return rc;
 }
 
@@ -3302,6 +3612,7 @@
 		return -EINVAL;
 	}
 
+	CAM_DBG(CAM_ICP, "ENTER");
 	mutex_lock(&hw_mgr->hw_mgr_mutex);
 	ctx_id = cam_icp_mgr_get_free_ctx(hw_mgr);
 	if (ctx_id >= CAM_ICP_CTX_MAX) {
@@ -3346,6 +3657,9 @@
 			goto get_io_buf_failed;
 		}
 
+		if (icp_hw_mgr.a5_debug_q)
+			hfi_set_debug_level(icp_hw_mgr.a5_dbg_lvl);
+
 		rc = cam_icp_send_ubwc_cfg(hw_mgr);
 		if (rc) {
 			mutex_unlock(&hw_mgr->hw_mgr_mutex);
@@ -3354,7 +3668,9 @@
 	}
 
 	if (!hw_mgr->bps_ctxt_cnt || !hw_mgr->ipe_ctxt_cnt)
-		cam_icp_timer_start(hw_mgr);
+		cam_icp_device_timer_start(hw_mgr);
+
+	cam_icp_ctx_timer_start(ctx_data);
 
 	rc = cam_icp_mgr_ipe_bps_resume(hw_mgr, ctx_data);
 	if (rc) {
@@ -3421,6 +3737,7 @@
 send_ping_failed:
 	cam_icp_mgr_ipe_bps_power_collapse(hw_mgr, ctx_data, 0);
 ipe_bps_resume_failed:
+	cam_icp_ctx_timer_stop(&hw_mgr->ctx_data[ctx_id]);
 ubwc_cfg_failed:
 	if (!hw_mgr->ctxt_cnt)
 		cam_icp_mgr_icp_power_collapse(hw_mgr);
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
index 43d7a4a..4d181f0 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -183,6 +183,8 @@
  * @temp_payload: Payload for destroy handle data
  * @ctx_id: Context Id
  * @clk_info: Current clock info of a context
+ * @watch_dog: watchdog timer handle
+ * @watch_dog_reset_counter: Counter for watch dog reset
  */
 struct cam_icp_hw_ctx_data {
 	void *context_priv;
@@ -200,6 +202,8 @@
 	struct ipe_bps_destroy temp_payload;
 	uint32_t ctx_id;
 	struct cam_ctx_clk_info clk_info;
+	struct cam_req_mgr_timer *watch_dog;
+	uint32_t watch_dog_reset_counter;
 };
 
 /**
@@ -222,6 +226,7 @@
  * @compressed_bw: Current compressed bandwidth voting
  * @hw_type: IPE/BPS device type
  * @watch_dog: watchdog timer handle
+ * @watch_dog_reset_counter: Counter for watch dog reset
  */
 struct cam_icp_clk_info {
 	uint32_t base_clk;
@@ -232,6 +237,7 @@
 	uint64_t compressed_bw;
 	uint32_t hw_type;
 	struct cam_req_mgr_timer *watch_dog;
+	uint32_t watch_dog_reset_counter;
 };
 
 /**
@@ -258,6 +264,8 @@
  * @dentry: Debugfs entry
  * @a5_debug: A5 debug flag
  * @icp_pc_flag: Flag to enable/disable power collapse
+ * @ipe_bps_pc_flag: Flag to enable/disable
+ *                   power collapse for ipe & bps
  * @icp_debug_clk: Set clock based on debug value
  * @icp_default_clk: Set this clok if user doesn't supply
  * @clk_info: Clock info of hardware
@@ -295,6 +303,7 @@
 	struct dentry *dentry;
 	bool a5_debug;
 	bool icp_pc_flag;
+	bool ipe_bps_pc_flag;
 	uint64_t icp_debug_clk;
 	uint64_t icp_default_clk;
 	struct cam_icp_clk_info clk_info[ICP_CLK_HW_MAX];
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h
index 6915ad5..9e05f2b 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h
@@ -22,4 +22,15 @@
 	CAM_ICP_DEV_BPS,
 	CAM_ICP_DEV_MAX,
 };
+
+/**
+ * struct cam_a5_clk_update_cmd - Payload for hw manager command
+ *
+ * @curr_clk_rate:        clk rate to HW
+ * @ipe_bps_pc_enable:    power collapse enable flag
+ */
+struct cam_a5_clk_update_cmd {
+	uint32_t  curr_clk_rate;
+	bool  ipe_bps_pc_enable;
+};
 #endif
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h
index d2e04ef..771c4ed 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -21,7 +21,7 @@
 #define ICP_CLK_TURBO_HZ         600000000
 #define ICP_CLK_SVS_HZ           400000000
 
-#define CAM_ICP_A5_BW_BYTES_VOTE 100000000
+#define CAM_ICP_A5_BW_BYTES_VOTE 40000000
 
 #define CAM_ICP_CTX_MAX          36
 
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
index 5b4156a..87478af 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
@@ -267,22 +267,28 @@
 		rc = cam_ipe_handle_resume(ipe_dev);
 		break;
 	case CAM_ICP_IPE_CMD_UPDATE_CLK: {
-		uint32_t clk_rate = *(uint32_t *)cmd_args;
+		struct cam_a5_clk_update_cmd *clk_upd_cmd =
+			(struct cam_a5_clk_update_cmd *)cmd_args;
+		uint32_t clk_rate = clk_upd_cmd->curr_clk_rate;
 
 		CAM_DBG(CAM_ICP, "ipe_src_clk rate = %d", (int)clk_rate);
 		if (!core_info->clk_enable) {
-			cam_ipe_handle_pc(ipe_dev);
-			cam_cpas_reg_write(core_info->cpas_handle,
-				CAM_CPAS_REG_CPASTOP,
-				hw_info->pwr_ctrl, true, 0x0);
+			if (clk_upd_cmd->ipe_bps_pc_enable) {
+				cam_ipe_handle_pc(ipe_dev);
+				cam_cpas_reg_write(core_info->cpas_handle,
+					CAM_CPAS_REG_CPASTOP,
+					hw_info->pwr_ctrl, true, 0x0);
+			}
 			rc = cam_ipe_toggle_clk(soc_info, true);
 			if (rc)
 				CAM_ERR(CAM_ICP, "Enable failed");
 			else
 				core_info->clk_enable = true;
-			rc = cam_ipe_handle_resume(ipe_dev);
-			if (rc)
-				CAM_ERR(CAM_ICP, "handle resume failed");
+			if (clk_upd_cmd->ipe_bps_pc_enable) {
+				rc = cam_ipe_handle_resume(ipe_dev);
+				if (rc)
+					CAM_ERR(CAM_ICP, "bps resume failed");
+			}
 		}
 		CAM_DBG(CAM_ICP, "clock rate %d", clk_rate);
 
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c
index 289d7d4..d24305a 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -141,9 +141,22 @@
 int cam_ipe_update_clk_rate(struct cam_hw_soc_info *soc_info,
 	uint32_t clk_rate)
 {
+	int32_t src_clk_idx;
+
 	if (!soc_info)
 		return -EINVAL;
 
+	src_clk_idx = soc_info->src_clk_idx;
+
+	if ((soc_info->clk_level_valid[CAM_TURBO_VOTE] == true) &&
+		(soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx] != 0) &&
+		(clk_rate > soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx])) {
+		CAM_DBG(CAM_ICP, "clk_rate %d greater than max, reset to %d",
+			clk_rate,
+			soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx]);
+		clk_rate = soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx];
+	}
+
 	return cam_soc_util_set_clk_rate(soc_info->clk[soc_info->src_clk_idx],
 		soc_info->clk_name[soc_info->src_clk_idx], clk_rate);
 }
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
index 01c0a02..46c4c6a 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -456,6 +456,9 @@
 			}
 		}
 
+		if (ctx_isp->substate_activated == CAM_ISP_CTX_ACTIVATED_BUBBLE)
+			request_id = 0;
+
 		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
 			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
 	} else {
@@ -612,7 +615,10 @@
 		req_isp->bubble_report = 0;
 	}
 
-	request_id = req->request_id;
+	if (req->request_id > ctx_isp->reported_req_id) {
+		request_id = req->request_id;
+		ctx_isp->reported_req_id = request_id;
+	}
 	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
 		CAM_REQ_MGR_SOF_EVENT_ERROR);
 
@@ -738,9 +744,18 @@
 		req_isp->bubble_report = 0;
 	}
 
-	request_id = req->request_id;
-	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
-		CAM_REQ_MGR_SOF_EVENT_ERROR);
+	if (!req_isp->bubble_report) {
+		if (req->request_id > ctx_isp->reported_req_id) {
+			request_id = req->request_id;
+			ctx_isp->reported_req_id = request_id;
+			__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+			CAM_REQ_MGR_SOF_EVENT_ERROR);
+		} else
+			__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+				CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+	} else
+		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
 
 	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
 	CAM_DBG(CAM_ISP, "next substate %d", ctx_isp->substate_activated);
@@ -881,6 +896,29 @@
 	return rc;
 }
 
+static int __cam_isp_ctx_sof_in_flush(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	int rc = 0;
+	struct cam_isp_hw_sof_event_data      *sof_event_data = evt_data;
+
+	if (!evt_data) {
+		CAM_ERR(CAM_ISP, "in valid sof event data");
+		return -EINVAL;
+	}
+	ctx_isp->frame_id++;
+	ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
+	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
+		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
+
+	if (--ctx_isp->frame_skip_count == 0)
+		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+	else
+		CAM_ERR(CAM_ISP, "Skip currect SOF");
+
+	return rc;
+}
+
 static struct cam_isp_ctx_irq_ops
 	cam_isp_ctx_activated_state_machine_irq[CAM_ISP_CTX_ACTIVATED_MAX] = {
 	/* SOF */
@@ -952,6 +990,17 @@
 	/* HALT */
 	{
 	},
+	/* FLUSH */
+	{
+		.irq_ops = {
+			NULL,
+			__cam_isp_ctx_sof_in_flush,
+			NULL,
+			NULL,
+			NULL,
+			__cam_isp_ctx_buf_done_in_applied,
+		},
+	},
 };
 
 static int __cam_isp_ctx_apply_req_in_activated_state(
@@ -1155,6 +1204,24 @@
 	return rc;
 }
 
+static int __cam_isp_ctx_flush_req_in_activated(
+	struct cam_context *ctx,
+	struct cam_req_mgr_flush_request *flush_req)
+{
+	int rc = 0;
+	struct cam_isp_context *ctx_isp;
+
+	ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
+	spin_lock_bh(&ctx->lock);
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_FLUSH;
+	ctx_isp->frame_skip_count = 2;
+	spin_unlock_bh(&ctx->lock);
+
+	CAM_DBG(CAM_ISP, "Flush request in state %d", ctx->state);
+	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
+	return rc;
+}
+
 static int __cam_isp_ctx_flush_req_in_ready(
 	struct cam_context *ctx,
 	struct cam_req_mgr_flush_request *flush_req)
@@ -1215,12 +1282,24 @@
 		.crm_ops = {},
 		.irq_ops = NULL,
 	},
+	/* HW ERROR */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
 	/* HALT */
 	{
 		.ioctl_ops = {},
 		.crm_ops = {},
 		.irq_ops = NULL,
 	},
+	/* FLUSH */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
 };
 
 static int __cam_isp_ctx_rdi_only_sof_in_top_state(
@@ -1310,6 +1389,17 @@
 	struct cam_isp_hw_sof_event_data      *sof_event_data = evt_data;
 	uint64_t  request_id = 0;
 
+	/*
+	 * SOF in bubble applied state means the reg update was not
+	 * received. Before incrementing the frame id and overriding the
+	 * time stamp value, send the previous SOF time stamp that was
+	 * captured in the applied state.
+	 */
+	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
+		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
+	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+		CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+
 	ctx_isp->frame_id++;
 	ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
 	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
@@ -1359,9 +1449,18 @@
 		req_isp->bubble_report = 0;
 	}
 
-	request_id = req->request_id;
-	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
-		CAM_REQ_MGR_SOF_EVENT_ERROR);
+	if (!req_isp->bubble_report) {
+		if (req->request_id > ctx_isp->reported_req_id) {
+			request_id = req->request_id;
+			ctx_isp->reported_req_id = request_id;
+			__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+			CAM_REQ_MGR_SOF_EVENT_ERROR);
+		} else
+			__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+				CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+	} else
+		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
 
 	/* change the state to bubble, as reg update has not come */
 	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
@@ -1566,10 +1665,23 @@
 			__cam_isp_ctx_buf_done_in_bubble_applied,
 		},
 	},
-
+	/* HW ERROR */
+	{
+	},
 	/* HALT */
 	{
 	},
+	/* FLUSH */
+	{
+		.irq_ops = {
+			NULL,
+			__cam_isp_ctx_sof_in_flush,
+			NULL,
+			NULL,
+			NULL,
+			__cam_isp_ctx_buf_done_in_applied,
+		},
+	},
 };
 
 static int __cam_isp_ctx_rdi_only_apply_req_top_state(
@@ -1625,15 +1737,26 @@
 		.crm_ops = {},
 		.irq_ops = NULL,
 	},
+	/* HW ERROR */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
 	/* HALT */
 	{
 		.ioctl_ops = {},
 		.crm_ops = {},
 		.irq_ops = NULL,
 	},
+	/* FLUSH */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
 };
 
-
 /* top level state machine */
 static int __cam_isp_ctx_release_dev_in_top_state(struct cam_context *ctx,
 	struct cam_release_dev_cmd *cmd)
@@ -2177,6 +2300,58 @@
 	return rc;
 }
 
+static int __cam_isp_ctx_link_pause(struct cam_context *ctx)
+{
+	int rc = 0;
+	struct cam_isp_hw_cmd_args   hw_cmd_args;
+	struct cam_isp_context      *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
+	hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_PAUSE_HW;
+	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
+		&hw_cmd_args);
+
+	return rc;
+}
+
+static int __cam_isp_ctx_link_resume(struct cam_context *ctx)
+{
+	int rc = 0;
+	struct cam_isp_hw_cmd_args   hw_cmd_args;
+	struct cam_isp_context      *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
+	hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_RESUME_HW;
+	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
+		&hw_cmd_args);
+
+	return rc;
+}
+
+static int __cam_isp_ctx_process_evt(struct cam_context *ctx,
+	struct cam_req_mgr_link_evt_data *link_evt_data)
+{
+	int rc = 0;
+
+	switch (link_evt_data->evt_type) {
+	case CAM_REQ_MGR_LINK_EVT_ERR:
+		/* No need to handle this message now */
+		break;
+	case CAM_REQ_MGR_LINK_EVT_PAUSE:
+		__cam_isp_ctx_link_pause(ctx);
+		break;
+	case CAM_REQ_MGR_LINK_EVT_RESUME:
+		__cam_isp_ctx_link_resume(ctx);
+		break;
+	default:
+		CAM_WARN(CAM_ISP, "Unknown event from CRM");
+		break;
+	}
+	return rc;
+}
+
 static int __cam_isp_ctx_unlink_in_activated(struct cam_context *ctx,
 	struct cam_req_mgr_core_dev_link_setup *unlink)
 {
@@ -2308,7 +2483,8 @@
 		.crm_ops = {
 			.unlink = __cam_isp_ctx_unlink_in_activated,
 			.apply_req = __cam_isp_ctx_apply_req,
-			.flush_req = __cam_isp_ctx_flush_req_in_top_state,
+			.flush_req = __cam_isp_ctx_flush_req_in_activated,
+			.process_evt = __cam_isp_ctx_process_evt,
 		},
 		.irq_ops = __cam_isp_ctx_handle_irq_in_activated,
 	},
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
index 88ebc03..b0b7ae9 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -52,6 +52,7 @@
 	CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED,
 	CAM_ISP_CTX_ACTIVATED_HW_ERROR,
 	CAM_ISP_CTX_ACTIVATED_HALT,
+	CAM_ISP_CTX_ACTIVATED_FLUSH,
 	CAM_ISP_CTX_ACTIVATED_MAX,
 };
 
@@ -116,6 +117,7 @@
  * @subscribe_event:       The irq event mask that CRM subscribes to, IFE will
  *                         invoke CRM cb at those event.
  * @last_applied_req_id:   Last applied request id
+ * @frame_skip_count:      Number of frames to skip before changing state
  *
  */
 struct cam_isp_context {
@@ -135,6 +137,7 @@
 	int64_t                          reported_req_id;
 	uint32_t                         subscribe_event;
 	int64_t                          last_applied_req_id;
+	uint32_t                         frame_skip_count;
 };
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
index ccab3a0..fda6427 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -860,10 +860,95 @@
 	return rc;
 }
 
-static int cam_ife_hw_mgr_acquire_res_ife_csid_ipp(
+static int cam_ife_mgr_acquire_cid_res(
 	struct cam_ife_hw_mgr_ctx          *ife_ctx,
 	struct cam_isp_in_port_info        *in_port,
-	uint32_t                            cid_res_id)
+	uint32_t                           *cid_res_id,
+	enum cam_ife_pix_path_res_id        csid_path)
+{
+	int rc = -1;
+	int i, j;
+	struct cam_ife_hw_mgr               *ife_hw_mgr;
+	struct cam_ife_hw_mgr_res           *cid_res;
+	struct cam_hw_intf                  *hw_intf;
+	struct cam_csid_hw_reserve_resource_args  csid_acquire;
+
+	ife_hw_mgr = ife_ctx->hw_mgr;
+
+	rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, &cid_res);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "No more free hw mgr resource");
+		goto err;
+	}
+	cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_cid, &cid_res);
+
+	csid_acquire.res_type = CAM_ISP_RESOURCE_CID;
+	csid_acquire.in_port = in_port;
+	csid_acquire.res_id =  csid_path;
+
+	for (i = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) {
+		if (!ife_hw_mgr->csid_devices[i])
+			continue;
+
+		hw_intf = ife_hw_mgr->csid_devices[i];
+		rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, &csid_acquire,
+			sizeof(csid_acquire));
+		if (rc)
+			continue;
+		else
+			break;
+	}
+
+	if (i == CAM_IFE_CSID_HW_NUM_MAX || !csid_acquire.node_res) {
+		CAM_ERR(CAM_ISP, "Can not acquire ife csid rdi resource");
+		goto err;
+	}
+
+	cid_res->res_type = CAM_IFE_HW_MGR_RES_CID;
+	cid_res->res_id = csid_acquire.node_res->res_id;
+	cid_res->is_dual_vfe = in_port->usage_type;
+	cid_res->hw_res[0] = csid_acquire.node_res;
+	cid_res->hw_res[1] = NULL;
+	/* CID(DT_ID) value of the acquired device, required for path */
+	*cid_res_id = csid_acquire.node_res->res_id;
+
+	if (cid_res->is_dual_vfe) {
+		csid_acquire.node_res = NULL;
+		csid_acquire.res_type = CAM_ISP_RESOURCE_CID;
+		csid_acquire.in_port = in_port;
+		for (j = i + 1; j < CAM_IFE_CSID_HW_NUM_MAX; j++) {
+			if (!ife_hw_mgr->csid_devices[j])
+				continue;
+
+			hw_intf = ife_hw_mgr->csid_devices[j];
+			rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
+				&csid_acquire, sizeof(csid_acquire));
+			if (rc)
+				continue;
+			else
+				break;
+		}
+
+		if (j == CAM_IFE_CSID_HW_NUM_MAX) {
+			CAM_ERR(CAM_ISP,
+				"Can not acquire ife csid rdi resource");
+			goto err;
+		}
+		cid_res->hw_res[1] = csid_acquire.node_res;
+	}
+	cid_res->parent = &ife_ctx->res_list_ife_in;
+	ife_ctx->res_list_ife_in.child[
+		ife_ctx->res_list_ife_in.num_children++] = cid_res;
+
+	return 0;
+err:
+	return rc;
+
+}
+
+static int cam_ife_hw_mgr_acquire_res_ife_csid_ipp(
+	struct cam_ife_hw_mgr_ctx          *ife_ctx,
+	struct cam_isp_in_port_info        *in_port)
 {
 	int rc = -1;
 	int i;
@@ -872,8 +957,17 @@
 	struct cam_ife_hw_mgr_res           *csid_res;
 	struct cam_ife_hw_mgr_res           *cid_res;
 	struct cam_hw_intf                  *hw_intf;
+	uint32_t                             cid_res_id;
 	struct cam_csid_hw_reserve_resource_args  csid_acquire;
 
+	/* get cid resource */
+	rc = cam_ife_mgr_acquire_cid_res(ife_ctx, in_port, &cid_res_id,
+		CAM_IFE_PIX_PATH_RES_IPP);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Acquire IFE CID resource Failed");
+		goto err;
+	}
+
 	ife_hw_mgr = ife_ctx->hw_mgr;
 
 	rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, &csid_res);
@@ -985,8 +1079,7 @@
 
 static int cam_ife_hw_mgr_acquire_res_ife_csid_rdi(
 	struct cam_ife_hw_mgr_ctx     *ife_ctx,
-	struct cam_isp_in_port_info   *in_port,
-	uint32_t                       cid_res_id)
+	struct cam_isp_in_port_info   *in_port)
 {
 	int rc = -1;
 	int i, j;
@@ -996,6 +1089,7 @@
 	struct cam_ife_hw_mgr_res           *cid_res;
 	struct cam_hw_intf                  *hw_intf;
 	struct cam_isp_out_port_info        *out_port;
+	uint32_t                            cid_res_id;
 	struct cam_csid_hw_reserve_resource_args  csid_acquire;
 
 	ife_hw_mgr = ife_ctx->hw_mgr;
@@ -1005,6 +1099,15 @@
 		if (!cam_ife_hw_mgr_is_rdi_res(out_port->res_type))
 			continue;
 
+		/* get cid resource */
+		rc = cam_ife_mgr_acquire_cid_res(ife_ctx, in_port, &cid_res_id,
+			cam_ife_hw_mgr_get_ife_csid_rdi_res_type(
+			out_port->res_type));
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Acquire IFE CID resource Failed");
+			goto err;
+		}
+
 		rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list,
 			&csid_res);
 		if (rc) {
@@ -1135,91 +1238,6 @@
 	return 0;
 }
 
-static int cam_ife_mgr_acquire_cid_res(
-	struct cam_ife_hw_mgr_ctx          *ife_ctx,
-	struct cam_isp_in_port_info        *in_port,
-	uint32_t                           *cid_res_id,
-	int                                 pixel_count)
-{
-	int rc = -1;
-	int i, j;
-	struct cam_ife_hw_mgr               *ife_hw_mgr;
-	struct cam_ife_hw_mgr_res           *cid_res;
-	struct cam_hw_intf                  *hw_intf;
-	struct cam_csid_hw_reserve_resource_args  csid_acquire;
-
-	ife_hw_mgr = ife_ctx->hw_mgr;
-
-	rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, &cid_res);
-	if (rc) {
-		CAM_ERR(CAM_ISP, "No more free hw mgr resource");
-		goto err;
-	}
-	cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_cid, &cid_res);
-
-	csid_acquire.res_type = CAM_ISP_RESOURCE_CID;
-	csid_acquire.in_port = in_port;
-	csid_acquire.pixel_count = pixel_count;
-
-	for (i = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) {
-		if (!ife_hw_mgr->csid_devices[i])
-			continue;
-
-		hw_intf = ife_hw_mgr->csid_devices[i];
-		rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, &csid_acquire,
-			sizeof(csid_acquire));
-		if (rc)
-			continue;
-		else
-			break;
-	}
-
-	if (i == CAM_IFE_CSID_HW_NUM_MAX || !csid_acquire.node_res) {
-		CAM_ERR(CAM_ISP, "Can not acquire ife csid rdi resource");
-		goto err;
-	}
-
-	cid_res->res_type = CAM_IFE_HW_MGR_RES_CID;
-	cid_res->res_id = csid_acquire.node_res->res_id;
-	cid_res->is_dual_vfe = in_port->usage_type;
-	cid_res->hw_res[0] = csid_acquire.node_res;
-	cid_res->hw_res[1] = NULL;
-	/* CID(DT_ID) value of acquire device, require for path */
-	*cid_res_id = csid_acquire.node_res->res_id;
-
-	if (cid_res->is_dual_vfe) {
-		csid_acquire.node_res = NULL;
-		csid_acquire.res_type = CAM_ISP_RESOURCE_CID;
-		csid_acquire.in_port = in_port;
-		for (j = i + 1; j < CAM_IFE_CSID_HW_NUM_MAX; j++) {
-			if (!ife_hw_mgr->csid_devices[j])
-				continue;
-
-			hw_intf = ife_hw_mgr->csid_devices[j];
-			rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
-				&csid_acquire, sizeof(csid_acquire));
-			if (rc)
-				continue;
-			else
-				break;
-		}
-
-		if (j == CAM_IFE_CSID_HW_NUM_MAX) {
-			CAM_ERR(CAM_ISP,
-				"Can not acquire ife csid rdi resource");
-			goto err;
-		}
-		cid_res->hw_res[1] = csid_acquire.node_res;
-	}
-	cid_res->parent = &ife_ctx->res_list_ife_in;
-	ife_ctx->res_list_ife_in.child[
-		ife_ctx->res_list_ife_in.num_children++] = cid_res;
-
-	return 0;
-err:
-	return rc;
-
-}
 static int cam_ife_mgr_acquire_hw_for_ctx(
 	struct cam_ife_hw_mgr_ctx          *ife_ctx,
 	struct cam_isp_in_port_info        *in_port,
@@ -1229,7 +1247,6 @@
 	int is_dual_vfe                           = 0;
 	int pixel_count                           = 0;
 	int rdi_count                             = 0;
-	uint32_t                                cid_res_id = 0;
 
 	is_dual_vfe = in_port->usage_type;
 
@@ -1248,18 +1265,9 @@
 		return -EINVAL;
 	}
 
-	/* get cid resource */
-	rc = cam_ife_mgr_acquire_cid_res(ife_ctx, in_port, &cid_res_id,
-		pixel_count);
-	if (rc) {
-		CAM_ERR(CAM_ISP, "Acquire IFE CID resource Failed");
-		goto err;
-	}
-
 	if (pixel_count) {
 		/* get ife csid IPP resrouce */
-		rc = cam_ife_hw_mgr_acquire_res_ife_csid_ipp(ife_ctx, in_port,
-				cid_res_id);
+		rc = cam_ife_hw_mgr_acquire_res_ife_csid_ipp(ife_ctx, in_port);
 		if (rc) {
 			CAM_ERR(CAM_ISP,
 				"Acquire IFE CSID IPP resource Failed");
@@ -1269,8 +1277,7 @@
 
 	if (rdi_count) {
 		/* get ife csid rdi resource */
-		rc = cam_ife_hw_mgr_acquire_res_ife_csid_rdi(ife_ctx, in_port,
-			cid_res_id);
+		rc = cam_ife_hw_mgr_acquire_res_ife_csid_rdi(ife_ctx, in_port);
 		if (rc) {
 			CAM_ERR(CAM_ISP,
 				"Acquire IFE CSID RDI resource Failed");
@@ -2488,6 +2495,53 @@
 	return rc;
 }
 
+static int cam_ife_mgr_bw_control(struct cam_ife_hw_mgr_ctx *ctx,
+	enum cam_vfe_bw_control_action action)
+{
+	struct cam_ife_hw_mgr_res             *hw_mgr_res;
+	struct cam_hw_intf                    *hw_intf;
+	struct cam_vfe_bw_control_args         bw_ctrl_args;
+	int                                    rc = -EINVAL;
+	uint32_t                               i;
+
+	CAM_DBG(CAM_ISP, "Enter...ctx id:%d", ctx->ctx_index);
+
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!hw_mgr_res->hw_res[i])
+				continue;
+
+			hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
+			if (hw_intf && hw_intf->hw_ops.process_cmd) {
+				bw_ctrl_args.node_res =
+					hw_mgr_res->hw_res[i];
+				bw_ctrl_args.action = action;
+
+				rc = hw_intf->hw_ops.process_cmd(
+					hw_intf->hw_priv,
+					CAM_ISP_HW_CMD_BW_CONTROL,
+					&bw_ctrl_args,
+					sizeof(struct cam_vfe_bw_control_args));
+				if (rc)
+					CAM_ERR(CAM_ISP, "BW Update failed");
+			} else
+				CAM_WARN(CAM_ISP, "NULL hw_intf!");
+		}
+	}
+
+	return rc;
+}
+
+static int cam_ife_mgr_pause_hw(struct cam_ife_hw_mgr_ctx *ctx)
+{
+	return cam_ife_mgr_bw_control(ctx, CAM_VFE_BW_CONTROL_EXCLUDE);
+}
+
+static int cam_ife_mgr_resume_hw(struct cam_ife_hw_mgr_ctx *ctx)
+{
+	return cam_ife_mgr_bw_control(ctx, CAM_VFE_BW_CONTROL_INCLUDE);
+}
+
 static int cam_ife_mgr_cmd(void *hw_mgr_priv, void *cmd_args)
 {
 	int rc = 0;
@@ -2513,6 +2567,12 @@
 			hw_cmd_args->u.is_rdi_only_context = 0;
 
 		break;
+	case CAM_ISP_HW_MGR_CMD_PAUSE_HW:
+		cam_ife_mgr_pause_hw(ctx);
+		break;
+	case CAM_ISP_HW_MGR_CMD_RESUME_HW:
+		cam_ife_mgr_resume_hw(ctx);
+		break;
 	default:
 		CAM_ERR(CAM_ISP, "Invalid HW mgr command:0x%x",
 			hw_cmd_args->cmd_type);
@@ -3733,10 +3793,13 @@
 	struct cam_vfe_top_irq_evt_payload   *evt_payload;
 	int rc = -EINVAL;
 
-	if (!handler_priv)
+	if (!evt_payload_priv)
 		return rc;
 
 	evt_payload = evt_payload_priv;
+	if (!handler_priv)
+		goto put_payload;
+
 	ife_hwr_mgr_ctx = (struct cam_ife_hw_mgr_ctx *)handler_priv;
 
 	CAM_DBG(CAM_ISP, "addr of evt_payload = %pK core_index:%d",
@@ -3764,7 +3827,7 @@
 	if (rc) {
 		CAM_ERR(CAM_ISP, "Encountered Error (%d), ignoring other irqs",
 			 rc);
-		return IRQ_HANDLED;
+		goto put_payload;
 	}
 
 	CAM_DBG(CAM_ISP, "Calling EOF");
@@ -3786,6 +3849,8 @@
 	cam_ife_hw_mgr_handle_epoch_for_camif_hw_res(ife_hwr_mgr_ctx,
 		evt_payload_priv);
 
+put_payload:
+	cam_vfe_put_evt_payload(evt_payload->core_info, &evt_payload);
 	return IRQ_HANDLED;
 }
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c
index 031b7b2..b632e77 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c
@@ -607,6 +607,33 @@
 	CAM_DBG(CAM_ISP, "Exit");
 }
 
+irqreturn_t cam_irq_controller_clear_and_mask(int irq_num, void *priv)
+{
+	struct cam_irq_controller  *controller  = priv;
+	uint32_t i = 0;
+
+	if (!controller)
+		return IRQ_NONE;
+
+	for (i = 0; i < controller->num_registers; i++) {
+
+		cam_io_w_mb(0x0, controller->mem_base +
+			controller->irq_register_arr[i].clear_reg_offset);
+	}
+
+	if (controller->global_clear_offset)
+		cam_io_w_mb(controller->global_clear_bitmask,
+			controller->mem_base +
+			controller->global_clear_offset);
+
+	for (i = 0; i < controller->num_registers; i++) {
+		cam_io_w_mb(0x0, controller->mem_base +
+		controller->irq_register_arr[i].mask_reg_offset);
+	}
+
+	return IRQ_HANDLED;
+}
+
 irqreturn_t cam_irq_controller_handle_irq(int irq_num, void *priv)
 {
 	struct cam_irq_controller  *controller  = priv;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.h
index 7e307b5..e3071ac 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.h
@@ -250,4 +250,18 @@
  */
 int cam_irq_controller_enable_irq(void *irq_controller, uint32_t handle);
 
+/*
+ * cam_irq_controller_clear_and_mask()
+ *
+ * @brief:              This function clears and masks all the irq bits
+ *
+ * @irq_num:            Number of IRQ line that was set that lead to this
+ *                      function being called
+ * @priv:               Private data registered with request_irq is passed back
+ *                      here. This private data should be the irq_controller
+ *                      structure.
+ *
+ * @return:             IRQ_HANDLED/IRQ_NONE
+ */
+irqreturn_t cam_irq_controller_clear_and_mask(int irq_num, void *priv);
 #endif /* _CAM_IRQ_CONTROLLER_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
index cf044eb..56f2d68 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -130,6 +130,8 @@
 /* enum cam_isp_hw_mgr_command - Hardware manager command type */
 enum cam_isp_hw_mgr_command {
 	CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT,
+	CAM_ISP_HW_MGR_CMD_PAUSE_HW,
+	CAM_ISP_HW_MGR_CMD_RESUME_HW,
 	CAM_ISP_HW_MGR_CMD_MAX,
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
index 70c9c3b..ff0c91f 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -287,7 +287,7 @@
 
 static int cam_ife_csid_cid_get(struct cam_ife_csid_hw *csid_hw,
 	struct cam_isp_resource_node **res, int32_t vc, uint32_t dt,
-	uint32_t res_type, int pixel_count)
+	uint32_t res_type)
 {
 	int  rc = 0;
 	struct cam_ife_csid_cid_data    *cid_data;
@@ -305,8 +305,7 @@
 					break;
 				}
 			} else {
-				if (cid_data->vc == vc && cid_data->dt == dt &&
-					cid_data->pixel_count == pixel_count) {
+				if (cid_data->vc == vc && cid_data->dt == dt) {
 					cid_data->cnt++;
 					*res = &csid_hw->cid_res[i];
 					break;
@@ -330,7 +329,6 @@
 				cid_data->vc  = vc;
 				cid_data->dt  = dt;
 				cid_data->cnt = 1;
-				cid_data->pixel_count = pixel_count;
 				csid_hw->cid_res[j].res_state =
 					CAM_ISP_RESOURCE_STATE_RESERVED;
 				*res = &csid_hw->cid_res[j];
@@ -570,7 +568,6 @@
 	struct cam_csid_hw_reserve_resource_args  *cid_reserv)
 {
 	int rc = 0;
-	uint32_t i;
 	struct cam_ife_csid_cid_data       *cid_data;
 
 	CAM_DBG(CAM_ISP,
@@ -728,7 +725,6 @@
 		cid_data->vc = cid_reserv->in_port->vc;
 		cid_data->dt = cid_reserv->in_port->dt;
 		cid_data->cnt = 1;
-		cid_data->pixel_count = cid_reserv->pixel_count;
 		cid_reserv->node_res = &csid_hw->cid_res[0];
 		csid_hw->csi2_reserve_cnt++;
 
@@ -737,27 +733,43 @@
 			csid_hw->hw_intf->hw_idx,
 			cid_reserv->node_res->res_id);
 	} else {
-		if (cid_reserv->pixel_count > 0) {
-			for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++) {
-				cid_data = (struct cam_ife_csid_cid_data *)
-					csid_hw->cid_res[i].res_priv;
-				if ((csid_hw->cid_res[i].res_state >=
-					CAM_ISP_RESOURCE_STATE_RESERVED) &&
-					cid_data->pixel_count > 0) {
-					CAM_DBG(CAM_ISP,
-						"CSID:%d IPP resource is full");
-					rc = -EINVAL;
-					goto end;
-				}
+		switch (cid_reserv->res_id) {
+		case CAM_IFE_PIX_PATH_RES_IPP:
+			if (csid_hw->ipp_res.res_state !=
+				CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+				CAM_DBG(CAM_ISP,
+					"CSID:%d IPP resource not available",
+					csid_hw->hw_intf->hw_idx);
+				rc = -EINVAL;
+				goto end;
 			}
+			break;
+		case CAM_IFE_PIX_PATH_RES_RDI_0:
+		case CAM_IFE_PIX_PATH_RES_RDI_1:
+		case CAM_IFE_PIX_PATH_RES_RDI_2:
+		case CAM_IFE_PIX_PATH_RES_RDI_3:
+			if (csid_hw->rdi_res[cid_reserv->res_id].res_state !=
+				CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+				CAM_DBG(CAM_ISP,
+					"CSID:%d RDI:%d resource not available",
+					csid_hw->hw_intf->hw_idx,
+					cid_reserv->res_id);
+				rc = -EINVAL;
+				goto end;
+			}
+			break;
+		default:
+			CAM_ERR(CAM_ISP, "CSID%d: Invalid csid path",
+				csid_hw->hw_intf->hw_idx);
+			rc = -EINVAL;
+			goto end;
 		}
 
 		rc = cam_ife_csid_cid_get(csid_hw,
 			&cid_reserv->node_res,
 			cid_reserv->in_port->vc,
 			cid_reserv->in_port->dt,
-			cid_reserv->in_port->res_type,
-			cid_reserv->pixel_count);
+			cid_reserv->in_port->res_type);
 		/* if success then increment the reserve count */
 		if (!rc) {
 			if (csid_hw->csi2_reserve_cnt == UINT_MAX) {
@@ -1884,21 +1896,23 @@
 
 	if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX ||
 		!csid_reg->rdi_reg[res->res_id]) {
-		CAM_DBG(CAM_ISP, "CSID:%d Invalid res id%d",
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d Invalid res id%d",
 			csid_hw->hw_intf->hw_idx, res->res_id);
 		return -EINVAL;
 	}
 
 	if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
 		res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
-		CAM_DBG(CAM_ISP, "CSID:%d Res:%d already in stopped state:%d",
-			 csid_hw->hw_intf->hw_idx,
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"CSID:%d Res:%d already in stopped state:%d",
+			csid_hw->hw_intf->hw_idx,
 			res->res_id, res->res_state);
 		return rc;
 	}
 
 	if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
-		CAM_DBG(CAM_ISP, "CSID:%d Res:%d Invalid res_state%d",
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"CSID:%d Res:%d Invalid res_state%d",
 			csid_hw->hw_intf->hw_idx, res->res_id,
 			res->res_state);
 		return -EINVAL;
@@ -2006,21 +2020,23 @@
 	soc_info = &csid_hw->hw_info->soc_info;
 
 	if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
-		CAM_DBG(CAM_ISP, "CSID:%d Invalid res id%d",
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d Invalid res id%d",
 			csid_hw->hw_intf->hw_idx, res->res_id);
 		return -EINVAL;
 	}
 
 	if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
 		res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
-		CAM_DBG(CAM_ISP, "CSID:%d Res:%d already in stopped state:%d",
-			 csid_hw->hw_intf->hw_idx,
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"CSID:%d Res:%d already in stopped state:%d",
+			csid_hw->hw_intf->hw_idx,
 			res->res_id, res->res_state);
 		return rc;
 	}
 
 	if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
-		CAM_DBG(CAM_ISP, "CSID:%d Res:%d Invalid state%d",
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"CSID:%d Res:%d Invalid state%d",
 			csid_hw->hw_intf->hw_idx, res->res_id,
 			res->res_state);
 		return -EINVAL;
@@ -2258,6 +2274,33 @@
 	return rc;
 }
 
+static int cam_ife_csid_reset_retain_sw_reg(
+	struct cam_ife_csid_hw *csid_hw)
+{
+	int rc = 0;
+	struct cam_ife_csid_reg_offset *csid_reg =
+		csid_hw->csid_info->csid_reg;
+
+	cam_io_w_mb(csid_reg->cmn_reg->csid_rst_stb,
+		csid_hw->hw_info->soc_info.reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_rst_strobes_addr);
+
+	CAM_DBG(CAM_ISP, "Waiting for SW reset completion from irq handler");
+	rc = wait_for_completion_timeout(&csid_hw->csid_top_complete,
+		msecs_to_jiffies(IFE_CSID_TIMEOUT));
+	if (rc <= 0) {
+		CAM_ERR(CAM_ISP, "CSID:%d reset completion failed rc = %d",
+			csid_hw->hw_intf->hw_idx, rc);
+		if (rc == 0)
+			rc = -ETIMEDOUT;
+	} else {
+		rc = 0;
+	}
+
+	return rc;
+}
+
+
 static int cam_ife_csid_init_hw(void *hw_priv,
 	void *init_args, uint32_t arg_size)
 {
@@ -2290,7 +2333,6 @@
 		goto end;
 	}
 
-
 	if ((res->res_type == CAM_ISP_RESOURCE_PIX_PATH) &&
 		(res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED)) {
 		CAM_ERR(CAM_ISP,
@@ -2304,7 +2346,6 @@
 	CAM_DBG(CAM_ISP, "CSID:%d res type :%d res_id:%d",
 		csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);
 
-
 	/* Initialize the csid hardware */
 	rc = cam_ife_csid_enable_hw(csid_hw);
 	if (rc)
@@ -2328,6 +2369,12 @@
 		break;
 	}
 
+	rc = cam_ife_csid_reset_retain_sw_reg(csid_hw);
+	if (rc < 0) {
+		CAM_ERR(CAM_ISP, "CSID: Failed in SW reset");
+		return rc;
+	}
+
 	if (rc)
 		cam_ife_csid_disable_hw(csid_hw);
 end:
@@ -2489,8 +2536,7 @@
 	/*wait for the path to halt */
 	for (i = 0; i < csid_stop->num_res; i++) {
 		res = csid_stop->node_res[i];
-		if (res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
-			csid_stop->stop_cmd == CAM_CSID_HALT_AT_FRAME_BOUNDARY)
+		if (csid_stop->stop_cmd == CAM_CSID_HALT_AT_FRAME_BOUNDARY)
 			rc = cam_ife_csid_res_wait_for_halt(csid_hw, res);
 		else
 			res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
@@ -2548,8 +2594,35 @@
 
 }
 
+static int cam_ife_csid_halt_device(
+	struct cam_ife_csid_hw *csid_hw)
+{
+	uint32_t  i;
+	int rc = 0;
+	struct cam_isp_resource_node *res_node;
+
+	res_node = &csid_hw->ipp_res;
+	if (res_node->res_state == CAM_ISP_RESOURCE_STATE_STREAMING) {
+		rc = cam_ife_csid_disable_ipp_path(csid_hw,
+			res_node, CAM_CSID_HALT_IMMEDIATELY);
+		res_node->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
+	}
+
+	for (i = 0; i < CAM_IFE_CSID_RDI_MAX; i++) {
+		res_node = &csid_hw->rdi_res[i];
+		if (res_node->res_state == CAM_ISP_RESOURCE_STATE_STREAMING) {
+			rc = cam_ife_csid_disable_rdi_path(csid_hw,
+				res_node, CAM_CSID_HALT_IMMEDIATELY);
+			res_node->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
+		}
+	}
+	return rc;
+}
+
+
 irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
 {
+	int rc = 0;
 	struct cam_ife_csid_hw          *csid_hw;
 	struct cam_hw_soc_info          *soc_info;
 	struct cam_ife_csid_reg_offset  *csid_reg;
@@ -2623,22 +2696,52 @@
 	if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW) {
 		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 0 over flow",
 			 csid_hw->hw_intf->hw_idx);
+		rc = cam_ife_csid_halt_device(csid_hw);
+		if (rc) {
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"CSID:%d csid halt device fail rc = %d",
+				csid_hw->hw_intf->hw_idx, rc);
+		}
 	}
 	if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW) {
 		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 1 over flow",
 			 csid_hw->hw_intf->hw_idx);
+		rc = cam_ife_csid_halt_device(csid_hw);
+		if (rc) {
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"CSID:%d csid halt device fail rc = %d",
+				csid_hw->hw_intf->hw_idx, rc);
+		}
 	}
 	if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW) {
 		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 2 over flow",
 			 csid_hw->hw_intf->hw_idx);
+		rc = cam_ife_csid_halt_device(csid_hw);
+		if (rc) {
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"CSID:%d csid halt device fail rc = %d",
+				csid_hw->hw_intf->hw_idx, rc);
+		}
 	}
 	if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW) {
 		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 3 over flow",
 			 csid_hw->hw_intf->hw_idx);
+		rc = cam_ife_csid_halt_device(csid_hw);
+		if (rc) {
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"CSID:%d csid halt device fail rc = %d",
+				csid_hw->hw_intf->hw_idx, rc);
+		}
 	}
 	if (irq_status_rx & CSID_CSI2_RX_ERROR_TG_FIFO_OVERFLOW) {
 		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d TG OVER  FLOW",
 			 csid_hw->hw_intf->hw_idx);
+		rc = cam_ife_csid_halt_device(csid_hw);
+		if (rc) {
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"CSID:%d csid halt device fail rc = %d",
+				csid_hw->hw_intf->hw_idx, rc);
+		}
 	}
 	if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION) {
 		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d CPHY_EOT_RECEPTION",
@@ -2664,7 +2767,7 @@
 		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d MMAPPED_VC_DT",
 			 csid_hw->hw_intf->hw_idx);
 	}
-		if (irq_status_rx & CSID_CSI2_RX_ERROR_STREAM_UNDERFLOW) {
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_STREAM_UNDERFLOW) {
 		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d ERROR_STREAM_UNDERFLOW",
 			 csid_hw->hw_intf->hw_idx);
 	}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
index b400d14..4b546ea 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -368,7 +368,6 @@
 	uint32_t                     dt;
 	uint32_t                     cnt;
 	uint32_t                     tpg_set;
-	int                          pixel_count;
 };
 
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h
index df97bd6..ceeacbe 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -75,7 +75,6 @@
  * @cid:          cid (DT_ID) value for path, this is applicable for CSID path
  *                reserve
  * @node_res :    Reserved resource structure pointer
- * @pixel_count:  Number of pixel resources
  *
  */
 struct cam_csid_hw_reserve_resource_args {
@@ -87,7 +86,6 @@
 	uint32_t                                  master_idx;
 	uint32_t                                  cid;
 	struct cam_isp_resource_node             *node_res;
-	int                                       pixel_count;
 };
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
index 257a5ac..b9f6d77 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -92,6 +92,7 @@
 	CAM_ISP_HW_CMD_STRIPE_UPDATE,
 	CAM_ISP_HW_CMD_CLOCK_UPDATE,
 	CAM_ISP_HW_CMD_BW_UPDATE,
+	CAM_ISP_HW_CMD_BW_CONTROL,
 	CAM_ISP_HW_CMD_MAX,
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
index b771ec6..8927d6a 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -185,6 +185,22 @@
 	uint64_t                           external_bw_bytes;
 };
 
+enum cam_vfe_bw_control_action {
+	CAM_VFE_BW_CONTROL_EXCLUDE       = 0,
+	CAM_VFE_BW_CONTROL_INCLUDE       = 1
+};
+
+/*
+ * struct cam_vfe_bw_control_args:
+ *
+ * @node_res:             Resource on which to apply the bandwidth control
+ * @action:               Bandwidth control action
+ */
+struct cam_vfe_bw_control_args {
+	struct cam_isp_resource_node      *node_res;
+	enum cam_vfe_bw_control_action     action;
+};
+
 /*
  * struct cam_vfe_top_irq_evt_payload:
  *
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
index 4a7a4f2..2c4fe9d 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -172,6 +172,8 @@
 			th_payload->evt_status_arr[1]);
 		cam_irq_controller_disable_irq(core_info->vfe_irq_controller,
 			core_info->irq_err_handle);
+		cam_irq_controller_clear_and_mask(evt_id,
+			core_info->vfe_irq_controller);
 	}
 
 	rc  = cam_vfe_get_evt_payload(handler_priv->core_info, &evt_payload);
@@ -695,6 +697,7 @@
 	case CAM_ISP_HW_CMD_GET_REG_UPDATE:
 	case CAM_ISP_HW_CMD_CLOCK_UPDATE:
 	case CAM_ISP_HW_CMD_BW_UPDATE:
+	case CAM_ISP_HW_CMD_BW_CONTROL:
 		rc = core_info->vfe_top->hw_ops.process_cmd(
 			core_info->vfe_top->top_priv, cmd_type, cmd_args,
 			arg_size);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
index c166113..36ce652 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -60,7 +60,7 @@
 static uint32_t bus_error_irq_mask[3] = {
 	0x7800,
 	0x0000,
-	0x00C0,
+	0x0040,
 };
 
 enum cam_vfe_bus_packer_format {
@@ -106,6 +106,7 @@
 	struct cam_vfe_bus_irq_evt_payload          evt_payload[
 		CAM_VFE_BUS_VER2_PAYLOAD_MAX];
 	struct list_head                            free_payload_list;
+	spinlock_t                                  spin_lock;
 	struct mutex                                bus_mutex;
 	uint32_t                                    secure_mode;
 	uint32_t                                    num_sec_out;
@@ -214,16 +215,23 @@
 	struct cam_vfe_bus_ver2_common_data  *common_data,
 	struct cam_vfe_bus_irq_evt_payload  **evt_payload)
 {
+	int rc;
+
+	spin_lock(&common_data->spin_lock);
 	if (list_empty(&common_data->free_payload_list)) {
 		*evt_payload = NULL;
 		CAM_ERR_RATE_LIMIT(CAM_ISP, "No free payload");
-		return -ENODEV;
+		rc = -ENODEV;
+		goto done;
 	}
 
 	*evt_payload = list_first_entry(&common_data->free_payload_list,
 		struct cam_vfe_bus_irq_evt_payload, list);
 	list_del_init(&(*evt_payload)->list);
-	return 0;
+	rc = 0;
+done:
+	spin_unlock(&common_data->spin_lock);
+	return rc;
 }
 
 static enum cam_vfe_bus_comp_grp_id
@@ -254,6 +262,7 @@
 	struct cam_vfe_bus_ver2_common_data *common_data = NULL;
 	uint32_t  *ife_irq_regs = NULL;
 	uint32_t   status_reg0, status_reg1, status_reg2;
+	unsigned long flags;
 
 	if (!core_info) {
 		CAM_ERR(CAM_ISP, "Invalid param core_info NULL");
@@ -276,8 +285,12 @@
 	}
 
 	common_data = core_info;
+
+	spin_lock_irqsave(&common_data->spin_lock, flags);
 	list_add_tail(&(*evt_payload)->list,
 		&common_data->free_payload_list);
+	spin_unlock_irqrestore(&common_data->spin_lock, flags);
+
 	*evt_payload = NULL;
 
 	return 0;
@@ -2556,8 +2569,21 @@
 		CAM_DBG(CAM_ISP, "WM %d image address 0x%x",
 			wm_data->index, reg_val_pair[j-1]);
 
-		frame_inc = io_cfg->planes[i].plane_stride *
-			io_cfg->planes[i].slice_height;
+		if (wm_data->en_ubwc) {
+			frame_inc = ALIGNUP(io_cfg->planes[i].plane_stride *
+			    io_cfg->planes[i].slice_height, 4096);
+			frame_inc += io_cfg->planes[i].meta_size;
+			CAM_DBG(CAM_ISP,
+				"WM %d frm %d: ht: %d stride %d meta: %d",
+				wm_data->index, frame_inc,
+				io_cfg->planes[i].slice_height,
+				io_cfg->planes[i].plane_stride,
+				io_cfg->planes[i].meta_size);
+		} else {
+			frame_inc = io_cfg->planes[i].plane_stride *
+				io_cfg->planes[i].slice_height;
+		}
+
 		CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
 			wm_data->hw_regs->frame_inc, frame_inc);
 		CAM_DBG(CAM_ISP, "WM %d frame_inc %d",
@@ -2975,6 +3001,7 @@
 		}
 	}
 
+	spin_lock_init(&bus_priv->common_data.spin_lock);
 	INIT_LIST_HEAD(&bus_priv->common_data.free_payload_list);
 	for (i = 0; i < CAM_VFE_BUS_VER2_PAYLOAD_MAX; i++) {
 		INIT_LIST_HEAD(&bus_priv->common_data.evt_payload[i].list);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
index 9848454..f427ab9 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
@@ -355,7 +355,6 @@
 			CAM_DBG(CAM_ISP, "Received EPOCH");
 			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
 		}
-		cam_vfe_put_evt_payload(payload->core_info, &payload);
 		break;
 	case CAM_ISP_HW_EVENT_REG_UPDATE:
 		if (irq_status0 & camif_priv->reg_data->reg_update_irq_mask) {
@@ -373,7 +372,6 @@
 		if (irq_status1 & camif_priv->reg_data->error_irq_mask1) {
 			CAM_DBG(CAM_ISP, "Received ERROR\n");
 			ret = CAM_ISP_HW_ERROR_OVERFLOW;
-			cam_vfe_put_evt_payload(payload->core_info, &payload);
 		} else {
 			ret = CAM_ISP_HW_ERROR_NONE;
 		}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
index 28e99f2..50dca827 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
@@ -209,7 +209,6 @@
 			CAM_DBG(CAM_ISP, "Received REG UPDATE");
 			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
 		}
-		cam_vfe_put_evt_payload(payload->core_info, &payload);
 		break;
 	default:
 		break;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
index f166025..842cfc7 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -34,6 +34,9 @@
 	struct cam_isp_resource_node        mux_rsrc[CAM_VFE_TOP_VER2_MUX_MAX];
 	unsigned long                       hw_clk_rate;
 	struct cam_axi_vote                 hw_axi_vote;
+	enum cam_vfe_bw_control_action      axi_vote_control[
+						CAM_VFE_TOP_VER2_MUX_MAX];
+
 	struct cam_axi_vote             req_axi_vote[CAM_VFE_TOP_VER2_MUX_MAX];
 	unsigned long                   req_clk_rate[CAM_VFE_TOP_VER2_MUX_MAX];
 };
@@ -134,10 +137,13 @@
 	}
 
 	for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
-		sum.uncompressed_bw +=
-			top_priv->req_axi_vote[i].uncompressed_bw;
-		sum.compressed_bw +=
-			top_priv->req_axi_vote[i].compressed_bw;
+		if (top_priv->axi_vote_control[i] ==
+			CAM_VFE_BW_CONTROL_INCLUDE) {
+			sum.uncompressed_bw +=
+				top_priv->req_axi_vote[i].uncompressed_bw;
+			sum.compressed_bw +=
+				top_priv->req_axi_vote[i].compressed_bw;
+		}
 	}
 
 	CAM_DBG(CAM_ISP, "BW Vote: u=%lld c=%lld",
@@ -239,6 +245,8 @@
 				bw_update->camnoc_bw_bytes;
 			top_priv->req_axi_vote[i].compressed_bw =
 				bw_update->external_bw_bytes;
+			top_priv->axi_vote_control[i] =
+				CAM_VFE_BW_CONTROL_INCLUDE;
 			break;
 		}
 	}
@@ -253,6 +261,50 @@
 	return rc;
 }
 
+static int cam_vfe_top_bw_control(
+	struct cam_vfe_top_ver2_priv *top_priv,
+	 void *cmd_args, uint32_t arg_size)
+{
+	struct cam_vfe_bw_control_args       *bw_ctrl = NULL;
+	struct cam_isp_resource_node         *res = NULL;
+	struct cam_hw_info                   *hw_info = NULL;
+	int                                   rc = 0;
+	int                                   i;
+
+	bw_ctrl = (struct cam_vfe_bw_control_args *)cmd_args;
+	res = bw_ctrl->node_res;
+
+	if (!res || !res->hw_intf->hw_priv)
+		return -EINVAL;
+
+	hw_info = res->hw_intf->hw_priv;
+
+	if (res->res_type != CAM_ISP_RESOURCE_VFE_IN ||
+		res->res_id >= CAM_ISP_HW_VFE_IN_MAX) {
+		CAM_ERR(CAM_ISP, "VFE:%d Invalid res_type:%d res id%d",
+			res->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
+		if (top_priv->mux_rsrc[i].res_id == res->res_id) {
+			top_priv->axi_vote_control[i] = bw_ctrl->action;
+			break;
+		}
+	}
+
+	if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+		CAM_DBG(CAM_ISP, "VFE:%d Not ready to set BW yet :%d",
+			res->hw_intf->hw_idx,
+			hw_info->hw_state);
+	} else {
+		rc = cam_vfe_top_set_axi_bw_vote(top_priv);
+	}
+
+	return rc;
+}
+
 static int cam_vfe_top_mux_get_reg_update(
 	struct cam_vfe_top_ver2_priv *top_priv,
 	void *cmd_args, uint32_t arg_size)
@@ -458,6 +510,8 @@
 				top_priv->req_clk_rate[i] = 0;
 				top_priv->req_axi_vote[i].compressed_bw = 0;
 				top_priv->req_axi_vote[i].uncompressed_bw = 0;
+				top_priv->axi_vote_control[i] =
+					CAM_VFE_BW_CONTROL_EXCLUDE;
 				break;
 			}
 		}
@@ -518,6 +572,9 @@
 		rc = cam_vfe_top_bw_update(top_priv, cmd_args,
 			arg_size);
 		break;
+	case CAM_ISP_HW_CMD_BW_CONTROL:
+		rc = cam_vfe_top_bw_control(top_priv, cmd_args, arg_size);
+		break;
 	default:
 		rc = -EINVAL;
 		CAM_ERR(CAM_ISP, "Error! Invalid cmd:%d", cmd_type);
@@ -565,6 +622,8 @@
 		top_priv->req_clk_rate[i] = 0;
 		top_priv->req_axi_vote[i].compressed_bw = 0;
 		top_priv->req_axi_vote[i].uncompressed_bw = 0;
+		top_priv->axi_vote_control[i] = CAM_VFE_BW_CONTROL_EXCLUDE;
+
 
 		if (ver2_hw_info->mux_type[i] == CAM_VFE_CAMIF_VER_2_0) {
 			top_priv->mux_rsrc[i].res_id =
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
index 65922dd..6e2e7e9 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
@@ -742,7 +742,7 @@
 	struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
 	uint32_t dev_type;
 	struct cam_jpeg_hw_cfg_req *p_cfg_req = NULL;
-	struct cam_jpeg_hw_cfg_req *cfg_req, *req_temp;
+	struct cam_jpeg_hw_cfg_req *cfg_req = NULL, *req_temp = NULL;
 
 	if (!hw_mgr || !ctx_data) {
 		CAM_ERR(CAM_JPEG, "Invalid args");
@@ -776,8 +776,8 @@
 
 	list_for_each_entry_safe(cfg_req, req_temp,
 		&hw_mgr->hw_config_req_list, list) {
-		if ((struct cam_jpeg_hw_ctx_data *)cfg_req->
-			hw_cfg_args.ctxt_to_hw_map != ctx_data)
+		if ((cfg_req) && ((struct cam_jpeg_hw_ctx_data *)cfg_req->
+			hw_cfg_args.ctxt_to_hw_map != ctx_data))
 			continue;
 
 		list_del_init(&cfg_req->list);
@@ -800,11 +800,14 @@
 		return -EINVAL;
 	}
 
-	request_id = *(int64_t *)flush_args->flush_req_pending[0];
+	if (flush_args->num_req_pending)
+		return 0;
+
+	request_id = *(int64_t *)flush_args->flush_req_active[0];
 	list_for_each_entry_safe(cfg_req, req_temp,
 		&hw_mgr->hw_config_req_list, list) {
-		if (cfg_req->hw_cfg_args.ctxt_to_hw_map
-			!= ctx_data)
+		if ((cfg_req) && (cfg_req->hw_cfg_args.ctxt_to_hw_map
+			!= ctx_data))
 			continue;
 
 		if (cfg_req->req_id != request_id)
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c
index 3fc9032..21e66a2 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -60,8 +60,21 @@
 		hw_info->bus_rd_reg.bus_client_reg[index].core_cfg, 0x1);
 
 	/* 5. unpack_cfg */
-	cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
-		hw_info->bus_rd_reg.bus_client_reg[index].unpack_cfg_0, 0x0);
+	if (io_buf->io_cfg->format == CAM_FORMAT_PD10)
+		cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+			hw_info->bus_rd_reg.bus_client_reg[index].unpack_cfg_0,
+			0x0);
+	else if (io_buf->io_cfg->format == CAM_FORMAT_Y_ONLY)
+		cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+			hw_info->bus_rd_reg.bus_client_reg[index].unpack_cfg_0,
+			0x1);
+	else if (io_buf->io_cfg->format == CAM_FORMAT_PLAIN16_10)
+		cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+			hw_info->bus_rd_reg.bus_client_reg[index].unpack_cfg_0,
+			0x22);
+	else
+		CAM_ERR(CAM_LRME, "Unsupported format %d",
+			io_buf->io_cfg->format);
 }
 
 static void cam_lrme_hw_util_fill_we_reg(struct cam_lrme_hw_io_buffer *io_buf,
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
index 784e90b..92f708b 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
@@ -1138,9 +1138,8 @@
  * @brief    : Cleans up the mem allocated while linking
  * @link     : pointer to link, mem associated with this link is freed
  *
- * @return   : returns if unlink for any device was success or failure
  */
-static int __cam_req_mgr_destroy_link_info(struct cam_req_mgr_core_link *link)
+static void __cam_req_mgr_destroy_link_info(struct cam_req_mgr_core_link *link)
 {
 	int32_t                                 i = 0;
 	struct cam_req_mgr_connected_device    *dev;
@@ -1157,12 +1156,13 @@
 		dev = &link->l_dev[i];
 		if (dev != NULL) {
 			link_data.dev_hdl = dev->dev_hdl;
-			if (dev->ops && dev->ops->link_setup)
+			if (dev->ops && dev->ops->link_setup) {
 				rc = dev->ops->link_setup(&link_data);
 				if (rc)
 					CAM_ERR(CAM_CRM,
-						"Unlink failed dev_hdl %d",
-						dev->dev_hdl);
+						"Unlink failed dev_hdl 0x%x rc=%d",
+						dev->dev_hdl, rc);
+			}
 			dev->dev_hdl = 0;
 			dev->parent = NULL;
 			dev->ops = NULL;
@@ -1176,8 +1176,6 @@
 	link->pd_mask = 0;
 	link->num_devs = 0;
 	link->max_delay = 0;
-
-	return rc;
 }
 
 /**
@@ -1263,45 +1261,71 @@
 	return NULL;
 }
 
+/*
+ * __cam_req_mgr_free_link()
+ *
+ * @brief: Frees the link and its request queue
+ *
+ * @link: link identifier
+ *
+ */
+static void __cam_req_mgr_free_link(struct cam_req_mgr_core_link *link)
+{
+	kfree(link->req.in_q);
+	link->req.in_q = NULL;
+	kfree(link);
+}
+
 /**
  * __cam_req_mgr_unreserve_link()
  *
- * @brief  : Reserves one link data struct within session
+ * @brief  : Removes the link data struct from the session and frees it
  * @session: session identifier
  * @link   : link identifier
  *
  */
 static void __cam_req_mgr_unreserve_link(
 	struct cam_req_mgr_core_session *session,
-	struct cam_req_mgr_core_link **link)
+	struct cam_req_mgr_core_link *link)
 {
-	int32_t   i = 0;
+	int i;
 
-	if (!session || !*link) {
+	if (!session || !link) {
 		CAM_ERR(CAM_CRM, "NULL session/link ptr %pK %pK",
-			session, *link);
+			session, link);
 		return;
 	}
 
 	mutex_lock(&session->lock);
-	if (!session->num_links)
-		CAM_WARN(CAM_CRM, "No active link or invalid state %d",
-			session->num_links);
-	else {
-		for (i = 0; i < MAX_LINKS_PER_SESSION; i++) {
-			if (session->links[i] == *link)
-				session->links[i] = NULL;
-		}
-		session->num_links--;
-		CAM_DBG(CAM_CRM, "Active session links (%d)",
-			session->num_links);
+	if (!session->num_links) {
+		CAM_WARN(CAM_CRM, "No active link or invalid state: hdl %x",
+			link->link_hdl);
+		mutex_unlock(&session->lock);
+		return;
 	}
-	kfree((*link)->req.in_q);
-	(*link)->req.in_q = NULL;
-	kfree(*link);
-	*link = NULL;
-	mutex_unlock(&session->lock);
 
+	for (i = 0; i < MAX_LINKS_PER_SESSION; i++) {
+		if (session->links[i] == link)
+			session->links[i] = NULL;
+	}
+
+	if ((session->sync_mode != CAM_REQ_MGR_SYNC_MODE_NO_SYNC) &&
+		(link->sync_link)) {
+		/*
+		 * make sure to unlink sync setup under the assumption
+		 * of only having 2 links in a given session
+		 */
+		session->sync_mode = CAM_REQ_MGR_SYNC_MODE_NO_SYNC;
+		for (i = 0; i < MAX_LINKS_PER_SESSION; i++) {
+			if (session->links[i])
+				session->links[i]->sync_link = NULL;
+		}
+	}
+
+	session->num_links--;
+	CAM_DBG(CAM_CRM, "Active session links (%d)", session->num_links);
+	mutex_unlock(&session->lock);
+	__cam_req_mgr_free_link(link);
 }
 
 /* Workqueue context processing section */
@@ -2145,11 +2169,58 @@
 	return rc;
 }
 
+/**
+ * __cam_req_mgr_unlink()
+ *
+ * @brief : Unlink devices on a link structure from the session
+ * @link  : Pointer to the link structure
+ *
+ * @return: 0 for success, negative for failure
+ *
+ */
+static int __cam_req_mgr_unlink(struct cam_req_mgr_core_link *link)
+{
+	int rc;
+
+	mutex_lock(&link->lock);
+	spin_lock_bh(&link->link_state_spin_lock);
+	link->state = CAM_CRM_LINK_STATE_IDLE;
+	spin_unlock_bh(&link->link_state_spin_lock);
+	__cam_req_mgr_print_req_tbl(&link->req);
+
+	/* Destroy workq payload data */
+	kfree(link->workq->task.pool[0].payload);
+	link->workq->task.pool[0].payload = NULL;
+
+	/* Destroy workq and timer of link */
+	crm_timer_exit(&link->watchdog);
+
+	cam_req_mgr_workq_destroy(&link->workq);
+
+	/* Cleanup request tables and unlink devices */
+	__cam_req_mgr_destroy_link_info(link);
+
+	/* Free memory holding data of linked devs */
+	__cam_req_mgr_destroy_subdev(link->l_dev);
+
+	/* Destroy the link handle */
+	rc = cam_destroy_device_hdl(link->link_hdl);
+	if (rc < 0) {
+		CAM_ERR(CAM_CRM, "error while destroying dev handle %d %x",
+			rc, link->link_hdl);
+	}
+
+	mutex_unlock(&link->lock);
+	return rc;
+}
+
 int cam_req_mgr_destroy_session(
 		struct cam_req_mgr_session_info *ses_info)
 {
 	int rc;
+	int i;
 	struct cam_req_mgr_core_session *cam_session = NULL;
+	struct cam_req_mgr_core_link *link;
 
 	if (!ses_info) {
 		CAM_DBG(CAM_CRM, "NULL session info pointer");
@@ -2167,10 +2238,20 @@
 	}
 	mutex_lock(&cam_session->lock);
 	if (cam_session->num_links) {
-		CAM_ERR(CAM_CRM, "destroy session %x num_active_links %d",
+		CAM_DBG(CAM_CRM, "destroy session %x num_active_links %d",
 			ses_info->session_hdl,
 			cam_session->num_links);
-		/* @TODO : Go through active links and destroy ? */
+
+		for (i = 0; i < MAX_LINKS_PER_SESSION; i++) {
+			link = cam_session->links[i];
+
+			if (!link)
+				continue;
+
+			/* Ignore return value since session is going away */
+			__cam_req_mgr_unlink(link);
+			__cam_req_mgr_free_link(link);
+		}
 	}
 	list_del(&cam_session->entry);
 	mutex_unlock(&cam_session->lock);
@@ -2286,7 +2367,7 @@
 	link_info->link_hdl = 0;
 link_hdl_fail:
 	mutex_unlock(&link->lock);
-	__cam_req_mgr_unreserve_link(cam_session, &link);
+	__cam_req_mgr_unreserve_link(cam_session, link);
 	mutex_unlock(&g_crm_core_dev->crm_lock);
 	return rc;
 }
@@ -2296,7 +2377,6 @@
 	int                              rc = 0;
 	struct cam_req_mgr_core_session *cam_session;
 	struct cam_req_mgr_core_link    *link;
-	int                              i;
 
 	if (!unlink_info) {
 		CAM_ERR(CAM_CRM, "NULL pointer");
@@ -2319,60 +2399,18 @@
 	link = cam_get_device_priv(unlink_info->link_hdl);
 	if (!link) {
 		CAM_ERR(CAM_CRM, "NULL pointer");
-		mutex_unlock(&g_crm_core_dev->crm_lock);
-		return -EINVAL;
+		rc = -EINVAL;
+		goto done;
 	}
 
-	mutex_lock(&link->lock);
-	spin_lock_bh(&link->link_state_spin_lock);
-	link->state = CAM_CRM_LINK_STATE_IDLE;
-	spin_unlock_bh(&link->link_state_spin_lock);
-	__cam_req_mgr_print_req_tbl(&link->req);
-
-	if ((cam_session->sync_mode != CAM_REQ_MGR_SYNC_MODE_NO_SYNC) &&
-		(link->sync_link)) {
-		/*
-		 * make sure to unlink sync setup under the assumption
-		 * of only having 2 links in a given session
-		 */
-		cam_session->sync_mode = CAM_REQ_MGR_SYNC_MODE_NO_SYNC;
-		for (i = 0; i < MAX_LINKS_PER_SESSION; i++) {
-			if (cam_session->links[i])
-				cam_session->links[i]->sync_link = NULL;
-		}
-	}
-
-	/* Destroy workq payload data */
-	kfree(link->workq->task.pool[0].payload);
-	link->workq->task.pool[0].payload = NULL;
-
-	/* Destroy workq and timer of link */
-	crm_timer_exit(&link->watchdog);
-
-	cam_req_mgr_workq_destroy(&link->workq);
-
-	/* Cleanup request tables and unlink devices */
-	rc = __cam_req_mgr_destroy_link_info(link);
-	if (rc) {
-		CAM_ERR(CAM_CORE, "Unlink failed. Cannot proceed");
-		return rc;
-	}
-
-	/* Free memory holding data of linked devs */
-	__cam_req_mgr_destroy_subdev(link->l_dev);
-
-	/* Destroy the link handle */
-	rc = cam_destroy_device_hdl(unlink_info->link_hdl);
-	if (rc < 0) {
-		CAM_ERR(CAM_CRM, "error while destroying dev handle %d %x",
-			rc, link->link_hdl);
-	}
+	rc = __cam_req_mgr_unlink(link);
 
 	/* Free curent link and put back into session's free pool of links */
-	mutex_unlock(&link->lock);
-	__cam_req_mgr_unreserve_link(cam_session, &link);
-	mutex_unlock(&g_crm_core_dev->crm_lock);
+	if (!rc)
+		__cam_req_mgr_unreserve_link(cam_session, link);
 
+done:
+	mutex_unlock(&g_crm_core_dev->crm_lock);
 	return rc;
 }
 
@@ -2569,9 +2607,12 @@
 int cam_req_mgr_link_control(struct cam_req_mgr_link_control *control)
 {
 	int                               rc = 0;
-	int                               i;
+	int                               i, j;
 	struct cam_req_mgr_core_link     *link = NULL;
 
+	struct cam_req_mgr_connected_device *dev = NULL;
+	struct cam_req_mgr_link_evt_data     evt_data;
+
 	if (!control) {
 		CAM_ERR(CAM_CRM, "Control command is NULL");
 		rc = -EINVAL;
@@ -2601,9 +2642,29 @@
 					link->link_hdl);
 				rc = -EFAULT;
 			}
+			/* notify nodes */
+			for (j = 0; j < link->num_devs; j++) {
+				dev = &link->l_dev[j];
+				evt_data.evt_type = CAM_REQ_MGR_LINK_EVT_RESUME;
+				evt_data.link_hdl =  link->link_hdl;
+				evt_data.dev_hdl = dev->dev_hdl;
+				evt_data.req_id = 0;
+				if (dev->ops && dev->ops->process_evt)
+					dev->ops->process_evt(&evt_data);
+			}
 		} else if (control->ops == CAM_REQ_MGR_LINK_DEACTIVATE) {
 			/* Destroy SOF watchdog timer */
 			crm_timer_exit(&link->watchdog);
+			/* notify nodes */
+			for (j = 0; j < link->num_devs; j++) {
+				dev = &link->l_dev[j];
+				evt_data.evt_type = CAM_REQ_MGR_LINK_EVT_PAUSE;
+				evt_data.link_hdl =  link->link_hdl;
+				evt_data.dev_hdl = dev->dev_hdl;
+				evt_data.req_id = 0;
+				if (dev->ops && dev->ops->process_evt)
+					dev->ops->process_evt(&evt_data);
+			}
 		} else {
 			CAM_ERR(CAM_CRM, "Invalid link control command");
 			rc = -EINVAL;
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h
index ce8dfa7..45ebc69 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h
@@ -81,9 +81,9 @@
  * @process_evt  : payload to generic event
  */
 struct cam_req_mgr_kmd_ops {
-	cam_req_mgr_get_dev_info      get_dev_info;
-	cam_req_mgr_link_setup        link_setup;
-	cam_req_mgr_apply_req         apply_req;
+	cam_req_mgr_get_dev_info     get_dev_info;
+	cam_req_mgr_link_setup       link_setup;
+	cam_req_mgr_apply_req        apply_req;
 	cam_req_mgr_flush_req        flush_req;
 	cam_req_mgr_process_evt      process_evt;
 };
@@ -182,6 +182,8 @@
  */
 enum cam_req_mgr_link_evt_type {
 	CAM_REQ_MGR_LINK_EVT_ERR,
+	CAM_REQ_MGR_LINK_EVT_PAUSE,
+	CAM_REQ_MGR_LINK_EVT_RESUME,
 	CAM_REQ_MGR_LINK_EVT_MAX,
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
index e0d4502..23d25a4 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
@@ -612,37 +612,36 @@
 		case CAMERA_SENSOR_FLASH_CMD_TYPE_FIRE: {
 			CAM_DBG(CAM_FLASH,
 				"CAMERA_FLASH_CMD_TYPE_OPS case called");
-			if ((fctrl->flash_state == CAM_FLASH_STATE_START) ||
+			if ((fctrl->flash_state == CAM_FLASH_STATE_INIT) ||
 				(fctrl->flash_state ==
-					CAM_FLASH_STATE_CONFIG)) {
-				flash_operation_info =
-					(struct cam_flash_set_on_off *) cmd_buf;
-				if (!flash_operation_info) {
-					CAM_ERR(CAM_FLASH,
-						"flash_operation_info Null");
-					return -EINVAL;
-				}
-
-				fctrl->per_frame[frame_offset].opcode =
-					flash_operation_info->opcode;
-				fctrl->per_frame[frame_offset].cmn_attr.count =
-					flash_operation_info->count;
-				for (i = 0;
-					i < flash_operation_info->count; i++)
-					fctrl->per_frame[frame_offset].
-						led_current_ma[i]
-						= flash_operation_info->
-						led_current_ma[i];
-
-			} else {
-				CAM_ERR(CAM_FLASH,
-					"Rxed Update packets without linking");
+					CAM_FLASH_STATE_ACQUIRE)) {
+				CAM_WARN(CAM_FLASH,
+					"Rxed Flash fire ops without linking");
 				fctrl->per_frame[frame_offset].
 					cmn_attr.is_settings_valid = false;
+				return 0;
+			}
+
+			flash_operation_info =
+				(struct cam_flash_set_on_off *) cmd_buf;
+			if (!flash_operation_info) {
+				CAM_ERR(CAM_FLASH,
+					"flash_operation_info Null");
 				return -EINVAL;
 			}
+
+			fctrl->per_frame[frame_offset].opcode =
+				flash_operation_info->opcode;
+			fctrl->per_frame[frame_offset].cmn_attr.count =
+				flash_operation_info->count;
+			for (i = 0;
+				i < flash_operation_info->count; i++)
+				fctrl->per_frame[frame_offset].
+					led_current_ma[i]
+					= flash_operation_info->
+					led_current_ma[i];
+			}
 			break;
-		}
 		default:
 			CAM_ERR(CAM_FLASH, "Wrong cmd_type = %d",
 				cmn_hdr->cmd_type);
@@ -741,18 +740,18 @@
 		break;
 	}
 	case CAM_PKT_NOP_OPCODE: {
-		if ((fctrl->flash_state == CAM_FLASH_STATE_START) ||
-			(fctrl->flash_state == CAM_FLASH_STATE_CONFIG)) {
-			CAM_DBG(CAM_FLASH, "NOP Packet is Received: req_id: %u",
-				csl_packet->header.request_id);
-			goto update_req_mgr;
-		} else {
-			CAM_ERR(CAM_FLASH,
-				"Rxed Update packets without linking");
+		if ((fctrl->flash_state == CAM_FLASH_STATE_INIT) ||
+			(fctrl->flash_state == CAM_FLASH_STATE_ACQUIRE)) {
+			CAM_WARN(CAM_FLASH,
+				"Rxed NOP packets without linking");
 			fctrl->per_frame[frame_offset].
 				cmn_attr.is_settings_valid = false;
-			return -EINVAL;
+			return 0;
 		}
+
+		CAM_DBG(CAM_FLASH, "NOP Packet is Received: req_id: %u",
+			csl_packet->header.request_id);
+		goto update_req_mgr;
 	}
 	default:
 		CAM_ERR(CAM_FLASH, "Wrong Opcode : %d",
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c
index eddbf97..085bcf6 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c
@@ -82,7 +82,8 @@
 	}
 	case CAM_RELEASE_DEV: {
 		CAM_DBG(CAM_FLASH, "CAM_RELEASE_DEV");
-		if (fctrl->flash_state != CAM_FLASH_STATE_ACQUIRE) {
+		if ((fctrl->flash_state == CAM_FLASH_STATE_INIT) ||
+			(fctrl->flash_state == CAM_FLASH_STATE_START)) {
 			CAM_WARN(CAM_FLASH,
 				"Cannot apply Release dev: Prev state:%d",
 				fctrl->flash_state);
@@ -131,7 +132,8 @@
 	}
 	case CAM_START_DEV: {
 		CAM_DBG(CAM_FLASH, "CAM_START_DEV");
-		if (fctrl->flash_state != CAM_FLASH_STATE_CONFIG) {
+		if ((fctrl->flash_state == CAM_FLASH_STATE_INIT) ||
+			(fctrl->flash_state == CAM_FLASH_STATE_START)) {
 			CAM_WARN(CAM_FLASH,
 				"Cannot apply Start Dev: Prev state: %d",
 				fctrl->flash_state);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
index 76f5b46..db80584 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
@@ -469,7 +469,7 @@
 				CAM_ERR(CAM_OIS, "invalid cmd buf");
 				return -EINVAL;
 			}
-			cmd_buf += cmd_desc->offset / sizeof(uint32_t);
+			cmd_buf += cmd_desc[i].offset / sizeof(uint32_t);
 			cmm_hdr = (struct common_header *)cmd_buf;
 
 			switch (cmm_hdr->cmd_type) {
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
index 9ce7a21..d5bb1b0 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -38,6 +38,31 @@
 			add_req.req_id);
 }
 
+static void cam_sensor_release_stream_rsc(
+	struct cam_sensor_ctrl_t *s_ctrl)
+{
+	struct i2c_settings_array *i2c_set = NULL;
+	int rc;
+
+	i2c_set = &(s_ctrl->i2c_data.streamoff_settings);
+	if (i2c_set->is_settings_valid == 1) {
+		i2c_set->is_settings_valid = -1;
+		rc = delete_request(i2c_set);
+		if (rc < 0)
+			CAM_ERR(CAM_SENSOR,
+				"failed while deleting Streamoff settings");
+	}
+
+	i2c_set = &(s_ctrl->i2c_data.streamon_settings);
+	if (i2c_set->is_settings_valid == 1) {
+		i2c_set->is_settings_valid = -1;
+		rc = delete_request(i2c_set);
+		if (rc < 0)
+			CAM_ERR(CAM_SENSOR,
+				"failed while deleting Streamon settings");
+	}
+}
+
 static void cam_sensor_release_resource(
 	struct cam_sensor_ctrl_t *s_ctrl)
 {
@@ -61,26 +86,10 @@
 			CAM_ERR(CAM_SENSOR,
 				"failed while deleting Res settings");
 	}
-	i2c_set = &(s_ctrl->i2c_data.streamoff_settings);
-	if (i2c_set->is_settings_valid == 1) {
-		i2c_set->is_settings_valid = -1;
-		rc = delete_request(i2c_set);
-		if (rc < 0)
-			CAM_ERR(CAM_SENSOR,
-				"failed while deleting Streamoff settings");
-	}
-	i2c_set = &(s_ctrl->i2c_data.streamon_settings);
-	if (i2c_set->is_settings_valid == 1) {
-		i2c_set->is_settings_valid = -1;
-		rc = delete_request(i2c_set);
-		if (rc < 0)
-			CAM_ERR(CAM_SENSOR,
-				"failed while deleting Streamoff settings");
-	}
+
 	if (s_ctrl->i2c_data.per_frame != NULL) {
 		for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
 			i2c_set = &(s_ctrl->i2c_data.per_frame[i]);
-
 			if (i2c_set->is_settings_valid == 1) {
 				i2c_set->is_settings_valid = -1;
 				rc = delete_request(i2c_set);
@@ -165,42 +174,44 @@
 	}
 
 	case CAM_SENSOR_PACKET_OPCODE_SENSOR_UPDATE: {
-		if ((s_ctrl->sensor_state == CAM_SENSOR_CONFIG) ||
-			(s_ctrl->sensor_state == CAM_SENSOR_START)) {
-			i2c_reg_settings =
-				&i2c_data->
-				per_frame[csl_packet->header.request_id %
-				MAX_PER_FRAME_ARRAY];
-			CAM_DBG(CAM_SENSOR, "Received Packet: %lld",
-			csl_packet->header.request_id % MAX_PER_FRAME_ARRAY);
-			if (i2c_reg_settings->is_settings_valid == 1) {
-				CAM_ERR(CAM_SENSOR,
-					"Already some pkt in offset req : %lld",
-					csl_packet->header.request_id);
-				rc = delete_request(i2c_reg_settings);
-				if (rc < 0) {
-					CAM_ERR(CAM_SENSOR,
-					"Failed in Deleting the err: %d", rc);
-					return rc;
-				}
-			}
-		} else {
-			CAM_ERR(CAM_SENSOR,
+		if ((s_ctrl->sensor_state == CAM_SENSOR_INIT) ||
+			(s_ctrl->sensor_state == CAM_SENSOR_ACQUIRE)) {
+			CAM_WARN(CAM_SENSOR,
 				"Rxed Update packets without linking");
-			return -EINVAL;
+			return 0;
 		}
-	break;
+
+		i2c_reg_settings =
+			&i2c_data->
+			per_frame[csl_packet->header.request_id %
+			MAX_PER_FRAME_ARRAY];
+		CAM_DBG(CAM_SENSOR, "Received Packet: %lld",
+		csl_packet->header.request_id % MAX_PER_FRAME_ARRAY);
+		if (i2c_reg_settings->is_settings_valid == 1) {
+			CAM_ERR(CAM_SENSOR,
+				"Already some pkt in offset req : %lld",
+				csl_packet->header.request_id);
+			/*
+			 * Update req mgr even in case of failure.
+			 * This will help not to wait indefinitely
+			 * and freeze. If this log is triggered then
+			 * fix it.
+			 */
+			cam_sensor_update_req_mgr(s_ctrl, csl_packet);
+			return 0;
+		}
+		break;
 	}
 	case CAM_SENSOR_PACKET_OPCODE_SENSOR_NOP: {
-		if ((s_ctrl->sensor_state == CAM_SENSOR_CONFIG) ||
-			(s_ctrl->sensor_state == CAM_SENSOR_START)) {
-			cam_sensor_update_req_mgr(s_ctrl, csl_packet);
-		} else {
-			CAM_ERR(CAM_SENSOR,
-				"Rxed Update packets without linking");
-			rc = -EINVAL;
+		if ((s_ctrl->sensor_state == CAM_SENSOR_INIT) ||
+			(s_ctrl->sensor_state == CAM_SENSOR_ACQUIRE)) {
+			CAM_WARN(CAM_SENSOR,
+				"Rxed NOP packets without linking");
+			return 0;
 		}
-		return rc;
+
+		cam_sensor_update_req_mgr(s_ctrl, csl_packet);
+		return 0;
 	}
 	default:
 		CAM_ERR(CAM_SENSOR, "Invalid Packet Header");
@@ -489,7 +500,7 @@
 		return;
 
 	cam_sensor_release_resource(s_ctrl);
-
+	cam_sensor_release_stream_rsc(s_ctrl);
 	if (s_ctrl->sensor_state >= CAM_SENSOR_ACQUIRE)
 		cam_sensor_power_down(s_ctrl);
 
@@ -706,8 +717,8 @@
 	}
 		break;
 	case CAM_RELEASE_DEV: {
-		if ((s_ctrl->sensor_state < CAM_SENSOR_ACQUIRE) ||
-			(s_ctrl->sensor_state > CAM_SENSOR_CONFIG)) {
+		if ((s_ctrl->sensor_state == CAM_SENSOR_INIT) ||
+			(s_ctrl->sensor_state == CAM_SENSOR_START)) {
 			rc = -EINVAL;
 			CAM_WARN(CAM_SENSOR,
 			"Not in right state to release : %d",
@@ -722,6 +733,7 @@
 		}
 
 		cam_sensor_release_resource(s_ctrl);
+		cam_sensor_release_stream_rsc(s_ctrl);
 		if (s_ctrl->bridge_intf.device_hdl == -1) {
 			CAM_ERR(CAM_SENSOR,
 				"Invalid Handles: link hdl: %d device hdl: %d",
@@ -754,7 +766,8 @@
 		break;
 	}
 	case CAM_START_DEV: {
-		if (s_ctrl->sensor_state != CAM_SENSOR_CONFIG) {
+		if ((s_ctrl->sensor_state == CAM_SENSOR_INIT) ||
+			(s_ctrl->sensor_state == CAM_SENSOR_START)) {
 			rc = -EINVAL;
 			CAM_WARN(CAM_SENSOR,
 			"Not in right state to start : %d",
@@ -793,6 +806,8 @@
 				"cannot apply streamoff settings");
 			}
 		}
+
+		cam_sensor_release_resource(s_ctrl);
 		s_ctrl->sensor_state = CAM_SENSOR_ACQUIRE;
 	}
 		break;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
index 7a6d7fd..89aad4e 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -98,6 +98,11 @@
 		return -EINVAL;
 	}
 
+	if (!write_setting->reg_setting) {
+		CAM_ERR(CAM_SENSOR, "Invalid Register Settings");
+		return -EINVAL;
+	}
+
 	if (io_master_info->master_type == CCI_MASTER) {
 		return cam_cci_i2c_write_table(io_master_info,
 			write_setting);
@@ -125,6 +130,11 @@
 		return -EINVAL;
 	}
 
+	if (!write_setting->reg_setting) {
+		CAM_ERR(CAM_SENSOR, "Invalid Register Settings");
+		return -EINVAL;
+	}
+
 	if (io_master_info->master_type == CCI_MASTER) {
 		return cam_cci_i2c_write_continuous_table(io_master_info,
 			write_setting, cam_sensor_i2c_write_flag);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h
index 72ca737..622dae6 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -26,7 +26,7 @@
 #define MAX_REGULATOR 5
 #define MAX_POWER_CONFIG 12
 
-#define MAX_PER_FRAME_ARRAY 8
+#define MAX_PER_FRAME_ARRAY 32
 
 #define CAM_SENSOR_NAME    "cam-sensor"
 #define CAM_ACTUATOR_NAME  "cam-actuator"
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
index 7824102..c757315 100644
--- a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
@@ -434,7 +434,7 @@
 		CAM_ERR(CAM_SMMU, "Error: domain = %pK, device = %pK",
 			domain, dev);
 		CAM_ERR(CAM_SMMU, "iova = %lX, flags = %d", iova, flags);
-		return 0;
+		return -EINVAL;
 	}
 
 	cb_name = (char *)token;
@@ -448,12 +448,12 @@
 		CAM_ERR(CAM_SMMU,
 			"Error: index is not valid, index = %d, token = %s",
 			idx, cb_name);
-		return 0;
+		return -EINVAL;
 	}
 
 	payload = kzalloc(sizeof(struct cam_smmu_work_payload), GFP_ATOMIC);
 	if (!payload)
-		return 0;
+		return -EINVAL;
 
 	payload->domain = domain;
 	payload->dev = dev;
@@ -468,7 +468,7 @@
 
 	schedule_work(&iommu_cb_set.smmu_work);
 
-	return 0;
+	return -EINVAL;
 }
 
 static int cam_smmu_translate_dir_to_iommu_dir(
@@ -3140,12 +3140,10 @@
 		CAM_ERR(CAM_SMMU, "Error: failed to setup cb : %s", cb->name);
 		goto cb_init_fail;
 	}
-
 	if (cb->io_support && cb->mapping)
 		iommu_set_fault_handler(cb->mapping->domain,
 			cam_smmu_iommu_fault_handler,
 			(void *)cb->name);
-
 	/* increment count to next bank */
 	iommu_cb_set.cb_init_count++;
 
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c
index 749aa7f..9d92acf 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -31,6 +31,7 @@
 #include "sde_rotator_util.h"
 #include "sde_rotator_trace.h"
 #include "sde_rotator_debug.h"
+#include "sde_rotator_dev.h"
 
 static inline u64 fudge_factor(u64 val, u32 numer, u32 denom)
 {
@@ -707,6 +708,16 @@
 
 	sde_mdp_parse_inline_rot_lut_setting(pdev, mdata);
 
+	rc = of_property_read_u32(pdev->dev.of_node,
+		"qcom,mdss-rot-qos-cpu-mask", &data);
+	mdata->rot_pm_qos_cpu_mask = (!rc ? data : 0);
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+		 "qcom,mdss-rot-qos-cpu-dma-latency", &data);
+	mdata->rot_pm_qos_cpu_dma_latency = (!rc ? data : 0);
+
+	sde_rotator_pm_qos_add(mdata);
+
 	mdata->mdp_base = mdata->sde_io.base + SDE_MDP_OFFSET;
 
 	return 0;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
index 8eef152..0ffe219 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
@@ -23,6 +23,7 @@
 #include "sde_rotator_io_util.h"
 #include "sde_rotator_smmu.h"
 #include "sde_rotator_formats.h"
+#include <linux/pm_qos.h>
 
 /* HW Revisions for different targets */
 #define SDE_GET_MAJOR_REV(rev)	((rev) >> 28)
@@ -240,6 +241,11 @@
 	u32 *vbif_nrt_qos;
 	u32 npriority_lvl;
 
+	struct pm_qos_request pm_qos_rot_cpu_req;
+	u32 rot_pm_qos_cpu_count;
+	u32 rot_pm_qos_cpu_mask;
+	u32 rot_pm_qos_cpu_dma_latency;
+
 	u32 vbif_memtype_count;
 	u32 *vbif_memtype;
 
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
index 43d17d9..a46194f 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
@@ -57,8 +57,15 @@
 #define SDE_ROTATOR_DEGREE_180		180
 #define SDE_ROTATOR_DEGREE_90		90
 
+/* Inline rotator qos request */
+#define SDE_ROTATOR_ADD_REQUEST		1
+#define SDE_ROTATOR_REMOVE_REQUEST		0
+
+
 static void sde_rotator_submit_handler(struct kthread_work *work);
 static void sde_rotator_retire_handler(struct kthread_work *work);
+static void sde_rotator_pm_qos_request(struct sde_rotator_device *rot_dev,
+					 bool add_request);
 #ifdef CONFIG_COMPAT
 static long sde_rotator_compat_ioctl32(struct file *file,
 	unsigned int cmd, unsigned long arg);
@@ -1012,6 +1019,8 @@
 		SDEDEV_DBG(ctx->rot_dev->dev, "timeline is not available\n");
 
 	sde_rot_mgr_lock(rot_dev->mgr);
+	sde_rotator_pm_qos_request(rot_dev,
+				 SDE_ROTATOR_ADD_REQUEST);
 	ret = sde_rotator_session_open(rot_dev->mgr, &ctx->private,
 			ctx->session_id, &ctx->work_queue);
 	if (ret < 0) {
@@ -1121,6 +1130,8 @@
 	}
 	SDEDEV_DBG(rot_dev->dev, "release session s:%d\n", session_id);
 	sde_rot_mgr_lock(rot_dev->mgr);
+	sde_rotator_pm_qos_request(rot_dev,
+			SDE_ROTATOR_REMOVE_REQUEST);
 	sde_rotator_session_close(rot_dev->mgr, ctx->private, session_id);
 	sde_rot_mgr_unlock(rot_dev->mgr);
 	SDEDEV_DBG(rot_dev->dev, "release retire work s:%d\n", session_id);
@@ -1230,6 +1241,104 @@
 	return retire_delta >= 0;
 }
 
+static void sde_rotator_pm_qos_remove(struct sde_rot_data_type *rot_mdata)
+{
+	struct pm_qos_request *req;
+	u32 cpu_mask;
+
+	if (!rot_mdata) {
+		SDEROT_DBG("invalid rot device or context\n");
+		return;
+	}
+
+	cpu_mask = rot_mdata->rot_pm_qos_cpu_mask;
+
+	if (!cpu_mask)
+		return;
+
+	req = &rot_mdata->pm_qos_rot_cpu_req;
+	pm_qos_remove_request(req);
+}
+
+void sde_rotator_pm_qos_add(struct sde_rot_data_type *rot_mdata)
+{
+	struct pm_qos_request *req;
+	u32 cpu_mask;
+	int cpu;
+
+	if (!rot_mdata) {
+		SDEROT_DBG("invalid rot device or context\n");
+		return;
+	}
+
+	cpu_mask = rot_mdata->rot_pm_qos_cpu_mask;
+
+	if (!cpu_mask)
+		return;
+
+	req = &rot_mdata->pm_qos_rot_cpu_req;
+	req->type = PM_QOS_REQ_AFFINE_CORES;
+	cpumask_empty(&req->cpus_affine);
+	for_each_possible_cpu(cpu) {
+		if ((1 << cpu) & cpu_mask)
+			cpumask_set_cpu(cpu, &req->cpus_affine);
+	}
+	pm_qos_add_request(req, PM_QOS_CPU_DMA_LATENCY,
+		PM_QOS_DEFAULT_VALUE);
+
+	SDEROT_DBG("rotator pmqos add mask %x latency %x\n",
+		rot_mdata->rot_pm_qos_cpu_mask,
+		rot_mdata->rot_pm_qos_cpu_dma_latency);
+}
+
+static void sde_rotator_pm_qos_request(struct sde_rotator_device *rot_dev,
+					 bool add_request)
+{
+	u32 cpu_mask;
+	u32 cpu_dma_latency;
+	bool changed = false;
+
+	if (!rot_dev) {
+		SDEROT_DBG("invalid rot device or context\n");
+		return;
+	}
+
+	cpu_mask = rot_dev->mdata->rot_pm_qos_cpu_mask;
+	cpu_dma_latency = rot_dev->mdata->rot_pm_qos_cpu_dma_latency;
+
+	if (!cpu_mask)
+		return;
+
+	if (add_request) {
+		if (rot_dev->mdata->rot_pm_qos_cpu_count == 0)
+			changed = true;
+		rot_dev->mdata->rot_pm_qos_cpu_count++;
+	} else {
+		if (rot_dev->mdata->rot_pm_qos_cpu_count != 0) {
+			rot_dev->mdata->rot_pm_qos_cpu_count--;
+			if (rot_dev->mdata->rot_pm_qos_cpu_count == 0)
+				changed = true;
+		} else {
+			SDEROT_DBG("%s: ref_count is not balanced\n",
+				__func__);
+		}
+	}
+
+	if (!changed)
+		return;
+
+	SDEROT_EVTLOG(add_request, cpu_mask, cpu_dma_latency);
+
+	if (!add_request) {
+		pm_qos_update_request(&rot_dev->mdata->pm_qos_rot_cpu_req,
+			PM_QOS_DEFAULT_VALUE);
+		return;
+	}
+
+	pm_qos_update_request(&rot_dev->mdata->pm_qos_rot_cpu_req,
+		cpu_dma_latency);
+}
+
 /*
  * sde_rotator_inline_open - open inline rotator session
  * @pdev: Pointer to rotator platform device
@@ -3545,6 +3654,7 @@
 		return 0;
 	}
 
+	sde_rotator_pm_qos_remove(rot_dev->mdata);
 	sde_rotator_destroy_debugfs(rot_dev->debugfs_root);
 	video_unregister_device(rot_dev->vdev);
 	video_device_release(rot_dev->vdev);
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h
index a464a39..ab27043 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h
@@ -250,4 +250,7 @@
 {
 	return ((struct sde_rotator_device *) dev_get_drvdata(dev))->mgr;
 }
+
+void sde_rotator_pm_qos_add(struct sde_rot_data_type *rot_mdata);
+
 #endif /* __SDE_ROTATOR_DEV_H__ */
diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c
index 03dfde6..44cc7dc 100644
--- a/drivers/media/platform/msm/vidc/hfi_response_handler.c
+++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -39,6 +39,9 @@
 	case HFI_ERR_SYS_FATAL:
 		vidc_err = VIDC_ERR_HW_FATAL;
 		break;
+	case HFI_ERR_SYS_NOC_ERROR:
+		vidc_err = VIDC_ERR_NOC_ERROR;
+		break;
 	case HFI_ERR_SYS_VERSION_MISMATCH:
 	case HFI_ERR_SYS_INVALID_PARAMETER:
 	case HFI_ERR_SYS_SESSION_ID_OUT_OF_RANGE:
@@ -316,11 +319,14 @@
 	return 0;
 }
 
-static int hfi_process_sys_error(u32 device_id, struct msm_vidc_cb_info *info)
+static int hfi_process_sys_error(u32 device_id,
+	struct hfi_msg_event_notify_packet *pkt,
+	struct msm_vidc_cb_info *info)
 {
 	struct msm_vidc_cb_cmd_done cmd_done = {0};
 
 	cmd_done.device_id = device_id;
+	cmd_done.status = hfi_map_err_status(pkt->event_data1);
 
 	info->response_type = HAL_SYS_ERROR;
 	info->response.cmd = cmd_done;
@@ -373,7 +379,7 @@
 	case HFI_EVENT_SYS_ERROR:
 		dprintk(VIDC_ERR, "HFI_EVENT_SYS_ERROR: %d, %#x\n",
 			pkt->event_data1, pkt->event_data2);
-		return hfi_process_sys_error(device_id, info);
+		return hfi_process_sys_error(device_id, pkt, info);
 	case HFI_EVENT_SESSION_ERROR:
 		dprintk(VIDC_INFO, "HFI_EVENT_SESSION_ERROR[%#x]\n",
 				pkt->session_id);
diff --git a/drivers/media/platform/msm/vidc/msm_smem.c b/drivers/media/platform/msm/vidc/msm_smem.c
index 5198bc3..6936354 100644
--- a/drivers/media/platform/msm/vidc/msm_smem.c
+++ b/drivers/media/platform/msm/vidc/msm_smem.c
@@ -28,6 +28,7 @@
 	void *clnt;
 	struct msm_vidc_platform_resources *res;
 	enum session_type session_type;
+	bool tme_encode_mode;
 };
 
 static int msm_ion_get_device_address(struct smem_client *smem_client,
@@ -771,6 +772,13 @@
 	return client;
 }
 
+void msm_smem_set_tme_encode_mode(struct smem_client *client, bool enable)
+{
+	if (!client)
+		return;
+	client->tme_encode_mode = enable;
+}
+
 int msm_smem_alloc(struct smem_client *client, size_t size,
 		u32 align, u32 flags, enum hal_buffer buffer_type,
 		int map_kernel, struct msm_smem *smem)
@@ -863,7 +871,8 @@
 	if (is_secure && client->session_type == MSM_VIDC_ENCODER) {
 		if (buffer_type == HAL_BUFFER_INPUT)
 			buffer_type = HAL_BUFFER_OUTPUT;
-		else if (buffer_type == HAL_BUFFER_OUTPUT)
+		else if (buffer_type == HAL_BUFFER_OUTPUT &&
+			!client->tme_encode_mode)
 			buffer_type = HAL_BUFFER_INPUT;
 	}
 
diff --git a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
index fa40091..42bf1ba 100644
--- a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -39,6 +39,8 @@
 
 static inline struct msm_vidc_inst *get_vidc_inst(struct file *filp, void *fh)
 {
+	if (!filp->private_data)
+		return NULL;
 	return container_of(filp->private_data,
 					struct msm_vidc_inst, event_handler);
 }
@@ -74,6 +76,7 @@
 	vidc_inst = get_vidc_inst(filp, NULL);
 
 	rc = msm_vidc_close(vidc_inst);
+	filp->private_data = NULL;
 	trace_msm_v4l2_vidc_close_end("msm_v4l2_close end");
 	return rc;
 }
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index dd749d6..a80990c 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -2691,6 +2691,11 @@
 		memcpy(&inst->fmts[fmt->type], fmt,
 				sizeof(struct msm_vidc_format));
 
+		if (get_hal_codec(inst->fmts[CAPTURE_PORT].fourcc) ==
+			HAL_VIDEO_CODEC_TME) {
+			msm_smem_set_tme_encode_mode(inst->mem_client, true);
+		}
+
 		rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE);
 		if (rc) {
 			dprintk(VIDC_ERR, "Failed to open instance\n");
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index dc9302e..2d1ef10 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -93,7 +93,10 @@
 
 	frame_size = (msm_vidc_get_mbs_per_frame(inst) / (32 * 8) * 3) / 2;
 
-	CF = recon_stats->complexity_number / frame_size;
+	if (frame_size)
+		CF = recon_stats->complexity_number / frame_size;
+	else
+		CF = MSM_VIDC_MAX_UBWC_COMPLEXITY_FACTOR;
 
 	mutex_lock(&inst->reconbufs.lock);
 	list_for_each_entry(binfo, &inst->reconbufs.list, list) {
@@ -167,6 +170,7 @@
 	struct hfi_device *hdev;
 	struct msm_vidc_inst *inst = NULL;
 	struct vidc_bus_vote_data *vote_data = NULL;
+	bool is_turbo = false;
 
 	if (!core || !core->device) {
 		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, core);
@@ -208,6 +212,11 @@
 					temp->vvb.vb2_buf.planes[0].bytesused);
 				device_addr = temp->smem[0].device_addr;
 			}
+			if (inst->session_type == MSM_VIDC_ENCODER &&
+				(temp->vvb.flags &
+				V4L2_QCOM_BUF_FLAG_PERF_MODE)) {
+				is_turbo = true;
+			}
 		}
 		mutex_unlock(&inst->registeredbufs.lock);
 
@@ -248,7 +257,7 @@
 			vote_data[i].fps = inst->prop.fps;
 
 		vote_data[i].power_mode = 0;
-		if (!msm_vidc_clock_scaling ||
+		if (!msm_vidc_clock_scaling || is_turbo ||
 			inst->clk_data.buffer_counter < DCVS_FTB_WINDOW)
 			vote_data[i].power_mode = VIDC_POWER_TURBO;
 
@@ -409,7 +418,7 @@
 }
 
 static void msm_vidc_update_freq_entry(struct msm_vidc_inst *inst,
-	unsigned long freq, u32 device_addr)
+	unsigned long freq, u32 device_addr, bool is_turbo)
 {
 	struct vidc_freq_data *temp, *next;
 	bool found = false;
@@ -433,6 +442,7 @@
 		temp->device_addr = device_addr;
 		list_add_tail(&temp->list, &inst->freqs.list);
 	}
+	temp->turbo = !!is_turbo;
 exit:
 	mutex_unlock(&inst->freqs.lock);
 }
@@ -452,18 +462,36 @@
 	inst->clk_data.buffer_counter++;
 }
 
+static unsigned long msm_vidc_max_freq(struct msm_vidc_core *core)
+{
+	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
+	unsigned long freq = 0;
+
+	allowed_clks_tbl = core->resources.allowed_clks_tbl;
+	freq = allowed_clks_tbl[0].clock_rate;
+	dprintk(VIDC_PROF, "Max rate = %lu\n", freq);
+	return freq;
+}
 
 static unsigned long msm_vidc_adjust_freq(struct msm_vidc_inst *inst)
 {
 	struct vidc_freq_data *temp;
 	unsigned long freq = 0;
+	bool is_turbo = false;
 
 	mutex_lock(&inst->freqs.lock);
 	list_for_each_entry(temp, &inst->freqs.list, list) {
 		freq = max(freq, temp->freq);
+		if (temp->turbo) {
+			is_turbo = true;
+			break;
+		}
 	}
 	mutex_unlock(&inst->freqs.lock);
 
+	if (is_turbo) {
+		return msm_vidc_max_freq(inst->core);
+	}
 	/* If current requirement is within DCVS limits, try DCVS. */
 
 	if (freq < inst->clk_data.load_norm) {
@@ -531,17 +559,8 @@
 	mutex_unlock(&inst->input_crs.lock);
 }
 
-static unsigned long msm_vidc_max_freq(struct msm_vidc_core *core)
-{
-	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
-	unsigned long freq = 0;
 
-	allowed_clks_tbl = core->resources.allowed_clks_tbl;
-	freq = allowed_clks_tbl[0].clock_rate;
-	dprintk(VIDC_PROF, "Max rate = %lu\n", freq);
 
-	return freq;
-}
 
 static unsigned long msm_vidc_calc_freq(struct msm_vidc_inst *inst,
 	u32 filled_len)
@@ -741,6 +760,7 @@
 	unsigned long freq = 0;
 	u32 filled_len = 0;
 	u32 device_addr = 0;
+	bool is_turbo = false;
 
 	if (!inst || !inst->core) {
 		dprintk(VIDC_ERR, "%s Invalid args: Inst = %pK\n",
@@ -755,6 +775,11 @@
 					temp->flags & MSM_VIDC_FLAG_DEFERRED) {
 			filled_len = max(filled_len,
 				temp->vvb.vb2_buf.planes[0].bytesused);
+			if (inst->session_type == MSM_VIDC_ENCODER &&
+				(temp->vvb.flags &
+				 V4L2_QCOM_BUF_FLAG_PERF_MODE)) {
+				is_turbo = true;
+			}
 			device_addr = temp->smem[0].device_addr;
 		}
 	}
@@ -767,7 +792,7 @@
 
 	freq = msm_vidc_calc_freq(inst, filled_len);
 
-	msm_vidc_update_freq_entry(inst, freq, device_addr);
+	msm_vidc_update_freq_entry(inst, freq, device_addr, is_turbo);
 
 	freq = msm_vidc_adjust_freq(inst);
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 7d4e4a1..fd0fd39 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -35,6 +35,8 @@
 		V4L2_EVENT_MSM_VIDC_RELEASE_BUFFER_REFERENCE
 #define L_MODE V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY
 
+#define TRIGGER_SSR_LOCK_RETRIES 5
+
 const char *const mpeg_video_vidc_extradata[] = {
 	"Extradata none",
 	"Extradata MB Quantization",
@@ -2207,6 +2209,10 @@
 	}
 	/* handle the hw error before core released to get full debug info */
 	msm_vidc_handle_hw_error(core);
+	if (response->status == VIDC_ERR_NOC_ERROR) {
+		dprintk(VIDC_WARN, "Got NOC error");
+		MSM_VIDC_ERROR(true);
+	}
 	dprintk(VIDC_DBG, "Calling core_release\n");
 	rc = call_hfi_op(hdev, core_release, hdev->hfi_device_data);
 	if (rc) {
@@ -5305,6 +5311,7 @@
 {
 	int rc = 0;
 	struct hfi_device *hdev;
+	int try_lock_counter = TRIGGER_SSR_LOCK_RETRIES;
 
 	if (!core || !core->device) {
 		dprintk(VIDC_WARN, "Invalid parameters: %pK\n", core);
@@ -5312,7 +5319,13 @@
 	}
 	hdev = core->device;
 
-	mutex_lock(&core->lock);
+	while (try_lock_counter) {
+		if (mutex_trylock(&core->lock))
+			break;
+		try_lock_counter--;
+		if (!try_lock_counter)
+			return -EBUSY;
+	}
 	if (core->state == VIDC_CORE_INIT_DONE) {
 		/*
 		 * In current implementation user-initiated SSR triggers
@@ -6045,6 +6058,16 @@
 		inst->prop.width[CAPTURE_PORT] = inst->reconfig_width;
 		inst->prop.height[OUTPUT_PORT] = inst->reconfig_height;
 		inst->prop.width[OUTPUT_PORT] = inst->reconfig_width;
+		if (msm_comm_get_stream_output_mode(inst) ==
+			HAL_VIDEO_DECODER_SECONDARY) {
+			rc = msm_comm_queue_output_buffers(inst);
+			if (rc) {
+				dprintk(VIDC_ERR,
+						"Failed to queue output buffers: %d\n",
+						rc);
+				goto sess_continue_fail;
+			}
+		}
 	} else if (inst->session_type == MSM_VIDC_ENCODER) {
 		dprintk(VIDC_DBG,
 				"session_continue not supported for encoder");
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
index 215bb78..74fa3d5 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -131,7 +131,7 @@
 static ssize_t trigger_ssr_write(struct file *filp, const char __user *buf,
 		size_t count, loff_t *ppos) {
 	unsigned long ssr_trigger_val = 0;
-	int rc = 0;
+	int rc = 0, ret = 0;
 	struct msm_vidc_core *core = filp->private_data;
 	size_t size = MAX_SSR_STRING_LEN;
 	char kbuf[MAX_SSR_STRING_LEN + 1] = {0};
@@ -156,8 +156,8 @@
 		dprintk(VIDC_WARN, "returning error err %d\n", rc);
 		rc = -EINVAL;
 	} else {
-		msm_vidc_trigger_ssr(core, ssr_trigger_val);
-		rc = count;
+		ret = msm_vidc_trigger_ssr(core, ssr_trigger_val);
+		rc = (ret == -EBUSY ? ret : count);
 	}
 exit:
 	return rc;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index 98b5714..eda531e 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -143,6 +143,7 @@
 	struct list_head list;
 	u32 device_addr;
 	unsigned long freq;
+	bool turbo;
 };
 
 struct vidc_input_cr_data {
@@ -457,6 +458,7 @@
 void msm_comm_handle_thermal_event(void);
 void *msm_smem_new_client(enum smem_type mtype,
 		void *platform_resources, enum session_type stype);
+void msm_smem_set_tme_encode_mode(struct smem_client *client, bool enable);
 int msm_smem_alloc(struct smem_client *client,
 		size_t size, u32 align, u32 flags, enum hal_buffer buffer_type,
 		int map_kernel, struct msm_smem *smem);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
index b1a240d..530fe3a 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -992,10 +992,11 @@
 
 	if (core->smmu_fault_handled) {
 		if (core->resources.non_fatal_pagefaults) {
-			msm_vidc_noc_error_info(core);
-			MSM_VIDC_ERROR(true);
+			dprintk(VIDC_ERR,
+					"%s: non-fatal pagefault address: %lx\n",
+					__func__, iova);
+			return 0;
 		}
-		return -ENOSYS;
 	}
 
 	dprintk(VIDC_ERR, "%s - faulting address: %lx\n", __func__, iova);
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index 2260b55..54cbdfc 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -88,6 +88,7 @@
 	VIDC_ERR_TIMEOUT,
 	VIDC_ERR_CMDQFULL,
 	VIDC_ERR_START_CODE_NOT_FOUND,
+	VIDC_ERR_NOC_ERROR,
 	VIDC_ERR_CLIENT_PRESENT = 0x90000001,
 	VIDC_ERR_CLIENT_FATAL,
 	VIDC_ERR_CMD_QUEUE_FULL,
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index ca6d803..ea8cf1a 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -45,7 +45,7 @@
 #define HFI_ERR_SYS_SESSION_IN_USE			(HFI_COMMON_BASE + 0x7)
 #define HFI_ERR_SYS_SESSION_ID_OUT_OF_RANGE	(HFI_COMMON_BASE + 0x8)
 #define HFI_ERR_SYS_UNSUPPORTED_DOMAIN		(HFI_COMMON_BASE + 0x9)
-
+#define HFI_ERR_SYS_NOC_ERROR			(HFI_COMMON_BASE + 0x11)
 #define HFI_ERR_SESSION_FATAL			(HFI_COMMON_BASE + 0x1001)
 #define HFI_ERR_SESSION_INVALID_PARAMETER	(HFI_COMMON_BASE + 0x1002)
 #define HFI_ERR_SESSION_BAD_POINTER		(HFI_COMMON_BASE + 0x1003)
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index b49f80c..d9a5710 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -286,11 +286,14 @@
 		if (!dev->max_timeout)
 			return -ENOSYS;
 
+		/* Check for multiply overflow */
+		if (val > U32_MAX / 1000)
+			return -EINVAL;
+
 		tmp = val * 1000;
 
-		if (tmp < dev->min_timeout ||
-		    tmp > dev->max_timeout)
-				return -EINVAL;
+		if (tmp < dev->min_timeout || tmp > dev->max_timeout)
+			return -EINVAL;
 
 		if (dev->s_timeout)
 			ret = dev->s_timeout(dev, tmp);
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index 6ebe895..f4509ef 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -446,6 +446,8 @@
 		return -ERESTARTSYS;
 
 	ir = irctls[iminor(inode)];
+	mutex_unlock(&lirc_dev_lock);
+
 	if (!ir) {
 		retval = -ENODEV;
 		goto error;
@@ -486,8 +488,6 @@
 	}
 
 error:
-	mutex_unlock(&lirc_dev_lock);
-
 	nonseekable_open(inode, file);
 
 	return retval;
diff --git a/drivers/media/usb/as102/as102_fw.c b/drivers/media/usb/as102/as102_fw.c
index 5a28ce3..38dbc12 100644
--- a/drivers/media/usb/as102/as102_fw.c
+++ b/drivers/media/usb/as102/as102_fw.c
@@ -101,18 +101,23 @@
 				 unsigned char *cmd,
 				 const struct firmware *firmware) {
 
-	struct as10x_fw_pkt_t fw_pkt;
+	struct as10x_fw_pkt_t *fw_pkt;
 	int total_read_bytes = 0, errno = 0;
 	unsigned char addr_has_changed = 0;
 
+	fw_pkt = kmalloc(sizeof(*fw_pkt), GFP_KERNEL);
+	if (!fw_pkt)
+		return -ENOMEM;
+
+
 	for (total_read_bytes = 0; total_read_bytes < firmware->size; ) {
 		int read_bytes = 0, data_len = 0;
 
 		/* parse intel hex line */
 		read_bytes = parse_hex_line(
 				(u8 *) (firmware->data + total_read_bytes),
-				fw_pkt.raw.address,
-				fw_pkt.raw.data,
+				fw_pkt->raw.address,
+				fw_pkt->raw.data,
 				&data_len,
 				&addr_has_changed);
 
@@ -122,28 +127,28 @@
 		/* detect the end of file */
 		total_read_bytes += read_bytes;
 		if (total_read_bytes == firmware->size) {
-			fw_pkt.u.request[0] = 0x00;
-			fw_pkt.u.request[1] = 0x03;
+			fw_pkt->u.request[0] = 0x00;
+			fw_pkt->u.request[1] = 0x03;
 
 			/* send EOF command */
 			errno = bus_adap->ops->upload_fw_pkt(bus_adap,
 							     (uint8_t *)
-							     &fw_pkt, 2, 0);
+							     fw_pkt, 2, 0);
 			if (errno < 0)
 				goto error;
 		} else {
 			if (!addr_has_changed) {
 				/* prepare command to send */
-				fw_pkt.u.request[0] = 0x00;
-				fw_pkt.u.request[1] = 0x01;
+				fw_pkt->u.request[0] = 0x00;
+				fw_pkt->u.request[1] = 0x01;
 
-				data_len += sizeof(fw_pkt.u.request);
-				data_len += sizeof(fw_pkt.raw.address);
+				data_len += sizeof(fw_pkt->u.request);
+				data_len += sizeof(fw_pkt->raw.address);
 
 				/* send cmd to device */
 				errno = bus_adap->ops->upload_fw_pkt(bus_adap,
 								     (uint8_t *)
-								     &fw_pkt,
+								     fw_pkt,
 								     data_len,
 								     0);
 				if (errno < 0)
@@ -152,6 +157,7 @@
 		}
 	}
 error:
+	kfree(fw_pkt);
 	return (errno == 0) ? total_read_bytes : errno;
 }
 
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index be9e333..921cf1e 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -1622,7 +1622,7 @@
 	nr = dev->devno;
 
 	assoc_desc = udev->actconfig->intf_assoc[0];
-	if (assoc_desc->bFirstInterface != ifnum) {
+	if (!assoc_desc || assoc_desc->bFirstInterface != ifnum) {
 		dev_err(d, "Not found matching IAD interface\n");
 		retval = -ENODEV;
 		goto err_if;
diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c
index 8207e69..bcacb0f 100644
--- a/drivers/media/usb/dvb-usb/dibusb-common.c
+++ b/drivers/media/usb/dvb-usb/dibusb-common.c
@@ -223,8 +223,20 @@
 
 int dibusb_read_eeprom_byte(struct dvb_usb_device *d, u8 offs, u8 *val)
 {
-	u8 wbuf[1] = { offs };
-	return dibusb_i2c_msg(d, 0x50, wbuf, 1, val, 1);
+	u8 *buf;
+	int rc;
+
+	buf = kmalloc(2, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	buf[0] = offs;
+
+	rc = dibusb_i2c_msg(d, 0x50, &buf[0], 1, &buf[1], 1);
+	*val = buf[1];
+	kfree(buf);
+
+	return rc;
 }
 EXPORT_SYMBOL(dibusb_read_eeprom_byte);
 
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 6739fb0..98e5e3b 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -1222,6 +1222,16 @@
 }
 EXPORT_SYMBOL(v4l2_ctrl_fill);
 
+static u32 user_flags(const struct v4l2_ctrl *ctrl)
+{
+	u32 flags = ctrl->flags;
+
+	if (ctrl->is_ptr)
+		flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD;
+
+	return flags;
+}
+
 static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 changes)
 {
 	memset(ev->reserved, 0, sizeof(ev->reserved));
@@ -1229,7 +1239,7 @@
 	ev->id = ctrl->id;
 	ev->u.ctrl.changes = changes;
 	ev->u.ctrl.type = ctrl->type;
-	ev->u.ctrl.flags = ctrl->flags;
+	ev->u.ctrl.flags = user_flags(ctrl);
 	if (ctrl->is_ptr)
 		ev->u.ctrl.value64 = 0;
 	else
@@ -2553,10 +2563,8 @@
 	else
 		qc->id = ctrl->id;
 	strlcpy(qc->name, ctrl->name, sizeof(qc->name));
-	qc->flags = ctrl->flags;
+	qc->flags = user_flags(ctrl);
 	qc->type = ctrl->type;
-	if (ctrl->is_ptr)
-		qc->flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD;
 	qc->elem_size = ctrl->elem_size;
 	qc->elems = ctrl->elems;
 	qc->nr_of_dims = ctrl->nr_of_dims;
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 5457c36..bf0fe01 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -1947,9 +1947,7 @@
 	if (!of_property_read_u32(child, "dma-channel", &val))
 		gpmc_onenand_data->dma_channel = val;
 
-	gpmc_onenand_init(gpmc_onenand_data);
-
-	return 0;
+	return gpmc_onenand_init(gpmc_onenand_data);
 }
 #else
 static int gpmc_probe_onenand_child(struct platform_device *pdev,
diff --git a/drivers/mfd/cros_ec_spi.c b/drivers/mfd/cros_ec_spi.c
index a518832..59dbdaa 100644
--- a/drivers/mfd/cros_ec_spi.c
+++ b/drivers/mfd/cros_ec_spi.c
@@ -664,6 +664,7 @@
 			   sizeof(struct ec_response_get_protocol_info);
 	ec_dev->dout_size = sizeof(struct ec_host_request);
 
+	ec_spi->last_transfer_ns = ktime_get_ns();
 
 	err = cros_ec_register(ec_dev);
 	if (err) {
diff --git a/drivers/mfd/fsl-imx25-tsadc.c b/drivers/mfd/fsl-imx25-tsadc.c
index 77b2675..92e1760 100644
--- a/drivers/mfd/fsl-imx25-tsadc.c
+++ b/drivers/mfd/fsl-imx25-tsadc.c
@@ -183,6 +183,19 @@
 	return 0;
 }
 
+static int mx25_tsadc_remove(struct platform_device *pdev)
+{
+	struct mx25_tsadc *tsadc = platform_get_drvdata(pdev);
+	int irq = platform_get_irq(pdev, 0);
+
+	if (irq) {
+		irq_set_chained_handler_and_data(irq, NULL, NULL);
+		irq_domain_remove(tsadc->domain);
+	}
+
+	return 0;
+}
+
 static const struct of_device_id mx25_tsadc_ids[] = {
 	{ .compatible = "fsl,imx25-tsadc" },
 	{ /* Sentinel */ }
@@ -194,6 +207,7 @@
 		.of_match_table = of_match_ptr(mx25_tsadc_ids),
 	},
 	.probe = mx25_tsadc_probe,
+	.remove = mx25_tsadc_remove,
 };
 module_platform_driver(mx25_tsadc_driver);
 
diff --git a/drivers/mfd/twl4030-audio.c b/drivers/mfd/twl4030-audio.c
index 0a16064..cc832d3 100644
--- a/drivers/mfd/twl4030-audio.c
+++ b/drivers/mfd/twl4030-audio.c
@@ -159,13 +159,18 @@
 EXPORT_SYMBOL_GPL(twl4030_audio_get_mclk);
 
 static bool twl4030_audio_has_codec(struct twl4030_audio_data *pdata,
-			      struct device_node *node)
+			      struct device_node *parent)
 {
+	struct device_node *node;
+
 	if (pdata && pdata->codec)
 		return true;
 
-	if (of_find_node_by_name(node, "codec"))
+	node = of_get_child_by_name(parent, "codec");
+	if (node) {
+		of_node_put(node);
 		return true;
+	}
 
 	return false;
 }
diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c
index 1beb722..e1e69a4 100644
--- a/drivers/mfd/twl4030-power.c
+++ b/drivers/mfd/twl4030-power.c
@@ -701,6 +701,7 @@
 	TWL_RESOURCE_RESET(RES_MAIN_REF),
 	TWL_RESOURCE_GROUP_RESET(RES_GRP_ALL, RES_TYPE_R0, RES_TYPE2_R2),
 	TWL_RESOURCE_RESET(RES_VUSB_3V1),
+	TWL_RESOURCE_RESET(RES_VMMC1),
 	TWL_RESOURCE_GROUP_RESET(RES_GRP_ALL, RES_TYPE_R0, RES_TYPE2_R1),
 	TWL_RESOURCE_GROUP_RESET(RES_GRP_RC, RES_TYPE_ALL, RES_TYPE2_R0),
 	TWL_RESOURCE_ON(RES_RESET),
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index d66502d..dd19f17 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -97,12 +97,16 @@
 };
 
 
-static bool twl6040_has_vibra(struct device_node *node)
+static bool twl6040_has_vibra(struct device_node *parent)
 {
-#ifdef CONFIG_OF
-	if (of_find_node_by_name(node, "vibra"))
+	struct device_node *node;
+
+	node = of_get_child_by_name(parent, "vibra");
+	if (node) {
+		of_node_put(node);
 		return true;
-#endif
+	}
+
 	return false;
 }
 
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index eef202d..a5422f4 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -1758,6 +1758,9 @@
 	/* There should only be one entry, but go through the list
 	 * anyway
 	 */
+	if (afu->phb == NULL)
+		return result;
+
 	list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
 		if (!afu_dev->driver)
 			continue;
@@ -1801,6 +1804,11 @@
 			/* Only participate in EEH if we are on a virtual PHB */
 			if (afu->phb == NULL)
 				return PCI_ERS_RESULT_NONE;
+
+			/*
+			 * Tell the AFU drivers; but we don't care what they
+			 * say, we're going away.
+			 */
 			cxl_vphb_error_detected(afu, state);
 		}
 		return PCI_ERS_RESULT_DISCONNECT;
@@ -1941,6 +1949,9 @@
 		if (cxl_afu_select_best_mode(afu))
 			goto err;
 
+		if (afu->phb == NULL)
+			continue;
+
 		list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
 			/* Reset the device context.
 			 * TODO: make this less disruptive
@@ -2003,6 +2014,9 @@
 	for (i = 0; i < adapter->slices; i++) {
 		afu = adapter->afu[i];
 
+		if (afu->phb == NULL)
+			continue;
+
 		list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
 			if (afu_dev->driver && afu_dev->driver->err_handler &&
 			    afu_dev->driver->err_handler->resume)
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 051b147..d8a485f 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -365,7 +365,8 @@
 	memset(msg, 0, sizeof(msg));
 	msg[0].addr = client->addr;
 	msg[0].buf = addrbuf;
-	addrbuf[0] = 0x90 + offset;
+	/* EUI-48 starts from 0x9a, EUI-64 from 0x98 */
+	addrbuf[0] = 0xa0 - at24->chip.byte_len + offset;
 	msg[0].len = 1;
 	msg[1].addr = client->addr;
 	msg[1].flags = I2C_M_RD;
@@ -506,6 +507,9 @@
 	if (unlikely(!count))
 		return count;
 
+	if (off + count > at24->chip.byte_len)
+		return -EINVAL;
+
 	/*
 	 * Read data from chip, protecting against concurrent updates
 	 * from this host, but not from other I2C masters.
@@ -538,6 +542,9 @@
 	if (unlikely(!count))
 		return -EINVAL;
 
+	if (off + count > at24->chip.byte_len)
+		return -EINVAL;
+
 	/*
 	 * Write data to chip, protecting against concurrent updates
 	 * from this host, but not from other I2C masters.
@@ -638,6 +645,16 @@
 		dev_warn(&client->dev,
 			"page_size looks suspicious (no power of 2)!\n");
 
+	/*
+	 * REVISIT: the size of the EUI-48 byte array is 6 in at24mac402, while
+	 * the call to ilog2() in AT24_DEVICE_MAGIC() rounds it down to 4.
+	 *
+	 * Eventually we'll get rid of the magic values altogether in favor of
+	 * real structs, but for now just manually set the right size.
+	 */
+	if (chip.flags & AT24_FLAG_MAC && chip.byte_len == 4)
+		chip.byte_len = 6;
+
 	/* Use I2C operations unless we're stuck with SMBus extensions. */
 	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
 		if (chip.flags & AT24_FLAG_ADDR16)
@@ -766,7 +783,7 @@
 	at24->nvmem_config.reg_read = at24_read;
 	at24->nvmem_config.reg_write = at24_write;
 	at24->nvmem_config.priv = at24;
-	at24->nvmem_config.stride = 4;
+	at24->nvmem_config.stride = 1;
 	at24->nvmem_config.word_size = 1;
 	at24->nvmem_config.size = chip.byte_len;
 
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index e4af5c3..980c1c0 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -4043,7 +4043,7 @@
 		struct mmc_host *host = card->host;
 		struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;
 
-		if ((req_op(req) == REQ_OP_FLUSH || req_op(req) ==  REQ_OP_DISCARD) &&
+		if (mmc_req_is_special(req) &&
 		    (card->quirks & MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD) &&
 		    ctx->active_small_sector_read_reqs) {
 			ret = wait_event_interruptible(ctx->queue_empty_wq,
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 3fd621c..b34a143 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -102,8 +102,7 @@
 	 */
 	wait_event(ctx->wait, kthread_should_stop()
 		|| (mmc_peek_request(mq) &&
-		!(((req_op(mq->cmdq_req_peeked) == REQ_OP_FLUSH) ||
-		   (req_op(mq->cmdq_req_peeked) == REQ_OP_DISCARD))
+		!(mmc_req_is_special(mq->cmdq_req_peeked)
 		  && test_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx->curr_state))
 		&& !(!host->card->part_curr && !mmc_card_suspended(host->card)
 		     && mmc_host_halt(host))
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 3e0ba75..2958473 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -50,9 +50,28 @@
 	kfree(host);
 }
 
+static int mmc_host_prepare(struct device *dev)
+{
+	/*
+	 * Since mmc_host is a virtual device, we don't have to do anything.
+	 * If we return a positive value, the pm framework will consider that
+	 * the runtime suspend and system suspend of this device is same and
+	 * will set direct_complete flag as true. We don't want this as the
+	 * mmc_host always has positive disable_depth and setting the flag
+	 * will not speed up the suspend process.
+	 * So return 0.
+	 */
+	return 0;
+}
+
+static const struct dev_pm_ops mmc_pm_ops = {
+	.prepare = mmc_host_prepare,
+};
+
 static struct class mmc_host_class = {
 	.name		= "mmc_host",
 	.dev_release	= mmc_host_classdev_release,
+	.pm		= &mmc_pm_ops,
 };
 
 int mmc_register_host_class(void)
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index dd58288..ff4f84f 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -854,7 +854,7 @@
 MMC_DEV_ATTR(enhanced_rpmb_supported, "%#x\n",
 		card->ext_csd.enhanced_rpmb_supported);
 MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
-MMC_DEV_ATTR(ocr, "%08x\n", card->ocr);
+MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
 
 static ssize_t mmc_fwrev_show(struct device *dev,
 			      struct device_attribute *attr,
@@ -3004,12 +3004,6 @@
 	struct mmc_card *card = host->card;
 	int ret;
 
-	/*
-	 * In the case of recovery, we can't expect flushing the cache to work
-	 * always, but we have a go and ignore errors.
-	 */
-	mmc_flush_cache(host->card);
-
 	if ((host->caps & MMC_CAP_HW_RESET) && host->ops->hw_reset &&
 	     mmc_can_reset(card)) {
 		mmc_host_clk_hold(host);
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index e3bbc2c..965d1f0 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -753,7 +753,7 @@
 MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
 MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
 MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
-MMC_DEV_ATTR(ocr, "%08x\n", card->ocr);
+MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
 
 
 static ssize_t mmc_dsr_show(struct device *dev,
diff --git a/drivers/mmc/host/cmdq_hci.c b/drivers/mmc/host/cmdq_hci.c
index 55ce946..01811d9 100644
--- a/drivers/mmc/host/cmdq_hci.c
+++ b/drivers/mmc/host/cmdq_hci.c
@@ -357,7 +357,7 @@
 	if (!cq_host->desc_base || !cq_host->trans_desc_base)
 		return -ENOMEM;
 
-	pr_info("desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
+	pr_debug("desc-base: 0x%pK trans-base: 0x%pK\n desc_dma 0x%llx trans_dma: 0x%llx\n",
 		 cq_host->desc_base, cq_host->trans_desc_base,
 		(unsigned long long)cq_host->desc_dma_base,
 		(unsigned long long) cq_host->trans_desc_dma_base);
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 84e9afc..6f9535e 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -579,7 +579,7 @@
 		}
 	}
 	sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD | MSDC_CFG_CKDIV,
-			(mode << 8) | (div % 0xff));
+		      (mode << 8) | div);
 	sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
 	while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
 		cpu_relax();
@@ -1562,7 +1562,7 @@
 	host->src_clk_freq = clk_get_rate(host->src_clk);
 	/* Set host parameters to mmc */
 	mmc->ops = &mt_msdc_ops;
-	mmc->f_min = host->src_clk_freq / (4 * 255);
+	mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 255);
 
 	mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
 	/* MMC core transfer sizes tunable parameters */
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 7880405..5ed9b72 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -2,7 +2,7 @@
  * drivers/mmc/host/sdhci-msm.c - Qualcomm Technologies, Inc. MSM SDHCI Platform
  * driver source file
  *
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -39,6 +39,7 @@
 #include <linux/iopoll.h>
 #include <linux/msm-bus.h>
 #include <linux/pm_runtime.h>
+#include <linux/nvmem-consumer.h>
 #include <trace/events/mmc.h>
 
 #include "sdhci-msm.h"
@@ -1885,6 +1886,65 @@
 	}
 }
 
+#ifdef CONFIG_NVMEM
+/* Parse qfprom data for deciding on errata work-arounds */
+static long qfprom_read(struct device *dev, const char *name)
+{
+	struct nvmem_cell *cell;
+	ssize_t len = 0;
+	u32 *buf, val = 0;
+	long err = 0;
+
+	cell = nvmem_cell_get(dev, name);
+	if (IS_ERR(cell)) {
+		err = PTR_ERR(cell);
+		dev_err(dev, "failed opening nvmem cell err : %ld\n", err);
+		/* If entry does not exist, then that is not an error */
+		if (err == -ENOENT)
+			err = 0;
+		return err;
+	}
+
+	buf = (u32 *)nvmem_cell_read(cell, &len);
+	if (IS_ERR(buf) || !len) {
+		dev_err(dev, "Failed reading nvmem cell, err: %u, bytes fetched: %zd\n",
+				*buf, len);
+		if (!IS_ERR(buf)) {
+			kfree(buf);
+			err = -EINVAL;
+		} else {
+			err = PTR_ERR(buf);
+		}
+	} else {
+		val = *buf;
+		kfree(buf);
+	}
+
+	nvmem_cell_put(cell);
+	return err ? err : (long) val;
+}
+
+/* Reads the SoC version */
+static int sdhci_msm_get_socrev(struct device *dev,
+				struct sdhci_msm_host *msm_host)
+{
+
+	msm_host->soc_min_rev  = qfprom_read(dev, "minor_rev");
+
+	if (msm_host->soc_min_rev < 0)
+		dev_err(dev, "failed getting soc_min_rev, err : %d\n",
+				msm_host->soc_min_rev);
+	return msm_host->soc_min_rev;
+}
+#else
+/* Reads the SoC version */
+static int sdhci_msm_get_socrev(struct device *dev,
+				struct sdhci_msm_host *msm_host)
+{
+	return 0;
+}
+#endif
+
 /* Parse platform data */
 static
 struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev,
@@ -2062,6 +2122,13 @@
 	if (!of_property_read_u32(np, "qcom,ddr-config", &pdata->ddr_config))
 		pdata->rclk_wa = true;
 
+	/*
+	 * rclk_wa is not required if soc version is mentioned and
+	 * is not base version.
+	 */
+	if (msm_host->soc_min_rev != 0)
+		pdata->rclk_wa = false;
+
 	return pdata;
 out:
 	return NULL;
@@ -4170,11 +4237,10 @@
 		group->latency = PM_QOS_DEFAULT_VALUE;
 		pm_qos_add_request(&group->req, PM_QOS_CPU_DMA_LATENCY,
 			group->latency);
-		pr_info("%s (): voted for group #%d (mask=0x%lx) latency=%d (0x%p)\n",
+		pr_info("%s (): voted for group #%d (mask=0x%lx) latency=%d\n",
 			__func__, i,
 			group->req.cpus_affine.bits[0],
-			group->latency,
-			&latency[i].latency[SDHCI_PERFORMANCE_MODE]);
+			group->latency);
 	}
 	msm_host->pm_qos_prev_cpu = -1;
 	msm_host->pm_qos_group_enable = true;
@@ -4530,6 +4596,12 @@
 	msm_host->mmc = host->mmc;
 	msm_host->pdev = pdev;
 
+	ret = sdhci_msm_get_socrev(&pdev->dev, msm_host);
+	if (ret == -EPROBE_DEFER) {
+		dev_err(&pdev->dev, "SoC version rd: fail: defer for now\n");
+		goto pltfm_free;
+	}
+
 	/* get the ice device vops if present */
 	ret = sdhci_msm_ice_get_dev(host);
 	if (ret == -EPROBE_DEFER) {
@@ -4735,8 +4807,6 @@
 			goto vreg_deinit;
 		}
 		writel_relaxed(readl_relaxed(tlmm_mem) | 0x2, tlmm_mem);
-		dev_dbg(&pdev->dev, "tlmm reg %pa value 0x%08x\n",
-				&tlmm_memres->start, readl_relaxed(tlmm_mem));
 	}
 
 	/*
@@ -4845,6 +4915,7 @@
 				msm_host->pwr_irq);
 		goto vreg_deinit;
 	}
+
 	ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
 					sdhci_msm_pwr_irq, IRQF_ONESHOT,
 					dev_name(&pdev->dev), host);
diff --git a/drivers/mmc/host/sdhci-msm.h b/drivers/mmc/host/sdhci-msm.h
index 7c737cc..9c2442d 100644
--- a/drivers/mmc/host/sdhci-msm.h
+++ b/drivers/mmc/host/sdhci-msm.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -255,6 +255,7 @@
 	bool core_3_0v_support;
 	bool pltfm_init_done;
 	struct sdhci_msm_regs_restore regs_restore;
+	int soc_min_rev;
 };
 
 extern char *saved_command_line;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 566be69..b674b38 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -3068,13 +3068,13 @@
 		struct sdhci_adma2_64_desc *dma_desc = desc;
 
 		if (host->flags & SDHCI_USE_64_BIT_DMA)
-			DBG("%s: %p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
+			DBG("%s: %pK: DMA 0x%08x%08x, LEN 0x%04x,Attr=0x%02x\n",
 			    name, desc, le32_to_cpu(dma_desc->addr_hi),
 			    le32_to_cpu(dma_desc->addr_lo),
 			    le16_to_cpu(dma_desc->len),
 			    le16_to_cpu(dma_desc->cmd));
 		else
-			DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
+			DBG("%s: %pK: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
 			    name, desc, le32_to_cpu(dma_desc->addr_lo),
 			    le16_to_cpu(dma_desc->len),
 			    le16_to_cpu(dma_desc->cmd));
diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c
index dbf2562..ada2d88 100644
--- a/drivers/mtd/nand/mtk_ecc.c
+++ b/drivers/mtd/nand/mtk_ecc.c
@@ -116,6 +116,11 @@
 		op = ECC_DECODE;
 		dec = readw(ecc->regs + ECC_DECDONE);
 		if (dec & ecc->sectors) {
+			/*
+			 * Clear decode IRQ status once again to ensure that
+			 * there will be no extra IRQ.
+			 */
+			readw(ecc->regs + ECC_DECIRQ_STA);
 			ecc->sectors = 0;
 			complete(&ecc->done);
 		} else {
@@ -131,8 +136,6 @@
 		}
 	}
 
-	writel(0, ecc->regs + ECC_IRQ_REG(op));
-
 	return IRQ_HANDLED;
 }
 
@@ -342,6 +345,12 @@
 
 	/* disable it */
 	mtk_ecc_wait_idle(ecc, op);
+	if (op == ECC_DECODE)
+		/*
+		 * Clear decode IRQ status in case there is a timeout to wait
+		 * decode IRQ.
+		 */
+		readw(ecc->regs + ECC_DECIRQ_STA);
 	writew(0, ecc->regs + ECC_IRQ_REG(op));
 	writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op));
 
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 31a6ee3..a77cfd7 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2935,15 +2935,18 @@
 			    size_t *retlen, const uint8_t *buf)
 {
 	struct nand_chip *chip = mtd_to_nand(mtd);
+	int chipnr = (int)(to >> chip->chip_shift);
 	struct mtd_oob_ops ops;
 	int ret;
 
-	/* Wait for the device to get ready */
-	panic_nand_wait(mtd, chip, 400);
-
 	/* Grab the device */
 	panic_nand_get_device(chip, mtd, FL_WRITING);
 
+	chip->select_chip(mtd, chipnr);
+
+	/* Wait for the device to get ready */
+	panic_nand_wait(mtd, chip, 400);
+
 	memset(&ops, 0, sizeof(ops));
 	ops.len = len;
 	ops.datbuf = (uint8_t *)buf;
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index c178cb0d..f3a516b 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1133,129 +1133,172 @@
 				0x97, 0x79, 0xe5, 0x24, 0xb5};
 
 /**
- * omap_calculate_ecc_bch - Generate bytes of ECC bytes
+ * _omap_calculate_ecc_bch - Generate ECC bytes for one sector
  * @mtd:	MTD device structure
  * @dat:	The pointer to data on which ecc is computed
  * @ecc_code:	The ecc_code buffer
+ * @i:		The sector number (for a multi sector page)
  *
- * Support calculating of BCH4/8 ecc vectors for the page
+ * Support calculating of BCH4/8/16 ECC vectors for one sector
+ * within a page. Sector number is in @i.
  */
-static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd,
-					const u_char *dat, u_char *ecc_calc)
+static int _omap_calculate_ecc_bch(struct mtd_info *mtd,
+				   const u_char *dat, u_char *ecc_calc, int i)
 {
 	struct omap_nand_info *info = mtd_to_omap(mtd);
 	int eccbytes	= info->nand.ecc.bytes;
 	struct gpmc_nand_regs	*gpmc_regs = &info->reg;
 	u8 *ecc_code;
-	unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4;
+	unsigned long bch_val1, bch_val2, bch_val3, bch_val4;
 	u32 val;
-	int i, j;
+	int j;
+
+	ecc_code = ecc_calc;
+	switch (info->ecc_opt) {
+	case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+	case OMAP_ECC_BCH8_CODE_HW:
+		bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
+		bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
+		bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
+		bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
+		*ecc_code++ = (bch_val4 & 0xFF);
+		*ecc_code++ = ((bch_val3 >> 24) & 0xFF);
+		*ecc_code++ = ((bch_val3 >> 16) & 0xFF);
+		*ecc_code++ = ((bch_val3 >> 8) & 0xFF);
+		*ecc_code++ = (bch_val3 & 0xFF);
+		*ecc_code++ = ((bch_val2 >> 24) & 0xFF);
+		*ecc_code++ = ((bch_val2 >> 16) & 0xFF);
+		*ecc_code++ = ((bch_val2 >> 8) & 0xFF);
+		*ecc_code++ = (bch_val2 & 0xFF);
+		*ecc_code++ = ((bch_val1 >> 24) & 0xFF);
+		*ecc_code++ = ((bch_val1 >> 16) & 0xFF);
+		*ecc_code++ = ((bch_val1 >> 8) & 0xFF);
+		*ecc_code++ = (bch_val1 & 0xFF);
+		break;
+	case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+	case OMAP_ECC_BCH4_CODE_HW:
+		bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
+		bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
+		*ecc_code++ = ((bch_val2 >> 12) & 0xFF);
+		*ecc_code++ = ((bch_val2 >> 4) & 0xFF);
+		*ecc_code++ = ((bch_val2 & 0xF) << 4) |
+			((bch_val1 >> 28) & 0xF);
+		*ecc_code++ = ((bch_val1 >> 20) & 0xFF);
+		*ecc_code++ = ((bch_val1 >> 12) & 0xFF);
+		*ecc_code++ = ((bch_val1 >> 4) & 0xFF);
+		*ecc_code++ = ((bch_val1 & 0xF) << 4);
+		break;
+	case OMAP_ECC_BCH16_CODE_HW:
+		val = readl(gpmc_regs->gpmc_bch_result6[i]);
+		ecc_code[0]  = ((val >>  8) & 0xFF);
+		ecc_code[1]  = ((val >>  0) & 0xFF);
+		val = readl(gpmc_regs->gpmc_bch_result5[i]);
+		ecc_code[2]  = ((val >> 24) & 0xFF);
+		ecc_code[3]  = ((val >> 16) & 0xFF);
+		ecc_code[4]  = ((val >>  8) & 0xFF);
+		ecc_code[5]  = ((val >>  0) & 0xFF);
+		val = readl(gpmc_regs->gpmc_bch_result4[i]);
+		ecc_code[6]  = ((val >> 24) & 0xFF);
+		ecc_code[7]  = ((val >> 16) & 0xFF);
+		ecc_code[8]  = ((val >>  8) & 0xFF);
+		ecc_code[9]  = ((val >>  0) & 0xFF);
+		val = readl(gpmc_regs->gpmc_bch_result3[i]);
+		ecc_code[10] = ((val >> 24) & 0xFF);
+		ecc_code[11] = ((val >> 16) & 0xFF);
+		ecc_code[12] = ((val >>  8) & 0xFF);
+		ecc_code[13] = ((val >>  0) & 0xFF);
+		val = readl(gpmc_regs->gpmc_bch_result2[i]);
+		ecc_code[14] = ((val >> 24) & 0xFF);
+		ecc_code[15] = ((val >> 16) & 0xFF);
+		ecc_code[16] = ((val >>  8) & 0xFF);
+		ecc_code[17] = ((val >>  0) & 0xFF);
+		val = readl(gpmc_regs->gpmc_bch_result1[i]);
+		ecc_code[18] = ((val >> 24) & 0xFF);
+		ecc_code[19] = ((val >> 16) & 0xFF);
+		ecc_code[20] = ((val >>  8) & 0xFF);
+		ecc_code[21] = ((val >>  0) & 0xFF);
+		val = readl(gpmc_regs->gpmc_bch_result0[i]);
+		ecc_code[22] = ((val >> 24) & 0xFF);
+		ecc_code[23] = ((val >> 16) & 0xFF);
+		ecc_code[24] = ((val >>  8) & 0xFF);
+		ecc_code[25] = ((val >>  0) & 0xFF);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* ECC scheme specific syndrome customizations */
+	switch (info->ecc_opt) {
+	case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+		/* Add constant polynomial to remainder, so that
+		 * ECC of blank pages results in 0x0 on reading back
+		 */
+		for (j = 0; j < eccbytes; j++)
+			ecc_calc[j] ^= bch4_polynomial[j];
+		break;
+	case OMAP_ECC_BCH4_CODE_HW:
+		/* Set  8th ECC byte as 0x0 for ROM compatibility */
+		ecc_calc[eccbytes - 1] = 0x0;
+		break;
+	case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+		/* Add constant polynomial to remainder, so that
+		 * ECC of blank pages results in 0x0 on reading back
+		 */
+		for (j = 0; j < eccbytes; j++)
+			ecc_calc[j] ^= bch8_polynomial[j];
+		break;
+	case OMAP_ECC_BCH8_CODE_HW:
+		/* Set 14th ECC byte as 0x0 for ROM compatibility */
+		ecc_calc[eccbytes - 1] = 0x0;
+		break;
+	case OMAP_ECC_BCH16_CODE_HW:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * omap_calculate_ecc_bch_sw - ECC generator for sector for SW based correction
+ * @mtd:	MTD device structure
+ * @dat:	The pointer to data on which ecc is computed
+ * @ecc_code:	The ecc_code buffer
+ *
+ * Support calculating of BCH4/8/16 ECC vectors for one sector. This is used
+ * when SW based correction is required as ECC is required for one sector
+ * at a time.
+ */
+static int omap_calculate_ecc_bch_sw(struct mtd_info *mtd,
+				     const u_char *dat, u_char *ecc_calc)
+{
+	return _omap_calculate_ecc_bch(mtd, dat, ecc_calc, 0);
+}
+
+/**
+ * omap_calculate_ecc_bch_multi - Generate ECC for multiple sectors
+ * @mtd:	MTD device structure
+ * @dat:	The pointer to data on which ecc is computed
+ * @ecc_code:	The ecc_code buffer
+ *
+ * Support calculating of BCH4/8/16 ecc vectors for the entire page in one go.
+ */
+static int omap_calculate_ecc_bch_multi(struct mtd_info *mtd,
+					const u_char *dat, u_char *ecc_calc)
+{
+	struct omap_nand_info *info = mtd_to_omap(mtd);
+	int eccbytes = info->nand.ecc.bytes;
+	unsigned long nsectors;
+	int i, ret;
 
 	nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
 	for (i = 0; i < nsectors; i++) {
-		ecc_code = ecc_calc;
-		switch (info->ecc_opt) {
-		case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
-		case OMAP_ECC_BCH8_CODE_HW:
-			bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
-			bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
-			bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
-			bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
-			*ecc_code++ = (bch_val4 & 0xFF);
-			*ecc_code++ = ((bch_val3 >> 24) & 0xFF);
-			*ecc_code++ = ((bch_val3 >> 16) & 0xFF);
-			*ecc_code++ = ((bch_val3 >> 8) & 0xFF);
-			*ecc_code++ = (bch_val3 & 0xFF);
-			*ecc_code++ = ((bch_val2 >> 24) & 0xFF);
-			*ecc_code++ = ((bch_val2 >> 16) & 0xFF);
-			*ecc_code++ = ((bch_val2 >> 8) & 0xFF);
-			*ecc_code++ = (bch_val2 & 0xFF);
-			*ecc_code++ = ((bch_val1 >> 24) & 0xFF);
-			*ecc_code++ = ((bch_val1 >> 16) & 0xFF);
-			*ecc_code++ = ((bch_val1 >> 8) & 0xFF);
-			*ecc_code++ = (bch_val1 & 0xFF);
-			break;
-		case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
-		case OMAP_ECC_BCH4_CODE_HW:
-			bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
-			bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
-			*ecc_code++ = ((bch_val2 >> 12) & 0xFF);
-			*ecc_code++ = ((bch_val2 >> 4) & 0xFF);
-			*ecc_code++ = ((bch_val2 & 0xF) << 4) |
-				((bch_val1 >> 28) & 0xF);
-			*ecc_code++ = ((bch_val1 >> 20) & 0xFF);
-			*ecc_code++ = ((bch_val1 >> 12) & 0xFF);
-			*ecc_code++ = ((bch_val1 >> 4) & 0xFF);
-			*ecc_code++ = ((bch_val1 & 0xF) << 4);
-			break;
-		case OMAP_ECC_BCH16_CODE_HW:
-			val = readl(gpmc_regs->gpmc_bch_result6[i]);
-			ecc_code[0]  = ((val >>  8) & 0xFF);
-			ecc_code[1]  = ((val >>  0) & 0xFF);
-			val = readl(gpmc_regs->gpmc_bch_result5[i]);
-			ecc_code[2]  = ((val >> 24) & 0xFF);
-			ecc_code[3]  = ((val >> 16) & 0xFF);
-			ecc_code[4]  = ((val >>  8) & 0xFF);
-			ecc_code[5]  = ((val >>  0) & 0xFF);
-			val = readl(gpmc_regs->gpmc_bch_result4[i]);
-			ecc_code[6]  = ((val >> 24) & 0xFF);
-			ecc_code[7]  = ((val >> 16) & 0xFF);
-			ecc_code[8]  = ((val >>  8) & 0xFF);
-			ecc_code[9]  = ((val >>  0) & 0xFF);
-			val = readl(gpmc_regs->gpmc_bch_result3[i]);
-			ecc_code[10] = ((val >> 24) & 0xFF);
-			ecc_code[11] = ((val >> 16) & 0xFF);
-			ecc_code[12] = ((val >>  8) & 0xFF);
-			ecc_code[13] = ((val >>  0) & 0xFF);
-			val = readl(gpmc_regs->gpmc_bch_result2[i]);
-			ecc_code[14] = ((val >> 24) & 0xFF);
-			ecc_code[15] = ((val >> 16) & 0xFF);
-			ecc_code[16] = ((val >>  8) & 0xFF);
-			ecc_code[17] = ((val >>  0) & 0xFF);
-			val = readl(gpmc_regs->gpmc_bch_result1[i]);
-			ecc_code[18] = ((val >> 24) & 0xFF);
-			ecc_code[19] = ((val >> 16) & 0xFF);
-			ecc_code[20] = ((val >>  8) & 0xFF);
-			ecc_code[21] = ((val >>  0) & 0xFF);
-			val = readl(gpmc_regs->gpmc_bch_result0[i]);
-			ecc_code[22] = ((val >> 24) & 0xFF);
-			ecc_code[23] = ((val >> 16) & 0xFF);
-			ecc_code[24] = ((val >>  8) & 0xFF);
-			ecc_code[25] = ((val >>  0) & 0xFF);
-			break;
-		default:
-			return -EINVAL;
-		}
+		ret = _omap_calculate_ecc_bch(mtd, dat, ecc_calc, i);
+		if (ret)
+			return ret;
 
-		/* ECC scheme specific syndrome customizations */
-		switch (info->ecc_opt) {
-		case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
-			/* Add constant polynomial to remainder, so that
-			 * ECC of blank pages results in 0x0 on reading back */
-			for (j = 0; j < eccbytes; j++)
-				ecc_calc[j] ^= bch4_polynomial[j];
-			break;
-		case OMAP_ECC_BCH4_CODE_HW:
-			/* Set  8th ECC byte as 0x0 for ROM compatibility */
-			ecc_calc[eccbytes - 1] = 0x0;
-			break;
-		case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
-			/* Add constant polynomial to remainder, so that
-			 * ECC of blank pages results in 0x0 on reading back */
-			for (j = 0; j < eccbytes; j++)
-				ecc_calc[j] ^= bch8_polynomial[j];
-			break;
-		case OMAP_ECC_BCH8_CODE_HW:
-			/* Set 14th ECC byte as 0x0 for ROM compatibility */
-			ecc_calc[eccbytes - 1] = 0x0;
-			break;
-		case OMAP_ECC_BCH16_CODE_HW:
-			break;
-		default:
-			return -EINVAL;
-		}
-
-	ecc_calc += eccbytes;
+		ecc_calc += eccbytes;
 	}
 
 	return 0;
@@ -1496,7 +1539,7 @@
 	chip->write_buf(mtd, buf, mtd->writesize);
 
 	/* Update ecc vector from GPMC result registers */
-	chip->ecc.calculate(mtd, buf, &ecc_calc[0]);
+	omap_calculate_ecc_bch_multi(mtd, buf, &ecc_calc[0]);
 
 	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
 					 chip->ecc.total);
@@ -1509,6 +1552,72 @@
 }
 
 /**
+ * omap_write_subpage_bch - BCH hardware ECC based subpage write
+ * @mtd:	mtd info structure
+ * @chip:	nand chip info structure
+ * @offset:	column address of subpage within the page
+ * @data_len:	data length
+ * @buf:	data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * OMAP optimized subpage write method.
+ */
+static int omap_write_subpage_bch(struct mtd_info *mtd,
+				  struct nand_chip *chip, u32 offset,
+				  u32 data_len, const u8 *buf,
+				  int oob_required, int page)
+{
+	u8 *ecc_calc = chip->buffers->ecccalc;
+	int ecc_size      = chip->ecc.size;
+	int ecc_bytes     = chip->ecc.bytes;
+	int ecc_steps     = chip->ecc.steps;
+	u32 start_step = offset / ecc_size;
+	u32 end_step   = (offset + data_len - 1) / ecc_size;
+	int step, ret = 0;
+
+	/*
+	 * Write entire page at one go as it would be optimal
+	 * as ECC is calculated by hardware.
+	 * ECC is calculated for all subpages but we choose
+	 * only what we want.
+	 */
+
+	/* Enable GPMC ECC engine */
+	chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+
+	/* Write data */
+	chip->write_buf(mtd, buf, mtd->writesize);
+
+	for (step = 0; step < ecc_steps; step++) {
+		/* mask ECC of un-touched subpages by padding 0xFF */
+		if (step < start_step || step > end_step)
+			memset(ecc_calc, 0xff, ecc_bytes);
+		else
+			ret = _omap_calculate_ecc_bch(mtd, buf, ecc_calc, step);
+
+		if (ret)
+			return ret;
+
+		buf += ecc_size;
+		ecc_calc += ecc_bytes;
+	}
+
+	/* copy calculated ECC for whole page to chip->buffer->oob */
+	/* this include masked-value(0xFF) for unwritten subpages */
+	ecc_calc = chip->buffers->ecccalc;
+	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
+					 chip->ecc.total);
+	if (ret)
+		return ret;
+
+	/* write OOB buffer to NAND device */
+	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	return 0;
+}
+
+/**
  * omap_read_page_bch - BCH ecc based page read function for entire page
  * @mtd:		mtd info structure
  * @chip:		nand chip info structure
@@ -1544,7 +1653,7 @@
 		       chip->ecc.total);
 
 	/* Calculate ecc bytes */
-	chip->ecc.calculate(mtd, buf, ecc_calc);
+	omap_calculate_ecc_bch_multi(mtd, buf, ecc_calc);
 
 	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
 					 chip->ecc.total);
@@ -2044,7 +2153,7 @@
 		nand_chip->ecc.strength		= 4;
 		nand_chip->ecc.hwctl		= omap_enable_hwecc_bch;
 		nand_chip->ecc.correct		= nand_bch_correct_data;
-		nand_chip->ecc.calculate	= omap_calculate_ecc_bch;
+		nand_chip->ecc.calculate	= omap_calculate_ecc_bch_sw;
 		mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
 		/* Reserve one byte for the OMAP marker */
 		oobbytes_per_step		= nand_chip->ecc.bytes + 1;
@@ -2066,9 +2175,9 @@
 		nand_chip->ecc.strength		= 4;
 		nand_chip->ecc.hwctl		= omap_enable_hwecc_bch;
 		nand_chip->ecc.correct		= omap_elm_correct_data;
-		nand_chip->ecc.calculate	= omap_calculate_ecc_bch;
 		nand_chip->ecc.read_page	= omap_read_page_bch;
 		nand_chip->ecc.write_page	= omap_write_page_bch;
+		nand_chip->ecc.write_subpage	= omap_write_subpage_bch;
 		mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
 		oobbytes_per_step		= nand_chip->ecc.bytes;
 
@@ -2087,7 +2196,7 @@
 		nand_chip->ecc.strength		= 8;
 		nand_chip->ecc.hwctl		= omap_enable_hwecc_bch;
 		nand_chip->ecc.correct		= nand_bch_correct_data;
-		nand_chip->ecc.calculate	= omap_calculate_ecc_bch;
+		nand_chip->ecc.calculate	= omap_calculate_ecc_bch_sw;
 		mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
 		/* Reserve one byte for the OMAP marker */
 		oobbytes_per_step		= nand_chip->ecc.bytes + 1;
@@ -2109,9 +2218,9 @@
 		nand_chip->ecc.strength		= 8;
 		nand_chip->ecc.hwctl		= omap_enable_hwecc_bch;
 		nand_chip->ecc.correct		= omap_elm_correct_data;
-		nand_chip->ecc.calculate	= omap_calculate_ecc_bch;
 		nand_chip->ecc.read_page	= omap_read_page_bch;
 		nand_chip->ecc.write_page	= omap_write_page_bch;
+		nand_chip->ecc.write_subpage	= omap_write_subpage_bch;
 		mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
 		oobbytes_per_step		= nand_chip->ecc.bytes;
 
@@ -2131,9 +2240,9 @@
 		nand_chip->ecc.strength		= 16;
 		nand_chip->ecc.hwctl		= omap_enable_hwecc_bch;
 		nand_chip->ecc.correct		= omap_elm_correct_data;
-		nand_chip->ecc.calculate	= omap_calculate_ecc_bch;
 		nand_chip->ecc.read_page	= omap_read_page_bch;
 		nand_chip->ecc.write_page	= omap_write_page_bch;
+		nand_chip->ecc.write_subpage	= omap_write_subpage_bch;
 		mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
 		oobbytes_per_step		= nand_chip->ecc.bytes;
 
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index b121bf4..3b8911c 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -950,6 +950,7 @@
 
 	switch (command) {
 	case NAND_CMD_READ0:
+	case NAND_CMD_READOOB:
 	case NAND_CMD_PAGEPROG:
 		info->use_ecc = 1;
 		break;
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
index e90c6a7..2e46496 100644
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -191,7 +191,7 @@
  */
 static int ipddp_create(struct ipddp_route *new_rt)
 {
-        struct ipddp_route *rt = kmalloc(sizeof(*rt), GFP_KERNEL);
+        struct ipddp_route *rt = kzalloc(sizeof(*rt), GFP_KERNEL);
 
         if (rt == NULL)
                 return -ENOMEM;
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 6749b18..4d01d7b 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -652,6 +652,9 @@
 		mbx_mask = hecc_read(priv, HECC_CANMIM);
 		mbx_mask |= HECC_TX_MBOX_MASK;
 		hecc_write(priv, HECC_CANMIM, mbx_mask);
+	} else {
+		/* repoll is done only if whole budget is used */
+		num_pkts = quota;
 	}
 
 	return num_pkts;
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index b3d0275..b003582 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -288,6 +288,8 @@
 
 	case -ECONNRESET: /* unlink */
 	case -ENOENT:
+	case -EPIPE:
+	case -EPROTO:
 	case -ESHUTDOWN:
 		return;
 
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 9fdb0f0..c6dcf93 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -393,6 +393,8 @@
 		break;
 
 	case -ENOENT:
+	case -EPIPE:
+	case -EPROTO:
 	case -ESHUTDOWN:
 		return;
 
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index eea9aea..5d50123 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -449,7 +449,7 @@
 		dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)",
 			rc);
 
-	return rc;
+	return (rc > 0) ? 0 : rc;
 }
 
 static void gs_usb_xmit_callback(struct urb *urb)
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 4224e06..c9d61a6 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -609,8 +609,8 @@
 			}
 
 			if (pos + tmp->len > actual_len) {
-				dev_err(dev->udev->dev.parent,
-					"Format error\n");
+				dev_err_ratelimited(dev->udev->dev.parent,
+						    "Format error\n");
 				break;
 			}
 
@@ -813,6 +813,7 @@
 	if (err) {
 		netdev_err(netdev, "Error transmitting URB\n");
 		usb_unanchor_urb(urb);
+		kfree(buf);
 		usb_free_urb(urb);
 		return err;
 	}
@@ -1325,6 +1326,8 @@
 	case 0:
 		break;
 	case -ENOENT:
+	case -EPIPE:
+	case -EPROTO:
 	case -ESHUTDOWN:
 		return;
 	default:
@@ -1333,7 +1336,7 @@
 		goto resubmit_urb;
 	}
 
-	while (pos <= urb->actual_length - MSG_HEADER_LEN) {
+	while (pos <= (int)(urb->actual_length - MSG_HEADER_LEN)) {
 		msg = urb->transfer_buffer + pos;
 
 		/* The Kvaser firmware can only read and write messages that
@@ -1352,7 +1355,8 @@
 		}
 
 		if (pos + msg->len > urb->actual_length) {
-			dev_err(dev->udev->dev.parent, "Format error\n");
+			dev_err_ratelimited(dev->udev->dev.parent,
+					    "Format error\n");
 			break;
 		}
 
@@ -1768,6 +1772,7 @@
 		spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
 
 		usb_unanchor_urb(urb);
+		kfree(buf);
 
 		stats->tx_dropped++;
 
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index d000cb6..27861c4 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -524,6 +524,8 @@
 		break;
 
 	case -ENOENT:
+	case -EPIPE:
+	case -EPROTO:
 	case -ESHUTDOWN:
 		return;
 
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 8f8418d..a0012c3 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -2366,9 +2366,9 @@
 	 * 4) Get the hardware address.
 	 * 5) Put the card to sleep.
 	 */
-	if (typhoon_reset(ioaddr, WaitSleep) < 0) {
+	err = typhoon_reset(ioaddr, WaitSleep);
+	if (err < 0) {
 		err_msg = "could not reset 3XP";
-		err = -EIO;
 		goto error_out_dma;
 	}
 
@@ -2382,24 +2382,25 @@
 	typhoon_init_interface(tp);
 	typhoon_init_rings(tp);
 
-	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
+	err = typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST);
+	if (err < 0) {
 		err_msg = "cannot boot 3XP sleep image";
-		err = -EIO;
 		goto error_out_reset;
 	}
 
 	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
-	if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
+	err = typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp);
+	if (err < 0) {
 		err_msg = "cannot read MAC address";
-		err = -EIO;
 		goto error_out_reset;
 	}
 
 	*(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
 	*(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
 
-	if(!is_valid_ether_addr(dev->dev_addr)) {
+	if (!is_valid_ether_addr(dev->dev_addr)) {
 		err_msg = "Could not obtain valid ethernet address, aborting";
+		err = -EIO;
 		goto error_out_reset;
 	}
 
@@ -2407,7 +2408,8 @@
 	 * later when we print out the version reported.
 	 */
 	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
-	if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
+	err = typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp);
+	if (err < 0) {
 		err_msg = "Could not get Sleep Image version";
 		goto error_out_reset;
 	}
@@ -2424,9 +2426,9 @@
 	if(xp_resp[0].numDesc != 0)
 		tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
 
-	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
+	err = typhoon_sleep(tp, PCI_D3hot, 0);
+	if (err < 0) {
 		err_msg = "cannot put adapter to sleep";
-		err = -EIO;
 		goto error_out_reset;
 	}
 
@@ -2449,7 +2451,8 @@
 	dev->features = dev->hw_features |
 		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM;
 
-	if(register_netdev(dev) < 0) {
+	err = register_netdev(dev);
+	if (err < 0) {
 		err_msg = "unable to register netdev";
 		goto error_out_reset;
 	}
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index be7ec5a..744ed6d 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1023,6 +1023,18 @@
 		goto out;
 	}
 
+	/* The Ethernet switch we are interfaced with needs packets to be at
+	 * least 64 bytes (including FCS) otherwise they will be discarded when
+	 * they enter the switch port logic. When Broadcom tags are enabled, we
+	 * need to make sure that packets are at least 68 bytes
+	 * (including FCS and tag) because the length verification is done after
+	 * the Broadcom tag is stripped off the ingress packet.
+	 */
+	if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
+		ret = NETDEV_TX_OK;
+		goto out;
+	}
+
 	/* Insert TSB and checksum infos */
 	if (priv->tsb_en) {
 		skb = bcm_sysport_insert_tsb(skb, dev);
@@ -1032,20 +1044,7 @@
 		}
 	}
 
-	/* The Ethernet switch we are interfaced with needs packets to be at
-	 * least 64 bytes (including FCS) otherwise they will be discarded when
-	 * they enter the switch port logic. When Broadcom tags are enabled, we
-	 * need to make sure that packets are at least 68 bytes
-	 * (including FCS and tag) because the length verification is done after
-	 * the Broadcom tag is stripped off the ingress packet.
-	 */
-	if (skb_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
-		ret = NETDEV_TX_OK;
-		goto out;
-	}
-
-	skb_len = skb->len < ETH_ZLEN + ENET_BRCM_TAG_LEN ?
-			ETH_ZLEN + ENET_BRCM_TAG_LEN : skb->len;
+	skb_len = skb->len;
 
 	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
 	if (dma_mapping_error(kdev, mapping)) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 4febe60..5d958b5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13293,17 +13293,15 @@
 	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
 
-	/* VF with OLD Hypervisor or old PF do not support filtering */
 	if (IS_PF(bp)) {
 		if (chip_is_e1x)
 			bp->accept_any_vlan = true;
 		else
 			dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-#ifdef CONFIG_BNX2X_SRIOV
-	} else if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
-		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-#endif
 	}
+	/* For VF we'll know whether to enable VLAN filtering after
+	 * getting a response to CHANNEL_TLV_ACQUIRE from PF.
+	 */
 
 	dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
 	dev->features |= NETIF_F_HIGHDMA;
@@ -13735,7 +13733,7 @@
 	if (!netif_running(bp->dev)) {
 		DP(BNX2X_MSG_PTP,
 		   "PTP adjfreq called while the interface is down\n");
-		return -EFAULT;
+		return -ENETDOWN;
 	}
 
 	if (ppb < 0) {
@@ -13794,6 +13792,12 @@
 {
 	struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
 
+	if (!netif_running(bp->dev)) {
+		DP(BNX2X_MSG_PTP,
+		   "PTP adjtime called while the interface is down\n");
+		return -ENETDOWN;
+	}
+
 	DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);
 
 	timecounter_adjtime(&bp->timecounter, delta);
@@ -13806,6 +13810,12 @@
 	struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
 	u64 ns;
 
+	if (!netif_running(bp->dev)) {
+		DP(BNX2X_MSG_PTP,
+		   "PTP gettime called while the interface is down\n");
+		return -ENETDOWN;
+	}
+
 	ns = timecounter_read(&bp->timecounter);
 
 	DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns);
@@ -13821,6 +13831,12 @@
 	struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
 	u64 ns;
 
+	if (!netif_running(bp->dev)) {
+		DP(BNX2X_MSG_PTP,
+		   "PTP settime called while the interface is down\n");
+		return -ENETDOWN;
+	}
+
 	ns = timespec64_to_ns(ts);
 
 	DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns);
@@ -13988,6 +14004,14 @@
 		rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
 		if (rc)
 			goto init_one_freemem;
+
+#ifdef CONFIG_BNX2X_SRIOV
+		/* VF with OLD Hypervisor or old PF do not support filtering */
+		if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
+			dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+			dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+		}
+#endif
 	}
 
 	/* Enable SRIOV if capability found in configuration space */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 3f77d08..c6e0591 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -434,7 +434,9 @@
 
 	/* Add/Remove the filter */
 	rc = bnx2x_config_vlan_mac(bp, &ramrod);
-	if (rc && rc != -EEXIST) {
+	if (rc == -EEXIST)
+		return 0;
+	if (rc) {
 		BNX2X_ERR("Failed to %s %s\n",
 			  filter->add ? "add" : "delete",
 			  (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ?
@@ -444,6 +446,8 @@
 		return rc;
 	}
 
+	filter->applied = true;
+
 	return 0;
 }
 
@@ -471,6 +475,8 @@
 		BNX2X_ERR("Managed only %d/%d filters - rolling back\n",
 			  i, filters->count + 1);
 		while (--i >= 0) {
+			if (!filters->filters[i].applied)
+				continue;
 			filters->filters[i].add = !filters->filters[i].add;
 			bnx2x_vf_mac_vlan_config(bp, vf, qid,
 						 &filters->filters[i],
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 7a6d406..888d0b6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -114,6 +114,7 @@
 	(BNX2X_VF_FILTER_MAC | BNX2X_VF_FILTER_VLAN) /*shortcut*/
 
 	bool add;
+	bool applied;
 	u8 *mac;
 	u16 vid;
 };
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index bfae300..c2d327d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -868,7 +868,7 @@
 	struct bnx2x *bp = netdev_priv(dev);
 	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
 	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
-	int rc, i = 0;
+	int rc = 0, i = 0;
 	struct netdev_hw_addr *ha;
 
 	if (bp->state != BNX2X_STATE_OPEN) {
@@ -883,6 +883,15 @@
 	/* Get Rx mode requested */
 	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
 
+	/* We support PFVF_MAX_MULTICAST_PER_VF mcast addresses tops */
+	if (netdev_mc_count(dev) > PFVF_MAX_MULTICAST_PER_VF) {
+		DP(NETIF_MSG_IFUP,
+		   "VF supports not more than %d multicast MAC addresses\n",
+		   PFVF_MAX_MULTICAST_PER_VF);
+		rc = -EINVAL;
+		goto out;
+	}
+
 	netdev_for_each_mc_addr(ha, dev) {
 		DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
 		   bnx2x_mc_addr(ha));
@@ -890,16 +899,6 @@
 		i++;
 	}
 
-	/* We support four PFVF_MAX_MULTICAST_PER_VF mcast
-	  * addresses tops
-	  */
-	if (i >= PFVF_MAX_MULTICAST_PER_VF) {
-		DP(NETIF_MSG_IFUP,
-		   "VF supports not more than %d multicast MAC addresses\n",
-		   PFVF_MAX_MULTICAST_PER_VF);
-		return -EINVAL;
-	}
-
 	req->n_multicast = i;
 	req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
 	req->vf_qid = 0;
@@ -924,7 +923,7 @@
 out:
 	bnx2x_vfpf_finalize(bp, &req->first_tlv);
 
-	return 0;
+	return rc;
 }
 
 /* request pf to add a vlan for the vf */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 333df54..bbb3641 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2381,6 +2381,18 @@
 	return 0;
 }
 
+static void bnxt_init_cp_rings(struct bnxt *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
+		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+
+		ring->fw_ring_id = INVALID_HW_RING_ID;
+	}
+}
+
 static int bnxt_init_rx_rings(struct bnxt *bp)
 {
 	int i, rc = 0;
@@ -3800,6 +3812,30 @@
 	return rc;
 }
 
+static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
+{
+	int rc;
+
+	if (BNXT_PF(bp)) {
+		struct hwrm_func_cfg_input req = {0};
+
+		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+		req.fid = cpu_to_le16(0xffff);
+		req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
+		req.async_event_cr = cpu_to_le16(idx);
+		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	} else {
+		struct hwrm_func_vf_cfg_input req = {0};
+
+		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
+		req.enables =
+			cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
+		req.async_event_cr = cpu_to_le16(idx);
+		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	}
+	return rc;
+}
+
 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
 {
 	int i, rc = 0;
@@ -3816,6 +3852,12 @@
 			goto err_out;
 		BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
 		bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
+
+		if (!i) {
+			rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
+			if (rc)
+				netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
+		}
 	}
 
 	for (i = 0; i < bp->tx_nr_rings; i++) {
@@ -4670,6 +4712,7 @@
 
 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
 {
+	bnxt_init_cp_rings(bp);
 	bnxt_init_rx_rings(bp);
 	bnxt_init_tx_rings(bp);
 	bnxt_init_ring_grps(bp, irq_re_init);
@@ -5102,8 +5145,9 @@
 		bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
 				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
 	}
-	link_info->support_auto_speeds =
-		le16_to_cpu(resp->supported_speeds_auto_mode);
+	if (resp->supported_speeds_auto_mode)
+		link_info->support_auto_speeds =
+			le16_to_cpu(resp->supported_speeds_auto_mode);
 
 hwrm_phy_qcaps_exit:
 	mutex_unlock(&bp->hwrm_cmd_lock);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 0975af2..3480b30 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1,7 +1,7 @@
 /*
  * Broadcom GENET (Gigabit Ethernet) controller driver
  *
- * Copyright (c) 2014 Broadcom Corporation
+ * Copyright (c) 2014-2017 Broadcom
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -778,8 +778,9 @@
 	STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
 	/* Misc UniMAC counters */
 	STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
-			UMAC_RBUF_OVFL_CNT),
-	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
+			UMAC_RBUF_OVFL_CNT_V1),
+	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt,
+			UMAC_RBUF_ERR_CNT_V1),
 	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
 	STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
 	STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
@@ -821,6 +822,45 @@
 	}
 }
 
+static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset)
+{
+	u16 new_offset;
+	u32 val;
+
+	switch (offset) {
+	case UMAC_RBUF_OVFL_CNT_V1:
+		if (GENET_IS_V2(priv))
+			new_offset = RBUF_OVFL_CNT_V2;
+		else
+			new_offset = RBUF_OVFL_CNT_V3PLUS;
+
+		val = bcmgenet_rbuf_readl(priv,	new_offset);
+		/* clear if overflowed */
+		if (val == ~0)
+			bcmgenet_rbuf_writel(priv, 0, new_offset);
+		break;
+	case UMAC_RBUF_ERR_CNT_V1:
+		if (GENET_IS_V2(priv))
+			new_offset = RBUF_ERR_CNT_V2;
+		else
+			new_offset = RBUF_ERR_CNT_V3PLUS;
+
+		val = bcmgenet_rbuf_readl(priv,	new_offset);
+		/* clear if overflowed */
+		if (val == ~0)
+			bcmgenet_rbuf_writel(priv, 0, new_offset);
+		break;
+	default:
+		val = bcmgenet_umac_readl(priv, offset);
+		/* clear if overflowed */
+		if (val == ~0)
+			bcmgenet_umac_writel(priv, 0, offset);
+		break;
+	}
+
+	return val;
+}
+
 static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
 {
 	int i, j = 0;
@@ -836,19 +876,28 @@
 		case BCMGENET_STAT_NETDEV:
 		case BCMGENET_STAT_SOFT:
 			continue;
-		case BCMGENET_STAT_MIB_RX:
-		case BCMGENET_STAT_MIB_TX:
 		case BCMGENET_STAT_RUNT:
-			if (s->type != BCMGENET_STAT_MIB_RX)
-				offset = BCMGENET_STAT_OFFSET;
+			offset += BCMGENET_STAT_OFFSET;
+			/* fall through */
+		case BCMGENET_STAT_MIB_TX:
+			offset += BCMGENET_STAT_OFFSET;
+			/* fall through */
+		case BCMGENET_STAT_MIB_RX:
 			val = bcmgenet_umac_readl(priv,
 						  UMAC_MIB_START + j + offset);
+			offset = 0;	/* Reset Offset */
 			break;
 		case BCMGENET_STAT_MISC:
-			val = bcmgenet_umac_readl(priv, s->reg_offset);
-			/* clear if overflowed */
-			if (val == ~0)
-				bcmgenet_umac_writel(priv, 0, s->reg_offset);
+			if (GENET_IS_V1(priv)) {
+				val = bcmgenet_umac_readl(priv, s->reg_offset);
+				/* clear if overflowed */
+				if (val == ~0)
+					bcmgenet_umac_writel(priv, 0,
+							     s->reg_offset);
+			} else {
+				val = bcmgenet_update_stat_misc(priv,
+								s->reg_offset);
+			}
 			break;
 		}
 
@@ -2464,24 +2513,28 @@
 /* Interrupt bottom half */
 static void bcmgenet_irq_task(struct work_struct *work)
 {
+	unsigned long flags;
+	unsigned int status;
 	struct bcmgenet_priv *priv = container_of(
 			work, struct bcmgenet_priv, bcmgenet_irq_work);
 
 	netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
 
-	if (priv->irq0_stat & UMAC_IRQ_MPD_R) {
-		priv->irq0_stat &= ~UMAC_IRQ_MPD_R;
+	spin_lock_irqsave(&priv->lock, flags);
+	status = priv->irq0_stat;
+	priv->irq0_stat = 0;
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	if (status & UMAC_IRQ_MPD_R) {
 		netif_dbg(priv, wol, priv->dev,
 			  "magic packet detected, waking up\n");
 		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
 	}
 
 	/* Link UP/DOWN event */
-	if (priv->irq0_stat & UMAC_IRQ_LINK_EVENT) {
+	if (status & UMAC_IRQ_LINK_EVENT)
 		phy_mac_interrupt(priv->phydev,
-				  !!(priv->irq0_stat & UMAC_IRQ_LINK_UP));
-		priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT;
-	}
+				  !!(status & UMAC_IRQ_LINK_UP));
 }
 
 /* bcmgenet_isr1: handle Rx and Tx priority queues */
@@ -2490,22 +2543,21 @@
 	struct bcmgenet_priv *priv = dev_id;
 	struct bcmgenet_rx_ring *rx_ring;
 	struct bcmgenet_tx_ring *tx_ring;
-	unsigned int index;
+	unsigned int index, status;
 
-	/* Save irq status for bottom-half processing. */
-	priv->irq1_stat =
-		bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
+	/* Read irq status */
+	status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
 		~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
 
 	/* clear interrupts */
-	bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
+	bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR);
 
 	netif_dbg(priv, intr, priv->dev,
-		  "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
+		  "%s: IRQ=0x%x\n", __func__, status);
 
 	/* Check Rx priority queue interrupts */
 	for (index = 0; index < priv->hw_params->rx_queues; index++) {
-		if (!(priv->irq1_stat & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
+		if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
 			continue;
 
 		rx_ring = &priv->rx_rings[index];
@@ -2518,7 +2570,7 @@
 
 	/* Check Tx priority queue interrupts */
 	for (index = 0; index < priv->hw_params->tx_queues; index++) {
-		if (!(priv->irq1_stat & BIT(index)))
+		if (!(status & BIT(index)))
 			continue;
 
 		tx_ring = &priv->tx_rings[index];
@@ -2538,19 +2590,20 @@
 	struct bcmgenet_priv *priv = dev_id;
 	struct bcmgenet_rx_ring *rx_ring;
 	struct bcmgenet_tx_ring *tx_ring;
+	unsigned int status;
+	unsigned long flags;
 
-	/* Save irq status for bottom-half processing. */
-	priv->irq0_stat =
-		bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
+	/* Read irq status */
+	status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
 		~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
 
 	/* clear interrupts */
-	bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
+	bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR);
 
 	netif_dbg(priv, intr, priv->dev,
-		  "IRQ=0x%x\n", priv->irq0_stat);
+		  "IRQ=0x%x\n", status);
 
-	if (priv->irq0_stat & UMAC_IRQ_RXDMA_DONE) {
+	if (status & UMAC_IRQ_RXDMA_DONE) {
 		rx_ring = &priv->rx_rings[DESC_INDEX];
 
 		if (likely(napi_schedule_prep(&rx_ring->napi))) {
@@ -2559,7 +2612,7 @@
 		}
 	}
 
-	if (priv->irq0_stat & UMAC_IRQ_TXDMA_DONE) {
+	if (status & UMAC_IRQ_TXDMA_DONE) {
 		tx_ring = &priv->tx_rings[DESC_INDEX];
 
 		if (likely(napi_schedule_prep(&tx_ring->napi))) {
@@ -2568,20 +2621,21 @@
 		}
 	}
 
-	if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
-				UMAC_IRQ_PHY_DET_F |
-				UMAC_IRQ_LINK_EVENT |
-				UMAC_IRQ_HFB_SM |
-				UMAC_IRQ_HFB_MM |
-				UMAC_IRQ_MPD_R)) {
-		/* all other interested interrupts handled in bottom half */
-		schedule_work(&priv->bcmgenet_irq_work);
+	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
+		status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
+		wake_up(&priv->wq);
 	}
 
-	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
-	    priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
-		priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
-		wake_up(&priv->wq);
+	/* all other interested interrupts handled in bottom half */
+	status &= (UMAC_IRQ_LINK_EVENT |
+		   UMAC_IRQ_MPD_R);
+	if (status) {
+		/* Save irq status for bottom-half processing. */
+		spin_lock_irqsave(&priv->lock, flags);
+		priv->irq0_stat |= status;
+		spin_unlock_irqrestore(&priv->lock, flags);
+
+		schedule_work(&priv->bcmgenet_irq_work);
 	}
 
 	return IRQ_HANDLED;
@@ -2808,6 +2862,8 @@
 err_fini_dma:
 	bcmgenet_fini_dma(priv);
 err_clk_disable:
+	if (priv->internal_phy)
+		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
 	clk_disable_unprepare(priv->clk);
 	return ret;
 }
@@ -3184,6 +3240,12 @@
 	 */
 	gphy_rev = reg & 0xffff;
 
+	/* This is reserved so should require special treatment */
+	if (gphy_rev == 0 || gphy_rev == 0x01ff) {
+		pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
+		return;
+	}
+
 	/* This is the good old scheme, just GPHY major, no minor nor patch */
 	if ((gphy_rev & 0xf0) != 0)
 		priv->gphy_rev = gphy_rev << 8;
@@ -3192,12 +3254,6 @@
 	else if ((gphy_rev & 0xff00) != 0)
 		priv->gphy_rev = gphy_rev;
 
-	/* This is reserved so should require special treatment */
-	else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
-		pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
-		return;
-	}
-
 #ifdef CONFIG_PHYS_ADDR_T_64BIT
 	if (!(params->flags & GENET_HAS_40BITS))
 		pr_warn("GENET does not support 40-bits PA\n");
@@ -3240,6 +3296,7 @@
 	const void *macaddr;
 	struct resource *r;
 	int err = -EIO;
+	const char *phy_mode_str;
 
 	/* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
 	dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
@@ -3283,6 +3340,8 @@
 		goto err;
 	}
 
+	spin_lock_init(&priv->lock);
+
 	SET_NETDEV_DEV(dev, &pdev->dev);
 	dev_set_drvdata(&pdev->dev, dev);
 	ether_addr_copy(dev->dev_addr, macaddr);
@@ -3345,6 +3404,13 @@
 		priv->clk_eee = NULL;
 	}
 
+	/* If this is an internal GPHY, power it on now, before UniMAC is
+	 * brought out of reset as absolutely no UniMAC activity is allowed
+	 */
+	if (dn && !of_property_read_string(dn, "phy-mode", &phy_mode_str) &&
+	    !strcasecmp(phy_mode_str, "internal"))
+		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
+
 	err = reset_umac(priv);
 	if (err)
 		goto err_clk_disable;
@@ -3511,6 +3577,8 @@
 	return 0;
 
 out_clk_disable:
+	if (priv->internal_phy)
+		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
 	clk_disable_unprepare(priv->clk);
 	return ret;
 }
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 1e2dc34..db7f289 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 Broadcom Corporation
+ * Copyright (c) 2014-2017 Broadcom
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -214,7 +214,9 @@
 #define  MDIO_REG_SHIFT			16
 #define  MDIO_REG_MASK			0x1F
 
-#define UMAC_RBUF_OVFL_CNT		0x61C
+#define UMAC_RBUF_OVFL_CNT_V1		0x61C
+#define RBUF_OVFL_CNT_V2		0x80
+#define RBUF_OVFL_CNT_V3PLUS		0x94
 
 #define UMAC_MPD_CTRL			0x620
 #define  MPD_EN				(1 << 0)
@@ -224,7 +226,9 @@
 
 #define UMAC_MPD_PW_MS			0x624
 #define UMAC_MPD_PW_LS			0x628
-#define UMAC_RBUF_ERR_CNT		0x634
+#define UMAC_RBUF_ERR_CNT_V1		0x634
+#define RBUF_ERR_CNT_V2			0x84
+#define RBUF_ERR_CNT_V3PLUS		0x98
 #define UMAC_MDF_ERR_CNT		0x638
 #define UMAC_MDF_CTRL			0x650
 #define UMAC_MDF_ADDR			0x654
@@ -619,11 +623,13 @@
 	struct work_struct bcmgenet_irq_work;
 	int irq0;
 	int irq1;
-	unsigned int irq0_stat;
-	unsigned int irq1_stat;
 	int wol_irq;
 	bool wol_irq_disabled;
 
+	/* shared status */
+	spinlock_t lock;
+	unsigned int irq0_stat;
+
 	/* HW descriptors/checksum variables */
 	bool desc_64b_en;
 	bool desc_rxchk_en;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index edae2dc..bb22d32 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -14226,7 +14226,9 @@
 	/* Reset PHY, otherwise the read DMA engine will be in a mode that
 	 * breaks all requests to 256 bytes.
 	 */
-	if (tg3_asic_rev(tp) == ASIC_REV_57766)
+	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5719)
 		reset_phy = true;
 
 	err = tg3_restart_hw(tp, reset_phy);
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 9e59663..0f68118 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -1930,13 +1930,13 @@
 bfa_ioc_send_enable(struct bfa_ioc *ioc)
 {
 	struct bfi_ioc_ctrl_req enable_req;
-	struct timeval tv;
 
 	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
 		    bfa_ioc_portid(ioc));
 	enable_req.clscode = htons(ioc->clscode);
-	do_gettimeofday(&tv);
-	enable_req.tv_sec = ntohl(tv.tv_sec);
+	enable_req.rsvd = htons(0);
+	/* overflow in 2106 */
+	enable_req.tv_sec = ntohl(ktime_get_real_seconds());
 	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
 }
 
@@ -1947,6 +1947,10 @@
 
 	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
 		    bfa_ioc_portid(ioc));
+	disable_req.clscode = htons(ioc->clscode);
+	disable_req.rsvd = htons(0);
+	/* overflow in 2106 */
+	disable_req.tv_sec = ntohl(ktime_get_real_seconds());
 	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
 }
 
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
index 05c1c1d..cebfe3b 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -325,7 +325,7 @@
 		return PTR_ERR(kern_buf);
 
 	rc = sscanf(kern_buf, "%x:%x", &addr, &len);
-	if (rc < 2) {
+	if (rc < 2 || len > UINT_MAX >> 2) {
 		netdev_warn(bnad->netdev, "failed to read user buffer\n");
 		kfree(kern_buf);
 		return -EINVAL;
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
index 67befed..578c7f8 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
@@ -116,8 +116,7 @@
 	int speed = 2;
 
 	if (!xcv) {
-		dev_err(&xcv->pdev->dev,
-			"XCV init not done, probe may have failed\n");
+		pr_err("XCV init not done, probe may have failed\n");
 		return;
 	}
 
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
index 0f0de5b..d04a6c1 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
@@ -133,17 +133,15 @@
 		if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
 			fl6.flowi6_oif = sin6_scope_id;
 		dst = ip6_route_output(&init_net, NULL, &fl6);
-		if (!dst)
-			goto out;
-		if (!cxgb_our_interface(lldi, get_real_dev,
-					ip6_dst_idev(dst)->dev) &&
-		    !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
+		if (dst->error ||
+		    (!cxgb_our_interface(lldi, get_real_dev,
+					 ip6_dst_idev(dst)->dev) &&
+		     !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK))) {
 			dst_release(dst);
-			dst = NULL;
+			return NULL;
 		}
 	}
 
-out:
 	return dst;
 }
 EXPORT_SYMBOL(cxgb_find_route6);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 5626908..1644896 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -275,8 +275,7 @@
 
 	/* Check if mac has already been added as part of uc-list */
 	for (i = 0; i < adapter->uc_macs; i++) {
-		if (ether_addr_equal((u8 *)&adapter->uc_list[i * ETH_ALEN],
-				     mac)) {
+		if (ether_addr_equal(adapter->uc_list[i].mac, mac)) {
 			/* mac already added, skip addition */
 			adapter->pmac_id[0] = adapter->pmac_id[i + 1];
 			return 0;
@@ -363,8 +362,10 @@
 		status = -EPERM;
 		goto err;
 	}
-done:
+
+	/* Remember currently programmed MAC */
 	ether_addr_copy(adapter->dev_mac, addr->sa_data);
+done:
 	ether_addr_copy(netdev->dev_addr, addr->sa_data);
 	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
 	return 0;
@@ -1679,14 +1680,12 @@
 
 static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx)
 {
-	if (ether_addr_equal((u8 *)&adapter->uc_list[uc_idx * ETH_ALEN],
-			     adapter->dev_mac)) {
+	if (ether_addr_equal(adapter->uc_list[uc_idx].mac, adapter->dev_mac)) {
 		adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0];
 		return 0;
 	}
 
-	return be_cmd_pmac_add(adapter,
-			       (u8 *)&adapter->uc_list[uc_idx * ETH_ALEN],
+	return be_cmd_pmac_add(adapter, adapter->uc_list[uc_idx].mac,
 			       adapter->if_handle,
 			       &adapter->pmac_id[uc_idx + 1], 0);
 }
@@ -1722,9 +1721,8 @@
 	}
 
 	if (adapter->update_uc_list) {
-		i = 1; /* First slot is claimed by the Primary MAC */
-
 		/* cache the uc-list in adapter array */
+		i = 0;
 		netdev_for_each_uc_addr(ha, netdev) {
 			ether_addr_copy(adapter->uc_list[i].mac, ha->addr);
 			i++;
@@ -3639,8 +3637,10 @@
 {
 	/* Don't delete MAC on BE3 VFs without FILTMGMT privilege  */
 	if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
-	    check_privilege(adapter, BE_PRIV_FILTMGMT))
+	    check_privilege(adapter, BE_PRIV_FILTMGMT)) {
 		be_dev_mac_del(adapter, adapter->pmac_id[0]);
+		eth_zero_addr(adapter->dev_mac);
+	}
 
 	be_clear_uc_list(adapter);
 	be_clear_mc_list(adapter);
@@ -3794,12 +3794,27 @@
 	if (status)
 		return status;
 
-	/* Don't add MAC on BE3 VFs without FILTMGMT privilege */
-	if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
-	    check_privilege(adapter, BE_PRIV_FILTMGMT)) {
+	/* Normally this condition is true, as ->dev_mac is zeroed.
+	 * But on BE3 VFs the initial MAC is pre-programmed by the PF and
+	 * a subsequent be_dev_mac_add() can fail (after a fresh boot).
+	 */
+	if (!ether_addr_equal(adapter->dev_mac, adapter->netdev->dev_addr)) {
+		int old_pmac_id = -1;
+
+		/* Remember old programmed MAC if any - can happen on BE3 VF */
+		if (!is_zero_ether_addr(adapter->dev_mac))
+			old_pmac_id = adapter->pmac_id[0];
+
 		status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
 		if (status)
 			return status;
+
+		/* Delete the old programmed MAC as we successfully programmed
+		 * a new MAC
+		 */
+		if (old_pmac_id >= 0 && old_pmac_id != adapter->pmac_id[0])
+			be_dev_mac_del(adapter, old_pmac_id);
+
 		ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
 	}
 
@@ -4573,6 +4588,10 @@
 
 		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
 		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+
+		/* Initial MAC for BE3 VFs is already programmed by PF */
+		if (BEx_chip(adapter) && be_virtfn(adapter))
+			memcpy(adapter->dev_mac, mac, ETH_ALEN);
 	}
 
 	return 0;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 12aef1b..9170918 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -172,10 +172,12 @@
 #endif /* CONFIG_M5272 */
 
 /* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
+ *
+ * 2048 byte skbufs are allocated. However, alignment requirements
+ * vary between FEC variants. Worst case is 64, so round down by 64.
  */
-#define PKT_MAXBUF_SIZE		1522
+#define PKT_MAXBUF_SIZE		(round_down(2048 - 64, 64))
 #define PKT_MINBUF_SIZE		64
-#define PKT_MAXBLR_SIZE		1536
 
 /* FEC receive acceleration */
 #define FEC_RACC_IPDIS		(1 << 1)
@@ -813,6 +815,12 @@
 		for (i = 0; i < txq->bd.ring_size; i++) {
 			/* Initialize the BD for every fragment in the page. */
 			bdp->cbd_sc = cpu_to_fec16(0);
+			if (bdp->cbd_bufaddr &&
+			    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
+				dma_unmap_single(&fep->pdev->dev,
+						 fec32_to_cpu(bdp->cbd_bufaddr),
+						 fec16_to_cpu(bdp->cbd_datlen),
+						 DMA_TO_DEVICE);
 			if (txq->tx_skbuff[i]) {
 				dev_kfree_skb_any(txq->tx_skbuff[i]);
 				txq->tx_skbuff[i] = NULL;
@@ -847,7 +855,7 @@
 	for (i = 0; i < fep->num_rx_queues; i++) {
 		rxq = fep->rx_queue[i];
 		writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
-		writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
+		writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
 
 		/* enable DMA1/2 */
 		if (i)
@@ -2923,6 +2931,7 @@
 	struct netdev_hw_addr *ha;
 	unsigned int i, bit, data, crc, tmp;
 	unsigned char hash;
+	unsigned int hash_high = 0, hash_low = 0;
 
 	if (ndev->flags & IFF_PROMISC) {
 		tmp = readl(fep->hwp + FEC_R_CNTRL);
@@ -2945,11 +2954,7 @@
 		return;
 	}
 
-	/* Clear filter and add the addresses in hash register
-	 */
-	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
-	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
-
+	/* Add the addresses in hash register */
 	netdev_for_each_mc_addr(ha, ndev) {
 		/* calculate crc32 value of mac address */
 		crc = 0xffffffff;
@@ -2967,16 +2972,14 @@
 		 */
 		hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;
 
-		if (hash > 31) {
-			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
-			tmp |= 1 << (hash - 32);
-			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
-		} else {
-			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
-			tmp |= 1 << hash;
-			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
-		}
+		if (hash > 31)
+			hash_high |= 1 << (hash - 32);
+		else
+			hash_low |= 1 << hash;
 	}
+
+	writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+	writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
 }
 
 /* Set a MAC change in hardware. */
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index b8778e7..7c6c146 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -404,7 +404,7 @@
 	send_map_query(adapter);
 	for (i = 0; i < rxadd_subcrqs; i++) {
 		init_rx_pool(adapter, &adapter->rx_pool[i],
-			     IBMVNIC_BUFFS_PER_POOL, i,
+			     adapter->req_rx_add_entries_per_subcrq, i,
 			     be64_to_cpu(size_array[i]), 1);
 		if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) {
 			dev_err(dev, "Couldn't alloc rx pool\n");
@@ -419,23 +419,23 @@
 	for (i = 0; i < tx_subcrqs; i++) {
 		tx_pool = &adapter->tx_pool[i];
 		tx_pool->tx_buff =
-		    kcalloc(adapter->max_tx_entries_per_subcrq,
+		    kcalloc(adapter->req_tx_entries_per_subcrq,
 			    sizeof(struct ibmvnic_tx_buff), GFP_KERNEL);
 		if (!tx_pool->tx_buff)
 			goto tx_pool_alloc_failed;
 
 		if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
-					 adapter->max_tx_entries_per_subcrq *
+					 adapter->req_tx_entries_per_subcrq *
 					 adapter->req_mtu))
 			goto tx_ltb_alloc_failed;
 
 		tx_pool->free_map =
-		    kcalloc(adapter->max_tx_entries_per_subcrq,
+		    kcalloc(adapter->req_tx_entries_per_subcrq,
 			    sizeof(int), GFP_KERNEL);
 		if (!tx_pool->free_map)
 			goto tx_fm_alloc_failed;
 
-		for (j = 0; j < adapter->max_tx_entries_per_subcrq; j++)
+		for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
 			tx_pool->free_map[j] = j;
 
 		tx_pool->consumer_index = 0;
@@ -705,6 +705,7 @@
 	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
 	struct device *dev = &adapter->vdev->dev;
 	struct ibmvnic_tx_buff *tx_buff = NULL;
+	struct ibmvnic_sub_crq_queue *tx_scrq;
 	struct ibmvnic_tx_pool *tx_pool;
 	unsigned int tx_send_failed = 0;
 	unsigned int tx_map_failed = 0;
@@ -724,6 +725,7 @@
 	int ret = 0;
 
 	tx_pool = &adapter->tx_pool[queue_num];
+	tx_scrq = adapter->tx_scrq[queue_num];
 	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
 	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
 				   be32_to_cpu(adapter->login_rsp_buf->
@@ -744,7 +746,7 @@
 
 	tx_pool->consumer_index =
 	    (tx_pool->consumer_index + 1) %
-		adapter->max_tx_entries_per_subcrq;
+		adapter->req_tx_entries_per_subcrq;
 
 	tx_buff = &tx_pool->tx_buff[index];
 	tx_buff->skb = skb;
@@ -817,7 +819,7 @@
 
 		if (tx_pool->consumer_index == 0)
 			tx_pool->consumer_index =
-				adapter->max_tx_entries_per_subcrq - 1;
+				adapter->req_tx_entries_per_subcrq - 1;
 		else
 			tx_pool->consumer_index--;
 
@@ -826,6 +828,14 @@
 		ret = NETDEV_TX_BUSY;
 		goto out;
 	}
+
+	atomic_inc(&tx_scrq->used);
+
+	if (atomic_read(&tx_scrq->used) >= adapter->req_tx_entries_per_subcrq) {
+		netdev_info(netdev, "Stopping queue %d\n", queue_num);
+		netif_stop_subqueue(netdev, queue_num);
+	}
+
 	tx_packets++;
 	tx_bytes += skb->len;
 	txq->trans_start = jiffies;
@@ -1220,6 +1230,7 @@
 	scrq->adapter = adapter;
 	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
 	scrq->cur = 0;
+	atomic_set(&scrq->used, 0);
 	scrq->rx_skb_top = NULL;
 	spin_lock_init(&scrq->lock);
 
@@ -1368,14 +1379,28 @@
 						 DMA_TO_DEVICE);
 			}
 
-			if (txbuff->last_frag)
+			if (txbuff->last_frag) {
+				atomic_dec(&scrq->used);
+
+				if (atomic_read(&scrq->used) <=
+				    (adapter->req_tx_entries_per_subcrq / 2) &&
+				    netif_subqueue_stopped(adapter->netdev,
+							   txbuff->skb)) {
+					netif_wake_subqueue(adapter->netdev,
+							    scrq->pool_index);
+					netdev_dbg(adapter->netdev,
+						   "Started queue %d\n",
+						   scrq->pool_index);
+				}
+
 				dev_kfree_skb_any(txbuff->skb);
+			}
 
 			adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
 						     producer_index] = index;
 			adapter->tx_pool[pool].producer_index =
 			    (adapter->tx_pool[pool].producer_index + 1) %
-			    adapter->max_tx_entries_per_subcrq;
+			    adapter->req_tx_entries_per_subcrq;
 		}
 		/* remove tx_comp scrq*/
 		next->tx_comp.first = 0;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index dd775d9..892eda3 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -863,6 +863,7 @@
 	spinlock_t lock;
 	struct sk_buff *rx_skb_top;
 	struct ibmvnic_adapter *adapter;
+	atomic_t used;
 };
 
 struct ibmvnic_long_term_buff {
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 0641c00..afb7ebe 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -398,6 +398,7 @@
 #define E1000_ICR_LSC           0x00000004 /* Link Status Change */
 #define E1000_ICR_RXSEQ         0x00000008 /* Rx sequence error */
 #define E1000_ICR_RXDMT0        0x00000010 /* Rx desc min. threshold (0) */
+#define E1000_ICR_RXO           0x00000040 /* Receiver Overrun */
 #define E1000_ICR_RXT0          0x00000080 /* Rx timer intr (ring 0) */
 #define E1000_ICR_ECCER         0x00400000 /* Uncorrectable ECC Error */
 /* If this bit asserted, the driver should claim the interrupt */
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index f3aaca7..8a48656 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1364,6 +1364,9 @@
  *  Checks to see of the link status of the hardware has changed.  If a
  *  change in link status has been detected, then we read the PHY registers
  *  to get the current speed/duplex if link exists.
+ *
+ *  Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
+ *  up).
  **/
 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 {
@@ -1379,7 +1382,7 @@
 	 * Change or Rx Sequence Error interrupt.
 	 */
 	if (!mac->get_link_status)
-		return 0;
+		return 1;
 
 	/* First we want to see if the MII Status Register reports
 	 * link.  If so, then we want to get the current speed/duplex
@@ -1611,10 +1614,12 @@
 	 * different link partner.
 	 */
 	ret_val = e1000e_config_fc_after_link_up(hw);
-	if (ret_val)
+	if (ret_val) {
 		e_dbg("Error configuring flow control\n");
+		return ret_val;
+	}
 
-	return ret_val;
+	return 1;
 }
 
 static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index b322011..f457c57 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -410,6 +410,9 @@
  *  Checks to see of the link status of the hardware has changed.  If a
  *  change in link status has been detected, then we read the PHY registers
  *  to get the current speed/duplex if link exists.
+ *
+ *  Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
+ *  up).
  **/
 s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
 {
@@ -423,7 +426,7 @@
 	 * Change or Rx Sequence Error interrupt.
 	 */
 	if (!mac->get_link_status)
-		return 0;
+		return 1;
 
 	/* First we want to see if the MII Status Register reports
 	 * link.  If so, then we want to get the current speed/duplex
@@ -461,10 +464,12 @@
 	 * different link partner.
 	 */
 	ret_val = e1000e_config_fc_after_link_up(hw);
-	if (ret_val)
+	if (ret_val) {
 		e_dbg("Error configuring flow control\n");
+		return ret_val;
+	}
 
-	return ret_val;
+	return 1;
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 7017281..0feddf3 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1905,14 +1905,30 @@
 	struct net_device *netdev = data;
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
+	u32 icr;
+	bool enable = true;
 
-	hw->mac.get_link_status = true;
-
-	/* guard against interrupt when we're going down */
-	if (!test_bit(__E1000_DOWN, &adapter->state)) {
-		mod_timer(&adapter->watchdog_timer, jiffies + 1);
-		ew32(IMS, E1000_IMS_OTHER);
+	icr = er32(ICR);
+	if (icr & E1000_ICR_RXO) {
+		ew32(ICR, E1000_ICR_RXO);
+		enable = false;
+		/* napi poll will re-enable Other, make sure it runs */
+		if (napi_schedule_prep(&adapter->napi)) {
+			adapter->total_rx_bytes = 0;
+			adapter->total_rx_packets = 0;
+			__napi_schedule(&adapter->napi);
+		}
 	}
+	if (icr & E1000_ICR_LSC) {
+		ew32(ICR, E1000_ICR_LSC);
+		hw->mac.get_link_status = true;
+		/* guard against interrupt when we're going down */
+		if (!test_bit(__E1000_DOWN, &adapter->state))
+			mod_timer(&adapter->watchdog_timer, jiffies + 1);
+	}
+
+	if (enable && !test_bit(__E1000_DOWN, &adapter->state))
+		ew32(IMS, E1000_IMS_OTHER);
 
 	return IRQ_HANDLED;
 }
@@ -2683,7 +2699,8 @@
 		napi_complete_done(napi, work_done);
 		if (!test_bit(__E1000_DOWN, &adapter->state)) {
 			if (adapter->msix_entries)
-				ew32(IMS, adapter->rx_ring->ims_val);
+				ew32(IMS, adapter->rx_ring->ims_val |
+				     E1000_IMS_OTHER);
 			else
 				e1000_irq_enable(adapter);
 		}
@@ -4178,7 +4195,7 @@
 	struct e1000_hw *hw = &adapter->hw;
 
 	if (adapter->msix_entries)
-		ew32(ICS, E1000_ICS_OTHER);
+		ew32(ICS, E1000_ICS_LSC | E1000_ICS_OTHER);
 	else
 		ew32(ICS, E1000_ICS_LSC);
 }
@@ -5056,7 +5073,7 @@
 	case e1000_media_type_copper:
 		if (hw->mac.get_link_status) {
 			ret_val = hw->mac.ops.check_for_link(hw);
-			link_active = !hw->mac.get_link_status;
+			link_active = ret_val > 0;
 		} else {
 			link_active = true;
 		}
@@ -5074,7 +5091,7 @@
 		break;
 	}
 
-	if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
+	if ((ret_val == -E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
 	    (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
 		/* See e1000_kmrn_lock_loss_workaround_ich8lan() */
 		e_info("Gigabit has been disabled, downgrading speed\n");
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index d78d47b..86ff096 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -1744,6 +1744,7 @@
 	s32 ret_val = 0;
 	u16 i, phy_status;
 
+	*success = false;
 	for (i = 0; i < iterations; i++) {
 		/* Some PHYs require the MII_BMSR register to be read
 		 * twice due to the link bit being sticky.  No harm doing
@@ -1763,16 +1764,16 @@
 		ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
 		if (ret_val)
 			break;
-		if (phy_status & BMSR_LSTATUS)
+		if (phy_status & BMSR_LSTATUS) {
+			*success = true;
 			break;
+		}
 		if (usec_interval >= 1000)
 			msleep(usec_interval / 1000);
 		else
 			udelay(usec_interval);
 	}
 
-	*success = (i < iterations);
-
 	return ret_val;
 }
 
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index 4d19e46..3693ae1 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -508,8 +508,8 @@
 int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac);
 int fm10k_ndo_set_vf_vlan(struct net_device *netdev,
 			  int vf_idx, u16 vid, u8 qos, __be16 vlan_proto);
-int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, int rate,
-			int unused);
+int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx,
+			int __always_unused min_rate, int max_rate);
 int fm10k_ndo_get_vf_config(struct net_device *netdev,
 			    int vf_idx, struct ifla_vf_info *ivi);
 
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index 5f4dac0..e72fd52 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -126,6 +126,9 @@
 		struct fm10k_mbx_info *mbx = &vf_info->mbx;
 		u16 glort = vf_info->glort;
 
+		/* process the SM mailbox first to drain outgoing messages */
+		hw->mbx.ops.process(hw, &hw->mbx);
+
 		/* verify port mapping is valid, if not reset port */
 		if (vf_info->vf_flags && !fm10k_glort_valid_pf(hw, glort))
 			hw->iov.ops.reset_lport(hw, vf_info);
@@ -482,7 +485,7 @@
 }
 
 int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx,
-			int __always_unused unused, int rate)
+			int __always_unused min_rate, int max_rate)
 {
 	struct fm10k_intfc *interface = netdev_priv(netdev);
 	struct fm10k_iov_data *iov_data = interface->iov_data;
@@ -493,14 +496,15 @@
 		return -EINVAL;
 
 	/* rate limit cannot be less than 10Mbs or greater than link speed */
-	if (rate && ((rate < FM10K_VF_TC_MIN) || rate > FM10K_VF_TC_MAX))
+	if (max_rate &&
+	    (max_rate < FM10K_VF_TC_MIN || max_rate > FM10K_VF_TC_MAX))
 		return -EINVAL;
 
 	/* store values */
-	iov_data->vf_info[vf_idx].rate = rate;
+	iov_data->vf_info[vf_idx].rate = max_rate;
 
 	/* update hardware configuration */
-	hw->iov.ops.configure_tc(hw, vf_idx, rate);
+	hw->iov.ops.configure_tc(hw, vf_idx, max_rate);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 5de9378..2aae6f8 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1225,7 +1225,7 @@
 			break;
 
 		/* prevent any other reads prior to eop_desc */
-		read_barrier_depends();
+		smp_rmb();
 
 		/* if DD is not set pending work has not been completed */
 		if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE))
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 31c97e3..becffd1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -3604,7 +3604,7 @@
 			break;
 
 		/* prevent any other reads prior to eop_desc */
-		read_barrier_depends();
+		smp_rmb();
 
 		/* if the descriptor isn't done, no work yet to do */
 		if (!(eop_desc->cmd_type_offset_bsz &
@@ -4217,8 +4217,12 @@
 	if (!vsi->netdev)
 		return;
 
-	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
-		napi_enable(&vsi->q_vectors[q_idx]->napi);
+	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+		struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
+
+		if (q_vector->rx.ring || q_vector->tx.ring)
+			napi_enable(&q_vector->napi);
+	}
 }
 
 /**
@@ -4232,8 +4236,12 @@
 	if (!vsi->netdev)
 		return;
 
-	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
-		napi_disable(&vsi->q_vectors[q_idx]->napi);
+	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+		struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
+
+		if (q_vector->rx.ring || q_vector->tx.ring)
+			napi_disable(&q_vector->napi);
+	}
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 6287bf6..c543039 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -679,7 +679,7 @@
 			break;
 
 		/* prevent any other reads prior to eop_desc */
-		read_barrier_depends();
+		smp_rmb();
 
 		/* we have caught up to head, no work left to do */
 		if (tx_head == tx_desc)
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 75f2a2c..c03800d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -184,7 +184,7 @@
 			break;
 
 		/* prevent any other reads prior to eop_desc */
-		read_barrier_depends();
+		smp_rmb();
 
 		/* we have caught up to head, no work left to do */
 		if (tx_head == tx_desc)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index c6c2562..ca54f76 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3102,6 +3102,8 @@
 	/* Setup and initialize a copy of the hw vlan table array */
 	adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
 				       GFP_ATOMIC);
+	if (!adapter->shadow_vfta)
+		return -ENOMEM;
 
 	/* This call may decrease the number of queues */
 	if (igb_init_interrupt_scheme(adapter, true)) {
@@ -6660,7 +6662,7 @@
 			break;
 
 		/* prevent any other reads prior to eop_desc */
-		read_barrier_depends();
+		smp_rmb();
 
 		/* if DD is not set pending work has not been completed */
 		if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 7dff7f6..5428e39 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -810,7 +810,7 @@
 			break;
 
 		/* prevent any other reads prior to eop_desc */
-		read_barrier_depends();
+		smp_rmb();
 
 		/* if DD is not set pending work has not been completed */
 		if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 77d3039..ad33622 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -3696,10 +3696,10 @@
 	fw_cmd.ver_build = build;
 	fw_cmd.ver_sub = sub;
 	fw_cmd.hdr.checksum = 0;
-	fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
-				(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
 	fw_cmd.pad = 0;
 	fw_cmd.pad2 = 0;
+	fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
+				(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
 
 	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
 		ret_val = ixgbe_host_interface_command(hw, &fw_cmd,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 334eb96..a5428b6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1171,7 +1171,7 @@
 			break;
 
 		/* prevent any other reads prior to eop_desc */
-		read_barrier_depends();
+		smp_rmb();
 
 		/* if DD is not set pending work has not been completed */
 		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 60f0bf7..77a60aa 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -617,6 +617,8 @@
 		/* convert offset from words to bytes */
 		buffer.address = cpu_to_be32((offset + current_word) * 2);
 		buffer.length = cpu_to_be16(words_to_read * 2);
+		buffer.pad2 = 0;
+		buffer.pad3 = 0;
 
 		status = ixgbe_host_interface_command(hw, &buffer,
 						      sizeof(buffer),
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index cbf70fe..1499ce2b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -325,7 +325,7 @@
 			break;
 
 		/* prevent any other reads prior to eop_desc */
-		read_barrier_depends();
+		smp_rmb();
 
 		/* if DD is not set pending work has not been completed */
 		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index a0d1b08..7aeb7fe 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -232,7 +232,8 @@
 			dev->regs + MVMDIO_ERR_INT_MASK);
 
 	} else if (dev->err_interrupt == -EPROBE_DEFER) {
-		return -EPROBE_DEFER;
+		ret = -EPROBE_DEFER;
+		goto out_mdio;
 	}
 
 	mutex_init(&dev->lock);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 6ea10a9..fa46326 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1182,6 +1182,10 @@
 	val &= ~MVNETA_GMAC0_PORT_ENABLE;
 	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
 
+	pp->link = 0;
+	pp->duplex = -1;
+	pp->speed = 0;
+
 	udelay(200);
 }
 
@@ -1905,9 +1909,9 @@
 
 		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
 		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
+			mvneta_rx_error(pp, rx_desc);
 err_drop_frame:
 			dev->stats.rx_errors++;
-			mvneta_rx_error(pp, rx_desc);
 			/* leave the descriptor untouched */
 			continue;
 		}
@@ -2922,7 +2926,7 @@
 {
 	int queue;
 
-	for (queue = 0; queue < txq_number; queue++)
+	for (queue = 0; queue < rxq_number; queue++)
 		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index e36bebc..dae9dcf 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2304,6 +2304,17 @@
 		rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read));
 		if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) {
 			/* PCI might be offline */
+
+			/* If device removal has been requested,
+			 * do not continue retrying.
+			 */
+			if (dev->persist->interface_state &
+			    MLX4_INTERFACE_STATE_NOWAIT) {
+				mlx4_warn(dev,
+					  "communication channel is offline\n");
+				return -EIO;
+			}
+
 			msleep(100);
 			wr_toggle = swab32(readl(&priv->mfunc.comm->
 					   slave_write));
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index d4d97ca..f9897d1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -251,13 +251,9 @@
 {
 	u32 freq_khz = freq * 1000;
 	u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
-	u64 tmp_rounded =
-		roundup_pow_of_two(max_val_cycles) > max_val_cycles ?
-		roundup_pow_of_two(max_val_cycles) - 1 : UINT_MAX;
-	u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
-		max_val_cycles : tmp_rounded;
+	u64 max_val_cycles_rounded = 1ULL << fls64(max_val_cycles - 1);
 	/* calculate max possible multiplier in order to fit in 64bit */
-	u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);
+	u64 max_mul = div64_u64(ULLONG_MAX, max_val_cycles_rounded);
 
 	/* This comes from the reverse of clocksource_khz2mult */
 	return ilog2(div_u64(max_mul * freq_khz, 1000000));
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 727122d..5411ca4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1940,6 +1940,14 @@
 			       (u32)(1 << COMM_CHAN_OFFLINE_OFFSET));
 		if (!offline_bit)
 			return 0;
+
+		/* If device removal has been requested,
+		 * do not continue retrying.
+		 */
+		if (dev->persist->interface_state &
+		    MLX4_INTERFACE_STATE_NOWAIT)
+			break;
+
 		/* There are cases as part of AER/Reset flow that PF needs
 		 * around 100 msec to load. We therefore sleep for 100 msec
 		 * to allow other tasks to make use of that CPU during this
@@ -3954,6 +3962,9 @@
 	struct devlink *devlink = priv_to_devlink(priv);
 	int active_vfs = 0;
 
+	if (mlx4_is_slave(dev))
+		persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;
+
 	mutex_lock(&persist->interface_state_mutex);
 	persist->interface_state |= MLX4_INTERFACE_STATE_DELETION;
 	mutex_unlock(&persist->interface_state_mutex);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index f7fabec..4c3f1cb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -367,7 +367,7 @@
 	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
 	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
 	case MLX5_CMD_OP_QUERY_Q_COUNTER:
-	case MLX5_CMD_OP_SET_RATE_LIMIT:
+	case MLX5_CMD_OP_SET_PP_RATE_LIMIT:
 	case MLX5_CMD_OP_QUERY_RATE_LIMIT:
 	case MLX5_CMD_OP_ALLOC_PD:
 	case MLX5_CMD_OP_ALLOC_UAR:
@@ -502,7 +502,7 @@
 	MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
 	MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
 	MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
-	MLX5_COMMAND_STR_CASE(SET_RATE_LIMIT);
+	MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT);
 	MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
 	MLX5_COMMAND_STR_CASE(ALLOC_PD);
 	MLX5_COMMAND_STR_CASE(DEALLOC_PD);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 9d37229..38981db 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3038,6 +3038,7 @@
 						    struct sk_buff *skb,
 						    netdev_features_t features)
 {
+	unsigned int offset = 0;
 	struct udphdr *udph;
 	u16 proto;
 	u16 port = 0;
@@ -3047,7 +3048,7 @@
 		proto = ip_hdr(skb)->protocol;
 		break;
 	case htons(ETH_P_IPV6):
-		proto = ipv6_hdr(skb)->nexthdr;
+		proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
 		break;
 	default:
 		goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 4de3c28..331a6ca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1015,7 +1015,7 @@
 						u32 *match_criteria)
 {
 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
-	struct list_head *prev = ft->node.children.prev;
+	struct list_head *prev = &ft->node.children;
 	unsigned int candidate_index = 0;
 	struct mlx5_flow_group *fg;
 	void *match_criteria_addr;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index b3309f2..981cd1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1283,6 +1283,7 @@
 	if (err)
 		goto clean_load;
 
+	pci_save_state(pdev);
 	return 0;
 
 clean_load:
@@ -1331,9 +1332,8 @@
 
 	mlx5_enter_error_state(dev);
 	mlx5_unload_one(dev, priv, false);
-	/* In case of kernel call save the pci state and drain the health wq */
+	/* In case of kernel call drain the health wq */
 	if (state) {
-		pci_save_state(pdev);
 		mlx5_drain_health_wq(dev);
 		mlx5_pci_disable_device(dev);
 	}
@@ -1385,6 +1385,7 @@
 
 	pci_set_master(pdev);
 	pci_restore_state(pdev);
+	pci_save_state(pdev);
 
 	if (wait_vital(pdev)) {
 		dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index d0a4005..9346f39 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -303,8 +303,8 @@
 err_cmd:
 	memset(din, 0, sizeof(din));
 	memset(dout, 0, sizeof(dout));
-	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
-	MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
+	MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
+	MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
 	mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
 	return err;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
index 104902a..2be9ec5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
@@ -60,16 +60,16 @@
 	return ret_entry;
 }
 
-static int mlx5_set_rate_limit_cmd(struct mlx5_core_dev *dev,
+static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev,
 				   u32 rate, u16 index)
 {
-	u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)]   = {0};
-	u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {0};
 
-	MLX5_SET(set_rate_limit_in, in, opcode,
-		 MLX5_CMD_OP_SET_RATE_LIMIT);
-	MLX5_SET(set_rate_limit_in, in, rate_limit_index, index);
-	MLX5_SET(set_rate_limit_in, in, rate_limit, rate);
+	MLX5_SET(set_pp_rate_limit_in, in, opcode,
+		 MLX5_CMD_OP_SET_PP_RATE_LIMIT);
+	MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, index);
+	MLX5_SET(set_pp_rate_limit_in, in, rate_limit, rate);
 	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 
@@ -108,7 +108,7 @@
 		entry->refcount++;
 	} else {
 		/* new rate limit */
-		err = mlx5_set_rate_limit_cmd(dev, rate, entry->index);
+		err = mlx5_set_pp_rate_limit_cmd(dev, rate, entry->index);
 		if (err) {
 			mlx5_core_err(dev, "Failed configuring rate: %u (%d)\n",
 				      rate, err);
@@ -144,7 +144,7 @@
 	entry->refcount--;
 	if (!entry->refcount) {
 		/* need to remove rate */
-		mlx5_set_rate_limit_cmd(dev, 0, entry->index);
+		mlx5_set_pp_rate_limit_cmd(dev, 0, entry->index);
 		entry->rate = 0;
 	}
 
@@ -197,8 +197,8 @@
 	/* Clear all configured rates */
 	for (i = 0; i < table->max_size; i++)
 		if (table->rl_entry[i].rate)
-			mlx5_set_rate_limit_cmd(dev, 0,
-						table->rl_entry[i].index);
+			mlx5_set_pp_rate_limit_cmd(dev, 0,
+						   table->rl_entry[i].index);
 
 	kfree(dev->priv.rl_table.rl_entry);
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
index 07a9ba6..2f74953 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
@@ -71,9 +71,9 @@
 	struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
 	struct mlx5e_vxlan *vxlan;
 
-	spin_lock(&vxlan_db->lock);
+	spin_lock_bh(&vxlan_db->lock);
 	vxlan = radix_tree_lookup(&vxlan_db->tree, port);
-	spin_unlock(&vxlan_db->lock);
+	spin_unlock_bh(&vxlan_db->lock);
 
 	return vxlan;
 }
@@ -88,8 +88,12 @@
 	struct mlx5e_vxlan *vxlan;
 	int err;
 
-	if (mlx5e_vxlan_lookup_port(priv, port))
+	mutex_lock(&priv->state_lock);
+	vxlan = mlx5e_vxlan_lookup_port(priv, port);
+	if (vxlan) {
+		atomic_inc(&vxlan->refcount);
 		goto free_work;
+	}
 
 	if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
 		goto free_work;
@@ -99,10 +103,11 @@
 		goto err_delete_port;
 
 	vxlan->udp_port = port;
+	atomic_set(&vxlan->refcount, 1);
 
-	spin_lock_irq(&vxlan_db->lock);
+	spin_lock_bh(&vxlan_db->lock);
 	err = radix_tree_insert(&vxlan_db->tree, vxlan->udp_port, vxlan);
-	spin_unlock_irq(&vxlan_db->lock);
+	spin_unlock_bh(&vxlan_db->lock);
 	if (err)
 		goto err_free;
 
@@ -113,35 +118,39 @@
 err_delete_port:
 	mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
 free_work:
+	mutex_unlock(&priv->state_lock);
 	kfree(vxlan_work);
 }
 
-static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
-{
-	struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
-	struct mlx5e_vxlan *vxlan;
-
-	spin_lock_irq(&vxlan_db->lock);
-	vxlan = radix_tree_delete(&vxlan_db->tree, port);
-	spin_unlock_irq(&vxlan_db->lock);
-
-	if (!vxlan)
-		return;
-
-	mlx5e_vxlan_core_del_port_cmd(priv->mdev, vxlan->udp_port);
-
-	kfree(vxlan);
-}
-
 static void mlx5e_vxlan_del_port(struct work_struct *work)
 {
 	struct mlx5e_vxlan_work *vxlan_work =
 		container_of(work, struct mlx5e_vxlan_work, work);
-	struct mlx5e_priv *priv = vxlan_work->priv;
+	struct mlx5e_priv *priv         = vxlan_work->priv;
+	struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
 	u16 port = vxlan_work->port;
+	struct mlx5e_vxlan *vxlan;
+	bool remove = false;
 
-	__mlx5e_vxlan_core_del_port(priv, port);
+	mutex_lock(&priv->state_lock);
+	spin_lock_bh(&vxlan_db->lock);
+	vxlan = radix_tree_lookup(&vxlan_db->tree, port);
+	if (!vxlan)
+		goto out_unlock;
 
+	if (atomic_dec_and_test(&vxlan->refcount)) {
+		radix_tree_delete(&vxlan_db->tree, port);
+		remove = true;
+	}
+
+out_unlock:
+	spin_unlock_bh(&vxlan_db->lock);
+
+	if (remove) {
+		mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
+		kfree(vxlan);
+	}
+	mutex_unlock(&priv->state_lock);
 	kfree(vxlan_work);
 }
 
@@ -171,12 +180,11 @@
 	struct mlx5e_vxlan *vxlan;
 	unsigned int port = 0;
 
-	spin_lock_irq(&vxlan_db->lock);
+	/* Lockless since we are the only radix-tree consumers, wq is disabled */
 	while (radix_tree_gang_lookup(&vxlan_db->tree, (void **)&vxlan, port, 1)) {
 		port = vxlan->udp_port;
-		spin_unlock_irq(&vxlan_db->lock);
-		__mlx5e_vxlan_core_del_port(priv, (u16)port);
-		spin_lock_irq(&vxlan_db->lock);
+		radix_tree_delete(&vxlan_db->tree, port);
+		mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
+		kfree(vxlan);
 	}
-	spin_unlock_irq(&vxlan_db->lock);
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
index 5def12c..5ef6ae7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
@@ -36,6 +36,7 @@
 #include "en.h"
 
 struct mlx5e_vxlan {
+	atomic_t refcount;
 	u16 udp_port;
 };
 
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 6460c72..a01e6c0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -788,7 +788,7 @@
 #define MLXSW_REG_SPVM_ID 0x200F
 #define MLXSW_REG_SPVM_BASE_LEN 0x04 /* base length, without records */
 #define MLXSW_REG_SPVM_REC_LEN 0x04 /* record length */
-#define MLXSW_REG_SPVM_REC_MAX_COUNT 256
+#define MLXSW_REG_SPVM_REC_MAX_COUNT 255
 #define MLXSW_REG_SPVM_LEN (MLXSW_REG_SPVM_BASE_LEN +	\
 		    MLXSW_REG_SPVM_REC_LEN * MLXSW_REG_SPVM_REC_MAX_COUNT)
 
@@ -1757,7 +1757,7 @@
 #define MLXSW_REG_SPVMLR_ID 0x2020
 #define MLXSW_REG_SPVMLR_BASE_LEN 0x04 /* base length, without records */
 #define MLXSW_REG_SPVMLR_REC_LEN 0x04 /* record length */
-#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 256
+#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 255
 #define MLXSW_REG_SPVMLR_LEN (MLXSW_REG_SPVMLR_BASE_LEN + \
 			      MLXSW_REG_SPVMLR_REC_LEN * \
 			      MLXSW_REG_SPVMLR_REC_MAX_COUNT)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 9e31a33..8aa91dd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -1328,9 +1328,9 @@
 static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
 					    bool removing)
 {
-	if (!removing && !nh->should_offload)
+	if (!removing)
 		nh->should_offload = 1;
-	else if (removing && nh->offloaded)
+	else
 		nh->should_offload = 0;
 	nh->update = 1;
 }
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 4367dd6..0622fd0 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -25,6 +25,7 @@
 #include <linux/of_irq.h>
 #include <linux/crc32.h>
 #include <linux/crc32c.h>
+#include <linux/circ_buf.h>
 
 #include "moxart_ether.h"
 
@@ -278,6 +279,13 @@
 	return rx;
 }
 
+static int moxart_tx_queue_space(struct net_device *ndev)
+{
+	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+
+	return CIRC_SPACE(priv->tx_head, priv->tx_tail, TX_DESC_NUM);
+}
+
 static void moxart_tx_finished(struct net_device *ndev)
 {
 	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
@@ -297,6 +305,9 @@
 		tx_tail = TX_NEXT(tx_tail);
 	}
 	priv->tx_tail = tx_tail;
+	if (netif_queue_stopped(ndev) &&
+	    moxart_tx_queue_space(ndev) >= TX_WAKE_THRESHOLD)
+		netif_wake_queue(ndev);
 }
 
 static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id)
@@ -324,13 +335,18 @@
 	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
 	void *desc;
 	unsigned int len;
-	unsigned int tx_head = priv->tx_head;
+	unsigned int tx_head;
 	u32 txdes1;
 	int ret = NETDEV_TX_BUSY;
 
+	spin_lock_irq(&priv->txlock);
+
+	tx_head = priv->tx_head;
 	desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head);
 
-	spin_lock_irq(&priv->txlock);
+	if (moxart_tx_queue_space(ndev) == 1)
+		netif_stop_queue(ndev);
+
 	if (moxart_desc_read(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) {
 		net_dbg_ratelimited("no TX space for packet\n");
 		priv->stats.tx_dropped++;
diff --git a/drivers/net/ethernet/moxa/moxart_ether.h b/drivers/net/ethernet/moxa/moxart_ether.h
index 93a9563..afc32ec 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.h
+++ b/drivers/net/ethernet/moxa/moxart_ether.h
@@ -59,6 +59,7 @@
 #define TX_NEXT(N)		(((N) + 1) & (TX_DESC_NUM_MASK))
 #define TX_BUF_SIZE		1600
 #define TX_BUF_SIZE_MAX		(TX_DESC1_BUF_SIZE_MASK+1)
+#define TX_WAKE_THRESHOLD	16
 
 #define RX_DESC_NUM		64
 #define RX_DESC_NUM_MASK	(RX_DESC_NUM-1)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 0c42c24..ed014bd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -373,8 +373,9 @@
 		u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val;
 		u32 cxt_size = CONN_CXT_SIZE(p_hwfn);
 		u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+		u32 align = elems_per_page * DQ_RANGE_ALIGN;
 
-		p_conn->cid_count = roundup(p_conn->cid_count, elems_per_page);
+		p_conn->cid_count = roundup(p_conn->cid_count, align);
 	}
 }
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 62ae55b..a3360cb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -187,6 +187,8 @@
 	/* If need to reuse or there's no replacement buffer, repost this */
 	if (rc)
 		goto out_post;
+	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
+			 cdev->ll2->rx_size, DMA_FROM_DEVICE);
 
 	skb = build_skb(buffer->data, 0);
 	if (!skb) {
@@ -441,7 +443,7 @@
 static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
 				      struct qed_ll2_info *p_ll2_conn,
 				      union core_rx_cqe_union *p_cqe,
-				      unsigned long lock_flags,
+				      unsigned long *p_lock_flags,
 				      bool b_last_cqe)
 {
 	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
@@ -462,10 +464,10 @@
 			  "Mismatch between active_descq and the LL2 Rx chain\n");
 	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
 
-	spin_unlock_irqrestore(&p_rx->lock, lock_flags);
+	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
 	qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id,
 				    p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
-	spin_lock_irqsave(&p_rx->lock, lock_flags);
+	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
 
 	return 0;
 }
@@ -505,7 +507,8 @@
 			break;
 		case CORE_RX_CQE_TYPE_REGULAR:
 			rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
-							cqe, flags, b_last_cqe);
+							cqe, &flags,
+							b_last_cqe);
 			break;
 		default:
 			rc = -EIO;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 11623aa..10d3a9f 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -941,14 +941,10 @@
 	/* Receive error message handling */
 	priv->rx_over_errors =  priv->stats[RAVB_BE].rx_over_errors;
 	priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
-	if (priv->rx_over_errors != ndev->stats.rx_over_errors) {
+	if (priv->rx_over_errors != ndev->stats.rx_over_errors)
 		ndev->stats.rx_over_errors = priv->rx_over_errors;
-		netif_err(priv, rx_err, ndev, "Receive Descriptor Empty\n");
-	}
-	if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) {
+	if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
 		ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
-		netif_err(priv, rx_err, ndev, "Receive FIFO Overflow\n");
-	}
 out:
 	return budget - quota;
 }
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 2140ded..b6816ae 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3087,18 +3087,37 @@
 	/* ioremap the TSU registers */
 	if (mdp->cd->tsu) {
 		struct resource *rtsu;
+
 		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-		mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
-		if (IS_ERR(mdp->tsu_addr)) {
-			ret = PTR_ERR(mdp->tsu_addr);
+		if (!rtsu) {
+			dev_err(&pdev->dev, "no TSU resource\n");
+			ret = -ENODEV;
+			goto out_release;
+		}
+		/* We can only request the  TSU region  for the first port
+		 * of the two  sharing this TSU for the probe to succeed...
+		 */
+		if (devno % 2 == 0 &&
+		    !devm_request_mem_region(&pdev->dev, rtsu->start,
+					     resource_size(rtsu),
+					     dev_name(&pdev->dev))) {
+			dev_err(&pdev->dev, "can't request TSU resource.\n");
+			ret = -EBUSY;
+			goto out_release;
+		}
+		mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start,
+					     resource_size(rtsu));
+		if (!mdp->tsu_addr) {
+			dev_err(&pdev->dev, "TSU region ioremap() failed.\n");
+			ret = -ENOMEM;
 			goto out_release;
 		}
 		mdp->port = devno % 2;
 		ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
 	}
 
-	/* initialize first or needed device */
-	if (!devno || pd->needs_init) {
+	/* Need to init only the first port of the two sharing a TSU */
+	if (devno % 2 == 0) {
 		if (mdp->cd->chip_reset)
 			mdp->cd->chip_reset(ndev);
 
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 1d85109..3d5d5d54 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -4967,7 +4967,7 @@
 		 * MCFW do not support VFs.
 		 */
 		rc = efx_ef10_vport_set_mac_address(efx);
-	} else {
+	} else if (rc) {
 		efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC,
 				       sizeof(inbuf), NULL, 0, rc);
 	}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index ef6bff8..98bbb91 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -280,8 +280,14 @@
 bool stmmac_eee_init(struct stmmac_priv *priv)
 {
 	unsigned long flags;
+	int interface = priv->plat->interface;
 	bool ret = false;
 
+	if ((interface != PHY_INTERFACE_MODE_MII) &&
+	    (interface != PHY_INTERFACE_MODE_GMII) &&
+	    !phy_interface_mode_is_rgmii(interface))
+		goto out;
+
 	/* Using PCS we cannot dial with the phy registers at this stage
 	 * so we do not support extra feature like EEE.
 	 */
@@ -1795,6 +1801,7 @@
 
 	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
+	priv->mss = 0;
 
 	ret = alloc_dma_desc_resources(priv);
 	if (ret < 0) {
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index e46b1eb..7ea8ead 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -1277,7 +1277,7 @@
 	fjes_set_ethtool_ops(netdev);
 	netdev->mtu = fjes_support_mtu[3];
 	netdev->flags |= IFF_BROADCAST;
-	netdev->features |= NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_FILTER;
+	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 }
 
 static void fjes_irq_watch_task(struct work_struct *work)
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index cebde07..cb206e5 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -69,7 +69,6 @@
 	struct socket		*sock0;
 	struct socket		*sock1u;
 
-	struct net		*net;
 	struct net_device	*dev;
 
 	unsigned int		hash_size;
@@ -316,7 +315,7 @@
 
 	netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
 
-	xnet = !net_eq(gtp->net, dev_net(gtp->dev));
+	xnet = !net_eq(sock_net(sk), dev_net(gtp->dev));
 
 	switch (udp_sk(sk)->encap_type) {
 	case UDP_ENCAP_GTP0:
@@ -612,7 +611,7 @@
 				    pktinfo.fl4.saddr, pktinfo.fl4.daddr,
 				    pktinfo.iph->tos,
 				    ip4_dst_hoplimit(&pktinfo.rt->dst),
-				    htons(IP_DF),
+				    0,
 				    pktinfo.gtph_port, pktinfo.gtph_port,
 				    true, false);
 		break;
@@ -658,7 +657,7 @@
 static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
 static void gtp_hashtable_free(struct gtp_dev *gtp);
 static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
-			    int fd_gtp0, int fd_gtp1, struct net *src_net);
+			    int fd_gtp0, int fd_gtp1);
 
 static int gtp_newlink(struct net *src_net, struct net_device *dev,
 			struct nlattr *tb[], struct nlattr *data[])
@@ -675,7 +674,7 @@
 	fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
 	fd1 = nla_get_u32(data[IFLA_GTP_FD1]);
 
-	err = gtp_encap_enable(dev, gtp, fd0, fd1, src_net);
+	err = gtp_encap_enable(dev, gtp, fd0, fd1);
 	if (err < 0)
 		goto out_err;
 
@@ -821,7 +820,7 @@
 }
 
 static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
-			    int fd_gtp0, int fd_gtp1, struct net *src_net)
+			    int fd_gtp0, int fd_gtp1)
 {
 	struct udp_tunnel_sock_cfg tuncfg = {NULL};
 	struct socket *sock0, *sock1u;
@@ -858,7 +857,6 @@
 
 	gtp->sock0 = sock0;
 	gtp->sock1u = sock1u;
-	gtp->net = src_net;
 
 	tuncfg.sk_user_data = gtp;
 	tuncfg.encap_rcv = gtp_encap_recv;
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index b4e9907..980e385 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -404,7 +404,7 @@
 	struct dst_entry *dst;
 	int err, ret = NET_XMIT_DROP;
 	struct flowi6 fl6 = {
-		.flowi6_iif = dev->ifindex,
+		.flowi6_oif = dev->ifindex,
 		.daddr = ip6h->daddr,
 		.saddr = ip6h->saddr,
 		.flowi6_flags = FLOWI_FLAG_ANYSRC,
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index a0849f4..c0192f9 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -418,8 +418,9 @@
 		memset(rd, 0, sizeof(*rd));
 		rd->hw = hwmap + i;
 		rd->buf = kmalloc(len, GFP_KERNEL|GFP_DMA);
-		if (rd->buf == NULL ||
-		    !(busaddr = pci_map_single(pdev, rd->buf, len, dir))) {
+		if (rd->buf)
+			busaddr = pci_map_single(pdev, rd->buf, len, dir);
+		if (rd->buf == NULL || pci_dma_mapping_error(pdev, busaddr)) {
 			if (rd->buf) {
 				net_err_ratelimited("%s: failed to create PCI-MAP for %p\n",
 						    __func__, rd->buf);
@@ -430,8 +431,7 @@
 				rd = r->rd + j;
 				busaddr = rd_get_addr(rd);
 				rd_set_addr_status(rd, 0, 0);
-				if (busaddr)
-					pci_unmap_single(pdev, busaddr, len, dir);
+				pci_unmap_single(pdev, busaddr, len, dir);
 				kfree(rd->buf);
 				rd->buf = NULL;
 			}
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index dc8ccac..6d55049 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -452,7 +452,7 @@
 					      struct macvlan_dev, list);
 	else
 		vlan = macvlan_hash_lookup(port, eth->h_dest);
-	if (vlan == NULL)
+	if (!vlan || vlan->mode == MACVLAN_MODE_SOURCE)
 		return RX_HANDLER_PASS;
 
 	dev = vlan->dev;
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index a52b560..3603eec 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -166,7 +166,7 @@
 		mac = (const u8 *) ndev->dev_addr;
 
 		if (!is_valid_ether_addr(mac))
-			return -EFAULT;
+			return -EINVAL;
 
 		for (i = 0; i < 3; i++) {
 			phy_write(phydev, AT803X_MMD_ACCESS_CONTROL,
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 2229188..2032a6d 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -624,6 +624,7 @@
 		phydev->link = 0;
 		if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev))
 			phydev->drv->config_intr(phydev);
+		return genphy_config_aneg(phydev);
 	}
 
 	return 0;
@@ -1020,7 +1021,7 @@
 	.phy_id		= PHY_ID_KSZ8795,
 	.phy_id_mask	= MICREL_PHY_ID_MASK,
 	.name		= "Micrel KSZ8795",
-	.features	= (SUPPORTED_Pause | SUPPORTED_Asym_Pause),
+	.features	= PHY_BASIC_FEATURES,
 	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
 	.config_init	= kszphy_config_init,
 	.config_aneg	= ksz8873mll_config_aneg,
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 93ffedf..1e2d4f1 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -491,13 +491,14 @@
 	if (err)
 		return err;
 
-	ks->regs_attr.size = ks->chip->regs_size;
 	memcpy(&ks->regs_attr, &ks8995_registers_attr, sizeof(ks->regs_attr));
+	ks->regs_attr.size = ks->chip->regs_size;
 
 	err = ks8995_reset(ks);
 	if (err)
 		return err;
 
+	sysfs_attr_init(&ks->regs_attr.attr);
 	err = sysfs_create_bin_file(&spi->dev.kobj, &ks->regs_attr);
 	if (err) {
 		dev_err(&spi->dev, "unable to create sysfs file, err=%d\n",
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 440d5f4..b883af9 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -958,6 +958,7 @@
 	unregister_netdevice_many(&list);
 	rtnl_unlock();
 
+	mutex_destroy(&pn->all_ppp_mutex);
 	idr_destroy(&pn->units_idr);
 }
 
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
index e221bfc..947bea8 100644
--- a/drivers/net/usb/cx82310_eth.c
+++ b/drivers/net/usb/cx82310_eth.c
@@ -293,12 +293,9 @@
 {
 	int len = skb->len;
 
-	if (skb_headroom(skb) < 2) {
-		struct sk_buff *skb2 = skb_copy_expand(skb, 2, 0, flags);
+	if (skb_cow_head(skb, 2)) {
 		dev_kfree_skb_any(skb);
-		skb = skb2;
-		if (!skb)
-			return NULL;
+		return NULL;
 	}
 	skb_push(skb, 2);
 
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index f33460c..9c257ff 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -2419,14 +2419,9 @@
 {
 	u32 tx_cmd_a, tx_cmd_b;
 
-	if (skb_headroom(skb) < TX_OVERHEAD) {
-		struct sk_buff *skb2;
-
-		skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
+	if (skb_cow_head(skb, TX_OVERHEAD)) {
 		dev_kfree_skb_any(skb);
-		skb = skb2;
-		if (!skb)
-			return NULL;
+		return NULL;
 	}
 
 	if (lan78xx_linearize(skb) < 0)
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 9cf11c8..db65d9a 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -74,9 +74,11 @@
 		net->hard_header_len = 0;
 		net->addr_len        = 0;
 		net->flags           = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+		set_bit(EVENT_NO_IP_ALIGN, &dev->flags);
 		netdev_dbg(net, "mode: raw IP\n");
 	} else if (!net->header_ops) { /* don't bother if already set */
 		ether_setup(net);
+		clear_bit(EVENT_NO_IP_ALIGN, &dev->flags);
 		netdev_dbg(net, "mode: Ethernet\n");
 	}
 
@@ -580,6 +582,10 @@
 		USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69),
 		.driver_info        = (unsigned long)&qmi_wwan_info,
 	},
+	{	/* Motorola Mapphone devices with MDM6600 */
+		USB_VENDOR_AND_INTERFACE_INFO(0x22b8, USB_CLASS_VENDOR_SPEC, 0xfb, 0xff),
+		.driver_info        = (unsigned long)&qmi_wwan_info,
+	},
 
 	/* 2. Combined interface devices matching on class+protocol */
 	{	/* Huawei E367 and possibly others in "Windows mode" */
@@ -901,6 +907,7 @@
 	{QMI_FIXED_INTF(0x1199, 0x9079, 10)},	/* Sierra Wireless EM74xx */
 	{QMI_FIXED_INTF(0x1199, 0x907b, 8)},	/* Sierra Wireless EM74xx */
 	{QMI_FIXED_INTF(0x1199, 0x907b, 10)},	/* Sierra Wireless EM74xx */
+	{QMI_FIXED_INTF(0x1199, 0x9091, 8)},	/* Sierra Wireless EM7565 */
 	{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},	/* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
 	{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)},	/* Alcatel L800MA */
 	{QMI_FIXED_INTF(0x2357, 0x0201, 4)},	/* TP-LINK HSUPA Modem MA180 */
@@ -936,6 +943,7 @@
 	{QMI_FIXED_INTF(0x1e0e, 0x9001, 5)},	/* SIMCom 7230E */
 	{QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)},	/* Quectel EC25, EC20 R2.0  Mini PCIe */
 	{QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)},	/* Quectel EC21 Mini PCIe */
+	{QMI_FIXED_INTF(0x2c7c, 0x0296, 4)},	/* Quectel BG96 */
 
 	/* 4. Gobi 1000 devices */
 	{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},	/* Acer Gobi Modem Device */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index afb953a..b2d7c7e 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -32,7 +32,7 @@
 #define NETNEXT_VERSION		"08"
 
 /* Information for net */
-#define NET_VERSION		"8"
+#define NET_VERSION		"9"
 
 #define DRIVER_VERSION		"v1." NETNEXT_VERSION "." NET_VERSION
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -501,6 +501,8 @@
 #define RTL8153_RMS		RTL8153_MAX_PACKET
 #define RTL8152_TX_TIMEOUT	(5 * HZ)
 #define RTL8152_NAPI_WEIGHT	64
+#define rx_reserved_size(x)	((x) + VLAN_ETH_HLEN + CRC_SIZE + \
+				 sizeof(struct rx_desc) + RX_ALIGN)
 
 /* rtl8152 flags */
 enum rtl8152_flags {
@@ -1292,6 +1294,7 @@
 		}
 	} else {
 		if (netif_carrier_ok(tp->netdev)) {
+			netif_stop_queue(tp->netdev);
 			set_bit(RTL8152_LINK_CHG, &tp->flags);
 			schedule_delayed_work(&tp->schedule, 0);
 		}
@@ -1362,6 +1365,7 @@
 	spin_lock_init(&tp->rx_lock);
 	spin_lock_init(&tp->tx_lock);
 	INIT_LIST_HEAD(&tp->tx_free);
+	INIT_LIST_HEAD(&tp->rx_done);
 	skb_queue_head_init(&tp->tx_queue);
 	skb_queue_head_init(&tp->rx_queue);
 
@@ -2252,8 +2256,7 @@
 
 static void r8153_set_rx_early_size(struct r8152 *tp)
 {
-	u32 mtu = tp->netdev->mtu;
-	u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 8;
+	u32 ocp_data = (agg_buf_sz - rx_reserved_size(tp->netdev->mtu)) / 4;
 
 	ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data);
 }
@@ -3165,6 +3168,9 @@
 			napi_enable(&tp->napi);
 			netif_wake_queue(netdev);
 			netif_info(tp, link, netdev, "carrier on\n");
+		} else if (netif_queue_stopped(netdev) &&
+			   skb_queue_len(&tp->tx_queue) < tp->tx_qlen) {
+			netif_wake_queue(netdev);
 		}
 	} else {
 		if (netif_carrier_ok(netdev)) {
@@ -3698,8 +3704,18 @@
 			tp->rtl_ops.autosuspend_en(tp, false);
 			napi_disable(&tp->napi);
 			set_bit(WORK_ENABLE, &tp->flags);
-			if (netif_carrier_ok(tp->netdev))
-				rtl_start_rx(tp);
+
+			if (netif_carrier_ok(tp->netdev)) {
+				if (rtl8152_get_speed(tp) & LINK_STATUS) {
+					rtl_start_rx(tp);
+				} else {
+					netif_carrier_off(tp->netdev);
+					tp->rtl_ops.disable(tp);
+					netif_info(tp, link, tp->netdev,
+						   "linking down\n");
+				}
+			}
+
 			napi_enable(&tp->napi);
 			clear_bit(SELECTIVE_SUSPEND, &tp->flags);
 			smp_mb__after_atomic();
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 9af9799..4cb9b11 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -2205,13 +2205,9 @@
 {
 	u32 tx_cmd_a, tx_cmd_b;
 
-	if (skb_headroom(skb) < SMSC75XX_TX_OVERHEAD) {
-		struct sk_buff *skb2 =
-			skb_copy_expand(skb, SMSC75XX_TX_OVERHEAD, 0, flags);
+	if (skb_cow_head(skb, SMSC75XX_TX_OVERHEAD)) {
 		dev_kfree_skb_any(skb);
-		skb = skb2;
-		if (!skb)
-			return NULL;
+		return NULL;
 	}
 
 	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN) | TX_CMD_A_FCS;
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index 4a1e9c4..aadfe1d 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -456,14 +456,9 @@
 
 	len = skb->len;
 
-	if (skb_headroom(skb) < SR_TX_OVERHEAD) {
-		struct sk_buff *skb2;
-
-		skb2 = skb_copy_expand(skb, SR_TX_OVERHEAD, 0, flags);
+	if (skb_cow_head(skb, SR_TX_OVERHEAD)) {
 		dev_kfree_skb_any(skb);
-		skb = skb2;
-		if (!skb)
-			return NULL;
+		return NULL;
 	}
 
 	__skb_push(skb, SR_TX_OVERHEAD);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index d5071e3..4ab82b9 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -485,7 +485,10 @@
 		return -ENOLINK;
 	}
 
-	skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
+	if (test_bit(EVENT_NO_IP_ALIGN, &dev->flags))
+		skb = __netdev_alloc_skb(dev->net, size, flags);
+	else
+		skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
 	if (!skb) {
 		netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
 		usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index e7f5910..f8eb66e 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -467,6 +467,9 @@
 	struct i2400mu *i2400mu;
 	struct usb_device *usb_dev = interface_to_usbdev(iface);
 
+	if (iface->cur_altsetting->desc.bNumEndpoints < 4)
+		return -ENODEV;
+
 	if (usb_dev->speed != USB_SPEED_HIGH)
 		dev_err(dev, "device not connected as high speed\n");
 
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index e2e9603..75faeb1 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -121,6 +121,7 @@
 	 Select Y to compile the driver in order to have WLAN functionality
 	 support.
 
+source "drivers/net/wireless/cnss2/Kconfig"
 source "drivers/net/wireless/cnss_utils/Kconfig"
 source "drivers/net/wireless/cnss_genl/Kconfig"
 
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 7a75193..4ffbd10 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -26,6 +26,8 @@
 
 obj-$(CONFIG_MAC80211_HWSIM)	+= mac80211_hwsim.o
 
+obj-$(CONFIG_CNSS2)	+= cnss2/
+
 obj-$(CONFIG_WCNSS_MEM_PRE_ALLOC) += cnss_prealloc/
 
 obj-$(CONFIG_CNSS_UTILS) += cnss_utils/
diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c
index 70ecd82..098c814 100644
--- a/drivers/net/wireless/admtek/adm8211.c
+++ b/drivers/net/wireless/admtek/adm8211.c
@@ -413,6 +413,13 @@
 						       skb_tail_pointer(newskb),
 						       RX_PKT_SIZE,
 						       PCI_DMA_FROMDEVICE);
+				if (pci_dma_mapping_error(priv->pdev,
+					   priv->rx_buffers[entry].mapping)) {
+					priv->rx_buffers[entry].skb = NULL;
+					dev_kfree_skb(newskb);
+					skb = NULL;
+					/* TODO: update rx dropped stats */
+				}
 			} else {
 				skb = NULL;
 				/* TODO: update rx dropped stats */
@@ -1450,6 +1457,12 @@
 						  skb_tail_pointer(rx_info->skb),
 						  RX_PKT_SIZE,
 						  PCI_DMA_FROMDEVICE);
+		if (pci_dma_mapping_error(priv->pdev, rx_info->mapping)) {
+			dev_kfree_skb(rx_info->skb);
+			rx_info->skb = NULL;
+			break;
+		}
+
 		desc->buffer1 = cpu_to_le32(rx_info->mapping);
 		desc->status = cpu_to_le32(RDES0_STATUS_OWN | RDES0_STATUS_SQL);
 	}
@@ -1613,7 +1626,7 @@
 }
 
 /* Transmit skb w/adm8211_tx_hdr (802.11 header created by hardware) */
-static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
+static int adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
 			   u16 plcp_signal,
 			   size_t hdrlen)
 {
@@ -1625,6 +1638,8 @@
 
 	mapping = pci_map_single(priv->pdev, skb->data, skb->len,
 				 PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(priv->pdev, mapping))
+		return -ENOMEM;
 
 	spin_lock_irqsave(&priv->lock, flags);
 
@@ -1657,6 +1672,8 @@
 
 	/* Trigger transmit poll */
 	ADM8211_CSR_WRITE(TDR, 0);
+
+	return 0;
 }
 
 /* Put adm8211_tx_hdr on skb and transmit */
@@ -1710,7 +1727,10 @@
 
 	txhdr->retry_limit = info->control.rates[0].count;
 
-	adm8211_tx_raw(dev, skb, plcp_signal, hdrlen);
+	if (adm8211_tx_raw(dev, skb, plcp_signal, hdrlen)) {
+		/* Drop packet */
+		ieee80211_free_txskb(dev, skb);
+	}
 }
 
 static int adm8211_alloc_rings(struct ieee80211_hw *dev)
@@ -1843,7 +1863,8 @@
 	priv->rx_ring_size = rx_ring_size;
 	priv->tx_ring_size = tx_ring_size;
 
-	if (adm8211_alloc_rings(dev)) {
+	err = adm8211_alloc_rings(dev);
+	if (err) {
 		printk(KERN_ERR "%s (adm8211): Cannot allocate TX/RX ring\n",
 		       pci_name(pdev));
 		goto err_iounmap;
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 366d3dc..7b3017f 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -691,8 +691,11 @@
 		   "boot get otp board id result 0x%08x board_id %d chip_id %d\n",
 		   result, board_id, chip_id);
 
-	if ((result & ATH10K_BMI_BOARD_ID_STATUS_MASK) != 0)
+	if ((result & ATH10K_BMI_BOARD_ID_STATUS_MASK) != 0 ||
+	    (board_id == 0)) {
+		ath10k_warn(ar, "board id is not exist in otp, ignore it\n");
 		return -EOPNOTSUPP;
+	}
 
 	ar->id.bmi_ids_valid = true;
 	ar->id.bmi_board_id = board_id;
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 0b4c156..ba1fe61 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -548,6 +548,11 @@
 		return IEEE80211_TKIP_IV_LEN;
 	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
 		return IEEE80211_CCMP_HDR_LEN;
+	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
+		return IEEE80211_CCMP_256_HDR_LEN;
+	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
+	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
+		return IEEE80211_GCMP_HDR_LEN;
 	case HTT_RX_MPDU_ENCRYPT_WEP128:
 	case HTT_RX_MPDU_ENCRYPT_WAPI:
 		break;
@@ -573,6 +578,11 @@
 		return IEEE80211_TKIP_ICV_LEN;
 	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
 		return IEEE80211_CCMP_MIC_LEN;
+	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
+		return IEEE80211_CCMP_256_MIC_LEN;
+	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
+	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
+		return IEEE80211_GCMP_MIC_LEN;
 	case HTT_RX_MPDU_ENCRYPT_WEP128:
 	case HTT_RX_MPDU_ENCRYPT_WAPI:
 		break;
@@ -1024,9 +1034,21 @@
 	hdr = (void *)msdu->data;
 
 	/* Tail */
-	if (status->flag & RX_FLAG_IV_STRIPPED)
+	if (status->flag & RX_FLAG_IV_STRIPPED) {
 		skb_trim(msdu, msdu->len -
 			 ath10k_htt_rx_crypto_tail_len(ar, enctype));
+	} else {
+		/* MIC */
+		if ((status->flag & RX_FLAG_MIC_STRIPPED) &&
+		    enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
+			skb_trim(msdu, msdu->len - 8);
+
+		/* ICV */
+		if (status->flag & RX_FLAG_ICV_STRIPPED &&
+		    enctype != HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
+			skb_trim(msdu, msdu->len -
+				 ath10k_htt_rx_crypto_tail_len(ar, enctype));
+	}
 
 	/* MMIC */
 	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
@@ -1048,7 +1070,8 @@
 static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
 					  struct sk_buff *msdu,
 					  struct ieee80211_rx_status *status,
-					  const u8 first_hdr[64])
+					  const u8 first_hdr[64],
+					  enum htt_rx_mpdu_encrypt_type enctype)
 {
 	struct ieee80211_hdr *hdr;
 	struct htt_rx_desc *rxd;
@@ -1056,6 +1079,7 @@
 	u8 da[ETH_ALEN];
 	u8 sa[ETH_ALEN];
 	int l3_pad_bytes;
+	int bytes_aligned = ar->hw_params.decap_align_bytes;
 
 	/* Delivered decapped frame:
 	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
@@ -1084,6 +1108,14 @@
 	/* push original 802.11 header */
 	hdr = (struct ieee80211_hdr *)first_hdr;
 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+		memcpy(skb_push(msdu,
+				ath10k_htt_rx_crypto_param_len(ar, enctype)),
+		       (void *)hdr + round_up(hdr_len, bytes_aligned),
+			ath10k_htt_rx_crypto_param_len(ar, enctype));
+	}
+
 	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
 
 	/* original 802.11 header has a different DA and in
@@ -1144,6 +1176,7 @@
 	u8 sa[ETH_ALEN];
 	int l3_pad_bytes;
 	struct htt_rx_desc *rxd;
+	int bytes_aligned = ar->hw_params.decap_align_bytes;
 
 	/* Delivered decapped frame:
 	 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
@@ -1172,6 +1205,14 @@
 	/* push original 802.11 header */
 	hdr = (struct ieee80211_hdr *)first_hdr;
 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+		memcpy(skb_push(msdu,
+				ath10k_htt_rx_crypto_param_len(ar, enctype)),
+		       (void *)hdr + round_up(hdr_len, bytes_aligned),
+			ath10k_htt_rx_crypto_param_len(ar, enctype));
+	}
+
 	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
 
 	/* original 802.11 header has a different DA and in
@@ -1185,12 +1226,14 @@
 static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
 					 struct sk_buff *msdu,
 					 struct ieee80211_rx_status *status,
-					 const u8 first_hdr[64])
+					 const u8 first_hdr[64],
+					 enum htt_rx_mpdu_encrypt_type enctype)
 {
 	struct ieee80211_hdr *hdr;
 	size_t hdr_len;
 	int l3_pad_bytes;
 	struct htt_rx_desc *rxd;
+	int bytes_aligned = ar->hw_params.decap_align_bytes;
 
 	/* Delivered decapped frame:
 	 * [amsdu header] <-- replaced with 802.11 hdr
@@ -1206,6 +1249,14 @@
 
 	hdr = (struct ieee80211_hdr *)first_hdr;
 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+		memcpy(skb_push(msdu,
+				ath10k_htt_rx_crypto_param_len(ar, enctype)),
+		       (void *)hdr + round_up(hdr_len, bytes_aligned),
+			ath10k_htt_rx_crypto_param_len(ar, enctype));
+	}
+
 	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
 }
 
@@ -1240,13 +1291,15 @@
 					    is_decrypted);
 		break;
 	case RX_MSDU_DECAP_NATIVE_WIFI:
-		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr);
+		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
+					      enctype);
 		break;
 	case RX_MSDU_DECAP_ETHERNET2_DIX:
 		ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
 		break;
 	case RX_MSDU_DECAP_8023_SNAP_LLC:
-		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr);
+		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
+					     enctype);
 		break;
 	}
 }
@@ -1289,7 +1342,8 @@
 
 static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
 				 struct sk_buff_head *amsdu,
-				 struct ieee80211_rx_status *status)
+				 struct ieee80211_rx_status *status,
+				 bool fill_crypt_header)
 {
 	struct sk_buff *first;
 	struct sk_buff *last;
@@ -1299,7 +1353,6 @@
 	enum htt_rx_mpdu_encrypt_type enctype;
 	u8 first_hdr[64];
 	u8 *qos;
-	size_t hdr_len;
 	bool has_fcs_err;
 	bool has_crypto_err;
 	bool has_tkip_err;
@@ -1324,15 +1377,17 @@
 	 * decapped header. It'll be used for undecapping of each MSDU.
 	 */
 	hdr = (void *)rxd->rx_hdr_status;
-	hdr_len = ieee80211_hdrlen(hdr->frame_control);
-	memcpy(first_hdr, hdr, hdr_len);
+	memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
 
 	/* Each A-MSDU subframe will use the original header as the base and be
 	 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
 	 */
 	hdr = (void *)first_hdr;
-	qos = ieee80211_get_qos_ctl(hdr);
-	qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+
+	if (ieee80211_is_data_qos(hdr->frame_control)) {
+		qos = ieee80211_get_qos_ctl(hdr);
+		qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+	}
 
 	/* Some attention flags are valid only in the last MSDU. */
 	last = skb_peek_tail(amsdu);
@@ -1379,9 +1434,14 @@
 		status->flag |= RX_FLAG_DECRYPTED;
 
 		if (likely(!is_mgmt))
-			status->flag |= RX_FLAG_IV_STRIPPED |
-					RX_FLAG_MMIC_STRIPPED;
-}
+			status->flag |= RX_FLAG_MMIC_STRIPPED;
+
+		if (fill_crypt_header)
+			status->flag |= RX_FLAG_MIC_STRIPPED |
+					RX_FLAG_ICV_STRIPPED;
+		else
+			status->flag |= RX_FLAG_IV_STRIPPED;
+	}
 
 	skb_queue_walk(amsdu, msdu) {
 		ath10k_htt_rx_h_csum_offload(msdu);
@@ -1397,6 +1457,9 @@
 		if (is_mgmt)
 			continue;
 
+		if (fill_crypt_header)
+			continue;
+
 		hdr = (void *)msdu->data;
 		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
 	}
@@ -1407,6 +1470,9 @@
 				    struct ieee80211_rx_status *status)
 {
 	struct sk_buff *msdu;
+	struct sk_buff *first_subframe;
+
+	first_subframe = skb_peek(amsdu);
 
 	while ((msdu = __skb_dequeue(amsdu))) {
 		/* Setup per-MSDU flags */
@@ -1415,6 +1481,13 @@
 		else
 			status->flag |= RX_FLAG_AMSDU_MORE;
 
+		if (msdu == first_subframe) {
+			first_subframe = NULL;
+			status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
+		} else {
+			status->flag |= RX_FLAG_ALLOW_SAME_PN;
+		}
+
 		ath10k_process_rx(ar, status, msdu);
 	}
 }
@@ -1557,7 +1630,7 @@
 	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
 	ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
 	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
-	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
+	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true);
 	ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
 
 	return num_msdus;
@@ -1892,7 +1965,7 @@
 			num_msdus += skb_queue_len(&amsdu);
 			ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
 			ath10k_htt_rx_h_filter(ar, &amsdu, status);
-			ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
+			ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false);
 			ath10k_htt_rx_h_deliver(ar, &amsdu, status);
 			break;
 		case -EAGAIN:
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 30e98af..17ab8ef 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -1224,6 +1224,36 @@
 		return ath10k_monitor_stop(ar);
 }
 
+static bool ath10k_mac_can_set_cts_prot(struct ath10k_vif *arvif)
+{
+	struct ath10k *ar = arvif->ar;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	if (!arvif->is_started) {
+		ath10k_dbg(ar, ATH10K_DBG_MAC, "defer cts setup, vdev is not ready yet\n");
+		return false;
+	}
+
+	return true;
+}
+
+static int ath10k_mac_set_cts_prot(struct ath10k_vif *arvif)
+{
+	struct ath10k *ar = arvif->ar;
+	u32 vdev_param;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	vdev_param = ar->wmi.vdev_param->protection_mode;
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_protection %d\n",
+		   arvif->vdev_id, arvif->use_cts_prot);
+
+	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+					 arvif->use_cts_prot ? 1 : 0);
+}
+
 static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
 {
 	struct ath10k *ar = arvif->ar;
@@ -4668,7 +4698,8 @@
 	lockdep_assert_held(&ar->conf_mutex);
 
 	list_for_each_entry(arvif, &ar->arvifs, list) {
-		WARN_ON(arvif->txpower < 0);
+		if (arvif->txpower <= 0)
+			continue;
 
 		if (txpower == -1)
 			txpower = arvif->txpower;
@@ -4676,8 +4707,8 @@
 			txpower = min(txpower, arvif->txpower);
 	}
 
-	if (WARN_ON(txpower == -1))
-		return -EINVAL;
+	if (txpower == -1)
+		return 0;
 
 	ret = ath10k_mac_txpower_setup(ar, txpower);
 	if (ret) {
@@ -5321,20 +5352,18 @@
 
 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
 		arvif->use_cts_prot = info->use_cts_prot;
-		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
-			   arvif->vdev_id, info->use_cts_prot);
 
 		ret = ath10k_recalc_rtscts_prot(arvif);
 		if (ret)
 			ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
 				    arvif->vdev_id, ret);
 
-		vdev_param = ar->wmi.vdev_param->protection_mode;
-		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
-						info->use_cts_prot ? 1 : 0);
-		if (ret)
-			ath10k_warn(ar, "failed to set protection mode %d on vdev %i: %d\n",
-				    info->use_cts_prot, arvif->vdev_id, ret);
+		if (ath10k_mac_can_set_cts_prot(arvif)) {
+			ret = ath10k_mac_set_cts_prot(arvif);
+			if (ret)
+				ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n",
+					    arvif->vdev_id, ret);
+		}
 	}
 
 	if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -7355,6 +7384,13 @@
 		arvif->is_up = true;
 	}
 
+	if (ath10k_mac_can_set_cts_prot(arvif)) {
+		ret = ath10k_mac_set_cts_prot(arvif);
+		if (ret)
+			ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n",
+				    arvif->vdev_id, ret);
+	}
+
 	mutex_unlock(&ar->conf_mutex);
 	return 0;
 
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index 034e7a5..e4878d0 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -239,6 +239,9 @@
 	HTT_RX_MPDU_ENCRYPT_WAPI             = 5,
 	HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2     = 6,
 	HTT_RX_MPDU_ENCRYPT_NONE             = 7,
+	HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2  = 8,
+	HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2    = 9,
+	HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2 = 10,
 };
 
 #define RX_MPDU_START_INFO0_PEER_IDX_MASK     0x000007ff
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index e64f593..0e4d49a 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -1105,8 +1105,10 @@
 		struct ath10k_fw_stats_pdev *dst;
 
 		src = data;
-		if (data_len < sizeof(*src))
+		if (data_len < sizeof(*src)) {
+			kfree(tb);
 			return -EPROTO;
+		}
 
 		data += sizeof(*src);
 		data_len -= sizeof(*src);
@@ -1126,8 +1128,10 @@
 		struct ath10k_fw_stats_vdev *dst;
 
 		src = data;
-		if (data_len < sizeof(*src))
+		if (data_len < sizeof(*src)) {
+			kfree(tb);
 			return -EPROTO;
+		}
 
 		data += sizeof(*src);
 		data_len -= sizeof(*src);
@@ -1145,8 +1149,10 @@
 		struct ath10k_fw_stats_peer *dst;
 
 		src = data;
-		if (data_len < sizeof(*src))
+		if (data_len < sizeof(*src)) {
+			kfree(tb);
 			return -EPROTO;
+		}
 
 		data += sizeof(*src);
 		data_len -= sizeof(*src);
diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c
index 1fa7f84..8e9480c 100644
--- a/drivers/net/wireless/ath/ath9k/tx99.c
+++ b/drivers/net/wireless/ath/ath9k/tx99.c
@@ -179,6 +179,9 @@
 	ssize_t len;
 	int r;
 
+	if (count < 1)
+		return -EINVAL;
+
 	if (sc->cur_chan->nvifs > 1)
 		return -EOPNOTSUPP;
 
@@ -186,6 +189,8 @@
 	if (copy_from_user(buf, user_buf, len))
 		return -EFAULT;
 
+	buf[len] = '\0';
+
 	if (strtobool(buf, &start))
 		return -EINVAL;
 
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 51030c3..91d29f5 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -1031,9 +1031,8 @@
 				    struct cfg80211_chan_def *chandef)
 {
 	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
-	struct wireless_dev *wdev = wil_to_wdev(wil);
 
-	wdev->preset_chandef = *chandef;
+	wil->monitor_chandef = *chandef;
 
 	return 0;
 }
@@ -1807,9 +1806,8 @@
 	wil_dbg_pm(wil, "suspending\n");
 
 	mutex_lock(&wil->mutex);
-	wil_p2p_stop_discovery(wil);
-
 	mutex_lock(&wil->p2p_wdev_mutex);
+	wil_p2p_stop_radio_operations(wil);
 	wil_abort_scan(wil, true);
 	mutex_unlock(&wil->p2p_wdev_mutex);
 	mutex_unlock(&wil->mutex);
@@ -1827,6 +1825,68 @@
 	return 0;
 }
 
+static int
+wil_cfg80211_sched_scan_start(struct wiphy *wiphy,
+			      struct net_device *dev,
+			      struct cfg80211_sched_scan_request *request)
+{
+	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+	int i, rc;
+
+	wil_dbg_misc(wil,
+		     "sched scan start: n_ssids %d, ie_len %zu, flags 0x%x\n",
+		     request->n_ssids, request->ie_len, request->flags);
+	for (i = 0; i < request->n_ssids; i++) {
+		wil_dbg_misc(wil, "SSID[%d]:", i);
+		wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1,
+				  request->ssids[i].ssid,
+				  request->ssids[i].ssid_len, true);
+	}
+	wil_dbg_misc(wil, "channels:");
+	for (i = 0; i < request->n_channels; i++)
+		wil_dbg_misc(wil, " %d%s", request->channels[i]->hw_value,
+			     i == request->n_channels - 1 ? "\n" : "");
+	wil_dbg_misc(wil, "n_match_sets %d, min_rssi_thold %d, delay %d\n",
+		     request->n_match_sets, request->min_rssi_thold,
+		     request->delay);
+	for (i = 0; i < request->n_match_sets; i++) {
+		struct cfg80211_match_set *ms = &request->match_sets[i];
+
+		wil_dbg_misc(wil, "MATCHSET[%d]: rssi_thold %d\n",
+			     i, ms->rssi_thold);
+		wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1,
+				  ms->ssid.ssid,
+				  ms->ssid.ssid_len, true);
+	}
+	wil_dbg_misc(wil, "n_scan_plans %d\n", request->n_scan_plans);
+	for (i = 0; i < request->n_scan_plans; i++) {
+		struct cfg80211_sched_scan_plan *sp = &request->scan_plans[i];
+
+		wil_dbg_misc(wil, "SCAN PLAN[%d]: interval %d iterations %d\n",
+			     i, sp->interval, sp->iterations);
+	}
+
+	rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len, request->ie);
+	if (rc)
+		return rc;
+	return wmi_start_sched_scan(wil, request);
+}
+
+static int
+wil_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev)
+{
+	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+	int rc;
+
+	rc = wmi_stop_sched_scan(wil);
+	/* device would return error if it thinks PNO is already stopped.
+	 * ignore the return code so user space and driver gets back in-sync
+	 */
+	wil_dbg_misc(wil, "sched scan stopped (%d)\n", rc);
+
+	return 0;
+}
+
 static struct cfg80211_ops wil_cfg80211_ops = {
 	.add_virtual_intf = wil_cfg80211_add_iface,
 	.del_virtual_intf = wil_cfg80211_del_iface,
@@ -1860,6 +1920,8 @@
 	.set_power_mgmt = wil_cfg80211_set_power_mgmt,
 	.suspend = wil_cfg80211_suspend,
 	.resume = wil_cfg80211_resume,
+	.sched_scan_start = wil_cfg80211_sched_scan_start,
+	.sched_scan_stop = wil_cfg80211_sched_scan_stop,
 };
 
 static void wil_wiphy_init(struct wiphy *wiphy)
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 831780a..751e911 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -875,7 +875,6 @@
 
 	params.buf = frame;
 	params.len = len;
-	params.chan = wdev->preset_chandef.chan;
 
 	rc = wil_cfg80211_mgmt_tx(wiphy, wdev, &params, NULL);
 
@@ -1682,8 +1681,6 @@
 	struct wil6210_priv *wil = file->private_data;
 
 	memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats));
-	wil->suspend_stats.min_suspend_time = ULONG_MAX;
-	wil->suspend_stats.collection_start = ktime_get();
 
 	return len;
 }
@@ -1693,33 +1690,41 @@
 				      size_t count, loff_t *ppos)
 {
 	struct wil6210_priv *wil = file->private_data;
-	static char text[400];
-	int n;
-	unsigned long long stats_collection_time =
-		ktime_to_us(ktime_sub(ktime_get(),
-				      wil->suspend_stats.collection_start));
+	char *text;
+	int n, ret, text_size = 500;
 
-	n = snprintf(text, sizeof(text),
-		     "Suspend statistics:\n"
+	text = kmalloc(text_size, GFP_KERNEL);
+	if (!text)
+		return -ENOMEM;
+
+	n = snprintf(text, text_size,
+		     "Radio on suspend statistics:\n"
 		     "successful suspends:%ld failed suspends:%ld\n"
 		     "successful resumes:%ld failed resumes:%ld\n"
-		     "rejected by host:%ld rejected by device:%ld\n"
-		     "total suspend time:%lld min suspend time:%lld\n"
-		     "max suspend time:%lld stats collection time: %lld\n",
-		     wil->suspend_stats.successful_suspends,
-		     wil->suspend_stats.failed_suspends,
-		     wil->suspend_stats.successful_resumes,
-		     wil->suspend_stats.failed_resumes,
-		     wil->suspend_stats.rejected_by_host,
+		     "rejected by device:%ld\n"
+		     "Radio off suspend statistics:\n"
+		     "successful suspends:%ld failed suspends:%ld\n"
+		     "successful resumes:%ld failed resumes:%ld\n"
+		     "General statistics:\n"
+		     "rejected by host:%ld\n",
+		     wil->suspend_stats.r_on.successful_suspends,
+		     wil->suspend_stats.r_on.failed_suspends,
+		     wil->suspend_stats.r_on.successful_resumes,
+		     wil->suspend_stats.r_on.failed_resumes,
 		     wil->suspend_stats.rejected_by_device,
-		     wil->suspend_stats.total_suspend_time,
-		     wil->suspend_stats.min_suspend_time,
-		     wil->suspend_stats.max_suspend_time,
-		     stats_collection_time);
+		     wil->suspend_stats.r_off.successful_suspends,
+		     wil->suspend_stats.r_off.failed_suspends,
+		     wil->suspend_stats.r_off.successful_resumes,
+		     wil->suspend_stats.r_off.failed_resumes,
+		     wil->suspend_stats.rejected_by_host);
 
-	n = min_t(int, n, sizeof(text));
+	n = min_t(int, n, text_size);
 
-	return simple_read_from_buffer(user_buf, count, ppos, text, n);
+	ret = simple_read_from_buffer(user_buf, count, ppos, text, n);
+
+	kfree(text);
+
+	return ret;
 }
 
 static const struct file_operations fops_suspend_stats = {
@@ -1888,8 +1893,6 @@
 
 	wil6210_debugfs_create_ITR_CNT(wil, dbg);
 
-	wil->suspend_stats.collection_start = ktime_get();
-
 	return 0;
 }
 
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 5cf3417..dcf87a7 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -565,7 +565,7 @@
 	if (unlikely((pseudo_cause == 0) || ((pseudo_cause & 0xff) == 0xff)))
 		return IRQ_NONE;
 
-	/* FIXME: IRQ mask debug */
+	/* IRQ mask debug */
 	if (unlikely(wil6210_debug_irq_mask(wil, pseudo_cause)))
 		return IRQ_NONE;
 
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index ae5a1b6..8e13f24 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -583,7 +583,6 @@
 	wil->wakeup_trigger = WMI_WAKEUP_TRIGGER_UCAST |
 			      WMI_WAKEUP_TRIGGER_BCAST;
 	memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats));
-	wil->suspend_stats.min_suspend_time = ULONG_MAX;
 	wil->vring_idle_trsh = 16;
 
 	return 0;
@@ -792,6 +791,14 @@
 	else
 		wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
 
+	if (test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities)) {
+		wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+		wiphy->max_sched_scan_ssids = WMI_MAX_PNO_SSID_NUM;
+		wiphy->max_match_sets = WMI_MAX_PNO_SSID_NUM;
+		wiphy->max_sched_scan_ie_len = WMI_MAX_IE_LEN;
+		wiphy->max_sched_scan_plans = WMI_MAX_PLANS_NUM;
+	}
+
 	if (wil->platform_ops.set_features) {
 		features = (test_bit(WMI_FW_CAPABILITY_REF_CLOCK_CONTROL,
 				     wil->fw_capabilities) &&
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 40cd32a..e2abe67 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -179,7 +179,7 @@
 	wdev->iftype = NL80211_IFTYPE_STATION; /* TODO */
 	/* default monitor channel */
 	ch = wdev->wiphy->bands[NL80211_BAND_60GHZ]->channels;
-	cfg80211_chandef_create(&wdev->preset_chandef, ch, NL80211_CHAN_NO_HT);
+	cfg80211_chandef_create(&wil->monitor_chandef, ch, NL80211_CHAN_NO_HT);
 
 	ndev = alloc_netdev(0, ifname, NET_NAME_UNKNOWN, wil_dev_setup);
 	if (!ndev) {
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 370068a..025bdd3 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -211,6 +211,8 @@
 		.fw_recovery = wil_platform_rop_fw_recovery,
 	};
 	u32 bar_size = pci_resource_len(pdev, 0);
+	int dma_addr_size[] = {48, 40, 32}; /* keep descending order */
+	int i;
 
 	/* check HW */
 	dev_info(&pdev->dev, WIL_NAME
@@ -246,21 +248,23 @@
 	}
 	/* rollback to err_plat */
 
-	/* device supports 48bit addresses */
-	rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
-	if (rc) {
-		dev_err(dev, "dma_set_mask_and_coherent(48) failed: %d\n", rc);
-		rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+	/* device supports >32bit addresses */
+	for (i = 0; i < ARRAY_SIZE(dma_addr_size); i++) {
+		rc = dma_set_mask_and_coherent(dev,
+					       DMA_BIT_MASK(dma_addr_size[i]));
 		if (rc) {
-			dev_err(dev,
-				"dma_set_mask_and_coherent(32) failed: %d\n",
-				rc);
-			goto err_plat;
+			dev_err(dev, "dma_set_mask_and_coherent(%d) failed: %d\n",
+				dma_addr_size[i], rc);
+			continue;
 		}
-	} else {
-		wil->use_extended_dma_addr = 1;
+		dev_info(dev, "using dma mask %d", dma_addr_size[i]);
+		wil->dma_addr_size = dma_addr_size[i];
+		break;
 	}
 
+	if (wil->dma_addr_size == 0)
+		goto err_plat;
+
 	rc = pci_enable_device(pdev);
 	if (rc && pdev->msi_enabled == 0) {
 		wil_err(wil,
@@ -393,6 +397,9 @@
 	int rc = 0;
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct wil6210_priv *wil = pci_get_drvdata(pdev);
+	struct net_device *ndev = wil_to_ndev(wil);
+	bool keep_radio_on = ndev->flags & IFF_UP &&
+			     wil->keep_radio_on_during_sleep;
 
 	wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
 
@@ -400,16 +407,18 @@
 	if (rc)
 		goto out;
 
-	rc = wil_suspend(wil, is_runtime);
+	rc = wil_suspend(wil, is_runtime, keep_radio_on);
 	if (!rc) {
-		wil->suspend_stats.successful_suspends++;
-
-		/* If platform device supports keep_radio_on_during_sleep
-		 * it will control PCIe master
+		/* In case radio stays on, platform device will control
+		 * PCIe master
 		 */
-		if (!wil->keep_radio_on_during_sleep)
+		if (!keep_radio_on) {
 			/* disable bus mastering */
 			pci_clear_master(pdev);
+			wil->suspend_stats.r_off.successful_suspends++;
+		} else {
+			wil->suspend_stats.r_on.successful_suspends++;
+		}
 	}
 out:
 	return rc;
@@ -420,23 +429,32 @@
 	int rc = 0;
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct wil6210_priv *wil = pci_get_drvdata(pdev);
+	struct net_device *ndev = wil_to_ndev(wil);
+	bool keep_radio_on = ndev->flags & IFF_UP &&
+			     wil->keep_radio_on_during_sleep;
 
 	wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
 
-	/* If platform device supports keep_radio_on_during_sleep it will
-	 * control PCIe master
+	/* In case radio stays on, platform device will control
+	 * PCIe master
 	 */
-	if (!wil->keep_radio_on_during_sleep)
+	if (!keep_radio_on)
 		/* allow master */
 		pci_set_master(pdev);
-	rc = wil_resume(wil, is_runtime);
+	rc = wil_resume(wil, is_runtime, keep_radio_on);
 	if (rc) {
 		wil_err(wil, "device failed to resume (%d)\n", rc);
-		wil->suspend_stats.failed_resumes++;
-		if (!wil->keep_radio_on_during_sleep)
+		if (!keep_radio_on) {
 			pci_clear_master(pdev);
+			wil->suspend_stats.r_off.failed_resumes++;
+		} else {
+			wil->suspend_stats.r_on.failed_resumes++;
+		}
 	} else {
-		wil->suspend_stats.successful_resumes++;
+		if (keep_radio_on)
+			wil->suspend_stats.r_on.successful_resumes++;
+		else
+			wil->suspend_stats.r_off.successful_resumes++;
 	}
 
 	return rc;
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index 153c1cf..14533ed 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -186,7 +186,7 @@
 					break;
 				wil_err(wil,
 					"TO waiting for idle RX, suspend failed\n");
-				wil->suspend_stats.failed_suspends++;
+				wil->suspend_stats.r_on.failed_suspends++;
 				goto resume_after_fail;
 			}
 			wil_dbg_ratelimited(wil, "rx vring is not empty -> NAPI\n");
@@ -202,7 +202,7 @@
 	 */
 	if (!wil_is_wmi_idle(wil)) {
 		wil_err(wil, "suspend failed due to pending WMI events\n");
-		wil->suspend_stats.failed_suspends++;
+		wil->suspend_stats.r_on.failed_suspends++;
 		goto resume_after_fail;
 	}
 
@@ -216,7 +216,7 @@
 		if (rc) {
 			wil_err(wil, "platform device failed to suspend (%d)\n",
 				rc);
-			wil->suspend_stats.failed_suspends++;
+			wil->suspend_stats.r_on.failed_suspends++;
 			wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
 			wil_unmask_irq(wil);
 			goto resume_after_fail;
@@ -272,6 +272,7 @@
 		rc = wil_down(wil);
 		if (rc) {
 			wil_err(wil, "wil_down : %d\n", rc);
+			wil->suspend_stats.r_off.failed_suspends++;
 			goto out;
 		}
 	}
@@ -284,6 +285,7 @@
 		rc = wil->platform_ops.suspend(wil->platform_handle, false);
 		if (rc) {
 			wil_enable_irq(wil);
+			wil->suspend_stats.r_off.failed_suspends++;
 			goto out;
 		}
 	}
@@ -317,12 +319,9 @@
 	return rc;
 }
 
-int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
+int wil_suspend(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on)
 {
 	int rc = 0;
-	struct net_device *ndev = wil_to_ndev(wil);
-	bool keep_radio_on = ndev->flags & IFF_UP &&
-			     wil->keep_radio_on_during_sleep;
 
 	wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
 
@@ -339,19 +338,12 @@
 	wil_dbg_pm(wil, "suspend: %s => %d\n",
 		   is_runtime ? "runtime" : "system", rc);
 
-	if (!rc)
-		wil->suspend_stats.suspend_start_time = ktime_get();
-
 	return rc;
 }
 
-int wil_resume(struct wil6210_priv *wil, bool is_runtime)
+int wil_resume(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on)
 {
 	int rc = 0;
-	struct net_device *ndev = wil_to_ndev(wil);
-	bool keep_radio_on = ndev->flags & IFF_UP &&
-			     wil->keep_radio_on_during_sleep;
-	unsigned long long suspend_time_usec = 0;
 
 	wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
 
@@ -369,21 +361,9 @@
 	else
 		rc = wil_resume_radio_off(wil);
 
-	if (rc)
-		goto out;
-
-	suspend_time_usec =
-		ktime_to_us(ktime_sub(ktime_get(),
-				      wil->suspend_stats.suspend_start_time));
-	wil->suspend_stats.total_suspend_time += suspend_time_usec;
-	if (suspend_time_usec < wil->suspend_stats.min_suspend_time)
-		wil->suspend_stats.min_suspend_time = suspend_time_usec;
-	if (suspend_time_usec > wil->suspend_stats.max_suspend_time)
-		wil->suspend_stats.max_suspend_time = suspend_time_usec;
-
 out:
-	wil_dbg_pm(wil, "resume: %s => %d, suspend time %lld usec\n",
-		   is_runtime ? "runtime" : "system", rc, suspend_time_usec);
+	wil_dbg_pm(wil, "resume: %s => %d\n", is_runtime ? "runtime" : "system",
+		   rc);
 	return rc;
 }
 
diff --git a/drivers/net/wireless/ath/wil6210/pmc.c b/drivers/net/wireless/ath/wil6210/pmc.c
index 2e301b6..4ea27b0 100644
--- a/drivers/net/wireless/ath/wil6210/pmc.c
+++ b/drivers/net/wireless/ath/wil6210/pmc.c
@@ -111,14 +111,14 @@
 	 *
 	 * HW has limitation that all vrings addresses must share the same
 	 * upper 16 msb bits part of 48 bits address. To workaround that,
-	 * if we are using 48 bit addresses switch to 32 bit allocation
-	 * before allocating vring memory.
+	 * if we are using more than 32 bit addresses switch to 32 bit
+	 * allocation before allocating vring memory.
 	 *
 	 * There's no check for the return value of dma_set_mask_and_coherent,
 	 * since we assume if we were able to set the mask during
 	 * initialization in this system it will not fail if we set it again
 	 */
-	if (wil->use_extended_dma_addr)
+	if (wil->dma_addr_size > 32)
 		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 
 	pmc->pring_va = dma_alloc_coherent(dev,
@@ -126,8 +126,9 @@
 			&pmc->pring_pa,
 			GFP_KERNEL);
 
-	if (wil->use_extended_dma_addr)
-		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+	if (wil->dma_addr_size > 32)
+		dma_set_mask_and_coherent(dev,
+					  DMA_BIT_MASK(wil->dma_addr_size));
 
 	wil_dbg_misc(wil,
 		     "pmc_alloc: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 8fe2239..62d1d07 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -178,14 +178,14 @@
 	 *
 	 * HW has limitation that all vrings addresses must share the same
 	 * upper 16 msb bits part of 48 bits address. To workaround that,
-	 * if we are using 48 bit addresses switch to 32 bit allocation
-	 * before allocating vring memory.
+	 * if we are using more than 32 bit addresses switch to 32 bit
+	 * allocation before allocating vring memory.
 	 *
 	 * There's no check for the return value of dma_set_mask_and_coherent,
 	 * since we assume if we were able to set the mask during
 	 * initialization in this system it will not fail if we set it again
 	 */
-	if (wil->use_extended_dma_addr)
+	if (wil->dma_addr_size > 32)
 		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 
 	vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
@@ -195,8 +195,9 @@
 		return -ENOMEM;
 	}
 
-	if (wil->use_extended_dma_addr)
-		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+	if (wil->dma_addr_size > 32)
+		dma_set_mask_and_coherent(dev,
+					  DMA_BIT_MASK(wil->dma_addr_size));
 
 	/* initially, all descriptors are SW owned
 	 * For Tx and Rx, ownership bit is at the same location, thus
@@ -347,7 +348,6 @@
 static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
 				       struct sk_buff *skb)
 {
-	struct wireless_dev *wdev = wil->wdev;
 	struct wil6210_rtap {
 		struct ieee80211_radiotap_header rthdr;
 		/* fields should be in the order of bits in rthdr.it_present */
@@ -374,7 +374,7 @@
 	int rtap_len = sizeof(struct wil6210_rtap);
 	int phy_length = 0; /* phy info header size, bytes */
 	static char phy_data[128];
-	struct ieee80211_channel *ch = wdev->preset_chandef.chan;
+	struct ieee80211_channel *ch = wil->monitor_chandef.chan;
 
 	if (rtap_include_phy_info) {
 		rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index bb43f3f..33df230 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -83,18 +83,18 @@
  */
 #define WIL_MAX_MPDU_OVERHEAD	(62)
 
-struct wil_suspend_stats {
+struct wil_suspend_count_stats {
 	unsigned long successful_suspends;
-	unsigned long failed_suspends;
 	unsigned long successful_resumes;
+	unsigned long failed_suspends;
 	unsigned long failed_resumes;
-	unsigned long rejected_by_device;
+};
+
+struct wil_suspend_stats {
+	struct wil_suspend_count_stats r_off;
+	struct wil_suspend_count_stats r_on;
+	unsigned long rejected_by_device; /* only radio on */
 	unsigned long rejected_by_host;
-	unsigned long long total_suspend_time;
-	unsigned long long min_suspend_time;
-	unsigned long long max_suspend_time;
-	ktime_t collection_start;
-	ktime_t suspend_start_time;
 };
 
 /* Calculate MAC buffer size for the firmware. It includes all overhead,
@@ -440,7 +440,7 @@
 	wil_status_fwconnected,
 	wil_status_dontscan,
 	wil_status_mbox_ready, /* MBOX structures ready */
-	wil_status_irqen, /* FIXME: interrupts enabled - for debug */
+	wil_status_irqen, /* interrupts enabled - for debug */
 	wil_status_napi_en, /* NAPI enabled protected by wil->mutex */
 	wil_status_resetting, /* reset in progress */
 	wil_status_suspending, /* suspend in progress */
@@ -657,6 +657,7 @@
 	unsigned long last_fw_recovery; /* jiffies of last fw recovery */
 	wait_queue_head_t wq; /* for all wait_event() use */
 	/* profile */
+	struct cfg80211_chan_def monitor_chandef;
 	u32 monitor_flags;
 	u32 privacy; /* secure connection? */
 	u8 hidden_ssid; /* relevant in AP mode */
@@ -712,7 +713,7 @@
 	struct wil_sta_info sta[WIL6210_MAX_CID];
 	int bcast_vring;
 	u32 vring_idle_trsh; /* HW fetches up to 16 descriptors at once  */
-	bool use_extended_dma_addr; /* indicates whether we are using 48 bits */
+	u32 dma_addr_size; /* indicates dma addr size */
 	/* scan */
 	struct cfg80211_scan_request *scan_request;
 
@@ -1041,8 +1042,8 @@
 void wil_pm_runtime_put(struct wil6210_priv *wil);
 
 int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime);
-int wil_suspend(struct wil6210_priv *wil, bool is_runtime);
-int wil_resume(struct wil6210_priv *wil, bool is_runtime);
+int wil_suspend(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on);
+int wil_resume(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on);
 bool wil_is_wmi_idle(struct wil6210_priv *wil);
 int wmi_resume(struct wil6210_priv *wil);
 int wmi_suspend(struct wil6210_priv *wil);
@@ -1076,4 +1077,9 @@
 				bool fst_link_loss);
 
 int wmi_set_snr_thresh(struct wil6210_priv *wil, short omni, short direct);
+
+int wmi_start_sched_scan(struct wil6210_priv *wil,
+			 struct cfg80211_sched_scan_request *request);
+int wmi_stop_sched_scan(struct wil6210_priv *wil);
+
 #endif /* __WIL6210_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 9520c39..07659b12 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -39,6 +39,7 @@
 		 " 60G device led enablement. Set the led ID (0-2) to enable");
 
 #define WIL_WAIT_FOR_SUSPEND_RESUME_COMP 200
+#define WIL_WMI_CALL_GENERAL_TO_MS 100
 
 /**
  * WMI event receiving - theory of operations
@@ -208,6 +209,242 @@
 	return 0;
 }
 
+static const char *cmdid2name(u16 cmdid)
+{
+	switch (cmdid) {
+	case WMI_NOTIFY_REQ_CMDID:
+		return "WMI_NOTIFY_REQ_CMD";
+	case WMI_START_SCAN_CMDID:
+		return "WMI_START_SCAN_CMD";
+	case WMI_CONNECT_CMDID:
+		return "WMI_CONNECT_CMD";
+	case WMI_DISCONNECT_CMDID:
+		return "WMI_DISCONNECT_CMD";
+	case WMI_SW_TX_REQ_CMDID:
+		return "WMI_SW_TX_REQ_CMD";
+	case WMI_GET_RF_SECTOR_PARAMS_CMDID:
+		return "WMI_GET_RF_SECTOR_PARAMS_CMD";
+	case WMI_SET_RF_SECTOR_PARAMS_CMDID:
+		return "WMI_SET_RF_SECTOR_PARAMS_CMD";
+	case WMI_GET_SELECTED_RF_SECTOR_INDEX_CMDID:
+		return "WMI_GET_SELECTED_RF_SECTOR_INDEX_CMD";
+	case WMI_SET_SELECTED_RF_SECTOR_INDEX_CMDID:
+		return "WMI_SET_SELECTED_RF_SECTOR_INDEX_CMD";
+	case WMI_BRP_SET_ANT_LIMIT_CMDID:
+		return "WMI_BRP_SET_ANT_LIMIT_CMD";
+	case WMI_TOF_SESSION_START_CMDID:
+		return "WMI_TOF_SESSION_START_CMD";
+	case WMI_AOA_MEAS_CMDID:
+		return "WMI_AOA_MEAS_CMD";
+	case WMI_PMC_CMDID:
+		return "WMI_PMC_CMD";
+	case WMI_TOF_GET_TX_RX_OFFSET_CMDID:
+		return "WMI_TOF_GET_TX_RX_OFFSET_CMD";
+	case WMI_TOF_SET_TX_RX_OFFSET_CMDID:
+		return "WMI_TOF_SET_TX_RX_OFFSET_CMD";
+	case WMI_VRING_CFG_CMDID:
+		return "WMI_VRING_CFG_CMD";
+	case WMI_BCAST_VRING_CFG_CMDID:
+		return "WMI_BCAST_VRING_CFG_CMD";
+	case WMI_TRAFFIC_SUSPEND_CMDID:
+		return "WMI_TRAFFIC_SUSPEND_CMD";
+	case WMI_TRAFFIC_RESUME_CMDID:
+		return "WMI_TRAFFIC_RESUME_CMD";
+	case WMI_ECHO_CMDID:
+		return "WMI_ECHO_CMD";
+	case WMI_SET_MAC_ADDRESS_CMDID:
+		return "WMI_SET_MAC_ADDRESS_CMD";
+	case WMI_LED_CFG_CMDID:
+		return "WMI_LED_CFG_CMD";
+	case WMI_PCP_START_CMDID:
+		return "WMI_PCP_START_CMD";
+	case WMI_PCP_STOP_CMDID:
+		return "WMI_PCP_STOP_CMD";
+	case WMI_SET_SSID_CMDID:
+		return "WMI_SET_SSID_CMD";
+	case WMI_GET_SSID_CMDID:
+		return "WMI_GET_SSID_CMD";
+	case WMI_SET_PCP_CHANNEL_CMDID:
+		return "WMI_SET_PCP_CHANNEL_CMD";
+	case WMI_GET_PCP_CHANNEL_CMDID:
+		return "WMI_GET_PCP_CHANNEL_CMD";
+	case WMI_P2P_CFG_CMDID:
+		return "WMI_P2P_CFG_CMD";
+	case WMI_START_LISTEN_CMDID:
+		return "WMI_START_LISTEN_CMD";
+	case WMI_START_SEARCH_CMDID:
+		return "WMI_START_SEARCH_CMD";
+	case WMI_DISCOVERY_STOP_CMDID:
+		return "WMI_DISCOVERY_STOP_CMD";
+	case WMI_DELETE_CIPHER_KEY_CMDID:
+		return "WMI_DELETE_CIPHER_KEY_CMD";
+	case WMI_ADD_CIPHER_KEY_CMDID:
+		return "WMI_ADD_CIPHER_KEY_CMD";
+	case WMI_SET_APPIE_CMDID:
+		return "WMI_SET_APPIE_CMD";
+	case WMI_CFG_RX_CHAIN_CMDID:
+		return "WMI_CFG_RX_CHAIN_CMD";
+	case WMI_TEMP_SENSE_CMDID:
+		return "WMI_TEMP_SENSE_CMD";
+	case WMI_DEL_STA_CMDID:
+		return "WMI_DEL_STA_CMD";
+	case WMI_DISCONNECT_STA_CMDID:
+		return "WMI_DISCONNECT_STA_CMD";
+	case WMI_VRING_BA_EN_CMDID:
+		return "WMI_VRING_BA_EN_CMD";
+	case WMI_VRING_BA_DIS_CMDID:
+		return "WMI_VRING_BA_DIS_CMD";
+	case WMI_RCP_DELBA_CMDID:
+		return "WMI_RCP_DELBA_CMD";
+	case WMI_RCP_ADDBA_RESP_CMDID:
+		return "WMI_RCP_ADDBA_RESP_CMD";
+	case WMI_PS_DEV_PROFILE_CFG_CMDID:
+		return "WMI_PS_DEV_PROFILE_CFG_CMD";
+	case WMI_SET_MGMT_RETRY_LIMIT_CMDID:
+		return "WMI_SET_MGMT_RETRY_LIMIT_CMD";
+	case WMI_GET_MGMT_RETRY_LIMIT_CMDID:
+		return "WMI_GET_MGMT_RETRY_LIMIT_CMD";
+	case WMI_ABORT_SCAN_CMDID:
+		return "WMI_ABORT_SCAN_CMD";
+	case WMI_NEW_STA_CMDID:
+		return "WMI_NEW_STA_CMD";
+	case WMI_SET_THERMAL_THROTTLING_CFG_CMDID:
+		return "WMI_SET_THERMAL_THROTTLING_CFG_CMD";
+	case WMI_GET_THERMAL_THROTTLING_CFG_CMDID:
+		return "WMI_GET_THERMAL_THROTTLING_CFG_CMD";
+	case WMI_LINK_MAINTAIN_CFG_WRITE_CMDID:
+		return "WMI_LINK_MAINTAIN_CFG_WRITE_CMD";
+	case WMI_LO_POWER_CALIB_FROM_OTP_CMDID:
+		return "WMI_LO_POWER_CALIB_FROM_OTP_CMD";
+	case WMI_START_SCHED_SCAN_CMDID:
+		return "WMI_START_SCHED_SCAN_CMD";
+	case WMI_STOP_SCHED_SCAN_CMDID:
+		return "WMI_STOP_SCHED_SCAN_CMD";
+	default:
+		return "Untracked CMD";
+	}
+}
+
+static const char *eventid2name(u16 eventid)
+{
+	switch (eventid) {
+	case WMI_NOTIFY_REQ_DONE_EVENTID:
+		return "WMI_NOTIFY_REQ_DONE_EVENT";
+	case WMI_DISCONNECT_EVENTID:
+		return "WMI_DISCONNECT_EVENT";
+	case WMI_SW_TX_COMPLETE_EVENTID:
+		return "WMI_SW_TX_COMPLETE_EVENT";
+	case WMI_GET_RF_SECTOR_PARAMS_DONE_EVENTID:
+		return "WMI_GET_RF_SECTOR_PARAMS_DONE_EVENT";
+	case WMI_SET_RF_SECTOR_PARAMS_DONE_EVENTID:
+		return "WMI_SET_RF_SECTOR_PARAMS_DONE_EVENT";
+	case WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID:
+		return "WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENT";
+	case WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID:
+		return "WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENT";
+	case WMI_BRP_SET_ANT_LIMIT_EVENTID:
+		return "WMI_BRP_SET_ANT_LIMIT_EVENT";
+	case WMI_FW_READY_EVENTID:
+		return "WMI_FW_READY_EVENT";
+	case WMI_TRAFFIC_RESUME_EVENTID:
+		return "WMI_TRAFFIC_RESUME_EVENT";
+	case WMI_TOF_GET_TX_RX_OFFSET_EVENTID:
+		return "WMI_TOF_GET_TX_RX_OFFSET_EVENT";
+	case WMI_TOF_SET_TX_RX_OFFSET_EVENTID:
+		return "WMI_TOF_SET_TX_RX_OFFSET_EVENT";
+	case WMI_VRING_CFG_DONE_EVENTID:
+		return "WMI_VRING_CFG_DONE_EVENT";
+	case WMI_READY_EVENTID:
+		return "WMI_READY_EVENT";
+	case WMI_RX_MGMT_PACKET_EVENTID:
+		return "WMI_RX_MGMT_PACKET_EVENT";
+	case WMI_TX_MGMT_PACKET_EVENTID:
+		return "WMI_TX_MGMT_PACKET_EVENT";
+	case WMI_SCAN_COMPLETE_EVENTID:
+		return "WMI_SCAN_COMPLETE_EVENT";
+	case WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENTID:
+		return "WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENT";
+	case WMI_CONNECT_EVENTID:
+		return "WMI_CONNECT_EVENT";
+	case WMI_EAPOL_RX_EVENTID:
+		return "WMI_EAPOL_RX_EVENT";
+	case WMI_BA_STATUS_EVENTID:
+		return "WMI_BA_STATUS_EVENT";
+	case WMI_RCP_ADDBA_REQ_EVENTID:
+		return "WMI_RCP_ADDBA_REQ_EVENT";
+	case WMI_DELBA_EVENTID:
+		return "WMI_DELBA_EVENT";
+	case WMI_VRING_EN_EVENTID:
+		return "WMI_VRING_EN_EVENT";
+	case WMI_DATA_PORT_OPEN_EVENTID:
+		return "WMI_DATA_PORT_OPEN_EVENT";
+	case WMI_AOA_MEAS_EVENTID:
+		return "WMI_AOA_MEAS_EVENT";
+	case WMI_TOF_SESSION_END_EVENTID:
+		return "WMI_TOF_SESSION_END_EVENT";
+	case WMI_TOF_GET_CAPABILITIES_EVENTID:
+		return "WMI_TOF_GET_CAPABILITIES_EVENT";
+	case WMI_TOF_SET_LCR_EVENTID:
+		return "WMI_TOF_SET_LCR_EVENT";
+	case WMI_TOF_SET_LCI_EVENTID:
+		return "WMI_TOF_SET_LCI_EVENT";
+	case WMI_TOF_FTM_PER_DEST_RES_EVENTID:
+		return "WMI_TOF_FTM_PER_DEST_RES_EVENT";
+	case WMI_TOF_CHANNEL_INFO_EVENTID:
+		return "WMI_TOF_CHANNEL_INFO_EVENT";
+	case WMI_TRAFFIC_SUSPEND_EVENTID:
+		return "WMI_TRAFFIC_SUSPEND_EVENT";
+	case WMI_ECHO_RSP_EVENTID:
+		return "WMI_ECHO_RSP_EVENT";
+	case WMI_LED_CFG_DONE_EVENTID:
+		return "WMI_LED_CFG_DONE_EVENT";
+	case WMI_PCP_STARTED_EVENTID:
+		return "WMI_PCP_STARTED_EVENT";
+	case WMI_PCP_STOPPED_EVENTID:
+		return "WMI_PCP_STOPPED_EVENT";
+	case WMI_GET_SSID_EVENTID:
+		return "WMI_GET_SSID_EVENT";
+	case WMI_GET_PCP_CHANNEL_EVENTID:
+		return "WMI_GET_PCP_CHANNEL_EVENT";
+	case WMI_P2P_CFG_DONE_EVENTID:
+		return "WMI_P2P_CFG_DONE_EVENT";
+	case WMI_LISTEN_STARTED_EVENTID:
+		return "WMI_LISTEN_STARTED_EVENT";
+	case WMI_SEARCH_STARTED_EVENTID:
+		return "WMI_SEARCH_STARTED_EVENT";
+	case WMI_DISCOVERY_STOPPED_EVENTID:
+		return "WMI_DISCOVERY_STOPPED_EVENT";
+	case WMI_CFG_RX_CHAIN_DONE_EVENTID:
+		return "WMI_CFG_RX_CHAIN_DONE_EVENT";
+	case WMI_TEMP_SENSE_DONE_EVENTID:
+		return "WMI_TEMP_SENSE_DONE_EVENT";
+	case WMI_RCP_ADDBA_RESP_SENT_EVENTID:
+		return "WMI_RCP_ADDBA_RESP_SENT_EVENT";
+	case WMI_PS_DEV_PROFILE_CFG_EVENTID:
+		return "WMI_PS_DEV_PROFILE_CFG_EVENT";
+	case WMI_SET_MGMT_RETRY_LIMIT_EVENTID:
+		return "WMI_SET_MGMT_RETRY_LIMIT_EVENT";
+	case WMI_GET_MGMT_RETRY_LIMIT_EVENTID:
+		return "WMI_GET_MGMT_RETRY_LIMIT_EVENT";
+	case WMI_SET_THERMAL_THROTTLING_CFG_EVENTID:
+		return "WMI_SET_THERMAL_THROTTLING_CFG_EVENT";
+	case WMI_GET_THERMAL_THROTTLING_CFG_EVENTID:
+		return "WMI_GET_THERMAL_THROTTLING_CFG_EVENT";
+	case WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID:
+		return "WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENT";
+	case WMI_LO_POWER_CALIB_FROM_OTP_EVENTID:
+		return "WMI_LO_POWER_CALIB_FROM_OTP_EVENT";
+	case WMI_START_SCHED_SCAN_EVENTID:
+		return "WMI_START_SCHED_SCAN_EVENT";
+	case WMI_STOP_SCHED_SCAN_EVENTID:
+		return "WMI_STOP_SCHED_SCAN_EVENT";
+	case WMI_SCHED_SCAN_RESULT_EVENTID:
+		return "WMI_SCHED_SCAN_RESULT_EVENT";
+	default:
+		return "Untracked EVENT";
+	}
+}
+
 static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
 {
 	struct {
@@ -304,7 +541,8 @@
 	}
 	cmd.hdr.seq = cpu_to_le16(++wil->wmi_seq);
 	/* set command */
-	wil_dbg_wmi(wil, "WMI command 0x%04x [%d]\n", cmdid, len);
+	wil_dbg_wmi(wil, "sending %s (0x%04x) [%d]\n",
+		    cmdid2name(cmdid), cmdid, len);
 	wil_hex_dump_wmi("Cmd ", DUMP_PREFIX_OFFSET, 16, 1, &cmd,
 			 sizeof(cmd), true);
 	wil_hex_dump_wmi("cmd ", DUMP_PREFIX_OFFSET, 16, 1, buf,
@@ -581,8 +819,6 @@
 		}
 	}
 
-	/* FIXME FW can transmit only ucast frames to peer */
-	/* FIXME real ring_id instead of hard coded 0 */
 	ether_addr_copy(wil->sta[evt->cid].addr, evt->bssid);
 	wil->sta[evt->cid].status = wil_sta_conn_pending;
 
@@ -869,6 +1105,75 @@
 	wil_ftm_evt_per_dest_res(wil, evt);
 }
 
+static void
+wmi_evt_sched_scan_result(struct wil6210_priv *wil, int id, void *d, int len)
+{
+	struct wmi_sched_scan_result_event *data = d;
+	struct wiphy *wiphy = wil_to_wiphy(wil);
+	struct ieee80211_mgmt *rx_mgmt_frame =
+		(struct ieee80211_mgmt *)data->payload;
+	int flen = len - offsetof(struct wmi_sched_scan_result_event, payload);
+	int ch_no;
+	u32 freq;
+	struct ieee80211_channel *channel;
+	s32 signal;
+	__le16 fc;
+	u32 d_len;
+	struct cfg80211_bss *bss;
+
+	if (flen < 0) {
+		wil_err(wil, "sched scan result event too short, len %d\n",
+			len);
+		return;
+	}
+
+	d_len = le32_to_cpu(data->info.len);
+	if (d_len != flen) {
+		wil_err(wil,
+			"sched scan result length mismatch, d_len %d should be %d\n",
+			d_len, flen);
+		return;
+	}
+
+	fc = rx_mgmt_frame->frame_control;
+	if (!ieee80211_is_probe_resp(fc)) {
+		wil_err(wil, "sched scan result invalid frame, fc 0x%04x\n",
+			fc);
+		return;
+	}
+
+	ch_no = data->info.channel + 1;
+	freq = ieee80211_channel_to_frequency(ch_no, NL80211_BAND_60GHZ);
+	channel = ieee80211_get_channel(wiphy, freq);
+	if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities))
+		signal = 100 * data->info.rssi;
+	else
+		signal = data->info.sqi;
+
+	wil_dbg_wmi(wil, "sched scan result: channel %d MCS %d RSSI %d\n",
+		    data->info.channel, data->info.mcs, data->info.rssi);
+	wil_dbg_wmi(wil, "len %d qid %d mid %d cid %d\n",
+		    d_len, data->info.qid, data->info.mid, data->info.cid);
+	wil_hex_dump_wmi("PROBE ", DUMP_PREFIX_OFFSET, 16, 1, rx_mgmt_frame,
+			 d_len, true);
+
+	if (!channel) {
+		wil_err(wil, "Frame on unsupported channel\n");
+		return;
+	}
+
+	bss = cfg80211_inform_bss_frame(wiphy, channel, rx_mgmt_frame,
+					d_len, signal, GFP_KERNEL);
+	if (bss) {
+		wil_dbg_wmi(wil, "Added BSS %pM\n", rx_mgmt_frame->bssid);
+		cfg80211_put_bss(wiphy, bss);
+	} else {
+		wil_err(wil, "cfg80211_inform_bss_frame() failed\n");
+	}
+
+	cfg80211_sched_scan_results(wiphy);
+}
+
 /**
  * Some events are ignored for purpose; and need not be interpreted as
  * "unhandled events"
@@ -903,6 +1208,7 @@
 	{WMI_TOF_SET_LCI_EVENTID,		wmi_evt_ignore},
 	{WMI_TOF_FTM_PER_DEST_RES_EVENTID,	wmi_evt_per_dest_res},
 	{WMI_TOF_CHANNEL_INFO_EVENTID,		wmi_evt_ignore},
+	{WMI_SCHED_SCAN_RESULT_EVENTID,		wmi_evt_sched_scan_result},
 };
 
 /*
@@ -1009,8 +1315,8 @@
 			}
 			spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
 
-			wil_dbg_wmi(wil, "WMI event 0x%04x MID %d @%d msec\n",
-				    id, wmi->mid, tstamp);
+			wil_dbg_wmi(wil, "recv %s (0x%04x) MID %d @%d msec\n",
+				    eventid2name(id), id, wmi->mid, tstamp);
 			trace_wil6210_wmi_event(wmi, &wmi[1],
 						len - sizeof(*wmi));
 		}
@@ -1513,7 +1819,7 @@
 	int rc;
 
 	if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
-		struct ieee80211_channel *ch = wdev->preset_chandef.chan;
+		struct ieee80211_channel *ch = wil->monitor_chandef.chan;
 
 		cmd.sniffer_cfg.mode = cpu_to_le32(WMI_SNIFFER_ON);
 		if (ch)
@@ -1969,6 +2275,16 @@
 	return rc;
 }
 
+static const char *suspend_status2name(u8 status)
+{
+	switch (status) {
+	case WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE:
+		return "LINK_NOT_IDLE";
+	default:
+		return "Untracked status";
+	}
+}
+
 int wmi_suspend(struct wil6210_priv *wil)
 {
 	int rc;
@@ -1984,7 +2300,7 @@
 	wil->suspend_resp_rcvd = false;
 	wil->suspend_resp_comp = false;
 
-	reply.evt.status = WMI_TRAFFIC_SUSPEND_REJECTED;
+	reply.evt.status = WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE;
 
 	rc = wmi_call(wil, WMI_TRAFFIC_SUSPEND_CMDID, &cmd, sizeof(cmd),
 		      WMI_TRAFFIC_SUSPEND_EVENTID, &reply, sizeof(reply),
@@ -2016,8 +2332,9 @@
 	}
 
 	wil_dbg_wmi(wil, "suspend_response_completed rcvd\n");
-	if (reply.evt.status == WMI_TRAFFIC_SUSPEND_REJECTED) {
-		wil_dbg_pm(wil, "device rejected the suspend\n");
+	if (reply.evt.status != WMI_TRAFFIC_SUSPEND_APPROVED) {
+		wil_dbg_pm(wil, "device rejected the suspend, %s\n",
+			   suspend_status2name(reply.evt.status));
 		wil->suspend_stats.rejected_by_device++;
 	}
 	rc = reply.evt.status;
@@ -2029,21 +2346,50 @@
 	return rc;
 }
 
+static void resume_triggers2string(u32 triggers, char *string, int str_size)
+{
+	string[0] = '\0';
+
+	if (!triggers) {
+		strlcat(string, " UNKNOWN", str_size);
+		return;
+	}
+
+	if (triggers & WMI_RESUME_TRIGGER_HOST)
+		strlcat(string, " HOST", str_size);
+
+	if (triggers & WMI_RESUME_TRIGGER_UCAST_RX)
+		strlcat(string, " UCAST_RX", str_size);
+
+	if (triggers & WMI_RESUME_TRIGGER_BCAST_RX)
+		strlcat(string, " BCAST_RX", str_size);
+
+	if (triggers & WMI_RESUME_TRIGGER_WMI_EVT)
+		strlcat(string, " WMI_EVT", str_size);
+}
+
 int wmi_resume(struct wil6210_priv *wil)
 {
 	int rc;
+	char string[100];
 	struct {
 		struct wmi_cmd_hdr wmi;
 		struct wmi_traffic_resume_event evt;
 	} __packed reply;
 
 	reply.evt.status = WMI_TRAFFIC_RESUME_FAILED;
+	reply.evt.resume_triggers = WMI_RESUME_TRIGGER_UNKNOWN;
 
 	rc = wmi_call(wil, WMI_TRAFFIC_RESUME_CMDID, NULL, 0,
 		      WMI_TRAFFIC_RESUME_EVENTID, &reply, sizeof(reply),
 		      WIL_WAIT_FOR_SUSPEND_RESUME_COMP);
 	if (rc)
 		return rc;
+	resume_triggers2string(le32_to_cpu(reply.evt.resume_triggers), string,
+			       sizeof(string));
+	wil_dbg_pm(wil, "device resume %s, resume triggers:%s (0x%x)\n",
+		   reply.evt.status ? "failed" : "passed", string,
+		   le32_to_cpu(reply.evt.resume_triggers));
 
 	return reply.evt.status;
 }
@@ -2074,8 +2420,8 @@
 		void *evt_data = (void *)(&wmi[1]);
 		u16 id = le16_to_cpu(wmi->command_id);
 
-		wil_dbg_wmi(wil, "Handle WMI 0x%04x (reply_id 0x%04x)\n",
-			    id, wil->reply_id);
+		wil_dbg_wmi(wil, "Handle %s (0x%04x) (reply_id 0x%04x)\n",
+			    eventid2name(id), id, wil->reply_id);
 		/* check if someone waits for this event */
 		if (wil->reply_id && wil->reply_id == id) {
 			WARN_ON(wil->reply_buf);
@@ -2199,3 +2545,159 @@
 
 	return 0;
 }
+
+static void
+wmi_sched_scan_set_ssids(struct wil6210_priv *wil,
+			 struct wmi_start_sched_scan_cmd *cmd,
+			 struct cfg80211_ssid *ssids, int n_ssids,
+			 struct cfg80211_match_set *match_sets,
+			 int n_match_sets)
+{
+	int i;
+
+	if (n_match_sets > WMI_MAX_PNO_SSID_NUM) {
+		wil_dbg_wmi(wil, "too many match sets (%d), use first %d\n",
+			    n_match_sets, WMI_MAX_PNO_SSID_NUM);
+		n_match_sets = WMI_MAX_PNO_SSID_NUM;
+	}
+	cmd->num_of_ssids = n_match_sets;
+
+	for (i = 0; i < n_match_sets; i++) {
+		struct wmi_sched_scan_ssid_match *wmi_match =
+			&cmd->ssid_for_match[i];
+		struct cfg80211_match_set *cfg_match = &match_sets[i];
+		int j;
+
+		wmi_match->ssid_len = cfg_match->ssid.ssid_len;
+		memcpy(wmi_match->ssid, cfg_match->ssid.ssid,
+		       min_t(u8, wmi_match->ssid_len, WMI_MAX_SSID_LEN));
+		wmi_match->rssi_threshold = S8_MIN;
+		if (cfg_match->rssi_thold >= S8_MIN &&
+		    cfg_match->rssi_thold <= S8_MAX)
+			wmi_match->rssi_threshold = cfg_match->rssi_thold;
+
+		for (j = 0; j < n_ssids; j++)
+			if (wmi_match->ssid_len == ssids[j].ssid_len &&
+			    memcmp(wmi_match->ssid, ssids[j].ssid,
+				   wmi_match->ssid_len) == 0)
+				wmi_match->add_ssid_to_probe = true;
+	}
+}
+
+static void
+wmi_sched_scan_set_channels(struct wil6210_priv *wil,
+			    struct wmi_start_sched_scan_cmd *cmd,
+			    u32 n_channels,
+			    struct ieee80211_channel **channels)
+{
+	int i;
+
+	if (n_channels > WMI_MAX_CHANNEL_NUM) {
+		wil_dbg_wmi(wil, "too many channels (%d), use first %d\n",
+			    n_channels, WMI_MAX_CHANNEL_NUM);
+		n_channels = WMI_MAX_CHANNEL_NUM;
+	}
+	cmd->num_of_channels = n_channels;
+
+	for (i = 0; i < n_channels; i++) {
+		struct ieee80211_channel *cfg_chan = channels[i];
+
+		cmd->channel_list[i] = cfg_chan->hw_value - 1;
+	}
+}
+
+static void
+wmi_sched_scan_set_plans(struct wil6210_priv *wil,
+			 struct wmi_start_sched_scan_cmd *cmd,
+			 struct cfg80211_sched_scan_plan *scan_plans,
+			 int n_scan_plans)
+{
+	int i;
+
+	if (n_scan_plans > WMI_MAX_PLANS_NUM) {
+		wil_dbg_wmi(wil, "too many plans (%d), use first %d\n",
+			    n_scan_plans, WMI_MAX_PLANS_NUM);
+		n_scan_plans = WMI_MAX_PLANS_NUM;
+	}
+
+	for (i = 0; i < n_scan_plans; i++) {
+		struct cfg80211_sched_scan_plan *cfg_plan = &scan_plans[i];
+
+		cmd->scan_plans[i].interval_sec =
+			cpu_to_le16(cfg_plan->interval);
+		cmd->scan_plans[i].num_of_iterations =
+			cpu_to_le16(cfg_plan->iterations);
+	}
+}
+
+int wmi_start_sched_scan(struct wil6210_priv *wil,
+			 struct cfg80211_sched_scan_request *request)
+{
+	int rc;
+	struct wmi_start_sched_scan_cmd cmd = {
+		.min_rssi_threshold = S8_MIN,
+		.initial_delay_sec = cpu_to_le16(request->delay),
+	};
+	struct {
+		struct wmi_cmd_hdr wmi;
+		struct wmi_start_sched_scan_event evt;
+	} __packed reply;
+
+	if (!test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities))
+		return -ENOTSUPP;
+
+	if (request->min_rssi_thold >= S8_MIN &&
+	    request->min_rssi_thold <= S8_MAX)
+		cmd.min_rssi_threshold = request->min_rssi_thold;
+
+	wmi_sched_scan_set_ssids(wil, &cmd, request->ssids, request->n_ssids,
+				 request->match_sets, request->n_match_sets);
+	wmi_sched_scan_set_channels(wil, &cmd,
+				    request->n_channels, request->channels);
+	wmi_sched_scan_set_plans(wil, &cmd,
+				 request->scan_plans, request->n_scan_plans);
+
+	reply.evt.result = WMI_PNO_REJECT;
+
+	rc = wmi_call(wil, WMI_START_SCHED_SCAN_CMDID, &cmd, sizeof(cmd),
+		      WMI_START_SCHED_SCAN_EVENTID, &reply, sizeof(reply),
+		      WIL_WMI_CALL_GENERAL_TO_MS);
+	if (rc)
+		return rc;
+
+	if (reply.evt.result != WMI_PNO_SUCCESS) {
+		wil_err(wil, "start sched scan failed, result %d\n",
+			reply.evt.result);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int wmi_stop_sched_scan(struct wil6210_priv *wil)
+{
+	int rc;
+	struct {
+		struct wmi_cmd_hdr wmi;
+		struct wmi_stop_sched_scan_event evt;
+	} __packed reply;
+
+	if (!test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities))
+		return -ENOTSUPP;
+
+	reply.evt.result = WMI_PNO_REJECT;
+
+	rc = wmi_call(wil, WMI_STOP_SCHED_SCAN_CMDID, NULL, 0,
+		      WMI_STOP_SCHED_SCAN_EVENTID, &reply, sizeof(reply),
+		      WIL_WMI_CALL_GENERAL_TO_MS);
+	if (rc)
+		return rc;
+
+	if (reply.evt.result != WMI_PNO_SUCCESS) {
+		wil_err(wil, "stop sched scan failed, result %d\n",
+			reply.evt.result);
+		return -EINVAL;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 809e320..28568dc 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -71,6 +71,7 @@
 	WMI_FW_CAPABILITY_RSSI_REPORTING		= 12,
 	WMI_FW_CAPABILITY_SET_SILENT_RSSI_TABLE		= 13,
 	WMI_FW_CAPABILITY_LO_POWER_CALIB_FROM_OTP	= 14,
+	WMI_FW_CAPABILITY_PNO				= 15,
 	WMI_FW_CAPABILITY_CONNECT_SNR_THR		= 16,
 	WMI_FW_CAPABILITY_REF_CLOCK_CONTROL		= 18,
 	WMI_FW_CAPABILITY_MAX,
@@ -89,6 +90,8 @@
 	WMI_CONNECT_CMDID				= 0x01,
 	WMI_DISCONNECT_CMDID				= 0x03,
 	WMI_DISCONNECT_STA_CMDID			= 0x04,
+	WMI_START_SCHED_SCAN_CMDID			= 0x05,
+	WMI_STOP_SCHED_SCAN_CMDID			= 0x06,
 	WMI_START_SCAN_CMDID				= 0x07,
 	WMI_SET_BSS_FILTER_CMDID			= 0x09,
 	WMI_SET_PROBED_SSID_CMDID			= 0x0A,
@@ -387,6 +390,38 @@
 	} channel_list[0];
 } __packed;
 
+#define WMI_MAX_PNO_SSID_NUM	(16)
+#define WMI_MAX_CHANNEL_NUM	(6)
+#define WMI_MAX_PLANS_NUM	(2)
+
+/* WMI_START_SCHED_SCAN_CMDID */
+struct wmi_sched_scan_ssid_match {
+	u8 ssid_len;
+	u8 ssid[WMI_MAX_SSID_LEN];
+	s8 rssi_threshold;
+	/* boolean */
+	u8 add_ssid_to_probe;
+	u8 reserved;
+} __packed;
+
+/* WMI_START_SCHED_SCAN_CMDID */
+struct wmi_sched_scan_plan {
+	__le16 interval_sec;
+	__le16 num_of_iterations;
+} __packed;
+
+/* WMI_START_SCHED_SCAN_CMDID */
+struct wmi_start_sched_scan_cmd {
+	struct wmi_sched_scan_ssid_match ssid_for_match[WMI_MAX_PNO_SSID_NUM];
+	u8 num_of_ssids;
+	s8 min_rssi_threshold;
+	u8 channel_list[WMI_MAX_CHANNEL_NUM];
+	u8 num_of_channels;
+	u8 reserved;
+	__le16 initial_delay_sec;
+	struct wmi_sched_scan_plan scan_plans[WMI_MAX_PLANS_NUM];
+} __packed;
+
 /* WMI_SET_PROBED_SSID_CMDID */
 #define MAX_PROBED_SSID_INDEX	(3)
 
@@ -1240,6 +1275,9 @@
 	WMI_READY_EVENTID				= 0x1001,
 	WMI_CONNECT_EVENTID				= 0x1002,
 	WMI_DISCONNECT_EVENTID				= 0x1003,
+	WMI_START_SCHED_SCAN_EVENTID			= 0x1005,
+	WMI_STOP_SCHED_SCAN_EVENTID			= 0x1006,
+	WMI_SCHED_SCAN_RESULT_EVENTID			= 0x1007,
 	WMI_SCAN_COMPLETE_EVENTID			= 0x100A,
 	WMI_REPORT_STATISTICS_EVENTID			= 0x100B,
 	WMI_RD_MEM_RSP_EVENTID				= 0x1800,
@@ -1602,6 +1640,49 @@
 	__le32 status;
 } __packed;
 
+/* wmi_rx_mgmt_info */
+struct wmi_rx_mgmt_info {
+	u8 mcs;
+	s8 rssi;
+	u8 range;
+	u8 sqi;
+	__le16 stype;
+	__le16 snr;
+	__le32 len;
+	/* Not resolved when == 0xFFFFFFFF == > Broadcast to all MIDS */
+	u8 qid;
+	/* Not resolved when == 0xFFFFFFFF == > Broadcast to all MIDS */
+	u8 mid;
+	u8 cid;
+	/* From Radio MNGR */
+	u8 channel;
+} __packed;
+
+/* WMI_START_SCHED_SCAN_EVENTID */
+enum wmi_pno_result {
+	WMI_PNO_SUCCESS			= 0x00,
+	WMI_PNO_REJECT			= 0x01,
+	WMI_PNO_INVALID_PARAMETERS	= 0x02,
+	WMI_PNO_NOT_ENABLED		= 0x03,
+};
+
+struct wmi_start_sched_scan_event {
+	/* pno_result */
+	u8 result;
+	u8 reserved[3];
+} __packed;
+
+struct wmi_stop_sched_scan_event {
+	/* pno_result */
+	u8 result;
+	u8 reserved[3];
+} __packed;
+
+struct wmi_sched_scan_result_event {
+	struct wmi_rx_mgmt_info info;
+	u8 payload[0];
+} __packed;
+
 /* WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENT */
 enum wmi_acs_info_bitmask {
 	WMI_ACS_INFO_BITMASK_BEACON_FOUND	= 0x01,
@@ -1816,24 +1897,6 @@
 	u8 ssid[WMI_MAX_SSID_LEN];
 } __packed;
 
-/* wmi_rx_mgmt_info */
-struct wmi_rx_mgmt_info {
-	u8 mcs;
-	s8 rssi;
-	u8 range;
-	u8 sqi;
-	__le16 stype;
-	__le16 snr;
-	__le32 len;
-	/* Not resolved when == 0xFFFFFFFF == > Broadcast to all MIDS */
-	u8 qid;
-	/* Not resolved when == 0xFFFFFFFF == > Broadcast to all MIDS */
-	u8 mid;
-	u8 cid;
-	/* From Radio MNGR */
-	u8 channel;
-} __packed;
-
 /* EVENT: WMI_RF_XPM_READ_RESULT_EVENTID */
 struct wmi_rf_xpm_read_result_event {
 	/* enum wmi_fw_status_e - success=0 or fail=1 */
@@ -2269,8 +2332,8 @@
 } __packed;
 
 enum wmi_traffic_suspend_status {
-	WMI_TRAFFIC_SUSPEND_APPROVED	= 0x0,
-	WMI_TRAFFIC_SUSPEND_REJECTED	= 0x1,
+	WMI_TRAFFIC_SUSPEND_APPROVED			= 0x0,
+	WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE	= 0x1,
 };
 
 /* WMI_TRAFFIC_SUSPEND_EVENTID */
@@ -2284,10 +2347,21 @@
 	WMI_TRAFFIC_RESUME_FAILED	= 0x1,
 };
 
+enum wmi_resume_trigger {
+	WMI_RESUME_TRIGGER_UNKNOWN	= 0x0,
+	WMI_RESUME_TRIGGER_HOST		= 0x1,
+	WMI_RESUME_TRIGGER_UCAST_RX	= 0x2,
+	WMI_RESUME_TRIGGER_BCAST_RX	= 0x4,
+	WMI_RESUME_TRIGGER_WMI_EVT	= 0x8,
+};
+
 /* WMI_TRAFFIC_RESUME_EVENTID */
 struct wmi_traffic_resume_event {
-	/* enum wmi_traffic_resume_status_e */
+	/* enum wmi_traffic_resume_status */
 	u8 status;
+	u8 reserved[3];
+	/* enum wmi_resume_trigger bitmap */
+	__le32 resume_triggers;
 } __packed;
 
 /* Power Save command completion status codes */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 8e3c6f4..edffe5a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -4080,8 +4080,8 @@
 	sdio_release_host(sdiodev->func[1]);
 fail:
 	brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
-	device_release_driver(dev);
 	device_release_driver(&sdiodev->func[2]->dev);
+	device_release_driver(dev);
 }
 
 struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
diff --git a/drivers/net/wireless/cnss2/Kconfig b/drivers/net/wireless/cnss2/Kconfig
new file mode 100644
index 0000000..daa343e
--- /dev/null
+++ b/drivers/net/wireless/cnss2/Kconfig
@@ -0,0 +1,40 @@
+config CNSS2
+	tristate "CNSS2 Platform Driver for Wi-Fi Module"
+	depends on !CNSS && PCI_MSM
+	---help---
+	  This module adds the support for Connectivity Subsystem (CNSS) used
+	  for PCIe based Wi-Fi devices with QCA6174/QCA6290 chipsets.
+	  This driver also adds support to integrate WLAN module to subsystem
+	  restart framework.
+
+config CNSS2_DEBUG
+	bool "CNSS2 Platform Driver Debug Support"
+	depends on CNSS2
+	---help---
+	  This option is to enable CNSS2 platform driver debug support which
+	  primarily includes providing additional verbose logs for certain
+	  features, enabling kernel panic for certain cases to aid the
+	  debugging, and enabling any other debug mechanisms.
+
+config CNSS_ASYNC
+	bool "Enable/disable CNSS platform driver asynchronous probe"
+	depends on CNSS2
+	---help---
+	  If enabled, CNSS platform driver would do asynchronous probe.
+	  Using asynchronous probe will allow CNSS platform driver to
+	  probe in parallel with other device drivers and will help to
+	  reduce kernel boot time.
+
+config BUS_AUTO_SUSPEND
+	bool "Enable/Disable Runtime PM support for PCIe based WLAN Drivers"
+	depends on CNSS2
+	depends on PCI
+	---help---
+	  Runtime Power Management is supported for PCIe based WLAN Drivers.
+	  The features enable cld wlan driver to suspend pcie bus when APPS
+	  is awake based on the driver inactivity with the Firmware.
+	  The Feature uses runtime power management framework from kernel to
+	  track bus access clients and to synchronize the driver activity
+	  during system pm.
+	  This config flag controls the feature per target based. The feature
+	  requires CNSS driver support.
diff --git a/drivers/net/wireless/cnss2/Makefile b/drivers/net/wireless/cnss2/Makefile
new file mode 100644
index 0000000..b49d089
--- /dev/null
+++ b/drivers/net/wireless/cnss2/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_CNSS2) += cnss2.o
+
+cnss2-y := main.o
+cnss2-y += debug.o
+cnss2-y += pci.o
+cnss2-y += power.o
+cnss2-y += qmi.o
+cnss2-y += wlan_firmware_service_v01.o
diff --git a/drivers/net/wireless/cnss2/debug.c b/drivers/net/wireless/cnss2/debug.c
new file mode 100644
index 0000000..5e2d44c
--- /dev/null
+++ b/drivers/net/wireless/cnss2/debug.c
@@ -0,0 +1,495 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include "main.h"
+#include "debug.h"
+#include "pci.h"
+
+#define CNSS_IPC_LOG_PAGES		32
+
+void *cnss_ipc_log_context;
+
+/* debugfs "pin_connect_result" read handler: dump the pin connect test
+ * results stored in plat_priv->pin_result (FW power/PHY IO/RF pins and
+ * the host pin result).
+ */
+static int cnss_pin_connect_show(struct seq_file *s, void *data)
+{
+	struct cnss_plat_data *cnss_priv = s->private;
+
+	seq_puts(s, "Pin connect results\n");
+	seq_printf(s, "FW power pin result: %04x\n",
+		   cnss_priv->pin_result.fw_pwr_pin_result);
+	seq_printf(s, "FW PHY IO pin result: %04x\n",
+		   cnss_priv->pin_result.fw_phy_io_pin_result);
+	seq_printf(s, "FW RF pin result: %04x\n",
+		   cnss_priv->pin_result.fw_rf_pin_result);
+	seq_printf(s, "Host pin result: %04x\n",
+		   cnss_priv->pin_result.host_pin_result);
+	seq_puts(s, "\n");
+
+	return 0;
+}
+
+/* Bind the seq_file machinery to cnss_pin_connect_show. */
+static int cnss_pin_connect_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, cnss_pin_connect_show, inode->i_private);
+}
+
+/* Read-only node; single_release frees the state single_open allocated. */
+static const struct file_operations cnss_pin_connect_fops = {
+	.read		= seq_read,
+	.release	= single_release,
+	.open		= cnss_pin_connect_open,
+	.owner		= THIS_MODULE,
+	.llseek		= seq_lseek,
+};
+
+/* Decode plat_priv->driver_state (a bitmask of enum cnss_driver_state
+ * bits) into a human-readable "A | B | C" list. Bits without a known
+ * name are printed as UNKNOWN-<bit>.
+ */
+static int cnss_stats_show_state(struct seq_file *s,
+				 struct cnss_plat_data *plat_priv)
+{
+	enum cnss_driver_state i;
+	int skip = 0;
+	unsigned long state;
+
+	seq_printf(s, "\nState: 0x%lx(", plat_priv->driver_state);
+	/* Walk set bits LSB-first; i tracks the current bit position. */
+	for (i = 0, state = plat_priv->driver_state; state != 0;
+	     state >>= 1, i++) {
+		if (!(state & 0x1))
+			continue;
+
+		/* Separator before every name except the first. */
+		if (skip++)
+			seq_puts(s, " | ");
+
+		switch (i) {
+		case CNSS_QMI_WLFW_CONNECTED:
+			seq_puts(s, "QMI_WLFW_CONNECTED");
+			continue;
+		case CNSS_FW_MEM_READY:
+			seq_puts(s, "FW_MEM_READY");
+			continue;
+		case CNSS_FW_READY:
+			seq_puts(s, "FW_READY");
+			continue;
+		case CNSS_COLD_BOOT_CAL:
+			seq_puts(s, "COLD_BOOT_CAL");
+			continue;
+		case CNSS_DRIVER_LOADING:
+			seq_puts(s, "DRIVER_LOADING");
+			continue;
+		case CNSS_DRIVER_UNLOADING:
+			seq_puts(s, "DRIVER_UNLOADING");
+			continue;
+		case CNSS_DRIVER_PROBED:
+			seq_puts(s, "DRIVER_PROBED");
+			continue;
+		case CNSS_DRIVER_RECOVERY:
+			seq_puts(s, "DRIVER_RECOVERY");
+			continue;
+		case CNSS_FW_BOOT_RECOVERY:
+			seq_puts(s, "FW_BOOT_RECOVERY");
+			continue;
+		case CNSS_DEV_ERR_NOTIFY:
+			seq_puts(s, "DEV_ERR");
+			continue;
+		case CNSS_DRIVER_DEBUG:
+			seq_puts(s, "DRIVER_DEBUG");
+			continue;
+		}
+
+		/* Only reached when the bit is not named above. */
+		seq_printf(s, "UNKNOWN-%d", i);
+	}
+	seq_puts(s, ")\n");
+
+	return 0;
+}
+
+/* debugfs "stats" read handler: currently only the driver state word. */
+static int cnss_stats_show(struct seq_file *s, void *data)
+{
+	struct cnss_plat_data *plat_priv = s->private;
+
+	cnss_stats_show_state(s, plat_priv);
+
+	return 0;
+}
+
+static int cnss_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, cnss_stats_show, inode->i_private);
+}
+
+/* Read-only node; single_release frees the state single_open allocated. */
+static const struct file_operations cnss_stats_fops = {
+	.read		= seq_read,
+	.release	= single_release,
+	.open		= cnss_stats_open,
+	.owner		= THIS_MODULE,
+	.llseek		= seq_lseek,
+};
+
+/* debugfs "dev_boot" write handler: drive individual device boot steps
+ * (power, PCIe enumeration, FW download, full power up/down) from user
+ * space for bring-up and debug. See cnss_dev_boot_debug_show for the
+ * accepted commands.
+ *
+ * NOTE(review): input longer than 63 bytes is silently truncated before
+ * the command compare — confirm this is acceptable for a debug node.
+ */
+static ssize_t cnss_dev_boot_debug_write(struct file *fp,
+					 const char __user *user_buf,
+					 size_t count, loff_t *off)
+{
+	struct cnss_plat_data *plat_priv =
+		((struct seq_file *)fp->private_data)->private;
+	char buf[64];
+	char *cmd;
+	unsigned int len = 0;
+	int ret = 0;
+
+	len = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+	cmd = buf;
+
+	if (sysfs_streq(cmd, "on")) {
+		ret = cnss_power_on_device(plat_priv);
+	} else if (sysfs_streq(cmd, "off")) {
+		cnss_power_off_device(plat_priv);
+	} else if (sysfs_streq(cmd, "enumerate")) {
+		ret = cnss_pci_init(plat_priv);
+	} else if (sysfs_streq(cmd, "download")) {
+		/* DRIVER_DEBUG suppresses the normal probe path while the
+		 * device is driven manually from debugfs.
+		 */
+		set_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state);
+		ret = cnss_pci_start_mhi(plat_priv->bus_priv);
+	} else if (sysfs_streq(cmd, "linkup")) {
+		ret = cnss_resume_pci_link(plat_priv->bus_priv);
+	} else if (sysfs_streq(cmd, "linkdown")) {
+		ret = cnss_suspend_pci_link(plat_priv->bus_priv);
+	} else if (sysfs_streq(cmd, "powerup")) {
+		set_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state);
+		ret = cnss_driver_event_post(plat_priv,
+					     CNSS_DRIVER_EVENT_POWER_UP,
+					     CNSS_EVENT_SYNC, NULL);
+	} else if (sysfs_streq(cmd, "shutdown")) {
+		ret = cnss_driver_event_post(plat_priv,
+					     CNSS_DRIVER_EVENT_POWER_DOWN,
+					     CNSS_EVENT_SYNC, NULL);
+		clear_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state);
+	} else {
+		cnss_pr_err("Device boot debugfs command is invalid\n");
+		ret = -EINVAL;
+	}
+
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+/* debugfs "dev_boot" read handler: print the node's usage text. */
+static int cnss_dev_boot_debug_show(struct seq_file *s, void *data)
+{
+	static const char * const usage[] = {
+		"\nUsage: echo <action> > <debugfs_path>/cnss/dev_boot\n",
+		"<action> can be one of below:\n",
+		"on: turn on device power, assert WLAN_EN\n",
+		"off: de-assert WLAN_EN, turn off device power\n",
+		"enumerate: de-assert PERST, enumerate PCIe\n",
+		"download: download FW and do QMI handshake with FW\n",
+		"linkup: bring up PCIe link\n",
+		"linkdown: bring down PCIe link\n",
+		"powerup: full power on sequence to boot device, download FW and do QMI handshake with FW\n",
+		"shutdown: full power off sequence to shutdown device\n",
+	};
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(usage); i++)
+		seq_puts(s, usage[i]);
+
+	return 0;
+}
+
+/* Bind the seq_file machinery to cnss_dev_boot_debug_show. */
+static int cnss_dev_boot_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, cnss_dev_boot_debug_show, inode->i_private);
+}
+
+/* Read/write node; single_release frees single_open's allocation. */
+static const struct file_operations cnss_dev_boot_debug_fops = {
+	.read		= seq_read,
+	.write		= cnss_dev_boot_debug_write,
+	.release	= single_release,
+	.open		= cnss_dev_boot_debug_open,
+	.owner		= THIS_MODULE,
+	.llseek		= seq_lseek,
+};
+
+/* debugfs "reg_read" read handler: dump the buffer filled by the most
+ * recent write to the node, then free it (one-shot result). If no read
+ * result is pending, print the usage line instead.
+ */
+static int cnss_reg_read_debug_show(struct seq_file *s, void *data)
+{
+	struct cnss_plat_data *plat_priv = s->private;
+
+	/* dev_lock serializes against cnss_reg_read_debug_write(). */
+	mutex_lock(&plat_priv->dev_lock);
+	if (!plat_priv->diag_reg_read_buf) {
+		seq_puts(s, "\nUsage: echo <mem_type> <offset> <data_len> > <debugfs_path>/cnss/reg_read\n");
+		mutex_unlock(&plat_priv->dev_lock);
+		return 0;
+	}
+
+	seq_printf(s, "\nRegister read, address: 0x%x memory type: 0x%x length: 0x%x\n\n",
+		   plat_priv->diag_reg_read_addr,
+		   plat_priv->diag_reg_read_mem_type,
+		   plat_priv->diag_reg_read_len);
+
+	seq_hex_dump(s, "", DUMP_PREFIX_OFFSET, 32, 4,
+		     plat_priv->diag_reg_read_buf,
+		     plat_priv->diag_reg_read_len, false);
+
+	/* Consume the result so the next read shows usage again. */
+	plat_priv->diag_reg_read_len = 0;
+	kfree(plat_priv->diag_reg_read_buf);
+	plat_priv->diag_reg_read_buf = NULL;
+	mutex_unlock(&plat_priv->dev_lock);
+
+	return 0;
+}
+
+/* debugfs "reg_read" write handler: parse "<mem_type> <offset> <data_len>",
+ * issue a WLFW athdiag read over QMI, and stash the result in
+ * plat_priv->diag_reg_read_* for the next read of the node to display.
+ */
+static ssize_t cnss_reg_read_debug_write(struct file *fp,
+					 const char __user *user_buf,
+					 size_t count, loff_t *off)
+{
+	struct cnss_plat_data *plat_priv =
+		((struct seq_file *)fp->private_data)->private;
+	char buf[64];
+	char *sptr, *token;
+	unsigned int len = 0;
+	u32 reg_offset, mem_type;
+	u32 data_len = 0;
+	u8 *reg_buf = NULL;
+	const char *delim = " ";
+	int ret = 0;
+
+	/* The athdiag QMI service only works once FW is up. */
+	if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
+		cnss_pr_err("Firmware is not ready yet\n");
+		return -EINVAL;
+	}
+
+	len = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+	sptr = buf;
+
+	/* Token 1: memory type. */
+	token = strsep(&sptr, delim);
+	if (!token)
+		return -EINVAL;
+
+	if (!sptr)
+		return -EINVAL;
+
+	if (kstrtou32(token, 0, &mem_type))
+		return -EINVAL;
+
+	/* Token 2: register offset. */
+	token = strsep(&sptr, delim);
+	if (!token)
+		return -EINVAL;
+
+	if (!sptr)
+		return -EINVAL;
+
+	if (kstrtou32(token, 0, &reg_offset))
+		return -EINVAL;
+
+	/* Token 3: read length, bounded by the QMI message capacity. */
+	token = strsep(&sptr, delim);
+	if (!token)
+		return -EINVAL;
+
+	if (kstrtou32(token, 0, &data_len))
+		return -EINVAL;
+
+	if (data_len == 0 ||
+	    data_len > QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01)
+		return -EINVAL;
+
+	/* Drop any previous (unread) result before issuing a new read. */
+	mutex_lock(&plat_priv->dev_lock);
+	kfree(plat_priv->diag_reg_read_buf);
+	plat_priv->diag_reg_read_buf = NULL;
+
+	reg_buf = kzalloc(data_len, GFP_KERNEL);
+	if (!reg_buf) {
+		mutex_unlock(&plat_priv->dev_lock);
+		return -ENOMEM;
+	}
+
+	ret = cnss_wlfw_athdiag_read_send_sync(plat_priv, reg_offset,
+					       mem_type, data_len,
+					       reg_buf);
+	if (ret) {
+		kfree(reg_buf);
+		mutex_unlock(&plat_priv->dev_lock);
+		return ret;
+	}
+
+	/* Publish the result for cnss_reg_read_debug_show(). */
+	plat_priv->diag_reg_read_addr = reg_offset;
+	plat_priv->diag_reg_read_mem_type = mem_type;
+	plat_priv->diag_reg_read_len = data_len;
+	plat_priv->diag_reg_read_buf = reg_buf;
+	mutex_unlock(&plat_priv->dev_lock);
+
+	return count;
+}
+
+/* Bind the seq_file machinery to cnss_reg_read_debug_show. */
+static int cnss_reg_read_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, cnss_reg_read_debug_show, inode->i_private);
+}
+
+/* single_open() allocates per-open seq_file state; .release must be
+ * single_release to free it, otherwise every open/close of this node
+ * leaks memory (the other fops in this file already do this).
+ */
+static const struct file_operations cnss_reg_read_debug_fops = {
+	.read		= seq_read,
+	.write		= cnss_reg_read_debug_write,
+	.release	= single_release,
+	.open		= cnss_reg_read_debug_open,
+	.owner		= THIS_MODULE,
+	.llseek		= seq_lseek,
+};
+
+/* debugfs "reg_write" read handler: print the node's usage line. */
+static int cnss_reg_write_debug_show(struct seq_file *s, void *data)
+{
+	seq_puts(s, "\nUsage: echo <mem_type> <offset> <reg_val> > <debugfs_path>/cnss/reg_write\n");
+
+	return 0;
+}
+
+/* debugfs "reg_write" write handler: parse "<mem_type> <offset> <reg_val>"
+ * and issue a single 32-bit WLFW athdiag write over QMI.
+ */
+static ssize_t cnss_reg_write_debug_write(struct file *fp,
+					  const char __user *user_buf,
+					  size_t count, loff_t *off)
+{
+	struct cnss_plat_data *plat_priv =
+		((struct seq_file *)fp->private_data)->private;
+	char buf[64];
+	char *sptr, *token;
+	unsigned int len = 0;
+	u32 reg_offset, mem_type, reg_val;
+	const char *delim = " ";
+	int ret = 0;
+
+	/* The athdiag QMI service only works once FW is up. */
+	if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
+		cnss_pr_err("Firmware is not ready yet\n");
+		return -EINVAL;
+	}
+
+	len = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+	sptr = buf;
+
+	/* Token 1: memory type. */
+	token = strsep(&sptr, delim);
+	if (!token)
+		return -EINVAL;
+
+	if (!sptr)
+		return -EINVAL;
+
+	if (kstrtou32(token, 0, &mem_type))
+		return -EINVAL;
+
+	/* Token 2: register offset. */
+	token = strsep(&sptr, delim);
+	if (!token)
+		return -EINVAL;
+
+	if (!sptr)
+		return -EINVAL;
+
+	if (kstrtou32(token, 0, &reg_offset))
+		return -EINVAL;
+
+	/* Token 3: value to write. */
+	token = strsep(&sptr, delim);
+	if (!token)
+		return -EINVAL;
+
+	if (kstrtou32(token, 0, &reg_val))
+		return -EINVAL;
+
+	ret = cnss_wlfw_athdiag_write_send_sync(plat_priv, reg_offset, mem_type,
+						sizeof(u32),
+						(u8 *)&reg_val);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+/* Bind the seq_file machinery to cnss_reg_write_debug_show. */
+static int cnss_reg_write_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, cnss_reg_write_debug_show, inode->i_private);
+}
+
+/* single_open() allocates per-open seq_file state; .release must be
+ * single_release to free it, otherwise every open/close of this node
+ * leaks memory (the other fops in this file already do this).
+ */
+static const struct file_operations cnss_reg_write_debug_fops = {
+	.read		= seq_read,
+	.write		= cnss_reg_write_debug_write,
+	.release	= single_release,
+	.open		= cnss_reg_write_debug_open,
+	.owner		= THIS_MODULE,
+	.llseek		= seq_lseek,
+};
+
+#ifdef CONFIG_CNSS2_DEBUG
+/* Create the debugfs nodes that are only exposed on debug builds:
+ * dev_boot, reg_read and reg_write (all root-only, mode 0600).
+ */
+static int cnss_create_debug_only_node(struct cnss_plat_data *plat_priv)
+{
+	struct dentry *root_dentry = plat_priv->root_dentry;
+
+	debugfs_create_file("dev_boot", 0600, root_dentry, plat_priv,
+			    &cnss_dev_boot_debug_fops);
+	debugfs_create_file("reg_read", 0600, root_dentry, plat_priv,
+			    &cnss_reg_read_debug_fops);
+	debugfs_create_file("reg_write", 0600, root_dentry, plat_priv,
+			    &cnss_reg_write_debug_fops);
+
+	return 0;
+}
+#else
+/* Production builds: no debug-only nodes. */
+static int cnss_create_debug_only_node(struct cnss_plat_data *plat_priv)
+{
+	return 0;
+}
+#endif
+
+/**
+ * cnss_debugfs_create() - create the "cnss" debugfs directory and nodes
+ * @plat_priv: platform driver private data
+ *
+ * Creates the always-present "pin_connect_result" and "stats" nodes plus
+ * the debug-build-only nodes. Returns 0 on success or a negative errno.
+ */
+int cnss_debugfs_create(struct cnss_plat_data *plat_priv)
+{
+	int ret = 0;
+	struct dentry *root_dentry;
+
+	/* debugfs_create_dir() wants a dentry pointer (or NULL) for the
+	 * parent, and may return NULL on allocation failure as well as an
+	 * ERR_PTR when debugfs is unavailable — IS_ERR() alone misses the
+	 * NULL case.
+	 */
+	root_dentry = debugfs_create_dir("cnss", NULL);
+	if (IS_ERR_OR_NULL(root_dentry)) {
+		ret = root_dentry ? PTR_ERR(root_dentry) : -ENOMEM;
+		cnss_pr_err("Unable to create debugfs %d\n", ret);
+		goto out;
+	}
+
+	plat_priv->root_dentry = root_dentry;
+
+	debugfs_create_file("pin_connect_result", 0644, root_dentry, plat_priv,
+			    &cnss_pin_connect_fops);
+	debugfs_create_file("stats", 0644, root_dentry, plat_priv,
+			    &cnss_stats_fops);
+
+	cnss_create_debug_only_node(plat_priv);
+
+out:
+	return ret;
+}
+
+/* Tear down the whole "cnss" debugfs tree created above. */
+void cnss_debugfs_destroy(struct cnss_plat_data *plat_priv)
+{
+	debugfs_remove_recursive(plat_priv->root_dentry);
+}
+
+/* Allocate the IPC logging context used by the cnss_pr_* macros.
+ * Returns 0 on success, -EINVAL if the context cannot be created.
+ */
+int cnss_debug_init(void)
+{
+	cnss_ipc_log_context = ipc_log_context_create(CNSS_IPC_LOG_PAGES,
+						      "cnss", 0);
+	if (!cnss_ipc_log_context) {
+		cnss_pr_err("Unable to create IPC log context!\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Release the IPC logging context; safe to call if init failed. */
+void cnss_debug_deinit(void)
+{
+	if (cnss_ipc_log_context) {
+		ipc_log_context_destroy(cnss_ipc_log_context);
+		cnss_ipc_log_context = NULL;
+	}
+}
diff --git a/drivers/net/wireless/cnss2/debug.h b/drivers/net/wireless/cnss2/debug.h
new file mode 100644
index 0000000..decc84a
--- /dev/null
+++ b/drivers/net/wireless/cnss2/debug.h
@@ -0,0 +1,69 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CNSS_DEBUG_H
+#define _CNSS_DEBUG_H
+
+#include <linux/ipc_logging.h>
+#include <linux/printk.h>
+
+/* Shared IPC logging context, allocated by cnss_debug_init(). */
+extern void *cnss_ipc_log_context;
+
+/* Mirror a log line into the IPC log buffer when it is available. */
+#define cnss_ipc_log_string(_x...) do {					\
+		if (cnss_ipc_log_context)				\
+			ipc_log_string(cnss_ipc_log_context, _x);	\
+	} while (0)
+
+/* The cnss_pr_* macros log to both the kernel ring buffer (with a
+ * "cnss:" prefix) and the IPC log, at the matching severity.
+ */
+#define cnss_pr_err(_fmt, ...) do {					\
+		pr_err("cnss: " _fmt, ##__VA_ARGS__);			\
+		cnss_ipc_log_string("ERR: " _fmt, ##__VA_ARGS__);	\
+	} while (0)
+
+#define cnss_pr_warn(_fmt, ...) do {					\
+		pr_warn("cnss: " _fmt, ##__VA_ARGS__);			\
+		cnss_ipc_log_string("WRN: " _fmt, ##__VA_ARGS__);	\
+	} while (0)
+
+#define cnss_pr_info(_fmt, ...) do {					\
+		pr_info("cnss: " _fmt, ##__VA_ARGS__);			\
+		cnss_ipc_log_string("INF: " _fmt, ##__VA_ARGS__);	\
+	} while (0)
+
+#define cnss_pr_dbg(_fmt, ...) do {					\
+		pr_debug("cnss: " _fmt, ##__VA_ARGS__);			\
+		cnss_ipc_log_string("DBG: " _fmt, ##__VA_ARGS__);	\
+	} while (0)
+
+/* CNSS_ASSERT: on debug builds a failed assertion panics the kernel
+ * (BUG_ON) to capture state; on production builds it only warns.
+ */
+#ifdef CONFIG_CNSS2_DEBUG
+#define CNSS_ASSERT(_condition) do {					\
+		if (!(_condition)) {					\
+			cnss_pr_err("ASSERT at line %d\n",		\
+				    __LINE__);				\
+			BUG_ON(1);					\
+		}							\
+	} while (0)
+#else
+#define CNSS_ASSERT(_condition) do {					\
+		if (!(_condition)) {					\
+			cnss_pr_err("ASSERT at line %d\n",		\
+				    __LINE__);				\
+			WARN_ON(1);					\
+		}							\
+	} while (0)
+#endif
+
+int cnss_debug_init(void);
+void cnss_debug_deinit(void);
+int cnss_debugfs_create(struct cnss_plat_data *plat_priv);
+void cnss_debugfs_destroy(struct cnss_plat_data *plat_priv);
+
+#endif /* _CNSS_DEBUG_H */
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
new file mode 100644
index 0000000..bcea74a
--- /dev/null
+++ b/drivers/net/wireless/cnss2/main.c
@@ -0,0 +1,2368 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pm_wakeup.h>
+#include <linux/rwsem.h>
+#include <linux/suspend.h>
+#include <linux/timer.h>
+#include <soc/qcom/ramdump.h>
+#include <soc/qcom/subsystem_notif.h>
+
+#include "main.h"
+#include "debug.h"
+#include "pci.h"
+
+#define CNSS_DUMP_FORMAT_VER		0x11
+#define CNSS_DUMP_FORMAT_VER_V2		0x22
+#define CNSS_DUMP_MAGIC_VER_V2		0x42445953
+#define CNSS_DUMP_NAME			"CNSS_WLAN"
+#define CNSS_DUMP_DESC_SIZE		0x1000
+#define CNSS_DUMP_SEG_VER		0x1
+#define WLAN_RECOVERY_DELAY		1000
+#define FILE_SYSTEM_READY		1
+#define FW_READY_TIMEOUT		20000
+#define FW_ASSERT_TIMEOUT		5000
+#define CNSS_EVENT_PENDING		2989
+#define WAKE_MSI_NAME			"WAKE"
+
+static struct cnss_plat_data *plat_env;
+
+static DECLARE_RWSEM(cnss_pm_sem);
+
+static bool qmi_bypass;
+#ifdef CONFIG_CNSS2_DEBUG
+module_param(qmi_bypass, bool, 0600);
+MODULE_PARM_DESC(qmi_bypass, "Bypass QMI from platform driver");
+#endif
+
+static bool enable_waltest;
+#ifdef CONFIG_CNSS2_DEBUG
+module_param(enable_waltest, bool, 0600);
+MODULE_PARM_DESC(enable_waltest, "Enable to handle firmware waltest");
+#endif
+
+/* Bit positions for the "quirks" module parameter below. */
+enum cnss_debug_quirks {
+	LINK_DOWN_SELF_RECOVERY,
+	SKIP_DEVICE_BOOT,
+	USE_CORE_ONLY_FW,
+	SKIP_RECOVERY,
+};
+
+unsigned long quirks;
+#ifdef CONFIG_CNSS2_DEBUG
+module_param(quirks, ulong, 0600);
+MODULE_PARM_DESC(quirks, "Debug quirks for the driver");
+#endif
+
+/* FW/BDF/OTP/UTF/epping/evicted file names per target generation. */
+static struct cnss_fw_files FW_FILES_QCA6174_FW_3_0 = {
+	"qwlan30.bin", "bdwlan30.bin", "otp30.bin", "utf30.bin",
+	"utfbd30.bin", "epping30.bin", "evicted30.bin"
+};
+
+static struct cnss_fw_files FW_FILES_DEFAULT = {
+	"qwlan.bin", "bdwlan.bin", "otp.bin", "utf.bin",
+	"utfbd.bin", "epping.bin", "evicted.bin"
+};
+
+/* One queued work item for the driver event machinery; when "sync" is
+ * set, the poster blocks on "complete" and collects "ret".
+ */
+struct cnss_driver_event {
+	struct list_head list;
+	enum cnss_driver_event_type type;
+	bool sync;
+	struct completion complete;
+	int ret;
+	void *data;
+};
+
+/* Map a generic device to the CNSS bus type it is attached to; only
+ * PCI is recognized, everything else is CNSS_BUS_NONE.
+ */
+static enum cnss_dev_bus_type cnss_get_dev_bus_type(struct device *dev)
+{
+	if (dev && dev->bus && memcmp(dev->bus->name, "pci", 3) == 0)
+		return CNSS_BUS_PCI;
+
+	return CNSS_BUS_NONE;
+}
+
+/* Store/fetch the single global platform context. The plat_dev argument
+ * is unused today — only one cnss platform device is supported.
+ */
+static void cnss_set_plat_priv(struct platform_device *plat_dev,
+			       struct cnss_plat_data *plat_priv)
+{
+	plat_env = plat_priv;
+}
+
+static struct cnss_plat_data *cnss_get_plat_priv(struct platform_device
+						 *plat_dev)
+{
+	return plat_env;
+}
+
+/* Resolve a generic device to its bus-level private data (currently the
+ * cnss_pci_data for PCI devices), or NULL if unknown.
+ */
+void *cnss_bus_dev_to_bus_priv(struct device *dev)
+{
+	if (!dev)
+		return NULL;
+
+	switch (cnss_get_dev_bus_type(dev)) {
+	case CNSS_BUS_PCI:
+		return cnss_get_pci_priv(to_pci_dev(dev));
+	default:
+		return NULL;
+	}
+}
+
+/* Resolve a generic device to the platform context. A NULL dev falls
+ * back to the global context (used before the bus device exists).
+ */
+struct cnss_plat_data *cnss_bus_dev_to_plat_priv(struct device *dev)
+{
+	void *bus_priv;
+
+	if (!dev)
+		return cnss_get_plat_priv(NULL);
+
+	bus_priv = cnss_bus_dev_to_bus_priv(dev);
+	if (!bus_priv)
+		return NULL;
+
+	switch (cnss_get_dev_bus_type(dev)) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_priv_to_plat_priv(bus_priv);
+	default:
+		return NULL;
+	}
+}
+
+/* System PM notifier: take cnss_pm_sem for writing across suspend so
+ * WLAN drivers holding it for reading (cnss_lock_pm_sem) block suspend.
+ */
+static int cnss_pm_notify(struct notifier_block *b,
+			  unsigned long event, void *p)
+{
+	switch (event) {
+	case PM_SUSPEND_PREPARE:
+		down_write(&cnss_pm_sem);
+		break;
+	case PM_POST_SUSPEND:
+		up_write(&cnss_pm_sem);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block cnss_pm_notifier = {
+	.notifier_call = cnss_pm_notify,
+};
+
+/* Refcounted wakeup-source hold: only the first holder actually calls
+ * pm_stay_awake().
+ */
+static void cnss_pm_stay_awake(struct cnss_plat_data *plat_priv)
+{
+	if (atomic_inc_return(&plat_priv->pm_count) != 1)
+		return;
+
+	cnss_pr_dbg("PM stay awake, state: 0x%lx, count: %d\n",
+		    plat_priv->driver_state,
+		    atomic_read(&plat_priv->pm_count));
+	pm_stay_awake(&plat_priv->plat_dev->dev);
+}
+
+/* Drop one hold; pm_relax() fires only when the count reaches zero.
+ * A negative count indicates unbalanced calls and triggers a WARN.
+ */
+static void cnss_pm_relax(struct cnss_plat_data *plat_priv)
+{
+	int r = atomic_dec_return(&plat_priv->pm_count);
+
+	WARN_ON(r < 0);
+
+	if (r != 0)
+		return;
+
+	cnss_pr_dbg("PM relax, state: 0x%lx, count: %d\n",
+		    plat_priv->driver_state,
+		    atomic_read(&plat_priv->pm_count));
+	pm_relax(&plat_priv->plat_dev->dev);
+}
+
+/* Exported to WLAN drivers: hold cnss_pm_sem for reading to keep system
+ * suspend from progressing (see cnss_pm_notify). Must be balanced with
+ * cnss_release_pm_sem().
+ */
+void cnss_lock_pm_sem(struct device *dev)
+{
+	down_read(&cnss_pm_sem);
+}
+EXPORT_SYMBOL(cnss_lock_pm_sem);
+
+void cnss_release_pm_sem(struct device *dev)
+{
+	up_read(&cnss_pm_sem);
+}
+EXPORT_SYMBOL(cnss_release_pm_sem);
+
+/**
+ * cnss_get_fw_files_for_target() - select FW file names for a target
+ * @dev: unused, kept for API symmetry
+ * @pfw_files: output; filled with the file-name set for the target
+ * @target_type: target type, logged only
+ * @target_version: selects between the QCA6174 3.x set and the default
+ *
+ * Unknown versions fall back to the default set (and are logged).
+ * Returns 0 on success, -ENODEV if @pfw_files is NULL.
+ */
+int cnss_get_fw_files_for_target(struct device *dev,
+				 struct cnss_fw_files *pfw_files,
+				 u32 target_type, u32 target_version)
+{
+	if (!pfw_files)
+		return -ENODEV;
+
+	switch (target_version) {
+	case QCA6174_REV3_VERSION:
+	case QCA6174_REV3_2_VERSION:
+		memcpy(pfw_files, &FW_FILES_QCA6174_FW_3_0, sizeof(*pfw_files));
+		break;
+	default:
+		memcpy(pfw_files, &FW_FILES_DEFAULT, sizeof(*pfw_files));
+		/* Add the missing newline so the log line is terminated
+		 * like every other message in this driver.
+		 */
+		cnss_pr_err("Unknown target version, type: 0x%X, version: 0x%X\n",
+			    target_type, target_version);
+		break;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(cnss_get_fw_files_for_target);
+
+/**
+ * cnss_request_bus_bandwidth() - vote for a bus bandwidth level
+ * @dev: WLAN device
+ * @bandwidth: one of the CNSS_BUS_WIDTH_* levels
+ *
+ * Forwards the vote to the MSM bus scaling client and caches the level
+ * on success. Returns 0 on success or a negative errno.
+ */
+int cnss_request_bus_bandwidth(struct device *dev, int bandwidth)
+{
+	int ret = 0;
+	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+	struct cnss_bus_bw_info *bus_bw_info;
+
+	if (!plat_priv)
+		return -ENODEV;
+
+	bus_bw_info = &plat_priv->bus_bw_info;
+	if (!bus_bw_info->bus_client)
+		return -EINVAL;
+
+	switch (bandwidth) {
+	case CNSS_BUS_WIDTH_NONE:
+	case CNSS_BUS_WIDTH_LOW:
+	case CNSS_BUS_WIDTH_MEDIUM:
+	case CNSS_BUS_WIDTH_HIGH:
+		ret = msm_bus_scale_client_update_request(
+			bus_bw_info->bus_client, bandwidth);
+		if (!ret)
+			bus_bw_info->current_bw_vote = bandwidth;
+		else
+			cnss_pr_err("Could not set bus bandwidth: %d, err = %d\n",
+				    bandwidth, ret);
+		break;
+	default:
+		/* Terminate the log line and close the case explicitly. */
+		cnss_pr_err("Invalid bus bandwidth: %d\n", bandwidth);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(cnss_request_bus_bandwidth);
+
+/* Copy the cached platform capabilities into @cap (if non-NULL). */
+int cnss_get_platform_cap(struct device *dev, struct cnss_platform_cap *cap)
+{
+	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+
+	if (!plat_priv)
+		return -ENODEV;
+
+	if (!cap)
+		return 0;
+
+	*cap = plat_priv->cap;
+	return 0;
+}
+EXPORT_SYMBOL(cnss_get_platform_cap);
+
+/* Fill @info with the device's PCI BAR virtual/physical addresses. */
+int cnss_get_soc_info(struct device *dev, struct cnss_soc_info *info)
+{
+	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+	void *bus_priv = cnss_bus_dev_to_bus_priv(dev);
+
+	if (!plat_priv)
+		return -ENODEV;
+
+	return cnss_pci_get_bar_info(bus_priv, &info->va, &info->pa);
+}
+EXPORT_SYMBOL(cnss_get_soc_info);
+
+/* Register a CPU DMA latency PM QoS request on behalf of the WLAN
+ * driver. Must be balanced with cnss_remove_pm_qos().
+ */
+void cnss_request_pm_qos(struct device *dev, u32 qos_val)
+{
+	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+
+	if (!plat_priv)
+		return;
+
+	pm_qos_add_request(&plat_priv->qos_request, PM_QOS_CPU_DMA_LATENCY,
+			   qos_val);
+}
+EXPORT_SYMBOL(cnss_request_pm_qos);
+
+void cnss_remove_pm_qos(struct device *dev)
+{
+	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+
+	if (!plat_priv)
+		return;
+
+	pm_qos_remove_request(&plat_priv->qos_request);
+}
+EXPORT_SYMBOL(cnss_remove_pm_qos);
+
+/**
+ * cnss_wlan_enable() - configure and start WLAN firmware via QMI
+ * @dev: WLAN device
+ * @config: CE/service pipe and shadow register configuration
+ * @mode: firmware mode to enter
+ * @host_version: host driver version string sent to firmware
+ *
+ * Sends the WLAN config message (skipped for WALTEST/CCPM modes and for
+ * QCA6174, which has no WLFW service) followed by the mode message.
+ * Returns 0 on success or a negative errno.
+ */
+int cnss_wlan_enable(struct device *dev,
+		     struct cnss_wlan_enable_cfg *config,
+		     enum cnss_driver_mode mode,
+		     const char *host_version)
+{
+	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+	struct wlfw_wlan_cfg_req_msg_v01 req;
+	u32 i;
+	int ret = 0;
+
+	/* Guard against calls before the platform driver has probed,
+	 * consistent with the other exported APIs in this file.
+	 */
+	if (!plat_priv)
+		return -ENODEV;
+
+	if (plat_priv->device_id == QCA6174_DEVICE_ID)
+		return 0;
+
+	if (qmi_bypass)
+		return 0;
+
+	if (!config || !host_version) {
+		cnss_pr_err("Invalid config or host_version pointer\n");
+		return -EINVAL;
+	}
+
+	cnss_pr_dbg("Mode: %d, config: %pK, host_version: %s\n",
+		    mode, config, host_version);
+
+	if (mode == CNSS_WALTEST || mode == CNSS_CCPM)
+		goto skip_cfg;
+
+	memset(&req, 0, sizeof(req));
+
+	req.host_version_valid = 1;
+	strlcpy(req.host_version, host_version,
+		QMI_WLFW_MAX_STR_LEN_V01 + 1);
+
+	/* Copy CE target config, clamped to the QMI message capacity. */
+	req.tgt_cfg_valid = 1;
+	if (config->num_ce_tgt_cfg > QMI_WLFW_MAX_NUM_CE_V01)
+		req.tgt_cfg_len = QMI_WLFW_MAX_NUM_CE_V01;
+	else
+		req.tgt_cfg_len = config->num_ce_tgt_cfg;
+	for (i = 0; i < req.tgt_cfg_len; i++) {
+		req.tgt_cfg[i].pipe_num = config->ce_tgt_cfg[i].pipe_num;
+		req.tgt_cfg[i].pipe_dir = config->ce_tgt_cfg[i].pipe_dir;
+		req.tgt_cfg[i].nentries = config->ce_tgt_cfg[i].nentries;
+		req.tgt_cfg[i].nbytes_max = config->ce_tgt_cfg[i].nbytes_max;
+		req.tgt_cfg[i].flags = config->ce_tgt_cfg[i].flags;
+	}
+
+	/* Copy CE service pipe config, clamped likewise. */
+	req.svc_cfg_valid = 1;
+	if (config->num_ce_svc_pipe_cfg > QMI_WLFW_MAX_NUM_SVC_V01)
+		req.svc_cfg_len = QMI_WLFW_MAX_NUM_SVC_V01;
+	else
+		req.svc_cfg_len = config->num_ce_svc_pipe_cfg;
+	for (i = 0; i < req.svc_cfg_len; i++) {
+		req.svc_cfg[i].service_id = config->ce_svc_cfg[i].service_id;
+		req.svc_cfg[i].pipe_dir = config->ce_svc_cfg[i].pipe_dir;
+		req.svc_cfg[i].pipe_num = config->ce_svc_cfg[i].pipe_num;
+	}
+
+	/* Copy shadow register v2 config, clamped likewise. */
+	req.shadow_reg_v2_valid = 1;
+	if (config->num_shadow_reg_v2_cfg >
+	    QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01)
+		req.shadow_reg_v2_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01;
+	else
+		req.shadow_reg_v2_len = config->num_shadow_reg_v2_cfg;
+
+	memcpy(req.shadow_reg_v2, config->shadow_reg_v2_cfg,
+	       sizeof(struct wlfw_shadow_reg_v2_cfg_s_v01)
+	       * req.shadow_reg_v2_len);
+
+	ret = cnss_wlfw_wlan_cfg_send_sync(plat_priv, &req);
+	if (ret)
+		goto out;
+
+skip_cfg:
+	ret = cnss_wlfw_wlan_mode_send_sync(plat_priv, mode);
+out:
+	return ret;
+}
+EXPORT_SYMBOL(cnss_wlan_enable);
+
+/**
+ * cnss_wlan_disable() - put WLAN firmware back into OFF mode
+ * @dev: WLAN device
+ * @mode: unused, kept for API symmetry with cnss_wlan_enable()
+ *
+ * No-op for QCA6174 (no WLFW service) and when QMI is bypassed.
+ * Returns 0 on success or a negative errno.
+ */
+int cnss_wlan_disable(struct device *dev, enum cnss_driver_mode mode)
+{
+	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+
+	/* Guard against calls before the platform driver has probed,
+	 * consistent with the other exported APIs in this file.
+	 */
+	if (!plat_priv)
+		return -ENODEV;
+
+	if (plat_priv->device_id == QCA6174_DEVICE_ID)
+		return 0;
+
+	if (qmi_bypass)
+		return 0;
+
+	return cnss_wlfw_wlan_mode_send_sync(plat_priv, QMI_WLFW_OFF_V01);
+}
+EXPORT_SYMBOL(cnss_wlan_disable);
+
+#ifdef CONFIG_CNSS2_DEBUG
+/* Read @data_len bytes from device memory via the WLFW athdiag QMI
+ * service. Only valid once firmware is ready; no-op for QCA6174.
+ * NOTE(review): %p hashes pointers on modern kernels — %pK may be the
+ * intended specifier here; confirm against the file's logging policy.
+ */
+int cnss_athdiag_read(struct device *dev, u32 offset, u32 mem_type,
+		      u32 data_len, u8 *output)
+{
+	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+	int ret = 0;
+
+	if (!plat_priv) {
+		cnss_pr_err("plat_priv is NULL!\n");
+		return -EINVAL;
+	}
+
+	if (plat_priv->device_id == QCA6174_DEVICE_ID)
+		return 0;
+
+	if (!output || data_len == 0 || data_len > QMI_WLFW_MAX_DATA_SIZE_V01) {
+		cnss_pr_err("Invalid parameters for athdiag read: output %p, data_len %u\n",
+			    output, data_len);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
+		cnss_pr_err("Invalid state for athdiag read: 0x%lx\n",
+			    plat_priv->driver_state);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = cnss_wlfw_athdiag_read_send_sync(plat_priv, offset, mem_type,
+					       data_len, output);
+
+out:
+	return ret;
+}
+EXPORT_SYMBOL(cnss_athdiag_read);
+
+/* Write @data_len bytes to device memory via the WLFW athdiag QMI
+ * service. Same preconditions as cnss_athdiag_read().
+ */
+int cnss_athdiag_write(struct device *dev, u32 offset, u32 mem_type,
+		       u32 data_len, u8 *input)
+{
+	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+	int ret = 0;
+
+	if (!plat_priv) {
+		cnss_pr_err("plat_priv is NULL!\n");
+		return -EINVAL;
+	}
+
+	if (plat_priv->device_id == QCA6174_DEVICE_ID)
+		return 0;
+
+	if (!input || data_len == 0 || data_len > QMI_WLFW_MAX_DATA_SIZE_V01) {
+		cnss_pr_err("Invalid parameters for athdiag write: input %p, data_len %u\n",
+			    input, data_len);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
+		cnss_pr_err("Invalid state for athdiag write: 0x%lx\n",
+			    plat_priv->driver_state);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = cnss_wlfw_athdiag_write_send_sync(plat_priv, offset, mem_type,
+						data_len, input);
+
+out:
+	return ret;
+}
+EXPORT_SYMBOL(cnss_athdiag_write);
+#else
+/* Production builds: athdiag access is disabled entirely. */
+int cnss_athdiag_read(struct device *dev, u32 offset, u32 mem_type,
+		      u32 data_len, u8 *output)
+{
+	return -EPERM;
+}
+EXPORT_SYMBOL(cnss_athdiag_read);
+
+int cnss_athdiag_write(struct device *dev, u32 offset, u32 mem_type,
+		       u32 data_len, u8 *input)
+{
+	return -EPERM;
+}
+EXPORT_SYMBOL(cnss_athdiag_write);
+#endif
+
+/**
+ * cnss_set_fw_log_mode() - set firmware logging mode via the WLFW INI
+ * @dev: WLAN device
+ * @fw_log_mode: mode value forwarded to firmware
+ *
+ * No-op for QCA6174. Returns 0 on success or a negative errno.
+ */
+int cnss_set_fw_log_mode(struct device *dev, u8 fw_log_mode)
+{
+	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+
+	/* Guard against calls before the platform driver has probed,
+	 * consistent with the other exported APIs in this file.
+	 */
+	if (!plat_priv)
+		return -ENODEV;
+
+	if (plat_priv->device_id == QCA6174_DEVICE_ID)
+		return 0;
+
+	return cnss_wlfw_ini_send_sync(plat_priv, fw_log_mode);
+}
+EXPORT_SYMBOL(cnss_set_fw_log_mode);
+
+/* Look up the "WAKE" MSI assignment and return its base data value, or
+ * 0 if the assignment is not valid.
+ * NOTE(review): bus_priv is dereferenced without a NULL check — confirm
+ * callers only invoke this after PCI probe has populated it.
+ */
+u32 cnss_get_wake_msi(struct cnss_plat_data *plat_priv)
+{
+	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+	int ret, num_vectors;
+	u32 user_base_data, base_vector;
+
+	ret = cnss_get_user_msi_assignment(&pci_priv->pci_dev->dev,
+					   WAKE_MSI_NAME, &num_vectors,
+					   &user_base_data, &base_vector);
+
+	if (ret) {
+		cnss_pr_err("WAKE MSI is not valid\n");
+		return 0;
+	}
+
+	return user_base_data;
+}
+
+/* Handler for the FW_MEM_READY driver event: mark the state bit, then
+ * run the firmware bring-up sequence — target capability exchange, BDF
+ * download, M3 image load and M3 download. Stops at the first failure
+ * and returns its errno; 0 on success.
+ */
+static int cnss_fw_mem_ready_hdlr(struct cnss_plat_data *plat_priv)
+{
+	int ret;
+
+	if (!plat_priv)
+		return -ENODEV;
+
+	set_bit(CNSS_FW_MEM_READY, &plat_priv->driver_state);
+
+	ret = cnss_wlfw_tgt_cap_send_sync(plat_priv);
+	if (ret)
+		return ret;
+
+	ret = cnss_wlfw_bdf_dnld_send_sync(plat_priv);
+	if (ret)
+		return ret;
+
+	ret = cnss_pci_load_m3(plat_priv->bus_priv);
+	if (ret)
+		return ret;
+
+	return cnss_wlfw_m3_dnld_send_sync(plat_priv);
+}
+
+/* Invoke the registered WLAN host driver's entry point: reinit() when
+ * recovering from a firmware crash, probe() on first load. Skipped
+ * entirely while the device is being driven manually via debugfs
+ * (CNSS_DRIVER_DEBUG).
+ */
+static int cnss_driver_call_probe(struct cnss_plat_data *plat_priv)
+{
+	int ret = 0;
+	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+
+	if (test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
+		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+		cnss_pr_dbg("Skip driver probe\n");
+		goto out;
+	}
+
+	if (!plat_priv->driver_ops) {
+		cnss_pr_err("driver_ops is NULL\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
+		ret = plat_priv->driver_ops->reinit(pci_priv->pci_dev,
+						    pci_priv->pci_device_id);
+		if (ret) {
+			cnss_pr_err("Failed to reinit host driver, err = %d\n",
+				    ret);
+			goto out;
+		}
+		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+	} else if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state)) {
+		ret = plat_priv->driver_ops->probe(pci_priv->pci_dev,
+						   pci_priv->pci_device_id);
+		if (ret) {
+			cnss_pr_err("Failed to probe host driver, err = %d\n",
+				    ret);
+			goto out;
+		}
+		clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
+		set_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
+	}
+
+	return 0;
+
+out:
+	return ret;
+}
+
+/* Invoke the registered WLAN host driver's teardown: shutdown() during
+ * recovery, remove() on normal unload. Skipped during cold boot
+ * calibration, FW boot recovery, and manual debugfs operation.
+ */
+static int cnss_driver_call_remove(struct cnss_plat_data *plat_priv)
+{
+	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+
+	if (test_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state) ||
+	    test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state) ||
+	    test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
+		cnss_pr_dbg("Skip driver remove\n");
+		return 0;
+	}
+
+	if (!plat_priv->driver_ops) {
+		cnss_pr_err("driver_ops is NULL\n");
+		return -EINVAL;
+	}
+
+	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
+		plat_priv->driver_ops->shutdown(pci_priv->pci_dev);
+	} else if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) {
+		plat_priv->driver_ops->remove(pci_priv->pci_dev);
+		clear_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
+	}
+
+	return 0;
+}
+
+/* Handler for the FW_READY driver event: stop the boot watchdog timer,
+ * mark firmware ready, and dispatch by context — waltest mode,
+ * calibration mode, host-driver probe, or plain power-up completion.
+ * On failure the device is shut down again.
+ */
+static int cnss_fw_ready_hdlr(struct cnss_plat_data *plat_priv)
+{
+	int ret = 0;
+
+	if (!plat_priv)
+		return -ENODEV;
+
+	del_timer(&plat_priv->fw_boot_timer);
+	set_bit(CNSS_FW_READY, &plat_priv->driver_state);
+
+	if (test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state)) {
+		clear_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state);
+		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+	}
+
+	if (enable_waltest) {
+		ret = cnss_wlfw_wlan_mode_send_sync(plat_priv,
+						    QMI_WLFW_WALTEST_V01);
+	} else if (test_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state)) {
+		ret = cnss_wlfw_wlan_mode_send_sync(plat_priv,
+						    QMI_WLFW_CALIBRATION_V01);
+	} else if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state) ||
+		   test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
+		ret = cnss_driver_call_probe(plat_priv);
+	} else {
+		/* Nobody waiting on probe; unblock a pending power-up. */
+		complete(&plat_priv->power_up_complete);
+	}
+
+	if (ret)
+		goto shutdown;
+
+	return 0;
+
+shutdown:
+	/* Unwind in reverse boot order: MHI, PCIe link, then power. */
+	cnss_pci_stop_mhi(plat_priv->bus_priv);
+	cnss_suspend_pci_link(plat_priv->bus_priv);
+	cnss_power_off_device(plat_priv);
+
+	return ret;
+}
+
+/* Map a driver event type to a human-readable name for logging.
+ * Note: the original definition ended with a stray ';' after the
+ * closing brace (an empty file-scope declaration); dropped here.
+ */
+static char *cnss_driver_event_to_str(enum cnss_driver_event_type type)
+{
+	switch (type) {
+	case CNSS_DRIVER_EVENT_SERVER_ARRIVE:
+		return "SERVER_ARRIVE";
+	case CNSS_DRIVER_EVENT_SERVER_EXIT:
+		return "SERVER_EXIT";
+	case CNSS_DRIVER_EVENT_REQUEST_MEM:
+		return "REQUEST_MEM";
+	case CNSS_DRIVER_EVENT_FW_MEM_READY:
+		return "FW_MEM_READY";
+	case CNSS_DRIVER_EVENT_FW_READY:
+		return "FW_READY";
+	case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_START:
+		return "COLD_BOOT_CAL_START";
+	case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE:
+		return "COLD_BOOT_CAL_DONE";
+	case CNSS_DRIVER_EVENT_REGISTER_DRIVER:
+		return "REGISTER_DRIVER";
+	case CNSS_DRIVER_EVENT_UNREGISTER_DRIVER:
+		return "UNREGISTER_DRIVER";
+	case CNSS_DRIVER_EVENT_RECOVERY:
+		return "RECOVERY";
+	case CNSS_DRIVER_EVENT_FORCE_FW_ASSERT:
+		return "FORCE_FW_ASSERT";
+	case CNSS_DRIVER_EVENT_POWER_UP:
+		return "POWER_UP";
+	case CNSS_DRIVER_EVENT_POWER_DOWN:
+		return "POWER_DOWN";
+	case CNSS_DRIVER_EVENT_MAX:
+		return "EVENT_MAX";
+	}
+
+	return "UNKNOWN";
+}
+
+/* Allocate a driver event and queue it to the platform event workqueue.
+ *
+ * @flags: 0 for fire-and-forget; CNSS_EVENT_SYNC blocks until the event
+ *         is processed, optionally CNSS_EVENT_UNINTERRUPTIBLE.
+ *
+ * Event ownership: async events are freed by the work handler; sync
+ * events are freed here after completion — unless the wait is
+ * interrupted, in which case event->sync is cleared (under event_lock)
+ * so the work handler frees it instead.
+ */
+int cnss_driver_event_post(struct cnss_plat_data *plat_priv,
+			   enum cnss_driver_event_type type,
+			   u32 flags, void *data)
+{
+	struct cnss_driver_event *event;
+	unsigned long irq_flags;
+	int gfp = GFP_KERNEL;
+	int ret = 0;
+
+	if (!plat_priv)
+		return -ENODEV;
+
+	cnss_pr_dbg("Posting event: %s(%d)%s, state: 0x%lx flags: 0x%0x\n",
+		    cnss_driver_event_to_str(type), type,
+		    flags ? "-sync" : "", plat_priv->driver_state, flags);
+
+	if (type >= CNSS_DRIVER_EVENT_MAX) {
+		cnss_pr_err("Invalid Event type: %d, can't post", type);
+		return -EINVAL;
+	}
+
+	/* May be called from atomic context (e.g. timer/IRQ paths) */
+	if (in_interrupt() || irqs_disabled())
+		gfp = GFP_ATOMIC;
+
+	event = kzalloc(sizeof(*event), gfp);
+	if (!event)
+		return -ENOMEM;
+
+	cnss_pm_stay_awake(plat_priv);
+
+	event->type = type;
+	event->data = data;
+	init_completion(&event->complete);
+	event->ret = CNSS_EVENT_PENDING;
+	event->sync = !!(flags & CNSS_EVENT_SYNC);
+
+	spin_lock_irqsave(&plat_priv->event_lock, irq_flags);
+	list_add_tail(&event->list, &plat_priv->event_list);
+	spin_unlock_irqrestore(&plat_priv->event_lock, irq_flags);
+
+	queue_work(plat_priv->event_wq, &plat_priv->event_work);
+
+	if (!(flags & CNSS_EVENT_SYNC))
+		goto out;
+
+	if (flags & CNSS_EVENT_UNINTERRUPTIBLE)
+		wait_for_completion(&event->complete);
+	else
+		ret = wait_for_completion_interruptible(&event->complete);
+
+	cnss_pr_dbg("Completed event: %s(%d), state: 0x%lx, ret: %d/%d\n",
+		    cnss_driver_event_to_str(type), type,
+		    plat_priv->driver_state, ret, event->ret);
+	spin_lock_irqsave(&plat_priv->event_lock, irq_flags);
+	if (ret == -ERESTARTSYS && event->ret == CNSS_EVENT_PENDING) {
+		/* Interrupted before processing: hand the event back to
+		 * the work handler for freeing and bail out.
+		 */
+		event->sync = false;
+		spin_unlock_irqrestore(&plat_priv->event_lock, irq_flags);
+		ret = -EINTR;
+		goto out;
+	}
+	spin_unlock_irqrestore(&plat_priv->event_lock, irq_flags);
+
+	ret = event->ret;
+	kfree(event);
+
+out:
+	cnss_pm_relax(plat_priv);
+	return ret;
+}
+
+/* Synchronously power the WLAN device up via the event queue. For
+ * non-QCA6174 targets, additionally wait (up to 4x the QMI timeout)
+ * for the FW ready indication signalled through power_up_complete in
+ * cnss_fw_ready_hdlr(). Returns 0 on success, -EAGAIN on FW timeout.
+ */
+int cnss_power_up(struct device *dev)
+{
+	int ret = 0;
+	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+	unsigned int timeout;
+
+	if (!plat_priv) {
+		cnss_pr_err("plat_priv is NULL\n");
+		return -ENODEV;
+	}
+
+	cnss_pr_dbg("Powering up device\n");
+
+	ret = cnss_driver_event_post(plat_priv,
+				     CNSS_DRIVER_EVENT_POWER_UP,
+				     CNSS_EVENT_SYNC, NULL);
+	if (ret)
+		goto out;
+
+	/* QCA6174 has no FW-ready handshake to wait for */
+	if (plat_priv->device_id == QCA6174_DEVICE_ID)
+		goto out;
+
+	timeout = cnss_get_qmi_timeout();
+
+	reinit_completion(&plat_priv->power_up_complete);
+	ret = wait_for_completion_timeout(&plat_priv->power_up_complete,
+					  msecs_to_jiffies(timeout) << 2);
+	if (!ret) {
+		cnss_pr_err("Timeout waiting for power up to complete\n");
+		ret = -EAGAIN;
+		goto out;
+	}
+
+	return 0;
+
+out:
+	return ret;
+}
+EXPORT_SYMBOL(cnss_power_up);
+
+/* Synchronously power the WLAN device down via the event queue. */
+int cnss_power_down(struct device *dev)
+{
+	struct cnss_plat_data *plat_priv;
+
+	plat_priv = cnss_bus_dev_to_plat_priv(dev);
+	if (!plat_priv) {
+		cnss_pr_err("plat_priv is NULL\n");
+		return -ENODEV;
+	}
+
+	cnss_pr_dbg("Powering down device\n");
+
+	return cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_POWER_DOWN,
+				      CNSS_EVENT_SYNC, NULL);
+}
+EXPORT_SYMBOL(cnss_power_down);
+
+/* Register a WLAN host driver. Posts a synchronous, uninterruptible
+ * event so the device is powered up and probed before returning.
+ * Returns -EEXIST if a driver is already registered.
+ */
+int cnss_wlan_register_driver(struct cnss_wlan_driver *driver_ops)
+{
+	struct cnss_plat_data *plat_priv = cnss_get_plat_priv(NULL);
+
+	if (!plat_priv) {
+		cnss_pr_err("plat_priv is NULL!\n");
+		return -ENODEV;
+	}
+
+	if (plat_priv->driver_ops) {
+		cnss_pr_err("Driver has already registered!\n");
+		return -EEXIST;
+	}
+
+	return cnss_driver_event_post(plat_priv,
+				      CNSS_DRIVER_EVENT_REGISTER_DRIVER,
+				      CNSS_EVENT_SYNC_UNINTERRUPTIBLE,
+				      driver_ops);
+}
+EXPORT_SYMBOL(cnss_wlan_register_driver);
+
+/* Unregister the WLAN host driver; blocks until the device is shut
+ * down and the ops table is cleared.
+ */
+void cnss_wlan_unregister_driver(struct cnss_wlan_driver *driver_ops)
+{
+	struct cnss_plat_data *plat_priv;
+
+	plat_priv = cnss_get_plat_priv(NULL);
+	if (!plat_priv) {
+		cnss_pr_err("plat_priv is NULL!\n");
+		return;
+	}
+
+	cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
+			       CNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL);
+}
+EXPORT_SYMBOL(cnss_wlan_unregister_driver);
+
+/* Acquire regulator and pinctrl resources needed to power the device. */
+static int cnss_get_resources(struct cnss_plat_data *plat_priv)
+{
+	int ret;
+
+	ret = cnss_get_vreg(plat_priv);
+	if (ret) {
+		cnss_pr_err("Failed to get vreg, err = %d\n", ret);
+		return ret;
+	}
+
+	ret = cnss_get_pinctrl(plat_priv);
+	if (ret) {
+		cnss_pr_err("Failed to get pinctrl, err = %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Counterpart of cnss_get_resources(); intentionally empty — nothing
+ * to undo here yet.
+ */
+static void cnss_put_resources(struct cnss_plat_data *plat_priv)
+{
+}
+
+/* Modem subsystem notifier: track modem power state (1 = up after
+ * powerup, 0 = going down before shutdown) and forward it to the
+ * registered WLAN driver's optional modem_status callback.
+ */
+static int cnss_modem_notifier_nb(struct notifier_block *nb,
+				  unsigned long code,
+				  void *ss_handle)
+{
+	struct cnss_plat_data *plat_priv =
+		container_of(nb, struct cnss_plat_data, modem_nb);
+	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+	struct cnss_esoc_info *esoc_info;
+	struct cnss_wlan_driver *driver_ops;
+
+	cnss_pr_dbg("Modem notifier: event %lu\n", code);
+
+	if (!pci_priv)
+		return NOTIFY_DONE;
+
+	esoc_info = &plat_priv->esoc_info;
+
+	if (code == SUBSYS_AFTER_POWERUP)
+		esoc_info->modem_current_status = 1;
+	else if (code == SUBSYS_BEFORE_SHUTDOWN)
+		esoc_info->modem_current_status = 0;
+	else
+		return NOTIFY_DONE;
+
+	/* Callback is optional: check the ops table and the member */
+	driver_ops = plat_priv->driver_ops;
+	if (!driver_ops || !driver_ops->modem_status)
+		return NOTIFY_DONE;
+
+	driver_ops->modem_status(pci_priv->pci_dev,
+				 esoc_info->modem_current_status);
+
+	return NOTIFY_OK;
+}
+
+/* Register for external SoC (modem) power state notifications.
+ *
+ * Registration is only wanted when DT sets "qcom,notify-modem-status";
+ * cnss_unregister_esoc() likewise unregisters the notifier only when
+ * that flag is set, so skip registration when the property is absent.
+ * The previous check was inverted, registering the notifier exactly
+ * when notifications were NOT requested and leaking it on unregister.
+ */
+static int cnss_register_esoc(struct cnss_plat_data *plat_priv)
+{
+	int ret = 0;
+	struct device *dev;
+	struct cnss_esoc_info *esoc_info;
+	struct esoc_desc *esoc_desc;
+	const char *client_desc;
+
+	dev = &plat_priv->plat_dev->dev;
+	esoc_info = &plat_priv->esoc_info;
+
+	esoc_info->notify_modem_status =
+		of_property_read_bool(dev->of_node,
+				      "qcom,notify-modem-status");
+
+	if (!esoc_info->notify_modem_status)
+		goto out;
+
+	ret = of_property_read_string_index(dev->of_node, "esoc-names", 0,
+					    &client_desc);
+	if (ret) {
+		cnss_pr_dbg("esoc-names is not defined in DT, skip!\n");
+	} else {
+		esoc_desc = devm_register_esoc_client(dev, client_desc);
+		if (IS_ERR_OR_NULL(esoc_desc)) {
+			ret = PTR_RET(esoc_desc);
+			cnss_pr_err("Failed to register esoc_desc, err = %d\n",
+				    ret);
+			goto out;
+		}
+		esoc_info->esoc_desc = esoc_desc;
+	}
+
+	plat_priv->modem_nb.notifier_call = cnss_modem_notifier_nb;
+	esoc_info->modem_current_status = 0;
+	/* Fall back to the default "modem" subsystem when no esoc client */
+	esoc_info->modem_notify_handler =
+		subsys_notif_register_notifier(esoc_info->esoc_desc ?
+					       esoc_info->esoc_desc->name :
+					       "modem", &plat_priv->modem_nb);
+	if (IS_ERR(esoc_info->modem_notify_handler)) {
+		ret = PTR_ERR(esoc_info->modem_notify_handler);
+		cnss_pr_err("Failed to register esoc notifier, err = %d\n",
+			    ret);
+		goto unreg_esoc;
+	}
+
+	return 0;
+unreg_esoc:
+	if (esoc_info->esoc_desc)
+		devm_unregister_esoc_client(dev, esoc_info->esoc_desc);
+out:
+	return ret;
+}
+
+/* Undo cnss_register_esoc(): drop the modem notifier (only registered
+ * when notify_modem_status is set) and the esoc client, if any.
+ */
+static void cnss_unregister_esoc(struct cnss_plat_data *plat_priv)
+{
+	struct device *dev;
+	struct cnss_esoc_info *esoc_info;
+
+	dev = &plat_priv->plat_dev->dev;
+	esoc_info = &plat_priv->esoc_info;
+
+	if (esoc_info->notify_modem_status)
+		subsys_notif_unregister_notifier(esoc_info->
+						 modem_notify_handler,
+						 &plat_priv->modem_nb);
+	if (esoc_info->esoc_desc)
+		devm_unregister_esoc_client(dev, esoc_info->esoc_desc);
+}
+
+/* Power up the QCA6174: rails on, PCI link resumed, then host driver
+ * probe. Unwinds in reverse order on failure.
+ */
+static int cnss_qca6174_powerup(struct cnss_plat_data *plat_priv)
+{
+	int ret = 0;
+	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+
+	if (!pci_priv) {
+		cnss_pr_err("pci_priv is NULL!\n");
+		return -ENODEV;
+	}
+
+	ret = cnss_power_on_device(plat_priv);
+	if (ret) {
+		cnss_pr_err("Failed to power on device, err = %d\n", ret);
+		goto out;
+	}
+
+	ret = cnss_resume_pci_link(pci_priv);
+	if (ret) {
+		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
+		goto power_off;
+	}
+
+	ret = cnss_driver_call_probe(plat_priv);
+	if (ret)
+		goto suspend_link;
+
+	return 0;
+suspend_link:
+	cnss_suspend_pci_link(pci_priv);
+power_off:
+	cnss_power_off_device(plat_priv);
+out:
+	return ret;
+}
+
+/* Shut down the QCA6174: resume it out of runtime PM, remove the host
+ * driver, drop bus bandwidth votes, suspend the PCI link and cut power.
+ * Clears the UNLOADING state bit at the end.
+ */
+static int cnss_qca6174_shutdown(struct cnss_plat_data *plat_priv)
+{
+	int ret = 0;
+	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+
+	if (!pci_priv)
+		return -ENODEV;
+
+	cnss_pm_request_resume(pci_priv);
+
+	cnss_driver_call_remove(plat_priv);
+
+	cnss_request_bus_bandwidth(&plat_priv->plat_dev->dev,
+				   CNSS_BUS_WIDTH_NONE);
+	cnss_pci_set_monitor_wake_intr(pci_priv, false);
+	cnss_pci_set_auto_suspended(pci_priv, 0);
+
+	ret = cnss_suspend_pci_link(pci_priv);
+	if (ret)
+		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
+
+	cnss_power_off_device(plat_priv);
+
+	clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
+
+	return ret;
+}
+
+/* Invoke the host driver's crash_shutdown callback for QCA6174 during
+ * a subsystem crash. The ops table and the callback are both optional;
+ * check each before dereferencing (matches cnss_modem_notifier_nb) to
+ * avoid a NULL-pointer oops in the panic path.
+ */
+static void cnss_qca6174_crash_shutdown(struct cnss_plat_data *plat_priv)
+{
+	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+
+	if (!plat_priv->driver_ops || !plat_priv->driver_ops->crash_shutdown)
+		return;
+
+	plat_priv->driver_ops->crash_shutdown(pci_priv->pci_dev);
+}
+
+/* Power up the QCA6290: clear any stale ramdump state, power on, resume
+ * the PCI link and start MHI. Depending on mode, either probe the host
+ * driver immediately (qmi_bypass) or arm the FW boot timer and wait for
+ * the FW_READY event to drive the probe.
+ */
+static int cnss_qca6290_powerup(struct cnss_plat_data *plat_priv)
+{
+	int ret = 0;
+	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+	unsigned int timeout;
+
+	if (!pci_priv) {
+		cnss_pr_err("pci_priv is NULL!\n");
+		return -ENODEV;
+	}
+
+	/* Drop leftover dump state from a previous crash before reboot */
+	if (plat_priv->ramdump_info_v2.dump_data_valid ||
+	    test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
+		cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_DEINIT);
+		cnss_pci_clear_dump_info(pci_priv);
+	}
+
+	ret = cnss_power_on_device(plat_priv);
+	if (ret) {
+		cnss_pr_err("Failed to power on device, err = %d\n", ret);
+		goto out;
+	}
+
+	ret = cnss_resume_pci_link(pci_priv);
+	if (ret) {
+		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
+		goto power_off;
+	}
+
+	timeout = cnss_get_qmi_timeout();
+
+	ret = cnss_pci_start_mhi(pci_priv);
+	if (ret) {
+		cnss_pr_err("Failed to start MHI, err = %d\n", ret);
+		/* Deliberately return 0: arm a short timer so the timeout
+		 * path triggers recovery instead of failing powerup here.
+		 */
+		if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
+		    !pci_priv->pci_link_down_ind && timeout)
+			mod_timer(&plat_priv->fw_boot_timer,
+				  jiffies + msecs_to_jiffies(timeout >> 1));
+		return 0;
+	}
+
+	if (test_bit(USE_CORE_ONLY_FW, &quirks)) {
+		clear_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state);
+		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+		return 0;
+	}
+
+	cnss_set_pin_connect_status(plat_priv);
+
+	if (qmi_bypass) {
+		ret = cnss_driver_call_probe(plat_priv);
+		if (ret)
+			goto stop_mhi;
+	} else if (timeout) {
+		mod_timer(&plat_priv->fw_boot_timer,
+			  jiffies + msecs_to_jiffies(timeout << 1));
+	}
+
+	return 0;
+
+stop_mhi:
+	cnss_pci_stop_mhi(pci_priv);
+	cnss_suspend_pci_link(pci_priv);
+power_off:
+	cnss_power_off_device(plat_priv);
+out:
+	return ret;
+}
+
+/* Shut down the QCA6290: remove the host driver, drop bandwidth votes,
+ * stop MHI, suspend the PCI link and cut power. Clears FW state bits
+ * so the next powerup runs the full FW boot handshake.
+ */
+static int cnss_qca6290_shutdown(struct cnss_plat_data *plat_priv)
+{
+	int ret = 0;
+	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+
+	if (!pci_priv)
+		return -ENODEV;
+
+	cnss_pm_request_resume(pci_priv);
+
+	cnss_driver_call_remove(plat_priv);
+
+	cnss_request_bus_bandwidth(&plat_priv->plat_dev->dev,
+				   CNSS_BUS_WIDTH_NONE);
+	cnss_pci_set_monitor_wake_intr(pci_priv, false);
+	cnss_pci_set_auto_suspended(pci_priv, 0);
+
+	cnss_pci_stop_mhi(pci_priv);
+
+	ret = cnss_suspend_pci_link(pci_priv);
+	if (ret)
+		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
+
+	cnss_power_off_device(plat_priv);
+
+	clear_bit(CNSS_FW_READY, &plat_priv->driver_state);
+	clear_bit(CNSS_FW_MEM_READY, &plat_priv->driver_state);
+	clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
+
+	return ret;
+}
+
+/* Panic-path dump collection for QCA6290: trigger RDDM over MHI and
+ * collect the dump. Skipped while recovery/load/unload is in flight,
+ * since the device state is already in transition.
+ */
+static void cnss_qca6290_crash_shutdown(struct cnss_plat_data *plat_priv)
+{
+	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+	int ret = 0;
+
+	cnss_pr_dbg("Crash shutdown with driver_state 0x%lx\n",
+		    plat_priv->driver_state);
+
+	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) ||
+	    test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state) ||
+	    test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state))
+		return;
+
+	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RDDM_KERNEL_PANIC);
+	if (ret) {
+		cnss_pr_err("Fail to complete RDDM, err = %d\n", ret);
+		return;
+	}
+
+	cnss_pci_collect_dump_info(pci_priv);
+}
+
+/* Dispatch device power-up to the chip-specific handler. */
+static int cnss_powerup(struct cnss_plat_data *plat_priv)
+{
+	switch (plat_priv->device_id) {
+	case QCA6174_DEVICE_ID:
+		return cnss_qca6174_powerup(plat_priv);
+	case QCA6290_EMULATION_DEVICE_ID:
+	case QCA6290_DEVICE_ID:
+		return cnss_qca6290_powerup(plat_priv);
+	default:
+		cnss_pr_err("Unknown device_id found: 0x%lx\n",
+			    plat_priv->device_id);
+		return -ENODEV;
+	}
+}
+
+/* Dispatch device shutdown to the chip-specific handler. */
+static int cnss_shutdown(struct cnss_plat_data *plat_priv)
+{
+	switch (plat_priv->device_id) {
+	case QCA6174_DEVICE_ID:
+		return cnss_qca6174_shutdown(plat_priv);
+	case QCA6290_EMULATION_DEVICE_ID:
+	case QCA6290_DEVICE_ID:
+		return cnss_qca6290_shutdown(plat_priv);
+	default:
+		cnss_pr_err("Unknown device_id found: 0x%lx\n",
+			    plat_priv->device_id);
+		return -ENODEV;
+	}
+}
+
+/* Subsystem-restart powerup hook. Ignored when driver_state is 0,
+ * i.e. the platform driver never started anything to restore.
+ */
+static int cnss_subsys_powerup(const struct subsys_desc *subsys_desc)
+{
+	struct cnss_plat_data *plat_priv;
+
+	if (!subsys_desc->dev) {
+		cnss_pr_err("dev from subsys_desc is NULL\n");
+		return -ENODEV;
+	}
+
+	plat_priv = dev_get_drvdata(subsys_desc->dev);
+	if (!plat_priv) {
+		cnss_pr_err("plat_priv is NULL\n");
+		return -ENODEV;
+	}
+
+	if (!plat_priv->driver_state) {
+		cnss_pr_dbg("Powerup is ignored\n");
+		return 0;
+	}
+
+	return cnss_powerup(plat_priv);
+}
+
+/* Subsystem-restart shutdown hook; force_stop is unused here but part
+ * of the subsys_desc callback signature. Ignored when driver_state is 0.
+ */
+static int cnss_subsys_shutdown(const struct subsys_desc *subsys_desc,
+				bool force_stop)
+{
+	struct cnss_plat_data *plat_priv;
+
+	if (!subsys_desc->dev) {
+		cnss_pr_err("dev from subsys_desc is NULL\n");
+		return -ENODEV;
+	}
+
+	plat_priv = dev_get_drvdata(subsys_desc->dev);
+	if (!plat_priv) {
+		cnss_pr_err("plat_priv is NULL\n");
+		return -ENODEV;
+	}
+
+	if (!plat_priv->driver_state) {
+		cnss_pr_dbg("shutdown is ignored\n");
+		return 0;
+	}
+
+	return cnss_shutdown(plat_priv);
+}
+
+/* Emit the QCA6290 (v2) ramdump: convert the collected dump segments
+ * into ramdump_segment entries and hand them to do_elf_ramdump(), then
+ * deinit MHI and clear the dump info. Returns 0 when no valid dump.
+ */
+static int cnss_qca6290_ramdump(struct cnss_plat_data *plat_priv)
+{
+	struct cnss_ramdump_info_v2 *info_v2 = &plat_priv->ramdump_info_v2;
+	struct cnss_dump_data *dump_data = &info_v2->dump_data;
+	struct cnss_dump_seg *dump_seg = info_v2->dump_data_vaddr;
+	struct ramdump_segment *ramdump_segs, *s;
+	int i, ret = 0;
+
+	if (!info_v2->dump_data_valid ||
+	    dump_data->nentries == 0)
+		return 0;
+
+	ramdump_segs = kcalloc(dump_data->nentries,
+			       sizeof(*ramdump_segs),
+			       GFP_KERNEL);
+	if (!ramdump_segs)
+		return -ENOMEM;
+
+	/* Copy each collected segment into the ramdump driver's format */
+	s = ramdump_segs;
+	for (i = 0; i < dump_data->nentries; i++) {
+		s->address = dump_seg->address;
+		s->v_address = dump_seg->v_address;
+		s->size = dump_seg->size;
+		s++;
+		dump_seg++;
+	}
+
+	ret = do_elf_ramdump(info_v2->ramdump_dev, ramdump_segs,
+			     dump_data->nentries);
+	kfree(ramdump_segs);
+
+	cnss_pci_set_mhi_state(plat_priv->bus_priv, CNSS_MHI_DEINIT);
+	cnss_pci_clear_dump_info(plat_priv->bus_priv);
+
+	return ret;
+}
+
+/* Emit the legacy (QCA6174) ramdump as a single contiguous segment. */
+static int cnss_qca6174_ramdump(struct cnss_plat_data *plat_priv)
+{
+	struct cnss_ramdump_info *ramdump_info = &plat_priv->ramdump_info;
+	struct ramdump_segment segment;
+
+	if (!ramdump_info->ramdump_size)
+		return -EINVAL;
+
+	memset(&segment, 0, sizeof(segment));
+	segment.v_address = ramdump_info->ramdump_va;
+	segment.size = ramdump_info->ramdump_size;
+
+	return do_ramdump(ramdump_info->ramdump_dev, &segment, 1);
+}
+
+/* Subsystem-restart ramdump hook: when enabled, collect a dump via the
+ * chip-specific path.
+ */
+static int cnss_subsys_ramdump(int enable,
+			       const struct subsys_desc *subsys_desc)
+{
+	struct cnss_plat_data *plat_priv = dev_get_drvdata(subsys_desc->dev);
+
+	if (!plat_priv) {
+		cnss_pr_err("plat_priv is NULL!\n");
+		return -ENODEV;
+	}
+
+	if (!enable)
+		return 0;
+
+	switch (plat_priv->device_id) {
+	case QCA6174_DEVICE_ID:
+		return cnss_qca6174_ramdump(plat_priv);
+	case QCA6290_EMULATION_DEVICE_ID:
+	case QCA6290_DEVICE_ID:
+		return cnss_qca6290_ramdump(plat_priv);
+	default:
+		cnss_pr_err("Unknown device_id found: 0x%lx\n",
+			    plat_priv->device_id);
+		return -ENODEV;
+	}
+}
+
+/* Return the virtual address of the legacy ramdump buffer and store its
+ * size in *size. Returns NULL — leaving *size untouched — when no
+ * platform data is found, so callers must check the return value first.
+ */
+void *cnss_get_virt_ramdump_mem(struct device *dev, unsigned long *size)
+{
+	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+	struct cnss_ramdump_info *ramdump_info;
+
+	if (!plat_priv)
+		return NULL;
+
+	ramdump_info = &plat_priv->ramdump_info;
+	*size = ramdump_info->ramdump_size;
+
+	return ramdump_info->ramdump_va;
+}
+EXPORT_SYMBOL(cnss_get_virt_ramdump_mem);
+
+/* Host-driver entry point to report a device crash: mark recovery in
+ * progress and kick subsystem restart, which drives the shutdown /
+ * ramdump / powerup sequence through the subsys hooks above.
+ */
+void cnss_device_crashed(struct device *dev)
+{
+	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+	struct cnss_subsys_info *subsys_info;
+
+	if (!plat_priv)
+		return;
+
+	subsys_info = &plat_priv->subsys_info;
+	if (subsys_info->subsys_device) {
+		set_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+		subsys_set_crash_status(subsys_info->subsys_device, true);
+		subsystem_restart_dev(subsys_info->subsys_device);
+	}
+}
+EXPORT_SYMBOL(cnss_device_crashed);
+
+/* Subsystem crash-shutdown hook (panic path): dispatch to the
+ * chip-specific crash handler.
+ */
+static void cnss_subsys_crash_shutdown(const struct subsys_desc *subsys_desc)
+{
+	struct cnss_plat_data *plat_priv = dev_get_drvdata(subsys_desc->dev);
+
+	if (!plat_priv) {
+		cnss_pr_err("plat_priv is NULL!\n");
+		return;
+	}
+
+	switch (plat_priv->device_id) {
+	case QCA6174_DEVICE_ID:
+		cnss_qca6174_crash_shutdown(plat_priv);
+		break;
+	case QCA6290_EMULATION_DEVICE_ID:
+	case QCA6290_DEVICE_ID:
+		cnss_qca6290_crash_shutdown(plat_priv);
+		break;
+	default:
+		cnss_pr_err("Unknown device_id found: 0x%lx\n",
+			    plat_priv->device_id);
+	}
+}
+
+/* Map a recovery reason to a human-readable name for logging.
+ * Note: the original definition ended with a stray ';' after the
+ * closing brace (an empty file-scope declaration); dropped here.
+ */
+static const char *cnss_recovery_reason_to_str(enum cnss_recovery_reason reason)
+{
+	switch (reason) {
+	case CNSS_REASON_DEFAULT:
+		return "DEFAULT";
+	case CNSS_REASON_LINK_DOWN:
+		return "LINK_DOWN";
+	case CNSS_REASON_RDDM:
+		return "RDDM";
+	case CNSS_REASON_TIMEOUT:
+		return "TIMEOUT";
+	}
+
+	return "UNKNOWN";
+}
+
+/* Perform device recovery for the given reason. QCA6174 always takes
+ * the self-recovery (shutdown + powerup) path; other chips notify the
+ * host driver, handle reason-specific work (e.g. RDDM collection) and
+ * then trigger subsystem restart unless SKIP_RECOVERY is set.
+ */
+static int cnss_do_recovery(struct cnss_plat_data *plat_priv,
+			    enum cnss_recovery_reason reason)
+{
+	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+	struct cnss_subsys_info *subsys_info =
+		&plat_priv->subsys_info;
+	int ret = 0;
+
+	plat_priv->recovery_count++;
+
+	if (plat_priv->device_id == QCA6174_DEVICE_ID)
+		goto self_recovery;
+
+	/* update_status is optional; check the member before calling it,
+	 * as done for the other optional driver_ops callbacks.
+	 */
+	if (plat_priv->driver_ops &&
+	    plat_priv->driver_ops->update_status &&
+	    test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state))
+		plat_priv->driver_ops->update_status(pci_priv->pci_dev,
+						     CNSS_RECOVERY);
+
+	if (test_bit(SKIP_RECOVERY, &quirks)) {
+		cnss_pr_dbg("Skip device recovery\n");
+		return 0;
+	}
+
+	switch (reason) {
+	case CNSS_REASON_LINK_DOWN:
+		if (test_bit(LINK_DOWN_SELF_RECOVERY, &quirks))
+			goto self_recovery;
+		break;
+	case CNSS_REASON_RDDM:
+		clear_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
+		ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RDDM);
+		if (ret) {
+			cnss_pr_err("Failed to complete RDDM, err = %d\n", ret);
+			break;
+		}
+		cnss_pci_collect_dump_info(pci_priv);
+		break;
+	case CNSS_REASON_DEFAULT:
+	case CNSS_REASON_TIMEOUT:
+		break;
+	default:
+		cnss_pr_err("Unsupported recovery reason: %s(%d)\n",
+			    cnss_recovery_reason_to_str(reason), reason);
+		break;
+	}
+
+	if (!subsys_info->subsys_device)
+		return 0;
+
+	subsys_set_crash_status(subsys_info->subsys_device, true);
+	subsystem_restart_dev(subsys_info->subsys_device);
+
+	return 0;
+
+self_recovery:
+	cnss_shutdown(plat_priv);
+	cnss_powerup(plat_priv);
+
+	return 0;
+}
+
+/* RECOVERY event handler. Validates the current driver state (rejects
+ * recovery while idle, already recovering, or unloading; chip-specific
+ * rules for loading), then marks recovery and runs cnss_do_recovery().
+ * Owns @data (struct cnss_recovery_data, allocated by the poster) and
+ * frees it on every path.
+ */
+static int cnss_driver_recovery_hdlr(struct cnss_plat_data *plat_priv,
+				     void *data)
+{
+	struct cnss_recovery_data *recovery_data = data;
+	int ret = 0;
+
+	cnss_pr_dbg("Driver recovery is triggered with reason: %s(%d)\n",
+		    cnss_recovery_reason_to_str(recovery_data->reason),
+		    recovery_data->reason);
+
+	if (!plat_priv->driver_state) {
+		cnss_pr_err("Improper driver state, ignore recovery\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
+		cnss_pr_err("Recovery is already in progress\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) {
+		cnss_pr_err("Driver unload is in progress, ignore recovery\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	switch (plat_priv->device_id) {
+	case QCA6174_DEVICE_ID:
+		if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state)) {
+			cnss_pr_err("Driver load is in progress, ignore recovery\n");
+			ret = -EINVAL;
+			goto out;
+		}
+		break;
+	default:
+		/* Before FW_READY, treat this as a FW boot-time recovery */
+		if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
+			set_bit(CNSS_FW_BOOT_RECOVERY,
+				&plat_priv->driver_state);
+		} else if (test_bit(CNSS_DRIVER_LOADING,
+			   &plat_priv->driver_state)) {
+			cnss_pr_err("Driver probe is in progress, ignore recovery\n");
+			ret = -EINVAL;
+			goto out;
+		}
+		break;
+	}
+
+	set_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+	ret = cnss_do_recovery(plat_priv, recovery_data->reason);
+
+out:
+	kfree(data);
+	return ret;
+}
+
+/* Exported convenience wrapper: schedule recovery and always report
+ * success to the caller.
+ */
+int cnss_self_recovery(struct device *dev,
+		       enum cnss_recovery_reason reason)
+{
+	cnss_schedule_recovery(dev, reason);
+	return 0;
+}
+EXPORT_SYMBOL(cnss_self_recovery);
+
+/* Queue an asynchronous RECOVERY event carrying the reason.
+ *
+ * The recovery data is normally freed by cnss_driver_recovery_hdlr();
+ * if posting fails (no plat_priv, invalid type, or event allocation
+ * failure) the handler never runs, so free the data here to avoid the
+ * leak the original code had on those paths.
+ */
+void cnss_schedule_recovery(struct device *dev,
+			    enum cnss_recovery_reason reason)
+{
+	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+	struct cnss_recovery_data *data;
+	int gfp = GFP_KERNEL;
+
+	/* May be called from atomic context (e.g. timer callbacks) */
+	if (in_interrupt() || irqs_disabled())
+		gfp = GFP_ATOMIC;
+
+	data = kzalloc(sizeof(*data), gfp);
+	if (!data)
+		return;
+
+	data->reason = reason;
+	if (cnss_driver_event_post(plat_priv,
+				   CNSS_DRIVER_EVENT_RECOVERY,
+				   0, data) < 0)
+		kfree(data);
+}
+EXPORT_SYMBOL(cnss_schedule_recovery);
+
+/* FORCE_FW_ASSERT event handler: ask FW to enter RDDM via MHI. If the
+ * trigger itself fails, fall back to scheduling a default recovery;
+ * otherwise arm the boot timer so a silent FW is caught by timeout.
+ */
+static int cnss_force_fw_assert_hdlr(struct cnss_plat_data *plat_priv)
+{
+	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+	int ret;
+
+	ret = cnss_pci_set_mhi_state(plat_priv->bus_priv,
+				     CNSS_MHI_TRIGGER_RDDM);
+	if (ret) {
+		cnss_pr_err("Failed to trigger RDDM, err = %d\n", ret);
+		cnss_schedule_recovery(&pci_priv->pci_dev->dev,
+				       CNSS_REASON_DEFAULT);
+		return 0;
+	}
+
+	if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) {
+		mod_timer(&plat_priv->fw_boot_timer,
+			  jiffies + msecs_to_jiffies(FW_ASSERT_TIMEOUT));
+	}
+
+	return 0;
+}
+
+/* Host-driver entry point to force a firmware assert. Unsupported on
+ * QCA6174; ignored while recovery is already in progress. The actual
+ * work happens asynchronously in cnss_force_fw_assert_hdlr().
+ */
+int cnss_force_fw_assert(struct device *dev)
+{
+	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+
+	if (!plat_priv) {
+		cnss_pr_err("plat_priv is NULL\n");
+		return -ENODEV;
+	}
+
+	if (plat_priv->device_id == QCA6174_DEVICE_ID) {
+		cnss_pr_info("Forced FW assert is not supported\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
+		cnss_pr_info("Recovery is already in progress, ignore forced FW assert\n");
+		return 0;
+	}
+
+	cnss_driver_event_post(plat_priv,
+			       CNSS_DRIVER_EVENT_FORCE_FW_ASSERT,
+			       0, NULL);
+
+	return 0;
+}
+EXPORT_SYMBOL(cnss_force_fw_assert);
+
+/* fw_boot_timer callback: FW did not signal ready in time, schedule a
+ * timeout-reason recovery. @data is the plat_priv pointer armed at
+ * timer setup.
+ */
+void fw_boot_timeout(unsigned long data)
+{
+	struct cnss_plat_data *plat_priv = (struct cnss_plat_data *)data;
+	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+
+	cnss_pr_err("Timeout waiting for FW ready indication!\n");
+
+	cnss_schedule_recovery(&pci_priv->pci_dev->dev,
+			       CNSS_REASON_TIMEOUT);
+}
+
+/* REGISTER_DRIVER event handler: record the ops table and power the
+ * device up, rolling both back if power-up fails.
+ */
+static int cnss_register_driver_hdlr(struct cnss_plat_data *plat_priv,
+				     void *data)
+{
+	int ret;
+
+	set_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
+	plat_priv->driver_ops = data;
+
+	ret = cnss_powerup(plat_priv);
+	if (!ret)
+		return 0;
+
+	clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
+	plat_priv->driver_ops = NULL;
+
+	return ret;
+}
+
+/* UNREGISTER_DRIVER event handler: mark unloading, shut the device
+ * down and drop the ops table.
+ */
+static int cnss_unregister_driver_hdlr(struct cnss_plat_data *plat_priv)
+{
+	set_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
+	cnss_shutdown(plat_priv);
+	plat_priv->driver_ops = NULL;
+
+	return 0;
+}
+
+/* COLD_BOOT_CAL_START event handler: flag calibration mode and power
+ * up; the flag is cleared again if power-up fails.
+ */
+static int cnss_cold_boot_cal_start_hdlr(struct cnss_plat_data *plat_priv)
+{
+	int ret = 0;
+
+	set_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state);
+	ret = cnss_powerup(plat_priv);
+	if (ret)
+		clear_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state);
+
+	return ret;
+}
+
+/* COLD_BOOT_CAL_DONE event handler: tell FW to leave calibration mode,
+ * shut the device down and clear the calibration flag.
+ */
+static int cnss_cold_boot_cal_done_hdlr(struct cnss_plat_data *plat_priv)
+{
+	cnss_wlfw_wlan_mode_send_sync(plat_priv, QMI_WLFW_OFF_V01);
+	cnss_shutdown(plat_priv);
+	clear_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state);
+
+	return 0;
+}
+
+/* POWER_UP event handler: thin wrapper around cnss_powerup(). */
+static int cnss_power_up_hdlr(struct cnss_plat_data *plat_priv)
+{
+	return cnss_powerup(plat_priv);
+}
+
+/* POWER_DOWN event handler: shut the device down; always reports 0. */
+static int cnss_power_down_hdlr(struct cnss_plat_data *plat_priv)
+{
+	cnss_shutdown(plat_priv);
+
+	return 0;
+}
+
+/* Workqueue handler draining plat_priv->event_list.
+ *
+ * Locking: event_lock is held at every loop-condition check and while
+ * manipulating the list or an event's sync flag; it is dropped around
+ * the (potentially sleeping) per-event handlers.
+ *
+ * Event ownership: sync events are completed under the lock and freed
+ * by the waiting poster in cnss_driver_event_post(); async events are
+ * freed here after handling.
+ */
+static void cnss_driver_event_work(struct work_struct *work)
+{
+	struct cnss_plat_data *plat_priv =
+		container_of(work, struct cnss_plat_data, event_work);
+	struct cnss_driver_event *event;
+	unsigned long flags;
+	int ret = 0;
+
+	if (!plat_priv) {
+		cnss_pr_err("plat_priv is NULL!\n");
+		return;
+	}
+
+	cnss_pm_stay_awake(plat_priv);
+
+	spin_lock_irqsave(&plat_priv->event_lock, flags);
+
+	while (!list_empty(&plat_priv->event_list)) {
+		event = list_first_entry(&plat_priv->event_list,
+					 struct cnss_driver_event, list);
+		list_del(&event->list);
+		spin_unlock_irqrestore(&plat_priv->event_lock, flags);
+
+		cnss_pr_dbg("Processing driver event: %s%s(%d), state: 0x%lx\n",
+			    cnss_driver_event_to_str(event->type),
+			    event->sync ? "-sync" : "", event->type,
+			    plat_priv->driver_state);
+
+		switch (event->type) {
+		case CNSS_DRIVER_EVENT_SERVER_ARRIVE:
+			ret = cnss_wlfw_server_arrive(plat_priv);
+			break;
+		case CNSS_DRIVER_EVENT_SERVER_EXIT:
+			ret = cnss_wlfw_server_exit(plat_priv);
+			break;
+		case CNSS_DRIVER_EVENT_REQUEST_MEM:
+			ret = cnss_pci_alloc_fw_mem(plat_priv->bus_priv);
+			if (ret)
+				break;
+			ret = cnss_wlfw_respond_mem_send_sync(plat_priv);
+			break;
+		case CNSS_DRIVER_EVENT_FW_MEM_READY:
+			ret = cnss_fw_mem_ready_hdlr(plat_priv);
+			break;
+		case CNSS_DRIVER_EVENT_FW_READY:
+			ret = cnss_fw_ready_hdlr(plat_priv);
+			break;
+		case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_START:
+			ret = cnss_cold_boot_cal_start_hdlr(plat_priv);
+			break;
+		case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE:
+			ret = cnss_cold_boot_cal_done_hdlr(plat_priv);
+			break;
+		case CNSS_DRIVER_EVENT_REGISTER_DRIVER:
+			ret = cnss_register_driver_hdlr(plat_priv,
+							event->data);
+			break;
+		case CNSS_DRIVER_EVENT_UNREGISTER_DRIVER:
+			ret = cnss_unregister_driver_hdlr(plat_priv);
+			break;
+		case CNSS_DRIVER_EVENT_RECOVERY:
+			ret = cnss_driver_recovery_hdlr(plat_priv,
+							event->data);
+			break;
+		case CNSS_DRIVER_EVENT_FORCE_FW_ASSERT:
+			ret = cnss_force_fw_assert_hdlr(plat_priv);
+			break;
+		case CNSS_DRIVER_EVENT_POWER_UP:
+			ret = cnss_power_up_hdlr(plat_priv);
+			break;
+		case CNSS_DRIVER_EVENT_POWER_DOWN:
+			ret = cnss_power_down_hdlr(plat_priv);
+			break;
+		default:
+			cnss_pr_err("Invalid driver event type: %d",
+				    event->type);
+			kfree(event);
+			spin_lock_irqsave(&plat_priv->event_lock, flags);
+			continue;
+		}
+
+		spin_lock_irqsave(&plat_priv->event_lock, flags);
+		if (event->sync) {
+			/* Waiter in cnss_driver_event_post() frees it */
+			event->ret = ret;
+			complete(&event->complete);
+			continue;
+		}
+		spin_unlock_irqrestore(&plat_priv->event_lock, flags);
+
+		kfree(event);
+
+		spin_lock_irqsave(&plat_priv->event_lock, flags);
+	}
+	spin_unlock_irqrestore(&plat_priv->event_lock, flags);
+
+	cnss_pm_relax(plat_priv);
+}
+
+/* Register this device with the subsystem-restart framework under a
+ * chip-specific name, wire up the powerup/shutdown/ramdump/crash hooks,
+ * and take an initial subsystem_get() reference.
+ */
+int cnss_register_subsys(struct cnss_plat_data *plat_priv)
+{
+	int ret = 0;
+	struct cnss_subsys_info *subsys_info;
+
+	subsys_info = &plat_priv->subsys_info;
+
+	switch (plat_priv->device_id) {
+	case QCA6174_DEVICE_ID:
+		subsys_info->subsys_desc.name = "AR6320";
+		break;
+	case QCA6290_EMULATION_DEVICE_ID:
+	case QCA6290_DEVICE_ID:
+		subsys_info->subsys_desc.name = "QCA6290";
+		break;
+	default:
+		cnss_pr_err("Unknown device ID: 0x%lx\n", plat_priv->device_id);
+		ret = -ENODEV;
+		goto out;
+	}
+
+	subsys_info->subsys_desc.owner = THIS_MODULE;
+	subsys_info->subsys_desc.powerup = cnss_subsys_powerup;
+	subsys_info->subsys_desc.shutdown = cnss_subsys_shutdown;
+	subsys_info->subsys_desc.ramdump = cnss_subsys_ramdump;
+	subsys_info->subsys_desc.crash_shutdown = cnss_subsys_crash_shutdown;
+	subsys_info->subsys_desc.dev = &plat_priv->plat_dev->dev;
+
+	subsys_info->subsys_device = subsys_register(&subsys_info->subsys_desc);
+	if (IS_ERR(subsys_info->subsys_device)) {
+		ret = PTR_ERR(subsys_info->subsys_device);
+		cnss_pr_err("Failed to register subsys, err = %d\n", ret);
+		goto out;
+	}
+
+	/* subsystem_get() may return NULL or an ERR_PTR; handle both */
+	subsys_info->subsys_handle =
+		subsystem_get(subsys_info->subsys_desc.name);
+	if (!subsys_info->subsys_handle) {
+		cnss_pr_err("Failed to get subsys_handle!\n");
+		ret = -EINVAL;
+		goto unregister_subsys;
+	} else if (IS_ERR(subsys_info->subsys_handle)) {
+		ret = PTR_ERR(subsys_info->subsys_handle);
+		cnss_pr_err("Failed to do subsystem_get, err = %d\n", ret);
+		goto unregister_subsys;
+	}
+
+	return 0;
+
+unregister_subsys:
+	subsys_unregister(subsys_info->subsys_device);
+out:
+	return ret;
+}
+
+/* Drop the subsystem reference and unregister from subsystem restart;
+ * counterpart of cnss_register_subsys().
+ */
+void cnss_unregister_subsys(struct cnss_plat_data *plat_priv)
+{
+	struct cnss_subsys_info *subsys_info;
+
+	subsys_info = &plat_priv->subsys_info;
+	subsystem_put(subsys_info->subsys_handle);
+	subsys_unregister(subsys_info->subsys_device);
+}
+
+/* Describe the legacy ramdump buffer to the MSM memory-dump table so
+ * it is captured on a system-level dump.
+ */
+static int cnss_init_dump_entry(struct cnss_plat_data *plat_priv)
+{
+	struct cnss_ramdump_info *ramdump_info;
+	struct msm_dump_entry dump_entry;
+
+	ramdump_info = &plat_priv->ramdump_info;
+	ramdump_info->dump_data.addr = ramdump_info->ramdump_pa;
+	ramdump_info->dump_data.len = ramdump_info->ramdump_size;
+	ramdump_info->dump_data.version = CNSS_DUMP_FORMAT_VER;
+	ramdump_info->dump_data.magic = CNSS_DUMP_MAGIC_VER_V2;
+	strlcpy(ramdump_info->dump_data.name, CNSS_DUMP_NAME,
+		sizeof(ramdump_info->dump_data.name));
+	dump_entry.id = MSM_DUMP_DATA_CNSS_WLAN;
+	dump_entry.addr = virt_to_phys(&ramdump_info->dump_data);
+
+	return msm_dump_data_register(MSM_DUMP_TABLE_APPS, &dump_entry);
+}
+
+/* Set up the legacy (QCA6174) ramdump path: allocate the DMA buffer
+ * sized by the DT property "qcom,wlan-ramdump-dynamic", register it in
+ * the dump table and create the ramdump device. A missing/zero size is
+ * not an error — dumps are simply skipped.
+ */
+static int cnss_qca6174_register_ramdump(struct cnss_plat_data *plat_priv)
+{
+	int ret = 0;
+	struct device *dev;
+	struct cnss_subsys_info *subsys_info;
+	struct cnss_ramdump_info *ramdump_info;
+	u32 ramdump_size = 0;
+
+	dev = &plat_priv->plat_dev->dev;
+	subsys_info = &plat_priv->subsys_info;
+	ramdump_info = &plat_priv->ramdump_info;
+
+	if (of_property_read_u32(dev->of_node, "qcom,wlan-ramdump-dynamic",
+				 &ramdump_size) == 0) {
+		ramdump_info->ramdump_va = dma_alloc_coherent(dev, ramdump_size,
+			&ramdump_info->ramdump_pa, GFP_KERNEL);
+
+		if (ramdump_info->ramdump_va)
+			ramdump_info->ramdump_size = ramdump_size;
+	}
+
+	cnss_pr_dbg("ramdump va: %pK, pa: %pa\n",
+		    ramdump_info->ramdump_va, &ramdump_info->ramdump_pa);
+
+	if (ramdump_info->ramdump_size == 0) {
+		cnss_pr_info("Ramdump will not be collected");
+		goto out;
+	}
+
+	ret = cnss_init_dump_entry(plat_priv);
+	if (ret) {
+		cnss_pr_err("Failed to setup dump table, err = %d\n", ret);
+		goto free_ramdump;
+	}
+
+	ramdump_info->ramdump_dev = create_ramdump_device(
+		subsys_info->subsys_desc.name, subsys_info->subsys_desc.dev);
+	if (!ramdump_info->ramdump_dev) {
+		cnss_pr_err("Failed to create ramdump device!");
+		ret = -ENOMEM;
+		goto free_ramdump;
+	}
+
+	return 0;
+free_ramdump:
+	dma_free_coherent(dev, ramdump_info->ramdump_size,
+			  ramdump_info->ramdump_va, ramdump_info->ramdump_pa);
+out:
+	return ret;
+}
+
+/* Tear down what cnss_qca6174_register_ramdump() set up: destroy the
+ * ramdump device and free the coherent DMA buffer, each only if it was
+ * actually created/allocated.
+ */
+static void cnss_qca6174_unregister_ramdump(struct cnss_plat_data *plat_priv)
+{
+	struct device *dev;
+	struct cnss_ramdump_info *ramdump_info;
+
+	dev = &plat_priv->plat_dev->dev;
+	ramdump_info = &plat_priv->ramdump_info;
+
+	if (ramdump_info->ramdump_dev)
+		destroy_ramdump_device(ramdump_info->ramdump_dev);
+
+	if (ramdump_info->ramdump_va)
+		dma_free_coherent(dev, ramdump_info->ramdump_size,
+				  ramdump_info->ramdump_va,
+				  ramdump_info->ramdump_pa);
+}
+
+/* QCA6290 (v2) ramdump setup: unlike QCA6174 no dump buffer is allocated
+ * up front — only a dump descriptor is allocated, registered in the MSM
+ * APPS dump table, and a ramdump device created.  The ramdump size from
+ * the optional "qcom,wlan-ramdump-dynamic" DT property is recorded for
+ * later use.
+ *
+ * Returns 0 on success, negative errno on failure (descriptor freed).
+ */
+static int cnss_qca6290_register_ramdump(struct cnss_plat_data *plat_priv)
+{
+	int ret = 0;
+	struct cnss_subsys_info *subsys_info;
+	struct cnss_ramdump_info_v2 *info_v2;
+	struct cnss_dump_data *dump_data;
+	struct msm_dump_entry dump_entry;
+	struct device *dev = &plat_priv->plat_dev->dev;
+	u32 ramdump_size = 0;
+
+	subsys_info = &plat_priv->subsys_info;
+	info_v2 = &plat_priv->ramdump_info_v2;
+	dump_data = &info_v2->dump_data;
+
+	if (of_property_read_u32(dev->of_node, "qcom,wlan-ramdump-dynamic",
+				 &ramdump_size) == 0)
+		info_v2->ramdump_size = ramdump_size;
+
+	cnss_pr_dbg("Ramdump size 0x%lx\n", info_v2->ramdump_size);
+
+	info_v2->dump_data_vaddr = kzalloc(CNSS_DUMP_DESC_SIZE, GFP_KERNEL);
+	if (!info_v2->dump_data_vaddr)
+		return -ENOMEM;
+
+	dump_data->paddr = virt_to_phys(info_v2->dump_data_vaddr);
+	dump_data->version = CNSS_DUMP_FORMAT_VER_V2;
+	dump_data->magic = CNSS_DUMP_MAGIC_VER_V2;
+	dump_data->seg_version = CNSS_DUMP_SEG_VER;
+	strlcpy(dump_data->name, CNSS_DUMP_NAME,
+		sizeof(dump_data->name));
+	dump_entry.id = MSM_DUMP_DATA_CNSS_WLAN;
+	dump_entry.addr = virt_to_phys(dump_data);
+
+	ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS, &dump_entry);
+	if (ret) {
+		cnss_pr_err("Failed to setup dump table, err = %d\n", ret);
+		goto free_ramdump;
+	}
+
+	info_v2->ramdump_dev =
+		create_ramdump_device(subsys_info->subsys_desc.name,
+				      subsys_info->subsys_desc.dev);
+	if (!info_v2->ramdump_dev) {
+		cnss_pr_err("Failed to create ramdump device!\n");
+		ret = -ENOMEM;
+		goto free_ramdump;
+	}
+
+	return 0;
+
+free_ramdump:
+	kfree(info_v2->dump_data_vaddr);
+	info_v2->dump_data_vaddr = NULL;
+	return ret;
+}
+
+/* Tear down the v2 ramdump state: destroy the ramdump device (if any),
+ * free the dump descriptor and mark the dump data invalid.
+ */
+static void cnss_qca6290_unregister_ramdump(struct cnss_plat_data *plat_priv)
+{
+	struct cnss_ramdump_info_v2 *info_v2;
+
+	info_v2 = &plat_priv->ramdump_info_v2;
+
+	if (info_v2->ramdump_dev)
+		destroy_ramdump_device(info_v2->ramdump_dev);
+
+	kfree(info_v2->dump_data_vaddr);
+	info_v2->dump_data_vaddr = NULL;
+	info_v2->dump_data_valid = false;
+}
+
+/* Dispatch ramdump registration to the per-chip implementation based on
+ * plat_priv->device_id.  Returns -ENODEV for unknown device IDs.
+ */
+int cnss_register_ramdump(struct cnss_plat_data *plat_priv)
+{
+	int ret = 0;
+
+	switch (plat_priv->device_id) {
+	case QCA6174_DEVICE_ID:
+		ret = cnss_qca6174_register_ramdump(plat_priv);
+		break;
+	case QCA6290_EMULATION_DEVICE_ID:
+	case QCA6290_DEVICE_ID:
+		ret = cnss_qca6290_register_ramdump(plat_priv);
+		break;
+	default:
+		cnss_pr_err("Unknown device ID: 0x%lx\n", plat_priv->device_id);
+		ret = -ENODEV;
+		break;
+	}
+	return ret;
+}
+
+/* Dispatch ramdump teardown to the per-chip implementation based on
+ * plat_priv->device_id; unknown IDs are only logged.
+ */
+void cnss_unregister_ramdump(struct cnss_plat_data *plat_priv)
+{
+	switch (plat_priv->device_id) {
+	case QCA6174_DEVICE_ID:
+		cnss_qca6174_unregister_ramdump(plat_priv);
+		break;
+	case QCA6290_EMULATION_DEVICE_ID:
+	case QCA6290_DEVICE_ID:
+		cnss_qca6290_unregister_ramdump(plat_priv);
+		break;
+	default:
+		cnss_pr_err("Unknown device ID: 0x%lx\n", plat_priv->device_id);
+		break;
+	}
+}
+
+/* Fetch the optional MSM bus-scale table from the platform device and, if
+ * present, register a bus-scale client for bandwidth voting.  A missing
+ * table is not an error; a failed client registration is (-EINVAL).
+ */
+static int cnss_register_bus_scale(struct cnss_plat_data *plat_priv)
+{
+	int ret = 0;
+	struct cnss_bus_bw_info *bus_bw_info;
+
+	bus_bw_info = &plat_priv->bus_bw_info;
+
+	bus_bw_info->bus_scale_table =
+		msm_bus_cl_get_pdata(plat_priv->plat_dev);
+	if (bus_bw_info->bus_scale_table)  {
+		bus_bw_info->bus_client =
+			msm_bus_scale_register_client(
+				bus_bw_info->bus_scale_table);
+		if (!bus_bw_info->bus_client) {
+			cnss_pr_err("Failed to register bus scale client!\n");
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	return 0;
+out:
+	return ret;
+}
+
+/* Unregister the bus-scale client if one was registered. */
+static void cnss_unregister_bus_scale(struct cnss_plat_data *plat_priv)
+{
+	struct cnss_bus_bw_info *bus_bw_info;
+
+	bus_bw_info = &plat_priv->bus_bw_info;
+
+	if (bus_bw_info->bus_client)
+		msm_bus_scale_unregister_client(bus_bw_info->bus_client);
+}
+
+/* sysfs store handler for the write-only "fs_ready" node.  Userspace
+ * writes FILE_SYSTEM_READY once the filesystem is mounted; for QCA6290
+ * devices this kicks off cold-boot calibration synchronously.  All
+ * "cannot act" conditions deliberately return count (accept the write)
+ * rather than an error; only unparsable input returns -EINVAL.
+ */
+static ssize_t cnss_fs_ready_store(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf,
+				   size_t count)
+{
+	int fs_ready = 0;
+	struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
+
+	/* Was "%du": the stray 'u' was a typo that sscanf treated as a
+	 * literal character after the conversion; plain "%d" is intended.
+	 */
+	if (sscanf(buf, "%d", &fs_ready) != 1)
+		return -EINVAL;
+
+	cnss_pr_dbg("File system is ready, fs_ready is %d, count is %zu\n",
+		    fs_ready, count);
+
+	if (qmi_bypass) {
+		cnss_pr_dbg("QMI is bypassed.\n");
+		return count;
+	}
+
+	if (!plat_priv) {
+		cnss_pr_err("plat_priv is NULL!\n");
+		return count;
+	}
+
+	/* Cold-boot calibration only applies to QCA6290 variants. */
+	switch (plat_priv->device_id) {
+	case QCA6290_EMULATION_DEVICE_ID:
+	case QCA6290_DEVICE_ID:
+		break;
+	default:
+		cnss_pr_err("Not supported for device ID 0x%lx\n",
+			    plat_priv->device_id);
+		return count;
+	}
+
+	if (fs_ready == FILE_SYSTEM_READY) {
+		cnss_driver_event_post(plat_priv,
+				       CNSS_DRIVER_EVENT_COLD_BOOT_CAL_START,
+				       CNSS_EVENT_SYNC, NULL);
+	}
+
+	return count;
+}
+
+/* Write-only (0220) attribute; no show handler. */
+static DEVICE_ATTR(fs_ready, 0220, NULL, cnss_fs_ready_store);
+
+/* Create the driver's sysfs node(s) under the platform device. */
+static int cnss_create_sysfs(struct cnss_plat_data *plat_priv)
+{
+	int ret = 0;
+
+	ret = device_create_file(&plat_priv->plat_dev->dev, &dev_attr_fs_ready);
+	if (ret) {
+		cnss_pr_err("Failed to create device file, err = %d\n", ret);
+		goto out;
+	}
+
+	return 0;
+out:
+	return ret;
+}
+
+/* Remove the sysfs node(s) created by cnss_create_sysfs(). */
+static void cnss_remove_sysfs(struct cnss_plat_data *plat_priv)
+{
+	device_remove_file(&plat_priv->plat_dev->dev, &dev_attr_fs_ready);
+}
+
+/* Set up the driver-event machinery: the list/lock protecting queued
+ * events and a single-threaded unbound workqueue that processes them via
+ * cnss_driver_event_work().
+ *
+ * Returns 0 on success, -ENOMEM if the workqueue cannot be allocated.
+ */
+static int cnss_event_work_init(struct cnss_plat_data *plat_priv)
+{
+	spin_lock_init(&plat_priv->event_lock);
+	plat_priv->event_wq = alloc_workqueue("cnss_driver_event",
+					      WQ_UNBOUND, 1);
+	if (!plat_priv->event_wq) {
+		cnss_pr_err("Failed to create event workqueue!\n");
+		/* Allocation failure is -ENOMEM, not -EFAULT (bad address). */
+		return -ENOMEM;
+	}
+
+	INIT_WORK(&plat_priv->event_work, cnss_driver_event_work);
+	INIT_LIST_HEAD(&plat_priv->event_list);
+
+	return 0;
+}
+
+/* Flush and destroy the driver-event workqueue. */
+static void cnss_event_work_deinit(struct cnss_plat_data *plat_priv)
+{
+	destroy_workqueue(plat_priv->event_wq);
+}
+
+/* Per-chip driver data; the OF match entries below point into this table
+ * and probe reads ->driver_data to tell QCA6174 from QCA6290.
+ */
+static const struct platform_device_id cnss_platform_id_table[] = {
+	{ .name = "qca6174", .driver_data = QCA6174_DEVICE_ID, },
+	{ .name = "qca6290", .driver_data = QCA6290_DEVICE_ID, },
+};
+
+static const struct of_device_id cnss_of_match_table[] = {
+	{
+		.compatible = "qcom,cnss",
+		.data = (void *)&cnss_platform_id_table[0]},
+	{
+		.compatible = "qcom,cnss-qca6290",
+		.data = (void *)&cnss_platform_id_table[1]},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, cnss_of_match_table);
+
+/* Platform driver probe: resolve the device ID from the OF match data,
+ * allocate the per-device context, then bring up every platform-level
+ * service in order — resources, device power + PCI (unless bypassed by
+ * the SKIP_DEVICE_BOOT quirk), esoc, bus scale, sysfs, the event
+ * workqueue, QMI and debugfs.  On any failure, everything already set up
+ * is torn down in reverse order via the goto ladder.
+ */
+static int cnss_probe(struct platform_device *plat_dev)
+{
+	int ret = 0;
+	struct cnss_plat_data *plat_priv;
+	const struct of_device_id *of_id;
+	const struct platform_device_id *device_id;
+
+	/* Only one platform device instance is supported. */
+	if (cnss_get_plat_priv(plat_dev)) {
+		cnss_pr_err("Driver is already initialized!\n");
+		ret = -EEXIST;
+		goto out;
+	}
+
+	of_id = of_match_device(cnss_of_match_table, &plat_dev->dev);
+	if (!of_id || !of_id->data) {
+		cnss_pr_err("Failed to find of match device!\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	device_id = of_id->data;
+
+	plat_priv = devm_kzalloc(&plat_dev->dev, sizeof(*plat_priv),
+				 GFP_KERNEL);
+	if (!plat_priv) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	plat_priv->plat_dev = plat_dev;
+	plat_priv->device_id = device_id->driver_data;
+	cnss_set_plat_priv(plat_dev, plat_priv);
+	platform_set_drvdata(plat_dev, plat_priv);
+
+	ret = cnss_get_resources(plat_priv);
+	if (ret)
+		goto reset_ctx;
+
+	/* The SKIP_DEVICE_BOOT quirk defers power-on and PCI enumeration. */
+	if (!test_bit(SKIP_DEVICE_BOOT, &quirks)) {
+		ret = cnss_power_on_device(plat_priv);
+		if (ret)
+			goto free_res;
+
+		ret = cnss_pci_init(plat_priv);
+		if (ret)
+			goto power_off;
+	}
+
+	ret = cnss_register_esoc(plat_priv);
+	if (ret)
+		goto deinit_pci;
+
+	ret = cnss_register_bus_scale(plat_priv);
+	if (ret)
+		goto unreg_esoc;
+
+	ret = cnss_create_sysfs(plat_priv);
+	if (ret)
+		goto unreg_bus_scale;
+
+	ret = cnss_event_work_init(plat_priv);
+	if (ret)
+		goto remove_sysfs;
+
+	ret = cnss_qmi_init(plat_priv);
+	if (ret)
+		goto deinit_event_work;
+
+	ret = cnss_debugfs_create(plat_priv);
+	if (ret)
+		goto deinit_qmi;
+
+	setup_timer(&plat_priv->fw_boot_timer,
+		    fw_boot_timeout, (unsigned long)plat_priv);
+
+	register_pm_notifier(&cnss_pm_notifier);
+
+	/* Wakeup-source init failure is logged but not treated as fatal. */
+	ret = device_init_wakeup(&plat_dev->dev, true);
+	if (ret)
+		cnss_pr_err("Failed to init platform device wakeup source, err = %d\n",
+			    ret);
+
+	init_completion(&plat_priv->power_up_complete);
+	mutex_init(&plat_priv->dev_lock);
+
+	cnss_pr_info("Platform driver probed successfully.\n");
+
+	return 0;
+
+deinit_qmi:
+	cnss_qmi_deinit(plat_priv);
+deinit_event_work:
+	cnss_event_work_deinit(plat_priv);
+remove_sysfs:
+	cnss_remove_sysfs(plat_priv);
+unreg_bus_scale:
+	cnss_unregister_bus_scale(plat_priv);
+unreg_esoc:
+	cnss_unregister_esoc(plat_priv);
+deinit_pci:
+	if (!test_bit(SKIP_DEVICE_BOOT, &quirks))
+		cnss_pci_deinit(plat_priv);
+power_off:
+	if (!test_bit(SKIP_DEVICE_BOOT, &quirks))
+		cnss_power_off_device(plat_priv);
+free_res:
+	cnss_put_resources(plat_priv);
+reset_ctx:
+	platform_set_drvdata(plat_dev, NULL);
+	cnss_set_plat_priv(plat_dev, NULL);
+out:
+	return ret;
+}
+
+/* Platform driver remove: release any waiters on power-up, then undo the
+ * probe-time setup in reverse order and clear the global context.
+ */
+static int cnss_remove(struct platform_device *plat_dev)
+{
+	struct cnss_plat_data *plat_priv = platform_get_drvdata(plat_dev);
+
+	complete_all(&plat_priv->power_up_complete);
+	device_init_wakeup(&plat_dev->dev, false);
+	unregister_pm_notifier(&cnss_pm_notifier);
+	del_timer(&plat_priv->fw_boot_timer);
+	cnss_debugfs_destroy(plat_priv);
+	cnss_qmi_deinit(plat_priv);
+	cnss_event_work_deinit(plat_priv);
+	cnss_remove_sysfs(plat_priv);
+	cnss_unregister_bus_scale(plat_priv);
+	cnss_unregister_esoc(plat_priv);
+	cnss_pci_deinit(plat_priv);
+	cnss_put_resources(plat_priv);
+	platform_set_drvdata(plat_dev, NULL);
+	plat_env = NULL;
+
+	return 0;
+}
+
+/* Platform driver glue; probe may run asynchronously when
+ * CONFIG_CNSS_ASYNC is enabled.
+ */
+static struct platform_driver cnss_platform_driver = {
+	.probe  = cnss_probe,
+	.remove = cnss_remove,
+	.driver = {
+		.name = "cnss2",
+		.owner = THIS_MODULE,
+		.of_match_table = cnss_of_match_table,
+#ifdef CONFIG_CNSS_ASYNC
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+#endif
+	},
+};
+
+/* Module init: bring up the debug infrastructure first so probe can log,
+ * and roll it back if driver registration fails.
+ */
+static int __init cnss_initialize(void)
+{
+	int ret = 0;
+
+	cnss_debug_init();
+	ret = platform_driver_register(&cnss_platform_driver);
+	if (ret)
+		cnss_debug_deinit();
+
+	return ret;
+}
+
+static void __exit cnss_exit(void)
+{
+	platform_driver_unregister(&cnss_platform_driver);
+	cnss_debug_deinit();
+}
+
+module_init(cnss_initialize);
+module_exit(cnss_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CNSS2 Platform Driver");
diff --git a/drivers/net/wireless/cnss2/main.h b/drivers/net/wireless/cnss2/main.h
new file mode 100644
index 0000000..81b5de8
--- /dev/null
+++ b/drivers/net/wireless/cnss2/main.h
@@ -0,0 +1,225 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CNSS_MAIN_H
+#define _CNSS_MAIN_H
+
+#include <linux/esoc_client.h>
+#include <linux/etherdevice.h>
+#include <linux/msm-bus.h>
+#include <linux/pm_qos.h>
+#include <net/cnss2.h>
+#include <soc/qcom/memory_dump.h>
+#include <soc/qcom/subsystem_restart.h>
+
+#include "qmi.h"
+
+#define MAX_NO_OF_MAC_ADDR		4
+
+/* Flags for cnss_driver_event_post(): SYNC waits for the event to be
+ * processed; UNINTERRUPTIBLE makes that wait non-interruptible.
+ */
+#define CNSS_EVENT_SYNC   BIT(0)
+#define CNSS_EVENT_UNINTERRUPTIBLE BIT(1)
+#define CNSS_EVENT_SYNC_UNINTERRUPTIBLE (CNSS_EVENT_SYNC | \
+				CNSS_EVENT_UNINTERRUPTIBLE)
+
+enum cnss_dev_bus_type {
+	CNSS_BUS_NONE = -1,
+	CNSS_BUS_PCI,
+};
+
+/* One WLAN supply parsed from DT: voltage window, load and post-enable
+ * delay.
+ */
+struct cnss_vreg_info {
+	struct regulator *reg;
+	const char *name;
+	u32 min_uv;
+	u32 max_uv;
+	u32 load_ua;
+	u32 delay_us;
+};
+
+/* Pinctrl states used to sequence WLAN_EN and bootstrap pins. */
+struct cnss_pinctrl_info {
+	struct pinctrl *pinctrl;
+	struct pinctrl_state *bootstrap_active;
+	struct pinctrl_state *wlan_en_active;
+	struct pinctrl_state *wlan_en_sleep;
+};
+
+/* Subsystem-restart registration state (see cnss_register_subsys()). */
+struct cnss_subsys_info {
+	struct subsys_device *subsys_device;
+	struct subsys_desc subsys_desc;
+	void *subsys_handle;
+};
+
+/* v1 (QCA6174) ramdump: a pre-allocated coherent buffer plus its MSM
+ * dump-table descriptor.
+ */
+struct cnss_ramdump_info {
+	struct ramdump_device *ramdump_dev;
+	unsigned long ramdump_size;
+	void *ramdump_va;
+	phys_addr_t ramdump_pa;
+	struct msm_dump_data dump_data;
+};
+
+/* One segment of a v2 dump. */
+struct cnss_dump_seg {
+	unsigned long address;
+	void *v_address;
+	unsigned long size;
+	u32 type;
+};
+
+/* Header of the v2 dump descriptor registered in the MSM dump table. */
+struct cnss_dump_data {
+	u32 version;
+	u32 magic;
+	char name[32];
+	phys_addr_t paddr;
+	int nentries;
+	u32 seg_version;
+};
+
+/* v2 (QCA6290) ramdump: only a descriptor is allocated up front. */
+struct cnss_ramdump_info_v2 {
+	struct ramdump_device *ramdump_dev;
+	unsigned long ramdump_size;
+	void *dump_data_vaddr;
+	bool dump_data_valid;
+	struct cnss_dump_data dump_data;
+};
+
+/* External SoC (modem) notification state. */
+struct cnss_esoc_info {
+	struct esoc_desc *esoc_desc;
+	bool notify_modem_status;
+	void *modem_notify_handler;
+	int modem_current_status;
+};
+
+/* MSM bus bandwidth voting state (see cnss_register_bus_scale()). */
+struct cnss_bus_bw_info {
+	struct msm_bus_scale_pdata *bus_scale_table;
+	u32 bus_client;
+	int current_bw_vote;
+};
+
+/* A firmware memory region (FW or M3). */
+struct cnss_fw_mem {
+	size_t size;
+	void *va;
+	phys_addr_t pa;
+	bool valid;
+};
+
+/* Events processed on the cnss_driver_event workqueue. */
+enum cnss_driver_event_type {
+	CNSS_DRIVER_EVENT_SERVER_ARRIVE,
+	CNSS_DRIVER_EVENT_SERVER_EXIT,
+	CNSS_DRIVER_EVENT_REQUEST_MEM,
+	CNSS_DRIVER_EVENT_FW_MEM_READY,
+	CNSS_DRIVER_EVENT_FW_READY,
+	CNSS_DRIVER_EVENT_COLD_BOOT_CAL_START,
+	CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE,
+	CNSS_DRIVER_EVENT_REGISTER_DRIVER,
+	CNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
+	CNSS_DRIVER_EVENT_RECOVERY,
+	CNSS_DRIVER_EVENT_FORCE_FW_ASSERT,
+	CNSS_DRIVER_EVENT_POWER_UP,
+	CNSS_DRIVER_EVENT_POWER_DOWN,
+	CNSS_DRIVER_EVENT_MAX,
+};
+
+/* Bit positions for plat_priv->driver_state. */
+enum cnss_driver_state {
+	CNSS_QMI_WLFW_CONNECTED,
+	CNSS_FW_MEM_READY,
+	CNSS_FW_READY,
+	CNSS_COLD_BOOT_CAL,
+	CNSS_DRIVER_LOADING,
+	CNSS_DRIVER_UNLOADING,
+	CNSS_DRIVER_PROBED,
+	CNSS_DRIVER_RECOVERY,
+	CNSS_FW_BOOT_RECOVERY,
+	CNSS_DEV_ERR_NOTIFY,
+	CNSS_DRIVER_DEBUG,
+};
+
+struct cnss_recovery_data {
+	enum cnss_recovery_reason reason;
+};
+
+enum cnss_pins {
+	CNSS_WLAN_EN,
+	CNSS_PCIE_TXP,
+	CNSS_PCIE_TXN,
+	CNSS_PCIE_RXP,
+	CNSS_PCIE_RXN,
+	CNSS_PCIE_REFCLKP,
+	CNSS_PCIE_REFCLKN,
+	CNSS_PCIE_RST,
+	CNSS_PCIE_WAKE,
+	CNSS_PCIE_WAKE,
+};
+
+struct cnss_pin_connect_result {
+	u32 fw_pwr_pin_result;
+	u32 fw_phy_io_pin_result;
+	u32 fw_rf_pin_result;
+	u32 host_pin_result;
+};
+
+/* Per-platform-device context; one instance per cnss2 device. */
+struct cnss_plat_data {
+	struct platform_device *plat_dev;
+	void *bus_priv;
+	struct cnss_vreg_info *vreg_info;
+	struct cnss_pinctrl_info pinctrl_info;
+	struct cnss_subsys_info subsys_info;
+	struct cnss_ramdump_info ramdump_info;
+	struct cnss_ramdump_info_v2 ramdump_info_v2;
+	struct cnss_esoc_info esoc_info;
+	struct cnss_bus_bw_info bus_bw_info;
+	struct notifier_block modem_nb;
+	struct cnss_platform_cap cap;
+	struct pm_qos_request qos_request;
+	unsigned long device_id;
+	struct cnss_wlan_driver *driver_ops;
+	enum cnss_driver_status driver_status;
+	u32 recovery_count;
+	unsigned long driver_state;
+	struct list_head event_list;
+	spinlock_t event_lock; /* spinlock for driver work event handling */
+	struct work_struct event_work;
+	struct workqueue_struct *event_wq;
+	struct qmi_handle *qmi_wlfw_clnt;
+	struct work_struct qmi_recv_msg_work;
+	struct notifier_block qmi_wlfw_clnt_nb;
+	struct wlfw_rf_chip_info_s_v01 chip_info;
+	struct wlfw_rf_board_info_s_v01 board_info;
+	struct wlfw_soc_info_s_v01 soc_info;
+	struct wlfw_fw_version_info_s_v01 fw_version_info;
+	struct cnss_fw_mem fw_mem;
+	struct cnss_fw_mem m3_mem;
+	struct cnss_pin_connect_result pin_result;
+	struct dentry *root_dentry;
+	atomic_t pm_count;
+	struct timer_list fw_boot_timer;
+	struct completion power_up_complete;
+	struct mutex dev_lock; /* mutex for register access through debugfs */
+	u32 diag_reg_read_addr;
+	u32 diag_reg_read_mem_type;
+	u32 diag_reg_read_len;
+	u8 *diag_reg_read_buf;
+};
+
+void *cnss_bus_dev_to_bus_priv(struct device *dev);
+struct cnss_plat_data *cnss_bus_dev_to_plat_priv(struct device *dev);
+int cnss_driver_event_post(struct cnss_plat_data *plat_priv,
+			   enum cnss_driver_event_type type,
+			   u32 flags, void *data);
+int cnss_get_vreg(struct cnss_plat_data *plat_priv);
+int cnss_get_pinctrl(struct cnss_plat_data *plat_priv);
+int cnss_power_on_device(struct cnss_plat_data *plat_priv);
+void cnss_power_off_device(struct cnss_plat_data *plat_priv);
+int cnss_register_subsys(struct cnss_plat_data *plat_priv);
+void cnss_unregister_subsys(struct cnss_plat_data *plat_priv);
+int cnss_register_ramdump(struct cnss_plat_data *plat_priv);
+void cnss_unregister_ramdump(struct cnss_plat_data *plat_priv);
+void cnss_set_pin_connect_status(struct cnss_plat_data *plat_priv);
+u32 cnss_get_wake_msi(struct cnss_plat_data *plat_priv);
+
+#endif /* _CNSS_MAIN_H */
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
new file mode 100644
index 0000000..3c5bc72
--- /dev/null
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -0,0 +1,1615 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/firmware.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+
+#include "main.h"
+#include "debug.h"
+#include "pci.h"
+
+#define PCI_LINK_UP			1
+#define PCI_LINK_DOWN			0
+
+#define SAVE_PCI_CONFIG_SPACE		1
+#define RESTORE_PCI_CONFIG_SPACE	0
+
+#define PM_OPTIONS_DEFAULT		0
+#define PM_OPTIONS_LINK_DOWN \
+	(MSM_PCIE_CONFIG_NO_CFG_RESTORE | MSM_PCIE_CONFIG_LINKDOWN)
+
+#define PCI_BAR_NUM			0
+
+#ifdef CONFIG_ARM_LPAE
+#define PCI_DMA_MASK			64
+#else
+#define PCI_DMA_MASK			32
+#endif
+
+#define MHI_NODE_NAME			"qcom,mhi"
+
+#define MAX_M3_FILE_NAME_LENGTH		13
+#define DEFAULT_M3_FILE_NAME		"m3.bin"
+
+static DEFINE_SPINLOCK(pci_link_down_lock);
+
+static unsigned int pci_link_down_panic;
+module_param(pci_link_down_panic, uint, 0600);
+MODULE_PARM_DESC(pci_link_down_panic,
+		 "Trigger kernel panic when PCI link down is detected");
+
+static bool fbc_bypass;
+#ifdef CONFIG_CNSS2_DEBUG
+module_param(fbc_bypass, bool, 0600);
+MODULE_PARM_DESC(fbc_bypass,
+		 "Bypass firmware download when loading WLAN driver");
+#endif
+
+/* Save (@save == SAVE_PCI_CONFIG_SPACE) or restore PCI config space
+ * around link power transitions.  After a link-down or while recovery is
+ * in progress the previously saved state is not trusted: nothing is
+ * saved, and on restore the probe-time default_state is loaded instead.
+ */
+static int cnss_set_pci_config_space(struct cnss_pci_data *pci_priv, bool save)
+{
+	struct pci_dev *pci_dev = pci_priv->pci_dev;
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+	bool link_down_or_recovery;
+
+	if (!plat_priv)
+		return -ENODEV;
+
+	link_down_or_recovery = pci_priv->pci_link_down_ind ||
+		(test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state));
+
+	if (save) {
+		if (link_down_or_recovery) {
+			pci_priv->saved_state = NULL;
+		} else {
+			pci_save_state(pci_dev);
+			pci_priv->saved_state = pci_store_saved_state(pci_dev);
+		}
+	} else {
+		if (link_down_or_recovery) {
+			pci_load_saved_state(pci_dev, pci_priv->default_state);
+			pci_restore_state(pci_dev);
+		} else if (pci_priv->saved_state) {
+			/* Frees saved_state and resets the pointer. */
+			pci_load_and_free_saved_state(pci_dev,
+						      &pci_priv->saved_state);
+			pci_restore_state(pci_dev);
+		}
+	}
+
+	return 0;
+}
+
+/* Drive the MSM PCIe root complex: resume (@link_up) or suspend the link
+ * with default PM options.  Returns 0 or the msm_pcie_pm_control() error.
+ */
+static int cnss_set_pci_link(struct cnss_pci_data *pci_priv, bool link_up)
+{
+	int ret = 0;
+	struct pci_dev *pci_dev = pci_priv->pci_dev;
+
+	ret = msm_pcie_pm_control(link_up ? MSM_PCIE_RESUME :
+				  MSM_PCIE_SUSPEND,
+				  pci_dev->bus->number,
+				  pci_dev, NULL,
+				  PM_OPTIONS_DEFAULT);
+	if (ret) {
+		cnss_pr_err("Failed to %s PCI link with default option, err = %d\n",
+			    link_up ? "resume" : "suspend", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Quiesce and power down the PCI link: save config space, disable the
+ * device, move non-QCA6174 chips to D3hot, then suspend the link itself.
+ * A no-op (returns 0) if the link is already suspended.
+ */
+int cnss_suspend_pci_link(struct cnss_pci_data *pci_priv)
+{
+	int ret = 0;
+
+	if (!pci_priv)
+		return -ENODEV;
+
+	cnss_pr_dbg("Suspending PCI link\n");
+	if (!pci_priv->pci_link_state) {
+		cnss_pr_info("PCI link is already suspended!\n");
+		goto out;
+	}
+
+	ret = cnss_set_pci_config_space(pci_priv, SAVE_PCI_CONFIG_SPACE);
+	if (ret)
+		goto out;
+
+	pci_disable_device(pci_priv->pci_dev);
+
+	if (pci_priv->pci_dev->device != QCA6174_DEVICE_ID) {
+		/* Capture the return value so the log reports the actual
+		 * error; previously the stale 'ret' (0 at this point, from
+		 * the config-space save above) was printed.  D3hot failure
+		 * is non-fatal here and 'ret' is overwritten just below.
+		 */
+		ret = pci_set_power_state(pci_priv->pci_dev, PCI_D3hot);
+		if (ret)
+			cnss_pr_err("Failed to set D3Hot, err = %d\n", ret);
+	}
+
+	ret = cnss_set_pci_link(pci_priv, PCI_LINK_DOWN);
+	if (ret)
+		goto out;
+
+	pci_priv->pci_link_state = PCI_LINK_DOWN;
+
+	return 0;
+out:
+	return ret;
+}
+
+/* Bring the PCI link back up and re-enable the device: resume the link,
+ * restore config space, enable + set bus mastering, and clear the
+ * link-down indication.  A no-op (returns 0) if already resumed.
+ */
+int cnss_resume_pci_link(struct cnss_pci_data *pci_priv)
+{
+	int ret = 0;
+
+	if (!pci_priv)
+		return -ENODEV;
+
+	cnss_pr_dbg("Resuming PCI link\n");
+	if (pci_priv->pci_link_state) {
+		cnss_pr_info("PCI link is already resumed!\n");
+		goto out;
+	}
+
+	ret = cnss_set_pci_link(pci_priv, PCI_LINK_UP);
+	if (ret)
+		goto out;
+
+	pci_priv->pci_link_state = PCI_LINK_UP;
+
+	ret = cnss_set_pci_config_space(pci_priv, RESTORE_PCI_CONFIG_SPACE);
+	if (ret)
+		goto out;
+
+	ret = pci_enable_device(pci_priv->pci_dev);
+	if (ret) {
+		cnss_pr_err("Failed to enable PCI device, err = %d\n", ret);
+		goto out;
+	}
+
+	pci_set_master(pci_priv->pci_dev);
+
+	if (pci_priv->pci_link_down_ind)
+		pci_priv->pci_link_down_ind = false;
+
+	return 0;
+out:
+	return ret;
+}
+
+/* Exported entry point for the WLAN host driver to report a detected PCI
+ * link failure.  Marks the link-down indication under pci_link_down_lock
+ * (returning -EINVAL if recovery is already in flight), notifies MHI and
+ * schedules recovery.  Panics instead if pci_link_down_panic is set.
+ */
+int cnss_pci_link_down(struct device *dev)
+{
+	unsigned long flags;
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+
+	if (!pci_priv) {
+		cnss_pr_err("pci_priv is NULL!\n");
+		return -EINVAL;
+	}
+
+	if (pci_link_down_panic)
+		panic("cnss: PCI link is down!\n");
+
+	spin_lock_irqsave(&pci_link_down_lock, flags);
+	if (pci_priv->pci_link_down_ind) {
+		cnss_pr_dbg("PCI link down recovery is in progress, ignore!\n");
+		spin_unlock_irqrestore(&pci_link_down_lock, flags);
+		return -EINVAL;
+	}
+	pci_priv->pci_link_down_ind = true;
+	spin_unlock_irqrestore(&pci_link_down_lock, flags);
+
+	cnss_pr_err("PCI link down is detected by host driver, schedule recovery!\n");
+
+	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_NOTIFY_LINK_ERROR);
+	cnss_schedule_recovery(dev, CNSS_REASON_LINK_DOWN);
+
+	return 0;
+}
+EXPORT_SYMBOL(cnss_pci_link_down);
+
+/* Create an IOMMU mapping over [smmu_iova_start, +smmu_iova_len), set the
+ * atomic-context and stage-1 bypass domain attributes (which must be set
+ * before attach), and attach the PCI device to it.  On success the
+ * mapping is stored in pci_priv->smmu_mapping.
+ */
+static int cnss_pci_init_smmu(struct cnss_pci_data *pci_priv)
+{
+	int ret = 0;
+	struct device *dev;
+	struct dma_iommu_mapping *mapping;
+	int atomic_ctx = 1;
+	int s1_bypass = 1;
+
+	dev = &pci_priv->pci_dev->dev;
+
+	mapping = arm_iommu_create_mapping(&platform_bus_type,
+					   pci_priv->smmu_iova_start,
+					   pci_priv->smmu_iova_len);
+	if (IS_ERR(mapping)) {
+		ret = PTR_ERR(mapping);
+		cnss_pr_err("Failed to create SMMU mapping, err = %d\n", ret);
+		goto out;
+	}
+
+	ret = iommu_domain_set_attr(mapping->domain,
+				    DOMAIN_ATTR_ATOMIC,
+				    &atomic_ctx);
+	if (ret) {
+		pr_err("Failed to set SMMU atomic_ctx attribute, err = %d\n",
+		       ret);
+		goto release_mapping;
+	}
+
+	ret = iommu_domain_set_attr(mapping->domain,
+				    DOMAIN_ATTR_S1_BYPASS,
+				    &s1_bypass);
+	if (ret) {
+		pr_err("Failed to set SMMU s1_bypass attribute, err = %d\n",
+		       ret);
+		goto release_mapping;
+	}
+
+	ret = arm_iommu_attach_device(dev, mapping);
+	if (ret) {
+		pr_err("Failed to attach SMMU device, err = %d\n", ret);
+		goto release_mapping;
+	}
+
+	pci_priv->smmu_mapping = mapping;
+
+	return ret;
+release_mapping:
+	arm_iommu_release_mapping(mapping);
+out:
+	return ret;
+}
+
+/* Detach the device from its IOMMU mapping and release it. */
+static void cnss_pci_deinit_smmu(struct cnss_pci_data *pci_priv)
+{
+	arm_iommu_detach_device(&pci_priv->pci_dev->dev);
+	arm_iommu_release_mapping(pci_priv->smmu_mapping);
+
+	pci_priv->smmu_mapping = NULL;
+}
+
+/* MSM PCIe framework event callback (registered in cnss_reg_pci_event()).
+ * LINKDOWN: mark the link-down indication under the lock (ignoring the
+ * event if recovery is already running), notify MHI, mask QCA6174's legacy
+ * IRQ and schedule recovery.  WAKEUP: if the device auto-suspended with
+ * wake-interrupt monitoring armed, request a runtime resume.
+ */
+static void cnss_pci_event_cb(struct msm_pcie_notify *notify)
+{
+	unsigned long flags;
+	struct pci_dev *pci_dev;
+	struct cnss_pci_data *pci_priv;
+
+	if (!notify)
+		return;
+
+	pci_dev = notify->user;
+	if (!pci_dev)
+		return;
+
+	pci_priv = cnss_get_pci_priv(pci_dev);
+	if (!pci_priv)
+		return;
+
+	switch (notify->event) {
+	case MSM_PCIE_EVENT_LINKDOWN:
+		if (pci_link_down_panic)
+			panic("cnss: PCI link is down!\n");
+
+		spin_lock_irqsave(&pci_link_down_lock, flags);
+		if (pci_priv->pci_link_down_ind) {
+			cnss_pr_dbg("PCI link down recovery is in progress, ignore!\n");
+			spin_unlock_irqrestore(&pci_link_down_lock, flags);
+			return;
+		}
+		pci_priv->pci_link_down_ind = true;
+		spin_unlock_irqrestore(&pci_link_down_lock, flags);
+
+		cnss_pr_err("PCI link down, schedule recovery!\n");
+		cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_NOTIFY_LINK_ERROR);
+		if (pci_dev->device == QCA6174_DEVICE_ID)
+			disable_irq(pci_dev->irq);
+		cnss_schedule_recovery(&pci_dev->dev, CNSS_REASON_LINK_DOWN);
+		break;
+	case MSM_PCIE_EVENT_WAKEUP:
+		if (cnss_pci_get_monitor_wake_intr(pci_priv) &&
+		    cnss_pci_get_auto_suspended(pci_priv)) {
+			cnss_pci_set_monitor_wake_intr(pci_priv, false);
+			pm_request_resume(&pci_dev->dev);
+		}
+		break;
+	default:
+		cnss_pr_err("Received invalid PCI event: %d\n", notify->event);
+	}
+}
+
+/* Register cnss_pci_event_cb() with the MSM PCIe framework for LINKDOWN
+ * and WAKEUP events; NO_RECOVERY lets this driver own recovery handling.
+ */
+static int cnss_reg_pci_event(struct cnss_pci_data *pci_priv)
+{
+	int ret = 0;
+	struct msm_pcie_register_event *pci_event;
+
+	pci_event = &pci_priv->msm_pci_event;
+	pci_event->events = MSM_PCIE_EVENT_LINKDOWN |
+		MSM_PCIE_EVENT_WAKEUP;
+	pci_event->user = pci_priv->pci_dev;
+	pci_event->mode = MSM_PCIE_TRIGGER_CALLBACK;
+	pci_event->callback = cnss_pci_event_cb;
+	pci_event->options = MSM_PCIE_CONFIG_NO_RECOVERY;
+
+	ret = msm_pcie_register_event(pci_event);
+	if (ret)
+		cnss_pr_err("Failed to register MSM PCI event, err = %d\n",
+			    ret);
+
+	return ret;
+}
+
+/* Deregister the MSM PCIe event callback. */
+static void cnss_dereg_pci_event(struct cnss_pci_data *pci_priv)
+{
+	msm_pcie_deregister_event(&pci_priv->msm_pci_event);
+}
+
+/* System (dev_pm_ops) suspend: let the WLAN host driver suspend first,
+ * then — if the link is up — suspend MHI, save config space, disable the
+ * device and enter D3hot.  Failures map to -EAGAIN so the suspend is
+ * retried; an MHI failure also rolls back the host driver's suspend.
+ */
+static int cnss_pci_suspend(struct device *dev)
+{
+	int ret = 0;
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+	struct cnss_plat_data *plat_priv;
+	struct cnss_wlan_driver *driver_ops;
+
+	pm_message_t state = { .event = PM_EVENT_SUSPEND };
+
+	if (!pci_priv)
+		goto out;
+
+	plat_priv = pci_priv->plat_priv;
+	if (!plat_priv)
+		goto out;
+
+	driver_ops = plat_priv->driver_ops;
+	if (driver_ops && driver_ops->suspend) {
+		ret = driver_ops->suspend(pci_dev, state);
+		if (ret) {
+			cnss_pr_err("Failed to suspend host driver, err = %d\n",
+				    ret);
+			ret = -EAGAIN;
+			goto out;
+		}
+	}
+
+	if (pci_priv->pci_link_state) {
+		ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_SUSPEND);
+		if (ret) {
+			/* Undo the host driver suspend done above. */
+			if (driver_ops && driver_ops->resume)
+				driver_ops->resume(pci_dev);
+			ret = -EAGAIN;
+			goto out;
+		}
+
+		cnss_set_pci_config_space(pci_priv,
+					  SAVE_PCI_CONFIG_SPACE);
+		pci_disable_device(pci_dev);
+
+		ret = pci_set_power_state(pci_dev, PCI_D3hot);
+		if (ret)
+			cnss_pr_err("Failed to set D3Hot, err = %d\n",
+				    ret);
+	}
+
+	cnss_pci_set_monitor_wake_intr(pci_priv, false);
+
+	return 0;
+
+out:
+	return ret;
+}
+
+/* System (dev_pm_ops) resume: skipped entirely while link-down recovery
+ * is pending.  Re-enables the device, restores config space if it was
+ * saved, restores bus mastering, resumes MHI and finally the WLAN host
+ * driver.  Enable/resume failures are logged but not propagated.
+ */
+static int cnss_pci_resume(struct device *dev)
+{
+	int ret = 0;
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+	struct cnss_plat_data *plat_priv;
+	struct cnss_wlan_driver *driver_ops;
+
+	if (!pci_priv)
+		goto out;
+
+	plat_priv = pci_priv->plat_priv;
+	if (!plat_priv)
+		goto out;
+
+	if (pci_priv->pci_link_down_ind)
+		goto out;
+
+	ret = pci_enable_device(pci_dev);
+	if (ret)
+		cnss_pr_err("Failed to enable PCI device, err = %d\n", ret);
+
+	if (pci_priv->saved_state)
+		cnss_set_pci_config_space(pci_priv,
+					  RESTORE_PCI_CONFIG_SPACE);
+
+	pci_set_master(pci_dev);
+	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
+
+	driver_ops = plat_priv->driver_ops;
+	if (driver_ops && driver_ops->resume) {
+		ret = driver_ops->resume(pci_dev);
+		if (ret)
+			cnss_pr_err("Failed to resume host driver, err = %d\n",
+				    ret);
+	}
+
+	return 0;
+
+out:
+	return ret;
+}
+
+/* Late (noirq) suspend hook: simply forwards to the WLAN host driver's
+ * suspend_noirq callback if one is registered.
+ */
+static int cnss_pci_suspend_noirq(struct device *dev)
+{
+	int ret = 0;
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+	struct cnss_plat_data *plat_priv;
+	struct cnss_wlan_driver *driver_ops;
+
+	if (!pci_priv)
+		goto out;
+
+	plat_priv = pci_priv->plat_priv;
+	if (!plat_priv)
+		goto out;
+
+	driver_ops = plat_priv->driver_ops;
+	if (driver_ops && driver_ops->suspend_noirq)
+		ret = driver_ops->suspend_noirq(pci_dev);
+
+out:
+	return ret;
+}
+
+/* Early (noirq) resume hook: forwards to the WLAN host driver's
+ * resume_noirq callback unless link-down recovery is pending.
+ */
+static int cnss_pci_resume_noirq(struct device *dev)
+{
+	int ret = 0;
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+	struct cnss_plat_data *plat_priv;
+	struct cnss_wlan_driver *driver_ops;
+
+	if (!pci_priv)
+		goto out;
+
+	plat_priv = pci_priv->plat_priv;
+	if (!plat_priv)
+		goto out;
+
+	driver_ops = plat_priv->driver_ops;
+	if (driver_ops && driver_ops->resume_noirq &&
+	    !pci_priv->pci_link_down_ind)
+		ret = driver_ops->resume_noirq(pci_dev);
+
+out:
+	return ret;
+}
+
+/* Runtime-PM suspend: delegates to the WLAN host driver's runtime_ops.
+ * Returns -EAGAIN (retry later) when the context is missing or link-down
+ * recovery is in progress.
+ */
+static int cnss_pci_runtime_suspend(struct device *dev)
+{
+	int ret = 0;
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+	struct cnss_plat_data *plat_priv;
+	struct cnss_wlan_driver *driver_ops;
+
+	if (!pci_priv)
+		return -EAGAIN;
+
+	plat_priv = pci_priv->plat_priv;
+	if (!plat_priv)
+		return -EAGAIN;
+
+	if (pci_priv->pci_link_down_ind) {
+		cnss_pr_dbg("PCI link down recovery is in progress!\n");
+		return -EAGAIN;
+	}
+
+	cnss_pr_dbg("Runtime suspend start\n");
+
+	driver_ops = plat_priv->driver_ops;
+	if (driver_ops && driver_ops->runtime_ops &&
+	    driver_ops->runtime_ops->runtime_suspend)
+		ret = driver_ops->runtime_ops->runtime_suspend(pci_dev);
+
+	cnss_pr_info("Runtime suspend status: %d\n", ret);
+
+	return ret;
+}
+
+/* Runtime-PM resume: mirror of cnss_pci_runtime_suspend(); delegates to
+ * the WLAN host driver's runtime_ops, -EAGAIN when not serviceable.
+ */
+static int cnss_pci_runtime_resume(struct device *dev)
+{
+	int ret = 0;
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+	struct cnss_plat_data *plat_priv;
+	struct cnss_wlan_driver *driver_ops;
+
+	if (!pci_priv)
+		return -EAGAIN;
+
+	plat_priv = pci_priv->plat_priv;
+	if (!plat_priv)
+		return -EAGAIN;
+
+	if (pci_priv->pci_link_down_ind) {
+		cnss_pr_dbg("PCI link down recovery is in progress!\n");
+		return -EAGAIN;
+	}
+
+	cnss_pr_dbg("Runtime resume start\n");
+
+	driver_ops = plat_priv->driver_ops;
+	if (driver_ops && driver_ops->runtime_ops &&
+	    driver_ops->runtime_ops->runtime_resume)
+		ret = driver_ops->runtime_ops->runtime_resume(pci_dev);
+
+	cnss_pr_info("Runtime resume status: %d\n", ret);
+
+	return ret;
+}
+
/* Runtime PM idle handler: schedule an autosuspend and return -EBUSY
 * so the PM core does not suspend immediately; the autosuspend timer
 * will invoke cnss_pci_runtime_suspend() later.
 */
static int cnss_pci_runtime_idle(struct device *dev)
{
	cnss_pr_dbg("Runtime idle\n");

	pm_request_autosuspend(dev);

	return -EBUSY;
}
+
/* Vote for or against PCIe power collapse on the WLAN root complex.
 * @vote: true votes to keep the link up (disable power collapse),
 *        false allows power collapse again.
 * Returns 0 or a negative errno from the MSM PCIe framework.
 */
int cnss_wlan_pm_control(struct device *dev, bool vote)
{
	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
	struct cnss_pci_data *pci_priv;
	struct pci_dev *pci_dev;

	if (!plat_priv)
		return -ENODEV;

	pci_priv = plat_priv->bus_priv;
	if (!pci_priv)
		return -ENODEV;

	pci_dev = pci_priv->pci_dev;

	return msm_pcie_pm_control(vote ? MSM_PCIE_DISABLE_PC :
				   MSM_PCIE_ENABLE_PC,
				   pci_dev->bus->number, pci_dev,
				   NULL, PM_OPTIONS_DEFAULT);
}
EXPORT_SYMBOL(cnss_wlan_pm_control);
+
/* Put the WLAN device into its auto (runtime) suspended state.
 * Sequence is order-critical: suspend MHI first, then save config
 * space, disable the device, enter D3hot and finally bring the PCI
 * link down.  On link-down failure the MHI/device state is unwound.
 * Also drops the bus-bandwidth vote to none.  Returns 0 or -EAGAIN.
 */
int cnss_auto_suspend(struct device *dev)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
	struct pci_dev *pci_dev;
	struct cnss_pci_data *pci_priv;
	struct cnss_bus_bw_info *bus_bw_info;

	if (!plat_priv)
		return -ENODEV;

	pci_priv = plat_priv->bus_priv;
	if (!pci_priv)
		return -ENODEV;

	pci_dev = pci_priv->pci_dev;

	if (pci_priv->pci_link_state) {
		if (cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_SUSPEND)) {
			ret = -EAGAIN;
			goto out;
		}

		cnss_set_pci_config_space(pci_priv, SAVE_PCI_CONFIG_SPACE);
		pci_disable_device(pci_dev);

		/* D3hot failure is logged but not fatal; the link still
		 * goes down below.
		 */
		ret = pci_set_power_state(pci_dev, PCI_D3hot);
		if (ret)
			cnss_pr_err("Failed to set D3Hot, err =  %d\n", ret);

		cnss_pr_dbg("Suspending PCI link\n");
		if (cnss_set_pci_link(pci_priv, PCI_LINK_DOWN)) {
			cnss_pr_err("Failed to suspend PCI link!\n");
			ret = -EAGAIN;
			goto resume_mhi;
		}

		pci_priv->pci_link_state = PCI_LINK_DOWN;
	}

	cnss_pci_set_auto_suspended(pci_priv, 1);
	cnss_pci_set_monitor_wake_intr(pci_priv, true);

	/* No traffic expected while suspended: release bus bandwidth. */
	bus_bw_info = &plat_priv->bus_bw_info;
	msm_bus_scale_client_update_request(bus_bw_info->bus_client,
					    CNSS_BUS_WIDTH_NONE);

	return 0;

resume_mhi:
	if (pci_enable_device(pci_dev))
		cnss_pr_err("Failed to enable PCI device!\n");
	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
out:
	return ret;
}
EXPORT_SYMBOL(cnss_auto_suspend);
+
/* Counterpart of cnss_auto_suspend(): bring the PCI link back up,
 * re-enable the device, restore config space, resume MHI and restore
 * the previous bus-bandwidth vote.  Returns 0 or -EAGAIN when the
 * link cannot be resumed.
 */
int cnss_auto_resume(struct device *dev)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
	struct pci_dev *pci_dev;
	struct cnss_pci_data *pci_priv;
	struct cnss_bus_bw_info *bus_bw_info;

	if (!plat_priv)
		return -ENODEV;

	pci_priv = plat_priv->bus_priv;
	if (!pci_priv)
		return -ENODEV;

	pci_dev = pci_priv->pci_dev;
	if (!pci_priv->pci_link_state) {
		cnss_pr_dbg("Resuming PCI link\n");
		if (cnss_set_pci_link(pci_priv, PCI_LINK_UP)) {
			cnss_pr_err("Failed to resume PCI link!\n");
			ret = -EAGAIN;
			goto out;
		}
		pci_priv->pci_link_state = PCI_LINK_UP;

		/* Enable failure is logged but recovery continues. */
		ret = pci_enable_device(pci_dev);
		if (ret)
			cnss_pr_err("Failed to enable PCI device, err = %d\n",
				    ret);

		cnss_set_pci_config_space(pci_priv, RESTORE_PCI_CONFIG_SPACE);
		pci_set_master(pci_dev);
		cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
	}

	cnss_pci_set_auto_suspended(pci_priv, 0);

	/* Restore the bandwidth vote that was in effect before suspend. */
	bus_bw_info = &plat_priv->bus_bw_info;
	msm_bus_scale_client_update_request(bus_bw_info->bus_client,
					    bus_bw_info->current_bw_vote);
out:
	return ret;
}
EXPORT_SYMBOL(cnss_auto_resume);
+
+int cnss_pm_request_resume(struct cnss_pci_data *pci_priv)
+{
+	struct pci_dev *pci_dev;
+
+	if (!pci_priv)
+		return -ENODEV;
+
+	pci_dev = pci_priv->pci_dev;
+	if (!pci_dev)
+		return -ENODEV;
+
+	return pm_request_resume(&pci_dev->dev);
+}
+
/* Allocate the DMA-coherent buffer the firmware requested
 * (plat_priv->fw_mem.size).  Idempotent: does nothing if already
 * allocated or if no size was requested.  On failure clears the size
 * so callers do not retry with a stale request.  Returns 0 / -ENOMEM.
 */
int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_fw_mem *fw_mem = &plat_priv->fw_mem;

	if (!fw_mem->va && fw_mem->size) {
		fw_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev,
						fw_mem->size, &fw_mem->pa,
						GFP_KERNEL);
		if (!fw_mem->va) {
			cnss_pr_err("Failed to allocate memory for FW, size: 0x%zx\n",
				    fw_mem->size);
			fw_mem->size = 0;

			return -ENOMEM;
		}
	}

	return 0;
}
+
/* Free the firmware DMA buffer allocated by cnss_pci_alloc_fw_mem()
 * and reset the bookkeeping fields.  Safe to call when nothing is
 * allocated.
 */
static void cnss_pci_free_fw_mem(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_fw_mem *fw_mem = &plat_priv->fw_mem;

	if (fw_mem->va && fw_mem->size) {
		cnss_pr_dbg("Freeing memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx\n",
			    fw_mem->va, &fw_mem->pa, fw_mem->size);
		dma_free_coherent(&pci_priv->pci_dev->dev, fw_mem->size,
				  fw_mem->va, fw_mem->pa);
		fw_mem->va = NULL;
		fw_mem->pa = 0;
		fw_mem->size = 0;
	}
}
+
/* Load the M3 firmware image from the filesystem into a DMA-coherent
 * buffer (plat_priv->m3_mem).  Idempotent: skipped if the image is
 * already resident.  Returns 0, a request_firmware() error, or
 * -ENOMEM.
 */
int cnss_pci_load_m3(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem;
	char filename[MAX_M3_FILE_NAME_LENGTH];
	const struct firmware *fw_entry;
	int ret = 0;

	if (!m3_mem->va && !m3_mem->size) {
		snprintf(filename, sizeof(filename), DEFAULT_M3_FILE_NAME);

		ret = request_firmware(&fw_entry, filename,
				       &pci_priv->pci_dev->dev);
		if (ret) {
			cnss_pr_err("Failed to load M3 image: %s\n", filename);
			return ret;
		}

		m3_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev,
						fw_entry->size, &m3_mem->pa,
						GFP_KERNEL);
		if (!m3_mem->va) {
			cnss_pr_err("Failed to allocate memory for M3, size: 0x%zx\n",
				    fw_entry->size);
			release_firmware(fw_entry);
			return -ENOMEM;
		}

		/* The firmware blob is released once copied; only the DMA
		 * copy is kept for the lifetime of the driver.
		 */
		memcpy(m3_mem->va, fw_entry->data, fw_entry->size);
		m3_mem->size = fw_entry->size;
		release_firmware(fw_entry);
	}

	return 0;
}
+
+static void cnss_pci_free_m3_mem(struct cnss_pci_data *pci_priv)
+{
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+	struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem;
+
+	if (m3_mem->va && m3_mem->size) {
+		cnss_pr_dbg("Freeing memory for M3, va: 0x%pK, pa: %pa, size: 0x%zx\n",
+			    m3_mem->va, &m3_mem->pa, m3_mem->size);
+		dma_free_coherent(&pci_priv->pci_dev->dev, m3_mem->size,
+				  m3_mem->va, m3_mem->pa);
+	}
+
+	m3_mem->va = NULL;
+	m3_mem->pa = 0;
+	m3_mem->size = 0;
+}
+
/* Report the iomapped BAR virtual address and its physical base to the
 * caller.  Returns -ENODEV when driver context is missing.
 */
int cnss_pci_get_bar_info(struct cnss_pci_data *pci_priv, void __iomem **va,
			  phys_addr_t *pa)
{
	if (!pci_priv)
		return -ENODEV;

	*va = pci_priv->bar;
	*pa = pci_resource_start(pci_priv->pci_dev, PCI_BAR_NUM);

	return 0;
}
+
/* Static MSI vector partitioning for the WLAN endpoint.  The per-user
 * ranges (2 + 11 + 1 + 18) exactly cover the 32 total vectors.
 */
static struct cnss_msi_config msi_config = {
	.total_vectors = 32,
	.total_users = 4,
	.users = (struct cnss_msi_user[]) {
		{ .name = "MHI", .num_vectors = 2, .base_vector = 0 },
		{ .name = "CE", .num_vectors = 11, .base_vector = 2 },
		{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
		{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
	},
};
+
/* Attach the static MSI layout above to this device instance.  Kept as
 * a function so the assignment could later come from DT/config data.
 */
static int cnss_pci_get_msi_assignment(struct cnss_pci_data *pci_priv)
{
	pci_priv->msi_config = &msi_config;

	return 0;
}
+
/* Enable MSI for the WLAN device: look up the vector layout, request
 * exactly total_vectors vectors (min == max, so partial grants are
 * rejected), and cache the endpoint's MSI base data word for later
 * per-user vector math in cnss_get_user_msi_assignment().
 * Undoes MSI enablement on failure.
 */
static int cnss_pci_enable_msi(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	int num_vectors;
	struct cnss_msi_config *msi_config;
	struct msi_desc *msi_desc;

	ret = cnss_pci_get_msi_assignment(pci_priv);
	if (ret) {
		cnss_pr_err("Failed to get MSI assignment, err = %d\n", ret);
		goto out;
	}

	msi_config = pci_priv->msi_config;
	if (!msi_config) {
		cnss_pr_err("msi_config is NULL!\n");
		ret = -EINVAL;
		goto out;
	}

	/* min == max: all-or-nothing allocation. */
	num_vectors = pci_enable_msi_range(pci_dev,
					   msi_config->total_vectors,
					   msi_config->total_vectors);
	if (num_vectors != msi_config->total_vectors) {
		cnss_pr_err("Failed to get enough MSI vectors (%d), available vectors = %d",
			    msi_config->total_vectors, num_vectors);
		ret = -EINVAL;
		goto reset_msi_config;
	}

	msi_desc = irq_get_msi_desc(pci_dev->irq);
	if (!msi_desc) {
		cnss_pr_err("msi_desc is NULL!\n");
		ret = -EINVAL;
		goto disable_msi;
	}

	/* Base data 0 is unexpected; assert (debug builds) but proceed. */
	pci_priv->msi_ep_base_data = msi_desc->msg.data;
	if (!pci_priv->msi_ep_base_data) {
		cnss_pr_err("Got 0 MSI base data!\n");
		CNSS_ASSERT(0);
	}

	cnss_pr_dbg("MSI base data is %d\n", pci_priv->msi_ep_base_data);

	return 0;

disable_msi:
	pci_disable_msi(pci_priv->pci_dev);
reset_msi_config:
	pci_priv->msi_config = NULL;
out:
	return ret;
}
+
/* Disable MSI on the WLAN device (counterpart of cnss_pci_enable_msi). */
static void cnss_pci_disable_msi(struct cnss_pci_data *pci_priv)
{
	pci_disable_msi(pci_priv->pci_dev);
}
+
/* Look up the MSI vector range reserved for a named user ("MHI", "CE",
 * "WAKE", "DP").  Outputs the vector count, the user's base MSI data
 * value (endpoint base + offset) and its base vector index.
 * Returns 0, or -ENODEV/-EINVAL when context or the name is unknown.
 */
int cnss_get_user_msi_assignment(struct device *dev, char *user_name,
				 int *num_vectors, u32 *user_base_data,
				 u32 *base_vector)
{
	struct cnss_pci_data *pci_priv = dev_get_drvdata(dev);
	struct cnss_msi_config *msi_config;
	int idx;

	if (!pci_priv)
		return -ENODEV;

	msi_config = pci_priv->msi_config;
	if (!msi_config) {
		cnss_pr_err("MSI is not supported.\n");
		return -EINVAL;
	}

	for (idx = 0; idx < msi_config->total_users; idx++) {
		if (strcmp(user_name, msi_config->users[idx].name) == 0) {
			*num_vectors = msi_config->users[idx].num_vectors;
			*user_base_data = msi_config->users[idx].base_vector
				+ pci_priv->msi_ep_base_data;
			*base_vector = msi_config->users[idx].base_vector;

			cnss_pr_dbg("Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
				    user_name, *num_vectors, *user_base_data,
				    *base_vector);

			return 0;
		}
	}

	cnss_pr_err("Failed to find MSI assignment for %s!\n", user_name);

	return -EINVAL;
}
EXPORT_SYMBOL(cnss_get_user_msi_assignment);
+
+int cnss_get_msi_irq(struct device *dev, unsigned int vector)
+{
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+
+	return pci_dev->irq + vector;
+}
+EXPORT_SYMBOL(cnss_get_msi_irq);
+
/* Read the MSI message address (low/high dwords) straight from the
 * device's MSI capability in config space.
 */
void cnss_get_msi_address(struct device *dev, u32 *msi_addr_low,
			  u32 *msi_addr_high)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
			      msi_addr_low);

	pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
			      msi_addr_high);
}
EXPORT_SYMBOL(cnss_get_msi_address);
+
+static int cnss_pci_enable_bus(struct cnss_pci_data *pci_priv)
+{
+	int ret = 0;
+	struct pci_dev *pci_dev = pci_priv->pci_dev;
+	u16 device_id;
+
+	pci_read_config_word(pci_dev, PCI_DEVICE_ID, &device_id);
+	if (device_id != pci_priv->pci_device_id->device)  {
+		cnss_pr_err("PCI device ID mismatch, config ID: 0x%x, probe ID: 0x%x\n",
+			    device_id, pci_priv->pci_device_id->device);
+		ret = -EIO;
+		goto out;
+	}
+
+	ret = pci_assign_resource(pci_dev, PCI_BAR_NUM);
+	if (ret) {
+		pr_err("Failed to assign PCI resource, err = %d\n", ret);
+		goto out;
+	}
+
+	ret = pci_enable_device(pci_dev);
+	if (ret) {
+		cnss_pr_err("Failed to enable PCI device, err = %d\n", ret);
+		goto out;
+	}
+
+	ret = pci_request_region(pci_dev, PCI_BAR_NUM, "cnss");
+	if (ret) {
+		cnss_pr_err("Failed to request PCI region, err = %d\n", ret);
+		goto disable_device;
+	}
+
+	ret = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(PCI_DMA_MASK));
+	if (ret) {
+		cnss_pr_err("Failed to set PCI DMA mask (%d), err = %d\n",
+			    ret, PCI_DMA_MASK);
+		goto release_region;
+	}
+
+	ret = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(PCI_DMA_MASK));
+	if (ret) {
+		cnss_pr_err("Failed to set PCI consistent DMA mask (%d), err = %d\n",
+			    ret, PCI_DMA_MASK);
+		goto release_region;
+	}
+
+	pci_set_master(pci_dev);
+
+	pci_priv->bar = pci_iomap(pci_dev, PCI_BAR_NUM, 0);
+	if (!pci_priv->bar) {
+		cnss_pr_err("Failed to do PCI IO map!\n");
+		ret = -EIO;
+		goto clear_master;
+	}
+	return 0;
+
+clear_master:
+	pci_clear_master(pci_dev);
+release_region:
+	pci_release_region(pci_dev, PCI_BAR_NUM);
+disable_device:
+	pci_disable_device(pci_dev);
+out:
+	return ret;
+}
+
/* Tear down everything cnss_pci_enable_bus() set up: unmap the BAR,
 * stop bus mastering, release the region and disable the device.
 */
static void cnss_pci_disable_bus(struct cnss_pci_data *pci_priv)
{
	struct pci_dev *pci_dev = pci_priv->pci_dev;

	if (pci_priv->bar) {
		pci_iounmap(pci_dev, pci_priv->bar);
		pci_priv->bar = NULL;
	}

	pci_clear_master(pci_dev);
	pci_release_region(pci_dev, PCI_BAR_NUM);
	pci_disable_device(pci_dev);
}
+
/* Runtime-PM get hook handed to the MHI layer (see cnss_pci_register_mhi). */
static int cnss_mhi_pm_runtime_get(struct pci_dev *pci_dev)
{
	return pm_runtime_get(&pci_dev->dev);
}
+
/* Runtime-PM put hook for MHI; drops the usage count without idling. */
static void cnss_mhi_pm_runtime_put_noidle(struct pci_dev *pci_dev)
{
	pm_runtime_put_noidle(&pci_dev->dev);
}
+
+static char *cnss_mhi_state_to_str(enum cnss_mhi_state mhi_state)
+{
+	switch (mhi_state) {
+	case CNSS_MHI_INIT:
+		return "INIT";
+	case CNSS_MHI_DEINIT:
+		return "DEINIT";
+	case CNSS_MHI_POWER_ON:
+		return "POWER_ON";
+	case CNSS_MHI_POWER_OFF:
+		return "POWER_OFF";
+	case CNSS_MHI_SUSPEND:
+		return "SUSPEND";
+	case CNSS_MHI_RESUME:
+		return "RESUME";
+	case CNSS_MHI_TRIGGER_RDDM:
+		return "TRIGGER_RDDM";
+	case CNSS_MHI_RDDM:
+		return "RDDM";
+	case CNSS_MHI_RDDM_KERNEL_PANIC:
+		return "RDDM_KERNEL_PANIC";
+	case CNSS_MHI_NOTIFY_LINK_ERROR:
+		return "NOTIFY_LINK_ERROR";
+	default:
+		return "UNKNOWN";
+	}
+};
+
/* Append the RDDM scatterlist entries of one segment type to the
 * ramdump segment table starting at @start_addr.  Returns the address
 * one past the last entry written (so calls can be chained), or
 * @start_addr unchanged when MHI reports no data.
 */
static void *cnss_pci_collect_dump_seg(struct cnss_pci_data *pci_priv,
				       enum mhi_rddm_segment type,
				       void *start_addr)
{
	int count;
	struct scatterlist *sg_list, *s;
	unsigned int i;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_dump_data *dump_data =
		&plat_priv->ramdump_info_v2.dump_data;
	struct cnss_dump_seg *dump_seg = start_addr;

	count = mhi_xfer_rddm(&pci_priv->mhi_dev, type, &sg_list);
	if (count <= 0 || !sg_list) {
		cnss_pr_err("Invalid dump_seg for type %u, count %u, sg_list %pK\n",
			    type, count, sg_list);
		return start_addr;
	}

	cnss_pr_dbg("Collect dump seg: type %u, nentries %d\n", type, count);

	/* Copy DMA address, kernel VA and length of each sg entry into
	 * the flat dump table consumed by the ramdump collector.
	 */
	for_each_sg(sg_list, s, count, i) {
		dump_seg->address = sg_dma_address(s);
		dump_seg->v_address = sg_virt(s);
		dump_seg->size = s->length;
		dump_seg->type = type;
		cnss_pr_dbg("seg-%d: address 0x%lx, v_address %pK, size 0x%lx\n",
			    i, dump_seg->address,
			    dump_seg->v_address, dump_seg->size);
		dump_seg++;
	}

	dump_data->nentries += count;

	return dump_seg;
}
+
/* Build the complete ramdump segment table: FW segments first, then RD
 * segments, written back-to-back into dump_data_vaddr.  Marks the dump
 * valid only if at least one entry was collected.
 */
void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_dump_data *dump_data =
		&plat_priv->ramdump_info_v2.dump_data;
	void *start_addr, *end_addr;

	dump_data->nentries = 0;

	start_addr = plat_priv->ramdump_info_v2.dump_data_vaddr;
	end_addr = cnss_pci_collect_dump_seg(pci_priv,
					     MHI_RDDM_FW_SEGMENT, start_addr);

	start_addr = end_addr;
	end_addr = cnss_pci_collect_dump_seg(pci_priv,
					     MHI_RDDM_RD_SEGMENT, start_addr);

	if (dump_data->nentries > 0)
		plat_priv->ramdump_info_v2.dump_data_valid = true;
}
+
+void cnss_pci_clear_dump_info(struct cnss_pci_data *pci_priv)
+{
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+
+	plat_priv->ramdump_info_v2.dump_data.nentries = 0;
+	plat_priv->ramdump_info_v2.dump_data_valid = false;
+}
+
+static void cnss_mhi_notify_status(enum MHI_CB_REASON reason, void *priv)
+{
+	struct cnss_pci_data *pci_priv = priv;
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+	enum cnss_recovery_reason cnss_reason = CNSS_REASON_RDDM;
+
+	if (!pci_priv)
+		return;
+
+	cnss_pr_dbg("MHI status cb is called with reason %d\n", reason);
+
+	set_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
+	del_timer(&plat_priv->fw_boot_timer);
+
+	if (reason == MHI_CB_SYS_ERROR)
+		cnss_reason = CNSS_REASON_TIMEOUT;
+
+	cnss_schedule_recovery(&pci_priv->pci_dev->dev,
+			       cnss_reason);
+}
+
/* Describe this PCI device to the MHI framework: publish the BAR and
 * IRQ as MHI resources, install runtime-PM and status callbacks,
 * enable RDDM support and register under MHI_NODE_NAME.
 */
static int cnss_pci_register_mhi(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	struct mhi_device *mhi_dev = &pci_priv->mhi_dev;

	mhi_dev->dev = &pci_priv->plat_priv->plat_dev->dev;
	mhi_dev->pci_dev = pci_dev;

	/* Resource 0: the iomapped BAR (virtual address range). */
	mhi_dev->resources[0].start = (resource_size_t)pci_priv->bar;
	mhi_dev->resources[0].end = (resource_size_t)pci_priv->bar +
		pci_resource_len(pci_dev, PCI_BAR_NUM);
	mhi_dev->resources[0].flags =
		pci_resource_flags(pci_dev, PCI_BAR_NUM);
	mhi_dev->resources[0].name = "BAR";
	cnss_pr_dbg("BAR start is %pa, BAR end is %pa\n",
		    &mhi_dev->resources[0].start, &mhi_dev->resources[0].end);

	/* Resource 1: the device IRQ; only filled in if not already set. */
	if (!mhi_dev->resources[1].start) {
		mhi_dev->resources[1].start = pci_dev->irq;
		mhi_dev->resources[1].end = pci_dev->irq + 1;
		mhi_dev->resources[1].flags = IORESOURCE_IRQ;
		mhi_dev->resources[1].name = "IRQ";
	}
	cnss_pr_dbg("IRQ start is %pa, IRQ end is %pa\n",
		    &mhi_dev->resources[1].start, &mhi_dev->resources[1].end);

	mhi_dev->pm_runtime_get = cnss_mhi_pm_runtime_get;
	mhi_dev->pm_runtime_put_noidle = cnss_mhi_pm_runtime_put_noidle;

	mhi_dev->support_rddm = true;
	mhi_dev->rddm_size = pci_priv->plat_priv->ramdump_info_v2.ramdump_size;
	mhi_dev->status_cb = cnss_mhi_notify_status;

	ret = mhi_register_device(mhi_dev, MHI_NODE_NAME, pci_priv);
	if (ret) {
		cnss_pr_err("Failed to register as MHI device, err = %d\n",
			    ret);
		return ret;
	}

	return 0;
}
+
/* Intentionally empty: the MHI framework in this tree has no
 * unregister API; kept as a placeholder for symmetric teardown.
 */
static void cnss_pci_unregister_mhi(struct cnss_pci_data *pci_priv)
{
}
+
/* Translate a driver-internal MHI state into the MHI framework's
 * control code.  Returns -EINVAL (as an enum value; callers test
 * "< 0") for unknown states.
 */
static enum mhi_dev_ctrl cnss_to_mhi_dev_state(enum cnss_mhi_state state)
{
	switch (state) {
	case CNSS_MHI_INIT:
		return MHI_DEV_CTRL_INIT;
	case CNSS_MHI_DEINIT:
		return MHI_DEV_CTRL_DE_INIT;
	case CNSS_MHI_POWER_ON:
		return MHI_DEV_CTRL_POWER_ON;
	case CNSS_MHI_POWER_OFF:
		return MHI_DEV_CTRL_POWER_OFF;
	case CNSS_MHI_SUSPEND:
		return MHI_DEV_CTRL_SUSPEND;
	case CNSS_MHI_RESUME:
		return MHI_DEV_CTRL_RESUME;
	case CNSS_MHI_TRIGGER_RDDM:
		return MHI_DEV_CTRL_TRIGGER_RDDM;
	case CNSS_MHI_RDDM:
		return MHI_DEV_CTRL_RDDM;
	case CNSS_MHI_RDDM_KERNEL_PANIC:
		return MHI_DEV_CTRL_RDDM_KERNEL_PANIC;
	case CNSS_MHI_NOTIFY_LINK_ERROR:
		return MHI_DEV_CTRL_NOTIFY_LINK_ERROR;
	default:
		cnss_pr_err("Unknown CNSS MHI state (%d)\n", state);
		return -EINVAL;
	}
}
+
/* Validate that the requested MHI transition is legal given the
 * current state bitmap (e.g. POWER_ON requires INIT set and POWER_ON
 * clear).  RDDM/link-error notifications are always allowed.
 * Returns 0 when legal, -EINVAL otherwise.
 */
static int cnss_pci_check_mhi_state_bit(struct cnss_pci_data *pci_priv,
					enum cnss_mhi_state mhi_state)
{
	switch (mhi_state) {
	case CNSS_MHI_INIT:
		if (!test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state))
			return 0;
		break;
	case CNSS_MHI_DEINIT:
	case CNSS_MHI_POWER_ON:
		if (test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state) &&
		    !test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
			return 0;
		break;
	case CNSS_MHI_POWER_OFF:
	case CNSS_MHI_SUSPEND:
		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) &&
		    !test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state))
			return 0;
		break;
	case CNSS_MHI_RESUME:
		if (test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state))
			return 0;
		break;
	case CNSS_MHI_TRIGGER_RDDM:
	case CNSS_MHI_RDDM:
	case CNSS_MHI_RDDM_KERNEL_PANIC:
	case CNSS_MHI_NOTIFY_LINK_ERROR:
		return 0;
	default:
		cnss_pr_err("Unhandled MHI state: %s(%d)\n",
			    cnss_mhi_state_to_str(mhi_state), mhi_state);
	}

	cnss_pr_err("Cannot set MHI state %s(%d) in current MHI state (0x%lx)\n",
		    cnss_mhi_state_to_str(mhi_state), mhi_state,
		    pci_priv->mhi_state);

	return -EINVAL;
}
+
/* Record a successful MHI transition in the state bitmap.  Paired
 * states toggle one bit (INIT/DEINIT, POWER_ON/POWER_OFF,
 * SUSPEND/RESUME); RDDM/link-error events leave the bitmap unchanged.
 */
static void cnss_pci_set_mhi_state_bit(struct cnss_pci_data *pci_priv,
				       enum cnss_mhi_state mhi_state)
{
	switch (mhi_state) {
	case CNSS_MHI_INIT:
		set_bit(CNSS_MHI_INIT, &pci_priv->mhi_state);
		break;
	case CNSS_MHI_DEINIT:
		clear_bit(CNSS_MHI_INIT, &pci_priv->mhi_state);
		break;
	case CNSS_MHI_POWER_ON:
		set_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
		break;
	case CNSS_MHI_POWER_OFF:
		clear_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
		break;
	case CNSS_MHI_SUSPEND:
		set_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state);
		break;
	case CNSS_MHI_RESUME:
		clear_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state);
		break;
	case CNSS_MHI_TRIGGER_RDDM:
	case CNSS_MHI_RDDM:
	case CNSS_MHI_RDDM_KERNEL_PANIC:
	case CNSS_MHI_NOTIFY_LINK_ERROR:
		break;
	default:
		cnss_pr_err("Unhandled MHI state (%d)\n", mhi_state);
	}
}
+
/* Drive an MHI state transition: translate the state, validate it
 * against the current bitmap, issue it to the MHI framework and, on
 * success, record it.  QCA6174 has no MHI, so it is a silent no-op.
 * Returns 0 or a negative errno.
 */
int cnss_pci_set_mhi_state(struct cnss_pci_data *pci_priv,
			   enum cnss_mhi_state mhi_state)
{
	int ret = 0;
	enum mhi_dev_ctrl mhi_dev_state = cnss_to_mhi_dev_state(mhi_state);

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL!\n");
		return -ENODEV;
	}

	if (pci_priv->device_id == QCA6174_DEVICE_ID)
		return 0;

	if (mhi_dev_state < 0) {
		cnss_pr_err("Invalid MHI DEV state (%d)\n", mhi_dev_state);
		return -EINVAL;
	}

	ret = cnss_pci_check_mhi_state_bit(pci_priv, mhi_state);
	if (ret)
		goto out;

	cnss_pr_dbg("Setting MHI state: %s(%d)\n",
		    cnss_mhi_state_to_str(mhi_state), mhi_state);
	ret = mhi_pm_control_device(&pci_priv->mhi_dev, mhi_dev_state);
	if (ret) {
		cnss_pr_err("Failed to set MHI state: %s(%d)\n",
			    cnss_mhi_state_to_str(mhi_state), mhi_state);
		goto out;
	}

	cnss_pci_set_mhi_state_bit(pci_priv, mhi_state);

out:
	return ret;
}
+
+int cnss_pci_start_mhi(struct cnss_pci_data *pci_priv)
+{
+	int ret = 0;
+
+	if (!pci_priv) {
+		cnss_pr_err("pci_priv is NULL!\n");
+		return -ENODEV;
+	}
+
+	if (fbc_bypass)
+		return 0;
+
+	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_INIT);
+	if (ret)
+		goto out;
+
+	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_POWER_ON);
+	if (ret)
+		goto out;
+
+	return 0;
+
+out:
+	return ret;
+}
+
/* Shut the device down over MHI.  The SUSPEND bit is cleared first
 * (via the RESUME bookkeeping helper) so POWER_OFF passes the state
 * check.  DEINIT is deliberately skipped while a ramdump is pending
 * or recovery is running, so dump memory stays accessible.
 */
void cnss_pci_stop_mhi(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv;

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL!\n");
		return;
	}

	if (fbc_bypass)
		return;

	plat_priv = pci_priv->plat_priv;

	cnss_pci_set_mhi_state_bit(pci_priv, CNSS_MHI_RESUME);
	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_POWER_OFF);

	if (plat_priv->ramdump_info_v2.dump_data_valid ||
	    test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state))
		return;

	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_DEINIT);
}
+
/* PCI probe: wire the device into the cnss platform context, register
 * subsystem-restart and ramdump services, optionally set up SMMU and
 * PCIe link-event notification, enable the bus, then do per-chip init
 * (MSI + MHI for QCA6290).  The device is powered off again at the
 * end of probe; it is powered on later when a WLAN driver attaches.
 * All registrations are unwound in reverse order on failure.
 */
static int cnss_pci_probe(struct pci_dev *pci_dev,
			  const struct pci_device_id *id)
{
	int ret = 0;
	struct cnss_pci_data *pci_priv;
	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
	struct resource *res;

	cnss_pr_dbg("PCI is probing, vendor ID: 0x%x, device ID: 0x%x\n",
		    id->vendor, pci_dev->device);

	/* QCA6290 needs the MHI stack; defer probe until it is ready. */
	switch (pci_dev->device) {
	case QCA6290_EMULATION_DEVICE_ID:
	case QCA6290_DEVICE_ID:
		if (!mhi_is_device_ready(&plat_priv->plat_dev->dev,
					 MHI_NODE_NAME)) {
			cnss_pr_err("MHI driver is not ready, defer PCI probe!\n");
			ret = -EPROBE_DEFER;
			goto out;
		}
		break;
	default:
		break;
	}

	pci_priv = devm_kzalloc(&pci_dev->dev, sizeof(*pci_priv),
				GFP_KERNEL);
	if (!pci_priv) {
		ret = -ENOMEM;
		goto out;
	}

	pci_priv->pci_link_state = PCI_LINK_UP;
	pci_priv->plat_priv = plat_priv;
	pci_priv->pci_dev = pci_dev;
	pci_priv->pci_device_id = id;
	pci_priv->device_id = pci_dev->device;
	cnss_set_pci_priv(pci_dev, pci_priv);
	plat_priv->device_id = pci_dev->device;
	plat_priv->bus_priv = pci_priv;

	ret = cnss_register_subsys(plat_priv);
	if (ret)
		goto reset_ctx;

	ret = cnss_register_ramdump(plat_priv);
	if (ret)
		goto unregister_subsys;

	/* SMMU is optional: only configured when DT provides an IOVA range. */
	res = platform_get_resource_byname(plat_priv->plat_dev, IORESOURCE_MEM,
					   "smmu_iova_base");
	if (res) {
		pci_priv->smmu_iova_start = res->start;
		pci_priv->smmu_iova_len = resource_size(res);
		cnss_pr_dbg("smmu_iova_start: %pa, smmu_iova_len: %zu\n",
			    &pci_priv->smmu_iova_start,
			    pci_priv->smmu_iova_len);

		ret = cnss_pci_init_smmu(pci_priv);
		if (ret) {
			cnss_pr_err("Failed to init SMMU, err = %d\n", ret);
			goto unregister_ramdump;
		}
	}

	ret = cnss_reg_pci_event(pci_priv);
	if (ret) {
		cnss_pr_err("Failed to register PCI event, err = %d\n", ret);
		goto deinit_smmu;
	}

	ret = cnss_pci_enable_bus(pci_priv);
	if (ret)
		goto dereg_pci_event;

	/* Keep a pristine config-space snapshot for later restores. */
	pci_save_state(pci_dev);
	pci_priv->default_state = pci_store_saved_state(pci_dev);

	switch (pci_dev->device) {
	case QCA6174_DEVICE_ID:
		pci_read_config_word(pci_dev, QCA6174_REV_ID_OFFSET,
				     &pci_priv->revision_id);
		ret = cnss_suspend_pci_link(pci_priv);
		if (ret)
			cnss_pr_err("Failed to suspend PCI link, err = %d\n",
				    ret);
		cnss_power_off_device(plat_priv);
		break;
	case QCA6290_EMULATION_DEVICE_ID:
	case QCA6290_DEVICE_ID:
		ret = cnss_pci_enable_msi(pci_priv);
		if (ret)
			goto disable_bus;
		ret = cnss_pci_register_mhi(pci_priv);
		if (ret) {
			cnss_pci_disable_msi(pci_priv);
			goto disable_bus;
		}
		ret = cnss_suspend_pci_link(pci_priv);
		if (ret)
			cnss_pr_err("Failed to suspend PCI link, err = %d\n",
				    ret);
		cnss_power_off_device(plat_priv);
		break;
	default:
		cnss_pr_err("Unknown PCI device found: 0x%x\n",
			    pci_dev->device);
		ret = -ENODEV;
		goto disable_bus;
	}

	return 0;

disable_bus:
	cnss_pci_disable_bus(pci_priv);
dereg_pci_event:
	cnss_dereg_pci_event(pci_priv);
deinit_smmu:
	if (pci_priv->smmu_mapping)
		cnss_pci_deinit_smmu(pci_priv);
unregister_ramdump:
	cnss_unregister_ramdump(plat_priv);
unregister_subsys:
	cnss_unregister_subsys(plat_priv);
reset_ctx:
	plat_priv->bus_priv = NULL;
out:
	return ret;
}
+
/* PCI remove: free FW/M3 DMA buffers, undo per-chip init (MHI/MSI for
 * QCA6290), drop the saved config-space state, and tear down the bus,
 * PCI events, SMMU and platform registrations in reverse probe order.
 * NOTE(review): probe stores the snapshot in default_state but this
 * frees saved_state — looks asymmetric; confirm against main.c usage.
 */
static void cnss_pci_remove(struct pci_dev *pci_dev)
{
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv =
		cnss_bus_dev_to_plat_priv(&pci_dev->dev);

	cnss_pci_free_m3_mem(pci_priv);
	cnss_pci_free_fw_mem(pci_priv);

	switch (pci_dev->device) {
	case QCA6290_EMULATION_DEVICE_ID:
	case QCA6290_DEVICE_ID:
		cnss_pci_unregister_mhi(pci_priv);
		cnss_pci_disable_msi(pci_priv);
		break;
	default:
		break;
	}

	pci_load_and_free_saved_state(pci_dev, &pci_priv->saved_state);

	cnss_pci_disable_bus(pci_priv);
	cnss_dereg_pci_event(pci_priv);
	if (pci_priv->smmu_mapping)
		cnss_pci_deinit_smmu(pci_priv);
	cnss_unregister_ramdump(plat_priv);
	cnss_unregister_subsys(plat_priv);
	plat_priv->bus_priv = NULL;
}
+
/* PCI IDs this driver binds to: QCA6174, QCA6290 and the QCA6290
 * pre-silicon emulation ID.
 */
static const struct pci_device_id cnss_pci_id_table[] = {
	{ QCA6174_VENDOR_ID, QCA6174_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ QCA6290_EMULATION_VENDOR_ID, QCA6290_EMULATION_DEVICE_ID,
	  PCI_ANY_ID, PCI_ANY_ID },
	{ QCA6290_VENDOR_ID, QCA6290_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, cnss_pci_id_table);
+
/* System sleep (normal + noirq phases) and runtime PM callbacks. */
static const struct dev_pm_ops cnss_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend, cnss_pci_resume)
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend_noirq,
				      cnss_pci_resume_noirq)
	SET_RUNTIME_PM_OPS(cnss_pci_runtime_suspend, cnss_pci_runtime_resume,
			   cnss_pci_runtime_idle)
};
+
/* The cnss PCI driver instance registered in cnss_pci_init(). */
struct pci_driver cnss_pci_driver = {
	.name     = "cnss_pci",
	.id_table = cnss_pci_id_table,
	.probe    = cnss_pci_probe,
	.remove   = cnss_pci_remove,
	.driver = {
		.pm = &cnss_pm_ops,
	},
};
+
+int cnss_pci_init(struct cnss_plat_data *plat_priv)
+{
+	int ret = 0;
+	struct device *dev = &plat_priv->plat_dev->dev;
+	u32 rc_num;
+
+	ret = of_property_read_u32(dev->of_node, "qcom,wlan-rc-num", &rc_num);
+	if (ret) {
+		cnss_pr_err("Failed to find PCIe RC number, err = %d\n", ret);
+		goto out;
+	}
+
+	ret = msm_pcie_enumerate(rc_num);
+	if (ret) {
+		cnss_pr_err("Failed to enable PCIe RC%x, err = %d\n",
+			    rc_num, ret);
+		goto out;
+	}
+
+	ret = pci_register_driver(&cnss_pci_driver);
+	if (ret) {
+		cnss_pr_err("Failed to register to PCI framework, err = %d\n",
+			    ret);
+		goto out;
+	}
+
+	return 0;
+out:
+	return ret;
+}
+
/* Bus-layer teardown: unregister the PCI driver. */
void cnss_pci_deinit(struct cnss_plat_data *plat_priv)
{
	pci_unregister_driver(&cnss_pci_driver);
}
diff --git a/drivers/net/wireless/cnss2/pci.h b/drivers/net/wireless/cnss2/pci.h
new file mode 100644
index 0000000..89edc60
--- /dev/null
+++ b/drivers/net/wireless/cnss2/pci.h
@@ -0,0 +1,143 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CNSS_PCI_H
+#define _CNSS_PCI_H
+
+#include <asm/dma-iommu.h>
+#include <linux/iommu.h>
+#include <linux/msm_mhi.h>
+#include <linux/msm_pcie.h>
+#include <linux/pci.h>
+
+#include "main.h"
+
+#define QCA6174_VENDOR_ID		0x168C
+#define QCA6174_DEVICE_ID		0x003E
+#define QCA6174_REV_ID_OFFSET		0x08
+#define QCA6174_REV3_VERSION		0x5020000
+#define QCA6174_REV3_2_VERSION		0x5030000
+#define QCA6290_VENDOR_ID		0x17CB
+#define QCA6290_DEVICE_ID		0x1100
+#define QCA6290_EMULATION_VENDOR_ID	0x168C
+#define QCA6290_EMULATION_DEVICE_ID	0xABCD
+
/* Driver-internal MHI states; the first six double as bit positions in
 * cnss_pci_data::mhi_state (INIT/POWER_ON/SUSPEND bits are toggled by
 * their paired transitions).
 */
enum cnss_mhi_state {
	CNSS_MHI_INIT,
	CNSS_MHI_DEINIT,
	CNSS_MHI_SUSPEND,
	CNSS_MHI_RESUME,
	CNSS_MHI_POWER_OFF,
	CNSS_MHI_POWER_ON,
	CNSS_MHI_TRIGGER_RDDM,
	CNSS_MHI_RDDM,
	CNSS_MHI_RDDM_KERNEL_PANIC,
	CNSS_MHI_NOTIFY_LINK_ERROR,
};
+
/* One named consumer of a contiguous range of MSI vectors. */
struct cnss_msi_user {
	char *name;		/* consumer name, e.g. "MHI", "CE" */
	int num_vectors;	/* number of vectors in the range */
	u32 base_vector;	/* first vector index of the range */
};
+
/* Full MSI vector layout: total vectors plus the per-user partitions. */
struct cnss_msi_config {
	int total_vectors;
	int total_users;
	struct cnss_msi_user *users;	/* array of total_users entries */
};
+
/* Per-PCI-device state for the cnss bus layer. */
struct cnss_pci_data {
	struct pci_dev *pci_dev;
	struct cnss_plat_data *plat_priv;	/* owning platform context */
	const struct pci_device_id *pci_device_id;
	u32 device_id;		/* PCI device ID cached from config space */
	u16 revision_id;	/* read for QCA6174 only */
	bool pci_link_state;	/* PCI_LINK_UP / PCI_LINK_DOWN */
	bool pci_link_down_ind;	/* link-down recovery in progress */
	struct pci_saved_state *saved_state;
	struct pci_saved_state *default_state;	/* snapshot taken at probe */
	struct msm_pcie_register_event msm_pci_event;
	atomic_t auto_suspended;	/* set while runtime-suspended */
	bool monitor_wake_intr;
	struct dma_iommu_mapping *smmu_mapping;	/* NULL when SMMU unused */
	dma_addr_t smmu_iova_start;
	size_t smmu_iova_len;
	void __iomem *bar;	/* iomapped PCI_BAR_NUM */
	struct cnss_msi_config *msi_config;
	u32 msi_ep_base_data;	/* endpoint MSI data word for vector 0 */
	struct mhi_device mhi_dev;
	unsigned long mhi_state;	/* bitmap of enum cnss_mhi_state */
};
+
/* Stash the cnss_pci_data pointer in the PCI device's drvdata. */
static inline void cnss_set_pci_priv(struct pci_dev *pci_dev, void *data)
{
	pci_set_drvdata(pci_dev, data);
}
+
/* Retrieve the cnss_pci_data stored by cnss_set_pci_priv(). */
static inline struct cnss_pci_data *cnss_get_pci_priv(struct pci_dev *pci_dev)
{
	return pci_get_drvdata(pci_dev);
}
+
/* Map an opaque bus-private pointer back to its platform context. */
static inline struct cnss_plat_data *cnss_pci_priv_to_plat_priv(void *bus_priv)
{
	struct cnss_pci_data *pci_priv = bus_priv;

	return pci_priv->plat_priv;
}
+
/* Set the monitor-wake-interrupt flag (armed during auto suspend). */
static inline void cnss_pci_set_monitor_wake_intr(void *bus_priv, bool val)
{
	struct cnss_pci_data *pci_priv = bus_priv;

	pci_priv->monitor_wake_intr = val;
}
+
/* Read the monitor-wake-interrupt flag. */
static inline bool cnss_pci_get_monitor_wake_intr(void *bus_priv)
{
	struct cnss_pci_data *pci_priv = bus_priv;

	return pci_priv->monitor_wake_intr;
}
+
/* Atomically record whether the device is auto (runtime) suspended. */
static inline void cnss_pci_set_auto_suspended(void *bus_priv, int val)
{
	struct cnss_pci_data *pci_priv = bus_priv;

	atomic_set(&pci_priv->auto_suspended, val);
}
+
/* Atomically read the auto-suspended flag. */
static inline int cnss_pci_get_auto_suspended(void *bus_priv)
{
	struct cnss_pci_data *pci_priv = bus_priv;

	return atomic_read(&pci_priv->auto_suspended);
}
+
+int cnss_suspend_pci_link(struct cnss_pci_data *pci_priv);
+int cnss_resume_pci_link(struct cnss_pci_data *pci_priv);
+int cnss_pci_init(struct cnss_plat_data *plat_priv);
+void cnss_pci_deinit(struct cnss_plat_data *plat_priv);
+int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv);
+int cnss_pci_load_m3(struct cnss_pci_data *pci_priv);
+int cnss_pci_get_bar_info(struct cnss_pci_data *pci_priv, void __iomem **va,
+			  phys_addr_t *pa);
+int cnss_pci_set_mhi_state(struct cnss_pci_data *pci_priv,
+			   enum cnss_mhi_state state);
+int cnss_pci_start_mhi(struct cnss_pci_data *pci_priv);
+void cnss_pci_stop_mhi(struct cnss_pci_data *pci_priv);
+void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv);
+void cnss_pci_clear_dump_info(struct cnss_pci_data *pci_priv);
+int cnss_pm_request_resume(struct cnss_pci_data *pci_priv);
+
+#endif /* _CNSS_PCI_H */
diff --git a/drivers/net/wireless/cnss2/power.c b/drivers/net/wireless/cnss2/power.c
new file mode 100644
index 0000000..8ed1507
--- /dev/null
+++ b/drivers/net/wireless/cnss2/power.c
@@ -0,0 +1,386 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include "main.h"
+#include "debug.h"
+
+static struct cnss_vreg_info cnss_vreg_info[] = {
+	{NULL, "vdd-wlan-core", 1300000, 1300000, 0, 0},
+	{NULL, "vdd-wlan-io", 1800000, 1800000, 0, 0},
+	{NULL, "vdd-wlan-xtal-aon", 0, 0, 0, 0},
+	{NULL, "vdd-wlan-xtal", 1800000, 1800000, 0, 2},
+	{NULL, "vdd-wlan", 0, 0, 0, 0},
+	{NULL, "vdd-wlan-sp2t", 2700000, 2700000, 0, 0},
+	{NULL, "wlan-ant-switch", 2700000, 2700000, 20000, 0},
+	{NULL, "wlan-soc-swreg", 1200000, 1200000, 0, 0},
+	{NULL, "vdd-wlan-en", 0, 0, 0, 10},
+};
+
+#define CNSS_VREG_INFO_SIZE		ARRAY_SIZE(cnss_vreg_info)
+#define MAX_PROP_SIZE			32
+
+#define BOOTSTRAP_GPIO			"qcom,enable-bootstrap-gpio"
+#define BOOTSTRAP_ACTIVE		"bootstrap_active"
+#define WLAN_EN_GPIO			"wlan-en-gpio"
+#define WLAN_EN_ACTIVE			"wlan_en_active"
+#define WLAN_EN_SLEEP			"wlan_en_sleep"
+
+#define BOOTSTRAP_DELAY			1000
+#define WLAN_ENABLE_DELAY		1000
+
+/*
+ * Look up every regulator in the static cnss_vreg_info[] table on the
+ * platform device, then override the table defaults with per-board
+ * "qcom,<name>-info" device-tree values (min_uv, max_uv, load_ua,
+ * delay_us) when present.
+ *
+ * Returns 0 on success; -ENOMEM, -EPROBE_DEFER or a regulator error
+ * otherwise.  Regulators missing from DT (-ENODEV) are skipped silently.
+ */
+int cnss_get_vreg(struct cnss_plat_data *plat_priv)
+{
+	int ret = 0;
+	int i;
+	struct cnss_vreg_info *vreg_info;
+	struct device *dev;
+	struct regulator *reg;
+	const __be32 *prop;
+	char prop_name[MAX_PROP_SIZE];
+	int len;
+
+	dev = &plat_priv->plat_dev->dev;
+
+	/* Private, writable copy of the defaults; devm-managed, no free. */
+	plat_priv->vreg_info = devm_kzalloc(dev, sizeof(cnss_vreg_info),
+					    GFP_KERNEL);
+	if (!plat_priv->vreg_info) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	memcpy(plat_priv->vreg_info, cnss_vreg_info, sizeof(cnss_vreg_info));
+
+	for (i = 0; i < CNSS_VREG_INFO_SIZE; i++) {
+		vreg_info = &plat_priv->vreg_info[i];
+		reg = devm_regulator_get_optional(dev, vreg_info->name);
+		if (IS_ERR(reg)) {
+			ret = PTR_ERR(reg);
+			/* -ENODEV means this supply isn't wired on the board */
+			if (ret == -ENODEV)
+				continue;
+			else if (ret == -EPROBE_DEFER)
+				cnss_pr_info("EPROBE_DEFER for regulator: %s\n",
+					     vreg_info->name);
+			else
+				cnss_pr_err("Failed to get regulator %s, err = %d\n",
+					    vreg_info->name, ret);
+			goto out;
+		}
+
+		vreg_info->reg = reg;
+
+		snprintf(prop_name, MAX_PROP_SIZE, "qcom,%s-info",
+			 vreg_info->name);
+
+		prop = of_get_property(dev->of_node, prop_name, &len);
+		cnss_pr_dbg("Got regulator info, name: %s, len: %d\n",
+			    prop_name, len);
+
+		/* Expect exactly four u32 cells; otherwise keep defaults. */
+		if (!prop || len != (4 * sizeof(__be32))) {
+			cnss_pr_dbg("Property %s %s, use default\n", prop_name,
+				    prop ? "invalid format" : "doesn't exist");
+		} else {
+			vreg_info->min_uv = be32_to_cpup(&prop[0]);
+			vreg_info->max_uv = be32_to_cpup(&prop[1]);
+			vreg_info->load_ua = be32_to_cpup(&prop[2]);
+			vreg_info->delay_us = be32_to_cpup(&prop[3]);
+		}
+
+		cnss_pr_dbg("Got regulator: %s, min_uv: %u, max_uv: %u, load_ua: %u, delay_us: %u\n",
+			    vreg_info->name, vreg_info->min_uv,
+			    vreg_info->max_uv, vreg_info->load_ua,
+			    vreg_info->delay_us);
+	}
+
+	return 0;
+out:
+	return ret;
+}
+
+/*
+ * Enable all configured regulators in table order: program voltage and
+ * load where specified, honor the per-regulator settling delay, then
+ * enable.  On any failure, unwind so no regulator is left enabled.
+ *
+ * Returns 0 on success or the first regulator API error encountered.
+ */
+static int cnss_vreg_on(struct cnss_plat_data *plat_priv)
+{
+	int ret = 0;
+	struct cnss_vreg_info *vreg_info;
+	int i;
+
+	if (!plat_priv) {
+		cnss_pr_err("plat_priv is NULL!\n");
+		return -ENODEV;
+	}
+
+	for (i = 0; i < CNSS_VREG_INFO_SIZE; i++) {
+		vreg_info = &plat_priv->vreg_info[i];
+
+		/* Skip supplies that were absent on this board. */
+		if (!vreg_info->reg)
+			continue;
+
+		cnss_pr_dbg("Regulator %s is being enabled\n", vreg_info->name);
+
+		if (vreg_info->min_uv != 0 && vreg_info->max_uv != 0) {
+			ret = regulator_set_voltage(vreg_info->reg,
+						    vreg_info->min_uv,
+						    vreg_info->max_uv);
+
+			if (ret) {
+				cnss_pr_err("Failed to set voltage for regulator %s, min_uv: %u, max_uv: %u, err = %d\n",
+					    vreg_info->name, vreg_info->min_uv,
+					    vreg_info->max_uv, ret);
+				break;
+			}
+		}
+
+		if (vreg_info->load_ua) {
+			ret = regulator_set_load(vreg_info->reg,
+						 vreg_info->load_ua);
+
+			if (ret < 0) {
+				cnss_pr_err("Failed to set load for regulator %s, load: %u, err = %d\n",
+					    vreg_info->name, vreg_info->load_ua,
+					    ret);
+				break;
+			}
+		}
+
+		if (vreg_info->delay_us)
+			udelay(vreg_info->delay_us);
+
+		ret = regulator_enable(vreg_info->reg);
+		if (ret) {
+			cnss_pr_err("Failed to enable regulator %s, err = %d\n",
+				    vreg_info->name, ret);
+			break;
+		}
+	}
+
+	if (ret) {
+		/*
+		 * The regulator at the failing index was never successfully
+		 * enabled (every failure path breaks before or at
+		 * regulator_enable()), so only undo its load/voltage
+		 * settings; calling regulator_disable() on it would
+		 * unbalance the enable count.
+		 */
+		vreg_info = &plat_priv->vreg_info[i];
+		if (vreg_info->reg) {
+			if (vreg_info->load_ua)
+				regulator_set_load(vreg_info->reg, 0);
+			if (vreg_info->min_uv != 0 && vreg_info->max_uv != 0)
+				regulator_set_voltage(vreg_info->reg, 0,
+						      vreg_info->max_uv);
+		}
+
+		/* Fully unwind the regulators enabled before the failure. */
+		for (i--; i >= 0; i--) {
+			vreg_info = &plat_priv->vreg_info[i];
+
+			if (!vreg_info->reg)
+				continue;
+
+			regulator_disable(vreg_info->reg);
+			if (vreg_info->load_ua)
+				regulator_set_load(vreg_info->reg, 0);
+			if (vreg_info->min_uv != 0 && vreg_info->max_uv != 0)
+				regulator_set_voltage(vreg_info->reg, 0,
+						      vreg_info->max_uv);
+		}
+
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Disable all configured regulators in reverse table order and reset
+ * their load and voltage requests.  Errors are logged but do not stop
+ * the teardown; the last error (if any) is returned.
+ */
+static int cnss_vreg_off(struct cnss_plat_data *plat_priv)
+{
+	int ret = 0;
+	struct cnss_vreg_info *vreg_info;
+	int i;
+
+	if (!plat_priv) {
+		cnss_pr_err("plat_priv is NULL!\n");
+		return -ENODEV;
+	}
+
+	/* Reverse of the power-on order in cnss_vreg_on(). */
+	for (i = CNSS_VREG_INFO_SIZE - 1; i >= 0; i--) {
+		vreg_info = &plat_priv->vreg_info[i];
+
+		if (!vreg_info->reg)
+			continue;
+
+		cnss_pr_dbg("Regulator %s is being disabled\n",
+			    vreg_info->name);
+
+		ret = regulator_disable(vreg_info->reg);
+		if (ret)
+			cnss_pr_err("Failed to disable regulator %s, err = %d\n",
+				    vreg_info->name, ret);
+
+		if (vreg_info->load_ua) {
+			ret = regulator_set_load(vreg_info->reg, 0);
+			if (ret < 0)
+				cnss_pr_err("Failed to set load for regulator %s, err = %d\n",
+					    vreg_info->name, ret);
+		}
+
+		if (vreg_info->min_uv != 0 && vreg_info->max_uv != 0) {
+			ret = regulator_set_voltage(vreg_info->reg, 0,
+						    vreg_info->max_uv);
+			if (ret)
+				cnss_pr_err("Failed to set voltage for regulator %s, err = %d\n",
+					    vreg_info->name, ret);
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Acquire the pinctrl handle and look up the optional bootstrap and
+ * WLAN_EN pin states advertised in device tree.
+ *
+ * Returns 0 on success or a negative errno.  A NULL handle from the
+ * pinctrl API is mapped to -EINVAL: PTR_ERR(NULL) evaluates to 0, which
+ * would otherwise make a failed lookup report success to the caller.
+ */
+int cnss_get_pinctrl(struct cnss_plat_data *plat_priv)
+{
+	int ret = 0;
+	struct device *dev;
+	struct cnss_pinctrl_info *pinctrl_info;
+
+	dev = &plat_priv->plat_dev->dev;
+	pinctrl_info = &plat_priv->pinctrl_info;
+
+	pinctrl_info->pinctrl = devm_pinctrl_get(dev);
+	if (IS_ERR_OR_NULL(pinctrl_info->pinctrl)) {
+		ret = pinctrl_info->pinctrl ?
+			PTR_ERR(pinctrl_info->pinctrl) : -EINVAL;
+		cnss_pr_err("Failed to get pinctrl, err = %d\n", ret);
+		goto out;
+	}
+
+	/* Bootstrap state is only required when the DT flag is present. */
+	if (of_find_property(dev->of_node, BOOTSTRAP_GPIO, NULL)) {
+		pinctrl_info->bootstrap_active =
+			pinctrl_lookup_state(pinctrl_info->pinctrl,
+					     BOOTSTRAP_ACTIVE);
+		if (IS_ERR_OR_NULL(pinctrl_info->bootstrap_active)) {
+			ret = pinctrl_info->bootstrap_active ?
+				PTR_ERR(pinctrl_info->bootstrap_active) :
+				-EINVAL;
+			cnss_pr_err("Failed to get bootstrap active state, err = %d\n",
+				    ret);
+			goto out;
+		}
+	}
+
+	/* WLAN_EN needs both an active and a sleep state. */
+	if (of_find_property(dev->of_node, WLAN_EN_GPIO, NULL)) {
+		pinctrl_info->wlan_en_active =
+			pinctrl_lookup_state(pinctrl_info->pinctrl,
+					     WLAN_EN_ACTIVE);
+		if (IS_ERR_OR_NULL(pinctrl_info->wlan_en_active)) {
+			ret = pinctrl_info->wlan_en_active ?
+				PTR_ERR(pinctrl_info->wlan_en_active) :
+				-EINVAL;
+			cnss_pr_err("Failed to get wlan_en active state, err = %d\n",
+				    ret);
+			goto out;
+		}
+
+		pinctrl_info->wlan_en_sleep =
+			pinctrl_lookup_state(pinctrl_info->pinctrl,
+					     WLAN_EN_SLEEP);
+		if (IS_ERR_OR_NULL(pinctrl_info->wlan_en_sleep)) {
+			ret = pinctrl_info->wlan_en_sleep ?
+				PTR_ERR(pinctrl_info->wlan_en_sleep) :
+				-EINVAL;
+			cnss_pr_err("Failed to get wlan_en sleep state, err = %d\n",
+				    ret);
+			goto out;
+		}
+	}
+
+	return 0;
+out:
+	return ret;
+}
+
+/*
+ * Drive the power-sequencing pins: state == true selects bootstrap then
+ * WLAN_EN active (with settling delays); state == false selects the
+ * WLAN_EN sleep state.  States that were never looked up (optional in
+ * DT, so still NULL/ERR) are skipped.
+ *
+ * Returns 0 on success or the pinctrl error.
+ */
+static int cnss_select_pinctrl_state(struct cnss_plat_data *plat_priv,
+				     bool state)
+{
+	int ret = 0;
+	struct cnss_pinctrl_info *pinctrl_info;
+
+	if (!plat_priv) {
+		cnss_pr_err("plat_priv is NULL!\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	pinctrl_info = &plat_priv->pinctrl_info;
+
+	if (state) {
+		if (!IS_ERR_OR_NULL(pinctrl_info->bootstrap_active)) {
+			ret = pinctrl_select_state(pinctrl_info->pinctrl,
+						   pinctrl_info->
+						   bootstrap_active);
+			if (ret) {
+				cnss_pr_err("Failed to select bootstrap active state, err = %d\n",
+					    ret);
+				goto out;
+			}
+			/* Let the bootstrap pin settle before WLAN_EN. */
+			udelay(BOOTSTRAP_DELAY);
+		}
+
+		if (!IS_ERR_OR_NULL(pinctrl_info->wlan_en_active)) {
+			ret = pinctrl_select_state(pinctrl_info->pinctrl,
+						   pinctrl_info->
+						   wlan_en_active);
+			if (ret) {
+				cnss_pr_err("Failed to select wlan_en active state, err = %d\n",
+					    ret);
+				goto out;
+			}
+			udelay(WLAN_ENABLE_DELAY);
+		}
+	} else {
+		if (!IS_ERR_OR_NULL(pinctrl_info->wlan_en_sleep)) {
+			ret = pinctrl_select_state(pinctrl_info->pinctrl,
+						   pinctrl_info->wlan_en_sleep);
+			if (ret) {
+				cnss_pr_err("Failed to select wlan_en sleep state, err = %d\n",
+					    ret);
+				goto out;
+			}
+		}
+	}
+
+	return 0;
+out:
+	return ret;
+}
+
+/*
+ * Full power-up sequence: regulators first, then pinctrl (bootstrap and
+ * WLAN_EN).  If the pinctrl step fails, the regulators are switched
+ * back off so the device is never left half-powered.
+ */
+int cnss_power_on_device(struct cnss_plat_data *plat_priv)
+{
+	int ret = 0;
+
+	ret = cnss_vreg_on(plat_priv);
+	if (ret) {
+		cnss_pr_err("Failed to turn on vreg, err = %d\n", ret);
+		goto out;
+	}
+
+	ret = cnss_select_pinctrl_state(plat_priv, true);
+	if (ret) {
+		cnss_pr_err("Failed to select pinctrl state, err = %d\n", ret);
+		goto vreg_off;
+	}
+
+	return 0;
+vreg_off:
+	cnss_vreg_off(plat_priv);
+out:
+	return ret;
+}
+
+/* Power-down: WLAN_EN to sleep first, then regulators off (best effort). */
+void cnss_power_off_device(struct cnss_plat_data *plat_priv)
+{
+	cnss_select_pinctrl_state(plat_priv, false);
+	cnss_vreg_off(plat_priv);
+}
+
+/*
+ * Record the host-side pin connection bitmap.  All host pins are
+ * reported as connected unconditionally; the firmware-side results
+ * arrive later via the QMI pin-connect-result indication.
+ */
+void cnss_set_pin_connect_status(struct cnss_plat_data *plat_priv)
+{
+	unsigned long pin_status = 0;
+
+	set_bit(CNSS_WLAN_EN, &pin_status);
+	set_bit(CNSS_PCIE_TXN, &pin_status);
+	set_bit(CNSS_PCIE_TXP, &pin_status);
+	set_bit(CNSS_PCIE_RXN, &pin_status);
+	set_bit(CNSS_PCIE_RXP, &pin_status);
+	set_bit(CNSS_PCIE_REFCLKN, &pin_status);
+	set_bit(CNSS_PCIE_REFCLKP, &pin_status);
+	set_bit(CNSS_PCIE_RST, &pin_status);
+
+	plat_priv->pin_result.host_pin_result = pin_status;
+}
diff --git a/drivers/net/wireless/cnss2/qmi.c b/drivers/net/wireless/cnss2/qmi.c
new file mode 100644
index 0000000..f4344ae
--- /dev/null
+++ b/drivers/net/wireless/cnss2/qmi.c
@@ -0,0 +1,1037 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/qmi_encdec.h>
+#include <soc/qcom/msm_qmi_interface.h>
+
+#include "main.h"
+#include "debug.h"
+#include "qmi.h"
+
+#define WLFW_SERVICE_INS_ID_V01		1
+#define WLFW_CLIENT_ID			0x4b4e454c
+/* Fits "bdwlan.eXX" plus NUL (see BDF_FILE_NAME_PREFIX below). */
+#define MAX_BDF_FILE_NAME		11
+#define DEFAULT_BDF_FILE_NAME		"bdwlan.elf"
+#define BDF_FILE_NAME_PREFIX		"bdwlan.e"
+
+/* QMI timeout is runtime-tunable only on debug builds. */
+#ifdef CONFIG_CNSS2_DEBUG
+static unsigned int qmi_timeout = 10000;
+module_param(qmi_timeout, uint, 0600);
+MODULE_PARM_DESC(qmi_timeout, "Timeout for QMI message in milliseconds");
+
+#define QMI_WLFW_TIMEOUT_MS		qmi_timeout
+#else
+#define QMI_WLFW_TIMEOUT_MS		10000
+#endif
+
+static bool daemon_support;
+module_param(daemon_support, bool, 0600);
+MODULE_PARM_DESC(daemon_support, "User space has cnss-daemon support or not");
+
+/* bdf_bypass is declared unconditionally but settable only on debug builds. */
+static bool bdf_bypass;
+#ifdef CONFIG_CNSS2_DEBUG
+module_param(bdf_bypass, bool, 0600);
+MODULE_PARM_DESC(bdf_bypass, "If BDF is not found, send dummy BDF to FW");
+#endif
+
+/* Board-data-file format reported to firmware in the download request. */
+enum cnss_bdf_type {
+	CNSS_BDF_BIN,
+	CNSS_BDF_ELF,
+};
+
+/*
+ * Map a WLFW driver mode enum to a printable name for log messages.
+ * Unknown values yield "UNKNOWN".  Note: no trailing semicolon after
+ * the closing brace — the original had a stray file-scope ';'.
+ */
+static char *cnss_qmi_mode_to_str(enum wlfw_driver_mode_enum_v01 mode)
+{
+	switch (mode) {
+	case QMI_WLFW_MISSION_V01:
+		return "MISSION";
+	case QMI_WLFW_FTM_V01:
+		return "FTM";
+	case QMI_WLFW_EPPING_V01:
+		return "EPPING";
+	case QMI_WLFW_WALTEST_V01:
+		return "WALTEST";
+	case QMI_WLFW_OFF_V01:
+		return "OFF";
+	case QMI_WLFW_CCPM_V01:
+		return "CCPM";
+	case QMI_WLFW_QVIT_V01:
+		return "QVIT";
+	case QMI_WLFW_CALIBRATION_V01:
+		return "CALIBRATION";
+	default:
+		return "UNKNOWN";
+	}
+}
+
+/*
+ * Workqueue handler: drain all pending QMI WLFW messages.  qmi_recv_msg()
+ * returns 0 per message consumed and -ENOMSG when the queue is empty;
+ * any other value is a genuine receive error.
+ */
+static void cnss_wlfw_clnt_notifier_work(struct work_struct *work)
+{
+	struct cnss_plat_data *plat_priv =
+		container_of(work, struct cnss_plat_data, qmi_recv_msg_work);
+	int ret = 0;
+
+	cnss_pr_dbg("Receiving QMI WLFW event in work queue context\n");
+
+	do {
+		ret = qmi_recv_msg(plat_priv->qmi_wlfw_clnt);
+	} while (ret == 0);
+
+	if (ret != -ENOMSG)
+		cnss_pr_err("Error receiving message: %d\n", ret);
+
+	cnss_pr_dbg("Receiving QMI event completed\n");
+}
+
+/*
+ * QMI client event callback (may run in atomic context): defer message
+ * reception to the workqueue; QMI_SERVER_EXIT is handled separately via
+ * the service event notifier, so it is a no-op here.
+ */
+static void cnss_wlfw_clnt_notifier(struct qmi_handle *handle,
+				    enum qmi_event_type event,
+				    void *notify_priv)
+{
+	struct cnss_plat_data *plat_priv = notify_priv;
+
+	cnss_pr_dbg("Received QMI WLFW event: %d\n", event);
+
+	if (!plat_priv) {
+		cnss_pr_err("plat_priv is NULL!\n");
+		return;
+	}
+
+	switch (event) {
+	case QMI_RECV_MSG:
+		schedule_work(&plat_priv->qmi_recv_msg_work);
+		break;
+	case QMI_SERVER_EXIT:
+		break;
+	default:
+		cnss_pr_dbg("Unhandled QMI event: %d\n", event);
+		break;
+	}
+}
+
+/*
+ * QMI service lookup notifier: translate WLFW service arrive/exit into
+ * driver events processed on the cnss event queue.  Returns the
+ * event-post result (0 on success).
+ */
+static int cnss_wlfw_clnt_svc_event_notifier(struct notifier_block *nb,
+					     unsigned long code, void *_cmd)
+{
+	struct cnss_plat_data *plat_priv =
+		container_of(nb, struct cnss_plat_data, qmi_wlfw_clnt_nb);
+	int ret = 0;
+
+	cnss_pr_dbg("Received QMI WLFW service event: %ld\n", code);
+
+	switch (code) {
+	case QMI_SERVER_ARRIVE:
+		ret = cnss_driver_event_post(plat_priv,
+					     CNSS_DRIVER_EVENT_SERVER_ARRIVE,
+					     0, NULL);
+		break;
+
+	case QMI_SERVER_EXIT:
+		ret = cnss_driver_event_post(plat_priv,
+					     CNSS_DRIVER_EVENT_SERVER_EXIT,
+					     0, NULL);
+		break;
+	default:
+		cnss_pr_dbg("Invalid QMI service event: %ld\n", code);
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * Send the host-capability request to firmware: advertises cnss-daemon
+ * support and, if available, the wake MSI vector.  Synchronous with
+ * QMI_WLFW_TIMEOUT_MS; asserts on failure (firmware cannot proceed
+ * without host caps).
+ */
+static int cnss_wlfw_host_cap_send_sync(struct cnss_plat_data *plat_priv)
+{
+	struct wlfw_host_cap_req_msg_v01 req;
+	struct wlfw_host_cap_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+	int ret = 0;
+
+	cnss_pr_dbg("Sending host capability message, state: 0x%lx\n",
+		    plat_priv->driver_state);
+
+	memset(&req, 0, sizeof(req));
+	memset(&resp, 0, sizeof(resp));
+
+	req.daemon_support_valid = 1;
+	req.daemon_support = daemon_support;
+
+	cnss_pr_dbg("daemon_support is %d\n", req.daemon_support);
+
+	/* A wake MSI of 0 means "not available" — send only when valid. */
+	req.wake_msi = cnss_get_wake_msi(plat_priv);
+	if (req.wake_msi) {
+		cnss_pr_dbg("WAKE MSI base data is %d\n", req.wake_msi);
+		req.wake_msi_valid = 1;
+	}
+
+	req_desc.max_msg_len = WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.msg_id = QMI_WLFW_HOST_CAP_REQ_V01;
+	req_desc.ei_array = wlfw_host_cap_req_msg_v01_ei;
+
+	resp_desc.max_msg_len = WLFW_HOST_CAP_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.msg_id = QMI_WLFW_HOST_CAP_RESP_V01;
+	resp_desc.ei_array = wlfw_host_cap_resp_msg_v01_ei;
+
+	ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, &req,
+				sizeof(req), &resp_desc, &resp, sizeof(resp),
+				QMI_WLFW_TIMEOUT_MS);
+	if (ret < 0) {
+		cnss_pr_err("Failed to send host capability request, err = %d\n",
+			    ret);
+		goto out;
+	}
+
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		cnss_pr_err("Host capability request failed, result: %d, err: %d\n",
+			    resp.resp.result, resp.resp.error);
+		ret = resp.resp.result;
+		goto out;
+	}
+
+	return 0;
+out:
+	CNSS_ASSERT(0);
+	return ret;
+}
+
+/*
+ * Register with firmware for the indications this driver consumes:
+ * fw-ready, request-mem, fw-mem-ready, cold-boot-cal-done and
+ * pin-connect-result.  Synchronous; asserts on failure.
+ */
+static int cnss_wlfw_ind_register_send_sync(struct cnss_plat_data *plat_priv)
+{
+	struct wlfw_ind_register_req_msg_v01 req;
+	struct wlfw_ind_register_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+	int ret = 0;
+
+	cnss_pr_dbg("Sending indication register message, state: 0x%lx\n",
+		    plat_priv->driver_state);
+
+	memset(&req, 0, sizeof(req));
+	memset(&resp, 0, sizeof(resp));
+
+	req.client_id_valid = 1;
+	req.client_id = WLFW_CLIENT_ID;
+	req.fw_ready_enable_valid = 1;
+	req.fw_ready_enable = 1;
+	req.request_mem_enable_valid = 1;
+	req.request_mem_enable = 1;
+	req.fw_mem_ready_enable_valid = 1;
+	req.fw_mem_ready_enable = 1;
+	req.cold_boot_cal_done_enable_valid = 1;
+	req.cold_boot_cal_done_enable = 1;
+	req.pin_connect_result_enable_valid = 1;
+	req.pin_connect_result_enable = 1;
+
+	req_desc.max_msg_len = WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.msg_id = QMI_WLFW_IND_REGISTER_REQ_V01;
+	req_desc.ei_array = wlfw_ind_register_req_msg_v01_ei;
+
+	resp_desc.max_msg_len = WLFW_IND_REGISTER_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.msg_id = QMI_WLFW_IND_REGISTER_RESP_V01;
+	resp_desc.ei_array = wlfw_ind_register_resp_msg_v01_ei;
+
+	ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, &req,
+				sizeof(req), &resp_desc, &resp, sizeof(resp),
+				QMI_WLFW_TIMEOUT_MS);
+	if (ret < 0) {
+		cnss_pr_err("Failed to send indication register request, err = %d\n",
+			    ret);
+		goto out;
+	}
+
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		cnss_pr_err("Indication register request failed, result: %d, err: %d\n",
+			    resp.resp.result, resp.resp.error);
+		ret = resp.resp.result;
+		goto out;
+	}
+
+	return 0;
+out:
+	CNSS_ASSERT(0);
+	return ret;
+}
+
+/*
+ * Handle the firmware request-memory indication: decode the requested
+ * size into plat_priv->fw_mem and queue a REQUEST_MEM driver event so
+ * allocation happens in process context.
+ */
+static int cnss_wlfw_request_mem_ind_hdlr(struct cnss_plat_data *plat_priv,
+					  void *msg, unsigned int msg_len)
+{
+	struct msg_desc ind_desc;
+	struct wlfw_request_mem_ind_msg_v01 ind_msg;
+	struct cnss_fw_mem *fw_mem = &plat_priv->fw_mem;
+	int ret = 0;
+
+	ind_desc.msg_id = QMI_WLFW_REQUEST_MEM_IND_V01;
+	ind_desc.max_msg_len = WLFW_REQUEST_MEM_IND_MSG_V01_MAX_MSG_LEN;
+	ind_desc.ei_array = wlfw_request_mem_ind_msg_v01_ei;
+
+	ret = qmi_kernel_decode(&ind_desc, &ind_msg, msg, msg_len);
+	if (ret < 0) {
+		/*
+		 * Arguments must follow the format string: msg_len for %u,
+		 * ret for %d (the original passed them swapped).
+		 */
+		cnss_pr_err("Failed to decode request memory indication, msg_len: %u, err = %d\n",
+			    msg_len, ret);
+		return ret;
+	}
+
+	fw_mem->size = ind_msg.size;
+
+	cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_REQUEST_MEM,
+			       0, NULL);
+
+	return 0;
+}
+
+/*
+ * Handle the pin-connect-result indication: record each firmware-side
+ * pin test result (power, PHY I/O, RF) that the message marks valid.
+ */
+static int cnss_qmi_pin_result_ind_hdlr(struct cnss_plat_data *plat_priv,
+					void *msg, unsigned int msg_len)
+{
+	struct msg_desc ind_desc;
+	struct wlfw_pin_connect_result_ind_msg_v01 ind_msg;
+	int ret = 0;
+
+	ind_desc.msg_id = QMI_WLFW_PIN_CONNECT_RESULT_IND_V01;
+	ind_desc.max_msg_len = WLFW_PIN_CONNECT_RESULT_IND_MSG_V01_MAX_MSG_LEN;
+	ind_desc.ei_array = wlfw_pin_connect_result_ind_msg_v01_ei;
+
+	ret = qmi_kernel_decode(&ind_desc, &ind_msg, msg, msg_len);
+	if (ret < 0) {
+		cnss_pr_err("Failed to decode pin connect result indication, msg_len: %u, err = %d\n",
+			    msg_len, ret);
+		return ret;
+	}
+	if (ind_msg.pwr_pin_result_valid)
+		plat_priv->pin_result.fw_pwr_pin_result =
+		    ind_msg.pwr_pin_result;
+	if (ind_msg.phy_io_pin_result_valid)
+		plat_priv->pin_result.fw_phy_io_pin_result =
+		    ind_msg.phy_io_pin_result;
+	if (ind_msg.rf_pin_result_valid)
+		plat_priv->pin_result.fw_rf_pin_result = ind_msg.rf_pin_result;
+
+	cnss_pr_dbg("Pin connect Result: pwr_pin: 0x%x phy_io_pin: 0x%x rf_io_pin: 0x%x\n",
+		    ind_msg.pwr_pin_result, ind_msg.phy_io_pin_result,
+		    ind_msg.rf_pin_result);
+	return ret;
+}
+
+/*
+ * Reply to the firmware's memory request with the physical address and
+ * size of the buffer allocated in plat_priv->fw_mem.  Must run after
+ * the REQUEST_MEM event handler has populated fw_mem.  Asserts on
+ * failure.
+ */
+int cnss_wlfw_respond_mem_send_sync(struct cnss_plat_data *plat_priv)
+{
+	struct wlfw_respond_mem_req_msg_v01 req;
+	struct wlfw_respond_mem_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+	struct cnss_fw_mem *fw_mem = &plat_priv->fw_mem;
+	int ret = 0;
+
+	cnss_pr_dbg("Sending respond memory message, state: 0x%lx\n",
+		    plat_priv->driver_state);
+
+	if (!fw_mem->pa || !fw_mem->size) {
+		cnss_pr_err("Memory for FW is not available!\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	cnss_pr_dbg("Memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx\n",
+		    fw_mem->va, &fw_mem->pa, fw_mem->size);
+
+	memset(&req, 0, sizeof(req));
+	memset(&resp, 0, sizeof(resp));
+
+	req.addr = fw_mem->pa;
+	req.size = fw_mem->size;
+
+	req_desc.max_msg_len = WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.msg_id = QMI_WLFW_RESPOND_MEM_REQ_V01;
+	req_desc.ei_array = wlfw_respond_mem_req_msg_v01_ei;
+
+	resp_desc.max_msg_len = WLFW_RESPOND_MEM_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.msg_id = QMI_WLFW_RESPOND_MEM_RESP_V01;
+	resp_desc.ei_array = wlfw_respond_mem_resp_msg_v01_ei;
+
+	ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, &req,
+				sizeof(req), &resp_desc, &resp, sizeof(resp),
+				QMI_WLFW_TIMEOUT_MS);
+	if (ret < 0) {
+		cnss_pr_err("Failed to send respond memory request, err = %d\n",
+			    ret);
+		goto out;
+	}
+
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		cnss_pr_err("Respond memory request failed, result: %d, err: %d\n",
+			    resp.resp.result, resp.resp.error);
+		ret = resp.resp.result;
+		goto out;
+	}
+
+	return 0;
+out:
+	CNSS_ASSERT(0);
+	return ret;
+}
+
+/*
+ * Query target capabilities (chip, board, SoC and firmware version
+ * info) and cache whatever the firmware marks valid.  board_id falls
+ * back to 0xFF ("unknown"), which selects the default BDF file name in
+ * cnss_wlfw_bdf_dnld_send_sync().  Asserts on failure.
+ */
+int cnss_wlfw_tgt_cap_send_sync(struct cnss_plat_data *plat_priv)
+{
+	struct wlfw_cap_req_msg_v01 req;
+	struct wlfw_cap_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+	int ret = 0;
+
+	cnss_pr_dbg("Sending target capability message, state: 0x%lx\n",
+		    plat_priv->driver_state);
+
+	memset(&req, 0, sizeof(req));
+	memset(&resp, 0, sizeof(resp));
+
+	req_desc.max_msg_len = WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.msg_id = QMI_WLFW_CAP_REQ_V01;
+	req_desc.ei_array = wlfw_cap_req_msg_v01_ei;
+
+	resp_desc.max_msg_len = WLFW_CAP_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.msg_id = QMI_WLFW_CAP_RESP_V01;
+	resp_desc.ei_array = wlfw_cap_resp_msg_v01_ei;
+
+	ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, &req,
+				sizeof(req), &resp_desc, &resp, sizeof(resp),
+				QMI_WLFW_TIMEOUT_MS);
+	if (ret < 0) {
+		cnss_pr_err("Failed to send target capability request, err = %d\n",
+			    ret);
+		goto out;
+	}
+
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		cnss_pr_err("Target capability request failed, result: %d, err: %d\n",
+			    resp.resp.result, resp.resp.error);
+		ret = resp.resp.result;
+		goto out;
+	}
+
+	if (resp.chip_info_valid)
+		plat_priv->chip_info = resp.chip_info;
+	if (resp.board_info_valid)
+		plat_priv->board_info = resp.board_info;
+	else
+		plat_priv->board_info.board_id = 0xFF;
+	if (resp.soc_info_valid)
+		plat_priv->soc_info = resp.soc_info;
+	if (resp.fw_version_info_valid)
+		plat_priv->fw_version_info = resp.fw_version_info;
+
+	/* NOTE(review): format string lacks a trailing \n — log lines may merge. */
+	cnss_pr_dbg("Target capability: chip_id: 0x%x, chip_family: 0x%x, board_id: 0x%x, soc_id: 0x%x, fw_version: 0x%x, fw_build_timestamp: %s",
+		    plat_priv->chip_info.chip_id,
+		    plat_priv->chip_info.chip_family,
+		    plat_priv->board_info.board_id, plat_priv->soc_info.soc_id,
+		    plat_priv->fw_version_info.fw_version,
+		    plat_priv->fw_version_info.fw_build_timestamp);
+
+	return 0;
+out:
+	CNSS_ASSERT(0);
+	return ret;
+}
+
+/*
+ * Download the board data file (BDF) to firmware in
+ * QMI_WLFW_MAX_DATA_SIZE_V01-sized segments.  The file is
+ * "bdwlan.eXX" (XX = board_id) or "bdwlan.elf" when board_id is
+ * unknown (0xFF).  With the debug-only bdf_bypass parameter set, a
+ * missing file is replaced by dummy data so bring-up can proceed.
+ *
+ * Fix vs. original: firmware release was gated on !bdf_bypass, which
+ * leaked the firmware blob whenever bdf_bypass was set but the file
+ * actually loaded.  Release is now gated on whether request_firmware()
+ * succeeded.
+ */
+int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv)
+{
+	struct wlfw_bdf_download_req_msg_v01 *req;
+	struct wlfw_bdf_download_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+	char filename[MAX_BDF_FILE_NAME];
+	const struct firmware *fw_entry = NULL;
+	const u8 *temp;
+	unsigned int remaining;
+	int ret = 0;
+
+	cnss_pr_dbg("Sending BDF download message, state: 0x%lx\n",
+		    plat_priv->driver_state);
+
+	/* Request struct is large (holds a full data segment); heap it. */
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	if (plat_priv->board_info.board_id == 0xFF)
+		snprintf(filename, sizeof(filename), DEFAULT_BDF_FILE_NAME);
+	else
+		snprintf(filename, sizeof(filename),
+			 BDF_FILE_NAME_PREFIX "%02x",
+			 plat_priv->board_info.board_id);
+
+	ret = request_firmware(&fw_entry, filename, &plat_priv->plat_dev->dev);
+	if (ret) {
+		cnss_pr_err("Failed to load BDF: %s\n", filename);
+		if (bdf_bypass) {
+			cnss_pr_info("bdf_bypass is enabled, sending dummy BDF\n");
+			/* Dummy payload: the filename buffer itself. */
+			temp = filename;
+			remaining = MAX_BDF_FILE_NAME;
+			goto bypass_bdf;
+		} else {
+			goto err_req_fw;
+		}
+	}
+
+	temp = fw_entry->data;
+	remaining = fw_entry->size;
+
+bypass_bdf:
+	cnss_pr_dbg("Downloading BDF: %s, size: %u\n", filename, remaining);
+
+	memset(&resp, 0, sizeof(resp));
+
+	req_desc.max_msg_len = WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.msg_id = QMI_WLFW_BDF_DOWNLOAD_REQ_V01;
+	req_desc.ei_array = wlfw_bdf_download_req_msg_v01_ei;
+
+	resp_desc.max_msg_len = WLFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.msg_id = QMI_WLFW_BDF_DOWNLOAD_RESP_V01;
+	resp_desc.ei_array = wlfw_bdf_download_resp_msg_v01_ei;
+
+	while (remaining) {
+		req->valid = 1;
+		req->file_id_valid = 1;
+		req->file_id = plat_priv->board_info.board_id;
+		req->total_size_valid = 1;
+		req->total_size = remaining;
+		req->seg_id_valid = 1;
+		req->data_valid = 1;
+		req->end_valid = 1;
+		req->bdf_type_valid = 1;
+		req->bdf_type = CNSS_BDF_ELF;
+
+		if (remaining > QMI_WLFW_MAX_DATA_SIZE_V01) {
+			req->data_len = QMI_WLFW_MAX_DATA_SIZE_V01;
+		} else {
+			/* Last segment: flag end-of-file to firmware. */
+			req->data_len = remaining;
+			req->end = 1;
+		}
+
+		memcpy(req->data, temp, req->data_len);
+
+		ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc,
+					req, sizeof(*req), &resp_desc, &resp,
+					sizeof(resp), QMI_WLFW_TIMEOUT_MS);
+		if (ret < 0) {
+			cnss_pr_err("Failed to send BDF download request, err = %d\n",
+				    ret);
+			goto err_send;
+		}
+
+		if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+			cnss_pr_err("BDF download request failed, result: %d, err: %d\n",
+				    resp.resp.result, resp.resp.error);
+			ret = resp.resp.result;
+			goto err_send;
+		}
+
+		remaining -= req->data_len;
+		temp += req->data_len;
+		req->seg_id++;
+	}
+
+err_send:
+	/*
+	 * fw_entry is non-NULL only when request_firmware() succeeded
+	 * (it sets *fw to NULL on failure), so this releases exactly the
+	 * blobs we own — including the bdf_bypass-with-file-present case
+	 * the original leaked.
+	 */
+	if (fw_entry)
+		release_firmware(fw_entry);
+err_req_fw:
+	kfree(req);
+out:
+	if (ret)
+		CNSS_ASSERT(0);
+	return ret;
+}
+
+/*
+ * Tell firmware where the M3 image lives: send the physical address and
+ * size of the buffer previously filled by cnss_pci_load_m3().  Asserts
+ * on failure.
+ */
+int cnss_wlfw_m3_dnld_send_sync(struct cnss_plat_data *plat_priv)
+{
+	struct wlfw_m3_info_req_msg_v01 req;
+	struct wlfw_m3_info_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+	struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem;
+	int ret = 0;
+
+	cnss_pr_dbg("Sending M3 information message, state: 0x%lx\n",
+		    plat_priv->driver_state);
+
+	if (!m3_mem->pa || !m3_mem->size) {
+		cnss_pr_err("Memory for M3 is not available!\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	cnss_pr_dbg("M3 memory, va: 0x%pK, pa: %pa, size: 0x%zx\n",
+		    m3_mem->va, &m3_mem->pa, m3_mem->size);
+
+	memset(&req, 0, sizeof(req));
+	memset(&resp, 0, sizeof(resp));
+
+	req.addr = plat_priv->m3_mem.pa;
+	req.size = plat_priv->m3_mem.size;
+
+	req_desc.max_msg_len = WLFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.msg_id = QMI_WLFW_M3_INFO_REQ_V01;
+	req_desc.ei_array = wlfw_m3_info_req_msg_v01_ei;
+
+	resp_desc.max_msg_len = WLFW_M3_INFO_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.msg_id = QMI_WLFW_M3_INFO_RESP_V01;
+	resp_desc.ei_array = wlfw_m3_info_resp_msg_v01_ei;
+
+	ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, &req,
+				sizeof(req), &resp_desc, &resp, sizeof(resp),
+				QMI_WLFW_TIMEOUT_MS);
+	if (ret < 0) {
+		cnss_pr_err("Failed to send M3 information request, err = %d\n",
+			    ret);
+		goto out;
+	}
+
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		cnss_pr_err("M3 information request failed, result: %d, err: %d\n",
+			    resp.resp.result, resp.resp.error);
+		ret = resp.resp.result;
+		goto out;
+	}
+
+	return 0;
+
+out:
+	CNSS_ASSERT(0);
+	return ret;
+}
+
+/*
+ * Switch the firmware driver mode (MISSION/FTM/OFF/...).  Mode-off
+ * requests are tolerated during recovery or after the WLFW service has
+ * gone away (-ENETRESET), since firmware is already down; any other
+ * failure asserts.
+ */
+int cnss_wlfw_wlan_mode_send_sync(struct cnss_plat_data *plat_priv,
+				  enum wlfw_driver_mode_enum_v01 mode)
+{
+	struct wlfw_wlan_mode_req_msg_v01 req;
+	struct wlfw_wlan_mode_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+	int ret = 0;
+
+	if (!plat_priv)
+		return -ENODEV;
+
+	cnss_pr_dbg("Sending mode message, mode: %s(%d), state: 0x%lx\n",
+		    cnss_qmi_mode_to_str(mode), mode, plat_priv->driver_state);
+
+	if (mode == QMI_WLFW_OFF_V01 &&
+	    test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
+		cnss_pr_dbg("Recovery is in progress, ignore mode off request.\n");
+		return 0;
+	}
+
+	memset(&req, 0, sizeof(req));
+	memset(&resp, 0, sizeof(resp));
+
+	req.mode = mode;
+	req.hw_debug_valid = 1;
+	req.hw_debug = 0;
+
+	req_desc.max_msg_len = WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.msg_id = QMI_WLFW_WLAN_MODE_REQ_V01;
+	req_desc.ei_array = wlfw_wlan_mode_req_msg_v01_ei;
+
+	resp_desc.max_msg_len = WLFW_WLAN_MODE_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.msg_id = QMI_WLFW_WLAN_MODE_RESP_V01;
+	resp_desc.ei_array = wlfw_wlan_mode_resp_msg_v01_ei;
+
+	ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, &req,
+				sizeof(req), &resp_desc, &resp, sizeof(resp),
+				QMI_WLFW_TIMEOUT_MS);
+	if (ret < 0) {
+		/* Service already gone while turning off: not an error. */
+		if (mode == QMI_WLFW_OFF_V01 && ret == -ENETRESET) {
+			cnss_pr_dbg("WLFW service is disconnected while sending mode off request.\n");
+			return 0;
+		}
+		cnss_pr_err("Failed to send mode request, mode: %s(%d), err: %d\n",
+			    cnss_qmi_mode_to_str(mode), mode, ret);
+		goto out;
+	}
+
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		cnss_pr_err("Mode request failed, mode: %s(%d), result: %d, err: %d\n",
+			    cnss_qmi_mode_to_str(mode), mode, resp.resp.result,
+			    resp.resp.error);
+		ret = resp.resp.result;
+		goto out;
+	}
+
+	return 0;
+out:
+	if (mode != QMI_WLFW_OFF_V01)
+		CNSS_ASSERT(0);
+	return ret;
+}
+
+/*
+ * Forward the caller-supplied WLAN configuration to firmware.
+ *
+ * Fix vs. original: the NULL check on plat_priv now precedes the debug
+ * print, which dereferenced plat_priv->driver_state before the check.
+ */
+int cnss_wlfw_wlan_cfg_send_sync(struct cnss_plat_data *plat_priv,
+				 struct wlfw_wlan_cfg_req_msg_v01 *data)
+{
+	struct wlfw_wlan_cfg_req_msg_v01 req;
+	struct wlfw_wlan_cfg_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+	int ret = 0;
+
+	if (!plat_priv)
+		return -ENODEV;
+
+	cnss_pr_dbg("Sending WLAN config message, state: 0x%lx\n",
+		    plat_priv->driver_state);
+
+	memset(&req, 0, sizeof(req));
+	memset(&resp, 0, sizeof(resp));
+
+	memcpy(&req, data, sizeof(req));
+
+	req_desc.max_msg_len = WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.msg_id = QMI_WLFW_WLAN_CFG_REQ_V01;
+	req_desc.ei_array = wlfw_wlan_cfg_req_msg_v01_ei;
+
+	resp_desc.max_msg_len = WLFW_WLAN_CFG_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.msg_id = QMI_WLFW_WLAN_CFG_RESP_V01;
+	resp_desc.ei_array = wlfw_wlan_cfg_resp_msg_v01_ei;
+
+	ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, &req,
+				sizeof(req), &resp_desc, &resp, sizeof(resp),
+				QMI_WLFW_TIMEOUT_MS);
+	if (ret < 0) {
+		cnss_pr_err("Failed to send WLAN config request, err = %d\n",
+			    ret);
+		goto out;
+	}
+
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		cnss_pr_err("WLAN config request failed, result: %d, err: %d\n",
+			    resp.resp.result, resp.resp.error);
+		ret = resp.resp.result;
+		goto out;
+	}
+
+	return 0;
+out:
+	CNSS_ASSERT(0);
+	return ret;
+}
+
+/*
+ * Diagnostic register/memory read via QMI: ask firmware for data_len
+ * bytes at (mem_type, offset) and copy the returned bytes into 'data'.
+ * The caller's buffer must be at least data_len bytes.  Unlike the
+ * boot-path messages, failures here do not assert — this is a debug
+ * facility.
+ */
+int cnss_wlfw_athdiag_read_send_sync(struct cnss_plat_data *plat_priv,
+				     u32 offset, u32 mem_type,
+				     u32 data_len, u8 *data)
+{
+	struct wlfw_athdiag_read_req_msg_v01 req;
+	struct wlfw_athdiag_read_resp_msg_v01 *resp;
+	struct msg_desc req_desc, resp_desc;
+	int ret = 0;
+
+	if (!plat_priv)
+		return -ENODEV;
+
+	if (!plat_priv->qmi_wlfw_clnt)
+		return -EINVAL;
+
+	cnss_pr_dbg("athdiag read: state 0x%lx, offset %x, mem_type %x, data_len %u\n",
+		    plat_priv->driver_state, offset, mem_type, data_len);
+
+	/* Response embeds a full data payload; too large for the stack. */
+	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+	if (!resp)
+		return -ENOMEM;
+
+	memset(&req, 0, sizeof(req));
+
+	req.offset = offset;
+	req.mem_type = mem_type;
+	req.data_len = data_len;
+
+	req_desc.max_msg_len = WLFW_ATHDIAG_READ_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.msg_id = QMI_WLFW_ATHDIAG_READ_REQ_V01;
+	req_desc.ei_array = wlfw_athdiag_read_req_msg_v01_ei;
+
+	resp_desc.max_msg_len = WLFW_ATHDIAG_READ_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.msg_id = QMI_WLFW_ATHDIAG_READ_RESP_V01;
+	resp_desc.ei_array = wlfw_athdiag_read_resp_msg_v01_ei;
+
+	ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, &req,
+				sizeof(req), &resp_desc, resp, sizeof(*resp),
+				QMI_WLFW_TIMEOUT_MS);
+	if (ret < 0) {
+		cnss_pr_err("Failed to send athdiag read request, err = %d\n",
+			    ret);
+		goto out;
+	}
+
+	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+		cnss_pr_err("athdiag read request failed, result: %d, err: %d\n",
+			    resp->resp.result, resp->resp.error);
+		ret = resp->resp.result;
+		goto out;
+	}
+
+	/* Reject short or unmarked payloads before copying to the caller. */
+	if (!resp->data_valid || resp->data_len != data_len) {
+		cnss_pr_err("athdiag read data is invalid, data_valid = %u, data_len = %u\n",
+			    resp->data_valid, resp->data_len);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	memcpy(data, resp->data, resp->data_len);
+
+out:
+	kfree(resp);
+	return ret;
+}
+
+/* Write @data_len bytes of @data into target memory via the WLFW ATHDIAG
+ * QMI service.
+ *
+ * @plat_priv: platform driver state; must hold a live QMI client handle
+ * @offset:    offset inside @mem_type to write to
+ * @mem_type:  firmware-defined memory region identifier
+ * @data_len:  number of bytes to copy out of @data
+ * @data:      bytes to send
+ *
+ * The request message is heap-allocated because it embeds a large data
+ * payload that would be too big for the kernel stack.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int cnss_wlfw_athdiag_write_send_sync(struct cnss_plat_data *plat_priv,
+				      u32 offset, u32 mem_type,
+				      u32 data_len, u8 *data)
+{
+	struct wlfw_athdiag_write_req_msg_v01 *req;
+	struct wlfw_athdiag_write_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+	int ret = 0;
+
+	if (!plat_priv)
+		return -ENODEV;
+
+	if (!plat_priv->qmi_wlfw_clnt)
+		return -EINVAL;
+
+	/*
+	 * req->data is a fixed-size QMI array; reject oversized requests
+	 * up front so the memcpy() below cannot overflow it.
+	 */
+	if (data_len > QMI_WLFW_MAX_DATA_SIZE_V01)
+		return -EINVAL;
+
+	/* %pK instead of %p: do not leak raw kernel pointers to the log. */
+	cnss_pr_dbg("athdiag write: state 0x%lx, offset %x, mem_type %x, data_len %u, data %pK\n",
+		    plat_priv->driver_state, offset, mem_type, data_len, data);
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	memset(&resp, 0, sizeof(resp));
+
+	req->offset = offset;
+	req->mem_type = mem_type;
+	req->data_len = data_len;
+	memcpy(req->data, data, data_len);
+
+	req_desc.max_msg_len = WLFW_ATHDIAG_WRITE_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.msg_id = QMI_WLFW_ATHDIAG_WRITE_REQ_V01;
+	req_desc.ei_array = wlfw_athdiag_write_req_msg_v01_ei;
+
+	resp_desc.max_msg_len = WLFW_ATHDIAG_WRITE_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.msg_id = QMI_WLFW_ATHDIAG_WRITE_RESP_V01;
+	resp_desc.ei_array = wlfw_athdiag_write_resp_msg_v01_ei;
+
+	ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, req,
+				sizeof(*req), &resp_desc, &resp, sizeof(resp),
+				QMI_WLFW_TIMEOUT_MS);
+	if (ret < 0) {
+		cnss_pr_err("Failed to send athdiag write request, err = %d\n",
+			    ret);
+		goto out;
+	}
+
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		cnss_pr_err("athdiag write request failed, result: %d, err: %d\n",
+			    resp.resp.result, resp.resp.error);
+		/* Negate the positive QMI result so "ret < 0" callers see it. */
+		ret = -resp.resp.result;
+		goto out;
+	}
+
+out:
+	kfree(req);
+	return ret;
+}
+
+/* Tell the firmware which log mode to use via the WLFW INI QMI request.
+ *
+ * @plat_priv:   platform driver state
+ * @fw_log_mode: firmware-defined log mode value, sent in the optional
+ *               enablefwlog TLV (marked valid unconditionally here)
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int cnss_wlfw_ini_send_sync(struct cnss_plat_data *plat_priv,
+			    u8 fw_log_mode)
+{
+	int ret;
+	struct wlfw_ini_req_msg_v01 req;
+	struct wlfw_ini_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+
+	if (!plat_priv)
+		return -ENODEV;
+
+	cnss_pr_dbg("Sending ini sync request, state: 0x%lx, fw_log_mode: %d\n",
+		    plat_priv->driver_state, fw_log_mode);
+
+	memset(&req, 0, sizeof(req));
+	memset(&resp, 0, sizeof(resp));
+
+	req.enablefwlog_valid = 1;
+	req.enablefwlog = fw_log_mode;
+
+	req_desc.max_msg_len = WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.msg_id = QMI_WLFW_INI_REQ_V01;
+	req_desc.ei_array = wlfw_ini_req_msg_v01_ei;
+
+	resp_desc.max_msg_len = WLFW_INI_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.msg_id = QMI_WLFW_INI_RESP_V01;
+	resp_desc.ei_array = wlfw_ini_resp_msg_v01_ei;
+
+	ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt,
+				&req_desc, &req, sizeof(req),
+				&resp_desc, &resp, sizeof(resp),
+				QMI_WLFW_TIMEOUT_MS);
+	if (ret < 0) {
+		cnss_pr_err("Send INI req failed fw_log_mode: %d, ret: %d\n",
+			    fw_log_mode, ret);
+		goto out;
+	}
+
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		cnss_pr_err("QMI INI request rejected, fw_log_mode:%d result:%d error:%d\n",
+			    fw_log_mode, resp.resp.result, resp.resp.error);
+		/*
+		 * QMI result codes are positive; negate so callers using the
+		 * kernel "ret < 0 is failure" convention detect the error.
+		 */
+		ret = -resp.resp.result;
+		goto out;
+	}
+
+	return 0;
+
+out:
+	return ret;
+}
+
+/* QMI indication callback for unsolicited WLFW service messages.
+ *
+ * Runs in QMI client context with the raw (still encoded) payload.  It does
+ * not decode anything itself: it either hands the payload to a dedicated
+ * handler or posts a driver event for deferred processing.  @ind_cb_priv is
+ * the cnss_plat_data pointer registered in cnss_wlfw_server_arrive().
+ */
+static void cnss_wlfw_clnt_ind(struct qmi_handle *handle,
+			       unsigned int msg_id, void *msg,
+			       unsigned int msg_len, void *ind_cb_priv)
+{
+	struct cnss_plat_data *plat_priv = ind_cb_priv;
+
+	cnss_pr_dbg("Received QMI WLFW indication, msg_id: 0x%x, msg_len: %d\n",
+		    msg_id, msg_len);
+
+	if (!plat_priv) {
+		cnss_pr_err("plat_priv is NULL!\n");
+		return;
+	}
+
+	switch (msg_id) {
+	case QMI_WLFW_REQUEST_MEM_IND_V01:
+		/* Firmware asks the host for memory; handled directly. */
+		cnss_wlfw_request_mem_ind_hdlr(plat_priv, msg, msg_len);
+		break;
+	case QMI_WLFW_FW_MEM_READY_IND_V01:
+		/* Payload-less indications are turned into driver events. */
+		cnss_driver_event_post(plat_priv,
+				       CNSS_DRIVER_EVENT_FW_MEM_READY,
+				       0, NULL);
+		break;
+	case QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01:
+		cnss_driver_event_post(plat_priv,
+				       CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE,
+				       0, NULL);
+		break;
+	case QMI_WLFW_FW_READY_IND_V01:
+		cnss_driver_event_post(plat_priv,
+				       CNSS_DRIVER_EVENT_FW_READY,
+				       0, NULL);
+		break;
+	case QMI_WLFW_PIN_CONNECT_RESULT_IND_V01:
+		cnss_qmi_pin_result_ind_hdlr(plat_priv, msg, msg_len);
+		break;
+	default:
+		cnss_pr_err("Invalid QMI WLFW indication, msg_id: 0x%x\n",
+			    msg_id);
+		break;
+	}
+}
+
+/* Expose the WLFW QMI transaction timeout (ms) to other kernel modules. */
+unsigned int cnss_get_qmi_timeout(void)
+{
+	unsigned int timeout_ms = QMI_WLFW_TIMEOUT_MS;
+
+	cnss_pr_dbg("QMI timeout is %u ms\n", timeout_ms);
+	return timeout_ms;
+}
+EXPORT_SYMBOL(cnss_get_qmi_timeout);
+
+/* Bring up the WLFW QMI client when the service appears.
+ *
+ * Sequence: create the client handle, connect to the WLFW service,
+ * register the indication callback, mark the connection, then send the
+ * host-capability and indication-register requests.  Any failure tears the
+ * handle back down and fires CNSS_ASSERT (debug aid on this target).
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+int cnss_wlfw_server_arrive(struct cnss_plat_data *plat_priv)
+{
+	int ret = 0;
+
+	if (!plat_priv)
+		return -ENODEV;
+
+	plat_priv->qmi_wlfw_clnt =
+		qmi_handle_create(cnss_wlfw_clnt_notifier, plat_priv);
+	if (!plat_priv->qmi_wlfw_clnt) {
+		cnss_pr_err("Failed to create QMI client handle!\n");
+		ret = -ENOMEM;
+		goto err_create_handle;
+	}
+
+	ret = qmi_connect_to_service(plat_priv->qmi_wlfw_clnt,
+				     WLFW_SERVICE_ID_V01,
+				     WLFW_SERVICE_VERS_V01,
+				     WLFW_SERVICE_INS_ID_V01);
+	if (ret < 0) {
+		cnss_pr_err("Failed to connect to QMI WLFW service, err = %d\n",
+			    ret);
+		goto out;
+	}
+
+	ret = qmi_register_ind_cb(plat_priv->qmi_wlfw_clnt,
+				  cnss_wlfw_clnt_ind, plat_priv);
+	if (ret < 0) {
+		cnss_pr_err("Failed to register QMI WLFW service indication callback, err = %d\n",
+			    ret);
+		goto out;
+	}
+
+	/* From here on cnss_wlfw_clnt_ind() may fire for this plat_priv. */
+	set_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state);
+
+	cnss_pr_info("QMI WLFW service connected, state: 0x%lx\n",
+		     plat_priv->driver_state);
+
+	ret = cnss_wlfw_host_cap_send_sync(plat_priv);
+	if (ret < 0)
+		goto out;
+
+	ret = cnss_wlfw_ind_register_send_sync(plat_priv);
+	if (ret < 0)
+		goto out;
+
+	return 0;
+out:
+	/* Undo handle creation; CNSS_QMI_WLFW_CONNECTED is not cleared here
+	 * on the post-set_bit failure paths -- NOTE(review): confirm whether
+	 * cnss_wlfw_server_exit() is guaranteed to follow in that case.
+	 */
+	qmi_handle_destroy(plat_priv->qmi_wlfw_clnt);
+	plat_priv->qmi_wlfw_clnt = NULL;
+err_create_handle:
+	CNSS_ASSERT(0);
+	return ret;
+}
+
+/* Tear down the WLFW QMI client when the service goes away: destroy the
+ * handle, drop the connected bit, and log the new state.  Always returns 0
+ * unless @plat_priv is NULL.
+ */
+int cnss_wlfw_server_exit(struct cnss_plat_data *plat_priv)
+{
+	if (!plat_priv)
+		return -ENODEV;
+
+	qmi_handle_destroy(plat_priv->qmi_wlfw_clnt);
+	plat_priv->qmi_wlfw_clnt = NULL;
+
+	clear_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state);
+
+	cnss_pr_info("QMI WLFW service disconnected, state: 0x%lx\n",
+		     plat_priv->driver_state);
+
+	return 0;
+}
+
+/* One-time QMI setup: prepare the receive worker and subscribe to WLFW
+ * service arrive/exit events (which drive cnss_wlfw_server_arrive/exit
+ * via cnss_wlfw_clnt_svc_event_notifier).
+ *
+ * Returns 0 on success or the negative error from the notifier
+ * registration.
+ */
+int cnss_qmi_init(struct cnss_plat_data *plat_priv)
+{
+	int ret = 0;
+
+	INIT_WORK(&plat_priv->qmi_recv_msg_work,
+		  cnss_wlfw_clnt_notifier_work);
+
+	plat_priv->qmi_wlfw_clnt_nb.notifier_call =
+		cnss_wlfw_clnt_svc_event_notifier;
+
+	ret = qmi_svc_event_notifier_register(WLFW_SERVICE_ID_V01,
+					      WLFW_SERVICE_VERS_V01,
+					      WLFW_SERVICE_INS_ID_V01,
+					      &plat_priv->qmi_wlfw_clnt_nb);
+	if (ret < 0)
+		cnss_pr_err("Failed to register QMI event notifier, err = %d\n",
+			    ret);
+
+	return ret;
+}
+
+/* Counterpart of cnss_qmi_init(): unsubscribe from WLFW service events. */
+void cnss_qmi_deinit(struct cnss_plat_data *plat_priv)
+{
+	qmi_svc_event_notifier_unregister(WLFW_SERVICE_ID_V01,
+					  WLFW_SERVICE_VERS_V01,
+					  WLFW_SERVICE_INS_ID_V01,
+					  &plat_priv->qmi_wlfw_clnt_nb);
+}
diff --git a/drivers/net/wireless/cnss2/qmi.h b/drivers/net/wireless/cnss2/qmi.h
new file mode 100644
index 0000000..70d8d40
--- /dev/null
+++ b/drivers/net/wireless/cnss2/qmi.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CNSS_QMI_H
+#define _CNSS_QMI_H
+
+#include "wlan_firmware_service_v01.h"
+
+/* Opaque to this header; full definition lives in the cnss2 core header. */
+struct cnss_plat_data;
+
+/* Register/unregister for WLFW service arrival notifications. */
+int cnss_qmi_init(struct cnss_plat_data *plat_priv);
+void cnss_qmi_deinit(struct cnss_plat_data *plat_priv);
+/* Connection lifecycle, invoked when the WLFW QMI service comes and goes. */
+int cnss_wlfw_server_arrive(struct cnss_plat_data *plat_priv);
+int cnss_wlfw_server_exit(struct cnss_plat_data *plat_priv);
+/* Synchronous WLFW requests: 0 on success, non-zero on failure. */
+int cnss_wlfw_respond_mem_send_sync(struct cnss_plat_data *plat_priv);
+int cnss_wlfw_tgt_cap_send_sync(struct cnss_plat_data *plat_priv);
+int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv);
+int cnss_wlfw_m3_dnld_send_sync(struct cnss_plat_data *plat_priv);
+int cnss_wlfw_wlan_mode_send_sync(struct cnss_plat_data *plat_priv,
+				  enum wlfw_driver_mode_enum_v01 mode);
+int cnss_wlfw_wlan_cfg_send_sync(struct cnss_plat_data *plat_priv,
+				 struct wlfw_wlan_cfg_req_msg_v01 *data);
+/* Target memory peek/poke over QMI (ATHDIAG). */
+int cnss_wlfw_athdiag_read_send_sync(struct cnss_plat_data *plat_priv,
+				     u32 offset, u32 mem_type,
+				     u32 data_len, u8 *data);
+int cnss_wlfw_athdiag_write_send_sync(struct cnss_plat_data *plat_priv,
+				      u32 offset, u32 mem_type,
+				      u32 data_len, u8 *data);
+int cnss_wlfw_ini_send_sync(struct cnss_plat_data *plat_priv,
+			    u8 fw_log_mode);
+
+#endif /* _CNSS_QMI_H */
diff --git a/drivers/net/wireless/cnss2/utils.c b/drivers/net/wireless/cnss2/utils.c
new file mode 100644
index 0000000..9ffe386
--- /dev/null
+++ b/drivers/net/wireless/cnss2/utils.c
@@ -0,0 +1,129 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* Upper bound on channels tracked in unsafe_channel_list below.
+ * NOTE(review): defining this above the #include lines is unconventional
+ * but harmless; kernel style would place it after the includes.
+ */
+#define CNSS_MAX_CH_NUM			45
+
+#include <linux/module.h>
+#include <linux/slab.h>
+
+/* Serialize access to the two file-scope caches defined below. */
+static DEFINE_MUTEX(unsafe_channel_list_lock);
+static DEFINE_MUTEX(dfs_nol_info_lock);
+
+/* Channels reported unsafe by the platform (presumably coex-related --
+ * TODO confirm with callers); guarded by unsafe_channel_list_lock.
+ */
+static struct cnss_unsafe_channel_list {
+	u16 unsafe_ch_count;
+	u16 unsafe_ch_list[CNSS_MAX_CH_NUM];
+} unsafe_channel_list;
+
+/* Latest DFS non-occupancy-list blob (opaque bytes, heap-owned);
+ * guarded by dfs_nol_info_lock.
+ */
+static struct cnss_dfs_nol_info {
+	void *dfs_nol_info;
+	u16 dfs_nol_info_len;
+} dfs_nol_info;
+
+/* Replace the cached unsafe-channel list with @ch_count entries from
+ * @unsafe_ch_list.  Returns 0 on success, -EINVAL when the list pointer is
+ * NULL or the count exceeds CNSS_MAX_CH_NUM.
+ */
+int cnss_set_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 ch_count)
+{
+	int ret = 0;
+
+	mutex_lock(&unsafe_channel_list_lock);
+	if (!unsafe_ch_list || ch_count > CNSS_MAX_CH_NUM) {
+		ret = -EINVAL;
+	} else {
+		unsafe_channel_list.unsafe_ch_count = ch_count;
+		if (ch_count) {
+			memcpy(unsafe_channel_list.unsafe_ch_list,
+			       unsafe_ch_list, ch_count * sizeof(u16));
+		}
+	}
+	mutex_unlock(&unsafe_channel_list_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(cnss_set_wlan_unsafe_channel);
+
+/* Copy the cached unsafe-channel list into @unsafe_ch_list and store the
+ * entry count in @ch_count.  @buf_len is the caller's buffer size in bytes.
+ * Returns 0 on success, -EINVAL for NULL arguments, -ENOMEM when the
+ * buffer is too small for the cached list.
+ */
+int cnss_get_wlan_unsafe_channel(u16 *unsafe_ch_list,
+				 u16 *ch_count, u16 buf_len)
+{
+	int ret = 0;
+	u16 count;
+
+	mutex_lock(&unsafe_channel_list_lock);
+	count = unsafe_channel_list.unsafe_ch_count;
+	if (!unsafe_ch_list || !ch_count) {
+		ret = -EINVAL;
+	} else if (buf_len < count * sizeof(u16)) {
+		ret = -ENOMEM;
+	} else {
+		*ch_count = count;
+		memcpy(unsafe_ch_list, unsafe_channel_list.unsafe_ch_list,
+		       count * sizeof(u16));
+	}
+	mutex_unlock(&unsafe_channel_list_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(cnss_get_wlan_unsafe_channel);
+
+/* Cache a copy of the DFS non-occupancy-list blob @info (@info_len bytes),
+ * replacing (and freeing) any previously stored blob.
+ * Returns 0 on success, -EINVAL for NULL/empty input, -ENOMEM on
+ * allocation failure.
+ */
+int cnss_wlan_set_dfs_nol(const void *info, u16 info_len)
+{
+	void *temp;
+	struct cnss_dfs_nol_info *dfs_info;
+
+	mutex_lock(&dfs_nol_info_lock);
+	if (!info || !info_len) {
+		mutex_unlock(&dfs_nol_info_lock);
+		return -EINVAL;
+	}
+
+	/* kmemdup() replaces the open-coded kmalloc() + memcpy() pair. */
+	temp = kmemdup(info, info_len, GFP_KERNEL);
+	if (!temp) {
+		mutex_unlock(&dfs_nol_info_lock);
+		return -ENOMEM;
+	}
+
+	dfs_info = &dfs_nol_info;
+	kfree(dfs_info->dfs_nol_info);
+
+	dfs_info->dfs_nol_info = temp;
+	dfs_info->dfs_nol_info_len = info_len;
+	mutex_unlock(&dfs_nol_info_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(cnss_wlan_set_dfs_nol);
+
+/* Copy up to @info_len bytes of the cached DFS NOL blob into @info.
+ * Returns the number of bytes copied (may be truncated to @info_len),
+ * -EINVAL for NULL/zero arguments, or -ENOENT when nothing is cached.
+ */
+int cnss_wlan_get_dfs_nol(void *info, u16 info_len)
+{
+	int len;
+	struct cnss_dfs_nol_info *dfs_info;
+
+	mutex_lock(&dfs_nol_info_lock);
+	if (!info || !info_len) {
+		mutex_unlock(&dfs_nol_info_lock);
+		return -EINVAL;
+	}
+
+	dfs_info = &dfs_nol_info;
+
+	if (!dfs_info->dfs_nol_info || dfs_info->dfs_nol_info_len == 0) {
+		mutex_unlock(&dfs_nol_info_lock);
+		return -ENOENT;
+	}
+
+	/* Both operands are u16, so min() type checking is satisfied. */
+	len = min(info_len, dfs_info->dfs_nol_info_len);
+
+	memcpy(info, dfs_info->dfs_nol_info, len);
+	mutex_unlock(&dfs_nol_info_lock);
+
+	return len;
+}
+EXPORT_SYMBOL(cnss_wlan_get_dfs_nol);
diff --git a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c
new file mode 100644
index 0000000..7d6a771
--- /dev/null
+++ b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c
@@ -0,0 +1,2221 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "wlan_firmware_service_v01.h"
+
+/* QMI encode/decode table for wlfw_ce_tgt_pipe_cfg_s_v01.
+ * Fixed: the QMI_EOTI sentinel initialized .is_array twice; the second
+ * value (QMI_COMMON_TLV_TYPE) belongs in .tlv_type.
+ */
+static struct elem_info wlfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+					   pipe_num),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum wlfw_pipedir_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+					   pipe_dir),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+					   nentries),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+					   nbytes_max),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+					   flags),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+/* QMI encode/decode table for wlfw_ce_svc_pipe_cfg_s_v01.
+ * Fixed: duplicate .is_array initializer in the sentinel -> .tlv_type.
+ */
+static struct elem_info wlfw_ce_svc_pipe_cfg_s_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01,
+					   service_id),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum wlfw_pipedir_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01,
+					   pipe_dir),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01,
+					   pipe_num),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+/* QMI encode/decode table for wlfw_shadow_reg_cfg_s_v01.
+ * Fixed: duplicate .is_array initializer in the sentinel -> .tlv_type.
+ */
+static struct elem_info wlfw_shadow_reg_cfg_s_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u16),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_shadow_reg_cfg_s_v01,
+					   id),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u16),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_shadow_reg_cfg_s_v01,
+					   offset),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+/* QMI encode/decode table for wlfw_shadow_reg_v2_cfg_s_v01.
+ * Fixed: duplicate .is_array initializer in the sentinel -> .tlv_type.
+ */
+static struct elem_info wlfw_shadow_reg_v2_cfg_s_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_shadow_reg_v2_cfg_s_v01,
+					   addr),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+/* QMI encode/decode table for wlfw_memory_region_info_s_v01.
+ * Fixed: duplicate .is_array initializer in the sentinel -> .tlv_type.
+ */
+static struct elem_info wlfw_memory_region_info_s_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u64),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_memory_region_info_s_v01,
+					   region_addr),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_memory_region_info_s_v01,
+					   size),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_memory_region_info_s_v01,
+					   secure_flag),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+/* QMI encode/decode table for wlfw_rf_chip_info_s_v01.
+ * Fixed: duplicate .is_array initializer in the sentinel -> .tlv_type.
+ */
+static struct elem_info wlfw_rf_chip_info_s_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_rf_chip_info_s_v01,
+					   chip_id),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_rf_chip_info_s_v01,
+					   chip_family),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+/* QMI encode/decode table for wlfw_rf_board_info_s_v01.
+ * Fixed: duplicate .is_array initializer in the sentinel -> .tlv_type.
+ */
+static struct elem_info wlfw_rf_board_info_s_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_rf_board_info_s_v01,
+					   board_id),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+/* QMI encode/decode table for wlfw_soc_info_s_v01.
+ * Fixed: duplicate .is_array initializer in the sentinel -> .tlv_type.
+ */
+static struct elem_info wlfw_soc_info_s_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_soc_info_s_v01,
+					   soc_id),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+/* QMI encode/decode table for wlfw_fw_version_info_s_v01.
+ * Fixed: duplicate .is_array initializer in the sentinel -> .tlv_type.
+ */
+static struct elem_info wlfw_fw_version_info_s_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_fw_version_info_s_v01,
+					   fw_version),
+	},
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1,
+		.elem_size      = sizeof(char),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct wlfw_fw_version_info_s_v01,
+					   fw_build_timestamp),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+/* QMI encode/decode table for wlfw_ind_register_req_msg_v01: each optional
+ * TLV is a (valid-flag, value) pair sharing a tlv_type.
+ * Fixed: duplicate .is_array initializer in the sentinel -> .tlv_type.
+ */
+struct elem_info wlfw_ind_register_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+					   wlfw_ind_register_req_msg_v01,
+					   fw_ready_enable_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   fw_ready_enable),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   initiate_cal_download_enable_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   initiate_cal_download_enable),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   initiate_cal_update_enable_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   initiate_cal_update_enable),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   msa_ready_enable_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   msa_ready_enable),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   pin_connect_result_enable_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   pin_connect_result_enable),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   client_id_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   client_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   request_mem_enable_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   request_mem_enable),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x17,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   fw_mem_ready_enable_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x17,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   fw_mem_ready_enable),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x18,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   cold_boot_cal_done_enable_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x18,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   cold_boot_cal_done_enable),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x19,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   rejuvenate_enable_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x19,
+		.offset         = offsetof(struct wlfw_ind_register_req_msg_v01,
+					   rejuvenate_enable),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+/* QMI encode/decode table for wlfw_ind_register_resp_msg_v01.
+ * Fixed: duplicate .is_array initializer in the sentinel -> .tlv_type.
+ */
+struct elem_info wlfw_ind_register_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+					   wlfw_ind_register_resp_msg_v01,
+					   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+					   wlfw_ind_register_resp_msg_v01,
+					   fw_status_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u64),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+					   wlfw_ind_register_resp_msg_v01,
+					   fw_status),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+/* wlfw_fw_ready_ind_msg_v01 carries no TLVs: sentinel-only table.
+ * Fixed: duplicate .is_array initializer in the sentinel -> .tlv_type.
+ */
+struct elem_info wlfw_fw_ready_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+/* wlfw_msa_ready_ind_msg_v01 carries no TLVs: sentinel-only table.
+ * Fixed: duplicate .is_array initializer in the sentinel -> .tlv_type.
+ */
+struct elem_info wlfw_msa_ready_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+/* QMI encode/decode table for wlfw_pin_connect_result_ind_msg_v01.
+ * Fixed: duplicate .is_array initializer in the sentinel -> .tlv_type.
+ */
+struct elem_info wlfw_pin_connect_result_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+					   wlfw_pin_connect_result_ind_msg_v01,
+					   pwr_pin_result_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+					   wlfw_pin_connect_result_ind_msg_v01,
+					   pwr_pin_result),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct
+					   wlfw_pin_connect_result_ind_msg_v01,
+					   phy_io_pin_result_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct
+					   wlfw_pin_connect_result_ind_msg_v01,
+					   phy_io_pin_result),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct
+					   wlfw_pin_connect_result_ind_msg_v01,
+					   rf_pin_result_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct
+					   wlfw_pin_connect_result_ind_msg_v01,
+					   rf_pin_result),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+/* QMI encode/decode table for wlfw_wlan_mode_req_msg_v01.
+ * Fixed: duplicate .is_array initializer in the sentinel -> .tlv_type.
+ */
+struct elem_info wlfw_wlan_mode_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum wlfw_driver_mode_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_wlan_mode_req_msg_v01,
+					   mode),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_wlan_mode_req_msg_v01,
+					   hw_debug_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_wlan_mode_req_msg_v01,
+					   hw_debug),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+/* QMI encode/decode table for wlfw_wlan_mode_resp_msg_v01.
+ * Fixed: duplicate .is_array initializer in the sentinel -> .tlv_type.
+ */
+struct elem_info wlfw_wlan_mode_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_wlan_mode_resp_msg_v01,
+					   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   host_version_valid),
+	},
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = QMI_WLFW_MAX_STR_LEN_V01 + 1,
+		.elem_size      = sizeof(char),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   host_version),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   tgt_cfg_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   tgt_cfg_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_WLFW_MAX_NUM_CE_V01,
+		.elem_size      = sizeof(struct wlfw_ce_tgt_pipe_cfg_s_v01),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   tgt_cfg),
+		.ei_array       = wlfw_ce_tgt_pipe_cfg_s_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   svc_cfg_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   svc_cfg_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_WLFW_MAX_NUM_SVC_V01,
+		.elem_size      = sizeof(struct wlfw_ce_svc_pipe_cfg_s_v01),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   svc_cfg),
+		.ei_array       = wlfw_ce_svc_pipe_cfg_s_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   shadow_reg_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   shadow_reg_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_WLFW_MAX_NUM_SHADOW_REG_V01,
+		.elem_size      = sizeof(struct wlfw_shadow_reg_cfg_s_v01),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   shadow_reg),
+		.ei_array       = wlfw_shadow_reg_cfg_s_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   shadow_reg_v2_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   shadow_reg_v2_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01,
+		.elem_size      = sizeof(struct wlfw_shadow_reg_v2_cfg_s_v01),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+					   shadow_reg_v2),
+		.ei_array       = wlfw_shadow_reg_v2_cfg_s_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_wlan_cfg_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_wlan_cfg_resp_msg_v01,
+					   resp),
+		.ei_array       = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_cap_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_cap_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   resp),
+		.ei_array       = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   chip_info_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct wlfw_rf_chip_info_s_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   chip_info),
+		.ei_array       = wlfw_rf_chip_info_s_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   board_info_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct wlfw_rf_board_info_s_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   board_info),
+		.ei_array       = wlfw_rf_board_info_s_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   soc_info_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct wlfw_soc_info_s_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   soc_info),
+		.ei_array       = wlfw_soc_info_s_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   fw_version_info_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct wlfw_fw_version_info_s_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   fw_version_info),
+		.ei_array       = wlfw_fw_version_info_s_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   fw_build_id_valid),
+	},
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = QMI_WLFW_MAX_BUILD_ID_LEN_V01 + 1,
+		.elem_size      = sizeof(char),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   fw_build_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   num_macs_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   num_macs),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_bdf_download_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   valid),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   file_id_valid),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum wlfw_cal_temp_id_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   file_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   total_size_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   total_size),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   seg_id_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   seg_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   data_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u16),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   data_len),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = QMI_WLFW_MAX_DATA_SIZE_V01,
+		.elem_size      = sizeof(u8),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   data),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   end_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   end),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   bdf_type_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(struct wlfw_bdf_download_req_msg_v01,
+					   bdf_type),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_bdf_download_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+					   wlfw_bdf_download_resp_msg_v01,
+					   resp),
+		.ei_array       = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_cal_report_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_cal_report_req_msg_v01,
+					   meta_data_len),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = QMI_WLFW_MAX_NUM_CAL_V01,
+		.elem_size      = sizeof(enum wlfw_cal_temp_id_enum_v01),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_cal_report_req_msg_v01,
+					   meta_data),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_cal_report_req_msg_v01,
+					   xo_cal_data_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_cal_report_req_msg_v01,
+					   xo_cal_data),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_cal_report_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_cal_report_resp_msg_v01,
+					   resp),
+		.ei_array       = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum wlfw_cal_temp_id_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(
+				  struct wlfw_initiate_cal_download_ind_msg_v01,
+				  cal_id),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_cal_download_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+					   valid),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+					   file_id_valid),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum wlfw_cal_temp_id_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+					   file_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+					   total_size_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+					   total_size),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+					   seg_id_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+					   seg_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+					   data_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u16),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+					   data_len),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = QMI_WLFW_MAX_DATA_SIZE_V01,
+		.elem_size      = sizeof(u8),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+					   data),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+					   end_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_cal_download_req_msg_v01,
+					   end),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_cal_download_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+					   wlfw_cal_download_resp_msg_v01,
+					   resp),
+		.ei_array       = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum wlfw_cal_temp_id_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct
+					   wlfw_initiate_cal_update_ind_msg_v01,
+					   cal_id),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+					   wlfw_initiate_cal_update_ind_msg_v01,
+					   total_size),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_cal_update_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum wlfw_cal_temp_id_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_cal_update_req_msg_v01,
+					   cal_id),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_cal_update_req_msg_v01,
+					   seg_id),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_cal_update_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+					   resp),
+		.ei_array       = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+					   file_id_valid),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum wlfw_cal_temp_id_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+					   file_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+					   total_size_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+					   total_size),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+					   seg_id_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+					   seg_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+					   data_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u16),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+					   data_len),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = QMI_WLFW_MAX_DATA_SIZE_V01,
+		.elem_size      = sizeof(u8),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+					   data),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+					   end_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlfw_cal_update_resp_msg_v01,
+					   end),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_msa_info_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u64),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_msa_info_req_msg_v01,
+					   msa_addr),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_msa_info_req_msg_v01,
+					   size),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_msa_info_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_msa_info_resp_msg_v01,
+					   resp),
+		.ei_array       = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x03,
+		.offset         = offsetof(struct wlfw_msa_info_resp_msg_v01,
+					   mem_region_info_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01,
+		.elem_size      = sizeof(struct wlfw_memory_region_info_s_v01),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x03,
+		.offset         = offsetof(struct wlfw_msa_info_resp_msg_v01,
+					   mem_region_info),
+		.ei_array       = wlfw_memory_region_info_s_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_msa_ready_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_msa_ready_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_msa_ready_resp_msg_v01,
+					   resp),
+		.ei_array       = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_ini_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_ini_req_msg_v01,
+					   enablefwlog_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_ini_req_msg_v01,
+					   enablefwlog),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_ini_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_ini_resp_msg_v01,
+					   resp),
+		.ei_array       = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_athdiag_read_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_athdiag_read_req_msg_v01,
+					   offset),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_athdiag_read_req_msg_v01,
+					   mem_type),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x03,
+		.offset         = offsetof(struct wlfw_athdiag_read_req_msg_v01,
+					   data_len),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_athdiag_read_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+					   wlfw_athdiag_read_resp_msg_v01,
+					   resp),
+		.ei_array       = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+					   wlfw_athdiag_read_resp_msg_v01,
+					   data_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u16),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+					   wlfw_athdiag_read_resp_msg_v01,
+					   data_len),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01,
+		.elem_size      = sizeof(u8),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+					   wlfw_athdiag_read_resp_msg_v01,
+					   data),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_athdiag_write_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct
+					   wlfw_athdiag_write_req_msg_v01,
+					   offset),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+					   wlfw_athdiag_write_req_msg_v01,
+					   mem_type),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u16),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x03,
+		.offset         = offsetof(struct
+					   wlfw_athdiag_write_req_msg_v01,
+					   data_len),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01,
+		.elem_size      = sizeof(u8),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x03,
+		.offset         = offsetof(struct
+					   wlfw_athdiag_write_req_msg_v01,
+					   data),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_athdiag_write_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+					   wlfw_athdiag_write_resp_msg_v01,
+					   resp),
+		.ei_array       = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_vbatt_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u64),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_vbatt_req_msg_v01,
+					   voltage_uv),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_vbatt_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_vbatt_resp_msg_v01,
+					   resp),
+		.ei_array       = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_mac_addr_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_mac_addr_req_msg_v01,
+					   mac_addr_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = QMI_WLFW_MAC_ADDR_SIZE_V01,
+		.elem_size      = sizeof(u8),
+		.is_array       = STATIC_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_mac_addr_req_msg_v01,
+					   mac_addr),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_mac_addr_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_mac_addr_resp_msg_v01,
+					   resp),
+		.ei_array       = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_host_cap_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
+					   daemon_support_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
+					   daemon_support),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
+					   wake_msi_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_host_cap_req_msg_v01,
+					   wake_msi),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_host_cap_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_host_cap_resp_msg_v01,
+					   resp),
+		.ei_array       = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_request_mem_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_request_mem_ind_msg_v01,
+					   size),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_respond_mem_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u64),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_respond_mem_req_msg_v01,
+					   addr),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_respond_mem_req_msg_v01,
+					   size),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_respond_mem_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_respond_mem_resp_msg_v01,
+					   resp),
+		.ei_array       = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_fw_mem_ready_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_cold_boot_cal_done_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_rejuvenate_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+					   cause_for_rejuvenation_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+					   cause_for_rejuvenation),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+					   requesting_sub_system_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+					   requesting_sub_system),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+					   line_number_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u16),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+					   line_number),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+					   function_name_valid),
+	},
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1,
+		.elem_size      = sizeof(char),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+					   function_name),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+					   wlfw_rejuvenate_ack_resp_msg_v01,
+					   resp),
+		.ei_array       = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+				struct wlfw_dynamic_feature_mask_req_msg_v01,
+				mask_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u64),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+				struct wlfw_dynamic_feature_mask_req_msg_v01,
+				mask),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(
+				struct wlfw_dynamic_feature_mask_resp_msg_v01,
+				resp),
+		.ei_array       = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+				struct wlfw_dynamic_feature_mask_resp_msg_v01,
+				prev_mask_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u64),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+				struct wlfw_dynamic_feature_mask_resp_msg_v01,
+				prev_mask),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+				struct wlfw_dynamic_feature_mask_resp_msg_v01,
+				curr_mask_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u64),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+				struct wlfw_dynamic_feature_mask_resp_msg_v01,
+				curr_mask),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_m3_info_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u64),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_m3_info_req_msg_v01,
+					   addr),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_m3_info_req_msg_v01,
+					   size),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_m3_info_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_m3_info_resp_msg_v01,
+					   resp),
+		.ei_array       = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info wlfw_xo_cal_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct wlfw_xo_cal_ind_msg_v01,
+					   xo_cal_data),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
diff --git a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h
new file mode 100644
index 0000000..9b56eb0
--- /dev/null
+++ b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h
@@ -0,0 +1,657 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef WLAN_FIRMWARE_SERVICE_V01_H
+#define WLAN_FIRMWARE_SERVICE_V01_H
+
+#include <linux/qmi_encdec.h>
+#include <soc/qcom/msm_qmi_interface.h>
+
+#define WLFW_SERVICE_ID_V01 0x45
+#define WLFW_SERVICE_VERS_V01 0x01
+
+#define QMI_WLFW_BDF_DOWNLOAD_REQ_V01 0x0025
+#define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037
+#define QMI_WLFW_INITIATE_CAL_UPDATE_IND_V01 0x002A
+#define QMI_WLFW_HOST_CAP_REQ_V01 0x0034
+#define QMI_WLFW_DYNAMIC_FEATURE_MASK_RESP_V01 0x003B
+#define QMI_WLFW_M3_INFO_REQ_V01 0x003C
+#define QMI_WLFW_CAP_REQ_V01 0x0024
+#define QMI_WLFW_CAL_REPORT_REQ_V01 0x0026
+#define QMI_WLFW_M3_INFO_RESP_V01 0x003C
+#define QMI_WLFW_CAL_UPDATE_RESP_V01 0x0029
+#define QMI_WLFW_CAL_DOWNLOAD_RESP_V01 0x0027
+#define QMI_WLFW_XO_CAL_IND_V01 0x003D
+#define QMI_WLFW_INI_RESP_V01 0x002F
+#define QMI_WLFW_CAL_REPORT_RESP_V01 0x0026
+#define QMI_WLFW_MAC_ADDR_RESP_V01 0x0033
+#define QMI_WLFW_INITIATE_CAL_DOWNLOAD_IND_V01 0x0028
+#define QMI_WLFW_HOST_CAP_RESP_V01 0x0034
+#define QMI_WLFW_MSA_READY_IND_V01 0x002B
+#define QMI_WLFW_ATHDIAG_WRITE_RESP_V01 0x0031
+#define QMI_WLFW_WLAN_MODE_REQ_V01 0x0022
+#define QMI_WLFW_IND_REGISTER_REQ_V01 0x0020
+#define QMI_WLFW_WLAN_CFG_RESP_V01 0x0023
+#define QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01 0x0038
+#define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035
+#define QMI_WLFW_REJUVENATE_IND_V01 0x0039
+#define QMI_WLFW_DYNAMIC_FEATURE_MASK_REQ_V01 0x003B
+#define QMI_WLFW_ATHDIAG_WRITE_REQ_V01 0x0031
+#define QMI_WLFW_WLAN_MODE_RESP_V01 0x0022
+#define QMI_WLFW_RESPOND_MEM_REQ_V01 0x0036
+#define QMI_WLFW_PIN_CONNECT_RESULT_IND_V01 0x002C
+#define QMI_WLFW_FW_READY_IND_V01 0x0021
+#define QMI_WLFW_MSA_READY_RESP_V01 0x002E
+#define QMI_WLFW_CAL_UPDATE_REQ_V01 0x0029
+#define QMI_WLFW_INI_REQ_V01 0x002F
+#define QMI_WLFW_BDF_DOWNLOAD_RESP_V01 0x0025
+#define QMI_WLFW_REJUVENATE_ACK_RESP_V01 0x003A
+#define QMI_WLFW_MSA_INFO_RESP_V01 0x002D
+#define QMI_WLFW_MSA_READY_REQ_V01 0x002E
+#define QMI_WLFW_CAP_RESP_V01 0x0024
+#define QMI_WLFW_REJUVENATE_ACK_REQ_V01 0x003A
+#define QMI_WLFW_ATHDIAG_READ_RESP_V01 0x0030
+#define QMI_WLFW_VBATT_REQ_V01 0x0032
+#define QMI_WLFW_MAC_ADDR_REQ_V01 0x0033
+#define QMI_WLFW_RESPOND_MEM_RESP_V01 0x0036
+#define QMI_WLFW_VBATT_RESP_V01 0x0032
+#define QMI_WLFW_MSA_INFO_REQ_V01 0x002D
+#define QMI_WLFW_CAL_DOWNLOAD_REQ_V01 0x0027
+#define QMI_WLFW_ATHDIAG_READ_REQ_V01 0x0030
+#define QMI_WLFW_WLAN_CFG_REQ_V01 0x0023
+#define QMI_WLFW_IND_REGISTER_RESP_V01 0x0020
+
+#define QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01 2
+#define QMI_WLFW_MAX_NUM_CAL_V01 5
+#define QMI_WLFW_MAX_DATA_SIZE_V01 6144
+#define QMI_WLFW_FUNCTION_NAME_LEN_V01 128
+#define QMI_WLFW_MAX_NUM_CE_V01 12
+#define QMI_WLFW_MAX_TIMESTAMP_LEN_V01 32
+#define QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01 6144
+#define QMI_WLFW_MAX_BUILD_ID_LEN_V01 128
+#define QMI_WLFW_MAX_STR_LEN_V01 16
+#define QMI_WLFW_MAX_NUM_SHADOW_REG_V01 24
+#define QMI_WLFW_MAC_ADDR_SIZE_V01 6
+#define QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01 36
+#define QMI_WLFW_MAX_NUM_SVC_V01 24
+
+enum wlfw_driver_mode_enum_v01 {
+	WLFW_DRIVER_MODE_ENUM_MIN_VAL_V01 = INT_MIN,
+	QMI_WLFW_MISSION_V01 = 0,
+	QMI_WLFW_FTM_V01 = 1,
+	QMI_WLFW_EPPING_V01 = 2,
+	QMI_WLFW_WALTEST_V01 = 3,
+	QMI_WLFW_OFF_V01 = 4,
+	QMI_WLFW_CCPM_V01 = 5,
+	QMI_WLFW_QVIT_V01 = 6,
+	QMI_WLFW_CALIBRATION_V01 = 7,
+	WLFW_DRIVER_MODE_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+enum wlfw_cal_temp_id_enum_v01 {
+	WLFW_CAL_TEMP_ID_ENUM_MIN_VAL_V01 = INT_MIN,
+	QMI_WLFW_CAL_TEMP_IDX_0_V01 = 0,
+	QMI_WLFW_CAL_TEMP_IDX_1_V01 = 1,
+	QMI_WLFW_CAL_TEMP_IDX_2_V01 = 2,
+	QMI_WLFW_CAL_TEMP_IDX_3_V01 = 3,
+	QMI_WLFW_CAL_TEMP_IDX_4_V01 = 4,
+	WLFW_CAL_TEMP_ID_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+enum wlfw_pipedir_enum_v01 {
+	WLFW_PIPEDIR_ENUM_MIN_VAL_V01 = INT_MIN,
+	QMI_WLFW_PIPEDIR_NONE_V01 = 0,
+	QMI_WLFW_PIPEDIR_IN_V01 = 1,
+	QMI_WLFW_PIPEDIR_OUT_V01 = 2,
+	QMI_WLFW_PIPEDIR_INOUT_V01 = 3,
+	WLFW_PIPEDIR_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+#define QMI_WLFW_CE_ATTR_FLAGS_V01 ((u32)0x00)
+#define QMI_WLFW_CE_ATTR_NO_SNOOP_V01 ((u32)0x01)
+#define QMI_WLFW_CE_ATTR_BYTE_SWAP_DATA_V01 ((u32)0x02)
+#define QMI_WLFW_CE_ATTR_SWIZZLE_DESCRIPTORS_V01 ((u32)0x04)
+#define QMI_WLFW_CE_ATTR_DISABLE_INTR_V01 ((u32)0x08)
+#define QMI_WLFW_CE_ATTR_ENABLE_POLL_V01 ((u32)0x10)
+
+#define QMI_WLFW_ALREADY_REGISTERED_V01 ((u64)0x01ULL)
+#define QMI_WLFW_FW_READY_V01 ((u64)0x02ULL)
+#define QMI_WLFW_MSA_READY_V01 ((u64)0x04ULL)
+#define QMI_WLFW_FW_MEM_READY_V01 ((u64)0x08ULL)
+
+#define QMI_WLFW_FW_REJUVENATE_V01 ((u64)0x01ULL)
+
+struct wlfw_ce_tgt_pipe_cfg_s_v01 {
+	u32 pipe_num;
+	enum wlfw_pipedir_enum_v01 pipe_dir;
+	u32 nentries;
+	u32 nbytes_max;
+	u32 flags;
+};
+
+struct wlfw_ce_svc_pipe_cfg_s_v01 {
+	u32 service_id;
+	enum wlfw_pipedir_enum_v01 pipe_dir;
+	u32 pipe_num;
+};
+
+struct wlfw_shadow_reg_cfg_s_v01 {
+	u16 id;
+	u16 offset;
+};
+
+struct wlfw_shadow_reg_v2_cfg_s_v01 {
+	u32 addr;
+};
+
+struct wlfw_memory_region_info_s_v01 {
+	u64 region_addr;
+	u32 size;
+	u8 secure_flag;
+};
+
+struct wlfw_rf_chip_info_s_v01 {
+	u32 chip_id;
+	u32 chip_family;
+};
+
+struct wlfw_rf_board_info_s_v01 {
+	u32 board_id;
+};
+
+struct wlfw_soc_info_s_v01 {
+	u32 soc_id;
+};
+
+struct wlfw_fw_version_info_s_v01 {
+	u32 fw_version;
+	char fw_build_timestamp[QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1];
+};
+
+struct wlfw_ind_register_req_msg_v01 {
+	u8 fw_ready_enable_valid;
+	u8 fw_ready_enable;
+	u8 initiate_cal_download_enable_valid;
+	u8 initiate_cal_download_enable;
+	u8 initiate_cal_update_enable_valid;
+	u8 initiate_cal_update_enable;
+	u8 msa_ready_enable_valid;
+	u8 msa_ready_enable;
+	u8 pin_connect_result_enable_valid;
+	u8 pin_connect_result_enable;
+	u8 client_id_valid;
+	u32 client_id;
+	u8 request_mem_enable_valid;
+	u8 request_mem_enable;
+	u8 fw_mem_ready_enable_valid;
+	u8 fw_mem_ready_enable;
+	u8 cold_boot_cal_done_enable_valid;
+	u8 cold_boot_cal_done_enable;
+	u8 rejuvenate_enable_valid;
+	u32 rejuvenate_enable;
+};
+
+#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 46
+extern struct elem_info wlfw_ind_register_req_msg_v01_ei[];
+
+struct wlfw_ind_register_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	u8 fw_status_valid;
+	u64 fw_status;
+};
+
+#define WLFW_IND_REGISTER_RESP_MSG_V01_MAX_MSG_LEN 18
+extern struct elem_info wlfw_ind_register_resp_msg_v01_ei[];
+
+struct wlfw_fw_ready_ind_msg_v01 {
+	char placeholder;
+};
+
+#define WLFW_FW_READY_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_fw_ready_ind_msg_v01_ei[];
+
+struct wlfw_msa_ready_ind_msg_v01 {
+	char placeholder;
+};
+
+#define WLFW_MSA_READY_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_msa_ready_ind_msg_v01_ei[];
+
+struct wlfw_pin_connect_result_ind_msg_v01 {
+	u8 pwr_pin_result_valid;
+	u32 pwr_pin_result;
+	u8 phy_io_pin_result_valid;
+	u32 phy_io_pin_result;
+	u8 rf_pin_result_valid;
+	u32 rf_pin_result;
+};
+
+#define WLFW_PIN_CONNECT_RESULT_IND_MSG_V01_MAX_MSG_LEN 21
+extern struct elem_info wlfw_pin_connect_result_ind_msg_v01_ei[];
+
+struct wlfw_wlan_mode_req_msg_v01 {
+	enum wlfw_driver_mode_enum_v01 mode;
+	u8 hw_debug_valid;
+	u8 hw_debug;
+};
+
+#define WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN 11
+extern struct elem_info wlfw_wlan_mode_req_msg_v01_ei[];
+
+struct wlfw_wlan_mode_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_WLAN_MODE_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_wlan_mode_resp_msg_v01_ei[];
+
+struct wlfw_wlan_cfg_req_msg_v01 {
+	u8 host_version_valid;
+	char host_version[QMI_WLFW_MAX_STR_LEN_V01 + 1];
+	u8 tgt_cfg_valid;
+	u32 tgt_cfg_len;
+	struct wlfw_ce_tgt_pipe_cfg_s_v01 tgt_cfg[QMI_WLFW_MAX_NUM_CE_V01];
+	u8 svc_cfg_valid;
+	u32 svc_cfg_len;
+	struct wlfw_ce_svc_pipe_cfg_s_v01 svc_cfg[QMI_WLFW_MAX_NUM_SVC_V01];
+	u8 shadow_reg_valid;
+	u32 shadow_reg_len;
+	struct wlfw_shadow_reg_cfg_s_v01
+		shadow_reg[QMI_WLFW_MAX_NUM_SHADOW_REG_V01];
+	u8 shadow_reg_v2_valid;
+	u32 shadow_reg_v2_len;
+	struct wlfw_shadow_reg_v2_cfg_s_v01
+		shadow_reg_v2[QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01];
+};
+
+#define WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN 803
+extern struct elem_info wlfw_wlan_cfg_req_msg_v01_ei[];
+
+struct wlfw_wlan_cfg_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_WLAN_CFG_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_wlan_cfg_resp_msg_v01_ei[];
+
+struct wlfw_cap_req_msg_v01 {
+	char placeholder;
+};
+
+#define WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_cap_req_msg_v01_ei[];
+
+struct wlfw_cap_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	u8 chip_info_valid;
+	struct wlfw_rf_chip_info_s_v01 chip_info;
+	u8 board_info_valid;
+	struct wlfw_rf_board_info_s_v01 board_info;
+	u8 soc_info_valid;
+	struct wlfw_soc_info_s_v01 soc_info;
+	u8 fw_version_info_valid;
+	struct wlfw_fw_version_info_s_v01 fw_version_info;
+	u8 fw_build_id_valid;
+	char fw_build_id[QMI_WLFW_MAX_BUILD_ID_LEN_V01 + 1];
+	u8 num_macs_valid;
+	u8 num_macs;
+};
+
+#define WLFW_CAP_RESP_MSG_V01_MAX_MSG_LEN 207
+extern struct elem_info wlfw_cap_resp_msg_v01_ei[];
+
+struct wlfw_bdf_download_req_msg_v01 {
+	u8 valid;
+	u8 file_id_valid;
+	enum wlfw_cal_temp_id_enum_v01 file_id;
+	u8 total_size_valid;
+	u32 total_size;
+	u8 seg_id_valid;
+	u32 seg_id;
+	u8 data_valid;
+	u32 data_len;
+	u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+	u8 end_valid;
+	u8 end;
+	u8 bdf_type_valid;
+	u8 bdf_type;
+};
+
+#define WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6182
+extern struct elem_info wlfw_bdf_download_req_msg_v01_ei[];
+
+struct wlfw_bdf_download_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_bdf_download_resp_msg_v01_ei[];
+
+struct wlfw_cal_report_req_msg_v01 {
+	u32 meta_data_len;
+	enum wlfw_cal_temp_id_enum_v01 meta_data[QMI_WLFW_MAX_NUM_CAL_V01];
+	u8 xo_cal_data_valid;
+	u8 xo_cal_data;
+};
+
+#define WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN 28
+extern struct elem_info wlfw_cal_report_req_msg_v01_ei[];
+
+struct wlfw_cal_report_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_CAL_REPORT_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_cal_report_resp_msg_v01_ei[];
+
+struct wlfw_initiate_cal_download_ind_msg_v01 {
+	enum wlfw_cal_temp_id_enum_v01 cal_id;
+};
+
+#define WLFW_INITIATE_CAL_DOWNLOAD_IND_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[];
+
+struct wlfw_cal_download_req_msg_v01 {
+	u8 valid;
+	u8 file_id_valid;
+	enum wlfw_cal_temp_id_enum_v01 file_id;
+	u8 total_size_valid;
+	u32 total_size;
+	u8 seg_id_valid;
+	u32 seg_id;
+	u8 data_valid;
+	u32 data_len;
+	u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+	u8 end_valid;
+	u8 end;
+};
+
+#define WLFW_CAL_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6178
+extern struct elem_info wlfw_cal_download_req_msg_v01_ei[];
+
+struct wlfw_cal_download_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_CAL_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_cal_download_resp_msg_v01_ei[];
+
+struct wlfw_initiate_cal_update_ind_msg_v01 {
+	enum wlfw_cal_temp_id_enum_v01 cal_id;
+	u32 total_size;
+};
+
+#define WLFW_INITIATE_CAL_UPDATE_IND_MSG_V01_MAX_MSG_LEN 14
+extern struct elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[];
+
+struct wlfw_cal_update_req_msg_v01 {
+	enum wlfw_cal_temp_id_enum_v01 cal_id;
+	u32 seg_id;
+};
+
+#define WLFW_CAL_UPDATE_REQ_MSG_V01_MAX_MSG_LEN 14
+extern struct elem_info wlfw_cal_update_req_msg_v01_ei[];
+
+struct wlfw_cal_update_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	u8 file_id_valid;
+	enum wlfw_cal_temp_id_enum_v01 file_id;
+	u8 total_size_valid;
+	u32 total_size;
+	u8 seg_id_valid;
+	u32 seg_id;
+	u8 data_valid;
+	u32 data_len;
+	u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+	u8 end_valid;
+	u8 end;
+};
+
+#define WLFW_CAL_UPDATE_RESP_MSG_V01_MAX_MSG_LEN 6181
+extern struct elem_info wlfw_cal_update_resp_msg_v01_ei[];
+
+struct wlfw_msa_info_req_msg_v01 {
+	u64 msa_addr;
+	u32 size;
+};
+
+#define WLFW_MSA_INFO_REQ_MSG_V01_MAX_MSG_LEN 18
+extern struct elem_info wlfw_msa_info_req_msg_v01_ei[];
+
+struct wlfw_msa_info_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	u32 mem_region_info_len;
+	struct wlfw_memory_region_info_s_v01
+		mem_region_info[QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01];
+};
+
+#define WLFW_MSA_INFO_RESP_MSG_V01_MAX_MSG_LEN 37
+extern struct elem_info wlfw_msa_info_resp_msg_v01_ei[];
+
+struct wlfw_msa_ready_req_msg_v01 {
+	char placeholder;
+};
+
+#define WLFW_MSA_READY_REQ_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_msa_ready_req_msg_v01_ei[];
+
+struct wlfw_msa_ready_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_MSA_READY_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_msa_ready_resp_msg_v01_ei[];
+
+struct wlfw_ini_req_msg_v01 {
+	u8 enablefwlog_valid;
+	u8 enablefwlog;
+};
+
+#define WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN 4
+extern struct elem_info wlfw_ini_req_msg_v01_ei[];
+
+struct wlfw_ini_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_INI_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_ini_resp_msg_v01_ei[];
+
+struct wlfw_athdiag_read_req_msg_v01 {
+	u32 offset;
+	u32 mem_type;
+	u32 data_len;
+};
+
+#define WLFW_ATHDIAG_READ_REQ_MSG_V01_MAX_MSG_LEN 21
+extern struct elem_info wlfw_athdiag_read_req_msg_v01_ei[];
+
+struct wlfw_athdiag_read_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	u8 data_valid;
+	u32 data_len;
+	u8 data[QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01];
+};
+
+#define WLFW_ATHDIAG_READ_RESP_MSG_V01_MAX_MSG_LEN 6156
+extern struct elem_info wlfw_athdiag_read_resp_msg_v01_ei[];
+
+struct wlfw_athdiag_write_req_msg_v01 {
+	u32 offset;
+	u32 mem_type;
+	u32 data_len;
+	u8 data[QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01];
+};
+
+#define WLFW_ATHDIAG_WRITE_REQ_MSG_V01_MAX_MSG_LEN 6163
+extern struct elem_info wlfw_athdiag_write_req_msg_v01_ei[];
+
+struct wlfw_athdiag_write_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_ATHDIAG_WRITE_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_athdiag_write_resp_msg_v01_ei[];
+
+struct wlfw_vbatt_req_msg_v01 {
+	u64 voltage_uv;
+};
+
+#define WLFW_VBATT_REQ_MSG_V01_MAX_MSG_LEN 11
+extern struct elem_info wlfw_vbatt_req_msg_v01_ei[];
+
+struct wlfw_vbatt_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_VBATT_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_vbatt_resp_msg_v01_ei[];
+
+struct wlfw_mac_addr_req_msg_v01 {
+	u8 mac_addr_valid;
+	u8 mac_addr[QMI_WLFW_MAC_ADDR_SIZE_V01];
+};
+
+#define WLFW_MAC_ADDR_REQ_MSG_V01_MAX_MSG_LEN 9
+extern struct elem_info wlfw_mac_addr_req_msg_v01_ei[];
+
+struct wlfw_mac_addr_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_MAC_ADDR_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_mac_addr_resp_msg_v01_ei[];
+
+struct wlfw_host_cap_req_msg_v01 {
+	u8 daemon_support_valid;
+	u8 daemon_support;
+	u8 wake_msi_valid;
+	u32 wake_msi;
+};
+
+#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 11
+extern struct elem_info wlfw_host_cap_req_msg_v01_ei[];
+
+struct wlfw_host_cap_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_HOST_CAP_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_host_cap_resp_msg_v01_ei[];
+
+struct wlfw_request_mem_ind_msg_v01 {
+	u32 size;
+};
+
+#define WLFW_REQUEST_MEM_IND_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_request_mem_ind_msg_v01_ei[];
+
+struct wlfw_respond_mem_req_msg_v01 {
+	u64 addr;
+	u32 size;
+};
+
+#define WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN 18
+extern struct elem_info wlfw_respond_mem_req_msg_v01_ei[];
+
+struct wlfw_respond_mem_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_RESPOND_MEM_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_respond_mem_resp_msg_v01_ei[];
+
+struct wlfw_fw_mem_ready_ind_msg_v01 {
+	char placeholder;
+};
+
+#define WLFW_FW_MEM_READY_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_fw_mem_ready_ind_msg_v01_ei[];
+
+struct wlfw_cold_boot_cal_done_ind_msg_v01 {
+	char placeholder;
+};
+
+#define WLFW_COLD_BOOT_CAL_DONE_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_cold_boot_cal_done_ind_msg_v01_ei[];
+
+struct wlfw_rejuvenate_ind_msg_v01 {
+	u8 cause_for_rejuvenation_valid;
+	u8 cause_for_rejuvenation;
+	u8 requesting_sub_system_valid;
+	u8 requesting_sub_system;
+	u8 line_number_valid;
+	u16 line_number;
+	u8 function_name_valid;
+	char function_name[QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1];
+};
+
+#define WLFW_REJUVENATE_IND_MSG_V01_MAX_MSG_LEN 144
+extern struct elem_info wlfw_rejuvenate_ind_msg_v01_ei[];
+
+struct wlfw_rejuvenate_ack_req_msg_v01 {
+	char placeholder;
+};
+
+#define WLFW_REJUVENATE_ACK_REQ_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[];
+
+struct wlfw_rejuvenate_ack_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_REJUVENATE_ACK_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[];
+
+struct wlfw_dynamic_feature_mask_req_msg_v01 {
+	u8 mask_valid;
+	u64 mask;
+};
+
+#define WLFW_DYNAMIC_FEATURE_MASK_REQ_MSG_V01_MAX_MSG_LEN 11
+extern struct elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[];
+
+struct wlfw_dynamic_feature_mask_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	u8 prev_mask_valid;
+	u64 prev_mask;
+	u8 curr_mask_valid;
+	u64 curr_mask;
+};
+
+#define WLFW_DYNAMIC_FEATURE_MASK_RESP_MSG_V01_MAX_MSG_LEN 29
+extern struct elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[];
+
+struct wlfw_m3_info_req_msg_v01 {
+	u64 addr;
+	u32 size;
+};
+
+#define WLFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN 18
+extern struct elem_info wlfw_m3_info_req_msg_v01_ei[];
+
+struct wlfw_m3_info_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_M3_INFO_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_m3_info_resp_msg_v01_ei[];
+
+struct wlfw_xo_cal_ind_msg_v01 {
+	u8 xo_cal_data;
+};
+
+#define WLFW_XO_CAL_IND_MSG_V01_MAX_MSG_LEN 4
+extern struct elem_info wlfw_xo_cal_ind_msg_v01_ei[];
+
+#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 9789f3c..f1231c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -2320,7 +2320,7 @@
 {
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 
-	/* Called when we need to transmit (a) frame(s) from agg queue */
+	/* Called when we need to transmit (a) frame(s) from agg or dqa queue */
 
 	iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
 					  tids, more_data, true);
@@ -2340,7 +2340,8 @@
 	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
 		struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
 
-		if (tid_data->state != IWL_AGG_ON &&
+		if (!iwl_mvm_is_dqa_supported(mvm) &&
+		    tid_data->state != IWL_AGG_ON &&
 		    tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
 			continue;
 
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index e64aeb4..bdd1dee 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -3032,7 +3032,7 @@
 				       struct ieee80211_sta *sta,
 				       enum ieee80211_frame_release_type reason,
 				       u16 cnt, u16 tids, bool more_data,
-				       bool agg)
+				       bool single_sta_queue)
 {
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	struct iwl_mvm_add_sta_cmd cmd = {
@@ -3052,14 +3052,14 @@
 	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
 		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
 
-	/* If we're releasing frames from aggregation queues then check if the
-	 * all queues combined that we're releasing frames from have
+	/* If we're releasing frames from aggregation or dqa queues then check
+	 * if all the queues that we're releasing frames from, combined, have:
 	 *  - more frames than the service period, in which case more_data
 	 *    needs to be set
 	 *  - fewer than 'cnt' frames, in which case we need to adjust the
 	 *    firmware command (but do that unconditionally)
 	 */
-	if (agg) {
+	if (single_sta_queue) {
 		int remaining = cnt;
 		int sleep_tx_count;
 
@@ -3069,7 +3069,8 @@
 			u16 n_queued;
 
 			tid_data = &mvmsta->tid_data[tid];
-			if (WARN(tid_data->state != IWL_AGG_ON &&
+			if (WARN(!iwl_mvm_is_dqa_supported(mvm) &&
+				 tid_data->state != IWL_AGG_ON &&
 				 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
 				 "TID %d state is %d\n",
 				 tid, tid_data->state)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index e068d53..f65950e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -545,7 +545,7 @@
 				       struct ieee80211_sta *sta,
 				       enum ieee80211_frame_release_type reason,
 				       u16 cnt, u16 tids, bool more_data,
-				       bool agg);
+				       bool single_sta_queue);
 int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
 		      bool drain);
 void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 092ae00..7465d4d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016        Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -34,6 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -621,8 +622,10 @@
 	 * values.
 	 * Note that we don't need to make sure it isn't agg'd, since we're
 	 * TXing non-sta
+	 * For DQA mode - we shouldn't increase it though
 	 */
-	atomic_inc(&mvm->pending_frames[sta_id]);
+	if (!iwl_mvm_is_dqa_supported(mvm))
+		atomic_inc(&mvm->pending_frames[sta_id]);
 
 	return 0;
 }
@@ -1009,11 +1012,8 @@
 
 	spin_unlock(&mvmsta->lock);
 
-	/* Increase pending frames count if this isn't AMPDU */
-	if ((iwl_mvm_is_dqa_supported(mvm) &&
-	     mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_ON &&
-	     mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_STARTING) ||
-	    (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu))
+	/* Increase pending frames count if this isn't AMPDU or DQA queue */
+	if (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu)
 		atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);
 
 	return 0;
@@ -1083,12 +1083,13 @@
 	lockdep_assert_held(&mvmsta->lock);
 
 	if ((tid_data->state == IWL_AGG_ON ||
-	     tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) &&
+	     tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA ||
+	     iwl_mvm_is_dqa_supported(mvm)) &&
 	    iwl_mvm_tid_queued(tid_data) == 0) {
 		/*
-		 * Now that this aggregation queue is empty tell mac80211 so it
-		 * knows we no longer have frames buffered for the station on
-		 * this TID (for the TIM bitmap calculation.)
+		 * Now that this aggregation or DQA queue is empty tell
+		 * mac80211 so it knows we no longer have frames buffered for
+		 * the station on this TID (for the TIM bitmap calculation.)
 		 */
 		ieee80211_sta_set_buffered(sta, tid, false);
 	}
@@ -1261,7 +1262,6 @@
 	u8 skb_freed = 0;
 	u16 next_reclaimed, seq_ctl;
 	bool is_ndp = false;
-	bool txq_agg = false; /* Is this TXQ aggregated */
 
 	__skb_queue_head_init(&skbs);
 
@@ -1287,6 +1287,10 @@
 			info->flags |= IEEE80211_TX_STAT_ACK;
 			break;
 		case TX_STATUS_FAIL_DEST_PS:
+			/* In DQA, the FW should have stopped the queue and not
+			 * return this status
+			 */
+			WARN_ON(iwl_mvm_is_dqa_supported(mvm));
 			info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
 			break;
 		default:
@@ -1391,15 +1395,6 @@
 			bool send_eosp_ndp = false;
 
 			spin_lock_bh(&mvmsta->lock);
-			if (iwl_mvm_is_dqa_supported(mvm)) {
-				enum iwl_mvm_agg_state state;
-
-				state = mvmsta->tid_data[tid].state;
-				txq_agg = (state == IWL_AGG_ON ||
-					state == IWL_EMPTYING_HW_QUEUE_DELBA);
-			} else {
-				txq_agg = txq_id >= mvm->first_agg_queue;
-			}
 
 			if (!is_ndp) {
 				tid_data->next_reclaimed = next_reclaimed;
@@ -1456,11 +1451,11 @@
 	 * If the txq is not an AMPDU queue, there is no chance we freed
 	 * several skbs. Check that out...
 	 */
-	if (txq_agg)
+	if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue)
 		goto out;
 
 	/* We can't free more than one frame at once on a shared queue */
-	WARN_ON(!iwl_mvm_is_dqa_supported(mvm) && (skb_freed > 1));
+	WARN_ON(skb_freed > 1);
 
 	/* If we have still frames for this STA nothing to do here */
 	if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id]))
diff --git a/drivers/net/wireless/intersil/p54/main.c b/drivers/net/wireless/intersil/p54/main.c
index d5a3bf9..ab6d39e 100644
--- a/drivers/net/wireless/intersil/p54/main.c
+++ b/drivers/net/wireless/intersil/p54/main.c
@@ -852,12 +852,11 @@
 {
 	struct p54_common *priv = dev->priv;
 
-#ifdef CONFIG_P54_LEDS
-	p54_unregister_leds(priv);
-#endif /* CONFIG_P54_LEDS */
-
 	if (priv->registered) {
 		priv->registered = false;
+#ifdef CONFIG_P54_LEDS
+		p54_unregister_leds(priv);
+#endif /* CONFIG_P54_LEDS */
 		ieee80211_unregister_hw(dev);
 	}
 
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index d2a28a9..4b462dc 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -3047,6 +3047,7 @@
 {
 	struct hwsim_new_radio_params param = { 0 };
 	const char *hwname = NULL;
+	int ret;
 
 	param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
 	param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE];
@@ -3086,7 +3087,9 @@
 		param.regd = hwsim_world_regdom_custom[idx];
 	}
 
-	return mac80211_hwsim_new_radio(info, &param);
+	ret = mac80211_hwsim_new_radio(info, &param);
+	kfree(hwname);
+	return ret;
 }
 
 static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index 8718950..8d601dc 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -2296,6 +2296,12 @@
 	mmc_hw_reset(func->card->host);
 	sdio_release_host(func);
 
+	/* Previous save_adapter won't be valid after this. We will cancel
+	 * pending work requests.
+	 */
+	clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags);
+	clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &iface_work_flags);
+
 	mwifiex_sdio_probe(func, device_id);
 }
 
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index bf3f0a3..9fc6f16 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -4707,8 +4707,8 @@
 		rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 2);
 	else
 		rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 1);
-	rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_PSDU, 0);
-	rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_MPDU, 0);
+	rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_PSDU, 10);
+	rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_MPDU, 10);
 	rt2800_register_write(rt2x00dev, MAX_LEN_CFG, reg);
 
 	rt2800_register_read(rt2x00dev, LED_CFG, &reg);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
index 631df69..f57bb2c 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
@@ -57,7 +57,7 @@
 		if (status >= 0)
 			return 0;
 
-		if (status == -ENODEV) {
+		if (status == -ENODEV || status == -ENOENT) {
 			/* Device has disappeared. */
 			clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
 			break;
@@ -321,7 +321,7 @@
 
 	status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
 	if (status) {
-		if (status == -ENODEV)
+		if (status == -ENODEV || status == -ENOENT)
 			clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
 		set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
 		rt2x00lib_dmadone(entry);
@@ -410,7 +410,7 @@
 
 	status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
 	if (status) {
-		if (status == -ENODEV)
+		if (status == -ENODEV || status == -ENOENT)
 			clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
 		set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
 		rt2x00lib_dmadone(entry);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
index b3f6a9e..27a0e50 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
@@ -664,7 +664,7 @@
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
 	struct sk_buff *skb = NULL;
-
+	bool rtstatus;
 	u32 totalpacketlen;
 	u8 u1rsvdpageloc[5] = { 0 };
 	bool b_dlok = false;
@@ -727,7 +727,9 @@
 	memcpy((u8 *)skb_put(skb, totalpacketlen),
 	       &reserved_page_packet, totalpacketlen);
 
-	b_dlok = true;
+	rtstatus = rtl_cmd_send_packet(hw, skb);
+	if (rtstatus)
+		b_dlok = true;
 
 	if (b_dlok) {
 		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD ,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index 1281ebe..82d5389 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -1378,6 +1378,7 @@
 
 	ppsc->wakeup_reason = 0;
 
+	do_gettimeofday(&ts);
 	rtlhal->last_suspend_sec = ts.tv_sec;
 
 	switch (fw_reason) {
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index cb7365b..5b1d2e8 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -113,10 +113,10 @@
 	 * A subset of struct net_device_stats that contains only the
 	 * fields that are updated in netback.c for each queue.
 	 */
-	unsigned int rx_bytes;
-	unsigned int rx_packets;
-	unsigned int tx_bytes;
-	unsigned int tx_packets;
+	u64 rx_bytes;
+	u64 rx_packets;
+	u64 tx_bytes;
+	u64 tx_packets;
 
 	/* Additional stats used by xenvif */
 	unsigned long rx_gso_checksum_fixup;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 5bfaf55..618013e7 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -225,10 +225,10 @@
 {
 	struct xenvif *vif = netdev_priv(dev);
 	struct xenvif_queue *queue = NULL;
-	unsigned long rx_bytes = 0;
-	unsigned long rx_packets = 0;
-	unsigned long tx_bytes = 0;
-	unsigned long tx_packets = 0;
+	u64 rx_bytes = 0;
+	u64 rx_packets = 0;
+	u64 tx_bytes = 0;
+	u64 tx_packets = 0;
 	unsigned int index;
 
 	spin_lock(&vif->lock);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index cd442e4..8d498a9 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1854,27 +1854,19 @@
 		xennet_destroy_queues(info);
 
 	err = xennet_create_queues(info, &num_queues);
-	if (err < 0)
-		goto destroy_ring;
+	if (err < 0) {
+		xenbus_dev_fatal(dev, err, "creating queues");
+		kfree(info->queues);
+		info->queues = NULL;
+		goto out;
+	}
 
 	/* Create shared ring, alloc event channel -- for each queue */
 	for (i = 0; i < num_queues; ++i) {
 		queue = &info->queues[i];
 		err = setup_netfront(dev, queue, feature_split_evtchn);
-		if (err) {
-			/* setup_netfront() will tidy up the current
-			 * queue on error, but we need to clean up
-			 * those already allocated.
-			 */
-			if (i > 0) {
-				rtnl_lock();
-				netif_set_real_num_tx_queues(info->netdev, i);
-				rtnl_unlock();
-				goto destroy_ring;
-			} else {
-				goto out;
-			}
-		}
+		if (err)
+			goto destroy_ring;
 	}
 
 again:
@@ -1964,9 +1956,9 @@
 	xenbus_transaction_end(xbt, 1);
  destroy_ring:
 	xennet_disconnect_backend(info);
-	kfree(info->queues);
-	info->queues = NULL;
+	xennet_destroy_queues(info);
  out:
+	device_unregister(&dev->dev);
 	return err;
 }
 
diff --git a/drivers/nfc/nq-nci.c b/drivers/nfc/nq-nci.c
index ea4bedf..59197d1 100644
--- a/drivers/nfc/nq-nci.c
+++ b/drivers/nfc/nq-nci.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -469,39 +469,43 @@
 		dev_dbg(&nqx_dev->client->dev,
 			"gpio_set_value disable: %s: info: %p\n",
 			__func__, nqx_dev);
-		if (gpio_is_valid(nqx_dev->firm_gpio))
+		if (gpio_is_valid(nqx_dev->firm_gpio)) {
 			gpio_set_value(nqx_dev->firm_gpio, 0);
+			usleep_range(10000, 10100);
+		}
 
 		if (gpio_is_valid(nqx_dev->ese_gpio)) {
 			if (!gpio_get_value(nqx_dev->ese_gpio)) {
 				dev_dbg(&nqx_dev->client->dev, "disabling en_gpio\n");
 				gpio_set_value(nqx_dev->en_gpio, 0);
+				usleep_range(10000, 10100);
 			} else {
 				dev_dbg(&nqx_dev->client->dev, "keeping en_gpio high\n");
 			}
 		} else {
 			dev_dbg(&nqx_dev->client->dev, "ese_gpio invalid, set en_gpio to low\n");
 			gpio_set_value(nqx_dev->en_gpio, 0);
+			usleep_range(10000, 10100);
 		}
 		r = nqx_clock_deselect(nqx_dev);
 		if (r < 0)
 			dev_err(&nqx_dev->client->dev, "unable to disable clock\n");
 		nqx_dev->nfc_ven_enabled = false;
-		/* hardware dependent delay */
-		msleep(100);
 	} else if (arg == 1) {
 		nqx_enable_irq(nqx_dev);
 		dev_dbg(&nqx_dev->client->dev,
 			"gpio_set_value enable: %s: info: %p\n",
 			__func__, nqx_dev);
-		if (gpio_is_valid(nqx_dev->firm_gpio))
+		if (gpio_is_valid(nqx_dev->firm_gpio)) {
 			gpio_set_value(nqx_dev->firm_gpio, 0);
+			usleep_range(10000, 10100);
+		}
 		gpio_set_value(nqx_dev->en_gpio, 1);
+		usleep_range(10000, 10100);
 		r = nqx_clock_select(nqx_dev);
 		if (r < 0)
 			dev_err(&nqx_dev->client->dev, "unable to enable clock\n");
 		nqx_dev->nfc_ven_enabled = true;
-		msleep(20);
 	} else if (arg == 2) {
 		/*
 		 * We are switching to Dowload Mode, toggle the enable pin
@@ -515,14 +519,15 @@
 			}
 		}
 		gpio_set_value(nqx_dev->en_gpio, 1);
-		msleep(20);
-		if (gpio_is_valid(nqx_dev->firm_gpio))
+		usleep_range(10000, 10100);
+		if (gpio_is_valid(nqx_dev->firm_gpio)) {
 			gpio_set_value(nqx_dev->firm_gpio, 1);
-		msleep(20);
+			usleep_range(10000, 10100);
+		}
 		gpio_set_value(nqx_dev->en_gpio, 0);
-		msleep(100);
+		usleep_range(10000, 10100);
 		gpio_set_value(nqx_dev->en_gpio, 1);
-		msleep(20);
+		usleep_range(10000, 10100);
 	} else {
 		r = -ENOIOCTLCMD;
 	}
@@ -648,13 +653,14 @@
 	unsigned char nci_reset_rsp[6];
 	unsigned char init_rsp_len = 0;
 	unsigned int enable_gpio = nqx_dev->en_gpio;
+
 	/* making sure that the NFCC starts in a clean state. */
 	gpio_set_value(enable_gpio, 0);/* ULPM: Disable */
 	/* hardware dependent delay */
-	msleep(20);
+	usleep_range(10000, 10100);
 	gpio_set_value(enable_gpio, 1);/* HPD : Enable*/
 	/* hardware dependent delay */
-	msleep(20);
+	usleep_range(10000, 10100);
 
 	/* send NCI CORE RESET CMD with Keep Config parameters */
 	ret = i2c_master_send(client, raw_nci_reset_cmd,
@@ -670,21 +676,17 @@
 	/* Read Response of RESET command */
 	ret = i2c_master_recv(client, nci_reset_rsp,
 		sizeof(nci_reset_rsp));
-	dev_err(&client->dev,
-	"%s: - nq - reset cmd answer : NfcNciRx %x %x %x\n",
-	__func__, nci_reset_rsp[0],
-	nci_reset_rsp[1], nci_reset_rsp[2]);
 	if (ret < 0) {
 		dev_err(&client->dev,
 		"%s: - i2c_master_recv Error\n", __func__);
 		goto err_nfcc_hw_check;
 	}
-	ret = i2c_master_send(client, raw_nci_init_cmd,
-		sizeof(raw_nci_init_cmd));
+	ret = nqx_standby_write(nqx_dev, raw_nci_init_cmd,
+				sizeof(raw_nci_init_cmd));
 	if (ret < 0) {
 		dev_err(&client->dev,
 		"%s: - i2c_master_send Error\n", __func__);
-		goto err_nfcc_hw_check;
+		goto err_nfcc_core_init_fail;
 	}
 	/* hardware dependent delay */
 	msleep(30);
@@ -694,7 +696,7 @@
 	if (ret < 0) {
 		dev_err(&client->dev,
 		"%s: - i2c_master_recv Error\n", __func__);
-		goto err_nfcc_hw_check;
+		goto err_nfcc_core_init_fail;
 	}
 	init_rsp_len = 2 + nci_init_rsp[2]; /*payload + len*/
 	if (init_rsp_len > PAYLOAD_HEADER_LENGTH) {
@@ -707,6 +709,11 @@
 		nqx_dev->nqx_info.info.fw_minor =
 				nci_init_rsp[init_rsp_len];
 	}
+	dev_dbg(&client->dev,
+		"%s: - nq - reset cmd answer : NfcNciRx %x %x %x\n",
+		__func__, nci_reset_rsp[0],
+		nci_reset_rsp[1], nci_reset_rsp[2]);
+
 	dev_dbg(&nqx_dev->client->dev, "NQ NFCC chip_type = %x\n",
 		nqx_dev->nqx_info.info.chip_type);
 	dev_dbg(&nqx_dev->client->dev, "NQ fw version = %x.%x.%x\n",
@@ -746,6 +753,12 @@
 	ret = 0;
 	goto done;
 
+err_nfcc_core_init_fail:
+	dev_err(&client->dev,
+	"%s: - nq - reset cmd answer : NfcNciRx %x %x %x\n",
+	__func__, nci_reset_rsp[0],
+	nci_reset_rsp[1], nci_reset_rsp[2]);
+
 err_nfcc_hw_check:
 	ret = -ENXIO;
 	dev_err(&client->dev,
@@ -1213,6 +1226,7 @@
 		.owner = THIS_MODULE,
 		.name = "nq-nci",
 		.of_match_table = msm_match_table,
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
 		.pm = &nfc_pm_ops,
 	},
 };
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index fac7cabe..d8d189d 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -861,7 +861,7 @@
 	nsindex = to_namespace_index(ndd, 0);
 	memset(nsindex, 0, ndd->nsarea.config_size);
 	for (i = 0; i < 2; i++) {
-		int rc = nd_label_write_index(ndd, i, i*2, ND_NSINDEX_INIT);
+		int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT);
 
 		if (rc)
 			return rc;
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index a38ae34..b8fb1ef 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1451,7 +1451,7 @@
 	if (a == &dev_attr_resource.attr) {
 		if (is_namespace_blk(dev))
 			return 0;
-		return a->mode;
+		return 0400;
 	}
 
 	if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 78cb3e2..42abdd2 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -270,8 +270,16 @@
 	NULL,
 };
 
+static umode_t pfn_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+	if (a == &dev_attr_resource.attr)
+		return 0400;
+	return a->mode;
+}
+
 struct attribute_group nd_pfn_attribute_group = {
 	.attrs = nd_pfn_attributes,
+	.is_visible = pfn_visible,
 };
 
 static const struct attribute_group *nd_pfn_attribute_groups[] = {
@@ -344,9 +352,9 @@
 int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
 {
 	u64 checksum, offset;
-	unsigned long align;
 	enum nd_pfn_mode mode;
 	struct nd_namespace_io *nsio;
+	unsigned long align, start_pad;
 	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
 	struct nd_namespace_common *ndns = nd_pfn->ndns;
 	const u8 *parent_uuid = nd_dev_to_uuid(&ndns->dev);
@@ -390,6 +398,7 @@
 
 	align = le32_to_cpu(pfn_sb->align);
 	offset = le64_to_cpu(pfn_sb->dataoff);
+	start_pad = le32_to_cpu(pfn_sb->start_pad);
 	if (align == 0)
 		align = 1UL << ilog2(offset);
 	mode = le32_to_cpu(pfn_sb->mode);
@@ -448,7 +457,7 @@
 		return -EBUSY;
 	}
 
-	if ((align && !IS_ALIGNED(offset, align))
+	if ((align && !IS_ALIGNED(nsio->res.start + offset + start_pad, align))
 			|| !IS_ALIGNED(offset, PAGE_SIZE)) {
 		dev_err(&nd_pfn->dev,
 				"bad offset: %#llx dax disabled align: %#lx\n",
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index fbeca06..719ee5f 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1619,7 +1619,8 @@
 	mutex_lock(&ctrl->namespaces_mutex);
 	list_for_each_entry(ns, &ctrl->namespaces, list) {
 		if (ns->ns_id == nsid) {
-			kref_get(&ns->kref);
+			if (!kref_get_unless_zero(&ns->kref))
+				continue;
 			ret = ns;
 			break;
 		}
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 8edafd8..5c52a61 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -84,7 +84,7 @@
  * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
  * found empirically.
  */
-#define NVME_QUIRK_DELAY_AMOUNT		2000
+#define NVME_QUIRK_DELAY_AMOUNT		2300
 
 enum nvme_ctrl_state {
 	NVME_CTRL_NEW,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 54ea90f..e48ecb9 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2109,6 +2109,8 @@
 		.driver_data = NVME_QUIRK_IDENTIFY_CNS, },
 	{ PCI_DEVICE(0x1c58, 0x0003),	/* HGST adapter */
 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+	{ PCI_DEVICE(0x1c58, 0x0023),	/* WDC SN200 adapter */
+		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
 	{ PCI_DEVICE(0x1c5f, 0x0540),	/* Memblaze Pblaze4 adapter */
 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
 	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 6fe4c48..f791d46 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -381,7 +381,6 @@
 {
 	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
 	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
-	u64 val;
 	u32 val32;
 	u16 status = 0;
 
@@ -391,8 +390,7 @@
 			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
 		break;
 	case NVME_FEAT_KATO:
-		val = le64_to_cpu(req->cmd->prop_set.value);
-		val32 = val & 0xffff;
+		val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
 		req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
 		nvmet_set_result(req, req->sq->ctrl->kato);
 		break;
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 55ce769..c89d68a 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -422,6 +422,13 @@
 	ctrl->sqs[qid] = sq;
 }
 
+static void nvmet_confirm_sq(struct percpu_ref *ref)
+{
+	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
+
+	complete(&sq->confirm_done);
+}
+
 void nvmet_sq_destroy(struct nvmet_sq *sq)
 {
 	/*
@@ -430,7 +437,8 @@
 	 */
 	if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
 		nvmet_async_events_free(sq->ctrl);
-	percpu_ref_kill(&sq->ref);
+	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
+	wait_for_completion(&sq->confirm_done);
 	wait_for_completion(&sq->free_done);
 	percpu_ref_exit(&sq->ref);
 
@@ -458,6 +466,7 @@
 		return ret;
 	}
 	init_completion(&sq->free_done);
+	init_completion(&sq->confirm_done);
 
 	return 0;
 }
@@ -816,6 +825,9 @@
 	list_del(&ctrl->subsys_entry);
 	mutex_unlock(&subsys->lock);
 
+	flush_work(&ctrl->async_event_work);
+	cancel_work_sync(&ctrl->fatal_err_work);
+
 	ida_simple_remove(&subsys->cntlid_ida, ctrl->cntlid);
 	nvmet_subsys_put(subsys);
 
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index d5df77d..e56ca3f 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -223,8 +223,6 @@
 static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
 		struct nvme_loop_iod *iod, unsigned int queue_idx)
 {
-	BUG_ON(queue_idx >= ctrl->queue_count);
-
 	iod->req.cmd = &iod->cmd;
 	iod->req.rsp = &iod->rsp;
 	iod->queue = &ctrl->queues[queue_idx];
@@ -288,9 +286,9 @@
 
 static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
 {
+	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
 	blk_cleanup_queue(ctrl->ctrl.admin_q);
 	blk_mq_free_tag_set(&ctrl->admin_tag_set);
-	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
 }
 
 static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
@@ -314,6 +312,43 @@
 	kfree(ctrl);
 }
 
+static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
+{
+	int i;
+
+	for (i = 1; i < ctrl->queue_count; i++)
+		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+}
+
+static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
+{
+	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+	unsigned int nr_io_queues;
+	int ret, i;
+
+	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
+	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
+	if (ret || !nr_io_queues)
+		return ret;
+
+	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);
+
+	for (i = 1; i <= nr_io_queues; i++) {
+		ctrl->queues[i].ctrl = ctrl;
+		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
+		if (ret)
+			goto out_destroy_queues;
+
+		ctrl->queue_count++;
+	}
+
+	return 0;
+
+out_destroy_queues:
+	nvme_loop_destroy_io_queues(ctrl);
+	return ret;
+}
+
 static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 {
 	int error;
@@ -385,17 +420,13 @@
 
 static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
 {
-	int i;
-
 	nvme_stop_keep_alive(&ctrl->ctrl);
 
 	if (ctrl->queue_count > 1) {
 		nvme_stop_queues(&ctrl->ctrl);
 		blk_mq_tagset_busy_iter(&ctrl->tag_set,
 					nvme_cancel_request, &ctrl->ctrl);
-
-		for (i = 1; i < ctrl->queue_count; i++)
-			nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+		nvme_loop_destroy_io_queues(ctrl);
 	}
 
 	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
@@ -467,19 +498,14 @@
 	if (ret)
 		goto out_disable;
 
-	for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
-		ctrl->queues[i].ctrl = ctrl;
-		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
-		if (ret)
-			goto out_free_queues;
+	ret = nvme_loop_init_io_queues(ctrl);
+	if (ret)
+		goto out_destroy_admin;
 
-		ctrl->queue_count++;
-	}
-
-	for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
+	for (i = 1; i < ctrl->queue_count; i++) {
 		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
 		if (ret)
-			goto out_free_queues;
+			goto out_destroy_io;
 	}
 
 	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
@@ -492,9 +518,9 @@
 
 	return;
 
-out_free_queues:
-	for (i = 1; i < ctrl->queue_count; i++)
-		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+out_destroy_io:
+	nvme_loop_destroy_io_queues(ctrl);
+out_destroy_admin:
 	nvme_loop_destroy_admin_queue(ctrl);
 out_disable:
 	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
@@ -533,25 +559,12 @@
 
 static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
 {
-	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
 	int ret, i;
 
-	ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
-	if (ret || !opts->nr_io_queues)
+	ret = nvme_loop_init_io_queues(ctrl);
+	if (ret)
 		return ret;
 
-	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
-		opts->nr_io_queues);
-
-	for (i = 1; i <= opts->nr_io_queues; i++) {
-		ctrl->queues[i].ctrl = ctrl;
-		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
-		if (ret)
-			goto out_destroy_queues;
-
-		ctrl->queue_count++;
-	}
-
 	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
 	ctrl->tag_set.ops = &nvme_loop_mq_ops;
 	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
@@ -575,7 +588,7 @@
 		goto out_free_tagset;
 	}
 
-	for (i = 1; i <= opts->nr_io_queues; i++) {
+	for (i = 1; i < ctrl->queue_count; i++) {
 		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
 		if (ret)
 			goto out_cleanup_connect_q;
@@ -588,8 +601,7 @@
 out_free_tagset:
 	blk_mq_free_tag_set(&ctrl->tag_set);
 out_destroy_queues:
-	for (i = 1; i < ctrl->queue_count; i++)
-		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+	nvme_loop_destroy_io_queues(ctrl);
 	return ret;
 }
 
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 7655a35..26b87dc 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -73,6 +73,7 @@
 	u16			qid;
 	u16			size;
 	struct completion	free_done;
+	struct completion	confirm_done;
 };
 
 /**
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index ca8ddc3..53bd325 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -703,11 +703,6 @@
 {
 	u16 status;
 
-	cmd->queue = queue;
-	cmd->n_rdma = 0;
-	cmd->req.port = queue->port;
-
-
 	ib_dma_sync_single_for_cpu(queue->dev->device,
 		cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
 		DMA_FROM_DEVICE);
@@ -760,9 +755,12 @@
 
 	cmd->queue = queue;
 	rsp = nvmet_rdma_get_rsp(queue);
+	rsp->queue = queue;
 	rsp->cmd = cmd;
 	rsp->flags = 0;
 	rsp->req.cmd = cmd->nvme_cmd;
+	rsp->req.port = queue->port;
+	rsp->n_rdma = 0;
 
 	if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
 		unsigned long flags;
diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
index b5305f0..c25d7dc 100644
--- a/drivers/nvmem/qfprom.c
+++ b/drivers/nvmem/qfprom.c
@@ -98,7 +98,26 @@
 		.of_match_table = qfprom_of_match,
 	},
 };
-module_platform_driver(qfprom_driver);
+
+static int __init qfprom_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&qfprom_driver);
+	if (ret != 0)
+		pr_err("Failed to register qfprom driver\n");
+
+	return ret;
+}
+
+static void __exit qfprom_exit(void)
+{
+	return platform_driver_unregister(&qfprom_driver);
+}
+
+subsys_initcall(qfprom_init);
+module_exit(qfprom_exit);
+
 MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
 MODULE_DESCRIPTION("Qualcomm QFPROM driver");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index bc286cb..1cced1d 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -1656,3 +1656,36 @@
 	iounmap(base_addr);
 }
 
+
+/*
+ * The design of the Diva management card in rp34x0 machines (rp3410, rp3440)
+ * seems rushed, so that many built-in components simply don't work.
+ * The following quirks disable the serial AUX port and the built-in ATI RV100
+ * Radeon 7000 graphics card which both don't have any external connectors and
+ * thus are useless, and even worse, e.g. the AUX port occupies ttyS0 and as
+ * such makes those machines the only PARISC machines on which we can't use
+ * ttyS0 as boot console.
+ */
+static void quirk_diva_ati_card(struct pci_dev *dev)
+{
+	if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
+	    dev->subsystem_device != 0x1292)
+		return;
+
+	dev_info(&dev->dev, "Hiding Diva built-in ATI card");
+	dev->device = 0;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QY,
+	quirk_diva_ati_card);
+
+static void quirk_diva_aux_disable(struct pci_dev *dev)
+{
+	if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
+	    dev->subsystem_device != 0x1291)
+		return;
+
+	dev_info(&dev->dev, "Hiding Diva built-in AUX serial device");
+	dev->device = 0;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA_AUX,
+	quirk_diva_aux_disable);
diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c
index fd5c515..b897813 100644
--- a/drivers/pci/host/pci-msm.c
+++ b/drivers/pci/host/pci-msm.c
@@ -36,6 +36,7 @@
 #include <linux/reset.h>
 #include <linux/msm-bus.h>
 #include <linux/msm-bus-board.h>
+#include <linux/seq_file.h>
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/io.h>
@@ -49,8 +50,7 @@
 #include <linux/ipc_logging.h>
 #include <linux/msm_pcie.h>
 
-#define PCIE_VENDOR_ID_RCP		0x17cb
-#define PCIE_DEVICE_ID_RCP		0x0106
+#define PCIE_VENDOR_ID_QCOM		0x17cb
 
 #define PCIE20_L1SUB_CONTROL1		0x1E4
 #define PCIE20_PARF_DBI_BASE_ADDR       0x350
@@ -62,7 +62,6 @@
 
 #define PCIE_N_SW_RESET(n)			(PCS_PORT(n) + 0x00)
 #define PCIE_N_POWER_DOWN_CONTROL(n)		(PCS_PORT(n) + 0x04)
-#define PCIE_N_PCS_STATUS(n)			(PCS_PORT(n) + 0x174)
 
 #define PCIE_GEN3_COM_INTEGLOOP_GAIN1_MODE0	0x0154
 #define PCIE_GEN3_L0_DRVR_CTRL0			0x080c
@@ -70,7 +69,6 @@
 #define PCIE_GEN3_L0_BIST_ERR_CNT1_STATUS	0x08a8
 #define PCIE_GEN3_L0_BIST_ERR_CNT2_STATUS	0x08ac
 #define PCIE_GEN3_L0_DEBUG_BUS_STATUS4		0x08bc
-#define PCIE_GEN3_PCIE_PHY_PCS_STATUS		0x1aac
 
 #define PCIE20_PARF_SYS_CTRL	     0x00
 #define PCIE20_PARF_PM_CTRL		0x20
@@ -121,6 +119,15 @@
 #define PCIE20_PLR_IATU_LTAR	     0x918
 #define PCIE20_PLR_IATU_UTAR	     0x91c
 
+#define PCIE_IATU_BASE(n)	(n * 0x200)
+
+#define PCIE_IATU_CTRL1(n)	(PCIE_IATU_BASE(n) + 0x00)
+#define PCIE_IATU_CTRL2(n)	(PCIE_IATU_BASE(n) + 0x04)
+#define PCIE_IATU_LBAR(n)	(PCIE_IATU_BASE(n) + 0x08)
+#define PCIE_IATU_UBAR(n)	(PCIE_IATU_BASE(n) + 0x0c)
+#define PCIE_IATU_LAR(n)	(PCIE_IATU_BASE(n) + 0x10)
+#define PCIE_IATU_LTAR(n)	(PCIE_IATU_BASE(n) + 0x14)
+#define PCIE_IATU_UTAR(n)	(PCIE_IATU_BASE(n) + 0x18)
 
 #define PCIE20_PORT_LINK_CTRL_REG	0x710
 #define PCIE20_GEN3_RELATED_REG	0x890
@@ -177,7 +184,7 @@
 #define MAX_PROP_SIZE 32
 #define MAX_RC_NAME_LEN 15
 #define MSM_PCIE_MAX_VREG 4
-#define MSM_PCIE_MAX_CLK 12
+#define MSM_PCIE_MAX_CLK 13
 #define MSM_PCIE_MAX_PIPE_CLK 1
 #define MAX_RC_NUM 3
 #define MAX_DEVICE_NUM 20
@@ -185,7 +192,6 @@
 #define PCIE_TLP_RD_SIZE 0x5
 #define PCIE_MSI_NR_IRQS 256
 #define MSM_PCIE_MAX_MSI 32
-#define MAX_MSG_LEN 80
 #define PCIE_LOG_PAGES (50)
 #define PCIE_CONF_SPACE_DW			1024
 #define PCIE_CLEAR				0xDEADBEEF
@@ -217,6 +223,9 @@
 #endif
 #define PCIE_LOWER_ADDR(addr) ((u32)((addr) & 0xffffffff))
 
+#define PCIE_BUS_PRIV_DATA(bus) \
+	(struct msm_pcie_dev_t *)(bus->sysdata)
+
 /* Config Space Offsets */
 #define BDF_OFFSET(bus, devfn) \
 	((bus << 24) | (devfn << 16))
@@ -287,6 +296,7 @@
 	MSM_PCIE_RES_PHY,
 	MSM_PCIE_RES_DM_CORE,
 	MSM_PCIE_RES_ELBI,
+	MSM_PCIE_RES_IATU,
 	MSM_PCIE_RES_CONF,
 	MSM_PCIE_RES_IO,
 	MSM_PCIE_RES_BARS,
@@ -363,6 +373,76 @@
 	MSM_PCIE_NO_WAKE_ENUMERATION = BIT(1)
 };
 
+enum msm_pcie_debugfs_option {
+	MSM_PCIE_OUTPUT_PCIE_INFO,
+	MSM_PCIE_DISABLE_LINK,
+	MSM_PCIE_ENABLE_LINK,
+	MSM_PCIE_DISABLE_ENABLE_LINK,
+	MSM_PCIE_DUMP_SHADOW_REGISTER,
+	MSM_PCIE_DISABLE_L0S,
+	MSM_PCIE_ENABLE_L0S,
+	MSM_PCIE_DISABLE_L1,
+	MSM_PCIE_ENABLE_L1,
+	MSM_PCIE_DISABLE_L1SS,
+	MSM_PCIE_ENABLE_L1SS,
+	MSM_PCIE_ENUMERATION,
+	MSM_PCIE_READ_PCIE_REGISTER,
+	MSM_PCIE_WRITE_PCIE_REGISTER,
+	MSM_PCIE_DUMP_PCIE_REGISTER_SPACE,
+	MSM_PCIE_ALLOCATE_DDR_MAP_LBAR,
+	MSM_PCIE_FREE_DDR_UNMAP_LBAR,
+	MSM_PCIE_OUTPUT_DDR_LBAR_ADDRESS,
+	MSM_PCIE_CONFIGURE_LOOPBACK,
+	MSM_PCIE_SETUP_LOOPBACK_IATU,
+	MSM_PCIE_READ_DDR,
+	MSM_PCIE_READ_LBAR,
+	MSM_PCIE_WRITE_DDR,
+	MSM_PCIE_WRITE_LBAR,
+	MSM_PCIE_DISABLE_AER,
+	MSM_PCIE_ENABLE_AER,
+	MSM_PCIE_GPIO_STATUS,
+	MSM_PCIE_ASSERT_PERST,
+	MSM_PCIE_DEASSERT_PERST,
+	MSM_PCIE_KEEP_RESOURCES_ON,
+	MSM_PCIE_FORCE_GEN1,
+	MSM_PCIE_MAX_DEBUGFS_OPTION
+};
+
+static const char * const
+	msm_pcie_debugfs_option_desc[MSM_PCIE_MAX_DEBUGFS_OPTION] = {
+	"OUTPUT PCIE INFO",
+	"DISABLE LINK",
+	"ENABLE LINK",
+	"DISABLE AND ENABLE LINK",
+	"DUMP PCIE SHADOW REGISTER",
+	"DISABLE L0S",
+	"ENABLE L0S",
+	"DISABLE L1",
+	"ENABLE L1",
+	"DISABLE L1SS",
+	"ENABLE L1SS",
+	"ENUMERATE",
+	"READ A PCIE REGISTER",
+	"WRITE TO PCIE REGISTER",
+	"DUMP PCIE REGISTER SPACE",
+	"ALLOCATE DDR AND MAP LBAR",
+	"FREE DDR AND UNMAP LBAR",
+	"OUTPUT DDR AND LBAR VIR ADDRESS",
+	"CONFIGURE PCIE LOOPBACK",
+	"SETUP LOOPBACK IATU",
+	"READ DDR",
+	"READ LBAR",
+	"WRITE DDR",
+	"WRITE LBAR",
+	"SET AER ENABLE FLAG",
+	"CLEAR AER ENABLE FLAG",
+	"OUTPUT PERST AND WAKE GPIO STATUS",
+	"ASSERT PERST",
+	"DE-ASSERT PERST",
+	"SET KEEP_RESOURCES_ON FLAG",
+	"FORCE GEN 1 SPEED FOR LINK TRAINING"
+};
+
 /* gpio info structure */
 struct msm_pcie_gpio_info_t {
 	char	*name;
@@ -452,6 +532,7 @@
 	void __iomem		     *parf;
 	void __iomem		     *phy;
 	void __iomem		     *elbi;
+	void __iomem		     *iatu;
 	void __iomem		     *dm_core;
 	void __iomem		     *conf;
 	void __iomem		     *bars;
@@ -506,6 +587,7 @@
 	uint32_t			switch_latency;
 	uint32_t			wr_halt_size;
 	uint32_t			slv_addr_space_size;
+	uint32_t			phy_status_offset;
 	uint32_t			cpl_timeout;
 	uint32_t			current_bdf;
 	uint32_t			perst_delay_us_min;
@@ -520,10 +602,8 @@
 	bool				 enumerated;
 	struct work_struct	     handle_wake_work;
 	struct mutex		     recovery_lock;
-	spinlock_t                   linkdown_lock;
 	spinlock_t                   wakeup_lock;
-	spinlock_t			global_irq_lock;
-	spinlock_t			aer_lock;
+	spinlock_t			irq_lock;
 	ulong				linkdown_counter;
 	ulong				link_turned_on_counter;
 	ulong				link_turned_off_counter;
@@ -589,13 +669,32 @@
 module_param_named(keep_resources_on, msm_pcie_keep_resources_on,
 			    int, 0644);
 
+/*
+ * For each bit set, force the corresponding root complex
+ * to do link training at gen1 speed.
+ */
+static int msm_pcie_force_gen1;
+module_param_named(force_gen1, msm_pcie_force_gen1,
+			    int, 0644);
+
+
+/*
+ * For each bit set in BIT[3:0] determines which corresponding
+ * root complex will use the value in BIT[31:4] to override the
+ * default (LINK_UP_CHECK_MAX_COUNT) max check count for link training.
+ * Each iteration is LINK_UP_TIMEOUT_US_MIN long.
+ */
+static int msm_pcie_link_check_max_count;
+module_param_named(link_check_max_count, msm_pcie_link_check_max_count,
+			    int, 0644);
+
 /* debugfs values */
-static u32 rc_sel;
+static u32 rc_sel = BIT(0);
 static u32 base_sel;
 static u32 wr_offset;
 static u32 wr_mask;
 static u32 wr_value;
-static ulong corr_counter_limit = 5;
+static u32 corr_counter_limit = 5;
 
 /* Table to track info of PCIe devices */
 static struct msm_pcie_device_info
@@ -677,6 +776,7 @@
 	{NULL, "pcie_0_ldo", 0, false, true},
 	{NULL, "pcie_0_smmu_clk", 0, false, false},
 	{NULL, "pcie_0_slv_q2a_axi_clk", 0, false, false},
+	{NULL, "pcie_0_sleep_clk", 0, false, false},
 	{NULL, "pcie_phy_refgen_clk", 0, false, false},
 	{NULL, "pcie_tbu_clk", 0, false, false},
 	{NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
@@ -691,6 +791,7 @@
 	{NULL, "pcie_1_ldo", 0, false, true},
 	{NULL, "pcie_1_smmu_clk", 0, false, false},
 	{NULL, "pcie_1_slv_q2a_axi_clk", 0, false, false},
+	{NULL, "pcie_1_sleep_clk", 0, false, false},
 	{NULL, "pcie_phy_refgen_clk", 0, false, false},
 	{NULL, "pcie_tbu_clk", 0, false, false},
 	{NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
@@ -705,6 +806,7 @@
 	{NULL, "pcie_2_ldo", 0, false, true},
 	{NULL, "pcie_2_smmu_clk", 0, false, false},
 	{NULL, "pcie_2_slv_q2a_axi_clk", 0, false, false},
+	{NULL, "pcie_2_sleep_clk", 0, false, false},
 	{NULL, "pcie_phy_refgen_clk", 0, false, false},
 	{NULL, "pcie_tbu_clk", 0, false, false},
 	{NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
@@ -732,6 +834,7 @@
 	{"phy",     NULL, NULL},
 	{"dm_core",	NULL, NULL},
 	{"elbi",	NULL, NULL},
+	{"iatu",	NULL, NULL},
 	{"conf",	NULL, NULL},
 	{"io",		NULL, NULL},
 	{"bars",	NULL, NULL},
@@ -768,36 +871,21 @@
 };
 
 static int msm_pcie_config_device(struct pci_dev *dev, void *pdev);
+static int msm_pcie_config_l0s_disable(struct pci_dev *dev, void *pdev);
+static int msm_pcie_config_l0s_enable(struct pci_dev *dev, void *pdev);
+static int msm_pcie_config_l1_disable(struct pci_dev *dev, void *pdev);
+static int msm_pcie_config_l1_enable(struct pci_dev *dev, void *pdev);
+static int msm_pcie_config_l1ss_disable(struct pci_dev *dev, void *pdev);
+static int msm_pcie_config_l1ss_enable(struct pci_dev *dev, void *pdev);
 static void msm_pcie_config_link_pm_rc(struct msm_pcie_dev_t *dev,
 				struct pci_dev *pdev, bool enable);
 
 #ifdef CONFIG_ARM
-#define PCIE_BUS_PRIV_DATA(bus) \
-	(((struct pci_sys_data *)bus->sysdata)->private_data)
-
-static struct pci_sys_data msm_pcie_sys_data[MAX_RC_NUM];
-
-static inline void *msm_pcie_setup_sys_data(struct msm_pcie_dev_t *dev)
-{
-	msm_pcie_sys_data[dev->rc_idx].domain = dev->rc_idx;
-	msm_pcie_sys_data[dev->rc_idx].private_data = dev;
-
-	return &msm_pcie_sys_data[dev->rc_idx];
-}
-
 static inline void msm_pcie_fixup_irqs(struct msm_pcie_dev_t *dev)
 {
 	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
 }
 #else
-#define PCIE_BUS_PRIV_DATA(bus) \
-	(struct msm_pcie_dev_t *)(bus->sysdata)
-
-static inline void *msm_pcie_setup_sys_data(struct msm_pcie_dev_t *dev)
-{
-	return dev;
-}
-
 static inline void msm_pcie_fixup_irqs(struct msm_pcie_dev_t *dev)
 {
 }
@@ -906,11 +994,7 @@
 
 static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
 {
-	u32 pos = (dev->max_link_speed == GEN2_SPEED) ?
-		PCIE_N_PCS_STATUS(dev->rc_idx) :
-		PCIE_GEN3_PCIE_PHY_PCS_STATUS;
-
-	if (readl_relaxed(dev->phy + pos) & BIT(6))
+	if (readl_relaxed(dev->phy + dev->phy_status_offset) & BIT(6))
 		return false;
 	else
 		return true;
@@ -1168,6 +1252,8 @@
 		dev->wr_halt_size);
 	PCIE_DBG_FS(dev, "slv_addr_space_size: 0x%x\n",
 		dev->slv_addr_space_size);
+	PCIE_DBG_FS(dev, "phy_status_offset: 0x%x\n",
+		dev->phy_status_offset);
 	PCIE_DBG_FS(dev, "cpl_timeout: 0x%x\n",
 		dev->cpl_timeout);
 	PCIE_DBG_FS(dev, "current_bdf: 0x%x\n",
@@ -1236,56 +1322,24 @@
 static void msm_pcie_sel_debug_testcase(struct msm_pcie_dev_t *dev,
 					u32 testcase)
 {
-	phys_addr_t dbi_base_addr =
-		dev->res[MSM_PCIE_RES_DM_CORE].resource->start;
+	u32 dbi_base_addr = dev->res[MSM_PCIE_RES_DM_CORE].resource->start;
 	phys_addr_t loopback_lbar_phy =
-		dbi_base_addr + LOOPBACK_BASE_ADDR_OFFSET;
+		dev->res[MSM_PCIE_RES_DM_CORE].resource->start +
+		LOOPBACK_BASE_ADDR_OFFSET;
 	static uint32_t loopback_val = 0x1;
-	static u64 loopback_ddr_phy;
+	static dma_addr_t loopback_ddr_phy;
 	static uint32_t *loopback_ddr_vir;
 	static void __iomem *loopback_lbar_vir;
 	int ret, i;
 	u32 base_sel_size = 0;
-	u32 val = 0;
-	u32 current_offset = 0;
-	u32 ep_l1sub_ctrl1_offset = 0;
-	u32 ep_l1sub_cap_reg1_offset = 0;
-	u32 ep_link_ctrlstts_offset = 0;
-	u32 ep_dev_ctrl2stts2_offset = 0;
-
-	if (testcase >= 5 && testcase <= 10) {
-		current_offset =
-			readl_relaxed(dev->conf + PCIE_CAP_PTR_OFFSET) & 0xff;
-
-		while (current_offset) {
-			val = readl_relaxed(dev->conf + current_offset);
-			if ((val & 0xff) == PCIE20_CAP_ID) {
-				ep_link_ctrlstts_offset = current_offset +
-								0x10;
-				ep_dev_ctrl2stts2_offset = current_offset +
-								0x28;
-				break;
-			}
-			current_offset = (val >> 8) & 0xff;
-		}
-
-		if (!ep_link_ctrlstts_offset)
-			PCIE_DBG(dev,
-				"RC%d endpoint does not support PCIe capability registers\n",
-				dev->rc_idx);
-		else
-			PCIE_DBG(dev,
-				"RC%d: ep_link_ctrlstts_offset: 0x%x\n",
-				dev->rc_idx, ep_link_ctrlstts_offset);
-	}
 
 	switch (testcase) {
-	case 0: /* output status */
+	case MSM_PCIE_OUTPUT_PCIE_INFO:
 		PCIE_DBG_FS(dev, "\n\nPCIe: Status for RC%d:\n",
 			dev->rc_idx);
 		msm_pcie_show_status(dev);
 		break;
-	case 1: /* disable link */
+	case MSM_PCIE_DISABLE_LINK:
 		PCIE_DBG_FS(dev,
 			"\n\nPCIe: RC%d: disable link\n\n", dev->rc_idx);
 		ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, 0,
@@ -1298,7 +1352,7 @@
 			PCIE_DBG_FS(dev, "PCIe:%s:disabled link\n",
 				__func__);
 		break;
-	case 2: /* enable link and recover config space for RC and EP */
+	case MSM_PCIE_ENABLE_LINK:
 		PCIE_DBG_FS(dev,
 			"\n\nPCIe: RC%d: enable link and recover config space\n\n",
 			dev->rc_idx);
@@ -1313,10 +1367,7 @@
 			msm_pcie_recover_config(dev->dev);
 		}
 		break;
-	case 3: /*
-		 * disable and enable link, recover config space for
-		 * RC and EP
-		 */
+	case MSM_PCIE_DISABLE_ENABLE_LINK:
 		PCIE_DBG_FS(dev,
 			"\n\nPCIe: RC%d: disable and enable link then recover config space\n\n",
 			dev->rc_idx);
@@ -1339,7 +1390,7 @@
 			msm_pcie_recover_config(dev->dev);
 		}
 		break;
-	case 4: /* dump shadow registers for RC and EP */
+	case MSM_PCIE_DUMP_SHADOW_REGISTER:
 		PCIE_DBG_FS(dev,
 			"\n\nPCIe: RC%d: dumping RC shadow registers\n",
 			dev->rc_idx);
@@ -1350,236 +1401,97 @@
 			dev->rc_idx);
 		msm_pcie_shadow_dump(dev, false);
 		break;
-	case 5: /* disable L0s */
+	case MSM_PCIE_DISABLE_L0S:
 		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L0s\n\n",
 			dev->rc_idx);
-		msm_pcie_write_mask(dev->dm_core +
-				PCIE20_CAP_LINKCTRLSTATUS,
-				BIT(0), 0);
-		msm_pcie_write_mask(dev->conf +
-				ep_link_ctrlstts_offset,
-				BIT(0), 0);
-		if (dev->shadow_en) {
-			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
-					readl_relaxed(dev->dm_core +
-					PCIE20_CAP_LINKCTRLSTATUS);
-			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
-					readl_relaxed(dev->conf +
-					ep_link_ctrlstts_offset);
+		if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
+			struct pci_bus *bus, *c_bus;
+			struct list_head *children = &dev->dev->bus->children;
+
+			msm_pcie_config_l0s_disable(dev->dev, dev);
+
+			list_for_each_entry_safe(bus, c_bus, children, node)
+				pci_walk_bus(bus,
+					&msm_pcie_config_l0s_disable, dev);
 		}
-		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
-			readl_relaxed(dev->dm_core +
-			PCIE20_CAP_LINKCTRLSTATUS));
-		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
-			readl_relaxed(dev->conf +
-			ep_link_ctrlstts_offset));
+		dev->l0s_supported = false;
 		break;
-	case 6: /* enable L0s */
+	case MSM_PCIE_ENABLE_L0S:
 		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L0s\n\n",
 			dev->rc_idx);
-		msm_pcie_write_mask(dev->dm_core +
-				PCIE20_CAP_LINKCTRLSTATUS,
-				0, BIT(0));
-		msm_pcie_write_mask(dev->conf +
-				ep_link_ctrlstts_offset,
-				0, BIT(0));
-		if (dev->shadow_en) {
-			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
-					readl_relaxed(dev->dm_core +
-					PCIE20_CAP_LINKCTRLSTATUS);
-			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
-					readl_relaxed(dev->conf +
-					ep_link_ctrlstts_offset);
+		dev->l0s_supported = true;
+		if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
+			struct pci_bus *bus, *c_bus;
+			struct list_head *children = &dev->dev->bus->children;
+
+			list_for_each_entry_safe(bus, c_bus, children, node)
+				pci_walk_bus(bus,
+					&msm_pcie_config_l0s_enable, dev);
+
+			msm_pcie_config_l0s_enable(dev->dev, dev);
 		}
-		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
-			readl_relaxed(dev->dm_core +
-			PCIE20_CAP_LINKCTRLSTATUS));
-		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
-			readl_relaxed(dev->conf +
-			ep_link_ctrlstts_offset));
 		break;
-	case 7: /* disable L1 */
+	case MSM_PCIE_DISABLE_L1:
 		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L1\n\n",
 			dev->rc_idx);
-		msm_pcie_write_mask(dev->dm_core +
-				PCIE20_CAP_LINKCTRLSTATUS,
-				BIT(1), 0);
-		msm_pcie_write_mask(dev->conf +
-				ep_link_ctrlstts_offset,
-				BIT(1), 0);
-		if (dev->shadow_en) {
-			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
-					readl_relaxed(dev->dm_core +
-					PCIE20_CAP_LINKCTRLSTATUS);
-			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
-					readl_relaxed(dev->conf +
-					ep_link_ctrlstts_offset);
+		if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
+			struct pci_bus *bus, *c_bus;
+			struct list_head *children = &dev->dev->bus->children;
+
+			msm_pcie_config_l1_disable(dev->dev, dev);
+
+			list_for_each_entry_safe(bus, c_bus, children, node)
+				pci_walk_bus(bus,
+					&msm_pcie_config_l1_disable, dev);
 		}
-		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
-			readl_relaxed(dev->dm_core +
-			PCIE20_CAP_LINKCTRLSTATUS));
-		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
-			readl_relaxed(dev->conf +
-			ep_link_ctrlstts_offset));
+		dev->l1_supported = false;
 		break;
-	case 8: /* enable L1 */
+	case MSM_PCIE_ENABLE_L1:
 		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L1\n\n",
 			dev->rc_idx);
-		msm_pcie_write_mask(dev->dm_core +
-				PCIE20_CAP_LINKCTRLSTATUS,
-				0, BIT(1));
-		msm_pcie_write_mask(dev->conf +
-				ep_link_ctrlstts_offset,
-				0, BIT(1));
-		if (dev->shadow_en) {
-			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
-					readl_relaxed(dev->dm_core +
-					PCIE20_CAP_LINKCTRLSTATUS);
-			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
-					readl_relaxed(dev->conf +
-					ep_link_ctrlstts_offset);
+		dev->l1_supported = true;
+		if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
+			struct pci_bus *bus, *c_bus;
+			struct list_head *children = &dev->dev->bus->children;
+
+			list_for_each_entry_safe(bus, c_bus, children, node)
+				pci_walk_bus(bus,
+					&msm_pcie_config_l1_enable, dev);
+
+			msm_pcie_config_l1_enable(dev->dev, dev);
 		}
-		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
-			readl_relaxed(dev->dm_core +
-			PCIE20_CAP_LINKCTRLSTATUS));
-		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
-			readl_relaxed(dev->conf +
-			ep_link_ctrlstts_offset));
 		break;
-	case 9: /* disable L1ss */
+	case MSM_PCIE_DISABLE_L1SS:
 		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L1ss\n\n",
 			dev->rc_idx);
-		current_offset = PCIE_EXT_CAP_OFFSET;
-		while (current_offset) {
-			val = readl_relaxed(dev->conf + current_offset);
-			if ((val & 0xffff) == L1SUB_CAP_ID) {
-				ep_l1sub_ctrl1_offset =
-						current_offset + 0x8;
-				break;
-			}
-			current_offset = val >> 20;
-		}
-		if (!ep_l1sub_ctrl1_offset) {
-			PCIE_DBG_FS(dev,
-				"PCIe: RC%d endpoint does not support l1ss registers\n",
-				dev->rc_idx);
-			break;
-		}
+		if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
+			struct pci_bus *bus, *c_bus;
+			struct list_head *children = &dev->dev->bus->children;
 
-		PCIE_DBG_FS(dev, "PCIe: RC%d: ep_l1sub_ctrl1_offset: 0x%x\n",
-				dev->rc_idx, ep_l1sub_ctrl1_offset);
+			msm_pcie_config_l1ss_disable(dev->dev, dev);
 
-		msm_pcie_write_reg_field(dev->dm_core,
-					PCIE20_L1SUB_CONTROL1,
-					0xf, 0);
-		msm_pcie_write_mask(dev->dm_core +
-					PCIE20_DEVICE_CONTROL2_STATUS2,
-					BIT(10), 0);
-		msm_pcie_write_reg_field(dev->conf,
-					ep_l1sub_ctrl1_offset,
-					0xf, 0);
-		msm_pcie_write_mask(dev->conf +
-					ep_dev_ctrl2stts2_offset,
-					BIT(10), 0);
-		if (dev->shadow_en) {
-			dev->rc_shadow[PCIE20_L1SUB_CONTROL1 / 4] =
-				readl_relaxed(dev->dm_core +
-				PCIE20_L1SUB_CONTROL1);
-			dev->rc_shadow[PCIE20_DEVICE_CONTROL2_STATUS2 / 4] =
-				readl_relaxed(dev->dm_core +
-				PCIE20_DEVICE_CONTROL2_STATUS2);
-			dev->ep_shadow[0][ep_l1sub_ctrl1_offset / 4] =
-				readl_relaxed(dev->conf +
-				ep_l1sub_ctrl1_offset);
-			dev->ep_shadow[0][ep_dev_ctrl2stts2_offset / 4] =
-				readl_relaxed(dev->conf +
-				ep_dev_ctrl2stts2_offset);
+			list_for_each_entry_safe(bus, c_bus, children, node)
+				pci_walk_bus(bus,
+					&msm_pcie_config_l1ss_disable, dev);
 		}
-		PCIE_DBG_FS(dev, "PCIe: RC's L1SUB_CONTROL1:0x%x\n",
-			readl_relaxed(dev->dm_core +
-			PCIE20_L1SUB_CONTROL1));
-		PCIE_DBG_FS(dev, "PCIe: RC's DEVICE_CONTROL2_STATUS2:0x%x\n",
-			readl_relaxed(dev->dm_core +
-			PCIE20_DEVICE_CONTROL2_STATUS2));
-		PCIE_DBG_FS(dev, "PCIe: EP's L1SUB_CONTROL1:0x%x\n",
-			readl_relaxed(dev->conf +
-			ep_l1sub_ctrl1_offset));
-		PCIE_DBG_FS(dev, "PCIe: EP's DEVICE_CONTROL2_STATUS2:0x%x\n",
-			readl_relaxed(dev->conf +
-			ep_dev_ctrl2stts2_offset));
+		dev->l1ss_supported = false;
 		break;
-	case 10: /* enable L1ss */
+	case MSM_PCIE_ENABLE_L1SS:
 		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L1ss\n\n",
 			dev->rc_idx);
-		current_offset = PCIE_EXT_CAP_OFFSET;
-		while (current_offset) {
-			val = readl_relaxed(dev->conf + current_offset);
-			if ((val & 0xffff) == L1SUB_CAP_ID) {
-				ep_l1sub_cap_reg1_offset =
-						current_offset + 0x4;
-				ep_l1sub_ctrl1_offset =
-						current_offset + 0x8;
-				break;
-			}
-			current_offset = val >> 20;
+		dev->l1ss_supported = true;
+		if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
+			struct pci_bus *bus, *c_bus;
+			struct list_head *children = &dev->dev->bus->children;
+
+			list_for_each_entry_safe(bus, c_bus, children, node)
+				pci_walk_bus(bus,
+					&msm_pcie_config_l1ss_enable, dev);
+
+			msm_pcie_config_l1ss_enable(dev->dev, dev);
 		}
-		if (!ep_l1sub_ctrl1_offset) {
-			PCIE_DBG_FS(dev,
-				"PCIe: RC%d endpoint does not support l1ss registers\n",
-				dev->rc_idx);
-			break;
-		}
-
-		val = readl_relaxed(dev->conf +
-				ep_l1sub_cap_reg1_offset);
-
-		PCIE_DBG_FS(dev, "PCIe: EP's L1SUB_CAPABILITY_REG_1: 0x%x\n",
-			val);
-		PCIE_DBG_FS(dev, "PCIe: RC%d: ep_l1sub_ctrl1_offset: 0x%x\n",
-			dev->rc_idx, ep_l1sub_ctrl1_offset);
-
-		val &= 0xf;
-
-		msm_pcie_write_reg_field(dev->dm_core,
-					PCIE20_L1SUB_CONTROL1,
-					0xf, val);
-		msm_pcie_write_mask(dev->dm_core +
-					PCIE20_DEVICE_CONTROL2_STATUS2,
-					0, BIT(10));
-		msm_pcie_write_reg_field(dev->conf,
-					ep_l1sub_ctrl1_offset,
-					0xf, val);
-		msm_pcie_write_mask(dev->conf +
-					ep_dev_ctrl2stts2_offset,
-					0, BIT(10));
-		if (dev->shadow_en) {
-			dev->rc_shadow[PCIE20_L1SUB_CONTROL1 / 4] =
-				readl_relaxed(dev->dm_core +
-					PCIE20_L1SUB_CONTROL1);
-			dev->rc_shadow[PCIE20_DEVICE_CONTROL2_STATUS2 / 4] =
-				readl_relaxed(dev->dm_core +
-				PCIE20_DEVICE_CONTROL2_STATUS2);
-			dev->ep_shadow[0][ep_l1sub_ctrl1_offset / 4] =
-				readl_relaxed(dev->conf +
-				ep_l1sub_ctrl1_offset);
-			dev->ep_shadow[0][ep_dev_ctrl2stts2_offset / 4] =
-				readl_relaxed(dev->conf +
-				ep_dev_ctrl2stts2_offset);
-		}
-		PCIE_DBG_FS(dev, "PCIe: RC's L1SUB_CONTROL1:0x%x\n",
-			readl_relaxed(dev->dm_core +
-			PCIE20_L1SUB_CONTROL1));
-		PCIE_DBG_FS(dev, "PCIe: RC's DEVICE_CONTROL2_STATUS2:0x%x\n",
-			readl_relaxed(dev->dm_core +
-			PCIE20_DEVICE_CONTROL2_STATUS2));
-		PCIE_DBG_FS(dev, "PCIe: EP's L1SUB_CONTROL1:0x%x\n",
-			readl_relaxed(dev->conf +
-			ep_l1sub_ctrl1_offset));
-		PCIE_DBG_FS(dev, "PCIe: EP's DEVICE_CONTROL2_STATUS2:0x%x\n",
-			readl_relaxed(dev->conf +
-			ep_dev_ctrl2stts2_offset));
 		break;
-	case 11: /* enumerate PCIe  */
+	case MSM_PCIE_ENUMERATION:
 		PCIE_DBG_FS(dev, "\n\nPCIe: attempting to enumerate RC%d\n\n",
 			dev->rc_idx);
 		if (dev->enumerated)
@@ -1596,7 +1508,41 @@
 					dev->rc_idx);
 		}
 		break;
-	case 12: /* write a value to a register */
+	case MSM_PCIE_READ_PCIE_REGISTER:
+		PCIE_DBG_FS(dev,
+			"\n\nPCIe: RC%d: read a PCIe register\n\n",
+			dev->rc_idx);
+		if (!base_sel) {
+			PCIE_DBG_FS(dev, "Invalid base_sel: 0x%x\n", base_sel);
+			break;
+		}
+
+		PCIE_DBG_FS(dev, "base: %s: 0x%pK\nwr_offset: 0x%x\n",
+			dev->res[base_sel - 1].name,
+			dev->res[base_sel - 1].base,
+			wr_offset);
+
+		base_sel_size = resource_size(dev->res[base_sel - 1].resource);
+
+		if (wr_offset >  base_sel_size - 4 ||
+			msm_pcie_check_align(dev, wr_offset)) {
+			PCIE_DBG_FS(dev,
+				"PCIe: RC%d: Invalid wr_offset: 0x%x. wr_offset should be no more than 0x%x\n",
+				dev->rc_idx, wr_offset, base_sel_size - 4);
+		} else {
+			phys_addr_t wr_register =
+				dev->res[MSM_PCIE_RES_DM_CORE].resource->start;
+
+			wr_register += wr_offset;
+			PCIE_DBG_FS(dev,
+				"PCIe: RC%d: register: 0x%pa value: 0x%x\n",
+				dev->rc_idx, &wr_register,
+				readl_relaxed(dev->res[base_sel - 1].base +
+					wr_offset));
+		}
+
+		break;
+	case MSM_PCIE_WRITE_PCIE_REGISTER:
 		PCIE_DBG_FS(dev,
 			"\n\nPCIe: RC%d: writing a value to a register\n\n",
 			dev->rc_idx);
@@ -1607,7 +1553,7 @@
 		}
 
 		PCIE_DBG_FS(dev,
-			"base: %s: 0x%p\nwr_offset: 0x%x\nwr_mask: 0x%x\nwr_value: 0x%x\n",
+			"base: %s: 0x%pK\nwr_offset: 0x%x\nwr_mask: 0x%x\nwr_value: 0x%x\n",
 			dev->res[base_sel - 1].name,
 			dev->res[base_sel - 1].base,
 			wr_offset, wr_mask, wr_value);
@@ -1624,7 +1570,7 @@
 				wr_offset, wr_mask, wr_value);
 
 		break;
-	case 13: /* dump all registers of base_sel */
+	case MSM_PCIE_DUMP_PCIE_REGISTER_SPACE:
 		if (!base_sel) {
 			PCIE_DBG_FS(dev, "Invalid base_sel: 0x%x\n", base_sel);
 			break;
@@ -1657,7 +1603,7 @@
 			readl_relaxed(dev->res[base_sel - 1].base + (i + 28)));
 		}
 		break;
-	case 14:
+	case MSM_PCIE_ALLOCATE_DDR_MAP_LBAR:
 		PCIE_DBG_FS(dev,
 			"PCIe: RC%d: Allocate 4K DDR memory and map LBAR.\n",
 			dev->rc_idx);
@@ -1673,25 +1619,25 @@
 				"PCIe: RC%d: VIR DDR memory address: 0x%pK\n",
 				dev->rc_idx, loopback_ddr_vir);
 			PCIE_DBG_FS(dev,
-				"PCIe: RC%d: PHY DDR memory address: 0x%llx\n",
-				dev->rc_idx, loopback_ddr_phy);
+				"PCIe: RC%d: PHY DDR memory address: %pad\n",
+				dev->rc_idx, &loopback_ddr_phy);
 		}
 
-		PCIE_DBG_FS(dev, "PCIe: RC%d: map LBAR: 0x%llx\n",
-			dev->rc_idx, loopback_lbar_phy);
+		PCIE_DBG_FS(dev, "PCIe: RC%d: map LBAR: %pa\n",
+			dev->rc_idx, &loopback_lbar_phy);
 		loopback_lbar_vir = devm_ioremap(&dev->pdev->dev,
 			loopback_lbar_phy, SZ_4K);
 		if (!loopback_lbar_vir) {
-			PCIE_DBG_FS(dev, "PCIe: RC%d: failed to map 0x%llx\n",
-				dev->rc_idx, loopback_lbar_phy);
+			PCIE_DBG_FS(dev, "PCIe: RC%d: failed to map %pa\n",
+				dev->rc_idx, &loopback_lbar_phy);
 		} else {
 			PCIE_DBG_FS(dev,
-				"PCIe: RC%d: successfully mapped 0x%llx to 0x%pK\n",
-				dev->rc_idx, loopback_lbar_phy,
+				"PCIe: RC%d: successfully mapped %pa to 0x%pK\n",
+				dev->rc_idx, &loopback_lbar_phy,
 				loopback_lbar_vir);
 		}
 		break;
-	case 15:
+	case MSM_PCIE_FREE_DDR_UNMAP_LBAR:
 		PCIE_DBG_FS(dev,
 			"PCIe: RC%d: Release 4K DDR memory and unmap LBAR.\n",
 			dev->rc_idx);
@@ -1708,7 +1654,7 @@
 			loopback_lbar_vir = NULL;
 		}
 		break;
-	case 16:
+	case MSM_PCIE_OUTPUT_DDR_LBAR_ADDRESS:
 		PCIE_DBG_FS(dev,
 			"PCIe: RC%d: Print DDR and LBAR addresses.\n",
 			dev->rc_idx);
@@ -1721,19 +1667,19 @@
 		}
 
 		PCIE_DBG_FS(dev,
-			"PCIe: RC%d: PHY DDR address: 0x%llx\n",
-			dev->rc_idx, loopback_ddr_phy);
+			"PCIe: RC%d: PHY DDR address: %pad\n",
+			dev->rc_idx, &loopback_ddr_phy);
 		PCIE_DBG_FS(dev,
 			"PCIe: RC%d: VIR DDR address: 0x%pK\n",
 			dev->rc_idx, loopback_ddr_vir);
 		PCIE_DBG_FS(dev,
-			"PCIe: RC%d: PHY LBAR address: 0x%llx\n",
-			dev->rc_idx, loopback_lbar_phy);
+			"PCIe: RC%d: PHY LBAR address: %pa\n",
+			dev->rc_idx, &loopback_lbar_phy);
 		PCIE_DBG_FS(dev,
 			"PCIe: RC%d: VIR LBAR address: 0x%pK\n",
 			dev->rc_idx, loopback_lbar_vir);
 		break;
-	case 17:
+	case MSM_PCIE_CONFIGURE_LOOPBACK:
 		PCIE_DBG_FS(dev,
 			"PCIe: RC%d: Configure Loopback.\n",
 			dev->rc_idx);
@@ -1741,7 +1687,7 @@
 		writel_relaxed(0x10000,
 			dev->dm_core + PCIE20_GEN3_RELATED_REG);
 		PCIE_DBG_FS(dev,
-			"PCIe: RC%d: 0x%llx: 0x%x\n",
+			"PCIe: RC%d: 0x%x: 0x%x\n",
 			dev->rc_idx,
 			dbi_base_addr + PCIE20_GEN3_RELATED_REG,
 			readl_relaxed(dev->dm_core +
@@ -1750,7 +1696,7 @@
 		writel_relaxed(0x80000001,
 			dev->dm_core + PCIE20_PIPE_LOOPBACK_CONTROL);
 		PCIE_DBG_FS(dev,
-			"PCIe: RC%d: 0x%llx: 0x%x\n",
+			"PCIe: RC%d: 0x%x: 0x%x\n",
 			dev->rc_idx,
 			dbi_base_addr + PCIE20_PIPE_LOOPBACK_CONTROL,
 			readl_relaxed(dev->dm_core +
@@ -1759,13 +1705,13 @@
 		writel_relaxed(0x00010124,
 			dev->dm_core + PCIE20_PORT_LINK_CTRL_REG);
 		PCIE_DBG_FS(dev,
-			"PCIe: RC%d: 0x%llx: 0x%x\n",
+			"PCIe: RC%d: 0x%x: 0x%x\n",
 			dev->rc_idx,
 			dbi_base_addr + PCIE20_PORT_LINK_CTRL_REG,
 			readl_relaxed(dev->dm_core +
 				PCIE20_PORT_LINK_CTRL_REG));
 		break;
-	case 18:
+	case MSM_PCIE_SETUP_LOOPBACK_IATU:
 		PCIE_DBG_FS(dev, "PCIe: RC%d: Setup iATU.\n", dev->rc_idx);
 
 		if (!loopback_ddr_vir) {
@@ -1777,57 +1723,57 @@
 
 		writel_relaxed(0x0, dev->dm_core + PCIE20_PLR_IATU_VIEWPORT);
 		PCIE_DBG_FS(dev,
-			"PCIe: RC%d: PCIE20_PLR_IATU_VIEWPORT:\t0x%llx: 0x%x\n",
+			"PCIe: RC%d: PCIE20_PLR_IATU_VIEWPORT:\t0x%x: 0x%x\n",
 			dev->rc_idx, dbi_base_addr + PCIE20_PLR_IATU_VIEWPORT,
 			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_VIEWPORT));
 
 		writel_relaxed(0x0, dev->dm_core + PCIE20_PLR_IATU_CTRL1);
 		PCIE_DBG_FS(dev,
-			"PCIe: RC%d: PCIE20_PLR_IATU_CTRL1:\t0x%llx: 0x%x\n",
+			"PCIe: RC%d: PCIE20_PLR_IATU_CTRL1:\t0x%x: 0x%x\n",
 			dev->rc_idx, dbi_base_addr + PCIE20_PLR_IATU_CTRL1,
 			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL1));
 
 		writel_relaxed(loopback_lbar_phy,
 			dev->dm_core + PCIE20_PLR_IATU_LBAR);
 		PCIE_DBG_FS(dev,
-			"PCIe: RC%d: PCIE20_PLR_IATU_LBAR:\t0x%llx: 0x%x\n",
+			"PCIe: RC%d: PCIE20_PLR_IATU_LBAR:\t0x%x: 0x%x\n",
 			dev->rc_idx, dbi_base_addr + PCIE20_PLR_IATU_LBAR,
 			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LBAR));
 
 		writel_relaxed(0x0, dev->dm_core + PCIE20_PLR_IATU_UBAR);
 		PCIE_DBG_FS(dev,
-			"PCIe: RC%d: PCIE20_PLR_IATU_UBAR:\t0x%llx: 0x%x\n",
+			"PCIe: RC%d: PCIE20_PLR_IATU_UBAR:\t0x%x: 0x%x\n",
 			dev->rc_idx, dbi_base_addr + PCIE20_PLR_IATU_UBAR,
 			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UBAR));
 
 		writel_relaxed(loopback_lbar_phy + 0xfff,
 			dev->dm_core + PCIE20_PLR_IATU_LAR);
 		PCIE_DBG_FS(dev,
-			"PCIe: RC%d: PCIE20_PLR_IATU_LAR:\t0x%llx: 0x%x\n",
+			"PCIe: RC%d: PCIE20_PLR_IATU_LAR:\t0x%x: 0x%x\n",
 			dev->rc_idx, dbi_base_addr + PCIE20_PLR_IATU_LAR,
 			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LAR));
 
 		writel_relaxed(loopback_ddr_phy,
 			dev->dm_core + PCIE20_PLR_IATU_LTAR);
 		PCIE_DBG_FS(dev,
-			"PCIe: RC%d: PCIE20_PLR_IATU_LTAR:\t0x%llx: 0x%x\n",
+			"PCIe: RC%d: PCIE20_PLR_IATU_LTAR:\t0x%x: 0x%x\n",
 			dev->rc_idx, dbi_base_addr + PCIE20_PLR_IATU_LTAR,
 			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LTAR));
 
 		writel_relaxed(0, dev->dm_core + PCIE20_PLR_IATU_UTAR);
 		PCIE_DBG_FS(dev,
-			"PCIe: RC%d: PCIE20_PLR_IATU_UTAR:\t0x%llx: 0x%x\n",
+			"PCIe: RC%d: PCIE20_PLR_IATU_UTAR:\t0x%x: 0x%x\n",
 			dev->rc_idx, dbi_base_addr + PCIE20_PLR_IATU_UTAR,
 			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UTAR));
 
 		writel_relaxed(0x80000000,
 			dev->dm_core + PCIE20_PLR_IATU_CTRL2);
 		PCIE_DBG_FS(dev,
-			"PCIe: RC%d: PCIE20_PLR_IATU_CTRL2:\t0x%llx: 0x%x\n",
+			"PCIe: RC%d: PCIE20_PLR_IATU_CTRL2:\t0x%x: 0x%x\n",
 			dev->rc_idx, dbi_base_addr + PCIE20_PLR_IATU_CTRL2,
 			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL2));
 		break;
-	case 19:
+	case MSM_PCIE_READ_DDR:
 		PCIE_DBG_FS(dev,
 			"PCIe: RC%d: Read DDR values.\n",
 			dev->rc_idx);
@@ -1853,7 +1799,7 @@
 				loopback_ddr_vir[i + 7]);
 		}
 		break;
-	case 20:
+	case MSM_PCIE_READ_LBAR:
 		PCIE_DBG_FS(dev,
 			"PCIe: RC%d: Read LBAR values.\n",
 			dev->rc_idx);
@@ -1879,7 +1825,7 @@
 				readl_relaxed(loopback_lbar_vir + (i + 28)));
 		}
 		break;
-	case 21:
+	case MSM_PCIE_WRITE_DDR:
 		PCIE_DBG_FS(dev, "PCIe: RC%d: Write 0x%x to DDR.\n",
 			dev->rc_idx, loopback_val);
 
@@ -1898,7 +1844,7 @@
 		else
 			loopback_val++;
 		break;
-	case 22:
+	case MSM_PCIE_WRITE_LBAR:
 		PCIE_DBG_FS(dev, "PCIe: RC%d: Write 0x%x to LBAR.\n",
 			dev->rc_idx, loopback_val);
 
@@ -1933,6 +1879,53 @@
 		else
 			loopback_val++;
 		break;
+	case MSM_PCIE_DISABLE_AER:
+		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: clear AER enable flag\n\n",
+			dev->rc_idx);
+		dev->aer_enable = false;
+		break;
+	case MSM_PCIE_ENABLE_AER:
+		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: set AER enable flag\n\n",
+			dev->rc_idx);
+		dev->aer_enable = true;
+		break;
+	case MSM_PCIE_GPIO_STATUS:
+		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: PERST and WAKE status\n\n",
+			dev->rc_idx);
+		PCIE_DBG_FS(dev,
+			"PCIe: RC%d: PERST: gpio%u value: %d\n",
+			dev->rc_idx, dev->gpio[MSM_PCIE_GPIO_PERST].num,
+			gpio_get_value(dev->gpio[MSM_PCIE_GPIO_PERST].num));
+		PCIE_DBG_FS(dev,
+			"PCIe: RC%d: WAKE: gpio%u value: %d\n",
+			dev->rc_idx, dev->gpio[MSM_PCIE_GPIO_WAKE].num,
+			gpio_get_value(dev->gpio[MSM_PCIE_GPIO_WAKE].num));
+		break;
+	case MSM_PCIE_ASSERT_PERST:
+		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: assert PERST\n\n",
+			dev->rc_idx);
+		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
+					dev->gpio[MSM_PCIE_GPIO_PERST].on);
+		usleep_range(dev->perst_delay_us_min, dev->perst_delay_us_max);
+		break;
+	case MSM_PCIE_DEASSERT_PERST:
+		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: de-assert PERST\n\n",
+			dev->rc_idx);
+		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
+					1 - dev->gpio[MSM_PCIE_GPIO_PERST].on);
+		usleep_range(dev->perst_delay_us_min, dev->perst_delay_us_max);
+		break;
+	case MSM_PCIE_KEEP_RESOURCES_ON:
+		PCIE_DBG_FS(dev,
+			"\n\nPCIe: RC%d: set keep resources on flag\n\n",
+			dev->rc_idx);
+		msm_pcie_keep_resources_on |= BIT(dev->rc_idx);
+		break;
+	case MSM_PCIE_FORCE_GEN1:
+		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: set force gen1 flag\n\n",
+			dev->rc_idx);
+		msm_pcie_force_gen1 |= BIT(dev->rc_idx);
+		break;
 	default:
 		PCIE_DBG_FS(dev, "Invalid testcase: %d.\n", testcase);
 		break;
@@ -1950,8 +1943,10 @@
 		return -ENODEV;
 	}
 
-	if (option == 12 || option == 13) {
-		if (!base || base > 5) {
+	if (option == MSM_PCIE_READ_PCIE_REGISTER ||
+		option == MSM_PCIE_WRITE_PCIE_REGISTER ||
+		option == MSM_PCIE_DUMP_PCIE_REGISTER_SPACE) {
+		if (!base || base >= MSM_PCIE_MAX_RES) {
 			PCIE_DBG_FS(pdev, "Invalid base_sel: 0x%x\n", base);
 			PCIE_DBG_FS(pdev,
 				"PCIe: base_sel is still 0x%x\n", base_sel);
@@ -1961,7 +1956,8 @@
 		base_sel = base;
 		PCIE_DBG_FS(pdev, "PCIe: base_sel is now 0x%x\n", base_sel);
 
-		if (option == 12) {
+		if (option == MSM_PCIE_READ_PCIE_REGISTER ||
+			option == MSM_PCIE_WRITE_PCIE_REGISTER) {
 			wr_offset = offset;
 			wr_mask = mask;
 			wr_value = value;
@@ -1976,7 +1972,7 @@
 	}
 
 	pdev = PCIE_BUS_PRIV_DATA(dev->bus);
-	rc_sel = 1 << pdev->rc_idx;
+	rc_sel = BIT(pdev->rc_idx);
 
 	msm_pcie_sel_debug_testcase(pdev, option);
 
@@ -2041,59 +2037,87 @@
 
 static u32 rc_sel_max;
 
-static ssize_t msm_pcie_cmd_debug(struct file *file,
+static int msm_pcie_debugfs_parse_input(const char __user *buf,
+					size_t count, unsigned int *data)
+{
+	unsigned long ret;
+	char *str, *str_temp;
+
+	str = kmalloc(count + 1, GFP_KERNEL);
+	if (!str)
+		return -ENOMEM;
+
+	ret = copy_from_user(str, buf, count);
+	if (ret) {
+		kfree(str);
+		return -EFAULT;
+	}
+
+	str[count] = 0;
+	str_temp = str;
+
+	ret = get_option(&str_temp, data);
+	kfree(str);
+	if (ret != 1)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int msm_pcie_debugfs_case_show(struct seq_file *m, void *v)
+{
+	int i;
+
+	for (i = 0; i < MSM_PCIE_MAX_DEBUGFS_OPTION; i++)
+		seq_printf(m, "\t%d:\t %s\n", i,
+			msm_pcie_debugfs_option_desc[i]);
+
+	return 0;
+}
+
+static int msm_pcie_debugfs_case_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, msm_pcie_debugfs_case_show, NULL);
+}
+
+static ssize_t msm_pcie_debugfs_case_select(struct file *file,
 				const char __user *buf,
 				size_t count, loff_t *ppos)
 {
-	unsigned long ret;
-	char str[MAX_MSG_LEN];
+	int i, ret;
 	unsigned int testcase = 0;
-	int i;
-	u32 size = sizeof(str) < count ? sizeof(str) : count;
 
-	memset(str, 0, size);
-	ret = copy_from_user(str, buf, size);
+	ret = msm_pcie_debugfs_parse_input(buf, count, &testcase);
 	if (ret)
-		return -EFAULT;
-
-	for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
-		testcase = (testcase * 10) + (str[i] - '0');
-
-	if (!rc_sel)
-		rc_sel = 1;
+		return ret;
 
 	pr_alert("PCIe: TEST: %d\n", testcase);
 
 	for (i = 0; i < MAX_RC_NUM; i++) {
-		if (!((rc_sel >> i) & 0x1))
-			continue;
-		msm_pcie_sel_debug_testcase(&msm_pcie_dev[i], testcase);
+		if (rc_sel & BIT(i))
+			msm_pcie_sel_debug_testcase(&msm_pcie_dev[i], testcase);
 	}
 
 	return count;
 }
 
-static const struct file_operations msm_pcie_cmd_debug_ops = {
-	.write = msm_pcie_cmd_debug,
+static const struct file_operations msm_pcie_debugfs_case_ops = {
+	.open = msm_pcie_debugfs_case_open,
+	.release = single_release,
+	.read = seq_read,
+	.write = msm_pcie_debugfs_case_select,
 };
 
-static ssize_t msm_pcie_set_rc_sel(struct file *file,
+static ssize_t msm_pcie_debugfs_rc_select(struct file *file,
 				const char __user *buf,
 				size_t count, loff_t *ppos)
 {
-	unsigned long ret;
-	char str[MAX_MSG_LEN];
-	int i;
+	int i, ret;
 	u32 new_rc_sel = 0;
-	u32 size = sizeof(str) < count ? sizeof(str) : count;
 
-	memset(str, 0, size);
-	ret = copy_from_user(str, buf, size);
+	ret = msm_pcie_debugfs_parse_input(buf, count, &new_rc_sel);
 	if (ret)
-		return -EFAULT;
-
-	for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
-		new_rc_sel = (new_rc_sel * 10) + (str[i] - '0');
+		return ret;
 
 	if ((!new_rc_sel) || (new_rc_sel > rc_sel_max)) {
 		pr_alert("PCIe: invalid value for rc_sel: 0x%x\n", new_rc_sel);
@@ -2104,232 +2128,151 @@
 	}
 
 	pr_alert("PCIe: the following RC(s) will be tested:\n");
-	for (i = 0; i < MAX_RC_NUM; i++) {
-		if (!rc_sel) {
+	for (i = 0; i < MAX_RC_NUM; i++)
+		if (rc_sel & BIT(i))
 			pr_alert("RC %d\n", i);
-			break;
-		} else if (rc_sel & (1 << i)) {
-			pr_alert("RC %d\n", i);
-		}
-	}
 
 	return count;
 }
 
-static const struct file_operations msm_pcie_rc_sel_ops = {
-	.write = msm_pcie_set_rc_sel,
+static const struct file_operations msm_pcie_debugfs_rc_select_ops = {
+	.write = msm_pcie_debugfs_rc_select,
 };
 
-static ssize_t msm_pcie_set_base_sel(struct file *file,
+static ssize_t msm_pcie_debugfs_base_select(struct file *file,
 				const char __user *buf,
 				size_t count, loff_t *ppos)
 {
-	unsigned long ret;
-	char str[MAX_MSG_LEN];
-	int i;
+	int ret;
 	u32 new_base_sel = 0;
-	char *base_sel_name;
-	u32 size = sizeof(str) < count ? sizeof(str) : count;
 
-	memset(str, 0, size);
-	ret = copy_from_user(str, buf, size);
+	ret = msm_pcie_debugfs_parse_input(buf, count, &new_base_sel);
 	if (ret)
-		return -EFAULT;
+		return ret;
 
-	for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
-		new_base_sel = (new_base_sel * 10) + (str[i] - '0');
-
-	if (!new_base_sel || new_base_sel > 5) {
+	if (!new_base_sel || new_base_sel > MSM_PCIE_MAX_RES) {
 		pr_alert("PCIe: invalid value for base_sel: 0x%x\n",
 			new_base_sel);
 		pr_alert("PCIe: base_sel is still 0x%x\n", base_sel);
 	} else {
 		base_sel = new_base_sel;
 		pr_alert("PCIe: base_sel is now 0x%x\n", base_sel);
+		pr_alert("%s\n", msm_pcie_res_info[base_sel - 1].name);
 	}
 
-	switch (base_sel) {
-	case 1:
-		base_sel_name = "PARF";
-		break;
-	case 2:
-		base_sel_name = "PHY";
-		break;
-	case 3:
-		base_sel_name = "RC CONFIG SPACE";
-		break;
-	case 4:
-		base_sel_name = "ELBI";
-		break;
-	case 5:
-		base_sel_name = "EP CONFIG SPACE";
-		break;
-	default:
-		base_sel_name = "INVALID";
-		break;
-	}
-
-	pr_alert("%s\n", base_sel_name);
-
 	return count;
 }
 
-static const struct file_operations msm_pcie_base_sel_ops = {
-	.write = msm_pcie_set_base_sel,
+static const struct file_operations msm_pcie_debugfs_base_select_ops = {
+	.write = msm_pcie_debugfs_base_select,
 };
 
-static ssize_t msm_pcie_set_linkdown_panic(struct file *file,
+static ssize_t msm_pcie_debugfs_linkdown_panic(struct file *file,
 				const char __user *buf,
 				size_t count, loff_t *ppos)
 {
-	unsigned long ret;
-	char str[MAX_MSG_LEN];
+	int i, ret;
 	u32 new_linkdown_panic = 0;
-	int i;
 
-	memset(str, 0, sizeof(str));
-	ret = copy_from_user(str, buf, sizeof(str));
+	ret = msm_pcie_debugfs_parse_input(buf, count, &new_linkdown_panic);
 	if (ret)
-		return -EFAULT;
+		return ret;
 
-	for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
-		new_linkdown_panic = (new_linkdown_panic * 10) + (str[i] - '0');
+	new_linkdown_panic = !!new_linkdown_panic;
 
-	if (new_linkdown_panic <= 1) {
-		for (i = 0; i < MAX_RC_NUM; i++) {
-			if (!rc_sel) {
-				msm_pcie_dev[0].linkdown_panic =
-					new_linkdown_panic;
-				PCIE_DBG_FS(&msm_pcie_dev[0],
-					"PCIe: RC0: linkdown_panic is now %d\n",
-					msm_pcie_dev[0].linkdown_panic);
-				break;
-			} else if (rc_sel & (1 << i)) {
-				msm_pcie_dev[i].linkdown_panic =
-					new_linkdown_panic;
-				PCIE_DBG_FS(&msm_pcie_dev[i],
-					"PCIe: RC%d: linkdown_panic is now %d\n",
-					i, msm_pcie_dev[i].linkdown_panic);
-			}
+	for (i = 0; i < MAX_RC_NUM; i++) {
+		if (rc_sel & BIT(i)) {
+			msm_pcie_dev[i].linkdown_panic =
+				new_linkdown_panic;
+			PCIE_DBG_FS(&msm_pcie_dev[i],
+				"PCIe: RC%d: linkdown_panic is now %d\n",
+				i, msm_pcie_dev[i].linkdown_panic);
 		}
-	} else {
-		pr_err("PCIe: Invalid input for linkdown_panic: %d. Please enter 0 or 1.\n",
-			new_linkdown_panic);
 	}
 
 	return count;
 }
 
-static const struct file_operations msm_pcie_linkdown_panic_ops = {
-	.write = msm_pcie_set_linkdown_panic,
+static const struct file_operations msm_pcie_debugfs_linkdown_panic_ops = {
+	.write = msm_pcie_debugfs_linkdown_panic,
 };
 
-static ssize_t msm_pcie_set_wr_offset(struct file *file,
+static ssize_t msm_pcie_debugfs_wr_offset(struct file *file,
 				const char __user *buf,
 				size_t count, loff_t *ppos)
 {
-	unsigned long ret;
-	char str[MAX_MSG_LEN];
-	int i;
-	u32 size = sizeof(str) < count ? sizeof(str) : count;
-
-	memset(str, 0, size);
-	ret = copy_from_user(str, buf, size);
-	if (ret)
-		return -EFAULT;
+	int ret;
 
 	wr_offset = 0;
-	for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
-		wr_offset = (wr_offset * 10) + (str[i] - '0');
+
+	ret = msm_pcie_debugfs_parse_input(buf, count, &wr_offset);
+	if (ret)
+		return ret;
 
 	pr_alert("PCIe: wr_offset is now 0x%x\n", wr_offset);
 
 	return count;
 }
 
-static const struct file_operations msm_pcie_wr_offset_ops = {
-	.write = msm_pcie_set_wr_offset,
+static const struct file_operations msm_pcie_debugfs_wr_offset_ops = {
+	.write = msm_pcie_debugfs_wr_offset,
 };
 
-static ssize_t msm_pcie_set_wr_mask(struct file *file,
+static ssize_t msm_pcie_debugfs_wr_mask(struct file *file,
 				const char __user *buf,
 				size_t count, loff_t *ppos)
 {
-	unsigned long ret;
-	char str[MAX_MSG_LEN];
-	int i;
-	u32 size = sizeof(str) < count ? sizeof(str) : count;
-
-	memset(str, 0, size);
-	ret = copy_from_user(str, buf, size);
-	if (ret)
-		return -EFAULT;
+	int ret;
 
 	wr_mask = 0;
-	for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
-		wr_mask = (wr_mask * 10) + (str[i] - '0');
+
+	ret = msm_pcie_debugfs_parse_input(buf, count, &wr_mask);
+	if (ret)
+		return ret;
 
 	pr_alert("PCIe: wr_mask is now 0x%x\n", wr_mask);
 
 	return count;
 }
 
-static const struct file_operations msm_pcie_wr_mask_ops = {
-	.write = msm_pcie_set_wr_mask,
+static const struct file_operations msm_pcie_debugfs_wr_mask_ops = {
+	.write = msm_pcie_debugfs_wr_mask,
 };
-static ssize_t msm_pcie_set_wr_value(struct file *file,
+static ssize_t msm_pcie_debugfs_wr_value(struct file *file,
 				const char __user *buf,
 				size_t count, loff_t *ppos)
 {
-	unsigned long ret;
-	char str[MAX_MSG_LEN];
-	int i;
-	u32 size = sizeof(str) < count ? sizeof(str) : count;
-
-	memset(str, 0, size);
-	ret = copy_from_user(str, buf, size);
-	if (ret)
-		return -EFAULT;
+	int ret;
 
 	wr_value = 0;
-	for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
-		wr_value = (wr_value * 10) + (str[i] - '0');
+
+	ret = msm_pcie_debugfs_parse_input(buf, count, &wr_value);
+	if (ret)
+		return ret;
 
 	pr_alert("PCIe: wr_value is now 0x%x\n", wr_value);
 
 	return count;
 }
 
-static const struct file_operations msm_pcie_wr_value_ops = {
-	.write = msm_pcie_set_wr_value,
+static const struct file_operations msm_pcie_debugfs_wr_value_ops = {
+	.write = msm_pcie_debugfs_wr_value,
 };
 
-static ssize_t msm_pcie_set_boot_option(struct file *file,
+static ssize_t msm_pcie_debugfs_boot_option(struct file *file,
 				const char __user *buf,
 				size_t count, loff_t *ppos)
 {
-	unsigned long ret;
-	char str[MAX_MSG_LEN];
+	int i, ret;
 	u32 new_boot_option = 0;
-	int i;
 
-	memset(str, 0, sizeof(str));
-	ret = copy_from_user(str, buf, sizeof(str));
+	ret = msm_pcie_debugfs_parse_input(buf, count, &new_boot_option);
 	if (ret)
-		return -EFAULT;
+		return ret;
 
-	for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
-		new_boot_option = (new_boot_option * 10) + (str[i] - '0');
-
-	if (new_boot_option <= 1) {
+	if (new_boot_option <= (BIT(0) | BIT(1))) {
 		for (i = 0; i < MAX_RC_NUM; i++) {
-			if (!rc_sel) {
-				msm_pcie_dev[0].boot_option = new_boot_option;
-				PCIE_DBG_FS(&msm_pcie_dev[0],
-					"PCIe: RC0: boot_option is now 0x%x\n",
-					msm_pcie_dev[0].boot_option);
-				break;
-			} else if (rc_sel & (1 << i)) {
+			if (rc_sel & BIT(i)) {
 				msm_pcie_dev[i].boot_option = new_boot_option;
 				PCIE_DBG_FS(&msm_pcie_dev[i],
 					"PCIe: RC%d: boot_option is now 0x%x\n",
@@ -2344,42 +2287,25 @@
 	return count;
 }
 
-static const struct file_operations msm_pcie_boot_option_ops = {
-	.write = msm_pcie_set_boot_option,
+static const struct file_operations msm_pcie_debugfs_boot_option_ops = {
+	.write = msm_pcie_debugfs_boot_option,
 };
 
-static ssize_t msm_pcie_set_aer_enable(struct file *file,
+static ssize_t msm_pcie_debugfs_aer_enable(struct file *file,
 				const char __user *buf,
 				size_t count, loff_t *ppos)
 {
-	unsigned long ret;
-	char str[MAX_MSG_LEN];
+	int i, ret;
 	u32 new_aer_enable = 0;
-	u32 temp_rc_sel;
-	int i;
 
-	memset(str, 0, sizeof(str));
-	ret = copy_from_user(str, buf, sizeof(str));
+	ret = msm_pcie_debugfs_parse_input(buf, count, &new_aer_enable);
 	if (ret)
-		return -EFAULT;
+		return ret;
 
-	for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
-		new_aer_enable = (new_aer_enable * 10) + (str[i] - '0');
-
-	if (new_aer_enable > 1) {
-		pr_err(
-			"PCIe: Invalid input for aer_enable: %d. Please enter 0 or 1.\n",
-			new_aer_enable);
-		return count;
-	}
-
-	if (rc_sel)
-		temp_rc_sel = rc_sel;
-	else
-		temp_rc_sel = 0x1;
+	new_aer_enable = !!new_aer_enable;
 
 	for (i = 0; i < MAX_RC_NUM; i++) {
-		if (temp_rc_sel & (1 << i)) {
+		if (rc_sel & BIT(i)) {
 			msm_pcie_dev[i].aer_enable = new_aer_enable;
 			PCIE_DBG_FS(&msm_pcie_dev[i],
 				"PCIe: RC%d: aer_enable is now %d\n",
@@ -2400,35 +2326,29 @@
 	return count;
 }
 
-static const struct file_operations msm_pcie_aer_enable_ops = {
-	.write = msm_pcie_set_aer_enable,
+static const struct file_operations msm_pcie_debugfs_aer_enable_ops = {
+	.write = msm_pcie_debugfs_aer_enable,
 };
 
-static ssize_t msm_pcie_set_corr_counter_limit(struct file *file,
+static ssize_t msm_pcie_debugfs_corr_counter_limit(struct file *file,
 				const char __user *buf,
 				size_t count, loff_t *ppos)
 {
-	unsigned long ret;
-	char str[MAX_MSG_LEN];
-	int i;
-	u32 size = sizeof(str) < count ? sizeof(str) : count;
-
-	memset(str, 0, size);
-	ret = copy_from_user(str, buf, size);
-	if (ret)
-		return -EFAULT;
+	int ret;
 
 	corr_counter_limit = 0;
-	for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
-		corr_counter_limit = (corr_counter_limit * 10) + (str[i] - '0');
 
-	pr_info("PCIe: corr_counter_limit is now %lu\n", corr_counter_limit);
+	ret = msm_pcie_debugfs_parse_input(buf, count, &corr_counter_limit);
+	if (ret)
+		return ret;
+
+	pr_info("PCIe: corr_counter_limit is now %u\n", corr_counter_limit);
 
 	return count;
 }
 
-static const struct file_operations msm_pcie_corr_counter_limit_ops = {
-	.write = msm_pcie_set_corr_counter_limit,
+static const struct file_operations msm_pcie_debugfs_corr_counter_limit_ops = {
+	.write = msm_pcie_debugfs_corr_counter_limit,
 };
 
 static void msm_pcie_debugfs_init(void)
@@ -2444,7 +2364,7 @@
 
 	dfile_rc_sel = debugfs_create_file("rc_sel", 0664,
 					dent_msm_pcie, NULL,
-					&msm_pcie_rc_sel_ops);
+					&msm_pcie_debugfs_rc_select_ops);
 	if (!dfile_rc_sel || IS_ERR(dfile_rc_sel)) {
 		pr_err("PCIe: fail to create the file for debug_fs rc_sel.\n");
 		goto rc_sel_error;
@@ -2452,7 +2372,7 @@
 
 	dfile_case = debugfs_create_file("case", 0664,
 					dent_msm_pcie, NULL,
-					&msm_pcie_cmd_debug_ops);
+					&msm_pcie_debugfs_case_ops);
 	if (!dfile_case || IS_ERR(dfile_case)) {
 		pr_err("PCIe: fail to create the file for debug_fs case.\n");
 		goto case_error;
@@ -2460,7 +2380,7 @@
 
 	dfile_base_sel = debugfs_create_file("base_sel", 0664,
 					dent_msm_pcie, NULL,
-					&msm_pcie_base_sel_ops);
+					&msm_pcie_debugfs_base_select_ops);
 	if (!dfile_base_sel || IS_ERR(dfile_base_sel)) {
 		pr_err("PCIe: fail to create the file for debug_fs base_sel.\n");
 		goto base_sel_error;
@@ -2468,7 +2388,7 @@
 
 	dfile_linkdown_panic = debugfs_create_file("linkdown_panic", 0644,
 					dent_msm_pcie, NULL,
-					&msm_pcie_linkdown_panic_ops);
+					&msm_pcie_debugfs_linkdown_panic_ops);
 	if (!dfile_linkdown_panic || IS_ERR(dfile_linkdown_panic)) {
 		pr_err("PCIe: fail to create the file for debug_fs linkdown_panic.\n");
 		goto linkdown_panic_error;
@@ -2476,7 +2396,7 @@
 
 	dfile_wr_offset = debugfs_create_file("wr_offset", 0664,
 					dent_msm_pcie, NULL,
-					&msm_pcie_wr_offset_ops);
+					&msm_pcie_debugfs_wr_offset_ops);
 	if (!dfile_wr_offset || IS_ERR(dfile_wr_offset)) {
 		pr_err("PCIe: fail to create the file for debug_fs wr_offset.\n");
 		goto wr_offset_error;
@@ -2484,7 +2404,7 @@
 
 	dfile_wr_mask = debugfs_create_file("wr_mask", 0664,
 					dent_msm_pcie, NULL,
-					&msm_pcie_wr_mask_ops);
+					&msm_pcie_debugfs_wr_mask_ops);
 	if (!dfile_wr_mask || IS_ERR(dfile_wr_mask)) {
 		pr_err("PCIe: fail to create the file for debug_fs wr_mask.\n");
 		goto wr_mask_error;
@@ -2492,7 +2412,7 @@
 
 	dfile_wr_value = debugfs_create_file("wr_value", 0664,
 					dent_msm_pcie, NULL,
-					&msm_pcie_wr_value_ops);
+					&msm_pcie_debugfs_wr_value_ops);
 	if (!dfile_wr_value || IS_ERR(dfile_wr_value)) {
 		pr_err("PCIe: fail to create the file for debug_fs wr_value.\n");
 		goto wr_value_error;
@@ -2500,7 +2420,7 @@
 
 	dfile_boot_option = debugfs_create_file("boot_option", 0664,
 					dent_msm_pcie, NULL,
-					&msm_pcie_boot_option_ops);
+					&msm_pcie_debugfs_boot_option_ops);
 	if (!dfile_boot_option || IS_ERR(dfile_boot_option)) {
 		pr_err("PCIe: fail to create the file for debug_fs boot_option.\n");
 		goto boot_option_error;
@@ -2508,15 +2428,15 @@
 
 	dfile_aer_enable = debugfs_create_file("aer_enable", 0664,
 					dent_msm_pcie, NULL,
-					&msm_pcie_aer_enable_ops);
+					&msm_pcie_debugfs_aer_enable_ops);
 	if (!dfile_aer_enable || IS_ERR(dfile_aer_enable)) {
 		pr_err("PCIe: fail to create the file for debug_fs aer_enable.\n");
 		goto aer_enable_error;
 	}
 
 	dfile_corr_counter_limit = debugfs_create_file("corr_counter_limit",
-					0664, dent_msm_pcie, NULL,
-					&msm_pcie_corr_counter_limit_ops);
+				0664, dent_msm_pcie, NULL,
+				&msm_pcie_debugfs_corr_counter_limit_ops);
 	if (!dfile_corr_counter_limit || IS_ERR(dfile_corr_counter_limit)) {
 		pr_err("PCIe: fail to create the file for debug_fs corr_counter_limit.\n");
 		goto corr_counter_limit_error;
@@ -2588,9 +2508,38 @@
 				unsigned long host_addr, u32 host_end,
 				unsigned long target_addr)
 {
-	void __iomem *pcie20 = dev->dm_core;
+	void __iomem *iatu_base = dev->iatu ? dev->iatu : dev->dm_core;
 
-	if (dev->shadow_en) {
+	u32 iatu_viewport_offset;
+	u32 iatu_ctrl1_offset;
+	u32 iatu_ctrl2_offset;
+	u32 iatu_lbar_offset;
+	u32 iatu_ubar_offset;
+	u32 iatu_lar_offset;
+	u32 iatu_ltar_offset;
+	u32 iatu_utar_offset;
+
+	if (dev->iatu) {
+		iatu_viewport_offset = 0;
+		iatu_ctrl1_offset = PCIE_IATU_CTRL1(nr);
+		iatu_ctrl2_offset = PCIE_IATU_CTRL2(nr);
+		iatu_lbar_offset = PCIE_IATU_LBAR(nr);
+		iatu_ubar_offset = PCIE_IATU_UBAR(nr);
+		iatu_lar_offset = PCIE_IATU_LAR(nr);
+		iatu_ltar_offset = PCIE_IATU_LTAR(nr);
+		iatu_utar_offset = PCIE_IATU_UTAR(nr);
+	} else {
+		iatu_viewport_offset = PCIE20_PLR_IATU_VIEWPORT;
+		iatu_ctrl1_offset = PCIE20_PLR_IATU_CTRL1;
+		iatu_ctrl2_offset = PCIE20_PLR_IATU_CTRL2;
+		iatu_lbar_offset = PCIE20_PLR_IATU_LBAR;
+		iatu_ubar_offset = PCIE20_PLR_IATU_UBAR;
+		iatu_lar_offset = PCIE20_PLR_IATU_LAR;
+		iatu_ltar_offset = PCIE20_PLR_IATU_LTAR;
+		iatu_utar_offset = PCIE20_PLR_IATU_UTAR;
+	}
+
+	if (dev->shadow_en && iatu_viewport_offset) {
 		dev->rc_shadow[PCIE20_PLR_IATU_VIEWPORT / 4] =
 			nr;
 		dev->rc_shadow[PCIE20_PLR_IATU_CTRL1 / 4] =
@@ -2610,28 +2559,30 @@
 	}
 
 	/* select region */
-	writel_relaxed(nr, pcie20 + PCIE20_PLR_IATU_VIEWPORT);
-	/* ensure that hardware locks it */
-	wmb();
+	if (iatu_viewport_offset) {
+		writel_relaxed(nr, iatu_base + iatu_viewport_offset);
+		/* ensure that hardware locks it */
+		wmb();
+	}
 
 	/* switch off region before changing it */
-	writel_relaxed(0, pcie20 + PCIE20_PLR_IATU_CTRL2);
+	writel_relaxed(0, iatu_base + iatu_ctrl2_offset);
 	/* and wait till it propagates to the hardware */
 	wmb();
 
-	writel_relaxed(type, pcie20 + PCIE20_PLR_IATU_CTRL1);
+	writel_relaxed(type, iatu_base + iatu_ctrl1_offset);
 	writel_relaxed(lower_32_bits(host_addr),
-		       pcie20 + PCIE20_PLR_IATU_LBAR);
+		       iatu_base + iatu_lbar_offset);
 	writel_relaxed(upper_32_bits(host_addr),
-		       pcie20 + PCIE20_PLR_IATU_UBAR);
-	writel_relaxed(host_end, pcie20 + PCIE20_PLR_IATU_LAR);
+		       iatu_base + iatu_ubar_offset);
+	writel_relaxed(host_end, iatu_base + iatu_lar_offset);
 	writel_relaxed(lower_32_bits(target_addr),
-		       pcie20 + PCIE20_PLR_IATU_LTAR);
+		       iatu_base + iatu_ltar_offset);
 	writel_relaxed(upper_32_bits(target_addr),
-		       pcie20 + PCIE20_PLR_IATU_UTAR);
+		       iatu_base + iatu_utar_offset);
 	/* ensure that changes propagated to the hardware */
 	wmb();
-	writel_relaxed(BIT(31), pcie20 + PCIE20_PLR_IATU_CTRL2);
+	writel_relaxed(BIT(31), iatu_base + iatu_ctrl2_offset);
 
 	/* ensure that changes propagated to the hardware */
 	wmb();
@@ -2641,22 +2592,24 @@
 			dev->pcidev_table[nr].bdf >> 24,
 			dev->pcidev_table[nr].bdf >> 19 & 0x1f,
 			dev->pcidev_table[nr].bdf >> 16 & 0x07);
-		PCIE_DBG2(dev, "PCIE20_PLR_IATU_VIEWPORT:0x%x\n",
-			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_VIEWPORT));
-		PCIE_DBG2(dev, "PCIE20_PLR_IATU_CTRL1:0x%x\n",
-			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL1));
-		PCIE_DBG2(dev, "PCIE20_PLR_IATU_LBAR:0x%x\n",
-			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LBAR));
-		PCIE_DBG2(dev, "PCIE20_PLR_IATU_UBAR:0x%x\n",
-			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UBAR));
-		PCIE_DBG2(dev, "PCIE20_PLR_IATU_LAR:0x%x\n",
-			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LAR));
-		PCIE_DBG2(dev, "PCIE20_PLR_IATU_LTAR:0x%x\n",
-			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LTAR));
-		PCIE_DBG2(dev, "PCIE20_PLR_IATU_UTAR:0x%x\n",
-			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UTAR));
-		PCIE_DBG2(dev, "PCIE20_PLR_IATU_CTRL2:0x%x\n\n",
-			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL2));
+		if (iatu_viewport_offset)
+			PCIE_DBG2(dev, "IATU_VIEWPORT:0x%x\n",
+				readl_relaxed(dev->dm_core +
+					PCIE20_PLR_IATU_VIEWPORT));
+		PCIE_DBG2(dev, "IATU_CTRL1:0x%x\n",
+			readl_relaxed(iatu_base + iatu_ctrl1_offset));
+		PCIE_DBG2(dev, "IATU_LBAR:0x%x\n",
+			readl_relaxed(iatu_base + iatu_lbar_offset));
+		PCIE_DBG2(dev, "IATU_UBAR:0x%x\n",
+			readl_relaxed(iatu_base + iatu_ubar_offset));
+		PCIE_DBG2(dev, "IATU_LAR:0x%x\n",
+			readl_relaxed(iatu_base + iatu_lar_offset));
+		PCIE_DBG2(dev, "IATU_LTAR:0x%x\n",
+			readl_relaxed(iatu_base + iatu_ltar_offset));
+		PCIE_DBG2(dev, "IATU_UTAR:0x%x\n",
+			readl_relaxed(iatu_base + iatu_utar_offset));
+		PCIE_DBG2(dev, "IATU_CTRL2:0x%x\n\n",
+			readl_relaxed(iatu_base + iatu_ctrl2_offset));
 	}
 }
 
@@ -3387,8 +3340,8 @@
 
 	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
 
-	cnt = of_property_count_strings((&pdev->dev)->of_node,
-			"clock-names");
+	cnt = of_property_count_elems_of_size((&pdev->dev)->of_node,
+			"max-clock-frequency-hz", sizeof(u32));
 	if (cnt > 0) {
 		clkfreq = kzalloc((MSM_PCIE_MAX_CLK + MSM_PCIE_MAX_PIPE_CLK) *
 					sizeof(*clkfreq), GFP_KERNEL);
@@ -3706,6 +3659,7 @@
 	dev->parf = dev->res[MSM_PCIE_RES_PARF].base;
 	dev->phy = dev->res[MSM_PCIE_RES_PHY].base;
 	dev->elbi = dev->res[MSM_PCIE_RES_ELBI].base;
+	dev->iatu = dev->res[MSM_PCIE_RES_IATU].base;
 	dev->dm_core = dev->res[MSM_PCIE_RES_DM_CORE].base;
 	dev->conf = dev->res[MSM_PCIE_RES_CONF].base;
 	dev->bars = dev->res[MSM_PCIE_RES_BARS].base;
@@ -3726,6 +3680,7 @@
 {
 	dev->parf = NULL;
 	dev->elbi = NULL;
+	dev->iatu = NULL;
 	dev->dm_core = NULL;
 	dev->conf = NULL;
 	dev->bars = NULL;
@@ -3771,6 +3726,7 @@
 	long int retries = 0;
 	int link_check_count = 0;
 	unsigned long ep_up_timeout = 0;
+	u32 link_check_max_count;
 
 	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
 
@@ -3937,6 +3893,11 @@
 	if (dev->max_link_speed == GEN3_SPEED)
 		msm_pcie_setup_gen3(dev);
 
+	if (msm_pcie_force_gen1 & BIT(dev->rc_idx))
+		msm_pcie_write_reg_field(dev->dm_core,
+			PCIE20_CAP + PCI_EXP_LNKCTL2,
+			PCI_EXP_LNKCAP_SLS, GEN1_SPEED);
+
 	/* set max tlp read size */
 	msm_pcie_write_reg_field(dev->dm_core, PCIE20_DEVICE_CONTROL_STATUS,
 				0x7000, dev->tlp_rd_size);
@@ -3946,6 +3907,11 @@
 
 	PCIE_DBG(dev, "%s", "check if link is up\n");
 
+	if (msm_pcie_link_check_max_count & BIT(dev->rc_idx))
+		link_check_max_count = msm_pcie_link_check_max_count >> 4;
+	else
+		link_check_max_count = LINK_UP_CHECK_MAX_COUNT;
+
 	/* Wait for up to 100ms for the link to come up */
 	do {
 		usleep_range(LINK_UP_TIMEOUT_US_MIN, LINK_UP_TIMEOUT_US_MAX);
@@ -3954,7 +3920,7 @@
 			dev->rc_idx, (val >> 12) & 0x3f);
 	} while ((!(val & XMLH_LINK_UP) ||
 		!msm_pcie_confirm_linkup(dev, false, false, NULL))
-		&& (link_check_count++ < LINK_UP_CHECK_MAX_COUNT));
+		&& (link_check_count++ < link_check_max_count));
 
 	if ((val & XMLH_LINK_UP) &&
 		msm_pcie_confirm_linkup(dev, false, false, NULL)) {
@@ -4330,9 +4296,7 @@
 			}
 
 			bus = pci_create_root_bus(&dev->pdev->dev, 0,
-						&msm_pcie_ops,
-						msm_pcie_setup_sys_data(dev),
-						&res);
+						&msm_pcie_ops, dev, &res);
 			if (!bus) {
 				PCIE_ERR(dev,
 					"PCIe: failed to create root bus for RC%d\n",
@@ -4344,7 +4308,7 @@
 			scan_ret = pci_scan_child_bus(bus);
 			PCIE_DBG(dev,
 				"PCIe: RC%d: The max subordinate bus number discovered is %d\n",
-				dev->rc_idx, ret);
+				dev->rc_idx, scan_ret);
 
 			msm_pcie_fixup_irqs(dev);
 			pci_assign_unassigned_bus_resources(bus);
@@ -4529,7 +4493,6 @@
 	u32 ep_dev_ctrlstts_offset = 0;
 	int i, j, ep_src_bdf = 0;
 	void __iomem *ep_base = NULL;
-	unsigned long irqsave_flags;
 
 	PCIE_DBG2(dev,
 		"AER Interrupt handler fired for RC%d irq %d\nrc_corr_counter: %lu\nrc_non_fatal_counter: %lu\nrc_fatal_counter: %lu\nep_corr_counter: %lu\nep_non_fatal_counter: %lu\nep_fatal_counter: %lu\n",
@@ -4538,16 +4501,6 @@
 		dev->ep_corr_counter, dev->ep_non_fatal_counter,
 		dev->ep_fatal_counter);
 
-	spin_lock_irqsave(&dev->aer_lock, irqsave_flags);
-
-	if (dev->suspending) {
-		PCIE_DBG2(dev,
-			"PCIe: RC%d is currently suspending.\n",
-			dev->rc_idx);
-		spin_unlock_irqrestore(&dev->aer_lock, irqsave_flags);
-		return IRQ_HANDLED;
-	}
-
 	uncorr_val = readl_relaxed(dev->dm_core +
 				PCIE20_AER_UNCORR_ERR_STATUS_REG);
 	corr_val = readl_relaxed(dev->dm_core +
@@ -4661,7 +4614,6 @@
 			PCIE20_AER_ROOT_ERR_STATUS_REG,
 			0x7f, 0x7f);
 
-	spin_unlock_irqrestore(&dev->aer_lock, irqsave_flags);
 	return IRQ_HANDLED;
 }
 
@@ -4709,11 +4661,8 @@
 static irqreturn_t handle_linkdown_irq(int irq, void *data)
 {
 	struct msm_pcie_dev_t *dev = data;
-	unsigned long irqsave_flags;
 	int i;
 
-	spin_lock_irqsave(&dev->linkdown_lock, irqsave_flags);
-
 	dev->linkdown_counter++;
 
 	PCIE_DBG(dev,
@@ -4754,8 +4703,6 @@
 		}
 	}
 
-	spin_unlock_irqrestore(&dev->linkdown_lock, irqsave_flags);
-
 	return IRQ_HANDLED;
 }
 
@@ -4799,7 +4746,15 @@
 	unsigned long irqsave_flags;
 	u32 status = 0;
 
-	spin_lock_irqsave(&dev->global_irq_lock, irqsave_flags);
+	spin_lock_irqsave(&dev->irq_lock, irqsave_flags);
+
+	if (dev->suspending) {
+		PCIE_DBG2(dev,
+			"PCIe: RC%d is currently suspending.\n",
+			dev->rc_idx);
+		spin_unlock_irqrestore(&dev->irq_lock, irqsave_flags);
+		return IRQ_HANDLED;
+	}
 
 	status = readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_STATUS) &
 			readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_MASK);
@@ -4838,13 +4793,14 @@
 		}
 	}
 
-	spin_unlock_irqrestore(&dev->global_irq_lock, irqsave_flags);
+	spin_unlock_irqrestore(&dev->irq_lock, irqsave_flags);
 
 	return IRQ_HANDLED;
 }
 
 static void msm_pcie_unmap_qgic_addr(struct msm_pcie_dev_t *dev,
-					struct pci_dev *pdev)
+					struct pci_dev *pdev,
+					struct msi_desc *entry)
 {
 	struct iommu_domain *domain = iommu_get_domain_for_dev(&pdev->dev);
 	int bypass_en = 0;
@@ -4858,30 +4814,20 @@
 
 	iommu_domain_get_attr(domain, DOMAIN_ATTR_S1_BYPASS, &bypass_en);
 	if (!bypass_en) {
-		int ret;
-		phys_addr_t pcie_base_addr =
-			dev->res[MSM_PCIE_RES_DM_CORE].resource->start;
-		dma_addr_t iova = rounddown(pcie_base_addr, PAGE_SIZE);
+		dma_addr_t iova = entry->msg.address_lo;
 
-		ret = iommu_unmap(domain, iova, PAGE_SIZE);
-		if (ret != PAGE_SIZE)
-			PCIE_ERR(dev,
-				"PCIe: RC%d: failed to unmap QGIC address. ret = %d\n",
-				dev->rc_idx, ret);
+		PCIE_DBG(dev, "PCIe: RC%d: unmap QGIC MSI IOVA\n", dev->rc_idx);
+
+		dma_unmap_resource(&pdev->dev, iova, PAGE_SIZE,
+				DMA_BIDIRECTIONAL, 0);
 	}
 }
 
-static void msm_pcie_destroy_irq(unsigned int irq, struct pci_dev *pdev)
+static void msm_pcie_destroy_irq(struct msi_desc *entry, unsigned int irq)
 {
 	int pos;
-	struct msi_desc *entry = irq_get_msi_desc(irq);
-	struct msi_desc *firstentry;
 	struct msm_pcie_dev_t *dev;
-	u32 nvec;
-	int firstirq;
-
-	if (!pdev)
-		pdev = irq_get_chip_data(irq);
+	struct pci_dev *pdev = msi_desc_to_pci_dev(entry);
 
 	if (!pdev) {
 		pr_err("PCIe: pci device is null. IRQ:%d\n", irq);
@@ -4894,24 +4840,10 @@
 		return;
 	}
 
-	if (!entry) {
-		PCIE_ERR(dev, "PCIe: RC%d: msi desc is null. IRQ:%d\n",
-			dev->rc_idx, irq);
-		return;
-	}
-
-	firstentry = first_pci_msi_entry(pdev);
-	if (!firstentry) {
-		PCIE_ERR(dev,
-			"PCIe: RC%d: firstentry msi desc is null. IRQ:%d\n",
-			dev->rc_idx, irq);
-		return;
-	}
-
-	firstirq = firstentry->irq;
-	nvec = (1 << entry->msi_attrib.multiple);
-
 	if (dev->msi_gicm_addr) {
+		int firstirq = entry->irq;
+		u32 nvec = (1 << entry->msi_attrib.multiple);
+
 		PCIE_DBG(dev, "destroy QGIC based irq %d\n", irq);
 
 		if (irq < firstirq || irq > firstirq + nvec - 1) {
@@ -4921,7 +4853,7 @@
 			return;
 		}
 		if (irq == firstirq + nvec - 1)
-			msm_pcie_unmap_qgic_addr(dev, pdev);
+			msm_pcie_unmap_qgic_addr(dev, pdev, entry);
 		pos = irq - firstirq;
 	} else {
 		PCIE_DBG(dev, "destroy default MSI irq %d\n", irq);
@@ -4940,8 +4872,12 @@
 /* hookup to linux pci msi framework */
 void arch_teardown_msi_irq(unsigned int irq)
 {
+	struct msi_desc *entry = irq_get_msi_desc(irq);
+
 	PCIE_GEN_DBG("irq %d deallocated\n", irq);
-	msm_pcie_destroy_irq(irq, NULL);
+
+	if (entry)
+		msm_pcie_destroy_irq(entry, irq);
 }
 
 void arch_teardown_msi_irqs(struct pci_dev *dev)
@@ -4961,7 +4897,7 @@
 			continue;
 		nvec = 1 << entry->msi_attrib.multiple;
 		for (i = 0; i < nvec; i++)
-			msm_pcie_destroy_irq(entry->irq + i, dev);
+			msm_pcie_destroy_irq(entry, entry->irq + i);
 	}
 }
 
@@ -5075,9 +5011,8 @@
 					struct msi_msg *msg)
 {
 	struct iommu_domain *domain = iommu_get_domain_for_dev(&pdev->dev);
-	struct iommu_domain_geometry geometry;
-	int fastmap_en = 0, bypass_en = 0;
-	dma_addr_t iova, addr;
+	int bypass_en = 0;
+	dma_addr_t iova;
 
 	msg->address_hi = 0;
 	msg->address_lo = dev->msi_gicm_addr;
@@ -5099,35 +5034,15 @@
 	if (bypass_en)
 		return 0;
 
-	iommu_domain_get_attr(domain, DOMAIN_ATTR_FAST, &fastmap_en);
-	if (fastmap_en) {
-		iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geometry);
-		iova = geometry.aperture_start;
-		PCIE_DBG(dev,
-			"PCIe: RC%d: Use client's IOVA 0x%llx to map QGIC MSI address\n",
-			dev->rc_idx, iova);
-	} else {
-		phys_addr_t pcie_base_addr;
-
-		/*
-		 * Use PCIe DBI address as the IOVA since client cannot
-		 * use this address for their IOMMU mapping. This will
-		 * prevent any conflicts between PCIe host and
-		 * client's mapping.
-		 */
-		pcie_base_addr = dev->res[MSM_PCIE_RES_DM_CORE].resource->start;
-		iova = rounddown(pcie_base_addr, PAGE_SIZE);
-	}
-
-	addr = dma_map_resource(&pdev->dev, dev->msi_gicm_addr, PAGE_SIZE,
+	iova = dma_map_resource(&pdev->dev, dev->msi_gicm_addr, PAGE_SIZE,
 				DMA_BIDIRECTIONAL, 0);
-	if (dma_mapping_error(&pdev->dev, addr)) {
+	if (dma_mapping_error(&pdev->dev, iova)) {
 		PCIE_ERR(dev, "PCIe: RC%d: failed to map QGIC address",
 			dev->rc_idx);
 		return -EIO;
 	}
 
-	msg->address_lo = iova + addr;
+	msg->address_lo = iova;
 
 	return 0;
 }
@@ -5415,6 +5330,22 @@
 	PCIE_DBG2(dev, "PCIe: RC%d: LINKCTRLSTATUS:0x%x\n", dev->rc_idx, val);
 }
 
+static int msm_pcie_config_l0s_disable(struct pci_dev *pdev, void *dev)
+{
+	struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)dev;
+
+	msm_pcie_config_l0s(pcie_dev, pdev, false);
+	return 0;
+}
+
+static int msm_pcie_config_l0s_enable(struct pci_dev *pdev, void *dev)
+{
+	struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)dev;
+
+	msm_pcie_config_l0s(pcie_dev, pdev, true);
+	return 0;
+}
+
 static void msm_pcie_config_l1(struct msm_pcie_dev_t *dev,
 				struct pci_dev *pdev, bool enable)
 {
@@ -5441,10 +5372,27 @@
 	PCIE_DBG2(dev, "PCIe: RC%d: LINKCTRLSTATUS:0x%x\n", dev->rc_idx, val);
 }
 
+static int msm_pcie_config_l1_disable(struct pci_dev *pdev, void *dev)
+{
+	struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)dev;
+
+	msm_pcie_config_l1(pcie_dev, pdev, false);
+	return 0;
+}
+
+static int msm_pcie_config_l1_enable(struct pci_dev *pdev, void *dev)
+{
+	struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)dev;
+
+	msm_pcie_config_l1(pcie_dev, pdev, true);
+	return 0;
+}
+
 static void msm_pcie_config_l1ss(struct msm_pcie_dev_t *dev,
 				struct pci_dev *pdev, bool enable)
 {
-	bool l1_1_cap_support, l1_2_cap_support;
+	bool l1_1_pcipm_support, l1_2_pcipm_support;
+	bool l1_1_aspm_support, l1_2_aspm_support;
 	u32 val, val2;
 	u32 l1ss_cap_id_offset, l1ss_cap_offset, l1ss_ctl1_offset;
 	u32 devctl2_offset = pdev->pcie_cap + PCI_EXP_DEVCTL2;
@@ -5461,11 +5409,14 @@
 	l1ss_ctl1_offset = l1ss_cap_id_offset + PCI_L1SS_CTL1;
 
 	pci_read_config_dword(pdev, l1ss_cap_offset, &val);
-	l1_1_cap_support = !!(val & (PCI_L1SS_CAP_ASPM_L1_1));
-	l1_2_cap_support = !!(val & (PCI_L1SS_CAP_ASPM_L1_2));
-	if (!l1_1_cap_support && !l1_2_cap_support) {
+	l1_1_pcipm_support = !!(val & (PCI_L1SS_CAP_PCIPM_L1_1));
+	l1_2_pcipm_support = !!(val & (PCI_L1SS_CAP_PCIPM_L1_2));
+	l1_1_aspm_support = !!(val & (PCI_L1SS_CAP_ASPM_L1_1));
+	l1_2_aspm_support = !!(val & (PCI_L1SS_CAP_ASPM_L1_2));
+	if (!l1_1_pcipm_support && !l1_2_pcipm_support &&
+		!l1_1_aspm_support && !l1_2_aspm_support) {
 		PCIE_DBG(dev,
-			"PCIe: RC%d: PCI device does not support L1.1 and L1.2\n",
+			"PCIe: RC%d: PCI device does not support any L1ss\n",
 			dev->rc_idx);
 		return;
 	}
@@ -5484,14 +5435,18 @@
 		msm_pcie_config_clear_set_dword(pdev, devctl2_offset, 0,
 			PCI_EXP_DEVCTL2_LTR_EN);
 		msm_pcie_config_clear_set_dword(pdev, l1ss_ctl1_offset, 0,
-			(l1_1_cap_support ? PCI_L1SS_CTL1_ASPM_L1_1 : 0) |
-			(l1_2_cap_support ? PCI_L1SS_CTL1_ASPM_L1_2 : 0));
+			(l1_1_pcipm_support ? PCI_L1SS_CTL1_PCIPM_L1_1 : 0) |
+			(l1_2_pcipm_support ? PCI_L1SS_CTL1_PCIPM_L1_2 : 0) |
+			(l1_1_aspm_support ? PCI_L1SS_CTL1_ASPM_L1_1 : 0) |
+			(l1_2_aspm_support ? PCI_L1SS_CTL1_ASPM_L1_2 : 0));
 	} else {
 		msm_pcie_config_clear_set_dword(pdev, devctl2_offset,
 			PCI_EXP_DEVCTL2_LTR_EN, 0);
 		msm_pcie_config_clear_set_dword(pdev, l1ss_ctl1_offset,
-			(l1_1_cap_support ? PCI_L1SS_CTL1_ASPM_L1_1 : 0) |
-			(l1_2_cap_support ? PCI_L1SS_CTL1_ASPM_L1_2 : 0), 0);
+			(l1_1_pcipm_support ? PCI_L1SS_CTL1_PCIPM_L1_1 : 0) |
+			(l1_2_pcipm_support ? PCI_L1SS_CTL1_PCIPM_L1_2 : 0) |
+			(l1_1_aspm_support ? PCI_L1SS_CTL1_ASPM_L1_1 : 0) |
+			(l1_2_aspm_support ? PCI_L1SS_CTL1_ASPM_L1_2 : 0), 0);
 	}
 
 	pci_read_config_dword(pdev, l1ss_ctl1_offset, &val);
@@ -5502,6 +5457,22 @@
 		dev->rc_idx, val2);
 }
 
+static int msm_pcie_config_l1ss_disable(struct pci_dev *pdev, void *dev)
+{
+	struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)dev;
+
+	msm_pcie_config_l1ss(pcie_dev, pdev, false);
+	return 0;
+}
+
+static int msm_pcie_config_l1ss_enable(struct pci_dev *pdev, void *dev)
+{
+	struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)dev;
+
+	msm_pcie_config_l1ss(pcie_dev, pdev, true);
+	return 0;
+}
+
 static void msm_pcie_config_clock_power_management(struct msm_pcie_dev_t *dev,
 				struct pci_dev *pdev)
 {
@@ -5588,7 +5559,9 @@
 			pci_read_config_dword(child_pdev,
 				l1ss_cap_id_offset + PCI_L1SS_CTL1, &val);
 			child_l1ss_enable = !!(val &
-				(PCI_L1SS_CTL1_ASPM_L1_1 |
+				(PCI_L1SS_CTL1_PCIPM_L1_1 |
+				PCI_L1SS_CTL1_PCIPM_L1_2 |
+				PCI_L1SS_CTL1_ASPM_L1_1 |
 				PCI_L1SS_CTL1_ASPM_L1_2));
 			if (child_l1ss_enable)
 				break;
@@ -5835,6 +5808,21 @@
 		"RC%d: slv-addr-space-size: 0x%x.\n",
 		rc_idx, msm_pcie_dev[rc_idx].slv_addr_space_size);
 
+	msm_pcie_dev[rc_idx].phy_status_offset = 0;
+	ret = of_property_read_u32(pdev->dev.of_node,
+				"qcom,phy-status-offset",
+				&msm_pcie_dev[rc_idx].phy_status_offset);
+	if (ret) {
+		PCIE_ERR(&msm_pcie_dev[rc_idx],
+			"RC%d: failed to get PCIe PHY status offset.\n",
+			rc_idx);
+		goto decrease_rc_num;
+	} else {
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+			"RC%d: phy-status-offset: 0x%x.\n",
+			rc_idx, msm_pcie_dev[rc_idx].phy_status_offset);
+	}
+
 	msm_pcie_dev[rc_idx].cpl_timeout = 0;
 	ret = of_property_read_u32((&pdev->dev)->of_node,
 				"qcom,cpl-timeout",
@@ -6183,10 +6171,8 @@
 		mutex_init(&msm_pcie_dev[i].enumerate_lock);
 		mutex_init(&msm_pcie_dev[i].setup_lock);
 		mutex_init(&msm_pcie_dev[i].recovery_lock);
-		spin_lock_init(&msm_pcie_dev[i].linkdown_lock);
 		spin_lock_init(&msm_pcie_dev[i].wakeup_lock);
-		spin_lock_init(&msm_pcie_dev[i].global_irq_lock);
-		spin_lock_init(&msm_pcie_dev[i].aer_lock);
+		spin_lock_init(&msm_pcie_dev[i].irq_lock);
 		msm_pcie_dev[i].drv_ready = false;
 	}
 	for (i = 0; i < MAX_RC_NUM * MAX_DEVICE_NUM; i++) {
@@ -6233,10 +6219,10 @@
 	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
 
 	PCIE_DBG(pcie_dev, "hdr_type %d\n", dev->hdr_type);
-	if (dev->hdr_type == 1)
+	if (pci_is_root_bus(dev->bus))
 		dev->class = (dev->class & 0xff) | (PCI_CLASS_BRIDGE_PCI << 8);
 }
-DECLARE_PCI_FIXUP_EARLY(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
+DECLARE_PCI_FIXUP_EARLY(PCIE_VENDOR_ID_QCOM, PCI_ANY_ID,
 			msm_pcie_fixup_early);
 
 /* Suspend the PCIe link */
@@ -6251,9 +6237,9 @@
 
 	PCIE_DBG(pcie_dev, "RC%d: entry\n", pcie_dev->rc_idx);
 
-	spin_lock_irqsave(&pcie_dev->aer_lock, irqsave_flags);
+	spin_lock_irqsave(&pcie_dev->irq_lock, irqsave_flags);
 	pcie_dev->suspending = true;
-	spin_unlock_irqrestore(&pcie_dev->aer_lock, irqsave_flags);
+	spin_unlock_irqrestore(&pcie_dev->irq_lock, irqsave_flags);
 
 	if (!pcie_dev->power_on) {
 		PCIE_DBG(pcie_dev,
@@ -6319,7 +6305,8 @@
 
 	PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
 
-	if (pcie_dev->link_status != MSM_PCIE_LINK_ENABLED)
+	if (pcie_dev->link_status != MSM_PCIE_LINK_ENABLED ||
+		!pci_is_root_bus(dev->bus))
 		return;
 
 	spin_lock_irqsave(&pcie_dev->cfg_lock,
@@ -6344,7 +6331,7 @@
 
 	mutex_unlock(&pcie_dev->recovery_lock);
 }
-DECLARE_PCI_FIXUP_SUSPEND(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
+DECLARE_PCI_FIXUP_SUSPEND(PCIE_VENDOR_ID_QCOM, PCI_ANY_ID,
 			  msm_pcie_fixup_suspend);
 
 /* Resume the PCIe link */
@@ -6418,7 +6405,7 @@
 	PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
 
 	if ((pcie_dev->link_status != MSM_PCIE_LINK_DISABLED) ||
-		pcie_dev->user_suspend)
+		pcie_dev->user_suspend || !pci_is_root_bus(dev->bus))
 		return;
 
 	mutex_lock(&pcie_dev->recovery_lock);
@@ -6430,7 +6417,7 @@
 
 	mutex_unlock(&pcie_dev->recovery_lock);
 }
-DECLARE_PCI_FIXUP_RESUME(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
+DECLARE_PCI_FIXUP_RESUME(PCIE_VENDOR_ID_QCOM, PCI_ANY_ID,
 				 msm_pcie_fixup_resume);
 
 static void msm_pcie_fixup_resume_early(struct pci_dev *dev)
@@ -6441,7 +6428,7 @@
 	PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
 
 	if ((pcie_dev->link_status != MSM_PCIE_LINK_DISABLED) ||
-		pcie_dev->user_suspend)
+		pcie_dev->user_suspend || !pci_is_root_bus(dev->bus))
 		return;
 
 	mutex_lock(&pcie_dev->recovery_lock);
@@ -6452,7 +6439,7 @@
 
 	mutex_unlock(&pcie_dev->recovery_lock);
 }
-DECLARE_PCI_FIXUP_RESUME_EARLY(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCIE_VENDOR_ID_QCOM, PCI_ANY_ID,
 				 msm_pcie_fixup_resume_early);
 
 int msm_pcie_pm_control(enum msm_pcie_pm_opt pm_opt, u32 busnr, void *user,
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 4722782..1d32fe2 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -164,7 +164,6 @@
 	pci_device_add(virtfn, virtfn->bus);
 	mutex_unlock(&iov->dev->sriov->lock);
 
-	pci_bus_add_device(virtfn);
 	sprintf(buf, "virtfn%u", id);
 	rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
 	if (rc)
@@ -175,6 +174,8 @@
 
 	kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);
 
+	pci_bus_add_device(virtfn);
+
 	return 0;
 
 failed2:
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 8a68e2b..802997e 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -953,7 +953,12 @@
 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_resume_early(dev);
 
-	pci_update_current_state(pci_dev, PCI_D0);
+	/*
+	 * pci_restore_state() requires the device to be in D0 (because of MSI
+	 * restoration among other things), so force it into D0 in case the
+	 * driver's "freeze" callbacks put it into a low-power state directly.
+	 */
+	pci_set_power_state(pci_dev, PCI_D0);
 	pci_restore_state(pci_dev);
 
 	if (drv && drv->pm && drv->pm->thaw_noirq)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e7d4048..a87c8e1 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -4214,6 +4214,10 @@
 {
 	struct pci_dev *dev;
 
+
+	if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
+		return false;
+
 	list_for_each_entry(dev, &bus->devices, bus_list) {
 		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
 		    (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index b1303b3..057465ad 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -390,7 +390,14 @@
 		 * If the error is reported by an end point, we think this
 		 * error is related to the upstream link of the end point.
 		 */
-		pci_walk_bus(dev->bus, cb, &result_data);
+		if (state == pci_channel_io_normal)
+			/*
+			 * The error is non-fatal, so the bus is OK; just invoke
+			 * the callback for the function that logged the error.
+			 */
+			cb(dev, &result_data);
+		else
+			pci_walk_bus(dev->bus, cb, &result_data);
 	}
 
 	return result_data.result;
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 4b70349..00f61225 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -232,6 +232,9 @@
 			break;
 
 		pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
+		if (rtsta == (u32) ~0)
+			break;
+
 		if (rtsta & PCI_EXP_RTSTA_PME) {
 			/*
 			 * Clear PME status of the port.  If there are other
@@ -279,7 +282,7 @@
 	spin_lock_irqsave(&data->lock, flags);
 	pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
 
-	if (!(rtsta & PCI_EXP_RTSTA_PME)) {
+	if (rtsta == (u32) ~0 || !(rtsta & PCI_EXP_RTSTA_PME)) {
 		spin_unlock_irqrestore(&data->lock, flags);
 		return IRQ_NONE;
 	}
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index d266d80..a98be6d 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -932,7 +932,8 @@
 			child = pci_add_new_bus(bus, dev, max+1);
 			if (!child)
 				goto out;
-			pci_bus_insert_busn_res(child, max+1, 0xff);
+			pci_bus_insert_busn_res(child, max+1,
+						bus->busn_res.end);
 		}
 		max++;
 		buses = (buses & 0xff000000)
@@ -1438,8 +1439,16 @@
 
 static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
 {
-	if (hpp)
-		dev_warn(&dev->dev, "PCI-X settings not supported\n");
+	int pos;
+
+	if (!hpp)
+		return;
+
+	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
+	if (!pos)
+		return;
+
+	dev_warn(&dev->dev, "PCI-X settings not supported\n");
 }
 
 static bool pcie_root_rcb_set(struct pci_dev *dev)
@@ -1465,6 +1474,9 @@
 	if (!hpp)
 		return;
 
+	if (!pci_is_pcie(dev))
+		return;
+
 	if (hpp->revision > 1) {
 		dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
 			 hpp->revision);
@@ -2125,6 +2137,10 @@
 	if (bus->self && bus->self->is_hotplug_bridge && pci_hotplug_bus_size) {
 		if (max - bus->busn_res.start < pci_hotplug_bus_size - 1)
 			max = bus->busn_res.start + pci_hotplug_bus_size - 1;
+
+		/* Do not allocate more buses than we have room left */
+		if (max > bus->busn_res.end)
+			max = bus->busn_res.end;
 	}
 
 	/*
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 5d8151b..98eba91 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -4088,12 +4088,14 @@
 static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
 {
 	/*
-	 * Cavium devices matching this quirk do not perform peer-to-peer
-	 * with other functions, allowing masking out these bits as if they
-	 * were unimplemented in the ACS capability.
+	 * Cavium root ports don't advertise an ACS capability.  However,
+	 * the RTL internally implements similar protection as if ACS had
+	 * Request Redirection, Completion Redirection, Source Validation,
+	 * and Upstream Forwarding features enabled.  Assert that the
+	 * hardware implements and enables equivalent ACS functionality for
+	 * these flags.
 	 */
-	acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
-		       PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
+	acs_flags &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_SV | PCI_ACS_UF);
 
 	return acs_flags ? 0 : 1;
 }
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index f9357e0..b6b9b5b 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -19,9 +19,9 @@
 	pci_pme_active(dev, false);
 
 	if (dev->is_added) {
+		device_release_driver(&dev->dev);
 		pci_proc_detach_device(dev);
 		pci_remove_sysfs_dev_files(dev);
-		device_release_driver(&dev->dev);
 		dev->is_added = 0;
 	}
 
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 671610c..b0c0fa0 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -26,7 +26,8 @@
 
 config PINCTRL_ADI2
 	bool "ADI pin controller driver"
-	depends on BLACKFIN
+	depends on (BF54x || BF60x)
+	depends on !GPIO_ADI
 	select PINMUX
 	select IRQ_DOMAIN
 	help
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 0d34d8a4..e8c08eb 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1594,6 +1594,22 @@
 			clear_bit(i, chip->irq_valid_mask);
 	}
 
+	/*
+	 * The same set of machines in chv_no_valid_mask[] have incorrectly
+	 * configured GPIOs that generate spurious interrupts so we use
+	 * this same list to apply another quirk for them.
+	 *
+	 * See also https://bugzilla.kernel.org/show_bug.cgi?id=197953.
+	 */
+	if (!need_valid_mask) {
+		/*
+		 * Mask all interrupts the community is able to generate
+		 * but leave the ones that can only generate GPEs unmasked.
+		 */
+		chv_writel(GENMASK(31, pctrl->community->nirqs),
+			   pctrl->regs + CHV_INTMASK);
+	}
+
 	/* Clear all interrupts */
 	chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
 
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index b7bb371..50c45bd 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1285,6 +1285,22 @@
 	writel(BIT(d->hwirq), bank->base + REG_PIO_SET_PMASK);
 }
 
+static int st_gpio_irq_request_resources(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+	st_gpio_direction_input(gc, d->hwirq);
+
+	return gpiochip_lock_as_irq(gc, d->hwirq);
+}
+
+static void st_gpio_irq_release_resources(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+	gpiochip_unlock_as_irq(gc, d->hwirq);
+}
+
 static int st_gpio_irq_set_type(struct irq_data *d, unsigned type)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -1438,12 +1454,14 @@
 };
 
 static struct irq_chip st_gpio_irqchip = {
-	.name		= "GPIO",
-	.irq_disable	= st_gpio_irq_mask,
-	.irq_mask	= st_gpio_irq_mask,
-	.irq_unmask	= st_gpio_irq_unmask,
-	.irq_set_type	= st_gpio_irq_set_type,
-	.flags		= IRQCHIP_SKIP_SET_WAKE,
+	.name			= "GPIO",
+	.irq_request_resources	= st_gpio_irq_request_resources,
+	.irq_release_resources	= st_gpio_irq_release_resources,
+	.irq_disable		= st_gpio_irq_mask,
+	.irq_mask		= st_gpio_irq_mask,
+	.irq_unmask		= st_gpio_irq_unmask,
+	.irq_set_type		= st_gpio_irq_set_type,
+	.flags			= IRQCHIP_SKIP_SET_WAKE,
 };
 
 static int st_gpiolib_register_bank(struct st_pinctrl *info,
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index c8f8813..1441678 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2013, Sony Mobile Communications AB.
- * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -70,6 +70,7 @@
 
 	const struct msm_pinctrl_soc_data *soc;
 	void __iomem *regs;
+	void __iomem *pdc_regs;
 };
 
 static int msm_get_groups_count(struct pinctrl_dev *pctldev)
@@ -584,6 +585,34 @@
 	spin_unlock_irqrestore(&pctrl->lock, flags);
 }
 
+static void msm_gpio_irq_enable(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+	const struct msm_pingroup *g;
+	unsigned long flags;
+	u32 val;
+
+	g = &pctrl->soc->groups[d->hwirq];
+
+	spin_lock_irqsave(&pctrl->lock, flags);
+	/* Clear the interrupt status bit before unmask to avoid
+	 * any erroneous interrupts that would have got latched
+	 * when the interrupt is not in use.
+	 */
+	val = readl(pctrl->regs + g->intr_status_reg);
+	val &= ~BIT(g->intr_status_bit);
+	writel(val, pctrl->regs + g->intr_status_reg);
+
+	val = readl(pctrl->regs + g->intr_cfg_reg);
+	val |= BIT(g->intr_enable_bit);
+	writel(val, pctrl->regs + g->intr_cfg_reg);
+
+	set_bit(d->hwirq, pctrl->enabled_irqs);
+
+	spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
 static void msm_gpio_irq_unmask(struct irq_data *d)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -741,6 +770,7 @@
 
 static struct irq_chip msm_gpio_irq_chip = {
 	.name           = "msmgpio",
+	.irq_enable     = msm_gpio_irq_enable,
 	.irq_mask       = msm_gpio_irq_mask,
 	.irq_unmask     = msm_gpio_irq_unmask,
 	.irq_ack        = msm_gpio_irq_ack,
@@ -748,6 +778,102 @@
 	.irq_set_wake   = msm_gpio_irq_set_wake,
 };
 
+static struct irq_chip msm_dirconn_irq_chip;
+
+static void msm_gpio_dirconn_handler(struct irq_desc *desc)
+{
+	struct irq_data *irqd = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+
+	chained_irq_enter(chip, desc);
+	generic_handle_irq(irqd->irq);
+	chained_irq_exit(chip, desc);
+}
+
+static void setup_pdc_gpio(struct irq_domain *domain,
+			unsigned int parent_irq, unsigned int gpio)
+{
+	int irq;
+
+	if (gpio != 0) {
+		irq = irq_find_mapping(domain, gpio);
+		irq_set_parent(irq, parent_irq);
+		irq_set_chip(irq, &msm_dirconn_irq_chip);
+		irq_set_handler_data(parent_irq, irq_get_irq_data(irq));
+	}
+
+	__irq_set_handler(parent_irq, msm_gpio_dirconn_handler, false, NULL);
+}
+
+static void request_dc_interrupt(struct irq_domain *domain,
+			struct irq_domain *parent, irq_hw_number_t hwirq,
+			unsigned int gpio)
+{
+	struct irq_fwspec fwspec;
+	unsigned int parent_irq;
+
+	fwspec.fwnode = parent->fwnode;
+	fwspec.param[0] = 0; /* SPI */
+	fwspec.param[1] = hwirq;
+	fwspec.param[2] = IRQ_TYPE_NONE;
+	fwspec.param_count = 3;
+
+	parent_irq = irq_create_fwspec_mapping(&fwspec);
+
+	setup_pdc_gpio(domain, parent_irq, gpio);
+}
+
+/**
+ * gpio_muxed_to_pdc: Mux the GPIO to a PDC IRQ
+ *
+ * @pdc_domain: the PDC's domain
+ * @d: the GPIO's IRQ data
+ *
+ * Find a free PDC port for the GPIO and map the GPIO's mux information to the
+ * PDC registers, so the GPIO can be used as a wakeup source.
+ */
+static void gpio_muxed_to_pdc(struct irq_domain *pdc_domain, struct irq_data *d)
+{
+	int i, j;
+	unsigned int mux;
+	struct irq_desc *desc = irq_data_to_desc(d);
+	struct irq_data *parent_data = irq_get_irq_data(desc->parent_irq);
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	unsigned int gpio = d->hwirq;
+	struct msm_pinctrl *pctrl;
+	unsigned int irq;
+
+	if (!gc || !parent_data)
+		return;
+
+	pctrl = gpiochip_get_data(gc);
+
+	for (i = 0; i < pctrl->soc->n_gpio_mux_in; i++) {
+		if (gpio != pctrl->soc->gpio_mux_in[i].gpio)
+			continue;
+		mux = pctrl->soc->gpio_mux_in[i].mux;
+		for (j = 0; j < pctrl->soc->n_pdc_mux_out; j++) {
+			struct msm_pdc_mux_output *pdc_out =
+						&pctrl->soc->pdc_mux_out[j];
+
+			if (pdc_out->mux == mux)
+				break;
+			if (pdc_out->mux)
+				continue;
+			pdc_out->mux = gpio;
+			irq = irq_find_mapping(pdc_domain, pdc_out->hwirq + 32);
+			/* setup the IRQ parent for the GPIO */
+			setup_pdc_gpio(pctrl->chip.irqdomain, irq, gpio);
+			/* program pdc select grp register */
+			writel_relaxed((mux & 0x3F), pctrl->pdc_regs +
+				(0x14 * j));
+			break;
+		}
+		/* We have no more PDC port available */
+		WARN_ON(j == pctrl->soc->n_pdc_mux_out);
+	}
+}
+
 static bool is_gpio_dual_edge(struct irq_data *d, irq_hw_number_t *dir_conn_irq)
 {
 	struct irq_desc *desc = irq_data_to_desc(d);
@@ -768,6 +894,17 @@
 			return true;
 		}
 	}
+
+	for (i = 0; i < pctrl->soc->n_pdc_mux_out; i++) {
+		struct msm_pdc_mux_output *dir_conn =
+					&pctrl->soc->pdc_mux_out[i];
+
+		if (dir_conn->mux == d->hwirq && (dir_conn->hwirq + 32)
+				!= parent_data->hwirq) {
+			*dir_conn_irq = dir_conn->hwirq + 32;
+			return true;
+		}
+	}
 	return false;
 }
 
@@ -785,13 +922,48 @@
 			irq_get_irq_data(irq_find_mapping(parent_data->domain,
 						dir_conn_irq));
 
-		if (dir_conn_data && dir_conn_data->chip->irq_mask)
+		if (!dir_conn_data)
+			return;
+		if (dir_conn_data->chip->irq_mask)
 			dir_conn_data->chip->irq_mask(dir_conn_data);
 	}
+
 	if (parent_data->chip->irq_mask)
 		parent_data->chip->irq_mask(parent_data);
 }
 
+static void msm_dirconn_irq_enable(struct irq_data *d)
+{
+	struct irq_desc *desc = irq_data_to_desc(d);
+	struct irq_data *parent_data = irq_get_irq_data(desc->parent_irq);
+	irq_hw_number_t dir_conn_irq = 0;
+
+	if (!parent_data)
+		return;
+
+	if (is_gpio_dual_edge(d, &dir_conn_irq)) {
+		struct irq_data *dir_conn_data =
+			irq_get_irq_data(irq_find_mapping(parent_data->domain,
+						dir_conn_irq));
+
+		if (dir_conn_data &&
+				dir_conn_data->chip->irq_set_irqchip_state)
+			dir_conn_data->chip->irq_set_irqchip_state(
+					dir_conn_data,
+					IRQCHIP_STATE_PENDING, 0);
+
+		if (dir_conn_data && dir_conn_data->chip->irq_unmask)
+			dir_conn_data->chip->irq_unmask(dir_conn_data);
+	}
+
+	if (parent_data->chip->irq_set_irqchip_state)
+		parent_data->chip->irq_set_irqchip_state(parent_data,
+						IRQCHIP_STATE_PENDING, 0);
+
+	if (parent_data->chip->irq_unmask)
+		parent_data->chip->irq_unmask(parent_data);
+}
+
 static void msm_dirconn_irq_unmask(struct irq_data *d)
 {
 	struct irq_desc *desc = irq_data_to_desc(d);
@@ -806,7 +978,9 @@
 			irq_get_irq_data(irq_find_mapping(parent_data->domain,
 						dir_conn_irq));
 
-		if (dir_conn_data && dir_conn_data->chip->irq_unmask)
+		if (!dir_conn_data)
+			return;
+		if (dir_conn_data->chip->irq_unmask)
 			dir_conn_data->chip->irq_unmask(dir_conn_data);
 	}
 	if (parent_data->chip->irq_unmask)
@@ -1029,6 +1203,7 @@
 static struct irq_chip msm_dirconn_irq_chip = {
 	.name			= "msmgpio-dc",
 	.irq_mask		= msm_dirconn_irq_mask,
+	.irq_enable		= msm_dirconn_irq_enable,
 	.irq_unmask		= msm_dirconn_irq_unmask,
 	.irq_eoi		= msm_dirconn_irq_eoi,
 	.irq_ack		= msm_dirconn_irq_ack,
@@ -1074,57 +1249,53 @@
 	chained_irq_exit(chip, desc);
 }
 
-static void msm_gpio_dirconn_handler(struct irq_desc *desc)
-{
-	struct irq_data *irqd = irq_desc_get_handler_data(desc);
-	struct irq_chip *chip = irq_desc_get_chip(desc);
-
-	chained_irq_enter(chip, desc);
-	generic_handle_irq(irqd->irq);
-	chained_irq_exit(chip, desc);
-}
-
 static void msm_gpio_setup_dir_connects(struct msm_pinctrl *pctrl)
 {
 	struct device_node *parent_node;
-	struct irq_domain *parent_domain;
-	struct irq_fwspec fwspec;
+	struct irq_domain *pdc_domain;
 	unsigned int i;
 
 	parent_node = of_irq_find_parent(pctrl->dev->of_node);
-
 	if (!parent_node)
 		return;
 
-	parent_domain = irq_find_host(parent_node);
-	if (!parent_domain)
+	pdc_domain = irq_find_host(parent_node);
+	if (!pdc_domain)
 		return;
 
-	fwspec.fwnode = parent_domain->fwnode;
 	for (i = 0; i < pctrl->soc->n_dir_conns; i++) {
 		const struct msm_dir_conn *dirconn = &pctrl->soc->dir_conn[i];
-		unsigned int parent_irq;
-		int irq;
 
-		fwspec.param[0] = 0; /* SPI */
-		fwspec.param[1] = dirconn->hwirq;
-		fwspec.param[2] = IRQ_TYPE_NONE;
-		fwspec.param_count = 3;
-		parent_irq = irq_create_fwspec_mapping(&fwspec);
+		request_dc_interrupt(pctrl->chip.irqdomain, pdc_domain,
+					dirconn->hwirq, dirconn->gpio);
+	}
 
-		if (dirconn->gpio != 0) {
-			irq = irq_find_mapping(pctrl->chip.irqdomain,
-					dirconn->gpio);
+	for (i = 0; i < pctrl->soc->n_pdc_mux_out; i++) {
+		struct msm_pdc_mux_output *pdc_out =
+					&pctrl->soc->pdc_mux_out[i];
 
-			irq_set_parent(irq, parent_irq);
-			irq_set_chip(irq, &msm_dirconn_irq_chip);
-			__irq_set_handler(parent_irq, msm_gpio_dirconn_handler,
-				false, NULL);
-			irq_set_handler_data(parent_irq, irq_get_irq_data(irq));
-		} else {
-			__irq_set_handler(parent_irq, msm_gpio_dirconn_handler,
-				false, NULL);
-		}
+		request_dc_interrupt(pctrl->chip.irqdomain, pdc_domain,
+					pdc_out->hwirq, 0);
+	}
+
+	/*
+	 * Statically choose the GPIOs for mapping to PDC. Dynamic mux mapping
+	 * is very difficult.
+	 */
+	for (i = 0; i < pctrl->soc->n_pdc_mux_out; i++) {
+		unsigned int irq;
+		struct irq_data *d;
+		struct msm_gpio_mux_input *gpio_in =
+					&pctrl->soc->gpio_mux_in[i];
+		if (!gpio_in->init)
+			continue;
+
+		irq = irq_find_mapping(pctrl->chip.irqdomain, gpio_in->gpio);
+		d = irq_get_irq_data(irq);
+		if (!d)
+			continue;
+
+		gpio_muxed_to_pdc(pdc_domain, d);
 	}
 }
 
@@ -1234,6 +1405,9 @@
 	if (IS_ERR(pctrl->regs))
 		return PTR_ERR(pctrl->regs);
 
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	pctrl->pdc_regs = devm_ioremap_resource(&pdev->dev, res);
+
 	msm_pinctrl_setup_pm_reset(pctrl);
 
 	pctrl->irq = platform_get_irq(pdev, 0);
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
index 1c6df2f..9fc6660 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.h
+++ b/drivers/pinctrl/qcom/pinctrl-msm.h
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2013, Sony Mobile Communications AB.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -99,13 +100,35 @@
 	unsigned intr_detection_bit:5;
 	unsigned intr_detection_width:5;
 	unsigned dir_conn_en_bit:8;
-}
+};
+
+/**
+ * struct msm_gpio_mux_input - Map GPIO to Mux pin
+ * @mux:	The mux pin to which the GPIO is connected
+ * @gpio:	GPIO pin number
+ * @init:	Setup PDC connection at probe
+ */
+struct msm_gpio_mux_input {
+	unsigned int mux;
+	unsigned int gpio;
+	bool init;
+};
+
+/**
+ * struct msm_pdc_mux_output - GPIO mux pin to PDC port
+ * @mux:	GPIO mux pin number
+ * @hwirq:	The PDC port (hwirq) that GPIO is connected to
+ */
+struct msm_pdc_mux_output {
+	unsigned int mux;
+	irq_hw_number_t hwirq;
+};
 
 /**
  * struct msm_dir_conn - Direct GPIO connect configuration
  * @gpio:	GPIO pin number
  * @hwirq:	The GIC interrupt that the pin is connected to
- */;
+ */
 struct msm_dir_conn {
 	unsigned int gpio;
 	irq_hw_number_t hwirq;
@@ -122,8 +145,12 @@
  * @ngpio:      The number of pingroups the driver should expose as GPIOs.
  * @dir_conn:   An array describing all the pins directly connected to GIC.
  * @ndirconns:  The number of pins directly connected to GIC
- * @dir_conn_offsets:   Direct connect register offsets for each tile.
  * @dir_conn_irq_base:  Direct connect interrupt base register for kpss.
+ * @gpio_mux_in:	Map of GPIO pin to the mux pin.
+ * @n_gpio_mux_in:	The number of entries in @gpio_mux_in.
+ * @pdc_mux_out:	Map of GPIO mux to PDC port.
+ * @n_pdc_mux_out:	The number of entries in @pdc_mux_out.
+ * @n_pdc_mux_offset:	The offset for the PDC mux pins
  */
 struct msm_pinctrl_soc_data {
 	const struct pinctrl_pin_desc *pins;
@@ -136,6 +163,11 @@
 	const struct msm_dir_conn *dir_conn;
 	unsigned int n_dir_conns;
 	unsigned int dir_conn_irq_base;
+	struct msm_pdc_mux_output *pdc_mux_out;
+	unsigned int n_pdc_mux_out;
+	struct msm_gpio_mux_input *gpio_mux_in;
+	unsigned int n_gpio_mux_in;
+	unsigned int n_pdc_mux_offset;
 };
 
 int msm_pinctrl_probe(struct platform_device *pdev,
diff --git a/drivers/pinctrl/qcom/pinctrl-sdxpoorwills.c b/drivers/pinctrl/qcom/pinctrl-sdxpoorwills.c
index 6ceb39a..c5f1307 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdxpoorwills.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdxpoorwills.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1105,6 +1105,97 @@
 	[106] = SDC_QDSD_PINGROUP(sdc2_data, 0x0, 9, 0),
 };
 
+static struct msm_gpio_mux_input sdxpoorwills_mux_in[] = {
+	{0, 1},
+	{1, 2},
+	{2, 5},
+	{3, 6},
+	{4, 9},
+	{5, 10},
+	{6, 11},
+	{7, 12},
+	{8, 13},
+	{9, 14},
+	{10, 15},
+	{11, 16},
+	{12, 17},
+	{13, 18},
+	{14, 19},
+	{15, 21},
+	{16, 22},
+	{17, 24},
+	{18, 25},
+	{19, 35},
+	{20, 42, 1},
+	{21, 43},
+	{22, 45},
+	{23, 46},
+	{24, 48},
+	{25, 50},
+	{26, 52},
+	{27, 53},
+	{28, 54},
+	{29, 55},
+	{30, 56},
+	{31, 57},
+	{32, 60},
+	{33, 61},
+	{34, 64},
+	{35, 65},
+	{36, 68},
+	{37, 71},
+	{38, 75},
+	{39, 76},
+	{40, 78},
+	{41, 79},
+	{42, 80},
+	{43, 82},
+	{44, 83},
+	{45, 84},
+	{46, 86},
+	{47, 87},
+	{48, 88},
+	{49, 89},
+	{50, 90},
+	{51, 93},
+	{52, 94},
+	{53, 95},
+	{54, 97},
+	{55, 98},
+};
+
+static struct msm_pdc_mux_output sdxpoorwills_mux_out[] = {
+	{0, 167},
+	{0, 168},
+	{0, 169},
+	{0, 170},
+	{0, 171},
+	{0, 172},
+	{0, 173},
+	{0, 174},
+	{0, 175},
+	{0, 176},
+	{0, 177},
+	{0, 178},
+	{0, 179},
+	{0, 180},
+	{0, 181},
+	{0, 182},
+	{0, 183},
+	{0, 184},
+	{0, 185},
+	{0, 186},
+	{0, 187},
+	{0, 188},
+	{0, 189},
+	{0, 190},
+	{0, 191},
+	{0, 192},
+	{0, 193},
+	{0, 194},
+	{0, 195},
+};
+
 static const struct msm_pinctrl_soc_data sdxpoorwills_pinctrl = {
 	.pins = sdxpoorwills_pins,
 	.npins = ARRAY_SIZE(sdxpoorwills_pins),
@@ -1112,6 +1203,11 @@
 	.nfunctions = ARRAY_SIZE(sdxpoorwills_functions),
 	.groups = sdxpoorwills_groups,
 	.ngroups = ARRAY_SIZE(sdxpoorwills_groups),
+	.gpio_mux_in = sdxpoorwills_mux_in,
+	.n_gpio_mux_in = ARRAY_SIZE(sdxpoorwills_mux_in),
+	.pdc_mux_out = sdxpoorwills_mux_out,
+	.n_pdc_mux_out = ARRAY_SIZE(sdxpoorwills_mux_out),
+	.n_pdc_mux_offset = 20,
 	.ngpios = 100,
 };
 
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c
index 7f30416..f714f67 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas7.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas7.c
@@ -5420,14 +5420,15 @@
 	sys2pci_np = of_find_node_by_name(NULL, "sys2pci");
 	if (!sys2pci_np)
 		return -EINVAL;
+
 	ret = of_address_to_resource(sys2pci_np, 0, &res);
+	of_node_put(sys2pci_np);
 	if (ret)
 		return ret;
+
 	pmx->sys2pci_base = devm_ioremap_resource(&pdev->dev, &res);
-	if (IS_ERR(pmx->sys2pci_base)) {
-		of_node_put(sys2pci_np);
+	if (IS_ERR(pmx->sys2pci_base))
 		return -ENOMEM;
-	}
 
 	pmx->dev = &pdev->dev;
 
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
index 6117d4d..4abd8f1 100644
--- a/drivers/platform/msm/Kconfig
+++ b/drivers/platform/msm/Kconfig
@@ -152,6 +152,14 @@
 	  numbers in the kernel log along with the PMIC option status. The PMIC
 	  type is mapped to a QTI chip part number and logged as well.
 
+config GPIO_USB_DETECT
+	tristate "GPIO-based USB VBUS Detection"
+	depends on POWER_SUPPLY
+	help
+	  This driver supports external USB VBUS detection circuitry whose
+	  output is connected to a GPIO. The driver in turn notifies the
+	  USB driver of VBUS presence/disconnection using the power_supply
+	  framework.
 
 config MSM_MHI_DEV
         tristate "Modem Device Interface Driver"
diff --git a/drivers/platform/msm/Makefile b/drivers/platform/msm/Makefile
index bee32c2..d0f0216f 100644
--- a/drivers/platform/msm/Makefile
+++ b/drivers/platform/msm/Makefile
@@ -7,6 +7,7 @@
 obj-$(CONFIG_SPS) += sps/
 obj-$(CONFIG_QPNP_COINCELL) += qpnp-coincell.o
 obj-$(CONFIG_QPNP_REVID) += qpnp-revid.o
+obj-$(CONFIG_GPIO_USB_DETECT) += gpio-usbdetect.o
 obj-$(CONFIG_EP_PCIE) += ep_pcie/
 obj-$(CONFIG_MSM_MHI_DEV) += mhi_dev/
 obj-$(CONFIG_USB_BAM) += usb_bam.o
diff --git a/drivers/platform/msm/gpio-usbdetect.c b/drivers/platform/msm/gpio-usbdetect.c
new file mode 100644
index 0000000..6730d4a
--- /dev/null
+++ b/drivers/platform/msm/gpio-usbdetect.c
@@ -0,0 +1,266 @@
+/* Copyright (c) 2013-2015, 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/of_platform.h>
+#include <linux/interrupt.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/extcon.h>
+#include <linux/regulator/consumer.h>
+
+struct gpio_usbdetect {
+	struct platform_device	*pdev;
+	struct regulator	*vin;
+	int			vbus_det_irq;
+	int			id_det_irq;
+	int			gpio;
+	struct extcon_dev	*extcon_dev;
+	int			vbus_state;
+	bool			id_state;
+};
+
+static const unsigned int gpio_usb_extcon_table[] = {
+	EXTCON_USB,
+	EXTCON_USB_HOST,
+	EXTCON_NONE,
+};
+
+static irqreturn_t gpio_usbdetect_vbus_irq(int irq, void *data)
+{
+	struct gpio_usbdetect *usb = data;
+	union extcon_property_value val;
+
+	usb->vbus_state = gpio_get_value(usb->gpio);
+	if (usb->vbus_state) {
+		dev_dbg(&usb->pdev->dev, "setting vbus notification\n");
+		val.intval = true;
+		extcon_set_property(usb->extcon_dev, EXTCON_USB,
+					EXTCON_PROP_USB_SS, val);
+		extcon_set_cable_state_(usb->extcon_dev, EXTCON_USB, 1);
+	} else {
+		dev_dbg(&usb->pdev->dev, "setting vbus removed notification\n");
+		extcon_set_cable_state_(usb->extcon_dev, EXTCON_USB, 0);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t gpio_usbdetect_id_irq(int irq, void *data)
+{
+	struct gpio_usbdetect *usb = data;
+	int ret;
+
+	ret = irq_get_irqchip_state(irq, IRQCHIP_STATE_LINE_LEVEL,
+			&usb->id_state);
+	if (ret < 0) {
+		dev_err(&usb->pdev->dev, "unable to read ID IRQ LINE\n");
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t gpio_usbdetect_id_irq_thread(int irq, void *data)
+{
+	struct gpio_usbdetect *usb = data;
+	bool curr_id_state;
+	static int prev_id_state = -EINVAL;
+	union extcon_property_value val;
+
+	curr_id_state = usb->id_state;
+	if (curr_id_state == prev_id_state) {
+		dev_dbg(&usb->pdev->dev, "no change in ID state\n");
+		return IRQ_HANDLED;
+	}
+
+	if (curr_id_state) {
+		dev_dbg(&usb->pdev->dev, "stopping usb host\n");
+		extcon_set_cable_state_(usb->extcon_dev, EXTCON_USB_HOST, 0);
+		enable_irq(usb->vbus_det_irq);
+	} else {
+		dev_dbg(&usb->pdev->dev, "starting usb HOST\n");
+		disable_irq(usb->vbus_det_irq);
+		val.intval = true;
+		extcon_set_property(usb->extcon_dev, EXTCON_USB_HOST,
+					EXTCON_PROP_USB_SS, val);
+		extcon_set_cable_state_(usb->extcon_dev, EXTCON_USB_HOST, 1);
+	}
+
+	prev_id_state = curr_id_state;
+	return IRQ_HANDLED;
+}
+
+static const u32 gpio_usb_extcon_exclusive[] = {0x3, 0};
+
+static int gpio_usbdetect_probe(struct platform_device *pdev)
+{
+	struct gpio_usbdetect *usb;
+	int rc;
+
+	usb = devm_kzalloc(&pdev->dev, sizeof(*usb), GFP_KERNEL);
+	if (!usb)
+		return -ENOMEM;
+
+	usb->pdev = pdev;
+
+	usb->extcon_dev = devm_extcon_dev_allocate(&pdev->dev,
+			gpio_usb_extcon_table);
+	if (IS_ERR(usb->extcon_dev)) {
+		dev_err(&pdev->dev, "failed to allocate a extcon device\n");
+		return PTR_ERR(usb->extcon_dev);
+	}
+
+	usb->extcon_dev->mutually_exclusive = gpio_usb_extcon_exclusive;
+	rc = devm_extcon_dev_register(&pdev->dev, usb->extcon_dev);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to register extcon device\n");
+		return rc;
+	}
+
+	rc = extcon_set_property_capability(usb->extcon_dev,
+			EXTCON_USB, EXTCON_PROP_USB_SS);
+	rc |= extcon_set_property_capability(usb->extcon_dev,
+			EXTCON_USB_HOST, EXTCON_PROP_USB_SS);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to register extcon props rc=%d\n",
+						rc);
+		return rc;
+	}
+
+	if (of_get_property(pdev->dev.of_node, "vin-supply", NULL)) {
+		usb->vin = devm_regulator_get(&pdev->dev, "vin");
+		if (IS_ERR(usb->vin)) {
+			dev_err(&pdev->dev, "Failed to get VIN regulator: %ld\n",
+				PTR_ERR(usb->vin));
+			return PTR_ERR(usb->vin);
+		}
+	}
+
+	if (usb->vin) {
+		rc = regulator_enable(usb->vin);
+		if (rc) {
+			dev_err(&pdev->dev, "Failed to enable VIN regulator: %d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	usb->gpio = of_get_named_gpio(pdev->dev.of_node,
+				"qcom,vbus-det-gpio", 0);
+	if (usb->gpio < 0) {
+		dev_err(&pdev->dev, "Failed to get gpio: %d\n", usb->gpio);
+		rc = usb->gpio;
+		goto error;
+	}
+
+	rc = gpio_request(usb->gpio, "vbus-det-gpio");
+	if (rc < 0) {
+		dev_err(&pdev->dev, "Failed to request gpio: %d\n", rc);
+		goto error;
+	}
+
+	usb->vbus_det_irq = gpio_to_irq(usb->gpio);
+	if (usb->vbus_det_irq < 0) {
+		dev_err(&pdev->dev, "get vbus_det_irq failed\n");
+		rc = usb->vbus_det_irq;
+		goto error;
+	}
+
+	rc = devm_request_threaded_irq(&pdev->dev, usb->vbus_det_irq,
+				NULL, gpio_usbdetect_vbus_irq,
+			      IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+			      IRQF_ONESHOT, "vbus_det_irq", usb);
+	if (rc) {
+		dev_err(&pdev->dev, "request for vbus_det_irq failed: %d\n",
+			rc);
+		goto error;
+	}
+
+	usb->id_det_irq = platform_get_irq_byname(pdev, "pmic_id_irq");
+	if (usb->id_det_irq < 0) {
+		dev_err(&pdev->dev, "get id_det_irq failed\n");
+		rc = usb->id_det_irq;
+		goto error;
+	}
+
+	rc = devm_request_threaded_irq(&pdev->dev, usb->id_det_irq,
+				gpio_usbdetect_id_irq,
+				gpio_usbdetect_id_irq_thread,
+				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+				IRQF_ONESHOT, "id_det_irq", usb);
+	if (rc) {
+		dev_err(&pdev->dev, "request for id_det_irq failed: %d\n", rc);
+		goto error;
+	}
+
+	enable_irq_wake(usb->vbus_det_irq);
+	enable_irq_wake(usb->id_det_irq);
+	dev_set_drvdata(&pdev->dev, usb);
+
+	if (usb->id_det_irq) {
+		gpio_usbdetect_id_irq(usb->id_det_irq, usb);
+		if (!usb->id_state) {
+			gpio_usbdetect_id_irq_thread(usb->id_det_irq, usb);
+			return 0;
+		}
+	}
+
+	/* Read and report initial VBUS state */
+	gpio_usbdetect_vbus_irq(usb->vbus_det_irq, usb);
+
+	return 0;
+
+error:
+	if (usb->vin)
+		regulator_disable(usb->vin);
+	return rc;
+}
+
+static int gpio_usbdetect_remove(struct platform_device *pdev)
+{
+	struct gpio_usbdetect *usb = dev_get_drvdata(&pdev->dev);
+
+	disable_irq_wake(usb->vbus_det_irq);
+	disable_irq(usb->vbus_det_irq);
+	disable_irq_wake(usb->id_det_irq);
+	disable_irq(usb->id_det_irq);
+	if (usb->vin)
+		regulator_disable(usb->vin);
+
+	return 0;
+}
+
+static const struct of_device_id of_match_table[] = {
+	{ .compatible = "qcom,gpio-usbdetect", },
+	{}
+};
+
+static struct platform_driver gpio_usbdetect_driver = {
+	.driver		= {
+		.name	= "qcom,gpio-usbdetect",
+		.of_match_table = of_match_table,
+	},
+	.probe		= gpio_usbdetect_probe,
+	.remove		= gpio_usbdetect_remove,
+};
+
+module_driver(gpio_usbdetect_driver, platform_driver_register,
+		platform_driver_unregister);
+
+MODULE_DESCRIPTION("GPIO USB VBUS Detection driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index 6727da6..dc445a0 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -27,6 +27,7 @@
 
 #define GSI_RESET_WA_MIN_SLEEP 1000
 #define GSI_RESET_WA_MAX_SLEEP 2000
+#define GSI_CHNL_STATE_MAX_RETRYCNT 10
 static const struct of_device_id msm_gsi_match[] = {
 	{ .compatible = "qcom,msm_gsi", },
 	{ },
@@ -2076,6 +2077,7 @@
 	uint32_t val;
 	struct gsi_chan_ctx *ctx;
 	bool reset_done = false;
+	uint32_t retry_cnt = 0;
 
 	if (!gsi_ctx) {
 		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
@@ -2112,9 +2114,19 @@
 		return -GSI_STATUS_TIMED_OUT;
 	}
 
+revrfy_chnlstate:
 	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
 		GSIERR("chan_hdl=%lu unexpected state=%u\n", chan_hdl,
 				ctx->state);
+		/* The GSI register state and the gsi channel context state
+		 * may momentarily be out of sync; wait ~1ms for them to
+		 * converge before retrying. */
+		retry_cnt++;
+		if (retry_cnt <= GSI_CHNL_STATE_MAX_RETRYCNT) {
+			usleep_range(GSI_RESET_WA_MIN_SLEEP,
+				GSI_RESET_WA_MAX_SLEEP);
+			goto revrfy_chnlstate;
+		}
 		BUG();
 	}
 
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
index d274490..a8946bf 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -72,6 +72,11 @@
 			IPA_USB_DRV_NAME " %s:%d " fmt, ## args); \
 	} while (0)
 
+enum ipa_usb_direction {
+	IPA_USB_DIR_UL,
+	IPA_USB_DIR_DL,
+};
+
 struct ipa_usb_xdci_connect_params_internal {
 	enum ipa_usb_max_usb_packet_size max_pkt_size;
 	u32 ipa_to_usb_clnt_hdl;
@@ -167,7 +172,8 @@
 	int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event, void *user_data);
 	void *user_data;
 	enum ipa3_usb_state state;
-	struct ipa_usb_xdci_chan_params ch_params;
+	struct ipa_usb_xdci_chan_params ul_ch_params;
+	struct ipa_usb_xdci_chan_params dl_ch_params;
 	struct ipa3_usb_teth_prot_conn_params teth_conn_params;
 };
 
@@ -741,10 +747,6 @@
 		&ipa3_usb_ctx->ttype_ctx[ttype];
 	int result;
 
-	/* create PM resources for the first tethering protocol only */
-	if (ipa3_usb_ctx->num_init_prot > 0)
-		return 0;
-
 	memset(&ttype_ctx->pm_ctx.reg_params, 0,
 		sizeof(ttype_ctx->pm_ctx.reg_params));
 	ttype_ctx->pm_ctx.reg_params.name = (ttype == IPA_USB_TRANSPORT_DPL) ?
@@ -1024,11 +1026,11 @@
 	return 0;
 
 teth_prot_init_fail:
-	if ((IPA3_USB_IS_TTYPE_DPL(ttype))
-		|| (ipa3_usb_ctx->num_init_prot == 0)) {
-		if (ipa_pm_is_used()) {
-			ipa3_usb_deregister_pm(ttype);
-		} else {
+	if (ipa_pm_is_used()) {
+		ipa3_usb_deregister_pm(ttype);
+	} else {
+		if ((IPA3_USB_IS_TTYPE_DPL(ttype))
+			|| (ipa3_usb_ctx->num_init_prot == 0)) {
 			ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_valid =
 				false;
 			ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_valid =
@@ -1073,8 +1075,6 @@
 			params->gevntcount_hi_addr);
 	IPA_USB_DBG_LOW("dir = %d\n", params->dir);
 	IPA_USB_DBG_LOW("xfer_ring_len = %d\n", params->xfer_ring_len);
-	IPA_USB_DBG_LOW("xfer_ring_base_addr = %llx\n",
-		params->xfer_ring_base_addr);
 	IPA_USB_DBG_LOW("last_trb_addr_iova = %x\n",
 		params->xfer_scratch.last_trb_addr_iova);
 	IPA_USB_DBG_LOW("const_buffer_size = %d\n",
@@ -1178,15 +1178,16 @@
 		ipa3_usb_ctx->smmu_reg_map.cnt--;
 	}
 
+
 	result = ipa3_smmu_map_peer_buff(params->xfer_ring_base_addr_iova,
-		params->xfer_ring_base_addr, params->xfer_ring_len, map);
+		params->xfer_ring_len, map, params->sgt_xfer_rings);
 	if (result) {
 		IPA_USB_ERR("failed to map Xfer ring %d\n", result);
 		return result;
 	}
 
 	result = ipa3_smmu_map_peer_buff(params->data_buff_base_addr_iova,
-		params->data_buff_base_addr, params->data_buff_base_len, map);
+		params->data_buff_base_len, map, params->sgt_data_buff);
 	if (result) {
 		IPA_USB_ERR("failed to map TRBs buff %d\n", result);
 		return result;
@@ -1195,13 +1196,52 @@
 	return 0;
 }
 
+static int ipa3_usb_smmu_store_sgt(struct sg_table **out_ch_ptr,
+	struct sg_table *in_sgt_ptr)
+{
+	unsigned int nents;
+
+	if (in_sgt_ptr != NULL) {
+		*out_ch_ptr = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+		if (*out_ch_ptr == NULL)
+			return -ENOMEM;
+
+		nents = in_sgt_ptr->nents;
+
+		(*out_ch_ptr)->sgl =
+			kcalloc(nents, sizeof(struct scatterlist),
+				GFP_KERNEL);
+		if ((*out_ch_ptr)->sgl == NULL)
+			return -ENOMEM;
+
+		memcpy((*out_ch_ptr)->sgl, in_sgt_ptr->sgl,
+			nents*sizeof((*out_ch_ptr)->sgl));
+		(*out_ch_ptr)->nents = nents;
+		(*out_ch_ptr)->orig_nents = in_sgt_ptr->orig_nents;
+	}
+	return 0;
+}
+
+static int ipa3_usb_smmu_free_sgt(struct sg_table **out_sgt_ptr)
+{
+	if (*out_sgt_ptr != NULL) {
+		kfree((*out_sgt_ptr)->sgl);
+		(*out_sgt_ptr)->sgl = NULL;
+		kfree(*out_sgt_ptr);
+		*out_sgt_ptr = NULL;
+	}
+	return 0;
+}
+
 static int ipa3_usb_request_xdci_channel(
 	struct ipa_usb_xdci_chan_params *params,
+	enum ipa_usb_direction dir,
 	struct ipa_req_chan_out_params *out_params)
 {
 	int result = -EFAULT;
 	struct ipa_request_gsi_channel_params chan_params;
 	enum ipa3_usb_transport_type ttype;
+	struct ipa_usb_xdci_chan_params *xdci_ch_params;
 
 	IPA_USB_DBG_LOW("entry\n");
 	if (params == NULL || out_params == NULL ||
@@ -1277,8 +1317,26 @@
 	}
 
 	/* store channel params for SMMU unmap */
-	ipa3_usb_ctx->ttype_ctx[ttype].ch_params = *params;
+	if (dir == IPA_USB_DIR_UL)
+		xdci_ch_params = &ipa3_usb_ctx->ttype_ctx[ttype].ul_ch_params;
+	else
+		xdci_ch_params = &ipa3_usb_ctx->ttype_ctx[ttype].dl_ch_params;
 
+	*xdci_ch_params = *params;
+	result = ipa3_usb_smmu_store_sgt(
+		&xdci_ch_params->sgt_xfer_rings,
+		params->sgt_xfer_rings);
+	if (result) {
+		ipa3_usb_smmu_free_sgt(&xdci_ch_params->sgt_xfer_rings);
+		return result;
+	}
+	result = ipa3_usb_smmu_store_sgt(
+		&xdci_ch_params->sgt_data_buff,
+		params->sgt_data_buff);
+	if (result) {
+		ipa3_usb_smmu_free_sgt(&xdci_ch_params->sgt_data_buff);
+		return result;
+	}
 	chan_params.keep_ipa_awake = params->keep_ipa_awake;
 	chan_params.evt_ring_params.intf = GSI_EVT_CHTYPE_XDCI_EV;
 	chan_params.evt_ring_params.intr = GSI_INTR_IRQ;
@@ -1286,7 +1344,7 @@
 	chan_params.evt_ring_params.ring_len = params->xfer_ring_len -
 		chan_params.evt_ring_params.re_size;
 	chan_params.evt_ring_params.ring_base_addr =
-		params->xfer_ring_base_addr;
+		params->xfer_ring_base_addr_iova;
 	chan_params.evt_ring_params.ring_base_vaddr = NULL;
 	chan_params.evt_ring_params.int_modt = 0;
 	chan_params.evt_ring_params.int_modt = 0;
@@ -1306,7 +1364,7 @@
 	chan_params.chan_params.re_size = GSI_CHAN_RE_SIZE_16B;
 	chan_params.chan_params.ring_len = params->xfer_ring_len;
 	chan_params.chan_params.ring_base_addr =
-		params->xfer_ring_base_addr;
+		params->xfer_ring_base_addr_iova;
 	chan_params.chan_params.ring_base_vaddr = NULL;
 	chan_params.chan_params.use_db_eng = GSI_CHAN_DB_MODE;
 	chan_params.chan_params.max_prefetch = GSI_ONE_PREFETCH_SEG;
@@ -1332,6 +1390,10 @@
 	chan_params.chan_scratch.xdci.outstanding_threshold =
 		((params->teth_prot == IPA_USB_MBIM) ? 1 : 2) *
 		chan_params.chan_params.re_size;
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+		chan_params.chan_scratch.xdci.outstanding_threshold = 0;
+
 	/* max_outstanding_tre is set in ipa3_request_gsi_channel() */
 	result = ipa3_request_gsi_channel(&chan_params, out_params);
 	if (result) {
@@ -1345,9 +1407,11 @@
 }
 
 static int ipa3_usb_release_xdci_channel(u32 clnt_hdl,
+	enum ipa_usb_direction dir,
 	enum ipa3_usb_transport_type ttype)
 {
 	int result = 0;
+	struct ipa_usb_xdci_chan_params *xdci_ch_params;
 
 	IPA_USB_DBG_LOW("entry\n");
 	if (ttype < 0 || ttype >= IPA_USB_TRANSPORT_MAX) {
@@ -1367,8 +1431,17 @@
 		return result;
 	}
 
-	result = ipa3_usb_smmu_map_xdci_channel(
-		&ipa3_usb_ctx->ttype_ctx[ttype].ch_params, false);
+	if (dir == IPA_USB_DIR_UL)
+		xdci_ch_params = &ipa3_usb_ctx->ttype_ctx[ttype].ul_ch_params;
+	else
+		xdci_ch_params = &ipa3_usb_ctx->ttype_ctx[ttype].dl_ch_params;
+
+	result = ipa3_usb_smmu_map_xdci_channel(xdci_ch_params, false);
+
+	if (xdci_ch_params->sgt_xfer_rings != NULL)
+		ipa3_usb_smmu_free_sgt(&xdci_ch_params->sgt_xfer_rings);
+	if (xdci_ch_params->sgt_data_buff != NULL)
+		ipa3_usb_smmu_free_sgt(&xdci_ch_params->sgt_data_buff);
 
 	/* Change ipa_usb state to INITIALIZED */
 	if (!ipa3_usb_set_state(IPA_USB_INITIALIZED, false, ttype))
@@ -2131,14 +2204,15 @@
 
 	if (connect_params->teth_prot != IPA_USB_DIAG) {
 		result = ipa3_usb_request_xdci_channel(ul_chan_params,
-			ul_out_params);
+			IPA_USB_DIR_UL, ul_out_params);
 		if (result) {
 			IPA_USB_ERR("failed to allocate UL channel.\n");
 			goto bad_params;
 		}
 	}
 
-	result = ipa3_usb_request_xdci_channel(dl_chan_params, dl_out_params);
+	result = ipa3_usb_request_xdci_channel(dl_chan_params, IPA_USB_DIR_DL,
+		dl_out_params);
 	if (result) {
 		IPA_USB_ERR("failed to allocate DL/DPL channel.\n");
 		goto alloc_dl_chan_fail;
@@ -2174,11 +2248,12 @@
 	return 0;
 
 connect_fail:
-	ipa3_usb_release_xdci_channel(dl_out_params->clnt_hdl,
+	ipa3_usb_release_xdci_channel(dl_out_params->clnt_hdl, IPA_USB_DIR_DL,
 		IPA3_USB_GET_TTYPE(dl_chan_params->teth_prot));
 alloc_dl_chan_fail:
 	if (connect_params->teth_prot != IPA_USB_DIAG)
 		ipa3_usb_release_xdci_channel(ul_out_params->clnt_hdl,
+			IPA_USB_DIR_UL,
 			IPA3_USB_GET_TTYPE(ul_chan_params->teth_prot));
 bad_params:
 	mutex_unlock(&ipa3_usb_ctx->general_mutex);
@@ -2250,14 +2325,16 @@
 		IPA_USB_ERR("failed to change state to stopped\n");
 
 	if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
-		result = ipa3_usb_release_xdci_channel(ul_clnt_hdl, ttype);
+		result = ipa3_usb_release_xdci_channel(ul_clnt_hdl,
+			IPA_USB_DIR_UL, ttype);
 		if (result) {
 			IPA_USB_ERR("failed to release UL channel.\n");
 			return result;
 		}
 	}
 
-	result = ipa3_usb_release_xdci_channel(dl_clnt_hdl, ttype);
+	result = ipa3_usb_release_xdci_channel(dl_clnt_hdl,
+		IPA_USB_DIR_DL, ttype);
 	if (result) {
 		IPA_USB_ERR("failed to release DL channel.\n");
 		return result;
@@ -2462,13 +2539,14 @@
 		goto bad_params;
 	}
 
-	if (IPA3_USB_IS_TTYPE_DPL(ttype) ||
-		(ipa3_usb_ctx->num_init_prot == 0)) {
-		if (!ipa3_usb_set_state(IPA_USB_INVALID, false, ttype))
-			IPA_USB_ERR("failed to change state to invalid\n");
-		if (ipa_pm_is_used()) {
-			ipa3_usb_deregister_pm(ttype);
-		} else {
+	if (ipa_pm_is_used()) {
+		ipa3_usb_deregister_pm(ttype);
+	} else {
+		if (IPA3_USB_IS_TTYPE_DPL(ttype) ||
+			(ipa3_usb_ctx->num_init_prot == 0)) {
+			if (!ipa3_usb_set_state(IPA_USB_INVALID, false, ttype))
+				IPA_USB_ERR(
+					"failed to change state to invalid\n");
 			ipa_rm_delete_resource(
 			ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_params.name);
 			ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_valid =
@@ -2868,9 +2946,8 @@
 	pr_debug("entry\n");
 	ipa3_usb_ctx = kzalloc(sizeof(struct ipa3_usb_context), GFP_KERNEL);
 	if (ipa3_usb_ctx == NULL) {
-		pr_err("failed to allocate memory\n");
 		pr_err(":ipa_usb init failed\n");
-		return -EFAULT;
+		return -ENOMEM;
 	}
 	memset(ipa3_usb_ctx, 0, sizeof(struct ipa3_usb_context));
 
diff --git a/drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c b/drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c
index 613bed3..2723a35 100644
--- a/drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c
+++ b/drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -140,7 +140,7 @@
 	ipa_rm_it_handles[resource_name].work_in_progress = false;
 	pwlock = &(ipa_rm_it_handles[resource_name].w_lock);
 	name = ipa_rm_it_handles[resource_name].w_lock_name;
-	snprintf(name, MAX_WS_NAME, "IPA_RM%d\n", resource_name);
+	snprintf(name, MAX_WS_NAME, "IPA_RM%d", resource_name);
 	wakeup_source_init(pwlock, name);
 	INIT_DELAYED_WORK(&ipa_rm_it_handles[resource_name].work,
 			  ipa_rm_inactivity_timer_func);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
index a297f24..f062ed2 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -580,6 +580,15 @@
 	if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE)
 		pr_err("ether_type:%x ", attrib->ether_type);
 
+	if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IP_TYPE)
+		pr_err("l2tp inner ip type: %d ", attrib->type);
+
+	if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IPV4_DST_ADDR) {
+		addr[0] = htonl(attrib->u.v4.dst_addr);
+		mask[0] = htonl(attrib->u.v4.dst_addr_mask);
+		pr_err("dst_addr:%pI4 dst_addr_mask:%pI4 ", addr, mask);
+	}
+
 	pr_err("\n");
 	return 0;
 }
@@ -1446,7 +1455,11 @@
 	pr_err("Table Size:%d\n",
 				ipa_ctx->nat_mem.size_base_tables);
 
-	pr_err("Expansion Table Size:%d\n",
+	if (!ipa_ctx->nat_mem.size_expansion_tables)
+		pr_err("Expansion Table Size:%d\n",
+				ipa_ctx->nat_mem.size_expansion_tables);
+	else
+		pr_err("Expansion Table Size:%d\n",
 				ipa_ctx->nat_mem.size_expansion_tables-1);
 
 	if (!ipa_ctx->nat_mem.is_sys_mem)
@@ -1461,6 +1474,8 @@
 
 			pr_err("\nBase Table:\n");
 		} else {
+			if (!ipa_ctx->nat_mem.size_expansion_tables)
+				continue;
 			tbl_size = ipa_ctx->nat_mem.size_expansion_tables-1;
 			base_tbl =
 			 (u32 *)ipa_ctx->nat_mem.ipv4_expansion_rules_addr;
@@ -1560,6 +1575,8 @@
 
 			pr_err("\nIndex Table:\n");
 		} else {
+			if (!ipa_ctx->nat_mem.size_expansion_tables)
+				continue;
 			tbl_size = ipa_ctx->nat_mem.size_expansion_tables-1;
 			indx_tbl =
 			 (u32 *)ipa_ctx->nat_mem.index_table_expansion_addr;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
index 3cb86d0..9f71d7b 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
@@ -2903,10 +2903,12 @@
 	struct ipa_ep_context *ep;
 	unsigned int src_pipe;
 	u32 metadata;
+	u8 ucp;
 
 	status = (struct ipa_hw_pkt_status *)rx_skb->data;
 	src_pipe = status->endp_src_idx;
 	metadata = status->metadata;
+	ucp = status->ucp;
 	ep = &ipa_ctx->ep[src_pipe];
 	if (unlikely(src_pipe >= ipa_ctx->ipa_num_pipes ||
 		!ep->valid ||
@@ -2930,8 +2932,10 @@
 	 *  ------------------------------------------
 	 */
 	*(u16 *)rx_skb->cb = ((metadata >> 16) & 0xFFFF);
+	*(u8 *)(rx_skb->cb + 4) = ucp;
 	IPADBG_LOW("meta_data: 0x%x cb: 0x%x\n",
 			metadata, *(u32 *)rx_skb->cb);
+	IPADBG_LOW("ucp: %d\n", *(u8 *)(rx_skb->cb + 4));
 
 	ep->client_notify(ep->priv, IPA_RECEIVE, (unsigned long)(rx_skb));
 }
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
index 72b2e96..8a3fbd4 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
@@ -1407,6 +1407,37 @@
 			ihl_ofst_meq32++;
 		}
 
+		if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IP_TYPE) {
+			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+				IPAERR("ran out of ihl_meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+			/* 22  => offset of IP type after v6 header */
+			*buf = ipa_write_8(22, *buf);
+			*buf = ipa_write_32(0xF0000000, *buf);
+			if (attrib->type == 0x40)
+				*buf = ipa_write_32(0x40000000, *buf);
+			else
+				*buf = ipa_write_32(0x60000000, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IPV4_DST_ADDR) {
+			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+				IPAERR("ran out of ihl_meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+			/* 38  => offset of inner IPv4 addr */
+			*buf = ipa_write_8(38, *buf);
+			*buf = ipa_write_32(attrib->u.v4.dst_addr_mask, *buf);
+			*buf = ipa_write_32(attrib->u.v4.dst_addr, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_meq32++;
+		}
+
 		if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
 			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
 				IPAERR("ran out of ihl_rng16 eq\n");
@@ -2006,6 +2037,36 @@
 			ihl_ofst_meq32++;
 		}
 
+		if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IP_TYPE) {
+			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+				IPAERR("ran out of ihl_meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+			/* 22  => offset of inner IP type after v6 header */
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 22;
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+				0xF0000000;
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+				(u32)attrib->type << 24;
+			ihl_ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IPV4_DST_ADDR) {
+			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+				IPAERR("ran out of ihl_meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+			/* 38  => offset of inner IPv4 addr */
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 38;
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+				attrib->u.v4.dst_addr_mask;
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+				attrib->u.v4.dst_addr;
+			ihl_ofst_meq32++;
+		}
+
 		if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
 			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
 				IPAERR_RL("ran out of ihl_rng16 eq\n");
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index d91d7eb..068c6c5 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1661,6 +1661,7 @@
 				IPAWANERR("Failed to allocate memory.\n");
 				return -ENOMEM;
 			}
+			extend_ioctl_data.u.if_name[IFNAMSIZ-1] = '\0';
 			len = sizeof(wan_msg->upstream_ifname) >
 			sizeof(extend_ioctl_data.u.if_name) ?
 				sizeof(extend_ioctl_data.u.if_name) :
@@ -2808,7 +2809,8 @@
 	if (rc) {
 		kfree(sap_stats);
 		return rc;
-	} else if (reset) {
+	} else if (data == NULL) {
+		IPAWANDBG("only reset wlan stats\n");
 		kfree(sap_stats);
 		return 0;
 	}
@@ -2868,7 +2870,7 @@
 	if (reset) {
 		req->reset_stats_valid = true;
 		req->reset_stats = true;
-		IPAWANERR("reset the pipe stats\n");
+		IPAWANDBG("reset the pipe stats\n");
 	} else {
 		/* print tethered-client enum */
 		IPAWANDBG_LOW("Tethered-client enum(%d)\n", data->ipa_client);
@@ -2881,6 +2883,7 @@
 		kfree(resp);
 		return rc;
 	} else if (data == NULL) {
+		IPAWANDBG("only reset modem stats\n");
 		kfree(req);
 		kfree(resp);
 		return 0;
@@ -3075,11 +3078,8 @@
 int rmnet_ipa_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data)
 {
 	enum ipa_upstream_type upstream_type;
-	struct wan_ioctl_query_tether_stats tether_stats;
 	int rc = 0;
 
-	memset(&tether_stats, 0, sizeof(struct wan_ioctl_query_tether_stats));
-
 	/* prevent string buffer overflows */
 	data->upstreamIface[IFNAMSIZ-1] = '\0';
 
@@ -3100,7 +3100,7 @@
 	} else {
 		IPAWANDBG(" reset modem-backhaul stats\n");
 		rc = rmnet_ipa_query_tethering_stats_modem(
-			&tether_stats, true);
+			NULL, true);
 		if (rc) {
 			IPAWANERR("reset MODEM stats failed\n");
 			return rc;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 8e8aaef..01c0736 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -3486,7 +3486,7 @@
 	} else {
 		WARN_ON(1);
 	}
-	IPADBG("curr %d idx %d\n", ipa3_ctx->curr_ipa_clk_rate, idx);
+	IPADBG_LOW("curr %d idx %d\n", ipa3_ctx->curr_ipa_clk_rate, idx);
 
 	return idx;
 }
@@ -4394,6 +4394,8 @@
 	/* Prevent consequent calls from trying to load the FW again. */
 	if (ipa3_ctx->ipa_initialization_complete)
 		return 0;
+	/* move proxy vote for modem on ipa3_post_init */
+	IPA_ACTIVE_CLIENTS_INC_SPECIAL("PROXY_CLK_VOTE");
 
 	/*
 	 * indication whether working in MHI config or non MHI config is given
@@ -4858,7 +4860,6 @@
 	int result = 0;
 	int i;
 	struct ipa3_rt_tbl_set *rset;
-	struct ipa_active_client_logging_info log_info;
 
 	IPADBG("IPA Driver initialization started\n");
 
@@ -5049,8 +5050,7 @@
 	}
 
 	mutex_init(&ipa3_ctx->ipa3_active_clients.mutex);
-	IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "PROXY_CLK_VOTE");
-	ipa3_active_clients_log_inc(&log_info, false);
+	/* move proxy vote for modem to ipa3_post_init() */
 	atomic_set(&ipa3_ctx->ipa3_active_clients.cnt, 1);
 
 	/* Create workqueues for power management */
@@ -5296,6 +5296,8 @@
 	IPADBG("ipa cdev added successful. major:%d minor:%d\n",
 			MAJOR(ipa3_ctx->dev_num),
 			MINOR(ipa3_ctx->dev_num));
+	/* proxy vote for modem is added in ipa3_post_init() phase */
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 	return 0;
 
 fail_cdev_add:
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
index 35b6dff..17e4838 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -386,6 +386,8 @@
 	int result = -EFAULT;
 	enum gsi_status gsi_res;
 	int aggr_active_bitmap = 0;
+	bool undo_aggr_value = false;
+	struct ipahal_reg_clkon_cfg fields;
 
 	IPADBG("entry\n");
 	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
@@ -398,6 +400,25 @@
 
 	if (!ep->keep_ipa_awake)
 		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	/*
+	 * IPAv4.0 HW has a limitation where WSEQ in MBIM NTH header is not
+	 * reset to 0 when MBIM pipe is reset. Workaround is to disable
+	 * HW clock gating for AGGR block using IPA_CLKON_CFG reg. undo flag to
+	 * disable the bit after reset is finished
+	 */
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+		if (ep->cfg.aggr.aggr == IPA_MBIM_16 &&
+			ep->cfg.aggr.aggr_en != IPA_BYPASS_AGGR) {
+			ipahal_read_reg_fields(IPA_CLKON_CFG, &fields);
+			if (fields.open_aggr_wrapper == true) {
+				undo_aggr_value = true;
+				fields.open_aggr_wrapper = false;
+				ipahal_write_reg_fields(IPA_CLKON_CFG, &fields);
+			}
+		}
+	}
+
 	/*
 	 * Check for open aggregation frame on Consumer EP -
 	 * reset with open aggregation frame WA
@@ -429,10 +450,22 @@
 	if (!ep->keep_ipa_awake)
 		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
 
+	/* undo the aggr value if the flag was set above */
+	if (undo_aggr_value) {
+		fields.open_aggr_wrapper = false;
+		ipahal_write_reg_fields(IPA_CLKON_CFG, &fields);
+	}
+
 	IPADBG("exit\n");
 	return 0;
 
 reset_chan_fail:
+	/* undo the aggr value if the flag was set above */
+	if (undo_aggr_value) {
+		fields.open_aggr_wrapper = false;
+		ipahal_write_reg_fields(IPA_CLKON_CFG, &fields);
+	}
+
 	if (!ep->keep_ipa_awake)
 		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
 	return result;
@@ -515,10 +548,17 @@
 	return 0;
 }
 
-int ipa3_smmu_map_peer_buff(u64 iova, phys_addr_t phys_addr, u32 size, bool map)
+int ipa3_smmu_map_peer_buff(u64 iova, u32 size, bool map, struct sg_table *sgt)
 {
 	struct iommu_domain *smmu_domain;
 	int res;
+	phys_addr_t phys;
+	unsigned long va;
+	struct scatterlist *sg;
+	int count = 0;
+	size_t len;
+	int i;
+	struct page *page;
 
 	if (ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP])
 		return 0;
@@ -529,33 +569,53 @@
 		return -EINVAL;
 	}
 
+	/*
+	 * USB GSI driver would update sgt irrespective of USB S1
+	 * is enable or bypass.
+	 * If USB S1 is enabled using IOMMU, iova != pa.
+	 * If USB S1 is bypass, iova == pa.
+	 */
 	if (map) {
-		res = ipa3_iommu_map(smmu_domain,
-			rounddown(iova, PAGE_SIZE),
-			rounddown(phys_addr, PAGE_SIZE),
-			roundup(size + iova - rounddown(iova, PAGE_SIZE),
-			PAGE_SIZE),
-			IOMMU_READ | IOMMU_WRITE);
-		if (res) {
-			IPAERR("Fail to map 0x%llx->0x%pa\n", iova, &phys_addr);
-			return -EINVAL;
+		if (sgt != NULL) {
+			va = rounddown(iova, PAGE_SIZE);
+			for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+				page = sg_page(sg);
+				phys = page_to_phys(page);
+				len = PAGE_ALIGN(sg->offset + sg->length);
+				res = ipa3_iommu_map(smmu_domain, va, phys,
+					len, IOMMU_READ | IOMMU_WRITE);
+				if (res) {
+					IPAERR("Fail to map pa=%pa\n", &phys);
+					return -EINVAL;
+				}
+				va += len;
+				count++;
+			}
+		} else {
+			res = ipa3_iommu_map(smmu_domain,
+				rounddown(iova, PAGE_SIZE),
+				rounddown(iova, PAGE_SIZE),
+				roundup(size + iova -
+					rounddown(iova, PAGE_SIZE),
+				PAGE_SIZE),
+				IOMMU_READ | IOMMU_WRITE);
+			if (res) {
+				IPAERR("Fail to map 0x%llx\n", iova);
+				return -EINVAL;
+			}
 		}
 	} else {
 		res = iommu_unmap(smmu_domain,
-			rounddown(iova, PAGE_SIZE),
-			roundup(size + iova - rounddown(iova, PAGE_SIZE),
-			PAGE_SIZE));
+		rounddown(iova, PAGE_SIZE),
+		roundup(size + iova - rounddown(iova, PAGE_SIZE),
+		PAGE_SIZE));
 		if (res != roundup(size + iova - rounddown(iova, PAGE_SIZE),
 			PAGE_SIZE)) {
-			IPAERR("Fail to unmap 0x%llx->0x%pa\n",
-				iova, &phys_addr);
+			IPAERR("Fail to unmap 0x%llx\n", iova);
 			return -EINVAL;
 		}
 	}
-
-	IPADBG("Peer buff %s 0x%llx->0x%pa\n", map ? "map" : "unmap",
-		iova, &phys_addr);
-
+	IPADBG("Peer buff %s 0x%llx\n", map ? "map" : "unmap", iova);
 	return 0;
 }
 
@@ -681,6 +741,10 @@
 		sizeof(union __packed gsi_channel_scratch));
 	ep->chan_scratch.xdci.max_outstanding_tre =
 		params->chan_params.re_size * gsi_ep_cfg_ptr->ipa_if_tlv;
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+		ep->chan_scratch.xdci.max_outstanding_tre = 0;
+
 	gsi_res = gsi_write_channel_scratch(ep->gsi_chan_hdl,
 		params->chan_scratch);
 	if (gsi_res != GSI_STATUS_SUCCESS) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 5da83e5..e88ab27 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -527,6 +527,15 @@
 	if (attrib->attrib_mask & IPA_FLT_TCP_SYN_L2TP)
 		pr_err("tcp syn l2tp ");
 
+	if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IP_TYPE)
+		pr_err("l2tp inner ip type: %d ", attrib->type);
+
+	if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IPV4_DST_ADDR) {
+		addr[0] = htonl(attrib->u.v4.dst_addr);
+		mask[0] = htonl(attrib->u.v4.dst_addr_mask);
+		pr_err("dst_addr:%pI4 dst_addr_mask:%pI4 ", addr, mask);
+	}
+
 	pr_err("\n");
 	return 0;
 }
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index a812891..90edd2b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -222,7 +222,13 @@
 	tx_pkt->no_unmap_dma = true;
 	tx_pkt->sys = sys;
 	spin_lock_bh(&sys->spinlock);
+	if (unlikely(!sys->nop_pending)) {
+		spin_unlock_bh(&sys->spinlock);
+		kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
+		return;
+	}
 	list_add_tail(&tx_pkt->link, &sys->head_desc_list);
+	sys->nop_pending = false;
 	spin_unlock_bh(&sys->spinlock);
 
 	memset(&nop_xfer, 0, sizeof(nop_xfer));
@@ -236,6 +242,8 @@
 		return;
 	}
 	sys->len_pending_xfer = 0;
+	/* make sure TAG process is sent before clocks are gated */
+	ipa3_ctx->tag_process_before_gating = true;
 
 }
 
@@ -271,6 +279,7 @@
 	int result;
 	u32 mem_flag = GFP_ATOMIC;
 	const struct ipa_gsi_ep_config *gsi_ep_cfg;
+	bool send_nop = false;
 
 	if (unlikely(!in_atomic))
 		mem_flag = GFP_KERNEL;
@@ -408,10 +417,14 @@
 	}
 	kfree(gsi_xfer_elem_array);
 
+	if (sys->use_comm_evt_ring && !sys->nop_pending) {
+		sys->nop_pending = true;
+		send_nop = true;
+	}
 	spin_unlock_bh(&sys->spinlock);
 
 	/* set the timer for sending the NOP descriptor */
-	if (sys->use_comm_evt_ring && !hrtimer_active(&sys->db_timer)) {
+	if (send_nop) {
 		ktime_t time = ktime_set(0, IPA_TX_SEND_COMPL_NOP_DELAY_NS);
 
 		IPADBG_LOW("scheduling timer for ch %lu\n",
@@ -419,6 +432,9 @@
 		hrtimer_start(&sys->db_timer, time, HRTIMER_MODE_REL);
 	}
 
+	/* make sure TAG process is sent before clocks are gated */
+	ipa3_ctx->tag_process_before_gating = true;
+
 	return 0;
 
 failure_dma_map:
@@ -823,7 +839,7 @@
 		 * pipe will be unsuspended as part of
 		 * enabling IPA clocks
 		 */
-		ipa_pm_activate_sync(sys->pm_hdl);
+		ipa_pm_activate(sys->pm_hdl);
 		ipa_pm_deferred_deactivate(sys->pm_hdl);
 		break;
 	default:
@@ -2570,10 +2586,12 @@
 	struct ipa3_ep_context *ep;
 	unsigned int src_pipe;
 	u32 metadata;
+	u8 ucp;
 
 	ipahal_pkt_status_parse(rx_skb->data, &status);
 	src_pipe = status.endp_src_idx;
 	metadata = status.metadata;
+	ucp = status.ucp;
 	ep = &ipa3_ctx->ep[src_pipe];
 	if (unlikely(src_pipe >= ipa3_ctx->ipa_num_pipes ||
 		!ep->valid ||
@@ -2596,8 +2614,10 @@
 	 *  ------------------------------------------
 	 */
 	*(u16 *)rx_skb->cb = ((metadata >> 16) & 0xFFFF);
+	*(u8 *)(rx_skb->cb + 4) = ucp;
 	IPADBG_LOW("meta_data: 0x%x cb: 0x%x\n",
 			metadata, *(u32 *)rx_skb->cb);
+	IPADBG_LOW("ucp: %d\n", *(u8 *)(rx_skb->cb + 4));
 
 	ep->client_notify(ep->priv, IPA_RECEIVE, (unsigned long)(rx_skb));
 }
@@ -3675,6 +3695,12 @@
 	ch_scratch.gpi.max_outstanding_tre = gsi_ep_info->ipa_if_tlv *
 		GSI_CHAN_RE_SIZE_16B;
 	ch_scratch.gpi.outstanding_threshold = 2 * GSI_CHAN_RE_SIZE_16B;
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+		ch_scratch.gpi.max_outstanding_tre = 0;
+		ch_scratch.gpi.outstanding_threshold = 0;
+	}
+
 	result = gsi_write_channel_scratch(ep->gsi_chan_hdl, ch_scratch);
 	if (result != GSI_STATUS_SUCCESS) {
 		IPAERR("failed to write scratch %d\n", result);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index d7d74a3..7bd1731 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -626,6 +626,7 @@
 	struct delayed_work switch_to_intr_work;
 	enum ipa3_sys_pipe_policy policy;
 	bool use_comm_evt_ring;
+	bool nop_pending;
 	int (*pyld_hdlr)(struct sk_buff *skb, struct ipa3_sys_context *sys);
 	struct sk_buff * (*get_skb)(unsigned int len, gfp_t flags);
 	void (*free_skb)(struct sk_buff *skb);
@@ -2300,8 +2301,7 @@
 int ipa3_rx_poll(u32 clnt_hdl, int budget);
 void ipa3_recycle_wan_skb(struct sk_buff *skb);
 int ipa3_smmu_map_peer_reg(phys_addr_t phys_addr, bool map);
-int ipa3_smmu_map_peer_buff(u64 iova, phys_addr_t phys_addr,
-	u32 size, bool map);
+int ipa3_smmu_map_peer_buff(u64 iova, u32 size, bool map, struct sg_table *sgt);
 void ipa3_reset_freeze_vote(void);
 int ipa3_ntn_init(void);
 int ipa3_get_ntn_stats(struct Ipa3HwStatsNTNInfoData_t *stats);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
index cb970ba..82cd8187 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -302,6 +302,10 @@
 		ep_cfg->ipa_if_tlv * ch_props.re_size;
 	ch_scratch.mhi.outstanding_threshold =
 		min(ep_cfg->ipa_if_tlv / 2, 8) * ch_props.re_size;
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+		ch_scratch.mhi.max_outstanding_tre = 0;
+		ch_scratch.mhi.outstanding_threshold = 0;
+	}
 	ch_scratch.mhi.oob_mod_threshold = 4;
 	if (params->ch_ctx_host->brstmode == IPA_MHI_BURST_MODE_DEFAULT ||
 		params->ch_ctx_host->brstmode == IPA_MHI_BURST_MODE_ENABLE) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
index bafc3ca..80a39b5 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -240,7 +240,7 @@
 	struct ipa_pm_client *client;
 
 	/* Create a basic array to hold throughputs*/
-	for (i = 0, n = 0; i < IPA_PM_MAX_CLIENTS; i++) {
+	for (i = 1, n = 0; i < IPA_PM_MAX_CLIENTS; i++) {
 		client = ipa_pm_ctx->clients[i];
 		if (client != NULL && IPA_PM_STATE_ACTIVE(client->state)) {
 			/* default case */
@@ -414,7 +414,6 @@
 	complete_all(&client->complete);
 
 	if (dec_clk) {
-		ipa_set_tag_process_before_gating(true);
 		if (!client->skip_clk_vote)
 			IPA_ACTIVE_CLIENTS_DEC_SPECIAL(client->name);
 
@@ -465,7 +464,6 @@
 		client->state = IPA_PM_DEACTIVATED;
 		IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
 		spin_unlock_irqrestore(&client->state_lock, flags);
-		ipa_set_tag_process_before_gating(true);
 		if (!client->skip_clk_vote)
 			IPA_ACTIVE_CLIENTS_DEC_SPECIAL(client->name);
 
@@ -489,7 +487,8 @@
 
 	n = -ENOBUFS;
 
-	for (i = IPA_PM_MAX_CLIENTS - 1; i >= 0; i--) {
+	/* 0 is not a valid handle */
+	for (i = IPA_PM_MAX_CLIENTS - 1; i >= 1; i--) {
 		if (ipa_pm_ctx->clients[i] == NULL) {
 			n = i;
 			continue;
@@ -1043,7 +1042,7 @@
 		return -EINVAL;
 	}
 
-	for (i = 0; i < IPA_PM_MAX_CLIENTS; i++) {
+	for (i = 1; i < IPA_PM_MAX_CLIENTS; i++) {
 		client = ipa_pm_ctx->clients[i];
 
 		if (client == NULL)
@@ -1073,7 +1072,6 @@
 			IPA_PM_DBG_STATE(client->hdl, client->name,
 				client->state);
 			spin_unlock_irqrestore(&client->state_lock, flags);
-			ipa_set_tag_process_before_gating(true);
 			if (!client->skip_clk_vote)
 				IPA_ACTIVE_CLIENTS_DEC_SPECIAL(client->name);
 			deactivate_client(client->hdl);
@@ -1126,7 +1124,6 @@
 	spin_unlock_irqrestore(&client->state_lock, flags);
 
 	/* else case (Deactivates all Activated cases)*/
-	ipa_set_tag_process_before_gating(true);
 	if (!client->skip_clk_vote)
 		IPA_ACTIVE_CLIENTS_DEC_SPECIAL(client->name);
 
@@ -1280,7 +1277,7 @@
 	cnt += result;
 
 
-	for (i = 0; i < IPA_PM_MAX_CLIENTS; i++) {
+	for (i = 1; i < IPA_PM_MAX_CLIENTS; i++) {
 		client = ipa_pm_ctx->clients[i];
 
 		if (client == NULL)
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h
index b2f203a..205e7a5 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -16,7 +16,7 @@
 #include <linux/msm_ipa.h>
 
 /* internal to ipa */
-#define IPA_PM_MAX_CLIENTS 10
+#define IPA_PM_MAX_CLIENTS 11 /* actual max is value -1 since we start from 1*/
 #define IPA_PM_MAX_EX_CL 64
 #define IPA_PM_THRESHOLD_MAX 5
 #define IPA_PM_EXCEPTION_MAX 2
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
index 941e489..648db5e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1209,8 +1209,6 @@
 		IPADBG("Skipping endpoint configuration.\n");
 	}
 
-	ipa3_enable_data_path(ipa_ep_idx);
-
 	out->clnt_hdl = ipa_ep_idx;
 
 	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(in->sys.client))
@@ -1316,6 +1314,7 @@
 	struct ipa3_ep_context *ep;
 	union IpaHwWdiCommonChCmdData_t enable;
 	struct ipa_ep_cfg_holb holb_cfg;
+	struct ipahal_reg_endp_init_rsrc_grp rsrc_grp;
 
 	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
 	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
@@ -1348,6 +1347,20 @@
 		goto uc_timeout;
 	}
 
+	/* Assign the resource group for pipe */
+	memset(&rsrc_grp, 0, sizeof(rsrc_grp));
+	rsrc_grp.rsrc_grp = ipa_get_ep_group(ep->client);
+	if (rsrc_grp.rsrc_grp == -1) {
+		IPAERR("invalid group for client %d\n", ep->client);
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	IPADBG("Setting group %d for pipe %d\n",
+		rsrc_grp.rsrc_grp, clnt_hdl);
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_RSRC_GRP_n, clnt_hdl,
+		&rsrc_grp);
+
 	if (IPA_CLIENT_IS_CONS(ep->client)) {
 		memset(&holb_cfg, 0, sizeof(holb_cfg));
 		holb_cfg.en = IPA_HOLB_TMR_DIS;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 7421eb8..9974b87 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -57,7 +57,6 @@
 #define IPA_BCR_REG_VAL_v3_0 (0x00000001)
 #define IPA_BCR_REG_VAL_v3_5 (0x0000003B)
 #define IPA_BCR_REG_VAL_v4_0 (0x00000039)
-#define IPA_CLKON_CFG_v4_0 (0x30000000)
 #define IPA_AGGR_GRAN_MIN (1)
 #define IPA_AGGR_GRAN_MAX (32)
 #define IPA_EOT_COAL_GRAN_MIN (1)
@@ -806,10 +805,10 @@
 
 	/* IPA_3_5_MHI */
 	[IPA_3_5_MHI][IPA_CLIENT_USB_PROD]            = {
-			true, IPA_v3_5_MHI_GROUP_DDR, true,
-			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			false, IPA_EP_NOT_ALLOCATED, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 0, 7, 8, 16, IPA_EE_AP } },
+			{ -1, -1, -1, -1, -1 } },
 	[IPA_3_5_MHI][IPA_CLIENT_APPS_WAN_PROD]   = {
 			true, IPA_v3_5_MHI_GROUP_DDR, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
@@ -870,7 +869,7 @@
 			true, IPA_v3_5_MHI_GROUP_DMA, true,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
 			QMB_MASTER_SELECT_DDR,
-			{7, 8, 8, 16, IPA_EE_AP } },
+			{ 7, 8, 8, 16, IPA_EE_AP } },
 	[IPA_3_5_MHI][IPA_CLIENT_TEST4_PROD]          = {
 			true, IPA_v3_5_MHI_GROUP_DMA, true,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
@@ -883,15 +882,15 @@
 			QMB_MASTER_SELECT_DDR,
 			{ 16, 3, 8, 8, IPA_EE_UC } },
 	[IPA_3_5_MHI][IPA_CLIENT_USB_CONS]            = {
-			true, IPA_v3_5_MHI_GROUP_DDR, false,
+			false, IPA_EP_NOT_ALLOCATED, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 17, 11, 8, 8, IPA_EE_AP } },
+			{ -1, -1, -1, -1, -1 } },
 	[IPA_3_5_MHI][IPA_CLIENT_USB_DPL_CONS]        = {
-			true, IPA_v3_5_MHI_GROUP_DDR, false,
+			false, IPA_EP_NOT_ALLOCATED, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 14, 10, 4, 6, IPA_EE_AP } },
+			{ -1, -1, -1, -1, -1 } },
 	[IPA_3_5_MHI][IPA_CLIENT_APPS_LAN_CONS]       = {
 			true, IPA_v3_5_MHI_GROUP_DDR, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
@@ -1113,6 +1112,12 @@
 			{ 31, 31, 8, 8, IPA_EE_AP } },
 
 	/* IPA_4_0 */
+	[IPA_4_0][IPA_CLIENT_WLAN1_PROD]          = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 6, 2, 8, 16, IPA_EE_UC } },
 	[IPA_4_0][IPA_CLIENT_USB_PROD]            = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
@@ -1149,12 +1154,6 @@
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 9, 0, 8, 16, IPA_EE_UC } },
-	[IPA_4_0][IPA_CLIENT_Q6_LAN_PROD]         = {
-			true, IPA_v4_0_GROUP_UL_DL,
-			true,
-			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
-			QMB_MASTER_SELECT_DDR,
-			{ 6, 2, 12, 24, IPA_EE_Q6 } },
 	[IPA_4_0][IPA_CLIENT_Q6_WAN_PROD]         = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
@@ -1191,13 +1190,13 @@
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{7, 9, 8, 16, IPA_EE_AP } },
+			{ 7, 9, 8, 16, IPA_EE_AP } },
 	[IPA_4_0][IPA_CLIENT_TEST4_PROD]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 8, 10, 8, 16, IPA_EE_AP } },
+			{8, 10, 8, 16, IPA_EE_AP } },
 
 
 	[IPA_4_0][IPA_CLIENT_WLAN1_CONS]          = {
@@ -1205,7 +1204,7 @@
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 18, 11, 6, 9, IPA_EE_AP } },
+			{ 18, 3, 6, 9, IPA_EE_UC } },
 	[IPA_4_0][IPA_CLIENT_WLAN2_CONS]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
@@ -1279,25 +1278,25 @@
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
-			{ 12, 2, 5, 5, IPA_EE_AP } },
+			{ 11, 6, 9, 9, IPA_EE_AP } },
 	[IPA_4_0][IPA_CLIENT_TEST1_CONS]           = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
-			QMB_MASTER_SELECT_DDR,
-			{ 12, 2, 5, 5, IPA_EE_AP } },
+			QMB_MASTER_SELECT_PCIE,
+			{ 11, 6, 9, 9, IPA_EE_AP } },
 	[IPA_4_0][IPA_CLIENT_TEST2_CONS]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
-			{ 18, 11, 6, 9, IPA_EE_AP } },
+			{ 12, 2, 5, 5, IPA_EE_AP } },
 	[IPA_4_0][IPA_CLIENT_TEST3_CONS]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
-			QMB_MASTER_SELECT_DDR,
-			{ 20, 13, 9, 9, IPA_EE_AP } },
+			QMB_MASTER_SELECT_PCIE,
+			{ 19, 12, 9, 9, IPA_EE_AP } },
 	[IPA_4_0][IPA_CLIENT_TEST4_CONS]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
@@ -1313,12 +1312,6 @@
 			{ 31, 31, 8, 8, IPA_EE_AP } },
 
 	/* IPA_4_0_MHI */
-	[IPA_4_0_MHI][IPA_CLIENT_USB_PROD]            = {
-			true, IPA_v4_0_MHI_GROUP_DDR,
-			true,
-			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
-			QMB_MASTER_SELECT_DDR,
-			{ 0, 8, 8, 16, IPA_EE_AP } },
 	[IPA_4_0_MHI][IPA_CLIENT_APPS_WAN_PROD]   = {
 			true, IPA_v4_0_MHI_GROUP_DDR,
 			true,
@@ -1337,12 +1330,6 @@
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_PCIE,
 			{ 1, 0, 8, 16, IPA_EE_AP } },
-	[IPA_4_0_MHI][IPA_CLIENT_Q6_LAN_PROD]         = {
-			true, IPA_v4_0_MHI_GROUP_DDR,
-			true,
-			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
-			QMB_MASTER_SELECT_DDR,
-			{ 6, 2, 12, 24, IPA_EE_Q6 } },
 	[IPA_4_0_MHI][IPA_CLIENT_Q6_WAN_PROD]         = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
@@ -1399,18 +1386,7 @@
 			QMB_MASTER_SELECT_DDR,
 			{ 8, 10, 8, 16, IPA_EE_AP } },
 
-	[IPA_4_0_MHI][IPA_CLIENT_USB_CONS]            = {
-			true, IPA_v4_0_MHI_GROUP_DDR,
-			false,
-			IPA_DPS_HPS_SEQ_TYPE_INVALID,
-			QMB_MASTER_SELECT_DDR,
-			{ 19, 12, 9, 9, IPA_EE_AP } },
-	[IPA_4_0_MHI][IPA_CLIENT_USB_DPL_CONS]        = {
-			true, IPA_v4_0_MHI_GROUP_DDR,
-			false,
-			IPA_DPS_HPS_SEQ_TYPE_INVALID,
-			QMB_MASTER_SELECT_DDR,
-			{ 15, 7, 5, 5, IPA_EE_AP } },
+
 	[IPA_4_0_MHI][IPA_CLIENT_APPS_LAN_CONS]       = {
 			true, IPA_v4_0_MHI_GROUP_DDR,
 			false,
@@ -1465,25 +1441,25 @@
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
-			{ 12, 2, 5, 5, IPA_EE_AP } },
+			{ 11, 6, 9, 9, IPA_EE_AP } },
 	[IPA_4_0_MHI][IPA_CLIENT_TEST1_CONS]           = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
-			QMB_MASTER_SELECT_DDR,
-			{ 12, 2, 5, 5, IPA_EE_AP } },
+			QMB_MASTER_SELECT_PCIE,
+			{ 11, 6, 9, 9, IPA_EE_AP } },
 	[IPA_4_0_MHI][IPA_CLIENT_TEST2_CONS]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
-			QMB_MASTER_SELECT_PCIE,
-			{ 18, 11, 6, 9, IPA_EE_AP } },
+			QMB_MASTER_SELECT_DDR,
+			{ 12, 2, 5, 5, IPA_EE_AP } },
 	[IPA_4_0_MHI][IPA_CLIENT_TEST3_CONS]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
-			QMB_MASTER_SELECT_DDR,
-			{ 20, 13, 9, 9, IPA_EE_AP } },
+			QMB_MASTER_SELECT_PCIE,
+			{ 19, 12, 9, 9, IPA_EE_AP } },
 	[IPA_4_0_MHI][IPA_CLIENT_TEST4_CONS]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
@@ -1571,10 +1547,12 @@
 
 	switch (resource) {
 	case IPA_RM_RESOURCE_USB_CONS:
-		clients->names[i++] = IPA_CLIENT_USB_CONS;
+		if (ipa3_get_ep_mapping(IPA_CLIENT_USB_CONS) != -1)
+			clients->names[i++] = IPA_CLIENT_USB_CONS;
 		break;
 	case IPA_RM_RESOURCE_USB_DPL_CONS:
-		clients->names[i++] = IPA_CLIENT_USB_DPL_CONS;
+		if (ipa3_get_ep_mapping(IPA_CLIENT_USB_DPL_CONS) != -1)
+			clients->names[i++] = IPA_CLIENT_USB_DPL_CONS;
 		break;
 	case IPA_RM_RESOURCE_HSIC_CONS:
 		clients->names[i++] = IPA_CLIENT_HSIC1_CONS;
@@ -1595,7 +1573,8 @@
 		clients->names[i++] = IPA_CLIENT_ETHERNET_CONS;
 		break;
 	case IPA_RM_RESOURCE_USB_PROD:
-		clients->names[i++] = IPA_CLIENT_USB_PROD;
+		if (ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD) != -1)
+			clients->names[i++] = IPA_CLIENT_USB_PROD;
 		break;
 	case IPA_RM_RESOURCE_HSIC_PROD:
 		clients->names[i++] = IPA_CLIENT_HSIC1_PROD;
@@ -2051,13 +2030,20 @@
 	ipahal_write_reg(IPA_BCR, val);
 
 	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
-		struct ipahal_reg_tx_cfg cfg;
+		struct ipahal_reg_clkon_cfg clkon_cfg;
+		struct ipahal_reg_tx_cfg tx_cfg;
 
-		ipahal_write_reg(IPA_CLKON_CFG, IPA_CLKON_CFG_v4_0);
-		ipahal_read_reg_fields(IPA_TX_CFG, &cfg);
+		memset(&clkon_cfg, 0, sizeof(clkon_cfg));
+
+		/*enable open global clocks*/
+		clkon_cfg.open_global_2x_clk = true;
+		clkon_cfg.open_global = true;
+		ipahal_write_reg_fields(IPA_CLKON_CFG, &clkon_cfg);
+
+		ipahal_read_reg_fields(IPA_TX_CFG, &tx_cfg);
 		/* disable PA_MASK_EN to allow holb drop */
-		cfg.pa_mask_en = 0;
-		ipahal_write_reg_fields(IPA_TX_CFG, &cfg);
+		tx_cfg.pa_mask_en = 0;
+		ipahal_write_reg_fields(IPA_TX_CFG, &tx_cfg);
 	}
 
 	ipa3_cfg_qsb();
@@ -3978,6 +3964,7 @@
 	}
 	kfree(tag_desc);
 	tag_desc = NULL;
+	ipa3_ctx->tag_process_before_gating = false;
 
 	IPADBG("waiting for TAG response\n");
 	res = wait_for_completion_timeout(&comp->comp, timeout);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
index a677046..1254fe3 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
@@ -1233,6 +1233,39 @@
 		ihl_ofst_meq32 += 2;
 	}
 
+	if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IP_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 22  => offset of IP type after v6 header */
+		extra = ipa_write_8(22, extra);
+		rest = ipa_write_32(0xF0000000, rest);
+		if (attrib->type == 0x40)
+			rest = ipa_write_32(0x40000000, rest);
+		else
+			rest = ipa_write_32(0x60000000, rest);
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IPV4_DST_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 38  => offset of inner IPv4 addr */
+		extra = ipa_write_8(38, extra);
+		rest = ipa_write_32(attrib->u.v4.dst_addr_mask, rest);
+		rest = ipa_write_32(attrib->u.v4.dst_addr, rest);
+		ihl_ofst_meq32++;
+	}
+
 	if (attrib->attrib_mask & IPA_FLT_META_DATA) {
 		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_METADATA_COMPARE);
 		rest = ipa_write_32(attrib->meta_data_mask, rest);
@@ -2269,6 +2302,40 @@
 		ihl_ofst_meq32 += 2;
 	}
 
+	if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IP_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 22  => offset of inner IP type after v6 header */
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 22;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+			0xF0000000;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+			(u32)attrib->type << 24;
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IPV4_DST_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 38  => offset of inner IPv4 addr */
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 38;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+			attrib->u.v4.dst_addr_mask;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+			attrib->u.v4.dst_addr;
+		ihl_ofst_meq32++;
+	}
+
 	if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
 		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
 			IPAHAL_ERR_RL("ran out of meq32 eq\n");
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
index 1d8eb13..48e7d7c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -399,6 +399,261 @@
 			IPA_ENDP_STATUS_n_STATUS_PKT_SUPPRESS_BMSK);
 }
 
+static void ipareg_construct_clkon_cfg(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_clkon_cfg *clkon_cfg =
+		(struct ipahal_reg_clkon_cfg *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_global_2x_clk,
+			IPA_CLKON_CFG_OPEN_GLOBAL_2X_CLK_SHFT,
+			IPA_CLKON_CFG_OPEN_GLOBAL_2X_CLK_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_global,
+			IPA_CLKON_CFG_OPEN_GLOBAL_SHFT,
+			IPA_CLKON_CFG_OPEN_GLOBAL_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_gsi_if,
+			IPA_CLKON_CFG_OPEN_GSI_IF_SHFT,
+			IPA_CLKON_CFG_OPEN_GSI_IF_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_weight_arb,
+			IPA_CLKON_CFG_OPEN_WEIGHT_ARB_SHFT,
+			IPA_CLKON_CFG_OPEN_WEIGHT_ARB_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_qmb,
+			IPA_CLKON_CFG_OPEN_QMB_SHFT,
+			IPA_CLKON_CFG_OPEN_QMB_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_ram_slaveway,
+			IPA_CLKON_CFG_OPEN_RAM_SLAVEWAY_SHFT,
+			IPA_CLKON_CFG_OPEN_RAM_SLAVEWAY_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_aggr_wrapper,
+			IPA_CLKON_CFG_OPEN_AGGR_WRAPPER_SHFT,
+			IPA_CLKON_CFG_OPEN_AGGR_WRAPPER_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_qsb2axi_cmdq_l,
+			IPA_CLKON_CFG_OPEN_QSB2AXI_CMDQ_L_SHFT,
+			IPA_CLKON_CFG_OPEN_QSB2AXI_CMDQ_L_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_fnr,
+			IPA_CLKON_CFG_OPEN_FNR_SHFT,
+			IPA_CLKON_CFG_OPEN_FNR_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_tx_1,
+			IPA_CLKON_CFG_OPEN_TX_1_SHFT,
+			IPA_CLKON_CFG_OPEN_TX_1_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_tx_0,
+			IPA_CLKON_CFG_OPEN_TX_0_SHFT,
+			IPA_CLKON_CFG_OPEN_TX_0_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_ntf_tx_cmdqs,
+			IPA_CLKON_CFG_OPEN_NTF_TX_CMDQS_SHFT,
+			IPA_CLKON_CFG_OPEN_NTF_TX_CMDQS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_dcmp,
+			IPA_CLKON_CFG_OPEN_DCMP_SHFT,
+			IPA_CLKON_CFG_OPEN_DCMP_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_h_dcph,
+			IPA_CLKON_CFG_OPEN_H_DCPH_SHFT,
+			IPA_CLKON_CFG_OPEN_H_DCPH_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_d_dcph,
+			IPA_CLKON_CFG_OPEN_D_DCPH_SHFT,
+			IPA_CLKON_CFG_OPEN_D_DCPH_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_ack_mngr,
+			IPA_CLKON_CFG_OPEN_ACK_MNGR_SHFT,
+			IPA_CLKON_CFG_OPEN_ACK_MNGR_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_ctx_handler,
+			IPA_CLKON_CFG_OPEN_CTX_HANDLER_SHFT,
+			IPA_CLKON_CFG_OPEN_CTX_HANDLER_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_rsrc_mngr,
+			IPA_CLKON_CFG_OPEN_RSRC_MNGR_SHFT,
+			IPA_CLKON_CFG_OPEN_RSRC_MNGR_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_dps_tx_cmdqs,
+			IPA_CLKON_CFG_OPEN_DPS_TX_CMDQS_SHFT,
+			IPA_CLKON_CFG_OPEN_DPS_TX_CMDQS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_hps_dps_cmdqs,
+			IPA_CLKON_CFG_OPEN_HPS_DPS_CMDQS_SHFT,
+			IPA_CLKON_CFG_OPEN_HPS_DPS_CMDQS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_rx_hps_cmdqs,
+			IPA_CLKON_CFG_OPEN_RX_HPS_CMDQS_SHFT,
+			IPA_CLKON_CFG_OPEN_RX_HPS_CMDQS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_dps,
+			IPA_CLKON_CFG_OPEN_DPS_SHFT,
+			IPA_CLKON_CFG_OPEN_DPS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_hps,
+			IPA_CLKON_CFG_OPEN_HPS_SHFT,
+			IPA_CLKON_CFG_OPEN_HPS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_ftch_dps,
+			IPA_CLKON_CFG_OPEN_FTCH_DPS_SHFT,
+			IPA_CLKON_CFG_OPEN_FTCH_DPS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_ftch_hps,
+			IPA_CLKON_CFG_OPEN_FTCH_HPS_SHFT,
+			IPA_CLKON_CFG_OPEN_FTCH_HPS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_ram_arb,
+			IPA_CLKON_CFG_OPEN_RAM_ARB_SHFT,
+			IPA_CLKON_CFG_OPEN_RAM_ARB_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_misc,
+			IPA_CLKON_CFG_OPEN_MISC_SHFT,
+			IPA_CLKON_CFG_OPEN_MISC_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_tx_wrapper,
+			IPA_CLKON_CFG_OPEN_TX_WRAPPER_SHFT,
+			IPA_CLKON_CFG_OPEN_TX_WRAPPER_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_proc,
+			IPA_CLKON_CFG_OPEN_PROC_SHFT,
+			IPA_CLKON_CFG_OPEN_PROC_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_rx,
+			IPA_CLKON_CFG_OPEN_RX_SHFT,
+			IPA_CLKON_CFG_OPEN_RX_BMSK);
+}
+
+static void ipareg_parse_clkon_cfg(
+	enum ipahal_reg_name reg, void *fields, u32 val)
+{
+	struct ipahal_reg_clkon_cfg *clkon_cfg =
+		(struct ipahal_reg_clkon_cfg *)fields;
+
+	memset(clkon_cfg, 0, sizeof(struct ipahal_reg_clkon_cfg));
+	clkon_cfg->open_global_2x_clk = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_GLOBAL_2X_CLK_SHFT,
+			IPA_CLKON_CFG_OPEN_GLOBAL_2X_CLK_BMSK);
+
+	clkon_cfg->open_global = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_GLOBAL_SHFT,
+			IPA_CLKON_CFG_OPEN_GLOBAL_BMSK);
+
+	clkon_cfg->open_gsi_if = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_GSI_IF_SHFT,
+			IPA_CLKON_CFG_OPEN_GSI_IF_BMSK);
+
+	clkon_cfg->open_weight_arb = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_WEIGHT_ARB_SHFT,
+			IPA_CLKON_CFG_OPEN_WEIGHT_ARB_BMSK);
+
+	clkon_cfg->open_qmb = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_QMB_SHFT,
+			IPA_CLKON_CFG_OPEN_QMB_BMSK);
+
+	clkon_cfg->open_ram_slaveway = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_RAM_SLAVEWAY_SHFT,
+			IPA_CLKON_CFG_OPEN_RAM_SLAVEWAY_BMSK);
+
+	clkon_cfg->open_aggr_wrapper = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_AGGR_WRAPPER_SHFT,
+			IPA_CLKON_CFG_OPEN_AGGR_WRAPPER_BMSK);
+
+	clkon_cfg->open_qsb2axi_cmdq_l = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_QSB2AXI_CMDQ_L_SHFT,
+			IPA_CLKON_CFG_OPEN_QSB2AXI_CMDQ_L_BMSK);
+
+	clkon_cfg->open_fnr = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_FNR_SHFT,
+			IPA_CLKON_CFG_OPEN_FNR_BMSK);
+
+	clkon_cfg->open_tx_1 = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_TX_1_SHFT,
+			IPA_CLKON_CFG_OPEN_TX_1_BMSK);
+
+	clkon_cfg->open_tx_0 = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_TX_0_SHFT,
+			IPA_CLKON_CFG_OPEN_TX_0_BMSK);
+
+	clkon_cfg->open_ntf_tx_cmdqs = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_NTF_TX_CMDQS_SHFT,
+			IPA_CLKON_CFG_OPEN_NTF_TX_CMDQS_BMSK);
+
+	clkon_cfg->open_dcmp = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_DCMP_SHFT,
+			IPA_CLKON_CFG_OPEN_DCMP_BMSK);
+
+	clkon_cfg->open_h_dcph = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_H_DCPH_SHFT,
+			IPA_CLKON_CFG_OPEN_H_DCPH_BMSK);
+
+	clkon_cfg->open_d_dcph = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_D_DCPH_SHFT,
+			IPA_CLKON_CFG_OPEN_D_DCPH_BMSK);
+
+	clkon_cfg->open_ack_mngr = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_ACK_MNGR_SHFT,
+			IPA_CLKON_CFG_OPEN_ACK_MNGR_BMSK);
+
+	clkon_cfg->open_ctx_handler = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_CTX_HANDLER_SHFT,
+			IPA_CLKON_CFG_OPEN_CTX_HANDLER_BMSK);
+
+	clkon_cfg->open_rsrc_mngr = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_RSRC_MNGR_SHFT,
+			IPA_CLKON_CFG_OPEN_RSRC_MNGR_BMSK);
+
+	clkon_cfg->open_dps_tx_cmdqs = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_DPS_TX_CMDQS_SHFT,
+			IPA_CLKON_CFG_OPEN_DPS_TX_CMDQS_BMSK);
+
+	clkon_cfg->open_hps_dps_cmdqs = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_HPS_DPS_CMDQS_SHFT,
+			IPA_CLKON_CFG_OPEN_HPS_DPS_CMDQS_BMSK);
+
+	clkon_cfg->open_rx_hps_cmdqs = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_RX_HPS_CMDQS_SHFT,
+			IPA_CLKON_CFG_OPEN_RX_HPS_CMDQS_BMSK);
+
+	clkon_cfg->open_dps = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_DPS_SHFT,
+			IPA_CLKON_CFG_OPEN_DPS_BMSK);
+
+	clkon_cfg->open_hps = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_HPS_SHFT,
+			IPA_CLKON_CFG_OPEN_HPS_BMSK);
+
+	clkon_cfg->open_ftch_dps = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_FTCH_DPS_SHFT,
+			IPA_CLKON_CFG_OPEN_FTCH_DPS_BMSK);
+
+	clkon_cfg->open_ftch_hps = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_FTCH_HPS_SHFT,
+			IPA_CLKON_CFG_OPEN_FTCH_HPS_BMSK);
+
+	clkon_cfg->open_ram_arb = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_RAM_ARB_SHFT,
+			IPA_CLKON_CFG_OPEN_RAM_ARB_BMSK);
+
+	clkon_cfg->open_misc = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_MISC_SHFT,
+			IPA_CLKON_CFG_OPEN_MISC_BMSK);
+
+	clkon_cfg->open_tx_wrapper = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_TX_WRAPPER_SHFT,
+			IPA_CLKON_CFG_OPEN_TX_WRAPPER_BMSK);
+
+	clkon_cfg->open_proc = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_PROC_SHFT,
+			IPA_CLKON_CFG_OPEN_PROC_BMSK);
+
+	clkon_cfg->open_rx = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_RX_SHFT,
+			IPA_CLKON_CFG_OPEN_RX_BMSK);
+}
+
 static void ipareg_construct_qcncm(
 	enum ipahal_reg_name reg, const void *fields, u32 *val)
 {
@@ -1522,7 +1777,7 @@
 		ipareg_construct_endp_status_n_v4_0, ipareg_parse_dummy,
 		0x00000840, 0x70},
 	[IPA_HW_v4_0][IPA_CLKON_CFG] = {
-		ipareg_construct_dummy, ipareg_parse_dummy,
+		ipareg_construct_clkon_cfg, ipareg_parse_clkon_cfg,
 		0x00000044, 0},
 	[IPA_HW_v4_0][IPA_ENDP_INIT_CONN_TRACK_n] = {
 		ipareg_construct_endp_init_conn_track_n,
@@ -1874,15 +2129,13 @@
 		return;
 	}
 
-	valmask->val = (1 & IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK) <<
-		IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT;
-	valmask->mask = IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK <<
-		IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT;
+	valmask->val = (1 << IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT) &
+		IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK;
+	valmask->mask = IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK;
 
-	valmask->val |= ((0 & IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK) <<
-		IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT);
-	valmask->mask |= ((IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK <<
-		IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT));
+	valmask->val |= ((0 << IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT) &
+		IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK);
+	valmask->mask |= IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK;
 }
 
 u32 ipahal_aggr_get_max_byte_limit(void)
@@ -1933,7 +2186,7 @@
 		return;
 	}
 	IPA_SETFIELD_IN_REG(valmask->val, 1 << ep_idx, shft, bmsk);
-	valmask->mask = bmsk << shft;
+	valmask->mask = bmsk;
 }
 
 void ipahal_get_fltrt_hash_flush_valmask(
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
index df3c976..98481a1 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -199,6 +199,47 @@
 };
 
 /*
+ * struct ipahal_reg_clkon_cfg-  Enables SW bypass clock-gating for the IPA core
+ *
+ * @all: Enables SW bypass clock-gating controls for this sub-module;
+ *	0: CGC is enabled by internal logic, 1: No CGC (clk is always 'ON').
+ *	sub-module affected is based on var name -> ex: open_rx refers
+ *	to IPA_RX sub-module and open_global refers to global IPA 1x clock
+ */
+struct ipahal_reg_clkon_cfg {
+	bool open_global_2x_clk;
+	bool open_global;
+	bool open_gsi_if;
+	bool open_weight_arb;
+	bool open_qmb;
+	bool open_ram_slaveway;
+	bool open_aggr_wrapper;
+	bool open_qsb2axi_cmdq_l;
+	bool open_fnr;
+	bool open_tx_1;
+	bool open_tx_0;
+	bool open_ntf_tx_cmdqs;
+	bool open_dcmp;
+	bool open_h_dcph;
+	bool open_d_dcph;
+	bool open_ack_mngr;
+	bool open_ctx_handler;
+	bool open_rsrc_mngr;
+	bool open_dps_tx_cmdqs;
+	bool open_hps_dps_cmdqs;
+	bool open_rx_hps_cmdqs;
+	bool open_dps;
+	bool open_hps;
+	bool open_ftch_dps;
+	bool open_ftch_hps;
+	bool open_ram_arb;
+	bool open_misc;
+	bool open_tx_wrapper;
+	bool open_proc;
+	bool open_rx;
+};
+
+/*
  * struct ipa_hash_tuple - Hash tuple members for flt and rt
  *  the fields tells if to be masked or not
  * @src_id: pipe number for flt, table index for rt
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
index 664d254..35a36e1 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -246,6 +246,68 @@
 #define IPA_ENDP_STATUS_n_STATUS_EN_BMSK 0x1
 #define IPA_ENDP_STATUS_n_STATUS_EN_SHFT 0x0
 
+/* IPA_CLKON_CFG register */
+#define IPA_CLKON_CFG_OPEN_GLOBAL_2X_CLK_BMSK  0x20000000
+#define IPA_CLKON_CFG_OPEN_GLOBAL_2X_CLK_SHFT 29
+#define IPA_CLKON_CFG_OPEN_GLOBAL_BMSK 0x10000000
+#define IPA_CLKON_CFG_OPEN_GLOBAL_SHFT 28
+#define IPA_CLKON_CFG_OPEN_GSI_IF_BMSK 0x8000000
+#define IPA_CLKON_CFG_OPEN_GSI_IF_SHFT 27
+#define IPA_CLKON_CFG_OPEN_WEIGHT_ARB_SHFT 26
+#define IPA_CLKON_CFG_OPEN_WEIGHT_ARB_BMSK 0x4000000
+#define IPA_CLKON_CFG_OPEN_QMB_SHFT 25
+#define IPA_CLKON_CFG_OPEN_QMB_BMSK 0x2000000
+#define IPA_CLKON_CFG_OPEN_RAM_SLAVEWAY_SHFT 24
+#define IPA_CLKON_CFG_OPEN_RAM_SLAVEWAY_BMSK 0x1000000
+#define IPA_CLKON_CFG_OPEN_AGGR_WRAPPER_SHFT 23
+#define IPA_CLKON_CFG_OPEN_AGGR_WRAPPER_BMSK 0x800000
+#define IPA_CLKON_CFG_OPEN_QSB2AXI_CMDQ_L_SHFT 22
+#define IPA_CLKON_CFG_OPEN_QSB2AXI_CMDQ_L_BMSK 0x400000
+#define IPA_CLKON_CFG_OPEN_FNR_SHFT 21
+#define IPA_CLKON_CFG_OPEN_FNR_BMSK 0x200000
+#define IPA_CLKON_CFG_OPEN_TX_1_SHFT 20
+#define IPA_CLKON_CFG_OPEN_TX_1_BMSK 0x100000
+#define IPA_CLKON_CFG_OPEN_TX_0_SHFT 19
+#define IPA_CLKON_CFG_OPEN_TX_0_BMSK 0x80000
+#define IPA_CLKON_CFG_OPEN_NTF_TX_CMDQS_SHFT 18
+#define IPA_CLKON_CFG_OPEN_NTF_TX_CMDQS_BMSK 0x40000
+#define IPA_CLKON_CFG_OPEN_DCMP_SHFT 17
+#define IPA_CLKON_CFG_OPEN_DCMP_BMSK 0x20000
+#define IPA_CLKON_CFG_OPEN_H_DCPH_SHFT 16
+#define IPA_CLKON_CFG_OPEN_H_DCPH_BMSK 0x10000
+#define IPA_CLKON_CFG_OPEN_D_DCPH_SHFT 15
+#define IPA_CLKON_CFG_OPEN_D_DCPH_BMSK 0x8000
+#define IPA_CLKON_CFG_OPEN_ACK_MNGR_SHFT 14
+#define IPA_CLKON_CFG_OPEN_ACK_MNGR_BMSK 0x4000
+#define IPA_CLKON_CFG_OPEN_CTX_HANDLER_SHFT 13
+#define IPA_CLKON_CFG_OPEN_CTX_HANDLER_BMSK 0x2000
+#define IPA_CLKON_CFG_OPEN_RSRC_MNGR_SHFT 12
+#define IPA_CLKON_CFG_OPEN_RSRC_MNGR_BMSK 0x1000
+#define IPA_CLKON_CFG_OPEN_DPS_TX_CMDQS_SHFT 11
+#define IPA_CLKON_CFG_OPEN_DPS_TX_CMDQS_BMSK 0x800
+#define IPA_CLKON_CFG_OPEN_HPS_DPS_CMDQS_SHFT 10
+#define IPA_CLKON_CFG_OPEN_HPS_DPS_CMDQS_BMSK 0x400
+#define IPA_CLKON_CFG_OPEN_RX_HPS_CMDQS_SHFT 9
+#define IPA_CLKON_CFG_OPEN_RX_HPS_CMDQS_BMSK 0x200
+#define IPA_CLKON_CFG_OPEN_DPS_SHFT 8
+#define IPA_CLKON_CFG_OPEN_DPS_BMSK 0x100
+#define IPA_CLKON_CFG_OPEN_HPS_SHFT 7
+#define IPA_CLKON_CFG_OPEN_HPS_BMSK 0x80
+#define IPA_CLKON_CFG_OPEN_FTCH_DPS_SHFT 6
+#define IPA_CLKON_CFG_OPEN_FTCH_DPS_BMSK 0x40
+#define IPA_CLKON_CFG_OPEN_FTCH_HPS_SHFT 5
+#define IPA_CLKON_CFG_OPEN_FTCH_HPS_BMSK 0x20
+#define IPA_CLKON_CFG_OPEN_RAM_ARB_SHFT 4
+#define IPA_CLKON_CFG_OPEN_RAM_ARB_BMSK 0x10
+#define IPA_CLKON_CFG_OPEN_MISC_SHFT 3
+#define IPA_CLKON_CFG_OPEN_MISC_BMSK 0x8
+#define IPA_CLKON_CFG_OPEN_TX_WRAPPER_SHFT 2
+#define IPA_CLKON_CFG_OPEN_TX_WRAPPER_BMSK 0x4
+#define IPA_CLKON_CFG_OPEN_PROC_SHFT 1
+#define IPA_CLKON_CFG_OPEN_PROC_BMSK 0x2
+#define IPA_CLKON_CFG_OPEN_RX_BMSK 0x1
+#define IPA_CLKON_CFG_OPEN_RX_SHFT 0
+
 /* IPA_ENDP_FILTER_ROUTER_HSH_CFG_n register */
 #define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_SHFT 0
 #define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_BMSK 0x1
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index 98a8594..512dddd 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1740,6 +1740,7 @@
 				IPAWANERR("Failed to allocate memory.\n");
 				return -ENOMEM;
 			}
+			extend_ioctl_data.u.if_name[IFNAMSIZ-1] = '\0';
 			len = sizeof(wan_msg->upstream_ifname) >
 			sizeof(extend_ioctl_data.u.if_name) ?
 				sizeof(extend_ioctl_data.u.if_name) :
@@ -2711,8 +2712,13 @@
 		if (atomic_read(&rmnet_ipa3_ctx->is_ssr))
 			/* clean up cached QMI msg/handlers */
 			ipa3_qmi_service_exit();
-		/*hold a proxy vote for the modem*/
-		ipa3_proxy_clk_vote();
+		/*
+		 * hold a proxy vote for the modem.
+		 * for IPA 4.0 offline charge is not needed and proxy vote
+		 * is already held.
+		 */
+		if (ipa3_ctx->ipa_hw_type != IPA_HW_v4_0)
+			ipa3_proxy_clk_vote();
 		ipa3_reset_freeze_vote();
 		IPAWANINFO("IPA BEFORE_POWERUP handling is complete\n");
 		break;
@@ -3112,7 +3118,8 @@
 		IPAWANERR("can't get ipa3_get_wlan_stats\n");
 		kfree(sap_stats);
 		return rc;
-	} else if (reset) {
+	} else if (data == NULL) {
+		IPAWANDBG("only reset wlan stats\n");
 		kfree(sap_stats);
 		return 0;
 	}
@@ -3170,7 +3177,7 @@
 	if (reset) {
 		req->reset_stats_valid = true;
 		req->reset_stats = true;
-		IPAWANERR("reset the pipe stats\n");
+		IPAWANDBG("reset the pipe stats\n");
 	} else {
 		/* print tethered-client enum */
 		IPAWANDBG("Tethered-client enum(%d)\n", data->ipa_client);
@@ -3183,6 +3190,7 @@
 		kfree(resp);
 		return rc;
 	} else if (data == NULL) {
+		IPAWANDBG("only reset modem stats\n");
 		kfree(req);
 		kfree(resp);
 		return 0;
@@ -3377,11 +3385,8 @@
 int rmnet_ipa3_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data)
 {
 	enum ipa_upstream_type upstream_type;
-	struct wan_ioctl_query_tether_stats tether_stats;
 	int rc = 0;
 
-	memset(&tether_stats, 0, sizeof(struct wan_ioctl_query_tether_stats));
-
 	/* prevent string buffer overflows */
 	data->upstreamIface[IFNAMSIZ-1] = '\0';
 
@@ -3402,7 +3407,7 @@
 	} else {
 		IPAWANERR(" reset modem-backhaul stats\n");
 		rc = rmnet_ipa3_query_tethering_stats_modem(
-			&tether_stats, true);
+			NULL, true);
 		if (rc) {
 			IPAWANERR("reset MODEM stats failed\n");
 			return rc;
@@ -4021,6 +4026,10 @@
 	ipa3_qmi_init();
 
 	/* Register for Modem SSR */
+	/* SSR is not supported yet on IPA 4.0 */
+	if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_0)
+		return platform_driver_register(&rmnet_ipa_driver);
+
 	rmnet_ipa3_ctx->subsys_notify_handle = subsys_notif_register_notifier(
 			SUBSYS_MODEM,
 			&ipa3_ssr_notifier);
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
index 246f32e..929242a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -253,7 +253,7 @@
 			(struct wan_ioctl_set_data_quota *)param);
 		if (rc != 0) {
 			IPAWANERR("WAN_IOC_SET_DATA_QUOTA failed\n");
-			if (retval == -ENODEV)
+			if (rc == -ENODEV)
 				retval = -ENODEV;
 			else
 				retval = -EFAULT;
diff --git a/drivers/platform/msm/ipa/test/ipa_test_mhi.c b/drivers/platform/msm/ipa/test/ipa_test_mhi.c
index 195799e..212557c 100644
--- a/drivers/platform/msm/ipa/test/ipa_test_mhi.c
+++ b/drivers/platform/msm/ipa/test/ipa_test_mhi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1304,6 +1304,7 @@
 	u32 next_wp_ofst;
 	int i;
 	u32 num_of_ed_to_queue;
+	u32 avail_ev;
 
 	IPA_UT_LOG("Entry\n");
 
@@ -1341,6 +1342,8 @@
 
 	wp_ofst = (u32)(p_events[event_ring_index].wp -
 		p_events[event_ring_index].rbase);
+	rp_ofst = (u32)(p_events[event_ring_index].rp -
+		p_events[event_ring_index].rbase);
 
 	if (p_events[event_ring_index].rlen & 0xFFFFFFFF00000000) {
 		IPA_UT_LOG("invalid ev rlen %llu\n",
@@ -1348,23 +1351,48 @@
 		return -EFAULT;
 	}
 
-	next_wp_ofst = (wp_ofst + num_of_ed_to_queue *
-		sizeof(struct ipa_mhi_event_ring_element)) %
-		(u32)p_events[event_ring_index].rlen;
+	if (wp_ofst > rp_ofst) {
+		avail_ev = (wp_ofst - rp_ofst) /
+			sizeof(struct ipa_mhi_event_ring_element);
+	} else {
+		avail_ev = (u32)p_events[event_ring_index].rlen -
+			(rp_ofst - wp_ofst);
+		avail_ev /= sizeof(struct ipa_mhi_event_ring_element);
+	}
 
-	/* set next WP */
-	p_events[event_ring_index].wp =
-		(u32)p_events[event_ring_index].rbase + next_wp_ofst;
+	IPA_UT_LOG("wp_ofst=0x%x rp_ofst=0x%x rlen=%llu avail_ev=%u\n",
+		wp_ofst, rp_ofst, p_events[event_ring_index].rlen, avail_ev);
 
-	/* write value to event ring doorbell */
-	IPA_UT_LOG("DB to event 0x%llx: base %pa ofst 0x%x\n",
-		p_events[event_ring_index].wp,
-		&(gsi_ctx->per.phys_addr), GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(
+	if (num_of_ed_to_queue > ((u32)p_events[event_ring_index].rlen /
+		sizeof(struct ipa_mhi_event_ring_element))) {
+		IPA_UT_LOG("event ring too small for %u credits\n",
+			num_of_ed_to_queue);
+		return -EFAULT;
+	}
+
+	if (num_of_ed_to_queue > avail_ev) {
+		IPA_UT_LOG("Need to add event credits (needed=%u)\n",
+			num_of_ed_to_queue - avail_ev);
+
+		next_wp_ofst = (wp_ofst + (num_of_ed_to_queue - avail_ev) *
+			sizeof(struct ipa_mhi_event_ring_element)) %
+			(u32)p_events[event_ring_index].rlen;
+
+		/* set next WP */
+		p_events[event_ring_index].wp =
+			(u32)p_events[event_ring_index].rbase + next_wp_ofst;
+
+		/* write value to event ring doorbell */
+		IPA_UT_LOG("DB to event 0x%llx: base %pa ofst 0x%x\n",
+			p_events[event_ring_index].wp,
+			&(gsi_ctx->per.phys_addr),
+			GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(
 			event_ring_index + ipa3_ctx->mhi_evid_limits[0], 0));
-	iowrite32(p_events[event_ring_index].wp,
-		test_mhi_ctx->gsi_mmio +
-		GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(
+		iowrite32(p_events[event_ring_index].wp,
+			test_mhi_ctx->gsi_mmio +
+			GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(
 			event_ring_index + ipa3_ctx->mhi_evid_limits[0], 0));
+	}
 
 	for (i = 0; i < buf_array_size; i++) {
 		/* calculate virtual pointer for current WP and RP */
diff --git a/drivers/platform/msm/ipa/test/ipa_ut_framework.c b/drivers/platform/msm/ipa/test/ipa_ut_framework.c
index 036902a..bcbcd87 100644
--- a/drivers/platform/msm/ipa/test/ipa_ut_framework.c
+++ b/drivers/platform/msm/ipa/test/ipa_ut_framework.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -49,12 +49,14 @@
 
 /**
  * struct ipa_ut_dbgfs_test_write_work_ctx - work_queue context
- * @dbgfs: work_struct for the write_work
- * @file: file  to be writen to
+ * @dbgfs_Work: work_struct for the write_work
+ * @meta_type: See enum ipa_ut_meta_test_type
+ * @user_data: user data usually used to point to suite or test object
  */
 struct ipa_ut_dbgfs_test_write_work_ctx {
 	struct work_struct dbgfs_work;
-	struct file *file;
+	long meta_type;
+	void *user_data;
 };
 
 static ssize_t ipa_ut_dbgfs_enable_read(struct file *file,
@@ -219,7 +221,6 @@
 {
 	struct ipa_ut_dbgfs_test_write_work_ctx *write_work_ctx;
 	struct ipa_ut_suite *suite;
-	struct file *file;
 	int i;
 	enum ipa_hw_type ipa_ver;
 	int rc = 0;
@@ -232,14 +233,9 @@
 	IPA_UT_DBG("Entry\n");
 
 	mutex_lock(&ipa_ut_ctx->lock);
-	file = write_work_ctx->file;
-	if (file == NULL) {
-		rc = -EFAULT;
-		goto unlock_mutex;
-	}
-	suite = file->f_inode->i_private;
+	suite = (struct ipa_ut_suite *)(write_work_ctx->user_data);
 	ipa_assert_on(!suite);
-	meta_type = (long)(file->private_data);
+	meta_type = write_work_ctx->meta_type;
 	IPA_UT_DBG("Meta test type %ld\n", meta_type);
 
 	_IPA_UT_TEST_LOG_BUF_NAME = kzalloc(_IPA_UT_TEST_LOG_BUF_SIZE,
@@ -354,7 +350,7 @@
 }
 
 /*
- * ipa_ut_dbgfs_meta_test_write() - Debugfs write func for a for a meta test
+ * ipa_ut_dbgfs_meta_test_write() - Debugfs write func for a meta test
  * @params: write fops
  *
  * Run all tests in a suite using a work queue so it does not race with
@@ -373,7 +369,8 @@
 		return -ENOMEM;
 	}
 
-	write_work_ctx->file = file;
+	write_work_ctx->user_data = file->f_inode->i_private;
+	write_work_ctx->meta_type = (long)(file->private_data);
 
 	INIT_WORK(&write_work_ctx->dbgfs_work,
 		ipa_ut_dbgfs_meta_test_write_work_func);
@@ -515,7 +512,6 @@
 	struct ipa_ut_dbgfs_test_write_work_ctx *write_work_ctx;
 	struct ipa_ut_test *test;
 	struct ipa_ut_suite *suite;
-	struct file *file;
 	bool tst_fail = false;
 	int rc = 0;
 	enum ipa_hw_type ipa_ver;
@@ -526,12 +522,7 @@
 	IPA_UT_DBG("Entry\n");
 
 	mutex_lock(&ipa_ut_ctx->lock);
-	file = write_work_ctx->file;
-	if (file == NULL) {
-		rc = -EFAULT;
-		goto unlock_mutex;
-	}
-	test = file->f_inode->i_private;
+	test = (struct ipa_ut_test *)(write_work_ctx->user_data);
 	ipa_assert_on(!test);
 
 	_IPA_UT_TEST_LOG_BUF_NAME = kzalloc(_IPA_UT_TEST_LOG_BUF_SIZE,
@@ -633,7 +624,8 @@
 		return -ENOMEM;
 	}
 
-	write_work_ctx->file = file;
+	write_work_ctx->user_data = file->f_inode->i_private;
+	write_work_ctx->meta_type = (long)(file->private_data);
 
 	INIT_WORK(&write_work_ctx->dbgfs_work,
 		ipa_ut_dbgfs_test_write_work_func);
@@ -979,6 +971,7 @@
 		IS_ERR(ipa_ut_ctx->test_dbgfs_root)) {
 		IPA_UT_ERR("failed to create test debugfs dir\n");
 		ret = -EFAULT;
+		destroy_workqueue(ipa_ut_ctx->wq);
 		goto unlock_mutex;
 	}
 
@@ -988,6 +981,7 @@
 	if (!dfile_enable || IS_ERR(dfile_enable)) {
 		IPA_UT_ERR("failed to create enable debugfs file\n");
 		ret = -EFAULT;
+		destroy_workqueue(ipa_ut_ctx->wq);
 		goto fail_clean_dbgfs;
 	}
 
diff --git a/drivers/platform/msm/msm_ext_display.c b/drivers/platform/msm/msm_ext_display.c
index bc4df04..7a93e0e 100644
--- a/drivers/platform/msm/msm_ext_display.c
+++ b/drivers/platform/msm/msm_ext_display.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -44,6 +44,20 @@
 	EXTCON_NONE,
 };
 
+static int msm_ext_disp_find_index(struct extcon_dev *edev,
+		enum msm_ext_disp_type id)
+{
+	int i;
+
+	/* Find the index of extcon cable in edev->supported_cable */
+	for (i = 0; i < edev->max_supported; i++) {
+		if (edev->supported_cable[i] == id)
+			return i;
+	}
+
+	return -EINVAL;
+}
+
 static int msm_ext_disp_extcon_register(struct msm_ext_disp *ext_disp)
 {
 	int ret = 0;
@@ -145,7 +159,8 @@
 		enum msm_ext_disp_cable_state new_state)
 {
 	int ret = 0;
-	int state;
+	int state, index;
+	enum msm_ext_disp_cable_state current_state;
 
 	if (!ext_disp->ops) {
 		pr_err("codec not registered, skip notification\n");
@@ -154,13 +169,27 @@
 	}
 
 	state = ext_disp->audio_sdev.state;
-	ret = extcon_set_state_sync(&ext_disp->audio_sdev,
-			ext_disp->current_disp, !!new_state);
 
-	pr_debug("Audio state %s %d\n",
-			ext_disp->audio_sdev.state == state ?
-			"is same" : "switched to",
-			ext_disp->audio_sdev.state);
+	index = msm_ext_disp_find_index(&ext_disp->audio_sdev, type);
+	if (index < 0 || index >= ext_disp->audio_sdev.max_supported) {
+		pr_err("invalid index\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	if (state & BIT(index))
+		current_state = EXT_DISPLAY_CABLE_CONNECT;
+	else
+		current_state = EXT_DISPLAY_CABLE_DISCONNECT;
+
+	if (current_state == new_state) {
+		ret = -EEXIST;
+		pr_debug("same state\n");
+	} else {
+		ret = extcon_set_state_sync(&ext_disp->audio_sdev,
+			ext_disp->current_disp, !!new_state);
+		pr_debug("state changed to %d\n", new_state);
+	}
 end:
 	return ret;
 }
diff --git a/drivers/platform/msm/qcom-geni-se.c b/drivers/platform/msm/qcom-geni-se.c
index bec16dd..5d094d2 100644
--- a/drivers/platform/msm/qcom-geni-se.c
+++ b/drivers/platform/msm/qcom-geni-se.c
@@ -46,7 +46,7 @@
  * @bus_bw:		Client handle to the bus bandwidth request.
  * @bus_mas_id:		Master Endpoint ID for bus BW request.
  * @bus_slv_id:		Slave Endpoint ID for bus BW request.
- * @ab_ib_lock:		Lock to protect the bus ab & ib values, list.
+ * @geni_dev_lock:		Lock to protect the bus ab & ib values, list.
  * @ab_list_head:	Sorted resource list based on average bus BW.
  * @ib_list_head:	Sorted resource list based on instantaneous bus BW.
  * @cur_ab:		Current Bus Average BW request value.
@@ -67,7 +67,7 @@
 	struct msm_bus_client_handle *bus_bw;
 	u32 bus_mas_id;
 	u32 bus_slv_id;
-	struct mutex ab_ib_lock;
+	struct mutex geni_dev_lock;
 	struct list_head ab_list_head;
 	struct list_head ib_list_head;
 	unsigned long cur_ab;
@@ -609,7 +609,7 @@
 	if (unlikely(list_empty(&rsc->ab_list) || list_empty(&rsc->ib_list)))
 		return -EINVAL;
 
-	mutex_lock(&geni_se_dev->ab_ib_lock);
+	mutex_lock(&geni_se_dev->geni_dev_lock);
 	list_del_init(&rsc->ab_list);
 	geni_se_dev->cur_ab -= rsc->ab;
 
@@ -630,7 +630,7 @@
 		    "%s: %lu:%lu (%lu:%lu) %d\n", __func__,
 		    geni_se_dev->cur_ab, geni_se_dev->cur_ib,
 		    rsc->ab, rsc->ib, bus_bw_update);
-	mutex_unlock(&geni_se_dev->ab_ib_lock);
+	mutex_unlock(&geni_se_dev->geni_dev_lock);
 	return ret;
 }
 
@@ -704,7 +704,7 @@
 	bool bus_bw_update = false;
 	int ret = 0;
 
-	mutex_lock(&geni_se_dev->ab_ib_lock);
+	mutex_lock(&geni_se_dev->geni_dev_lock);
 	list_add(&rsc->ab_list, &geni_se_dev->ab_list_head);
 	geni_se_dev->cur_ab += rsc->ab;
 
@@ -728,7 +728,7 @@
 		    "%s: %lu:%lu (%lu:%lu) %d\n", __func__,
 		    geni_se_dev->cur_ab, geni_se_dev->cur_ib,
 		    rsc->ab, rsc->ib, bus_bw_update);
-	mutex_unlock(&geni_se_dev->ab_ib_lock);
+	mutex_unlock(&geni_se_dev->geni_dev_lock);
 	return ret;
 }
 
@@ -878,24 +878,29 @@
 	struct geni_se_device *geni_se_dev;
 	int i;
 	unsigned long prev_freq = 0;
+	int ret = 0;
 
 	if (unlikely(!rsc || !rsc->wrapper_dev || !rsc->se_clk || !tbl))
 		return -EINVAL;
 
-	*tbl = NULL;
 	geni_se_dev = dev_get_drvdata(rsc->wrapper_dev);
 	if (unlikely(!geni_se_dev))
 		return -EPROBE_DEFER;
+	mutex_lock(&geni_se_dev->geni_dev_lock);
+	*tbl = NULL;
 
 	if (geni_se_dev->clk_perf_tbl) {
 		*tbl = geni_se_dev->clk_perf_tbl;
-		return geni_se_dev->num_clk_levels;
+		ret = geni_se_dev->num_clk_levels;
+		goto exit_se_clk_tbl_get;
 	}
 
 	geni_se_dev->clk_perf_tbl = kzalloc(sizeof(*geni_se_dev->clk_perf_tbl) *
 						MAX_CLK_PERF_LEVEL, GFP_KERNEL);
-	if (!geni_se_dev->clk_perf_tbl)
-		return -ENOMEM;
+	if (!geni_se_dev->clk_perf_tbl) {
+		ret = -ENOMEM;
+		goto exit_se_clk_tbl_get;
+	}
 
 	for (i = 0; i < MAX_CLK_PERF_LEVEL; i++) {
 		geni_se_dev->clk_perf_tbl[i] = clk_round_rate(rsc->se_clk,
@@ -908,7 +913,10 @@
 	}
 	geni_se_dev->num_clk_levels = i;
 	*tbl = geni_se_dev->clk_perf_tbl;
-	return geni_se_dev->num_clk_levels;
+	ret = geni_se_dev->num_clk_levels;
+exit_se_clk_tbl_get:
+	mutex_unlock(&geni_se_dev->geni_dev_lock);
+	return ret;
 }
 EXPORT_SYMBOL(geni_se_clk_tbl_get);
 
@@ -1437,7 +1445,7 @@
 	mutex_init(&geni_se_dev->iommu_lock);
 	INIT_LIST_HEAD(&geni_se_dev->ab_list_head);
 	INIT_LIST_HEAD(&geni_se_dev->ib_list_head);
-	mutex_init(&geni_se_dev->ab_ib_lock);
+	mutex_init(&geni_se_dev->geni_dev_lock);
 	geni_se_dev->log_ctx = ipc_log_context_create(NUM_LOG_PAGES,
 						dev_name(geni_se_dev->dev), 0);
 	if (!geni_se_dev->log_ctx)
diff --git a/drivers/platform/msm/usb_bam.c b/drivers/platform/msm/usb_bam.c
index 95e3782..dec698f 100644
--- a/drivers/platform/msm/usb_bam.c
+++ b/drivers/platform/msm/usb_bam.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -3139,7 +3139,8 @@
 	}
 
 	dev = &ctx->usb_bam_pdev->dev;
-	if (dev && dev->parent && !device_property_present(dev->parent,
+	if (dev && dev->parent && device_property_present(dev->parent, "iommus")
+		&& !device_property_present(dev->parent,
 						"qcom,smmu-s1-bypass")) {
 		pr_info("%s: setting SPS_BAM_SMMU_EN flag with (%s)\n",
 						__func__, dev_name(dev));
diff --git a/drivers/platform/x86/asus-wireless.c b/drivers/platform/x86/asus-wireless.c
index 9f31bc1..1871602 100644
--- a/drivers/platform/x86/asus-wireless.c
+++ b/drivers/platform/x86/asus-wireless.c
@@ -97,6 +97,7 @@
 		return;
 	}
 	input_report_key(data->idev, KEY_RFKILL, 1);
+	input_sync(data->idev);
 	input_report_key(data->idev, KEY_RFKILL, 0);
 	input_sync(data->idev);
 }
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index 0935668..abd9d83 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -240,6 +240,7 @@
 	AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted),
 	AXIS_DMI_MATCH("HPB432x", "HP ProBook 432", xy_rotated_left),
 	AXIS_DMI_MATCH("HPB440G3", "HP ProBook 440 G3", x_inverted_usd),
+	AXIS_DMI_MATCH("HPB440G4", "HP ProBook 440 G4", x_inverted),
 	AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
 	AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
 	AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
index a47a41f..b5b8901 100644
--- a/drivers/platform/x86/intel_punit_ipc.c
+++ b/drivers/platform/x86/intel_punit_ipc.c
@@ -252,28 +252,28 @@
 	 * - GTDRIVER_IPC BASE_IFACE
 	 */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
-	if (res) {
+	if (res && resource_size(res) > 1) {
 		addr = devm_ioremap_resource(&pdev->dev, res);
 		if (!IS_ERR(addr))
 			punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr;
 	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
-	if (res) {
+	if (res && resource_size(res) > 1) {
 		addr = devm_ioremap_resource(&pdev->dev, res);
 		if (!IS_ERR(addr))
 			punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr;
 	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
-	if (res) {
+	if (res && resource_size(res) > 1) {
 		addr = devm_ioremap_resource(&pdev->dev, res);
 		if (!IS_ERR(addr))
 			punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr;
 	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 5);
-	if (res) {
+	if (res && resource_size(res) > 1) {
 		addr = devm_ioremap_resource(&pdev->dev, res);
 		if (!IS_ERR(addr))
 			punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr;
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index ceeb8c1..00d82e8 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -848,5 +848,5 @@
 	pr_info("Mapper unloaded\n");
 }
 
-subsys_initcall(acpi_wmi_init);
+subsys_initcall_sync(acpi_wmi_init);
 module_exit(acpi_wmi_exit);
diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h
index 99120f4..a2a35c6 100644
--- a/drivers/power/supply/qcom/fg-core.h
+++ b/drivers/power/supply/qcom/fg-core.h
@@ -170,6 +170,7 @@
 	FG_SRAM_SYS_TERM_CURR,
 	FG_SRAM_CHG_TERM_CURR,
 	FG_SRAM_CHG_TERM_BASE_CURR,
+	FG_SRAM_CUTOFF_CURR,
 	FG_SRAM_DELTA_MSOC_THR,
 	FG_SRAM_DELTA_BSOC_THR,
 	FG_SRAM_RECHARGE_SOC_THR,
@@ -262,6 +263,7 @@
 	int	chg_term_curr_ma;
 	int	chg_term_base_curr_ma;
 	int	sys_term_curr_ma;
+	int	cutoff_curr_ma;
 	int	delta_soc_thr;
 	int	recharge_soc_thr;
 	int	recharge_volt_thr_mv;
diff --git a/drivers/power/supply/qcom/fg-memif.c b/drivers/power/supply/qcom/fg-memif.c
index bc21b46..34a3fa6 100644
--- a/drivers/power/supply/qcom/fg-memif.c
+++ b/drivers/power/supply/qcom/fg-memif.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -747,7 +747,7 @@
 }
 
 #define MEM_GNT_WAIT_TIME_US	10000
-#define MEM_GNT_RETRIES		20
+#define MEM_GNT_RETRIES		50
 static int fg_direct_mem_request(struct fg_chip *chip, bool request)
 {
 	int rc, ret, i = 0;
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index afa128d..0894f37 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -35,6 +35,8 @@
 #define ESR_PULSE_THRESH_OFFSET		3
 #define SLOPE_LIMIT_WORD		3
 #define SLOPE_LIMIT_OFFSET		0
+#define CUTOFF_CURR_WORD		4
+#define CUTOFF_CURR_OFFSET		0
 #define CUTOFF_VOLT_WORD		5
 #define CUTOFF_VOLT_OFFSET		0
 #define SYS_TERM_CURR_WORD		6
@@ -208,6 +210,8 @@
 		1000000, 122070, 0, fg_encode_current, NULL),
 	PARAM(CHG_TERM_CURR, CHG_TERM_CURR_WORD, CHG_TERM_CURR_OFFSET, 1,
 		100000, 390625, 0, fg_encode_current, NULL),
+	PARAM(CUTOFF_CURR, CUTOFF_CURR_WORD, CUTOFF_CURR_OFFSET, 3,
+		1000000, 122070, 0, fg_encode_current, NULL),
 	PARAM(DELTA_MSOC_THR, DELTA_MSOC_THR_WORD, DELTA_MSOC_THR_OFFSET, 1,
 		2048, 100, 0, fg_encode_default, NULL),
 	PARAM(DELTA_BSOC_THR, DELTA_BSOC_THR_WORD, DELTA_BSOC_THR_OFFSET, 1,
@@ -284,6 +288,8 @@
 	PARAM(CHG_TERM_BASE_CURR, CHG_TERM_CURR_v2_WORD,
 		CHG_TERM_BASE_CURR_v2_OFFSET, 1, 1024, 1000, 0,
 		fg_encode_current, NULL),
+	PARAM(CUTOFF_CURR, CUTOFF_CURR_WORD, CUTOFF_CURR_OFFSET, 3,
+		1000000, 122070, 0, fg_encode_current, NULL),
 	PARAM(DELTA_MSOC_THR, DELTA_MSOC_THR_v2_WORD, DELTA_MSOC_THR_v2_OFFSET,
 		1, 2048, 100, 0, fg_encode_default, NULL),
 	PARAM(DELTA_BSOC_THR, DELTA_BSOC_THR_v2_WORD, DELTA_BSOC_THR_v2_OFFSET,
@@ -2905,14 +2911,6 @@
 		goto out;
 	}
 
-	rc = __fg_restart(chip);
-	if (rc < 0) {
-		pr_err("Error in restarting FG, rc=%d\n", rc);
-		goto out;
-	}
-
-	fg_dbg(chip, FG_STATUS, "SOC is ready\n");
-
 	/* Set the profile integrity bit */
 	val = HLOS_RESTART_BIT | PROFILE_LOAD_BIT;
 	rc = fg_sram_write(chip, PROFILE_INTEGRITY_WORD,
@@ -2922,6 +2920,13 @@
 		goto out;
 	}
 
+	rc = __fg_restart(chip);
+	if (rc < 0) {
+		pr_err("Error in restarting FG, rc=%d\n", rc);
+		goto out;
+	}
+
+	fg_dbg(chip, FG_STATUS, "SOC is ready\n");
 done:
 	rc = fg_bp_params_config(chip);
 	if (rc < 0)
@@ -3975,6 +3980,16 @@
 		return rc;
 	}
 
+	fg_encode(chip->sp, FG_SRAM_CUTOFF_CURR, chip->dt.cutoff_curr_ma,
+		buf);
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_CUTOFF_CURR].addr_word,
+			chip->sp[FG_SRAM_CUTOFF_CURR].addr_byte, buf,
+			chip->sp[FG_SRAM_CUTOFF_CURR].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing cutoff_curr, rc=%d\n", rc);
+		return rc;
+	}
+
 	if (!(chip->wa_flags & PMI8998_V1_REV_WA)) {
 		fg_encode(chip->sp, FG_SRAM_CHG_TERM_BASE_CURR,
 			chip->dt.chg_term_base_curr_ma, buf);
@@ -4698,6 +4713,7 @@
 #define DEFAULT_CHG_TERM_CURR_MA	100
 #define DEFAULT_CHG_TERM_BASE_CURR_MA	75
 #define DEFAULT_SYS_TERM_CURR_MA	-125
+#define DEFAULT_CUTOFF_CURR_MA		500
 #define DEFAULT_DELTA_SOC_THR		1
 #define DEFAULT_RECHARGE_SOC_THR	95
 #define DEFAULT_BATT_TEMP_COLD		0
@@ -4861,6 +4877,12 @@
 	else
 		chip->dt.chg_term_base_curr_ma = temp;
 
+	rc = of_property_read_u32(node, "qcom,fg-cutoff-current", &temp);
+	if (rc < 0)
+		chip->dt.cutoff_curr_ma = DEFAULT_CUTOFF_CURR_MA;
+	else
+		chip->dt.cutoff_curr_ma = temp;
+
 	rc = of_property_read_u32(node, "qcom,fg-delta-soc-thr", &temp);
 	if (rc < 0)
 		chip->dt.delta_soc_thr = DEFAULT_DELTA_SOC_THR;
diff --git a/drivers/power/supply/qcom/qpnp-fg.c b/drivers/power/supply/qcom/qpnp-fg.c
index a12b0ad..015da41 100644
--- a/drivers/power/supply/qcom/qpnp-fg.c
+++ b/drivers/power/supply/qcom/qpnp-fg.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -2548,13 +2548,13 @@
 
 	/* calculate soc_cutoff_new */
 	val = (1000000LL + temp_rs_to_rslow) * battery_esr;
-	do_div(val, 1000000);
+	val = div64_s64(val, 1000000);
 	ocv_cutoff_new = div64_s64(chip->evaluation_current * val, 1000)
 		+ chip->cutoff_voltage;
 
 	/* calculate soc_cutoff_aged */
 	val = (1000000LL + temp_rs_to_rslow) * esr_actual;
-	do_div(val, 1000000);
+	val = div64_s64(val, 1000000);
 	ocv_cutoff_aged = div64_s64(chip->evaluation_current * val, 1000)
 		+ chip->cutoff_voltage;
 
@@ -3068,11 +3068,11 @@
 
 	max_inc_val = chip->learning_data.learned_cc_uah
 			* (1000 + chip->learning_data.max_increment);
-	do_div(max_inc_val, 1000);
+	max_inc_val = div_s64(max_inc_val, 1000);
 
 	min_dec_val = chip->learning_data.learned_cc_uah
 			* (1000 - chip->learning_data.max_decrement);
-	do_div(min_dec_val, 1000);
+	min_dec_val = div_s64(min_dec_val, 1000);
 
 	old_cap = chip->learning_data.learned_cc_uah;
 	if (chip->learning_data.cc_uah > max_inc_val)
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index d6ff6fc..74e80cd 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1573,20 +1573,12 @@
 		BATT_PROFILE_VOTER, true, chg->batt_profile_fv_uv);
 	vote(chg->dc_icl_votable,
 		DEFAULT_VOTER, true, chip->dt.dc_icl_ua);
-	vote(chg->hvdcp_disable_votable_indirect, PD_INACTIVE_VOTER,
-			true, 0);
-	vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER,
-			true, 0);
 	vote(chg->hvdcp_disable_votable_indirect, DEFAULT_VOTER,
 		chip->dt.hvdcp_disable, 0);
 	vote(chg->pd_disallowed_votable_indirect, CC_DETACHED_VOTER,
 			true, 0);
 	vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
 			true, 0);
-	vote(chg->pd_disallowed_votable_indirect, MICRO_USB_VOTER,
-		(chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB), 0);
-	vote(chg->hvdcp_enable_votable, MICRO_USB_VOTER,
-		(chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB), 0);
 
 	/*
 	 * AICL configuration:
@@ -1636,6 +1628,16 @@
 		return rc;
 	}
 
+	/* Connector types based votes */
+	vote(chg->hvdcp_disable_votable_indirect, PD_INACTIVE_VOTER,
+		(chg->connector_type == POWER_SUPPLY_CONNECTOR_TYPEC), 0);
+	vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER,
+		(chg->connector_type == POWER_SUPPLY_CONNECTOR_TYPEC), 0);
+	vote(chg->pd_disallowed_votable_indirect, MICRO_USB_VOTER,
+		(chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB), 0);
+	vote(chg->hvdcp_enable_votable, MICRO_USB_VOTER,
+		(chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB), 0);
+
 	/* configure VCONN for software control */
 	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
 				 VCONN_EN_SRC_BIT | VCONN_EN_VALUE_BIT,
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index 4656e35..496a276 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -2079,9 +2079,6 @@
 		return -EINVAL;
 
 	chg->system_temp_level = val->intval;
-	/* disable parallel charge in case of system temp level */
-	vote(chg->pl_disable_votable, THERMAL_DAEMON_VOTER,
-			chg->system_temp_level ? true : false, 0);
 
 	if (chg->system_temp_level == chg->thermal_levels)
 		return vote(chg->chg_disable_votable,
diff --git a/drivers/power/supply/qcom/smb1355-charger.c b/drivers/power/supply/qcom/smb1355-charger.c
index 833a8da..ebaaf5c 100644
--- a/drivers/power/supply/qcom/smb1355-charger.c
+++ b/drivers/power/supply/qcom/smb1355-charger.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -35,6 +35,7 @@
 #define ANA2_BASE	0x1100
 #define BATIF_BASE	0x1200
 #define USBIN_BASE	0x1300
+#define ANA1_BASE	0x1400
 #define MISC_BASE	0x1600
 
 #define BATTERY_STATUS_2_REG			(CHGR_BASE + 0x0B)
@@ -82,6 +83,9 @@
 #define EXT_BIAS_PIN_BIT			BIT(2)
 #define DIE_TEMP_COMP_HYST_BIT			BIT(1)
 
+#define ANA1_ENG_SREFGEN_CFG2_REG		(ANA1_BASE + 0xC1)
+#define VALLEY_COMPARATOR_EN_BIT		BIT(0)
+
 #define TEMP_COMP_STATUS_REG			(MISC_BASE + 0x07)
 #define SKIN_TEMP_RST_HOT_BIT			BIT(6)
 #define SKIN_TEMP_UB_HOT_BIT			BIT(5)
@@ -94,6 +98,9 @@
 #define MISC_RT_STS_REG				(MISC_BASE + 0x10)
 #define HARD_ILIMIT_RT_STS_BIT			BIT(5)
 
+#define BANDGAP_ENABLE_REG			(MISC_BASE + 0x42)
+#define BANDGAP_ENABLE_CMD_BIT			BIT(0)
+
 #define BARK_BITE_WDOG_PET_REG			(MISC_BASE + 0x43)
 #define BARK_BITE_WDOG_PET_BIT			BIT(0)
 
@@ -108,6 +115,9 @@
 #define MISC_CUST_SDCDC_CLK_CFG_REG		(MISC_BASE + 0xA0)
 #define SWITCHER_CLK_FREQ_MASK			GENMASK(3, 0)
 
+#define MISC_CUST_SDCDC_ILIMIT_CFG_REG		(MISC_BASE + 0xA1)
+#define LS_VALLEY_THRESH_PCT_BIT		BIT(3)
+
 #define SNARL_BARK_BITE_WD_CFG_REG		(MISC_BASE + 0x53)
 #define BITE_WDOG_DISABLE_CHARGING_CFG_BIT	BIT(7)
 #define SNARL_WDOG_TIMEOUT_MASK			GENMASK(6, 4)
@@ -150,6 +160,8 @@
 	((mode == POWER_SUPPLY_PL_USBIN_USBIN) \
 	 || (mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT))
 
+#define PARALLEL_ENABLE_VOTER			"PARALLEL_ENABLE_VOTER"
+
 struct smb_chg_param {
 	const char	*name;
 	u16		reg;
@@ -224,6 +236,8 @@
 	bool			exit_die_temp;
 	struct delayed_work	die_temp_work;
 	bool			disabled;
+
+	struct votable		*irq_disable_votable;
 };
 
 static bool is_secure(struct smb1355 *chip, int addr)
@@ -449,7 +463,7 @@
 	if (of_property_read_bool(node, "qcom,stacked-batfet"))
 		chip->dt.pl_batfet_mode = POWER_SUPPLY_PL_STACKED_BATFET;
 
-	return rc;
+	return 0;
 }
 
 /*****************************
@@ -662,6 +676,18 @@
 		schedule_delayed_work(&chip->die_temp_work, 0);
 	}
 
+	if (chip->irq_disable_votable)
+		vote(chip->irq_disable_votable, PARALLEL_ENABLE_VOTER,
+				disable, 0);
+
+	rc = smb1355_masked_write(chip, BANDGAP_ENABLE_REG,
+				BANDGAP_ENABLE_CMD_BIT,
+				disable ? 0 : BANDGAP_ENABLE_CMD_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't configure bandgap enable rc=%d\n", rc);
+		return rc;
+	}
+
 	chip->disabled = disable;
 
 	return 0;
@@ -947,6 +973,22 @@
 		return rc;
 	}
 
+	/* Enable valley current comparator all the time */
+	rc = smb1355_masked_write(chip, ANA1_ENG_SREFGEN_CFG2_REG,
+		VALLEY_COMPARATOR_EN_BIT, VALLEY_COMPARATOR_EN_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't enable valley current comparator rc=%d\n", rc);
+		return rc;
+	}
+
+	/* Set LS_VALLEY threshold to 85% */
+	rc = smb1355_masked_write(chip, MISC_CUST_SDCDC_ILIMIT_CFG_REG,
+		LS_VALLEY_THRESH_PCT_BIT, LS_VALLEY_THRESH_PCT_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't set LS valley threshold to 85pc rc=%d\n", rc);
+		return rc;
+	}
+
 	rc = smb1355_tskin_sensor_config(chip);
 	if (rc < 0) {
 		pr_err("Couldn't configure tskin regs rc=%d\n", rc);
@@ -1084,6 +1126,7 @@
 		return rc;
 	}
 
+	smb1355_irqs[irq_index].irq = irq;
 	if (smb1355_irqs[irq_index].wake)
 		enable_irq_wake(irq);
 
@@ -1112,6 +1155,23 @@
 
 	return rc;
 }
+
+static int smb1355_irq_disable_callback(struct votable *votable, void *data,
+			int disable, const char *client)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(smb1355_irqs); i++) {
+		if (smb1355_irqs[i].irq) {
+			if (disable)
+				disable_irq(smb1355_irqs[i].irq);
+			else
+				enable_irq(smb1355_irqs[i].irq);
+		}
+	}
+
+	return 0;
+}
 
 /*********
  * PROBE *
@@ -1187,6 +1247,15 @@
 		goto cleanup;
 	}
 
+	chip->irq_disable_votable = create_votable("SMB1355_IRQ_DISABLE",
+			VOTE_SET_ANY, smb1355_irq_disable_callback, chip);
+	if (IS_ERR(chip->irq_disable_votable)) {
+		rc = PTR_ERR(chip->irq_disable_votable);
+		goto cleanup;
+	}
+	/* keep IRQ's disabled until parallel is enabled */
+	vote(chip->irq_disable_votable, PARALLEL_ENABLE_VOTER, true, 0);
+
 	pr_info("%s probed successfully pl_mode=%s batfet_mode=%s\n",
 		chip->name,
 		IS_USBIN(chip->dt.pl_mode) ? "USBIN-USBIN" : "USBMID-USBMID",
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index e7d13ae..8e367c5 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -350,6 +350,16 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called pwm-rcar.
 
+config PWM_QTI_LPG
+	tristate "Qualcomm Technologies, Inc. LPG driver"
+	depends on MFD_SPMI_PMIC && OF
+	help
+	  This driver supports the LPG (Light Pulse Generator) module found in
+	  Qualcomm Technologies, Inc. PMIC chips. Each LPG channel can be
+	  configured to operate in PWM mode to output a fixed amplitude with
+	  variable duty cycle or in LUT (Look up table) mode to output PWM
+	  signal with a modulated amplitude.
+
 config PWM_RENESAS_TPU
 	tristate "Renesas TPU PWM support"
 	depends on ARCH_RENESAS || COMPILE_TEST
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 24c1baf..9453eb0 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -33,6 +33,7 @@
 obj-$(CONFIG_PWM_PXA)		+= pwm-pxa.o
 obj-$(CONFIG_PWM_QPNP)		+= pwm-qpnp.o
 obj-$(CONFIG_PWM_RCAR)		+= pwm-rcar.o
+obj-$(CONFIG_PWM_QTI_LPG)	+= pwm-qti-lpg.o
 obj-$(CONFIG_PWM_RENESAS_TPU)	+= pwm-renesas-tpu.o
 obj-$(CONFIG_PWM_ROCKCHIP)	+= pwm-rockchip.o
 obj-$(CONFIG_PWM_SAMSUNG)	+= pwm-samsung.o
diff --git a/drivers/pwm/pwm-qti-lpg.c b/drivers/pwm/pwm-qti-lpg.c
new file mode 100644
index 0000000..328f4b6
--- /dev/null
+++ b/drivers/pwm/pwm-qti-lpg.c
@@ -0,0 +1,570 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#define REG_SIZE_PER_LPG	0x100
+
+#define REG_LPG_PWM_SIZE_CLK		0x41
+#define REG_LPG_PWM_FREQ_PREDIV_CLK	0x42
+#define REG_LPG_PWM_TYPE_CONFIG		0x43
+#define REG_LPG_PWM_VALUE_LSB		0x44
+#define REG_LPG_PWM_VALUE_MSB		0x45
+#define REG_LPG_ENABLE_CONTROL		0x46
+#define REG_LPG_PWM_SYNC		0x47
+
+/* REG_LPG_PWM_SIZE_CLK */
+#define LPG_PWM_SIZE_MASK		BIT(4)
+#define LPG_PWM_SIZE_SHIFT		4
+#define LPG_PWM_CLK_FREQ_SEL_MASK	GENMASK(1, 0)
+
+/* REG_LPG_PWM_FREQ_PREDIV_CLK */
+#define LPG_PWM_FREQ_PREDIV_MASK	GENMASK(6, 5)
+#define LPG_PWM_FREQ_PREDIV_SHIFT	5
+#define LPG_PWM_FREQ_EXPONENT_MASK	GENMASK(2, 0)
+
+/* REG_LPG_PWM_TYPE_CONFIG */
+#define LPG_PWM_EN_GLITCH_REMOVAL_MASK	BIT(5)
+
+/* REG_LPG_PWM_VALUE_LSB */
+#define LPG_PWM_VALUE_LSB_MASK		GENMASK(7, 0)
+
+/* REG_LPG_PWM_VALUE_MSB */
+#define LPG_PWM_VALUE_MSB_MASK		BIT(0)
+
+/* REG_LPG_ENABLE_CONTROL */
+#define LPG_EN_LPG_OUT_BIT		BIT(7)
+#define LPG_PWM_SRC_SELECT_MASK		BIT(2)
+#define LPG_PWM_SRC_SELECT_SHIFT	2
+#define LPG_EN_RAMP_GEN_MASK		BIT(1)
+#define LPG_EN_RAMP_GEN_SHIFT		1
+
+/* REG_LPG_PWM_SYNC */
+#define LPG_PWM_VALUE_SYNC		BIT(0)
+
+#define NUM_PWM_SIZE			2
+#define NUM_PWM_CLK			3
+#define NUM_CLK_PREDIV			4
+#define NUM_PWM_EXP			8
+
+enum {
+	LUT_PATTERN = 0,
+	PWM_OUTPUT,
+};
+
+static const int pwm_size[NUM_PWM_SIZE] = {6, 9};
+static const int clk_freq_hz[NUM_PWM_CLK] = {1024, 32768, 19200000};
+static const int clk_prediv[NUM_CLK_PREDIV] = {1, 3, 5, 6};
+static const int pwm_exponent[NUM_PWM_EXP] = {0, 1, 2, 3, 4, 5, 6, 7};
+
+struct lpg_pwm_config {
+	u32	pwm_size;
+	u32	pwm_clk;
+	u32	prediv;
+	u32	clk_exp;
+	u16	pwm_value;
+	u32	best_period_ns;
+};
+
+struct qpnp_lpg_channel {
+	struct qpnp_lpg_chip		*chip;
+	struct lpg_pwm_config		pwm_config;
+	u32				lpg_idx;
+	u32				reg_base;
+	u8				src_sel;
+	int				current_period_ns;
+	int				current_duty_ns;
+};
+
+struct qpnp_lpg_chip {
+	struct pwm_chip		pwm_chip;
+	struct regmap		*regmap;
+	struct device		*dev;
+	struct qpnp_lpg_channel	*lpgs;
+	struct mutex		bus_lock;
+	u32			num_lpgs;
+};
+
+static int qpnp_lpg_write(struct qpnp_lpg_channel *lpg, u16 addr, u8 val)
+{
+	int rc;
+
+	mutex_lock(&lpg->chip->bus_lock);
+	rc = regmap_write(lpg->chip->regmap, lpg->reg_base + addr, val);
+	if (rc < 0)
+		dev_err(lpg->chip->dev, "Write addr 0x%x with value %d failed, rc=%d\n",
+				lpg->reg_base + addr, val, rc);
+	mutex_unlock(&lpg->chip->bus_lock);
+
+	return rc;
+}
+
+static int qpnp_lpg_masked_write(struct qpnp_lpg_channel *lpg,
+				u16 addr, u8 mask, u8 val)
+{
+	int rc;
+
+	mutex_lock(&lpg->chip->bus_lock);
+	rc = regmap_update_bits(lpg->chip->regmap, lpg->reg_base + addr,
+							mask, val);
+	if (rc < 0)
+		dev_err(lpg->chip->dev, "Update addr 0x%x to val 0x%x with mask 0x%x failed, rc=%d\n",
+				lpg->reg_base + addr, val, mask, rc);
+	mutex_unlock(&lpg->chip->bus_lock);
+
+	return rc;
+}
+
+static struct qpnp_lpg_channel *pwm_dev_to_qpnp_lpg(struct pwm_chip *pwm_chip,
+				struct pwm_device *pwm) {
+
+	struct qpnp_lpg_chip *chip = container_of(pwm_chip,
+			struct qpnp_lpg_chip, pwm_chip);
+	u32 hw_idx = pwm->hwpwm;
+
+	if (hw_idx >= chip->num_lpgs) {
+		dev_err(chip->dev, "hw index %d out of range [0-%d]\n",
+				hw_idx, chip->num_lpgs - 1);
+		return NULL;
+	}
+
+	return &chip->lpgs[hw_idx];
+}
+
+static int __find_index_in_array(int member, const int array[], int length)
+{
+	int i;
+
+	for (i = 0; i < length; i++) {
+		if (member == array[i])
+			return i;
+	}
+
+	return -EINVAL;
+}
+
+static int qpnp_lpg_set_pwm_config(struct qpnp_lpg_channel *lpg)
+{
+	int rc;
+	u8 val, mask;
+	int pwm_size_idx, pwm_clk_idx, prediv_idx, clk_exp_idx;
+
+	pwm_size_idx = __find_index_in_array(lpg->pwm_config.pwm_size,
+			pwm_size, ARRAY_SIZE(pwm_size));
+	pwm_clk_idx = __find_index_in_array(lpg->pwm_config.pwm_clk,
+			clk_freq_hz, ARRAY_SIZE(clk_freq_hz));
+	prediv_idx = __find_index_in_array(lpg->pwm_config.prediv,
+			clk_prediv, ARRAY_SIZE(clk_prediv));
+	clk_exp_idx = __find_index_in_array(lpg->pwm_config.clk_exp,
+			pwm_exponent, ARRAY_SIZE(pwm_exponent));
+
+	if (pwm_size_idx < 0 || pwm_clk_idx < 0
+			|| prediv_idx < 0 || clk_exp_idx < 0)
+		return -EINVAL;
+
+	/* the register field encodes the clock selection as pwm_clk_idx + 1 */
+	pwm_clk_idx += 1;
+	val = pwm_size_idx << LPG_PWM_SIZE_SHIFT | pwm_clk_idx;
+	mask = LPG_PWM_SIZE_MASK | LPG_PWM_CLK_FREQ_SEL_MASK;
+	rc = qpnp_lpg_masked_write(lpg, REG_LPG_PWM_SIZE_CLK, mask, val);
+	if (rc < 0) {
+		dev_err(lpg->chip->dev, "Write LPG_PWM_SIZE_CLK failed, rc=%d\n",
+							rc);
+		return rc;
+	}
+
+	val = prediv_idx << LPG_PWM_FREQ_PREDIV_SHIFT | clk_exp_idx;
+	mask = LPG_PWM_FREQ_PREDIV_MASK | LPG_PWM_FREQ_EXPONENT_MASK;
+	rc = qpnp_lpg_masked_write(lpg, REG_LPG_PWM_FREQ_PREDIV_CLK, mask, val);
+	if (rc < 0) {
+		dev_err(lpg->chip->dev, "Write LPG_PWM_FREQ_PREDIV_CLK failed, rc=%d\n",
+							rc);
+		return rc;
+	}
+
+	val = lpg->pwm_config.pwm_value & LPG_PWM_VALUE_LSB_MASK;
+	rc = qpnp_lpg_write(lpg, REG_LPG_PWM_VALUE_LSB, val);
+	if (rc < 0) {
+		dev_err(lpg->chip->dev, "Write LPG_PWM_VALUE_LSB failed, rc=%d\n",
+							rc);
+		return rc;
+	}
+
+	val = lpg->pwm_config.pwm_value >> 8;
+	mask = LPG_PWM_VALUE_MSB_MASK;
+	rc = qpnp_lpg_masked_write(lpg, REG_LPG_PWM_VALUE_MSB, mask, val);
+	if (rc < 0) {
+		dev_err(lpg->chip->dev, "Write LPG_PWM_VALUE_MSB failed, rc=%d\n",
+							rc);
+		return rc;
+	}
+
+	val = LPG_PWM_VALUE_SYNC;
+	rc = qpnp_lpg_write(lpg, REG_LPG_PWM_SYNC, val);
+	if (rc < 0) {
+		dev_err(lpg->chip->dev, "Write LPG_PWM_SYNC failed, rc=%d\n",
+							rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static void __qpnp_lpg_calc_pwm_period(int period_ns,
+			struct lpg_pwm_config *pwm_config)
+{
+	struct lpg_pwm_config configs[NUM_PWM_SIZE] = { {0} };
+	int i, j, m, n;
+	int tmp1, tmp2;
+	int clk_period_ns = 0, pwm_clk_period_ns;
+	int clk_delta_ns = INT_MAX, min_clk_delta_ns = INT_MAX;
+	int pwm_period_delta = INT_MAX, min_pwm_period_delta = INT_MAX;
+	int pwm_size_step;
+
+	/*
+	 *              (2^pwm_size) * (2^pwm_exp) * prediv * NSEC_PER_SEC
+	 * pwm_period = ---------------------------------------------------
+	 *                               clk_freq_hz
+	 *
+	 * Searching the closest settings for the requested PWM period.
+	 */
+	for (n = 0; n < ARRAY_SIZE(pwm_size); n++) {
+		pwm_clk_period_ns = period_ns >> pwm_size[n];
+		for (i = ARRAY_SIZE(clk_freq_hz) - 1; i >= 0; i--) {
+			for (j = 0; j < ARRAY_SIZE(clk_prediv); j++) {
+				for (m = 0; m < ARRAY_SIZE(pwm_exponent); m++) {
+					tmp1 = 1 << pwm_exponent[m];
+					tmp1 *= clk_prediv[j];
+					tmp2 = NSEC_PER_SEC / clk_freq_hz[i];
+
+					clk_period_ns = tmp1 * tmp2;
+
+					clk_delta_ns = abs(pwm_clk_period_ns
+						- clk_period_ns);
+					/*
+					 * Find the closest setting for
+					 * PWM frequency predivide value
+					 */
+					if (clk_delta_ns < min_clk_delta_ns) {
+						min_clk_delta_ns
+							= clk_delta_ns;
+						configs[n].pwm_clk
+							= clk_freq_hz[i];
+						configs[n].prediv
+							= clk_prediv[j];
+						configs[n].clk_exp
+							= pwm_exponent[m];
+						configs[n].pwm_size
+							= pwm_size[n];
+						configs[n].best_period_ns
+							= clk_period_ns;
+					}
+				}
+			}
+		}
+
+		configs[n].best_period_ns *= 1 << pwm_size[n];
+		/* Find the closest setting for PWM period */
+		if (min_clk_delta_ns < INT_MAX >> pwm_size[n])
+			pwm_period_delta = min_clk_delta_ns << pwm_size[n];
+		else
+			pwm_period_delta = INT_MAX;
+		if (pwm_period_delta < min_pwm_period_delta) {
+			min_pwm_period_delta = pwm_period_delta;
+			memcpy(pwm_config, &configs[n],
+					sizeof(struct lpg_pwm_config));
+		}
+	}
+
+	/* Larger PWM size can achieve better resolution for PWM duty */
+	for (n = ARRAY_SIZE(pwm_size) - 1; n > 0; n--) {
+		if (pwm_config->pwm_size >= pwm_size[n])
+			break;
+		pwm_size_step = pwm_size[n] - pwm_config->pwm_size;
+		if (pwm_config->clk_exp >= pwm_size_step) {
+			pwm_config->pwm_size = pwm_size[n];
+			pwm_config->clk_exp -= pwm_size_step;
+		}
+	}
+	pr_debug("PWM setting for period_ns %d: pwm_clk = %dHZ, prediv = %d, exponent = %d, pwm_size = %d\n",
+			period_ns, pwm_config->pwm_clk, pwm_config->prediv,
+			pwm_config->clk_exp, pwm_config->pwm_size);
+	pr_debug("Actual period: %dns\n", pwm_config->best_period_ns);
+}
+
+static void __qpnp_lpg_calc_pwm_duty(int period_ns, int duty_ns,
+			struct lpg_pwm_config *pwm_config)
+{
+	u16 pwm_value, max_pwm_value;
+	/* duty_ns == 0 must bypass the INT_MAX / duty_ns overflow probe */
+	if (duty_ns && (1 << pwm_config->pwm_size) > (INT_MAX / duty_ns))
+		pwm_value = duty_ns / (period_ns >> pwm_config->pwm_size);
+	else
+		pwm_value = (duty_ns << pwm_config->pwm_size) / period_ns;
+
+	max_pwm_value = (1 << pwm_config->pwm_size) - 1;
+	if (pwm_value > max_pwm_value)
+		pwm_value = max_pwm_value;
+	pwm_config->pwm_value = pwm_value;
+}
+
+static int qpnp_lpg_pwm_config(struct pwm_chip *pwm_chip,
+		struct pwm_device *pwm, int duty_ns, int period_ns)
+{
+	struct qpnp_lpg_channel *lpg;
+	int rc = 0;
+
+	lpg = pwm_dev_to_qpnp_lpg(pwm_chip, pwm);
+	if (lpg == NULL) {
+		dev_err(pwm_chip->dev, "lpg not found\n");
+		return -ENODEV;
+	}
+
+	if (duty_ns > period_ns) {
+		dev_err(pwm_chip->dev, "Duty %dns is larger than period %dns\n",
+						duty_ns, period_ns);
+		return -EINVAL;
+	}
+
+	if (period_ns != lpg->current_period_ns)
+		__qpnp_lpg_calc_pwm_period(period_ns, &lpg->pwm_config);
+
+	if (period_ns != lpg->current_period_ns ||
+			duty_ns != lpg->current_duty_ns)
+		__qpnp_lpg_calc_pwm_duty(period_ns, duty_ns, &lpg->pwm_config);
+
+	rc = qpnp_lpg_set_pwm_config(lpg);
+	if (rc < 0)
+		dev_err(pwm_chip->dev, "Config PWM failed for channel %d, rc=%d\n",
+						lpg->lpg_idx, rc);
+
+	return rc;
+}
+
+static int qpnp_lpg_pwm_enable(struct pwm_chip *pwm_chip,
+				struct pwm_device *pwm)
+{
+	struct qpnp_lpg_channel *lpg;
+	int rc = 0;
+	u8 mask, val;
+
+	lpg = pwm_dev_to_qpnp_lpg(pwm_chip, pwm);
+	if (lpg == NULL) {
+		dev_err(pwm_chip->dev, "lpg not found\n");
+		return -ENODEV;
+	}
+
+	mask = LPG_PWM_SRC_SELECT_MASK | LPG_EN_LPG_OUT_BIT;
+	val = lpg->src_sel << LPG_PWM_SRC_SELECT_SHIFT | LPG_EN_LPG_OUT_BIT;
+
+	rc = qpnp_lpg_masked_write(lpg, REG_LPG_ENABLE_CONTROL, mask, val);
+	if (rc < 0)
+		dev_err(pwm_chip->dev, "Enable PWM output failed for channel %d, rc=%d\n",
+						lpg->lpg_idx, rc);
+
+	return rc;
+}
+
+static void qpnp_lpg_pwm_disable(struct pwm_chip *pwm_chip,
+				struct pwm_device *pwm)
+{
+	struct qpnp_lpg_channel *lpg;
+	int rc;
+	u8 mask, val;
+
+	lpg = pwm_dev_to_qpnp_lpg(pwm_chip, pwm);
+	if (lpg == NULL) {
+		dev_err(pwm_chip->dev, "lpg not found\n");
+		return;
+	}
+
+	mask = LPG_PWM_SRC_SELECT_MASK | LPG_EN_LPG_OUT_BIT;
+	val = lpg->src_sel << LPG_PWM_SRC_SELECT_SHIFT;
+
+	rc = qpnp_lpg_masked_write(lpg, REG_LPG_ENABLE_CONTROL, mask, val);
+	if (rc < 0)
+		dev_err(pwm_chip->dev, "Disable PWM output failed for channel %d, rc=%d\n",
+						lpg->lpg_idx, rc);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void qpnp_lpg_pwm_dbg_show(struct pwm_chip *pwm_chip, struct seq_file *s)
+{
+	struct qpnp_lpg_channel *lpg;
+	struct lpg_pwm_config *cfg;
+	struct pwm_device *pwm;
+	int i;
+
+	for (i = 0; i < pwm_chip->npwm; i++) {
+		pwm = &pwm_chip->pwms[i];
+
+		lpg = pwm_dev_to_qpnp_lpg(pwm_chip, pwm);
+		if (lpg == NULL) {
+			dev_err(pwm_chip->dev, "lpg not found\n");
+			return;
+		}
+
+		if (test_bit(PWMF_REQUESTED, &pwm->flags)) {
+			seq_printf(s, "LPG %d is requested by %s\n",
+					lpg->lpg_idx + 1, pwm->label);
+		} else {
+			seq_printf(s, "LPG %d is free\n",
+					lpg->lpg_idx + 1);
+			continue;
+		}
+
+		if (pwm_is_enabled(pwm)) {
+			seq_puts(s, "  enabled\n");
+		} else {
+			seq_puts(s, "  disabled\n");
+			continue;
+		}
+
+		cfg = &lpg->pwm_config;
+		seq_printf(s, "     clk = %dHz\n", cfg->pwm_clk);
+		seq_printf(s, "     pwm_size = %d\n", cfg->pwm_size);
+		seq_printf(s, "     prediv = %d\n", cfg->prediv);
+		seq_printf(s, "     exponent = %d\n", cfg->clk_exp);
+		seq_printf(s, "     pwm_value = %d\n", cfg->pwm_value);
+		seq_printf(s, "  Requested period: %dns, best period = %dns\n",
+				pwm_get_period(pwm), cfg->best_period_ns);
+	}
+}
+#endif
+
+static const struct pwm_ops qpnp_lpg_pwm_ops = {
+	.config = qpnp_lpg_pwm_config,
+	.enable = qpnp_lpg_pwm_enable,
+	.disable = qpnp_lpg_pwm_disable,
+#ifdef CONFIG_DEBUG_FS
+	.dbg_show = qpnp_lpg_pwm_dbg_show,
+#endif
+	.owner = THIS_MODULE,
+};
+
+static int qpnp_lpg_parse_dt(struct qpnp_lpg_chip *chip)
+{
+	int rc = 0, i;
+	u64 base, length;
+	const __be32 *addr;
+
+	addr = of_get_address(chip->dev->of_node, 0, NULL, NULL);
+	if (!addr) {
+		dev_err(chip->dev, "Getting address failed\n");
+		return -EINVAL;
+	}
+	base = be32_to_cpu(addr[0]);
+	length = be32_to_cpu(addr[1]);
+
+	chip->num_lpgs = length / REG_SIZE_PER_LPG;
+	chip->lpgs = devm_kcalloc(chip->dev, chip->num_lpgs,
+			sizeof(*chip->lpgs), GFP_KERNEL);
+	if (!chip->lpgs)
+		return -ENOMEM;
+
+	for (i = 0; i < chip->num_lpgs; i++) {
+		chip->lpgs[i].chip = chip;
+		chip->lpgs[i].lpg_idx = i;
+		chip->lpgs[i].reg_base = base + i * REG_SIZE_PER_LPG;
+		chip->lpgs[i].src_sel = PWM_OUTPUT;
+	}
+
+	return rc;
+}
+
+static int qpnp_lpg_probe(struct platform_device *pdev)
+{
+	int rc;
+	struct qpnp_lpg_chip *chip;
+
+	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->dev = &pdev->dev;
+	chip->regmap = dev_get_regmap(chip->dev->parent, NULL);
+	if (!chip->regmap) {
+		dev_err(chip->dev, "Getting regmap failed\n");
+		return -EINVAL;
+	}
+
+	rc = qpnp_lpg_parse_dt(chip);
+	if (rc < 0) {
+		dev_err(chip->dev, "Devicetree properties parsing failed, rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	dev_set_drvdata(chip->dev, chip);
+
+	mutex_init(&chip->bus_lock);
+	chip->pwm_chip.dev = chip->dev;
+	chip->pwm_chip.base = -1;
+	chip->pwm_chip.npwm = chip->num_lpgs;
+	chip->pwm_chip.ops = &qpnp_lpg_pwm_ops;
+
+	rc = pwmchip_add(&chip->pwm_chip);
+	if (rc < 0) {
+		dev_err(chip->dev, "Add pwmchip failed, rc=%d\n", rc);
+		mutex_destroy(&chip->bus_lock);
+	}
+
+	return rc;
+}
+
+static int qpnp_lpg_remove(struct platform_device *pdev)
+{
+	struct qpnp_lpg_chip *chip = dev_get_drvdata(&pdev->dev);
+	int rc = 0;
+
+	rc = pwmchip_remove(&chip->pwm_chip);
+	if (rc < 0)
+		dev_err(chip->dev, "Remove pwmchip failed, rc=%d\n", rc);
+
+	mutex_destroy(&chip->bus_lock);
+	dev_set_drvdata(chip->dev, NULL);
+
+	return rc;
+}
+
+static const struct of_device_id qpnp_lpg_of_match[] = {
+	{ .compatible = "qcom,pwm-lpg",},
+	{ },
+};
+
+static struct platform_driver qpnp_lpg_driver = {
+	.driver		= {
+		.name		= "qcom,pwm-lpg",
+		.of_match_table	= qpnp_lpg_of_match,
+	},
+	.probe		= qpnp_lpg_probe,
+	.remove		= qpnp_lpg_remove,
+};
+module_platform_driver(qpnp_lpg_driver);
+
+MODULE_DESCRIPTION("QTI LPG driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("pwm:pwm-lpg");
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 9013a58..f32fc70 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -964,7 +964,8 @@
 			   req->sgt.sgl, req->sgt.nents, dir);
 	if (nents == -EFAULT) {
 		rmcd_error("Failed to map SG list");
-		return -EFAULT;
+		ret = -EFAULT;
+		goto err_pg;
 	}
 
 	ret = do_dma_request(req, xfer, sync, nents);
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index ee1b322..1bd67e6 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -888,6 +888,16 @@
 	  This driver provides support for the voltage regulators on the
 	  WM8994 CODEC.
 
+config REGULATOR_CPR
+	bool "RBCPR regulator driver for APC"
+	depends on OF
+	help
+	  Compile in RBCPR (RapidBridge Core Power Reduction) driver to support
+	  corner vote for APC power rail. The driver takes PTE process voltage
+	  suggestions in efuse as initial settings. It converts corner vote
+	  to voltage value before writing to a voltage regulator API, such as
+	  that provided by spm-regulator driver.
+
 config REGULATOR_CPR3
 	bool "CPR3 regulator core support"
 	help
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index b2bfba8..c75e399 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -116,6 +116,7 @@
 obj-$(CONFIG_REGULATOR_RPM_SMD) += rpm-smd-regulator.o
 obj-$(CONFIG_REGULATOR_SPM) += spm-regulator.o
 
+obj-$(CONFIG_REGULATOR_CPR) += cpr-regulator.o
 obj-$(CONFIG_REGULATOR_CPR3) += cpr3-regulator.o cpr3-util.o
 obj-$(CONFIG_REGULATOR_CPR3_HMSS) += cpr3-hmss-regulator.o
 obj-$(CONFIG_REGULATOR_CPR3_MMSS) += cpr3-mmss-regulator.o
diff --git a/drivers/regulator/cpr-regulator.c b/drivers/regulator/cpr-regulator.c
new file mode 100644
index 0000000..9c47e82
--- /dev/null
+++ b/drivers/regulator/cpr-regulator.c
@@ -0,0 +1,6408 @@
+/*
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/interrupt.h>
+#include <linux/debugfs.h>
+#include <linux/sort.h>
+#include <linux/uaccess.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/cpr-regulator.h>
+#include <linux/msm_thermal.h>
+#include <linux/msm_tsens.h>
+#include <soc/qcom/scm.h>
+
+/* Register Offsets for RB-CPR and Bit Definitions */
+
+/* RBCPR Version Register */
+#define REG_RBCPR_VERSION		0
+#define RBCPR_VER_2			0x02
+
+/* RBCPR Gate Count and Target Registers */
+#define REG_RBCPR_GCNT_TARGET(n)	(0x60 + 4 * n)
+
+#define RBCPR_GCNT_TARGET_GCNT_BITS	10
+#define RBCPR_GCNT_TARGET_GCNT_SHIFT	12
+#define RBCPR_GCNT_TARGET_GCNT_MASK	((1<<RBCPR_GCNT_TARGET_GCNT_BITS)-1)
+
+/* RBCPR Sensor Mask and Bypass Registers */
+#define REG_RBCPR_SENSOR_MASK0		0x20
+#define RBCPR_SENSOR_MASK0_SENSOR(n)	(~BIT(n))
+#define REG_RBCPR_SENSOR_BYPASS0	0x30
+
+/* RBCPR Timer Control */
+#define REG_RBCPR_TIMER_INTERVAL	0x44
+#define REG_RBIF_TIMER_ADJUST		0x4C
+
+#define RBIF_TIMER_ADJ_CONS_UP_BITS	4
+#define RBIF_TIMER_ADJ_CONS_UP_MASK	((1<<RBIF_TIMER_ADJ_CONS_UP_BITS)-1)
+#define RBIF_TIMER_ADJ_CONS_DOWN_BITS	4
+#define RBIF_TIMER_ADJ_CONS_DOWN_MASK	((1<<RBIF_TIMER_ADJ_CONS_DOWN_BITS)-1)
+#define RBIF_TIMER_ADJ_CONS_DOWN_SHIFT	4
+#define RBIF_TIMER_ADJ_CLAMP_INT_BITS	8
+#define RBIF_TIMER_ADJ_CLAMP_INT_MASK	((1<<RBIF_TIMER_ADJ_CLAMP_INT_BITS)-1)
+#define RBIF_TIMER_ADJ_CLAMP_INT_SHIFT	8
+
+/* RBCPR Config Register */
+#define REG_RBIF_LIMIT			0x48
+#define REG_RBCPR_STEP_QUOT		0x80
+#define REG_RBIF_SW_VLEVEL		0x94
+
+#define RBIF_LIMIT_CEILING_BITS		6
+#define RBIF_LIMIT_CEILING_MASK		((1<<RBIF_LIMIT_CEILING_BITS)-1)
+#define RBIF_LIMIT_CEILING_SHIFT	6
+#define RBIF_LIMIT_FLOOR_BITS		6
+#define RBIF_LIMIT_FLOOR_MASK		((1<<RBIF_LIMIT_FLOOR_BITS)-1)
+
+#define RBIF_LIMIT_CEILING_DEFAULT	RBIF_LIMIT_CEILING_MASK
+#define RBIF_LIMIT_FLOOR_DEFAULT	0
+#define RBIF_SW_VLEVEL_DEFAULT		0x20
+
+#define RBCPR_STEP_QUOT_STEPQUOT_BITS	8
+#define RBCPR_STEP_QUOT_STEPQUOT_MASK	((1<<RBCPR_STEP_QUOT_STEPQUOT_BITS)-1)
+#define RBCPR_STEP_QUOT_IDLE_CLK_BITS	4
+#define RBCPR_STEP_QUOT_IDLE_CLK_MASK	((1<<RBCPR_STEP_QUOT_IDLE_CLK_BITS)-1)
+#define RBCPR_STEP_QUOT_IDLE_CLK_SHIFT	8
+
+/* RBCPR Control Register */
+#define REG_RBCPR_CTL			0x90
+
+#define RBCPR_CTL_LOOP_EN			BIT(0)
+#define RBCPR_CTL_TIMER_EN			BIT(3)
+#define RBCPR_CTL_SW_AUTO_CONT_ACK_EN		BIT(5)
+#define RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN	BIT(6)
+#define RBCPR_CTL_COUNT_MODE			BIT(10)
+#define RBCPR_CTL_UP_THRESHOLD_BITS	4
+#define RBCPR_CTL_UP_THRESHOLD_MASK	((1<<RBCPR_CTL_UP_THRESHOLD_BITS)-1)
+#define RBCPR_CTL_UP_THRESHOLD_SHIFT	24
+#define RBCPR_CTL_DN_THRESHOLD_BITS	4
+#define RBCPR_CTL_DN_THRESHOLD_MASK	((1<<RBCPR_CTL_DN_THRESHOLD_BITS)-1)
+#define RBCPR_CTL_DN_THRESHOLD_SHIFT	28
+
+/* RBCPR Ack/Nack Response */
+#define REG_RBIF_CONT_ACK_CMD		0x98
+#define REG_RBIF_CONT_NACK_CMD		0x9C
+
+/* RBCPR Result status Registers */
+#define REG_RBCPR_RESULT_0		0xA0
+#define REG_RBCPR_RESULT_1		0xA4
+
+#define RBCPR_RESULT_1_SEL_FAST_BITS	3
+#define RBCPR_RESULT_1_SEL_FAST(val)	(val & \
+					((1<<RBCPR_RESULT_1_SEL_FAST_BITS) - 1))
+
+#define RBCPR_RESULT0_BUSY_SHIFT	19
+#define RBCPR_RESULT0_BUSY_MASK		BIT(RBCPR_RESULT0_BUSY_SHIFT)
+#define RBCPR_RESULT0_ERROR_LT0_SHIFT	18
+#define RBCPR_RESULT0_ERROR_SHIFT	6
+#define RBCPR_RESULT0_ERROR_BITS	12
+#define RBCPR_RESULT0_ERROR_MASK	((1<<RBCPR_RESULT0_ERROR_BITS)-1)
+#define RBCPR_RESULT0_ERROR_STEPS_SHIFT	2
+#define RBCPR_RESULT0_ERROR_STEPS_BITS	4
+#define RBCPR_RESULT0_ERROR_STEPS_MASK	((1<<RBCPR_RESULT0_ERROR_STEPS_BITS)-1)
+#define RBCPR_RESULT0_STEP_UP_SHIFT	1
+
+/* RBCPR Interrupt Control Register */
+#define REG_RBIF_IRQ_EN(n)		(0x100 + 4 * n)
+#define REG_RBIF_IRQ_CLEAR		0x110
+#define REG_RBIF_IRQ_STATUS		0x114
+
+#define CPR_INT_DONE		BIT(0)
+#define CPR_INT_MIN		BIT(1)
+#define CPR_INT_DOWN		BIT(2)
+#define CPR_INT_MID		BIT(3)
+#define CPR_INT_UP		BIT(4)
+#define CPR_INT_MAX		BIT(5)
+#define CPR_INT_CLAMP		BIT(6)
+#define CPR_INT_ALL	(CPR_INT_DONE | CPR_INT_MIN | CPR_INT_DOWN | \
+			CPR_INT_MID | CPR_INT_UP | CPR_INT_MAX | CPR_INT_CLAMP)
+#define CPR_INT_DEFAULT	(CPR_INT_UP | CPR_INT_DOWN)
+
+#define CPR_NUM_RING_OSC	8
+
+/* RBCPR Debug Register */
+#define REG_RBCPR_DEBUG1		0x120
+#define RBCPR_DEBUG1_QUOT_FAST_BITS	12
+#define RBCPR_DEBUG1_QUOT_SLOW_BITS	12
+#define RBCPR_DEBUG1_QUOT_SLOW_SHIFT	12
+
+#define RBCPR_DEBUG1_QUOT_FAST(val)	(val & \
+					((1<<RBCPR_DEBUG1_QUOT_FAST_BITS)-1))
+
+#define RBCPR_DEBUG1_QUOT_SLOW(val)	((val>>RBCPR_DEBUG1_QUOT_SLOW_SHIFT) & \
+					((1<<RBCPR_DEBUG1_QUOT_SLOW_BITS)-1))
+
+/* RBCPR Aging Register */
+#define REG_RBCPR_HTOL_AGE		0x160
+#define RBCPR_HTOL_AGE_PAGE		BIT(1)
+#define RBCPR_AGE_DATA_STATUS		BIT(2)
+
+/* RBCPR Clock Control Register */
+#define RBCPR_CLK_SEL_MASK	BIT(0)
+#define RBCPR_CLK_SEL_19P2_MHZ	0
+#define RBCPR_CLK_SEL_AHB_CLK	BIT(0)
+
+/* CPR eFuse parameters */
+#define CPR_FUSE_TARGET_QUOT_BITS	12
+#define CPR_FUSE_TARGET_QUOT_BITS_MASK	((1<<CPR_FUSE_TARGET_QUOT_BITS)-1)
+#define CPR_FUSE_RO_SEL_BITS		3
+#define CPR_FUSE_RO_SEL_BITS_MASK	((1<<CPR_FUSE_RO_SEL_BITS)-1)
+
+#define CPR_FUSE_MIN_QUOT_DIFF		50
+
+#define BYTES_PER_FUSE_ROW		8
+
+#define SPEED_BIN_NONE			UINT_MAX
+
+#define FUSE_REVISION_UNKNOWN		(-1)
+#define FUSE_MAP_NO_MATCH		(-1)
+#define FUSE_PARAM_MATCH_ANY		0xFFFFFFFF
+
+#define FLAGS_IGNORE_1ST_IRQ_STATUS	BIT(0)
+#define FLAGS_SET_MIN_VOLTAGE		BIT(1)
+#define FLAGS_UPLIFT_QUOT_VOLT		BIT(2)
+
+/*
+ * The number of individual aging measurements to perform which are then
+ * averaged together in order to determine the final aging adjustment value.
+ */
+#define CPR_AGING_MEASUREMENT_ITERATIONS	16
+
+/*
+ * Aging measurements for the aged and unaged ring oscillators take place a few
+ * microseconds apart.  If the vdd-supply voltage fluctuates between the two
+ * measurements, then the difference between them will be incorrect.  The
+ * difference could end up too high or too low.  This constant defines the
+ * number of lowest and highest measurements to ignore when averaging.
+ */
+#define CPR_AGING_MEASUREMENT_FILTER	3
+
+#define CPR_REGULATOR_DRIVER_NAME	"qcom,cpr-regulator"
+
+/**
+ * enum vdd_mx_vmin_method - Method to determine vmin for vdd-mx
+ * %VDD_MX_VMIN_APC:			Equal to APC voltage
+ * %VDD_MX_VMIN_APC_CORNER_CEILING:	Equal to PVS corner ceiling voltage
+ * %VDD_MX_VMIN_APC_SLOW_CORNER_CEILING:
+ *					Equal to slow speed corner ceiling
+ * %VDD_MX_VMIN_MX_VMAX:		Equal to specified vdd-mx-vmax voltage
+ * %VDD_MX_VMIN_APC_CORNER_MAP:		Equal to the APC corner mapped MX
+ *					voltage
+ */
+enum vdd_mx_vmin_method {
+	VDD_MX_VMIN_APC,
+	VDD_MX_VMIN_APC_CORNER_CEILING,
+	VDD_MX_VMIN_APC_SLOW_CORNER_CEILING,
+	VDD_MX_VMIN_MX_VMAX,
+	VDD_MX_VMIN_APC_FUSE_CORNER_MAP,
+	VDD_MX_VMIN_APC_CORNER_MAP,
+};
+
+#define CPR_CORNER_MIN		1
+#define CPR_FUSE_CORNER_MIN	1
+/*
+ * This is an arbitrary upper limit which is used in a sanity check in order to
+ * avoid excessive memory allocation due to bad device tree data.
+ */
+#define CPR_FUSE_CORNER_LIMIT	100
+
+struct quot_adjust_info {
+	int speed_bin;
+	int virtual_corner;
+	int quot_adjust;
+};
+
+struct cpr_quot_scale {
+	u32 offset;
+	u32 multiplier;
+};
+
+struct cpr_aging_sensor_info {
+	u32 sensor_id;
+	int initial_quot_diff;
+	int current_quot_diff;
+};
+
+struct cpr_aging_info {
+	struct cpr_aging_sensor_info *sensor_info;
+	int	num_aging_sensors;
+	int	aging_corner;
+	u32	aging_ro_kv;
+	u32	*aging_derate;
+	u32	aging_sensor_bypass;
+	u32	max_aging_margin;
+	u32	aging_ref_voltage;
+	u32	cpr_ro_kv[CPR_NUM_RING_OSC];
+	int	*voltage_adjust;
+
+	bool	cpr_aging_error;
+	bool	cpr_aging_done;
+};
+
+static const char * const vdd_apc_name[] =	{"vdd-apc-optional-prim",
+						"vdd-apc-optional-sec",
+						"vdd-apc"};
+
+enum voltage_change_dir {
+	NO_CHANGE,
+	DOWN,
+	UP,
+};
+
+struct cpr_regulator {
+	struct list_head		list;
+	struct regulator_desc		rdesc;
+	struct regulator_dev		*rdev;
+	bool				vreg_enabled;
+	int				corner;
+	int				ceiling_max;
+	struct dentry			*debugfs;
+	struct device			*dev;
+
+	/* eFuse parameters */
+	phys_addr_t	efuse_addr;
+	void __iomem	*efuse_base;
+	u64		*remapped_row;
+	u32		remapped_row_base;
+	int		num_remapped_rows;
+
+	/* Process voltage parameters */
+	u32		*pvs_corner_v;
+	/* Process voltage variables */
+	u32		pvs_bin;
+	u32		speed_bin;
+	u32		pvs_version;
+
+	/* APC voltage regulator */
+	struct regulator	*vdd_apc;
+
+	/* Dependency parameters */
+	struct regulator	*vdd_mx;
+	int			vdd_mx_vmax;
+	int			vdd_mx_vmin_method;
+	int			vdd_mx_vmin;
+	int			*vdd_mx_corner_map;
+
+	struct regulator	*rpm_apc_vreg;
+	int			*rpm_apc_corner_map;
+
+	/* mem-acc regulator */
+	struct regulator	*mem_acc_vreg;
+
+	/* thermal monitor */
+	int			tsens_id;
+	int			cpr_disable_temp_threshold;
+	int			cpr_enable_temp_threshold;
+	bool			cpr_disable_on_temperature;
+	bool			cpr_thermal_disable;
+	struct threshold_info	tsens_threshold_config;
+
+	/* CPR parameters */
+	u32		num_fuse_corners;
+	u64		cpr_fuse_bits;
+	bool		cpr_fuse_disable;
+	bool		cpr_fuse_local;
+	bool		cpr_fuse_redundant;
+	int		cpr_fuse_revision;
+	int		cpr_fuse_map_count;
+	int		cpr_fuse_map_match;
+	int		*cpr_fuse_target_quot;
+	int		*cpr_fuse_ro_sel;
+	int		*fuse_quot_offset;
+	int		gcnt;
+
+	unsigned int	cpr_irq;
+	void __iomem	*rbcpr_base;
+	phys_addr_t	rbcpr_clk_addr;
+	struct mutex	cpr_mutex;
+
+	int		*cpr_max_ceiling;
+	int		*ceiling_volt;
+	int		*floor_volt;
+	int		*fuse_ceiling_volt;
+	int		*fuse_floor_volt;
+	int		*last_volt;
+	int		*open_loop_volt;
+	int		step_volt;
+
+	int		*save_ctl;
+	int		*save_irq;
+
+	int		*vsens_corner_map;
+	/* vsens status */
+	bool		vsens_enabled;
+	/* vsens regulators */
+	struct regulator	*vdd_vsens_corner;
+	struct regulator	*vdd_vsens_voltage;
+
+	/* Config parameters */
+	bool		enable;
+	u32		ref_clk_khz;
+	u32		timer_delay_us;
+	u32		timer_cons_up;
+	u32		timer_cons_down;
+	u32		irq_line;
+	u32		*step_quotient;
+	u32		up_threshold;
+	u32		down_threshold;
+	u32		idle_clocks;
+	u32		gcnt_time_us;
+	u32		clamp_timer_interval;
+	u32		vdd_apc_step_up_limit;
+	u32		vdd_apc_step_down_limit;
+	u32		flags;
+	int		*corner_map;
+	u32		num_corners;
+	int		*quot_adjust;
+	int		*mem_acc_corner_map;
+
+	int			num_adj_cpus;
+	int			online_cpus;
+	int			*adj_cpus;
+	int			**adj_cpus_save_ctl;
+	int			**adj_cpus_save_irq;
+	int			**adj_cpus_last_volt;
+	int			**adj_cpus_quot_adjust;
+	int			**adj_cpus_open_loop_volt;
+	bool			adj_cpus_open_loop_volt_as_ceiling;
+	struct notifier_block	cpu_notifier;
+	cpumask_t		cpu_mask;
+	bool			cpr_disabled_in_pc;
+	struct notifier_block	pm_notifier;
+
+	bool		is_cpr_suspended;
+	bool		skip_voltage_change_during_suspend;
+
+	struct cpr_aging_info	*aging_info;
+
+	struct notifier_block	panic_notifier;
+};
+
+#define CPR_DEBUG_MASK_IRQ	BIT(0)
+#define CPR_DEBUG_MASK_API	BIT(1)
+
+static int cpr_debug_enable;
+#if defined(CONFIG_DEBUG_FS)
+static struct dentry *cpr_debugfs_base;
+#endif
+
+static DEFINE_MUTEX(cpr_regulator_list_mutex);
+static LIST_HEAD(cpr_regulator_list);
+
+module_param_named(debug_enable, cpr_debug_enable, int, S_IRUGO | S_IWUSR);
+#define cpr_debug(cpr_vreg, message, ...) \
+	do { \
+		if (cpr_debug_enable & CPR_DEBUG_MASK_API) \
+			pr_info("%s: " message, (cpr_vreg)->rdesc.name, \
+				##__VA_ARGS__); \
+	} while (0)
+#define cpr_debug_irq(cpr_vreg, message, ...) \
+	do { \
+		if (cpr_debug_enable & CPR_DEBUG_MASK_IRQ) \
+			pr_info("%s: " message, (cpr_vreg)->rdesc.name, \
+				##__VA_ARGS__); \
+		else \
+			pr_debug("%s: " message, (cpr_vreg)->rdesc.name, \
+				##__VA_ARGS__); \
+	} while (0)
+#define cpr_info(cpr_vreg, message, ...) \
+	pr_info("%s: " message, (cpr_vreg)->rdesc.name, ##__VA_ARGS__)
+#define cpr_err(cpr_vreg, message, ...) \
+	pr_err("%s: " message, (cpr_vreg)->rdesc.name, ##__VA_ARGS__)
+
+static u64 cpr_read_remapped_efuse_row(struct cpr_regulator *cpr_vreg,
+					u32 row_num)
+{
+	if (row_num - cpr_vreg->remapped_row_base
+			>= cpr_vreg->num_remapped_rows) {
+		cpr_err(cpr_vreg, "invalid row=%u, max remapped row=%u\n",
+			row_num, cpr_vreg->remapped_row_base
+					+ cpr_vreg->num_remapped_rows - 1);
+		return 0;
+	}
+
+	return cpr_vreg->remapped_row[row_num - cpr_vreg->remapped_row_base];
+}
+
+static u64 cpr_read_efuse_row(struct cpr_regulator *cpr_vreg, u32 row_num,
+				bool use_tz_api)
+{
+	int rc;
+	u64 efuse_bits;
+	struct scm_desc desc = {0};
+	struct cpr_read_req {
+		u32 row_address;
+		int addr_type;
+	} req;
+
+	struct cpr_read_rsp {
+		u32 row_data[2];
+		u32 status;
+	} rsp;
+
+	if (cpr_vreg->remapped_row && row_num >= cpr_vreg->remapped_row_base)
+		return cpr_read_remapped_efuse_row(cpr_vreg, row_num);
+
+	if (!use_tz_api) {
+		efuse_bits = readq_relaxed(cpr_vreg->efuse_base
+			+ row_num * BYTES_PER_FUSE_ROW);
+		return efuse_bits;
+	}
+
+	desc.args[0] = req.row_address = cpr_vreg->efuse_addr +
+					row_num * BYTES_PER_FUSE_ROW;
+	desc.args[1] = req.addr_type = 0;
+	desc.arginfo = SCM_ARGS(2);
+	efuse_bits = 0;
+
+	if (!is_scm_armv8()) {
+		rc = scm_call(SCM_SVC_FUSE, SCM_FUSE_READ,
+			&req, sizeof(req), &rsp, sizeof(rsp));
+	} else {
+		rc = scm_call2(SCM_SIP_FNID(SCM_SVC_FUSE, SCM_FUSE_READ),
+				&desc);
+		rsp.row_data[0] = desc.ret[0];
+		rsp.row_data[1] = desc.ret[1];
+		rsp.status = desc.ret[2];
+	}
+
+	if (rc) {
+		cpr_err(cpr_vreg, "read row %d failed, err code = %d",
+			row_num, rc);
+	} else {
+		efuse_bits = ((u64)(rsp.row_data[1]) << 32) +
+				(u64)rsp.row_data[0];
+	}
+
+	return efuse_bits;
+}
+
+/**
+ * cpr_read_efuse_param() - read a parameter from one or two eFuse rows
+ * @cpr_vreg:	Pointer to cpr_regulator struct for this regulator.
+ * @row_start:	Fuse row number to start reading from.
+ * @bit_start:	The LSB of the parameter to read from the fuse.
+ * @bit_len:	The length of the parameter in bits.
+ * @use_tz_api:	Flag to indicate if an SCM call should be used to read the fuse.
+ *
+ * This function reads a parameter of specified offset and bit size out of one
+ * or two consecutive eFuse rows.  This allows for the reading of parameters
+ * that happen to be split between two eFuse rows.
+ *
+ * Returns the fuse parameter on success or 0 on failure.
+ */
+static u64 cpr_read_efuse_param(struct cpr_regulator *cpr_vreg, int row_start,
+		int bit_start, int bit_len, bool use_tz_api)
+{
+	u64 fuse[2];
+	u64 param = 0;
+	int bits_first, bits_second;
+
+	if (bit_start < 0) {
+		cpr_err(cpr_vreg, "Invalid LSB = %d specified\n", bit_start);
+		return 0;
+	}
+
+	if (bit_len < 0 || bit_len > 64) {
+		cpr_err(cpr_vreg, "Invalid bit length = %d specified\n",
+			bit_len);
+		return 0;
+	}
+
+	/* Allow bit indexing to start beyond the end of the start row. */
+	if (bit_start >= 64) {
+		row_start += bit_start >> 6; /* equivalent to bit_start / 64 */
+		bit_start &= 0x3F;
+	}
+
+	fuse[0] = cpr_read_efuse_row(cpr_vreg, row_start, use_tz_api);
+
+	if (bit_start == 0 && bit_len == 64) {
+		param = fuse[0];
+	} else if (bit_start + bit_len <= 64) {
+		param = (fuse[0] >> bit_start) & ((1ULL << bit_len) - 1);
+	} else {
+		fuse[1] = cpr_read_efuse_row(cpr_vreg, row_start + 1,
+						use_tz_api);
+		bits_first = 64 - bit_start;
+		bits_second = bit_len - bits_first;
+		param = (fuse[0] >> bit_start) & ((1ULL << bits_first) - 1);
+		param |= (fuse[1] & ((1ULL << bits_second) - 1)) << bits_first;
+	}
+
+	return param;
+}
+
+static bool cpr_is_allowed(struct cpr_regulator *cpr_vreg)
+{
+	if (cpr_vreg->cpr_fuse_disable || !cpr_vreg->enable ||
+				cpr_vreg->cpr_thermal_disable)
+		return false;
+	else
+		return true;
+}
+
+static void cpr_write(struct cpr_regulator *cpr_vreg, u32 offset, u32 value)
+{
+	writel_relaxed(value, cpr_vreg->rbcpr_base + offset);
+}
+
+static u32 cpr_read(struct cpr_regulator *cpr_vreg, u32 offset)
+{
+	return readl_relaxed(cpr_vreg->rbcpr_base + offset);
+}
+
+static void cpr_masked_write(struct cpr_regulator *cpr_vreg, u32 offset,
+			     u32 mask, u32 value)
+{
+	u32 reg_val;
+
+	reg_val = readl_relaxed(cpr_vreg->rbcpr_base + offset);
+	reg_val &= ~mask;
+	reg_val |= value & mask;
+	writel_relaxed(reg_val, cpr_vreg->rbcpr_base + offset);
+}
+
+static void cpr_irq_clr(struct cpr_regulator *cpr_vreg)
+{
+	cpr_write(cpr_vreg, REG_RBIF_IRQ_CLEAR, CPR_INT_ALL);
+}
+
+static void cpr_irq_clr_nack(struct cpr_regulator *cpr_vreg)
+{
+	cpr_irq_clr(cpr_vreg);
+	cpr_write(cpr_vreg, REG_RBIF_CONT_NACK_CMD, 1);
+}
+
+static void cpr_irq_clr_ack(struct cpr_regulator *cpr_vreg)
+{
+	cpr_irq_clr(cpr_vreg);
+	cpr_write(cpr_vreg, REG_RBIF_CONT_ACK_CMD, 1);
+}
+
+static void cpr_irq_set(struct cpr_regulator *cpr_vreg, u32 int_bits)
+{
+	cpr_write(cpr_vreg, REG_RBIF_IRQ_EN(cpr_vreg->irq_line), int_bits);
+}
+
+static void cpr_ctl_modify(struct cpr_regulator *cpr_vreg, u32 mask, u32 value)
+{
+	cpr_masked_write(cpr_vreg, REG_RBCPR_CTL, mask, value);
+}
+
+static void cpr_ctl_enable(struct cpr_regulator *cpr_vreg, int corner)
+{
+	u32 val;
+
+	if (cpr_vreg->is_cpr_suspended)
+		return;
+
+	/* Program Consecutive Up & Down */
+	val = ((cpr_vreg->timer_cons_down & RBIF_TIMER_ADJ_CONS_DOWN_MASK)
+			<< RBIF_TIMER_ADJ_CONS_DOWN_SHIFT) |
+		(cpr_vreg->timer_cons_up & RBIF_TIMER_ADJ_CONS_UP_MASK);
+	cpr_masked_write(cpr_vreg, REG_RBIF_TIMER_ADJUST,
+			RBIF_TIMER_ADJ_CONS_UP_MASK |
+			RBIF_TIMER_ADJ_CONS_DOWN_MASK, val);
+	cpr_masked_write(cpr_vreg, REG_RBCPR_CTL,
+			RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
+			RBCPR_CTL_SW_AUTO_CONT_ACK_EN,
+			cpr_vreg->save_ctl[corner]);
+	cpr_irq_set(cpr_vreg, cpr_vreg->save_irq[corner]);
+
+	if (cpr_is_allowed(cpr_vreg) && cpr_vreg->vreg_enabled &&
+	    (cpr_vreg->ceiling_volt[corner] >
+		cpr_vreg->floor_volt[corner]))
+		val = RBCPR_CTL_LOOP_EN;
+	else
+		val = 0;
+	cpr_ctl_modify(cpr_vreg, RBCPR_CTL_LOOP_EN, val);
+}
+
+static void cpr_ctl_disable(struct cpr_regulator *cpr_vreg)
+{
+	if (cpr_vreg->is_cpr_suspended)
+		return;
+
+	cpr_irq_set(cpr_vreg, 0);
+	cpr_ctl_modify(cpr_vreg, RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
+			RBCPR_CTL_SW_AUTO_CONT_ACK_EN, 0);
+	cpr_masked_write(cpr_vreg, REG_RBIF_TIMER_ADJUST,
+			RBIF_TIMER_ADJ_CONS_UP_MASK |
+			RBIF_TIMER_ADJ_CONS_DOWN_MASK, 0);
+	cpr_irq_clr(cpr_vreg);
+	cpr_write(cpr_vreg, REG_RBIF_CONT_ACK_CMD, 1);
+	cpr_write(cpr_vreg, REG_RBIF_CONT_NACK_CMD, 1);
+	cpr_ctl_modify(cpr_vreg, RBCPR_CTL_LOOP_EN, 0);
+}
+
+static bool cpr_ctl_is_enabled(struct cpr_regulator *cpr_vreg)
+{
+	u32 reg_val;
+
+	reg_val = cpr_read(cpr_vreg, REG_RBCPR_CTL);
+	return reg_val & RBCPR_CTL_LOOP_EN;
+}
+
+static bool cpr_ctl_is_busy(struct cpr_regulator *cpr_vreg)
+{
+	u32 reg_val;
+
+	reg_val = cpr_read(cpr_vreg, REG_RBCPR_RESULT_0);
+	return reg_val & RBCPR_RESULT0_BUSY_MASK;
+}
+
+static void cpr_corner_save(struct cpr_regulator *cpr_vreg, int corner)
+{
+	cpr_vreg->save_ctl[corner] = cpr_read(cpr_vreg, REG_RBCPR_CTL);
+	cpr_vreg->save_irq[corner] =
+		cpr_read(cpr_vreg, REG_RBIF_IRQ_EN(cpr_vreg->irq_line));
+}
+
+static void cpr_corner_restore(struct cpr_regulator *cpr_vreg, int corner)
+{
+	u32 gcnt, ctl, irq, ro_sel, step_quot;
+	int fuse_corner = cpr_vreg->corner_map[corner];
+	int i;
+
+	ro_sel = cpr_vreg->cpr_fuse_ro_sel[fuse_corner];
+	gcnt = cpr_vreg->gcnt | (cpr_vreg->cpr_fuse_target_quot[fuse_corner] -
+					cpr_vreg->quot_adjust[corner]);
+
+	/* Program the step quotient and idle clocks */
+	step_quot = ((cpr_vreg->idle_clocks & RBCPR_STEP_QUOT_IDLE_CLK_MASK)
+			<< RBCPR_STEP_QUOT_IDLE_CLK_SHIFT) |
+		(cpr_vreg->step_quotient[fuse_corner]
+			& RBCPR_STEP_QUOT_STEPQUOT_MASK);
+	cpr_write(cpr_vreg, REG_RBCPR_STEP_QUOT, step_quot);
+
+	/* Clear the target quotient value and gate count of all ROs */
+	for (i = 0; i < CPR_NUM_RING_OSC; i++)
+		cpr_write(cpr_vreg, REG_RBCPR_GCNT_TARGET(i), 0);
+
+	cpr_write(cpr_vreg, REG_RBCPR_GCNT_TARGET(ro_sel), gcnt);
+	ctl = cpr_vreg->save_ctl[corner];
+	cpr_write(cpr_vreg, REG_RBCPR_CTL, ctl);
+	irq = cpr_vreg->save_irq[corner];
+	cpr_irq_set(cpr_vreg, irq);
+	cpr_debug(cpr_vreg, "gcnt = 0x%08x, ctl = 0x%08x, irq = 0x%08x\n",
+		  gcnt, ctl, irq);
+}
+
+static void cpr_corner_switch(struct cpr_regulator *cpr_vreg, int corner)
+{
+	if (cpr_vreg->corner == corner)
+		return;
+
+	cpr_corner_restore(cpr_vreg, corner);
+}
+
+static int cpr_apc_set(struct cpr_regulator *cpr_vreg, u32 new_volt)
+{
+	int max_volt, rc;
+
+	max_volt = cpr_vreg->ceiling_max;
+	rc = regulator_set_voltage(cpr_vreg->vdd_apc, new_volt, max_volt);
+	if (rc)
+		cpr_err(cpr_vreg, "set: vdd_apc = %d uV: rc=%d\n",
+			new_volt, rc);
+	return rc;
+}
+
+static int cpr_mx_get(struct cpr_regulator *cpr_vreg, int corner, int apc_volt)
+{
+	int vdd_mx;
+	int fuse_corner = cpr_vreg->corner_map[corner];
+	int highest_fuse_corner = cpr_vreg->num_fuse_corners;
+
+	switch (cpr_vreg->vdd_mx_vmin_method) {
+	case VDD_MX_VMIN_APC:
+		vdd_mx = apc_volt;
+		break;
+	case VDD_MX_VMIN_APC_CORNER_CEILING:
+		vdd_mx = cpr_vreg->fuse_ceiling_volt[fuse_corner];
+		break;
+	case VDD_MX_VMIN_APC_SLOW_CORNER_CEILING:
+		vdd_mx = cpr_vreg->fuse_ceiling_volt[highest_fuse_corner];
+		break;
+	case VDD_MX_VMIN_MX_VMAX:
+		vdd_mx = cpr_vreg->vdd_mx_vmax;
+		break;
+	case VDD_MX_VMIN_APC_FUSE_CORNER_MAP:
+		vdd_mx = cpr_vreg->vdd_mx_corner_map[fuse_corner];
+		break;
+	case VDD_MX_VMIN_APC_CORNER_MAP:
+		vdd_mx = cpr_vreg->vdd_mx_corner_map[corner];
+		break;
+	default:
+		vdd_mx = 0;
+		break;
+	}
+
+	return vdd_mx;
+}
+
+static int cpr_mx_set(struct cpr_regulator *cpr_vreg, int corner,
+		      int vdd_mx_vmin)
+{
+	int rc;
+	int fuse_corner = cpr_vreg->corner_map[corner];
+
+	rc = regulator_set_voltage(cpr_vreg->vdd_mx, vdd_mx_vmin,
+				   cpr_vreg->vdd_mx_vmax);
+	cpr_debug(cpr_vreg, "[corner:%d, fuse_corner:%d] %d uV\n", corner,
+			fuse_corner, vdd_mx_vmin);
+
+	if (!rc) {
+		cpr_vreg->vdd_mx_vmin = vdd_mx_vmin;
+	} else {
+		cpr_err(cpr_vreg, "set: vdd_mx [corner:%d, fuse_corner:%d] = %d uV failed: rc=%d\n",
+			corner, fuse_corner, vdd_mx_vmin, rc);
+	}
+	return rc;
+}
+
+static int cpr_scale_voltage(struct cpr_regulator *cpr_vreg, int corner,
+			     int new_apc_volt, enum voltage_change_dir dir)
+{
+	int rc = 0, vdd_mx_vmin = 0;
+	int mem_acc_corner = cpr_vreg->mem_acc_corner_map[corner];
+	int fuse_corner = cpr_vreg->corner_map[corner];
+	int apc_corner, vsens_corner;
+
+	/* Determine the vdd_mx voltage */
+	if (dir != NO_CHANGE && cpr_vreg->vdd_mx != NULL)
+		vdd_mx_vmin = cpr_mx_get(cpr_vreg, corner, new_apc_volt);
+
+
+	if (cpr_vreg->vdd_vsens_voltage && cpr_vreg->vsens_enabled) {
+		rc = regulator_disable(cpr_vreg->vdd_vsens_voltage);
+		if (!rc)
+			cpr_vreg->vsens_enabled = false;
+	}
+
+	if (dir == DOWN) {
+		if (!rc && cpr_vreg->mem_acc_vreg)
+			rc = regulator_set_voltage(cpr_vreg->mem_acc_vreg,
+					mem_acc_corner, mem_acc_corner);
+		if (!rc && cpr_vreg->rpm_apc_vreg) {
+			apc_corner = cpr_vreg->rpm_apc_corner_map[corner];
+			rc = regulator_set_voltage(cpr_vreg->rpm_apc_vreg,
+						apc_corner, apc_corner);
+			if (rc)
+				cpr_err(cpr_vreg, "apc_corner voting failed rc=%d\n",
+						rc);
+		}
+	}
+
+	if (!rc && vdd_mx_vmin && dir == UP) {
+		if (vdd_mx_vmin != cpr_vreg->vdd_mx_vmin)
+			rc = cpr_mx_set(cpr_vreg, corner, vdd_mx_vmin);
+	}
+
+	if (!rc)
+		rc = cpr_apc_set(cpr_vreg, new_apc_volt);
+
+	if (dir == UP) {
+		if (!rc && cpr_vreg->mem_acc_vreg)
+			rc = regulator_set_voltage(cpr_vreg->mem_acc_vreg,
+					mem_acc_corner, mem_acc_corner);
+		if (!rc && cpr_vreg->rpm_apc_vreg) {
+			apc_corner = cpr_vreg->rpm_apc_corner_map[corner];
+			rc = regulator_set_voltage(cpr_vreg->rpm_apc_vreg,
+						apc_corner, apc_corner);
+			if (rc)
+				cpr_err(cpr_vreg, "apc_corner voting failed rc=%d\n",
+						rc);
+		}
+	}
+
+	if (!rc && vdd_mx_vmin && dir == DOWN) {
+		if (vdd_mx_vmin != cpr_vreg->vdd_mx_vmin)
+			rc = cpr_mx_set(cpr_vreg, corner, vdd_mx_vmin);
+	}
+
+	if (!rc && cpr_vreg->vdd_vsens_corner) {
+		vsens_corner = cpr_vreg->vsens_corner_map[fuse_corner];
+		rc = regulator_set_voltage(cpr_vreg->vdd_vsens_corner,
+					vsens_corner, vsens_corner);
+	}
+	if (!rc && cpr_vreg->vdd_vsens_voltage) {
+		rc = regulator_set_voltage(cpr_vreg->vdd_vsens_voltage,
+					cpr_vreg->floor_volt[corner],
+					cpr_vreg->ceiling_volt[corner]);
+		if (!rc && !cpr_vreg->vsens_enabled) {
+			rc = regulator_enable(cpr_vreg->vdd_vsens_voltage);
+			if (!rc)
+				cpr_vreg->vsens_enabled = true;
+		}
+	}
+
+	return rc;
+}
+
+static void cpr_scale(struct cpr_regulator *cpr_vreg,
+		      enum voltage_change_dir dir)
+{
+	u32 reg_val, error_steps, reg_mask;
+	int last_volt, new_volt, corner, fuse_corner;
+	u32 gcnt, quot;
+
+	corner = cpr_vreg->corner;
+	fuse_corner = cpr_vreg->corner_map[corner];
+
+	reg_val = cpr_read(cpr_vreg, REG_RBCPR_RESULT_0);
+
+	error_steps = (reg_val >> RBCPR_RESULT0_ERROR_STEPS_SHIFT)
+				& RBCPR_RESULT0_ERROR_STEPS_MASK;
+	last_volt = cpr_vreg->last_volt[corner];
+
+	cpr_debug_irq(cpr_vreg,
+			"last_volt[corner:%d, fuse_corner:%d] = %d uV\n",
+			corner, fuse_corner, last_volt);
+
+	gcnt = cpr_read(cpr_vreg, REG_RBCPR_GCNT_TARGET
+			(cpr_vreg->cpr_fuse_ro_sel[fuse_corner]));
+	quot = gcnt & ((1 << RBCPR_GCNT_TARGET_GCNT_SHIFT) - 1);
+
+	if (dir == UP) {
+		if (cpr_vreg->clamp_timer_interval
+				&& error_steps < cpr_vreg->up_threshold) {
+			/*
+			 * Handle the case where another measurement started
+			 * after the interrupt was triggered due to a core
+			 * exiting from power collapse.
+			 */
+			error_steps = max(cpr_vreg->up_threshold,
+					cpr_vreg->vdd_apc_step_up_limit);
+		}
+		cpr_debug_irq(cpr_vreg,
+				"Up: cpr status = 0x%08x (error_steps=%d)\n",
+				reg_val, error_steps);
+
+		if (last_volt >= cpr_vreg->ceiling_volt[corner]) {
+			cpr_debug_irq(cpr_vreg,
+			"[corn:%d, fuse_corn:%d] @ ceiling: %d >= %d: NACK\n",
+				corner, fuse_corner, last_volt,
+				cpr_vreg->ceiling_volt[corner]);
+			cpr_irq_clr_nack(cpr_vreg);
+
+			cpr_debug_irq(cpr_vreg, "gcnt = 0x%08x (quot = %d)\n",
+					gcnt, quot);
+
+			/* Maximize the UP threshold */
+			reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK <<
+					RBCPR_CTL_UP_THRESHOLD_SHIFT;
+			reg_val = reg_mask;
+			cpr_ctl_modify(cpr_vreg, reg_mask, reg_val);
+
+			/* Disable UP interrupt */
+			cpr_irq_set(cpr_vreg, CPR_INT_DEFAULT & ~CPR_INT_UP);
+
+			return;
+		}
+
+		if (error_steps > cpr_vreg->vdd_apc_step_up_limit) {
+			cpr_debug_irq(cpr_vreg,
+				      "%d is over up-limit(%d): Clamp\n",
+				      error_steps,
+				      cpr_vreg->vdd_apc_step_up_limit);
+			error_steps = cpr_vreg->vdd_apc_step_up_limit;
+		}
+
+		/* Calculate new voltage */
+		new_volt = last_volt + (error_steps * cpr_vreg->step_volt);
+		if (new_volt > cpr_vreg->ceiling_volt[corner]) {
+			cpr_debug_irq(cpr_vreg,
+				      "new_volt(%d) >= ceiling(%d): Clamp\n",
+				      new_volt,
+				      cpr_vreg->ceiling_volt[corner]);
+
+			new_volt = cpr_vreg->ceiling_volt[corner];
+		}
+
+		if (cpr_scale_voltage(cpr_vreg, corner, new_volt, dir)) {
+			cpr_irq_clr_nack(cpr_vreg);
+			return;
+		}
+		cpr_vreg->last_volt[corner] = new_volt;
+
+		/* Disable auto nack down */
+		reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+		reg_val = 0;
+
+		cpr_ctl_modify(cpr_vreg, reg_mask, reg_val);
+
+		/* Re-enable default interrupts */
+		cpr_irq_set(cpr_vreg, CPR_INT_DEFAULT);
+
+		/* Ack */
+		cpr_irq_clr_ack(cpr_vreg);
+
+		cpr_debug_irq(cpr_vreg,
+			"UP: -> new_volt[corner:%d, fuse_corner:%d] = %d uV\n",
+			corner, fuse_corner, new_volt);
+	} else if (dir == DOWN) {
+		if (cpr_vreg->clamp_timer_interval
+				&& error_steps < cpr_vreg->down_threshold) {
+			/*
+			 * Handle the case where another measurement started
+			 * after the interrupt was triggered due to a core
+			 * exiting from power collapse.
+			 */
+			error_steps = max(cpr_vreg->down_threshold,
+					cpr_vreg->vdd_apc_step_down_limit);
+		}
+		cpr_debug_irq(cpr_vreg,
+			      "Down: cpr status = 0x%08x (error_steps=%d)\n",
+			      reg_val, error_steps);
+
+		if (last_volt <= cpr_vreg->floor_volt[corner]) {
+			cpr_debug_irq(cpr_vreg,
+			"[corn:%d, fuse_corner:%d] @ floor: %d <= %d: NACK\n",
+				corner, fuse_corner, last_volt,
+				cpr_vreg->floor_volt[corner]);
+			cpr_irq_clr_nack(cpr_vreg);
+
+			cpr_debug_irq(cpr_vreg, "gcnt = 0x%08x (quot = %d)\n",
+					gcnt, quot);
+
+			/* Enable auto nack down */
+			reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+			reg_val = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+
+			cpr_ctl_modify(cpr_vreg, reg_mask, reg_val);
+
+			/* Disable DOWN interrupt */
+			cpr_irq_set(cpr_vreg, CPR_INT_DEFAULT & ~CPR_INT_DOWN);
+
+			return;
+		}
+
+		if (error_steps > cpr_vreg->vdd_apc_step_down_limit) {
+			cpr_debug_irq(cpr_vreg,
+				      "%d is over down-limit(%d): Clamp\n",
+				      error_steps,
+				      cpr_vreg->vdd_apc_step_down_limit);
+			error_steps = cpr_vreg->vdd_apc_step_down_limit;
+		}
+
+		/* Calculate new voltage */
+		new_volt = last_volt - (error_steps * cpr_vreg->step_volt);
+		if (new_volt < cpr_vreg->floor_volt[corner]) {
+			cpr_debug_irq(cpr_vreg,
+				      "new_volt(%d) < floor(%d): Clamp\n",
+				      new_volt,
+				      cpr_vreg->floor_volt[corner]);
+			new_volt = cpr_vreg->floor_volt[corner];
+		}
+
+		if (cpr_scale_voltage(cpr_vreg, corner, new_volt, dir)) {
+			cpr_irq_clr_nack(cpr_vreg);
+			return;
+		}
+		cpr_vreg->last_volt[corner] = new_volt;
+
+		/* Restore default threshold for UP */
+		reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK <<
+				RBCPR_CTL_UP_THRESHOLD_SHIFT;
+		reg_val = cpr_vreg->up_threshold <<
+				RBCPR_CTL_UP_THRESHOLD_SHIFT;
+		cpr_ctl_modify(cpr_vreg, reg_mask, reg_val);
+
+		/* Re-enable default interrupts */
+		cpr_irq_set(cpr_vreg, CPR_INT_DEFAULT);
+
+		/* Ack */
+		cpr_irq_clr_ack(cpr_vreg);
+
+		cpr_debug_irq(cpr_vreg,
+		"DOWN: -> new_volt[corner:%d, fuse_corner:%d] = %d uV\n",
+			corner, fuse_corner, new_volt);
+	}
+}
+
+/*
+ * cpr_irq_handler() - interrupt handler for the CPR controller
+ * @irq:	interrupt number (unused)
+ * @dev:	struct cpr_regulator pointer supplied at IRQ request time
+ *
+ * Services the highest-priority pending CPR event: UP/DOWN requests step
+ * the supply voltage via cpr_scale(), MIN/MAX are NACKed since the voltage
+ * cannot move any further in that direction, and MID is informational only.
+ * The measured corner state is saved before releasing the lock.
+ *
+ * NOTE(review): taking a mutex here implies this runs as a threaded IRQ
+ * handler - confirm the request_threaded_irq() call at probe time.
+ */
+static irqreturn_t cpr_irq_handler(int irq, void *dev)
+{
+	struct cpr_regulator *cpr_vreg = dev;
+	u32 reg_val;
+
+	mutex_lock(&cpr_vreg->cpr_mutex);
+
+	reg_val = cpr_read(cpr_vreg, REG_RBIF_IRQ_STATUS);
+	/*
+	 * Hardware quirk: controller versions at or below RBCPR_VER_2 set
+	 * FLAGS_IGNORE_1ST_IRQ_STATUS in cpr_config(), in which case the
+	 * first status read is discarded and the register is read again.
+	 */
+	if (cpr_vreg->flags & FLAGS_IGNORE_1ST_IRQ_STATUS)
+		reg_val = cpr_read(cpr_vreg, REG_RBIF_IRQ_STATUS);
+
+	cpr_debug_irq(cpr_vreg, "IRQ_STATUS = 0x%02X\n", reg_val);
+
+	/*
+	 * Bail out if CPR was switched off, is still mid-measurement, or is
+	 * not allowed to run at all (the last case suggests a spurious or
+	 * broken interrupt).
+	 */
+	if (!cpr_ctl_is_enabled(cpr_vreg)) {
+		cpr_debug_irq(cpr_vreg, "CPR is disabled\n");
+		goto _exit;
+	} else if (cpr_ctl_is_busy(cpr_vreg)
+			&& !cpr_vreg->clamp_timer_interval) {
+		cpr_debug_irq(cpr_vreg, "CPR measurement is not ready\n");
+		goto _exit;
+	} else if (!cpr_is_allowed(cpr_vreg)) {
+		reg_val = cpr_read(cpr_vreg, REG_RBCPR_CTL);
+		cpr_err(cpr_vreg, "Interrupt broken? RBCPR_CTL = 0x%02X\n",
+			reg_val);
+		goto _exit;
+	}
+
+	/* Following sequence of handling is as per each IRQ's priority */
+	if (reg_val & CPR_INT_UP) {
+		cpr_scale(cpr_vreg, UP);
+	} else if (reg_val & CPR_INT_DOWN) {
+		cpr_scale(cpr_vreg, DOWN);
+	} else if (reg_val & CPR_INT_MIN) {
+		cpr_irq_clr_nack(cpr_vreg);
+	} else if (reg_val & CPR_INT_MAX) {
+		cpr_irq_clr_nack(cpr_vreg);
+	} else if (reg_val & CPR_INT_MID) {
+		/* RBCPR_CTL_SW_AUTO_CONT_ACK_EN is enabled */
+		cpr_debug_irq(cpr_vreg, "IRQ occurred for Mid Flag\n");
+	} else {
+		cpr_debug_irq(cpr_vreg,
+			"IRQ occurred for unknown flag (0x%08x)\n", reg_val);
+	}
+
+	/* Save register values for the corner */
+	cpr_corner_save(cpr_vreg, cpr_vreg->corner);
+
+_exit:
+	mutex_unlock(&cpr_vreg->cpr_mutex);
+	return IRQ_HANDLED;
+}
+
+/**
+ * cmp_int() - int comparison function to be passed into the sort() function
+ *		which leads to ascending sorting
+ * @a:			First int value
+ * @b:			Second int value
+ *
+ * Return: >0 if a > b, 0 if a == b, <0 if a < b
+ */
+static int cmp_int(const void *a, const void *b)
+{
+	int x = *(const int *)a;
+	int y = *(const int *)b;
+
+	/*
+	 * Do not compute "x - y" directly: the subtraction can overflow
+	 * (undefined behavior) when the operands have opposite signs and
+	 * large magnitudes.  The comparison form is always well defined.
+	 */
+	return (x > y) - (x < y);
+}
+
+/*
+ * cpr_get_aging_quot_delta() - measure the aging induced quotient delta for
+ *		one aging sensor
+ * @cpr_vreg:		CPR regulator descriptor
+ * @aging_sensor_info:	sensor to measure; its current_quot_diff field is
+ *			updated on success
+ *
+ * Reconfigures the CPR block for one-shot age-page measurements with only
+ * the aging sensor unmasked, collects up to CPR_AGING_MEASUREMENT_ITERATIONS
+ * samples, discards the CPR_AGING_MEASUREMENT_FILTER lowest and highest
+ * ones, and stores the average of the remainder.  All controller registers
+ * modified here are restored in the _exit path.
+ *
+ * Uses GFP_ATOMIC because this may run from smp_call_function context (see
+ * cpr_de_aging_adjustment()).
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure, -EBUSY if a
+ * measurement timed out or too few valid samples were collected.
+ */
+static int cpr_get_aging_quot_delta(struct cpr_regulator *cpr_vreg,
+			struct cpr_aging_sensor_info *aging_sensor_info)
+{
+	int quot_min, quot_max, is_aging_measurement, aging_measurement_count;
+	int quot_min_scaled, quot_max_scaled, quot_delta_scaled_sum;
+	int retries, rc = 0, sel_fast = 0, i, quot_delta_scaled;
+	u32 val, gcnt_ref, gcnt;
+	int *quot_delta_results, filtered_count;
+
+
+	quot_delta_results = kcalloc(CPR_AGING_MEASUREMENT_ITERATIONS,
+			sizeof(*quot_delta_results), GFP_ATOMIC);
+	if (!quot_delta_results)
+		return -ENOMEM;
+
+	/* Clear the target quotient value and gate count of all ROs */
+	for (i = 0; i < CPR_NUM_RING_OSC; i++)
+		cpr_write(cpr_vreg, REG_RBCPR_GCNT_TARGET(i), 0);
+
+	/* Program GCNT0/1 for getting aging data */
+	gcnt_ref = (cpr_vreg->ref_clk_khz * cpr_vreg->gcnt_time_us) / 1000;
+	/* Use a 1.5x gate window; samples are rescaled to gcnt_ref below. */
+	gcnt = gcnt_ref * 3 / 2;
+	val = (gcnt & RBCPR_GCNT_TARGET_GCNT_MASK) <<
+			RBCPR_GCNT_TARGET_GCNT_SHIFT;
+	cpr_write(cpr_vreg, REG_RBCPR_GCNT_TARGET(0), val);
+	cpr_write(cpr_vreg, REG_RBCPR_GCNT_TARGET(1), val);
+
+	val = cpr_read(cpr_vreg, REG_RBCPR_GCNT_TARGET(0));
+	cpr_debug(cpr_vreg, "RBCPR_GCNT_TARGET0 = 0x%08x\n", val);
+
+	val = cpr_read(cpr_vreg, REG_RBCPR_GCNT_TARGET(1));
+	cpr_debug(cpr_vreg, "RBCPR_GCNT_TARGET1 = 0x%08x\n", val);
+
+	/* Program TIMER_INTERVAL to zero */
+	cpr_write(cpr_vreg, REG_RBCPR_TIMER_INTERVAL, 0);
+
+	/* Bypass sensors in collapsible domain */
+	if (cpr_vreg->aging_info->aging_sensor_bypass)
+		cpr_write(cpr_vreg, REG_RBCPR_SENSOR_BYPASS0,
+			(cpr_vreg->aging_info->aging_sensor_bypass &
+		RBCPR_SENSOR_MASK0_SENSOR(aging_sensor_info->sensor_id)));
+
+	/* Mask other sensors */
+	cpr_write(cpr_vreg, REG_RBCPR_SENSOR_MASK0,
+		RBCPR_SENSOR_MASK0_SENSOR(aging_sensor_info->sensor_id));
+	val = cpr_read(cpr_vreg, REG_RBCPR_SENSOR_MASK0);
+	cpr_debug(cpr_vreg, "RBCPR_SENSOR_MASK0 = 0x%08x\n", val);
+
+	/* Enable cpr controller */
+	cpr_ctl_modify(cpr_vreg, RBCPR_CTL_LOOP_EN, RBCPR_CTL_LOOP_EN);
+
+	/* Make sure cpr starts measurement with toggling busy bit */
+	mb();
+
+	/* Wait and Ignore the first measurement. Time-out after 5ms */
+	retries = 50;
+	while (retries-- && cpr_ctl_is_busy(cpr_vreg))
+		udelay(100);
+
+	/* retries is -1 only when the loop exited on the count, not on busy */
+	if (retries < 0) {
+		cpr_err(cpr_vreg, "Aging calibration failed\n");
+		rc = -EBUSY;
+		goto _exit;
+	}
+
+	/* Set age page mode */
+	cpr_write(cpr_vreg, REG_RBCPR_HTOL_AGE, RBCPR_HTOL_AGE_PAGE);
+
+	aging_measurement_count = 0;
+	quot_delta_scaled_sum = 0;
+
+	for (i = 0; i < CPR_AGING_MEASUREMENT_ITERATIONS; i++) {
+		/* Send cont nack */
+		cpr_write(cpr_vreg, REG_RBIF_CONT_NACK_CMD, 1);
+
+		/*
+		 * Make sure cpr starts next measurement with
+		 * toggling busy bit
+		 */
+		mb();
+
+		/*
+		 * Wait for controller to finish measurement
+		 * and time-out after 5ms
+		 */
+		retries = 50;
+		while (retries-- && cpr_ctl_is_busy(cpr_vreg))
+			udelay(100);
+
+		if (retries < 0) {
+			cpr_err(cpr_vreg, "Aging calibration failed\n");
+			rc = -EBUSY;
+			goto _exit;
+		}
+
+		/* Check for PAGE_IS_AGE flag in status register */
+		val = cpr_read(cpr_vreg, REG_RBCPR_HTOL_AGE);
+		is_aging_measurement = val & RBCPR_AGE_DATA_STATUS;
+
+		val = cpr_read(cpr_vreg, REG_RBCPR_RESULT_1);
+		sel_fast = RBCPR_RESULT_1_SEL_FAST(val);
+		cpr_debug(cpr_vreg, "RBCPR_RESULT_1 = 0x%08x\n", val);
+
+		val = cpr_read(cpr_vreg, REG_RBCPR_DEBUG1);
+		cpr_debug(cpr_vreg, "RBCPR_DEBUG1 = 0x%08x\n", val);
+
+		/*
+		 * SEL_FAST tells which debug field holds which path; pick
+		 * min/max accordingly.
+		 */
+		if (sel_fast == 1) {
+			quot_min = RBCPR_DEBUG1_QUOT_FAST(val);
+			quot_max = RBCPR_DEBUG1_QUOT_SLOW(val);
+		} else {
+			quot_min = RBCPR_DEBUG1_QUOT_SLOW(val);
+			quot_max = RBCPR_DEBUG1_QUOT_FAST(val);
+		}
+
+		/*
+		 * Scale the quotients so that they are equivalent to the fused
+		 * values.  This accounts for the difference in measurement
+		 * interval times.
+		 */
+
+		quot_min_scaled = quot_min * (gcnt_ref + 1) / (gcnt + 1);
+		quot_max_scaled = quot_max * (gcnt_ref + 1) / (gcnt + 1);
+
+		quot_delta_scaled = 0;
+		if (is_aging_measurement) {
+			quot_delta_scaled = quot_min_scaled - quot_max_scaled;
+			quot_delta_results[aging_measurement_count++] =
+					quot_delta_scaled;
+		}
+
+		cpr_debug(cpr_vreg,
+			"Age sensor[%d]: measurement[%d]: page_is_age=%u quot_min = %d, quot_max = %d quot_min_scaled = %d, quot_max_scaled = %d quot_delta_scaled = %d\n",
+			aging_sensor_info->sensor_id, i, is_aging_measurement,
+			quot_min, quot_max, quot_min_scaled, quot_max_scaled,
+			quot_delta_scaled);
+	}
+
+	/*
+	 * Outlier rejection: sort ascending, drop the FILTER lowest and
+	 * FILTER highest samples, and average the rest.
+	 */
+	filtered_count
+		= aging_measurement_count - CPR_AGING_MEASUREMENT_FILTER * 2;
+	if (filtered_count > 0) {
+		sort(quot_delta_results, aging_measurement_count,
+			sizeof(*quot_delta_results), cmp_int, NULL);
+
+		quot_delta_scaled_sum = 0;
+		for (i = 0; i < filtered_count; i++)
+			quot_delta_scaled_sum
+				+= quot_delta_results[i
+					+ CPR_AGING_MEASUREMENT_FILTER];
+
+		aging_sensor_info->current_quot_diff
+			= quot_delta_scaled_sum / filtered_count;
+		cpr_debug(cpr_vreg,
+			"Age sensor[%d]: average aging quotient delta = %d (count = %d)\n",
+			aging_sensor_info->sensor_id,
+			aging_sensor_info->current_quot_diff, filtered_count);
+	} else {
+		cpr_err(cpr_vreg, "%d aging measurements completed after %d iterations\n",
+			aging_measurement_count,
+			CPR_AGING_MEASUREMENT_ITERATIONS);
+		rc = -EBUSY;
+	}
+
+_exit:
+	/* Clear age page bit */
+	cpr_write(cpr_vreg, REG_RBCPR_HTOL_AGE, 0x0);
+
+	/* Disable the CPR controller after aging procedure */
+	cpr_ctl_modify(cpr_vreg, RBCPR_CTL_LOOP_EN, 0x0);
+
+	/* Clear the sensor bypass */
+	if (cpr_vreg->aging_info->aging_sensor_bypass)
+		cpr_write(cpr_vreg, REG_RBCPR_SENSOR_BYPASS0, 0x0);
+
+	/* Unmask all sensors */
+	cpr_write(cpr_vreg, REG_RBCPR_SENSOR_MASK0, 0x0);
+
+	/* Clear gcnt0/1 registers */
+	cpr_write(cpr_vreg, REG_RBCPR_GCNT_TARGET(0), 0x0);
+	cpr_write(cpr_vreg, REG_RBCPR_GCNT_TARGET(1), 0x0);
+
+	/* Program the delay count for the timer */
+	val = (cpr_vreg->ref_clk_khz * cpr_vreg->timer_delay_us) / 1000;
+	cpr_write(cpr_vreg, REG_RBCPR_TIMER_INTERVAL, val);
+
+	kfree(quot_delta_results);
+
+	return rc;
+}
+
+/*
+ * cpr_de_aging_adjustment() - apply aging compensation to the fused target
+ *		quotients
+ * @data:	struct cpr_regulator pointer (void* because this is invoked
+ *		through smp_call_function_any(), see
+ *		cpr_calculate_de_aging_margin())
+ *
+ * Measures the aging quotient delta of every configured aging sensor (two
+ * attempts each), converts the worst-case delta into a voltage via
+ * aging_ro_kv, then for each fuse corner removes the initially applied
+ * maximum aging margin from the target quotient and adds back the derated,
+ * capped adjustment actually required.  On measurement failure
+ * cpr_aging_error is set and the quotients are left untouched.
+ */
+static void cpr_de_aging_adjustment(void *data)
+{
+	struct cpr_regulator *cpr_vreg = (struct cpr_regulator *)data;
+	struct cpr_aging_info *aging_info = cpr_vreg->aging_info;
+	struct cpr_aging_sensor_info *aging_sensor_info;
+	int i, num_aging_sensors, retries, rc = 0;
+	int max_quot_diff = 0, ro_sel = 0;
+	u32 voltage_adjust, aging_voltage_adjust = 0;
+
+	aging_sensor_info = aging_info->sensor_info;
+	num_aging_sensors = aging_info->num_aging_sensors;
+
+	for (i = 0; i < num_aging_sensors; i++, aging_sensor_info++) {
+		retries = 2;
+		while (retries--) {
+			rc = cpr_get_aging_quot_delta(cpr_vreg,
+					aging_sensor_info);
+			if (!rc)
+				break;
+		}
+		/* retries is -1 only when both attempts failed */
+		if (rc && retries < 0) {
+			cpr_err(cpr_vreg, "error in age calibration: rc = %d\n",
+				rc);
+			aging_info->cpr_aging_error = true;
+			return;
+		}
+
+		/* Track the worst (largest) aging delta across all sensors */
+		max_quot_diff = max(max_quot_diff,
+					(aging_sensor_info->current_quot_diff -
+					aging_sensor_info->initial_quot_diff));
+	}
+
+	cpr_debug(cpr_vreg, "Max aging quot delta = %d\n",
+				max_quot_diff);
+	/* Convert quotient delta to uV using the aging RO sensitivity (kV) */
+	aging_voltage_adjust = DIV_ROUND_UP(max_quot_diff * 1000000,
+					aging_info->aging_ro_kv);
+
+	for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++) {
+		/* Remove initial max aging adjustment */
+		ro_sel = cpr_vreg->cpr_fuse_ro_sel[i];
+		cpr_vreg->cpr_fuse_target_quot[i] -=
+				(aging_info->cpr_ro_kv[ro_sel]
+				* aging_info->max_aging_margin) / 1000000;
+		aging_info->voltage_adjust[i] = 0;
+
+		if (aging_voltage_adjust > 0) {
+			/* Add required aging adjustment */
+			voltage_adjust = (aging_voltage_adjust
+					* aging_info->aging_derate[i]) / 1000;
+			voltage_adjust = min(voltage_adjust,
+						aging_info->max_aging_margin);
+			cpr_vreg->cpr_fuse_target_quot[i] +=
+					(aging_info->cpr_ro_kv[ro_sel]
+					* voltage_adjust) / 1000000;
+			aging_info->voltage_adjust[i] = voltage_adjust;
+		}
+	}
+}
+
+/* regulator_ops .is_enabled hook: report the driver's cached enable state. */
+static int cpr_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	const struct cpr_regulator *cpr_vreg = rdev_get_drvdata(rdev);
+
+	return cpr_vreg->vreg_enabled ? 1 : 0;
+}
+
+/*
+ * cpr_regulator_enable() - regulator_ops .enable hook
+ *
+ * Enables the dependency supply (vdd_mx, when present) before vdd_apc and
+ * then re-arms the CPR control loop if closed-loop operation is allowed and
+ * a corner has already been selected.
+ *
+ * Fix vs. the original: if enabling vdd_apc fails, the vdd_mx enable done
+ * just above is rolled back so the regulator framework's enable counts for
+ * the two supplies stay balanced.
+ *
+ * Return: 0 on success or the negative errno from regulator_enable().
+ */
+static int cpr_regulator_enable(struct regulator_dev *rdev)
+{
+	struct cpr_regulator *cpr_vreg = rdev_get_drvdata(rdev);
+	int rc = 0;
+
+	/* Enable dependency power before vdd_apc */
+	if (cpr_vreg->vdd_mx) {
+		rc = regulator_enable(cpr_vreg->vdd_mx);
+		if (rc) {
+			cpr_err(cpr_vreg, "regulator_enable: vdd_mx: rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	rc = regulator_enable(cpr_vreg->vdd_apc);
+	if (rc) {
+		cpr_err(cpr_vreg, "regulator_enable: vdd_apc: rc=%d\n", rc);
+		/* Roll back the dependency supply enabled above */
+		if (cpr_vreg->vdd_mx)
+			regulator_disable(cpr_vreg->vdd_mx);
+		return rc;
+	}
+
+	mutex_lock(&cpr_vreg->cpr_mutex);
+	cpr_vreg->vreg_enabled = true;
+	if (cpr_is_allowed(cpr_vreg) && cpr_vreg->corner) {
+		cpr_irq_clr(cpr_vreg);
+		cpr_corner_restore(cpr_vreg, cpr_vreg->corner);
+		cpr_ctl_enable(cpr_vreg, cpr_vreg->corner);
+	}
+	mutex_unlock(&cpr_vreg->cpr_mutex);
+
+	return rc;
+}
+
+/*
+ * cpr_regulator_disable() - regulator_ops .disable hook
+ *
+ * Disables vdd_apc first, then the vdd_mx dependency supply (reverse of the
+ * enable order), and stops the CPR control loop.
+ *
+ * NOTE(review): if the vdd_mx disable fails, vdd_apc has already been
+ * disabled but vreg_enabled stays true and the loop is left running -
+ * confirm this inconsistent state is acceptable to callers.
+ *
+ * Return: 0 on success or the negative errno from regulator_disable().
+ */
+static int cpr_regulator_disable(struct regulator_dev *rdev)
+{
+	struct cpr_regulator *cpr_vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	rc = regulator_disable(cpr_vreg->vdd_apc);
+	if (!rc) {
+		if (cpr_vreg->vdd_mx)
+			rc = regulator_disable(cpr_vreg->vdd_mx);
+
+		if (rc) {
+			cpr_err(cpr_vreg, "regulator_disable: vdd_mx: rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		mutex_lock(&cpr_vreg->cpr_mutex);
+		cpr_vreg->vreg_enabled = false;
+		if (cpr_is_allowed(cpr_vreg))
+			cpr_ctl_disable(cpr_vreg);
+		mutex_unlock(&cpr_vreg->cpr_mutex);
+	} else {
+		cpr_err(cpr_vreg, "regulator_disable: vdd_apc: rc=%d\n", rc);
+	}
+
+	return rc;
+}
+
+/*
+ * cpr_calculate_de_aging_margin() - run the one-time de-aging calibration
+ * @cpr_vreg:	CPR regulator descriptor
+ *
+ * Saves the CPR CTL/IRQ register state and quiesces the controller, moves
+ * the supply to the aging reference voltage at the aging corner, forces the
+ * supply into NORMAL (PWM) mode, performs the aging measurement on one
+ * online CPU from cpr_vreg->cpu_mask, then restores the regulator mode and
+ * the saved register state.  Called from cpr_regulator_set_voltage() with
+ * cpr_vreg->cpr_mutex held.
+ *
+ * NOTE(review): the early error returns (voltage scaling or set_mode
+ * failures) skip the mode/IRQ/CTL restoration at the bottom - confirm
+ * whether that is intentional.
+ *
+ * Return: 0 on success or a negative errno.
+ */
+static int cpr_calculate_de_aging_margin(struct cpr_regulator *cpr_vreg)
+{
+	struct cpr_aging_info *aging_info = cpr_vreg->aging_info;
+	enum voltage_change_dir change_dir = NO_CHANGE;
+	u32 save_ctl, save_irq;
+	cpumask_t tmp_mask;
+	int rc = 0, i;
+
+	save_ctl = cpr_read(cpr_vreg, REG_RBCPR_CTL);
+	save_irq = cpr_read(cpr_vreg, REG_RBIF_IRQ_EN(cpr_vreg->irq_line));
+
+	/* Disable interrupt and CPR */
+	cpr_irq_set(cpr_vreg, 0);
+	cpr_write(cpr_vreg, REG_RBCPR_CTL, 0);
+
+	if (aging_info->aging_corner > cpr_vreg->corner)
+		change_dir = UP;
+	else if (aging_info->aging_corner < cpr_vreg->corner)
+		change_dir = DOWN;
+
+	/* set selected reference voltage for de-aging */
+	rc = cpr_scale_voltage(cpr_vreg,
+				aging_info->aging_corner,
+				aging_info->aging_ref_voltage,
+				change_dir);
+	if (rc) {
+		cpr_err(cpr_vreg, "Unable to set aging reference voltage, rc = %d\n",
+			rc);
+		return rc;
+	}
+
+	/* Force PWM mode */
+	rc = regulator_set_mode(cpr_vreg->vdd_apc, REGULATOR_MODE_NORMAL);
+	if (rc) {
+		cpr_err(cpr_vreg, "unable to configure vdd-supply for mode=%u, rc=%d\n",
+			REGULATOR_MODE_NORMAL, rc);
+		return rc;
+	}
+
+	/*
+	 * Run the measurement on any online CPU from the configured mask.
+	 * If none is online, cpr_aging_done stays false so the calibration
+	 * is retried on a later corner change.
+	 */
+	get_online_cpus();
+	cpumask_and(&tmp_mask, &cpr_vreg->cpu_mask, cpu_online_mask);
+	if (!cpumask_empty(&tmp_mask)) {
+		smp_call_function_any(&tmp_mask,
+					cpr_de_aging_adjustment,
+					cpr_vreg, true);
+		aging_info->cpr_aging_done = true;
+		if (!aging_info->cpr_aging_error)
+			for (i = CPR_FUSE_CORNER_MIN;
+					i <= cpr_vreg->num_fuse_corners; i++)
+				cpr_info(cpr_vreg, "Corner[%d]: age adjusted target quot = %d\n",
+					i, cpr_vreg->cpr_fuse_target_quot[i]);
+	}
+
+	put_online_cpus();
+
+	/* Set to initial mode */
+	rc = regulator_set_mode(cpr_vreg->vdd_apc, REGULATOR_MODE_IDLE);
+	if (rc) {
+		cpr_err(cpr_vreg, "unable to configure vdd-supply for mode=%u, rc=%d\n",
+			REGULATOR_MODE_IDLE, rc);
+		return rc;
+	}
+
+	/* Clear interrupts */
+	cpr_irq_clr(cpr_vreg);
+
+	/* Restore register values */
+	cpr_irq_set(cpr_vreg, save_irq);
+	cpr_write(cpr_vreg, REG_RBCPR_CTL, save_ctl);
+
+	return rc;
+}
+
+/* Note that cpr_vreg->cpr_mutex must be held by the caller. */
+/*
+ * cpr_regulator_set_voltage() - switch the regulator to @corner
+ * @rdev:	regulator device
+ * @corner:	target virtual corner index
+ * @reset_quot:	when true, fully reprogram the corner's saved register
+ *		state (cpr_corner_restore) instead of a plain corner switch
+ *
+ * Picks the closed-loop last_volt (or open-loop voltage when CPR is not
+ * allowed), runs the one-time de-aging calibration if still pending and
+ * applicable, scales the supply, and re-arms the CPR loop when enabled.
+ */
+static int cpr_regulator_set_voltage(struct regulator_dev *rdev,
+		int corner, bool reset_quot)
+{
+	struct cpr_regulator *cpr_vreg = rdev_get_drvdata(rdev);
+	struct cpr_aging_info *aging_info = cpr_vreg->aging_info;
+	int rc;
+	int new_volt;
+	enum voltage_change_dir change_dir = NO_CHANGE;
+	int fuse_corner = cpr_vreg->corner_map[corner];
+
+	if (cpr_is_allowed(cpr_vreg)) {
+		cpr_ctl_disable(cpr_vreg);
+		new_volt = cpr_vreg->last_volt[corner];
+	} else {
+		new_volt = cpr_vreg->open_loop_volt[corner];
+	}
+
+	cpr_debug(cpr_vreg, "[corner:%d, fuse_corner:%d] = %d uV\n",
+		corner, fuse_corner, new_volt);
+
+	if (corner > cpr_vreg->corner)
+		change_dir = UP;
+	else if (corner < cpr_vreg->corner)
+		change_dir = DOWN;
+
+	/* Read age sensor data and apply de-aging adjustments */
+	if (cpr_vreg->vreg_enabled && aging_info && !aging_info->cpr_aging_done
+		&& (corner <= aging_info->aging_corner)) {
+		rc = cpr_calculate_de_aging_margin(cpr_vreg);
+		if (rc) {
+			cpr_err(cpr_vreg, "failed in de-aging calibration: rc=%d\n",
+				rc);
+		} else {
+			/*
+			 * The calibration moved the supply to the aging
+			 * corner's reference voltage, so recompute the
+			 * scaling direction relative to the aging corner.
+			 */
+			change_dir = NO_CHANGE;
+			if (corner > aging_info->aging_corner)
+				change_dir = UP;
+			else if (corner  < aging_info->aging_corner)
+				change_dir = DOWN;
+		}
+		/*
+		 * Target quotients may have been adjusted by the
+		 * calibration; force a full corner restore below.
+		 */
+		reset_quot = true;
+	}
+
+	rc = cpr_scale_voltage(cpr_vreg, corner, new_volt, change_dir);
+	if (rc)
+		return rc;
+
+	if (cpr_is_allowed(cpr_vreg) && cpr_vreg->vreg_enabled) {
+		cpr_irq_clr(cpr_vreg);
+		if (reset_quot)
+			cpr_corner_restore(cpr_vreg, corner);
+		else
+			cpr_corner_switch(cpr_vreg, corner);
+		cpr_ctl_enable(cpr_vreg, corner);
+	}
+
+	cpr_vreg->corner = corner;
+
+	return rc;
+}
+
+/*
+ * cpr_regulator_set_voltage_op() - regulator_ops .set_voltage hook
+ *
+ * This driver uses virtual corner indices in place of microvolt values;
+ * @corner_max and @selector are unused.  Serializes with the IRQ handler
+ * and suspend/resume via cpr_mutex.
+ */
+static int cpr_regulator_set_voltage_op(struct regulator_dev *rdev,
+		int corner, int corner_max, unsigned *selector)
+{
+	struct cpr_regulator *cpr_vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	mutex_lock(&cpr_vreg->cpr_mutex);
+	rc = cpr_regulator_set_voltage(rdev, corner, false);
+	mutex_unlock(&cpr_vreg->cpr_mutex);
+
+	return rc;
+}
+
+/*
+ * regulator_ops .get_voltage hook: the returned "voltage" is the currently
+ * selected virtual corner index, matching the set_voltage convention.
+ */
+static int cpr_regulator_get_voltage(struct regulator_dev *rdev)
+{
+	const struct cpr_regulator *cpr_vreg = rdev_get_drvdata(rdev);
+
+	return cpr_vreg->corner;
+}
+
+/**
+ * cpr_regulator_list_corner_voltage() - return the ceiling voltage mapped to
+ *			the specified voltage corner
+ * @rdev:		Regulator device pointer for the cpr-regulator
+ * @corner:		Voltage corner
+ *
+ * This function is passed as a callback function into the regulator ops that
+ * are registered for each cpr-regulator device.
+ *
+ * Return: voltage value in microvolts or -EINVAL if the corner is out of range
+ */
+static int cpr_regulator_list_corner_voltage(struct regulator_dev *rdev,
+		int corner)
+{
+	struct cpr_regulator *cpr_vreg = rdev_get_drvdata(rdev);
+
+	/* Reject corner indices outside the populated ceiling table. */
+	if (corner < CPR_CORNER_MIN || corner > cpr_vreg->num_corners)
+		return -EINVAL;
+
+	return cpr_vreg->ceiling_volt[corner];
+}
+
+/*
+ * Regulator operations for the CPR-managed supply.  "Voltages" exchanged
+ * through set_voltage/get_voltage/list_corner_voltage are virtual corner
+ * indices rather than raw microvolt values (see
+ * cpr_regulator_get_voltage() returning cpr_vreg->corner).
+ */
+static struct regulator_ops cpr_corner_ops = {
+	.enable			= cpr_regulator_enable,
+	.disable		= cpr_regulator_disable,
+	.is_enabled		= cpr_regulator_is_enabled,
+	.set_voltage		= cpr_regulator_set_voltage_op,
+	.get_voltage		= cpr_regulator_get_voltage,
+	.list_corner_voltage	= cpr_regulator_list_corner_voltage,
+};
+
+#ifdef CONFIG_PM
+/*
+ * cpr_suspend() - stop the CPR loop and clear pending interrupts ahead of
+ * system suspend.  Caller (cpr_regulator_suspend()) holds cpr_mutex.
+ */
+static int cpr_suspend(struct cpr_regulator *cpr_vreg)
+{
+	cpr_debug(cpr_vreg, "suspend\n");
+
+	cpr_ctl_disable(cpr_vreg);
+
+	cpr_irq_clr(cpr_vreg);
+
+	return 0;
+}
+
+/*
+ * cpr_resume() - counterpart of cpr_suspend(): clear any stale interrupt
+ * state and re-arm the loop for the current corner.  Caller
+ * (cpr_regulator_resume()) holds cpr_mutex.
+ */
+static int cpr_resume(struct cpr_regulator *cpr_vreg)
+{
+	cpr_debug(cpr_vreg, "resume\n");
+
+	cpr_irq_clr(cpr_vreg);
+
+	cpr_ctl_enable(cpr_vreg, cpr_vreg->corner);
+
+	return 0;
+}
+
+/*
+ * cpr_regulator_suspend() - platform driver suspend hook
+ *
+ * Stops closed-loop operation (when in use) and marks the driver as
+ * suspended under cpr_mutex.
+ */
+static int cpr_regulator_suspend(struct platform_device *pdev,
+				 pm_message_t state)
+{
+	struct cpr_regulator *cpr_vreg = platform_get_drvdata(pdev);
+	int rc = 0;
+
+	mutex_lock(&cpr_vreg->cpr_mutex);
+
+	if (cpr_is_allowed(cpr_vreg))
+		rc = cpr_suspend(cpr_vreg);
+
+	cpr_vreg->is_cpr_suspended = true;
+
+	mutex_unlock(&cpr_vreg->cpr_mutex);
+
+	return rc;
+}
+
+/*
+ * cpr_regulator_resume() - platform driver resume hook
+ *
+ * Clears the suspended flag first so cpr_resume() runs with the driver in
+ * the active state, then re-arms closed-loop operation when in use.
+ */
+static int cpr_regulator_resume(struct platform_device *pdev)
+{
+	struct cpr_regulator *cpr_vreg = platform_get_drvdata(pdev);
+	int rc = 0;
+
+	mutex_lock(&cpr_vreg->cpr_mutex);
+
+	cpr_vreg->is_cpr_suspended = false;
+
+	if (cpr_is_allowed(cpr_vreg))
+		rc = cpr_resume(cpr_vreg);
+
+	mutex_unlock(&cpr_vreg->cpr_mutex);
+
+	return rc;
+}
+#else
+#define cpr_regulator_suspend NULL
+#define cpr_regulator_resume NULL
+#endif
+
+/*
+ * cpr_config() - one-time CPR controller initialization
+ * @cpr_vreg:	CPR regulator descriptor
+ * @dev:	device used for devm allocations
+ *
+ * Optionally switches the CPR reference clock mux to 19.2 MHz, programs the
+ * default hardware voltage limits, gate count, timer intervals, up/down
+ * thresholds and control bits, enables the default interrupts, and
+ * allocates the per-corner CTL/IRQ save areas used by
+ * cpr_corner_save()/cpr_corner_restore().
+ *
+ * Return: 0 on success, -EINVAL if the clock register cannot be mapped,
+ * -ENOMEM on allocation failure.
+ */
+static int cpr_config(struct cpr_regulator *cpr_vreg, struct device *dev)
+{
+	int i;
+	u32 val, gcnt, reg;
+	void __iomem *rbcpr_clk;
+	int size;
+
+	if (cpr_vreg->rbcpr_clk_addr) {
+		/* Use 19.2 MHz clock for CPR. */
+		rbcpr_clk = ioremap(cpr_vreg->rbcpr_clk_addr, 4);
+		if (!rbcpr_clk) {
+			cpr_err(cpr_vreg, "Unable to map rbcpr_clk\n");
+			return -EINVAL;
+		}
+		reg = readl_relaxed(rbcpr_clk);
+		reg &= ~RBCPR_CLK_SEL_MASK;
+		reg |= RBCPR_CLK_SEL_19P2_MHZ & RBCPR_CLK_SEL_MASK;
+		writel_relaxed(reg, rbcpr_clk);
+		/* One-shot mux configuration; the mapping is not kept. */
+		iounmap(rbcpr_clk);
+	}
+
+	/* Disable interrupt and CPR */
+	cpr_write(cpr_vreg, REG_RBIF_IRQ_EN(cpr_vreg->irq_line), 0);
+	cpr_write(cpr_vreg, REG_RBCPR_CTL, 0);
+
+	/* Program the default HW Ceiling, Floor and vlevel */
+	val = ((RBIF_LIMIT_CEILING_DEFAULT & RBIF_LIMIT_CEILING_MASK)
+			<< RBIF_LIMIT_CEILING_SHIFT)
+		| (RBIF_LIMIT_FLOOR_DEFAULT & RBIF_LIMIT_FLOOR_MASK);
+	cpr_write(cpr_vreg, REG_RBIF_LIMIT, val);
+	cpr_write(cpr_vreg, REG_RBIF_SW_VLEVEL, RBIF_SW_VLEVEL_DEFAULT);
+
+	/* Clear the target quotient value and gate count of all ROs */
+	for (i = 0; i < CPR_NUM_RING_OSC; i++)
+		cpr_write(cpr_vreg, REG_RBCPR_GCNT_TARGET(i), 0);
+
+	/* Init and save gcnt */
+	gcnt = (cpr_vreg->ref_clk_khz * cpr_vreg->gcnt_time_us) / 1000;
+	gcnt = (gcnt & RBCPR_GCNT_TARGET_GCNT_MASK) <<
+			RBCPR_GCNT_TARGET_GCNT_SHIFT;
+	cpr_vreg->gcnt = gcnt;
+
+	/* Program the delay count for the timer */
+	val = (cpr_vreg->ref_clk_khz * cpr_vreg->timer_delay_us) / 1000;
+	cpr_write(cpr_vreg, REG_RBCPR_TIMER_INTERVAL, val);
+	cpr_info(cpr_vreg, "Timer count: 0x%0x (for %d us)\n", val,
+		cpr_vreg->timer_delay_us);
+
+	/* Program Consecutive Up & Down */
+	val = ((cpr_vreg->timer_cons_down & RBIF_TIMER_ADJ_CONS_DOWN_MASK)
+			<< RBIF_TIMER_ADJ_CONS_DOWN_SHIFT) |
+	       (cpr_vreg->timer_cons_up & RBIF_TIMER_ADJ_CONS_UP_MASK) |
+	       ((cpr_vreg->clamp_timer_interval & RBIF_TIMER_ADJ_CLAMP_INT_MASK)
+			<< RBIF_TIMER_ADJ_CLAMP_INT_SHIFT);
+	cpr_write(cpr_vreg, REG_RBIF_TIMER_ADJUST, val);
+
+	/* Program the control register */
+	cpr_vreg->up_threshold &= RBCPR_CTL_UP_THRESHOLD_MASK;
+	cpr_vreg->down_threshold &= RBCPR_CTL_DN_THRESHOLD_MASK;
+	val = (cpr_vreg->up_threshold << RBCPR_CTL_UP_THRESHOLD_SHIFT)
+		| (cpr_vreg->down_threshold << RBCPR_CTL_DN_THRESHOLD_SHIFT);
+	val |= RBCPR_CTL_TIMER_EN | RBCPR_CTL_COUNT_MODE;
+	val |= RBCPR_CTL_SW_AUTO_CONT_ACK_EN;
+	cpr_write(cpr_vreg, REG_RBCPR_CTL, val);
+
+	cpr_irq_set(cpr_vreg, CPR_INT_DEFAULT);
+
+	/*
+	 * Older controllers (version <= RBCPR_VER_2) need the first IRQ
+	 * status read discarded; see cpr_irq_handler().
+	 */
+	val = cpr_read(cpr_vreg, REG_RBCPR_VERSION);
+	if (val <= RBCPR_VER_2)
+		cpr_vreg->flags |= FLAGS_IGNORE_1ST_IRQ_STATUS;
+
+	/* Corner indices are 1-based; allocate one extra slot. */
+	size = cpr_vreg->num_corners + 1;
+	/*
+	 * NOTE(review): sizeof(int) assumes save_ctl/save_irq elements are
+	 * int-sized - confirm against the struct definition (kcalloc with
+	 * sizeof(*ptr) would be safer).
+	 */
+	cpr_vreg->save_ctl = devm_kzalloc(dev, sizeof(int) * size, GFP_KERNEL);
+	cpr_vreg->save_irq = devm_kzalloc(dev, sizeof(int) * size, GFP_KERNEL);
+	if (!cpr_vreg->save_ctl || !cpr_vreg->save_irq)
+		return -ENOMEM;
+
+	for (i = 1; i < size; i++)
+		cpr_corner_save(cpr_vreg, i);
+
+	return 0;
+}
+
+/*
+ * cpr_fuse_is_setting_expected() - test whether a fuse field matches an
+ *		expected value
+ * @cpr_vreg:	CPR regulator descriptor
+ * @sel_array:	fuse selector tuple: [0] = efuse row, [1] = bit offset,
+ *		[2] = field width in bits, [3] = expected value,
+ *		[4] = passed through to cpr_read_efuse_row() (presumably a
+ *		row address type/select - confirm against that helper)
+ *
+ * Return: 1 if the extracted fuse field equals sel_array[3], else 0.
+ */
+static int cpr_fuse_is_setting_expected(struct cpr_regulator *cpr_vreg,
+					u32 sel_array[5])
+{
+	u64 fuse_bits;
+	u32 ret;
+
+	fuse_bits = cpr_read_efuse_row(cpr_vreg, sel_array[0], sel_array[4]);
+	ret = (fuse_bits >> sel_array[1]) & ((1 << sel_array[2]) - 1);
+	if (ret == sel_array[3])
+		ret = 1;
+	else
+		ret = 0;
+
+	cpr_info(cpr_vreg, "[row:%d] = 0x%llx @%d:%d == %d ?: %s\n",
+			sel_array[0], fuse_bits,
+			sel_array[1], sel_array[2],
+			sel_array[3],
+			(ret == 1) ? "yes" : "no");
+	return ret;
+}
+
+/*
+ * cpr_voltage_uplift_wa_inc_volt() - apply the voltage uplift workaround
+ * @cpr_vreg:	CPR regulator descriptor
+ * @of_node:	device tree node carrying the uplift properties
+ *
+ * Raises the highest fuse corner's PVS voltage by qcom,cpr-uplift-voltage,
+ * clamped to qcom,cpr-uplift-max-volt.  Both properties are mandatory when
+ * this workaround (FLAGS_UPLIFT_QUOT_VOLT) is enabled.
+ *
+ * Return: 0 on success or a negative errno when a property is missing.
+ */
+static int cpr_voltage_uplift_wa_inc_volt(struct cpr_regulator *cpr_vreg,
+					struct device_node *of_node)
+{
+	u32 uplift_voltage;
+	u32 uplift_max_volt = 0;
+	int highest_fuse_corner = cpr_vreg->num_fuse_corners;
+	int rc;
+
+	rc = of_property_read_u32(of_node,
+		"qcom,cpr-uplift-voltage", &uplift_voltage);
+	if (rc < 0) {
+		cpr_err(cpr_vreg, "cpr-uplift-voltage is missing, rc = %d", rc);
+		return rc;
+	}
+	rc = of_property_read_u32(of_node,
+		"qcom,cpr-uplift-max-volt", &uplift_max_volt);
+	if (rc < 0) {
+		cpr_err(cpr_vreg, "cpr-uplift-max-volt is missing, rc = %d",
+			rc);
+		return rc;
+	}
+
+	cpr_vreg->pvs_corner_v[highest_fuse_corner] += uplift_voltage;
+	if (cpr_vreg->pvs_corner_v[highest_fuse_corner] > uplift_max_volt)
+		cpr_vreg->pvs_corner_v[highest_fuse_corner] = uplift_max_volt;
+
+	return rc;
+}
+
+/*
+ * cpr_adjust_init_voltages() - apply optional per-fuse-corner initial
+ *		voltage adjustments from device tree
+ * @of_node:	node carrying qcom,cpr-init-voltage-adjustment
+ * @cpr_vreg:	CPR regulator descriptor
+ *
+ * The property holds one tuple of num_fuse_corners values per fuse map
+ * entry; the tuple matching cpr_fuse_map_match is selected (or tuple 0
+ * when no fuse map is in use).  Missing property is not an error.
+ *
+ * NOTE(review): adjustments are read as u32 but added to signed voltages;
+ * negative DT values rely on two's-complement wraparound - confirm.
+ *
+ * Return: 0 on success or a negative errno on malformed/missing data.
+ */
+static int cpr_adjust_init_voltages(struct device_node *of_node,
+				struct cpr_regulator *cpr_vreg)
+{
+	int tuple_count, tuple_match, i;
+	u32 index;
+	u32 volt_adjust = 0;
+	int len = 0;
+	int rc = 0;
+
+	if (!of_find_property(of_node, "qcom,cpr-init-voltage-adjustment",
+				&len)) {
+		/* No initial voltage adjustment needed. */
+		return 0;
+	}
+
+	if (cpr_vreg->cpr_fuse_map_count) {
+		if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH) {
+			/*
+			 * No matching index to use for initial voltage
+			 * adjustment.
+			 */
+			return 0;
+		}
+		tuple_count = cpr_vreg->cpr_fuse_map_count;
+		tuple_match = cpr_vreg->cpr_fuse_map_match;
+	} else {
+		tuple_count = 1;
+		tuple_match = 0;
+	}
+
+	if (len != cpr_vreg->num_fuse_corners * tuple_count * sizeof(u32)) {
+		cpr_err(cpr_vreg, "qcom,cpr-init-voltage-adjustment length=%d is invalid\n",
+			len);
+		return -EINVAL;
+	}
+
+	for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++) {
+		/* Select the value for corner i within the matching tuple */
+		index = tuple_match * cpr_vreg->num_fuse_corners
+				+ i - CPR_FUSE_CORNER_MIN;
+		rc = of_property_read_u32_index(of_node,
+			"qcom,cpr-init-voltage-adjustment", index,
+			&volt_adjust);
+		if (rc) {
+			cpr_err(cpr_vreg, "could not read qcom,cpr-init-voltage-adjustment index %u, rc=%d\n",
+				index, rc);
+			return rc;
+		}
+
+		if (volt_adjust) {
+			cpr_vreg->pvs_corner_v[i] += volt_adjust;
+			cpr_info(cpr_vreg, "adjusted initial voltage[%d]: %d -> %d uV\n",
+				i, cpr_vreg->pvs_corner_v[i] - volt_adjust,
+				cpr_vreg->pvs_corner_v[i]);
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * Property qcom,cpr-fuse-init-voltage specifies the fuse position of the
+ * initial voltage for each fuse corner. MSB of the fuse value is a sign
+ * bit, and the remaining bits define the steps of the offset. Each step has
+ * units of microvolts defined in the qcom,cpr-init-voltage-step property.
+ * The initial voltages can be calculated using the formula:
+ * pvs_corner_v[corner] = ref_uv[corner] + (sign * steps * step_size_uv)
+ * where ref_uv comes from the qcom,cpr-init-voltage-ref property.  The
+ * result is rounded up to a multiple of the PMIC step size and then
+ * clamped to the fuse ceiling/floor range.
+ */
+static int cpr_pvs_per_corner_init(struct device_node *of_node,
+				struct cpr_regulator *cpr_vreg)
+{
+	u64 efuse_bits;
+	int i, size, sign, steps, step_size_uv, rc;
+	u32 *fuse_sel, *tmp, *ref_uv;
+	struct property *prop;
+	char *init_volt_str;
+
+	init_volt_str = cpr_vreg->cpr_fuse_redundant
+			? "qcom,cpr-fuse-redun-init-voltage"
+			: "qcom,cpr-fuse-init-voltage";
+
+	prop = of_find_property(of_node, init_volt_str, NULL);
+	if (!prop) {
+		cpr_err(cpr_vreg, "%s is missing\n", init_volt_str);
+		return -EINVAL;
+	}
+	/* Expect a 4-word fuse selector tuple per fuse corner */
+	size = prop->length / sizeof(u32);
+	if (size != cpr_vreg->num_fuse_corners * 4) {
+		cpr_err(cpr_vreg,
+			"fuse position for init voltages is invalid\n");
+		return -EINVAL;
+	}
+	fuse_sel = kzalloc(sizeof(u32) * size, GFP_KERNEL);
+	if (!fuse_sel) {
+		cpr_err(cpr_vreg, "memory alloc failed.\n");
+		return -ENOMEM;
+	}
+	rc = of_property_read_u32_array(of_node, init_volt_str,
+							fuse_sel, size);
+	if (rc < 0) {
+		cpr_err(cpr_vreg,
+			"read cpr-fuse-init-voltage failed, rc = %d\n", rc);
+		kfree(fuse_sel);
+		return rc;
+	}
+	rc = of_property_read_u32(of_node, "qcom,cpr-init-voltage-step",
+							&step_size_uv);
+	if (rc < 0) {
+		cpr_err(cpr_vreg,
+			"read cpr-init-voltage-step failed, rc = %d\n", rc);
+		kfree(fuse_sel);
+		return rc;
+	}
+
+	/* ref_uv is 1-based like the fuse corner indices */
+	ref_uv = kzalloc((cpr_vreg->num_fuse_corners + 1) * sizeof(*ref_uv),
+			GFP_KERNEL);
+	if (!ref_uv) {
+		cpr_err(cpr_vreg,
+			"Could not allocate memory for reference voltages\n");
+		kfree(fuse_sel);
+		return -ENOMEM;
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,cpr-init-voltage-ref",
+		&ref_uv[CPR_FUSE_CORNER_MIN], cpr_vreg->num_fuse_corners);
+	if (rc < 0) {
+		cpr_err(cpr_vreg,
+			"read qcom,cpr-init-voltage-ref failed, rc = %d\n", rc);
+		kfree(fuse_sel);
+		kfree(ref_uv);
+		return rc;
+	}
+
+	/* fuse_sel advances through the tuples; tmp keeps the kfree pointer */
+	tmp = fuse_sel;
+	for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++) {
+		efuse_bits = cpr_read_efuse_param(cpr_vreg, fuse_sel[0],
+					fuse_sel[1], fuse_sel[2], fuse_sel[3]);
+		/* MSB of the field is the sign; remaining bits are steps */
+		sign = (efuse_bits & (1 << (fuse_sel[2] - 1))) ? -1 : 1;
+		steps = efuse_bits & ((1 << (fuse_sel[2] - 1)) - 1);
+		cpr_vreg->pvs_corner_v[i] =
+				ref_uv[i] + sign * steps * step_size_uv;
+		/* Round up to the PMIC voltage step granularity */
+		cpr_vreg->pvs_corner_v[i] = DIV_ROUND_UP(
+				cpr_vreg->pvs_corner_v[i],
+				cpr_vreg->step_volt) *
+				cpr_vreg->step_volt;
+		cpr_debug(cpr_vreg, "corner %d: sign = %d, steps = %d, volt = %d uV\n",
+			i, sign, steps, cpr_vreg->pvs_corner_v[i]);
+		fuse_sel += 4;
+	}
+
+	rc = cpr_adjust_init_voltages(of_node, cpr_vreg);
+	if (rc)
+		goto done;
+
+	/* Clamp the computed voltages to the fused ceiling/floor range */
+	for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++) {
+		if (cpr_vreg->pvs_corner_v[i]
+		    > cpr_vreg->fuse_ceiling_volt[i]) {
+			cpr_info(cpr_vreg, "Warning: initial voltage[%d] %d above ceiling %d\n",
+				i, cpr_vreg->pvs_corner_v[i],
+				cpr_vreg->fuse_ceiling_volt[i]);
+			cpr_vreg->pvs_corner_v[i]
+				= cpr_vreg->fuse_ceiling_volt[i];
+		} else if (cpr_vreg->pvs_corner_v[i] <
+				cpr_vreg->fuse_floor_volt[i]) {
+			cpr_info(cpr_vreg, "Warning: initial voltage[%d] %d below floor %d\n",
+				i, cpr_vreg->pvs_corner_v[i],
+				cpr_vreg->fuse_floor_volt[i]);
+			cpr_vreg->pvs_corner_v[i]
+				= cpr_vreg->fuse_floor_volt[i];
+		}
+	}
+
+done:
+	kfree(tmp);
+	kfree(ref_uv);
+
+	return rc;
+}
+
+/*
+ * A single PVS bin is stored in a fuse that's position is defined either
+ * in the qcom,pvs-fuse-redun property or in the qcom,pvs-fuse property.
+ * The fuse value defined in the qcom,pvs-fuse-redun-sel property is used
+ * to pick between the primary or redundant PVS fuse position.
+ * After the PVS bin value is read out successfully, it is used as the row
+ * index to get initial voltages for each fuse corner from the voltage table
+ * defined in the qcom,pvs-voltage-table property.
+ */
+static int cpr_pvs_single_bin_init(struct device_node *of_node,
+				struct cpr_regulator *cpr_vreg)
+{
+	u64 efuse_bits;
+	u32 pvs_fuse[4], pvs_fuse_redun_sel[5];
+	int rc, i, stripe_size;
+	bool redundant;
+	size_t pvs_bins;
+	u32 *tmp;
+
+	rc = of_property_read_u32_array(of_node, "qcom,pvs-fuse-redun-sel",
+						pvs_fuse_redun_sel, 5);
+	if (rc < 0) {
+		cpr_err(cpr_vreg, "pvs-fuse-redun-sel missing: rc=%d\n", rc);
+		return rc;
+	}
+
+	/* Select the primary or redundant fuse position */
+	redundant = cpr_fuse_is_setting_expected(cpr_vreg, pvs_fuse_redun_sel);
+	if (redundant) {
+		rc = of_property_read_u32_array(of_node, "qcom,pvs-fuse-redun",
+								pvs_fuse, 4);
+		if (rc < 0) {
+			cpr_err(cpr_vreg, "pvs-fuse-redun missing: rc=%d\n",
+				rc);
+			return rc;
+		}
+	} else {
+		rc = of_property_read_u32_array(of_node, "qcom,pvs-fuse",
+							pvs_fuse, 4);
+		if (rc < 0) {
+			cpr_err(cpr_vreg, "pvs-fuse missing: rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	/* Construct PVS process # from the efuse bits */
+	efuse_bits = cpr_read_efuse_row(cpr_vreg, pvs_fuse[0], pvs_fuse[3]);
+	cpr_vreg->pvs_bin = (efuse_bits >> pvs_fuse[1]) &
+				((1 << pvs_fuse[2]) - 1);
+	/* Table has one row of num_fuse_corners voltages per possible bin */
+	pvs_bins = 1 << pvs_fuse[2];
+	stripe_size = cpr_vreg->num_fuse_corners;
+	tmp = kzalloc(sizeof(u32) * pvs_bins * stripe_size, GFP_KERNEL);
+	if (!tmp) {
+		cpr_err(cpr_vreg, "memory alloc failed\n");
+		return -ENOMEM;
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,pvs-voltage-table",
+						tmp, pvs_bins * stripe_size);
+	if (rc < 0) {
+		cpr_err(cpr_vreg, "pvs-voltage-table missing: rc=%d\n", rc);
+		kfree(tmp);
+		return rc;
+	}
+
+	/* i is 1-based, hence the "- 1" when indexing the flat table */
+	for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++)
+		cpr_vreg->pvs_corner_v[i] = tmp[cpr_vreg->pvs_bin *
+						stripe_size + i - 1];
+	kfree(tmp);
+
+	rc = cpr_adjust_init_voltages(of_node, cpr_vreg);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+/*
+ * The function reads VDD_MX dependency parameters from device node.
+ * Select the qcom,vdd-mx-corner-map length equal to either num_fuse_corners
+ * or num_corners based on selected vdd-mx-vmin-method.
+ */
+static int cpr_parse_vdd_mx_parameters(struct platform_device *pdev,
+					struct cpr_regulator *cpr_vreg)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	u32 corner_map_len;
+	int rc, len, size;
+
+	rc = of_property_read_u32(of_node, "qcom,vdd-mx-vmax",
+				&cpr_vreg->vdd_mx_vmax);
+	if (rc < 0) {
+		cpr_err(cpr_vreg, "vdd-mx-vmax missing: rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,vdd-mx-vmin-method",
+			 &cpr_vreg->vdd_mx_vmin_method);
+	if (rc < 0) {
+		cpr_err(cpr_vreg, "vdd-mx-vmin-method missing: rc=%d\n",
+			rc);
+		return rc;
+	}
+	if (cpr_vreg->vdd_mx_vmin_method > VDD_MX_VMIN_APC_CORNER_MAP) {
+		cpr_err(cpr_vreg, "Invalid vdd-mx-vmin-method(%d)\n",
+			cpr_vreg->vdd_mx_vmin_method);
+		return -EINVAL;
+	}
+
+	/* Only the corner-map based methods need the map property at all */
+	switch (cpr_vreg->vdd_mx_vmin_method) {
+	case VDD_MX_VMIN_APC_FUSE_CORNER_MAP:
+		corner_map_len = cpr_vreg->num_fuse_corners;
+		break;
+	case VDD_MX_VMIN_APC_CORNER_MAP:
+		corner_map_len = cpr_vreg->num_corners;
+		break;
+	default:
+		cpr_vreg->vdd_mx_corner_map = NULL;
+		return 0;
+	}
+
+	if (!of_find_property(of_node, "qcom,vdd-mx-corner-map", &len)) {
+		cpr_err(cpr_vreg, "qcom,vdd-mx-corner-map missing");
+		return -EINVAL;
+	}
+
+	size = len / sizeof(u32);
+	if (size != corner_map_len) {
+		cpr_err(cpr_vreg,
+			"qcom,vdd-mx-corner-map length=%d is invalid: required:%u\n",
+			size, corner_map_len);
+		return -EINVAL;
+	}
+
+	/* Allocate one extra slot: the map is stored 1-based below */
+	cpr_vreg->vdd_mx_corner_map = devm_kzalloc(&pdev->dev,
+		(corner_map_len + 1) * sizeof(*cpr_vreg->vdd_mx_corner_map),
+			GFP_KERNEL);
+	if (!cpr_vreg->vdd_mx_corner_map) {
+		cpr_err(cpr_vreg,
+			"Can't allocate memory for cpr_vreg->vdd_mx_corner_map\n");
+		return -ENOMEM;
+	}
+
+	rc = of_property_read_u32_array(of_node,
+				"qcom,vdd-mx-corner-map",
+				&cpr_vreg->vdd_mx_corner_map[1],
+				corner_map_len);
+	if (rc)
+		cpr_err(cpr_vreg,
+			"read qcom,vdd-mx-corner-map failed, rc = %d\n", rc);
+
+	return rc;
+}
+
#define MAX_CHARS_PER_INT	10

/*
 * The initial voltage for each fuse corner may be determined by one of two
 * possible styles of fuse. If qcom,cpr-fuse-init-voltage is present, then
 * the initial voltages are encoded in a fuse for each fuse corner. If it is
 * not present, then the initial voltages are all determined using a single
 * PVS bin fuse value.
 *
 * Returns 0 on success or a negative errno if a required device tree
 * property is missing or invalid.
 */
static int cpr_pvs_init(struct platform_device *pdev,
			       struct cpr_regulator *cpr_vreg)
{
	struct device_node *of_node = pdev->dev.of_node;
	int highest_fuse_corner = cpr_vreg->num_fuse_corners;
	int i, rc, pos;
	size_t buflen;
	char *buf;

	/* The APC supply step size is required for later voltage rounding. */
	rc = of_property_read_u32(of_node, "qcom,cpr-apc-volt-step",
					&cpr_vreg->step_volt);
	if (rc < 0) {
		cpr_err(cpr_vreg, "read cpr-apc-volt-step failed, rc = %d\n",
			rc);
		return rc;
	} else if (cpr_vreg->step_volt == 0) {
		/* A zero step size would break step-based voltage rounding. */
		cpr_err(cpr_vreg, "apc voltage step size can't be set to 0.\n");
		return -EINVAL;
	}

	/* Choose the fuse decoding style; see the block comment above. */
	if (of_find_property(of_node, "qcom,cpr-fuse-init-voltage", NULL)) {
		rc = cpr_pvs_per_corner_init(of_node, cpr_vreg);
		if (rc < 0) {
			cpr_err(cpr_vreg, "get pvs per corner failed, rc = %d",
				rc);
			return rc;
		}
	} else {
		rc = cpr_pvs_single_bin_init(of_node, cpr_vreg);
		if (rc < 0) {
			cpr_err(cpr_vreg,
				"get pvs from single bin failed, rc = %d", rc);
			return rc;
		}
	}

	/* Optional workaround: bump initial voltages by DT-specified deltas. */
	if (cpr_vreg->flags & FLAGS_UPLIFT_QUOT_VOLT) {
		rc = cpr_voltage_uplift_wa_inc_volt(cpr_vreg, of_node);
		if (rc < 0) {
			cpr_err(cpr_vreg, "pvs volt uplift wa apply failed: %d",
				rc);
			return rc;
		}
	}

	/*
	 * Allow the highest fuse corner's PVS voltage to define the ceiling
	 * voltage for that corner in order to support SoC's in which variable
	 * ceiling values are required.
	 */
	if (cpr_vreg->pvs_corner_v[highest_fuse_corner] >
		cpr_vreg->fuse_ceiling_volt[highest_fuse_corner])
		cpr_vreg->fuse_ceiling_volt[highest_fuse_corner] =
			cpr_vreg->pvs_corner_v[highest_fuse_corner];

	/*
	 * Restrict all fuse corner PVS voltages based upon per corner
	 * ceiling and floor voltages.
	 */
	for (i = CPR_FUSE_CORNER_MIN; i <= highest_fuse_corner; i++)
		if (cpr_vreg->pvs_corner_v[i] > cpr_vreg->fuse_ceiling_volt[i])
			cpr_vreg->pvs_corner_v[i]
				= cpr_vreg->fuse_ceiling_volt[i];
		else if (cpr_vreg->pvs_corner_v[i]
				< cpr_vreg->fuse_floor_volt[i])
			cpr_vreg->pvs_corner_v[i]
				= cpr_vreg->fuse_floor_volt[i];

	cpr_vreg->ceiling_max
		= cpr_vreg->fuse_ceiling_volt[highest_fuse_corner];

	/*
	 * Log ceiling, floor, and initial voltages since they are critical for
	 * all CPR debugging.
	 */
	buflen = cpr_vreg->num_fuse_corners * (MAX_CHARS_PER_INT + 2)
			* sizeof(*buf);
	buf = kzalloc(buflen, GFP_KERNEL);
	if (buf == NULL) {
		/* Logging is best-effort; allocation failure is not fatal. */
		cpr_err(cpr_vreg, "Could not allocate memory for corner voltage logging\n");
		return 0;
	}

	for (i = CPR_FUSE_CORNER_MIN, pos = 0; i <= highest_fuse_corner; i++)
		pos += scnprintf(buf + pos, buflen - pos, "%u%s",
				cpr_vreg->pvs_corner_v[i],
				i < highest_fuse_corner ? " " : "");
	cpr_info(cpr_vreg, "pvs voltage: [%s] uV\n", buf);

	for (i = CPR_FUSE_CORNER_MIN, pos = 0; i <= highest_fuse_corner; i++)
		pos += scnprintf(buf + pos, buflen - pos, "%d%s",
				cpr_vreg->fuse_ceiling_volt[i],
				i < highest_fuse_corner ? " " : "");
	cpr_info(cpr_vreg, "ceiling voltage: [%s] uV\n", buf);

	for (i = CPR_FUSE_CORNER_MIN, pos = 0; i <= highest_fuse_corner; i++)
		pos += scnprintf(buf + pos, buflen - pos, "%d%s",
				cpr_vreg->fuse_floor_volt[i],
				i < highest_fuse_corner ? " " : "");
	cpr_info(cpr_vreg, "floor voltage: [%s] uV\n", buf);

	kfree(buf);
	return 0;
}
+
/*
 * Read the u32 device tree property "qcom,<cpr_property>" into *(cpr_config).
 * The read is skipped if rc is already nonzero, so a chain of these
 * invocations stops at the first failed (missing) property and preserves
 * that first error code in rc.
 */
#define CPR_PROP_READ_U32(cpr_vreg, of_node, cpr_property, cpr_config, rc) \
do {									\
	if (!rc) {							\
		rc = of_property_read_u32(of_node,			\
				"qcom," cpr_property,			\
				cpr_config);				\
		if (rc) {						\
			cpr_err(cpr_vreg, "Missing " #cpr_property	\
				": rc = %d\n", rc);			\
		}							\
	}								\
} while (0)
+
/*
 * Acquire the regulator supplies that CPR depends on: the APC supply
 * (tried under each candidate name in vdd_apc_name[] until one resolves)
 * and, when a vdd-mx-supply property exists, the MX supply.
 *
 * Returns 0 on success, -EPROBE_DEFER when a supply's driver has not
 * probed yet, or another negative errno on failure.
 */
static int cpr_apc_init(struct platform_device *pdev,
			       struct cpr_regulator *cpr_vreg)
{
	struct device_node *of_node = pdev->dev.of_node;
	int i, rc = 0;

	/* Try each candidate APC supply name; stop at the first match. */
	for (i = 0; i < ARRAY_SIZE(vdd_apc_name); i++) {
		cpr_vreg->vdd_apc = devm_regulator_get_optional(&pdev->dev,
					vdd_apc_name[i]);
		rc = PTR_RET(cpr_vreg->vdd_apc);
		if (!IS_ERR_OR_NULL(cpr_vreg->vdd_apc))
			break;
	}

	/* -EPROBE_DEFER is routine during boot; only log other errors. */
	if (rc) {
		if (rc != -EPROBE_DEFER)
			cpr_err(cpr_vreg, "devm_regulator_get: rc=%d\n", rc);
		return rc;
	}

	/* Check dependencies */
	if (of_find_property(of_node, "vdd-mx-supply", NULL)) {
		cpr_vreg->vdd_mx = devm_regulator_get(&pdev->dev, "vdd-mx");
		if (IS_ERR_OR_NULL(cpr_vreg->vdd_mx)) {
			rc = PTR_RET(cpr_vreg->vdd_mx);
			if (rc != -EPROBE_DEFER)
				cpr_err(cpr_vreg,
					"devm_regulator_get: vdd-mx: rc=%d\n",
					rc);
			return rc;
		}
	}

	return 0;
}
+
+static void cpr_apc_exit(struct cpr_regulator *cpr_vreg)
+{
+	if (cpr_vreg->vreg_enabled) {
+		regulator_disable(cpr_vreg->vdd_apc);
+
+		if (cpr_vreg->vdd_mx)
+			regulator_disable(cpr_vreg->vdd_mx);
+	}
+}
+
+static int cpr_voltage_uplift_wa_inc_quot(struct cpr_regulator *cpr_vreg,
+					struct device_node *of_node)
+{
+	u32 delta_quot[3];
+	int rc, i;
+
+	rc = of_property_read_u32_array(of_node,
+			"qcom,cpr-uplift-quotient", delta_quot, 3);
+	if (rc < 0) {
+		cpr_err(cpr_vreg, "cpr-uplift-quotient is missing: %d", rc);
+		return rc;
+	}
+	for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++)
+		cpr_vreg->cpr_fuse_target_quot[i] += delta_quot[i-1];
+	return rc;
+}
+
+static void cpr_parse_pvs_version_fuse(struct cpr_regulator *cpr_vreg,
+				struct device_node *of_node)
+{
+	int rc;
+	u64 fuse_bits;
+	u32 fuse_sel[4];
+
+	rc = of_property_read_u32_array(of_node,
+			"qcom,pvs-version-fuse-sel", fuse_sel, 4);
+	if (!rc) {
+		fuse_bits = cpr_read_efuse_row(cpr_vreg,
+				fuse_sel[0], fuse_sel[3]);
+		cpr_vreg->pvs_version = (fuse_bits >> fuse_sel[1]) &
+			((1 << fuse_sel[2]) - 1);
+		cpr_info(cpr_vreg, "[row: %d]: 0x%llx, pvs_version = %d\n",
+				fuse_sel[0], fuse_bits, cpr_vreg->pvs_version);
+	} else {
+		cpr_vreg->pvs_version = 0;
+	}
+}
+
+/**
+ * cpr_get_open_loop_voltage() - fill the open_loop_volt array with linearly
+ *				 interpolated open-loop CPR voltage values.
+ * @cpr_vreg:	Handle to the cpr-regulator device
+ * @dev:	Device pointer for the cpr-regulator device
+ * @corner_max:	Array of length (cpr_vreg->num_fuse_corners + 1) which maps from
+ *		fuse corners to the highest virtual corner corresponding to a
+ *		given fuse corner
+ * @freq_map:	Array of length (cpr_vreg->num_corners + 1) which maps from
+ *		virtual corners to frequencies in Hz.
+ * @maps_valid:	Boolean which indicates if the values in corner_max and freq_map
+ *		are valid.  If they are not valid, then the open_loop_volt
+ *		values are not interpolated.
+ */
+static int cpr_get_open_loop_voltage(struct cpr_regulator *cpr_vreg,
+		struct device *dev, const u32 *corner_max, const u32 *freq_map,
+		bool maps_valid)
+{
+	int rc = 0;
+	int i, j;
+	u64 volt_high, volt_low, freq_high, freq_low, freq, temp, temp_limit;
+	u32 *max_factor = NULL;
+
+	cpr_vreg->open_loop_volt = devm_kzalloc(dev,
+			sizeof(int) * (cpr_vreg->num_corners + 1), GFP_KERNEL);
+	if (!cpr_vreg->open_loop_volt) {
+		cpr_err(cpr_vreg,
+			"Can't allocate memory for cpr_vreg->open_loop_volt\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * Set open loop voltage to be equal to per-fuse-corner initial voltage
+	 * by default.  This ensures that the open loop voltage is valid for
+	 * all virtual corners even if some virtual corner to frequency mappings
+	 * are missing.  It also ensures that the voltage is valid for the
+	 * higher corners not utilized by a given speed-bin.
+	 */
+	for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++)
+		cpr_vreg->open_loop_volt[i]
+			= cpr_vreg->pvs_corner_v[cpr_vreg->corner_map[i]];
+
+	if (!maps_valid || !corner_max || !freq_map
+	    || !of_find_property(dev->of_node,
+				 "qcom,cpr-voltage-scaling-factor-max", NULL)) {
+		/* Not using interpolation */
+		return 0;
+	}
+
+	max_factor
+	       = kzalloc(sizeof(*max_factor) * (cpr_vreg->num_fuse_corners + 1),
+			 GFP_KERNEL);
+	if (!max_factor) {
+		cpr_err(cpr_vreg, "Could not allocate memory for max_factor array\n");
+		return -ENOMEM;
+	}
+
+	rc = of_property_read_u32_array(dev->of_node,
+			"qcom,cpr-voltage-scaling-factor-max",
+			&max_factor[CPR_FUSE_CORNER_MIN],
+			cpr_vreg->num_fuse_corners);
+	if (rc) {
+		cpr_debug(cpr_vreg, "failed to read qcom,cpr-voltage-scaling-factor-max; initial voltage interpolation not possible\n");
+		kfree(max_factor);
+		return 0;
+	}
+
+	for (j = CPR_FUSE_CORNER_MIN + 1; j <= cpr_vreg->num_fuse_corners;
+	    j++) {
+		freq_high = freq_map[corner_max[j]];
+		freq_low = freq_map[corner_max[j - 1]];
+		volt_high = cpr_vreg->pvs_corner_v[j];
+		volt_low = cpr_vreg->pvs_corner_v[j - 1];
+		if (freq_high <= freq_low || volt_high <= volt_low)
+			continue;
+
+		for (i = corner_max[j - 1] + 1; i < corner_max[j]; i++) {
+			freq = freq_map[i];
+			if (freq_high <= freq)
+				continue;
+
+			temp = (freq_high - freq) * (volt_high - volt_low);
+			do_div(temp, (u32)(freq_high - freq_low));
+
+			/*
+			 * max_factor[j] has units of uV/MHz while freq values
+			 * have units of Hz.  Divide by 1000000 to convert.
+			 */
+			temp_limit = (freq_high - freq) * max_factor[j];
+			do_div(temp_limit, 1000000);
+
+			cpr_vreg->open_loop_volt[i]
+				= volt_high - min(temp, temp_limit);
+			cpr_vreg->open_loop_volt[i]
+				= DIV_ROUND_UP(cpr_vreg->open_loop_volt[i],
+						cpr_vreg->step_volt)
+					* cpr_vreg->step_volt;
+		}
+	}
+
+	kfree(max_factor);
+	return 0;
+}
+
+/*
+ * Limit the per-virtual-corner open-loop voltages using the per-virtual-corner
+ * ceiling and floor voltage values.  This must be called only after the
+ * open_loop_volt, ceiling, and floor arrays have all been initialized.
+ */
+static int cpr_limit_open_loop_voltage(struct cpr_regulator *cpr_vreg)
+{
+	int i;
+
+	for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
+		if (cpr_vreg->open_loop_volt[i] > cpr_vreg->ceiling_volt[i])
+			cpr_vreg->open_loop_volt[i] = cpr_vreg->ceiling_volt[i];
+		else if (cpr_vreg->open_loop_volt[i] < cpr_vreg->floor_volt[i])
+			cpr_vreg->open_loop_volt[i] = cpr_vreg->floor_volt[i];
+	}
+
+	return 0;
+}
+
+/*
+ * Fill an OPP table for the cpr-regulator device struct with pairs of
+ * <virtual voltage corner number, open loop voltage> tuples.
+ */
+static int cpr_populate_opp_table(struct cpr_regulator *cpr_vreg,
+				struct device *dev)
+{
+	int i, rc = 0;
+
+	for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
+		rc |= dev_pm_opp_add(dev, i, cpr_vreg->open_loop_volt[i]);
+		if (rc)
+			cpr_debug(cpr_vreg, "could not add OPP entry <%d, %d>, rc=%d\n",
+				i, cpr_vreg->open_loop_volt[i], rc);
+	}
+	if (rc)
+		cpr_err(cpr_vreg, "adding OPP entry failed - OPP may not be enabled, rc=%d\n",
+				rc);
+
+	return 0;
+}
+
+/*
+ * Conditionally reduce the per-virtual-corner ceiling voltages if certain
+ * device tree flags are present.  This must be called only after the ceiling
+ * array has been initialized and the open_loop_volt array values have been
+ * initialized and limited to the existing floor to ceiling voltage range.
+ */
+static int cpr_reduce_ceiling_voltage(struct cpr_regulator *cpr_vreg,
+				struct device *dev)
+{
+	bool reduce_to_fuse_open_loop, reduce_to_interpolated_open_loop;
+	int i;
+
+	reduce_to_fuse_open_loop = of_property_read_bool(dev->of_node,
+				"qcom,cpr-init-voltage-as-ceiling");
+	reduce_to_interpolated_open_loop = of_property_read_bool(dev->of_node,
+				"qcom,cpr-scaled-init-voltage-as-ceiling");
+
+	if (!reduce_to_fuse_open_loop && !reduce_to_interpolated_open_loop)
+		return 0;
+
+	for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
+		if (reduce_to_interpolated_open_loop &&
+		    cpr_vreg->open_loop_volt[i] < cpr_vreg->ceiling_volt[i])
+			cpr_vreg->ceiling_volt[i] = cpr_vreg->open_loop_volt[i];
+		else if (reduce_to_fuse_open_loop &&
+				cpr_vreg->pvs_corner_v[cpr_vreg->corner_map[i]]
+				< cpr_vreg->ceiling_volt[i])
+			cpr_vreg->ceiling_volt[i]
+				= max((u32)cpr_vreg->floor_volt[i],
+			       cpr_vreg->pvs_corner_v[cpr_vreg->corner_map[i]]);
+		cpr_debug(cpr_vreg, "lowered ceiling[%d] = %d uV\n",
+			i, cpr_vreg->ceiling_volt[i]);
+	}
+
+	return 0;
+}
+
/*
 * Apply static, per-fuse-corner adjustments from the optional
 * qcom,cpr-quot-offset-adjustment property to fuse_quot_offset[].  When a
 * fuse version map exists, the property holds one tuple of
 * num_fuse_corners values per map entry and the matched tuple is used.
 *
 * Returns 0 on success (including when no adjustment applies) or a
 * negative errno on malformed or unreadable property data.
 */
static int cpr_adjust_target_quot_offsets(struct platform_device *pdev,
					struct cpr_regulator *cpr_vreg)
{
	struct device_node *of_node = pdev->dev.of_node;
	int tuple_count, tuple_match, i;
	u32 index;
	u32 quot_offset_adjust = 0;
	int len = 0;
	int rc = 0;
	char *quot_offset_str;

	quot_offset_str = "qcom,cpr-quot-offset-adjustment";
	if (!of_find_property(of_node, quot_offset_str, &len)) {
		/* No static quotient adjustment needed. */
		return 0;
	}

	if (cpr_vreg->cpr_fuse_map_count) {
		if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH) {
			/* No matching index to use for quotient adjustment. */
			return 0;
		}
		tuple_count = cpr_vreg->cpr_fuse_map_count;
		tuple_match = cpr_vreg->cpr_fuse_map_match;
	} else {
		/* No fuse version map: the property holds a single tuple. */
		tuple_count = 1;
		tuple_match = 0;
	}

	/* The property must hold exactly one u32 per corner per tuple. */
	if (len != cpr_vreg->num_fuse_corners * tuple_count * sizeof(u32)) {
		cpr_err(cpr_vreg, "%s length=%d is invalid\n", quot_offset_str,
			len);
		return -EINVAL;
	}

	for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++) {
		/* Index into the flattened array at the matched tuple. */
		index = tuple_match * cpr_vreg->num_fuse_corners
				+ i - CPR_FUSE_CORNER_MIN;
		rc = of_property_read_u32_index(of_node, quot_offset_str, index,
			&quot_offset_adjust);
		if (rc) {
			cpr_err(cpr_vreg, "could not read %s index %u, rc=%d\n",
				quot_offset_str, index, rc);
			return rc;
		}

		if (quot_offset_adjust) {
			cpr_vreg->fuse_quot_offset[i] += quot_offset_adjust;
			cpr_info(cpr_vreg, "Corner[%d]: adjusted target quot = %d\n",
				i, cpr_vreg->fuse_quot_offset[i]);
		}
	}

	return rc;
}
+
/*
 * Read the per-fuse-corner quotient offset fuses (primary or redundant set,
 * depending on cpr_fuse_redundant) into fuse_quot_offset[], optionally
 * scaling each value by the multipliers in
 * qcom,cpr-fuse-quot-offset-scale, and then apply any static adjustments.
 *
 * The quot-offset property holds 4 u32s (row, bit offset, bit count, row
 * select) per fuse corner.  Absence of the property is not an error.
 *
 * Returns 0 on success or a negative errno on allocation or parse failure.
 */
static int cpr_get_fuse_quot_offset(struct cpr_regulator *cpr_vreg,
					struct platform_device *pdev,
					struct cpr_quot_scale *quot_scale)
{
	struct device *dev = &pdev->dev;
	struct property *prop;
	u32 *fuse_sel, *tmp, *offset_multiplier = NULL;
	int rc = 0, i, size, len;
	char *quot_offset_str;

	quot_offset_str = cpr_vreg->cpr_fuse_redundant
			? "qcom,cpr-fuse-redun-quot-offset"
			: "qcom,cpr-fuse-quot-offset";

	prop = of_find_property(dev->of_node, quot_offset_str, NULL);
	if (!prop) {
		/* Optional property: skip quot offsets entirely. */
		cpr_debug(cpr_vreg, "%s not present\n", quot_offset_str);
		return 0;
	} else {
		/* Expect a 4-u32 fuse descriptor per fuse corner. */
		size = prop->length / sizeof(u32);
		if (size != cpr_vreg->num_fuse_corners * 4) {
			cpr_err(cpr_vreg, "fuse position for quot offset is invalid\n");
			return -EINVAL;
		}
	}

	fuse_sel = kzalloc(sizeof(u32) * size, GFP_KERNEL);
	if (!fuse_sel) {
		cpr_err(cpr_vreg, "memory alloc failed.\n");
		return -ENOMEM;
	}

	rc = of_property_read_u32_array(dev->of_node, quot_offset_str,
			fuse_sel, size);

	if (rc < 0) {
		cpr_err(cpr_vreg, "read %s failed, rc = %d\n", quot_offset_str,
			rc);
		kfree(fuse_sel);
		return rc;
	}

	cpr_vreg->fuse_quot_offset = devm_kzalloc(dev,
			sizeof(u32) * (cpr_vreg->num_fuse_corners + 1),
			GFP_KERNEL);
	if (!cpr_vreg->fuse_quot_offset) {
		cpr_err(cpr_vreg, "Can't allocate memory for cpr_vreg->fuse_quot_offset\n");
		kfree(fuse_sel);
		return -ENOMEM;
	}

	/* Optional per-corner scaling multipliers for the fused offsets. */
	if (!of_find_property(dev->of_node,
				"qcom,cpr-fuse-quot-offset-scale", &len)) {
		cpr_debug(cpr_vreg, "qcom,cpr-fuse-quot-offset-scale not present\n");
	} else {
		if (len != cpr_vreg->num_fuse_corners * sizeof(u32)) {
			cpr_err(cpr_vreg, "the size of qcom,cpr-fuse-quot-offset-scale is invalid\n");
			kfree(fuse_sel);
			return -EINVAL;
		}

		offset_multiplier = kzalloc(sizeof(*offset_multiplier)
					* (cpr_vreg->num_fuse_corners + 1),
					GFP_KERNEL);
		if (!offset_multiplier) {
			cpr_err(cpr_vreg, "memory alloc failed.\n");
			kfree(fuse_sel);
			return -ENOMEM;
		}

		rc = of_property_read_u32_array(dev->of_node,
						"qcom,cpr-fuse-quot-offset-scale",
						&offset_multiplier[1],
						cpr_vreg->num_fuse_corners);
		if (rc < 0) {
			cpr_err(cpr_vreg, "read qcom,cpr-fuse-quot-offset-scale failed, rc = %d\n",
				rc);
			kfree(fuse_sel);
			goto out;
		}
	}

	/*
	 * fuse_sel is advanced 4 entries per corner below; tmp keeps the
	 * original pointer so the allocation can still be freed.
	 */
	tmp = fuse_sel;
	for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++) {
		cpr_vreg->fuse_quot_offset[i] = cpr_read_efuse_param(cpr_vreg,
					fuse_sel[0], fuse_sel[1], fuse_sel[2],
					fuse_sel[3]);
		if (offset_multiplier)
			cpr_vreg->fuse_quot_offset[i] *= offset_multiplier[i];
		fuse_sel += 4;
	}

	rc = cpr_adjust_target_quot_offsets(pdev, cpr_vreg);
	kfree(tmp);
out:
	/* kfree(NULL) is a no-op when no scale property was present. */
	kfree(offset_multiplier);
	return rc;
}
+
+/*
+ * Adjust the per-virtual-corner open loop voltage with an offset specfied by a
+ * device-tree property. This must be called after open-loop voltage scaling.
+ */
+static int cpr_virtual_corner_voltage_adjust(struct cpr_regulator *cpr_vreg,
+						struct device *dev)
+{
+	char *prop_name = "qcom,cpr-virtual-corner-init-voltage-adjustment";
+	int i, rc, tuple_count, tuple_match, index, len;
+	u32 voltage_adjust;
+
+	if (!of_find_property(dev->of_node, prop_name, &len)) {
+		cpr_debug(cpr_vreg, "%s not specified\n", prop_name);
+		return 0;
+	}
+
+	if (cpr_vreg->cpr_fuse_map_count) {
+		if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH) {
+			/* No matching index to use for voltage adjustment. */
+			return 0;
+		}
+		tuple_count = cpr_vreg->cpr_fuse_map_count;
+		tuple_match = cpr_vreg->cpr_fuse_map_match;
+	} else {
+		tuple_count = 1;
+		tuple_match = 0;
+	}
+
+	if (len != cpr_vreg->num_corners * tuple_count * sizeof(u32)) {
+		cpr_err(cpr_vreg, "%s length=%d is invalid\n", prop_name,
+			len);
+		return -EINVAL;
+	}
+
+	for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
+		index = tuple_match * cpr_vreg->num_corners
+				+ i - CPR_CORNER_MIN;
+		rc = of_property_read_u32_index(dev->of_node, prop_name,
+						index, &voltage_adjust);
+		if (rc) {
+			cpr_err(cpr_vreg, "could not read %s index %u, rc=%d\n",
+				prop_name, index, rc);
+			return rc;
+		}
+
+		if (voltage_adjust) {
+			cpr_vreg->open_loop_volt[i] += (int)voltage_adjust;
+			cpr_info(cpr_vreg, "corner=%d adjusted open-loop voltage=%d\n",
+				i, cpr_vreg->open_loop_volt[i]);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Adjust the per-virtual-corner quot with an offset specfied by a
+ * device-tree property. This must be called after the quot-scaling adjustments
+ * are completed.
+ */
+static int cpr_virtual_corner_quot_adjust(struct cpr_regulator *cpr_vreg,
+						struct device *dev)
+{
+	char *prop_name = "qcom,cpr-virtual-corner-quotient-adjustment";
+	int i, rc, tuple_count, tuple_match, index, len;
+	u32 quot_adjust;
+
+	if (!of_find_property(dev->of_node, prop_name, &len)) {
+		cpr_debug(cpr_vreg, "%s not specified\n", prop_name);
+		return 0;
+	}
+
+	if (cpr_vreg->cpr_fuse_map_count) {
+		if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH) {
+			/* No matching index to use for quotient adjustment. */
+			return 0;
+		}
+		tuple_count = cpr_vreg->cpr_fuse_map_count;
+		tuple_match = cpr_vreg->cpr_fuse_map_match;
+	} else {
+		tuple_count = 1;
+		tuple_match = 0;
+	}
+
+	if (len != cpr_vreg->num_corners * tuple_count * sizeof(u32)) {
+		cpr_err(cpr_vreg, "%s length=%d is invalid\n", prop_name,
+			len);
+		return -EINVAL;
+	}
+
+	for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
+		index = tuple_match * cpr_vreg->num_corners
+				+ i - CPR_CORNER_MIN;
+		rc = of_property_read_u32_index(dev->of_node, prop_name,
+						index, &quot_adjust);
+		if (rc) {
+			cpr_err(cpr_vreg, "could not read %s index %u, rc=%d\n",
+				prop_name, index, rc);
+			return rc;
+		}
+
+		if (quot_adjust) {
+			cpr_vreg->quot_adjust[i] -= (int)quot_adjust;
+			cpr_info(cpr_vreg, "corner=%d adjusted quotient=%d\n",
+					i,
+			cpr_vreg->cpr_fuse_target_quot[cpr_vreg->corner_map[i]]
+						- cpr_vreg->quot_adjust[i]);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * cpr_get_corner_quot_adjustment() -- get the quot_adjust for each corner.
+ *
+ * Get the virtual corner to fuse corner mapping and virtual corner to APC clock
+ * frequency mapping from device tree.
+ * Calculate the quotient adjustment scaling factor for those corners mapping to
+ * all fuse corners except for the lowest one using linear interpolation.
+ * Calculate the quotient adjustment for each of these virtual corners using the
+ * min of the calculated scaling factor and the constant max scaling factor
+ * defined for each fuse corner in device tree.
+ */
+static int cpr_get_corner_quot_adjustment(struct cpr_regulator *cpr_vreg,
+					struct device *dev)
+{
+	int rc = 0;
+	int highest_fuse_corner = cpr_vreg->num_fuse_corners;
+	int i, j, size;
+	struct property *prop;
+	bool corners_mapped, match_found;
+	u32 *tmp, *freq_map = NULL;
+	u32 corner, freq_corner;
+	u32 *freq_max = NULL;
+	u32 *scaling = NULL;
+	u32 *max_factor = NULL;
+	u32 *corner_max = NULL;
+	bool maps_valid = false;
+
+	prop = of_find_property(dev->of_node, "qcom,cpr-corner-map", NULL);
+
+	if (prop) {
+		size = prop->length / sizeof(u32);
+		corners_mapped = true;
+	} else {
+		size = cpr_vreg->num_fuse_corners;
+		corners_mapped = false;
+	}
+
+	cpr_vreg->corner_map = devm_kzalloc(dev, sizeof(int) * (size + 1),
+					GFP_KERNEL);
+	if (!cpr_vreg->corner_map) {
+		cpr_err(cpr_vreg,
+			"Can't allocate memory for cpr_vreg->corner_map\n");
+		return -ENOMEM;
+	}
+	cpr_vreg->num_corners = size;
+
+	cpr_vreg->quot_adjust = devm_kzalloc(dev,
+			sizeof(u32) * (cpr_vreg->num_corners + 1),
+			GFP_KERNEL);
+	if (!cpr_vreg->quot_adjust) {
+		cpr_err(cpr_vreg,
+			"Can't allocate memory for cpr_vreg->quot_adjust\n");
+		return -ENOMEM;
+	}
+
+	if (!corners_mapped) {
+		for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners;
+		     i++)
+			cpr_vreg->corner_map[i] = i;
+		goto free_arrays;
+	} else {
+		rc = of_property_read_u32_array(dev->of_node,
+			"qcom,cpr-corner-map", &cpr_vreg->corner_map[1], size);
+
+		if (rc) {
+			cpr_err(cpr_vreg,
+				"qcom,cpr-corner-map missing, rc = %d\n", rc);
+			return rc;
+		}
+
+		/*
+		 * Verify that the virtual corner to fuse corner mapping is
+		 * valid.
+		 */
+		for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
+			if (cpr_vreg->corner_map[i] > cpr_vreg->num_fuse_corners
+			    || cpr_vreg->corner_map[i] < CPR_FUSE_CORNER_MIN) {
+				cpr_err(cpr_vreg, "qcom,cpr-corner-map contains an element %d which isn't in the allowed range [%d, %d]\n",
+					cpr_vreg->corner_map[i],
+					CPR_FUSE_CORNER_MIN,
+					cpr_vreg->num_fuse_corners);
+				return -EINVAL;
+			}
+		}
+	}
+
+	prop = of_find_property(dev->of_node,
+			"qcom,cpr-speed-bin-max-corners", NULL);
+	if (!prop) {
+		cpr_debug(cpr_vreg, "qcom,cpr-speed-bin-max-corner missing\n");
+		goto free_arrays;
+	}
+
+	size = prop->length / sizeof(u32);
+	tmp = kzalloc(size * sizeof(u32), GFP_KERNEL);
+	if (!tmp) {
+		cpr_err(cpr_vreg, "memory alloc failed\n");
+		return -ENOMEM;
+	}
+	rc = of_property_read_u32_array(dev->of_node,
+		"qcom,cpr-speed-bin-max-corners", tmp, size);
+	if (rc < 0) {
+		kfree(tmp);
+		cpr_err(cpr_vreg,
+			"get cpr-speed-bin-max-corners failed, rc = %d\n", rc);
+		return rc;
+	}
+
+	corner_max = kzalloc((cpr_vreg->num_fuse_corners + 1)
+				* sizeof(*corner_max), GFP_KERNEL);
+	freq_max = kzalloc((cpr_vreg->num_fuse_corners + 1) * sizeof(*freq_max),
+				GFP_KERNEL);
+	if (corner_max == NULL || freq_max == NULL) {
+		cpr_err(cpr_vreg, "Could not allocate memory for quotient scaling arrays\n");
+		kfree(tmp);
+		rc = -ENOMEM;
+		goto free_arrays;
+	}
+
+	/*
+	 * Get the maximum virtual corner for each fuse corner based upon the
+	 * speed_bin and pvs_version values.
+	 */
+	match_found = false;
+	for (i = 0; i < size; i += cpr_vreg->num_fuse_corners + 2) {
+		if (tmp[i] != cpr_vreg->speed_bin &&
+		    tmp[i] != FUSE_PARAM_MATCH_ANY)
+			continue;
+		if (tmp[i + 1] != cpr_vreg->pvs_version &&
+		    tmp[i + 1] != FUSE_PARAM_MATCH_ANY)
+			continue;
+		for (j = CPR_FUSE_CORNER_MIN;
+		     j <= cpr_vreg->num_fuse_corners; j++)
+			corner_max[j] = tmp[i + 2 + j - CPR_FUSE_CORNER_MIN];
+		match_found = true;
+		break;
+	}
+	kfree(tmp);
+
+	if (!match_found) {
+		cpr_debug(cpr_vreg, "No quotient adjustment possible for speed bin=%u, pvs version=%u\n",
+			cpr_vreg->speed_bin, cpr_vreg->pvs_version);
+		goto free_arrays;
+	}
+
+	/* Verify that fuse corner to max virtual corner mapping is valid. */
+	for (i = CPR_FUSE_CORNER_MIN; i <= highest_fuse_corner; i++) {
+		if (corner_max[i] < CPR_CORNER_MIN
+		    || corner_max[i] > cpr_vreg->num_corners) {
+			cpr_err(cpr_vreg, "Invalid corner=%d in qcom,cpr-speed-bin-max-corners\n",
+				corner_max[i]);
+			goto free_arrays;
+		}
+	}
+
+	/*
+	 * Return success if the virtual corner values read from
+	 * qcom,cpr-speed-bin-max-corners property are incorrect.  This allows
+	 * the driver to continue to run without quotient scaling.
+	 */
+	for (i = CPR_FUSE_CORNER_MIN + 1; i <= highest_fuse_corner; i++) {
+		if (corner_max[i] <= corner_max[i - 1]) {
+			cpr_err(cpr_vreg, "fuse corner=%d (%u) should be larger than the fuse corner=%d (%u)\n",
+				i, corner_max[i], i - 1, corner_max[i - 1]);
+			goto free_arrays;
+		}
+	}
+
+	prop = of_find_property(dev->of_node,
+			"qcom,cpr-corner-frequency-map", NULL);
+	if (!prop) {
+		cpr_debug(cpr_vreg, "qcom,cpr-corner-frequency-map missing\n");
+		goto free_arrays;
+	}
+
+	size = prop->length / sizeof(u32);
+	tmp = kzalloc(sizeof(u32) * size, GFP_KERNEL);
+	if (!tmp) {
+		cpr_err(cpr_vreg, "memory alloc failed\n");
+		rc = -ENOMEM;
+		goto free_arrays;
+	}
+	rc = of_property_read_u32_array(dev->of_node,
+		"qcom,cpr-corner-frequency-map", tmp, size);
+	if (rc < 0) {
+		cpr_err(cpr_vreg,
+			"get cpr-corner-frequency-map failed, rc = %d\n", rc);
+		kfree(tmp);
+		goto free_arrays;
+	}
+	freq_map = kzalloc(sizeof(u32) * (cpr_vreg->num_corners + 1),
+			GFP_KERNEL);
+	if (!freq_map) {
+		cpr_err(cpr_vreg, "memory alloc for freq_map failed!\n");
+		kfree(tmp);
+		rc = -ENOMEM;
+		goto free_arrays;
+	}
+	for (i = 0; i < size; i += 2) {
+		corner = tmp[i];
+		if ((corner < 1) || (corner > cpr_vreg->num_corners)) {
+			cpr_err(cpr_vreg,
+				"corner should be in 1~%d range: %d\n",
+				cpr_vreg->num_corners, corner);
+			continue;
+		}
+		freq_map[corner] = tmp[i + 1];
+		cpr_debug(cpr_vreg,
+				"Frequency at virtual corner %d is %d Hz.\n",
+				corner, freq_map[corner]);
+	}
+	kfree(tmp);
+
+	prop = of_find_property(dev->of_node,
+			"qcom,cpr-quot-adjust-scaling-factor-max", NULL);
+	if (!prop) {
+		cpr_debug(cpr_vreg, "qcom,cpr-quot-adjust-scaling-factor-max missing\n");
+		rc = 0;
+		goto free_arrays;
+	}
+
+	size = prop->length / sizeof(u32);
+	if ((size != 1) && (size != cpr_vreg->num_fuse_corners)) {
+		cpr_err(cpr_vreg, "The size of qcom,cpr-quot-adjust-scaling-factor-max should be 1 or %d\n",
+			cpr_vreg->num_fuse_corners);
+		rc = 0;
+		goto free_arrays;
+	}
+
+	max_factor = kzalloc(sizeof(u32) * (cpr_vreg->num_fuse_corners + 1),
+			GFP_KERNEL);
+	if (!max_factor) {
+		cpr_err(cpr_vreg, "Could not allocate memory for max_factor array\n");
+		rc = -ENOMEM;
+		goto free_arrays;
+	}
+	/*
+	 * Leave max_factor[CPR_FUSE_CORNER_MIN ... highest_fuse_corner-1] = 0
+	 * if cpr-quot-adjust-scaling-factor-max is a single value in order to
+	 * maintain backward compatibility.
+	 */
+	i = (size == cpr_vreg->num_fuse_corners) ? CPR_FUSE_CORNER_MIN
+						 : highest_fuse_corner;
+	rc = of_property_read_u32_array(dev->of_node,
+			"qcom,cpr-quot-adjust-scaling-factor-max",
+			&max_factor[i], size);
+	if (rc < 0) {
+		cpr_debug(cpr_vreg, "could not read qcom,cpr-quot-adjust-scaling-factor-max, rc=%d\n",
+			rc);
+		rc = 0;
+		goto free_arrays;
+	}
+
+	/*
+	 * Get the quotient adjustment scaling factor, according to:
+	 * scaling = min(1000 * (QUOT(corner_N) - QUOT(corner_N-1))
+	 *		/ (freq(corner_N) - freq(corner_N-1)), max_factor)
+	 *
+	 * QUOT(corner_N):	quotient read from fuse for fuse corner N
+	 * QUOT(corner_N-1):	quotient read from fuse for fuse corner (N - 1)
+	 * freq(corner_N):	max frequency in MHz supported by fuse corner N
+	 * freq(corner_N-1):	max frequency in MHz supported by fuse corner
+	 *			 (N - 1)
+	 */
+
+	for (i = CPR_FUSE_CORNER_MIN; i <= highest_fuse_corner; i++)
+		freq_max[i] = freq_map[corner_max[i]];
+	for (i = CPR_FUSE_CORNER_MIN + 1; i <= highest_fuse_corner; i++) {
+		if (freq_max[i] <= freq_max[i - 1] || freq_max[i - 1] == 0) {
+			cpr_err(cpr_vreg, "fuse corner %d freq=%u should be larger than fuse corner %d freq=%u\n",
+			      i, freq_max[i], i - 1, freq_max[i - 1]);
+			rc = -EINVAL;
+			goto free_arrays;
+		}
+	}
+	scaling = kzalloc((cpr_vreg->num_fuse_corners + 1) * sizeof(*scaling),
+			GFP_KERNEL);
+	if (!scaling) {
+		cpr_err(cpr_vreg, "Could not allocate memory for scaling array\n");
+		rc = -ENOMEM;
+		goto free_arrays;
+	}
+	/* Convert corner max frequencies from Hz to MHz. */
+	for (i = CPR_FUSE_CORNER_MIN; i <= highest_fuse_corner; i++)
+		freq_max[i] /= 1000000;
+
+	for (i = CPR_FUSE_CORNER_MIN + 1; i <= highest_fuse_corner; i++) {
+		if (cpr_vreg->fuse_quot_offset &&
+			(cpr_vreg->cpr_fuse_ro_sel[i] !=
+				cpr_vreg->cpr_fuse_ro_sel[i - 1])) {
+			scaling[i] = 1000 * cpr_vreg->fuse_quot_offset[i]
+				/ (freq_max[i] - freq_max[i - 1]);
+		} else {
+			scaling[i] = 1000 * (cpr_vreg->cpr_fuse_target_quot[i]
+				      - cpr_vreg->cpr_fuse_target_quot[i - 1])
+				  / (freq_max[i] - freq_max[i - 1]);
+			if (cpr_vreg->cpr_fuse_target_quot[i]
+				< cpr_vreg->cpr_fuse_target_quot[i - 1])
+				scaling[i] = 0;
+		}
+		scaling[i] = min(scaling[i], max_factor[i]);
+		cpr_info(cpr_vreg, "fuse corner %d quotient adjustment scaling factor: %d.%03d\n",
+			i, scaling[i] / 1000, scaling[i] % 1000);
+	}
+
+	/*
+	 * Walk through the virtual corners mapped to each fuse corner
+	 * and calculate the quotient adjustment for each one using the
+	 * following formula:
+	 * quot_adjust = (freq_max - freq_corner) * scaling / 1000
+	 *
+	 * @freq_max: max frequency in MHz supported by the fuse corner
+	 * @freq_corner: frequency in MHz corresponding to the virtual corner
+	 */
+	for (j = CPR_FUSE_CORNER_MIN + 1; j <= highest_fuse_corner; j++) {
+		for (i = corner_max[j - 1] + 1; i < corner_max[j]; i++) {
+			freq_corner = freq_map[i] / 1000000; /* MHz */
+			if (freq_corner > 0) {
+				cpr_vreg->quot_adjust[i] = scaling[j] *
+				   (freq_max[j] - freq_corner) / 1000;
+			}
+		}
+	}
+
+	rc = cpr_virtual_corner_quot_adjust(cpr_vreg, dev);
+	if (rc) {
+		cpr_err(cpr_vreg, "count not adjust virtual-corner quot rc=%d\n",
+			rc);
+		goto free_arrays;
+	}
+
+	for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++)
+		cpr_info(cpr_vreg, "adjusted quotient[%d] = %d\n", i,
+			cpr_vreg->cpr_fuse_target_quot[cpr_vreg->corner_map[i]]
+			- cpr_vreg->quot_adjust[i]);
+
+	maps_valid = true;
+
+free_arrays:
+	if (!rc) {
+
+		rc = cpr_get_open_loop_voltage(cpr_vreg, dev, corner_max,
+						freq_map, maps_valid);
+		if (rc) {
+			cpr_err(cpr_vreg, "could not fill open loop voltage array, rc=%d\n",
+				rc);
+			goto free_arrays_1;
+		}
+
+		rc = cpr_virtual_corner_voltage_adjust(cpr_vreg, dev);
+		if (rc)
+			cpr_err(cpr_vreg, "count not adjust virtual-corner voltage rc=%d\n",
+				rc);
+	}
+
+free_arrays_1:
+	kfree(max_factor);
+	kfree(scaling);
+	kfree(freq_map);
+	kfree(corner_max);
+	kfree(freq_max);
+	return rc;
+}
+
+/*
+ * Check if the redundant set of CPR fuses should be used in place of the
+ * primary set and configure the cpr_fuse_redundant element accordingly.
+ */
+static int cpr_check_redundant(struct platform_device *pdev,
+		     struct cpr_regulator *cpr_vreg)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	u32 cpr_fuse_redun_sel[5];
+	int rc;
+
+	if (of_find_property(of_node, "qcom,cpr-fuse-redun-sel", NULL)) {
+		rc = of_property_read_u32_array(of_node,
+			"qcom,cpr-fuse-redun-sel", cpr_fuse_redun_sel, 5);
+		if (rc < 0) {
+			cpr_err(cpr_vreg, "qcom,cpr-fuse-redun-sel missing: rc=%d\n",
+				rc);
+			return rc;
+		}
+		cpr_vreg->cpr_fuse_redundant
+			= cpr_fuse_is_setting_expected(cpr_vreg,
+						cpr_fuse_redun_sel);
+	} else {
+		cpr_vreg->cpr_fuse_redundant = false;
+	}
+
+	if (cpr_vreg->cpr_fuse_redundant)
+		cpr_info(cpr_vreg, "using redundant fuse parameters\n");
+
+	return 0;
+}
+
+/*
+ * Read the CPR fuse revision from the eFuse location described by the
+ * optional "qcom,cpr-fuse-revision" property (a 4-element selector passed
+ * verbatim to cpr_read_efuse_param()).  When the property is absent the
+ * revision defaults to FUSE_REVISION_UNKNOWN.
+ */
+static int cpr_read_fuse_revision(struct platform_device *pdev,
+		     struct cpr_regulator *cpr_vreg)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	u32 fuse_sel[4];
+	int rc;
+
+	if (of_find_property(of_node, "qcom,cpr-fuse-revision", NULL)) {
+		rc = of_property_read_u32_array(of_node,
+			"qcom,cpr-fuse-revision", fuse_sel, 4);
+		if (rc < 0) {
+			cpr_err(cpr_vreg, "qcom,cpr-fuse-revision read failed: rc=%d\n",
+				rc);
+			return rc;
+		}
+		cpr_vreg->cpr_fuse_revision
+			= cpr_read_efuse_param(cpr_vreg, fuse_sel[0],
+					fuse_sel[1], fuse_sel[2], fuse_sel[3]);
+		cpr_info(cpr_vreg, "fuse revision = %d\n",
+			cpr_vreg->cpr_fuse_revision);
+	} else {
+		cpr_vreg->cpr_fuse_revision = FUSE_REVISION_UNKNOWN;
+	}
+
+	return 0;
+}
+
+/*
+ * Read the per-fuse-corner ring oscillator select values.  The RO-select
+ * bit positions come from either the primary or redundant DT property
+ * (depending on cpr_fuse_redundant) and are used to extract the RO select
+ * field for each fuse corner from the selected fuse row.
+ *
+ * Returns 0 on success or a negative errno on DT read / allocation failure.
+ */
+static int cpr_read_ro_select(struct platform_device *pdev,
+				     struct cpr_regulator *cpr_vreg)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	int rc = 0;
+	u32 cpr_fuse_row[2];
+	char *ro_sel_str;
+	int *bp_ro_sel;
+	int i;
+
+	/*
+	 * Element [0] is unused; fuse corners are 1-based starting at
+	 * CPR_FUSE_CORNER_MIN.  kcalloc() checks the count * size multiply
+	 * for overflow, unlike an open-coded kzalloc(n * size).
+	 */
+	bp_ro_sel = kcalloc(cpr_vreg->num_fuse_corners + 1,
+			sizeof(*bp_ro_sel), GFP_KERNEL);
+	if (!bp_ro_sel) {
+		cpr_err(cpr_vreg, "could not allocate memory for temp array\n");
+		return -ENOMEM;
+	}
+
+	/* Pick the primary or redundant fuse row and RO-select property. */
+	if (cpr_vreg->cpr_fuse_redundant) {
+		rc = of_property_read_u32_array(of_node,
+				"qcom,cpr-fuse-redun-row",
+				cpr_fuse_row, 2);
+		ro_sel_str = "qcom,cpr-fuse-redun-ro-sel";
+	} else {
+		rc = of_property_read_u32_array(of_node, "qcom,cpr-fuse-row",
+				cpr_fuse_row, 2);
+		ro_sel_str = "qcom,cpr-fuse-ro-sel";
+	}
+	if (rc)
+		goto error;
+
+	rc = of_property_read_u32_array(of_node, ro_sel_str,
+		&bp_ro_sel[CPR_FUSE_CORNER_MIN], cpr_vreg->num_fuse_corners);
+	if (rc) {
+		cpr_err(cpr_vreg, "%s read error, rc=%d\n", ro_sel_str, rc);
+		goto error;
+	}
+
+	for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++)
+		cpr_vreg->cpr_fuse_ro_sel[i]
+			= cpr_read_efuse_param(cpr_vreg, cpr_fuse_row[0],
+				bp_ro_sel[i], CPR_FUSE_RO_SEL_BITS,
+				cpr_fuse_row[1]);
+
+error:
+	kfree(bp_ro_sel);
+
+	return rc;
+}
+
+/*
+ * Search the optional "qcom,cpr-fuse-version-map" property for a tuple that
+ * matches this chip's speed bin, PVS version, fuse revision and per-corner
+ * RO select values.  On exit, cpr_fuse_map_match holds the matching tuple
+ * index (or FUSE_MAP_NO_MATCH) and cpr_fuse_map_count the number of tuples.
+ */
+static int cpr_find_fuse_map_match(struct platform_device *pdev,
+				     struct cpr_regulator *cpr_vreg)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	int i, j, rc, tuple_size;
+	int len = 0;
+	u32 *tmp, val, ro;
+
+	/* Specify default no match case. */
+	cpr_vreg->cpr_fuse_map_match = FUSE_MAP_NO_MATCH;
+	cpr_vreg->cpr_fuse_map_count = 0;
+
+	if (!of_find_property(of_node, "qcom,cpr-fuse-version-map", &len)) {
+		/* No mapping present. */
+		return 0;
+	}
+
+	/* Each tuple: speed_bin, pvs_version, fuse_revision, one RO per corner */
+	tuple_size = cpr_vreg->num_fuse_corners + 3;
+	cpr_vreg->cpr_fuse_map_count = len / (sizeof(u32) * tuple_size);
+
+	if (len == 0 || len % (sizeof(u32) * tuple_size)) {
+		cpr_err(cpr_vreg, "qcom,cpr-fuse-version-map length=%d is invalid\n",
+			len);
+		return -EINVAL;
+	}
+
+	tmp = kzalloc(len, GFP_KERNEL);
+	if (!tmp) {
+		cpr_err(cpr_vreg, "could not allocate memory for temp array\n");
+		return -ENOMEM;
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,cpr-fuse-version-map",
+				tmp, cpr_vreg->cpr_fuse_map_count * tuple_size);
+	if (rc) {
+		cpr_err(cpr_vreg, "could not read qcom,cpr-fuse-version-map, rc=%d\n",
+			rc);
+		goto done;
+	}
+
+	/*
+	 * qcom,cpr-fuse-version-map tuple format:
+	 * <speed_bin, pvs_version, cpr_fuse_revision, ro_sel[1], ...,
+	 *  ro_sel[n]> for n == number of fuse corners
+	 */
+	for (i = 0; i < cpr_vreg->cpr_fuse_map_count; i++) {
+		/* FUSE_PARAM_MATCH_ANY acts as a wildcard for each field. */
+		if (tmp[i * tuple_size] != cpr_vreg->speed_bin
+		    && tmp[i * tuple_size] != FUSE_PARAM_MATCH_ANY)
+			continue;
+		if (tmp[i * tuple_size + 1] != cpr_vreg->pvs_version
+		    && tmp[i * tuple_size + 1] != FUSE_PARAM_MATCH_ANY)
+			continue;
+		if (tmp[i * tuple_size + 2] != cpr_vreg->cpr_fuse_revision
+		    && tmp[i * tuple_size + 2] != FUSE_PARAM_MATCH_ANY)
+			continue;
+		for (j = 0; j < cpr_vreg->num_fuse_corners; j++) {
+			val = tmp[i * tuple_size + 3 + j];
+			ro = cpr_vreg->cpr_fuse_ro_sel[j + CPR_FUSE_CORNER_MIN];
+			if (val != ro && val != FUSE_PARAM_MATCH_ANY)
+				break;
+		}
+		/* All corner RO values matched: first matching tuple wins. */
+		if (j == cpr_vreg->num_fuse_corners) {
+			cpr_vreg->cpr_fuse_map_match = i;
+			break;
+		}
+	}
+
+	if (cpr_vreg->cpr_fuse_map_match != FUSE_MAP_NO_MATCH)
+		cpr_debug(cpr_vreg, "qcom,cpr-fuse-version-map tuple match found: %d\n",
+			cpr_vreg->cpr_fuse_map_match);
+	else
+		cpr_debug(cpr_vreg, "qcom,cpr-fuse-version-map tuple match not found\n");
+
+done:
+	kfree(tmp);
+	return rc;
+}
+
+/*
+ * Enforce a minimum difference between consecutive fuse-corner target
+ * quotients.  For each corner whose quotient is too close to the previous
+ * corner's (per "qcom,cpr-fuse-min-quot-diff"), rewrite it as the previous
+ * quotient plus the adjustment taken from the matching tuple of
+ * "qcom,cpr-min-quot-diff-adjustment".
+ *
+ * Returns 0 on success, -ENODEV/-EINVAL on malformed DT data, or the errno
+ * of a failed property read.
+ */
+static int cpr_minimum_quot_difference_adjustment(struct platform_device *pdev,
+					struct cpr_regulator *cpr_vreg)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	int tuple_count, tuple_match;
+	int rc, i, len = 0;
+	u32 index, adjust_quot = 0;
+	u32 *min_diff_quot;
+
+	if (!of_find_property(of_node, "qcom,cpr-fuse-min-quot-diff", NULL))
+		/* No conditional adjustment needed on revised quotients. */
+		return 0;
+
+	if (!of_find_property(of_node, "qcom,cpr-min-quot-diff-adjustment",
+						&len)) {
+		cpr_err(cpr_vreg, "qcom,cpr-min-quot-diff-adjustment not specified\n");
+		return -ENODEV;
+	}
+
+	if (cpr_vreg->cpr_fuse_map_count) {
+		if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH)
+			/* No matching index to use for quotient adjustment. */
+			return 0;
+		tuple_count = cpr_vreg->cpr_fuse_map_count;
+		tuple_match = cpr_vreg->cpr_fuse_map_match;
+	} else {
+		tuple_count = 1;
+		tuple_match = 0;
+	}
+
+	if (len != cpr_vreg->num_fuse_corners * tuple_count * sizeof(u32)) {
+		cpr_err(cpr_vreg, "qcom,cpr-min-quot-diff-adjustment length=%d is invalid\n",
+					len);
+		return -EINVAL;
+	}
+
+	/* kcalloc() checks the count * size multiply for overflow. */
+	min_diff_quot = kcalloc(cpr_vreg->num_fuse_corners, sizeof(u32),
+							GFP_KERNEL);
+	if (!min_diff_quot) {
+		cpr_err(cpr_vreg, "memory alloc failed\n");
+		return -ENOMEM;
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,cpr-fuse-min-quot-diff",
+						min_diff_quot,
+						cpr_vreg->num_fuse_corners);
+	if (rc < 0) {
+		cpr_err(cpr_vreg, "qcom,cpr-fuse-min-quot-diff reading failed, rc = %d\n",
+							rc);
+		goto error;
+	}
+
+	for (i = CPR_FUSE_CORNER_MIN + 1;
+				i <= cpr_vreg->num_fuse_corners; i++) {
+		if ((cpr_vreg->cpr_fuse_target_quot[i]
+			- cpr_vreg->cpr_fuse_target_quot[i - 1])
+		    <= (int)min_diff_quot[i - CPR_FUSE_CORNER_MIN]) {
+			index = tuple_match * cpr_vreg->num_fuse_corners
+					+ i - CPR_FUSE_CORNER_MIN;
+			rc = of_property_read_u32_index(of_node,
+						"qcom,cpr-min-quot-diff-adjustment",
+						index, &adjust_quot);
+			if (rc) {
+				cpr_err(cpr_vreg, "could not read qcom,cpr-min-quot-diff-adjustment index %u, rc=%d\n",
+							index, rc);
+				goto error;
+			}
+
+			cpr_vreg->cpr_fuse_target_quot[i]
+				= cpr_vreg->cpr_fuse_target_quot[i - 1]
+					+ adjust_quot;
+			cpr_info(cpr_vreg, "Corner[%d]: revised adjusted quotient = %d\n",
+					i, cpr_vreg->cpr_fuse_target_quot[i]);
+		}
+	}
+
+error:
+	kfree(min_diff_quot);
+	return rc;
+}
+
+/*
+ * Apply the static per-fuse-corner quotient offsets from the optional
+ * "qcom,cpr-quotient-adjustment" property (tuple selected by the fuse map
+ * match, if any), then run the conditional minimum-difference adjustment.
+ */
+static int cpr_adjust_target_quots(struct platform_device *pdev,
+					struct cpr_regulator *cpr_vreg)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	int tuple_count, tuple_match, i;
+	u32 index;
+	u32 quot_adjust = 0;
+	int len = 0;
+	int rc = 0;
+
+	if (!of_find_property(of_node, "qcom,cpr-quotient-adjustment", &len)) {
+		/* No static quotient adjustment needed. */
+		return 0;
+	}
+
+	if (cpr_vreg->cpr_fuse_map_count) {
+		if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH) {
+			/* No matching index to use for quotient adjustment. */
+			return 0;
+		}
+		tuple_count = cpr_vreg->cpr_fuse_map_count;
+		tuple_match = cpr_vreg->cpr_fuse_map_match;
+	} else {
+		tuple_count = 1;
+		tuple_match = 0;
+	}
+
+	/* Property must carry one value per fuse corner for every tuple. */
+	if (len != cpr_vreg->num_fuse_corners * tuple_count * sizeof(u32)) {
+		cpr_err(cpr_vreg, "qcom,cpr-quotient-adjustment length=%d is invalid\n",
+			len);
+		return -EINVAL;
+	}
+
+	for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++) {
+		index = tuple_match * cpr_vreg->num_fuse_corners
+				+ i - CPR_FUSE_CORNER_MIN;
+		rc = of_property_read_u32_index(of_node,
+			"qcom,cpr-quotient-adjustment", index, &quot_adjust);
+		if (rc) {
+			cpr_err(cpr_vreg, "could not read qcom,cpr-quotient-adjustment index %u, rc=%d\n",
+				index, rc);
+			return rc;
+		}
+
+		if (quot_adjust) {
+			cpr_vreg->cpr_fuse_target_quot[i] += quot_adjust;
+			cpr_info(cpr_vreg, "Corner[%d]: adjusted target quot = %d\n",
+				i, cpr_vreg->cpr_fuse_target_quot[i]);
+		}
+	}
+
+	rc = cpr_minimum_quot_difference_adjustment(pdev, cpr_vreg);
+	if (rc)
+		cpr_err(cpr_vreg, "failed to apply minimum quot difference rc=%d\n",
+					rc);
+
+	return rc;
+}
+
+/*
+ * Decide whether CPR closed-loop operation is allowed for this fuse combo
+ * via the optional "qcom,cpr-allowed" property (one u32 per fuse-map tuple).
+ * Updates cpr_vreg->cpr_fuse_disable accordingly.
+ */
+static int cpr_check_allowed(struct platform_device *pdev,
+					struct cpr_regulator *cpr_vreg)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	char *allow_str = "qcom,cpr-allowed";
+	int rc = 0, count;
+	int tuple_count, tuple_match;
+	u32 allow_status;
+
+	if (!of_find_property(of_node, allow_str, &count))
+		/* CPR is allowed for all fuse revisions. */
+		return 0;
+
+	count /= sizeof(u32);
+	if (cpr_vreg->cpr_fuse_map_count) {
+		if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH)
+			/* No matching index to use for CPR allowed. */
+			return 0;
+		tuple_count = cpr_vreg->cpr_fuse_map_count;
+		tuple_match = cpr_vreg->cpr_fuse_map_match;
+	} else {
+		tuple_count = 1;
+		tuple_match = 0;
+	}
+
+	if (count != tuple_count) {
+		cpr_err(cpr_vreg, "%s count=%d is invalid\n", allow_str,
+			count);
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32_index(of_node, allow_str, tuple_match,
+		&allow_status);
+	if (rc) {
+		cpr_err(cpr_vreg, "could not read %s index %u, rc=%d\n",
+			allow_str, tuple_match, rc);
+		return rc;
+	}
+
+	/*
+	 * CPR stays disabled if it was already disabled (e.g. by fuse);
+	 * otherwise it is enabled only when the selected tuple reports a
+	 * nonzero allow_status.
+	 */
+	if (allow_status && !cpr_vreg->cpr_fuse_disable)
+		cpr_vreg->cpr_fuse_disable = false;
+	else
+		cpr_vreg->cpr_fuse_disable = true;
+
+	cpr_info(cpr_vreg, "CPR closed loop is %s for fuse revision %d\n",
+		cpr_vreg->cpr_fuse_disable ? "disabled" : "enabled",
+		cpr_vreg->cpr_fuse_revision);
+
+	return rc;
+}
+
+/*
+ * Check whether CPR de-aging is allowed for this fuse combination via the
+ * optional "qcom,cpr-de-aging-allowed" property.
+ *
+ * Return semantics are mixed (see caller cpr_aging_init()): a positive
+ * value means allowed, 0 means not allowed, and a negative errno indicates
+ * a DT parsing error.
+ */
+static int cpr_check_de_aging_allowed(struct cpr_regulator *cpr_vreg,
+				struct device *dev)
+{
+	struct device_node *of_node = dev->of_node;
+	char *allow_str = "qcom,cpr-de-aging-allowed";
+	int rc = 0, count;
+	int tuple_count, tuple_match;
+	u32 allow_status = 0;
+
+	if (!of_find_property(of_node, allow_str, &count)) {
+		/* CPR de-aging is not allowed for all fuse revisions. */
+		return allow_status;
+	}
+
+	count /= sizeof(u32);
+	if (cpr_vreg->cpr_fuse_map_count) {
+		if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH)
+			/* No matching index to use for CPR de-aging allowed. */
+			return 0;
+		tuple_count = cpr_vreg->cpr_fuse_map_count;
+		tuple_match = cpr_vreg->cpr_fuse_map_match;
+	} else {
+		tuple_count = 1;
+		tuple_match = 0;
+	}
+
+	if (count != tuple_count) {
+		cpr_err(cpr_vreg, "%s count=%d is invalid\n", allow_str,
+			count);
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32_index(of_node, allow_str, tuple_match,
+		&allow_status);
+	if (rc) {
+		cpr_err(cpr_vreg, "could not read %s index %u, rc=%d\n",
+			allow_str, tuple_match, rc);
+		return rc;
+	}
+
+	cpr_info(cpr_vreg, "CPR de-aging is %s for fuse revision %d\n",
+			allow_status ? "allowed" : "not allowed",
+			cpr_vreg->cpr_fuse_revision);
+
+	return allow_status;
+}
+
+/*
+ * Parse the optional CPR de-aging configuration from device tree, read the
+ * initial quotient-difference fuses for each aging sensor, and pre-apply the
+ * maximum aging margin to every fuse corner's target quotient (the de-aging
+ * algorithm can reduce it later).
+ *
+ * Returns 0 when de-aging is absent or not allowed for this fuse combo, or
+ * a negative errno on failure.
+ */
+static int cpr_aging_init(struct platform_device *pdev,
+			struct cpr_regulator *cpr_vreg)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	struct cpr_aging_info *aging_info;
+	struct cpr_aging_sensor_info *sensor_info;
+	int num_fuse_corners = cpr_vreg->num_fuse_corners;
+	int i, rc = 0, len = 0, num_aging_sensors, ro_sel, bits;
+	u32 *aging_sensor_id = NULL, *fuse_sel = NULL, *fs;
+	u32 sensor = 0, non_collapsible_sensor_mask = 0;
+	u64 efuse_val;
+	struct property *prop;
+
+	if (!of_find_property(of_node, "qcom,cpr-aging-sensor-id", &len)) {
+		/* No CPR de-aging adjustments needed */
+		return 0;
+	}
+
+	if (len == 0) {
+		cpr_err(cpr_vreg, "qcom,cpr-aging-sensor-id property format is invalid\n");
+		return -EINVAL;
+	}
+	num_aging_sensors = len / sizeof(u32);
+	cpr_debug(cpr_vreg, "No of aging sensors = %d\n", num_aging_sensors);
+
+	if (cpumask_empty(&cpr_vreg->cpu_mask)) {
+		cpr_err(cpr_vreg, "qcom,cpr-cpus property missing\n");
+		return -EINVAL;
+	}
+
+	rc = cpr_check_de_aging_allowed(cpr_vreg, &pdev->dev);
+	if (rc < 0) {
+		cpr_err(cpr_vreg, "cpr_check_de_aging_allowed failed: rc=%d\n",
+			rc);
+		return rc;
+	} else if (rc == 0) {
+		/* CPR de-aging is not allowed for the current fuse combo */
+		return 0;
+	}
+
+	aging_info = devm_kzalloc(&pdev->dev, sizeof(*aging_info),
+				GFP_KERNEL);
+	if (!aging_info)
+		return -ENOMEM;
+
+	cpr_vreg->aging_info = aging_info;
+	aging_info->num_aging_sensors = num_aging_sensors;
+
+	rc = of_property_read_u32(of_node, "qcom,cpr-aging-ref-corner",
+			&aging_info->aging_corner);
+	if (rc) {
+		cpr_err(cpr_vreg, "qcom,cpr-aging-ref-corner missing rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-aging-ref-voltage",
+			&aging_info->aging_ref_voltage, rc);
+	if (rc)
+		return rc;
+
+	CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-max-aging-margin",
+			&aging_info->max_aging_margin, rc);
+	if (rc)
+		return rc;
+
+	CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-aging-ro-scaling-factor",
+			&aging_info->aging_ro_kv, rc);
+	if (rc)
+		return rc;
+
+	/* Check for DIV by 0 error */
+	if (aging_info->aging_ro_kv == 0) {
+		cpr_err(cpr_vreg, "invalid cpr-aging-ro-scaling-factor value: %u\n",
+			aging_info->aging_ro_kv);
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,cpr-ro-scaling-factor",
+			aging_info->cpr_ro_kv, CPR_NUM_RING_OSC);
+	if (rc) {
+		cpr_err(cpr_vreg, "qcom,cpr-ro-scaling-factor property read failed, rc = %d\n",
+			rc);
+		return rc;
+	}
+
+	if (of_find_property(of_node, "qcom,cpr-non-collapsible-sensors",
+				&len)) {
+		len = len / sizeof(u32);
+		if (len <= 0 || len > 32) {
+			cpr_err(cpr_vreg, "qcom,cpr-non-collapsible-sensors has an incorrect size\n");
+			return -EINVAL;
+		}
+
+		for (i = 0; i < len; i++) {
+			rc = of_property_read_u32_index(of_node,
+						"qcom,cpr-non-collapsible-sensors",
+						i, &sensor);
+			if (rc) {
+				cpr_err(cpr_vreg, "could not read qcom,cpr-non-collapsible-sensors index %u, rc=%d\n",
+					i, rc);
+				return rc;
+			}
+
+			if (sensor > 31) {
+				cpr_err(cpr_vreg, "invalid non-collapsible sensor = %u\n",
+					sensor);
+				return -EINVAL;
+			}
+
+			non_collapsible_sensor_mask |= BIT(sensor);
+		}
+
+		/*
+		 * Bypass the sensors in collapsible domain for
+		 * de-aging measurements
+		 */
+		aging_info->aging_sensor_bypass =
+						~(non_collapsible_sensor_mask);
+		cpr_debug(cpr_vreg, "sensor bypass mask for aging = 0x%08x\n",
+			aging_info->aging_sensor_bypass);
+	}
+
+	prop = of_find_property(pdev->dev.of_node, "qcom,cpr-aging-derate",
+			NULL);
+	if ((!prop) ||
+		(prop->length != num_fuse_corners * sizeof(u32))) {
+		cpr_err(cpr_vreg, "qcom,cpr-aging-derate incorrectly configured\n");
+		return -EINVAL;
+	}
+
+	aging_sensor_id = kcalloc(num_aging_sensors, sizeof(*aging_sensor_id),
+				GFP_KERNEL);
+	fuse_sel = kcalloc(num_aging_sensors * 4, sizeof(*fuse_sel),
+				GFP_KERNEL);
+	aging_info->voltage_adjust = devm_kcalloc(&pdev->dev,
+					num_fuse_corners + 1,
+					sizeof(*aging_info->voltage_adjust),
+					GFP_KERNEL);
+	aging_info->sensor_info = devm_kcalloc(&pdev->dev, num_aging_sensors,
+					sizeof(*aging_info->sensor_info),
+					GFP_KERNEL);
+	aging_info->aging_derate = devm_kcalloc(&pdev->dev,
+					num_fuse_corners + 1,
+					sizeof(*aging_info->aging_derate),
+					GFP_KERNEL);
+
+	if (!aging_info->aging_derate || !aging_sensor_id
+		|| !aging_info->sensor_info || !fuse_sel
+		|| !aging_info->voltage_adjust) {
+		/* Previously fell through with rc == 0, reporting success. */
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,cpr-aging-sensor-id",
+					aging_sensor_id, num_aging_sensors);
+	if (rc) {
+		cpr_err(cpr_vreg, "qcom,cpr-aging-sensor-id property read failed, rc = %d\n",
+				rc);
+		goto err;
+	}
+
+	/* Sensor ids index a 32-bit bypass mask, so only 0..31 are valid. */
+	for (i = 0; i < num_aging_sensors; i++)
+		if (aging_sensor_id[i] > 31) {
+			cpr_err(cpr_vreg, "Invalid aging sensor id: %u\n",
+				aging_sensor_id[i]);
+			rc = -EINVAL;
+			goto err;
+		}
+
+	rc = of_property_read_u32_array(of_node, "qcom,cpr-aging-derate",
+			&aging_info->aging_derate[CPR_FUSE_CORNER_MIN],
+			num_fuse_corners);
+	if (rc) {
+		cpr_err(cpr_vreg, "qcom,cpr-aging-derate property read failed, rc = %d\n",
+				rc);
+		goto err;
+	}
+
+	rc = of_property_read_u32_array(of_node,
+				"qcom,cpr-fuse-aging-init-quot-diff",
+				fuse_sel, (num_aging_sensors * 4));
+	if (rc) {
+		cpr_err(cpr_vreg, "qcom,cpr-fuse-aging-init-quot-diff read failed, rc = %d\n",
+				rc);
+		goto err;
+	}
+
+	/*
+	 * Walk the 4-tuples with a cursor so that fuse_sel keeps pointing at
+	 * the allocation base and can be freed on every exit path (the
+	 * original advanced fuse_sel itself and leaked it on error paths).
+	 */
+	fs = fuse_sel;
+	sensor_info = aging_info->sensor_info;
+	for (i = 0; i < num_aging_sensors; i++, sensor_info++) {
+		sensor_info->sensor_id = aging_sensor_id[i];
+		efuse_val = cpr_read_efuse_param(cpr_vreg, fs[0],
+				fs[1], fs[2], fs[3]);
+		bits = fs[2];
+		/* Sign-magnitude decode: top bit is the sign. */
+		sensor_info->initial_quot_diff = ((efuse_val & BIT(bits - 1)) ?
+			-1 : 1) * (efuse_val & (BIT(bits - 1) - 1));
+
+		cpr_debug(cpr_vreg, "Age sensor[%d] Initial quot diff = %d\n",
+				sensor_info->sensor_id,
+				sensor_info->initial_quot_diff);
+		fs += 4;
+	}
+
+	/*
+	 * Add max aging margin here. This can be adjusted later in
+	 * de-aging algorithm.
+	 */
+	for (i = CPR_FUSE_CORNER_MIN; i <= num_fuse_corners; i++) {
+		ro_sel = cpr_vreg->cpr_fuse_ro_sel[i];
+		cpr_vreg->cpr_fuse_target_quot[i] +=
+				(aging_info->cpr_ro_kv[ro_sel]
+				* aging_info->max_aging_margin) / 1000000;
+		aging_info->voltage_adjust[i] = aging_info->max_aging_margin;
+		cpr_info(cpr_vreg, "Corner[%d]: age margin adjusted quotient = %d\n",
+			i, cpr_vreg->cpr_fuse_target_quot[i]);
+	}
+
+err:
+	kfree(fuse_sel);
+	kfree(aging_sensor_id);
+	return rc;
+}
+
+/*
+ * Resolve the optional "qcom,cpr-cpus" phandle list to logical CPU numbers,
+ * filling cpr_vreg->adj_cpus[] and cpr_vreg->cpu_mask.  Entries that do not
+ * match any possible CPU are left as -1 (no error is raised for them).
+ */
+static int cpr_cpu_map_init(struct cpr_regulator *cpr_vreg, struct device *dev)
+{
+	struct device_node *cpu_node;
+	int i, cpu;
+
+	if (!of_find_property(dev->of_node, "qcom,cpr-cpus",
+				&cpr_vreg->num_adj_cpus)) {
+		/* No adjustments based on online cores */
+		return 0;
+	}
+	/* of_find_property() returned the byte length; convert to a count. */
+	cpr_vreg->num_adj_cpus /= sizeof(u32);
+
+	cpr_vreg->adj_cpus = devm_kcalloc(dev, cpr_vreg->num_adj_cpus,
+					sizeof(int), GFP_KERNEL);
+	if (!cpr_vreg->adj_cpus)
+		return -ENOMEM;
+
+	for (i = 0; i < cpr_vreg->num_adj_cpus; i++) {
+		cpu_node = of_parse_phandle(dev->of_node, "qcom,cpr-cpus", i);
+		if (!cpu_node) {
+			cpr_err(cpr_vreg, "could not find CPU node %d\n", i);
+			return -EINVAL;
+		}
+		cpr_vreg->adj_cpus[i] = -1;
+		/* Map the DT cpu node back to its logical CPU number. */
+		for_each_possible_cpu(cpu) {
+			if (of_get_cpu_node(cpu, NULL) == cpu_node) {
+				cpr_vreg->adj_cpus[i] = cpu;
+				cpumask_set_cpu(cpu, &cpr_vreg->cpu_mask);
+				break;
+			}
+		}
+		of_node_put(cpu_node);
+	}
+
+	return 0;
+}
+
+/*
+ * Read and post-process the CPR target-quotient fuses for every fuse corner:
+ * select primary vs. redundant fuse rows, unpack and scale the raw quotient
+ * values, apply aging and DT-driven adjustments, and sanity-check the
+ * results (permanently disabling CPR when they are inconsistent).
+ */
+static int cpr_init_cpr_efuse(struct platform_device *pdev,
+				     struct cpr_regulator *cpr_vreg)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	int i, rc = 0;
+	bool scheme_fuse_valid = false;
+	bool disable_fuse_valid = false;
+	char *targ_quot_str;
+	u32 cpr_fuse_row[2];
+	u32 bp_cpr_disable, bp_scheme;
+	size_t len;
+	int *bp_target_quot;
+	u64 fuse_bits, fuse_bits_2;
+	u32 *target_quot_size;
+	struct cpr_quot_scale *quot_scale;
+
+	/* Per-corner arrays are 1-based (CPR_FUSE_CORNER_MIN); [0] unused. */
+	len = cpr_vreg->num_fuse_corners + 1;
+
+	bp_target_quot = kzalloc(len * sizeof(*bp_target_quot), GFP_KERNEL);
+	target_quot_size = kzalloc(len * sizeof(*target_quot_size), GFP_KERNEL);
+	quot_scale = kzalloc(len * sizeof(*quot_scale), GFP_KERNEL);
+
+	if (!bp_target_quot || !target_quot_size || !quot_scale) {
+		cpr_err(cpr_vreg,
+			"Could not allocate memory for fuse parsing arrays\n");
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	/* Select the fuse row and bit-position property set to read from. */
+	if (cpr_vreg->cpr_fuse_redundant) {
+		rc = of_property_read_u32_array(of_node,
+				"qcom,cpr-fuse-redun-row",
+				cpr_fuse_row, 2);
+		targ_quot_str = "qcom,cpr-fuse-redun-target-quot";
+	} else {
+		rc = of_property_read_u32_array(of_node, "qcom,cpr-fuse-row",
+				cpr_fuse_row, 2);
+		targ_quot_str = "qcom,cpr-fuse-target-quot";
+	}
+	if (rc)
+		goto error;
+
+	rc = of_property_read_u32_array(of_node, targ_quot_str,
+		&bp_target_quot[CPR_FUSE_CORNER_MIN],
+		cpr_vreg->num_fuse_corners);
+	if (rc < 0) {
+		cpr_err(cpr_vreg, "missing %s: rc=%d\n", targ_quot_str, rc);
+		goto error;
+	}
+
+	if (of_find_property(of_node, "qcom,cpr-fuse-target-quot-size", NULL)) {
+		rc = of_property_read_u32_array(of_node,
+			"qcom,cpr-fuse-target-quot-size",
+			&target_quot_size[CPR_FUSE_CORNER_MIN],
+			cpr_vreg->num_fuse_corners);
+		if (rc < 0) {
+			cpr_err(cpr_vreg, "error while reading qcom,cpr-fuse-target-quot-size: rc=%d\n",
+				rc);
+			goto error;
+		}
+	} else {
+		/*
+		 * Default fuse quotient parameter size to match target register
+		 * size.
+		 */
+		for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners;
+		     i++)
+			target_quot_size[i] = CPR_FUSE_TARGET_QUOT_BITS;
+	}
+
+	/* Optional per-corner (offset, multiplier) pairs for quot unpacking. */
+	if (of_find_property(of_node, "qcom,cpr-fuse-target-quot-scale",
+				NULL)) {
+		for (i = 0; i < cpr_vreg->num_fuse_corners; i++) {
+			rc = of_property_read_u32_index(of_node,
+				"qcom,cpr-fuse-target-quot-scale", i * 2,
+				&quot_scale[i + CPR_FUSE_CORNER_MIN].offset);
+			if (rc < 0) {
+				cpr_err(cpr_vreg, "error while reading qcom,cpr-fuse-target-quot-scale: rc=%d\n",
+					rc);
+				goto error;
+			}
+
+			rc = of_property_read_u32_index(of_node,
+				"qcom,cpr-fuse-target-quot-scale", i * 2 + 1,
+			       &quot_scale[i + CPR_FUSE_CORNER_MIN].multiplier);
+			if (rc < 0) {
+				cpr_err(cpr_vreg, "error while reading qcom,cpr-fuse-target-quot-scale: rc=%d\n",
+					rc);
+				goto error;
+			}
+		}
+	} else {
+		/*
+		 * In the default case, target quotients require no scaling so
+		 * use offset = 0, multiplier = 1.
+		 */
+		for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners;
+		     i++) {
+			quot_scale[i].offset = 0;
+			quot_scale[i].multiplier = 1;
+		}
+	}
+
+	/* Read the control bits of eFuse */
+	fuse_bits = cpr_read_efuse_row(cpr_vreg, cpr_fuse_row[0],
+					cpr_fuse_row[1]);
+	cpr_info(cpr_vreg, "[row:%d] = 0x%llx\n", cpr_fuse_row[0], fuse_bits);
+
+	/*
+	 * Locate the CPR-disable and local/global-scheme control bits.  They
+	 * may live in the redundant row, the primary row, or be absent; the
+	 * *_fuse_valid flags track whether each bit position was found.
+	 */
+	if (cpr_vreg->cpr_fuse_redundant) {
+		if (of_find_property(of_node,
+				"qcom,cpr-fuse-redun-bp-cpr-disable", NULL)) {
+			CPR_PROP_READ_U32(cpr_vreg, of_node,
+					  "cpr-fuse-redun-bp-cpr-disable",
+					  &bp_cpr_disable, rc);
+			disable_fuse_valid = true;
+			if (of_find_property(of_node,
+					"qcom,cpr-fuse-redun-bp-scheme",
+					NULL)) {
+				CPR_PROP_READ_U32(cpr_vreg, of_node,
+						"cpr-fuse-redun-bp-scheme",
+						&bp_scheme, rc);
+				scheme_fuse_valid = true;
+			}
+			if (rc)
+				goto error;
+			fuse_bits_2 = fuse_bits;
+		} else {
+			u32 temp_row[2];
+
+			/* Use original fuse if no optional property */
+			if (of_find_property(of_node,
+					"qcom,cpr-fuse-bp-cpr-disable", NULL)) {
+				CPR_PROP_READ_U32(cpr_vreg, of_node,
+					"cpr-fuse-bp-cpr-disable",
+					&bp_cpr_disable, rc);
+				disable_fuse_valid = true;
+			}
+			if (of_find_property(of_node,
+					"qcom,cpr-fuse-bp-scheme",
+					NULL)) {
+				CPR_PROP_READ_U32(cpr_vreg, of_node,
+						"cpr-fuse-bp-scheme",
+						&bp_scheme, rc);
+				scheme_fuse_valid = true;
+			}
+			rc = of_property_read_u32_array(of_node,
+					"qcom,cpr-fuse-row",
+					temp_row, 2);
+			if (rc)
+				goto error;
+
+			fuse_bits_2 = cpr_read_efuse_row(cpr_vreg, temp_row[0],
+							temp_row[1]);
+			cpr_info(cpr_vreg, "[original row:%d] = 0x%llx\n",
+				temp_row[0], fuse_bits_2);
+		}
+	} else {
+		if (of_find_property(of_node, "qcom,cpr-fuse-bp-cpr-disable",
+					NULL)) {
+			CPR_PROP_READ_U32(cpr_vreg, of_node,
+				"cpr-fuse-bp-cpr-disable", &bp_cpr_disable, rc);
+			disable_fuse_valid = true;
+		}
+		if (of_find_property(of_node, "qcom,cpr-fuse-bp-scheme",
+							NULL)) {
+			CPR_PROP_READ_U32(cpr_vreg, of_node,
+					"cpr-fuse-bp-scheme", &bp_scheme, rc);
+			scheme_fuse_valid = true;
+		}
+		if (rc)
+			goto error;
+		fuse_bits_2 = fuse_bits;
+	}
+
+	if (disable_fuse_valid) {
+		cpr_vreg->cpr_fuse_disable =
+					(fuse_bits_2 >> bp_cpr_disable) & 0x01;
+		cpr_info(cpr_vreg, "CPR disable fuse = %d\n",
+			cpr_vreg->cpr_fuse_disable);
+	} else {
+		cpr_vreg->cpr_fuse_disable = false;
+	}
+
+	if (scheme_fuse_valid) {
+		cpr_vreg->cpr_fuse_local = (fuse_bits_2 >> bp_scheme) & 0x01;
+		cpr_info(cpr_vreg, "local = %d\n", cpr_vreg->cpr_fuse_local);
+	} else {
+		cpr_vreg->cpr_fuse_local = true;
+	}
+
+	for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++) {
+		cpr_vreg->cpr_fuse_target_quot[i]
+			= cpr_read_efuse_param(cpr_vreg, cpr_fuse_row[0],
+				bp_target_quot[i], target_quot_size[i],
+				cpr_fuse_row[1]);
+		/* Unpack the target quotient by scaling. */
+		cpr_vreg->cpr_fuse_target_quot[i] *= quot_scale[i].multiplier;
+		cpr_vreg->cpr_fuse_target_quot[i] += quot_scale[i].offset;
+		cpr_info(cpr_vreg,
+			"Corner[%d]: ro_sel = %d, target quot = %d\n", i,
+			cpr_vreg->cpr_fuse_ro_sel[i],
+			cpr_vreg->cpr_fuse_target_quot[i]);
+	}
+
+	rc = cpr_cpu_map_init(cpr_vreg, &pdev->dev);
+	if (rc) {
+		cpr_err(cpr_vreg, "CPR cpu map init failed: rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = cpr_aging_init(pdev, cpr_vreg);
+	if (rc) {
+		cpr_err(cpr_vreg, "CPR aging init failed: rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = cpr_adjust_target_quots(pdev, cpr_vreg);
+	if (rc)
+		goto error;
+
+	/*
+	 * Quotients must be non-decreasing across fuse corners that share a
+	 * ring oscillator; a decrease indicates bad fuse data.
+	 */
+	for (i = CPR_FUSE_CORNER_MIN + 1;
+				i <= cpr_vreg->num_fuse_corners; i++) {
+		if (cpr_vreg->cpr_fuse_target_quot[i]
+				< cpr_vreg->cpr_fuse_target_quot[i - 1] &&
+			cpr_vreg->cpr_fuse_ro_sel[i] ==
+				cpr_vreg->cpr_fuse_ro_sel[i - 1]) {
+			cpr_vreg->cpr_fuse_disable = true;
+			cpr_err(cpr_vreg, "invalid quotient values; permanently disabling CPR\n");
+		}
+	}
+
+	if (cpr_vreg->flags & FLAGS_UPLIFT_QUOT_VOLT) {
+		cpr_voltage_uplift_wa_inc_quot(cpr_vreg, of_node);
+		for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners;
+		     i++) {
+			cpr_info(cpr_vreg,
+				"Corner[%d]: uplifted target quot = %d\n",
+				i, cpr_vreg->cpr_fuse_target_quot[i]);
+		}
+	}
+
+	/*
+	 * Check whether the fuse-quot-offset is defined per fuse corner.
+	 * If it is defined, use it (quot_offset) in the calculation
+	 * below for obtaining scaling factor per fuse corner.
+	 */
+	rc = cpr_get_fuse_quot_offset(cpr_vreg, pdev, quot_scale);
+	if (rc < 0)
+		goto error;
+
+	rc = cpr_get_corner_quot_adjustment(cpr_vreg, &pdev->dev);
+	if (rc)
+		goto error;
+
+	cpr_vreg->cpr_fuse_bits = fuse_bits;
+	if (!cpr_vreg->cpr_fuse_bits) {
+		cpr_vreg->cpr_fuse_disable = true;
+		cpr_err(cpr_vreg,
+			"cpr_fuse_bits == 0; permanently disabling CPR\n");
+	} else if (!cpr_vreg->fuse_quot_offset) {
+		/*
+		 * Check if the target quotients for the highest two fuse
+		 * corners are too close together.
+		 */
+		int *quot = cpr_vreg->cpr_fuse_target_quot;
+		int highest_fuse_corner = cpr_vreg->num_fuse_corners;
+		u32 min_diff_quot;
+		bool valid_fuse = true;
+
+		min_diff_quot = CPR_FUSE_MIN_QUOT_DIFF;
+		of_property_read_u32(of_node, "qcom,cpr-quot-min-diff",
+							&min_diff_quot);
+
+		if (quot[highest_fuse_corner] > quot[highest_fuse_corner - 1]) {
+			if ((quot[highest_fuse_corner]
+				- quot[highest_fuse_corner - 1])
+					<= min_diff_quot)
+				valid_fuse = false;
+		} else {
+			valid_fuse = false;
+		}
+
+		if (!valid_fuse) {
+			cpr_vreg->cpr_fuse_disable = true;
+			cpr_err(cpr_vreg, "invalid quotient values; permanently disabling CPR\n");
+		}
+	}
+	rc = cpr_check_allowed(pdev, cpr_vreg);
+
+error:
+	kfree(bp_target_quot);
+	kfree(target_quot_size);
+	kfree(quot_scale);
+
+	return rc;
+}
+
+/*
+ * Allocate the last_volt[] array (1-based; index [0] unused) and seed each
+ * virtual corner's last voltage with its open-loop voltage.
+ *
+ * Returns 0 on success or -ENOMEM on allocation failure (the original code
+ * incorrectly returned -EINVAL for an out-of-memory condition).
+ */
+static int cpr_init_cpr_voltages(struct cpr_regulator *cpr_vreg,
+			struct device *dev)
+{
+	int i;
+	int size = cpr_vreg->num_corners + 1;
+
+	cpr_vreg->last_volt = devm_kcalloc(dev, size, sizeof(int), GFP_KERNEL);
+	if (!cpr_vreg->last_volt)
+		return -ENOMEM;
+
+	for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++)
+		cpr_vreg->last_volt[i] = cpr_vreg->open_loop_volt[i];
+
+	return 0;
+}
+
+/*
+ * This function fills the virtual_limit array with voltages read from the
+ * prop_name device tree property if a given tuple in the property matches
+ * the speedbin and PVS version fuses found on the chip.  Otherwise,
+ * it fills the virtual_limit_array with corresponding values from the
+ * fuse_limit_array.
+ */
+static int cpr_fill_override_voltage(struct cpr_regulator *cpr_vreg,
+		struct device *dev, const char *prop_name, const char *label,
+		int *virtual_limit, int *fuse_limit)
+{
+	int rc = 0;
+	int i, j, size, pos;
+	struct property *prop;
+	bool match_found = false;
+	size_t buflen;
+	char *buf;
+	u32 *tmp;
+
+	prop = of_find_property(dev->of_node, prop_name, NULL);
+	if (!prop)
+		goto use_fuse_corner_limits;
+
+	/* Each tuple: <speed_bin, pvs_version, one voltage per corner>. */
+	size = prop->length / sizeof(u32);
+	if (size == 0 || size % (cpr_vreg->num_corners + 2)) {
+		cpr_err(cpr_vreg, "%s property format is invalid; reusing per-fuse-corner limits\n",
+			prop_name);
+		goto use_fuse_corner_limits;
+	}
+
+	tmp = kzalloc(size * sizeof(u32), GFP_KERNEL);
+	if (!tmp) {
+		cpr_err(cpr_vreg, "memory alloc failed\n");
+		return -ENOMEM;
+	}
+	rc = of_property_read_u32_array(dev->of_node, prop_name, tmp, size);
+	if (rc < 0) {
+		kfree(tmp);
+		cpr_err(cpr_vreg, "%s reading failed, rc = %d\n", prop_name,
+			rc);
+		return rc;
+	}
+
+	/*
+	 * Get limit voltage for each virtual corner based upon the speed_bin
+	 * and pvs_version values.
+	 */
+	for (i = 0; i < size; i += cpr_vreg->num_corners + 2) {
+		if (tmp[i] != cpr_vreg->speed_bin &&
+		    tmp[i] != FUSE_PARAM_MATCH_ANY)
+			continue;
+		if (tmp[i + 1] != cpr_vreg->pvs_version &&
+		    tmp[i + 1] != FUSE_PARAM_MATCH_ANY)
+			continue;
+		/*
+		 * NOTE(review): j is a virtual-corner index (CPR_CORNER_MIN
+		 * based) but the offset uses CPR_FUSE_CORNER_MIN; this is only
+		 * correct if the two constants are equal — confirm against the
+		 * header definitions.
+		 */
+		for (j = CPR_CORNER_MIN; j <= cpr_vreg->num_corners; j++)
+			virtual_limit[j] = tmp[i + 2 + j - CPR_FUSE_CORNER_MIN];
+		match_found = true;
+		break;
+	}
+	kfree(tmp);
+
+	if (!match_found)
+		goto use_fuse_corner_limits;
+
+	/*
+	 * Log per-virtual-corner voltage limits since they are useful for
+	 * baseline CPR debugging.
+	 */
+	buflen = cpr_vreg->num_corners * (MAX_CHARS_PER_INT + 2) * sizeof(*buf);
+	buf = kzalloc(buflen, GFP_KERNEL);
+	if (buf == NULL) {
+		/* Logging is best-effort: skip it on OOM, still succeed. */
+		cpr_err(cpr_vreg, "Could not allocate memory for corner limit voltage logging\n");
+		return 0;
+	}
+
+	for (i = CPR_CORNER_MIN, pos = 0; i <= cpr_vreg->num_corners; i++)
+		pos += scnprintf(buf + pos, buflen - pos, "%d%s",
+			virtual_limit[i], i < cpr_vreg->num_corners ? " " : "");
+	cpr_info(cpr_vreg, "%s override voltage: [%s] uV\n", label, buf);
+	kfree(buf);
+
+	return rc;
+
+use_fuse_corner_limits:
+	/* Fall back: map each virtual corner to its fuse corner's limit. */
+	for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++)
+		virtual_limit[i] = fuse_limit[cpr_vreg->corner_map[i]];
+	return rc;
+}
+
+/*
+ * This function loads per-virtual-corner ceiling and floor voltages from device
+ * tree if their respective device tree properties are present.  These limits
+ * override those found in the per-fuse-corner arrays fuse_ceiling_volt and
+ * fuse_floor_volt.
+ */
+static int cpr_init_ceiling_floor_override_voltages(
+	struct cpr_regulator *cpr_vreg, struct device *dev)
+{
+	int rc, i;
+	/* Arrays are 1-based (CPR_CORNER_MIN); element [0] unused. */
+	int size = cpr_vreg->num_corners + 1;
+
+	cpr_vreg->ceiling_volt = devm_kzalloc(dev, sizeof(int) * size,
+						GFP_KERNEL);
+	cpr_vreg->floor_volt = devm_kzalloc(dev, sizeof(int) * size,
+						GFP_KERNEL);
+	cpr_vreg->cpr_max_ceiling = devm_kzalloc(dev, sizeof(int) * size,
+						GFP_KERNEL);
+	if (!cpr_vreg->ceiling_volt || !cpr_vreg->floor_volt ||
+		!cpr_vreg->cpr_max_ceiling)
+		return -ENOMEM;
+
+	rc = cpr_fill_override_voltage(cpr_vreg, dev,
+		"qcom,cpr-voltage-ceiling-override", "ceiling",
+		cpr_vreg->ceiling_volt, cpr_vreg->fuse_ceiling_volt);
+	if (rc)
+		return rc;
+
+	rc = cpr_fill_override_voltage(cpr_vreg, dev,
+		"qcom,cpr-voltage-floor-override", "floor",
+		cpr_vreg->floor_volt, cpr_vreg->fuse_floor_volt);
+	if (rc)
+		return rc;
+
+	/* Validate limits and record the global and per-corner max ceiling. */
+	for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
+		if (cpr_vreg->floor_volt[i] > cpr_vreg->ceiling_volt[i]) {
+			cpr_err(cpr_vreg, "virtual corner %d floor=%d uV > ceiling=%d uV\n",
+				i, cpr_vreg->floor_volt[i],
+				cpr_vreg->ceiling_volt[i]);
+			return -EINVAL;
+		}
+
+		if (cpr_vreg->ceiling_max < cpr_vreg->ceiling_volt[i])
+			cpr_vreg->ceiling_max = cpr_vreg->ceiling_volt[i];
+		cpr_vreg->cpr_max_ceiling[i] = cpr_vreg->ceiling_volt[i];
+	}
+
+	return rc;
+}
+
+/*
+ * This function computes the per-virtual-corner floor voltages from
+ * per-virtual-corner ceiling voltages with an offset specified by a
+ * device-tree property. This must be called after open-loop voltage
+ * scaling, floor_volt array loading and the ceiling voltage is
+ * conditionally reduced to the open-loop voltage. It selects the
+ * maximum value between the calculated floor voltage values and
+ * the floor_volt array values and stores them in the floor_volt array.
+ */
+static int cpr_init_floor_to_ceiling_range(
+	struct cpr_regulator *cpr_vreg, struct device *dev)
+{
+	int rc, i, tuple_count, tuple_match, len, pos;
+	u32 index, floor_volt_adjust = 0;
+	char *prop_str, *buf;
+	size_t buflen;
+
+	prop_str = "qcom,cpr-floor-to-ceiling-max-range";
+
+	/* Property is optional; nothing to do when absent. */
+	if (!of_find_property(dev->of_node, prop_str, &len))
+		return 0;
+
+	if (cpr_vreg->cpr_fuse_map_count) {
+		if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH) {
+			/*
+			 * No matching index to use for floor-to-ceiling
+			 * max range.
+			 */
+			return 0;
+		}
+		tuple_count = cpr_vreg->cpr_fuse_map_count;
+		tuple_match = cpr_vreg->cpr_fuse_map_match;
+	} else {
+		tuple_count = 1;
+		tuple_match = 0;
+	}
+
+	/* One u32 per virtual corner, per fuse-map tuple. */
+	if (len != cpr_vreg->num_corners * tuple_count * sizeof(u32)) {
+		cpr_err(cpr_vreg, "%s length=%d is invalid\n", prop_str, len);
+		return -EINVAL;
+	}
+
+	for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
+		index = tuple_match * cpr_vreg->num_corners
+				+ i - CPR_CORNER_MIN;
+		rc = of_property_read_u32_index(dev->of_node, prop_str,
+			index, &floor_volt_adjust);
+		if (rc) {
+			cpr_err(cpr_vreg, "could not read %s index %u, rc=%d\n",
+				prop_str, index, rc);
+			return rc;
+		}
+
+		/* A negative range value means "leave this corner alone". */
+		if ((int)floor_volt_adjust >= 0) {
+			cpr_vreg->floor_volt[i] = max(cpr_vreg->floor_volt[i],
+						(cpr_vreg->ceiling_volt[i]
+						- (int)floor_volt_adjust));
+			/* Round the raised floor up to a PMIC step boundary. */
+			cpr_vreg->floor_volt[i]
+					= DIV_ROUND_UP(cpr_vreg->floor_volt[i],
+							cpr_vreg->step_volt) *
+							cpr_vreg->step_volt;
+			/* Keep open-loop voltage within the new floor. */
+			if (cpr_vreg->open_loop_volt[i]
+					< cpr_vreg->floor_volt[i])
+				cpr_vreg->open_loop_volt[i]
+						= cpr_vreg->floor_volt[i];
+		}
+	}
+
+	/*
+	 * Log per-virtual-corner voltage limits resulted after considering the
+	 * floor-to-ceiling max range since they are useful for baseline CPR
+	 * debugging.
+	 */
+	buflen = cpr_vreg->num_corners * (MAX_CHARS_PER_INT + 2) * sizeof(*buf);
+	buf = kzalloc(buflen, GFP_KERNEL);
+	if (buf == NULL) {
+		/* Logging is best-effort; do not fail init over it. */
+		cpr_err(cpr_vreg, "Could not allocate memory for corner limit voltage logging\n");
+		return 0;
+	}
+
+	for (i = CPR_CORNER_MIN, pos = 0; i <= cpr_vreg->num_corners; i++)
+		pos += scnprintf(buf + pos, buflen - pos, "%d%s",
+			cpr_vreg->floor_volt[i],
+			i < cpr_vreg->num_corners ? " " : "");
+	cpr_info(cpr_vreg, "Final floor override voltages: [%s] uV\n", buf);
+	kfree(buf);
+
+	return 0;
+}
+
+/*
+ * Read the required qcom,cpr-step-quotient DT property.  It holds either a
+ * single value applied to every fuse corner, or one value per ring
+ * oscillator which is then selected per corner via cpr_fuse_ro_sel[].
+ */
+static int cpr_init_step_quotient(struct platform_device *pdev,
+		  struct cpr_regulator *cpr_vreg)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	int len = 0;
+	u32 step_quot[CPR_NUM_RING_OSC];
+	int i, rc;
+
+	if (!of_find_property(of_node, "qcom,cpr-step-quotient", &len)) {
+		cpr_err(cpr_vreg, "qcom,cpr-step-quotient property missing\n");
+		return -EINVAL;
+	}
+
+	if (len == sizeof(u32)) {
+		/* Single step quotient used for all ring oscillators. */
+		rc = of_property_read_u32(of_node, "qcom,cpr-step-quotient",
+					step_quot);
+		if (rc) {
+			cpr_err(cpr_vreg, "could not read qcom,cpr-step-quotient, rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners;
+		     i++)
+			cpr_vreg->step_quotient[i] = step_quot[0];
+	} else if (len == sizeof(u32) * CPR_NUM_RING_OSC) {
+		/* Unique step quotient used per ring oscillator. */
+		rc = of_property_read_u32_array(of_node,
+			"qcom,cpr-step-quotient", step_quot, CPR_NUM_RING_OSC);
+		if (rc) {
+			cpr_err(cpr_vreg, "could not read qcom,cpr-step-quotient, rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		/* Pick the value for the RO fused for each corner. */
+		for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners;
+		     i++)
+			cpr_vreg->step_quotient[i]
+				= step_quot[cpr_vreg->cpr_fuse_ro_sel[i]];
+	} else {
+		cpr_err(cpr_vreg, "qcom,cpr-step-quotient has invalid length=%d\n",
+			len);
+		return -EINVAL;
+	}
+
+	for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++)
+		cpr_debug(cpr_vreg, "step_quotient[%d]=%u\n", i,
+			cpr_vreg->step_quotient[i]);
+
+	return 0;
+}
+
+/*
+ * Parse the mandatory CPR timing/threshold configuration from device tree
+ * and the optional clamp timer interval and enable flag.  Returns 0 on
+ * success or the first property-read error.
+ */
+static int cpr_init_cpr_parameters(struct platform_device *pdev,
+					  struct cpr_regulator *cpr_vreg)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	int rc = 0;
+
+	CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-ref-clk",
+			  &cpr_vreg->ref_clk_khz, rc);
+	if (rc)
+		return rc;
+	CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-timer-delay",
+			  &cpr_vreg->timer_delay_us, rc);
+	if (rc)
+		return rc;
+	CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-timer-cons-up",
+			  &cpr_vreg->timer_cons_up, rc);
+	if (rc)
+		return rc;
+	CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-timer-cons-down",
+			  &cpr_vreg->timer_cons_down, rc);
+	if (rc)
+		return rc;
+	CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-irq-line",
+			  &cpr_vreg->irq_line, rc);
+	if (rc)
+		return rc;
+
+	rc = cpr_init_step_quotient(pdev, cpr_vreg);
+	if (rc)
+		return rc;
+
+	CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-up-threshold",
+			  &cpr_vreg->up_threshold, rc);
+	if (rc)
+		return rc;
+	CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-down-threshold",
+			  &cpr_vreg->down_threshold, rc);
+	if (rc)
+		return rc;
+	cpr_info(cpr_vreg, "up threshold = %u, down threshold = %u\n",
+		cpr_vreg->up_threshold, cpr_vreg->down_threshold);
+
+	CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-idle-clocks",
+			  &cpr_vreg->idle_clocks, rc);
+	if (rc)
+		return rc;
+	CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-gcnt-time",
+			  &cpr_vreg->gcnt_time_us, rc);
+	if (rc)
+		return rc;
+	CPR_PROP_READ_U32(cpr_vreg, of_node, "vdd-apc-step-up-limit",
+			  &cpr_vreg->vdd_apc_step_up_limit, rc);
+	if (rc)
+		return rc;
+	CPR_PROP_READ_U32(cpr_vreg, of_node, "vdd-apc-step-down-limit",
+			  &cpr_vreg->vdd_apc_step_down_limit, rc);
+	if (rc)
+		return rc;
+
+	/* Optional property: -EINVAL from the OF API means "not present". */
+	rc = of_property_read_u32(of_node, "qcom,cpr-clamp-timer-interval",
+				  &cpr_vreg->clamp_timer_interval);
+	if (rc && rc != -EINVAL) {
+		cpr_err(cpr_vreg,
+			"error reading qcom,cpr-clamp-timer-interval, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	/* Cap to what the RBIF_TIMER_ADJ register field can hold. */
+	cpr_vreg->clamp_timer_interval = min(cpr_vreg->clamp_timer_interval,
+					(u32)RBIF_TIMER_ADJ_CLAMP_INT_MASK);
+
+	/* Init module parameter with the DT value */
+	cpr_vreg->enable = of_property_read_bool(of_node, "qcom,cpr-enable");
+	cpr_info(cpr_vreg, "CPR is %s by default.\n",
+		cpr_vreg->enable ? "enabled" : "disabled");
+
+	return 0;
+}
+
+/*
+ * Point the active per-corner tables (last_volt, save_ctl, save_irq and
+ * optionally quot_adjust / open_loop_volt) at the row matching the current
+ * online-CPU count.  Caller must hold cpr_mutex -- TODO confirm against
+ * all call sites.
+ */
+static void cpr_regulator_switch_adj_cpus(struct cpr_regulator *cpr_vreg)
+{
+	cpr_vreg->last_volt = cpr_vreg->adj_cpus_last_volt
+					[cpr_vreg->online_cpus];
+	cpr_vreg->save_ctl = cpr_vreg->adj_cpus_save_ctl[cpr_vreg->online_cpus];
+	cpr_vreg->save_irq = cpr_vreg->adj_cpus_save_irq[cpr_vreg->online_cpus];
+
+	if (cpr_vreg->adj_cpus_quot_adjust)
+		cpr_vreg->quot_adjust = cpr_vreg->adj_cpus_quot_adjust
+						[cpr_vreg->online_cpus];
+	if (cpr_vreg->adj_cpus_open_loop_volt)
+		cpr_vreg->open_loop_volt
+			= cpr_vreg->adj_cpus_open_loop_volt
+				[cpr_vreg->online_cpus];
+	/* Optionally use the per-CPU-count open-loop table as the ceiling. */
+	if (cpr_vreg->adj_cpus_open_loop_volt_as_ceiling)
+		cpr_vreg->ceiling_volt = cpr_vreg->open_loop_volt;
+}
+
+/*
+ * Count how many of the CPUs listed in adj_cpus[] are currently online and
+ * store the result in cpr_vreg->online_cpus.  Hotplug is held off while
+ * counting via get/put_online_cpus().
+ */
+static void cpr_regulator_set_online_cpus(struct cpr_regulator *cpr_vreg)
+{
+	int i, j;
+
+	cpr_vreg->online_cpus = 0;
+	get_online_cpus();
+	for_each_online_cpu(i)
+		for (j = 0; j < cpr_vreg->num_adj_cpus; j++)
+			if (i == cpr_vreg->adj_cpus[j])
+				cpr_vreg->online_cpus++;
+	put_online_cpus();
+}
+
+/*
+ * CPU hotplug notifier: re-derive the online-CPU count, switch to the
+ * matching per-CPU-count adjustment tables, and re-apply the current
+ * corner voltage/quotient when the count changed.
+ */
+static int cpr_regulator_cpu_callback(struct notifier_block *nb,
+					    unsigned long action, void *data)
+{
+	struct cpr_regulator *cpr_vreg = container_of(nb, struct cpr_regulator,
+					cpu_notifier);
+	int cpu = (long)data;
+	int prev_online_cpus, rc, i;
+
+	action &= ~CPU_TASKS_FROZEN;
+
+	if (action != CPU_UP_PREPARE && action != CPU_UP_CANCELED
+	    && action != CPU_DEAD)
+		return NOTIFY_OK;
+
+	mutex_lock(&cpr_vreg->cpr_mutex);
+
+	if (cpr_vreg->skip_voltage_change_during_suspend
+	    && cpr_vreg->is_cpr_suspended) {
+		/* Do nothing during system suspend/resume */
+		goto done;
+	}
+
+	prev_online_cpus = cpr_vreg->online_cpus;
+	cpr_regulator_set_online_cpus(cpr_vreg);
+
+	/*
+	 * At CPU_UP_PREPARE the CPU is not in the online mask yet, so count
+	 * it manually if it is one of the adjustment CPUs.
+	 */
+	if (action == CPU_UP_PREPARE)
+		for (i = 0; i < cpr_vreg->num_adj_cpus; i++)
+			if (cpu == cpr_vreg->adj_cpus[i]) {
+				cpr_vreg->online_cpus++;
+				break;
+			}
+
+	if (cpr_vreg->online_cpus == prev_online_cpus)
+		goto done;
+
+	cpr_debug(cpr_vreg, "adjusting corner %d quotient for %d cpus\n",
+		cpr_vreg->corner, cpr_vreg->online_cpus);
+
+	cpr_regulator_switch_adj_cpus(cpr_vreg);
+
+	/* corner == 0 means the regulator has not been set yet. */
+	if (cpr_vreg->corner) {
+		rc = cpr_regulator_set_voltage(cpr_vreg->rdev,
+				cpr_vreg->corner, true);
+		if (rc)
+			cpr_err(cpr_vreg, "could not update quotient, rc=%d\n",
+				rc);
+	}
+
+done:
+	mutex_unlock(&cpr_vreg->cpr_mutex);
+	return NOTIFY_OK;
+}
+
+/*
+ * Pause/resume the CPR control loop around CPU power collapse.  On entry
+ * (disable=true) the loop is stopped only if currently enabled; on exit it
+ * is restarted only if this function was the one that stopped it.
+ */
+static void cpr_pm_disable(struct cpr_regulator *cpr_vreg, bool disable)
+{
+	u32 reg_val;
+
+	if (cpr_vreg->is_cpr_suspended)
+		return;
+
+	reg_val = cpr_read(cpr_vreg, REG_RBCPR_CTL);
+
+	if (disable) {
+		/* Proceed only if CPR is enabled */
+		if (!(reg_val & RBCPR_CTL_LOOP_EN))
+			return;
+		cpr_ctl_disable(cpr_vreg);
+		cpr_vreg->cpr_disabled_in_pc = true;
+	} else {
+		/* Proceed only if CPR was disabled in PM_ENTER */
+		if (!cpr_vreg->cpr_disabled_in_pc)
+			return;
+		cpr_vreg->cpr_disabled_in_pc = false;
+		cpr_ctl_enable(cpr_vreg, cpr_vreg->corner);
+	}
+
+	/* Make sure register write is complete */
+	mb();
+}
+
+/*
+ * CPU PM notifier: disable CPR while the CPU enters power collapse and
+ * re-enable it on exit (or failed entry).  The leading filter is redundant
+ * with the switch below but kept as-is.
+ */
+static int cpr_pm_callback(struct notifier_block *nb,
+			    unsigned long action, void *data)
+{
+	struct cpr_regulator *cpr_vreg = container_of(nb,
+			struct cpr_regulator, pm_notifier);
+
+	if (action != CPU_PM_ENTER && action != CPU_PM_ENTER_FAILED &&
+			action != CPU_PM_EXIT)
+		return NOTIFY_OK;
+
+	switch (action) {
+	case CPU_PM_ENTER:
+		cpr_pm_disable(cpr_vreg, true);
+		break;
+	case CPU_PM_ENTER_FAILED:
+	case CPU_PM_EXIT:
+		cpr_pm_disable(cpr_vreg, false);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+/*
+ * Build the optional [num_adj_cpus + 1][num_corners + 1] table of open-loop
+ * voltages per online-CPU count from the
+ * qcom,cpr-online-cpu-virtual-corner-init-voltage-adjustment property.
+ * Row i holds the voltages to use when i adjustment CPUs are online.
+ * Returns 0 when the property is absent or no fuse-map tuple matches.
+ */
+static int cpr_parse_adj_cpus_init_voltage(struct cpr_regulator *cpr_vreg,
+		struct device *dev)
+{
+	int rc, i, j, k, tuple_count, tuple_match, len, offset;
+	int *temp;
+
+	if (!of_find_property(dev->of_node,
+		   "qcom,cpr-online-cpu-virtual-corner-init-voltage-adjustment",
+		   NULL))
+		return 0;
+
+	if (cpr_vreg->cpr_fuse_map_count) {
+		if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH) {
+			/* No matching index to use for voltage adjustment. */
+			return 0;
+		}
+		tuple_count = cpr_vreg->cpr_fuse_map_count;
+		tuple_match = cpr_vreg->cpr_fuse_map_match;
+	} else {
+		tuple_count = 1;
+		tuple_match = 0;
+	}
+
+	/* Raw DT data: one entry per corner, per CPU count, per tuple. */
+	len = (cpr_vreg->num_adj_cpus + 1) * tuple_count
+		* cpr_vreg->num_corners;
+
+	temp = kzalloc(sizeof(int) * len, GFP_KERNEL);
+	if (!temp) {
+		cpr_err(cpr_vreg, "Could not allocate memory\n");
+		return -ENOMEM;
+	}
+
+	cpr_vreg->adj_cpus_open_loop_volt = devm_kzalloc(dev,
+				sizeof(int *) * (cpr_vreg->num_adj_cpus + 1),
+				GFP_KERNEL);
+	if (!cpr_vreg->adj_cpus_open_loop_volt) {
+		cpr_err(cpr_vreg, "Could not allocate memory\n");
+		rc = -ENOMEM;
+		goto done;
+	}
+
+	/* Single backing allocation; row pointers are carved out below. */
+	cpr_vreg->adj_cpus_open_loop_volt[0] = devm_kzalloc(dev,
+				sizeof(int) * (cpr_vreg->num_adj_cpus + 1)
+				* (cpr_vreg->num_corners + 1),
+				GFP_KERNEL);
+	if (!cpr_vreg->adj_cpus_open_loop_volt[0]) {
+		cpr_err(cpr_vreg, "Could not allocate memory\n");
+		rc = -ENOMEM;
+		goto done;
+	}
+	for (i = 1; i <= cpr_vreg->num_adj_cpus; i++)
+		cpr_vreg->adj_cpus_open_loop_volt[i] =
+			cpr_vreg->adj_cpus_open_loop_volt[0] +
+			i * (cpr_vreg->num_corners + 1);
+
+	rc = of_property_read_u32_array(dev->of_node,
+		"qcom,cpr-online-cpu-virtual-corner-init-voltage-adjustment",
+		temp, len);
+	if (rc) {
+		cpr_err(cpr_vreg, "failed to read qcom,cpr-online-cpu-virtual-corner-init-voltage-adjustment, rc=%d\n",
+			rc);
+		goto done;
+	}
+
+	cpr_debug(cpr_vreg, "Open loop voltage based on number of online CPUs:\n");
+	/* Skip to the rows belonging to the matched fuse-map tuple. */
+	offset = tuple_match * cpr_vreg->num_corners *
+			(cpr_vreg->num_adj_cpus + 1);
+
+	for (i = 0; i <= cpr_vreg->num_adj_cpus; i++) {
+		for (j = CPR_CORNER_MIN; j <= cpr_vreg->num_corners; j++) {
+			k = j - 1 + offset;
+
+			/* Apply the adjustment, then round up to a step. */
+			cpr_vreg->adj_cpus_open_loop_volt[i][j]
+				= cpr_vreg->open_loop_volt[j] + temp[k];
+			cpr_vreg->adj_cpus_open_loop_volt[i][j]
+			    = DIV_ROUND_UP(cpr_vreg->
+					adj_cpus_open_loop_volt[i][j],
+				cpr_vreg->step_volt) * cpr_vreg->step_volt;
+
+			/* Clamp the result into [floor, ceiling]. */
+			if (cpr_vreg->adj_cpus_open_loop_volt[i][j]
+					> cpr_vreg->ceiling_volt[j])
+				cpr_vreg->adj_cpus_open_loop_volt[i][j]
+					= cpr_vreg->ceiling_volt[j];
+			if (cpr_vreg->adj_cpus_open_loop_volt[i][j]
+					< cpr_vreg->floor_volt[j])
+				cpr_vreg->adj_cpus_open_loop_volt[i][j]
+					= cpr_vreg->floor_volt[j];
+
+			cpr_debug(cpr_vreg, "cpus=%d, corner=%d, volt=%d\n",
+				i, j, cpr_vreg->adj_cpus_open_loop_volt[i][j]);
+		}
+		offset += cpr_vreg->num_corners;
+	}
+
+	cpr_vreg->adj_cpus_open_loop_volt_as_ceiling
+		= of_property_read_bool(dev->of_node,
+			"qcom,cpr-online-cpu-init-voltage-as-ceiling");
+done:
+	kfree(temp);
+	return rc;
+}
+
+/*
+ * Build the optional [num_adj_cpus + 1][num_corners + 1] table of quotient
+ * adjustments per online-CPU count from the
+ * qcom,cpr-online-cpu-virtual-corner-quotient-adjustment property.
+ * Returns 0 when the property is absent or no fuse-map tuple matches.
+ */
+static int cpr_parse_adj_cpus_target_quot(struct cpr_regulator *cpr_vreg,
+		struct device *dev)
+{
+	int rc, i, j, k, tuple_count, tuple_match, len, offset;
+	int *temp;
+
+	if (!of_find_property(dev->of_node,
+		   "qcom,cpr-online-cpu-virtual-corner-quotient-adjustment",
+		   NULL))
+		return 0;
+
+	if (cpr_vreg->cpr_fuse_map_count) {
+		if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH) {
+			/* No matching index to use for quotient adjustment. */
+			return 0;
+		}
+		tuple_count = cpr_vreg->cpr_fuse_map_count;
+		tuple_match = cpr_vreg->cpr_fuse_map_match;
+	} else {
+		tuple_count = 1;
+		tuple_match = 0;
+	}
+
+	/* Raw DT data: one entry per corner, per CPU count, per tuple. */
+	len = (cpr_vreg->num_adj_cpus + 1) * tuple_count
+		* cpr_vreg->num_corners;
+
+	temp = kzalloc(sizeof(int) * len, GFP_KERNEL);
+	if (!temp) {
+		cpr_err(cpr_vreg, "Could not allocate memory\n");
+		return -ENOMEM;
+	}
+
+	cpr_vreg->adj_cpus_quot_adjust = devm_kzalloc(dev,
+				sizeof(int *) * (cpr_vreg->num_adj_cpus + 1),
+				GFP_KERNEL);
+	if (!cpr_vreg->adj_cpus_quot_adjust) {
+		cpr_err(cpr_vreg, "Could not allocate memory\n");
+		rc = -ENOMEM;
+		goto done;
+	}
+
+	/* Single backing allocation; row pointers are carved out below. */
+	cpr_vreg->adj_cpus_quot_adjust[0] = devm_kzalloc(dev,
+				sizeof(int) * (cpr_vreg->num_adj_cpus + 1)
+				* (cpr_vreg->num_corners + 1),
+				GFP_KERNEL);
+	if (!cpr_vreg->adj_cpus_quot_adjust[0]) {
+		cpr_err(cpr_vreg, "Could not allocate memory\n");
+		rc = -ENOMEM;
+		goto done;
+	}
+	for (i = 1; i <= cpr_vreg->num_adj_cpus; i++)
+		cpr_vreg->adj_cpus_quot_adjust[i] =
+			cpr_vreg->adj_cpus_quot_adjust[0] +
+			i * (cpr_vreg->num_corners + 1);
+
+
+	rc = of_property_read_u32_array(dev->of_node,
+		"qcom,cpr-online-cpu-virtual-corner-quotient-adjustment",
+		temp, len);
+	if (rc) {
+		cpr_err(cpr_vreg, "failed to read qcom,cpr-online-cpu-virtual-corner-quotient-adjustment, rc=%d\n",
+			rc);
+		goto done;
+	}
+
+	cpr_debug(cpr_vreg, "Target quotients based on number of online CPUs:\n");
+	/* Skip to the rows belonging to the matched fuse-map tuple. */
+	offset = tuple_match * cpr_vreg->num_corners *
+			(cpr_vreg->num_adj_cpus + 1);
+
+	for (i = 0; i <= cpr_vreg->num_adj_cpus; i++) {
+		for (j = CPR_CORNER_MIN; j <= cpr_vreg->num_corners; j++) {
+			k = j - 1 + offset;
+
+			/* A positive DT value shrinks the stored adjustment. */
+			cpr_vreg->adj_cpus_quot_adjust[i][j] =
+					cpr_vreg->quot_adjust[j] - temp[k];
+
+			cpr_debug(cpr_vreg, "cpus=%d, corner=%d, quot=%d\n",
+				i, j,
+				cpr_vreg->cpr_fuse_target_quot[
+							cpr_vreg->corner_map[j]]
+					- cpr_vreg->adj_cpus_quot_adjust[i][j]);
+		}
+		offset += cpr_vreg->num_corners;
+	}
+
+done:
+	kfree(temp);
+	return rc;
+}
+
+/*
+ * Set up per-online-CPU-count voltage/quotient adjustment support when the
+ * relevant DT properties exist: parse the adjustment tables, replace the
+ * flat last_volt/save_ctl/save_irq arrays with per-CPU-count ones, and
+ * register a CPU hotplug notifier to switch tables at runtime.
+ */
+static int cpr_init_per_cpu_adjustments(struct cpr_regulator *cpr_vreg,
+		struct device *dev)
+{
+	int rc, i, j;
+
+	if (!of_find_property(dev->of_node,
+		   "qcom,cpr-online-cpu-virtual-corner-init-voltage-adjustment",
+		   NULL)
+	    && !of_find_property(dev->of_node,
+		   "qcom,cpr-online-cpu-virtual-corner-quotient-adjustment",
+		   NULL)) {
+		/* No per-online CPU adjustment needed */
+		return 0;
+	}
+
+	if (!cpr_vreg->num_adj_cpus) {
+		cpr_err(cpr_vreg, "qcom,cpr-cpus property missing\n");
+		return -EINVAL;
+	}
+
+	rc = cpr_parse_adj_cpus_init_voltage(cpr_vreg, dev);
+	if (rc) {
+		cpr_err(cpr_vreg, "cpr_parse_adj_cpus_init_voltage failed: rc =%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = cpr_parse_adj_cpus_target_quot(cpr_vreg, dev);
+	if (rc) {
+		cpr_err(cpr_vreg, "cpr_parse_adj_cpus_target_quot failed: rc =%d\n",
+			rc);
+		return rc;
+	}
+
+	/* Row-pointer arrays: one row per possible online-CPU count. */
+	cpr_vreg->adj_cpus_last_volt = devm_kzalloc(dev,
+				sizeof(int *) * (cpr_vreg->num_adj_cpus + 1),
+				GFP_KERNEL);
+	cpr_vreg->adj_cpus_save_ctl = devm_kzalloc(dev,
+				sizeof(int *) * (cpr_vreg->num_adj_cpus + 1),
+				GFP_KERNEL);
+	cpr_vreg->adj_cpus_save_irq = devm_kzalloc(dev,
+				sizeof(int *) * (cpr_vreg->num_adj_cpus + 1),
+				GFP_KERNEL);
+	if (!cpr_vreg->adj_cpus_last_volt || !cpr_vreg->adj_cpus_save_ctl ||
+		!cpr_vreg->adj_cpus_save_irq) {
+		cpr_err(cpr_vreg, "Could not allocate memory\n");
+		return -ENOMEM;
+	}
+
+	/* One backing allocation per table; rows are carved out below. */
+	cpr_vreg->adj_cpus_last_volt[0] = devm_kzalloc(dev,
+				sizeof(int) * (cpr_vreg->num_adj_cpus + 1)
+				* (cpr_vreg->num_corners + 1),
+				GFP_KERNEL);
+	cpr_vreg->adj_cpus_save_ctl[0] = devm_kzalloc(dev,
+				sizeof(int) * (cpr_vreg->num_adj_cpus + 1)
+				* (cpr_vreg->num_corners + 1),
+				GFP_KERNEL);
+	cpr_vreg->adj_cpus_save_irq[0] = devm_kzalloc(dev,
+				sizeof(int) * (cpr_vreg->num_adj_cpus + 1)
+				* (cpr_vreg->num_corners + 1),
+				GFP_KERNEL);
+	if (!cpr_vreg->adj_cpus_last_volt[0] ||
+		!cpr_vreg->adj_cpus_save_ctl[0] ||
+		!cpr_vreg->adj_cpus_save_irq[0]) {
+		cpr_err(cpr_vreg, "Could not allocate memory\n");
+		return -ENOMEM;
+	}
+	for (i = 1; i <= cpr_vreg->num_adj_cpus; i++) {
+		j = i * (cpr_vreg->num_corners + 1);
+		cpr_vreg->adj_cpus_last_volt[i] =
+			cpr_vreg->adj_cpus_last_volt[0] + j;
+		cpr_vreg->adj_cpus_save_ctl[i] =
+			cpr_vreg->adj_cpus_save_ctl[0] + j;
+		cpr_vreg->adj_cpus_save_irq[i] =
+			cpr_vreg->adj_cpus_save_irq[0] + j;
+	}
+
+
+	/* Seed every row from the current flat tables. */
+	for (i = 0; i <= cpr_vreg->num_adj_cpus; i++) {
+		for (j = CPR_CORNER_MIN; j <= cpr_vreg->num_corners; j++) {
+
+			cpr_vreg->adj_cpus_save_ctl[i][j] =
+				cpr_vreg->save_ctl[j];
+			cpr_vreg->adj_cpus_save_irq[i][j] =
+				cpr_vreg->save_irq[j];
+
+			cpr_vreg->adj_cpus_last_volt[i][j]
+				= cpr_vreg->adj_cpus_open_loop_volt
+				? cpr_vreg->adj_cpus_open_loop_volt[i][j]
+					: cpr_vreg->open_loop_volt[j];
+		}
+	}
+
+	cpr_regulator_set_online_cpus(cpr_vreg);
+	cpr_debug(cpr_vreg, "%d cpus online\n", cpr_vreg->online_cpus);
+
+	/* The flat arrays are superseded by the per-CPU-count tables. */
+	devm_kfree(dev, cpr_vreg->last_volt);
+	devm_kfree(dev, cpr_vreg->save_ctl);
+	devm_kfree(dev, cpr_vreg->save_irq);
+	if (cpr_vreg->adj_cpus_quot_adjust)
+		devm_kfree(dev, cpr_vreg->quot_adjust);
+	if (cpr_vreg->adj_cpus_open_loop_volt)
+		devm_kfree(dev, cpr_vreg->open_loop_volt);
+	if (cpr_vreg->adj_cpus_open_loop_volt_as_ceiling)
+		devm_kfree(dev, cpr_vreg->ceiling_volt);
+
+	cpr_regulator_switch_adj_cpus(cpr_vreg);
+
+	cpr_vreg->skip_voltage_change_during_suspend
+			= of_property_read_bool(dev->of_node,
+				"qcom,cpr-skip-voltage-change-during-suspend");
+
+	cpr_vreg->cpu_notifier.notifier_call = cpr_regulator_cpu_callback;
+	register_hotcpu_notifier(&cpr_vreg->cpu_notifier);
+
+	return rc;
+}
+
+/*
+ * Register the CPU PM notifier used to pause CPR across power collapse.
+ * Intentionally restricted to single-core (num_adj_cpus == 1) designs.
+ */
+static int cpr_init_pm_notification(struct cpr_regulator *cpr_vreg)
+{
+	int rc;
+
+	/* enabled only for single-core designs */
+	if (cpr_vreg->num_adj_cpus != 1) {
+		pr_warn("qcom,cpr-cpus not defined or invalid %d\n",
+					cpr_vreg->num_adj_cpus);
+		return 0;
+	}
+
+	cpr_vreg->pm_notifier.notifier_call = cpr_pm_callback;
+	rc = cpu_pm_register_notifier(&cpr_vreg->pm_notifier);
+	if (rc)
+		cpr_err(cpr_vreg, "Unable to register pm notifier rc=%d\n", rc);
+
+	return rc;
+}
+
+/*
+ * Obtain the optional RPM-managed APC supply and its corner-map table
+ * (qcom,rpm-apc-corner-map, one u32 per virtual corner, stored 1-based).
+ * Returns 0 when the "rpm-apc-supply" property is absent.
+ */
+static int cpr_rpm_apc_init(struct platform_device *pdev,
+			       struct cpr_regulator *cpr_vreg)
+{
+	int rc, len = 0;
+	struct device_node *of_node = pdev->dev.of_node;
+
+	if (!of_find_property(of_node, "rpm-apc-supply", NULL))
+		return 0;
+
+	cpr_vreg->rpm_apc_vreg = devm_regulator_get(&pdev->dev, "rpm-apc");
+	if (IS_ERR_OR_NULL(cpr_vreg->rpm_apc_vreg)) {
+		rc = PTR_RET(cpr_vreg->rpm_apc_vreg);
+		if (rc != -EPROBE_DEFER)
+			cpr_err(cpr_vreg, "devm_regulator_get: rpm-apc: rc=%d\n",
+					rc);
+		return rc;
+	}
+
+	if (!of_find_property(of_node, "qcom,rpm-apc-corner-map", &len)) {
+		cpr_err(cpr_vreg,
+			"qcom,rpm-apc-corner-map missing:\n");
+		return -EINVAL;
+	}
+	if (len != cpr_vreg->num_corners * sizeof(u32)) {
+		cpr_err(cpr_vreg,
+			"qcom,rpm-apc-corner-map length=%d is invalid: required:%d\n",
+			len, cpr_vreg->num_corners);
+		return -EINVAL;
+	}
+
+	/* Element 0 is unused; corners are indexed from 1. */
+	cpr_vreg->rpm_apc_corner_map = devm_kzalloc(&pdev->dev,
+		(cpr_vreg->num_corners + 1) *
+		sizeof(*cpr_vreg->rpm_apc_corner_map), GFP_KERNEL);
+	if (!cpr_vreg->rpm_apc_corner_map) {
+		cpr_err(cpr_vreg, "Can't allocate memory for cpr_vreg->rpm_apc_corner_map\n");
+		return -ENOMEM;
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,rpm-apc-corner-map",
+		&cpr_vreg->rpm_apc_corner_map[1], cpr_vreg->num_corners);
+	if (rc)
+		cpr_err(cpr_vreg, "read qcom,rpm-apc-corner-map failed, rc = %d\n",
+				rc);
+
+	return rc;
+}
+
+/*
+ * Obtain the optional voltage-sensor regulators.  "vdd-vsens-voltage" and
+ * "vdd-vsens-corner" are both optional; absence of either is not an error
+ * (only -EPROBE_DEFER is propagated).  When the corner supply exists, the
+ * qcom,vsens-corner-map table (one u32 per fuse corner) is required.
+ */
+static int cpr_vsens_init(struct platform_device *pdev,
+			       struct cpr_regulator *cpr_vreg)
+{
+	int rc = 0, len = 0;
+	struct device_node *of_node = pdev->dev.of_node;
+
+	if (of_find_property(of_node, "vdd-vsens-voltage-supply", NULL)) {
+		cpr_vreg->vdd_vsens_voltage = devm_regulator_get(&pdev->dev,
+							"vdd-vsens-voltage");
+		if (IS_ERR_OR_NULL(cpr_vreg->vdd_vsens_voltage)) {
+			rc = PTR_ERR(cpr_vreg->vdd_vsens_voltage);
+			cpr_vreg->vdd_vsens_voltage = NULL;
+			if (rc == -EPROBE_DEFER)
+				return rc;
+			/* device not found */
+			cpr_debug(cpr_vreg, "regulator_get: vdd-vsens-voltage: rc=%d\n",
+					rc);
+			return 0;
+		}
+	}
+
+	if (of_find_property(of_node, "vdd-vsens-corner-supply", NULL)) {
+		cpr_vreg->vdd_vsens_corner = devm_regulator_get(&pdev->dev,
+							"vdd-vsens-corner");
+		if (IS_ERR_OR_NULL(cpr_vreg->vdd_vsens_corner)) {
+			rc = PTR_ERR(cpr_vreg->vdd_vsens_corner);
+			cpr_vreg->vdd_vsens_corner = NULL;
+			if (rc == -EPROBE_DEFER)
+				return rc;
+			/* device not found */
+			cpr_debug(cpr_vreg, "regulator_get: vdd-vsens-corner: rc=%d\n",
+					rc);
+			return 0;
+		}
+
+		if (!of_find_property(of_node, "qcom,vsens-corner-map", &len)) {
+			cpr_err(cpr_vreg, "qcom,vsens-corner-map missing\n");
+			return -EINVAL;
+		}
+
+		if (len != cpr_vreg->num_fuse_corners * sizeof(u32)) {
+			cpr_err(cpr_vreg, "qcom,vsens-corner-map length=%d is invalid: required:%d\n",
+				len, cpr_vreg->num_fuse_corners);
+			return -EINVAL;
+		}
+
+		/* Element 0 unused; fuse corners are indexed from 1. */
+		cpr_vreg->vsens_corner_map = devm_kcalloc(&pdev->dev,
+					(cpr_vreg->num_fuse_corners + 1),
+			sizeof(*cpr_vreg->vsens_corner_map), GFP_KERNEL);
+		if (!cpr_vreg->vsens_corner_map)
+			return -ENOMEM;
+
+		rc = of_property_read_u32_array(of_node,
+					"qcom,vsens-corner-map",
+					&cpr_vreg->vsens_corner_map[1],
+					cpr_vreg->num_fuse_corners);
+		if (rc)
+			cpr_err(cpr_vreg, "read qcom,vsens-corner-map failed, rc = %d\n",
+				rc);
+	}
+
+	return rc;
+}
+
+/*
+ * Thermally gate the CPR closed loop: when disable is true, stop the loop
+ * and force the open-loop voltage; when false, restore closed-loop
+ * operation.  No-ops when CPR is fuse-disabled or the state is unchanged.
+ * Fixed typo in the enable-path debug message ("thresold" -> "threshold").
+ */
+static int cpr_disable_on_temp(struct cpr_regulator *cpr_vreg, bool disable)
+{
+	int rc = 0;
+
+	mutex_lock(&cpr_vreg->cpr_mutex);
+
+	if (cpr_vreg->cpr_fuse_disable ||
+		(cpr_vreg->cpr_thermal_disable == disable))
+		goto out;
+
+	cpr_vreg->cpr_thermal_disable = disable;
+
+	/* Only touch hardware once a corner has been requested. */
+	if (cpr_vreg->enable && cpr_vreg->corner) {
+		if (disable) {
+			cpr_debug(cpr_vreg, "Disabling CPR - below temperature threshold [%d]\n",
+					cpr_vreg->cpr_disable_temp_threshold);
+			/* disable CPR and force open-loop */
+			cpr_ctl_disable(cpr_vreg);
+			rc = cpr_regulator_set_voltage(cpr_vreg->rdev,
+						cpr_vreg->corner, false);
+			if (rc < 0)
+				cpr_err(cpr_vreg, "Failed to set voltage, rc=%d\n",
+						rc);
+		} else {
+			/* enable CPR */
+			cpr_debug(cpr_vreg, "Enabling CPR - above temperature threshold [%d]\n",
+					cpr_vreg->cpr_enable_temp_threshold);
+			rc = cpr_regulator_set_voltage(cpr_vreg->rdev,
+						cpr_vreg->corner, true);
+			if (rc < 0)
+				cpr_err(cpr_vreg, "Failed to set voltage, rc=%d\n",
+						rc);
+		}
+	}
+out:
+	mutex_unlock(&cpr_vreg->cpr_mutex);
+	return rc;
+}
+
+/*
+ * TSENS trip callback: re-enable CPR when temperature rises past the high
+ * trip, disable it when it falls past the low trip, then re-arm the
+ * threshold if the trip state changed.
+ */
+static void tsens_threshold_notify(struct therm_threshold *tsens_cb_data)
+{
+	struct threshold_info *info = tsens_cb_data->parent;
+	struct cpr_regulator *cpr_vreg = container_of(info,
+			struct cpr_regulator, tsens_threshold_config);
+	int rc = 0;
+
+	cpr_debug(cpr_vreg, "Triggered tsens-notification trip_type=%d for thermal_zone_id=%d\n",
+		tsens_cb_data->trip_triggered, tsens_cb_data->sensor_id);
+
+	switch (tsens_cb_data->trip_triggered) {
+	case THERMAL_TRIP_CONFIGURABLE_HI:
+		rc = cpr_disable_on_temp(cpr_vreg, false);
+		if (rc < 0)
+			cpr_err(cpr_vreg, "Failed to enable CPR, rc=%d\n", rc);
+		break;
+	case THERMAL_TRIP_CONFIGURABLE_LOW:
+		rc = cpr_disable_on_temp(cpr_vreg, true);
+		if (rc < 0)
+			cpr_err(cpr_vreg, "Failed to disable CPR, rc=%d\n", rc);
+		break;
+	default:
+		cpr_debug(cpr_vreg, "trip-type %d not supported\n",
+				tsens_cb_data->trip_triggered);
+		break;
+	}
+
+	/* Re-arm the sensor threshold only on a state transition. */
+	if (tsens_cb_data->cur_state != tsens_cb_data->trip_triggered) {
+		rc = sensor_mgr_set_threshold(tsens_cb_data->sensor_id,
+						tsens_cb_data->threshold);
+		if (rc < 0)
+			cpr_err(cpr_vreg,
+			"Failed to set temp. threshold, rc=%d\n", rc);
+		else
+			tsens_cb_data->cur_state =
+				tsens_cb_data->trip_triggered;
+	}
+}
+
+/*
+ * Sample the configured TSENS sensor once (if TSENS is ready) and apply
+ * the thermal CPR gating based on the current temperature.
+ * Fixed typo in the read-failure message ("Faled" -> "Failed").
+ */
+static int cpr_check_tsens(struct cpr_regulator *cpr_vreg)
+{
+	int rc = 0;
+	struct tsens_device tsens_dev;
+	unsigned long temp = 0;
+	bool disable;
+
+	if (tsens_is_ready() > 0) {
+		tsens_dev.sensor_num = cpr_vreg->tsens_id;
+		rc = tsens_get_temp(&tsens_dev, &temp);
+		if (rc < 0) {
+			cpr_err(cpr_vreg, "Failed to read tsens, rc=%d\n", rc);
+			return rc;
+		}
+
+		disable = (int) temp <= cpr_vreg->cpr_disable_temp_threshold;
+		rc = cpr_disable_on_temp(cpr_vreg, disable);
+		if (rc)
+			cpr_err(cpr_vreg, "Failed to %s CPR, rc=%d\n",
+					disable ? "disable" : "enable", rc);
+	}
+
+	return rc;
+}
+
+/*
+ * Parse optional thermal-gating configuration.  When a TSENS sensor id is
+ * given, both temperature thresholds are required and the disable
+ * threshold must be strictly below the enable threshold (hysteresis).
+ */
+static int cpr_thermal_init(struct cpr_regulator *cpr_vreg)
+{
+	int rc;
+	struct device_node *of_node = cpr_vreg->dev->of_node;
+
+	if (!of_find_property(of_node, "qcom,cpr-thermal-sensor-id", NULL))
+		return 0;
+
+	CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-thermal-sensor-id",
+			  &cpr_vreg->tsens_id, rc);
+	if (rc < 0)
+		return rc;
+
+	CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-disable-temp-threshold",
+			  &cpr_vreg->cpr_disable_temp_threshold, rc);
+	if (rc < 0)
+		return rc;
+
+	CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-enable-temp-threshold",
+			  &cpr_vreg->cpr_enable_temp_threshold, rc);
+	if (rc < 0)
+		return rc;
+
+	if (cpr_vreg->cpr_disable_temp_threshold >=
+				cpr_vreg->cpr_enable_temp_threshold) {
+		cpr_err(cpr_vreg, "Invalid temperature threshold cpr_disable_temp[%d] >= cpr_enable_temp[%d]\n",
+				cpr_vreg->cpr_disable_temp_threshold,
+				cpr_vreg->cpr_enable_temp_threshold);
+		return -EINVAL;
+	}
+
+	cpr_vreg->cpr_disable_on_temperature = true;
+
+	return 0;
+}
+
+/*
+ * Main CPR hardware/configuration bring-up: map the RBCPR register block,
+ * parse CPR parameters and efuse data, compute the per-corner voltage
+ * limits, configure the hardware (left disabled) and claim the CPR IRQ.
+ *
+ * Fixes: check the devm_ioremap() result (previously unchecked, risking a
+ * NULL dereference on the first register access) and treat negative
+ * platform_get_irq() returns as errors (previously only 0 was rejected).
+ */
+static int cpr_init_cpr(struct platform_device *pdev,
+			       struct cpr_regulator *cpr_vreg)
+{
+	struct resource *res;
+	int rc = 0;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rbcpr_clk");
+	if (res && res->start)
+		cpr_vreg->rbcpr_clk_addr = res->start;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rbcpr");
+	if (!res || !res->start) {
+		cpr_err(cpr_vreg, "missing rbcpr address: res=%p\n", res);
+		return -EINVAL;
+	}
+	cpr_vreg->rbcpr_base = devm_ioremap(&pdev->dev, res->start,
+					    resource_size(res));
+	if (!cpr_vreg->rbcpr_base) {
+		cpr_err(cpr_vreg, "could not map rbcpr address %pa\n",
+				&res->start);
+		return -ENOMEM;
+	}
+
+	/* Init CPR configuration parameters */
+	rc = cpr_init_cpr_parameters(pdev, cpr_vreg);
+	if (rc)
+		return rc;
+
+	rc = cpr_init_cpr_efuse(pdev, cpr_vreg);
+	if (rc)
+		return rc;
+
+	/* Load per corner ceiling and floor voltages if they exist. */
+	rc = cpr_init_ceiling_floor_override_voltages(cpr_vreg, &pdev->dev);
+	if (rc)
+		return rc;
+
+	/*
+	 * Limit open loop voltages based upon per corner ceiling and floor
+	 * voltages.
+	 */
+	rc = cpr_limit_open_loop_voltage(cpr_vreg);
+	if (rc)
+		return rc;
+
+	/*
+	 * Fill the OPP table for this device with virtual voltage corner to
+	 * open-loop voltage pairs.
+	 */
+	rc = cpr_populate_opp_table(cpr_vreg, &pdev->dev);
+	if (rc)
+		return rc;
+
+	/* Reduce the ceiling voltage if allowed. */
+	rc = cpr_reduce_ceiling_voltage(cpr_vreg, &pdev->dev);
+	if (rc)
+		return rc;
+
+	/* Load CPR floor to ceiling range if exist. */
+	rc = cpr_init_floor_to_ceiling_range(cpr_vreg, &pdev->dev);
+	if (rc)
+		return rc;
+
+	/* Init all voltage set points of APC regulator for CPR */
+	rc = cpr_init_cpr_voltages(cpr_vreg, &pdev->dev);
+	if (rc)
+		return rc;
+
+	/* Get and Init interrupt */
+	cpr_vreg->cpr_irq = platform_get_irq(pdev, 0);
+	if (cpr_vreg->cpr_irq <= 0) {
+		cpr_err(cpr_vreg, "missing CPR IRQ\n");
+		return -EINVAL;
+	}
+
+	/* Configure CPR HW but keep it disabled */
+	rc = cpr_config(cpr_vreg, &pdev->dev);
+	if (rc)
+		return rc;
+
+	rc = request_threaded_irq(cpr_vreg->cpr_irq, NULL, cpr_irq_handler,
+				  IRQF_ONESHOT | IRQF_TRIGGER_RISING, "cpr",
+				  cpr_vreg);
+	if (rc) {
+		cpr_err(cpr_vreg, "CPR: request irq failed for IRQ %d\n",
+				cpr_vreg->cpr_irq);
+		return rc;
+	}
+
+	return 0;
+}
+
+/*
+ * Create a set of virtual fuse rows if optional device tree properties are
+ * present.
+ */
+static int cpr_remap_efuse_data(struct platform_device *pdev,
+				 struct cpr_regulator *cpr_vreg)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	struct property *prop;
+	u64 fuse_param;
+	u32 *temp;
+	int size, rc, i, bits, in_row, in_bit, out_row, out_bit;
+
+	prop = of_find_property(of_node, "qcom,fuse-remap-source", NULL);
+	if (!prop) {
+		/* No fuse remapping needed. */
+		return 0;
+	}
+
+	/* The property must be a whole number of 4-cell tuples. */
+	size = prop->length / sizeof(u32);
+	if (size == 0 || size % 4) {
+		cpr_err(cpr_vreg, "qcom,fuse-remap-source has invalid size=%d\n",
+			size);
+		return -EINVAL;
+	}
+	size /= 4;
+
+	rc = of_property_read_u32(of_node, "qcom,fuse-remap-base-row",
+				&cpr_vreg->remapped_row_base);
+	if (rc) {
+		cpr_err(cpr_vreg, "could not read qcom,fuse-remap-base-row, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	temp = kzalloc(sizeof(*temp) * size * 4, GFP_KERNEL);
+	if (!temp) {
+		cpr_err(cpr_vreg, "temp memory allocation failed\n");
+		return -ENOMEM;
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,fuse-remap-source", temp,
+					size * 4);
+	if (rc) {
+		cpr_err(cpr_vreg, "could not read qcom,fuse-remap-source, rc=%d\n",
+			rc);
+		goto done;
+	}
+
+	/*
+	 * Format of tuples in qcom,fuse-remap-source property:
+	 * <row bit-offset bit-count fuse-read-method>
+	 */
+	for (i = 0, bits = 0; i < size; i++)
+		bits += temp[i * 4 + 2];
+
+	/* Each virtual row holds 64 bits of concatenated fuse data. */
+	cpr_vreg->num_remapped_rows = DIV_ROUND_UP(bits, 64);
+	cpr_vreg->remapped_row = devm_kzalloc(&pdev->dev,
+		sizeof(*cpr_vreg->remapped_row) * cpr_vreg->num_remapped_rows,
+		GFP_KERNEL);
+	if (!cpr_vreg->remapped_row) {
+		cpr_err(cpr_vreg, "remapped_row memory allocation failed\n");
+		rc = -ENOMEM;
+		goto done;
+	}
+
+	/*
+	 * Pack each source bit range into the 64-bit virtual rows,
+	 * tracking the current output row and bit position across tuples.
+	 */
+	for (i = 0, out_row = 0, out_bit = 0; i < size; i++) {
+		in_row = temp[i * 4];
+		in_bit = temp[i * 4 + 1];
+		bits = temp[i * 4 + 2];
+
+		/* Consume the source range in 64-bit chunks first. */
+		while (bits > 64) {
+			fuse_param = cpr_read_efuse_param(cpr_vreg, in_row,
+					in_bit, 64, temp[i * 4 + 3]);
+
+			cpr_vreg->remapped_row[out_row++]
+				|= fuse_param << out_bit;
+			/* Spill the bits that did not fit into the next row. */
+			if (out_bit > 0)
+				cpr_vreg->remapped_row[out_row]
+					|= fuse_param >> (64 - out_bit);
+
+			bits -= 64;
+			in_bit += 64;
+		}
+
+		/* Then the remaining (<= 64) bits of this tuple. */
+		fuse_param = cpr_read_efuse_param(cpr_vreg, in_row, in_bit,
+						bits, temp[i * 4 + 3]);
+
+		cpr_vreg->remapped_row[out_row] |= fuse_param << out_bit;
+		if (bits < 64 - out_bit) {
+			out_bit += bits;
+		} else {
+			out_row++;
+			if (out_bit > 0)
+				cpr_vreg->remapped_row[out_row]
+					|= fuse_param >> (64 - out_bit);
+			out_bit = bits - (64 - out_bit);
+		}
+	}
+
+done:
+	kfree(temp);
+	return rc;
+}
+
+/*
+ * Map the efuse block described by the "efuse_addr" platform resource.
+ * Uses resource_size() for the mapping length, consistent with the rbcpr
+ * mapping in cpr_init_cpr(), instead of open-coding end - start + 1.
+ */
+static int cpr_efuse_init(struct platform_device *pdev,
+				 struct cpr_regulator *cpr_vreg)
+{
+	struct resource *res;
+	int len;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "efuse_addr");
+	if (!res || !res->start) {
+		cpr_err(cpr_vreg, "efuse_addr missing: res=%p\n", res);
+		return -EINVAL;
+	}
+
+	cpr_vreg->efuse_addr = res->start;
+	len = resource_size(res);
+
+	cpr_info(cpr_vreg, "efuse_addr = %pa (len=0x%x)\n", &res->start, len);
+
+	/* Plain ioremap: released explicitly via cpr_efuse_free(). */
+	cpr_vreg->efuse_base = ioremap(cpr_vreg->efuse_addr, len);
+	if (!cpr_vreg->efuse_base) {
+		cpr_err(cpr_vreg, "Unable to map efuse_addr %pa\n",
+				&cpr_vreg->efuse_addr);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Release the eFuse register mapping created by cpr_efuse_init(). */
+static void cpr_efuse_free(struct cpr_regulator *cpr_vreg)
+{
+	iounmap(cpr_vreg->efuse_base);
+}
+
+/*
+ * Inspect the optional "qcom,cpr-fuse-cond-min-volt-sel" fuse tuple.  If
+ * the property exists and the fuse does not read back with the expected
+ * value, flag that all PVS corner voltages must be restricted to at least
+ * qcom,cpr-cond-min-voltage (applied later in cpr_voltage_plan_init()).
+ */
+static void cpr_parse_cond_min_volt_fuse(struct cpr_regulator *cpr_vreg,
+						struct device_node *of_node)
+{
+	u32 sel[5];
+
+	/* Property absent: no conditional minimum voltage restriction. */
+	if (of_property_read_u32_array(of_node,
+			"qcom,cpr-fuse-cond-min-volt-sel", sel, 5))
+		return;
+
+	if (!cpr_fuse_is_setting_expected(cpr_vreg, sel))
+		cpr_vreg->flags |= FLAGS_SET_MIN_VOLTAGE;
+}
+
+/*
+ * Read the CPU speed bin from the fuse location described by the 4-tuple
+ * "qcom,speed-bin-fuse-sel": <row bit-shift bit-count read-method>.  When
+ * the property is absent, the speed bin is recorded as SPEED_BIN_NONE.
+ */
+static void cpr_parse_speed_bin_fuse(struct cpr_regulator *cpr_vreg,
+				struct device_node *of_node)
+{
+	u64 row_val;
+	u32 sel[4];
+	u32 bin;
+
+	if (of_property_read_u32_array(of_node, "qcom,speed-bin-fuse-sel",
+					sel, 4)) {
+		cpr_vreg->speed_bin = SPEED_BIN_NONE;
+		return;
+	}
+
+	row_val = cpr_read_efuse_row(cpr_vreg, sel[0], sel[3]);
+	bin = (row_val >> sel[1]) & ((1 << sel[2]) - 1);
+	cpr_info(cpr_vreg, "[row: %d]: 0x%llx, speed_bits = %d\n",
+			sel[0], row_val, bin);
+	cpr_vreg->speed_bin = bin;
+}
+
+/*
+ * Decide whether the uplift quotient/voltage adjustment applies.  It is
+ * enabled only when all three hold: the "qcom,cpr-fuse-uplift-sel" fuse
+ * reads back with the expected value, the part's speed bin matches
+ * "qcom,cpr-uplift-speed-bin", and the conditional-minimum-voltage
+ * restriction is not already in force.
+ *
+ * Returns 0 when the uplift property is absent or handled; a negative
+ * error only if the fuse property exists but the speed-bin property is
+ * missing (inconsistent device tree).
+ */
+static int cpr_voltage_uplift_enable_check(struct cpr_regulator *cpr_vreg,
+					struct device_node *of_node)
+{
+	int rc;
+	u32 fuse_sel[5];
+	u32 uplift_speed_bin;
+
+	rc = of_property_read_u32_array(of_node,
+			"qcom,cpr-fuse-uplift-sel", fuse_sel, 5);
+	if (!rc) {
+		rc = of_property_read_u32(of_node,
+				"qcom,cpr-uplift-speed-bin",
+				&uplift_speed_bin);
+		if (rc < 0) {
+			cpr_err(cpr_vreg,
+				"qcom,cpr-uplift-speed-bin missing\n");
+			return rc;
+		}
+		if (cpr_fuse_is_setting_expected(cpr_vreg, fuse_sel)
+			&& (uplift_speed_bin == cpr_vreg->speed_bin)
+			&& !(cpr_vreg->flags & FLAGS_SET_MIN_VOLTAGE)) {
+			cpr_vreg->flags |= FLAGS_UPLIFT_QUOT_VOLT;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Read in the number of fuse corners and then allocate memory for arrays that
+ * are sized based upon the number of fuse corners.
+ */
+static int cpr_fuse_corner_array_alloc(struct device *dev,
+					struct cpr_regulator *cpr_vreg)
+{
+	int rc;
+	size_t len;
+
+	rc = of_property_read_u32(dev->of_node, "qcom,cpr-fuse-corners",
+				&cpr_vreg->num_fuse_corners);
+	if (rc < 0) {
+		cpr_err(cpr_vreg, "qcom,cpr-fuse-corners missing: rc=%d\n", rc);
+		return rc;
+	}
+
+	/* Reject corner counts outside [CPR_FUSE_CORNER_MIN, LIMIT]. */
+	if (cpr_vreg->num_fuse_corners < CPR_FUSE_CORNER_MIN
+	    || cpr_vreg->num_fuse_corners > CPR_FUSE_CORNER_LIMIT) {
+		cpr_err(cpr_vreg, "corner count=%d is invalid\n",
+			cpr_vreg->num_fuse_corners);
+		return -EINVAL;
+	}
+
+	/*
+	 * The arrays sized based on the fuse corner count ignore element 0
+	 * in order to simplify indexing throughout the driver since min_uV = 0
+	 * cannot be passed into a set_voltage() callback.
+	 */
+	len = cpr_vreg->num_fuse_corners + 1;
+
+	/* devm allocations: freed automatically when the device detaches. */
+	cpr_vreg->pvs_corner_v = devm_kzalloc(dev,
+			len * sizeof(*cpr_vreg->pvs_corner_v), GFP_KERNEL);
+	cpr_vreg->cpr_fuse_target_quot = devm_kzalloc(dev,
+		len * sizeof(*cpr_vreg->cpr_fuse_target_quot), GFP_KERNEL);
+	cpr_vreg->cpr_fuse_ro_sel = devm_kzalloc(dev,
+			len * sizeof(*cpr_vreg->cpr_fuse_ro_sel), GFP_KERNEL);
+	cpr_vreg->fuse_ceiling_volt = devm_kzalloc(dev,
+		len * (sizeof(*cpr_vreg->fuse_ceiling_volt)), GFP_KERNEL);
+	cpr_vreg->fuse_floor_volt = devm_kzalloc(dev,
+		len * (sizeof(*cpr_vreg->fuse_floor_volt)), GFP_KERNEL);
+	cpr_vreg->step_quotient = devm_kzalloc(dev,
+		len * sizeof(*cpr_vreg->step_quotient), GFP_KERNEL);
+
+	if (cpr_vreg->pvs_corner_v == NULL || cpr_vreg->cpr_fuse_ro_sel == NULL
+	    || cpr_vreg->fuse_ceiling_volt == NULL
+	    || cpr_vreg->fuse_floor_volt == NULL
+	    || cpr_vreg->cpr_fuse_target_quot == NULL
+	    || cpr_vreg->step_quotient == NULL) {
+		cpr_err(cpr_vreg, "Could not allocate memory for CPR arrays\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/*
+ * Read the per-fuse-corner ceiling and floor voltages from the device tree
+ * and apply the conditional minimum-voltage and uplift fuse adjustments.
+ * Both voltage arrays are indexed from CPR_FUSE_CORNER_MIN.
+ */
+static int cpr_voltage_plan_init(struct platform_device *pdev,
+					struct cpr_regulator *cpr_vreg)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	int rc, i;
+	u32 min_uv = 0;
+
+	rc = of_property_read_u32_array(of_node, "qcom,cpr-voltage-ceiling",
+		&cpr_vreg->fuse_ceiling_volt[CPR_FUSE_CORNER_MIN],
+		cpr_vreg->num_fuse_corners);
+	if (rc < 0) {
+		cpr_err(cpr_vreg, "cpr-voltage-ceiling missing: rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,cpr-voltage-floor",
+		&cpr_vreg->fuse_floor_volt[CPR_FUSE_CORNER_MIN],
+		cpr_vreg->num_fuse_corners);
+	if (rc < 0) {
+		cpr_err(cpr_vreg, "cpr-voltage-floor missing: rc=%d\n", rc);
+		return rc;
+	}
+
+	cpr_parse_cond_min_volt_fuse(cpr_vreg, of_node);
+	rc = cpr_voltage_uplift_enable_check(cpr_vreg, of_node);
+	if (rc < 0) {
+		cpr_err(cpr_vreg, "voltage uplift enable check failed, %d\n",
+			rc);
+		return rc;
+	}
+	if (cpr_vreg->flags & FLAGS_SET_MIN_VOLTAGE) {
+		/* min_uv stays 0 if qcom,cpr-cond-min-voltage is absent */
+		of_property_read_u32(of_node, "qcom,cpr-cond-min-voltage",
+					&min_uv);
+		/* Clamp both ceiling and floor up to min_uv per corner. */
+		for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners;
+		     i++)
+			if (cpr_vreg->fuse_ceiling_volt[i] < min_uv) {
+				cpr_vreg->fuse_ceiling_volt[i] = min_uv;
+				cpr_vreg->fuse_floor_volt[i] = min_uv;
+			} else if (cpr_vreg->fuse_floor_volt[i] < min_uv) {
+				cpr_vreg->fuse_floor_volt[i] = min_uv;
+			}
+	}
+
+	return 0;
+}
+
+/*
+ * Acquire the optional mem-acc regulator and read the corner map used to
+ * scale memory accelerator settings with CPR corners.  The map is read
+ * from "qcom,mem-acc-corner-map" when present, otherwise from
+ * "qcom,cpr-corner-map".  The array is sized with one extra element so it
+ * can be indexed directly from CPR_FUSE_CORNER_MIN.
+ *
+ * Returns 0 on success, -EPROBE_DEFER if the supply is not ready yet, or
+ * another negative errno on failure.
+ */
+static int cpr_mem_acc_init(struct platform_device *pdev,
+				struct cpr_regulator *cpr_vreg)
+{
+	int rc, size;
+	struct property *prop;
+	char *corner_map_str;
+
+	if (of_find_property(pdev->dev.of_node, "mem-acc-supply", NULL)) {
+		cpr_vreg->mem_acc_vreg = devm_regulator_get(&pdev->dev,
+							"mem-acc");
+		if (IS_ERR_OR_NULL(cpr_vreg->mem_acc_vreg)) {
+			rc = PTR_RET(cpr_vreg->mem_acc_vreg);
+			if (rc != -EPROBE_DEFER)
+				cpr_err(cpr_vreg,
+					"devm_regulator_get: mem-acc: rc=%d\n",
+					rc);
+			return rc;
+		}
+	}
+
+	corner_map_str = "qcom,mem-acc-corner-map";
+	prop = of_find_property(pdev->dev.of_node, corner_map_str, NULL);
+	if (!prop) {
+		corner_map_str = "qcom,cpr-corner-map";
+		prop = of_find_property(pdev->dev.of_node, corner_map_str,
+					NULL);
+		if (!prop) {
+			cpr_err(cpr_vreg, "qcom,cpr-corner-map missing\n");
+			return -EINVAL;
+		}
+	}
+
+	size = prop->length / sizeof(u32);
+	cpr_vreg->mem_acc_corner_map = devm_kzalloc(&pdev->dev,
+					sizeof(int) * (size + 1),
+					GFP_KERNEL);
+	/* devm_kzalloc() can fail; bail out before dereferencing the map. */
+	if (!cpr_vreg->mem_acc_corner_map)
+		return -ENOMEM;
+
+	rc = of_property_read_u32_array(pdev->dev.of_node, corner_map_str,
+			&cpr_vreg->mem_acc_corner_map[CPR_FUSE_CORNER_MIN],
+			size);
+	if (rc) {
+		cpr_err(cpr_vreg, "%s missing, rc = %d\n", corner_map_str, rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+/*
+ * debugfs setter for "cpr_enable": switch the CPR closed loop on or off at
+ * runtime under the CPR mutex.  The request is refused when CPR has been
+ * permanently disabled by fuse (cpr_fuse_disable).  The hardware is only
+ * touched when a corner has already been set (cpr_vreg->corner != 0).
+ */
+static int cpr_enable_set(void *data, u64 val)
+{
+	struct cpr_regulator *cpr_vreg = data;
+	bool old_cpr_enable;
+
+	mutex_lock(&cpr_vreg->cpr_mutex);
+
+	old_cpr_enable = cpr_vreg->enable;
+	cpr_vreg->enable = val;
+
+	/* No state change requested: nothing to do. */
+	if (old_cpr_enable == cpr_vreg->enable)
+		goto _exit;
+
+	if (cpr_vreg->enable && cpr_vreg->cpr_fuse_disable) {
+		cpr_info(cpr_vreg,
+			"CPR permanently disabled due to fuse values\n");
+		cpr_vreg->enable = false;
+		goto _exit;
+	}
+
+	cpr_debug(cpr_vreg, "%s CPR [corner=%d, fuse_corner=%d]\n",
+		cpr_vreg->enable ? "enabling" : "disabling",
+		cpr_vreg->corner, cpr_vreg->corner_map[cpr_vreg->corner]);
+
+	if (cpr_vreg->corner) {
+		if (cpr_vreg->enable) {
+			/* Restart the loop cleanly at the current corner. */
+			cpr_ctl_disable(cpr_vreg);
+			cpr_irq_clr(cpr_vreg);
+			cpr_corner_restore(cpr_vreg, cpr_vreg->corner);
+			cpr_ctl_enable(cpr_vreg, cpr_vreg->corner);
+		} else {
+			cpr_ctl_disable(cpr_vreg);
+			cpr_irq_set(cpr_vreg, 0);
+		}
+	}
+
+_exit:
+	mutex_unlock(&cpr_vreg->cpr_mutex);
+
+	return 0;
+}
+
+/* debugfs getter: report whether the CPR closed loop is enabled. */
+static int cpr_enable_get(void *data, u64 *val)
+{
+	struct cpr_regulator *cpr_vreg = data;
+
+	*val = cpr_vreg->enable;
+
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cpr_enable_fops, cpr_enable_get, cpr_enable_set,
+			"%llu\n");
+
+/* debugfs getter: ceiling voltage for the currently selected corner. */
+static int cpr_get_cpr_ceiling(void *data, u64 *val)
+{
+	struct cpr_regulator *cpr_vreg = data;
+
+	*val = cpr_vreg->ceiling_volt[cpr_vreg->corner];
+
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cpr_ceiling_fops, cpr_get_cpr_ceiling, NULL,
+			"%llu\n");
+
+/* debugfs getter: floor voltage for the currently selected corner. */
+static int cpr_get_cpr_floor(void *data, u64 *val)
+{
+	struct cpr_regulator *cpr_vreg = data;
+
+	*val = cpr_vreg->floor_volt[cpr_vreg->corner];
+
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cpr_floor_fops, cpr_get_cpr_floor, NULL,
+			"%llu\n");
+
+/* debugfs getter: maximum ceiling voltage for the current corner. */
+static int cpr_get_cpr_max_ceiling(void *data, u64 *val)
+{
+	struct cpr_regulator *cpr_vreg = data;
+
+	*val = cpr_vreg->cpr_max_ceiling[cpr_vreg->corner];
+
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cpr_max_ceiling_fops, cpr_get_cpr_max_ceiling, NULL,
+			"%llu\n");
+
+/* debugfs open: stash the cpr_regulator pointer for the read handler. */
+static int cpr_debug_info_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+
+	return 0;
+}
+
+/*
+ * debugfs read handler for "debug_info": dump the current corner and
+ * voltage plus a decode of the RBCPR result/status registers.
+ *
+ * scnprintf() is used instead of snprintf(): snprintf() returns the
+ * would-be length, so on truncation 'ret' could grow past PAGE_SIZE and
+ * the remaining-space argument (PAGE_SIZE - ret) would wrap around.
+ * scnprintf() returns the number of bytes actually written.
+ */
+static ssize_t cpr_debug_info_read(struct file *file, char __user *buff,
+				size_t count, loff_t *ppos)
+{
+	struct cpr_regulator *cpr_vreg = file->private_data;
+	char *debugfs_buf;
+	ssize_t len, ret = 0;
+	u32 gcnt, ro_sel, ctl, irq_status, reg, error_steps;
+	u32 step_dn, step_up, error, error_lt0, busy;
+	int fuse_corner;
+
+	debugfs_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!debugfs_buf)
+		return -ENOMEM;
+
+	mutex_lock(&cpr_vreg->cpr_mutex);
+
+	fuse_corner = cpr_vreg->corner_map[cpr_vreg->corner];
+
+	len = scnprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+		"corner = %d, current_volt = %d uV\n",
+		cpr_vreg->corner, cpr_vreg->last_volt[cpr_vreg->corner]);
+	ret += len;
+
+	len = scnprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+			"fuse_corner = %d, current_volt = %d uV\n",
+			fuse_corner, cpr_vreg->last_volt[cpr_vreg->corner]);
+	ret += len;
+
+	ro_sel = cpr_vreg->cpr_fuse_ro_sel[fuse_corner];
+	gcnt = cpr_read(cpr_vreg, REG_RBCPR_GCNT_TARGET(ro_sel));
+	len = scnprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+			"rbcpr_gcnt_target (%u) = 0x%02X\n", ro_sel, gcnt);
+	ret += len;
+
+	ctl = cpr_read(cpr_vreg, REG_RBCPR_CTL);
+	len = scnprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+			"rbcpr_ctl = 0x%02X\n", ctl);
+	ret += len;
+
+	irq_status = cpr_read(cpr_vreg, REG_RBIF_IRQ_STATUS);
+	len = scnprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+			"rbcpr_irq_status = 0x%02X\n", irq_status);
+	ret += len;
+
+	reg = cpr_read(cpr_vreg, REG_RBCPR_RESULT_0);
+	len = scnprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+			"rbcpr_result_0 = 0x%02X\n", reg);
+	ret += len;
+
+	/* Decode the individual RBCPR_RESULT_0 fields. */
+	step_dn = reg & 0x01;
+	step_up = (reg >> RBCPR_RESULT0_STEP_UP_SHIFT) & 0x01;
+	len = scnprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+			"  [step_dn = %u", step_dn);
+	ret += len;
+
+	len = scnprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+			", step_up = %u", step_up);
+	ret += len;
+
+	error_steps = (reg >> RBCPR_RESULT0_ERROR_STEPS_SHIFT)
+				& RBCPR_RESULT0_ERROR_STEPS_MASK;
+	len = scnprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+			", error_steps = %u", error_steps);
+	ret += len;
+
+	error = (reg >> RBCPR_RESULT0_ERROR_SHIFT) & RBCPR_RESULT0_ERROR_MASK;
+	len = scnprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+			", error = %u", error);
+	ret += len;
+
+	error_lt0 = (reg >> RBCPR_RESULT0_ERROR_LT0_SHIFT) & 0x01;
+	len = scnprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+			", error_lt_0 = %u", error_lt0);
+	ret += len;
+
+	busy = (reg >> RBCPR_RESULT0_BUSY_SHIFT) & 0x01;
+	len = scnprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+			", busy = %u]\n", busy);
+	ret += len;
+	mutex_unlock(&cpr_vreg->cpr_mutex);
+
+	ret = simple_read_from_buffer(buff, count, ppos, debugfs_buf, ret);
+	kfree(debugfs_buf);
+	return ret;
+}
+
+/* file_operations for the read-only "debug_info" debugfs node. */
+static const struct file_operations cpr_debug_info_fops = {
+	.open = cpr_debug_info_open,
+	.read = cpr_debug_info_read,
+};
+
+/* debugfs open: stash the cpr_regulator pointer for the read handler. */
+static int cpr_aging_debug_info_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+
+	return 0;
+}
+
+/*
+ * debugfs read handler for "aging_debug_info": dump the per-fuse-corner
+ * aging voltage adjustments and the aging measurement state.
+ *
+ * scnprintf() replaces snprintf() so 'ret' can never exceed PAGE_SIZE on
+ * truncation (snprintf returns the would-be length, which would make the
+ * remaining-space argument wrap around).
+ */
+static ssize_t cpr_aging_debug_info_read(struct file *file, char __user *buff,
+				size_t count, loff_t *ppos)
+{
+	struct cpr_regulator *cpr_vreg = file->private_data;
+	struct cpr_aging_info *aging_info = cpr_vreg->aging_info;
+	char *debugfs_buf;
+	ssize_t len, ret = 0;
+	int i;
+
+	debugfs_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!debugfs_buf)
+		return -ENOMEM;
+
+	mutex_lock(&cpr_vreg->cpr_mutex);
+
+	len = scnprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+			"aging_adj_volt = [");
+	ret += len;
+
+	for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++) {
+		len = scnprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+				" %d", aging_info->voltage_adjust[i]);
+		ret += len;
+	}
+
+	len = scnprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+			" ]uV\n");
+	ret += len;
+
+	len = scnprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+			"aging_measurement_done = %s\n",
+			aging_info->cpr_aging_done ? "true" : "false");
+	ret += len;
+
+	len = scnprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+			"aging_measurement_error = %s\n",
+			aging_info->cpr_aging_error ? "true" : "false");
+	ret += len;
+
+	mutex_unlock(&cpr_vreg->cpr_mutex);
+
+	ret = simple_read_from_buffer(buff, count, ppos, debugfs_buf, ret);
+	kfree(debugfs_buf);
+	return ret;
+}
+
+/* file_operations for the read-only "aging_debug_info" debugfs node. */
+static const struct file_operations cpr_aging_debug_info_fops = {
+	.open = cpr_aging_debug_info_open,
+	.read = cpr_aging_debug_info_read,
+};
+
+/*
+ * Create this regulator's debugfs directory (named after the regulator)
+ * under the shared "cpr-regulator" base directory, and populate it with
+ * the debug_info, cpr_enable, cpr_ceiling, cpr_floor, cpr_max_ceiling and
+ * (when aging data exists) aging_debug_info nodes.  Failures only log an
+ * error; debugfs is best-effort and never fails the probe.
+ */
+static void cpr_debugfs_init(struct cpr_regulator *cpr_vreg)
+{
+	struct dentry *temp;
+
+	if (IS_ERR_OR_NULL(cpr_debugfs_base)) {
+		cpr_err(cpr_vreg, "Could not create debugfs nodes since base directory is missing\n");
+		return;
+	}
+
+	cpr_vreg->debugfs = debugfs_create_dir(cpr_vreg->rdesc.name,
+						cpr_debugfs_base);
+	if (IS_ERR_OR_NULL(cpr_vreg->debugfs)) {
+		cpr_err(cpr_vreg, "debugfs directory creation failed\n");
+		return;
+	}
+
+	temp = debugfs_create_file("debug_info", S_IRUGO, cpr_vreg->debugfs,
+					cpr_vreg, &cpr_debug_info_fops);
+	if (IS_ERR_OR_NULL(temp)) {
+		cpr_err(cpr_vreg, "debug_info node creation failed\n");
+		return;
+	}
+
+	temp = debugfs_create_file("cpr_enable", S_IRUGO | S_IWUSR,
+			cpr_vreg->debugfs, cpr_vreg, &cpr_enable_fops);
+	if (IS_ERR_OR_NULL(temp)) {
+		cpr_err(cpr_vreg, "cpr_enable node creation failed\n");
+		return;
+	}
+
+	temp = debugfs_create_file("cpr_ceiling", S_IRUGO,
+			cpr_vreg->debugfs, cpr_vreg, &cpr_ceiling_fops);
+	if (IS_ERR_OR_NULL(temp)) {
+		cpr_err(cpr_vreg, "cpr_ceiling node creation failed\n");
+		return;
+	}
+
+	temp = debugfs_create_file("cpr_floor", S_IRUGO,
+			cpr_vreg->debugfs, cpr_vreg, &cpr_floor_fops);
+	if (IS_ERR_OR_NULL(temp)) {
+		cpr_err(cpr_vreg, "cpr_floor node creation failed\n");
+		return;
+	}
+
+	temp = debugfs_create_file("cpr_max_ceiling", S_IRUGO,
+			cpr_vreg->debugfs, cpr_vreg, &cpr_max_ceiling_fops);
+	if (IS_ERR_OR_NULL(temp)) {
+		cpr_err(cpr_vreg, "cpr_max_ceiling node creation failed\n");
+		return;
+	}
+
+	/* Aging node is only meaningful when aging data was parsed. */
+	if (cpr_vreg->aging_info) {
+		temp = debugfs_create_file("aging_debug_info", S_IRUGO,
+					cpr_vreg->debugfs, cpr_vreg,
+					&cpr_aging_debug_info_fops);
+		if (IS_ERR_OR_NULL(temp)) {
+			cpr_err(cpr_vreg, "aging_debug_info node creation failed\n");
+			return;
+		}
+	}
+}
+
+/* Remove this regulator's debugfs directory and everything under it. */
+static void cpr_debugfs_remove(struct cpr_regulator *cpr_vreg)
+{
+	debugfs_remove_recursive(cpr_vreg->debugfs);
+}
+
+/* Create the shared top-level "cpr-regulator" debugfs directory. */
+static void cpr_debugfs_base_init(void)
+{
+	cpr_debugfs_base = debugfs_create_dir("cpr-regulator", NULL);
+	if (IS_ERR_OR_NULL(cpr_debugfs_base))
+		pr_err("cpr-regulator debugfs base directory creation failed\n");
+}
+
+/* Tear down the shared debugfs base directory. */
+static void cpr_debugfs_base_remove(void)
+{
+	debugfs_remove_recursive(cpr_debugfs_base);
+}
+
+#else
+
+/* No-op stubs when CONFIG_DEBUG_FS is disabled. */
+static void cpr_debugfs_init(struct cpr_regulator *cpr_vreg)
+{}
+
+static void cpr_debugfs_remove(struct cpr_regulator *cpr_vreg)
+{}
+
+static void cpr_debugfs_base_init(void)
+{}
+
+static void cpr_debugfs_base_remove(void)
+{}
+
+#endif
+
+/**
+ * cpr_panic_callback() - panic notification callback function. This function
+ *		is invoked when a kernel panic occurs.
+ * @nfb:	Notifier block pointer of CPR regulator
+ * @event:	Value passed unmodified to notifier function
+ * @data:	Pointer passed unmodified to notifier function
+ *
+ * Logs the regulator's current corner, fuse corner and voltage so they
+ * appear in the panic output.
+ *
+ * Return: NOTIFY_OK
+ */
+static int cpr_panic_callback(struct notifier_block *nfb,
+			unsigned long event, void *data)
+{
+	struct cpr_regulator *cpr_vreg = container_of(nfb,
+				struct cpr_regulator, panic_notifier);
+	int corner, fuse_corner, volt;
+
+	corner = cpr_vreg->corner;
+	fuse_corner = cpr_vreg->corner_map[corner];
+	/* Report closed-loop voltage when CPR is active, open-loop otherwise. */
+	if (cpr_is_allowed(cpr_vreg))
+		volt = cpr_vreg->last_volt[corner];
+	else
+		volt = cpr_vreg->open_loop_volt[corner];
+
+	cpr_err(cpr_vreg, "[corner:%d, fuse_corner:%d] = %d uV\n",
+		corner, fuse_corner, volt);
+
+	return NOTIFY_OK;
+}
+
+/*
+ * Probe a CPR regulator device: parse fuse and voltage-plan data from the
+ * device tree, initialize the CPR controller and dependent supplies, and
+ * register the regulator with the framework.
+ *
+ * Every failure after cpr_efuse_init() succeeds must exit via err_out so
+ * that the eFuse mapping is released; the original code leaked it on the
+ * vsens/remap/rpm-apc/thermal/pm-notification error paths.
+ */
+static int cpr_regulator_probe(struct platform_device *pdev)
+{
+	struct regulator_config reg_config = {};
+	struct cpr_regulator *cpr_vreg;
+	struct regulator_desc *rdesc;
+	struct device *dev = &pdev->dev;
+	struct regulator_init_data *init_data = pdev->dev.platform_data;
+	int rc;
+
+	if (!pdev->dev.of_node) {
+		dev_err(dev, "Device tree node is missing\n");
+		return -EINVAL;
+	}
+
+	init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node);
+	if (!init_data) {
+		dev_err(dev, "regulator init data is missing\n");
+		return -EINVAL;
+	} else {
+		init_data->constraints.input_uV
+			= init_data->constraints.max_uV;
+		init_data->constraints.valid_ops_mask
+			|= REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_STATUS;
+	}
+
+	cpr_vreg = devm_kzalloc(&pdev->dev, sizeof(struct cpr_regulator),
+				GFP_KERNEL);
+	if (!cpr_vreg) {
+		dev_err(dev, "Can't allocate cpr_regulator memory\n");
+		return -ENOMEM;
+	}
+
+	cpr_vreg->dev = &pdev->dev;
+	cpr_vreg->rdesc.name = init_data->constraints.name;
+	if (cpr_vreg->rdesc.name == NULL) {
+		dev_err(dev, "regulator-name missing\n");
+		return -EINVAL;
+	}
+
+	rc = cpr_fuse_corner_array_alloc(&pdev->dev, cpr_vreg);
+	if (rc)
+		return rc;
+
+	rc = cpr_mem_acc_init(pdev, cpr_vreg);
+	if (rc) {
+		cpr_err(cpr_vreg, "mem_acc intialization error rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = cpr_efuse_init(pdev, cpr_vreg);
+	if (rc) {
+		cpr_err(cpr_vreg, "Wrong eFuse address specified: rc=%d\n", rc);
+		return rc;
+	}
+
+	/* From here on, error paths must free the eFuse mapping. */
+	rc = cpr_remap_efuse_data(pdev, cpr_vreg);
+	if (rc) {
+		cpr_err(cpr_vreg, "Could not remap fuse data: rc=%d\n", rc);
+		goto err_out;
+	}
+
+	rc = cpr_check_redundant(pdev, cpr_vreg);
+	if (rc) {
+		cpr_err(cpr_vreg, "Could not check redundant fuse: rc=%d\n",
+			rc);
+		goto err_out;
+	}
+
+	rc = cpr_read_fuse_revision(pdev, cpr_vreg);
+	if (rc) {
+		cpr_err(cpr_vreg, "Could not read fuse revision: rc=%d\n", rc);
+		goto err_out;
+	}
+
+	cpr_parse_speed_bin_fuse(cpr_vreg, dev->of_node);
+	cpr_parse_pvs_version_fuse(cpr_vreg, dev->of_node);
+
+	rc = cpr_read_ro_select(pdev, cpr_vreg);
+	if (rc) {
+		cpr_err(cpr_vreg, "Could not read RO select: rc=%d\n", rc);
+		goto err_out;
+	}
+
+	rc = cpr_find_fuse_map_match(pdev, cpr_vreg);
+	if (rc) {
+		cpr_err(cpr_vreg, "Could not determine fuse mapping match: rc=%d\n",
+			rc);
+		goto err_out;
+	}
+
+	rc = cpr_voltage_plan_init(pdev, cpr_vreg);
+	if (rc) {
+		cpr_err(cpr_vreg, "Wrong DT parameter specified: rc=%d\n", rc);
+		goto err_out;
+	}
+
+	rc = cpr_pvs_init(pdev, cpr_vreg);
+	if (rc) {
+		cpr_err(cpr_vreg, "Initialize PVS wrong: rc=%d\n", rc);
+		goto err_out;
+	}
+
+	rc = cpr_vsens_init(pdev, cpr_vreg);
+	if (rc) {
+		cpr_err(cpr_vreg, "Initialize vsens configuration failed rc=%d\n",
+			rc);
+		goto err_out;
+	}
+
+	rc = cpr_apc_init(pdev, cpr_vreg);
+	if (rc) {
+		if (rc != -EPROBE_DEFER)
+			cpr_err(cpr_vreg, "Initialize APC wrong: rc=%d\n", rc);
+		goto err_out;
+	}
+
+	rc = cpr_init_cpr(pdev, cpr_vreg);
+	if (rc) {
+		cpr_err(cpr_vreg, "Initialize CPR failed: rc=%d\n", rc);
+		goto err_out;
+	}
+
+	rc = cpr_rpm_apc_init(pdev, cpr_vreg);
+	if (rc) {
+		cpr_err(cpr_vreg, "Initialize RPM APC regulator failed rc=%d\n",
+			rc);
+		goto err_out;
+	}
+
+	rc = cpr_thermal_init(cpr_vreg);
+	if (rc) {
+		cpr_err(cpr_vreg, "Thermal intialization failed rc=%d\n", rc);
+		goto err_out;
+	}
+
+	if (of_property_read_bool(pdev->dev.of_node,
+				"qcom,disable-closed-loop-in-pc")) {
+		rc = cpr_init_pm_notification(cpr_vreg);
+		if (rc) {
+			cpr_err(cpr_vreg,
+				"cpr_init_pm_notification failed rc=%d\n", rc);
+			goto err_out;
+		}
+	}
+
+	/* Load per-online CPU adjustment data */
+	rc = cpr_init_per_cpu_adjustments(cpr_vreg, &pdev->dev);
+	if (rc) {
+		cpr_err(cpr_vreg, "cpr_init_per_cpu_adjustments failed: rc=%d\n",
+			rc);
+		goto err_out;
+	}
+
+	/* Parse dependency parameters */
+	if (cpr_vreg->vdd_mx) {
+		rc = cpr_parse_vdd_mx_parameters(pdev, cpr_vreg);
+		if (rc) {
+			cpr_err(cpr_vreg, "parsing vdd_mx parameters failed: rc=%d\n",
+				rc);
+			goto err_out;
+		}
+	}
+
+	/* All fuse data has been consumed; the mapping is no longer needed. */
+	cpr_efuse_free(cpr_vreg);
+
+	/*
+	 * Ensure that enable state accurately reflects the case in which CPR
+	 * is permanently disabled.
+	 */
+	cpr_vreg->enable &= !cpr_vreg->cpr_fuse_disable;
+
+	mutex_init(&cpr_vreg->cpr_mutex);
+
+	rdesc			= &cpr_vreg->rdesc;
+	rdesc->owner		= THIS_MODULE;
+	rdesc->type		= REGULATOR_VOLTAGE;
+	rdesc->ops		= &cpr_corner_ops;
+
+	reg_config.dev = &pdev->dev;
+	reg_config.init_data = init_data;
+	reg_config.driver_data = cpr_vreg;
+	reg_config.of_node = pdev->dev.of_node;
+	cpr_vreg->rdev = regulator_register(rdesc, &reg_config);
+	if (IS_ERR(cpr_vreg->rdev)) {
+		rc = PTR_ERR(cpr_vreg->rdev);
+		cpr_err(cpr_vreg, "regulator_register failed: rc=%d\n", rc);
+
+		cpr_apc_exit(cpr_vreg);
+		return rc;
+	}
+
+	platform_set_drvdata(pdev, cpr_vreg);
+	cpr_debugfs_init(cpr_vreg);
+
+	if (cpr_vreg->cpr_disable_on_temperature) {
+		rc = cpr_check_tsens(cpr_vreg);
+		if (rc < 0) {
+			cpr_err(cpr_vreg, "Unable to config CPR on tsens, rc=%d\n",
+									rc);
+			cpr_apc_exit(cpr_vreg);
+			cpr_debugfs_remove(cpr_vreg);
+			return rc;
+		}
+	}
+
+	/* Register panic notification call back */
+	cpr_vreg->panic_notifier.notifier_call = cpr_panic_callback;
+	atomic_notifier_chain_register(&panic_notifier_list,
+			&cpr_vreg->panic_notifier);
+
+	mutex_lock(&cpr_regulator_list_mutex);
+	list_add(&cpr_vreg->list, &cpr_regulator_list);
+	mutex_unlock(&cpr_regulator_list_mutex);
+
+	return 0;
+
+err_out:
+	cpr_efuse_free(cpr_vreg);
+	return rc;
+}
+
+/*
+ * Tear down a CPR regulator: quiesce the hardware loop, drop the device
+ * from the global list, unregister the hotcpu/tsens/panic notifiers that
+ * were registered during probe or late init, and release the APC,
+ * debugfs and regulator framework resources.
+ */
+static int cpr_regulator_remove(struct platform_device *pdev)
+{
+	struct cpr_regulator *cpr_vreg;
+
+	cpr_vreg = platform_get_drvdata(pdev);
+	if (cpr_vreg) {
+		/* Disable CPR */
+		if (cpr_is_allowed(cpr_vreg)) {
+			cpr_ctl_disable(cpr_vreg);
+			cpr_irq_set(cpr_vreg, 0);
+		}
+
+		mutex_lock(&cpr_regulator_list_mutex);
+		list_del(&cpr_vreg->list);
+		mutex_unlock(&cpr_regulator_list_mutex);
+
+		/* Only registered when per-CPU adjustments were set up. */
+		if (cpr_vreg->cpu_notifier.notifier_call)
+			unregister_hotcpu_notifier(&cpr_vreg->cpu_notifier);
+
+		if (cpr_vreg->cpr_disable_on_temperature)
+			sensor_mgr_remove_threshold(
+				&cpr_vreg->tsens_threshold_config);
+
+		atomic_notifier_chain_unregister(&panic_notifier_list,
+			&cpr_vreg->panic_notifier);
+
+		cpr_apc_exit(cpr_vreg);
+		cpr_debugfs_remove(cpr_vreg);
+		regulator_unregister(cpr_vreg->rdev);
+	}
+
+	return 0;
+}
+
+/* OF match table; const because of_match_table is a const pointer. */
+static const struct of_device_id cpr_regulator_match_table[] = {
+	{ .compatible = CPR_REGULATOR_DRIVER_NAME, },
+	{}
+};
+
+/* Platform driver using legacy platform suspend/resume callbacks. */
+static struct platform_driver cpr_regulator_driver = {
+	.driver		= {
+		.name	= CPR_REGULATOR_DRIVER_NAME,
+		.of_match_table = cpr_regulator_match_table,
+		.owner = THIS_MODULE,
+	},
+	.probe		= cpr_regulator_probe,
+	.remove		= cpr_regulator_remove,
+	.suspend	= cpr_regulator_suspend,
+	.resume		= cpr_regulator_resume,
+};
+
+/*
+ * Set up tsens temperature monitoring for a regulator that disables CPR
+ * above/below the configured temperature thresholds: verify the sensor is
+ * reachable, register the high/low threshold pair, then arm it.
+ *
+ * Returns 0 on success or a negative errno on failure.
+ */
+static int initialize_tsens_monitor(struct cpr_regulator *cpr_vreg)
+{
+	int rc;
+
+	rc = cpr_check_tsens(cpr_vreg);
+	if (rc < 0) {
+		cpr_err(cpr_vreg, "Unable to check tsens, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = sensor_mgr_init_threshold(&cpr_vreg->tsens_threshold_config,
+				cpr_vreg->tsens_id,
+				cpr_vreg->cpr_enable_temp_threshold, /* high */
+				cpr_vreg->cpr_disable_temp_threshold, /* low */
+				tsens_threshold_notify);
+	if (rc < 0) {
+		cpr_err(cpr_vreg, "Failed to init tsens monitor, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = sensor_mgr_convert_id_and_set_threshold(
+			&cpr_vreg->tsens_threshold_config);
+	if (rc < 0)
+		cpr_err(cpr_vreg, "Failed to set tsens threshold, rc=%d\n",
+					rc);
+
+	return rc;
+}
+
+/*
+ * Late initcall: walk every registered CPR regulator and start tsens
+ * temperature monitoring for those configured to disable CPR on
+ * temperature.  Failures are logged but do not abort the walk.
+ */
+int __init cpr_regulator_late_init(void)
+{
+	int rc;
+	struct cpr_regulator *cpr_vreg;
+
+	mutex_lock(&cpr_regulator_list_mutex);
+
+	list_for_each_entry(cpr_vreg, &cpr_regulator_list, list) {
+		if (cpr_vreg->cpr_disable_on_temperature) {
+			rc = initialize_tsens_monitor(cpr_vreg);
+			if (rc)
+				cpr_err(cpr_vreg, "Failed to initialize temperature monitor, rc=%d\n",
+					rc);
+		}
+	}
+
+	mutex_unlock(&cpr_regulator_list_mutex);
+	return 0;
+}
+late_initcall(cpr_regulator_late_init);
+
+/**
+ * cpr_regulator_init() - register the cpr-regulator platform driver
+ *
+ * Intended for systems that must control driver registration ordering
+ * precisely.  Safe to call multiple times; only the first call registers
+ * the driver and creates the debugfs base directory.
+ */
+int __init cpr_regulator_init(void)
+{
+	static bool initialized;
+
+	/* Subsequent calls are no-ops. */
+	if (initialized)
+		return 0;
+
+	initialized = true;
+	cpr_debugfs_base_init();
+	return platform_driver_register(&cpr_regulator_driver);
+}
+EXPORT_SYMBOL(cpr_regulator_init);
+
+/* Module teardown: unregister the driver and remove the debugfs base. */
+static void __exit cpr_regulator_exit(void)
+{
+	platform_driver_unregister(&cpr_regulator_driver);
+	cpr_debugfs_base_remove();
+}
+
+MODULE_DESCRIPTION("CPR regulator driver");
+MODULE_LICENSE("GPL v2");
+
+/* arch_initcall covers built-in use; cpr_regulator_init() guards reentry. */
+arch_initcall(cpr_regulator_init);
+module_exit(cpr_regulator_exit);
diff --git a/drivers/regulator/qpnp-lcdb-regulator.c b/drivers/regulator/qpnp-lcdb-regulator.c
index 0be35b3..9fc5a4a 100644
--- a/drivers/regulator/qpnp-lcdb-regulator.c
+++ b/drivers/regulator/qpnp-lcdb-regulator.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -50,6 +50,8 @@
 #define ATTW_MAX_MS			32
 
 #define LCDB_BST_OUTPUT_VOLTAGE_REG	0x41
+#define PM660_BST_OUTPUT_VOLTAGE_MASK	GENMASK(4, 0)
+#define BST_OUTPUT_VOLTAGE_MASK		GENMASK(5, 0)
 
 #define LCDB_MODULE_RDY_REG		0x45
 #define MODULE_RDY_BIT			BIT(7)
@@ -71,7 +73,8 @@
 
 #define LCDB_PS_CTL_REG			0x50
 #define EN_PS_BIT			BIT(7)
-#define PS_THRESHOLD_MASK		GENMASK(1, 0)
+#define PM660_PS_THRESH_MASK		GENMASK(1, 0)
+#define PS_THRESH_MASK			GENMASK(2, 0)
 #define MIN_BST_PS_MA			50
 #define MAX_BST_PS_MA			80
 
@@ -83,6 +86,20 @@
 #define LCDB_BST_VREG_OK_CTL_REG	0x55
 #define BST_VREG_OK_DEB_MASK		GENMASK(1, 0)
 
+#define LCDB_BST_SS_CTL_REG		0x5B
+#define BST_SS_TIME_MASK		GENMASK(1, 0)
+#define BST_PRECHG_SHORT_ALARM_SHIFT	2
+#define BST_PRECHARGE_DONE_DEB_BIT	BIT(4)
+#define BST_SS_TIME_OVERRIDE_SHIFT	5
+
+#define BST_SS_TIME_OVERRIDE_0MS	0
+#define BST_SS_TIME_OVERRIDE_0P5_MS	1
+#define BST_SS_TIME_OVERRIDE_1MS	2
+#define BST_SS_TIME_OVERRIDE_2MS	3
+
+#define EN_BST_PRECHG_SHORT_ALARM	0
+#define DIS_BST_PRECHG_SHORT_ALARM	1
+
 #define LCDB_SOFT_START_CTL_REG		0x5F
 
 #define LCDB_MISC_CTL_REG		0x60
@@ -147,7 +164,8 @@
 #define MIN_SOFT_START_US		0
 #define MAX_SOFT_START_US		2000
 
-#define BST_HEADROOM_DEFAULT_MV		200
+#define PM660_BST_HEADROOM_DEFAULT_MV	200
+#define BST_HEADROOM_DEFAULT_MV		150
 
 struct ldo_regulator {
 	struct regulator_desc		rdesc;
@@ -222,6 +240,7 @@
 	u16	address;
 	u8	value;
 	bool	sec_access;
+	bool	valid;
 };
 
 enum lcdb_module {
@@ -262,6 +281,8 @@
 	LCDB_LDO_SOFT_START_CTL,
 	LCDB_NCP_PD_CTL,
 	LCDB_NCP_SOFT_START_CTL,
+	LCDB_BST_SS_CTL,
+	LCDB_LDO_VREG_OK_CTL,
 	LCDB_SETTING_MAX,
 };
 
@@ -286,10 +307,11 @@
 	810,
 };
 
-#define SETTING(_id, _sec_access)		\
+#define SETTING(_id, _sec_access, _valid)	\
 	[_id] = {				\
 		.address = _id##_REG,		\
 		.sec_access = _sec_access,	\
+		.valid = _valid			\
 	}					\
 
 static bool is_between(int value, int min, int max)
@@ -334,12 +356,16 @@
 {
 	int rc;
 	u8 val = SECURE_UNLOCK_VALUE;
+	u8 pmic_subtype = lcdb->pmic_rev_id->pmic_subtype;
 
 	mutex_lock(&lcdb->read_write_mutex);
-	rc = regmap_write(lcdb->regmap, lcdb->base + SEC_ADDRESS_REG, val);
-	if (rc < 0) {
-		pr_err("Failed to unlock register rc=%d\n", rc);
-		goto fail_write;
+	if (pmic_subtype == PM660L_SUBTYPE) {
+		rc = regmap_write(lcdb->regmap, lcdb->base + SEC_ADDRESS_REG,
+				  val);
+		if (rc < 0) {
+			pr_err("Failed to unlock register rc=%d\n", rc);
+			goto fail_write;
+		}
 	}
 	rc = regmap_write(lcdb->regmap, addr, value);
 	if (rc < 0)
@@ -397,61 +423,174 @@
 	return rc;
 }
 
+static struct settings lcdb_settings_pm660l[] = {
+	SETTING(LCDB_BST_PD_CTL, false, true),
+	SETTING(LCDB_RDSON_MGMNT, false, true),
+	SETTING(LCDB_MISC_CTL, false, true),
+	SETTING(LCDB_SOFT_START_CTL, false, true),
+	SETTING(LCDB_PFM_CTL, false, true),
+	SETTING(LCDB_PWRUP_PWRDN_CTL, true, true),
+	SETTING(LCDB_LDO_PD_CTL, false, true),
+	SETTING(LCDB_LDO_SOFT_START_CTL, false, true),
+	SETTING(LCDB_NCP_PD_CTL, false, true),
+	SETTING(LCDB_NCP_SOFT_START_CTL, false, true),
+	SETTING(LCDB_BST_SS_CTL, false, false),
+	SETTING(LCDB_LDO_VREG_OK_CTL, false, false),
+};
+
+/* For PMICs like pmi632/pm855L */
 static struct settings lcdb_settings[] = {
-	SETTING(LCDB_BST_PD_CTL, false),
-	SETTING(LCDB_RDSON_MGMNT, false),
-	SETTING(LCDB_MISC_CTL, false),
-	SETTING(LCDB_SOFT_START_CTL, false),
-	SETTING(LCDB_PFM_CTL, false),
-	SETTING(LCDB_PWRUP_PWRDN_CTL, true),
-	SETTING(LCDB_LDO_PD_CTL, false),
-	SETTING(LCDB_LDO_SOFT_START_CTL, false),
-	SETTING(LCDB_NCP_PD_CTL, false),
-	SETTING(LCDB_NCP_SOFT_START_CTL, false),
+	SETTING(LCDB_BST_PD_CTL, false, true),
+	SETTING(LCDB_RDSON_MGMNT, false, false),
+	SETTING(LCDB_MISC_CTL, false, false),
+	SETTING(LCDB_SOFT_START_CTL, false, false),
+	SETTING(LCDB_PFM_CTL, false, false),
+	SETTING(LCDB_PWRUP_PWRDN_CTL, false, true),
+	SETTING(LCDB_LDO_PD_CTL, false, true),
+	SETTING(LCDB_LDO_SOFT_START_CTL, false, true),
+	SETTING(LCDB_NCP_PD_CTL, false, true),
+	SETTING(LCDB_NCP_SOFT_START_CTL, false, true),
+	SETTING(LCDB_BST_SS_CTL, false, true),
+	SETTING(LCDB_LDO_VREG_OK_CTL, false, true),
 };
 
 static int qpnp_lcdb_save_settings(struct qpnp_lcdb *lcdb)
 {
-	int i, rc = 0;
+	int i, size, rc = 0;
+	struct settings *setting;
+	u16 pmic_subtype = lcdb->pmic_rev_id->pmic_subtype;
 
-	for (i = 0; i < ARRAY_SIZE(lcdb_settings); i++) {
-		rc = qpnp_lcdb_read(lcdb, lcdb->base +
-				lcdb_settings[i].address,
-				&lcdb_settings[i].value, 1);
-		if (rc < 0) {
-			pr_err("Failed to read lcdb register address=%x\n",
-						lcdb_settings[i].address);
-			return rc;
+	if (pmic_subtype == PM660L_SUBTYPE) {
+		setting = lcdb_settings_pm660l;
+		size = ARRAY_SIZE(lcdb_settings_pm660l);
+	} else {
+		setting = lcdb_settings;
+		size = ARRAY_SIZE(lcdb_settings);
+	}
+
+	for (i = 0; i < size; i++) {
+		if (setting[i].valid) {
+			rc = qpnp_lcdb_read(lcdb, lcdb->base +
+					    setting[i].address,
+					    &setting[i].value, 1);
+			if (rc < 0) {
+				pr_err("Failed to read lcdb register address=%x\n",
+					setting[i].address);
+				return rc;
+			}
 		}
 	}
 
-	return rc;
+	return 0;
 }
 
 static int qpnp_lcdb_restore_settings(struct qpnp_lcdb *lcdb)
 {
-	int i, rc = 0;
+	int i, size, rc = 0;
+	struct settings *setting;
+	u16 pmic_subtype = lcdb->pmic_rev_id->pmic_subtype;
 
-	for (i = 0; i < ARRAY_SIZE(lcdb_settings); i++) {
-		if (lcdb_settings[i].sec_access)
-			rc = qpnp_lcdb_secure_write(lcdb, lcdb->base +
-					lcdb_settings[i].address,
-					lcdb_settings[i].value);
-		else
-			rc = qpnp_lcdb_write(lcdb, lcdb->base +
-					lcdb_settings[i].address,
-					&lcdb_settings[i].value, 1);
-		if (rc < 0) {
-			pr_err("Failed to write register address=%x\n",
-						lcdb_settings[i].address);
-			return rc;
+	if (pmic_subtype == PM660L_SUBTYPE) {
+		setting = lcdb_settings_pm660l;
+		size = ARRAY_SIZE(lcdb_settings_pm660l);
+	} else {
+		setting = lcdb_settings;
+		size = ARRAY_SIZE(lcdb_settings);
+	}
+
+	for (i = 0; i < size; i++) {
+		if (setting[i].valid) {
+			if (setting[i].sec_access)
+				rc = qpnp_lcdb_secure_write(lcdb, lcdb->base +
+							    setting[i].address,
+							    setting[i].value);
+			else
+				rc = qpnp_lcdb_write(lcdb, lcdb->base +
+						     setting[i].address,
+						     &setting[i].value, 1);
+			if (rc < 0) {
+				pr_err("Failed to write register address=%x\n",
+					     setting[i].address);
+				return rc;
+			}
 		}
 	}
 
+	return 0;
+}
+
+static int qpnp_lcdb_ttw_enter(struct qpnp_lcdb *lcdb)
+{
+	int rc;
+	u8 val;
+
+	if (!lcdb->settings_saved) {
+		rc = qpnp_lcdb_save_settings(lcdb);
+		if (rc < 0) {
+			pr_err("Failed to save LCDB settings rc=%d\n", rc);
+			return rc;
+		}
+		lcdb->settings_saved = true;
+	}
+
+	val = HWEN_RDY_BIT;
+	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_ENABLE_CTL1_REG,
+			     &val, 1);
+	if (rc < 0) {
+		pr_err("Failed to hw_enable lcdb rc= %d\n", rc);
+		return rc;
+	}
+
+	val = (BST_SS_TIME_OVERRIDE_1MS << BST_SS_TIME_OVERRIDE_SHIFT) |
+	      (DIS_BST_PRECHG_SHORT_ALARM << BST_PRECHG_SHORT_ALARM_SHIFT);
+	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_BST_SS_CTL_REG, &val, 1);
+	if (rc < 0)
+		return rc;
+
+	val = 0;
+	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_LDO_SOFT_START_CTL_REG,
+			     &val, 1);
+	if (rc < 0)
+		return rc;
+
+	val = 0;
+	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_NCP_SOFT_START_CTL_REG,
+			     &val, 1);
+	if (rc < 0)
+		return rc;
+
+	val = BOOST_DIS_PULLDOWN_BIT | BOOST_PD_STRENGTH_BIT;
+	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_BST_PD_CTL_REG,
+			     &val, 1);
+	if (rc < 0)
+		return rc;
+
+	val = LDO_DIS_PULLDOWN_BIT | LDO_PD_STRENGTH_BIT;
+	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_LDO_PD_CTL_REG,
+							&val, 1);
+	if (rc < 0)
+		return rc;
+
+	val = NCP_DIS_PULLDOWN_BIT | NCP_PD_STRENGTH_BIT;
+	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_NCP_PD_CTL_REG,
+			     &val, 1);
+	if (rc < 0)
+		return rc;
+
+	val = 0;
+	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_PWRUP_PWRDN_CTL_REG,
+			     &val, 1);
+	if (rc < 0)
+		return rc;
+
+	val = 0;
+	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_BST_VREG_OK_CTL_REG,
+			     &val, 1);
+
 	return rc;
 }
 
-static int qpnp_lcdb_ttw_enter(struct qpnp_lcdb *lcdb)
+static int qpnp_lcdb_ttw_enter_pm660l(struct qpnp_lcdb *lcdb)
 {
 	int rc;
 	u8 val;
@@ -491,7 +630,7 @@
 
 	val = 0;
 	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_SOFT_START_CTL_REG,
-							&val, 1);
+						&val, 1);
 	if (rc < 0) {
 		pr_err("Failed to set LCDB_SOFT_START rc=%d\n", rc);
 		return rc;
@@ -715,12 +854,17 @@
 {
 	int rc = 0;
 	u8 val;
+	u16 pmic_subtype = lcdb->pmic_rev_id->pmic_subtype;
 
 	if (!lcdb->lcdb_enabled)
 		return 0;
 
 	if (lcdb->ttw_enable) {
-		rc = qpnp_lcdb_ttw_enter(lcdb);
+		if (pmic_subtype == PM660L_SUBTYPE)
+			rc = qpnp_lcdb_ttw_enter_pm660l(lcdb);
+		else
+			rc = qpnp_lcdb_ttw_enter(lcdb);
+
 		if (rc < 0) {
 			pr_err("Failed to enable TTW mode rc=%d\n", rc);
 			return rc;
@@ -859,8 +1003,9 @@
 					int voltage_mv, u8 type)
 {
 	int rc = 0;
-	u8 val = 0;
+	u8 val, mask = 0;
 	int bst_voltage_mv;
+	u16 pmic_subtype = lcdb->pmic_rev_id->pmic_subtype;
 	struct ldo_regulator *ldo = &lcdb->ldo;
 	struct ncp_regulator *ncp = &lcdb->ncp;
 	struct bst_params *bst = &lcdb->bst;
@@ -877,10 +1022,11 @@
 	if (bst_voltage_mv != bst->voltage_mv) {
 		val = DIV_ROUND_UP(bst_voltage_mv - MIN_BST_VOLTAGE_MV,
 						VOLTAGE_STEP_50_MV);
-
+		mask = (pmic_subtype == PM660L_SUBTYPE) ?
+			PM660_BST_OUTPUT_VOLTAGE_MASK : BST_OUTPUT_VOLTAGE_MASK;
 		rc = qpnp_lcdb_masked_write(lcdb, lcdb->base +
 					LCDB_BST_OUTPUT_VOLTAGE_REG,
-					SET_OUTPUT_VOLTAGE_MASK, val);
+					mask, val);
 		if (rc < 0) {
 			pr_err("Failed to set boost voltage %d mv rc=%d\n",
 				bst_voltage_mv, rc);
@@ -898,7 +1044,8 @@
 					int *voltage_mv)
 {
 	int rc;
-	u8 val = 0;
+	u8 val, mask = 0;
+	u16 pmic_subtype = lcdb->pmic_rev_id->pmic_subtype;
 
 	rc = qpnp_lcdb_read(lcdb, lcdb->base + LCDB_BST_OUTPUT_VOLTAGE_REG,
 						&val, 1);
@@ -907,7 +1054,9 @@
 		return rc;
 	}
 
-	val &= SET_OUTPUT_VOLTAGE_MASK;
+	mask = (pmic_subtype == PM660L_SUBTYPE) ?
+		PM660_BST_OUTPUT_VOLTAGE_MASK : BST_OUTPUT_VOLTAGE_MASK;
+	val &= mask;
 	*voltage_mv = (val * VOLTAGE_STEP_50_MV) + MIN_BST_VOLTAGE_MV;
 
 	return 0;
@@ -1369,6 +1518,8 @@
 {
 	int rc = 0;
 	struct device_node *node = lcdb->bst.node;
+	u16 pmic_subtype = lcdb->pmic_rev_id->pmic_subtype;
+	u16 default_headroom_mv;
 
 	/* Boost PD  configuration */
 	lcdb->bst.pd = -EINVAL;
@@ -1402,11 +1553,14 @@
 		return -EINVAL;
 	}
 
+	default_headroom_mv = (pmic_subtype == PM660L_SUBTYPE) ?
+			       PM660_BST_HEADROOM_DEFAULT_MV :
+			       BST_HEADROOM_DEFAULT_MV;
 	/* Boost head room configuration */
 	of_property_read_u16(node, "qcom,bst-headroom-mv",
 					&lcdb->bst.headroom_mv);
-	if (lcdb->bst.headroom_mv < BST_HEADROOM_DEFAULT_MV)
-		lcdb->bst.headroom_mv = BST_HEADROOM_DEFAULT_MV;
+	if (lcdb->bst.headroom_mv < default_headroom_mv)
+		lcdb->bst.headroom_mv = default_headroom_mv;
 
 	return 0;
 }
@@ -1624,7 +1778,8 @@
 static int qpnp_lcdb_init_bst(struct qpnp_lcdb *lcdb)
 {
 	int rc = 0;
-	u8 val = 0;
+	u8 val, mask = 0;
+	u16 pmic_subtype = lcdb->pmic_rev_id->pmic_subtype;
 
 	/* configure parameters only if LCDB is disabled */
 	if (!is_lcdb_enabled(lcdb)) {
@@ -1677,13 +1832,13 @@
 		}
 
 		if (lcdb->bst.ps_threshold != -EINVAL) {
+			mask = (pmic_subtype == PM660L_SUBTYPE) ?
+					PM660_PS_THRESH_MASK : PS_THRESH_MASK;
 			val = (lcdb->bst.ps_threshold - MIN_BST_PS_MA) / 10;
-			val = (lcdb->bst.ps_threshold & PS_THRESHOLD_MASK) |
-								EN_PS_BIT;
+			val = (lcdb->bst.ps_threshold & mask) | EN_PS_BIT;
 			rc = qpnp_lcdb_masked_write(lcdb, lcdb->base +
 						LCDB_PS_CTL_REG,
-						PS_THRESHOLD_MASK | EN_PS_BIT,
-						val);
+						mask | EN_PS_BIT, val);
 			if (rc < 0) {
 				pr_err("Failed to configure BST PS threshold rc=%d",
 								rc);
@@ -1706,16 +1861,24 @@
 	}
 	lcdb->bst.vreg_ok_dbc_us = dbc_us[val & VREG_OK_DEB_MASK];
 
-	rc = qpnp_lcdb_read(lcdb, lcdb->base +
-			LCDB_SOFT_START_CTL_REG, &val, 1);
-	if (rc < 0) {
-		pr_err("Failed to read ncp_soft_start_ctl rc=%d\n", rc);
-		return rc;
+	if (pmic_subtype == PM660L_SUBTYPE) {
+		rc = qpnp_lcdb_read(lcdb, lcdb->base +
+				    LCDB_SOFT_START_CTL_REG, &val, 1);
+		if (rc < 0) {
+			pr_err("Failed to read lcdb_soft_start_ctl rc=%d\n",
+									rc);
+			return rc;
+		}
+		lcdb->bst.soft_start_us = (val & SOFT_START_MASK) * 200 + 200;
+	} else {
+		rc = qpnp_lcdb_read(lcdb, lcdb->base +
+				    LCDB_BST_SS_CTL_REG, &val, 1);
+		if (rc < 0) {
+			pr_err("Failed to read bst_soft_start_ctl rc=%d\n", rc);
+			return rc;
+		}
+		lcdb->bst.soft_start_us = soft_start_us[val & SOFT_START_MASK];
 	}
-	lcdb->bst.soft_start_us = (val & SOFT_START_MASK) * 200	+ 200;
-
-	if (!lcdb->bst.headroom_mv)
-		lcdb->bst.headroom_mv = BST_HEADROOM_DEFAULT_MV;
 
 	return 0;
 }
diff --git a/drivers/regulator/rpmh-regulator.c b/drivers/regulator/rpmh-regulator.c
index 1de08d4..f370d2b 100644
--- a/drivers/regulator/rpmh-regulator.c
+++ b/drivers/regulator/rpmh-regulator.c
@@ -46,6 +46,23 @@
 };
 
 /**
+ * enum rpmh_regulator_hw_type - supported PMIC regulator hardware types
+ * This enum defines the specific regulator type along with its PMIC family.
+ */
+enum rpmh_regulator_hw_type {
+	RPMH_REGULATOR_HW_TYPE_UNKNOWN,
+	RPMH_REGULATOR_HW_TYPE_PMIC4_LDO,
+	RPMH_REGULATOR_HW_TYPE_PMIC4_HFSMPS,
+	RPMH_REGULATOR_HW_TYPE_PMIC4_FTSMPS,
+	RPMH_REGULATOR_HW_TYPE_PMIC4_BOB,
+	RPMH_REGULATOR_HW_TYPE_PMIC5_LDO,
+	RPMH_REGULATOR_HW_TYPE_PMIC5_HFSMPS,
+	RPMH_REGULATOR_HW_TYPE_PMIC5_FTSMPS,
+	RPMH_REGULATOR_HW_TYPE_PMIC5_BOB,
+	RPMH_REGULATOR_HW_TYPE_MAX,
+};
+
+/**
  * enum rpmh_regulator_reg_index - RPMh accelerator register indices
  * %RPMH_REGULATOR_REG_VRM_VOLTAGE:	VRM voltage voting register index
  * %RPMH_REGULATOR_REG_ARC_LEVEL:	ARC voltage level voting register index
@@ -115,20 +132,6 @@
 /* XOB voting registers are found in the VRM hardware module */
 #define CMD_DB_HW_XOB			CMD_DB_HW_VRM
 
-/*
- * Mapping from RPMh VRM accelerator modes to regulator framework modes
- * Assumes that SMPS PFM mode == LDO LPM mode and SMPS PWM mode == LDO HPM mode
- */
-static const int rpmh_regulator_mode_map[] = {
-	[RPMH_REGULATOR_MODE_SMPS_PFM]	= REGULATOR_MODE_IDLE,
-	[RPMH_REGULATOR_MODE_SMPS_AUTO]	= REGULATOR_MODE_NORMAL,
-	[RPMH_REGULATOR_MODE_SMPS_PWM]	= REGULATOR_MODE_FAST,
-	[RPMH_REGULATOR_MODE_BOB_PASS]	= REGULATOR_MODE_STANDBY,
-	[RPMH_REGULATOR_MODE_BOB_PFM]	= REGULATOR_MODE_IDLE,
-	[RPMH_REGULATOR_MODE_BOB_AUTO]	= REGULATOR_MODE_NORMAL,
-	[RPMH_REGULATOR_MODE_BOB_PWM]	= REGULATOR_MODE_FAST,
-};
-
 /**
  * struct rpmh_regulator_request - rpmh request data
  * @reg:			Array of RPMh accelerator register values
@@ -175,6 +178,8 @@
  *				common to a single aggregated resource
  * @regulator_type:		RPMh accelerator type for this regulator
  *				resource
+ * @regulator_hw_type:		The regulator hardware type (e.g. LDO or SMPS)
+ *				along with PMIC family (i.e. PMIC4 or PMIC5)
  * @level:			Mapping from ARC resource specific voltage
  *				levels (0 to RPMH_ARC_MAX_LEVELS - 1) to common
  *				consumer voltage levels (i.e.
@@ -221,6 +226,7 @@
 	struct rpmh_client		*rpmh_client;
 	struct mutex			lock;
 	enum rpmh_regulator_type	regulator_type;
+	enum rpmh_regulator_hw_type	regulator_hw_type;
 	u32				level[RPMH_ARC_MAX_LEVELS];
 	int				level_count;
 	bool				always_wait_for_ack;
@@ -268,6 +274,187 @@
 	int				mode_index;
 };
 
+#define RPMH_REGULATOR_MODE_COUNT		5
+
+#define RPMH_REGULATOR_MODE_PMIC4_LDO_RM	4
+#define RPMH_REGULATOR_MODE_PMIC4_LDO_LPM	5
+#define RPMH_REGULATOR_MODE_PMIC4_LDO_HPM	7
+
+#define RPMH_REGULATOR_MODE_PMIC4_SMPS_RM	4
+#define RPMH_REGULATOR_MODE_PMIC4_SMPS_PFM	5
+#define RPMH_REGULATOR_MODE_PMIC4_SMPS_AUTO	6
+#define RPMH_REGULATOR_MODE_PMIC4_SMPS_PWM	7
+
+#define RPMH_REGULATOR_MODE_PMIC4_BOB_PASS	0
+#define RPMH_REGULATOR_MODE_PMIC4_BOB_PFM	1
+#define RPMH_REGULATOR_MODE_PMIC4_BOB_AUTO	2
+#define RPMH_REGULATOR_MODE_PMIC4_BOB_PWM	3
+
+#define RPMH_REGULATOR_MODE_PMIC5_LDO_RM	3
+#define RPMH_REGULATOR_MODE_PMIC5_LDO_LPM	4
+#define RPMH_REGULATOR_MODE_PMIC5_LDO_HPM	7
+
+#define RPMH_REGULATOR_MODE_PMIC5_HFSMPS_RM	3
+#define RPMH_REGULATOR_MODE_PMIC5_HFSMPS_PFM	4
+#define RPMH_REGULATOR_MODE_PMIC5_HFSMPS_AUTO	6
+#define RPMH_REGULATOR_MODE_PMIC5_HFSMPS_PWM	7
+
+#define RPMH_REGULATOR_MODE_PMIC5_FTSMPS_RM	3
+#define RPMH_REGULATOR_MODE_PMIC5_FTSMPS_PWM	7
+
+#define RPMH_REGULATOR_MODE_PMIC5_BOB_PASS	2
+#define RPMH_REGULATOR_MODE_PMIC5_BOB_PFM	4
+#define RPMH_REGULATOR_MODE_PMIC5_BOB_AUTO	6
+#define RPMH_REGULATOR_MODE_PMIC5_BOB_PWM	7
+
+/*
+ * Mappings from RPMh generic modes to VRM accelerator modes and regulator
+ * framework modes for each regulator type.
+ */
+static const struct rpmh_regulator_mode
+rpmh_regulator_mode_map_pmic4_ldo[RPMH_REGULATOR_MODE_COUNT] = {
+	[RPMH_REGULATOR_MODE_RET] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC4_LDO_RM,
+		.framework_mode = REGULATOR_MODE_STANDBY,
+	},
+	[RPMH_REGULATOR_MODE_LPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC4_LDO_LPM,
+		.framework_mode = REGULATOR_MODE_IDLE,
+	},
+	[RPMH_REGULATOR_MODE_HPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC4_LDO_HPM,
+		.framework_mode = REGULATOR_MODE_FAST,
+	},
+};
+
+static const struct rpmh_regulator_mode
+rpmh_regulator_mode_map_pmic4_smps[RPMH_REGULATOR_MODE_COUNT] = {
+	[RPMH_REGULATOR_MODE_RET] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC4_SMPS_RM,
+		.framework_mode = REGULATOR_MODE_STANDBY,
+	},
+	[RPMH_REGULATOR_MODE_LPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC4_SMPS_PFM,
+		.framework_mode = REGULATOR_MODE_IDLE,
+	},
+	[RPMH_REGULATOR_MODE_AUTO] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC4_SMPS_AUTO,
+		.framework_mode = REGULATOR_MODE_NORMAL,
+	},
+	[RPMH_REGULATOR_MODE_HPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC4_SMPS_PWM,
+		.framework_mode = REGULATOR_MODE_FAST,
+	},
+};
+
+static const struct rpmh_regulator_mode
+rpmh_regulator_mode_map_pmic4_bob[RPMH_REGULATOR_MODE_COUNT] = {
+	[RPMH_REGULATOR_MODE_PASS] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC4_BOB_PASS,
+		.framework_mode = REGULATOR_MODE_STANDBY,
+	},
+	[RPMH_REGULATOR_MODE_LPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC4_BOB_PFM,
+		.framework_mode = REGULATOR_MODE_IDLE,
+	},
+	[RPMH_REGULATOR_MODE_AUTO] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC4_BOB_AUTO,
+		.framework_mode = REGULATOR_MODE_NORMAL,
+	},
+	[RPMH_REGULATOR_MODE_HPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC4_BOB_PWM,
+		.framework_mode = REGULATOR_MODE_FAST,
+	},
+};
+
+static const struct rpmh_regulator_mode
+rpmh_regulator_mode_map_pmic5_ldo[RPMH_REGULATOR_MODE_COUNT] = {
+	[RPMH_REGULATOR_MODE_RET] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_LDO_RM,
+		.framework_mode = REGULATOR_MODE_STANDBY,
+	},
+	[RPMH_REGULATOR_MODE_LPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_LDO_LPM,
+		.framework_mode = REGULATOR_MODE_IDLE,
+	},
+	[RPMH_REGULATOR_MODE_HPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_LDO_HPM,
+		.framework_mode = REGULATOR_MODE_FAST,
+	},
+};
+
+static const struct rpmh_regulator_mode
+rpmh_regulator_mode_map_pmic5_hfsmps[RPMH_REGULATOR_MODE_COUNT] = {
+	[RPMH_REGULATOR_MODE_RET] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_HFSMPS_RM,
+		.framework_mode = REGULATOR_MODE_STANDBY,
+	},
+	[RPMH_REGULATOR_MODE_LPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_HFSMPS_PFM,
+		.framework_mode = REGULATOR_MODE_IDLE,
+	},
+	[RPMH_REGULATOR_MODE_AUTO] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_HFSMPS_AUTO,
+		.framework_mode = REGULATOR_MODE_NORMAL,
+	},
+	[RPMH_REGULATOR_MODE_HPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_HFSMPS_PWM,
+		.framework_mode = REGULATOR_MODE_FAST,
+	},
+};
+
+static const struct rpmh_regulator_mode
+rpmh_regulator_mode_map_pmic5_ftsmps[RPMH_REGULATOR_MODE_COUNT] = {
+	[RPMH_REGULATOR_MODE_RET] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_FTSMPS_RM,
+		.framework_mode = REGULATOR_MODE_STANDBY,
+	},
+	[RPMH_REGULATOR_MODE_HPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_FTSMPS_PWM,
+		.framework_mode = REGULATOR_MODE_FAST,
+	},
+};
+
+static const struct rpmh_regulator_mode
+rpmh_regulator_mode_map_pmic5_bob[RPMH_REGULATOR_MODE_COUNT] = {
+	[RPMH_REGULATOR_MODE_PASS] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_BOB_PASS,
+		.framework_mode = REGULATOR_MODE_STANDBY,
+	},
+	[RPMH_REGULATOR_MODE_LPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_BOB_PFM,
+		.framework_mode = REGULATOR_MODE_IDLE,
+	},
+	[RPMH_REGULATOR_MODE_AUTO] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_BOB_AUTO,
+		.framework_mode = REGULATOR_MODE_NORMAL,
+	},
+	[RPMH_REGULATOR_MODE_HPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_BOB_PWM,
+		.framework_mode = REGULATOR_MODE_FAST,
+	},
+};
+
+static const struct rpmh_regulator_mode * const
+rpmh_regulator_mode_map[RPMH_REGULATOR_HW_TYPE_MAX] = {
+	[RPMH_REGULATOR_HW_TYPE_PMIC4_LDO]
+		= rpmh_regulator_mode_map_pmic4_ldo,
+	[RPMH_REGULATOR_HW_TYPE_PMIC4_HFSMPS]
+		= rpmh_regulator_mode_map_pmic4_smps,
+	[RPMH_REGULATOR_HW_TYPE_PMIC4_FTSMPS]
+		= rpmh_regulator_mode_map_pmic4_smps,
+	[RPMH_REGULATOR_HW_TYPE_PMIC4_BOB]
+		= rpmh_regulator_mode_map_pmic4_bob,
+	[RPMH_REGULATOR_HW_TYPE_PMIC5_LDO]
+		= rpmh_regulator_mode_map_pmic5_ldo,
+	[RPMH_REGULATOR_HW_TYPE_PMIC5_HFSMPS]
+		= rpmh_regulator_mode_map_pmic5_hfsmps,
+	[RPMH_REGULATOR_HW_TYPE_PMIC5_FTSMPS]
+		= rpmh_regulator_mode_map_pmic5_ftsmps,
+	[RPMH_REGULATOR_HW_TYPE_PMIC5_BOB]
+		= rpmh_regulator_mode_map_pmic5_bob,
+};
+
 /*
  * This voltage in uV is returned by get_voltage functions when there is no way
  * to determine the current voltage level.  It is needed because the regulator
@@ -869,9 +1056,9 @@
  *
  * This function sets the PMIC mode corresponding to the specified framework
  * mode.  The set of PMIC modes allowed is defined in device tree for a given
- * RPMh regulator resource.  The full mapping from PMIC modes to framework modes
- * is defined in the rpmh_regulator_mode_map[] array.  The RPMh resource
- * specific mapping is defined in the aggr_vreg->mode[] array.
+ * RPMh regulator resource.  The full mapping from generic modes to PMIC modes
+ * and framework modes is defined in the rpmh_regulator_mode_map[] array.  The
+ * RPMh resource specific mapping is defined in the aggr_vreg->mode[] array.
  *
  * Return: 0 on success, errno on failure
  */
@@ -1148,11 +1335,60 @@
 static int rpmh_regulator_parse_vrm_modes(struct rpmh_aggr_vreg *aggr_vreg)
 {
 	struct device_node *node = aggr_vreg->dev->of_node;
-	const char *prop = "qcom,supported-modes";
+	const char *type = "";
+	const struct rpmh_regulator_mode *map;
+	const char *prop;
 	int i, len, rc;
 	u32 *buf;
 
+	aggr_vreg->regulator_hw_type = RPMH_REGULATOR_HW_TYPE_UNKNOWN;
+
+	/* qcom,regulator-type is optional */
+	prop = "qcom,regulator-type";
+	if (!of_find_property(node, prop, &len))
+		return 0;
+
+	rc = of_property_read_string(node, prop, &type);
+	if (rc) {
+		aggr_vreg_err(aggr_vreg, "unable to read %s, rc=%d\n",
+				prop, rc);
+		return rc;
+	}
+
+	if (!strcmp(type, "pmic4-ldo")) {
+		aggr_vreg->regulator_hw_type
+			= RPMH_REGULATOR_HW_TYPE_PMIC4_LDO;
+	} else if (!strcmp(type, "pmic4-hfsmps")) {
+		aggr_vreg->regulator_hw_type
+			= RPMH_REGULATOR_HW_TYPE_PMIC4_HFSMPS;
+	} else if (!strcmp(type, "pmic4-ftsmps")) {
+		aggr_vreg->regulator_hw_type
+			= RPMH_REGULATOR_HW_TYPE_PMIC4_FTSMPS;
+	} else if (!strcmp(type, "pmic4-bob")) {
+		aggr_vreg->regulator_hw_type
+			= RPMH_REGULATOR_HW_TYPE_PMIC4_BOB;
+	} else if (!strcmp(type, "pmic5-ldo")) {
+		aggr_vreg->regulator_hw_type
+			= RPMH_REGULATOR_HW_TYPE_PMIC5_LDO;
+	} else if (!strcmp(type, "pmic5-hfsmps")) {
+		aggr_vreg->regulator_hw_type
+			= RPMH_REGULATOR_HW_TYPE_PMIC5_HFSMPS;
+	} else if (!strcmp(type, "pmic5-ftsmps")) {
+		aggr_vreg->regulator_hw_type
+			= RPMH_REGULATOR_HW_TYPE_PMIC5_FTSMPS;
+	} else if (!strcmp(type, "pmic5-bob")) {
+		aggr_vreg->regulator_hw_type
+			= RPMH_REGULATOR_HW_TYPE_PMIC5_BOB;
+	} else {
+		aggr_vreg_err(aggr_vreg, "unknown %s = %s\n",
+				prop, type);
+		return -EINVAL;
+	}
+
+	map = rpmh_regulator_mode_map[aggr_vreg->regulator_hw_type];
+
 	/* qcom,supported-modes is optional */
+	prop = "qcom,supported-modes";
 	if (!of_find_property(node, prop, &len))
 		return 0;
 
@@ -1176,15 +1412,22 @@
 	}
 
 	for (i = 0; i < len; i++) {
-		if (buf[i] >= ARRAY_SIZE(rpmh_regulator_mode_map)) {
+		if (buf[i] >= RPMH_REGULATOR_MODE_COUNT) {
 			aggr_vreg_err(aggr_vreg, "element %d of %s = %u is invalid\n",
 				i, prop, buf[i]);
 			rc = -EINVAL;
 			goto done;
 		}
-		aggr_vreg->mode[i].pmic_mode = buf[i];
-		aggr_vreg->mode[i].framework_mode
-			= rpmh_regulator_mode_map[buf[i]];
+
+		if (!map[buf[i]].framework_mode) {
+			aggr_vreg_err(aggr_vreg, "element %d of %s = %u is invalid for regulator type = %s\n",
+				i, prop, buf[i], type);
+			rc = -EINVAL;
+			goto done;
+		}
+
+		aggr_vreg->mode[i].pmic_mode = map[buf[i]].pmic_mode;
+		aggr_vreg->mode[i].framework_mode = map[buf[i]].framework_mode;
 
 		if (i > 0 && aggr_vreg->mode[i].pmic_mode
 				<= aggr_vreg->mode[i - 1].pmic_mode) {
@@ -1287,6 +1530,7 @@
 static int rpmh_regulator_load_default_parameters(struct rpmh_vreg *vreg)
 {
 	enum rpmh_regulator_type type = vreg->aggr_vreg->regulator_type;
+	const struct rpmh_regulator_mode *map;
 	const char *prop;
 	int i, rc;
 	u32 temp;
@@ -1336,15 +1580,36 @@
 		prop = "qcom,init-mode";
 		rc = of_property_read_u32(vreg->of_node, prop, &temp);
 		if (!rc) {
-			if (temp < RPMH_VRM_MODE_MIN ||
-			    temp > RPMH_VRM_MODE_MAX)  {
+			if (temp >= RPMH_REGULATOR_MODE_COUNT) {
 				vreg_err(vreg, "%s=%u is invalid\n",
 					prop, temp);
 				return -EINVAL;
+			} else if (vreg->aggr_vreg->regulator_hw_type
+					== RPMH_REGULATOR_HW_TYPE_UNKNOWN) {
+				vreg_err(vreg, "qcom,regulator-type missing so %s cannot be used\n",
+					prop);
+				return -EINVAL;
 			}
+
+			map = rpmh_regulator_mode_map[
+					vreg->aggr_vreg->regulator_hw_type];
+			if (!map[temp].framework_mode) {
+				vreg_err(vreg, "%s=%u is not supported by type = %d\n",
+					prop, temp,
+					vreg->aggr_vreg->regulator_hw_type);
+				return -EINVAL;
+			}
+
 			rpmh_regulator_set_reg(vreg,
 						RPMH_REGULATOR_REG_VRM_MODE,
-						temp);
+						map[temp].pmic_mode);
+			for (i = 0; i < vreg->aggr_vreg->mode_count; i++) {
+				if (vreg->aggr_vreg->mode[i].pmic_mode
+				    == map[temp].pmic_mode) {
+					vreg->mode_index = i;
+					break;
+				}
+			}
 		}
 
 		prop = "qcom,init-headroom-voltage";
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index f1d4ca2..99b5e35 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -784,7 +784,7 @@
 	}
 
 	timerqueue_add(&rtc->timerqueue, &timer->node);
-	if (!next) {
+	if (!next || ktime_before(timer->node.expires, next->expires)) {
 		struct rtc_wkalrm alarm;
 		int err;
 		alarm.time = rtc_ktime_to_tm(timer->node.expires);
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index 1227cea..a4b8b60 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -422,7 +422,7 @@
 		return 0;
 
 	buf &= PCF8563_REG_CLKO_F_MASK;
-	return clkout_rates[ret];
+	return clkout_rates[buf];
 }
 
 static long pcf8563_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index e1687e1..a30f24c 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -308,7 +308,8 @@
 
 	dev_pm_clear_wake_irq(&adev->dev);
 	device_init_wakeup(&adev->dev, false);
-	free_irq(adev->irq[0], ldata);
+	if (adev->irq[0])
+		free_irq(adev->irq[0], ldata);
 	rtc_device_unregister(ldata->rtc);
 	iounmap(ldata->base);
 	kfree(ldata);
@@ -381,12 +382,13 @@
 		goto out_no_rtc;
 	}
 
-	if (request_irq(adev->irq[0], pl031_interrupt,
-			vendor->irqflags, "rtc-pl031", ldata)) {
-		ret = -EIO;
-		goto out_no_irq;
+	if (adev->irq[0]) {
+		ret = request_irq(adev->irq[0], pl031_interrupt,
+				  vendor->irqflags, "rtc-pl031", ldata);
+		if (ret)
+			goto out_no_irq;
+		dev_pm_set_wake_irq(&adev->dev, adev->irq[0]);
 	}
-	dev_pm_set_wake_irq(&adev->dev, adev->irq[0]);
 	return 0;
 
 out_no_irq:
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index d55e643..9b5fc50 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -576,9 +576,9 @@
 };
 
 struct qeth_ipato {
-	int enabled;
-	int invert4;
-	int invert6;
+	bool enabled;
+	bool invert4;
+	bool invert6;
 	struct list_head entries;
 };
 
@@ -969,7 +969,8 @@
 int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role);
 int qeth_bridgeport_an_set(struct qeth_card *card, int enable);
 int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
-int qeth_get_elements_no(struct qeth_card *, struct sk_buff *, int);
+int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb,
+			 int extra_elems, int data_offset);
 int qeth_get_elements_for_frags(struct sk_buff *);
 int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
 			struct sk_buff *, struct qeth_hdr *, int, int, int);
@@ -1004,6 +1005,9 @@
 int qeth_set_features(struct net_device *, netdev_features_t);
 int qeth_recover_features(struct net_device *);
 netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
+netdev_features_t qeth_features_check(struct sk_buff *skb,
+				      struct net_device *dev,
+				      netdev_features_t features);
 
 /* exports for OSN */
 int qeth_osn_assist(struct net_device *, void *, int);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 21ef802..df8f74c 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -19,6 +19,11 @@
 #include <linux/mii.h>
 #include <linux/kthread.h>
 #include <linux/slab.h>
+#include <linux/if_vlan.h>
+#include <linux/netdevice.h>
+#include <linux/netdev_features.h>
+#include <linux/skbuff.h>
+
 #include <net/iucv/af_iucv.h>
 #include <net/dsfield.h>
 
@@ -1470,9 +1475,9 @@
 	qeth_set_intial_options(card);
 	/* IP address takeover */
 	INIT_LIST_HEAD(&card->ipato.entries);
-	card->ipato.enabled = 0;
-	card->ipato.invert4 = 0;
-	card->ipato.invert6 = 0;
+	card->ipato.enabled = false;
+	card->ipato.invert4 = false;
+	card->ipato.invert6 = false;
 	/* init QDIO stuff */
 	qeth_init_qdio_info(card);
 	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
@@ -3837,6 +3842,7 @@
  * @card:			qeth card structure, to check max. elems.
  * @skb:			SKB address
  * @extra_elems:		extra elems needed, to check against max.
+ * @data_offset:		range starts at skb->data + data_offset
  *
  * Returns the number of pages, and thus QDIO buffer elements, needed to cover
  * skb data, including linear part and fragments. Checks if the result plus
@@ -3844,10 +3850,10 @@
  * Note: extra_elems is not included in the returned result.
  */
 int qeth_get_elements_no(struct qeth_card *card,
-		     struct sk_buff *skb, int extra_elems)
+		     struct sk_buff *skb, int extra_elems, int data_offset)
 {
 	int elements = qeth_get_elements_for_range(
-				(addr_t)skb->data,
+				(addr_t)skb->data + data_offset,
 				(addr_t)skb->data + skb_headlen(skb)) +
 			qeth_get_elements_for_frags(skb);
 
@@ -6240,6 +6246,32 @@
 }
 EXPORT_SYMBOL_GPL(qeth_fix_features);
 
+netdev_features_t qeth_features_check(struct sk_buff *skb,
+				      struct net_device *dev,
+				      netdev_features_t features)
+{
+	/* GSO segmentation builds skbs with
+	 *	a (small) linear part for the headers, and
+	 *	page frags for the data.
+	 * Compared to a linear skb, the header-only part consumes an
+	 * additional buffer element. This reduces buffer utilization, and
+	 * hurts throughput. So compress small segments into one element.
+	 */
+	if (netif_needs_gso(skb, features)) {
+		/* match skb_segment(): */
+		unsigned int doffset = skb->data - skb_mac_header(skb);
+		unsigned int hsize = skb_shinfo(skb)->gso_size;
+		unsigned int hroom = skb_headroom(skb);
+
+		/* linearize only if resulting skb allocations are order-0: */
+		if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
+			features &= ~NETIF_F_SG;
+	}
+
+	return vlan_features_check(skb, features);
+}
+EXPORT_SYMBOL_GPL(qeth_features_check);
+
 static int __init qeth_core_init(void)
 {
 	int rc;
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 8530477..5082dfe 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -865,7 +865,7 @@
 	 * chaining we can not send long frag lists
 	 */
 	if ((card->info.type != QETH_CARD_TYPE_IQD) &&
-	    !qeth_get_elements_no(card, new_skb, 0)) {
+	    !qeth_get_elements_no(card, new_skb, 0, 0)) {
 		int lin_rc = skb_linearize(new_skb);
 
 		if (card->options.performance_stats) {
@@ -910,7 +910,8 @@
 		}
 	}
 
-	elements = qeth_get_elements_no(card, new_skb, elements_needed);
+	elements = qeth_get_elements_no(card, new_skb, elements_needed,
+					(data_offset > 0) ? data_offset : 0);
 	if (!elements) {
 		if (data_offset >= 0)
 			kmem_cache_free(qeth_core_header_cache, hdr);
@@ -1084,6 +1085,7 @@
 	.ndo_stop		= qeth_l2_stop,
 	.ndo_get_stats		= qeth_get_stats,
 	.ndo_start_xmit		= qeth_l2_hard_start_xmit,
+	.ndo_features_check	= qeth_features_check,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_rx_mode	= qeth_l2_set_rx_mode,
 	.ndo_do_ioctl	   	= qeth_l2_do_ioctl,
@@ -1128,6 +1130,7 @@
 	if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) {
 		card->dev->hw_features = NETIF_F_SG;
 		card->dev->vlan_features = NETIF_F_SG;
+		card->dev->features |= NETIF_F_SG;
 		/* OSA 3S and earlier has no RX/TX support */
 		if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) {
 			card->dev->hw_features |= NETIF_F_IP_CSUM;
@@ -1140,8 +1143,6 @@
 	}
 	card->info.broadcast_capable = 1;
 	qeth_l2_request_initial_mac(card);
-	card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
-				  PAGE_SIZE;
 	SET_NETDEV_DEV(card->dev, &card->gdev->dev);
 	netif_napi_add(card->dev, &card->napi, qeth_l2_poll, QETH_NAPI_WEIGHT);
 	netif_carrier_off(card->dev);
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 26f7953..eedf9b0 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -80,7 +80,7 @@
 int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
 void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions,
 			const u8 *);
-int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *);
+void qeth_l3_update_ipato(struct qeth_card *card);
 struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions);
 int qeth_l3_add_ip(struct qeth_card *, struct qeth_ipaddr *);
 int qeth_l3_delete_ip(struct qeth_card *, struct qeth_ipaddr *);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 03a2619..1487f8a 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -168,8 +168,8 @@
 	}
 }
 
-int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
-						struct qeth_ipaddr *addr)
+static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
+					     struct qeth_ipaddr *addr)
 {
 	struct qeth_ipato_entry *ipatoe;
 	u8 addr_bits[128] = {0, };
@@ -178,6 +178,8 @@
 
 	if (!card->ipato.enabled)
 		return 0;
+	if (addr->type != QETH_IP_TYPE_NORMAL)
+		return 0;
 
 	qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
 				  (addr->proto == QETH_PROT_IPV4)? 4:16);
@@ -293,8 +295,7 @@
 		memcpy(addr, tmp_addr, sizeof(struct qeth_ipaddr));
 		addr->ref_counter = 1;
 
-		if (addr->type == QETH_IP_TYPE_NORMAL  &&
-				qeth_l3_is_addr_covered_by_ipato(card, addr)) {
+		if (qeth_l3_is_addr_covered_by_ipato(card, addr)) {
 			QETH_CARD_TEXT(card, 2, "tkovaddr");
 			addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
 		}
@@ -607,6 +608,27 @@
 /*
  * IP address takeover related functions
  */
+
+/**
+ * qeth_l3_update_ipato() - Update 'takeover' property, for all NORMAL IPs.
+ *
+ * Caller must hold ip_lock.
+ */
+void qeth_l3_update_ipato(struct qeth_card *card)
+{
+	struct qeth_ipaddr *addr;
+	unsigned int i;
+
+	hash_for_each(card->ip_htable, i, addr, hnode) {
+		if (addr->type != QETH_IP_TYPE_NORMAL)
+			continue;
+		if (qeth_l3_is_addr_covered_by_ipato(card, addr))
+			addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
+		else
+			addr->set_flags &= ~QETH_IPA_SETIP_TAKEOVER_FLAG;
+	}
+}
+
 static void qeth_l3_clear_ipato_list(struct qeth_card *card)
 {
 	struct qeth_ipato_entry *ipatoe, *tmp;
@@ -618,6 +640,7 @@
 		kfree(ipatoe);
 	}
 
+	qeth_l3_update_ipato(card);
 	spin_unlock_bh(&card->ip_lock);
 }
 
@@ -642,8 +665,10 @@
 		}
 	}
 
-	if (!rc)
+	if (!rc) {
 		list_add_tail(&new->entry, &card->ipato.entries);
+		qeth_l3_update_ipato(card);
+	}
 
 	spin_unlock_bh(&card->ip_lock);
 
@@ -666,6 +691,7 @@
 			    (proto == QETH_PROT_IPV4)? 4:16) &&
 		    (ipatoe->mask_bits == mask_bits)) {
 			list_del(&ipatoe->entry);
+			qeth_l3_update_ipato(card);
 			kfree(ipatoe);
 		}
 	}
@@ -1416,6 +1442,7 @@
 
 		tmp->u.a4.addr = im4->multiaddr;
 		memcpy(tmp->mac, buf, sizeof(tmp->mac));
+		tmp->is_multicast = 1;
 
 		ipm = qeth_l3_ip_from_hash(card, tmp);
 		if (ipm) {
@@ -1593,7 +1620,7 @@
 
 	addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
 	if (!addr)
-		return;
+		goto out;
 
 	spin_lock_bh(&card->ip_lock);
 
@@ -1607,6 +1634,7 @@
 	spin_unlock_bh(&card->ip_lock);
 
 	kfree(addr);
+out:
 	in_dev_put(in_dev);
 }
 
@@ -1631,7 +1659,7 @@
 
 	addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
 	if (!addr)
-		return;
+		goto out;
 
 	spin_lock_bh(&card->ip_lock);
 
@@ -1646,6 +1674,7 @@
 	spin_unlock_bh(&card->ip_lock);
 
 	kfree(addr);
+out:
 	in6_dev_put(in6_dev);
 #endif /* CONFIG_QETH_IPV6 */
 }
@@ -2609,17 +2638,13 @@
 	char daddr[16];
 	struct af_iucv_trans_hdr *iucv_hdr;
 
-	skb_pull(skb, 14);
-	card->dev->header_ops->create(skb, card->dev, 0,
-				      card->dev->dev_addr, card->dev->dev_addr,
-				      card->dev->addr_len);
-	skb_pull(skb, 14);
-	iucv_hdr = (struct af_iucv_trans_hdr *)skb->data;
 	memset(hdr, 0, sizeof(struct qeth_hdr));
 	hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
 	hdr->hdr.l3.ext_flags = 0;
-	hdr->hdr.l3.length = skb->len;
+	hdr->hdr.l3.length = skb->len - ETH_HLEN;
 	hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
+
+	iucv_hdr = (struct af_iucv_trans_hdr *) (skb->data + ETH_HLEN);
 	memset(daddr, 0, sizeof(daddr));
 	daddr[0] = 0xfe;
 	daddr[1] = 0x80;
@@ -2823,10 +2848,7 @@
 	if ((card->info.type == QETH_CARD_TYPE_IQD) &&
 	    !skb_is_nonlinear(skb)) {
 		new_skb = skb;
-		if (new_skb->protocol == ETH_P_AF_IUCV)
-			data_offset = 0;
-		else
-			data_offset = ETH_HLEN;
+		data_offset = ETH_HLEN;
 		hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
 		if (!hdr)
 			goto tx_drop;
@@ -2867,7 +2889,7 @@
 	 */
 	if ((card->info.type != QETH_CARD_TYPE_IQD) &&
 	    ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) ||
-	     (!use_tso && !qeth_get_elements_no(card, new_skb, 0)))) {
+	     (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0)))) {
 		int lin_rc = skb_linearize(new_skb);
 
 		if (card->options.performance_stats) {
@@ -2909,7 +2931,8 @@
 
 	elements = use_tso ?
 		   qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) :
-		   qeth_get_elements_no(card, new_skb, hdr_elements);
+		   qeth_get_elements_no(card, new_skb, hdr_elements,
+					(data_offset > 0) ? data_offset : 0);
 	if (!elements) {
 		if (data_offset >= 0)
 			kmem_cache_free(qeth_core_header_cache, hdr);
@@ -3064,6 +3087,7 @@
 	.ndo_stop		= qeth_l3_stop,
 	.ndo_get_stats		= qeth_get_stats,
 	.ndo_start_xmit		= qeth_l3_hard_start_xmit,
+	.ndo_features_check	= qeth_features_check,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_rx_mode	= qeth_l3_set_multicast_list,
 	.ndo_do_ioctl		= qeth_l3_do_ioctl,
@@ -3120,6 +3144,7 @@
 				card->dev->vlan_features = NETIF_F_SG |
 					NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
 					NETIF_F_TSO;
+				card->dev->features |= NETIF_F_SG;
 			}
 		}
 	} else if (card->info.type == QETH_CARD_TYPE_IQD) {
@@ -3145,8 +3170,8 @@
 				NETIF_F_HW_VLAN_CTAG_RX |
 				NETIF_F_HW_VLAN_CTAG_FILTER;
 	netif_keep_dst(card->dev);
-	card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
-				  PAGE_SIZE;
+	netif_set_gso_max_size(card->dev, (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
+					  PAGE_SIZE);
 
 	SET_NETDEV_DEV(card->dev, &card->gdev->dev);
 	netif_napi_add(card->dev, &card->napi, qeth_l3_poll, QETH_NAPI_WEIGHT);
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index cffe42f..d6bdfc6 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -372,8 +372,8 @@
 		struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
-	struct qeth_ipaddr *addr;
-	int i, rc = 0;
+	bool enable;
+	int rc = 0;
 
 	if (!card)
 		return -EINVAL;
@@ -386,25 +386,18 @@
 	}
 
 	if (sysfs_streq(buf, "toggle")) {
-		card->ipato.enabled = (card->ipato.enabled)? 0 : 1;
-	} else if (sysfs_streq(buf, "1")) {
-		card->ipato.enabled = 1;
-		hash_for_each(card->ip_htable, i, addr, hnode) {
-				if ((addr->type == QETH_IP_TYPE_NORMAL) &&
-				qeth_l3_is_addr_covered_by_ipato(card, addr))
-					addr->set_flags |=
-					QETH_IPA_SETIP_TAKEOVER_FLAG;
-			}
-	} else if (sysfs_streq(buf, "0")) {
-		card->ipato.enabled = 0;
-		hash_for_each(card->ip_htable, i, addr, hnode) {
-			if (addr->set_flags &
-			QETH_IPA_SETIP_TAKEOVER_FLAG)
-				addr->set_flags &=
-				~QETH_IPA_SETIP_TAKEOVER_FLAG;
-			}
-	} else
+		enable = !card->ipato.enabled;
+	} else if (kstrtobool(buf, &enable)) {
 		rc = -EINVAL;
+		goto out;
+	}
+
+	if (card->ipato.enabled != enable) {
+		card->ipato.enabled = enable;
+		spin_lock_bh(&card->ip_lock);
+		qeth_l3_update_ipato(card);
+		spin_unlock_bh(&card->ip_lock);
+	}
 out:
 	mutex_unlock(&card->conf_mutex);
 	return rc ? rc : count;
@@ -430,20 +423,27 @@
 				const char *buf, size_t count)
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
+	bool invert;
 	int rc = 0;
 
 	if (!card)
 		return -EINVAL;
 
 	mutex_lock(&card->conf_mutex);
-	if (sysfs_streq(buf, "toggle"))
-		card->ipato.invert4 = (card->ipato.invert4)? 0 : 1;
-	else if (sysfs_streq(buf, "1"))
-		card->ipato.invert4 = 1;
-	else if (sysfs_streq(buf, "0"))
-		card->ipato.invert4 = 0;
-	else
+	if (sysfs_streq(buf, "toggle")) {
+		invert = !card->ipato.invert4;
+	} else if (kstrtobool(buf, &invert)) {
 		rc = -EINVAL;
+		goto out;
+	}
+
+	if (card->ipato.invert4 != invert) {
+		card->ipato.invert4 = invert;
+		spin_lock_bh(&card->ip_lock);
+		qeth_l3_update_ipato(card);
+		spin_unlock_bh(&card->ip_lock);
+	}
+out:
 	mutex_unlock(&card->conf_mutex);
 	return rc ? rc : count;
 }
@@ -609,20 +609,27 @@
 		struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
+	bool invert;
 	int rc = 0;
 
 	if (!card)
 		return -EINVAL;
 
 	mutex_lock(&card->conf_mutex);
-	if (sysfs_streq(buf, "toggle"))
-		card->ipato.invert6 = (card->ipato.invert6)? 0 : 1;
-	else if (sysfs_streq(buf, "1"))
-		card->ipato.invert6 = 1;
-	else if (sysfs_streq(buf, "0"))
-		card->ipato.invert6 = 0;
-	else
+	if (sysfs_streq(buf, "toggle")) {
+		invert = !card->ipato.invert6;
+	} else if (kstrtobool(buf, &invert)) {
 		rc = -EINVAL;
+		goto out;
+	}
+
+	if (card->ipato.invert6 != invert) {
+		card->ipato.invert6 = invert;
+		spin_lock_bh(&card->ip_lock);
+		qeth_l3_update_ipato(card);
+		spin_unlock_bh(&card->ip_lock);
+	}
+out:
 	mutex_unlock(&card->conf_mutex);
 	return rc ? rc : count;
 }
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index 8dcd8c7..05f5239 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -255,7 +255,8 @@
 	struct bfad_s *bfad = port->bfad;
 	struct bfa_s *bfa = &bfad->bfa;
 	struct bfa_ioc_s *ioc = &bfa->ioc;
-	int addr, len, rc, i;
+	int addr, rc, i;
+	u32 len;
 	u32 *regbuf;
 	void __iomem *rb, *reg_addr;
 	unsigned long flags;
@@ -266,7 +267,7 @@
 		return PTR_ERR(kern_buf);
 
 	rc = sscanf(kern_buf, "%x:%x", &addr, &len);
-	if (rc < 2) {
+	if (rc < 2 || len > (UINT_MAX >> 2)) {
 		printk(KERN_INFO
 			"bfad[%d]: %s failed to read user buf\n",
 			bfad->inst_no, __func__);
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 0039beb..358ec32 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1347,6 +1347,7 @@
 		csk, csk->state, csk->flags, csk->tid);
 
 	cxgbi_sock_free_cpl_skbs(csk);
+	cxgbi_sock_purge_write_queue(csk);
 	if (csk->wr_cred != csk->wr_max_cred) {
 		cxgbi_sock_purge_wr_queue(csk);
 		cxgbi_sock_reset_wr_list(csk);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index a1d6ab7..9962370 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -2951,7 +2951,7 @@
 	/* fill_cmd can't fail here, no data buffer to map. */
 	(void) fill_cmd(c, reset_type, h, NULL, 0, 0,
 			scsi3addr, TYPE_MSG);
-	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
+	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
 	if (rc) {
 		dev_warn(&h->pdev->dev, "Failed to send reset command\n");
 		goto out;
@@ -3686,7 +3686,7 @@
  *  # (integer code indicating one of several NOT READY states
  *     describing why a volume is to be kept offline)
  */
-static int hpsa_volume_offline(struct ctlr_info *h,
+static unsigned char hpsa_volume_offline(struct ctlr_info *h,
 					unsigned char scsi3addr[])
 {
 	struct CommandList *c;
@@ -3707,7 +3707,7 @@
 					DEFAULT_TIMEOUT);
 	if (rc) {
 		cmd_free(h, c);
-		return 0;
+		return HPSA_VPD_LV_STATUS_UNSUPPORTED;
 	}
 	sense = c->err_info->SenseInfo;
 	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
@@ -3718,19 +3718,13 @@
 	cmd_status = c->err_info->CommandStatus;
 	scsi_status = c->err_info->ScsiStatus;
 	cmd_free(h, c);
-	/* Is the volume 'not ready'? */
-	if (cmd_status != CMD_TARGET_STATUS ||
-		scsi_status != SAM_STAT_CHECK_CONDITION ||
-		sense_key != NOT_READY ||
-		asc != ASC_LUN_NOT_READY)  {
-		return 0;
-	}
 
 	/* Determine the reason for not ready state */
 	ldstat = hpsa_get_volume_status(h, scsi3addr);
 
 	/* Keep volume offline in certain cases: */
 	switch (ldstat) {
+	case HPSA_LV_FAILED:
 	case HPSA_LV_UNDERGOING_ERASE:
 	case HPSA_LV_NOT_AVAILABLE:
 	case HPSA_LV_UNDERGOING_RPI:
@@ -3752,7 +3746,7 @@
 	default:
 		break;
 	}
-	return 0;
+	return HPSA_LV_OK;
 }
 
 /*
@@ -3825,10 +3819,10 @@
 	/* Do an inquiry to the device to see what it is. */
 	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
 		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
-		/* Inquiry failed (msg printed already) */
 		dev_err(&h->pdev->dev,
-			"hpsa_update_device_info: inquiry failed\n");
-		rc = -EIO;
+			"%s: inquiry failed, device will be skipped.\n",
+			__func__);
+		rc = HPSA_INQUIRY_FAILED;
 		goto bail_out;
 	}
 
@@ -3857,15 +3851,19 @@
 	if ((this_device->devtype == TYPE_DISK ||
 		this_device->devtype == TYPE_ZBC) &&
 		is_logical_dev_addr_mode(scsi3addr)) {
-		int volume_offline;
+		unsigned char volume_offline;
 
 		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
 		if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
 			hpsa_get_ioaccel_status(h, scsi3addr, this_device);
 		volume_offline = hpsa_volume_offline(h, scsi3addr);
-		if (volume_offline < 0 || volume_offline > 0xff)
-			volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
-		this_device->volume_offline = volume_offline & 0xff;
+		if (volume_offline == HPSA_LV_FAILED) {
+			rc = HPSA_LV_FAILED;
+			dev_err(&h->pdev->dev,
+				"%s: LV failed, device will be skipped.\n",
+				__func__);
+			goto bail_out;
+		}
 	} else {
 		this_device->raid_level = RAID_UNKNOWN;
 		this_device->offload_config = 0;
@@ -4353,8 +4351,7 @@
 			goto out;
 		}
 		if (rc) {
-			dev_warn(&h->pdev->dev,
-				"Inquiry failed, skipping device.\n");
+			h->drv_req_rescan = 1;
 			continue;
 		}
 
@@ -5532,7 +5529,7 @@
 
 	spin_lock_irqsave(&h->scan_lock, flags);
 	h->scan_finished = 1;
-	wake_up_all(&h->scan_wait_queue);
+	wake_up(&h->scan_wait_queue);
 	spin_unlock_irqrestore(&h->scan_lock, flags);
 }
 
@@ -5550,11 +5547,23 @@
 	if (unlikely(lockup_detected(h)))
 		return hpsa_scan_complete(h);
 
+	/*
+	 * If a scan is already waiting to run, no need to add another
+	 */
+	spin_lock_irqsave(&h->scan_lock, flags);
+	if (h->scan_waiting) {
+		spin_unlock_irqrestore(&h->scan_lock, flags);
+		return;
+	}
+
+	spin_unlock_irqrestore(&h->scan_lock, flags);
+
 	/* wait until any scan already in progress is finished. */
 	while (1) {
 		spin_lock_irqsave(&h->scan_lock, flags);
 		if (h->scan_finished)
 			break;
+		h->scan_waiting = 1;
 		spin_unlock_irqrestore(&h->scan_lock, flags);
 		wait_event(h->scan_wait_queue, h->scan_finished);
 		/* Note: We don't need to worry about a race between this
@@ -5564,6 +5573,7 @@
 		 */
 	}
 	h->scan_finished = 0; /* mark scan as in progress */
+	h->scan_waiting = 0;
 	spin_unlock_irqrestore(&h->scan_lock, flags);
 
 	if (unlikely(lockup_detected(h)))
@@ -8802,6 +8812,7 @@
 	init_waitqueue_head(&h->event_sync_wait_queue);
 	mutex_init(&h->reset_mutex);
 	h->scan_finished = 1; /* no scan currently in progress */
+	h->scan_waiting = 0;
 
 	pci_set_drvdata(pdev, h);
 	h->ndevices = 0;
@@ -9094,6 +9105,8 @@
 	destroy_workqueue(h->rescan_ctlr_wq);
 	destroy_workqueue(h->resubmit_wq);
 
+	hpsa_delete_sas_host(h);
+
 	/*
 	 * Call before disabling interrupts.
 	 * scsi_remove_host can trigger I/O operations especially
@@ -9128,8 +9141,6 @@
 	h->lockup_detected = NULL;			/* init_one 2 */
 	/* (void) pci_disable_pcie_error_reporting(pdev); */	/* init_one 1 */
 
-	hpsa_delete_sas_host(h);
-
 	kfree(h);					/* init_one 1 */
 }
 
@@ -9621,9 +9632,9 @@
 	struct sas_phy *phy = hpsa_sas_phy->phy;
 
 	sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy);
-	sas_phy_free(phy);
 	if (hpsa_sas_phy->added_to_port)
 		list_del(&hpsa_sas_phy->phy_list_entry);
+	sas_phy_delete(phy);
 	kfree(hpsa_sas_phy);
 }
 
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 9ea162d..e16f294 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -203,6 +203,7 @@
 	dma_addr_t		errinfo_pool_dhandle;
 	unsigned long  		*cmd_pool_bits;
 	int			scan_finished;
+	u8			scan_waiting : 1;
 	spinlock_t		scan_lock;
 	wait_queue_head_t	scan_wait_queue;
 
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index a584cdf..5961705 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -156,6 +156,7 @@
 #define CFGTBL_BusType_Fibre2G  0x00000200l
 
 /* VPD Inquiry types */
+#define HPSA_INQUIRY_FAILED		0x02
 #define HPSA_VPD_SUPPORTED_PAGES        0x00
 #define HPSA_VPD_LV_DEVICE_ID           0x83
 #define HPSA_VPD_LV_DEVICE_GEOMETRY     0xC1
@@ -166,6 +167,7 @@
 /* Logical volume states */
 #define HPSA_VPD_LV_STATUS_UNSUPPORTED			0xff
 #define HPSA_LV_OK                                      0x0
+#define HPSA_LV_FAILED					0x01
 #define HPSA_LV_NOT_AVAILABLE				0x0b
 #define HPSA_LV_UNDERGOING_ERASE			0x0F
 #define HPSA_LV_UNDERGOING_RPI				0x12
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 4df3cdc..fc7adda 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -7782,7 +7782,8 @@
 			did, vport->port_state, ndlp->nlp_flag);
 
 		phba->fc_stat.elsRcvPRLI++;
-		if (vport->port_state < LPFC_DISC_AUTH) {
+		if ((vport->port_state < LPFC_DISC_AUTH) &&
+		    (vport->fc_flag & FC_FABRIC)) {
 			rjt_err = LSRJT_UNABLE_TPC;
 			rjt_exp = LSEXP_NOTHING_MORE;
 			break;
@@ -8185,11 +8186,17 @@
 			spin_lock_irq(shost->host_lock);
 			vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
 			spin_unlock_irq(shost->host_lock);
-			if (vport->port_type == LPFC_PHYSICAL_PORT
-				&& !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
-				lpfc_issue_init_vfi(vport);
-			else
+			if (mb->mbxStatus == MBX_NOT_FINISHED)
+				break;
+			if ((vport->port_type == LPFC_PHYSICAL_PORT) &&
+			    !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) {
+				if (phba->sli_rev == LPFC_SLI_REV4)
+					lpfc_issue_init_vfi(vport);
+				else
+					lpfc_initial_flogi(vport);
+			} else {
 				lpfc_initial_fdisc(vport);
+			}
 			break;
 		}
 	} else {
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index ed22393..7d2ad63 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -4784,7 +4784,8 @@
 	lpfc_cancel_retry_delay_tmo(vport, ndlp);
 	if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
 	    !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
-	    !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
+	    !(ndlp->nlp_flag & NLP_RPI_REGISTERED) &&
+	    phba->sli_rev != LPFC_SLI_REV4) {
 		/* For this case we need to cleanup the default rpi
 		 * allocated by the firmware.
 		 */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 55faa94..2a436df 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -3232,7 +3232,7 @@
 #define MB_CEQ_STATUS_QUEUE_FLUSHING		0x4
 #define MB_CQE_STATUS_DMA_FAILED		0x5
 
-#define LPFC_MBX_WR_CONFIG_MAX_BDE		8
+#define LPFC_MBX_WR_CONFIG_MAX_BDE		1
 struct lpfc_mbx_wr_object {
 	struct mbox_header header;
 	union {
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 289374c..468acab 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -4770,6 +4770,11 @@
 		} else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
 			scmd->result = DID_RESET << 16;
 			break;
+		} else if ((scmd->device->channel == RAID_CHANNEL) &&
+		   (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
+		   MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
+			scmd->result = DID_RESET << 16;
+			break;
 		}
 		scmd->result = DID_SOFT_ERROR << 16;
 		break;
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 658e4d1..ce4ac76 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -2707,13 +2707,9 @@
 	    "%-+5d  0  1  2  3  4  5  6  7  8  9  A  B  C  D  E  F\n", size);
 	ql_dbg(level, vha, id,
 	    "----- -----------------------------------------------\n");
-	for (cnt = 0; cnt < size; cnt++, buf++) {
-		if (cnt % 16 == 0)
-			ql_dbg(level, vha, id, "%04x:", cnt & ~0xFU);
-		printk(" %02x", *buf);
-		if (cnt % 16 == 15)
-			printk("\n");
+	for (cnt = 0; cnt < size; cnt += 16) {
+		ql_dbg(level, vha, id, "%04x: ", cnt);
+		print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1,
+			       buf + cnt, min(16U, size - cnt), false);
 	}
-	if (cnt % 16 != 0)
-		printk("\n");
 }
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 91f5f55..59059ff 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -668,11 +668,9 @@
 {
 	struct qla_hw_data *ha = vha->hw;
 	struct qla_tgt_sess *sess = NULL;
-	uint32_t unpacked_lun, lun = 0;
 	uint16_t loop_id;
 	int res = 0;
 	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
-	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
 	unsigned long flags;
 
 	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
@@ -725,11 +723,7 @@
 	    "loop_id %d)\n", vha->host_no, sess, sess->port_name,
 	    mcmd, loop_id);
 
-	lun = a->u.isp24.fcp_cmnd.lun;
-	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
-
-	return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
-	    iocb, QLA24XX_MGMT_SEND_NACK);
+	return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
 }
 
 /* ha->tgt.sess_lock supposed to be held on entry */
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index cf04a36..2b0e615 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -2996,11 +2996,11 @@
 	if (-1 == ret) {
 		write_unlock_irqrestore(&atomic_rw, iflags);
 		return DID_ERROR << 16;
-	} else if (sdebug_verbose && (ret < (num * sdebug_sector_size)))
+	} else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size))
 		sdev_printk(KERN_INFO, scp->device,
-			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
+			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
 			    my_name, "write same",
-			    num * sdebug_sector_size, ret);
+			    sdebug_sector_size, ret);
 
 	/* Copy first sector to remaining blocks */
 	for (i = 1 ; i < num ; i++)
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 2464569..26e6b05 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -160,7 +160,7 @@
 	{"DGC", "RAID", NULL, BLIST_SPARSELUN},	/* Dell PV 650F, storage on LUN 0 */
 	{"DGC", "DISK", NULL, BLIST_SPARSELUN},	/* Dell PV 650F, no storage on LUN 0 */
 	{"EMC",  "Invista", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
-	{"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_FORCELUN},
+	{"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_REPORTLUN2},
 	{"EMULEX", "MD21/S2     ESDI", NULL, BLIST_SINGLELUN},
 	{"easyRAID", "16P", NULL, BLIST_NOREPORTLUN},
 	{"easyRAID", "X6P", NULL, BLIST_NOREPORTLUN},
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 043ab9e..57a3ee0 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2045,11 +2045,13 @@
 		q->limits.cluster = 0;
 
 	/*
-	 * set a reasonable default alignment on word boundaries: the
-	 * host and device may alter it using
-	 * blk_queue_update_dma_alignment() later.
+	 * Set a reasonable default alignment:  The larger of 32-byte (dword),
+	 * which is a common minimum for HBAs, and the minimum DMA alignment,
+	 * which is set by the platform.
+	 *
+	 * Devices that require a bigger alignment can increase it later.
 	 */
-	blk_queue_dma_alignment(q, 0x03);
+	blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1);
 }
 
 struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 02823a7..4fb494a 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -234,11 +234,15 @@
 {
 	struct scsi_disk *sdkp = to_scsi_disk(dev);
 	struct scsi_device *sdp = sdkp->device;
+	bool v;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EACCES;
 
-	sdp->manage_start_stop = simple_strtoul(buf, NULL, 10);
+	if (kstrtobool(buf, &v))
+		return -EINVAL;
+
+	sdp->manage_start_stop = v;
 
 	return count;
 }
@@ -256,6 +260,7 @@
 allow_restart_store(struct device *dev, struct device_attribute *attr,
 		    const char *buf, size_t count)
 {
+	bool v;
 	struct scsi_disk *sdkp = to_scsi_disk(dev);
 	struct scsi_device *sdp = sdkp->device;
 
@@ -265,7 +270,10 @@
 	if (sdp->type != TYPE_DISK)
 		return -EINVAL;
 
-	sdp->allow_restart = simple_strtoul(buf, NULL, 10);
+	if (kstrtobool(buf, &v))
+		return -EINVAL;
+
+	sdp->allow_restart = v;
 
 	return count;
 }
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index db4e7bb..1b283b2 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -83,8 +83,6 @@
 	tristate "QCOM specific hooks to UFS controller platform driver"
 	depends on SCSI_UFSHCD_PLATFORM && ARCH_QCOM
 	select PHY_QCOM_UFS
-	select EXTCON
-	select EXTCON_GPIO
 	help
 	  This selects the QCOM specific additions to UFSHCD platform driver.
 	  UFS host on QCOM needs some vendor specific configuration before
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
index a5f1093..e929f51 100644
--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -522,7 +522,7 @@
 	u32 int_en_all;
 	u32 spi_w;
 	u32 wdt_src;
-	int has_bridge:1;
+	unsigned int has_bridge:1;
 	int (*init_reg_clock)(struct pmic_wrapper *wrp);
 	int (*init_soc_specific)(struct pmic_wrapper *wrp);
 };
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 31cf232..c3b2ca8 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -774,3 +774,5 @@
 	  The driver will help route diag traffic from modem side over the QDSS
 	  sub-system to USB on APSS side. The driver acts as a bridge between the
 	  MHI and USB interface. If unsure, say N.
+
+source "drivers/soc/qcom/wcnss/Kconfig"
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index dc641ef..0255761 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -75,6 +75,7 @@
        obj-y += subsystem_notif.o
        obj-y += subsystem_restart.o
        obj-y += ramdump.o
+       obj-y += microdump_collector.o
 endif
 obj-$(CONFIG_MSM_JTAGV8) += jtagv8.o jtagv8-etm.o
 obj-$(CONFIG_QCOM_COMMAND_DB) += cmd-db.o
@@ -87,8 +88,12 @@
 ifdef CONFIG_MSM_RPM_SMD
 	obj-$(CONFIG_QTI_RPM_STATS_LOG) += rpm_master_stat.o
 endif
+ifdef CONFIG_QTI_RPMH_API
+	obj-$(CONFIG_QTI_RPM_STATS_LOG) += rpmh_master_stat.o
+endif
 obj-$(CONFIG_QCOM_SMCINVOKE) += smcinvoke.o
 obj-$(CONFIG_QMP_DEBUGFS_CLIENT) += qmp-debugfs-client.o
 obj-$(CONFIG_MSM_REMOTEQDSS) += remoteqdss.o
 obj-$(CONFIG_QSEE_IPC_IRQ_BRIDGE) += qsee_ipc_irq_bridge.o
 obj-$(CONFIG_QCOM_QDSS_BRIDGE) += qdss_bridge.o
+obj-$(CONFIG_WCNSS_CORE) += wcnss/
diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c
index 72abf50..c9dc547 100644
--- a/drivers/soc/qcom/cmd-db.c
+++ b/drivers/soc/qcom/cmd-db.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -354,6 +354,7 @@
 	dict = of_iomap(pdev->dev.of_node, 0);
 	if (!dict) {
 		cmd_db_status = -ENOMEM;
+		pr_err("Command DB dictionary addr not found.\n");
 		goto failed;
 	}
 
@@ -373,6 +374,7 @@
 
 	if (!cmd_db_header) {
 		cmd_db_status = -ENOMEM;
+		pr_err("Command DB header not found.\n");
 		goto failed;
 	}
 
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index d8cc2c4..59897ea 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -376,7 +376,7 @@
 
 static struct channel_ctx *ch_name_to_ch_ctx_create(
 					struct glink_core_xprt_ctx *xprt_ctx,
-					const char *name);
+					const char *name, bool local);
 
 static void ch_push_remote_rx_intent(struct channel_ctx *ctx, size_t size,
 					uint32_t riid, void *cookie);
@@ -1836,13 +1836,14 @@
  *                              it is not found and get reference of context.
  * @xprt_ctx:	Transport to search for a matching channel.
  * @name:	Name of the desired channel.
+ * @local:	If called from local open or not
  *
  * Return: The channel corresponding to @name, NULL if a matching channel was
  *         not found AND a new channel could not be created.
  */
 static struct channel_ctx *ch_name_to_ch_ctx_create(
 					struct glink_core_xprt_ctx *xprt_ctx,
-					const char *name)
+					const char *name, bool local)
 {
 	struct channel_ctx *entry;
 	struct channel_ctx *ctx;
@@ -1886,10 +1887,23 @@
 	list_for_each_entry_safe(entry, temp, &xprt_ctx->channels,
 		    port_list_node)
 		if (!strcmp(entry->name, name) && !entry->pending_delete) {
+			rwref_get(&entry->ch_state_lhb2);
+			/* port already exists */
+			if (entry->local_open_state != GLINK_CHANNEL_CLOSED
+								&& local) {
+				/* not ready to be re-opened */
+				GLINK_INFO_CH_XPRT(entry, xprt_ctx,
+					"%s: Ch not ready. State: %u\n",
+					__func__, entry->local_open_state);
+				rwref_put(&entry->ch_state_lhb2);
+				entry = NULL;
+			} else if (local) {
+				entry->local_open_state =
+						GLINK_CHANNEL_OPENING;
+			}
 			spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1,
 					flags);
 			kfree(ctx);
-			rwref_get(&entry->ch_state_lhb2);
 			rwref_write_put(&xprt_ctx->xprt_state_lhb0);
 			return entry;
 		}
@@ -1919,6 +1933,8 @@
 
 		ctx->transport_ptr = xprt_ctx;
 		rwref_get(&ctx->ch_state_lhb2);
+		if (local)
+			ctx->local_open_state = GLINK_CHANNEL_OPENING;
 		list_add_tail(&ctx->port_list_node, &xprt_ctx->channels);
 
 		GLINK_INFO_PERF_CH_XPRT(ctx, xprt_ctx,
@@ -2604,23 +2620,13 @@
 	 * look for an existing port structure which can occur in
 	 * reopen and remote-open-first cases
 	 */
-	ctx = ch_name_to_ch_ctx_create(transport_ptr, cfg->name);
+	ctx = ch_name_to_ch_ctx_create(transport_ptr, cfg->name, true);
 	if (ctx == NULL) {
 		GLINK_ERR("%s:%s %s: Error - unable to allocate new channel\n",
 				cfg->transport, cfg->edge, __func__);
 		return ERR_PTR(-ENOMEM);
 	}
 
-	/* port already exists */
-	if (ctx->local_open_state != GLINK_CHANNEL_CLOSED) {
-		/* not ready to be re-opened */
-		GLINK_INFO_CH_XPRT(ctx, transport_ptr,
-		"%s: Channel not ready to be re-opened. State: %u\n",
-		__func__, ctx->local_open_state);
-		rwref_put(&ctx->ch_state_lhb2);
-		return ERR_PTR(-EBUSY);
-	}
-
 	/* initialize port structure */
 	ctx->user_priv = cfg->priv;
 	ctx->rx_intent_req_timeout_jiffies =
@@ -2651,7 +2657,6 @@
 	ctx->local_xprt_req = best_id;
 	ctx->no_migrate = cfg->transport &&
 				!(cfg->options & GLINK_OPT_INITIAL_XPORT);
-	ctx->local_open_state = GLINK_CHANNEL_OPENING;
 	GLINK_INFO_PERF_CH(ctx,
 		"%s: local:GLINK_CHANNEL_CLOSED->GLINK_CHANNEL_OPENING\n",
 		__func__);
@@ -4912,7 +4917,7 @@
 	bool do_migrate;
 
 	glink_core_migration_edge_lock(if_ptr->glink_core_priv);
-	ctx = ch_name_to_ch_ctx_create(if_ptr->glink_core_priv, name);
+	ctx = ch_name_to_ch_ctx_create(if_ptr->glink_core_priv, name, false);
 	if (ctx == NULL) {
 		GLINK_ERR_XPRT(if_ptr->glink_core_priv,
 		       "%s: invalid rcid %u received, name '%s'\n",
@@ -5015,6 +5020,7 @@
 	struct channel_ctx *ctx;
 	bool is_ch_fully_closed;
 	struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv;
+	unsigned long flags;
 
 	ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
 	if (!ctx) {
@@ -5032,11 +5038,13 @@
 		rwref_put(&ctx->ch_state_lhb2);
 		return;
 	}
+	spin_lock_irqsave(&ctx->transport_ptr->xprt_ctx_lock_lhb1, flags);
+	ctx->pending_delete = true;
+	spin_unlock_irqrestore(&ctx->transport_ptr->xprt_ctx_lock_lhb1, flags);
 	GLINK_INFO_CH(ctx, "%s: remote: OPENED->CLOSED\n", __func__);
 
 	is_ch_fully_closed = glink_core_remote_close_common(ctx, false);
 
-	ctx->pending_delete = true;
 	if_ptr->tx_cmd_ch_remote_close_ack(if_ptr, rcid);
 
 	if (is_ch_fully_closed) {
diff --git a/drivers/soc/qcom/glink_ssr.c b/drivers/soc/qcom/glink_ssr.c
index dd436da..239f2c1 100644
--- a/drivers/soc/qcom/glink_ssr.c
+++ b/drivers/soc/qcom/glink_ssr.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -29,6 +29,7 @@
 #define GLINK_SSR_EVENT_INIT ~0
 #define NUM_LOG_PAGES 3
 
+#define GLINK_SSR_PRIORITY 1
 #define GLINK_SSR_LOG(x...) do { \
 	if (glink_ssr_log_ctx) \
 		ipc_log_string(glink_ssr_log_ctx, x); \
@@ -596,25 +597,6 @@
 		strlcpy(do_cleanup_data->name, ss_info->edge,
 				do_cleanup_data->name_len + 1);
 
-		ret = glink_queue_rx_intent(handle, do_cleanup_data,
-				sizeof(struct cleanup_done_msg));
-		if (ret) {
-			GLINK_SSR_ERR(
-				"%s %s: %s, ret[%d], resp. remaining[%d]\n",
-				"<SSR>", __func__,
-				"queue_rx_intent failed", ret,
-				atomic_read(&responses_remaining));
-			kfree(do_cleanup_data);
-
-			if (!strcmp(ss_leaf_entry->ssr_name, "rpm"))
-				panic("%s: Could not queue intent for RPM!\n",
-						__func__);
-			atomic_dec(&responses_remaining);
-			kref_put(&ss_leaf_entry->cb_data->cb_kref,
-							cb_data_release);
-			continue;
-		}
-
 		if (strcmp(ss_leaf_entry->ssr_name, "rpm"))
 			ret = glink_tx(handle, do_cleanup_data,
 					do_cleanup_data,
@@ -640,6 +622,24 @@
 							cb_data_release);
 			continue;
 		}
+		ret = glink_queue_rx_intent(handle, do_cleanup_data,
+				sizeof(struct cleanup_done_msg));
+		if (ret) {
+			GLINK_SSR_ERR(
+				"%s %s: %s, ret[%d], resp. remaining[%d]\n",
+				"<SSR>", __func__,
+				"queue_rx_intent failed", ret,
+				atomic_read(&responses_remaining));
+			kfree(do_cleanup_data);
+
+			if (!strcmp(ss_leaf_entry->ssr_name, "rpm"))
+				panic("%s: Could not queue intent for RPM!\n",
+						__func__);
+			atomic_dec(&responses_remaining);
+			kref_put(&ss_leaf_entry->cb_data->cb_kref,
+							cb_data_release);
+			continue;
+		}
 		sequence_number++;
 		kref_put(&ss_leaf_entry->cb_data->cb_kref, cb_data_release);
 	}
@@ -946,6 +946,7 @@
 
 	nb->subsystem = subsys_name;
 	nb->nb.notifier_call = glink_ssr_restart_notifier_cb;
+	nb->nb.priority = GLINK_SSR_PRIORITY;
 
 	handle = subsys_notif_register_notifier(nb->subsystem, &nb->nb);
 	if (IS_ERR_OR_NULL(handle)) {
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 649d0ff..e3a50e3 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -463,6 +463,7 @@
 	struct ramdump_device *msa0_dump_dev;
 	bool bypass_s1_smmu;
 	bool force_err_fatal;
+	bool allow_recursive_recovery;
 	u8 cause_for_rejuvenation;
 	u8 requesting_sub_system;
 	u16 line_number;
@@ -1110,6 +1111,9 @@
 	if (test_bit(HW_ALWAYS_ON, &quirks))
 		return 0;
 
+	if (test_bit(ICNSS_FW_DOWN, &priv->state))
+		return 0;
+
 	icnss_pr_dbg("HW Power off: 0x%lx\n", priv->state);
 
 	spin_lock(&priv->on_off_lock);
@@ -2230,7 +2234,8 @@
 	if (ret < 0) {
 		icnss_pr_err("Driver reinit failed: %d, state: 0x%lx\n",
 			     ret, priv->state);
-		ICNSS_ASSERT(false);
+		if (!priv->allow_recursive_recovery)
+			ICNSS_ASSERT(false);
 		goto out_power_off;
 	}
 
@@ -2257,8 +2262,6 @@
 
 	set_bit(ICNSS_FW_READY, &penv->state);
 
-	icnss_call_driver_uevent(penv, ICNSS_UEVENT_FW_READY, NULL);
-
 	icnss_pr_info("WLAN FW is ready: 0x%lx\n", penv->state);
 
 	icnss_hw_power_off(penv);
@@ -2351,29 +2354,6 @@
 	return 0;
 }
 
-static int icnss_call_driver_remove(struct icnss_priv *priv)
-{
-	icnss_pr_dbg("Calling driver remove state: 0x%lx\n", priv->state);
-
-	clear_bit(ICNSS_FW_READY, &priv->state);
-
-	if (!test_bit(ICNSS_DRIVER_PROBED, &penv->state))
-		return 0;
-
-	if (!priv->ops || !priv->ops->remove)
-		return 0;
-
-	set_bit(ICNSS_DRIVER_UNLOADING, &penv->state);
-	penv->ops->remove(&priv->pdev->dev);
-
-	clear_bit(ICNSS_DRIVER_UNLOADING, &penv->state);
-	clear_bit(ICNSS_DRIVER_PROBED, &priv->state);
-
-	icnss_hw_power_off(penv);
-
-	return 0;
-}
-
 static int icnss_fw_crashed(struct icnss_priv *priv,
 			    struct icnss_event_pd_service_down_data *event_data)
 {
@@ -2405,17 +2385,15 @@
 	if (test_bit(ICNSS_PD_RESTART, &priv->state) && event_data->crashed) {
 		icnss_pr_err("PD Down while recovery inprogress, crashed: %d, state: 0x%lx\n",
 			     event_data->crashed, priv->state);
-		ICNSS_ASSERT(0);
+		if (!priv->allow_recursive_recovery)
+			ICNSS_ASSERT(0);
 		goto out;
 	}
 
 	if (priv->force_err_fatal)
 		ICNSS_ASSERT(0);
 
-	if (event_data->crashed)
-		icnss_fw_crashed(priv, event_data);
-	else
-		icnss_call_driver_remove(priv);
+	icnss_fw_crashed(priv, event_data);
 
 out:
 	kfree(data);
@@ -3098,6 +3076,12 @@
 	if (!dev)
 		return -ENODEV;
 
+	if (test_bit(ICNSS_FW_DOWN, &penv->state)) {
+		icnss_pr_err("FW down, ignoring fw_log_mode state: 0x%lx\n",
+			     penv->state);
+		return -EINVAL;
+	}
+
 	icnss_pr_dbg("FW log mode: %u\n", fw_log_mode);
 
 	ret = wlfw_ini_send_sync_msg(fw_log_mode);
@@ -3191,6 +3175,12 @@
 	if (!dev)
 		return -ENODEV;
 
+	if (test_bit(ICNSS_FW_DOWN, &penv->state)) {
+		icnss_pr_err("FW down, ignoring wlan_enable state: 0x%lx\n",
+			     penv->state);
+		return -EINVAL;
+	}
+
 	icnss_pr_dbg("Mode: %d, config: %p, host_version: %s\n",
 		     mode, config, host_version);
 
@@ -3725,6 +3715,15 @@
 	return ret;
 }
 
+static void icnss_allow_recursive_recovery(struct device *dev)
+{
+	struct icnss_priv *priv = dev_get_drvdata(dev);
+
+	priv->allow_recursive_recovery = true;
+
+	icnss_pr_info("Recursive recovery allowed for WLAN\n");
+}
+
 static ssize_t icnss_fw_debug_write(struct file *fp,
 				    const char __user *user_buf,
 				    size_t count, loff_t *off)
@@ -3773,6 +3772,9 @@
 		case 3:
 			ret = icnss_trigger_recovery(&priv->pdev->dev);
 			break;
+		case 4:
+			icnss_allow_recursive_recovery(&priv->pdev->dev);
+			break;
 		default:
 			return -EINVAL;
 		}
diff --git a/drivers/soc/qcom/microdump_collector.c b/drivers/soc/qcom/microdump_collector.c
new file mode 100644
index 0000000..47f3336
--- /dev/null
+++ b/drivers/soc/qcom/microdump_collector.c
@@ -0,0 +1,159 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/ramdump.h>
+#include <soc/qcom/smem.h>
+
+/*
+ * This program collects the data from SMEM regions whenever the modem crashes
+ * and stores it in /dev/ramdump_microdump_modem so as to expose it to
+ * user space.
+ */
+
+struct microdump_data {
+	struct ramdump_device *microdump_dev;
+	void *microdump_modem_notify_handler;
+	struct notifier_block microdump_modem_ssr_nb;
+};
+
+static struct microdump_data *drv;
+
+static int microdump_modem_notifier_nb(struct notifier_block *nb,
+		unsigned long code, void *data)
+{
+	int ret = 0;
+	unsigned int size_reason = 0, size_data = 0;
+	char *crash_reason = NULL;
+	char *crash_data = NULL;
+	unsigned int smem_id = 611;
+	struct ramdump_segment segment[2];
+
+	if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
+
+		memset(segment, 0, sizeof(segment));
+
+		crash_reason = smem_get_entry(SMEM_SSR_REASON_MSS0,
+				&size_reason, 0, SMEM_ANY_HOST_FLAG);
+		if (IS_ERR_OR_NULL(crash_reason)) {
+			pr_err("%s: Error in getting SMEM_reason pointer\n",
+				__func__);
+			return -ENODEV;
+		}
+
+		segment[0].v_address = crash_reason;
+		segment[0].size = size_reason;
+
+		crash_data = smem_get_entry(smem_id, &size_data, SMEM_MODEM, 0);
+		if (IS_ERR_OR_NULL(crash_data)) {
+			pr_err("%s: Error in getting SMEM_data pointer\n",
+				__func__);
+			return -ENODEV;
+		}
+
+		segment[1].v_address = crash_data;
+		segment[1].size = size_data;
+
+		ret = do_ramdump(drv->microdump_dev, segment, 2);
+		if (ret)
+			pr_err("%s: do_ramdump() failed\n", __func__);
+	}
+
+	return ret;
+}
+
+static int microdump_modem_ssr_register_notifier(struct microdump_data *drv)
+{
+	int ret = 0;
+
+	drv->microdump_modem_ssr_nb.notifier_call = microdump_modem_notifier_nb;
+
+	drv->microdump_modem_notify_handler =
+		subsys_notif_register_notifier("modem",
+			&drv->microdump_modem_ssr_nb);
+
+	if (IS_ERR(drv->microdump_modem_notify_handler)) {
+		pr_err("Modem register notifier failed: %ld\n",
+			PTR_ERR(drv->microdump_modem_notify_handler));
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static void microdump_modem_ssr_unregister_notifier(struct microdump_data *drv)
+{
+	subsys_notif_unregister_notifier(drv->microdump_modem_notify_handler,
+					&drv->microdump_modem_ssr_nb);
+	drv->microdump_modem_notify_handler = NULL;
+}
+
+/*
+ * microdump_init() - Registers kernel module for microdump collector
+ *
+ * Creates device file /dev/ramdump_microdump_modem and registers handler for
+ * modem SSR events.
+ *
+ * Returns 0 on success and negative error code in case of errors
+ */
+static int __init microdump_init(void)
+{
+	int ret = -ENOMEM;
+
+	drv = kzalloc(sizeof(struct microdump_data), GFP_KERNEL);
+	if (!drv)
+		goto out;
+
+	drv->microdump_dev = create_ramdump_device("microdump_modem", NULL);
+	if (!drv->microdump_dev) {
+		pr_err("%s: Unable to create a microdump_modem ramdump device\n",
+			__func__);
+		ret = -ENODEV;
+		goto out_kfree;
+	}
+
+	ret = microdump_modem_ssr_register_notifier(drv);
+	if (ret) {
+		destroy_ramdump_device(drv->microdump_dev);
+		goto out_kfree;
+	}
+	return ret;
+out_kfree:
+	pr_err("%s: Failed to register microdump collector\n", __func__);
+	kfree(drv);
+	drv = NULL;
+out:
+	return ret;
+}
+
+static void __exit microdump_exit(void)
+{
+	if (!drv)
+		return;
+
+	if (!IS_ERR(drv->microdump_modem_notify_handler))
+		microdump_modem_ssr_unregister_notifier(drv);
+
+	if (drv->microdump_dev)
+		destroy_ramdump_device(drv->microdump_dev);
+
+	kfree(drv);
+}
+
+module_init(microdump_init);
+module_exit(microdump_exit);
+
+MODULE_DESCRIPTION("Microdump Collector");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/minidump_log.c b/drivers/soc/qcom/minidump_log.c
index c65dfd9..87e1700 100644
--- a/drivers/soc/qcom/minidump_log.c
+++ b/drivers/soc/qcom/minidump_log.c
@@ -76,6 +76,9 @@
 	struct md_region ksp_entry, ktsk_entry;
 	u32 cpu = smp_processor_id();
 
+	if (is_idle_task(current))
+		return;
+
 	if (sp < KIMAGE_VADDR || sp > -256UL)
 		sp = current_stack_pointer;
 
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
index 5a110bb..c00749c 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -550,9 +550,9 @@
 	struct tcs_cmd *cmdlist_sleep = NULL;
 	struct rpmh_client *cur_mbox = NULL;
 	struct list_head *cur_bcm_clist = NULL;
-	int *n_active = NULL;
-	int *n_wake = NULL;
-	int *n_sleep = NULL;
+	int n_active[VCD_MAX_CNT];
+	int n_wake[VCD_MAX_CNT];
+	int n_sleep[VCD_MAX_CNT];
 	int cnt_vcd = 0;
 	int cnt_active = 0;
 	int cnt_wake = 0;
@@ -573,8 +573,15 @@
 
 	cur_mbox = cur_rsc->rscdev->mbox;
 	cur_bcm_clist = cur_rsc->rscdev->bcm_clist;
+	cmdlist_active = cur_rsc->rscdev->cmdlist_active;
+	cmdlist_wake = cur_rsc->rscdev->cmdlist_wake;
+	cmdlist_sleep = cur_rsc->rscdev->cmdlist_sleep;
 
 	for (i = 0; i < VCD_MAX_CNT; i++) {
+		n_active[i] = 0;
+		n_wake[i] = 0;
+		n_sleep[i] = 0;
+
 		if (list_empty(&cur_bcm_clist[i]))
 			continue;
 		list_for_each_entry(cur_bcm, &cur_bcm_clist[i], link) {
@@ -600,27 +607,6 @@
 	if (!cnt_active)
 		goto exit_msm_bus_commit_data;
 
-	n_active = kcalloc(cnt_vcd+1, sizeof(int), GFP_KERNEL);
-	if (!n_active)
-		return -ENOMEM;
-
-	n_wake = kcalloc(cnt_vcd+1, sizeof(int), GFP_KERNEL);
-	if (!n_wake)
-		return -ENOMEM;
-
-	n_sleep = kcalloc(cnt_vcd+1, sizeof(int), GFP_KERNEL);
-	if (!n_sleep)
-		return -ENOMEM;
-
-	if (cnt_active)
-		cmdlist_active = kcalloc(cnt_active, sizeof(struct tcs_cmd),
-								GFP_KERNEL);
-	if (cnt_sleep && cnt_wake) {
-		cmdlist_wake = kcalloc(cnt_wake, sizeof(struct tcs_cmd),
-								GFP_KERNEL);
-		cmdlist_sleep = kcalloc(cnt_sleep, sizeof(struct tcs_cmd),
-								GFP_KERNEL);
-	}
 	bcm_cnt = tcs_cmd_list_gen(n_active, n_wake, n_sleep, cmdlist_active,
 				cmdlist_wake, cmdlist_sleep, cur_bcm_clist);
 
@@ -654,8 +640,6 @@
 		if (ret)
 			MSM_BUS_ERR("%s: error sending wake sets: %d\n",
 							__func__, ret);
-		kfree(n_wake);
-		kfree(cmdlist_wake);
 	}
 	if (cnt_sleep) {
 		ret = rpmh_write_batch(cur_mbox, RPMH_SLEEP_STATE,
@@ -663,14 +647,8 @@
 		if (ret)
 			MSM_BUS_ERR("%s: error sending sleep sets: %d\n",
 							__func__, ret);
-		kfree(n_sleep);
-		kfree(cmdlist_sleep);
 	}
 
-	kfree(cmdlist_active);
-	kfree(n_active);
-
-
 	list_for_each_entry_safe(node, node_tmp, clist, link) {
 		if (unlikely(node->node_info->defer_qos))
 			msm_bus_dev_init_qos(&node->dev, NULL);
@@ -1168,6 +1146,41 @@
 	return ret;
 }
 
+static int msm_bus_postcon_setup(struct device *bus_dev, void *data)
+{
+	struct msm_bus_node_device_type *bus_node = NULL;
+	struct msm_bus_rsc_device_type *rscdev;
+
+	bus_node = to_msm_bus_node(bus_dev);
+	if (!bus_node) {
+		MSM_BUS_ERR("%s: Can't get device info", __func__);
+		return -ENODEV;
+	}
+
+	if (bus_node->node_info->is_rsc_dev) {
+		rscdev = bus_node->rscdev;
+		rscdev->cmdlist_active = devm_kcalloc(bus_dev,
+					rscdev->num_bcm_devs,
+					sizeof(struct tcs_cmd), GFP_KERNEL);
+		if (!rscdev->cmdlist_active)
+			return -ENOMEM;
+
+		rscdev->cmdlist_wake = devm_kcalloc(bus_dev,
+					rscdev->num_bcm_devs,
+					sizeof(struct tcs_cmd), GFP_KERNEL);
+		if (!rscdev->cmdlist_wake)
+			return -ENOMEM;
+
+		rscdev->cmdlist_sleep = devm_kcalloc(bus_dev,
+					rscdev->num_bcm_devs,
+					sizeof(struct tcs_cmd), GFP_KERNEL);
+		if (!rscdev->cmdlist_sleep)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
 static int msm_bus_init_clk(struct device *bus_dev,
 				struct msm_bus_node_device_type *pdata)
 {
@@ -1641,6 +1654,7 @@
 			goto exit_setup_dev_conn;
 		}
 		rsc_node = to_msm_bus_node(bus_node->node_info->rsc_devs[j]);
+		rsc_node->rscdev->num_bcm_devs++;
 	}
 
 exit_setup_dev_conn:
@@ -1771,6 +1785,13 @@
 		goto exit_device_probe;
 	}
 
+	ret = bus_for_each_dev(&msm_bus_type, NULL, NULL,
+						msm_bus_postcon_setup);
+	if (ret) {
+		MSM_BUS_ERR("%s: Error post connection setup", __func__);
+		goto exit_device_probe;
+	}
+
 	/*
 	 * Setup the QoS for the nodes, don't check the error codes as we
 	 * defer QoS programming to the first transaction in cases of failure
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
index 8929959..b023f72 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
+++ b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -88,6 +88,10 @@
 	int req_state;
 	uint32_t acv[NUM_CTX];
 	uint32_t query_acv[NUM_CTX];
+	struct tcs_cmd *cmdlist_active;
+	struct tcs_cmd *cmdlist_wake;
+	struct tcs_cmd *cmdlist_sleep;
+	int num_bcm_devs;
 };
 
 struct msm_bus_bcm_device_type {
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
index 0efd287..3b6c0bd 100644
--- a/drivers/soc/qcom/peripheral-loader.c
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -35,6 +35,7 @@
 #include <soc/qcom/ramdump.h>
 #include <soc/qcom/subsystem_restart.h>
 #include <soc/qcom/secure_buffer.h>
+#include <soc/qcom/smem.h>
 
 #include <linux/uaccess.h>
 #include <asm/setup.h>
@@ -55,10 +56,9 @@
 #endif
 
 #define PIL_NUM_DESC		10
-#define NUM_OF_ENCRYPTED_KEY	3
 #define MAX_LEN 96
 static void __iomem *pil_info_base;
-static void __iomem *pil_minidump_base;
+static struct md_global_toc *g_md_toc;
 
 /**
  * proxy_timeout - Override for proxy vote timeouts
@@ -81,18 +81,6 @@
 };
 
 /**
- * struct boot_minidump_smem_region - Representation of SMEM TOC
- * @region_name: Name of modem segment to be dumped
- * @region_base_address: Where segment start from
- * @region_size: Size of segment to be dumped
- */
-struct boot_minidump_smem_region {
-	char region_name[16];
-	u64 region_base_address;
-	u64 region_size;
-};
-
-/**
  * struct pil_seg - memory map representing one segment
  * @next: points to next seg mentor NULL if last segment
  * @paddr: physical start address of segment
@@ -146,8 +134,6 @@
 	phys_addr_t region_end;
 	void *region;
 	struct pil_image_info __iomem *info;
-	struct md_ssr_ss_info __iomem *minidump;
-	int minidump_id;
 	int id;
 	int unvoted_flag;
 	size_t region_size;
@@ -155,24 +141,27 @@
 
 static int pil_do_minidump(struct pil_desc *desc, void *ramdump_dev)
 {
-	struct boot_minidump_smem_region __iomem *region_info;
+	struct md_ss_region __iomem *region_info;
 	struct ramdump_segment *ramdump_segs, *s;
 	struct pil_priv *priv = desc->priv;
-	void __iomem *subsys_smem_base;
+	void __iomem *subsys_segtable_base;
+	u64 ss_region_ptr = 0;
 	void __iomem *offset;
 	int ss_mdump_seg_cnt;
+	int ss_valid_seg_cnt;
 	int ret, i;
 
-	memcpy(&offset, &priv->minidump, sizeof(priv->minidump));
-	offset = offset + sizeof(priv->minidump->md_ss_smem_regions_baseptr);
-	/* There are 3 encryption keys which also need to be dumped */
-	ss_mdump_seg_cnt = readb_relaxed(offset) +
-				NUM_OF_ENCRYPTED_KEY;
-
-	subsys_smem_base = ioremap(__raw_readl(priv->minidump),
-				   ss_mdump_seg_cnt * sizeof(*region_info));
-	region_info =
-		(struct boot_minidump_smem_region __iomem *)subsys_smem_base;
+	ss_region_ptr = desc->minidump->md_ss_smem_regions_baseptr;
+	if (!ramdump_dev)
+		return -ENODEV;
+	ss_mdump_seg_cnt = desc->minidump->ss_region_count;
+	subsys_segtable_base =
+		ioremap((unsigned long)ss_region_ptr,
+		ss_mdump_seg_cnt * sizeof(struct md_ss_region));
+	region_info = (struct md_ss_region __iomem *)subsys_segtable_base;
+	if (!region_info)
+		return -EINVAL;
+	pr_info("Minidump : Segments in minidump 0x%x\n", ss_mdump_seg_cnt);
 	ramdump_segs = kcalloc(ss_mdump_seg_cnt,
 			       sizeof(*ramdump_segs), GFP_KERNEL);
 	if (!ramdump_segs)
@@ -183,23 +172,30 @@
 			(priv->region_end - priv->region_start));
 
 	s = ramdump_segs;
+	ss_valid_seg_cnt = ss_mdump_seg_cnt;
 	for (i = 0; i < ss_mdump_seg_cnt; i++) {
 		memcpy(&offset, &region_info, sizeof(region_info));
-		memcpy(&s->name, &region_info, sizeof(region_info));
-		offset = offset + sizeof(region_info->region_name);
-		s->address = __raw_readl(offset);
-		offset = offset + sizeof(region_info->region_base_address);
-		s->size = __raw_readl(offset);
+		offset = offset + sizeof(region_info->name) +
+				sizeof(region_info->seq_num);
+		if (__raw_readl(offset) == MD_REGION_VALID) {
+			memcpy(&s->name, &region_info, sizeof(region_info));
+			offset = offset + sizeof(region_info->md_valid);
+			s->address = __raw_readl(offset);
+			offset = offset +
+				sizeof(region_info->region_base_address);
+			s->size = __raw_readl(offset);
+			pr_info("Minidump : Dumping segment %s with address 0x%lx and size 0x%x\n",
+				s->name, s->address, (unsigned int)s->size);
+		} else
+			ss_valid_seg_cnt--;
 		s++;
 		region_info++;
 	}
-	ret = do_minidump(ramdump_dev, ramdump_segs, ss_mdump_seg_cnt);
+	ret = do_minidump(ramdump_dev, ramdump_segs, ss_valid_seg_cnt);
 	kfree(ramdump_segs);
 	if (ret)
-		pil_err(desc, "%s: Ramdump collection failed for subsys %s rc:%d\n",
+		pil_err(desc, "%s: Minidump collection failed for subsys %s rc:%d\n",
 			__func__, desc->name, ret);
-	writel_relaxed(0, &priv->minidump->md_ss_smem_regions_baseptr);
-	writeb_relaxed(1, &priv->minidump->md_ss_ssr_cause);
 
 	if (desc->subsys_vmid > 0)
 		ret = pil_assign_mem_to_subsys(desc, priv->region_start,
@@ -215,16 +211,45 @@
  * Calls the ramdump API with a list of segments generated from the addresses
  * that the descriptor corresponds to.
  */
-int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev)
+int pil_do_ramdump(struct pil_desc *desc,
+		   void *ramdump_dev, void *minidump_dev)
 {
+	struct ramdump_segment *ramdump_segs, *s;
 	struct pil_priv *priv = desc->priv;
 	struct pil_seg *seg;
 	int count = 0, ret;
-	struct ramdump_segment *ramdump_segs, *s;
 
-	if (priv->minidump && (__raw_readl(priv->minidump) > 0))
-		return pil_do_minidump(desc, ramdump_dev);
-
+	if (desc->minidump) {
+		pr_info("Minidump : md_ss_toc->md_ss_toc_init is 0x%x\n",
+			(unsigned int)desc->minidump->md_ss_toc_init);
+		pr_info("Minidump : md_ss_toc->md_ss_enable_status is 0x%x\n",
+			(unsigned int)desc->minidump->md_ss_enable_status);
+		pr_info("Minidump : md_ss_toc->encryption_status is 0x%x\n",
+			(unsigned int)desc->minidump->encryption_status);
+		pr_info("Minidump : md_ss_toc->ss_region_count is 0x%x\n",
+			(unsigned int)desc->minidump->ss_region_count);
+		pr_info("Minidump : md_ss_toc->md_ss_smem_regions_baseptr is 0x%x\n",
+			(unsigned int)
+			desc->minidump->md_ss_smem_regions_baseptr);
+		/*
+		 * Collect minidump if SS ToC is valid and segment table
+		 * is initialized in memory and encryption status is set.
+		 */
+		if ((desc->minidump->md_ss_smem_regions_baseptr != 0) &&
+			(desc->minidump->md_ss_toc_init == true) &&
+			(desc->minidump->md_ss_enable_status ==
+				MD_SS_ENABLED)) {
+			if (desc->minidump->encryption_status ==
+				MD_SS_ENCR_DONE) {
+				pr_info("Minidump : Dumping for %s\n",
+					desc->name);
+				return pil_do_minidump(desc, minidump_dev);
+			}
+			pr_info("Minidump : aborted for %s\n", desc->name);
+			return -EINVAL;
+		}
+	}
+	pr_debug("Continuing with full SSR dump for %s\n", desc->name);
 	list_for_each_entry(seg, &priv->segs, list)
 		count++;
 
@@ -1127,7 +1152,8 @@
 {
 	struct pil_priv *priv;
 	void __iomem *addr;
-	int ret, ss_imem_offset_mdump;
+	void *ss_toc_addr;
+	int ret;
 	char buf[sizeof(priv->info->name)];
 	struct device_node *ofnode = desc->dev->of_node;
 
@@ -1153,19 +1179,15 @@
 		__iowrite32_copy(priv->info->name, buf, sizeof(buf) / 4);
 	}
 	if (of_property_read_u32(ofnode, "qcom,minidump-id",
-		&priv->minidump_id))
-		pr_debug("minidump-id not found for %s\n", desc->name);
+		&desc->minidump_id))
+		pr_err("minidump-id not found for %s\n", desc->name);
 	else {
-		ss_imem_offset_mdump =
-			sizeof(struct md_ssr_ss_info) * priv->minidump_id;
-		if (pil_minidump_base) {
-			/* Add 0x4 to get start of struct md_ssr_ss_info base
-			 * from struct md_ssr_toc for any subsystem,
-			 * struct md_ssr_ss_info is actually the pointer
-			 * of ToC in smem for any subsystem.
-			 */
-			addr = pil_minidump_base + ss_imem_offset_mdump + 0x4;
-			priv->minidump = (struct md_ssr_ss_info __iomem *)addr;
+		if (g_md_toc && g_md_toc->md_toc_init == true) {
+			ss_toc_addr = &g_md_toc->md_ss_toc[desc->minidump_id];
+			pr_debug("Minidump : ss_toc_addr is %pa and desc->minidump_id is %d\n",
+				&ss_toc_addr, desc->minidump_id);
+			memcpy(&desc->minidump, &ss_toc_addr,
+			       sizeof(ss_toc_addr));
 		}
 	}
 
@@ -1254,6 +1276,7 @@
 	struct device_node *np;
 	struct resource res;
 	int i;
+	unsigned int size;
 
 	np = of_find_compatible_node(NULL, NULL, "qcom,msm-imem-pil");
 	if (!np) {
@@ -1276,20 +1299,14 @@
 	for (i = 0; i < resource_size(&res)/sizeof(u32); i++)
 		writel_relaxed(0, pil_info_base + (i * sizeof(u32)));
 
-	np = of_find_compatible_node(NULL, NULL, "qcom,msm-imem-minidump");
-	if (!np) {
-		pr_warn("pil: failed to find qcom,msm-imem-minidump node\n");
-		goto out;
-	} else {
-		pil_minidump_base = of_iomap(np, 0);
-		if (!pil_minidump_base) {
-			pr_err("unable to map pil minidump imem offset\n");
-			goto out;
-		}
+	/* Get Global minidump ToC */
+	g_md_toc = smem_get_entry(SBL_MINIDUMP_SMEM_ID, &size, 0,
+				  SMEM_ANY_HOST_FLAG);
+	pr_debug("Minidump: g_md_toc is %pa\n", &g_md_toc);
+	if (PTR_ERR(g_md_toc) == -EPROBE_DEFER) {
+		pr_err("SMEM is not initialized.\n");
+		return -EPROBE_DEFER;
 	}
-	for (i = 0; i < sizeof(struct md_ssr_toc)/sizeof(u32); i++)
-		writel_relaxed(0, pil_minidump_base + (i * sizeof(u32)));
-	writel_relaxed(1, pil_minidump_base);
 out:
 	return register_pm_notifier(&pil_pm_notifier);
 }
@@ -1300,8 +1317,6 @@
 	unregister_pm_notifier(&pil_pm_notifier);
 	if (pil_info_base)
 		iounmap(pil_info_base);
-	if (pil_minidump_base)
-		iounmap(pil_minidump_base);
 }
 module_exit(msm_pil_exit);
 
diff --git a/drivers/soc/qcom/peripheral-loader.h b/drivers/soc/qcom/peripheral-loader.h
index 27ed336..78c00fe 100644
--- a/drivers/soc/qcom/peripheral-loader.h
+++ b/drivers/soc/qcom/peripheral-loader.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,7 @@
 
 #include <linux/mailbox_client.h>
 #include <linux/mailbox/qmp.h>
+#include "minidump_private.h"
 
 struct device;
 struct module;
@@ -63,6 +64,8 @@
 	bool signal_aop;
 	struct mbox_client cl;
 	struct mbox_chan *mbox;
+	struct md_ss_toc *minidump;
+	int minidump_id;
 };
 
 /**
@@ -77,34 +80,6 @@
 	__le32 size;
 } __attribute__((__packed__));
 
-#define MAX_NUM_OF_SS 3
-
-/**
- * struct md_ssr_ss_info - Info in imem about smem ToC
- * @md_ss_smem_regions_baseptr: Start physical address of SMEM TOC
- * @md_ss_num_of_regions: number of segments that need to be dumped
- * @md_ss_encryption_status: status of encryption of segments
- * @md_ss_ssr_cause: ssr cause enum
- */
-struct md_ssr_ss_info {
-	u32 md_ss_smem_regions_baseptr;
-	u8 md_ss_num_of_regions;
-	u8 md_ss_encryption_status;
-	u8 md_ss_ssr_cause;
-	u8 reserved;
-};
-
-/**
- * struct md_ssr_toc - Wrapper of struct md_ssr_ss_info
- * @md_ssr_toc_init: flag to indicate to MSS SW about imem init done
- * @md_ssr_ss: Instance of struct md_ssr_ss_info for a subsystem
- */
-struct md_ssr_toc /* Shared IMEM ToC struct */
-{
-	u32 md_ssr_toc_init;
-	struct md_ssr_ss_info	md_ssr_ss[MAX_NUM_OF_SS];
-};
-
 /**
  * struct pil_reset_ops - PIL operations
  * @init_image: prepare an image for authentication
@@ -137,7 +112,8 @@
 extern void pil_free_memory(struct pil_desc *desc);
 extern void pil_desc_release(struct pil_desc *desc);
 extern phys_addr_t pil_get_entry_addr(struct pil_desc *desc);
-extern int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev);
+extern int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev,
+			  void *minidump_dev);
 extern int pil_assign_mem_to_subsys(struct pil_desc *desc, phys_addr_t addr,
 						size_t size);
 extern int pil_assign_mem_to_linux(struct pil_desc *desc, phys_addr_t addr,
@@ -157,7 +133,8 @@
 {
 	return 0;
 }
-static inline int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev)
+static inline int pil_do_ramdump(struct pil_desc *desc,
+		void *ramdump_dev, void *minidump_dev)
 {
 	return 0;
 }
diff --git a/drivers/soc/qcom/pil-msa.c b/drivers/soc/qcom/pil-msa.c
index ce31d66..bc47a95 100644
--- a/drivers/soc/qcom/pil-msa.c
+++ b/drivers/soc/qcom/pil-msa.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -35,6 +35,7 @@
 /* Q6 Register Offsets */
 #define QDSP6SS_RST_EVB			0x010
 #define QDSP6SS_DBG_CFG			0x018
+#define QDSP6SS_NMI_CFG			0x40
 
 /* AXI Halting Registers */
 #define MSS_Q6_HALT_BASE		0x180
@@ -263,6 +264,12 @@
 
 	pil_mss_pdc_sync(drv, 1);
 	pil_mss_alt_reset(drv, 1);
+	if (drv->reset_clk) {
+		pil_mss_disable_clks(drv);
+		if (drv->ahb_clk_vote)
+			clk_disable_unprepare(drv->ahb_clk);
+	}
+
 	ret = pil_mss_restart_reg(drv, true);
 
 	return ret;
@@ -277,6 +284,9 @@
 		return ret;
 	/* Wait 6 32kHz sleep cycles for reset */
 	udelay(200);
+
+	if (drv->reset_clk)
+		pil_mss_enable_clks(drv);
 	pil_mss_alt_reset(drv, 0);
 	pil_mss_pdc_sync(drv, false);
 
@@ -357,10 +367,10 @@
 									ret);
 	}
 
-	pil_mss_assert_resets(drv);
+	pil_mss_restart_reg(drv, true);
 	/* Wait 6 32kHz sleep cycles for reset */
 	udelay(200);
-	ret = pil_mss_deassert_resets(drv);
+	ret = pil_mss_restart_reg(drv, false);
 
 	if (drv->is_booted) {
 		pil_mss_disable_clks(drv);
@@ -402,7 +412,7 @@
 	/* In case of any failure where reclaiming MBA and DP memory
 	 * could not happen, free the memory here
 	 */
-	if (drv->q6->mba_dp_virt) {
+	if (drv->q6->mba_dp_virt && !drv->mba_mem_dev_fixed) {
 		if (pil->subsys_vmid > 0)
 			pil_assign_mem_to_linux(pil, drv->q6->mba_dp_phys,
 						drv->q6->mba_dp_size);
@@ -547,7 +557,7 @@
 {
 	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
 	phys_addr_t start_addr = pil_get_entry_addr(pil);
-	u32 debug_val;
+	u32 debug_val = 0;
 	int ret;
 
 	trace_pil_func(__func__);
@@ -566,8 +576,10 @@
 	if (ret)
 		goto err_clks;
 
-	/* Save state of modem debug register before full reset */
-	debug_val = readl_relaxed(drv->reg_base + QDSP6SS_DBG_CFG);
+	if (!pil->minidump || !pil->modem_ssr) {
+		/* Save state of modem debug register before full reset */
+		debug_val = readl_relaxed(drv->reg_base + QDSP6SS_DBG_CFG);
+	}
 
 	/* Assert reset to subsystem */
 	pil_mss_assert_resets(drv);
@@ -577,9 +589,12 @@
 	if (ret)
 		goto err_restart;
 
-	writel_relaxed(debug_val, drv->reg_base + QDSP6SS_DBG_CFG);
-	if (modem_dbg_cfg)
-		writel_relaxed(modem_dbg_cfg, drv->reg_base + QDSP6SS_DBG_CFG);
+	if (!pil->minidump || !pil->modem_ssr) {
+		writel_relaxed(debug_val, drv->reg_base + QDSP6SS_DBG_CFG);
+		if (modem_dbg_cfg)
+			writel_relaxed(modem_dbg_cfg,
+				drv->reg_base + QDSP6SS_DBG_CFG);
+	}
 
 	/* Program Image Address */
 	if (drv->self_auth) {
@@ -639,7 +654,7 @@
 {
 	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
 	struct modem_data *md = dev_get_drvdata(pil->dev);
-	const struct firmware *fw, *dp_fw = NULL;
+	const struct firmware *fw = NULL, *dp_fw = NULL;
 	char fw_name_legacy[10] = "mba.b00";
 	char fw_name[10] = "mba.mbn";
 	char *dp_name = "msadp";
@@ -651,6 +666,8 @@
 	struct device *dma_dev = md->mba_mem_dev_fixed ?: &md->mba_mem_dev;
 
 	trace_pil_func(__func__);
+	if (drv->mba_dp_virt && md->mba_mem_dev_fixed)
+		goto mss_reset;
 	fw_name_p = drv->non_elf_image ? fw_name_legacy : fw_name;
 	ret = request_firmware(&fw, fw_name_p, pil->dev);
 	if (ret) {
@@ -740,17 +757,19 @@
 			goto err_mba_data;
 		}
 	}
+	if (dp_fw)
+		release_firmware(dp_fw);
+	release_firmware(fw);
+	dp_fw = NULL;
+	fw = NULL;
 
+mss_reset:
 	ret = pil_mss_reset(pil);
 	if (ret) {
 		dev_err(pil->dev, "MBA boot failed(rc:%d)\n", ret);
 		goto err_mss_reset;
 	}
 
-	if (dp_fw)
-		release_firmware(dp_fw);
-	release_firmware(fw);
-
 	return 0;
 
 err_mss_reset:
@@ -763,11 +782,66 @@
 err_invalid_fw:
 	if (dp_fw)
 		release_firmware(dp_fw);
-	release_firmware(fw);
+	if (fw)
+		release_firmware(fw);
 	drv->mba_dp_virt = NULL;
 	return ret;
 }
 
+/*
+ * pil_mss_debug_reset() - Run the modem debug (minidump) reset sequence.
+ * @pil: PIL descriptor for the modem subsystem.
+ *
+ * Raises an NMI to the QDSP6, cycles the subsystem reset and then waits
+ * for the firmware's minidump encryption to complete before returning.
+ * No-op when minidump collection is not enabled on this descriptor.
+ *
+ * Return: 0 on success, negative errno on failure.  On failure the clock
+ * votes taken here are dropped before returning.
+ */
+int pil_mss_debug_reset(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	int ret;
+
+	if (!pil->minidump)
+		return 0;
+	/*
+	 * Bring subsystem out of reset and enable required
+	 * regulators and clocks.
+	 */
+	ret = pil_mss_enable_clks(drv);
+	if (ret)
+		return ret;
+
+	/* minidump is known true past the early return: raise the NMI */
+	writel_relaxed(0x1, drv->reg_base + QDSP6SS_NMI_CFG);
+	/* Let write complete before proceeding */
+	mb();
+	udelay(2);
+	/* Assert reset to subsystem */
+	pil_mss_restart_reg(drv, true);
+	/* Wait 6 32kHz sleep cycles for reset */
+	udelay(200);
+	ret = pil_mss_restart_reg(drv, false);
+	if (ret)
+		goto err_restart;
+	/* Let write complete before proceeding */
+	mb();
+	udelay(200);
+	ret = pil_q6v5_reset(pil);
+	/*
+	 * Need to Wait for timeout for debug reset sequence to
+	 * complete before returning
+	 */
+	pr_info("Minidump: waiting encryption to complete\n");
+	msleep(10000);
+	/* Clear the NMI now that the dump sequence has had time to finish */
+	writel_relaxed(0x2, drv->reg_base + QDSP6SS_NMI_CFG);
+	/* Let write complete before proceeding */
+	mb();
+	udelay(200);
+	if (ret)
+		goto err_restart;
+	return 0;
+err_restart:
+	pil_mss_disable_clks(drv);
+	if (drv->ahb_clk_vote)
+		clk_disable_unprepare(drv->ahb_clk);
+	return ret;
+}
+
 static int pil_msa_auth_modem_mdt(struct pil_desc *pil, const u8 *metadata,
 					size_t size, phys_addr_t region_start,
 					void *region)
@@ -842,10 +916,12 @@
 		if (pil->subsys_vmid > 0)
 			pil_assign_mem_to_linux(pil, drv->q6->mba_dp_phys,
 						drv->q6->mba_dp_size);
-		dma_free_attrs(dma_dev, drv->q6->mba_dp_size,
+		if (drv->q6->mba_dp_virt && !drv->mba_mem_dev_fixed) {
+			dma_free_attrs(dma_dev, drv->q6->mba_dp_size,
 				drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
 				drv->attrs_dma);
-		drv->q6->mba_dp_virt = NULL;
+			drv->q6->mba_dp_virt = NULL;
+		}
 
 	}
 	return ret;
@@ -912,7 +988,7 @@
 	}
 
 	if (drv->q6) {
-		if (drv->q6->mba_dp_virt) {
+		if (drv->q6->mba_dp_virt && !drv->mba_mem_dev_fixed) {
 			/* Reclaim MBA and DP (if allocated) memory. */
 			if (pil->subsys_vmid > 0)
 				pil_assign_mem_to_linux(pil,
diff --git a/drivers/soc/qcom/pil-msa.h b/drivers/soc/qcom/pil-msa.h
index 0f1e75b..a302a14 100644
--- a/drivers/soc/qcom/pil-msa.h
+++ b/drivers/soc/qcom/pil-msa.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -22,6 +22,7 @@
 	struct subsys_device *subsys;
 	struct subsys_desc subsys_desc;
 	void *ramdump_dev;
+	void *minidump_dev;
 	bool crash_shutdown;
 	u32 pas_id;
 	bool ignore_errors;
@@ -46,4 +47,5 @@
 int __pil_mss_deinit_image(struct pil_desc *pil, bool err_path);
 int pil_mss_assert_resets(struct q6v5_data *drv);
 int pil_mss_deassert_resets(struct q6v5_data *drv);
+int pil_mss_debug_reset(struct pil_desc *pil);
 #endif
diff --git a/drivers/soc/qcom/pil-q6v5-mss.c b/drivers/soc/qcom/pil-q6v5-mss.c
index 728a68c..ac322f8 100644
--- a/drivers/soc/qcom/pil-q6v5-mss.c
+++ b/drivers/soc/qcom/pil-q6v5-mss.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -38,7 +38,6 @@
 #define PROXY_TIMEOUT_MS	10000
 #define MAX_SSR_REASON_LEN	256U
 #define STOP_ACK_TIMEOUT_MS	1000
-#define QDSP6SS_NMI_STATUS	0x44
 
 #define subsys_to_drv(d) container_of(d, struct modem_data, subsys_desc)
 
@@ -72,17 +71,12 @@
 static irqreturn_t modem_err_fatal_intr_handler(int irq, void *dev_id)
 {
 	struct modem_data *drv = subsys_to_drv(dev_id);
-	u32 nmi_status = readl_relaxed(drv->q6->reg_base + QDSP6SS_NMI_STATUS);
 
 	/* Ignore if we're the one that set the force stop GPIO */
 	if (drv->crash_shutdown)
 		return IRQ_HANDLED;
 
-	if (nmi_status & 0x04)
-		pr_err("%s: Fatal error on the modem due to TZ NMI\n",
-			__func__);
-	else
-		pr_err("%s: Fatal error on the modem\n", __func__);
+	pr_err("Fatal error on the modem.\n");
 	subsys_set_crash_status(drv->subsys, CRASH_STATUS_ERR_FATAL);
 	restart_modem(drv);
 	return IRQ_HANDLED;
@@ -169,11 +163,21 @@
 	if (ret)
 		return ret;
 
+	ret = pil_mss_debug_reset(&drv->q6->desc);
+	if (ret)
+		return ret;
+
+	pil_mss_remove_proxy_votes(&drv->q6->desc);
+	ret = pil_mss_make_proxy_votes(&drv->q6->desc);
+	if (ret)
+		return ret;
+
 	ret = pil_mss_reset_load_mba(&drv->q6->desc);
 	if (ret)
 		return ret;
 
-	ret = pil_do_ramdump(&drv->q6->desc, drv->ramdump_dev);
+	ret = pil_do_ramdump(&drv->q6->desc,
+			drv->ramdump_dev, drv->minidump_dev);
 	if (ret < 0)
 		pr_err("Unable to dump modem fw memory (rc = %d).\n", ret);
 
@@ -248,9 +252,18 @@
 		ret = -ENOMEM;
 		goto err_ramdump;
 	}
+	drv->minidump_dev = create_ramdump_device("md_modem", &pdev->dev);
+	if (!drv->minidump_dev) {
+		pr_err("%s: Unable to create a modem minidump device.\n",
+			__func__);
+		ret = -ENOMEM;
+		goto err_minidump;
+	}
 
 	return 0;
 
+err_minidump:
+	destroy_ramdump_device(drv->ramdump_dev);
 err_ramdump:
 	subsys_unregister(drv->subsys);
 err_subsys:
@@ -278,6 +291,8 @@
 
 	q6_desc->ops = &pil_msa_mss_ops;
 
+	q6->reset_clk = of_property_read_bool(pdev->dev.of_node,
+							"qcom,reset-clk");
 	q6->self_auth = of_property_read_bool(pdev->dev.of_node,
 							"qcom,pil-self-auth");
 	if (q6->self_auth) {
@@ -424,6 +439,7 @@
 
 	subsys_unregister(drv->subsys);
 	destroy_ramdump_device(drv->ramdump_dev);
+	destroy_ramdump_device(drv->minidump_dev);
 	pil_desc_release(&drv->q6->desc);
 	return 0;
 }
diff --git a/drivers/soc/qcom/pil-q6v5.h b/drivers/soc/qcom/pil-q6v5.h
index 4961b1f..2690bb7 100644
--- a/drivers/soc/qcom/pil-q6v5.h
+++ b/drivers/soc/qcom/pil-q6v5.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -77,6 +77,7 @@
 	int mss_pdc_offset;
 	bool ahb_clk_vote;
 	bool mx_spike_wa;
+	bool reset_clk;
 };
 
 int pil_q6v5_make_proxy_votes(struct pil_desc *pil);
diff --git a/drivers/soc/qcom/qmp-debugfs-client.c b/drivers/soc/qcom/qmp-debugfs-client.c
index 578e7f0..d7a473e 100644
--- a/drivers/soc/qcom/qmp-debugfs-client.c
+++ b/drivers/soc/qcom/qmp-debugfs-client.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -20,37 +20,55 @@
 #include <linux/platform_device.h>
 #include <linux/mailbox/qmp.h>
 #include <linux/uaccess.h>
+#include <linux/mailbox_controller.h>
 
 #define MAX_MSG_SIZE 96 /* Imposed by the remote*/
 
+struct qmp_debugfs_data {
+	struct qmp_pkt pkt;
+	char buf[MAX_MSG_SIZE + 1];
+};
+
+static struct qmp_debugfs_data data_pkt[MBOX_TX_QUEUE_LEN];
 static struct mbox_chan *chan;
 static struct mbox_client *cl;
 
+static DEFINE_MUTEX(qmp_debugfs_mutex);
+
 static ssize_t aop_msg_write(struct file *file, const char __user *userstr,
 		size_t len, loff_t *pos)
 {
-	char buf[MAX_MSG_SIZE + 1] = {0};
-	struct qmp_pkt pkt;
+	static int count;
 	int rc;
 
 	if (!len || (len > MAX_MSG_SIZE))
 		return len;
 
-	rc  = copy_from_user(buf, userstr, len);
+	mutex_lock(&qmp_debugfs_mutex);
+
+	if (count >= MBOX_TX_QUEUE_LEN)
+		count = 0;
+
+	memset(&(data_pkt[count]), 0, sizeof(data_pkt[count]));
+	rc  = copy_from_user(data_pkt[count].buf, userstr, len);
 	if (rc) {
 		pr_err("%s copy from user failed, rc=%d\n", __func__, rc);
+		mutex_unlock(&qmp_debugfs_mutex);
 		return len;
 	}
 
 	/*
 	 * Controller expects a 4 byte aligned buffer
 	 */
-	pkt.size = (len + 0x3) & ~0x3;
-	pkt.data = buf;
+	data_pkt[count].pkt.size = (len + 0x3) & ~0x3;
+	data_pkt[count].pkt.data = data_pkt[count].buf;
 
-	if (mbox_send_message(chan, &pkt) < 0)
+	if (mbox_send_message(chan, &(data_pkt[count].pkt)) < 0)
 		pr_err("Failed to send qmp request\n");
+	else
+		count++;
 
+	mutex_unlock(&qmp_debugfs_mutex);
 	return len;
 }
 
@@ -68,7 +86,7 @@
 
 	cl->dev = &pdev->dev;
 	cl->tx_block = true;
-	cl->tx_tout = 100;
+	cl->tx_tout = 1000;
 	cl->knows_txdone = false;
 
 	chan = mbox_request_channel(cl, 0);
diff --git a/drivers/soc/qcom/rpm-smd-debug.c b/drivers/soc/qcom/rpm-smd-debug.c
index 6ae9f08..e52fc72 100644
--- a/drivers/soc/qcom/rpm-smd-debug.c
+++ b/drivers/soc/qcom/rpm-smd-debug.c
@@ -90,23 +90,23 @@
 		cmp += pos;
 		if (sscanf(cmp, "%5s %n", key_str, &pos) != 1) {
 			pr_err("Invalid number of arguments passed\n");
-			goto err;
+			goto err_request;
 		}
 
 		if (strlen(key_str) > 4) {
 			pr_err("Key value cannot be more than 4 charecters");
-			goto err;
+			goto err_request;
 		}
 		key = string_to_uint(key_str);
 		if (!key) {
 			pr_err("Key values entered incorrectly\n");
-			goto err;
+			goto err_request;
 		}
 
 		cmp += pos;
 		if (sscanf(cmp, "%u %n", &data, &pos) != 1) {
 			pr_err("Invalid number of arguments passed\n");
-			goto err;
+			goto err_request;
 		}
 
 		if (msm_rpm_add_kvp_data(req, key,
diff --git a/drivers/soc/qcom/rpmh_master_stat.c b/drivers/soc/qcom/rpmh_master_stat.c
new file mode 100644
index 0000000..2c379a0
--- /dev/null
+++ b/drivers/soc/qcom/rpmh_master_stat.c
@@ -0,0 +1,220 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, KBUILD_MODNAME
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/of.h>
+#include <linux/uaccess.h>
+#include <soc/qcom/smem.h>
+
+/* SMEM item IDs under which each master publishes its sleep statistics */
+enum master_smem_id {
+	MPSS = 605,
+	ADSP,
+	CDSP,
+	SLPI,
+	GPU,
+	DISPLAY,
+};
+
+/* SMEM host/processor IDs used to locate each master's SMEM partition */
+enum master_pid {
+	PID_APSS = 0,
+	PID_MPSS = 1,
+	PID_ADSP = 2,
+	PID_SLPI = 3,
+	PID_CDSP = 5,
+	PID_GPU = PID_APSS,
+	PID_DISPLAY = PID_APSS,
+};
+
+/* Static description of one RPMH master: display name plus SMEM lookup keys */
+struct msm_rpmh_master_data {
+	char *master_name;
+	enum master_smem_id smem_id;
+	enum master_pid pid;
+};
+
+/* Table of all masters reported through the sysfs node */
+static const struct msm_rpmh_master_data rpmh_masters[] = {
+	{"MPSS", MPSS, PID_MPSS},
+	{"ADSP", ADSP, PID_ADSP},
+	{"CDSP", CDSP, PID_CDSP},
+	{"SLPI", SLPI, PID_SLPI},
+	{"GPU", GPU, PID_GPU},
+	{"DISPLAY", DISPLAY, PID_DISPLAY},
+};
+
+/*
+ * Per-master statistics record as read back from SMEM.
+ * NOTE(review): field layout must match the remote firmware's producer
+ * definition -- confirm against the SMEM writer before changing.
+ */
+struct msm_rpmh_master_stats {
+	uint32_t version_id;
+	uint32_t counts;
+	uint64_t last_entered_at;
+	uint64_t last_exited_at;
+	uint64_t accumulated_duration;
+};
+
+/* Driver-private state: the sysfs attribute and its parent kobject */
+struct rpmh_master_stats_prv_data {
+	struct kobj_attribute ka;
+	struct kobject *kobj;
+};
+
+/* Serializes readers of the shared SMEM statistics records */
+static DEFINE_MUTEX(rpmh_stats_mutex);
+
+/*
+ * msm_rpmh_master_stats_print_data() - Format one master's sleep stats.
+ * @prvbuf: Destination buffer (sysfs page) at the current write offset.
+ * @length: Bytes remaining in the buffer.
+ * @record: SMEM-resident stats record for this master.
+ * @name:   Human-readable master name.
+ *
+ * Return: number of bytes actually written.  scnprintf is used instead of
+ * snprintf: snprintf returns the would-be (untruncated) length, which
+ * would let the caller's running offset advance past PAGE_SIZE and make
+ * the sysfs show return more than the page it was handed.
+ */
+static ssize_t msm_rpmh_master_stats_print_data(char *prvbuf, ssize_t length,
+				struct msm_rpmh_master_stats *record,
+				const char *name)
+{
+	return scnprintf(prvbuf, length, "%s\n\tVersion:0x%x\n"
+			"\tSleep Count:0x%x\n"
+			"\tSleep Last Entered At:0x%llx\n"
+			"\tSleep Last Exited At:0x%llx\n"
+			"\tSleep Accumulated Duration:0x%llx\n\n",
+			name, record->version_id, record->counts,
+			record->last_entered_at, record->last_exited_at,
+			record->accumulated_duration);
+}
+
+/*
+ * msm_rpmh_master_stats_show() - sysfs .show for the "master_stats" file.
+ *
+ * Walks the rpmh_masters table, looks up each master's statistics record
+ * in SMEM and appends a formatted summary to @buf.  Masters whose SMEM
+ * entry is absent or invalid (e.g. subsystem not booted) are silently
+ * skipped.  Returns the number of bytes placed in @buf.
+ */
+static ssize_t msm_rpmh_master_stats_show(struct kobject *kobj,
+				struct kobj_attribute *attr, char *buf)
+{
+	ssize_t length;
+	int i = 0;
+	unsigned int size = 0;
+	struct msm_rpmh_master_stats *record = NULL;
+
+	/*
+	 * Read SMEM data written by masters
+	 */
+
+	mutex_lock(&rpmh_stats_mutex);
+
+	for (i = 0, length = 0; i < ARRAY_SIZE(rpmh_masters); i++) {
+		/* smem_get_entry may return an ERR_PTR or NULL; both skipped */
+		record = (struct msm_rpmh_master_stats *) smem_get_entry(
+					rpmh_masters[i].smem_id, &size,
+					rpmh_masters[i].pid, 0);
+		/* Only append while room remains in the sysfs page */
+		if (!IS_ERR_OR_NULL(record) && (PAGE_SIZE - length > 0))
+			length += msm_rpmh_master_stats_print_data(
+					buf + length, PAGE_SIZE - length,
+					record,
+					rpmh_masters[i].master_name);
+	}
+
+	mutex_unlock(&rpmh_stats_mutex);
+
+	return length;
+}
+
+/*
+ * msm_rpmh_master_stats_probe() - Create /sys/power/rpmh_stats/master_stats.
+ *
+ * Allocates the driver-private data, adds the "rpmh_stats" kobject under
+ * power_kobj and publishes the read-only "master_stats" attribute.
+ * Returns 0 on success or a negative errno; all partially-created state
+ * is released on any failure path.
+ */
+static int msm_rpmh_master_stats_probe(struct platform_device *pdev)
+{
+	struct rpmh_master_stats_prv_data *prvdata;
+	struct kobject *stats_kobj;
+	int ret;
+
+	if (!pdev)
+		return -EINVAL;
+
+	prvdata = kzalloc(sizeof(*prvdata), GFP_KERNEL);
+	if (!prvdata)
+		return -ENOMEM;
+
+	stats_kobj = kobject_create_and_add("rpmh_stats", power_kobj);
+	if (!stats_kobj) {
+		kfree(prvdata);
+		return -ENOMEM;
+	}
+	prvdata->kobj = stats_kobj;
+
+	sysfs_attr_init(&prvdata->ka.attr);
+	prvdata->ka.attr.mode = 0444;
+	prvdata->ka.attr.name = "master_stats";
+	prvdata->ka.show = msm_rpmh_master_stats_show;
+	prvdata->ka.store = NULL;
+
+	ret = sysfs_create_file(prvdata->kobj, &prvdata->ka.attr);
+	if (ret) {
+		pr_err("sysfs_create_file failed\n");
+		kobject_put(prvdata->kobj);
+		kfree(prvdata);
+		return ret;
+	}
+
+	platform_set_drvdata(pdev, prvdata);
+
+	return 0;
+}
+
+/*
+ * msm_rpmh_master_stats_remove() - Tear down the sysfs node made in probe.
+ *
+ * Removes the "master_stats" attribute, drops the kobject reference and
+ * frees the driver-private data stored via platform_set_drvdata().
+ */
+static int msm_rpmh_master_stats_remove(struct platform_device *pdev)
+{
+	struct rpmh_master_stats_prv_data *prvdata;
+
+	if (!pdev)
+		return -EINVAL;
+
+	prvdata = (struct rpmh_master_stats_prv_data *)
+				platform_get_drvdata(pdev);
+
+	sysfs_remove_file(prvdata->kobj, &prvdata->ka.attr);
+	kobject_put(prvdata->kobj);
+	kfree(prvdata);
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+/* Device tree match table: binds to "qcom,rpmh-master-stats" nodes */
+static const struct of_device_id rpmh_master_table[] = {
+	{.compatible = "qcom,rpmh-master-stats"},
+	{},
+};
+
+static struct platform_driver msm_rpmh_master_stats_driver = {
+	.probe	= msm_rpmh_master_stats_probe,
+	.remove = msm_rpmh_master_stats_remove,
+	.driver = {
+		.name = "msm_rpmh_master_stats",
+		.owner = THIS_MODULE,
+		.of_match_table = rpmh_master_table,
+	},
+};
+
+/* Module entry point: register the platform driver */
+static int __init msm_rpmh_master_stats_init(void)
+{
+	return platform_driver_register(&msm_rpmh_master_stats_driver);
+}
+
+/* Module exit point: unregister the platform driver */
+static void __exit msm_rpmh_master_stats_exit(void)
+{
+	platform_driver_unregister(&msm_rpmh_master_stats_driver);
+}
+
+module_init(msm_rpmh_master_stats_init);
+module_exit(msm_rpmh_master_stats_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM RPMH Master Statistics driver");
+MODULE_ALIAS("platform:msm_rpmh_master_stat_log");
diff --git a/drivers/soc/qcom/scm.c b/drivers/soc/qcom/scm.c
index 492b68c..fec6f17 100644
--- a/drivers/soc/qcom/scm.c
+++ b/drivers/soc/qcom/scm.c
@@ -185,9 +185,8 @@
 	case SCM_ENOMEM:
 		return -ENOMEM;
 	case SCM_EBUSY:
-		return SCM_EBUSY;
 	case SCM_V2_EBUSY:
-		return SCM_V2_EBUSY;
+		return -EBUSY;
 	}
 	return -EINVAL;
 }
@@ -338,13 +337,13 @@
 	do {
 		ret = scm_call_common(svc_id, cmd_id, cmd_buf, cmd_len,
 					resp_buf, resp_len, cmd, len);
-		if (ret == SCM_EBUSY)
+		if (ret == -EBUSY)
 			msleep(SCM_EBUSY_WAIT_MS);
 		if (retry_count == 33)
 			pr_warn("scm: secure world has been busy for 1 second!\n");
-	} while (ret == SCM_EBUSY && (retry_count++ < SCM_EBUSY_MAX_RETRY));
+	} while (ret == -EBUSY && (retry_count++ < SCM_EBUSY_MAX_RETRY));
 
-	if (ret == SCM_EBUSY)
+	if (ret == -EBUSY)
 		pr_err("scm: secure world busy (rc = SCM_EBUSY)\n");
 
 	return ret;
@@ -666,7 +665,7 @@
 	if (unlikely(!is_scm_armv8()))
 		return -ENODEV;
 
-	ret = allocate_extra_arg_buffer(desc, GFP_KERNEL);
+	ret = allocate_extra_arg_buffer(desc, GFP_NOIO);
 	if (ret)
 		return ret;
 
@@ -799,7 +798,7 @@
 
 	ret = scm_call_common(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf,
 				resp_len, cmd, len);
-	if (unlikely(ret == SCM_EBUSY))
+	if (unlikely(ret == -EBUSY))
 		ret = _scm_call_retry(svc_id, cmd_id, cmd_buf, cmd_len,
 				      resp_buf, resp_len, cmd, PAGE_ALIGN(len));
 	kfree(cmd);
diff --git a/drivers/soc/qcom/smp2p_sleepstate.c b/drivers/soc/qcom/smp2p_sleepstate.c
index 310a186..d2b8733 100644
--- a/drivers/soc/qcom/smp2p_sleepstate.c
+++ b/drivers/soc/qcom/smp2p_sleepstate.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -37,12 +37,12 @@
 	switch (event) {
 	case PM_SUSPEND_PREPARE:
 		gpio_set_value(slst_gpio_base_id + PROC_AWAKE_ID, 0);
+		usleep_range(10000, 10500); /* Tuned based on SMP2P latencies */
 		msm_ipc_router_set_ws_allowed(true);
 		break;
 
 	case PM_POST_SUSPEND:
 		gpio_set_value(slst_gpio_base_id + PROC_AWAKE_ID, 1);
-		usleep_range(10000, 10500); /* Tuned based on SMP2P latencies */
 		msm_ipc_router_set_ws_allowed(false);
 		break;
 	}
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 9af39e1..556882c 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -587,6 +587,10 @@
 	/* SDM450 ID */
 	[338] = {MSM_CPU_SDM450, "SDM450"},
 
+	/* SDM632 ID */
+	[349] = {MSM_CPU_SDM632, "SDM632"},
+	[350] = {MSM_CPU_SDA632, "SDA632"},
+
 	/* Uninitialized IDs are not known to run Linux.
 	 * MSM_CPU_UNKNOWN is set to 0 to ensure these IDs are
 	 * considered as unknown CPU.
@@ -648,6 +652,55 @@
 	return "UNKNOWN SOC TYPE";
 }
 
+const char * __init arch_read_machine_name(void)
+{
+	static char msm_machine_name[256] = "Qualcomm Technologies, Inc. ";
+	static bool string_generated;
+	u32 len = 0;
+	const char *name;
+
+	if (string_generated)
+		return msm_machine_name;
+
+	len = strlen(msm_machine_name);
+	name = of_get_flat_dt_prop(of_get_flat_dt_root(),
+				"qcom,msm-name", NULL);
+	if (name)
+		len += snprintf(msm_machine_name + len,
+					sizeof(msm_machine_name) - len,
+					"%s", name);
+	else
+		goto no_prop_path;
+
+	name = of_get_flat_dt_prop(of_get_flat_dt_root(),
+				"qcom,pmic-name", NULL);
+	if (name) {
+		len += snprintf(msm_machine_name + len,
+					sizeof(msm_machine_name) - len,
+					"%s", " ");
+		len += snprintf(msm_machine_name + len,
+					sizeof(msm_machine_name) - len,
+					"%s", name);
+	} else
+		goto no_prop_path;
+
+	name = of_flat_dt_get_machine_name();
+	if (name) {
+		len += snprintf(msm_machine_name + len,
+					sizeof(msm_machine_name) - len,
+					"%s", " ");
+		len += snprintf(msm_machine_name + len,
+					sizeof(msm_machine_name) - len,
+					"%s", name);
+	} else
+		goto no_prop_path;
+
+	string_generated = true;
+	return msm_machine_name;
+no_prop_path:
+	return of_flat_dt_get_machine_name();
+}
+
 uint32_t socinfo_get_raw_id(void)
 {
 	return socinfo ?
@@ -1469,6 +1522,10 @@
 		dummy_socinfo.id = 338;
 		strlcpy(dummy_socinfo.build_id, "sdm450 - ",
 			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_sdm632()) {
+		dummy_socinfo.id = 349;
+		strlcpy(dummy_socinfo.build_id, "sdm632 - ",
+			sizeof(dummy_socinfo.build_id));
 	}
 
 	strlcat(dummy_socinfo.build_id, "Dummy socinfo",
diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c
index 9ff0c73..0b037d4 100644
--- a/drivers/soc/qcom/spcom.c
+++ b/drivers/soc/qcom/spcom.c
@@ -1044,7 +1044,7 @@
 	mutex_lock(&ch->lock); /* re-lock after waiting */
 	/* Check Rx Abort on SP reset */
 	if (ch->rx_abort) {
-		pr_err("rx aborted.\n");
+		pr_err("rx aborted, ch [%s].\n", ch->name);
 		goto exit_error;
 	}
 
@@ -1091,13 +1091,14 @@
 		if (!ch->is_server)
 			continue;
 
-		/* The server might not be connected to a client.
-		 * Don't check if connected, only if open.
+		/* The ch REMOTE_DISCONNECT notification happens before
+		 * the LINK_DOWN notification,
+		 * so the channel is already closed.
 		 */
-		if (!spcom_is_channel_open(ch) || (ch->rx_abort))
+		if (ch->rx_abort)
 			continue;
 
-		pr_debug("rx-abort server ch [%s].\n", ch->name);
+		pr_err("rx-abort server ch [%s].\n", ch->name);
 		ch->rx_abort = true;
 		complete_all(&ch->rx_done);
 	}
@@ -2820,7 +2821,7 @@
 {
 	int ret;
 
-	pr_info("spcom driver version 1.2 23-Aug-2017.\n");
+	pr_info("spcom driver version 1.3 28-Dec-2017.\n");
 
 	ret = platform_driver_register(&spcom_driver);
 	if (ret)
diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c
index 4b686e6..e221e6b 100644
--- a/drivers/soc/qcom/subsys-pil-tz.c
+++ b/drivers/soc/qcom/subsys-pil-tz.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -42,7 +42,6 @@
 
 #define ERR_READY	0
 #define PBL_DONE	1
-#define QDSP6SS_NMI_STATUS	0x44
 
 #define desc_to_data(d) container_of(d, struct pil_tz_data, desc)
 #define subsys_to_data(d) container_of(d, struct pil_tz_data, subsys_desc)
@@ -117,7 +116,6 @@
 	void __iomem *irq_mask;
 	void __iomem *err_status;
 	void __iomem *err_status_spare;
-	void __iomem *reg_base;
 	u32 bits_arr[2];
 };
 
@@ -907,7 +905,7 @@
 	if (!enable)
 		return 0;
 
-	return pil_do_ramdump(&d->desc, d->ramdump_dev);
+	return pil_do_ramdump(&d->desc, d->ramdump_dev, NULL);
 }
 
 static void subsys_free_memory(const struct subsys_desc *subsys)
@@ -931,19 +929,8 @@
 static irqreturn_t subsys_err_fatal_intr_handler (int irq, void *dev_id)
 {
 	struct pil_tz_data *d = subsys_to_data(dev_id);
-	u32 nmi_status = 0;
 
-	if (d->reg_base)
-		nmi_status = readl_relaxed(d->reg_base +
-						QDSP6SS_NMI_STATUS);
-
-	if (nmi_status & 0x04)
-		pr_err("%s: Fatal error on the %s due to TZ NMI\n",
-			__func__, d->subsys_desc.name);
-	else
-		pr_err("%s Fatal error on the %s\n",
-			__func__, d->subsys_desc.name);
-
+	pr_err("Fatal error on %s!\n", d->subsys_desc.name);
 	if (subsys_get_crash_status(d->subsys)) {
 		pr_err("%s: Ignoring error fatal, restart in progress\n",
 							d->subsys_desc.name);
@@ -1080,13 +1067,6 @@
 	d->keep_proxy_regs_on = of_property_read_bool(pdev->dev.of_node,
 						"qcom,keep-proxy-regs-on");
 
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base_reg");
-	d->reg_base = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(d->reg_base)) {
-		dev_err(&pdev->dev, "Failed to ioremap base register\n");
-		d->reg_base = NULL;
-	}
-
 	rc = of_property_read_string(pdev->dev.of_node, "qcom,firmware-name",
 				      &d->desc.name);
 	if (rc)
diff --git a/drivers/soc/qcom/system_pm.c b/drivers/soc/qcom/system_pm.c
index 3d978f7..480a33c 100644
--- a/drivers/soc/qcom/system_pm.c
+++ b/drivers/soc/qcom/system_pm.c
@@ -18,23 +18,35 @@
 #include <soc/qcom/rpmh.h>
 #include <soc/qcom/system_pm.h>
 
-#define ARCH_TIMER_HZ		(19200000UL)
+#include <clocksource/arm_arch_timer.h>
+
 #define PDC_TIME_VALID_SHIFT	31
 #define PDC_TIME_UPPER_MASK	0xFFFFFF
 
 static struct rpmh_client *rpmh_client;
 
-static int setup_wakeup(uint64_t sleep_val)
+static int setup_wakeup(uint32_t lo, uint32_t hi)
 {
 	struct tcs_cmd cmd[2] = { { 0 } };
 
-	cmd[0].data = (sleep_val >> 32) & PDC_TIME_UPPER_MASK;
+	cmd[0].data =  hi & PDC_TIME_UPPER_MASK;
 	cmd[0].data |= 1 << PDC_TIME_VALID_SHIFT;
-	cmd[1].data = sleep_val & 0xFFFFFFFF;
+	cmd[1].data = lo;
 
 	return rpmh_write_control(rpmh_client, cmd, ARRAY_SIZE(cmd));
 }
 
+int system_sleep_update_wakeup(void)
+{
+	uint32_t lo = ~0U, hi = ~0U;
+
+	/* Read the hardware to get the most accurate value */
+	arch_timer_mem_get_cval(&lo, &hi);
+
+	return setup_wakeup(lo, hi);
+}
+EXPORT_SYMBOL(system_sleep_update_wakeup);
+
 /**
  * system_sleep_allowed() - Returns if its okay to enter system low power modes
  */
@@ -47,35 +59,15 @@
 /**
  * system_sleep_enter() - Activties done when entering system low power modes
  *
- * @sleep_val: The sleep duration in us.
- *
- * Returns 0 for success or error values from writing the timer value in the
- * hardware block.
+ * Returns 0 for success or error values from writing the sleep/wake values to
+ * the hardware block.
  */
-int system_sleep_enter(uint64_t sleep_val)
+int system_sleep_enter(void)
 {
-	int ret;
-
 	if (IS_ERR_OR_NULL(rpmh_client))
 		return -EFAULT;
 
-	ret = rpmh_flush(rpmh_client);
-	if (ret)
-		return ret;
-
-	/*
-	 * Set up the wake up value offset from the current time.
-	 * Convert us to ns to allow div by 19.2 Mhz tick timer.
-	 */
-	if (sleep_val) {
-		sleep_val *= NSEC_PER_USEC;
-		do_div(sleep_val, NSEC_PER_SEC/ARCH_TIMER_HZ);
-		sleep_val += arch_counter_get_cntvct();
-	} else {
-		sleep_val = ~0ULL;
-	}
-
-	return setup_wakeup(sleep_val);
+	return rpmh_flush(rpmh_client);
 }
 EXPORT_SYMBOL(system_sleep_enter);
 
diff --git a/drivers/soc/qcom/wcnss/Kconfig b/drivers/soc/qcom/wcnss/Kconfig
new file mode 100644
index 0000000..5d8d010
--- /dev/null
+++ b/drivers/soc/qcom/wcnss/Kconfig
@@ -0,0 +1,39 @@
+config WCNSS_CORE
+	tristate "Qualcomm Technologies Inc. WCNSS CORE driver"
+	select WIRELESS_EXT
+	select WEXT_PRIV
+	select WEXT_CORE
+	select WEXT_SPY
+	help
+	  Core driver for the Qualcomm Technologies Inc. WCNSS triple-play
+	  connectivity subsystem. This module adds support for the WLAN
+	  connectivity subsystem and is responsible for communicating
+	  WLAN on/off requests; enable it to get the WCNSS core platform
+	  driver for WLAN.
+
+config WCNSS_CORE_PRONTO
+	tristate "Qualcomm Technologies Inc. WCNSS Pronto Support"
+	depends on WCNSS_CORE
+	help
+	  Pronto support for the Qualcomm Technologies Inc. WCNSS
+	  triple-play connectivity subsystem. This enables the WCNSS core
+	  platform driver for WLAN and adds support for communicating
+	  WLAN on/off requests to the connectivity subsystem.
+
+config WCNSS_REGISTER_DUMP_ON_BITE
+	bool "Enable/disable WCNSS register dump when there is a WCNSS bite"
+	depends on WCNSS_CORE_PRONTO
+	help
+	 When Apps receives a WDOG bite from WCNSS, collecting a register dump
+	 of WCNSS is helpful to root cause the failure. WCNSS may not be
+	 properly clocked in some WCNSS bite cases, and that may cause unclocked
+	 register access failures. So this feature is to enable/disable the
+	 register dump on WCNSS WDOG bite.
+
+config CNSS_CRYPTO
+	tristate "Enable CNSS crypto support"
+	help
+	  Add crypto support for the WLAN driver module.
+	  This feature enables the WLAN driver to use the crypto APIs
+	  exported by the cnss platform driver, which are used to generate
+	  cipher keys and support the WLAN driver's security protocols.
diff --git a/drivers/soc/qcom/wcnss/Makefile b/drivers/soc/qcom/wcnss/Makefile
new file mode 100644
index 0000000..072fef8
--- /dev/null
+++ b/drivers/soc/qcom/wcnss/Makefile
@@ -0,0 +1,6 @@
+
+# Makefile for WCNSS triple-play driver
+
+wcnsscore-objs += wcnss_wlan.o wcnss_vreg.o
+
+obj-$(CONFIG_WCNSS_CORE) += wcnsscore.o
diff --git a/drivers/soc/qcom/wcnss/wcnss_vreg.c b/drivers/soc/qcom/wcnss/wcnss_vreg.c
new file mode 100644
index 0000000..5ce2e82
--- /dev/null
+++ b/drivers/soc/qcom/wcnss/wcnss_vreg.c
@@ -0,0 +1,842 @@
+/* Copyright (c) 2011-2015, 2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/rpm-smd-regulator.h>
+#include <linux/wcnss_wlan.h>
+#include <linux/semaphore.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+
+static void __iomem *msm_wcnss_base;
+static LIST_HEAD(power_on_lock_list);
+static DEFINE_MUTEX(list_lock);
+static DEFINE_SEMAPHORE(wcnss_power_on_lock);
+static int auto_detect;
+static int is_power_on;
+
+#define RIVA_PMU_OFFSET         0x28
+
+#define RIVA_SPARE_OFFSET       0x0b4
+#define PRONTO_SPARE_OFFSET     0x1088
+#define NVBIN_DLND_BIT          BIT(25)
+
+#define PRONTO_IRIS_REG_READ_OFFSET       0x1134
+#define PRONTO_IRIS_REG_CHIP_ID           0x04
+/* IRIS card chip ID's */
+#define WCN3660       0x0200
+#define WCN3660A      0x0300
+#define WCN3660B      0x0400
+#define WCN3620       0x5111
+#define WCN3620A      0x5112
+#define WCN3610       0x9101
+#define WCN3610V1     0x9110
+
+#define WCNSS_PMU_CFG_IRIS_XO_CFG          BIT(3)
+#define WCNSS_PMU_CFG_IRIS_XO_EN           BIT(4)
+#define WCNSS_PMU_CFG_IRIS_XO_CFG_STS      BIT(6) /* 1: in progress, 0: done */
+
+#define WCNSS_PMU_CFG_IRIS_RESET           BIT(7)
+#define WCNSS_PMU_CFG_IRIS_RESET_STS       BIT(8) /* 1: in progress, 0: done */
+#define WCNSS_PMU_CFG_IRIS_XO_READ         BIT(9)
+#define WCNSS_PMU_CFG_IRIS_XO_READ_STS     BIT(10)
+
+#define WCNSS_PMU_CFG_IRIS_XO_MODE         0x6
+#define WCNSS_PMU_CFG_IRIS_XO_MODE_48      (3 << 1)
+
+#define VREG_NULL_CONFIG            0x0000
+#define VREG_GET_REGULATOR_MASK     0x0001
+#define VREG_SET_VOLTAGE_MASK       0x0002
+#define VREG_OPTIMUM_MODE_MASK      0x0004
+#define VREG_ENABLE_MASK            0x0008
+#define VDD_PA                      "qcom,iris-vddpa"
+
+#define WCNSS_INVALID_IRIS_REG      0xbaadbaad
+
+/* Bookkeeping for one power rail: the DT property names used to look
+ * up the supply, its load current and voltage levels, a VREG_*_MASK
+ * bitmask recording which configuration steps have been performed,
+ * whether the rail is mandatory for probe, and the acquired handle.
+ */
+struct vregs_info {
+	const char * const name;	/* DT supply name */
+	const char * const curr;	/* DT load-current property name */
+	const char * const volt;	/* DT voltage-level property name */
+	int state;			/* VREG_*_MASK progress bits */
+	bool required;			/* fail parsing if rail missing */
+	struct regulator *regulator;
+};
+
+/* IRIS regulators for Pronto hardware */
+static struct vregs_info iris_vregs[] = {
+	{"qcom,iris-vddxo", "qcom,iris-vddxo-current",
+	"qcom,iris-vddxo-voltage-level", VREG_NULL_CONFIG, true, NULL},
+	{"qcom,iris-vddrfa", "qcom,iris-vddrfa-current",
+	"qcom,iris-vddrfa-voltage-level", VREG_NULL_CONFIG, true, NULL},
+	{"qcom,iris-vddpa", "qcom,iris-vddpa-current",
+	"qcom,iris-vddpa-voltage-level", VREG_NULL_CONFIG, false, NULL},
+	{"qcom,iris-vdddig", "qcom,iris-vdddig-current",
+	"qcom,iris-vdddig-voltage-level", VREG_NULL_CONFIG, true, NULL},
+};
+
+/* WCNSS regulators for Pronto hardware */
+static struct vregs_info pronto_vregs[] = {
+	{"qcom,pronto-vddmx", "qcom,pronto-vddmx-current",
+	"qcom,vddmx-voltage-level", VREG_NULL_CONFIG, true, NULL},
+	{"qcom,pronto-vddcx", "qcom,pronto-vddcx-current",
+	"qcom,vddcx-voltage-level", VREG_NULL_CONFIG, true, NULL},
+	{"qcom,pronto-vddpx", "qcom,pronto-vddpx-current",
+	"qcom,vddpx-voltage-level", VREG_NULL_CONFIG, true, NULL},
+};
+
+/* One host driver currently holding the WCNSS power-on lock; see
+ * wcnss_req_power_on_lock() / wcnss_free_power_on_lock().
+ */
+struct host_driver {
+	char name[20];			/* requesting driver's name */
+	struct list_head list;		/* link on power_on_lock_list */
+};
+
+enum {
+	IRIS_3660, /* also 3660A and 3680 */
+	IRIS_3620,
+	IRIS_3610
+};
+
+/* Map the IRIS family code (top two bits of the chip-id register
+ * read) to the XO rate that card requires.
+ */
+int xo_auto_detect(u32 reg)
+{
+	u32 family = reg >> 30;
+
+	if (family == IRIS_3660)
+		return WCNSS_XO_48MHZ;
+
+	/* Both the 3620 and 3610 families run from the 19.2 MHz clock */
+	if (family == IRIS_3620 || family == IRIS_3610)
+		return WCNSS_XO_19MHZ;
+
+	return WCNSS_XO_INVALID;
+}
+
+/* Copy the name of the IRIS chip recorded in the WLAN configuration
+ * into @iris_name (caller must provide at least 10 bytes, enough for
+ * "WCN3610V1" plus the terminator).
+ * Returns 0 on success, 1 when no configuration is available or the
+ * chip id is unknown.
+ *
+ * Fix: extract the chip id from the upper 16 bits as an unsigned
+ * value.  The previous signed-int shift sign-extended ids with bit 31
+ * set (e.g. WCN3610's 0x9101xxxx), so those chips could never match.
+ */
+int wcnss_get_iris_name(char *iris_name)
+{
+	struct wcnss_wlan_config *cfg = wcnss_get_wlan_config();
+	u32 iris_id;
+
+	if (!cfg)
+		return 1;
+
+	iris_id = ((u32)cfg->iris_id >> 16) & 0xffff;
+
+	switch (iris_id) {
+	case WCN3660:
+		memcpy(iris_name, "WCN3660", sizeof("WCN3660"));
+		break;
+	case WCN3660A:
+		memcpy(iris_name, "WCN3660A", sizeof("WCN3660A"));
+		break;
+	case WCN3660B:
+		memcpy(iris_name, "WCN3660B", sizeof("WCN3660B"));
+		break;
+	case WCN3620:
+		memcpy(iris_name, "WCN3620", sizeof("WCN3620"));
+		break;
+	case WCN3620A:
+		memcpy(iris_name, "WCN3620A", sizeof("WCN3620A"));
+		break;
+	case WCN3610:
+		memcpy(iris_name, "WCN3610", sizeof("WCN3610"));
+		break;
+	case WCN3610V1:
+		memcpy(iris_name, "WCN3610V1", sizeof("WCN3610V1"));
+		break;
+	default:
+		return 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(wcnss_get_iris_name);
+
+/* Return 0 when @reg (raw IRIS chip-id register read) carries a
+ * known chip id in its upper 16 bits, 1 otherwise.
+ */
+int validate_iris_chip_id(u32 reg)
+{
+	u32 chip = reg >> 16;
+
+	if (chip == WCN3660 || chip == WCN3660A || chip == WCN3660B ||
+	    chip == WCN3620 || chip == WCN3620A ||
+	    chip == WCN3610 || chip == WCN3610V1)
+		return 0;
+
+	return 1;
+}
+
+/* Release every regulator handle acquired during parsing and mark
+ * each entry's bookkeeping state as unconfigured.
+ */
+static void wcnss_free_regulator(void)
+{
+	int idx;
+
+	/* WCNSS core (Pronto) rails */
+	for (idx = 0; idx < PRONTO_REGULATORS; idx++) {
+		if (!pronto_vregs[idx].state)
+			continue;
+		regulator_put(pronto_vregs[idx].regulator);
+		pronto_vregs[idx].state = VREG_NULL_CONFIG;
+	}
+
+	/* IRIS card rails */
+	for (idx = 0; idx < IRIS_REGULATORS; idx++) {
+		if (!iris_vregs[idx].state)
+			continue;
+		regulator_put(iris_vregs[idx].regulator);
+		iris_vregs[idx].state = VREG_NULL_CONFIG;
+	}
+}
+
+/* Read the voltage-level triplet (nominal / low-power / max) and the
+ * load current for one rail from the device tree into @vlevel[index].
+ * Returns 0 on success or the of_property_read_* error code.
+ */
+static int
+wcnss_dt_parse_vreg_level(struct device *dev, int index,
+			  const char *current_vreg_name, const char *vreg_name,
+			  struct vregs_level *vlevel)
+{
+	u32 levels[3];	/* nominal, low and high voltage values */
+	u32 load_ua;
+	int err;
+
+	err = of_property_read_u32_array(dev->of_node, vreg_name, levels,
+					 ARRAY_SIZE(levels));
+	if (err) {
+		dev_err(dev, "error reading %s property\n", vreg_name);
+		return err;
+	}
+
+	vlevel[index].nominal_min = levels[0];
+	vlevel[index].low_power_min = levels[1];
+	vlevel[index].max_voltage = levels[2];
+
+	err = of_property_read_u32(dev->of_node, current_vreg_name, &load_ua);
+	if (err) {
+		dev_err(dev, "error reading %s property\n", current_vreg_name);
+		return err;
+	}
+
+	vlevel[index].uA_load = load_ua;
+
+	return 0;
+}
+
+/* Acquire one regulator set (@vregs, @count entries) and parse its
+ * device-tree voltage and load-current levels into @vlevel.  Optional
+ * rails that are absent are skipped.  Returns 0 or a negative error;
+ * on error the caller releases previously acquired handles.
+ */
+static int wcnss_get_vreg_set(struct device *dev, struct vregs_info *vregs,
+			      int count, struct vregs_level *vlevel)
+{
+	int rc, i;
+
+	for (i = 0; i < count; i++) {
+		vregs[i].regulator = regulator_get(dev, vregs[i].name);
+		if (IS_ERR(vregs[i].regulator)) {
+			if (vregs[i].required) {
+				rc = PTR_ERR(vregs[i].regulator);
+				dev_err(dev, "regulator get of %s failed (%d)\n",
+					vregs[i].name, rc);
+				return rc;
+			}
+			dev_dbg(dev, "Skip optional regulator configuration: %s\n",
+				vregs[i].name);
+			continue;
+		}
+
+		vregs[i].state |= VREG_GET_REGULATOR_MASK;
+		rc = wcnss_dt_parse_vreg_level(dev, i, vregs[i].curr,
+					       vregs[i].volt, vlevel);
+		if (rc) {
+			dev_err(dev, "error reading voltage-level property\n");
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+/* Acquire and configure all Pronto and IRIS regulators described in
+ * the device tree (previously two copy-pasted loops, now one helper).
+ * Returns 0 on success; on failure every handle obtained so far is
+ * released and a negative errno is returned.
+ */
+int
+wcnss_parse_voltage_regulator(struct wcnss_wlan_config *wlan_config,
+			      struct device *dev)
+{
+	int rc;
+
+	rc = wcnss_get_vreg_set(dev, pronto_vregs, PRONTO_REGULATORS,
+				wlan_config->pronto_vlevel);
+	if (rc)
+		goto wcnss_vreg_get_err;
+
+	rc = wcnss_get_vreg_set(dev, iris_vregs, IRIS_REGULATORS,
+				wlan_config->iris_vlevel);
+	if (rc)
+		goto wcnss_vreg_get_err;
+
+	return 0;
+
+wcnss_vreg_get_err:
+	wcnss_free_regulator();
+	return rc;
+}
+
+/* Pulse the IRIS reset bit in the PMU configuration register and
+ * busy-wait until the hardware clears the reset-in-progress status.
+ * NOTE(review): the status poll below has no timeout; a wedged WCNSS
+ * would spin this CPU forever — confirm callers accept that.
+ */
+void  wcnss_iris_reset(u32 reg, void __iomem *pmu_conf_reg)
+{
+	/* Reset IRIS */
+	reg |= WCNSS_PMU_CFG_IRIS_RESET;
+	writel_relaxed(reg, pmu_conf_reg);
+
+	/* Wait for PMU_CFG.iris_reg_reset_sts */
+	while (readl_relaxed(pmu_conf_reg) &
+			WCNSS_PMU_CFG_IRIS_RESET_STS)
+		cpu_relax();
+
+	/* Reset iris reset bit */
+	reg &= ~WCNSS_PMU_CFG_IRIS_RESET;
+	writel_relaxed(reg, pmu_conf_reg);
+}
+
+/* Configure the IRIS XO (reference clock) via the WCNSS PMU.
+ *
+ * On power-on (@on != 0) this optionally auto-detects the attached
+ * IRIS card from its chip-id register, programs the PMU for either
+ * the 48 MHz XO or the 19.2 MHz TCXO mode, reports the chosen mode
+ * through @iris_xo_set, and leaves the RF clock enabled when the
+ * 19.2 MHz path is selected.  On power-off it drops the RF clock
+ * vote taken during power-on.
+ *
+ * Returns 0 on success or a negative errno.  Fixes over the previous
+ * revision: error paths now set @rc instead of silently returning 0,
+ * and the cleanup path can no longer clk_put() an ERR_PTR value.
+ */
+static int
+configure_iris_xo(struct device *dev,
+		  struct wcnss_wlan_config *cfg,
+		  int on, int *iris_xo_set)
+{
+	u32 reg = 0, i = 0;
+	u32 iris_reg = WCNSS_INVALID_IRIS_REG;
+	int rc = 0;
+	int pmu_offset = 0;
+	int spare_offset = 0;
+	void __iomem *pmu_conf_reg;
+	void __iomem *spare_reg;
+	void __iomem *iris_read_reg;
+	struct clk *clk;
+	struct clk *clk_rf = NULL;
+	bool use_48mhz_xo;
+
+	use_48mhz_xo = cfg->use_48mhz_xo;
+
+	/* Pronto and Riva use different register offsets and XO names */
+	if (wcnss_hardware_type() == WCNSS_PRONTO_HW) {
+		pmu_offset = PRONTO_PMU_OFFSET;
+		spare_offset = PRONTO_SPARE_OFFSET;
+
+		clk = clk_get(dev, "xo");
+		if (IS_ERR(clk)) {
+			pr_err("Couldn't get xo clock\n");
+			return PTR_ERR(clk);
+		}
+
+	} else {
+		pmu_offset = RIVA_PMU_OFFSET;
+		spare_offset = RIVA_SPARE_OFFSET;
+
+		clk = clk_get(dev, "cxo");
+		if (IS_ERR(clk)) {
+			pr_err("Couldn't get cxo clock\n");
+			return PTR_ERR(clk);
+		}
+	}
+
+	if (on) {
+		msm_wcnss_base = cfg->msm_wcnss_base;
+		if (!msm_wcnss_base) {
+			pr_err("ioremap wcnss physical failed\n");
+			/* Fix: report the failure instead of returning 0 */
+			rc = -ENOMEM;
+			goto fail;
+		}
+
+		/* Enable IRIS XO */
+		rc = clk_prepare_enable(clk);
+		if (rc) {
+			pr_err("clk enable failed\n");
+			goto fail;
+		}
+
+		/* NV bit is set to indicate that platform driver is capable
+		 * of doing NV download.
+		 */
+		pr_debug("wcnss: Indicate NV bin download\n");
+		spare_reg = msm_wcnss_base + spare_offset;
+		reg = readl_relaxed(spare_reg);
+		reg |= NVBIN_DLND_BIT;
+		writel_relaxed(reg, spare_reg);
+
+		pmu_conf_reg = msm_wcnss_base + pmu_offset;
+		writel_relaxed(0, pmu_conf_reg);
+		reg = readl_relaxed(pmu_conf_reg);
+		reg |= WCNSS_PMU_CFG_GC_BUS_MUX_SEL_TOP |
+				WCNSS_PMU_CFG_IRIS_XO_EN;
+		writel_relaxed(reg, pmu_conf_reg);
+
+		if (wcnss_xo_auto_detect_enabled()) {
+			iris_read_reg = msm_wcnss_base +
+				PRONTO_IRIS_REG_READ_OFFSET;
+			iris_reg = readl_relaxed(iris_read_reg);
+		}
+
+		wcnss_iris_reset(reg, pmu_conf_reg);
+
+		if (iris_reg != WCNSS_INVALID_IRIS_REG) {
+			iris_reg &= 0xffff;
+			iris_reg |= PRONTO_IRIS_REG_CHIP_ID;
+			writel_relaxed(iris_reg, iris_read_reg);
+			/* Retry the chip-id read; an absent or invalid card
+			 * after 5 attempts falls back to 19.2 MHz TCXO mode.
+			 */
+			do {
+				/* Iris read */
+				reg = readl_relaxed(pmu_conf_reg);
+				reg |= WCNSS_PMU_CFG_IRIS_XO_READ;
+				writel_relaxed(reg, pmu_conf_reg);
+
+				/* Wait for PMU_CFG.iris_reg_read_sts */
+				while (readl_relaxed(pmu_conf_reg) &
+						WCNSS_PMU_CFG_IRIS_XO_READ_STS)
+					cpu_relax();
+
+				iris_reg = readl_relaxed(iris_read_reg);
+				pr_info("wcnss: IRIS Reg: %08x\n", iris_reg);
+
+				if (validate_iris_chip_id(iris_reg) && i >= 4) {
+					pr_info("wcnss: IRIS Card absent/invalid\n");
+					auto_detect = WCNSS_XO_INVALID;
+					/* Reset iris read bit */
+					reg &= ~WCNSS_PMU_CFG_IRIS_XO_READ;
+					/* Clear XO_MODE[b2:b1] bits.
+					 * Clear implies 19.2 MHz TCXO
+					 */
+					reg &= ~(WCNSS_PMU_CFG_IRIS_XO_MODE);
+					goto xo_configure;
+				} else if (!validate_iris_chip_id(iris_reg)) {
+					pr_debug("wcnss: IRIS Card is present\n");
+					break;
+				}
+				reg &= ~WCNSS_PMU_CFG_IRIS_XO_READ;
+				writel_relaxed(reg, pmu_conf_reg);
+				wcnss_iris_reset(reg, pmu_conf_reg);
+			} while (i++ < 5);
+			auto_detect = xo_auto_detect(iris_reg);
+
+			/* Reset iris read bit */
+			reg &= ~WCNSS_PMU_CFG_IRIS_XO_READ;
+
+		} else if (wcnss_xo_auto_detect_enabled()) {
+			/* Default to 48 MHZ */
+			auto_detect = WCNSS_XO_48MHZ;
+		} else {
+			auto_detect = WCNSS_XO_INVALID;
+		}
+
+		cfg->iris_id = iris_reg;
+
+		/* Clear XO_MODE[b2:b1] bits. Clear implies 19.2 MHz TCXO */
+		reg &= ~(WCNSS_PMU_CFG_IRIS_XO_MODE);
+
+		if ((use_48mhz_xo && auto_detect == WCNSS_XO_INVALID) ||
+		    auto_detect ==  WCNSS_XO_48MHZ) {
+			reg |= WCNSS_PMU_CFG_IRIS_XO_MODE_48;
+
+			if (iris_xo_set)
+				*iris_xo_set = WCNSS_XO_48MHZ;
+		}
+
+xo_configure:
+		writel_relaxed(reg, pmu_conf_reg);
+
+		wcnss_iris_reset(reg, pmu_conf_reg);
+
+		/* Start IRIS XO configuration */
+		reg |= WCNSS_PMU_CFG_IRIS_XO_CFG;
+		writel_relaxed(reg, pmu_conf_reg);
+
+		/* Wait for XO configuration to finish */
+		while (readl_relaxed(pmu_conf_reg) &
+						WCNSS_PMU_CFG_IRIS_XO_CFG_STS)
+			cpu_relax();
+
+		/* Stop IRIS XO configuration */
+		reg &= ~(WCNSS_PMU_CFG_GC_BUS_MUX_SEL_TOP |
+				WCNSS_PMU_CFG_IRIS_XO_CFG);
+		writel_relaxed(reg, pmu_conf_reg);
+		clk_disable_unprepare(clk);
+
+		if ((!use_48mhz_xo && auto_detect == WCNSS_XO_INVALID) ||
+		    auto_detect ==  WCNSS_XO_19MHZ) {
+			clk_rf = clk_get(dev, "rf_clk");
+			if (IS_ERR(clk_rf)) {
+				pr_err("Couldn't get rf_clk\n");
+				/* Fix: propagate the error; clearing clk_rf
+				 * keeps the cleanup path from clk_put()ing
+				 * an ERR_PTR value.
+				 */
+				rc = PTR_ERR(clk_rf);
+				clk_rf = NULL;
+				goto fail;
+			}
+
+			rc = clk_prepare_enable(clk_rf);
+			if (rc) {
+				pr_err("clk_rf enable failed\n");
+				goto fail;
+			}
+			if (iris_xo_set)
+				*iris_xo_set = WCNSS_XO_19MHZ;
+		}
+
+	}  else if ((!use_48mhz_xo && auto_detect == WCNSS_XO_INVALID) ||
+		    auto_detect ==  WCNSS_XO_19MHZ) {
+		clk_rf = clk_get(dev, "rf_clk");
+		if (IS_ERR(clk_rf)) {
+			pr_err("Couldn't get rf_clk\n");
+			/* Fix: report the error instead of returning 0 */
+			rc = PTR_ERR(clk_rf);
+			clk_rf = NULL;
+			goto fail;
+		}
+		clk_disable_unprepare(clk_rf);
+	}
+
+	/* Add some delay for XO to settle */
+	msleep(20);
+
+fail:
+	clk_put(clk);
+
+	if (clk_rf)
+		clk_put(clk_rf);
+
+	return rc;
+}
+
+/* Helper routine to turn off all WCNSS & IRIS vregs.
+ * Walks @regulators in reverse order, undoing whatever configuration
+ * steps are recorded in each entry's state mask (load vote, voltage,
+ * enable, regulator handle) and clearing the mask afterwards.
+ * Errors are logged but do not stop the teardown.
+ */
+static void wcnss_vregs_off(struct vregs_info regulators[], uint size,
+			    struct vregs_level *voltage_level)
+{
+	int i, rc = 0;
+	struct wcnss_wlan_config *cfg;
+
+	cfg = wcnss_get_wlan_config();
+
+	if (!cfg) {
+		pr_err("Failed to get WLAN configuration\n");
+		return;
+	}
+
+	/* Regulators need to be turned off in the reverse order */
+	for (i = (size - 1); i >= 0; i--) {
+		if (regulators[i].state == VREG_NULL_CONFIG)
+			continue;
+
+		/* Remove PWM mode */
+		if (regulators[i].state & VREG_OPTIMUM_MODE_MASK) {
+			rc = regulator_set_load(regulators[i].regulator, 0);
+			if (rc < 0) {
+				pr_err("regulator set load(%s) failed (%d)\n",
+				       regulators[i].name, rc);
+			}
+		}
+
+		/* Set voltage to lowest level */
+		if (regulators[i].state & VREG_SET_VOLTAGE_MASK) {
+			/* On Pronto-vADC parts with a low battery, cap the
+			 * PA rail's maximum voltage — presumably to protect
+			 * the battery (NOTE(review): confirm intent).
+			 */
+			if (cfg->is_pronto_vadc) {
+				if (cfg->vbatt < WCNSS_VBATT_THRESHOLD &&
+				    !memcmp(regulators[i].name,
+					    VDD_PA, sizeof(VDD_PA))) {
+					voltage_level[i].max_voltage =
+						WCNSS_VBATT_LOW;
+				}
+			}
+
+			rc = regulator_set_voltage(regulators[i].regulator,
+						   voltage_level[i].
+						   low_power_min,
+						   voltage_level[i].
+						   max_voltage);
+
+			if (rc)
+				pr_err("regulator_set_voltage(%s) failed (%d)\n",
+				       regulators[i].name, rc);
+		}
+
+		/* Disable regulator */
+		if (regulators[i].state & VREG_ENABLE_MASK) {
+			rc = regulator_disable(regulators[i].regulator);
+			if (rc < 0)
+				pr_err("vreg %s disable failed (%d)\n",
+				       regulators[i].name, rc);
+		}
+
+		/* Free the regulator source */
+		if (regulators[i].state & VREG_GET_REGULATOR_MASK)
+			regulator_put(regulators[i].regulator);
+
+		regulators[i].state = VREG_NULL_CONFIG;
+	}
+}
+
+/* Common helper routine to turn on all WCNSS & IRIS vregs.
+ * For each acquired rail: program the nominal voltage window, vote
+ * the load current, then enable the regulator, recording each step
+ * in the entry's state mask so wcnss_vregs_off() can undo exactly
+ * what was done.  On any failure all rails in @regulators are rolled
+ * back.  Returns 0 on success or the first error encountered.
+ */
+static int wcnss_vregs_on(struct device *dev,
+			  struct vregs_info regulators[], uint size,
+			  struct vregs_level *voltage_level)
+{
+	int i, rc = 0, reg_cnt;
+	struct wcnss_wlan_config *cfg;
+
+	cfg = wcnss_get_wlan_config();
+
+	if (!cfg) {
+		pr_err("Failed to get WLAN configuration\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < size; i++) {
+		if (regulators[i].state == VREG_NULL_CONFIG)
+			continue;
+
+		reg_cnt = regulator_count_voltages(regulators[i].regulator);
+		/* Set voltage to nominal. Exclude switches e.g. LVS */
+		if ((voltage_level[i].nominal_min ||
+		     voltage_level[i].max_voltage) && (reg_cnt > 0)) {
+			/* With a low battery on Pronto-vADC parts, start
+			 * the PA rail in a reduced voltage window
+			 * (NOTE(review): confirm against vbatt handling).
+			 */
+			if (cfg->is_pronto_vadc) {
+				if (cfg->vbatt < WCNSS_VBATT_THRESHOLD &&
+				    !memcmp(regulators[i].name,
+				    VDD_PA, sizeof(VDD_PA))) {
+					voltage_level[i].nominal_min =
+						WCNSS_VBATT_INITIAL;
+					voltage_level[i].max_voltage =
+						WCNSS_VBATT_LOW;
+				}
+			}
+
+			rc = regulator_set_voltage(regulators[i].regulator,
+						   voltage_level[i].nominal_min,
+						   voltage_level[i].
+						   max_voltage);
+
+			if (rc) {
+				pr_err("regulator_set_voltage(%s) failed (%d)\n",
+				       regulators[i].name, rc);
+				goto fail;
+			}
+			regulators[i].state |= VREG_SET_VOLTAGE_MASK;
+		}
+
+		/* Vote for PWM/PFM mode if needed */
+		if (voltage_level[i].uA_load && (reg_cnt > 0)) {
+			rc = regulator_set_load(regulators[i].regulator,
+						voltage_level[i].uA_load);
+			if (rc < 0) {
+				pr_err("regulator set load(%s) failed (%d)\n",
+				       regulators[i].name, rc);
+				goto fail;
+			}
+			regulators[i].state |= VREG_OPTIMUM_MODE_MASK;
+		}
+
+		/* Enable the regulator */
+		rc = regulator_enable(regulators[i].regulator);
+		if (rc) {
+			pr_err("vreg %s enable failed (%d)\n",
+			       regulators[i].name, rc);
+			goto fail;
+		}
+		regulators[i].state |= VREG_ENABLE_MASK;
+	}
+
+	return rc;
+
+fail:
+	wcnss_vregs_off(regulators, size, voltage_level);
+	return rc;
+}
+
+/* Turn off the IRIS rails for the given hardware generation. */
+static void wcnss_iris_vregs_off(enum wcnss_hw_type hw_type,
+				 struct wcnss_wlan_config *cfg)
+{
+	if (hw_type == WCNSS_PRONTO_HW)
+		wcnss_vregs_off(iris_vregs, ARRAY_SIZE(iris_vregs),
+				cfg->iris_vlevel);
+	else
+		pr_err("%s invalid hardware %d\n", __func__, hw_type);
+}
+
+static int wcnss_iris_vregs_on(struct device *dev,
+			       enum wcnss_hw_type hw_type,
+			       struct wcnss_wlan_config *cfg)
+{
+	int ret = -1;
+
+	switch (hw_type) {
+	case WCNSS_PRONTO_HW:
+		ret = wcnss_vregs_on(dev, iris_vregs, ARRAY_SIZE(iris_vregs),
+				     cfg->iris_vlevel);
+		break;
+	default:
+		pr_err("%s invalid hardware %d\n", __func__, hw_type);
+	}
+	return ret;
+}
+
+/* Turn off the WCNSS core (Pronto) rails for the given hardware. */
+static void wcnss_core_vregs_off(enum wcnss_hw_type hw_type,
+				 struct wcnss_wlan_config *cfg)
+{
+	if (hw_type == WCNSS_PRONTO_HW)
+		wcnss_vregs_off(pronto_vregs, ARRAY_SIZE(pronto_vregs),
+				cfg->pronto_vlevel);
+	else
+		pr_err("%s invalid hardware %d\n", __func__, hw_type);
+}
+
+/* Turn on the WCNSS core (Pronto) rails; returns 0 on success, a
+ * negative error from wcnss_vregs_on(), or -1 for an unknown
+ * hardware type.
+ */
+static int wcnss_core_vregs_on(struct device *dev,
+			       enum wcnss_hw_type hw_type,
+			       struct wcnss_wlan_config *cfg)
+{
+	if (hw_type == WCNSS_PRONTO_HW)
+		return wcnss_vregs_on(dev, pronto_vregs,
+				      ARRAY_SIZE(pronto_vregs),
+				      cfg->pronto_vlevel);
+
+	pr_err("%s invalid hardware %d\n", __func__, hw_type);
+	return -1;
+}
+
+/* Power WCNSS (and its IRIS card) up or down.
+ * On switch-on: core rails, then IRIS rails, then IRIS XO
+ * configuration; each step is rolled back if a later one fails.
+ * On switch-off the sequence is undone in reverse, but only if a
+ * prior power-on succeeded (is_power_on).  Serialized against
+ * concurrent callers — and held off during SSR via the
+ * power-on-lock API in this file — by wcnss_power_on_lock.
+ * Returns 0 on success or a negative error from the failing step.
+ */
+int wcnss_wlan_power(struct device *dev,
+		     struct wcnss_wlan_config *cfg,
+		     enum wcnss_opcode on, int *iris_xo_set)
+{
+	int rc = 0;
+	enum wcnss_hw_type hw_type = wcnss_hardware_type();
+
+	down(&wcnss_power_on_lock);
+	if (on) {
+		/* RIVA regulator settings */
+		rc = wcnss_core_vregs_on(dev, hw_type,
+					 cfg);
+		if (rc)
+			goto fail_wcnss_on;
+
+		/* IRIS regulator settings */
+		rc = wcnss_iris_vregs_on(dev, hw_type,
+					 cfg);
+		if (rc)
+			goto fail_iris_on;
+
+		/* Configure IRIS XO */
+		rc = configure_iris_xo(dev, cfg,
+				       WCNSS_WLAN_SWITCH_ON, iris_xo_set);
+		if (rc)
+			goto fail_iris_xo;
+
+		is_power_on = true;
+
+	}  else if (is_power_on) {
+		is_power_on = false;
+		configure_iris_xo(dev, cfg,
+				  WCNSS_WLAN_SWITCH_OFF, NULL);
+		wcnss_iris_vregs_off(hw_type, cfg);
+		wcnss_core_vregs_off(hw_type, cfg);
+	}
+
+	up(&wcnss_power_on_lock);
+	return rc;
+
+fail_iris_xo:
+	wcnss_iris_vregs_off(hw_type, cfg);
+
+fail_iris_on:
+	wcnss_core_vregs_off(hw_type, cfg);
+
+fail_wcnss_on:
+	up(&wcnss_power_on_lock);
+	return rc;
+}
+EXPORT_SYMBOL(wcnss_wlan_power);
+
+/*
+ * During SSR WCNSS should not be 'powered on' until all the host drivers
+ * finish their shutdown routines.  Host drivers use below APIs to
+ * synchronize power-on. WCNSS will not be 'powered on' until all the
+ * requests(to lock power-on) are freed.
+ */
+/* Register @driver_name as blocking WCNSS power-on.  The first
+ * registration takes the power-on semaphore, so WCNSS cannot be
+ * powered up until every registered driver frees its lock.
+ * Returns 0 on success, -EINVAL on a NULL name or allocation failure.
+ */
+int wcnss_req_power_on_lock(char *driver_name)
+{
+	struct host_driver *entry;
+
+	if (!driver_name)
+		return -EINVAL;
+
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return -EINVAL;
+
+	strlcpy(entry->name, driver_name, sizeof(entry->name));
+
+	mutex_lock(&list_lock);
+	/* Lock when the first request is added */
+	if (list_empty(&power_on_lock_list))
+		down(&wcnss_power_on_lock);
+	list_add(&entry->list, &power_on_lock_list);
+	mutex_unlock(&list_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(wcnss_req_power_on_lock);
+
+/* Drop @driver_name's power-on lock request.  The power-on semaphore
+ * is released only when this call removed the last pending request.
+ * Fixes: previously the semaphore was also released when the list was
+ * already empty on entry, allowing an unbalanced up() that broke
+ * mutual exclusion with wcnss_wlan_power(); a NULL @driver_name also
+ * crashed in strcmp().
+ * Returns 0 if the entry was found and removed, -1 otherwise.
+ */
+int wcnss_free_power_on_lock(char *driver_name)
+{
+	int ret = -1;
+	struct host_driver *node;
+
+	if (!driver_name)
+		return ret;
+
+	mutex_lock(&list_lock);
+	list_for_each_entry(node, &power_on_lock_list, list) {
+		if (!strcmp(node->name, driver_name)) {
+			list_del(&node->list);
+			kfree(node);
+			ret = 0;
+			break;
+		}
+	}
+	/* unlock only when the last host driver frees its lock */
+	if (!ret && list_empty(&power_on_lock_list))
+		up(&wcnss_power_on_lock);
+	mutex_unlock(&list_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(wcnss_free_power_on_lock);
diff --git a/drivers/soc/qcom/wcnss/wcnss_wlan.c b/drivers/soc/qcom/wcnss/wcnss_wlan.c
new file mode 100644
index 0000000..db3974b
--- /dev/null
+++ b/drivers/soc/qcom/wcnss/wcnss_wlan.c
@@ -0,0 +1,3588 @@
+/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/wcnss_wlan.h>
+#include <linux/platform_data/qcom_wcnss_device.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+#include <linux/gpio.h>
+#include <linux/pm_wakeup.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/clk.h>
+#include <linux/ratelimit.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/uaccess.h>
+#include <linux/suspend.h>
+#include <linux/rwsem.h>
+#include <linux/qpnp/qpnp-adc.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pm_qos.h>
+#include <linux/bitops.h>
+#include <linux/cdev.h>
+#include <soc/qcom/socinfo.h>
+
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/subsystem_notif.h>
+
+#include <soc/qcom/smd.h>
+
+#define DEVICE "wcnss_wlan"
+#define CTRL_DEVICE "wcnss_ctrl"
+#define VERSION "1.01"
+#define WCNSS_PIL_DEVICE "wcnss"
+
+#define WCNSS_PINCTRL_STATE_DEFAULT "wcnss_default"
+#define WCNSS_PINCTRL_STATE_SLEEP "wcnss_sleep"
+#define WCNSS_PINCTRL_GPIO_STATE_DEFAULT "wcnss_gpio_default"
+
+#define WCNSS_DISABLE_PC_LATENCY	100
+#define WCNSS_ENABLE_PC_LATENCY	PM_QOS_DEFAULT_VALUE
+#define WCNSS_PM_QOS_TIMEOUT	15000
+#define IS_CAL_DATA_PRESENT     0
+#define WAIT_FOR_CBC_IND     2
+#define WCNSS_DUAL_BAND_CAPABILITY_OFFSET	BIT(8)
+
+/* module params */
+#define WCNSS_CONFIG_UNSPECIFIED (-1)
+#define UINT32_MAX (0xFFFFFFFFU)
+
+#define SUBSYS_NOTIF_MIN_INDEX	0
+#define SUBSYS_NOTIF_MAX_INDEX	9
+/* Human-readable names for subsystem-restart notifier codes, indexed
+ * by code (SUBSYS_NOTIF_MIN_INDEX..SUBSYS_NOTIF_MAX_INDEX) —
+ * presumably used to log notifications in wcnss_notif_cb(); confirm
+ * against that handler's body.
+ */
+char *wcnss_subsys_notif_type[] = {
+	"SUBSYS_BEFORE_SHUTDOWN",
+	"SUBSYS_AFTER_SHUTDOWN",
+	"SUBSYS_BEFORE_POWERUP",
+	"SUBSYS_AFTER_POWERUP",
+	"SUBSYS_RAMDUMP_NOTIFICATION",
+	"SUBSYS_POWERUP_FAILURE",
+	"SUBSYS_PROXY_VOTE",
+	"SUBSYS_PROXY_UNVOTE",
+	"SUBSYS_SOC_RESET",
+	"SUBSYS_NOTIF_TYPE_COUNT"
+};
+
+static int has_48mhz_xo = WCNSS_CONFIG_UNSPECIFIED;
+module_param(has_48mhz_xo, int, 0644);
+MODULE_PARM_DESC(has_48mhz_xo, "Is an external 48 MHz XO present");
+
+static int has_calibrated_data = WCNSS_CONFIG_UNSPECIFIED;
+module_param(has_calibrated_data, int, 0644);
+MODULE_PARM_DESC(has_calibrated_data, "whether calibrated data file available");
+
+static int has_autodetect_xo = WCNSS_CONFIG_UNSPECIFIED;
+module_param(has_autodetect_xo, int, 0644);
+MODULE_PARM_DESC(has_autodetect_xo, "Perform auto detect to configure IRIS XO");
+
+static int do_not_cancel_vote = WCNSS_CONFIG_UNSPECIFIED;
+module_param(do_not_cancel_vote, int, 0644);
+MODULE_PARM_DESC(do_not_cancel_vote, "Do not cancel votes for wcnss");
+
+static DEFINE_SPINLOCK(reg_spinlock);
+
+#define RIVA_SPARE_OFFSET		0x0b4
+#define RIVA_SUSPEND_BIT		BIT(24)
+
+#define CCU_RIVA_INVALID_ADDR_OFFSET		0x100
+#define CCU_RIVA_LAST_ADDR0_OFFSET		0x104
+#define CCU_RIVA_LAST_ADDR1_OFFSET		0x108
+#define CCU_RIVA_LAST_ADDR2_OFFSET		0x10c
+
+#define PRONTO_PMU_SPARE_OFFSET       0x1088
+#define PMU_A2XB_CFG_HSPLIT_RESP_LIMIT_OFFSET	0x117C
+
+#define PRONTO_PMU_COM_GDSCR_OFFSET       0x0024
+#define PRONTO_PMU_COM_GDSCR_SW_COLLAPSE  BIT(0)
+#define PRONTO_PMU_COM_GDSCR_HW_CTRL      BIT(1)
+
+#define PRONTO_PMU_WLAN_BCR_OFFSET         0x0050
+#define PRONTO_PMU_WLAN_BCR_BLK_ARES       BIT(0)
+
+#define PRONTO_PMU_WLAN_GDSCR_OFFSET       0x0054
+#define PRONTO_PMU_WLAN_GDSCR_SW_COLLAPSE  BIT(0)
+
+#define PRONTO_PMU_WDOG_CTL		0x0068
+
+#define PRONTO_PMU_CBCR_OFFSET        0x0008
+#define PRONTO_PMU_CBCR_CLK_EN        BIT(0)
+
+#define PRONTO_PMU_COM_CPU_CBCR_OFFSET     0x0030
+#define PRONTO_PMU_COM_AHB_CBCR_OFFSET     0x0034
+
+#define PRONTO_PMU_WLAN_AHB_CBCR_OFFSET    0x0074
+#define PRONTO_PMU_WLAN_AHB_CBCR_CLK_EN    BIT(0)
+#define PRONTO_PMU_WLAN_AHB_CBCR_CLK_OFF   BIT(31)
+
+#define PRONTO_PMU_CPU_AHB_CMD_RCGR_OFFSET  0x0120
+#define PRONTO_PMU_CPU_AHB_CMD_RCGR_ROOT_EN BIT(1)
+
+#define PRONTO_PMU_CFG_OFFSET              0x1004
+#define PRONTO_PMU_COM_CSR_OFFSET          0x1040
+#define PRONTO_PMU_SOFT_RESET_OFFSET       0x104C
+
+#define PRONTO_QFUSE_DUAL_BAND_OFFSET	   0x0018
+
+#define A2XB_CFG_OFFSET				0x00
+#define A2XB_INT_SRC_OFFSET			0x0c
+#define A2XB_TSTBUS_CTRL_OFFSET		0x14
+#define A2XB_TSTBUS_OFFSET			0x18
+#define A2XB_ERR_INFO_OFFSET		0x1c
+#define A2XB_FIFO_FILL_OFFSET		0x07
+#define A2XB_READ_FIFO_FILL_MASK		0x3F
+#define A2XB_CMD_FIFO_FILL_MASK			0x0F
+#define A2XB_WRITE_FIFO_FILL_MASK		0x1F
+#define A2XB_FIFO_EMPTY			0x2
+#define A2XB_FIFO_COUNTER			0xA
+
+#define WCNSS_TSTBUS_CTRL_EN		BIT(0)
+#define WCNSS_TSTBUS_CTRL_AXIM		(0x02 << 1)
+#define WCNSS_TSTBUS_CTRL_CMDFIFO	(0x03 << 1)
+#define WCNSS_TSTBUS_CTRL_WRFIFO	(0x04 << 1)
+#define WCNSS_TSTBUS_CTRL_RDFIFO	(0x05 << 1)
+#define WCNSS_TSTBUS_CTRL_CTRL		(0x07 << 1)
+#define WCNSS_TSTBUS_CTRL_AXIM_CFG0	(0x00 << 8)
+#define WCNSS_TSTBUS_CTRL_AXIM_CFG1	(0x01 << 8)
+#define WCNSS_TSTBUS_CTRL_CTRL_CFG0	(0x00 << 28)
+#define WCNSS_TSTBUS_CTRL_CTRL_CFG1	(0x01 << 28)
+
+#define CCU_PRONTO_INVALID_ADDR_OFFSET		0x08
+#define CCU_PRONTO_LAST_ADDR0_OFFSET		0x0c
+#define CCU_PRONTO_LAST_ADDR1_OFFSET		0x10
+#define CCU_PRONTO_LAST_ADDR2_OFFSET		0x14
+
+#define CCU_PRONTO_AOWBR_ERR_ADDR_OFFSET	0x28
+#define CCU_PRONTO_AOWBR_TIMEOUT_REG_OFFSET	0xcc
+#define CCU_PRONTO_AOWBR_ERR_TIMEOUT_OFFSET	0xd0
+#define CCU_PRONTO_A2AB_ERR_ADDR_OFFSET		0x18
+
+#define PRONTO_SAW2_SPM_STS_OFFSET		0x0c
+#define PRONTO_SAW2_SPM_CTL		0x30
+#define PRONTO_SAW2_SAW2_VERSION		0xFD0
+#define PRONTO_SAW2_MAJOR_VER_OFFSET		0x1C
+
+#define PRONTO_PLL_STATUS_OFFSET		0x1c
+#define PRONTO_PLL_MODE_OFFSET			0x1c0
+
+#define MCU_APB2PHY_STATUS_OFFSET		0xec
+#define MCU_CBR_CCAHB_ERR_OFFSET		0x380
+#define MCU_CBR_CAHB_ERR_OFFSET			0x384
+#define MCU_CBR_CCAHB_TIMEOUT_OFFSET		0x388
+#define MCU_CBR_CAHB_TIMEOUT_OFFSET		0x38c
+#define MCU_DBR_CDAHB_ERR_OFFSET		0x390
+#define MCU_DBR_DAHB_ERR_OFFSET			0x394
+#define MCU_DBR_CDAHB_TIMEOUT_OFFSET		0x398
+#define MCU_DBR_DAHB_TIMEOUT_OFFSET		0x39c
+#define MCU_FDBR_CDAHB_ERR_OFFSET		0x3a0
+#define MCU_FDBR_FDAHB_ERR_OFFSET		0x3a4
+#define MCU_FDBR_CDAHB_TIMEOUT_OFFSET		0x3a8
+#define MCU_FDBR_FDAHB_TIMEOUT_OFFSET		0x3ac
+#define PRONTO_PMU_CCPU_BOOT_REMAP_OFFSET	0x2004
+
+#define WCNSS_DEF_WLAN_RX_BUFF_COUNT		1024
+
+#define WCNSS_CTRL_CHANNEL			"WCNSS_CTRL"
+#define WCNSS_MAX_FRAME_SIZE		(4 * 1024)
+#define WCNSS_VERSION_LEN			30
+#define WCNSS_MAX_BUILD_VER_LEN		256
+#define WCNSS_MAX_CMD_LEN		(128)
+#define WCNSS_MIN_CMD_LEN		(3)
+
+/* control messages from userspace */
+#define WCNSS_USR_CTRL_MSG_START  0x00000000
+#define WCNSS_USR_HAS_CAL_DATA    (WCNSS_USR_CTRL_MSG_START + 2)
+#define WCNSS_USR_WLAN_MAC_ADDR   (WCNSS_USR_CTRL_MSG_START + 3)
+
+#define MAC_ADDRESS_STR "%02x:%02x:%02x:%02x:%02x:%02x"
+#define SHOW_MAC_ADDRESS_STR	"%02x:%02x:%02x:%02x:%02x:%02x\n"
+#define WCNSS_USER_MAC_ADDR_LENGTH	18
+
+/* message types */
+#define WCNSS_CTRL_MSG_START	0x01000000
+#define	WCNSS_VERSION_REQ             (WCNSS_CTRL_MSG_START + 0)
+#define	WCNSS_VERSION_RSP             (WCNSS_CTRL_MSG_START + 1)
+#define	WCNSS_NVBIN_DNLD_REQ          (WCNSS_CTRL_MSG_START + 2)
+#define	WCNSS_NVBIN_DNLD_RSP          (WCNSS_CTRL_MSG_START + 3)
+#define	WCNSS_CALDATA_UPLD_REQ        (WCNSS_CTRL_MSG_START + 4)
+#define	WCNSS_CALDATA_UPLD_RSP        (WCNSS_CTRL_MSG_START + 5)
+#define	WCNSS_CALDATA_DNLD_REQ        (WCNSS_CTRL_MSG_START + 6)
+#define	WCNSS_CALDATA_DNLD_RSP        (WCNSS_CTRL_MSG_START + 7)
+#define	WCNSS_VBATT_LEVEL_IND         (WCNSS_CTRL_MSG_START + 8)
+#define	WCNSS_BUILD_VER_REQ           (WCNSS_CTRL_MSG_START + 9)
+#define	WCNSS_BUILD_VER_RSP           (WCNSS_CTRL_MSG_START + 10)
+#define	WCNSS_PM_CONFIG_REQ           (WCNSS_CTRL_MSG_START + 11)
+#define	WCNSS_CBC_COMPLETE_IND        (WCNSS_CTRL_MSG_START + 12)
+
+/* max 20mhz channel count */
+#define WCNSS_MAX_CH_NUM			45
+#define WCNSS_MAX_PIL_RETRY			2
+
+#define VALID_VERSION(version) \
+	((strcmp(version, "INVALID")) ? 1 : 0)
+
+#define FW_CALDATA_CAPABLE() \
+	((penv->fw_major >= 1) && (penv->fw_minor >= 5) ? 1 : 0)
+
+static int wcnss_pinctrl_set_state(bool active);
+
+/* Common header on every SMD control message exchanged with WCNSS. */
+struct smd_msg_hdr {
+	unsigned int msg_type;	/* WCNSS_*_REQ/RSP/IND message id */
+	unsigned int msg_len;	/* total length including this header */
+};
+
+/* Firmware version message body (see WCNSS_VERSION_REQ/RSP). */
+struct wcnss_version {
+	struct smd_msg_hdr hdr;
+	unsigned char  major;
+	unsigned char  minor;
+	unsigned char  version;
+	unsigned char  revision;
+};
+
+/* Name/address pair describing one PMIC register to dump. */
+struct wcnss_pmic_dump {
+	char reg_name[10];
+	u16 reg_addr;
+};
+
+static int wcnss_notif_cb(struct notifier_block *this, unsigned long code,
+			  void *ss_handle);
+
+static struct notifier_block wnb = {
+	.notifier_call = wcnss_notif_cb,
+};
+
+#define NVBIN_FILE "wlan/prima/WCNSS_qcom_wlan_nv.bin"
+
+/* On SMD channel 4K of maximum data can be transferred, including message
+ * header, so NV fragment size as next multiple of 1Kb is 3Kb.
+ */
+#define NV_FRAGMENT_SIZE  3072
+#define MAX_CALIBRATED_DATA_SIZE  (64 * 1024)
+#define LAST_FRAGMENT        BIT(0)
+#define MESSAGE_TO_FOLLOW    BIT(1)
+#define CAN_RECEIVE_CALDATA  BIT(15)
+#define WCNSS_RESP_SUCCESS   1
+#define WCNSS_RESP_FAIL      0
+
+/* Macro to find the total number fragments of the NV bin Image */
+#define TOTALFRAGMENTS(x) ((((x) % NV_FRAGMENT_SIZE) == 0) ? \
+	((x) / NV_FRAGMENT_SIZE) : (((x) / NV_FRAGMENT_SIZE) + 1))
+
+/* Per-fragment parameters of an NV-bin download request; follows the
+ * smd_msg_hdr inside struct nvbin_dnld_req_msg.
+ */
+struct nvbin_dnld_req_params {
+	/* Fragment sequence number of the NV bin Image. NV Bin Image
+	 * might not fit into one message due to size limitation of
+	 * the SMD channel FIFO so entire NV blob is chopped into
+	 * multiple fragments starting with seqeunce number 0. The
+	 * last fragment is indicated by marking is_last_fragment field
+	 * to 1. At receiving side, NV blobs would be concatenated
+	 * together without any padding bytes in between.
+	 */
+	unsigned short frag_number;
+
+	/* bit 0: When set to 1 it indicates that no more fragments will
+	 * be sent.
+	 * bit 1: When set, a new message will be followed by this message
+	 * bit 2- bit 14:  Reserved
+	 * bit 15: when set, it indicates that the sender is capable of
+	 * receiving Calibrated data.
+	 */
+	unsigned short msg_flags;
+
+	/* NV Image size (number of bytes) */
+	unsigned int nvbin_buffer_size;
+
+	/* Following the 'nvbin_buffer_size', there should be
+	 * nvbin_buffer_size bytes of NV bin Image i.e.
+	 * uint8[nvbin_buffer_size].
+	 */
+};
+
+struct nvbin_dnld_req_msg {
+	/* Note: The length specified in nvbin_dnld_req_msg messages
+	 * should be hdr.msg_len = sizeof(nvbin_dnld_req_msg) +
+	 * nvbin_buffer_size.
+	 */
+	struct smd_msg_hdr hdr;
+	struct nvbin_dnld_req_params dnld_req_params;
+};
+
+struct cal_data_params {
+	/* The total size of the calibrated data, including all the
+	 * fragments.
+	 */
+	unsigned int total_size;
+	unsigned short frag_number;
+	/* bit 0: When set to 1 it indicates that no more fragments will
+	 * be sent.
+	 * bit 1: When set, a new message will be followed by this message
+	 * bit 2- bit 15: Reserved
+	 */
+	unsigned short msg_flags;
+	/* fragment size
+	 */
+	unsigned int frag_size;
+	/* Following the frag_size, frag_size of fragmented
+	 * data will be followed.
+	 */
+};
+
+struct cal_data_msg {
+	/* The length specified in cal_data_msg should be
+	 * hdr.msg_len = sizeof(cal_data_msg) + frag_size
+	 */
+	struct smd_msg_hdr hdr;
+	struct cal_data_params cal_params;
+};
+
+struct vbatt_level {
+	u32 curr_volt;
+	u32 threshold;
+};
+
+struct vbatt_message {
+	struct smd_msg_hdr hdr;
+	struct vbatt_level vbatt;
+};
+
+/* Driver-wide singleton state. Allocated elsewhere (penv starts NULL);
+ * nearly every function in this file bails out with -ENODEV when penv
+ * is not yet set.
+ */
+static struct {
+	struct platform_device *pdev;
+	void		*pil;
+	struct resource	*mmio_res;
+	struct resource	*tx_irq_res;
+	struct resource	*rx_irq_res;
+	struct resource	*gpios_5wire;
+	const struct dev_pm_ops *pm_ops;
+	int		triggered;
+	int		smd_channel_ready;
+	u32		wlan_rx_buff_count;
+	int		is_vsys_adc_channel;
+	int		is_a2xb_split_reg;
+	smd_channel_t	*smd_ch;
+	unsigned char	wcnss_version[WCNSS_VERSION_LEN];
+	unsigned char   fw_major;
+	unsigned char   fw_minor;
+	unsigned int	serial_number;
+	int		thermal_mitigation;
+	enum wcnss_hw_type	wcnss_hw_type;
+	void		(*tm_notify)(struct device *, int);
+	struct wcnss_wlan_config wlan_config;
+	struct delayed_work wcnss_work;
+	struct delayed_work vbatt_work;
+	struct work_struct wcnssctrl_version_work;
+	struct work_struct wcnss_pm_config_work;
+	struct work_struct wcnssctrl_nvbin_dnld_work;
+	struct work_struct wcnssctrl_rx_work;
+	struct work_struct wcnss_vadc_work;
+	struct wakeup_source wcnss_wake_lock;
+	/* MMIO bases ioremapped from platform resources */
+	void __iomem *msm_wcnss_base;
+	void __iomem *riva_ccu_base;
+	void __iomem *pronto_a2xb_base;
+	void __iomem *pronto_ccpu_base;
+	void __iomem *pronto_saw2_base;
+	void __iomem *pronto_pll_base;
+	void __iomem *pronto_mcu_base;
+	void __iomem *pronto_qfuse;
+	void __iomem *wlan_tx_status;
+	void __iomem *wlan_tx_phy_aborts;
+	void __iomem *wlan_brdg_err_source;
+	void __iomem *alarms_txctl;
+	void __iomem *alarms_tactl;
+	void __iomem *fiq_reg;
+	int	nv_downloaded;
+	int	is_cbc_done;
+	unsigned char *fw_cal_data;
+	unsigned char *user_cal_data;
+	int	fw_cal_rcvd;
+	int	fw_cal_exp_frag;
+	int	fw_cal_available;
+	int	user_cal_read;
+	int	user_cal_available;
+	u32	user_cal_rcvd;
+	u32	user_cal_exp_size;
+	int	iris_xo_mode_set;
+	int	fw_vbatt_state;
+	char	wlan_nv_mac_addr[WLAN_MAC_ADDR_SIZE];
+	int	ctrl_device_opened;
+	/* dev node lock */
+	struct mutex dev_lock;
+	/* dev control lock */
+	struct mutex ctrl_lock;
+	wait_queue_head_t read_wait;
+	struct qpnp_adc_tm_btm_param vbat_monitor_params;
+	struct qpnp_adc_tm_chip *adc_tm_dev;
+	struct qpnp_vadc_chip *vadc_dev;
+	/* battery monitor lock */
+	struct mutex vbat_monitor_mutex;
+	u16 unsafe_ch_count;
+	u16 unsafe_ch_list[WCNSS_MAX_CH_NUM];
+	void *wcnss_notif_hdle;
+	/* pinctrl handles for the 5-wire interface and debug GPIOs */
+	struct pinctrl *pinctrl;
+	struct pinctrl_state *wcnss_5wire_active;
+	struct pinctrl_state *wcnss_5wire_suspend;
+	struct pinctrl_state *wcnss_gpio_active;
+	int gpios[WCNSS_WLAN_MAX_GPIO];
+	int use_pinctrl;
+	u8 is_shutdown;
+	struct pm_qos_request wcnss_pm_qos_request;
+	int pc_disabled;
+	struct delayed_work wcnss_pm_qos_del_req;
+	/* power manager QOS lock */
+	struct mutex pm_qos_mutex;
+	struct clk *snoc_wcnss;
+	unsigned int snoc_wcnss_clock_freq;
+	bool is_dual_band_disabled;
+	dev_t dev_ctrl, dev_node;
+	struct class *node_class;
+	struct cdev ctrl_dev, node_dev;
+} *penv = NULL;
+
+/* sysfs store for "wcnss_mac_addr": parse a textual MAC address and
+ * cache it in penv->wlan_nv_mac_addr. Returns count on success,
+ * -ENODEV before probe, -EINVAL on malformed input.
+ */
+static ssize_t wcnss_wlan_macaddr_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	int index;
+	int mac_addr[WLAN_MAC_ADDR_SIZE];
+
+	if (!penv)
+		return -ENODEV;
+
+	if (strlen(buf) != WCNSS_USER_MAC_ADDR_LENGTH) {
+		dev_err(dev, "%s: Invalid MAC addr length\n", __func__);
+		return -EINVAL;
+	}
+
+	if (sscanf(buf, MAC_ADDRESS_STR, &mac_addr[0], &mac_addr[1],
+		   &mac_addr[2], &mac_addr[3], &mac_addr[4],
+		   &mac_addr[5]) != WLAN_MAC_ADDR_SIZE) {
+		pr_err("%s: Failed to Copy MAC\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Assign the low byte of each parsed octet directly. The old
+	 * code memcpy()'d the first byte of each int, which silently
+	 * relied on little-endian layout (a big-endian build would have
+	 * stored 0 for every octet).
+	 */
+	for (index = 0; index < WLAN_MAC_ADDR_SIZE; index++)
+		penv->wlan_nv_mac_addr[index] = (char)mac_addr[index];
+
+	pr_info("%s: Write MAC Addr:" MAC_ADDRESS_STR "\n", __func__,
+		penv->wlan_nv_mac_addr[0], penv->wlan_nv_mac_addr[1],
+		penv->wlan_nv_mac_addr[2], penv->wlan_nv_mac_addr[3],
+		penv->wlan_nv_mac_addr[4], penv->wlan_nv_mac_addr[5]);
+
+	return count;
+}
+
+/* sysfs show for "wcnss_mac_addr": print the cached NV MAC address. */
+static ssize_t wcnss_wlan_macaddr_show(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	const char *mac;
+
+	if (!penv)
+		return -ENODEV;
+
+	mac = penv->wlan_nv_mac_addr;
+	return scnprintf(buf, PAGE_SIZE, SHOW_MAC_ADDRESS_STR,
+			 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+}
+
+/* sysfs "wcnss_mac_addr" (0600): read/program the cached WLAN MAC. */
+static DEVICE_ATTR(wcnss_mac_addr, 0600, wcnss_wlan_macaddr_show,
+		   wcnss_wlan_macaddr_store);
+
+/* sysfs show for "thermal_mitigation": current mitigation level. */
+static ssize_t wcnss_thermal_mitigation_show(struct device *dev,
+					     struct device_attribute *attr,
+					     char *buf)
+{
+	if (penv == NULL)
+		return -ENODEV;
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", penv->thermal_mitigation);
+}
+
+/* sysfs store for "thermal_mitigation": parse a decimal level, cache it
+ * and forward it to the registered tm_notify callback (if any).
+ */
+static ssize_t wcnss_thermal_mitigation_store(struct device *dev,
+					      struct device_attribute *attr,
+					      const char *buf, size_t count)
+{
+	int value;
+
+	if (!penv)
+		return -ENODEV;
+
+	/* kstrtoint() returns 0 on success (not the number of converted
+	 * items like sscanf()); the old "!= 1" test therefore rejected
+	 * every valid input with -EINVAL.
+	 */
+	if (kstrtoint(buf, 10, &value))
+		return -EINVAL;
+	penv->thermal_mitigation = value;
+	if (penv->tm_notify)
+		penv->tm_notify(dev, value);
+	return count;
+}
+
+/* sysfs "thermal_mitigation" (0600): read/set the mitigation level. */
+static DEVICE_ATTR(thermal_mitigation, 0600, wcnss_thermal_mitigation_show,
+		   wcnss_thermal_mitigation_store);
+
+/* sysfs show for "wcnss_version": the cached firmware version string. */
+static ssize_t wcnss_version_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	if (penv == NULL)
+		return -ENODEV;
+
+	return scnprintf(buf, PAGE_SIZE, "%s", penv->wcnss_version);
+}
+
+/* sysfs "wcnss_version" (0400, read-only): firmware version string. */
+static DEVICE_ATTR(wcnss_version, 0400, wcnss_version_show, NULL);
+
+/* wcnss_reset_fiq() is invoked when host drivers fails to
+ * communicate with WCNSS over SMD; so logging these registers
+ * helps to know WCNSS failure reason.
+ *
+ * Riva variant: dump the CCU invalid-address and last-address
+ * registers via rate-limited logs (this can be called repeatedly
+ * from failure paths).
+ */
+void wcnss_riva_log_debug_regs(void)
+{
+	void __iomem *ccu_reg;
+	u32 reg = 0;
+
+	ccu_reg = penv->riva_ccu_base + CCU_RIVA_INVALID_ADDR_OFFSET;
+	reg = readl_relaxed(ccu_reg);
+	pr_info_ratelimited("%s: CCU_CCPU_INVALID_ADDR %08x\n", __func__, reg);
+
+	ccu_reg = penv->riva_ccu_base + CCU_RIVA_LAST_ADDR0_OFFSET;
+	reg = readl_relaxed(ccu_reg);
+	pr_info_ratelimited("%s: CCU_CCPU_LAST_ADDR0 %08x\n", __func__, reg);
+
+	ccu_reg = penv->riva_ccu_base + CCU_RIVA_LAST_ADDR1_OFFSET;
+	reg = readl_relaxed(ccu_reg);
+	pr_info_ratelimited("%s: CCU_CCPU_LAST_ADDR1 %08x\n", __func__, reg);
+
+	ccu_reg = penv->riva_ccu_base + CCU_RIVA_LAST_ADDR2_OFFSET;
+	reg = readl_relaxed(ccu_reg);
+	pr_info_ratelimited("%s: CCU_CCPU_LAST_ADDR2 %08x\n", __func__, reg);
+}
+EXPORT_SYMBOL(wcnss_riva_log_debug_regs);
+
+/* Poll an A2XB testbus fill counter to decide whether the FIFO named by
+ * @type is draining. If the fill level never decreases within
+ * A2XB_FIFO_COUNTER polls, report a possible stall; otherwise report
+ * the FIFO as not stalled. Diagnostic only — no state is changed.
+ */
+void wcnss_pronto_is_a2xb_bus_stall(void *tst_addr, u32 fifo_mask, char *type)
+{
+	u32 attempts = 0, val = 0;
+	u32 fill = 0, prev = 0;
+
+	val = readl_relaxed(tst_addr);
+	fill = (val >> A2XB_FIFO_FILL_OFFSET) & fifo_mask;
+	while (++attempts < A2XB_FIFO_COUNTER) {
+		if (!fill)
+			break;
+		prev = fill;
+		val = readl_relaxed(tst_addr);
+		fill = (val >> A2XB_FIFO_FILL_OFFSET) & fifo_mask;
+		if (fill < prev)
+			break;
+	}
+
+	if (attempts == A2XB_FIFO_COUNTER) {
+		pr_err("%s data FIFO testbus possibly stalled reg%08x\n",
+		       type, val);
+	} else {
+		pr_err("%s data FIFO tstbus not stalled reg%08x\n",
+		       type, val);
+	}
+}
+
+/* Map the "pronto_qfuse" region and latch whether dual-band operation
+ * is fused off into penv->is_dual_band_disabled.
+ * Returns 0 on success, -EINVAL if the resource is missing, or the
+ * error from devm_ioremap_resource() on mapping failure.
+ */
+int wcnss_get_dual_band_capability_info(struct platform_device *pdev)
+{
+	u32 reg = 0;
+	struct resource *res;
+
+	res = platform_get_resource_byname(
+			pdev, IORESOURCE_MEM, "pronto_qfuse");
+	if (!res)
+		return -EINVAL;
+
+	penv->pronto_qfuse = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(penv->pronto_qfuse))
+		/* propagate the real error (e.g. -EBUSY) rather than
+		 * masking everything as -ENOMEM
+		 */
+		return PTR_ERR(penv->pronto_qfuse);
+
+	reg = readl_relaxed(penv->pronto_qfuse +
+			PRONTO_QFUSE_DUAL_BAND_OFFSET);
+	penv->is_dual_band_disabled =
+		!!(reg & WCNSS_DUAL_BAND_CAPABILITY_OFFSET);
+
+	return 0;
+}
+
+/* Log pronto debug registers during SSR Timeout CB.
+ *
+ * Dumps PMU/SAW2/PLL/A2XB/CCU/MCU state in a fixed order, bailing out
+ * early when the common subsystem or the WLAN domain is power
+ * collapsed (reading a collapsed domain would hang the bus). The
+ * register read/write ordering below is deliberate — do not reorder.
+ */
+void wcnss_pronto_log_debug_regs(void)
+{
+	void __iomem *reg_addr, *tst_addr, *tst_ctrl_addr;
+	u32 reg = 0, reg2 = 0, reg3 = 0, reg4 = 0;
+
+	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_SPARE_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("PRONTO_PMU_SPARE %08x\n", reg);
+
+	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_COM_CPU_CBCR_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("PRONTO_PMU_COM_CPU_CBCR %08x\n", reg);
+
+	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_COM_AHB_CBCR_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("PRONTO_PMU_COM_AHB_CBCR %08x\n", reg);
+
+	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_CFG_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("PRONTO_PMU_CFG %08x\n", reg);
+
+	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_COM_CSR_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("PRONTO_PMU_COM_CSR %08x\n", reg);
+
+	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_SOFT_RESET_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("PRONTO_PMU_SOFT_RESET %08x\n", reg);
+
+	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_WDOG_CTL;
+	reg = readl_relaxed(reg_addr);
+	pr_err("PRONTO_PMU_WDOG_CTL %08x\n", reg);
+
+	reg_addr = penv->pronto_saw2_base + PRONTO_SAW2_SPM_STS_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("PRONTO_SAW2_SPM_STS %08x\n", reg);
+
+	reg_addr = penv->pronto_saw2_base + PRONTO_SAW2_SPM_CTL;
+	reg = readl_relaxed(reg_addr);
+	pr_err("PRONTO_SAW2_SPM_CTL %08x\n", reg);
+
+	if (penv->is_a2xb_split_reg) {
+		reg_addr = penv->msm_wcnss_base +
+			   PMU_A2XB_CFG_HSPLIT_RESP_LIMIT_OFFSET;
+		reg = readl_relaxed(reg_addr);
+		pr_err("PMU_A2XB_CFG_HSPLIT_RESP_LIMIT %08x\n", reg);
+	}
+
+	reg_addr = penv->pronto_saw2_base + PRONTO_SAW2_SAW2_VERSION;
+	reg = readl_relaxed(reg_addr);
+	pr_err("PRONTO_SAW2_SAW2_VERSION %08x\n", reg);
+	/* NOTE(review): the shifted major version is never used below */
+	reg >>= PRONTO_SAW2_MAJOR_VER_OFFSET;
+
+	reg_addr = penv->msm_wcnss_base  + PRONTO_PMU_CCPU_BOOT_REMAP_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("PRONTO_PMU_CCPU_BOOT_REMAP %08x\n", reg);
+
+	reg_addr = penv->pronto_pll_base + PRONTO_PLL_STATUS_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("PRONTO_PLL_STATUS %08x\n", reg);
+
+	/* reg4 is latched here and re-checked much later for the WLAN
+	 * power-collapse test
+	 */
+	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_CPU_AHB_CMD_RCGR_OFFSET;
+	reg4 = readl_relaxed(reg_addr);
+	pr_err("PMU_CPU_CMD_RCGR %08x\n", reg4);
+
+	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_COM_GDSCR_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("PRONTO_PMU_COM_GDSCR %08x\n", reg);
+	/* bit 31 = power-on status; bail out if common SS is collapsed */
+	reg >>= 31;
+
+	if (!reg) {
+		pr_err("Cannot log, Pronto common SS is power collapsed\n");
+		return;
+	}
+	/* NOTE(review): this writes the shifted (0/1) value, minus the
+	 * SW_COLLAPSE/HW_CTRL bits, back to GDSCR rather than the full
+	 * register contents — looks suspicious but matches shipped
+	 * behavior; confirm against the Pronto PMU spec before changing.
+	 */
+	reg &= ~(PRONTO_PMU_COM_GDSCR_SW_COLLAPSE
+			| PRONTO_PMU_COM_GDSCR_HW_CTRL);
+	writel_relaxed(reg, reg_addr);
+
+	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_CBCR_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	reg |= PRONTO_PMU_CBCR_CLK_EN;
+	writel_relaxed(reg, reg_addr);
+
+	reg_addr = penv->pronto_a2xb_base + A2XB_CFG_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("A2XB_CFG_OFFSET %08x\n", reg);
+
+	reg_addr = penv->pronto_a2xb_base + A2XB_INT_SRC_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("A2XB_INT_SRC_OFFSET %08x\n", reg);
+
+	reg_addr = penv->pronto_a2xb_base + A2XB_ERR_INFO_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("A2XB_ERR_INFO_OFFSET %08x\n", reg);
+
+	reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_INVALID_ADDR_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("CCU_CCPU_INVALID_ADDR %08x\n", reg);
+
+	reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_LAST_ADDR0_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("CCU_CCPU_LAST_ADDR0 %08x\n", reg);
+
+	reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_LAST_ADDR1_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("CCU_CCPU_LAST_ADDR1 %08x\n", reg);
+
+	reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_LAST_ADDR2_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("CCU_CCPU_LAST_ADDR2 %08x\n", reg);
+
+	reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_AOWBR_ERR_ADDR_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("CCU_PRONTO_AOWBR_ERR_ADDR_OFFSET %08x\n", reg);
+
+	reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_AOWBR_TIMEOUT_REG_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("CCU_PRONTO_AOWBR_TIMEOUT_REG_OFFSET %08x\n", reg);
+
+	reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_AOWBR_ERR_TIMEOUT_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("CCU_PRONTO_AOWBR_ERR_TIMEOUT_OFFSET %08x\n", reg);
+
+	reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_A2AB_ERR_ADDR_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("CCU_PRONTO_A2AB_ERR_ADDR_OFFSET %08x\n", reg);
+
+	tst_addr = penv->pronto_a2xb_base + A2XB_TSTBUS_OFFSET;
+	tst_ctrl_addr = penv->pronto_a2xb_base + A2XB_TSTBUS_CTRL_OFFSET;
+
+	/*  read data FIFO */
+	reg = 0;
+	reg = reg | WCNSS_TSTBUS_CTRL_EN | WCNSS_TSTBUS_CTRL_RDFIFO;
+	writel_relaxed(reg, tst_ctrl_addr);
+	reg = readl_relaxed(tst_addr);
+	if (!(reg & A2XB_FIFO_EMPTY)) {
+		wcnss_pronto_is_a2xb_bus_stall(tst_addr,
+					       A2XB_READ_FIFO_FILL_MASK,
+					       "Read");
+	} else {
+		pr_err("Read data FIFO testbus %08x\n", reg);
+	}
+	/*  command FIFO */
+	reg = 0;
+	reg = reg | WCNSS_TSTBUS_CTRL_EN | WCNSS_TSTBUS_CTRL_CMDFIFO;
+	writel_relaxed(reg, tst_ctrl_addr);
+	reg = readl_relaxed(tst_addr);
+	if (!(reg & A2XB_FIFO_EMPTY)) {
+		wcnss_pronto_is_a2xb_bus_stall(tst_addr,
+					       A2XB_CMD_FIFO_FILL_MASK, "Cmd");
+	} else {
+		pr_err("Command FIFO testbus %08x\n", reg);
+	}
+
+	/*  write data FIFO */
+	reg = 0;
+	reg = reg | WCNSS_TSTBUS_CTRL_EN | WCNSS_TSTBUS_CTRL_WRFIFO;
+	writel_relaxed(reg, tst_ctrl_addr);
+	reg = readl_relaxed(tst_addr);
+	if (!(reg & A2XB_FIFO_EMPTY)) {
+		wcnss_pronto_is_a2xb_bus_stall(tst_addr,
+					       A2XB_WRITE_FIFO_FILL_MASK,
+					       "Write");
+	} else {
+		pr_err("Write data FIFO testbus %08x\n", reg);
+	}
+
+	/*   AXIM SEL CFG0 */
+	reg = 0;
+	reg = reg | WCNSS_TSTBUS_CTRL_EN | WCNSS_TSTBUS_CTRL_AXIM |
+				WCNSS_TSTBUS_CTRL_AXIM_CFG0;
+	writel_relaxed(reg, tst_ctrl_addr);
+	reg = readl_relaxed(tst_addr);
+	pr_err("AXIM SEL CFG0 testbus %08x\n", reg);
+
+	/*   AXIM SEL CFG1 */
+	reg = 0;
+	reg = reg | WCNSS_TSTBUS_CTRL_EN | WCNSS_TSTBUS_CTRL_AXIM |
+				WCNSS_TSTBUS_CTRL_AXIM_CFG1;
+	writel_relaxed(reg, tst_ctrl_addr);
+	reg = readl_relaxed(tst_addr);
+	pr_err("AXIM SEL CFG1 testbus %08x\n", reg);
+
+	/*   CTRL SEL CFG0 */
+	reg = 0;
+	reg = reg | WCNSS_TSTBUS_CTRL_EN | WCNSS_TSTBUS_CTRL_CTRL |
+		WCNSS_TSTBUS_CTRL_CTRL_CFG0;
+	writel_relaxed(reg, tst_ctrl_addr);
+	reg = readl_relaxed(tst_addr);
+	pr_err("CTRL SEL CFG0 testbus %08x\n", reg);
+
+	/*   CTRL SEL CFG1 */
+	reg = 0;
+	reg = reg | WCNSS_TSTBUS_CTRL_EN | WCNSS_TSTBUS_CTRL_CTRL |
+		WCNSS_TSTBUS_CTRL_CTRL_CFG1;
+	writel_relaxed(reg, tst_ctrl_addr);
+	reg = readl_relaxed(tst_addr);
+	pr_err("CTRL SEL CFG1 testbus %08x\n", reg);
+
+	/* latch BCR/GDSCR/AHB_CBCR state for the WLAN power-collapse
+	 * check after the settling delay below
+	 */
+	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_WLAN_BCR_OFFSET;
+	reg = readl_relaxed(reg_addr);
+
+	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_WLAN_GDSCR_OFFSET;
+	reg2 = readl_relaxed(reg_addr);
+
+	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_WLAN_AHB_CBCR_OFFSET;
+	reg3 = readl_relaxed(reg_addr);
+	pr_err("PMU_WLAN_AHB_CBCR %08x\n", reg3);
+
+	msleep(50);
+
+	if ((reg & PRONTO_PMU_WLAN_BCR_BLK_ARES) ||
+	    (reg2 & PRONTO_PMU_WLAN_GDSCR_SW_COLLAPSE) ||
+	    (!(reg4 & PRONTO_PMU_CPU_AHB_CMD_RCGR_ROOT_EN)) ||
+	    (reg3 & PRONTO_PMU_WLAN_AHB_CBCR_CLK_OFF) ||
+	    (!(reg3 & PRONTO_PMU_WLAN_AHB_CBCR_CLK_EN))) {
+		pr_err("Cannot log, wlan domain is power collapsed\n");
+		return;
+	}
+
+	reg = readl_relaxed(penv->wlan_tx_phy_aborts);
+	pr_err("WLAN_TX_PHY_ABORTS %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_APB2PHY_STATUS_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_APB2PHY_STATUS %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_CBR_CCAHB_ERR_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_CBR_CCAHB_ERR %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_CBR_CAHB_ERR_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_CBR_CAHB_ERR %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_CBR_CCAHB_TIMEOUT_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_CBR_CCAHB_TIMEOUT %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_CBR_CAHB_TIMEOUT_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_CBR_CAHB_TIMEOUT %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_DBR_CDAHB_ERR_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_DBR_CDAHB_ERR %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_DBR_DAHB_ERR_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_DBR_DAHB_ERR %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_DBR_CDAHB_TIMEOUT_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_DBR_CDAHB_TIMEOUT %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_DBR_DAHB_TIMEOUT_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_DBR_DAHB_TIMEOUT %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_FDBR_CDAHB_ERR_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_FDBR_CDAHB_ERR %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_FDBR_FDAHB_ERR_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_FDBR_FDAHB_ERR %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_FDBR_CDAHB_TIMEOUT_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_FDBR_CDAHB_TIMEOUT %08x\n", reg);
+
+	reg_addr = penv->pronto_mcu_base + MCU_FDBR_FDAHB_TIMEOUT_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("MCU_FDBR_FDAHB_TIMEOUT %08x\n", reg);
+
+	reg = readl_relaxed(penv->wlan_brdg_err_source);
+	pr_err("WLAN_BRDG_ERR_SOURCE %08x\n", reg);
+
+	reg = readl_relaxed(penv->wlan_tx_status);
+	pr_err("WLAN_TXP_STATUS %08x\n", reg);
+
+	reg = readl_relaxed(penv->alarms_txctl);
+	pr_err("ALARMS_TXCTL %08x\n", reg);
+
+	reg = readl_relaxed(penv->alarms_tactl);
+	pr_err("ALARMS_TACTL %08x\n", reg);
+}
+EXPORT_SYMBOL(wcnss_pronto_log_debug_regs);
+
+#ifdef CONFIG_WCNSS_REGISTER_DUMP_ON_BITE
+
+/* Acquire (is_enable) or release (!is_enable) the 5-wire debug GPIOs.
+ * On enable: select the gpio pinctrl state, request the three DATA
+ * lines as inputs and SET/CLK as low outputs. Returns 0 on success,
+ * -EINVAL on any pinctrl or gpio_request failure (already-claimed
+ * GPIOs are rolled back).
+ */
+static int wcnss_gpio_set_state(bool is_enable)
+{
+	struct pinctrl_state *pin_state;
+	int ret;
+	int i;
+
+	if (!is_enable) {
+		for (i = 0; i < WCNSS_WLAN_MAX_GPIO; i++) {
+			if (gpio_is_valid(penv->gpios[i]))
+				gpio_free(penv->gpios[i]);
+		}
+
+		return 0;
+	}
+
+	pin_state = penv->wcnss_gpio_active;
+	if (!IS_ERR_OR_NULL(pin_state)) {
+		ret = pinctrl_select_state(penv->pinctrl, pin_state);
+		if (ret < 0) {
+			pr_err("%s: can not set gpio pins err: %d\n",
+			       __func__, ret);
+			goto pinctrl_set_err;
+		}
+
+	} else {
+		pr_err("%s: invalid gpio pinstate err: %lu\n",
+		       __func__, PTR_ERR(pin_state));
+		goto pinctrl_set_err;
+	}
+
+	/* assumes WCNSS_WLAN_DATA2 < DATA1 < DATA0 < SET < CLK in the
+	 * gpio enum — TODO confirm against the enum declaration
+	 */
+	for (i = WCNSS_WLAN_DATA2; i <= WCNSS_WLAN_DATA0; i++) {
+		ret = gpio_request_one(penv->gpios[i],
+				       GPIOF_DIR_IN, NULL);
+		if (ret) {
+			pr_err("%s: request failed for gpio:%d\n",
+			       __func__, penv->gpios[i]);
+			/* step back so the unwind frees only claimed gpios */
+			i--;
+			goto gpio_req_err;
+		}
+	}
+
+	for (i = WCNSS_WLAN_SET; i <= WCNSS_WLAN_CLK; i++) {
+		ret = gpio_request_one(penv->gpios[i],
+				       GPIOF_OUT_INIT_LOW, NULL);
+		if (ret) {
+			pr_err("%s: request failed for gpio:%d\n",
+			       __func__, penv->gpios[i]);
+			i--;
+			goto gpio_req_err;
+		}
+	}
+
+	return 0;
+
+gpio_req_err:
+	for (; i >= WCNSS_WLAN_DATA2; --i)
+		gpio_free(penv->gpios[i]);
+
+pinctrl_set_err:
+	return -EINVAL;
+}
+
+/* Bit-bang a read of an IRIS RF register over the 5-wire debug GPIOs.
+ * Clocks the read command + address out three bits per cycle on
+ * DATA0..DATA2, then turns the data lines around and clocks the 16-bit
+ * value back in. Returns the value read, or 0 if the GPIOs could not
+ * be acquired. The exact set/clock ordering is protocol-critical —
+ * do not reorder.
+ */
+static u32 wcnss_rf_read_reg(u32 rf_reg_addr)
+{
+	int count = 0;
+	u32 rf_cmd_and_addr = 0;
+	u32 rf_data_received = 0;
+	u32 rf_bit = 0;
+
+	if (wcnss_gpio_set_state(true))
+		return 0;
+
+	/* Reset the signal if it is already being used. */
+	gpio_set_value(penv->gpios[WCNSS_WLAN_SET], 0);
+	gpio_set_value(penv->gpios[WCNSS_WLAN_CLK], 0);
+
+	/* We start with cmd_set high penv->gpio_base + WCNSS_WLAN_SET = 1. */
+	gpio_set_value(penv->gpios[WCNSS_WLAN_SET], 1);
+
+	gpio_direction_output(penv->gpios[WCNSS_WLAN_DATA0], 1);
+	gpio_direction_output(penv->gpios[WCNSS_WLAN_DATA1], 1);
+	gpio_direction_output(penv->gpios[WCNSS_WLAN_DATA2], 1);
+
+	gpio_set_value(penv->gpios[WCNSS_WLAN_DATA0], 0);
+	gpio_set_value(penv->gpios[WCNSS_WLAN_DATA1], 0);
+	gpio_set_value(penv->gpios[WCNSS_WLAN_DATA2], 0);
+
+	/* Prepare command and RF register address that need to sent out. */
+	rf_cmd_and_addr  = (((WLAN_RF_READ_REG_CMD) |
+		(rf_reg_addr << WLAN_RF_REG_ADDR_START_OFFSET)) &
+		WLAN_RF_READ_CMD_MASK);
+	/* Send 15 bit RF register address, LSB first, 3 bits per clock */
+	for (count = 0; count < WLAN_RF_PREPARE_CMD_DATA; count++) {
+		gpio_set_value(penv->gpios[WCNSS_WLAN_CLK], 0);
+
+		rf_bit = (rf_cmd_and_addr & 0x1);
+		gpio_set_value(penv->gpios[WCNSS_WLAN_DATA0],
+			       rf_bit ? 1 : 0);
+		rf_cmd_and_addr = (rf_cmd_and_addr >> 1);
+
+		rf_bit = (rf_cmd_and_addr & 0x1);
+		gpio_set_value(penv->gpios[WCNSS_WLAN_DATA1], rf_bit ? 1 : 0);
+		rf_cmd_and_addr = (rf_cmd_and_addr >> 1);
+
+		rf_bit = (rf_cmd_and_addr & 0x1);
+		gpio_set_value(penv->gpios[WCNSS_WLAN_DATA2], rf_bit ? 1 : 0);
+		rf_cmd_and_addr = (rf_cmd_and_addr >> 1);
+
+		/* Send the data out penv->gpio_base + WCNSS_WLAN_CLK = 1 */
+		gpio_set_value(penv->gpios[WCNSS_WLAN_CLK], 1);
+	}
+
+	/* Pull down the clock signal */
+	gpio_set_value(penv->gpios[WCNSS_WLAN_CLK], 0);
+
+	/* Configure data pins to input IO pins */
+	gpio_direction_input(penv->gpios[WCNSS_WLAN_DATA0]);
+	gpio_direction_input(penv->gpios[WCNSS_WLAN_DATA1]);
+	gpio_direction_input(penv->gpios[WCNSS_WLAN_DATA2]);
+
+	/* idle clock cycles while the RF side prepares the response */
+	for (count = 0; count < WLAN_RF_CLK_WAIT_CYCLE; count++) {
+		gpio_set_value(penv->gpios[WCNSS_WLAN_CLK], 1);
+		gpio_set_value(penv->gpios[WCNSS_WLAN_CLK], 0);
+	}
+
+	rf_bit = 0;
+	/* Read 16 bit RF register value; the last cycle (count == 5)
+	 * presumably carries only one valid bit — confirm against the
+	 * IRIS protocol spec.
+	 */
+	for (count = 0; count < WLAN_RF_READ_DATA; count++) {
+		gpio_set_value(penv->gpios[WCNSS_WLAN_CLK], 1);
+		gpio_set_value(penv->gpios[WCNSS_WLAN_CLK], 0);
+
+		rf_bit = gpio_get_value(penv->gpios[WCNSS_WLAN_DATA0]);
+		rf_data_received |= (rf_bit << (count * WLAN_RF_DATA_LEN
+					+ WLAN_RF_DATA0_SHIFT));
+
+		if (count != 5) {
+			rf_bit = gpio_get_value(penv->gpios[WCNSS_WLAN_DATA1]);
+			rf_data_received |= (rf_bit << (count * WLAN_RF_DATA_LEN
+						+ WLAN_RF_DATA1_SHIFT));
+
+			rf_bit = gpio_get_value(penv->gpios[WCNSS_WLAN_DATA2]);
+			rf_data_received |= (rf_bit << (count * WLAN_RF_DATA_LEN
+						+ WLAN_RF_DATA2_SHIFT));
+		}
+	}
+
+	gpio_set_value(penv->gpios[WCNSS_WLAN_SET], 0);
+	wcnss_gpio_set_state(false);
+	wcnss_pinctrl_set_state(true);
+
+	return rf_data_received;
+}
+
+/* Dump a fixed set of IRIS RF registers via wcnss_rf_read_reg(). */
+static void wcnss_log_iris_regs(void)
+{
+	int i;
+	u32 reg_val;
+	/* static const: the table is read-only, so there is no need to
+	 * rebuild it on the stack on every call
+	 */
+	static const u32 regs_array[] = {
+		0x04, 0x05, 0x11, 0x1e, 0x40, 0x48,
+		0x49, 0x4b, 0x00, 0x01, 0x4d};
+
+	pr_info("%s: IRIS Registers [address] : value\n", __func__);
+
+	for (i = 0; i < ARRAY_SIZE(regs_array); i++) {
+		reg_val = wcnss_rf_read_reg(regs_array[i]);
+
+		pr_info("[0x%08x] : 0x%08x\n", regs_array[i], reg_val);
+	}
+}
+
+/* Route the GC bus mux to the top-level selection so IRIS registers
+ * become readable. Returns 1 on success, 0 when the driver state is
+ * not yet initialized.
+ */
+int wcnss_get_mux_control(void)
+{
+	void __iomem *pmu_conf_reg;
+	u32 val;
+
+	if (penv == NULL)
+		return 0;
+
+	pmu_conf_reg = penv->msm_wcnss_base + PRONTO_PMU_OFFSET;
+	val = readl_relaxed(pmu_conf_reg) | WCNSS_PMU_CFG_GC_BUS_MUX_SEL_TOP;
+	writel_relaxed(val, pmu_conf_reg);
+	return 1;
+}
+
+/* On a WCNSS "bite", verify the debug clock is alive before touching
+ * PMU registers: route the measure clock to the wcnss debug mux, and
+ * dump Pronto/IRIS registers only if the measured rate is non-zero.
+ */
+void wcnss_log_debug_regs_on_bite(void)
+{
+	struct platform_device *pdev = wcnss_get_platform_device();
+	struct clk *measure;
+	struct clk *wcnss_debug_mux;
+	unsigned long clk_rate;
+
+	if (wcnss_hardware_type() != WCNSS_PRONTO_HW)
+		return;
+
+	measure = clk_get(&pdev->dev, "measure");
+	wcnss_debug_mux = clk_get(&pdev->dev, "wcnss_debug");
+
+	/* The old code never called clk_put(), leaking a clock
+	 * reference on every call (including the early-return error
+	 * paths). All paths now funnel through put_clks.
+	 */
+	if (IS_ERR(measure) || IS_ERR(wcnss_debug_mux))
+		goto put_clks;
+
+	if (clk_set_parent(measure, wcnss_debug_mux)) {
+		pr_err("Setting measure clk parent failed\n");
+		goto put_clks;
+	}
+
+	if (clk_prepare_enable(measure)) {
+		pr_err("measure clk enable failed\n");
+		goto put_clks;
+	}
+
+	clk_rate = clk_get_rate(measure);
+	pr_debug("wcnss: clock frequency is: %luHz\n", clk_rate);
+
+	if (clk_rate) {
+		wcnss_pronto_log_debug_regs();
+		if (wcnss_get_mux_control())
+			wcnss_log_iris_regs();
+	} else {
+		pr_err("clock frequency is zero, cannot access PMU or other registers\n");
+		wcnss_log_iris_regs();
+	}
+
+	clk_disable_unprepare(measure);
+
+put_clks:
+	if (!IS_ERR(measure))
+		clk_put(measure);
+	if (!IS_ERR(wcnss_debug_mux))
+		clk_put(wcnss_debug_mux);
+}
+#endif
+
+/* interface to reset wcnss by sending the reset interrupt.
+ * Logs debug registers first (optionally gated on a live debug clock
+ * via clk_chk_en), then raises the FIQ toward WCNSS — unless the
+ * device is shutting down, in which case the FIQ is suppressed.
+ */
+void wcnss_reset_fiq(bool clk_chk_en)
+{
+	if (wcnss_hardware_type() == WCNSS_PRONTO_HW) {
+		if (clk_chk_en) {
+			wcnss_log_debug_regs_on_bite();
+		} else {
+			wcnss_pronto_log_debug_regs();
+			if (wcnss_get_mux_control())
+				wcnss_log_iris_regs();
+		}
+		if (!wcnss_device_is_shutdown()) {
+			/* Insert memory barrier before writing fiq register */
+			wmb();
+			__raw_writel(1 << 16, penv->fiq_reg);
+		} else {
+			pr_info("%s: Block FIQ during power up sequence\n",
+				__func__);
+		}
+	} else {
+		wcnss_riva_log_debug_regs();
+	}
+}
+EXPORT_SYMBOL(wcnss_reset_fiq);
+
+/* Create the thermal_mitigation, wcnss_version and wcnss_mac_addr
+ * sysfs attributes; on any failure, remove whatever was created and
+ * return the error.
+ */
+static int wcnss_create_sysfs(struct device *dev)
+{
+	int ret;
+
+	if (!dev)
+		return -ENODEV;
+
+	ret = device_create_file(dev, &dev_attr_thermal_mitigation);
+	if (ret)
+		return ret;
+
+	ret = device_create_file(dev, &dev_attr_wcnss_version);
+	if (!ret) {
+		ret = device_create_file(dev, &dev_attr_wcnss_mac_addr);
+		if (!ret)
+			return 0;
+		/* mac_addr failed: roll back version, then thermal */
+		device_remove_file(dev, &dev_attr_wcnss_version);
+	}
+
+	device_remove_file(dev, &dev_attr_thermal_mitigation);
+	return ret;
+}
+
+/* Remove the sysfs attributes created by wcnss_create_sysfs(). */
+static void wcnss_remove_sysfs(struct device *dev)
+{
+	if (!dev)
+		return;
+
+	device_remove_file(dev, &dev_attr_thermal_mitigation);
+	device_remove_file(dev, &dev_attr_wcnss_version);
+	device_remove_file(dev, &dev_attr_wcnss_mac_addr);
+}
+
+/* Register the driver's CPU-DMA-latency PM QoS request (default value,
+ * i.e. no constraint yet).
+ */
+static void wcnss_pm_qos_add_request(void)
+{
+	pr_info("%s: add request\n", __func__);
+	pm_qos_add_request(&penv->wcnss_pm_qos_request, PM_QOS_CPU_DMA_LATENCY,
+			   PM_QOS_DEFAULT_VALUE);
+}
+
+/* Drop the driver's PM QoS request registered above. */
+static void wcnss_pm_qos_remove_request(void)
+{
+	pr_info("%s: remove request\n", __func__);
+	pm_qos_remove_request(&penv->wcnss_pm_qos_request);
+}
+
+/* Update the active PM QoS latency constraint to @val microseconds. */
+void wcnss_pm_qos_update_request(int val)
+{
+	pr_info("%s: update request %d\n", __func__, val);
+	pm_qos_update_request(&penv->wcnss_pm_qos_request, val);
+}
+
+/* Undo wcnss_disable_pc_add_req(): if power collapse was disabled,
+ * relax the latency constraint, drop the QoS request and release the
+ * suspend hold. Idempotent under pm_qos_mutex.
+ */
+void wcnss_disable_pc_remove_req(void)
+{
+	mutex_lock(&penv->pm_qos_mutex);
+	if (penv->pc_disabled) {
+		penv->pc_disabled = 0;
+		wcnss_pm_qos_update_request(WCNSS_ENABLE_PC_LATENCY);
+		wcnss_pm_qos_remove_request();
+		wcnss_allow_suspend();
+	}
+	mutex_unlock(&penv->pm_qos_mutex);
+}
+
+/* Disable CPU power collapse: add a PM QoS request, hold off suspend
+ * and pin latency to WCNSS_DISABLE_PC_LATENCY. Idempotent under
+ * pm_qos_mutex (no-op if already disabled).
+ */
+void wcnss_disable_pc_add_req(void)
+{
+	mutex_lock(&penv->pm_qos_mutex);
+	if (!penv->pc_disabled) {
+		wcnss_pm_qos_add_request();
+		wcnss_prevent_suspend();
+		wcnss_pm_qos_update_request(WCNSS_DISABLE_PC_LATENCY);
+		penv->pc_disabled = 1;
+	}
+	mutex_unlock(&penv->pm_qos_mutex);
+}
+
+/* SMD channel event callback (runs in SMD callback context, so real
+ * work is deferred to workqueues):
+ *  - DATA:  schedule RX processing if bytes are available;
+ *  - OPEN:  kick off version/PM-config exchange and optional VADC work;
+ *  - CLOSE: mark NV download and CBC as not done.
+ */
+static void wcnss_smd_notify_event(void *data, unsigned int event)
+{
+	int len = 0;
+
+	if (penv != data) {
+		pr_err("wcnss: invalid env pointer in smd callback\n");
+		return;
+	}
+	switch (event) {
+	case SMD_EVENT_DATA:
+		len = smd_read_avail(penv->smd_ch);
+		if (len < 0) {
+			pr_err("wcnss: failed to read from smd %d\n", len);
+			return;
+		}
+		schedule_work(&penv->wcnssctrl_rx_work);
+		break;
+
+	case SMD_EVENT_OPEN:
+		pr_debug("wcnss: opening WCNSS SMD channel :%s",
+			 WCNSS_CTRL_CHANNEL);
+		schedule_work(&penv->wcnssctrl_version_work);
+		schedule_work(&penv->wcnss_pm_config_work);
+		/* re-arm the deferred pm_qos removal */
+		cancel_delayed_work(&penv->wcnss_pm_qos_del_req);
+		schedule_delayed_work(&penv->wcnss_pm_qos_del_req, 0);
+		if (penv->wlan_config.is_pronto_vadc && (penv->vadc_dev))
+			schedule_work(&penv->wcnss_vadc_work);
+		break;
+
+	case SMD_EVENT_CLOSE:
+		pr_debug("wcnss: closing WCNSS SMD channel :%s",
+			 WCNSS_CTRL_CHANNEL);
+		penv->nv_downloaded = 0;
+		penv->is_cbc_done = 0;
+		break;
+
+	default:
+		break;
+	}
+}
+
+/* Switch the 5-wire interface between its active (default) and
+ * suspend (sleep) pinctrl states. Returns 0 on success, a negative
+ * errno from pinctrl_select_state(), or PTR_ERR of a bad pinstate.
+ */
+static int
+wcnss_pinctrl_set_state(bool active)
+{
+	struct pinctrl_state *state;
+	const char *name;
+	int rc;
+
+	pr_debug("%s: Set GPIO state : %d\n", __func__, active);
+
+	state = active ? penv->wcnss_5wire_active : penv->wcnss_5wire_suspend;
+	name = active ? WCNSS_PINCTRL_STATE_DEFAULT : WCNSS_PINCTRL_STATE_SLEEP;
+
+	if (IS_ERR_OR_NULL(state)) {
+		pr_err("%s: invalid '%s' pinstate\n", __func__, name);
+		return PTR_ERR(state);
+	}
+
+	rc = pinctrl_select_state(penv->pinctrl, state);
+	if (rc < 0) {
+		pr_err("%s: can not set %s pins\n", __func__, name);
+		return rc;
+	}
+
+	return 0;
+}
+
+/* Look up the pinctrl handle and the default/sleep/gpio pinstates, and
+ * read the 5-wire GPIO numbers from the device tree. The gpio pinstate
+ * and individual GPIOs are optional (warn only); the default and sleep
+ * pinstates are mandatory.
+ *
+ * NOTE(review): the IS_ERR_OR_NULL()+PTR_ERR() pairs return 0 (success)
+ * if a lookup ever yields NULL rather than an ERR_PTR — confirm the
+ * pinctrl APIs used here can never return NULL before relying on that.
+ */
+static int
+wcnss_pinctrl_init(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	int i;
+
+	/* Get pinctrl if target uses pinctrl */
+	penv->pinctrl = devm_pinctrl_get(&pdev->dev);
+
+	if (IS_ERR_OR_NULL(penv->pinctrl)) {
+		pr_err("%s: failed to get pinctrl\n", __func__);
+		return PTR_ERR(penv->pinctrl);
+	}
+
+	penv->wcnss_5wire_active
+		= pinctrl_lookup_state(penv->pinctrl,
+			WCNSS_PINCTRL_STATE_DEFAULT);
+
+	if (IS_ERR_OR_NULL(penv->wcnss_5wire_active)) {
+		pr_err("%s: can not get default pinstate\n", __func__);
+		return PTR_ERR(penv->wcnss_5wire_active);
+	}
+
+	penv->wcnss_5wire_suspend
+		= pinctrl_lookup_state(penv->pinctrl,
+			WCNSS_PINCTRL_STATE_SLEEP);
+
+	if (IS_ERR_OR_NULL(penv->wcnss_5wire_suspend)) {
+		pr_warn("%s: can not get sleep pinstate\n", __func__);
+		return PTR_ERR(penv->wcnss_5wire_suspend);
+	}
+
+	/* optional: used only for the debug register bit-bang path */
+	penv->wcnss_gpio_active = pinctrl_lookup_state(penv->pinctrl,
+					WCNSS_PINCTRL_GPIO_STATE_DEFAULT);
+	if (IS_ERR_OR_NULL(penv->wcnss_gpio_active))
+		pr_warn("%s: can not get gpio default pinstate\n", __func__);
+
+	for (i = 0; i < WCNSS_WLAN_MAX_GPIO; i++) {
+		penv->gpios[i] = of_get_gpio(node, i);
+		if (penv->gpios[i] < 0)
+			pr_warn("%s: Fail to get 5wire gpio: %d\n",
+				__func__, i);
+	}
+
+	return 0;
+}
+
+/* Configure the Pronto 5-wire GPIOs: prefer pinctrl; fall back to raw
+ * gpio_request()/gpio_free() of the first five DT GPIOs when pinctrl
+ * setup fails. Returns 0 on success or a negative errno.
+ */
+static int
+wcnss_pronto_gpios_config(struct platform_device *pdev, bool enable)
+{
+	int rc = 0;
+	int i, j;
+	int WCNSS_WLAN_NUM_GPIOS = 5;
+
+	/* Use Pinctrl to configure 5 wire GPIOs */
+	rc = wcnss_pinctrl_init(pdev);
+	if (rc) {
+		pr_err("%s: failed to get pin resources\n", __func__);
+		penv->pinctrl = NULL;
+		goto gpio_probe;
+	} else {
+		rc = wcnss_pinctrl_set_state(true);
+		if (rc)
+			pr_err("%s: failed to set pin state\n",
+			       __func__);
+		penv->use_pinctrl = true;
+		return rc;
+	}
+
+gpio_probe:
+	for (i = 0; i < WCNSS_WLAN_NUM_GPIOS; i++) {
+		int gpio = of_get_gpio(pdev->dev.of_node, i);
+
+		if (enable) {
+			rc = gpio_request(gpio, "wcnss_wlan");
+			if (rc) {
+				pr_err("WCNSS gpio_request %d err %d\n",
+				       gpio, rc);
+				goto fail;
+			}
+		} else {
+			gpio_free(gpio);
+		}
+	}
+	return rc;
+
+fail:
+	/* Unwind only the GPIOs that were actually requested (indices
+	 * 0..i-1). The old loop indexed of_get_gpio() with 'i' instead
+	 * of 'j', freeing the failed GPIO repeatedly and touching GPIOs
+	 * that were never requested.
+	 */
+	for (j = i - 1; j >= 0; j--) {
+		int gpio = of_get_gpio(pdev->dev.of_node, j);
+
+		gpio_free(gpio);
+	}
+	return rc;
+}
+
+static int
+wcnss_gpios_config(struct resource *gpios_5wire, bool enable)
+{
+	int i, j;
+	int rc = 0;
+
+	for (i = gpios_5wire->start; i <= gpios_5wire->end; i++) {
+		if (enable) {
+			rc = gpio_request(i, gpios_5wire->name);
+			if (rc) {
+				pr_err("WCNSS gpio_request %d err %d\n", i, rc);
+				goto fail;
+			}
+		} else {
+			gpio_free(i);
+		}
+	}
+
+	return rc;
+
+fail:
+	for (j = i - 1; j >= gpios_5wire->start; j--)
+		gpio_free(j);
+	return rc;
+}
+
+static int
+wcnss_wlan_ctrl_probe(struct platform_device *pdev)
+{
+	if (!penv || !penv->triggered)
+		return -ENODEV;
+
+	penv->smd_channel_ready = 1;
+
+	pr_info("%s: SMD ctrl channel up\n", __func__);
+	return 0;
+}
+
+static int
+wcnss_wlan_ctrl_remove(struct platform_device *pdev)
+{
+	if (penv)
+		penv->smd_channel_ready = 0;
+
+	pr_info("%s: SMD ctrl channel down\n", __func__);
+
+	return 0;
+}
+
/* Platform driver bound to the "WLAN_CTRL" SMD channel device; its
 * probe/remove callbacks only track whether the SMD control channel is up.
 */
static struct platform_driver wcnss_wlan_ctrl_driver = {
	.driver = {
		.name	= "WLAN_CTRL",
		.owner	= THIS_MODULE,
	},
	.probe	= wcnss_wlan_ctrl_probe,
	.remove	= wcnss_wlan_ctrl_remove,
};
+
+static int
+wcnss_ctrl_remove(struct platform_device *pdev)
+{
+	if (penv && penv->smd_ch)
+		smd_close(penv->smd_ch);
+
+	return 0;
+}
+
+static int
+wcnss_ctrl_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	if (!penv || !penv->triggered)
+		return -ENODEV;
+
+	ret = smd_named_open_on_edge(WCNSS_CTRL_CHANNEL, SMD_APPS_WCNSS,
+				     &penv->smd_ch, penv,
+				     wcnss_smd_notify_event);
+	if (ret < 0) {
+		pr_err("wcnss: cannot open the smd command channel %s: %d\n",
+		       WCNSS_CTRL_CHANNEL, ret);
+		return -ENODEV;
+	}
+	smd_disable_read_intr(penv->smd_ch);
+
+	return 0;
+}
+
/* platform device for WCNSS_CTRL SMD channel */
/* Opens the command channel on probe and closes it on remove. */
static struct platform_driver wcnss_ctrl_driver = {
	.driver = {
		.name	= "WCNSS_CTRL",
		.owner	= THIS_MODULE,
	},
	.probe	= wcnss_ctrl_probe,
	.remove	= wcnss_ctrl_remove,
};
+
+struct device *wcnss_wlan_get_device(void)
+{
+	if (penv && penv->pdev && penv->smd_channel_ready)
+		return &penv->pdev->dev;
+	return NULL;
+}
+EXPORT_SYMBOL(wcnss_wlan_get_device);
+
/* Thin exported wrapper so WLAN clients can read CLOCK_BOOTTIME without
 * depending on the kernel time API directly.
 */
void wcnss_get_monotonic_boottime(struct timespec *ts)
{
	get_monotonic_boottime(ts);
}
EXPORT_SYMBOL(wcnss_get_monotonic_boottime);
+
+struct platform_device *wcnss_get_platform_device(void)
+{
+	if (penv && penv->pdev)
+		return penv->pdev;
+	return NULL;
+}
+EXPORT_SYMBOL(wcnss_get_platform_device);
+
+struct wcnss_wlan_config *wcnss_get_wlan_config(void)
+{
+	if (penv && penv->pdev)
+		return &penv->wlan_config;
+	return NULL;
+}
+EXPORT_SYMBOL(wcnss_get_wlan_config);
+
+int wcnss_is_hw_pronto_ver3(void)
+{
+	if (penv && penv->pdev) {
+		if (penv->wlan_config.is_pronto_v3)
+			return penv->wlan_config.is_pronto_v3;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(wcnss_is_hw_pronto_ver3);
+
+int wcnss_device_ready(void)
+{
+	if (penv && penv->pdev && penv->nv_downloaded &&
+	    !wcnss_device_is_shutdown())
+		return 1;
+	return 0;
+}
+EXPORT_SYMBOL(wcnss_device_ready);
+
+bool wcnss_cbc_complete(void)
+{
+	if (penv && penv->pdev && penv->is_cbc_done &&
+	    !wcnss_device_is_shutdown())
+		return true;
+	return false;
+}
+EXPORT_SYMBOL(wcnss_cbc_complete);
+
+int wcnss_device_is_shutdown(void)
+{
+	if (penv && penv->is_shutdown)
+		return 1;
+	return 0;
+}
+EXPORT_SYMBOL(wcnss_device_is_shutdown);
+
+struct resource *wcnss_wlan_get_memory_map(struct device *dev)
+{
+	if (penv && dev && (dev == &penv->pdev->dev) && penv->smd_channel_ready)
+		return penv->mmio_res;
+	return NULL;
+}
+EXPORT_SYMBOL(wcnss_wlan_get_memory_map);
+
+int wcnss_wlan_get_dxe_tx_irq(struct device *dev)
+{
+	if (penv && dev && (dev == &penv->pdev->dev) &&
+	    penv->tx_irq_res && penv->smd_channel_ready)
+		return penv->tx_irq_res->start;
+	return WCNSS_WLAN_IRQ_INVALID;
+}
+EXPORT_SYMBOL(wcnss_wlan_get_dxe_tx_irq);
+
+int wcnss_wlan_get_dxe_rx_irq(struct device *dev)
+{
+	if (penv && dev && (dev == &penv->pdev->dev) &&
+	    penv->rx_irq_res && penv->smd_channel_ready)
+		return penv->rx_irq_res->start;
+	return WCNSS_WLAN_IRQ_INVALID;
+}
+EXPORT_SYMBOL(wcnss_wlan_get_dxe_rx_irq);
+
+void wcnss_wlan_register_pm_ops(struct device *dev,
+				const struct dev_pm_ops *pm_ops)
+{
+	if (penv && dev && (dev == &penv->pdev->dev) && pm_ops)
+		penv->pm_ops = pm_ops;
+}
+EXPORT_SYMBOL(wcnss_wlan_register_pm_ops);
+
+void wcnss_wlan_unregister_pm_ops(struct device *dev,
+				  const struct dev_pm_ops *pm_ops)
+{
+	if (penv && dev && (dev == &penv->pdev->dev) && pm_ops) {
+		if (!penv->pm_ops) {
+			pr_err("%s: pm_ops is already unregistered.\n",
+			       __func__);
+			return;
+		}
+
+		if (pm_ops->suspend != penv->pm_ops->suspend ||
+		    pm_ops->resume != penv->pm_ops->resume)
+			pr_err("PM APIs dont match with registered APIs\n");
+		penv->pm_ops = NULL;
+	}
+}
+EXPORT_SYMBOL(wcnss_wlan_unregister_pm_ops);
+
+void wcnss_register_thermal_mitigation(struct device *dev,
+				       void (*tm_notify)(struct device *, int))
+{
+	if (penv && dev && tm_notify)
+		penv->tm_notify = tm_notify;
+}
+EXPORT_SYMBOL(wcnss_register_thermal_mitigation);
+
+void wcnss_unregister_thermal_mitigation(
+				void (*tm_notify)(struct device *, int))
+{
+	if (penv && tm_notify) {
+		if (tm_notify != penv->tm_notify)
+			pr_err("tm_notify doesn't match registered\n");
+		penv->tm_notify = NULL;
+	}
+}
+EXPORT_SYMBOL(wcnss_unregister_thermal_mitigation);
+
+unsigned int wcnss_get_serial_number(void)
+{
+	if (penv) {
+		penv->serial_number = socinfo_get_serial_number();
+		pr_info("%s: Device serial number: %u\n",
+			__func__, penv->serial_number);
+		return penv->serial_number;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(wcnss_get_serial_number);
+
/*
 * wcnss_get_wlan_mac_address() - copy the cached NV MAC address.
 * @mac_addr: destination buffer of at least WLAN_MAC_ADDR_SIZE bytes
 *
 * The address is the one previously stored via the WCNSS_USR_WLAN_MAC_ADDR
 * control command (see process_usr_ctrl_cmd()).  Returns 0, or -ENODEV if
 * the driver has not been probed.
 */
int wcnss_get_wlan_mac_address(char mac_addr[WLAN_MAC_ADDR_SIZE])
{
	if (!penv)
		return -ENODEV;

	memcpy(mac_addr, penv->wlan_nv_mac_addr, WLAN_MAC_ADDR_SIZE);
	pr_debug("%s: Get MAC Addr:" MAC_ADDRESS_STR "\n", __func__,
		 penv->wlan_nv_mac_addr[0], penv->wlan_nv_mac_addr[1],
		 penv->wlan_nv_mac_addr[2], penv->wlan_nv_mac_addr[3],
		 penv->wlan_nv_mac_addr[4], penv->wlan_nv_mac_addr[5]);
	return 0;
}
EXPORT_SYMBOL(wcnss_get_wlan_mac_address);
+
/* Module parameter gating the Riva suspend/resume register notifications
 * (see wcnss_suspend_notify()/wcnss_resume_notify()).
 */
static int enable_wcnss_suspend_notify;

/* Setter for the module parameter; logs when the feature is turned on. */
static int enable_wcnss_suspend_notify_set(const char *val,
					   struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (ret)
		return ret;

	if (enable_wcnss_suspend_notify)
		pr_debug("Suspend notification activated for wcnss\n");

	return 0;
}
module_param_call(enable_wcnss_suspend_notify, enable_wcnss_suspend_notify_set,
		  param_get_int, &enable_wcnss_suspend_notify, 0644);
+
+int wcnss_xo_auto_detect_enabled(void)
+{
+	return (has_autodetect_xo == 1 ? 1 : 0);
+}
+
+void wcnss_set_iris_xo_mode(int iris_xo_mode_set)
+{
+	penv->iris_xo_mode_set = iris_xo_mode_set;
+}
+EXPORT_SYMBOL(wcnss_set_iris_xo_mode);
+
+int wcnss_wlan_iris_xo_mode(void)
+{
+	if (penv && penv->pdev && penv->smd_channel_ready)
+		return penv->iris_xo_mode_set;
+	return -ENODEV;
+}
+EXPORT_SYMBOL(wcnss_wlan_iris_xo_mode);
+
+int wcnss_wlan_dual_band_disabled(void)
+{
+	if (penv && penv->pdev)
+		return penv->is_dual_band_disabled;
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL(wcnss_wlan_dual_band_disabled);
+
/*
 * wcnss_suspend_notify() - tell Riva firmware the APPS side is suspending.
 *
 * No-op unless the enable_wcnss_suspend_notify module parameter is set,
 * and only applies to Riva (Pronto hardware returns early).  Sets the
 * RIVA_SUSPEND_BIT in the PMU spare register under reg_spinlock; the
 * matching clear is in wcnss_resume_notify().
 */
void wcnss_suspend_notify(void)
{
	void __iomem *pmu_spare_reg;
	u32 reg = 0;
	unsigned long flags;

	if (!enable_wcnss_suspend_notify)
		return;

	if (wcnss_hardware_type() == WCNSS_PRONTO_HW)
		return;

	/* For Riva */
	/* Read-modify-write of the spare register, serialized by
	 * reg_spinlock against wcnss_resume_notify().
	 */
	pmu_spare_reg = penv->msm_wcnss_base + RIVA_SPARE_OFFSET;
	spin_lock_irqsave(&reg_spinlock, flags);
	reg = readl_relaxed(pmu_spare_reg);
	reg |= RIVA_SUSPEND_BIT;
	writel_relaxed(reg, pmu_spare_reg);
	spin_unlock_irqrestore(&reg_spinlock, flags);
}
EXPORT_SYMBOL(wcnss_suspend_notify);
+
/*
 * wcnss_resume_notify() - tell Riva firmware the APPS side has resumed.
 *
 * Counterpart of wcnss_suspend_notify(): clears RIVA_SUSPEND_BIT in the
 * PMU spare register under reg_spinlock.  No-op unless the
 * enable_wcnss_suspend_notify module parameter is set, and Riva-only.
 */
void wcnss_resume_notify(void)
{
	void __iomem *pmu_spare_reg;
	u32 reg = 0;
	unsigned long flags;

	if (!enable_wcnss_suspend_notify)
		return;

	if (wcnss_hardware_type() == WCNSS_PRONTO_HW)
		return;

	/* For Riva */
	pmu_spare_reg = penv->msm_wcnss_base + RIVA_SPARE_OFFSET;

	spin_lock_irqsave(&reg_spinlock, flags);
	reg = readl_relaxed(pmu_spare_reg);
	reg &= ~RIVA_SUSPEND_BIT;
	writel_relaxed(reg, pmu_spare_reg);
	spin_unlock_irqrestore(&reg_spinlock, flags);
}
EXPORT_SYMBOL(wcnss_resume_notify);
+
+static int wcnss_wlan_suspend(struct device *dev)
+{
+	if (penv && dev && (dev == &penv->pdev->dev) &&
+	    penv->smd_channel_ready &&
+	    penv->pm_ops && penv->pm_ops->suspend)
+		return penv->pm_ops->suspend(dev);
+	return 0;
+}
+
+static int wcnss_wlan_resume(struct device *dev)
+{
+	if (penv && dev && (dev == &penv->pdev->dev) &&
+	    penv->smd_channel_ready &&
+	    penv->pm_ops && penv->pm_ops->resume)
+		return penv->pm_ops->resume(dev);
+	return 0;
+}
+
+void wcnss_prevent_suspend(void)
+{
+	if (penv)
+		__pm_stay_awake(&penv->wcnss_wake_lock);
+}
+EXPORT_SYMBOL(wcnss_prevent_suspend);
+
+void wcnss_allow_suspend(void)
+{
+	if (penv)
+		__pm_relax(&penv->wcnss_wake_lock);
+}
+EXPORT_SYMBOL(wcnss_allow_suspend);
+
+int wcnss_hardware_type(void)
+{
+	if (penv)
+		return penv->wcnss_hw_type;
+	else
+		return -ENODEV;
+}
+EXPORT_SYMBOL(wcnss_hardware_type);
+
+int fw_cal_data_available(void)
+{
+	if (penv)
+		return penv->fw_cal_available;
+	else
+		return -ENODEV;
+}
+
+u32 wcnss_get_wlan_rx_buff_count(void)
+{
+	if (penv)
+		return penv->wlan_rx_buff_count;
+	else
+		return WCNSS_DEF_WLAN_RX_BUFF_COUNT;
+}
+EXPORT_SYMBOL(wcnss_get_wlan_rx_buff_count);
+
+int wcnss_set_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 ch_count)
+{
+	if (penv && unsafe_ch_list &&
+	    (ch_count <= WCNSS_MAX_CH_NUM)) {
+		memcpy((char *)penv->unsafe_ch_list,
+		       (char *)unsafe_ch_list, ch_count * sizeof(u16));
+		penv->unsafe_ch_count = ch_count;
+		return 0;
+	} else {
+		return -ENODEV;
+	}
+}
+EXPORT_SYMBOL(wcnss_set_wlan_unsafe_channel);
+
+int wcnss_get_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 buffer_size,
+				  u16 *ch_count)
+{
+	if (penv) {
+		if (buffer_size < penv->unsafe_ch_count * sizeof(u16))
+			return -ENODEV;
+		memcpy((char *)unsafe_ch_list,
+		       (char *)penv->unsafe_ch_list,
+		       penv->unsafe_ch_count * sizeof(u16));
+		*ch_count = penv->unsafe_ch_count;
+		return 0;
+	} else {
+		return -ENODEV;
+	}
+}
+EXPORT_SYMBOL(wcnss_get_wlan_unsafe_channel);
+
+static int wcnss_smd_tx(void *data, int len)
+{
+	int ret = 0;
+
+	ret = smd_write_avail(penv->smd_ch);
+	if (ret < len) {
+		pr_err("wcnss: no space available for smd frame\n");
+		return -ENOSPC;
+	}
+	ret = smd_write(penv->smd_ch, data, len);
+	if (ret < len) {
+		pr_err("wcnss: failed to write Command %d", len);
+		ret = -ENODEV;
+	}
+	return ret;
+}
+
+static int wcnss_get_battery_volt(int *result_uv)
+{
+	int rc = -1;
+	struct qpnp_vadc_result adc_result;
+
+	if (!penv->vadc_dev) {
+		pr_err("wcnss: not setting up vadc\n");
+		return rc;
+	}
+
+	rc = qpnp_vadc_read(penv->vadc_dev, VBAT_SNS, &adc_result);
+	if (rc) {
+		pr_err("error reading adc channel = %d, rc = %d\n",
+		       VBAT_SNS, rc);
+		return rc;
+	}
+
+	pr_info("Battery mvolts phy=%lld meas=0x%llx\n", adc_result.physical,
+		adc_result.measurement);
+	*result_uv = (int)adc_result.physical;
+
+	return 0;
+}
+
/*
 * wcnss_notify_vbat() - ADC threshold-monitor callback for battery voltage.
 * @state: which threshold fired (ADC_TM_LOW_STATE / ADC_TM_HIGH_STATE)
 * @ctx:   unused (penv is global)
 *
 * Flips the monitor to watch the opposite threshold (with WCNSS_VBATT_GUARD
 * hysteresis around WCNSS_VBATT_THRESHOLD), re-arms the measurement, and
 * schedules vbatt_work to report the new state to firmware.  All parameter
 * updates happen under vbat_monitor_mutex.
 */
static void wcnss_notify_vbat(enum qpnp_tm_state state, void *ctx)
{
	int rc = 0;

	mutex_lock(&penv->vbat_monitor_mutex);
	/* Cancel a pending report; it will be rescheduled below on success. */
	cancel_delayed_work_sync(&penv->vbatt_work);

	if (state == ADC_TM_LOW_STATE) {
		/* Went low: now watch for recovery above threshold+guard. */
		pr_debug("wcnss: low voltage notification triggered\n");
		penv->vbat_monitor_params.state_request =
			ADC_TM_HIGH_THR_ENABLE;
		penv->vbat_monitor_params.high_thr = WCNSS_VBATT_THRESHOLD +
		WCNSS_VBATT_GUARD;
		penv->vbat_monitor_params.low_thr = 0;
	} else if (state == ADC_TM_HIGH_STATE) {
		/* Went high: now watch for a drop below threshold-guard. */
		penv->vbat_monitor_params.state_request =
			ADC_TM_LOW_THR_ENABLE;
		penv->vbat_monitor_params.low_thr = WCNSS_VBATT_THRESHOLD -
		WCNSS_VBATT_GUARD;
		penv->vbat_monitor_params.high_thr = 0;
		pr_debug("wcnss: high voltage notification triggered\n");
	} else {
		pr_debug("wcnss: unknown voltage notification state: %d\n",
			 state);
		mutex_unlock(&penv->vbat_monitor_mutex);
		return;
	}
	pr_debug("wcnss: set low thr to %d and high to %d\n",
		 penv->vbat_monitor_params.low_thr,
		 penv->vbat_monitor_params.high_thr);

	rc = qpnp_adc_tm_channel_measure(penv->adc_tm_dev,
					 &penv->vbat_monitor_params);

	if (rc)
		pr_err("%s: tm setup failed: %d\n", __func__, rc);
	else
		schedule_delayed_work(&penv->vbatt_work,
				      msecs_to_jiffies(2000));

	mutex_unlock(&penv->vbat_monitor_mutex);
}
+
/*
 * wcnss_setup_vbat_monitoring() - arm the initial battery-voltage monitor.
 *
 * Programs both low and high thresholds at WCNSS_VBATT_THRESHOLD on the
 * VSYS or VBAT_SNS channel (depending on DT), with wcnss_notify_vbat() as
 * the threshold callback.  Returns 0 on success, -1 if no ADC-TM device
 * was set up, or the qpnp_adc_tm_channel_measure() error code.
 */
static int wcnss_setup_vbat_monitoring(void)
{
	int rc = -1;

	if (!penv->adc_tm_dev) {
		pr_err("wcnss: not setting up vbatt\n");
		return rc;
	}
	penv->vbat_monitor_params.low_thr = WCNSS_VBATT_THRESHOLD;
	penv->vbat_monitor_params.high_thr = WCNSS_VBATT_THRESHOLD;
	penv->vbat_monitor_params.state_request = ADC_TM_HIGH_LOW_THR_ENABLE;

	/* Channel choice comes from the qcom,has-vsys-adc-channel DT flag. */
	if (penv->is_vsys_adc_channel)
		penv->vbat_monitor_params.channel = VSYS;
	else
		penv->vbat_monitor_params.channel = VBAT_SNS;

	penv->vbat_monitor_params.btm_ctx = (void *)penv;
	penv->vbat_monitor_params.timer_interval = ADC_MEAS1_INTERVAL_1S;
	penv->vbat_monitor_params.threshold_notification = &wcnss_notify_vbat;
	pr_debug("wcnss: set low thr to %d and high to %d\n",
		 penv->vbat_monitor_params.low_thr,
		 penv->vbat_monitor_params.high_thr);

	rc = qpnp_adc_tm_channel_measure(penv->adc_tm_dev,
					 &penv->vbat_monitor_params);
	if (rc)
		pr_err("%s: tm setup failed: %d\n", __func__, rc);

	return rc;
}
+
+static void wcnss_send_vbatt_indication(struct work_struct *work)
+{
+	struct vbatt_message vbatt_msg;
+	int ret = 0;
+
+	vbatt_msg.hdr.msg_type = WCNSS_VBATT_LEVEL_IND;
+	vbatt_msg.hdr.msg_len = sizeof(struct vbatt_message);
+	vbatt_msg.vbatt.threshold = WCNSS_VBATT_THRESHOLD;
+
+	mutex_lock(&penv->vbat_monitor_mutex);
+	vbatt_msg.vbatt.curr_volt = penv->wlan_config.vbatt;
+	mutex_unlock(&penv->vbat_monitor_mutex);
+	pr_debug("wcnss: send curr_volt: %d to FW\n",
+		 vbatt_msg.vbatt.curr_volt);
+
+	ret = wcnss_smd_tx(&vbatt_msg, vbatt_msg.hdr.msg_len);
+	if (ret < 0)
+		pr_err("wcnss: smd tx failed\n");
+}
+
/*
 * wcnss_update_vbatt() - delayed work that reports vbatt state transitions.
 *
 * Derives the current side of the threshold from which monitor threshold
 * is armed (low_thr set => voltage is currently high, and vice versa) and
 * sends WCNSS_VBATT_HIGH/LOW to firmware only when the state actually
 * changed from fw_vbatt_state (or was never reported).  Serialized by
 * vbat_monitor_mutex against wcnss_notify_vbat().
 */
static void wcnss_update_vbatt(struct work_struct *work)
{
	struct vbatt_message vbatt_msg;
	int ret = 0;

	vbatt_msg.hdr.msg_type = WCNSS_VBATT_LEVEL_IND;
	vbatt_msg.hdr.msg_len = sizeof(struct vbatt_message);
	vbatt_msg.vbatt.threshold = WCNSS_VBATT_THRESHOLD;

	mutex_lock(&penv->vbat_monitor_mutex);
	if (penv->vbat_monitor_params.low_thr &&
	    (penv->fw_vbatt_state == WCNSS_VBATT_LOW ||
	     penv->fw_vbatt_state == WCNSS_CONFIG_UNSPECIFIED)) {
		vbatt_msg.vbatt.curr_volt = WCNSS_VBATT_HIGH;
		penv->fw_vbatt_state = WCNSS_VBATT_HIGH;
		pr_debug("wcnss: send HIGH BATT to FW\n");
	} else if (!penv->vbat_monitor_params.low_thr &&
		   (penv->fw_vbatt_state == WCNSS_VBATT_HIGH ||
		    penv->fw_vbatt_state == WCNSS_CONFIG_UNSPECIFIED)){
		vbatt_msg.vbatt.curr_volt = WCNSS_VBATT_LOW;
		penv->fw_vbatt_state = WCNSS_VBATT_LOW;
		pr_debug("wcnss: send LOW BATT to FW\n");
	} else {
		/* No state change: nothing to report. */
		mutex_unlock(&penv->vbat_monitor_mutex);
		return;
	}
	mutex_unlock(&penv->vbat_monitor_mutex);
	ret = wcnss_smd_tx(&vbatt_msg, vbatt_msg.hdr.msg_len);
	if (ret < 0)
		pr_err("wcnss: smd tx failed\n");
}
+
+static unsigned char wcnss_fw_status(void)
+{
+	int len = 0;
+	int rc = 0;
+
+	unsigned char fw_status = 0xFF;
+
+	len = smd_read_avail(penv->smd_ch);
+	if (len < 1) {
+		pr_err("%s: invalid firmware status", __func__);
+		return fw_status;
+	}
+
+	rc = smd_read(penv->smd_ch, &fw_status, 1);
+	if (rc < 0) {
+		pr_err("%s: incomplete data read from smd\n", __func__);
+		return fw_status;
+	}
+	return fw_status;
+}
+
+static void wcnss_send_cal_rsp(unsigned char fw_status)
+{
+	struct smd_msg_hdr *rsphdr;
+	unsigned char *msg = NULL;
+	int rc;
+
+	msg = kmalloc((sizeof(*rsphdr) + 1), GFP_KERNEL);
+	if (!msg)
+		return;
+
+	rsphdr = (struct smd_msg_hdr *)msg;
+	rsphdr->msg_type = WCNSS_CALDATA_UPLD_RSP;
+	rsphdr->msg_len = sizeof(struct smd_msg_hdr) + 1;
+	memcpy(msg + sizeof(struct smd_msg_hdr), &fw_status, 1);
+
+	rc = wcnss_smd_tx(msg, rsphdr->msg_len);
+	if (rc < 0)
+		pr_err("wcnss: smd tx failed\n");
+
+	kfree(msg);
+}
+
+/* Collect calibrated data from WCNSS */
+void extract_cal_data(int len)
+{
+	int rc;
+	struct cal_data_params calhdr;
+	unsigned char fw_status = WCNSS_RESP_FAIL;
+
+	if (len < sizeof(struct cal_data_params)) {
+		pr_err("wcnss: incomplete cal header length\n");
+		return;
+	}
+
+	mutex_lock(&penv->dev_lock);
+	rc = smd_read(penv->smd_ch, (unsigned char *)&calhdr,
+		      sizeof(struct cal_data_params));
+	if (rc < sizeof(struct cal_data_params)) {
+		pr_err("wcnss: incomplete cal header read from smd\n");
+		mutex_unlock(&penv->dev_lock);
+		return;
+	}
+
+	if (penv->fw_cal_exp_frag != calhdr.frag_number) {
+		pr_err("wcnss: Invalid frgament");
+		goto unlock_exit;
+	}
+
+	if (calhdr.frag_size > WCNSS_MAX_FRAME_SIZE) {
+		pr_err("wcnss: Invalid fragment size");
+		goto unlock_exit;
+	}
+
+	if (penv->fw_cal_available) {
+		/* ignore cal upload from SSR */
+		smd_read(penv->smd_ch, NULL, calhdr.frag_size);
+		penv->fw_cal_exp_frag++;
+		if (calhdr.msg_flags & LAST_FRAGMENT) {
+			penv->fw_cal_exp_frag = 0;
+			goto unlock_exit;
+		}
+		mutex_unlock(&penv->dev_lock);
+		return;
+	}
+
+	if (calhdr.frag_number == 0) {
+		if (calhdr.total_size > MAX_CALIBRATED_DATA_SIZE) {
+			pr_err("wcnss: Invalid cal data size %d",
+			       calhdr.total_size);
+			goto unlock_exit;
+		}
+		kfree(penv->fw_cal_data);
+		penv->fw_cal_rcvd = 0;
+		penv->fw_cal_data = kmalloc(calhdr.total_size,
+				GFP_KERNEL);
+		if (!penv->fw_cal_data) {
+			smd_read(penv->smd_ch, NULL, calhdr.frag_size);
+			goto unlock_exit;
+		}
+	}
+
+	if (penv->fw_cal_rcvd + calhdr.frag_size >
+			MAX_CALIBRATED_DATA_SIZE) {
+		pr_err("calibrated data size is more than expected %d",
+		       penv->fw_cal_rcvd + calhdr.frag_size);
+		penv->fw_cal_exp_frag = 0;
+		penv->fw_cal_rcvd = 0;
+		smd_read(penv->smd_ch, NULL, calhdr.frag_size);
+		goto unlock_exit;
+	}
+
+	rc = smd_read(penv->smd_ch, penv->fw_cal_data + penv->fw_cal_rcvd,
+		      calhdr.frag_size);
+	if (rc < calhdr.frag_size)
+		goto unlock_exit;
+
+	penv->fw_cal_exp_frag++;
+	penv->fw_cal_rcvd += calhdr.frag_size;
+
+	if (calhdr.msg_flags & LAST_FRAGMENT) {
+		penv->fw_cal_exp_frag = 0;
+		penv->fw_cal_available = true;
+		pr_info("wcnss: cal data collection completed\n");
+	}
+	mutex_unlock(&penv->dev_lock);
+	wake_up(&penv->read_wait);
+
+	if (penv->fw_cal_available) {
+		fw_status = WCNSS_RESP_SUCCESS;
+		wcnss_send_cal_rsp(fw_status);
+	}
+	return;
+
+unlock_exit:
+	mutex_unlock(&penv->dev_lock);
+	wcnss_send_cal_rsp(fw_status);
+}
+
/*
 * wcnssctrl_rx_handler() - worker that dispatches one inbound SMD message.
 *
 * Reads the smd_msg_hdr, then branches on msg_type:
 *  - WCNSS_VERSION_RSP: record FW version; schedule NV download (and, on
 *    Pronto, also request the build version string).
 *  - WCNSS_BUILD_VER_RSP: log the build string.
 *  - WCNSS_NVBIN_DNLD_RSP / WCNSS_CALDATA_DNLD_RSP: mark NV downloaded and
 *    read the trailing status byte.
 *  - WCNSS_CBC_COMPLETE_IND: mark cold-boot calibration done.
 *  - WCNSS_CALDATA_UPLD_REQ: hand the payload to extract_cal_data().
 * Oversized frames are drained and dropped; short reads abort the message.
 */
static void wcnssctrl_rx_handler(struct work_struct *worker)
{
	int len = 0;
	int rc = 0;
	unsigned char buf[sizeof(struct wcnss_version)];
	unsigned char build[WCNSS_MAX_BUILD_VER_LEN + 1];
	struct smd_msg_hdr *phdr;
	struct smd_msg_hdr smd_msg;
	struct wcnss_version *pversion;
	int hw_type;
	unsigned char fw_status = 0;

	len = smd_read_avail(penv->smd_ch);
	if (len > WCNSS_MAX_FRAME_SIZE) {
		pr_err("wcnss: frame larger than the allowed size\n");
		/* Drain the oversized frame to keep the channel in sync. */
		smd_read(penv->smd_ch, NULL, len);
		return;
	}
	if (len < sizeof(struct smd_msg_hdr)) {
		pr_err("wcnss: incomplete header available len = %d\n", len);
		return;
	}

	rc = smd_read(penv->smd_ch, buf, sizeof(struct smd_msg_hdr));
	if (rc < sizeof(struct smd_msg_hdr)) {
		pr_err("wcnss: incomplete header read from smd\n");
		return;
	}
	/* From here on, len is the payload size only. */
	len -= sizeof(struct smd_msg_hdr);

	phdr = (struct smd_msg_hdr *)buf;

	switch (phdr->msg_type) {
	case WCNSS_VERSION_RSP:
		if (len != sizeof(struct wcnss_version)
				- sizeof(struct smd_msg_hdr)) {
			pr_err("wcnss: invalid version data from wcnss %d\n",
			       len);
			return;
		}
		rc = smd_read(penv->smd_ch, buf + sizeof(struct smd_msg_hdr),
			      len);
		if (rc < len) {
			pr_err("wcnss: incomplete data read from smd\n");
			return;
		}
		pversion = (struct wcnss_version *)buf;
		penv->fw_major = pversion->major;
		penv->fw_minor = pversion->minor;
		snprintf(penv->wcnss_version, WCNSS_VERSION_LEN,
			 "%02x%02x%02x%02x", pversion->major, pversion->minor,
			 pversion->version, pversion->revision);
		pr_info("wcnss: version %s\n", penv->wcnss_version);
		/* schedule work to download nvbin to ccpu */
		hw_type = wcnss_hardware_type();
		switch (hw_type) {
		case WCNSS_RIVA_HW:
			/* supported only if riva major >= 1 and minor >= 4 */
			if ((pversion->major >= 1) && (pversion->minor >= 4)) {
				pr_info("wcnss: schedule dnld work for riva\n");
				schedule_work(&penv->wcnssctrl_nvbin_dnld_work);
			}
			break;

		case WCNSS_PRONTO_HW:
			/* Ask firmware for its build version string too. */
			smd_msg.msg_type = WCNSS_BUILD_VER_REQ;
			smd_msg.msg_len = sizeof(smd_msg);
			rc = wcnss_smd_tx(&smd_msg, smd_msg.msg_len);
			if (rc < 0)
				pr_err("wcnss: smd tx failed: %s\n", __func__);

			/* supported only if pronto major >= 1 and minor >= 4 */
			if ((pversion->major >= 1) && (pversion->minor >= 4)) {
				pr_info("wcnss: schedule dnld work for pronto\n");
				schedule_work(&penv->wcnssctrl_nvbin_dnld_work);
			}
			break;

		default:
			pr_info("wcnss: unknown hw type (%d), will not schedule dnld work\n",
				hw_type);
			break;
		}
		break;

	case WCNSS_BUILD_VER_RSP:
		if (len > WCNSS_MAX_BUILD_VER_LEN) {
			pr_err("wcnss: invalid build version data from wcnss %d\n",
			       len);
			return;
		}
		rc = smd_read(penv->smd_ch, build, len);
		if (rc < len) {
			pr_err("wcnss: incomplete data read from smd\n");
			return;
		}
		/* NUL-terminate; build[] has room for len + 1 bytes. */
		build[len] = 0;
		pr_info("wcnss: build version %s\n", build);
		break;

	case WCNSS_NVBIN_DNLD_RSP:
		penv->nv_downloaded = true;
		fw_status = wcnss_fw_status();
		pr_debug("wcnss: received WCNSS_NVBIN_DNLD_RSP from ccpu %u\n",
			 fw_status);
		if (fw_status != WAIT_FOR_CBC_IND)
			penv->is_cbc_done = 1;
		wcnss_setup_vbat_monitoring();
		break;

	case WCNSS_CALDATA_DNLD_RSP:
		penv->nv_downloaded = true;
		fw_status = wcnss_fw_status();
		pr_debug("wcnss: received WCNSS_CALDATA_DNLD_RSP from ccpu %u\n",
			 fw_status);
		break;
	case WCNSS_CBC_COMPLETE_IND:
		penv->is_cbc_done = 1;
		pr_debug("wcnss: received WCNSS_CBC_COMPLETE_IND from FW\n");
		break;

	case WCNSS_CALDATA_UPLD_REQ:
		extract_cal_data(len);
		break;

	default:
		pr_err("wcnss: invalid message type %d\n", phdr->msg_type);
	}
}
+
+static void wcnss_send_version_req(struct work_struct *worker)
+{
+	struct smd_msg_hdr smd_msg;
+	int ret = 0;
+
+	smd_msg.msg_type = WCNSS_VERSION_REQ;
+	smd_msg.msg_len = sizeof(smd_msg);
+	ret = wcnss_smd_tx(&smd_msg, smd_msg.msg_len);
+	if (ret < 0)
+		pr_err("wcnss: smd tx failed\n");
+}
+
/*
 * wcnss_send_pm_config() - forward the DT "qcom,wcnss-pm" table to firmware.
 *
 * Reads the u32 array from the device-tree property and sends it as a
 * WCNSS_PM_CONFIG_REQ message.  Silently returns when the property is
 * absent or allocation fails.
 */
static void wcnss_send_pm_config(struct work_struct *worker)
{
	struct smd_msg_hdr *hdr;
	unsigned char *msg = NULL;
	int rc, prop_len;
	u32 *payload;

	if (!of_find_property(penv->pdev->dev.of_node,
			      "qcom,wcnss-pm", &prop_len))
		return;

	/* prop_len is in bytes here; converted to a u32 count below. */
	msg = kmalloc((sizeof(struct smd_msg_hdr) + prop_len), GFP_KERNEL);
	if (!msg)
		return;

	payload = (u32 *)(msg + sizeof(struct smd_msg_hdr));

	prop_len /= sizeof(int);

	rc = of_property_read_u32_array(penv->pdev->dev.of_node,
					"qcom,wcnss-pm", payload, prop_len);
	if (rc < 0) {
		pr_err("wcnss: property read failed\n");
		kfree(msg);
		return;
	}

	/* NOTE(review): this debug print assumes the property holds at
	 * least 6 entries — confirm against the DT binding.
	 */
	pr_debug("%s:size=%d: <%d, %d, %d, %d, %d %d>\n", __func__,
		 prop_len, *payload, *(payload + 1), *(payload + 2),
		 *(payload + 3), *(payload + 4), *(payload + 5));

	hdr = (struct smd_msg_hdr *)msg;
	hdr->msg_type = WCNSS_PM_CONFIG_REQ;
	hdr->msg_len = sizeof(struct smd_msg_hdr) + (prop_len * sizeof(int));

	rc = wcnss_smd_tx(msg, hdr->msg_len);
	if (rc < 0)
		pr_err("wcnss: smd tx failed\n");

	kfree(msg);
}
+
/* Work item: drop the PM-QoS request that was blocking power collapse. */
static void wcnss_pm_qos_enable_pc(struct work_struct *worker)
{
	wcnss_disable_pc_remove_req();
}
+
/* Serializes NV/cal downloads (readers) against system suspend (writer);
 * see wcnss_pm_notify() and wcnss_nvbin_dnld().
 */
static DECLARE_RWSEM(wcnss_pm_sem);
+
/*
 * wcnss_nvbin_dnld() - push the NV blob (NVBIN_FILE) to firmware.
 *
 * Loads the firmware image, skips its 4-byte validity bitmap, and streams
 * the remainder as NV_FRAGMENT_SIZE-sized WCNSS_NVBIN_DNLD_REQ fragments.
 * Each send is retried up to 3 times on -ENOSPC with a 20 ms back-off.
 * Runs under a read lock on wcnss_pm_sem so a system suspend
 * (wcnss_pm_notify takes the write side) cannot interleave with the
 * download.
 */
static void wcnss_nvbin_dnld(void)
{
	int ret = 0;
	struct nvbin_dnld_req_msg *dnld_req_msg;
	unsigned short total_fragments = 0;
	unsigned short count = 0;
	unsigned short retry_count = 0;
	unsigned short cur_frag_size = 0;
	unsigned char *outbuffer = NULL;
	const void *nv_blob_addr = NULL;
	unsigned int nv_blob_size = 0;
	const struct firmware *nv = NULL;
	struct device *dev = &penv->pdev->dev;

	down_read(&wcnss_pm_sem);

	ret = request_firmware(&nv, NVBIN_FILE, dev);

	if (ret || !nv || !nv->data || !nv->size) {
		pr_err("wcnss: %s: request_firmware failed for %s (ret = %d)\n",
		       __func__, NVBIN_FILE, ret);
		goto out;
	}

	/* First 4 bytes in nv blob is validity bitmap.
	 * We cannot validate nv, so skip those 4 bytes.
	 */
	nv_blob_addr = nv->data + 4;
	nv_blob_size = nv->size - 4;

	total_fragments = TOTALFRAGMENTS(nv_blob_size);

	pr_info("wcnss: NV bin size: %d, total_fragments: %d\n",
		nv_blob_size, total_fragments);

	/* get buffer for nv bin dnld req message */
	outbuffer = kmalloc((sizeof(struct nvbin_dnld_req_msg) +
			     NV_FRAGMENT_SIZE), GFP_KERNEL);
	if (!outbuffer)
		goto err_free_nv;

	dnld_req_msg = (struct nvbin_dnld_req_msg *)outbuffer;

	dnld_req_msg->hdr.msg_type = WCNSS_NVBIN_DNLD_REQ;
	dnld_req_msg->dnld_req_params.msg_flags = 0;

	for (count = 0; count < total_fragments; count++) {
		dnld_req_msg->dnld_req_params.frag_number = count;

		if (count == (total_fragments - 1)) {
			/* last fragment, take care of boundary condition */
			cur_frag_size = nv_blob_size % NV_FRAGMENT_SIZE;
			if (!cur_frag_size)
				cur_frag_size = NV_FRAGMENT_SIZE;

			dnld_req_msg->dnld_req_params.msg_flags |=
				LAST_FRAGMENT;
			dnld_req_msg->dnld_req_params.msg_flags |=
				CAN_RECEIVE_CALDATA;
		} else {
			cur_frag_size = NV_FRAGMENT_SIZE;
			dnld_req_msg->dnld_req_params.msg_flags &=
				~LAST_FRAGMENT;
		}

		dnld_req_msg->dnld_req_params.nvbin_buffer_size =
			cur_frag_size;

		dnld_req_msg->hdr.msg_len =
			sizeof(struct nvbin_dnld_req_msg) + cur_frag_size;

		/* copy NV fragment */
		memcpy((outbuffer + sizeof(struct nvbin_dnld_req_msg)),
		       (nv_blob_addr + count * NV_FRAGMENT_SIZE),
		       cur_frag_size);

		ret = wcnss_smd_tx(outbuffer, dnld_req_msg->hdr.msg_len);

		retry_count = 0;
		while ((ret == -ENOSPC) && (retry_count <= 3)) {
			pr_debug("wcnss: %s: smd tx failed, ENOSPC\n",
				 __func__);
			pr_debug("fragment: %d, len: %d, TotFragments: %d, retry_count: %d\n",
				 count, dnld_req_msg->hdr.msg_len,
				 total_fragments, retry_count);

			/* wait and try again */
			msleep(20);
			retry_count++;
			ret = wcnss_smd_tx(outbuffer,
					   dnld_req_msg->hdr.msg_len);
		}

		if (ret < 0) {
			pr_err("wcnss: %s: smd tx failed\n", __func__);
			pr_err("fragment %d, len: %d, TotFragments: %d, retry_count: %d\n",
			       count, dnld_req_msg->hdr.msg_len,
			       total_fragments, retry_count);
			goto err_dnld;
		}
	}

	/* The success path deliberately falls through into the cleanup
	 * labels below.
	 */
err_dnld:
	/* free buffer */
	kfree(outbuffer);

err_free_nv:
	/* release firmware */
	release_firmware(nv);

out:
	up_read(&wcnss_pm_sem);
}
+
/*
 * wcnss_caldata_dnld() - stream a calibration-data blob to firmware.
 * @cal_data:      blob to send
 * @cal_data_size: blob length in bytes
 * @msg_to_follow: set MESSAGE_TO_FOLLOW on the last fragment (more
 *                 messages, e.g. the NV download, will come after)
 *
 * Same fragmentation scheme as wcnss_nvbin_dnld(): NV_FRAGMENT_SIZE
 * WCNSS_CALDATA_DNLD_REQ fragments, each retried up to 3 times on
 * -ENOSPC with a 20 ms back-off.
 */
static void wcnss_caldata_dnld(const void *cal_data,
			       unsigned int cal_data_size, bool msg_to_follow)
{
	int ret = 0;
	struct cal_data_msg *cal_msg;
	unsigned short total_fragments = 0;
	unsigned short count = 0;
	unsigned short retry_count = 0;
	unsigned short cur_frag_size = 0;
	unsigned char *outbuffer = NULL;

	total_fragments = TOTALFRAGMENTS(cal_data_size);

	outbuffer = kmalloc((sizeof(struct cal_data_msg) +
			     NV_FRAGMENT_SIZE), GFP_KERNEL);
	if (!outbuffer)
		return;

	cal_msg = (struct cal_data_msg *)outbuffer;

	cal_msg->hdr.msg_type = WCNSS_CALDATA_DNLD_REQ;
	cal_msg->cal_params.msg_flags = 0;

	for (count = 0; count < total_fragments; count++) {
		cal_msg->cal_params.frag_number = count;

		if (count == (total_fragments - 1)) {
			/* Last fragment may be short. */
			cur_frag_size = cal_data_size % NV_FRAGMENT_SIZE;
			if (!cur_frag_size)
				cur_frag_size = NV_FRAGMENT_SIZE;

			cal_msg->cal_params.msg_flags
			    |= LAST_FRAGMENT;
			if (msg_to_follow)
				cal_msg->cal_params.msg_flags |=
					MESSAGE_TO_FOLLOW;
		} else {
			cur_frag_size = NV_FRAGMENT_SIZE;
			cal_msg->cal_params.msg_flags &=
				~LAST_FRAGMENT;
		}

		cal_msg->cal_params.total_size = cal_data_size;
		cal_msg->cal_params.frag_size =
			cur_frag_size;

		cal_msg->hdr.msg_len =
			sizeof(struct cal_data_msg) + cur_frag_size;

		memcpy((outbuffer + sizeof(struct cal_data_msg)),
		       (cal_data + count * NV_FRAGMENT_SIZE),
		       cur_frag_size);

		ret = wcnss_smd_tx(outbuffer, cal_msg->hdr.msg_len);

		retry_count = 0;
		while ((ret == -ENOSPC) && (retry_count <= 3)) {
			pr_debug("wcnss: %s: smd tx failed, ENOSPC\n",
				 __func__);
			pr_debug("fragment: %d, len: %d, TotFragments: %d, retry_count: %d\n",
				 count, cal_msg->hdr.msg_len,
				 total_fragments, retry_count);

			/* wait and try again */
			msleep(20);
			retry_count++;
			ret = wcnss_smd_tx(outbuffer,
					   cal_msg->hdr.msg_len);
		}

		if (ret < 0) {
			pr_err("wcnss: %s: smd tx failed\n", __func__);
			pr_err("fragment %d, len: %d, TotFragments: %d, retry_count: %d\n",
			       count, cal_msg->hdr.msg_len,
				total_fragments, retry_count);
			goto err_dnld;
		}
	}

err_dnld:
	/* free buffer */
	kfree(outbuffer);
}
+
/*
 * wcnss_nvbin_dnld_main() - worker: send cal data (if any) then the NV blob.
 *
 * When the firmware supports cal data, waits up to ~2.5 s (5 x 500 ms)
 * for user-space calibration data before falling back, preferring
 * firmware-collected cal data over user-supplied data.
 */
static void wcnss_nvbin_dnld_main(struct work_struct *worker)
{
	int retry = 0;

	if (!FW_CALDATA_CAPABLE())
		goto nv_download;

	/* NOTE(review): this compares IS_CAL_DATA_PRESENT against
	 * has_calibrated_data (i.e. "user space has not declared cal data
	 * present") — confirm the intended precedence; the expression
	 * reads oddly but is preserved as-is.
	 */
	if (!penv->fw_cal_available && IS_CAL_DATA_PRESENT
		!= has_calibrated_data && !penv->user_cal_available) {
		while (!penv->user_cal_available && retry++ < 5)
			msleep(500);
	}
	if (penv->fw_cal_available) {
		pr_info_ratelimited("wcnss: cal download, using fw cal");
		wcnss_caldata_dnld(penv->fw_cal_data, penv->fw_cal_rcvd, true);

	} else if (penv->user_cal_available) {
		pr_info_ratelimited("wcnss: cal download, using user cal");
		wcnss_caldata_dnld(penv->user_cal_data,
				   penv->user_cal_rcvd, true);
	}

nv_download:
	pr_info_ratelimited("wcnss: NV download");
	wcnss_nvbin_dnld();
}
+
+static int wcnss_pm_notify(struct notifier_block *b,
+			   unsigned long event, void *p)
+{
+	switch (event) {
+	case PM_SUSPEND_PREPARE:
+		down_write(&wcnss_pm_sem);
+		break;
+
+	case PM_POST_SUSPEND:
+		up_write(&wcnss_pm_sem);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
/* Registered with the PM core so suspend waits for in-flight downloads. */
static struct notifier_block wcnss_pm_notifier = {
	.notifier_call = wcnss_pm_notify,
};
+
+static int wcnss_ctrl_open(struct inode *inode, struct file *file)
+{
+	int rc = 0;
+
+	if (!penv || penv->ctrl_device_opened)
+		return -EFAULT;
+
+	penv->ctrl_device_opened = 1;
+
+	return rc;
+}
+
/*
 * process_usr_ctrl_cmd() - handle one command from the control device.
 * @buf: command buffer; bytes 0-1 are a big-endian command id, the rest
 *       is the command payload
 * @len: buffer length (already bounds-checked by wcnss_ctrl_write())
 *
 * WCNSS_USR_HAS_CAL_DATA stores whether user space has calibration data;
 * WCNSS_USR_WLAN_MAC_ADDR caches the WLAN MAC address from the payload.
 */
void process_usr_ctrl_cmd(u8 *buf, size_t len)
{
	u16 cmd = buf[0] << 8 | buf[1];

	switch (cmd) {
	case WCNSS_USR_HAS_CAL_DATA:
		/* NOTE(review): an out-of-range value is logged but still
		 * stored — preserved behavior; confirm that is intended.
		 */
		if (buf[2] > 1)
			pr_err("%s: Invalid data for cal %d\n", __func__,
			       buf[2]);
		has_calibrated_data = buf[2];
		break;
	case WCNSS_USR_WLAN_MAC_ADDR:
		memcpy(&penv->wlan_nv_mac_addr,  &buf[2],
		       sizeof(penv->wlan_nv_mac_addr));
		pr_debug("%s: MAC Addr:" MAC_ADDRESS_STR "\n", __func__,
			 penv->wlan_nv_mac_addr[0], penv->wlan_nv_mac_addr[1],
			 penv->wlan_nv_mac_addr[2], penv->wlan_nv_mac_addr[3],
			 penv->wlan_nv_mac_addr[4], penv->wlan_nv_mac_addr[5]);
		break;
	default:
		pr_err("%s: Invalid command %d\n", __func__, cmd);
		break;
	}
}
+
+/*
+ * Write handler for the wcnss ctrl device.  Copies a bounded command
+ * buffer from userspace and dispatches it to process_usr_ctrl_cmd().
+ * Returns the number of bytes consumed, or a negative errno.
+ */
+static ssize_t wcnss_ctrl_write(struct file *fp, const char __user
+			*user_buffer, size_t count, loff_t *position)
+{
+	int rc = 0;
+	u8 buf[WCNSS_MAX_CMD_LEN];
+
+	if (!penv || !penv->ctrl_device_opened || WCNSS_MAX_CMD_LEN < count ||
+	    count < WCNSS_MIN_CMD_LEN)
+		return -EFAULT;
+
+	mutex_lock(&penv->ctrl_lock);
+	rc = copy_from_user(buf, user_buffer, count);
+	if (rc == 0) {
+		process_usr_ctrl_cmd(buf, count);
+		/* .write must report bytes consumed, not 0, or userspace
+		 * write loops will spin forever.
+		 */
+		rc = count;
+	} else {
+		/* copy_from_user() returns bytes NOT copied; don't leak
+		 * that positive value as a byte count.
+		 */
+		rc = -EFAULT;
+	}
+	mutex_unlock(&penv->ctrl_lock);
+
+	return rc;
+}
+
+/* File operations for the wcnss ctrl device (user cal flag / MAC addr). */
+static const struct file_operations wcnss_ctrl_fops = {
+	.owner = THIS_MODULE,
+	.open = wcnss_ctrl_open,
+	.write = wcnss_ctrl_write,
+};
+
+/*
+ * One-time WCNSS bring-up: parse DT configuration, configure GPIOs,
+ * map register resources, set up work items and the wakeup source,
+ * then boot the subsystem via PIL (with bounded retries).  Invoked on
+ * the first userspace open of the wcnss device node.  Returns 0 on
+ * success or a negative errno; on failure penv is cleared so the
+ * driver does not keep a half-initialized environment.
+ */
+static int
+wcnss_trigger_config(struct platform_device *pdev)
+{
+	int ret = 0;
+	int rc;
+	struct qcom_wcnss_opts *pdata;
+	struct resource *res;
+	int is_pronto_vadc;
+	int is_pronto_v3;
+	int pil_retry = 0;
+	struct device_node *node = (&pdev->dev)->of_node;
+	int has_pronto_hw = of_property_read_bool(node, "qcom,has-pronto-hw");
+
+	is_pronto_vadc = of_property_read_bool(node, "qcom,is-pronto-vadc");
+	is_pronto_v3 = of_property_read_bool(node, "qcom,is-pronto-v3");
+
+	penv->is_vsys_adc_channel =
+		of_property_read_bool(node, "qcom,has-vsys-adc-channel");
+	penv->is_a2xb_split_reg =
+		of_property_read_bool(node, "qcom,has-a2xb-split-reg");
+
+	/* Fall back to the built-in default when DT omits the rx count. */
+	if (of_property_read_u32(node, "qcom,wlan-rx-buff-count",
+				 &penv->wlan_rx_buff_count)) {
+		penv->wlan_rx_buff_count = WCNSS_DEF_WLAN_RX_BUFF_COUNT;
+	}
+
+	rc = wcnss_parse_voltage_regulator(&penv->wlan_config, &pdev->dev);
+	if (rc) {
+		dev_err(&pdev->dev, "Failed to parse voltage regulators\n");
+		goto fail;
+	}
+
+	/* make sure we are only triggered once */
+	if (penv->triggered)
+		return 0;
+	penv->triggered = 1;
+
+	/* initialize the WCNSS device configuration */
+	pdata = pdev->dev.platform_data;
+	if (has_48mhz_xo == WCNSS_CONFIG_UNSPECIFIED) {
+		if (has_pronto_hw) {
+			has_48mhz_xo =
+			of_property_read_bool(node, "qcom,has-48mhz-xo");
+		} else {
+			/* NOTE(review): pdata is dereferenced without a NULL
+			 * check here — presumably non-DT platforms always
+			 * supply platform_data; confirm.
+			 */
+			has_48mhz_xo = pdata->has_48mhz_xo;
+		}
+	}
+	penv->wcnss_hw_type = (has_pronto_hw) ? WCNSS_PRONTO_HW : WCNSS_RIVA_HW;
+	penv->wlan_config.use_48mhz_xo = has_48mhz_xo;
+	penv->wlan_config.is_pronto_vadc = is_pronto_vadc;
+	penv->wlan_config.is_pronto_v3 = is_pronto_v3;
+
+	if (has_autodetect_xo == WCNSS_CONFIG_UNSPECIFIED && has_pronto_hw) {
+		has_autodetect_xo =
+			of_property_read_bool(node, "qcom,has-autodetect-xo");
+	}
+
+	penv->thermal_mitigation = 0;
+	strlcpy(penv->wcnss_version, "INVALID", WCNSS_VERSION_LEN);
+
+	/* Configure 5 wire GPIOs */
+	if (!has_pronto_hw) {
+		penv->gpios_5wire = platform_get_resource_byname(pdev,
+					IORESOURCE_IO, "wcnss_gpios_5wire");
+
+		/* allocate 5-wire GPIO resources */
+		if (!penv->gpios_5wire) {
+			dev_err(&pdev->dev, "insufficient IO resources\n");
+			ret = -ENOENT;
+			goto fail_gpio_res;
+		}
+		ret = wcnss_gpios_config(penv->gpios_5wire, true);
+	} else {
+		ret = wcnss_pronto_gpios_config(pdev, true);
+	}
+
+	if (ret) {
+		dev_err(&pdev->dev, "WCNSS gpios config failed.\n");
+		goto fail_gpio_res;
+	}
+
+	/* allocate resources */
+	penv->mmio_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"wcnss_mmio");
+	penv->tx_irq_res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+							"wcnss_wlantx_irq");
+	penv->rx_irq_res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+							"wcnss_wlanrx_irq");
+
+	if (!(penv->mmio_res && penv->tx_irq_res && penv->rx_irq_res)) {
+		dev_err(&pdev->dev, "insufficient resources\n");
+		ret = -ENOENT;
+		goto fail_res;
+	}
+	INIT_WORK(&penv->wcnssctrl_rx_work, wcnssctrl_rx_handler);
+	INIT_WORK(&penv->wcnssctrl_version_work, wcnss_send_version_req);
+	INIT_WORK(&penv->wcnss_pm_config_work, wcnss_send_pm_config);
+	INIT_WORK(&penv->wcnssctrl_nvbin_dnld_work, wcnss_nvbin_dnld_main);
+	INIT_DELAYED_WORK(&penv->wcnss_pm_qos_del_req, wcnss_pm_qos_enable_pc);
+
+	wakeup_source_init(&penv->wcnss_wake_lock, "wcnss");
+	/* Add pm_qos request to disable power collapse for DDR */
+	wcnss_disable_pc_add_req();
+
+	if (wcnss_hardware_type() == WCNSS_PRONTO_HW) {
+		res = platform_get_resource_byname(pdev,
+						   IORESOURCE_MEM,
+						   "pronto_phy_base");
+		if (!res) {
+			ret = -EIO;
+			pr_err("%s: resource pronto_phy_base failed\n",
+			       __func__);
+			goto fail_ioremap;
+		}
+		penv->msm_wcnss_base =
+			devm_ioremap_resource(&pdev->dev, res);
+	} else {
+		res = platform_get_resource_byname(pdev,
+						   IORESOURCE_MEM,
+						   "riva_phy_base");
+		if (!res) {
+			ret = -EIO;
+			pr_err("%s: resource riva_phy_base failed\n",
+			       __func__);
+			goto fail_ioremap;
+		}
+		penv->msm_wcnss_base =
+			devm_ioremap_resource(&pdev->dev, res);
+	}
+
+	/* NOTE(review): devm_ioremap_resource() returns an ERR_PTR on
+	 * failure, never NULL, so this and the similar "!ptr" checks on
+	 * the mappings below never fire on a mapping failure — confirm
+	 * and convert to IS_ERR() checks.
+	 */
+	if (!penv->msm_wcnss_base) {
+		ret = -ENOMEM;
+		pr_err("%s: ioremap wcnss physical failed\n", __func__);
+		goto fail_ioremap;
+	}
+
+	penv->wlan_config.msm_wcnss_base = penv->msm_wcnss_base;
+	if (wcnss_hardware_type() == WCNSS_RIVA_HW) {
+		res = platform_get_resource_byname(pdev,
+						   IORESOURCE_MEM,
+						   "riva_ccu_base");
+		if (!res) {
+			ret = -EIO;
+			pr_err("%s: resource riva_ccu_base failed\n",
+			       __func__);
+			goto fail_ioremap;
+		}
+		penv->riva_ccu_base =
+			devm_ioremap_resource(&pdev->dev, res);
+
+		if (!penv->riva_ccu_base) {
+			ret = -ENOMEM;
+			pr_err("%s: ioremap riva ccu physical failed\n",
+			       __func__);
+			goto fail_ioremap;
+		}
+	} else {
+		res = platform_get_resource_byname(pdev,
+						   IORESOURCE_MEM,
+						   "pronto_a2xb_base");
+		if (!res) {
+			ret = -EIO;
+			pr_err("%s: resource pronto_a2xb_base failed\n",
+			       __func__);
+			goto fail_ioremap;
+		}
+		penv->pronto_a2xb_base =
+			devm_ioremap_resource(&pdev->dev, res);
+
+		if (!penv->pronto_a2xb_base) {
+			ret = -ENOMEM;
+			pr_err("%s: ioremap pronto a2xb physical failed\n",
+			       __func__);
+			goto fail_ioremap;
+		}
+
+		res = platform_get_resource_byname(pdev,
+						   IORESOURCE_MEM,
+						   "pronto_ccpu_base");
+		if (!res) {
+			ret = -EIO;
+			pr_err("%s: resource pronto_ccpu_base failed\n",
+			       __func__);
+			goto fail_ioremap;
+		}
+		penv->pronto_ccpu_base =
+			devm_ioremap_resource(&pdev->dev, res);
+
+		if (!penv->pronto_ccpu_base) {
+			ret = -ENOMEM;
+			pr_err("%s: ioremap pronto ccpu physical failed\n",
+			       __func__);
+			goto fail_ioremap;
+		}
+
+		/* for reset FIQ */
+		res = platform_get_resource_byname(penv->pdev,
+						   IORESOURCE_MEM, "wcnss_fiq");
+		if (!res) {
+			dev_err(&pdev->dev, "insufficient irq mem resources\n");
+			ret = -ENOENT;
+			goto fail_ioremap;
+		}
+		penv->fiq_reg = ioremap_nocache(res->start, resource_size(res));
+		if (!penv->fiq_reg) {
+			pr_err("wcnss: %s: ioremap_nocache() failed fiq_reg addr:%pr\n",
+			       __func__, &res->start);
+			ret = -ENOMEM;
+			goto fail_ioremap;
+		}
+
+		res = platform_get_resource_byname(pdev,
+						   IORESOURCE_MEM,
+						   "pronto_saw2_base");
+		if (!res) {
+			ret = -EIO;
+			pr_err("%s: resource pronto_saw2_base failed\n",
+			       __func__);
+			goto fail_ioremap2;
+		}
+		penv->pronto_saw2_base =
+			devm_ioremap_resource(&pdev->dev, res);
+
+		if (!penv->pronto_saw2_base) {
+			pr_err("%s: ioremap wcnss physical(saw2) failed\n",
+			       __func__);
+			ret = -ENOMEM;
+			goto fail_ioremap2;
+		}
+
+		penv->pronto_pll_base =
+			penv->msm_wcnss_base + PRONTO_PLL_MODE_OFFSET;
+		/* NOTE(review): base + constant offset can never be NULL;
+		 * this check is dead code.
+		 */
+		if (!penv->pronto_pll_base) {
+			pr_err("%s: ioremap wcnss physical(pll) failed\n",
+			       __func__);
+			ret = -ENOMEM;
+			goto fail_ioremap2;
+		}
+
+		res = platform_get_resource_byname(pdev,
+						   IORESOURCE_MEM,
+						   "wlan_tx_phy_aborts");
+		if (!res) {
+			ret = -EIO;
+			pr_err("%s: resource wlan_tx_phy_aborts failed\n",
+			       __func__);
+			goto fail_ioremap2;
+		}
+		penv->wlan_tx_phy_aborts =
+			devm_ioremap_resource(&pdev->dev, res);
+
+		if (!penv->wlan_tx_phy_aborts) {
+			ret = -ENOMEM;
+			pr_err("%s: ioremap wlan TX PHY failed\n", __func__);
+			goto fail_ioremap2;
+		}
+
+		res = platform_get_resource_byname(pdev,
+						   IORESOURCE_MEM,
+						   "wlan_brdg_err_source");
+		if (!res) {
+			ret = -EIO;
+			pr_err("%s: resource wlan_brdg_err_source failed\n",
+			       __func__);
+			goto fail_ioremap2;
+		}
+		penv->wlan_brdg_err_source =
+			devm_ioremap_resource(&pdev->dev, res);
+
+		if (!penv->wlan_brdg_err_source) {
+			ret = -ENOMEM;
+			pr_err("%s: ioremap wlan BRDG ERR failed\n", __func__);
+			goto fail_ioremap2;
+		}
+
+		res = platform_get_resource_byname(pdev,
+						   IORESOURCE_MEM,
+						   "wlan_tx_status");
+		if (!res) {
+			ret = -EIO;
+			pr_err("%s: resource wlan_tx_status failed\n",
+			       __func__);
+			goto fail_ioremap2;
+		}
+		penv->wlan_tx_status =
+			devm_ioremap_resource(&pdev->dev, res);
+
+		if (!penv->wlan_tx_status) {
+			ret = -ENOMEM;
+			pr_err("%s: ioremap wlan TX STATUS failed\n", __func__);
+			goto fail_ioremap2;
+		}
+
+		res = platform_get_resource_byname(pdev,
+						   IORESOURCE_MEM,
+						   "alarms_txctl");
+		if (!res) {
+			ret = -EIO;
+			pr_err("%s: resource alarms_txctl failed\n",
+			       __func__);
+			goto fail_ioremap2;
+		}
+		penv->alarms_txctl =
+			devm_ioremap_resource(&pdev->dev, res);
+
+		if (!penv->alarms_txctl) {
+			ret = -ENOMEM;
+			pr_err("%s: ioremap alarms TXCTL failed\n", __func__);
+			goto fail_ioremap2;
+		}
+
+		res = platform_get_resource_byname(pdev,
+						   IORESOURCE_MEM,
+						   "alarms_tactl");
+		if (!res) {
+			ret = -EIO;
+			pr_err("%s: resource alarms_tactl failed\n",
+			       __func__);
+			goto fail_ioremap2;
+		}
+		penv->alarms_tactl =
+			devm_ioremap_resource(&pdev->dev, res);
+
+		if (!penv->alarms_tactl) {
+			ret = -ENOMEM;
+			pr_err("%s: ioremap alarms TACTL failed\n", __func__);
+			goto fail_ioremap2;
+		}
+
+		res = platform_get_resource_byname(pdev,
+						   IORESOURCE_MEM,
+						   "pronto_mcu_base");
+		if (!res) {
+			ret = -EIO;
+			pr_err("%s: resource pronto_mcu_base failed\n",
+			       __func__);
+			goto fail_ioremap2;
+		}
+		penv->pronto_mcu_base =
+			devm_ioremap_resource(&pdev->dev, res);
+
+		if (!penv->pronto_mcu_base) {
+			ret = -ENOMEM;
+			pr_err("%s: ioremap pronto mcu physical failed\n",
+			       __func__);
+			goto fail_ioremap2;
+		}
+
+		if (of_property_read_bool(node,
+					  "qcom,is-dual-band-disabled")) {
+			ret = wcnss_get_dual_band_capability_info(pdev);
+			if (ret) {
+				pr_err("%s: failed to get dual band info\n",
+				       __func__);
+				goto fail_ioremap2;
+			}
+		}
+	}
+
+	/* ADC/clock/VADC handles are optional: failures are logged and the
+	 * corresponding feature is disabled rather than aborting bring-up.
+	 */
+	penv->adc_tm_dev = qpnp_get_adc_tm(&penv->pdev->dev, "wcnss");
+	if (IS_ERR(penv->adc_tm_dev)) {
+		pr_err("%s:  adc get failed\n", __func__);
+		penv->adc_tm_dev = NULL;
+	} else {
+		INIT_DELAYED_WORK(&penv->vbatt_work, wcnss_update_vbatt);
+		penv->fw_vbatt_state = WCNSS_CONFIG_UNSPECIFIED;
+	}
+
+	penv->snoc_wcnss = devm_clk_get(&penv->pdev->dev, "snoc_wcnss");
+	if (IS_ERR(penv->snoc_wcnss)) {
+		pr_err("%s: couldn't get snoc_wcnss\n", __func__);
+		penv->snoc_wcnss = NULL;
+	} else {
+		if (of_property_read_u32(pdev->dev.of_node,
+					 "qcom,snoc-wcnss-clock-freq",
+					 &penv->snoc_wcnss_clock_freq)) {
+			pr_debug("%s: wcnss snoc clock frequency is not defined\n",
+				 __func__);
+			devm_clk_put(&penv->pdev->dev, penv->snoc_wcnss);
+			penv->snoc_wcnss = NULL;
+		}
+	}
+
+	if (penv->wlan_config.is_pronto_vadc) {
+		penv->vadc_dev = qpnp_get_vadc(&penv->pdev->dev, "wcnss");
+
+		if (IS_ERR(penv->vadc_dev)) {
+			pr_debug("%s:  vadc get failed\n", __func__);
+			penv->vadc_dev = NULL;
+		} else {
+			rc = wcnss_get_battery_volt(&penv->wlan_config.vbatt);
+			INIT_WORK(&penv->wcnss_vadc_work,
+				  wcnss_send_vbatt_indication);
+
+			if (rc < 0)
+				pr_err("Failed to get battery voltage with error= %d\n",
+				       rc);
+		}
+	}
+
+	/* Boot WCNSS via PIL, retrying a bounded number of times and
+	 * dumping debug registers on each failure.
+	 */
+	do {
+		/* trigger initialization of the WCNSS */
+		penv->pil = subsystem_get(WCNSS_PIL_DEVICE);
+		if (IS_ERR(penv->pil)) {
+			dev_err(&pdev->dev, "Peripheral Loader failed on WCNSS.\n");
+			ret = PTR_ERR(penv->pil);
+			wcnss_disable_pc_add_req();
+			wcnss_pronto_log_debug_regs();
+		}
+	} while (pil_retry++ < WCNSS_MAX_PIL_RETRY && IS_ERR(penv->pil));
+
+	if (IS_ERR(penv->pil)) {
+		wcnss_reset_fiq(false);
+		if (penv->wcnss_notif_hdle)
+			subsys_notif_unregister_notifier(penv->wcnss_notif_hdle,
+							 &wnb);
+		penv->pil = NULL;
+		goto fail_ioremap2;
+	}
+	/* Remove pm_qos request */
+	wcnss_disable_pc_remove_req();
+
+	return 0;
+
+fail_ioremap2:
+	if (penv->fiq_reg)
+		iounmap(penv->fiq_reg);
+fail_ioremap:
+	wakeup_source_trash(&penv->wcnss_wake_lock);
+fail_res:
+	if (!has_pronto_hw)
+		wcnss_gpios_config(penv->gpios_5wire, false);
+	else if (penv->use_pinctrl)
+		wcnss_pinctrl_set_state(false);
+	else
+		wcnss_pronto_gpios_config(pdev, false);
+fail_gpio_res:
+	wcnss_disable_pc_remove_req();
+fail:
+	if (penv->wcnss_notif_hdle)
+		subsys_notif_unregister_notifier(penv->wcnss_notif_hdle, &wnb);
+	penv = NULL;
+	return ret;
+}
+
+/* The driver needs to vote the snoc clock directly.  Clients call
+ * wcnss_snoc_vote() to enable (true) or disable (false) the clock.
+ */
+void wcnss_snoc_vote(bool clk_chk_en)
+{
+	int rc;
+
+	/* Guard against calls before probe has set up the environment
+	 * (exported symbol — external callers can race with probe).
+	 */
+	if (!penv || !penv->snoc_wcnss) {
+		pr_err("%s: couldn't get clk snoc_wcnss\n", __func__);
+		return;
+	}
+
+	if (clk_chk_en) {
+		/* Rate must be set before the clock is prepared/enabled. */
+		rc = clk_set_rate(penv->snoc_wcnss,
+				  penv->snoc_wcnss_clock_freq);
+		if (rc) {
+			pr_err("%s: snoc_wcnss_clk-clk_set_rate failed =%d\n",
+			       __func__, rc);
+			return;
+		}
+
+		if (clk_prepare_enable(penv->snoc_wcnss)) {
+			pr_err("%s: snoc_wcnss clk enable failed\n", __func__);
+			return;
+		}
+	} else {
+		clk_disable_unprepare(penv->snoc_wcnss);
+	}
+}
+EXPORT_SYMBOL(wcnss_snoc_vote);
+
+/* Exported wrapper: the wlan prop driver cannot call cancel_work_sync()
+ * directly, so it flushes pending work items through this helper.
+ */
+void wcnss_flush_work(struct work_struct *work)
+{
+	if (work)
+		cancel_work_sync(work);
+}
+EXPORT_SYMBOL(wcnss_flush_work);
+
+/* Exported wrapper: the wlan prop driver cannot call show_stack()
+ * directly, so it dumps a task's kernel stack through this helper.
+ */
+void wcnss_dump_stack(struct task_struct *task)
+{
+	show_stack(task, NULL);
+}
+EXPORT_SYMBOL(wcnss_dump_stack);
+
+/* Exported wrapper: the wlan prop driver cannot call
+ * cancel_delayed_work_sync() directly, so it flushes pending delayed
+ * work through this helper.
+ */
+void wcnss_flush_delayed_work(struct delayed_work *dwork)
+{
+	if (dwork)
+		cancel_delayed_work_sync(dwork);
+}
+EXPORT_SYMBOL(wcnss_flush_delayed_work);
+
+/* Exported wrapper: the wlan prop driver cannot use INIT_WORK()
+ * directly, so it initializes work items through this helper.
+ * callbackptr is implicitly converted to work_func_t — the caller must
+ * pass a function with the work_func_t signature.
+ */
+void wcnss_init_work(struct work_struct *work, void *callbackptr)
+{
+	if (work && callbackptr)
+		INIT_WORK(work, callbackptr);
+}
+EXPORT_SYMBOL(wcnss_init_work);
+
+/* Exported wrapper: the wlan prop driver cannot use INIT_DELAYED_WORK()
+ * directly, so it initializes delayed work through this helper.
+ * callbackptr is implicitly converted to work_func_t — the caller must
+ * pass a function with the work_func_t signature.
+ */
+void wcnss_init_delayed_work(struct delayed_work *dwork, void *callbackptr)
+{
+	if (dwork && callbackptr)
+		INIT_DELAYED_WORK(dwork, callbackptr);
+}
+EXPORT_SYMBOL(wcnss_init_delayed_work);
+
+/*
+ * Open handler for the wcnss device node.  The first userspace open
+ * kicks off WCNSS configuration (wcnss_trigger_config()); once the
+ * config has been triggered, later opens are no-ops.
+ */
+static int wcnss_node_open(struct inode *inode, struct file *file)
+{
+	if (!penv)
+		return -EFAULT;
+
+	if (penv->triggered)
+		return 0;
+
+	pr_info(DEVICE " triggered by userspace\n");
+	if (wcnss_trigger_config(penv->pdev))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * Read handler for the wcnss device node: streams firmware calibration
+ * data to userspace.  Blocks until new fw cal bytes arrive or the fw
+ * declares calibration complete; returns 0 (EOF) once all received fw
+ * cal data has been consumed.
+ */
+static ssize_t wcnss_wlan_read(struct file *fp, char __user
+			*buffer, size_t count, loff_t *position)
+{
+	int rc = 0;
+
+	if (!penv)
+		return -EFAULT;
+
+	rc = wait_event_interruptible(penv->read_wait, penv->fw_cal_rcvd
+			> penv->user_cal_read || penv->fw_cal_available);
+
+	if (rc < 0)
+		return rc;
+
+	mutex_lock(&penv->dev_lock);
+
+	/* All fw cal data consumed and fw says cal is done: report EOF. */
+	if (penv->fw_cal_available && penv->fw_cal_rcvd
+			== penv->user_cal_read) {
+		rc = 0;
+		goto exit;
+	}
+
+	if (count > penv->fw_cal_rcvd - penv->user_cal_read)
+		count = penv->fw_cal_rcvd - penv->user_cal_read;
+
+	rc = copy_to_user(buffer, penv->fw_cal_data +
+			penv->user_cal_read, count);
+	if (rc == 0) {
+		penv->user_cal_read += count;
+		rc = count;
+	} else {
+		/* copy_to_user() returns bytes NOT copied; don't return
+		 * that positive value to userspace as a read count.
+		 */
+		rc = -EFAULT;
+	}
+
+exit:
+	mutex_unlock(&penv->dev_lock);
+	return rc;
+}
+
+/* first (valid) write to this device should be 4 bytes cal file size */
+static ssize_t wcnss_wlan_write(struct file *fp, const char __user
+			*user_buffer, size_t count, loff_t *position)
+{
+	int rc = 0;
+	char *cal_data = NULL;
+
+	if (!penv || penv->user_cal_available)
+		return -EFAULT;
+
+	/* First valid write carries the expected total cal size (4 bytes). */
+	if (!penv->user_cal_rcvd && count >= 4 && !penv->user_cal_exp_size) {
+		mutex_lock(&penv->dev_lock);
+		rc = copy_from_user((void *)&penv->user_cal_exp_size,
+				    user_buffer, 4);
+		if (rc) {
+			/* Don't trust a partially-copied size word (the
+			 * original left this result unchecked).
+			 */
+			penv->user_cal_exp_size = 0;
+			mutex_unlock(&penv->dev_lock);
+			return -EFAULT;
+		}
+		if (!penv->user_cal_exp_size ||
+		    penv->user_cal_exp_size > MAX_CALIBRATED_DATA_SIZE) {
+			pr_err(DEVICE " invalid size to write %d\n",
+			       penv->user_cal_exp_size);
+			penv->user_cal_exp_size = 0;
+			mutex_unlock(&penv->dev_lock);
+			return -EFAULT;
+		}
+		mutex_unlock(&penv->dev_lock);
+		return count;
+	} else if (!penv->user_cal_rcvd && count < 4) {
+		return -EFAULT;
+	}
+
+	mutex_lock(&penv->dev_lock);
+	/* Reject writes that would overflow the counter or exceed the
+	 * announced cal size.
+	 */
+	if ((UINT32_MAX - count < penv->user_cal_rcvd) ||
+	    (penv->user_cal_exp_size < count + penv->user_cal_rcvd)) {
+		pr_err(DEVICE " invalid size to write %zu\n", count +
+		       penv->user_cal_rcvd);
+		mutex_unlock(&penv->dev_lock);
+		return -ENOMEM;
+	}
+
+	cal_data = kmalloc(count, GFP_KERNEL);
+	if (!cal_data) {
+		mutex_unlock(&penv->dev_lock);
+		return -ENOMEM;
+	}
+
+	rc = copy_from_user(cal_data, user_buffer, count);
+	if (!rc) {
+		memcpy(penv->user_cal_data + penv->user_cal_rcvd,
+		       cal_data, count);
+		penv->user_cal_rcvd += count;
+		rc = count;
+	} else {
+		/* copy_from_user() returns bytes NOT copied; returning that
+		 * positive value would look like a successful short write.
+		 */
+		rc = -EFAULT;
+	}
+
+	kfree(cal_data);
+	if (penv->user_cal_rcvd == penv->user_cal_exp_size) {
+		penv->user_cal_available = true;
+		pr_info_ratelimited("wcnss: user cal written");
+	}
+	mutex_unlock(&penv->dev_lock);
+
+	return rc;
+}
+
+/* No per-open state to tear down; the device keeps only global state. */
+static int wcnss_node_release(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+/*
+ * Subsystem-restart notifier for WCNSS: votes/unvotes power proxies,
+ * tracks shutdown state, and dumps debug registers on crash or
+ * power-up failure.  Always returns NOTIFY_DONE.
+ */
+static int wcnss_notif_cb(struct notifier_block *this, unsigned long code,
+			  void *ss_handle)
+{
+	struct platform_device *pdev = wcnss_get_platform_device();
+	struct wcnss_wlan_config *pwlanconfig = wcnss_get_wlan_config();
+	struct notif_data *data = (struct notif_data *)ss_handle;
+	int ret, xo_mode;
+
+	/* Reject out-of-range codes before they index
+	 * wcnss_subsys_notif_type[].  The original condition
+	 * (!(code >= MIN) && (code <= MAX)) only rejected codes below the
+	 * minimum, letting codes above the maximum read past the end of
+	 * the array.
+	 */
+	if (code < SUBSYS_NOTIF_MIN_INDEX ||
+	    code > SUBSYS_NOTIF_MAX_INDEX) {
+		pr_debug("%s: Invaild subsystem notification code: %lu\n",
+			 __func__, code);
+		return NOTIFY_DONE;
+	}
+
+	pr_debug("%s: wcnss notification event: %lu : %s\n",
+		 __func__, code, wcnss_subsys_notif_type[code]);
+
+	if (code == SUBSYS_PROXY_VOTE) {
+		if (pdev && pwlanconfig) {
+			ret = wcnss_wlan_power(&pdev->dev, pwlanconfig,
+					       WCNSS_WLAN_SWITCH_ON, &xo_mode);
+			wcnss_set_iris_xo_mode(xo_mode);
+			if (ret)
+				pr_err("Failed to execute wcnss_wlan_power\n");
+		}
+	} else if (code == SUBSYS_PROXY_UNVOTE) {
+		if (pdev && pwlanconfig) {
+			/* Temporary workaround as some pronto images have an
+			 * issue of sending an interrupt that it is capable of
+			 * voting for it's resources too early.
+			 */
+			msleep(20);
+			wcnss_wlan_power(&pdev->dev, pwlanconfig,
+					 WCNSS_WLAN_SWITCH_OFF, NULL);
+		}
+	} else if ((code == SUBSYS_BEFORE_SHUTDOWN && data && data->crashed) ||
+			code == SUBSYS_SOC_RESET) {
+		/* Crash path: keep DDR out of power collapse while debug
+		 * state is collected, then drop the vote after a timeout.
+		 */
+		wcnss_disable_pc_add_req();
+		schedule_delayed_work(&penv->wcnss_pm_qos_del_req,
+				      msecs_to_jiffies(WCNSS_PM_QOS_TIMEOUT));
+		penv->is_shutdown = 1;
+		wcnss_log_debug_regs_on_bite();
+	} else if (code == SUBSYS_POWERUP_FAILURE) {
+		if (pdev && pwlanconfig)
+			wcnss_wlan_power(&pdev->dev, pwlanconfig,
+					 WCNSS_WLAN_SWITCH_OFF, NULL);
+		wcnss_pronto_log_debug_regs();
+		wcnss_disable_pc_remove_req();
+	} else if (code == SUBSYS_BEFORE_SHUTDOWN) {
+		wcnss_disable_pc_add_req();
+		schedule_delayed_work(&penv->wcnss_pm_qos_del_req,
+				      msecs_to_jiffies(WCNSS_PM_QOS_TIMEOUT));
+		penv->is_shutdown = 1;
+	} else if (code == SUBSYS_AFTER_POWERUP) {
+		penv->is_shutdown = 0;
+	}
+
+	return NOTIFY_DONE;
+}
+
+/* File operations for the wcnss device node (cal data exchange). */
+static const struct file_operations wcnss_node_fops = {
+	.owner = THIS_MODULE,
+	.open = wcnss_node_open,
+	.read = wcnss_wlan_read,
+	.write = wcnss_wlan_write,
+	.release = wcnss_node_release,
+};
+
+/*
+ * Create the ctrl and node character devices (regions, class, device
+ * nodes, cdevs) with full unwind on any failure.  Returns 0 on success
+ * or -ENOMEM (interface preserved from the original).
+ */
+static int wcnss_cdev_register(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	ret = alloc_chrdev_region(&penv->dev_ctrl, 0, 1, CTRL_DEVICE);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "CTRL Device Registration failed\n");
+		goto alloc_region_ctrl;
+	}
+	ret = alloc_chrdev_region(&penv->dev_node, 0, 1, DEVICE);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "NODE Device Registration failed\n");
+		goto alloc_region_node;
+	}
+
+	/* class_create() reports failure via ERR_PTR, never NULL, so the
+	 * original !ptr check could not detect a failure.
+	 */
+	penv->node_class = class_create(THIS_MODULE, "wcnss");
+	if (IS_ERR(penv->node_class)) {
+		dev_err(&pdev->dev, "NODE Device Class Creation failed\n");
+		goto class_create_node;
+	}
+
+	/* device_create() likewise returns an ERR_PTR on failure. */
+	if (IS_ERR(device_create(penv->node_class, NULL, penv->dev_ctrl,
+				 NULL, CTRL_DEVICE))) {
+		dev_err(&pdev->dev, "CTRL Device Creation failed\n");
+		goto device_create_ctrl;
+	}
+
+	if (IS_ERR(device_create(penv->node_class, NULL, penv->dev_node,
+				 NULL, DEVICE))) {
+		dev_err(&pdev->dev, "NODE Device Creation failed\n");
+		goto device_create_node;
+	}
+
+	cdev_init(&penv->ctrl_dev, &wcnss_ctrl_fops);
+	cdev_init(&penv->node_dev, &wcnss_node_fops);
+
+	/* cdev_add() returns a negative errno on failure, not -1. */
+	if (cdev_add(&penv->ctrl_dev, penv->dev_ctrl, 1) < 0) {
+		dev_err(&pdev->dev, "CTRL Device addition failed\n");
+		goto cdev_add_ctrl;
+	}
+	if (cdev_add(&penv->node_dev, penv->dev_node, 1) < 0) {
+		dev_err(&pdev->dev, "NODE Device addition failed\n");
+		goto cdev_add_node;
+	}
+
+	return 0;
+
+cdev_add_node:
+	cdev_del(&penv->ctrl_dev);
+cdev_add_ctrl:
+	device_destroy(penv->node_class, penv->dev_node);
+device_create_node:
+	device_destroy(penv->node_class, penv->dev_ctrl);
+device_create_ctrl:
+	class_destroy(penv->node_class);
+class_create_node:
+	unregister_chrdev_region(penv->dev_node, 1);
+alloc_region_node:
+	unregister_chrdev_region(penv->dev_ctrl, 1);
+alloc_region_ctrl:
+	return -ENOMEM;
+}
+
+/* Tear down both character devices created by wcnss_cdev_register(),
+ * in reverse order of creation.
+ * NOTE(review): dev_err() is used for a routine informational message;
+ * dev_info()/dev_dbg() would be the appropriate level — confirm before
+ * changing.
+ */
+static void wcnss_cdev_unregister(struct platform_device *pdev)
+{
+	dev_err(&pdev->dev, "Unregistering cdev devices\n");
+	cdev_del(&penv->ctrl_dev);
+	cdev_del(&penv->node_dev);
+	device_destroy(penv->node_class, penv->dev_ctrl);
+	device_destroy(penv->node_class, penv->dev_node);
+	class_destroy(penv->node_class);
+	unregister_chrdev_region(penv->dev_ctrl, 1);
+	unregister_chrdev_region(penv->dev_node, 1);
+}
+
+/*
+ * Platform probe: allocate the driver environment, create sysfs
+ * entries, register for subsystem notifications, and create the
+ * character devices.  Deferred hardware bring-up happens later via
+ * wcnss_trigger_config() on first open.  All error paths now fully
+ * unwind (the original leaked sysfs entries and left a stale penv on
+ * some failures, blocking any later probe).
+ */
+static int
+wcnss_wlan_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	/* verify we haven't been called more than once */
+	if (penv) {
+		dev_err(&pdev->dev, "cannot handle multiple devices.\n");
+		return -ENODEV;
+	}
+
+	/* create an environment to track the device */
+	penv = devm_kzalloc(&pdev->dev, sizeof(*penv), GFP_KERNEL);
+	if (!penv)
+		return -ENOMEM;
+
+	penv->pdev = pdev;
+
+	penv->user_cal_data =
+		devm_kzalloc(&pdev->dev, MAX_CALIBRATED_DATA_SIZE, GFP_KERNEL);
+	if (!penv->user_cal_data) {
+		dev_err(&pdev->dev, "Failed to alloc memory for cal data.\n");
+		penv = NULL;
+		return -ENOMEM;
+	}
+
+	/* register sysfs entries */
+	ret = wcnss_create_sysfs(&pdev->dev);
+	if (ret) {
+		penv = NULL;
+		return -ENOENT;
+	}
+
+	/* register wcnss event notification */
+	penv->wcnss_notif_hdle = subsys_notif_register_notifier("wcnss", &wnb);
+	if (IS_ERR(penv->wcnss_notif_hdle)) {
+		pr_err("wcnss: register event notification failed!\n");
+		ret = PTR_ERR(penv->wcnss_notif_hdle);
+		wcnss_remove_sysfs(&pdev->dev);
+		penv = NULL;
+		return ret;
+	}
+
+	mutex_init(&penv->dev_lock);
+	mutex_init(&penv->ctrl_lock);
+	mutex_init(&penv->vbat_monitor_mutex);
+	mutex_init(&penv->pm_qos_mutex);
+	init_waitqueue_head(&penv->read_wait);
+
+	penv->user_cal_rcvd = 0;
+	penv->user_cal_read = 0;
+	penv->user_cal_exp_size = 0;
+	penv->user_cal_available = false;
+
+	/* Since we were built into the kernel we'll be called as part
+	 * of kernel initialization.  We don't know if userspace
+	 * applications are available to service PIL at this time
+	 * (they probably are not), so we simply create a device node
+	 * here.  When userspace is available it should touch the
+	 * device so that we know that WCNSS configuration can take
+	 * place
+	 */
+	pr_info(DEVICE " probed in built-in mode\n");
+
+	ret = wcnss_cdev_register(pdev);
+	if (ret) {
+		/* Unwind notifier + sysfs so a later probe can retry. */
+		subsys_notif_unregister_notifier(penv->wcnss_notif_hdle, &wnb);
+		wcnss_remove_sysfs(&pdev->dev);
+		penv = NULL;
+	}
+	return ret;
+}
+
+/*
+ * Platform remove: unwind probe in reverse order — notifier, cdevs,
+ * sysfs — and clear the global environment (memory itself is devm
+ * managed and freed by the driver core).
+ */
+static int
+wcnss_wlan_remove(struct platform_device *pdev)
+{
+	if (penv->wcnss_notif_hdle)
+		subsys_notif_unregister_notifier(penv->wcnss_notif_hdle, &wnb);
+	wcnss_cdev_unregister(pdev);
+	wcnss_remove_sysfs(&pdev->dev);
+	penv = NULL;
+	return 0;
+}
+
+/* System suspend/resume hooks (defined elsewhere in this file). */
+static const struct dev_pm_ops wcnss_wlan_pm_ops = {
+	.suspend	= wcnss_wlan_suspend,
+	.resume		= wcnss_wlan_resume,
+};
+
+#ifdef CONFIG_WCNSS_CORE_PRONTO
+/* DT match table; only Pronto-based targets probe via device tree. */
+static const struct of_device_id msm_wcnss_pronto_match[] = {
+	{.compatible = "qcom,wcnss_wlan"},
+	{}
+};
+#endif
+
+static struct platform_driver wcnss_wlan_driver = {
+	.driver = {
+		.name	= DEVICE,
+		.owner	= THIS_MODULE,
+		.pm	= &wcnss_wlan_pm_ops,
+#ifdef CONFIG_WCNSS_CORE_PRONTO
+		.of_match_table = msm_wcnss_pronto_match,
+#endif
+	},
+	.probe	= wcnss_wlan_probe,
+	.remove	= wcnss_wlan_remove,
+};
+
+/*
+ * Module init: register the three platform drivers and the PM
+ * notifier.  Registration failures are now propagated with proper
+ * unwinding (the original ignored all return values).
+ */
+static int __init wcnss_wlan_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&wcnss_wlan_driver);
+	if (ret)
+		return ret;
+	ret = platform_driver_register(&wcnss_wlan_ctrl_driver);
+	if (ret)
+		goto err_wlan_ctrl;
+	ret = platform_driver_register(&wcnss_ctrl_driver);
+	if (ret)
+		goto err_ctrl;
+	register_pm_notifier(&wcnss_pm_notifier);
+
+	return 0;
+
+err_ctrl:
+	platform_driver_unregister(&wcnss_wlan_ctrl_driver);
+err_wlan_ctrl:
+	platform_driver_unregister(&wcnss_wlan_driver);
+	return ret;
+}
+
+/*
+ * Module exit: drop the PIL handle if WCNSS was booted, then
+ * unregister the PM notifier and platform drivers in reverse order of
+ * registration.
+ */
+static void __exit wcnss_wlan_exit(void)
+{
+	if (penv) {
+		if (penv->pil)
+			subsystem_put(penv->pil);
+		penv = NULL;
+	}
+
+	unregister_pm_notifier(&wcnss_pm_notifier);
+	platform_driver_unregister(&wcnss_ctrl_driver);
+	platform_driver_unregister(&wcnss_wlan_ctrl_driver);
+	platform_driver_unregister(&wcnss_wlan_driver);
+}
+
+module_init(wcnss_wlan_init);
+module_exit(wcnss_wlan_exit);
+
+MODULE_LICENSE("GPL v2");
+/* Add the missing space: DEVICE "Driver" concatenated to "wcnssDriver". */
+MODULE_DESCRIPTION(DEVICE " Driver");
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index c1eafbd..da51fed 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -553,7 +553,7 @@
 
 static int spi_engine_remove(struct platform_device *pdev)
 {
-	struct spi_master *master = platform_get_drvdata(pdev);
+	struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
 	struct spi_engine *spi_engine = spi_master_get_devdata(master);
 	int irq = platform_get_irq(pdev, 0);
 
@@ -561,6 +561,8 @@
 
 	free_irq(irq, master);
 
+	spi_master_put(master);
+
 	writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
 	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
 	writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 186e7ae..a074763 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -28,6 +28,7 @@
 
 #define SPI_NUM_CHIPSELECT	(4)
 #define SPI_XFER_TIMEOUT_MS	(250)
+#define SPI_AUTO_SUSPEND_DELAY	(250)
 /* SPI SE specific registers */
 #define SE_SPI_CPHA		(0x224)
 #define SE_SPI_LOOPBACK		(0x22C)
@@ -153,6 +154,7 @@
 	int num_rx_eot;
 	int num_xfers;
 	void *ipc;
+	bool shared_se;
 };
 
 static struct spi_master *get_spi_master(struct device *dev)
@@ -647,11 +649,11 @@
 					&mas->gsi[mas->num_xfers].desc_cb;
 	mas->gsi[mas->num_xfers].tx_cookie =
 			dmaengine_submit(mas->gsi[mas->num_xfers].tx_desc);
-	if (mas->num_rx_eot)
+	if (cmd & SPI_RX_ONLY)
 		mas->gsi[mas->num_xfers].rx_cookie =
 			dmaengine_submit(mas->gsi[mas->num_xfers].rx_desc);
 	dma_async_issue_pending(mas->tx);
-	if (mas->num_rx_eot)
+	if (cmd & SPI_RX_ONLY)
 		dma_async_issue_pending(mas->rx);
 	mas->num_xfers++;
 	return ret;
@@ -726,7 +728,6 @@
 		memset(mas->gsi, 0,
 				(sizeof(struct spi_geni_gsi) * NUM_SPI_XFER));
 		geni_se_select_mode(mas->base, GSI_DMA);
-		dmaengine_resume(mas->tx);
 		ret = spi_geni_map_buf(mas, spi_msg);
 	} else {
 		dev_err(mas->dev, "%s: Couldn't select mode %d", __func__,
@@ -743,10 +744,8 @@
 
 	mas->cur_speed_hz = 0;
 	mas->cur_word_len = 0;
-	if (mas->cur_xfer_mode == GSI_DMA) {
-		dmaengine_pause(mas->tx);
+	if (mas->cur_xfer_mode == GSI_DMA)
 		spi_geni_unmap_buf(mas, spi_msg);
-	}
 	return 0;
 }
 
@@ -760,9 +759,22 @@
 	/* Adjust the AB/IB based on the max speed of the slave.*/
 	rsc->ib = max_speed * DEFAULT_BUS_WIDTH;
 	rsc->ab = max_speed * DEFAULT_BUS_WIDTH;
+	if (mas->shared_se) {
+		struct se_geni_rsc *rsc;
+		int ret = 0;
+
+		rsc = &mas->spi_rsc;
+		ret = pinctrl_select_state(rsc->geni_pinctrl,
+						rsc->geni_gpio_active);
+		if (ret)
+			GENI_SE_ERR(mas->ipc, false, NULL,
+			"%s: Error %d pinctrl_select_state\n", __func__, ret);
+	}
+
 	ret = pm_runtime_get_sync(mas->dev);
 	if (ret < 0) {
-		dev_err(mas->dev, "Error enabling SE resources\n");
+		dev_err(mas->dev, "%s:Error enabling SE resources %d\n",
+							__func__, ret);
 		pm_runtime_put_noidle(mas->dev);
 		goto exit_prepare_transfer_hardware;
 	} else {
@@ -854,6 +866,9 @@
 				"%s:Major:%d Minor:%d step:%dos%d\n",
 			__func__, major, minor, step, mas->oversampling);
 		}
+		mas->shared_se =
+			(geni_read_reg(mas->base, GENI_IF_FIFO_DISABLE_RO) &
+							FIFO_IF_DISABLE);
 	}
 exit_prepare_transfer_hardware:
 	return ret;
@@ -863,7 +878,20 @@
 {
 	struct spi_geni_master *mas = spi_master_get_devdata(spi);
 
-	pm_runtime_put_sync(mas->dev);
+	if (mas->shared_se) {
+		struct se_geni_rsc *rsc;
+		int ret = 0;
+
+		rsc = &mas->spi_rsc;
+		ret = pinctrl_select_state(rsc->geni_pinctrl,
+						rsc->geni_gpio_sleep);
+		if (ret)
+			GENI_SE_ERR(mas->ipc, false, NULL,
+			"%s: Error %d pinctrl_select_state\n", __func__, ret);
+	}
+
+	pm_runtime_mark_last_busy(mas->dev);
+	pm_runtime_put_autosuspend(mas->dev);
 	return 0;
 }
 
@@ -1006,7 +1034,7 @@
 
 			for (i = 0 ; i < mas->num_tx_eot; i++) {
 				timeout =
-				wait_for_completion_interruptible_timeout(
+				wait_for_completion_timeout(
 					&mas->tx_cb,
 					msecs_to_jiffies(SPI_XFER_TIMEOUT_MS));
 				if (timeout <= 0) {
@@ -1018,7 +1046,7 @@
 			}
 			for (i = 0 ; i < mas->num_rx_eot; i++) {
 				timeout =
-				wait_for_completion_interruptible_timeout(
+				wait_for_completion_timeout(
 					&mas->rx_cb,
 					msecs_to_jiffies(SPI_XFER_TIMEOUT_MS));
 				if (timeout <= 0) {
@@ -1336,6 +1364,9 @@
 	init_completion(&geni_mas->xfer_done);
 	init_completion(&geni_mas->tx_cb);
 	init_completion(&geni_mas->rx_cb);
+	pm_runtime_set_suspended(&pdev->dev);
+	pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTO_SUSPEND_DELAY);
+	pm_runtime_use_autosuspend(&pdev->dev);
 	pm_runtime_enable(&pdev->dev);
 	ret = spi_register_master(spi);
 	if (ret) {
@@ -1369,7 +1400,14 @@
 	struct spi_master *spi = get_spi_master(dev);
 	struct spi_geni_master *geni_mas = spi_master_get_devdata(spi);
 
-	ret = se_geni_resources_off(&geni_mas->spi_rsc);
+	if (geni_mas->shared_se) {
+		ret = se_geni_clks_off(&geni_mas->spi_rsc);
+		if (ret)
+			GENI_SE_ERR(geni_mas->ipc, false, NULL,
+			"%s: Error %d turning off clocks\n", __func__, ret);
+	} else {
+		ret = se_geni_resources_off(&geni_mas->spi_rsc);
+	}
 	return ret;
 }
 
@@ -1379,7 +1417,14 @@
 	struct spi_master *spi = get_spi_master(dev);
 	struct spi_geni_master *geni_mas = spi_master_get_devdata(spi);
 
-	ret = se_geni_resources_on(&geni_mas->spi_rsc);
+	if (geni_mas->shared_se) {
+		ret = se_geni_clks_on(&geni_mas->spi_rsc);
+		if (ret)
+			GENI_SE_ERR(geni_mas->ipc, false, NULL,
+			"%s: Error %d turning on clocks\n", __func__, ret);
+	} else {
+		ret = se_geni_resources_on(&geni_mas->spi_rsc);
+	}
 	return ret;
 }
 
@@ -1390,9 +1435,29 @@
 
 static int spi_geni_suspend(struct device *dev)
 {
-	if (!pm_runtime_status_suspended(dev))
-		return -EBUSY;
-	return 0;
+	int ret = 0;
+
+	if (!pm_runtime_status_suspended(dev)) {
+		struct spi_master *spi = get_spi_master(dev);
+		struct spi_geni_master *geni_mas = spi_master_get_devdata(spi);
+
+		if (list_empty(&spi->queue) && !spi->cur_msg) {
+			GENI_SE_ERR(geni_mas->ipc, true, dev,
+					"%s: Force suspend", __func__);
+			ret = spi_geni_runtime_suspend(dev);
+			if (ret) {
+				GENI_SE_ERR(geni_mas->ipc, true, dev,
+					"Force suspend Failed:%d", ret);
+			} else {
+				pm_runtime_disable(dev);
+				pm_runtime_set_suspended(dev);
+				pm_runtime_enable(dev);
+			}
+		} else {
+			ret = -EBUSY;
+		}
+	}
+	return ret;
 }
 #else
 static int spi_geni_runtime_suspend(struct device *dev)
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 1de3a77..cbf02eb 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -862,7 +862,7 @@
 				break;
 			copy32 = copy_bswap32;
 		} else if (bits <= 16) {
-			if (l & 1)
+			if (l & 3)
 				break;
 			copy32 = copy_wswap32;
 		} else {
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index bc7100b..e0b9fe1 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -271,6 +271,7 @@
 	while (remaining_words) {
 		int n_words, tx_words, rx_words;
 		u32 sr;
+		int stalled;
 
 		n_words = min(remaining_words, xspi->buffer_size);
 
@@ -299,7 +300,17 @@
 
 		/* Read out all the data from the Rx FIFO */
 		rx_words = n_words;
+		stalled = 10;
 		while (rx_words) {
+			if (rx_words == n_words && !(stalled--) &&
+			    !(sr & XSPI_SR_TX_EMPTY_MASK) &&
+			    (sr & XSPI_SR_RX_EMPTY_MASK)) {
+				dev_err(&spi->dev,
+					"Detected stall. Check C_SPI_MODE and C_SPI_MEMORY\n");
+				xspi_init_hw(xspi);
+				return -EIO;
+			}
+
 			if ((sr & XSPI_SR_TX_EMPTY_MASK) && (rx_words > 1)) {
 				xilinx_spi_rx(xspi);
 				rx_words--;
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 72dfb3d..6199523 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -1264,6 +1264,13 @@
 		goto err_put_ctrl;
 	}
 
+	pa->ppid_to_apid = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PPID,
+					sizeof(*pa->ppid_to_apid), GFP_KERNEL);
+	if (!pa->ppid_to_apid) {
+		err = -ENOMEM;
+		goto err_put_ctrl;
+	}
+
 	hw_ver = readl_relaxed(core + PMIC_ARB_VERSION);
 
 	if (hw_ver < PMIC_ARB_VERSION_V2_MIN) {
@@ -1299,15 +1306,6 @@
 			err = PTR_ERR(pa->wr_base);
 			goto err_put_ctrl;
 		}
-
-		pa->ppid_to_apid = devm_kcalloc(&ctrl->dev,
-						PMIC_ARB_MAX_PPID,
-						sizeof(*pa->ppid_to_apid),
-						GFP_KERNEL);
-		if (!pa->ppid_to_apid) {
-			err = -ENOMEM;
-			goto err_put_ctrl;
-		}
 	}
 
 	dev_info(&ctrl->dev, "PMIC arbiter version %s (0x%x)\n",
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 4747949..83b46d4 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -431,8 +431,10 @@
 {
 	struct mm_struct *mm = tsk->mm;
 
-	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm))
+	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
 		atomic_inc(&tsk->signal->oom_mm->mm_count);
+		set_bit(MMF_OOM_VICTIM, &mm->flags);
+	}
 }
 
 static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
@@ -559,13 +561,15 @@
 
 		task_lock(selected);
 		send_sig(SIGKILL, selected, 0);
-		if (selected->mm)
+		if (selected->mm) {
 			task_set_lmk_waiting(selected);
-		if (oom_reaper)
-			mark_lmk_victim(selected);
+			if (!test_bit(MMF_OOM_SKIP, &selected->mm->flags) &&
+			    oom_reaper) {
+				mark_lmk_victim(selected);
+				wake_oom_reaper(selected);
+			}
+		}
 		task_unlock(selected);
-		if (oom_reaper)
-			wake_oom_reaper(selected);
 		trace_lowmemory_kill(selected, cache_size, cache_limit, free);
 		lowmem_print(1, "Killing '%s' (%d) (tgid %d), adj %hd,\n"
 			"to free %ldkB on behalf of '%s' (%d) because\n"
diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c
index 8dffd8a..9f01427 100644
--- a/drivers/staging/greybus/light.c
+++ b/drivers/staging/greybus/light.c
@@ -924,6 +924,8 @@
 		return;
 
 	led_classdev_unregister(cdev);
+	kfree(cdev->name);
+	cdev->name = NULL;
 	channel->led = NULL;
 }
 
diff --git a/drivers/staging/greybus/loopback.c b/drivers/staging/greybus/loopback.c
index 29dc249..3c2c233 100644
--- a/drivers/staging/greybus/loopback.c
+++ b/drivers/staging/greybus/loopback.c
@@ -1034,8 +1034,10 @@
 				error = gb_loopback_async_sink(gb, size);
 			}
 
-			if (error)
+			if (error) {
 				gb->error++;
+				gb->iteration_count++;
+			}
 		} else {
 			/* We are effectively single threaded here */
 			if (type == GB_LOOPBACK_TYPE_PING)
diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c
index 5578a07..50a5b0c2 100644
--- a/drivers/staging/iio/cdc/ad7150.c
+++ b/drivers/staging/iio/cdc/ad7150.c
@@ -274,7 +274,7 @@
 error_ret:
 	mutex_unlock(&chip->state_lock);
 
-	return 0;
+	return ret;
 }
 
 static int ad7150_read_event_value(struct iio_dev *indio_dev,
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
index 4366918..27333d9 100644
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
@@ -401,15 +401,13 @@
 		result = VM_FAULT_LOCKED;
 		break;
 	case -ENODATA:
+	case -EAGAIN:
 	case -EFAULT:
 		result = VM_FAULT_NOPAGE;
 		break;
 	case -ENOMEM:
 		result = VM_FAULT_OOM;
 		break;
-	case -EAGAIN:
-		result = VM_FAULT_RETRY;
-		break;
 	default:
 		result = VM_FAULT_SIGBUS;
 		break;
diff --git a/drivers/staging/media/cec/cec-adap.c b/drivers/staging/media/cec/cec-adap.c
index 057c9b5..75e6d5e 100644
--- a/drivers/staging/media/cec/cec-adap.c
+++ b/drivers/staging/media/cec/cec-adap.c
@@ -288,10 +288,10 @@
 
 	/* Mark it as an error */
 	data->msg.tx_ts = ktime_get_ns();
-	data->msg.tx_status = CEC_TX_STATUS_ERROR |
-			      CEC_TX_STATUS_MAX_RETRIES;
+	data->msg.tx_status |= CEC_TX_STATUS_ERROR |
+			       CEC_TX_STATUS_MAX_RETRIES;
+	data->msg.tx_error_cnt++;
 	data->attempts = 0;
-	data->msg.tx_error_cnt = 1;
 	/* Queue transmitted message for monitoring purposes */
 	cec_queue_msg_monitor(data->adap, &data->msg, 1);
 
@@ -608,8 +608,7 @@
 	}
 	memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len);
 	if (msg->len == 1) {
-		if (cec_msg_initiator(msg) != 0xf ||
-		    cec_msg_destination(msg) == 0xf) {
+		if (cec_msg_destination(msg) == 0xf) {
 			dprintk(1, "cec_transmit_msg: invalid poll message\n");
 			return -EINVAL;
 		}
@@ -634,7 +633,7 @@
 		dprintk(1, "cec_transmit_msg: destination is the adapter itself\n");
 		return -EINVAL;
 	}
-	if (cec_msg_initiator(msg) != 0xf &&
+	if (msg->len > 1 && adap->is_configured &&
 	    !cec_has_log_addr(adap, cec_msg_initiator(msg))) {
 		dprintk(1, "cec_transmit_msg: initiator has unknown logical address %d\n",
 			cec_msg_initiator(msg));
@@ -883,7 +882,7 @@
 
 	/* Send poll message */
 	msg.len = 1;
-	msg.msg[0] = 0xf0 | log_addr;
+	msg.msg[0] = (log_addr << 4) | log_addr;
 	err = cec_transmit_msg_fh(adap, &msg, NULL, true);
 
 	/*
@@ -1062,6 +1061,8 @@
 		for (i = 1; i < las->num_log_addrs; i++)
 			las->log_addr[i] = CEC_LOG_ADDR_INVALID;
 	}
+	for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
+		las->log_addr[i] = CEC_LOG_ADDR_INVALID;
 	adap->is_configured = true;
 	adap->is_configuring = false;
 	cec_post_state_event(adap);
@@ -1079,8 +1080,6 @@
 			cec_report_features(adap, i);
 		cec_report_phys_addr(adap, i);
 	}
-	for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
-		las->log_addr[i] = CEC_LOG_ADDR_INVALID;
 	mutex_lock(&adap->lock);
 	adap->kthread_config = NULL;
 	mutex_unlock(&adap->lock);
@@ -1557,9 +1556,9 @@
 	}
 
 	case CEC_MSG_GIVE_FEATURES:
-		if (adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0)
-			return cec_report_features(adap, la_idx);
-		return 0;
+		if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0)
+			return cec_feature_abort(adap, msg);
+		return cec_report_features(adap, la_idx);
 
 	default:
 		/*
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index f1f4788..6051a7b 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -342,7 +342,7 @@
 	else
 		RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, (" createbss for SSid:%s\n", pmlmepriv->assoc_ssid.Ssid));
 
-	pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+	pcmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
 	if (!pcmd) {
 		res = _FAIL;
 		goto exit;
@@ -522,7 +522,7 @@
 
 	if (enqueue) {
 		/* need enqueue, prepare cmd_obj and enqueue */
-		cmdobj = kzalloc(sizeof(*cmdobj), GFP_KERNEL);
+		cmdobj = kzalloc(sizeof(*cmdobj), GFP_ATOMIC);
 		if (!cmdobj) {
 			res = _FAIL;
 			kfree(param);
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
index ee2dcd0..0b60d1e 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -107,10 +107,10 @@
 
 void rtw_free_mlme_priv(struct mlme_priv *pmlmepriv)
 {
-	rtw_free_mlme_priv_ie_data(pmlmepriv);
-
-	if (pmlmepriv)
+	if (pmlmepriv) {
+		rtw_free_mlme_priv_ie_data(pmlmepriv);
 		vfree(pmlmepriv->free_bss_buf);
+	}
 }
 
 struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv)
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index f109eeac..ab96629 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1698,10 +1698,11 @@
 	MACbShutdown(priv);
 
 	pci_disable_device(pcid);
-	pci_set_power_state(pcid, pci_choose_state(pcid, state));
 
 	spin_unlock_irqrestore(&priv->lock, flags);
 
+	pci_set_power_state(pcid, pci_choose_state(pcid, state));
+
 	return 0;
 }
 
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index f3c9d18..04d2b6e 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -841,6 +841,7 @@
 	unsigned char *buf)
 {
 	struct iscsi_conn *conn;
+	const bool do_put = cmd->se_cmd.se_tfo != NULL;
 
 	if (!cmd->conn) {
 		pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
@@ -871,7 +872,7 @@
 	 * Perform the kref_put now if se_cmd has already been setup by
 	 * scsit_setup_scsi_cmd()
 	 */
-	if (cmd->se_cmd.se_tfo != NULL) {
+	if (do_put) {
 		pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
 		target_put_sess_cmd(&cmd->se_cmd);
 	}
@@ -1939,7 +1940,6 @@
 	struct iscsi_tmr_req *tmr_req;
 	struct iscsi_tm *hdr;
 	int out_of_order_cmdsn = 0, ret;
-	bool sess_ref = false;
 	u8 function, tcm_function = TMR_UNKNOWN;
 
 	hdr			= (struct iscsi_tm *) buf;
@@ -1981,18 +1981,17 @@
 					     buf);
 	}
 
+	transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
+			      conn->sess->se_sess, 0, DMA_NONE,
+			      TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
+
+	target_get_sess_cmd(&cmd->se_cmd, true);
+
 	/*
 	 * TASK_REASSIGN for ERL=2 / connection stays inside of
 	 * LIO-Target $FABRIC_MOD
 	 */
 	if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
-		transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
-				      conn->sess->se_sess, 0, DMA_NONE,
-				      TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
-
-		target_get_sess_cmd(&cmd->se_cmd, true);
-		sess_ref = true;
-
 		switch (function) {
 		case ISCSI_TM_FUNC_ABORT_TASK:
 			tcm_function = TMR_ABORT_TASK;
@@ -2104,12 +2103,14 @@
 
 	if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
 		int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
-		if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
+		if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) {
 			out_of_order_cmdsn = 1;
-		else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
+		} else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+			target_put_sess_cmd(&cmd->se_cmd);
 			return 0;
-		else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+		} else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
 			return -1;
+		}
 	}
 	iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
 
@@ -2129,12 +2130,8 @@
 	 * For connection recovery, this is also the default action for
 	 * TMR TASK_REASSIGN.
 	 */
-	if (sess_ref) {
-		pr_debug("Handle TMR, using sess_ref=true check\n");
-		target_put_sess_cmd(&cmd->se_cmd);
-	}
-
 	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+	target_put_sess_cmd(&cmd->se_cmd);
 	return 0;
 }
 EXPORT_SYMBOL(iscsit_handle_task_mgt_cmd);
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 9cbbc9c..8a4bc15 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1144,7 +1144,7 @@
 
 	ret = core_tpg_register(wwn, &tpg->tpg_se_tpg, SCSI_PROTOCOL_ISCSI);
 	if (ret < 0)
-		return NULL;
+		goto free_out;
 
 	ret = iscsit_tpg_add_portal_group(tiqn, tpg);
 	if (ret != 0)
@@ -1156,6 +1156,7 @@
 	return &tpg->tpg_se_tpg;
 out:
 	core_tpg_deregister(&tpg->tpg_se_tpg);
+free_out:
 	kfree(tpg);
 	return NULL;
 }
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 4c82bbe..ee5b29a 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -1010,7 +1010,7 @@
 static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
 {
 	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
-		struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work);
+		struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work);
 	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
 	bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
 			 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
@@ -1073,17 +1073,8 @@
 	/*
 	 * Flush any pending transitions
 	 */
-	if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs &&
-	    atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) ==
-	    ALUA_ACCESS_STATE_TRANSITION) {
-		/* Just in case */
-		tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
-		tg_pt_gp->tg_pt_gp_transition_complete = &wait;
-		flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
-		wait_for_completion(&wait);
-		tg_pt_gp->tg_pt_gp_transition_complete = NULL;
-		return 0;
-	}
+	if (!explicit)
+		flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
 
 	/*
 	 * Save the old primary ALUA access state, and set the current state
@@ -1114,17 +1105,9 @@
 	atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
 	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
-	if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
-		unsigned long transition_tmo;
-
-		transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ;
-		queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
-				   &tg_pt_gp->tg_pt_gp_transition_work,
-				   transition_tmo);
-	} else {
+	schedule_work(&tg_pt_gp->tg_pt_gp_transition_work);
+	if (explicit) {
 		tg_pt_gp->tg_pt_gp_transition_complete = &wait;
-		queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
-				   &tg_pt_gp->tg_pt_gp_transition_work, 0);
 		wait_for_completion(&wait);
 		tg_pt_gp->tg_pt_gp_transition_complete = NULL;
 	}
@@ -1692,8 +1675,8 @@
 	mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
 	spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
 	atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
-	INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
-			  core_alua_do_transition_tg_pt_work);
+	INIT_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
+		  core_alua_do_transition_tg_pt_work);
 	tg_pt_gp->tg_pt_gp_dev = dev;
 	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
 		ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
@@ -1801,7 +1784,7 @@
 	dev->t10_alua.alua_tg_pt_gps_counter--;
 	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
-	flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
+	flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
 
 	/*
 	 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 29f807b..97928b4 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -466,6 +466,10 @@
 	struct inode *inode = file->f_mapping->host;
 	int ret;
 
+	if (!nolb) {
+		return 0;
+	}
+
 	if (cmd->se_dev->dev_attrib.pi_prot_type) {
 		ret = fd_do_prot_unmap(cmd, lba, nolb);
 		if (ret)
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 47463c9..df20921 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -56,8 +56,10 @@
 	char *buf,
 	u32 size)
 {
-	if (!pr_reg->isid_present_at_reg)
+	if (!pr_reg->isid_present_at_reg) {
 		buf[0] = '\0';
+		return;
+	}
 
 	snprintf(buf, size, ",i,0x%s", pr_reg->pr_reg_isid);
 }
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 27dd1e1..14bb2db 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -133,6 +133,15 @@
 		spin_unlock(&se_cmd->t_state_lock);
 		return false;
 	}
+	if (se_cmd->transport_state & CMD_T_PRE_EXECUTE) {
+		if (se_cmd->scsi_status) {
+			pr_debug("Attempted to abort io tag: %llu early failure"
+				 " status: 0x%02x\n", se_cmd->tag,
+				 se_cmd->scsi_status);
+			spin_unlock(&se_cmd->t_state_lock);
+			return false;
+		}
+	}
 	if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
 		pr_debug("Attempted to abort io tag: %llu already shutdown,"
 			" skipping\n", se_cmd->tag);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index bacfa8f..6f3eccf 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1939,6 +1939,7 @@
 	}
 
 	cmd->t_state = TRANSPORT_PROCESSING;
+	cmd->transport_state &= ~CMD_T_PRE_EXECUTE;
 	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
 	spin_unlock_irq(&cmd->t_state_lock);
 
@@ -1976,6 +1977,8 @@
 		list_del(&cmd->se_delayed_node);
 		spin_unlock(&dev->delayed_cmd_lock);
 
+		cmd->transport_state |= CMD_T_SENT;
+
 		__target_execute_cmd(cmd, true);
 
 		if (cmd->sam_task_attr == TCM_ORDERED_TAG)
@@ -2013,6 +2016,8 @@
 		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
 			 dev->dev_cur_ordered_id);
 	}
+	cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;
+
 restart:
 	target_restart_delayed_cmds(dev);
 }
@@ -2588,6 +2593,7 @@
 		ret = -ESHUTDOWN;
 		goto out;
 	}
+	se_cmd->transport_state |= CMD_T_PRE_EXECUTE;
 	list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
 out:
 	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index f25bade..9e96f8a 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -330,7 +330,7 @@
 				    unsigned long event, void *data)
 {
 	struct cpufreq_policy *policy = data;
-	unsigned long clipped_freq, floor_freq;
+	unsigned long clipped_freq = ULONG_MAX, floor_freq = 0;
 	struct cpufreq_cooling_device *cpufreq_dev;
 
 	if (event != CPUFREQ_ADJUST)
@@ -338,31 +338,30 @@
 
 	mutex_lock(&cooling_list_lock);
 	list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
-		if (!cpumask_test_cpu(policy->cpu, &cpufreq_dev->allowed_cpus))
+		if (!cpumask_intersects(&cpufreq_dev->allowed_cpus,
+					policy->related_cpus))
 			continue;
-
-		/*
-		 * policy->max is the maximum allowed frequency defined by user
-		 * and clipped_freq is the maximum that thermal constraints
-		 * allow.
-		 *
-		 * If clipped_freq is lower than policy->max, then we need to
-		 * readjust policy->max.
-		 *
-		 * But, if clipped_freq is greater than policy->max, we don't
-		 * need to do anything.
-		 *
-		 * Similarly, if policy minimum set by the user is less than
-		 * the floor_frequency, then adjust the policy->min.
-		 */
-		clipped_freq = cpufreq_dev->clipped_freq;
-		floor_freq = cpufreq_dev->floor_freq;
-
-		if (policy->max > clipped_freq || policy->min < floor_freq)
-			cpufreq_verify_within_limits(policy, floor_freq,
-						     clipped_freq);
-		break;
+		if (cpufreq_dev->clipped_freq < clipped_freq)
+			clipped_freq = cpufreq_dev->clipped_freq;
+		if (cpufreq_dev->floor_freq > floor_freq)
+			floor_freq = cpufreq_dev->floor_freq;
 	}
+	/*
+	 * policy->max is the maximum allowed frequency defined by user
+	 * and clipped_freq is the maximum that thermal constraints
+	 * allow.
+	 *
+	 * If clipped_freq is lower than policy->max, then we need to
+	 * readjust policy->max.
+	 *
+	 * But, if clipped_freq is greater than policy->max, we don't
+	 * need to do anything.
+	 *
+	 * Similarly, if policy minimum set by the user is less than
+	 * the floor_frequency, then adjust the policy->min.
+	 */
+	if (policy->max > clipped_freq || policy->min < floor_freq)
+		cpufreq_verify_within_limits(policy, floor_freq, clipped_freq);
 	mutex_unlock(&cooling_list_lock);
 
 	return NOTIFY_OK;
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index f642966..c5285ed 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -35,8 +35,9 @@
 #define TEMP0_RST_MSK			(0x1C)
 #define TEMP0_VALUE			(0x28)
 
-#define HISI_TEMP_BASE			(-60)
+#define HISI_TEMP_BASE			(-60000)
 #define HISI_TEMP_RESET			(100000)
+#define HISI_TEMP_STEP			(784)
 
 #define HISI_MAX_SENSORS		4
 
@@ -61,19 +62,38 @@
 	void __iomem *regs;
 };
 
-/* in millicelsius */
-static inline int _step_to_temp(int step)
+/*
+ * The temperature computation on the tsensor is as follow:
+ *	Unit: millidegree Celsius
+ *	Step: 255/200 (0.7843)
+ *	Temperature base: -60°C
+ *
+ * The register is programmed in temperature steps, every step is 784
+ * millidegree and begins at -60 000 m°C
+ *
+ * The temperature from the steps:
+ *
+ *	Temp = TempBase + (steps x 784)
+ *
+ * and the steps from the temperature:
+ *
+ *	steps = (Temp - TempBase) / 784
+ *
+ */
+static inline int hisi_thermal_step_to_temp(int step)
 {
-	/*
-	 * Every step equals (1 * 200) / 255 celsius, and finally
-	 * need convert to millicelsius.
-	 */
-	return (HISI_TEMP_BASE * 1000 + (step * 200000 / 255));
+	return HISI_TEMP_BASE + (step * HISI_TEMP_STEP);
 }
 
-static inline long _temp_to_step(long temp)
+static inline long hisi_thermal_temp_to_step(long temp)
 {
-	return ((temp - HISI_TEMP_BASE * 1000) * 255) / 200000;
+	return (temp - HISI_TEMP_BASE) / HISI_TEMP_STEP;
+}
+
+static inline long hisi_thermal_round_temp(int temp)
+{
+	return hisi_thermal_step_to_temp(
+		hisi_thermal_temp_to_step(temp));
 }
 
 static long hisi_thermal_get_sensor_temp(struct hisi_thermal_data *data,
@@ -99,7 +119,7 @@
 	usleep_range(3000, 5000);
 
 	val = readl(data->regs + TEMP0_VALUE);
-	val = _step_to_temp(val);
+	val = hisi_thermal_step_to_temp(val);
 
 	mutex_unlock(&data->thermal_lock);
 
@@ -126,10 +146,11 @@
 	writel((sensor->id << 12), data->regs + TEMP0_CFG);
 
 	/* enable for interrupt */
-	writel(_temp_to_step(sensor->thres_temp) | 0x0FFFFFF00,
+	writel(hisi_thermal_temp_to_step(sensor->thres_temp) | 0x0FFFFFF00,
 	       data->regs + TEMP0_TH);
 
-	writel(_temp_to_step(HISI_TEMP_RESET), data->regs + TEMP0_RST_TH);
+	writel(hisi_thermal_temp_to_step(HISI_TEMP_RESET),
+	       data->regs + TEMP0_RST_TH);
 
 	/* enable module */
 	writel(0x1, data->regs + TEMP0_RST_MSK);
@@ -230,7 +251,7 @@
 	sensor = &data->sensors[data->irq_bind_sensor];
 
 	dev_crit(&data->pdev->dev, "THERMAL ALARM: T > %d\n",
-		 sensor->thres_temp / 1000);
+		 sensor->thres_temp);
 	mutex_unlock(&data->thermal_lock);
 
 	for (i = 0; i < HISI_MAX_SENSORS; i++) {
@@ -269,7 +290,7 @@
 
 	for (i = 0; i < of_thermal_get_ntrips(sensor->tzd); i++) {
 		if (trip[i].type == THERMAL_TRIP_PASSIVE) {
-			sensor->thres_temp = trip[i].temperature;
+			sensor->thres_temp = hisi_thermal_round_temp(trip[i].temperature);
 			break;
 		}
 	}
@@ -317,15 +338,6 @@
 	if (data->irq < 0)
 		return data->irq;
 
-	ret = devm_request_threaded_irq(&pdev->dev, data->irq,
-					hisi_thermal_alarm_irq,
-					hisi_thermal_alarm_irq_thread,
-					0, "hisi_thermal", data);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "failed to request alarm irq: %d\n", ret);
-		return ret;
-	}
-
 	platform_set_drvdata(pdev, data);
 
 	data->clk = devm_clk_get(&pdev->dev, "thermal_clk");
@@ -345,8 +357,7 @@
 	}
 
 	hisi_thermal_enable_bind_irq_sensor(data);
-	irq_get_irqchip_state(data->irq, IRQCHIP_STATE_MASKED,
-			      &data->irq_enabled);
+	data->irq_enabled = true;
 
 	for (i = 0; i < HISI_MAX_SENSORS; ++i) {
 		ret = hisi_thermal_register_sensor(pdev, data,
@@ -358,6 +369,17 @@
 			hisi_thermal_toggle_sensor(&data->sensors[i], true);
 	}
 
+	ret = devm_request_threaded_irq(&pdev->dev, data->irq,
+					hisi_thermal_alarm_irq,
+					hisi_thermal_alarm_irq_thread,
+					0, "hisi_thermal", data);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to request alarm irq: %d\n", ret);
+		return ret;
+	}
+
+	enable_irq(data->irq);
+
 	return 0;
 }
 
@@ -397,8 +419,11 @@
 static int hisi_thermal_resume(struct device *dev)
 {
 	struct hisi_thermal_data *data = dev_get_drvdata(dev);
+	int ret;
 
-	clk_prepare_enable(data->clk);
+	ret = clk_prepare_enable(data->clk);
+	if (ret)
+		return ret;
 
 	data->irq_enabled = true;
 	hisi_thermal_enable_bind_irq_sensor(data);
diff --git a/drivers/thermal/qcom/bcl_peripheral.c b/drivers/thermal/qcom/bcl_peripheral.c
index 75e553f..3dccff5 100644
--- a/drivers/thermal/qcom/bcl_peripheral.c
+++ b/drivers/thermal/qcom/bcl_peripheral.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -748,10 +748,10 @@
 	}
 
 	bcl_get_devicetree_data(pdev);
+	bcl_configure_lmh_peripheral();
 	bcl_probe_ibat(pdev);
 	bcl_probe_vbat(pdev);
 	bcl_probe_soc(pdev);
-	bcl_configure_lmh_peripheral();
 
 	dev_set_drvdata(&pdev->dev, bcl_perph);
 	ret = bcl_write_register(BCL_MONITOR_EN, BIT(7));
diff --git a/drivers/thermal/qcom/qti_virtual_sensor.c b/drivers/thermal/qcom/qti_virtual_sensor.c
index 8cb7dc3..9b4fae8 100644
--- a/drivers/thermal/qcom/qti_virtual_sensor.c
+++ b/drivers/thermal/qcom/qti_virtual_sensor.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -64,6 +64,21 @@
 				"cpu1-gold-usr"},
 		.logic = VIRT_MAXIMUM,
 	},
+	{
+		.virt_zone_name = "deca-cpu-max-step",
+		.num_sensors = 10,
+		.sensor_names = {"apc0-cpu0-usr",
+				"apc0-cpu1-usr",
+				"apc0-cpu2-usr",
+				"apc0-cpu3-usr",
+				"apc0-l2-usr",
+				"apc1-cpu0-usr",
+				"apc1-cpu1-usr",
+				"apc1-cpu2-usr",
+				"apc1-cpu3-usr",
+				"apc1-l2-usr"},
+		.logic = VIRT_MAXIMUM,
+	},
 };
 
 int qti_virtual_sensor_register(struct device *dev)
diff --git a/drivers/thermal/qpnp-adc-tm.c b/drivers/thermal/qpnp-adc-tm.c
index 152b2a2..bec3dea 100644
--- a/drivers/thermal/qpnp-adc-tm.c
+++ b/drivers/thermal/qpnp-adc-tm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -256,22 +256,7 @@
 
 static int32_t qpnp_adc_tm_disable(struct qpnp_adc_tm_chip *chip)
 {
-	u8 data = 0;
-	int rc = 0;
-
-	rc = qpnp_adc_tm_write_reg(chip, QPNP_BTM_CONV_REQ, data, 1);
-	if (rc < 0) {
-		pr_err("adc-tm enable failed\n");
-		return rc;
-	}
-
-	rc = qpnp_adc_tm_write_reg(chip, QPNP_EN_CTL1, data, 1);
-	if (rc < 0) {
-		pr_err("adc-tm disable failed\n");
-		return rc;
-	}
-
-	return rc;
+	return 0;
 }
 
 static int qpnp_adc_tm_is_valid(struct qpnp_adc_tm_chip *chip)
diff --git a/drivers/thermal/qpnp-temp-alarm.c b/drivers/thermal/qpnp-temp-alarm.c
index 09c95e5..7398b7b 100644
--- a/drivers/thermal/qpnp-temp-alarm.c
+++ b/drivers/thermal/qpnp-temp-alarm.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -543,11 +543,11 @@
 
 	chip->tz_dev = thermal_zone_of_sensor_register(&pdev->dev, 0, chip,
 							tz_ops);
-	if (chip->tz_dev == NULL) {
+	if (IS_ERR(chip->tz_dev)) {
+		rc = PTR_ERR(chip->tz_dev);
 		dev_err(&pdev->dev,
-			"%s: thermal_zone_device_register() failed.\n",
-			__func__);
-		rc = -ENODEV;
+			"%s: thermal_zone_of_sensor_register() failed, rc=%d\n",
+			__func__, rc);
 		goto err_cancel_work;
 	}
 
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
index 1fff359..4bbb47a 100644
--- a/drivers/thermal/step_wise.c
+++ b/drivers/thermal/step_wise.c
@@ -31,8 +31,7 @@
  * If the temperature is higher than a trip point,
  *    a. if the trend is THERMAL_TREND_RAISING, use higher cooling
  *       state for this trip point
- *    b. if the trend is THERMAL_TREND_DROPPING, use lower cooling
- *       state for this trip point
+ *    b. if the trend is THERMAL_TREND_DROPPING, do nothing
  *    c. if the trend is THERMAL_TREND_RAISE_FULL, use upper limit
  *       for this trip point
  *    d. if the trend is THERMAL_TREND_DROP_FULL, use lower limit
@@ -102,10 +101,11 @@
 			if (!throttle)
 				next_target = THERMAL_NO_TARGET;
 		} else {
-			if (!throttle)
+			if (!throttle) {
 				next_target = cur_state - 1;
-			if (next_target > instance->upper)
-				next_target = instance->upper;
+				if (next_target > instance->upper)
+					next_target = instance->upper;
+			}
 		}
 		break;
 	case THERMAL_TREND_DROP_FULL:
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 80c3f91..f8a9a2f 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -2591,11 +2591,9 @@
 	case PM_POST_SUSPEND:
 		atomic_set(&in_suspend, 0);
 		list_for_each_entry(tz, &thermal_tz_list, node) {
-			mutex_lock(&tz->lock);
 			thermal_zone_device_reset(tz);
-			mod_delayed_work(system_freezable_power_efficient_wq,
-						&tz->poll_queue, 0);
-			mutex_unlock(&tz->lock);
+			thermal_zone_device_update(tz,
+						   THERMAL_EVENT_UNSPECIFIED);
 		}
 		break;
 	default:
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index bdf0e6e..faf50df 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1764,7 +1764,7 @@
 {
 	struct n_tty_data *ldata = tty->disc_data;
 
-	if (!old || (old->c_lflag ^ tty->termios.c_lflag) & ICANON) {
+	if (!old || (old->c_lflag ^ tty->termios.c_lflag) & (ICANON | EXTPROC)) {
 		bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE);
 		ldata->line_start = ldata->read_tail;
 		if (!L_ICANON(tty) || !read_cnt(ldata)) {
@@ -2427,7 +2427,7 @@
 		return put_user(tty_chars_in_buffer(tty), (int __user *) arg);
 	case TIOCINQ:
 		down_write(&tty->termios_rwsem);
-		if (L_ICANON(tty))
+		if (L_ICANON(tty) && !L_EXTPROC(tty))
 			retval = inq_canon(ldata);
 		else
 			retval = read_cnt(ldata);
diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
index f8c3107..2ffebb7 100644
--- a/drivers/tty/serial/8250/8250_fintek.c
+++ b/drivers/tty/serial/8250/8250_fintek.c
@@ -121,7 +121,7 @@
 
 	if ((!!(rs485->flags & SER_RS485_RTS_ON_SEND)) ==
 			(!!(rs485->flags & SER_RS485_RTS_AFTER_SEND)))
-		rs485->flags &= SER_RS485_ENABLED;
+		rs485->flags &= ~SER_RS485_ENABLED;
 	else
 		config |= RS485_URA;
 
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 22d32d2..b80ea87 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -5568,6 +5568,9 @@
 	{ PCI_DEVICE(0x1601, 0x0800), .driver_data = pbn_b0_4_1250000 },
 	{ PCI_DEVICE(0x1601, 0xa801), .driver_data = pbn_b0_4_1250000 },
 
+	/* Amazon PCI serial device */
+	{ PCI_DEVICE(0x1d0f, 0x8250), .driver_data = pbn_b0_1_115200 },
+
 	/*
 	 * These entries match devices with class COMMUNICATION_SERIAL,
 	 * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 1ef31e3..f6e4373 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -2526,8 +2526,11 @@
 	serial_dl_write(up, quot);
 
 	/* XR17V35x UARTs have an extra fractional divisor register (DLD) */
-	if (up->port.type == PORT_XR17V35X)
+	if (up->port.type == PORT_XR17V35X) {
+		/* Preserve bits not related to baudrate; DLD[7:4]. */
+		quot_frac |= serial_port_in(port, 0x2) & 0xf0;
 		serial_port_out(port, 0x2, quot_frac);
+	}
 }
 
 static unsigned int serial8250_get_baud_rate(struct uart_port *port,
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index 0ce23c3..185a9e2 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -894,7 +894,6 @@
 
 		msm_geni_serial_prep_dma_tx(uport);
 	}
-	IPC_LOG_MSG(msm_port->ipc_log_misc, "%s\n", __func__);
 	return;
 check_flow_ctrl:
 	geni_ios = geni_read_reg_nolog(uport->membase, SE_GENI_IOS);
@@ -963,7 +962,18 @@
 							SE_GENI_M_IRQ_CLEAR);
 	}
 	geni_write_reg_nolog(M_CMD_CANCEL_EN, uport, SE_GENI_M_IRQ_CLEAR);
-	IPC_LOG_MSG(port->ipc_log_misc, "%s\n", __func__);
+	/*
+	 * If we end up having to cancel an on-going Tx for non-console usecase
+	 * then it means there was some unsent data in the Tx FIFO, consequently
+	 * it means that there is a vote imbalance as we put in a vote during
+	 * start_tx() that is removed only as part of a "done" ISR. To balance
+	 * this out, remove the vote put in during start_tx().
+	 */
+	if (!uart_console(uport)) {
+		IPC_LOG_MSG(port->ipc_log_misc, "%s:Removing vote\n", __func__);
+		msm_geni_serial_power_off(uport);
+	}
+	IPC_LOG_MSG(port->ipc_log_misc, "%s:\n", __func__);
 }
 
 static void msm_geni_serial_stop_tx(struct uart_port *uport)
@@ -1727,7 +1737,8 @@
 static int get_clk_cfg(unsigned long clk_freq, unsigned long *ser_clk)
 {
 	unsigned long root_freq[] = {7372800, 14745600, 19200000, 29491200,
-		32000000, 48000000, 64000000, 80000000, 96000000, 100000000};
+		32000000, 48000000, 64000000, 80000000, 96000000, 100000000,
+		102400000, 112000000, 120000000, 128000000};
 	int i;
 	int match = -1;
 
@@ -2135,7 +2146,7 @@
 exit_geni_serial_earlyconsetup:
 	return ret;
 }
-OF_EARLYCON_DECLARE(msm_geni_serial, "qcom,msm-geni-uart",
+OF_EARLYCON_DECLARE(msm_geni_serial, "qcom,msm-geni-console",
 		msm_geni_serial_earlycon_setup);
 
 static int console_register(struct uart_driver *drv)
@@ -2528,6 +2539,8 @@
 		 * doing a stop_rx else we could end up flowing off the peer.
 		 */
 		mb();
+		IPC_LOG_MSG(port->ipc_log_pwr, "%s: Manual Flow ON 0x%x\n",
+						 __func__, uart_manual_rfr);
 	}
 	stop_rx_sequencer(&port->uport);
 	if ((geni_status & M_GENI_CMD_ACTIVE))
@@ -2609,6 +2622,7 @@
 			mutex_unlock(&tty_port->mutex);
 			return -EBUSY;
 		}
+		IPC_LOG_MSG(port->ipc_log_pwr, "%s\n", __func__);
 		mutex_unlock(&tty_port->mutex);
 	}
 	return 0;
@@ -2657,17 +2671,12 @@
 	.resume_noirq = msm_geni_serial_sys_resume_noirq,
 };
 
-static const struct of_device_id msm_geni_serial_match_table[] = {
-	{ .compatible = "qcom,msm-geni-uart"},
-	{},
-};
-
 static struct platform_driver msm_geni_serial_platform_driver = {
 	.remove = msm_geni_serial_remove,
 	.probe = msm_geni_serial_probe,
 	.driver = {
 		.name = "msm_geni_serial",
-		.of_match_table = msm_geni_serial_match_table,
+		.of_match_table = msm_geni_device_tbl,
 		.pm = &msm_geni_serial_pm_ops,
 	},
 };
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 94c3718..547bd21 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -244,8 +244,10 @@
 	 * architecture has no support for it:
 	 */
 	if (!trigger_all_cpu_backtrace()) {
-		struct pt_regs *regs = get_irq_regs();
+		struct pt_regs *regs = NULL;
 
+		if (in_irq())
+			regs = get_irq_regs();
 		if (regs) {
 			pr_info("CPU%d:\n", smp_processor_id());
 			show_regs(regs);
@@ -264,7 +266,10 @@
 
 static void sysrq_handle_showregs(int key)
 {
-	struct pt_regs *regs = get_irq_regs();
+	struct pt_regs *regs = NULL;
+
+	if (in_irq())
+		regs = get_irq_regs();
 	if (regs)
 		show_regs(regs);
 	perf_event_print_debug();
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index c220c2c..e99f1c5 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -446,7 +446,7 @@
  *	Callers other than flush_to_ldisc() need to exclude the kworker
  *	from concurrent use of the line discipline, see paste_selection().
  *
- *	Returns the number of bytes not processed
+ *	Returns the number of bytes processed
  */
 int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p,
 			  char *f, int count)
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 734a635..8d9f9a8 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1543,6 +1543,9 @@
 			"%s: %s driver does not set tty->port. This will crash the kernel later. Fix the driver!\n",
 			__func__, tty->driver->name);
 
+	retval = tty_ldisc_lock(tty, 5 * HZ);
+	if (retval)
+		goto err_release_lock;
 	tty->port->itty = tty;
 
 	/*
@@ -1553,6 +1556,7 @@
 	retval = tty_ldisc_setup(tty, tty->link);
 	if (retval)
 		goto err_release_tty;
+	tty_ldisc_unlock(tty);
 	/* Return the tty locked so that it cannot vanish under the caller */
 	return tty;
 
@@ -1565,9 +1569,11 @@
 
 	/* call the tty release_tty routine to clean out this slot */
 err_release_tty:
-	tty_unlock(tty);
+	tty_ldisc_unlock(tty);
 	tty_info_ratelimited(tty, "ldisc open failed (%d), clearing slot %d\n",
 			     retval, idx);
+err_release_lock:
+	tty_unlock(tty);
 	release_tty(tty, idx);
 	return ERR_PTR(retval);
 }
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 68947f6..3a9e2a2 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -271,10 +271,13 @@
 
 struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty)
 {
+	struct tty_ldisc *ld;
+
 	ldsem_down_read(&tty->ldisc_sem, MAX_SCHEDULE_TIMEOUT);
-	if (!tty->ldisc)
+	ld = tty->ldisc;
+	if (!ld)
 		ldsem_up_read(&tty->ldisc_sem);
-	return tty->ldisc;
+	return ld;
 }
 EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait);
 
@@ -333,7 +336,7 @@
 	ldsem_up_write(&tty->ldisc_sem);
 }
 
-static int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout)
+int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout)
 {
 	int ret;
 
@@ -344,7 +347,7 @@
 	return 0;
 }
 
-static void tty_ldisc_unlock(struct tty_struct *tty)
+void tty_ldisc_unlock(struct tty_struct *tty)
 {
 	clear_bit(TTY_LDISC_HALTED, &tty->flags);
 	__tty_ldisc_unlock(tty);
@@ -489,41 +492,6 @@
 }
 
 /**
- *	tty_ldisc_restore	-	helper for tty ldisc change
- *	@tty: tty to recover
- *	@old: previous ldisc
- *
- *	Restore the previous line discipline or N_TTY when a line discipline
- *	change fails due to an open error
- */
-
-static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
-{
-	struct tty_ldisc *new_ldisc;
-	int r;
-
-	/* There is an outstanding reference here so this is safe */
-	old = tty_ldisc_get(tty, old->ops->num);
-	WARN_ON(IS_ERR(old));
-	tty->ldisc = old;
-	tty_set_termios_ldisc(tty, old->ops->num);
-	if (tty_ldisc_open(tty, old) < 0) {
-		tty_ldisc_put(old);
-		/* This driver is always present */
-		new_ldisc = tty_ldisc_get(tty, N_TTY);
-		if (IS_ERR(new_ldisc))
-			panic("n_tty: get");
-		tty->ldisc = new_ldisc;
-		tty_set_termios_ldisc(tty, N_TTY);
-		r = tty_ldisc_open(tty, new_ldisc);
-		if (r < 0)
-			panic("Couldn't open N_TTY ldisc for "
-			      "%s --- error %d.",
-			      tty_name(tty), r);
-	}
-}
-
-/**
  *	tty_set_ldisc		-	set line discipline
  *	@tty: the terminal to set
  *	@ldisc: the line discipline
@@ -536,12 +504,7 @@
 
 int tty_set_ldisc(struct tty_struct *tty, int disc)
 {
-	int retval;
-	struct tty_ldisc *old_ldisc, *new_ldisc;
-
-	new_ldisc = tty_ldisc_get(tty, disc);
-	if (IS_ERR(new_ldisc))
-		return PTR_ERR(new_ldisc);
+	int retval, old_disc;
 
 	tty_lock(tty);
 	retval = tty_ldisc_lock(tty, 5 * HZ);
@@ -554,7 +517,8 @@
 	}
 
 	/* Check the no-op case */
-	if (tty->ldisc->ops->num == disc)
+	old_disc = tty->ldisc->ops->num;
+	if (old_disc == disc)
 		goto out;
 
 	if (test_bit(TTY_HUPPED, &tty->flags)) {
@@ -563,34 +527,25 @@
 		goto out;
 	}
 
-	old_ldisc = tty->ldisc;
-
-	/* Shutdown the old discipline. */
-	tty_ldisc_close(tty, old_ldisc);
-
-	/* Now set up the new line discipline. */
-	tty->ldisc = new_ldisc;
-	tty_set_termios_ldisc(tty, disc);
-
-	retval = tty_ldisc_open(tty, new_ldisc);
+	retval = tty_ldisc_reinit(tty, disc);
 	if (retval < 0) {
 		/* Back to the old one or N_TTY if we can't */
-		tty_ldisc_put(new_ldisc);
-		tty_ldisc_restore(tty, old_ldisc);
+		if (tty_ldisc_reinit(tty, old_disc) < 0) {
+			pr_err("tty: TIOCSETD failed, reinitializing N_TTY\n");
+			if (tty_ldisc_reinit(tty, N_TTY) < 0) {
+				/* At this point we have tty->ldisc == NULL. */
+				pr_err("tty: reinitializing N_TTY failed\n");
+			}
+		}
 	}
 
-	if (tty->ldisc->ops->num != old_ldisc->ops->num && tty->ops->set_ldisc) {
+	if (tty->ldisc && tty->ldisc->ops->num != old_disc &&
+	    tty->ops->set_ldisc) {
 		down_read(&tty->termios_rwsem);
 		tty->ops->set_ldisc(tty);
 		up_read(&tty->termios_rwsem);
 	}
 
-	/* At this point we hold a reference to the new ldisc and a
-	   reference to the old ldisc, or we hold two references to
-	   the old ldisc (if it was restored as part of error cleanup
-	   above). In either case, releasing a single reference from
-	   the old ldisc is correct. */
-	new_ldisc = old_ldisc;
 out:
 	tty_ldisc_unlock(tty);
 
@@ -598,7 +553,6 @@
 	   already running */
 	tty_buffer_restart_work(tty->port);
 err:
-	tty_ldisc_put(new_ldisc);	/* drop the extra reference */
 	tty_unlock(tty);
 	return retval;
 }
@@ -659,10 +613,8 @@
 	int retval;
 
 	ld = tty_ldisc_get(tty, disc);
-	if (IS_ERR(ld)) {
-		BUG_ON(disc == N_TTY);
+	if (IS_ERR(ld))
 		return PTR_ERR(ld);
-	}
 
 	if (tty->ldisc) {
 		tty_ldisc_close(tty, tty->ldisc);
@@ -674,10 +626,8 @@
 	tty_set_termios_ldisc(tty, disc);
 	retval = tty_ldisc_open(tty, tty->ldisc);
 	if (retval) {
-		if (!WARN_ON(disc == N_TTY)) {
-			tty_ldisc_put(tty->ldisc);
-			tty->ldisc = NULL;
-		}
+		tty_ldisc_put(tty->ldisc);
+		tty->ldisc = NULL;
 	}
 	return retval;
 }
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 701d9f7..0dce6ab 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -550,6 +550,9 @@
 	unsigned iad_num = 0;
 
 	memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE);
+	nintf = nintf_orig = config->desc.bNumInterfaces;
+	config->desc.bNumInterfaces = 0;	// Adjusted later
+
 	if (config->desc.bDescriptorType != USB_DT_CONFIG ||
 	    config->desc.bLength < USB_DT_CONFIG_SIZE ||
 	    config->desc.bLength > size) {
@@ -563,7 +566,6 @@
 	buffer += config->desc.bLength;
 	size -= config->desc.bLength;
 
-	nintf = nintf_orig = config->desc.bNumInterfaces;
 	if (nintf > USB_MAXINTERFACES) {
 		dev_warn(ddev, "config %d has too many interfaces: %d, "
 		    "using maximum allowed: %d\n",
@@ -900,14 +902,25 @@
 	}
 }
 
+static const __u8 bos_desc_len[256] = {
+	[USB_CAP_TYPE_WIRELESS_USB] = USB_DT_USB_WIRELESS_CAP_SIZE,
+	[USB_CAP_TYPE_EXT]          = USB_DT_USB_EXT_CAP_SIZE,
+	[USB_SS_CAP_TYPE]           = USB_DT_USB_SS_CAP_SIZE,
+	[USB_SSP_CAP_TYPE]          = USB_DT_USB_SSP_CAP_SIZE(1),
+	[CONTAINER_ID_TYPE]         = USB_DT_USB_SS_CONTN_ID_SIZE,
+	[USB_PTM_CAP_TYPE]          = USB_DT_USB_PTM_ID_SIZE,
+};
+
 /* Get BOS descriptor set */
 int usb_get_bos_descriptor(struct usb_device *dev)
 {
 	struct device *ddev = &dev->dev;
 	struct usb_bos_descriptor *bos;
 	struct usb_dev_cap_header *cap;
+	struct usb_ssp_cap_descriptor *ssp_cap;
 	unsigned char *buffer;
-	int length, total_len, num, i;
+	int length, total_len, num, i, ssac;
+	__u8 cap_type;
 	int ret;
 
 	bos = kzalloc(sizeof(struct usb_bos_descriptor), GFP_KERNEL);
@@ -960,7 +973,13 @@
 			dev->bos->desc->bNumDeviceCaps = i;
 			break;
 		}
+		cap_type = cap->bDevCapabilityType;
 		length = cap->bLength;
+		if (bos_desc_len[cap_type] && length < bos_desc_len[cap_type]) {
+			dev->bos->desc->bNumDeviceCaps = i;
+			break;
+		}
+
 		total_len -= length;
 
 		if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
@@ -968,7 +987,7 @@
 			continue;
 		}
 
-		switch (cap->bDevCapabilityType) {
+		switch (cap_type) {
 		case USB_CAP_TYPE_WIRELESS_USB:
 			/* Wireless USB cap descriptor is handled by wusb */
 			break;
@@ -981,8 +1000,11 @@
 				(struct usb_ss_cap_descriptor *)buffer;
 			break;
 		case USB_SSP_CAP_TYPE:
-			dev->bos->ssp_cap =
-				(struct usb_ssp_cap_descriptor *)buffer;
+			ssp_cap = (struct usb_ssp_cap_descriptor *)buffer;
+			ssac = (le32_to_cpu(ssp_cap->bmAttributes) &
+				USB_SSP_SUBLINK_SPEED_ATTRIBS);
+			if (length >= USB_DT_USB_SSP_CAP_SIZE(ssac))
+				dev->bos->ssp_cap = ssp_cap;
 			break;
 		case CONTAINER_ID_TYPE:
 			dev->bos->ss_id =
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index fa61935..893ebae 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -134,42 +134,38 @@
 #define USB_DEVICE_DEV		MKDEV(USB_DEVICE_MAJOR, 0)
 
 /* Limit on the total amount of memory we can allocate for transfers */
-static unsigned usbfs_memory_mb = 16;
+static u32 usbfs_memory_mb = 16;
 module_param(usbfs_memory_mb, uint, 0644);
 MODULE_PARM_DESC(usbfs_memory_mb,
 		"maximum MB allowed for usbfs buffers (0 = no limit)");
 
 /* Hard limit, necessary to avoid arithmetic overflow */
-#define USBFS_XFER_MAX		(UINT_MAX / 2 - 1000000)
+#define USBFS_XFER_MAX         (UINT_MAX / 2 - 1000000)
 
-static atomic_t usbfs_memory_usage;	/* Total memory currently allocated */
+static atomic64_t usbfs_memory_usage;	/* Total memory currently allocated */
 
 /* Check whether it's okay to allocate more memory for a transfer */
-static int usbfs_increase_memory_usage(unsigned amount)
+static int usbfs_increase_memory_usage(u64 amount)
 {
-	unsigned lim;
+	u64 lim;
 
-	/*
-	 * Convert usbfs_memory_mb to bytes, avoiding overflows.
-	 * 0 means use the hard limit (effectively unlimited).
-	 */
 	lim = ACCESS_ONCE(usbfs_memory_mb);
-	if (lim == 0 || lim > (USBFS_XFER_MAX >> 20))
-		lim = USBFS_XFER_MAX;
-	else
-		lim <<= 20;
+	lim <<= 20;
 
-	atomic_add(amount, &usbfs_memory_usage);
-	if (atomic_read(&usbfs_memory_usage) <= lim)
-		return 0;
-	atomic_sub(amount, &usbfs_memory_usage);
-	return -ENOMEM;
+	atomic64_add(amount, &usbfs_memory_usage);
+
+	if (lim > 0 && atomic64_read(&usbfs_memory_usage) > lim) {
+		atomic64_sub(amount, &usbfs_memory_usage);
+		return -ENOMEM;
+	}
+
+	return 0;
 }
 
 /* Memory for a transfer is being deallocated */
-static void usbfs_decrease_memory_usage(unsigned amount)
+static void usbfs_decrease_memory_usage(u64 amount)
 {
-	atomic_sub(amount, &usbfs_memory_usage);
+	atomic64_sub(amount, &usbfs_memory_usage);
 }
 
 static int connected(struct usb_dev_state *ps)
@@ -1191,7 +1187,7 @@
 	if (!usb_maxpacket(dev, pipe, !(bulk.ep & USB_DIR_IN)))
 		return -EINVAL;
 	len1 = bulk.len;
-	if (len1 >= USBFS_XFER_MAX)
+	if (len1 >= (INT_MAX - sizeof(struct urb)))
 		return -EINVAL;
 	ret = usbfs_increase_memory_usage(len1 + sizeof(struct urb));
 	if (ret)
@@ -1458,13 +1454,19 @@
 	int number_of_packets = 0;
 	unsigned int stream_id = 0;
 	void *buf;
-
-	if (uurb->flags & ~(USBDEVFS_URB_ISO_ASAP |
-				USBDEVFS_URB_SHORT_NOT_OK |
+	unsigned long mask =	USBDEVFS_URB_SHORT_NOT_OK |
 				USBDEVFS_URB_BULK_CONTINUATION |
 				USBDEVFS_URB_NO_FSBR |
 				USBDEVFS_URB_ZERO_PACKET |
-				USBDEVFS_URB_NO_INTERRUPT))
+				USBDEVFS_URB_NO_INTERRUPT;
+	/* USBDEVFS_URB_ISO_ASAP is a special case */
+	if (uurb->type == USBDEVFS_URB_TYPE_ISO)
+		mask |= USBDEVFS_URB_ISO_ASAP;
+
+	if (uurb->flags & ~mask)
+			return -EINVAL;
+
+	if ((unsigned int)uurb->buffer_length >= USBFS_XFER_MAX)
 		return -EINVAL;
 	if (uurb->buffer_length > 0 && !uurb->buffer)
 		return -EINVAL;
@@ -1584,10 +1586,6 @@
 		return -EINVAL;
 	}
 
-	if (uurb->buffer_length >= USBFS_XFER_MAX) {
-		ret = -EINVAL;
-		goto error;
-	}
 	if (uurb->buffer_length > 0 &&
 			!access_ok(is_in ? VERIFY_WRITE : VERIFY_READ,
 				uurb->buffer, uurb->buffer_length)) {
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 50a6f2f..a9117ee 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -4947,6 +4947,15 @@
 		usb_put_dev(udev);
 		if ((status == -ENOTCONN) || (status == -ENOTSUPP))
 			break;
+
+		/* When halfway through our retry count, power-cycle the port */
+		if (i == (SET_CONFIG_TRIES / 2) - 1) {
+			dev_info(&port_dev->dev, "attempt power cycle\n");
+			usb_hub_set_port_power(hdev, hub, port1, false);
+			msleep(2 * hub_power_on_good_delay(hub));
+			usb_hub_set_port_power(hdev, hub, port1, true);
+			msleep(hub_power_on_good_delay(hub));
+		}
 	}
 	if (hub->hdev->parent ||
 			!hcd->driver->port_handed_over ||
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 37c418e..c05c4f8 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -57,10 +57,11 @@
 	/* Microsoft LifeCam-VX700 v2.0 */
 	{ USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
 
-	/* Logitech HD Pro Webcams C920, C920-C and C930e */
+	/* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
 	{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
 	{ USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
 	{ USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
+	{ USB_DEVICE(0x046d, 0x085b), .driver_info = USB_QUIRK_DELAY_INIT },
 
 	/* Logitech ConferenceCam CC3000e */
 	{ USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT },
@@ -151,6 +152,12 @@
 	/* appletouch */
 	{ USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
 
+	/* Genesys Logic hub, internally used by KY-688 USB 3.1 Type-C Hub */
+	{ USB_DEVICE(0x05e3, 0x0612), .driver_info = USB_QUIRK_NO_LPM },
+
+	/* ELSA MicroLink 56K */
+	{ USB_DEVICE(0x05cc, 0x2267), .driver_info = USB_QUIRK_RESET_RESUME },
+
 	/* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */
 	{ USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },
 
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 719fcbf..e0321a1 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -858,7 +858,8 @@
 	dwc3_frame_length_adjustment(dwc);
 
 	usb_phy_set_suspend(dwc->usb2_phy, 0);
-	usb_phy_set_suspend(dwc->usb3_phy, 0);
+	if (dwc->maximum_speed >= USB_SPEED_SUPER)
+		usb_phy_set_suspend(dwc->usb3_phy, 0);
 	ret = phy_power_on(dwc->usb2_generic_phy);
 	if (ret < 0)
 		goto err2;
@@ -889,6 +890,29 @@
 		dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
 	}
 
+	/*
+	 * Workaround for STAR 9001198391 which affects dwc3 core
+	 * version 3.20a only. Default HP timer value is incorrectly
+	 * set to 3us. Reprogram HP timer value to support USB 3.1
+	 * HP timer ECN.
+	 */
+	if (!dwc3_is_usb31(dwc) &&  dwc->revision == DWC3_REVISION_320A) {
+		reg = dwc3_readl(dwc->regs, DWC3_GUCTL2);
+		reg &= ~DWC3_GUCTL2_HP_TIMER_MASK;
+		reg |= DWC3_GUCTL2_HP_TIMER(11);
+		dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
+	}
+
+	/*
+	 * Enable hardware control of sending remote wakeup in HS when
+	 * the device is in the L1 state.
+	 */
+	if (dwc->revision >= DWC3_REVISION_290A) {
+		reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
+		reg |= DWC3_GUCTL1_DEV_L1_EXIT_BY_HW;
+		dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
+	}
+
 	return 0;
 
 err2:
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index a8400dd..b91642a 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -217,6 +217,9 @@
 /* Global Debug LTSSM Register */
 #define DWC3_GDBGLTSSM_LINKSTATE_MASK	(0xF << 22)
 
+/* Global User Control 1 Register */
+#define DWC3_GUCTL1_DEV_L1_EXIT_BY_HW	(1 << 24)
+
 /* Global USB2 PHY Configuration Register */
 #define DWC3_GUSB2PHYCFG_PHYSOFTRST	(1 << 31)
 #define DWC3_GUSB2PHYCFG_ENBLSLPM	(1 << 8)
@@ -323,6 +326,8 @@
 
 /* Global User Control Register 2 */
 #define DWC3_GUCTL2_RST_ACTBITLATER		(1 << 14)
+#define DWC3_GUCTL2_HP_TIMER(n)			((n) << 21)
+#define DWC3_GUCTL2_HP_TIMER_MASK		DWC3_GUCTL2_HP_TIMER(0x1f)
 
 /* Device Configuration Register */
 #define DWC3_DCFG_DEVADDR(addr)	((addr) << 3)
@@ -1034,8 +1039,10 @@
 #define DWC3_REVISION_260A	0x5533260a
 #define DWC3_REVISION_270A	0x5533270a
 #define DWC3_REVISION_280A	0x5533280a
+#define DWC3_REVISION_290A	0x5533290a
 #define DWC3_REVISION_300A	0x5533300a
 #define DWC3_REVISION_310A	0x5533310a
+#define DWC3_REVISION_320A	0x5533320a
 
 /*
  * NOTICE: we're using bit 31 as a "is usb 3.1" flag. This is really
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index de7fefc..89bf6b7 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -71,11 +71,6 @@
 module_param(cpu_to_affin, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(cpu_to_affin, "affin usb irq to this cpu");
 
-/* override for USB speed */
-static int override_usb_speed;
-module_param(override_usb_speed, int, 0644);
-MODULE_PARM_DESC(override_usb_speed, "override for USB speed");
-
 /* XHCI registers */
 #define USB3_HCSPARAMS1		(0x4)
 #define USB3_PORTSC		(0x420)
@@ -121,7 +116,7 @@
 #define GSI_DBL_ADDR_L(n)	((QSCRATCH_REG_OFFSET + 0x110) + (n*4))
 #define GSI_DBL_ADDR_H(n)	((QSCRATCH_REG_OFFSET + 0x120) + (n*4))
 #define GSI_RING_BASE_ADDR_L(n)	((QSCRATCH_REG_OFFSET + 0x130) + (n*4))
-#define GSI_RING_BASE_ADDR_H(n)	((QSCRATCH_REG_OFFSET + 0x140) + (n*4))
+#define GSI_RING_BASE_ADDR_H(n)	((QSCRATCH_REG_OFFSET + 0x144) + (n*4))
 
 #define	GSI_IF_STS	(QSCRATCH_REG_OFFSET + 0x1A4)
 #define	GSI_WR_CTRL_STATE_MASK	BIT(15)
@@ -271,7 +266,10 @@
 	struct pm_qos_request pm_qos_req_dma;
 	struct delayed_work perf_vote_work;
 	struct delayed_work sdp_check;
+	bool usb_compliance_mode;
 	struct mutex suspend_resume_mutex;
+
+	enum usb_device_speed override_usb_speed;
 };
 
 #define USB_HSPHY_3P3_VOL_MIN		3050000 /* uV */
@@ -295,14 +293,6 @@
 static int dwc3_restart_usb_host_mode(struct notifier_block *nb,
 					unsigned long event, void *ptr);
 
-static inline bool is_valid_usb_speed(struct dwc3 *dwc, int speed)
-{
-
-	return (((speed == USB_SPEED_FULL) || (speed == USB_SPEED_HIGH) ||
-		(speed == USB_SPEED_SUPER) || (speed == USB_SPEED_SUPER_PLUS))
-		&& (speed <= dwc->maximum_speed));
-}
-
 /**
  *
  * Read register with debug info.
@@ -933,9 +923,10 @@
 * for GSI channel creation.
 *
 * @usb_ep - pointer to usb_ep instance.
-* @dbl_addr - Doorbell address obtained from IPA driver
+* @request - USB GSI request to get Doorbell address obtained from IPA driver
 */
-static void gsi_store_ringbase_dbl_info(struct usb_ep *ep, u32 dbl_addr)
+static void gsi_store_ringbase_dbl_info(struct usb_ep *ep,
+			struct usb_gsi_request *request)
 {
 	struct dwc3_ep *dep = to_dwc3_ep(ep);
 	struct dwc3	*dwc = dep->dwc;
@@ -944,11 +935,27 @@
 
 	dwc3_msm_write_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n),
 			dwc3_trb_dma_offset(dep, &dep->trb_pool[0]));
-	dwc3_msm_write_reg(mdwc->base, GSI_DBL_ADDR_L(n), dbl_addr);
 
-	dev_dbg(mdwc->dev, "Ring Base Addr %d = %x", n,
+	if (request->mapped_db_reg_phs_addr_lsb)
+		dma_unmap_resource(dwc->sysdev,
+			request->mapped_db_reg_phs_addr_lsb,
+			PAGE_SIZE, DMA_BIDIRECTIONAL, 0);
+
+	request->mapped_db_reg_phs_addr_lsb = dma_map_resource(dwc->sysdev,
+			(phys_addr_t)request->db_reg_phs_addr_lsb, PAGE_SIZE,
+			DMA_BIDIRECTIONAL, 0);
+	if (dma_mapping_error(dwc->sysdev, request->mapped_db_reg_phs_addr_lsb))
+		dev_err(mdwc->dev, "mapping error for db_reg_phs_addr_lsb\n");
+
+	dev_dbg(mdwc->dev, "ep:%s dbl_addr_lsb:%x mapped_dbl_addr_lsb:%llx\n",
+		ep->name, request->db_reg_phs_addr_lsb,
+		(unsigned long long)request->mapped_db_reg_phs_addr_lsb);
+
+	dwc3_msm_write_reg(mdwc->base, GSI_DBL_ADDR_L(n),
+			(u32)request->mapped_db_reg_phs_addr_lsb);
+	dev_dbg(mdwc->dev, "Ring Base Addr %d: %x (LSB)\n", n,
 			dwc3_msm_read_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n)));
-	dev_dbg(mdwc->dev, "GSI DB Addr %d = %x", n,
+	dev_dbg(mdwc->dev, "GSI DB Addr %d: %x (LSB)\n", n,
 			dwc3_msm_read_reg(mdwc->base, GSI_DBL_ADDR_L(n)));
 }
 
@@ -964,9 +971,6 @@
 	void __iomem *gsi_dbl_address_lsb;
 	void __iomem *gsi_dbl_address_msb;
 	dma_addr_t offset;
-	u64 dbl_addr = *((u64 *)request->buf_base_addr);
-	u32 dbl_lo_addr = (dbl_addr & 0xFFFFFFFF);
-	u32 dbl_hi_addr = (dbl_addr >> 32);
 	struct dwc3_ep *dep = to_dwc3_ep(ep);
 	struct dwc3	*dwc = dep->dwc;
 	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
@@ -974,18 +978,19 @@
 					: (request->num_bufs + 2);
 
 	gsi_dbl_address_lsb = devm_ioremap_nocache(mdwc->dev,
-					dbl_lo_addr, sizeof(u32));
+				request->db_reg_phs_addr_lsb, sizeof(u32));
 	if (!gsi_dbl_address_lsb)
 		dev_dbg(mdwc->dev, "Failed to get GSI DBL address LSB\n");
 
 	gsi_dbl_address_msb = devm_ioremap_nocache(mdwc->dev,
-					dbl_hi_addr, sizeof(u32));
+			request->db_reg_phs_addr_msb, sizeof(u32));
 	if (!gsi_dbl_address_msb)
 		dev_dbg(mdwc->dev, "Failed to get GSI DBL address MSB\n");
 
 	offset = dwc3_trb_dma_offset(dep, &dep->trb_pool[num_trbs-1]);
 	dev_dbg(mdwc->dev, "Writing link TRB addr: %pa to %p (%x) for ep:%s\n",
-		&offset, gsi_dbl_address_lsb, dbl_lo_addr, ep->name);
+		&offset, gsi_dbl_address_lsb, request->db_reg_phs_addr_lsb,
+		ep->name);
 
 	writel_relaxed(offset, gsi_dbl_address_lsb);
 	writel_relaxed(0, gsi_dbl_address_msb);
@@ -1056,6 +1061,8 @@
 	struct dwc3_trb *trb;
 	int num_trbs = (dep->direction) ? (2 * (req->num_bufs) + 2)
 					: (req->num_bufs + 2);
+	struct scatterlist *sg;
+	struct sg_table *sgt;
 
 	dep->trb_pool = dma_zalloc_coherent(dwc->sysdev,
 				num_trbs * sizeof(struct dwc3_trb),
@@ -1068,6 +1075,19 @@
 	}
 
 	dep->num_trbs = num_trbs;
+	dma_get_sgtable(dwc->sysdev, &req->sgt_trb_xfer_ring, dep->trb_pool,
+		dep->trb_pool_dma, num_trbs * sizeof(struct dwc3_trb));
+
+	sgt = &req->sgt_trb_xfer_ring;
+	dev_dbg(dwc->dev, "%s(): trb_pool:%pK trb_pool_dma:%lx\n",
+		__func__, dep->trb_pool, (unsigned long)dep->trb_pool_dma);
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i)
+		dev_dbg(dwc->dev,
+			"%i: page_link:%lx offset:%x length:%x address:%lx\n",
+			i, sg->page_link, sg->offset, sg->length,
+			(unsigned long)sg->dma_address);
+
 	/* IN direction */
 	if (dep->direction) {
 		for (i = 0; i < num_trbs ; i++) {
@@ -1133,11 +1153,13 @@
 		}
 	}
 
-	pr_debug("%s: Initialized TRB Ring for %s\n", __func__, dep->name);
+	dev_dbg(dwc->dev, "%s: Initialized TRB Ring for %s\n",
+					__func__, dep->name);
 	trb = &dep->trb_pool[0];
 	if (trb) {
 		for (i = 0; i < num_trbs; i++) {
-			pr_debug("TRB(%d): ADDRESS:%lx bpl:%x bph:%x size:%x ctrl:%x\n",
+			dev_dbg(dwc->dev,
+				"TRB %d: ADDR:%lx bpl:%x bph:%x sz:%x ctl:%x\n",
 				i, (unsigned long)dwc3_trb_dma_offset(dep,
 				&dep->trb_pool[i]), trb->bpl, trb->bph,
 				trb->size, trb->ctrl);
@@ -1154,7 +1176,7 @@
 * @usb_ep - pointer to usb_ep instance.
 *
 */
-static void gsi_free_trbs(struct usb_ep *ep)
+static void gsi_free_trbs(struct usb_ep *ep, struct usb_gsi_request *req)
 {
 	struct dwc3_ep *dep = to_dwc3_ep(ep);
 	struct dwc3 *dwc = dep->dwc;
@@ -1171,6 +1193,7 @@
 		dep->trb_pool = NULL;
 		dep->trb_pool_dma = 0;
 	}
+	sg_free_table(&req->sgt_trb_xfer_ring);
 }
 /*
 * Configures GSI EPs. For GSI EPs we need to set interrupter numbers.
@@ -1360,7 +1383,8 @@
 		break;
 	case GSI_EP_OP_FREE_TRBS:
 		dev_dbg(mdwc->dev, "EP_OP_FREE_TRBS for %s\n", ep->name);
-		gsi_free_trbs(ep);
+		request = (struct usb_gsi_request *)op_data;
+		gsi_free_trbs(ep, request);
 		break;
 	case GSI_EP_OP_CONFIG:
 		request = (struct usb_gsi_request *)op_data;
@@ -1381,7 +1405,8 @@
 		break;
 	case GSI_EP_OP_STORE_DBL_INFO:
 		dev_dbg(mdwc->dev, "EP_OP_STORE_DBL_INFO\n");
-		gsi_store_ringbase_dbl_info(ep, *((u32 *)op_data));
+		request = (struct usb_gsi_request *)op_data;
+		gsi_store_ringbase_dbl_info(ep, request);
 		break;
 	case GSI_EP_OP_ENABLE_GSI:
 		dev_dbg(mdwc->dev, "EP_OP_ENABLE_GSI\n");
@@ -2543,7 +2568,7 @@
 
 	dev_dbg(mdwc->dev, "%s: dwc3 resume work\n", __func__);
 
-	if (mdwc->vbus_active) {
+	if (mdwc->vbus_active && !mdwc->in_restart) {
 		edev = mdwc->extcon_vbus;
 		extcon_id = EXTCON_USB;
 	} else if (mdwc->id_state == DWC3_ID_GROUND) {
@@ -2564,10 +2589,12 @@
 		if (dwc->maximum_speed > dwc->max_hw_supp_speed)
 			dwc->maximum_speed = dwc->max_hw_supp_speed;
 
-		if (override_usb_speed &&
-				is_valid_usb_speed(dwc, override_usb_speed)) {
-			dwc->maximum_speed = override_usb_speed;
-			dbg_event(0xFF, "override_speed", override_usb_speed);
+		if (mdwc->override_usb_speed) {
+			dwc->maximum_speed = mdwc->override_usb_speed;
+			dwc->gadget.max_speed = dwc->maximum_speed;
+			dbg_event(0xFF, "override_speed",
+					mdwc->override_usb_speed);
+			mdwc->override_usb_speed = 0;
 		}
 
 		dbg_event(0xFF, "speed", dwc->maximum_speed);
@@ -2851,6 +2878,13 @@
 	if (!mdwc->vbus_active)
 		return;
 
+	/* USB 3.1 compliance equipment usually reported as floating
+	 * charger as HS dp/dm lines are never connected. Do not
+	 * tear down USB stack if compliance parameter is set
+	 */
+	if (mdwc->usb_compliance_mode)
+		return;
+
 	/* floating D+/D- lines detected */
 	if (dwc->gadget.state < USB_STATE_DEFAULT &&
 		dwc3_gadget_get_link_state(dwc) != DWC3_LINK_STATE_CMPLY) {
@@ -3100,6 +3134,11 @@
 static DEVICE_ATTR_RW(mode);
 static void msm_dwc3_perf_vote_work(struct work_struct *w);
 
+/* This node only shows max speed supported by dwc3 and it should be
+ * same as what is reported in udc/core.c max_speed node. For current
+ * operating gadget speed, query current_speed node which is implemented
+ * by udc/core.c
+ */
 static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
 		char *buf)
 {
@@ -3107,7 +3146,7 @@
 	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
 
 	return snprintf(buf, PAGE_SIZE, "%s\n",
-			usb_speed_string(dwc->max_hw_supp_speed));
+			usb_speed_string(dwc->maximum_speed));
 }
 
 static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
@@ -3117,14 +3156,25 @@
 	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
 	enum usb_device_speed req_speed = USB_SPEED_UNKNOWN;
 
-	if (sysfs_streq(buf, "high"))
+	/* DEVSPD can only have values SS(0x4), HS(0x0) and FS(0x1).
+	 * per 3.20a data book. Allow only these settings. Note that,
+	 * xhci does not support full-speed only mode.
+	 */
+	if (sysfs_streq(buf, "full"))
+		req_speed = USB_SPEED_FULL;
+	else if (sysfs_streq(buf, "high"))
 		req_speed = USB_SPEED_HIGH;
 	else if (sysfs_streq(buf, "super"))
 		req_speed = USB_SPEED_SUPER;
+	else
+		return -EINVAL;
 
-	if (req_speed != USB_SPEED_UNKNOWN &&
-			req_speed != dwc->max_hw_supp_speed) {
-		dwc->maximum_speed = dwc->max_hw_supp_speed = req_speed;
+	/* restart usb only works for device mode. Perform manual cable
+	 * plug in/out for host mode restart.
+	 */
+	if (req_speed != dwc->maximum_speed &&
+			req_speed <= dwc->max_hw_supp_speed) {
+		mdwc->override_usb_speed = req_speed;
 		schedule_work(&mdwc->restart_usb_work);
 	}
 
@@ -3132,6 +3182,31 @@
 }
 static DEVICE_ATTR_RW(speed);
 
+static ssize_t usb_compliance_mode_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%c\n",
+			mdwc->usb_compliance_mode ? 'Y' : 'N');
+}
+
+static ssize_t usb_compliance_mode_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int ret = 0;
+	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+	ret = strtobool(buf, &mdwc->usb_compliance_mode);
+
+	if (ret)
+		return ret;
+
+	return count;
+}
+static DEVICE_ATTR_RW(usb_compliance_mode);
+
+
 static int dwc3_msm_probe(struct platform_device *pdev)
 {
 	struct device_node *node = pdev->dev.of_node, *dwc3_node;
@@ -3275,6 +3350,8 @@
 			 * turning on usb gdsc regulator clk is stuck off.
 			 */
 			dwc3_msm_config_gdsc(mdwc, 1);
+			clk_prepare_enable(mdwc->iface_clk);
+			clk_prepare_enable(mdwc->core_clk);
 			clk_prepare_enable(mdwc->cfg_ahb_clk);
 			/* Configure AHB2PHY for one wait state read/write*/
 			val = readl_relaxed(mdwc->ahb2phy_base +
@@ -3287,6 +3364,8 @@
 				mb();
 			}
 			clk_disable_unprepare(mdwc->cfg_ahb_clk);
+			clk_disable_unprepare(mdwc->core_clk);
+			clk_disable_unprepare(mdwc->iface_clk);
 			dwc3_msm_config_gdsc(mdwc, 0);
 		}
 	}
@@ -3482,6 +3561,7 @@
 
 	device_create_file(&pdev->dev, &dev_attr_mode);
 	device_create_file(&pdev->dev, &dev_attr_speed);
+	device_create_file(&pdev->dev, &dev_attr_usb_compliance_mode);
 
 	host_mode = usb_get_dr_mode(&mdwc->dwc3->dev) == USB_DR_MODE_HOST;
 	if (!dwc->is_drd && host_mode) {
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index cbce880..986c97c 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -816,6 +816,16 @@
 	if (!dwc->gadget_driver)
 		goto out;
 
+	/*
+	 * Workaround for SNPS STAR: 9001046257 which affects dwc3 core
+	 * 3.10a or earlier. LPM is not rejected during control transfer. Device
+	 * is programmed to reject LPM when SETUP packet is received and
+	 * ACK LPM after completing STATUS stage.
+	 */
+	if (dwc->has_lpm_erratum && dwc->revision <= DWC3_REVISION_310A)
+		dwc3_masked_write_readback(dwc->regs, DWC3_DCTL,
+			DWC3_DCTL_LPM_ERRATA_MASK, DWC3_DCTL_LPM_ERRATA(0));
+
 	trace_dwc3_ctrl_req(ctrl);
 
 	len = le16_to_cpu(ctrl->wLength);
@@ -990,6 +1000,11 @@
 	dbg_print(dep->number, "DONE", status, "STATUS");
 	dwc->ep0state = EP0_SETUP_PHASE;
 	dwc3_ep0_out_start(dwc);
+
+	if (dwc->has_lpm_erratum && dwc->revision <= DWC3_REVISION_310A)
+		dwc3_masked_write_readback(dwc->regs, DWC3_DCTL,
+			DWC3_DCTL_LPM_ERRATA_MASK,
+			DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold));
 }
 
 static void dwc3_ep0_xfer_complete(struct dwc3 *dwc,
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 0ffe351..8f0ca3f 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -413,7 +413,7 @@
 		dwc3_trace(trace_dwc3_gadget, "Command Timed Out");
 		dev_err(dwc->dev, "%s command timeout for %s\n",
 			dwc3_gadget_ep_cmd_string(cmd), dep->name);
-		if (!(cmd & DWC3_DEPCMD_ENDTRANSFER)) {
+		if (cmd != DWC3_DEPCMD_ENDTRANSFER) {
 			dwc->ep_cmd_timeout_cnt++;
 			dwc3_notify_event(dwc,
 				DWC3_CONTROLLER_RESTART_USB_SESSION);
@@ -3968,15 +3968,10 @@
 
 int dwc3_gadget_suspend(struct dwc3 *dwc)
 {
-	int ret;
-
 	if (!dwc->gadget_driver)
 		return 0;
 
-	ret = dwc3_gadget_run_stop(dwc, false, false);
-	if (ret < 0)
-		return ret;
-
+	dwc3_gadget_run_stop(dwc, false, false);
 	dwc3_disconnect_gadget(dwc);
 	__dwc3_gadget_stop(dwc);
 
diff --git a/drivers/usb/dwc3/io.h b/drivers/usb/dwc3/io.h
index a06f9a8..1f75b58 100644
--- a/drivers/usb/dwc3/io.h
+++ b/drivers/usb/dwc3/io.h
@@ -64,4 +64,28 @@
 			base - DWC3_GLOBALS_REGS_START + offset, value);
 }
 
+static inline void dwc3_masked_write_readback(void __iomem *base,
+	u32 offset, const u32 mask, u32 value)
+{
+	u32 write_val, tmp;
+
+	tmp = readl_relaxed(base + offset - DWC3_GLOBALS_REGS_START);
+	tmp &= ~mask;		/* retain other bits */
+	write_val = tmp | value;
+
+	writel_relaxed(write_val, base + offset - DWC3_GLOBALS_REGS_START);
+
+	/* Read back to see if value was written */
+	tmp = readl_relaxed(base + offset - DWC3_GLOBALS_REGS_START);
+
+	dwc3_trace(trace_dwc3_masked_write_readback,
+			"addr %p readback val %08x",
+			base - DWC3_GLOBALS_REGS_START + offset, tmp);
+
+	tmp &= mask;		/* clear other bits */
+	if (tmp != value)
+		pr_err("%s: write: %x to %x FAILED\n",
+			__func__, value, offset);
+}
+
 #endif /* __DRIVERS_USB_DWC3_IO_H */
diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h
index d24cefd..88f5fb8 100644
--- a/drivers/usb/dwc3/trace.h
+++ b/drivers/usb/dwc3/trace.h
@@ -47,6 +47,11 @@
 	TP_ARGS(vaf)
 );
 
+DEFINE_EVENT(dwc3_log_msg, dwc3_masked_write_readback,
+	TP_PROTO(struct va_format *vaf),
+	TP_ARGS(vaf)
+);
+
 DEFINE_EVENT(dwc3_log_msg, dwc3_gadget,
 	TP_PROTO(struct va_format *vaf),
 	TP_ARGS(vaf)
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index ceacf3d..598a67d 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -10,5 +10,3 @@
 libcomposite-y			+= composite.o functions.o configfs.o u_f.o
 
 obj-$(CONFIG_USB_GADGET)	+= udc/ function/ legacy/
-
-obj-$(CONFIG_USB_CI13XXX_MSM)   += ci13xxx_msm.o
diff --git a/drivers/usb/gadget/ci13xxx_msm.c b/drivers/usb/gadget/ci13xxx_msm.c
deleted file mode 100644
index 78b7d3a..0000000
--- a/drivers/usb/gadget/ci13xxx_msm.c
+++ /dev/null
@@ -1,556 +0,0 @@
-/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/usb/msm_hsusb_hw.h>
-#include <linux/usb/ulpi.h>
-#include <linux/gpio.h>
-#include <linux/pinctrl/consumer.h>
-
-#include "ci13xxx_udc.c"
-
-#define MSM_USB_BASE	(udc->regs)
-
-#define CI13XXX_MSM_MAX_LOG2_ITC	7
-
-struct ci13xxx_udc_context {
-	int irq;
-	void __iomem *regs;
-	int wake_gpio;
-	int wake_irq;
-	bool wake_irq_state;
-	struct pinctrl *ci13xxx_pinctrl;
-	struct timer_list irq_enable_timer;
-	bool irq_disabled;
-};
-
-static struct ci13xxx_udc_context _udc_ctxt;
-#define IRQ_ENABLE_DELAY	(jiffies + msecs_to_jiffies(1000))
-
-static irqreturn_t msm_udc_irq(int irq, void *data)
-{
-	return udc_irq();
-}
-
-static void ci13xxx_msm_suspend(void)
-{
-	struct device *dev = _udc->gadget.dev.parent;
-
-	dev_dbg(dev, "ci13xxx_msm_suspend\n");
-
-	if (_udc_ctxt.wake_irq && !_udc_ctxt.wake_irq_state) {
-		enable_irq_wake(_udc_ctxt.wake_irq);
-		enable_irq(_udc_ctxt.wake_irq);
-		_udc_ctxt.wake_irq_state = true;
-	}
-}
-
-static void ci13xxx_msm_resume(void)
-{
-	struct device *dev = _udc->gadget.dev.parent;
-
-	dev_dbg(dev, "ci13xxx_msm_resume\n");
-
-	if (_udc_ctxt.wake_irq && _udc_ctxt.wake_irq_state) {
-		disable_irq_wake(_udc_ctxt.wake_irq);
-		disable_irq_nosync(_udc_ctxt.wake_irq);
-		_udc_ctxt.wake_irq_state = false;
-	}
-}
-
-static void ci13xxx_msm_disconnect(void)
-{
-	struct ci13xxx *udc = _udc;
-	struct usb_phy *phy = udc->transceiver;
-
-	if (phy && (phy->flags & ENABLE_DP_MANUAL_PULLUP)) {
-		u32 temp;
-
-		usb_phy_io_write(phy,
-				ULPI_MISC_A_VBUSVLDEXT |
-				ULPI_MISC_A_VBUSVLDEXTSEL,
-				ULPI_CLR(ULPI_MISC_A));
-
-		/* Notify LINK of VBUS LOW */
-		temp = readl_relaxed(USB_USBCMD);
-		temp &= ~USBCMD_SESS_VLD_CTRL;
-		writel_relaxed(temp, USB_USBCMD);
-
-		/*
-		 * Add memory barrier as it is must to complete
-		 * above USB PHY and Link register writes before
-		 * moving ahead with USB peripheral mode enumeration,
-		 * otherwise USB peripheral mode may not work.
-		 */
-		mb();
-	}
-}
-
-/* Link power management will reduce power consumption by
- * short time HW suspend/resume.
- */
-static void ci13xxx_msm_set_l1(struct ci13xxx *udc)
-{
-	int temp;
-	struct device *dev = udc->gadget.dev.parent;
-
-	dev_dbg(dev, "Enable link power management\n");
-
-	/* Enable remote wakeup and L1 for IN EPs */
-	writel_relaxed(0xffff0000, USB_L1_EP_CTRL);
-
-	temp = readl_relaxed(USB_L1_CONFIG);
-	temp |= L1_CONFIG_LPM_EN | L1_CONFIG_REMOTE_WAKEUP |
-		L1_CONFIG_GATE_SYS_CLK | L1_CONFIG_PHY_LPM |
-		L1_CONFIG_PLL;
-	writel_relaxed(temp, USB_L1_CONFIG);
-}
-
-static void ci13xxx_msm_connect(void)
-{
-	struct ci13xxx *udc = _udc;
-	struct usb_phy *phy = udc->transceiver;
-
-	if (phy && (phy->flags & ENABLE_DP_MANUAL_PULLUP)) {
-		int	temp;
-
-		usb_phy_io_write(phy,
-			ULPI_MISC_A_VBUSVLDEXT |
-			ULPI_MISC_A_VBUSVLDEXTSEL,
-			ULPI_SET(ULPI_MISC_A));
-
-		temp = readl_relaxed(USB_GENCONFIG_2);
-		temp |= GENCONFIG_2_SESS_VLD_CTRL_EN;
-		writel_relaxed(temp, USB_GENCONFIG_2);
-
-		temp = readl_relaxed(USB_USBCMD);
-		temp |= USBCMD_SESS_VLD_CTRL;
-		writel_relaxed(temp, USB_USBCMD);
-
-		/*
-		 * Add memory barrier as it is must to complete
-		 * above USB PHY and Link register writes before
-		 * moving ahead with USB peripheral mode enumeration,
-		 * otherwise USB peripheral mode may not work.
-		 */
-		mb();
-	}
-}
-
-static void ci13xxx_msm_reset(void)
-{
-	struct ci13xxx *udc = _udc;
-	struct usb_phy *phy = udc->transceiver;
-	struct device *dev = udc->gadget.dev.parent;
-	int	temp;
-
-	writel_relaxed(0, USB_AHBBURST);
-	writel_relaxed(0x08, USB_AHBMODE);
-
-	/* workaround for rx buffer collision issue */
-	temp = readl_relaxed(USB_GENCONFIG);
-	temp &= ~GENCONFIG_TXFIFO_IDLE_FORCE_DISABLE;
-	temp &= ~GENCONFIG_ULPI_SERIAL_EN;
-	writel_relaxed(temp, USB_GENCONFIG);
-
-	if (udc->gadget.l1_supported)
-		ci13xxx_msm_set_l1(udc);
-
-	if (phy && (phy->flags & ENABLE_SECONDARY_PHY)) {
-		int	temp;
-
-		dev_dbg(dev, "using secondary hsphy\n");
-		temp = readl_relaxed(USB_PHY_CTRL2);
-		temp |= (1<<16);
-		writel_relaxed(temp, USB_PHY_CTRL2);
-
-		/*
-		 * Add memory barrier to make sure above LINK writes are
-		 * complete before moving ahead with USB peripheral mode
-		 * enumeration.
-		 */
-		mb();
-	}
-}
-
-static void ci13xxx_msm_mark_err_event(void)
-{
-	struct ci13xxx *udc = _udc;
-	struct msm_otg *otg;
-
-	if (udc == NULL)
-		return;
-
-	if (udc->transceiver == NULL)
-		return;
-
-	otg = container_of(udc->transceiver, struct msm_otg, phy);
-
-	/* This will trigger hardware reset before next connection */
-	otg->err_event_seen = true;
-}
-
-static void ci13xxx_msm_notify_event(struct ci13xxx *udc, unsigned int event)
-{
-	struct device *dev = udc->gadget.dev.parent;
-
-	switch (event) {
-	case CI13XXX_CONTROLLER_RESET_EVENT:
-		dev_info(dev, "CI13XXX_CONTROLLER_RESET_EVENT received\n");
-		ci13xxx_msm_reset();
-		break;
-	case CI13XXX_CONTROLLER_DISCONNECT_EVENT:
-		dev_info(dev, "CI13XXX_CONTROLLER_DISCONNECT_EVENT received\n");
-		ci13xxx_msm_disconnect();
-		ci13xxx_msm_resume();
-		break;
-	case CI13XXX_CONTROLLER_CONNECT_EVENT:
-		dev_info(dev, "CI13XXX_CONTROLLER_CONNECT_EVENT received\n");
-		ci13xxx_msm_connect();
-		break;
-	case CI13XXX_CONTROLLER_SUSPEND_EVENT:
-		dev_info(dev, "CI13XXX_CONTROLLER_SUSPEND_EVENT received\n");
-		ci13xxx_msm_suspend();
-		break;
-	case CI13XXX_CONTROLLER_RESUME_EVENT:
-		dev_info(dev, "CI13XXX_CONTROLLER_RESUME_EVENT received\n");
-		ci13xxx_msm_resume();
-		break;
-	case CI13XXX_CONTROLLER_ERROR_EVENT:
-		dev_info(dev, "CI13XXX_CONTROLLER_ERROR_EVENT received\n");
-		ci13xxx_msm_mark_err_event();
-		break;
-	case CI13XXX_CONTROLLER_UDC_STARTED_EVENT:
-		dev_info(dev,
-			 "CI13XXX_CONTROLLER_UDC_STARTED_EVENT received\n");
-		break;
-	default:
-		dev_dbg(dev, "unknown ci13xxx_udc event\n");
-		break;
-	}
-}
-
-static bool ci13xxx_msm_in_lpm(struct ci13xxx *udc)
-{
-	struct msm_otg *otg;
-
-	if (udc == NULL)
-		return false;
-
-	if (udc->transceiver == NULL)
-		return false;
-
-	otg = container_of(udc->transceiver, struct msm_otg, phy);
-
-	return (atomic_read(&otg->in_lpm) != 0);
-}
-
-
-static irqreturn_t ci13xxx_msm_resume_irq(int irq, void *data)
-{
-	struct ci13xxx *udc = _udc;
-
-	if (udc->transceiver && udc->vbus_active && udc->suspended)
-		usb_phy_set_suspend(udc->transceiver, 0);
-	else if (!udc->suspended)
-		ci13xxx_msm_resume();
-
-	return IRQ_HANDLED;
-}
-
-static struct ci13xxx_udc_driver ci13xxx_msm_udc_driver = {
-	.name			= "ci13xxx_msm",
-	.flags			= CI13XXX_REGS_SHARED |
-				  CI13XXX_REQUIRE_TRANSCEIVER |
-				  CI13XXX_PULLUP_ON_VBUS |
-				  CI13XXX_ZERO_ITC |
-				  CI13XXX_DISABLE_STREAMING,
-	.nz_itc			= 0,
-	.notify_event		= ci13xxx_msm_notify_event,
-	.in_lpm                 = ci13xxx_msm_in_lpm,
-};
-
-static int ci13xxx_msm_install_wake_gpio(struct platform_device *pdev,
-				struct resource *res)
-{
-	int wake_irq;
-	int ret;
-	struct pinctrl_state *set_state;
-
-	dev_dbg(&pdev->dev, "ci13xxx_msm_install_wake_gpio\n");
-
-	_udc_ctxt.wake_gpio = res->start;
-	if (_udc_ctxt.ci13xxx_pinctrl) {
-		set_state = pinctrl_lookup_state(_udc_ctxt.ci13xxx_pinctrl,
-				"ci13xxx_active");
-		if (IS_ERR(set_state)) {
-			pr_err("cannot get ci13xxx pinctrl active state\n");
-			return PTR_ERR(set_state);
-		}
-		pinctrl_select_state(_udc_ctxt.ci13xxx_pinctrl, set_state);
-	}
-	gpio_request(_udc_ctxt.wake_gpio, "USB_RESUME");
-	gpio_direction_input(_udc_ctxt.wake_gpio);
-	wake_irq = gpio_to_irq(_udc_ctxt.wake_gpio);
-	if (wake_irq < 0) {
-		dev_err(&pdev->dev, "could not register USB_RESUME GPIO.\n");
-		return -ENXIO;
-	}
-
-	dev_dbg(&pdev->dev, "_udc_ctxt.gpio_irq = %d and irq = %d\n",
-			_udc_ctxt.wake_gpio, wake_irq);
-	ret = request_irq(wake_irq, ci13xxx_msm_resume_irq,
-		IRQF_TRIGGER_RISING | IRQF_ONESHOT, "usb resume", NULL);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "could not register USB_RESUME IRQ.\n");
-		goto gpio_free;
-	}
-	disable_irq(wake_irq);
-	_udc_ctxt.wake_irq = wake_irq;
-
-	return 0;
-
-gpio_free:
-	gpio_free(_udc_ctxt.wake_gpio);
-	if (_udc_ctxt.ci13xxx_pinctrl) {
-		set_state = pinctrl_lookup_state(_udc_ctxt.ci13xxx_pinctrl,
-				"ci13xxx_sleep");
-		if (IS_ERR(set_state))
-			pr_err("cannot get ci13xxx pinctrl sleep state\n");
-		else
-			pinctrl_select_state(_udc_ctxt.ci13xxx_pinctrl,
-					set_state);
-	}
-	_udc_ctxt.wake_gpio = 0;
-	return ret;
-}
-
-static void ci13xxx_msm_uninstall_wake_gpio(struct platform_device *pdev)
-{
-	struct pinctrl_state *set_state;
-
-	dev_dbg(&pdev->dev, "ci13xxx_msm_uninstall_wake_gpio\n");
-
-	if (_udc_ctxt.wake_gpio) {
-		gpio_free(_udc_ctxt.wake_gpio);
-		if (_udc_ctxt.ci13xxx_pinctrl) {
-			set_state =
-				pinctrl_lookup_state(_udc_ctxt.ci13xxx_pinctrl,
-						"ci13xxx_sleep");
-			if (IS_ERR(set_state))
-				pr_err("cannot get ci13xxx pinctrl sleep state\n");
-			else
-				pinctrl_select_state(_udc_ctxt.ci13xxx_pinctrl,
-						set_state);
-		}
-		_udc_ctxt.wake_gpio = 0;
-	}
-}
-
-static void enable_usb_irq_timer_func(unsigned long data);
-static int ci13xxx_msm_probe(struct platform_device *pdev)
-{
-	struct resource *res;
-	int ret;
-	struct ci13xxx_platform_data *pdata = pdev->dev.platform_data;
-	bool is_l1_supported = false;
-
-	dev_dbg(&pdev->dev, "ci13xxx_msm_probe\n");
-
-	if (pdata) {
-		/* Acceptable values for nz_itc are: 0,1,2,4,8,16,32,64 */
-		if (pdata->log2_itc > CI13XXX_MSM_MAX_LOG2_ITC ||
-			pdata->log2_itc <= 0)
-			ci13xxx_msm_udc_driver.nz_itc = 0;
-		else
-			ci13xxx_msm_udc_driver.nz_itc =
-				1 << (pdata->log2_itc-1);
-
-		is_l1_supported = pdata->l1_supported;
-		/* Set ahb2ahb bypass flag if it is requested. */
-		if (pdata->enable_ahb2ahb_bypass)
-			ci13xxx_msm_udc_driver.flags |=
-				CI13XXX_ENABLE_AHB2AHB_BYPASS;
-
-		/* Clear disable streaming flag if is requested. */
-		if (pdata->enable_streaming)
-			ci13xxx_msm_udc_driver.flags &=
-						~CI13XXX_DISABLE_STREAMING;
-	}
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(&pdev->dev, "failed to get platform resource mem\n");
-		return -ENXIO;
-	}
-
-	_udc_ctxt.regs = ioremap(res->start, resource_size(res));
-	if (!_udc_ctxt.regs) {
-		dev_err(&pdev->dev, "ioremap failed\n");
-		return -ENOMEM;
-	}
-
-	ret = udc_probe(&ci13xxx_msm_udc_driver, &pdev->dev, _udc_ctxt.regs);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "udc_probe failed\n");
-		goto iounmap;
-	}
-
-	_udc->gadget.l1_supported = is_l1_supported;
-
-	_udc_ctxt.irq = platform_get_irq(pdev, 0);
-	if (_udc_ctxt.irq < 0) {
-		dev_err(&pdev->dev, "IRQ not found\n");
-		ret = -ENXIO;
-		goto udc_remove;
-	}
-
-	res = platform_get_resource_byname(pdev, IORESOURCE_IO, "USB_RESUME");
-	/* Get pinctrl if target uses pinctrl */
-	_udc_ctxt.ci13xxx_pinctrl = devm_pinctrl_get(&pdev->dev);
-	if (IS_ERR(_udc_ctxt.ci13xxx_pinctrl)) {
-		if (of_property_read_bool(pdev->dev.of_node, "pinctrl-names")) {
-			dev_err(&pdev->dev, "Error encountered while getting pinctrl");
-			ret = PTR_ERR(_udc_ctxt.ci13xxx_pinctrl);
-			goto udc_remove;
-		}
-		dev_dbg(&pdev->dev, "Target does not use pinctrl\n");
-		_udc_ctxt.ci13xxx_pinctrl = NULL;
-	}
-	if (res) {
-		ret = ci13xxx_msm_install_wake_gpio(pdev, res);
-		if (ret < 0) {
-			dev_err(&pdev->dev, "gpio irq install failed\n");
-			goto udc_remove;
-		}
-	}
-
-	ret = request_irq(_udc_ctxt.irq, msm_udc_irq, IRQF_SHARED, pdev->name,
-					  pdev);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "request_irq failed\n");
-		goto gpio_uninstall;
-	}
-
-	setup_timer(&_udc_ctxt.irq_enable_timer, enable_usb_irq_timer_func,
-							(unsigned long)NULL);
-
-	pm_runtime_no_callbacks(&pdev->dev);
-	pm_runtime_set_active(&pdev->dev);
-	pm_runtime_enable(&pdev->dev);
-
-	return 0;
-
-gpio_uninstall:
-	ci13xxx_msm_uninstall_wake_gpio(pdev);
-udc_remove:
-	udc_remove();
-iounmap:
-	iounmap(_udc_ctxt.regs);
-
-	return ret;
-}
-
-int ci13xxx_msm_remove(struct platform_device *pdev)
-{
-	pm_runtime_disable(&pdev->dev);
-	free_irq(_udc_ctxt.irq, pdev);
-	ci13xxx_msm_uninstall_wake_gpio(pdev);
-	udc_remove();
-	iounmap(_udc_ctxt.regs);
-	return 0;
-}
-
-void ci13xxx_msm_shutdown(struct platform_device *pdev)
-{
-	ci13xxx_pullup(&_udc->gadget, 0);
-}
-
-void msm_hw_soft_reset(void)
-{
-	struct ci13xxx *udc = _udc;
-
-	hw_device_reset(udc);
-}
-
-void msm_hw_bam_disable(bool bam_disable)
-{
-	u32 val;
-	struct ci13xxx *udc = _udc;
-
-	if (bam_disable)
-		val = readl_relaxed(USB_GENCONFIG) | GENCONFIG_BAM_DISABLE;
-	else
-		val = readl_relaxed(USB_GENCONFIG) & ~GENCONFIG_BAM_DISABLE;
-
-	writel_relaxed(val, USB_GENCONFIG);
-}
-
-void msm_usb_irq_disable(bool disable)
-{
-	struct ci13xxx *udc = _udc;
-	unsigned long flags;
-
-	spin_lock_irqsave(udc->lock, flags);
-
-	if (_udc_ctxt.irq_disabled == disable) {
-		pr_debug("Interrupt state already disable = %d\n", disable);
-		if (disable)
-			mod_timer(&_udc_ctxt.irq_enable_timer,
-					IRQ_ENABLE_DELAY);
-		spin_unlock_irqrestore(udc->lock, flags);
-		return;
-	}
-
-	if (disable) {
-		disable_irq_nosync(_udc_ctxt.irq);
-		/* start timer here */
-		pr_debug("%s: Disabling interrupts\n", __func__);
-		mod_timer(&_udc_ctxt.irq_enable_timer, IRQ_ENABLE_DELAY);
-		_udc_ctxt.irq_disabled = true;
-
-	} else {
-		pr_debug("%s: Enabling interrupts\n", __func__);
-		del_timer(&_udc_ctxt.irq_enable_timer);
-		enable_irq(_udc_ctxt.irq);
-		_udc_ctxt.irq_disabled = false;
-	}
-
-	spin_unlock_irqrestore(udc->lock, flags);
-}
-
-static void enable_usb_irq_timer_func(unsigned long data)
-{
-	pr_debug("enabling interrupt from timer\n");
-	msm_usb_irq_disable(false);
-}
-
-static struct platform_driver ci13xxx_msm_driver = {
-	.probe = ci13xxx_msm_probe,
-	.driver = {
-		.name = "msm_hsusb",
-	},
-	.remove = ci13xxx_msm_remove,
-	.shutdown = ci13xxx_msm_shutdown,
-};
-MODULE_ALIAS("platform:msm_hsusb");
-
-static int __init ci13xxx_msm_init(void)
-{
-	return platform_driver_register(&ci13xxx_msm_driver);
-}
-module_init(ci13xxx_msm_init);
-
-static void __exit ci13xxx_msm_exit(void)
-{
-	platform_driver_unregister(&ci13xxx_msm_driver);
-}
-module_exit(ci13xxx_msm_exit);
-
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
deleted file mode 100644
index 28aaa1f..0000000
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ /dev/null
@@ -1,3983 +0,0 @@
-/*
- * ci13xxx_udc.c - MIPS USB IP core family device controller
- *
- * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
- *
- * Author: David Lopo
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-/*
- * Description: MIPS USB IP core family device controller
- *              Currently it only supports IP part number CI13412
- *
- * This driver is composed of several blocks:
- * - HW:     hardware interface
- * - DBG:    debug facilities (optional)
- * - UTIL:   utilities
- * - ISR:    interrupts handling
- * - ENDPT:  endpoint operations (Gadget API)
- * - GADGET: gadget operations (Gadget API)
- * - BUS:    bus glue code, bus abstraction layer
- *
- * Compile Options
- * - CONFIG_USB_GADGET_DEBUG_FILES: enable debug facilities
- * - STALL_IN:  non-empty bulk-in pipes cannot be halted
- *              if defined mass storage compliance succeeds but with warnings
- *              => case 4: Hi >  Dn
- *              => case 5: Hi >  Di
- *              => case 8: Hi <> Do
- *              if undefined usbtest 13 fails
- * - TRACE:     enable function tracing (depends on DEBUG)
- *
- * Main Features
- * - Chapter 9 & Mass Storage Compliance with Gadget File Storage
- * - Chapter 9 Compliance with Gadget Zero (STALL_IN undefined)
- * - Normal & LPM support
- *
- * USBTEST Report
- * - OK: 0-12, 13 (STALL_IN defined) & 14
- * - Not Supported: 15 & 16 (ISO)
- *
- * TODO List
- * - OTG
- * - Isochronous & Interrupt Traffic
- * - Handle requests which spawns into several TDs
- * - GET_STATUS(device) - always reports 0
- * - Gadget API (majority of optional features)
- */
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/dmapool.h>
-#include <linux/dma-mapping.h>
-#include <linux/init.h>
-#include <linux/ratelimit.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/clk.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/pm_runtime.h>
-#include <linux/usb/ch9.h>
-#include <linux/usb/gadget.h>
-#include <linux/usb/otg.h>
-#include <linux/usb/msm_hsusb.h>
-
-#include "ci13xxx_udc.h"
-
-/******************************************************************************
- * DEFINE
- *****************************************************************************/
-
-#define USB_MAX_TIMEOUT		25 /* 25msec timeout */
-#define EP_PRIME_CHECK_DELAY	(jiffies + msecs_to_jiffies(1000))
-#define MAX_PRIME_CHECK_RETRY	3 /*Wait for 3sec for EP prime failure */
-#define EXTRA_ALLOCATION_SIZE	256
-
-/* ctrl register bank access */
-static DEFINE_SPINLOCK(udc_lock);
-
-/* control endpoint description */
-static const struct usb_endpoint_descriptor
-ctrl_endpt_out_desc = {
-	.bLength         = USB_DT_ENDPOINT_SIZE,
-	.bDescriptorType = USB_DT_ENDPOINT,
-
-	.bEndpointAddress = USB_DIR_OUT,
-	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
-	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
-};
-
-static const struct usb_endpoint_descriptor
-ctrl_endpt_in_desc = {
-	.bLength         = USB_DT_ENDPOINT_SIZE,
-	.bDescriptorType = USB_DT_ENDPOINT,
-
-	.bEndpointAddress = USB_DIR_IN,
-	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
-	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
-};
-
-/* UDC descriptor */
-static struct ci13xxx *_udc;
-
-/* Interrupt statistics */
-#define ISR_MASK   0x1F
-static struct {
-	u32 test;
-	u32 ui;
-	u32 uei;
-	u32 pci;
-	u32 uri;
-	u32 sli;
-	u32 none;
-	struct {
-		u32 cnt;
-		u32 buf[ISR_MASK+1];
-		u32 idx;
-	} hndl;
-} isr_statistics;
-
-/**
- * ffs_nr: find first (least significant) bit set
- * @x: the word to search
- *
- * This function returns bit number (instead of position)
- */
-static int ffs_nr(u32 x)
-{
-	int n = ffs(x);
-
-	return n ? n-1 : 32;
-}
-
-/******************************************************************************
- * HW block
- *****************************************************************************/
-/* register bank descriptor */
-static struct {
-	unsigned int  lpm;    /* is LPM? */
-	void __iomem *abs;    /* bus map offset */
-	void __iomem *cap;    /* bus map offset + CAP offset + CAP data */
-	size_t        size;   /* bank size */
-} hw_bank;
-
-/* MSM specific */
-#define ABS_AHBBURST        (0x0090UL)
-#define ABS_AHBMODE         (0x0098UL)
-/* UDC register map */
-#define ABS_CAPLENGTH       (0x100UL)
-#define ABS_HCCPARAMS       (0x108UL)
-#define ABS_DCCPARAMS       (0x124UL)
-#define ABS_TESTMODE        (hw_bank.lpm ? 0x0FCUL : 0x138UL)
-/* offset to CAPLENTGH (addr + data) */
-#define CAP_USBCMD          (0x000UL)
-#define CAP_USBSTS          (0x004UL)
-#define CAP_USBINTR         (0x008UL)
-#define CAP_DEVICEADDR      (0x014UL)
-#define CAP_ENDPTLISTADDR   (0x018UL)
-#define CAP_PORTSC          (0x044UL)
-#define CAP_DEVLC           (0x084UL)
-#define CAP_ENDPTPIPEID     (0x0BCUL)
-#define CAP_USBMODE         (hw_bank.lpm ? 0x0C8UL : 0x068UL)
-#define CAP_ENDPTSETUPSTAT  (hw_bank.lpm ? 0x0D8UL : 0x06CUL)
-#define CAP_ENDPTPRIME      (hw_bank.lpm ? 0x0DCUL : 0x070UL)
-#define CAP_ENDPTFLUSH      (hw_bank.lpm ? 0x0E0UL : 0x074UL)
-#define CAP_ENDPTSTAT       (hw_bank.lpm ? 0x0E4UL : 0x078UL)
-#define CAP_ENDPTCOMPLETE   (hw_bank.lpm ? 0x0E8UL : 0x07CUL)
-#define CAP_ENDPTCTRL       (hw_bank.lpm ? 0x0ECUL : 0x080UL)
-#define CAP_LAST            (hw_bank.lpm ? 0x12CUL : 0x0C0UL)
-
-#define REMOTE_WAKEUP_DELAY	msecs_to_jiffies(200)
-
-/* maximum number of enpoints: valid only after hw_device_reset() */
-static unsigned int hw_ep_max;
-static void dbg_usb_op_fail(u8 addr, const char *name,
-				const struct ci13xxx_ep *mep);
-/**
- * hw_ep_bit: calculates the bit number
- * @num: endpoint number
- * @dir: endpoint direction
- *
- * This function returns bit number
- */
-static inline int hw_ep_bit(int num, int dir)
-{
-	return num + (dir ? 16 : 0);
-}
-
-static int ep_to_bit(int n)
-{
-	int fill = 16 - hw_ep_max / 2;
-
-	if (n >= hw_ep_max / 2)
-		n += fill;
-
-	return n;
-}
-
-/**
- * hw_aread: reads from register bitfield
- * @addr: address relative to bus map
- * @mask: bitfield mask
- *
- * This function returns register bitfield data
- */
-static u32 hw_aread(u32 addr, u32 mask)
-{
-	return ioread32(addr + hw_bank.abs) & mask;
-}
-
-/**
- * hw_awrite: writes to register bitfield
- * @addr: address relative to bus map
- * @mask: bitfield mask
- * @data: new data
- */
-static void hw_awrite(u32 addr, u32 mask, u32 data)
-{
-	iowrite32(hw_aread(addr, ~mask) | (data & mask),
-		  addr + hw_bank.abs);
-}
-
-/**
- * hw_cread: reads from register bitfield
- * @addr: address relative to CAP offset plus content
- * @mask: bitfield mask
- *
- * This function returns register bitfield data
- */
-static u32 hw_cread(u32 addr, u32 mask)
-{
-	return ioread32(addr + hw_bank.cap) & mask;
-}
-
-/**
- * hw_cwrite: writes to register bitfield
- * @addr: address relative to CAP offset plus content
- * @mask: bitfield mask
- * @data: new data
- */
-static void hw_cwrite(u32 addr, u32 mask, u32 data)
-{
-	iowrite32(hw_cread(addr, ~mask) | (data & mask),
-		  addr + hw_bank.cap);
-}
-
-/**
- * hw_ctest_and_clear: tests & clears register bitfield
- * @addr: address relative to CAP offset plus content
- * @mask: bitfield mask
- *
- * This function returns register bitfield data
- */
-static u32 hw_ctest_and_clear(u32 addr, u32 mask)
-{
-	u32 reg = hw_cread(addr, mask);
-
-	iowrite32(reg, addr + hw_bank.cap);
-	return reg;
-}
-
-/**
- * hw_ctest_and_write: tests & writes register bitfield
- * @addr: address relative to CAP offset plus content
- * @mask: bitfield mask
- * @data: new data
- *
- * This function returns register bitfield data
- */
-static u32 hw_ctest_and_write(u32 addr, u32 mask, u32 data)
-{
-	u32 reg = hw_cread(addr, ~0);
-
-	iowrite32((reg & ~mask) | (data & mask), addr + hw_bank.cap);
-	return (reg & mask) >> ffs_nr(mask);
-}
-
-static int hw_device_init(void __iomem *base)
-{
-	u32 reg;
-
-	/* bank is a module variable */
-	hw_bank.abs = base;
-
-	hw_bank.cap = hw_bank.abs;
-	hw_bank.cap += ABS_CAPLENGTH;
-	hw_bank.cap += ioread8(hw_bank.cap);
-
-	reg = hw_aread(ABS_HCCPARAMS, HCCPARAMS_LEN) >> ffs_nr(HCCPARAMS_LEN);
-	hw_bank.lpm  = reg;
-	hw_bank.size = hw_bank.cap - hw_bank.abs;
-	hw_bank.size += CAP_LAST;
-	hw_bank.size /= sizeof(u32);
-
-	reg = hw_aread(ABS_DCCPARAMS, DCCPARAMS_DEN) >> ffs_nr(DCCPARAMS_DEN);
-	hw_ep_max = reg * 2;   /* cache hw ENDPT_MAX */
-
-	if (hw_ep_max == 0 || hw_ep_max > ENDPT_MAX)
-		return -ENODEV;
-
-	/* setup lock mode ? */
-
-	/* ENDPTSETUPSTAT is '0' by default */
-
-	/* HCSPARAMS.bf.ppc SHOULD BE zero for device */
-
-	return 0;
-}
-/**
- * hw_device_reset: resets chip (execute without interruption)
- * @base: register base address
- *
- * This function returns an error code
- */
-static int hw_device_reset(struct ci13xxx *udc)
-{
-	int delay_count = 25; /* 250 usec */
-
-	/* should flush & stop before reset */
-	hw_cwrite(CAP_ENDPTFLUSH, ~0, ~0);
-	hw_cwrite(CAP_USBCMD, USBCMD_RS, 0);
-
-	hw_cwrite(CAP_USBCMD, USBCMD_RST, USBCMD_RST);
-	while (delay_count--  && hw_cread(CAP_USBCMD, USBCMD_RST))
-		udelay(10);
-	if (delay_count < 0)
-		pr_err("USB controller reset failed\n");
-
-	if (udc->udc_driver->notify_event)
-		udc->udc_driver->notify_event(udc,
-			CI13XXX_CONTROLLER_RESET_EVENT);
-
-	/* USBMODE should be configured step by step */
-	hw_cwrite(CAP_USBMODE, USBMODE_CM, USBMODE_CM_IDLE);
-	hw_cwrite(CAP_USBMODE, USBMODE_CM, USBMODE_CM_DEVICE);
-	hw_cwrite(CAP_USBMODE, USBMODE_SLOM, USBMODE_SLOM);  /* HW >= 2.3 */
-
-	/*
-	 * ITC (Interrupt Threshold Control) field is to set the maximum
-	 * rate at which the device controller will issue interrupts.
-	 * The maximum interrupt interval measured in micro frames.
-	 * Valid values are 0, 1, 2, 4, 8, 16, 32, 64. The default value is
-	 * 8 micro frames. If CPU can handle interrupts at faster rate, ITC
-	 * can be set to lesser value to gain performance.
-	 */
-	if (udc->udc_driver->nz_itc)
-		hw_cwrite(CAP_USBCMD, USBCMD_ITC_MASK,
-			USBCMD_ITC(udc->udc_driver->nz_itc));
-	else if (udc->udc_driver->flags & CI13XXX_ZERO_ITC)
-		hw_cwrite(CAP_USBCMD, USBCMD_ITC_MASK, USBCMD_ITC(0));
-
-	if (hw_cread(CAP_USBMODE, USBMODE_CM) != USBMODE_CM_DEVICE) {
-		pr_err("cannot enter in device mode");
-		pr_err("lpm = %i", hw_bank.lpm);
-		return -ENODEV;
-	}
-
-	return 0;
-}
-
-/**
- * hw_device_state: enables/disables interrupts & starts/stops device (execute
- *                  without interruption)
- * @dma: 0 => disable, !0 => enable and set dma engine
- *
- * This function returns an error code
- */
-static int hw_device_state(u32 dma)
-{
-	struct ci13xxx *udc = _udc;
-
-	if (dma) {
-		if (!(udc->udc_driver->flags & CI13XXX_DISABLE_STREAMING)) {
-			hw_cwrite(CAP_USBMODE, USBMODE_SDIS, 0);
-			pr_debug("%s(): streaming mode is enabled. USBMODE:%x\n",
-				 __func__, hw_cread(CAP_USBMODE, ~0));
-
-		} else {
-			hw_cwrite(CAP_USBMODE, USBMODE_SDIS, USBMODE_SDIS);
-			pr_debug("%s(): streaming mode is disabled. USBMODE:%x\n",
-				__func__, hw_cread(CAP_USBMODE, ~0));
-		}
-
-		hw_cwrite(CAP_ENDPTLISTADDR, ~0, dma);
-
-
-		/* Set BIT(31) to enable AHB2AHB Bypass functionality */
-		if (udc->udc_driver->flags & CI13XXX_ENABLE_AHB2AHB_BYPASS) {
-			hw_awrite(ABS_AHBMODE, AHB2AHB_BYPASS, AHB2AHB_BYPASS);
-			pr_debug("%s(): ByPass Mode is enabled. AHBMODE:%x\n",
-					__func__, hw_aread(ABS_AHBMODE, ~0));
-		}
-
-		/* interrupt, error, port change, reset, sleep/suspend */
-		hw_cwrite(CAP_USBINTR, ~0,
-			     USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
-		hw_cwrite(CAP_USBCMD, USBCMD_RS, USBCMD_RS);
-	} else {
-		hw_cwrite(CAP_USBCMD, USBCMD_RS, 0);
-		hw_cwrite(CAP_USBINTR, ~0, 0);
-		/* Clear BIT(31) to disable AHB2AHB Bypass functionality */
-		if (udc->udc_driver->flags & CI13XXX_ENABLE_AHB2AHB_BYPASS) {
-			hw_awrite(ABS_AHBMODE, AHB2AHB_BYPASS, 0);
-			pr_debug("%s(): ByPass Mode is disabled. AHBMODE:%x\n",
-					__func__, hw_aread(ABS_AHBMODE, ~0));
-		}
-	}
-	return 0;
-}
-
-static void debug_ept_flush_info(int ep_num, int dir)
-{
-	struct ci13xxx *udc = _udc;
-	struct ci13xxx_ep *mep;
-
-	if (dir)
-		mep = &udc->ci13xxx_ep[ep_num + hw_ep_max/2];
-	else
-		mep = &udc->ci13xxx_ep[ep_num];
-
-	pr_err_ratelimited("USB Registers\n");
-	pr_err_ratelimited("USBCMD:%x\n", hw_cread(CAP_USBCMD, ~0));
-	pr_err_ratelimited("USBSTS:%x\n", hw_cread(CAP_USBSTS, ~0));
-	pr_err_ratelimited("ENDPTLISTADDR:%x\n",
-			hw_cread(CAP_ENDPTLISTADDR, ~0));
-	pr_err_ratelimited("PORTSC:%x\n", hw_cread(CAP_PORTSC, ~0));
-	pr_err_ratelimited("USBMODE:%x\n", hw_cread(CAP_USBMODE, ~0));
-	pr_err_ratelimited("ENDPTSTAT:%x\n", hw_cread(CAP_ENDPTSTAT, ~0));
-
-	dbg_usb_op_fail(0xFF, "FLUSHF", mep);
-}
-/**
- * hw_ep_flush: flush endpoint fifo (execute without interruption)
- * @num: endpoint number
- * @dir: endpoint direction
- *
- * This function returns an error code
- */
-static int hw_ep_flush(int num, int dir)
-{
-	ktime_t start, diff;
-	int n = hw_ep_bit(num, dir);
-	struct ci13xxx_ep *mEp = &_udc->ci13xxx_ep[n];
-
-	/* Flush ep0 even when queue is empty */
-	if (_udc->skip_flush || (num && list_empty(&mEp->qh.queue)))
-		return 0;
-
-	start = ktime_get();
-	do {
-		/* flush any pending transfer */
-		hw_cwrite(CAP_ENDPTFLUSH, BIT(n), BIT(n));
-		while (hw_cread(CAP_ENDPTFLUSH, BIT(n))) {
-			cpu_relax();
-			diff = ktime_sub(ktime_get(), start);
-			if (ktime_to_ms(diff) > USB_MAX_TIMEOUT) {
-				printk_ratelimited(KERN_ERR
-					"%s: Failed to flush ep#%d %s\n",
-					__func__, num,
-					dir ? "IN" : "OUT");
-				debug_ept_flush_info(num, dir);
-				_udc->skip_flush = true;
-				/* Notify to trigger h/w reset recovery later */
-				if (_udc->udc_driver->notify_event)
-					_udc->udc_driver->notify_event(_udc,
-						CI13XXX_CONTROLLER_ERROR_EVENT);
-				return 0;
-			}
-		}
-	} while (hw_cread(CAP_ENDPTSTAT, BIT(n)));
-
-	return 0;
-}
-
-/**
- * hw_ep_disable: disables endpoint (execute without interruption)
- * @num: endpoint number
- * @dir: endpoint direction
- *
- * This function returns an error code
- */
-static int hw_ep_disable(int num, int dir)
-{
-	hw_cwrite(CAP_ENDPTCTRL + num * sizeof(u32),
-		  dir ? ENDPTCTRL_TXE : ENDPTCTRL_RXE, 0);
-	return 0;
-}
-
-/**
- * hw_ep_enable: enables endpoint (execute without interruption)
- * @num:  endpoint number
- * @dir:  endpoint direction
- * @type: endpoint type
- *
- * This function returns an error code
- */
-static int hw_ep_enable(int num, int dir, int type)
-{
-	u32 mask, data;
-
-	if (dir) {
-		mask  = ENDPTCTRL_TXT;  /* type    */
-		data  = type << ffs_nr(mask);
-
-		mask |= ENDPTCTRL_TXS;  /* unstall */
-		mask |= ENDPTCTRL_TXR;  /* reset data toggle */
-		data |= ENDPTCTRL_TXR;
-		mask |= ENDPTCTRL_TXE;  /* enable  */
-		data |= ENDPTCTRL_TXE;
-	} else {
-		mask  = ENDPTCTRL_RXT;  /* type    */
-		data  = type << ffs_nr(mask);
-
-		mask |= ENDPTCTRL_RXS;  /* unstall */
-		mask |= ENDPTCTRL_RXR;  /* reset data toggle */
-		data |= ENDPTCTRL_RXR;
-		mask |= ENDPTCTRL_RXE;  /* enable  */
-		data |= ENDPTCTRL_RXE;
-	}
-	hw_cwrite(CAP_ENDPTCTRL + num * sizeof(u32), mask, data);
-
-	/* make sure endpoint is enabled before returning */
-	mb();
-
-	return 0;
-}
-
-/**
- * hw_ep_get_halt: return endpoint halt status
- * @num: endpoint number
- * @dir: endpoint direction
- *
- * This function returns 1 if endpoint halted
- */
-static int hw_ep_get_halt(int num, int dir)
-{
-	u32 mask = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
-
-	return hw_cread(CAP_ENDPTCTRL + num * sizeof(u32), mask) ? 1 : 0;
-}
-
-/**
- * hw_test_and_clear_setup_status: test & clear setup status (execute without
- *                                 interruption)
- * @n: endpoint number
- *
- * This function returns setup status
- */
-static int hw_test_and_clear_setup_status(int n)
-{
-	n = ep_to_bit(n);
-	return hw_ctest_and_clear(CAP_ENDPTSETUPSTAT, BIT(n));
-}
-
-/**
- * hw_ep_prime: primes endpoint (execute without interruption)
- * @num:     endpoint number
- * @dir:     endpoint direction
- * @is_ctrl: true if control endpoint
- *
- * This function returns an error code
- */
-static int hw_ep_prime(int num, int dir, int is_ctrl)
-{
-	int n = hw_ep_bit(num, dir);
-
-	if (is_ctrl && dir == RX && hw_cread(CAP_ENDPTSETUPSTAT, BIT(num)))
-		return -EAGAIN;
-
-	hw_cwrite(CAP_ENDPTPRIME, BIT(n), BIT(n));
-
-	if (is_ctrl && dir == RX  && hw_cread(CAP_ENDPTSETUPSTAT, BIT(num)))
-		return -EAGAIN;
-
-	/* status shoult be tested according with manual but it doesn't work */
-	return 0;
-}
-
-/**
- * hw_ep_set_halt: configures ep halt & resets data toggle after clear (execute
- *                 without interruption)
- * @num:   endpoint number
- * @dir:   endpoint direction
- * @value: true => stall, false => unstall
- *
- * This function returns an error code
- */
-static int hw_ep_set_halt(int num, int dir, int value)
-{
-	u32 addr, mask_xs, mask_xr;
-
-	if (value != 0 && value != 1)
-		return -EINVAL;
-
-	do {
-		if (hw_cread(CAP_ENDPTSETUPSTAT, BIT(num)))
-			return 0;
-
-		addr = CAP_ENDPTCTRL + num * sizeof(u32);
-		mask_xs = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
-		mask_xr = dir ? ENDPTCTRL_TXR : ENDPTCTRL_RXR;
-
-		/* data toggle - reserved for EP0 but it's in ESS */
-		hw_cwrite(addr, mask_xs|mask_xr, value ? mask_xs : mask_xr);
-
-	} while (value != hw_ep_get_halt(num, dir));
-
-	return 0;
-}
-
-/**
- * hw_intr_clear: disables interrupt & clears interrupt status (execute without
- *                interruption)
- * @n: interrupt bit
- *
- * This function returns an error code
- */
-static int hw_intr_clear(int n)
-{
-	if (n >= REG_BITS)
-		return -EINVAL;
-
-	hw_cwrite(CAP_USBINTR, BIT(n), 0);
-	hw_cwrite(CAP_USBSTS,  BIT(n), BIT(n));
-	return 0;
-}
-
-/**
- * hw_intr_force: enables interrupt & forces interrupt status (execute without
- *                interruption)
- * @n: interrupt bit
- *
- * This function returns an error code
- */
-static int hw_intr_force(int n)
-{
-	if (n >= REG_BITS)
-		return -EINVAL;
-
-	hw_awrite(ABS_TESTMODE, TESTMODE_FORCE, TESTMODE_FORCE);
-	hw_cwrite(CAP_USBINTR,  BIT(n), BIT(n));
-	hw_cwrite(CAP_USBSTS,   BIT(n), BIT(n));
-	hw_awrite(ABS_TESTMODE, TESTMODE_FORCE, 0);
-	return 0;
-}
-
-/**
- * hw_is_port_high_speed: test if port is high speed
- *
- * This function returns true if high speed port
- */
-static int hw_port_is_high_speed(void)
-{
-	return hw_bank.lpm ? hw_cread(CAP_DEVLC, DEVLC_PSPD) :
-		hw_cread(CAP_PORTSC, PORTSC_HSP);
-}
-
-/**
- * hw_port_test_get: reads port test mode value
- *
- * This function returns port test mode value
- */
-static u8 hw_port_test_get(void)
-{
-	return hw_cread(CAP_PORTSC, PORTSC_PTC) >> ffs_nr(PORTSC_PTC);
-}
-
-/**
- * hw_port_test_set: writes port test mode (execute without interruption)
- * @mode: new value
- *
- * This function returns an error code
- */
-static int hw_port_test_set(u8 mode)
-{
-	const u8 TEST_MODE_MAX = 7;
-
-	if (mode > TEST_MODE_MAX)
-		return -EINVAL;
-
-	hw_cwrite(CAP_PORTSC, PORTSC_PTC, mode << ffs_nr(PORTSC_PTC));
-	return 0;
-}
-
-/**
- * hw_read_intr_enable: returns interrupt enable register
- *
- * This function returns register data
- */
-static u32 hw_read_intr_enable(void)
-{
-	return hw_cread(CAP_USBINTR, ~0);
-}
-
-/**
- * hw_read_intr_status: returns interrupt status register
- *
- * This function returns register data
- */
-static u32 hw_read_intr_status(void)
-{
-	return hw_cread(CAP_USBSTS, ~0);
-}
-
-/**
- * hw_register_read: reads all device registers (execute without interruption)
- * @buf:  destination buffer
- * @size: buffer size
- *
- * This function returns number of registers read
- */
-static size_t hw_register_read(u32 *buf, size_t size)
-{
-	unsigned int i;
-
-	if (size > hw_bank.size)
-		size = hw_bank.size;
-
-	for (i = 0; i < size; i++)
-		buf[i] = hw_aread(i * sizeof(u32), ~0);
-
-	return size;
-}
-
-/**
- * hw_register_write: writes to register
- * @addr: register address
- * @data: register value
- *
- * This function returns an error code
- */
-static int hw_register_write(u16 addr, u32 data)
-{
-	/* align */
-	addr /= sizeof(u32);
-
-	if (addr >= hw_bank.size)
-		return -EINVAL;
-
-	/* align */
-	addr *= sizeof(u32);
-
-	hw_awrite(addr, ~0, data);
-	return 0;
-}
-
-/**
- * hw_test_and_clear_complete: test & clear complete status (execute without
- *                             interruption)
- * @n: endpoint number
- *
- * This function returns complete status
- */
-static int hw_test_and_clear_complete(int n)
-{
-	n = ep_to_bit(n);
-	return hw_ctest_and_clear(CAP_ENDPTCOMPLETE, BIT(n));
-}
-
-/**
- * hw_test_and_clear_intr_active: test & clear active interrupts (execute
- *                                without interruption)
- *
- * This function returns active interrutps
- */
-static u32 hw_test_and_clear_intr_active(void)
-{
-	u32 reg = hw_read_intr_status() & hw_read_intr_enable();
-
-	hw_cwrite(CAP_USBSTS, ~0, reg);
-	return reg;
-}
-
-/**
- * hw_test_and_clear_setup_guard: test & clear setup guard (execute without
- *                                interruption)
- *
- * This function returns guard value
- */
-static int hw_test_and_clear_setup_guard(void)
-{
-	return hw_ctest_and_write(CAP_USBCMD, USBCMD_SUTW, 0);
-}
-
-/**
- * hw_test_and_set_setup_guard: test & set setup guard (execute without
- *                              interruption)
- *
- * This function returns guard value
- */
-static int hw_test_and_set_setup_guard(void)
-{
-	return hw_ctest_and_write(CAP_USBCMD, USBCMD_SUTW, USBCMD_SUTW);
-}
-
-/**
- * hw_usb_set_address: configures USB address (execute without interruption)
- * @value: new USB address
- *
- * This function returns an error code
- */
-static int hw_usb_set_address(u8 value)
-{
-	/* advance */
-	hw_cwrite(CAP_DEVICEADDR, DEVICEADDR_USBADR | DEVICEADDR_USBADRA,
-		  value << ffs_nr(DEVICEADDR_USBADR) | DEVICEADDR_USBADRA);
-	return 0;
-}
-
-/**
- * hw_usb_reset: restart device after a bus reset (execute without
- *               interruption)
- *
- * This function returns an error code
- */
-static int hw_usb_reset(void)
-{
-	int delay_count = 10; /* 100 usec delay */
-
-	hw_usb_set_address(0);
-
-	/* ESS flushes only at end?!? */
-	hw_cwrite(CAP_ENDPTFLUSH,    ~0, ~0);   /* flush all EPs */
-
-	/* clear complete status */
-	hw_cwrite(CAP_ENDPTCOMPLETE,  0,  0);   /* writes its content */
-
-	/* wait until all bits cleared */
-	while (delay_count-- && hw_cread(CAP_ENDPTPRIME, ~0))
-		udelay(10);
-	if (delay_count < 0)
-		pr_err("ENDPTPRIME is not cleared during bus reset\n");
-
-	/* reset all endpoints ? */
-
-	/*
-	 * reset internal status and wait for further instructions
-	 * no need to verify the port reset status (ESS does it)
-	 */
-
-	return 0;
-}
-
-/******************************************************************************
- * DBG block
- *****************************************************************************/
-/**
- * show_device: prints information about device capabilities and status
- *
- * Check "device.h" for details
- */
-static ssize_t show_device(struct device *dev, struct device_attribute *attr,
-			   char *buf)
-{
-	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
-	struct usb_gadget *gadget = &udc->gadget;
-	int n = 0;
-
-	dbg_trace("[%s] %pK\n", __func__, buf);
-	if (attr == NULL || buf == NULL) {
-		dev_err(dev, "[%s] EINVAL\n", __func__);
-		return 0;
-	}
-
-	n += scnprintf(buf + n, PAGE_SIZE - n, "speed             = %d\n",
-		       gadget->speed);
-	n += scnprintf(buf + n, PAGE_SIZE - n, "max_speed         = %d\n",
-		       gadget->max_speed);
-	/* TODO: Scheduled for removal in 3.8. */
-	n += scnprintf(buf + n, PAGE_SIZE - n, "is_dualspeed      = %d\n",
-		       gadget_is_dualspeed(gadget));
-	n += scnprintf(buf + n, PAGE_SIZE - n, "is_otg            = %d\n",
-		       gadget->is_otg);
-	n += scnprintf(buf + n, PAGE_SIZE - n, "is_a_peripheral   = %d\n",
-		       gadget->is_a_peripheral);
-	n += scnprintf(buf + n, PAGE_SIZE - n, "b_hnp_enable      = %d\n",
-		       gadget->b_hnp_enable);
-	n += scnprintf(buf + n, PAGE_SIZE - n, "a_hnp_support     = %d\n",
-		       gadget->a_hnp_support);
-	n += scnprintf(buf + n, PAGE_SIZE - n, "a_alt_hnp_support = %d\n",
-		       gadget->a_alt_hnp_support);
-	n += scnprintf(buf + n, PAGE_SIZE - n, "name              = %s\n",
-		       (gadget->name ? gadget->name : ""));
-
-	return n;
-}
-static DEVICE_ATTR(device, 0400, show_device, NULL);
-
-/**
- * show_driver: prints information about attached gadget (if any)
- *
- * Check "device.h" for details
- */
-static ssize_t show_driver(struct device *dev, struct device_attribute *attr,
-			   char *buf)
-{
-	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
-	struct usb_gadget_driver *driver = udc->driver;
-	int n = 0;
-
-	dbg_trace("[%s] %pK\n", __func__, buf);
-	if (attr == NULL || buf == NULL) {
-		dev_err(dev, "[%s] EINVAL\n", __func__);
-		return 0;
-	}
-
-	if (driver == NULL)
-		return scnprintf(buf, PAGE_SIZE,
-				 "There is no gadget attached!\n");
-
-	n += scnprintf(buf + n, PAGE_SIZE - n, "function  = %s\n",
-		       (driver->function ? driver->function : ""));
-	n += scnprintf(buf + n, PAGE_SIZE - n, "max speed = %d\n",
-		       driver->max_speed);
-
-	return n;
-}
-static DEVICE_ATTR(driver, 0400, show_driver, NULL);
-
-/* Maximum event message length */
-#define DBG_DATA_MSG   64UL
-
-/* Maximum event messages */
-#define DBG_DATA_MAX   128UL
-
-/* Event buffer descriptor */
-static struct {
-	char		(buf[DBG_DATA_MAX])[DBG_DATA_MSG];   /* buffer */
-	unsigned int	idx;   /* index */
-	unsigned int	tty;   /* print to console? */
-	rwlock_t	lck;   /* lock */
-} dbg_data = {
-	.idx = 0,
-	.tty = 0,
-	.lck = __RW_LOCK_UNLOCKED(lck)
-};
-
-/**
- * dbg_dec: decrements debug event index
- * @idx: buffer index
- */
-static void dbg_dec(unsigned int *idx)
-{
-	*idx = (*idx - 1) & (DBG_DATA_MAX-1);
-}
-
-/**
- * dbg_inc: increments debug event index
- * @idx: buffer index
- */
-static void dbg_inc(unsigned int *idx)
-{
-	*idx = (*idx + 1) & (DBG_DATA_MAX-1);
-}
-
-
-static unsigned int ep_addr_txdbg_mask;
-module_param(ep_addr_txdbg_mask, uint, 0644);
-static unsigned int ep_addr_rxdbg_mask;
-module_param(ep_addr_rxdbg_mask, uint, 0644);
-
-static int allow_dbg_print(u8 addr)
-{
-	int dir, num;
-
-	/* allow bus wide events */
-	if (addr == 0xff)
-		return 1;
-
-	dir = addr & USB_ENDPOINT_DIR_MASK ? TX : RX;
-	num = addr & ~USB_ENDPOINT_DIR_MASK;
-	num = 1 << num;
-
-	if ((dir == TX) && (num & ep_addr_txdbg_mask))
-		return 1;
-	if ((dir == RX) && (num & ep_addr_rxdbg_mask))
-		return 1;
-
-	return 0;
-}
-
-#define TIME_BUF_LEN  20
-/*get_timestamp - returns time of day in us */
-static char *get_timestamp(char *tbuf)
-{
-	unsigned long long t;
-	unsigned long nanosec_rem;
-
-	t = cpu_clock(smp_processor_id());
-	nanosec_rem = do_div(t, 1000000000)/1000;
-	scnprintf(tbuf, TIME_BUF_LEN, "[%5lu.%06lu] ", (unsigned long)t,
-		nanosec_rem);
-	return tbuf;
-}
-
-/**
- * dbg_print:  prints the common part of the event
- * @addr:   endpoint address
- * @name:   event name
- * @status: status
- * @extra:  extra information
- */
-static void dbg_print(u8 addr, const char *name, int status, const char *extra)
-{
-	unsigned long flags;
-	char tbuf[TIME_BUF_LEN];
-
-	if (!allow_dbg_print(addr))
-		return;
-
-	write_lock_irqsave(&dbg_data.lck, flags);
-
-	scnprintf(dbg_data.buf[dbg_data.idx], DBG_DATA_MSG,
-		  "%s\t? %02X %-7.7s %4i ?\t%s\n",
-		  get_timestamp(tbuf), addr, name, status, extra);
-
-	dbg_inc(&dbg_data.idx);
-
-	write_unlock_irqrestore(&dbg_data.lck, flags);
-
-	if (dbg_data.tty != 0)
-		pr_notice("%s\t? %02X %-7.7s %4i ?\t%s\n",
-			  get_timestamp(tbuf), addr, name, status, extra);
-}
-
-/**
- * dbg_done: prints a DONE event
- * @addr:   endpoint address
- * @td:     transfer descriptor
- * @status: status
- */
-static void dbg_done(u8 addr, const u32 token, int status)
-{
-	char msg[DBG_DATA_MSG];
-
-	scnprintf(msg, sizeof(msg), "%d %02X",
-		  (int)(token & TD_TOTAL_BYTES) >> ffs_nr(TD_TOTAL_BYTES),
-		  (int)(token & TD_STATUS)      >> ffs_nr(TD_STATUS));
-	dbg_print(addr, "DONE", status, msg);
-}
-
-/**
- * dbg_event: prints a generic event
- * @addr:   endpoint address
- * @name:   event name
- * @status: status
- */
-static void dbg_event(u8 addr, const char *name, int status)
-{
-	if (name != NULL)
-		dbg_print(addr, name, status, "");
-}
-
-/*
- * dbg_queue: prints a QUEUE event
- * @addr:   endpoint address
- * @req:    USB request
- * @status: status
- */
-static void dbg_queue(u8 addr, const struct usb_request *req, int status)
-{
-	char msg[DBG_DATA_MSG];
-
-	if (req != NULL) {
-		scnprintf(msg, sizeof(msg),
-			  "%d %d", !req->no_interrupt, req->length);
-		dbg_print(addr, "QUEUE", status, msg);
-	}
-}
-
-/**
- * dbg_setup: prints a SETUP event
- * @addr: endpoint address
- * @req:  setup request
- */
-static void dbg_setup(u8 addr, const struct usb_ctrlrequest *req)
-{
-	char msg[DBG_DATA_MSG];
-
-	if (req != NULL) {
-		scnprintf(msg, sizeof(msg),
-			  "%02X %02X %04X %04X %d", req->bRequestType,
-			  req->bRequest, le16_to_cpu(req->wValue),
-			  le16_to_cpu(req->wIndex), le16_to_cpu(req->wLength));
-		dbg_print(addr, "SETUP", 0, msg);
-	}
-}
-
-/**
- * dbg_usb_op_fail: prints USB Operation FAIL event
- * @addr: endpoint address
- * @mEp:  endpoint structure
- */
-static void dbg_usb_op_fail(u8 addr, const char *name,
-				const struct ci13xxx_ep *mep)
-{
-	char msg[DBG_DATA_MSG];
-	struct ci13xxx_req *req;
-	struct list_head *ptr = NULL;
-
-	if (mep != NULL) {
-		scnprintf(msg, sizeof(msg),
-			"%s Fail EP%d%s QH:%08X",
-			name, mep->num,
-			mep->dir ? "IN" : "OUT", mep->qh.ptr->cap);
-		dbg_print(addr, name, 0, msg);
-		scnprintf(msg, sizeof(msg),
-				"cap:%08X %08X %08X\n",
-				mep->qh.ptr->curr, mep->qh.ptr->td.next,
-				mep->qh.ptr->td.token);
-		dbg_print(addr, "QHEAD", 0, msg);
-
-		list_for_each(ptr, &mep->qh.queue) {
-			req = list_entry(ptr, struct ci13xxx_req, queue);
-			scnprintf(msg, sizeof(msg),
-					"%pKa:%08X:%08X\n",
-					&req->dma, req->ptr->next,
-					req->ptr->token);
-			dbg_print(addr, "REQ", 0, msg);
-			scnprintf(msg, sizeof(msg), "%08X:%d\n",
-					req->ptr->page[0],
-					req->req.status);
-			dbg_print(addr, "REQPAGE", 0, msg);
-		}
-	}
-}
-
-/**
- * show_events: displays the event buffer
- *
- * Check "device.h" for details
- */
-static ssize_t show_events(struct device *dev, struct device_attribute *attr,
-			   char *buf)
-{
-	unsigned long flags;
-	unsigned int i, j, n = 0;
-
-	dbg_trace("[%s] %pK\n", __func__, buf);
-	if (attr == NULL || buf == NULL) {
-		dev_err(dev, "[%s] EINVAL\n", __func__);
-		return 0;
-	}
-
-	read_lock_irqsave(&dbg_data.lck, flags);
-
-	i = dbg_data.idx;
-	for (dbg_dec(&i); i != dbg_data.idx; dbg_dec(&i)) {
-		n += strlen(dbg_data.buf[i]);
-		if (n >= PAGE_SIZE) {
-			n -= strlen(dbg_data.buf[i]);
-			break;
-		}
-	}
-	for (j = 0, dbg_inc(&i); j < n; dbg_inc(&i))
-		j += scnprintf(buf + j, PAGE_SIZE - j,
-			       "%s", dbg_data.buf[i]);
-
-	read_unlock_irqrestore(&dbg_data.lck, flags);
-
-	return n;
-}
-
-/**
- * store_events: configure if events are going to be also printed to console
- *
- * Check "device.h" for details
- */
-static ssize_t store_events(struct device *dev, struct device_attribute *attr,
-			    const char *buf, size_t count)
-{
-	unsigned int tty;
-
-	dbg_trace("[%s] %pK, %d\n", __func__, buf, count);
-	if (attr == NULL || buf == NULL) {
-		dev_err(dev, "[%s] EINVAL\n", __func__);
-		goto done;
-	}
-
-	if (kstrtouint(buf, 10, &tty) || tty > 1) {
-		dev_err(dev, "<1|0>: enable|disable console log\n");
-		goto done;
-	}
-
-	dbg_data.tty = tty;
-	dev_info(dev, "tty = %u", dbg_data.tty);
-
- done:
-	return count;
-}
-static DEVICE_ATTR(events, 0600, show_events, store_events);
-
-/**
- * show_inters: interrupt status, enable status and historic
- *
- * Check "device.h" for details
- */
-static ssize_t show_inters(struct device *dev, struct device_attribute *attr,
-			   char *buf)
-{
-	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
-	unsigned long flags;
-	u32 intr;
-	unsigned int i, j, n = 0;
-
-	dbg_trace("[%s] %pK\n", __func__, buf);
-	if (attr == NULL || buf == NULL) {
-		dev_err(dev, "[%s] EINVAL\n", __func__);
-		return 0;
-	}
-
-	spin_lock_irqsave(udc->lock, flags);
-
-	n += scnprintf(buf + n, PAGE_SIZE - n,
-		       "status = %08x\n", hw_read_intr_status());
-	n += scnprintf(buf + n, PAGE_SIZE - n,
-		       "enable = %08x\n", hw_read_intr_enable());
-
-	n += scnprintf(buf + n, PAGE_SIZE - n, "*test = %d\n",
-		       isr_statistics.test);
-	n += scnprintf(buf + n, PAGE_SIZE - n, "? ui  = %d\n",
-		       isr_statistics.ui);
-	n += scnprintf(buf + n, PAGE_SIZE - n, "? uei = %d\n",
-		       isr_statistics.uei);
-	n += scnprintf(buf + n, PAGE_SIZE - n, "? pci = %d\n",
-		       isr_statistics.pci);
-	n += scnprintf(buf + n, PAGE_SIZE - n, "? uri = %d\n",
-		       isr_statistics.uri);
-	n += scnprintf(buf + n, PAGE_SIZE - n, "? sli = %d\n",
-		       isr_statistics.sli);
-	n += scnprintf(buf + n, PAGE_SIZE - n, "*none = %d\n",
-		       isr_statistics.none);
-	n += scnprintf(buf + n, PAGE_SIZE - n, "*hndl = %d\n",
-		       isr_statistics.hndl.cnt);
-
-	for (i = isr_statistics.hndl.idx, j = 0; j <= ISR_MASK; j++, i++) {
-		i   &= ISR_MASK;
-		intr = isr_statistics.hndl.buf[i];
-
-		if (USBi_UI  & intr)
-			n += scnprintf(buf + n, PAGE_SIZE - n, "ui  ");
-		intr &= ~USBi_UI;
-		if (USBi_UEI & intr)
-			n += scnprintf(buf + n, PAGE_SIZE - n, "uei ");
-		intr &= ~USBi_UEI;
-		if (USBi_PCI & intr)
-			n += scnprintf(buf + n, PAGE_SIZE - n, "pci ");
-		intr &= ~USBi_PCI;
-		if (USBi_URI & intr)
-			n += scnprintf(buf + n, PAGE_SIZE - n, "uri ");
-		intr &= ~USBi_URI;
-		if (USBi_SLI & intr)
-			n += scnprintf(buf + n, PAGE_SIZE - n, "sli ");
-		intr &= ~USBi_SLI;
-		if (intr)
-			n += scnprintf(buf + n, PAGE_SIZE - n, "??? ");
-		if (isr_statistics.hndl.buf[i])
-			n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
-	}
-
-	spin_unlock_irqrestore(udc->lock, flags);
-
-	return n;
-}
-
-/**
- * store_inters: enable & force or disable an individual interrutps
- *                   (to be used for test purposes only)
- *
- * Check "device.h" for details
- */
-static ssize_t store_inters(struct device *dev, struct device_attribute *attr,
-			    const char *buf, size_t count)
-{
-	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
-	unsigned long flags;
-	unsigned int en, bit;
-
-	dbg_trace("[%s] %pK, %d\n", __func__, buf, count);
-	if (attr == NULL || buf == NULL) {
-		dev_err(dev, "[%s] EINVAL\n", __func__);
-		goto done;
-	}
-
-	if (sscanf(buf, "%u %u", &en, &bit) != 2 || en > 1) {
-		dev_err(dev, "<1|0> <bit>: enable|disable interrupt");
-		goto done;
-	}
-
-	spin_lock_irqsave(udc->lock, flags);
-	if (en) {
-		if (hw_intr_force(bit))
-			dev_err(dev, "invalid bit number\n");
-		else
-			isr_statistics.test++;
-	} else {
-		if (hw_intr_clear(bit))
-			dev_err(dev, "invalid bit number\n");
-	}
-	spin_unlock_irqrestore(udc->lock, flags);
-
- done:
-	return count;
-}
-static DEVICE_ATTR(inters, 0600, show_inters, store_inters);
-
-/**
- * show_port_test: reads port test mode
- *
- * Check "device.h" for details
- */
-static ssize_t show_port_test(struct device *dev,
-			      struct device_attribute *attr, char *buf)
-{
-	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
-	unsigned long flags;
-	unsigned int mode;
-
-	dbg_trace("[%s] %pK\n", __func__, buf);
-	if (attr == NULL || buf == NULL) {
-		dev_err(dev, "[%s] EINVAL\n", __func__);
-		return 0;
-	}
-
-	spin_lock_irqsave(udc->lock, flags);
-	mode = hw_port_test_get();
-	spin_unlock_irqrestore(udc->lock, flags);
-
-	return scnprintf(buf, PAGE_SIZE, "mode = %u\n", mode);
-}
-
-/**
- * store_port_test: writes port test mode
- *
- * Check "device.h" for details
- */
-static ssize_t store_port_test(struct device *dev,
-			       struct device_attribute *attr,
-			       const char *buf, size_t count)
-{
-	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
-	unsigned long flags;
-	unsigned int mode;
-
-	dbg_trace("[%s] %pK, %d\n", __func__, buf, count);
-	if (attr == NULL || buf == NULL) {
-		dev_err(dev, "[%s] EINVAL\n", __func__);
-		goto done;
-	}
-
-	if (kstrtouint(buf, 10, &mode)) {
-		dev_err(dev, "<mode>: set port test mode");
-		goto done;
-	}
-
-	spin_lock_irqsave(udc->lock, flags);
-	if (hw_port_test_set(mode))
-		dev_err(dev, "invalid mode\n");
-	spin_unlock_irqrestore(udc->lock, flags);
-
- done:
-	return count;
-}
-static DEVICE_ATTR(port_test, 0600, show_port_test, store_port_test);
-
-/**
- * show_qheads: DMA contents of all queue heads
- *
- * Check "device.h" for details
- */
-static ssize_t show_qheads(struct device *dev, struct device_attribute *attr,
-			   char *buf)
-{
-	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
-	unsigned long flags;
-	unsigned int i, j, n = 0;
-
-	dbg_trace("[%s] %pK\n", __func__, buf);
-	if (attr == NULL || buf == NULL) {
-		dev_err(dev, "[%s] EINVAL\n", __func__);
-		return 0;
-	}
-
-	spin_lock_irqsave(udc->lock, flags);
-	for (i = 0; i < hw_ep_max/2; i++) {
-		struct ci13xxx_ep *mEpRx = &udc->ci13xxx_ep[i];
-		struct ci13xxx_ep *mEpTx = &udc->ci13xxx_ep[i + hw_ep_max/2];
-
-		n += scnprintf(buf + n, PAGE_SIZE - n,
-			       "EP=%02i: RX=%08X TX=%08X\n",
-			       i, (u32)mEpRx->qh.dma, (u32)mEpTx->qh.dma);
-		for (j = 0; j < (sizeof(struct ci13xxx_qh)/sizeof(u32)); j++) {
-			n += scnprintf(buf + n, PAGE_SIZE - n,
-				       " %04X:    %08X    %08X\n", j,
-				       *((u32 *)mEpRx->qh.ptr + j),
-				       *((u32 *)mEpTx->qh.ptr + j));
-		}
-	}
-	spin_unlock_irqrestore(udc->lock, flags);
-
-	return n;
-}
-static DEVICE_ATTR(qheads, 0400, show_qheads, NULL);
-
-/**
- * show_registers: dumps all registers
- *
- * Check "device.h" for details
- */
-#define DUMP_ENTRIES	512
-static ssize_t show_registers(struct device *dev,
-			      struct device_attribute *attr, char *buf)
-{
-	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
-	unsigned long flags;
-	u32 *dump;
-	unsigned int i, k, n = 0;
-
-	dbg_trace("[%s] %pK\n", __func__, buf);
-	if (attr == NULL || buf == NULL) {
-		dev_err(dev, "[%s] EINVAL\n", __func__);
-		return 0;
-	}
-
-	dump = kmalloc(sizeof(u32) * DUMP_ENTRIES, GFP_KERNEL);
-	if (!dump)
-		return 0;
-
-	spin_lock_irqsave(udc->lock, flags);
-	k = hw_register_read(dump, DUMP_ENTRIES);
-	spin_unlock_irqrestore(udc->lock, flags);
-
-	for (i = 0; i < k; i++) {
-		n += scnprintf(buf + n, PAGE_SIZE - n,
-			       "reg[0x%04X] = 0x%08X\n",
-			       i * (unsigned int)sizeof(u32), dump[i]);
-	}
-	kfree(dump);
-
-	return n;
-}
-
-/**
- * store_registers: writes value to register address
- *
- * Check "device.h" for details
- */
-static ssize_t store_registers(struct device *dev,
-			       struct device_attribute *attr,
-			       const char *buf, size_t count)
-{
-	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
-	unsigned long addr, data, flags;
-
-	dbg_trace("[%s] %pK, %d\n", __func__, buf, count);
-	if (attr == NULL || buf == NULL) {
-		dev_err(dev, "[%s] EINVAL\n", __func__);
-		goto done;
-	}
-
-	if (sscanf(buf, "%li %li", &addr, &data) != 2) {
-		dev_err(dev, "<addr> <data>: write data to register address");
-		goto done;
-	}
-
-	spin_lock_irqsave(udc->lock, flags);
-	if (hw_register_write(addr, data))
-		dev_err(dev, "invalid address range\n");
-	spin_unlock_irqrestore(udc->lock, flags);
-
- done:
-	return count;
-}
-static DEVICE_ATTR(registers, 0600, show_registers, store_registers);
-
-/**
- * show_requests: DMA contents of all requests currently queued (all endpts)
- *
- * Check "device.h" for details
- */
-static ssize_t show_requests(struct device *dev, struct device_attribute *attr,
-			     char *buf)
-{
-	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
-	unsigned long flags;
-	struct list_head   *ptr = NULL;
-	struct ci13xxx_req *req = NULL;
-	unsigned int i, j, n = 0, qSize = sizeof(struct ci13xxx_td)/sizeof(u32);
-
-	dbg_trace("[%s] %pK\n", __func__, buf);
-	if (attr == NULL || buf == NULL) {
-		dev_err(dev, "[%s] EINVAL\n", __func__);
-		return 0;
-	}
-
-	spin_lock_irqsave(udc->lock, flags);
-	for (i = 0; i < hw_ep_max; i++)
-		list_for_each(ptr, &udc->ci13xxx_ep[i].qh.queue)
-		{
-			req = list_entry(ptr, struct ci13xxx_req, queue);
-
-			n += scnprintf(buf + n, PAGE_SIZE - n,
-					"EP=%02i: TD=%08X %s\n",
-					i % hw_ep_max/2, (u32)req->dma,
-					((i < hw_ep_max/2) ? "RX" : "TX"));
-
-			for (j = 0; j < qSize; j++)
-				n += scnprintf(buf + n, PAGE_SIZE - n,
-						" %04X:    %08X\n", j,
-						*((u32 *)req->ptr + j));
-		}
-	spin_unlock_irqrestore(udc->lock, flags);
-
-	return n;
-}
-static DEVICE_ATTR(requests, 0400, show_requests, NULL);
-
-/* EP# and Direction */
-static ssize_t prime_ept(struct device *dev,
-			       struct device_attribute *attr,
-			       const char *buf, size_t count)
-{
-	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
-	struct ci13xxx_ep *mEp;
-	unsigned int ep_num, dir;
-	int n;
-	struct ci13xxx_req *mReq = NULL;
-
-	if (sscanf(buf, "%u %u", &ep_num, &dir) != 2) {
-		dev_err(dev, "<ep_num> <dir>: prime the ep");
-		goto done;
-	}
-
-	if (dir)
-		mEp = &udc->ci13xxx_ep[ep_num + hw_ep_max/2];
-	else
-		mEp = &udc->ci13xxx_ep[ep_num];
-
-	n = hw_ep_bit(mEp->num, mEp->dir);
-	mReq =  list_entry(mEp->qh.queue.next, struct ci13xxx_req, queue);
-	mEp->qh.ptr->td.next   = mReq->dma;
-	mEp->qh.ptr->td.token &= ~TD_STATUS;
-
-	/* Makes sure that above write goes through */
-	wmb();
-
-	hw_cwrite(CAP_ENDPTPRIME, BIT(n), BIT(n));
-	while (hw_cread(CAP_ENDPTPRIME, BIT(n)))
-		cpu_relax();
-
-	pr_info("%s: prime:%08x stat:%08x ep#%d dir:%s\n", __func__,
-			hw_cread(CAP_ENDPTPRIME, ~0),
-			hw_cread(CAP_ENDPTSTAT, ~0),
-			mEp->num, mEp->dir ? "IN" : "OUT");
-done:
-	return count;
-
-}
-static DEVICE_ATTR(prime, 0200, NULL, prime_ept);
-
-/* EP# and Direction */
-static ssize_t print_dtds(struct device *dev,
-			       struct device_attribute *attr,
-			       const char *buf, size_t count)
-{
-	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
-	struct ci13xxx_ep *mEp;
-	unsigned int ep_num, dir;
-	int n;
-	struct list_head   *ptr = NULL;
-	struct ci13xxx_req *req = NULL;
-
-	if (sscanf(buf, "%u %u", &ep_num, &dir) != 2) {
-		dev_err(dev, "<ep_num> <dir>: to print dtds");
-		goto done;
-	}
-
-	if (dir)
-		mEp = &udc->ci13xxx_ep[ep_num + hw_ep_max/2];
-	else
-		mEp = &udc->ci13xxx_ep[ep_num];
-
-	n = hw_ep_bit(mEp->num, mEp->dir);
-	pr_info("%s: prime:%08x stat:%08x ep#%d dir:%s dTD_update_fail_count: %lu mEp->dTD_update_fail_count: %lu mEp->dTD_active_re_q_count: %lu mEp->prime_fail_count: %lu\n",
-			__func__,
-			hw_cread(CAP_ENDPTPRIME, ~0),
-			hw_cread(CAP_ENDPTSTAT, ~0),
-			mEp->num, mEp->dir ? "IN" : "OUT",
-			udc->dTD_update_fail_count,
-			mEp->dTD_update_fail_count,
-			mEp->dTD_active_re_q_count,
-			mEp->prime_fail_count);
-
-	pr_info("QH: cap:%08x cur:%08x next:%08x token:%08x\n",
-			mEp->qh.ptr->cap, mEp->qh.ptr->curr,
-			mEp->qh.ptr->td.next, mEp->qh.ptr->td.token);
-
-	list_for_each(ptr, &mEp->qh.queue) {
-		req = list_entry(ptr, struct ci13xxx_req, queue);
-
-		pr_info("\treq:%pKa next:%08x token:%08x page0:%08x status:%d\n",
-				&req->dma, req->ptr->next, req->ptr->token,
-				req->ptr->page[0], req->req.status);
-	}
-done:
-	return count;
-
-}
-static DEVICE_ATTR(dtds, 0200, NULL, print_dtds);
-
-static int ci13xxx_wakeup(struct usb_gadget *_gadget)
-{
-	struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
-	unsigned long flags;
-	int ret = 0;
-
-	trace();
-
-	spin_lock_irqsave(udc->lock, flags);
-	if (!udc->gadget.remote_wakeup) {
-		ret = -EOPNOTSUPP;
-		dbg_trace("remote wakeup feature is not enabled\n");
-		goto out;
-	}
-	spin_unlock_irqrestore(udc->lock, flags);
-
-	pm_runtime_get_sync(&_gadget->dev);
-
-	udc->udc_driver->notify_event(udc,
-		CI13XXX_CONTROLLER_REMOTE_WAKEUP_EVENT);
-
-	if (udc->transceiver)
-		usb_phy_set_suspend(udc->transceiver, 0);
-
-	spin_lock_irqsave(udc->lock, flags);
-	if (!hw_cread(CAP_PORTSC, PORTSC_SUSP)) {
-		ret = -EINVAL;
-		dbg_trace("port is not suspended\n");
-		pm_runtime_put(&_gadget->dev);
-		goto out;
-	}
-	hw_cwrite(CAP_PORTSC, PORTSC_FPR, PORTSC_FPR);
-
-	pm_runtime_mark_last_busy(&_gadget->dev);
-	pm_runtime_put_autosuspend(&_gadget->dev);
-out:
-	spin_unlock_irqrestore(udc->lock, flags);
-	return ret;
-}
-
-static void usb_do_remote_wakeup(struct work_struct *w)
-{
-	struct ci13xxx *udc = _udc;
-	unsigned long flags;
-	bool do_wake;
-
-	/*
-	 * This work can not be canceled from interrupt handler. Check
-	 * if wakeup conditions are still met.
-	 */
-	spin_lock_irqsave(udc->lock, flags);
-	do_wake = udc->suspended && udc->gadget.remote_wakeup;
-	spin_unlock_irqrestore(udc->lock, flags);
-
-	if (do_wake)
-		ci13xxx_wakeup(&udc->gadget);
-}
-
-static ssize_t usb_remote_wakeup(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t count)
-{
-	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
-
-	ci13xxx_wakeup(&udc->gadget);
-
-	return count;
-}
-static DEVICE_ATTR(wakeup, 0200, 0, usb_remote_wakeup);
-
-/**
- * dbg_create_files: initializes the attribute interface
- * @dev: device
- *
- * This function returns an error code
- */
-static int __maybe_unused dbg_create_files(struct device *dev)
-{
-	int retval = 0;
-
-	if (dev == NULL)
-		return -EINVAL;
-	retval = device_create_file(dev, &dev_attr_device);
-	if (retval)
-		goto done;
-	retval = device_create_file(dev, &dev_attr_driver);
-	if (retval)
-		goto rm_device;
-	retval = device_create_file(dev, &dev_attr_events);
-	if (retval)
-		goto rm_driver;
-	retval = device_create_file(dev, &dev_attr_inters);
-	if (retval)
-		goto rm_events;
-	retval = device_create_file(dev, &dev_attr_port_test);
-	if (retval)
-		goto rm_inters;
-	retval = device_create_file(dev, &dev_attr_qheads);
-	if (retval)
-		goto rm_port_test;
-	retval = device_create_file(dev, &dev_attr_registers);
-	if (retval)
-		goto rm_qheads;
-	retval = device_create_file(dev, &dev_attr_requests);
-	if (retval)
-		goto rm_registers;
-	retval = device_create_file(dev, &dev_attr_wakeup);
-	if (retval)
-		goto rm_remote_wakeup;
-	retval = device_create_file(dev, &dev_attr_prime);
-	if (retval)
-		goto rm_prime;
-	retval = device_create_file(dev, &dev_attr_dtds);
-	if (retval)
-		goto rm_dtds;
-
-	return 0;
-
-rm_dtds:
-	device_remove_file(dev, &dev_attr_dtds);
-rm_prime:
-	device_remove_file(dev, &dev_attr_prime);
-rm_remote_wakeup:
-	device_remove_file(dev, &dev_attr_wakeup);
- rm_registers:
-	device_remove_file(dev, &dev_attr_registers);
- rm_qheads:
-	device_remove_file(dev, &dev_attr_qheads);
- rm_port_test:
-	device_remove_file(dev, &dev_attr_port_test);
- rm_inters:
-	device_remove_file(dev, &dev_attr_inters);
- rm_events:
-	device_remove_file(dev, &dev_attr_events);
- rm_driver:
-	device_remove_file(dev, &dev_attr_driver);
- rm_device:
-	device_remove_file(dev, &dev_attr_device);
- done:
-	return retval;
-}
-
-/**
- * dbg_remove_files: destroys the attribute interface
- * @dev: device
- *
- * This function returns an error code
- */
-static int __maybe_unused dbg_remove_files(struct device *dev)
-{
-	if (dev == NULL)
-		return -EINVAL;
-	device_remove_file(dev, &dev_attr_requests);
-	device_remove_file(dev, &dev_attr_registers);
-	device_remove_file(dev, &dev_attr_qheads);
-	device_remove_file(dev, &dev_attr_port_test);
-	device_remove_file(dev, &dev_attr_inters);
-	device_remove_file(dev, &dev_attr_events);
-	device_remove_file(dev, &dev_attr_driver);
-	device_remove_file(dev, &dev_attr_device);
-	device_remove_file(dev, &dev_attr_wakeup);
-	return 0;
-}
-
-/******************************************************************************
- * UTIL block
- *****************************************************************************/
-/**
- * _usb_addr: calculates endpoint address from direction & number
- * @ep:  endpoint
- */
-static inline u8 _usb_addr(struct ci13xxx_ep *ep)
-{
-	return ((ep->dir == TX) ? USB_ENDPOINT_DIR_MASK : 0) | ep->num;
-}
-
-static void ep_prime_timer_func(unsigned long data)
-{
-	struct ci13xxx_ep *mep = (struct ci13xxx_ep *)data;
-	struct ci13xxx_req *req;
-	struct list_head *ptr = NULL;
-	int n = hw_ep_bit(mep->num, mep->dir);
-	unsigned long flags;
-
-
-	spin_lock_irqsave(mep->lock, flags);
-
-	if (_udc && (!_udc->vbus_active || _udc->suspended)) {
-		pr_debug("ep%d%s prime timer when vbus_active=%d,suspend=%d\n",
-			mep->num, mep->dir ? "IN" : "OUT",
-			_udc->vbus_active, _udc->suspended);
-		goto out;
-	}
-
-	if (!hw_cread(CAP_ENDPTPRIME, BIT(n)))
-		goto out;
-
-	if (list_empty(&mep->qh.queue))
-		goto out;
-
-	req = list_entry(mep->qh.queue.next, struct ci13xxx_req, queue);
-
-	/* clean speculative fetches on req->ptr->token */
-	mb();
-	if (!(TD_STATUS_ACTIVE & req->ptr->token))
-		goto out;
-
-	mep->prime_timer_count++;
-	if (mep->prime_timer_count == MAX_PRIME_CHECK_RETRY) {
-		mep->prime_timer_count = 0;
-		pr_info("ep%d dir:%s QH:cap:%08x cur:%08x next:%08x tkn:%08x\n",
-				mep->num, mep->dir ? "IN" : "OUT",
-				mep->qh.ptr->cap, mep->qh.ptr->curr,
-				mep->qh.ptr->td.next, mep->qh.ptr->td.token);
-		list_for_each(ptr, &mep->qh.queue) {
-			req = list_entry(ptr, struct ci13xxx_req, queue);
-			pr_info("\treq:%pKa:%08xtkn:%08xpage0:%08xsts:%d\n",
-					&req->dma, req->ptr->next,
-					req->ptr->token, req->ptr->page[0],
-					req->req.status);
-		}
-		dbg_usb_op_fail(0xFF, "PRIMEF", mep);
-		mep->prime_fail_count++;
-	} else {
-		mod_timer(&mep->prime_timer, EP_PRIME_CHECK_DELAY);
-	}
-
-	spin_unlock_irqrestore(mep->lock, flags);
-	return;
-
-out:
-	mep->prime_timer_count = 0;
-	spin_unlock_irqrestore(mep->lock, flags);
-
-}
-
-/**
- * _hardware_queue: configures a request at hardware level
- * @gadget: gadget
- * @mEp:    endpoint
- *
- * This function returns an error code
- */
-static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
-{
-	unsigned int i;
-	int ret = 0;
-	unsigned int length = mReq->req.length;
-	struct ci13xxx *udc = _udc;
-
-	trace("%pK, %pK", mEp, mReq);
-
-	/* don't queue twice */
-	if (mReq->req.status == -EALREADY)
-		return -EALREADY;
-
-	mReq->req.status = -EALREADY;
-	if (length && mReq->req.dma == DMA_ERROR_CODE) {
-		mReq->req.dma = dma_map_single(mEp->device, mReq->req.buf,
-					length, mEp->dir ? DMA_TO_DEVICE :
-					DMA_FROM_DEVICE);
-		if (mReq->req.dma == 0)
-			return -ENOMEM;
-
-		mReq->map = 1;
-	}
-
-	if (mReq->req.zero && length && (length % mEp->ep.maxpacket == 0)) {
-		mReq->zptr = dma_pool_alloc(mEp->td_pool, GFP_ATOMIC,
-					   &mReq->zdma);
-		if (mReq->zptr == NULL) {
-			if (mReq->map) {
-				dma_unmap_single(mEp->device, mReq->req.dma,
-					length, mEp->dir ? DMA_TO_DEVICE :
-					DMA_FROM_DEVICE);
-				mReq->req.dma = DMA_ERROR_CODE;
-				mReq->map     = 0;
-			}
-			return -ENOMEM;
-		}
-		memset(mReq->zptr, 0, sizeof(*mReq->zptr));
-		mReq->zptr->next    = TD_TERMINATE;
-		mReq->zptr->token   = TD_STATUS_ACTIVE;
-		if (!mReq->req.no_interrupt)
-			mReq->zptr->token   |= TD_IOC;
-	}
-
-	/*
-	 * TD configuration
-	 * TODO - handle requests which spawns into several TDs
-	 */
-	memset(mReq->ptr, 0, sizeof(*mReq->ptr));
-	mReq->ptr->token    = length << ffs_nr(TD_TOTAL_BYTES);
-	mReq->ptr->token   &= TD_TOTAL_BYTES;
-	mReq->ptr->token   |= TD_STATUS_ACTIVE;
-	if (mReq->zptr) {
-		mReq->ptr->next    = mReq->zdma;
-	} else {
-		mReq->ptr->next    = TD_TERMINATE;
-		if (!mReq->req.no_interrupt)
-			mReq->ptr->token  |= TD_IOC;
-	}
-
-	/* MSM Specific: updating the request as required for
-	 * SPS mode. Enable MSM DMA engine according
-	 * to the UDC private data in the request.
-	 */
-	if (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID) {
-		if (mReq->req.udc_priv & MSM_SPS_MODE) {
-			mReq->ptr->token = TD_STATUS_ACTIVE;
-			if (mReq->req.udc_priv & MSM_IS_FINITE_TRANSFER)
-				mReq->ptr->next = TD_TERMINATE;
-			else
-				mReq->ptr->next = MSM_ETD_TYPE | mReq->dma;
-			if (!mReq->req.no_interrupt)
-				mReq->ptr->token |= MSM_ETD_IOC;
-		}
-		mReq->req.dma = 0;
-	}
-
-	mReq->ptr->page[0]  = mReq->req.dma;
-	for (i = 1; i < 5; i++)
-		mReq->ptr->page[i] = (mReq->req.dma + i * CI13XXX_PAGE_SIZE) &
-							~TD_RESERVED_MASK;
-	/* Makes sure that above write goes through */
-	wmb();
-
-	/* Remote Wakeup */
-	if (udc->suspended) {
-		if (!udc->gadget.remote_wakeup) {
-			mReq->req.status = -EAGAIN;
-
-			dev_dbg(mEp->device, "%s: queue failed (suspend).",
-					__func__);
-			dev_dbg(mEp->device, "%s: Remote wakeup is not supported. ept #%d\n",
-					__func__, mEp->num);
-
-			return -EAGAIN;
-		}
-
-		usb_phy_set_suspend(udc->transceiver, 0);
-		schedule_delayed_work(&udc->rw_work, REMOTE_WAKEUP_DELAY);
-	}
-
-	if (!list_empty(&mEp->qh.queue)) {
-		struct ci13xxx_req *mReqPrev;
-		int n = hw_ep_bit(mEp->num, mEp->dir);
-		int tmp_stat;
-		ktime_t start, diff;
-
-		mReqPrev = list_entry(mEp->qh.queue.prev,
-				struct ci13xxx_req, queue);
-		if (mReqPrev->zptr)
-			mReqPrev->zptr->next = mReq->dma & TD_ADDR_MASK;
-		else
-			mReqPrev->ptr->next = mReq->dma & TD_ADDR_MASK;
-		/* Makes sure that above write goes through */
-		wmb();
-		if (hw_cread(CAP_ENDPTPRIME, BIT(n)))
-			goto done;
-		start = ktime_get();
-		do {
-			hw_cwrite(CAP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
-			tmp_stat = hw_cread(CAP_ENDPTSTAT, BIT(n));
-			diff = ktime_sub(ktime_get(), start);
-			/* poll for max. 100ms */
-			if (ktime_to_ms(diff) > USB_MAX_TIMEOUT) {
-				if (hw_cread(CAP_USBCMD, USBCMD_ATDTW))
-					break;
-				printk_ratelimited(KERN_ERR
-				"%s:queue failed ep#%d %s\n",
-				 __func__, mEp->num, mEp->dir ? "IN" : "OUT");
-				return -EAGAIN;
-			}
-		} while (!hw_cread(CAP_USBCMD, USBCMD_ATDTW));
-		hw_cwrite(CAP_USBCMD, USBCMD_ATDTW, 0);
-		if (tmp_stat)
-			goto done;
-	}
-
-	/* Hardware may leave few TDs unprocessed, check and reprime with 1st */
-	if (!list_empty(&mEp->qh.queue)) {
-		struct ci13xxx_req *mReq_active, *mReq_next;
-		u32 i = 0;
-
-		/* Nothing to be done if hardware already finished this TD */
-		if ((TD_STATUS_ACTIVE & mReq->ptr->token) == 0)
-			goto done;
-
-		/* Iterate forward to find first TD with ACTIVE bit set */
-		mReq_active = mReq;
-		list_for_each_entry(mReq_next, &mEp->qh.queue, queue) {
-			i++;
-			mEp->dTD_active_re_q_count++;
-			if (TD_STATUS_ACTIVE & mReq_next->ptr->token) {
-				mReq_active = mReq_next;
-				dbg_event(_usb_addr(mEp), "ReQUE",
-					  mReq_next->ptr->token);
-				pr_debug("!!ReQ(%u-%u-%x)-%u!!\n", mEp->num,
-					 mEp->dir, mReq_next->ptr->token, i);
-				break;
-			}
-		}
-
-		/*  QH configuration */
-		mEp->qh.ptr->td.next = mReq_active->dma;
-		mEp->qh.ptr->td.token &= ~TD_STATUS;
-		goto prime;
-	}
-
-	/*  QH configuration */
-	mEp->qh.ptr->td.next   = mReq->dma;    /* TERMINATE = 0 */
-
-	if (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID) {
-		if (mReq->req.udc_priv & MSM_SPS_MODE) {
-			mEp->qh.ptr->td.next   |= MSM_ETD_TYPE;
-			i = hw_cread(CAP_ENDPTPIPEID +
-						 mEp->num * sizeof(u32), ~0);
-			/* Read current value of this EPs pipe id */
-			i = (mEp->dir == TX) ?
-				((i >> MSM_TX_PIPE_ID_OFS) & MSM_PIPE_ID_MASK) :
-					(i & MSM_PIPE_ID_MASK);
-			/*
-			 * If requested pipe id is different from current,
-			 * then write it
-			 */
-			if (i != (mReq->req.udc_priv & MSM_PIPE_ID_MASK)) {
-				if (mEp->dir == TX)
-					hw_cwrite(
-						CAP_ENDPTPIPEID +
-							mEp->num * sizeof(u32),
-						MSM_PIPE_ID_MASK <<
-							MSM_TX_PIPE_ID_OFS,
-						(mReq->req.udc_priv &
-						 MSM_PIPE_ID_MASK)
-							<< MSM_TX_PIPE_ID_OFS);
-				else
-					hw_cwrite(
-						CAP_ENDPTPIPEID +
-							mEp->num * sizeof(u32),
-						MSM_PIPE_ID_MASK,
-						mReq->req.udc_priv &
-							MSM_PIPE_ID_MASK);
-			}
-		}
-	}
-
-	mEp->qh.ptr->td.token &= ~TD_STATUS;   /* clear status */
-	mEp->qh.ptr->cap |=  QH_ZLT;
-
-prime:
-	/* Makes sure that above write goes through */
-	wmb();   /* synchronize before ep prime */
-
-	ret = hw_ep_prime(mEp->num, mEp->dir,
-			   mEp->type == USB_ENDPOINT_XFER_CONTROL);
-	if (!ret)
-		mod_timer(&mEp->prime_timer, EP_PRIME_CHECK_DELAY);
-
-done:
-	return ret;
-}
-
-/**
- * _hardware_dequeue: handles a request at hardware level
- * @gadget: gadget
- * @mEp:    endpoint
- *
- * This function returns an error code
- */
-static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
-{
-	trace("%pK, %pK", mEp, mReq);
-
-	if (mReq->req.status != -EALREADY)
-		return -EINVAL;
-
-	/* clean speculative fetches on req->ptr->token */
-	mb();
-
-	if ((TD_STATUS_ACTIVE & mReq->ptr->token) != 0)
-		return -EBUSY;
-
-	if (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID)
-		if ((mReq->req.udc_priv & MSM_SPS_MODE) &&
-			(mReq->req.udc_priv & MSM_IS_FINITE_TRANSFER))
-			return -EBUSY;
-	if (mReq->zptr) {
-		if ((TD_STATUS_ACTIVE & mReq->zptr->token) != 0)
-			return -EBUSY;
-
-		/* The controller may access this dTD one more time.
-		 * Defer freeing this to next zero length dTD completion.
-		 * It is safe to assume that controller will no longer
-		 * access the previous dTD after next dTD completion.
-		 */
-		if (mEp->last_zptr)
-			dma_pool_free(mEp->td_pool, mEp->last_zptr,
-					mEp->last_zdma);
-		mEp->last_zptr = mReq->zptr;
-		mEp->last_zdma = mReq->zdma;
-
-		mReq->zptr = NULL;
-	}
-
-	mReq->req.status = 0;
-
-	if (mReq->map) {
-		dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
-				 mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		mReq->req.dma = DMA_ERROR_CODE;
-		mReq->map     = 0;
-	}
-
-	mReq->req.status = mReq->ptr->token & TD_STATUS;
-	if ((TD_STATUS_HALTED & mReq->req.status) != 0)
-		mReq->req.status = -1;
-	else if ((TD_STATUS_DT_ERR & mReq->req.status) != 0)
-		mReq->req.status = -1;
-	else if ((TD_STATUS_TR_ERR & mReq->req.status) != 0)
-		mReq->req.status = -1;
-
-	mReq->req.actual   = mReq->ptr->token & TD_TOTAL_BYTES;
-	mReq->req.actual >>= ffs_nr(TD_TOTAL_BYTES);
-	mReq->req.actual   = mReq->req.length - mReq->req.actual;
-	mReq->req.actual   = mReq->req.status ? 0 : mReq->req.actual;
-
-	return mReq->req.actual;
-}
-
-/**
- * purge_rw_queue: Purge requests pending at the remote-wakeup
- * queue and send them to the HW.
- *
- * Go over all of the endpoints and push any pending requests to
- * the HW queue.
- */
-static void purge_rw_queue(struct ci13xxx *udc)
-{
-	int i;
-	struct ci13xxx_ep  *mEp  = NULL;
-	struct ci13xxx_req *mReq = NULL;
-
-	/*
-	 * Go over all of the endpoints and push any pending requests to
-	 * the HW queue.
-	 */
-	for (i = 0; i < hw_ep_max; i++) {
-		mEp = &udc->ci13xxx_ep[i];
-
-		while (!list_empty(&udc->ci13xxx_ep[i].rw_queue)) {
-			int retval;
-
-			/* pop oldest request */
-			mReq = list_entry(udc->ci13xxx_ep[i].rw_queue.next,
-					  struct ci13xxx_req, queue);
-
-			list_del_init(&mReq->queue);
-
-			retval = _hardware_enqueue(mEp, mReq);
-
-			if (retval != 0) {
-				dbg_event(_usb_addr(mEp), "QUEUE", retval);
-				mReq->req.status = retval;
-				if (mReq->req.complete != NULL) {
-					if (mEp->type ==
-					    USB_ENDPOINT_XFER_CONTROL)
-						mReq->req.complete(
-							&(_udc->ep0in.ep),
-							&mReq->req);
-					else
-						mReq->req.complete(
-							&mEp->ep,
-							&mReq->req);
-				}
-				retval = 0;
-			}
-
-			if (!retval)
-				list_add_tail(&mReq->queue, &mEp->qh.queue);
-			else if (mEp->multi_req)
-				mEp->multi_req = false;
-
-		}
-	}
-
-	udc->rw_pending = false;
-}
-
-/**
- * restore_original_req: Restore original req's attributes
- * @mReq: Request
- *
- * This function restores original req's attributes.  Call
- * this function before completing the large req (>16K).
- */
-static void restore_original_req(struct ci13xxx_req *mReq)
-{
-	mReq->req.buf = mReq->multi.buf;
-	mReq->req.length = mReq->multi.len;
-	if (!mReq->req.status)
-		mReq->req.actual = mReq->multi.actual;
-
-	mReq->multi.len = 0;
-	mReq->multi.actual = 0;
-	mReq->multi.buf = NULL;
-}
-
-/**
- * release_ep_request: Free and endpoint request and release
- * resources
- * @mReq: request
- * @mEp: endpoint
- *
- */
-static void release_ep_request(struct ci13xxx_ep  *mEp,
-			       struct ci13xxx_req *mReq)
-{
-	struct ci13xxx_ep *mEpTemp = mEp;
-
-	unsigned int val;
-
-	/* MSM Specific: Clear end point specific register */
-	if (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID) {
-		if (mReq->req.udc_priv & MSM_SPS_MODE) {
-			val = hw_cread(CAP_ENDPTPIPEID +
-				mEp->num * sizeof(u32),
-				~0);
-
-			if (val != MSM_EP_PIPE_ID_RESET_VAL)
-				hw_cwrite(
-					CAP_ENDPTPIPEID +
-					 mEp->num * sizeof(u32),
-					~0, MSM_EP_PIPE_ID_RESET_VAL);
-		}
-	}
-	mReq->req.status = -ESHUTDOWN;
-
-	if (mReq->map) {
-		dma_unmap_single(mEp->device, mReq->req.dma,
-			mReq->req.length,
-			mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		mReq->req.dma = DMA_ERROR_CODE;
-		mReq->map     = 0;
-	}
-
-	if (mReq->zptr) {
-		dma_pool_free(mEp->td_pool, mReq->zptr, mReq->zdma);
-		mReq->zptr = NULL;
-		mReq->zdma = 0;
-	}
-
-	if (mEp->multi_req) {
-		restore_original_req(mReq);
-		mEp->multi_req = false;
-	}
-
-	if (mReq->req.complete != NULL) {
-		spin_unlock(mEp->lock);
-		if ((mEp->type == USB_ENDPOINT_XFER_CONTROL) &&
-			mReq->req.length)
-			mEpTemp = &_udc->ep0in;
-		mReq->req.complete(&mEpTemp->ep, &mReq->req);
-		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
-			mReq->req.complete = NULL;
-		spin_lock(mEp->lock);
-	}
-}
-
-/**
- * _ep_nuke: dequeues all endpoint requests
- * @mEp: endpoint
- *
- * This function returns an error code
- * Caller must hold lock
- */
-static int _ep_nuke(struct ci13xxx_ep *mEp)
-__releases(mEp->lock)
-__acquires(mEp->lock)
-{
-	trace("%pK", mEp);
-
-	if (mEp == NULL)
-		return -EINVAL;
-
-	del_timer(&mEp->prime_timer);
-	mEp->prime_timer_count = 0;
-
-	hw_ep_flush(mEp->num, mEp->dir);
-
-	while (!list_empty(&mEp->qh.queue)) {
-		/* pop oldest request */
-		struct ci13xxx_req *mReq =
-			list_entry(mEp->qh.queue.next,
-				   struct ci13xxx_req, queue);
-		list_del_init(&mReq->queue);
-
-		release_ep_request(mEp, mReq);
-	}
-
-	/* Clear the requests pending at the remote-wakeup queue */
-	while (!list_empty(&mEp->rw_queue)) {
-
-		/* pop oldest request */
-		struct ci13xxx_req *mReq =
-			list_entry(mEp->rw_queue.next,
-				   struct ci13xxx_req, queue);
-
-		list_del_init(&mReq->queue);
-
-		release_ep_request(mEp, mReq);
-	}
-
-	if (mEp->last_zptr) {
-		dma_pool_free(mEp->td_pool, mEp->last_zptr, mEp->last_zdma);
-		mEp->last_zptr = NULL;
-		mEp->last_zdma = 0;
-	}
-
-	return 0;
-}
-
-/**
- * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
- * @gadget: gadget
- *
- * This function returns an error code
- */
-static int _gadget_stop_activity(struct usb_gadget *gadget)
-{
-	struct ci13xxx    *udc = container_of(gadget, struct ci13xxx, gadget);
-	unsigned long flags;
-
-	trace("%pK", gadget);
-
-	if (gadget == NULL)
-		return -EINVAL;
-
-	spin_lock_irqsave(udc->lock, flags);
-	udc->gadget.speed = USB_SPEED_UNKNOWN;
-	udc->gadget.remote_wakeup = 0;
-	udc->suspended = 0;
-	udc->configured = 0;
-	spin_unlock_irqrestore(udc->lock, flags);
-
-	udc->driver->disconnect(gadget);
-
-	spin_lock_irqsave(udc->lock, flags);
-	_ep_nuke(&udc->ep0out);
-	_ep_nuke(&udc->ep0in);
-	spin_unlock_irqrestore(udc->lock, flags);
-
-	return 0;
-}
-
-/******************************************************************************
- * ISR block
- *****************************************************************************/
-/**
- * isr_reset_handler: USB reset interrupt handler
- * @udc: UDC device
- *
- * This function resets USB engine after a bus reset occurred
- */
-static void isr_reset_handler(struct ci13xxx *udc)
-__releases(udc->lock)
-__acquires(udc->lock)
-{
-	int retval;
-
-	trace("%pK", udc);
-
-	if (udc == NULL) {
-		err("EINVAL");
-		return;
-	}
-
-	dbg_event(0xFF, "BUS RST", 0);
-
-	spin_unlock(udc->lock);
-
-	if (udc->suspended) {
-		if (udc->udc_driver->notify_event)
-			udc->udc_driver->notify_event(udc,
-			CI13XXX_CONTROLLER_RESUME_EVENT);
-		if (udc->transceiver)
-			usb_phy_set_suspend(udc->transceiver, 0);
-		udc->driver->resume(&udc->gadget);
-		udc->suspended = 0;
-	}
-
-	/*stop charging upon reset */
-	if (udc->transceiver)
-		usb_phy_set_power(udc->transceiver, 100);
-
-	retval = _gadget_stop_activity(&udc->gadget);
-	if (retval)
-		goto done;
-
-	if (udc->rw_pending)
-		purge_rw_queue(udc);
-
-	_udc->skip_flush = false;
-	retval = hw_usb_reset();
-	if (retval)
-		goto done;
-
-	spin_lock(udc->lock);
-
- done:
-	if (retval)
-		err("error: %i", retval);
-}
-
-/**
- * isr_resume_handler: USB PCI interrupt handler
- * @udc: UDC device
- *
- */
-static void isr_resume_handler(struct ci13xxx *udc)
-{
-	udc->gadget.speed = hw_port_is_high_speed() ?
-		USB_SPEED_HIGH : USB_SPEED_FULL;
-	if (udc->suspended) {
-		spin_unlock(udc->lock);
-		if (udc->udc_driver->notify_event)
-			udc->udc_driver->notify_event(udc,
-			  CI13XXX_CONTROLLER_RESUME_EVENT);
-		if (udc->transceiver)
-			usb_phy_set_suspend(udc->transceiver, 0);
-		udc->suspended = 0;
-		udc->driver->resume(&udc->gadget);
-		spin_lock(udc->lock);
-
-		if (udc->rw_pending)
-			purge_rw_queue(udc);
-
-	}
-}
-
-/**
- * isr_resume_handler: USB SLI interrupt handler
- * @udc: UDC device
- *
- */
-static void isr_suspend_handler(struct ci13xxx *udc)
-{
-	if (udc->gadget.speed != USB_SPEED_UNKNOWN &&
-		udc->vbus_active) {
-		if (udc->suspended == 0) {
-			spin_unlock(udc->lock);
-			udc->driver->suspend(&udc->gadget);
-			if (udc->udc_driver->notify_event)
-				udc->udc_driver->notify_event(udc,
-				CI13XXX_CONTROLLER_SUSPEND_EVENT);
-			if (udc->transceiver)
-				usb_phy_set_suspend(udc->transceiver, 1);
-			spin_lock(udc->lock);
-			udc->suspended = 1;
-		}
-	}
-}
-
-/**
- * isr_get_status_complete: get_status request complete function
- * @ep:  endpoint
- * @req: request handled
- *
- * Caller must release lock
- */
-static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
-{
-	trace("%pK, %pK", ep, req);
-
-	if (ep == NULL || req == NULL) {
-		err("EINVAL");
-		return;
-	}
-
-	if (req->status)
-		err("GET_STATUS failed");
-}
-
-/**
- * isr_get_status_response: get_status request response
- * @udc: udc struct
- * @setup: setup request packet
- *
- * This function returns an error code
- */
-static int isr_get_status_response(struct ci13xxx *udc,
-				   struct usb_ctrlrequest *setup)
-__releases(mEp->lock)
-__acquires(mEp->lock)
-{
-	struct ci13xxx_ep *mEp = &udc->ep0in;
-	struct usb_request *req = udc->status;
-	int dir, num, retval;
-
-	trace("%pK, %pK", mEp, setup);
-
-	if (mEp == NULL || setup == NULL)
-		return -EINVAL;
-
-	req->complete = isr_get_status_complete;
-	req->length   = 2;
-	req->buf      = udc->status_buf;
-
-	if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
-		/* Assume that device is bus powered for now. */
-		*((u16 *)req->buf) = _udc->gadget.remote_wakeup << 1;
-		retval = 0;
-	} else if ((setup->bRequestType & USB_RECIP_MASK) ==
-							USB_RECIP_ENDPOINT) {
-		dir = (le16_to_cpu(setup->wIndex) & USB_ENDPOINT_DIR_MASK) ?
-			TX : RX;
-		num =  le16_to_cpu(setup->wIndex) & USB_ENDPOINT_NUMBER_MASK;
-		*((u16 *)req->buf) = hw_ep_get_halt(num, dir);
-	}
-	/* else do nothing; reserved for future use */
-
-	spin_unlock(mEp->lock);
-	retval = usb_ep_queue(&mEp->ep, req, GFP_ATOMIC);
-	spin_lock(mEp->lock);
-	return retval;
-}
-
-/**
- * isr_setup_status_complete: setup_status request complete function
- * @ep:  endpoint
- * @req: request handled
- *
- * Caller must release lock. Put the port in test mode if test mode
- * feature is selected.
- */
-static void
-isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
-{
-	struct ci13xxx *udc = req->context;
-	unsigned long flags;
-
-	trace("%pK, %pK", ep, req);
-
-	spin_lock_irqsave(udc->lock, flags);
-	if (udc->test_mode)
-		hw_port_test_set(udc->test_mode);
-	spin_unlock_irqrestore(udc->lock, flags);
-}
-
-/**
- * isr_setup_status_phase: queues the status phase of a setup transation
- * @udc: udc struct
- *
- * This function returns an error code
- */
-static int isr_setup_status_phase(struct ci13xxx *udc)
-__releases(mEp->lock)
-__acquires(mEp->lock)
-{
-	int retval;
-	struct ci13xxx_ep *mEp;
-
-	trace("%pK", udc);
-
-	mEp = (udc->ep0_dir == TX) ? &udc->ep0out : &udc->ep0in;
-	udc->status->context = udc;
-	udc->status->complete = isr_setup_status_complete;
-	udc->status->length = 0;
-
-	spin_unlock(mEp->lock);
-	retval = usb_ep_queue(&mEp->ep, udc->status, GFP_ATOMIC);
-	spin_lock(mEp->lock);
-
-	return retval;
-}
-
-/**
- * isr_tr_complete_low: transaction complete low level handler
- * @mEp: endpoint
- *
- * This function returns an error code
- * Caller must hold lock
- */
-static int isr_tr_complete_low(struct ci13xxx_ep *mEp)
-__releases(mEp->lock)
-__acquires(mEp->lock)
-{
-	struct ci13xxx_req *mReq, *mReqTemp;
-	struct ci13xxx_ep *mEpTemp = mEp;
-	int retval = 0;
-	int req_dequeue = 1;
-	struct ci13xxx *udc = _udc;
-
-	trace("%pK", mEp);
-
-	if (list_empty(&mEp->qh.queue))
-		return 0;
-
-	del_timer(&mEp->prime_timer);
-	mEp->prime_timer_count = 0;
-	list_for_each_entry_safe(mReq, mReqTemp, &mEp->qh.queue,
-			queue) {
-dequeue:
-		retval = _hardware_dequeue(mEp, mReq);
-		if (retval < 0) {
-			/*
-			 * FIXME: don't know exact delay
-			 * required for HW to update dTD status
-			 * bits. This is a temporary workaround till
-			 * HW designers come back on this.
-			 */
-			if (retval == -EBUSY && req_dequeue &&
-				(mEp->dir == 0 || mEp->num == 0)) {
-				req_dequeue = 0;
-				udc->dTD_update_fail_count++;
-				mEp->dTD_update_fail_count++;
-				udelay(10);
-				goto dequeue;
-			}
-			break;
-		}
-		req_dequeue = 0;
-
-		if (mEp->multi_req) { /* Large request in progress */
-			unsigned int remain_len;
-
-			mReq->multi.actual += mReq->req.actual;
-			remain_len = mReq->multi.len - mReq->multi.actual;
-			if (mReq->req.status || !remain_len ||
-				(mReq->req.actual != mReq->req.length)) {
-				restore_original_req(mReq);
-				mEp->multi_req = false;
-			} else {
-				mReq->req.buf = mReq->multi.buf +
-						mReq->multi.actual;
-				mReq->req.length = min_t(unsigned int,
-							remain_len,
-							4 * CI13XXX_PAGE_SIZE);
-
-				mReq->req.status = -EINPROGRESS;
-				mReq->req.actual = 0;
-				list_del_init(&mReq->queue);
-				retval = _hardware_enqueue(mEp, mReq);
-				if (retval) {
-					err("Large req failed in middle");
-					mReq->req.status = retval;
-					restore_original_req(mReq);
-					mEp->multi_req = false;
-					goto done;
-				} else {
-					list_add_tail(&mReq->queue,
-						&mEp->qh.queue);
-					return 0;
-				}
-			}
-		}
-		list_del_init(&mReq->queue);
-done:
-
-		dbg_done(_usb_addr(mEp), mReq->ptr->token, retval);
-
-		if (mReq->req.complete != NULL) {
-			spin_unlock(mEp->lock);
-			if ((mEp->type == USB_ENDPOINT_XFER_CONTROL) &&
-					mReq->req.length)
-				mEpTemp = &_udc->ep0in;
-			mReq->req.complete(&mEpTemp->ep, &mReq->req);
-			spin_lock(mEp->lock);
-		}
-	}
-
-	if (retval == -EBUSY)
-		retval = 0;
-	if (retval < 0)
-		dbg_event(_usb_addr(mEp), "DONE", retval);
-
-	return retval;
-}
-
-/**
- * isr_tr_complete_handler: transaction complete interrupt handler
- * @udc: UDC descriptor
- *
- * This function handles traffic events
- */
-static void isr_tr_complete_handler(struct ci13xxx *udc)
-__releases(udc->lock)
-__acquires(udc->lock)
-{
-	unsigned int i;
-	u8 tmode = 0;
-
-	trace("%pK", udc);
-
-	if (udc == NULL) {
-		err("EINVAL");
-		return;
-	}
-
-	for (i = 0; i < hw_ep_max; i++) {
-		struct ci13xxx_ep *mEp  = &udc->ci13xxx_ep[i];
-		int type, num, dir, err = -EINVAL;
-		struct usb_ctrlrequest req;
-
-		if (mEp->desc == NULL)
-			continue;   /* not configured */
-
-		if (hw_test_and_clear_complete(i)) {
-			err = isr_tr_complete_low(mEp);
-			if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
-				if (err > 0)   /* needs status phase */
-					err = isr_setup_status_phase(udc);
-				if (err < 0) {
-					dbg_event(_usb_addr(mEp),
-						  "ERROR", err);
-					spin_unlock(udc->lock);
-					if (usb_ep_set_halt(&mEp->ep))
-						err("error: ep_set_halt");
-					spin_lock(udc->lock);
-				}
-			}
-		}
-
-		if (mEp->type != USB_ENDPOINT_XFER_CONTROL ||
-		    !hw_test_and_clear_setup_status(i))
-			continue;
-
-		if (i != 0) {
-			warn("ctrl traffic received at endpoint");
-			continue;
-		}
-
-		/*
-		 * Flush data and handshake transactions of previous
-		 * setup packet.
-		 */
-		_ep_nuke(&udc->ep0out);
-		_ep_nuke(&udc->ep0in);
-
-		/* read_setup_packet */
-		do {
-			hw_test_and_set_setup_guard();
-			memcpy(&req, &mEp->qh.ptr->setup, sizeof(req));
-			/* Ensure buffer is read before acknowledging to h/w */
-			mb();
-		} while (!hw_test_and_clear_setup_guard());
-
-		type = req.bRequestType;
-
-		udc->ep0_dir = (type & USB_DIR_IN) ? TX : RX;
-
-		dbg_setup(_usb_addr(mEp), &req);
-
-		switch (req.bRequest) {
-		case USB_REQ_CLEAR_FEATURE:
-			if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
-					le16_to_cpu(req.wValue) ==
-					USB_ENDPOINT_HALT) {
-				if (req.wLength != 0)
-					break;
-				num  = le16_to_cpu(req.wIndex);
-				dir = num & USB_ENDPOINT_DIR_MASK;
-				num &= USB_ENDPOINT_NUMBER_MASK;
-				if (dir) /* TX */
-					num += hw_ep_max/2;
-				if (!udc->ci13xxx_ep[num].wedge) {
-					spin_unlock(udc->lock);
-					err = usb_ep_clear_halt(
-						&udc->ci13xxx_ep[num].ep);
-					spin_lock(udc->lock);
-					if (err)
-						break;
-				}
-				err = isr_setup_status_phase(udc);
-			} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE) &&
-					le16_to_cpu(req.wValue) ==
-					USB_DEVICE_REMOTE_WAKEUP) {
-				if (req.wLength != 0)
-					break;
-				udc->gadget.remote_wakeup = 0;
-				err = isr_setup_status_phase(udc);
-			} else {
-				goto delegate;
-			}
-			break;
-		case USB_REQ_GET_STATUS:
-			if (type != (USB_DIR_IN|USB_RECIP_DEVICE)   &&
-			    type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
-			    type != (USB_DIR_IN|USB_RECIP_INTERFACE))
-				goto delegate;
-			if (le16_to_cpu(req.wLength) != 2 ||
-			    le16_to_cpu(req.wValue)  != 0)
-				break;
-			err = isr_get_status_response(udc, &req);
-			break;
-		case USB_REQ_SET_ADDRESS:
-			if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
-				goto delegate;
-			if (le16_to_cpu(req.wLength) != 0 ||
-			    le16_to_cpu(req.wIndex)  != 0)
-				break;
-			err = hw_usb_set_address((u8)le16_to_cpu(req.wValue));
-			if (err)
-				break;
-			err = isr_setup_status_phase(udc);
-			break;
-		case USB_REQ_SET_CONFIGURATION:
-			if (type == (USB_DIR_OUT|USB_TYPE_STANDARD))
-				udc->configured = !!req.wValue;
-			goto delegate;
-		case USB_REQ_SET_FEATURE:
-			if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
-					le16_to_cpu(req.wValue) ==
-					USB_ENDPOINT_HALT) {
-				if (req.wLength != 0)
-					break;
-				num  = le16_to_cpu(req.wIndex);
-				dir = num & USB_ENDPOINT_DIR_MASK;
-				num &= USB_ENDPOINT_NUMBER_MASK;
-				if (dir) /* TX */
-					num += hw_ep_max/2;
-
-				spin_unlock(udc->lock);
-				err = usb_ep_set_halt(&udc->ci13xxx_ep[num].ep);
-				spin_lock(udc->lock);
-				if (!err)
-					isr_setup_status_phase(udc);
-			} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE)) {
-				if (req.wLength != 0)
-					break;
-				switch (le16_to_cpu(req.wValue)) {
-				case USB_DEVICE_REMOTE_WAKEUP:
-					udc->gadget.remote_wakeup = 1;
-					err = isr_setup_status_phase(udc);
-					break;
-				case USB_DEVICE_TEST_MODE:
-					tmode = le16_to_cpu(req.wIndex) >> 8;
-					switch (tmode) {
-					case TEST_J:
-					case TEST_K:
-					case TEST_SE0_NAK:
-					case TEST_PACKET:
-					case TEST_FORCE_EN:
-						udc->test_mode = tmode;
-						err = isr_setup_status_phase(
-								udc);
-						break;
-					default:
-						break;
-					}
-				default:
-					goto delegate;
-				}
-			} else {
-				goto delegate;
-			}
-			break;
-		default:
-delegate:
-			if (req.wLength == 0)   /* no data phase */
-				udc->ep0_dir = TX;
-
-			spin_unlock(udc->lock);
-			err = udc->driver->setup(&udc->gadget, &req);
-			spin_lock(udc->lock);
-			break;
-		}
-
-		if (err < 0) {
-			dbg_event(_usb_addr(mEp), "ERROR", err);
-
-			spin_unlock(udc->lock);
-			if (usb_ep_set_halt(&mEp->ep))
-				err("error: ep_set_halt");
-			spin_lock(udc->lock);
-		}
-	}
-}
-
-/******************************************************************************
- * ENDPT block
- *****************************************************************************/
-/**
- * ep_enable: configure endpoint, making it usable
- *
- * Check usb_ep_enable() at "usb_gadget.h" for details
- */
-static int ep_enable(struct usb_ep *ep,
-		     const struct usb_endpoint_descriptor *desc)
-{
-	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
-	int retval = 0;
-	unsigned long flags;
-	unsigned int mult = 0;
-
-	trace("ep = %pK, desc = %pK", ep, desc);
-
-	if (ep == NULL || desc == NULL)
-		return -EINVAL;
-
-	spin_lock_irqsave(mEp->lock, flags);
-
-	/* only internal SW should enable ctrl endpts */
-
-	mEp->desc = desc;
-
-	if (!list_empty(&mEp->qh.queue))
-		warn("enabling a non-empty endpoint!");
-
-	mEp->dir  = usb_endpoint_dir_in(desc) ? TX : RX;
-	mEp->num  = usb_endpoint_num(desc);
-	mEp->type = usb_endpoint_type(desc);
-
-	mEp->ep.maxpacket = usb_endpoint_maxp(desc);
-
-	dbg_event(_usb_addr(mEp), "ENABLE", 0);
-
-	mEp->qh.ptr->cap = 0;
-
-	if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
-		mEp->qh.ptr->cap |=  QH_IOS;
-	} else if (mEp->type == USB_ENDPOINT_XFER_ISOC) {
-		mEp->qh.ptr->cap &= ~QH_MULT;
-		mult = ((mEp->ep.maxpacket >> QH_MULT_SHIFT) + 1) & 0x03;
-		mEp->qh.ptr->cap |= (mult << ffs_nr(QH_MULT));
-	} else {
-		mEp->qh.ptr->cap |= QH_ZLT;
-	}
-
-	mEp->qh.ptr->cap |=
-		(mEp->ep.maxpacket << ffs_nr(QH_MAX_PKT)) & QH_MAX_PKT;
-	mEp->qh.ptr->td.next |= TD_TERMINATE;   /* needed? */
-
-	/* complete all the updates to ept->head before enabling endpoint*/
-	mb();
-
-	/*
-	 * Enable endpoints in the HW other than ep0 as ep0
-	 * is always enabled
-	 */
-	if (mEp->num)
-		retval |= hw_ep_enable(mEp->num, mEp->dir, mEp->type);
-
-	spin_unlock_irqrestore(mEp->lock, flags);
-	return retval;
-}
-
-/**
- * ep_disable: endpoint is no longer usable
- *
- * Check usb_ep_disable() at "usb_gadget.h" for details
- */
-static int ep_disable(struct usb_ep *ep)
-{
-	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
-	int direction, retval = 0;
-	unsigned long flags;
-
-	trace("%pK", ep);
-
-	if (ep == NULL)
-		return -EINVAL;
-	else if (mEp->desc == NULL)
-		return -EBUSY;
-
-	spin_lock_irqsave(mEp->lock, flags);
-
-	/* only internal SW should disable ctrl endpts */
-
-	direction = mEp->dir;
-	do {
-		dbg_event(_usb_addr(mEp), "DISABLE", 0);
-
-		retval |= _ep_nuke(mEp);
-		retval |= hw_ep_disable(mEp->num, mEp->dir);
-
-		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
-			mEp->dir = (mEp->dir == TX) ? RX : TX;
-
-	} while (mEp->dir != direction);
-
-	mEp->desc = NULL;
-	mEp->ep.desc = NULL;
-	mEp->ep.maxpacket = USHRT_MAX;
-
-	spin_unlock_irqrestore(mEp->lock, flags);
-	return retval;
-}
-
-/**
- * ep_alloc_request: allocate a request object to use with this endpoint
- *
- * Check usb_ep_alloc_request() at "usb_gadget.h" for details
- */
-static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
-{
-	struct ci13xxx_ep  *mEp  = container_of(ep, struct ci13xxx_ep, ep);
-	struct ci13xxx_req *mReq = NULL;
-
-	trace("%pK, %i", ep, gfp_flags);
-
-	if (ep == NULL) {
-		err("EINVAL");
-		return NULL;
-	}
-
-	mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags);
-	if (mReq != NULL) {
-		INIT_LIST_HEAD(&mReq->queue);
-		mReq->req.dma = DMA_ERROR_CODE;
-
-		mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags,
-					   &mReq->dma);
-		if (mReq->ptr == NULL) {
-			kfree(mReq);
-			mReq = NULL;
-		}
-	}
-
-	dbg_event(_usb_addr(mEp), "ALLOC", mReq == NULL);
-
-	return (mReq == NULL) ? NULL : &mReq->req;
-}
-
-/**
- * ep_free_request: frees a request object
- *
- * Check usb_ep_free_request() at "usb_gadget.h" for details
- */
-static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
-{
-	struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
-	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
-	unsigned long flags;
-
-	trace("%pK, %pK", ep, req);
-
-	if (ep == NULL || req == NULL) {
-		err("EINVAL");
-		return;
-	} else if (!list_empty(&mReq->queue)) {
-		err("EBUSY");
-		return;
-	}
-
-	spin_lock_irqsave(mEp->lock, flags);
-
-	if (mReq->ptr)
-		dma_pool_free(mEp->td_pool, mReq->ptr, mReq->dma);
-	kfree(mReq);
-
-	dbg_event(_usb_addr(mEp), "FREE", 0);
-
-	spin_unlock_irqrestore(mEp->lock, flags);
-}
-
-/**
- * ep_queue: queues (submits) an I/O request to an endpoint
- *
- * Check usb_ep_queue()* at usb_gadget.h" for details
- */
-static int ep_queue(struct usb_ep *ep, struct usb_request *req,
-		    gfp_t __maybe_unused gfp_flags)
-{
-	struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
-	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
-	int retval = 0;
-	unsigned long flags;
-	struct ci13xxx *udc = _udc;
-
-	trace("%pK, %pK, %X", ep, req, gfp_flags);
-
-	if (ep == NULL)
-		return -EINVAL;
-
-	spin_lock_irqsave(mEp->lock, flags);
-	if (req == NULL || mEp->desc == NULL) {
-		retval = -EINVAL;
-		goto done;
-	}
-
-	if (!udc->softconnect) {
-		retval = -ENODEV;
-		goto done;
-	}
-
-	if (!udc->configured && mEp->type !=
-		USB_ENDPOINT_XFER_CONTROL) {
-		trace("usb is not configured ept #%d, ept name#%s\n",
-			mEp->num, mEp->ep.name);
-		retval = -ESHUTDOWN;
-		goto done;
-	}
-
-	if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
-		if (req->length)
-			mEp = (_udc->ep0_dir == RX) ?
-				&_udc->ep0out : &_udc->ep0in;
-		if (!list_empty(&mEp->qh.queue)) {
-			_ep_nuke(mEp);
-			retval = -EOVERFLOW;
-			warn("endpoint ctrl %X nuked", _usb_addr(mEp));
-		}
-	}
-
-	if (ep->endless && udc->gadget.speed == USB_SPEED_FULL) {
-		err("Queueing endless req is not supported for FS");
-		retval = -EINVAL;
-		goto done;
-	}
-
-	/* first nuke then test link, e.g. previous status has not sent */
-	if (!list_empty(&mReq->queue)) {
-		retval = -EBUSY;
-		err("request already in queue");
-		goto done;
-	}
-	if (mEp->multi_req) {
-		retval = -EAGAIN;
-		err("Large request is in progress. come again");
-		goto done;
-	}
-
-	if (req->length > (4 * CI13XXX_PAGE_SIZE)) {
-		if (!list_empty(&mEp->qh.queue)) {
-			retval = -EAGAIN;
-			err("Queue is busy. Large req is not allowed");
-			goto done;
-		}
-		if ((mEp->type != USB_ENDPOINT_XFER_BULK) ||
-				(mEp->dir != RX)) {
-			retval = -EINVAL;
-			err("Larger req is supported only for Bulk OUT");
-			goto done;
-		}
-		mEp->multi_req = true;
-		mReq->multi.len = req->length;
-		mReq->multi.buf = req->buf;
-		req->length = (4 * CI13XXX_PAGE_SIZE);
-	}
-
-	dbg_queue(_usb_addr(mEp), req, retval);
-
-	/* push request */
-	mReq->req.status = -EINPROGRESS;
-	mReq->req.actual = 0;
-
-	if (udc->rw_pending) {
-		list_add_tail(&mReq->queue, &mEp->rw_queue);
-		retval = 0;
-		goto done;
-	}
-
-	if (udc->suspended) {
-		/* Remote Wakeup */
-		if (!udc->gadget.remote_wakeup) {
-
-			dev_dbg(mEp->device, "%s: queue failed (suspend).",
-					__func__);
-			dev_dbg(mEp->device, "%s: Remote wakeup is not supported. ept #%d\n",
-					__func__, mEp->num);
-			mEp->multi_req = false;
-
-			retval = -EAGAIN;
-			goto done;
-		}
-
-		list_add_tail(&mReq->queue, &mEp->rw_queue);
-
-		udc->rw_pending = true;
-		schedule_delayed_work(&udc->rw_work,
-				      REMOTE_WAKEUP_DELAY);
-
-		retval = 0;
-		goto done;
-	}
-
-	retval = _hardware_enqueue(mEp, mReq);
-
-	if (retval == -EALREADY) {
-		dbg_event(_usb_addr(mEp), "QUEUE", retval);
-		retval = 0;
-	}
-	if (!retval)
-		list_add_tail(&mReq->queue, &mEp->qh.queue);
-	else if (mEp->multi_req)
-		mEp->multi_req = false;
-
- done:
-	spin_unlock_irqrestore(mEp->lock, flags);
-	return retval;
-}
-
-/**
- * ep_dequeue: dequeues (cancels, unlinks) an I/O request from an endpoint
- *
- * Check usb_ep_dequeue() at "usb_gadget.h" for details
- */
-static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
-{
-	struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
-	struct ci13xxx_ep *mEpTemp = mEp;
-	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
-	struct ci13xxx *udc = _udc;
-	unsigned long flags;
-
-	trace("%pK, %pK", ep, req);
-
-	if (udc->udc_driver->in_lpm && udc->udc_driver->in_lpm(udc)) {
-		dev_err(udc->transceiver->dev,
-				"%s: Unable to dequeue while in LPM\n",
-				__func__);
-		return -EAGAIN;
-	}
-
-	if (ep == NULL)
-		return -EINVAL;
-
-	spin_lock_irqsave(mEp->lock, flags);
-	/*
-	 * Only ep0 IN is exposed to composite.  When a req is dequeued
-	 * on ep0, check both ep0 IN and ep0 OUT queues.
-	 */
-	if (req == NULL || mReq->req.status != -EALREADY ||
-		mEp->desc == NULL || list_empty(&mReq->queue) ||
-		(list_empty(&mEp->qh.queue) && ((mEp->type !=
-			USB_ENDPOINT_XFER_CONTROL) ||
-			list_empty(&_udc->ep0out.qh.queue)))) {
-		spin_unlock_irqrestore(mEp->lock, flags);
-		return -EINVAL;
-	}
-
-	dbg_event(_usb_addr(mEp), "DEQUEUE", 0);
-
-	if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
-		hw_ep_flush(_udc->ep0out.num, RX);
-		hw_ep_flush(_udc->ep0in.num, TX);
-	} else {
-		hw_ep_flush(mEp->num, mEp->dir);
-	}
-
-	/* pop request */
-	list_del_init(&mReq->queue);
-	if (mReq->map) {
-		dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
-				 mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		mReq->req.dma = DMA_ERROR_CODE;
-		mReq->map     = 0;
-	}
-	req->status = -ECONNRESET;
-
-	if (mEp->last_zptr) {
-		dma_pool_free(mEp->td_pool, mEp->last_zptr, mEp->last_zdma);
-		mEp->last_zptr = NULL;
-		mEp->last_zdma = 0;
-	}
-
-	if (mReq->zptr) {
-		dma_pool_free(mEp->td_pool, mReq->zptr, mReq->zdma);
-		mReq->zptr = NULL;
-		mReq->zdma = 0;
-	}
-
-	if (mEp->multi_req) {
-		restore_original_req(mReq);
-		mEp->multi_req = false;
-	}
-
-	if (mReq->req.complete != NULL) {
-		spin_unlock(mEp->lock);
-		if ((mEp->type == USB_ENDPOINT_XFER_CONTROL) &&
-				mReq->req.length)
-			mEpTemp = &_udc->ep0in;
-		mReq->req.complete(&mEpTemp->ep, &mReq->req);
-		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
-			mReq->req.complete = NULL;
-		spin_lock(mEp->lock);
-	}
-
-	spin_unlock_irqrestore(mEp->lock, flags);
-	return 0;
-}
-
-static int is_sps_req(struct ci13xxx_req *mReq)
-{
-	return (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID &&
-			mReq->req.udc_priv & MSM_SPS_MODE);
-}
-
-/**
- * ep_set_halt: sets the endpoint halt feature
- *
- * Check usb_ep_set_halt() at "usb_gadget.h" for details
- */
-static int ep_set_halt(struct usb_ep *ep, int value)
-{
-	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
-	struct ci13xxx *udc = _udc;
-	int direction, retval = 0;
-	unsigned long flags;
-
-	trace("%pK, %i", ep, value);
-
-	if (ep == NULL || mEp->desc == NULL)
-		return -EINVAL;
-
-	if (udc->suspended) {
-		dev_err(udc->transceiver->dev,
-			"%s: Unable to halt EP while suspended\n", __func__);
-		return -EINVAL;
-	}
-
-	spin_lock_irqsave(mEp->lock, flags);
-
-#ifndef STALL_IN
-	/* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
-	if (value && mEp->type == USB_ENDPOINT_XFER_BULK && mEp->dir == TX &&
-		!list_empty(&mEp->qh.queue) &&
-		!is_sps_req(list_entry(mEp->qh.queue.next, struct ci13xxx_req,
-							   queue))){
-		spin_unlock_irqrestore(mEp->lock, flags);
-		return -EAGAIN;
-	}
-#endif
-
-	direction = mEp->dir;
-	do {
-		dbg_event(_usb_addr(mEp), "HALT", value);
-		retval |= hw_ep_set_halt(mEp->num, mEp->dir, value);
-
-		if (!value)
-			mEp->wedge = 0;
-
-		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
-			mEp->dir = (mEp->dir == TX) ? RX : TX;
-
-	} while (mEp->dir != direction);
-
-	spin_unlock_irqrestore(mEp->lock, flags);
-	return retval;
-}
-
-/**
- * ep_set_wedge: sets the halt feature and ignores clear requests
- *
- * Check usb_ep_set_wedge() at "usb_gadget.h" for details
- */
-static int ep_set_wedge(struct usb_ep *ep)
-{
-	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
-	unsigned long flags;
-
-	trace("%pK", ep);
-
-	if (ep == NULL || mEp->desc == NULL)
-		return -EINVAL;
-
-	spin_lock_irqsave(mEp->lock, flags);
-
-	dbg_event(_usb_addr(mEp), "WEDGE", 0);
-	mEp->wedge = 1;
-
-	spin_unlock_irqrestore(mEp->lock, flags);
-
-	return usb_ep_set_halt(ep);
-}
-
-/**
- * ep_fifo_flush: flushes contents of a fifo
- *
- * Check usb_ep_fifo_flush() at "usb_gadget.h" for details
- */
-static void ep_fifo_flush(struct usb_ep *ep)
-{
-	struct ci13xxx *udc = _udc;
-	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
-	unsigned long flags;
-
-	trace("%pK", ep);
-
-	if (ep == NULL) {
-		err("%02X: -EINVAL", _usb_addr(mEp));
-		return;
-	}
-
-	if (udc->udc_driver->in_lpm && udc->udc_driver->in_lpm(udc)) {
-		dev_err(udc->transceiver->dev,
-				"%s: Unable to fifo_flush while in LPM\n",
-				__func__);
-		return;
-	}
-
-	spin_lock_irqsave(mEp->lock, flags);
-
-	dbg_event(_usb_addr(mEp), "FFLUSH", 0);
-	/*
-	 * _ep_nuke() takes care of flushing the endpoint.
-	 * some function drivers expect udc to retire all
-	 * pending requests upon flushing an endpoint.  There
-	 * is no harm in doing it.
-	 */
-	_ep_nuke(mEp);
-
-	spin_unlock_irqrestore(mEp->lock, flags);
-}
-
-/**
- * Endpoint-specific part of the API to the USB controller hardware
- * Check "usb_gadget.h" for details
- */
-static const struct usb_ep_ops usb_ep_ops = {
-	.enable	       = ep_enable,
-	.disable       = ep_disable,
-	.alloc_request = ep_alloc_request,
-	.free_request  = ep_free_request,
-	.queue	       = ep_queue,
-	.dequeue       = ep_dequeue,
-	.set_halt      = ep_set_halt,
-	.set_wedge     = ep_set_wedge,
-	.fifo_flush    = ep_fifo_flush,
-};
-
-/******************************************************************************
- * GADGET block
- *****************************************************************************/
-static int ci13xxx_vbus_session(struct usb_gadget *_gadget, int is_active)
-{
-	struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
-	unsigned long flags;
-	int gadget_ready = 0;
-
-	if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS))
-		return -EOPNOTSUPP;
-
-	spin_lock_irqsave(udc->lock, flags);
-	udc->vbus_active = is_active;
-	if (udc->driver)
-		gadget_ready = 1;
-	spin_unlock_irqrestore(udc->lock, flags);
-
-	if (!gadget_ready)
-		return 0;
-
-	if (is_active) {
-		hw_device_reset(udc);
-		if (udc->udc_driver->notify_event)
-			udc->udc_driver->notify_event(udc,
-				CI13XXX_CONTROLLER_CONNECT_EVENT);
-		/* Enable BAM (if needed) before starting controller */
-		if (udc->softconnect) {
-			dbg_event(0xFF, "BAM EN2",
-				_gadget->bam2bam_func_enabled);
-			msm_usb_bam_enable(CI_CTRL,
-				_gadget->bam2bam_func_enabled);
-			hw_device_state(udc->ep0out.qh.dma);
-		}
-	} else {
-		hw_device_state(0);
-		_gadget_stop_activity(&udc->gadget);
-		if (udc->udc_driver->notify_event)
-			udc->udc_driver->notify_event(udc,
-				CI13XXX_CONTROLLER_DISCONNECT_EVENT);
-	}
-
-	return 0;
-}
-
-#define VBUS_DRAW_BUF_LEN 10
-#define MAX_OVERRIDE_VBUS_ALLOWED 900	/* 900 mA */
-static char vbus_draw_mA[VBUS_DRAW_BUF_LEN];
-module_param_string(vbus_draw_mA, vbus_draw_mA, VBUS_DRAW_BUF_LEN, 0644);
-
-static int ci13xxx_vbus_draw(struct usb_gadget *_gadget, unsigned int mA)
-{
-	struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
-	unsigned int override_mA = 0;
-
-	/* override param to draw more current if battery draining faster */
-	if ((mA == CONFIG_USB_GADGET_VBUS_DRAW) &&
-		(vbus_draw_mA[0] != '\0')) {
-		if ((!kstrtoint(vbus_draw_mA, 10, &override_mA)) &&
-				(override_mA <= MAX_OVERRIDE_VBUS_ALLOWED)) {
-			mA = override_mA;
-		}
-	}
-
-	if (udc->transceiver)
-		return usb_phy_set_power(udc->transceiver, mA);
-	return -ENOTSUPP;
-}
-
-static int ci13xxx_pullup(struct usb_gadget *_gadget, int is_active)
-{
-	struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
-	unsigned long flags;
-
-	spin_lock_irqsave(udc->lock, flags);
-	udc->softconnect = is_active;
-	if (((udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) &&
-			!udc->vbus_active) || !udc->driver) {
-		spin_unlock_irqrestore(udc->lock, flags);
-		return 0;
-	}
-	spin_unlock_irqrestore(udc->lock, flags);
-
-	pm_runtime_get_sync(&_gadget->dev);
-
-	/* Enable BAM (if needed) before starting controller */
-	if (is_active) {
-		dbg_event(0xFF, "BAM EN1", _gadget->bam2bam_func_enabled);
-		msm_usb_bam_enable(CI_CTRL, _gadget->bam2bam_func_enabled);
-	}
-
-	spin_lock_irqsave(udc->lock, flags);
-	if (!udc->vbus_active) {
-		spin_unlock_irqrestore(udc->lock, flags);
-		pm_runtime_put_sync(&_gadget->dev);
-		return 0;
-	}
-	if (is_active) {
-		spin_unlock(udc->lock);
-		if (udc->udc_driver->notify_event)
-			udc->udc_driver->notify_event(udc,
-				CI13XXX_CONTROLLER_CONNECT_EVENT);
-		spin_lock(udc->lock);
-		hw_device_state(udc->ep0out.qh.dma);
-	} else {
-		hw_device_state(0);
-	}
-	spin_unlock_irqrestore(udc->lock, flags);
-
-	pm_runtime_mark_last_busy(&_gadget->dev);
-	pm_runtime_put_autosuspend(&_gadget->dev);
-
-	return 0;
-}
-
-static int ci13xxx_start(struct usb_gadget *gadget,
-			 struct usb_gadget_driver *driver);
-static int ci13xxx_stop(struct usb_gadget *gadget);
-
-/**
- * Device operations part of the API to the USB controller hardware,
- * which don't involve endpoints (or i/o)
- * Check  "usb_gadget.h" for details
- */
-static const struct usb_gadget_ops usb_gadget_ops = {
-	.vbus_session	= ci13xxx_vbus_session,
-	.wakeup		= ci13xxx_wakeup,
-	.vbus_draw	= ci13xxx_vbus_draw,
-	.pullup		= ci13xxx_pullup,
-	.udc_start	= ci13xxx_start,
-	.udc_stop	= ci13xxx_stop,
-};
-
-/**
- * ci13xxx_start: register a gadget driver
- * @gadget: our gadget
- * @driver: the driver being registered
- *
- * Interrupts are enabled here.
- */
-static int ci13xxx_start(struct usb_gadget *gadget,
-			 struct usb_gadget_driver *driver)
-{
-	struct ci13xxx *udc = _udc;
-	unsigned long flags;
-	int retval = -ENOMEM;
-
-	trace("%pK", driver);
-
-	if (driver             == NULL ||
-	    driver->setup      == NULL ||
-	    driver->disconnect == NULL)
-		return -EINVAL;
-	else if (udc         == NULL)
-		return -ENODEV;
-	else if (udc->driver != NULL)
-		return -EBUSY;
-
-	spin_lock_irqsave(udc->lock, flags);
-
-	info("hw_ep_max = %d", hw_ep_max);
-
-	udc->gadget.dev.driver = NULL;
-
-	spin_unlock_irqrestore(udc->lock, flags);
-
-	pm_runtime_get_sync(&udc->gadget.dev);
-
-	udc->ep0out.ep.desc = &ctrl_endpt_out_desc;
-	retval = usb_ep_enable(&udc->ep0out.ep);
-	if (retval)
-		goto pm_put;
-
-	udc->ep0in.ep.desc = &ctrl_endpt_in_desc;
-	retval = usb_ep_enable(&udc->ep0in.ep);
-	if (retval)
-		goto pm_put;
-	udc->status = usb_ep_alloc_request(&udc->ep0in.ep, GFP_KERNEL);
-	if (!udc->status) {
-		retval = -ENOMEM;
-		goto pm_put;
-	}
-
-	udc->status_buf = kzalloc(2 + udc->gadget.extra_buf_alloc,
-				GFP_KERNEL); /* for GET_STATUS */
-	if (!udc->status_buf) {
-		usb_ep_free_request(&udc->ep0in.ep, udc->status);
-		retval = -ENOMEM;
-		goto pm_put;
-	}
-	spin_lock_irqsave(udc->lock, flags);
-
-	udc->gadget.ep0 = &udc->ep0in.ep;
-	/* bind gadget */
-	driver->driver.bus     = NULL;
-	udc->gadget.dev.driver = &driver->driver;
-
-	udc->driver = driver;
-	if (udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) {
-		if (udc->vbus_active) {
-			if (udc->udc_driver->flags & CI13XXX_REGS_SHARED)
-				hw_device_reset(udc);
-		} else {
-			goto done;
-		}
-	}
-
-	if (!udc->softconnect)
-		goto done;
-
-	retval = hw_device_state(udc->ep0out.qh.dma);
-
-done:
-	spin_unlock_irqrestore(udc->lock, flags);
-
-	if (udc->udc_driver->notify_event)
-		udc->udc_driver->notify_event(udc,
-				CI13XXX_CONTROLLER_UDC_STARTED_EVENT);
-pm_put:
-	pm_runtime_put(&udc->gadget.dev);
-
-	return retval;
-}
-
-/**
- * ci13xxx_stop: unregister a gadget driver
- *
- * Check usb_gadget_unregister_driver() at "usb_gadget.h" for details
- */
-static int ci13xxx_stop(struct usb_gadget *gadget)
-{
-	struct ci13xxx *udc = _udc;
-	unsigned long flags;
-
-	spin_lock_irqsave(udc->lock, flags);
-
-	if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) ||
-			udc->vbus_active) {
-		hw_device_state(0);
-		spin_unlock_irqrestore(udc->lock, flags);
-		_gadget_stop_activity(&udc->gadget);
-		spin_lock_irqsave(udc->lock, flags);
-	}
-
-	spin_unlock_irqrestore(udc->lock, flags);
-
-	usb_ep_free_request(&udc->ep0in.ep, udc->status);
-	kfree(udc->status_buf);
-
-	return 0;
-}
-
-/******************************************************************************
- * BUS block
- *****************************************************************************/
-/**
- * udc_irq: global interrupt handler
- *
- * This function returns IRQ_HANDLED if the IRQ has been handled
- * It locks access to registers
- */
-static irqreturn_t udc_irq(void)
-{
-	struct ci13xxx *udc = _udc;
-	irqreturn_t retval;
-	u32 intr;
-
-	trace();
-
-	if (udc == NULL) {
-		err("ENODEV");
-		return IRQ_HANDLED;
-	}
-
-	spin_lock(udc->lock);
-
-	if (udc->udc_driver->in_lpm && udc->udc_driver->in_lpm(udc)) {
-		spin_unlock(udc->lock);
-		return IRQ_NONE;
-	}
-
-	if (udc->udc_driver->flags & CI13XXX_REGS_SHARED) {
-		if (hw_cread(CAP_USBMODE, USBMODE_CM) !=
-				USBMODE_CM_DEVICE) {
-			spin_unlock(udc->lock);
-			return IRQ_NONE;
-		}
-	}
-	intr = hw_test_and_clear_intr_active();
-	if (intr) {
-		isr_statistics.hndl.buf[isr_statistics.hndl.idx++] = intr;
-		isr_statistics.hndl.idx &= ISR_MASK;
-		isr_statistics.hndl.cnt++;
-
-		/* order defines priority - do NOT change it */
-		if (USBi_URI & intr) {
-			isr_statistics.uri++;
-			if (!hw_cread(CAP_PORTSC, PORTSC_PR))
-				pr_info("%s: USB reset interrupt is delayed\n",
-								__func__);
-			isr_reset_handler(udc);
-		}
-		if (USBi_PCI & intr) {
-			isr_statistics.pci++;
-			isr_resume_handler(udc);
-		}
-		if (USBi_UEI & intr)
-			isr_statistics.uei++;
-		if (USBi_UI  & intr) {
-			isr_statistics.ui++;
-			isr_tr_complete_handler(udc);
-		}
-		if (USBi_SLI & intr) {
-			isr_suspend_handler(udc);
-			isr_statistics.sli++;
-		}
-		retval = IRQ_HANDLED;
-	} else {
-		isr_statistics.none++;
-		retval = IRQ_NONE;
-	}
-	spin_unlock(udc->lock);
-
-	return retval;
-}
-
-static void destroy_eps(struct ci13xxx *ci)
-{
-	int i;
-
-	for (i = 0; i < hw_ep_max; i++) {
-		struct ci13xxx_ep *mEp = &ci->ci13xxx_ep[i];
-
-		dma_pool_free(ci->qh_pool, mEp->qh.ptr, mEp->qh.dma);
-	}
-}
-
-/**
- * udc_probe: parent probe must call this to initialize UDC
- * @dev:  parent device
- * @regs: registers base address
- * @name: driver name
- *
- * This function returns an error code
- * No interrupts active, the IRQ has not been requested yet
- * Kernel assumes 32-bit DMA operations by default, no need to dma_set_mask
- */
-static int udc_probe(struct ci13xxx_udc_driver *driver, struct device *dev,
-		void __iomem *regs)
-{
-	struct ci13xxx *udc;
-	struct ci13xxx_platform_data *pdata;
-	int retval = 0, i, j;
-
-	trace("%pK, %pK, %pK", dev, regs, driver->name);
-
-	if (dev == NULL || regs == NULL || driver == NULL ||
-			driver->name == NULL)
-		return -EINVAL;
-
-	udc = kzalloc(sizeof(struct ci13xxx), GFP_KERNEL);
-	if (udc == NULL)
-		return -ENOMEM;
-
-	udc->lock = &udc_lock;
-	udc->regs = regs;
-	udc->udc_driver = driver;
-
-	udc->gadget.ops          = &usb_gadget_ops;
-	udc->gadget.speed        = USB_SPEED_UNKNOWN;
-	udc->gadget.max_speed    = USB_SPEED_HIGH;
-	udc->gadget.is_otg       = 0;
-	udc->gadget.name         = driver->name;
-
-	/* alloc resources */
-	udc->qh_pool = dma_pool_create("ci13xxx_qh", dev,
-				       sizeof(struct ci13xxx_qh),
-				       64, CI13XXX_PAGE_SIZE);
-	if (udc->qh_pool == NULL) {
-		retval = -ENOMEM;
-		goto free_udc;
-	}
-
-	udc->td_pool = dma_pool_create("ci13xxx_td", dev,
-				       sizeof(struct ci13xxx_td),
-				       64, CI13XXX_PAGE_SIZE);
-	if (udc->td_pool == NULL) {
-		retval = -ENOMEM;
-		goto free_qh_pool;
-	}
-
-	INIT_DELAYED_WORK(&udc->rw_work, usb_do_remote_wakeup);
-
-	retval = hw_device_init(regs);
-	if (retval < 0)
-		goto free_qh_pool;
-
-	INIT_LIST_HEAD(&udc->gadget.ep_list);
-	for (i = 0; i < hw_ep_max; i++) {
-		struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
-
-		INIT_LIST_HEAD(&mEp->ep.ep_list);
-		INIT_LIST_HEAD(&mEp->rw_queue);
-		setup_timer(&mEp->prime_timer, ep_prime_timer_func,
-			(unsigned long) mEp);
-	}
-
-	for (i = 0; i < hw_ep_max/2; i++) {
-		for (j = RX; j <= TX; j++) {
-			int k = i + j * hw_ep_max/2;
-			struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[k];
-
-			scnprintf(mEp->name, sizeof(mEp->name), "ep%i%s", i,
-					(j == TX)  ? "in" : "out");
-
-			mEp->lock         = udc->lock;
-			mEp->device       = &udc->gadget.dev;
-			mEp->td_pool      = udc->td_pool;
-
-			mEp->ep.name      = mEp->name;
-			mEp->ep.ops       = &usb_ep_ops;
-			usb_ep_set_maxpacket_limit(&mEp->ep,
-				k ? USHRT_MAX : CTRL_PAYLOAD_MAX);
-
-			INIT_LIST_HEAD(&mEp->qh.queue);
-			mEp->qh.ptr = dma_pool_alloc(udc->qh_pool, GFP_KERNEL,
-					&mEp->qh.dma);
-			if (mEp->qh.ptr == NULL)
-				retval = -ENOMEM;
-			else
-				memset(mEp->qh.ptr, 0, sizeof(*mEp->qh.ptr));
-
-			/* skip ep0 out and in endpoints  */
-			if (i == 0)
-				continue;
-
-			list_add_tail(&mEp->ep.ep_list, &udc->gadget.ep_list);
-		}
-	}
-
-	if (retval)
-		goto free_dma_pools;
-
-	udc->gadget.ep0 = &udc->ep0in.ep;
-
-	pdata = dev->platform_data;
-	if (pdata) {
-		if (pdata->enable_axi_prefetch)
-			udc->gadget.extra_buf_alloc = EXTRA_ALLOCATION_SIZE;
-	}
-
-	if (udc->udc_driver->flags & CI13XXX_REQUIRE_TRANSCEIVER) {
-		udc->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
-		if (udc->transceiver == NULL) {
-			retval = -ENODEV;
-			goto destroy_eps;
-		}
-	}
-
-	if (!(udc->udc_driver->flags & CI13XXX_REGS_SHARED)) {
-		retval = hw_device_reset(udc);
-		if (retval)
-			goto put_transceiver;
-	}
-
-	if (udc->transceiver) {
-		retval = otg_set_peripheral(udc->transceiver->otg,
-						&udc->gadget);
-		if (retval)
-			goto put_transceiver;
-	}
-
-	retval = usb_add_gadget_udc(dev, &udc->gadget);
-	if (retval)
-		goto remove_trans;
-
-#ifdef CONFIG_USB_GADGET_DEBUG_FILES
-	retval = dbg_create_files(&udc->gadget.dev);
-	if (retval) {
-		pr_err("Registering sysfs files for debug failed!!!!\n");
-		goto del_udc;
-	}
-#endif
-
-	pm_runtime_no_callbacks(&udc->gadget.dev);
-	pm_runtime_set_active(&udc->gadget.dev);
-	pm_runtime_enable(&udc->gadget.dev);
-
-	/* Use delayed LPM especially for composition-switch in LPM (suspend) */
-	pm_runtime_set_autosuspend_delay(&udc->gadget.dev, 2000);
-	pm_runtime_use_autosuspend(&udc->gadget.dev);
-
-	_udc = udc;
-	return retval;
-
-del_udc:
-	usb_del_gadget_udc(&udc->gadget);
-remove_trans:
-	if (udc->transceiver)
-		otg_set_peripheral(udc->transceiver->otg, &udc->gadget);
-
-	err("error = %i", retval);
-put_transceiver:
-	if (udc->transceiver)
-		usb_put_phy(udc->transceiver);
-destroy_eps:
-	destroy_eps(udc);
-free_dma_pools:
-	dma_pool_destroy(udc->td_pool);
-free_qh_pool:
-	dma_pool_destroy(udc->qh_pool);
-free_udc:
-	kfree(udc);
-	_udc = NULL;
-	return retval;
-}
-
-/**
- * udc_remove: parent remove must call this to remove UDC
- *
- * No interrupts active, the IRQ has been released
- */
-static void udc_remove(void)
-{
-	struct ci13xxx *udc = _udc;
-
-	if (udc == NULL) {
-		err("EINVAL");
-		return;
-	}
-
-	usb_del_gadget_udc(&udc->gadget);
-
-	if (udc->transceiver) {
-		otg_set_peripheral(udc->transceiver->otg, &udc->gadget);
-		usb_put_phy(udc->transceiver);
-	}
-#ifdef CONFIG_USB_GADGET_DEBUG_FILES
-	dbg_remove_files(&udc->gadget.dev);
-#endif
-	destroy_eps(udc);
-	dma_pool_destroy(udc->td_pool);
-	dma_pool_destroy(udc->qh_pool);
-
-	kfree(udc);
-	_udc = NULL;
-}
diff --git a/drivers/usb/gadget/ci13xxx_udc.h b/drivers/usb/gadget/ci13xxx_udc.h
deleted file mode 100644
index 8c93080..0000000
--- a/drivers/usb/gadget/ci13xxx_udc.h
+++ /dev/null
@@ -1,282 +0,0 @@
-/*
- * ci13xxx_udc.h - structures, registers, and macros MIPS USB IP core
- *
- * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
- *
- * Author: David Lopo
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Description: MIPS USB IP core family device controller
- *              Structures, registers and logging macros
- */
-
-#ifndef _CI13XXX_h_
-#define _CI13XXX_h_
-
-/******************************************************************************
- * DEFINE
- *****************************************************************************/
-#define CI13XXX_PAGE_SIZE  4096ul /* page size for TD's */
-#define ENDPT_MAX          (32)
-#define CTRL_PAYLOAD_MAX   (64)
-#define RX        (0)  /* similar to USB_DIR_OUT but can be used as an index */
-#define TX        (1)  /* similar to USB_DIR_IN  but can be used as an index */
-
-/* UDC private data:
- *  16MSb - Vendor ID | 16 LSb Vendor private data
- */
-#define CI13XX_REQ_VENDOR_ID(id)  (id & 0xFFFF0000UL)
-
-#define MSM_ETD_TYPE			BIT(1)
-#define MSM_EP_PIPE_ID_RESET_VAL	0x1F001F
-
-/******************************************************************************
- * STRUCTURES
- *****************************************************************************/
-/* DMA layout of transfer descriptors */
-struct ci13xxx_td {
-	/* 0 */
-	u32 next;
-#define TD_TERMINATE          BIT(0)
-#define TD_ADDR_MASK          (0xFFFFFFEUL << 5)
-	/* 1 */
-	u32 token;
-#define TD_STATUS             (0x00FFUL <<  0)
-#define TD_STATUS_TR_ERR      BIT(3)
-#define TD_STATUS_DT_ERR      BIT(5)
-#define TD_STATUS_HALTED      BIT(6)
-#define TD_STATUS_ACTIVE      BIT(7)
-#define TD_MULTO              (0x0003UL << 10)
-#define TD_IOC                BIT(15)
-#define TD_TOTAL_BYTES        (0x7FFFUL << 16)
-	/* 2 */
-	u32 page[5];
-#define TD_CURR_OFFSET        (0x0FFFUL <<  0)
-#define TD_FRAME_NUM          (0x07FFUL <<  0)
-#define TD_RESERVED_MASK      (0x0FFFUL <<  0)
-} __packed __aligned(4);
-
-/* DMA layout of queue heads */
-struct ci13xxx_qh {
-	/* 0 */
-	u32 cap;
-#define QH_IOS                BIT(15)
-#define QH_MAX_PKT            (0x07FFUL << 16)
-#define QH_ZLT                BIT(29)
-#define QH_MULT               (0x0003UL << 30)
-#define QH_MULT_SHIFT         11
-	/* 1 */
-	u32 curr;
-	/* 2 - 8 */
-	struct ci13xxx_td        td;
-	/* 9 */
-	u32 RESERVED;
-	struct usb_ctrlrequest   setup;
-} __packed __aligned(4);
-
-/* cache of larger request's original attributes */
-struct ci13xxx_multi_req {
-	unsigned int	     len;
-	unsigned int	     actual;
-	void                *buf;
-};
-
-/* Extension of usb_request */
-struct ci13xxx_req {
-	struct usb_request   req;
-	unsigned int	     map;
-	struct list_head     queue;
-	struct ci13xxx_td   *ptr;
-	dma_addr_t           dma;
-	struct ci13xxx_td   *zptr;
-	dma_addr_t           zdma;
-	struct ci13xxx_multi_req multi;
-};
-
-/* Extension of usb_ep */
-struct ci13xxx_ep {
-	struct usb_ep                          ep;
-	const struct usb_endpoint_descriptor  *desc;
-	u8                                     dir;
-	u8                                     num;
-	u8                                     type;
-	char                                   name[16];
-	struct {
-		struct list_head   queue;
-		struct ci13xxx_qh *ptr;
-		dma_addr_t         dma;
-	}                                      qh;
-	struct list_head                       rw_queue;
-	int                                    wedge;
-
-	/* global resources */
-	spinlock_t                            *lock;
-	struct device                         *device;
-	struct dma_pool                       *td_pool;
-	struct ci13xxx_td                     *last_zptr;
-	dma_addr_t                            last_zdma;
-	unsigned long                         dTD_update_fail_count;
-	unsigned long                         dTD_active_re_q_count;
-	unsigned long			      prime_fail_count;
-	int				      prime_timer_count;
-	struct timer_list		      prime_timer;
-
-	bool                                  multi_req;
-};
-
-struct ci13xxx;
-struct ci13xxx_udc_driver {
-	const char	*name;
-	unsigned long	 flags;
-	unsigned int nz_itc;
-#define CI13XXX_REGS_SHARED		BIT(0)
-#define CI13XXX_REQUIRE_TRANSCEIVER	BIT(1)
-#define CI13XXX_PULLUP_ON_VBUS		BIT(2)
-#define CI13XXX_DISABLE_STREAMING	BIT(3)
-#define CI13XXX_ZERO_ITC		BIT(4)
-#define CI13XXX_ENABLE_AHB2AHB_BYPASS	BIT(6)
-
-#define CI13XXX_CONTROLLER_RESET_EVENT			0
-#define CI13XXX_CONTROLLER_CONNECT_EVENT		1
-#define CI13XXX_CONTROLLER_SUSPEND_EVENT		2
-#define CI13XXX_CONTROLLER_REMOTE_WAKEUP_EVENT		3
-#define CI13XXX_CONTROLLER_RESUME_EVENT		4
-#define CI13XXX_CONTROLLER_DISCONNECT_EVENT		5
-#define CI13XXX_CONTROLLER_UDC_STARTED_EVENT		6
-#define CI13XXX_CONTROLLER_ERROR_EVENT			7
-
-	void	(*notify_event)(struct ci13xxx *udc, unsigned int event);
-	bool    (*in_lpm)(struct ci13xxx *udc);
-};
-
-/* CI13XXX UDC descriptor & global resources */
-struct ci13xxx {
-	spinlock_t		  *lock;      /* ctrl register bank access */
-	void __iomem              *regs;      /* registers address space */
-
-	struct dma_pool           *qh_pool;   /* DMA pool for queue heads */
-	struct dma_pool           *td_pool;   /* DMA pool for transfer descs */
-	struct usb_request        *status;    /* ep0 status request */
-	void                      *status_buf;/* GET_STATUS buffer */
-
-	struct usb_gadget          gadget;     /* USB slave device */
-	struct ci13xxx_ep          ci13xxx_ep[ENDPT_MAX]; /* extended endpts */
-	u32                        ep0_dir;    /* ep0 direction */
-#define ep0out ci13xxx_ep[0]
-#define ep0in  ci13xxx_ep[hw_ep_max / 2]
-	u8                         suspended;  /* suspended by the host */
-	u8                         configured;  /* is device configured */
-	u8                         test_mode;  /* the selected test mode */
-	bool                       rw_pending; /* Remote wakeup pending flag */
-	struct delayed_work        rw_work;    /* remote wakeup delayed work */
-	struct usb_gadget_driver  *driver;     /* 3rd party gadget driver */
-	struct ci13xxx_udc_driver *udc_driver; /* device controller driver */
-	int                        vbus_active; /* is VBUS active */
-	int                        softconnect; /* is pull-up enable allowed */
-	unsigned long dTD_update_fail_count;
-	struct usb_phy            *transceiver; /* Transceiver struct */
-	bool                      skip_flush;   /*
-						 * skip flushing remaining EP
-						 * upon flush timeout for the
-						 * first EP.
-						 */
-};
-
-/******************************************************************************
- * REGISTERS
- *****************************************************************************/
-/* register size */
-#define REG_BITS   (32)
-
-/* HCCPARAMS */
-#define HCCPARAMS_LEN         BIT(17)
-
-/* DCCPARAMS */
-#define DCCPARAMS_DEN         (0x1F << 0)
-#define DCCPARAMS_DC          BIT(7)
-
-/* TESTMODE */
-#define TESTMODE_FORCE        BIT(0)
-
-/* AHB_MODE */
-#define AHB2AHB_BYPASS	      BIT(31)
-
-/* USBCMD */
-#define USBCMD_RS             BIT(0)
-#define USBCMD_RST            BIT(1)
-#define USBCMD_SUTW           BIT(13)
-#define USBCMD_ATDTW          BIT(14)
-
-/* USBSTS & USBINTR */
-#define USBi_UI               BIT(0)
-#define USBi_UEI              BIT(1)
-#define USBi_PCI              BIT(2)
-#define USBi_URI              BIT(6)
-#define USBi_SLI              BIT(8)
-
-/* DEVICEADDR */
-#define DEVICEADDR_USBADRA    BIT(24)
-#define DEVICEADDR_USBADR     (0x7FUL << 25)
-
-/* PORTSC */
-#define PORTSC_FPR            BIT(6)
-#define PORTSC_SUSP           BIT(7)
-#define PORTSC_PR             BIT(8)
-#define PORTSC_HSP            BIT(9)
-#define PORTSC_PTC            (0x0FUL << 16)
-
-/* DEVLC */
-#define DEVLC_PSPD            (0x03UL << 25)
-#define    DEVLC_PSPD_HS      (0x02UL << 25)
-
-/* USBMODE */
-#define USBMODE_CM            (0x03UL <<  0)
-#define    USBMODE_CM_IDLE    (0x00UL <<  0)
-#define    USBMODE_CM_DEVICE  (0x02UL <<  0)
-#define    USBMODE_CM_HOST    (0x03UL <<  0)
-#define USBMODE_SLOM          BIT(3)
-#define USBMODE_SDIS          BIT(4)
-#define USBCMD_ITC(n)         (n << 16) /* n = 0, 1, 2, 4, 8, 16, 32, 64 */
-#define USBCMD_ITC_MASK       (0xFF << 16)
-
-/* ENDPTCTRL */
-#define ENDPTCTRL_RXS         BIT(0)
-#define ENDPTCTRL_RXT         (0x03UL <<  2)
-#define ENDPTCTRL_RXR         BIT(6)         /* reserved for port 0 */
-#define ENDPTCTRL_RXE         BIT(7)
-#define ENDPTCTRL_TXS         BIT(16)
-#define ENDPTCTRL_TXT         (0x03UL << 18)
-#define ENDPTCTRL_TXR         BIT(22)        /* reserved for port 0 */
-#define ENDPTCTRL_TXE         BIT(23)
-
-/******************************************************************************
- * LOGGING
- *****************************************************************************/
-#define ci13xxx_printk(level, format, args...) \
-do { \
-	if (_udc == NULL) \
-		printk(level "[%s] " format "\n", __func__, ## args); \
-	else \
-		dev_printk(level, _udc->gadget.dev.parent, \
-			   "[%s] " format "\n", __func__, ## args); \
-} while (0)
-
-#ifndef err
-#define err(format, args...)    ci13xxx_printk(KERN_ERR, format, ## args)
-#endif
-
-#define warn(format, args...)   ci13xxx_printk(KERN_WARNING, format, ## args)
-#define info(format, args...)   ci13xxx_printk(KERN_INFO, format, ## args)
-
-#ifdef TRACE
-#define trace(format, args...)      ci13xxx_printk(KERN_DEBUG, format, ## args)
-#define dbg_trace(format, args...)  dev_dbg(dev, format, ##args)
-#else
-#define trace(format, args...)      do {} while (0)
-#define dbg_trace(format, args...)  do {} while (0)
-#endif
-
-#endif	/* _CI13XXX_h_ */
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 16b6619..f779fdc30 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -317,6 +317,7 @@
 		ret = unregister_gadget(gi);
 		if (ret)
 			goto err;
+		kfree(name);
 	} else {
 		if (gi->composite.gadget_driver.udc_name) {
 			ret = -EBUSY;
diff --git a/drivers/usb/gadget/function/f_audio_source.c b/drivers/usb/gadget/function/f_audio_source.c
index 9f7a29a..05a66b2 100644
--- a/drivers/usb/gadget/function/f_audio_source.c
+++ b/drivers/usb/gadget/function/f_audio_source.c
@@ -158,6 +158,13 @@
 	.bInterval =		4, /* poll 1 per millisecond */
 };
 
+static struct usb_ss_ep_comp_descriptor ss_as_in_comp_desc = {
+	 .bLength =		 sizeof(ss_as_in_comp_desc),
+	 .bDescriptorType =	 USB_DT_SS_ENDPOINT_COMP,
+
+	 .wBytesPerInterval =	cpu_to_le16(IN_EP_MAX_PACKET_SIZE),
+};
+
 /* Standard ISO IN Endpoint Descriptor for highspeed */
 static struct usb_endpoint_descriptor fs_as_in_ep_desc  = {
 	.bLength =		USB_DT_ENDPOINT_AUDIO_SIZE,
@@ -198,6 +205,26 @@
 	NULL,
 };
 
+static struct usb_descriptor_header *ss_audio_desc[] = {
+	(struct usb_descriptor_header *)&ac_interface_desc,
+	(struct usb_descriptor_header *)&ac_header_desc,
+
+	(struct usb_descriptor_header *)&input_terminal_desc,
+	(struct usb_descriptor_header *)&output_terminal_desc,
+	(struct usb_descriptor_header *)&feature_unit_desc,
+
+	(struct usb_descriptor_header *)&as_interface_alt_0_desc,
+	(struct usb_descriptor_header *)&as_interface_alt_1_desc,
+	(struct usb_descriptor_header *)&as_header_desc,
+
+	(struct usb_descriptor_header *)&as_type_i_desc,
+
+	(struct usb_descriptor_header *)&hs_as_in_ep_desc,
+	(struct usb_descriptor_header *)&ss_as_in_comp_desc,
+	(struct usb_descriptor_header *)&as_iso_in_desc,
+	NULL,
+};
+
 static struct usb_descriptor_header *fs_audio_desc[] = {
 	(struct usb_descriptor_header *)&ac_interface_desc,
 	(struct usb_descriptor_header *)&ac_header_desc,
@@ -673,6 +700,7 @@
 
 	f->fs_descriptors = fs_audio_desc;
 	f->hs_descriptors = hs_audio_desc;
+	f->ss_descriptors = ss_audio_desc;
 
 	for (i = 0, status = 0; i < IN_EP_REQ_COUNT && status == 0; i++) {
 		req = audio_request_new(ep, IN_EP_MAX_PACKET_SIZE);
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 8b481da..866c3ec 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -2655,9 +2655,18 @@
 		int i;
 
 		if (len < sizeof(*d) ||
-		    d->bFirstInterfaceNumber >= ffs->interfaces_count ||
-		    !d->Reserved1)
+		    d->bFirstInterfaceNumber >= ffs->interfaces_count)
 			return -EINVAL;
+		if (d->Reserved1 != 1) {
+			/*
+			 * According to the spec, Reserved1 must be set to 1
+			 * but older kernels incorrectly rejected non-zero
+			 * values.  We fix it here to avoid returning EINVAL
+			 * in response to values we used to accept.
+			 */
+			pr_debug("usb_ext_compat_desc::Reserved1 forced to 1\n");
+			d->Reserved1 = 1;
+		}
 		for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
 			if (d->Reserved2[i])
 				return -EINVAL;
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index 7e4e7ce..4bdfadf 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -300,15 +300,17 @@
 		gsi_channel_info.gevntcount_hi_addr;
 	in_params->dir = GSI_CHAN_DIR_FROM_GSI;
 	in_params->xfer_ring_len = gsi_channel_info.xfer_ring_len;
-	in_params->xfer_ring_base_addr = gsi_channel_info.xfer_ring_base_addr;
 	in_params->xfer_scratch.last_trb_addr_iova =
 					gsi_channel_info.last_trb_addr;
-	in_params->xfer_ring_base_addr = in_params->xfer_ring_base_addr_iova =
+	in_params->xfer_ring_base_addr_iova =
 					gsi_channel_info.xfer_ring_base_addr;
 	in_params->data_buff_base_len = d_port->in_request.buf_len *
 					d_port->in_request.num_bufs;
-	in_params->data_buff_base_addr = in_params->data_buff_base_addr_iova =
-					d_port->in_request.dma;
+	in_params->data_buff_base_addr_iova = d_port->in_request.dma;
+	in_params->sgt_xfer_rings = &d_port->in_request.sgt_trb_xfer_ring;
+	in_params->sgt_data_buff = &d_port->in_request.sgt_data_buff;
+	log_event_dbg("%s(): IN: sgt_xfer_rings:%pK sgt_data_buff:%pK\n",
+		__func__, in_params->sgt_xfer_rings, in_params->sgt_data_buff);
 	in_params->xfer_scratch.const_buffer_size =
 		gsi_channel_info.const_buffer_size;
 	in_params->xfer_scratch.depcmd_low_addr =
@@ -340,14 +342,19 @@
 		out_params->dir = GSI_CHAN_DIR_TO_GSI;
 		out_params->xfer_ring_len =
 			gsi_channel_info.xfer_ring_len;
-		out_params->xfer_ring_base_addr =
-			out_params->xfer_ring_base_addr_iova =
+		out_params->xfer_ring_base_addr_iova =
 			gsi_channel_info.xfer_ring_base_addr;
 		out_params->data_buff_base_len = d_port->out_request.buf_len *
 			d_port->out_request.num_bufs;
-		out_params->data_buff_base_addr =
-			out_params->data_buff_base_addr_iova =
+		out_params->data_buff_base_addr_iova =
 			d_port->out_request.dma;
+		out_params->sgt_xfer_rings =
+			&d_port->out_request.sgt_trb_xfer_ring;
+		out_params->sgt_data_buff = &d_port->out_request.sgt_data_buff;
+		log_event_dbg("%s(): OUT: sgt_xfer_rings:%pK sgt_data_buff:%pK\n",
+			__func__, out_params->sgt_xfer_rings,
+			out_params->sgt_data_buff);
+
 		out_params->xfer_scratch.last_trb_addr_iova =
 			gsi_channel_info.last_trb_addr;
 		out_params->xfer_scratch.const_buffer_size =
@@ -407,17 +414,17 @@
 			ipa_out_channel_out_params.db_reg_phs_addr_lsb);
 
 	d_port->in_channel_handle = ipa_in_channel_out_params.clnt_hdl;
-	d_port->in_db_reg_phs_addr_lsb =
+	d_port->in_request.db_reg_phs_addr_lsb =
 		ipa_in_channel_out_params.db_reg_phs_addr_lsb;
-	d_port->in_db_reg_phs_addr_msb =
+	d_port->in_request.db_reg_phs_addr_msb =
 		ipa_in_channel_out_params.db_reg_phs_addr_msb;
 
 	if (gsi->prot_id != IPA_USB_DIAG) {
 		d_port->out_channel_handle =
 			ipa_out_channel_out_params.clnt_hdl;
-		d_port->out_db_reg_phs_addr_lsb =
+		d_port->out_request.db_reg_phs_addr_lsb =
 			ipa_out_channel_out_params.db_reg_phs_addr_lsb;
-		d_port->out_db_reg_phs_addr_msb =
+		d_port->out_request.db_reg_phs_addr_msb =
 			ipa_out_channel_out_params.db_reg_phs_addr_msb;
 	}
 	return ret;
@@ -426,22 +433,19 @@
 static void ipa_data_path_enable(struct gsi_data_port *d_port)
 {
 	struct f_gsi *gsi = d_port_to_gsi(d_port);
-	struct usb_gsi_request req;
-	u64 dbl_register_addr;
 	bool block_db = false;
 
-
-	log_event_dbg("in_db_reg_phs_addr_lsb = %x",
-			gsi->d_port.in_db_reg_phs_addr_lsb);
+	log_event_dbg("IN: db_reg_phs_addr_lsb = %x",
+			gsi->d_port.in_request.db_reg_phs_addr_lsb);
 	usb_gsi_ep_op(gsi->d_port.in_ep,
-			(void *)&gsi->d_port.in_db_reg_phs_addr_lsb,
+			&gsi->d_port.in_request,
 			GSI_EP_OP_STORE_DBL_INFO);
 
 	if (gsi->d_port.out_ep) {
-		log_event_dbg("out_db_reg_phs_addr_lsb = %x",
-				gsi->d_port.out_db_reg_phs_addr_lsb);
+		log_event_dbg("OUT: db_reg_phs_addr_lsb = %x",
+				gsi->d_port.out_request.db_reg_phs_addr_lsb);
 		usb_gsi_ep_op(gsi->d_port.out_ep,
-				(void *)&gsi->d_port.out_db_reg_phs_addr_lsb,
+				&gsi->d_port.out_request,
 				GSI_EP_OP_STORE_DBL_INFO);
 
 		usb_gsi_ep_op(gsi->d_port.out_ep, &gsi->d_port.out_request,
@@ -452,29 +456,12 @@
 	usb_gsi_ep_op(d_port->in_ep, (void *)&block_db,
 				GSI_EP_OP_SET_CLR_BLOCK_DBL);
 
-	/* GSI channel DBL address for USB IN endpoint */
-	dbl_register_addr = gsi->d_port.in_db_reg_phs_addr_msb;
-	dbl_register_addr = dbl_register_addr << 32;
-	dbl_register_addr =
-		dbl_register_addr | gsi->d_port.in_db_reg_phs_addr_lsb;
+	usb_gsi_ep_op(gsi->d_port.in_ep, &gsi->d_port.in_request,
+						GSI_EP_OP_RING_DB);
 
-	/* use temp gsi request to pass 64 bit dbl reg addr and num_bufs */
-	req.buf_base_addr = &dbl_register_addr;
-
-	req.num_bufs = gsi->d_port.in_request.num_bufs;
-	usb_gsi_ep_op(gsi->d_port.in_ep, &req, GSI_EP_OP_RING_DB);
-
-	if (gsi->d_port.out_ep) {
-		/* GSI channel DBL address for USB OUT endpoint */
-		dbl_register_addr = gsi->d_port.out_db_reg_phs_addr_msb;
-		dbl_register_addr = dbl_register_addr << 32;
-		dbl_register_addr = dbl_register_addr |
-					gsi->d_port.out_db_reg_phs_addr_lsb;
-		/* use temp request to pass 64 bit dbl reg addr and num_bufs */
-		req.buf_base_addr = &dbl_register_addr;
-		req.num_bufs = gsi->d_port.out_request.num_bufs;
-		usb_gsi_ep_op(gsi->d_port.out_ep, &req, GSI_EP_OP_RING_DB);
-	}
+	if (gsi->d_port.out_ep)
+		usb_gsi_ep_op(gsi->d_port.out_ep, &gsi->d_port.out_request,
+						GSI_EP_OP_RING_DB);
 }
 
 static void ipa_disconnect_handler(struct gsi_data_port *d_port)
@@ -491,11 +478,13 @@
 		 */
 		usb_gsi_ep_op(d_port->in_ep, (void *)&block_db,
 				GSI_EP_OP_SET_CLR_BLOCK_DBL);
-		usb_gsi_ep_op(gsi->d_port.in_ep, NULL, GSI_EP_OP_DISABLE);
+		usb_gsi_ep_op(gsi->d_port.in_ep,
+				&gsi->d_port.in_request, GSI_EP_OP_DISABLE);
 	}
 
 	if (gsi->d_port.out_ep)
-		usb_gsi_ep_op(gsi->d_port.out_ep, NULL, GSI_EP_OP_DISABLE);
+		usb_gsi_ep_op(gsi->d_port.out_ep,
+				&gsi->d_port.out_request, GSI_EP_OP_DISABLE);
 
 	gsi->d_port.net_ready_trigger = false;
 }
@@ -519,10 +508,12 @@
 	gsi->d_port.in_channel_handle = -EINVAL;
 	gsi->d_port.out_channel_handle = -EINVAL;
 
-	usb_gsi_ep_op(gsi->d_port.in_ep, NULL, GSI_EP_OP_FREE_TRBS);
+	usb_gsi_ep_op(gsi->d_port.in_ep, &gsi->d_port.in_request,
+							GSI_EP_OP_FREE_TRBS);
 
 	if (gsi->d_port.out_ep)
-		usb_gsi_ep_op(gsi->d_port.out_ep, NULL, GSI_EP_OP_FREE_TRBS);
+		usb_gsi_ep_op(gsi->d_port.out_ep, &gsi->d_port.out_request,
+							GSI_EP_OP_FREE_TRBS);
 
 	/* free buffers allocated with each TRB */
 	gsi_free_trb_buffer(gsi);
@@ -1219,14 +1210,16 @@
 		break;
 	case QTI_CTRL_GET_LINE_STATE:
 		val = atomic_read(&gsi->connected);
+		if (gsi->prot_id == IPA_USB_RMNET)
+			val = gsi->rmnet_dtr_status;
+
 		ret = copy_to_user((void __user *)arg, &val, sizeof(val));
 		if (ret) {
 			log_event_err("copy_to_user fail LINE_STATE");
 			ret = -EFAULT;
 		}
 		log_event_dbg("%s: Sent line_state: %d for prot id:%d",
-				__func__,
-				atomic_read(&gsi->connected), gsi->prot_id);
+				__func__, val, gsi->prot_id);
 		break;
 	case QTI_CTRL_EP_LOOKUP:
 	case GSI_MBIM_EP_LOOKUP:
@@ -1750,6 +1743,7 @@
 	struct gsi_ctrl_pkt *cpkt;
 	u8 *buf;
 	u32 n;
+	bool line_state;
 
 	if (!atomic_read(&gsi->connected)) {
 		log_event_dbg("usb cable is not connected");
@@ -1830,8 +1824,11 @@
 		break;
 	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
 			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+		line_state = (w_value & GSI_CTRL_DTR ? true : false);
+		if (gsi->prot_id == IPA_USB_RMNET)
+			gsi->rmnet_dtr_status = line_state;
 		log_event_dbg("%s: USB_CDC_REQ_SET_CONTROL_LINE_STATE DTR:%d\n",
-				__func__, w_value & GSI_CTRL_DTR ? 1 : 0);
+						__func__, line_state);
 		gsi_ctrl_send_cpkt_tomodem(gsi, NULL, 0);
 		value = 0;
 		break;
@@ -1961,6 +1958,11 @@
 			ret = -ENOMEM;
 			goto fail1;
 		}
+
+		dma_get_sgtable(dev->parent,
+			&gsi->d_port.in_request.sgt_data_buff,
+			gsi->d_port.in_request.buf_base_addr,
+			gsi->d_port.in_request.dma, len_in);
 	}
 
 	if (gsi->d_port.out_ep && !gsi->d_port.out_request.buf_base_addr) {
@@ -1980,6 +1982,11 @@
 			ret = -ENOMEM;
 			goto fail;
 		}
+
+		dma_get_sgtable(dev->parent,
+			&gsi->d_port.out_request.sgt_data_buff,
+			gsi->d_port.out_request.buf_base_addr,
+			gsi->d_port.out_request.dma, len_out);
 	}
 
 	log_event_dbg("finished allocating trb's buffer\n");
@@ -2010,6 +2017,7 @@
 			gsi->d_port.out_request.buf_base_addr,
 			gsi->d_port.out_request.dma);
 		gsi->d_port.out_request.buf_base_addr = NULL;
+		sg_free_table(&gsi->d_port.out_request.sgt_data_buff);
 	}
 
 	if (gsi->d_port.in_ep &&
@@ -2020,6 +2028,7 @@
 			gsi->d_port.in_request.buf_base_addr,
 			gsi->d_port.in_request.dma);
 		gsi->d_port.in_request.buf_base_addr = NULL;
+		sg_free_table(&gsi->d_port.in_request.sgt_data_buff);
 	}
 }
 
@@ -2184,7 +2193,10 @@
 	if (gsi->prot_id == IPA_USB_RNDIS)
 		rndis_uninit(gsi->params);
 
-	 /* Disable Control Path */
+	if (gsi->prot_id == IPA_USB_RMNET)
+		gsi->rmnet_dtr_status = false;
+
+	/* Disable Control Path */
 	if (gsi->c_port.notify &&
 		gsi->c_port.notify->driver_data) {
 		usb_ep_disable(gsi->c_port.notify);
@@ -3072,13 +3084,13 @@
 				gsi->d_port.in_channel_handle);
 		len += scnprintf(buf + len, PAGE_SIZE - len,
 		"%25s %10x\n", "IN Chnl Dbl Addr: ",
-				gsi->d_port.in_db_reg_phs_addr_lsb);
+				gsi->d_port.in_request.db_reg_phs_addr_lsb);
 		len += scnprintf(buf + len, PAGE_SIZE - len,
 		"%25s %10u\n", "IN TRB Ring Len: ",
 				ipa_chnl_params->xfer_ring_len);
 		len += scnprintf(buf + len, PAGE_SIZE - len,
 		"%25s %10x\n", "IN TRB Base Addr: ", (unsigned int)
-			ipa_chnl_params->xfer_ring_base_addr);
+			ipa_chnl_params->xfer_ring_base_addr_iova);
 		len += scnprintf(buf + len, PAGE_SIZE - len,
 		"%25s %10x\n", "GEVENTCNTLO IN Addr: ",
 			ipa_chnl_params->gevntcount_low_addr);
@@ -3106,13 +3118,13 @@
 			gsi->d_port.out_channel_handle);
 		len += scnprintf(buf + len, PAGE_SIZE - len,
 		"%25s %10x\n", "OUT Channel Dbl Addr: ",
-			gsi->d_port.out_db_reg_phs_addr_lsb);
+			gsi->d_port.out_request.db_reg_phs_addr_lsb);
 		len += scnprintf(buf + len, PAGE_SIZE - len,
 		"%25s %10u\n", "OUT TRB Ring Len: ",
 			ipa_chnl_params->xfer_ring_len);
 		len += scnprintf(buf + len, PAGE_SIZE - len,
 		"%25s %10x\n", "OUT TRB Base Addr: ", (unsigned int)
-			ipa_chnl_params->xfer_ring_base_addr);
+			ipa_chnl_params->xfer_ring_base_addr_iova);
 		len += scnprintf(buf + len, PAGE_SIZE - len,
 		"%25s %10x\n", "GEVENTCNTLO OUT Addr: ",
 			ipa_chnl_params->gevntcount_low_addr);
diff --git a/drivers/usb/gadget/function/f_gsi.h b/drivers/usb/gadget/function/f_gsi.h
index fa36d05..58a7706 100644
--- a/drivers/usb/gadget/function/f_gsi.h
+++ b/drivers/usb/gadget/function/f_gsi.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -232,10 +232,6 @@
 	struct ipa_usb_teth_params ipa_init_params;
 	int in_channel_handle;
 	int out_channel_handle;
-	u32 in_db_reg_phs_addr_lsb;
-	u32 in_db_reg_phs_addr_msb;
-	u32 out_db_reg_phs_addr_lsb;
-	u32 out_db_reg_phs_addr_msb;
 	u32 in_xfer_rsc_index;
 	u32 out_xfer_rsc_index;
 	u16 in_last_trb_addr;
@@ -280,6 +276,7 @@
 	struct gsi_data_port d_port;
 	struct gsi_ctrl_port c_port;
 	void *ipc_log_ctxt;
+	bool rmnet_dtr_status;
 };
 
 static inline struct f_gsi *func_to_gsi(struct usb_function *f)
diff --git a/drivers/usb/gadget/function/f_qdss.c b/drivers/usb/gadget/function/f_qdss.c
index a0fecb2..6ae2693 100644
--- a/drivers/usb/gadget/function/f_qdss.c
+++ b/drivers/usb/gadget/function/f_qdss.c
@@ -183,15 +183,28 @@
 }
 /*----------------------------------------------------------------------*/
 
-static void qdss_ctrl_write_complete(struct usb_ep *ep,
+static void qdss_write_complete(struct usb_ep *ep,
 	struct usb_request *req)
 {
 	struct f_qdss *qdss = ep->driver_data;
 	struct qdss_request *d_req = req->context;
+	struct usb_ep *in;
+	struct list_head *list_pool;
+	enum qdss_state state;
 	unsigned long flags;
 
 	pr_debug("qdss_ctrl_write_complete\n");
 
+	if (qdss->debug_inface_enabled) {
+		in = qdss->port.ctrl_in;
+		list_pool = &qdss->ctrl_write_pool;
+		state = USB_QDSS_CTRL_WRITE_DONE;
+	} else {
+		in = qdss->port.data;
+		list_pool = &qdss->data_write_pool;
+		state = USB_QDSS_DATA_WRITE_DONE;
+	}
+
 	if (!req->status) {
 		/* send zlp */
 		if ((req->length >= ep->maxpacket) &&
@@ -199,13 +212,13 @@
 			req->length = 0;
 			d_req->actual = req->actual;
 			d_req->status = req->status;
-			if (!usb_ep_queue(qdss->port.ctrl_in, req, GFP_ATOMIC))
+			if (!usb_ep_queue(in, req, GFP_ATOMIC))
 				return;
 		}
 	}
 
 	spin_lock_irqsave(&qdss->lock, flags);
-	list_add_tail(&req->list, &qdss->ctrl_write_pool);
+	list_add_tail(&req->list, list_pool);
 	if (req->length != 0) {
 		d_req->actual = req->actual;
 		d_req->status = req->status;
@@ -213,8 +226,7 @@
 	spin_unlock_irqrestore(&qdss->lock, flags);
 
 	if (qdss->ch.notify)
-		qdss->ch.notify(qdss->ch.priv, USB_QDSS_CTRL_WRITE_DONE, d_req,
-			NULL);
+		qdss->ch.notify(qdss->ch.priv, state, d_req, NULL);
 }
 
 static void qdss_ctrl_read_complete(struct usb_ep *ep,
@@ -252,6 +264,12 @@
 		return;
 	}
 
+	list_for_each_safe(act, tmp, &qdss->data_write_pool) {
+		req = list_entry(act, struct usb_request, list);
+		list_del(&req->list);
+		usb_ep_free_request(qdss->port.data, req);
+	}
+
 	list_for_each_safe(act, tmp, &qdss->ctrl_write_pool) {
 		req = list_entry(act, struct usb_request, list);
 		list_del(&req->list);
@@ -271,23 +289,41 @@
 {
 	struct f_qdss *qdss = ch->priv_usb;
 	struct usb_request *req;
+	struct usb_ep *in;
+	struct list_head *list_pool;
 	int i;
 
 	pr_debug("usb_qdss_alloc_req\n");
 
-	if (no_write_buf <= 0 || no_read_buf <= 0 || !qdss) {
+	if (!qdss) {
+		pr_err("usb_qdss_alloc_req: channel %s closed\n", ch->name);
+		return -ENODEV;
+	}
+
+	if ((qdss->debug_inface_enabled &&
+		(no_write_buf <= 0 || no_read_buf <= 0)) ||
+		(!qdss->debug_inface_enabled &&
+		(no_write_buf <= 0 || no_read_buf))) {
 		pr_err("usb_qdss_alloc_req: missing params\n");
 		return -ENODEV;
 	}
 
+	if (qdss->debug_inface_enabled) {
+		in = qdss->port.ctrl_in;
+		list_pool = &qdss->ctrl_write_pool;
+	} else {
+		in = qdss->port.data;
+		list_pool = &qdss->data_write_pool;
+	}
+
 	for (i = 0; i < no_write_buf; i++) {
-		req = usb_ep_alloc_request(qdss->port.ctrl_in, GFP_ATOMIC);
+		req = usb_ep_alloc_request(in, GFP_ATOMIC);
 		if (!req) {
 			pr_err("usb_qdss_alloc_req: ctrl_in allocation err\n");
 			goto fail;
 		}
-		req->complete = qdss_ctrl_write_complete;
-		list_add_tail(&req->list, &qdss->ctrl_write_pool);
+		req->complete = qdss_write_complete;
+		list_add_tail(&req->list, list_pool);
 	}
 
 	for (i = 0; i < no_read_buf; i++) {
@@ -378,6 +414,10 @@
 		qdss_ctrl_intf_desc.iInterface = id;
 	}
 
+	/* for non-accelerated path keep tx fifo size 1k */
+	if (!strcmp(qdss->ch.name, USB_QDSS_CH_MDM))
+		qdss_data_ep_comp_desc.bMaxBurst = 0;
+
 	ep = usb_ep_autoconfig_ss(gadget, &qdss_ss_data_desc,
 		&qdss_data_ep_comp_desc);
 	if (!ep) {
@@ -490,21 +530,20 @@
 	qdss = container_of(work, struct f_qdss, disconnect_w);
 	pr_debug("usb_qdss_disconnect_work\n");
 
-	/*
-	 * Uninitialized init data i.e. ep specific operation.
-	 * Notify qdss to cancel all active transfers.
-	 */
-	if (qdss->ch.app_conn) {
+
+	/* Notify qdss to cancel all active transfers */
+	if (qdss->ch.notify)
+		qdss->ch.notify(qdss->ch.priv,
+			USB_QDSS_DISCONNECT,
+			NULL,
+			NULL);
+
+	/* Uninitialized init data i.e. ep specific operation */
+	if (qdss->ch.app_conn && !strcmp(qdss->ch.name, USB_QDSS_CH_MSM)) {
 		status = uninit_data(qdss->port.data);
 		if (status)
 			pr_err("%s: uninit_data error\n", __func__);
 
-		if (qdss->ch.notify)
-			qdss->ch.notify(qdss->ch.priv,
-				USB_QDSS_DISCONNECT,
-				NULL,
-				NULL);
-
 		status = set_qdss_data_connection(qdss, 0);
 		if (status)
 			pr_err("qdss_disconnect error");
@@ -561,15 +600,16 @@
 	}
 
 	pr_debug("usb_qdss_connect_work\n");
+
+	if (!strcmp(qdss->ch.name, USB_QDSS_CH_MDM))
+		goto notify;
+
 	status = set_qdss_data_connection(qdss, 1);
 	if (status) {
 		pr_err("set_qdss_data_connection error(%d)", status);
 		return;
 	}
 
-	if (qdss->ch.notify)
-		qdss->ch.notify(qdss->ch.priv, USB_QDSS_CONNECT,
-						NULL, &qdss->ch);
 	spin_lock_irqsave(&qdss->lock, flags);
 	req = qdss->endless_req;
 	spin_unlock_irqrestore(&qdss->lock, flags);
@@ -577,8 +617,15 @@
 		return;
 
 	status = usb_ep_queue(qdss->port.data, req, GFP_ATOMIC);
-	if (status)
+	if (status) {
 		pr_err("%s: usb_ep_queue error (%d)\n", __func__, status);
+		return;
+	}
+
+notify:
+	if (qdss->ch.notify)
+		qdss->ch.notify(qdss->ch.priv, USB_QDSS_CONNECT,
+						NULL, &qdss->ch);
 }
 
 static int qdss_set_alt(struct usb_function *f, unsigned int intf,
@@ -718,6 +765,7 @@
 	spin_lock_init(&qdss->lock);
 	INIT_LIST_HEAD(&qdss->ctrl_read_pool);
 	INIT_LIST_HEAD(&qdss->ctrl_write_pool);
+	INIT_LIST_HEAD(&qdss->data_write_pool);
 	INIT_WORK(&qdss->connect_w, usb_qdss_connect_work);
 	INIT_WORK(&qdss->disconnect_w, usb_qdss_disconnect_work);
 
@@ -813,6 +861,50 @@
 }
 EXPORT_SYMBOL(usb_qdss_ctrl_write);
 
+int usb_qdss_write(struct usb_qdss_ch *ch, struct qdss_request *d_req)
+{
+	struct f_qdss *qdss = ch->priv_usb;
+	unsigned long flags;
+	struct usb_request *req = NULL;
+
+	pr_debug("usb_qdss_ctrl_write\n");
+
+	if (!qdss)
+		return -ENODEV;
+
+	spin_lock_irqsave(&qdss->lock, flags);
+
+	if (qdss->usb_connected == 0) {
+		spin_unlock_irqrestore(&qdss->lock, flags);
+		return -EIO;
+	}
+
+	if (list_empty(&qdss->data_write_pool)) {
+		pr_err("error: usb_qdss_data_write list is empty\n");
+		spin_unlock_irqrestore(&qdss->lock, flags);
+		return -EAGAIN;
+	}
+
+	req = list_first_entry(&qdss->data_write_pool, struct usb_request,
+		list);
+	list_del(&req->list);
+	spin_unlock_irqrestore(&qdss->lock, flags);
+
+	req->buf = d_req->buf;
+	req->length = d_req->length;
+	req->context = d_req;
+	if (usb_ep_queue(qdss->port.data, req, GFP_ATOMIC)) {
+		spin_lock_irqsave(&qdss->lock, flags);
+		list_add_tail(&req->list, &qdss->data_write_pool);
+		spin_unlock_irqrestore(&qdss->lock, flags);
+		pr_err("qdss usb_ep_queue failed\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(usb_qdss_write);
+
 struct usb_qdss_ch *usb_qdss_open(const char *name, void *priv,
 	void (*notify)(void *priv, unsigned int event,
 		struct qdss_request *d_req, struct usb_qdss_ch *))
@@ -870,7 +962,9 @@
 	pr_debug("usb_qdss_close\n");
 
 	spin_lock_irqsave(&qdss_lock, flags);
-	if (!qdss || !qdss->usb_connected) {
+	ch->priv_usb = NULL;
+	if (!qdss || !qdss->usb_connected ||
+			!strcmp(qdss->ch.name, USB_QDSS_CH_MDM)) {
 		ch->app_conn = 0;
 		spin_unlock_irqrestore(&qdss_lock, flags);
 		return;
diff --git a/drivers/usb/gadget/function/f_qdss.h b/drivers/usb/gadget/function/f_qdss.h
index 72edb90..57c76f8 100644
--- a/drivers/usb/gadget/function/f_qdss.h
+++ b/drivers/usb/gadget/function/f_qdss.h
@@ -59,6 +59,10 @@
 	struct usb_qdss_ch ch;
 	struct list_head ctrl_read_pool;
 	struct list_head ctrl_write_pool;
+
+	/* for mdm channel SW path */
+	struct list_head data_write_pool;
+
 	struct work_struct connect_w;
 	struct work_struct disconnect_w;
 	spinlock_t lock;
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index c99d547..fbc942d 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -594,6 +594,14 @@
 	opts->streaming_maxpacket = clamp(opts->streaming_maxpacket, 1U, 3072U);
 	opts->streaming_maxburst = min(opts->streaming_maxburst, 15U);
 
+	/* For SS, wMaxPacketSize has to be 1024 if bMaxBurst is not 0 */
+	if (opts->streaming_maxburst &&
+	    (opts->streaming_maxpacket % 1024) != 0) {
+		opts->streaming_maxpacket = roundup(opts->streaming_maxpacket, 1024);
+		INFO(cdev, "overriding streaming_maxpacket to %d\n",
+		     opts->streaming_maxpacket);
+	}
+
 	/* Fill in the FS/HS/SS Video Streaming specific descriptors from the
 	 * module parameters.
 	 *
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index f69dbd4..b8534d3 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1819,8 +1819,10 @@
 
 	spin_lock_irq (&dev->lock);
 	value = -EINVAL;
-	if (dev->buf)
+	if (dev->buf) {
+		kfree(kbuf);
 		goto fail;
+	}
 	dev->buf = kbuf;
 
 	/* full or low speed config */
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 243febf..658b8da 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -389,20 +389,6 @@
 	  dynamically linked module called "udc-xilinx" and force all
 	  gadget drivers to also be dynamically linked.
 
-config USB_CI13XXX_MSM
-	tristate "MIPS USB CI13xxx for MSM"
-	select USB_MSM_OTG
-	help
-	  MSM SoC has chipidea USB controller.  This driver uses
-	  ci13xxx_udc core.
-	  This driver depends on OTG driver for PHY initialization,
-	  clock management, powering up VBUS, and power management.
-	  This driver is not supported on boards like trout which
-	  has an external PHY.
-
-	  Say "y" to link the driver statically, or "m" to build a
-	  dynamically linked module called "ci13xxx_msm" and force all
-	  gadget drivers to also be dynamically linked.
 #
 # LAST -- dummy/emulated controller
 #
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index 33f3987..d133252 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -1146,15 +1146,15 @@
 	 */
 	while (!list_empty(&ep->queue)) {
 		struct net2280_request	*req;
-		u32			tmp;
+		u32 req_dma_count;
 
 		req = list_entry(ep->queue.next,
 				struct net2280_request, queue);
 		if (!req->valid)
 			break;
 		rmb();
-		tmp = le32_to_cpup(&req->td->dmacount);
-		if ((tmp & BIT(VALID_BIT)) != 0)
+		req_dma_count = le32_to_cpup(&req->td->dmacount);
+		if ((req_dma_count & BIT(VALID_BIT)) != 0)
 			break;
 
 		/* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short"
@@ -1163,40 +1163,41 @@
 		 */
 		if (unlikely(req->td->dmadesc == 0)) {
 			/* paranoia */
-			tmp = readl(&ep->dma->dmacount);
-			if (tmp & DMA_BYTE_COUNT_MASK)
+			u32 const ep_dmacount = readl(&ep->dma->dmacount);
+
+			if (ep_dmacount & DMA_BYTE_COUNT_MASK)
 				break;
 			/* single transfer mode */
-			dma_done(ep, req, tmp, 0);
+			dma_done(ep, req, req_dma_count, 0);
 			num_completed++;
 			break;
 		} else if (!ep->is_in &&
 			   (req->req.length % ep->ep.maxpacket) &&
 			   !(ep->dev->quirks & PLX_PCIE)) {
 
-			tmp = readl(&ep->regs->ep_stat);
+			u32 const ep_stat = readl(&ep->regs->ep_stat);
 			/* AVOID TROUBLE HERE by not issuing short reads from
 			 * your gadget driver.  That helps avoids errata 0121,
 			 * 0122, and 0124; not all cases trigger the warning.
 			 */
-			if ((tmp & BIT(NAK_OUT_PACKETS)) == 0) {
+			if ((ep_stat & BIT(NAK_OUT_PACKETS)) == 0) {
 				ep_warn(ep->dev, "%s lost packet sync!\n",
 						ep->ep.name);
 				req->req.status = -EOVERFLOW;
 			} else {
-				tmp = readl(&ep->regs->ep_avail);
-				if (tmp) {
+				u32 const ep_avail = readl(&ep->regs->ep_avail);
+				if (ep_avail) {
 					/* fifo gets flushed later */
 					ep->out_overflow = 1;
 					ep_dbg(ep->dev,
 						"%s dma, discard %d len %d\n",
-						ep->ep.name, tmp,
+						ep->ep.name, ep_avail,
 						req->req.length);
 					req->req.status = -EOVERFLOW;
 				}
 			}
 		}
-		dma_done(ep, req, tmp, 0);
+		dma_done(ep, req, req_dma_count, 0);
 		num_completed++;
 	}
 
diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
index a97da64..8a365aa 100644
--- a/drivers/usb/gadget/udc/pch_udc.c
+++ b/drivers/usb/gadget/udc/pch_udc.c
@@ -1523,7 +1523,6 @@
 		td = phys_to_virt(addr);
 		addr2 = (dma_addr_t)td->next;
 		pci_pool_free(dev->data_requests, td, addr);
-		td->next = 0x00;
 		addr = addr2;
 	}
 	req->chain_len = 1;
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
index 7fa60f5..afd6b86 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.c
+++ b/drivers/usb/gadget/udc/pxa27x_udc.c
@@ -2534,9 +2534,10 @@
 	usb_del_gadget_udc(&udc->gadget);
 	pxa_cleanup_debugfs(udc);
 
-	if (!IS_ERR_OR_NULL(udc->transceiver))
+	if (!IS_ERR_OR_NULL(udc->transceiver)) {
 		usb_unregister_notifier(udc->transceiver, &pxa27x_udc_phy);
-	usb_put_phy(udc->transceiver);
+		usb_put_phy(udc->transceiver);
+	}
 
 	udc->transceiver = NULL;
 	the_controller = NULL;
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index bb89e24..2197a50 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -222,7 +222,7 @@
 #define USB3_EP0_SS_MAX_PACKET_SIZE	512
 #define USB3_EP0_HSFS_MAX_PACKET_SIZE	64
 #define USB3_EP0_BUF_SIZE		8
-#define USB3_MAX_NUM_PIPES		30
+#define USB3_MAX_NUM_PIPES		6	/* This includes PIPE 0 */
 #define USB3_WAIT_US			3
 
 struct renesas_usb3;
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index 1a2614a..3ff6468 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -837,7 +837,7 @@
 			default:		/* unknown */
 				break;
 			}
-			temp = (cap >> 8) & 0xff;
+			offset = (cap >> 8) & 0xff;
 		}
 	}
 #endif
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index c99121a6..2911b72 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -996,6 +996,12 @@
 	if (!vdev)
 		return;
 
+	if (vdev->real_port == 0 ||
+			vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
+		xhci_dbg(xhci, "Bad vdev->real_port.\n");
+		goto out;
+	}
+
 	tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
 	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
 		/* is this a hub device that added a tt_info to the tts list */
@@ -1009,6 +1015,7 @@
 			}
 		}
 	}
+out:
 	/* we are now at a leaf device */
 	xhci_free_virt_device(xhci, slot_id);
 }
@@ -1025,10 +1032,9 @@
 		return 0;
 	}
 
-	xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
-	if (!xhci->devs[slot_id])
+	dev = kzalloc(sizeof(*dev), flags);
+	if (!dev)
 		return 0;
-	dev = xhci->devs[slot_id];
 
 	/* Allocate the (output) device context that will be used in the HC. */
 	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
@@ -1076,9 +1082,18 @@
 		 &xhci->dcbaa->dev_context_ptrs[slot_id],
 		 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));
 
+	xhci->devs[slot_id] = dev;
+
 	return 1;
 fail:
-	xhci_free_virt_device(xhci, slot_id);
+	if (dev->eps[0].ring)
+		xhci_ring_free(xhci, dev->eps[0].ring);
+	if (dev->in_ctx)
+		xhci_free_container_ctx(xhci, dev->in_ctx);
+	if (dev->out_ctx)
+		xhci_free_container_ctx(xhci, dev->out_ctx);
+	kfree(dev);
+
 	return 0;
 }
 
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index f2365a4..ce9e457 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -632,13 +632,13 @@
 		goto power_off_phys;
 	}
 
-	if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
-		xhci->shared_hcd->can_do_streams = 1;
-
 	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
 	if (ret)
 		goto put_usb3_hcd;
 
+	if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
+		xhci->shared_hcd->can_do_streams = 1;
+
 	ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
 	if (ret)
 		goto dealloc_usb2_hcd;
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index c87ef38..f6782a3 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -190,6 +190,9 @@
 		xhci->quirks |= XHCI_BROKEN_STREAMS;
 	}
 	if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
+			pdev->device == 0x0014)
+		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+	if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
 			pdev->device == 0x0015)
 		xhci->quirks |= XHCI_RESET_ON_RESUME;
 	if (pdev->vendor == PCI_VENDOR_ID_VIA)
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index c7596a7..a1dedf0 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -193,7 +193,7 @@
 {
 	const struct of_device_id *match;
 	const struct hc_driver	*driver;
-	struct device		*sysdev;
+	struct device		*sysdev, *phydev;
 	struct xhci_hcd		*xhci;
 	struct resource         *res;
 	struct usb_hcd		*hcd;
@@ -220,6 +220,9 @@
 	 * 3. xhci_plat is grandchild of a pci device (dwc3-pci)
 	 */
 	sysdev = &pdev->dev;
+	phydev = &pdev->dev;
+	if (sysdev->parent && !sysdev->of_node && sysdev->parent->of_node)
+		phydev = sysdev->parent;
 	/*
 	 * If sysdev->parent->parent is available and part of IOMMU group
 	 * (indicating possible usage of SMMU enablement), then use
@@ -327,7 +330,7 @@
 	if (device_property_read_u32(&pdev->dev, "usb-core-id", &xhci->core_id))
 		xhci->core_id = -EINVAL;
 
-	hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0);
+	hcd->usb_phy = devm_usb_get_phy_by_phandle(phydev, "usb-phy", 0);
 	if (IS_ERR(hcd->usb_phy)) {
 		ret = PTR_ERR(hcd->usb_phy);
 		if (ret == -EPROBE_DEFER)
@@ -486,6 +489,7 @@
 static struct platform_driver usb_xhci_driver = {
 	.probe	= xhci_plat_probe,
 	.remove	= xhci_plat_remove,
+	.shutdown	= usb_hcd_platform_shutdown,
 	.driver	= {
 		.name = "xhci-hcd",
 		.pm = DEV_PM_OPS,
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index e185bbe..09ae74e 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -3132,7 +3132,7 @@
 {
 	u32 maxp, total_packet_count;
 
-	/* MTK xHCI is mostly 0.97 but contains some features from 1.0 */
+	/* MTK xHCI 0.96 contains some features from 1.0 */
 	if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST))
 		return ((td_total_len - transferred) >> 10);
 
@@ -3141,8 +3141,8 @@
 	    trb_buff_len == td_total_len)
 		return 0;
 
-	/* for MTK xHCI, TD size doesn't include this TRB */
-	if (xhci->quirks & XHCI_MTK_HOST)
+	/* for MTK xHCI 0.96, TD size include this TRB, but not in 1.x */
+	if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100))
 		trb_buff_len = 0;
 
 	maxp = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
diff --git a/drivers/usb/misc/usb3503.c b/drivers/usb/misc/usb3503.c
index 8e7737d..03be5d5 100644
--- a/drivers/usb/misc/usb3503.c
+++ b/drivers/usb/misc/usb3503.c
@@ -292,6 +292,8 @@
 	if (gpio_is_valid(hub->gpio_reset)) {
 		err = devm_gpio_request_one(dev, hub->gpio_reset,
 				GPIOF_OUT_INIT_LOW, "usb3503 reset");
+		/* Datasheet defines a hardware reset to be at least 100us */
+		usleep_range(100, 10000);
 		if (err) {
 			dev_err(dev,
 				"unable to request GPIO %d as reset pin (%d)\n",
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index 1a874a1..80b37d2 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -1002,7 +1002,9 @@
 		break;
 
 	case MON_IOCQ_RING_SIZE:
+		mutex_lock(&rp->fetch_lock);
 		ret = rp->b_size;
+		mutex_unlock(&rp->fetch_lock);
 		break;
 
 	case MON_IOCT_RING_SIZE:
@@ -1229,12 +1231,16 @@
 	unsigned long offset, chunk_idx;
 	struct page *pageptr;
 
+	mutex_lock(&rp->fetch_lock);
 	offset = vmf->pgoff << PAGE_SHIFT;
-	if (offset >= rp->b_size)
+	if (offset >= rp->b_size) {
+		mutex_unlock(&rp->fetch_lock);
 		return VM_FAULT_SIGBUS;
+	}
 	chunk_idx = offset / CHUNK_SIZE;
 	pageptr = rp->b_vec[chunk_idx].pg;
 	get_page(pageptr);
+	mutex_unlock(&rp->fetch_lock);
 	vmf->page = pageptr;
 	return 0;
 }
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index bacee0f..ea5bad4 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -302,7 +302,15 @@
 			musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
 			portstate(musb->port1_status |= USB_PORT_STAT_POWER);
 			del_timer(&otg_workaround);
-		} else {
+		} else if (!(musb->int_usb & MUSB_INTR_BABBLE)){
+			/*
+			 * When a babble condition happens, the drvvbus
+			 * interrupt is also generated. Ignore this drvvbus
+			 * interrupt and let the babble interrupt handler
+			 * recover the controller; otherwise, the host-mode
+			 * flag is lost due to the MUSB_DEV_MODE() call below
+			 * and the babble recovery logic will not be called.
+			 */
 			musb->is_active = 0;
 			MUSB_DEV_MODE(musb);
 			otg->default_a = 0;
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index f76d347..17e8edb 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -253,17 +253,4 @@
 	  the high-speed PHY which is usually paired with either the ChipIdea or
 	  Synopsys DWC3 USB IPs on MSM SOCs. This driver expects to configure the
 	  PHY with a dedicated register I/O memory region.
-
-config USB_MSM_OTG
-	tristate "Qualcomm on-chip USB OTG controller support"
-	depends on (USB || USB_GADGET) && (ARCH_QCOM || COMPILE_TEST)
-	select USB_PHY
-	help
-	  Enable this to support the USB OTG transceiver on Qualcomm chips. It
-	  handles PHY initialization, clock management, and workarounds
-	  required after resetting the hardware and power management.
-	  This driver is required even for peripheral only or host only
-	  mode configurations.
-	  This driver is not supported on boards like trout which
-	  has an external PHY.
 endmenu
diff --git a/drivers/usb/phy/Makefile b/drivers/usb/phy/Makefile
index 7e9ffa0..285659d 100644
--- a/drivers/usb/phy/Makefile
+++ b/drivers/usb/phy/Makefile
@@ -31,4 +31,3 @@
 obj-$(CONFIG_USB_MSM_SSPHY_QMP)     	+= phy-msm-ssusb-qmp.o
 obj-$(CONFIG_MSM_QUSB_PHY)              += phy-msm-qusb.o phy-msm-qusb-v2.o
 obj-$(CONFIG_MSM_HSUSB_PHY)		+= phy-msm-snps-hs.o
-obj-$(CONFIG_USB_MSM_OTG)		+= phy-msm-usb.o
diff --git a/drivers/usb/phy/phy-isp1301.c b/drivers/usb/phy/phy-isp1301.c
index db68156..b3b33cf 100644
--- a/drivers/usb/phy/phy-isp1301.c
+++ b/drivers/usb/phy/phy-isp1301.c
@@ -33,6 +33,12 @@
 };
 MODULE_DEVICE_TABLE(i2c, isp1301_id);
 
+static const struct of_device_id isp1301_of_match[] = {
+	{.compatible = "nxp,isp1301" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, isp1301_of_match);
+
 static struct i2c_client *isp1301_i2c_client;
 
 static int __isp1301_write(struct isp1301 *isp, u8 reg, u8 value, u8 clear)
@@ -130,6 +136,7 @@
 static struct i2c_driver isp1301_driver = {
 	.driver = {
 		.name = DRV_NAME,
+		.of_match_table = of_match_ptr(isp1301_of_match),
 	},
 	.probe = isp1301_probe,
 	.remove = isp1301_remove,
diff --git a/drivers/usb/phy/phy-msm-qusb-v2.c b/drivers/usb/phy/phy-msm-qusb-v2.c
index cce17e0..cc1a0ea 100644
--- a/drivers/usb/phy/phy-msm-qusb-v2.c
+++ b/drivers/usb/phy/phy-msm-qusb-v2.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -26,6 +26,7 @@
 #include <linux/regulator/machine.h>
 #include <linux/usb/phy.h>
 #include <linux/reset.h>
+#include <linux/nvmem-consumer.h>
 #include <linux/debugfs.h>
 #include <linux/hrtimer.h>
 
@@ -146,8 +147,65 @@
 	u8			tune[5];
 
 	struct hrtimer		timer;
+	int			soc_min_rev;
 };
 
+#ifdef CONFIG_NVMEM
+/* Parse qfprom data for deciding on errata work-arounds */
+static long qfprom_read(struct device *dev, const char *name)
+{
+	struct nvmem_cell *cell;
+	ssize_t len = 0;
+	u32 *buf, val = 0;
+	long err = 0;
+
+	cell = nvmem_cell_get(dev, name);
+	if (IS_ERR(cell)) {
+		err = PTR_ERR(cell);
+		dev_err(dev, "failed opening nvmem cell err : %ld\n", err);
+		/* If entry does not exist, then that is not an error */
+		if (err == -ENOENT)
+			err = 0;
+		return err;
+	}
+
+	/* nvmem_cell_read() returns a kmalloc'd copy or an ERR_PTR */
+	buf = (u32 *)nvmem_cell_read(cell, &len);
+	if (IS_ERR(buf)) {
+		/* must not dereference an ERR_PTR value */
+		err = PTR_ERR(buf);
+		dev_err(dev, "Failed reading nvmem cell, err: %ld\n", err);
+	} else if (!len) {
+		dev_err(dev, "Zero bytes fetched from nvmem cell\n");
+		kfree(buf);
+		err = -EINVAL;
+	} else {
+		val = *buf;
+		kfree(buf);
+	}
+
+	nvmem_cell_put(cell);
+	return err ? err : (long) val;
+}
+
+/* Reads the SoC version */
+static int qusb_phy_get_socrev(struct device *dev, struct qusb_phy *qphy)
+{
+	/* negative value is an errno from qfprom_read (may be -EPROBE_DEFER) */
+	qphy->soc_min_rev = qfprom_read(dev, "minor_rev");
+	if (qphy->soc_min_rev < 0)
+		dev_err(dev, "failed getting soc_min_rev, err : %d\n",
+				qphy->soc_min_rev);
+	return qphy->soc_min_rev;
+}
+#else
+/* Reads the SoC version */
+static int qusb_phy_get_socrev(struct device *dev, struct qusb_phy *qphy)
+{
+	return 0;
+}
+#endif
+
 static void qusb_phy_enable_clocks(struct qusb_phy *qphy, bool on)
 {
 	dev_dbg(qphy->phy.dev, "%s(): clocks_enabled:%d on:%d\n",
@@ -1125,6 +1183,11 @@
 		return PTR_ERR(qphy->vdda18);
 	}
 
+	ret = qusb_phy_get_socrev(&pdev->dev, qphy);
+	if (ret == -EPROBE_DEFER) {
+		dev_err(&pdev->dev, "SoC version rd: fail: defer for now\n");
+		return ret;
+	}
 	qphy->pinctrl = devm_pinctrl_get(dev);
 	if (IS_ERR(qphy->pinctrl)) {
 		ret = PTR_ERR(qphy->pinctrl);
@@ -1159,7 +1222,14 @@
 	qphy->phy.type			= USB_PHY_TYPE_USB2;
 	qphy->phy.notify_connect        = qusb_phy_notify_connect;
 	qphy->phy.notify_disconnect     = qusb_phy_notify_disconnect;
-	qphy->phy.disable_chirp		= qusb_phy_disable_chirp;
+
+	/*
+	 * The qusb_phy_disable_chirp workaround is only required on the
+	 * base SoC revision; skip it when a newer revision is reported.
+	 */
+	if (qphy->soc_min_rev == 0)
+		qphy->phy.disable_chirp	= qusb_phy_disable_chirp;
+
 	qphy->phy.start_port_reset	= qusb_phy_enable_ext_pulldown;
 
 	ret = usb_add_phy_dev(&qphy->phy);
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
deleted file mode 100644
index 6170656..0000000
--- a/drivers/usb/phy/phy-msm-usb.c
+++ /dev/null
@@ -1,5471 +0,0 @@
-/* Copyright (c) 2009-2017, Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
-#include <linux/of_platform.h>
-#include <linux/uaccess.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/pm_runtime.h>
-#include <linux/suspend.h>
-#include <linux/of.h>
-#include <linux/dma-mapping.h>
-#include <linux/pinctrl/consumer.h>
-#include <linux/irqchip/msm-mpm-irq.h>
-#include <linux/pm_wakeup.h>
-#include <linux/reset.h>
-#include <linux/extcon.h>
-#include <soc/qcom/scm.h>
-
-#include <linux/usb.h>
-#include <linux/usb/otg.h>
-#include <linux/usb/ulpi.h>
-#include <linux/usb/gadget.h>
-#include <linux/usb/hcd.h>
-#include <linux/usb/msm_hsusb.h>
-#include <linux/usb/msm_hsusb_hw.h>
-#include <linux/regulator/consumer.h>
-#include <linux/regulator/driver.h>
-#include <linux/regulator/machine.h>
-#include <linux/qpnp/qpnp-adc.h>
-
-#include <linux/msm-bus.h>
-
-/**
- * Requested USB votes for BUS bandwidth
- *
- * USB_NO_PERF_VOTE     BUS Vote for inactive USB session or disconnect
- * USB_MAX_PERF_VOTE    Maximum BUS bandwidth vote
- * USB_MIN_PERF_VOTE    Minimum BUS bandwidth vote (for some hw same as NO_PERF)
- *
- */
-enum usb_bus_vote {
-	USB_NO_PERF_VOTE = 0,
-	USB_MAX_PERF_VOTE,
-	USB_MIN_PERF_VOTE,
-};
-
-/**
- * Supported USB modes
- *
- * USB_PERIPHERAL       Only peripheral mode is supported.
- * USB_HOST             Only host mode is supported.
- * USB_OTG              OTG mode is supported.
- *
- */
-enum usb_mode_type {
-	USB_NONE = 0,
-	USB_PERIPHERAL,
-	USB_HOST,
-	USB_OTG,
-};
-
-/**
- * OTG control
- *
- * OTG_NO_CONTROL	Id/VBUS notifications not required. Useful in host
- *                      only configuration.
- * OTG_PHY_CONTROL	Id/VBUS notifications comes form USB PHY.
- * OTG_PMIC_CONTROL	Id/VBUS notifications comes from PMIC hardware.
- * OTG_USER_CONTROL	Id/VBUS notifcations comes from User via sysfs.
- *
- */
-enum otg_control_type {
-	OTG_NO_CONTROL = 0,
-	OTG_PHY_CONTROL,
-	OTG_PMIC_CONTROL,
-	OTG_USER_CONTROL,
-};
-
-/**
- * PHY used in
- *
- * INVALID_PHY			Unsupported PHY
- * CI_PHY                      Chipidea PHY
- * SNPS_PICO_PHY               Synopsis Pico PHY
- * SNPS_FEMTO_PHY              Synopsis Femto PHY
- * QUSB_ULPI_PHY
- *
- */
-enum msm_usb_phy_type {
-	INVALID_PHY = 0,
-	CI_PHY,			/* not supported */
-	SNPS_PICO_PHY,
-	SNPS_FEMTO_PHY,
-	QUSB_ULPI_PHY,
-};
-
-#define IDEV_CHG_MAX	1500
-#define IUNIT		100
-#define IDEV_HVDCP_CHG_MAX	1800
-
-/**
- * Used different VDDCX voltage values
- */
-enum usb_vdd_value {
-	VDD_NONE = 0,
-	VDD_MIN,
-	VDD_MAX,
-	VDD_VAL_MAX,
-};
-
-/**
- * struct msm_otg_platform_data - platform device data
- *              for msm_otg driver.
- * @phy_init_seq: PHY configuration sequence values. Value of -1 is reserved as
- *              "do not overwrite default value at this address".
- * @vbus_power: VBUS power on/off routine.It should return result
- *		as success(zero value) or failure(non-zero value).
- * @power_budget: VBUS power budget in mA (0 will be treated as 500mA).
- * @mode: Supported mode (OTG/peripheral/host).
- * @otg_control: OTG switch controlled by user/Id pin
- * @default_mode: Default operational mode. Applicable only if
- *              OTG switch is controller by user.
- * @pmic_id_irq: IRQ number assigned for PMIC USB ID line.
- * @mpm_otgsessvld_int: MPM wakeup pin assigned for OTG SESSVLD
- *              interrupt. Used when .otg_control == OTG_PHY_CONTROL.
- * @mpm_dpshv_int: MPM wakeup pin assigned for DP SHV interrupt.
- *		Used during host bus suspend.
- * @mpm_dmshv_int: MPM wakeup pin assigned for DM SHV interrupt.
- *		Used during host bus suspend.
- * @disable_reset_on_disconnect: perform USB PHY and LINK reset
- *              on USB cable disconnection.
- * @pnoc_errata_fix: workaround needed for PNOC hardware bug that
- *              affects USB performance.
- * @enable_lpm_on_suspend: Enable the USB core to go into Low
- *              Power Mode, when USB bus is suspended but cable
- *              is connected.
- * @core_clk_always_on_workaround: Don't disable core_clk when
- *              USB enters LPM.
- * @delay_lpm_on_disconnect: Use a delay before entering LPM
- *              upon USB cable disconnection.
- * @enable_sec_phy: Use second HSPHY with USB2 core
- * @bus_scale_table: parameters for bus bandwidth requirements
- * @log2_itc: value of 2^(log2_itc-1) will be used as the
- *              interrupt threshold (ITC), when log2_itc is
- *              between 1 to 7.
- * @l1_supported: enable link power management support.
- * @dpdm_pulldown_added: Indicates whether pull down resistors are
- *		connected on data lines or not.
- * @vddmin_gpio: dedictaed gpio in the platform that is used for
- *		pullup the D+ line in case of bus suspend with
- *		phy retention.
- * @enable_ahb2ahb_bypass: Indicates whether enable AHB2AHB BYPASS
- *		mode with controller in device mode.
- * @bool disable_retention_with_vdd_min: Indicates whether to enable
-		allowing VDDmin without putting PHY into retention.
- * @bool enable_phy_id_pullup: Indicates whether phy id pullup is
-		enabled or not.
- * @usb_id_gpio: Gpio used for USB ID detection.
- * @hub_reset_gpio: Gpio used for hub reset.
- * @switch_sel_gpio: Gpio used for controlling switch that
-		routing D+/D- from the USB HUB to the USB jack type B
-		for peripheral mode.
- * @bool phy_dvdd_always_on: PHY DVDD is supplied by always on PMIC LDO.
- * @bool emulation: Indicates whether we are running on emulation platform.
- * @bool enable_streaming: Indicates whether streaming to be enabled by default.
- * @bool enable_axi_prefetch: Indicates whether AXI Prefetch interface is used
-		for improving data performance.
- * @bool enable_sdp_typec_current_limit: Indicates whether type-c current for
-		sdp charger to be limited.
- * @usbeth_reset_gpio: Gpio used for external usb-to-eth reset.
- */
-struct msm_otg_platform_data {
-	int *phy_init_seq;
-	int phy_init_sz;
-	int (*vbus_power)(bool on);
-	unsigned int power_budget;
-	enum usb_mode_type mode;
-	enum otg_control_type otg_control;
-	enum usb_mode_type default_mode;
-	enum msm_usb_phy_type phy_type;
-	int pmic_id_irq;
-	unsigned int mpm_otgsessvld_int;
-	unsigned int mpm_dpshv_int;
-	unsigned int mpm_dmshv_int;
-	bool disable_reset_on_disconnect;
-	bool pnoc_errata_fix;
-	bool enable_lpm_on_dev_suspend;
-	bool core_clk_always_on_workaround;
-	bool delay_lpm_on_disconnect;
-	bool dp_manual_pullup;
-	bool enable_sec_phy;
-	struct msm_bus_scale_pdata *bus_scale_table;
-	int log2_itc;
-	bool l1_supported;
-	bool dpdm_pulldown_added;
-	int vddmin_gpio;
-	bool enable_ahb2ahb_bypass;
-	bool disable_retention_with_vdd_min;
-	bool enable_phy_id_pullup;
-	int usb_id_gpio;
-	int hub_reset_gpio;
-	int usbeth_reset_gpio;
-	int switch_sel_gpio;
-	bool phy_dvdd_always_on;
-	bool emulation;
-	bool enable_streaming;
-	bool enable_axi_prefetch;
-	bool enable_sdp_typec_current_limit;
-	bool vbus_low_as_hostmode;
-};
-
-#define USB_CHG_BLOCK_ULPI	1
-
-#define USB_REQUEST_5V		1
-#define USB_REQUEST_9V		2
-/**
- * struct msm_usb_chg_info - MSM USB charger block details.
- * @chg_block_type: The type of charger block. QSCRATCH/ULPI.
- * @page_offset: USB charger register base may not be aligned to
- *              PAGE_SIZE.  The kernel driver aligns the base
- *              address and use it for memory mapping.  This
- *              page_offset is used by user space to calaculate
- *              the corret charger register base address.
- * @length: The length of the charger register address space.
- */
-struct msm_usb_chg_info {
-	uint32_t chg_block_type;
-	__kernel_off_t page_offset;
-	size_t length;
-};
-
-/* Get the MSM USB charger block information */
-#define MSM_USB_EXT_CHG_INFO _IOW('M', 0, struct msm_usb_chg_info)
-
-/* Vote against USB hardware low power mode */
-#define MSM_USB_EXT_CHG_BLOCK_LPM _IOW('M', 1, int)
-
-/* To tell kernel about voltage being voted */
-#define MSM_USB_EXT_CHG_VOLTAGE_INFO _IOW('M', 2, int)
-
-/* To tell kernel about voltage request result */
-#define MSM_USB_EXT_CHG_RESULT _IOW('M', 3, int)
-
-/* To tell kernel whether charger connected is external charger or not */
-#define MSM_USB_EXT_CHG_TYPE _IOW('M', 4, int)
-
-#define MSM_USB_BASE	(motg->regs)
-#define MSM_USB_PHY_CSR_BASE (motg->phy_csr_regs)
-
-#define DRIVER_NAME	"msm_otg"
-
-#define CHG_RECHECK_DELAY	(jiffies + msecs_to_jiffies(2000))
-#define ULPI_IO_TIMEOUT_USEC	(10 * 1000)
-#define USB_PHY_3P3_VOL_MIN	3050000 /* uV */
-#define USB_PHY_3P3_VOL_MAX	3300000 /* uV */
-#define USB_PHY_3P3_HPM_LOAD	50000	/* uA */
-#define USB_PHY_3P3_LPM_LOAD	4000	/* uA */
-
-#define USB_PHY_1P8_VOL_MIN	1800000 /* uV */
-#define USB_PHY_1P8_VOL_MAX	1800000 /* uV */
-#define USB_PHY_1P8_HPM_LOAD	50000	/* uA */
-#define USB_PHY_1P8_LPM_LOAD	4000	/* uA */
-
-#define USB_PHY_VDD_DIG_VOL_NONE	0 /*uV */
-#define USB_PHY_VDD_DIG_VOL_MIN	1045000 /* uV */
-#define USB_PHY_VDD_DIG_VOL_MAX	1320000 /* uV */
-
-#define USB_SUSPEND_DELAY_TIME	(500 * HZ/1000) /* 500 msec */
-
-#define USB_DEFAULT_SYSTEM_CLOCK 80000000	/* 80 MHz */
-
-#define PM_QOS_SAMPLE_SEC	2
-#define PM_QOS_THRESHOLD	400
-
-#define MICRO_5V 5000000
-#define MICRO_9V 9000000
-
-#define SDP_CURRENT_UA 500000
-#define CDP_CURRENT_UA 1500000
-#define DCP_CURRENT_UA 1500000
-#define HVDCP_CURRENT_UA 3000000
-
-enum msm_otg_phy_reg_mode {
-	USB_PHY_REG_OFF,
-	USB_PHY_REG_ON,
-	USB_PHY_REG_LPM_ON,
-	USB_PHY_REG_LPM_OFF,
-	USB_PHY_REG_3P3_ON,
-	USB_PHY_REG_3P3_OFF,
-};
-
-static char *override_phy_init;
-module_param(override_phy_init, charp, 0644);
-MODULE_PARM_DESC(override_phy_init,
-	"Override HSUSB PHY Init Settings");
-
-unsigned int lpm_disconnect_thresh = 1000;
-module_param(lpm_disconnect_thresh, uint, 0644);
-MODULE_PARM_DESC(lpm_disconnect_thresh,
-	"Delay before entering LPM on USB disconnect");
-
-static bool floated_charger_enable;
-module_param(floated_charger_enable, bool, 0644);
-MODULE_PARM_DESC(floated_charger_enable,
-	"Whether to enable floated charger");
-
-/* by default debugging is enabled */
-static unsigned int enable_dbg_log = 1;
-module_param(enable_dbg_log, uint, 0644);
-MODULE_PARM_DESC(enable_dbg_log, "Debug buffer events");
-
-/* Max current to be drawn for HVDCP charger */
-static int hvdcp_max_current = IDEV_HVDCP_CHG_MAX;
-module_param(hvdcp_max_current, int, 0644);
-MODULE_PARM_DESC(hvdcp_max_current, "max current drawn for HVDCP charger");
-
-/* Max current to be drawn for DCP charger */
-static int dcp_max_current = IDEV_CHG_MAX;
-module_param(dcp_max_current, int, 0644);
-MODULE_PARM_DESC(dcp_max_current, "max current drawn for DCP charger");
-
-static DECLARE_COMPLETION(pmic_vbus_init);
-static struct msm_otg *the_msm_otg;
-static bool debug_bus_voting_enabled;
-
-static struct regulator *hsusb_3p3;
-static struct regulator *hsusb_1p8;
-static struct regulator *hsusb_vdd;
-static struct regulator *vbus_otg;
-static struct power_supply *psy;
-
-static int vdd_val[VDD_VAL_MAX];
-static u32 bus_freqs[USB_NOC_NUM_VOTE][USB_NUM_BUS_CLOCKS]  /*bimc,snoc,pcnoc*/;
-static char bus_clkname[USB_NUM_BUS_CLOCKS][20] = {"bimc_clk", "snoc_clk",
-						"pcnoc_clk"};
-static bool bus_clk_rate_set;
-
-static void dbg_inc(unsigned int *idx)
-{
-	*idx = (*idx + 1) & (DEBUG_MAX_MSG-1);
-}
-
-static void
-msm_otg_dbg_log_event(struct usb_phy *phy, char *event, int d1, int d2)
-{
-	struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
-	unsigned long flags;
-	unsigned long long t;
-	unsigned long nanosec;
-
-	if (!enable_dbg_log)
-		return;
-
-	write_lock_irqsave(&motg->dbg_lock, flags);
-	t = cpu_clock(smp_processor_id());
-	nanosec = do_div(t, 1000000000)/1000;
-	scnprintf(motg->buf[motg->dbg_idx], DEBUG_MSG_LEN,
-			"[%5lu.%06lu]: %s :%d:%d",
-			(unsigned long)t, nanosec, event, d1, d2);
-
-	motg->dbg_idx++;
-	motg->dbg_idx = motg->dbg_idx % DEBUG_MAX_MSG;
-	write_unlock_irqrestore(&motg->dbg_lock, flags);
-}
-
-static int msm_hsusb_ldo_init(struct msm_otg *motg, int init)
-{
-	int rc = 0;
-
-	if (init) {
-		hsusb_3p3 = devm_regulator_get(motg->phy.dev, "HSUSB_3p3");
-		if (IS_ERR(hsusb_3p3)) {
-			dev_err(motg->phy.dev, "unable to get hsusb 3p3\n");
-			return PTR_ERR(hsusb_3p3);
-		}
-
-		rc = regulator_set_voltage(hsusb_3p3, USB_PHY_3P3_VOL_MIN,
-				USB_PHY_3P3_VOL_MAX);
-		if (rc) {
-			dev_err(motg->phy.dev, "unable to set voltage level for hsusb 3p3\n"
-									);
-			return rc;
-		}
-		hsusb_1p8 = devm_regulator_get(motg->phy.dev, "HSUSB_1p8");
-		if (IS_ERR(hsusb_1p8)) {
-			dev_err(motg->phy.dev, "unable to get hsusb 1p8\n");
-			rc = PTR_ERR(hsusb_1p8);
-			goto put_3p3_lpm;
-		}
-		rc = regulator_set_voltage(hsusb_1p8, USB_PHY_1P8_VOL_MIN,
-				USB_PHY_1P8_VOL_MAX);
-		if (rc) {
-			dev_err(motg->phy.dev, "unable to set voltage level for hsusb 1p8\n"
-									);
-			goto put_1p8;
-		}
-
-		return 0;
-	}
-
-put_1p8:
-	regulator_set_voltage(hsusb_1p8, 0, USB_PHY_1P8_VOL_MAX);
-put_3p3_lpm:
-	regulator_set_voltage(hsusb_3p3, 0, USB_PHY_3P3_VOL_MAX);
-	return rc;
-}
-
-static int msm_hsusb_config_vddcx(int high)
-{
-	struct msm_otg *motg = the_msm_otg;
-	int max_vol = vdd_val[VDD_MAX];
-	int min_vol;
-	int ret;
-
-	min_vol = vdd_val[!!high];
-	ret = regulator_set_voltage(hsusb_vdd, min_vol, max_vol);
-	if (ret) {
-		pr_err("%s: unable to set the voltage for regulator HSUSB_VDDCX\n",
-								__func__);
-		return ret;
-	}
-
-	pr_debug("%s: min_vol:%d max_vol:%d\n", __func__, min_vol, max_vol);
-	msm_otg_dbg_log_event(&motg->phy, "CONFIG VDDCX", min_vol, max_vol);
-
-	return ret;
-}
-
-static int msm_hsusb_ldo_enable(struct msm_otg *motg,
-	enum msm_otg_phy_reg_mode mode)
-{
-	int ret = 0;
-
-	if (IS_ERR(hsusb_1p8)) {
-		pr_err("%s: HSUSB_1p8 is not initialized\n", __func__);
-		return -ENODEV;
-	}
-
-	if (IS_ERR(hsusb_3p3)) {
-		pr_err("%s: HSUSB_3p3 is not initialized\n", __func__);
-		return -ENODEV;
-	}
-
-	switch (mode) {
-	case USB_PHY_REG_ON:
-		ret = regulator_set_load(hsusb_1p8, USB_PHY_1P8_HPM_LOAD);
-		if (ret < 0) {
-			pr_err("%s: Unable to set HPM of the regulator HSUSB_1p8\n",
-								__func__);
-			return ret;
-		}
-
-		ret = regulator_enable(hsusb_1p8);
-		if (ret) {
-			dev_err(motg->phy.dev, "%s: unable to enable the hsusb 1p8\n",
-				__func__);
-			regulator_set_load(hsusb_1p8, 0);
-			return ret;
-		}
-
-	/* fall through */
-	case USB_PHY_REG_3P3_ON:
-		ret = regulator_set_load(hsusb_3p3, USB_PHY_3P3_HPM_LOAD);
-		if (ret < 0) {
-			pr_err("%s: Unable to set HPM of the regulator HSUSB_3p3\n",
-								__func__);
-			if (mode == USB_PHY_REG_ON) {
-				regulator_set_load(hsusb_1p8, 0);
-				regulator_disable(hsusb_1p8);
-			}
-			return ret;
-		}
-
-		ret = regulator_enable(hsusb_3p3);
-		if (ret) {
-			dev_err(motg->phy.dev, "%s: unable to enable the hsusb 3p3\n",
-				__func__);
-			regulator_set_load(hsusb_3p3, 0);
-			if (mode == USB_PHY_REG_ON) {
-				regulator_set_load(hsusb_1p8, 0);
-				regulator_disable(hsusb_1p8);
-			}
-			return ret;
-		}
-
-		break;
-
-	case USB_PHY_REG_OFF:
-		ret = regulator_disable(hsusb_1p8);
-		if (ret) {
-			dev_err(motg->phy.dev, "%s: unable to disable the hsusb 1p8\n",
-				__func__);
-			return ret;
-		}
-
-		ret = regulator_set_load(hsusb_1p8, 0);
-		if (ret < 0)
-			pr_err("%s: Unable to set LPM of the regulator HSUSB_1p8\n",
-								__func__);
-
-	/* fall through */
-	case USB_PHY_REG_3P3_OFF:
-		ret = regulator_disable(hsusb_3p3);
-		if (ret) {
-			dev_err(motg->phy.dev, "%s: unable to disable the hsusb 3p3\n",
-				 __func__);
-			return ret;
-		}
-		ret = regulator_set_load(hsusb_3p3, 0);
-		if (ret < 0)
-			pr_err("%s: Unable to set LPM of the regulator HSUSB_3p3\n",
-								__func__);
-
-		break;
-
-	case USB_PHY_REG_LPM_ON:
-		ret = regulator_set_load(hsusb_1p8, USB_PHY_1P8_LPM_LOAD);
-		if (ret < 0) {
-			pr_err("%s: Unable to set LPM of the regulator: HSUSB_1p8\n",
-				__func__);
-			return ret;
-		}
-
-		ret = regulator_set_load(hsusb_3p3, USB_PHY_3P3_LPM_LOAD);
-		if (ret < 0) {
-			pr_err("%s: Unable to set LPM of the regulator: HSUSB_3p3\n",
-				__func__);
-			regulator_set_load(hsusb_1p8, USB_PHY_REG_ON);
-			return ret;
-		}
-
-		break;
-
-	case USB_PHY_REG_LPM_OFF:
-		ret = regulator_set_load(hsusb_1p8, USB_PHY_1P8_HPM_LOAD);
-		if (ret < 0) {
-			pr_err("%s: Unable to set HPM of the regulator: HSUSB_1p8\n",
-				__func__);
-			return ret;
-		}
-
-		ret = regulator_set_load(hsusb_3p3, USB_PHY_3P3_HPM_LOAD);
-		if (ret < 0) {
-			pr_err("%s: Unable to set HPM of the regulator: HSUSB_3p3\n",
-				__func__);
-			regulator_set_load(hsusb_1p8, USB_PHY_REG_ON);
-			return ret;
-		}
-
-		break;
-
-	default:
-		pr_err("%s: Unsupported mode (%d).", __func__, mode);
-		return -ENOTSUPP;
-	}
-
-	pr_debug("%s: USB reg mode (%d) (OFF/HPM/LPM)\n", __func__, mode);
-	msm_otg_dbg_log_event(&motg->phy, "USB REG MODE", mode, ret);
-	return ret < 0 ? ret : 0;
-}
-
-static int ulpi_read(struct usb_phy *phy, u32 reg)
-{
-	struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
-	int cnt = 0;
-
-	if (motg->pdata->emulation)
-		return 0;
-
-	if (motg->pdata->phy_type == QUSB_ULPI_PHY && reg > 0x3F) {
-		pr_debug("%s: ULPI vendor-specific reg 0x%02x not supported\n",
-			__func__, reg);
-		return 0;
-	}
-
-	/* initiate read operation */
-	writel_relaxed(ULPI_RUN | ULPI_READ | ULPI_ADDR(reg),
-	       USB_ULPI_VIEWPORT);
-
-	/* wait for completion */
-	while (cnt < ULPI_IO_TIMEOUT_USEC) {
-		if (!(readl_relaxed(USB_ULPI_VIEWPORT) & ULPI_RUN))
-			break;
-		udelay(1);
-		cnt++;
-	}
-
-	if (cnt >= ULPI_IO_TIMEOUT_USEC) {
-		dev_err(phy->dev, "ulpi_read: timeout %08x\n",
-			readl_relaxed(USB_ULPI_VIEWPORT));
-		dev_err(phy->dev, "PORTSC: %08x USBCMD: %08x\n",
-			readl_relaxed(USB_PORTSC), readl_relaxed(USB_USBCMD));
-		return -ETIMEDOUT;
-	}
-	return ULPI_DATA_READ(readl_relaxed(USB_ULPI_VIEWPORT));
-}
-
-static int ulpi_write(struct usb_phy *phy, u32 val, u32 reg)
-{
-	struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
-	int cnt = 0;
-
-	if (motg->pdata->emulation)
-		return 0;
-
-	if (motg->pdata->phy_type == QUSB_ULPI_PHY && reg > 0x3F) {
-		pr_debug("%s: ULPI vendor-specific reg 0x%02x not supported\n",
-			__func__, reg);
-		return 0;
-	}
-
-	/* initiate write operation */
-	writel_relaxed(ULPI_RUN | ULPI_WRITE |
-	       ULPI_ADDR(reg) | ULPI_DATA(val),
-	       USB_ULPI_VIEWPORT);
-
-	/* wait for completion */
-	while (cnt < ULPI_IO_TIMEOUT_USEC) {
-		if (!(readl_relaxed(USB_ULPI_VIEWPORT) & ULPI_RUN))
-			break;
-		udelay(1);
-		cnt++;
-	}
-
-	if (cnt >= ULPI_IO_TIMEOUT_USEC) {
-		dev_err(phy->dev, "ulpi_write: timeout\n");
-		dev_err(phy->dev, "PORTSC: %08x USBCMD: %08x\n",
-			readl_relaxed(USB_PORTSC), readl_relaxed(USB_USBCMD));
-		return -ETIMEDOUT;
-	}
-	return 0;
-}
-
-static struct usb_phy_io_ops msm_otg_io_ops = {
-	.read = ulpi_read,
-	.write = ulpi_write,
-};
-
-static void ulpi_init(struct msm_otg *motg)
-{
-	struct msm_otg_platform_data *pdata = motg->pdata;
-	int aseq[10];
-	int *seq = NULL;
-
-	if (override_phy_init) {
-		pr_debug("%s(): HUSB PHY Init:%s\n", __func__,
-				override_phy_init);
-		get_options(override_phy_init, ARRAY_SIZE(aseq), aseq);
-		seq = &aseq[1];
-	} else {
-		seq = pdata->phy_init_seq;
-	}
-
-	if (!seq)
-		return;
-
-	while (seq[0] >= 0) {
-		if (override_phy_init)
-			pr_debug("ulpi: write 0x%02x to 0x%02x\n",
-					seq[0], seq[1]);
-
-		dev_vdbg(motg->phy.dev, "ulpi: write 0x%02x to 0x%02x\n",
-				seq[0], seq[1]);
-		msm_otg_dbg_log_event(&motg->phy, "ULPI WRITE", seq[0], seq[1]);
-		ulpi_write(&motg->phy, seq[0], seq[1]);
-		seq += 2;
-	}
-}
-
-static int msm_otg_phy_clk_reset(struct msm_otg *motg)
-{
-	int ret;
-
-	if (!motg->phy_reset_clk)
-		return 0;
-
-	if (motg->sleep_clk)
-		clk_disable_unprepare(motg->sleep_clk);
-	if (motg->phy_csr_clk)
-		clk_disable_unprepare(motg->phy_csr_clk);
-
-	ret = reset_control_assert(motg->phy_reset);
-	if (ret) {
-		pr_err("phy_reset_clk assert failed %d\n", ret);
-		return ret;
-	}
-	/*
-	 * As per databook, 10 usec delay is required between
-	 * PHY POR assert and de-assert.
-	 */
-	usleep_range(10, 15);
-	ret = reset_control_deassert(motg->phy_reset);
-	if (ret) {
-		pr_err("phy_reset_clk de-assert failed %d\n", ret);
-		return ret;
-	}
-	/*
-	 * As per databook, it takes 75 usec for PHY to stabilize
-	 * after the reset.
-	 */
-	usleep_range(80, 100);
-
-	if (motg->phy_csr_clk)
-		clk_prepare_enable(motg->phy_csr_clk);
-	if (motg->sleep_clk)
-		clk_prepare_enable(motg->sleep_clk);
-
-	return 0;
-}
-
-static int msm_otg_link_clk_reset(struct msm_otg *motg, bool assert)
-{
-	int ret;
-
-	if (assert) {
-		/* Using asynchronous block reset to the hardware */
-		dev_dbg(motg->phy.dev, "block_reset ASSERT\n");
-		clk_disable_unprepare(motg->pclk);
-		clk_disable_unprepare(motg->core_clk);
-		ret = reset_control_assert(motg->core_reset);
-		if (ret)
-			dev_err(motg->phy.dev, "usb hs_clk assert failed\n");
-	} else {
-		dev_dbg(motg->phy.dev, "block_reset DEASSERT\n");
-		ret = reset_control_deassert(motg->core_reset);
-		ndelay(200);
-		ret = clk_prepare_enable(motg->core_clk);
-		WARN(ret, "USB core_clk enable failed\n");
-		ret = clk_prepare_enable(motg->pclk);
-		WARN(ret, "USB pclk enable failed\n");
-		if (ret)
-			dev_err(motg->phy.dev, "usb hs_clk deassert failed\n");
-	}
-	return ret;
-}
-
-static int msm_otg_phy_reset(struct msm_otg *motg)
-{
-	u32 val;
-	int ret;
-	struct msm_otg_platform_data *pdata = motg->pdata;
-
-	/*
-	 * AHB2AHB Bypass mode shouldn't be enable before doing
-	 * async clock reset. If it is enable, disable the same.
-	 */
-	val = readl_relaxed(USB_AHBMODE);
-	if (val & AHB2AHB_BYPASS) {
-		pr_err("%s(): AHB2AHB_BYPASS SET: AHBMODE:%x\n",
-						__func__, val);
-		val &= ~AHB2AHB_BYPASS_BIT_MASK;
-		writel_relaxed(val | AHB2AHB_BYPASS_CLEAR, USB_AHBMODE);
-		pr_err("%s(): AHBMODE: %x\n", __func__,
-				readl_relaxed(USB_AHBMODE));
-	}
-
-	ret = msm_otg_link_clk_reset(motg, 1);
-	if (ret)
-		return ret;
-
-	msm_otg_phy_clk_reset(motg);
-
-	/* wait for 1ms delay as suggested in HPG. */
-	usleep_range(1000, 1200);
-
-	ret = msm_otg_link_clk_reset(motg, 0);
-	if (ret)
-		return ret;
-
-	if (pdata && pdata->enable_sec_phy)
-		writel_relaxed(readl_relaxed(USB_PHY_CTRL2) | (1<<16),
-							USB_PHY_CTRL2);
-	val = readl_relaxed(USB_PORTSC) & ~PORTSC_PTS_MASK;
-	writel_relaxed(val | PORTSC_PTS_ULPI, USB_PORTSC);
-
-	dev_info(motg->phy.dev, "phy_reset: success\n");
-	msm_otg_dbg_log_event(&motg->phy, "PHY RESET SUCCESS",
-			motg->inputs, motg->phy.otg->state);
-	return 0;
-}
-
-#define LINK_RESET_TIMEOUT_USEC		(250 * 1000)
-static int msm_otg_link_reset(struct msm_otg *motg)
-{
-	int cnt = 0;
-	struct msm_otg_platform_data *pdata = motg->pdata;
-
-	writel_relaxed(USBCMD_RESET, USB_USBCMD);
-	while (cnt < LINK_RESET_TIMEOUT_USEC) {
-		if (!(readl_relaxed(USB_USBCMD) & USBCMD_RESET))
-			break;
-		udelay(1);
-		cnt++;
-	}
-	if (cnt >= LINK_RESET_TIMEOUT_USEC)
-		return -ETIMEDOUT;
-
-	/* select ULPI phy */
-	writel_relaxed(0x80000000, USB_PORTSC);
-	writel_relaxed(0x0, USB_AHBBURST);
-	writel_relaxed(0x08, USB_AHBMODE);
-
-	if (pdata && pdata->enable_sec_phy)
-		writel_relaxed(readl_relaxed(USB_PHY_CTRL2) | (1<<16),
-								USB_PHY_CTRL2);
-	return 0;
-}
-
-#define QUSB2PHY_PORT_POWERDOWN		0xB4
-#define QUSB2PHY_PORT_UTMI_CTRL2	0xC4
-
-static void msm_usb_phy_reset(struct msm_otg *motg)
-{
-	u32 val;
-	int ret, *seq;
-
-	switch (motg->pdata->phy_type) {
-	case SNPS_PICO_PHY:
-		/* Assert USB PHY_PON */
-		val =  readl_relaxed(motg->usb_phy_ctrl_reg);
-		val &= ~PHY_POR_BIT_MASK;
-		val |= PHY_POR_ASSERT;
-		writel_relaxed(val, motg->usb_phy_ctrl_reg);
-
-		/* wait for minimum 10 microseconds as
-		 * suggested in HPG.
-		 */
-		usleep_range(10, 15);
-
-		/* Deassert USB PHY_PON */
-		val =  readl_relaxed(motg->usb_phy_ctrl_reg);
-		val &= ~PHY_POR_BIT_MASK;
-		val |= PHY_POR_DEASSERT;
-		writel_relaxed(val, motg->usb_phy_ctrl_reg);
-		break;
-	case QUSB_ULPI_PHY:
-		ret = reset_control_assert(motg->phy_reset);
-		if (ret) {
-			pr_err("phy_reset_clk assert failed %d\n", ret);
-			break;
-		}
-
-		/* need to delay 10us for PHY to reset */
-		usleep_range(10, 20);
-
-		ret = reset_control_deassert(motg->phy_reset);
-		if (ret) {
-			pr_err("phy_reset_clk de-assert failed %d\n", ret);
-			break;
-		}
-
-		/* Ensure that RESET operation is completed. */
-		mb();
-
-		writel_relaxed(0x23,
-				motg->phy_csr_regs + QUSB2PHY_PORT_POWERDOWN);
-		writel_relaxed(0x0,
-				motg->phy_csr_regs + QUSB2PHY_PORT_UTMI_CTRL2);
-
-		/* Program tuning parameters for PHY */
-		seq = motg->pdata->phy_init_seq;
-		if (seq) {
-			while (seq[0] >= 0) {
-				writel_relaxed(seq[1],
-						motg->phy_csr_regs + seq[0]);
-				seq += 2;
-			}
-		}
-
-		/* ensure above writes are completed before re-enabling PHY */
-		wmb();
-		writel_relaxed(0x22,
-				motg->phy_csr_regs + QUSB2PHY_PORT_POWERDOWN);
-		break;
-	case SNPS_FEMTO_PHY:
-		if (!motg->phy_por_clk) {
-			pr_err("phy_por_clk missing\n");
-			break;
-		}
-		ret = reset_control_assert(motg->phy_por_reset);
-		if (ret) {
-			pr_err("phy_por_clk assert failed %d\n", ret);
-			break;
-		}
-		/*
-		 * The Femto PHY is POR reset in the following scenarios.
-		 *
-		 * 1. After overriding the parameter registers.
-		 * 2. Low power mode exit from PHY retention.
-		 *
-		 * Ensure that SIDDQ is cleared before bringing the PHY
-		 * out of reset.
-		 *
-		 */
-
-		val = readb_relaxed(USB_PHY_CSR_PHY_CTRL_COMMON0);
-		val &= ~SIDDQ;
-		writeb_relaxed(val, USB_PHY_CSR_PHY_CTRL_COMMON0);
-
-		/*
-		 * As per databook, 10 usec delay is required between
-		 * PHY POR assert and de-assert.
-		 */
-		usleep_range(10, 20);
-		ret = reset_control_deassert(motg->phy_por_reset);
-		if (ret) {
-			pr_err("phy_por_clk de-assert failed %d\n", ret);
-			break;
-		}
-		/*
-		 * As per databook, it takes 75 usec for PHY to stabilize
-		 * after the reset.
-		 */
-		usleep_range(80, 100);
-		break;
-	default:
-		break;
-	}
-	/* Ensure that RESET operation is completed. */
-	mb();
-}
-
-static int msm_otg_reset(struct usb_phy *phy)
-{
-	struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
-	struct msm_otg_platform_data *pdata = motg->pdata;
-	int ret;
-	u32 val = 0;
-	u32 ulpi_val = 0;
-
-	msm_otg_dbg_log_event(&motg->phy, "USB RESET", phy->otg->state,
-			get_pm_runtime_counter(phy->dev));
-	/*
-	 * USB PHY and Link reset also reset the USB BAM.
-	 * Thus perform reset operation only once to avoid
-	 * USB BAM reset on other cases e.g. USB cable disconnections.
-	 * If hardware reported error then it must be reset for recovery.
-	 */
-	if (motg->err_event_seen)
-		dev_info(phy->dev, "performing USB h/w reset for recovery\n");
-	else if (pdata->disable_reset_on_disconnect && motg->reset_counter)
-		return 0;
-
-	motg->reset_counter++;
-
-	disable_irq(motg->irq);
-	if (motg->phy_irq)
-		disable_irq(motg->phy_irq);
-
-	ret = msm_otg_phy_reset(motg);
-	if (ret) {
-		dev_err(phy->dev, "phy_reset failed\n");
-		if (motg->phy_irq)
-			enable_irq(motg->phy_irq);
-
-		enable_irq(motg->irq);
-		return ret;
-	}
-
-	if (motg->phy_irq)
-		enable_irq(motg->phy_irq);
-
-	enable_irq(motg->irq);
-	ret = msm_otg_link_reset(motg);
-	if (ret) {
-		dev_err(phy->dev, "link reset failed\n");
-		return ret;
-	}
-
-	msleep(100);
-
-	/* Reset USB PHY after performing USB Link RESET */
-	msm_usb_phy_reset(motg);
-
-	/* Program USB PHY Override registers. */
-	ulpi_init(motg);
-
-	/*
-	 * It is required to reset USB PHY after programming
-	 * the USB PHY Override registers to get the new
-	 * values into effect.
-	 */
-	msm_usb_phy_reset(motg);
-
-	if (pdata->otg_control == OTG_PHY_CONTROL) {
-		val = readl_relaxed(USB_OTGSC);
-		if (pdata->mode == USB_OTG) {
-			ulpi_val = ULPI_INT_IDGRD | ULPI_INT_SESS_VALID;
-			val |= OTGSC_IDIE | OTGSC_BSVIE;
-		} else if (pdata->mode == USB_PERIPHERAL) {
-			ulpi_val = ULPI_INT_SESS_VALID;
-			val |= OTGSC_BSVIE;
-		}
-		writel_relaxed(val, USB_OTGSC);
-		ulpi_write(phy, ulpi_val, ULPI_USB_INT_EN_RISE);
-		ulpi_write(phy, ulpi_val, ULPI_USB_INT_EN_FALL);
-	} else if (pdata->otg_control == OTG_PMIC_CONTROL) {
-		ulpi_write(phy, OTG_COMP_DISABLE,
-			ULPI_SET(ULPI_PWR_CLK_MNG_REG));
-		if (motg->phy_irq)
-			writeb_relaxed(USB_PHY_ID_MASK,
-				USB2_PHY_USB_PHY_INTERRUPT_MASK1);
-	}
-
-	if (motg->caps & ALLOW_VDD_MIN_WITH_RETENTION_DISABLED)
-		writel_relaxed(readl_relaxed(USB_OTGSC) & ~(OTGSC_IDPU),
-				USB_OTGSC);
-
-	msm_otg_dbg_log_event(&motg->phy, "USB RESET DONE", phy->otg->state,
-			get_pm_runtime_counter(phy->dev));
-
-	if (pdata->enable_axi_prefetch)
-		writel_relaxed(readl_relaxed(USB_HS_APF_CTRL) | (APF_CTRL_EN),
-							USB_HS_APF_CTRL);
-
-	/*
-	 * Disable USB BAM as block reset resets USB BAM registers.
-	 */
-	msm_usb_bam_enable(CI_CTRL, false);
-
-	return 0;
-}
-
-static void msm_otg_kick_sm_work(struct msm_otg *motg)
-{
-	if (atomic_read(&motg->in_lpm))
-		motg->resume_pending = true;
-
-	/* For device mode, resume now. Let pm_resume handle other cases */
-	if (atomic_read(&motg->pm_suspended) &&
-			motg->phy.otg->state != OTG_STATE_B_SUSPEND) {
-		motg->sm_work_pending = true;
-	} else if (!motg->sm_work_pending) {
-		/* process event only if previous one is not pending */
-		queue_work(motg->otg_wq, &motg->sm_work);
-	}
-}
-
-/*
- * UDC calls usb_phy_set_suspend() to notify during bus suspend/resume.
- * Update relevant state-machine inputs and queue sm_work.
- * LPM enter/exit doesn't happen directly from this routine.
- */
-
-static int msm_otg_set_suspend(struct usb_phy *phy, int suspend)
-{
-	struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
-
-	pr_debug("%s(%d) in %s state\n", __func__, suspend,
-				usb_otg_state_string(phy->otg->state));
-	msm_otg_dbg_log_event(phy, "SET SUSPEND", suspend, phy->otg->state);
-
-	if (!(motg->caps & ALLOW_LPM_ON_DEV_SUSPEND))
-		return 0;
-
-	if (suspend) {
-		/* called in suspend interrupt context */
-		pr_debug("peripheral bus suspend\n");
-		msm_otg_dbg_log_event(phy, "PERIPHERAL BUS SUSPEND",
-				motg->inputs, phy->otg->state);
-
-		set_bit(A_BUS_SUSPEND, &motg->inputs);
-	} else {
-		/* host resume or remote-wakeup */
-		pr_debug("peripheral bus resume\n");
-		msm_otg_dbg_log_event(phy, "PERIPHERAL BUS RESUME",
-				motg->inputs, phy->otg->state);
-
-		clear_bit(A_BUS_SUSPEND, &motg->inputs);
-	}
-	/* use kick_sm_work to handle race with pm_resume */
-	msm_otg_kick_sm_work(motg);
-
-	return 0;
-}
-
-static int msm_otg_bus_freq_set(struct msm_otg *motg, enum usb_noc_mode mode)
-{
-	int i, ret;
-	long rate;
-
-	for (i = 0; i < USB_NUM_BUS_CLOCKS; i++) {
-		rate = bus_freqs[mode][i];
-		if (!rate) {
-			pr_debug("%s rate not available\n", bus_clkname[i]);
-			continue;
-		}
-
-		ret = clk_set_rate(motg->bus_clks[i], rate);
-		if (ret) {
-			pr_err("%s set rate failed: %d\n", bus_clkname[i], ret);
-			return ret;
-		}
-		pr_debug("%s set to %lu Hz\n", bus_clkname[i],
-			 clk_get_rate(motg->bus_clks[i]));
-		msm_otg_dbg_log_event(&motg->phy, "OTG BUS FREQ SET", i, rate);
-	}
-
-	bus_clk_rate_set = true;
-
-	return 0;
-}
-
-static int msm_otg_bus_freq_get(struct msm_otg *motg)
-{
-	struct device *dev = motg->phy.dev;
-	struct device_node *np = dev->of_node;
-	int len = 0, i, count = USB_NUM_BUS_CLOCKS;
-
-	if (!np)
-		return -EINVAL;
-
-	of_find_property(np, "qcom,bus-clk-rate", &len);
-	/* SVS requires extra set of frequencies for perf_mode sysfs node */
-	if (motg->default_noc_mode == USB_NOC_SVS_VOTE)
-		count *= 2;
-
-	if (!len || (len / sizeof(u32) != count)) {
-		pr_err("Invalid bus rate:%d %u\n", len, motg->default_noc_mode);
-		return -EINVAL;
-	}
-	of_property_read_u32_array(np, "qcom,bus-clk-rate", bus_freqs[0],
-				   count);
-	for (i = 0; i < USB_NUM_BUS_CLOCKS; i++) {
-		if (bus_freqs[0][i] == 0) {
-			motg->bus_clks[i] = NULL;
-			pr_debug("%s not available\n", bus_clkname[i]);
-			continue;
-		}
-
-		motg->bus_clks[i] = devm_clk_get(dev, bus_clkname[i]);
-		if (IS_ERR(motg->bus_clks[i])) {
-			pr_err("%s get failed\n", bus_clkname[i]);
-			return PTR_ERR(motg->bus_clks[i]);
-		}
-	}
-	return 0;
-}
-
-static void msm_otg_bus_clks_enable(struct msm_otg *motg)
-{
-	int i;
-	int ret;
-
-	if (!bus_clk_rate_set || motg->bus_clks_enabled)
-		return;
-
-	for (i = 0; i < USB_NUM_BUS_CLOCKS; i++) {
-		if (motg->bus_clks[i] == NULL)
-			continue;
-		ret = clk_prepare_enable(motg->bus_clks[i]);
-		if (ret) {
-			pr_err("%s enable rate failed: %d\n", bus_clkname[i],
-				ret);
-			goto err_clk_en;
-		}
-	}
-	motg->bus_clks_enabled = true;
-	return;
-err_clk_en:
-	for (--i; i >= 0; --i) {
-		if (motg->bus_clks[i] != NULL)
-			clk_disable_unprepare(motg->bus_clks[i]);
-	}
-}
-
-static void msm_otg_bus_clks_disable(struct msm_otg *motg)
-{
-	int i;
-
-	if (!bus_clk_rate_set || !motg->bus_clks_enabled)
-		return;
-
-	for (i = 0; i < USB_NUM_BUS_CLOCKS; i++) {
-		if (motg->bus_clks[i] != NULL)
-			clk_disable_unprepare(motg->bus_clks[i]);
-	}
-	motg->bus_clks_enabled = false;
-}
-
-static void msm_otg_bus_vote(struct msm_otg *motg, enum usb_bus_vote vote)
-{
-	int ret;
-	struct msm_otg_platform_data *pdata = motg->pdata;
-
-	msm_otg_dbg_log_event(&motg->phy, "BUS VOTE", vote,
-						motg->phy.otg->state);
-	/* Check if target allows min_vote to be same as no_vote */
-	if (pdata->bus_scale_table &&
-	    vote >= pdata->bus_scale_table->num_usecases)
-		vote = USB_NO_PERF_VOTE;
-
-	if (motg->bus_perf_client) {
-		ret = msm_bus_scale_client_update_request(
-			motg->bus_perf_client, vote);
-		if (ret)
-			dev_err(motg->phy.dev, "%s: Failed to vote (%d)\n"
-				   "for bus bw %d\n", __func__, vote, ret);
-	}
-
-	if (vote == USB_MAX_PERF_VOTE)
-		msm_otg_bus_clks_enable(motg);
-	else
-		msm_otg_bus_clks_disable(motg);
-}
-
-static void msm_otg_enable_phy_hv_int(struct msm_otg *motg)
-{
-	bool bsv_id_hv_int = false;
-	bool dp_dm_hv_int = false;
-	u32 val;
-
-	if (motg->pdata->otg_control == OTG_PHY_CONTROL ||
-				motg->phy_irq)
-		bsv_id_hv_int = true;
-	if (motg->host_bus_suspend || motg->device_bus_suspend)
-		dp_dm_hv_int = true;
-
-	if (!bsv_id_hv_int && !dp_dm_hv_int)
-		return;
-
-	switch (motg->pdata->phy_type) {
-	case SNPS_PICO_PHY:
-		val = readl_relaxed(motg->usb_phy_ctrl_reg);
-		if (bsv_id_hv_int)
-			val |= (PHY_IDHV_INTEN | PHY_OTGSESSVLDHV_INTEN);
-		if (dp_dm_hv_int)
-			val |= PHY_CLAMP_DPDMSE_EN;
-		writel_relaxed(val, motg->usb_phy_ctrl_reg);
-		break;
-	case SNPS_FEMTO_PHY:
-		if (bsv_id_hv_int) {
-			val = readb_relaxed(USB_PHY_CSR_PHY_CTRL1);
-			val |= ID_HV_CLAMP_EN_N;
-			writeb_relaxed(val, USB_PHY_CSR_PHY_CTRL1);
-		}
-
-		if (dp_dm_hv_int) {
-			val = readb_relaxed(USB_PHY_CSR_PHY_CTRL3);
-			val |= CLAMP_MPM_DPSE_DMSE_EN_N;
-			writeb_relaxed(val, USB_PHY_CSR_PHY_CTRL3);
-		}
-		break;
-	default:
-		break;
-	}
-	pr_debug("%s: bsv_id_hv = %d dp_dm_hv_int = %d\n",
-			__func__, bsv_id_hv_int, dp_dm_hv_int);
-	msm_otg_dbg_log_event(&motg->phy, "PHY HV INTR ENABLED",
-			bsv_id_hv_int, dp_dm_hv_int);
-}
-
-static void msm_otg_disable_phy_hv_int(struct msm_otg *motg)
-{
-	bool bsv_id_hv_int = false;
-	bool dp_dm_hv_int = false;
-	u32 val;
-
-	if (motg->pdata->otg_control == OTG_PHY_CONTROL ||
-				motg->phy_irq)
-		bsv_id_hv_int = true;
-	if (motg->host_bus_suspend || motg->device_bus_suspend)
-		dp_dm_hv_int = true;
-
-	if (!bsv_id_hv_int && !dp_dm_hv_int)
-		return;
-
-	switch (motg->pdata->phy_type) {
-	case SNPS_PICO_PHY:
-		val = readl_relaxed(motg->usb_phy_ctrl_reg);
-		if (bsv_id_hv_int)
-			val &= ~(PHY_IDHV_INTEN | PHY_OTGSESSVLDHV_INTEN);
-		if (dp_dm_hv_int)
-			val &= ~PHY_CLAMP_DPDMSE_EN;
-		writel_relaxed(val, motg->usb_phy_ctrl_reg);
-		break;
-	case SNPS_FEMTO_PHY:
-		if (bsv_id_hv_int) {
-			val = readb_relaxed(USB_PHY_CSR_PHY_CTRL1);
-			val &= ~ID_HV_CLAMP_EN_N;
-			writeb_relaxed(val, USB_PHY_CSR_PHY_CTRL1);
-		}
-
-		if (dp_dm_hv_int) {
-			val = readb_relaxed(USB_PHY_CSR_PHY_CTRL3);
-			val &= ~CLAMP_MPM_DPSE_DMSE_EN_N;
-			writeb_relaxed(val, USB_PHY_CSR_PHY_CTRL3);
-		}
-		break;
-	default:
-		break;
-	}
-	pr_debug("%s: bsv_id_hv = %d dp_dm_hv_int = %d\n",
-			__func__, bsv_id_hv_int, dp_dm_hv_int);
-	msm_otg_dbg_log_event(&motg->phy, "PHY HV INTR DISABLED",
-			bsv_id_hv_int, dp_dm_hv_int);
-}
-
-static void msm_otg_enter_phy_retention(struct msm_otg *motg)
-{
-	u32 val;
-
-	switch (motg->pdata->phy_type) {
-	case SNPS_PICO_PHY:
-		val = readl_relaxed(motg->usb_phy_ctrl_reg);
-		val &= ~PHY_RETEN;
-		writel_relaxed(val, motg->usb_phy_ctrl_reg);
-		break;
-	case SNPS_FEMTO_PHY:
-		/* Retention is supported via SIDDQ */
-		val = readb_relaxed(USB_PHY_CSR_PHY_CTRL_COMMON0);
-		val |= SIDDQ;
-		writeb_relaxed(val, USB_PHY_CSR_PHY_CTRL_COMMON0);
-		break;
-	default:
-		break;
-	}
-	pr_debug("USB PHY is in retention\n");
-	msm_otg_dbg_log_event(&motg->phy, "USB PHY ENTER RETENTION",
-			motg->pdata->phy_type, 0);
-}
-
-static void msm_otg_exit_phy_retention(struct msm_otg *motg)
-{
-	int val;
-
-	switch (motg->pdata->phy_type) {
-	case SNPS_PICO_PHY:
-		val = readl_relaxed(motg->usb_phy_ctrl_reg);
-		val |= PHY_RETEN;
-		writel_relaxed(val, motg->usb_phy_ctrl_reg);
-		break;
-	case SNPS_FEMTO_PHY:
-		/*
-		 * It is required to do USB block reset to bring Femto PHY out
-		 * of retention.
-		 */
-		msm_otg_reset(&motg->phy);
-		break;
-	default:
-		break;
-	}
-	pr_debug("USB PHY is exited from retention\n");
-	msm_otg_dbg_log_event(&motg->phy, "USB PHY EXIT RETENTION",
-			motg->pdata->phy_type, 0);
-}
-
-static void msm_id_status_w(struct work_struct *w);
-static irqreturn_t msm_otg_phy_irq_handler(int irq, void *data)
-{
-	struct msm_otg *motg = data;
-
-	msm_otg_dbg_log_event(&motg->phy, "PHY ID IRQ",
-			atomic_read(&motg->in_lpm), motg->phy.otg->state);
-	if (atomic_read(&motg->in_lpm)) {
-		pr_debug("PHY ID IRQ in LPM\n");
-		motg->phy_irq_pending = true;
-		msm_otg_kick_sm_work(motg);
-	} else {
-		pr_debug("PHY ID IRQ outside LPM\n");
-		msm_id_status_w(&motg->id_status_work.work);
-	}
-
-	return IRQ_HANDLED;
-}
-
-#define PHY_SUSPEND_TIMEOUT_USEC (5 * 1000)
-#define PHY_DEVICE_BUS_SUSPEND_TIMEOUT_USEC 100
-#define PHY_RESUME_TIMEOUT_USEC	(100 * 1000)
-
-#define PHY_SUSPEND_RETRIES_MAX 3
-
-static void msm_otg_set_vbus_state(int online);
-static void msm_otg_perf_vote_update(struct msm_otg *motg, bool perf_mode);
-
-#ifdef CONFIG_PM_SLEEP
-static int msm_otg_suspend(struct msm_otg *motg)
-{
-	struct usb_phy *phy = &motg->phy;
-	struct usb_bus *bus = phy->otg->host;
-	struct msm_otg_platform_data *pdata = motg->pdata;
-	int cnt;
-	bool host_bus_suspend, device_bus_suspend, dcp, prop_charger;
-	bool floated_charger, sm_work_busy;
-	u32 cmd_val;
-	u32 portsc, config2;
-	u32 func_ctrl;
-	int phcd_retry_cnt = 0, ret;
-	unsigned int phy_suspend_timeout;
-
-	cnt = 0;
-	msm_otg_dbg_log_event(phy, "LPM ENTER START",
-			motg->inputs, phy->otg->state);
-
-	if (atomic_read(&motg->in_lpm))
-		return 0;
-
-	cancel_delayed_work_sync(&motg->perf_vote_work);
-
-	disable_irq(motg->irq);
-	if (motg->phy_irq)
-		disable_irq(motg->phy_irq);
-lpm_start:
-	host_bus_suspend = phy->otg->host && !test_bit(ID, &motg->inputs);
-	device_bus_suspend = phy->otg->gadget && test_bit(ID, &motg->inputs) &&
-		test_bit(A_BUS_SUSPEND, &motg->inputs) &&
-		motg->caps & ALLOW_LPM_ON_DEV_SUSPEND;
-
-	if (host_bus_suspend)
-		msm_otg_perf_vote_update(motg, false);
-	/*
-	 * Allow putting PHY into SIDDQ with wall charger connected in
-	 * case of external charger detection.
-	 */
-	dcp = (motg->chg_type == USB_DCP_CHARGER) && !motg->is_ext_chg_dcp;
-	prop_charger = motg->chg_type == USB_NONCOMPLIANT_CHARGER;
-	floated_charger = motg->chg_type == USB_FLOATED_CHARGER;
-
-	/* !BSV, but its handling is in progress by otg sm_work */
-	sm_work_busy = !test_bit(B_SESS_VLD, &motg->inputs) &&
-			phy->otg->state == OTG_STATE_B_PERIPHERAL;
-
-	/* Perform block reset to recover from UDC error events on disconnect */
-	if (motg->err_event_seen)
-		msm_otg_reset(phy);
-
-	/* Enable line state difference wakeup fix for only device and host
-	 * bus suspend scenarios.  Otherwise PHY can not be suspended when
-	 * a charger that pulls DP/DM high is connected.
-	 */
-	config2 = readl_relaxed(USB_GENCONFIG_2);
-	if (device_bus_suspend)
-		config2 |= GENCONFIG_2_LINESTATE_DIFF_WAKEUP_EN;
-	else
-		config2 &= ~GENCONFIG_2_LINESTATE_DIFF_WAKEUP_EN;
-	writel_relaxed(config2, USB_GENCONFIG_2);
-
-	/*
-	 * Abort suspend when,
-	 * 1. charging detection in progress due to cable plug-in
-	 * 2. host mode activation in progress due to Micro-A cable insertion
-	 * 3. !BSV, but its handling is in progress by otg sm_work
-	 * Don't abort suspend in case of dcp detected by PMIC
-	 */
-
-	if ((test_bit(B_SESS_VLD, &motg->inputs) && !device_bus_suspend &&
-		!dcp && !motg->is_ext_chg_dcp && !prop_charger &&
-			!floated_charger) || sm_work_busy) {
-		msm_otg_dbg_log_event(phy, "LPM ENTER ABORTED",
-				motg->inputs, motg->chg_type);
-		enable_irq(motg->irq);
-		if (motg->phy_irq)
-			enable_irq(motg->phy_irq);
-		return -EBUSY;
-	}
-
-	if (motg->caps & ALLOW_VDD_MIN_WITH_RETENTION_DISABLED) {
-		/* put the controller in non-driving mode */
-		func_ctrl = ulpi_read(phy, ULPI_FUNC_CTRL);
-		func_ctrl &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
-		func_ctrl |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING;
-		ulpi_write(phy, func_ctrl, ULPI_FUNC_CTRL);
-		ulpi_write(phy, ULPI_IFC_CTRL_AUTORESUME,
-						ULPI_CLR(ULPI_IFC_CTRL));
-	}
-
-	/*
-	 * PHY suspend sequence as mentioned in the databook.
-	 *
-	 * Device bus suspend: The controller may abort PHY suspend if
-	 * there is an incoming reset or resume from the host. If PHCD
-	 * is not set within 100 usec. Abort the LPM sequence.
-	 *
-	 * Host bus suspend: If the peripheral is attached, PHY is already
-	 * put into suspend along with the peripheral bus suspend. poll for
-	 * PHCD upto 5 msec. If the peripheral is not attached i.e entering
-	 * LPM with Micro-A cable, set the PHCD and poll for it for 5 msec.
-	 *
-	 * No cable connected: Set the PHCD to suspend the PHY. Poll for PHCD
-	 * upto 5 msec.
-	 *
-	 * The controller aborts PHY suspend only in device bus suspend case.
-	 * In other cases, it is observed that PHCD may not get set within
-	 * the timeout. If so, set the PHCD again and poll for it before
-	 * reset recovery.
-	 */
-
-phcd_retry:
-	if (device_bus_suspend)
-		phy_suspend_timeout = PHY_DEVICE_BUS_SUSPEND_TIMEOUT_USEC;
-	else
-		phy_suspend_timeout = PHY_SUSPEND_TIMEOUT_USEC;
-
-	cnt = 0;
-	portsc = readl_relaxed(USB_PORTSC);
-	if (!(portsc & PORTSC_PHCD)) {
-		writel_relaxed(portsc | PORTSC_PHCD,
-				USB_PORTSC);
-		while (cnt < phy_suspend_timeout) {
-			if (readl_relaxed(USB_PORTSC) & PORTSC_PHCD)
-				break;
-			udelay(1);
-			cnt++;
-		}
-	}
-
-	if (cnt >= phy_suspend_timeout) {
-		if (phcd_retry_cnt > PHY_SUSPEND_RETRIES_MAX) {
-			msm_otg_dbg_log_event(phy, "PHY SUSPEND FAILED",
-				phcd_retry_cnt, phy->otg->state);
-			dev_err(phy->dev, "PHY suspend failed\n");
-			ret = -EBUSY;
-			goto phy_suspend_fail;
-		}
-
-		if (device_bus_suspend) {
-			dev_dbg(phy->dev, "PHY suspend aborted\n");
-			ret = -EBUSY;
-			goto phy_suspend_fail;
-		} else {
-			if (phcd_retry_cnt++ < PHY_SUSPEND_RETRIES_MAX) {
-				dev_dbg(phy->dev, "PHY suspend retry\n");
-				goto phcd_retry;
-			} else {
-				dev_err(phy->dev, "reset attempt during PHY suspend\n");
-				phcd_retry_cnt++;
-				motg->reset_counter = 0;
-				msm_otg_reset(phy);
-				goto lpm_start;
-			}
-		}
-	}
-
-	/*
-	 * PHY has capability to generate interrupt asynchronously in low
-	 * power mode (LPM). This interrupt is level triggered. So USB IRQ
-	 * line must be disabled till async interrupt enable bit is cleared
-	 * in USBCMD register. Assert STP (ULPI interface STOP signal) to
-	 * block data communication from PHY.
-	 *
-	 * PHY retention mode is disallowed while entering to LPM with wall
-	 * charger connected.  But PHY is put into suspend mode. Hence
-	 * enable asynchronous interrupt to detect charger disconnection when
-	 * PMIC notifications are unavailable.
-	 */
-	cmd_val = readl_relaxed(USB_USBCMD);
-	if (host_bus_suspend || device_bus_suspend ||
-		(motg->pdata->otg_control == OTG_PHY_CONTROL))
-		cmd_val |= ASYNC_INTR_CTRL | ULPI_STP_CTRL;
-	else
-		cmd_val |= ULPI_STP_CTRL;
-	writel_relaxed(cmd_val, USB_USBCMD);
-
-	/*
-	 * BC1.2 spec mandates PD to enable VDP_SRC when charging from DCP.
-	 * PHY retention and collapse can not happen with VDP_SRC enabled.
-	 */
-
-
-	/*
-	 * We come here in 3 scenarios.
-	 *
-	 * (1) No cable connected (out of session):
-	 *	- BSV/ID HV interrupts are enabled for PHY based detection.
-	 *	- PHY is put in retention.
-	 *	- If allowed (PMIC based detection), PHY is power collapsed.
-	 *	- DVDD (CX/MX) minimization and XO shutdown are allowed.
-	 *	- The wakeup is through VBUS/ID interrupt from PHY/PMIC/user.
-	 * (2) USB wall charger:
-	 *	- BSV/ID HV interrupts are enabled for PHY based detection.
-	 *	- For BC1.2 compliant charger, retention is not allowed to
-	 *	keep VDP_SRC on. XO shutdown is allowed.
-	 *	- The wakeup is through VBUS/ID interrupt from PHY/PMIC/user.
-	 * (3) Device/Host Bus suspend (if LPM is enabled):
-	 *	- BSV/ID HV interrupts are enabled for PHY based detection.
-	 *	- D+/D- MPM pin are configured to wakeup from line state
-	 *	change through PHY HV interrupts. PHY HV interrupts are
-	 *	also enabled. If MPM pins are not available, retention and
-	 *	XO is not allowed.
-	 *	- PHY is put into retention only if a gpio is used to keep
-	 *	the D+ pull-up. ALLOW_BUS_SUSPEND_WITHOUT_REWORK capability
-	 *	is set means, PHY can enable D+ pull-up or D+/D- pull-down
-	 *	without any re-work and PHY should not be put into retention.
-	 *	- DVDD (CX/MX) minimization and XO shutdown is allowed if
-	 *	ALLOW_BUS_SUSPEND_WITHOUT_REWORK is set (PHY DVDD is supplied
-	 *	via PMIC LDO) or board level re-work is present.
-	 *	- The wakeup is through VBUS/ID interrupt from PHY/PMIC/user
-	 *	or USB link asynchronous interrupt for line state change.
-	 *
-	 */
-	motg->host_bus_suspend = host_bus_suspend;
-	motg->device_bus_suspend = device_bus_suspend;
-
-	if (motg->caps & ALLOW_PHY_RETENTION && !device_bus_suspend && !dcp &&
-		 (!host_bus_suspend || (motg->caps &
-		ALLOW_BUS_SUSPEND_WITHOUT_REWORK) ||
-		  ((motg->caps & ALLOW_HOST_PHY_RETENTION)
-		&& (pdata->dpdm_pulldown_added || !(portsc & PORTSC_CCS))))) {
-		msm_otg_enable_phy_hv_int(motg);
-		if ((!host_bus_suspend || !(motg->caps &
-			ALLOW_BUS_SUSPEND_WITHOUT_REWORK)) &&
-			!(motg->caps & ALLOW_VDD_MIN_WITH_RETENTION_DISABLED)) {
-			msm_otg_enter_phy_retention(motg);
-			motg->lpm_flags |= PHY_RETENTIONED;
-		}
-	} else if (device_bus_suspend && !dcp &&
-			(pdata->mpm_dpshv_int || pdata->mpm_dmshv_int)) {
-		/* DP DM HV interrupts are used for bus resume from XO off */
-		msm_otg_enable_phy_hv_int(motg);
-		if (motg->caps & ALLOW_PHY_RETENTION && pdata->vddmin_gpio) {
-
-			/*
-			 * This is HW WA needed when PHY_CLAMP_DPDMSE_EN is
-			 * enabled and we put the phy in retention mode.
-			 * Without this WA, the async_irq will be fired right
-			 * after suspending whithout any bus resume.
-			 */
-			config2 = readl_relaxed(USB_GENCONFIG_2);
-			config2 &= ~GENCONFIG_2_DPSE_DMSE_HV_INTR_EN;
-			writel_relaxed(config2, USB_GENCONFIG_2);
-
-			msm_otg_enter_phy_retention(motg);
-			motg->lpm_flags |= PHY_RETENTIONED;
-			gpio_direction_output(pdata->vddmin_gpio, 1);
-		}
-	}
-
-	/* Ensure that above operation is completed before turning off clocks */
-	mb();
-	/* Consider clocks on workaround flag only in case of bus suspend */
-	if (!(phy->otg->state == OTG_STATE_B_PERIPHERAL &&
-			test_bit(A_BUS_SUSPEND, &motg->inputs)) ||
-			!motg->pdata->core_clk_always_on_workaround) {
-		clk_disable_unprepare(motg->pclk);
-		clk_disable_unprepare(motg->core_clk);
-		if (motg->phy_csr_clk)
-			clk_disable_unprepare(motg->phy_csr_clk);
-		motg->lpm_flags |= CLOCKS_DOWN;
-	}
-
-	/* usb phy no more require TCXO clock, hence vote for TCXO disable */
-	if (!host_bus_suspend || (motg->caps &
-		ALLOW_BUS_SUSPEND_WITHOUT_REWORK) ||
-		((motg->caps & ALLOW_HOST_PHY_RETENTION) &&
-		(pdata->dpdm_pulldown_added || !(portsc & PORTSC_CCS)))) {
-		if (motg->xo_clk) {
-			clk_disable_unprepare(motg->xo_clk);
-			motg->lpm_flags |= XO_SHUTDOWN;
-		}
-	}
-
-	if (motg->caps & ALLOW_PHY_POWER_COLLAPSE &&
-			!host_bus_suspend && !dcp && !device_bus_suspend) {
-		msm_hsusb_ldo_enable(motg, USB_PHY_REG_OFF);
-		motg->lpm_flags |= PHY_PWR_COLLAPSED;
-	} else if (motg->caps & ALLOW_PHY_REGULATORS_LPM &&
-			!host_bus_suspend && !device_bus_suspend && !dcp) {
-		msm_hsusb_ldo_enable(motg, USB_PHY_REG_LPM_ON);
-		motg->lpm_flags |= PHY_REGULATORS_LPM;
-	}
-
-	if (motg->lpm_flags & PHY_RETENTIONED ||
-		(motg->caps & ALLOW_VDD_MIN_WITH_RETENTION_DISABLED)) {
-		regulator_disable(hsusb_vdd);
-		msm_hsusb_config_vddcx(0);
-	}
-
-	if (device_may_wakeup(phy->dev)) {
-		if (host_bus_suspend || device_bus_suspend) {
-			enable_irq_wake(motg->async_irq);
-			enable_irq_wake(motg->irq);
-		}
-
-		if (motg->phy_irq)
-			enable_irq_wake(motg->phy_irq);
-		if (motg->pdata->pmic_id_irq)
-			enable_irq_wake(motg->pdata->pmic_id_irq);
-		if (motg->ext_id_irq)
-			enable_irq_wake(motg->ext_id_irq);
-		if (pdata->otg_control == OTG_PHY_CONTROL &&
-			pdata->mpm_otgsessvld_int)
-			msm_mpm_set_pin_wake(pdata->mpm_otgsessvld_int, 1);
-		if ((host_bus_suspend || device_bus_suspend) &&
-				pdata->mpm_dpshv_int)
-			msm_mpm_set_pin_wake(pdata->mpm_dpshv_int, 1);
-		if ((host_bus_suspend || device_bus_suspend) &&
-				pdata->mpm_dmshv_int)
-			msm_mpm_set_pin_wake(pdata->mpm_dmshv_int, 1);
-	}
-	if (bus)
-		clear_bit(HCD_FLAG_HW_ACCESSIBLE, &(bus_to_hcd(bus))->flags);
-
-	msm_otg_bus_vote(motg, USB_NO_PERF_VOTE);
-
-	atomic_set(&motg->in_lpm, 1);
-
-	/* Enable ASYNC IRQ during LPM */
-	enable_irq(motg->async_irq);
-	if (motg->phy_irq)
-		enable_irq(motg->phy_irq);
-
-	enable_irq(motg->irq);
-	pm_relax(&motg->pdev->dev);
-
-	dev_dbg(phy->dev, "LPM caps = %lu flags = %lu\n",
-			motg->caps, motg->lpm_flags);
-	dev_info(phy->dev, "USB in low power mode\n");
-	msm_otg_dbg_log_event(phy, "LPM ENTER DONE",
-			motg->caps, motg->lpm_flags);
-
-	if (motg->err_event_seen) {
-		motg->err_event_seen = false;
-		if (motg->vbus_state != test_bit(B_SESS_VLD, &motg->inputs))
-			msm_otg_set_vbus_state(motg->vbus_state);
-		if (motg->id_state != test_bit(ID, &motg->inputs))
-			msm_id_status_w(&motg->id_status_work.work);
-	}
-
-	return 0;
-
-phy_suspend_fail:
-	enable_irq(motg->irq);
-	if (motg->phy_irq)
-		enable_irq(motg->phy_irq);
-	return ret;
-}
-
-static int msm_otg_resume(struct msm_otg *motg)
-{
-	struct usb_phy *phy = &motg->phy;
-	struct usb_bus *bus = phy->otg->host;
-	struct usb_hcd *hcd = bus_to_hcd(phy->otg->host);
-	struct msm_otg_platform_data *pdata = motg->pdata;
-	int cnt = 0;
-	unsigned int temp;
-	unsigned int ret;
-	u32 func_ctrl;
-
-	msm_otg_dbg_log_event(phy, "LPM EXIT START", motg->inputs,
-							phy->otg->state);
-	if (!atomic_read(&motg->in_lpm)) {
-		msm_otg_dbg_log_event(phy, "USB NOT IN LPM",
-				atomic_read(&motg->in_lpm), phy->otg->state);
-		return 0;
-	}
-
-	disable_irq(motg->irq);
-	pm_stay_awake(&motg->pdev->dev);
-
-	/*
-	 * If we are resuming from the device bus suspend, restore
-	 * the max performance bus vote. Otherwise put a minimum
-	 * bus vote to satisfy the requirement for enabling clocks.
-	 */
-
-	if (motg->device_bus_suspend && debug_bus_voting_enabled)
-		msm_otg_bus_vote(motg, USB_MAX_PERF_VOTE);
-	else
-		msm_otg_bus_vote(motg, USB_MIN_PERF_VOTE);
-
-	/* Vote for TCXO when waking up the phy */
-	if (motg->lpm_flags & XO_SHUTDOWN) {
-		if (motg->xo_clk)
-			clk_prepare_enable(motg->xo_clk);
-		motg->lpm_flags &= ~XO_SHUTDOWN;
-	}
-
-	if (motg->lpm_flags & CLOCKS_DOWN) {
-		if (motg->phy_csr_clk) {
-			ret = clk_prepare_enable(motg->phy_csr_clk);
-			WARN(ret, "USB phy_csr_clk enable failed\n");
-		}
-		ret = clk_prepare_enable(motg->core_clk);
-		WARN(ret, "USB core_clk enable failed\n");
-		ret = clk_prepare_enable(motg->pclk);
-		WARN(ret, "USB pclk enable failed\n");
-		motg->lpm_flags &= ~CLOCKS_DOWN;
-	}
-
-	if (motg->lpm_flags & PHY_PWR_COLLAPSED) {
-		msm_hsusb_ldo_enable(motg, USB_PHY_REG_ON);
-		motg->lpm_flags &= ~PHY_PWR_COLLAPSED;
-	} else if (motg->lpm_flags & PHY_REGULATORS_LPM) {
-		msm_hsusb_ldo_enable(motg, USB_PHY_REG_LPM_OFF);
-		motg->lpm_flags &= ~PHY_REGULATORS_LPM;
-	}
-
-	if (motg->lpm_flags & PHY_RETENTIONED ||
-		(motg->caps & ALLOW_VDD_MIN_WITH_RETENTION_DISABLED)) {
-		msm_hsusb_config_vddcx(1);
-		ret = regulator_enable(hsusb_vdd);
-		WARN(ret, "hsusb_vdd LDO enable failed\n");
-		msm_otg_disable_phy_hv_int(motg);
-		msm_otg_exit_phy_retention(motg);
-		motg->lpm_flags &= ~PHY_RETENTIONED;
-		if (pdata->vddmin_gpio && motg->device_bus_suspend)
-			gpio_direction_input(pdata->vddmin_gpio);
-	} else if (motg->device_bus_suspend) {
-		msm_otg_disable_phy_hv_int(motg);
-	}
-
-	temp = readl_relaxed(USB_USBCMD);
-	temp &= ~ASYNC_INTR_CTRL;
-	temp &= ~ULPI_STP_CTRL;
-	writel_relaxed(temp, USB_USBCMD);
-
-	/*
-	 * PHY comes out of low power mode (LPM) in case of wakeup
-	 * from asynchronous interrupt.
-	 */
-	if (!(readl_relaxed(USB_PORTSC) & PORTSC_PHCD))
-		goto skip_phy_resume;
-
-	writel_relaxed(readl_relaxed(USB_PORTSC) & ~PORTSC_PHCD, USB_PORTSC);
-
-	while (cnt < PHY_RESUME_TIMEOUT_USEC) {
-		if (!(readl_relaxed(USB_PORTSC) & PORTSC_PHCD))
-			break;
-		udelay(1);
-		cnt++;
-	}
-
-	if (cnt >= PHY_RESUME_TIMEOUT_USEC) {
-		/*
-		 * This is a fatal error. Reset the link and
-		 * PHY. USB state can not be restored. Re-insertion
-		 * of USB cable is the only way to get USB working.
-		 */
-		dev_err(phy->dev, "Unable to resume USB. Re-plugin the cable\n"
-									);
-		msm_otg_reset(phy);
-	}
-
-skip_phy_resume:
-	if (motg->caps & ALLOW_VDD_MIN_WITH_RETENTION_DISABLED) {
-		/* put the controller in normal mode */
-		func_ctrl = ulpi_read(phy, ULPI_FUNC_CTRL);
-		func_ctrl &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
-		func_ctrl |= ULPI_FUNC_CTRL_OPMODE_NORMAL;
-		ulpi_write(phy, func_ctrl, ULPI_FUNC_CTRL);
-	}
-
-	if (device_may_wakeup(phy->dev)) {
-		if (motg->host_bus_suspend || motg->device_bus_suspend) {
-			disable_irq_wake(motg->async_irq);
-			disable_irq_wake(motg->irq);
-		}
-
-		if (motg->phy_irq)
-			disable_irq_wake(motg->phy_irq);
-		if (motg->pdata->pmic_id_irq)
-			disable_irq_wake(motg->pdata->pmic_id_irq);
-		if (motg->ext_id_irq)
-			disable_irq_wake(motg->ext_id_irq);
-		if (pdata->otg_control == OTG_PHY_CONTROL &&
-			pdata->mpm_otgsessvld_int)
-			msm_mpm_set_pin_wake(pdata->mpm_otgsessvld_int, 0);
-		if ((motg->host_bus_suspend || motg->device_bus_suspend) &&
-			pdata->mpm_dpshv_int)
-			msm_mpm_set_pin_wake(pdata->mpm_dpshv_int, 0);
-		if ((motg->host_bus_suspend || motg->device_bus_suspend) &&
-			pdata->mpm_dmshv_int)
-			msm_mpm_set_pin_wake(pdata->mpm_dmshv_int, 0);
-	}
-	if (bus)
-		set_bit(HCD_FLAG_HW_ACCESSIBLE, &(bus_to_hcd(bus))->flags);
-
-	atomic_set(&motg->in_lpm, 0);
-
-	if (motg->async_int) {
-		/* Match the disable_irq call from ISR */
-		enable_irq(motg->async_int);
-		motg->async_int = 0;
-	}
-	enable_irq(motg->irq);
-
-	/* Enable ASYNC_IRQ only during LPM */
-	disable_irq(motg->async_irq);
-
-	if (motg->phy_irq_pending) {
-		motg->phy_irq_pending = false;
-		msm_id_status_w(&motg->id_status_work.work);
-	}
-
-	if (motg->host_bus_suspend) {
-		usb_hcd_resume_root_hub(hcd);
-		schedule_delayed_work(&motg->perf_vote_work,
-			msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
-	}
-
-	dev_info(phy->dev, "USB exited from low power mode\n");
-	msm_otg_dbg_log_event(phy, "LPM EXIT DONE",
-			motg->caps, motg->lpm_flags);
-
-	return 0;
-}
-#endif
-
-static void msm_otg_notify_host_mode(struct msm_otg *motg, bool host_mode)
-{
-	if (!psy) {
-		pr_err("No USB power supply registered!\n");
-		return;
-	}
-
-	motg->host_mode = host_mode;
-	power_supply_changed(psy);
-}
-
-static int msm_otg_notify_chg_type(struct msm_otg *motg)
-{
-	static int charger_type;
-	union power_supply_propval pval = {0};
-
-	/*
-	 * TODO
-	 * Unify OTG driver charger types and power supply charger types
-	 */
-	if (charger_type == motg->chg_type)
-		return 0;
-
-	if (motg->chg_type == USB_SDP_CHARGER)
-		charger_type = POWER_SUPPLY_TYPE_USB;
-	else if (motg->chg_type == USB_CDP_CHARGER)
-		charger_type = POWER_SUPPLY_TYPE_USB_CDP;
-	else if (motg->chg_type == USB_DCP_CHARGER ||
-			motg->chg_type == USB_NONCOMPLIANT_CHARGER ||
-			motg->chg_type == USB_FLOATED_CHARGER)
-		charger_type = POWER_SUPPLY_TYPE_USB_DCP;
-	else
-		charger_type = POWER_SUPPLY_TYPE_UNKNOWN;
-
-	if (!psy) {
-		pr_err("No USB power supply registered!\n");
-		return -EINVAL;
-	}
-
-	pr_debug("setting usb power supply type %d\n", charger_type);
-	msm_otg_dbg_log_event(&motg->phy, "SET USB PWR SUPPLY TYPE",
-			motg->chg_type, charger_type);
-	pval.intval = charger_type;
-	power_supply_set_property(psy, POWER_SUPPLY_PROP_TYPE, &pval);
-	return 0;
-}
-
-static int msm_otg_notify_power_supply(struct msm_otg *motg, unsigned int mA)
-{
-	union power_supply_propval pval = {0};
-	bool enable;
-	int limit;
-
-	if (!psy) {
-		dev_dbg(motg->phy.dev, "no usb power supply registered\n");
-		goto psy_error;
-	}
-
-	if (motg->cur_power == 0 && mA > 2) {
-		/* Enable charging */
-		enable = true;
-		limit = 1000 * mA;
-	} else if (motg->cur_power >= 0 && (mA == 0 || mA == 2)) {
-		/* Disable charging */
-		enable = false;
-		/* Set max current limit in uA */
-		limit = 1000 * mA;
-	} else {
-		enable = true;
-		/* Current has changed (100/2 --> 500) */
-		limit = 1000 * mA;
-	}
-
-	pval.intval = enable;
-	if (power_supply_set_property(psy, POWER_SUPPLY_PROP_ONLINE, &pval))
-		goto psy_error;
-
-	pval.intval = limit;
-	if (power_supply_set_property(psy, POWER_SUPPLY_PROP_CURRENT_MAX,
-									&pval))
-		goto psy_error;
-
-	power_supply_changed(psy);
-	return 0;
-
-psy_error:
-	dev_dbg(motg->phy.dev, "power supply error when setting property\n");
-	return -ENXIO;
-}
-
-static void msm_otg_set_online_status(struct msm_otg *motg)
-{
-	union power_supply_propval pval = {0};
-
-	if (!psy) {
-		dev_dbg(motg->phy.dev, "no usb power supply registered\n");
-		return;
-	}
-
-	/* Set power supply online status to false */
-	pval.intval = false;
-	if (power_supply_set_property(psy, POWER_SUPPLY_PROP_ONLINE, &pval))
-		dev_dbg(motg->phy.dev, "error setting power supply property\n");
-}
-
-static void msm_otg_notify_charger(struct msm_otg *motg, unsigned int mA)
-{
-	struct usb_gadget *g = motg->phy.otg->gadget;
-	struct msm_otg_platform_data *pdata = motg->pdata;
-
-	if (g && g->is_a_peripheral)
-		return;
-
-	dev_dbg(motg->phy.dev, "Requested curr from USB = %u, max-type-c:%u\n",
-					mA, motg->typec_current_max);
-	/* Save bc1.2 max_curr if type-c charger later moves to diff mode */
-	motg->bc1p2_current_max = mA;
-
-	/*
-	 * Limit type-c charger current to 500 for SDP charger to avoid more
-	 * current drawn than 500 with Hosts that don't support type C due to
-	 * non compliant type-c to standard A cables.
-	 */
-	if (pdata->enable_sdp_typec_current_limit &&
-			(motg->chg_type == USB_SDP_CHARGER) &&
-					motg->typec_current_max > 500)
-		motg->typec_current_max = 500;
-
-	/* Override mA if type-c charger used (use hvdcp/bc1.2 if it is 500) */
-	if (motg->typec_current_max > 500 && mA < motg->typec_current_max)
-		mA = motg->typec_current_max;
-
-	if (msm_otg_notify_chg_type(motg))
-		dev_err(motg->phy.dev,
-			"Failed notifying %d charger type to PMIC\n",
-							motg->chg_type);
-
-	/*
-	 * This condition will be true when usb cable is disconnected
-	 * during bootup before enumeration. Check charger type also
-	 * to avoid clearing online flag in case of valid charger.
-	 */
-	if (motg->online && motg->cur_power == 0 && mA == 0 &&
-			(motg->chg_type == USB_INVALID_CHARGER))
-		msm_otg_set_online_status(motg);
-
-	if (motg->cur_power == mA)
-		return;
-
-	dev_info(motg->phy.dev, "Avail curr from USB = %u\n", mA);
-	msm_otg_dbg_log_event(&motg->phy, "AVAIL CURR FROM USB",
-			mA, motg->chg_type);
-
-	msm_otg_notify_power_supply(motg, mA);
-
-	motg->cur_power = mA;
-}
-
-static int msm_otg_set_power(struct usb_phy *phy, unsigned int mA)
-{
-	struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
-
-	/*
-	 * Gadget driver uses set_power method to notify about the
-	 * available current based on suspend/configured states.
-	 *
-	 * IDEV_CHG can be drawn irrespective of suspend/un-configured
-	 * states when CDP/ACA is connected.
-	 */
-	if (motg->chg_type == USB_SDP_CHARGER)
-		msm_otg_notify_charger(motg, mA);
-
-	return 0;
-}
-
-static void msm_hsusb_vbus_power(struct msm_otg *motg, bool on);
-
-static void msm_otg_perf_vote_update(struct msm_otg *motg, bool perf_mode)
-{
-	static bool curr_perf_mode;
-	int ret, latency = motg->pm_qos_latency;
-	long clk_rate;
-
-	if (curr_perf_mode == perf_mode)
-		return;
-
-	if (perf_mode) {
-		if (latency)
-			pm_qos_update_request(&motg->pm_qos_req_dma, latency);
-		msm_otg_bus_vote(motg, USB_MAX_PERF_VOTE);
-		clk_rate = motg->core_clk_rate;
-	} else {
-		if (latency)
-			pm_qos_update_request(&motg->pm_qos_req_dma,
-						PM_QOS_DEFAULT_VALUE);
-		msm_otg_bus_vote(motg, USB_MIN_PERF_VOTE);
-		clk_rate = motg->core_clk_svs_rate;
-	}
-
-	if (clk_rate) {
-		ret = clk_set_rate(motg->core_clk, clk_rate);
-		if (ret)
-			dev_err(motg->phy.dev, "sys_clk set_rate fail:%d %ld\n",
-					ret, clk_rate);
-	}
-	curr_perf_mode = perf_mode;
-	pr_debug("%s: latency updated to: %d, core_freq to: %ld\n", __func__,
-					latency, clk_rate);
-}
-
-static void msm_otg_perf_vote_work(struct work_struct *w)
-{
-	struct msm_otg *motg = container_of(w, struct msm_otg,
-						perf_vote_work.work);
-	unsigned int curr_sample_int_count;
-	bool in_perf_mode = false;
-
-	curr_sample_int_count = motg->usb_irq_count;
-	motg->usb_irq_count = 0;
-
-	if (curr_sample_int_count >= PM_QOS_THRESHOLD)
-		in_perf_mode = true;
-
-	msm_otg_perf_vote_update(motg, in_perf_mode);
-	pr_debug("%s: in_perf_mode:%u, interrupts in last sample:%u\n",
-		 __func__, in_perf_mode, curr_sample_int_count);
-
-	schedule_delayed_work(&motg->perf_vote_work,
-			msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
-}
-
-static void msm_otg_start_host(struct usb_otg *otg, int on)
-{
-	struct msm_otg *motg = container_of(otg->usb_phy, struct msm_otg, phy);
-	struct msm_otg_platform_data *pdata = motg->pdata;
-	struct usb_hcd *hcd;
-	u32 val;
-
-	if (!otg->host)
-		return;
-
-	hcd = bus_to_hcd(otg->host);
-
-	msm_otg_dbg_log_event(&motg->phy, "PM RT: StartHost GET",
-				     get_pm_runtime_counter(motg->phy.dev), 0);
-	pm_runtime_get_sync(otg->usb_phy->dev);
-	if (on) {
-		dev_dbg(otg->usb_phy->dev, "host on\n");
-		msm_otg_dbg_log_event(&motg->phy, "HOST ON",
-				motg->inputs, otg->state);
-		msm_hsusb_vbus_power(motg, 1);
-		msm_otg_reset(&motg->phy);
-
-		if (pdata->otg_control == OTG_PHY_CONTROL)
-			ulpi_write(otg->usb_phy, OTG_COMP_DISABLE,
-				ULPI_SET(ULPI_PWR_CLK_MNG_REG));
-
-		if (pdata->enable_axi_prefetch) {
-			val = readl_relaxed(USB_HS_APF_CTRL);
-			val &= ~APF_CTRL_EN;
-			writel_relaxed(val, USB_HS_APF_CTRL);
-		}
-		usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
-#ifdef CONFIG_SMP
-		motg->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
-		motg->pm_qos_req_dma.irq = motg->irq;
-#endif
-		pm_qos_add_request(&motg->pm_qos_req_dma,
-				PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
-		/* start in perf mode for better performance initially */
-		msm_otg_perf_vote_update(motg, true);
-		schedule_delayed_work(&motg->perf_vote_work,
-				msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
-	} else {
-		dev_dbg(otg->usb_phy->dev, "host off\n");
-		msm_otg_dbg_log_event(&motg->phy, "HOST OFF",
-				motg->inputs, otg->state);
-		msm_hsusb_vbus_power(motg, 0);
-
-		cancel_delayed_work_sync(&motg->perf_vote_work);
-		msm_otg_perf_vote_update(motg, false);
-		pm_qos_remove_request(&motg->pm_qos_req_dma);
-
-		pm_runtime_disable(&hcd->self.root_hub->dev);
-		pm_runtime_barrier(&hcd->self.root_hub->dev);
-		usb_remove_hcd(hcd);
-		msm_otg_reset(&motg->phy);
-
-		if (pdata->enable_axi_prefetch)
-			writel_relaxed(readl_relaxed(USB_HS_APF_CTRL)
-					| (APF_CTRL_EN), USB_HS_APF_CTRL);
-
-		/* HCD core reset all bits of PORTSC. select ULPI phy */
-		writel_relaxed(0x80000000, USB_PORTSC);
-
-		if (pdata->otg_control == OTG_PHY_CONTROL)
-			ulpi_write(otg->usb_phy, OTG_COMP_DISABLE,
-				ULPI_CLR(ULPI_PWR_CLK_MNG_REG));
-	}
-	msm_otg_dbg_log_event(&motg->phy, "PM RT: StartHost PUT",
-				     get_pm_runtime_counter(motg->phy.dev), 0);
-
-	pm_runtime_mark_last_busy(otg->usb_phy->dev);
-	pm_runtime_put_autosuspend(otg->usb_phy->dev);
-}
-
-static void msm_hsusb_vbus_power(struct msm_otg *motg, bool on)
-{
-	int ret;
-	static bool vbus_is_on;
-
-	msm_otg_dbg_log_event(&motg->phy, "VBUS POWER", on, vbus_is_on);
-	if (vbus_is_on == on)
-		return;
-
-	if (motg->pdata->vbus_power) {
-		ret = motg->pdata->vbus_power(on);
-		if (!ret)
-			vbus_is_on = on;
-		return;
-	}
-
-	if (!vbus_otg) {
-		pr_err("vbus_otg is NULL.");
-		return;
-	}
-
-	/*
-	 * if entering host mode tell the charger to not draw any current
-	 * from usb before turning on the boost.
-	 * if exiting host mode disable the boost before enabling to draw
-	 * current from the source.
-	 */
-	if (on) {
-		msm_otg_notify_host_mode(motg, on);
-		ret = regulator_enable(vbus_otg);
-		if (ret) {
-			pr_err("unable to enable vbus_otg\n");
-			return;
-		}
-		vbus_is_on = true;
-	} else {
-		ret = regulator_disable(vbus_otg);
-		if (ret) {
-			pr_err("unable to disable vbus_otg\n");
-			return;
-		}
-		msm_otg_notify_host_mode(motg, on);
-		vbus_is_on = false;
-	}
-}
-
-static int msm_otg_set_host(struct usb_otg *otg, struct usb_bus *host)
-{
-	struct msm_otg *motg = container_of(otg->usb_phy, struct msm_otg, phy);
-	struct usb_hcd *hcd;
-
-	/*
-	 * Fail host registration if this board can support
-	 * only peripheral configuration.
-	 */
-	if (motg->pdata->mode == USB_PERIPHERAL) {
-		dev_info(otg->usb_phy->dev, "Host mode is not supported\n");
-		return -ENODEV;
-	}
-
-	if (!motg->pdata->vbus_power && host) {
-		vbus_otg = devm_regulator_get(motg->phy.dev, "vbus_otg");
-		if (IS_ERR(vbus_otg)) {
-			msm_otg_dbg_log_event(&motg->phy,
-					"UNABLE TO GET VBUS_OTG",
-					otg->state, 0);
-			pr_err("Unable to get vbus_otg\n");
-			return PTR_ERR(vbus_otg);
-		}
-	}
-
-	if (!host) {
-		if (otg->state == OTG_STATE_A_HOST) {
-			msm_otg_start_host(otg, 0);
-			otg->host = NULL;
-			otg->state = OTG_STATE_UNDEFINED;
-			queue_work(motg->otg_wq, &motg->sm_work);
-		} else {
-			otg->host = NULL;
-		}
-
-		return 0;
-	}
-
-	hcd = bus_to_hcd(host);
-	hcd->power_budget = motg->pdata->power_budget;
-
-	otg->host = host;
-	dev_dbg(otg->usb_phy->dev, "host driver registered w/ tranceiver\n");
-	msm_otg_dbg_log_event(&motg->phy, "HOST DRIVER REGISTERED",
-			hcd->power_budget, motg->pdata->mode);
-
-	/*
-	 * Kick the state machine work, if peripheral is not supported
-	 * or peripheral is already registered with us.
-	 */
-	if (motg->pdata->mode == USB_HOST || otg->gadget)
-		queue_work(motg->otg_wq, &motg->sm_work);
-
-	return 0;
-}
-
-static void msm_otg_start_peripheral(struct usb_otg *otg, int on)
-{
-	struct msm_otg *motg = container_of(otg->usb_phy, struct msm_otg, phy);
-	struct msm_otg_platform_data *pdata = motg->pdata;
-	struct pinctrl_state *set_state;
-	int ret;
-
-	if (!otg->gadget)
-		return;
-
-	msm_otg_dbg_log_event(&motg->phy, "PM RT: StartPeri GET",
-				     get_pm_runtime_counter(motg->phy.dev), 0);
-	pm_runtime_get_sync(otg->usb_phy->dev);
-	if (on) {
-		dev_dbg(otg->usb_phy->dev, "gadget on\n");
-		msm_otg_dbg_log_event(&motg->phy, "GADGET ON",
-				motg->inputs, otg->state);
-
-		/* Configure BUS performance parameters for MAX bandwidth */
-		if (debug_bus_voting_enabled)
-			msm_otg_bus_vote(motg, USB_MAX_PERF_VOTE);
-		/* bump up usb core_clk to default */
-		clk_set_rate(motg->core_clk, motg->core_clk_rate);
-
-		usb_gadget_vbus_connect(otg->gadget);
-
-		/*
-		 * Request VDD min gpio, if need to support VDD
-		 * minimazation during peripheral bus suspend.
-		 */
-		if (pdata->vddmin_gpio) {
-			if (motg->phy_pinctrl) {
-				set_state =
-					pinctrl_lookup_state(motg->phy_pinctrl,
-							"hsusb_active");
-				if (IS_ERR(set_state)) {
-					pr_err("cannot get phy pinctrl active state\n");
-				} else {
-					pinctrl_select_state(motg->phy_pinctrl,
-								set_state);
-				}
-			}
-
-			ret = gpio_request(pdata->vddmin_gpio,
-					"MSM_OTG_VDD_MIN_GPIO");
-			if (ret < 0) {
-				dev_err(otg->usb_phy->dev, "gpio req failed for vdd min:%d\n",
-						ret);
-				pdata->vddmin_gpio = 0;
-			}
-		}
-	} else {
-		dev_dbg(otg->usb_phy->dev, "gadget off\n");
-		msm_otg_dbg_log_event(&motg->phy, "GADGET OFF",
-			motg->inputs, otg->state);
-		usb_gadget_vbus_disconnect(otg->gadget);
-		clear_bit(A_BUS_SUSPEND, &motg->inputs);
-		/* Configure BUS performance parameters to default */
-		msm_otg_bus_vote(motg, USB_MIN_PERF_VOTE);
-
-		if (pdata->vddmin_gpio) {
-			gpio_free(pdata->vddmin_gpio);
-			if (motg->phy_pinctrl) {
-				set_state =
-					pinctrl_lookup_state(motg->phy_pinctrl,
-							"hsusb_sleep");
-				if (IS_ERR(set_state))
-					pr_err("cannot get phy pinctrl sleep state\n");
-				else
-					pinctrl_select_state(motg->phy_pinctrl,
-						set_state);
-			}
-		}
-	}
-	msm_otg_dbg_log_event(&motg->phy, "PM RT: StartPeri PUT",
-				     get_pm_runtime_counter(motg->phy.dev), 0);
-	pm_runtime_mark_last_busy(otg->usb_phy->dev);
-	pm_runtime_put_autosuspend(otg->usb_phy->dev);
-}
-
-static int msm_otg_set_peripheral(struct usb_otg *otg,
-					struct usb_gadget *gadget)
-{
-	struct msm_otg *motg = container_of(otg->usb_phy, struct msm_otg, phy);
-
-	/*
-	 * Fail peripheral registration if this board can support
-	 * only host configuration.
-	 */
-	if (motg->pdata->mode == USB_HOST) {
-		dev_info(otg->usb_phy->dev, "Peripheral mode is not supported\n");
-		return -ENODEV;
-	}
-
-	if (!gadget) {
-		if (otg->state == OTG_STATE_B_PERIPHERAL) {
-			msm_otg_dbg_log_event(&motg->phy,
-				"PM RUNTIME: PERIPHERAL GET1",
-				get_pm_runtime_counter(otg->usb_phy->dev), 0);
-			msm_otg_start_peripheral(otg, 0);
-			otg->gadget = NULL;
-			otg->state = OTG_STATE_UNDEFINED;
-			queue_work(motg->otg_wq, &motg->sm_work);
-		} else {
-			otg->gadget = NULL;
-		}
-
-		return 0;
-	}
-	otg->gadget = gadget;
-	dev_dbg(otg->usb_phy->dev, "peripheral driver registered w/ tranceiver\n");
-	msm_otg_dbg_log_event(&motg->phy, "PERIPHERAL DRIVER REGISTERED",
-			otg->state, motg->pdata->mode);
-
-	/*
-	 * Kick the state machine work, if host is not supported
-	 * or host is already registered with us.
-	 */
-	if (motg->pdata->mode == USB_PERIPHERAL || otg->host)
-		queue_work(motg->otg_wq, &motg->sm_work);
-
-	return 0;
-}
-
-static bool msm_otg_read_pmic_id_state(struct msm_otg *motg)
-{
-	unsigned long flags;
-	bool id;
-	int ret;
-
-	if (!motg->pdata->pmic_id_irq)
-		return -ENODEV;
-
-	local_irq_save(flags);
-	ret = irq_get_irqchip_state(motg->pdata->pmic_id_irq,
-					IRQCHIP_STATE_LINE_LEVEL, &id);
-	local_irq_restore(flags);
-
-	/*
-	 * If we can not read ID line state for some reason, treat
-	 * it as float. This would prevent MHL discovery and kicking
-	 * host mode unnecessarily.
-	 */
-	if (ret < 0)
-		return true;
-
-	return !!id;
-}
-
-static bool msm_otg_read_phy_id_state(struct msm_otg *motg)
-{
-	u8 val;
-
-	/*
-	 * clear the pending/outstanding interrupts and
-	 * read the ID status from the SRC_STATUS register.
-	 */
-	writeb_relaxed(USB_PHY_ID_MASK, USB2_PHY_USB_PHY_INTERRUPT_CLEAR1);
-
-	writeb_relaxed(0x1, USB2_PHY_USB_PHY_IRQ_CMD);
-	/*
-	 * Databook says 200 usec delay is required for
-	 * clearing the interrupts.
-	 */
-	udelay(200);
-	writeb_relaxed(0x0, USB2_PHY_USB_PHY_IRQ_CMD);
-
-	val = readb_relaxed(USB2_PHY_USB_PHY_INTERRUPT_SRC_STATUS);
-	if (val & USB_PHY_IDDIG_1_0)
-		return false; /* ID is grounded */
-	else
-		return true;
-}
-
-static void msm_otg_chg_check_timer_func(unsigned long data)
-{
-	struct msm_otg *motg = (struct msm_otg *) data;
-	struct usb_otg *otg = motg->phy.otg;
-
-	if (atomic_read(&motg->in_lpm) ||
-		!test_bit(B_SESS_VLD, &motg->inputs) ||
-		otg->state != OTG_STATE_B_PERIPHERAL ||
-		otg->gadget->speed != USB_SPEED_UNKNOWN) {
-		dev_dbg(otg->usb_phy->dev, "Nothing to do in chg_check_timer\n");
-		return;
-	}
-
-	if ((readl_relaxed(USB_PORTSC) & PORTSC_LS) == PORTSC_LS) {
-		dev_dbg(otg->usb_phy->dev, "DCP is detected as SDP\n");
-		msm_otg_dbg_log_event(&motg->phy, "DCP IS DETECTED AS SDP",
-				otg->state, 0);
-		set_bit(B_FALSE_SDP, &motg->inputs);
-		queue_work(motg->otg_wq, &motg->sm_work);
-	}
-}
-
-static bool msm_chg_check_secondary_det(struct msm_otg *motg)
-{
-	struct usb_phy *phy = &motg->phy;
-	u32 chg_det;
-	bool ret = false;
-
-	switch (motg->pdata->phy_type) {
-	case SNPS_PICO_PHY:
-	case SNPS_FEMTO_PHY:
-		chg_det = ulpi_read(phy, 0x87);
-		ret = chg_det & 1;
-		break;
-	default:
-		break;
-	}
-	return ret;
-}
-
-static void msm_chg_enable_secondary_det(struct msm_otg *motg)
-{
-	struct usb_phy *phy = &motg->phy;
-
-	switch (motg->pdata->phy_type) {
-	case SNPS_PICO_PHY:
-	case SNPS_FEMTO_PHY:
-		/*
-		 * Configure DM as current source, DP as current sink
-		 * and enable battery charging comparators.
-		 */
-		ulpi_write(phy, 0x8, 0x85);
-		ulpi_write(phy, 0x2, 0x85);
-		ulpi_write(phy, 0x1, 0x85);
-		break;
-	default:
-		break;
-	}
-}
-
-static bool msm_chg_check_primary_det(struct msm_otg *motg)
-{
-	struct usb_phy *phy = &motg->phy;
-	u32 chg_det;
-	bool ret = false;
-
-	switch (motg->pdata->phy_type) {
-	case SNPS_PICO_PHY:
-	case SNPS_FEMTO_PHY:
-		chg_det = ulpi_read(phy, 0x87);
-		ret = chg_det & 1;
-		/* Turn off VDP_SRC */
-		ulpi_write(phy, 0x3, 0x86);
-		msleep(20);
-		break;
-	default:
-		break;
-	}
-	return ret;
-}
-
-static void msm_chg_enable_primary_det(struct msm_otg *motg)
-{
-	struct usb_phy *phy = &motg->phy;
-
-	switch (motg->pdata->phy_type) {
-	case SNPS_PICO_PHY:
-	case SNPS_FEMTO_PHY:
-		/*
-		 * Configure DP as current source, DM as current sink
-		 * and enable battery charging comparators.
-		 */
-		ulpi_write(phy, 0x2, 0x85);
-		ulpi_write(phy, 0x1, 0x85);
-		break;
-	default:
-		break;
-	}
-}
-
-static bool msm_chg_check_dcd(struct msm_otg *motg)
-{
-	struct usb_phy *phy = &motg->phy;
-	u32 line_state;
-	bool ret = false;
-
-	switch (motg->pdata->phy_type) {
-	case SNPS_PICO_PHY:
-	case SNPS_FEMTO_PHY:
-		line_state = ulpi_read(phy, 0x87);
-		ret = line_state & 2;
-		break;
-	default:
-		break;
-	}
-	return ret;
-}
-
-static void msm_chg_disable_dcd(struct msm_otg *motg)
-{
-	struct usb_phy *phy = &motg->phy;
-
-	switch (motg->pdata->phy_type) {
-	case SNPS_PICO_PHY:
-		ulpi_write(phy, 0x10, 0x86);
-		break;
-	case SNPS_FEMTO_PHY:
-		ulpi_write(phy, 0x10, 0x86);
-		/*
-		 * Disable the Rdm_down after
-		 * the DCD is completed.
-		 */
-		ulpi_write(phy, 0x04, 0x0C);
-		break;
-	default:
-		break;
-	}
-}
-
-static void msm_chg_enable_dcd(struct msm_otg *motg)
-{
-	struct usb_phy *phy = &motg->phy;
-
-	switch (motg->pdata->phy_type) {
-	case SNPS_PICO_PHY:
-		/* Data contact detection enable */
-		ulpi_write(phy, 0x10, 0x85);
-		break;
-	case SNPS_FEMTO_PHY:
-		/*
-		 * Idp_src and Rdm_down are de-coupled
-		 * on Femto PHY. If Idp_src alone is
-		 * enabled, DCD timeout is observed with
-		 * wall charger. But a genuine DCD timeout
-		 * may be incorrectly interpreted. Also
-		 * BC1.2 compliance testers expect Rdm_down
-		 * to enabled during DCD. Enable Rdm_down
-		 * explicitly before enabling the DCD.
-		 */
-		ulpi_write(phy, 0x04, 0x0B);
-		ulpi_write(phy, 0x10, 0x85);
-		break;
-	default:
-		break;
-	}
-}
-
-static void msm_chg_block_on(struct msm_otg *motg)
-{
-	struct usb_phy *phy = &motg->phy;
-	u32 func_ctrl;
-
-	/* put the controller in non-driving mode */
-	func_ctrl = ulpi_read(phy, ULPI_FUNC_CTRL);
-	func_ctrl &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
-	func_ctrl |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING;
-	ulpi_write(phy, func_ctrl, ULPI_FUNC_CTRL);
-
-	switch (motg->pdata->phy_type) {
-	case SNPS_PICO_PHY:
-	case SNPS_FEMTO_PHY:
-		/* disable DP and DM pull down resistors */
-		ulpi_write(phy, 0x6, 0xC);
-		/* Clear charger detecting control bits */
-		ulpi_write(phy, 0x1F, 0x86);
-		/* Clear alt interrupt latch and enable bits */
-		ulpi_write(phy, 0x1F, 0x92);
-		ulpi_write(phy, 0x1F, 0x95);
-		udelay(100);
-		break;
-	default:
-		break;
-	}
-}
-
-static void msm_chg_block_off(struct msm_otg *motg)
-{
-	struct usb_phy *phy = &motg->phy;
-	u32 func_ctrl;
-
-	switch (motg->pdata->phy_type) {
-	case SNPS_PICO_PHY:
-	case SNPS_FEMTO_PHY:
-		/* Clear charger detecting control bits */
-		ulpi_write(phy, 0x3F, 0x86);
-		/* Clear alt interrupt latch and enable bits */
-		ulpi_write(phy, 0x1F, 0x92);
-		ulpi_write(phy, 0x1F, 0x95);
-		/* re-enable DP and DM pull down resistors */
-		ulpi_write(phy, 0x6, 0xB);
-		break;
-	default:
-		break;
-	}
-
-	/* put the controller in normal mode */
-	func_ctrl = ulpi_read(phy, ULPI_FUNC_CTRL);
-	func_ctrl &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
-	func_ctrl |= ULPI_FUNC_CTRL_OPMODE_NORMAL;
-	ulpi_write(phy, func_ctrl, ULPI_FUNC_CTRL);
-}
-
-static const char *chg_to_string(enum usb_chg_type chg_type)
-{
-	switch (chg_type) {
-	case USB_SDP_CHARGER:		return "USB_SDP_CHARGER";
-	case USB_DCP_CHARGER:		return "USB_DCP_CHARGER";
-	case USB_CDP_CHARGER:		return "USB_CDP_CHARGER";
-	case USB_NONCOMPLIANT_CHARGER:	return "USB_NONCOMPLIANT_CHARGER";
-	case USB_FLOATED_CHARGER:	return "USB_FLOATED_CHARGER";
-	default:			return "INVALID_CHARGER";
-	}
-}
-
-#define MSM_CHG_DCD_TIMEOUT		(750 * HZ/1000) /* 750 msec */
-#define MSM_CHG_DCD_POLL_TIME		(50 * HZ/1000) /* 50 msec */
-#define MSM_CHG_PRIMARY_DET_TIME	(50 * HZ/1000) /* TVDPSRC_ON */
-#define MSM_CHG_SECONDARY_DET_TIME	(50 * HZ/1000) /* TVDMSRC_ON */
-static void msm_chg_detect_work(struct work_struct *w)
-{
-	struct msm_otg *motg = container_of(w, struct msm_otg, chg_work.work);
-	struct usb_phy *phy = &motg->phy;
-	bool is_dcd = false, tmout, vout;
-	static bool dcd;
-	u32 line_state, dm_vlgc;
-	unsigned long delay;
-
-	dev_dbg(phy->dev, "chg detection work\n");
-	msm_otg_dbg_log_event(phy, "CHG DETECTION WORK",
-			motg->chg_state, get_pm_runtime_counter(phy->dev));
-
-	switch (motg->chg_state) {
-	case USB_CHG_STATE_UNDEFINED:
-	case USB_CHG_STATE_IN_PROGRESS:
-		msm_chg_block_on(motg);
-		msm_chg_enable_dcd(motg);
-		motg->chg_state = USB_CHG_STATE_WAIT_FOR_DCD;
-		motg->dcd_time = 0;
-		delay = MSM_CHG_DCD_POLL_TIME;
-		break;
-	case USB_CHG_STATE_WAIT_FOR_DCD:
-		is_dcd = msm_chg_check_dcd(motg);
-		motg->dcd_time += MSM_CHG_DCD_POLL_TIME;
-		tmout = motg->dcd_time >= MSM_CHG_DCD_TIMEOUT;
-		if (is_dcd || tmout) {
-			if (is_dcd)
-				dcd = true;
-			else
-				dcd = false;
-			msm_chg_disable_dcd(motg);
-			msm_chg_enable_primary_det(motg);
-			delay = MSM_CHG_PRIMARY_DET_TIME;
-			motg->chg_state = USB_CHG_STATE_DCD_DONE;
-		} else {
-			delay = MSM_CHG_DCD_POLL_TIME;
-		}
-		break;
-	case USB_CHG_STATE_DCD_DONE:
-		vout = msm_chg_check_primary_det(motg);
-		line_state = readl_relaxed(USB_PORTSC) & PORTSC_LS;
-		dm_vlgc = line_state & PORTSC_LS_DM;
-		if (vout && !dm_vlgc) { /* VDAT_REF < DM < VLGC */
-			if (line_state) { /* DP > VLGC */
-				motg->chg_type = USB_NONCOMPLIANT_CHARGER;
-				motg->chg_state = USB_CHG_STATE_DETECTED;
-				delay = 0;
-			} else {
-				msm_chg_enable_secondary_det(motg);
-				delay = MSM_CHG_SECONDARY_DET_TIME;
-				motg->chg_state = USB_CHG_STATE_PRIMARY_DONE;
-			}
-		} else { /* DM < VDAT_REF || DM > VLGC */
-			if (line_state) /* DP > VLGC or/and DM > VLGC */
-				motg->chg_type = USB_NONCOMPLIANT_CHARGER;
-			else if (!dcd && floated_charger_enable)
-				motg->chg_type = USB_FLOATED_CHARGER;
-			else
-				motg->chg_type = USB_SDP_CHARGER;
-
-			motg->chg_state = USB_CHG_STATE_DETECTED;
-			delay = 0;
-			goto state_detected;
-		}
-		break;
-	case USB_CHG_STATE_PRIMARY_DONE:
-		vout = msm_chg_check_secondary_det(motg);
-		if (vout)
-			motg->chg_type = USB_DCP_CHARGER;
-		else
-			motg->chg_type = USB_CDP_CHARGER;
-		motg->chg_state = USB_CHG_STATE_SECONDARY_DONE;
-		/* fall through */
-	case USB_CHG_STATE_SECONDARY_DONE:
-		motg->chg_state = USB_CHG_STATE_DETECTED;
-	case USB_CHG_STATE_DETECTED:
-state_detected:
-		/*
-		 * Notify the charger type to power supply
-		 * owner as soon as we determine the charger.
-		 */
-		if (motg->chg_type == USB_DCP_CHARGER && motg->ext_chg_opened) {
-			init_completion(&motg->ext_chg_wait);
-			motg->ext_chg_active = DEFAULT;
-		}
-		msm_otg_notify_chg_type(motg);
-		msm_chg_block_off(motg);
-
-		/* Enable VDP_SRC in case of DCP charger */
-		if (motg->chg_type == USB_DCP_CHARGER)
-			ulpi_write(phy, 0x2, 0x85);
-
-		dev_dbg(phy->dev, "chg_type = %s\n",
-			chg_to_string(motg->chg_type));
-		msm_otg_dbg_log_event(phy, "CHG WORK PUT: CHG_TYPE",
-			motg->chg_type, get_pm_runtime_counter(phy->dev));
-		/* to match _get from sm_work before starting chg_det_work */
-		pm_runtime_mark_last_busy(phy->dev);
-		pm_runtime_put_autosuspend(phy->dev);
-
-		queue_work(motg->otg_wq, &motg->sm_work);
-		return;
-	default:
-		return;
-	}
-
-	msm_otg_dbg_log_event(phy, "CHG WORK: QUEUE", motg->chg_type, delay);
-	queue_delayed_work(motg->otg_wq, &motg->chg_work, delay);
-}
-
-#define VBUS_INIT_TIMEOUT	msecs_to_jiffies(5000)
-
-/*
- * We support OTG, Peripheral only and Host only configurations. In case
- * of OTG, mode switch (host-->peripheral/peripheral-->host) can happen
- * via Id pin status or user request (debugfs). Id/BSV interrupts are not
- * enabled when switch is controlled by user and default mode is supplied
- * by board file, which can be changed by userspace later.
- */
-static void msm_otg_init_sm(struct msm_otg *motg)
-{
-	struct msm_otg_platform_data *pdata = motg->pdata;
-	u32 otgsc = readl_relaxed(USB_OTGSC);
-	int ret;
-
-	switch (pdata->mode) {
-	case USB_OTG:
-		if (pdata->otg_control == OTG_USER_CONTROL) {
-			if (pdata->default_mode == USB_HOST) {
-				clear_bit(ID, &motg->inputs);
-			} else if (pdata->default_mode == USB_PERIPHERAL) {
-				set_bit(ID, &motg->inputs);
-				set_bit(B_SESS_VLD, &motg->inputs);
-			} else {
-				set_bit(ID, &motg->inputs);
-				clear_bit(B_SESS_VLD, &motg->inputs);
-			}
-		} else if (pdata->otg_control == OTG_PHY_CONTROL) {
-			if (otgsc & OTGSC_ID)
-				set_bit(ID, &motg->inputs);
-			else
-				clear_bit(ID, &motg->inputs);
-			if (otgsc & OTGSC_BSV)
-				set_bit(B_SESS_VLD, &motg->inputs);
-			else
-				clear_bit(B_SESS_VLD, &motg->inputs);
-		} else if (pdata->otg_control == OTG_PMIC_CONTROL) {
-			if (pdata->pmic_id_irq) {
-				if (msm_otg_read_pmic_id_state(motg))
-					set_bit(ID, &motg->inputs);
-				else
-					clear_bit(ID, &motg->inputs);
-			} else if (motg->ext_id_irq) {
-				if (gpio_get_value(pdata->usb_id_gpio))
-					set_bit(ID, &motg->inputs);
-				else
-					clear_bit(ID, &motg->inputs);
-			} else if (motg->phy_irq) {
-				if (msm_otg_read_phy_id_state(motg))
-					set_bit(ID, &motg->inputs);
-				else
-					clear_bit(ID, &motg->inputs);
-			}
-			/*
-			 * VBUS initial state is reported after PMIC
-			 * driver initialization. Wait for it.
-			 */
-			ret = wait_for_completion_timeout(&pmic_vbus_init,
-							  VBUS_INIT_TIMEOUT);
-			if (!ret) {
-				dev_dbg(motg->phy.dev, "%s: timeout waiting for PMIC VBUS\n",
-					__func__);
-				msm_otg_dbg_log_event(&motg->phy,
-					"PMIC VBUS WAIT TMOUT", motg->inputs,
-							motg->phy.otg->state);
-				clear_bit(B_SESS_VLD, &motg->inputs);
-				pmic_vbus_init.done = 1;
-			}
-		}
-		break;
-	case USB_HOST:
-		clear_bit(ID, &motg->inputs);
-		break;
-	case USB_PERIPHERAL:
-		set_bit(ID, &motg->inputs);
-		if (pdata->otg_control == OTG_PHY_CONTROL) {
-			if (otgsc & OTGSC_BSV)
-				set_bit(B_SESS_VLD, &motg->inputs);
-			else
-				clear_bit(B_SESS_VLD, &motg->inputs);
-		} else if (pdata->otg_control == OTG_PMIC_CONTROL) {
-			/*
-			 * VBUS initial state is reported after PMIC
-			 * driver initialization. Wait for it.
-			 */
-			ret = wait_for_completion_timeout(&pmic_vbus_init,
-							  VBUS_INIT_TIMEOUT);
-			if (!ret) {
-				dev_dbg(motg->phy.dev, "%s: timeout waiting for PMIC VBUS\n",
-					__func__);
-				msm_otg_dbg_log_event(&motg->phy,
-					"PMIC VBUS WAIT TMOUT", motg->inputs,
-							motg->phy.otg->state);
-				clear_bit(B_SESS_VLD, &motg->inputs);
-				pmic_vbus_init.done = 1;
-			}
-		} else if (pdata->otg_control == OTG_USER_CONTROL) {
-			set_bit(ID, &motg->inputs);
-			set_bit(B_SESS_VLD, &motg->inputs);
-		}
-		break;
-	default:
-		break;
-	}
-	msm_otg_dbg_log_event(&motg->phy, "SM INIT", pdata->mode, motg->inputs);
-	if (motg->id_state != USB_ID_GROUND)
-		motg->id_state = (test_bit(ID, &motg->inputs)) ? USB_ID_FLOAT :
-							USB_ID_GROUND;
-}
-
-static void msm_otg_wait_for_ext_chg_done(struct msm_otg *motg)
-{
-	struct usb_phy *phy = &motg->phy;
-	unsigned long t;
-
-	/*
-	 * Defer next cable connect event till external charger
-	 * detection is completed.
-	 */
-
-	if (motg->ext_chg_active == ACTIVE) {
-
-do_wait:
-		pr_debug("before msm_otg ext chg wait\n");
-		msm_otg_dbg_log_event(&motg->phy, "EXT CHG: WAIT", 0, 0);
-
-		t = wait_for_completion_timeout(&motg->ext_chg_wait,
-				msecs_to_jiffies(3000));
-		msm_otg_dbg_log_event(&motg->phy, "EXT CHG: DONE", t, 0);
-
-		if (!t)
-			pr_err("msm_otg ext chg wait timeout\n");
-		else if (motg->ext_chg_active == ACTIVE)
-			goto do_wait;
-		else
-			pr_debug("msm_otg ext chg wait done\n");
-	}
-
-	if (motg->ext_chg_opened) {
-		if (phy->flags & ENABLE_DP_MANUAL_PULLUP) {
-			ulpi_write(phy, ULPI_MISC_A_VBUSVLDEXT |
-					ULPI_MISC_A_VBUSVLDEXTSEL,
-					ULPI_CLR(ULPI_MISC_A));
-		}
-		/* clear charging register bits */
-		ulpi_write(phy, 0x3F, 0x86);
-		/* re-enable DP and DM pull-down resistors*/
-		ulpi_write(phy, 0x6, 0xB);
-	}
-}
-
-static void msm_otg_sm_work(struct work_struct *w)
-{
-	struct msm_otg *motg = container_of(w, struct msm_otg, sm_work);
-	struct usb_otg *otg = motg->phy.otg;
-	struct device *dev = otg->usb_phy->dev;
-	bool work = 0, dcp;
-	int ret;
-
-	pr_debug("%s work\n", usb_otg_state_string(otg->state));
-	msm_otg_dbg_log_event(&motg->phy, "SM WORK:",
-			otg->state, motg->inputs);
-
-	/* Just resume h/w if reqd, pm_count is handled based on state/inputs */
-	if (motg->resume_pending) {
-		pm_runtime_get_sync(otg->usb_phy->dev);
-		if (atomic_read(&motg->in_lpm)) {
-			dev_err(dev, "SM WORK: USB is in LPM\n");
-			msm_otg_dbg_log_event(&motg->phy,
-					"SM WORK: USB IS IN LPM",
-					otg->state, motg->inputs);
-			msm_otg_resume(motg);
-		}
-		motg->resume_pending = false;
-		pm_runtime_put_noidle(otg->usb_phy->dev);
-	}
-
-	switch (otg->state) {
-	case OTG_STATE_UNDEFINED:
-		pm_runtime_get_sync(otg->usb_phy->dev);
-		msm_otg_reset(otg->usb_phy);
-		/* Add child device only after block reset */
-		ret = of_platform_populate(motg->pdev->dev.of_node, NULL, NULL,
-					&motg->pdev->dev);
-		if (ret)
-			dev_dbg(&motg->pdev->dev, "failed to add BAM core\n");
-
-		msm_otg_init_sm(motg);
-		otg->state = OTG_STATE_B_IDLE;
-		if (!test_bit(B_SESS_VLD, &motg->inputs) &&
-				test_bit(ID, &motg->inputs)) {
-			msm_otg_dbg_log_event(&motg->phy,
-				"PM RUNTIME: UNDEF PUT",
-				get_pm_runtime_counter(otg->usb_phy->dev), 0);
-			pm_runtime_put_sync(otg->usb_phy->dev);
-			break;
-		}
-		pm_runtime_put(otg->usb_phy->dev);
-		/* FALL THROUGH */
-	case OTG_STATE_B_IDLE:
-		if (!test_bit(ID, &motg->inputs) && otg->host) {
-			pr_debug("!id\n");
-			msm_otg_dbg_log_event(&motg->phy, "!ID",
-					motg->inputs, otg->state);
-
-			msm_otg_start_host(otg, 1);
-			otg->state = OTG_STATE_A_HOST;
-		} else if (test_bit(B_SESS_VLD, &motg->inputs)) {
-			pr_debug("b_sess_vld\n");
-			msm_otg_dbg_log_event(&motg->phy, "B_SESS_VLD",
-					motg->inputs, otg->state);
-			switch (motg->chg_state) {
-			case USB_CHG_STATE_UNDEFINED:
-				/* put at the end of chg_det or disconnect */
-				pm_runtime_get_sync(otg->usb_phy->dev);
-				msm_otg_dbg_log_event(&motg->phy, "PM CHG GET",
-						get_pm_runtime_counter(dev), 0);
-				motg->chg_state = USB_CHG_STATE_IN_PROGRESS;
-				msm_chg_detect_work(&motg->chg_work.work);
-				break;
-			case USB_CHG_STATE_DETECTED:
-				switch (motg->chg_type) {
-				case USB_DCP_CHARGER:
-					/* fall through */
-				case USB_NONCOMPLIANT_CHARGER:
-					msm_otg_notify_charger(motg,
-							dcp_max_current);
-					if (!motg->is_ext_chg_dcp)
-						otg->state =
-							OTG_STATE_B_CHARGER;
-					break;
-				case USB_FLOATED_CHARGER:
-					msm_otg_notify_charger(motg,
-							IDEV_CHG_MAX);
-					otg->state = OTG_STATE_B_CHARGER;
-					break;
-				case USB_CDP_CHARGER:
-					msm_otg_notify_charger(motg,
-							IDEV_CHG_MAX);
-					/* fall through */
-				case USB_SDP_CHARGER:
-					pm_runtime_get_sync(otg->usb_phy->dev);
-					msm_otg_start_peripheral(otg, 1);
-					otg->state =
-						OTG_STATE_B_PERIPHERAL;
-					mod_timer(&motg->chg_check_timer,
-							CHG_RECHECK_DELAY);
-					break;
-				default:
-					break;
-				}
-				break;
-			default:
-				break;
-			}
-		} else {
-			pr_debug("chg_work cancel");
-			msm_otg_dbg_log_event(&motg->phy, "CHG_WORK CANCEL",
-					motg->inputs, otg->state);
-			del_timer_sync(&motg->chg_check_timer);
-			clear_bit(B_FALSE_SDP, &motg->inputs);
-			cancel_delayed_work_sync(&motg->chg_work);
-			/*
-			 * Find out whether chg_w couldn't start or finished.
-			 * In both the cases, runtime ref_count vote is missing
-			 */
-			if (motg->chg_state == USB_CHG_STATE_UNDEFINED ||
-			    motg->chg_state == USB_CHG_STATE_DETECTED) {
-				msm_otg_dbg_log_event(&motg->phy, "RT !CHG GET",
-				  get_pm_runtime_counter(otg->usb_phy->dev), 0);
-				pm_runtime_get_sync(dev);
-			}
-
-			dcp = (motg->chg_type == USB_DCP_CHARGER);
-			motg->chg_state = USB_CHG_STATE_UNDEFINED;
-			motg->chg_type = USB_INVALID_CHARGER;
-			msm_otg_notify_charger(motg, 0);
-			if (dcp) {
-				if (motg->ext_chg_active == DEFAULT)
-					motg->ext_chg_active = INACTIVE;
-				msm_otg_wait_for_ext_chg_done(motg);
-				/* Turn off VDP_SRC */
-				ulpi_write(otg->usb_phy, 0x2, 0x86);
-			}
-			msm_chg_block_off(motg);
-			msm_otg_dbg_log_event(&motg->phy, "RT: CHG A PUT",
-				get_pm_runtime_counter(otg->usb_phy->dev), 0);
-			/* Delay used only if autosuspend enabled */
-			pm_runtime_mark_last_busy(dev);
-			pm_runtime_put_autosuspend(dev);
-		}
-		break;
-	case OTG_STATE_B_PERIPHERAL:
-		if (test_bit(B_SESS_VLD, &motg->inputs) &&
-				test_bit(B_FALSE_SDP, &motg->inputs)) {
-			pr_debug("B_FALSE_SDP\n");
-			msm_otg_start_peripheral(otg, 0);
-			motg->chg_type = USB_DCP_CHARGER;
-			clear_bit(B_FALSE_SDP, &motg->inputs);
-			otg->state = OTG_STATE_B_IDLE;
-			msm_otg_dbg_log_event(&motg->phy, "B_FALSE_SDP PUT",
-				get_pm_runtime_counter(dev), motg->inputs);
-			pm_runtime_put_sync(dev);
-			/* schedule work to update charging current */
-			work = 1;
-		} else if (!test_bit(B_SESS_VLD, &motg->inputs)) {
-			msm_otg_start_peripheral(otg, 0);
-			msm_otg_dbg_log_event(&motg->phy, "RT PM: B_PERI A PUT",
-				get_pm_runtime_counter(dev), 0);
-			/* _put for _get done on cable connect in B_IDLE */
-			pm_runtime_put_noidle(dev);
-			/* Schedule work to finish cable disconnect processing*/
-			otg->state = OTG_STATE_B_IDLE;
-			work = 1;
-		} else if (test_bit(A_BUS_SUSPEND, &motg->inputs)) {
-			pr_debug("a_bus_suspend\n");
-			msm_otg_dbg_log_event(&motg->phy,
-				"BUS_SUSPEND: PM RT PUT",
-				get_pm_runtime_counter(dev), 0);
-			otg->state = OTG_STATE_B_SUSPEND;
-			/* _get on connect in B_IDLE or host resume in B_SUSP */
-			pm_runtime_mark_last_busy(dev);
-			pm_runtime_put_autosuspend(dev);
-		}
-		break;
-	case OTG_STATE_B_SUSPEND:
-		if (!test_bit(B_SESS_VLD, &motg->inputs)) {
-			msm_otg_start_peripheral(otg, 0);
-			otg->state = OTG_STATE_B_IDLE;
-			/* Schedule work to finish cable disconnect processing*/
-			work = 1;
-		} else if (!test_bit(A_BUS_SUSPEND, &motg->inputs)) {
-			pr_debug("!a_bus_suspend\n");
-			otg->state = OTG_STATE_B_PERIPHERAL;
-			msm_otg_dbg_log_event(&motg->phy,
-				"BUS_RESUME: PM RT GET",
-				get_pm_runtime_counter(dev), 0);
-			pm_runtime_get_sync(dev);
-		}
-		break;
-
-	case OTG_STATE_B_CHARGER:
-		if (test_bit(B_SESS_VLD, &motg->inputs)) {
-			pr_debug("BSV set again\n");
-			msm_otg_dbg_log_event(&motg->phy, "BSV SET AGAIN",
-					motg->inputs, otg->state);
-		} else if (!test_bit(B_SESS_VLD, &motg->inputs)) {
-			otg->state = OTG_STATE_B_IDLE;
-			work = 1;
-		}
-		break;
-	case OTG_STATE_A_HOST:
-		if (test_bit(ID, &motg->inputs)) {
-			msm_otg_start_host(otg, 0);
-			otg->state = OTG_STATE_B_IDLE;
-			work = 1;
-		}
-		break;
-	default:
-		break;
-	}
-
-	if (work)
-		queue_work(motg->otg_wq, &motg->sm_work);
-}
-
-static irqreturn_t msm_otg_irq(int irq, void *data)
-{
-	struct msm_otg *motg = data;
-	struct usb_otg *otg = motg->phy.otg;
-	u32 otgsc = 0;
-	bool work = 0;
-
-	if (atomic_read(&motg->in_lpm)) {
-		pr_debug("OTG IRQ: %d in LPM\n", irq);
-		msm_otg_dbg_log_event(&motg->phy, "OTG IRQ IS IN LPM",
-				irq, otg->state);
-		/*Ignore interrupt if one interrupt already seen in LPM*/
-		if (motg->async_int)
-			return IRQ_HANDLED;
-
-		disable_irq_nosync(irq);
-		motg->async_int = irq;
-		msm_otg_kick_sm_work(motg);
-
-		return IRQ_HANDLED;
-	}
-	motg->usb_irq_count++;
-
-	otgsc = readl_relaxed(USB_OTGSC);
-	if (!(otgsc & (OTGSC_IDIS | OTGSC_BSVIS)))
-		return IRQ_NONE;
-
-	if ((otgsc & OTGSC_IDIS) && (otgsc & OTGSC_IDIE)) {
-		if (otgsc & OTGSC_ID) {
-			dev_dbg(otg->usb_phy->dev, "ID set\n");
-			msm_otg_dbg_log_event(&motg->phy, "ID SET",
-				motg->inputs, otg->state);
-			set_bit(ID, &motg->inputs);
-		} else {
-			dev_dbg(otg->usb_phy->dev, "ID clear\n");
-			msm_otg_dbg_log_event(&motg->phy, "ID CLEAR",
-					motg->inputs, otg->state);
-			clear_bit(ID, &motg->inputs);
-		}
-		work = 1;
-	} else if ((otgsc & OTGSC_BSVIE) && (otgsc & OTGSC_BSVIS)) {
-		if (otgsc & OTGSC_BSV) {
-			dev_dbg(otg->usb_phy->dev, "BSV set\n");
-			msm_otg_dbg_log_event(&motg->phy, "BSV SET",
-					motg->inputs, otg->state);
-			set_bit(B_SESS_VLD, &motg->inputs);
-		} else {
-			dev_dbg(otg->usb_phy->dev, "BSV clear\n");
-			msm_otg_dbg_log_event(&motg->phy, "BSV CLEAR",
-					motg->inputs, otg->state);
-			clear_bit(B_SESS_VLD, &motg->inputs);
-			clear_bit(A_BUS_SUSPEND, &motg->inputs);
-		}
-		work = 1;
-	}
-	if (work)
-		queue_work(motg->otg_wq, &motg->sm_work);
-
-	writel_relaxed(otgsc, USB_OTGSC);
-
-	return IRQ_HANDLED;
-}
-
-static void msm_otg_set_vbus_state(int online)
-{
-	struct msm_otg *motg = the_msm_otg;
-	static bool init;
-
-	motg->vbus_state = online;
-
-	if (motg->err_event_seen)
-		return;
-
-	if (online) {
-		pr_debug("PMIC: BSV set\n");
-		msm_otg_dbg_log_event(&motg->phy, "PMIC: BSV SET",
-				init, motg->inputs);
-		if (test_and_set_bit(B_SESS_VLD, &motg->inputs) && init)
-			return;
-	} else {
-		pr_debug("PMIC: BSV clear\n");
-		msm_otg_dbg_log_event(&motg->phy, "PMIC: BSV CLEAR",
-				init, motg->inputs);
-		motg->is_ext_chg_dcp = false;
-		if (!test_and_clear_bit(B_SESS_VLD, &motg->inputs) && init)
-			return;
-	}
-
-	/* do not queue state m/c work if id is grounded */
-	if (!test_bit(ID, &motg->inputs) &&
-		!motg->pdata->vbus_low_as_hostmode) {
-		/*
-		 * state machine work waits for initial VBUS
-		 * completion in UNDEFINED state.  Process
-		 * the initial VBUS event in ID_GND state.
-		 */
-		if (init)
-			return;
-	}
-
-	if (!init) {
-		init = true;
-		if (pmic_vbus_init.done &&
-				test_bit(B_SESS_VLD, &motg->inputs)) {
-			pr_debug("PMIC: BSV came late\n");
-			msm_otg_dbg_log_event(&motg->phy, "PMIC: BSV CAME LATE",
-					init, motg->inputs);
-			goto out;
-		}
-
-		if (motg->pdata->vbus_low_as_hostmode &&
-			!test_bit(B_SESS_VLD, &motg->inputs)) {
-			motg->id_state = USB_ID_GROUND;
-			clear_bit(ID, &motg->inputs);
-		}
-		complete(&pmic_vbus_init);
-		pr_debug("PMIC: BSV init complete\n");
-		msm_otg_dbg_log_event(&motg->phy, "PMIC: BSV INIT COMPLETE",
-				init, motg->inputs);
-		return;
-	}
-
-out:
-	if (motg->is_ext_chg_dcp) {
-		if (test_bit(B_SESS_VLD, &motg->inputs)) {
-			msm_otg_notify_charger(motg, IDEV_CHG_MAX);
-		} else {
-			motg->is_ext_chg_dcp = false;
-			motg->chg_state = USB_CHG_STATE_UNDEFINED;
-			motg->chg_type = USB_INVALID_CHARGER;
-			msm_otg_notify_charger(motg, 0);
-		}
-		return;
-	}
-
-	msm_otg_dbg_log_event(&motg->phy, "CHECK VBUS EVENT DURING SUSPEND",
-			atomic_read(&motg->pm_suspended),
-			motg->sm_work_pending);
-
-	/* Move to host mode on vbus low if required */
-	if (motg->pdata->vbus_low_as_hostmode) {
-		if (!test_bit(B_SESS_VLD, &motg->inputs))
-			clear_bit(ID, &motg->inputs);
-		else
-			set_bit(ID, &motg->inputs);
-	}
-	msm_otg_kick_sm_work(motg);
-}
-
-static void msm_id_status_w(struct work_struct *w)
-{
-	struct msm_otg *motg = container_of(w, struct msm_otg,
-						id_status_work.work);
-	int work = 0;
-
-	dev_dbg(motg->phy.dev, "ID status_w\n");
-
-	if (motg->pdata->pmic_id_irq)
-		motg->id_state = msm_otg_read_pmic_id_state(motg);
-	else if (motg->ext_id_irq)
-		motg->id_state = gpio_get_value(motg->pdata->usb_id_gpio);
-	else if (motg->phy_irq)
-		motg->id_state = msm_otg_read_phy_id_state(motg);
-
-	if (motg->err_event_seen)
-		return;
-
-	if (motg->id_state) {
-		if (gpio_is_valid(motg->pdata->switch_sel_gpio))
-			gpio_direction_input(motg->pdata->switch_sel_gpio);
-		if (!test_and_set_bit(ID, &motg->inputs)) {
-			pr_debug("ID set\n");
-			msm_otg_dbg_log_event(&motg->phy, "ID SET",
-					motg->inputs, motg->phy.otg->state);
-			work = 1;
-		}
-	} else {
-		if (gpio_is_valid(motg->pdata->switch_sel_gpio))
-			gpio_direction_output(motg->pdata->switch_sel_gpio, 1);
-		if (test_and_clear_bit(ID, &motg->inputs)) {
-			pr_debug("ID clear\n");
-			msm_otg_dbg_log_event(&motg->phy, "ID CLEAR",
-					motg->inputs, motg->phy.otg->state);
-			work = 1;
-		}
-	}
-
-	if (work && (motg->phy.otg->state != OTG_STATE_UNDEFINED)) {
-		msm_otg_dbg_log_event(&motg->phy,
-				"CHECK ID EVENT DURING SUSPEND",
-				atomic_read(&motg->pm_suspended),
-				motg->sm_work_pending);
-		msm_otg_kick_sm_work(motg);
-	}
-}
-
-#define MSM_ID_STATUS_DELAY	5 /* 5msec */
-static irqreturn_t msm_id_irq(int irq, void *data)
-{
-	struct msm_otg *motg = data;
-
-	/*schedule delayed work for 5msec for ID line state to settle*/
-	queue_delayed_work(motg->otg_wq, &motg->id_status_work,
-			msecs_to_jiffies(MSM_ID_STATUS_DELAY));
-
-	return IRQ_HANDLED;
-}
-
-int msm_otg_pm_notify(struct notifier_block *notify_block,
-					unsigned long mode, void *unused)
-{
-	struct msm_otg *motg = container_of(
-		notify_block, struct msm_otg, pm_notify);
-
-	dev_dbg(motg->phy.dev, "OTG PM notify:%lx, sm_pending:%u\n", mode,
-					motg->sm_work_pending);
-	msm_otg_dbg_log_event(&motg->phy, "PM NOTIFY",
-			mode, motg->sm_work_pending);
-
-	switch (mode) {
-	case PM_POST_SUSPEND:
-		/* OTG sm_work can be armed now */
-		atomic_set(&motg->pm_suspended, 0);
-
-		/* Handle any deferred wakeup events from USB during suspend */
-		if (motg->sm_work_pending) {
-			motg->sm_work_pending = false;
-			queue_work(motg->otg_wq, &motg->sm_work);
-		}
-		break;
-
-	default:
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static int msm_otg_mode_show(struct seq_file *s, void *unused)
-{
-	struct msm_otg *motg = s->private;
-	struct usb_otg *otg = motg->phy.otg;
-
-	switch (otg->state) {
-	case OTG_STATE_A_HOST:
-		seq_puts(s, "host\n");
-		break;
-	case OTG_STATE_B_IDLE:
-	case OTG_STATE_B_PERIPHERAL:
-	case OTG_STATE_B_SUSPEND:
-		seq_puts(s, "peripheral\n");
-		break;
-	default:
-		seq_puts(s, "none\n");
-		break;
-	}
-
-	return 0;
-}
-
-static int msm_otg_mode_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, msm_otg_mode_show, inode->i_private);
-}
-
-static ssize_t msm_otg_mode_write(struct file *file, const char __user *ubuf,
-				size_t count, loff_t *ppos)
-{
-	struct seq_file *s = file->private_data;
-	struct msm_otg *motg = s->private;
-	char buf[16];
-	struct usb_phy *phy = &motg->phy;
-	int status = count;
-	enum usb_mode_type req_mode;
-
-	memset(buf, 0x00, sizeof(buf));
-
-	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) {
-		status = -EFAULT;
-		goto out;
-	}
-
-	if (!strncmp(buf, "host", 4)) {
-		req_mode = USB_HOST;
-	} else if (!strncmp(buf, "peripheral", 10)) {
-		req_mode = USB_PERIPHERAL;
-	} else if (!strncmp(buf, "none", 4)) {
-		req_mode = USB_NONE;
-	} else {
-		status = -EINVAL;
-		goto out;
-	}
-
-	switch (req_mode) {
-	case USB_NONE:
-		switch (phy->otg->state) {
-		case OTG_STATE_A_HOST:
-		case OTG_STATE_B_PERIPHERAL:
-		case OTG_STATE_B_SUSPEND:
-			set_bit(ID, &motg->inputs);
-			clear_bit(B_SESS_VLD, &motg->inputs);
-			break;
-		default:
-			goto out;
-		}
-		break;
-	case USB_PERIPHERAL:
-		switch (phy->otg->state) {
-		case OTG_STATE_B_IDLE:
-		case OTG_STATE_A_HOST:
-			set_bit(ID, &motg->inputs);
-			set_bit(B_SESS_VLD, &motg->inputs);
-			break;
-		default:
-			goto out;
-		}
-		break;
-	case USB_HOST:
-		switch (phy->otg->state) {
-		case OTG_STATE_B_IDLE:
-		case OTG_STATE_B_PERIPHERAL:
-		case OTG_STATE_B_SUSPEND:
-			clear_bit(ID, &motg->inputs);
-			break;
-		default:
-			goto out;
-		}
-		break;
-	default:
-		goto out;
-	}
-
-	motg->id_state = (test_bit(ID, &motg->inputs)) ? USB_ID_FLOAT :
-							USB_ID_GROUND;
-	queue_work(motg->otg_wq, &motg->sm_work);
-out:
-	return status;
-}
-
-const struct file_operations msm_otg_mode_fops = {
-	.open = msm_otg_mode_open,
-	.read = seq_read,
-	.write = msm_otg_mode_write,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-
-static int msm_otg_show_otg_state(struct seq_file *s, void *unused)
-{
-	struct msm_otg *motg = s->private;
-	struct usb_phy *phy = &motg->phy;
-
-	seq_printf(s, "%s\n", usb_otg_state_string(phy->otg->state));
-	return 0;
-}
-
-static int msm_otg_otg_state_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, msm_otg_show_otg_state, inode->i_private);
-}
-
-const struct file_operations msm_otg_state_fops = {
-	.open = msm_otg_otg_state_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-
-static int msm_otg_show_chg_type(struct seq_file *s, void *unused)
-{
-	struct msm_otg *motg = s->private;
-
-	seq_printf(s, "%s\n", chg_to_string(motg->chg_type));
-	return 0;
-}
-
-static int msm_otg_chg_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, msm_otg_show_chg_type, inode->i_private);
-}
-
-const struct file_operations msm_otg_chg_fops = {
-	.open = msm_otg_chg_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-
-static int msm_otg_bus_show(struct seq_file *s, void *unused)
-{
-	if (debug_bus_voting_enabled)
-		seq_puts(s, "enabled\n");
-	else
-		seq_puts(s, "disabled\n");
-
-	return 0;
-}
-
-static int msm_otg_bus_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, msm_otg_bus_show, inode->i_private);
-}
-
-static ssize_t msm_otg_bus_write(struct file *file, const char __user *ubuf,
-				size_t count, loff_t *ppos)
-{
-	char buf[8];
-	struct seq_file *s = file->private_data;
-	struct msm_otg *motg = s->private;
-
-	memset(buf, 0x00, sizeof(buf));
-
-	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
-		return -EFAULT;
-
-	if (!strncmp(buf, "enable", 6)) {
-		/* Do not vote here. Let OTG statemachine decide when to vote */
-		debug_bus_voting_enabled = true;
-	} else {
-		debug_bus_voting_enabled = false;
-		msm_otg_bus_vote(motg, USB_MIN_PERF_VOTE);
-	}
-
-	return count;
-}
-
-static int msm_otg_dbg_buff_show(struct seq_file *s, void *unused)
-{
-	struct msm_otg *motg = s->private;
-	unsigned long	flags;
-	unsigned int	i;
-
-	read_lock_irqsave(&motg->dbg_lock, flags);
-
-	i = motg->dbg_idx;
-	if (strnlen(motg->buf[i], DEBUG_MSG_LEN))
-		seq_printf(s, "%s\n", motg->buf[i]);
-	for (dbg_inc(&i); i != motg->dbg_idx;  dbg_inc(&i)) {
-		if (!strnlen(motg->buf[i], DEBUG_MSG_LEN))
-			continue;
-		seq_printf(s, "%s\n", motg->buf[i]);
-	}
-	read_unlock_irqrestore(&motg->dbg_lock, flags);
-
-	return 0;
-}
-
-static int msm_otg_dbg_buff_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, msm_otg_dbg_buff_show, inode->i_private);
-}
-
-const struct file_operations msm_otg_dbg_buff_fops = {
-	.open = msm_otg_dbg_buff_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-
-static int msm_otg_dpdm_regulator_enable(struct regulator_dev *rdev)
-{
-	int ret = 0;
-	struct msm_otg *motg = rdev_get_drvdata(rdev);
-
-	if (!motg->rm_pulldown) {
-		ret = msm_hsusb_ldo_enable(motg, USB_PHY_REG_3P3_ON);
-		if (!ret) {
-			motg->rm_pulldown = true;
-			msm_otg_dbg_log_event(&motg->phy, "RM Pulldown",
-					motg->rm_pulldown, 0);
-		}
-	}
-
-	return ret;
-}
-
-static int msm_otg_dpdm_regulator_disable(struct regulator_dev *rdev)
-{
-	int ret = 0;
-	struct msm_otg *motg = rdev_get_drvdata(rdev);
-
-	if (motg->rm_pulldown) {
-		ret = msm_hsusb_ldo_enable(motg, USB_PHY_REG_3P3_OFF);
-		if (!ret) {
-			motg->rm_pulldown = false;
-			msm_otg_dbg_log_event(&motg->phy, "RM Pulldown",
-					motg->rm_pulldown, 0);
-		}
-	}
-
-	return ret;
-}
-
-static int msm_otg_dpdm_regulator_is_enabled(struct regulator_dev *rdev)
-{
-	struct msm_otg *motg = rdev_get_drvdata(rdev);
-
-	return motg->rm_pulldown;
-}
-
-static struct regulator_ops msm_otg_dpdm_regulator_ops = {
-	.enable		= msm_otg_dpdm_regulator_enable,
-	.disable	= msm_otg_dpdm_regulator_disable,
-	.is_enabled	= msm_otg_dpdm_regulator_is_enabled,
-};
-
-static int usb_phy_regulator_init(struct msm_otg *motg)
-{
-	struct device *dev = motg->phy.dev;
-	struct regulator_config cfg = {};
-	struct regulator_init_data *init_data;
-
-	init_data = devm_kzalloc(dev, sizeof(*init_data), GFP_KERNEL);
-	if (!init_data)
-		return -ENOMEM;
-
-	init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_STATUS;
-	motg->dpdm_rdesc.owner = THIS_MODULE;
-	motg->dpdm_rdesc.type = REGULATOR_VOLTAGE;
-	motg->dpdm_rdesc.ops = &msm_otg_dpdm_regulator_ops;
-	motg->dpdm_rdesc.name = kbasename(dev->of_node->full_name);
-
-	cfg.dev = dev;
-	cfg.init_data = init_data;
-	cfg.driver_data = motg;
-	cfg.of_node = dev->of_node;
-
-	motg->dpdm_rdev = devm_regulator_register(dev, &motg->dpdm_rdesc, &cfg);
-	if (IS_ERR(motg->dpdm_rdev))
-		return PTR_ERR(motg->dpdm_rdev);
-
-	return 0;
-}
-
-const struct file_operations msm_otg_bus_fops = {
-	.open = msm_otg_bus_open,
-	.read = seq_read,
-	.write = msm_otg_bus_write,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-
-static struct dentry *msm_otg_dbg_root;
-
-static int msm_otg_debugfs_init(struct msm_otg *motg)
-{
-	struct dentry *msm_otg_dentry;
-	struct msm_otg_platform_data *pdata = motg->pdata;
-
-	msm_otg_dbg_root = debugfs_create_dir("msm_otg", NULL);
-
-	if (!msm_otg_dbg_root || IS_ERR(msm_otg_dbg_root))
-		return -ENODEV;
-
-	if ((pdata->mode == USB_OTG || pdata->mode == USB_PERIPHERAL) &&
-		pdata->otg_control == OTG_USER_CONTROL) {
-
-		msm_otg_dentry = debugfs_create_file("mode", 0644,
-			msm_otg_dbg_root, motg, &msm_otg_mode_fops);
-
-		if (!msm_otg_dentry) {
-			debugfs_remove(msm_otg_dbg_root);
-			msm_otg_dbg_root = NULL;
-			return -ENODEV;
-		}
-	}
-
-	msm_otg_dentry = debugfs_create_file("chg_type", 0444, msm_otg_dbg_root,
-						motg, &msm_otg_chg_fops);
-
-	if (!msm_otg_dentry) {
-		debugfs_remove_recursive(msm_otg_dbg_root);
-		return -ENODEV;
-	}
-
-	msm_otg_dentry = debugfs_create_file("bus_voting", 0644,
-			msm_otg_dbg_root, motg, &msm_otg_bus_fops);
-
-	if (!msm_otg_dentry) {
-		debugfs_remove_recursive(msm_otg_dbg_root);
-		return -ENODEV;
-	}
-
-	msm_otg_dentry = debugfs_create_file("otg_state", 0444,
-			msm_otg_dbg_root, motg, &msm_otg_state_fops);
-
-	if (!msm_otg_dentry) {
-		debugfs_remove_recursive(msm_otg_dbg_root);
-		return -ENODEV;
-	}
-
-	msm_otg_dentry = debugfs_create_file("dbg_buff", 0444,
-			msm_otg_dbg_root, motg, &msm_otg_dbg_buff_fops);
-
-	if (!msm_otg_dentry) {
-		debugfs_remove_recursive(msm_otg_dbg_root);
-		return -ENODEV;
-	}
-	return 0;
-}
-
-static void msm_otg_debugfs_cleanup(void)
-{
-	debugfs_remove_recursive(msm_otg_dbg_root);
-}
-
-static ssize_t
-set_msm_otg_perf_mode(struct device *dev, struct device_attribute *attr,
-		const char *buf, size_t count)
-{
-	struct msm_otg *motg = the_msm_otg;
-	int ret;
-	long clk_rate;
-
-	pr_debug("%s: enable:%d\n", __func__, !strncasecmp(buf, "enable", 6));
-
-	if (!strncasecmp(buf, "enable", 6)) {
-		clk_rate = motg->core_clk_nominal_rate;
-		msm_otg_bus_freq_set(motg, USB_NOC_NOM_VOTE);
-	} else {
-		clk_rate = motg->core_clk_svs_rate;
-		msm_otg_bus_freq_set(motg, USB_NOC_SVS_VOTE);
-	}
-
-	if (clk_rate) {
-		pr_debug("Set usb sys_clk rate:%ld\n", clk_rate);
-		ret = clk_set_rate(motg->core_clk, clk_rate);
-		if (ret)
-			pr_err("sys_clk set_rate fail:%d %ld\n", ret, clk_rate);
-		msm_otg_dbg_log_event(&motg->phy, "OTG PERF SET",
-							clk_rate, ret);
-	} else {
-		pr_err("usb sys_clk rate is undefined\n");
-	}
-
-	return count;
-}
-
-static DEVICE_ATTR(perf_mode, 0200, NULL, set_msm_otg_perf_mode);
-
-#define MSM_OTG_CMD_ID		0x09
-#define MSM_OTG_DEVICE_ID	0x04
-#define MSM_OTG_VMID_IDX	0xFF
-#define MSM_OTG_MEM_TYPE	0x02
-struct msm_otg_scm_cmd_buf {
-	unsigned int device_id;
-	unsigned int vmid_idx;
-	unsigned int mem_type;
-} __attribute__ ((__packed__));
-
-static void msm_otg_pnoc_errata_fix(struct msm_otg *motg)
-{
-	int ret;
-	struct msm_otg_platform_data *pdata = motg->pdata;
-	struct msm_otg_scm_cmd_buf cmd_buf;
-
-	if (!pdata->pnoc_errata_fix)
-		return;
-
-	dev_dbg(motg->phy.dev, "applying fix for pnoc h/w issue\n");
-
-	cmd_buf.device_id = MSM_OTG_DEVICE_ID;
-	cmd_buf.vmid_idx = MSM_OTG_VMID_IDX;
-	cmd_buf.mem_type = MSM_OTG_MEM_TYPE;
-
-	ret = scm_call(SCM_SVC_MP, MSM_OTG_CMD_ID, &cmd_buf,
-				sizeof(cmd_buf), NULL, 0);
-
-	if (ret)
-		dev_err(motg->phy.dev, "scm command failed to update VMIDMT\n");
-}
-
-static u64 msm_otg_dma_mask = DMA_BIT_MASK(32);
-static struct platform_device *msm_otg_add_pdev(
-		struct platform_device *ofdev, const char *name)
-{
-	struct platform_device *pdev;
-	const struct resource *res = ofdev->resource;
-	unsigned int num = ofdev->num_resources;
-	int retval;
-	struct ci13xxx_platform_data ci_pdata;
-	struct msm_otg_platform_data *otg_pdata;
-	struct msm_otg *motg;
-
-	pdev = platform_device_alloc(name, -1);
-	if (!pdev) {
-		retval = -ENOMEM;
-		goto error;
-	}
-
-	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
-	pdev->dev.dma_mask = &msm_otg_dma_mask;
-	pdev->dev.parent = &ofdev->dev;
-
-	if (num) {
-		retval = platform_device_add_resources(pdev, res, num);
-		if (retval)
-			goto error;
-	}
-
-	if (!strcmp(name, "msm_hsusb")) {
-		otg_pdata =
-			(struct msm_otg_platform_data *)
-				ofdev->dev.platform_data;
-		motg = platform_get_drvdata(ofdev);
-		ci_pdata.log2_itc = otg_pdata->log2_itc;
-		ci_pdata.usb_core_id = 0;
-		ci_pdata.l1_supported = otg_pdata->l1_supported;
-		ci_pdata.enable_ahb2ahb_bypass =
-				otg_pdata->enable_ahb2ahb_bypass;
-		ci_pdata.enable_streaming = otg_pdata->enable_streaming;
-		ci_pdata.enable_axi_prefetch = otg_pdata->enable_axi_prefetch;
-		retval = platform_device_add_data(pdev, &ci_pdata,
-			sizeof(ci_pdata));
-		if (retval)
-			goto error;
-	}
-
-	retval = platform_device_add(pdev);
-	if (retval)
-		goto error;
-
-	return pdev;
-
-error:
-	platform_device_put(pdev);
-	return ERR_PTR(retval);
-}
-
-static int msm_otg_setup_devices(struct platform_device *ofdev,
-		enum usb_mode_type mode, bool init)
-{
-	const char *gadget_name = "msm_hsusb";
-	const char *host_name = "msm_hsusb_host";
-	static struct platform_device *gadget_pdev;
-	static struct platform_device *host_pdev;
-	int retval = 0;
-
-	if (!init) {
-		if (gadget_pdev) {
-			platform_device_unregister(gadget_pdev);
-			device_remove_file(&gadget_pdev->dev,
-					   &dev_attr_perf_mode);
-		}
-		if (host_pdev)
-			platform_device_unregister(host_pdev);
-		return 0;
-	}
-
-	switch (mode) {
-	case USB_OTG:
-		/* fall through */
-	case USB_PERIPHERAL:
-		gadget_pdev = msm_otg_add_pdev(ofdev, gadget_name);
-		if (IS_ERR(gadget_pdev)) {
-			retval = PTR_ERR(gadget_pdev);
-			break;
-		}
-		if (device_create_file(&gadget_pdev->dev, &dev_attr_perf_mode))
-			dev_err(&gadget_pdev->dev, "perf_mode file failed\n");
-		if (mode == USB_PERIPHERAL)
-			break;
-		/* fall through */
-	case USB_HOST:
-		host_pdev = msm_otg_add_pdev(ofdev, host_name);
-		if (IS_ERR(host_pdev)) {
-			retval = PTR_ERR(host_pdev);
-			if (mode == USB_OTG) {
-				platform_device_unregister(gadget_pdev);
-				device_remove_file(&gadget_pdev->dev,
-						   &dev_attr_perf_mode);
-			}
-		}
-		break;
-	default:
-		break;
-	}
-
-	return retval;
-}
-
-static int msm_otg_ext_chg_open(struct inode *inode, struct file *file)
-{
-	struct msm_otg *motg = the_msm_otg;
-
-	pr_debug("msm_otg ext chg open\n");
-	msm_otg_dbg_log_event(&motg->phy, "EXT CHG: OPEN",
-			motg->inputs, motg->phy.otg->state);
-
-	motg->ext_chg_opened = true;
-	file->private_data = (void *)motg;
-	return 0;
-}
-
-static long
-msm_otg_ext_chg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
-	struct msm_otg *motg = file->private_data;
-	struct msm_usb_chg_info info = {0};
-	int ret = 0, val;
-
-	msm_otg_dbg_log_event(&motg->phy, "EXT CHG: IOCTL", cmd, 0);
-	switch (cmd) {
-	case MSM_USB_EXT_CHG_INFO:
-		info.chg_block_type = USB_CHG_BLOCK_ULPI;
-		info.page_offset = motg->io_res->start & ~PAGE_MASK;
-		/* mmap() works on PAGE granularity */
-		info.length = PAGE_SIZE;
-
-		if (copy_to_user((void __user *)arg, &info, sizeof(info))) {
-			pr_err("%s: copy to user failed\n\n", __func__);
-			ret = -EFAULT;
-		}
-		break;
-	case MSM_USB_EXT_CHG_BLOCK_LPM:
-		if (get_user(val, (int __user *)arg)) {
-			pr_err("%s: get_user failed\n\n", __func__);
-			ret = -EFAULT;
-			break;
-		}
-		pr_debug("%s: LPM block request %d\n", __func__, val);
-		msm_otg_dbg_log_event(&motg->phy, "LPM BLOCK REQ", val, 0);
-		if (val) { /* block LPM */
-			if (motg->chg_type == USB_DCP_CHARGER) {
-				motg->ext_chg_active = ACTIVE;
-				msm_otg_dbg_log_event(&motg->phy,
-				      "PM RUNTIME: EXT_CHG GET",
-				      get_pm_runtime_counter(motg->phy.dev), 0);
-				pm_runtime_get_sync(motg->phy.dev);
-			} else {
-				motg->ext_chg_active = INACTIVE;
-				complete(&motg->ext_chg_wait);
-				ret = -ENODEV;
-			}
-		} else {
-			motg->ext_chg_active = INACTIVE;
-			complete(&motg->ext_chg_wait);
-			/*
-			 * If usb cable is disconnected and then userspace
-			 * calls ioctl to unblock low power mode, make sure
-			 * otg_sm work for usb disconnect is processed first
-			 * followed by decrementing the PM usage counters.
-			 */
-			flush_work(&motg->sm_work);
-			msm_otg_dbg_log_event(&motg->phy,
-				"PM RUNTIME: EXT_CHG PUT",
-				get_pm_runtime_counter(motg->phy.dev), 0);
-			pm_runtime_put_sync(motg->phy.dev);
-		}
-		break;
-	case MSM_USB_EXT_CHG_VOLTAGE_INFO:
-		if (get_user(val, (int __user *)arg)) {
-			pr_err("%s: get_user failed\n\n", __func__);
-			ret = -EFAULT;
-			break;
-		}
-		msm_otg_dbg_log_event(&motg->phy, "EXT CHG: VOL REQ", cmd, val);
-
-		if (val == USB_REQUEST_5V)
-			pr_debug("%s:voting 5V voltage request\n", __func__);
-		else if (val == USB_REQUEST_9V)
-			pr_debug("%s:voting 9V voltage request\n", __func__);
-		break;
-	case MSM_USB_EXT_CHG_RESULT:
-		if (get_user(val, (int __user *)arg)) {
-			pr_err("%s: get_user failed\n\n", __func__);
-			ret = -EFAULT;
-			break;
-		}
-		msm_otg_dbg_log_event(&motg->phy, "EXT CHG: VOL REQ", cmd, val);
-
-		if (!val)
-			pr_debug("%s:voltage request successful\n", __func__);
-		else
-			pr_debug("%s:voltage request failed\n", __func__);
-		break;
-	case MSM_USB_EXT_CHG_TYPE:
-		if (get_user(val, (int __user *)arg)) {
-			pr_err("%s: get_user failed\n\n", __func__);
-			ret = -EFAULT;
-			break;
-		}
-		msm_otg_dbg_log_event(&motg->phy, "EXT CHG: VOL REQ", cmd, val);
-
-		if (val)
-			pr_debug("%s:charger is external charger\n", __func__);
-		else
-			pr_debug("%s:charger is not ext charger\n", __func__);
-		break;
-	default:
-		ret = -EINVAL;
-	}
-
-	return ret;
-}
-
-static int msm_otg_ext_chg_mmap(struct file *file, struct vm_area_struct *vma)
-{
-	struct msm_otg *motg = file->private_data;
-	unsigned long vsize = vma->vm_end - vma->vm_start;
-	int ret;
-
-	if (vma->vm_pgoff || vsize > PAGE_SIZE)
-		return -EINVAL;
-
-	vma->vm_pgoff = __phys_to_pfn(motg->io_res->start);
-	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-	ret = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-				 vsize, vma->vm_page_prot);
-	if (ret < 0) {
-		pr_err("%s: failed with return val %d\n", __func__, ret);
-		return ret;
-	}
-
-	return 0;
-}
-
-static int msm_otg_ext_chg_release(struct inode *inode, struct file *file)
-{
-	struct msm_otg *motg = file->private_data;
-
-	pr_debug("msm_otg ext chg release\n");
-	msm_otg_dbg_log_event(&motg->phy, "EXT CHG: RELEASE",
-			motg->inputs, motg->phy.otg->state);
-
-	motg->ext_chg_opened = false;
-
-	return 0;
-}
-
-static const struct file_operations msm_otg_ext_chg_fops = {
-	.owner = THIS_MODULE,
-	.open = msm_otg_ext_chg_open,
-	.unlocked_ioctl = msm_otg_ext_chg_ioctl,
-	.mmap = msm_otg_ext_chg_mmap,
-	.release = msm_otg_ext_chg_release,
-};
-
-static int msm_otg_setup_ext_chg_cdev(struct msm_otg *motg)
-{
-	int ret;
-
-	if (motg->pdata->enable_sec_phy || motg->pdata->mode == USB_HOST ||
-			motg->pdata->otg_control != OTG_PMIC_CONTROL) {
-		pr_debug("usb ext chg is not supported by msm otg\n");
-		return -ENODEV;
-	}
-
-	ret = alloc_chrdev_region(&motg->ext_chg_dev, 0, 1, "usb_ext_chg");
-	if (ret < 0) {
-		pr_err("Fail to allocate usb ext char dev region\n");
-		return ret;
-	}
-	motg->ext_chg_class = class_create(THIS_MODULE, "msm_ext_chg");
-	if (ret < 0) {
-		pr_err("Fail to create usb ext chg class\n");
-		goto unreg_chrdev;
-	}
-	cdev_init(&motg->ext_chg_cdev, &msm_otg_ext_chg_fops);
-	motg->ext_chg_cdev.owner = THIS_MODULE;
-
-	ret = cdev_add(&motg->ext_chg_cdev, motg->ext_chg_dev, 1);
-	if (ret < 0) {
-		pr_err("Fail to add usb ext chg cdev\n");
-		goto destroy_class;
-	}
-	motg->ext_chg_device = device_create(motg->ext_chg_class,
-					NULL, motg->ext_chg_dev, NULL,
-					"usb_ext_chg");
-	if (IS_ERR(motg->ext_chg_device)) {
-		pr_err("Fail to create usb ext chg device\n");
-		ret = PTR_ERR(motg->ext_chg_device);
-		motg->ext_chg_device = NULL;
-		goto del_cdev;
-	}
-
-	init_completion(&motg->ext_chg_wait);
-	pr_debug("msm otg ext chg cdev setup success\n");
-	return 0;
-
-del_cdev:
-	cdev_del(&motg->ext_chg_cdev);
-destroy_class:
-	class_destroy(motg->ext_chg_class);
-unreg_chrdev:
-	unregister_chrdev_region(motg->ext_chg_dev, 1);
-
-	return ret;
-}
-
-static ssize_t dpdm_pulldown_enable_show(struct device *dev,
-			       struct device_attribute *attr, char *buf)
-{
-	struct msm_otg *motg = the_msm_otg;
-	struct msm_otg_platform_data *pdata = motg->pdata;
-
-	return snprintf(buf, PAGE_SIZE, "%s\n", pdata->dpdm_pulldown_added ?
-							"enabled" : "disabled");
-}
-
-static ssize_t dpdm_pulldown_enable_store(struct device *dev,
-		struct device_attribute *attr, const char
-		*buf, size_t size)
-{
-	struct msm_otg *motg = the_msm_otg;
-	struct msm_otg_platform_data *pdata = motg->pdata;
-
-	if (!strncasecmp(buf, "enable", 6)) {
-		pdata->dpdm_pulldown_added = true;
-		return size;
-	} else if (!strncasecmp(buf, "disable", 7)) {
-		pdata->dpdm_pulldown_added = false;
-		return size;
-	}
-
-	return -EINVAL;
-}
-
-static DEVICE_ATTR(dpdm_pulldown_enable, 0644,
-		dpdm_pulldown_enable_show, dpdm_pulldown_enable_store);
-
-static int msm_otg_vbus_notifier(struct notifier_block *nb, unsigned long event,
-				void *ptr)
-{
-	struct msm_otg *motg = container_of(nb, struct msm_otg, vbus_nb);
-
-	if (event)
-		set_bit(B_SESS_VLD, &motg->inputs);
-	else
-		clear_bit(B_SESS_VLD, &motg->inputs);
-
-	queue_work(motg->otg_wq, &motg->sm_work);
-
-	return NOTIFY_DONE;
-}
-
-static int msm_otg_id_notifier(struct notifier_block *nb, unsigned long event,
-				void *ptr)
-{
-	struct msm_otg *motg = container_of(nb, struct msm_otg, id_nb);
-
-	if (event)
-		clear_bit(ID, &motg->inputs);
-	else
-		set_bit(ID, &motg->inputs);
-
-	queue_work(motg->otg_wq, &motg->sm_work);
-
-	return NOTIFY_DONE;
-}
-
-static int msm_otg_extcon_register(struct msm_otg *motg)
-{
-	struct device_node *node = motg->pdev->dev.of_node;
-	struct extcon_dev *edev;
-	int ret = 0;
-
-	if (!of_property_read_bool(node, "extcon"))
-		return 0;
-
-	edev = extcon_get_edev_by_phandle(&motg->pdev->dev, 0);
-	if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV)
-		return PTR_ERR(edev);
-
-	if (!IS_ERR(edev)) {
-		motg->extcon_vbus = edev;
-		motg->vbus_nb.notifier_call = msm_otg_vbus_notifier;
-		ret = extcon_register_notifier(edev, EXTCON_USB,
-							&motg->vbus_nb);
-		if (ret < 0) {
-			dev_err(&motg->pdev->dev, "failed to register notifier for USB\n");
-			return ret;
-		}
-	}
-
-	if (of_count_phandle_with_args(node, "extcon", NULL) > 1) {
-		edev = extcon_get_edev_by_phandle(&motg->pdev->dev, 1);
-		if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) {
-			ret = PTR_ERR(edev);
-			goto err;
-		}
-	}
-
-	if (!IS_ERR(edev)) {
-		motg->extcon_id = edev;
-		motg->id_nb.notifier_call = msm_otg_id_notifier;
-		ret = extcon_register_notifier(edev, EXTCON_USB_HOST,
-							&motg->id_nb);
-		if (ret < 0) {
-			dev_err(&motg->pdev->dev, "failed to register notifier for USB-HOST\n");
-			goto err;
-		}
-	}
-
-	return 0;
-err:
-	if (motg->extcon_vbus)
-		extcon_unregister_notifier(motg->extcon_vbus, EXTCON_USB,
-								&motg->vbus_nb);
-
-	return ret;
-}
-
-struct msm_otg_platform_data *msm_otg_dt_to_pdata(struct platform_device *pdev)
-{
-	struct device_node *node = pdev->dev.of_node;
-	struct msm_otg_platform_data *pdata;
-	int len = 0;
-	int res_gpio;
-
-	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
-	if (!pdata)
-		return NULL;
-
-	of_get_property(node, "qcom,hsusb-otg-phy-init-seq", &len);
-	if (len) {
-		pdata->phy_init_seq = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
-		if (!pdata->phy_init_seq)
-			return NULL;
-		of_property_read_u32_array(node, "qcom,hsusb-otg-phy-init-seq",
-				pdata->phy_init_seq,
-				len/sizeof(*pdata->phy_init_seq));
-	}
-	of_property_read_u32(node, "qcom,hsusb-otg-power-budget",
-				&pdata->power_budget);
-	of_property_read_u32(node, "qcom,hsusb-otg-mode",
-				&pdata->mode);
-	of_property_read_u32(node, "qcom,hsusb-otg-otg-control",
-				&pdata->otg_control);
-	of_property_read_u32(node, "qcom,hsusb-otg-default-mode",
-				&pdata->default_mode);
-	of_property_read_u32(node, "qcom,hsusb-otg-phy-type",
-				&pdata->phy_type);
-	pdata->disable_reset_on_disconnect = of_property_read_bool(node,
-				"qcom,hsusb-otg-disable-reset");
-	pdata->pnoc_errata_fix = of_property_read_bool(node,
-				"qcom,hsusb-otg-pnoc-errata-fix");
-	pdata->enable_lpm_on_dev_suspend = of_property_read_bool(node,
-				"qcom,hsusb-otg-lpm-on-dev-suspend");
-	pdata->core_clk_always_on_workaround = of_property_read_bool(node,
-				"qcom,hsusb-otg-clk-always-on-workaround");
-	pdata->delay_lpm_on_disconnect = of_property_read_bool(node,
-				"qcom,hsusb-otg-delay-lpm");
-	pdata->dp_manual_pullup = of_property_read_bool(node,
-				"qcom,dp-manual-pullup");
-	pdata->enable_sec_phy = of_property_read_bool(node,
-					"qcom,usb2-enable-hsphy2");
-	of_property_read_u32(node, "qcom,hsusb-log2-itc",
-				&pdata->log2_itc);
-
-	of_property_read_u32(node, "qcom,hsusb-otg-mpm-dpsehv-int",
-				&pdata->mpm_dpshv_int);
-	of_property_read_u32(node, "qcom,hsusb-otg-mpm-dmsehv-int",
-				&pdata->mpm_dmshv_int);
-	pdata->pmic_id_irq = platform_get_irq_byname(pdev, "pmic_id_irq");
-	if (pdata->pmic_id_irq < 0)
-		pdata->pmic_id_irq = 0;
-
-	pdata->hub_reset_gpio = of_get_named_gpio(
-			node, "qcom,hub-reset-gpio", 0);
-	if (pdata->hub_reset_gpio < 0)
-		pr_debug("hub_reset_gpio is not available\n");
-
-	pdata->usbeth_reset_gpio = of_get_named_gpio(
-			node, "qcom,usbeth-reset-gpio", 0);
-	if (pdata->usbeth_reset_gpio < 0)
-		pr_debug("usbeth_reset_gpio is not available\n");
-
-	pdata->switch_sel_gpio =
-			of_get_named_gpio(node, "qcom,sw-sel-gpio", 0);
-	if (pdata->switch_sel_gpio < 0)
-		pr_debug("switch_sel_gpio is not available\n");
-
-	pdata->usb_id_gpio =
-			of_get_named_gpio(node, "qcom,usbid-gpio", 0);
-	if (pdata->usb_id_gpio < 0)
-		pr_debug("usb_id_gpio is not available\n");
-
-	pdata->l1_supported = of_property_read_bool(node,
-				"qcom,hsusb-l1-supported");
-	pdata->enable_ahb2ahb_bypass = of_property_read_bool(node,
-				"qcom,ahb-async-bridge-bypass");
-	pdata->disable_retention_with_vdd_min = of_property_read_bool(node,
-				"qcom,disable-retention-with-vdd-min");
-	pdata->enable_phy_id_pullup = of_property_read_bool(node,
-				"qcom,enable-phy-id-pullup");
-	pdata->phy_dvdd_always_on = of_property_read_bool(node,
-				"qcom,phy-dvdd-always-on");
-
-	res_gpio = of_get_named_gpio(node, "qcom,hsusb-otg-vddmin-gpio", 0);
-	if (res_gpio < 0)
-		res_gpio = 0;
-	pdata->vddmin_gpio = res_gpio;
-
-	pdata->emulation = of_property_read_bool(node,
-						"qcom,emulation");
-
-	pdata->enable_streaming = of_property_read_bool(node,
-					"qcom,boost-sysclk-with-streaming");
-
-	pdata->enable_axi_prefetch = of_property_read_bool(node,
-						"qcom,axi-prefetch-enable");
-
-	pdata->enable_sdp_typec_current_limit = of_property_read_bool(node,
-					"qcom,enable-sdp-typec-current-limit");
-	pdata->vbus_low_as_hostmode = of_property_read_bool(node,
-					"qcom,vbus-low-as-hostmode");
-	return pdata;
-}
-
-static int msm_otg_probe(struct platform_device *pdev)
-{
-	int ret = 0;
-	int len = 0;
-	u32 tmp[3];
-	struct resource *res;
-	struct msm_otg *motg;
-	struct usb_phy *phy;
-	struct msm_otg_platform_data *pdata;
-	void __iomem *tcsr;
-	int id_irq = 0;
-
-	dev_info(&pdev->dev, "msm_otg probe\n");
-
-	motg = kzalloc(sizeof(struct msm_otg), GFP_KERNEL);
-	if (!motg) {
-		ret = -ENOMEM;
-		return ret;
-	}
-
-	/*
-	 * USB Core is running its protocol engine based on CORE CLK,
-	 * CORE CLK  must be running at >55Mhz for correct HSUSB
-	 * operation and USB core cannot tolerate frequency changes on
-	 * CORE CLK. For such USB cores, vote for maximum clk frequency
-	 * on pclk source
-	 */
-	motg->core_clk = clk_get(&pdev->dev, "core_clk");
-	if (IS_ERR(motg->core_clk)) {
-		ret = PTR_ERR(motg->core_clk);
-		motg->core_clk = NULL;
-		if (ret != -EPROBE_DEFER)
-			dev_err(&pdev->dev, "failed to get core_clk\n");
-		goto free_motg;
-	}
-
-	motg->core_reset = devm_reset_control_get(&pdev->dev, "core_reset");
-	if (IS_ERR(motg->core_reset)) {
-		dev_err(&pdev->dev, "failed to get core_reset\n");
-		ret = PTR_ERR(motg->core_reset);
-		goto put_core_clk;
-	}
-
-	/*
-	 * USB Core CLK can run at max freq if streaming is enabled. Hence,
-	 * get Max supported clk frequency for USB Core CLK and request to set
-	 * the same. Otherwise set USB Core CLK to defined default value.
-	 */
-	if (of_property_read_u32(pdev->dev.of_node,
-					"qcom,max-nominal-sysclk-rate", &ret)) {
-		ret = -EINVAL;
-		goto put_core_clk;
-	} else {
-		motg->core_clk_nominal_rate = clk_round_rate(motg->core_clk,
-							     ret);
-	}
-
-	if (of_property_read_u32(pdev->dev.of_node,
-					"qcom,max-svs-sysclk-rate", &ret)) {
-		dev_dbg(&pdev->dev, "core_clk svs freq not specified\n");
-	} else {
-		motg->core_clk_svs_rate = clk_round_rate(motg->core_clk, ret);
-	}
-
-	motg->default_noc_mode = USB_NOC_NOM_VOTE;
-	if (of_property_read_bool(pdev->dev.of_node, "qcom,default-mode-svs")) {
-		motg->core_clk_rate = motg->core_clk_svs_rate;
-		motg->default_noc_mode = USB_NOC_SVS_VOTE;
-	} else if (of_property_read_bool(pdev->dev.of_node,
-					"qcom,boost-sysclk-with-streaming")) {
-		motg->core_clk_rate = motg->core_clk_nominal_rate;
-	} else {
-		motg->core_clk_rate = clk_round_rate(motg->core_clk,
-						USB_DEFAULT_SYSTEM_CLOCK);
-	}
-
-	if (IS_ERR_VALUE(motg->core_clk_rate)) {
-		dev_err(&pdev->dev, "fail to get core clk max freq.\n");
-	} else {
-		ret = clk_set_rate(motg->core_clk, motg->core_clk_rate);
-		if (ret)
-			dev_err(&pdev->dev, "fail to set core_clk freq:%d\n",
-									ret);
-	}
-
-	motg->pclk = clk_get(&pdev->dev, "iface_clk");
-	if (IS_ERR(motg->pclk)) {
-		ret = PTR_ERR(motg->pclk);
-		motg->pclk = NULL;
-		if (ret != -EPROBE_DEFER)
-			dev_err(&pdev->dev, "failed to get iface_clk\n");
-		goto put_core_clk;
-	}
-
-	motg->xo_clk = clk_get(&pdev->dev, "xo");
-	if (IS_ERR(motg->xo_clk)) {
-		ret = PTR_ERR(motg->xo_clk);
-		motg->xo_clk = NULL;
-		if (ret == -EPROBE_DEFER)
-			goto put_pclk;
-	}
-
-	/*
-	 * On few platforms USB PHY is fed with sleep clk.
-	 * Hence don't fail probe.
-	 */
-	motg->sleep_clk = devm_clk_get(&pdev->dev, "sleep_clk");
-	if (IS_ERR(motg->sleep_clk)) {
-		ret = PTR_ERR(motg->sleep_clk);
-		motg->sleep_clk = NULL;
-		if (ret == -EPROBE_DEFER)
-			goto put_xo_clk;
-		else
-			dev_dbg(&pdev->dev, "failed to get sleep_clk\n");
-	} else {
-		ret = clk_prepare_enable(motg->sleep_clk);
-		if (ret) {
-			dev_err(&pdev->dev, "%s failed to vote sleep_clk%d\n",
-						__func__, ret);
-			goto put_xo_clk;
-		}
-	}
-
-	/*
-	 * If present, phy_reset_clk is used to reset the PHY, ULPI bridge
-	 * and CSR Wrapper. This is a reset only clock.
-	 */
-
-	if (of_property_match_string(pdev->dev.of_node,
-			"clock-names", "phy_reset_clk") >= 0) {
-		motg->phy_reset_clk = devm_clk_get(&pdev->dev, "phy_reset_clk");
-		if (IS_ERR(motg->phy_reset_clk)) {
-			ret = PTR_ERR(motg->phy_reset_clk);
-			goto disable_sleep_clk;
-		}
-
-		motg->phy_reset = devm_reset_control_get(&pdev->dev,
-								"phy_reset");
-		if (IS_ERR(motg->phy_reset)) {
-			dev_err(&pdev->dev, "failed to get phy_reset\n");
-			ret = PTR_ERR(motg->phy_reset);
-			goto disable_sleep_clk;
-		}
-	}
-
-	/*
-	 * If present, phy_por_clk is used to assert/de-assert phy POR
-	 * input. This is a reset only clock. phy POR must be asserted
-	 * after overriding the parameter registers via CSR wrapper or
-	 * ULPI bridge.
-	 */
-	if (of_property_match_string(pdev->dev.of_node,
-				"clock-names", "phy_por_clk") >= 0) {
-		motg->phy_por_clk = devm_clk_get(&pdev->dev, "phy_por_clk");
-		if (IS_ERR(motg->phy_por_clk)) {
-			ret = PTR_ERR(motg->phy_por_clk);
-			goto disable_sleep_clk;
-		}
-
-		motg->phy_por_reset = devm_reset_control_get(&pdev->dev,
-							"phy_por_reset");
-		if (IS_ERR(motg->phy_por_reset)) {
-			dev_err(&pdev->dev, "failed to get phy_por_reset\n");
-			ret = PTR_ERR(motg->phy_por_reset);
-			goto disable_sleep_clk;
-		}
-	}
-
-	/*
-	 * If present, phy_csr_clk is required for accessing PHY
-	 * CSR registers via AHB2PHY interface.
-	 */
-	if (of_property_match_string(pdev->dev.of_node,
-				"clock-names", "phy_csr_clk") >= 0) {
-		motg->phy_csr_clk = devm_clk_get(&pdev->dev, "phy_csr_clk");
-		if (IS_ERR(motg->phy_csr_clk)) {
-			ret = PTR_ERR(motg->phy_csr_clk);
-			goto disable_sleep_clk;
-		} else {
-			ret = clk_prepare_enable(motg->phy_csr_clk);
-			if (ret) {
-				dev_err(&pdev->dev,
-					"fail to enable phy csr clk %d\n", ret);
-				goto disable_sleep_clk;
-			}
-		}
-	}
-
-	of_property_read_u32(pdev->dev.of_node, "qcom,pm-qos-latency",
-				&motg->pm_qos_latency);
-
-	pdata = msm_otg_dt_to_pdata(pdev);
-	if (!pdata) {
-		ret = -ENOMEM;
-		goto disable_phy_csr_clk;
-	}
-	pdev->dev.platform_data = pdata;
-
-	pdata->bus_scale_table = msm_bus_cl_get_pdata(pdev);
-	if (!pdata->bus_scale_table)
-		dev_dbg(&pdev->dev, "bus scaling is disabled\n");
-
-	if (pdata->phy_type == QUSB_ULPI_PHY) {
-		if (of_property_match_string(pdev->dev.of_node,
-					"clock-names", "phy_ref_clk") >= 0) {
-			motg->phy_ref_clk = devm_clk_get(&pdev->dev,
-						"phy_ref_clk");
-			if (IS_ERR(motg->phy_ref_clk)) {
-				ret = PTR_ERR(motg->phy_ref_clk);
-				goto disable_phy_csr_clk;
-			} else {
-				ret = clk_prepare_enable(motg->phy_ref_clk);
-				if (ret) {
-					dev_err(&pdev->dev,
-						"fail to enable phy ref clk %d\n",
-						ret);
-					goto disable_phy_csr_clk;
-				}
-			}
-		}
-	}
-
-	motg->phy.otg = devm_kzalloc(&pdev->dev, sizeof(struct usb_otg),
-							GFP_KERNEL);
-	if (!motg->phy.otg) {
-		ret = -ENOMEM;
-		goto disable_phy_csr_clk;
-	}
-
-	the_msm_otg = motg;
-	motg->pdata = pdata;
-	phy = &motg->phy;
-	phy->dev = &pdev->dev;
-	motg->pdev = pdev;
-	motg->dbg_idx = 0;
-	motg->dbg_lock = __RW_LOCK_UNLOCKED(lck);
-
-	if (motg->pdata->bus_scale_table) {
-		motg->bus_perf_client =
-		    msm_bus_scale_register_client(motg->pdata->bus_scale_table);
-		if (!motg->bus_perf_client) {
-			dev_err(motg->phy.dev, "%s: Failed to register BUS\n"
-						"scaling client!!\n", __func__);
-		} else {
-			debug_bus_voting_enabled = true;
-			/* Some platforms require BUS vote to control clocks */
-			msm_otg_bus_vote(motg, USB_MIN_PERF_VOTE);
-		}
-	}
-
-	ret = msm_otg_bus_freq_get(motg);
-	if (ret) {
-		pr_err("failed to get noc clocks: %d\n", ret);
-	} else {
-		ret = msm_otg_bus_freq_set(motg, motg->default_noc_mode);
-		if (ret)
-			pr_err("failed to vote explicit noc rates: %d\n", ret);
-	}
-
-	/* initialize reset counter */
-	motg->reset_counter = 0;
-
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
-	if (!res) {
-		dev_err(&pdev->dev, "failed to get core iomem resource\n");
-		ret = -ENODEV;
-		goto devote_bus_bw;
-	}
-
-	motg->io_res = res;
-	motg->regs = ioremap(res->start, resource_size(res));
-	if (!motg->regs) {
-		dev_err(&pdev->dev, "core iomem ioremap failed\n");
-		ret = -ENOMEM;
-		goto devote_bus_bw;
-	}
-	dev_info(&pdev->dev, "OTG regs = %pK\n", motg->regs);
-
-	if (pdata->enable_sec_phy) {
-		res = platform_get_resource_byname(pdev,
-				IORESOURCE_MEM, "tcsr");
-		if (!res) {
-			dev_dbg(&pdev->dev, "missing TCSR memory resource\n");
-		} else {
-			tcsr = devm_ioremap_nocache(&pdev->dev, res->start,
-				resource_size(res));
-			if (!tcsr) {
-				dev_dbg(&pdev->dev, "tcsr ioremap failed\n");
-			} else {
-				/* Enable USB2 on secondary HSPHY. */
-				writel_relaxed(0x1, tcsr);
-				/*
-				 * Ensure that TCSR write is completed before
-				 * USB registers initialization.
-				 */
-				mb();
-			}
-		}
-	}
-
-	if (pdata->enable_sec_phy)
-		motg->usb_phy_ctrl_reg = USB_PHY_CTRL2;
-	else
-		motg->usb_phy_ctrl_reg = USB_PHY_CTRL;
-
-	/*
-	 * The USB PHY wrapper provides a register interface
-	 * through AHB2PHY for performing PHY related operations
-	 * like retention, HV interrupts and overriding parameter
-	 * registers etc. The registers start at 4 byte boundary
-	 * but only the first byte is valid and remaining are not
-	 * used. Relaxed versions of readl/writel should be used.
-	 *
-	 * The link does not have any PHY specific registers.
-	 * Hence set motg->usb_phy_ctrl_reg to.
-	 */
-	if (motg->pdata->phy_type == SNPS_FEMTO_PHY ||
-		pdata->phy_type == QUSB_ULPI_PHY) {
-		res = platform_get_resource_byname(pdev,
-				IORESOURCE_MEM, "phy_csr");
-		if (!res) {
-			dev_err(&pdev->dev, "PHY CSR IOMEM missing!\n");
-			ret = -ENODEV;
-			goto free_regs;
-		}
-		motg->phy_csr_regs = devm_ioremap_resource(&pdev->dev, res);
-		if (IS_ERR(motg->phy_csr_regs)) {
-			ret = PTR_ERR(motg->phy_csr_regs);
-			dev_err(&pdev->dev, "PHY CSR ioremap failed!\n");
-			goto free_regs;
-		}
-		motg->usb_phy_ctrl_reg = 0;
-	}
-
-	motg->irq = platform_get_irq(pdev, 0);
-	if (!motg->irq) {
-		dev_err(&pdev->dev, "platform_get_irq failed\n");
-		ret = -ENODEV;
-		goto free_regs;
-	}
-
-	motg->async_irq = platform_get_irq_byname(pdev, "async_irq");
-	if (motg->async_irq < 0) {
-		dev_err(&pdev->dev, "platform_get_irq for async_int failed\n");
-		motg->async_irq = 0;
-		goto free_regs;
-	}
-
-	if (motg->xo_clk) {
-		ret = clk_prepare_enable(motg->xo_clk);
-		if (ret) {
-			dev_err(&pdev->dev,
-				"%s failed to vote for TCXO %d\n",
-					__func__, ret);
-			goto free_xo_handle;
-		}
-	}
-
-
-	clk_prepare_enable(motg->pclk);
-
-	hsusb_vdd = devm_regulator_get(motg->phy.dev, "hsusb_vdd_dig");
-	if (IS_ERR(hsusb_vdd)) {
-		hsusb_vdd = devm_regulator_get(motg->phy.dev, "HSUSB_VDDCX");
-		if (IS_ERR(hsusb_vdd)) {
-			dev_err(motg->phy.dev, "unable to get hsusb vddcx\n");
-			ret = PTR_ERR(hsusb_vdd);
-			goto devote_xo_handle;
-		}
-	}
-
-	if (of_get_property(pdev->dev.of_node,
-			"qcom,vdd-voltage-level",
-			&len)){
-		if (len == sizeof(tmp)) {
-			of_property_read_u32_array(pdev->dev.of_node,
-					"qcom,vdd-voltage-level",
-					tmp, len/sizeof(*tmp));
-			vdd_val[0] = tmp[0];
-			vdd_val[1] = tmp[1];
-			vdd_val[2] = tmp[2];
-		} else {
-			dev_dbg(&pdev->dev,
-				"Using default hsusb vdd config.\n");
-			goto devote_xo_handle;
-		}
-	} else {
-		goto devote_xo_handle;
-	}
-
-	ret = msm_hsusb_config_vddcx(1);
-	if (ret) {
-		dev_err(&pdev->dev, "hsusb vddcx configuration failed\n");
-		goto devote_xo_handle;
-	}
-
-	ret = regulator_enable(hsusb_vdd);
-	if (ret) {
-		dev_err(&pdev->dev, "unable to enable the hsusb vddcx\n");
-		goto free_config_vddcx;
-	}
-
-	ret = msm_hsusb_ldo_init(motg, 1);
-	if (ret) {
-		dev_err(&pdev->dev, "hsusb vreg configuration failed\n");
-		goto free_hsusb_vdd;
-	}
-
-	/* Get pinctrl if target uses pinctrl */
-	motg->phy_pinctrl = devm_pinctrl_get(&pdev->dev);
-	if (IS_ERR(motg->phy_pinctrl)) {
-		if (of_property_read_bool(pdev->dev.of_node, "pinctrl-names")) {
-			dev_err(&pdev->dev, "Error encountered while getting pinctrl");
-			ret = PTR_ERR(motg->phy_pinctrl);
-			goto free_ldo_init;
-		}
-		dev_dbg(&pdev->dev, "Target does not use pinctrl\n");
-		motg->phy_pinctrl = NULL;
-	}
-
-	ret = msm_hsusb_ldo_enable(motg, USB_PHY_REG_ON);
-	if (ret) {
-		dev_err(&pdev->dev, "hsusb vreg enable failed\n");
-		goto free_ldo_init;
-	}
-	clk_prepare_enable(motg->core_clk);
-
-	/* Check if USB mem_type change is needed to workaround PNOC hw issue */
-	msm_otg_pnoc_errata_fix(motg);
-
-	writel_relaxed(0, USB_USBINTR);
-	writel_relaxed(0, USB_OTGSC);
-	/* Ensure that above STOREs are completed before enabling interrupts */
-	mb();
-
-	motg->id_state = USB_ID_FLOAT;
-	set_bit(ID, &motg->inputs);
-	INIT_WORK(&motg->sm_work, msm_otg_sm_work);
-	INIT_DELAYED_WORK(&motg->chg_work, msm_chg_detect_work);
-	INIT_DELAYED_WORK(&motg->id_status_work, msm_id_status_w);
-	INIT_DELAYED_WORK(&motg->perf_vote_work, msm_otg_perf_vote_work);
-	setup_timer(&motg->chg_check_timer, msm_otg_chg_check_timer_func,
-				(unsigned long) motg);
-	motg->otg_wq = alloc_ordered_workqueue("k_otg", 0);
-	if (!motg->otg_wq) {
-		pr_err("%s: Unable to create workqueue otg_wq\n",
-			__func__);
-		goto disable_core_clk;
-	}
-
-	ret = request_irq(motg->irq, msm_otg_irq, IRQF_SHARED,
-					"msm_otg", motg);
-	if (ret) {
-		dev_err(&pdev->dev, "request irq failed\n");
-		goto destroy_wq;
-	}
-
-	motg->phy_irq = platform_get_irq_byname(pdev, "phy_irq");
-	if (motg->phy_irq < 0) {
-		dev_dbg(&pdev->dev, "phy_irq is not present\n");
-		motg->phy_irq = 0;
-	} else {
-
-		/* clear all interrupts before enabling the IRQ */
-		writeb_relaxed(0xFF, USB2_PHY_USB_PHY_INTERRUPT_CLEAR0);
-		writeb_relaxed(0xFF, USB2_PHY_USB_PHY_INTERRUPT_CLEAR1);
-
-		writeb_relaxed(0x1, USB2_PHY_USB_PHY_IRQ_CMD);
-		/*
-		 * Databook says 200 usec delay is required for
-		 * clearing the interrupts.
-		 */
-		udelay(200);
-		writeb_relaxed(0x0, USB2_PHY_USB_PHY_IRQ_CMD);
-
-		ret = request_irq(motg->phy_irq, msm_otg_phy_irq_handler,
-				IRQF_TRIGGER_RISING, "msm_otg_phy_irq", motg);
-		if (ret < 0) {
-			dev_err(&pdev->dev, "phy_irq request fail %d\n", ret);
-			goto free_irq;
-		}
-	}
-
-	ret = request_irq(motg->async_irq, msm_otg_irq,
-				IRQF_TRIGGER_RISING, "msm_otg", motg);
-	if (ret) {
-		dev_err(&pdev->dev, "request irq failed (ASYNC INT)\n");
-		goto free_phy_irq;
-	}
-	disable_irq(motg->async_irq);
-
-	if (pdata->otg_control == OTG_PHY_CONTROL && pdata->mpm_otgsessvld_int)
-		msm_mpm_enable_pin(pdata->mpm_otgsessvld_int, 1);
-
-	if (pdata->mpm_dpshv_int)
-		msm_mpm_enable_pin(pdata->mpm_dpshv_int, 1);
-	if (pdata->mpm_dmshv_int)
-		msm_mpm_enable_pin(pdata->mpm_dmshv_int, 1);
-
-	phy->init = msm_otg_reset;
-	phy->set_power = msm_otg_set_power;
-	phy->set_suspend = msm_otg_set_suspend;
-	phy->dbg_event = msm_otg_dbg_log_event;
-
-	phy->io_ops = &msm_otg_io_ops;
-
-	phy->otg->usb_phy = &motg->phy;
-	phy->otg->set_host = msm_otg_set_host;
-	phy->otg->set_peripheral = msm_otg_set_peripheral;
-	if (pdata->dp_manual_pullup)
-		phy->flags |= ENABLE_DP_MANUAL_PULLUP;
-
-	if (pdata->enable_sec_phy)
-		phy->flags |= ENABLE_SECONDARY_PHY;
-
-	ret = usb_add_phy(&motg->phy, USB_PHY_TYPE_USB2);
-	if (ret) {
-		dev_err(&pdev->dev, "usb_add_phy failed\n");
-		goto free_async_irq;
-	}
-
-	ret = usb_phy_regulator_init(motg);
-	if (ret) {
-		dev_err(&pdev->dev, "usb_phy_regulator_init failed\n");
-		goto remove_phy;
-	}
-
-	if (motg->pdata->mode == USB_OTG &&
-		motg->pdata->otg_control == OTG_PMIC_CONTROL &&
-		!motg->phy_irq) {
-
-		if (gpio_is_valid(motg->pdata->usb_id_gpio)) {
-			/* usb_id_gpio request */
-			ret = gpio_request(motg->pdata->usb_id_gpio,
-							"USB_ID_GPIO");
-			if (ret < 0) {
-				dev_err(&pdev->dev, "gpio req failed for id\n");
-				motg->pdata->usb_id_gpio = 0;
-				goto remove_phy;
-			}
-
-			/*
-			 * The following code implements switch between the HOST
-			 * mode to device mode when used different HW components
-			 * on the same port: USB HUB and the usb jack type B
-			 * for device mode In this case HUB should be gone
-			 * only once out of reset at the boot time and after
-			 * that always stay on
-			 */
-			if (gpio_is_valid(motg->pdata->hub_reset_gpio)) {
-				ret = devm_gpio_request(&pdev->dev,
-						motg->pdata->hub_reset_gpio,
-						"qcom,hub-reset-gpio");
-				if (ret < 0) {
-					dev_err(&pdev->dev, "gpio req failed for hub reset\n");
-					goto remove_phy;
-				}
-				gpio_direction_output(
-					motg->pdata->hub_reset_gpio, 1);
-			}
-
-			if (gpio_is_valid(motg->pdata->switch_sel_gpio)) {
-				ret = devm_gpio_request(&pdev->dev,
-						motg->pdata->switch_sel_gpio,
-						"qcom,sw-sel-gpio");
-				if (ret < 0) {
-					dev_err(&pdev->dev, "gpio req failed for switch sel\n");
-					goto remove_phy;
-				}
-				if (gpio_get_value(motg->pdata->usb_id_gpio))
-					gpio_direction_input(
-						motg->pdata->switch_sel_gpio);
-
-				else
-					gpio_direction_output(
-					    motg->pdata->switch_sel_gpio,
-					    1);
-			}
-
-			/* usb_id_gpio to irq */
-			id_irq = gpio_to_irq(motg->pdata->usb_id_gpio);
-			motg->ext_id_irq = id_irq;
-		} else if (motg->pdata->pmic_id_irq) {
-			id_irq = motg->pdata->pmic_id_irq;
-		}
-
-		if (id_irq) {
-			ret = request_irq(id_irq,
-					  msm_id_irq,
-					  IRQF_TRIGGER_RISING |
-					  IRQF_TRIGGER_FALLING,
-					  "msm_otg", motg);
-			if (ret) {
-				dev_err(&pdev->dev, "request irq failed for ID\n");
-				goto remove_phy;
-			}
-		} else {
-			/* PMIC does USB ID detection and notifies through
-			 * USB_OTG property of USB powersupply.
-			 */
-			dev_dbg(&pdev->dev, "PMIC does ID detection\n");
-		}
-	}
-
-	platform_set_drvdata(pdev, motg);
-	device_init_wakeup(&pdev->dev, 1);
-
-	ret = msm_otg_debugfs_init(motg);
-	if (ret)
-		dev_dbg(&pdev->dev, "mode debugfs file is not available\n");
-
-	if (motg->pdata->otg_control == OTG_PMIC_CONTROL &&
-			(!(motg->pdata->mode == USB_OTG) ||
-			 motg->pdata->pmic_id_irq || motg->ext_id_irq ||
-								!motg->phy_irq))
-		motg->caps = ALLOW_PHY_POWER_COLLAPSE | ALLOW_PHY_RETENTION;
-
-	if (motg->pdata->otg_control == OTG_PHY_CONTROL || motg->phy_irq ||
-				motg->pdata->enable_phy_id_pullup)
-		motg->caps = ALLOW_PHY_RETENTION | ALLOW_PHY_REGULATORS_LPM;
-
-	if (motg->pdata->mpm_dpshv_int || motg->pdata->mpm_dmshv_int)
-		motg->caps |= ALLOW_HOST_PHY_RETENTION;
-
-	device_create_file(&pdev->dev, &dev_attr_dpdm_pulldown_enable);
-
-	if (motg->pdata->enable_lpm_on_dev_suspend)
-		motg->caps |= ALLOW_LPM_ON_DEV_SUSPEND;
-
-	if (motg->pdata->disable_retention_with_vdd_min)
-		motg->caps |= ALLOW_VDD_MIN_WITH_RETENTION_DISABLED;
-
-	/*
-	 * PHY DVDD is supplied by a always on PMIC LDO (unlike
-	 * vddcx/vddmx). PHY can keep D+ pull-up and D+/D-
-	 * pull-down during suspend without any additional
-	 * hardware re-work.
-	 */
-	if (motg->pdata->phy_type == SNPS_FEMTO_PHY)
-		motg->caps |= ALLOW_BUS_SUSPEND_WITHOUT_REWORK;
-
-	pm_stay_awake(&pdev->dev);
-	pm_runtime_set_active(&pdev->dev);
-	pm_runtime_enable(&pdev->dev);
-
-	if (motg->pdata->delay_lpm_on_disconnect) {
-		pm_runtime_set_autosuspend_delay(&pdev->dev,
-			lpm_disconnect_thresh);
-		pm_runtime_use_autosuspend(&pdev->dev);
-	}
-
-	ret = msm_otg_setup_ext_chg_cdev(motg);
-	if (ret)
-		dev_dbg(&pdev->dev, "fail to setup cdev\n");
-
-	if (pdev->dev.of_node) {
-		ret = msm_otg_setup_devices(pdev, pdata->mode, true);
-		if (ret) {
-			dev_err(&pdev->dev, "devices setup failed\n");
-			goto remove_cdev;
-		}
-	}
-
-	psy = power_supply_get_by_name("usb");
-	if (!psy) {
-		dev_dbg(&pdev->dev, "Could not get usb power_supply\n");
-		ret = -EPROBE_DEFER;
-		goto otg_remove_devices;
-	}
-
-
-	ret = msm_otg_extcon_register(motg);
-	if (ret)
-		goto put_psy;
-
-	if (motg->extcon_vbus) {
-		ret = extcon_get_cable_state_(motg->extcon_vbus, EXTCON_USB);
-		if (ret)
-			set_bit(B_SESS_VLD, &motg->inputs);
-		else
-			clear_bit(B_SESS_VLD, &motg->inputs);
-	}
-
-	if (motg->extcon_id) {
-		ret = extcon_get_cable_state_(motg->extcon_id, EXTCON_USB_HOST);
-		if (ret)
-			clear_bit(ID, &motg->inputs);
-		else
-			set_bit(ID, &motg->inputs);
-	}
-
-	if (gpio_is_valid(motg->pdata->hub_reset_gpio)) {
-		ret = devm_gpio_request(&pdev->dev,
-				motg->pdata->hub_reset_gpio,
-				"HUB_RESET");
-		if (ret < 0) {
-			dev_err(&pdev->dev, "gpio req failed for hub_reset\n");
-		} else {
-			gpio_direction_output(
-				motg->pdata->hub_reset_gpio, 0);
-			/* 5 microsecs reset signaling to usb hub */
-			usleep_range(5, 10);
-			gpio_direction_output(
-				motg->pdata->hub_reset_gpio, 1);
-		}
-	}
-
-	if (gpio_is_valid(motg->pdata->usbeth_reset_gpio)) {
-		ret = devm_gpio_request(&pdev->dev,
-				motg->pdata->usbeth_reset_gpio,
-				"ETH_RESET");
-		if (ret < 0) {
-			dev_err(&pdev->dev, "gpio req failed for usbeth_reset\n");
-		} else {
-			gpio_direction_output(
-				motg->pdata->usbeth_reset_gpio, 0);
-			/* 100 microsecs reset signaling to usb-to-eth */
-			usleep_range(100, 110);
-			gpio_direction_output(
-				motg->pdata->usbeth_reset_gpio, 1);
-		}
-	}
-
-	motg->pm_notify.notifier_call = msm_otg_pm_notify;
-	register_pm_notifier(&motg->pm_notify);
-	msm_otg_dbg_log_event(phy, "OTG PROBE", motg->caps, motg->lpm_flags);
-
-	return 0;
-
-put_psy:
-	if (psy)
-		power_supply_put(psy);
-otg_remove_devices:
-	if (pdev->dev.of_node)
-		msm_otg_setup_devices(pdev, motg->pdata->mode, false);
-remove_cdev:
-	if (!motg->ext_chg_device) {
-		device_destroy(motg->ext_chg_class, motg->ext_chg_dev);
-		cdev_del(&motg->ext_chg_cdev);
-		class_destroy(motg->ext_chg_class);
-		unregister_chrdev_region(motg->ext_chg_dev, 1);
-	}
-remove_phy:
-	usb_remove_phy(&motg->phy);
-free_async_irq:
-	free_irq(motg->async_irq, motg);
-free_phy_irq:
-	if (motg->phy_irq)
-		free_irq(motg->phy_irq, motg);
-free_irq:
-	free_irq(motg->irq, motg);
-destroy_wq:
-	destroy_workqueue(motg->otg_wq);
-disable_core_clk:
-	clk_disable_unprepare(motg->core_clk);
-	msm_hsusb_ldo_enable(motg, USB_PHY_REG_OFF);
-free_ldo_init:
-	msm_hsusb_ldo_init(motg, 0);
-free_hsusb_vdd:
-	regulator_disable(hsusb_vdd);
-free_config_vddcx:
-	regulator_set_voltage(hsusb_vdd,
-		vdd_val[VDD_NONE],
-		vdd_val[VDD_MAX]);
-devote_xo_handle:
-	clk_disable_unprepare(motg->pclk);
-	if (motg->xo_clk)
-		clk_disable_unprepare(motg->xo_clk);
-free_xo_handle:
-	if (motg->xo_clk) {
-		clk_put(motg->xo_clk);
-		motg->xo_clk = NULL;
-	}
-free_regs:
-	iounmap(motg->regs);
-devote_bus_bw:
-	if (motg->bus_perf_client) {
-		msm_otg_bus_vote(motg, USB_NO_PERF_VOTE);
-		msm_bus_scale_unregister_client(motg->bus_perf_client);
-	}
-disable_phy_csr_clk:
-	if (motg->phy_csr_clk)
-		clk_disable_unprepare(motg->phy_csr_clk);
-disable_sleep_clk:
-	if (motg->sleep_clk)
-		clk_disable_unprepare(motg->sleep_clk);
-put_xo_clk:
-	if (motg->xo_clk)
-		clk_put(motg->xo_clk);
-put_pclk:
-	if (motg->pclk)
-		clk_put(motg->pclk);
-put_core_clk:
-	if (motg->core_clk)
-		clk_put(motg->core_clk);
-free_motg:
-	kfree(motg);
-	return ret;
-}
-
-static int msm_otg_remove(struct platform_device *pdev)
-{
-	struct msm_otg *motg = platform_get_drvdata(pdev);
-	struct usb_phy *phy = &motg->phy;
-	int cnt = 0;
-
-	if (phy->otg->host || phy->otg->gadget)
-		return -EBUSY;
-
-	unregister_pm_notifier(&motg->pm_notify);
-
-	extcon_unregister_notifier(motg->extcon_id, EXTCON_USB_HOST,
-							&motg->id_nb);
-	extcon_unregister_notifier(motg->extcon_vbus, EXTCON_USB,
-							&motg->vbus_nb);
-
-	if (!motg->ext_chg_device) {
-		device_destroy(motg->ext_chg_class, motg->ext_chg_dev);
-		cdev_del(&motg->ext_chg_cdev);
-		class_destroy(motg->ext_chg_class);
-		unregister_chrdev_region(motg->ext_chg_dev, 1);
-	}
-
-	if (pdev->dev.of_node)
-		msm_otg_setup_devices(pdev, motg->pdata->mode, false);
-	if (psy)
-		power_supply_put(psy);
-	msm_otg_debugfs_cleanup();
-	cancel_delayed_work_sync(&motg->chg_work);
-	cancel_delayed_work_sync(&motg->id_status_work);
-	cancel_delayed_work_sync(&motg->perf_vote_work);
-	msm_otg_perf_vote_update(motg, false);
-	cancel_work_sync(&motg->sm_work);
-	destroy_workqueue(motg->otg_wq);
-
-	pm_runtime_resume(&pdev->dev);
-
-	device_init_wakeup(&pdev->dev, 0);
-	pm_runtime_disable(&pdev->dev);
-
-	if (motg->phy_irq)
-		free_irq(motg->phy_irq, motg);
-	if (motg->pdata->pmic_id_irq)
-		free_irq(motg->pdata->pmic_id_irq, motg);
-	usb_remove_phy(phy);
-	free_irq(motg->irq, motg);
-
-	if (motg->pdata->mpm_dpshv_int || motg->pdata->mpm_dmshv_int)
-		device_remove_file(&pdev->dev,
-				&dev_attr_dpdm_pulldown_enable);
-	if (motg->pdata->otg_control == OTG_PHY_CONTROL &&
-		motg->pdata->mpm_otgsessvld_int)
-		msm_mpm_enable_pin(motg->pdata->mpm_otgsessvld_int, 0);
-
-	if (motg->pdata->mpm_dpshv_int)
-		msm_mpm_enable_pin(motg->pdata->mpm_dpshv_int, 0);
-	if (motg->pdata->mpm_dmshv_int)
-		msm_mpm_enable_pin(motg->pdata->mpm_dmshv_int, 0);
-
-	/*
-	 * Put PHY in low power mode.
-	 */
-	ulpi_read(phy, 0x14);
-	ulpi_write(phy, 0x08, 0x09);
-
-	writel_relaxed(readl_relaxed(USB_PORTSC) | PORTSC_PHCD, USB_PORTSC);
-	while (cnt < PHY_SUSPEND_TIMEOUT_USEC) {
-		if (readl_relaxed(USB_PORTSC) & PORTSC_PHCD)
-			break;
-		udelay(1);
-		cnt++;
-	}
-	if (cnt >= PHY_SUSPEND_TIMEOUT_USEC)
-		dev_err(phy->dev, "Unable to suspend PHY\n");
-
-	clk_disable_unprepare(motg->pclk);
-	clk_disable_unprepare(motg->core_clk);
-	if (motg->phy_csr_clk)
-		clk_disable_unprepare(motg->phy_csr_clk);
-	if (motg->xo_clk) {
-		clk_disable_unprepare(motg->xo_clk);
-		clk_put(motg->xo_clk);
-	}
-
-	if (!IS_ERR(motg->sleep_clk))
-		clk_disable_unprepare(motg->sleep_clk);
-
-	msm_hsusb_ldo_enable(motg, USB_PHY_REG_OFF);
-	msm_hsusb_ldo_init(motg, 0);
-	regulator_disable(hsusb_vdd);
-	regulator_set_voltage(hsusb_vdd,
-		vdd_val[VDD_NONE],
-		vdd_val[VDD_MAX]);
-
-	iounmap(motg->regs);
-	pm_runtime_set_suspended(&pdev->dev);
-
-	clk_put(motg->pclk);
-	clk_put(motg->core_clk);
-
-	if (motg->bus_perf_client) {
-		msm_otg_bus_vote(motg, USB_NO_PERF_VOTE);
-		msm_bus_scale_unregister_client(motg->bus_perf_client);
-	}
-
-	return 0;
-}
-
-static void msm_otg_shutdown(struct platform_device *pdev)
-{
-	struct msm_otg *motg = platform_get_drvdata(pdev);
-
-	dev_dbg(&pdev->dev, "OTG shutdown\n");
-	msm_hsusb_vbus_power(motg, 0);
-}
-
-#ifdef CONFIG_PM
-static int msm_otg_runtime_idle(struct device *dev)
-{
-	struct msm_otg *motg = dev_get_drvdata(dev);
-	struct usb_phy *phy = &motg->phy;
-
-	dev_dbg(dev, "OTG runtime idle\n");
-	msm_otg_dbg_log_event(phy, "RUNTIME IDLE",
-			phy->otg->state, motg->ext_chg_active);
-
-	if (phy->otg->state == OTG_STATE_UNDEFINED)
-		return -EAGAIN;
-
-	if (motg->ext_chg_active == DEFAULT) {
-		dev_dbg(dev, "Deferring LPM\n");
-		/*
-		 * Charger detection may happen in user space.
-		 * Delay entering LPM by 3 sec.  Otherwise we
-		 * have to exit LPM when user space begins
-		 * charger detection.
-		 *
-		 * This timer will be canceled when user space
-		 * votes against LPM by incrementing PM usage
-		 * counter.  We enter low power mode when
-		 * PM usage counter is decremented.
-		 */
-		pm_schedule_suspend(dev, 3000);
-		return -EAGAIN;
-	}
-
-	return 0;
-}
-
-static int msm_otg_runtime_suspend(struct device *dev)
-{
-	struct msm_otg *motg = dev_get_drvdata(dev);
-
-	dev_dbg(dev, "OTG runtime suspend\n");
-	msm_otg_dbg_log_event(&motg->phy, "RUNTIME SUSPEND",
-			get_pm_runtime_counter(dev), 0);
-	return msm_otg_suspend(motg);
-}
-
-static int msm_otg_runtime_resume(struct device *dev)
-{
-	struct msm_otg *motg = dev_get_drvdata(dev);
-
-	dev_dbg(dev, "OTG runtime resume\n");
-	msm_otg_dbg_log_event(&motg->phy, "RUNTIME RESUME",
-			get_pm_runtime_counter(dev), 0);
-
-	return msm_otg_resume(motg);
-}
-#endif
-
-#ifdef CONFIG_PM_SLEEP
-static int msm_otg_pm_suspend(struct device *dev)
-{
-	struct msm_otg *motg = dev_get_drvdata(dev);
-
-	dev_dbg(dev, "OTG PM suspend\n");
-	msm_otg_dbg_log_event(&motg->phy, "PM SUSPEND START",
-			get_pm_runtime_counter(dev),
-			atomic_read(&motg->pm_suspended));
-
-	/* flush any pending sm_work first */
-	flush_work(&motg->sm_work);
-	if (!atomic_read(&motg->in_lpm)) {
-		dev_err(dev, "Abort PM suspend!! (USB is outside LPM)\n");
-		return -EBUSY;
-	}
-	atomic_set(&motg->pm_suspended, 1);
-
-	return 0;
-}
-
-static int msm_otg_pm_resume(struct device *dev)
-{
-	int ret = 0;
-	struct msm_otg *motg = dev_get_drvdata(dev);
-
-	dev_dbg(dev, "OTG PM resume\n");
-	msm_otg_dbg_log_event(&motg->phy, "PM RESUME START",
-			get_pm_runtime_counter(dev), pm_runtime_suspended(dev));
-
-	if (motg->resume_pending || motg->phy_irq_pending) {
-		msm_otg_dbg_log_event(&motg->phy, "PM RESUME BY USB",
-				motg->async_int, motg->resume_pending);
-		/* sm work if pending will start in pm notify to exit LPM */
-	}
-
-	return ret;
-}
-#endif
-
-#ifdef CONFIG_PM
-static const struct dev_pm_ops msm_otg_dev_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(msm_otg_pm_suspend, msm_otg_pm_resume)
-	SET_RUNTIME_PM_OPS(msm_otg_runtime_suspend, msm_otg_runtime_resume,
-				msm_otg_runtime_idle)
-};
-#endif
-
-static const struct of_device_id msm_otg_dt_match[] = {
-	{	.compatible = "qcom,hsusb-otg",
-	},
-	{}
-};
-
-static struct platform_driver msm_otg_driver = {
-	.probe = msm_otg_probe,
-	.remove = msm_otg_remove,
-	.shutdown = msm_otg_shutdown,
-	.driver = {
-		.name = DRIVER_NAME,
-		.owner = THIS_MODULE,
-#ifdef CONFIG_PM
-		.pm = &msm_otg_dev_pm_ops,
-#endif
-		.of_match_table = msm_otg_dt_match,
-	},
-};
-
-module_platform_driver(msm_otg_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("MSM USB transceiver driver");
diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c
index ab5d364..335a1ef 100644
--- a/drivers/usb/phy/phy-tahvo.c
+++ b/drivers/usb/phy/phy-tahvo.c
@@ -368,7 +368,8 @@
 	tu->extcon = devm_extcon_dev_allocate(&pdev->dev, tahvo_cable);
 	if (IS_ERR(tu->extcon)) {
 		dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
-		return -ENOMEM;
+		ret = PTR_ERR(tu->extcon);
+		goto err_disable_clk;
 	}
 
 	ret = devm_extcon_dev_register(&pdev->dev, tu->extcon);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 11ee55e..3178d8a 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -121,6 +121,7 @@
 	{ USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */
 	{ USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
 	{ USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
+	{ USB_DEVICE(0x10C4, 0x85A7) }, /* LifeScan OneTouch Verio IQ */
 	{ USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
 	{ USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
 	{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
@@ -171,6 +172,7 @@
 	{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
 	{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
 	{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
+	{ USB_DEVICE(0x18EF, 0xE030) }, /* ELV ALC 8xxx Battery Charger */
 	{ USB_DEVICE(0x18EF, 0xE032) }, /* ELV TFD500 Data Logger */
 	{ USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
 	{ USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 3249f42..0c743e4 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1017,6 +1017,7 @@
 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
 	{ USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) },
 	{ USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) },
+	{ USB_DEVICE(AIRBUS_DS_VID, AIRBUS_DS_P8GR) },
 	{ }					/* Terminating entry */
 };
 
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index f9d15bd..543d280 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -914,6 +914,12 @@
 #define ICPDAS_I7563U_PID		0x0105
 
 /*
+ * Airbus Defence and Space
+ */
+#define AIRBUS_DS_VID			0x1e8e  /* Vendor ID */
+#define AIRBUS_DS_P8GR			0x6001  /* Tetra P8GR */
+
+/*
  * RT Systems programming cables for various ham radios
  */
 #define RTSYSTEMS_VID		0x2100	/* Vendor ID */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index db3d34c..a818c43 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -236,11 +236,14 @@
 /* These Quectel products use Qualcomm's vendor ID */
 #define QUECTEL_PRODUCT_UC20			0x9003
 #define QUECTEL_PRODUCT_UC15			0x9090
+/* These Yuga products use Qualcomm's vendor ID */
+#define YUGA_PRODUCT_CLM920_NC5			0x9625
 
 #define QUECTEL_VENDOR_ID			0x2c7c
 /* These Quectel products use Quectel's vendor ID */
 #define QUECTEL_PRODUCT_EC21			0x0121
 #define QUECTEL_PRODUCT_EC25			0x0125
+#define QUECTEL_PRODUCT_BG96			0x0296
 
 #define CMOTECH_VENDOR_ID			0x16d8
 #define CMOTECH_PRODUCT_6001			0x6001
@@ -282,6 +285,7 @@
 #define TELIT_PRODUCT_LE922_USBCFG3		0x1043
 #define TELIT_PRODUCT_LE922_USBCFG5		0x1045
 #define TELIT_PRODUCT_ME910			0x1100
+#define TELIT_PRODUCT_ME910_DUAL_MODEM		0x1101
 #define TELIT_PRODUCT_LE920			0x1200
 #define TELIT_PRODUCT_LE910			0x1201
 #define TELIT_PRODUCT_LE910_USBCFG4		0x1206
@@ -647,6 +651,11 @@
 	.reserved = BIT(1) | BIT(3),
 };
 
+static const struct option_blacklist_info telit_me910_dual_modem_blacklist = {
+	.sendsetup = BIT(0),
+	.reserved = BIT(3),
+};
+
 static const struct option_blacklist_info telit_le910_blacklist = {
 	.sendsetup = BIT(0),
 	.reserved = BIT(1) | BIT(2),
@@ -676,6 +685,10 @@
 	.reserved = BIT(4) | BIT(5),
 };
 
+static const struct option_blacklist_info yuga_clm920_nc5_blacklist = {
+	.reserved = BIT(1) | BIT(4),
+};
+
 static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -1180,11 +1193,16 @@
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
 	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	/* Yuga products use Qualcomm vendor ID */
+	{ USB_DEVICE(QUALCOMM_VENDOR_ID, YUGA_PRODUCT_CLM920_NC5),
+	  .driver_info = (kernel_ulong_t)&yuga_clm920_nc5_blacklist },
 	/* Quectel products using Quectel vendor ID */
 	{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
 	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
 	{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
 	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
+	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@@ -1244,6 +1262,8 @@
 		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
 		.driver_info = (kernel_ulong_t)&telit_me910_blacklist },
+	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
+		.driver_info = (kernel_ulong_t)&telit_me910_dual_modem_blacklist },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
 		.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 4516291..fb6dc16 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -166,6 +166,8 @@
 	{DEVICE_SWI(0x1199, 0x9079)},	/* Sierra Wireless EM74xx */
 	{DEVICE_SWI(0x1199, 0x907a)},	/* Sierra Wireless EM74xx QDL */
 	{DEVICE_SWI(0x1199, 0x907b)},	/* Sierra Wireless EM74xx */
+	{DEVICE_SWI(0x1199, 0x9090)},	/* Sierra Wireless EM7565 QDL */
+	{DEVICE_SWI(0x1199, 0x9091)},	/* Sierra Wireless EM7565 */
 	{DEVICE_SWI(0x413c, 0x81a2)},	/* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
 	{DEVICE_SWI(0x413c, 0x81a3)},	/* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
 	{DEVICE_SWI(0x413c, 0x81a4)},	/* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
@@ -346,6 +348,7 @@
 			break;
 		case 2:
 			dev_dbg(dev, "NMEA GPS interface found\n");
+			sendsetup = true;
 			break;
 		case 3:
 			dev_dbg(dev, "Modem port found\n");
diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h
index a155cd0..ecc83c4 100644
--- a/drivers/usb/storage/uas-detect.h
+++ b/drivers/usb/storage/uas-detect.h
@@ -111,6 +111,10 @@
 		}
 	}
 
+	/* All Seagate disk enclosures have broken ATA pass-through support */
+	if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bc2)
+		flags |= US_FL_NO_ATA_1X;
+
 	usb_stor_adjust_quirks(udev, &flags);
 
 	if (flags & US_FL_IGNORE_UAS) {
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 2572fd5..b605115 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2113,6 +2113,13 @@
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
 		US_FL_BROKEN_FUA ),
 
+/* Reported by David Kozub <zub@linux.fjfi.cvut.cz> */
+UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
+		"JMicron",
+		"JMS567",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_BROKEN_FUA),
+
 /*
  * Reported by Alexandre Oliva <oliva@lsd.ic.unicamp.br>
  * JMicron responds to USN and several other SCSI ioctls with a
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index cde1153..719ec68 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -142,6 +142,13 @@
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
 		US_FL_BROKEN_FUA | US_FL_NO_REPORT_OPCODES),
 
+/* Reported-by: David Kozub <zub@linux.fjfi.cvut.cz> */
+UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
+		"JMicron",
+		"JMS567",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_BROKEN_FUA),
+
 /* Reported-by: Hans de Goede <hdegoede@redhat.com> */
 UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
 		"VIA",
@@ -149,6 +156,13 @@
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
 		US_FL_NO_ATA_1X),
 
+/* Reported-by: Icenowy Zheng <icenowy@aosc.io> */
+UNUSUAL_DEV(0x2537, 0x1068, 0x0000, 0x9999,
+		"Norelsys",
+		"NS1068X",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_IGNORE_UAS),
+
 /* Reported-by: Takeo Nakayama <javhera@gmx.com> */
 UNUSUAL_DEV(0x357d, 0x7788, 0x0000, 0x9999,
 		"JMicron",
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
index c653ce5..1886d8e 100644
--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -163,8 +163,7 @@
 	 * step 1?
 	 */
 	if (ud->tcp_socket) {
-		dev_dbg(&sdev->udev->dev, "shutdown tcp_socket %p\n",
-			ud->tcp_socket);
+		dev_dbg(&sdev->udev->dev, "shutdown sockfd\n");
 		kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
 	}
 
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
index af10f7b..325b4c0 100644
--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -252,11 +252,12 @@
 	struct stub_priv *priv;
 	struct urb *urb;
 
-	dev_dbg(&sdev->udev->dev, "free sdev %p\n", sdev);
+	dev_dbg(&sdev->udev->dev, "Stub device cleaning up urbs\n");
 
 	while ((priv = stub_priv_pop(sdev))) {
 		urb = priv->urb;
-		dev_dbg(&sdev->udev->dev, "free urb %p\n", urb);
+		dev_dbg(&sdev->udev->dev, "free urb seqnum %lu\n",
+			priv->seqnum);
 		usb_kill_urb(urb);
 
 		kmem_cache_free(stub_priv_cache, priv);
diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
index 191b176..5b80718 100644
--- a/drivers/usb/usbip/stub_rx.c
+++ b/drivers/usb/usbip/stub_rx.c
@@ -225,9 +225,6 @@
 		if (priv->seqnum != pdu->u.cmd_unlink.seqnum)
 			continue;
 
-		dev_info(&priv->urb->dev->dev, "unlink urb %p\n",
-			 priv->urb);
-
 		/*
 		 * This matched urb is not completed yet (i.e., be in
 		 * flight in usb hcd hardware/driver). Now we are
@@ -266,8 +263,8 @@
 		ret = usb_unlink_urb(priv->urb);
 		if (ret != -EINPROGRESS)
 			dev_err(&priv->urb->dev->dev,
-				"failed to unlink a urb %p, ret %d\n",
-				priv->urb, ret);
+				"failed to unlink a urb # %lu, ret %d\n",
+				priv->seqnum, ret);
 
 		return 0;
 	}
@@ -336,23 +333,34 @@
 	return priv;
 }
 
-static int get_pipe(struct stub_device *sdev, int epnum, int dir)
+static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
 {
 	struct usb_device *udev = sdev->udev;
 	struct usb_host_endpoint *ep;
 	struct usb_endpoint_descriptor *epd = NULL;
+	int epnum = pdu->base.ep;
+	int dir = pdu->base.direction;
+
+	if (epnum < 0 || epnum > 15)
+		goto err_ret;
 
 	if (dir == USBIP_DIR_IN)
 		ep = udev->ep_in[epnum & 0x7f];
 	else
 		ep = udev->ep_out[epnum & 0x7f];
-	if (!ep) {
-		dev_err(&sdev->udev->dev, "no such endpoint?, %d\n",
-			epnum);
-		BUG();
-	}
+	if (!ep)
+		goto err_ret;
 
 	epd = &ep->desc;
+
+	/* validate transfer_buffer_length */
+	if (pdu->u.cmd_submit.transfer_buffer_length > INT_MAX) {
+		dev_err(&sdev->udev->dev,
+			"CMD_SUBMIT: -EMSGSIZE transfer_buffer_length %d\n",
+			pdu->u.cmd_submit.transfer_buffer_length);
+		return -1;
+	}
+
 	if (usb_endpoint_xfer_control(epd)) {
 		if (dir == USBIP_DIR_OUT)
 			return usb_sndctrlpipe(udev, epnum);
@@ -375,15 +383,31 @@
 	}
 
 	if (usb_endpoint_xfer_isoc(epd)) {
+		/* validate packet size and number of packets */
+		unsigned int maxp, packets, bytes;
+
+		maxp = usb_endpoint_maxp(epd);
+		maxp *= usb_endpoint_maxp_mult(epd);
+		bytes = pdu->u.cmd_submit.transfer_buffer_length;
+		packets = DIV_ROUND_UP(bytes, maxp);
+
+		if (pdu->u.cmd_submit.number_of_packets < 0 ||
+		    pdu->u.cmd_submit.number_of_packets > packets) {
+			dev_err(&sdev->udev->dev,
+				"CMD_SUBMIT: isoc invalid num packets %d\n",
+				pdu->u.cmd_submit.number_of_packets);
+			return -1;
+		}
 		if (dir == USBIP_DIR_OUT)
 			return usb_sndisocpipe(udev, epnum);
 		else
 			return usb_rcvisocpipe(udev, epnum);
 	}
 
+err_ret:
 	/* NOT REACHED */
-	dev_err(&sdev->udev->dev, "get pipe, epnum %d\n", epnum);
-	return 0;
+	dev_err(&sdev->udev->dev, "CMD_SUBMIT: invalid epnum %d\n", epnum);
+	return -1;
 }
 
 static void masking_bogus_flags(struct urb *urb)
@@ -447,7 +471,10 @@
 	struct stub_priv *priv;
 	struct usbip_device *ud = &sdev->ud;
 	struct usb_device *udev = sdev->udev;
-	int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction);
+	int pipe = get_pipe(sdev, pdu);
+
+	if (pipe == -1)
+		return;
 
 	priv = stub_priv_alloc(sdev, pdu);
 	if (!priv)
@@ -466,7 +493,8 @@
 	}
 
 	/* allocate urb transfer buffer, if needed */
-	if (pdu->u.cmd_submit.transfer_buffer_length > 0) {
+	if (pdu->u.cmd_submit.transfer_buffer_length > 0 &&
+	    pdu->u.cmd_submit.transfer_buffer_length <= INT_MAX) {
 		priv->urb->transfer_buffer =
 			kzalloc(pdu->u.cmd_submit.transfer_buffer_length,
 				GFP_KERNEL);
diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c
index be50cef..96aa375 100644
--- a/drivers/usb/usbip/stub_tx.c
+++ b/drivers/usb/usbip/stub_tx.c
@@ -102,7 +102,7 @@
 	/* link a urb to the queue of tx. */
 	spin_lock_irqsave(&sdev->priv_lock, flags);
 	if (sdev->ud.tcp_socket == NULL) {
-		usbip_dbg_stub_tx("ignore urb for closed connection %p", urb);
+		usbip_dbg_stub_tx("ignore urb for closed connection\n");
 		/* It will be freed in stub_device_cleanup_urbs(). */
 	} else if (priv->unlinking) {
 		stub_enqueue_ret_unlink(sdev, priv->seqnum, urb->status);
@@ -181,6 +181,13 @@
 		memset(&pdu_header, 0, sizeof(pdu_header));
 		memset(&msg, 0, sizeof(msg));
 
+		if (urb->actual_length > 0 && !urb->transfer_buffer) {
+			dev_err(&sdev->udev->dev,
+				"urb: actual_length %d transfer_buffer null\n",
+				urb->actual_length);
+			return -1;
+		}
+
 		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
 			iovnum = 2 + urb->number_of_packets;
 		else
@@ -197,8 +204,8 @@
 
 		/* 1. setup usbip_header */
 		setup_ret_submit_pdu(&pdu_header, urb);
-		usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n",
-				  pdu_header.base.seqnum, urb);
+		usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
+				  pdu_header.base.seqnum);
 		usbip_header_correct_endian(&pdu_header, 1);
 
 		iov[iovnum].iov_base = &pdu_header;
diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
index 8b23229..2a5d318 100644
--- a/drivers/usb/usbip/usbip_common.c
+++ b/drivers/usb/usbip/usbip_common.c
@@ -105,7 +105,7 @@
 	dev_dbg(dev, "       devnum(%d) devpath(%s) usb speed(%s)",
 		udev->devnum, udev->devpath, usb_speed_string(udev->speed));
 
-	pr_debug("tt %p, ttport %d\n", udev->tt, udev->ttport);
+	pr_debug("tt hub ttport %d\n", udev->ttport);
 
 	dev_dbg(dev, "                    ");
 	for (i = 0; i < 16; i++)
@@ -138,12 +138,8 @@
 	}
 	pr_debug("\n");
 
-	dev_dbg(dev, "parent %p, bus %p\n", udev->parent, udev->bus);
-
-	dev_dbg(dev,
-		"descriptor %p, config %p, actconfig %p, rawdescriptors %p\n",
-		&udev->descriptor, udev->config,
-		udev->actconfig, udev->rawdescriptors);
+	dev_dbg(dev, "parent %s, bus %s\n", dev_name(&udev->parent->dev),
+		udev->bus->bus_name);
 
 	dev_dbg(dev, "have_langid %d, string_langid %d\n",
 		udev->have_langid, udev->string_langid);
@@ -251,9 +247,6 @@
 
 	dev = &urb->dev->dev;
 
-	dev_dbg(dev, "   urb                   :%p\n", urb);
-	dev_dbg(dev, "   dev                   :%p\n", urb->dev);
-
 	usbip_dump_usb_device(urb->dev);
 
 	dev_dbg(dev, "   pipe                  :%08x ", urb->pipe);
@@ -262,11 +255,9 @@
 
 	dev_dbg(dev, "   status                :%d\n", urb->status);
 	dev_dbg(dev, "   transfer_flags        :%08X\n", urb->transfer_flags);
-	dev_dbg(dev, "   transfer_buffer       :%p\n", urb->transfer_buffer);
 	dev_dbg(dev, "   transfer_buffer_length:%d\n",
 						urb->transfer_buffer_length);
 	dev_dbg(dev, "   actual_length         :%d\n", urb->actual_length);
-	dev_dbg(dev, "   setup_packet          :%p\n", urb->setup_packet);
 
 	if (urb->setup_packet && usb_pipetype(urb->pipe) == PIPE_CONTROL)
 		usbip_dump_usb_ctrlrequest(
@@ -276,8 +267,6 @@
 	dev_dbg(dev, "   number_of_packets     :%d\n", urb->number_of_packets);
 	dev_dbg(dev, "   interval              :%d\n", urb->interval);
 	dev_dbg(dev, "   error_count           :%d\n", urb->error_count);
-	dev_dbg(dev, "   context               :%p\n", urb->context);
-	dev_dbg(dev, "   complete              :%p\n", urb->complete);
 }
 EXPORT_SYMBOL_GPL(usbip_dump_urb);
 
@@ -335,13 +324,10 @@
 	char *bp = buf;
 	int osize = size;
 
-	usbip_dbg_xmit("enter\n");
-
-	if (!sock || !buf || !size) {
-		pr_err("invalid arg, sock %p buff %p size %d\n", sock, buf,
-		       size);
+	if (!sock || !buf || !size)
 		return -EINVAL;
-	}
+
+	usbip_dbg_xmit("enter\n");
 
 	do {
 		sock->sk->sk_allocation = GFP_NOIO;
@@ -354,11 +340,8 @@
 		msg.msg_flags      = MSG_NOSIGNAL;
 
 		result = kernel_recvmsg(sock, &msg, &iov, 1, size, MSG_WAITALL);
-		if (result <= 0) {
-			pr_debug("receive sock %p buf %p size %u ret %d total %d\n",
-				 sock, buf, size, result, total);
+		if (result <= 0)
 			goto err;
-		}
 
 		size -= result;
 		buf += result;
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index d6dc165..7f161b0 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -506,9 +506,6 @@
 	struct vhci_device *vdev;
 	unsigned long flags;
 
-	usbip_dbg_vhci_hc("enter, usb_hcd %p urb %p mem_flags %d\n",
-			  hcd, urb, mem_flags);
-
 	if (portnum > VHCI_HC_PORTS) {
 		pr_err("invalid port number %d\n", portnum);
 		return -ENODEV;
@@ -671,8 +668,6 @@
 	struct vhci_device *vdev;
 	unsigned long flags;
 
-	pr_info("dequeue a urb %p\n", urb);
-
 	spin_lock_irqsave(&vhci->lock, flags);
 
 	priv = urb->hcpriv;
@@ -700,7 +695,6 @@
 		/* tcp connection is closed */
 		spin_lock(&vdev->priv_lock);
 
-		pr_info("device %p seems to be disconnected\n", vdev);
 		list_del(&priv->list);
 		kfree(priv);
 		urb->hcpriv = NULL;
@@ -712,8 +706,6 @@
 		 * vhci_rx will receive RET_UNLINK and give back the URB.
 		 * Otherwise, we give back it here.
 		 */
-		pr_info("gives back urb %p\n", urb);
-
 		usb_hcd_unlink_urb_from_ep(hcd, urb);
 
 		spin_unlock_irqrestore(&vhci->lock, flags);
@@ -741,8 +733,6 @@
 
 		unlink->unlink_seqnum = priv->seqnum;
 
-		pr_info("device %p seems to be still connected\n", vdev);
-
 		/* send cmd_unlink and try to cancel the pending URB in the
 		 * peer */
 		list_add_tail(&unlink->list, &vdev->unlink_tx);
@@ -823,7 +813,7 @@
 
 	/* need this? see stub_dev.c */
 	if (ud->tcp_socket) {
-		pr_debug("shutdown tcp_socket %p\n", ud->tcp_socket);
+		pr_debug("shutdown tcp_socket\n");
 		kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
 	}
 
diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
index fc2d319..5943dee 100644
--- a/drivers/usb/usbip/vhci_rx.c
+++ b/drivers/usb/usbip/vhci_rx.c
@@ -37,24 +37,23 @@
 		urb = priv->urb;
 		status = urb->status;
 
-		usbip_dbg_vhci_rx("find urb %p vurb %p seqnum %u\n",
-				urb, priv, seqnum);
+		usbip_dbg_vhci_rx("find urb seqnum %u\n", seqnum);
 
 		switch (status) {
 		case -ENOENT:
 			/* fall through */
 		case -ECONNRESET:
-			dev_info(&urb->dev->dev,
-				 "urb %p was unlinked %ssynchronuously.\n", urb,
-				 status == -ENOENT ? "" : "a");
+			dev_dbg(&urb->dev->dev,
+				 "urb seq# %u was unlinked %ssynchronuously\n",
+				 seqnum, status == -ENOENT ? "" : "a");
 			break;
 		case -EINPROGRESS:
 			/* no info output */
 			break;
 		default:
-			dev_info(&urb->dev->dev,
-				 "urb %p may be in a error, status %d\n", urb,
-				 status);
+			dev_dbg(&urb->dev->dev,
+				 "urb seq# %u may be in a error, status %d\n",
+				 seqnum, status);
 		}
 
 		list_del(&priv->list);
@@ -80,8 +79,8 @@
 	spin_unlock_irqrestore(&vdev->priv_lock, flags);
 
 	if (!urb) {
-		pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
-		pr_info("max seqnum %d\n",
+		pr_err("cannot find a urb of seqnum %u max seqnum %d\n",
+			pdu->base.seqnum,
 			atomic_read(&vhci->seqnum));
 		usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
 		return;
@@ -104,7 +103,7 @@
 	if (usbip_dbg_flag_vhci_rx)
 		usbip_dump_urb(urb);
 
-	usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
+	usbip_dbg_vhci_rx("now giveback urb %u\n", pdu->base.seqnum);
 
 	spin_lock_irqsave(&vhci->lock, flags);
 	usb_hcd_unlink_urb_from_ep(vhci_to_hcd(vhci), urb);
@@ -170,7 +169,7 @@
 		pr_info("the urb (seqnum %d) was already given back\n",
 			pdu->base.seqnum);
 	} else {
-		usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
+		usbip_dbg_vhci_rx("now giveback urb %d\n", pdu->base.seqnum);
 
 		/* If unlink is successful, status is -ECONNRESET */
 		urb->status = pdu->u.ret_unlink.status;
diff --git a/drivers/usb/usbip/vhci_tx.c b/drivers/usb/usbip/vhci_tx.c
index 3e7878f..a9a663a 100644
--- a/drivers/usb/usbip/vhci_tx.c
+++ b/drivers/usb/usbip/vhci_tx.c
@@ -83,7 +83,8 @@
 		memset(&msg, 0, sizeof(msg));
 		memset(&iov, 0, sizeof(iov));
 
-		usbip_dbg_vhci_tx("setup txdata urb %p\n", urb);
+		usbip_dbg_vhci_tx("setup txdata urb seqnum %lu\n",
+				  priv->seqnum);
 
 		/* 1. setup usbip_header */
 		setup_cmd_submit_pdu(&pdu_header, urb);
diff --git a/drivers/usb/usbip/vudc_rx.c b/drivers/usb/usbip/vudc_rx.c
index e429b59..d020e72 100644
--- a/drivers/usb/usbip/vudc_rx.c
+++ b/drivers/usb/usbip/vudc_rx.c
@@ -132,6 +132,25 @@
 	urb_p->new = 1;
 	urb_p->seqnum = pdu->base.seqnum;
 
+	if (urb_p->ep->type == USB_ENDPOINT_XFER_ISOC) {
+		/* validate packet size and number of packets */
+		unsigned int maxp, packets, bytes;
+
+		maxp = usb_endpoint_maxp(urb_p->ep->desc);
+		maxp *= usb_endpoint_maxp_mult(urb_p->ep->desc);
+		bytes = pdu->u.cmd_submit.transfer_buffer_length;
+		packets = DIV_ROUND_UP(bytes, maxp);
+
+		if (pdu->u.cmd_submit.number_of_packets < 0 ||
+		    pdu->u.cmd_submit.number_of_packets > packets) {
+			dev_err(&udc->gadget.dev,
+				"CMD_SUBMIT: isoc invalid num packets %d\n",
+				pdu->u.cmd_submit.number_of_packets);
+			ret = -EMSGSIZE;
+			goto free_urbp;
+		}
+	}
+
 	ret = alloc_urb_from_cmd(&urb_p->urb, pdu, urb_p->ep->type);
 	if (ret) {
 		usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC);
diff --git a/drivers/usb/usbip/vudc_tx.c b/drivers/usb/usbip/vudc_tx.c
index 2346617..3ab4c86 100644
--- a/drivers/usb/usbip/vudc_tx.c
+++ b/drivers/usb/usbip/vudc_tx.c
@@ -97,6 +97,13 @@
 	memset(&pdu_header, 0, sizeof(pdu_header));
 	memset(&msg, 0, sizeof(msg));
 
+	if (urb->actual_length > 0 && !urb->transfer_buffer) {
+		dev_err(&udc->gadget.dev,
+			"urb: actual_length %d transfer_buffer null\n",
+			urb->actual_length);
+		return -1;
+	}
+
 	if (urb_p->type == USB_ENDPOINT_XFER_ISOC)
 		iovnum = 2 + urb->number_of_packets;
 	else
@@ -112,8 +119,8 @@
 
 	/* 1. setup usbip_header */
 	setup_ret_submit_pdu(&pdu_header, urb_p);
-	usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n",
-			  pdu_header.base.seqnum, urb);
+	usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
+			  pdu_header.base.seqnum);
 	usbip_header_correct_endian(&pdu_header, 1);
 
 	iov[iovnum].iov_base = &pdu_header;
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 65d4a30..9f1ec43 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -851,11 +851,13 @@
 
 	/*
 	 * Allow writes to device control fields, except devctl_phantom,
-	 * which could confuse IOMMU, and the ARI bit in devctl2, which
+	 * which could confuse IOMMU, MPS, which can break communication
+	 * with other physical devices, and the ARI bit in devctl2, which
 	 * is set at probe time.  FLR gets virtualized via our writefn.
 	 */
 	p_setw(perm, PCI_EXP_DEVCTL,
-	       PCI_EXP_DEVCTL_BCR_FLR, ~PCI_EXP_DEVCTL_PHANTOM);
+	       PCI_EXP_DEVCTL_BCR_FLR | PCI_EXP_DEVCTL_PAYLOAD,
+	       ~PCI_EXP_DEVCTL_PHANTOM);
 	p_setw(perm, PCI_EXP_DEVCTL2, NO_VIRT, ~PCI_EXP_DEVCTL2_ARI);
 	return 0;
 }
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 85d3e64..59b3f62 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -1123,12 +1123,11 @@
 		mutex_lock(&container->lock);
 
 		ret = tce_iommu_create_default_window(container);
-		if (ret)
-			return ret;
-
-		ret = tce_iommu_create_window(container, create.page_shift,
-				create.window_size, create.levels,
-				&create.start_addr);
+		if (!ret)
+			ret = tce_iommu_create_window(container,
+					create.page_shift,
+					create.window_size, create.levels,
+					&create.start_addr);
 
 		mutex_unlock(&container->lock);
 
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 6e29d05..9e36632 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -693,6 +693,7 @@
 		      struct scatterlist *sg, int sg_count)
 {
 	size_t off = iter->iov_offset;
+	struct scatterlist *p = sg;
 	int i, ret;
 
 	for (i = 0; i < iter->nr_segs; i++) {
@@ -701,8 +702,8 @@
 
 		ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write);
 		if (ret < 0) {
-			for (i = 0; i < sg_count; i++) {
-				struct page *page = sg_page(&sg[i]);
+			while (p < sg) {
+				struct page *page = sg_page(p++);
 				if (page)
 					put_page(page);
 			}
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index e3fad30..0ec970c 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -218,6 +218,46 @@
 	return len;
 }
 
+static int
+vhost_transport_cancel_pkt(struct vsock_sock *vsk)
+{
+	struct vhost_vsock *vsock;
+	struct virtio_vsock_pkt *pkt, *n;
+	int cnt = 0;
+	LIST_HEAD(freeme);
+
+	/* Find the vhost_vsock according to guest context id  */
+	vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
+	if (!vsock)
+		return -ENODEV;
+
+	spin_lock_bh(&vsock->send_pkt_list_lock);
+	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
+		if (pkt->vsk != vsk)
+			continue;
+		list_move(&pkt->list, &freeme);
+	}
+	spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+	list_for_each_entry_safe(pkt, n, &freeme, list) {
+		if (pkt->reply)
+			cnt++;
+		list_del(&pkt->list);
+		virtio_transport_free_pkt(pkt);
+	}
+
+	if (cnt) {
+		struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
+		int new_cnt;
+
+		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
+		if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
+			vhost_poll_queue(&tx_vq->poll);
+	}
+
+	return 0;
+}
+
 static struct virtio_vsock_pkt *
 vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
 		      unsigned int out, unsigned int in)
@@ -669,6 +709,7 @@
 		.release                  = virtio_transport_release,
 		.connect                  = virtio_transport_connect,
 		.shutdown                 = virtio_transport_shutdown,
+		.cancel_pkt               = vhost_transport_cancel_pkt,
 
 		.dgram_enqueue            = virtio_transport_dgram_enqueue,
 		.dgram_dequeue            = virtio_transport_dgram_dequeue,
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 1261400..d95ae09 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -79,14 +79,17 @@
 static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness)
 {
 	unsigned int lth = pb->lth_brightness;
-	int duty_cycle;
+	u64 duty_cycle;
 
 	if (pb->levels)
 		duty_cycle = pb->levels[brightness];
 	else
 		duty_cycle = brightness;
 
-	return (duty_cycle * (pb->period - lth) / pb->scale) + lth;
+	duty_cycle *= pb->period - lth;
+	do_div(duty_cycle, pb->scale);
+
+	return duty_cycle + lth;
 }
 
 static int pwm_backlight_update_status(struct backlight_device *bl)
diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c
index 6c2b2ca..44c2be1 100644
--- a/drivers/video/fbdev/au1200fb.c
+++ b/drivers/video/fbdev/au1200fb.c
@@ -1681,8 +1681,10 @@
 
 		fbi = framebuffer_alloc(sizeof(struct au1200fb_device),
 					&dev->dev);
-		if (!fbi)
+		if (!fbi) {
+			ret = -ENOMEM;
 			goto failed;
+		}
 
 		_au1200fb_infos[plane] = fbi;
 		fbdev = fbi->par;
@@ -1700,7 +1702,8 @@
 		if (!fbdev->fb_mem) {
 			print_err("fail to allocate frambuffer (size: %dK))",
 				  fbdev->fb_len / 1024);
-			return -ENOMEM;
+			ret = -ENOMEM;
+			goto failed;
 		}
 
 		/*
diff --git a/drivers/video/fbdev/controlfb.h b/drivers/video/fbdev/controlfb.h
index 6026c60..261522f 100644
--- a/drivers/video/fbdev/controlfb.h
+++ b/drivers/video/fbdev/controlfb.h
@@ -141,5 +141,7 @@
 	{{ 1, 2}},	/* 1152x870, 75Hz */
 	{{ 0, 1}},	/* 1280x960, 75Hz */
 	{{ 0, 1}},	/* 1280x1024, 75Hz */
+	{{ 1, 2}},	/* 1152x768, 60Hz */
+	{{ 0, 1}},	/* 1600x1024, 60Hz */
 };
 
diff --git a/drivers/video/fbdev/msm/Kconfig b/drivers/video/fbdev/msm/Kconfig
new file mode 100644
index 0000000..60b86e7
--- /dev/null
+++ b/drivers/video/fbdev/msm/Kconfig
@@ -0,0 +1,138 @@
+source "drivers/video/fbdev/msm/msm_dba/Kconfig"
+
+if FB_MSM
+
+config FB_MSM_MDSS_COMMON
+	bool
+
+choice
+	prompt "MDP HW version"
+	default FB_MSM_MDP
+
+config FB_MSM_MDP
+	bool "MDP HW"
+	select FB_MSM_MDP_HW
+	---help---
+	The Mobile Display Processor (MDP) driver support devices which
+	contain MDP hardware block.
+
+	Support for MSM MDP HW revision 2.2.
+	Say Y here if this is msm7201 variant platform.
+
+config FB_MSM_MDSS
+	bool "MDSS HW"
+	select SYNC
+	select SW_SYNC
+	select FB_MSM_MDSS_COMMON
+	---help---
+	The Mobile Display Sub System (MDSS) driver supports devices which
+	contain MDSS hardware block.
+
+	The MDSS driver implements frame buffer interface to provide access to
+	the display hardware and provide a way for users to display graphics
+	on connected display panels.
+
+config FB_MSM_MDP_NONE
+	bool "MDP HW None"
+	---help---
+	This is used for platforms without Mobile Display Sub System (MDSS).
+	mdm platform don't have MDSS hardware block.
+
+	Say Y here if this is mdm platform.
+
+endchoice
+
+config FB_MSM_QPIC
+	bool
+	select FB_MSM_MDSS_COMMON
+
+config FB_MSM_QPIC_ILI_QVGA_PANEL
+	bool "Qpic MIPI ILI QVGA Panel"
+	select FB_MSM_QPIC
+	---help---
+	Support for MIPI ILI QVGA (240x320) panel ILI TECHNOLOGY 9341
+	with on-chip full display RAM use parallel interface.
+
+config FB_MSM_QPIC_PANEL_DETECT
+	bool "Qpic Panel Detect"
+	select FB_MSM_QPIC_ILI_QVGA_PANEL
+	---help---
+	Support for Qpic panel auto detect.
+
+config FB_MSM_MDSS_WRITEBACK
+	bool "MDSS Writeback Panel"
+	---help---
+	The MDSS Writeback Panel provides support for routing the output of
+	MDSS frame buffer driver and MDP processing to memory.
+
+config FB_MSM_MDSS_HDMI_PANEL
+	bool "MDSS HDMI Tx Panel"
+	depends on FB_MSM_MDSS
+	select MSM_EXT_DISPLAY
+	default n
+	---help---
+	The MDSS HDMI Panel provides support for transmitting TMDS signals of
+	MDSS frame buffer data to connected hdmi compliant TVs, monitors etc.
+
+config FB_MSM_MDSS_HDMI_MHL_SII8334
+	depends on FB_MSM_MDSS_HDMI_PANEL
+	bool 'MHL SII8334 support '
+	default n
+	---help---
+	Support the HDMI to MHL conversion.
+	MHL (Mobile High-Definition Link) technology
+	uses USB connector to output HDMI content
+
+config FB_MSM_MDSS_MHL3
+	depends on FB_MSM_MDSS_HDMI_PANEL
+	bool "MHL3 SII8620 Support"
+	default n
+	---help---
+	Support the SiliconImage 8620 MHL Tx transmitter that uses
+	USB connector to output HDMI content. Transmitter is an
+	i2c device acting as an HDMI to MHL bridge. Chip supports
+	MHL 3.0 standard.
+
+config FB_MSM_MDSS_DSI_CTRL_STATUS
+	tristate "DSI controller status check feature"
+	---help---
+	Check DSI controller status periodically (default period is 5
+	seconds) by sending Bus-Turn-Around (BTA) command. If DSI controller
+	fails to acknowledge the BTA command, it sends PANEL_ALIVE=0 status
+	to HAL layer to reset the controller.
+
+config FB_MSM_MDSS_EDP_PANEL
+	depends on FB_MSM_MDSS
+	bool "MDSS eDP Panel"
+	---help---
+	The MDSS eDP Panel provides support for eDP host controller driver.
+	Which runs in Video mode only and is responsible for transmitting
+	frame buffer from host SOC to eDP display panel.
+
+config FB_MSM_MDSS_MDP3
+	depends on FB_MSM_MDSS
+	bool "MDP3 display controller"
+	---help---
+	The MDP3 provides support for an older version display controller.
+	Included in latest display sub-system, known as MDSS.
+
+config FB_MSM_MDSS_XLOG_DEBUG
+	depends on FB_MSM_MDSS
+	bool "Enable MDSS debugging"
+	---help---
+	The MDSS debugging provides support to enable display debugging
+	features to: Dump MDSS registers during driver errors, panic
+	driver during fatal errors and enable some display-driver logging
+	into an internal buffer (this avoids logging overhead).
+
+config FB_MSM_MDSS_FRC_DEBUG
+	depends on DEBUG_FS && FB_MSM_MDSS
+	bool "Enable Video FRC debugging"
+	default n
+	---help---
+	The MDSS FRC debugging provides support to enable the deterministic
+	frame rate control (FRC) debugging features to: Collect video frame
+	statistics and check whether its output pattern matches expected
+	cadence.
+
+endif
diff --git a/drivers/video/fbdev/msm/Makefile b/drivers/video/fbdev/msm/Makefile
new file mode 100644
index 0000000..ed3ff87
--- /dev/null
+++ b/drivers/video/fbdev/msm/Makefile
@@ -0,0 +1,71 @@
+ccflags-y += -I$(src)
+
+obj-$(CONFIG_FB_MSM_MDSS_MHL3) += mhl3/
+
+mdss-mdp3-objs = mdp3.o mdp3_layer.o mdp3_dma.o mdp3_ctrl.o dsi_status_v2.o
+mdss-mdp3-objs += mdp3_ppp.o mdp3_ppp_hwio.o mdp3_ppp_data.o
+obj-$(CONFIG_FB_MSM_MDSS_MDP3) += mdss-mdp3.o
+ifeq ($(CONFIG_FB_MSM_MDSS_MDP3), y)
+ccflags-y += -DTARGET_HW_MDSS_MDP3
+endif
+mdss-mdp-objs := mdss_mdp.o mdss_mdp_ctl.o mdss_mdp_pipe.o mdss_mdp_util.o dsi_status_6g.o
+mdss-mdp-objs += mdss_mdp_pp.o mdss_mdp_pp_debug.o mdss_mdp_pp_cache_config.o
+mdss-mdp-objs += mdss_mdp_intf_video.o
+mdss-mdp-objs += mdss_mdp_intf_cmd.o
+mdss-mdp-objs += mdss_mdp_intf_writeback.o
+mdss-mdp-objs += mdss_rotator.o
+mdss-mdp-objs += mdss_mdp_overlay.o
+mdss-mdp-objs += mdss_mdp_layer.o
+mdss-mdp-objs += mdss_mdp_splash_logo.o
+mdss-mdp-objs += mdss_mdp_cdm.o
+mdss-mdp-objs += mdss_smmu.o
+mdss-mdp-objs += mdss_mdp_wfd.o
+obj-$(CONFIG_FB_MSM_MDSS) += mdss-mdp.o
+obj-$(CONFIG_FB_MSM_MDSS) += mdss_mdp_debug.o
+
+mdss-mdp-objs += mdss_mdp_pp_v1_7.o
+mdss-mdp-objs += mdss_mdp_pp_v3.o
+mdss-mdp-objs += mdss_mdp_pp_common.o
+
+ifeq ($(CONFIG_FB_MSM_MDSS),y)
+obj-$(CONFIG_DEBUG_FS) += mdss_debug.o mdss_debug_xlog.o
+endif
+
+ifeq ($(CONFIG_FB_MSM_MDSS_FRC_DEBUG),y)
+obj-$(CONFIG_DEBUG_FS) += mdss_debug_frc.o
+endif
+
+mdss-dsi-objs := mdss_dsi.o mdss_dsi_host.o mdss_dsi_cmd.o mdss_dsi_status.o
+mdss-dsi-objs += mdss_dsi_panel.o
+mdss-dsi-objs += msm_mdss_io_8974.o
+mdss-dsi-objs += mdss_dsi_phy.o
+mdss-dsi-objs += mdss_dsi_clk.o
+obj-$(CONFIG_FB_MSM_MDSS) += mdss-dsi.o
+obj-$(CONFIG_FB_MSM_MDSS) += mdss_panel.o
+
+ifneq ($(CONFIG_FB_MSM_MDSS_MDP3), y)
+obj-$(CONFIG_FB_MSM_MDSS) += mdss_hdmi_util.o
+obj-$(CONFIG_FB_MSM_MDSS) += mdss_hdmi_edid.o
+obj-$(CONFIG_FB_MSM_MDSS) += mdss_cec_core.o
+obj-$(CONFIG_FB_MSM_MDSS) += mdss_dba_utils.o
+obj-$(CONFIG_FB_MSM_MDSS_EDP_PANEL) += mdss_edp.o
+obj-$(CONFIG_FB_MSM_MDSS_EDP_PANEL) += mdss_edp_aux.o
+
+obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_tx.o
+obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_panel.o
+obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_hdcp.o
+obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_hdcp2p2.o
+obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_cec.o
+obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_audio.o
+obj-$(CONFIG_FB_MSM_MDSS_HDMI_MHL_SII8334) += mhl_sii8334.o mhl_msc.o
+ccflags-y += -DTARGET_HW_MDSS_HDMI
+endif
+
+obj-$(CONFIG_FB_MSM_MDSS_WRITEBACK) += mdss_wb.o
+
+mdss-qpic-objs := mdss_qpic.o mdss_fb.o mdss_qpic_panel.o
+obj-$(CONFIG_FB_MSM_QPIC) += mdss-qpic.o
+obj-$(CONFIG_FB_MSM_QPIC_ILI_QVGA_PANEL) += qpic_panel_ili_qvga.o
+
+obj-$(CONFIG_FB_MSM_MDSS) += mdss_fb.o mdss_util.o
+obj-$(CONFIG_COMPAT) += mdss_compat_utils.o
diff --git a/drivers/video/fbdev/msm/dsi_host_v2.c b/drivers/video/fbdev/msm/dsi_host_v2.c
new file mode 100644
index 0000000..2782702
--- /dev/null
+++ b/drivers/video/fbdev/msm/dsi_host_v2.c
@@ -0,0 +1,1889 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/iopoll.h>
+#include <linux/interrupt.h>
+#include <linux/of_device.h>
+
+#include "dsi_v2.h"
+#include "dsi_io_v2.h"
+#include "dsi_host_v2.h"
+#include "mdss_debug.h"
+#include "mdp3.h"
+
+#define DSI_POLL_SLEEP_US 1000
+#define DSI_POLL_TIMEOUT_US 16000
+#define DSI_ESC_CLK_RATE 19200000
+#define DSI_DMA_CMD_TIMEOUT_MS 200
+#define VSYNC_PERIOD 17
+#define DSI_MAX_PKT_SIZE 10
+#define DSI_SHORT_PKT_DATA_SIZE 2
+#define DSI_MAX_BYTES_TO_READ 16
+
+struct dsi_host_v2_private {
+	unsigned char *dsi_base;
+	size_t dsi_reg_size;
+	struct device dis_dev;
+	int clk_count;
+	int dsi_on;
+
+	void (*debug_enable_clk)(int on);
+};
+
+static struct dsi_host_v2_private *dsi_host_private;
+static int msm_dsi_clk_ctrl(struct mdss_panel_data *pdata, int enable);
+
+int msm_dsi_init(void)
+{
+	if (!dsi_host_private) {
+		dsi_host_private = kzalloc(sizeof(struct dsi_host_v2_private),
+					GFP_KERNEL);
+		if (!dsi_host_private)
+			return -ENOMEM;
+
+	}
+
+	return 0;
+}
+
+void msm_dsi_deinit(void)
+{
+	kfree(dsi_host_private);
+	dsi_host_private = NULL;
+}
+
+void msm_dsi_ack_err_status(unsigned char *ctrl_base)
+{
+	u32 status;
+
+	status = MIPI_INP(ctrl_base + DSI_ACK_ERR_STATUS);
+
+	if (status) {
+		MIPI_OUTP(ctrl_base + DSI_ACK_ERR_STATUS, status);
+
+		/* Writing of an extra 0 needed to clear error bits */
+		MIPI_OUTP(ctrl_base + DSI_ACK_ERR_STATUS, 0);
+		pr_err("%s: status=%x\n", __func__, status);
+	}
+}
+
+void msm_dsi_timeout_status(unsigned char *ctrl_base)
+{
+	u32 status;
+
+	status = MIPI_INP(ctrl_base + DSI_TIMEOUT_STATUS);
+	if (status & 0x0111) {
+		MIPI_OUTP(ctrl_base + DSI_TIMEOUT_STATUS, status);
+		pr_err("%s: status=%x\n", __func__, status);
+	}
+}
+
+void msm_dsi_dln0_phy_err(unsigned char *ctrl_base)
+{
+	u32 status;
+
+	status = MIPI_INP(ctrl_base + DSI_DLN0_PHY_ERR);
+
+	if (status & 0x011111) {
+		MIPI_OUTP(ctrl_base + DSI_DLN0_PHY_ERR, status);
+		pr_err("%s: status=%x\n", __func__, status);
+	}
+}
+
+void msm_dsi_fifo_status(unsigned char *ctrl_base)
+{
+	u32 status;
+
+	status = MIPI_INP(ctrl_base + DSI_FIFO_STATUS);
+
+	if (status & 0x44444489) {
+		MIPI_OUTP(ctrl_base + DSI_FIFO_STATUS, status);
+		pr_err("%s: status=%x\n", __func__, status);
+	}
+}
+
+void msm_dsi_status(unsigned char *ctrl_base)
+{
+	u32 status;
+
+	status = MIPI_INP(ctrl_base + DSI_STATUS);
+
+	if (status & 0x80000000) {
+		MIPI_OUTP(ctrl_base + DSI_STATUS, status);
+		pr_err("%s: status=%x\n", __func__, status);
+	}
+}
+
+void msm_dsi_error(unsigned char *ctrl_base)
+{
+	msm_dsi_ack_err_status(ctrl_base);
+	msm_dsi_timeout_status(ctrl_base);
+	msm_dsi_fifo_status(ctrl_base);
+	msm_dsi_status(ctrl_base);
+	msm_dsi_dln0_phy_err(ctrl_base);
+}
+
+static void msm_dsi_set_irq_mask(struct mdss_dsi_ctrl_pdata *ctrl, u32 mask)
+{
+	u32 intr_ctrl;
+
+	intr_ctrl = MIPI_INP(dsi_host_private->dsi_base + DSI_INT_CTRL);
+	intr_ctrl |= mask;
+	MIPI_OUTP(dsi_host_private->dsi_base + DSI_INT_CTRL, intr_ctrl);
+}
+
+static void msm_dsi_clear_irq_mask(struct mdss_dsi_ctrl_pdata *ctrl, u32 mask)
+{
+	u32 intr_ctrl;
+
+	intr_ctrl = MIPI_INP(dsi_host_private->dsi_base + DSI_INT_CTRL);
+	intr_ctrl &= ~mask;
+	MIPI_OUTP(dsi_host_private->dsi_base + DSI_INT_CTRL, intr_ctrl);
+}
+
+static void msm_dsi_set_irq(struct mdss_dsi_ctrl_pdata *ctrl, u32 mask)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctrl->irq_lock, flags);
+	if (ctrl->dsi_irq_mask & mask) {
+		spin_unlock_irqrestore(&ctrl->irq_lock, flags);
+		return;
+	}
+	if (ctrl->dsi_irq_mask == 0) {
+		ctrl->mdss_util->enable_irq(ctrl->dsi_hw);
+		pr_debug("%s: IRQ Enable, mask=%x term=%x\n", __func__,
+			(int)ctrl->dsi_irq_mask, (int)mask);
+	}
+
+	msm_dsi_set_irq_mask(ctrl, mask);
+	ctrl->dsi_irq_mask |= mask;
+	spin_unlock_irqrestore(&ctrl->irq_lock, flags);
+}
+
+static void msm_dsi_clear_irq(struct mdss_dsi_ctrl_pdata *ctrl, u32 mask)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctrl->irq_lock, flags);
+	if (!(ctrl->dsi_irq_mask & mask)) {
+		spin_unlock_irqrestore(&ctrl->irq_lock, flags);
+		return;
+	}
+	ctrl->dsi_irq_mask &= ~mask;
+	if (ctrl->dsi_irq_mask == 0) {
+		ctrl->mdss_util->disable_irq(ctrl->dsi_hw);
+		pr_debug("%s: IRQ Disable, mask=%x term=%x\n", __func__,
+			(int)ctrl->dsi_irq_mask, (int)mask);
+	}
+	msm_dsi_clear_irq_mask(ctrl, mask);
+	spin_unlock_irqrestore(&ctrl->irq_lock, flags);
+}
+
+irqreturn_t msm_dsi_isr_handler(int irq, void *ptr)
+{
+	u32 isr;
+
+	struct mdss_dsi_ctrl_pdata *ctrl =
+		(struct mdss_dsi_ctrl_pdata *)ptr;
+
+	spin_lock(&ctrl->mdp_lock);
+
+	if (ctrl->dsi_irq_mask == 0) {
+		spin_unlock(&ctrl->mdp_lock);
+		return IRQ_HANDLED;
+	}
+
+	isr = MIPI_INP(dsi_host_private->dsi_base + DSI_INT_CTRL);
+	MIPI_OUTP(dsi_host_private->dsi_base + DSI_INT_CTRL, isr);
+
+	pr_debug("%s: isr=%x", __func__, isr);
+
+	if (isr & DSI_INTR_ERROR) {
+		pr_err("%s: isr=%x %x", __func__, isr, (int)DSI_INTR_ERROR);
+		msm_dsi_error(dsi_host_private->dsi_base);
+	}
+
+	if (isr & DSI_INTR_VIDEO_DONE)
+		complete(&ctrl->video_comp);
+
+	if (isr & DSI_INTR_CMD_DMA_DONE)
+		complete(&ctrl->dma_comp);
+
+	if (isr & DSI_INTR_BTA_DONE)
+		complete(&ctrl->bta_comp);
+
+	if (isr & DSI_INTR_CMD_MDP_DONE)
+		complete(&ctrl->mdp_comp);
+
+	spin_unlock(&ctrl->mdp_lock);
+
+	return IRQ_HANDLED;
+}
+
+int msm_dsi_irq_init(struct device *dev, int irq_no,
+			struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	int ret;
+	u32 isr;
+	struct mdss_hw *dsi_hw;
+
+	msm_dsi_ahb_ctrl(1);
+	isr = MIPI_INP(dsi_host_private->dsi_base + DSI_INT_CTRL);
+	isr &= ~DSI_INTR_ALL_MASK;
+	MIPI_OUTP(dsi_host_private->dsi_base + DSI_INT_CTRL, isr);
+	msm_dsi_ahb_ctrl(0);
+
+	ret = devm_request_irq(dev, irq_no, msm_dsi_isr_handler,
+				IRQF_DISABLED, "DSI", ctrl);
+	if (ret) {
+		pr_err("msm_dsi_irq_init request_irq() failed!\n");
+		return ret;
+	}
+
+	dsi_hw = kzalloc(sizeof(struct mdss_hw), GFP_KERNEL);
+	if (!dsi_hw)
+		return -ENOMEM;
+
+	ctrl->dsi_hw = dsi_hw;
+
+	dsi_hw->irq_info = kzalloc(sizeof(struct irq_info), GFP_KERNEL);
+	if (!dsi_hw->irq_info) {
+		kfree(dsi_hw);
+		pr_err("no mem to save irq info: kzalloc fail\n");
+		return -ENOMEM;
+	}
+
+	dsi_hw->hw_ndx = MDSS_HW_DSI0;
+	dsi_hw->irq_info->irq = irq_no;
+	dsi_hw->irq_info->irq_mask = 0;
+	dsi_hw->irq_info->irq_ena = false;
+	dsi_hw->irq_info->irq_buzy = false;
+
+	ctrl->mdss_util->register_irq(ctrl->dsi_hw);
+	ctrl->mdss_util->disable_irq(ctrl->dsi_hw);
+
+	return 0;
+}
+
+static void msm_dsi_get_cmd_engine(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	unsigned char *ctrl_base = dsi_host_private->dsi_base;
+	u32 dsi_ctrl;
+
+	if (ctrl->panel_mode == DSI_VIDEO_MODE) {
+		dsi_ctrl = MIPI_INP(ctrl_base + DSI_CTRL);
+		MIPI_OUTP(ctrl_base + DSI_CTRL, dsi_ctrl | 0x04);
+	}
+}
+
+static void msm_dsi_release_cmd_engine(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	unsigned char *ctrl_base = dsi_host_private->dsi_base;
+	u32 dsi_ctrl;
+
+	if (ctrl->panel_mode == DSI_VIDEO_MODE) {
+		dsi_ctrl = MIPI_INP(ctrl_base + DSI_CTRL);
+		dsi_ctrl &= ~0x04;
+		MIPI_OUTP(ctrl_base + DSI_CTRL, dsi_ctrl);
+	}
+}
+
+static int msm_dsi_wait4mdp_done(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	int rc;
+	unsigned long flag;
+
+	spin_lock_irqsave(&ctrl->mdp_lock, flag);
+	reinit_completion(&ctrl->mdp_comp);
+	msm_dsi_set_irq(ctrl, DSI_INTR_CMD_MDP_DONE_MASK);
+	spin_unlock_irqrestore(&ctrl->mdp_lock, flag);
+
+	rc = wait_for_completion_timeout(&ctrl->mdp_comp,
+			msecs_to_jiffies(VSYNC_PERIOD * 4));
+
+	if (rc == 0) {
+		pr_err("DSI wait 4 mdp done time out\n");
+		rc = -ETIME;
+	} else if (!IS_ERR_VALUE(rc)) {
+		rc = 0;
+	}
+
+	msm_dsi_clear_irq(ctrl, DSI_INTR_CMD_MDP_DONE_MASK);
+
+	return rc;
+}
+
+void msm_dsi_cmd_mdp_busy(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	int rc;
+	u32 dsi_status;
+	unsigned char *ctrl_base = dsi_host_private->dsi_base;
+
+	if (ctrl->panel_mode == DSI_VIDEO_MODE)
+		return;
+
+	dsi_status = MIPI_INP(ctrl_base + DSI_STATUS);
+	if (dsi_status & 0x04) {
+		pr_debug("dsi command engine is busy\n");
+		rc = msm_dsi_wait4mdp_done(ctrl);
+		if (rc)
+			pr_err("Timed out waiting for mdp done");
+	}
+}
+
+static int msm_dsi_wait4video_done(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	int rc;
+	unsigned long flag;
+
+	spin_lock_irqsave(&ctrl->mdp_lock, flag);
+	reinit_completion(&ctrl->video_comp);
+	msm_dsi_set_irq(ctrl, DSI_INTR_VIDEO_DONE_MASK);
+	spin_unlock_irqrestore(&ctrl->mdp_lock, flag);
+
+	rc = wait_for_completion_timeout(&ctrl->video_comp,
+				msecs_to_jiffies(VSYNC_PERIOD * 4));
+
+	if (rc == 0) {
+		pr_err("DSI wait 4 video done time out\n");
+		rc = -ETIME;
+	} else if (!IS_ERR_VALUE(rc)) {
+		rc = 0;
+	}
+
+	msm_dsi_clear_irq(ctrl, DSI_INTR_VIDEO_DONE_MASK);
+
+	return rc;
+}
+
+static int msm_dsi_wait4video_eng_busy(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	int rc = 0;
+	u32 dsi_status;
+	unsigned char *ctrl_base = dsi_host_private->dsi_base;
+
+	if (ctrl->panel_mode == DSI_CMD_MODE)
+		return rc;
+
+	dsi_status = MIPI_INP(ctrl_base + DSI_STATUS);
+	if (dsi_status & 0x08) {
+		pr_debug("dsi command in video mode wait for active region\n");
+		rc = msm_dsi_wait4video_done(ctrl);
+		/* delay 4-5 ms to skip BLLP */
+		if (!rc)
+			usleep_range(4000, 5000);
+	}
+	return rc;
+}
+
+void msm_dsi_host_init(struct mdss_panel_data *pdata)
+{
+	u32 dsi_ctrl, data;
+	unsigned char *ctrl_base = dsi_host_private->dsi_base;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	struct mipi_panel_info *pinfo;
+
+	pr_debug("msm_dsi_host_init\n");
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+	pinfo  = &pdata->panel_info.mipi;
+
+
+	if (pinfo->mode == DSI_VIDEO_MODE) {
+		data = 0;
+		if (pinfo->pulse_mode_hsa_he)
+			data |= BIT(28);
+		if (pinfo->hfp_power_stop)
+			data |= BIT(24);
+		if (pinfo->hbp_power_stop)
+			data |= BIT(20);
+		if (pinfo->hsa_power_stop)
+			data |= BIT(16);
+		if (pinfo->eof_bllp_power_stop)
+			data |= BIT(15);
+		if (pinfo->bllp_power_stop)
+			data |= BIT(12);
+		data |= ((pinfo->traffic_mode & 0x03) << 8);
+		data |= ((pinfo->dst_format & 0x03) << 4); /* 2 bits */
+		data |= (pinfo->vc & 0x03);
+		MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_CTRL, data);
+
+		data = 0;
+		data |= ((pinfo->rgb_swap & 0x07) << 12);
+		if (pinfo->b_sel)
+			data |= BIT(8);
+		if (pinfo->g_sel)
+			data |= BIT(4);
+		if (pinfo->r_sel)
+			data |= BIT(0);
+		MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_DATA_CTRL, data);
+	} else if (pinfo->mode == DSI_CMD_MODE) {
+		data = 0;
+		data |= ((pinfo->interleave_max & 0x0f) << 20);
+		data |= ((pinfo->rgb_swap & 0x07) << 16);
+		if (pinfo->b_sel)
+			data |= BIT(12);
+		if (pinfo->g_sel)
+			data |= BIT(8);
+		if (pinfo->r_sel)
+			data |= BIT(4);
+		data |= (pinfo->dst_format & 0x0f); /* 4 bits */
+		MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_MDP_CTRL, data);
+
+		/* DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL */
+		data = pinfo->wr_mem_continue & 0x0ff;
+		data <<= 8;
+		data |= (pinfo->wr_mem_start & 0x0ff);
+		if (pinfo->insert_dcs_cmd)
+			data |= BIT(16);
+		MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL,
+				data);
+	} else
+		pr_err("%s: Unknown DSI mode=%d\n", __func__, pinfo->mode);
+
+	dsi_ctrl = BIT(8) | BIT(2); /* clock enable & cmd mode */
+
+	if (pinfo->crc_check)
+		dsi_ctrl |= BIT(24);
+	if (pinfo->ecc_check)
+		dsi_ctrl |= BIT(20);
+	if (pinfo->data_lane3)
+		dsi_ctrl |= BIT(7);
+	if (pinfo->data_lane2)
+		dsi_ctrl |= BIT(6);
+	if (pinfo->data_lane1)
+		dsi_ctrl |= BIT(5);
+	if (pinfo->data_lane0)
+		dsi_ctrl |= BIT(4);
+
+	/* from frame buffer, low power mode */
+	/* DSI_COMMAND_MODE_DMA_CTRL */
+	MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_DMA_CTRL, 0x14000000);
+
+	data = 0;
+	if (pinfo->te_sel)
+		data |= BIT(31);
+	data |= pinfo->mdp_trigger << 4;/* cmd mdp trigger */
+	data |= pinfo->dma_trigger;	/* cmd dma trigger */
+	data |= (pinfo->stream & 0x01) << 8;
+	MIPI_OUTP(ctrl_base + DSI_TRIG_CTRL, data);
+
+	/* DSI_LAN_SWAP_CTRL */
+	MIPI_OUTP(ctrl_base + DSI_LANE_SWAP_CTRL, ctrl_pdata->dlane_swap);
+
+	/* clock out ctrl */
+	data = pinfo->t_clk_post & 0x3f;	/* 6 bits */
+	data <<= 8;
+	data |= pinfo->t_clk_pre & 0x3f;	/*  6 bits */
+	/* DSI_CLKOUT_TIMING_CTRL */
+	MIPI_OUTP(ctrl_base + DSI_CLKOUT_TIMING_CTRL, data);
+
+	data = 0;
+	if (pinfo->rx_eot_ignore)
+		data |= BIT(4);
+	if (pinfo->tx_eot_append)
+		data |= BIT(0);
+	MIPI_OUTP(ctrl_base + DSI_EOT_PACKET_CTRL, data);
+
+
+	/* allow only ack-err-status  to generate interrupt */
+	/* DSI_ERR_INT_MASK0 */
+	MIPI_OUTP(ctrl_base + DSI_ERR_INT_MASK0, 0x13ff3fe0);
+
+	/* turn esc, byte, dsi, pclk, sclk, hclk on */
+	MIPI_OUTP(ctrl_base + DSI_CLK_CTRL, 0x23f);
+
+	dsi_ctrl |= BIT(0);	/* enable dsi */
+	MIPI_OUTP(ctrl_base + DSI_CTRL, dsi_ctrl);
+
+	wmb(); /* ensure write is finished before progressing */
+}
+
+void dsi_set_tx_power_mode(int mode)
+{
+	u32 data;
+	unsigned char *ctrl_base = dsi_host_private->dsi_base;
+
+	data = MIPI_INP(ctrl_base + DSI_COMMAND_MODE_DMA_CTRL);
+
+	if (mode == 0)
+		data &= ~BIT(26);
+	else
+		data |= BIT(26);
+
+	MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_DMA_CTRL, data);
+}
+
+void msm_dsi_sw_reset(void)
+{
+	u32 dsi_ctrl;
+	unsigned char *ctrl_base = dsi_host_private->dsi_base;
+
+	pr_debug("msm_dsi_sw_reset\n");
+
+	dsi_ctrl = MIPI_INP(ctrl_base + DSI_CTRL);
+	dsi_ctrl &= ~0x01;
+	MIPI_OUTP(ctrl_base + DSI_CTRL, dsi_ctrl);
+	wmb(); /* ensure write is finished before progressing */
+
+	/* turn esc, byte, dsi, pclk, sclk, hclk on */
+	MIPI_OUTP(ctrl_base + DSI_CLK_CTRL, 0x23f);
+	wmb(); /* ensure write is finished before progressing */
+
+	MIPI_OUTP(ctrl_base + DSI_SOFT_RESET, 0x01);
+	wmb(); /* ensure write is finished before progressing */
+	MIPI_OUTP(ctrl_base + DSI_SOFT_RESET, 0x00);
+	wmb(); /* ensure write is finished before progressing */
+}
+
+/*
+ * msm_dsi_controller_cfg() - enable or disable the DSI controller.
+ * Waits for command DMA, the HS FIFOs and the video engine to go idle
+ * before flipping the enable bit; a stuck engine gets a soft reset.
+ */
+void msm_dsi_controller_cfg(int enable)
+{
+	u32 dsi_ctrl, status;
+	unsigned char *ctrl_base = dsi_host_private->dsi_base;
+
+	pr_debug("msm_dsi_controller_cfg\n");
+
+	/* Check for CMD_MODE_DMA_BUSY */
+	if (readl_poll_timeout((ctrl_base + DSI_STATUS),
+				status,
+				((status & 0x02) == 0),
+				DSI_POLL_SLEEP_US, DSI_POLL_TIMEOUT_US)) {
+		pr_err("%s: DSI status=%x failed\n", __func__, status);
+		pr_err("%s: Doing sw reset\n", __func__);
+		msm_dsi_sw_reset();
+	}
+
+	/* Check for x_HS_FIFO_EMPTY */
+	if (readl_poll_timeout((ctrl_base + DSI_FIFO_STATUS),
+				status,
+				((status & 0x11111000) == 0x11111000),
+				DSI_POLL_SLEEP_US, DSI_POLL_TIMEOUT_US))
+		pr_err("%s: FIFO status=%x failed\n", __func__, status);
+
+	/* Check for VIDEO_MODE_ENGINE_BUSY */
+	if (readl_poll_timeout((ctrl_base + DSI_STATUS),
+				status,
+				((status & 0x08) == 0),
+				DSI_POLL_SLEEP_US, DSI_POLL_TIMEOUT_US)) {
+		pr_err("%s: DSI status=%x\n", __func__, status);
+		pr_err("%s: Doing sw reset\n", __func__);
+		msm_dsi_sw_reset();
+	}
+
+	/* Engines idle (or reset): now toggle the controller enable bit. */
+	dsi_ctrl = MIPI_INP(ctrl_base + DSI_CTRL);
+	if (enable)
+		dsi_ctrl |= 0x01;
+	else
+		dsi_ctrl &= ~0x01;
+
+	MIPI_OUTP(ctrl_base + DSI_CTRL, dsi_ctrl);
+	wmb(); /* ensure write is finished before progressing */
+}
+
+/*
+ * msm_dsi_op_mode_config() - switch the controller between video and
+ * command operating modes, re-enabling it with the requested engines.
+ */
+void msm_dsi_op_mode_config(int mode, struct mdss_panel_data *pdata)
+{
+	u32 dsi_ctrl;
+	unsigned char *ctrl_base = dsi_host_private->dsi_base;
+
+	pr_debug("msm_dsi_op_mode_config\n");
+
+	dsi_ctrl = MIPI_INP(ctrl_base + DSI_CTRL);
+
+	/* Drop the engines that will be reprogrammed below. */
+	if (dsi_ctrl & DSI_VIDEO_MODE_EN)
+		dsi_ctrl &= ~(DSI_CMD_MODE_EN|DSI_EN);
+	else
+		dsi_ctrl &= ~(DSI_CMD_MODE_EN|DSI_VIDEO_MODE_EN|DSI_EN);
+
+	if (mode == DSI_VIDEO_MODE) {
+		dsi_ctrl |= (DSI_VIDEO_MODE_EN|DSI_EN);
+	} else {
+		dsi_ctrl |= (DSI_CMD_MODE_EN|DSI_EN);
+		/* For Video mode panel, keep Video and Cmd mode ON */
+		if (pdata->panel_info.type == MIPI_VIDEO_PANEL)
+			dsi_ctrl |= DSI_VIDEO_MODE_EN;
+	}
+
+	pr_debug("%s: dsi_ctrl=%x\n", __func__, dsi_ctrl);
+
+	MIPI_OUTP(ctrl_base + DSI_CTRL, dsi_ctrl);
+	wmb(); /* ensure write is finished before progressing */
+}
+
+/*
+ * msm_dsi_cmd_dma_tx() - DMA one packed command buffer to the panel.
+ * Maps tp->data for device access, arms the DMA-done interrupt,
+ * triggers the transfer and waits for completion.
+ * Returns 0 on success, -ETIME on timeout, -ENOMEM on mapping failure.
+ */
+int msm_dsi_cmd_dma_tx(struct mdss_dsi_ctrl_pdata *ctrl,
+				struct dsi_buf *tp)
+{
+	int len, rc;
+	unsigned long size, addr;
+	unsigned char *ctrl_base = dsi_host_private->dsi_base;
+	unsigned long flag;
+
+	len = ALIGN(tp->len, 4);
+	size = ALIGN(tp->len, SZ_4K);
+
+	tp->dmap = dma_map_single(&dsi_host_private->dis_dev, tp->data, size,
+				DMA_TO_DEVICE);
+	if (dma_mapping_error(&dsi_host_private->dis_dev, tp->dmap)) {
+		pr_err("%s: dmap mapp failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	addr = tp->dmap;
+
+	msm_dsi_get_cmd_engine(ctrl);
+
+	/* Arm the completion and unmask DMA-done before triggering. */
+	spin_lock_irqsave(&ctrl->mdp_lock, flag);
+	reinit_completion(&ctrl->dma_comp);
+	msm_dsi_set_irq(ctrl, DSI_INTR_CMD_DMA_DONE_MASK);
+	spin_unlock_irqrestore(&ctrl->mdp_lock, flag);
+
+	MIPI_OUTP(ctrl_base + DSI_DMA_CMD_OFFSET, addr);
+	MIPI_OUTP(ctrl_base + DSI_DMA_CMD_LENGTH, len);
+	wmb(); /* ensure write is finished before progressing */
+
+	MIPI_OUTP(ctrl_base + DSI_CMD_MODE_DMA_SW_TRIGGER, 0x01);
+	wmb(); /* ensure write is finished before progressing */
+
+	rc = wait_for_completion_timeout(&ctrl->dma_comp,
+				msecs_to_jiffies(DSI_DMA_CMD_TIMEOUT_MS));
+	if (rc == 0) {
+		pr_err("DSI command transaction time out\n");
+		rc = -ETIME;
+	} else if (!IS_ERR_VALUE(rc)) {
+		rc = 0; /* completed within the timeout */
+	}
+
+	dma_unmap_single(&dsi_host_private->dis_dev, tp->dmap, size,
+			DMA_TO_DEVICE);
+	tp->dmap = 0;
+
+	msm_dsi_clear_irq(ctrl, DSI_INTR_CMD_DMA_DONE_MASK);
+
+	msm_dsi_release_cmd_engine(ctrl);
+
+	return rc;
+}
+
+/*
+ * msm_dsi_cmd_dma_rx() - copy a read response out of the RDBK registers.
+ * At most 16 bytes (4 x 32-bit registers) are consumed, highest offset
+ * first; returns the caller-requested rlen.
+ */
+int msm_dsi_cmd_dma_rx(struct mdss_dsi_ctrl_pdata *ctrl,
+			struct dsi_buf *rp, int rlen)
+{
+	u32 *lp, data;
+	int i, off, cnt;
+	unsigned char *ctrl_base = dsi_host_private->dsi_base;
+
+	lp = (u32 *)rp->data;
+	cnt = rlen;
+	cnt += 3;
+	cnt >>= 2;	/* round up to whole 32-bit words */
+
+	if (cnt > 4)
+		cnt = 4; /* 4 x 32 bits registers only */
+
+	off = DSI_RDBK_DATA0;
+	off += ((cnt - 1) * 4);
+
+	for (i = 0; i < cnt; i++) {
+		data = (u32)MIPI_INP(ctrl_base + off);
+		*lp++ = ntohl(data); /* byte-swap big-endian word to host order */
+		pr_debug("%s: data = 0x%x and ntohl(data) = 0x%x\n",
+					 __func__, data, ntohl(data));
+		off -= 4;
+		rp->len += sizeof(*lp);
+	}
+
+	return rlen;
+}
+
+/*
+ * msm_dsi_cmds_tx() - send an array of DSI commands via DMA.
+ * Commands are packed into ctrl->tx_buf and flushed each time a
+ * descriptor marked "last" is reached.  Returns 0 on success or a
+ * negative error code.
+ */
+static int msm_dsi_cmds_tx(struct mdss_dsi_ctrl_pdata *ctrl,
+			struct dsi_cmd_desc *cmds, int cnt)
+{
+	struct dsi_buf *tp;
+	struct dsi_cmd_desc *cm;
+	struct dsi_ctrl_hdr *dchdr;
+	int len;
+	int rc = 0;
+
+	tp = &ctrl->tx_buf;
+	mdss_dsi_buf_init(tp);
+	cm = cmds;
+	len = 0;
+	while (cnt--) {
+		dchdr = &cm->dchdr;
+		mdss_dsi_buf_reserve(tp, len);
+		len = mdss_dsi_cmd_dma_add(tp, cm);
+		if (!len) {
+			pr_err("%s: failed to add cmd = 0x%x\n",
+				__func__,  cm->payload[0]);
+			rc = -EINVAL;
+			goto dsi_cmds_tx_err;
+		}
+
+		if (dchdr->last) {
+			tp->data = tp->start; /* begin of buf */
+			rc = msm_dsi_wait4video_eng_busy(ctrl);
+			if (rc) {
+				pr_err("%s: wait4video_eng failed\n", __func__);
+				goto dsi_cmds_tx_err;
+			}
+
+			rc = msm_dsi_cmd_dma_tx(ctrl, tp);
+			/* check the DMA result (rc), not the stale packet length */
+			if (IS_ERR_VALUE(rc)) {
+				pr_err("%s: failed to call cmd_dma_tx for cmd = 0x%x\n",
+					__func__,  cmds->payload[0]);
+				goto dsi_cmds_tx_err;
+			}
+
+			if (dchdr->wait)
+				usleep_range(dchdr->wait * 1000,
+					     dchdr->wait * 1000);
+
+			mdss_dsi_buf_init(tp);
+			len = 0;
+		}
+		cm++;
+	}
+
+dsi_cmds_tx_err:
+	return rc;
+}
+
+/* Decode the DSI response type in rp->data[0] and unpack the payload. */
+static int msm_dsi_parse_rx_response(struct dsi_buf *rp)
+{
+	unsigned char dtype = rp->data[0];
+	int status = 0;
+
+	switch (dtype) {
+	case DTYPE_ACK_ERR_RESP:
+		pr_debug("%s: rx ACK_ERR_PACLAGE\n", __func__);
+		status = -EINVAL;
+		break;
+	case DTYPE_GEN_READ1_RESP:
+	case DTYPE_DCS_READ1_RESP:
+		mdss_dsi_short_read1_resp(rp);
+		break;
+	case DTYPE_GEN_READ2_RESP:
+	case DTYPE_DCS_READ2_RESP:
+		mdss_dsi_short_read2_resp(rp);
+		break;
+	case DTYPE_GEN_LREAD_RESP:
+	case DTYPE_DCS_LREAD_RESP:
+		mdss_dsi_long_read_resp(rp);
+		break;
+	default:
+		status = -EINVAL;
+		pr_warn("%s: Unknown cmd received\n", __func__);
+		break;
+	}
+
+	return status;
+}
+
+/* MIPI_DSI_MRPS, Maximum Return Packet Size */
+/* Payload for the set-max-return-packet-size command; byte 0 is patched
+ * at runtime by msm_dsi_set_max_packet_size().
+ */
+static char max_pktsize[2] = {0x00, 0x00}; /* LSB tx first, 10 bytes */
+
+static struct dsi_cmd_desc pkt_size_cmd = {
+	{DTYPE_MAX_PKTSIZE, 1, 0, 0, 0, sizeof(max_pktsize)},
+	max_pktsize,
+};
+
+/*
+ * Send the MIPI set-maximum-return-packet-size command so subsequent
+ * reads may return up to @size bytes per response packet.
+ */
+static int msm_dsi_set_max_packet_size(struct mdss_dsi_ctrl_pdata *ctrl,
+						int size)
+{
+	struct dsi_buf *tp;
+	int rc;
+
+	tp = &ctrl->tx_buf;
+	mdss_dsi_buf_init(tp);
+	max_pktsize[0] = size;
+
+	rc = mdss_dsi_cmd_dma_add(tp, &pkt_size_cmd);
+	if (!rc) {
+		pr_err("%s: failed to add max_pkt_size\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = msm_dsi_wait4video_eng_busy(ctrl);
+	if (rc) {
+		pr_err("%s: failed to wait4video_eng\n", __func__);
+		return rc;
+	}
+
+	rc = msm_dsi_cmd_dma_tx(ctrl, tp);
+	if (IS_ERR_VALUE(rc)) {
+		pr_err("%s: failed to tx max_pkt_size\n", __func__);
+		return rc;
+	}
+	pr_debug("%s: max_pkt_size=%d sent\n", __func__, size);
+	return rc;
+}
+
+/* read data length is less than or equal to 10 bytes*/
+/*
+ * Single-shot read: send the read command, pull the response from the
+ * readback registers and parse it in place in ctrl->rx_buf.
+ */
+static int msm_dsi_cmds_rx_1(struct mdss_dsi_ctrl_pdata *ctrl,
+				struct dsi_cmd_desc *cmds, int rlen)
+{
+	int rc;
+	struct dsi_buf *tp, *rp;
+
+	tp = &ctrl->tx_buf;
+	rp = &ctrl->rx_buf;
+	mdss_dsi_buf_init(rp);
+	mdss_dsi_buf_init(tp);
+
+	rc = mdss_dsi_cmd_dma_add(tp, cmds);
+	if (!rc) {
+		pr_err("%s: dsi_cmd_dma_add failed\n", __func__);
+		rc = -EINVAL;
+		goto dsi_cmds_rx_1_error;
+	}
+
+	rc = msm_dsi_wait4video_eng_busy(ctrl);
+	if (rc) {
+		pr_err("%s: wait4video_eng failed\n", __func__);
+		goto dsi_cmds_rx_1_error;
+	}
+
+	rc = msm_dsi_cmd_dma_tx(ctrl, tp);
+	if (IS_ERR_VALUE(rc)) {
+		pr_err("%s: msm_dsi_cmd_dma_tx failed\n", __func__);
+		goto dsi_cmds_rx_1_error;
+	}
+
+	/* Longer responses also carry a host header that must be read. */
+	if (rlen <= DSI_SHORT_PKT_DATA_SIZE) {
+		msm_dsi_cmd_dma_rx(ctrl, rp, rlen);
+	} else {
+		msm_dsi_cmd_dma_rx(ctrl, rp, rlen + DSI_HOST_HDR_SIZE);
+		rp->len = rlen + DSI_HOST_HDR_SIZE;
+	}
+	rc = msm_dsi_parse_rx_response(rp);
+
+dsi_cmds_rx_1_error:
+	if (rc)
+		rp->len = 0;
+
+	return rc;
+}
+
+/* read data length is more than 10 bytes, which requires multiple DSI read*/
+/*
+ * Chunked read: repeatedly raise the maximum return packet size and
+ * re-issue the read command, appending each chunk to ctrl->rx_buf until
+ * rlen bytes have been collected, then parse the assembled response.
+ */
+static int msm_dsi_cmds_rx_2(struct mdss_dsi_ctrl_pdata *ctrl,
+				struct dsi_cmd_desc *cmds, int rlen)
+{
+	int rc;
+	struct dsi_buf *tp, *rp;
+	int pkt_size, data_bytes, total;
+
+	tp = &ctrl->tx_buf;
+	rp = &ctrl->rx_buf;
+	mdss_dsi_buf_init(rp);
+	pkt_size = DSI_MAX_PKT_SIZE;
+	data_bytes = MDSS_DSI_LEN;
+	total = 0;
+
+	while (true) {
+		rc = msm_dsi_set_max_packet_size(ctrl, pkt_size);
+		if (rc)
+			break;
+
+		mdss_dsi_buf_init(tp);
+		rc = mdss_dsi_cmd_dma_add(tp, cmds);
+		if (!rc) {
+			pr_err("%s: dsi_cmd_dma_add failed\n", __func__);
+			rc = -EINVAL;
+			break;
+		}
+		rc = msm_dsi_wait4video_eng_busy(ctrl);
+		if (rc) {
+			pr_err("%s: wait4video_eng failed\n", __func__);
+			break;
+		}
+
+		rc = msm_dsi_cmd_dma_tx(ctrl, tp);
+		if (IS_ERR_VALUE(rc)) {
+			pr_err("%s: msm_dsi_cmd_dma_tx failed\n", __func__);
+			break;
+		}
+
+		msm_dsi_cmd_dma_rx(ctrl, rp, DSI_MAX_BYTES_TO_READ);
+
+		/* advance past this chunk, keeping the host header overlap */
+		rp->data += DSI_MAX_BYTES_TO_READ - DSI_HOST_HDR_SIZE;
+		total += data_bytes;
+		if (total >= rlen)
+			break;
+
+		data_bytes = DSI_MAX_BYTES_TO_READ - DSI_HOST_HDR_SIZE;
+		pkt_size += data_bytes;
+	}
+
+	if (!rc) {
+		rp->data = rp->start;
+		rp->len = rlen + DSI_HOST_HDR_SIZE;
+		rc = msm_dsi_parse_rx_response(rp);
+	}
+
+	if (rc)
+		rp->len = 0;
+
+	return rc;
+}
+
+/* Dispatch a panel read: short responses in one shot, long ones chunked. */
+int msm_dsi_cmds_rx(struct mdss_dsi_ctrl_pdata *ctrl,
+			struct dsi_cmd_desc *cmds, int rlen)
+{
+	return (rlen <= DSI_MAX_PKT_SIZE) ?
+		msm_dsi_cmds_rx_1(ctrl, cmds, rlen) :
+		msm_dsi_cmds_rx_2(ctrl, cmds, rlen);
+}
+
+/* Execute a queued transmit request and report the result via req->cb. */
+void msm_dsi_cmdlist_tx(struct mdss_dsi_ctrl_pdata *ctrl,
+				struct dcs_cmd_req *req)
+{
+	int status = msm_dsi_cmds_tx(ctrl, req->cmds, req->cmds_cnt);
+
+	if (req->cb)
+		req->cb(status);
+}
+
+/* Execute a queued read request and copy the response into req->rbuf. */
+void msm_dsi_cmdlist_rx(struct mdss_dsi_ctrl_pdata *ctrl,
+				struct dcs_cmd_req *req)
+{
+	struct dsi_buf *rp;
+	int len = 0;
+
+	if (req->rbuf) {
+		rp = &ctrl->rx_buf;
+		len = msm_dsi_cmds_rx(ctrl, req->cmds, req->rlen);
+		/* rp->len is zeroed on failure, making this copy a no-op */
+		memcpy(req->rbuf, rp->data, rp->len);
+	} else {
+		pr_err("%s: No rx buffer provided\n", __func__);
+	}
+
+	if (req->cb)
+		req->cb(len);
+}
+/*
+ * msm_dsi_cmdlist_commit() - dequeue and execute one pending DCS request.
+ * cmd_mutex is held only for MDP-kickoff callers (from_mdp != 0), so the
+ * early-exit path must only unlock the mutex it actually took.
+ * Returns 0 on success, -EINVAL if DSI is off or no request is queued.
+ */
+int msm_dsi_cmdlist_commit(struct mdss_dsi_ctrl_pdata *ctrl, int from_mdp)
+{
+	struct dcs_cmd_req *req;
+	int dsi_on;
+	int ret = -EINVAL;
+
+	mutex_lock(&ctrl->mutex);
+	dsi_on = dsi_host_private->dsi_on;
+	mutex_unlock(&ctrl->mutex);
+	if (!dsi_on) {
+		pr_err("try to send DSI commands while dsi is off\n");
+		return ret;
+	}
+
+	if (from_mdp)	/* from mdp kickoff */
+		mutex_lock(&ctrl->cmd_mutex);
+	req = mdss_dsi_cmdlist_get(ctrl, from_mdp);
+
+	if (!req) {
+		/* only release the lock taken above */
+		if (from_mdp)
+			mutex_unlock(&ctrl->cmd_mutex);
+		return ret;
+	}
+	/*
+	 * mdss interrupt is generated in mdp core clock domain
+	 * mdp clock need to be enabled to receive dsi interrupt
+	 * also, axi bus bandwidth need since dsi controller will
+	 * fetch dcs commands from axi bus
+	 */
+	mdp3_res_update(1, 1, MDP3_CLIENT_DMA_P);
+	msm_dsi_clk_ctrl(&ctrl->panel_data, 1);
+
+	if (0 == (req->flags & CMD_REQ_LP_MODE))
+		dsi_set_tx_power_mode(0);
+
+	if (req->flags & CMD_REQ_RX)
+		msm_dsi_cmdlist_rx(ctrl, req);
+	else
+		msm_dsi_cmdlist_tx(ctrl, req);
+
+	if (0 == (req->flags & CMD_REQ_LP_MODE))
+		dsi_set_tx_power_mode(1);
+
+	msm_dsi_clk_ctrl(&ctrl->panel_data, 0);
+	mdp3_res_update(0, 1, MDP3_CLIENT_DMA_P);
+
+	if (from_mdp)	/* from mdp kickoff */
+		mutex_unlock(&ctrl->cmd_mutex);
+	return 0;
+}
+
+/*
+ * msm_dsi_cal_clk_rate() - derive bit, byte, dsi and pixel clock rates
+ * from the panel timing, frame rate, bpp and active lane count.
+ * Returns -EINVAL when no data lanes are enabled.
+ */
+static int msm_dsi_cal_clk_rate(struct mdss_panel_data *pdata,
+				u64 *bitclk_rate,
+				u32 *dsiclk_rate,
+				u32 *byteclk_rate,
+				u32 *pclk_rate)
+{
+	struct mdss_panel_info *pinfo;
+	struct mipi_panel_info *mipi;
+	u32 hbp, hfp, vbp, vfp, hspw, vspw, width, height;
+	int lanes;
+	u64 clk_rate;
+
+	pinfo = &pdata->panel_info;
+	mipi  = &pdata->panel_info.mipi;
+
+	hbp = pdata->panel_info.lcdc.h_back_porch;
+	hfp = pdata->panel_info.lcdc.h_front_porch;
+	vbp = pdata->panel_info.lcdc.v_back_porch;
+	vfp = pdata->panel_info.lcdc.v_front_porch;
+	hspw = pdata->panel_info.lcdc.h_pulse_width;
+	vspw = pdata->panel_info.lcdc.v_pulse_width;
+	width = pdata->panel_info.xres;
+	height = pdata->panel_info.yres;
+
+	lanes = 0;
+	if (mipi->data_lane0)
+		lanes++;
+	if (mipi->data_lane1)
+		lanes++;
+	if (mipi->data_lane2)
+		lanes++;
+	if (mipi->data_lane3)
+		lanes++;
+	if (lanes == 0)
+		return -EINVAL;
+
+	/* NOTE(review): h-total * v-total is multiplied in 32 bits before
+	 * widening to u64 -- presumably safe for supported panel timings;
+	 * confirm for very large modes.
+	 */
+	*bitclk_rate = (width + hbp + hfp + hspw) * (height + vbp + vfp + vspw);
+	*bitclk_rate *= mipi->frame_rate;
+	*bitclk_rate *= pdata->panel_info.bpp;
+	do_div(*bitclk_rate, lanes);
+	clk_rate = *bitclk_rate;
+
+	do_div(clk_rate, 8U);
+	*byteclk_rate = (u32) clk_rate;
+	*dsiclk_rate = *byteclk_rate * lanes;
+	*pclk_rate = *byteclk_rate * lanes * 8 / pdata->panel_info.bpp;
+
+	pr_debug("dsiclk_rate=%u, byteclk=%u, pck_=%u\n",
+		*dsiclk_rate, *byteclk_rate, *pclk_rate);
+	return 0;
+}
+
+/*
+ * msm_dsi_on() - power on and program the DSI controller for a panel.
+ * Enables regulators (unless a dynamic mode switch is pending), resets
+ * and initializes the PHY, sets clock rates from the panel timing, then
+ * programs video-mode timing or command-mode stream registers.
+ */
+static int msm_dsi_on(struct mdss_panel_data *pdata)
+{
+	int ret = 0, i;
+	u64 clk_rate;
+	struct mdss_panel_info *pinfo;
+	struct mipi_panel_info *mipi;
+	u32 hbp, hfp, vbp, vfp, hspw, vspw, width, height;
+	u32 ystride, bpp, data;
+	u32 dummy_xres, dummy_yres;
+	u64 bitclk_rate = 0;	/* fixed: missing semicolon broke the build */
+	u32 byteclk_rate = 0, pclk_rate = 0, dsiclk_rate = 0;
+	unsigned char *ctrl_base = dsi_host_private->dsi_base;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+	pr_debug("msm_dsi_on\n");
+
+	pinfo = &pdata->panel_info;
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	mutex_lock(&ctrl_pdata->mutex);
+
+	if (!pdata->panel_info.dynamic_switch_pending) {
+		for (i = 0; !ret && (i < DSI_MAX_PM); i++) {
+			ret = msm_dss_enable_vreg(
+				ctrl_pdata->power_data[i].vreg_config,
+				ctrl_pdata->power_data[i].num_vreg, 1);
+			if (ret) {
+				pr_err("%s: failed to enable vregs for %s\n",
+					__func__, __mdss_dsi_pm_name(i));
+				goto error_vreg;
+			}
+		}
+	}
+
+	msm_dsi_ahb_ctrl(1);
+	msm_dsi_phy_sw_reset(dsi_host_private->dsi_base);
+	msm_dsi_phy_init(dsi_host_private->dsi_base, pdata);
+
+	msm_dsi_cal_clk_rate(pdata, &bitclk_rate, &dsiclk_rate,
+				&byteclk_rate, &pclk_rate);
+	msm_dsi_clk_set_rate(DSI_ESC_CLK_RATE, dsiclk_rate,
+				byteclk_rate, pclk_rate);
+	msm_dsi_prepare_clocks();
+	msm_dsi_clk_enable();
+
+	clk_rate = pdata->panel_info.clk_rate;
+	clk_rate = min(clk_rate, pdata->panel_info.clk_max);
+
+	hbp = pdata->panel_info.lcdc.h_back_porch;
+	hfp = pdata->panel_info.lcdc.h_front_porch;
+	vbp = pdata->panel_info.lcdc.v_back_porch;
+	vfp = pdata->panel_info.lcdc.v_front_porch;
+	hspw = pdata->panel_info.lcdc.h_pulse_width;
+	vspw = pdata->panel_info.lcdc.v_pulse_width;
+	width = pdata->panel_info.xres;
+	height = pdata->panel_info.yres;
+
+	mipi  = &pdata->panel_info.mipi;
+	if (pdata->panel_info.type == MIPI_VIDEO_PANEL) {
+		dummy_xres = pdata->panel_info.lcdc.xres_pad;
+		dummy_yres = pdata->panel_info.lcdc.yres_pad;
+
+		/* Program active area, totals and sync widths. */
+		MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_ACTIVE_H,
+			((hspw + hbp + width + dummy_xres) << 16 |
+			(hspw + hbp)));
+		MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_ACTIVE_V,
+			((vspw + vbp + height + dummy_yres) << 16 |
+			(vspw + vbp)));
+		MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_TOTAL,
+			(vspw + vbp + height + dummy_yres +
+				vfp - 1) << 16 | (hspw + hbp +
+				width + dummy_xres + hfp - 1));
+
+		MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_HSYNC, (hspw << 16));
+		MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_VSYNC, 0);
+		MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_VSYNC_VPOS,
+				(vspw << 16));
+
+	} else {		/* command mode */
+		if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB888)
+			bpp = 3;
+		else if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB666)
+			bpp = 3;
+		else if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB565)
+			bpp = 2;
+		else
+			bpp = 3;	/* Default format set to RGB888 */
+
+		ystride = width * bpp + 1;
+
+		data = (ystride << 16) | (mipi->vc << 8) | DTYPE_DCS_LWRITE;
+		MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_MDP_STREAM0_CTRL,
+			data);
+		MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_MDP_STREAM1_CTRL,
+			data);
+
+		data = height << 16 | width;
+		MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_MDP_STREAM1_TOTAL,
+			data);
+		MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_MDP_STREAM0_TOTAL,
+			data);
+	}
+
+	msm_dsi_sw_reset();
+	msm_dsi_host_init(pdata);
+
+	if (mipi->force_clk_lane_hs) {
+		u32 tmp;
+
+		tmp = MIPI_INP(ctrl_base + DSI_LANE_CTRL);
+		tmp |= (1<<28);
+		MIPI_OUTP(ctrl_base + DSI_LANE_CTRL, tmp);
+		wmb(); /* ensure write is finished before progressing */
+	}
+
+	msm_dsi_op_mode_config(mipi->mode, pdata);
+
+	msm_dsi_set_irq(ctrl_pdata, DSI_INTR_ERROR_MASK);
+	dsi_host_private->clk_count = 1;
+	dsi_host_private->dsi_on = 1;
+
+error_vreg:
+	if (ret) {
+		/* roll back any regulators enabled before the failure */
+		for (; i >= 0; i--)
+			msm_dss_enable_vreg(
+				ctrl_pdata->power_data[i].vreg_config,
+				ctrl_pdata->power_data[i].num_vreg, 0);
+	}
+
+	mutex_unlock(&ctrl_pdata->mutex);
+	return ret;
+}
+
+/*
+ * msm_dsi_off() - disable the controller, clocks, PHY and regulators.
+ * Regulators are left on when a dynamic mode switch is pending.
+ */
+static int msm_dsi_off(struct mdss_panel_data *pdata)
+{
+	int ret = 0, i;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		ret = -EINVAL;
+		return ret;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	pr_debug("msm_dsi_off\n");
+	mutex_lock(&ctrl_pdata->mutex);
+	msm_dsi_clear_irq(ctrl_pdata, ctrl_pdata->dsi_irq_mask);
+	msm_dsi_controller_cfg(0);
+	msm_dsi_clk_set_rate(DSI_ESC_CLK_RATE, 0, 0, 0);
+	msm_dsi_clk_disable();
+	msm_dsi_unprepare_clocks();
+	msm_dsi_phy_off(dsi_host_private->dsi_base);
+	msm_dsi_ahb_ctrl(0);
+
+	/* Disable regulators in reverse of the enable order. */
+	if (!pdata->panel_info.dynamic_switch_pending) {
+		for (i = DSI_MAX_PM - 1; i >= 0; i--) {
+			ret = msm_dss_enable_vreg(
+				ctrl_pdata->power_data[i].vreg_config,
+				ctrl_pdata->power_data[i].num_vreg, 0);
+			if (ret)
+				pr_err("%s: failed to disable vregs for %s\n",
+					__func__, __mdss_dsi_pm_name(i));
+		}
+	}
+	dsi_host_private->clk_count = 0;
+	dsi_host_private->dsi_on = 0;
+
+	mutex_unlock(&ctrl_pdata->mutex);
+
+	return ret;
+}
+
+/*
+ * msm_dsi_cont_on() - take over a panel left running by the bootloader
+ * (continuous splash): enable regulators, reset the panel and turn the
+ * clocks on without reprogramming the controller.
+ */
+static int msm_dsi_cont_on(struct mdss_panel_data *pdata)
+{
+	struct mdss_panel_info *pinfo;
+	int ret = 0, i;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		ret = -EINVAL;
+		return ret;
+	}
+
+
+	pr_debug("%s:\n", __func__);
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	pinfo = &pdata->panel_info;
+	mutex_lock(&ctrl_pdata->mutex);
+	for (i = 0; !ret && (i < DSI_MAX_PM); i++) {
+		ret = msm_dss_enable_vreg(
+			ctrl_pdata->power_data[i].vreg_config,
+			ctrl_pdata->power_data[i].num_vreg, 1);
+		if (ret) {
+			pr_err("%s: failed to enable vregs for %s\n",
+				__func__, __mdss_dsi_pm_name(i));
+			goto error_vreg;
+		}
+	}
+	pinfo->panel_power_state = MDSS_PANEL_POWER_ON;
+	ret = mdss_dsi_panel_reset(pdata, 1);
+	if (ret) {
+		pr_err("%s: Panel reset failed\n", __func__);
+		mutex_unlock(&ctrl_pdata->mutex);
+		return ret;
+	}
+
+	msm_dsi_ahb_ctrl(1);
+	msm_dsi_prepare_clocks();
+	msm_dsi_clk_enable();
+	msm_dsi_set_irq(ctrl_pdata, DSI_INTR_ERROR_MASK);
+	dsi_host_private->clk_count = 1;
+	dsi_host_private->dsi_on = 1;
+
+error_vreg:
+	/* on vreg failure, roll back the regulators enabled so far */
+	if (ret) {
+		for (; i >= 0; i--)
+			msm_dss_enable_vreg(
+				ctrl_pdata->power_data[i].vreg_config,
+				ctrl_pdata->power_data[i].num_vreg, 0);
+	}
+
+	mutex_unlock(&ctrl_pdata->mutex);
+	return ret;
+}
+
+/* Queue a one-byte read of the panel's configured status register. */
+static int msm_dsi_read_status(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	struct dcs_cmd_req cmdreq = {
+		.cmds = ctrl->status_cmds.cmds,
+		.cmds_cnt = ctrl->status_cmds.cmd_cnt,
+		.flags = CMD_REQ_COMMIT | CMD_REQ_RX,
+		.rlen = 1,
+		.cb = NULL,
+		.rbuf = ctrl->status_buf.data,
+	};
+
+	return mdss_dsi_cmdlist_put(ctrl, &cmdreq);
+}
+
+
+/**
+ * msm_dsi_reg_status_check() - Check dsi panel status through reg read
+ * @ctrl_pdata: pointer to the dsi controller structure
+ *
+ * This function can be used to check the panel status through reading the
+ * status register from the panel.
+ *
+ * Return: positive value if the panel is in good state, negative value or
+ * zero otherwise.
+ */
+int msm_dsi_reg_status_check(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	int ret = 0;
+
+	if (ctrl_pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return 0;
+	}
+
+	pr_debug("%s: Checking Register status\n", __func__);
+
+	/* clocks must be running to transact with the panel */
+	msm_dsi_clk_ctrl(&ctrl_pdata->panel_data, 1);
+
+	/* status commands may need to be sent in high-speed mode */
+	if (ctrl_pdata->status_cmds.link_state == DSI_HS_MODE)
+		dsi_set_tx_power_mode(0);
+
+	ret = msm_dsi_read_status(ctrl_pdata);
+
+	if (ctrl_pdata->status_cmds.link_state == DSI_HS_MODE)
+		dsi_set_tx_power_mode(1);
+
+	if (ret == 0) {
+		if (!mdss_dsi_cmp_panel_reg(ctrl_pdata->status_buf,
+			ctrl_pdata->status_value, 0)) {
+			pr_err("%s: Read back value from panel is incorrect\n",
+								__func__);
+			ret = -EINVAL;
+		} else {
+			ret = 1;
+		}
+	} else {
+		pr_err("%s: Read status register returned error\n", __func__);
+	}
+
+	msm_dsi_clk_ctrl(&ctrl_pdata->panel_data, 0);
+	pr_debug("%s: Read register done with ret: %d\n", __func__, ret);
+
+	return ret;
+}
+
+/**
+ * msm_dsi_bta_status_check() - Check dsi panel status through bta check
+ * @ctrl_pdata: pointer to the dsi controller structure
+ *
+ * This function can be used to check status of the panel using bta check
+ * for the panel.
+ *
+ * Return: positive value if the panel is in good state, negative value or
+ * zero otherwise.
+ */
+static int msm_dsi_bta_status_check(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	int ret = 0;
+
+	if (ctrl_pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return 0;
+	}
+
+	mutex_lock(&ctrl_pdata->cmd_mutex);
+	msm_dsi_clk_ctrl(&ctrl_pdata->panel_data, 1);
+	msm_dsi_cmd_mdp_busy(ctrl_pdata);
+	msm_dsi_set_irq(ctrl_pdata, DSI_INTR_BTA_DONE_MASK);
+	reinit_completion(&ctrl_pdata->bta_comp);
+
+	/* BTA trigger */
+	MIPI_OUTP(dsi_host_private->dsi_base + DSI_CMD_MODE_BTA_SW_TRIGGER,
+									0x01);
+	wmb(); /* ensure write is finished before progressing */
+	/* wait up to 100 ms (HZ/10) for the BTA-done interrupt */
+	ret = wait_for_completion_killable_timeout(&ctrl_pdata->bta_comp,
+									HZ/10);
+	msm_dsi_clear_irq(ctrl_pdata, DSI_INTR_BTA_DONE_MASK);
+	msm_dsi_clk_ctrl(&ctrl_pdata->panel_data, 0);
+	mutex_unlock(&ctrl_pdata->cmd_mutex);
+
+	if (ret <= 0)
+		pr_err("%s: DSI BTA error: %i\n", __func__, __LINE__);
+
+	pr_debug("%s: BTA done with ret: %d\n", __func__, ret);
+	return ret;
+}
+
+/* Debugfs clock hook: chain to the saved handler, then gate the AHB clock. */
+static void msm_dsi_debug_enable_clock(int on)
+{
+	if (dsi_host_private->debug_enable_clk)
+		dsi_host_private->debug_enable_clk(on);
+
+	msm_dsi_ahb_ctrl(on ? 1 : 0);
+}
+
+/* Register the DSI register window with mdss debugfs and hook its clock. */
+static int msm_dsi_debug_init(void)
+{
+	if (!mdss_res)
+		return 0;
+
+	/* save the existing hook so our handler can chain to it */
+	dsi_host_private->debug_enable_clk =
+			mdss_res->debug_inf.debug_enable_clock;
+	mdss_res->debug_inf.debug_enable_clock = msm_dsi_debug_enable_clock;
+
+	return mdss_debug_register_base("dsi0",
+				dsi_host_private->dsi_base,
+				dsi_host_private->dsi_reg_size,
+				NULL);
+}
+
+/* Copy the bootloader-provided DSI panel config string into panel_cfg. */
+static int dsi_get_panel_cfg(char *panel_cfg)
+{
+	struct mdss_panel_cfg *cfg;
+
+	if (!panel_cfg)
+		return MDSS_PANEL_INTF_INVALID;
+
+	cfg = mdp3_panel_intf_type(MDSS_PANEL_INTF_DSI);
+	if (IS_ERR(cfg)) {
+		panel_cfg[0] = 0;
+		return PTR_ERR(cfg);
+	}
+	if (!cfg) {
+		panel_cfg[0] = 0;
+		return 0;
+	}
+
+	pr_debug("%s:%d: cfg:[%s]\n", __func__, __LINE__,
+		 cfg->arg_cfg);
+	return strlcpy(panel_cfg, cfg->arg_cfg, MDSS_MAX_PANEL_LEN);
+}
+
+/* Resolve the preferred primary panel phandle from the controller node. */
+static struct device_node *dsi_pref_prim_panel(
+		struct platform_device *pdev)
+{
+	struct device_node *node;
+
+	pr_debug("%s:%d: Select primary panel from dt\n",
+					__func__, __LINE__);
+	node = of_parse_phandle(pdev->dev.of_node,
+					"qcom,dsi-pref-prim-pan", 0);
+	if (!node)
+		pr_err("%s:can't find panel phandle\n", __func__);
+
+	return node;
+}
+
+/**
+ * dsi_find_panel_of_node(): find device node of dsi panel
+ * @pdev: platform_device of the dsi ctrl node
+ * @panel_cfg: string containing intf specific config data
+ *
+ * Function finds the panel device node using the interface
+ * specific configuration data. This configuration data
+ * could be derived from the result of bootloader's GCDB
+ * panel detection mechanism. If such config data doesn't
+ * exist then this function returns the default panel configured
+ * in the device tree.
+ *
+ * returns pointer to panel node on success, NULL on error.
+ */
+static struct device_node *dsi_find_panel_of_node(
+		struct platform_device *pdev, char *panel_cfg)
+{
+	int l;
+	char *panel_name;
+	struct device_node *dsi_pan_node = NULL, *mdss_node = NULL;
+
+	if (!panel_cfg)
+		return NULL;
+
+	l = strlen(panel_cfg);
+	if (!l) {
+		/* no panel cfg chg, parse dt */
+		pr_debug("%s:%d: no cmd line cfg present\n",
+			 __func__, __LINE__);
+		dsi_pan_node = dsi_pref_prim_panel(pdev);
+	} else {
+		if (panel_cfg[0] != '0') {
+			pr_err("%s:%d:ctrl id=[%d] not supported\n",
+			       __func__, __LINE__, panel_cfg[0]);
+			return NULL;
+		}
+		/*
+		 * skip first two chars '<dsi_ctrl_id>' and
+		 * ':' to get to the panel name
+		 */
+		panel_name = panel_cfg + 2;
+		pr_debug("%s:%d:%s:%s\n", __func__, __LINE__,
+			 panel_cfg, panel_name);
+
+		mdss_node = of_parse_phandle(pdev->dev.of_node,
+					     "qcom,mdss-mdp", 0);
+
+		if (!mdss_node) {
+			pr_err("%s: %d: mdss_node null\n",
+			       __func__, __LINE__);
+			return NULL;
+		}
+		dsi_pan_node = of_find_node_by_name(mdss_node,
+						    panel_name);
+		if (!dsi_pan_node) {
+			/* fall back to the devicetree-preferred panel */
+			pr_err("%s: invalid pan node\n",
+			       __func__);
+			dsi_pan_node = dsi_pref_prim_panel(pdev);
+		}
+	}
+	return dsi_pan_node;
+}
+
+/*
+ * msm_dsi_clk_ctrl() - reference-counted enable/disable of the DSI
+ * clocks; the first enable brings up AHB and programs the rates, the
+ * last disable tears everything down.  Always returns 0.
+ */
+static int msm_dsi_clk_ctrl(struct mdss_panel_data *pdata, int enable)
+{
+	/* must be u64: msm_dsi_cal_clk_rate() takes a u64 * for the bit clock */
+	u64 bitclk_rate = 0;
+	u32 byteclk_rate = 0, pclk_rate = 0, dsiclk_rate = 0;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+	pr_debug("%s:\n", __func__);
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	mutex_lock(&ctrl_pdata->mutex);
+
+	if (enable) {
+		dsi_host_private->clk_count++;
+		if (dsi_host_private->clk_count == 1) {
+			msm_dsi_ahb_ctrl(1);
+			msm_dsi_cal_clk_rate(pdata, &bitclk_rate, &dsiclk_rate,
+						&byteclk_rate, &pclk_rate);
+			msm_dsi_clk_set_rate(DSI_ESC_CLK_RATE, dsiclk_rate,
+						byteclk_rate, pclk_rate);
+			msm_dsi_prepare_clocks();
+			msm_dsi_clk_enable();
+		}
+	} else {
+		dsi_host_private->clk_count--;
+		if (dsi_host_private->clk_count == 0) {
+			msm_dsi_clear_irq(ctrl_pdata, ctrl_pdata->dsi_irq_mask);
+			msm_dsi_clk_set_rate(DSI_ESC_CLK_RATE, 0, 0, 0);
+			msm_dsi_clk_disable();
+			msm_dsi_unprepare_clocks();
+			msm_dsi_ahb_ctrl(0);
+		}
+	}
+	mutex_unlock(&ctrl_pdata->mutex);
+	return 0;
+}
+
+/* One-time init of per-controller completions, locks, buffers and ESD hooks. */
+void msm_dsi_ctrl_init(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	init_completion(&ctrl->dma_comp);
+	init_completion(&ctrl->mdp_comp);
+	init_completion(&ctrl->bta_comp);
+	init_completion(&ctrl->video_comp);
+	spin_lock_init(&ctrl->irq_lock);
+	spin_lock_init(&ctrl->mdp_lock);
+	mutex_init(&ctrl->mutex);
+	mutex_init(&ctrl->cmd_mutex);
+	/* mark MDP idle up front so the first wait does not block */
+	complete(&ctrl->mdp_comp);
+	/* NOTE(review): dsi_buf_alloc() results are unchecked here -- confirm
+	 * allocation failures are tolerated by the command paths.
+	 */
+	dsi_buf_alloc(&ctrl->tx_buf, SZ_4K);
+	dsi_buf_alloc(&ctrl->rx_buf, SZ_4K);
+	dsi_buf_alloc(&ctrl->status_buf, SZ_4K);
+	ctrl->cmdlist_commit = msm_dsi_cmdlist_commit;
+	ctrl->panel_mode = ctrl->panel_data.panel_info.mipi.mode;
+
+	if (ctrl->status_mode == ESD_REG)
+		ctrl->check_status = msm_dsi_reg_status_check;
+	else if (ctrl->status_mode == ESD_BTA)
+		ctrl->check_status = msm_dsi_bta_status_check;
+
+	if (ctrl->status_mode == ESD_MAX) {
+		pr_err("%s: Using default BTA for ESD check\n", __func__);
+		ctrl->check_status = msm_dsi_bta_status_check;
+	}
+}
+
+/* Map the optional "qcom,lane-map" DT string onto a DSI_LANE_MAP_* value. */
+static void msm_dsi_parse_lane_swap(struct device_node *np, char *dlane_swap)
+{
+	static const struct {
+		const char *name;
+		char map;
+	} lane_maps[] = {
+		{ "lane_map_3012", DSI_LANE_MAP_3012 },
+		{ "lane_map_2301", DSI_LANE_MAP_2301 },
+		{ "lane_map_1230", DSI_LANE_MAP_1230 },
+		{ "lane_map_0321", DSI_LANE_MAP_0321 },
+		{ "lane_map_1032", DSI_LANE_MAP_1032 },
+		{ "lane_map_2103", DSI_LANE_MAP_2103 },
+		{ "lane_map_3210", DSI_LANE_MAP_3210 },
+	};
+	const char *data;
+	unsigned int i;
+
+	/* identity mapping unless the property names a known permutation */
+	*dlane_swap = DSI_LANE_MAP_0123;
+	data = of_get_property(np, "qcom,lane-map", NULL);
+	if (!data)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(lane_maps); i++) {
+		if (!strcmp(data, lane_maps[i].name)) {
+			*dlane_swap = lane_maps[i].map;
+			return;
+		}
+	}
+}
+
+/*
+ * msm_dsi_probe() - platform probe for the v2 DSI controller.
+ * Maps registers, locates the panel devicetree node, initializes IO,
+ * IRQs and the controller state, and registers the panel device.
+ */
+static int msm_dsi_probe(struct platform_device *pdev)
+{
+	struct dsi_interface intf;
+	char panel_cfg[MDSS_MAX_PANEL_LEN];
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	int rc = 0;
+	struct device_node *dsi_pan_node = NULL;
+	bool cmd_cfg_cont_splash = false;
+	struct resource *mdss_dsi_mres;
+	int i;
+
+	pr_debug("%s\n", __func__);
+
+	rc = msm_dsi_init();
+	if (rc)
+		return rc;
+
+	if (!pdev->dev.of_node) {
+		pr_err("%s: Device node is not accessible\n", __func__);
+		rc = -ENODEV;
+		goto error_no_mem;
+	}
+	pdev->id = 0;
+
+	ctrl_pdata = platform_get_drvdata(pdev);
+	if (!ctrl_pdata) {
+		ctrl_pdata = devm_kzalloc(&pdev->dev,
+			sizeof(struct mdss_dsi_ctrl_pdata), GFP_KERNEL);
+		if (!ctrl_pdata) {
+			rc = -ENOMEM;
+			goto error_no_mem;
+		}
+		platform_set_drvdata(pdev, ctrl_pdata);
+	}
+
+	ctrl_pdata->mdss_util = mdss_get_util_intf();
+	/* check the pointer we just assigned, not the mdp3 global */
+	if (ctrl_pdata->mdss_util == NULL) {
+		pr_err("Failed to get mdss utility functions\n");
+		return -ENODEV;
+	}
+
+	mdss_dsi_mres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mdss_dsi_mres) {
+		pr_err("%s:%d unable to get the MDSS reg resources",
+							__func__, __LINE__);
+		rc = -ENOMEM;
+		goto error_io_resource;
+	} else {
+		dsi_host_private->dsi_reg_size = resource_size(mdss_dsi_mres);
+		dsi_host_private->dsi_base = ioremap(mdss_dsi_mres->start,
+						dsi_host_private->dsi_reg_size);
+		if (!dsi_host_private->dsi_base) {
+			pr_err("%s:%d unable to remap dsi resources",
+							__func__, __LINE__);
+			rc = -ENOMEM;
+			goto error_io_resource;
+		}
+	}
+
+	mdss_dsi_mres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!mdss_dsi_mres || mdss_dsi_mres->start == 0) {
+		pr_err("%s:%d unable to get the MDSS irq resources",
+							__func__, __LINE__);
+		rc = -ENODEV;
+		goto error_irq_resource;
+	}
+
+	rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: failed to add child nodes, rc=%d\n",
+								__func__, rc);
+		goto error_platform_pop;
+	}
+
+	/* DSI panels can be different between controllers */
+	rc = dsi_get_panel_cfg(panel_cfg);
+	if (!rc)
+		/* dsi panel cfg not present */
+		pr_warn("%s:%d:dsi specific cfg not present\n",
+							 __func__, __LINE__);
+
+	/* find panel device node */
+	dsi_pan_node = dsi_find_panel_of_node(pdev, panel_cfg);
+	if (!dsi_pan_node) {
+		pr_err("%s: can't find panel node %s\n", __func__,
+								panel_cfg);
+		/* rc may hold a positive strlcpy length; force an error */
+		rc = -ENODEV;
+		goto error_pan_node;
+	}
+
+	cmd_cfg_cont_splash = mdp3_panel_get_boot_cfg() ? true : false;
+
+	rc = mdss_dsi_panel_init(dsi_pan_node, ctrl_pdata, cmd_cfg_cont_splash);
+	if (rc) {
+		pr_err("%s: dsi panel init failed\n", __func__);
+		goto error_pan_node;
+	}
+
+	rc = dsi_ctrl_config_init(pdev, ctrl_pdata);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: failed to parse mdss dtsi rc=%d\n",
+								__func__, rc);
+		goto error_pan_node;
+	}
+
+	msm_dsi_parse_lane_swap(pdev->dev.of_node, &(ctrl_pdata->dlane_swap));
+
+	for (i = 0;  i < DSI_MAX_PM; i++) {
+		rc = msm_dsi_io_init(pdev, &(ctrl_pdata->power_data[i]));
+		if (rc) {
+			dev_err(&pdev->dev, "%s: failed to init IO for %s\n",
+				__func__, __mdss_dsi_pm_name(i));
+			goto error_io_init;
+		}
+	}
+
+	pr_debug("%s: Dsi Ctrl->0 initialized\n", __func__);
+
+	dsi_host_private->dis_dev = pdev->dev;
+	intf.on = msm_dsi_on;
+	intf.off = msm_dsi_off;
+	intf.cont_on = msm_dsi_cont_on;
+	intf.clk_ctrl = msm_dsi_clk_ctrl;
+	intf.op_mode_config = msm_dsi_op_mode_config;
+	intf.index = 0;
+	intf.private = NULL;
+	dsi_register_interface(&intf);
+
+	msm_dsi_debug_init();
+
+	msm_dsi_ctrl_init(ctrl_pdata);
+
+	rc = msm_dsi_irq_init(&pdev->dev, mdss_dsi_mres->start,
+					   ctrl_pdata);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: failed to init irq, rc=%d\n",
+			__func__, rc);
+		goto error_irq_init;
+	}
+
+	rc = dsi_panel_device_register_v2(pdev, ctrl_pdata);
+	if (rc) {
+		pr_err("%s: dsi panel dev reg failed\n", __func__);
+		goto error_device_register;
+	}
+	pr_debug("%s success\n", __func__);
+	return 0;
+error_device_register:
+	kfree(ctrl_pdata->dsi_hw->irq_info);
+	kfree(ctrl_pdata->dsi_hw);
+error_irq_init:
+	for (i = DSI_MAX_PM - 1; i >= 0; i--)
+		msm_dsi_io_deinit(pdev, &(ctrl_pdata->power_data[i]));
+error_io_init:
+	dsi_ctrl_config_deinit(pdev, ctrl_pdata);
+error_pan_node:
+	of_node_put(dsi_pan_node);
+error_platform_pop:
+	msm_dsi_clear_irq(ctrl_pdata, ctrl_pdata->dsi_irq_mask);
+error_irq_resource:
+	if (dsi_host_private->dsi_base) {
+		iounmap(dsi_host_private->dsi_base);
+		dsi_host_private->dsi_base = NULL;
+	}
+error_io_resource:
+	devm_kfree(&pdev->dev, ctrl_pdata);
+error_no_mem:
+	msm_dsi_deinit();
+
+	return rc;
+}
+
+/* Teardown counterpart of msm_dsi_probe(): irqs, IO, config, mappings. */
+static int msm_dsi_remove(struct platform_device *pdev)
+{
+	int i;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = platform_get_drvdata(pdev);
+
+	if (!ctrl_pdata) {
+		pr_err("%s: no driver data\n", __func__);
+		return -ENODEV;
+	}
+
+	msm_dsi_clear_irq(ctrl_pdata, ctrl_pdata->dsi_irq_mask);
+	for (i = DSI_MAX_PM - 1; i >= 0; i--)
+		msm_dsi_io_deinit(pdev, &(ctrl_pdata->power_data[i]));
+	dsi_ctrl_config_deinit(pdev, ctrl_pdata);
+	iounmap(dsi_host_private->dsi_base);
+	dsi_host_private->dsi_base = NULL;
+	msm_dsi_deinit();
+	devm_kfree(&pdev->dev, ctrl_pdata);
+
+	return 0;
+}
+
+/* Devicetree match table: binds this driver to "qcom,msm-dsi-v2" nodes. */
+static const struct of_device_id msm_dsi_v2_dt_match[] = {
+	{.compatible = "qcom,msm-dsi-v2"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, msm_dsi_v2_dt_match);
+
+/* Platform driver glue for the v2 DSI controller. */
+static struct platform_driver msm_dsi_v2_driver = {
+	.probe = msm_dsi_probe,
+	.remove = msm_dsi_remove,
+	.shutdown = NULL,
+	.driver = {
+		.name = "qcom,dsi-panel-v2",
+		.of_match_table = msm_dsi_v2_dt_match,
+	},
+};
+
+/* Thin wrapper so module init can log registration failures. */
+static int msm_dsi_v2_register_driver(void)
+{
+	return platform_driver_register(&msm_dsi_v2_driver);
+}
+
+/* Module entry point: register the platform driver. */
+static int __init msm_dsi_v2_driver_init(void)
+{
+	int rc = msm_dsi_v2_register_driver();
+
+	if (rc)
+		pr_err("msm_dsi_v2_register_driver() failed!\n");
+
+	return rc;
+}
+module_init(msm_dsi_v2_driver_init);
+
+/* Module exit point: unregister the platform driver. */
+static void __exit msm_dsi_v2_driver_cleanup(void)
+{
+	platform_driver_unregister(&msm_dsi_v2_driver);
+}
+module_exit(msm_dsi_v2_driver_cleanup);
diff --git a/drivers/video/fbdev/msm/dsi_host_v2.h b/drivers/video/fbdev/msm/dsi_host_v2.h
new file mode 100644
index 0000000..d61bcf9
--- /dev/null
+++ b/drivers/video/fbdev/msm/dsi_host_v2.h
@@ -0,0 +1,178 @@
+/* Copyright (c) 2012-2014, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
#ifndef DSI_HOST_V2_H
#define DSI_HOST_V2_H

#include <linux/bitops.h>

/*
 * DSI_INT_CTRL bits: each interrupt has a *_MASK (enable) bit and a
 * matching status bit. DSI_INTR_ALL_MASK is the OR of all *_MASK bits.
 */
#define DSI_INTR_ERROR_MASK			BIT(25)
#define DSI_INTR_ERROR				BIT(24)
#define DSI_INTR_BTA_DONE_MASK			BIT(21)
#define DSI_INTR_BTA_DONE			BIT(20)
#define DSI_INTR_VIDEO_DONE_MASK		BIT(17)
#define DSI_INTR_VIDEO_DONE			BIT(16)
#define DSI_INTR_CMD_MDP_DONE_MASK		BIT(9)
#define DSI_INTR_CMD_MDP_DONE			BIT(8)
#define DSI_INTR_CMD_DMA_DONE_MASK		BIT(1)
#define DSI_INTR_CMD_DMA_DONE			BIT(0)
#define DSI_INTR_ALL_MASK			0x2220202

#define DSI_BTA_TERM				BIT(1)

/* DSI controller register offsets (relative to the controller base). */
#define DSI_CTRL				0x0000
#define DSI_STATUS				0x0004
#define DSI_FIFO_STATUS			0x0008
#define DSI_VIDEO_MODE_CTRL			0x000C
#define DSI_VIDEO_MODE_DATA_CTRL		0x001C
#define DSI_VIDEO_MODE_ACTIVE_H			0x0020
#define DSI_VIDEO_MODE_ACTIVE_V		0x0024
#define DSI_VIDEO_MODE_TOTAL		0x0028
#define DSI_VIDEO_MODE_HSYNC			0x002C
#define DSI_VIDEO_MODE_VSYNC			0x0030
#define DSI_VIDEO_MODE_VSYNC_VPOS		0x0034
#define DSI_COMMAND_MODE_DMA_CTRL		0x0038
#define DSI_COMMAND_MODE_MDP_CTRL		0x003C
#define DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL	0x0040
#define DSI_DMA_CMD_OFFSET			0x0044
#define DSI_DMA_CMD_LENGTH			0x0048
#define DSI_DMA_FIFO_CTRL			0x004C
#define DSI_COMMAND_MODE_MDP_STREAM0_CTRL	0x0054
#define DSI_COMMAND_MODE_MDP_STREAM0_TOTAL	0x0058
#define DSI_COMMAND_MODE_MDP_STREAM1_CTRL	0x005C
#define DSI_COMMAND_MODE_MDP_STREAM1_TOTAL	0x0060
#define DSI_ACK_ERR_STATUS			0x0064
#define DSI_RDBK_DATA0				0x0068
#define DSI_RDBK_DATA1				0x006C
#define DSI_RDBK_DATA2				0x0070
#define DSI_RDBK_DATA3				0x0074
#define DSI_RDBK_DATATYPE0			0x0078
#define DSI_RDBK_DATATYPE1			0x007C
#define DSI_TRIG_CTRL				0x0080
#define DSI_EXT_MUX				0x0084
#define DSI_EXT_TE_PULSE_DETECT_CTRL		0x0088
#define DSI_CMD_MODE_DMA_SW_TRIGGER		0x008C
#define DSI_CMD_MODE_MDP_SW_TRIGGER		0x0090
#define DSI_CMD_MODE_BTA_SW_TRIGGER		0x0094
#define DSI_RESET_SW_TRIGGER			0x0098
#define DSI_LANE_CTRL				0x00A8
#define DSI_LANE_SWAP_CTRL			0x00AC
#define DSI_DLN0_PHY_ERR			0x00B0
#define DSI_TIMEOUT_STATUS			0x00BC
#define DSI_CLKOUT_TIMING_CTRL			0x00C0
#define DSI_EOT_PACKET				0x00C4
#define DSI_EOT_PACKET_CTRL			0x00C8
#define DSI_ERR_INT_MASK0			0x0108
#define DSI_INT_CTRL				0x010c
#define DSI_SOFT_RESET				0x0114
#define DSI_CLK_CTRL				0x0118
#define DSI_CLK_STATUS				0x011C
#define DSI_PHY_SW_RESET			0x0128
#define DSI_COMMAND_MODE_MDP_IDLE_CTRL		0x0190
#define DSI_VERSION				0x01F0

/* DSI PHY: PLL control/status block. */
#define DSI_DSIPHY_PLL_CTRL_0			0x0200
#define DSI_DSIPHY_PLL_CTRL_1			0x0204
#define DSI_DSIPHY_PLL_CTRL_2			0x0208
#define DSI_DSIPHY_PLL_CTRL_3			0x020C
#define DSI_DSIPHY_PLL_CTRL_4			0x0210
#define DSI_DSIPHY_PLL_CTRL_5			0x0214
#define DSI_DSIPHY_PLL_CTRL_6			0x0218
#define DSI_DSIPHY_PLL_CTRL_7			0x021C
#define DSI_DSIPHY_PLL_CTRL_8			0x0220
#define DSI_DSIPHY_PLL_CTRL_9			0x0224
#define DSI_DSIPHY_PLL_CTRL_10			0x0228
#define DSI_DSIPHY_PLL_CTRL_11			0x022C
#define DSI_DSIPHY_PLL_CTRL_12			0x0230
#define DSI_DSIPHY_PLL_CTRL_13			0x0234
#define DSI_DSIPHY_PLL_CTRL_14			0x0238
#define DSI_DSIPHY_PLL_CTRL_15			0x023C
#define DSI_DSIPHY_PLL_CTRL_16			0x0240
#define DSI_DSIPHY_PLL_CTRL_17			0x0244
#define DSI_DSIPHY_PLL_CTRL_18			0x0248
#define DSI_DSIPHY_PLL_CTRL_19			0x024C
#define DSI_DSIPHY_ANA_CTRL0			0x0260
#define DSI_DSIPHY_ANA_CTRL1			0x0264
#define DSI_DSIPHY_ANA_CTRL2			0x0268
#define DSI_DSIPHY_ANA_CTRL3			0x026C
#define DSI_DSIPHY_ANA_CTRL4			0x0270
#define DSI_DSIPHY_ANA_CTRL5			0x0274
#define DSI_DSIPHY_ANA_CTRL6			0x0278
#define DSI_DSIPHY_ANA_CTRL7			0x027C
#define DSI_DSIPHY_PLL_RDY			0x0280
#define DSI_DSIPHY_PLL_ANA_STATUS0		0x0294
#define DSI_DSIPHY_PLL_ANA_STATUS1		0x0298
#define DSI_DSIPHY_PLL_ANA_STATUS2		0x029C
/*
 * Per-lane config: data lanes LN0..LN3 at 0x0300/0x0340/0x0380/0x03C0,
 * clock lane LNCK at 0x0400 (0x40 stride between lanes).
 */
#define DSI_DSIPHY_LN0_CFG0			0x0300
#define DSI_DSIPHY_LN0_CFG1			0x0304
#define DSI_DSIPHY_LN0_CFG2			0x0308
#define DSI_DSIPHY_LN1_CFG0			0x0340
#define DSI_DSIPHY_LN1_CFG1			0x0344
#define DSI_DSIPHY_LN1_CFG2			0x0348
#define DSI_DSIPHY_LN2_CFG0			0x0380
#define DSI_DSIPHY_LN2_CFG1			0x0384
#define DSI_DSIPHY_LN2_CFG2			0x0388
#define DSI_DSIPHY_LN3_CFG0			0x03C0
#define DSI_DSIPHY_LN3_CFG1			0x03C4
#define DSI_DSIPHY_LN3_CFG2			0x03C8
#define DSI_DSIPHY_LNCK_CFG0			0x0400
#define DSI_DSIPHY_LNCK_CFG1			0x0404
#define DSI_DSIPHY_LNCK_CFG2			0x0408
/* PHY timing registers: 12 contiguous 32-bit registers (CTRL_0..CTRL_11). */
#define DSI_DSIPHY_TIMING_CTRL_0		0x0440
#define DSI_DSIPHY_TIMING_CTRL_1		0x0444
#define DSI_DSIPHY_TIMING_CTRL_2		0x0448
#define DSI_DSIPHY_TIMING_CTRL_3		0x044C
#define DSI_DSIPHY_TIMING_CTRL_4		0x0450
#define DSI_DSIPHY_TIMING_CTRL_5		0x0454
#define DSI_DSIPHY_TIMING_CTRL_6		0x0458
#define DSI_DSIPHY_TIMING_CTRL_7		0x045C
#define DSI_DSIPHY_TIMING_CTRL_8		0x0460
#define DSI_DSIPHY_TIMING_CTRL_9		0x0464
#define DSI_DSIPHY_TIMING_CTRL_10		0x0468
#define DSI_DSIPHY_TIMING_CTRL_11		0x046C
#define DSI_DSIPHY_CTRL_0			0x0470
#define DSI_DSIPHY_CTRL_1			0x0474
#define DSI_DSIPHY_CTRL_2			0x0478
#define DSI_DSIPHY_CTRL_3			0x047C
#define DSI_DSIPHY_STRENGTH_CTRL_0		0x0480
#define DSI_DSIPHY_STRENGTH_CTRL_1		0x0484
#define DSI_DSIPHY_STRENGTH_CTRL_2		0x0488
/* PHY regulator and calibration block. */
#define DSI_DSIPHY_LDO_CNTRL			0x04B0
#define DSI_DSIPHY_REGULATOR_CTRL_0		0x0500
#define DSI_DSIPHY_REGULATOR_CTRL_1		0x0504
#define DSI_DSIPHY_REGULATOR_CTRL_2		0x0508
#define DSI_DSIPHY_REGULATOR_CTRL_3		0x050C
#define DSI_DSIPHY_REGULATOR_CTRL_4		0x0510
#define DSI_DSIPHY_REGULATOR_TEST		0x0514
#define DSI_DSIPHY_REGULATOR_CAL_PWR_CFG	0x0518
#define DSI_DSIPHY_CAL_HW_TRIGGER		0x0528
#define DSI_DSIPHY_CAL_SW_CFG0			0x052C
#define DSI_DSIPHY_CAL_SW_CFG1			0x0530
#define DSI_DSIPHY_CAL_SW_CFG2			0x0534
#define DSI_DSIPHY_CAL_HW_CFG0			0x0538
#define DSI_DSIPHY_CAL_HW_CFG1			0x053C
#define DSI_DSIPHY_CAL_HW_CFG2			0x0540
#define DSI_DSIPHY_CAL_HW_CFG3			0x0544
#define DSI_DSIPHY_CAL_HW_CFG4			0x0548
#define DSI_DSIPHY_REGULATOR_CAL_STATUS0	0x0550
/* BIST registers (NOTE: listed out of ascending offset order). */
#define DSI_DSIPHY_BIST_CTRL0			0x048C
#define DSI_DSIPHY_BIST_CTRL1			0x0490
#define DSI_DSIPHY_BIST_CTRL2			0x0494
#define DSI_DSIPHY_BIST_CTRL3			0x0498
#define DSI_DSIPHY_BIST_CTRL4			0x049C
#define DSI_DSIPHY_BIST_CTRL5			0x04A0

/* DSI_CTRL register bits. */
#define DSI_EN			BIT(0)
#define DSI_VIDEO_MODE_EN	BIT(1)
#define DSI_CMD_MODE_EN		BIT(2)

#endif /* DSI_HOST_V2_H */
diff --git a/drivers/video/fbdev/msm/dsi_io_v2.c b/drivers/video/fbdev/msm/dsi_io_v2.c
new file mode 100644
index 0000000..dd2e308
--- /dev/null
+++ b/drivers/video/fbdev/msm/dsi_io_v2.c
@@ -0,0 +1,389 @@
+/* Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk/msm-clk.h>
+
+
+#include "dsi_v2.h"
+#include "dsi_io_v2.h"
+#include "dsi_host_v2.h"
+
/* Clock handles and enable bookkeeping for the v2 DSI controller. */
struct msm_dsi_io_private {
	struct clk *dsi_byte_clk;
	struct clk *dsi_esc_clk;
	struct clk *dsi_pixel_clk;
	struct clk *dsi_ahb_clk;
	struct clk *dsi_clk;
	int msm_dsi_clk_on;	/* link clocks enabled flag (0/1) */
	int msm_dsi_ahb_clk_on;	/* AHB clock enable refcount */
};

static struct msm_dsi_io_private *dsi_io_private;

#define DSI_VDDA_VOLTAGE 1200000
+
+void msm_dsi_ahb_ctrl(int enable)
+{
+	if (enable) {
+		dsi_io_private->msm_dsi_ahb_clk_on++;
+		if (dsi_io_private->msm_dsi_ahb_clk_on == 1)
+			clk_enable(dsi_io_private->dsi_ahb_clk);
+	} else {
+		dsi_io_private->msm_dsi_ahb_clk_on--;
+		if (dsi_io_private->msm_dsi_ahb_clk_on == 0)
+			clk_disable(dsi_io_private->dsi_ahb_clk);
+	}
+}
+
+int msm_dsi_io_init(struct platform_device *pdev, struct dss_module_power *mp)
+{
+	int rc;
+
+	if (!dsi_io_private) {
+		dsi_io_private = kzalloc(sizeof(struct msm_dsi_io_private),
+					GFP_KERNEL);
+		if (!dsi_io_private)
+			return -ENOMEM;
+	}
+
+	rc = msm_dsi_clk_init(pdev);
+	if (rc) {
+		pr_err("fail to initialize DSI clock\n");
+		return rc;
+	}
+
+	rc = msm_dss_config_vreg(&pdev->dev, mp->vreg_config,
+						mp->num_vreg, 1);
+	if (rc) {
+		pr_err("fail to initialize DSI regulator\n");
+		return rc;
+	}
+
+	return 0;
+}
+
+void msm_dsi_io_deinit(struct platform_device *pdev,
+				 struct dss_module_power *mp)
+{
+	if (dsi_io_private) {
+		msm_dsi_clk_deinit();
+		msm_dss_config_vreg(&pdev->dev, mp->vreg_config,
+					mp->num_vreg, 0);
+		kfree(dsi_io_private);
+		dsi_io_private = NULL;
+	}
+}
+
+int msm_dsi_clk_init(struct platform_device *dev)
+{
+	int rc = 0;
+
+	dsi_io_private->dsi_clk = clk_get(&dev->dev, "dsi_clk");
+	if (IS_ERR(dsi_io_private->dsi_clk)) {
+		pr_err("can't find dsi core_clk\n");
+		rc = PTR_ERR(dsi_io_private->dsi_clk);
+		dsi_io_private->dsi_clk = NULL;
+		return rc;
+	}
+	dsi_io_private->dsi_byte_clk = clk_get(&dev->dev, "byte_clk");
+	if (IS_ERR(dsi_io_private->dsi_byte_clk)) {
+		pr_err("can't find dsi byte_clk\n");
+		rc = PTR_ERR(dsi_io_private->dsi_byte_clk);
+		dsi_io_private->dsi_byte_clk = NULL;
+		return rc;
+	}
+
+	dsi_io_private->dsi_esc_clk = clk_get(&dev->dev, "esc_clk");
+	if (IS_ERR(dsi_io_private->dsi_esc_clk)) {
+		pr_err("can't find dsi esc_clk\n");
+		rc = PTR_ERR(dsi_io_private->dsi_esc_clk);
+		dsi_io_private->dsi_esc_clk = NULL;
+		return rc;
+	}
+
+	dsi_io_private->dsi_pixel_clk = clk_get(&dev->dev, "pixel_clk");
+	if (IS_ERR(dsi_io_private->dsi_pixel_clk)) {
+		pr_err("can't find dsi pixel\n");
+		rc = PTR_ERR(dsi_io_private->dsi_pixel_clk);
+		dsi_io_private->dsi_pixel_clk = NULL;
+		return rc;
+	}
+
+	dsi_io_private->dsi_ahb_clk = clk_get(&dev->dev, "iface_clk");
+	if (IS_ERR(dsi_io_private->dsi_ahb_clk)) {
+		pr_err("can't find dsi iface_clk\n");
+		rc = PTR_ERR(dsi_io_private->dsi_ahb_clk);
+		dsi_io_private->dsi_ahb_clk = NULL;
+		return rc;
+	}
+	clk_prepare(dsi_io_private->dsi_ahb_clk);
+
+	return 0;
+}
+
+void msm_dsi_clk_deinit(void)
+{
+	if (dsi_io_private->dsi_clk) {
+		clk_put(dsi_io_private->dsi_clk);
+		dsi_io_private->dsi_clk = NULL;
+	}
+	if (dsi_io_private->dsi_byte_clk) {
+		clk_put(dsi_io_private->dsi_byte_clk);
+		dsi_io_private->dsi_byte_clk = NULL;
+	}
+	if (dsi_io_private->dsi_esc_clk) {
+		clk_put(dsi_io_private->dsi_esc_clk);
+		dsi_io_private->dsi_esc_clk = NULL;
+	}
+	if (dsi_io_private->dsi_pixel_clk) {
+		clk_put(dsi_io_private->dsi_pixel_clk);
+		dsi_io_private->dsi_pixel_clk = NULL;
+	}
+	if (dsi_io_private->dsi_ahb_clk) {
+		clk_unprepare(dsi_io_private->dsi_ahb_clk);
+		clk_put(dsi_io_private->dsi_ahb_clk);
+		dsi_io_private->dsi_ahb_clk = NULL;
+	}
+}
+
+int msm_dsi_prepare_clocks(void)
+{
+	clk_prepare(dsi_io_private->dsi_clk);
+	clk_prepare(dsi_io_private->dsi_byte_clk);
+	clk_prepare(dsi_io_private->dsi_esc_clk);
+	clk_prepare(dsi_io_private->dsi_pixel_clk);
+	return 0;
+}
+
+int msm_dsi_unprepare_clocks(void)
+{
+	clk_unprepare(dsi_io_private->dsi_clk);
+	clk_unprepare(dsi_io_private->dsi_esc_clk);
+	clk_unprepare(dsi_io_private->dsi_byte_clk);
+	clk_unprepare(dsi_io_private->dsi_pixel_clk);
+	return 0;
+}
+
+int msm_dsi_clk_set_rate(unsigned long esc_rate,
+			unsigned long dsi_rate,
+			unsigned long byte_rate,
+			unsigned long pixel_rate)
+{
+	int rc;
+
+	rc = clk_set_rate(dsi_io_private->dsi_clk, dsi_rate);
+	if (rc) {
+		pr_err("dsi_esc_clk - clk_set_rate failed =%d\n", rc);
+		return rc;
+	}
+
+	rc = clk_set_rate(dsi_io_private->dsi_esc_clk, esc_rate);
+	if (rc) {
+		pr_err("dsi_esc_clk - clk_set_rate failed =%d\n", rc);
+		return rc;
+	}
+
+	rc = clk_set_rate(dsi_io_private->dsi_byte_clk, byte_rate);
+	if (rc) {
+		pr_err("dsi_byte_clk - clk_set_rate faile = %dd\n", rc);
+		return rc;
+	}
+
+	rc = clk_set_rate(dsi_io_private->dsi_pixel_clk, pixel_rate);
+	if (rc) {
+		pr_err("dsi_pixel_clk - clk_set_rate failed = %d\n", rc);
+		return rc;
+	}
+	return 0;
+}
+
+int  msm_dsi_clk_enable(void)
+{
+	if (dsi_io_private->msm_dsi_clk_on) {
+		pr_debug("dsi_clks on already\n");
+		return 0;
+	}
+
+	clk_enable(dsi_io_private->dsi_clk);
+	clk_enable(dsi_io_private->dsi_esc_clk);
+	clk_enable(dsi_io_private->dsi_byte_clk);
+	clk_enable(dsi_io_private->dsi_pixel_clk);
+
+	dsi_io_private->msm_dsi_clk_on = 1;
+	return 0;
+}
+
+int msm_dsi_clk_disable(void)
+{
+	if (dsi_io_private->msm_dsi_clk_on == 0) {
+		pr_debug("mdss_dsi_clks already OFF\n");
+		return 0;
+	}
+
+	clk_disable(dsi_io_private->dsi_clk);
+	clk_disable(dsi_io_private->dsi_byte_clk);
+	clk_disable(dsi_io_private->dsi_esc_clk);
+	clk_disable(dsi_io_private->dsi_pixel_clk);
+
+	dsi_io_private->msm_dsi_clk_on = 0;
+	return 0;
+}
+
/* Program PHY drive strength; note strength[1] goes to STRENGTH_CTRL_2
 * (STRENGTH_CTRL_1 is intentionally not written here).
 */
static void msm_dsi_phy_strength_init(unsigned char *ctrl_base,
					struct mdss_dsi_phy_ctrl *pd)
{
	MIPI_OUTP(ctrl_base + DSI_DSIPHY_STRENGTH_CTRL_0, pd->strength[0]);
	MIPI_OUTP(ctrl_base + DSI_DSIPHY_STRENGTH_CTRL_2, pd->strength[1]);
}
+
/* Program fixed PHY control values (pdata is currently unused). */
static void msm_dsi_phy_ctrl_init(unsigned char *ctrl_base,
				struct mdss_panel_data *pdata)
{
	MIPI_OUTP(ctrl_base + DSI_DSIPHY_CTRL_0, 0x5f);
	MIPI_OUTP(ctrl_base + DSI_DSIPHY_CTRL_3, 0x10);
}
+
/* Program the PHY LDO/regulator block from the panel's PHY database. */
static void msm_dsi_phy_regulator_init(unsigned char *ctrl_base,
					struct mdss_dsi_phy_ctrl *pd)
{
	MIPI_OUTP(ctrl_base + DSI_DSIPHY_LDO_CNTRL, 0x25);
	MIPI_OUTP(ctrl_base + DSI_DSIPHY_REGULATOR_CTRL_0, pd->regulator[0]);
	MIPI_OUTP(ctrl_base + DSI_DSIPHY_REGULATOR_CTRL_1, pd->regulator[1]);
	MIPI_OUTP(ctrl_base + DSI_DSIPHY_REGULATOR_CTRL_2, pd->regulator[2]);
	MIPI_OUTP(ctrl_base + DSI_DSIPHY_REGULATOR_CTRL_3, pd->regulator[3]);
	MIPI_OUTP(ctrl_base + DSI_DSIPHY_REGULATOR_CTRL_4, pd->regulator[4]);
	MIPI_OUTP(ctrl_base + DSI_DSIPHY_REGULATOR_CAL_PWR_CFG,
			pd->regulator[5]);

}
+
/*
 * Run the PHY hardware calibration sequence and poll for completion.
 * Returns 0 on success, -EINVAL if the busy bit never clears.
 */
static int msm_dsi_phy_calibration(unsigned char *ctrl_base)
{
	int i = 0, term_cnt = 5000, ret = 0, cal_busy;

	/* Configure and pulse the HW calibration trigger. */
	MIPI_OUTP(ctrl_base + DSI_DSIPHY_CAL_SW_CFG2, 0x0);
	MIPI_OUTP(ctrl_base + DSI_DSIPHY_CAL_HW_CFG1, 0x5a);
	MIPI_OUTP(ctrl_base + DSI_DSIPHY_CAL_HW_CFG3, 0x10);
	MIPI_OUTP(ctrl_base + DSI_DSIPHY_CAL_HW_CFG4, 0x01);
	MIPI_OUTP(ctrl_base + DSI_DSIPHY_CAL_HW_CFG0, 0x01);
	MIPI_OUTP(ctrl_base + DSI_DSIPHY_CAL_HW_TRIGGER, 0x01);
	usleep_range(5000, 5100); /*per DSI controller spec*/
	MIPI_OUTP(ctrl_base + DSI_DSIPHY_CAL_HW_TRIGGER, 0x00);

	/* Poll the busy bit (bit 4); give up after term_cnt reads.
	 * NOTE(review): tight register poll with no delay between reads —
	 * confirm this matches the calibration latency expectations.
	 */
	cal_busy = MIPI_INP(ctrl_base + DSI_DSIPHY_REGULATOR_CAL_STATUS0);
	while (cal_busy & 0x10) {
		i++;
		if (i > term_cnt) {
			ret = -EINVAL;
			pr_err("msm_dsi_phy_calibration error\n");
			break;
		}
		cal_busy = MIPI_INP(ctrl_base +
					DSI_DSIPHY_REGULATOR_CAL_STATUS0);
	}

	return ret;
}
+
/*
 * Program per-lane PHY config for the 4 data lanes and the clock lane
 * (lane blocks start at 0x0300 with a 0x40 stride; see DSI_DSIPHY_LN*_CFG*).
 * Six values per lane are written; register offset +16 within each lane
 * block is intentionally skipped.
 */
static void msm_dsi_phy_lane_init(unsigned char *ctrl_base,
			struct mdss_dsi_phy_ctrl *pd)
{
	int ln, index;

	/*CFG0, CFG1, CFG2, TEST_DATAPATH, TEST_STR0, TEST_STR1*/
	for (ln = 0; ln < 5; ln++) {
		unsigned char *off = ctrl_base + 0x0300 + (ln * 0x40);

		index = ln * 6;

		MIPI_OUTP(off, pd->lanecfg[index]);
		MIPI_OUTP(off + 4, pd->lanecfg[index + 1]);
		MIPI_OUTP(off + 8, pd->lanecfg[index + 2]);
		MIPI_OUTP(off + 12, pd->lanecfg[index + 3]);
		MIPI_OUTP(off + 20, pd->lanecfg[index + 4]);
		MIPI_OUTP(off + 24, pd->lanecfg[index + 5]);
	}
	wmb(); /* ensure write is finished before progressing */
}
+
+static void msm_dsi_phy_timing_init(unsigned char *ctrl_base,
+			struct mdss_dsi_phy_ctrl *pd)
+{
+	int i, off = DSI_DSIPHY_TIMING_CTRL_0;
+
+	for (i = 0; i < 12; i++) {
+		MIPI_OUTP(ctrl_base + off, pd->timing[i]);
+		off += 4;
+	}
+	wmb(); /* ensure write is finished before progressing */
+}
+
/* Program PHY BIST control; CTRL4 is written first and cleared last. */
static void msm_dsi_phy_bist_init(unsigned char *ctrl_base,
			struct mdss_dsi_phy_ctrl *pd)
{
	MIPI_OUTP(ctrl_base + DSI_DSIPHY_BIST_CTRL4, pd->bistctrl[4]);
	MIPI_OUTP(ctrl_base + DSI_DSIPHY_BIST_CTRL1, pd->bistctrl[1]);
	MIPI_OUTP(ctrl_base + DSI_DSIPHY_BIST_CTRL0, pd->bistctrl[0]);
	MIPI_OUTP(ctrl_base + DSI_DSIPHY_BIST_CTRL4, 0);
	wmb(); /* ensure write is finished before progressing */
}
+
+int msm_dsi_phy_init(unsigned char *ctrl_base,
+			struct mdss_panel_data *pdata)
+{
+	struct mdss_dsi_phy_ctrl *pd;
+
+	pd = &(pdata->panel_info.mipi.dsi_phy_db);
+
+	msm_dsi_phy_strength_init(ctrl_base, pd);
+
+	msm_dsi_phy_ctrl_init(ctrl_base, pdata);
+
+	msm_dsi_phy_regulator_init(ctrl_base, pd);
+
+	msm_dsi_phy_calibration(ctrl_base);
+
+	msm_dsi_phy_lane_init(ctrl_base, pd);
+
+	msm_dsi_phy_timing_init(ctrl_base, pd);
+
+	msm_dsi_phy_bist_init(ctrl_base, pd);
+
+	return 0;
+}
+
/* Pulse the PHY software reset bit with the delays the spec requires. */
void msm_dsi_phy_sw_reset(unsigned char *ctrl_base)
{
	/* start phy sw reset */
	MIPI_OUTP(ctrl_base + DSI_PHY_SW_RESET, 0x0001);
	udelay(1000); /*per DSI controller spec*/
	wmb(); /* ensure write is finished before progressing */
	/* end phy sw reset */
	MIPI_OUTP(ctrl_base + DSI_PHY_SW_RESET, 0x0000);
	udelay(100); /*per DSI controller spec*/
	wmb(); /* ensure write is finished before progressing */
}
+
/* Power down the PHY (PLL, regulator, lane control) and gate DSI clocks. */
void msm_dsi_phy_off(unsigned char *ctrl_base)
{
	MIPI_OUTP(ctrl_base + DSI_DSIPHY_PLL_CTRL_5, 0x05f);
	MIPI_OUTP(ctrl_base + DSI_DSIPHY_REGULATOR_CTRL_0, 0x02);
	MIPI_OUTP(ctrl_base + DSI_DSIPHY_CTRL_0, 0x00);
	MIPI_OUTP(ctrl_base + DSI_DSIPHY_CTRL_1, 0x7f);
	MIPI_OUTP(ctrl_base + DSI_CLK_CTRL, 0);
}
diff --git a/drivers/video/fbdev/msm/dsi_io_v2.h b/drivers/video/fbdev/msm/dsi_io_v2.h
new file mode 100644
index 0000000..dd9adf9
--- /dev/null
+++ b/drivers/video/fbdev/msm/dsi_io_v2.h
@@ -0,0 +1,49 @@
+/* Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
#ifndef DSI_IO_V2_H
#define DSI_IO_V2_H

#include "mdss_panel.h"

/* Reference-counted enable/disable of the DSI AHB (iface) clock. */
void msm_dsi_ahb_ctrl(int enable);

/* Allocate IO state, acquire clocks, and configure regulators. */
int msm_dsi_io_init(struct platform_device *dev,
				struct dss_module_power *mp);

/* Release clocks/regulators and free the IO state. */
void msm_dsi_io_deinit(struct platform_device *dev,
				struct dss_module_power *mp);

/* Look up all DSI clock handles for @dev. */
int msm_dsi_clk_init(struct platform_device *dev);

void msm_dsi_clk_deinit(void);

/* Prepare/unprepare the DSI link clocks. */
int msm_dsi_prepare_clocks(void);

int msm_dsi_unprepare_clocks(void);

/* Set the rates of the escape, core, byte, and pixel clocks. */
int msm_dsi_clk_set_rate(unsigned long esc_rate,
			unsigned long dsi_rate,
			unsigned long byte_rate,
			unsigned long pixel_rate);

int msm_dsi_clk_enable(void);

int msm_dsi_clk_disable(void);

/* PHY bring-up / reset / power-down entry points. */
int msm_dsi_phy_init(unsigned char *ctrl_base,
			struct mdss_panel_data *pdata);

void msm_dsi_phy_sw_reset(unsigned char *ctrl_base);

void msm_dsi_phy_off(unsigned char *ctrl_base);
#endif /* DSI_IO_V2_H */
diff --git a/drivers/video/fbdev/msm/dsi_status_6g.c b/drivers/video/fbdev/msm/dsi_status_6g.c
new file mode 100644
index 0000000..88bf0aa
--- /dev/null
+++ b/drivers/video/fbdev/msm/dsi_status_6g.c
@@ -0,0 +1,186 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/interrupt.h>
+
+#include "mdss_dsi.h"
+#include "mdss_mdp.h"
+
+/*
+ * mdss_check_te_status() - Check the status of panel for TE based ESD.
+ * @ctrl_pdata   : dsi controller data
+ * @pstatus_data : dsi status data
+ * @interval     : duration in milliseconds for panel TE wait
+ *
+ * This function is called when the TE signal from the panel doesn't arrive
+ * after 'interval' milliseconds. If the TE IRQ is not ready, the workqueue
+ * gets re-scheduled. Otherwise, report the panel to be dead due to ESD attack.
+ */
+static bool mdss_check_te_status(struct mdss_dsi_ctrl_pdata *ctrl_pdata,
+		struct dsi_status_data *pstatus_data, uint32_t interval)
+{
+	bool ret;
+
+	atomic_set(&ctrl_pdata->te_irq_ready, 0);
+	reinit_completion(&ctrl_pdata->te_irq_comp);
+	enable_irq(gpio_to_irq(ctrl_pdata->disp_te_gpio));
+	/* Define TE interrupt timeout value as 3x(1/fps) */
+	ret = wait_for_completion_timeout(&ctrl_pdata->te_irq_comp,
+			msecs_to_jiffies(interval));
+	disable_irq(gpio_to_irq(ctrl_pdata->disp_te_gpio));
+	pr_debug("%s: Panel TE check done with ret = %d\n", __func__, ret);
+	return ret;
+}
+
+/*
+ * mdss_check_dsi_ctrl_status() - Check MDP5 DSI controller status periodically.
+ * @work     : dsi controller status data
+ * @interval : duration in milliseconds to schedule work queue
+ *
+ * This function calls check_status API on DSI controller to send the BTA
+ * command. If DSI controller fails to acknowledge the BTA command, it sends
+ * the PANEL_ALIVE=0 status to HAL layer.
+ */
void mdss_check_dsi_ctrl_status(struct work_struct *work, uint32_t interval)
{
	struct dsi_status_data *pstatus_data = NULL;
	struct mdss_panel_data *pdata = NULL;
	struct mipi_panel_info *mipi = NULL;
	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
	struct mdss_overlay_private *mdp5_data = NULL;
	struct mdss_mdp_ctl *ctl = NULL;
	int ret = 0;

	pstatus_data = container_of(to_delayed_work(work),
		struct dsi_status_data, check_status);
	if (!pstatus_data || !(pstatus_data->mfd)) {
		pr_err("%s: mfd not available\n", __func__);
		return;
	}

	pdata = dev_get_platdata(&pstatus_data->mfd->pdev->dev);
	if (!pdata) {
		pr_err("%s: Panel data not available\n", __func__);
		return;
	}
	mipi = &pdata->panel_info.mipi;

	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
							panel_data);
	/* A check_status callback is mandatory unless ESD is TE based. */
	if (!ctrl_pdata || (!ctrl_pdata->check_status &&
		(ctrl_pdata->status_mode != ESD_TE))) {
		pr_err("%s: DSI ctrl or status_check callback not available\n",
								__func__);
		return;
	}

	/* Unblank not finished yet: retry after another interval. */
	if (!pdata->panel_info.esd_rdy) {
		pr_debug("%s: unblank not complete, reschedule check status\n",
			__func__);
		schedule_delayed_work(&pstatus_data->check_status,
				msecs_to_jiffies(interval));
		return;
	}

	mdp5_data = mfd_to_mdp5_data(pstatus_data->mfd);
	ctl = mfd_to_ctl(pstatus_data->mfd);

	if (!ctl) {
		pr_err("%s: Display is off\n", __func__);
		return;
	}

	/* TE-based ESD: wait roughly 3 vsync periods for the panel TE.
	 * NOTE(review): assumes fps > 0 — confirm
	 * mdss_panel_get_framerate() cannot return 0 here.
	 */
	if (ctrl_pdata->status_mode == ESD_TE) {
		uint32_t fps = mdss_panel_get_framerate(&pdata->panel_info,
							FPS_RESOLUTION_HZ);
		uint32_t timeout = ((1000 / fps) + 1) *
					MDSS_STATUS_TE_WAIT_MAX;

		if (mdss_check_te_status(ctrl_pdata, pstatus_data, timeout))
			goto sim;
		else
			goto status_dead;
	}

	/*
	 * TODO: Because mdss_dsi_cmd_mdp_busy has made sure DMA to
	 * be idle in mdss_dsi_cmdlist_commit, it is not necessary
	 * to acquire ov_lock in case of video mode. Removing this
	 * lock to fix issues so that ESD thread would not block other
	 * overlay operations. Need refine this lock for command mode
	 *
	 * If Burst mode is enabled then we dont have to acquire ov_lock as
	 * command and data arbitration is possible in h/w
	 */

	if ((mipi->mode == DSI_CMD_MODE) && !ctrl_pdata->burst_mode_enabled)
		mutex_lock(&mdp5_data->ov_lock);
	mutex_lock(&ctl->offlock);

	/* Display going down: drop locks (in acquisition-reverse order)
	 * and skip the BTA check entirely.
	 */
	if (mdss_panel_is_power_off(pstatus_data->mfd->panel_power_state) ||
			pstatus_data->mfd->shutdown_pending) {
		mutex_unlock(&ctl->offlock);
		if ((mipi->mode == DSI_CMD_MODE) &&
		    !ctrl_pdata->burst_mode_enabled)
			mutex_unlock(&mdp5_data->ov_lock);
		pr_err("%s: DSI turning off, avoiding panel status check\n",
							__func__);
		return;
	}

	/*
	 * For the command mode panels, we return pan display
	 * IOCTL on vsync interrupt. So, after vsync interrupt comes
	 * and when DMA_P is in progress, if the panel stops responding
	 * and if we trigger BTA before DMA_P finishes, then the DSI
	 * FIFO will not be cleared since the DSI data bus control
	 * doesn't come back to the host after BTA. This may cause the
	 * display reset not to be proper. Hence, wait for DMA_P done
	 * for command mode panels before triggering BTA.
	 */
	if (ctl->ops.wait_pingpong && !ctrl_pdata->burst_mode_enabled)
		ctl->ops.wait_pingpong(ctl, NULL);

	pr_debug("%s: DSI ctrl wait for ping pong done\n", __func__);

	/* Run the BTA status check with MDP clocks held on. */
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
	ret = ctrl_pdata->check_status(ctrl_pdata);
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);

	mutex_unlock(&ctl->offlock);
	if ((mipi->mode == DSI_CMD_MODE) && !ctrl_pdata->burst_mode_enabled)
		mutex_unlock(&mdp5_data->ov_lock);

	/* Panel alive (ret > 0): re-arm the periodic check; otherwise dead. */
	if (pstatus_data->mfd->panel_power_state == MDSS_PANEL_POWER_ON) {
		if (ret > 0)
			schedule_delayed_work(&pstatus_data->check_status,
				msecs_to_jiffies(interval));
		else
			goto status_dead;
	}
sim:
	/* Debug hook: panel_force_dead counts down to a forced dead report. */
	if (pdata->panel_info.panel_force_dead) {
		pr_debug("force_dead=%d\n", pdata->panel_info.panel_force_dead);
		pdata->panel_info.panel_force_dead--;
		if (!pdata->panel_info.panel_force_dead)
			goto status_dead;
	}

	return;

status_dead:
	mdss_fb_report_panel_dead(pstatus_data->mfd);
}
diff --git a/drivers/video/fbdev/msm/dsi_status_v2.c b/drivers/video/fbdev/msm/dsi_status_v2.c
new file mode 100644
index 0000000..35b0984
--- /dev/null
+++ b/drivers/video/fbdev/msm/dsi_status_v2.c
@@ -0,0 +1,167 @@
+/* Copyright (c) 2013-2015, 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/kobject.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/interrupt.h>
+
+#include "mdss_dsi.h"
+#include "mdp3_ctrl.h"
+
+/*
+ * mdp3_check_te_status() - Check the status of panel for TE based ESD.
+ * @ctrl_pdata   : dsi controller data
+ * @pstatus_data : dsi status data
+ * @interval     : duration in milliseconds for panel TE wait
+ *
+ * This function waits for TE signal from the panel for a maximum
+ * duration of 3 vsyncs. If timeout occurs, report the panel to be
+ * dead due to ESD attack.
+ * NOTE: The TE IRQ handling is linked to the ESD thread scheduling,
+ * i.e. rate of TE IRQs firing is bound by the ESD interval.
+ */
+static int mdp3_check_te_status(struct mdss_dsi_ctrl_pdata *ctrl_pdata,
+		struct dsi_status_data *pstatus_data, uint32_t interval)
+{
+	int ret;
+
+	pr_debug("%s: Checking panel TE status\n", __func__);
+
+	atomic_set(&ctrl_pdata->te_irq_ready, 0);
+	reinit_completion(&ctrl_pdata->te_irq_comp);
+	enable_irq(gpio_to_irq(ctrl_pdata->disp_te_gpio));
+
+	ret = wait_for_completion_timeout(&ctrl_pdata->te_irq_comp,
+			msecs_to_jiffies(interval));
+
+	disable_irq(gpio_to_irq(ctrl_pdata->disp_te_gpio));
+	pr_debug("%s: Panel TE check done with ret = %d\n", __func__, ret);
+
+	return ret;
+}
+
+/*
+ * mdp3_check_dsi_ctrl_status() - Check MDP3 DSI controller status periodically.
+ * @work     : dsi controller status data
+ * @interval : duration in milliseconds to schedule work queue
+ *
+ * This function calls check_status API on DSI controller to send the BTA
+ * command. If DSI controller fails to acknowledge the BTA command, it sends
+ * the PANEL_ALIVE=0 status to HAL layer.
+ */
void mdp3_check_dsi_ctrl_status(struct work_struct *work,
				uint32_t interval)
{
	struct dsi_status_data *pdsi_status = NULL;
	struct mdss_panel_data *pdata = NULL;
	struct mipi_panel_info *mipi = NULL;
	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
	struct mdp3_session_data *mdp3_session = NULL;
	int ret = 0;

	pdsi_status = container_of(to_delayed_work(work),
	struct dsi_status_data, check_status);

	if (!pdsi_status || !(pdsi_status->mfd)) {
		pr_err("%s: mfd not available\n", __func__);
		return;
	}

	pdata = dev_get_platdata(&pdsi_status->mfd->pdev->dev);
	if (!pdata) {
		pr_err("%s: Panel data not available\n", __func__);
		return;
	}

	mipi = &pdata->panel_info.mipi;
	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
							panel_data);

	/* A check_status callback is mandatory unless ESD is TE based. */
	if (!ctrl_pdata || (!ctrl_pdata->check_status &&
		(ctrl_pdata->status_mode != ESD_TE))) {
		pr_err("%s: DSI ctrl or status_check callback not available\n",
								__func__);
		return;
	}

	/* Unblank not finished yet: retry after another interval. */
	if (!pdata->panel_info.esd_rdy) {
		pr_err("%s: unblank not complete, reschedule check status\n",
			__func__);
		schedule_delayed_work(&pdsi_status->check_status,
				msecs_to_jiffies(interval));
		return;
	}

	mdp3_session = pdsi_status->mfd->mdp.private1;
	if (!mdp3_session) {
		pr_err("%s: Display is off\n", __func__);
		return;
	}

	/* Continuous splash still active: skip the check and reschedule. */
	if (mdp3_session->in_splash_screen) {
		schedule_delayed_work(&pdsi_status->check_status,
			msecs_to_jiffies(interval));
		pr_debug("%s: cont splash is on\n", __func__);
		return;
	}

	/* TE-based ESD: wait roughly 3 vsync periods for the panel TE.
	 * NOTE(review): assumes fps > 0 — confirm
	 * mdss_panel_get_framerate() cannot return 0 here.
	 */
	if (mipi->mode == DSI_CMD_MODE &&
		mipi->hw_vsync_mode &&
		mdss_dsi_is_te_based_esd(ctrl_pdata)) {
		uint32_t fps = mdss_panel_get_framerate(&pdata->panel_info,
					FPS_RESOLUTION_HZ);
		uint32_t timeout = ((1000 / fps) + 1) *
					MDSS_STATUS_TE_WAIT_MAX;

		if (mdp3_check_te_status(ctrl_pdata, pdsi_status, timeout) > 0)
			goto sim;
		goto status_dead;
	}

	/* BTA path: wait for any in-flight DMA under the session lock. */
	mutex_lock(&mdp3_session->lock);
	if (!mdp3_session->status) {
		pr_debug("%s: display off already\n", __func__);
		mutex_unlock(&mdp3_session->lock);
		return;
	}

	if (mdp3_session->wait_for_dma_done)
		ret = mdp3_session->wait_for_dma_done(mdp3_session);
	mutex_unlock(&mdp3_session->lock);

	if (!ret)
		ret = ctrl_pdata->check_status(ctrl_pdata);
	else
		pr_err("%s: wait_for_dma_done error\n", __func__);

	/* Panel alive (ret > 0): re-arm the periodic check; otherwise dead. */
	if (mdss_fb_is_power_on_interactive(pdsi_status->mfd)) {
		if (ret > 0)
			schedule_delayed_work(&pdsi_status->check_status,
						msecs_to_jiffies(interval));
		else
			goto status_dead;
	}
sim:
	/* Debug hook: panel_force_dead counts down to a forced dead report. */
	if (pdata->panel_info.panel_force_dead) {
		pr_debug("force_dead=%d\n", pdata->panel_info.panel_force_dead);
		pdata->panel_info.panel_force_dead--;
		if (!pdata->panel_info.panel_force_dead)
			goto status_dead;
	}
	return;

status_dead:
	mdss_fb_report_panel_dead(pdsi_status->mfd);
}
+
diff --git a/drivers/video/fbdev/msm/dsi_v2.c b/drivers/video/fbdev/msm/dsi_v2.c
new file mode 100644
index 0000000..92d512a
--- /dev/null
+++ b/drivers/video/fbdev/msm/dsi_v2.c
@@ -0,0 +1,619 @@
+/* Copyright (c) 2012-2015, 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/iopoll.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+
+#include "dsi_v2.h"
+
+static struct dsi_interface dsi_intf;
+
+/* Turn the DSI controller off through the registered interface hook. */
+static int dsi_off(struct mdss_panel_data *pdata)
+{
+	int rc;
+
+	pr_debug("turn off dsi controller\n");
+	rc = dsi_intf.off ? dsi_intf.off(pdata) : 0;
+	if (rc)
+		pr_err("mdss_dsi_off DSI failed %d\n", rc);
+
+	return rc;
+}
+
+/* Turn the DSI controller on through the registered interface hook. */
+static int dsi_on(struct mdss_panel_data *pdata)
+{
+	int rc;
+
+	pr_debug("dsi_on DSI controller on\n");
+	rc = dsi_intf.on ? dsi_intf.on(pdata) : 0;
+	if (rc)
+		pr_err("mdss_dsi_on DSI failed %d\n", rc);
+
+	return rc;
+}
+
+/*
+ * dsi_update_pconfig() - rewrite panel_info for a video<->command mode
+ * switch and recompute the DSI destination format.
+ * @mode: DSI_CMD_MODE or DSI_VIDEO_MODE.
+ *
+ * Returns 0 on success, -ENODEV when @pdata is NULL.
+ */
+static int dsi_update_pconfig(struct mdss_panel_data *pdata,
+				int mode)
+{
+	struct mdss_panel_info *pinfo;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+	/* Validate pdata BEFORE taking the address of its members; the
+	 * original derived pinfo from pdata ahead of the NULL check.
+	 */
+	if (!pdata)
+		return -ENODEV;
+
+	pinfo = &pdata->panel_info;
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	if (mode == DSI_CMD_MODE) {
+		pinfo->mipi.mode = DSI_CMD_MODE;
+		pinfo->type = MIPI_CMD_PANEL;
+		pinfo->mipi.vsync_enable = 1;
+		pinfo->mipi.hw_vsync_mode = 1;
+	} else {
+		pinfo->mipi.mode = DSI_VIDEO_MODE;
+		pinfo->type = MIPI_VIDEO_PANEL;
+		pinfo->mipi.vsync_enable = 0;
+		pinfo->mipi.hw_vsync_mode = 0;
+	}
+
+	ctrl_pdata->panel_mode = pinfo->mipi.mode;
+	mdss_panel_get_dst_fmt(pinfo->bpp, pinfo->mipi.mode,
+			pinfo->mipi.pixel_packing, &(pinfo->mipi.dst_format));
+	/* splash frame buffer contents are invalid after a mode switch */
+	pinfo->cont_splash_enabled = 0;
+
+	return 0;
+}
+
+/*
+ * dsi_panel_handler() - power the panel on or off and complete any
+ * pending dynamic video<->command mode switch.
+ * @enable: 1 to power on, 0 to power off.
+ *
+ * Returns 0 on success or the panel driver's error code.
+ */
+static int dsi_panel_handler(struct mdss_panel_data *pdata, int enable)
+{
+	int rc = 0;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+	pr_debug("dsi_panel_handler enable=%d\n", enable);
+	if (!pdata)
+		return -ENODEV;
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	if (enable &&
+		(pdata->panel_info.panel_power_state == MDSS_PANEL_POWER_OFF)) {
+		/* during a mode switch the full on-sequence is skipped */
+		if (!pdata->panel_info.dynamic_switch_pending) {
+			mdss_dsi_panel_reset(pdata, 1);
+			rc = ctrl_pdata->on(pdata);
+			if (rc)
+				pr_err("dsi_panel_handler panel on failed %d\n",
+									rc);
+		}
+		pdata->panel_info.panel_power_state = MDSS_PANEL_POWER_ON;
+		if (pdata->panel_info.type == MIPI_CMD_PANEL)
+			mdss_dsi_set_tear_on(ctrl_pdata);
+	} else if (!enable &&
+		(pdata->panel_info.panel_power_state == MDSS_PANEL_POWER_ON)) {
+		msm_dsi_sw_reset();
+		/* drop to command mode so switch/off commands can be sent */
+		if (dsi_intf.op_mode_config)
+			dsi_intf.op_mode_config(DSI_CMD_MODE, pdata);
+		if (pdata->panel_info.dynamic_switch_pending) {
+			pr_info("%s: switching to %s mode\n", __func__,
+			(pdata->panel_info.mipi.mode ? "video" : "command"));
+			if (pdata->panel_info.type == MIPI_CMD_PANEL) {
+				ctrl_pdata->switch_mode(pdata, DSI_VIDEO_MODE);
+			} else if (pdata->panel_info.type == MIPI_VIDEO_PANEL) {
+				ctrl_pdata->switch_mode(pdata, DSI_CMD_MODE);
+				mdss_dsi_set_tear_off(ctrl_pdata);
+			}
+		}
+		pdata->panel_info.panel_power_state = MDSS_PANEL_POWER_OFF;
+		/* full off-sequence only when not mid mode-switch */
+		if (!pdata->panel_info.dynamic_switch_pending) {
+			rc = ctrl_pdata->off(pdata);
+			mdss_dsi_panel_reset(pdata, 0);
+		}
+	}
+	return rc;
+}
+
+/* Continuous-splash handover: keep the bootloader-lit panel running. */
+static int dsi_splash_on(struct mdss_panel_data *pdata)
+{
+	int rc;
+
+	pr_debug("%s:\n", __func__);
+
+	rc = dsi_intf.cont_on ? dsi_intf.cont_on(pdata) : 0;
+	if (rc)
+		pr_err("mdss_dsi_on DSI failed %d\n", rc);
+
+	return rc;
+}
+
+/* Forward a DSI clock enable/disable request to the interface hook. */
+static int dsi_clk_ctrl(struct mdss_panel_data *pdata, int enable)
+{
+	pr_debug("%s:\n", __func__);
+
+	if (!dsi_intf.clk_ctrl)
+		return 0;
+
+	return dsi_intf.clk_ctrl(pdata, enable);
+}
+
+/*
+ * dsi_event_handler() - MDSS panel event dispatcher for the DSI v2 path.
+ * Installed as panel_data.event_handler in
+ * dsi_panel_device_register_v2(); maps framework events onto the static
+ * dsi_* helpers above.
+ *
+ * Returns 0, a helper's error code, or -ENODEV for NULL @pdata.
+ */
+static int dsi_event_handler(struct mdss_panel_data *pdata,
+				int event, void *arg)
+{
+	int rc = 0;
+
+	if (!pdata) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -ENODEV;
+	}
+
+	switch (event) {
+	case MDSS_EVENT_UNBLANK:
+		rc = dsi_on(pdata);
+		break;
+	case MDSS_EVENT_BLANK:
+		rc = dsi_off(pdata);
+		break;
+	case MDSS_EVENT_PANEL_ON:
+		rc = dsi_panel_handler(pdata, 1);
+		break;
+	case MDSS_EVENT_PANEL_OFF:
+		rc = dsi_panel_handler(pdata, 0);
+		break;
+	case MDSS_EVENT_CONT_SPLASH_BEGIN:
+		rc = dsi_splash_on(pdata);
+		break;
+	case MDSS_EVENT_PANEL_CLK_CTRL:
+		/* arg carries a struct dsi_panel_clk_ctrl; only state is used */
+		rc = dsi_clk_ctrl(pdata,
+			(int)(((struct dsi_panel_clk_ctrl *)arg)->state));
+		break;
+	case MDSS_EVENT_DSI_UPDATE_PANEL_DATA:
+		rc = dsi_update_pconfig(pdata, (int)(unsigned long) arg);
+		break;
+	default:
+		/* unknown events are ignored, not treated as errors */
+		pr_debug("%s: unhandled event=%d\n", __func__, event);
+		break;
+	}
+	return rc;
+}
+
+/*
+ * dsi_parse_gpio() - read panel control GPIOs from the controller's DT
+ * node. A missing GPIO is logged but never fatal; always returns 0.
+ */
+static int dsi_parse_gpio(struct platform_device *pdev,
+				struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	struct device_node *np = pdev->dev.of_node;
+
+	ctrl_pdata->disp_en_gpio = of_get_named_gpio(np,
+		"qcom,platform-enable-gpio", 0);
+
+	if (!gpio_is_valid(ctrl_pdata->disp_en_gpio))
+		pr_err("%s:%d, Disp_en gpio not specified\n",
+						__func__, __LINE__);
+
+	ctrl_pdata->rst_gpio = of_get_named_gpio(np,
+					"qcom,platform-reset-gpio", 0);
+	if (!gpio_is_valid(ctrl_pdata->rst_gpio))
+		pr_err("%s:%d, reset gpio not specified\n",
+						__func__, __LINE__);
+
+	/* mode gpio is optional; only looked up when the panel uses one */
+	ctrl_pdata->mode_gpio = -1;
+	if (ctrl_pdata->panel_data.panel_info.mode_gpio_state !=
+						MODE_GPIO_NOT_VALID) {
+		ctrl_pdata->mode_gpio = of_get_named_gpio(np,
+						"qcom,platform-mode-gpio", 0);
+		if (!gpio_is_valid(ctrl_pdata->mode_gpio))
+			/* fixed: message was copy-pasted from the reset gpio */
+			pr_info("%s:%d, mode gpio not specified\n",
+							__func__, __LINE__);
+	}
+
+	ctrl_pdata->bklt_en_gpio = of_get_named_gpio(np,
+					"qcom,platform-bklight-en-gpio", 0);
+	if (!gpio_is_valid(ctrl_pdata->bklt_en_gpio))
+		pr_err("%s:%d, bklt_en gpio not specified\n",
+						__func__, __LINE__);
+
+	return 0;
+}
+
+/* Release the devm-allocated vreg table and reset the regulator count. */
+static void mdss_dsi_put_dt_vreg_data(struct device *dev,
+	struct dss_module_power *module_power)
+{
+	if (!module_power) {
+		pr_err("%s: invalid input\n", __func__);
+		return;
+	}
+
+	if (module_power->vreg_config != NULL) {
+		devm_kfree(dev, module_power->vreg_config);
+		module_power->vreg_config = NULL;
+	}
+
+	module_power->num_vreg = 0;
+}
+
+/*
+ * mdss_dsi_get_dt_vreg_data() - parse the regulator supply subtree for
+ * power module @module into @mp.
+ *
+ * Counts the children of the "<pm>-supply-entries" node, allocates the
+ * vreg table with devm, and fills one dss_vreg per child. Sleep values
+ * are optional; name/voltage/load properties are mandatory.
+ *
+ * Returns 0 on success (mp->num_vreg may be 0 when no supplies exist),
+ * or a negative errno; on failure the table is freed and num_vreg reset.
+ */
+static int mdss_dsi_get_dt_vreg_data(struct device *dev,
+	struct dss_module_power *mp, enum dsi_pm_type module)
+{
+	int i = 0, rc = 0;
+	u32 tmp = 0;
+	struct device_node *of_node = NULL, *supply_node = NULL;
+	const char *pm_supply_name = NULL;
+	struct device_node *supply_root_node = NULL;
+
+	if (!dev || !mp) {
+		pr_err("%s: invalid input\n", __func__);
+		rc = -EINVAL;
+		return rc;
+	}
+
+	of_node = dev->of_node;
+
+	mp->num_vreg = 0;
+	pm_supply_name = __mdss_dsi_pm_supply_node_name(module);
+	supply_root_node = of_get_child_by_name(of_node, pm_supply_name);
+	if (!supply_root_node) {
+		pr_err("no supply entry present\n");
+		goto novreg;
+	}
+
+	/* first pass: count supplies so the table can be sized */
+	for_each_child_of_node(supply_root_node, supply_node) {
+		mp->num_vreg++;
+	}
+
+	if (mp->num_vreg == 0) {
+		pr_debug("%s: no vreg\n", __func__);
+		goto novreg;
+	} else {
+		pr_debug("%s: vreg found. count=%d\n", __func__, mp->num_vreg);
+	}
+
+	mp->vreg_config = devm_kzalloc(dev, sizeof(struct dss_vreg) *
+		mp->num_vreg, GFP_KERNEL);
+	if (!mp->vreg_config) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	/* second pass: fill one dss_vreg per supply child node */
+	for_each_child_of_node(supply_root_node, supply_node) {
+		const char *st = NULL;
+		/* vreg-name */
+		rc = of_property_read_string(supply_node,
+			"qcom,supply-name", &st);
+		if (rc) {
+			pr_err("%s: error reading name. rc=%d\n",
+				__func__, rc);
+			goto error;
+		}
+		snprintf(mp->vreg_config[i].vreg_name,
+			ARRAY_SIZE((mp->vreg_config[i].vreg_name)), "%s", st);
+		/* vreg-min-voltage */
+		rc = of_property_read_u32(supply_node,
+			"qcom,supply-min-voltage", &tmp);
+		if (rc) {
+			pr_err("%s: error reading min volt. rc=%d\n",
+				__func__, rc);
+			goto error;
+		}
+		mp->vreg_config[i].min_voltage = tmp;
+
+		/* vreg-max-voltage */
+		rc = of_property_read_u32(supply_node,
+			"qcom,supply-max-voltage", &tmp);
+		if (rc) {
+			pr_err("%s: error reading max volt. rc=%d\n",
+				__func__, rc);
+			goto error;
+		}
+		mp->vreg_config[i].max_voltage = tmp;
+
+		/* enable-load */
+		rc = of_property_read_u32(supply_node,
+			"qcom,supply-enable-load", &tmp);
+		if (rc) {
+			pr_err("%s: error reading enable load. rc=%d\n",
+				__func__, rc);
+			goto error;
+		}
+		mp->vreg_config[i].load[DSS_REG_MODE_ENABLE] = tmp;
+
+		/* disable-load */
+		rc = of_property_read_u32(supply_node,
+			"qcom,supply-disable-load", &tmp);
+		if (rc) {
+			pr_err("%s: error reading disable load. rc=%d\n",
+				__func__, rc);
+			goto error;
+		}
+		mp->vreg_config[i].load[DSS_REG_MODE_DISABLE] = tmp;
+
+		/* ulp-load: optional, falls back to the enable load */
+		rc = of_property_read_u32(supply_node,
+			"qcom,supply-ulp-load", &tmp);
+		if (rc)
+			pr_warn("%s: error reading ulp load. rc=%d\n",
+				__func__, rc);
+
+		mp->vreg_config[i].load[DSS_REG_MODE_ULP] = (!rc ? tmp :
+			mp->vreg_config[i].load[DSS_REG_MODE_ENABLE]);
+
+		/* pre-sleep (optional) */
+		rc = of_property_read_u32(supply_node,
+			"qcom,supply-pre-on-sleep", &tmp);
+		if (rc) {
+			pr_debug("%s: error reading supply pre sleep value. rc=%d\n",
+				__func__, rc);
+			rc = 0;
+		} else {
+			mp->vreg_config[i].pre_on_sleep = tmp;
+		}
+
+		rc = of_property_read_u32(supply_node,
+			"qcom,supply-pre-off-sleep", &tmp);
+		if (rc) {
+			pr_debug("%s: error reading supply pre sleep value. rc=%d\n",
+				__func__, rc);
+			rc = 0;
+		} else {
+			mp->vreg_config[i].pre_off_sleep = tmp;
+		}
+
+		/* post-sleep (optional) */
+		rc = of_property_read_u32(supply_node,
+			"qcom,supply-post-on-sleep", &tmp);
+		if (rc) {
+			pr_debug("%s: error reading supply post sleep value. rc=%d\n",
+				__func__, rc);
+			rc = 0;
+		} else {
+			mp->vreg_config[i].post_on_sleep = tmp;
+		}
+
+		rc = of_property_read_u32(supply_node,
+			"qcom,supply-post-off-sleep", &tmp);
+		if (rc) {
+			pr_debug("%s: error reading supply post sleep value. rc=%d\n",
+				__func__, rc);
+			rc = 0;
+		} else {
+			mp->vreg_config[i].post_off_sleep = tmp;
+		}
+
+		/* fixed: the argument list was missing commas after the
+		 * three load[] entries, which does not compile.
+		 */
+		pr_debug("%s: %s min=%d, max=%d, enable=%d, disable=%d, ulp=%d, preonsleep=%d, postonsleep=%d, preoffsleep=%d, postoffsleep=%d\n",
+			__func__,
+			mp->vreg_config[i].vreg_name,
+			mp->vreg_config[i].min_voltage,
+			mp->vreg_config[i].max_voltage,
+			mp->vreg_config[i].load[DSS_REG_MODE_ENABLE],
+			mp->vreg_config[i].load[DSS_REG_MODE_DISABLE],
+			mp->vreg_config[i].load[DSS_REG_MODE_ULP],
+			mp->vreg_config[i].pre_on_sleep,
+			mp->vreg_config[i].post_on_sleep,
+			mp->vreg_config[i].pre_off_sleep,
+			mp->vreg_config[i].post_off_sleep
+			);
+		++i;
+	}
+
+	return rc;
+
+error:
+	if (mp->vreg_config) {
+		devm_kfree(dev, mp->vreg_config);
+		mp->vreg_config = NULL;
+	}
+novreg:
+	mp->num_vreg = 0;
+
+	return rc;
+}
+
+/*
+ * dsi_parse_phy() - copy the four DSI PHY byte-array settings from DT
+ * into the panel's phy database.
+ *
+ * Each property must have the exact byte length checked below
+ * (regulator: 6, strength: 2, bist: 6, lane config: 30); anything else
+ * is rejected with -EINVAL. Returns 0 on success.
+ */
+static int dsi_parse_phy(struct platform_device *pdev,
+				struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	struct device_node *np = pdev->dev.of_node;
+	int i, len;
+	const char *data;
+	struct mdss_dsi_phy_ctrl *phy_db
+		= &(ctrl_pdata->panel_data.panel_info.mipi.dsi_phy_db);
+
+	data = of_get_property(np, "qcom,platform-regulator-settings", &len);
+	if ((!data) || (len != 6)) {
+		pr_err("%s:%d, Unable to read Phy regulator settings",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+	for (i = 0; i < len; i++)
+		phy_db->regulator[i] = data[i];
+
+	data = of_get_property(np, "qcom,platform-strength-ctrl", &len);
+	if ((!data) || (len != 2)) {
+		pr_err("%s:%d, Unable to read Phy Strength ctrl settings",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+	phy_db->strength[0] = data[0];
+	phy_db->strength[1] = data[1];
+
+	data = of_get_property(np, "qcom,platform-bist-ctrl", &len);
+	if ((!data) || (len != 6)) {
+		pr_err("%s:%d, Unable to read Phy Bist Ctrl settings",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+	for (i = 0; i < len; i++)
+		phy_db->bistctrl[i] = data[i];
+
+	data = of_get_property(np, "qcom,platform-lane-config", &len);
+	if ((!data) || (len != 30)) {
+		pr_err("%s:%d, Unable to read Phy lane configure settings",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+	for (i = 0; i < len; i++)
+		phy_db->lanecfg[i] = data[i];
+
+	return 0;
+}
+
+/* Undo dsi_ctrl_config_init(): drop vreg data for every power module,
+ * in reverse order of initialization.
+ */
+void dsi_ctrl_config_deinit(struct platform_device *pdev,
+				struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	int i = DSI_MAX_PM;
+
+	while (--i >= 0)
+		mdss_dsi_put_dt_vreg_data(&pdev->dev,
+			&ctrl_pdata->power_data[i]);
+}
+
+/*
+ * dsi_ctrl_config_init() - parse all DT configuration for the DSI
+ * controller: per-module regulator data, panel GPIOs and PHY settings.
+ *
+ * Returns 0 on success or the first helper's negative errno. On
+ * failure, already-parsed vreg data is left for
+ * dsi_ctrl_config_deinit() to release.
+ */
+int dsi_ctrl_config_init(struct platform_device *pdev,
+				struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	int rc = 0, i;
+
+	for (i = 0; i < DSI_MAX_PM; i++) {
+		rc = mdss_dsi_get_dt_vreg_data(&pdev->dev,
+			&ctrl_pdata->power_data[i], i);
+		if (rc) {
+			DEV_ERR("%s: '%s' get_dt_vreg_data failed.rc=%d\n",
+				__func__, __mdss_dsi_pm_name(i), rc);
+			return rc;
+		}
+	}
+
+	rc = dsi_parse_gpio(pdev, ctrl_pdata);
+	if (rc) {
+		pr_err("fail to parse panel GPIOs\n");
+		return rc;
+	}
+
+	rc = dsi_parse_phy(pdev, ctrl_pdata);
+	if (rc) {
+		pr_err("fail to parse DSI PHY settings\n");
+		return rc;
+	}
+
+	return 0;
+}
+/*
+ * dsi_panel_device_register_v2() - finalize panel_info (panel type,
+ * derived pixel clock) and register the panel with the MDP driver.
+ *
+ * Returns 0 on success or mdss_register_panel()'s error code.
+ */
+int dsi_panel_device_register_v2(struct platform_device *dev,
+				struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	struct mipi_panel_info *mipi;
+	int rc;
+	u8 lanes = 0, bpp;
+	u32 h_period, v_period;
+	struct mdss_panel_info *pinfo = &(ctrl_pdata->panel_data.panel_info);
+
+	/* total line/frame length in pixels incl. sync and porches */
+	h_period = ((pinfo->lcdc.h_pulse_width)
+			+ (pinfo->lcdc.h_back_porch)
+			+ (pinfo->xres)
+			+ (pinfo->lcdc.h_front_porch));
+
+	v_period = ((pinfo->lcdc.v_pulse_width)
+			+ (pinfo->lcdc.v_back_porch)
+			+ (pinfo->yres)
+			+ (pinfo->lcdc.v_front_porch));
+
+	mipi  = &pinfo->mipi;
+
+	pinfo->type =
+		((mipi->mode == DSI_VIDEO_MODE)
+			? MIPI_VIDEO_PANEL : MIPI_CMD_PANEL);
+
+	/* count active data lanes */
+	if (mipi->data_lane3)
+		lanes += 1;
+	if (mipi->data_lane2)
+		lanes += 1;
+	if (mipi->data_lane1)
+		lanes += 1;
+	if (mipi->data_lane0)
+		lanes += 1;
+
+	/* bytes per pixel from the DSI destination format */
+	if ((mipi->dst_format == DSI_CMD_DST_FORMAT_RGB888)
+		|| (mipi->dst_format == DSI_VIDEO_DST_FORMAT_RGB888)
+		|| (mipi->dst_format == DSI_VIDEO_DST_FORMAT_RGB666_LOOSE))
+		bpp = 3;
+	else if ((mipi->dst_format == DSI_CMD_DST_FORMAT_RGB565)
+		|| (mipi->dst_format == DSI_VIDEO_DST_FORMAT_RGB565))
+		bpp = 2;
+	else
+		bpp = 3; /* Default format set to RGB888 */
+
+	/* derive the bit clock (pixels * bytes * 8 bits, split per lane)
+	 * only when DT did not provide an explicit clock rate
+	 */
+	if (pinfo->type == MIPI_VIDEO_PANEL &&
+		!pinfo->clk_rate) {
+		h_period += pinfo->lcdc.xres_pad;
+		v_period += pinfo->lcdc.yres_pad;
+
+		if (lanes > 0) {
+			pinfo->clk_rate =
+			((h_period * v_period * (mipi->frame_rate) * bpp * 8)
+			   / lanes);
+		} else {
+			pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
+			pinfo->clk_rate =
+				(h_period * v_period
+					 * (mipi->frame_rate) * bpp * 8);
+		}
+	}
+
+	ctrl_pdata->panel_data.event_handler = dsi_event_handler;
+
+	/*
+	 * register in mdp driver
+	 */
+	rc = mdss_register_panel(dev, &(ctrl_pdata->panel_data));
+	if (rc) {
+		dev_err(&dev->dev, "unable to register MIPI DSI panel\n");
+		return rc;
+	}
+
+	pr_debug("%s: Panal data initialized\n", __func__);
+	return 0;
+}
+
+/*
+ * dsi_register_interface() - install the controller ops used by the
+ * static dsi_* wrappers. The struct is copied by value, so the caller's
+ * instance need not stay alive.
+ */
+void dsi_register_interface(struct dsi_interface *intf)
+{
+	dsi_intf = *intf;
+}
+
+/*
+ * dsi_buf_alloc() - allocate a @size byte DSI command buffer and
+ * initialize the dsi_buf bookkeeping fields.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure, or -EINVAL when
+ * the allocation is not 8-byte aligned (the buffer is freed on that
+ * path; the original leaked it).
+ */
+int dsi_buf_alloc(struct dsi_buf *dp, int size)
+{
+	dp->start = kzalloc(size, GFP_KERNEL);
+	if (dp->start == NULL) {
+		pr_err("%s:%u\n", __func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	dp->end = dp->start + size;
+	dp->size = size;
+
+	/* (unsigned long) instead of (int): an int cast truncates the
+	 * pointer on 64-bit targets
+	 */
+	if ((unsigned long)dp->start & 0x07) {
+		pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
+		kfree(dp->start);
+		dp->start = NULL;
+		return -EINVAL;
+	}
+
+	dp->data = dp->start;
+	dp->len = 0;
+	return 0;
+}
+
diff --git a/drivers/video/fbdev/msm/dsi_v2.h b/drivers/video/fbdev/msm/dsi_v2.h
new file mode 100644
index 0000000..2f6f404
--- /dev/null
+++ b/drivers/video/fbdev/msm/dsi_v2.h
@@ -0,0 +1,56 @@
+/* Copyright (c) 2012-2014, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef DSI_V2_H
+#define DSI_V2_H
+
+#include <linux/list.h>
+#include <mach/scm-io.h>
+
+#include "mdss_dsi.h"
+#include "mdss_panel.h"
+
+#define DSI_BUF_SIZE	1024
+#define DSI_MRPS	0x04  /* Maximum Return Packet Size */
+
+/*
+ * Controller operations registered by the DSI v2 hardware layer via
+ * dsi_register_interface(); dsi_v2.c dispatches panel events to these.
+ */
+struct dsi_interface {
+	int (*on)(struct mdss_panel_data *pdata);
+	int (*off)(struct mdss_panel_data *pdata);
+	int (*cont_on)(struct mdss_panel_data *pdata);
+	int (*clk_ctrl)(struct mdss_panel_data *pdata, int enable);
+	void (*op_mode_config)(int mode, struct mdss_panel_data *pdata);
+	int index;
+	void *private;
+};
+
+/* Finalize panel_info and register the panel with the MDP driver. */
+int dsi_panel_device_register_v2(struct platform_device *pdev,
+				struct mdss_dsi_ctrl_pdata *ctrl_pdata);
+
+/* Install the controller ops (copied by value). */
+void dsi_register_interface(struct dsi_interface *intf);
+
+int dsi_buf_alloc(struct dsi_buf *dp, int size);
+
+void dsi_set_tx_power_mode(int mode);
+
+/* DT parsing for vregs, GPIOs and PHY settings (and its teardown). */
+void dsi_ctrl_config_deinit(struct platform_device *pdev,
+				struct mdss_dsi_ctrl_pdata *ctrl_pdata);
+
+int dsi_ctrl_config_init(struct platform_device *pdev,
+				struct mdss_dsi_ctrl_pdata *ctrl_pdata);
+
+struct mdss_panel_cfg *mdp3_panel_intf_type(int intf_val);
+
+int mdp3_panel_get_boot_cfg(void);
+
+void msm_dsi_sw_reset(void);
+#endif /* DSI_V2_H */
diff --git a/drivers/video/fbdev/msm/mdp3.c b/drivers/video/fbdev/msm/mdp3.c
new file mode 100644
index 0000000..308af51
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdp3.c
@@ -0,0 +1,3174 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2007 Google Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/dma-buf.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/spinlock.h>
+#include <linux/semaphore.h>
+#include <linux/uaccess.h>
+#include <linux/file.h>
+#include <linux/msm_kgsl.h>
+#include <linux/major.h>
+#include <linux/bootmem.h>
+#include <linux/memblock.h>
+#include <linux/iopoll.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/regulator/rpm-smd-regulator.h>
+
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/qcom_iommu.h>
+#include <linux/msm_iommu_domains.h>
+
+#include <linux/msm_dma_iommu_mapping.h>
+
+#include "mdp3.h"
+#include "mdss_fb.h"
+#include "mdp3_hwio.h"
+#include "mdp3_ctrl.h"
+#include "mdp3_ppp.h"
+#include "mdss_debug.h"
+#include "mdss_smmu.h"
+#include "mdss.h"
+
+#ifndef EXPORT_COMPAT
+#define EXPORT_COMPAT(x)
+#endif
+
+#define AUTOSUSPEND_TIMEOUT_MS	100
+#define MISR_POLL_SLEEP                 2000
+#define MISR_POLL_TIMEOUT               32000
+#define MDP3_REG_CAPTURED_DSI_PCLK_MASK 1
+
+#define MDP_CORE_HW_VERSION	0x03050306
+struct mdp3_hw_resource *mdp3_res;
+
+#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val)		\
+	{						\
+		.src = MSM_BUS_MASTER_MDP_PORT0,	\
+		.dst = MSM_BUS_SLAVE_EBI_CH0,		\
+		.ab = (ab_val),				\
+		.ib = (ib_val),				\
+	}
+
+#define SET_BIT(value, bit_num) \
+{ \
+	value[bit_num >> 3] |= (1 << (bit_num & 7)); \
+}
+
+#define MAX_BPP_SUPPORTED 4
+
+static struct msm_bus_vectors mdp_bus_vectors[] = {
+	MDP_BUS_VECTOR_ENTRY(0, 0),
+	MDP_BUS_VECTOR_ENTRY(SZ_128M, SZ_256M),
+	MDP_BUS_VECTOR_ENTRY(SZ_256M, SZ_512M),
+};
+static struct msm_bus_paths
+	mdp_bus_usecases[ARRAY_SIZE(mdp_bus_vectors)];
+static struct msm_bus_scale_pdata mdp_bus_scale_table = {
+	.usecase = mdp_bus_usecases,
+	.num_usecases = ARRAY_SIZE(mdp_bus_usecases),
+	.name = "mdp3",
+};
+
+struct mdp3_bus_handle_map mdp3_bus_handle[MDP3_BUS_HANDLE_MAX] = {
+	[MDP3_BUS_HANDLE] = {
+		.bus_vector = mdp_bus_vectors,
+		.usecases = mdp_bus_usecases,
+		.scale_pdata = &mdp_bus_scale_table,
+		.current_bus_idx = 0,
+		.handle = 0,
+	},
+};
+
+static struct mdss_panel_intf pan_types[] = {
+	{"dsi", MDSS_PANEL_INTF_DSI},
+};
+static char mdss_mdp3_panel[MDSS_MAX_PANEL_LEN];
+
+struct mdp3_iommu_domain_map mdp3_iommu_domains[MDP3_IOMMU_DOMAIN_MAX] = {
+	[MDP3_IOMMU_DOMAIN_UNSECURE] = {
+		.domain_type = MDP3_IOMMU_DOMAIN_UNSECURE,
+		.client_name = "mdp_ns",
+		.partitions = {
+			{
+				.start = SZ_128K,
+				.size = SZ_1G - SZ_128K,
+			},
+		},
+		.npartitions = 1,
+	},
+	[MDP3_IOMMU_DOMAIN_SECURE] = {
+		.domain_type = MDP3_IOMMU_DOMAIN_SECURE,
+		.client_name = "mdp_secure",
+		.partitions = {
+			{
+				.start = SZ_1G,
+				.size = SZ_1G,
+			},
+		},
+		.npartitions = 1,
+	},
+};
+
+struct mdp3_iommu_ctx_map mdp3_iommu_contexts[MDP3_IOMMU_CTX_MAX] = {
+	[MDP3_IOMMU_CTX_MDP_0] = {
+		.ctx_type = MDP3_IOMMU_CTX_MDP_0,
+		.domain = &mdp3_iommu_domains[MDP3_IOMMU_DOMAIN_UNSECURE],
+		.ctx_name = "mdp_0",
+		.attached = 0,
+	},
+	[MDP3_IOMMU_CTX_MDP_1] = {
+		.ctx_type = MDP3_IOMMU_CTX_MDP_1,
+		.domain = &mdp3_iommu_domains[MDP3_IOMMU_DOMAIN_SECURE],
+		.ctx_name = "mdp_1",
+		.attached = 0,
+	},
+};
+
+/*
+ * mdp3_irq_handler() - top-level MDP3 ISR. Reads the interrupt status,
+ * masks it against the enabled set, invokes the registered per-bit
+ * callbacks, then clears the status register.
+ */
+static irqreturn_t mdp3_irq_handler(int irq, void *ptr)
+{
+	int i = 0;
+	struct mdp3_hw_resource *mdata = (struct mdp3_hw_resource *)ptr;
+	u32 mdp_interrupt = 0;
+	u32 mdp_status = 0;
+
+	spin_lock(&mdata->irq_lock);
+	if (!mdata->irq_mask) {
+		pr_err("spurious interrupt\n");
+		spin_unlock(&mdata->irq_lock);
+		return IRQ_HANDLED;
+	}
+	mdp_status = MDP3_REG_READ(MDP3_REG_INTR_STATUS);
+	mdp_interrupt = mdp_status;
+	pr_debug("mdp3_irq_handler irq=%d\n", mdp_interrupt);
+
+	mdp_interrupt &= mdata->irq_mask;
+
+	/* walk the pending bits LSB-first, dispatching one callback each */
+	while (mdp_interrupt && i < MDP3_MAX_INTR) {
+		if ((mdp_interrupt & 0x1) && mdata->callbacks[i].cb)
+			mdata->callbacks[i].cb(i, mdata->callbacks[i].data);
+		mdp_interrupt = mdp_interrupt >> 1;
+		i++;
+	}
+	/* ack everything that was read, including masked-out bits */
+	MDP3_REG_WRITE(MDP3_REG_INTR_CLEAR, mdp_status);
+
+	spin_unlock(&mdata->irq_lock);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * mdp3_irq_enable() - unmask MDP3 interrupt @type.
+ *
+ * NOTE(review): the per-type ref count is only incremented on the 0->1
+ * transition (repeat calls return early without bumping it), so
+ * enable/disable pairs are not symmetric — confirm callers rely on
+ * this single-reference behavior.
+ */
+void mdp3_irq_enable(int type)
+{
+	unsigned long flag;
+
+	pr_debug("mdp3_irq_enable type=%d\n", type);
+	spin_lock_irqsave(&mdp3_res->irq_lock, flag);
+	if (mdp3_res->irq_ref_count[type] > 0) {
+		pr_debug("interrupt %d already enabled\n", type);
+		spin_unlock_irqrestore(&mdp3_res->irq_lock, flag);
+		return;
+	}
+
+	mdp3_res->irq_mask |= BIT(type);
+	MDP3_REG_WRITE(MDP3_REG_INTR_ENABLE, mdp3_res->irq_mask);
+
+	mdp3_res->irq_ref_count[type] += 1;
+	spin_unlock_irqrestore(&mdp3_res->irq_lock, flag);
+}
+
+/* Mask MDP3 interrupt @type with the irq spinlock held. */
+void mdp3_irq_disable(int type)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&mdp3_res->irq_lock, flags);
+	mdp3_irq_disable_nosync(type);
+	spin_unlock_irqrestore(&mdp3_res->irq_lock, flags);
+}
+
+/*
+ * mdp3_irq_disable_nosync() - drop a reference on interrupt @type and
+ * mask it in hardware when the count reaches zero.
+ * Caller must hold mdp3_res->irq_lock.
+ */
+void mdp3_irq_disable_nosync(int type)
+{
+	if (mdp3_res->irq_ref_count[type] <= 0) {
+		pr_debug("interrupt %d not enabled\n", type);
+		return;
+	}
+	mdp3_res->irq_ref_count[type] -= 1;
+	if (mdp3_res->irq_ref_count[type] == 0) {
+		mdp3_res->irq_mask &= ~BIT(type);
+		MDP3_REG_WRITE(MDP3_REG_INTR_ENABLE, mdp3_res->irq_mask);
+	}
+}
+
+/* Install (or clear, when @cb is NULL) the callback for interrupt
+ * @type. Always returns 0.
+ */
+int mdp3_set_intr_callback(u32 type, struct mdp3_intr_cb *cb)
+{
+	unsigned long flags;
+
+	pr_debug("interrupt %d callback\n", type);
+	spin_lock_irqsave(&mdp3_res->irq_lock, flags);
+	if (!cb)
+		mdp3_res->callbacks[type].cb = NULL;
+	else
+		mdp3_res->callbacks[type] = *cb;
+	spin_unlock_irqrestore(&mdp3_res->irq_lock, flags);
+
+	return 0;
+}
+
+/*
+ * mdp3_irq_register() - take a reference on the MDP3 irq line; the
+ * first reference programs the interrupt mask and enables the line
+ * through mdss_util.
+ */
+void mdp3_irq_register(void)
+{
+	unsigned long flag;
+	struct mdss_hw *mdp3_hw;
+
+	pr_debug("mdp3_irq_register\n");
+	mdp3_hw = &mdp3_res->mdp3_hw;
+	spin_lock_irqsave(&mdp3_res->irq_lock, flag);
+	mdp3_res->irq_ref_cnt++;
+	if (mdp3_res->irq_ref_cnt == 1) {
+		MDP3_REG_WRITE(MDP3_REG_INTR_ENABLE, mdp3_res->irq_mask);
+		/* use the local (previously assigned but unused) */
+		mdp3_res->mdss_util->enable_irq(mdp3_hw);
+	}
+	spin_unlock_irqrestore(&mdp3_res->irq_lock, flag);
+}
+
+/*
+ * mdp3_irq_deregister() - drop an irq reference and fully quiesce the
+ * block: all per-type counts and the mask are cleared unconditionally,
+ * and the irq line is disabled on the last reference.
+ */
+void mdp3_irq_deregister(void)
+{
+	unsigned long flag;
+	bool irq_enabled = true;
+	struct mdss_hw *mdp3_hw;
+
+	pr_debug("mdp3_irq_deregister\n");
+	mdp3_hw = &mdp3_res->mdp3_hw;
+	spin_lock_irqsave(&mdp3_res->irq_lock, flag);
+	memset(mdp3_res->irq_ref_count, 0, sizeof(u32) * MDP3_MAX_INTR);
+	mdp3_res->irq_mask = 0;
+	MDP3_REG_WRITE(MDP3_REG_INTR_ENABLE, 0);
+	mdp3_res->irq_ref_cnt--;
+	/* This can happen if suspend is called first */
+	if (mdp3_res->irq_ref_cnt < 0) {
+		irq_enabled = false;
+		mdp3_res->irq_ref_cnt = 0;
+	}
+	if (mdp3_res->irq_ref_cnt == 0 && irq_enabled)
+		mdp3_res->mdss_util->disable_irq_nosync(&mdp3_res->mdp3_hw);
+	spin_unlock_irqrestore(&mdp3_res->irq_lock, flag);
+}
+
+/*
+ * mdp3_irq_suspend() - drop an irq reference for suspend; on the last
+ * reference the interrupt enable register is zeroed and the irq line
+ * disabled. Unlike mdp3_irq_deregister() the per-type counts and
+ * cached mask are preserved for resume.
+ */
+void mdp3_irq_suspend(void)
+{
+	unsigned long flag;
+	bool irq_enabled = true;
+	struct mdss_hw *mdp3_hw;
+
+	pr_debug("%s\n", __func__);
+	mdp3_hw = &mdp3_res->mdp3_hw;
+	spin_lock_irqsave(&mdp3_res->irq_lock, flag);
+	mdp3_res->irq_ref_cnt--;
+	/* guard against unbalanced suspend/deregister ordering */
+	if (mdp3_res->irq_ref_cnt < 0) {
+		irq_enabled = false;
+		mdp3_res->irq_ref_cnt = 0;
+	}
+	if (mdp3_res->irq_ref_cnt == 0 && irq_enabled) {
+		MDP3_REG_WRITE(MDP3_REG_INTR_ENABLE, 0);
+		mdp3_res->mdss_util->disable_irq_nosync(&mdp3_res->mdp3_hw);
+	}
+	spin_unlock_irqrestore(&mdp3_res->irq_lock, flag);
+}
+
+/*
+ * mdp3_bus_scale_register() - register each MDP3 bus-scale client with
+ * the msm bus driver and zero the per-client ab/ib bookkeeping.
+ *
+ * Returns 0 on success, -EINVAL without a bus handle table, or -ENOMEM
+ * when client registration fails.
+ */
+static int mdp3_bus_scale_register(void)
+{
+	int i, j;
+
+	if (!mdp3_res->bus_handle) {
+		pr_err("No bus handle\n");
+		return -EINVAL;
+	}
+	for (i = 0; i < MDP3_BUS_HANDLE_MAX; i++) {
+		struct mdp3_bus_handle_map *bus_handle =
+			&mdp3_res->bus_handle[i];
+
+		if (!bus_handle->handle) {
+			struct msm_bus_scale_pdata *bus_pdata =
+				bus_handle->scale_pdata;
+
+			/* point each usecase at its backing vector entry
+			 * (inner shadowing declaration of j removed)
+			 */
+			for (j = 0; j < bus_pdata->num_usecases; j++) {
+				bus_handle->usecases[j].num_paths = 1;
+				bus_handle->usecases[j].vectors =
+					&bus_handle->bus_vector[j];
+			}
+
+			bus_handle->handle =
+				msm_bus_scale_register_client(bus_pdata);
+			if (!bus_handle->handle) {
+				pr_err("not able to get bus scale i=%d\n", i);
+				return -ENOMEM;
+			}
+			pr_debug("register bus_hdl=%x\n",
+				bus_handle->handle);
+		}
+
+		for (j = 0; j < MDP3_CLIENT_MAX; j++) {
+			bus_handle->ab[j] = 0;
+			bus_handle->ib[j] = 0;
+		}
+	}
+	return 0;
+}
+
+/* Tear down every registered MDP3 bus-scale client. */
+static void mdp3_bus_scale_unregister(void)
+{
+	int idx;
+
+	if (!mdp3_res->bus_handle)
+		return;
+
+	for (idx = 0; idx < MDP3_BUS_HANDLE_MAX; idx++) {
+		struct mdp3_bus_handle_map *hdl = &mdp3_res->bus_handle[idx];
+
+		pr_debug("unregister index=%d bus_handle=%x\n",
+			idx, hdl->handle);
+		if (!hdl->handle)
+			continue;
+		msm_bus_scale_unregister_client(hdl->handle);
+		hdl->handle = 0;
+	}
+}
+
+/*
+ * mdp3_bus_scale_set_quota() - update @client's ab/ib bandwidth votes
+ * and push the aggregated request to the bus driver.
+ *
+ * Non-zero totals rotate through the non-zero usecase slots so the bus
+ * driver always sees a vector change; zero totals select usecase 0.
+ * Returns 0 on success or the bus driver's error code.
+ */
+int mdp3_bus_scale_set_quota(int client, u64 ab_quota, u64 ib_quota)
+{
+	struct mdp3_bus_handle_map *bus_handle;
+	int cur_bus_idx;
+	int bus_idx;
+	int client_idx;
+	u64 total_ib = 0, total_ab = 0;
+	int i, rc;
+
+	client_idx  = MDP3_BUS_HANDLE;
+
+	bus_handle = &mdp3_res->bus_handle[client_idx];
+	cur_bus_idx = bus_handle->current_bus_idx;
+
+	if (bus_handle->handle < 1) {
+		pr_err("invalid bus handle %d\n", bus_handle->handle);
+		return -EINVAL;
+	}
+
+	bus_handle->ab[client] = ab_quota;
+	bus_handle->ib[client] = ib_quota;
+
+	/* aggregate votes across all MDP3 clients */
+	for (i = 0; i < MDP3_CLIENT_MAX; i++) {
+		total_ab += bus_handle->ab[i];
+		total_ib += bus_handle->ib[i];
+	}
+
+	if ((total_ab | total_ib) == 0) {
+		bus_idx = 0;
+	} else {
+		int num_cases = bus_handle->scale_pdata->num_usecases;
+		struct msm_bus_vectors *vect = NULL;
+
+		/* rotate through the non-zero usecase slots (1..n-1) */
+		bus_idx = (cur_bus_idx % (num_cases - 1)) + 1;
+
+		/* aligning to avoid performing updates for small changes */
+		total_ab = ALIGN(total_ab, SZ_64M);
+		total_ib = ALIGN(total_ib, SZ_64M);
+
+		vect = bus_handle->scale_pdata->usecase[cur_bus_idx].vectors;
+		if ((total_ab == vect->ab) && (total_ib == vect->ib)) {
+			pr_debug("skip bus scaling, no change in vectors\n");
+			return 0;
+		}
+
+		vect = bus_handle->scale_pdata->usecase[bus_idx].vectors;
+		vect->ab = total_ab;
+		vect->ib = total_ib;
+
+		pr_debug("bus scale idx=%d ab=%llu ib=%llu\n", bus_idx,
+				vect->ab, vect->ib);
+	}
+	bus_handle->current_bus_idx = bus_idx;
+	rc = msm_bus_scale_client_update_request(bus_handle->handle, bus_idx);
+
+	/* remember the last non-zero vote so it can be restored later */
+	if (!rc && ab_quota != 0 && ib_quota != 0) {
+		bus_handle->restore_ab[client] = ab_quota;
+		bus_handle->restore_ib[client] = ib_quota;
+	}
+
+	return rc;
+}
+
+/*
+ * mdp3_clk_update() - reference-counted enable/disable of one MDP3
+ * clock. Hardware is touched only on the 0->1 (prepare+enable) and
+ * 1->0 (disable+unprepare) transitions.
+ *
+ * NOTE(review): the count is adjusted before the transition check, so
+ * an unbalanced disable drives the count negative and is reported as
+ * -EINVAL rather than clamped — confirm callers are balanced.
+ */
+static int mdp3_clk_update(u32 clk_idx, u32 enable)
+{
+	int ret = 0;
+	struct clk *clk;
+	int count = 0;
+
+	if (clk_idx >= MDP3_MAX_CLK || !mdp3_res->clocks[clk_idx])
+		return -ENODEV;
+
+	clk = mdp3_res->clocks[clk_idx];
+
+	if (enable)
+		mdp3_res->clock_ref_count[clk_idx]++;
+	else
+		mdp3_res->clock_ref_count[clk_idx]--;
+
+	count = mdp3_res->clock_ref_count[clk_idx];
+	if (count == 1 && enable) {
+		pr_debug("clk=%d en=%d\n", clk_idx, enable);
+		ret = clk_prepare(clk);
+		if (ret) {
+			pr_err("%s: Failed to prepare clock %d",
+						__func__, clk_idx);
+			/* roll back the reference on prepare failure */
+			mdp3_res->clock_ref_count[clk_idx]--;
+			return ret;
+		}
+		if (clk_idx == MDP3_CLK_MDP_CORE)
+			MDSS_XLOG(enable);
+		ret = clk_enable(clk);
+		if (ret)
+			pr_err("%s: clock enable failed %d\n", __func__,
+					clk_idx);
+	} else if (count == 0) {
+		pr_debug("clk=%d disable\n", clk_idx);
+		if (clk_idx == MDP3_CLK_MDP_CORE)
+			MDSS_XLOG(enable);
+		clk_disable(clk);
+		clk_unprepare(clk);
+		ret = 0;
+	} else if (count < 0) {
+		pr_err("clk=%d count=%d\n", clk_idx, count);
+		ret = -EINVAL;
+	}
+	return ret;
+}
+
+
+
+/*
+ * mdp3_clk_set_rate() - set @clk_type to @clk_rate on behalf of
+ * @client (DMA_P or PPP).
+ *
+ * For the core source clock the per-client requests are recorded and
+ * the clock runs at the maximum of the two, so one client lowering its
+ * request cannot starve the other. Returns 0 or a negative errno.
+ */
+int mdp3_clk_set_rate(int clk_type, unsigned long clk_rate,
+			int client)
+{
+	int ret = 0;
+	unsigned long rounded_rate;
+	struct clk *clk = mdp3_res->clocks[clk_type];
+
+	if (clk) {
+		mutex_lock(&mdp3_res->res_mutex);
+		rounded_rate = clk_round_rate(clk, clk_rate);
+		if (IS_ERR_VALUE(rounded_rate)) {
+			pr_err("unable to round rate err=%ld\n", rounded_rate);
+			mutex_unlock(&mdp3_res->res_mutex);
+			return -EINVAL;
+		}
+		if (clk_type == MDP3_CLK_MDP_SRC) {
+			if (client == MDP3_CLIENT_DMA_P) {
+				mdp3_res->dma_core_clk_request = rounded_rate;
+			} else if (client == MDP3_CLIENT_PPP) {
+				mdp3_res->ppp_core_clk_request = rounded_rate;
+			} else {
+				pr_err("unrecognized client=%d\n", client);
+				mutex_unlock(&mdp3_res->res_mutex);
+				return -EINVAL;
+			}
+			/* core src runs at the higher of the two requests */
+			rounded_rate = max(mdp3_res->dma_core_clk_request,
+				mdp3_res->ppp_core_clk_request);
+		}
+		if (rounded_rate != clk_get_rate(clk)) {
+			ret = clk_set_rate(clk, rounded_rate);
+			if (ret)
+				pr_err("clk_set_rate failed ret=%d\n", ret);
+			else
+				pr_debug("mdp clk rate=%lu, client = %d\n",
+					rounded_rate, client);
+		}
+		mutex_unlock(&mdp3_res->res_mutex);
+	} else {
+		pr_err("mdp src clk not setup properly\n");
+		ret = -EINVAL;
+	}
+	return ret;
+}
+
+/*
+ * mdp3_get_clk_rate() - return the current rate of clock @clk_idx, or
+ * 0 when the clock was never registered.
+ *
+ * NOTE(review): an out-of-range index returns -ENODEV cast to
+ * unsigned long (a huge value) — confirm callers treat that correctly.
+ */
+unsigned long mdp3_get_clk_rate(u32 clk_idx)
+{
+	unsigned long clk_rate = 0;
+	struct clk *clk;
+
+	if (clk_idx >= MDP3_MAX_CLK)
+		return -ENODEV;
+
+	clk = mdp3_res->clocks[clk_idx];
+
+	if (clk) {
+		mutex_lock(&mdp3_res->res_mutex);
+		clk_rate = clk_get_rate(clk);
+		mutex_unlock(&mdp3_res->res_mutex);
+	}
+	return clk_rate;
+}
+
+/* Look up @clk_name via devm and stash the handle at @clk_idx in the
+ * clock table. Returns 0 or a negative errno.
+ */
+static int mdp3_clk_register(char *clk_name, int clk_idx)
+{
+	struct clk *clk;
+
+	if (clk_idx >= MDP3_MAX_CLK) {
+		pr_err("invalid clk index %d\n", clk_idx);
+		return -EINVAL;
+	}
+
+	clk = devm_clk_get(&mdp3_res->pdev->dev, clk_name);
+	if (IS_ERR(clk)) {
+		pr_err("unable to get clk: %s\n", clk_name);
+		return PTR_ERR(clk);
+	}
+
+	mdp3_res->clocks[clk_idx] = clk;
+
+	return 0;
+}
+
+static int mdp3_clk_setup(void)
+{
+	int rc;
+
+	rc = mdp3_clk_register("iface_clk", MDP3_CLK_AHB);
+	if (rc)
+		return rc;
+
+	rc = mdp3_clk_register("bus_clk", MDP3_CLK_AXI);
+	if (rc)
+		return rc;
+
+	rc = mdp3_clk_register("core_clk_src", MDP3_CLK_MDP_SRC);
+	if (rc)
+		return rc;
+
+	rc = mdp3_clk_register("core_clk", MDP3_CLK_MDP_CORE);
+	if (rc)
+		return rc;
+
+	rc = mdp3_clk_register("vsync_clk", MDP3_CLK_VSYNC);
+	if (rc)
+		return rc;
+
+	rc = mdp3_clk_set_rate(MDP3_CLK_MDP_SRC, MDP_CORE_CLK_RATE_SVS,
+			MDP3_CLIENT_DMA_P);
+	if (rc)
+		pr_err("%s: Error setting max clock during probe\n", __func__);
+	return rc;
+}
+
+static void mdp3_clk_remove(void)
+{
+	if (!IS_ERR_OR_NULL(mdp3_res->clocks[MDP3_CLK_AHB]))
+		clk_put(mdp3_res->clocks[MDP3_CLK_AHB]);
+
+	if (!IS_ERR_OR_NULL(mdp3_res->clocks[MDP3_CLK_AXI]))
+		clk_put(mdp3_res->clocks[MDP3_CLK_AXI]);
+
+	if (!IS_ERR_OR_NULL(mdp3_res->clocks[MDP3_CLK_MDP_SRC]))
+		clk_put(mdp3_res->clocks[MDP3_CLK_MDP_SRC]);
+
+	if (!IS_ERR_OR_NULL(mdp3_res->clocks[MDP3_CLK_MDP_CORE]))
+		clk_put(mdp3_res->clocks[MDP3_CLK_MDP_CORE]);
+
+	if (!IS_ERR_OR_NULL(mdp3_res->clocks[MDP3_CLK_VSYNC]))
+		clk_put(mdp3_res->clocks[MDP3_CLK_VSYNC]);
+
+}
+
+u64 mdp3_clk_round_off(u64 clk_rate)
+{
+	u64 clk_round_off = 0;
+
+	if (clk_rate <= MDP_CORE_CLK_RATE_SVS)
+		clk_round_off = MDP_CORE_CLK_RATE_SVS;
+	else if (clk_rate <= MDP_CORE_CLK_RATE_SUPER_SVS)
+		clk_round_off = MDP_CORE_CLK_RATE_SUPER_SVS;
+	else
+		clk_round_off = MDP_CORE_CLK_RATE_MAX;
+
+	pr_debug("clk = %llu rounded to = %llu\n",
+		clk_rate, clk_round_off);
+	return clk_round_off;
+}
+
/*
 * mdp3_clk_enable() - reference-counted enable/disable of the MDP clocks.
 * @enable:  non-zero takes a reference, zero drops one
 * @dsi_clk: unused in this body; kept for the callers' signature
 *
 * Only the 0->1 and 1->0 refcount transitions touch the hardware. A
 * pm_runtime reference is taken before the clocks go on and released
 * after they go off so the device stays powered while any user holds a
 * reference. Returns the OR of the mdp3_clk_update() results.
 */
int mdp3_clk_enable(int enable, int dsi_clk)
{
	int rc = 0;
	int changed = 0;

	pr_debug("MDP CLKS %s\n", (enable ? "Enable" : "Disable"));

	mutex_lock(&mdp3_res->res_mutex);

	if (enable) {
		/* 0 -> 1: clocks must actually be turned on */
		if (mdp3_res->clk_ena == 0)
			changed++;
		mdp3_res->clk_ena++;
	} else {
		if (mdp3_res->clk_ena) {
			mdp3_res->clk_ena--;
			/* 1 -> 0: last user gone, turn clocks off */
			if (mdp3_res->clk_ena == 0)
				changed++;
		} else {
			pr_err("Can not be turned off\n");
		}
	}
	pr_debug("%s: clk_ena=%d changed=%d enable=%d\n",
		__func__, mdp3_res->clk_ena, changed, enable);

	if (changed) {
		if (enable)
			pm_runtime_get_sync(&mdp3_res->pdev->dev);

	rc = mdp3_clk_update(MDP3_CLK_AHB, enable);
	rc |= mdp3_clk_update(MDP3_CLK_AXI, enable);
	rc |= mdp3_clk_update(MDP3_CLK_MDP_SRC, enable);
	rc |= mdp3_clk_update(MDP3_CLK_MDP_CORE, enable);
	rc |= mdp3_clk_update(MDP3_CLK_VSYNC, enable);

		if (!enable) {
			pm_runtime_mark_last_busy(&mdp3_res->pdev->dev);
			pm_runtime_put_autosuspend(&mdp3_res->pdev->dev);
		}
	}

	mutex_unlock(&mdp3_res->res_mutex);
	return rc;
}
+
/*
 * mdp3_bus_bw_iommu_enable() - reference-counted bus-bandwidth vote plus
 * IOMMU attach/detach on behalf of @client.
 * @enable: non-zero takes a reference, zero drops one
 * @client: index into the bus handle's restore_ab/restore_ib tables
 *
 * The first reference takes pm_runtime and restores the client's saved
 * ab/ib bus vote; the last drop zeroes the vote and releases pm_runtime.
 * The IOMMU enable/disable runs on every call, independent of refcount.
 */
void mdp3_bus_bw_iommu_enable(int enable, int client)
{
	struct mdp3_bus_handle_map *bus_handle;
	int client_idx;
	u64 ab = 0, ib = 0;
	int ref_cnt;

	client_idx  = MDP3_BUS_HANDLE;

	bus_handle = &mdp3_res->bus_handle[client_idx];
	if (bus_handle->handle < 1) {
		pr_err("invalid bus handle %d\n", bus_handle->handle);
		return;
	}
	/* update the refcount under the lock, act on the snapshot after */
	mutex_lock(&mdp3_res->res_mutex);
	if (enable)
		bus_handle->ref_cnt++;
	else
		if (bus_handle->ref_cnt)
			bus_handle->ref_cnt--;
	ref_cnt = bus_handle->ref_cnt;
	mutex_unlock(&mdp3_res->res_mutex);

	if (enable) {
		if (mdp3_res->allow_iommu_update)
			mdp3_iommu_enable(client);
		if (ref_cnt == 1) {
			/* first user: power up, restore the saved vote */
			pm_runtime_get_sync(&mdp3_res->pdev->dev);
			ab = bus_handle->restore_ab[client];
			ib = bus_handle->restore_ib[client];
		mdp3_bus_scale_set_quota(client, ab, ib);
		}
	} else {
		if (ref_cnt == 0) {
			/* last user: drop the vote, then allow suspend */
			mdp3_bus_scale_set_quota(client, 0, 0);
			pm_runtime_mark_last_busy(&mdp3_res->pdev->dev);
			pm_runtime_put_autosuspend(&mdp3_res->pdev->dev);
		}
		mdp3_iommu_disable(client);
	}

	/* defensive: cannot happen given the guarded decrement above */
	if (ref_cnt < 0) {
		pr_err("Ref count < 0, bus client=%d, ref_cnt=%d",
				client_idx, ref_cnt);
	}
}
+
+void mdp3_calc_dma_res(struct mdss_panel_info *panel_info, u64 *clk_rate,
+		u64 *ab, u64 *ib, uint32_t bpp)
+{
+	u32 vtotal = mdss_panel_get_vtotal(panel_info);
+	u32 htotal = mdss_panel_get_htotal(panel_info, 0);
+	u64 clk    = htotal * vtotal * panel_info->mipi.frame_rate;
+
+	pr_debug("clk_rate for dma = %llu, bpp = %d\n", clk, bpp);
+	if (clk_rate)
+		*clk_rate = mdp3_clk_round_off(clk);
+
+	/* ab and ib vote should be same for honest voting */
+	if (ab || ib) {
+		*ab = clk * bpp;
+		*ib = *ab;
+	}
+}
+
/*
 * Power the MDP3 path up or down: clocks, IRQ registration and the
 * bus/IOMMU vote, ordered appropriately for each direction.
 */
int mdp3_res_update(int enable, int dsi_clk, int client)
{
	int rc;

	if (!enable) {
		/* teardown runs in the reverse order of bring-up */
		mdp3_bus_bw_iommu_enable(enable, client);
		mdp3_irq_suspend();
		rc = mdp3_clk_enable(enable, dsi_clk);
		if (rc < 0)
			pr_err("mdp3_clk_enable failed, enable=%d, dsi_clk=%d\n",
				enable, dsi_clk);
		return rc;
	}

	/* bring clocks up first, then IRQ and the bus/IOMMU vote */
	rc = mdp3_clk_enable(enable, dsi_clk);
	if (rc < 0) {
		pr_err("mdp3_clk_enable failed, enable=%d, dsi_clk=%d\n",
			enable, dsi_clk);
		return rc;
	}
	mdp3_irq_register();
	mdp3_bus_bw_iommu_enable(enable, client);

	return rc;
}
+
+int mdp3_get_mdp_dsi_clk(void)
+{
+	int rc;
+
+	mutex_lock(&mdp3_res->res_mutex);
+	rc = mdp3_clk_update(MDP3_CLK_DSI, 1);
+	mutex_unlock(&mdp3_res->res_mutex);
+	return rc;
+}
+
+int mdp3_put_mdp_dsi_clk(void)
+{
+	int rc;
+
+	mutex_lock(&mdp3_res->res_mutex);
+	rc = mdp3_clk_update(MDP3_CLK_DSI, 0);
+	mutex_unlock(&mdp3_res->res_mutex);
+	return rc;
+}
+
/*
 * Request the MDP interrupt line and leave it masked until a client
 * enables it. mdp3_res->irq_registered gates devm_free_irq at teardown.
 */
static int mdp3_irq_setup(void)
{
	int ret;
	struct mdss_hw *mdp3_hw;

	mdp3_hw = &mdp3_res->mdp3_hw;
	ret = devm_request_irq(&mdp3_res->pdev->dev,
				mdp3_hw->irq_info->irq,
				mdp3_irq_handler,
				IRQF_DISABLED, "MDP", mdp3_res);
	if (ret) {
		pr_err("mdp request_irq() failed!\n");
		return ret;
	}
	/* keep the line masked until the first real user enables it */
	disable_irq_nosync(mdp3_hw->irq_info->irq);
	mdp3_res->irq_registered = true;
	return 0;
}
+
+
+static int mdp3_get_iommu_domain(u32 type)
+{
+	if (type >= MDSS_IOMMU_MAX_DOMAIN)
+		return -EINVAL;
+
+	if (!mdp3_res)
+		return -ENODEV;
+
+	return mdp3_res->domains[type].domain_idx;
+}
+
+int mdp3_iommu_attach(int context)
+{
+	int rc = 0;
+	struct mdp3_iommu_ctx_map *context_map;
+	struct mdp3_iommu_domain_map *domain_map;
+
+	if (context >= MDP3_IOMMU_CTX_MAX)
+		return -EINVAL;
+
+	context_map = mdp3_res->iommu_contexts + context;
+	if (context_map->attached) {
+		pr_warn("mdp iommu already attached\n");
+		return 0;
+	}
+
+	domain_map = context_map->domain;
+
+	rc = iommu_attach_device(domain_map->domain, context_map->ctx);
+	if (rc) {
+		pr_err("mpd3 iommu attach failed\n");
+		return -EINVAL;
+	}
+
+	context_map->attached = true;
+	return 0;
+}
+
+int mdp3_iommu_dettach(int context)
+{
+	struct mdp3_iommu_ctx_map *context_map;
+	struct mdp3_iommu_domain_map *domain_map;
+
+	if (!mdp3_res->iommu_contexts ||
+		context >= MDP3_IOMMU_CTX_MAX)
+		return -EINVAL;
+
+	context_map = mdp3_res->iommu_contexts + context;
+	if (!context_map->attached) {
+		pr_warn("mdp iommu not attached\n");
+		return 0;
+	}
+
+	domain_map = context_map->domain;
+	iommu_detach_device(domain_map->domain, context_map->ctx);
+	context_map->attached = false;
+
+	return 0;
+}
+
/*
 * Register each MDP3 IOMMU domain layout with the MSM IOMMU driver and
 * cache the resulting index/handle in mdp3_iommu_domains[]. Idempotent:
 * returns 0 immediately when already initialized.
 */
int mdp3_iommu_domain_init(void)
{
	struct msm_iova_layout layout;
	int i;

	if (mdp3_res->domains) {
		pr_warn("iommu domain already initialized\n");
		return 0;
	}

	for (i = 0; i < MDP3_IOMMU_DOMAIN_MAX; i++) {
		int domain_idx;

		layout.client_name = mdp3_iommu_domains[i].client_name;
		layout.partitions = mdp3_iommu_domains[i].partitions;
		layout.npartitions = mdp3_iommu_domains[i].npartitions;
		/* only the SECURE slot is registered as a secure domain */
		layout.is_secure = (i == MDP3_IOMMU_DOMAIN_SECURE);

		domain_idx = msm_register_domain(&layout);
		if (IS_ERR_VALUE(domain_idx))
			return -EINVAL;

		mdp3_iommu_domains[i].domain_idx = domain_idx;
		mdp3_iommu_domains[i].domain = msm_get_iommu_domain(domain_idx);
		if (IS_ERR_OR_NULL(mdp3_iommu_domains[i].domain)) {
			pr_err("unable to get iommu domain(%d)\n",
				domain_idx);
			if (!mdp3_iommu_domains[i].domain)
				return -EINVAL;
			else
				return PTR_ERR(mdp3_iommu_domains[i].domain);
		}
	}

	mdp3_res->domains = mdp3_iommu_domains;

	return 0;
}
+
+int mdp3_iommu_context_init(void)
+{
+	int i;
+
+	if (mdp3_res->iommu_contexts) {
+		pr_warn("iommu context already initialized\n");
+		return 0;
+	}
+
+	for (i = 0; i < MDP3_IOMMU_CTX_MAX; i++) {
+		mdp3_iommu_contexts[i].ctx =
+			msm_iommu_get_ctx(mdp3_iommu_contexts[i].ctx_name);
+
+		if (IS_ERR_OR_NULL(mdp3_iommu_contexts[i].ctx)) {
+			pr_warn("unable to get iommu ctx(%s)\n",
+				mdp3_iommu_contexts[i].ctx_name);
+			if (!mdp3_iommu_contexts[i].ctx)
+				return -EINVAL;
+			else
+				return PTR_ERR(mdp3_iommu_contexts[i].ctx);
+		}
+	}
+
+	mdp3_res->iommu_contexts = mdp3_iommu_contexts;
+
+	return 0;
+}
+
+int mdp3_iommu_init(void)
+{
+	int ret;
+
+	mutex_init(&mdp3_res->iommu_lock);
+
+	ret = mdp3_iommu_domain_init();
+	if (ret) {
+		pr_err("mdp3 iommu domain init fails\n");
+		return ret;
+	}
+
+	ret = mdp3_iommu_context_init();
+	if (ret) {
+		pr_err("mdp3 iommu context init fails\n");
+		return ret;
+	}
+	return ret;
+}
+
+void mdp3_iommu_deinit(void)
+{
+	int i;
+
+	if (!mdp3_res->domains)
+		return;
+
+	for (i = 0; i < MDP3_IOMMU_DOMAIN_MAX; i++) {
+		if (!IS_ERR_OR_NULL(mdp3_res->domains[i].domain))
+			msm_unregister_domain(mdp3_res->domains[i].domain);
+	}
+}
+
+static int mdp3_check_version(void)
+{
+	int rc;
+
+	rc = mdp3_clk_enable(1, 0);
+	if (rc) {
+		pr_err("fail to turn on MDP core clks\n");
+		return rc;
+	}
+
+	mdp3_res->mdp_rev = MDP3_REG_READ(MDP3_REG_HW_VERSION);
+
+	if (mdp3_res->mdp_rev != MDP_CORE_HW_VERSION) {
+		pr_err("mdp_hw_revision=%x mismatch\n", mdp3_res->mdp_rev);
+		rc = -ENODEV;
+	}
+
+	rc = mdp3_clk_enable(0, 0);
+	if (rc)
+		pr_err("fail to turn off MDP core clks\n");
+
+	return rc;
+}
+
+static int mdp3_hw_init(void)
+{
+	int i;
+
+	for (i = MDP3_DMA_P; i < MDP3_DMA_MAX; i++) {
+		mdp3_res->dma[i].dma_sel = i;
+		mdp3_res->dma[i].capability = MDP3_DMA_CAP_ALL;
+		mdp3_res->dma[i].in_use = 0;
+		mdp3_res->dma[i].available = 1;
+		mdp3_res->dma[i].cc_vect_sel = 0;
+		mdp3_res->dma[i].lut_sts = 0;
+		mdp3_res->dma[i].hist_cmap = NULL;
+		mdp3_res->dma[i].gc_cmap = NULL;
+		mutex_init(&mdp3_res->dma[i].pp_lock);
+	}
+	mdp3_res->dma[MDP3_DMA_S].capability = MDP3_DMA_CAP_DITHER;
+	mdp3_res->dma[MDP3_DMA_E].available = 0;
+
+	for (i = MDP3_DMA_OUTPUT_SEL_AHB; i < MDP3_DMA_OUTPUT_SEL_MAX; i++) {
+		mdp3_res->intf[i].cfg.type = i;
+		mdp3_res->intf[i].active = 0;
+		mdp3_res->intf[i].in_use = 0;
+		mdp3_res->intf[i].available = 1;
+	}
+	mdp3_res->intf[MDP3_DMA_OUTPUT_SEL_AHB].available = 0;
+	mdp3_res->intf[MDP3_DMA_OUTPUT_SEL_LCDC].available = 0;
+	mdp3_res->smart_blit_en = SMART_BLIT_RGB_EN | SMART_BLIT_YUV_EN;
+	mdp3_res->solid_fill_vote_en = false;
+	return 0;
+}
+
/*
 * mdp3_dynamic_clock_gating_ctrl() - toggle MDP hardware auto clock
 * gating.
 * @enable: non-zero sets CGC bits 10 and 18 and releases the VBIF
 *          force-on; zero clears them and forces VBIF clocks on (0x3).
 *
 * Core clocks are held on around the register accesses. Returns the
 * result of the final clock disable (0 on success).
 */
int mdp3_dynamic_clock_gating_ctrl(int enable)
{
	int rc = 0;
	int cgc_cfg = 0;
	/*Disable dynamic auto clock gating*/
	pr_debug("%s Status %s\n", __func__, (enable ? "ON":"OFF"));
	rc = mdp3_clk_enable(1, 0);
	if (rc) {
		pr_err("fail to turn on MDP core clks\n");
		return rc;
	}
	/* read-modify-write: preserve the other CGC enable bits */
	cgc_cfg = MDP3_REG_READ(MDP3_REG_CGC_EN);
	if (enable) {
		cgc_cfg |= (BIT(10));
		cgc_cfg |= (BIT(18));
		MDP3_REG_WRITE(MDP3_REG_CGC_EN, cgc_cfg);
		VBIF_REG_WRITE(MDP3_VBIF_REG_FORCE_EN, 0x0);
	} else {
		cgc_cfg &= ~(BIT(10));
		cgc_cfg &= ~(BIT(18));
		MDP3_REG_WRITE(MDP3_REG_CGC_EN, cgc_cfg);
		VBIF_REG_WRITE(MDP3_VBIF_REG_FORCE_EN, 0x3);
	}

	rc = mdp3_clk_enable(0, 0);
	if (rc)
		pr_err("fail to turn off MDP core clks\n");

	return rc;
}
+
/**
 * mdp3_get_panic_lut_cfg() - calculate panic and robust lut mask
 * @panel_width: Panel width
 *
 * DMA buffer has 16 fill levels. Which needs to configured as safe
 * and panic levels based on panel resolutions.
 * No. of fill levels used = ((panel active width * 8) / 512).
 * Roundoff the fill levels if needed.
 * half of the total fill levels used will be treated as panic levels.
 * Roundoff panic levels if total used fill levels are odd.
 *
 * Sample calculation for 720p display:
 * Fill levels used = (720 * 8) / 512 = 12.5 after round off 13.
 * panic levels = 13 / 2 = 6.5 after roundoff 7.
 * Panic mask = 0x3FFF (2 bits per level)
 * Robust mask = 0xFF80 (1 bit per level)
 *
 * NOTE(review): the code below computes fill_levels as
 * ((width * 8) / 512) + 1 — an unconditional +1 after integer division —
 * which yields 12 (not 13) for the 720 example above, and therefore a
 * panic mask of 0xFFF. Confirm which behavior is intended before
 * relying on this comment's worked example; the register values are
 * hardware-tuned and are left untouched here.
 *
 * Return: 64-bit word with the robust mask in the high 32 bits and the
 * panic mask in the low 32 bits, ready for the LUT register writes.
 */
u64 mdp3_get_panic_lut_cfg(u32 panel_width)
{
	u32 fill_levels = (((panel_width * 8) / 512) + 1);
	u32 panic_mask = 0;
	u32 robust_mask = 0;
	u32 i = 0;
	u64 panic_config = 0;
	u32 panic_levels = 0;

	/* half the fill levels, rounded up, become panic levels */
	panic_levels = fill_levels / 2;
	if (fill_levels % 2)
		panic_levels++;

	for (i = 0; i < panic_levels; i++) {
		panic_mask |= (BIT((i * 2) + 1) | BIT(i * 2));
		robust_mask |= BIT(i);
	}
	/* robust mask is the complement, packed into the high word */
	panic_config = ~robust_mask;
	panic_config = panic_config << 32;
	panic_config |= panic_mask;
	return panic_config;
}
+
+int mdp3_enable_panic_ctrl(void)
+{
+	int rc = 0;
+
+	if (MDP3_REG_READ(MDP3_PANIC_ROBUST_CTRL) == 0) {
+		pr_err("%s: Enable Panic Control\n", __func__);
+		MDP3_REG_WRITE(MDP3_PANIC_ROBUST_CTRL, BIT(0));
+	}
+	return rc;
+}
+
+int mdp3_qos_remapper_setup(struct mdss_panel_data *panel)
+{
+	int rc = 0;
+	u64 panic_config = mdp3_get_panic_lut_cfg(panel->panel_info.xres);
+
+	rc = mdp3_clk_update(MDP3_CLK_AHB, 1);
+	rc |= mdp3_clk_update(MDP3_CLK_AXI, 1);
+	rc |= mdp3_clk_update(MDP3_CLK_MDP_CORE, 1);
+	if (rc) {
+		pr_err("fail to turn on MDP core clks\n");
+		return rc;
+	}
+
+	if (!panel)
+		return -EINVAL;
+	/* Program MDP QOS Remapper */
+	MDP3_REG_WRITE(MDP3_DMA_P_QOS_REMAPPER, 0x1A9);
+	MDP3_REG_WRITE(MDP3_DMA_P_WATERMARK_0, 0x0);
+	MDP3_REG_WRITE(MDP3_DMA_P_WATERMARK_1, 0x0);
+	MDP3_REG_WRITE(MDP3_DMA_P_WATERMARK_2, 0x0);
+	/* PANIC setting depends on panel width*/
+	MDP3_REG_WRITE(MDP3_PANIC_LUT0,	(panic_config & 0xFFFF));
+	MDP3_REG_WRITE(MDP3_PANIC_LUT1, ((panic_config >> 16) & 0xFFFF));
+	MDP3_REG_WRITE(MDP3_ROBUST_LUT, ((panic_config >> 32) & 0xFFFF));
+	MDP3_REG_WRITE(MDP3_PANIC_ROBUST_CTRL, 0x1);
+	pr_debug("Panel width %d Panic Lut0 %x Lut1 %x Robust %x\n",
+		panel->panel_info.xres,
+		MDP3_REG_READ(MDP3_PANIC_LUT0),
+		MDP3_REG_READ(MDP3_PANIC_LUT1),
+		MDP3_REG_READ(MDP3_ROBUST_LUT));
+
+	rc = mdp3_clk_update(MDP3_CLK_AHB, 0);
+	rc |= mdp3_clk_update(MDP3_CLK_AXI, 0);
+	rc |= mdp3_clk_update(MDP3_CLK_MDP_CORE, 0);
+	if (rc)
+		pr_err("fail to turn off MDP core clks\n");
+	return rc;
+}
+
/*
 * One-time acquisition of MDP3 resources: IRQ, clocks, ION client,
 * IOMMU and bus scaling, followed by software state init.
 *
 * NOTE(review): failures after the first step return without rolling
 * back earlier acquisitions — presumably the probe error path calls
 * mdp3_res_deinit(); confirm against the caller.
 */
static int mdp3_res_init(void)
{
	int rc = 0;

	rc = mdp3_irq_setup();
	if (rc)
		return rc;

	rc = mdp3_clk_setup();
	if (rc)
		return rc;

	mdp3_res->ion_client = msm_ion_client_create(mdp3_res->pdev->name);
	if (IS_ERR_OR_NULL(mdp3_res->ion_client)) {
		pr_err("msm_ion_client_create() return error (%pK)\n",
				mdp3_res->ion_client);
		mdp3_res->ion_client = NULL;
		return -EINVAL;
	}

	rc = mdp3_iommu_init();
	if (rc)
		return rc;

	mdp3_res->bus_handle = mdp3_bus_handle;
	rc = mdp3_bus_scale_register();
	if (rc) {
		pr_err("unable to register bus scaling\n");
		return rc;
	}

	rc = mdp3_hw_init();

	return rc;
}
+
/*
 * Tear down everything mdp3_res_init() acquired: bus scaling, IOMMU
 * contexts and domains, the ION client, clocks, and finally the IRQ
 * line if it was requested.
 */
static void mdp3_res_deinit(void)
{
	struct mdss_hw *mdp3_hw;
	int i;

	mdp3_hw = &mdp3_res->mdp3_hw;
	mdp3_bus_scale_unregister();

	/* detach every context under the lock before dropping domains */
	mutex_lock(&mdp3_res->iommu_lock);
	for (i = 0; i < MDP3_IOMMU_CTX_MAX; i++)
		mdp3_iommu_dettach(i);
	mutex_unlock(&mdp3_res->iommu_lock);

	mdp3_iommu_deinit();

	if (!IS_ERR_OR_NULL(mdp3_res->ion_client))
		ion_client_destroy(mdp3_res->ion_client);

	mdp3_clk_remove();

	if (mdp3_res->irq_registered)
		devm_free_irq(&mdp3_res->pdev->dev,
				mdp3_hw->irq_info->irq, mdp3_res);
}
+
+static int mdp3_get_pan_intf(const char *pan_intf)
+{
+	int i, rc = MDSS_PANEL_INTF_INVALID;
+
+	if (!pan_intf)
+		return rc;
+
+	for (i = 0; i < ARRAY_SIZE(pan_types); i++) {
+		if (!strcmp(pan_intf, pan_types[i].name)) {
+			rc = pan_types[i].type;
+			break;
+		}
+	}
+
+	return rc;
+}
+
+static int mdp3_parse_dt_pan_intf(struct platform_device *pdev)
+{
+	int rc;
+	struct mdp3_hw_resource *mdata = platform_get_drvdata(pdev);
+	const char *prim_intf = NULL;
+
+	rc = of_property_read_string(pdev->dev.of_node,
+				"qcom,mdss-pref-prim-intf", &prim_intf);
+	if (rc)
+		return -ENODEV;
+
+	rc = mdp3_get_pan_intf(prim_intf);
+	if (rc < 0) {
+		mdata->pan_cfg.pan_intf = MDSS_PANEL_INTF_INVALID;
+	} else {
+		mdata->pan_cfg.pan_intf = rc;
+		rc = 0;
+	}
+	return rc;
+}
+
/*
 * mdp3_get_pan_cfg() - parse the boot-time panel override string.
 *
 * mdss_mdp3_panel has the form "<lk>:<intf>:<panel_name>", where <lk>
 * is '0' or '1' (whether the bootloader configured the panel), <intf>
 * is a short interface name (at most 4 characters are copied) and
 * <panel_name> is the remainder. On any parse failure pan_intf is set
 * to MDSS_PANEL_INTF_INVALID and -EINVAL is returned.
 */
static int mdp3_get_pan_cfg(struct mdss_panel_cfg *pan_cfg)
{
	char *t = NULL;
	char pan_intf_str[MDSS_MAX_PANEL_LEN];
	int rc, i, panel_len;
	char pan_name[MDSS_MAX_PANEL_LEN];

	if (!pan_cfg)
		return -EINVAL;

	if (mdss_mdp3_panel[0] == '0') {
		pan_cfg->lk_cfg = false;
	} else if (mdss_mdp3_panel[0] == '1') {
		pan_cfg->lk_cfg = true;
	} else {
		/* read from dt */
		pan_cfg->lk_cfg = true;
		pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID;
		return -EINVAL;
	}

	/* skip lk cfg and delimiter; ex: "0:" */
	strlcpy(pan_name, &mdss_mdp3_panel[2], MDSS_MAX_PANEL_LEN);
	t = strnstr(pan_name, ":", MDSS_MAX_PANEL_LEN);
	if (!t) {
		pr_err("%s: pan_name=[%s] invalid\n",
			__func__, pan_name);
		pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID;
		return -EINVAL;
	}

	/* copy the interface token (everything before ':', max 4 chars) */
	for (i = 0; ((pan_name + i) < t) && (i < 4); i++)
		pan_intf_str[i] = *(pan_name + i);
	pan_intf_str[i] = 0;
	pr_debug("%s:%d panel intf %s\n", __func__, __LINE__, pan_intf_str);
	/* point to the start of panel name */
	t = t + 1;
	strlcpy(&pan_cfg->arg_cfg[0], t, sizeof(pan_cfg->arg_cfg));
	pr_debug("%s:%d: t=[%s] panel name=[%s]\n", __func__, __LINE__,
		t, pan_cfg->arg_cfg);

	panel_len = strlen(pan_cfg->arg_cfg);
	if (!panel_len) {
		pr_err("%s: Panel name is invalid\n", __func__);
		pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID;
		return -EINVAL;
	}

	rc = mdp3_get_pan_intf(pan_intf_str);
	pan_cfg->pan_intf = (rc < 0) ?  MDSS_PANEL_INTF_INVALID : rc;
	return 0;
}
+
/*
 * Determine the panel configuration: prefer the kernel command line
 * override (mdss_mdp3_panel); fall back to the device tree preferred
 * primary interface.
 */
static int mdp3_get_cmdline_config(struct platform_device *pdev)
{
	int rc, len = 0;
	int *intf_type;
	char *panel_name;
	struct mdss_panel_cfg *pan_cfg;
	struct mdp3_hw_resource *mdata = platform_get_drvdata(pdev);

	/*
	 * NOTE(review): this writes arg_cfg[MDSS_MAX_PANEL_LEN], which is
	 * one past the end unless arg_cfg is declared with at least
	 * MDSS_MAX_PANEL_LEN + 1 entries — verify the struct definition.
	 */
	mdata->pan_cfg.arg_cfg[MDSS_MAX_PANEL_LEN] = 0;
	pan_cfg = &mdata->pan_cfg;
	panel_name = &pan_cfg->arg_cfg[0];
	intf_type = &pan_cfg->pan_intf;

	/* reads from dt by default */
	pan_cfg->lk_cfg = true;

	len = strlen(mdss_mdp3_panel);

	if (len > 0) {
		rc = mdp3_get_pan_cfg(pan_cfg);
		if (!rc) {
			pan_cfg->init_done = true;
			return rc;
		}
	}

	rc = mdp3_parse_dt_pan_intf(pdev);
	/* if pref pan intf is not present */
	if (rc)
		pr_err("%s:unable to parse device tree for pan intf\n",
			__func__);
	else
		pan_cfg->init_done = true;

	return rc;
}
+
+
+int mdp3_irq_init(u32 irq_start)
+{
+	struct mdss_hw *mdp3_hw;
+
+	mdp3_hw = &mdp3_res->mdp3_hw;
+
+	mdp3_hw->irq_info = kzalloc(sizeof(struct irq_info), GFP_KERNEL);
+	if (!mdp3_hw->irq_info)
+		return -ENOMEM;
+
+	mdp3_hw->hw_ndx = MDSS_HW_MDP;
+	mdp3_hw->irq_info->irq = irq_start;
+	mdp3_hw->irq_info->irq_mask = 0;
+	mdp3_hw->irq_info->irq_ena = false;
+	mdp3_hw->irq_info->irq_buzy = false;
+
+	mdp3_res->mdss_util->register_irq(&mdp3_res->mdp3_hw);
+	return 0;
+}
+
/*
 * mdp3_parse_dt() - map the MDP and VBIF register regions, wire up the
 * IRQ, resolve the panel override and read the optional DT flags
 * (batfet supply, panic control, idle power collapse).
 */
static int mdp3_parse_dt(struct platform_device *pdev)
{
	struct resource *res;
	struct property *prop = NULL;
	bool panic_ctrl;
	int rc;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mdp_phys");
	if (!res) {
		pr_err("unable to get MDP base address\n");
		return -EINVAL;
	}

	mdp3_res->mdp_reg_size = resource_size(res);
	mdp3_res->mdp_base = devm_ioremap(&pdev->dev, res->start,
					mdp3_res->mdp_reg_size);
	if (unlikely(!mdp3_res->mdp_base)) {
		pr_err("unable to map MDP base\n");
		return -ENOMEM;
	}

	/*
	 * NOTE(review): casting addresses to int for %x truncates on
	 * 64-bit and leaks kernel addresses; %pa/%pK would be preferable.
	 */
	pr_debug("MDP HW Base phy_Address=0x%x virt=0x%x\n",
		(int) res->start,
		(int) mdp3_res->mdp_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vbif_phys");
	if (!res) {
		pr_err("unable to get VBIF base address\n");
		return -EINVAL;
	}

	mdp3_res->vbif_reg_size = resource_size(res);
	mdp3_res->vbif_base = devm_ioremap(&pdev->dev, res->start,
					mdp3_res->vbif_reg_size);
	if (unlikely(!mdp3_res->vbif_base)) {
		pr_err("unable to map VBIF base\n");
		return -ENOMEM;
	}

	pr_debug("VBIF HW Base phy_Address=0x%x virt=0x%x\n",
		(int) res->start,
		(int) mdp3_res->vbif_base);

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		pr_err("unable to get MDSS irq\n");
		return -EINVAL;
	}
	rc = mdp3_irq_init(res->start);
	if (rc) {
		pr_err("%s: Error in irq initialization:rc=[%d]\n",
		       __func__, rc);
		return rc;
	}

	rc = mdp3_get_cmdline_config(pdev);
	if (rc) {
		pr_err("%s: Error in panel override:rc=[%d]\n",
		       __func__, rc);
		/* undo mdp3_irq_init()'s allocation on this failure path */
		kfree(mdp3_res->mdp3_hw.irq_info);
		return rc;
	}

	/* presence of a batfet supply decides whether we manage it */
	prop = of_find_property(pdev->dev.of_node, "batfet-supply", NULL);
	mdp3_res->batfet_required = prop ? true : false;

	panic_ctrl = of_property_read_bool(
				pdev->dev.of_node, "qcom,mdss-has-panic-ctrl");
	mdp3_res->dma[MDP3_DMA_P].has_panic_ctrl = panic_ctrl;

	mdp3_res->idle_pc_enabled = of_property_read_bool(
		pdev->dev.of_node, "qcom,mdss-idle-power-collapse-enabled");

	return 0;
}
+
/*
 * msm_mdp3_cx_ctrl() - enable/disable the CX rail for MDP3.
 * @enable: non-zero raises the corner to SVS_SOC and enables the
 *          regulator; zero disables it and releases the corner vote.
 *
 * The regulator handle is acquired lazily on first use; absence of the
 * supply is tolerated (function becomes a no-op). Errors are logged,
 * not returned.
 */
void msm_mdp3_cx_ctrl(int enable)
{
	int rc;

	if (!mdp3_res->vdd_cx) {
		mdp3_res->vdd_cx = devm_regulator_get(&mdp3_res->pdev->dev,
								"vdd-cx");
		if (IS_ERR_OR_NULL(mdp3_res->vdd_cx)) {
			pr_debug("unable to get CX reg. rc=%d\n",
				PTR_RET(mdp3_res->vdd_cx));
			mdp3_res->vdd_cx = NULL;
			return;
		}
	}

	if (enable) {
		/* vote the corner before enabling the supply */
		rc = regulator_set_voltage(
				mdp3_res->vdd_cx,
				RPM_REGULATOR_CORNER_SVS_SOC,
				RPM_REGULATOR_CORNER_SUPER_TURBO);
		if (rc < 0)
			goto vreg_set_voltage_fail;

		rc = regulator_enable(mdp3_res->vdd_cx);
		if (rc) {
			pr_err("Failed to enable regulator vdd_cx.\n");
			return;
		}
	} else {
		rc = regulator_disable(mdp3_res->vdd_cx);
		if (rc) {
			pr_err("Failed to disable regulator vdd_cx.\n");
			return;
		}
		/* release our corner vote after disabling */
		rc = regulator_set_voltage(
				mdp3_res->vdd_cx,
				RPM_REGULATOR_CORNER_NONE,
				RPM_REGULATOR_CORNER_SUPER_TURBO);
		if (rc < 0)
			goto vreg_set_voltage_fail;
	}

	return;
vreg_set_voltage_fail:
	pr_err("Set vltg failed\n");
}
+
+void mdp3_batfet_ctrl(int enable)
+{
+	int rc;
+
+	if (!mdp3_res->batfet_required)
+		return;
+
+	if (!mdp3_res->batfet) {
+		if (enable) {
+			mdp3_res->batfet =
+				devm_regulator_get(&mdp3_res->pdev->dev,
+				"batfet");
+			if (IS_ERR_OR_NULL(mdp3_res->batfet)) {
+				pr_debug("unable to get batfet reg. rc=%d\n",
+					PTR_RET(mdp3_res->batfet));
+				mdp3_res->batfet = NULL;
+				return;
+			}
+		} else {
+			pr_debug("Batfet regulator disable w/o enable\n");
+			return;
+		}
+	}
+
+	if (enable)
+		rc = regulator_enable(mdp3_res->batfet);
+	else
+		rc = regulator_disable(mdp3_res->batfet);
+
+	if (rc < 0)
+		pr_err("%s: reg enable/disable failed", __func__);
+}
+
/* Toggle the display-related regulators; currently only the batfet. */
void mdp3_enable_regulator(int enable)
{
	mdp3_batfet_ctrl(enable);
}
+
+static void mdp3_iommu_heap_unmap_iommu(struct mdp3_iommu_meta *meta)
+{
+	unsigned int domain_num;
+	unsigned int partition_num = 0;
+	struct iommu_domain *domain;
+
+	domain_num = (mdp3_res->domains +
+			MDP3_IOMMU_DOMAIN_UNSECURE)->domain_idx;
+	domain = msm_get_iommu_domain(domain_num);
+
+	if (!domain) {
+		pr_err("Could not get domain %d. Corruption?\n", domain_num);
+		return;
+	}
+
+	iommu_unmap_range(domain, meta->iova_addr, meta->mapped_size);
+	msm_free_iova_address(meta->iova_addr, domain_num, partition_num,
+		meta->mapped_size);
+}
+
+static void mdp3_iommu_meta_destroy(struct kref *kref)
+{
+	struct mdp3_iommu_meta *meta =
+			container_of(kref, struct mdp3_iommu_meta, ref);
+
+	rb_erase(&meta->node, &mdp3_res->iommu_root);
+	mdp3_iommu_heap_unmap_iommu(meta);
+	dma_buf_put(meta->dbuf);
+	kfree(meta);
+}
+
+
/* Drop one reference; the last put destroys the meta and its mapping. */
static void mdp3_iommu_meta_put(struct mdp3_iommu_meta *meta)
{
	/* Need to lock here to prevent race against map/unmap */
	mutex_lock(&mdp3_res->iommu_lock);
	kref_put(&meta->ref, mdp3_iommu_meta_destroy);
	mutex_unlock(&mdp3_res->iommu_lock);
}
+
+static struct mdp3_iommu_meta *mdp3_iommu_meta_lookup(struct sg_table *table)
+{
+	struct rb_root *root = &mdp3_res->iommu_root;
+	struct rb_node **p = &root->rb_node;
+	struct rb_node *parent = NULL;
+	struct mdp3_iommu_meta *entry = NULL;
+
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct mdp3_iommu_meta, node);
+
+		if (table < entry->table)
+			p = &(*p)->rb_left;
+		else if (table > entry->table)
+			p = &(*p)->rb_right;
+		else
+			return entry;
+	}
+	return NULL;
+}
+
/*
 * Drop the caller's reference on the iommu mapping of @handle.
 * NOTE(review): the lock is released between the lookup and the put, so
 * a concurrent final put could destroy the meta in that window —
 * confirm callers serialize map/unmap per buffer.
 */
void mdp3_unmap_iommu(struct ion_client *client, struct ion_handle *handle)
{
	struct mdp3_iommu_meta *meta;
	struct sg_table *table;

	table = ion_sg_table(client, handle);

	mutex_lock(&mdp3_res->iommu_lock);
	meta = mdp3_iommu_meta_lookup(table);
	if (!meta) {
		WARN(1, "%s: buffer was never mapped for %pK\n", __func__,
				handle);
		mutex_unlock(&mdp3_res->iommu_lock);
		return;
	}
	mutex_unlock(&mdp3_res->iommu_lock);

	mdp3_iommu_meta_put(meta);
}
+
+static void mdp3_iommu_meta_add(struct mdp3_iommu_meta *meta)
+{
+	struct rb_root *root = &mdp3_res->iommu_root;
+	struct rb_node **p = &root->rb_node;
+	struct rb_node *parent = NULL;
+	struct mdp3_iommu_meta *entry;
+
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct mdp3_iommu_meta, node);
+
+		if (meta->table < entry->table) {
+			p = &(*p)->rb_left;
+		} else if (meta->table > entry->table) {
+			p = &(*p)->rb_right;
+		} else {
+			pr_err("%s: handle %pK already exists\n", __func__,
+				entry->handle);
+			WARN_ON(1);
+		}
+	}
+
+	rb_link_node(&meta->node, parent, p);
+	rb_insert_color(&meta->node, root);
+}
+
/*
 * mdp3_iommu_map_iommu() - allocate an iova range and map the buffer
 * described by @meta into the unsecure domain, with optional guard
 * padding before and after the data.
 * @meta:        holds the sg_table, sizes and receives the iova
 * @align:       minimum iova alignment (raised to the first sg length)
 * @iova_length: total iova span, i.e. buffer size plus 2 * padding
 * @padding:     guard region size mapped before and after the buffer
 * @flags:       unused here
 *
 * Returns 0 on success; on failure everything mapped so far is unmapped
 * and the iova allocation released.
 */
static int mdp3_iommu_map_iommu(struct mdp3_iommu_meta *meta,
	unsigned long align, unsigned long iova_length,
	unsigned int padding, unsigned long flags)
{
	struct iommu_domain *domain;
	int ret = 0;
	unsigned long size;
	/* how much to unmap on the out2 error path; set before each goto */
	unsigned long unmap_size;
	struct sg_table *table;
	int prot = IOMMU_WRITE | IOMMU_READ;
	unsigned int domain_num = (mdp3_res->domains +
			MDP3_IOMMU_DOMAIN_UNSECURE)->domain_idx;
	unsigned int partition_num = 0;

	size = meta->size;
	table = meta->table;

	/* Use the biggest alignment to allow bigger IOMMU mappings.
	 * Use the first entry since the first entry will always be the
	 * biggest entry. To take advantage of bigger mapping sizes both the
	 * VA and PA addresses have to be aligned to the biggest size.
	 */
	if (table->sgl->length > align)
		align = table->sgl->length;

	ret = msm_allocate_iova_address(domain_num, partition_num,
			meta->mapped_size, align,
			(unsigned long *)&meta->iova_addr);

	if (ret)
		goto out;

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	/* Adding padding to before buffer */
	if (padding) {
		unsigned long phys_addr = sg_phys(table->sgl);

		ret = msm_iommu_map_extra(domain, meta->iova_addr, phys_addr,
				padding, SZ_4K, prot);
		if (ret)
			goto out1;
	}

	/* Mapping actual buffer */
	ret = iommu_map_range(domain, meta->iova_addr + padding,
			table->sgl, size, prot);
	if (ret) {
		pr_err("%s: could not map %pa in domain %pK\n",
			__func__, &meta->iova_addr, domain);
			unmap_size = padding;
		goto out2;
	}

	/* Adding padding to end of buffer */
	if (padding) {
		unsigned long phys_addr = sg_phys(table->sgl);
		unsigned long extra_iova_addr = meta->iova_addr +
				padding + size;
		ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
				padding, SZ_4K, prot);
		if (ret) {
			unmap_size = padding + size;
			goto out2;
		}
	}
	return ret;

out2:
	iommu_unmap_range(domain, meta->iova_addr, unmap_size);
out1:
	msm_free_iova_address(meta->iova_addr, domain_num, partition_num,
				iova_length);

out:
	return ret;
}
+
+static struct mdp3_iommu_meta *mdp3_iommu_meta_create(struct ion_client *client,
+	struct ion_handle *handle, struct sg_table *table, unsigned long size,
+	unsigned long align, unsigned long iova_length, unsigned int padding,
+	unsigned long flags, dma_addr_t *iova)
+{
+	struct mdp3_iommu_meta *meta;
+	int ret;
+
+	meta = kzalloc(sizeof(*meta), GFP_KERNEL);
+
+	if (!meta)
+		return ERR_PTR(-ENOMEM);
+
+	meta->handle = handle;
+	meta->table = table;
+	meta->size = size;
+	meta->mapped_size = iova_length;
+	meta->dbuf = ion_share_dma_buf(client, handle);
+	kref_init(&meta->ref);
+
+	ret = mdp3_iommu_map_iommu(meta,
+		align, iova_length, padding, flags);
+	if (ret < 0)	{
+		pr_err("%s: Unable to map buffer\n", __func__);
+		goto out;
+	}
+
+	*iova = meta->iova_addr;
+	mdp3_iommu_meta_add(meta);
+
+	return meta;
+out:
+	kfree(meta);
+	return ERR_PTR(ret);
+}
+
+/*
+ * PPP hw reads in tiles of 16 which might be outside mapped region
+ * need to map buffers ourseleve to add extra padding
+ */
+int mdp3_self_map_iommu(struct ion_client *client, struct ion_handle *handle,
+	unsigned long align, unsigned long padding, dma_addr_t *iova,
+	unsigned long *buffer_size, unsigned long flags,
+	unsigned long iommu_flags)
+{
+	struct mdp3_iommu_meta *iommu_meta = NULL;
+	struct sg_table *table;
+	struct scatterlist *sg;
+	unsigned long size = 0, iova_length = 0;
+	int ret = 0;
+	int i;
+
+	table = ion_sg_table(client, handle);
+	if (IS_ERR_OR_NULL(table))
+		return PTR_ERR(table);
+
+	for_each_sg(table->sgl, sg, table->nents, i)
+		size += sg->length;
+
+	padding = PAGE_ALIGN(padding);
+
+	/* Adding 16 lines padding before and after buffer */
+	iova_length = size + 2 * padding;
+
+	if (size & ~PAGE_MASK) {
+		pr_debug("%s: buffer size %lx is not aligned to %lx",
+			__func__, size, PAGE_SIZE);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (iova_length & ~PAGE_MASK) {
+		pr_debug("%s: iova_length %lx is not aligned to %lx",
+			__func__, iova_length, PAGE_SIZE);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	mutex_lock(&mdp3_res->iommu_lock);
+	iommu_meta = mdp3_iommu_meta_lookup(table);
+
+	if (!iommu_meta) {
+		iommu_meta = mdp3_iommu_meta_create(client, handle, table, size,
+				align, iova_length, padding, flags, iova);
+		if (!IS_ERR_OR_NULL(iommu_meta)) {
+			iommu_meta->flags = iommu_flags;
+			ret = 0;
+		} else {
+			ret = PTR_ERR(iommu_meta);
+			goto out_unlock;
+		}
+	} else {
+		if (iommu_meta->flags != iommu_flags) {
+			pr_err("%s: hndl %pK already mapped with diff flag\n",
+				__func__, handle);
+			ret = -EINVAL;
+			goto out_unlock;
+		} else if (iommu_meta->mapped_size != iova_length) {
+			pr_err("%s: hndl %pK already mapped with diff len\n",
+				__func__, handle);
+			ret = -EINVAL;
+			goto out_unlock;
+		} else {
+			kref_get(&iommu_meta->ref);
+			*iova = iommu_meta->iova_addr;
+		}
+	}
+	WARN_ON(iommu_meta->size != size);
+	mutex_unlock(&mdp3_res->iommu_lock);
+
+	*iova = *iova + padding;
+	*buffer_size = size;
+	return ret;
+
+out_unlock:
+	mutex_unlock(&mdp3_res->iommu_lock);
+out:
+	mdp3_iommu_meta_put(iommu_meta);
+	return ret;
+}
+
/*
 * mdp3_put_img() - release a buffer acquired via mdp3_get_img().
 * Handles both framebuffer-backed memory (fd reference) and
 * ION/dma-buf backed memory (unmap, detach, put); for PPP/DMA_P
 * clients the cloned sg table is freed as well.
 *
 * NOTE(review): tab_clone is dereferenced without a NULL check in the
 * PPP/DMA_P branch — this relies on mdp3_get_img() always having
 * allocated it for those clients; confirm.
 *
 * Returns 0 on success, -ENOMEM without an ION client, -EINVAL for an
 * unrecognized buffer.
 */
int mdp3_put_img(struct mdp3_img_data *data, int client)
{
	struct ion_client *iclient = mdp3_res->ion_client;
	int dom = (mdp3_res->domains + MDP3_IOMMU_DOMAIN_UNSECURE)->domain_idx;
	int dir = DMA_BIDIRECTIONAL;

	if (data->flags & MDP_MEMORY_ID_TYPE_FB) {
		pr_info("mdp3_put_img fb mem buf=0x%pa\n", &data->addr);
		/* framebuffer memory: just drop the file reference */
		fdput(data->srcp_f);
		memset(&data->srcp_f, 0, sizeof(struct fd));
	} else if (!IS_ERR_OR_NULL(data->srcp_dma_buf)) {
		pr_debug("ion hdl = %pK buf=0x%pa\n", data->srcp_dma_buf,
							&data->addr);
		if (!iclient) {
			pr_err("invalid ion client\n");
			return -ENOMEM;
		}
		if (data->mapped) {
			/* PPP/DMA_P clients were mapped via the clone table */
			if (client == MDP3_CLIENT_PPP ||
						client == MDP3_CLIENT_DMA_P)
				mdss_smmu_unmap_dma_buf(data->tab_clone,
					dom, dir, data->srcp_dma_buf);
			else
				mdss_smmu_unmap_dma_buf(data->srcp_table,
					dom, dir, data->srcp_dma_buf);
			data->mapped = false;
		}
		if (!data->skip_detach) {
			dma_buf_unmap_attachment(data->srcp_attachment,
				data->srcp_table,
			mdss_smmu_dma_data_direction(dir));
			dma_buf_detach(data->srcp_dma_buf,
					data->srcp_attachment);
			dma_buf_put(data->srcp_dma_buf);
			data->srcp_dma_buf = NULL;
		}
	} else {
		return -EINVAL;
	}
	if (client == MDP3_CLIENT_PPP || client == MDP3_CLIENT_DMA_P) {
		kfree(data->tab_clone->sgl);
		kfree(data->tab_clone);
	}
	return 0;
}
+
+int mdp3_get_img(struct msmfb_data *img, struct mdp3_img_data *data, int client)
+{
+	struct fd f;
+	int ret = -EINVAL;
+	int fb_num;
+	struct ion_client *iclient = mdp3_res->ion_client;
+	int dom = (mdp3_res->domains + MDP3_IOMMU_DOMAIN_UNSECURE)->domain_idx;
+
+	data->flags = img->flags;
+
+	if (img->flags & MDP_MEMORY_ID_TYPE_FB) {
+		f = fdget(img->memory_id);
+		if (f.file == NULL) {
+			pr_err("invalid framebuffer file (%d)\n",
+					img->memory_id);
+			return -EINVAL;
+		}
+		if (MAJOR(f.file->f_dentry->d_inode->i_rdev) == FB_MAJOR) {
+			fb_num = MINOR(f.file->f_dentry->d_inode->i_rdev);
+			ret = mdss_fb_get_phys_info(&data->addr,
+						&data->len, fb_num);
+			if (ret) {
+				pr_err("mdss_fb_get_phys_info() failed\n");
+				fdput(f);
+				memset(&f, 0, sizeof(struct fd));
+			}
+		} else {
+			pr_err("invalid FB_MAJOR\n");
+			fdput(f);
+			ret = -EINVAL;
+		}
+		data->srcp_f = f;
+		if (!ret)
+			goto done;
+	} else if (iclient) {
+		data->srcp_dma_buf = dma_buf_get(img->memory_id);
+			if (IS_ERR(data->srcp_dma_buf)) {
+				pr_err("DMA : error on ion_import_fd\n");
+				ret = PTR_ERR(data->srcp_dma_buf);
+				data->srcp_dma_buf = NULL;
+				return ret;
+			}
+
+			data->srcp_attachment =
+			mdss_smmu_dma_buf_attach(data->srcp_dma_buf,
+					&mdp3_res->pdev->dev, dom);
+			if (IS_ERR(data->srcp_attachment)) {
+				ret = PTR_ERR(data->srcp_attachment);
+				goto err_put;
+			}
+
+			data->srcp_table =
+				dma_buf_map_attachment(data->srcp_attachment,
+			mdss_smmu_dma_data_direction(DMA_BIDIRECTIONAL));
+			if (IS_ERR(data->srcp_table)) {
+				ret = PTR_ERR(data->srcp_table);
+				goto err_detach;
+			}
+
+			if (client == MDP3_CLIENT_PPP ||
+						client == MDP3_CLIENT_DMA_P) {
+				data->tab_clone =
+				mdss_smmu_sg_table_clone(data->srcp_table,
+							GFP_KERNEL, true);
+				if (IS_ERR_OR_NULL(data->tab_clone)) {
+					if (!(data->tab_clone))
+						ret = -EINVAL;
+					else
+						ret = PTR_ERR(data->tab_clone);
+					goto clone_err;
+				}
+				ret = mdss_smmu_map_dma_buf(data->srcp_dma_buf,
+					data->tab_clone, dom,
+					&data->addr, &data->len,
+					DMA_BIDIRECTIONAL);
+			} else {
+				ret = mdss_smmu_map_dma_buf(data->srcp_dma_buf,
+					data->srcp_table, dom, &data->addr,
+					&data->len, DMA_BIDIRECTIONAL);
+			}
+
+			if (IS_ERR_VALUE(ret)) {
+				pr_err("smmu map dma buf failed: (%d)\n", ret);
+				goto err_unmap;
+			}
+
+		data->mapped = true;
+		data->skip_detach = false;
+	}
+done:
+	if (client ==  MDP3_CLIENT_PPP || client == MDP3_CLIENT_DMA_P) {
+		data->addr  += data->tab_clone->sgl->length;
+		data->len   -= data->tab_clone->sgl->length;
+	}
+	if (!ret && (img->offset < data->len)) {
+		data->addr += img->offset;
+		data->len -= img->offset;
+
+		pr_debug("mem=%d ihdl=%pK buf=0x%pa len=0x%lx\n",
+			img->memory_id, data->srcp_dma_buf,
+			&data->addr, data->len);
+
+	} else {
+		mdp3_put_img(data, client);
+		return -EINVAL;
+	}
+	return ret;
+
+clone_err:
+	dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
+		mdss_smmu_dma_data_direction(DMA_BIDIRECTIONAL));
+err_detach:
+	dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
+err_put:
+	dma_buf_put(data->srcp_dma_buf);
+	return ret;
+err_unmap:
+	dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
+			mdss_smmu_dma_data_direction(DMA_BIDIRECTIONAL));
+	dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
+	dma_buf_put(data->srcp_dma_buf);
+
+	if (client ==  MDP3_CLIENT_PPP || client == MDP3_CLIENT_DMA_P) {
+		kfree(data->tab_clone->sgl);
+		kfree(data->tab_clone);
+	}
+	return ret;
+
+}
+
+int mdp3_iommu_enable(int client)
+{
+	int rc = 0;
+
+	mutex_lock(&mdp3_res->iommu_lock);
+
+	if (mdp3_res->iommu_ref_cnt == 0) {
+		rc = mdss_smmu_attach(mdss_res);
+		if (rc)
+			rc = mdss_smmu_detach(mdss_res);
+	}
+
+	if (!rc)
+		mdp3_res->iommu_ref_cnt++;
+	mutex_unlock(&mdp3_res->iommu_lock);
+
+	pr_debug("client :%d total_ref_cnt: %d\n",
+			client, mdp3_res->iommu_ref_cnt);
+	return rc;
+}
+
+int mdp3_iommu_disable(int client)
+{
+	int rc = 0;
+
+	mutex_lock(&mdp3_res->iommu_lock);
+	if (mdp3_res->iommu_ref_cnt) {
+		mdp3_res->iommu_ref_cnt--;
+
+		pr_debug("client :%d total_ref_cnt: %d\n",
+				client, mdp3_res->iommu_ref_cnt);
+		if (mdp3_res->iommu_ref_cnt == 0)
+			rc = mdss_smmu_detach(mdss_res);
+	} else {
+		pr_err("iommu ref count unbalanced for client %d\n", client);
+	}
+	mutex_unlock(&mdp3_res->iommu_lock);
+
+	return rc;
+}
+
+int mdp3_iommu_ctrl(int enable)
+{
+	int rc;
+
+	if (mdp3_res->allow_iommu_update == false)
+		return 0;
+
+	if (enable)
+		rc = mdp3_iommu_enable(MDP3_CLIENT_DSI);
+	else
+		rc = mdp3_iommu_disable(MDP3_CLIENT_DSI);
+	return rc;
+}
+
/* mdp3_init() - per-framebuffer MDP3 init hook (ctrl path, then PPP). */
static int mdp3_init(struct msm_fb_data_type *mfd)
{
	int err;

	err = mdp3_ctrl_init(mfd);
	if (err) {
		pr_err("mdp3 ctl init fail\n");
		return err;
	}

	err = mdp3_ppp_res_init(mfd);
	if (err)
		pr_err("mdp3 ppp res init fail\n");
	return err;
}
+
+u32 mdp3_fb_stride(u32 fb_index, u32 xres, int bpp)
+{
+	/*
+	 * The adreno GPU hardware requires that the pitch be aligned to
+	 * 32 pixels for color buffers, so for the cases where the GPU
+	 * is writing directly to fb0, the framebuffer pitch
+	 * also needs to be 32 pixel aligned
+	 */
+
+	if (fb_index == 0)
+		return ALIGN(xres, 32) * bpp;
+	else
+		return xres * bpp;
+}
+
/* mdp3_parse_dt_splash() - locate the bootloader's splash framebuffer memory
 * @mfd: framebuffer device data
 *
 * Reads the base address and size of the splash memory from either the
 * legacy "qcom,memblock-reserve" property or, failing that, from the
 * "qcom,cont-splash-memory" child node ("linux,contiguous-region"
 * phandle). Verifies the region is memblock-reserved and records it in
 * mdp3_res->splash_mem_addr/size.
 *
 * Return: 0 on success or when continuous splash is disabled; -EINVAL
 * when continuous splash is enabled but no reserved memory was found.
 */
__ref int mdp3_parse_dt_splash(struct msm_fb_data_type *mfd)
{
	struct platform_device *pdev = mfd->pdev;
	int len = 0, rc = 0;
	u32 offsets[2];	/* [0] = base address, [1] = size */
	struct device_node *pnode, *child_node;
	struct property *prop = NULL;

	mfd->splash_info.splash_logo_enabled =
				of_property_read_bool(pdev->dev.of_node,
				"qcom,mdss-fb-splash-logo-enabled");

	/* Legacy property first; len stays 0 when it is absent */
	prop = of_find_property(pdev->dev.of_node, "qcom,memblock-reserve",
				&len);
	if (!prop) {
		pr_debug("Read memblock reserve settings for fb failed\n");
		pr_debug("Read cont-splash-memory settings\n");
	}

	if (len) {
		len = len / sizeof(u32);

		rc = of_property_read_u32_array(pdev->dev.of_node,
			"qcom,memblock-reserve", offsets, len);
		if (rc) {
			pr_err("error reading mem reserve settings for fb\n");
			rc = -EINVAL;
			goto error;
		}
	} else {
		/* Fall back to the cont-splash-memory child node */
		child_node = of_get_child_by_name(pdev->dev.of_node,
					"qcom,cont-splash-memory");
		if (!child_node) {
			pr_err("splash mem child node is not present\n");
			rc = -EINVAL;
			goto error;
		}

		pnode = of_parse_phandle(child_node, "linux,contiguous-region",
					0);
		if (pnode != NULL) {
			const u32 *addr;
			u64 size;

			addr = of_get_address(pnode, 0, &size, NULL);
			if (!addr) {
				pr_err("failed to parse the splash memory address\n");
				of_node_put(pnode);
				rc = -EINVAL;
				goto error;
			}
			/* address is a 2-cell value; take the low 32 bits */
			offsets[0] = (u32) of_read_ulong(addr, 2);
			offsets[1] = (u32) size;
			of_node_put(pnode);
		} else {
			pr_err("mem reservation for splash screen fb not present\n");
			rc = -EINVAL;
			goto error;
		}
	}

	if (!memblock_is_reserved(offsets[0])) {
		pr_debug("failed to reserve memory for fb splash\n");
		rc = -EINVAL;
		goto error;
	}

	mdp3_res->splash_mem_addr = offsets[0];
	mdp3_res->splash_mem_size = offsets[1];
error:
	/* Failure only matters when continuous splash is actually enabled */
	if (rc && mfd->panel_info->cont_splash_enabled)
		pr_err("no rsvd mem found in DT for splash screen\n");
	else
		rc = 0;

	return rc;
}
+
+void mdp3_free(struct msm_fb_data_type *mfd)
+{
+	size_t size = 0;
+	int dom;
+	unsigned long phys;
+
+	if (!mfd->iova || !mfd->fbi->screen_base) {
+		pr_info("no fbmem allocated\n");
+		return;
+	}
+
+	size = mfd->fbi->fix.smem_len;
+	phys = mfd->fbi->fix.smem_start;
+	dom = mdp3_res->domains[MDP3_IOMMU_DOMAIN_UNSECURE].domain_idx;
+	iommu_unmap(mdp3_res->domains[MDP3_IOMMU_DOMAIN_UNSECURE].domain,
+			phys, size);
+	msm_iommu_unmap_contig_buffer(mfd->iova, dom, 0, size);
+
+	mfd->fbi->screen_base = NULL;
+	mfd->fbi->fix.smem_start = 0;
+	mfd->iova = 0;
+}
+
/* mdp3_release_splash_memory() - return the splash region to the system
 * @mfd: framebuffer device data
 *
 * Unmaps the splash region from the iommu (only needed for video-mode
 * panels that went through the continuous-splash handoff), frees the fb
 * mapping and gives the reserved memblock back to the page allocator.
 */
void mdp3_release_splash_memory(struct msm_fb_data_type *mfd)
{
	/* Give back the reserved memory to the system */
	if (mdp3_res->splash_mem_addr) {
		if ((mfd->panel.type == MIPI_VIDEO_PANEL) &&
				(mdp3_res->cont_splash_en)) {
			mdss_smmu_unmap(MDSS_IOMMU_DOMAIN_UNSECURE,
				mdp3_res->splash_mem_addr,
				mdp3_res->splash_mem_size);
		}
		mdp3_free(mfd);
		pr_debug("mdp3_release_splash_memory\n");
		/* memblock_free updates the accounting; free_bootmem_late
		 * actually hands the pages to the buddy allocator.
		 */
		memblock_free(mdp3_res->splash_mem_addr,
				mdp3_res->splash_mem_size);
		free_bootmem_late(mdp3_res->splash_mem_addr,
				mdp3_res->splash_mem_size);
		mdp3_res->splash_mem_addr = 0;
	}
}
+
+struct mdp3_dma *mdp3_get_dma_pipe(int capability)
+{
+	int i;
+
+	for (i = MDP3_DMA_P; i < MDP3_DMA_MAX; i++) {
+		if (!mdp3_res->dma[i].in_use && mdp3_res->dma[i].available &&
+			mdp3_res->dma[i].capability & capability) {
+			mdp3_res->dma[i].in_use = true;
+			return &mdp3_res->dma[i];
+		}
+	}
+	return NULL;
+}
+
+struct mdp3_intf *mdp3_get_display_intf(int type)
+{
+	int i;
+
+	for (i = MDP3_DMA_OUTPUT_SEL_AHB; i < MDP3_DMA_OUTPUT_SEL_MAX; i++) {
+		if (!mdp3_res->intf[i].in_use && mdp3_res->intf[i].available &&
+			mdp3_res->intf[i].cfg.type == type) {
+			mdp3_res->intf[i].in_use = true;
+			return &mdp3_res->intf[i];
+		}
+	}
+	return NULL;
+}
+
+static int mdp3_fb_mem_get_iommu_domain(void)
+{
+	if (!mdp3_res)
+		return -ENODEV;
+	return mdp3_res->domains[MDP3_IOMMU_DOMAIN_UNSECURE].domain_idx;
+}
+
/* Report whether the continuous-splash handoff is active (see
 * mdp3_continuous_splash_on()). Assumes mdp3_res is initialized.
 */
int mdp3_get_cont_spash_en(void)
{
	return mdp3_res->cont_splash_en;
}
+
/* mdp3_is_display_on() - check if the bootloader left the display running
 * @pdata: panel data
 *
 * Turns on the core clocks, reads the hardware state and also latches
 * the DMA_P input buffer address as the splash memory address.
 *
 * Return: nonzero when the display is on, 0 when off, negative errno
 * when the clocks could not be enabled.
 */
static int mdp3_is_display_on(struct mdss_panel_data *pdata)
{
	int rc = 0;
	u32 status;

	rc = mdp3_clk_enable(1, 0);
	if (rc) {
		pr_err("fail to turn on MDP core clks\n");
		return rc;
	}
	if (pdata->panel_info.type == MIPI_VIDEO_PANEL) {
		/* video mode: the DSI video engine enable bit says it all */
		status = MDP3_REG_READ(MDP3_REG_DSI_VIDEO_EN);
		rc = status & 0x1;
	} else {
		/* command mode: check the DMA_P output-select field.
		 * NOTE(review): 0x180000/0x080000 are the out_sel bits of
		 * DMA_P_CONFIG — confirm against the MDP3 register manual.
		 */
		status = MDP3_REG_READ(MDP3_REG_DMA_P_CONFIG);
		status &= 0x180000;
		rc = (status == 0x080000);
	}

	/* Remember where the bootloader's scanout buffer lives */
	mdp3_res->splash_mem_addr = MDP3_REG_READ(MDP3_REG_DMA_P_IBUF_ADDR);

	if (mdp3_clk_enable(0, 0))
		pr_err("fail to turn off MDP core clks\n");
	return rc;
}
+
/* mdp3_continuous_splash_on() - take over a display lit by the bootloader
 * @pdata: panel data handed over from the bootloader
 *
 * Votes bus bandwidth and clock rates sized from the panel timing, turns
 * on MDP resources, initializes PPP, marks the active interface and keeps
 * the regulator on so the splash image survives until the first kernel
 * frame. Sets mdp3_res->cont_splash_en on success.
 */
static int mdp3_continuous_splash_on(struct mdss_panel_data *pdata)
{
	struct mdss_panel_info *panel_info = &pdata->panel_info;
	struct mdp3_bus_handle_map *bus_handle;
	u64 ab = 0;
	u64 ib = 0;
	u64 mdp_clk_rate = 0;
	int rc = 0;

	pr_debug("mdp3__continuous_splash_on\n");

	bus_handle = &mdp3_res->bus_handle[MDP3_BUS_HANDLE];
	if (bus_handle->handle < 1) {
		pr_err("invalid bus handle %d\n", bus_handle->handle);
		return -EINVAL;
	}
	/* Derive the clock rate and ab/ib bandwidth votes from panel timing */
	mdp3_calc_dma_res(panel_info, &mdp_clk_rate, &ab, &ib, panel_info->bpp);

	mdp3_clk_set_rate(MDP3_CLK_VSYNC, MDP_VSYNC_CLK_RATE,
			MDP3_CLIENT_DMA_P);
	mdp3_clk_set_rate(MDP3_CLK_MDP_SRC, mdp_clk_rate,
			MDP3_CLIENT_DMA_P);

	/* NOTE(review): rc from this vote is overwritten below, so a failed
	 * initial quota vote is effectively ignored — confirm intended.
	 */
	rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_DMA_P, ab, ib);
	bus_handle->restore_ab[MDP3_CLIENT_DMA_P] = ab;
	bus_handle->restore_ib[MDP3_CLIENT_DMA_P] = ib;

	rc = mdp3_res_update(1, 1, MDP3_CLIENT_DMA_P);
	if (rc) {
		pr_err("fail to enable clk\n");
		return rc;
	}

	rc = mdp3_ppp_init();
	if (rc) {
		pr_err("ppp init failed\n");
		goto splash_on_err;
	}

	if (panel_info->type == MIPI_VIDEO_PANEL)
		mdp3_res->intf[MDP3_DMA_OUTPUT_SEL_DSI_VIDEO].active = 1;
	else
		mdp3_res->intf[MDP3_DMA_OUTPUT_SEL_DSI_CMD].active = 1;

	/* Keep power up across the handoff so the splash image stays visible */
	mdp3_enable_regulator(true);
	mdp3_res->cont_splash_en = 1;
	return 0;

splash_on_err:
	if (mdp3_res_update(0, 1, MDP3_CLIENT_DMA_P))
		pr_err("%s: Unable to disable mdp3 clocks\n", __func__);

	return rc;
}
+
+static int mdp3_panel_register_done(struct mdss_panel_data *pdata)
+{
+	int rc = 0;
+	u64 ab = 0; u64 ib = 0;
+	u64 mdp_clk_rate = 0;
+
+	/* Store max bandwidth supported in mdp res */
+	mdp3_calc_dma_res(&pdata->panel_info, &mdp_clk_rate, &ab, &ib,
+			MAX_BPP_SUPPORTED);
+	do_div(ab, 1024);
+	mdp3_res->max_bw = ab+1;
+
+	/*
+	 * If idle pc feature is not enabled, then get a reference to the
+	 * runtime device which will be released when device is turned off
+	 */
+	if (!mdp3_res->idle_pc_enabled ||
+		pdata->panel_info.type != MIPI_CMD_PANEL) {
+		pm_runtime_get_sync(&mdp3_res->pdev->dev);
+	}
+
+	if (pdata->panel_info.cont_splash_enabled) {
+		if (!mdp3_is_display_on(pdata)) {
+			pr_err("continuous splash, but bootloader is not\n");
+			return 0;
+		}
+		rc = mdp3_continuous_splash_on(pdata);
+	} else {
+		if (mdp3_is_display_on(pdata)) {
+			pr_err("lk continuous splash, but kerenl not\n");
+			rc = mdp3_continuous_splash_on(pdata);
+		}
+	}
+	/*
+	 * We want to prevent iommu from being enabled if there is
+	 * continue splash screen. This would have happened in
+	 * res_update in continuous_splash_on without this flag.
+	 */
+	if (pdata->panel_info.cont_splash_enabled == false)
+		mdp3_res->allow_iommu_update = true;
+
+	mdss_res->pdata = pdata;
+	return rc;
+}
+
+/* mdp3_clear_irq() - Clear interrupt
+ * @ interrupt_mask : interrupt mask
+ *
+ * This function clear sync irq for command mode panel.
+ * When system is entering in idle screen state.
+ */
+void mdp3_clear_irq(u32 interrupt_mask)
+{
+	unsigned long flag;
+	u32 irq_status = 0;
+
+	spin_lock_irqsave(&mdp3_res->irq_lock, flag);
+	irq_status = interrupt_mask &
+		MDP3_REG_READ(MDP3_REG_INTR_STATUS);
+	if (irq_status)
+		MDP3_REG_WRITE(MDP3_REG_INTR_CLEAR, irq_status);
+	spin_unlock_irqrestore(&mdp3_res->irq_lock, flag);
+
+}
+
+/* mdp3_autorefresh_disable() - Disable Auto refresh
+ * @ panel_info : pointer to panel configuration structure
+ *
+ * This function disable Auto refresh block for command mode panel.
+ */
+int mdp3_autorefresh_disable(struct mdss_panel_info *panel_info)
+{
+	if ((panel_info->type == MIPI_CMD_PANEL) &&
+		(MDP3_REG_READ(MDP3_REG_AUTOREFRESH_CONFIG_P)))
+		MDP3_REG_WRITE(MDP3_REG_AUTOREFRESH_CONFIG_P, 0);
+	return 0;
+}
+
+int mdp3_splash_done(struct mdss_panel_info *panel_info)
+{
+	if (panel_info->cont_splash_enabled) {
+		pr_err("continuous splash is on and splash done called\n");
+		return -EINVAL;
+	}
+	mdp3_res->allow_iommu_update = true;
+	return 0;
+}
+
+static int mdp3_debug_dump_stats_show(struct seq_file *s, void *v)
+{
+	struct mdp3_hw_resource *res = (struct mdp3_hw_resource *)s->private;
+
+	seq_printf(s, "underrun: %08u\n", res->underrun_cnt);
+
+	return 0;
+}
+DEFINE_MDSS_DEBUGFS_SEQ_FOPS(mdp3_debug_dump_stats);
+
/* debugfs hook: toggle the MDP core clocks on (@on != 0) or off. */
static void mdp3_debug_enable_clock(int on)
{
	mdp3_clk_enable(on ? 1 : 0, 0);
}
+
/* mdp3_debug_init() - set up the mdss_data_type shim and debugfs nodes
 * @pdev: the mdp3 platform device
 *
 * Allocates the global mdss_res structure (MDP3 reuses the MDSS debug
 * infrastructure), wires its callbacks, creates the debugfs tree, the
 * "stat" file and the register-dump base. Return: 0 or negative errno.
 */
static int mdp3_debug_init(struct platform_device *pdev)
{
	int rc;
	struct mdss_data_type *mdata;
	struct mdss_debug_data *mdd;

	mdata = devm_kzalloc(&pdev->dev, sizeof(*mdata), GFP_KERNEL);
	if (!mdata)
		return -ENOMEM;

	/* Publish the shim globally; freed in mdp3_debug_deinit() */
	mdss_res = mdata;
	mutex_init(&mdata->reg_lock);
	mutex_init(&mdata->reg_bus_lock);
	mutex_init(&mdata->bus_lock);
	INIT_LIST_HEAD(&mdata->reg_bus_clist);
	atomic_set(&mdata->sd_client_count, 0);
	atomic_set(&mdata->active_intf_cnt, 0);
	mdss_res->mdss_util = mdp3_res->mdss_util;

	mdata->debug_inf.debug_enable_clock = mdp3_debug_enable_clock;
	mdata->mdp_rev = mdp3_res->mdp_rev;

	rc = mdss_debugfs_init(mdata);
	if (rc)
		return rc;

	mdd = mdata->debug_inf.debug_data;
	if (!mdd)
		return -EINVAL;

	debugfs_create_file("stat", 0644, mdd->root, mdp3_res,
				&mdp3_debug_dump_stats_fops);

	/* Expose the MDP register window for debug register dumps */
	rc = mdss_debug_register_base(NULL, mdp3_res->mdp_base,
					mdp3_res->mdp_reg_size, NULL);

	return rc;
}
+
+static void mdp3_debug_deinit(struct platform_device *pdev)
+{
+	if (mdss_res) {
+		mdss_debugfs_remove(mdss_res);
+		devm_kfree(&pdev->dev, mdss_res);
+		mdss_res = NULL;
+	}
+}
+
/* mdp3_dma_underrun_intr_handler() - DMA_P underrun interrupt callback
 * @type: interrupt type (unused)
 * @arg:  callback data (unused)
 *
 * Counts underruns and, when CCS (color correction) is enabled, marks it
 * dirty and schedules the underrun work to reprogram it.
 */
static void mdp3_dma_underrun_intr_handler(int type, void *arg)
{
	struct mdp3_dma *dma = &mdp3_res->dma[MDP3_DMA_P];

	mdp3_res->underrun_cnt++;
	pr_err_ratelimited("display underrun detected count=%d\n",
			mdp3_res->underrun_cnt);
	ATRACE_INT("mdp3_dma_underrun_intr_handler", mdp3_res->underrun_cnt);

	/* ccs_dirty gates re-scheduling until the work has run once */
	if (dma->ccs_config.ccs_enable && !dma->ccs_config.ccs_dirty) {
		dma->ccs_config.ccs_dirty = true;
		schedule_work(&dma->underrun_work);
	}
}
+
/* Pixel formats the PPP (blit engine) accepts as input. */
uint32_t ppp_formats_supported[] = {
	MDP_RGB_565,
	MDP_BGR_565,
	MDP_RGB_888,
	MDP_BGR_888,
	MDP_XRGB_8888,
	MDP_ARGB_8888,
	MDP_RGBA_8888,
	MDP_BGRA_8888,
	MDP_RGBX_8888,
	MDP_Y_CBCR_H2V1,
	MDP_Y_CBCR_H2V2,
	MDP_Y_CBCR_H2V2_ADRENO,
	MDP_Y_CBCR_H2V2_VENUS,
	MDP_Y_CRCB_H2V1,
	MDP_Y_CRCB_H2V2,
	MDP_YCRYCB_H2V1,
	MDP_BGRX_8888,
};

/* Pixel formats the DMA output path supports. */
uint32_t dma_formats_supported[] = {
	MDP_RGB_565,
	MDP_RGB_888,
	MDP_XRGB_8888,
};
+
+static void __mdp3_set_supported_formats(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(ppp_formats_supported); i++)
+		SET_BIT(mdp3_res->ppp_formats, ppp_formats_supported[i]);
+
+	for (i = 0; i < ARRAY_SIZE(dma_formats_supported); i++)
+		SET_BIT(mdp3_res->dma_formats, dma_formats_supported[i]);
+}
+
/* __update_format_supported_info() - append the format bitmaps to @buf
 * @buf: sysfs page buffer being filled by mdp3_show_capabilities()
 * @cnt: running byte count into @buf, updated in place
 *
 * Emits the ppp input and dma output format bitmaps as comma-separated
 * byte values.
 */
static void __update_format_supported_info(char *buf, int *cnt)
{
	int j;
	size_t len = PAGE_SIZE;
	int num_bytes = BITS_TO_BYTES(MDP_IMGTYPE_LIMIT1);
/* append to buf, bounded by the remaining space in the page */
#define SPRINT(fmt, ...) \
	(*cnt += scnprintf(buf + *cnt, len - *cnt, fmt, ##__VA_ARGS__))

	SPRINT("ppp_input_fmts=");
	for (j = 0; j < num_bytes; j++)
		SPRINT("%d,", mdp3_res->ppp_formats[j]);
	SPRINT("\ndma_output_fmts=");
	for (j = 0; j < num_bytes; j++)
		SPRINT("%d,", mdp3_res->dma_formats[j]);
	SPRINT("\n");
#undef SPRINT
}
+
/* sysfs "caps" show: report static MDP3 capabilities to userspace.
 * The key=value lines are parsed by the userspace display HAL.
 */
static ssize_t mdp3_show_capabilities(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t len = PAGE_SIZE;
	int cnt = 0;

#define SPRINT(fmt, ...) \
		(cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))

	SPRINT("dma_pipes=%d\n", 1);
	SPRINT("mdp_version=3\n");
	SPRINT("hw_rev=%d\n", 305);
	SPRINT("pipe_count:%d\n", 1);
	SPRINT("pipe_num:%d pipe_type:dma pipe_ndx:%d rects:%d ", 0, 1, 1);
	SPRINT("pipe_is_handoff:%d display_id:%d\n", 0, 0);
	__update_format_supported_info(buf, &cnt);
	SPRINT("rgb_pipes=%d\n", 0);
	SPRINT("vig_pipes=%d\n", 0);
	SPRINT("dma_pipes=%d\n", 1);
	SPRINT("blending_stages=%d\n", 1);
	SPRINT("cursor_pipes=%d\n", 0);
	SPRINT("max_cursor_size=%d\n", 0);
	SPRINT("smp_count=%d\n", 0);
	SPRINT("smp_size=%d\n", 0);
	SPRINT("smp_mb_per_pipe=%d\n", 0);
	SPRINT("max_downscale_ratio=%d\n", PPP_DOWNSCALE_MAX);
	SPRINT("max_upscale_ratio=%d\n", PPP_UPSCALE_MAX);
	/* max_bw is computed in mdp3_panel_register_done() */
	SPRINT("max_pipe_bw=%u\n", mdp3_res->max_bw);
	SPRINT("max_bandwidth_low=%u\n", mdp3_res->max_bw);
	SPRINT("max_bandwidth_high=%u\n", mdp3_res->max_bw);
	SPRINT("max_mdp_clk=%u\n", MDP_CORE_CLK_RATE_MAX);
	SPRINT("clk_fudge_factor=%u,%u\n", CLK_FUDGE_NUM, CLK_FUDGE_DEN);
	SPRINT("features=has_ppp\n");

#undef SPRINT

	return cnt;
}
+
+static DEVICE_ATTR(caps, 0444, mdp3_show_capabilities, NULL);
+
+static ssize_t mdp3_store_smart_blit(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	u32 data = -1;
+	ssize_t rc = 0;
+
+	rc = kstrtoint(buf, 10, &data);
+	if (rc) {
+		pr_err("kstrtoint failed. rc=%d\n", rc);
+		return rc;
+	}
+	mdp3_res->smart_blit_en = data;
+	pr_debug("mdp3 smart blit RGB %s YUV %s\n",
+		(mdp3_res->smart_blit_en & SMART_BLIT_RGB_EN) ?
+		"ENABLED" : "DISABLED",
+		(mdp3_res->smart_blit_en & SMART_BLIT_YUV_EN) ?
+		"ENABLED" : "DISABLED");
+	return len;
+}
+
+static ssize_t mdp3_show_smart_blit(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t ret = 0;
+
+	pr_debug("mdp3 smart blit RGB %s YUV %s\n",
+		(mdp3_res->smart_blit_en & SMART_BLIT_RGB_EN) ?
+		"ENABLED" : "DISABLED",
+		(mdp3_res->smart_blit_en & SMART_BLIT_YUV_EN) ?
+		"ENABLED" : "DISABLED");
+	ret = snprintf(buf, PAGE_SIZE, "%d\n", mdp3_res->smart_blit_en);
+	return ret;
+}
+
+static DEVICE_ATTR(smart_blit, 0664,
+			mdp3_show_smart_blit, mdp3_store_smart_blit);
+
+static struct attribute *mdp3_fs_attrs[] = {
+	&dev_attr_caps.attr,
+	&dev_attr_smart_blit.attr,
+	NULL
+};
+
+static struct attribute_group mdp3_fs_attr_group = {
+	.attrs = mdp3_fs_attrs
+};
+
+static int mdp3_register_sysfs(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	int rc;
+
+	rc = sysfs_create_group(&dev->kobj, &mdp3_fs_attr_group);
+
+	return rc;
+}
+
+int mdp3_create_sysfs_link(struct device *dev)
+{
+	int rc;
+
+	rc = sysfs_create_link_nowarn(&dev->kobj,
+			&mdp3_res->pdev->dev.kobj, "mdp");
+
+	return rc;
+}
+
/* mdp3_misr_get() - capture a display CRC (MISR) value
 * @misr_resp: request/response; block_id selects the capture point and
 *             crc_value[0] receives the captured CRC
 *
 * Supports the DSI video (DISPLAY_MISR_DSI0) and DSI command
 * (DISPLAY_MISR_DSI_CMD) capture paths. Return: 0 on success, -EINVAL
 * for unsupported blocks, or the poll-timeout error.
 */
int mdp3_misr_get(struct mdp_misr *misr_resp)
{
	int result = 0, ret = -1;
	int crc = 0;

	pr_debug("%s CRC Capture on DSI\n", __func__);
	switch (misr_resp->block_id) {
	case DISPLAY_MISR_DSI0:
		MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_EN, 0);
		/* Sleep for one vsync after DSI video engine is disabled */
		msleep(20);
		/* Enable DSI_VIDEO_0 MISR Block */
		MDP3_REG_WRITE(MDP3_REG_MODE_DSI_PCLK, 0x20);
		/* Reset MISR Block */
		MDP3_REG_WRITE(MDP3_REG_MISR_RESET_DSI_PCLK, 1);
		/* Clear MISR capture done bit */
		MDP3_REG_WRITE(MDP3_REG_CAPTURED_DSI_PCLK, 0);
		/* Enable MDP DSI interface */
		MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_EN, 1);
		/* Poll for the capture-done bit */
		ret = readl_poll_timeout(mdp3_res->mdp_base +
			MDP3_REG_CAPTURED_DSI_PCLK, result,
			result & MDP3_REG_CAPTURED_DSI_PCLK_MASK,
			MISR_POLL_SLEEP, MISR_POLL_TIMEOUT);
			/* NOTE(review): MODE_DSI_PCLK is cleared here and
			 * again in the success branch below — the second
			 * write looks redundant; confirm against hardware
			 * programming guide.
			 */
			MDP3_REG_WRITE(MDP3_REG_MODE_DSI_PCLK, 0);
		if (ret == 0) {
			/* Disable DSI MISR interface */
			MDP3_REG_WRITE(MDP3_REG_MODE_DSI_PCLK, 0x0);
			crc = MDP3_REG_READ(MDP3_REG_MISR_CAPT_VAL_DSI_PCLK);
			pr_debug("CRC Val %d\n", crc);
		} else {
			pr_err("CRC Read Timed Out\n");
		}
		break;

	case DISPLAY_MISR_DSI_CMD:
		/* Select DSI PCLK Domain */
		MDP3_REG_WRITE(MDP3_REG_SEL_CLK_OR_HCLK_TEST_BUS, 0x004);
		/* Select Block id DSI_CMD */
		MDP3_REG_WRITE(MDP3_REG_MODE_DSI_PCLK, 0x10);
		/* Reset MISR Block */
		MDP3_REG_WRITE(MDP3_REG_MISR_RESET_DSI_PCLK, 1);
		/* Drive Data on Test Bus */
		MDP3_REG_WRITE(MDP3_REG_EXPORT_MISR_DSI_PCLK, 0);
		/* Kick off DMA_P */
		MDP3_REG_WRITE(MDP3_REG_DMA_P_START, 0x11);
		/* Wait for DMA_P Done */
		ret = readl_poll_timeout(mdp3_res->mdp_base +
			MDP3_REG_INTR_STATUS, result,
			result & MDP3_INTR_DMA_P_DONE_BIT,
			MISR_POLL_SLEEP, MISR_POLL_TIMEOUT);
		if (ret == 0) {
			crc = MDP3_REG_READ(MDP3_REG_MISR_CURR_VAL_DSI_PCLK);
			pr_debug("CRC Val %d\n", crc);
		} else {
			pr_err("CRC Read Timed Out\n");
		}
		break;

	default:
		pr_err("%s CRC Capture not supported\n", __func__);
		ret = -EINVAL;
		break;
	}

	misr_resp->crc_value[0] = crc;
	pr_debug("%s, CRC Capture on DSI Param Block = 0x%x, CRC 0x%x\n",
			__func__, misr_resp->block_id, misr_resp->crc_value[0]);
	return ret;
}
+
/* mdp3_misr_set() - arm the MISR block for a subsequent capture
 * @misr_req: request; block_id selects the capture point
 *
 * Selects the PCLK test bus, programs the block id and resets the MISR
 * so that mdp3_misr_get() can read a fresh CRC. Return: 0 or -EINVAL
 * for unsupported blocks.
 */
int mdp3_misr_set(struct mdp_misr *misr_req)
{
	int ret = 0;

	pr_debug("%s Parameters Block = %d Cframe Count = %d CRC = %d\n",
			__func__, misr_req->block_id, misr_req->frame_count,
			misr_req->crc_value[0]);

	switch (misr_req->block_id) {
	case DISPLAY_MISR_DSI0:
		pr_debug("In the case DISPLAY_MISR_DSI0\n");
		MDP3_REG_WRITE(MDP3_REG_SEL_CLK_OR_HCLK_TEST_BUS, 1);
		/* 0x20 selects the DSI video block */
		MDP3_REG_WRITE(MDP3_REG_MODE_DSI_PCLK, 0x20);
		MDP3_REG_WRITE(MDP3_REG_MISR_RESET_DSI_PCLK, 0x1);
		break;

	case DISPLAY_MISR_DSI_CMD:
		pr_debug("In the case DISPLAY_MISR_DSI_CMD\n");
		MDP3_REG_WRITE(MDP3_REG_SEL_CLK_OR_HCLK_TEST_BUS, 1);
		/* 0x10 selects the DSI command block */
		MDP3_REG_WRITE(MDP3_REG_MODE_DSI_PCLK, 0x10);
		MDP3_REG_WRITE(MDP3_REG_MISR_RESET_DSI_PCLK, 0x1);
		break;

	default:
		pr_err("%s CRC Capture not supported\n", __func__);
		ret = -EINVAL;
		break;
	}
	return ret;
}
+
+struct mdss_panel_cfg *mdp3_panel_intf_type(int intf_val)
+{
+	if (!mdp3_res || !mdp3_res->pan_cfg.init_done)
+		return ERR_PTR(-EPROBE_DEFER);
+
+	if (mdp3_res->pan_cfg.pan_intf == intf_val)
+		return &mdp3_res->pan_cfg;
+	else
+		return NULL;
+}
+EXPORT_SYMBOL(mdp3_panel_intf_type);
+
/* mdp3_footswitch_ctrl() - toggle the MDP GDSC (footswitch) regulator
 * @enable: nonzero to power up, zero to power down
 *
 * Serialized by fs_idle_pc_lock and tracked via fs_ena, so repeated
 * calls in the same state are no-ops. Turning off while interfaces are
 * still active marks an idle power collapse (idle_pc), which the runtime
 * PM callbacks use to skip panel suspend/resume.
 *
 * Return: 0 on success or no-op, -EINVAL on regulator failure.
 */
int mdp3_footswitch_ctrl(int enable)
{
	int rc = 0;
	int active_cnt = 0;

	mutex_lock(&mdp3_res->fs_idle_pc_lock);
	MDSS_XLOG(enable);
	if (!mdp3_res->fs_ena && enable) {
		rc = regulator_enable(mdp3_res->fs);
		if (rc) {
			pr_err("mdp footswitch ctrl enable failed\n");
			mutex_unlock(&mdp3_res->fs_idle_pc_lock);
			return -EINVAL;
		}
		pr_debug("mdp footswitch ctrl enable success\n");
		mdp3_enable_regulator(true);
		mdp3_res->fs_ena = true;
	} else if (!enable && mdp3_res->fs_ena) {
		active_cnt = atomic_read(&mdp3_res->active_intf_cnt);
		if (active_cnt != 0) {
			/*
			 * Turning off GDSC while overlays are still
			 * active.
			 */
			mdp3_res->idle_pc = true;
			pr_debug("idle pc. active overlays=%d\n",
				active_cnt);
		}
		mdp3_enable_regulator(false);
		rc = regulator_disable(mdp3_res->fs);
		if (rc) {
			pr_err("mdp footswitch ctrl disable failed\n");
			mutex_unlock(&mdp3_res->fs_idle_pc_lock);
			return -EINVAL;
		}
			mdp3_res->fs_ena = false;
		pr_debug("mdp3 footswitch ctrl disable configured\n");
	} else {
		pr_debug("mdp3 footswitch ctrl already configured\n");
	}

	mutex_unlock(&mdp3_res->fs_idle_pc_lock);
	return rc;
}
+
/* mdp3_panel_get_intf_status() - check whether a DSI panel is scanning out
 * @disp_num:  display number (unused here)
 * @intf_type: interface type; only MDSS_PANEL_INTF_DSI is supported
 *
 * Return: nonzero when DMA_P is routed to DSI (video or command mode),
 * 0 otherwise, or a negative errno when the clocks could not be enabled.
 */
int mdp3_panel_get_intf_status(u32 disp_num, u32 intf_type)
{
	int rc = 0, status = 0;

	if (intf_type != MDSS_PANEL_INTF_DSI)
		return 0;

	rc = mdp3_clk_enable(1, 0);
	if (rc) {
		pr_err("fail to turn on MDP core clks\n");
		return rc;
	}

	/* NOTE(review): 0x180000 masks the DMA_P out_sel field; 0x180000 =
	 * DSI video, 0x080000 = DSI command — confirm against register doc.
	 */
	status = (MDP3_REG_READ(MDP3_REG_DMA_P_CONFIG) & 0x180000);
	/* DSI video mode or command mode */
	rc = (status == 0x180000) || (status == 0x080000);

	if (mdp3_clk_enable(0, 0))
		pr_err("fail to turn off MDP core clks\n");
	return rc;
}
+
+static int mdp3_probe(struct platform_device *pdev)
+{
+	int rc;
+	static struct msm_mdp_interface mdp3_interface = {
+	.init_fnc = mdp3_init,
+	.fb_mem_get_iommu_domain = mdp3_fb_mem_get_iommu_domain,
+	.panel_register_done = mdp3_panel_register_done,
+	.fb_stride = mdp3_fb_stride,
+	.check_dsi_status = mdp3_check_dsi_ctrl_status,
+	};
+
+	struct mdp3_intr_cb underrun_cb = {
+		.cb = mdp3_dma_underrun_intr_handler,
+		.data = NULL,
+	};
+
+	pr_debug("%s: START\n", __func__);
+	if (!pdev->dev.of_node) {
+		pr_err("MDP driver only supports device tree probe\n");
+		return -ENOTSUPP;
+	}
+
+	if (mdp3_res) {
+		pr_err("MDP already initialized\n");
+		return -EINVAL;
+	}
+
+	mdp3_res = devm_kzalloc(&pdev->dev, sizeof(struct mdp3_hw_resource),
+				GFP_KERNEL);
+	if (mdp3_res == NULL)
+		return -ENOMEM;
+
+	pdev->id = 0;
+	mdp3_res->pdev = pdev;
+	mutex_init(&mdp3_res->res_mutex);
+	mutex_init(&mdp3_res->fs_idle_pc_lock);
+	spin_lock_init(&mdp3_res->irq_lock);
+	platform_set_drvdata(pdev, mdp3_res);
+	atomic_set(&mdp3_res->active_intf_cnt, 0);
+	mutex_init(&mdp3_res->reg_bus_lock);
+	INIT_LIST_HEAD(&mdp3_res->reg_bus_clist);
+
+	mdp3_res->mdss_util = mdss_get_util_intf();
+	if (mdp3_res->mdss_util == NULL) {
+		pr_err("Failed to get mdss utility functions\n");
+		rc =  -ENODEV;
+		goto get_util_fail;
+	}
+	mdp3_res->mdss_util->get_iommu_domain = mdp3_get_iommu_domain;
+	mdp3_res->mdss_util->iommu_attached = is_mdss_iommu_attached;
+	mdp3_res->mdss_util->iommu_ctrl = mdp3_iommu_ctrl;
+	mdp3_res->mdss_util->bus_scale_set_quota = mdp3_bus_scale_set_quota;
+	mdp3_res->mdss_util->panel_intf_type = mdp3_panel_intf_type;
+	mdp3_res->mdss_util->dyn_clk_gating_ctrl =
+		mdp3_dynamic_clock_gating_ctrl;
+	mdp3_res->mdss_util->panel_intf_type = mdp3_panel_intf_type;
+	mdp3_res->mdss_util->panel_intf_status = mdp3_panel_get_intf_status;
+
+	if (mdp3_res->mdss_util->param_check(mdss_mdp3_panel)) {
+		mdp3_res->mdss_util->display_disabled = true;
+		mdp3_res->mdss_util->mdp_probe_done = true;
+		return 0;
+	}
+
+	rc = mdp3_parse_dt(pdev);
+	if (rc)
+		goto probe_done;
+
+	rc = mdp3_res_init();
+	if (rc) {
+		pr_err("unable to initialize mdp3 resources\n");
+		goto probe_done;
+	}
+
+	mdp3_res->fs_ena = false;
+	mdp3_res->fs = devm_regulator_get(&pdev->dev, "vdd");
+	if (IS_ERR_OR_NULL(mdp3_res->fs)) {
+		pr_err("unable to get mdss gdsc regulator\n");
+		return -EINVAL;
+	}
+
+	rc = mdp3_debug_init(pdev);
+	if (rc) {
+		pr_err("unable to initialize mdp debugging\n");
+		goto probe_done;
+	}
+
+	pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT_MS);
+	if (mdp3_res->idle_pc_enabled) {
+		pr_debug("%s: Enabling autosuspend\n", __func__);
+		pm_runtime_use_autosuspend(&pdev->dev);
+	}
+	/* Enable PM runtime */
+	pm_runtime_set_suspended(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
+	if (!pm_runtime_enabled(&pdev->dev)) {
+		rc = mdp3_footswitch_ctrl(1);
+		if (rc) {
+			pr_err("unable to turn on FS\n");
+			goto probe_done;
+		}
+	}
+
+	rc = mdp3_check_version();
+	if (rc) {
+		pr_err("mdp3 check version failed\n");
+		goto probe_done;
+	}
+	rc = mdp3_register_sysfs(pdev);
+	if (rc)
+		pr_err("unable to register mdp sysfs nodes\n");
+
+	rc = mdss_fb_register_mdp_instance(&mdp3_interface);
+	if (rc)
+		pr_err("unable to register mdp instance\n");
+
+	rc = mdp3_set_intr_callback(MDP3_INTR_LCDC_UNDERFLOW,
+					&underrun_cb);
+	if (rc)
+		pr_err("unable to configure interrupt callback\n");
+
+	rc = mdss_smmu_init(mdss_res, &pdev->dev);
+	if (rc)
+		pr_err("mdss smmu init failed\n");
+
+	__mdp3_set_supported_formats();
+
+	mdp3_res->mdss_util->mdp_probe_done = true;
+	pr_debug("%s: END\n", __func__);
+
+probe_done:
+	if (IS_ERR_VALUE(rc))
+		kfree(mdp3_res->mdp3_hw.irq_info);
+get_util_fail:
+	if (IS_ERR_VALUE(rc)) {
+		mdp3_res_deinit();
+
+		if (mdp3_res->mdp_base)
+			devm_iounmap(&pdev->dev, mdp3_res->mdp_base);
+
+		devm_kfree(&pdev->dev, mdp3_res);
+		mdp3_res = NULL;
+
+		if (mdss_res) {
+			devm_kfree(&pdev->dev, mdss_res);
+			mdss_res = NULL;
+		}
+	}
+
+	return rc;
+}
+
+int mdp3_panel_get_boot_cfg(void)
+{
+	int rc;
+
+	if (!mdp3_res || !mdp3_res->pan_cfg.init_done)
+		rc = -EPROBE_DEFER;
+	else if (mdp3_res->pan_cfg.lk_cfg)
+		rc = 1;
+	else
+		rc = 0;
+	return rc;
+}
+
/* Common suspend path shared by system-sleep and legacy suspend hooks:
 * power down via the footswitch.
 */
static  int mdp3_suspend_sub(void)
{
	mdp3_footswitch_ctrl(0);
	return 0;
}

/* Common resume path: power the footswitch back up. */
static  int mdp3_resume_sub(void)
{
	mdp3_footswitch_ctrl(1);
	return 0;
}
+
#ifdef CONFIG_PM_SLEEP
/* System sleep entry: just power down through the common path. */
static int mdp3_pm_suspend(struct device *dev)
{
	dev_dbg(dev, "Display pm suspend\n");
	MDSS_XLOG(XLOG_FUNC_ENTRY);
	return mdp3_suspend_sub();
}

/* System sleep exit: reset runtime-PM status, then power up. */
static int mdp3_pm_resume(struct device *dev)
{
	dev_dbg(dev, "Display pm resume\n");

	/*
	 * It is possible that the runtime status of the mdp device may
	 * have been active when the system was suspended. Reset the runtime
	 * status to suspended state after a complete system resume.
	 */
	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);
	pm_runtime_enable(dev);

	MDSS_XLOG(XLOG_FUNC_ENTRY);
	return mdp3_resume_sub();
}
#endif
+
/* Legacy (non-dev_pm_ops) suspend/resume, used only when CONFIG_PM is
 * set without CONFIG_PM_SLEEP; otherwise the driver registers NULL.
 */
#if defined(CONFIG_PM) && !defined(CONFIG_PM_SLEEP)
static int mdp3_suspend(struct platform_device *pdev, pm_message_t state)
{
	pr_debug("Display suspend\n");

	MDSS_XLOG(XLOG_FUNC_ENTRY);
	return mdp3_suspend_sub();
}

static int mdp3_resume(struct platform_device *pdev)
{
	pr_debug("Display resume\n");

	MDSS_XLOG(XLOG_FUNC_ENTRY);
	return mdp3_resume_sub();
}
#else
#define mdp3_suspend NULL
#define mdp3_resume  NULL
#endif
+
+
#ifdef CONFIG_PM_RUNTIME
/* Runtime resume: power up the footswitch; panels are only resumed when
 * this is a real resume, not a wakeup from idle power collapse.
 */
static int mdp3_runtime_resume(struct device *dev)
{
	bool device_on = true;

	dev_dbg(dev, "Display pm runtime resume, active overlay cnt=%d\n",
		atomic_read(&mdp3_res->active_intf_cnt));

	/* do not resume panels when coming out of idle power collapse */
	if (!mdp3_res->idle_pc)
		device_for_each_child(dev, &device_on, mdss_fb_suspres_panel);

	MDSS_XLOG(XLOG_FUNC_ENTRY);
	mdp3_footswitch_ctrl(1);

	return 0;
}

/* Runtime idle: nothing to do; returning 0 allows suspension. */
static int mdp3_runtime_idle(struct device *dev)
{
	dev_dbg(dev, "Display pm runtime idle\n");

	return 0;
}

/* Runtime suspend: refuse while clocks are on; otherwise power down and
 * (unless in idle power collapse) suspend child panels.
 */
static int mdp3_runtime_suspend(struct device *dev)
{
	bool device_on = false;

	dev_dbg(dev, "Display pm runtime suspend, active overlay cnt=%d\n",
		atomic_read(&mdp3_res->active_intf_cnt));

	if (mdp3_res->clk_ena) {
		pr_debug("Clk turned on...MDP suspend failed\n");
		return -EBUSY;
	}

	MDSS_XLOG(XLOG_FUNC_ENTRY);
	mdp3_footswitch_ctrl(0);

	/* do not suspend panels when going in to idle power collapse */
	if (!mdp3_res->idle_pc)
		device_for_each_child(dev, &device_on, mdss_fb_suspres_panel);

	return 0;
}
#endif
+
+static const struct dev_pm_ops mdp3_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(mdp3_pm_suspend,
+				mdp3_pm_resume)
+	SET_RUNTIME_PM_OPS(mdp3_runtime_suspend,
+				mdp3_runtime_resume,
+				mdp3_runtime_idle)
+};
+
+
+static int mdp3_remove(struct platform_device *pdev)
+{
+	struct mdp3_hw_resource *mdata = platform_get_drvdata(pdev);
+
+	if (!mdata)
+		return -ENODEV;
+	pm_runtime_disable(&pdev->dev);
+	mdp3_bus_scale_unregister();
+	mdp3_clk_remove();
+	mdp3_debug_deinit(pdev);
+	return 0;
+}
+
+static const struct of_device_id mdp3_dt_match[] = {
+	{ .compatible = "qcom,mdss_mdp3",},
+	{}
+};
+MODULE_DEVICE_TABLE(of, mdp3_dt_match);
+EXPORT_COMPAT("qcom,mdss_mdp3");
+
+static struct platform_driver mdp3_driver = {
+	.probe = mdp3_probe,
+	.remove = mdp3_remove,
+	.suspend = mdp3_suspend,
+	.resume = mdp3_resume,
+	.shutdown = NULL,
+	.driver = {
+		.name = "mdp3",
+		.of_match_table = mdp3_dt_match,
+		.pm             = &mdp3_pm_ops,
+	},
+};
+
+static int __init mdp3_driver_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&mdp3_driver);
+	if (ret) {
+		pr_err("register mdp3 driver failed!\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+module_param_string(panel, mdss_mdp3_panel, MDSS_MAX_PANEL_LEN, 0600);
+/*
+ * panel=<lk_cfg>:<pan_intf>:<pan_intf_cfg>
+ * where <lk_cfg> is "1"-lk/gcdb config or "0" non-lk/non-gcdb
+ * config; <pan_intf> is dsi:0
+ * <pan_intf_cfg> is panel interface specific string
+ * Ex: This string is panel's device node name from DT
+ *	for DSI interface
+ */
+MODULE_PARM_DESC(panel, "lk supplied panel selection string");
+module_init(mdp3_driver_init);
diff --git a/drivers/video/fbdev/msm/mdp3.h b/drivers/video/fbdev/msm/mdp3.h
new file mode 100644
index 0000000..3f0d979
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdp3.h
@@ -0,0 +1,295 @@
+/* Copyright (c) 2013-2014, 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2007 Google Incorporated
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef MDP3_H
+#define MDP3_H
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <linux/msm_iommu_domains.h>
+
+#include "mdss_dsi_clk.h"
+#include "mdp3_dma.h"
+#include "mdss_fb.h"
+#include "mdss.h"
+
+/* Clock rates in Hz: vsync reference and MDP core operating points. */
+#define MDP_VSYNC_CLK_RATE	19200000
+#define MDP_CORE_CLK_RATE_SVS	160000000
+#define MDP_CORE_CLK_RATE_SUPER_SVS	200000000
+#define MDP_CORE_CLK_RATE_MAX	307200000
+
+/* Clock fudge ratio 12/10 — presumably ~20% headroom; confirm at use sites. */
+#define CLK_FUDGE_NUM		12
+#define CLK_FUDGE_DEN		10
+
+/* PPP can't work at SVS for panel res above qHD */
+#define SVS_MAX_PIXEL		(540 * 960)
+
+/* Kickoff / DMA-completion timeouts (milliseconds converted to jiffies). */
+#define KOFF_TIMEOUT_MS 84
+#define KOFF_TIMEOUT msecs_to_jiffies(KOFF_TIMEOUT_MS)
+#define WAIT_DMA_TIMEOUT msecs_to_jiffies(84)
+
+/*
+ * MDP_DEINTERLACE & MDP_SHARPENING Flags are not valid for MDP3
+ * so using them together for MDP_SMART_BLIT.
+ */
+#define MDP_SMART_BLIT                 0xC0000000
+
+/* Helpers for the per-format capability bitmaps in struct mdp3_hw_resource. */
+#define BITS_PER_BYTE 8
+#define MDP_IMGTYPE_LIMIT1 0x100
+#define BITS_TO_BYTES(x) DIV_ROUND_UP(x, BITS_PER_BYTE)
+
+/* Indices into the clocks[] / clock_ref_count[] arrays. */
+enum  {
+	MDP3_CLK_AHB,
+	MDP3_CLK_AXI,
+	MDP3_CLK_MDP_SRC,
+	MDP3_CLK_MDP_CORE,
+	MDP3_CLK_VSYNC,
+	MDP3_CLK_DSI,
+	MDP3_MAX_CLK
+};
+
+/* Bus-scale handle indices (only one handle is defined). */
+enum {
+	MDP3_BUS_HANDLE,
+	MDP3_BUS_HANDLE_MAX,
+};
+
+/* IOMMU domain indices: secure vs. non-secure mappings. */
+enum {
+	MDP3_IOMMU_DOMAIN_UNSECURE,
+	MDP3_IOMMU_DOMAIN_SECURE,
+	MDP3_IOMMU_DOMAIN_MAX,
+};
+
+/* IOMMU context-bank indices for the MDP. */
+enum {
+	MDP3_IOMMU_CTX_MDP_0,
+	MDP3_IOMMU_CTX_MDP_1,
+	MDP3_IOMMU_CTX_MAX
+};
+
+/* Keep DSI entry in sync with mdss
+ * which is being used by DSI 6G
+ */
+enum {
+	MDP3_CLIENT_DMA_P,
+	MDP3_CLIENT_DSI = 1,
+	MDP3_CLIENT_PPP,
+	MDP3_CLIENT_IOMMU,
+	MDP3_CLIENT_MAX,
+};
+
+/* Positions of partition/domain numbers in DT iommu descriptors. */
+enum {
+	DI_PARTITION_NUM = 0,
+	DI_DOMAIN_NUM = 1,
+	DI_MAX,
+};
+
+/*
+ * Per-handle bus-scaling state: registered vectors/usecases plus the
+ * currently requested and saved-for-restore AB/IB bandwidth per client.
+ */
+struct mdp3_bus_handle_map {
+	struct msm_bus_vectors *bus_vector;
+	struct msm_bus_paths *usecases;
+	struct msm_bus_scale_pdata *scale_pdata;
+	int current_bus_idx;
+	int ref_cnt;
+	u64 restore_ab[MDP3_CLIENT_MAX];
+	u64 restore_ib[MDP3_CLIENT_MAX];
+	u64 ab[MDP3_CLIENT_MAX];
+	u64 ib[MDP3_CLIENT_MAX];
+	u32 handle;
+};
+
+/* One IOMMU domain (secure or non-secure) and its iova partition. */
+struct mdp3_iommu_domain_map {
+	u32 domain_type;
+	char *client_name;
+	struct msm_iova_partition partitions[1];
+	int npartitions;
+	int domain_idx;
+	struct iommu_domain *domain;
+};
+
+/* One IOMMU context bank and whether it is currently attached. */
+struct mdp3_iommu_ctx_map {
+	u32 ctx_type;
+	struct mdp3_iommu_domain_map *domain;
+	char *ctx_name;
+	struct device *ctx;
+	int attached;
+};
+
+/*
+ * Refcounted bookkeeping for an ION buffer mapped into the IOMMU;
+ * kept in an rbtree (node) keyed per handle.
+ */
+struct mdp3_iommu_meta {
+	struct rb_node node;
+	struct ion_handle *handle;
+	struct rb_root iommu_maps;
+	struct kref ref;
+	struct sg_table *table;
+	struct dma_buf *dbuf;
+	int mapped_size;
+	unsigned long size;
+	dma_addr_t iova_addr;
+	unsigned long flags;
+};
+
+/* Number of distinct MDP3 interrupt sources tracked in callbacks[]. */
+#define MDP3_MAX_INTR 28
+
+/* Per-interrupt callback: invoked with the interrupt type and user data. */
+struct mdp3_intr_cb {
+	void (*cb)(int type, void *);
+	void *data;
+};
+
+/* Bit flags for smart-blit enablement (see smart_blit_en below). */
+#define SMART_BLIT_RGB_EN	1
+#define SMART_BLIT_YUV_EN	2
+
+/*
+ * Global MDP3 hardware state, allocated once at probe (see mdp3_res).
+ * Groups clocks, register mappings, bus/IOMMU handles, DMA/interface
+ * instances, IRQ bookkeeping, splash-memory info and regulators.
+ */
+struct mdp3_hw_resource {
+	struct platform_device *pdev;
+	u32 mdp_rev;
+
+	struct mutex res_mutex;
+
+	/* Clock handles and per-clock enable refcounts. */
+	struct clk *clocks[MDP3_MAX_CLK];
+	int clock_ref_count[MDP3_MAX_CLK];
+	unsigned long dma_core_clk_request;
+	unsigned long ppp_core_clk_request;
+	struct mdss_hw mdp3_hw;
+	struct mdss_util_intf *mdss_util;
+
+	/* Mapped register regions for the MDP core and the VBIF. */
+	char __iomem *mdp_base;
+	size_t mdp_reg_size;
+
+	char __iomem *vbif_base;
+	size_t vbif_reg_size;
+
+	struct mdp3_bus_handle_map *bus_handle;
+
+	/* ION / IOMMU state guarded by iommu_lock. */
+	struct ion_client *ion_client;
+	struct mdp3_iommu_domain_map *domains;
+	struct mdp3_iommu_ctx_map *iommu_contexts;
+	unsigned int iommu_ref_cnt;
+	bool allow_iommu_update;
+	struct ion_handle *ion_handle;
+	struct mutex iommu_lock;
+	struct mutex fs_idle_pc_lock;
+
+	struct mdp3_dma dma[MDP3_DMA_MAX];
+	struct mdp3_intf intf[MDP3_DMA_OUTPUT_SEL_MAX];
+
+	/* IRQ bookkeeping; irq_lock guards the mask and refcounts. */
+	struct rb_root iommu_root;
+	spinlock_t irq_lock;
+	u32 irq_ref_count[MDP3_MAX_INTR];
+	u32 irq_mask;
+	int irq_ref_cnt;
+	struct mdp3_intr_cb callbacks[MDP3_MAX_INTR];
+	u32 underrun_cnt;
+
+	int irq_registered;
+
+	/* Continuous-splash (bootloader) framebuffer region. */
+	unsigned long splash_mem_addr;
+	u32 splash_mem_size;
+	struct mdss_panel_cfg pan_cfg;
+
+	int clk_prepare_count;
+	int cont_splash_en;
+
+	/* Regulators, footswitch and idle power-collapse state. */
+	bool batfet_required;
+	struct regulator *batfet;
+	struct regulator *vdd_cx;
+	struct regulator *fs;
+	bool fs_ena;
+	int  clk_ena;
+	bool idle_pc_enabled;
+	bool idle_pc;
+	atomic_t active_intf_cnt;
+	u8 smart_blit_en;
+	bool solid_fill_vote_en;
+	struct list_head reg_bus_clist;
+	struct mutex reg_bus_lock;
+
+	u32 max_bw;
+
+	/* Bitmaps of supported source formats for PPP and DMA paths. */
+	u8 ppp_formats[BITS_TO_BYTES(MDP_IMGTYPE_LIMIT1)];
+	u8 dma_formats[BITS_TO_BYTES(MDP_IMGTYPE_LIMIT1)];
+};
+
+/*
+ * Descriptor for one imported image buffer: device address/length plus the
+ * dma-buf attachment state needed to unmap and release it later.
+ */
+struct mdp3_img_data {
+	dma_addr_t addr;
+	unsigned long len;
+	u32 offset;
+	u32 flags;
+	u32 padding;
+	int p_need;
+	struct ion_handle *srcp_ihdl;
+	u32 dir;
+	u32 domain;
+	bool mapped;
+	bool skip_detach;
+	struct fd srcp_f;
+	struct dma_buf *srcp_dma_buf;
+	struct dma_buf_attachment *srcp_attachment;
+	struct sg_table *srcp_table;
+	struct sg_table *tab_clone;
+};
+
+extern struct mdp3_hw_resource *mdp3_res;
+
+struct mdp3_dma *mdp3_get_dma_pipe(int capability);
+struct mdp3_intf *mdp3_get_display_intf(int type);
+void mdp3_irq_enable(int type);
+void mdp3_irq_disable(int type);
+void mdp3_irq_disable_nosync(int type);
+int mdp3_set_intr_callback(u32 type, struct mdp3_intr_cb *cb);
+void mdp3_irq_register(void);
+void mdp3_irq_deregister(void);
+int mdp3_clk_set_rate(int clk_type, unsigned long clk_rate, int client);
+int mdp3_clk_enable(int enable, int dsi_clk);
+int mdp3_res_update(int enable, int dsi_clk, int client);
+int mdp3_bus_scale_set_quota(int client, u64 ab_quota, u64 ib_quota);
+int mdp3_put_img(struct mdp3_img_data *data, int client);
+int mdp3_get_img(struct msmfb_data *img, struct mdp3_img_data *data,
+		int client);
+int mdp3_iommu_enable(int client);
+int mdp3_iommu_disable(int client);
+int mdp3_iommu_is_attached(void);
+void mdp3_free(struct msm_fb_data_type *mfd);
+int mdp3_parse_dt_splash(struct msm_fb_data_type *mfd);
+void mdp3_release_splash_memory(struct msm_fb_data_type *mfd);
+int mdp3_create_sysfs_link(struct device *dev);
+int mdp3_get_cont_spash_en(void);
+int mdp3_get_mdp_dsi_clk(void);
+int mdp3_put_mdp_dsi_clk(void);
+
+int mdp3_misr_set(struct mdp_misr *misr_req);
+int mdp3_misr_get(struct mdp_misr *misr_resp);
+void mdp3_enable_regulator(int enable);
+void mdp3_check_dsi_ctrl_status(struct work_struct *work,
+				uint32_t interval);
+int mdp3_dynamic_clock_gating_ctrl(int enable);
+int mdp3_footswitch_ctrl(int enable);
+int mdp3_qos_remapper_setup(struct mdss_panel_data *panel);
+int mdp3_splash_done(struct mdss_panel_info *panel_info);
+int mdp3_autorefresh_disable(struct mdss_panel_info *panel_info);
+u64 mdp3_clk_round_off(u64 clk_rate);
+
+void mdp3_calc_dma_res(struct mdss_panel_info *panel_info, u64 *clk_rate,
+		u64 *ab, u64 *ib, uint32_t bpp);
+void mdp3_clear_irq(u32 interrupt_mask);
+int mdp3_enable_panic_ctrl(void);
+
+int mdp3_layer_pre_commit(struct msm_fb_data_type *mfd,
+	struct file *file, struct mdp_layer_commit_v1 *commit);
+int mdp3_layer_atomic_validate(struct msm_fb_data_type *mfd,
+	struct file *file, struct mdp_layer_commit_v1 *commit);
+
+/* Relaxed (no barrier) register accessors into the MDP and VBIF regions. */
+#define MDP3_REG_WRITE(addr, val) writel_relaxed(val, mdp3_res->mdp_base + addr)
+#define MDP3_REG_READ(addr) readl_relaxed(mdp3_res->mdp_base + addr)
+#define VBIF_REG_WRITE(off, val) writel_relaxed(val, mdp3_res->vbif_base + off)
+#define VBIF_REG_READ(off) readl_relaxed(mdp3_res->vbif_base + off)
+
+#endif /* MDP3_H */
diff --git a/drivers/video/fbdev/msm/mdp3_ctrl.c b/drivers/video/fbdev/msm/mdp3_ctrl.c
new file mode 100644
index 0000000..889c302
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdp3_ctrl.c
@@ -0,0 +1,3018 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/dma-buf.h>
+#include <linux/pm_runtime.h>
+#include <linux/sw_sync.h>
+#include <linux/iommu.h>
+
+#include "mdp3_ctrl.h"
+#include "mdp3.h"
+#include "mdp3_ppp.h"
+#include "mdss_smmu.h"
+
+#define VSYNC_EXPIRE_TICK	4
+
+static void mdp3_ctrl_pan_display(struct msm_fb_data_type *mfd);
+static int mdp3_overlay_unset(struct msm_fb_data_type *mfd, int ndx);
+static int mdp3_histogram_stop(struct mdp3_session_data *session,
+					u32 block);
+static int mdp3_ctrl_clk_enable(struct msm_fb_data_type *mfd, int enable);
+static int mdp3_ctrl_vsync_enable(struct msm_fb_data_type *mfd, int enable);
+static int mdp3_ctrl_get_intf_type(struct msm_fb_data_type *mfd);
+static int mdp3_ctrl_lut_read(struct msm_fb_data_type *mfd,
+				struct mdp_rgb_lut_data *cfg);
+static int mdp3_ctrl_lut_config(struct msm_fb_data_type *mfd,
+				struct mdp_rgb_lut_data *cfg);
+static void mdp3_ctrl_pp_resume(struct msm_fb_data_type *mfd);
+
+/*
+ * Reciprocal table: entry i holds round(65536 / i) (entry 0 is 0), i.e. a
+ * 16.16 fixed-point inverse — presumably used to replace per-pixel division
+ * with a multiply in LUT processing; confirm at call sites.
+ */
+u32 mdp_lut_inverse16[MDP_LUT_SIZE] = {
+0, 65536, 32768, 21845, 16384, 13107, 10923, 9362, 8192, 7282, 6554, 5958,
+5461, 5041, 4681, 4369, 4096, 3855, 3641, 3449, 3277, 3121, 2979, 2849, 2731,
+2621, 2521, 2427, 2341, 2260, 2185, 2114, 2048, 1986, 1928, 1872, 1820, 1771,
+1725, 1680, 1638, 1598, 1560, 1524, 1489, 1456, 1425, 1394, 1365, 1337, 1311,
+1285, 1260, 1237, 1214, 1192, 1170, 1150, 1130, 1111, 1092, 1074, 1057, 1040,
+1024, 1008, 993, 978, 964, 950, 936, 923, 910, 898, 886, 874, 862, 851, 840,
+830, 819, 809, 799, 790, 780, 771, 762, 753, 745, 736, 728, 720, 712, 705, 697,
+690, 683, 676, 669, 662, 655, 649, 643, 636, 630, 624, 618, 612, 607, 601, 596,
+590, 585, 580, 575, 570, 565, 560, 555, 551, 546, 542, 537, 533, 529, 524, 520,
+516, 512, 508, 504, 500, 496, 493, 489, 485, 482, 478, 475, 471, 468, 465, 462,
+458, 455, 452, 449, 446, 443, 440, 437, 434, 431, 428, 426, 423, 420, 417, 415,
+412, 410, 407, 405, 402, 400, 397, 395, 392, 390, 388, 386, 383, 381, 379, 377,
+374, 372, 370, 368, 366, 364, 362, 360, 358, 356, 354, 352, 350, 349, 347, 345,
+343, 341, 340, 338, 336, 334, 333, 331, 329, 328, 326, 324, 323, 321, 320, 318,
+317, 315, 314, 312, 311, 309, 308, 306, 305, 303, 302, 301, 299, 298, 297, 295,
+294, 293, 291, 290, 289, 287, 286, 285, 284, 282, 281, 280, 279, 278, 277, 275,
+274, 273, 272, 271, 270, 269, 267, 266, 265, 264, 263, 262, 261, 260, 259, 258,
+257};
+
+/* Reset the circular buffer queue to an empty state. */
+static void mdp3_bufq_init(struct mdp3_buffer_queue *bufq)
+{
+	bufq->push_idx = 0;
+	bufq->pop_idx = 0;
+	bufq->count = 0;
+}
+
+/*
+ * Drain the buffer queue, releasing every queued image back to the DMA_P
+ * client, then reset the queue to empty.
+ */
+void mdp3_bufq_deinit(struct mdp3_buffer_queue *bufq)
+{
+	int remaining = bufq->count;
+
+	if (!remaining)
+		return;
+
+	while (remaining-- && (bufq->pop_idx >= 0)) {
+		struct mdp3_img_data *img = &bufq->img_data[bufq->pop_idx];
+
+		bufq->pop_idx = (bufq->pop_idx + 1) % MDP3_MAX_BUF_QUEUE;
+		mdp3_put_img(img, MDP3_CLIENT_DMA_P);
+	}
+	mdp3_bufq_init(bufq);
+}
+
+/*
+ * Append one image descriptor (copied by value) to the circular queue.
+ * Returns -EPERM when the queue already holds MDP3_MAX_BUF_QUEUE entries.
+ */
+int mdp3_bufq_push(struct mdp3_buffer_queue *bufq,
+			struct mdp3_img_data *data)
+{
+	int slot;
+
+	if (bufq->count >= MDP3_MAX_BUF_QUEUE) {
+		pr_err("bufq full\n");
+		return -EPERM;
+	}
+
+	slot = bufq->push_idx;
+	bufq->img_data[slot] = *data;
+	bufq->push_idx = (slot + 1) % MDP3_MAX_BUF_QUEUE;
+	bufq->count++;
+	return 0;
+}
+
+/*
+ * Remove and return the oldest entry, or NULL when the queue is empty.
+ * The returned pointer references the queue's own storage — the slot is
+ * valid until it is overwritten by a later push.
+ */
+static struct mdp3_img_data *mdp3_bufq_pop(struct mdp3_buffer_queue *bufq)
+{
+	struct mdp3_img_data *head;
+	int idx;
+
+	if (!bufq->count)
+		return NULL;
+
+	idx = bufq->pop_idx;
+	head = &bufq->img_data[idx];
+	bufq->pop_idx = (idx + 1) % MDP3_MAX_BUF_QUEUE;
+	bufq->count--;
+	return head;
+}
+
+/* Number of entries currently queued. */
+static int mdp3_bufq_count(struct mdp3_buffer_queue *bufq)
+{
+	return bufq->count;
+}
+
+/* Register a listener on this session's blocking notifier chain. */
+void mdp3_ctrl_notifier_register(struct mdp3_session_data *ses,
+	struct notifier_block *notifier)
+{
+	blocking_notifier_chain_register(&ses->notifier_head, notifier);
+}
+
+/* Remove a previously registered listener from the notifier chain. */
+void mdp3_ctrl_notifier_unregister(struct mdp3_session_data *ses,
+	struct notifier_block *notifier)
+{
+	blocking_notifier_chain_unregister(&ses->notifier_head, notifier);
+}
+
+/* Broadcast an MDP_NOTIFY_* event to all listeners; the session is passed
+ * as the notifier data.
+ */
+int mdp3_ctrl_notify(struct mdp3_session_data *ses, int event)
+{
+	return blocking_notifier_call_chain(&ses->notifier_head, event, ses);
+}
+
+/*
+ * kthread worker: drain dma_done_cnt, emitting one MDP_NOTIFY_FRAME_DONE
+ * per pending completion. Runs on the session's dedicated worker thread.
+ */
+static void mdp3_dispatch_dma_done(struct kthread_work *work)
+{
+	struct mdp3_session_data *session;
+	int cnt = 0;
+
+	pr_debug("%s\n", __func__);
+	session = container_of(work, struct mdp3_session_data,
+				dma_done_work);
+	/* container_of() of an embedded member cannot normally be NULL;
+	 * this check is defensive only.
+	 */
+	if (!session)
+		return;
+
+	cnt = atomic_read(&session->dma_done_cnt);
+	MDSS_XLOG(cnt);
+	while (cnt > 0) {
+		mdp3_ctrl_notify(session, MDP_NOTIFY_FRAME_DONE);
+		atomic_dec(&session->dma_done_cnt);
+		cnt--;
+	}
+}
+
+/*
+ * Deferred clock shutdown (scheduled when the vsync countdown expires).
+ * Aborts if vsync was re-enabled or the countdown restarted; otherwise
+ * waits for any in-flight DMA kickoff to finish (retrying once if the DMA
+ * engine still reports busy) before turning off vsync and the MDP clocks.
+ * All checks and the shutdown run under session->lock.
+ */
+static void mdp3_dispatch_clk_off(struct work_struct *work)
+{
+	struct mdp3_session_data *session;
+	int rc;
+	bool dmap_busy;
+	int retry_count = 2;
+
+	pr_debug("%s\n", __func__);
+	MDSS_XLOG(XLOG_FUNC_ENTRY, __LINE__);
+	session = container_of(work, struct mdp3_session_data,
+				clk_off_work);
+	if (!session)
+		return;
+
+	mutex_lock(&session->lock);
+	if (session->vsync_enabled ||
+		atomic_read(&session->vsync_countdown) > 0) {
+		mutex_unlock(&session->lock);
+		pr_debug("%s: Ignoring clk shut down\n", __func__);
+		MDSS_XLOG(XLOG_FUNC_EXIT, __LINE__);
+		return;
+	}
+
+	if (session->intf->active) {
+retry_dma_done:
+		rc = wait_for_completion_timeout(&session->dma_completion,
+							WAIT_DMA_TIMEOUT);
+		if (rc <= 0) {
+			struct mdss_panel_data *panel;
+
+			panel = session->panel;
+			pr_debug("cmd kickoff timed out (%d)\n", rc);
+			dmap_busy = session->dma->busy();
+			if (dmap_busy) {
+				if (--retry_count) {
+					pr_err("dmap is busy, retry %d\n",
+						retry_count);
+					goto retry_dma_done;
+				}
+				/* DMA stuck after retries — loud warning. */
+				pr_err("dmap is still busy, bug_on\n");
+				WARN_ON(1);
+			} else {
+				pr_debug("dmap is not busy, continue\n");
+			}
+		}
+	}
+	mdp3_ctrl_vsync_enable(session->mfd, 0);
+	mdp3_ctrl_clk_enable(session->mfd, 0);
+	MDSS_XLOG(XLOG_FUNC_EXIT, __LINE__);
+	mutex_unlock(&session->lock);
+}
+
+/*
+ * Vsync-interrupt callback for retire fences: defer the actual timeline
+ * signalling to the session's retire work item (workqueue context).
+ */
+static void mdp3_vsync_retire_handle_vsync(void *arg)
+{
+	struct mdp3_session_data *session = arg;
+
+	if (!session) {
+		pr_warn("Invalid handle for vsync\n");
+		return;
+	}
+
+	schedule_work(&session->retire_work);
+}
+
+/*
+ * Advance the sw_sync retire timeline by 'val' while pending retire fences
+ * exist, clamping the pending count at zero. Serialized with fence creation
+ * via mdp_sync_pt_data.sync_mutex.
+ */
+static void mdp3_vsync_retire_signal(struct msm_fb_data_type *mfd, int val)
+{
+	struct mdp3_session_data *mdp3_session;
+
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+
+	mutex_lock(&mfd->mdp_sync_pt_data.sync_mutex);
+	if (mdp3_session->retire_cnt > 0) {
+		sw_sync_timeline_inc(mdp3_session->vsync_timeline, val);
+		mdp3_session->retire_cnt -= min(val, mdp3_session->retire_cnt);
+	}
+	mutex_unlock(&mfd->mdp_sync_pt_data.sync_mutex);
+}
+
+/* Work handler: signal one retire fence per vsync tick. */
+static void mdp3_vsync_retire_work_handler(struct work_struct *work)
+{
+	struct mdp3_session_data *mdp3_session =
+		container_of(work, struct mdp3_session_data, retire_work);
+
+	/* Defensive: container_of() of an embedded member is never NULL. */
+	if (!mdp3_session)
+		return;
+
+	mdp3_vsync_retire_signal(mdp3_session->mfd, 1);
+}
+
+/* Histogram-done interrupt: bump the event count and wake sysfs pollers. */
+void mdp3_hist_intr_notify(struct mdp3_dma *dma)
+{
+	dma->hist_events++;
+	sysfs_notify_dirent(dma->hist_event_sd);
+	pr_debug("%s:: hist_events = %u\n", __func__, dma->hist_events);
+}
+
+/* Vsync callback: timestamp the vsync and wake sysfs vsync_event pollers. */
+void vsync_notify_handler(void *arg)
+{
+	struct mdp3_session_data *session = (struct mdp3_session_data *)arg;
+
+	session->vsync_time = ktime_get();
+	MDSS_XLOG(ktime_to_ms(session->vsync_time));
+	sysfs_notify_dirent(session->vsync_event_sd);
+}
+
+/*
+ * DMA-done callback: record the completion for the kthread worker to
+ * dispatch and unblock anyone waiting on dma_completion.
+ */
+void dma_done_notify_handler(void *arg)
+{
+	struct mdp3_session_data *session = (struct mdp3_session_data *)arg;
+
+	atomic_inc(&session->dma_done_cnt);
+	queue_kthread_work(&session->worker, &session->dma_done_work);
+	complete_all(&session->dma_completion);
+}
+
+void vsync_count_down(void *arg)
+{
+	struct mdp3_session_data *session = (struct mdp3_session_data *)arg;
+
+	/* We are counting down to turn off clocks */
+	if (atomic_read(&session->vsync_countdown) > 0)
+		atomic_dec(&session->vsync_countdown);
+	if (atomic_read(&session->vsync_countdown) == 0)
+		schedule_work(&session->clk_off_work);
+}
+
+/* Re-arm the clock-off countdown; only command-mode panels use it. */
+void mdp3_ctrl_reset_countdown(struct mdp3_session_data *session,
+		struct msm_fb_data_type *mfd)
+{
+	if (mdp3_ctrl_get_intf_type(mfd) == MDP3_DMA_OUTPUT_SEL_DSI_CMD)
+		atomic_set(&session->vsync_countdown, VSYNC_EXPIRE_TICK);
+}
+
+/*
+ * Enable or disable vsync notifications for a session.
+ *
+ * On enable, registers vsync_notify_handler with the DMA. On disable while
+ * the clock-off countdown is still running, keeps vsync on but swaps in
+ * vsync_count_down so clocks can be shut off once the countdown reaches 0
+ * (command-mode panels). Falls back to a timer-driven fake vsync when the
+ * interface is inactive or no commit has happened yet.
+ * Returns -ENODEV without a session/panel/dma/intf, -EINVAL if fb is off.
+ */
+static int mdp3_ctrl_vsync_enable(struct msm_fb_data_type *mfd, int enable)
+{
+	struct mdp3_session_data *mdp3_session;
+	struct mdp3_notification vsync_client;
+	struct mdp3_notification *arg = NULL;
+	bool mod_vsync_timer = false;
+
+	pr_debug("mdp3_ctrl_vsync_enable =%d\n", enable);
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+	if (!mdp3_session || !mdp3_session->panel || !mdp3_session->dma ||
+		!mdp3_session->intf)
+		return -ENODEV;
+
+	if (!mdp3_session->status) {
+		pr_debug("fb%d is not on yet", mfd->index);
+		return -EINVAL;
+	}
+	if (enable) {
+		vsync_client.handler = vsync_notify_handler;
+		vsync_client.arg = mdp3_session;
+		arg = &vsync_client;
+	} else if (atomic_read(&mdp3_session->vsync_countdown) > 0) {
+		/*
+		 * Now that vsync is no longer needed we will
+		 * shutdown dsi clocks as soon as cnt down == 0
+		 * for cmd mode panels
+		 */
+		vsync_client.handler = vsync_count_down;
+		vsync_client.arg = mdp3_session;
+		arg = &vsync_client;
+		enable = 1;
+	}
+
+	if (enable) {
+		if (mdp3_session->status == 1 &&
+			(mdp3_session->vsync_before_commit ||
+			!mdp3_session->intf->active)) {
+			mod_vsync_timer = true;
+		} else if (!mdp3_session->clk_on) {
+			/* Enable clocks before enabling the vsync interrupt */
+			mdp3_ctrl_reset_countdown(mdp3_session, mfd);
+			mdp3_ctrl_clk_enable(mfd, 1);
+		}
+	}
+
+	/* Clocks must be on while touching the DMA vsync registers. */
+	mdp3_clk_enable(1, 0);
+	mdp3_session->dma->vsync_enable(mdp3_session->dma, arg);
+	mdp3_clk_enable(0, 0);
+
+	/*
+	 * Need to fake vsync whenever dsi interface is not
+	 * active or when dsi clocks are currently off
+	 */
+	if (mod_vsync_timer) {
+		mod_timer(&mdp3_session->vsync_timer,
+			jiffies + msecs_to_jiffies(mdp3_session->vsync_period));
+	} else if (!enable) {
+		del_timer(&mdp3_session->vsync_timer);
+	}
+
+	return 0;
+}
+
+/*
+ * Software vsync timer: while the panel is on but the interface is idle
+ * (or no commit happened yet), fake a vsync event and re-arm for the next
+ * vsync period.
+ */
+void mdp3_vsync_timer_func(unsigned long arg)
+{
+	struct mdp3_session_data *session = (struct mdp3_session_data *)arg;
+
+	if (session->status == 1 && (session->vsync_before_commit ||
+			!session->intf->active)) {
+		pr_debug("mdp3_vsync_timer_func trigger\n");
+		vsync_notify_handler(session);
+		mod_timer(&session->vsync_timer,
+			jiffies + msecs_to_jiffies(session->vsync_period));
+	}
+}
+
+/*
+ * Handle an async blit ioctl: copy the request-list header from userspace,
+ * validate the request count, parse/queue the blits and write the updated
+ * header (e.g. release fence info) back to userspace.
+ *
+ * Returns 0 on success, -EFAULT on a failed user copy, -EINVAL on a bad
+ * count, or the error from mdp3_ppp_parse_req().
+ */
+static int mdp3_ctrl_async_blit_req(struct msm_fb_data_type *mfd,
+	void __user *p)
+{
+	struct mdp_async_blit_req_list req_list_header;
+	int rc, count;
+	void __user *p_req;
+
+	if (copy_from_user(&req_list_header, p, sizeof(req_list_header)))
+		return -EFAULT;
+	p_req = p + sizeof(req_list_header);
+	count = req_list_header.count;
+	if (count < 0 || count >= MAX_BLIT_REQ)
+		return -EINVAL;
+	rc = mdp3_ppp_parse_req(p_req, &req_list_header, 1);
+	/*
+	 * copy_to_user() returns the number of bytes NOT copied; translate
+	 * that residue into -EFAULT instead of leaking a positive byte
+	 * count as the ioctl return value.
+	 */
+	if (!rc && copy_to_user(p, &req_list_header, sizeof(req_list_header)))
+		rc = -EFAULT;
+	return rc;
+}
+
+/*
+ * Handle a synchronous blit ioctl: read only the leading count portion of
+ * the request list, validate it, and parse with no acquire fences.
+ * NOTE(review): the copy assumes struct mdp_async_blit_req_list starts
+ * with the same layout as struct mdp_blit_req_list (count first) — confirm
+ * against the uapi header.
+ */
+static int mdp3_ctrl_blit_req(struct msm_fb_data_type *mfd, void __user *p)
+{
+	struct mdp_async_blit_req_list req_list_header;
+	int rc, count;
+	void __user *p_req;
+
+	if (copy_from_user(&(req_list_header.count), p,
+		sizeof(struct mdp_blit_req_list)))
+		return -EFAULT;
+	p_req = p + sizeof(struct mdp_blit_req_list);
+	count = req_list_header.count;
+	if (count < 0 || count >= MAX_BLIT_REQ)
+		return -EINVAL;
+	req_list_header.sync.acq_fen_fd_cnt = 0;
+	rc = mdp3_ppp_parse_req(p_req, &req_list_header, 0);
+	return rc;
+}
+
+/* sysfs 'bl_event' read: report the session's backlight event counter. */
+static ssize_t mdp3_bl_show_event(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdp3_session_data *mdp3_session = NULL;
+	int ret;
+
+	if (!mfd || !mfd->mdp.private1)
+		return -EAGAIN;
+
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+	ret = scnprintf(buf, PAGE_SIZE, "%d\n", mdp3_session->bl_events);
+	return ret;
+}
+
+/* sysfs 'hist_event' read: report the DMA's histogram event counter. */
+static ssize_t mdp3_hist_show_event(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdp3_session_data *mdp3_session = NULL;
+	struct mdp3_dma *dma = NULL;
+	int ret;
+
+	if (!mfd || !mfd->mdp.private1)
+		return -EAGAIN;
+
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+	dma = (struct mdp3_dma *)mdp3_session->dma;
+	ret = scnprintf(buf, PAGE_SIZE, "%d\n", dma->hist_events);
+	return ret;
+}
+
+/* sysfs 'vsync_event' read: report the last vsync timestamp in ns. */
+static ssize_t mdp3_vsync_show_event(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdp3_session_data *mdp3_session = NULL;
+	u64 vsync_ticks;
+	int rc;
+
+	if (!mfd || !mfd->mdp.private1)
+		return -EAGAIN;
+
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+
+	vsync_ticks = ktime_to_ns(mdp3_session->vsync_time);
+
+	pr_debug("fb%d vsync=%llu\n", mfd->index, vsync_ticks);
+	rc = scnprintf(buf, PAGE_SIZE, "VSYNC=%llu\n", vsync_ticks);
+	return rc;
+}
+
+/*
+ * sysfs 'packpattern' read: report the DMA output pack pattern, falling
+ * back to the pattern derived from the framebuffer image type when the
+ * configured value is 0.
+ */
+static ssize_t mdp3_packpattern_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdp3_session_data *mdp3_session = NULL;
+	int rc;
+	u32 pattern = 0;
+
+	if (!mfd || !mfd->mdp.private1)
+		return -EAGAIN;
+
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+
+	pattern = mdp3_session->dma->output_config.pack_pattern;
+
+	/* If pattern was found to be 0 then get pattern for fb imagetype */
+	if (!pattern)
+		pattern = mdp3_ctrl_get_pack_pattern(mfd->fb_imgType);
+
+	pr_debug("fb%d pack_pattern c= %d.", mfd->index, pattern);
+	rc = scnprintf(buf, PAGE_SIZE, "packpattern=%d\n", pattern);
+	return rc;
+}
+
+/* sysfs 'dyn_pu' read: report the dynamic partial-update state (-1 unset). */
+static ssize_t mdp3_dyn_pu_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+	struct mdp3_session_data *mdp3_session = NULL;
+	int ret, state;
+
+	if (!mfd || !mfd->mdp.private1)
+		return -EAGAIN;
+
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+	state = (mdp3_session->dyn_pu_state >= 0) ?
+		mdp3_session->dyn_pu_state : -1;
+	ret = scnprintf(buf, PAGE_SIZE, "%d", state);
+	return ret;
+}
+
+/*
+ * sysfs 'dyn_pu' write: parse a decimal value into the dynamic
+ * partial-update state and notify pollers of the change.
+ */
+static ssize_t mdp3_dyn_pu_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+	struct mdp3_session_data *mdp3_session = NULL;
+	int ret, dyn_pu;
+
+	if (!mfd || !mfd->mdp.private1)
+		return -EAGAIN;
+
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+	ret = kstrtoint(buf, 10, &dyn_pu);
+	if (ret) {
+		pr_err("Invalid input for partial update: ret = %d\n", ret);
+		return ret;
+	}
+
+	mdp3_session->dyn_pu_state = dyn_pu;
+	sysfs_notify(&dev->kobj, NULL, "dyn_pu");
+	return count;
+}
+
+/* sysfs attribute declarations; events are read-only, dyn_pu is rw. */
+static DEVICE_ATTR(hist_event, 0444, mdp3_hist_show_event, NULL);
+static DEVICE_ATTR(bl_event, 0444, mdp3_bl_show_event, NULL);
+static DEVICE_ATTR(vsync_event, 0444, mdp3_vsync_show_event, NULL);
+static DEVICE_ATTR(packpattern, 0444, mdp3_packpattern_show, NULL);
+static DEVICE_ATTR(dyn_pu, 0664, mdp3_dyn_pu_show,
+		mdp3_dyn_pu_store);
+
+/* Attributes always exposed for the fb device. */
+static struct attribute *generic_attrs[] = {
+	&dev_attr_packpattern.attr,
+	&dev_attr_dyn_pu.attr,
+	&dev_attr_hist_event.attr,
+	&dev_attr_bl_event.attr,
+	NULL,
+};
+
+/* vsync_event is grouped separately (created only when vsync is usable). */
+static struct attribute *vsync_fs_attrs[] = {
+	&dev_attr_vsync_event.attr,
+	NULL,
+};
+
+static struct attribute_group vsync_fs_attr_group = {
+	.attrs = vsync_fs_attrs,
+};
+
+static struct attribute_group generic_attr_group = {
+	.attrs = generic_attrs,
+};
+
+/*
+ * Toggle the session's clock state. Only acts on a real transition
+ * (off->on or on->off): asks the panel to gate/ungate its clocks via
+ * MDSS_EVENT_PANEL_CLK_CTRL, then updates the MDP resource votes.
+ * Returns the OR of both results; 0 if the panel has no event handler.
+ */
+static int mdp3_ctrl_clk_enable(struct msm_fb_data_type *mfd, int enable)
+{
+	struct mdp3_session_data *session;
+	struct mdss_panel_data *panel;
+	struct dsi_panel_clk_ctrl clk_ctrl;
+	int rc = 0;
+
+	pr_debug("mdp3_ctrl_clk_enable %d\n", enable);
+
+	session = mfd->mdp.private1;
+	panel = session->panel;
+
+	if (!panel->event_handler)
+		return 0;
+
+	if ((enable && session->clk_on == 0) ||
+				(!enable && session->clk_on == 1)) {
+		clk_ctrl.client = DSI_CLK_REQ_MDP_CLIENT;
+		clk_ctrl.state = enable;
+		rc = panel->event_handler(panel,
+			MDSS_EVENT_PANEL_CLK_CTRL, (void *)&clk_ctrl);
+		rc |= mdp3_res_update(enable, 1, MDP3_CLIENT_DMA_P);
+	} else {
+		pr_debug("enable = %d, clk_on=%d\n", enable, session->clk_on);
+	}
+
+	session->clk_on = enable;
+	return rc;
+}
+
+/*
+ * Vote or unvote DMA_P bus bandwidth: on enable, compute the AB/IB quota
+ * from the panel timing and framebuffer bpp; on disable, vote zero.
+ */
+static int mdp3_ctrl_res_req_bus(struct msm_fb_data_type *mfd, int status)
+{
+	u64 ab = 0;
+	u64 ib = 0;
+
+	if (!status)
+		return mdp3_bus_scale_set_quota(MDP3_CLIENT_DMA_P, 0, 0);
+
+	mdp3_calc_dma_res(mfd->panel_info, NULL, &ab, &ib,
+		ppp_bpp(mfd->fb_imgType));
+	return mdp3_bus_scale_set_quota(MDP3_CLIENT_DMA_P, ab, ib);
+}
+
+/*
+ * Enable or disable the MDP clock resources for DMA_P. On enable, derive
+ * the core clock rate from the panel timing, program the MDP source and
+ * vsync clock rates, then vote the resources on; on disable, vote off.
+ * Returns 0 on success or the failing mdp3_res_update() error.
+ */
+static int mdp3_ctrl_res_req_clk(struct msm_fb_data_type *mfd, int status)
+{
+	int rc = 0;
+
+	if (status) {
+		u64 mdp_clk_rate = 0;
+
+		mdp3_calc_dma_res(mfd->panel_info, &mdp_clk_rate,
+			NULL, NULL, 0);
+
+		mdp3_clk_set_rate(MDP3_CLK_MDP_SRC, mdp_clk_rate,
+				MDP3_CLIENT_DMA_P);
+		mdp3_clk_set_rate(MDP3_CLK_VSYNC, MDP_VSYNC_CLK_RATE,
+				MDP3_CLIENT_DMA_P);
+
+		rc = mdp3_res_update(1, 1, MDP3_CLIENT_DMA_P);
+		if (rc) {
+			pr_err("mdp3 clk enable fail\n");
+			return rc;
+		}
+	} else {
+		rc = mdp3_res_update(0, 1, MDP3_CLIENT_DMA_P);
+		if (rc)
+			pr_err("mdp3 clk disable fail\n");
+	}
+	return rc;
+}
+
+/* Map the framebuffer panel type onto an MDP3 DMA output selector. */
+static int mdp3_ctrl_get_intf_type(struct msm_fb_data_type *mfd)
+{
+	switch (mfd->panel.type) {
+	case MIPI_VIDEO_PANEL:
+		return MDP3_DMA_OUTPUT_SEL_DSI_VIDEO;
+	case MIPI_CMD_PANEL:
+		return MDP3_DMA_OUTPUT_SEL_DSI_CMD;
+	case LCDC_PANEL:
+		return MDP3_DMA_OUTPUT_SEL_LCDC;
+	default:
+		return MDP3_DMA_OUTPUT_SEL_MAX;
+	}
+}
+
+/* Translate an MDP image type into the DMA input-buffer format. */
+int mdp3_ctrl_get_source_format(u32 imgType)
+{
+	switch (imgType) {
+	case MDP_RGB_565:
+		return MDP3_DMA_IBUF_FORMAT_RGB565;
+	case MDP_RGB_888:
+		return MDP3_DMA_IBUF_FORMAT_RGB888;
+	case MDP_ARGB_8888:
+	case MDP_RGBA_8888:
+		return MDP3_DMA_IBUF_FORMAT_XRGB8888;
+	default:
+		return MDP3_DMA_IBUF_FORMAT_UNDEFINED;
+	}
+}
+
+/*
+ * Pick the DMA output pack pattern for an image type: RGBA8888 and RGB888
+ * sources use BGR ordering, everything else RGB.
+ */
+int mdp3_ctrl_get_pack_pattern(u32 imgType)
+{
+	if (imgType == MDP_RGBA_8888 || imgType == MDP_RGB_888)
+		return MDP3_DMA_OUTPUT_PACK_PATTERN_BGR;
+
+	return MDP3_DMA_OUTPUT_PACK_PATTERN_RGB;
+}
+
+/*
+ * Program the display interface timing from the panel info. For video/LCDC
+ * interfaces the horizontal values are in pixels and all vertical values
+ * are converted to line units by multiplying with hsync_period. Command
+ * mode only selects the DSI command IDs. Programming is skipped while the
+ * bootloader splash is still on screen. Returns -EINVAL for an unknown
+ * interface type or when the interface has no config op.
+ */
+static int mdp3_ctrl_intf_init(struct msm_fb_data_type *mfd,
+				struct mdp3_intf *intf)
+{
+	int rc = 0;
+	struct mdp3_intf_cfg cfg;
+	struct mdp3_video_intf_cfg *video = &cfg.video;
+	struct mdss_panel_info *p = mfd->panel_info;
+	int h_back_porch = p->lcdc.h_back_porch;
+	int h_front_porch = p->lcdc.h_front_porch;
+	int w = p->xres;
+	int v_back_porch = p->lcdc.v_back_porch;
+	int v_front_porch = p->lcdc.v_front_porch;
+	int h = p->yres;
+	int h_sync_skew = p->lcdc.hsync_skew;
+	int h_pulse_width = p->lcdc.h_pulse_width;
+	int v_pulse_width = p->lcdc.v_pulse_width;
+	int hsync_period = h_front_porch + h_back_porch + w + h_pulse_width;
+	int vsync_period = v_front_porch + v_back_porch + h + v_pulse_width;
+	struct mdp3_session_data *mdp3_session;
+
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+	/* Express the vsync period in pixel clocks (lines * line length). */
+	vsync_period *= hsync_period;
+
+	cfg.type = mdp3_ctrl_get_intf_type(mfd);
+	if (cfg.type == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO ||
+		cfg.type == MDP3_DMA_OUTPUT_SEL_LCDC) {
+		video->hsync_period = hsync_period;
+		video->hsync_pulse_width = h_pulse_width;
+		video->vsync_period = vsync_period;
+		video->vsync_pulse_width = v_pulse_width * hsync_period;
+		video->display_start_x = h_back_porch + h_pulse_width;
+		video->display_end_x = hsync_period - h_front_porch - 1;
+		video->display_start_y =
+			(v_back_porch + v_pulse_width) * hsync_period;
+		video->display_end_y =
+			vsync_period - v_front_porch * hsync_period - 1;
+		/* Active region matches the display region exactly. */
+		video->active_start_x = video->display_start_x;
+		video->active_end_x = video->display_end_x;
+		video->active_h_enable = true;
+		video->active_start_y = video->display_start_y;
+		video->active_end_y = video->display_end_y;
+		video->active_v_enable = true;
+		video->hsync_skew = h_sync_skew;
+		video->hsync_polarity = 1;
+		video->vsync_polarity = 1;
+		video->de_polarity = 1;
+		video->underflow_color = p->lcdc.underflow_clr;
+	} else if (cfg.type == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
+		cfg.dsi_cmd.primary_dsi_cmd_id = 0;
+		cfg.dsi_cmd.secondary_dsi_cmd_id = 1;
+		cfg.dsi_cmd.dsi_cmd_tg_intf_sel = 0;
+	} else
+		return -EINVAL;
+
+	/* Don't reprogram timing while continuous splash is still active. */
+	if (!(mdp3_session->in_splash_screen)) {
+		if (intf->config)
+			rc = intf->config(intf, &cfg);
+		else
+			rc = -EINVAL;
+	}
+	return rc;
+}
+
+/*
+ * Configure the DMA pipe for the framebuffer: build the source config
+ * (size, iova, vsync divider), output config (format/pack pattern, either
+ * from a PREPARE-time update or from the fb image type) and, for command
+ * mode, the tear-check parameters plus the dma-done notifier.
+ * Returns the result of the DMA config ops; -EINVAL if one is missing.
+ */
+static int mdp3_ctrl_dma_init(struct msm_fb_data_type *mfd,
+				struct mdp3_dma *dma)
+{
+	int rc;
+	struct mdss_panel_info *panel_info = mfd->panel_info;
+	struct fb_info *fbi = mfd->fbi;
+	struct fb_fix_screeninfo *fix;
+	struct fb_var_screeninfo *var;
+	struct mdp3_dma_output_config outputConfig;
+	struct mdp3_dma_source sourceConfig;
+	int frame_rate = mfd->panel_info->mipi.frame_rate;
+	int vbp, vfp, vspw;
+	int vtotal, vporch;
+	struct mdp3_notification dma_done_callback;
+	struct mdp3_tear_check te;
+	struct mdp3_session_data *mdp3_session;
+
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+
+	vbp = panel_info->lcdc.v_back_porch;
+	vfp = panel_info->lcdc.v_front_porch;
+	vspw = panel_info->lcdc.v_pulse_width;
+	vporch = vbp + vfp + vspw;
+	vtotal = vporch + panel_info->yres;
+
+	fix = &fbi->fix;
+	var = &fbi->var;
+
+	sourceConfig.width = panel_info->xres;
+	sourceConfig.height = panel_info->yres;
+	sourceConfig.x = 0;
+	sourceConfig.y = 0;
+	sourceConfig.buf = mfd->iova;
+	sourceConfig.vporch = vporch;
+	/* Vsync clock ticks per line at the panel's refresh rate. */
+	sourceConfig.vsync_count =
+		MDP_VSYNC_CLK_RATE / (frame_rate * vtotal);
+
+	outputConfig.dither_en = 0;
+	outputConfig.out_sel = mdp3_ctrl_get_intf_type(mfd);
+	outputConfig.bit_mask_polarity = 0;
+	outputConfig.color_components_flip = 0;
+	outputConfig.pack_align = MDP3_DMA_OUTPUT_PACK_ALIGN_LSB;
+	/* 8 bits per color component on all three channels. */
+	outputConfig.color_comp_out_bits = (MDP3_DMA_OUTPUT_COMP_BITS_8 << 4) |
+					(MDP3_DMA_OUTPUT_COMP_BITS_8 << 2)|
+					MDP3_DMA_OUTPUT_COMP_BITS_8;
+
+	if (dma->update_src_cfg) {
+		/* configuration has been updated through PREPARE call */
+		sourceConfig.format = dma->source_config.format;
+		sourceConfig.stride = dma->source_config.stride;
+		outputConfig.pack_pattern = dma->output_config.pack_pattern;
+	} else {
+		sourceConfig.format =
+			mdp3_ctrl_get_source_format(mfd->fb_imgType);
+		outputConfig.pack_pattern =
+			mdp3_ctrl_get_pack_pattern(mfd->fb_imgType);
+		sourceConfig.stride = fix->line_length;
+	}
+
+	/* Tear-check parameters for command-mode sync. */
+	te.frame_rate = panel_info->mipi.frame_rate;
+	te.hw_vsync_mode = panel_info->mipi.hw_vsync_mode;
+	te.tear_check_en = panel_info->te.tear_check_en;
+	te.sync_cfg_height = panel_info->te.sync_cfg_height;
+	te.vsync_init_val = panel_info->te.vsync_init_val;
+	te.sync_threshold_start = panel_info->te.sync_threshold_start;
+	te.sync_threshold_continue = panel_info->te.sync_threshold_continue;
+	te.start_pos = panel_info->te.start_pos;
+	te.rd_ptr_irq = panel_info->te.rd_ptr_irq;
+	te.refx100 = panel_info->te.refx100;
+
+	if (dma->dma_config) {
+		/* Without partial update, the ROI is the full source frame. */
+		if (!panel_info->partial_update_enabled) {
+			dma->roi.w = sourceConfig.width;
+			dma->roi.h = sourceConfig.height;
+			dma->roi.x = sourceConfig.x;
+			dma->roi.y = sourceConfig.y;
+		}
+		rc = dma->dma_config(dma, &sourceConfig, &outputConfig,
+					mdp3_session->in_splash_screen);
+	} else {
+		pr_err("%s: dma config failed\n", __func__);
+		rc = -EINVAL;
+	}
+
+	if (outputConfig.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
+		if (dma->dma_sync_config)
+			rc = dma->dma_sync_config(dma,
+					&sourceConfig, &te);
+		else
+			rc = -EINVAL;
+		dma_done_callback.handler = dma_done_notify_handler;
+		dma_done_callback.arg = mfd->mdp.private1;
+		dma->dma_done_notifier(dma, &dma_done_callback);
+	}
+
+	return rc;
+}
+
+/*
+ * mdp3_ctrl_on() - power on the MDP3 display path for a framebuffer.
+ * @mfd: framebuffer device data of the panel being turned on.
+ *
+ * First re-initializes the DSI host when resuming from a low-power
+ * state or from the continuous-splash screen.  For a full cold start it
+ * then takes a runtime-PM reference, votes for bus and clock resources,
+ * sends the panel-on event sequence and initializes DMA, PPP and the
+ * display interface.  Returns 0 on success or a negative errno.
+ */
+static int mdp3_ctrl_on(struct msm_fb_data_type *mfd)
+{
+	int rc = 0;
+	struct mdp3_session_data *mdp3_session;
+	struct mdss_panel_data *panel;
+
+	pr_debug("mdp3_ctrl_on\n");
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+	if (!mdp3_session || !mdp3_session->panel || !mdp3_session->dma ||
+		!mdp3_session->intf) {
+		pr_err("mdp3_ctrl_on no device");
+		return -ENODEV;
+	}
+	mutex_lock(&mdp3_session->lock);
+
+	MDSS_XLOG(XLOG_FUNC_ENTRY, __LINE__, mfd->panel_power_state);
+	panel = mdp3_session->panel;
+	/* make sure DSI host is initialized properly */
+	if (panel) {
+		pr_debug("%s : dsi host init, power state = %d Splash %d\n",
+			__func__, mfd->panel_power_state,
+			mdp3_session->in_splash_screen);
+		if (mdss_fb_is_power_on_lp(mfd) ||
+			mdp3_session->in_splash_screen) {
+			/* Turn on panel so that it can exit low power mode */
+			mdp3_clk_enable(1, 0);
+			/*
+			 * NOTE(review): the statements below are misindented
+			 * but all execute inside this if-block; the final
+			 * mdp3_clk_enable(0, 0) runs unconditionally here and
+			 * is NOT governed by the ULP check just above it.
+			 */
+		rc = panel->event_handler(panel,
+				MDSS_EVENT_LINK_READY, NULL);
+		rc |= panel->event_handler(panel,
+				MDSS_EVENT_UNBLANK, NULL);
+		rc |= panel->event_handler(panel,
+				MDSS_EVENT_PANEL_ON, NULL);
+		if (mdss_fb_is_power_on_ulp(mfd))
+			rc |= mdp3_enable_panic_ctrl();
+			mdp3_clk_enable(0, 0);
+		}
+	}
+
+	if (mdp3_session->status) {
+		pr_debug("fb%d is on already\n", mfd->index);
+		MDSS_XLOG(XLOG_FUNC_EXIT, __LINE__, mfd->panel_power_state);
+		goto end;
+	}
+
+	if (mdp3_session->intf->active) {
+		pr_debug("continuous splash screen, initialized already\n");
+		mdp3_session->status = 1;
+		goto end;
+	}
+
+	/*
+	 * Get a reference to the runtime pm device.
+	 * If idle pc feature is enabled, it will be released
+	 * at end of this routine else, when device is turned off.
+	 */
+	pm_runtime_get_sync(&mdp3_res->pdev->dev);
+
+	/* Increment the overlay active count */
+	atomic_inc(&mdp3_res->active_intf_cnt);
+	mdp3_ctrl_notifier_register(mdp3_session,
+		&mdp3_session->mfd->mdp_sync_pt_data.notifier);
+
+	/* request bus bandwidth before DSI DMA traffic */
+	rc = mdp3_ctrl_res_req_bus(mfd, 1);
+	if (rc) {
+		pr_err("fail to request bus resource\n");
+		goto on_error;
+	}
+
+	rc = mdp3_dynamic_clock_gating_ctrl(0);
+	if (rc) {
+		pr_err("fail to disable dynamic clock gating\n");
+		goto on_error;
+	}
+	mdp3_qos_remapper_setup(panel);
+
+	rc = mdp3_ctrl_res_req_clk(mfd, 1);
+	if (rc) {
+		pr_err("fail to request mdp clk resource\n");
+		goto on_error;
+	}
+
+	/* full panel-on sequence; rc accumulates any handler failure */
+	if (panel->event_handler) {
+		rc = panel->event_handler(panel, MDSS_EVENT_LINK_READY, NULL);
+		rc |= panel->event_handler(panel, MDSS_EVENT_UNBLANK, NULL);
+		rc |= panel->event_handler(panel, MDSS_EVENT_PANEL_ON, NULL);
+		if (panel->panel_info.type == MIPI_CMD_PANEL) {
+			struct dsi_panel_clk_ctrl clk_ctrl;
+
+			clk_ctrl.state = MDSS_DSI_CLK_ON;
+			clk_ctrl.client = DSI_CLK_REQ_MDP_CLIENT;
+			rc |= panel->event_handler(panel,
+					MDSS_EVENT_PANEL_CLK_CTRL,
+					(void *)&clk_ctrl);
+	}
+	}
+	if (rc) {
+		pr_err("fail to turn on the panel\n");
+		goto on_error;
+	}
+
+	rc = mdp3_ctrl_dma_init(mfd, mdp3_session->dma);
+	if (rc) {
+		pr_err("dma init failed\n");
+		goto on_error;
+	}
+
+	rc = mdp3_ppp_init();
+	if (rc) {
+		pr_err("ppp init failed\n");
+		goto on_error;
+	}
+
+	rc = mdp3_ctrl_intf_init(mfd, mdp3_session->intf);
+	if (rc) {
+		pr_err("display interface init failed\n");
+		goto on_error;
+	}
+	mdp3_session->clk_on = 1;
+
+	mdp3_session->first_commit = true;
+	if (mfd->panel_info->panel_dead)
+		mdp3_session->esd_recovery = true;
+
+		/* NOTE(review): misindented but unconditional */
+		mdp3_session->status = 1;
+
+	mdp3_ctrl_pp_resume(mfd);
+	MDSS_XLOG(XLOG_FUNC_EXIT, __LINE__, mfd->panel_power_state);
+on_error:
+	/*
+	 * Drop the runtime-PM vote on failure, or immediately when idle
+	 * power collapse owns the reference for command-mode panels.
+	 */
+	if (rc || (mdp3_res->idle_pc_enabled &&
+			(mfd->panel_info->type == MIPI_CMD_PANEL))) {
+		if (rc) {
+			pr_err("Failed to turn on fb%d\n", mfd->index);
+			atomic_dec(&mdp3_res->active_intf_cnt);
+		}
+		pm_runtime_put(&mdp3_res->pdev->dev);
+	}
+end:
+	mutex_unlock(&mdp3_session->lock);
+	return rc;
+}
+
+/*
+ * mdp3_ctrl_off() - blank the panel and tear down the MDP3 display path.
+ * @mfd: framebuffer device being turned off or placed in low power.
+ *
+ * For a transition to a low-power state the interface and clocks are
+ * kept running so updates can continue; for a full display-off the DMA
+ * engine is stopped, clock/bus votes are dropped and the buffer queues
+ * are drained.  Always returns 0: panel and clock errors are logged and
+ * the teardown continues best-effort.
+ */
+static int mdp3_ctrl_off(struct msm_fb_data_type *mfd)
+{
+	int rc = 0;
+	bool intf_stopped = true;
+	struct mdp3_session_data *mdp3_session;
+	struct mdss_panel_data *panel;
+
+	pr_debug("mdp3_ctrl_off\n");
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+	if (!mdp3_session || !mdp3_session->panel || !mdp3_session->dma ||
+		!mdp3_session->intf) {
+		/* fixed: message used to say "mdp3_ctrl_on no device" */
+		pr_err("mdp3_ctrl_off no device");
+		return -ENODEV;
+	}
+
+	/*
+	 * Keep a reference to the runtime pm until the overlay is turned
+	 * off, and then release this last reference at the end. This will
+	 * help in distinguishing between idle power collapse versus suspend
+	 * power collapse
+	 */
+	pm_runtime_get_sync(&mdp3_res->pdev->dev);
+
+	MDSS_XLOG(XLOG_FUNC_ENTRY, __LINE__, mdss_fb_is_power_on_ulp(mfd),
+		mfd->panel_power_state);
+	panel = mdp3_session->panel;
+	mutex_lock(&mdp3_session->lock);
+
+	pr_debug("Requested power state = %d\n", mfd->panel_power_state);
+	if (mdss_fb_is_power_on_lp(mfd)) {
+		/*
+		 * Transition to low power
+		 * As display updates are expected in low power mode,
+		 * keep the interface and clocks on.
+		 */
+		intf_stopped = false;
+	} else {
+		/* Transition to display off */
+		if (!mdp3_session->status) {
+			pr_debug("fb%d is off already", mfd->index);
+			goto off_error;
+		}
+		if (panel && panel->set_backlight)
+			panel->set_backlight(panel, 0);
+	}
+
+	/*
+	 * While transitioning from interactive to low power,
+	 * events need to be sent to the interface so that the
+	 * panel can be configured in low power mode
+	 */
+	if (panel->event_handler)
+		rc = panel->event_handler(panel, MDSS_EVENT_BLANK,
+			(void *) (long int)mfd->panel_power_state);
+	if (rc)
+		pr_err("EVENT_BLANK error (%d)\n", rc);
+
+	if (intf_stopped) {
+		if (!mdp3_session->clk_on)
+			mdp3_ctrl_clk_enable(mfd, 1);
+		/* PP related programming for ctrl off */
+		mdp3_histogram_stop(mdp3_session, MDP_BLOCK_DMA_P);
+		mutex_lock(&mdp3_session->dma->pp_lock);
+		mdp3_session->dma->ccs_config.ccs_dirty = false;
+		mdp3_session->dma->lut_config.lut_dirty = false;
+		mutex_unlock(&mdp3_session->dma->pp_lock);
+
+		rc = mdp3_session->dma->stop(mdp3_session->dma,
+					mdp3_session->intf);
+		if (rc)
+			pr_debug("fail to stop the MDP3 dma\n");
+		/* Wait to ensure TG to turn off */
+		msleep(20);
+		mfd->panel_info->cont_splash_enabled = 0;
+
+		/* Disable Auto refresh once continuous splash disabled */
+		mdp3_autorefresh_disable(mfd->panel_info);
+		mdp3_splash_done(mfd->panel_info);
+
+		mdp3_irq_deregister();
+	}
+
+	if (panel->event_handler)
+		rc = panel->event_handler(panel, MDSS_EVENT_PANEL_OFF,
+			(void *) (long int)mfd->panel_power_state);
+	if (rc)
+		pr_err("EVENT_PANEL_OFF error (%d)\n", rc);
+
+	if (intf_stopped) {
+		if (mdp3_session->clk_on) {
+			pr_debug("mdp3_ctrl_off stop clock\n");
+			if (panel->event_handler &&
+				(panel->panel_info.type == MIPI_CMD_PANEL)) {
+				struct dsi_panel_clk_ctrl clk_ctrl;
+
+				clk_ctrl.state = MDSS_DSI_CLK_OFF;
+				clk_ctrl.client = DSI_CLK_REQ_MDP_CLIENT;
+				rc |= panel->event_handler(panel,
+					MDSS_EVENT_PANEL_CLK_CTRL,
+					(void *)&clk_ctrl);
+			}
+
+			rc = mdp3_dynamic_clock_gating_ctrl(1);
+			rc = mdp3_res_update(0, 1, MDP3_CLIENT_DMA_P);
+			if (rc)
+				pr_err("mdp clock resource release failed\n");
+		}
+
+		mdp3_ctrl_notifier_unregister(mdp3_session,
+			&mdp3_session->mfd->mdp_sync_pt_data.notifier);
+
+		/* reset session bookkeeping for the next power-on */
+		mdp3_session->vsync_enabled = 0;
+		atomic_set(&mdp3_session->vsync_countdown, 0);
+		atomic_set(&mdp3_session->dma_done_cnt, 0);
+		mdp3_session->clk_on = 0;
+		mdp3_session->in_splash_screen = 0;
+		mdp3_res->solid_fill_vote_en = false;
+		mdp3_session->status = 0;
+		if (atomic_dec_return(&mdp3_res->active_intf_cnt) != 0) {
+			pr_warn("active_intf_cnt unbalanced\n");
+			atomic_set(&mdp3_res->active_intf_cnt, 0);
+		}
+		/*
+		 * Release the pm runtime reference held when
+		 * idle pc feature is not enabled
+		 */
+		if (!mdp3_res->idle_pc_enabled ||
+			(mfd->panel_info->type != MIPI_CMD_PANEL)) {
+			rc = pm_runtime_put(&mdp3_res->pdev->dev);
+			if (rc)
+				pr_err("%s: pm_runtime_put failed (rc %d)\n",
+					__func__, rc);
+		}
+		mdp3_bufq_deinit(&mdp3_session->bufq_out);
+		if (mdp3_session->overlay.id != MSMFB_NEW_REQUEST) {
+			mdp3_session->overlay.id = MSMFB_NEW_REQUEST;
+			mdp3_bufq_deinit(&mdp3_session->bufq_in);
+		}
+	}
+
+	if (mdss_fb_is_power_on_ulp(mfd) &&
+		(mfd->panel.type == MIPI_CMD_PANEL)) {
+		pr_debug("%s: Disable MDP3 clocks in ULP\n", __func__);
+		if (!mdp3_session->clk_on)
+			mdp3_ctrl_clk_enable(mfd, 1);
+		/*
+		 * STOP DMA transfer first and signal vsync notification
+		 * Before releasing the resource in ULP state.
+		 */
+		rc = mdp3_session->dma->stop(mdp3_session->dma,
+					mdp3_session->intf);
+		if (rc)
+			pr_warn("fail to stop the MDP3 dma in ULP\n");
+		/* Wait to ensure TG to turn off */
+		msleep(20);
+		/*
+		 * Handle ULP request initiated from fb_pm_suspend.
+		 * For ULP panel power state disabling vsync and set
+		 * vsync_count to zero and Turn off MDP3 clocks
+		 */
+		atomic_set(&mdp3_session->vsync_countdown, 0);
+		mdp3_session->vsync_enabled = 0;
+		mdp3_ctrl_vsync_enable(mdp3_session->mfd, 0);
+		mdp3_ctrl_clk_enable(mdp3_session->mfd, 0);
+	}
+off_error:
+	MDSS_XLOG(XLOG_FUNC_EXIT, __LINE__);
+	mutex_unlock(&mdp3_session->lock);
+	/* Release the last reference to the runtime device */
+	pm_runtime_put(&mdp3_res->pdev->dev);
+
+	return 0;
+}
+
+/*
+ * mdp3_ctrl_reset() - re-initialize MDP3 after splash hand-off or idle-pc.
+ * @mfd: framebuffer device to recover.
+ *
+ * Re-attaches the DMA IOMMU, re-runs interface/DMA/PPP init and restores
+ * post-processing and any registered vsync client.  Coming out of idle
+ * power collapse the clocks are toggled around the sequence; coming out
+ * of continuous splash the splash state is cleared instead.  Returns 0
+ * on success or a negative errno.
+ */
+int mdp3_ctrl_reset(struct msm_fb_data_type *mfd)
+{
+	int rc = 0;
+	struct mdp3_session_data *mdp3_session;
+	struct mdp3_dma *mdp3_dma;
+	struct mdss_panel_data *panel;
+	struct mdp3_notification vsync_client;
+
+	pr_debug("mdp3_ctrl_reset\n");
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+	if (!mdp3_session || !mdp3_session->panel || !mdp3_session->dma ||
+		!mdp3_session->intf) {
+		pr_err("mdp3_ctrl_reset no device");
+		return -ENODEV;
+	}
+
+	panel = mdp3_session->panel;
+	mdp3_dma = mdp3_session->dma;
+	mutex_lock(&mdp3_session->lock);
+	pr_debug("mdp3_ctrl_reset idle_pc %s FS_EN %s\n",
+		mdp3_res->idle_pc ? "True":"False",
+		mdp3_res->fs_ena ? "True":"False");
+	if (mdp3_res->idle_pc) {
+		mdp3_clk_enable(1, 0);
+		mdp3_dynamic_clock_gating_ctrl(0);
+		mdp3_qos_remapper_setup(panel);
+	}
+
+	/*Map the splash addr for VIDEO mode panel before smmu attach*/
+	if ((mfd->panel.type == MIPI_VIDEO_PANEL) &&
+				(mdp3_session->in_splash_screen)) {
+		/*
+		 * NOTE(review): rc from mdss_smmu_map() is overwritten by the
+		 * iommu-enable call below, so a map failure is silently
+		 * ignored here — TODO confirm this is intentional.
+		 */
+		rc = mdss_smmu_map(MDSS_IOMMU_DOMAIN_UNSECURE,
+				mdp3_res->splash_mem_addr,
+				mdp3_res->splash_mem_addr,
+				mdp3_res->splash_mem_size,
+				IOMMU_READ | IOMMU_NOEXEC);
+	}
+
+	rc = mdp3_iommu_enable(MDP3_CLIENT_DMA_P);
+	if (rc) {
+		pr_err("fail to attach dma iommu\n");
+		if (mdp3_res->idle_pc)
+			mdp3_clk_enable(0, 0);
+		goto reset_error;
+	}
+
+	/* preserve the registered vsync client across the re-init below */
+	vsync_client = mdp3_dma->vsync_client;
+
+	mdp3_ctrl_intf_init(mfd, mdp3_session->intf);
+	mdp3_ctrl_dma_init(mfd, mdp3_dma);
+	mdp3_ppp_init();
+	mdp3_ctrl_pp_resume(mfd);
+	if (vsync_client.handler)
+		mdp3_dma->vsync_enable(mdp3_dma, &vsync_client);
+
+	if (!mdp3_res->idle_pc) {
+		/*
+		 * NOTE(review): the outdented lines below are still inside
+		 * this if-block (splash hand-off path).
+		 */
+		mdp3_session->first_commit = true;
+	mfd->panel_info->cont_splash_enabled = 0;
+	mdp3_session->in_splash_screen = 0;
+	mdp3_splash_done(mfd->panel_info);
+		/* Disable Auto refresh */
+		mdp3_autorefresh_disable(mfd->panel_info);
+	} else {
+		mdp3_res->idle_pc = false;
+		mdp3_clk_enable(0, 0);
+		mdp3_iommu_disable(MDP3_CLIENT_DMA_P);
+	}
+
+reset_error:
+	mutex_unlock(&mdp3_session->lock);
+	return rc;
+}
+
+/*
+ * mdp3_overlay_get() - return the currently staged overlay configuration.
+ * @mfd: framebuffer device owning the overlay.
+ * @req: in: overlay id to look up; out: the stored overlay on success.
+ *
+ * Returns 0 when @req->id matches the active overlay, -EINVAL otherwise.
+ */
+static int mdp3_overlay_get(struct msm_fb_data_type *mfd,
+				struct mdp_overlay *req)
+{
+	struct mdp3_session_data *session = mfd->mdp.private1;
+	int ret = -EINVAL;
+
+	mutex_lock(&session->lock);
+	if (session->overlay.id == req->id) {
+		*req = session->overlay;
+		ret = 0;
+	}
+	mutex_unlock(&session->lock);
+
+	return ret;
+}
+
+/*
+ * mdp3_overlay_set() - validate and stage a new overlay configuration.
+ * @mfd: framebuffer device owning the overlay.
+ * @req: requested overlay; on success req->id is set to the active id (1).
+ *
+ * MDP3 supports a single overlay, so a configuration change must arrive
+ * with id == MSMFB_NEW_REQUEST.  If the source stride or format changed,
+ * the new DMA configuration is staged (update_src_cfg) and applied on
+ * the next commit.  An already-claimed id is only logged, matching the
+ * legacy recovery behavior.  Always returns 0.
+ *
+ * Cleanup: removed the unused fbi/fix locals and fixed the misindented
+ * mutex_unlock().
+ */
+static int mdp3_overlay_set(struct msm_fb_data_type *mfd,
+				struct mdp_overlay *req)
+{
+	int rc = 0;
+	struct mdp3_session_data *mdp3_session = mfd->mdp.private1;
+	struct mdp3_dma *dma = mdp3_session->dma;
+	int stride;
+	int format;
+
+	/* bytes per line for the requested source format */
+	stride = req->src.width * ppp_bpp(req->src.format);
+	format = mdp3_ctrl_get_source_format(req->src.format);
+
+	if (mdp3_session->overlay.id != req->id)
+		pr_err("overlay was not released, continue to recover\n");
+	/*
+	 * A change in overlay structure will always come with
+	 * MSMFB_NEW_REQUEST for MDP3
+	 */
+	if (req->id == MSMFB_NEW_REQUEST) {
+		mutex_lock(&mdp3_session->lock);
+		if (dma->source_config.stride != stride ||
+				dma->source_config.format != format) {
+			dma->source_config.format = format;
+			dma->source_config.stride = stride;
+			dma->output_config.pack_pattern =
+				mdp3_ctrl_get_pack_pattern(req->src.format);
+			dma->update_src_cfg = true;
+		}
+		mdp3_session->overlay = *req;
+		mdp3_session->overlay.id = 1;
+		req->id = 1;
+		mutex_unlock(&mdp3_session->lock);
+	}
+
+	return rc;
+}
+
+/*
+ * mdp3_overlay_unset() - release the single MDP3 overlay.
+ * @mfd: framebuffer device owning the overlay.
+ * @ndx: overlay index to release; only index 1 is valid for MDP3.
+ *
+ * Marks the overlay slot as free and drops any queued input buffers.
+ * Returns 0 on success or -EINVAL if @ndx does not match the overlay
+ * currently in use.
+ *
+ * Cleanup: removed the unused fbi/fix/format locals (the format lookup
+ * result was never read).
+ */
+static int mdp3_overlay_unset(struct msm_fb_data_type *mfd, int ndx)
+{
+	int rc = 0;
+	struct mdp3_session_data *mdp3_session = mfd->mdp.private1;
+
+	mutex_lock(&mdp3_session->lock);
+
+	if (mdp3_session->overlay.id == ndx && ndx == 1) {
+		mdp3_session->overlay.id = MSMFB_NEW_REQUEST;
+		mdp3_bufq_deinit(&mdp3_session->bufq_in);
+	} else {
+		rc = -EINVAL;
+	}
+
+	mutex_unlock(&mdp3_session->lock);
+
+	return rc;
+}
+
+/*
+ * mdp3_overlay_queue_buffer() - import and queue one overlay buffer.
+ * @mfd: framebuffer device.
+ * @req: overlay data holding the buffer's memory id/offset.
+ *
+ * Maps the userspace buffer for DMA_P, rejects it if it is smaller than
+ * the configured stride * height, and pushes it onto the session input
+ * queue.  For command-mode panels the IOMMU is attached only for the
+ * duration of this call.  Returns 0 on success or a negative errno;
+ * the buffer reference is dropped on every failure path.
+ */
+static int mdp3_overlay_queue_buffer(struct msm_fb_data_type *mfd,
+					struct msmfb_overlay_data *req)
+{
+	int rc;
+	bool is_panel_type_cmd = false;
+	struct mdp3_session_data *mdp3_session = mfd->mdp.private1;
+	struct msmfb_data *img = &req->data;
+	struct mdp3_img_data data;
+	struct mdp3_dma *dma = mdp3_session->dma;
+
+	memset(&data, 0, sizeof(struct mdp3_img_data));
+	if (mfd->panel.type == MIPI_CMD_PANEL)
+		is_panel_type_cmd = true;
+	if (is_panel_type_cmd) {
+		rc = mdp3_iommu_enable(MDP3_CLIENT_DMA_P);
+		if (rc) {
+			pr_err("fail to enable iommu\n");
+			return rc;
+		}
+	}
+	/* takes a reference on the buffer; released via mdp3_put_img() */
+	rc = mdp3_get_img(img, &data, MDP3_CLIENT_DMA_P);
+	if (rc) {
+		pr_err("fail to get overlay buffer\n");
+		goto err;
+	}
+
+	/* reject buffers too small for the staged DMA configuration */
+	if (data.len < dma->source_config.stride * dma->source_config.height) {
+		pr_err("buf size(0x%lx) is smaller than dma config(0x%x)\n",
+			data.len, (dma->source_config.stride *
+			dma->source_config.height));
+		mdp3_put_img(&data, MDP3_CLIENT_DMA_P);
+		rc = -EINVAL;
+		goto err;
+	}
+	rc = mdp3_bufq_push(&mdp3_session->bufq_in, &data);
+	if (rc) {
+		pr_err("fail to queue the overlay buffer, buffer drop\n");
+		mdp3_put_img(&data, MDP3_CLIENT_DMA_P);
+		goto err;
+	}
+	rc = 0;
+err:
+	if (is_panel_type_cmd)
+		mdp3_iommu_disable(MDP3_CLIENT_DMA_P);
+	return rc;
+}
+
+/*
+ * mdp3_overlay_play() - queue a buffer against the active overlay.
+ * @mfd: framebuffer device.
+ * @req: overlay data describing the buffer to queue.
+ *
+ * Returns -EINVAL if no overlay has been set, -EPERM when the panel is
+ * powered off, otherwise the result of queueing the buffer.
+ */
+static int mdp3_overlay_play(struct msm_fb_data_type *mfd,
+				 struct msmfb_overlay_data *req)
+{
+	struct mdp3_session_data *session = mfd->mdp.private1;
+	int ret;
+
+	pr_debug("mdp3_overlay_play req id=%x mem_id=%d\n",
+		req->id, req->data.memory_id);
+
+	mutex_lock(&session->lock);
+
+	if (session->overlay.id == MSMFB_NEW_REQUEST) {
+		pr_err("overlay play without overlay set first\n");
+		ret = -EINVAL;
+	} else if (!mdss_fb_is_power_on(mfd)) {
+		ret = -EPERM;
+	} else {
+		ret = mdp3_overlay_queue_buffer(mfd, req);
+	}
+
+	mutex_unlock(&session->lock);
+
+	return ret;
+}
+
+/* Return true when the requested ROI differs from the programmed one. */
+bool update_roi(struct mdp3_rect oldROI, struct mdp_rect newROI)
+{
+	bool unchanged;
+
+	unchanged = (newROI.x == oldROI.x) && (newROI.y == oldROI.y) &&
+		(newROI.w == oldROI.w) && (newROI.h == oldROI.h);
+
+	return !unchanged;
+}
+
+/*
+ * Return true when @roi is non-empty and lies entirely within the DMA
+ * source rectangle described by @source_config.
+ */
+bool is_roi_valid(struct mdp3_dma_source source_config, struct mdp_rect roi)
+{
+	if (!(roi.w > 0) || !(roi.h > 0))
+		return false;
+	if (roi.x < source_config.x || roi.y < source_config.y)
+		return false;
+
+	return ((roi.x + roi.w) <= source_config.width) &&
+		((roi.y + roi.h) <= source_config.height);
+}
+
+/*
+ * mdp3_ctrl_display_commit_kickoff() - flush one queued frame to the panel.
+ * @mfd: framebuffer device.
+ * @cmt_data: commit parameters, including the requested left ROI.
+ *
+ * Pops the next buffer from the input queue, programs a partial-update
+ * ROI when one is valid, kicks the DMA, then recycles the previous
+ * frame's buffer from the output queue.  Also handles first-commit
+ * panel-on delays, splash/ESD backlight restore, and resets the device
+ * out of splash or idle power collapse before committing.  Returns 0 on
+ * success or a negative errno.
+ */
+static int mdp3_ctrl_display_commit_kickoff(struct msm_fb_data_type *mfd,
+					struct mdp_display_commit *cmt_data)
+{
+	struct mdp3_session_data *mdp3_session;
+	struct mdp3_img_data *data;
+	struct mdss_panel_info *panel_info;
+	int rc = 0;
+	/* persists across calls: backlight is restored only once per boot */
+	static bool splash_done;
+	struct mdss_panel_data *panel;
+
+	if (!mfd || !mfd->mdp.private1)
+		return -EINVAL;
+
+	panel_info = mfd->panel_info;
+	mdp3_session = mfd->mdp.private1;
+	if (!mdp3_session || !mdp3_session->dma)
+		return -EINVAL;
+
+	if (mdp3_bufq_count(&mdp3_session->bufq_in) == 0) {
+		pr_debug("no buffer in queue yet\n");
+		return -EPERM;
+	}
+
+	/* stage a new partial-update ROI only when it actually changed */
+	if (panel_info->partial_update_enabled &&
+		is_roi_valid(mdp3_session->dma->source_config,
+			     cmt_data->l_roi) &&
+		update_roi(mdp3_session->dma->roi, cmt_data->l_roi)) {
+		mdp3_session->dma->roi.x = cmt_data->l_roi.x;
+		mdp3_session->dma->roi.y = cmt_data->l_roi.y;
+		mdp3_session->dma->roi.w = cmt_data->l_roi.w;
+		mdp3_session->dma->roi.h = cmt_data->l_roi.h;
+		mdp3_session->dma->update_src_cfg = true;
+		pr_debug("%s: ROI: x=%d y=%d w=%d h=%d\n", __func__,
+			mdp3_session->dma->roi.x,
+			mdp3_session->dma->roi.y,
+			mdp3_session->dma->roi.w,
+			mdp3_session->dma->roi.h);
+	}
+
+	panel = mdp3_session->panel;
+	/* leave splash / idle power collapse before touching the HW */
+	mutex_lock(&mdp3_res->fs_idle_pc_lock);
+	if (mdp3_session->in_splash_screen ||
+		mdp3_res->idle_pc) {
+		pr_debug("%s: reset- in_splash = %d, idle_pc = %d", __func__,
+			mdp3_session->in_splash_screen, mdp3_res->idle_pc);
+		rc = mdp3_ctrl_reset(mfd);
+		if (rc) {
+			pr_err("fail to reset display\n");
+			mutex_unlock(&mdp3_res->fs_idle_pc_lock);
+			return -EINVAL;
+		}
+	}
+	mutex_unlock(&mdp3_res->fs_idle_pc_lock);
+
+	mutex_lock(&mdp3_session->lock);
+
+	if (!mdp3_session->status) {
+		pr_err("%s, display off!\n", __func__);
+		mutex_unlock(&mdp3_session->lock);
+		return -EPERM;
+	}
+
+	mdp3_ctrl_notify(mdp3_session, MDP_NOTIFY_FRAME_BEGIN);
+	data = mdp3_bufq_pop(&mdp3_session->bufq_in);
+	if (data) {
+		mdp3_ctrl_reset_countdown(mdp3_session, mfd);
+		mdp3_ctrl_clk_enable(mfd, 1);
+		if (mdp3_session->dma->update_src_cfg &&
+				panel_info->partial_update_enabled) {
+			panel->panel_info.roi.x = mdp3_session->dma->roi.x;
+			panel->panel_info.roi.y = mdp3_session->dma->roi.y;
+			panel->panel_info.roi.w = mdp3_session->dma->roi.w;
+			panel->panel_info.roi.h = mdp3_session->dma->roi.h;
+			/*
+			 * NOTE(review): the (void *)(int) cast truncates the
+			 * DMA address on 64-bit builds — presumably this
+			 * driver is only built for 32-bit targets; confirm.
+			 */
+			rc = mdp3_session->dma->update(mdp3_session->dma,
+					(void *)(int)data->addr,
+					mdp3_session->intf, (void *)panel);
+		} else {
+			rc = mdp3_session->dma->update(mdp3_session->dma,
+					(void *)(int)data->addr,
+					mdp3_session->intf, NULL);
+		}
+		/* This is for the previous frame */
+		if (rc < 0) {
+			mdp3_ctrl_notify(mdp3_session,
+				MDP_NOTIFY_FRAME_TIMEOUT);
+		} else {
+			if (mdp3_ctrl_get_intf_type(mfd) ==
+						MDP3_DMA_OUTPUT_SEL_DSI_VIDEO) {
+				mdp3_ctrl_notify(mdp3_session,
+					MDP_NOTIFY_FRAME_DONE);
+			}
+		}
+		mdp3_session->dma_active = 1;
+		init_completion(&mdp3_session->dma_completion);
+		mdp3_ctrl_notify(mdp3_session, MDP_NOTIFY_FRAME_FLUSHED);
+		mdp3_bufq_push(&mdp3_session->bufq_out, data);
+	}
+
+	/* recycle the frame displayed before the one just queued */
+	if (mdp3_bufq_count(&mdp3_session->bufq_out) > 1) {
+		mdp3_release_splash_memory(mfd);
+		data = mdp3_bufq_pop(&mdp3_session->bufq_out);
+		if (data)
+			mdp3_put_img(data, MDP3_CLIENT_DMA_P);
+	}
+
+	if (mdp3_session->first_commit) {
+		/*wait to ensure frame is sent to panel*/
+		if (panel_info->mipi.post_init_delay)
+			msleep(((1000 / panel_info->mipi.frame_rate) + 1) *
+					panel_info->mipi.post_init_delay);
+		else
+			msleep(1000 / panel_info->mipi.frame_rate);
+		mdp3_session->first_commit = false;
+		if (panel)
+			rc |= panel->event_handler(panel,
+				MDSS_EVENT_POST_PANEL_ON, NULL);
+	}
+
+	mdp3_session->vsync_before_commit = 0;
+	/* restore backlight after splash hand-off or ESD recovery */
+	if (!splash_done || mdp3_session->esd_recovery == true) {
+		if (panel && panel->set_backlight)
+			panel->set_backlight(panel, panel->panel_info.bl_max);
+		splash_done = true;
+		mdp3_session->esd_recovery = false;
+	}
+
+	/* start vsync tick countdown for cmd mode if vsync isn't enabled */
+	if (mfd->panel.type == MIPI_CMD_PANEL && !mdp3_session->vsync_enabled)
+		mdp3_ctrl_vsync_enable(mdp3_session->mfd, 0);
+
+	mutex_unlock(&mdp3_session->lock);
+
+	mdss_fb_update_notify_update(mfd);
+
+	return 0;
+}
+
+/*
+ * mdp3_map_pan_buff_immediate() - SMMU-map the framebuffer for pan display.
+ * @mfd: framebuffer device whose fbmem dma-buf is mapped.
+ *
+ * On success stores the mapped IOVA in mfd->iova.  On failure the
+ * dma-buf attachment and reference taken earlier are torn down.
+ * Returns 0 or a negative errno.
+ */
+static int mdp3_map_pan_buff_immediate(struct msm_fb_data_type *mfd)
+{
+	int rc = 0;
+	unsigned long length;
+	dma_addr_t addr;
+	int domain = mfd->mdp.fb_mem_get_iommu_domain();
+
+	rc = mdss_smmu_map_dma_buf(mfd->fbmem_buf, mfd->fb_table, domain,
+					&addr, &length, DMA_BIDIRECTIONAL);
+	/*
+	 * NOTE(review): IS_ERR_VALUE() on a plain int rc is a known kernel
+	 * pitfall; it presumably works here because rc is a small negative
+	 * errno — confirm against the kernel version's IS_ERR_VALUE().
+	 */
+	if (IS_ERR_VALUE(rc))
+		goto err_unmap;
+	else
+		mfd->iova = addr;
+
+	pr_debug("%s : smmu map dma buf VA: (%llx) MFD->iova %llx\n",
+			__func__, (u64) addr, (u64) mfd->iova);
+	return rc;
+
+err_unmap:
+	pr_err("smmu map dma buf failed: (%d)\n", rc);
+	dma_buf_unmap_attachment(mfd->fb_attachment, mfd->fb_table,
+			mdss_smmu_dma_data_direction(DMA_BIDIRECTIONAL));
+	dma_buf_detach(mfd->fbmem_buf, mfd->fb_attachment);
+	dma_buf_put(mfd->fbmem_buf);
+	return rc;
+}
+
+/*
+ * mdp3_ctrl_pan_display() - display the legacy framebuffer (FBIOPAN).
+ * @mfd: framebuffer device to pan.
+ *
+ * Computes the byte offset from the fb_var pan coordinates, maps the
+ * framebuffer on the first commit, and kicks the DMA from mfd->iova.
+ * When no framebuffer memory is mapped, the interface is stopped
+ * instead.  Resets the device out of splash or idle power collapse
+ * first.  Errors are logged; the function returns no status.
+ */
+static void mdp3_ctrl_pan_display(struct msm_fb_data_type *mfd)
+{
+	struct fb_info *fbi;
+	struct mdp3_session_data *mdp3_session;
+	u32 offset;
+	int bpp;
+	struct mdss_panel_info *panel_info;
+	/* persists across calls: backlight restored only once per boot */
+	static bool splash_done;
+	struct mdss_panel_data *panel;
+
+	int rc;
+
+	pr_debug("mdp3_ctrl_pan_display\n");
+	if (!mfd || !mfd->mdp.private1)
+		return;
+
+	panel_info = mfd->panel_info;
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+	if (!mdp3_session || !mdp3_session->dma)
+		return;
+
+	/* leave splash / idle power collapse before touching the HW */
+	mutex_lock(&mdp3_res->fs_idle_pc_lock);
+	if (mdp3_session->in_splash_screen ||
+		mdp3_res->idle_pc) {
+		pr_debug("%s: reset- in_splash = %d, idle_pc = %d", __func__,
+			mdp3_session->in_splash_screen, mdp3_res->idle_pc);
+		rc = mdp3_ctrl_reset(mfd);
+		if (rc) {
+			pr_err("fail to reset display\n");
+			mutex_unlock(&mdp3_res->fs_idle_pc_lock);
+			return;
+		}
+	}
+	mutex_unlock(&mdp3_res->fs_idle_pc_lock);
+
+	mutex_lock(&mdp3_session->lock);
+
+	if (!mdp3_session->status) {
+		pr_err("mdp3_ctrl_pan_display, display off!\n");
+		goto pan_error;
+	}
+
+	fbi = mfd->fbi;
+
+	/* byte offset of the panned region inside the framebuffer */
+	bpp = fbi->var.bits_per_pixel / 8;
+	offset = fbi->var.xoffset * bpp +
+		 fbi->var.yoffset * fbi->fix.line_length;
+
+	if (offset > fbi->fix.smem_len) {
+		pr_err("invalid fb offset=%u total length=%u\n",
+			offset, fbi->fix.smem_len);
+		goto pan_error;
+	}
+
+	if (mfd->fbi->screen_base) {
+		mdp3_ctrl_reset_countdown(mdp3_session, mfd);
+		mdp3_ctrl_notify(mdp3_session, MDP_NOTIFY_FRAME_BEGIN);
+		mdp3_ctrl_clk_enable(mfd, 1);
+		/* map the fb dma-buf lazily on the very first commit */
+		if (mdp3_session->first_commit) {
+			rc = mdp3_map_pan_buff_immediate(mfd);
+			if (IS_ERR_VALUE(rc))
+				goto pan_error;
+		}
+		/*
+		 * NOTE(review): (void *)(int) truncates the IOVA on 64-bit
+		 * builds — presumably 32-bit-only hardware; confirm.
+		 */
+		rc = mdp3_session->dma->update(mdp3_session->dma,
+				(void *)(int)(mfd->iova + offset),
+				mdp3_session->intf, NULL);
+		/* This is for the previous frame */
+		if (rc < 0) {
+			mdp3_ctrl_notify(mdp3_session,
+				MDP_NOTIFY_FRAME_TIMEOUT);
+		} else {
+			if (mdp3_ctrl_get_intf_type(mfd) ==
+				MDP3_DMA_OUTPUT_SEL_DSI_VIDEO) {
+				mdp3_ctrl_notify(mdp3_session,
+					MDP_NOTIFY_FRAME_DONE);
+			}
+		}
+		mdp3_session->dma_active = 1;
+		init_completion(&mdp3_session->dma_completion);
+		mdp3_ctrl_notify(mdp3_session, MDP_NOTIFY_FRAME_FLUSHED);
+	} else {
+		pr_debug("mdp3_ctrl_pan_display no memory, stop interface");
+		mdp3_clk_enable(1, 0);
+		mdp3_session->dma->stop(mdp3_session->dma, mdp3_session->intf);
+		mdp3_clk_enable(0, 0);
+	}
+
+	panel = mdp3_session->panel;
+	if (mdp3_session->first_commit) {
+		/*wait to ensure frame is sent to panel*/
+		if (panel_info->mipi.init_delay)
+			msleep(((1000 / panel_info->mipi.frame_rate) + 1) *
+					panel_info->mipi.init_delay);
+		else
+			msleep(1000 / panel_info->mipi.frame_rate);
+		mdp3_session->first_commit = false;
+		if (panel)
+			panel->event_handler(panel, MDSS_EVENT_POST_PANEL_ON,
+					NULL);
+	}
+
+	mdp3_session->vsync_before_commit = 0;
+	/* restore backlight after splash hand-off or ESD recovery */
+	if (!splash_done || mdp3_session->esd_recovery == true) {
+		if (panel && panel->set_backlight)
+			panel->set_backlight(panel, panel->panel_info.bl_max);
+		splash_done = true;
+		mdp3_session->esd_recovery = false;
+	}
+
+
+pan_error:
+	mutex_unlock(&mdp3_session->lock);
+}
+
+/*
+ * mdp3_set_metadata() - service the MDP SET metadata ioctl.
+ * @mfd: framebuffer device being configured.
+ * @metadata_ptr: request block; op selects the operation.
+ *
+ * Only metadata_op_crc (MISR configuration) is supported.  Returns 0 on
+ * success or a negative errno.
+ *
+ * Fix: the mdp3_misr_set() result used to be overwritten by the
+ * clock-release call, silently discarding MISR setup failures.
+ */
+static int mdp3_set_metadata(struct msm_fb_data_type *mfd,
+				struct msmfb_metadata *metadata_ptr)
+{
+	int ret = 0;
+	int clk_ret;
+
+	switch (metadata_ptr->op) {
+	case metadata_op_crc:
+		ret = mdp3_ctrl_res_req_clk(mfd, 1);
+		if (ret) {
+			pr_err("failed to turn on mdp clks\n");
+			return ret;
+		}
+		ret = mdp3_misr_set(&metadata_ptr->data.misr_request);
+		/* always drop the clock vote, even if MISR setup failed */
+		clk_ret = mdp3_ctrl_res_req_clk(mfd, 0);
+		if (clk_ret) {
+			pr_err("failed to release mdp clks\n");
+			return clk_ret;
+		}
+		break;
+	default:
+		pr_warn("Unsupported request to MDP SET META IOCTL.\n");
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+/*
+ * mdp3_get_metadata() - service the MDP GET metadata ioctl.
+ * @mfd: framebuffer device being queried.
+ * @metadata: request/response block; op selects what is returned.
+ *
+ * Supports frame-rate query, capability query, MISR/CRC collection and
+ * exporting the framebuffer memory as a dma-buf fd.  Returns 0 on
+ * success or a negative errno.
+ *
+ * Fix: the mdp3_misr_get() result used to be overwritten by the
+ * clock-release call, silently discarding MISR read failures.
+ */
+static int mdp3_get_metadata(struct msm_fb_data_type *mfd,
+				struct msmfb_metadata *metadata)
+{
+	int ret = 0;
+	int clk_ret;
+
+	switch (metadata->op) {
+	case metadata_op_frame_rate:
+		metadata->data.panel_frame_rate =
+			mfd->panel_info->mipi.frame_rate;
+		break;
+	case metadata_op_get_caps:
+		/* MDP 3.0.5: one DMA pipe, no RGB/VIG pipes */
+		metadata->data.caps.mdp_rev = 305;
+		metadata->data.caps.rgb_pipes = 0;
+		metadata->data.caps.vig_pipes = 0;
+		metadata->data.caps.dma_pipes = 1;
+		break;
+	case metadata_op_crc:
+		ret = mdp3_ctrl_res_req_clk(mfd, 1);
+		if (ret) {
+			pr_err("failed to turn on mdp clks\n");
+			return ret;
+		}
+		ret = mdp3_misr_get(&metadata->data.misr_request);
+		/* always drop the clock vote, even if the MISR read failed */
+		clk_ret = mdp3_ctrl_res_req_clk(mfd, 0);
+		if (clk_ret) {
+			pr_err("failed to release mdp clks\n");
+			return clk_ret;
+		}
+		break;
+	case metadata_op_get_ion_fd:
+		if (mfd->fb_ion_handle) {
+			metadata->data.fbmem_ionfd =
+					dma_buf_fd(mfd->fbmem_buf, 0);
+			if (metadata->data.fbmem_ionfd < 0)
+				pr_err("fd allocation failed. fd = %d\n",
+						metadata->data.fbmem_ionfd);
+		}
+		break;
+	default:
+		pr_warn("Unsupported request to MDP GET  META IOCTL.\n");
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+/*
+ * Validate a histogram-start request against DMA_P limits.
+ * Returns 0 when acceptable, -EINVAL otherwise.
+ */
+int mdp3_validate_start_req(struct mdp_histogram_start_req *req)
+{
+	int ret = 0;
+
+	if (req->frame_cnt >= MDP_HISTOGRAM_FRAME_COUNT_MAX) {
+		pr_err("%s invalid req frame_cnt\n", __func__);
+		ret = -EINVAL;
+	} else if (req->bit_mask >= MDP_HISTOGRAM_BIT_MASK_MAX) {
+		pr_err("%s invalid req bit mask\n", __func__);
+		ret = -EINVAL;
+	} else if (req->block != MDP_BLOCK_DMA_P ||
+			req->num_bins != MDP_HISTOGRAM_BIN_NUM) {
+		pr_err("mdp3_histogram_start invalid request\n");
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/*
+ * Range-check a backlight scale request.
+ * Returns 0 when valid, -EINVAL otherwise.
+ */
+int mdp3_validate_scale_config(struct mdp_bl_scale_data *data)
+{
+	int ret = 0;
+
+	if (data->scale > MDP_HISTOGRAM_BL_SCALE_MAX) {
+		pr_err("%s invalid bl_scale\n", __func__);
+		ret = -EINVAL;
+	} else if (data->min_lvl > MDP_HISTOGRAM_BL_LEVEL_MAX) {
+		pr_err("%s invalid bl_min_lvl\n", __func__);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/*
+ * Validate a CSC configuration: every matrix/vector entry must be below
+ * its hardware maximum, and the 3x3 matrix must not be all zeroes
+ * (which would produce a black screen).  Returns 0 or -EINVAL.
+ */
+int mdp3_validate_csc_data(struct mdp_csc_cfg_data *data)
+{
+	bool any_nonzero_mv = false;
+	int idx;
+
+	for (idx = 0; idx < 9; idx++) {
+		if (data->csc_data.csc_mv[idx] >=
+				MDP_HISTOGRAM_CSC_MATRIX_MAX)
+			return -EINVAL;
+		if (data->csc_data.csc_mv[idx] != 0)
+			any_nonzero_mv = true;
+	}
+
+	if (!any_nonzero_mv) {
+		pr_err("%s: black screen data! csc_mv is all 0s\n", __func__);
+		return -EINVAL;
+	}
+
+	for (idx = 0; idx < 3; idx++) {
+		if (data->csc_data.csc_pre_bv[idx] >=
+				MDP_HISTOGRAM_CSC_VECTOR_MAX ||
+		    data->csc_data.csc_post_bv[idx] >=
+				MDP_HISTOGRAM_CSC_VECTOR_MAX)
+			return -EINVAL;
+	}
+
+	for (idx = 0; idx < 6; idx++) {
+		if (data->csc_data.csc_pre_lv[idx] >=
+				MDP_HISTOGRAM_CSC_VECTOR_MAX ||
+		    data->csc_data.csc_post_lv[idx] >=
+				MDP_HISTOGRAM_CSC_VECTOR_MAX)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * mdp3_histogram_start() - configure and start DMA_P histogram capture.
+ * @session: MDP3 session owning the DMA engine.
+ * @req: userspace start request (frame count, bit mask, block, bins).
+ *
+ * Validates the request, resets the histogram block, programs it and
+ * starts collection.  Holds session->lock for the whole operation and
+ * session->histo_lock (nested inside it) around the status update.
+ * Returns 0 on success (including when already started) or a negative
+ * errno.
+ */
+static int mdp3_histogram_start(struct mdp3_session_data *session,
+					struct mdp_histogram_start_req *req)
+{
+	int ret;
+	struct mdp3_dma_histogram_config histo_config;
+
+	mutex_lock(&session->lock);
+	if (!session->status) {
+		mutex_unlock(&session->lock);
+		return -EPERM;
+	}
+
+	pr_debug("mdp3_histogram_start\n");
+
+	ret = mdp3_validate_start_req(req);
+	if (ret) {
+		mutex_unlock(&session->lock);
+		return ret;
+	}
+
+	if (!session->dma->histo_op ||
+		!session->dma->config_histo) {
+		pr_err("mdp3_histogram_start not supported\n");
+		mutex_unlock(&session->lock);
+		return -EINVAL;
+	}
+
+	mutex_lock(&session->histo_lock);
+
+	if (session->histo_status) {
+		pr_info("mdp3_histogram_start already started\n");
+		mutex_unlock(&session->histo_lock);
+		mutex_unlock(&session->lock);
+		return 0;
+	}
+
+	/* vote resources on; the matching off-vote is at the exit label */
+	mdp3_res_update(1, 0, MDP3_CLIENT_DMA_P);
+	ret = session->dma->histo_op(session->dma, MDP3_DMA_HISTO_OP_RESET);
+	if (ret) {
+		pr_err("mdp3_histogram_start reset error\n");
+		goto histogram_start_err;
+	}
+
+	histo_config.frame_count = req->frame_cnt;
+	histo_config.bit_mask = req->bit_mask;
+	histo_config.auto_clear_en = 1;
+	histo_config.bit_mask_polarity = 0;
+	ret = session->dma->config_histo(session->dma, &histo_config);
+	if (ret) {
+		pr_err("mdp3_histogram_start config error\n");
+		goto histogram_start_err;
+	}
+
+	ret = session->dma->histo_op(session->dma, MDP3_DMA_HISTO_OP_START);
+	if (ret) {
+		pr_err("mdp3_histogram_start config error\n");
+		goto histogram_start_err;
+	}
+
+	session->histo_status = 1;
+
+histogram_start_err:
+	mdp3_res_update(0, 0, MDP3_CLIENT_DMA_P);
+	mutex_unlock(&session->histo_lock);
+	mutex_unlock(&session->lock);
+	return ret;
+}
+
+/*
+ * mdp3_histogram_stop() - cancel DMA_P histogram collection.
+ * @session: MDP3 session owning the DMA engine.
+ * @block: histogram block; only MDP_BLOCK_DMA_P is supported.
+ *
+ * Returns 0 on success (including when already stopped) or a negative
+ * errno.
+ */
+static int mdp3_histogram_stop(struct mdp3_session_data *session,
+					u32 block)
+{
+	int ret = 0;
+
+	pr_debug("mdp3_histogram_stop\n");
+
+	if (!session->dma->histo_op || block != MDP_BLOCK_DMA_P) {
+		pr_err("mdp3_histogram_stop not supported\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&session->histo_lock);
+
+	if (session->histo_status) {
+		mdp3_clk_enable(1, 0);
+		ret = session->dma->histo_op(session->dma,
+				MDP3_DMA_HISTO_OP_CANCEL);
+		mdp3_clk_enable(0, 0);
+		if (ret)
+			pr_err("mdp3_histogram_stop error\n");
+		session->histo_status = 0;
+	} else {
+		pr_debug("mdp3_histogram_stop already stopped!");
+	}
+
+	mutex_unlock(&session->histo_lock);
+	return ret;
+}
+
+/*
+ * mdp3_histogram_collect() - read back DMA_P histogram data to userspace.
+ * @session: MDP3 session owning the DMA engine.
+ * @hist: userspace destination buffers (c0/c1/c2/extra_info).
+ *
+ * Requires a started histogram and running clocks.  Copies the R/G/B
+ * bins and the two extra-info words out to userspace and fills in the
+ * bin count and block id.  Returns 0 on success or a negative errno.
+ *
+ * Fix: copy_to_user() returns the number of bytes NOT copied; the old
+ * code returned that positive count directly instead of -EFAULT.
+ */
+static int mdp3_histogram_collect(struct mdp3_session_data *session,
+				struct mdp_histogram_data *hist)
+{
+	int ret;
+	struct mdp3_dma_histogram_data *mdp3_histo;
+
+	pr_debug("%s\n", __func__);
+	if (!session->dma->get_histo) {
+		pr_err("mdp3_histogram_collect not supported\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&session->histo_lock);
+
+	if (!session->histo_status) {
+		pr_debug("mdp3_histogram_collect not started\n");
+		mutex_unlock(&session->histo_lock);
+		return -EPROTO;
+	}
+
+	mutex_unlock(&session->histo_lock);
+
+	if (!session->clk_on) {
+		pr_debug("mdp/dsi clock off currently\n");
+		return -EPERM;
+	}
+
+	mdp3_clk_enable(1, 0);
+	ret = session->dma->get_histo(session->dma);
+	mdp3_clk_enable(0, 0);
+	if (ret) {
+		pr_debug("mdp3_histogram_collect error = %d\n", ret);
+		return ret;
+	}
+
+	mdp3_histo = &session->dma->histo_data;
+
+	/* translate any partial copy into -EFAULT per kernel convention */
+	if (copy_to_user(hist->c0, mdp3_histo->r_data,
+			sizeof(uint32_t) * MDP_HISTOGRAM_BIN_NUM))
+		return -EFAULT;
+
+	if (copy_to_user(hist->c1, mdp3_histo->g_data,
+			sizeof(uint32_t) * MDP_HISTOGRAM_BIN_NUM))
+		return -EFAULT;
+
+	if (copy_to_user(hist->c2, mdp3_histo->b_data,
+			sizeof(uint32_t) * MDP_HISTOGRAM_BIN_NUM))
+		return -EFAULT;
+
+	if (copy_to_user(hist->extra_info, mdp3_histo->extra,
+			sizeof(uint32_t) * 2))
+		return -EFAULT;
+
+	hist->bin_cnt = MDP_HISTOGRAM_BIN_NUM;
+	hist->block = MDP_BLOCK_DMA_P;
+	return 0;
+}
+
+/*
+ * mdp3_bl_scale_config() - apply new backlight scale/minimum settings.
+ * @mfd: framebuffer device data
+ * @data: requested scale and minimum level
+ *
+ * Latches the new scaling parameters under bl_lock and re-applies the
+ * current backlight level so the new scale takes effect immediately.
+ *
+ * Return: always 0.
+ */
+static int mdp3_bl_scale_config(struct msm_fb_data_type *mfd,
+					struct mdp_bl_scale_data *data)
+{
+	int prev_bl;
+
+	mutex_lock(&mfd->bl_lock);
+
+	prev_bl = mfd->bl_level;
+	mfd->bl_scale = data->scale;
+	mfd->bl_min_lvl = data->min_lvl;
+	pr_debug("update scale = %d, min_lvl = %d\n", mfd->bl_scale,
+							mfd->bl_min_lvl);
+
+	/* update current backlight to use new scaling*/
+	mdss_fb_set_backlight(mfd, prev_bl);
+
+	mutex_unlock(&mfd->bl_lock);
+	return 0;
+}
+
+/*
+ * mdp3_csc_config() - program the color-space-conversion (CSC) block.
+ * @session: display session owning the DMA pipe
+ * @data: CSC matrix/bias/limit vectors from userspace (already copied
+ *        into kernel space by the caller)
+ *
+ * Flips to the alternate vector bank (double-buffered via cc_vect_sel),
+ * marks the config dirty so it is applied on the next kickoff, and
+ * caches the setting for suspend/resume reprogramming.
+ *
+ * Locking: takes session->lock then dma->pp_lock, in that order.
+ *
+ * Return: result of the dma config_ccs operation, or -EINVAL on bad
+ * input vectors.
+ */
+static int mdp3_csc_config(struct mdp3_session_data *session,
+					struct mdp_csc_cfg_data *data)
+{
+	struct mdp3_dma_color_correct_config config;
+	struct mdp3_dma_ccs ccs;
+	int ret = -EINVAL;
+
+	/* All five vector pointers must be present. */
+	if (!data->csc_data.csc_mv || !data->csc_data.csc_pre_bv ||
+		!data->csc_data.csc_post_bv || !data->csc_data.csc_pre_lv ||
+			!data->csc_data.csc_post_lv) {
+		pr_err("%s : Invalid csc vectors", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&session->lock);
+	mutex_lock(&session->dma->pp_lock);
+	/* Ping-pong between the two HW vector banks. */
+	session->dma->cc_vect_sel = (session->dma->cc_vect_sel + 1) % 2;
+
+	config.ccs_enable = 1;
+	config.ccs_sel = session->dma->cc_vect_sel;
+	config.pre_limit_sel = session->dma->cc_vect_sel;
+	config.post_limit_sel = session->dma->cc_vect_sel;
+	config.pre_bias_sel = session->dma->cc_vect_sel;
+	config.post_bias_sel = session->dma->cc_vect_sel;
+	config.ccs_dirty = true;
+
+	ccs.mv = data->csc_data.csc_mv;
+	ccs.pre_bv = data->csc_data.csc_pre_bv;
+	ccs.post_bv = data->csc_data.csc_post_bv;
+	ccs.pre_lv = data->csc_data.csc_pre_lv;
+	ccs.post_lv = data->csc_data.csc_post_lv;
+
+	/* cache one copy of setting for suspend/resume reconfiguring */
+	session->dma->ccs_cache = *data;
+
+	mdp3_clk_enable(1, 0);
+	ret = session->dma->config_ccs(session->dma, &config, &ccs);
+	mdp3_clk_enable(0, 0);
+	mutex_unlock(&session->dma->pp_lock);
+	mutex_unlock(&session->lock);
+	return ret;
+}
+
+/*
+ * mdp3_pp_ioctl() - MSMFB_MDP_PP ioctl dispatcher (post-processing).
+ * @mfd: framebuffer device data
+ * @argp: userspace pointer to a struct msmfb_mdp_pp
+ *
+ * Handles backlight scaling, CSC configuration and RGB LUT read/write.
+ * On success the (possibly updated) payload is copied back to user.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int mdp3_pp_ioctl(struct msm_fb_data_type *mfd,
+					void __user *argp)
+{
+	int ret = -EINVAL;
+	struct msmfb_mdp_pp mdp_pp;
+	struct mdp_lut_cfg_data *lut;
+	struct mdp3_session_data *mdp3_session;
+
+	if (!mfd || !mfd->mdp.private1)
+		return -EINVAL;
+
+	mdp3_session = mfd->mdp.private1;
+
+	/* copy_from_user() returns a byte count, not an errno. */
+	if (copy_from_user(&mdp_pp, argp, sizeof(mdp_pp)))
+		return -EFAULT;
+
+	switch (mdp_pp.op) {
+	case mdp_bl_scale_cfg:
+		ret = mdp3_validate_scale_config(&mdp_pp.data.bl_scale_data);
+		if (ret) {
+			pr_err("%s: invalid scale config\n", __func__);
+			break;
+		}
+		ret = mdp3_bl_scale_config(mfd, (struct mdp_bl_scale_data *)
+						&mdp_pp.data.bl_scale_data);
+		break;
+	case mdp_op_csc_cfg:
+		/* Checking state of dyn_pu before programming CSC block */
+		if (mdp3_session->dyn_pu_state) {
+			pr_debug("Partial update feature is enabled.\n");
+			return -EPERM;
+		}
+		ret = mdp3_validate_csc_data(&(mdp_pp.data.csc_cfg_data));
+		if (ret) {
+			pr_err("%s: invalid csc data\n", __func__);
+			break;
+		}
+		ret = mdp3_csc_config(mdp3_session,
+						&(mdp_pp.data.csc_cfg_data));
+		break;
+	case mdp_op_lut_cfg:
+		lut = &mdp_pp.data.lut_cfg_data;
+		if (lut->lut_type != mdp_lut_rgb) {
+			pr_err("Lut type %d is not supported", lut->lut_type);
+			return -EINVAL;
+		}
+		if (lut->data.rgb_lut_data.flags & MDP_PP_OPS_READ)
+			ret = mdp3_ctrl_lut_read(mfd,
+						&(lut->data.rgb_lut_data));
+		else
+			ret = mdp3_ctrl_lut_config(mfd,
+						&(lut->data.rgb_lut_data));
+		if (ret)
+			pr_err("RGB LUT ioctl failed\n");
+		/*
+		 * Success copy-back is handled once below for all ops;
+		 * the previous per-case copy here was redundant.
+		 */
+		break;
+
+	default:
+		pr_err("Unsupported request to MDP_PP IOCTL.\n");
+		ret = -EINVAL;
+		break;
+	}
+	/* Mirror results (e.g. LUT reads) back to the caller on success. */
+	if (!ret && copy_to_user(argp, &mdp_pp, sizeof(struct msmfb_mdp_pp)))
+		ret = -EFAULT;
+	return ret;
+}
+
+/*
+ * mdp3_histo_ioctl() - histogram-related ioctl dispatcher.
+ * @mfd: framebuffer device data
+ * @cmd: one of MSMFB_HISTOGRAM_START/STOP or MSMFB_HISTOGRAM
+ * @argp: userspace argument pointer for the given command
+ *
+ * Return: 0 on success, negative errno on failure; -EOPNOTSUPP for
+ * unrecognized commands.
+ */
+static int mdp3_histo_ioctl(struct msm_fb_data_type *mfd, u32 cmd,
+				void __user *argp)
+{
+	/*
+	 * ENOTSUP is a userspace (POSIX) name the kernel does not define;
+	 * the kernel-internal convention is EOPNOTSUPP.
+	 */
+	int ret = -EOPNOTSUPP;
+	struct mdp_histogram_data hist;
+	struct mdp_histogram_start_req hist_req;
+	u32 block;
+	struct mdp3_session_data *mdp3_session;
+
+	if (!mfd || !mfd->mdp.private1)
+		return -EINVAL;
+
+	mdp3_session = mfd->mdp.private1;
+
+	switch (cmd) {
+	case MSMFB_HISTOGRAM_START:
+		/* copy_*_user() returns a byte count, not an errno. */
+		if (copy_from_user(&hist_req, argp, sizeof(hist_req)))
+			return -EFAULT;
+
+		ret = mdp3_histogram_start(mdp3_session, &hist_req);
+		break;
+
+	case MSMFB_HISTOGRAM_STOP:
+		if (copy_from_user(&block, argp, sizeof(int)))
+			return -EFAULT;
+
+		ret = mdp3_histogram_stop(mdp3_session, block);
+		break;
+
+	case MSMFB_HISTOGRAM:
+		if (copy_from_user(&hist, argp, sizeof(hist)))
+			return -EFAULT;
+
+		ret = mdp3_histogram_collect(mdp3_session, &hist);
+		if (!ret && copy_to_user(argp, &hist, sizeof(hist)))
+			ret = -EFAULT;
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+/*
+ * mdp3_validate_lut_data() - sanity-check a LUT color map.
+ * @cmap: color map whose red/green/blue arrays are checked
+ *
+ * Every entry must fit in 8 bits (<= 0xFF) across all MDP_LUT_SIZE
+ * slots.
+ *
+ * Return: 0 if valid, -EINVAL on a null pointer or out-of-range entry.
+ */
+static int mdp3_validate_lut_data(struct fb_cmap *cmap)
+{
+	u32 idx;
+
+	if (!(cmap && cmap->red && cmap->green && cmap->blue)) {
+		pr_err("Invalid arguments!\n");
+		return -EINVAL;
+	}
+
+	for (idx = 0; idx < MDP_LUT_SIZE; idx++) {
+		bool too_big = cmap->red[idx] > 0xFF ||
+				cmap->green[idx] > 0xFF ||
+				cmap->blue[idx] > 0xFF;
+
+		if (too_big) {
+			pr_err("LUT value over 255 (limit) at %d index\n",
+				idx);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * mdp3_copy_lut_buffer() - deep-copy one fb_cmap into another.
+ * @dst: destination map; its channel arrays must already be allocated
+ * @src: source map to copy from
+ *
+ * Copies start/len and all three full MDP_LUT_SIZE channel arrays.
+ *
+ * Return: 0 on success, -EINVAL if any pointer is missing.
+ */
+static inline int mdp3_copy_lut_buffer(struct fb_cmap *dst, struct fb_cmap *src)
+{
+	const size_t chan_bytes = MDP_LUT_SIZE * sizeof(u16);
+
+	if (!dst || !src || !dst->red || !dst->blue || !dst->green ||
+		!src->red || !src->green || !src->blue) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	dst->start = src->start;
+	dst->len = src->len;
+
+	memcpy(dst->red, src->red, chan_bytes);
+	memcpy(dst->green, src->green, chan_bytes);
+	memcpy(dst->blue, src->blue, chan_bytes);
+
+	return 0;
+}
+
+/*
+ * mdp3_alloc_lut_buffer() - allocate a zeroed fb_cmap with full-size
+ * red/green/blue channel arrays (MDP_LUT_SIZE u16 entries each).
+ * @pdev: owning platform device (devm-managed allocations)
+ * @cmap: out-parameter receiving the new map on success
+ *
+ * devm_kzalloc() already returns zero-filled memory, so the previous
+ * explicit memset calls were redundant and have been dropped.
+ *
+ * Return: 0 on success, -ENOMEM on any allocation failure (partial
+ * allocations are released).
+ */
+static int mdp3_alloc_lut_buffer(struct platform_device *pdev, void **cmap)
+{
+	struct fb_cmap *map;
+
+	map = devm_kzalloc(&pdev->dev, sizeof(struct fb_cmap), GFP_KERNEL);
+	if (map == NULL)
+		return -ENOMEM;
+
+	map->red = devm_kzalloc(&pdev->dev, MDP_LUT_SIZE * sizeof(u16),
+				GFP_KERNEL);
+	if (map->red == NULL)
+		goto exit_red;
+
+	map->green = devm_kzalloc(&pdev->dev, MDP_LUT_SIZE * sizeof(u16),
+				GFP_KERNEL);
+	if (map->green == NULL)
+		goto exit_green;
+
+	map->blue = devm_kzalloc(&pdev->dev, MDP_LUT_SIZE * sizeof(u16),
+				GFP_KERNEL);
+	if (map->blue == NULL)
+		goto exit_blue;
+
+	*cmap = map;
+	return 0;
+exit_blue:
+	devm_kfree(&pdev->dev, map->green);
+exit_green:
+	devm_kfree(&pdev->dev, map->red);
+exit_red:
+	devm_kfree(&pdev->dev, map);
+	return -ENOMEM;
+}
+
+/*
+ * mdp3_free_lut_buffer() - release a map allocated by
+ * mdp3_alloc_lut_buffer() and clear the caller's pointer.
+ * @pdev: owning platform device
+ * @cmap: address of the caller's map pointer; set to NULL on return
+ *
+ * The original code assigned NULL to the local 'map' variable, which
+ * had no effect and left the caller's pointer (e.g. dma->gc_cmap)
+ * dangling; clearing *cmap fixes that.
+ */
+static void mdp3_free_lut_buffer(struct platform_device *pdev, void **cmap)
+{
+	struct fb_cmap *map = (struct fb_cmap *)(*cmap);
+
+	if (map == NULL)
+		return;
+
+	devm_kfree(&pdev->dev, map->blue);
+	map->blue = NULL;
+	devm_kfree(&pdev->dev, map->green);
+	map->green = NULL;
+	devm_kfree(&pdev->dev, map->red);
+	map->red = NULL;
+	devm_kfree(&pdev->dev, map);
+	*cmap = NULL;
+}
+
+/*
+ * mdp3_lut_combine_gain() - fold GC and histogram LUT gains into one map.
+ * @cmap: destination map receiving the combined 8-bit values
+ * @dma: DMA pipe holding the cached gc_cmap and hist_cmap
+ *
+ * For each entry the product gc * hist * inverse16 is clamped to
+ * 0xFF0000 and the top byte (>> 16) is kept, yielding an 8-bit result.
+ * NOTE(review): the loop starts at index 1, leaving entry 0 of cmap
+ * untouched — presumably because mdp_lut_inverse16[0] is not usable;
+ * confirm against the table's definition.
+ *
+ * Return: 0 on success, -EINVAL if any required map is missing.
+ */
+static int mdp3_lut_combine_gain(struct fb_cmap *cmap, struct mdp3_dma *dma)
+{
+	int i = 0;
+	u32 r = 0, g = 0, b = 0;
+
+	if (!cmap || !dma || !dma->gc_cmap || !dma->hist_cmap ||
+		!dma->gc_cmap->red || !dma->gc_cmap->green ||
+		!dma->gc_cmap->blue || !dma->hist_cmap->red ||
+		!dma->hist_cmap->green || !dma->hist_cmap->blue) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	for (i = 1; i < MDP_LUT_SIZE; i++) {
+		/* Clamp the fixed-point product before extracting 8 bits. */
+		r = MIN(dma->gc_cmap->red[i] * dma->hist_cmap->red[i] *
+			mdp_lut_inverse16[i], 0xFF0000);
+		g = MIN(dma->gc_cmap->green[i] * dma->hist_cmap->green[i] *
+			mdp_lut_inverse16[i], 0xFF0000);
+		b = MIN(dma->gc_cmap->blue[i] * dma->hist_cmap->blue[i] *
+			mdp_lut_inverse16[i], 0xFF0000);
+
+		cmap->red[i]   = (r >> 16) & 0xFF;
+		cmap->green[i] = (g >> 16) & 0xFF;
+		cmap->blue[i]  = (b >> 16) & 0xFF;
+	}
+	return 0;
+}
+
+/* Called from within pp_lock and session lock locked context */
+/*
+ * mdp3_ctrl_lut_update() - push a color map into the HW LUT.
+ * @mfd: framebuffer device data
+ * @cmap: color map to program
+ *
+ * Uses double-buffered LUT banks: programs the bank selected by
+ * lut_sel, then flips lut_sel for the next update.
+ *
+ * Return: 0 on success, -EINVAL if the dma has no config_lut op,
+ * -EPERM if the display is off, otherwise the config_lut result.
+ */
+static int mdp3_ctrl_lut_update(struct msm_fb_data_type *mfd,
+				struct fb_cmap *cmap)
+{
+	int rc = 0;
+	struct mdp3_session_data *mdp3_session = mfd->mdp.private1;
+	struct mdp3_dma *dma;
+	struct mdp3_dma_lut_config lut_config;
+
+	dma = mdp3_session->dma;
+
+	if (!dma->config_lut) {
+		pr_err("Config LUT not defined!\n");
+		return -EINVAL;
+	}
+
+	/* NOTE(review): lut_enable = 7 looks like a 3-bit per-channel
+	 * enable mask (R|G|B) — confirm against the HW programming guide.
+	 */
+	lut_config.lut_enable = 7;
+	lut_config.lut_sel = mdp3_session->lut_sel;
+	lut_config.lut_position = 1;
+	lut_config.lut_dirty = true;
+
+	if (!mdp3_session->status) {
+		pr_err("display off!\n");
+		return -EPERM;
+	}
+
+	mdp3_clk_enable(1, 0);
+	rc = dma->config_lut(dma, &lut_config, cmap);
+	mdp3_clk_enable(0, 0);
+	if (rc)
+		pr_err("mdp3_ctrl_lut_update failed\n");
+
+	/* Flip to the other LUT bank for the next update. */
+	mdp3_session->lut_sel = (mdp3_session->lut_sel + 1) % 2;
+	return rc;
+}
+
+/*
+ * mdp3_ctrl_lut_config() - configure the RGB LUT (gamma and/or hist).
+ * @mfd: framebuffer device data
+ * @cfg: user-supplied RGB LUT configuration (red/green/blue in
+ *       cfg->cmap are __user pointers)
+ *
+ * Copies the user cmap into a scratch buffer, caches it per LUT type
+ * (GC or HIST), combines both gains into one map when both features
+ * are enabled, and programs the result into hardware.
+ *
+ * Locking: takes session->lock then dma->pp_lock, in that order.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int mdp3_ctrl_lut_config(struct msm_fb_data_type *mfd,
+				struct mdp_rgb_lut_data *cfg)
+{
+	int rc = 0;
+	bool data_validated = false;
+	struct mdp3_session_data *mdp3_session = mfd->mdp.private1;
+	struct mdp3_dma *dma;
+	struct fb_cmap *cmap;
+
+	dma = mdp3_session->dma;
+
+	/* The requested range must fall entirely inside the LUT. */
+	if ((cfg->cmap.start > MDP_LUT_SIZE) ||
+		(cfg->cmap.len > MDP_LUT_SIZE) ||
+		(cfg->cmap.start + cfg->cmap.len > MDP_LUT_SIZE)) {
+		pr_err("Invalid arguments.\n");
+		return  -EINVAL;
+	}
+
+	rc = mdp3_alloc_lut_buffer(mfd->pdev, (void **) &cmap);
+	if (rc) {
+		pr_err("No memory\n");
+		return -ENOMEM;
+	}
+
+	mutex_lock(&mdp3_session->lock);
+	mutex_lock(&dma->pp_lock);
+	/*
+	 * copy_from_user() returns the number of bytes left uncopied;
+	 * map any failure to -EFAULT instead of returning that count.
+	 */
+	if (copy_from_user(cmap->red + cfg->cmap.start,
+			cfg->cmap.red, sizeof(u16) * cfg->cmap.len) ||
+	    copy_from_user(cmap->green + cfg->cmap.start,
+			cfg->cmap.green, sizeof(u16) * cfg->cmap.len) ||
+	    copy_from_user(cmap->blue + cfg->cmap.start,
+			cfg->cmap.blue, sizeof(u16) * cfg->cmap.len)) {
+		pr_err("Copying user data failed!\n");
+		rc = -EFAULT;
+		goto exit_err;
+	}
+
+	switch (cfg->lut_type) {
+	case mdp_rgb_lut_gc:
+		if (cfg->flags & MDP_PP_OPS_DISABLE) {
+			if (dma->lut_sts & MDP3_LUT_GC_EN)
+				/* Free GC cmap cache since disabled */
+				mdp3_free_lut_buffer(mfd->pdev,
+						(void **)&dma->gc_cmap);
+			dma->lut_sts &= ~MDP3_LUT_GC_EN;
+		} else if (!(dma->lut_sts & MDP3_LUT_GC_EN)) {
+			/* Check if values sent are valid */
+			rc = mdp3_validate_lut_data(cmap);
+			if (rc) {
+				pr_err("Invalid GC LUT data\n");
+				goto exit_err;
+			}
+			data_validated = true;
+
+			/* Allocate GC cmap cache to store values */
+			rc = mdp3_alloc_lut_buffer(mfd->pdev,
+					(void **)&dma->gc_cmap);
+			if (rc) {
+				pr_err("GC LUT config failed\n");
+				goto exit_err;
+			}
+			dma->lut_sts |= MDP3_LUT_GC_EN;
+		}
+		/*
+		 * Copy the GC values from userspace to maintain the
+		 * correct values user intended to program in cache.
+		 * The values programmed in HW might factor in presence
+		 * of other LUT modifying features hence can be
+		 * different from these user given values.
+		 */
+		if (dma->lut_sts & MDP3_LUT_GC_EN) {
+			/* Validate LUT data if not yet validated */
+			if (!data_validated) {
+				rc = mdp3_validate_lut_data(cmap);
+				if (rc) {
+					pr_err("Invalid GC LUT data\n");
+					goto exit_err;
+				}
+			}
+			rc = mdp3_copy_lut_buffer(dma->gc_cmap, cmap);
+			if (rc) {
+				pr_err("Could not store GC to cache\n");
+				goto exit_err;
+			}
+		}
+		break;
+	case mdp_rgb_lut_hist:
+		if (cfg->flags & MDP_PP_OPS_DISABLE) {
+			if (dma->lut_sts & MDP3_LUT_HIST_EN)
+				/* Free HIST cmap cache since disabled */
+				mdp3_free_lut_buffer(mfd->pdev,
+						(void **)&dma->hist_cmap);
+			dma->lut_sts &= ~MDP3_LUT_HIST_EN;
+		} else if (!(dma->lut_sts & MDP3_LUT_HIST_EN)) {
+			/* Check if values sent are valid */
+			rc = mdp3_validate_lut_data(cmap);
+			if (rc) {
+				pr_err("Invalid HIST LUT data\n");
+				goto exit_err;
+			}
+			data_validated = true;
+
+			/* Allocate HIST cmap cache to store values */
+			rc = mdp3_alloc_lut_buffer(mfd->pdev,
+					(void **)&dma->hist_cmap);
+			if (rc) {
+				pr_err("HIST LUT config failed\n");
+				goto exit_err;
+			}
+			dma->lut_sts |= MDP3_LUT_HIST_EN;
+		}
+		/*
+		 * Copy the HIST LUT values from userspace to maintain
+		 * correct values user intended to program in cache.
+		 * The values programmed in HW might factor in presence
+		 * of other LUT modifying features hence can be
+		 * different from these user given values.
+		 */
+		if (dma->lut_sts & MDP3_LUT_HIST_EN) {
+			/* Validate LUT data if not yet validated */
+			if (!data_validated) {
+				rc = mdp3_validate_lut_data(cmap);
+				if (rc) {
+					pr_err("Invalid H LUT data\n");
+					goto exit_err;
+				}
+			}
+			rc = mdp3_copy_lut_buffer(dma->hist_cmap, cmap);
+			if (rc) {
+				pr_err("Could not cache Hist LUT\n");
+				goto exit_err;
+			}
+		}
+		break;
+	default:
+		pr_err("Invalid lut type: %u\n", cfg->lut_type);
+		rc = -EINVAL;
+		goto exit_err;
+	}
+
+	/*
+	 * In case both GC LUT and HIST LUT need to be programmed the gains
+	 * of each the individual LUTs need to be applied onto a single LUT
+	 * and applied in HW
+	 */
+	if ((dma->lut_sts & MDP3_LUT_HIST_EN) &&
+		(dma->lut_sts & MDP3_LUT_GC_EN)) {
+		rc = mdp3_lut_combine_gain(cmap, dma);
+		if (rc) {
+			pr_err("Combining gains failed rc = %d\n", rc);
+			goto exit_err;
+		}
+	}
+
+	rc = mdp3_ctrl_lut_update(mfd, cmap);
+	if (rc)
+		pr_err("Updating LUT failed! rc = %d\n", rc);
+exit_err:
+	mutex_unlock(&dma->pp_lock);
+	mutex_unlock(&mdp3_session->lock);
+	mdp3_free_lut_buffer(mfd->pdev, (void **) &cmap);
+	return rc;
+}
+
+/*
+ * mdp3_ctrl_lut_read() - copy a cached LUT (GC or HIST) to userspace.
+ * @mfd: framebuffer device data
+ * @cfg: destination descriptor; its cmap.red/green/blue are __user
+ *       pointers filled with MDP_LUT_SIZE entries each
+ *
+ * Return: 0 on success, -EPERM if the requested LUT was never
+ * programmed, -EINVAL on a bad lut_type, -EFAULT on copy failure.
+ */
+static int mdp3_ctrl_lut_read(struct msm_fb_data_type *mfd,
+				struct mdp_rgb_lut_data *cfg)
+{
+	int rc = 0;
+	struct fb_cmap *cmap;
+	struct mdp3_session_data *mdp3_session = mfd->mdp.private1;
+	struct mdp3_dma *dma = mdp3_session->dma;
+
+	switch (cfg->lut_type) {
+	case mdp_rgb_lut_gc:
+		if (!dma->gc_cmap) {
+			pr_err("GC not programmed\n");
+			return -EPERM;
+		}
+		cmap = dma->gc_cmap;
+		break;
+	case mdp_rgb_lut_hist:
+		if (!dma->hist_cmap) {
+			pr_err("Hist LUT not programmed\n");
+			return -EPERM;
+		}
+		cmap = dma->hist_cmap;
+		break;
+	default:
+		pr_err("Invalid lut type %u\n", cfg->lut_type);
+		return -EINVAL;
+	}
+
+	cfg->cmap.start = cmap->start;
+	cfg->cmap.len = cmap->len;
+
+	mutex_lock(&dma->pp_lock);
+	rc = copy_to_user(cfg->cmap.red, cmap->red, sizeof(u16) *
+								MDP_LUT_SIZE);
+	rc |= copy_to_user(cfg->cmap.green, cmap->green, sizeof(u16) *
+								MDP_LUT_SIZE);
+	rc |= copy_to_user(cfg->cmap.blue, cmap->blue, sizeof(u16) *
+								MDP_LUT_SIZE);
+	mutex_unlock(&dma->pp_lock);
+	/* copy_to_user() yields a byte count, not an errno. */
+	return rc ? -EFAULT : 0;
+}
+
+/*  Invoked from ctrl_on with session lock locked context */
+/*
+ * mdp3_ctrl_pp_resume() - restore post-processing state after resume.
+ * @mfd: framebuffer device data
+ *
+ * Marks the cached CSC config dirty so it is reprogrammed on the next
+ * kickoff, and re-pushes the cached gamma-correction LUT (combined
+ * with the histogram LUT gain when both are enabled).
+ */
+static void mdp3_ctrl_pp_resume(struct msm_fb_data_type *mfd)
+{
+	struct mdp3_session_data *mdp3_session;
+	struct mdp3_dma *dma;
+	struct fb_cmap *cmap;
+	int rc = 0;
+
+	mdp3_session = mfd->mdp.private1;
+	dma = mdp3_session->dma;
+
+	mutex_lock(&dma->pp_lock);
+	/*
+	 * if dma->ccs_config.ccs_enable is set then DMA PP block was enabled
+	 * via user space IOCTL.
+	 * Then set dma->ccs_config.ccs_dirty flag
+	 * Then PP block will be reconfigured when next kickoff comes.
+	 */
+	if (dma->ccs_config.ccs_enable)
+		dma->ccs_config.ccs_dirty = true;
+
+	/*
+	 * If gamma correction was enabled then we program the LUT registers
+	 * with the last configuration data before suspend. If gamma correction
+	 * is not enabled then we do not program anything. The LUT from
+	 * histogram processing algorithms will program hardware based on new
+	 * frame data if they are enabled.
+	 */
+	if (dma->lut_sts & MDP3_LUT_GC_EN) {
+
+		rc = mdp3_alloc_lut_buffer(mfd->pdev, (void **)&cmap);
+		if (rc) {
+			pr_err("No memory for GC LUT, rc = %d\n", rc);
+			goto exit_err;
+		}
+
+		if (dma->lut_sts & MDP3_LUT_HIST_EN) {
+			/* Both GC and HIST enabled: program combined gain. */
+			rc = mdp3_lut_combine_gain(cmap, dma);
+			if (rc) {
+				pr_err("Combining the gain failed rc=%d\n", rc);
+				goto exit_err;
+			}
+		} else {
+			rc = mdp3_copy_lut_buffer(cmap, dma->gc_cmap);
+			if (rc) {
+				pr_err("Updating GC failed rc = %d\n", rc);
+				goto exit_err;
+			}
+		}
+
+		rc = mdp3_ctrl_lut_update(mfd, cmap);
+		if (rc)
+			pr_err("GC Lut update failed rc=%d\n", rc);
+exit_err:
+		/* Scratch buffer only; cached cmaps stay intact. */
+		mdp3_free_lut_buffer(mfd->pdev, (void **)&cmap);
+	}
+
+	mutex_unlock(&dma->pp_lock);
+}
+
+/*
+ * mdp3_overlay_prepare() - handle MSMFB_OVERLAY_PREPARE for MDP3.
+ * @mfd: framebuffer device data
+ * @user_ovlist: userspace overlay list descriptor
+ *
+ * MDP3 supports exactly one overlay; validates the list, copies in the
+ * single request, applies it via mdp3_overlay_set(), and reports the
+ * processed count (0 or 1) back to userspace.
+ *
+ * Return: result of mdp3_overlay_set(), -EINVAL for a wrong overlay
+ * count, -EFAULT on any user-copy failure, -ENODEV with no session.
+ */
+static int mdp3_overlay_prepare(struct msm_fb_data_type *mfd,
+		struct mdp_overlay_list __user *user_ovlist)
+{
+	struct mdp_overlay_list ovlist;
+	struct mdp3_session_data *mdp3_session = mfd->mdp.private1;
+	struct mdp_overlay *req_list;
+	struct mdp_overlay *req;
+	int rc;
+
+	if (!mdp3_session)
+		return -ENODEV;
+
+	req = &mdp3_session->req_overlay;
+
+	if (copy_from_user(&ovlist, user_ovlist, sizeof(ovlist)))
+		return -EFAULT;
+
+	/* MDP3 has a single DMA pipe: only one overlay can be prepared. */
+	if (ovlist.num_overlays != 1) {
+		pr_err("OV_PREPARE failed: only 1 overlay allowed\n");
+		return -EINVAL;
+	}
+
+	/* Fetch the (userspace) pointer to the one request... */
+	if (copy_from_user(&req_list, ovlist.overlay_list,
+				sizeof(struct mdp_overlay *)))
+		return -EFAULT;
+
+	/* ...then the request itself. */
+	if (copy_from_user(req, req_list, sizeof(*req)))
+		return -EFAULT;
+
+	rc = mdp3_overlay_set(mfd, req);
+	if (!IS_ERR_VALUE(rc)) {
+		/* Write back the request (e.g. assigned overlay id). */
+		if (copy_to_user(req_list, req, sizeof(*req)))
+			return -EFAULT;
+	}
+
+	if (put_user(IS_ERR_VALUE(rc) ? 0 : 1,
+			&user_ovlist->processed_overlays))
+		return -EFAULT;
+
+	return rc;
+}
+
+/*
+ * mdp3_ctrl_ioctl_handler() - top-level MDP3 ioctl dispatcher.
+ * @mfd: framebuffer device data
+ * @cmd: MSMFB_* ioctl command
+ * @argp: userspace argument pointer
+ *
+ * Most commands require the display to be on; MSMFB_METADATA_GET,
+ * MSMFB_HISTOGRAM_STOP and MSMFB_HISTOGRAM are allowed while off.
+ * Blit/play paths first reset the controller when returning from
+ * splash screen or idle power collapse.
+ *
+ * Return: 0 on success, negative errno on failure; -EINVAL for
+ * unhandled commands.
+ */
+static int mdp3_ctrl_ioctl_handler(struct msm_fb_data_type *mfd,
+					u32 cmd, void __user *argp)
+{
+	int rc = -EINVAL;
+	struct mdp3_session_data *mdp3_session;
+	struct msmfb_metadata metadata;
+	struct mdp_overlay *req = NULL;
+	struct msmfb_overlay_data ov_data;
+	int val;
+
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+	if (!mdp3_session)
+		return -ENODEV;
+
+	req = &mdp3_session->req_overlay;
+
+	/* Reject most commands while the display is off. */
+	if (!mdp3_session->status && cmd != MSMFB_METADATA_GET &&
+		cmd != MSMFB_HISTOGRAM_STOP && cmd != MSMFB_HISTOGRAM) {
+		pr_err("mdp3_ctrl_ioctl_handler, display off!\n");
+		return -EPERM;
+	}
+
+	switch (cmd) {
+	case MSMFB_MDP_PP:
+		rc = mdp3_pp_ioctl(mfd, argp);
+		break;
+	case MSMFB_HISTOGRAM_START:
+	case MSMFB_HISTOGRAM_STOP:
+	case MSMFB_HISTOGRAM:
+		rc = mdp3_histo_ioctl(mfd, cmd, argp);
+		break;
+
+	case MSMFB_VSYNC_CTRL:
+	case MSMFB_OVERLAY_VSYNC_CTRL:
+		if (!copy_from_user(&val, argp, sizeof(val))) {
+			mutex_lock(&mdp3_session->lock);
+			mdp3_session->vsync_enabled = val;
+			rc = mdp3_ctrl_vsync_enable(mfd, val);
+			mutex_unlock(&mdp3_session->lock);
+		} else {
+			pr_err("MSMFB_OVERLAY_VSYNC_CTRL failed\n");
+			rc = -EFAULT;
+		}
+		break;
+	case MSMFB_ASYNC_BLIT:
+		/* Leaving splash/idle-PC requires a controller reset. */
+		mutex_lock(&mdp3_res->fs_idle_pc_lock);
+		if (mdp3_session->in_splash_screen || mdp3_res->idle_pc) {
+			pr_debug("%s: reset- in_splash = %d, idle_pc = %d",
+				__func__, mdp3_session->in_splash_screen,
+				mdp3_res->idle_pc);
+			mdp3_ctrl_reset(mfd);
+		}
+		mutex_unlock(&mdp3_res->fs_idle_pc_lock);
+		rc = mdp3_ctrl_async_blit_req(mfd, argp);
+		break;
+	case MSMFB_BLIT:
+		mutex_lock(&mdp3_res->fs_idle_pc_lock);
+		if (mdp3_session->in_splash_screen)
+			mdp3_ctrl_reset(mfd);
+		mutex_unlock(&mdp3_res->fs_idle_pc_lock);
+		rc = mdp3_ctrl_blit_req(mfd, argp);
+		break;
+	case MSMFB_METADATA_GET:
+		rc = copy_from_user(&metadata, argp, sizeof(metadata));
+		if (!rc)
+			rc = mdp3_get_metadata(mfd, &metadata);
+		if (!rc)
+			rc = copy_to_user(argp, &metadata, sizeof(metadata));
+		if (rc)
+			pr_err("mdp3_get_metadata failed (%d)\n", rc);
+		break;
+	case MSMFB_METADATA_SET:
+		rc = copy_from_user(&metadata, argp, sizeof(metadata));
+		if (!rc)
+			rc = mdp3_set_metadata(mfd, &metadata);
+		if (rc)
+			pr_err("mdp3_set_metadata failed (%d)\n", rc);
+		break;
+	case MSMFB_OVERLAY_GET:
+		rc = copy_from_user(req, argp, sizeof(*req));
+		if (!rc) {
+			rc = mdp3_overlay_get(mfd, req);
+
+		if (!IS_ERR_VALUE(rc))
+			rc = copy_to_user(argp, req, sizeof(*req));
+		}
+		if (rc)
+			pr_err("OVERLAY_GET failed (%d)\n", rc);
+		break;
+	case MSMFB_OVERLAY_SET:
+		rc = copy_from_user(req, argp, sizeof(*req));
+		if (!rc) {
+			rc = mdp3_overlay_set(mfd, req);
+
+		if (!IS_ERR_VALUE(rc))
+			rc = copy_to_user(argp, req, sizeof(*req));
+		}
+		if (rc)
+			pr_err("OVERLAY_SET failed (%d)\n", rc);
+		break;
+	case MSMFB_OVERLAY_UNSET:
+		/*
+		 * NOTE(review): copy_from_user() returns a bytes-not-copied
+		 * count, not an ERR_VALUE; a partial copy would slip through
+		 * this check. Left as-is to preserve behavior.
+		 */
+		if (!IS_ERR_VALUE(copy_from_user(&val, argp, sizeof(val))))
+			rc = mdp3_overlay_unset(mfd, val);
+		break;
+	case MSMFB_OVERLAY_PLAY:
+		rc = copy_from_user(&ov_data, argp, sizeof(ov_data));
+		mutex_lock(&mdp3_res->fs_idle_pc_lock);
+		if (mdp3_session->in_splash_screen)
+			mdp3_ctrl_reset(mfd);
+		mutex_unlock(&mdp3_res->fs_idle_pc_lock);
+		if (!rc)
+			rc = mdp3_overlay_play(mfd, &ov_data);
+		if (rc)
+			pr_err("OVERLAY_PLAY failed (%d)\n", rc);
+		break;
+	case MSMFB_OVERLAY_PREPARE:
+		rc = mdp3_overlay_prepare(mfd, argp);
+		break;
+	default:
+		break;
+	}
+	return rc;
+}
+
+/*
+ * mdp3_wait_for_dma_done() - wait for an outstanding DMA transfer.
+ * @session: display session whose dma_completion is awaited
+ *
+ * Return: 0 if no transfer was pending or it completed in time,
+ * -ETIME on timeout, or a negative value propagated from the wait.
+ */
+int mdp3_wait_for_dma_done(struct mdp3_session_data *session)
+{
+	int remaining;
+
+	/* Nothing in flight: nothing to wait for. */
+	if (!session->dma_active)
+		return 0;
+
+	remaining = wait_for_completion_timeout(&session->dma_completion,
+		KOFF_TIMEOUT);
+	if (remaining == 0)
+		return -ETIME;
+	if (remaining < 0)
+		return remaining;
+
+	/* Completed within the timeout: clear the in-flight flag. */
+	session->dma_active = 0;
+	return 0;
+}
+
+/*
+ * mdp3_update_panel_info() - dynamic mode switch (video <-> command).
+ * @mfd: framebuffer device data
+ * @mode: 1 for command mode, 0 for video mode
+ * @dest_ctrl: unused here
+ *
+ * Notifies the panel driver, updates the panel type, reselects the
+ * display interface and reinitializes it for the new mode.
+ *
+ * NOTE(review): a failure from the panel event handler is only logged;
+ * the switch proceeds and 0 is returned regardless.
+ *
+ * Return: 0 (also when the panel has no event handler).
+ */
+static int mdp3_update_panel_info(struct msm_fb_data_type *mfd, int mode,
+		int dest_ctrl)
+{
+	int ret = 0;
+	struct mdp3_session_data *mdp3_session;
+	struct mdss_panel_data *panel;
+	u32 intf_type = 0;
+
+	if (!mfd || !mfd->mdp.private1)
+		return -EINVAL;
+
+	mdp3_session = mfd->mdp.private1;
+	panel = mdp3_session->panel;
+
+	if (!panel->event_handler)
+		return 0;
+	ret = panel->event_handler(panel, MDSS_EVENT_DSI_UPDATE_PANEL_DATA,
+						(void *)(unsigned long)mode);
+	if (ret)
+		pr_err("Dynamic switch to %s mode failed!\n",
+					mode ? "command" : "video");
+	if (mode == 1)
+		mfd->panel.type = MIPI_CMD_PANEL;
+	else
+		mfd->panel.type = MIPI_VIDEO_PANEL;
+
+	/* Command mode must wait for DMA completion on each kickoff. */
+	if (mfd->panel.type != MIPI_VIDEO_PANEL)
+		mdp3_session->wait_for_dma_done = mdp3_wait_for_dma_done;
+
+	/* Rebind and reinitialize the interface for the new mode. */
+	intf_type = mdp3_ctrl_get_intf_type(mfd);
+	mdp3_session->intf->cfg.type = intf_type;
+	mdp3_session->intf->available = 1;
+	mdp3_session->intf->in_use = 1;
+	mdp3_res->intf[intf_type].in_use = 1;
+
+	mdp3_intf_init(mdp3_session->intf);
+
+	mdp3_session->dma->output_config.out_sel = intf_type;
+	mdp3_session->status = mdp3_session->intf->active;
+
+	return 0;
+}
+
+/*
+ * mdp3_vsync_retire_setup() - create the retire-fence infrastructure.
+ * @mfd: framebuffer device data
+ *
+ * Creates a per-framebuffer sw_sync timeline, registers a vsync retire
+ * client on the DMA pipe and prepares the retire work item.
+ *
+ * Return: 0 on success, -ENOMEM if the timeline cannot be created.
+ */
+static int mdp3_vsync_retire_setup(struct msm_fb_data_type *mfd)
+{
+	struct mdp3_session_data *mdp3_session;
+	struct mdp3_notification retire_client;
+	char name[24];
+
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+
+	snprintf(name, sizeof(name), "mdss_fb%d_retire", mfd->index);
+	mdp3_session->vsync_timeline = sw_sync_timeline_create(name);
+	if (mdp3_session->vsync_timeline == NULL) {
+		pr_err("cannot vsync create time line");
+		return -ENOMEM;
+	}
+
+	/* Add retire vsync handler */
+	retire_client.handler = mdp3_vsync_retire_handle_vsync;
+	retire_client.arg = mdp3_session;
+
+	if (mdp3_session->dma)
+		mdp3_session->dma->retire_client = retire_client;
+
+	INIT_WORK(&mdp3_session->retire_work, mdp3_vsync_retire_work_handler);
+
+	return 0;
+}
+
+/*
+ * mdp3_ctrl_init() - create and register an MDP3 display session.
+ * @mfd: framebuffer device data for this display
+ *
+ * Wires the mdp interface function table, allocates the session,
+ * spawns the dma-done worker thread, binds the DMA pipe and display
+ * interface, creates sysfs nodes, enables runtime PM and handles
+ * continuous-splash handover.
+ *
+ * Return: 0 on success, negative errno on failure (session is freed).
+ */
+int mdp3_ctrl_init(struct msm_fb_data_type *mfd)
+{
+	struct device *dev = mfd->fbi->dev;
+	struct msm_mdp_interface *mdp3_interface = &mfd->mdp;
+	struct mdp3_session_data *mdp3_session = NULL;
+	u32 intf_type = MDP3_DMA_OUTPUT_SEL_DSI_VIDEO;
+	int rc;
+	int splash_mismatch = 0;
+	struct sched_param sched = { .sched_priority = 16 };
+
+	pr_info("mdp3_ctrl_init\n");
+	rc = mdp3_parse_dt_splash(mfd);
+	if (rc)
+		splash_mismatch = 1;
+
+	/* Populate the callbacks the generic fb layer will invoke. */
+	mdp3_interface->on_fnc = mdp3_ctrl_on;
+	mdp3_interface->off_fnc = mdp3_ctrl_off;
+	mdp3_interface->do_histogram = NULL;
+	mdp3_interface->cursor_update = NULL;
+	mdp3_interface->dma_fnc = mdp3_ctrl_pan_display;
+	mdp3_interface->ioctl_handler = mdp3_ctrl_ioctl_handler;
+	mdp3_interface->kickoff_fnc = mdp3_ctrl_display_commit_kickoff;
+	mdp3_interface->pre_commit = mdp3_layer_pre_commit;
+	mdp3_interface->atomic_validate = mdp3_layer_atomic_validate;
+	mdp3_interface->lut_update = NULL;
+	mdp3_interface->configure_panel = mdp3_update_panel_info;
+	mdp3_interface->input_event_handler = NULL;
+	mdp3_interface->signal_retire_fence = NULL;
+
+	mdp3_session = kzalloc(sizeof(struct mdp3_session_data), GFP_KERNEL);
+	if (!mdp3_session)
+		return -ENOMEM;
+
+	mutex_init(&mdp3_session->lock);
+	INIT_WORK(&mdp3_session->clk_off_work, mdp3_dispatch_clk_off);
+
+	/* Dedicated worker thread to dispatch dma-done notifications. */
+	init_kthread_worker(&mdp3_session->worker);
+	init_kthread_work(&mdp3_session->dma_done_work, mdp3_dispatch_dma_done);
+
+	mdp3_session->thread = kthread_run(kthread_worker_fn,
+					   &mdp3_session->worker,
+					   "mdp3_dispatch_dma_done");
+
+	if (IS_ERR(mdp3_session->thread)) {
+		pr_err("Can't initialize mdp3_dispatch_dma_done thread\n");
+		rc = -ENODEV;
+		goto init_done;
+	}
+
+	sched_setscheduler(mdp3_session->thread, SCHED_FIFO, &sched);
+
+	atomic_set(&mdp3_session->vsync_countdown, 0);
+	mutex_init(&mdp3_session->histo_lock);
+	mdp3_session->dma = mdp3_get_dma_pipe(MDP3_DMA_CAP_ALL);
+	if (!mdp3_session->dma) {
+		rc = -ENODEV;
+		goto init_done;
+	}
+
+	rc = mdp3_dma_init(mdp3_session->dma);
+	if (rc) {
+		pr_err("fail to init dma\n");
+		goto init_done;
+	}
+
+	/* Bind the display interface matching the panel type. */
+	intf_type = mdp3_ctrl_get_intf_type(mfd);
+	mdp3_session->intf = mdp3_get_display_intf(intf_type);
+	if (!mdp3_session->intf) {
+		rc = -ENODEV;
+		goto init_done;
+	}
+	rc = mdp3_intf_init(mdp3_session->intf);
+	if (rc) {
+		pr_err("fail to init interface\n");
+		goto init_done;
+	}
+
+	mdp3_session->dma->output_config.out_sel = intf_type;
+	mdp3_session->mfd = mfd;
+	mdp3_session->panel = dev_get_platdata(&mfd->pdev->dev);
+	mdp3_session->status = mdp3_session->intf->active;
+	mdp3_session->overlay.id = MSMFB_NEW_REQUEST;
+	mdp3_bufq_init(&mdp3_session->bufq_in);
+	mdp3_bufq_init(&mdp3_session->bufq_out);
+	mdp3_session->histo_status = 0;
+	mdp3_session->lut_sel = 0;
+	BLOCKING_INIT_NOTIFIER_HEAD(&mdp3_session->notifier_head);
+
+	init_timer(&mdp3_session->vsync_timer);
+	mdp3_session->vsync_timer.function = mdp3_vsync_timer_func;
+	/*
+	 * NOTE(review): casting a pointer to u32 truncates on 64-bit
+	 * builds; presumably this driver only targets 32-bit — confirm.
+	 */
+	mdp3_session->vsync_timer.data = (u32)mdp3_session;
+	mdp3_session->vsync_period = 1000 / mfd->panel_info->mipi.frame_rate;
+	mfd->mdp.private1 = mdp3_session;
+	init_completion(&mdp3_session->dma_completion);
+	if (intf_type != MDP3_DMA_OUTPUT_SEL_DSI_VIDEO)
+		mdp3_session->wait_for_dma_done = mdp3_wait_for_dma_done;
+
+	rc = sysfs_create_group(&dev->kobj, &vsync_fs_attr_group);
+	if (rc) {
+		pr_err("vsync sysfs group creation failed, ret=%d\n", rc);
+		goto init_done;
+	}
+	rc = sysfs_create_group(&dev->kobj, &generic_attr_group);
+	if (rc) {
+		pr_err("generic sysfs group creation failed, ret=%d\n", rc);
+		goto init_done;
+	}
+
+	/* Cache sysfs dirents used for poll/uevent notification. */
+	mdp3_session->vsync_event_sd = sysfs_get_dirent(dev->kobj.sd,
+							"vsync_event");
+	if (!mdp3_session->vsync_event_sd) {
+		pr_err("vsync_event sysfs lookup failed\n");
+		rc = -ENODEV;
+		goto init_done;
+	}
+
+	mdp3_session->dma->hist_event_sd = sysfs_get_dirent(dev->kobj.sd,
+							"hist_event");
+	if (!mdp3_session->dma->hist_event_sd) {
+		pr_err("hist_event sysfs lookup failed\n");
+		rc = -ENODEV;
+		goto init_done;
+	}
+
+	mdp3_session->bl_event_sd = sysfs_get_dirent(dev->kobj.sd,
+							"bl_event");
+	if (!mdp3_session->bl_event_sd) {
+		pr_err("bl_event sysfs lookup failed\n");
+		rc = -ENODEV;
+		goto init_done;
+	}
+
+	rc = mdp3_create_sysfs_link(dev);
+	if (rc)
+		pr_warn("problem creating link to mdp sysfs\n");
+
+	/* Enable PM runtime */
+	pm_runtime_set_suspended(&mdp3_res->pdev->dev);
+	pm_runtime_enable(&mdp3_res->pdev->dev);
+
+	kobject_uevent(&dev->kobj, KOBJ_ADD);
+	pr_debug("vsync kobject_uevent(KOBJ_ADD)\n");
+
+	/* Continuous splash: the bootloader left the display running. */
+	if (mdp3_get_cont_spash_en()) {
+		mdp3_session->clk_on = 1;
+		mdp3_session->in_splash_screen = 1;
+		mdp3_ctrl_notifier_register(mdp3_session,
+			&mdp3_session->mfd->mdp_sync_pt_data.notifier);
+	}
+
+	/*
+	 * Increment the overlay active count.
+	 * This is needed to ensure that if idle power collapse kicks in
+	 * right away, it would be handled correctly.
+	 */
+	atomic_inc(&mdp3_res->active_intf_cnt);
+	if (splash_mismatch) {
+		pr_err("splash memory mismatch, stop splash\n");
+		mdp3_ctrl_off(mfd);
+	}
+
+	mdp3_session->vsync_before_commit = true;
+	mdp3_session->dyn_pu_state = mfd->panel_info->partial_update_enabled;
+
+	if (mfd->panel_info->mipi.dms_mode ||
+			mfd->panel_info->type == MIPI_CMD_PANEL) {
+		rc = mdp3_vsync_retire_setup(mfd);
+		if (IS_ERR_VALUE(rc)) {
+			pr_err("unable to create vsync timeline\n");
+			goto init_done;
+		}
+	}
+init_done:
+	if (IS_ERR_VALUE(rc))
+		kfree(mdp3_session);
+
+	return rc;
+}
diff --git a/drivers/video/fbdev/msm/mdp3_ctrl.h b/drivers/video/fbdev/msm/mdp3_ctrl.h
new file mode 100644
index 0000000..2cc3421
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdp3_ctrl.h
@@ -0,0 +1,95 @@
+/* Copyright (c) 2013-2014, 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDP3_CTRL_H
+#define MDP3_CTRL_H
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/timer.h>
+#include <linux/kthread.h>
+
+#include "mdp3.h"
+#include "mdp3_dma.h"
+#include "mdss_fb.h"
+#include "mdss_panel.h"
+
+#define MDP3_MAX_BUF_QUEUE 8
+#define MDP3_LUT_HIST_EN 0x001
+#define MDP3_LUT_GC_EN 0x002
+
+/*
+ * Fixed-size ring of queued image buffers. push_idx/pop_idx index the
+ * next slots to write/read; count tracks current occupancy.
+ */
+struct mdp3_buffer_queue {
+	struct mdp3_img_data img_data[MDP3_MAX_BUF_QUEUE];
+	int count;
+	int push_idx;
+	int pop_idx;
+};
+
+/*
+ * Per-display state for an MDP3 session, created by mdp3_ctrl_init()
+ * and stored in mfd->mdp.private1.
+ */
+struct mdp3_session_data {
+	struct mutex lock;		/* serializes session operations */
+	int status;			/* nonzero while the display is on */
+	struct mdp3_dma *dma;		/* bound DMA pipe */
+	struct mdss_panel_data *panel;
+	struct mdp3_intf *intf;		/* bound display interface */
+	struct msm_fb_data_type *mfd;
+	ktime_t vsync_time;		/* timestamp of the last vsync */
+	struct timer_list vsync_timer;
+	int vsync_period;		/* in ms, derived from frame rate */
+	struct kernfs_node *vsync_event_sd;	/* sysfs notify handles */
+	struct kernfs_node *bl_event_sd;
+	struct mdp_overlay overlay;	/* currently active overlay */
+	struct mdp_overlay req_overlay;	/* scratch for ioctl requests */
+	struct mdp3_buffer_queue bufq_in;
+	struct mdp3_buffer_queue bufq_out;
+	struct work_struct clk_off_work;	/* deferred clock gating */
+
+	/* Worker thread dispatching dma-done notifications. */
+	struct kthread_work dma_done_work;
+	struct kthread_worker worker;
+	struct task_struct *thread;
+
+	atomic_t dma_done_cnt;
+	int histo_status;		/* histogram collection running */
+	struct mutex histo_lock;	/* protects histo_status */
+	int lut_sel;			/* active LUT bank (ping-pong) */
+	bool vsync_before_commit;
+	bool first_commit;
+	int clk_on;			/* mdp/dsi clocks currently on */
+	struct blocking_notifier_head notifier_head;
+
+	int vsync_enabled;
+	atomic_t vsync_countdown; /* Used to count down  */
+	bool in_splash_screen;	/* continuous splash still active */
+	bool esd_recovery;
+	int dyn_pu_state; /* dynamic partial update status */
+	u32 bl_events;		/* backlight event counter for sysfs */
+
+	bool dma_active;	/* a DMA transfer is in flight */
+	struct completion dma_completion;
+	int (*wait_for_dma_done)(struct mdp3_session_data *session);
+
+	/* For retire fence */
+	struct sw_sync_timeline *vsync_timeline;
+	int retire_cnt;
+	struct work_struct retire_work;
+};
+
+void mdp3_bufq_deinit(struct mdp3_buffer_queue *bufq);
+int mdp3_ctrl_init(struct msm_fb_data_type *mfd);
+int mdp3_bufq_push(struct mdp3_buffer_queue *bufq,
+			struct mdp3_img_data *data);
+int mdp3_ctrl_get_source_format(u32 imgType);
+int mdp3_ctrl_get_pack_pattern(u32 imgType);
+int mdp3_ctrl_reset(struct msm_fb_data_type *mfd);
+
+#endif /* MDP3_CTRL_H */
diff --git a/drivers/video/fbdev/msm/mdp3_dma.c b/drivers/video/fbdev/msm/mdp3_dma.c
new file mode 100644
index 0000000..b7c8d43
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdp3_dma.c
@@ -0,0 +1,1291 @@
+/* Copyright (c) 2013-2014, 2016, 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/bitops.h>
+#include <linux/iopoll.h>
+
+#include "mdp3.h"
+#include "mdp3_dma.h"
+#include "mdp3_hwio.h"
+#include "mdss_debug.h"
+
+#define DMA_STOP_POLL_SLEEP_US 1000
+#define DMA_STOP_POLL_TIMEOUT_US 200000
+#define DMA_HISTO_RESET_TIMEOUT_MS 40
+#define DMA_LUT_CONFIG_MASK 0xfffffbe8
+#define DMA_CCS_CONFIG_MASK 0xfffffc17
+#define HIST_WAIT_TIMEOUT(frame) ((75 * HZ * (frame)) / 1000)
+
+#define VSYNC_SELECT 0x024
+#define VSYNC_TOTAL_LINES_SHIFT 21
+#define VSYNC_COUNT_MASK 0x7ffff
+#define VSYNC_THRESH_CONT_SHIFT 16
+
+/*
+ * mdp3_vsync_intr_handler() - vsync / start-of-frame interrupt handler.
+ *
+ * Snapshots the registered vsync and retire clients under dma_lock,
+ * completes vsync_comp for any one-shot waiter, then invokes the client
+ * callbacks with the lock released.  When no vsync client is registered
+ * the interrupt is disabled again so it only fires for explicit waits.
+ */
+static void mdp3_vsync_intr_handler(int type, void *arg)
+{
+	struct mdp3_dma *dma = (struct mdp3_dma *)arg;
+	struct mdp3_notification vsync_client;
+	struct mdp3_notification retire_client;
+	unsigned int wait_for_next_vs;
+
+	pr_debug("mdp3_vsync_intr_handler\n");
+	spin_lock(&dma->dma_lock);
+	/* snapshot clients so the callbacks can run outside the lock */
+	vsync_client = dma->vsync_client;
+	retire_client = dma->retire_client;
+	wait_for_next_vs = !dma->vsync_status;
+	dma->vsync_status = 0;
+	if (wait_for_next_vs)
+		complete(&dma->vsync_comp);
+	spin_unlock(&dma->dma_lock);
+	if (vsync_client.handler) {
+		vsync_client.handler(vsync_client.arg);
+	} else {
+		/* one-shot wait with no permanent client: re-disable irq */
+		if (wait_for_next_vs)
+			mdp3_irq_disable_nosync(type);
+	}
+
+	if (retire_client.handler)
+		retire_client.handler(retire_client.arg);
+}
+
+/*
+ * mdp3_dma_done_intr_handler() - DMA transfer-done interrupt handler.
+ *
+ * Completes dma_comp for waiters, disables the (one-shot) interrupt,
+ * then calls the registered DMA-done notifier outside the spinlock.
+ */
+static void mdp3_dma_done_intr_handler(int type, void *arg)
+{
+	struct mdp3_dma *dma = (struct mdp3_dma *)arg;
+	struct mdp3_notification dma_client;
+
+	pr_debug("mdp3_dma_done_intr_handler\n");
+	spin_lock(&dma->dma_lock);
+	/* snapshot the client so it can be invoked outside the lock */
+	dma_client = dma->dma_notifier_client;
+	complete(&dma->dma_comp);
+	spin_unlock(&dma->dma_lock);
+	mdp3_irq_disable_nosync(type);
+	if (dma_client.handler)
+		dma_client.handler(dma_client.arg);
+}
+
+/*
+ * mdp3_hist_done_intr_handler() - DMA_P histogram interrupt handler.
+ *
+ * Acks all raised histogram status bits, but acts only on the ones that
+ * are currently enabled.  HIST_DONE marks the collected data ready and
+ * notifies listeners; RESET_DONE returns the histogram block to idle.
+ */
+static void mdp3_hist_done_intr_handler(int type, void *arg)
+{
+	struct mdp3_dma *dma = (struct mdp3_dma *)arg;
+	u32 isr, mask;
+
+	isr = MDP3_REG_READ(MDP3_REG_DMA_P_HIST_INTR_STATUS);
+	mask = MDP3_REG_READ(MDP3_REG_DMA_P_HIST_INTR_ENABLE);
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_INTR_CLEAR, isr);
+
+	/* ignore status bits whose interrupt is not enabled */
+	isr &= mask;
+	if (isr == 0)
+		return;
+
+	if (isr & MDP3_DMA_P_HIST_INTR_HIST_DONE_BIT) {
+		spin_lock(&dma->histo_lock);
+		dma->histo_state = MDP3_DMA_HISTO_STATE_READY;
+		complete(&dma->histo_comp);
+		spin_unlock(&dma->histo_lock);
+		mdp3_hist_intr_notify(dma);
+	}
+	if (isr & MDP3_DMA_P_HIST_INTR_RESET_DONE_BIT) {
+		spin_lock(&dma->histo_lock);
+		dma->histo_state = MDP3_DMA_HISTO_STATE_IDLE;
+		complete(&dma->histo_comp);
+		spin_unlock(&dma->histo_lock);
+	}
+}
+
+/*
+ * mdp3_dma_callback_enable() - enable the MDP3 interrupts that back the
+ * requested callback @type bits for this pipe's output interface.
+ *
+ * NOTE(review): HIST_RESET_DONE and HIST_DONE both enable
+ * MDP3_INTR_DMA_P_HISTO, so a combined type enables that irq twice —
+ * confirm mdp3_irq_enable() is reference-counted or idempotent.
+ */
+void mdp3_dma_callback_enable(struct mdp3_dma *dma, int type)
+{
+	int irq_bit;
+
+	pr_debug("mdp3_dma_callback_enable type=%d\n", type);
+
+	if (dma->dma_sel == MDP3_DMA_P) {
+		if (type & MDP3_DMA_CALLBACK_TYPE_HIST_RESET_DONE)
+			mdp3_irq_enable(MDP3_INTR_DMA_P_HISTO);
+
+		if (type & MDP3_DMA_CALLBACK_TYPE_HIST_DONE)
+			mdp3_irq_enable(MDP3_INTR_DMA_P_HISTO);
+	}
+
+	if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO ||
+		dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_LCDC) {
+		/* video-mode interfaces signal vsync via start-of-frame */
+		if (type & MDP3_DMA_CALLBACK_TYPE_VSYNC)
+			mdp3_irq_enable(MDP3_INTR_LCDC_START_OF_FRAME);
+	} else if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
+		if (type & MDP3_DMA_CALLBACK_TYPE_VSYNC) {
+			/* per-pipe sync line interrupt: base + dma_sel */
+			irq_bit = MDP3_INTR_SYNC_PRIMARY_LINE;
+			irq_bit += dma->dma_sel;
+			mdp3_irq_enable(irq_bit);
+		}
+
+		if (type & MDP3_DMA_CALLBACK_TYPE_DMA_DONE) {
+			irq_bit = MDP3_INTR_DMA_P_DONE;
+			if (dma->dma_sel == MDP3_DMA_S)
+				irq_bit = MDP3_INTR_DMA_S_DONE;
+			mdp3_irq_enable(irq_bit);
+		}
+	} else {
+		pr_err("mdp3_dma_callback_enable not supported interface\n");
+	}
+}
+
+/*
+ * mdp3_dma_callback_disable() - disable the MDP3 interrupts backing the
+ * requested callback @type bits.  Mirror of mdp3_dma_callback_enable().
+ */
+void mdp3_dma_callback_disable(struct mdp3_dma *dma, int type)
+{
+	int irq_bit;
+
+	pr_debug("mdp3_dma_callback_disable type=%d\n", type);
+
+	if (dma->dma_sel == MDP3_DMA_P) {
+		if (type & MDP3_DMA_CALLBACK_TYPE_HIST_RESET_DONE)
+			mdp3_irq_disable(MDP3_INTR_DMA_P_HISTO);
+
+		if (type & MDP3_DMA_CALLBACK_TYPE_HIST_DONE)
+			mdp3_irq_disable(MDP3_INTR_DMA_P_HISTO);
+	}
+
+	if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO ||
+		dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_LCDC) {
+		if (type & MDP3_DMA_CALLBACK_TYPE_VSYNC)
+			mdp3_irq_disable(MDP3_INTR_LCDC_START_OF_FRAME);
+	} else if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
+		if (type & MDP3_DMA_CALLBACK_TYPE_VSYNC) {
+			irq_bit = MDP3_INTR_SYNC_PRIMARY_LINE;
+			irq_bit += dma->dma_sel;
+			mdp3_irq_disable(irq_bit);
+			/*
+			 * Clear read pointer interrupt before disabling clocks.
+			 * Else pending ISR handling will result in NOC error
+			 * since the clock will be disable after this point.
+			 */
+			mdp3_clear_irq(irq_bit);
+		}
+
+		if (type & MDP3_DMA_CALLBACK_TYPE_DMA_DONE) {
+			irq_bit = MDP3_INTR_DMA_P_DONE;
+			if (dma->dma_sel == MDP3_DMA_S)
+				irq_bit = MDP3_INTR_DMA_S_DONE;
+			mdp3_irq_disable(irq_bit);
+		}
+	}
+}
+
+/*
+ * mdp3_dma_callback_setup() - register the interrupt callbacks needed
+ * by this pipe's output interface (vsync, dma-done, histogram).
+ *
+ * Returns 0 on success, -ENODEV for an unsupported interface.
+ * NOTE(review): individual errors are OR-ed into rc, so a non-zero
+ * result may not be a single valid errno value — confirm callers only
+ * test for zero/non-zero.
+ */
+static int mdp3_dma_callback_setup(struct mdp3_dma *dma)
+{
+	int rc = 0;
+	struct mdp3_intr_cb vsync_cb = {
+		.cb = mdp3_vsync_intr_handler,
+		.data = dma,
+	};
+
+	struct mdp3_intr_cb dma_cb = {
+		.cb = mdp3_dma_done_intr_handler,
+		.data = dma,
+	};
+
+
+	struct mdp3_intr_cb hist_cb = {
+		.cb = mdp3_hist_done_intr_handler,
+		.data = dma,
+	};
+
+	/* histogram hardware only exists on the DMA_P pipe */
+	if (dma->dma_sel == MDP3_DMA_P)
+		rc = mdp3_set_intr_callback(MDP3_INTR_DMA_P_HISTO, &hist_cb);
+
+	if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO ||
+		dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_LCDC)
+		rc |= mdp3_set_intr_callback(MDP3_INTR_LCDC_START_OF_FRAME,
+					&vsync_cb);
+	else if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
+		int irq_bit = MDP3_INTR_SYNC_PRIMARY_LINE;
+
+		/* per-pipe sync line interrupt: base + dma_sel */
+		irq_bit += dma->dma_sel;
+		rc |= mdp3_set_intr_callback(irq_bit, &vsync_cb);
+		irq_bit = MDP3_INTR_DMA_P_DONE;
+		if (dma->dma_sel == MDP3_DMA_S)
+			irq_bit = MDP3_INTR_DMA_S_DONE;
+		rc |= mdp3_set_intr_callback(irq_bit, &dma_cb);
+	} else {
+		pr_err("mdp3_dma_callback_setup not supported interface\n");
+		rc = -ENODEV;
+	}
+
+	return rc;
+}
+
+/*
+ * mdp3_dma_vsync_enable() - register (or, with a NULL @vsync_client,
+ * unregister) a vsync notification client.
+ *
+ * The client pointer swap happens under dma_lock; the matching irq
+ * enable/disable is performed afterwards, only when the registration
+ * actually changed.
+ */
+static void mdp3_dma_vsync_enable(struct mdp3_dma *dma,
+				struct mdp3_notification *vsync_client)
+{
+	unsigned long flag;
+	int updated = 0;
+	int cb_type = MDP3_DMA_CALLBACK_TYPE_VSYNC;
+
+	pr_debug("mdp3_dma_vsync_enable\n");
+
+	spin_lock_irqsave(&dma->dma_lock, flag);
+	if (vsync_client) {
+		/* only update when the handler actually changes */
+		if (dma->vsync_client.handler != vsync_client->handler) {
+			dma->vsync_client = *vsync_client;
+			updated = 1;
+		}
+	} else {
+		if (dma->vsync_client.handler) {
+			dma->vsync_client.handler = NULL;
+			dma->vsync_client.arg = NULL;
+			updated = 1;
+		}
+	}
+	spin_unlock_irqrestore(&dma->dma_lock, flag);
+
+	if (updated) {
+		if (vsync_client && vsync_client->handler)
+			mdp3_dma_callback_enable(dma, cb_type);
+		else
+			mdp3_dma_callback_disable(dma, cb_type);
+	}
+}
+
+/*
+ * mdp3_dma_done_notifier() - install or clear the DMA-done notification
+ * client under dma_lock.
+ *
+ * A NULL @dma_client clears the currently registered handler.
+ */
+static void mdp3_dma_done_notifier(struct mdp3_dma *dma,
+				struct mdp3_notification *dma_client)
+{
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&dma->dma_lock, irq_flags);
+	if (!dma_client) {
+		dma->dma_notifier_client.handler = NULL;
+		dma->dma_notifier_client.arg = NULL;
+	} else {
+		dma->dma_notifier_client = *dma_client;
+	}
+	spin_unlock_irqrestore(&dma->dma_lock, irq_flags);
+}
+
+/*
+ * mdp3_dma_sync_config() - program the hardware tear-check / vsync
+ * counter block for a command-mode panel.
+ *
+ * Derives the vsync counter rate from the panel geometry and refresh
+ * rate (te->refx100 is the refresh rate x100), then writes the sync
+ * config, init value, read-pointer irq line, thresholds, start position
+ * and tear-check enable registers.  Always returns 0.
+ */
+int mdp3_dma_sync_config(struct mdp3_dma *dma,
+	struct mdp3_dma_source *source_config, struct mdp3_tear_check *te)
+{
+	u32 vsync_clk_speed_hz, vclks_line, cfg;
+	int porch = source_config->vporch;
+	int height = source_config->height;
+	int total_lines = height + porch;
+	int dma_sel = dma->dma_sel;
+
+	vsync_clk_speed_hz = MDP_VSYNC_CLK_RATE;
+
+	cfg = total_lines << VSYNC_TOTAL_LINES_SHIFT;
+	total_lines *= te->frame_rate;
+
+	/* vsync clock ticks per line (guard against divide-by-zero) */
+	vclks_line = (total_lines) ? vsync_clk_speed_hz / total_lines : 0;
+
+	cfg |= BIT(19);
+	if (te->hw_vsync_mode)
+		cfg |= BIT(20);
+
+	/* scale counter by panel frame rate vs. nominal refresh rate */
+	if (te->refx100) {
+		vclks_line = vclks_line * te->frame_rate *
+			100 / te->refx100;
+	} else {
+		pr_warn("refx100 cannot be zero! Use 6000 as default\n");
+		vclks_line = vclks_line * te->frame_rate *
+			100 / 6000;
+	}
+
+	cfg |= (vclks_line & VSYNC_COUNT_MASK);
+
+	MDP3_REG_WRITE(MDP3_REG_SYNC_CONFIG_0 + dma_sel, cfg);
+	MDP3_REG_WRITE(MDP3_REG_VSYNC_SEL, VSYNC_SELECT);
+	MDP3_REG_WRITE(MDP3_REG_PRIMARY_VSYNC_INIT_VAL + dma_sel,
+				te->vsync_init_val);
+	MDP3_REG_WRITE(MDP3_REG_PRIMARY_RD_PTR_IRQ, te->rd_ptr_irq);
+	MDP3_REG_WRITE(MDP3_REG_SYNC_THRESH_0 + dma_sel,
+		((te->sync_threshold_continue << VSYNC_THRESH_CONT_SHIFT) |
+				 te->sync_threshold_start));
+	MDP3_REG_WRITE(MDP3_REG_PRIMARY_START_P0S + dma_sel, te->start_pos);
+	MDP3_REG_WRITE(MDP3_REG_TEAR_CHECK_EN, te->tear_check_en);
+	return 0;
+}
+
+/*
+ * mdp3_dmap_config() - configure the DMA_P pipe from source and output
+ * descriptors.
+ *
+ * Packs format, dither, output select, mask polarity, component flip,
+ * pack pattern/align and output bit depth into the DMA_P config word.
+ * Register writes are skipped while the continuous-splash screen is
+ * still active so the boot image is not disturbed.  Caches both
+ * descriptors in @dma and registers the interrupt callbacks.
+ */
+static int mdp3_dmap_config(struct mdp3_dma *dma,
+			struct mdp3_dma_source *source_config,
+			struct mdp3_dma_output_config *output_config,
+			bool splash_screen_active)
+{
+	u32 dma_p_cfg_reg, dma_p_size, dma_p_out_xy;
+
+	dma_p_cfg_reg = source_config->format << 25;
+	if (output_config->dither_en)
+		dma_p_cfg_reg |= BIT(24);
+	dma_p_cfg_reg |= output_config->out_sel << 19;
+	dma_p_cfg_reg |= output_config->bit_mask_polarity << 18;
+	dma_p_cfg_reg |= output_config->color_components_flip << 14;
+	dma_p_cfg_reg |= output_config->pack_pattern << 8;
+	dma_p_cfg_reg |= output_config->pack_align << 7;
+	dma_p_cfg_reg |= output_config->color_comp_out_bits;
+
+	dma_p_size = source_config->width | (source_config->height << 16);
+	dma_p_out_xy = source_config->x | (source_config->y << 16);
+	if (!splash_screen_active) {
+		MDP3_REG_WRITE(MDP3_REG_DMA_P_CONFIG, dma_p_cfg_reg);
+		MDP3_REG_WRITE(MDP3_REG_DMA_P_SIZE, dma_p_size);
+		MDP3_REG_WRITE(MDP3_REG_DMA_P_IBUF_ADDR,
+				(u32)source_config->buf);
+		MDP3_REG_WRITE(MDP3_REG_DMA_P_IBUF_Y_STRIDE,
+				source_config->stride);
+		MDP3_REG_WRITE(MDP3_REG_DMA_P_OUT_XY, dma_p_out_xy);
+		MDP3_REG_WRITE(MDP3_REG_DMA_P_FETCH_CFG, 0x40);
+	}
+
+	dma->source_config = *source_config;
+	dma->output_config = *output_config;
+
+	/* underflow irq applies to the continuously-fed (video) interfaces */
+	if (dma->output_config.out_sel != MDP3_DMA_OUTPUT_SEL_DSI_CMD)
+		mdp3_irq_enable(MDP3_INTR_LCDC_UNDERFLOW);
+
+	mdp3_dma_callback_setup(dma);
+	return 0;
+}
+
+/*
+ * mdp3_dmap_config_source() - reprogram DMA_P format, pack pattern,
+ * size and stride from the cached source config and current ROI.
+ *
+ * Read-modify-write of the config register keeps all other fields.
+ */
+static void mdp3_dmap_config_source(struct mdp3_dma *dma)
+{
+	struct mdp3_dma_source *source_config = &dma->source_config;
+	u32 dma_p_cfg_reg, dma_p_size;
+
+	dma_p_cfg_reg = MDP3_REG_READ(MDP3_REG_DMA_P_CONFIG);
+	dma_p_cfg_reg &= ~MDP3_DMA_IBUF_FORMAT_MASK;
+	dma_p_cfg_reg |= source_config->format << 25;
+	dma_p_cfg_reg &= ~MDP3_DMA_PACK_PATTERN_MASK;
+	dma_p_cfg_reg |= dma->output_config.pack_pattern << 8;
+
+	/* size follows the region of interest, not the full source */
+	dma_p_size = dma->roi.w | (dma->roi.h << 16);
+
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_CONFIG, dma_p_cfg_reg);
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_SIZE, dma_p_size);
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_IBUF_Y_STRIDE, source_config->stride);
+}
+
+/*
+ * mdp3_dmas_config() - configure the DMA_S pipe; DMA_S counterpart of
+ * mdp3_dmap_config().
+ *
+ * Register writes are skipped while the splash screen is active.
+ * Caches the descriptors and registers the interrupt callbacks.
+ */
+static int mdp3_dmas_config(struct mdp3_dma *dma,
+			struct mdp3_dma_source *source_config,
+			struct mdp3_dma_output_config *output_config,
+			bool splash_screen_active)
+{
+	u32 dma_s_cfg_reg, dma_s_size, dma_s_out_xy;
+
+	dma_s_cfg_reg = source_config->format << 25;
+	if (output_config->dither_en)
+		dma_s_cfg_reg |= BIT(24);
+	dma_s_cfg_reg |= output_config->out_sel << 19;
+	dma_s_cfg_reg |= output_config->bit_mask_polarity << 18;
+	dma_s_cfg_reg |= output_config->color_components_flip << 14;
+	dma_s_cfg_reg |= output_config->pack_pattern << 8;
+	dma_s_cfg_reg |= output_config->pack_align << 7;
+	dma_s_cfg_reg |= output_config->color_comp_out_bits;
+
+	dma_s_size = source_config->width | (source_config->height << 16);
+	dma_s_out_xy = source_config->x | (source_config->y << 16);
+
+	if (!splash_screen_active) {
+		MDP3_REG_WRITE(MDP3_REG_DMA_S_CONFIG, dma_s_cfg_reg);
+		MDP3_REG_WRITE(MDP3_REG_DMA_S_SIZE, dma_s_size);
+		MDP3_REG_WRITE(MDP3_REG_DMA_S_IBUF_ADDR,
+				(u32)source_config->buf);
+		MDP3_REG_WRITE(MDP3_REG_DMA_S_IBUF_Y_STRIDE,
+				source_config->stride);
+		MDP3_REG_WRITE(MDP3_REG_DMA_S_OUT_XY, dma_s_out_xy);
+		MDP3_REG_WRITE(MDP3_REG_SECONDARY_RD_PTR_IRQ, 0x10);
+	}
+	dma->source_config = *source_config;
+	dma->output_config = *output_config;
+
+	mdp3_dma_callback_setup(dma);
+	return 0;
+}
+
+/*
+ * mdp3_dmas_config_source() - reprogram DMA_S format, size and stride
+ * from the cached source config (DMA_S counterpart of
+ * mdp3_dmap_config_source(); DMA_S has no ROI or pack-pattern update).
+ */
+static void mdp3_dmas_config_source(struct mdp3_dma *dma)
+{
+	struct mdp3_dma_source *source_config = &dma->source_config;
+	u32 dma_s_cfg_reg, dma_s_size;
+
+	dma_s_cfg_reg = MDP3_REG_READ(MDP3_REG_DMA_S_CONFIG);
+	dma_s_cfg_reg &= ~MDP3_DMA_IBUF_FORMAT_MASK;
+	dma_s_cfg_reg |= source_config->format << 25;
+
+	dma_s_size = source_config->width | (source_config->height << 16);
+
+	MDP3_REG_WRITE(MDP3_REG_DMA_S_CONFIG, dma_s_cfg_reg);
+	MDP3_REG_WRITE(MDP3_REG_DMA_S_SIZE, dma_s_size);
+	MDP3_REG_WRITE(MDP3_REG_DMA_S_IBUF_Y_STRIDE, source_config->stride);
+}
+
+/*
+ * mdp3_dmap_cursor_config() - program the DMA_P hardware cursor.
+ *
+ * Blend parameter depends on the blend mode: constant alpha packs the
+ * alpha into bits [31:24]; color keying uses the transparent color plus
+ * a transparency mask; any other mode blends with parameter 0.
+ * Caches @cursor in the dma context.  Always returns 0.
+ */
+static int mdp3_dmap_cursor_config(struct mdp3_dma *dma,
+				struct mdp3_dma_cursor *cursor)
+{
+	u32 cursor_size, cursor_pos, blend_param, trans_mask;
+
+	cursor_size = cursor->width | (cursor->height << 16);
+	cursor_pos = cursor->x | (cursor->y << 16);
+	trans_mask = 0;
+	if (cursor->blend_config.mode == MDP3_DMA_CURSOR_BLEND_CONSTANT_ALPHA) {
+		blend_param = cursor->blend_config.constant_alpha << 24;
+	} else if (cursor->blend_config.mode ==
+			MDP3_DMA_CURSOR_BLEND_COLOR_KEYING) {
+		blend_param = cursor->blend_config.transparent_color;
+		trans_mask = cursor->blend_config.transparency_mask;
+	} else {
+		blend_param = 0;
+	}
+
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_FORMAT, cursor->format);
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_SIZE, cursor_size);
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_BUF_ADDR, (u32)cursor->buf);
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_POS, cursor_pos);
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_BLEND_CONFIG,
+			cursor->blend_config.mode);
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_BLEND_PARAM, blend_param);
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_BLEND_TRANS_MASK, trans_mask);
+	dma->cursor = *cursor;
+	return 0;
+}
+
+/*
+ * mdp3_dmap_ccs_config_internal() - write the CSC tables for DMA_P.
+ *
+ * Writes the 3x3 matrix (9 words), pre/post bias vectors (3 words each)
+ * and pre/post limit vectors (6 words each) into bank 1 or bank 2 of
+ * each table, chosen by the corresponding *_sel flag in @config.
+ * Nothing is written unless config->ccs_enable is set.
+ *
+ * Returns 0, or -EINVAL when @ccs is NULL.
+ */
+static int mdp3_dmap_ccs_config_internal(struct mdp3_dma *dma,
+			struct mdp3_dma_color_correct_config *config,
+			struct mdp3_dma_ccs *ccs)
+{
+	int i;
+	u32 addr;
+
+	if (!ccs)
+		return -EINVAL;
+
+	if (config->ccs_enable) {
+		/* 3x3 color matrix, bank selected by ccs_sel */
+		addr = MDP3_REG_DMA_P_CSC_MV1;
+		if (config->ccs_sel)
+			addr = MDP3_REG_DMA_P_CSC_MV2;
+		for (i = 0; i < 9; i++) {
+			MDP3_REG_WRITE(addr, ccs->mv[i]);
+			addr += 4;
+		}
+
+		/* pre-bias vector */
+		addr = MDP3_REG_DMA_P_CSC_PRE_BV1;
+		if (config->pre_bias_sel)
+			addr = MDP3_REG_DMA_P_CSC_PRE_BV2;
+		for (i = 0; i < 3; i++) {
+			MDP3_REG_WRITE(addr, ccs->pre_bv[i]);
+			addr += 4;
+		}
+
+		/* post-bias vector */
+		addr = MDP3_REG_DMA_P_CSC_POST_BV1;
+		if (config->post_bias_sel)
+			addr = MDP3_REG_DMA_P_CSC_POST_BV2;
+		for (i = 0; i < 3; i++) {
+			MDP3_REG_WRITE(addr, ccs->post_bv[i]);
+			addr += 4;
+		}
+
+		/* pre-limit vector (min/max per component) */
+		addr = MDP3_REG_DMA_P_CSC_PRE_LV1;
+		if (config->pre_limit_sel)
+			addr = MDP3_REG_DMA_P_CSC_PRE_LV2;
+		for (i = 0; i < 6; i++) {
+			MDP3_REG_WRITE(addr, ccs->pre_lv[i]);
+			addr += 4;
+		}
+
+		/* post-limit vector */
+		addr = MDP3_REG_DMA_P_CSC_POST_LV1;
+		if (config->post_limit_sel)
+			addr = MDP3_REG_DMA_P_CSC_POST_LV2;
+		for (i = 0; i < 6; i++) {
+			MDP3_REG_WRITE(addr, ccs->post_lv[i]);
+			addr += 4;
+		}
+	}
+	return 0;
+}
+
+/*
+ * mdp3_ccs_update() - push pending CCS and/or LUT configuration into
+ * the DMA_P color-correct config register.
+ *
+ * @from_kickoff: true when called on the frame-kickoff path; only then
+ * are the cached CSC tables rewritten and the ccs_dirty flag cleared.
+ */
+static void mdp3_ccs_update(struct mdp3_dma *dma, bool from_kickoff)
+{
+	u32 cc_config;
+	bool ccs_updated = false, lut_updated = false;
+	struct mdp3_dma_ccs ccs;
+
+	cc_config = MDP3_REG_READ(MDP3_REG_DMA_P_COLOR_CORRECT_CONFIG);
+
+	if (dma->ccs_config.ccs_dirty) {
+		/* clear the CCS fields, then rebuild them from the config */
+		cc_config &= DMA_CCS_CONFIG_MASK;
+		if (dma->ccs_config.ccs_enable)
+			cc_config |= BIT(3);
+		else
+			cc_config &= ~BIT(3);
+		cc_config |= dma->ccs_config.ccs_sel << 5;
+		cc_config |= dma->ccs_config.pre_bias_sel << 6;
+		cc_config |= dma->ccs_config.post_bias_sel << 7;
+		cc_config |= dma->ccs_config.pre_limit_sel << 8;
+		cc_config |= dma->ccs_config.post_limit_sel << 9;
+		/*
+		 * CCS dirty flag should be reset when call is made from frame
+		 * kickoff, or else upon resume the flag would be dirty and LUT
+		 * config could call this function thereby causing no register
+		 * programming for CCS, which will cause screen to go dark
+		 */
+		if (from_kickoff)
+			dma->ccs_config.ccs_dirty = false;
+		ccs_updated = true;
+	}
+
+	if (dma->lut_config.lut_dirty) {
+		cc_config &= DMA_LUT_CONFIG_MASK;
+		cc_config |= dma->lut_config.lut_enable;
+		cc_config |= dma->lut_config.lut_position << 4;
+		cc_config |= dma->lut_config.lut_sel << 10;
+		dma->lut_config.lut_dirty = false;
+		lut_updated = true;
+	}
+
+	if (ccs_updated && from_kickoff) {
+		/* rewrite the CSC tables from the cached coefficients */
+		ccs.mv = dma->ccs_cache.csc_data.csc_mv;
+		ccs.pre_bv = dma->ccs_cache.csc_data.csc_pre_bv;
+		ccs.post_bv = dma->ccs_cache.csc_data.csc_post_bv;
+		ccs.pre_lv = dma->ccs_cache.csc_data.csc_pre_lv;
+		ccs.post_lv = dma->ccs_cache.csc_data.csc_post_lv;
+		mdp3_dmap_ccs_config_internal(dma, &dma->ccs_config, &ccs);
+	}
+
+	if (lut_updated || ccs_updated) {
+		MDP3_REG_WRITE(MDP3_REG_DMA_P_COLOR_CORRECT_CONFIG, cc_config);
+		/*
+		 * Make sure ccs configuration update is done before continuing
+		 * with the DMA transfer
+		 */
+		wmb(); /* ensure write is finished before progressing */
+	}
+}
+
+/*
+ * mdp3_dmap_ccs_config() - apply a new color-correct configuration.
+ *
+ * Writes the CSC tables, caches @config in the dma context (which
+ * mdp3_ccs_update() reads), and for video-mode interfaces pushes the
+ * config register immediately; command mode defers to frame kickoff.
+ */
+static int mdp3_dmap_ccs_config(struct mdp3_dma *dma,
+			struct mdp3_dma_color_correct_config *config,
+			struct mdp3_dma_ccs *ccs)
+{
+	mdp3_dmap_ccs_config_internal(dma, config, ccs);
+
+	dma->ccs_config = *config;
+
+	if (dma->output_config.out_sel != MDP3_DMA_OUTPUT_SEL_DSI_CMD)
+		mdp3_ccs_update(dma, false);
+
+	return 0;
+}
+
+/*
+ * mdp3_dmap_lut_config() - load a gamma LUT for DMA_P.
+ *
+ * Packs each cmap entry as G | R<<8 | B<<16 into the LUT bank selected
+ * by config->lut_sel, caches the config, and for video-mode interfaces
+ * pushes the config register immediately (command mode defers to frame
+ * kickoff).  Always returns 0.
+ */
+static int mdp3_dmap_lut_config(struct mdp3_dma *dma,
+			struct mdp3_dma_lut_config *config,
+			struct fb_cmap *cmap)
+{
+	u32 addr, color;
+	int i;
+
+	if (config->lut_enable && cmap) {
+		addr = MDP3_REG_DMA_P_CSC_LUT1;
+		if (config->lut_sel)
+			addr = MDP3_REG_DMA_P_CSC_LUT2;
+
+		for (i = 0; i < MDP_LUT_SIZE; i++) {
+			color = cmap->green[i] & 0xff;
+			color |= (cmap->red[i] & 0xff) << 8;
+			color |= (cmap->blue[i] & 0xff) << 16;
+			MDP3_REG_WRITE(addr, color);
+			addr += 4;
+		}
+	}
+
+	dma->lut_config = *config;
+
+	if (dma->output_config.out_sel != MDP3_DMA_OUTPUT_SEL_DSI_CMD)
+		mdp3_ccs_update(dma, false);
+
+	return 0;
+}
+
+/*
+ * mdp3_dmap_histo_config() - program the DMA_P histogram block.
+ *
+ * Sets the frame count, pixel bit mask (bit 31 carries the mask
+ * polarity), auto-clear control and enables the done/reset-done
+ * interrupts, all under histo_lock.  Caches @histo_config.
+ */
+static int mdp3_dmap_histo_config(struct mdp3_dma *dma,
+			struct mdp3_dma_histogram_config *histo_config)
+{
+	unsigned long flag;
+	u32 histo_bit_mask = 0, histo_control = 0;
+	u32 histo_isr_mask = MDP3_DMA_P_HIST_INTR_HIST_DONE_BIT |
+			MDP3_DMA_P_HIST_INTR_RESET_DONE_BIT;
+
+	spin_lock_irqsave(&dma->histo_lock, flag);
+
+	if (histo_config->bit_mask_polarity)
+		histo_bit_mask = BIT(31);
+	histo_bit_mask |= histo_config->bit_mask;
+
+	if (histo_config->auto_clear_en)
+		histo_control = BIT(0);
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_FRAME_CNT,
+			histo_config->frame_count);
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_BIT_MASK, histo_bit_mask);
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_CONTROL, histo_control);
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_INTR_ENABLE, histo_isr_mask);
+
+	spin_unlock_irqrestore(&dma->histo_lock, flag);
+
+	dma->histogram_config = *histo_config;
+	return 0;
+}
+
+/*
+ * dma_bpp() - bytes per pixel for a DMA input buffer format.
+ *
+ * Returns 0 for formats this function does not recognize.
+ */
+int dma_bpp(int format)
+{
+	switch (format) {
+	case MDP3_DMA_IBUF_FORMAT_XRGB8888:
+		return 4;
+	case MDP3_DMA_IBUF_FORMAT_RGB888:
+		return 3;
+	case MDP3_DMA_IBUF_FORMAT_RGB565:
+		return 2;
+	default:
+		return 0;
+	}
+}
+
+/*
+ * mdp3_dmap_update() - kick off one DMA_P frame for @buf.
+ *
+ * Command mode first waits (with one retry) for the previous transfer
+ * to finish, applies any pending source reconfiguration and CCS update,
+ * programs the buffer address (offset by the ROI origin) and starts the
+ * transfer.  Video mode instead waits for the next start-of-frame after
+ * programming.  Returns the wait result (<= 0 on timeout/error).
+ */
+static int mdp3_dmap_update(struct mdp3_dma *dma, void *buf,
+				struct mdp3_intf *intf, void *data)
+{
+	unsigned long flag;
+	int cb_type = MDP3_DMA_CALLBACK_TYPE_VSYNC;
+	struct mdss_panel_data *panel;
+	int rc = 0;
+	int retry_count = 2;
+
+	ATRACE_BEGIN(__func__);
+	pr_debug("mdp3_dmap_update\n");
+
+	if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
+		cb_type = MDP3_DMA_CALLBACK_TYPE_DMA_DONE;
+		if (intf->active) {
+			ATRACE_BEGIN("mdp3_wait_for_dma_comp");
+retry_dma_done:
+			rc = wait_for_completion_timeout(&dma->dma_comp,
+				KOFF_TIMEOUT);
+			if (rc <= 0 && --retry_count) {
+				int  vsync_status;
+
+				/* retry only if dma-done has not fired yet */
+				vsync_status = (1 << MDP3_INTR_DMA_P_DONE) &
+					MDP3_REG_READ(MDP3_REG_INTR_STATUS);
+				if (!vsync_status) {
+					pr_err("%s: cmd timeout retry cnt %d\n",
+						__func__, retry_count);
+					goto retry_dma_done;
+				}
+				rc = -1;
+			}
+				ATRACE_END("mdp3_wait_for_dma_comp");
+		}
+	}
+	if (dma->update_src_cfg) {
+		/* apply deferred source reconfiguration (e.g. ROI change) */
+		if (dma->output_config.out_sel ==
+			MDP3_DMA_OUTPUT_SEL_DSI_VIDEO && intf->active)
+			pr_err("configuring dma source while it is active\n");
+		dma->dma_config_source(dma);
+		if (data) {
+			panel = (struct mdss_panel_data *)data;
+			if (panel->event_handler) {
+				panel->event_handler(panel,
+					MDSS_EVENT_ENABLE_PARTIAL_ROI, NULL);
+				panel->event_handler(panel,
+					MDSS_EVENT_DSI_STREAM_SIZE, NULL);
+			}
+		}
+		dma->update_src_cfg = false;
+	}
+	mutex_lock(&dma->pp_lock);
+	if (dma->ccs_config.ccs_dirty)
+		mdp3_ccs_update(dma, true);
+	mutex_unlock(&dma->pp_lock);
+	spin_lock_irqsave(&dma->dma_lock, flag);
+	/* point DMA_P at the ROI origin inside the new buffer */
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_IBUF_ADDR, (u32)(buf +
+			dma->roi.y * dma->source_config.stride +
+			dma->roi.x * dma_bpp(dma->source_config.format)));
+	dma->source_config.buf = (int)buf;
+	if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD)
+		MDP3_REG_WRITE(MDP3_REG_DMA_P_START, 1);
+
+	if (!intf->active) {
+		pr_debug("%s start interface\n", __func__);
+		intf->start(intf);
+	}
+
+	mb(); /* make sure everything is written before enable */
+	dma->vsync_status = MDP3_REG_READ(MDP3_REG_INTR_STATUS) &
+		(1 << MDP3_INTR_LCDC_START_OF_FRAME);
+	init_completion(&dma->vsync_comp);
+	spin_unlock_irqrestore(&dma->dma_lock, flag);
+
+	mdp3_dma_callback_enable(dma, cb_type);
+	pr_debug("%s wait for vsync_comp\n", __func__);
+	if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO) {
+		ATRACE_BEGIN("mdp3_wait_for_vsync_comp");
+retry_vsync:
+		rc = wait_for_completion_timeout(&dma->vsync_comp,
+			KOFF_TIMEOUT);
+		if (rc <= 0 && --retry_count) {
+			int vsync = MDP3_REG_READ(MDP3_REG_INTR_STATUS) &
+					(1 << MDP3_INTR_LCDC_START_OF_FRAME);
+
+			/* retry only if start-of-frame has not fired yet */
+			if (!vsync) {
+				pr_err("%s trying again count = %d\n",
+					__func__, retry_count);
+				goto retry_vsync;
+			}
+			rc = -1;
+		}
+		ATRACE_END("mdp3_wait_for_vsync_comp");
+	}
+	pr_debug("$%s wait for vsync_comp out\n", __func__);
+	ATRACE_END(__func__);
+	return rc;
+}
+
+/*
+ * mdp3_dmas_update() - kick off one DMA_S frame for @buf; simplified
+ * DMA_S counterpart of mdp3_dmap_update() (no ROI, no retry logic).
+ *
+ * Command mode waits for the previous transfer before programming and
+ * starting; video mode waits for the next vsync after programming.
+ * Always returns 0.
+ */
+static int mdp3_dmas_update(struct mdp3_dma *dma, void *buf,
+				struct mdp3_intf *intf, void *data)
+{
+	unsigned long flag;
+	int cb_type = MDP3_DMA_CALLBACK_TYPE_VSYNC;
+
+	if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
+		cb_type = MDP3_DMA_CALLBACK_TYPE_DMA_DONE;
+		if (intf->active)
+			wait_for_completion_killable(&dma->dma_comp);
+	}
+
+	spin_lock_irqsave(&dma->dma_lock, flag);
+	MDP3_REG_WRITE(MDP3_REG_DMA_S_IBUF_ADDR, (u32)buf);
+	dma->source_config.buf = (int)buf;
+	if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD)
+		MDP3_REG_WRITE(MDP3_REG_DMA_S_START, 1);
+
+	if (!intf->active) {
+		pr_debug("mdp3_dmap_update start interface\n");
+		intf->start(intf);
+	}
+
+	wmb(); /* ensure write is finished before progressing */
+	init_completion(&dma->vsync_comp);
+	spin_unlock_irqrestore(&dma->dma_lock, flag);
+
+	mdp3_dma_callback_enable(dma, cb_type);
+	if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO)
+		wait_for_completion_killable(&dma->vsync_comp);
+	return 0;
+}
+
+/*
+ * mdp3_dmap_cursor_update() - move the DMA_P hardware cursor to
+ * (@x, @y) and cache the new position.  Always returns 0.
+ */
+static int mdp3_dmap_cursor_update(struct mdp3_dma *dma, int x, int y)
+{
+	u32 pos;
+
+	pos = x | (y << 16);
+	dma->cursor.x = x;
+	dma->cursor.y = y;
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_POS, pos);
+	return 0;
+}
+
+/*
+ * mdp3_dmap_histo_get() - wait for the current histogram collection to
+ * finish, copy the R/G/B bin data and extra info into dma->histo_data,
+ * then restart collection for the next frame.
+ *
+ * Returns 0 on success, -EINVAL for a bad state, -ETIMEDOUT on wait
+ * timeout, -EPERM if the histogram was shut down while waiting.
+ */
+static int mdp3_dmap_histo_get(struct mdp3_dma *dma)
+{
+	int i, state, timeout, ret;
+	u32 addr;
+	unsigned long flag;
+
+	spin_lock_irqsave(&dma->histo_lock, flag);
+	state = dma->histo_state;
+	spin_unlock_irqrestore(&dma->histo_lock, flag);
+
+	if (state != MDP3_DMA_HISTO_STATE_START &&
+		state != MDP3_DMA_HISTO_STATE_READY) {
+		pr_err("mdp3_dmap_histo_get invalid state %d\n", state);
+		return -EINVAL;
+	}
+
+	/* scale the timeout with the configured frame count */
+	timeout = HIST_WAIT_TIMEOUT(dma->histogram_config.frame_count);
+	ret = wait_for_completion_killable_timeout(&dma->histo_comp, timeout);
+
+	if (ret == 0) {
+		pr_debug("mdp3_dmap_histo_get time out\n");
+		ret = -ETIMEDOUT;
+	} else if (ret < 0) {
+		pr_err("mdp3_dmap_histo_get interrupted\n");
+	}
+
+	if (ret < 0)
+		return ret;
+
+	/* NOTE(review): histo_state re-read without histo_lock here —
+	 * presumably benign, but confirm against the ISR update path.
+	 */
+	if (dma->histo_state != MDP3_DMA_HISTO_STATE_READY) {
+		pr_debug("mdp3_dmap_histo_get after dma shut down\n");
+		return -EPERM;
+	}
+
+	addr = MDP3_REG_DMA_P_HIST_R_DATA;
+	for (i = 0; i < MDP_HISTOGRAM_BIN_NUM; i++) {
+		dma->histo_data.r_data[i] = MDP3_REG_READ(addr);
+		addr += 4;
+	}
+
+	addr = MDP3_REG_DMA_P_HIST_G_DATA;
+	for (i = 0; i < MDP_HISTOGRAM_BIN_NUM; i++) {
+		dma->histo_data.g_data[i] = MDP3_REG_READ(addr);
+		addr += 4;
+	}
+
+	addr = MDP3_REG_DMA_P_HIST_B_DATA;
+	for (i = 0; i < MDP_HISTOGRAM_BIN_NUM; i++) {
+		dma->histo_data.b_data[i] = MDP3_REG_READ(addr);
+		addr += 4;
+	}
+
+	dma->histo_data.extra[0] =
+			MDP3_REG_READ(MDP3_REG_DMA_P_HIST_EXTRA_INFO_0);
+	dma->histo_data.extra[1] =
+			MDP3_REG_READ(MDP3_REG_DMA_P_HIST_EXTRA_INFO_1);
+
+	/* restart collection for the next frame */
+	spin_lock_irqsave(&dma->histo_lock, flag);
+	init_completion(&dma->histo_comp);
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_START, 1);
+	wmb(); /* ensure write is finished before progressing */
+	dma->histo_state = MDP3_DMA_HISTO_STATE_START;
+	spin_unlock_irqrestore(&dma->histo_lock, flag);
+
+	return 0;
+}
+
+/*
+ * mdp3_dmap_histo_start() - start histogram collection from the idle
+ * state and enable the histogram-done callback.
+ *
+ * Returns 0 on success, -EINVAL if collection is not idle.
+ * NOTE(review): the histo_state check happens before taking histo_lock
+ * — confirm callers serialize against the ISR/stop paths.
+ */
+static int mdp3_dmap_histo_start(struct mdp3_dma *dma)
+{
+	unsigned long flag;
+
+	if (dma->histo_state != MDP3_DMA_HISTO_STATE_IDLE)
+		return -EINVAL;
+
+	spin_lock_irqsave(&dma->histo_lock, flag);
+
+	init_completion(&dma->histo_comp);
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_START, 1);
+	wmb(); /* ensure write is finished before progressing */
+	dma->histo_state = MDP3_DMA_HISTO_STATE_START;
+
+	spin_unlock_irqrestore(&dma->histo_lock, flag);
+
+	mdp3_dma_callback_enable(dma, MDP3_DMA_CALLBACK_TYPE_HIST_DONE);
+	return 0;
+
+}
+
+/*
+ * mdp3_dmap_histo_reset() - reset the histogram block and wait for the
+ * reset-done interrupt.
+ *
+ * Returns 0 on success, -ETIMEDOUT if the reset-done interrupt does
+ * not arrive within DMA_HISTO_RESET_TIMEOUT_MS, or a negative value if
+ * the wait was interrupted.
+ */
+static int mdp3_dmap_histo_reset(struct mdp3_dma *dma)
+{
+	unsigned long flag;
+	int ret;
+
+	spin_lock_irqsave(&dma->histo_lock, flag);
+
+	init_completion(&dma->histo_comp);
+
+
+	/* enable done + reset-done interrupts, then kick the reset */
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_INTR_ENABLE, BIT(0)|BIT(1));
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_RESET_SEQ_START, 1);
+	wmb(); /* ensure write is finished before progressing */
+	dma->histo_state = MDP3_DMA_HISTO_STATE_RESET;
+
+	spin_unlock_irqrestore(&dma->histo_lock, flag);
+
+	mdp3_dma_callback_enable(dma, MDP3_DMA_CALLBACK_TYPE_HIST_RESET_DONE);
+	ret = wait_for_completion_killable_timeout(&dma->histo_comp,
+				msecs_to_jiffies(DMA_HISTO_RESET_TIMEOUT_MS));
+
+	if (ret == 0) {
+		pr_err("mdp3_dmap_histo_reset time out\n");
+		ret = -ETIMEDOUT;
+	} else if (ret < 0) {
+		pr_err("mdp3_dmap_histo_reset interrupted\n");
+	} else {
+		ret = 0;
+	}
+	mdp3_dma_callback_disable(dma, MDP3_DMA_CALLBACK_TYPE_HIST_RESET_DONE);
+
+	return ret;
+}
+
+/*
+ * mdp3_dmap_histo_stop() - cancel histogram collection.
+ *
+ * Requests cancellation, masks the histogram interrupts, marks the
+ * block idle and wakes any waiter, then disables both histogram
+ * callbacks.  Always returns 0.
+ */
+static int mdp3_dmap_histo_stop(struct mdp3_dma *dma)
+{
+	unsigned long flag;
+	int cb_type = MDP3_DMA_CALLBACK_TYPE_HIST_RESET_DONE |
+			MDP3_DMA_CALLBACK_TYPE_HIST_DONE;
+
+	spin_lock_irqsave(&dma->histo_lock, flag);
+
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_CANCEL_REQ, 1);
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_INTR_ENABLE, 0);
+	wmb(); /* ensure write is finished before progressing */
+	dma->histo_state = MDP3_DMA_HISTO_STATE_IDLE;
+	/* release any thread blocked in mdp3_dmap_histo_get() */
+	complete(&dma->histo_comp);
+
+	spin_unlock_irqrestore(&dma->histo_lock, flag);
+
+	mdp3_dma_callback_disable(dma, cb_type);
+	return 0;
+}
+
+/*
+ * mdp3_dmap_histo_op() - dispatch a histogram operation code to the
+ * matching start/stop/reset handler.
+ *
+ * Returns the handler's result, or -EINVAL for an unknown @op.
+ */
+static int mdp3_dmap_histo_op(struct mdp3_dma *dma, u32 op)
+{
+	if (op == MDP3_DMA_HISTO_OP_START)
+		return mdp3_dmap_histo_start(dma);
+
+	if (op == MDP3_DMA_HISTO_OP_STOP || op == MDP3_DMA_HISTO_OP_CANCEL)
+		return mdp3_dmap_histo_stop(dma);
+
+	if (op == MDP3_DMA_HISTO_OP_RESET)
+		return mdp3_dmap_histo_reset(dma);
+
+	return -EINVAL;
+}
+
+/*
+ * mdp3_dmap_busy() - report whether the DMA_P pipe is still
+ * transferring.
+ *
+ * Reads the display status register and tests the DMA_P busy bit.
+ * Returns true while the hardware reports the pipe busy.
+ */
+bool mdp3_dmap_busy(void)
+{
+	u32 val;
+
+	val = MDP3_REG_READ(MDP3_REG_DISPLAY_STATUS);
+	/* routine status poll: debug severity, not pr_err, to avoid
+	 * flooding the kernel log at error level on every check
+	 */
+	pr_debug("%s DMAP Status %s\n", __func__,
+		(val & MDP3_DMA_P_BUSY_BIT) ? "BUSY" : "IDLE");
+	return val & MDP3_DMA_P_BUSY_BIT;
+}
+
+/*
+ * During underrun DMA_P registers are reset. Reprogramming CSC to prevent
+ * black screen
+ */
+static void mdp3_dmap_underrun_worker(struct work_struct *work)
+{
+	struct mdp3_dma *dma;
+
+	dma = container_of(work, struct mdp3_dma, underrun_work);
+	mutex_lock(&dma->pp_lock);
+	if (dma->ccs_config.ccs_enable && dma->ccs_config.ccs_dirty) {
+		/* flip to the alternate CSC bank and reprogram everything */
+		dma->cc_vect_sel = (dma->cc_vect_sel + 1) % 2;
+		dma->ccs_config.ccs_sel = dma->cc_vect_sel;
+		dma->ccs_config.pre_limit_sel = dma->cc_vect_sel;
+		dma->ccs_config.post_limit_sel = dma->cc_vect_sel;
+		dma->ccs_config.pre_bias_sel = dma->cc_vect_sel;
+		dma->ccs_config.post_bias_sel = dma->cc_vect_sel;
+		mdp3_ccs_update(dma, true);
+	}
+	mutex_unlock(&dma->pp_lock);
+}
+
+/*
+ * mdp3_dma_start() - start the DMA pipe on its interface and block
+ * until the first vsync arrives.
+ *
+ * For command-mode output the transfer is kicked explicitly and the
+ * dma-done callback is enabled as well.  Returns 0, or -EINVAL for an
+ * unknown dma_sel.
+ */
+static int mdp3_dma_start(struct mdp3_dma *dma, struct mdp3_intf *intf)
+{
+	unsigned long flag;
+	int cb_type = MDP3_DMA_CALLBACK_TYPE_VSYNC;
+	u32 dma_start_offset = MDP3_REG_DMA_P_START;
+
+	if (dma->dma_sel == MDP3_DMA_P)
+		dma_start_offset = MDP3_REG_DMA_P_START;
+	else if (dma->dma_sel == MDP3_DMA_S)
+		dma_start_offset = MDP3_REG_DMA_S_START;
+	else
+		return -EINVAL;
+
+	spin_lock_irqsave(&dma->dma_lock, flag);
+	if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
+		cb_type |= MDP3_DMA_CALLBACK_TYPE_DMA_DONE;
+		MDP3_REG_WRITE(dma_start_offset, 1);
+	}
+
+	intf->start(intf);
+	wmb(); /* ensure write is finished before progressing */
+	init_completion(&dma->vsync_comp);
+	spin_unlock_irqrestore(&dma->dma_lock, flag);
+
+	/* arm the panic/robust controller alongside DMA_P */
+	if (dma->dma_sel == MDP3_DMA_P && dma->has_panic_ctrl)
+		MDP3_REG_WRITE(MDP3_PANIC_ROBUST_CTRL, BIT(0));
+
+	mdp3_dma_callback_enable(dma, cb_type);
+	pr_debug("mdp3_dma_start wait for vsync_comp in\n");
+	wait_for_completion_killable(&dma->vsync_comp);
+	pr_debug("mdp3_dma_start wait for vsync_comp out\n");
+	return 0;
+}
+
+/*
+ * mdp3_dma_stop() - stop the interface and wait for the pipe to drain.
+ *
+ * Polls the display status register until the pipe's busy bit clears,
+ * then disables and clears all interrupts and resets the dma-done
+ * completion.  Returns the poll result (0 or -ETIMEDOUT), or -EINVAL
+ * for an unknown dma_sel.
+ */
+static int mdp3_dma_stop(struct mdp3_dma *dma, struct mdp3_intf *intf)
+{
+	int ret = 0;
+	u32 status, display_status_bit;
+
+	if (dma->dma_sel == MDP3_DMA_P)
+		display_status_bit = BIT(6);
+	else if (dma->dma_sel == MDP3_DMA_S)
+		display_status_bit = BIT(7);
+	else
+		return -EINVAL;
+
+	if (dma->dma_sel == MDP3_DMA_P && dma->has_panic_ctrl)
+		MDP3_REG_WRITE(MDP3_PANIC_ROBUST_CTRL, 0);
+
+	if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO)
+		display_status_bit |= BIT(11);
+
+	intf->stop(intf);
+	ret = readl_poll_timeout((mdp3_res->mdp_base + MDP3_REG_DISPLAY_STATUS),
+				status,
+				((status & display_status_bit) == 0),
+				DMA_STOP_POLL_SLEEP_US,
+				DMA_STOP_POLL_TIMEOUT_US);
+
+	mdp3_dma_callback_disable(dma, MDP3_DMA_CALLBACK_TYPE_VSYNC |
+					MDP3_DMA_CALLBACK_TYPE_DMA_DONE);
+	mdp3_irq_disable(MDP3_INTR_LCDC_UNDERFLOW);
+
+	MDP3_REG_WRITE(MDP3_REG_INTR_ENABLE, 0);
+	/* NOTE(review): 0xfffffff has only 28 bits set — confirm whether
+	 * 0xffffffff was intended for the interrupt clear mask.
+	 */
+	MDP3_REG_WRITE(MDP3_REG_INTR_CLEAR, 0xfffffff);
+
+	init_completion(&dma->dma_comp);
+	dma->vsync_client.handler = NULL;
+	return ret;
+}
+
+/*
+ * mdp3_dma_init() - populate the dma operations table for the selected
+ * pipe and initialize its locks, completions and cached state.
+ *
+ * DMA_P gets the full feature set (cursor, CCS, histogram, LUT,
+ * underrun recovery); DMA_S gets only the basic update/start/stop
+ * path.  Returns 0, or -ENODEV for an unsupported pipe (e.g. DMA_E).
+ */
+int mdp3_dma_init(struct mdp3_dma *dma)
+{
+	int ret = 0;
+
+	pr_debug("mdp3_dma_init\n");
+	switch (dma->dma_sel) {
+	case MDP3_DMA_P:
+		dma->dma_config = mdp3_dmap_config;
+		dma->dma_sync_config = mdp3_dma_sync_config;
+		dma->dma_config_source = mdp3_dmap_config_source;
+		dma->config_cursor = mdp3_dmap_cursor_config;
+		dma->config_ccs = mdp3_dmap_ccs_config;
+		dma->config_histo = mdp3_dmap_histo_config;
+		dma->config_lut = mdp3_dmap_lut_config;
+		dma->update = mdp3_dmap_update;
+		dma->update_cursor = mdp3_dmap_cursor_update;
+		dma->get_histo = mdp3_dmap_histo_get;
+		dma->histo_op = mdp3_dmap_histo_op;
+		dma->vsync_enable = mdp3_dma_vsync_enable;
+		dma->dma_done_notifier = mdp3_dma_done_notifier;
+		dma->start = mdp3_dma_start;
+		dma->stop = mdp3_dma_stop;
+		dma->busy = mdp3_dmap_busy;
+		INIT_WORK(&dma->underrun_work, mdp3_dmap_underrun_worker);
+		break;
+	case MDP3_DMA_S:
+		/* DMA_S has no cursor/CCS/histogram/LUT hardware support */
+		dma->dma_config = mdp3_dmas_config;
+		dma->dma_sync_config = mdp3_dma_sync_config;
+		dma->dma_config_source = mdp3_dmas_config_source;
+		dma->config_cursor = NULL;
+		dma->config_ccs = NULL;
+		dma->config_histo = NULL;
+		dma->config_lut = NULL;
+		dma->update = mdp3_dmas_update;
+		dma->update_cursor = NULL;
+		dma->get_histo = NULL;
+		dma->histo_op = NULL;
+		dma->vsync_enable = mdp3_dma_vsync_enable;
+		dma->start = mdp3_dma_start;
+		dma->stop = mdp3_dma_stop;
+		break;
+	case MDP3_DMA_E:
+	default:
+		ret = -ENODEV;
+		break;
+	}
+
+	spin_lock_init(&dma->dma_lock);
+	spin_lock_init(&dma->histo_lock);
+	init_completion(&dma->vsync_comp);
+	init_completion(&dma->dma_comp);
+	init_completion(&dma->histo_comp);
+	dma->vsync_client.handler = NULL;
+	dma->vsync_client.arg = NULL;
+	dma->histo_state = MDP3_DMA_HISTO_STATE_IDLE;
+	dma->update_src_cfg = false;
+
+	memset(&dma->cursor, 0, sizeof(dma->cursor));
+	memset(&dma->ccs_config, 0, sizeof(dma->ccs_config));
+	memset(&dma->histogram_config, 0, sizeof(dma->histogram_config));
+
+	return ret;
+}
+
+int lcdc_config(struct mdp3_intf *intf, struct mdp3_intf_cfg *cfg)
+{
+	u32 temp;
+	struct mdp3_video_intf_cfg *v = &cfg->video;
+
+	temp = v->hsync_pulse_width | (v->hsync_period << 16);
+	MDP3_REG_WRITE(MDP3_REG_LCDC_HSYNC_CTL, temp);
+	MDP3_REG_WRITE(MDP3_REG_LCDC_VSYNC_PERIOD, v->vsync_period);
+	MDP3_REG_WRITE(MDP3_REG_LCDC_VSYNC_PULSE_WIDTH, v->vsync_pulse_width);
+	temp = v->display_start_x | (v->display_end_x << 16);
+	MDP3_REG_WRITE(MDP3_REG_LCDC_DISPLAY_HCTL, temp);
+	MDP3_REG_WRITE(MDP3_REG_LCDC_DISPLAY_V_START, v->display_start_y);
+	MDP3_REG_WRITE(MDP3_REG_LCDC_DISPLAY_V_END, v->display_end_y);
+	temp = v->active_start_x | (v->active_end_x);
+	if (v->active_h_enable)
+		temp |= BIT(31);
+	MDP3_REG_WRITE(MDP3_REG_LCDC_ACTIVE_HCTL, temp);
+	MDP3_REG_WRITE(MDP3_REG_LCDC_ACTIVE_V_START, v->active_start_y);
+	MDP3_REG_WRITE(MDP3_REG_LCDC_ACTIVE_V_END, v->active_end_y);
+	MDP3_REG_WRITE(MDP3_REG_LCDC_HSYNC_SKEW, v->hsync_skew);
+	temp = 0;
+	if (!v->hsync_polarity)
+		temp = BIT(0);
+	if (!v->vsync_polarity)
+		temp = BIT(1);
+	if (!v->de_polarity)
+		temp = BIT(2);
+	MDP3_REG_WRITE(MDP3_REG_LCDC_CTL_POLARITY, temp);
+
+	return 0;
+}
+
/*
 * lcdc_start() - enable the LCDC timing generator.
 *
 * The write barrier guarantees the enable reaches the hardware before
 * the software state flips to active.
 *
 * Return: always 0.
 */
int lcdc_start(struct mdp3_intf *intf)
{
	MDP3_REG_WRITE(MDP3_REG_LCDC_EN, BIT(0));
	wmb(); /* ensure write is finished before progressing */
	intf->active = true;
	return 0;
}
+
/*
 * lcdc_stop() - disable the LCDC timing generator.
 *
 * The write barrier guarantees the disable reaches the hardware before
 * the software state flips to inactive.
 *
 * Return: always 0.
 */
int lcdc_stop(struct mdp3_intf *intf)
{
	MDP3_REG_WRITE(MDP3_REG_LCDC_EN, 0);
	wmb(); /* ensure write is finished before progressing */
	intf->active = false;
	return 0;
}
+
/*
 * dsi_video_config() - program the DSI video-mode timing generator from
 * @cfg->video.
 *
 * The {start, end} register pairs pack start into bits [15:0] and end
 * into bits [31:16]; BIT(31) of the ACTIVE registers enables the active
 * window override.
 *
 * Return: always 0 (register writes cannot fail).
 */
int dsi_video_config(struct mdp3_intf *intf, struct mdp3_intf_cfg *cfg)
{
	u32 temp;
	struct mdp3_video_intf_cfg *v = &cfg->video;

	pr_debug("dsi_video_config\n");

	temp = v->hsync_pulse_width | (v->hsync_period << 16);
	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_HSYNC_CTL, temp);
	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_VSYNC_PERIOD, v->vsync_period);
	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_VSYNC_PULSE_WIDTH,
			v->vsync_pulse_width);
	temp = v->display_start_x | (v->display_end_x << 16);
	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_DISPLAY_HCTL, temp);
	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_DISPLAY_V_START, v->display_start_y);
	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_DISPLAY_V_END, v->display_end_y);
	temp = v->active_start_x | (v->active_end_x << 16);
	if (v->active_h_enable)
		temp |= BIT(31); /* enable the horizontal active window */
	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_ACTIVE_HCTL, temp);

	temp = v->active_start_y;
	if (v->active_v_enable)
		temp |= BIT(31); /* enable the vertical active window */
	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_ACTIVE_V_START, temp);
	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_ACTIVE_V_END, v->active_end_y);
	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_HSYNC_SKEW, v->hsync_skew);
	temp = 0;
	/* Polarity bits are active-low enables; accumulate each flag. */
	if (!v->hsync_polarity)
		temp |= BIT(0);
	if (!v->vsync_polarity)
		temp |= BIT(1);
	if (!v->de_polarity)
		temp |= BIT(2);
	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_CTL_POLARITY, temp);

	/* Bit 31 enables underflow-color replacement in hardware. */
	v->underflow_color |= 0x80000000;
	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_UNDERFLOW_CTL, v->underflow_color);

	return 0;
}
+
/*
 * dsi_video_start() - enable the DSI video-mode timing generator.
 *
 * Return: always 0.
 */
int dsi_video_start(struct mdp3_intf *intf)
{
	pr_debug("dsi_video_start\n");
	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_EN, BIT(0));
	wmb(); /* ensure write is finished before progressing */
	intf->active = true;
	return 0;
}
+
/*
 * dsi_video_stop() - disable the DSI video-mode timing generator.
 *
 * Return: always 0.
 */
int dsi_video_stop(struct mdp3_intf *intf)
{
	pr_debug("dsi_video_stop\n");
	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_EN, 0);
	wmb(); /* ensure write is finished before progressing */
	intf->active = false;
	return 0;
}
+
/*
 * dsi_cmd_config() - program the DSI command-mode ID map and trigger
 * source from @cfg->dsi_cmd.
 *
 * NOTE(review): if primary and secondary command IDs can both be set,
 * the second assignment below overwrites the first; that would want
 * `id_map |= BIT(4)` instead — confirm whether the IDs are mutually
 * exclusive on this hardware.
 *
 * Return: always 0 (register writes cannot fail).
 */
int dsi_cmd_config(struct mdp3_intf *intf, struct mdp3_intf_cfg *cfg)
{
	u32 id_map = 0;
	u32 trigger_en = 0;

	if (cfg->dsi_cmd.primary_dsi_cmd_id)
		id_map = BIT(0);
	if (cfg->dsi_cmd.secondary_dsi_cmd_id)
		id_map = BIT(4);

	/* Select the timing-generator interface as the trigger source. */
	if (cfg->dsi_cmd.dsi_cmd_tg_intf_sel)
		trigger_en = BIT(4);

	MDP3_REG_WRITE(MDP3_REG_DSI_CMD_MODE_ID_MAP, id_map);
	MDP3_REG_WRITE(MDP3_REG_DSI_CMD_MODE_TRIGGER_EN, trigger_en);

	return 0;
}
+
/*
 * dsi_cmd_start() - mark the command-mode interface active.
 *
 * Command mode has no free-running timing generator to enable; frames
 * are pushed explicitly, so only the software state changes here.
 *
 * Return: always 0.
 */
int dsi_cmd_start(struct mdp3_intf *intf)
{
	intf->active = true;
	return 0;
}
+
/*
 * dsi_cmd_stop() - mark the command-mode interface inactive.
 *
 * No hardware disable is required for command mode; only the software
 * state changes here.
 *
 * Return: always 0.
 */
int dsi_cmd_stop(struct mdp3_intf *intf)
{
	intf->active = false;
	return 0;
}
+
+int mdp3_intf_init(struct mdp3_intf *intf)
+{
+	switch (intf->cfg.type) {
+	case MDP3_DMA_OUTPUT_SEL_LCDC:
+		intf->config = lcdc_config;
+		intf->start = lcdc_start;
+		intf->stop = lcdc_stop;
+		break;
+	case MDP3_DMA_OUTPUT_SEL_DSI_VIDEO:
+		intf->config = dsi_video_config;
+		intf->start = dsi_video_start;
+		intf->stop = dsi_video_stop;
+		break;
+	case MDP3_DMA_OUTPUT_SEL_DSI_CMD:
+		intf->config = dsi_cmd_config;
+		intf->start = dsi_cmd_start;
+		intf->stop = dsi_cmd_stop;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
diff --git a/drivers/video/fbdev/msm/mdp3_dma.h b/drivers/video/fbdev/msm/mdp3_dma.h
new file mode 100644
index 0000000..6c8e7fe
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdp3_dma.h
@@ -0,0 +1,396 @@
+/* Copyright (c) 2013-2014, 2016, 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDP3_DMA_H
+#define MDP3_DMA_H
+
+#include <linux/notifier.h>
+#include <linux/sched.h>
+#include <linux/msm_mdp.h>
+
+#define MDP_HISTOGRAM_BL_SCALE_MAX 1024
+#define MDP_HISTOGRAM_BL_LEVEL_MAX 255
+#define MDP_HISTOGRAM_FRAME_COUNT_MAX 0x20
+#define MDP_HISTOGRAM_BIT_MASK_MAX 0x4
+#define MDP_HISTOGRAM_CSC_MATRIX_MAX 0x2000
+#define MDP_HISTOGRAM_CSC_VECTOR_MAX 0x200
+#define MDP_HISTOGRAM_BIN_NUM	32
+#define MDP_LUT_SIZE 256
+
/* Identifier for each hardware DMA pipe. */
enum {
	MDP3_DMA_P,
	MDP3_DMA_S,
	MDP3_DMA_E,
	MDP3_DMA_MAX
};

/* Capability bit-flags advertised by a DMA pipe. */
enum {
	MDP3_DMA_CAP_CURSOR = 0x1,
	MDP3_DMA_CAP_COLOR_CORRECTION = 0x2,
	MDP3_DMA_CAP_HISTOGRAM = 0x4,
	MDP3_DMA_CAP_GAMMA_CORRECTION = 0x8,
	MDP3_DMA_CAP_DITHER = 0x10,
	MDP3_DMA_CAP_ALL = 0x1F
};

/* Output interface a DMA pipe drives. */
enum {
	MDP3_DMA_OUTPUT_SEL_AHB,
	MDP3_DMA_OUTPUT_SEL_DSI_CMD,
	MDP3_DMA_OUTPUT_SEL_LCDC,
	MDP3_DMA_OUTPUT_SEL_DSI_VIDEO,
	MDP3_DMA_OUTPUT_SEL_MAX
};

/* Input (frame buffer) pixel formats. */
enum {
	MDP3_DMA_IBUF_FORMAT_RGB888,
	MDP3_DMA_IBUF_FORMAT_RGB565,
	MDP3_DMA_IBUF_FORMAT_XRGB8888,
	MDP3_DMA_IBUF_FORMAT_UNDEFINED
};

/* Hardware encodings of the output component pack order. */
enum {
	MDP3_DMA_OUTPUT_PACK_PATTERN_RGB = 0x21,
	MDP3_DMA_OUTPUT_PACK_PATTERN_RBG = 0x24,
	MDP3_DMA_OUTPUT_PACK_PATTERN_BGR = 0x12,
	MDP3_DMA_OUTPUT_PACK_PATTERN_BRG = 0x18,
	MDP3_DMA_OUTPUT_PACK_PATTERN_GBR = 0x06,
	MDP3_DMA_OUTPUT_PACK_PATTERN_GRB = 0x09,
};

/* Alignment of packed output data within a word. */
enum {
	MDP3_DMA_OUTPUT_PACK_ALIGN_LSB,
	MDP3_DMA_OUTPUT_PACK_ALIGN_MSB
};

/* Bits per color component on the output side. */
enum {
	MDP3_DMA_OUTPUT_COMP_BITS_4, /*4 bits per color component*/
	MDP3_DMA_OUTPUT_COMP_BITS_5,
	MDP3_DMA_OUTPUT_COMP_BITS_6,
	MDP3_DMA_OUTPUT_COMP_BITS_8,
};

/* Supported hardware cursor pixel formats. */
enum {
	MDP3_DMA_CURSOR_FORMAT_ARGB888,
};

/* Which color-correction coefficient set is in use. */
enum {
	MDP3_DMA_COLOR_CORRECT_SET_1,
	MDP3_DMA_COLOR_CORRECT_SET_2
};

/* LUT placement relative to the color-correction stage. */
enum {
	MDP3_DMA_LUT_POSITION_PRE,
	MDP3_DMA_LUT_POSITION_POST
};

/* Per-component LUT enable bit-flags. */
enum {
	MDP3_DMA_LUT_DISABLE = 0x0,
	MDP3_DMA_LUT_ENABLE_C0 = 0x01,
	MDP3_DMA_LUT_ENABLE_C1 = 0x02,
	MDP3_DMA_LUT_ENABLE_C2 = 0x04,
	MDP3_DMA_LUT_ENABLE_ALL = 0x07,
};

/* How many MSBs the histogram masks off each component. */
enum {
	MDP3_DMA_HISTOGRAM_BIT_MASK_NONE = 0X0,
	MDP3_DMA_HISTOGRAM_BIT_MASK_ONE_MSB = 0x1,
	MDP3_DMA_HISTOGRAM_BIT_MASK_TWO_MSB = 0x2,
	MDP3_DMA_HISTOGRAM_BIT_MASK_THREE_MSB = 0x3
};

/* Per-component sign-flip bit-flags for the output stage. */
enum {
	MDP3_DMA_COLOR_FLIP_NONE,
	MDP3_DMA_COLOR_FLIP_COMP1 = 0x1,
	MDP3_DMA_COLOR_FLIP_COMP2 = 0x2,
	MDP3_DMA_COLOR_FLIP_COMP3 = 0x4,
};

/* Hardware cursor blend modes. */
enum {
	MDP3_DMA_CURSOR_BLEND_NONE = 0x0,
	MDP3_DMA_CURSOR_BLEND_PER_PIXEL_ALPHA =  0x3,
	MDP3_DMA_CURSOR_BLEND_CONSTANT_ALPHA = 0x5,
	MDP3_DMA_CURSOR_BLEND_COLOR_KEYING = 0x9
};

/* Operations accepted by the histo_op() hook. */
enum {
	MDP3_DMA_HISTO_OP_START,
	MDP3_DMA_HISTO_OP_STOP,
	MDP3_DMA_HISTO_OP_CANCEL,
	MDP3_DMA_HISTO_OP_RESET
};

/* Software state machine for histogram collection. */
enum {
	MDP3_DMA_HISTO_STATE_UNKNOWN,
	MDP3_DMA_HISTO_STATE_IDLE,
	MDP3_DMA_HISTO_STATE_RESET,
	MDP3_DMA_HISTO_STATE_START,
	MDP3_DMA_HISTO_STATE_READY,
};

/* Callback classes for mdp3_dma_callback_enable()/_disable(). */
enum {
	MDP3_DMA_CALLBACK_TYPE_VSYNC = 0x01,
	MDP3_DMA_CALLBACK_TYPE_DMA_DONE = 0x02,
	MDP3_DMA_CALLBACK_TYPE_HIST_RESET_DONE = 0x04,
	MDP3_DMA_CALLBACK_TYPE_HIST_DONE = 0x08,
};
+
/* Geometry and buffer description of a DMA pipe's input source. */
struct mdp3_dma_source {
	u32 format;
	int width;
	int height;
	int x;
	int y;
	dma_addr_t buf;
	int stride;
	int vsync_count;
	int vporch;
};

/* Output-stage configuration: destination, pack order, depth. */
struct mdp3_dma_output_config {
	int dither_en;
	u32 out_sel;
	u32 bit_mask_polarity;
	u32 color_components_flip;
	u32 pack_pattern;
	u32 pack_align;
	u32 color_comp_out_bits;
};

/* Blend parameters for the hardware cursor. */
struct mdp3_dma_cursor_blend_config {
	u32 mode;
	u32 transparent_color; /*color keying*/
	u32 transparency_mask;
	u32 constant_alpha;
};

/* Hardware cursor image, position and blend state. */
struct mdp3_dma_cursor {
	int enable; /* enable cursor or not*/
	u32 format;
	int width;
	int height;
	int x;
	int y;
	void *buf;
	struct mdp3_dma_cursor_blend_config blend_config;
};

/* Color-correction coefficient vectors (matrix, bias, limit). */
struct mdp3_dma_ccs {
	u32 *mv; /*set1 matrix vector, 3x3 */
	u32 *pre_bv; /*pre-bias vector for set1, 1x3*/
	u32 *post_bv; /*post-bias vecotr for set1,  */
	u32 *pre_lv; /*pre-limit vector for set 1, 1x6*/
	u32 *post_lv;
};

/* LUT enable/placement state; lut_dirty flags a pending reprogram. */
struct mdp3_dma_lut_config {
	int lut_enable;
	u32 lut_sel;
	u32 lut_position;
	bool lut_dirty;
};

/* Color-correction enable/select state; ccs_dirty flags a reprogram. */
struct mdp3_dma_color_correct_config {
	int ccs_enable;
	u32 post_limit_sel;
	u32 pre_limit_sel;
	u32 post_bias_sel;
	u32 pre_bias_sel;
	u32 ccs_sel;
	bool ccs_dirty;
};

/* Histogram collection parameters. */
struct mdp3_dma_histogram_config {
	int frame_count;
	u32 bit_mask_polarity;
	u32 bit_mask;
	int auto_clear_en;
};

/* One frame of per-channel histogram bins read back from hardware. */
struct mdp3_dma_histogram_data {
	u32 r_data[MDP_HISTOGRAM_BIN_NUM];
	u32 g_data[MDP_HISTOGRAM_BIN_NUM];
	u32 b_data[MDP_HISTOGRAM_BIN_NUM];
	u32 extra[2];
};

/* Generic callback registration: handler plus opaque argument. */
struct mdp3_notification {
	void (*handler)(void *arg);
	void *arg;
};

/* Tear-check (TE) configuration for command-mode panels. */
struct mdp3_tear_check {
	int frame_rate;
	bool hw_vsync_mode;
	u32 tear_check_en;
	u32 sync_cfg_height;
	u32 vsync_init_val;
	u32 sync_threshold_start;
	u32 sync_threshold_continue;
	u32 start_pos;
	u32 rd_ptr_irq;
	u32 refx100;
};

/* Simple x/y/width/height rectangle (used for ROI updates). */
struct mdp3_rect {
	u32 x;
	u32 y;
	u32 w;
	u32 h;
};
+
/* Forward declaration; the full definition appears below. */
struct mdp3_intf;

/*
 * Per-pipe DMA state plus its operation table. The function pointers
 * are populated by mdp3_dma_init() according to dma_sel; optional hooks
 * (cursor, CCS, LUT, histogram) are NULL on pipes without the feature.
 */
struct mdp3_dma {
	u32 dma_sel;
	u32 capability;
	int in_use;
	int available;

	/* Locks, completions and client callbacks. */
	spinlock_t dma_lock;
	spinlock_t histo_lock;
	struct completion vsync_comp;
	struct completion dma_comp;
	struct completion histo_comp;
	struct kernfs_node *hist_event_sd;
	struct mdp3_notification vsync_client;
	struct mdp3_notification dma_notifier_client;
	struct mdp3_notification retire_client;

	/* Cached source/output configuration. */
	struct mdp3_dma_output_config output_config;
	struct mdp3_dma_source source_config;

	/* Cursor and color-correction state. */
	struct mdp3_dma_cursor cursor;
	struct mdp3_dma_color_correct_config ccs_config;
	struct mdp_csc_cfg_data ccs_cache;
	int cc_vect_sel;

	struct work_struct underrun_work;
	struct mutex pp_lock;

	/* LUT and histogram state. */
	struct mdp3_dma_lut_config lut_config;
	struct mdp3_dma_histogram_config histogram_config;
	int histo_state;
	struct mdp3_dma_histogram_data histo_data;
	unsigned int vsync_status;
	bool update_src_cfg;
	bool has_panic_ctrl;
	struct mdp3_rect roi;

	u32 lut_sts;
	u32 hist_events;
	struct fb_cmap *gc_cmap;
	struct fb_cmap *hist_cmap;

	/* Operation table; wired by mdp3_dma_init(). */
	bool (*busy)(void);

	int (*dma_config)(struct mdp3_dma *dma,
			struct mdp3_dma_source *source_config,
			struct mdp3_dma_output_config *output_config,
			bool splash_screen_active);

	int (*dma_sync_config)(struct mdp3_dma *dma, struct mdp3_dma_source
				*source_config, struct mdp3_tear_check *te);

	void (*dma_config_source)(struct mdp3_dma *dma);

	int (*start)(struct mdp3_dma *dma, struct mdp3_intf *intf);

	int (*stop)(struct mdp3_dma *dma, struct mdp3_intf *intf);

	int (*config_cursor)(struct mdp3_dma *dma,
				struct mdp3_dma_cursor *cursor);

	int (*config_ccs)(struct mdp3_dma *dma,
			struct mdp3_dma_color_correct_config *config,
			struct mdp3_dma_ccs *ccs);

	int (*config_lut)(struct mdp3_dma *dma,
			struct mdp3_dma_lut_config *config,
			struct fb_cmap *cmap);

	int (*update)(struct mdp3_dma *dma,
			void *buf, struct mdp3_intf *intf, void *data);

	int (*update_cursor)(struct mdp3_dma *dma, int x, int y);

	int (*get_histo)(struct mdp3_dma *dma);

	int (*config_histo)(struct mdp3_dma *dma,
				struct mdp3_dma_histogram_config *histo_config);

	int (*histo_op)(struct mdp3_dma *dma, u32 op);

	void (*vsync_enable)(struct mdp3_dma *dma,
			struct mdp3_notification *vsync_client);

	void (*retire_enable)(struct mdp3_dma *dma,
			struct mdp3_notification *retire_client);

	void (*dma_done_notifier)(struct mdp3_dma *dma,
			struct mdp3_notification *dma_client);
};
+
/* Video-mode timing parameters (LCDC and DSI video interfaces). */
struct mdp3_video_intf_cfg {
	int hsync_period;
	int hsync_pulse_width;
	int vsync_period;
	int vsync_pulse_width;
	int display_start_x;
	int display_end_x;
	int display_start_y;
	int display_end_y;
	int active_start_x;
	int active_end_x;
	int active_h_enable;
	int active_start_y;
	int active_end_y;
	int active_v_enable;
	int hsync_skew;
	int hsync_polarity;
	int vsync_polarity;
	int de_polarity;
	int underflow_color;
};

/* DSI command-mode parameters: command IDs and trigger source. */
struct mdp3_dsi_cmd_intf_cfg {
	int primary_dsi_cmd_id;
	int secondary_dsi_cmd_id;
	int dsi_cmd_tg_intf_sel;
};

/* Interface configuration; type selects which member union applies. */
struct mdp3_intf_cfg {
	u32 type;
	struct mdp3_video_intf_cfg video;
	struct mdp3_dsi_cmd_intf_cfg dsi_cmd;
};

/* One display interface and its ops, bound by mdp3_intf_init(). */
struct mdp3_intf {
	struct mdp3_intf_cfg cfg;
	int active;
	int available;
	int in_use;
	int (*config)(struct mdp3_intf *intf, struct mdp3_intf_cfg *cfg);
	int (*start)(struct mdp3_intf *intf);
	int (*stop)(struct mdp3_intf *intf);
};
+
+int mdp3_dma_init(struct mdp3_dma *dma);
+
+int mdp3_intf_init(struct mdp3_intf *intf);
+
+void mdp3_dma_callback_enable(struct mdp3_dma *dma, int type);
+
+void mdp3_dma_callback_disable(struct mdp3_dma *dma, int type);
+
+void mdp3_hist_intr_notify(struct mdp3_dma *dma);
+#endif /* MDP3_DMA_H */
diff --git a/drivers/video/fbdev/msm/mdp3_hwio.h b/drivers/video/fbdev/msm/mdp3_hwio.h
new file mode 100644
index 0000000..2e3d358
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdp3_hwio.h
@@ -0,0 +1,361 @@
+/* Copyright (c) 2013-2014, 2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDP3_HWIO_H
+#define MDP3_HWIO_H
+
+#include <linux/bitops.h>
+
+/*synchronization*/
+#define MDP3_REG_SYNC_CONFIG_0				0x0300
+#define MDP3_REG_SYNC_CONFIG_1				0x0304
+#define MDP3_REG_SYNC_CONFIG_2				0x0308
+#define MDP3_REG_SYNC_STATUS_0				0x030c
+#define MDP3_REG_SYNC_STATUS_1				0x0310
+#define MDP3_REG_SYNC_STATUS_2				0x0314
+#define MDP3_REG_PRIMARY_VSYNC_OUT_CTRL			0x0318
+#define MDP3_REG_SECONDARY_VSYNC_OUT_CTRL		0x031c
+#define MDP3_REG_EXTERNAL_VSYNC_OUT_CTRL		0x0320
+#define MDP3_REG_VSYNC_SEL				0x0324
+#define MDP3_REG_PRIMARY_VSYNC_INIT_VAL			0x0328
+#define MDP3_REG_SECONDARY_VSYNC_INIT_VAL		0x032c
+#define MDP3_REG_EXTERNAL_VSYNC_INIT_VAL		0x0330
+#define MDP3_REG_AUTOREFRESH_CONFIG_P			0x034C
+#define MDP3_REG_SYNC_THRESH_0				0x0200
+#define MDP3_REG_SYNC_THRESH_1				0x0204
+#define MDP3_REG_SYNC_THRESH_2				0x0208
+#define MDP3_REG_TEAR_CHECK_EN				0x020C
+#define MDP3_REG_PRIMARY_START_P0S			0x0210
+#define MDP3_REG_SECONDARY_START_POS			0x0214
+#define MDP3_REG_EXTERNAL_START_POS			0x0218
+
+/*interrupt*/
+#define MDP3_REG_INTR_ENABLE				0x0020
+#define MDP3_REG_INTR_STATUS				0x0024
+#define MDP3_REG_INTR_CLEAR				0x0028
+
+#define MDP3_REG_PRIMARY_RD_PTR_IRQ			0x021C
+#define MDP3_REG_SECONDARY_RD_PTR_IRQ			0x0220
+
+/*operation control*/
+#define MDP3_REG_DMA_P_START				0x0044
+#define MDP3_REG_DMA_S_START				0x0048
+#define MDP3_REG_DMA_E_START				0x004c
+
+#define MDP3_REG_DISPLAY_STATUS				0x0038
+
+#define MDP3_REG_HW_VERSION				0x0070
+#define MDP3_REG_SW_RESET				0x0074
+#define MDP3_REG_SEL_CLK_OR_HCLK_TEST_BUS		0x007C
+
+/*EBI*/
+#define MDP3_REG_EBI2_LCD0				0x003c
+#define MDP3_REG_EBI2_LCD0_YSTRIDE			0x0050
+
+/*clock control*/
+#define MDP3_REG_CGC_EN					0x0100
+#define MDP3_VBIF_REG_FORCE_EN				0x0004
+
+/* QOS Remapper */
+#define MDP3_DMA_P_QOS_REMAPPER				0x90090
+#define MDP3_DMA_P_WATERMARK_0				0x90094
+#define MDP3_DMA_P_WATERMARK_1				0x90098
+#define MDP3_DMA_P_WATERMARK_2				0x9009C
+#define MDP3_PANIC_ROBUST_CTRL				0x900A0
+#define MDP3_PANIC_LUT0					0x900A4
+#define MDP3_PANIC_LUT1					0x900A8
+#define MDP3_ROBUST_LUT					0x900AC
+
+/*danger safe*/
+#define MDP3_PANIC_ROBUST_CTRL				0x900A0
+
+/*DMA_P*/
+#define MDP3_REG_DMA_P_CONFIG				0x90000
+#define MDP3_REG_DMA_P_SIZE				0x90004
+#define MDP3_REG_DMA_P_IBUF_ADDR			0x90008
+#define MDP3_REG_DMA_P_IBUF_Y_STRIDE			0x9000C
+#define MDP3_REG_DMA_P_PROFILE_EN			0x90020
+#define MDP3_REG_DMA_P_OUT_XY				0x90010
+#define MDP3_REG_DMA_P_CURSOR_FORMAT			0x90040
+#define MDP3_REG_DMA_P_CURSOR_SIZE			0x90044
+#define MDP3_REG_DMA_P_CURSOR_BUF_ADDR			0x90048
+#define MDP3_REG_DMA_P_CURSOR_POS			0x9004c
+#define MDP3_REG_DMA_P_CURSOR_BLEND_CONFIG		0x90060
+#define MDP3_REG_DMA_P_CURSOR_BLEND_PARAM		0x90064
+#define MDP3_REG_DMA_P_CURSOR_BLEND_TRANS_MASK		0x90068
+#define MDP3_REG_DMA_P_COLOR_CORRECT_CONFIG		0x90070
+#define MDP3_REG_DMA_P_CSC_BYPASS			0X93004
+#define MDP3_REG_DMA_P_CSC_MV1				0x93400
+#define MDP3_REG_DMA_P_CSC_MV2				0x93440
+#define MDP3_REG_DMA_P_CSC_PRE_BV1			0x93500
+#define MDP3_REG_DMA_P_CSC_PRE_BV2			0x93540
+#define MDP3_REG_DMA_P_CSC_POST_BV1			0x93580
+#define MDP3_REG_DMA_P_CSC_POST_BV2			0x935c0
+#define MDP3_REG_DMA_P_CSC_PRE_LV1			0x93600
+#define MDP3_REG_DMA_P_CSC_PRE_LV2			0x93640
+#define MDP3_REG_DMA_P_CSC_POST_LV1			0x93680
+#define MDP3_REG_DMA_P_CSC_POST_LV2			0x936c0
+#define MDP3_REG_DMA_P_CSC_LUT1				0x93800
+#define MDP3_REG_DMA_P_CSC_LUT2				0x93c00
+#define MDP3_REG_DMA_P_HIST_START			0x94000
+#define MDP3_REG_DMA_P_HIST_FRAME_CNT			0x94004
+#define MDP3_REG_DMA_P_HIST_BIT_MASK			0x94008
+#define MDP3_REG_DMA_P_HIST_RESET_SEQ_START		0x9400c
+#define MDP3_REG_DMA_P_HIST_CONTROL			0x94010
+#define MDP3_REG_DMA_P_HIST_INTR_STATUS			0x94014
+#define MDP3_REG_DMA_P_HIST_INTR_CLEAR			0x94018
+#define MDP3_REG_DMA_P_HIST_INTR_ENABLE			0x9401c
+#define MDP3_REG_DMA_P_HIST_STOP_REQ			0x94020
+#define MDP3_REG_DMA_P_HIST_CANCEL_REQ			0x94024
+#define MDP3_REG_DMA_P_HIST_EXTRA_INFO_0		0x94028
+#define MDP3_REG_DMA_P_HIST_EXTRA_INFO_1		0x9402c
+#define MDP3_REG_DMA_P_HIST_R_DATA			0x94100
+#define MDP3_REG_DMA_P_HIST_G_DATA			0x94200
+#define MDP3_REG_DMA_P_HIST_B_DATA			0x94300
+#define MDP3_REG_DMA_P_FETCH_CFG			0x90074
+#define MDP3_REG_DMA_P_DCVS_CTRL			0x90080
+#define MDP3_REG_DMA_P_DCVS_STATUS			0x90084
+
+/*DMA_S*/
+#define MDP3_REG_DMA_S_CONFIG				0xA0000
+#define MDP3_REG_DMA_S_SIZE				0xA0004
+#define MDP3_REG_DMA_S_IBUF_ADDR			0xA0008
+#define MDP3_REG_DMA_S_IBUF_Y_STRIDE			0xA000C
+#define MDP3_REG_DMA_S_OUT_XY				0xA0010
+
+/*DMA MASK*/
+#define MDP3_DMA_IBUF_FORMAT_MASK 0x06000000
+#define MDP3_DMA_PACK_PATTERN_MASK 0x00003f00
+
+/*MISR*/
+#define MDP3_REG_MODE_CLK				0x000D0000
+#define MDP3_REG_MISR_RESET_CLK			0x000D0004
+#define MDP3_REG_EXPORT_MISR_CLK			0x000D0008
+#define MDP3_REG_MISR_CURR_VAL_CLK			0x000D000C
+#define MDP3_REG_MODE_HCLK				0x000D0100
+#define MDP3_REG_MISR_RESET_HCLK			0x000D0104
+#define MDP3_REG_EXPORT_MISR_HCLK			0x000D0108
+#define MDP3_REG_MISR_CURR_VAL_HCLK			0x000D010C
+#define MDP3_REG_MODE_DCLK				0x000D0200
+#define MDP3_REG_MISR_RESET_DCLK			0x000D0204
+#define MDP3_REG_EXPORT_MISR_DCLK			0x000D0208
+#define MDP3_REG_MISR_CURR_VAL_DCLK			0x000D020C
+#define MDP3_REG_CAPTURED_DCLK				0x000D0210
+#define MDP3_REG_MISR_CAPT_VAL_DCLK			0x000D0214
+#define MDP3_REG_MODE_TVCLK				0x000D0300
+#define MDP3_REG_MISR_RESET_TVCLK			0x000D0304
+#define MDP3_REG_EXPORT_MISR_TVCLK			0x000D0308
+#define MDP3_REG_MISR_CURR_VAL_TVCLK			0x000D030C
+#define MDP3_REG_CAPTURED_TVCLK			0x000D0310
+#define MDP3_REG_MISR_CAPT_VAL_TVCLK			0x000D0314
+
+/* Select DSI operation type(CMD/VIDEO) */
+#define MDP3_REG_MODE_DSI_PCLK				0x000D0400
+#define MDP3_REG_MODE_DSI_PCLK_BLOCK_DSI_CMD		0x10
+#define MDP3_REG_MODE_DSI_PCLK_BLOCK_DSI_VIDEO1	0x20
+#define MDP3_REG_MODE_DSI_PCLK_BLOCK_DSI_VIDEO2	0x30
+/* RESET DSI MISR STATE */
+#define MDP3_REG_MISR_RESET_DSI_PCLK			0x000D0404
+
+/* For reading MISR State(1) and driving data on test bus(0) */
+#define MDP3_REG_EXPORT_MISR_DSI_PCLK			0x000D0408
+/* Read MISR signature */
+#define MDP3_REG_MISR_CURR_VAL_DSI_PCLK		0x000D040C
+
+/* MISR status Bit0 (1) Capture Done */
+#define MDP3_REG_CAPTURED_DSI_PCLK			0x000D0410
+#define MDP3_REG_MISR_CAPT_VAL_DSI_PCLK		0x000D0414
+#define MDP3_REG_MISR_TESTBUS_CAPT_VAL			0x000D0600
+
+/*interface*/
+#define MDP3_REG_LCDC_EN				0xE0000
+#define MDP3_REG_LCDC_HSYNC_CTL				0xE0004
+#define MDP3_REG_LCDC_VSYNC_PERIOD			0xE0008
+#define MDP3_REG_LCDC_VSYNC_PULSE_WIDTH			0xE000C
+#define MDP3_REG_LCDC_DISPLAY_HCTL			0xE0010
+#define MDP3_REG_LCDC_DISPLAY_V_START			0xE0014
+#define MDP3_REG_LCDC_DISPLAY_V_END			0xE0018
+#define MDP3_REG_LCDC_ACTIVE_HCTL			0xE001C
+#define MDP3_REG_LCDC_ACTIVE_V_START			0xE0020
+#define MDP3_REG_LCDC_ACTIVE_V_END			0xE0024
+#define MDP3_REG_LCDC_BORDER_COLOR			0xE0028
+#define MDP3_REG_LCDC_UNDERFLOW_CTL			0xE002C
+#define MDP3_REG_LCDC_HSYNC_SKEW			0xE0030
+#define MDP3_REG_LCDC_TEST_CTL				0xE0034
+#define MDP3_REG_LCDC_CTL_POLARITY			0xE0038
+#define MDP3_REG_LCDC_TEST_COL_VAR1			0xE003C
+#define MDP3_REG_LCDC_TEST_COL_VAR2			0xE0040
+#define MDP3_REG_LCDC_UFLOW_HIDING_CTL			0xE0044
+#define MDP3_REG_LCDC_LOST_PIXEL_CNT_VALUE		0xE0048
+
+#define MDP3_REG_DSI_VIDEO_EN				0xF0000
+#define MDP3_REG_DSI_VIDEO_HSYNC_CTL			0xF0004
+#define MDP3_REG_DSI_VIDEO_VSYNC_PERIOD			0xF0008
+#define MDP3_REG_DSI_VIDEO_VSYNC_PULSE_WIDTH		0xF000C
+#define MDP3_REG_DSI_VIDEO_DISPLAY_HCTL			0xF0010
+#define MDP3_REG_DSI_VIDEO_DISPLAY_V_START		0xF0014
+#define MDP3_REG_DSI_VIDEO_DISPLAY_V_END		0xF0018
+#define MDP3_REG_DSI_VIDEO_ACTIVE_HCTL			0xF001C
+#define MDP3_REG_DSI_VIDEO_ACTIVE_V_START		0xF0020
+#define MDP3_REG_DSI_VIDEO_ACTIVE_V_END			0xF0024
+#define MDP3_REG_DSI_VIDEO_BORDER_COLOR			0xF0028
+#define MDP3_REG_DSI_VIDEO_UNDERFLOW_CTL		0xF002C
+#define MDP3_REG_DSI_VIDEO_HSYNC_SKEW			0xF0030
+#define MDP3_REG_DSI_VIDEO_TEST_CTL			0xF0034
+#define MDP3_REG_DSI_VIDEO_CTL_POLARITY			0xF0038
+#define MDP3_REG_DSI_VIDEO_TEST_COL_VAR1		0xF003C
+#define MDP3_REG_DSI_VIDEO_TEST_COL_VAR2		0xF0040
+#define MDP3_REG_DSI_VIDEO_UFLOW_HIDING_CTL		0xF0044
+#define MDP3_REG_DSI_VIDEO_LOST_PIXEL_CNT_VALUE		0xF0048
+
+#define MDP3_REG_DSI_CMD_MODE_ID_MAP			0xF1000
+#define MDP3_REG_DSI_CMD_MODE_TRIGGER_EN		0xF1004
+
+#define MDP3_PPP_CSC_PFMVn(n)		(0x40400 + (4 * (n)))
+#define MDP3_PPP_CSC_PRMVn(n)		(0x40440 + (4 * (n)))
+#define MDP3_PPP_CSC_PBVn(n)		(0x40500 + (4 * (n)))
+#define MDP3_PPP_CSC_PLVn(n)		(0x40580 + (4 * (n)))
+
+#define MDP3_PPP_CSC_SFMVn(n)		(0x40480 + (4 * (n)))
+#define MDP3_PPP_CSC_SRMVn(n)		(0x404C0 + (4 * (n)))
+#define MDP3_PPP_CSC_SBVn(n)		(0x40540 + (4 * (n)))
+#define MDP3_PPP_CSC_SLVn(n)		(0x405C0 + (4 * (n)))
+
+#define MDP3_PPP_SCALE_PHASEX_INIT	0x1013C
+#define MDP3_PPP_SCALE_PHASEY_INIT	0x10140
+#define MDP3_PPP_SCALE_PHASEX_STEP	0x10144
+#define MDP3_PPP_SCALE_PHASEY_STEP	0x10148
+
+#define MDP3_PPP_OP_MODE			0x10138
+
+#define MDP3_PPP_PRE_LUT			0x40800
+#define MDP3_PPP_POST_LUT			0x40C00
+#define MDP3_PPP_LUTn(n)			((4 * (n)))
+
+#define MDP3_PPP_BG_EDGE_REP		0x101BC
+#define MDP3_PPP_SRC_EDGE_REP		0x101B8
+
+#define MDP3_PPP_STRIDE_MASK		0x3FFF
+#define MDP3_PPP_STRIDE1_OFFSET		16
+
+#define MDP3_PPP_XY_MASK			0x0FFF
+#define MDP3_PPP_XY_OFFSET			16
+
+#define MDP3_PPP_SRC_SIZE			0x10108
+#define MDP3_PPP_SRCP0_ADDR			0x1010C
+#define MDP3_PPP_SRCP1_ADDR			0x10110
+#define MDP3_PPP_SRCP3_ADDR			0x10118
+#define MDP3_PPP_SRC_YSTRIDE1_ADDR	0x1011C
+#define MDP3_PPP_SRC_YSTRIDE2_ADDR	0x10120
+#define MDP3_PPP_SRC_FORMAT			0x10124
+#define MDP3_PPP_SRC_UNPACK_PATTERN1	0x10128
+#define MDP3_PPP_SRC_UNPACK_PATTERN2	0x1012C
+
+#define MDP3_PPP_OUT_FORMAT			0x10150
+#define MDP3_PPP_OUT_PACK_PATTERN1	0x10154
+#define MDP3_PPP_OUT_PACK_PATTERN2	0x10158
+#define MDP3_PPP_OUT_SIZE			0x10164
+#define MDP3_PPP_OUTP0_ADDR			0x10168
+#define MDP3_PPP_OUTP1_ADDR			0x1016C
+#define MDP3_PPP_OUTP3_ADDR			0x10174
+#define MDP3_PPP_OUT_YSTRIDE1_ADDR	0x10178
+#define MDP3_PPP_OUT_YSTRIDE2_ADDR	0x1017C
+#define MDP3_PPP_OUT_XY				0x1019C
+
+#define MDP3_PPP_BGP0_ADDR			0x101C0
+#define MDP3_PPP_BGP1_ADDR			0x101C4
+#define MDP3_PPP_BGP3_ADDR			0x101C8
+#define MDP3_PPP_BG_YSTRIDE1_ADDR	0x101CC
+#define MDP3_PPP_BG_YSTRIDE2_ADDR	0x101D0
+#define MDP3_PPP_BG_FORMAT			0x101D4
+#define MDP3_PPP_BG_UNPACK_PATTERN1	0x101D8
+#define MDP3_PPP_BG_UNPACK_PATTERN2	0x101DC
+
+#define MDP3_TFETCH_SOLID_FILL		0x20004
+#define MDP3_TFETCH_FILL_COLOR		0x20040
+
+#define MDP3_PPP_BLEND_PARAM		0x1014C
+
+#define MDP3_PPP_BLEND_BG_ALPHA_SEL	0x70010
+
+#define MDP3_PPP_ACTIVE BIT(0)
+
+/*interrupt mask*/
+
+#define MDP3_INTR_DP0_ROI_DONE_BIT			BIT(0)
+#define MDP3_INTR_DP1_ROI_DONE_BIT			BIT(1)
+#define MDP3_INTR_DMA_S_DONE_BIT			BIT(2)
+#define MDP3_INTR_DMA_E_DONE_BIT			BIT(3)
+#define MDP3_INTR_DP0_TERMINAL_FRAME_DONE_BIT		BIT(4)
+#define MDP3_INTR_DP1_TERMINAL_FRAME_DONE_BIT		BIT(5)
+#define MDP3_INTR_DMA_TV_DONE_BIT			BIT(6)
+#define MDP3_INTR_TV_ENCODER_UNDER_RUN_BIT		BIT(7)
+#define MDP3_INTR_SYNC_PRIMARY_LINE_BIT			BIT(8)
+#define MDP3_INTR_SYNC_SECONDARY_LINE_BIT		BIT(9)
+#define MDP3_INTR_SYNC_EXTERNAL_LINE_BIT		BIT(10)
+#define MDP3_INTR_DP0_FETCH_DONE_BIT			BIT(11)
+#define MDP3_INTR_DP1_FETCH_DONE_BIT			BIT(12)
+#define MDP3_INTR_TV_OUT_FRAME_START_BIT		BIT(13)
+#define MDP3_INTR_DMA_P_DONE_BIT			BIT(14)
+#define MDP3_INTR_LCDC_START_OF_FRAME_BIT		BIT(15)
+#define MDP3_INTR_LCDC_UNDERFLOW_BIT			BIT(16)
+#define MDP3_INTR_DMA_P_LINE_BIT			BIT(17)
+#define MDP3_INTR_DMA_S_LINE_BIT			BIT(18)
+#define MDP3_INTR_DMA_E_LINE_BIT			BIT(19)
+#define MDP3_INTR_DMA_P_HISTO_BIT			BIT(20)
+#define MDP3_INTR_DTV_OUT_DONE_BIT			BIT(21)
+#define MDP3_INTR_DTV_OUT_START_OF_FRAME_BIT		BIT(22)
+#define MDP3_INTR_DTV_OUT_UNDERFLOW_BIT			BIT(23)
+#define MDP3_INTR_DTV_OUT_LINE_BIT			BIT(24)
+#define MDP3_INTR_DMA_P_AUTO_FREFRESH_START_BIT		BIT(25)
+#define MDP3_INTR_DMA_S_AUTO_FREFRESH_START_BIT		BIT(26)
+#define MDP3_INTR_QPIC_EOF_ENABLE_BIT			BIT(27)
+
/*
 * Interrupt indices: each enumerator is the bit position of the matching
 * MDP3_INTR_*_BIT mask above (e.g. MDP3_INTR_DMA_P_DONE == 14 pairs with
 * MDP3_INTR_DMA_P_DONE_BIT == BIT(14)). Keep the two lists in sync.
 */
enum {
	MDP3_INTR_DP0_ROI_DONE,
	MDP3_INTR_DP1_ROI_DONE,
	MDP3_INTR_DMA_S_DONE,
	MDP3_INTR_DMA_E_DONE,
	MDP3_INTR_DP0_TERMINAL_FRAME_DONE,
	MDP3_INTR_DP1_TERMINAL_FRAME_DONE,
	MDP3_INTR_DMA_TV_DONE,
	MDP3_INTR_TV_ENCODER_UNDER_RUN,
	MDP3_INTR_SYNC_PRIMARY_LINE,
	MDP3_INTR_SYNC_SECONDARY_LINE,
	MDP3_INTR_SYNC_EXTERNAL_LINE,
	MDP3_INTR_DP0_FETCH_DONE,
	MDP3_INTR_DP1_FETCH_DONE,
	MDP3_INTR_TV_OUT_FRAME_START,
	MDP3_INTR_DMA_P_DONE,
	MDP3_INTR_LCDC_START_OF_FRAME,
	MDP3_INTR_LCDC_UNDERFLOW,
	MDP3_INTR_DMA_P_LINE,
	MDP3_INTR_DMA_S_LINE,
	MDP3_INTR_DMA_E_LINE,
	MDP3_INTR_DMA_P_HISTO,
	MDP3_INTR_DTV_OUT_DONE,
	MDP3_INTR_DTV_OUT_START_OF_FRAME,
	MDP3_INTR_DTV_OUT_UNDERFLOW,
	MDP3_INTR_DTV_OUT_LINE,
	MDP3_INTR_DMA_P_AUTO_FREFRESH_START,
	MDP3_INTR_DMA_S_AUTO_FREFRESH_START,
	MDP3_INTR_QPIC_EOF_ENABLE,
};
+
+#define MDP3_DMA_P_HIST_INTR_RESET_DONE_BIT		BIT(0)
+#define MDP3_DMA_P_HIST_INTR_HIST_DONE_BIT		BIT(1)
+#define MDP3_PPP_DONE MDP3_INTR_DP0_ROI_DONE
+
+#define MDP3_DMA_P_BUSY_BIT				BIT(6)
+
+#endif /* MDP3_HWIO_H */
diff --git a/drivers/video/fbdev/msm/mdp3_layer.c b/drivers/video/fbdev/msm/mdp3_layer.c
new file mode 100644
index 0000000..6c45395
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdp3_layer.c
@@ -0,0 +1,345 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/pm_runtime.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/msm_mdp.h>
+#include <linux/memblock.h>
+#include <linux/sync.h>
+#include <linux/sw_sync.h>
+#include <linux/file.h>
+
+#include <soc/qcom/event_timer.h>
+#include "mdp3_ctrl.h"
+#include "mdp3.h"
+#include "mdp3_ppp.h"
+#include "mdp3_ctrl.h"
+#include "mdss_fb.h"
+
+/*
+ * Fence types handed back to userspace on an atomic commit: a release fence
+ * signals when a buffer may be reused, a retire fence when it has left the
+ * display pipeline.
+ */
+enum {
+	MDP3_RELEASE_FENCE = 0,
+	MDP3_RETIRE_FENCE,
+};
+
+/*
+ * __mdp3_create_fence() - create a release or retire fence and install it
+ * on a newly allocated fd returned through @fence_fd.
+ *
+ * For a retire fence on a command-mode panel the fence is placed on the
+ * session's vsync timeline (one past the current value plus the pending
+ * retire count); in every other case the fb sync timeline is used at the
+ * caller-supplied @value.
+ *
+ * Returns the fence on success, NULL when fence or fd allocation fails, or
+ * ERR_PTR(-EPERM) when a cmd-panel retire fence is requested but no vsync
+ * timeline exists. Callers therefore check with IS_ERR_OR_NULL().
+ */
+static struct sync_fence *__mdp3_create_fence(struct msm_fb_data_type *mfd,
+	struct msm_sync_pt_data *sync_pt_data, u32 fence_type,
+	int *fence_fd, int value)
+{
+	struct sync_fence *sync_fence = NULL;
+	char fence_name[32];
+	struct mdp3_session_data *mdp3_session;
+
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+
+	if (fence_type == MDP3_RETIRE_FENCE)
+		snprintf(fence_name, sizeof(fence_name), "fb%d_retire",
+			mfd->index);
+	else
+		snprintf(fence_name, sizeof(fence_name), "fb%d_release",
+			mfd->index);
+
+	/* Cmd-panel retire fences track vsync, not the commit timeline. */
+	if ((fence_type == MDP3_RETIRE_FENCE) &&
+		(mfd->panel.type == MIPI_CMD_PANEL)) {
+		if (mdp3_session->vsync_timeline) {
+			value = mdp3_session->vsync_timeline->value + 1 +
+				mdp3_session->retire_cnt++;
+			sync_fence = mdss_fb_sync_get_fence(
+					mdp3_session->vsync_timeline,
+						fence_name, value);
+		} else {
+			return ERR_PTR(-EPERM);
+		}
+	} else {
+		sync_fence = mdss_fb_sync_get_fence(sync_pt_data->timeline,
+			fence_name, value);
+	}
+
+	if (IS_ERR_OR_NULL(sync_fence)) {
+		pr_err("%s: unable to retrieve release fence\n", fence_name);
+		goto end;
+	}
+
+	/* get fence fd */
+	*fence_fd = get_unused_fd_flags(0);
+	if (*fence_fd < 0) {
+		pr_err("%s: get_unused_fd_flags failed error:0x%x\n",
+			fence_name, *fence_fd);
+		sync_fence_put(sync_fence);
+		sync_fence = NULL;
+		goto end;
+	}
+
+	sync_fence_install(sync_fence, *fence_fd);
+end:
+
+	return sync_fence;
+}
+
+/*
+ * __handle_buffer_fences() - copy sync fences and return release
+ * fence to caller.
+ *
+ * This function copies all input sync fences to acquire fence array and
+ * returns release fences to caller. It acts like buff_sync ioctl.
+ */
+/*
+ * __mdp3_handle_buffer_fences() - wait on pending fences, collect the
+ * acquire fences attached to each input layer, then create the release and
+ * retire fences returned to the caller through @commit. Acts like the
+ * legacy buf_sync ioctl.
+ *
+ * Returns 0 on success. On failure every fence collected in this pass is
+ * released, commit->release_fence/retire_fence are set to -1, and a
+ * negative errno is returned.
+ */
+static int __mdp3_handle_buffer_fences(struct msm_fb_data_type *mfd,
+	struct mdp_layer_commit_v1 *commit, struct mdp_input_layer *layer_list)
+{
+	struct sync_fence *fence, *release_fence, *retire_fence;
+	struct msm_sync_pt_data *sync_pt_data = NULL;
+	struct mdp_input_layer *layer;
+	int value;
+	int ret = 0;
+
+	u32 acq_fen_count, i;
+	u32 layer_count = commit->input_layer_cnt;
+
+	sync_pt_data = &mfd->mdp_sync_pt_data;
+
+	i = mdss_fb_wait_for_fence(sync_pt_data);
+	if (i > 0)
+		pr_warn("%s: waited on %d active fences\n",
+			sync_pt_data->fence_name, i);
+
+	mutex_lock(&sync_pt_data->sync_mutex);
+	for (i = 0, acq_fen_count = 0; i < layer_count; i++) {
+		layer = &layer_list[i];
+
+		/* A negative fd means no acquire fence for this layer. */
+		if (layer->buffer.fence < 0)
+			continue;
+
+		fence = sync_fence_fdget(layer->buffer.fence);
+		if (!fence) {
+			pr_err("%s: sync fence get failed! fd=%d\n",
+				sync_pt_data->fence_name, layer->buffer.fence);
+			ret = -EINVAL;
+			goto sync_fence_err;
+		}
+		sync_pt_data->acq_fen[acq_fen_count++] = fence;
+	}
+
+	sync_pt_data->acq_fen_cnt = acq_fen_count;
+
+	/* Fence value: current timeline plus everything already queued. */
+	value = sync_pt_data->timeline_value + sync_pt_data->threshold +
+			atomic_read(&sync_pt_data->commit_cnt);
+
+	release_fence = __mdp3_create_fence(mfd, sync_pt_data,
+		MDP3_RELEASE_FENCE, &commit->release_fence, value);
+	if (IS_ERR_OR_NULL(release_fence)) {
+		pr_err("unable to retrieve release fence\n");
+		/* PTR_ERR(NULL) is 0: map a NULL fence to a real error. */
+		ret = IS_ERR(release_fence) ?
+			PTR_ERR(release_fence) : -ENOMEM;
+		goto release_fence_err;
+	}
+
+	retire_fence = __mdp3_create_fence(mfd, sync_pt_data,
+		MDP3_RETIRE_FENCE, &commit->retire_fence, value);
+	if (IS_ERR_OR_NULL(retire_fence)) {
+		pr_err("unable to retrieve retire fence\n");
+		ret = IS_ERR(retire_fence) ?
+			PTR_ERR(retire_fence) : -ENOMEM;
+		goto retire_fence_err;
+	}
+
+	mutex_unlock(&sync_pt_data->sync_mutex);
+	return ret;
+
+retire_fence_err:
+	put_unused_fd(commit->release_fence);
+	sync_fence_put(release_fence);
+release_fence_err:
+	commit->retire_fence = -1;
+	commit->release_fence = -1;
+sync_fence_err:
+	/*
+	 * Use the local count: when jumping here from the collection loop
+	 * sync_pt_data->acq_fen_cnt has not been updated yet, and relying
+	 * on its stale value would leak the fences collected in this pass.
+	 */
+	for (i = 0; i < acq_fen_count; i++)
+		sync_fence_put(sync_pt_data->acq_fen[i]);
+	sync_pt_data->acq_fen_cnt = 0;
+
+	mutex_unlock(&sync_pt_data->sync_mutex);
+
+	return ret;
+}
+
+/*
+ * __map_layer_buffer() - map input layer buffer
+ *
+ */
+/*
+ * __mdp3_map_layer_buffer() - map plane 0 of the input layer buffer and
+ * queue it on the session's input buffer queue for DMA.
+ *
+ * Only single-plane mapping is supported. The buffer must be at least
+ * stride * height bytes as configured in the DMA source config; undersized
+ * or unmappable buffers are released and rejected.
+ *
+ * Returns 0 on success or a negative errno on failure.
+ */
+static int __mdp3_map_layer_buffer(struct msm_fb_data_type *mfd,
+		struct mdp_input_layer *input_layer)
+{
+	struct mdp3_session_data *mdp3_session = mfd->mdp.private1;
+	struct mdp3_dma *dma = mdp3_session->dma;
+	struct mdp_input_layer *layer = NULL;
+	struct mdp_layer_buffer *buffer;
+	struct msmfb_data img;
+	bool is_panel_type_cmd = false;
+	struct mdp3_img_data data;
+	int rc = 0;
+
+	layer = &input_layer[0];
+	buffer = &layer->buffer;
+
+	/* current implementation only supports one plane mapping */
+	if (buffer->planes[0].fd < 0) {
+		pr_err("invalid file descriptor for layer buffer\n");
+		/* Fix: report the failure instead of returning success. */
+		rc = -EINVAL;
+		goto err;
+	}
+
+	memset(&img, 0, sizeof(img));
+	img.memory_id = buffer->planes[0].fd;
+	img.offset = buffer->planes[0].offset;
+
+	memset(&data, 0, sizeof(struct mdp3_img_data));
+
+	if (mfd->panel.type == MIPI_CMD_PANEL)
+		is_panel_type_cmd = true;
+	if (is_panel_type_cmd) {
+		rc = mdp3_iommu_enable(MDP3_CLIENT_DMA_P);
+		if (rc) {
+			pr_err("fail to enable iommu\n");
+			return rc;
+		}
+	}
+
+	rc = mdp3_get_img(&img, &data, MDP3_CLIENT_DMA_P);
+	if (rc) {
+		pr_err("fail to get overlay buffer\n");
+		goto err;
+	}
+
+	/* Reject buffers smaller than the configured DMA frame size. */
+	if (data.len < dma->source_config.stride * dma->source_config.height) {
+		pr_err("buf size(0x%lx) is smaller than dma config(0x%x)\n",
+			data.len, (dma->source_config.stride *
+			dma->source_config.height));
+		mdp3_put_img(&data, MDP3_CLIENT_DMA_P);
+		rc = -EINVAL;
+		goto err;
+	}
+
+	rc = mdp3_bufq_push(&mdp3_session->bufq_in, &data);
+	if (rc) {
+		pr_err("fail to queue the overlay buffer, buffer drop\n");
+		mdp3_put_img(&data, MDP3_CLIENT_DMA_P);
+		goto err;
+	}
+	rc = 0;
+err:
+	if (is_panel_type_cmd)
+		mdp3_iommu_disable(MDP3_CLIENT_DMA_P);
+	return rc;
+}
+
+/*
+ * mdp3_layer_pre_commit() - prepare an atomic commit: reprogram the DMA
+ * source/output config if the first layer's format or stride changed,
+ * handle the commit's sync fences, and map/queue the layer buffer.
+ *
+ * Only layer_list[0] is consumed — MDP3 drives a single input layer.
+ * @file is unused here; it is part of the common mdp layer-ops signature.
+ *
+ * Returns 0 on success (including a NULL commit with no input layers) or a
+ * negative errno on fence/buffer failures.
+ */
+int mdp3_layer_pre_commit(struct msm_fb_data_type *mfd,
+	struct file *file, struct mdp_layer_commit_v1 *commit)
+{
+	int ret;
+	struct mdp_input_layer *layer, *layer_list;
+	struct mdp3_session_data *mdp3_session;
+	struct mdp3_dma *dma;
+	int layer_count = commit->input_layer_cnt;
+	int stride, format;
+
+	/* Handle NULL commit */
+	if (!layer_count) {
+		pr_debug("Handle NULL commit\n");
+		return 0;
+	}
+
+	mdp3_session = mfd->mdp.private1;
+	dma = mdp3_session->dma;
+
+	mutex_lock(&mdp3_session->lock);
+
+	/* Drop any buffers still queued from a previous commit. */
+	mdp3_bufq_deinit(&mdp3_session->bufq_in);
+
+	layer_list = commit->input_layers;
+	layer = &layer_list[0];
+
+	stride = layer->buffer.width * ppp_bpp(layer->buffer.format);
+	format = mdp3_ctrl_get_source_format(layer->buffer.format);
+	pr_debug("stride:%d layer_width:%d", stride, layer->buffer.width);
+
+	/* Defer the actual register update to the kickoff path. */
+	if ((dma->source_config.format != format) ||
+			(dma->source_config.stride != stride)) {
+		dma->source_config.format = format;
+		dma->source_config.stride = stride;
+		dma->output_config.pack_pattern =
+			mdp3_ctrl_get_pack_pattern(layer->buffer.format);
+		dma->update_src_cfg = true;
+	}
+	/* Mark the (single) overlay as in use — TODO confirm id semantics. */
+	mdp3_session->overlay.id = 1;
+
+	ret = __mdp3_handle_buffer_fences(mfd, commit, layer_list);
+	if (ret) {
+		pr_err("Failed to handle buffer fences\n");
+		mutex_unlock(&mdp3_session->lock);
+		return ret;
+	}
+
+	ret = __mdp3_map_layer_buffer(mfd, layer);
+	if (ret) {
+		pr_err("Failed to map buffer\n");
+		mutex_unlock(&mdp3_session->lock);
+		return ret;
+	}
+
+	pr_debug("mdp3 precommit ret = %d\n", ret);
+	mutex_unlock(&mdp3_session->lock);
+	return ret;
+}
+
+/*
+ * mdp3_layer_atomic_validate() - validate input layers
+ * @mfd:	Framebuffer data structure for display
+ * @commit:	Commit version-1 structure for display
+ *
+ * This function validates only the input layers received from the client.
+ * It does not perform any validation for the mdp_output_layer defined for
+ * writeback display.
+ */
+/*
+ * Validate commit parameters and display state before a commit is queued.
+ * Rejects NULL arguments and a powered-off interface; if the session is
+ * still showing the bootloader splash, reset the controller first.
+ * @file is unused here; it is part of the common mdp layer-ops signature.
+ *
+ * Returns 0 when the commit may proceed, -EINVAL/-EPERM otherwise.
+ */
+int mdp3_layer_atomic_validate(struct msm_fb_data_type *mfd,
+	struct file *file, struct mdp_layer_commit_v1 *commit)
+{
+	struct mdp3_session_data *mdp3_session;
+
+	if (!mfd || !commit) {
+		pr_err("invalid input params\n");
+		return -EINVAL;
+	}
+
+	if (mdss_fb_is_power_off(mfd)) {
+		pr_err("display interface is in off state fb:%d\n",
+			mfd->index);
+		return -EPERM;
+	}
+
+	mdp3_session = mfd->mdp.private1;
+
+	/* Take over the display from the bootloader splash screen. */
+	if (mdp3_session->in_splash_screen) {
+		mdp3_ctrl_reset(mfd);
+		mdp3_session->in_splash_screen = 0;
+	}
+
+	return 0;
+}
+
diff --git a/drivers/video/fbdev/msm/mdp3_ppp.c b/drivers/video/fbdev/msm/mdp3_ppp.c
new file mode 100644
index 0000000..7964cf0
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdp3_ppp.c
@@ -0,0 +1,1733 @@
+/* Copyright (c) 2007, 2013-2014, 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2007 Google Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/file.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#include <linux/sync.h>
+#include <linux/sw_sync.h>
+#include "linux/proc_fs.h"
+#include <linux/delay.h>
+
+#include "mdss_fb.h"
+#include "mdp3_ppp.h"
+#include "mdp3_hwio.h"
+#include "mdp3.h"
+#include "mdss_debug.h"
+
+#define MDP_IS_IMGTYPE_BAD(x) ((x) >= MDP_IMGTYPE_LIMIT)
+#define MDP_RELEASE_BW_TIMEOUT 50
+
+#define MDP_PPP_MAX_BPP 4
+#define MDP_PPP_DYNAMIC_FACTOR 3
+#define MDP_PPP_MAX_READ_WRITE 3
+#define MDP_PPP_MAX_WIDTH	0xFFF
+#define ENABLE_SOLID_FILL	0x2
+#define DISABLE_SOLID_FILL	0x0
+#define BLEND_LATENCY		3
+#define CSC_LATENCY		1
+
+#define YUV_BW_FUDGE_NUM	10
+#define YUV_BW_FUDGE_DEN	10
+
+struct ppp_resource ppp_res;
+
+/*
+ * Source/destination pixel formats the PPP blit engine accepts, indexed by
+ * the userspace MDP_* format code; anything not listed is rejected by
+ * mdp3_ppp_verify_fmt().
+ */
+static const bool valid_fmt[MDP_IMGTYPE_LIMIT] = {
+	[MDP_RGB_565] = true,
+	[MDP_BGR_565] = true,
+	[MDP_RGB_888] = true,
+	[MDP_BGR_888] = true,
+	[MDP_BGRA_8888] = true,
+	[MDP_RGBA_8888] = true,
+	[MDP_ARGB_8888] = true,
+	[MDP_XRGB_8888] = true,
+	[MDP_RGBX_8888] = true,
+	[MDP_Y_CRCB_H2V2] = true,
+	[MDP_Y_CBCR_H2V2] = true,
+	[MDP_Y_CBCR_H2V2_ADRENO] = true,
+	[MDP_Y_CBCR_H2V2_VENUS] = true,
+	[MDP_YCRYCB_H2V1] = true,
+	[MDP_Y_CBCR_H2V1] = true,
+	[MDP_Y_CRCB_H2V1] = true,
+	[MDP_BGRX_8888] = true,
+};
+
+#define MAX_LIST_WINDOW 16
+#define MDP3_PPP_MAX_LIST_REQ 8
+
+/*
+ * One batched blit request: up to MAX_LIST_WINDOW sub-blits together with
+ * their mapped source/destination buffers and the sync fences that gate
+ * (acq_fen) and signal (cur/last_rel_*) the batch.
+ */
+struct blit_req_list {
+	int count;
+	struct mdp_blit_req req_list[MAX_LIST_WINDOW];
+	struct mdp3_img_data src_data[MAX_LIST_WINDOW];
+	struct mdp3_img_data dst_data[MAX_LIST_WINDOW];
+	struct sync_fence *acq_fen[MDP_MAX_FENCE_FD];
+	u32 acq_fen_cnt;
+	int cur_rel_fen_fd;
+	struct sync_pt *cur_rel_sync_pt;
+	struct sync_fence *cur_rel_fence;
+	struct sync_fence *last_rel_fence;
+};
+
+/*
+ * Fixed-size ring buffer of pending blit request lists; push_idx/pop_idx
+ * wrap modulo MDP3_PPP_MAX_LIST_REQ and count tracks occupancy.
+ */
+struct blit_req_queue {
+	struct blit_req_list req[MDP3_PPP_MAX_LIST_REQ];
+	int count;
+	int push_idx;
+	int pop_idx;
+};
+
+/*
+ * Global state of the PPP blit engine: request queue plus the kthread
+ * worker that drains it, the sw_sync timeline used to signal completion,
+ * and deferred bandwidth/clock release bookkeeping.
+ */
+struct ppp_status {
+	bool wait_for_pop;
+	struct completion ppp_comp;
+	struct completion pop_q_comp;
+	struct mutex req_mutex; /* Protect request queue */
+	struct mutex config_ppp_mutex; /* Only one client configure register */
+	struct msm_fb_data_type *mfd;
+
+	/* Blits run on a dedicated kthread worker, off the caller's context. */
+	struct kthread_work blit_work;
+	struct kthread_worker kworker;
+	struct task_struct *blit_thread;
+	struct blit_req_queue req_q;
+
+	struct sw_sync_timeline *timeline;
+	int timeline_value;
+
+	/* Drop bus bandwidth votes after MDP_RELEASE_BW_TIMEOUT of idleness. */
+	struct timer_list free_bw_timer;
+	struct work_struct free_bw_work;
+	bool bw_update;
+	bool bw_on;
+	u32 mdp_clk;
+};
+
+static struct ppp_status *ppp_stat;
+static bool is_blit_optimization_possible(struct blit_req_list *req, int indx);
+
+/*
+ * Scale @val by @numer/@denom in 64-bit arithmetic; do_div() keeps the
+ * division safe on 32-bit targets.
+ */
+static inline u64 fudge_factor(u64 val, u32 numer, u32 denom)
+{
+	u64 result = (val * (u64)numer);
+
+	do_div(result, denom);
+	return result;
+}
+
+/*
+ * Bytes per pixel for @format, resolving the MDP_FB_FORMAT placeholder to
+ * the framebuffer's own format. Returns a negative value for unknown
+ * formats (ppp_bpp() reports the failure).
+ */
+int ppp_get_bpp(uint32_t format, uint32_t fb_format)
+{
+	int bpp = -EINVAL;
+
+	if (format == MDP_FB_FORMAT)
+		format = fb_format;
+
+	bpp = ppp_bpp(format);
+	if (bpp <= 0)
+		pr_err("%s incorrect format %d\n", __func__, format);
+	return bpp;
+}
+
+/*
+ * Map the userspace image described by @img for PPP access and fill @data
+ * with the mapping. Width is bounded by MDP_PPP_MAX_WIDTH and the format
+ * must have a known bpp.
+ *
+ * NOTE(review): only flags/memory_id/offset of fb_data are initialized
+ * here; confirm mdp3_get_img() reads no other msmfb_data fields.
+ */
+int mdp3_ppp_get_img(struct mdp_img *img, struct mdp_blit_req *req,
+		struct mdp3_img_data *data)
+{
+	struct msmfb_data fb_data;
+	uint32_t stride;
+	int bpp = ppp_bpp(img->format);
+
+	if (bpp <= 0) {
+		pr_err("%s incorrect format %d\n", __func__, img->format);
+		return -EINVAL;
+	}
+
+	if (img->width > MDP_PPP_MAX_WIDTH) {
+		pr_err("%s incorrect width %d\n", __func__, img->width);
+		return -EINVAL;
+	}
+
+	fb_data.flags = img->priv;
+	fb_data.memory_id = img->memory_id;
+	fb_data.offset = 0;
+
+	/* 16 lines of padding — presumably for the 16-line tile workaround. */
+	stride = img->width * bpp;
+	data->padding = 16 * stride;
+
+	return mdp3_get_img(&fb_data, data, MDP3_CLIENT_PPP);
+}
+
+/* Check format */
+/* Check format */
+/*
+ * Reject out-of-range or unsupported src/dst formats. The range check must
+ * stay first: it bounds the index into valid_fmt[] for untrusted input.
+ */
+int mdp3_ppp_verify_fmt(struct mdp_blit_req *req)
+{
+	if (MDP_IS_IMGTYPE_BAD(req->src.format) ||
+	    MDP_IS_IMGTYPE_BAD(req->dst.format)) {
+		pr_err("%s: Color format out of range\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!valid_fmt[req->src.format] ||
+	    !valid_fmt[req->dst.format]) {
+		pr_err("%s: Color format not supported\n", __func__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/* Check resolution */
+/* Check resolution */
+/*
+ * Reject zero-sized images/ROIs and ROIs that exceed their image bounds.
+ * NOTE(review): x + w / y + h are u32 additions from untrusted input and
+ * could wrap around, passing the bound check — confirm whether an overflow
+ * check is needed here.
+ */
+int mdp3_ppp_verify_res(struct mdp_blit_req *req)
+{
+	if ((req->src.width == 0) || (req->src.height == 0) ||
+	    (req->src_rect.w == 0) || (req->src_rect.h == 0) ||
+	    (req->dst.width == 0) || (req->dst.height == 0) ||
+	    (req->dst_rect.w == 0) || (req->dst_rect.h == 0)) {
+		pr_err("%s: Height/width can't be 0\n", __func__);
+		return -EINVAL;
+	}
+
+	if (((req->src_rect.x + req->src_rect.w) > req->src.width) ||
+	    ((req->src_rect.y + req->src_rect.h) > req->src.height)) {
+		pr_err("%s: src roi larger than boundary\n", __func__);
+		return -EINVAL;
+	}
+
+	if (((req->dst_rect.x + req->dst_rect.w) > req->dst.width) ||
+	    ((req->dst_rect.y + req->dst_rect.h) > req->dst.height)) {
+		pr_err("%s: dst roi larger than boundary\n", __func__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/* scaling range check */
+/* scaling range check */
+/*
+ * Verify the x/y scale factors implied by src/dst ROIs stay within the
+ * hardware's MDP_MIN/MAX_*_SCALE_FACTOR range. For MDP_ROT_90 the dst
+ * width/height swap roles. YUV formats are rounded down to even widths
+ * (and heights for H2V2) to match chroma subsampling before checking.
+ */
+int mdp3_ppp_verify_scale(struct mdp_blit_req *req)
+{
+	u32 src_width, src_height, dst_width, dst_height;
+
+	src_width = req->src_rect.w;
+	src_height = req->src_rect.h;
+
+	if (req->flags & MDP_ROT_90) {
+		dst_width = req->dst_rect.h;
+		dst_height = req->dst_rect.w;
+	} else {
+		dst_width = req->dst_rect.w;
+		dst_height = req->dst_rect.h;
+	}
+
+	switch (req->dst.format) {
+	case MDP_Y_CRCB_H2V2:
+	case MDP_Y_CBCR_H2V2:
+		src_width = (src_width / 2) * 2;
+		src_height = (src_height / 2) * 2;
+		dst_width = (dst_width / 2) * 2;
+		dst_height = (dst_height / 2) * 2;
+		break;
+
+	case MDP_Y_CRCB_H2V1:
+	case MDP_Y_CBCR_H2V1:
+	case MDP_YCRYCB_H2V1:
+		src_width = (src_width / 2) * 2;
+		dst_width = (dst_width / 2) * 2;
+		break;
+
+	default:
+		break;
+	}
+
+	if (((MDP_SCALE_Q_FACTOR * dst_width) / src_width >
+	     MDP_MAX_X_SCALE_FACTOR)
+	    || ((MDP_SCALE_Q_FACTOR * dst_width) / src_width <
+		MDP_MIN_X_SCALE_FACTOR)) {
+		pr_err("%s: x req scale factor beyond capability\n", __func__);
+		return -EINVAL;
+	}
+
+	if (((MDP_SCALE_Q_FACTOR * dst_height) / src_height >
+	     MDP_MAX_Y_SCALE_FACTOR)
+	    || ((MDP_SCALE_Q_FACTOR * dst_height) / src_height <
+		MDP_MIN_Y_SCALE_FACTOR)) {
+		pr_err("%s: y req scale factor beyond capability\n", __func__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/* operation check */
+/* operation check */
+/*
+ * Reject operations MDP3 cannot perform (deinterlace, sharpening), except
+ * when both flags are set together, which is repurposed as MDP_SMART_BLIT.
+ */
+int mdp3_ppp_verify_op(struct mdp_blit_req *req)
+{
+	/*
+	 * MDP_DEINTERLACE & MDP_SHARPENING Flags are not valid for MDP3
+	 * so using them together for MDP_SMART_BLIT.
+	 */
+	if ((req->flags & MDP_SMART_BLIT) == MDP_SMART_BLIT)
+		return 0;
+	if (req->flags & MDP_DEINTERLACE) {
+		pr_err("\n%s(): deinterlace not supported", __func__);
+		return -EINVAL;
+	}
+
+	if (req->flags & MDP_SHARPENING) {
+		pr_err("\n%s(): sharpening not supported", __func__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * Run all request checks (format, resolution, scale, operation).
+ * The results are OR-ed together, so a non-zero return only means
+ * "invalid" — it is not a specific errno when multiple checks fail.
+ */
+int mdp3_ppp_verify_req(struct mdp_blit_req *req)
+{
+	int rc;
+
+	if (req == NULL) {
+		pr_err("%s: req == null\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = mdp3_ppp_verify_fmt(req);
+	rc |= mdp3_ppp_verify_res(req);
+	rc |= mdp3_ppp_verify_scale(req);
+	rc |= mdp3_ppp_verify_op(req);
+
+	return rc;
+}
+
+/*
+ * Block until the PPP-done interrupt completes ppp_comp, or 200 ms elapse.
+ * Returns the remaining jiffies on completion, 0 on timeout (in which case
+ * the MDP is assumed hung and an error is logged).
+ */
+int mdp3_ppp_pipe_wait(void)
+{
+	int ret = 1;
+
+	/*
+	 * wait 200 ms for ppp operation to complete before declaring
+	 * the MDP hung
+	 */
+	ret = wait_for_completion_timeout(
+	  &ppp_stat->ppp_comp, msecs_to_jiffies(200));
+	if (!ret)
+		pr_err("%s: Timed out waiting for the MDP.\n",
+			__func__);
+
+	return ret;
+}
+
+/*
+ * Convert the userspace transparency key @old_tp into the register layout
+ * the PPP expects for @img's format: RGB565 keys are expanded to 888 by
+ * replicating the top bits into the low bits of each channel; 24-bit keys
+ * only have their R and G bytes swapped (RGB -> RBG channel order).
+ */
+uint32_t mdp3_calc_tpval(struct ppp_img_desc *img, uint32_t old_tp)
+{
+	uint32_t tpVal;
+	uint8_t plane_tp;
+
+	tpVal = 0;
+	if ((img->color_fmt == MDP_RGB_565)
+	    || (img->color_fmt == MDP_BGR_565)) {
+		/* transparent color conversion into 24 bpp */
+		plane_tp = (uint8_t) ((old_tp & 0xF800) >> 11);
+		tpVal |= ((plane_tp << 3) | ((plane_tp & 0x1C) >> 2)) << 16;
+		plane_tp = (uint8_t) (old_tp & 0x1F);
+		tpVal |= ((plane_tp << 3) | ((plane_tp & 0x1C) >> 2)) << 8;
+
+		/* 6-bit green channel: replicate 2 top bits instead of 3. */
+		plane_tp = (uint8_t) ((old_tp & 0x7E0) >> 5);
+		tpVal |= ((plane_tp << 2) | ((plane_tp & 0x30) >> 4));
+	} else {
+		/* 24bit RGB to RBG conversion */
+		tpVal = (old_tp & 0xFF00) >> 8;
+		tpVal |= (old_tp & 0xFF) << 8;
+		tpVal |= (old_tp & 0xFF0000);
+	}
+
+	return tpVal;
+}
+
+/* PPP-done interrupt callback: wake whoever waits in mdp3_ppp_pipe_wait(). */
+static void mdp3_ppp_intr_handler(int type, void *arg)
+{
+	complete(&ppp_stat->ppp_comp);
+}
+
+/* Register mdp3_ppp_intr_handler for the MDP3_PPP_DONE interrupt. */
+static int mdp3_ppp_callback_setup(void)
+{
+	int rc;
+	struct mdp3_intr_cb ppp_done_cb = {
+		.cb = mdp3_ppp_intr_handler,
+		.data = NULL,
+	};
+
+	rc = mdp3_set_intr_callback(MDP3_PPP_DONE, &ppp_done_cb);
+	return rc;
+}
+
+/*
+ * Start a PPP operation and synchronously wait for it to finish (up to the
+ * 200 ms timeout in mdp3_ppp_pipe_wait). The done-interrupt is enabled
+ * only for the duration of the operation.
+ */
+void mdp3_ppp_kickoff(void)
+{
+	init_completion(&ppp_stat->ppp_comp);
+	mdp3_irq_enable(MDP3_PPP_DONE);
+	ppp_enable();
+	ATRACE_BEGIN("mdp3_wait_for_ppp_comp");
+	mdp3_ppp_pipe_wait();
+	ATRACE_END("mdp3_wait_for_ppp_comp");
+	mdp3_irq_disable(MDP3_PPP_DONE);
+}
+
+/*
+ * Bytes-per-pixel description: average bpp as the ratio bpp_num/bpp_den
+ * (e.g. 3/2 for YUV420) and bpp_pln, the bpp of plane 0.
+ */
+struct bpp_info {
+	int bpp_num;
+	int bpp_den;
+	int bpp_pln;
+};
+
+/*
+ * Fill @bpp with the bytes-per-pixel ratio and plane-0 bpp for @format.
+ * Returns -EINVAL and leaves @bpp untouched for unknown formats, so
+ * callers must not rely on its contents on failure.
+ */
+int mdp3_get_bpp_info(int format, struct bpp_info *bpp)
+{
+	int rc = 0;
+
+	switch (format) {
+	case MDP_RGB_565:
+	case MDP_BGR_565:
+		bpp->bpp_num = 2;
+		bpp->bpp_den = 1;
+		bpp->bpp_pln = 2;
+		break;
+	case MDP_RGB_888:
+	case MDP_BGR_888:
+		bpp->bpp_num = 3;
+		bpp->bpp_den = 1;
+		bpp->bpp_pln = 3;
+		break;
+	case MDP_BGRA_8888:
+	case MDP_RGBA_8888:
+	case MDP_ARGB_8888:
+	case MDP_XRGB_8888:
+	case MDP_RGBX_8888:
+	case MDP_BGRX_8888:
+		bpp->bpp_num = 4;
+		bpp->bpp_den = 1;
+		bpp->bpp_pln = 4;
+		break;
+	case MDP_Y_CRCB_H2V2:
+	case MDP_Y_CBCR_H2V2:
+	case MDP_Y_CBCR_H2V2_ADRENO:
+	case MDP_Y_CBCR_H2V2_VENUS:
+		/* YUV420: 12 bits/pixel on average, 8-bit luma plane. */
+		bpp->bpp_num = 3;
+		bpp->bpp_den = 2;
+		bpp->bpp_pln = 1;
+		break;
+	case MDP_Y_CBCR_H2V1:
+	case MDP_Y_CRCB_H2V1:
+		bpp->bpp_num = 2;
+		bpp->bpp_den = 1;
+		bpp->bpp_pln = 1;
+		break;
+	case MDP_YCRYCB_H2V1:
+		bpp->bpp_num = 2;
+		bpp->bpp_den = 1;
+		bpp->bpp_pln = 2;
+		break;
+	default:
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+/*
+ * True when the request needs the blend stage: a transparency key, a
+ * constant alpha below opaque, or an alpha-carrying source format.
+ */
+bool mdp3_is_blend(struct mdp_blit_req *req)
+{
+	if ((req->transp_mask != MDP_TRANSP_NOP) ||
+		(req->alpha < MDP_ALPHA_NOP) ||
+		(req->src.format == MDP_ARGB_8888) ||
+		(req->src.format == MDP_BGRA_8888) ||
+		(req->src.format == MDP_RGBA_8888))
+		return true;
+	return false;
+}
+
+/*
+ * True when src and dst ROI dimensions differ, i.e. the blit scales.
+ * With MDP_ROT_90 the comparison is against the swapped dst dimensions.
+ */
+bool mdp3_is_scale(struct mdp_blit_req *req)
+{
+	if (req->flags & MDP_ROT_90) {
+		if (req->src_rect.w != req->dst_rect.h ||
+			req->src_rect.h != req->dst_rect.w)
+			return true;
+	} else {
+		if (req->src_rect.h != req->dst_rect.h ||
+			req->src_rect.w != req->dst_rect.w)
+			return true;
+	}
+	return false;
+}
+
+/*
+ * Estimate the MDP core clock needed for the batch @lreq at @fps: each
+ * blit contributes src pixels * a latency factor (in percent) covering
+ * downscale ratio, blending and CSC, plus solid-fill pixels, all scaled by
+ * the CLK fudge factor and rounded to a supported rate.
+ *
+ * NOTE(review): `scale` is only recomputed when mdp3_is_scale() is true,
+ * so a non-scaled request inherits the previous iteration's value —
+ * confirm whether that carry-over is intentional (conservative vote) or
+ * should reset to 100 per request.
+ */
+u32 mdp3_clk_calc(struct msm_fb_data_type *mfd,
+				struct blit_req_list *lreq, u32 fps)
+{
+	int i, lcount = 0;
+	struct mdp_blit_req *req;
+	u64 mdp_clk_rate = 0;
+	u32 scale_x = 0, scale_y = 0, scale = 0;
+	u32 blend_l, csc_l;
+
+	lcount = lreq->count;
+
+	blend_l = 100 * BLEND_LATENCY;
+	csc_l = 100 * CSC_LATENCY;
+
+	for (i = 0; i < lcount; i++) {
+		req = &(lreq->req_list[i]);
+
+		/* Smart-blit BG layers ride along with the FG pass. */
+		if (req->flags & MDP_SMART_BLIT)
+			continue;
+
+		if (mdp3_is_scale(req)) {
+			if (req->flags & MDP_ROT_90) {
+				scale_x = 100 * req->src_rect.h /
+							req->dst_rect.w;
+				scale_y = 100 * req->src_rect.w /
+							req->dst_rect.h;
+			} else {
+				scale_x = 100 * req->src_rect.w /
+							req->dst_rect.w;
+				scale_y = 100 * req->src_rect.h /
+							req->dst_rect.h;
+			}
+			scale = max(scale_x, scale_y);
+		}
+		scale = scale >= 100 ? scale : 100;
+		if (mdp3_is_blend(req))
+			scale = max(scale, blend_l);
+
+		if (!check_if_rgb(req->src.format))
+			scale = max(scale, csc_l);
+
+		mdp_clk_rate += (req->src_rect.w * req->src_rect.h *
+							scale / 100) * fps;
+	}
+	mdp_clk_rate += (ppp_res.solid_fill_pixel * fps);
+	mdp_clk_rate = fudge_factor(mdp_clk_rate,
+				CLK_FUDGE_NUM, CLK_FUDGE_DEN);
+	pr_debug("mdp_clk_rate for ppp = %llu\n", mdp_clk_rate);
+	mdp_clk_rate = mdp3_clk_round_off(mdp_clk_rate);
+
+	return mdp_clk_rate;
+}
+
+/*
+ * Inflate the bandwidth estimate @bw_req for scaling blits: a 1:1 blit
+ * (accounting for 90-degree rotation) passes through unchanged, otherwise
+ * extra read bandwidth proportional to the vertical and horizontal scale
+ * ratios (and plane bpp) is added.
+ */
+u64 mdp3_adjust_scale_factor(struct mdp_blit_req *req, u32 bw_req, int bpp)
+{
+	int src_h, src_w;
+	int dst_h, dst_w;
+
+	src_h = req->src_rect.h;
+	src_w = req->src_rect.w;
+
+	dst_h = req->dst_rect.h;
+	dst_w = req->dst_rect.w;
+
+	/* No adjustment for unscaled blits (dimensions swap under ROT_90). */
+	if ((!(req->flags & MDP_ROT_90) && src_h == dst_h &&
+		src_w == dst_w) || ((req->flags & MDP_ROT_90) &&
+		src_h == dst_w && src_w == dst_h))
+		return bw_req;
+
+	bw_req = (bw_req + (bw_req * dst_h) / (4 * src_h));
+	bw_req = (bw_req + (bw_req * dst_w) / (4 * src_w) +
+			(bw_req * dst_w) / (bpp * src_w));
+	return bw_req;
+}
+
+/*
+ * Compute the bus bandwidth (ab/ib) and clock votes for the blit batch
+ * @lreq and store them in ppp_res for mdp3_ppp_turnon() to apply.
+ *
+ * Per request the "honest" bandwidth is source read + background read +
+ * destination write, scaled by the per-request (or panel) fps. Smart-blit
+ * pairs are handled specially: the BG layer at index n only caches its
+ * read bandwidth, which is charged to the FG layer at index n+1.
+ * Solid-fill requests accumulate pixel/byte counters instead and can
+ * return early when the previous vote already covers them.
+ */
+int mdp3_calc_ppp_res(struct msm_fb_data_type *mfd,
+		struct blit_req_list *lreq)
+{
+	struct mdss_panel_info *panel_info = mfd->panel_info;
+	int i, lcount = 0;
+	struct mdp_blit_req *req;
+	struct bpp_info bpp;
+	u64 old_solid_fill_pixel = 0;
+	u64 new_solid_fill_pixel = 0;
+	u64 src_read_bw = 0;
+	u32 bg_read_bw = 0;
+	u32 dst_write_bw = 0;
+	u64 honest_ppp_ab = 0;
+	u32 fps = 0;
+	int smart_blit_fg_indx = -1;
+	u32 smart_blit_bg_read_bw = 0;
+
+	ATRACE_BEGIN(__func__);
+	lcount = lreq->count;
+	if (lcount == 0) {
+		pr_err("Blit with request count 0, continue to recover!!!\n");
+		ATRACE_END(__func__);
+		return 0;
+	}
+	if (lreq->req_list[0].flags & MDP_SOLID_FILL) {
+		req = &(lreq->req_list[0]);
+		mdp3_get_bpp_info(req->dst.format, &bpp);
+		old_solid_fill_pixel = ppp_res.solid_fill_pixel;
+		new_solid_fill_pixel = req->dst_rect.w * req->dst_rect.h;
+		ppp_res.solid_fill_pixel += new_solid_fill_pixel;
+		ppp_res.solid_fill_byte += req->dst_rect.w * req->dst_rect.h *
+						bpp.bpp_num / bpp.bpp_den;
+		/* Existing vote already covers this fill — nothing to do. */
+		if ((old_solid_fill_pixel >= new_solid_fill_pixel) ||
+			(mdp3_res->solid_fill_vote_en)) {
+			pr_debug("Last fill pixels are higher or fill_en %d\n",
+				mdp3_res->solid_fill_vote_en);
+			ATRACE_END(__func__);
+			return 0;
+		}
+	}
+
+	for (i = 0; i < lcount; i++) {
+		/* Set Smart blit flag before BW calculation */
+		is_blit_optimization_possible(lreq, i);
+		req = &(lreq->req_list[i]);
+
+		/* Honor a per-request fps only when all requests agree. */
+		if (req->fps > 0 && req->fps <= panel_info->mipi.frame_rate) {
+			if (fps == 0)
+				fps = req->fps;
+			else
+				fps = panel_info->mipi.frame_rate;
+		}
+
+		mdp3_get_bpp_info(req->src.format, &bpp);
+		if (lreq->req_list[i].flags & MDP_SMART_BLIT) {
+			/*
+			 * Flag for smart blit FG layer index
+			 * If blit request at index "n" has
+			 * MDP_SMART_BLIT flag set then it will be used as BG
+			 * layer in smart blit and request at index "n+1"
+			 * will be used as FG layer
+			 */
+			smart_blit_fg_indx = i + 1;
+			bg_read_bw = req->src_rect.w * req->src_rect.h *
+						bpp.bpp_num / bpp.bpp_den;
+			bg_read_bw = mdp3_adjust_scale_factor(req,
+						bg_read_bw, bpp.bpp_pln);
+			/* Cache read BW of smart blit BG layer */
+			smart_blit_bg_read_bw = bg_read_bw;
+		} else {
+			src_read_bw = req->src_rect.w * req->src_rect.h *
+				bpp.bpp_num / bpp.bpp_den;
+			src_read_bw = mdp3_adjust_scale_factor(req,
+					src_read_bw, bpp.bpp_pln);
+			/* YUV sources pay an extra CSC bandwidth fudge. */
+			if (!(check_if_rgb(req->src.format))) {
+				src_read_bw = fudge_factor(src_read_bw,
+						YUV_BW_FUDGE_NUM,
+						YUV_BW_FUDGE_DEN);
+			}
+			mdp3_get_bpp_info(req->dst.format, &bpp);
+
+			if (smart_blit_fg_indx == i) {
+				bg_read_bw = smart_blit_bg_read_bw;
+				smart_blit_fg_indx = -1;
+			} else {
+				/* Blending reads the destination back in. */
+				if ((req->transp_mask != MDP_TRANSP_NOP) ||
+					(req->alpha < MDP_ALPHA_NOP) ||
+					(req->src.format == MDP_ARGB_8888) ||
+					(req->src.format == MDP_BGRA_8888) ||
+					(req->src.format == MDP_RGBA_8888)) {
+					bg_read_bw = req->dst_rect.w *
+						req->dst_rect.h *
+						bpp.bpp_num / bpp.bpp_den;
+					bg_read_bw = mdp3_adjust_scale_factor(
+							req, bg_read_bw,
+							bpp.bpp_pln);
+				} else {
+					bg_read_bw = 0;
+				}
+			}
+			dst_write_bw = req->dst_rect.w * req->dst_rect.h *
+						bpp.bpp_num / bpp.bpp_den;
+			honest_ppp_ab += (src_read_bw + bg_read_bw +
+					dst_write_bw);
+		}
+	}
+
+	if (fps == 0)
+		fps = panel_info->mipi.frame_rate;
+
+	if (lreq->req_list[0].flags & MDP_SOLID_FILL) {
+		honest_ppp_ab = ppp_res.solid_fill_byte * 4;
+		pr_debug("solid fill honest_ppp_ab %llu\n", honest_ppp_ab);
+	} else {
+		honest_ppp_ab += ppp_res.solid_fill_byte;
+		mdp3_res->solid_fill_vote_en = true;
+	}
+
+	honest_ppp_ab = honest_ppp_ab * fps;
+	if (honest_ppp_ab != ppp_res.next_ab) {
+		ppp_res.next_ab = honest_ppp_ab;
+		ppp_res.next_ib = honest_ppp_ab;
+		ppp_stat->bw_update = true;
+		pr_debug("solid fill ab = %llx, total ab = %llx ",
+			(ppp_res.solid_fill_byte * fps), honest_ppp_ab);
+		pr_debug("(%d fps) Solid_fill_vote %d\n",
+			fps, mdp3_res->solid_fill_vote_en);
+		ATRACE_INT("mdp3_ppp_bus_quota", honest_ppp_ab);
+	}
+	ppp_res.clk_rate = mdp3_clk_calc(mfd, lreq, fps);
+	ATRACE_INT("mdp3_ppp_clk_rate", ppp_res.clk_rate);
+	ATRACE_END(__func__);
+	return 0;
+}
+
+/*
+ * Apply (or drop, when @on_off is 0) the clock rate and bus quota computed
+ * by mdp3_calc_ppp_res(). On bus-quota failure the resource vote is rolled
+ * back before returning the error.
+ *
+ * NOTE(review): ppp_stat->mdp_clk is pinned to MDP_CORE_CLK_RATE_SVS
+ * regardless of the rate actually set — confirm this is intentional.
+ */
+int mdp3_ppp_turnon(struct msm_fb_data_type *mfd, int on_off)
+{
+	uint64_t ab = 0, ib = 0;
+	int rate = 0;
+	int rc;
+
+	if (on_off) {
+		rate = ppp_res.clk_rate;
+		ab = ppp_res.next_ab;
+		ib = ppp_res.next_ib;
+	}
+	mdp3_clk_set_rate(MDP3_CLK_MDP_SRC, rate, MDP3_CLIENT_PPP);
+	rc = mdp3_res_update(on_off, 0, MDP3_CLIENT_PPP);
+	if (rc < 0) {
+		pr_err("%s: mdp3_clk_enable failed\n", __func__);
+		return rc;
+	}
+	rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_PPP, ab, ib);
+	if (rc < 0) {
+		mdp3_res_update(!on_off, 0, MDP3_CLIENT_PPP);
+		pr_err("%s: scale_set_quota failed\n", __func__);
+		return rc;
+	}
+	ppp_stat->bw_on = on_off;
+	ppp_stat->mdp_clk = MDP_CORE_CLK_RATE_SVS;
+	ppp_stat->bw_update = false;
+	return 0;
+}
+
+/*
+ * mdp3_start_ppp() - program the PPP for @blit_op and kick it off.
+ *
+ * Bails out if the engine is still busy from a previous request. For
+ * solid fills a set of transform/fetch registers is cleared and the fill
+ * color programmed (register offsets per the MDP3 HW manual — TODO
+ * confirm against mdp3_hwio.h names); smart-blit BG layers skip the
+ * kickoff since they run as part of the FG pass.
+ */
+void mdp3_start_ppp(struct ppp_blit_op *blit_op)
+{
+	/* Wait for the pipe to clear */
+	if (MDP3_REG_READ(MDP3_REG_DISPLAY_STATUS) &
+			MDP3_PPP_ACTIVE) {
+		pr_err("ppp core is hung up on previous request\n");
+		return;
+	}
+	config_ppp_op_mode(blit_op);
+	if (blit_op->solid_fill) {
+		MDP3_REG_WRITE(0x10138, 0x10000000);
+		MDP3_REG_WRITE(0x1014c, 0xffffffff);
+		MDP3_REG_WRITE(0x101b8, 0);
+		MDP3_REG_WRITE(0x101bc, 0);
+		MDP3_REG_WRITE(0x1013c, 0);
+		MDP3_REG_WRITE(0x10140, 0);
+		MDP3_REG_WRITE(0x10144, 0);
+		MDP3_REG_WRITE(0x10148, 0);
+		MDP3_REG_WRITE(MDP3_TFETCH_FILL_COLOR,
+					blit_op->solid_fill_color);
+		MDP3_REG_WRITE(MDP3_TFETCH_SOLID_FILL,
+					ENABLE_SOLID_FILL);
+	} else {
+		MDP3_REG_WRITE(MDP3_TFETCH_SOLID_FILL,
+					DISABLE_SOLID_FILL);
+	}
+	/* Skip PPP kickoff for SMART_BLIT BG layer */
+	if (blit_op->mdp_op & MDPOP_SMART_BLIT)
+		pr_debug("Skip mdp3_ppp_kickoff\n");
+	else
+		mdp3_ppp_kickoff();
+
+	if (!(blit_op->solid_fill)) {
+		ppp_res.solid_fill_pixel = 0;
+		ppp_res.solid_fill_byte = 0;
+	}
+}
+
+/*
+ * Work around PPP solid-fill hangs: widths of 1 are widened to 2 (when the
+ * image has room), odd widths are rounded down to even, and alpha-carrying
+ * formats are swapped for their X variants. Returns -EINVAL when a
+ * width-1 fill cannot be widened.
+ */
+static int solid_fill_workaround(struct mdp_blit_req *req,
+						struct ppp_blit_op *blit_op)
+{
+	/* Make width 2 when there is a solid fill of width 1, and make
+	 * sure width does not become zero while trying to avoid odd width
+	 */
+	if (blit_op->dst.roi.width == 1) {
+		if (req->dst_rect.x + 2 > req->dst.width) {
+			pr_err("%s: Unable to handle solid fill of width 1",
+								__func__);
+			return -EINVAL;
+		}
+		blit_op->dst.roi.width = 2;
+	}
+	if (blit_op->src.roi.width == 1) {
+		if (req->src_rect.x + 2 > req->src.width) {
+			pr_err("%s: Unable to handle solid fill of width 1",
+								__func__);
+			return -EINVAL;
+		}
+		blit_op->src.roi.width = 2;
+	}
+
+	/* Avoid odd width, as it could hang ppp during solid fill */
+	blit_op->dst.roi.width = (blit_op->dst.roi.width / 2) * 2;
+	blit_op->src.roi.width = (blit_op->src.roi.width / 2) * 2;
+
+	/* Set src format to RGBX, to avoid ppp hang issues */
+	blit_op->src.color_fmt = MDP_RGBX_8888;
+
+	/* Avoid RGBA format, as it could hang ppp during solid fill */
+	if (blit_op->dst.color_fmt == MDP_RGBA_8888)
+		blit_op->dst.color_fmt = MDP_RGBX_8888;
+	return 0;
+}
+
+/*
+ * Translate the userspace blit request @req plus its mapped buffers into
+ * the hardware-facing @blit_op: buffer addresses/plane layout, ROIs,
+ * blending (transparency key, constant alpha), rotation/flip/dither
+ * flags, scaling, and the solid-fill workaround.
+ *
+ * NOTE(review): plane-1 addresses are computed via (uint32_t) casts of
+ * pointers — fine on a 32-bit kernel, but would truncate on 64-bit;
+ * confirm this driver is 32-bit only.
+ *
+ * Returns 0 on success or the error from solid_fill_workaround().
+ */
+static int mdp3_ppp_process_req(struct ppp_blit_op *blit_op,
+	struct mdp_blit_req *req, struct mdp3_img_data *src_data,
+	struct mdp3_img_data *dst_data)
+{
+	unsigned long srcp0_start, srcp0_len, dst_start, dst_len;
+	uint32_t dst_width, dst_height;
+	int ret = 0;
+
+	srcp0_start = (unsigned long) src_data->addr;
+	srcp0_len = (unsigned long) src_data->len;
+	dst_start = (unsigned long) dst_data->addr;
+	dst_len = (unsigned long) dst_data->len;
+
+	blit_op->dst.prop.width = req->dst.width;
+	blit_op->dst.prop.height = req->dst.height;
+
+	blit_op->dst.color_fmt = req->dst.format;
+	blit_op->dst.p0 = (void *) dst_start;
+	blit_op->dst.p0 += req->dst.offset;
+
+	blit_op->dst.roi.x = req->dst_rect.x;
+	blit_op->dst.roi.y = req->dst_rect.y;
+	blit_op->dst.roi.width = req->dst_rect.w;
+	blit_op->dst.roi.height = req->dst_rect.h;
+
+	blit_op->src.roi.x = req->src_rect.x;
+	blit_op->src.roi.y = req->src_rect.y;
+	blit_op->src.roi.width = req->src_rect.w;
+	blit_op->src.roi.height = req->src_rect.h;
+
+	blit_op->src.prop.width = req->src.width;
+	blit_op->src.prop.height = req->src.height;
+	blit_op->src.color_fmt = req->src.format;
+
+
+	/* Plane 1 (chroma) follows plane 0 at a format-specific alignment. */
+	blit_op->src.p0 = (void *) (srcp0_start + req->src.offset);
+	if (blit_op->src.color_fmt == MDP_Y_CBCR_H2V2_ADRENO)
+		blit_op->src.p1 =
+			(void *) ((uint32_t) blit_op->src.p0 +
+				ALIGN((ALIGN(req->src.width, 32) *
+				ALIGN(req->src.height, 32)), 4096));
+	else if (blit_op->src.color_fmt == MDP_Y_CBCR_H2V2_VENUS)
+		blit_op->src.p1 =
+			(void *) ((uint32_t) blit_op->src.p0 +
+				ALIGN((ALIGN(req->src.width, 128) *
+				ALIGN(req->src.height, 32)), 4096));
+	else
+		blit_op->src.p1 = (void *) ((uint32_t) blit_op->src.p0 +
+			req->src.width * req->src.height);
+
+	if (req->flags & MDP_IS_FG)
+		blit_op->mdp_op |= MDPOP_LAYER_IS_FG;
+
+	/* blending check */
+	if (req->transp_mask != MDP_TRANSP_NOP) {
+		blit_op->mdp_op |= MDPOP_TRANSP;
+		blit_op->blend.trans_color =
+			mdp3_calc_tpval(&blit_op->src, req->transp_mask);
+	} else {
+		blit_op->blend.trans_color = 0;
+	}
+
+	req->alpha &= 0xff;
+	if (req->alpha < MDP_ALPHA_NOP) {
+		blit_op->mdp_op |= MDPOP_ALPHAB;
+		blit_op->blend.const_alpha = req->alpha;
+	} else {
+		blit_op->blend.const_alpha = 0xff;
+	}
+
+	/* rotation check */
+	if (req->flags & MDP_FLIP_LR)
+		blit_op->mdp_op |= MDPOP_LR;
+	if (req->flags & MDP_FLIP_UD)
+		blit_op->mdp_op |= MDPOP_UD;
+	if (req->flags & MDP_ROT_90)
+		blit_op->mdp_op |= MDPOP_ROT90;
+	if (req->flags & MDP_DITHER)
+		blit_op->mdp_op |= MDPOP_DITHER;
+
+	if (req->flags & MDP_BLEND_FG_PREMULT)
+		blit_op->mdp_op |= MDPOP_FG_PM_ALPHA;
+
+	/* scale check */
+	if (req->flags & MDP_ROT_90) {
+		dst_width = req->dst_rect.h;
+		dst_height = req->dst_rect.w;
+	} else {
+		dst_width = req->dst_rect.w;
+		dst_height = req->dst_rect.h;
+	}
+
+	if ((blit_op->src.roi.width != dst_width) ||
+			(blit_op->src.roi.height != dst_height))
+		blit_op->mdp_op |= MDPOP_ASCALE;
+
+	if (req->flags & MDP_BLUR)
+		blit_op->mdp_op |= MDPOP_ASCALE | MDPOP_BLUR;
+
+	if (req->flags & MDP_SOLID_FILL) {
+		ret = solid_fill_workaround(req, blit_op);
+		if (ret)
+			return ret;
+
+		/* Register packs the fill color as A|B|R|G (MSB to LSB). */
+		blit_op->solid_fill_color = (req->const_color.g & 0xFF)|
+				(req->const_color.r & 0xFF) << 8 |
+				(req->const_color.b & 0xFF)  << 16 |
+				(req->const_color.alpha & 0xFF) << 24;
+		blit_op->solid_fill = true;
+	} else {
+		blit_op->solid_fill = false;
+	}
+
+	if (req->flags & MDP_SMART_BLIT)
+		blit_op->mdp_op |= MDPOP_SMART_BLIT;
+
+	return ret;
+}
+
+/*
+ * mdp3_ppp_tile_workaround() - work around the MDP BG tile-fetch issue for
+ * narrow rotated blits by splitting the destination into 16-pixel-high
+ * strips and issuing one PPP operation per strip.
+ *
+ * Each iteration recomputes the matching source ROI width from the overall
+ * src/dst ratio and clamps it so the per-strip scale factor stays inside
+ * the hardware range [MDP_MIN_X_SCALE_FACTOR, MDP_MAX_X_SCALE_FACTOR].
+ * mdp3_start_ppp() may modify fields of *blit_op, so mdp_op and the buffer
+ * pointers are snapshotted up front and restored after every strip.
+ */
+static void mdp3_ppp_tile_workaround(struct ppp_blit_op *blit_op,
+	struct mdp_blit_req *req)
+{
+	/* Running remainders of destination height / source width. */
+	int dst_h, src_w, i;
+	uint32_t mdp_op = blit_op->mdp_op;
+	void *src_p0 = blit_op->src.p0;
+	void *src_p1 = blit_op->src.p1;
+	void *dst_p0 = blit_op->dst.p0;
+
+	src_w = req->src_rect.w;
+	dst_h = blit_op->dst.roi.height;
+	/* bg tile fetching HW workaround */
+	for (i = 0; i < (req->dst_rect.h / 16); i++) {
+		/* this tile size */
+		blit_op->dst.roi.height = 16;
+		blit_op->src.roi.width =
+			(16 * req->src_rect.w) / req->dst_rect.h;
+
+		/* if it's out of scale range... */
+		if (((MDP_SCALE_Q_FACTOR * blit_op->dst.roi.height) /
+			blit_op->src.roi.width) > MDP_MAX_X_SCALE_FACTOR)
+			blit_op->src.roi.width =
+				(MDP_SCALE_Q_FACTOR *
+				blit_op->dst.roi.height) /
+				MDP_MAX_X_SCALE_FACTOR;
+		else if (((MDP_SCALE_Q_FACTOR * blit_op->dst.roi.height) /
+			blit_op->src.roi.width) < MDP_MIN_X_SCALE_FACTOR)
+			blit_op->src.roi.width =
+				(MDP_SCALE_Q_FACTOR *
+				blit_op->dst.roi.height) /
+				MDP_MIN_X_SCALE_FACTOR;
+
+		mdp3_start_ppp(blit_op);
+
+		/* next tile location */
+		blit_op->dst.roi.y += 16;
+		blit_op->src.roi.x += blit_op->src.roi.width;
+
+		/* this is for a remainder update */
+		dst_h -= 16;
+		src_w -= blit_op->src.roi.width;
+		/* restore parameters that may have been overwritten */
+		blit_op->mdp_op = mdp_op;
+		blit_op->src.p0 = src_p0;
+		blit_op->src.p1 = src_p1;
+		blit_op->dst.p0 = dst_p0;
+	}
+
+	/* Negative remainder means the clamping above over-consumed ROI. */
+	if ((dst_h < 0) || (src_w < 0))
+		pr_err("msm_fb: mdp_blt_ex() unexpected result! line:%d\n",
+			__LINE__);
+
+	/* remainder update */
+	if ((dst_h > 0) && (src_w > 0)) {
+		u32 tmp_v;
+
+		blit_op->dst.roi.height = dst_h;
+		blit_op->src.roi.width = src_w;
+
+		/* Re-clamp the last (short) strip, rounding the width up. */
+		if (((MDP_SCALE_Q_FACTOR * blit_op->dst.roi.height) /
+			blit_op->src.roi.width) > MDP_MAX_X_SCALE_FACTOR) {
+			tmp_v = (MDP_SCALE_Q_FACTOR *
+				blit_op->dst.roi.height) /
+				MDP_MAX_X_SCALE_FACTOR +
+				((MDP_SCALE_Q_FACTOR *
+				blit_op->dst.roi.height) %
+				MDP_MAX_X_SCALE_FACTOR ? 1 : 0);
+
+			/* move x location as roi width gets bigger */
+			blit_op->src.roi.x -= tmp_v - blit_op->src.roi.width;
+			blit_op->src.roi.width = tmp_v;
+		} else if (((MDP_SCALE_Q_FACTOR * blit_op->dst.roi.height) /
+			blit_op->src.roi.width) < MDP_MIN_X_SCALE_FACTOR) {
+			tmp_v = (MDP_SCALE_Q_FACTOR *
+				blit_op->dst.roi.height) /
+				MDP_MIN_X_SCALE_FACTOR +
+				((MDP_SCALE_Q_FACTOR *
+				blit_op->dst.roi.height) %
+				MDP_MIN_X_SCALE_FACTOR ? 1 : 0);
+			/*
+			 * we don't move x location for continuity of
+			 * source image
+			 */
+			blit_op->src.roi.width = tmp_v;
+		}
+
+
+		mdp3_start_ppp(blit_op);
+	}
+}
+
+/*
+ * mdp3_ppp_blit() - validate one blit request and run it on the PPP engine.
+ *
+ * Resolves MDP_FB_FORMAT to the framebuffer's real format, verifies the
+ * request, builds the ppp_blit_op descriptor, then either starts the
+ * engine directly or takes the tiled workaround path for blended/ARGB
+ * sources rotated into a narrow (<= 16px wide) destination.
+ *
+ * Returns 0 on success or a negative errno on invalid/failed requests.
+ */
+static int mdp3_ppp_blit(struct msm_fb_data_type *mfd,
+	struct mdp_blit_req *req, struct mdp3_img_data *src_data,
+	struct mdp3_img_data *dst_data)
+{
+	struct ppp_blit_op blit_op;
+	int ret = 0;
+
+	memset(&blit_op, 0, sizeof(blit_op));
+
+	/* MDP_FB_FORMAT is a placeholder for "whatever the fb uses". */
+	if (req->dst.format == MDP_FB_FORMAT)
+		req->dst.format =  mfd->fb_imgType;
+	if (req->src.format == MDP_FB_FORMAT)
+		req->src.format = mfd->fb_imgType;
+
+	if (mdp3_ppp_verify_req(req)) {
+		pr_err("%s: invalid image!\n", __func__);
+		return -EINVAL;
+	}
+
+	ret = mdp3_ppp_process_req(&blit_op, req, src_data, dst_data);
+	if (ret) {
+		pr_err("%s: Failed to process the blit request\n", __func__);
+		return ret;
+	}
+
+	/* bg tile fetching HW workaround for narrow rotated blends */
+	if (((blit_op.mdp_op & (MDPOP_TRANSP | MDPOP_ALPHAB)) ||
+	     (req->src.format == MDP_ARGB_8888) ||
+	     (req->src.format == MDP_BGRA_8888) ||
+	     (req->src.format == MDP_RGBA_8888)) &&
+	    (blit_op.mdp_op & MDPOP_ROT90) && (req->dst_rect.w <= 16)) {
+		mdp3_ppp_tile_workaround(&blit_op, req);
+	} else {
+		mdp3_start_ppp(&blit_op);
+	}
+
+	return 0;
+}
+
+/*
+ * mdp3_ppp_blit_workaround() - MDP width-split workaround.
+ *
+ * Destinations whose width modulo 16 is 6 or 14 (with 4bpp formats) hit a
+ * hardware limitation, so the destination ROI is split at the width into
+ * two halves, each blitted with its own proportionally-scaled source ROI.
+ * The split point is chosen so neither half re-creates the bad remainder.
+ *
+ * NOTE(review): the 0x07 masks below test the low three bits of req->flags
+ * — presumably MDP_FLIP_LR | MDP_FLIP_UD | MDP_ROT_90; the combination
+ * decides which half must be submitted first so flips/rotation still
+ * compose correctly.  Verify against the MDP flag definitions.
+ */
+static int mdp3_ppp_blit_workaround(struct msm_fb_data_type *mfd,
+		struct mdp_blit_req *req, unsigned int remainder,
+		struct mdp3_img_data *src_data,
+		struct mdp3_img_data *dst_data)
+{
+	int ret;
+	struct mdp_blit_req splitreq;
+	/* s_* = source rect halves, d_* = destination rect halves. */
+	int s_x_0, s_x_1, s_w_0, s_w_1, s_y_0, s_y_1, s_h_0, s_h_1;
+	int d_x_0, d_x_1, d_w_0, d_w_1, d_y_0, d_y_1, d_h_0, d_h_1;
+
+	/* make new request as provide by user */
+	splitreq = *req;
+
+	/* break dest roi at width*/
+	d_y_0 = d_y_1 = req->dst_rect.y;
+	d_h_0 = d_h_1 = req->dst_rect.h;
+	d_x_0 = req->dst_rect.x;
+
+	if (remainder == 14 || remainder == 6)
+		d_w_1 = req->dst_rect.w / 2;
+	else
+		d_w_1 = (req->dst_rect.w - 1) / 2 - 1;
+
+	d_w_0 = req->dst_rect.w - d_w_1;
+	d_x_1 = d_x_0 + d_w_0;
+	/* blit first region */
+	if (((splitreq.flags & 0x07) == 0x07) ||
+		((splitreq.flags & 0x07) == 0x05) ||
+		((splitreq.flags & 0x07) == 0x02) ||
+		((splitreq.flags & 0x07) == 0x0)) {
+
+		/* With ROT_90 the dest width split maps to a src height split. */
+		if (splitreq.flags & MDP_ROT_90) {
+			s_x_0 = s_x_1 = req->src_rect.x;
+			s_w_0 = s_w_1 = req->src_rect.w;
+			s_y_0 = req->src_rect.y;
+			s_h_1 = (req->src_rect.h * d_w_1) /
+				req->dst_rect.w;
+			s_h_0 = req->src_rect.h - s_h_1;
+			s_y_1 = s_y_0 + s_h_0;
+			/* Compensate rounding for heavy upscale (>= 8x). */
+			if (d_w_1 >= 8 * s_h_1) {
+				s_h_1++;
+				s_y_1--;
+			}
+		} else {
+			s_y_0 = s_y_1 = req->src_rect.y;
+			s_h_0 = s_h_1 = req->src_rect.h;
+			s_x_0 = req->src_rect.x;
+			s_w_1 = (req->src_rect.w * d_w_1) /
+				req->dst_rect.w;
+			s_w_0 = req->src_rect.w - s_w_1;
+			s_x_1 = s_x_0 + s_w_0;
+			if (d_w_1 >= 8 * s_w_1) {
+				s_w_1++;
+				s_x_1--;
+			}
+		}
+
+		splitreq.src_rect.h = s_h_0;
+		splitreq.src_rect.y = s_y_0;
+		splitreq.dst_rect.h = d_h_0;
+		splitreq.dst_rect.y = d_y_0;
+		splitreq.src_rect.x = s_x_0;
+		splitreq.src_rect.w = s_w_0;
+		splitreq.dst_rect.x = d_x_0;
+		splitreq.dst_rect.w = d_w_0;
+	} else {
+		if (splitreq.flags & MDP_ROT_90) {
+			s_x_0 = s_x_1 = req->src_rect.x;
+			s_w_0 = s_w_1 = req->src_rect.w;
+			s_y_0 = req->src_rect.y;
+			s_h_1 = (req->src_rect.h * d_w_0) /
+				req->dst_rect.w;
+			s_h_0 = req->src_rect.h - s_h_1;
+			s_y_1 = s_y_0 + s_h_0;
+			if (d_w_0 >= 8 * s_h_1) {
+				s_h_1++;
+				s_y_1--;
+			}
+		} else {
+			s_y_0 = s_y_1 = req->src_rect.y;
+			s_h_0 = s_h_1 = req->src_rect.h;
+			s_x_0 = req->src_rect.x;
+			s_w_1 = (req->src_rect.w * d_w_0) /
+				req->dst_rect.w;
+			s_w_0 = req->src_rect.w - s_w_1;
+			s_x_1 = s_x_0 + s_w_0;
+			if (d_w_0 >= 8 * s_w_1) {
+				s_w_1++;
+				s_x_1--;
+			}
+		}
+		splitreq.src_rect.h = s_h_0;
+		splitreq.src_rect.y = s_y_0;
+		splitreq.dst_rect.h = d_h_1;
+		splitreq.dst_rect.y = d_y_1;
+		splitreq.src_rect.x = s_x_0;
+		splitreq.src_rect.w = s_w_0;
+		splitreq.dst_rect.x = d_x_1;
+		splitreq.dst_rect.w = d_w_1;
+	}
+
+	/* No need to split in height */
+	ret = mdp3_ppp_blit(mfd, &splitreq, src_data, dst_data);
+
+	if (ret)
+		return ret;
+	/* blit second region */
+	if (((splitreq.flags & 0x07) == 0x07) ||
+		((splitreq.flags & 0x07) == 0x05) ||
+		((splitreq.flags & 0x07) == 0x02) ||
+		((splitreq.flags & 0x07) == 0x0)) {
+		splitreq.src_rect.h = s_h_1;
+		splitreq.src_rect.y = s_y_1;
+		splitreq.dst_rect.h = d_h_1;
+		splitreq.dst_rect.y = d_y_1;
+		splitreq.src_rect.x = s_x_1;
+		splitreq.src_rect.w = s_w_1;
+		splitreq.dst_rect.x = d_x_1;
+		splitreq.dst_rect.w = d_w_1;
+	} else {
+		splitreq.src_rect.h = s_h_1;
+		splitreq.src_rect.y = s_y_1;
+		splitreq.dst_rect.h = d_h_0;
+		splitreq.dst_rect.y = d_y_0;
+		splitreq.src_rect.x = s_x_1;
+		splitreq.src_rect.w = s_w_1;
+		splitreq.dst_rect.x = d_x_0;
+		splitreq.dst_rect.w = d_w_0;
+	}
+
+	/* No need to split in height ... just width */
+	return mdp3_ppp_blit(mfd, &splitreq, src_data, dst_data);
+}
+
+/*
+ * mdp3_ppp_start_blit() - entry point for a single blit request.
+ *
+ * Rejects zero-sized sources, treats zero-sized destinations as a no-op,
+ * and routes the request either through the MDP width-split workaround
+ * (4bpp destination whose width % 16 is 6 or 14, unless solid-fill) or
+ * through the normal blit path.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int mdp3_ppp_start_blit(struct msm_fb_data_type *mfd,
+		struct mdp_blit_req *req,
+		struct mdp3_img_data *src_data,
+		struct mdp3_img_data *dst_data)
+{
+	int bpp;
+	unsigned int width_rem;
+	bool need_split;
+
+	/* A zero-sized source rectangle is a caller error. */
+	if (unlikely(req->src_rect.h == 0 || req->src_rect.w == 0)) {
+		pr_err("mdp_ppp: src img of zero size!\n");
+		return -EINVAL;
+	}
+	/* Nothing to draw into a zero-sized destination. */
+	if (unlikely(req->dst_rect.h == 0 || req->dst_rect.w == 0))
+		return 0;
+
+	bpp = ppp_get_bpp(req->dst.format, mfd->fb_imgType);
+	if (bpp <= 0) {
+		pr_err("mdp_ppp: incorrect bpp!\n");
+		return -EINVAL;
+	}
+
+	/* MDP width split workaround */
+	width_rem = req->dst_rect.w % 16;
+	need_split = (bpp == 4) &&
+		(width_rem == 6 || width_rem == 14) &&
+		!(req->flags & MDP_SOLID_FILL);
+
+	if (need_split)
+		return mdp3_ppp_blit_workaround(mfd, req, width_rem,
+						src_data, dst_data);
+
+	return mdp3_ppp_blit(mfd, req, src_data, dst_data);
+}
+
+/*
+ * mdp3_ppp_wait_for_fence() - wait on all acquire fences of a request list.
+ *
+ * Waits for each acquire fence in turn and drops the reference taken in
+ * mdp3_ppp_handle_buf_sync().  If one wait fails, the failed fence and all
+ * remaining ones are released without waiting.  acq_fen_cnt is cleared in
+ * every case so the fence array is never touched again.
+ */
+void mdp3_ppp_wait_for_fence(struct blit_req_list *req)
+{
+	int i, ret = 0;
+
+	ATRACE_BEGIN(__func__);
+	/* buf sync */
+	for (i = 0; i < req->acq_fen_cnt; i++) {
+		ret = sync_fence_wait(req->acq_fen[i],
+				WAIT_FENCE_FINAL_TIMEOUT);
+		if (ret < 0) {
+			pr_err("%s: sync_fence_wait failed! ret = %x\n",
+				__func__, ret);
+			break;
+		}
+		sync_fence_put(req->acq_fen[i]);
+	}
+	ATRACE_END(__func__);
+	/* On failure, release the fences that were not waited on (from i). */
+	if (ret < 0) {
+		while (i < req->acq_fen_cnt) {
+			sync_fence_put(req->acq_fen[i]);
+			i++;
+		}
+	}
+	req->acq_fen_cnt = 0;
+}
+
+/*
+ * mdp3_ppp_signal_timeline() - advance the PPP sw_sync timeline by one,
+ * signalling the current release fence, then rotate cur_rel_fence into
+ * last_rel_fence.  Called under ppp_stat->req_mutex by the blit handler.
+ */
+void mdp3_ppp_signal_timeline(struct blit_req_list *req)
+{
+	sw_sync_timeline_inc(ppp_stat->timeline, 1);
+	MDSS_XLOG(ppp_stat->timeline->value, ppp_stat->timeline_value);
+	req->last_rel_fence = req->cur_rel_fence;
+	req->cur_rel_fence = 0;
+}
+
+
+/*
+ * mdp3_ppp_deinit_buf_sync() - undo mdp3_ppp_handle_buf_sync() on an error
+ * path: release the reserved release-fence fd and fence, rewind the
+ * sw_sync timeline value, and drop every acquire-fence reference.
+ */
+static void mdp3_ppp_deinit_buf_sync(struct blit_req_list *req)
+{
+	int i;
+
+	put_unused_fd(req->cur_rel_fen_fd);
+	sync_fence_put(req->cur_rel_fence);
+	req->cur_rel_fence = NULL;
+	req->cur_rel_fen_fd = 0;
+	/* Rewind: the sync point created for this request will never fire. */
+	ppp_stat->timeline_value--;
+	for (i = 0; i < req->acq_fen_cnt; i++)
+		sync_fence_put(req->acq_fen[i]);
+	req->acq_fen_cnt = 0;
+}
+
+/*
+ * mdp3_ppp_handle_buf_sync() - import acquire fences from userspace and
+ * create the release sync point/fence for one blit request list.
+ *
+ * Copies the acquire-fence fds, takes a reference on each fence, optionally
+ * waits on them right away (MDP_BUF_SYNC_FLAG_WAIT), then creates the
+ * release sync point and fence on the PPP timeline.
+ *
+ * Returns 0 on success or a negative errno; on failure all fence
+ * references taken so far are dropped.
+ */
+static int mdp3_ppp_handle_buf_sync(struct blit_req_list *req,
+	struct mdp_buf_sync *buf_sync)
+{
+	int i, fence_cnt = 0, ret = 0;
+	int acq_fen_fd[MDP_MAX_FENCE_FD];
+	struct sync_fence *fence;
+
+	if ((buf_sync->acq_fen_fd_cnt > MDP_MAX_FENCE_FD) ||
+		(ppp_stat->timeline == NULL))
+		return -EINVAL;
+
+	if (buf_sync->acq_fen_fd_cnt)
+		ret = copy_from_user(acq_fen_fd, buf_sync->acq_fen_fd,
+				buf_sync->acq_fen_fd_cnt * sizeof(int));
+	if (ret) {
+		pr_err("%s: copy_from_user failed\n", __func__);
+		/*
+		 * copy_from_user() returns the positive number of bytes NOT
+		 * copied; returning that verbatim would look like success to
+		 * callers that only check for rc < 0, so map it to -EFAULT.
+		 */
+		return -EFAULT;
+	}
+	for (i = 0; i < buf_sync->acq_fen_fd_cnt; i++) {
+		fence = sync_fence_fdget(acq_fen_fd[i]);
+		if (fence == NULL) {
+			pr_info("%s: null fence! i=%d fd=%d\n", __func__, i,
+				acq_fen_fd[i]);
+			ret = -EINVAL;
+			break;
+		}
+		req->acq_fen[i] = fence;
+	}
+	fence_cnt = i;
+	if (ret)
+		goto buf_sync_err_1;
+	req->acq_fen_cnt = fence_cnt;
+	if (buf_sync->flags & MDP_BUF_SYNC_FLAG_WAIT)
+		mdp3_ppp_wait_for_fence(req);
+
+	req->cur_rel_sync_pt = sw_sync_pt_create(ppp_stat->timeline,
+			ppp_stat->timeline_value++);
+	MDSS_XLOG(ppp_stat->timeline_value);
+	if (req->cur_rel_sync_pt == NULL) {
+		pr_err("%s: cannot create sync point\n", __func__);
+		ret = -ENOMEM;
+		goto buf_sync_err_2;
+	}
+	/* create fence */
+	req->cur_rel_fence = sync_fence_create("ppp-fence",
+			req->cur_rel_sync_pt);
+	if (req->cur_rel_fence == NULL) {
+		sync_pt_free(req->cur_rel_sync_pt);
+		req->cur_rel_sync_pt = NULL;
+		pr_err("%s: cannot create fence\n", __func__);
+		ret = -ENOMEM;
+		goto buf_sync_err_2;
+	}
+	/* create fd */
+	return ret;
+buf_sync_err_2:
+	ppp_stat->timeline_value--;
+buf_sync_err_1:
+	for (i = 0; i < fence_cnt; i++)
+		sync_fence_put(req->acq_fen[i]);
+	req->acq_fen_cnt = 0;
+	return ret;
+}
+
+/*
+ * mdp3_ppp_req_push() - copy a request list into the circular queue at the
+ * push index and advance it.  Caller is responsible for ensuring there is
+ * room (see the wait loop in mdp3_ppp_parse_req()).
+ */
+void mdp3_ppp_req_push(struct blit_req_queue *req_q, struct blit_req_list *req)
+{
+	req_q->req[req_q->push_idx] = *req;
+	req_q->push_idx = (req_q->push_idx + 1) % MDP3_PPP_MAX_LIST_REQ;
+	req_q->count++;
+}
+
+/*
+ * mdp3_ppp_next_req() - peek at the oldest queued request list without
+ * removing it.  Returns NULL when the queue is empty.
+ */
+struct blit_req_list *mdp3_ppp_next_req(struct blit_req_queue *req_q)
+{
+	if (!req_q->count)
+		return NULL;
+
+	return &req_q->req[req_q->pop_idx];
+}
+
+/*
+ * mdp3_ppp_req_pop() - discard the oldest entry of the circular queue by
+ * advancing the pop index.  Caller must know the queue is non-empty.
+ */
+void mdp3_ppp_req_pop(struct blit_req_queue *req_q)
+{
+	req_q->pop_idx = (req_q->pop_idx + 1) % MDP3_PPP_MAX_LIST_REQ;
+	req_q->count--;
+}
+
+/*
+ * mdp3_free_fw_timer_func() - timer callback, fired MDP_RELEASE_BW_TIMEOUT
+ * ms after the last blit (see mdp3_ppp_blit_handler).  Clears the
+ * solid-fill vote and defers the actual bandwidth release to process
+ * context via free_bw_work, since it needs to sleep on a mutex.
+ */
+void mdp3_free_fw_timer_func(unsigned long arg)
+{
+	mdp3_res->solid_fill_vote_en = false;
+	schedule_work(&ppp_stat->free_bw_work);
+}
+
+/*
+ * mdp3_free_bw_wq_handler() - workqueue half of the bandwidth-release
+ * timer: turns the PPP resources off if they are still on.
+ */
+static void mdp3_free_bw_wq_handler(struct work_struct *work)
+{
+	struct msm_fb_data_type *fb_data = ppp_stat->mfd;
+
+	mutex_lock(&ppp_stat->config_ppp_mutex);
+	if (ppp_stat->bw_on)
+		mdp3_ppp_turnon(fb_data, 0);
+	mutex_unlock(&ppp_stat->config_ppp_mutex);
+}
+
+/*
+ * is_hw_workaround_needed() - report whether a request would take either
+ * the MDP width-split workaround or the BG tile-fetch workaround.  Smart
+ * blit is disabled for such requests.
+ */
+static bool is_hw_workaround_needed(struct mdp_blit_req req)
+{
+	uint32_t bpp = ppp_get_bpp(req.dst.format, ppp_stat->mfd->fb_imgType);
+	uint32_t width_rem = req.dst_rect.w % 16;
+
+	/* MDP width split workaround */
+	if ((bpp == 4) && (width_rem == 6 || width_rem == 14) &&
+		!(req.flags & MDP_SOLID_FILL))
+		return true;
+
+	/* bg tile fetching HW workaround */
+	if ((req.flags & MDP_ROT_90) && (req.dst_rect.w <= 16) &&
+		((req.alpha < MDP_ALPHA_NOP) ||
+		(req.transp_mask != MDP_TRANSP_NOP) ||
+		(req.src.format == MDP_ARGB_8888) ||
+		(req.src.format == MDP_BGRA_8888) ||
+		(req.src.format == MDP_RGBA_8888)))
+		return true;
+
+	return false;
+}
+
+/*
+ * is_roi_equal() - check whether two blit requests target the same
+ * destination ROI, either exactly (same x/y/w/h) or effectively (same
+ * size and that size is the full panel resolution, i.e. both layers are
+ * source-cropped full-screen layers).
+ */
+static bool is_roi_equal(struct mdp_blit_req req0,
+		 struct mdp_blit_req req1)
+{
+	struct mdss_panel_info *pinfo = ppp_stat->mfd->panel_info;
+	bool same_size = (req0.dst_rect.w == req1.dst_rect.w) &&
+			(req0.dst_rect.h == req1.dst_rect.h);
+
+	/* Exact destination ROI match. */
+	if (same_size &&
+		(req0.dst_rect.x == req1.dst_rect.x) &&
+		(req0.dst_rect.y == req1.dst_rect.y))
+		return true;
+
+	/* Equal-sized full-panel layers count as equal too. */
+	if (same_size &&
+		(req0.dst_rect.w == pinfo->xres) &&
+		(req0.dst_rect.h == pinfo->yres))
+		return true;
+
+	return false;
+}
+
+/*
+ * is_scaling_needed() - a layer needs scaling whenever its source and
+ * destination ROI dimensions differ.
+ */
+static bool is_scaling_needed(struct mdp_blit_req req)
+{
+	return (req.src_rect.w != req.dst_rect.w) ||
+		(req.src_rect.h != req.dst_rect.h);
+}
+
+/*
+ * is_blit_optimization_possible() - decide whether request "indx" (BG) and
+ * "indx + 1" (FG) can be fused into one smart blit pass.
+ *
+ * Three ways to qualify: the caller already tagged the BG request with
+ * MDP_SMART_BLIT; an RGB BG layer at index 0 that needs no scaling or
+ * rotation and shares the FG's destination ROI; or a YUV BG layer at
+ * index 0, in which case the BG/FG requests *and* their src/dst buffer
+ * descriptors are swapped in place so the UI layer becomes the background.
+ *
+ * Returns true when smart blit should be used; may mutate req->req_list,
+ * req->src_data and req->dst_data (flags and layer order) as a side effect.
+ */
+static bool is_blit_optimization_possible(struct blit_req_list *req, int indx)
+{
+	int next = indx + 1;
+	bool status = false;
+	struct mdp3_img_data tmp_data;
+	bool dst_roi_equal = false;
+	bool hw_woraround_active = false;
+	struct mdp_blit_req bg_req;
+	struct mdp_blit_req fg_req;
+
+	if (!(mdp3_res->smart_blit_en)) {
+		pr_debug("Smart BLIT disabled from sysfs\n");
+		return status;
+	}
+	if (next < req->count) {
+		bg_req = req->req_list[indx];
+		fg_req = req->req_list[next];
+		hw_woraround_active = is_hw_workaround_needed(bg_req);
+		dst_roi_equal = is_roi_equal(bg_req, fg_req);
+		/*
+		 * Check userspace Smart BLIT Flag for current and next
+		 * request Flag for smart blit FG layer index If blit
+		 * request at index "n" has MDP_SMART_BLIT flag set then
+		 * it will be used as BG layer in smart blit
+		 * and request at index "n+1" will be used as FG layer
+		 */
+		if ((bg_req.flags & MDP_SMART_BLIT) &&
+		(!(fg_req.flags & MDP_SMART_BLIT)) &&
+		(!(hw_woraround_active)))
+			status = true;
+		/*
+		 * Enable SMART blit between request 0(BG) & request 1(FG) when
+		 * destination ROI of BG and FG layer are same,
+		 * No scaling on BG layer
+		 * No rotation on BG Layer.
+		 * BG Layer color format is RGB and marked as MDP_IS_FG.
+		 */
+		else if ((mdp3_res->smart_blit_en & SMART_BLIT_RGB_EN) &&
+			(indx == 0) && (dst_roi_equal) &&
+			(bg_req.flags & MDP_IS_FG) &&
+			(!(is_scaling_needed(bg_req))) &&
+			(!(bg_req.flags & (MDP_ROT_90))) &&
+			(check_if_rgb(bg_req.src.format)) &&
+			(!(hw_woraround_active))) {
+			status = true;
+			req->req_list[indx].flags |= MDP_SMART_BLIT;
+			pr_debug("Optimize RGB Blit for Req Indx %d\n", indx);
+		}
+		/*
+		 * Swap BG and FG layer to enable SMART blit between request
+		 * 0(BG) & request 1(FG) when destination ROI of BG and FG
+		 * layer are same, No scaling on FG and BG layer
+		 * No rotation on FG Layer. BG Layer color format is YUV
+		 */
+		else if ((indx == 0) &&
+			(mdp3_res->smart_blit_en & SMART_BLIT_YUV_EN) &&
+			(!(fg_req.flags & (MDP_ROT_90))) && (dst_roi_equal) &&
+			(!(check_if_rgb(bg_req.src.format))) &&
+			(!(hw_woraround_active))) {
+			/*
+			 * swap blit requests at index 0 and 1. YUV layer at
+			 * index 0 is replaced with UI layer request present
+			 * at index 1. Since UI layer will be in  background
+			 * set IS_FG flag and clear it from YUV layer flags
+			 */
+			if (!(is_scaling_needed(req->req_list[next]))) {
+				if (bg_req.flags & MDP_IS_FG) {
+					req->req_list[indx].flags &=
+								~MDP_IS_FG;
+					req->req_list[next].flags |= MDP_IS_FG;
+				}
+				/* Swap requests and their buffer handles. */
+				bg_req = req->req_list[next];
+				req->req_list[next] = req->req_list[indx];
+				req->req_list[indx] = bg_req;
+
+				tmp_data = req->src_data[next];
+				req->src_data[next] = req->src_data[indx];
+				req->src_data[indx] = tmp_data;
+
+				tmp_data = req->dst_data[next];
+				req->dst_data[next] = req->dst_data[indx];
+				req->dst_data[indx] = tmp_data;
+				status = true;
+				req->req_list[indx].flags |= MDP_SMART_BLIT;
+				pr_debug("Optimize YUV Blit for Req Indx %d\n",
+					indx);
+			}
+		}
+	}
+	return status;
+}
+
+/*
+ * mdp3_ppp_blit_handler() - kthread work function that drains the blit
+ * request queue.
+ *
+ * For each queued request list: wait for acquire fences, refresh the MDP
+ * clock/bandwidth votes, run every blit in the list (applying the smart
+ * blit optimization where possible), release the source/destination
+ * buffers and finally signal the release-fence timeline.  On return the
+ * bandwidth-release timer is re-armed.
+ *
+ * Fixes vs. the previous revision: the return value of mdp3_ppp_turnon()
+ * is now actually captured (the old "if (rc < 0)" tested a stale 0), the
+ * bw-quota error path no longer returns with config_ppp_mutex held, and
+ * the ATRACE begin tag matched its end tag ("mpd3" typo).
+ */
+static void mdp3_ppp_blit_handler(struct kthread_work *work)
+{
+	struct msm_fb_data_type *mfd = ppp_stat->mfd;
+	struct blit_req_list *req;
+	int i, rc = 0;
+	bool smart_blit = false;
+	/* Index of the FG layer of an active smart-blit pair, -1 if none. */
+	int smart_blit_fg_index = -1;
+
+	mutex_lock(&ppp_stat->config_ppp_mutex);
+	req = mdp3_ppp_next_req(&ppp_stat->req_q);
+	if (!req) {
+		mutex_unlock(&ppp_stat->config_ppp_mutex);
+		return;
+	}
+
+	if (!ppp_stat->bw_on) {
+		rc = mdp3_ppp_turnon(mfd, 1);
+		if (rc < 0) {
+			mutex_unlock(&ppp_stat->config_ppp_mutex);
+			pr_err("%s: Enable ppp resources failed\n", __func__);
+			return;
+		}
+	}
+	while (req) {
+		mdp3_ppp_wait_for_fence(req);
+		mdp3_calc_ppp_res(mfd, req);
+		if (ppp_res.clk_rate != ppp_stat->mdp_clk) {
+			ppp_stat->mdp_clk = ppp_res.clk_rate;
+			mdp3_clk_set_rate(MDP3_CLK_MDP_SRC,
+					ppp_stat->mdp_clk, MDP3_CLIENT_PPP);
+		}
+		if (ppp_stat->bw_update) {
+			rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_PPP,
+					ppp_res.next_ab, ppp_res.next_ib);
+			if (rc < 0) {
+				pr_err("%s: bw set quota failed\n", __func__);
+				/* Don't return with the mutex still held. */
+				mutex_unlock(&ppp_stat->config_ppp_mutex);
+				return;
+			}
+			ppp_stat->bw_update = false;
+		}
+		ATRACE_BEGIN("mdp3_ppp_start");
+		for (i = 0; i < req->count; i++) {
+			smart_blit = is_blit_optimization_possible(req, i);
+			if (smart_blit)
+				/*
+				 * Blit request index of FG layer in
+				 * smart blit
+				 */
+				smart_blit_fg_index = i + 1;
+			if (!(req->req_list[i].flags & MDP_NO_BLIT)) {
+				/* Do the actual blit. */
+				if (!rc) {
+					rc = mdp3_ppp_start_blit(mfd,
+						&(req->req_list[i]),
+						&req->src_data[i],
+						&req->dst_data[i]);
+				}
+				/* Unmap blit source buffer */
+				if (smart_blit == false) {
+					mdp3_put_img(&req->src_data[i],
+						MDP3_CLIENT_PPP);
+				}
+				if (smart_blit_fg_index == i) {
+					/* Unmap smart blit BG buffer */
+					mdp3_put_img(&req->src_data[i - 1],
+						MDP3_CLIENT_PPP);
+					smart_blit_fg_index = -1;
+				}
+				mdp3_put_img(&req->dst_data[i],
+					MDP3_CLIENT_PPP);
+				smart_blit = false;
+			}
+		}
+		ATRACE_END("mdp3_ppp_start");
+		/* Signal to release fence */
+		mutex_lock(&ppp_stat->req_mutex);
+		mdp3_ppp_signal_timeline(req);
+		mdp3_ppp_req_pop(&ppp_stat->req_q);
+		req = mdp3_ppp_next_req(&ppp_stat->req_q);
+		if (ppp_stat->wait_for_pop)
+			complete(&ppp_stat->pop_q_comp);
+		mutex_unlock(&ppp_stat->req_mutex);
+	}
+	/* Release the bandwidth vote if no new blits arrive in time. */
+	mod_timer(&ppp_stat->free_bw_timer, jiffies +
+		msecs_to_jiffies(MDP_RELEASE_BW_TIMEOUT));
+	mutex_unlock(&ppp_stat->config_ppp_mutex);
+}
+
+/*
+ * mdp3_ppp_parse_req() - copy an async blit request list from userspace,
+ * set up fences and buffer references, and queue it for the blit worker.
+ *
+ * Waits (bounded) for queue space, validates and copies the request array,
+ * imports acquire fences, pins every src/dst buffer, then pushes the list
+ * and kicks the worker.  In sync mode the call blocks on the release
+ * fence; in async mode the release-fence fd is returned to userspace.
+ *
+ * Returns 0 on success or a negative errno; all references taken are
+ * released on the error paths.
+ */
+int mdp3_ppp_parse_req(void __user *p,
+	struct mdp_async_blit_req_list *req_list_header,
+	int async)
+{
+	struct blit_req_list *req;
+	struct blit_req_queue *req_q = &ppp_stat->req_q;
+	struct sync_fence *fence = NULL;
+	int count, rc, idx, i;
+
+	count = req_list_header->count;
+
+	/*
+	 * count comes from userspace; bound it before it sizes the
+	 * copy_from_user() below, or a hostile caller can overflow
+	 * req->req_list.
+	 */
+	if (count < 0 || count > MAX_BLIT_REQ)
+		return -EINVAL;
+
+	mutex_lock(&ppp_stat->req_mutex);
+	while (req_q->count >= MDP3_PPP_MAX_LIST_REQ) {
+		ppp_stat->wait_for_pop = true;
+		mutex_unlock(&ppp_stat->req_mutex);
+		rc = wait_for_completion_timeout(
+		   &ppp_stat->pop_q_comp, 5 * HZ);
+		if (rc == 0) {
+			/* This will only occur if there is serious problem */
+			pr_err("%s: timeout exiting queuing request\n",
+				   __func__);
+			return -EBUSY;
+		}
+		mutex_lock(&ppp_stat->req_mutex);
+		ppp_stat->wait_for_pop = false;
+	}
+	idx = req_q->push_idx;
+	req = &req_q->req[idx];
+
+	if (copy_from_user(&req->req_list, p,
+			sizeof(struct mdp_blit_req) * count)) {
+		mutex_unlock(&ppp_stat->req_mutex);
+		return -EFAULT;
+	}
+
+	rc = mdp3_ppp_handle_buf_sync(req, &req_list_header->sync);
+	if (rc < 0) {
+		pr_err("%s: Failed create sync point\n", __func__);
+		mutex_unlock(&ppp_stat->req_mutex);
+		return rc;
+	}
+	req->count = count;
+
+	/* We need to grab ion handle while running in client thread */
+	for (i = 0; i < count; i++) {
+		rc = mdp3_ppp_get_img(&req->req_list[i].src,
+				&req->req_list[i], &req->src_data[i]);
+		if (rc < 0 || req->src_data[i].len == 0) {
+			pr_err("mdp_ppp: couldn't retrieve src img from mem\n");
+			goto parse_err_1;
+		}
+
+		rc = mdp3_ppp_get_img(&req->req_list[i].dst,
+				&req->req_list[i], &req->dst_data[i]);
+		if (rc < 0 || req->dst_data[i].len == 0) {
+			mdp3_put_img(&req->src_data[i], MDP3_CLIENT_PPP);
+			pr_err("mdp_ppp: couldn't retrieve dest img from mem\n");
+			goto parse_err_1;
+		}
+	}
+
+	if (async) {
+		req->cur_rel_fen_fd = get_unused_fd_flags(0);
+		if (req->cur_rel_fen_fd < 0) {
+			pr_err("%s: get_unused_fd_flags failed\n", __func__);
+			rc  = -ENOMEM;
+			goto parse_err_1;
+		}
+		sync_fence_install(req->cur_rel_fence, req->cur_rel_fen_fd);
+		if (copy_to_user(req_list_header->sync.rel_fen_fd,
+				&req->cur_rel_fen_fd, sizeof(int))) {
+			pr_err("%s:copy_to_user failed\n", __func__);
+			/* Positive leftover-byte counts are not errnos. */
+			rc = -EFAULT;
+			goto parse_err_2;
+		}
+	} else {
+		fence = req->cur_rel_fence;
+	}
+
+	mdp3_ppp_req_push(req_q, req);
+	mutex_unlock(&ppp_stat->req_mutex);
+	queue_kthread_work(&ppp_stat->kworker, &ppp_stat->blit_work);
+	if (!async) {
+		/* wait for release fence */
+		rc = sync_fence_wait(fence,
+				5 * MSEC_PER_SEC);
+		if (rc < 0)
+			pr_err("%s: sync blit! rc = %x\n", __func__, rc);
+
+		sync_fence_put(fence);
+		fence = NULL;
+	}
+	return 0;
+
+parse_err_2:
+	put_unused_fd(req->cur_rel_fen_fd);
+parse_err_1:
+	for (i--; i >= 0; i--) {
+		mdp3_put_img(&req->src_data[i], MDP3_CLIENT_PPP);
+		mdp3_put_img(&req->dst_data[i], MDP3_CLIENT_PPP);
+	}
+	mdp3_ppp_deinit_buf_sync(req);
+	mutex_unlock(&ppp_stat->req_mutex);
+	return rc;
+}
+
+/*
+ * mdp3_ppp_res_init() - one-time initialization of PPP software state.
+ *
+ * Allocates the global ppp_stat context, creates the sw_sync release
+ * timeline, starts the SCHED_FIFO blit worker thread and sets up the
+ * bandwidth-release work/timer.
+ *
+ * Returns 0 on success or a negative errno; the context allocation is
+ * released on the failure paths instead of being leaked.
+ */
+int mdp3_ppp_res_init(struct msm_fb_data_type *mfd)
+{
+	int rc;
+	struct sched_param param = {.sched_priority = 16};
+	const char timeline_name[] = "mdp3_ppp";
+
+	ppp_stat = kzalloc(sizeof(struct ppp_status), GFP_KERNEL);
+	if (!ppp_stat)
+		return -ENOMEM;
+
+	/*Setup sync_pt timeline for ppp*/
+	ppp_stat->timeline = sw_sync_timeline_create(timeline_name);
+	if (ppp_stat->timeline == NULL) {
+		pr_err("%s: cannot create time line\n", __func__);
+		kfree(ppp_stat);
+		ppp_stat = NULL;
+		return -ENOMEM;
+	}
+	ppp_stat->timeline_value = 1;
+
+	init_kthread_worker(&ppp_stat->kworker);
+	init_kthread_work(&ppp_stat->blit_work, mdp3_ppp_blit_handler);
+	ppp_stat->blit_thread = kthread_run(kthread_worker_fn,
+					&ppp_stat->kworker,
+					"mdp3_ppp");
+
+	if (IS_ERR(ppp_stat->blit_thread)) {
+		rc = PTR_ERR(ppp_stat->blit_thread);
+		pr_err("ERROR: unable to start ppp blit thread,err = %d\n",
+							rc);
+		ppp_stat->blit_thread = NULL;
+		/*
+		 * NOTE(review): the sw_sync timeline object itself is not
+		 * torn down here — confirm whether this tree exposes
+		 * sync_timeline_destroy() for that.
+		 */
+		kfree(ppp_stat);
+		ppp_stat = NULL;
+		return rc;
+	}
+	if (sched_setscheduler(ppp_stat->blit_thread, SCHED_FIFO, &param))
+		pr_warn("set priority failed for mdp3 blit thread\n");
+
+	INIT_WORK(&ppp_stat->free_bw_work, mdp3_free_bw_wq_handler);
+	init_completion(&ppp_stat->pop_q_comp);
+	mutex_init(&ppp_stat->req_mutex);
+	mutex_init(&ppp_stat->config_ppp_mutex);
+	init_timer(&ppp_stat->free_bw_timer);
+	ppp_stat->free_bw_timer.function = mdp3_free_fw_timer_func;
+	ppp_stat->free_bw_timer.data = 0;
+	ppp_stat->mfd = mfd;
+	mdp3_ppp_callback_setup();
+	return 0;
+}
diff --git a/drivers/video/fbdev/msm/mdp3_ppp.h b/drivers/video/fbdev/msm/mdp3_ppp.h
new file mode 100644
index 0000000..1f82851
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdp3_ppp.h
@@ -0,0 +1,430 @@
+/* Copyright (c) 2007, 2013, 2016, 2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2007 Google Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MDP3_PPP_H
+#define MDP3_PPP_H
+#include "mdp3.h"
+#include "mdss_fb.h"
+
+#define PPP_WRITEL(val, off) MDP3_REG_WRITE(off, val)
+
+#define MAX_BLIT_REQ 16
+#define PPP_UPSCALE_MAX 64
+#define PPP_BLUR_SCALE_MAX 128
+#define PPP_LUT_MAX 256
+
+#define MDPOP_SMART_BLIT        BIT(31) /* blit optimization flag */
+
+/* MDP PPP Operations */
+#define MDPOP_NOP               0
+#define MDPOP_LR                BIT(0)	/* left to right flip */
+#define MDPOP_UD                BIT(1)	/* up and down flip */
+#define MDPOP_ROT90             BIT(2)	/* rotate image to 90 degree */
+#define MDPOP_ROT180            (MDPOP_UD|MDPOP_LR)
+#define MDPOP_ROT270            (MDPOP_ROT90|MDPOP_UD|MDPOP_LR)
+#define MDPOP_ASCALE            BIT(7)
+#define MDPOP_ALPHAB            BIT(8)	/* enable alpha blending */
+#define MDPOP_TRANSP            BIT(9)	/* enable transparency */
+#define MDPOP_DITHER            BIT(10)	/* enable dither */
+#define MDPOP_SHARPENING		BIT(11) /* enable sharpening */
+#define MDPOP_BLUR				BIT(12) /* enable blur */
+#define MDPOP_FG_PM_ALPHA       BIT(13)
+#define MDPOP_LAYER_IS_FG       BIT(14)
+
+#define MDPOP_ROTATION (MDPOP_ROT90|MDPOP_LR|MDPOP_UD)
+
+#define PPP_OP_CONVERT_YCBCR2RGB BIT(2)
+#define PPP_OP_CONVERT_ON		BIT(3)
+#define PPP_OP_SCALE_X_ON		BIT(0)
+#define PPP_OP_SCALE_Y_ON		BIT(1)
+#define PPP_OP_ROT_ON			BIT(8)
+#define PPP_OP_ROT_90			BIT(9)
+#define PPP_OP_FLIP_LR			BIT(10)
+#define PPP_OP_FLIP_UD			BIT(11)
+#define PPP_OP_BLEND_ON			BIT(12)
+#define PPP_OP_BLEND_CONSTANT_ALPHA BIT(14)
+#define PPP_OP_BLEND_BG_ALPHA		BIT(13)
+#define PPP_OP_BLEND_EQ_REVERSE		BIT(15)
+#define PPP_OP_DITHER_EN		BIT(16)
+#define PPP_BLEND_CALPHA_TRNASP BIT(24)
+
+#define PPP_OP_BLEND_SRCPIXEL_ALPHA 0
+#define PPP_OP_BLEND_ALPHA_BLEND_NORMAL 0
+#define PPP_OP_BLEND_ALPHA_BLEND_REVERSE BIT(15)
+
+#define PPP_BLEND_BG_USE_ALPHA_SEL      (1 << 0)
+#define PPP_BLEND_BG_ALPHA_REVERSE      (1 << 3)
+#define PPP_BLEND_BG_SRCPIXEL_ALPHA     (0 << 1)
+#define PPP_BLEND_BG_DSTPIXEL_ALPHA     (1 << 1)
+#define PPP_BLEND_BG_CONSTANT_ALPHA     (2 << 1)
+#define PPP_BLEND_BG_CONST_ALPHA_VAL(x) ((x) << 24)
+#define PPP_OP_BG_CHROMA_H2V1 BIT(25)
+
+#define CLR_G 0x0
+#define CLR_B 0x1
+#define CLR_R 0x2
+#define CLR_ALPHA 0x3
+
+#define CLR_Y  CLR_G
+#define CLR_CB CLR_B
+#define CLR_CR CLR_R
+
+/* from lsb to msb */
+#define PPP_GET_PACK_PATTERN(a, x, y, z, bit) \
+	(((a)<<(bit*3))|((x)<<(bit*2))|((y)<<bit)|(z))
+
+/* Frame unpacking */
+#define PPP_C0G_8BITS (BIT(1)|BIT(0))
+#define PPP_C1B_8BITS (BIT(3)|BIT(2))
+#define PPP_C2R_8BITS (BIT(5)|BIT(4))
+#define PPP_C3A_8BITS (BIT(7)|BIT(6))
+
+#define PPP_C0G_6BITS BIT(1)
+#define PPP_C1B_6BITS BIT(3)
+#define PPP_C2R_6BITS BIT(5)
+
+#define PPP_C0G_5BITS BIT(0)
+#define PPP_C1B_5BITS BIT(2)
+#define PPP_C2R_5BITS BIT(4)
+
+#define PPP_SRC_C3_ALPHA_EN BIT(8)
+
+#define PPP_SRC_BPP_INTERLVD_1BYTES 0
+#define PPP_SRC_BPP_INTERLVD_2BYTES BIT(9)
+#define PPP_SRC_BPP_INTERLVD_3BYTES BIT(10)
+#define PPP_SRC_BPP_INTERLVD_4BYTES (BIT(10)|BIT(9))
+
+#define PPP_SRC_BPP_ROI_ODD_X BIT(11)
+#define PPP_SRC_BPP_ROI_ODD_Y BIT(12)
+#define PPP_SRC_INTERLVD_2COMPONENTS BIT(13)
+#define PPP_SRC_INTERLVD_3COMPONENTS BIT(14)
+#define PPP_SRC_INTERLVD_4COMPONENTS (BIT(14)|BIT(13))
+
+#define PPP_SRC_UNPACK_TIGHT BIT(17)
+#define PPP_SRC_UNPACK_LOOSE 0
+#define PPP_SRC_UNPACK_ALIGN_LSB 0
+#define PPP_SRC_UNPACK_ALIGN_MSB BIT(18)
+
+#define PPP_SRC_FETCH_PLANES_INTERLVD 0
+#define PPP_SRC_FETCH_PLANES_PSEUDOPLNR BIT(20)
+
+#define PPP_OP_SRC_CHROMA_H2V1 BIT(18)
+#define PPP_OP_SRC_CHROMA_H1V2 BIT(19)
+#define PPP_OP_SRC_CHROMA_420 (BIT(18)|BIT(19))
+#define PPP_OP_SRC_CHROMA_OFFSITE BIT(20)
+
+#define PPP_DST_PACKET_CNT_INTERLVD_2ELEM BIT(9)
+#define PPP_DST_PACKET_CNT_INTERLVD_3ELEM BIT(10)
+#define PPP_DST_PACKET_CNT_INTERLVD_4ELEM (BIT(10)|BIT(9))
+#define PPP_DST_PACKET_CNT_INTERLVD_6ELEM (BIT(11)|BIT(9))
+
+#define PPP_DST_C3A_8BIT (BIT(7)|BIT(6))
+#define PPP_DST_C3ALPHA_EN BIT(8)
+
+#define PPP_DST_PACK_LOOSE 0
+#define PPP_DST_PACK_TIGHT BIT(13)
+#define PPP_DST_PACK_ALIGN_LSB 0
+#define PPP_DST_PACK_ALIGN_MSB BIT(14)
+
+#define PPP_DST_OUT_SEL_AXI 0
+#define PPP_DST_OUT_SEL_MDDI BIT(15)
+
+#define PPP_DST_BPP_2BYTES BIT(16)
+#define PPP_DST_BPP_3BYTES BIT(17)
+#define PPP_DST_BPP_4BYTES (BIT(17)|BIT(16))
+
+#define PPP_DST_PLANE_INTERLVD 0
+#define PPP_DST_PLANE_PLANAR BIT(18)
+#define PPP_DST_PLANE_PSEUDOPLN BIT(19)
+
+#define PPP_OP_DST_CHROMA_H2V1 BIT(21)
+#define PPP_OP_DST_CHROMA_420 (BIT(21)|BIT(22))
+#define PPP_OP_COLOR_SPACE_YCBCR BIT(17)
+
+#define MDP_SCALE_Q_FACTOR 512
+#define MDP_MAX_X_SCALE_FACTOR (MDP_SCALE_Q_FACTOR*4)
+#define MDP_MIN_X_SCALE_FACTOR (MDP_SCALE_Q_FACTOR/4)
+#define MDP_MAX_Y_SCALE_FACTOR (MDP_SCALE_Q_FACTOR*4)
+#define MDP_MIN_Y_SCALE_FACTOR (MDP_SCALE_Q_FACTOR/4)
+
+#define MDP_TOP_LUMA       16
+#define MDP_TOP_CHROMA     0
+#define MDP_BOTTOM_LUMA    19
+#define MDP_BOTTOM_CHROMA  3
+#define MDP_LEFT_LUMA      22
+#define MDP_LEFT_CHROMA    6
+#define MDP_RIGHT_LUMA     25
+#define MDP_RIGHT_CHROMA   9
+
+#define MDP_RGB_565_SRC_REG (PPP_C2R_5BITS | PPP_C0G_6BITS | \
+	PPP_C1B_5BITS | PPP_SRC_BPP_INTERLVD_2BYTES | \
+	PPP_SRC_INTERLVD_3COMPONENTS | PPP_SRC_UNPACK_TIGHT | \
+	PPP_SRC_UNPACK_ALIGN_LSB | \
+	PPP_SRC_FETCH_PLANES_INTERLVD)
+
+#define MDP_RGB_888_SRC_REG (PPP_C2R_8BITS | PPP_C0G_8BITS | \
+	PPP_C1B_8BITS | PPP_SRC_BPP_INTERLVD_3BYTES | \
+	PPP_SRC_INTERLVD_3COMPONENTS | PPP_SRC_UNPACK_TIGHT | \
+	PPP_SRC_UNPACK_ALIGN_LSB | PPP_SRC_FETCH_PLANES_INTERLVD)
+
+#define MDP_RGBX_8888_SRC_REG (PPP_C2R_8BITS | PPP_C0G_8BITS | \
+	PPP_C1B_8BITS | PPP_C3A_8BITS | \
+	PPP_SRC_C3_ALPHA_EN | PPP_SRC_BPP_INTERLVD_4BYTES | \
+	PPP_SRC_INTERLVD_4COMPONENTS | PPP_SRC_UNPACK_TIGHT | \
+	PPP_SRC_UNPACK_ALIGN_LSB | \
+	PPP_SRC_FETCH_PLANES_INTERLVD)
+
+#define MDP_Y_CBCR_H2V2_SRC_REG (PPP_C2R_8BITS | PPP_C0G_8BITS | \
+	PPP_C1B_8BITS | PPP_SRC_BPP_INTERLVD_2BYTES | \
+	PPP_SRC_INTERLVD_2COMPONENTS | PPP_SRC_UNPACK_TIGHT | \
+	PPP_SRC_UNPACK_ALIGN_LSB | \
+	PPP_SRC_FETCH_PLANES_PSEUDOPLNR)
+
+#define MDP_YCRYCB_H2V1_SRC_REG (PPP_C2R_8BITS | \
+	PPP_C0G_8BITS | PPP_C1B_8BITS | \
+	PPP_C3A_8BITS | PPP_SRC_BPP_INTERLVD_2BYTES | \
+	PPP_SRC_INTERLVD_4COMPONENTS | \
+	PPP_SRC_UNPACK_TIGHT | PPP_SRC_UNPACK_ALIGN_LSB)
+
+#define MDP_Y_CRCB_H2V1_SRC_REG (PPP_C2R_8BITS | \
+	PPP_C0G_8BITS | PPP_C1B_8BITS | \
+	PPP_C3A_8BITS | PPP_SRC_BPP_INTERLVD_2BYTES | \
+	PPP_SRC_INTERLVD_2COMPONENTS | PPP_SRC_UNPACK_TIGHT | \
+	PPP_SRC_UNPACK_ALIGN_LSB | PPP_SRC_FETCH_PLANES_PSEUDOPLNR)
+
+#define MDP_RGB_565_DST_REG (PPP_C0G_6BITS | \
+	PPP_C1B_5BITS | PPP_C2R_5BITS | \
+	PPP_DST_PACKET_CNT_INTERLVD_3ELEM | \
+	PPP_DST_PACK_TIGHT | PPP_DST_PACK_ALIGN_LSB | \
+	PPP_DST_OUT_SEL_AXI | PPP_DST_BPP_2BYTES | \
+	PPP_DST_PLANE_INTERLVD)
+
+#define MDP_RGB_888_DST_REG (PPP_C0G_8BITS | \
+	PPP_C1B_8BITS | PPP_C2R_8BITS | \
+	PPP_DST_PACKET_CNT_INTERLVD_3ELEM | PPP_DST_PACK_TIGHT | \
+	PPP_DST_PACK_ALIGN_LSB | PPP_DST_OUT_SEL_AXI | \
+	PPP_DST_BPP_3BYTES | PPP_DST_PLANE_INTERLVD)
+
+#define MDP_RGBX_8888_DST_REG (PPP_C0G_8BITS | \
+	PPP_C1B_8BITS | PPP_C2R_8BITS | PPP_C3A_8BITS | \
+	PPP_DST_C3ALPHA_EN | PPP_DST_PACKET_CNT_INTERLVD_4ELEM | \
+	PPP_DST_PACK_TIGHT | PPP_DST_PACK_ALIGN_LSB | \
+	PPP_DST_OUT_SEL_AXI | PPP_DST_BPP_4BYTES | \
+	PPP_DST_PLANE_INTERLVD)
+
+#define MDP_Y_CBCR_H2V2_DST_REG (PPP_C2R_8BITS | \
+	PPP_C0G_8BITS | PPP_C1B_8BITS | PPP_C3A_8BITS | \
+	PPP_DST_PACKET_CNT_INTERLVD_2ELEM | \
+	PPP_DST_PACK_TIGHT | PPP_DST_PACK_ALIGN_LSB | \
+	PPP_DST_OUT_SEL_AXI | PPP_DST_BPP_2BYTES)
+
+#define MDP_YCRYCB_H2V1_DST_REG (PPP_C2R_8BITS | PPP_C0G_8BITS | \
+	PPP_C1B_8BITS | PPP_C3A_8BITS | PPP_DST_PACKET_CNT_INTERLVD_4ELEM | \
+	PPP_DST_PACK_TIGHT | PPP_DST_PACK_ALIGN_LSB | \
+	PPP_DST_OUT_SEL_AXI | PPP_DST_BPP_2BYTES | \
+	PPP_DST_PLANE_INTERLVD)
+
+#define MDP_Y_CRCB_H2V1_DST_REG (PPP_C2R_8BITS | \
+	PPP_C0G_8BITS | PPP_C1B_8BITS | PPP_C3A_8BITS | \
+	PPP_DST_PACKET_CNT_INTERLVD_2ELEM | PPP_DST_PACK_TIGHT | \
+	PPP_DST_PACK_ALIGN_LSB | PPP_DST_OUT_SEL_AXI | \
+	PPP_DST_BPP_2BYTES)
+
+/* LUT */
+#define MDP_LUT_C0_EN BIT(5)
+#define MDP_LUT_C1_EN BIT(6)
+#define MDP_LUT_C2_EN BIT(7)
+
+/* Dither */
+#define MDP_OP_DITHER_EN BIT(16)
+
+/* Rotator */
+#define MDP_OP_ROT_ON BIT(8)
+#define MDP_OP_ROT_90 BIT(9)
+#define MDP_OP_FLIP_LR BIT(10)
+#define MDP_OP_FLIP_UD BIT(11)
+
+/* Blend */
+#define MDP_OP_BLEND_EN BIT(12)
+#define MDP_OP_BLEND_EQ_SEL BIT(15)
+#define MDP_OP_BLEND_TRANSP_EN BIT(24)
+#define MDP_BLEND_MASK (MDP_OP_BLEND_EN | MDP_OP_BLEND_EQ_SEL | \
+	MDP_OP_BLEND_TRANSP_EN | BIT(14) | BIT(13))
+
+#define MDP_BLEND_ALPHA_SEL 13
+#define MDP_BLEND_ALPHA_MASK 0x3
+#define MDP_BLEND_CONST_ALPHA 24
+#define MDP_BLEND_TRASP_COL_MASK 0xFFFFFF
+
+/* CSC Matrix */
+#define MDP_CSC_RGB2YUV		0
+#define MDP_CSC_YUV2RGB		1
+
+#define MDP_CSC_SIZE	9
+#define MDP_BV_SIZE		3
+#define MDP_LV_SIZE		4
+
+enum ppp_lut_type {
+	LUT_PRE_TABLE = 0,
+	LUT_POST_TABLE,
+};
+
+enum ppp_csc_matrix {
+	CSC_PRIMARY_MATRIX = 0,
+	CSC_SECONDARY_MATRIX,
+};
+
+/* scale tables */
+enum {
+	PPP_DOWNSCALE_PT2TOPT4,
+	PPP_DOWNSCALE_PT4TOPT6,
+	PPP_DOWNSCALE_PT6TOPT8,
+	PPP_DOWNSCALE_PT8TOPT1,
+	PPP_DOWNSCALE_MAX,
+};
+
+struct ppp_table {
+	uint32_t reg;
+	uint32_t val;
+};
+
+struct ppp_resource {
+	u64 next_ab;	/* presumably next bus bandwidth vote (AB) — confirm against voter */
+	u64 next_ib;	/* presumably next bus bandwidth vote (IB) — confirm against voter */
+	u64 clk_rate;	/* requested PPP clock rate */
+	u64 solid_fill_pixel;	/* accumulated solid-fill pixel count */
+	u64 solid_fill_byte;	/* accumulated solid-fill byte count */
+};
+
+struct ppp_csc_table {
+	int direction;			/* MDP_CSC_RGB2YUV or MDP_CSC_YUV2RGB */
+	uint16_t fwd_matrix[MDP_CCS_SIZE];	/* 3x3 coefficients; MDP_CCS_SIZE declared elsewhere (msm_mdp.h?) — confirm it equals MDP_CSC_SIZE */
+	uint16_t rev_matrix[MDP_CCS_SIZE];	/* 3x3 coefficients for the reverse direction */
+	uint16_t bv[MDP_BV_SIZE];	/* 1x3 bias vector */
+	uint16_t lv[MDP_LV_SIZE];	/* 1x4 limit vector (low/high for luma and chroma) */
+};
+
+struct ppp_blend {
+	int const_alpha;
+	int trans_color; /*color keying*/
+};
+
+struct ppp_img_prop {
+	int32_t x;
+	int32_t y;
+	uint32_t width;
+	uint32_t height;
+};
+
+struct ppp_img_desc {
+	struct ppp_img_prop prop;	/* full image geometry */
+	struct ppp_img_prop roi;	/* region of interest within prop */
+	int color_fmt;			/* MDP_* pixel format */
+	void *p0;  /* plane 0 */
+	void *p1;  /* plane 1 */
+	void *p3;  /* NOTE(review): third plane despite the name — there is no p2; paired with stride2 below */
+	int stride0;	/* bytes per line, plane 0 */
+	int stride1;	/* bytes per line, plane 1 */
+	int stride2;	/* bytes per line, third plane (p3) */
+};
+
+struct ppp_blit_op {
+	struct ppp_img_desc src;	/* blit source */
+	struct ppp_img_desc dst;	/* blit destination */
+	struct ppp_img_desc bg;		/* background layer used for blending */
+	struct ppp_blend blend;		/* constant alpha / transparency color */
+	uint32_t mdp_op; /* Operations */
+	uint32_t solid_fill_color;	/* fill color, used when solid_fill is true */
+	bool solid_fill;		/* true: fill dst ROI instead of copying src */
+};
+
+struct ppp_edge_rep {
+	uint32_t dst_roi_width;
+	uint32_t dst_roi_height;
+	uint32_t is_scale_enabled;
+
+	/*
+	 * positions of the luma pixel(relative to the image ) required for
+	 * scaling the ROI
+	 */
+	int32_t luma_interp_point_left;
+	int32_t luma_interp_point_right;
+	int32_t luma_interp_point_top;
+	int32_t luma_interp_point_bottom;
+
+	/*
+	 * positions of the chroma pixel(relative to the image ) required for
+	 * interpolating a chroma value at all required luma positions
+	 */
+	int32_t chroma_interp_point_left;
+	int32_t chroma_interp_point_right;
+	int32_t chroma_interp_point_top;
+	int32_t chroma_interp_point_bottom;
+
+	/*
+	 * a rectangular region within the chroma plane of the "image".
+	 * Chroma pixels falling inside of this rectangle belongs to the ROI
+	 */
+	int32_t chroma_bound_left;
+	int32_t chroma_bound_right;
+	int32_t chroma_bound_top;
+	int32_t chroma_bound_bottom;
+
+	/*
+	 * number of chroma pixels to replicate on the left, right,
+	 * top and bottom edge of the ROI.
+	 */
+	int32_t chroma_repeat_left;
+	int32_t chroma_repeat_right;
+	int32_t chroma_repeat_top;
+	int32_t chroma_repeat_bottom;
+
+	/*
+	 * number of luma pixels to replicate on the left, right,
+	 * top and bottom edge of the ROI.
+	 */
+	int32_t luma_repeat_left;
+	int32_t luma_repeat_right;
+	int32_t luma_repeat_top;
+	int32_t luma_repeat_bottom;
+};
+
+bool check_if_rgb(int color);
+
+/* func for ppp register values */
+uint32_t ppp_bpp(uint32_t type);
+uint32_t ppp_src_config(uint32_t type);
+uint32_t ppp_out_config(uint32_t type);
+uint32_t ppp_pack_pattern(uint32_t type, uint32_t yuv2rgb);
+uint32_t ppp_dst_op_reg(uint32_t type);
+uint32_t ppp_src_op_reg(uint32_t type);
+bool ppp_per_p_alpha(uint32_t type);
+bool ppp_multi_plane(uint32_t type);
+uint32_t *ppp_default_pre_lut(void);
+uint32_t *ppp_default_post_lut(void);
+struct ppp_csc_table *ppp_csc_rgb2yuv(void);
+struct ppp_csc_table *ppp_csc_table2(void);
+void ppp_load_up_lut(void);
+void ppp_load_gaussian_lut(void);
+void ppp_load_x_scale_table(int idx);
+void ppp_load_y_scale_table(int idx);
+
+int mdp3_ppp_res_init(struct msm_fb_data_type *mfd);
+int mdp3_ppp_init(void);
+int config_ppp_op_mode(struct ppp_blit_op *blit_op);
+void ppp_enable(void);
+int mdp3_ppp_parse_req(void __user *p,
+	struct mdp_async_blit_req_list *req_list_header,
+	int async);
+
+#endif
diff --git a/drivers/video/fbdev/msm/mdp3_ppp_data.c b/drivers/video/fbdev/msm/mdp3_ppp_data.c
new file mode 100644
index 0000000..ac88d9b
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdp3_ppp_data.c
@@ -0,0 +1,1619 @@
+/* Copyright (c) 2007, 2012-2013, 2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2007 Google Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>
+
+#include "mdss_fb.h"
+#include "mdp3_ppp.h"
+
+#define MDP_IS_IMGTYPE_BAD(x) ((x) >= MDP_IMGTYPE_LIMIT)
+
+/* bg_config_lut not needed since it is same as src */
+const uint32_t src_cfg_lut[MDP_IMGTYPE_LIMIT] = {
+	[MDP_RGB_565] = MDP_RGB_565_SRC_REG,
+	[MDP_BGR_565] = MDP_RGB_565_SRC_REG,
+	[MDP_RGB_888] = MDP_RGB_888_SRC_REG,
+	[MDP_BGR_888] = MDP_RGB_888_SRC_REG,
+	[MDP_BGRA_8888] = MDP_RGBX_8888_SRC_REG,
+	[MDP_RGBA_8888] = MDP_RGBX_8888_SRC_REG,
+	[MDP_ARGB_8888] = MDP_RGBX_8888_SRC_REG,
+	[MDP_XRGB_8888] = MDP_RGBX_8888_SRC_REG,
+	[MDP_RGBX_8888] = MDP_RGBX_8888_SRC_REG,
+	[MDP_Y_CRCB_H2V2] = MDP_Y_CBCR_H2V2_SRC_REG,
+	[MDP_Y_CBCR_H2V2] = MDP_Y_CBCR_H2V2_SRC_REG,
+	[MDP_Y_CBCR_H2V2_ADRENO] = MDP_Y_CBCR_H2V2_SRC_REG,
+	[MDP_Y_CBCR_H2V2_VENUS] = MDP_Y_CBCR_H2V2_SRC_REG,
+	[MDP_YCRYCB_H2V1] = MDP_YCRYCB_H2V1_SRC_REG,
+	[MDP_Y_CBCR_H2V1] = MDP_Y_CRCB_H2V1_SRC_REG,
+	[MDP_Y_CRCB_H2V1] = MDP_Y_CRCB_H2V1_SRC_REG,
+	[MDP_BGRX_8888] = MDP_RGBX_8888_SRC_REG,
+};
+
+const uint32_t out_cfg_lut[MDP_IMGTYPE_LIMIT] = {
+	[MDP_RGB_565] = MDP_RGB_565_DST_REG,
+	[MDP_BGR_565] = MDP_RGB_565_DST_REG,
+	[MDP_RGB_888] = MDP_RGB_888_DST_REG,
+	[MDP_BGR_888] = MDP_RGB_888_DST_REG,
+	[MDP_BGRA_8888] = MDP_RGBX_8888_DST_REG,
+	[MDP_RGBA_8888] = MDP_RGBX_8888_DST_REG,
+	[MDP_ARGB_8888] = MDP_RGBX_8888_DST_REG,
+	[MDP_XRGB_8888] = MDP_RGBX_8888_DST_REG,
+	[MDP_RGBX_8888] = MDP_RGBX_8888_DST_REG,
+	[MDP_Y_CRCB_H2V2] = MDP_Y_CBCR_H2V2_DST_REG,
+	[MDP_Y_CBCR_H2V2] = MDP_Y_CBCR_H2V2_DST_REG,
+	[MDP_Y_CBCR_H2V2_ADRENO] = MDP_Y_CBCR_H2V2_DST_REG,
+	[MDP_Y_CBCR_H2V2_VENUS] = MDP_Y_CBCR_H2V2_DST_REG,
+	[MDP_YCRYCB_H2V1] = MDP_YCRYCB_H2V1_DST_REG,
+	[MDP_Y_CBCR_H2V1] = MDP_Y_CRCB_H2V1_DST_REG,
+	[MDP_Y_CRCB_H2V1] = MDP_Y_CRCB_H2V1_DST_REG,
+	[MDP_BGRX_8888] = MDP_RGBX_8888_DST_REG,
+};
+
+const uint32_t pack_patt_lut[MDP_IMGTYPE_LIMIT] = {
+	[MDP_RGB_565] = PPP_GET_PACK_PATTERN(0, CLR_B, CLR_G, CLR_R, 8),
+	[MDP_BGR_565] = PPP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8),
+	[MDP_RGB_888] = PPP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8),
+	[MDP_BGR_888] = PPP_GET_PACK_PATTERN(0, CLR_B, CLR_G, CLR_R, 8),
+	[MDP_BGRA_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B,
+		CLR_G, CLR_R, 8),
+	[MDP_RGBA_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R,
+		CLR_G, CLR_B, 8),
+	[MDP_ARGB_8888] = PPP_GET_PACK_PATTERN(CLR_R,
+		CLR_G, CLR_B, CLR_ALPHA, 8),
+	[MDP_XRGB_8888] = PPP_GET_PACK_PATTERN(CLR_R,
+		CLR_G, CLR_B, CLR_ALPHA, 8),
+	[MDP_RGBX_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R,
+		CLR_G, CLR_B, 8),
+	[MDP_Y_CRCB_H2V2] = PPP_GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8),
+	[MDP_Y_CBCR_H2V2] = PPP_GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8),
+	[MDP_Y_CBCR_H2V2_ADRENO] = PPP_GET_PACK_PATTERN(0, 0, CLR_CB,
+		CLR_CR, 8),
+	[MDP_Y_CBCR_H2V2_VENUS] = PPP_GET_PACK_PATTERN(0, 0, CLR_CB,
+		CLR_CR, 8),
+	[MDP_YCRYCB_H2V1] = PPP_GET_PACK_PATTERN(CLR_Y,
+		CLR_CR, CLR_Y, CLR_CB, 8),
+	[MDP_Y_CBCR_H2V1] = PPP_GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8),
+	[MDP_Y_CRCB_H2V1] = PPP_GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8),
+	[MDP_BGRX_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B,
+		CLR_G, CLR_R, 8),
+};
+
+const uint32_t swapped_pack_patt_lut[MDP_IMGTYPE_LIMIT] = {
+	[MDP_RGB_565] = PPP_GET_PACK_PATTERN(0, CLR_B, CLR_G, CLR_R, 8),
+	[MDP_BGR_565] = PPP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8),
+	[MDP_RGB_888] = PPP_GET_PACK_PATTERN(0, CLR_B, CLR_G, CLR_R, 8),
+	[MDP_BGR_888] = PPP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8),
+	[MDP_BGRA_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R,
+		CLR_G, CLR_B, 8),
+	[MDP_RGBA_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B,
+		CLR_G, CLR_R, 8),
+	[MDP_ARGB_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B,
+		CLR_G, CLR_R, 8),
+	[MDP_XRGB_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B,
+		CLR_G, CLR_R, 8),
+	[MDP_RGBX_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B,
+		CLR_G, CLR_R, 8),
+	[MDP_Y_CRCB_H2V2] = PPP_GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8),
+	[MDP_Y_CBCR_H2V2] = PPP_GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8),
+	[MDP_Y_CBCR_H2V2_ADRENO] = PPP_GET_PACK_PATTERN(0, 0, CLR_CR,
+		CLR_CB, 8),
+	[MDP_Y_CBCR_H2V2_VENUS] = PPP_GET_PACK_PATTERN(0, 0, CLR_CR,
+		CLR_CB, 8),
+	[MDP_YCRYCB_H2V1] = PPP_GET_PACK_PATTERN(CLR_Y,
+		CLR_CB, CLR_Y, CLR_CR, 8),
+	[MDP_Y_CBCR_H2V1] = PPP_GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8),
+	[MDP_Y_CRCB_H2V1] = PPP_GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8),
+	[MDP_BGRX_8888] = PPP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R,
+		CLR_G, CLR_B, 8),
+};
+
+const uint32_t dst_op_reg[MDP_IMGTYPE_LIMIT] = {
+	[MDP_Y_CRCB_H2V2] = PPP_OP_DST_CHROMA_420,
+	[MDP_Y_CBCR_H2V2] = PPP_OP_DST_CHROMA_420,
+	[MDP_Y_CBCR_H2V1] = PPP_OP_DST_CHROMA_H2V1,
+	[MDP_Y_CRCB_H2V1] = PPP_OP_DST_CHROMA_H2V1,
+	[MDP_YCRYCB_H2V1] = PPP_OP_DST_CHROMA_H2V1,
+};
+
+const uint32_t src_op_reg[MDP_IMGTYPE_LIMIT] = {
+	[MDP_Y_CRCB_H2V2] = PPP_OP_SRC_CHROMA_420 | PPP_OP_COLOR_SPACE_YCBCR,
+	[MDP_Y_CBCR_H2V2] = PPP_OP_SRC_CHROMA_420 | PPP_OP_COLOR_SPACE_YCBCR,
+	[MDP_Y_CBCR_H2V2_ADRENO] = PPP_OP_SRC_CHROMA_420 |
+			PPP_OP_COLOR_SPACE_YCBCR,
+	[MDP_Y_CBCR_H2V2_VENUS] = PPP_OP_SRC_CHROMA_420 |
+			PPP_OP_COLOR_SPACE_YCBCR,
+	[MDP_Y_CBCR_H2V1] = PPP_OP_SRC_CHROMA_H2V1,
+	[MDP_Y_CRCB_H2V1] = PPP_OP_SRC_CHROMA_H2V1,
+	[MDP_YCRYCB_H2V1] = PPP_OP_SRC_CHROMA_H2V1,
+};
+
+const uint32_t bytes_per_pixel[MDP_IMGTYPE_LIMIT] = {
+	[MDP_RGB_565] = 2,
+	[MDP_BGR_565] = 2,
+	[MDP_RGB_888] = 3,
+	[MDP_BGR_888] = 3,
+	[MDP_XRGB_8888] = 4,
+	[MDP_ARGB_8888] = 4,
+	[MDP_RGBA_8888] = 4,
+	[MDP_BGRA_8888] = 4,
+	[MDP_RGBX_8888] = 4,
+	[MDP_Y_CBCR_H2V1] = 1,
+	[MDP_Y_CBCR_H2V2] = 1,
+	[MDP_Y_CBCR_H2V2_ADRENO] = 1,
+	[MDP_Y_CBCR_H2V2_VENUS] = 1,
+	[MDP_Y_CRCB_H2V1] = 1,
+	[MDP_Y_CRCB_H2V2] = 1,
+	[MDP_YCRYCB_H2V1] = 2,
+	[MDP_BGRX_8888] = 4,
+};
+
+const bool per_pixel_alpha[MDP_IMGTYPE_LIMIT] = {
+	[MDP_BGRA_8888] = true,
+	[MDP_RGBA_8888] = true,
+	[MDP_ARGB_8888] = true,
+};
+
+const bool multi_plane[MDP_IMGTYPE_LIMIT] = {
+	[MDP_Y_CRCB_H2V2] = true,
+	[MDP_Y_CBCR_H2V2] = true,
+	[MDP_Y_CBCR_H2V1] = true,
+	[MDP_Y_CRCB_H2V1] = true,
+};
+
+/* lut default */
+uint32_t default_pre_lut_val[PPP_LUT_MAX] = {
+	0x0,
+	0x151515,
+	0x1d1d1d,
+	0x232323,
+	0x272727,
+	0x2b2b2b,
+	0x2f2f2f,
+	0x333333,
+	0x363636,
+	0x393939,
+	0x3b3b3b,
+	0x3e3e3e,
+	0x404040,
+	0x434343,
+	0x454545,
+	0x474747,
+	0x494949,
+	0x4b4b4b,
+	0x4d4d4d,
+	0x4f4f4f,
+	0x515151,
+	0x535353,
+	0x555555,
+	0x565656,
+	0x585858,
+	0x5a5a5a,
+	0x5b5b5b,
+	0x5d5d5d,
+	0x5e5e5e,
+	0x606060,
+	0x616161,
+	0x636363,
+	0x646464,
+	0x666666,
+	0x676767,
+	0x686868,
+	0x6a6a6a,
+	0x6b6b6b,
+	0x6c6c6c,
+	0x6e6e6e,
+	0x6f6f6f,
+	0x707070,
+	0x717171,
+	0x727272,
+	0x747474,
+	0x757575,
+	0x767676,
+	0x777777,
+	0x787878,
+	0x797979,
+	0x7a7a7a,
+	0x7c7c7c,
+	0x7d7d7d,
+	0x7e7e7e,
+	0x7f7f7f,
+	0x808080,
+	0x818181,
+	0x828282,
+	0x838383,
+	0x848484,
+	0x858585,
+	0x868686,
+	0x878787,
+	0x888888,
+	0x898989,
+	0x8a8a8a,
+	0x8b8b8b,
+	0x8c8c8c,
+	0x8d8d8d,
+	0x8e8e8e,
+	0x8f8f8f,
+	0x8f8f8f,
+	0x909090,
+	0x919191,
+	0x929292,
+	0x939393,
+	0x949494,
+	0x959595,
+	0x969696,
+	0x969696,
+	0x979797,
+	0x989898,
+	0x999999,
+	0x9a9a9a,
+	0x9b9b9b,
+	0x9c9c9c,
+	0x9c9c9c,
+	0x9d9d9d,
+	0x9e9e9e,
+	0x9f9f9f,
+	0xa0a0a0,
+	0xa0a0a0,
+	0xa1a1a1,
+	0xa2a2a2,
+	0xa3a3a3,
+	0xa4a4a4,
+	0xa4a4a4,
+	0xa5a5a5,
+	0xa6a6a6,
+	0xa7a7a7,
+	0xa7a7a7,
+	0xa8a8a8,
+	0xa9a9a9,
+	0xaaaaaa,
+	0xaaaaaa,
+	0xababab,
+	0xacacac,
+	0xadadad,
+	0xadadad,
+	0xaeaeae,
+	0xafafaf,
+	0xafafaf,
+	0xb0b0b0,
+	0xb1b1b1,
+	0xb2b2b2,
+	0xb2b2b2,
+	0xb3b3b3,
+	0xb4b4b4,
+	0xb4b4b4,
+	0xb5b5b5,
+	0xb6b6b6,
+	0xb6b6b6,
+	0xb7b7b7,
+	0xb8b8b8,
+	0xb8b8b8,
+	0xb9b9b9,
+	0xbababa,
+	0xbababa,
+	0xbbbbbb,
+	0xbcbcbc,
+	0xbcbcbc,
+	0xbdbdbd,
+	0xbebebe,
+	0xbebebe,
+	0xbfbfbf,
+	0xc0c0c0,
+	0xc0c0c0,
+	0xc1c1c1,
+	0xc1c1c1,
+	0xc2c2c2,
+	0xc3c3c3,
+	0xc3c3c3,
+	0xc4c4c4,
+	0xc5c5c5,
+	0xc5c5c5,
+	0xc6c6c6,
+	0xc6c6c6,
+	0xc7c7c7,
+	0xc8c8c8,
+	0xc8c8c8,
+	0xc9c9c9,
+	0xc9c9c9,
+	0xcacaca,
+	0xcbcbcb,
+	0xcbcbcb,
+	0xcccccc,
+	0xcccccc,
+	0xcdcdcd,
+	0xcecece,
+	0xcecece,
+	0xcfcfcf,
+	0xcfcfcf,
+	0xd0d0d0,
+	0xd0d0d0,
+	0xd1d1d1,
+	0xd2d2d2,
+	0xd2d2d2,
+	0xd3d3d3,
+	0xd3d3d3,
+	0xd4d4d4,
+	0xd4d4d4,
+	0xd5d5d5,
+	0xd6d6d6,
+	0xd6d6d6,
+	0xd7d7d7,
+	0xd7d7d7,
+	0xd8d8d8,
+	0xd8d8d8,
+	0xd9d9d9,
+	0xd9d9d9,
+	0xdadada,
+	0xdbdbdb,
+	0xdbdbdb,
+	0xdcdcdc,
+	0xdcdcdc,
+	0xdddddd,
+	0xdddddd,
+	0xdedede,
+	0xdedede,
+	0xdfdfdf,
+	0xdfdfdf,
+	0xe0e0e0,
+	0xe0e0e0,
+	0xe1e1e1,
+	0xe1e1e1,
+	0xe2e2e2,
+	0xe3e3e3,
+	0xe3e3e3,
+	0xe4e4e4,
+	0xe4e4e4,
+	0xe5e5e5,
+	0xe5e5e5,
+	0xe6e6e6,
+	0xe6e6e6,
+	0xe7e7e7,
+	0xe7e7e7,
+	0xe8e8e8,
+	0xe8e8e8,
+	0xe9e9e9,
+	0xe9e9e9,
+	0xeaeaea,
+	0xeaeaea,
+	0xebebeb,
+	0xebebeb,
+	0xececec,
+	0xececec,
+	0xededed,
+	0xededed,
+	0xeeeeee,
+	0xeeeeee,
+	0xefefef,
+	0xefefef,
+	0xf0f0f0,
+	0xf0f0f0,
+	0xf1f1f1,
+	0xf1f1f1,
+	0xf2f2f2,
+	0xf2f2f2,
+	0xf2f2f2,
+	0xf3f3f3,
+	0xf3f3f3,
+	0xf4f4f4,
+	0xf4f4f4,
+	0xf5f5f5,
+	0xf5f5f5,
+	0xf6f6f6,
+	0xf6f6f6,
+	0xf7f7f7,
+	0xf7f7f7,
+	0xf8f8f8,
+	0xf8f8f8,
+	0xf9f9f9,
+	0xf9f9f9,
+	0xfafafa,
+	0xfafafa,
+	0xfafafa,
+	0xfbfbfb,
+	0xfbfbfb,
+	0xfcfcfc,
+	0xfcfcfc,
+	0xfdfdfd,
+	0xfdfdfd,
+	0xfefefe,
+	0xfefefe,
+	0xffffff,
+	0xffffff,
+};
+
+uint32_t default_post_lut_val[PPP_LUT_MAX] = {
+	0x0,
+	0x0,
+	0x0,
+	0x0,
+	0x0,
+	0x0,
+	0x0,
+	0x0,
+	0x0,
+	0x0,
+	0x0,
+	0x0,
+	0x0,
+	0x0,
+	0x0,
+	0x0,
+	0x10101,
+	0x10101,
+	0x10101,
+	0x10101,
+	0x10101,
+	0x10101,
+	0x10101,
+	0x10101,
+	0x10101,
+	0x10101,
+	0x20202,
+	0x20202,
+	0x20202,
+	0x20202,
+	0x20202,
+	0x20202,
+	0x30303,
+	0x30303,
+	0x30303,
+	0x30303,
+	0x30303,
+	0x40404,
+	0x40404,
+	0x40404,
+	0x40404,
+	0x40404,
+	0x50505,
+	0x50505,
+	0x50505,
+	0x50505,
+	0x60606,
+	0x60606,
+	0x60606,
+	0x70707,
+	0x70707,
+	0x70707,
+	0x70707,
+	0x80808,
+	0x80808,
+	0x80808,
+	0x90909,
+	0x90909,
+	0xa0a0a,
+	0xa0a0a,
+	0xa0a0a,
+	0xb0b0b,
+	0xb0b0b,
+	0xb0b0b,
+	0xc0c0c,
+	0xc0c0c,
+	0xd0d0d,
+	0xd0d0d,
+	0xe0e0e,
+	0xe0e0e,
+	0xe0e0e,
+	0xf0f0f,
+	0xf0f0f,
+	0x101010,
+	0x101010,
+	0x111111,
+	0x111111,
+	0x121212,
+	0x121212,
+	0x131313,
+	0x131313,
+	0x141414,
+	0x151515,
+	0x151515,
+	0x161616,
+	0x161616,
+	0x171717,
+	0x171717,
+	0x181818,
+	0x191919,
+	0x191919,
+	0x1a1a1a,
+	0x1b1b1b,
+	0x1b1b1b,
+	0x1c1c1c,
+	0x1c1c1c,
+	0x1d1d1d,
+	0x1e1e1e,
+	0x1f1f1f,
+	0x1f1f1f,
+	0x202020,
+	0x212121,
+	0x212121,
+	0x222222,
+	0x232323,
+	0x242424,
+	0x242424,
+	0x252525,
+	0x262626,
+	0x272727,
+	0x272727,
+	0x282828,
+	0x292929,
+	0x2a2a2a,
+	0x2b2b2b,
+	0x2c2c2c,
+	0x2c2c2c,
+	0x2d2d2d,
+	0x2e2e2e,
+	0x2f2f2f,
+	0x303030,
+	0x313131,
+	0x323232,
+	0x333333,
+	0x333333,
+	0x343434,
+	0x353535,
+	0x363636,
+	0x373737,
+	0x383838,
+	0x393939,
+	0x3a3a3a,
+	0x3b3b3b,
+	0x3c3c3c,
+	0x3d3d3d,
+	0x3e3e3e,
+	0x3f3f3f,
+	0x404040,
+	0x414141,
+	0x424242,
+	0x434343,
+	0x444444,
+	0x464646,
+	0x474747,
+	0x484848,
+	0x494949,
+	0x4a4a4a,
+	0x4b4b4b,
+	0x4c4c4c,
+	0x4d4d4d,
+	0x4f4f4f,
+	0x505050,
+	0x515151,
+	0x525252,
+	0x535353,
+	0x545454,
+	0x565656,
+	0x575757,
+	0x585858,
+	0x595959,
+	0x5b5b5b,
+	0x5c5c5c,
+	0x5d5d5d,
+	0x5e5e5e,
+	0x606060,
+	0x616161,
+	0x626262,
+	0x646464,
+	0x656565,
+	0x666666,
+	0x686868,
+	0x696969,
+	0x6a6a6a,
+	0x6c6c6c,
+	0x6d6d6d,
+	0x6f6f6f,
+	0x707070,
+	0x717171,
+	0x737373,
+	0x747474,
+	0x767676,
+	0x777777,
+	0x797979,
+	0x7a7a7a,
+	0x7c7c7c,
+	0x7d7d7d,
+	0x7f7f7f,
+	0x808080,
+	0x828282,
+	0x838383,
+	0x858585,
+	0x868686,
+	0x888888,
+	0x898989,
+	0x8b8b8b,
+	0x8d8d8d,
+	0x8e8e8e,
+	0x909090,
+	0x919191,
+	0x939393,
+	0x959595,
+	0x969696,
+	0x989898,
+	0x9a9a9a,
+	0x9b9b9b,
+	0x9d9d9d,
+	0x9f9f9f,
+	0xa1a1a1,
+	0xa2a2a2,
+	0xa4a4a4,
+	0xa6a6a6,
+	0xa7a7a7,
+	0xa9a9a9,
+	0xababab,
+	0xadadad,
+	0xafafaf,
+	0xb0b0b0,
+	0xb2b2b2,
+	0xb4b4b4,
+	0xb6b6b6,
+	0xb8b8b8,
+	0xbababa,
+	0xbbbbbb,
+	0xbdbdbd,
+	0xbfbfbf,
+	0xc1c1c1,
+	0xc3c3c3,
+	0xc5c5c5,
+	0xc7c7c7,
+	0xc9c9c9,
+	0xcbcbcb,
+	0xcdcdcd,
+	0xcfcfcf,
+	0xd1d1d1,
+	0xd3d3d3,
+	0xd5d5d5,
+	0xd7d7d7,
+	0xd9d9d9,
+	0xdbdbdb,
+	0xdddddd,
+	0xdfdfdf,
+	0xe1e1e1,
+	0xe3e3e3,
+	0xe5e5e5,
+	0xe7e7e7,
+	0xe9e9e9,
+	0xebebeb,
+	0xeeeeee,
+	0xf0f0f0,
+	0xf2f2f2,
+	0xf4f4f4,
+	0xf6f6f6,
+	0xf8f8f8,
+	0xfbfbfb,
+	0xfdfdfd,
+	0xffffff,
+};
+
+struct ppp_csc_table rgb2yuv = {
+	.fwd_matrix = {
+		0x83,
+		0x102,
+		0x32,
+		0xffb5,
+		0xff6c,
+		0xe1,
+		0xe1,
+		0xff45,
+		0xffdc,
+	},
+	.rev_matrix = {
+		0x254,
+		0x0,
+		0x331,
+		0x254,
+		0xff38,
+		0xfe61,
+		0x254,
+		0x409,
+		0x0,
+	},
+	.bv = {
+		0x10,
+		0x80,
+		0x80,
+	},
+	.lv = {
+		0x10,
+		0xeb,
+		0x10,
+		0xf0,
+	},
+};
+
+struct ppp_csc_table default_table2 = {
+	.fwd_matrix = {
+		0x5d,
+		0x13a,
+		0x20,
+		0xffcd,
+		0xff54,
+		0xe1,
+		0xe1,
+		0xff35,
+	},
+	.rev_matrix = {
+		0x254,
+		0x0,
+		0x396,
+		0x254,
+		0xff94,
+		0xfef0,
+		0x254,
+		0x43a,
+		0x0,
+	},
+	.bv = {
+		0x10,
+		0x80,
+		0x80,
+	},
+	.lv = {
+		0x10,
+		0xeb,
+		0x10,
+		0xf0,
+	},
+};
+
+const struct ppp_table upscale_table[PPP_UPSCALE_MAX] = {
+	{ 0x5fffc, 0x0 },
+	{ 0x50200, 0x7fc00000 },
+	{ 0x5fffc, 0xff80000d },
+	{ 0x50204, 0x7ec003f9 },
+	{ 0x5fffc, 0xfec0001c },
+	{ 0x50208, 0x7d4003f3 },
+	{ 0x5fffc, 0xfe40002b },
+	{ 0x5020c, 0x7b8003ed },
+	{ 0x5fffc, 0xfd80003c },
+	{ 0x50210, 0x794003e8 },
+	{ 0x5fffc, 0xfcc0004d },
+	{ 0x50214, 0x76c003e4 },
+	{ 0x5fffc, 0xfc40005f },
+	{ 0x50218, 0x73c003e0 },
+	{ 0x5fffc, 0xfb800071 },
+	{ 0x5021c, 0x708003de },
+	{ 0x5fffc, 0xfac00085 },
+	{ 0x50220, 0x6d0003db },
+	{ 0x5fffc, 0xfa000098 },
+	{ 0x50224, 0x698003d9 },
+	{ 0x5fffc, 0xf98000ac },
+	{ 0x50228, 0x654003d8 },
+	{ 0x5fffc, 0xf8c000c1 },
+	{ 0x5022c, 0x610003d7 },
+	{ 0x5fffc, 0xf84000d5 },
+	{ 0x50230, 0x5c8003d7 },
+	{ 0x5fffc, 0xf7c000e9 },
+	{ 0x50234, 0x580003d7 },
+	{ 0x5fffc, 0xf74000fd },
+	{ 0x50238, 0x534003d8 },
+	{ 0x5fffc, 0xf6c00112 },
+	{ 0x5023c, 0x4e8003d8 },
+	{ 0x5fffc, 0xf6800126 },
+	{ 0x50240, 0x494003da },
+	{ 0x5fffc, 0xf600013a },
+	{ 0x50244, 0x448003db },
+	{ 0x5fffc, 0xf600014d },
+	{ 0x50248, 0x3f4003dd },
+	{ 0x5fffc, 0xf5c00160 },
+	{ 0x5024c, 0x3a4003df },
+	{ 0x5fffc, 0xf5c00172 },
+	{ 0x50250, 0x354003e1 },
+	{ 0x5fffc, 0xf5c00184 },
+	{ 0x50254, 0x304003e3 },
+	{ 0x5fffc, 0xf6000195 },
+	{ 0x50258, 0x2b0003e6 },
+	{ 0x5fffc, 0xf64001a6 },
+	{ 0x5025c, 0x260003e8 },
+	{ 0x5fffc, 0xf6c001b4 },
+	{ 0x50260, 0x214003eb },
+	{ 0x5fffc, 0xf78001c2 },
+	{ 0x50264, 0x1c4003ee },
+	{ 0x5fffc, 0xf80001cf },
+	{ 0x50268, 0x17c003f1 },
+	{ 0x5fffc, 0xf90001db },
+	{ 0x5026c, 0x134003f3 },
+	{ 0x5fffc, 0xfa0001e5 },
+	{ 0x50270, 0xf0003f6 },
+	{ 0x5fffc, 0xfb4001ee },
+	{ 0x50274, 0xac003f9 },
+	{ 0x5fffc, 0xfcc001f5 },
+	{ 0x50278, 0x70003fb },
+	{ 0x5fffc, 0xfe4001fb },
+	{ 0x5027c, 0x34003fe },
+};
+
+const struct ppp_table mdp_gaussian_blur_table[PPP_BLUR_SCALE_MAX] = {
+	/* max variance */
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50280, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50284, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50288, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x5028c, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50290, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50294, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50298, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x5029c, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502a0, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502a4, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502a8, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502ac, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502b0, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502b4, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502b8, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502bc, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502c0, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502c4, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502c8, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502cc, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502d0, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502d4, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502d8, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502dc, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502e0, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502e4, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502e8, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502ec, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502f0, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502f4, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502f8, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x502fc, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50300, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50304, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50308, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x5030c, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50310, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50314, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50318, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x5031c, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50320, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50324, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50328, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x5032c, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50330, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50334, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50338, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x5033c, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50340, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50344, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50348, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x5034c, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50350, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50354, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50358, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x5035c, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50360, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50364, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50368, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x5036c, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50370, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50374, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x50378, 0x20000080 },
+	{ 0x5fffc, 0x20000080 },
+	{ 0x5037c, 0x20000080 },
+};
+
+const struct ppp_table downscale_x_table_pt2topt4[] = {
+	{ 0x5fffc, 0x740008c },
+	{ 0x50280, 0x33800088 },
+	{ 0x5fffc, 0x800008e },
+	{ 0x50284, 0x33400084 },
+	{ 0x5fffc, 0x8400092 },
+	{ 0x50288, 0x33000080 },
+	{ 0x5fffc, 0x9000094 },
+	{ 0x5028c, 0x3300007b },
+	{ 0x5fffc, 0x9c00098 },
+	{ 0x50290, 0x32400077 },
+	{ 0x5fffc, 0xa40009b },
+	{ 0x50294, 0x32000073 },
+	{ 0x5fffc, 0xb00009d },
+	{ 0x50298, 0x31c0006f },
+	{ 0x5fffc, 0xbc000a0 },
+	{ 0x5029c, 0x3140006b },
+	{ 0x5fffc, 0xc8000a2 },
+	{ 0x502a0, 0x31000067 },
+	{ 0x5fffc, 0xd8000a5 },
+	{ 0x502a4, 0x30800062 },
+	{ 0x5fffc, 0xe4000a8 },
+	{ 0x502a8, 0x2fc0005f },
+	{ 0x5fffc, 0xec000aa },
+	{ 0x502ac, 0x2fc0005b },
+	{ 0x5fffc, 0xf8000ad },
+	{ 0x502b0, 0x2f400057 },
+	{ 0x5fffc, 0x108000b0 },
+	{ 0x502b4, 0x2e400054 },
+	{ 0x5fffc, 0x114000b2 },
+	{ 0x502b8, 0x2e000050 },
+	{ 0x5fffc, 0x124000b4 },
+	{ 0x502bc, 0x2d80004c },
+	{ 0x5fffc, 0x130000b6 },
+	{ 0x502c0, 0x2d000049 },
+	{ 0x5fffc, 0x140000b8 },
+	{ 0x502c4, 0x2c800045 },
+	{ 0x5fffc, 0x150000b9 },
+	{ 0x502c8, 0x2c000042 },
+	{ 0x5fffc, 0x15c000bd },
+	{ 0x502cc, 0x2b40003e },
+	{ 0x5fffc, 0x16c000bf },
+	{ 0x502d0, 0x2a80003b },
+	{ 0x5fffc, 0x17c000bf },
+	{ 0x502d4, 0x2a000039 },
+	{ 0x5fffc, 0x188000c2 },
+	{ 0x502d8, 0x29400036 },
+	{ 0x5fffc, 0x19c000c4 },
+	{ 0x502dc, 0x28800032 },
+	{ 0x5fffc, 0x1ac000c5 },
+	{ 0x502e0, 0x2800002f },
+	{ 0x5fffc, 0x1bc000c7 },
+	{ 0x502e4, 0x2740002c },
+	{ 0x5fffc, 0x1cc000c8 },
+	{ 0x502e8, 0x26c00029 },
+	{ 0x5fffc, 0x1dc000c9 },
+	{ 0x502ec, 0x26000027 },
+	{ 0x5fffc, 0x1ec000cc },
+	{ 0x502f0, 0x25000024 },
+	{ 0x5fffc, 0x200000cc },
+	{ 0x502f4, 0x24800021 },
+	{ 0x5fffc, 0x210000cd },
+	{ 0x502f8, 0x23800020 },
+	{ 0x5fffc, 0x220000ce },
+	{ 0x502fc, 0x2300001d },
+};
+
+static const struct ppp_table downscale_x_table_pt4topt6[] = {
+	{ 0x5fffc, 0x740008c },
+	{ 0x50280, 0x33800088 },
+	{ 0x5fffc, 0x800008e },
+	{ 0x50284, 0x33400084 },
+	{ 0x5fffc, 0x8400092 },
+	{ 0x50288, 0x33000080 },
+	{ 0x5fffc, 0x9000094 },
+	{ 0x5028c, 0x3300007b },
+	{ 0x5fffc, 0x9c00098 },
+	{ 0x50290, 0x32400077 },
+	{ 0x5fffc, 0xa40009b },
+	{ 0x50294, 0x32000073 },
+	{ 0x5fffc, 0xb00009d },
+	{ 0x50298, 0x31c0006f },
+	{ 0x5fffc, 0xbc000a0 },
+	{ 0x5029c, 0x3140006b },
+	{ 0x5fffc, 0xc8000a2 },
+	{ 0x502a0, 0x31000067 },
+	{ 0x5fffc, 0xd8000a5 },
+	{ 0x502a4, 0x30800062 },
+	{ 0x5fffc, 0xe4000a8 },
+	{ 0x502a8, 0x2fc0005f },
+	{ 0x5fffc, 0xec000aa },
+	{ 0x502ac, 0x2fc0005b },
+	{ 0x5fffc, 0xf8000ad },
+	{ 0x502b0, 0x2f400057 },
+	{ 0x5fffc, 0x108000b0 },
+	{ 0x502b4, 0x2e400054 },
+	{ 0x5fffc, 0x114000b2 },
+	{ 0x502b8, 0x2e000050 },
+	{ 0x5fffc, 0x124000b4 },
+	{ 0x502bc, 0x2d80004c },
+	{ 0x5fffc, 0x130000b6 },
+	{ 0x502c0, 0x2d000049 },
+	{ 0x5fffc, 0x140000b8 },
+	{ 0x502c4, 0x2c800045 },
+	{ 0x5fffc, 0x150000b9 },
+	{ 0x502c8, 0x2c000042 },
+	{ 0x5fffc, 0x15c000bd },
+	{ 0x502cc, 0x2b40003e },
+	{ 0x5fffc, 0x16c000bf },
+	{ 0x502d0, 0x2a80003b },
+	{ 0x5fffc, 0x17c000bf },
+	{ 0x502d4, 0x2a000039 },
+	{ 0x5fffc, 0x188000c2 },
+	{ 0x502d8, 0x29400036 },
+	{ 0x5fffc, 0x19c000c4 },
+	{ 0x502dc, 0x28800032 },
+	{ 0x5fffc, 0x1ac000c5 },
+	{ 0x502e0, 0x2800002f },
+	{ 0x5fffc, 0x1bc000c7 },
+	{ 0x502e4, 0x2740002c },
+	{ 0x5fffc, 0x1cc000c8 },
+	{ 0x502e8, 0x26c00029 },
+	{ 0x5fffc, 0x1dc000c9 },
+	{ 0x502ec, 0x26000027 },
+	{ 0x5fffc, 0x1ec000cc },
+	{ 0x502f0, 0x25000024 },
+	{ 0x5fffc, 0x200000cc },
+	{ 0x502f4, 0x24800021 },
+	{ 0x5fffc, 0x210000cd },
+	{ 0x502f8, 0x23800020 },
+	{ 0x5fffc, 0x220000ce },
+	{ 0x502fc, 0x2300001d },
+};
+
+static const struct ppp_table downscale_x_table_pt6topt8[] = {
+	{ 0x5fffc, 0xfe000070 },
+	{ 0x50280, 0x4bc00068 },
+	{ 0x5fffc, 0xfe000078 },
+	{ 0x50284, 0x4bc00060 },
+	{ 0x5fffc, 0xfe000080 },
+	{ 0x50288, 0x4b800059 },
+	{ 0x5fffc, 0xfe000089 },
+	{ 0x5028c, 0x4b000052 },
+	{ 0x5fffc, 0xfe400091 },
+	{ 0x50290, 0x4a80004b },
+	{ 0x5fffc, 0xfe40009a },
+	{ 0x50294, 0x4a000044 },
+	{ 0x5fffc, 0xfe8000a3 },
+	{ 0x50298, 0x4940003d },
+	{ 0x5fffc, 0xfec000ac },
+	{ 0x5029c, 0x48400037 },
+	{ 0x5fffc, 0xff0000b4 },
+	{ 0x502a0, 0x47800031 },
+	{ 0x5fffc, 0xff8000bd },
+	{ 0x502a4, 0x4640002b },
+	{ 0x5fffc, 0xc5 },
+	{ 0x502a8, 0x45000026 },
+	{ 0x5fffc, 0x8000ce },
+	{ 0x502ac, 0x43800021 },
+	{ 0x5fffc, 0x10000d6 },
+	{ 0x502b0, 0x4240001c },
+	{ 0x5fffc, 0x18000df },
+	{ 0x502b4, 0x40800018 },
+	{ 0x5fffc, 0x24000e6 },
+	{ 0x502b8, 0x3f000014 },
+	{ 0x5fffc, 0x30000ee },
+	{ 0x502bc, 0x3d400010 },
+	{ 0x5fffc, 0x40000f5 },
+	{ 0x502c0, 0x3b80000c },
+	{ 0x5fffc, 0x50000fc },
+	{ 0x502c4, 0x39800009 },
+	{ 0x5fffc, 0x6000102 },
+	{ 0x502c8, 0x37c00006 },
+	{ 0x5fffc, 0x7000109 },
+	{ 0x502cc, 0x35800004 },
+	{ 0x5fffc, 0x840010e },
+	{ 0x502d0, 0x33800002 },
+	{ 0x5fffc, 0x9800114 },
+	{ 0x502d4, 0x31400000 },
+	{ 0x5fffc, 0xac00119 },
+	{ 0x502d8, 0x2f4003fe },
+	{ 0x5fffc, 0xc40011e },
+	{ 0x502dc, 0x2d0003fc },
+	{ 0x5fffc, 0xdc00121 },
+	{ 0x502e0, 0x2b0003fb },
+	{ 0x5fffc, 0xf400125 },
+	{ 0x502e4, 0x28c003fa },
+	{ 0x5fffc, 0x11000128 },
+	{ 0x502e8, 0x268003f9 },
+	{ 0x5fffc, 0x12c0012a },
+	{ 0x502ec, 0x244003f9 },
+	{ 0x5fffc, 0x1480012c },
+	{ 0x502f0, 0x224003f8 },
+	{ 0x5fffc, 0x1640012e },
+	{ 0x502f4, 0x200003f8 },
+	{ 0x5fffc, 0x1800012f },
+	{ 0x502f8, 0x1e0003f8 },
+	{ 0x5fffc, 0x1a00012f },
+	{ 0x502fc, 0x1c0003f8 },
+};
+
+static const struct ppp_table downscale_x_table_pt8topt1[] = {
+	{ 0x5fffc, 0x0 },
+	{ 0x50280, 0x7fc00000 },
+	{ 0x5fffc, 0xff80000d },
+	{ 0x50284, 0x7ec003f9 },
+	{ 0x5fffc, 0xfec0001c },
+	{ 0x50288, 0x7d4003f3 },
+	{ 0x5fffc, 0xfe40002b },
+	{ 0x5028c, 0x7b8003ed },
+	{ 0x5fffc, 0xfd80003c },
+	{ 0x50290, 0x794003e8 },
+	{ 0x5fffc, 0xfcc0004d },
+	{ 0x50294, 0x76c003e4 },
+	{ 0x5fffc, 0xfc40005f },
+	{ 0x50298, 0x73c003e0 },
+	{ 0x5fffc, 0xfb800071 },
+	{ 0x5029c, 0x708003de },
+	{ 0x5fffc, 0xfac00085 },
+	{ 0x502a0, 0x6d0003db },
+	{ 0x5fffc, 0xfa000098 },
+	{ 0x502a4, 0x698003d9 },
+	{ 0x5fffc, 0xf98000ac },
+	{ 0x502a8, 0x654003d8 },
+	{ 0x5fffc, 0xf8c000c1 },
+	{ 0x502ac, 0x610003d7 },
+	{ 0x5fffc, 0xf84000d5 },
+	{ 0x502b0, 0x5c8003d7 },
+	{ 0x5fffc, 0xf7c000e9 },
+	{ 0x502b4, 0x580003d7 },
+	{ 0x5fffc, 0xf74000fd },
+	{ 0x502b8, 0x534003d8 },
+	{ 0x5fffc, 0xf6c00112 },
+	{ 0x502bc, 0x4e8003d8 },
+	{ 0x5fffc, 0xf6800126 },
+	{ 0x502c0, 0x494003da },
+	{ 0x5fffc, 0xf600013a },
+	{ 0x502c4, 0x448003db },
+	{ 0x5fffc, 0xf600014d },
+	{ 0x502c8, 0x3f4003dd },
+	{ 0x5fffc, 0xf5c00160 },
+	{ 0x502cc, 0x3a4003df },
+	{ 0x5fffc, 0xf5c00172 },
+	{ 0x502d0, 0x354003e1 },
+	{ 0x5fffc, 0xf5c00184 },
+	{ 0x502d4, 0x304003e3 },
+	{ 0x5fffc, 0xf6000195 },
+	{ 0x502d8, 0x2b0003e6 },
+	{ 0x5fffc, 0xf64001a6 },
+	{ 0x502dc, 0x260003e8 },
+	{ 0x5fffc, 0xf6c001b4 },
+	{ 0x502e0, 0x214003eb },
+	{ 0x5fffc, 0xf78001c2 },
+	{ 0x502e4, 0x1c4003ee },
+	{ 0x5fffc, 0xf80001cf },
+	{ 0x502e8, 0x17c003f1 },
+	{ 0x5fffc, 0xf90001db },
+	{ 0x502ec, 0x134003f3 },
+	{ 0x5fffc, 0xfa0001e5 },
+	{ 0x502f0, 0xf0003f6 },
+	{ 0x5fffc, 0xfb4001ee },
+	{ 0x502f4, 0xac003f9 },
+	{ 0x5fffc, 0xfcc001f5 },
+	{ 0x502f8, 0x70003fb },
+	{ 0x5fffc, 0xfe4001fb },
+	{ 0x502fc, 0x34003fe },
+};
+
+static const struct ppp_table *downscale_x_table[PPP_DOWNSCALE_MAX] = {
+	[PPP_DOWNSCALE_PT2TOPT4] = downscale_x_table_pt2topt4,
+	[PPP_DOWNSCALE_PT4TOPT6] = downscale_x_table_pt4topt6,
+	[PPP_DOWNSCALE_PT6TOPT8] = downscale_x_table_pt6topt8,
+	[PPP_DOWNSCALE_PT8TOPT1] = downscale_x_table_pt8topt1,
+};
+
+static const struct ppp_table downscale_y_table_pt2topt4[] = {
+	{ 0x5fffc, 0x740008c },
+	{ 0x50300, 0x33800088 },
+	{ 0x5fffc, 0x800008e },
+	{ 0x50304, 0x33400084 },
+	{ 0x5fffc, 0x8400092 },
+	{ 0x50308, 0x33000080 },
+	{ 0x5fffc, 0x9000094 },
+	{ 0x5030c, 0x3300007b },
+	{ 0x5fffc, 0x9c00098 },
+	{ 0x50310, 0x32400077 },
+	{ 0x5fffc, 0xa40009b },
+	{ 0x50314, 0x32000073 },
+	{ 0x5fffc, 0xb00009d },
+	{ 0x50318, 0x31c0006f },
+	{ 0x5fffc, 0xbc000a0 },
+	{ 0x5031c, 0x3140006b },
+	{ 0x5fffc, 0xc8000a2 },
+	{ 0x50320, 0x31000067 },
+	{ 0x5fffc, 0xd8000a5 },
+	{ 0x50324, 0x30800062 },
+	{ 0x5fffc, 0xe4000a8 },
+	{ 0x50328, 0x2fc0005f },
+	{ 0x5fffc, 0xec000aa },
+	{ 0x5032c, 0x2fc0005b },
+	{ 0x5fffc, 0xf8000ad },
+	{ 0x50330, 0x2f400057 },
+	{ 0x5fffc, 0x108000b0 },
+	{ 0x50334, 0x2e400054 },
+	{ 0x5fffc, 0x114000b2 },
+	{ 0x50338, 0x2e000050 },
+	{ 0x5fffc, 0x124000b4 },
+	{ 0x5033c, 0x2d80004c },
+	{ 0x5fffc, 0x130000b6 },
+	{ 0x50340, 0x2d000049 },
+	{ 0x5fffc, 0x140000b8 },
+	{ 0x50344, 0x2c800045 },
+	{ 0x5fffc, 0x150000b9 },
+	{ 0x50348, 0x2c000042 },
+	{ 0x5fffc, 0x15c000bd },
+	{ 0x5034c, 0x2b40003e },
+	{ 0x5fffc, 0x16c000bf },
+	{ 0x50350, 0x2a80003b },
+	{ 0x5fffc, 0x17c000bf },
+	{ 0x50354, 0x2a000039 },
+	{ 0x5fffc, 0x188000c2 },
+	{ 0x50358, 0x29400036 },
+	{ 0x5fffc, 0x19c000c4 },
+	{ 0x5035c, 0x28800032 },
+	{ 0x5fffc, 0x1ac000c5 },
+	{ 0x50360, 0x2800002f },
+	{ 0x5fffc, 0x1bc000c7 },
+	{ 0x50364, 0x2740002c },
+	{ 0x5fffc, 0x1cc000c8 },
+	{ 0x50368, 0x26c00029 },
+	{ 0x5fffc, 0x1dc000c9 },
+	{ 0x5036c, 0x26000027 },
+	{ 0x5fffc, 0x1ec000cc },
+	{ 0x50370, 0x25000024 },
+	{ 0x5fffc, 0x200000cc },
+	{ 0x50374, 0x24800021 },
+	{ 0x5fffc, 0x210000cd },
+	{ 0x50378, 0x23800020 },
+	{ 0x5fffc, 0x220000ce },
+	{ 0x5037c, 0x2300001d },
+};
+
+static const struct ppp_table downscale_y_table_pt4topt6[] = {
+	{ 0x5fffc, 0x740008c },
+	{ 0x50300, 0x33800088 },
+	{ 0x5fffc, 0x800008e },
+	{ 0x50304, 0x33400084 },
+	{ 0x5fffc, 0x8400092 },
+	{ 0x50308, 0x33000080 },
+	{ 0x5fffc, 0x9000094 },
+	{ 0x5030c, 0x3300007b },
+	{ 0x5fffc, 0x9c00098 },
+	{ 0x50310, 0x32400077 },
+	{ 0x5fffc, 0xa40009b },
+	{ 0x50314, 0x32000073 },
+	{ 0x5fffc, 0xb00009d },
+	{ 0x50318, 0x31c0006f },
+	{ 0x5fffc, 0xbc000a0 },
+	{ 0x5031c, 0x3140006b },
+	{ 0x5fffc, 0xc8000a2 },
+	{ 0x50320, 0x31000067 },
+	{ 0x5fffc, 0xd8000a5 },
+	{ 0x50324, 0x30800062 },
+	{ 0x5fffc, 0xe4000a8 },
+	{ 0x50328, 0x2fc0005f },
+	{ 0x5fffc, 0xec000aa },
+	{ 0x5032c, 0x2fc0005b },
+	{ 0x5fffc, 0xf8000ad },
+	{ 0x50330, 0x2f400057 },
+	{ 0x5fffc, 0x108000b0 },
+	{ 0x50334, 0x2e400054 },
+	{ 0x5fffc, 0x114000b2 },
+	{ 0x50338, 0x2e000050 },
+	{ 0x5fffc, 0x124000b4 },
+	{ 0x5033c, 0x2d80004c },
+	{ 0x5fffc, 0x130000b6 },
+	{ 0x50340, 0x2d000049 },
+	{ 0x5fffc, 0x140000b8 },
+	{ 0x50344, 0x2c800045 },
+	{ 0x5fffc, 0x150000b9 },
+	{ 0x50348, 0x2c000042 },
+	{ 0x5fffc, 0x15c000bd },
+	{ 0x5034c, 0x2b40003e },
+	{ 0x5fffc, 0x16c000bf },
+	{ 0x50350, 0x2a80003b },
+	{ 0x5fffc, 0x17c000bf },
+	{ 0x50354, 0x2a000039 },
+	{ 0x5fffc, 0x188000c2 },
+	{ 0x50358, 0x29400036 },
+	{ 0x5fffc, 0x19c000c4 },
+	{ 0x5035c, 0x28800032 },
+	{ 0x5fffc, 0x1ac000c5 },
+	{ 0x50360, 0x2800002f },
+	{ 0x5fffc, 0x1bc000c7 },
+	{ 0x50364, 0x2740002c },
+	{ 0x5fffc, 0x1cc000c8 },
+	{ 0x50368, 0x26c00029 },
+	{ 0x5fffc, 0x1dc000c9 },
+	{ 0x5036c, 0x26000027 },
+	{ 0x5fffc, 0x1ec000cc },
+	{ 0x50370, 0x25000024 },
+	{ 0x5fffc, 0x200000cc },
+	{ 0x50374, 0x24800021 },
+	{ 0x5fffc, 0x210000cd },
+	{ 0x50378, 0x23800020 },
+	{ 0x5fffc, 0x220000ce },
+	{ 0x5037c, 0x2300001d },
+};
+
+static const struct ppp_table downscale_y_table_pt6topt8[] = {
+	{ 0x5fffc, 0xfe000070 },
+	{ 0x50300, 0x4bc00068 },
+	{ 0x5fffc, 0xfe000078 },
+	{ 0x50304, 0x4bc00060 },
+	{ 0x5fffc, 0xfe000080 },
+	{ 0x50308, 0x4b800059 },
+	{ 0x5fffc, 0xfe000089 },
+	{ 0x5030c, 0x4b000052 },
+	{ 0x5fffc, 0xfe400091 },
+	{ 0x50310, 0x4a80004b },
+	{ 0x5fffc, 0xfe40009a },
+	{ 0x50314, 0x4a000044 },
+	{ 0x5fffc, 0xfe8000a3 },
+	{ 0x50318, 0x4940003d },
+	{ 0x5fffc, 0xfec000ac },
+	{ 0x5031c, 0x48400037 },
+	{ 0x5fffc, 0xff0000b4 },
+	{ 0x50320, 0x47800031 },
+	{ 0x5fffc, 0xff8000bd },
+	{ 0x50324, 0x4640002b },
+	{ 0x5fffc, 0xc5 },
+	{ 0x50328, 0x45000026 },
+	{ 0x5fffc, 0x8000ce },
+	{ 0x5032c, 0x43800021 },
+	{ 0x5fffc, 0x10000d6 },
+	{ 0x50330, 0x4240001c },
+	{ 0x5fffc, 0x18000df },
+	{ 0x50334, 0x40800018 },
+	{ 0x5fffc, 0x24000e6 },
+	{ 0x50338, 0x3f000014 },
+	{ 0x5fffc, 0x30000ee },
+	{ 0x5033c, 0x3d400010 },
+	{ 0x5fffc, 0x40000f5 },
+	{ 0x50340, 0x3b80000c },
+	{ 0x5fffc, 0x50000fc },
+	{ 0x50344, 0x39800009 },
+	{ 0x5fffc, 0x6000102 },
+	{ 0x50348, 0x37c00006 },
+	{ 0x5fffc, 0x7000109 },
+	{ 0x5034c, 0x35800004 },
+	{ 0x5fffc, 0x840010e },
+	{ 0x50350, 0x33800002 },
+	{ 0x5fffc, 0x9800114 },
+	{ 0x50354, 0x31400000 },
+	{ 0x5fffc, 0xac00119 },
+	{ 0x50358, 0x2f4003fe },
+	{ 0x5fffc, 0xc40011e },
+	{ 0x5035c, 0x2d0003fc },
+	{ 0x5fffc, 0xdc00121 },
+	{ 0x50360, 0x2b0003fb },
+	{ 0x5fffc, 0xf400125 },
+	{ 0x50364, 0x28c003fa },
+	{ 0x5fffc, 0x11000128 },
+	{ 0x50368, 0x268003f9 },
+	{ 0x5fffc, 0x12c0012a },
+	{ 0x5036c, 0x244003f9 },
+	{ 0x5fffc, 0x1480012c },
+	{ 0x50370, 0x224003f8 },
+	{ 0x5fffc, 0x1640012e },
+	{ 0x50374, 0x200003f8 },
+	{ 0x5fffc, 0x1800012f },
+	{ 0x50378, 0x1e0003f8 },
+	{ 0x5fffc, 0x1a00012f },
+	{ 0x5037c, 0x1c0003f8 },
+};
+
+static const struct ppp_table downscale_y_table_pt8topt1[] = {
+	{ 0x5fffc, 0x0 },
+	{ 0x50300, 0x7fc00000 },
+	{ 0x5fffc, 0xff80000d },
+	{ 0x50304, 0x7ec003f9 },
+	{ 0x5fffc, 0xfec0001c },
+	{ 0x50308, 0x7d4003f3 },
+	{ 0x5fffc, 0xfe40002b },
+	{ 0x5030c, 0x7b8003ed },
+	{ 0x5fffc, 0xfd80003c },
+	{ 0x50310, 0x794003e8 },
+	{ 0x5fffc, 0xfcc0004d },
+	{ 0x50314, 0x76c003e4 },
+	{ 0x5fffc, 0xfc40005f },
+	{ 0x50318, 0x73c003e0 },
+	{ 0x5fffc, 0xfb800071 },
+	{ 0x5031c, 0x708003de },
+	{ 0x5fffc, 0xfac00085 },
+	{ 0x50320, 0x6d0003db },
+	{ 0x5fffc, 0xfa000098 },
+	{ 0x50324, 0x698003d9 },
+	{ 0x5fffc, 0xf98000ac },
+	{ 0x50328, 0x654003d8 },
+	{ 0x5fffc, 0xf8c000c1 },
+	{ 0x5032c, 0x610003d7 },
+	{ 0x5fffc, 0xf84000d5 },
+	{ 0x50330, 0x5c8003d7 },
+	{ 0x5fffc, 0xf7c000e9 },
+	{ 0x50334, 0x580003d7 },
+	{ 0x5fffc, 0xf74000fd },
+	{ 0x50338, 0x534003d8 },
+	{ 0x5fffc, 0xf6c00112 },
+	{ 0x5033c, 0x4e8003d8 },
+	{ 0x5fffc, 0xf6800126 },
+	{ 0x50340, 0x494003da },
+	{ 0x5fffc, 0xf600013a },
+	{ 0x50344, 0x448003db },
+	{ 0x5fffc, 0xf600014d },
+	{ 0x50348, 0x3f4003dd },
+	{ 0x5fffc, 0xf5c00160 },
+	{ 0x5034c, 0x3a4003df },
+	{ 0x5fffc, 0xf5c00172 },
+	{ 0x50350, 0x354003e1 },
+	{ 0x5fffc, 0xf5c00184 },
+	{ 0x50354, 0x304003e3 },
+	{ 0x5fffc, 0xf6000195 },
+	{ 0x50358, 0x2b0003e6 },
+	{ 0x5fffc, 0xf64001a6 },
+	{ 0x5035c, 0x260003e8 },
+	{ 0x5fffc, 0xf6c001b4 },
+	{ 0x50360, 0x214003eb },
+	{ 0x5fffc, 0xf78001c2 },
+	{ 0x50364, 0x1c4003ee },
+	{ 0x5fffc, 0xf80001cf },
+	{ 0x50368, 0x17c003f1 },
+	{ 0x5fffc, 0xf90001db },
+	{ 0x5036c, 0x134003f3 },
+	{ 0x5fffc, 0xfa0001e5 },
+	{ 0x50370, 0xf0003f6 },
+	{ 0x5fffc, 0xfb4001ee },
+	{ 0x50374, 0xac003f9 },
+	{ 0x5fffc, 0xfcc001f5 },
+	{ 0x50378, 0x70003fb },
+	{ 0x5fffc, 0xfe4001fb },
+	{ 0x5037c, 0x34003fe },
+};
+
+static const struct ppp_table *downscale_y_table[PPP_DOWNSCALE_MAX] = {
+	[PPP_DOWNSCALE_PT2TOPT4] = downscale_y_table_pt2topt4,
+	[PPP_DOWNSCALE_PT4TOPT6] = downscale_y_table_pt4topt6,
+	[PPP_DOWNSCALE_PT6TOPT8] = downscale_y_table_pt6topt8,
+	[PPP_DOWNSCALE_PT8TOPT1] = downscale_y_table_pt8topt1,
+};
+
+void ppp_load_table(const struct ppp_table *table, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++)
+		PPP_WRITEL(table[i].val, table[i].reg);
+}
+
+void ppp_load_up_lut(void)
+{
+	ppp_load_table(upscale_table,
+		PPP_UPSCALE_MAX);
+}
+
+void ppp_load_gaussian_lut(void)
+{
+	ppp_load_table(mdp_gaussian_blur_table,
+		PPP_BLUR_SCALE_MAX);
+}
+
+void ppp_load_x_scale_table(int idx)
+{
+	ppp_load_table(downscale_x_table[idx], 64);
+}
+
+void ppp_load_y_scale_table(int idx)
+{
+	ppp_load_table(downscale_y_table[idx], 64);
+}
+
+uint32_t ppp_bpp(uint32_t type)
+{
+	if (MDP_IS_IMGTYPE_BAD(type))
+		return 0;
+	return bytes_per_pixel[type];
+}
+
+uint32_t ppp_src_config(uint32_t type)
+{
+	if (MDP_IS_IMGTYPE_BAD(type))
+		return 0;
+	return src_cfg_lut[type];
+}
+
+uint32_t ppp_out_config(uint32_t type)
+{
+	if (MDP_IS_IMGTYPE_BAD(type))
+		return 0;
+	return out_cfg_lut[type];
+}
+
+uint32_t ppp_pack_pattern(uint32_t type, uint32_t yuv2rgb)
+{
+	if (MDP_IS_IMGTYPE_BAD(type))
+		return 0;
+	if (yuv2rgb)
+		return swapped_pack_patt_lut[type];
+
+	return pack_patt_lut[type];
+}
+
+uint32_t ppp_dst_op_reg(uint32_t type)
+{
+	if (MDP_IS_IMGTYPE_BAD(type))
+		return 0;
+	return dst_op_reg[type];
+}
+
+uint32_t ppp_src_op_reg(uint32_t type)
+{
+	if (MDP_IS_IMGTYPE_BAD(type))
+		return 0;
+	return src_op_reg[type];
+}
+
+bool ppp_per_p_alpha(uint32_t type)
+{
+	if (MDP_IS_IMGTYPE_BAD(type))
+		return 0;
+	return per_pixel_alpha[type];
+}
+
+bool ppp_multi_plane(uint32_t type)
+{
+	if (MDP_IS_IMGTYPE_BAD(type))
+		return 0;
+	return multi_plane[type];
+}
+
+uint32_t *ppp_default_pre_lut(void)
+{
+	return default_pre_lut_val;
+}
+
+uint32_t *ppp_default_post_lut(void)
+{
+	return default_post_lut_val;
+}
+
+struct ppp_csc_table *ppp_csc_rgb2yuv(void)
+{
+	return &rgb2yuv;
+}
+
+struct ppp_csc_table *ppp_csc_table2(void)
+{
+	return &default_table2;
+}
diff --git a/drivers/video/fbdev/msm/mdp3_ppp_hwio.c b/drivers/video/fbdev/msm/mdp3_ppp_hwio.c
new file mode 100644
index 0000000..6f077e2
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdp3_ppp_hwio.c
@@ -0,0 +1,1365 @@
+/* Copyright (c) 2007, 2012-2013, 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2007 Google Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/file.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include "linux/proc_fs.h"
+
+#include "mdss_fb.h"
+#include "mdp3_ppp.h"
+#include "mdp3_hwio.h"
+#include "mdss_debug.h"
+
+/* SHIM Q Factor */
+#define PHI_Q_FACTOR          29
+#define PQF_PLUS_5            (PHI_Q_FACTOR + 5)	/* due to 32 phases */
+#define PQF_PLUS_4            (PHI_Q_FACTOR + 4)
+#define PQF_PLUS_2            (PHI_Q_FACTOR + 2)	/* to get 4.0 */
+#define PQF_MINUS_2           (PHI_Q_FACTOR - 2)	/* to get 0.25 */
+#define PQF_PLUS_5_PLUS_2     (PQF_PLUS_5 + 2)
+#define PQF_PLUS_5_MINUS_2    (PQF_PLUS_5 - 2)
+
+enum {
+	LAYER_FG = 0,
+	LAYER_BG,
+	LAYER_FB,
+	LAYER_MAX,
+};
+
+static long long mdp_do_div(long long num, long long den)
+{
+	do_div(num, den);
+	return num;
+}
+
+static int mdp_calc_scale_params(uint32_t org, uint32_t dim_in,
+	uint32_t dim_out, bool is_W, int32_t *phase_init_ptr,
+	uint32_t *phase_step_ptr)
+{
+	bool rpa_on = false;
+	int init_phase = 0;
+	uint64_t numer = 0;
+	uint64_t denom = 0;
+	int64_t point5 = 1;
+	int64_t one = 1;
+	int64_t k1, k2, k3, k4;	/* linear equation coefficients */
+	uint64_t int_mask;
+	uint64_t fract_mask;
+	uint64_t Os;
+	int64_t Osprime;
+	int64_t Od;
+	int64_t Odprime;
+	int64_t Oreq;
+	int64_t init_phase_temp;
+	int64_t delta;
+	uint32_t mult;
+
+	/*
+	 * The phase accumulator should really be rational for all cases in a
+	 * general purpose polyphase scaler for a tiled architecture with
+	 * non-zero * origin capability because there is no way to represent
+	 * certain scale factors in fixed point regardless of precision.
+	 * The error incurred in attempting to use fixed point is most
+	 * eggregious for SF where 1/SF is an integral multiple of 1/3.
+	 *
+	 * Set the RPA flag for this dimension.
+	 *
+	 * In order for 1/SF (dim_in/dim_out) to be an integral multiple of
+	 * 1/3, dim_out must be an integral multiple of 3.
+	 */
+	if (!(dim_out % 3)) {
+		mult = dim_out / 3;
+		rpa_on = (!(dim_in % mult));
+	}
+
+	numer = dim_out;
+	denom = dim_in;
+
+	/*
+	 * convert to U30.34 before division
+	 *
+	 * The K vectors carry 4 extra bits of precision
+	 * and are rounded.
+	 *
+	 * We initially go 5 bits over then round by adding
+	 * 1 and right shifting by 1
+	 * so final result is U31.33
+	 */
+	numer <<= PQF_PLUS_5;
+
+	/* now calculate the scale factor (aka k3) */
+	k3 = ((mdp_do_div(numer, denom) + 1) >> 1);
+
+	/* check scale factor for legal range [0.25 - 4.0] */
+	if (((k3 >> 4) < (1LL << PQF_MINUS_2)) ||
+	    ((k3 >> 4) > (1LL << PQF_PLUS_2))) {
+		return -EINVAL;
+	}
+
+	/* calculate inverse scale factor (aka k1) for phase init */
+	numer = dim_in;
+	denom = dim_out;
+	numer <<= PQF_PLUS_5;
+	k1 = ((mdp_do_div(numer, denom) + 1) >> 1);
+
+	/*
+	 * calculate initial phase and ROI overfetch
+	 */
+	/* convert point5 & one to S39.24 (will always be positive) */
+	point5 <<= (PQF_PLUS_4 - 1);
+	one <<= PQF_PLUS_4;
+	k2 = ((k1 - one) >> 1);
+	init_phase = (int)(k2 >> 4);
+	k4 = ((k3 - one) >> 1);
+	if (k3 != one) {
+		/* calculate the masks */
+		fract_mask = one - 1;
+		int_mask = ~fract_mask;
+
+		if (!rpa_on) {
+			/*
+			 * FIXED POINT IMPLEMENTATION
+			 */
+			if (org) {
+				/*
+				 * The complicated case; ROI origin != 0
+				 * init_phase needs to be adjusted
+				 * OF is also position dependent
+				 */
+
+				/* map (org - .5) into destination space */
+				Os = ((uint64_t) org << 1) - 1;
+				Od = ((k3 * Os) >> 1) + k4;
+
+				/* take the ceiling */
+				Odprime = (Od & int_mask);
+				if (Odprime != Od)
+					Odprime += one;
+
+				/* now map that back to source space */
+				Osprime = (k1 * (Odprime >> PQF_PLUS_4)) + k2;
+
+				/* then floor & decrement to calc the required
+				 * starting coordinate
+				 */
+				Oreq = (Osprime & int_mask) - one;
+
+				/* calculate initial phase */
+				init_phase_temp = Osprime - Oreq;
+				delta = ((int64_t) (org) << PQF_PLUS_4) - Oreq;
+				init_phase_temp -= delta;
+
+				/* limit to valid range before left shift */
+				delta = (init_phase_temp & (1LL << 63)) ?
+						4 : -4;
+				delta <<= PQF_PLUS_4;
+				while (abs((int)(init_phase_temp >>
+							PQF_PLUS_4)) > 4)
+					init_phase_temp += delta;
+
+				/*
+				 * right shift to account for extra bits of
+				 * precision
+				 */
+				init_phase = (int)(init_phase_temp >> 4);
+
+			}
+		} else {
+			/*
+			 * RPA IMPLEMENTATION
+			 *
+			 * init_phase needs to be calculated in all RPA_on
+			 * cases because it's a numerator, not a fixed
+			 * point value.
+			 */
+
+			/* map (org - .5) into destination space */
+			Os = ((uint64_t) org << PQF_PLUS_4) - point5;
+			Od = mdp_do_div((dim_out * (Os + point5)),
+					dim_in);
+			Od -= point5;
+
+			/* take the ceiling */
+			Odprime = (Od & int_mask);
+			if (Odprime != Od)
+				Odprime += one;
+
+			/* now map that back to source space */
+			Osprime =
+			    mdp_do_div((dim_in * (Odprime + point5)),
+				       dim_out);
+			Osprime -= point5;
+
+			/*
+			 * then floor & decrement to calculate the required
+			 * starting coordinate
+			 */
+			Oreq = (Osprime & int_mask) - one;
+
+			/* calculate initial phase */
+			init_phase_temp = Osprime - Oreq;
+			delta = ((int64_t) (org) << PQF_PLUS_4) - Oreq;
+			init_phase_temp -= delta;
+
+			/* limit to valid range before the left shift */
+			delta = (init_phase_temp & (1LL << 63)) ? 4 : -4;
+			delta <<= PQF_PLUS_4;
+			while (abs((int)(init_phase_temp >> PQF_PLUS_4)) > 4)
+				init_phase_temp += delta;
+
+			/*
+			 * right shift to account for extra bits of precision
+			 */
+			init_phase = (int)(init_phase_temp >> 4);
+		}
+	}
+
+	/* return the scale parameters */
+	*phase_init_ptr = init_phase;
+	*phase_step_ptr = (uint32_t) (k1 >> 4);
+
+	return 0;
+}
+
+static int scale_idx(int factor)
+{
+	int idx;
+
+	if (factor > 80)
+		idx = PPP_DOWNSCALE_PT8TOPT1;
+	else if (factor > 60)
+		idx = PPP_DOWNSCALE_PT6TOPT8;
+	else if (factor > 40)
+		idx = PPP_DOWNSCALE_PT4TOPT6;
+	else
+		idx = PPP_DOWNSCALE_PT2TOPT4;
+
+	return idx;
+}
+
+inline int32_t comp_conv_rgb2yuv(int32_t comp, int32_t y_high,
+		int32_t y_low, int32_t c_high, int32_t c_low)
+{
+	if (comp < 0)
+		comp = 0;
+	if (comp > 255)
+		comp = 255;
+
+	/* clamp */
+	if (comp < y_low)
+		comp = y_low;
+	if (comp > y_high)
+		comp = y_high;
+	return comp;
+}
+
+static uint32_t conv_rgb2yuv(uint32_t input_pixel,
+		uint16_t *matrix_vector,
+		uint16_t *bv,
+		uint16_t *clamp_vector)
+{
+	uint8_t input_C2, input_C0, input_C1;
+	uint32_t output;
+	int32_t comp_C2, comp_C1, comp_C0, temp;
+	int32_t temp1, temp2, temp3;
+	int32_t matrix[9];
+	int32_t bias_vector[3];
+	int32_t Y_low_limit, Y_high_limit, C_low_limit, C_high_limit;
+	int32_t i;
+
+	input_C2 = (input_pixel >> 16) & 0xFF;
+	input_C1 = (input_pixel >> 8) & 0xFF;
+	input_C0 = (input_pixel >> 0) & 0xFF;
+
+	comp_C0 = input_C0;
+	comp_C1 = input_C1;
+	comp_C2 = input_C2;
+
+	for (i = 0; i < MDP_CSC_SIZE; i++)
+		matrix[i] =
+		    ((int32_t) (((int32_t) matrix_vector[i]) << 20)) >> 20;
+
+	bias_vector[0] = (int32_t) (bv[0] & 0xFF);
+	bias_vector[1] = (int32_t) (bv[1] & 0xFF);
+	bias_vector[2] = (int32_t) (bv[2] & 0xFF);
+
+	Y_low_limit = (int32_t) clamp_vector[0];
+	Y_high_limit = (int32_t) clamp_vector[1];
+	C_low_limit = (int32_t) clamp_vector[2];
+	C_high_limit = (int32_t) clamp_vector[3];
+
+	/*
+	 * Color Conversion
+	 * reorder input colors
+	 */
+	temp = comp_C2;
+	comp_C2 = comp_C1;
+	comp_C1 = comp_C0;
+	comp_C0 = temp;
+
+	/* matrix multiplication */
+	temp1 = comp_C0 * matrix[0] + comp_C1 * matrix[1] +
+		comp_C2 * matrix[2];
+	temp2 = comp_C0 * matrix[3] + comp_C1 * matrix[4] +
+		comp_C2 * matrix[5];
+	temp3 = comp_C0 * matrix[6] + comp_C1 * matrix[7] +
+		comp_C2 * matrix[8];
+
+	comp_C0 = temp1 + 0x100;
+	comp_C1 = temp2 + 0x100;
+	comp_C2 = temp3 + 0x100;
+
+	/* take integer part */
+	comp_C0 >>= 9;
+	comp_C1 >>= 9;
+	comp_C2 >>= 9;
+
+	/* post bias (+) */
+	comp_C0 += bias_vector[0];
+	comp_C1 += bias_vector[1];
+	comp_C2 += bias_vector[2];
+
+	/* limit pixel to 8-bit */
+	comp_C0 = comp_conv_rgb2yuv(comp_C0, Y_high_limit,
+			Y_low_limit, C_high_limit, C_low_limit);
+	comp_C1 = comp_conv_rgb2yuv(comp_C1, Y_high_limit,
+			Y_low_limit, C_high_limit, C_low_limit);
+	comp_C2 = comp_conv_rgb2yuv(comp_C2, Y_high_limit,
+			Y_low_limit, C_high_limit, C_low_limit);
+
+	output = (comp_C2 << 16) | (comp_C1 << 8) | comp_C0;
+	return output;
+}
+
+inline void y_h_even_num(struct ppp_img_desc *img)
+{
+	img->roi.y = (img->roi.y / 2) * 2;
+	img->roi.height = (img->roi.height / 2) * 2;
+}
+
+inline void x_w_even_num(struct ppp_img_desc *img)
+{
+	img->roi.x = (img->roi.x / 2) * 2;
+	img->roi.width = (img->roi.width / 2) * 2;
+}
+
+bool check_if_rgb(int color)
+{
+	bool rgb = false;
+
+	switch (color) {
+	case MDP_RGB_565:
+	case MDP_BGR_565:
+	case MDP_RGB_888:
+	case MDP_BGR_888:
+	case MDP_BGRA_8888:
+	case MDP_RGBA_8888:
+	case MDP_ARGB_8888:
+	case MDP_XRGB_8888:
+	case MDP_RGBX_8888:
+	case MDP_BGRX_8888:
+		rgb = true;
+	default:
+		break;
+	}
+	return rgb;
+}
+
+uint8_t *mdp_adjust_rot_addr(struct ppp_blit_op *iBuf,
+	uint8_t *addr, uint32_t bpp, uint32_t uv, uint32_t layer)
+{
+	uint32_t ystride = 0;
+	uint32_t h_slice = 1;
+	uint32_t roi_width = 0;
+	uint32_t roi_height = 0;
+	uint32_t color_fmt = 0;
+
+	if (layer == LAYER_BG) {
+		ystride = iBuf->bg.prop.width * bpp;
+		roi_width =  iBuf->bg.roi.width;
+		roi_height = iBuf->bg.roi.height;
+		color_fmt = iBuf->bg.color_fmt;
+	} else {
+		ystride = iBuf->dst.prop.width * bpp;
+		roi_width =  iBuf->dst.roi.width;
+		roi_height = iBuf->dst.roi.height;
+		color_fmt = iBuf->dst.color_fmt;
+	}
+	if (uv && ((color_fmt == MDP_Y_CBCR_H2V2) ||
+		(color_fmt == MDP_Y_CRCB_H2V2)))
+		h_slice = 2;
+
+	if (((iBuf->mdp_op & MDPOP_ROT90) == MDPOP_ROT90) ^
+		((iBuf->mdp_op & MDPOP_LR) == MDPOP_LR)) {
+		addr += (roi_width - MIN(16, roi_width)) * bpp;
+	}
+	if ((iBuf->mdp_op & MDPOP_UD) == MDPOP_UD) {
+		addr += ((roi_height - MIN(16, roi_height))/h_slice) *
+			ystride;
+	}
+
+	return addr;
+}
+
+void mdp_adjust_start_addr(struct ppp_blit_op *blit_op,
+	struct ppp_img_desc *img, int v_slice,
+	int h_slice, uint32_t layer)
+{
+	uint32_t bpp = ppp_bpp(img->color_fmt);
+	int x = img->roi.x;
+	int y = img->roi.y;
+	uint32_t width = img->prop.width;
+
+	if (img->color_fmt == MDP_Y_CBCR_H2V2_ADRENO && layer == 0)
+		img->p0 += (x + y * ALIGN(width, 32)) * bpp;
+	else if (img->color_fmt == MDP_Y_CBCR_H2V2_VENUS && layer == 0)
+		img->p0 += (x + y * ALIGN(width, 128)) * bpp;
+	else
+		img->p0 += (x + y * width) * bpp;
+	if (layer != LAYER_FG)
+		img->p0 = mdp_adjust_rot_addr(blit_op, img->p0, bpp, 0, layer);
+
+	if (img->p1) {
+		/*
+		 * MDP_Y_CBCR_H2V2/MDP_Y_CRCB_H2V2 cosite for now
+		 * we need to shift x direction same as y dir for offsite
+		 */
+		if ((img->color_fmt == MDP_Y_CBCR_H2V2_ADRENO ||
+				img->color_fmt == MDP_Y_CBCR_H2V2_VENUS)
+							&& layer == 0)
+			img->p1 += ((x / h_slice) * h_slice + ((y == 0) ? 0 :
+			(((y + 1) / v_slice - 1) * (ALIGN(width/2, 32) * 2))))
+									* bpp;
+		else
+			img->p1 += ((x / h_slice) * h_slice +
+			((y == 0) ? 0 : ((y + 1) / v_slice - 1) * width)) * bpp;
+
+		if (layer != LAYER_FG)
+			img->p1 = mdp_adjust_rot_addr(blit_op,
+					img->p1, bpp, 0, layer);
+	}
+}
+
+int load_ppp_lut(int tableType, uint32_t *lut)
+{
+	int i;
+	uint32_t base_addr;
+
+	base_addr = tableType ? MDP3_PPP_POST_LUT : MDP3_PPP_PRE_LUT;
+	for (i = 0; i < PPP_LUT_MAX; i++)
+		PPP_WRITEL(lut[i], base_addr + MDP3_PPP_LUTn(i));
+
+	return 0;
+}
+
+/* Configure Primary CSC Matrix */
+int load_primary_matrix(struct ppp_csc_table *csc)
+{
+	int i;
+
+	for (i = 0; i < MDP_CSC_SIZE; i++)
+		PPP_WRITEL(csc->fwd_matrix[i], MDP3_PPP_CSC_PFMVn(i));
+
+	for (i = 0; i < MDP_CSC_SIZE; i++)
+		PPP_WRITEL(csc->rev_matrix[i], MDP3_PPP_CSC_PRMVn(i));
+
+	for (i = 0; i < MDP_BV_SIZE; i++)
+		PPP_WRITEL(csc->bv[i], MDP3_PPP_CSC_PBVn(i));
+
+	for (i = 0; i < MDP_LV_SIZE; i++)
+		PPP_WRITEL(csc->lv[i], MDP3_PPP_CSC_PLVn(i));
+
+	return 0;
+}
+
+/* Load Secondary CSC Matrix */
+int load_secondary_matrix(struct ppp_csc_table *csc)
+{
+	int i;
+
+	for (i = 0; i < MDP_CSC_SIZE; i++)
+		PPP_WRITEL(csc->fwd_matrix[i], MDP3_PPP_CSC_SFMVn(i));
+
+	for (i = 0; i < MDP_CSC_SIZE; i++)
+		PPP_WRITEL(csc->rev_matrix[i], MDP3_PPP_CSC_SRMVn(i));
+
+	for (i = 0; i < MDP_BV_SIZE; i++)
+		PPP_WRITEL(csc->bv[i], MDP3_PPP_CSC_SBVn(i));
+
+	for (i = 0; i < MDP_LV_SIZE; i++)
+		PPP_WRITEL(csc->lv[i], MDP3_PPP_CSC_SLVn(i));
+	return 0;
+}
+
+int load_csc_matrix(int matrix_type, struct ppp_csc_table *csc)
+{
+	if (matrix_type == CSC_PRIMARY_MATRIX)
+		return load_primary_matrix(csc);
+
+	return load_secondary_matrix(csc);
+}
+
+int config_ppp_src(struct ppp_img_desc *src, uint32_t yuv2rgb)
+{
+	uint32_t val;
+
+	val = ((src->roi.height & MDP3_PPP_XY_MASK) << MDP3_PPP_XY_OFFSET) |
+		   (src->roi.width & MDP3_PPP_XY_MASK);
+	PPP_WRITEL(val, MDP3_PPP_SRC_SIZE);
+
+	PPP_WRITEL(src->p0, MDP3_PPP_SRCP0_ADDR);
+	PPP_WRITEL(src->p1, MDP3_PPP_SRCP1_ADDR);
+	PPP_WRITEL(src->p3, MDP3_PPP_SRCP3_ADDR);
+
+	val = (src->stride0 & MDP3_PPP_STRIDE_MASK) |
+			((src->stride1 & MDP3_PPP_STRIDE_MASK) <<
+			MDP3_PPP_STRIDE1_OFFSET);
+	PPP_WRITEL(val, MDP3_PPP_SRC_YSTRIDE1_ADDR);
+	val = ((src->stride2 & MDP3_PPP_STRIDE_MASK) <<
+			MDP3_PPP_STRIDE1_OFFSET);
+	PPP_WRITEL(val, MDP3_PPP_SRC_YSTRIDE2_ADDR);
+
+	val = ppp_src_config(src->color_fmt);
+	val |= (src->roi.x % 2) ? PPP_SRC_BPP_ROI_ODD_X : 0;
+	val |= (src->roi.y % 2) ? PPP_SRC_BPP_ROI_ODD_Y : 0;
+	PPP_WRITEL(val, MDP3_PPP_SRC_FORMAT);
+	PPP_WRITEL(ppp_pack_pattern(src->color_fmt, yuv2rgb),
+		MDP3_PPP_SRC_UNPACK_PATTERN1);
+	return 0;
+}
+
+/*
+ * config_ppp_out() - program the PPP output (destination) registers.
+ *
+ * Sets the output format (with the pseudo-planar flag for the two-plane
+ * YUV formats), pack pattern, ROI size, plane addresses and strides.
+ * Always returns 0.
+ */
+int config_ppp_out(struct ppp_img_desc *dst, uint32_t yuv2rgb)
+{
+	uint32_t val;
+	bool pseudoplanr_output = false;
+
+	/* two-plane (Y + interleaved chroma) formats need the
+	 * pseudo-planar bit in the output format word
+	 */
+	switch (dst->color_fmt) {
+	case MDP_Y_CBCR_H2V2:
+	case MDP_Y_CRCB_H2V2:
+	case MDP_Y_CBCR_H2V1:
+	case MDP_Y_CRCB_H2V1:
+		pseudoplanr_output = true;
+		break;
+	default:
+		break;
+	}
+	val = ppp_out_config(dst->color_fmt);
+	if (pseudoplanr_output)
+		val |= PPP_DST_PLANE_PSEUDOPLN;
+	PPP_WRITEL(val, MDP3_PPP_OUT_FORMAT);
+	PPP_WRITEL(ppp_pack_pattern(dst->color_fmt, yuv2rgb),
+		MDP3_PPP_OUT_PACK_PATTERN1);
+
+	/* ROI size: height in the high field, width in the low field */
+	val = ((dst->roi.height & MDP3_PPP_XY_MASK) << MDP3_PPP_XY_OFFSET) |
+		   (dst->roi.width & MDP3_PPP_XY_MASK);
+	PPP_WRITEL(val, MDP3_PPP_OUT_SIZE);
+
+	PPP_WRITEL(dst->p0, MDP3_PPP_OUTP0_ADDR);
+	PPP_WRITEL(dst->p1, MDP3_PPP_OUTP1_ADDR);
+	PPP_WRITEL(dst->p3, MDP3_PPP_OUTP3_ADDR);
+
+	/* strides 0 and 1 share one register; stride 2 goes in the next */
+	val = (dst->stride0 & MDP3_PPP_STRIDE_MASK) |
+			((dst->stride1 & MDP3_PPP_STRIDE_MASK) <<
+			MDP3_PPP_STRIDE1_OFFSET);
+	PPP_WRITEL(val, MDP3_PPP_OUT_YSTRIDE1_ADDR);
+	val = ((dst->stride2 & MDP3_PPP_STRIDE_MASK) <<
+			MDP3_PPP_STRIDE1_OFFSET);
+	PPP_WRITEL(val, MDP3_PPP_OUT_YSTRIDE2_ADDR);
+	return 0;
+}
+
+/*
+ * config_ppp_background() - program the PPP background-layer registers.
+ *
+ * Writes the background plane addresses, strides, format word and
+ * unpack pattern. Used when blending is enabled. Always returns 0.
+ */
+int config_ppp_background(struct ppp_img_desc *bg, uint32_t yuv2rgb)
+{
+	uint32_t val;
+
+	PPP_WRITEL(bg->p0, MDP3_PPP_BGP0_ADDR);
+	PPP_WRITEL(bg->p1, MDP3_PPP_BGP1_ADDR);
+	PPP_WRITEL(bg->p3, MDP3_PPP_BGP3_ADDR);
+
+	/* strides 0 and 1 share one register; stride 2 goes in the next */
+	val = (bg->stride0 & MDP3_PPP_STRIDE_MASK) |
+			((bg->stride1 & MDP3_PPP_STRIDE_MASK) <<
+			MDP3_PPP_STRIDE1_OFFSET);
+	PPP_WRITEL(val, MDP3_PPP_BG_YSTRIDE1_ADDR);
+	val = ((bg->stride2 & MDP3_PPP_STRIDE_MASK) <<
+			MDP3_PPP_STRIDE1_OFFSET);
+	PPP_WRITEL(val, MDP3_PPP_BG_YSTRIDE2_ADDR);
+
+	PPP_WRITEL(ppp_src_config(bg->color_fmt),
+		MDP3_PPP_BG_FORMAT);
+	PPP_WRITEL(ppp_pack_pattern(bg->color_fmt, yuv2rgb),
+		MDP3_PPP_BG_UNPACK_PATTERN1);
+	return 0;
+}
+
+/*
+ * ppp_edge_rep_luma_pixel() - compute luma edge-repeat parameters.
+ *
+ * Fills @er with the leftmost/rightmost/top/bottom luma pixel
+ * locations the scaler will sample, and how many edge pixels Tile
+ * Fetch must replicate, based on the ratio between the source ROI
+ * and the (rotation-adjusted) destination ROI. Interpolation points
+ * start ROI-relative and are shifted to image coordinates at the end.
+ */
+void ppp_edge_rep_luma_pixel(struct ppp_blit_op *blit_op,
+	struct ppp_edge_rep *er)
+{
+	if (blit_op->mdp_op & MDPOP_ASCALE) {
+
+		er->is_scale_enabled = 1;
+
+		/* a 90-degree rotation swaps the destination axes */
+		if (blit_op->mdp_op & MDPOP_ROT90) {
+			er->dst_roi_width = blit_op->dst.roi.height;
+			er->dst_roi_height = blit_op->dst.roi.width;
+		} else {
+			er->dst_roi_width = blit_op->dst.roi.width;
+			er->dst_roi_height = blit_op->dst.roi.height;
+		}
+
+		/*
+		 * Find out the luma pixels needed for scaling in the
+		 * x direction (LEFT and RIGHT).  Locations of pixels are
+		 * relative to the ROI. Upper-left corner of ROI corresponds
+		 * to coordinates (0,0). Also set the number of luma pixel
+		 * to repeat.
+		 */
+		if (blit_op->src.roi.width > 3 * er->dst_roi_width) {
+			/* scale factor < 1/3 */
+			er->luma_interp_point_right =
+				(blit_op->src.roi.width - 1);
+		} else if (blit_op->src.roi.width == 3 * er->dst_roi_width) {
+			/* scale factor == 1/3 */
+			er->luma_interp_point_right =
+				(blit_op->src.roi.width - 1) + 1;
+			er->luma_repeat_right = 1;
+		} else if ((blit_op->src.roi.width > er->dst_roi_width) &&
+			   (blit_op->src.roi.width < 3 * er->dst_roi_width)) {
+			/* 1/3 < scale factor < 1 */
+			er->luma_interp_point_left = -1;
+			er->luma_interp_point_right =
+				(blit_op->src.roi.width - 1) + 1;
+			er->luma_repeat_left = 1;
+			er->luma_repeat_right = 1;
+		} else if (blit_op->src.roi.width == er->dst_roi_width) {
+			/* scale factor == 1 */
+			er->luma_interp_point_left = -1;
+			er->luma_interp_point_right =
+				(blit_op->src.roi.width - 1) + 2;
+			er->luma_repeat_left = 1;
+			er->luma_repeat_right = 2;
+		} else {
+			  /* scale factor > 1 */
+			er->luma_interp_point_left = -2;
+			er->luma_interp_point_right =
+				(blit_op->src.roi.width - 1) + 2;
+			er->luma_repeat_left = 2;
+			er->luma_repeat_right = 2;
+		}
+
+		/*
+		 * Find out the number of pixels needed for scaling in the
+		 * y direction (TOP and BOTTOM).  Locations of pixels are
+		 * relative to the ROI. Upper-left corner of ROI corresponds
+		 * to coordinates (0,0). Also set the number of luma pixel
+		 * to repeat.
+		 */
+		if (blit_op->src.roi.height > 3 * er->dst_roi_height) {
+			/* scale factor < 1/3 */
+			er->luma_interp_point_bottom =
+				(blit_op->src.roi.height - 1);
+		} else if (blit_op->src.roi.height == 3 * er->dst_roi_height) {
+			/* scale factor == 1/3 */
+			er->luma_interp_point_bottom =
+				(blit_op->src.roi.height - 1) + 1;
+			er->luma_repeat_bottom = 1;
+		} else if ((blit_op->src.roi.height > er->dst_roi_height) &&
+			   (blit_op->src.roi.height < 3 * er->dst_roi_height)) {
+			/* 1/3 < scale factor < 1 */
+			er->luma_interp_point_top = -1;
+			er->luma_interp_point_bottom =
+				(blit_op->src.roi.height - 1) + 1;
+			er->luma_repeat_top = 1;
+			er->luma_repeat_bottom = 1;
+		} else if (blit_op->src.roi.height == er->dst_roi_height) {
+			/* scale factor == 1 */
+			er->luma_interp_point_top = -1;
+			er->luma_interp_point_bottom =
+				(blit_op->src.roi.height - 1) + 2;
+			er->luma_repeat_top = 1;
+			er->luma_repeat_bottom = 2;
+		} else {
+			/* scale factor > 1 */
+			er->luma_interp_point_top = -2;
+			er->luma_interp_point_bottom =
+				(blit_op->src.roi.height - 1) + 2;
+			er->luma_repeat_top = 2;
+			er->luma_repeat_bottom = 2;
+		}
+	} else {
+		/*
+		 * Since no scaling needed, Tile Fetch does not require any
+		 * more luma pixel than what the ROI contains.
+		 */
+		er->luma_interp_point_right =
+			(int32_t) (blit_op->src.roi.width - 1);
+		er->luma_interp_point_bottom =
+			(int32_t) (blit_op->src.roi.height - 1);
+	}
+	/* After adding the ROI offsets, we have locations of
+	 * luma_interp_points relative to the image.
+	 */
+	er->luma_interp_point_left += (int32_t) (blit_op->src.roi.x);
+	er->luma_interp_point_right += (int32_t) (blit_op->src.roi.x);
+	er->luma_interp_point_top += (int32_t) (blit_op->src.roi.y);
+	er->luma_interp_point_bottom += (int32_t) (blit_op->src.roi.y);
+}
+
+/*
+ * ppp_edge_rep_chroma_pixel() - compute chroma edge-repeat parameters.
+ *
+ * Derives the chroma interpolation points from the luma points already
+ * stored in @er (accounting for H2V1/H2V2 subsampling), works out which
+ * chroma pixels lie inside the source ROI, and sets the number of
+ * chroma pixels Tile Fetch must replicate at each edge. Formats with no
+ * chroma subsampling are treated as 4:4:4 and need no replication math.
+ */
+void ppp_edge_rep_chroma_pixel(struct ppp_blit_op *blit_op,
+	struct ppp_edge_rep *er)
+{
+	bool chroma_edge_enable = true;
+	uint32_t is_yuv_offsite_vertical = 0;
+
+	/* find out which chroma pixels are needed for chroma upsampling. */
+	switch (blit_op->src.color_fmt) {
+	case MDP_Y_CBCR_H2V1:
+	case MDP_Y_CRCB_H2V1:
+	case MDP_YCRYCB_H2V1:
+		/* subsampled horizontally only: halve the x points */
+		er->chroma_interp_point_left = er->luma_interp_point_left >> 1;
+		er->chroma_interp_point_right =
+			(er->luma_interp_point_right + 1) >> 1;
+		er->chroma_interp_point_top = er->luma_interp_point_top;
+		er->chroma_interp_point_bottom = er->luma_interp_point_bottom;
+		break;
+
+	case MDP_Y_CBCR_H2V2:
+	case MDP_Y_CBCR_H2V2_ADRENO:
+	case MDP_Y_CBCR_H2V2_VENUS:
+	case MDP_Y_CRCB_H2V2:
+		/* subsampled both ways; chroma sits offsite vertically */
+		er->chroma_interp_point_left = er->luma_interp_point_left >> 1;
+		er->chroma_interp_point_right =
+			(er->luma_interp_point_right + 1) >> 1;
+		er->chroma_interp_point_top =
+			(er->luma_interp_point_top - 1) >> 1;
+		er->chroma_interp_point_bottom =
+		    (er->luma_interp_point_bottom + 1) >> 1;
+		is_yuv_offsite_vertical = 1;
+		break;
+
+	default:
+		/* fully sampled: chroma tracks luma exactly */
+		chroma_edge_enable = false;
+		er->chroma_interp_point_left = er->luma_interp_point_left;
+		er->chroma_interp_point_right = er->luma_interp_point_right;
+		er->chroma_interp_point_top = er->luma_interp_point_top;
+		er->chroma_interp_point_bottom = er->luma_interp_point_bottom;
+
+		break;
+	}
+
+	if (chroma_edge_enable) {
+		/* Defines which chroma pixels belongs to the roi */
+		switch (blit_op->src.color_fmt) {
+		case MDP_Y_CBCR_H2V1:
+		case MDP_Y_CRCB_H2V1:
+		case MDP_YCRYCB_H2V1:
+			er->chroma_bound_left = blit_op->src.roi.x / 2;
+			/* there are half as many chroma pixel as luma pixels */
+			er->chroma_bound_right =
+			    (blit_op->src.roi.width +
+				blit_op->src.roi.x - 1) / 2;
+			er->chroma_bound_top = blit_op->src.roi.y;
+			er->chroma_bound_bottom =
+			    (blit_op->src.roi.height + blit_op->src.roi.y - 1);
+			break;
+		case MDP_Y_CBCR_H2V2:
+		case MDP_Y_CBCR_H2V2_ADRENO:
+		case MDP_Y_CBCR_H2V2_VENUS:
+		case MDP_Y_CRCB_H2V2:
+			/*
+			 * cosite in horizontal dir, and offsite in vertical dir
+			 * width of chroma ROI is 1/2 of size of luma ROI
+			 * height of chroma ROI is 1/2 of size of luma ROI
+			 */
+			er->chroma_bound_left = blit_op->src.roi.x / 2;
+			er->chroma_bound_right =
+			    (blit_op->src.roi.width +
+				blit_op->src.roi.x - 1) / 2;
+			er->chroma_bound_top = blit_op->src.roi.y / 2;
+			er->chroma_bound_bottom =
+			    (blit_op->src.roi.height +
+				blit_op->src.roi.y - 1) / 2;
+			break;
+
+		default:
+			/*
+			 * If no valid chroma sub-sampling format specified,
+			 * assume 4:4:4 ( i.e. fully sampled).
+			 */
+			er->chroma_bound_left = blit_op->src.roi.x;
+			er->chroma_bound_right = blit_op->src.roi.width +
+				blit_op->src.roi.x - 1;
+			er->chroma_bound_top = blit_op->src.roi.y;
+			er->chroma_bound_bottom =
+			    (blit_op->src.roi.height + blit_op->src.roi.y - 1);
+			break;
+		}
+
+		/*
+		 * Knowing which chroma pixels are needed, and which chroma
+		 * pixels belong to the ROI (i.e. available for fetching ),
+		 * calculate how many chroma pixels Tile Fetch needs to
+		 * duplicate.  If any required chroma pixels falls outside
+		 * of the ROI, Tile Fetch must obtain them by replicating
+		 * pixels.
+		 */
+		if (er->chroma_bound_left > er->chroma_interp_point_left)
+			er->chroma_repeat_left =
+			    er->chroma_bound_left -
+				er->chroma_interp_point_left;
+		else
+			er->chroma_repeat_left = 0;
+
+		if (er->chroma_interp_point_right > er->chroma_bound_right)
+			er->chroma_repeat_right =
+			    er->chroma_interp_point_right -
+				er->chroma_bound_right;
+		else
+			er->chroma_repeat_right = 0;
+
+		if (er->chroma_bound_top > er->chroma_interp_point_top)
+			er->chroma_repeat_top =
+			    er->chroma_bound_top -
+				er->chroma_interp_point_top;
+		else
+			er->chroma_repeat_top = 0;
+
+		if (er->chroma_interp_point_bottom > er->chroma_bound_bottom)
+			er->chroma_repeat_bottom =
+			    er->chroma_interp_point_bottom -
+				er->chroma_bound_bottom;
+		else
+			er->chroma_repeat_bottom = 0;
+
+		/* degenerate 1-line source with offsite chroma: repeat
+		 * everything from the bottom
+		 */
+		if (er->is_scale_enabled && (blit_op->src.roi.height == 1)
+		    && is_yuv_offsite_vertical) {
+			er->chroma_repeat_bottom = 3;
+			er->chroma_repeat_top = 0;
+		}
+	}
+}
+
+/*
+ * config_ppp_edge_rep() - compute and program the source edge-repeat
+ * register.
+ *
+ * Runs the luma and chroma edge-repeat calculations, validates that
+ * every repeat count fits in the 2-bit register fields (0..3), then
+ * packs the eight counts into MDP3_PPP_SRC_EDGE_REP.
+ *
+ * Returns 0 on success, -EINVAL if any repeat count is out of range.
+ */
+int config_ppp_edge_rep(struct ppp_blit_op *blit_op)
+{
+	uint32_t reg = 0;
+	struct ppp_edge_rep er;
+
+	memset(&er, 0, sizeof(er));
+
+	ppp_edge_rep_luma_pixel(blit_op, &er);
+
+	/*
+	 * After adding the ROI offsets, we have locations of
+	 * chroma_interp_points relative to the image.
+	 */
+	er.chroma_interp_point_left = er.luma_interp_point_left;
+	er.chroma_interp_point_right = er.luma_interp_point_right;
+	er.chroma_interp_point_top = er.luma_interp_point_top;
+	er.chroma_interp_point_bottom = er.luma_interp_point_bottom;
+
+	ppp_edge_rep_chroma_pixel(blit_op, &er);
+	/* ensure repeats are >=0 and no larger than 3 pixels */
+	/* NOTE(review): if the repeat fields in struct ppp_edge_rep are
+	 * unsigned, the "< 0" checks below can never trigger — confirm
+	 * the field types in the struct declaration.
+	 */
+	if ((er.chroma_repeat_left < 0) || (er.chroma_repeat_right < 0) ||
+	    (er.chroma_repeat_top < 0) || (er.chroma_repeat_bottom < 0))
+		return -EINVAL;
+	if ((er.chroma_repeat_left > 3) || (er.chroma_repeat_right > 3) ||
+	    (er.chroma_repeat_top > 3) || (er.chroma_repeat_bottom > 3))
+		return -EINVAL;
+	if ((er.luma_repeat_left < 0) || (er.luma_repeat_right < 0) ||
+	    (er.luma_repeat_top < 0) || (er.luma_repeat_bottom < 0))
+		return -EINVAL;
+	if ((er.luma_repeat_left > 3) || (er.luma_repeat_right > 3) ||
+	    (er.luma_repeat_top > 3) || (er.luma_repeat_bottom > 3))
+		return -EINVAL;
+
+	/* pack the 2-bit repeat counts into their register fields */
+	reg |= (er.chroma_repeat_left & 3) << MDP_LEFT_CHROMA;
+	reg |= (er.chroma_repeat_right & 3) << MDP_RIGHT_CHROMA;
+	reg |= (er.chroma_repeat_top & 3) << MDP_TOP_CHROMA;
+	reg |= (er.chroma_repeat_bottom & 3) << MDP_BOTTOM_CHROMA;
+	reg |= (er.luma_repeat_left & 3) << MDP_LEFT_LUMA;
+	reg |= (er.luma_repeat_right & 3) << MDP_RIGHT_LUMA;
+	reg |= (er.luma_repeat_top & 3) << MDP_TOP_LUMA;
+	reg |= (er.luma_repeat_bottom & 3) << MDP_BOTTOM_LUMA;
+	PPP_WRITEL(reg, MDP3_PPP_SRC_EDGE_REP);
+	return 0;
+}
+
+/*
+ * config_ppp_bg_edge_rep() - program background-layer edge replication.
+ *
+ * For chroma-subsampled destination formats, sets the edge-repeat bits
+ * for the sides of the background where the ROI touches the image
+ * boundary (and, for the right edge, only when the ROI width is even).
+ * Always returns 0.
+ */
+int config_ppp_bg_edge_rep(struct ppp_blit_op *blit_op)
+{
+	uint32_t reg = 0;
+
+	switch (blit_op->dst.color_fmt) {
+	case MDP_Y_CBCR_H2V2:
+	case MDP_Y_CRCB_H2V2:
+		/* vertically subsampled: top/bottom edges matter too */
+		if (blit_op->dst.roi.y == 0)
+			reg |= BIT(MDP_TOP_CHROMA);
+
+		if ((blit_op->dst.roi.y + blit_op->dst.roi.height) ==
+		    blit_op->dst.prop.height) {
+			reg |= BIT(MDP_BOTTOM_CHROMA);
+		}
+
+		if (((blit_op->dst.roi.x + blit_op->dst.roi.width) ==
+				blit_op->dst.prop.width) &&
+				((blit_op->dst.roi.width % 2) == 0))
+			reg |= BIT(MDP_RIGHT_CHROMA);
+		break;
+	case MDP_Y_CBCR_H2V1:
+	case MDP_Y_CRCB_H2V1:
+	case MDP_YCRYCB_H2V1:
+		/* horizontally subsampled only: right edge */
+		if (((blit_op->dst.roi.x + blit_op->dst.roi.width) ==
+				blit_op->dst.prop.width) &&
+				((blit_op->dst.roi.width % 2) == 0))
+			reg |= BIT(MDP_RIGHT_CHROMA);
+		break;
+	default:
+		break;
+	}
+	PPP_WRITEL(reg, MDP3_PPP_BG_EDGE_REP);
+	return 0;
+}
+
+/*
+ * config_ppp_lut() - enable per-component LUTs in the PPP op register.
+ *
+ * Sets the C0/C1/C2 LUT-enable bits in *@pppop_reg_ptr for every
+ * component whose flag is non-zero. Always returns 0.
+ */
+int config_ppp_lut(uint32_t *pppop_reg_ptr, int lut_c0_en,
+	int lut_c1_en, int lut_c2_en)
+{
+	uint32_t lut_bits = 0;
+
+	if (lut_c0_en)
+		lut_bits |= MDP_LUT_C0_EN;
+	if (lut_c1_en)
+		lut_bits |= MDP_LUT_C1_EN;
+	if (lut_c2_en)
+		lut_bits |= MDP_LUT_C2_EN;
+
+	*pppop_reg_ptr |= lut_bits;
+	return 0;
+}
+
+/*
+ * config_ppp_scale() - configure PPP scaling for a blit.
+ *
+ * When MDPOP_ASCALE is requested, enables X/Y scaling in the op
+ * register, programs the phase init/step registers, and loads the
+ * appropriate scale (or blur) coefficient tables. If source and
+ * destination ROI sizes match and no blur is requested, the scale flag
+ * is cleared from mdp_op instead. Edge-repeat configuration is always
+ * applied at the end. Always returns 0.
+ */
+int config_ppp_scale(struct ppp_blit_op *blit_op, uint32_t *pppop_reg_ptr)
+{
+	struct ppp_img_desc *src = &blit_op->src;
+	struct ppp_img_desc *dst = &blit_op->dst;
+	uint32_t dstW, dstH;
+	uint32_t x_fac, y_fac;
+	uint32_t mdp_blur = 0;
+	uint32_t phase_init_x, phase_init_y, phase_step_x, phase_step_y;
+	int x_idx, y_idx;
+
+	if (blit_op->mdp_op & MDPOP_ASCALE) {
+		/* a 90-degree rotation swaps the destination axes */
+		if (blit_op->mdp_op & MDPOP_ROT90) {
+			dstW = dst->roi.height;
+			dstH = dst->roi.width;
+		} else {
+			dstW = dst->roi.width;
+			dstH = dst->roi.height;
+		}
+		*pppop_reg_ptr |=
+			(PPP_OP_SCALE_Y_ON | PPP_OP_SCALE_X_ON);
+
+		mdp_blur = blit_op->mdp_op & MDPOP_BLUR;
+
+		if ((dstW != src->roi.width) ||
+		    (dstH != src->roi.height) || mdp_blur) {
+
+			/*
+			 * Use source origin as 0 for computing initial
+			 * phase and step size. Incorrect initial phase and
+			 * step size value results in green line issue.
+			 */
+			mdp_calc_scale_params(0,
+					blit_op->src.roi.width,
+					dstW, 1, &phase_init_x,
+					&phase_step_x);
+			mdp_calc_scale_params(0,
+					blit_op->src.roi.height,
+					dstH, 0, &phase_init_y,
+					&phase_step_y);
+
+			PPP_WRITEL(phase_init_x, MDP3_PPP_SCALE_PHASEX_INIT);
+			PPP_WRITEL(phase_init_y, MDP3_PPP_SCALE_PHASEY_INIT);
+			PPP_WRITEL(phase_step_x, MDP3_PPP_SCALE_PHASEX_STEP);
+			PPP_WRITEL(phase_step_y, MDP3_PPP_SCALE_PHASEY_STEP);
+
+
+			/* upscaling in either axis needs the up-sample LUT */
+			if (dstW > src->roi.width || dstH > src->roi.height)
+				ppp_load_up_lut();
+
+			if (mdp_blur)
+				ppp_load_gaussian_lut();
+
+			/* downscale: pick coefficient tables from the
+			 * percentage scale factor per axis
+			 */
+			if (dstW <= src->roi.width) {
+				x_fac = (dstW * 100) / src->roi.width;
+				x_idx = scale_idx(x_fac);
+				ppp_load_x_scale_table(x_idx);
+			}
+			if (dstH <= src->roi.height) {
+				y_fac = (dstH * 100) / src->roi.height;
+				y_idx = scale_idx(y_fac);
+				ppp_load_y_scale_table(y_idx);
+			}
+
+		} else {
+			/* 1:1 with no blur: scaling not actually needed */
+			blit_op->mdp_op &= ~(MDPOP_ASCALE);
+		}
+	}
+	config_ppp_edge_rep(blit_op);
+	config_ppp_bg_edge_rep(blit_op);
+	return 0;
+}
+
+/*
+ * config_ppp_csc() - enable colour-space conversion when the source
+ * and destination colour spaces differ.
+ *
+ * YUV -> RGB additionally sets the YCBCR2RGB direction bit; RGB -> YUV
+ * only turns conversion on. Same-space blits leave the register
+ * untouched. Always returns 0.
+ */
+int config_ppp_csc(int src_color, int dst_color, uint32_t *pppop_reg_ptr)
+{
+	bool src_is_rgb = check_if_rgb(src_color);
+	bool dst_is_rgb = check_if_rgb(dst_color);
+
+	if (src_is_rgb != dst_is_rgb) {
+		*pppop_reg_ptr |= PPP_OP_CONVERT_ON;
+		if (!src_is_rgb)
+			*pppop_reg_ptr |= PPP_OP_CONVERT_YCBCR2RGB;
+	}
+
+	return 0;
+}
+
+/*
+ * config_ppp_blend() - configure PPP blending for a blit.
+ *
+ * Selects the blend mode from the source format (per-pixel alpha or
+ * not), the requested ops (premultiplied alpha, alpha blend,
+ * transparency colour, layer-is-foreground) and the YUV smart-blit
+ * state, then programs the BG alpha-select and blend-parameter
+ * registers. When blending is on, the background layer registers are
+ * programmed too, and for YCRYCB output the transparency colour is
+ * converted to YUV. Always returns 0.
+ */
+int config_ppp_blend(struct ppp_blit_op *blit_op,
+			uint32_t *pppop_reg_ptr,
+			bool is_yuv_smart_blit, int smart_blit_bg_alpha)
+{
+	struct ppp_csc_table *csc;
+	uint32_t alpha, trans_color;
+	uint32_t val = 0;
+	int c_fmt = blit_op->src.color_fmt;
+	int bg_alpha;
+
+	csc = ppp_csc_rgb2yuv();
+	alpha = blit_op->blend.const_alpha;
+	trans_color = blit_op->blend.trans_color;
+	if (blit_op->mdp_op & MDPOP_FG_PM_ALPHA) {
+		/* premultiplied-alpha foreground */
+		if (ppp_per_p_alpha(c_fmt)) {
+			*pppop_reg_ptr |= PPP_OP_ROT_ON |
+					  PPP_OP_BLEND_ON |
+					  PPP_OP_BLEND_CONSTANT_ALPHA;
+		} else {
+			/* constant alpha of 0xff is a no-op: drop the flag */
+			if ((blit_op->mdp_op & MDPOP_ALPHAB)
+				&& (blit_op->blend.const_alpha == 0xff)) {
+				blit_op->mdp_op &= ~(MDPOP_ALPHAB);
+			}
+
+			if ((blit_op->mdp_op & MDPOP_ALPHAB)
+			   || (blit_op->mdp_op & MDPOP_TRANSP)) {
+
+				*pppop_reg_ptr |= PPP_OP_ROT_ON |
+					PPP_OP_BLEND_ON |
+					PPP_OP_BLEND_CONSTANT_ALPHA |
+					PPP_OP_BLEND_ALPHA_BLEND_NORMAL;
+			}
+		}
+
+		bg_alpha = PPP_BLEND_BG_USE_ALPHA_SEL |
+			PPP_BLEND_BG_ALPHA_REVERSE;
+
+		if ((ppp_per_p_alpha(c_fmt)) && !(blit_op->mdp_op &
+						MDPOP_LAYER_IS_FG)) {
+			bg_alpha |= PPP_BLEND_BG_SRCPIXEL_ALPHA;
+		} else {
+			bg_alpha |= PPP_BLEND_BG_CONSTANT_ALPHA;
+			bg_alpha |= blit_op->blend.const_alpha << 24;
+		}
+		PPP_WRITEL(bg_alpha, MDP3_PPP_BLEND_BG_ALPHA_SEL);
+
+		if (blit_op->mdp_op & MDPOP_TRANSP)
+			*pppop_reg_ptr |= PPP_BLEND_CALPHA_TRNASP;
+	} else if (ppp_per_p_alpha(c_fmt)) {
+		/* non-premultiplied source with per-pixel alpha */
+		if (blit_op->mdp_op & MDPOP_LAYER_IS_FG)
+			*pppop_reg_ptr |= PPP_OP_ROT_ON |
+				  PPP_OP_BLEND_ON |
+				  PPP_OP_BLEND_CONSTANT_ALPHA;
+		else
+			*pppop_reg_ptr |= PPP_OP_ROT_ON |
+				  PPP_OP_BLEND_ON |
+				  PPP_OP_BLEND_SRCPIXEL_ALPHA;
+		PPP_WRITEL(0, MDP3_PPP_BLEND_BG_ALPHA_SEL);
+	} else {
+		/* no per-pixel alpha: constant alpha / transparency path */
+		if ((blit_op->mdp_op & MDPOP_ALPHAB)
+				&& (blit_op->blend.const_alpha == 0xff)) {
+			blit_op->mdp_op &=
+				~(MDPOP_ALPHAB);
+		}
+
+		if ((blit_op->mdp_op & MDPOP_ALPHAB)
+		   || (blit_op->mdp_op & MDPOP_TRANSP)) {
+			*pppop_reg_ptr |= PPP_OP_ROT_ON |
+				PPP_OP_BLEND_ON |
+				PPP_OP_BLEND_CONSTANT_ALPHA |
+				PPP_OP_BLEND_ALPHA_BLEND_NORMAL;
+		}
+
+		if (blit_op->mdp_op & MDPOP_TRANSP)
+			*pppop_reg_ptr |=
+				PPP_BLEND_CALPHA_TRNASP;
+		if (is_yuv_smart_blit) {
+			/* reverse-equation blend against the cached BG */
+			*pppop_reg_ptr |= PPP_OP_ROT_ON |
+				PPP_OP_BLEND_ON |
+				PPP_OP_BLEND_BG_ALPHA |
+				PPP_OP_BLEND_EQ_REVERSE;
+
+			if (smart_blit_bg_alpha < 0xFF)
+				bg_alpha = PPP_BLEND_BG_USE_ALPHA_SEL |
+					PPP_BLEND_BG_DSTPIXEL_ALPHA;
+			else
+				bg_alpha = PPP_BLEND_BG_USE_ALPHA_SEL |
+					PPP_BLEND_BG_DSTPIXEL_ALPHA |
+					PPP_BLEND_BG_CONSTANT_ALPHA;
+
+			bg_alpha |= smart_blit_bg_alpha << 24;
+			PPP_WRITEL(bg_alpha, MDP3_PPP_BLEND_BG_ALPHA_SEL);
+		} else {
+		PPP_WRITEL(0, MDP3_PPP_BLEND_BG_ALPHA_SEL);
+	}
+	}
+
+	if (*pppop_reg_ptr & PPP_OP_BLEND_ON) {
+		/* blend needs the BG layer programmed as well */
+		if (is_yuv_smart_blit)
+			config_ppp_background(&blit_op->bg, 1);
+		else
+			config_ppp_background(&blit_op->bg, 0);
+
+		if (blit_op->dst.color_fmt == MDP_YCRYCB_H2V1) {
+			*pppop_reg_ptr |= PPP_OP_BG_CHROMA_H2V1;
+			if (blit_op->mdp_op & MDPOP_TRANSP) {
+				/* transparency key must match the YUV output */
+				trans_color = conv_rgb2yuv(trans_color,
+					&csc->fwd_matrix[0],
+					&csc->bv[0],
+					&csc->lv[0]);
+			}
+		}
+	}
+	if (is_yuv_smart_blit) {
+		PPP_WRITEL(0, MDP3_PPP_BLEND_PARAM);
+	} else {
+	val = (alpha << MDP_BLEND_CONST_ALPHA);
+	val |= (trans_color & MDP_BLEND_TRASP_COL_MASK);
+	PPP_WRITEL(val, MDP3_PPP_BLEND_PARAM);
+	}
+	return 0;
+}
+
+/*
+ * config_ppp_rotation() - translate rotation/flip request bits into
+ * PPP operation-register bits.
+ *
+ * Always turns rotation on, then adds 90-degree, left-right and
+ * up-down flags as requested in @mdp_op. Always returns 0.
+ */
+int config_ppp_rotation(uint32_t mdp_op, uint32_t *pppop_reg_ptr)
+{
+	uint32_t rot_bits = PPP_OP_ROT_ON;
+
+	if (mdp_op & MDPOP_ROT90)
+		rot_bits |= PPP_OP_ROT_90;
+	if (mdp_op & MDPOP_LR)
+		rot_bits |= PPP_OP_FLIP_LR;
+	if (mdp_op & MDPOP_UD)
+		rot_bits |= PPP_OP_FLIP_UD;
+
+	*pppop_reg_ptr |= rot_bits;
+	return 0;
+}
+
+/*
+ * config_ppp_op_mode() - build and program the full PPP configuration
+ * for one blit.
+ *
+ * Computes the operation-mode word from the source/destination colour
+ * formats, CSC, dither, rotation and scaling requests; derives strides
+ * and plane addresses; handles the YUV "smart blit" two-pass scheme by
+ * caching the BG layer between calls (static locals); and finally
+ * writes the op-mode register. Always returns 0.
+ *
+ * Fix vs. previous revision: the FB-layer pr_debug used %p where the
+ * FG/BG debug lines use %pK; plain %p prints raw kernel addresses in
+ * the log, so it is switched to %pK for consistency and to avoid the
+ * address leak.
+ */
+int config_ppp_op_mode(struct ppp_blit_op *blit_op)
+{
+	uint32_t yuv2rgb;
+	uint32_t ppp_operation_reg = 0;
+	int sv_slice, sh_slice;
+	int dv_slice, dh_slice;
+	static struct ppp_img_desc bg_img_param;
+	static int bg_alpha;
+	static int bg_mdp_ops;
+	bool is_yuv_smart_blit = false;
+
+	/*
+	 * Detect YUV smart blit:
+	 * if the cached BG image plane 0 address is not NULL and the
+	 * source colour format is YUV, then this is a YUV smart blit;
+	 * mark is_yuv_smart_blit true.
+	 */
+	if ((bg_img_param.p0) &&
+		(!(check_if_rgb(blit_op->src.color_fmt))))
+		is_yuv_smart_blit = true;
+
+	sv_slice = sh_slice = dv_slice = dh_slice = 1;
+
+	/* destination format: force even sizes for subsampled planes */
+	ppp_operation_reg |= ppp_dst_op_reg(blit_op->dst.color_fmt);
+	switch (blit_op->dst.color_fmt) {
+	case MDP_Y_CBCR_H2V2:
+	case MDP_Y_CRCB_H2V2:
+		y_h_even_num(&blit_op->dst);
+		y_h_even_num(&blit_op->src);
+		dv_slice = 2;
+		/* fall-through */
+	case MDP_Y_CBCR_H2V1:
+	case MDP_Y_CRCB_H2V1:
+	case MDP_YCRYCB_H2V1:
+		x_w_even_num(&blit_op->dst);
+		x_w_even_num(&blit_op->src);
+		dh_slice = 2;
+		break;
+	default:
+		break;
+	}
+
+	/* source format: record subsampling slice factors */
+	ppp_operation_reg |= ppp_src_op_reg(blit_op->src.color_fmt);
+	switch (blit_op->src.color_fmt) {
+	case MDP_Y_CBCR_H2V2:
+	case MDP_Y_CBCR_H2V2_ADRENO:
+	case MDP_Y_CBCR_H2V2_VENUS:
+	case MDP_Y_CRCB_H2V2:
+		sh_slice = sv_slice = 2;
+		break;
+	case MDP_YCRYCB_H2V1:
+		x_w_even_num(&blit_op->dst);
+		x_w_even_num(&blit_op->src);
+		/* fall-through */
+	case MDP_Y_CBCR_H2V1:
+	case MDP_Y_CRCB_H2V1:
+		sh_slice = 2;
+		break;
+	default:
+		break;
+	}
+
+	config_ppp_csc(blit_op->src.color_fmt,
+		blit_op->dst.color_fmt, &ppp_operation_reg);
+	yuv2rgb = ppp_operation_reg & PPP_OP_CONVERT_YCBCR2RGB;
+
+	if (blit_op->mdp_op & MDPOP_DITHER)
+		ppp_operation_reg |= PPP_OP_DITHER_EN;
+
+	if (blit_op->mdp_op & MDPOP_ROTATION)
+		config_ppp_rotation(blit_op->mdp_op, &ppp_operation_reg);
+
+	/* strides depend on the source format's alignment rules */
+	if (blit_op->src.color_fmt == MDP_Y_CBCR_H2V2_ADRENO) {
+		blit_op->src.stride0 = ALIGN(blit_op->src.prop.width, 32) *
+			ppp_bpp(blit_op->src.color_fmt);
+		blit_op->src.stride1 = 2 * ALIGN(blit_op->src.prop.width/2, 32);
+	} else if (blit_op->src.color_fmt == MDP_Y_CBCR_H2V2_VENUS) {
+		blit_op->src.stride0 = ALIGN(blit_op->src.prop.width, 128)  *
+			ppp_bpp(blit_op->src.color_fmt);
+		blit_op->src.stride1 = blit_op->src.stride0;
+	} else {
+		blit_op->src.stride0 = blit_op->src.prop.width *
+			ppp_bpp(blit_op->src.color_fmt);
+		blit_op->src.stride1 = blit_op->src.stride0;
+	}
+
+	blit_op->dst.stride0 = blit_op->dst.prop.width *
+		ppp_bpp(blit_op->dst.color_fmt);
+
+	/* second plane of a multi-plane destination follows plane 0 */
+	if (ppp_multi_plane(blit_op->dst.color_fmt)) {
+		blit_op->dst.p1 = blit_op->dst.p0;
+		blit_op->dst.p1 += blit_op->dst.prop.width *
+			blit_op->dst.prop.height *
+			ppp_bpp(blit_op->dst.color_fmt);
+	} else {
+		blit_op->dst.p1 = NULL;
+	}
+
+	if ((bg_img_param.p0) && (!(blit_op->mdp_op & MDPOP_SMART_BLIT))) {
+		/*
+		 * Use cached smart blit BG layer info in
+		 * smart Blit FG request
+		 */
+		blit_op->bg = bg_img_param;
+		if (check_if_rgb(blit_op->bg.color_fmt)) {
+			blit_op->bg.p1 = 0;
+			blit_op->bg.stride1 = 0;
+		}
+		memset(&bg_img_param, 0, sizeof(bg_img_param));
+	} else {
+		/* normal blit: background is the destination itself */
+		blit_op->bg = blit_op->dst;
+	}
+	/* Cache smart blit BG layer info */
+	if (blit_op->mdp_op & MDPOP_SMART_BLIT)
+		bg_img_param = blit_op->src;
+
+	/* Jumping from Y-Plane to Chroma Plane */
+	/* first pixel addr calculation */
+	mdp_adjust_start_addr(blit_op, &blit_op->src, sv_slice,
+				sh_slice, LAYER_FG);
+	mdp_adjust_start_addr(blit_op, &blit_op->bg, dv_slice,
+				dh_slice, LAYER_BG);
+	mdp_adjust_start_addr(blit_op, &blit_op->dst, dv_slice,
+				dh_slice, LAYER_FB);
+
+	config_ppp_scale(blit_op, &ppp_operation_reg);
+
+	config_ppp_blend(blit_op, &ppp_operation_reg, is_yuv_smart_blit,
+			bg_alpha);
+
+	config_ppp_src(&blit_op->src, yuv2rgb);
+	config_ppp_out(&blit_op->dst, yuv2rgb);
+
+	/* Cache Smart blit BG alpha and MDP OP values */
+	/* NOTE(review): bg_mdp_ops is written but never read in this
+	 * function; confirm whether it is still needed.
+	 */
+	if (blit_op->mdp_op & MDPOP_SMART_BLIT) {
+		bg_alpha = blit_op->blend.const_alpha;
+		bg_mdp_ops = blit_op->mdp_op;
+	} else {
+		bg_alpha = 0;
+		bg_mdp_ops = 0;
+	}
+	pr_debug("BLIT FG Param Fmt %d (x %d,y %d,w %d,h %d), ",
+		blit_op->src.color_fmt, blit_op->src.prop.x,
+		blit_op->src.prop.y, blit_op->src.prop.width,
+		blit_op->src.prop.height);
+	pr_debug("ROI(x %d,y %d,w %d, h %d) ",
+		blit_op->src.roi.x, blit_op->src.roi.y,
+		blit_op->src.roi.width, blit_op->src.roi.height);
+	pr_debug("Addr_P0 %pK, Stride S0 %d Addr_P1 %pK, Stride S1 %d\n",
+		blit_op->src.p0, blit_op->src.stride0,
+		blit_op->src.p1, blit_op->src.stride1);
+
+	if (blit_op->bg.p0 != blit_op->dst.p0) {
+		pr_debug("BLIT BG Param Fmt %d (x %d,y %d,w %d,h %d), ",
+			blit_op->bg.color_fmt, blit_op->bg.prop.x,
+			blit_op->bg.prop.y, blit_op->bg.prop.width,
+			blit_op->bg.prop.height);
+		pr_debug("ROI(x %d,y %d, w  %d, h %d) ",
+			blit_op->bg.roi.x, blit_op->bg.roi.y,
+			blit_op->bg.roi.width, blit_op->bg.roi.height);
+		pr_debug("Addr %pK, Stride S0 %d Addr_P1 %pK, Stride S1 %d\n",
+			blit_op->bg.p0,	blit_op->bg.stride0,
+			blit_op->bg.p1,	blit_op->bg.stride1);
+	}
+	pr_debug("BLIT FB Param Fmt %d (x %d,y %d,w %d,h %d), ",
+		blit_op->dst.color_fmt, blit_op->dst.prop.x,
+		blit_op->dst.prop.y, blit_op->dst.prop.width,
+		blit_op->dst.prop.height);
+	pr_debug("ROI(x %d,y %d, w %d, h %d) ",
+		blit_op->dst.roi.x, blit_op->dst.roi.y,
+		blit_op->dst.roi.width, blit_op->dst.roi.height);
+	pr_debug("Addr %pK, Stride S0 %d Addr_P1 %pK, Stride S1 %d\n",
+		blit_op->dst.p0, blit_op->dst.stride0,
+		blit_op->dst.p1, blit_op->dst.stride1);
+
+	PPP_WRITEL(ppp_operation_reg, MDP3_PPP_OP_MODE);
+	mb(); /* make sure everything is written before enable */
+	MDSS_XLOG(ppp_operation_reg, blit_op->src.roi.x, blit_op->src.roi.y,
+		blit_op->src.roi.width, blit_op->src.roi.height);
+	MDSS_XLOG(blit_op->dst.roi.x, blit_op->dst.roi.y,
+		blit_op->dst.roi.width, blit_op->dst.roi.height);
+	return 0;
+}
+
+/*
+ * ppp_enable() - kick the PPP engine.
+ *
+ * NOTE(review): the value 0x1000 and register offset 0x30 are magic —
+ * presumably the PPP "start" bit and its register offset per the MDP3
+ * HW spec; confirm and replace with named constants if available.
+ */
+void ppp_enable(void)
+{
+	PPP_WRITEL(0x1000, 0x30);
+	mb(); /* make sure everything is written before enable */
+}
+
+/*
+ * mdp3_ppp_init() - one-time PPP hardware table initialisation.
+ *
+ * Loads the default pre/post LUTs and both CSC matrices
+ * (RGB->YUV primary, secondary table). Always returns 0.
+ */
+int mdp3_ppp_init(void)
+{
+	load_ppp_lut(LUT_PRE_TABLE, ppp_default_pre_lut());
+	load_ppp_lut(LUT_POST_TABLE, ppp_default_post_lut());
+	load_csc_matrix(CSC_PRIMARY_MATRIX, ppp_csc_rgb2yuv());
+	load_csc_matrix(CSC_SECONDARY_MATRIX, ppp_csc_table2());
+	return 0;
+}
diff --git a/drivers/video/fbdev/msm/mdss.h b/drivers/video/fbdev/msm/mdss.h
new file mode 100644
index 0000000..5d9e612
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss.h
@@ -0,0 +1,603 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_H
+#define MDSS_H
+
+#include <linux/msm_ion.h>
+#include <linux/msm_mdp.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/irqreturn.h>
+#include <linux/irqdomain.h>
+#include <linux/mdss_io_util.h>
+
+#include <linux/msm-bus.h>
+#include <linux/file.h>
+#include <linux/dma-direction.h>
+
+#include "mdss_panel.h"
+
+#define MAX_DRV_SUP_MMB_BLKS	44
+#define MAX_DRV_SUP_PIPES 10
+#define MAX_CLIENT_NAME_LEN 20
+
+#define MDSS_PINCTRL_STATE_DEFAULT "mdss_default"
+#define MDSS_PINCTRL_STATE_SLEEP  "mdss_sleep"
+
+/* Clock handles managed by the driver; indexes into mdp_clk[]. */
+enum mdss_mdp_clk_type {
+	MDSS_CLK_AHB,
+	MDSS_CLK_AXI,
+	MDSS_CLK_MDP_CORE,
+	MDSS_CLK_MDP_LUT,
+	MDSS_CLK_MDP_VSYNC,
+	MDSS_MAX_CLK
+};
+
+/* SMMU/IOMMU domain kinds; indexes into mdss_smmu[]. */
+enum mdss_iommu_domain_type {
+	MDSS_IOMMU_DOMAIN_UNSECURE,
+	MDSS_IOMMU_DOMAIN_ROT_UNSECURE,
+	MDSS_IOMMU_DOMAIN_SECURE,
+	MDSS_IOMMU_DOMAIN_ROT_SECURE,
+	MDSS_IOMMU_MAX_DOMAIN
+};
+
+/* Bus bandwidth vote levels, from off to highest. */
+enum mdss_bus_vote_type {
+	VOTE_INDEX_DISABLE,
+	VOTE_INDEX_LOW,
+	VOTE_INDEX_MID,
+	VOTE_INDEX_HIGH,
+	VOTE_INDEX_MAX,
+};
+
+/* One register/value pair to be applied as a HW setting. */
+struct mdss_hw_settings {
+	char __iomem *reg;
+	u32 val;
+};
+
+/* Bandwidth cap for a particular mode. */
+struct mdss_max_bw_settings {
+	u32 mdss_max_bw_mode;
+	u32 mdss_max_bw_val;
+};
+
+/* Debug hook: opaque data plus a clock on/off callback. */
+struct mdss_debug_inf {
+	void *debug_data;
+	void (*debug_enable_clock)(int on);
+};
+
+/* Minimum clock/bus floors for performance tuning. */
+struct mdss_perf_tune {
+	unsigned long min_mdp_clk;
+	u64 min_bus_vote;
+};
+
+/* States for struct mdss_intr.state below. */
+#define MDSS_IRQ_SUSPEND	-1
+#define MDSS_IRQ_RESUME		1
+#define MDSS_IRQ_REQ		0
+
+/* Interrupt bookkeeping, guarded by its own spinlock. */
+struct mdss_intr {
+	/* requested intr */
+	u32 req;
+	/* currently enabled intr */
+	u32 curr;
+	int state;
+	spinlock_t lock;
+};
+
+/* Multipliers used by the simplified prefill calculation. */
+struct simplified_prefill_factors {
+	u32 fmt_mt_nv12_factor;
+	u32 fmt_mt_factor;
+	u32 fmt_linear_factor;
+	u32 scale_factor;
+	u32 xtra_ff_factor;
+};
+
+/* Prefill/traffic-shaper parameters for bandwidth calculation. */
+struct mdss_prefill_data {
+	u32 ot_bytes;
+	u32 y_buf_bytes;
+	u32 y_scaler_lines_bilinear;
+	u32 y_scaler_lines_caf;
+	u32 post_scaler_pixels;
+	u32 pp_pixels;
+	u32 fbc_lines;
+	u32 ts_threshold;
+	u32 ts_end;
+	u32 ts_overhead;
+	struct mult_factor ts_rate;
+	struct simplified_prefill_factors prefill_factors;
+};
+
+/* A DSC (display stream compression) block: index and MMIO base. */
+struct mdss_mdp_dsc {
+	u32 num;
+	char __iomem *base;
+};
+
+/* Hardware block identifiers. */
+enum mdss_hw_index {
+	MDSS_HW_MDP,
+	MDSS_HW_DSI0 = 1,
+	MDSS_HW_DSI1,
+	MDSS_HW_HDMI,
+	MDSS_HW_EDP,
+	MDSS_HW_MISC,
+	MDSS_MAX_HW_BLK
+};
+
+/* Bus client identities (real-time vs non-real-time). */
+enum mdss_bus_clients {
+	MDSS_MDP_RT,
+	MDSS_DSI_RT,
+	MDSS_HW_RT,
+	MDSS_MDP_NRT,
+	MDSS_MAX_BUS_CLIENTS
+};
+
+/* Register offsets of the post-processing sub-blocks. */
+struct mdss_pp_block_off {
+	u32 sspp_igc_lut_off;
+	u32 vig_pcc_off;
+	u32 rgb_pcc_off;
+	u32 dma_pcc_off;
+	u32 lm_pgc_off;
+	u32 dspp_gamut_off;
+	u32 dspp_pcc_off;
+	u32 dspp_pgc_off;
+};
+
+/* HW workaround flags; tracked in mdss_quirk_map. */
+enum mdss_hw_quirk {
+	MDSS_QUIRK_BWCPANIC,
+	MDSS_QUIRK_ROTCDP,
+	MDSS_QUIRK_DOWNSCALE_HANG,
+	MDSS_QUIRK_DSC_RIGHT_ONLY_PU,
+	MDSS_QUIRK_DSC_2SLICE_PU_THRPUT,
+	MDSS_QUIRK_DMA_BI_DIR,
+	MDSS_QUIRK_FMT_PACK_PATTERN,
+	MDSS_QUIRK_NEED_SECURE_MAP,
+	MDSS_QUIRK_SRC_SPLIT_ALWAYS,
+	MDSS_QUIRK_HDR_SUPPORT_ENABLED,
+	MDSS_QUIRK_MAX,
+};
+
+/* HW capability flags; tracked in mdss_caps_map. */
+enum mdss_hw_capabilities {
+	MDSS_CAPS_YUV_CONFIG,
+	MDSS_CAPS_SCM_RESTORE_NOT_REQUIRED,
+	MDSS_CAPS_3D_MUX_UNDERRUN_RECOVERY_SUPPORTED,
+	MDSS_CAPS_MIXER_1_FOR_WB,
+	MDSS_CAPS_QSEED3,
+	MDSS_CAPS_DEST_SCALER,
+	MDSS_CAPS_10_BIT_SUPPORTED,
+	MDSS_CAPS_MAX,
+};
+
+/* QoS feature flags; tracked in mdss_qos_map. */
+enum mdss_qos_settings {
+	MDSS_QOS_PER_PIPE_IB,
+	MDSS_QOS_OVERHEAD_FACTOR,
+	MDSS_QOS_CDP,
+	MDSS_QOS_OTLIM,
+	MDSS_QOS_PER_PIPE_LUT,
+	MDSS_QOS_SIMPLIFIED_PREFILL,
+	MDSS_QOS_VBLANK_PANIC_CTRL,
+	MDSS_QOS_TS_PREFILL,
+	MDSS_QOS_REMAPPER,
+	MDSS_QOS_IB_NOCR,
+	MDSS_QOS_MAX,
+};
+
+/* Source-pipe hardware types. */
+enum mdss_mdp_pipe_type {
+	MDSS_MDP_PIPE_TYPE_INVALID = -1,
+	MDSS_MDP_PIPE_TYPE_VIG = 0,
+	MDSS_MDP_PIPE_TYPE_RGB,
+	MDSS_MDP_PIPE_TYPE_DMA,
+	MDSS_MDP_PIPE_TYPE_CURSOR,
+	MDSS_MDP_PIPE_TYPE_MAX,
+};
+
+/* A named client voting on the register (AHB) bus. */
+struct reg_bus_client {
+	char name[MAX_CLIENT_NAME_LEN];
+	short usecase_ndx;
+	u32 id;
+	struct list_head list;
+};
+
+/* Per-domain SMMU state: device, mapping, power and attach status. */
+struct mdss_smmu_client {
+	struct device *dev;
+	struct dma_iommu_mapping *mmu_mapping;
+	struct dss_module_power mp;
+	struct reg_bus_client *reg_bus_clt;
+	bool domain_attached;
+	bool handoff_pending;
+	char __iomem *mmu_base;
+};
+
+/* QSEED3 scaler LUTs; valid is set once all tables are loaded. */
+struct mdss_mdp_qseed3_lut_tbl {
+	bool valid;
+	u32 *dir_lut;
+	u32 *cir_lut;
+	u32 *sep_lut;
+};
+
+/* Scaler block register layout and destination-scaler info. */
+struct mdss_scaler_block {
+	u32 vig_scaler_off;
+	u32 vig_scaler_lut_off;
+	u32 has_dest_scaler;
+	char __iomem *dest_base;
+	u32 ndest_scalers;
+	u32 *dest_scaler_off;
+	u32 *dest_scaler_lut_off;
+	struct mdss_mdp_qseed3_lut_tbl lut_tbl;
+};
+
+struct mdss_data_type;
+
+/* Dispatch table for SMMU operations (attach/detach, buffer
+ * mapping/allocation, DSI buffers); implementations are selected at
+ * probe time.
+ */
+struct mdss_smmu_ops {
+	int (*smmu_attach)(struct mdss_data_type *mdata);
+	int (*smmu_detach)(struct mdss_data_type *mdata);
+	int (*smmu_get_domain_id)(u32 type);
+	struct dma_buf_attachment  * (*smmu_dma_buf_attach)(
+			struct dma_buf *dma_buf, struct device *devce,
+			int domain);
+	int (*smmu_map_dma_buf)(struct dma_buf *dma_buf,
+			struct sg_table *table, int domain,
+			dma_addr_t *iova, unsigned long *size, int dir);
+	void (*smmu_unmap_dma_buf)(struct sg_table *table, int domain,
+			int dir, struct dma_buf *dma_buf);
+	int (*smmu_dma_alloc_coherent)(struct device *dev, size_t size,
+			dma_addr_t *phys, dma_addr_t *iova, void **cpu_addr,
+			gfp_t gfp, int domain);
+	void (*smmu_dma_free_coherent)(struct device *dev, size_t size,
+			void *cpu_addr, dma_addr_t phys, dma_addr_t iova,
+			int domain);
+	int (*smmu_map)(int domain, phys_addr_t iova, phys_addr_t phys, int
+			gfp_order, int prot);
+	void (*smmu_unmap)(int domain, unsigned long iova, int gfp_order);
+	char * (*smmu_dsi_alloc_buf)(struct device *dev, int size,
+			dma_addr_t *dmap, gfp_t gfp);
+	int (*smmu_dsi_map_buffer)(phys_addr_t phys, unsigned int domain,
+			unsigned long size, dma_addr_t *dma_addr,
+			void *cpu_addr, int dir);
+	void (*smmu_dsi_unmap_buffer)(dma_addr_t dma_addr, int domain,
+			unsigned long size, int dir);
+	void (*smmu_deinit)(struct mdss_data_type *mdata);
+	struct sg_table * (*smmu_sg_table_clone)(struct sg_table *orig_table,
+			gfp_t gfp_mask, bool padding);
+};
+
+struct mdss_data_type {
+	u32 mdp_rev;
+	struct clk *mdp_clk[MDSS_MAX_CLK];
+	struct regulator *fs;
+	struct regulator *venus;
+	struct regulator *vdd_cx;
+	bool batfet_required;
+	struct regulator *batfet;
+	bool en_svs_high;
+	u32 max_mdp_clk_rate;
+	struct mdss_util_intf *mdss_util;
+	struct mdss_panel_data *pdata;
+
+	struct platform_device *pdev;
+	struct dss_io_data mdss_io;
+	struct dss_io_data vbif_io;
+	struct dss_io_data vbif_nrt_io;
+	char __iomem *mdp_base;
+
+	struct mdss_smmu_client mdss_smmu[MDSS_IOMMU_MAX_DOMAIN];
+	struct mdss_smmu_ops smmu_ops;
+	struct mutex reg_lock;
+
+	/* bitmap to track pipes that have BWC enabled */
+	DECLARE_BITMAP(bwc_enable_map, MAX_DRV_SUP_PIPES);
+	/* bitmap to track hw workarounds */
+	DECLARE_BITMAP(mdss_quirk_map, MDSS_QUIRK_MAX);
+	/* bitmap to track total mmbs in use */
+	DECLARE_BITMAP(mmb_alloc_map, MAX_DRV_SUP_MMB_BLKS);
+	/* bitmap to track qos applicable settings */
+	DECLARE_BITMAP(mdss_qos_map, MDSS_QOS_MAX);
+	/* bitmap to track hw capabilities/features */
+	DECLARE_BITMAP(mdss_caps_map, MDSS_CAPS_MAX);
+
+	u32 has_bwc;
+	/* values used when HW has a common panic/robust LUT */
+	u32 default_panic_lut0;
+	u32 default_panic_lut1;
+	u32 default_robust_lut;
+
+	/* values used when HW has panic/robust LUTs per pipe */
+	u32 default_panic_lut_per_pipe_linear;
+	u32 default_panic_lut_per_pipe_tile;
+	u32 default_robust_lut_per_pipe_linear;
+	u32 default_robust_lut_per_pipe_tile;
+
+	u32 has_decimation;
+	bool has_fixed_qos_arbiter_enabled;
+	bool has_panic_ctrl;
+	u32 wfd_mode;
+	u32 has_no_lut_read;
+	atomic_t sd_client_count;
+	u8 has_wb_ad;
+	u8 has_non_scalar_rgb;
+	bool has_src_split;
+	bool idle_pc_enabled;
+	bool has_pingpong_split;
+	bool has_pixel_ram;
+	bool needs_hist_vote;
+	bool has_ubwc;
+	bool has_wb_ubwc;
+	bool has_separate_rotator;
+
+	u32 default_ot_rd_limit;
+	u32 default_ot_wr_limit;
+
+	struct irq_domain *irq_domain;
+	u32 *mdp_irq_mask;
+	u32 mdp_hist_irq_mask;
+	u32 mdp_intf_irq_mask;
+
+	int suspend_fs_ena;
+	u8 clk_ena;
+	u8 fs_ena;
+	u8 vsync_ena;
+
+	struct notifier_block gdsc_cb;
+
+	u32 res_init;
+
+	u32 highest_bank_bit;
+	u32 smp_mb_cnt;
+	u32 smp_mb_size;
+	u32 smp_mb_per_pipe;
+	u32 pixel_ram_size;
+
+	u32 rot_block_size;
+
+	/* HW RT  bus (AXI) */
+	u32 hw_rt_bus_hdl;
+	u32 hw_rt_bus_ref_cnt;
+
+	/* data bus (AXI) */
+	u32 bus_hdl;
+	u32 bus_ref_cnt;
+	struct mutex bus_lock;
+
+	/* register bus (AHB) */
+	u32 reg_bus_hdl;
+	u32 reg_bus_usecase_ndx;
+	struct list_head reg_bus_clist;
+	struct mutex reg_bus_lock;
+	struct reg_bus_client *reg_bus_clt;
+	struct reg_bus_client *pp_reg_bus_clt;
+
+	u32 axi_port_cnt;
+	u32 nrt_axi_port_cnt;
+	u32 bus_channels;
+	u32 curr_bw_uc_idx;
+	u32 ao_bw_uc_idx; /* active only idx */
+	struct msm_bus_scale_pdata *bus_scale_table;
+	struct msm_bus_scale_pdata *reg_bus_scale_table;
+	struct msm_bus_scale_pdata *hw_rt_bus_scale_table;
+	u32 max_bw_low;
+	u32 max_bw_high;
+	u32 max_bw_per_pipe;
+	u32 *vbif_rt_qos;
+	u32 *vbif_nrt_qos;
+	u32 npriority_lvl;
+	u32 rot_dwnscale_min;
+	u32 rot_dwnscale_max;
+
+	struct mult_factor ab_factor;
+	struct mult_factor ib_factor;
+	struct mult_factor ib_factor_overlap;
+	struct mult_factor clk_factor;
+	struct mult_factor per_pipe_ib_factor;
+	bool apply_post_scale_bytes;
+	bool hflip_buffer_reused;
+
+	u32 disable_prefill;
+	u32 *clock_levels;
+	u32 nclk_lvl;
+
+	u32 enable_gate;
+	u32 enable_bw_release;
+	u32 enable_rotator_bw_release;
+	u32 serialize_wait4pp;
+	u32 wait4autorefresh;
+	u32 lines_before_active;
+
+	struct mdss_hw_settings *hw_settings;
+
+	int rects_per_sspp[MDSS_MDP_PIPE_TYPE_MAX];
+	struct mdss_mdp_pipe *vig_pipes;
+	struct mdss_mdp_pipe *rgb_pipes;
+	struct mdss_mdp_pipe *dma_pipes;
+	struct mdss_mdp_pipe *cursor_pipes;
+	u32 nvig_pipes;
+	u32 nrgb_pipes;
+	u32 ndma_pipes;
+	u32 max_target_zorder;
+	u8  ncursor_pipes;
+	u32 max_cursor_size;
+
+	u32 nppb_ctl;
+	u32 *ppb_ctl;
+	u32 nppb_cfg;
+	u32 *ppb_cfg;
+	char __iomem *slave_pingpong_base;
+
+	struct mdss_mdp_mixer *mixer_intf;
+	struct mdss_mdp_mixer *mixer_wb;
+	u32 nmixers_intf;
+	u32 nmixers_wb;
+	u32 max_mixer_width;
+	u32 max_pipe_width;
+
+	struct mdss_mdp_writeback *wb;
+	u32 nwb;
+	u32 *wb_offsets;
+	u32 nwb_offsets;
+	struct mutex wb_lock;
+
+	struct mdss_mdp_ctl *ctl_off;
+	u32 nctl;
+	u32 ndspp;
+
+	struct mdss_mdp_dp_intf *dp_off;
+	u32 ndp;
+	void *video_intf;
+	u32 nintf;
+
+	struct mdss_mdp_ad *ad_off;
+	struct mdss_ad_info *ad_cfgs;
+	u32 nad_cfgs;
+	u32 nmax_concurrent_ad_hw;
+	struct workqueue_struct *ad_calc_wq;
+	u32 ad_debugen;
+
+	struct mdss_intr hist_intr;
+
+	struct ion_client *iclient;
+	int iommu_attached;
+
+	struct debug_bus *dbg_bus;
+	u32 dbg_bus_size;
+	struct vbif_debug_bus *vbif_dbg_bus;
+	u32 vbif_dbg_bus_size;
+	struct vbif_debug_bus *nrt_vbif_dbg_bus;
+	u32 nrt_vbif_dbg_bus_size;
+	struct mdss_debug_inf debug_inf;
+	bool mixer_switched;
+	struct mdss_panel_cfg pan_cfg;
+	struct mdss_prefill_data prefill_data;
+	u32 min_prefill_lines; /* this changes within different chipsets */
+	u32 props;
+
+	int handoff_pending;
+	bool idle_pc;
+	struct mdss_perf_tune perf_tune;
+	bool traffic_shaper_en;
+	int iommu_ref_cnt;
+	u32 latency_buff_per;
+	atomic_t active_intf_cnt;
+	bool has_rot_dwnscale;
+	bool regulator_notif_register;
+
+	u64 ab[MDSS_MAX_BUS_CLIENTS];
+	u64 ib[MDSS_MAX_BUS_CLIENTS];
+	struct mdss_pp_block_off pp_block_off;
+
+	struct mdss_mdp_cdm *cdm_off;
+	u32 ncdm;
+	struct mutex cdm_lock;
+
+	struct mdss_mdp_dsc *dsc_off;
+	u32 ndsc;
+
+	struct mdss_max_bw_settings *max_bw_settings;
+	u32 bw_mode_bitmap;
+	u32 max_bw_settings_cnt;
+	bool bw_limit_pending;
+
+	struct mdss_max_bw_settings *max_per_pipe_bw_settings;
+	u32 mdss_per_pipe_bw_cnt;
+	u32 min_bw_per_pipe;
+
+	u32 bcolor0;
+	u32 bcolor1;
+	u32 bcolor2;
+	struct mdss_scaler_block *scaler_off;
+
+	u32 splash_intf_sel;
+	u32 splash_split_disp;
+};
+
+extern struct mdss_data_type *mdss_res;
+
+struct irq_info {
+	u32 irq;
+	u32 irq_mask;
+	u32 irq_wake_mask;
+	u32 irq_ena;
+	u32 irq_wake_ena;
+	u32 irq_buzy;
+};
+
+struct mdss_hw {
+	u32 hw_ndx;
+	void *ptr;
+	struct irq_info *irq_info;
+	irqreturn_t (*irq_handler)(int irq, void *ptr);
+};
+
+struct irq_info *mdss_intr_line(void);
+void mdss_bus_bandwidth_ctrl(int enable);
+int mdss_iommu_ctrl(int enable);
+int mdss_bus_scale_set_quota(int client, u64 ab_quota, u64 ib_quota);
+int mdss_update_reg_bus_vote(struct reg_bus_client *bus_client,
+				u32 usecase_ndx);
+struct reg_bus_client *mdss_reg_bus_vote_client_create(char *client_name);
+void mdss_reg_bus_vote_client_destroy(struct reg_bus_client *bus_client);
+
+struct mdss_util_intf {
+	bool mdp_probe_done;
+	int (*register_irq)(struct mdss_hw *hw);
+	void (*enable_irq)(struct mdss_hw *hw);
+	void (*disable_irq)(struct mdss_hw *hw);
+	void (*enable_wake_irq)(struct mdss_hw *hw);
+	void (*disable_wake_irq)(struct mdss_hw *hw);
+	void (*disable_irq_nosync)(struct mdss_hw *hw);
+	int (*irq_dispatch)(u32 hw_ndx, int irq, void *ptr);
+	int (*get_iommu_domain)(u32 type);
+	int (*iommu_attached)(void);
+	int (*iommu_ctrl)(int enable);
+	void (*iommu_lock)(void);
+	void (*iommu_unlock)(void);
+	void (*bus_bandwidth_ctrl)(int enable);
+	int (*bus_scale_set_quota)(int client, u64 ab_quota, u64 ib_quota);
+	int (*panel_intf_status)(u32 disp_num, u32 intf_type);
+	struct mdss_panel_cfg* (*panel_intf_type)(int intf_val);
+	int (*dyn_clk_gating_ctrl)(int enable);
+	bool (*param_check)(char *param_string);
+	bool display_disabled;
+};
+
+struct mdss_util_intf *mdss_get_util_intf(void);
+bool mdss_get_irq_enable_state(struct mdss_hw *hw);
+
+static inline int mdss_get_sd_client_cnt(void)
+{
+	if (!mdss_res)
+		return 0;
+	else
+		return atomic_read(&mdss_res->sd_client_count);
+}
+
+static inline void mdss_set_quirk(struct mdss_data_type *mdata,
+	enum mdss_hw_quirk bit)
+{
+	set_bit(bit, mdata->mdss_quirk_map);
+}
+
+static inline bool mdss_has_quirk(struct mdss_data_type *mdata,
+	enum mdss_hw_quirk bit)
+{
+	return test_bit(bit, mdata->mdss_quirk_map);
+}
+
+#define MDSS_VBIF_WRITE(mdata, offset, value, nrt_vbif) \
+		(nrt_vbif ? dss_reg_w(&mdata->vbif_nrt_io, offset, value, 0) :\
+		dss_reg_w(&mdata->vbif_io, offset, value, 0))
+#define MDSS_VBIF_READ(mdata, offset, nrt_vbif) \
+		(nrt_vbif ? dss_reg_r(&mdata->vbif_nrt_io, offset, 0) :\
+		dss_reg_r(&mdata->vbif_io, offset, 0))
+#define MDSS_REG_WRITE(mdata, offset, value) \
+		dss_reg_w(&mdata->mdss_io, offset, value, 0)
+#define MDSS_REG_READ(mdata, offset) \
+		dss_reg_r(&mdata->mdss_io, offset, 0)
+
+#endif /* MDSS_H */
diff --git a/drivers/video/fbdev/msm/mdss_cec_core.c b/drivers/video/fbdev/msm/mdss_cec_core.c
new file mode 100644
index 0000000..23a3ce5
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_cec_core.c
@@ -0,0 +1,799 @@
+/* Copyright (c) 2015-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+
+#include "mdss_fb.h"
+#include "mdss_cec_core.h"
+
+#define CEC_ENABLE_MASK		BIT(0)
+#define CEC_WAKEUP_ENABLE_MASK	BIT(1)
+
+struct cec_msg_node {
+	struct cec_msg msg;
+	struct list_head list;
+};
+
+struct cec_ctl {
+	bool enabled;
+	bool compliance_enabled;
+	bool cec_wakeup_en;
+
+	u8 logical_addr;
+
+	spinlock_t lock;
+	struct list_head msg_head;
+	struct cec_abstract_init_data init_data;
+
+};
+
+static struct cec_ctl *cec_get_ctl(struct device *dev)
+{
+	struct fb_info *fbi;
+	struct msm_fb_data_type *mfd;
+	struct mdss_panel_info *pinfo;
+
+	if (!dev) {
+		pr_err("invalid input\n");
+		goto error;
+	}
+
+	fbi = dev_get_drvdata(dev);
+	if (!fbi) {
+		pr_err("invalid fbi\n");
+		goto error;
+	}
+
+	mfd = fbi->par;
+	if (!mfd) {
+		pr_err("invalid mfd\n");
+		goto error;
+	}
+
+	pinfo = mfd->panel_info;
+	if (!pinfo) {
+		pr_err("invalid pinfo\n");
+		goto error;
+	}
+
+	return pinfo->cec_data;
+
+error:
+	return NULL;
+}
+
+static int cec_msg_send(struct cec_ctl *ctl, struct cec_msg *msg)
+{
+	int ret = -EINVAL;
+	struct cec_ops *ops;
+
+	if (!ctl || !msg) {
+		pr_err("invalid input\n");
+		goto end;
+	}
+
+	ops = ctl->init_data.ops;
+
+	if (ops && ops->send_msg)
+		ret = ops->send_msg(ops->data, msg);
+end:
+	return ret;
+}
+
+static void cec_dump_msg(struct cec_ctl *ctl, struct cec_msg *msg)
+{
+	int i;
+	unsigned long flags;
+
+	if (!ctl || !msg) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	spin_lock_irqsave(&ctl->lock, flags);
+	pr_debug("==%pS dump start ==\n",
+		__builtin_return_address(0));
+
+	pr_debug("cec: sender_id: %d\n", msg->sender_id);
+	pr_debug("cec: recvr_id:  %d\n", msg->recvr_id);
+
+	if (msg->frame_size < 2) {
+		pr_debug("cec: polling message\n");
+		spin_unlock_irqrestore(&ctl->lock, flags);
+		return;
+	}
+
+	pr_debug("cec: opcode: %02x\n", msg->opcode);
+	for (i = 0; i < msg->frame_size - 2; i++)
+		pr_debug("cec: operand(%2d) : %02x\n", i + 1, msg->operand[i]);
+
+	pr_debug("==%pS dump end ==\n",
+		__builtin_return_address(0));
+	spin_unlock_irqrestore(&ctl->lock, flags);
+}
+
+static int cec_disable(struct cec_ctl *ctl)
+{
+	unsigned long flags;
+	int ret = -EINVAL;
+	struct cec_msg_node *msg_node, *tmp;
+	struct cec_ops *ops;
+
+	if (!ctl) {
+		pr_err("Invalid input\n");
+		goto end;
+	}
+
+	spin_lock_irqsave(&ctl->lock, flags);
+	list_for_each_entry_safe(msg_node, tmp, &ctl->msg_head, list) {
+		list_del(&msg_node->list);
+		kfree(msg_node);
+	}
+	spin_unlock_irqrestore(&ctl->lock, flags);
+
+	ops = ctl->init_data.ops;
+
+	if (ops && ops->enable)
+		ret = ops->enable(ops->data, false);
+
+	if (!ret)
+		ctl->enabled = false;
+
+end:
+	return ret;
+}
+
+static int cec_enable(struct cec_ctl *ctl)
+{
+	int ret = -EINVAL;
+	struct cec_ops *ops;
+
+	if (!ctl) {
+		pr_err("Invalid input\n");
+		goto end;
+	}
+
+	INIT_LIST_HEAD(&ctl->msg_head);
+
+	ops = ctl->init_data.ops;
+
+	if (ops && ops->enable)
+		ret = ops->enable(ops->data, true);
+
+	if (!ret)
+		ctl->enabled = true;
+
+end:
+	return ret;
+}
+
+static int cec_send_abort_opcode(struct cec_ctl *ctl,
+	struct cec_msg *in_msg, u8 reason_operand)
+{
+	int i = 0;
+	struct cec_msg out_msg;
+
+	if (!ctl || !in_msg) {
+		pr_err("Invalid input\n");
+		return -EINVAL;
+	}
+
+	out_msg.sender_id = 0x4;
+	out_msg.recvr_id = in_msg->sender_id;
+	out_msg.opcode = 0x0; /* opcode for feature abort */
+	out_msg.operand[i++] = in_msg->opcode;
+	out_msg.operand[i++] = reason_operand;
+	out_msg.frame_size = i + 2;
+
+	return cec_msg_send(ctl, &out_msg);
+}
+
+static int cec_msg_parser(struct cec_ctl *ctl, struct cec_msg *in_msg)
+{
+	int rc = 0, i = 0;
+	struct cec_msg out_msg;
+
+	if (!ctl || !in_msg) {
+		pr_err("Invalid input\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	pr_debug("in_msg->opcode = 0x%x\n", in_msg->opcode);
+	switch (in_msg->opcode) {
+	case CEC_MSG_SET_OSD_STRING:
+		/* Set OSD String */
+		pr_debug("Recvd OSD Str=[0x%x]\n",
+			in_msg->operand[3]);
+		break;
+	case CEC_MSG_GIVE_PHYS_ADDR:
+		/* Give Phy Addr */
+		pr_debug("Recvd a Give Phy Addr cmd\n");
+
+		out_msg.sender_id = 0x4;
+		/* Broadcast */
+		out_msg.recvr_id = 0xF;
+		out_msg.opcode = 0x84;
+		out_msg.operand[i++] = 0x10;
+		out_msg.operand[i++] = 0x0;
+		out_msg.operand[i++] = 0x04;
+		out_msg.frame_size = i + 2;
+
+		rc = cec_msg_send(ctl, &out_msg);
+		break;
+	case CEC_MSG_ABORT:
+		/* Abort */
+		pr_debug("Recvd an abort cmd.\n");
+
+		/* reason = "Refused" */
+		rc = cec_send_abort_opcode(ctl, in_msg, 0x04);
+		break;
+	case CEC_MSG_GIVE_OSD_NAME:
+		/* Give OSD name */
+		pr_debug("Recvd 'Give OSD name' cmd.\n");
+
+		out_msg.sender_id = 0x4;
+		out_msg.recvr_id = in_msg->sender_id;
+		out_msg.opcode = 0x47; /* OSD Name */
+		/* Display control byte */
+		out_msg.operand[i++] = 0x0;
+		out_msg.operand[i++] = 'H';
+		out_msg.operand[i++] = 'e';
+		out_msg.operand[i++] = 'l';
+		out_msg.operand[i++] = 'l';
+		out_msg.operand[i++] = 'o';
+		out_msg.operand[i++] = ' ';
+		out_msg.operand[i++] = 'W';
+		out_msg.operand[i++] = 'o';
+		out_msg.operand[i++] = 'r';
+		out_msg.operand[i++] = 'l';
+		out_msg.operand[i++] = 'd';
+		out_msg.frame_size = i + 2;
+
+		rc = cec_msg_send(ctl, &out_msg);
+		break;
+	case CEC_MSG_GIVE_POWER_STATUS:
+		/* Give Device Power status */
+		pr_debug("Recvd a Power status message\n");
+
+		out_msg.sender_id = 0x4;
+		out_msg.recvr_id = in_msg->sender_id;
+		out_msg.opcode = 0x90; /* OSD String */
+		out_msg.operand[i++] = 'H';
+		out_msg.operand[i++] = 'e';
+		out_msg.operand[i++] = 'l';
+		out_msg.operand[i++] = 'l';
+		out_msg.operand[i++] = 'o';
+		out_msg.operand[i++] = ' ';
+		out_msg.operand[i++] = 'W';
+		out_msg.operand[i++] = 'o';
+		out_msg.operand[i++] = 'r';
+		out_msg.operand[i++] = 'l';
+		out_msg.operand[i++] = 'd';
+		out_msg.frame_size = i + 2;
+
+		rc = cec_msg_send(ctl, &out_msg);
+		break;
+	case CEC_MSG_ROUTE_CHANGE_CMD:
+		/* Routing Change cmd */
+	case CEC_MSG_SET_STREAM_PATH:
+		/* Set Stream Path */
+		pr_debug("Recvd Set Stream or Routing Change cmd\n");
+
+		out_msg.sender_id = 0x4;
+		out_msg.recvr_id = 0xF; /* broadcast this message */
+		out_msg.opcode = 0x82; /* Active Source */
+		out_msg.operand[i++] = 0x10;
+		out_msg.operand[i++] = 0x0;
+		out_msg.frame_size = i + 2;
+
+		rc = cec_msg_send(ctl, &out_msg);
+		if (rc)
+			goto end;
+
+		/* sending <Image View On> message */
+		memset(&out_msg, 0x0, sizeof(struct cec_msg));
+		i = 0;
+		out_msg.sender_id = 0x4;
+		out_msg.recvr_id = in_msg->sender_id;
+		out_msg.opcode = 0x04; /* opcode for Image View On */
+		out_msg.frame_size = i + 2;
+
+		rc = cec_msg_send(ctl, &out_msg);
+		break;
+	case CEC_MSG_USER_CTRL_PRESS:
+		/* User Control Pressed */
+		pr_debug("User Control Pressed\n");
+		break;
+	case CEC_MSG_USER_CTRL_RELEASE:
+		/* User Control Released */
+		pr_debug("User Control Released\n");
+		break;
+	default:
+		pr_debug("Recvd an unknown cmd = [%u]\n",
+			in_msg->opcode);
+
+		/* reason = "Unrecognized opcode" */
+		rc = cec_send_abort_opcode(ctl, in_msg, 0x0);
+		break;
+	}
+end:
+	return rc;
+}
+
+static int cec_msg_recv(void *data, struct cec_msg *msg)
+{
+	unsigned long flags;
+	struct cec_ctl *ctl = data;
+	struct cec_msg_node *msg_node;
+	int ret = 0;
+
+	if (!ctl) {
+		pr_err("invalid input\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	if (!ctl->enabled) {
+		pr_err("cec not enabled\n");
+		ret = -ENODEV;
+		goto end;
+	}
+
+	msg_node = kzalloc(sizeof(*msg_node), GFP_KERNEL);
+	if (!msg_node) {
+		ret = -ENOMEM;
+		goto end;
+	}
+
+	msg_node->msg = *msg;
+
+	pr_debug("CEC read frame done\n");
+	cec_dump_msg(ctl, &msg_node->msg);
+
+	spin_lock_irqsave(&ctl->lock, flags);
+	if (ctl->compliance_enabled) {
+		spin_unlock_irqrestore(&ctl->lock, flags);
+
+		ret = cec_msg_parser(ctl, &msg_node->msg);
+		if (ret)
+			pr_err("msg parsing failed\n");
+
+		kfree(msg_node);
+	} else {
+		list_add_tail(&msg_node->list, &ctl->msg_head);
+		spin_unlock_irqrestore(&ctl->lock, flags);
+
+		/* wake-up sysfs read_msg context */
+		sysfs_notify(ctl->init_data.kobj, "cec", "rd_msg");
+	}
+end:
+	return ret;
+}
+
+static ssize_t cec_rda_enable(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t ret = 0;
+	unsigned long flags;
+	struct cec_ctl *ctl = cec_get_ctl(dev);
+
+	if (!ctl) {
+		pr_err("Invalid input\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	spin_lock_irqsave(&ctl->lock, flags);
+	if (ctl->enabled) {
+		pr_debug("cec is enabled\n");
+		ret = snprintf(buf, PAGE_SIZE, "%d\n", 1);
+	} else {
+		pr_err("cec is disabled\n");
+		ret = snprintf(buf, PAGE_SIZE, "%d\n", 0);
+	}
+	spin_unlock_irqrestore(&ctl->lock, flags);
+end:
+	return ret;
+}
+
+static ssize_t cec_wta_enable(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int val;
+	bool cec_en;
+	ssize_t ret;
+	struct cec_ctl *ctl = cec_get_ctl(dev);
+	struct cec_ops *ops;
+
+	if (!ctl) {
+		pr_err("Invalid input\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	ops = ctl->init_data.ops;
+
+	ret = kstrtoint(buf, 10, &val);
+	if (ret) {
+		pr_err("kstrtoint failed.\n");
+		goto end;
+	}
+
+	cec_en = (val & CEC_ENABLE_MASK) ? true : false;
+
+	/* bit 1 is used for wakeup feature */
+	if ((val & CEC_ENABLE_MASK) && (val & CEC_WAKEUP_ENABLE_MASK))
+		ctl->cec_wakeup_en = true;
+	else
+		ctl->cec_wakeup_en = false;
+
+	if (ops && ops->wakeup_en)
+		ops->wakeup_en(ops->data, ctl->cec_wakeup_en);
+
+	if (ctl->enabled == cec_en) {
+		pr_debug("cec is already %s\n",
+			cec_en ? "enabled" : "disabled");
+		goto bail;
+	}
+
+	if (cec_en)
+		ret = cec_enable(ctl);
+	else
+		ret = cec_disable(ctl);
+
+	if (ret)
+		goto end;
+
+bail:
+	ret = strnlen(buf, PAGE_SIZE);
+end:
+	return ret;
+}
+
+static ssize_t cec_rda_enable_compliance(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	unsigned long flags;
+	ssize_t ret;
+	struct cec_ctl *ctl = cec_get_ctl(dev);
+
+	if (!ctl) {
+		pr_err("Invalid ctl\n");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&ctl->lock, flags);
+	ret = snprintf(buf, PAGE_SIZE, "%d\n",
+		ctl->compliance_enabled);
+
+	spin_unlock_irqrestore(&ctl->lock, flags);
+
+	return ret;
+}
+
+static ssize_t cec_wta_enable_compliance(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int val;
+	ssize_t ret;
+	struct cec_ctl *ctl = cec_get_ctl(dev);
+	struct cec_ops *ops;
+
+	if (!ctl) {
+		pr_err("Invalid ctl\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	ops = ctl->init_data.ops;
+
+	ret = kstrtoint(buf, 10, &val);
+	if (ret) {
+		pr_err("kstrtoint failed.\n");
+		goto end;
+	}
+
+	ctl->compliance_enabled = (val == 1) ? true : false;
+
+	if (ctl->compliance_enabled) {
+		ret = cec_enable(ctl);
+		if (ret)
+			goto end;
+
+		ctl->logical_addr = 0x4;
+
+		if (ops && ops->wt_logical_addr)
+			ops->wt_logical_addr(ops->data, ctl->logical_addr);
+
+	} else {
+		ctl->logical_addr = 0;
+
+		ret = cec_disable(ctl);
+		if (ret)
+			goto end;
+	}
+
+	ret = strnlen(buf, PAGE_SIZE);
+end:
+	return ret;
+}
+
+static ssize_t cec_rda_logical_addr(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	unsigned long flags;
+	ssize_t ret;
+	struct cec_ctl *ctl = cec_get_ctl(dev);
+
+	if (!ctl) {
+		pr_err("Invalid ctl\n");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&ctl->lock, flags);
+	ret = snprintf(buf, PAGE_SIZE, "%d\n", ctl->logical_addr);
+	spin_unlock_irqrestore(&ctl->lock, flags);
+
+	return ret;
+}
+
+static ssize_t cec_wta_logical_addr(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int logical_addr;
+	unsigned long flags;
+	ssize_t ret = strnlen(buf, PAGE_SIZE);
+	struct cec_ctl *ctl = cec_get_ctl(dev);
+	struct cec_ops *ops;
+
+	if (!ctl) {
+		pr_err("Invalid ctl\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	ops = ctl->init_data.ops;
+
+	ret = kstrtoint(buf, 10, &logical_addr);
+	if (ret) {
+		pr_err("kstrtoint failed\n");
+		goto end;
+	}
+
+	if (logical_addr < 0 || logical_addr > 15) {
+		pr_err("Invalid logical address\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	spin_lock_irqsave(&ctl->lock, flags);
+	ctl->logical_addr = (u8)logical_addr;
+	if (ctl->enabled) {
+		if (ops && ops->wt_logical_addr)
+			ops->wt_logical_addr(ops->data, ctl->logical_addr);
+	}
+	spin_unlock_irqrestore(&ctl->lock, flags);
+end:
+	return ret;
+}
+
+static ssize_t cec_rda_msg(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	int i = 0;
+	unsigned long flags;
+	struct cec_msg_node *msg_node, *tmp;
+	struct cec_ctl *ctl = cec_get_ctl(dev);
+	ssize_t ret;
+
+	if (!ctl) {
+		pr_err("Invalid ctl\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	if (!ctl->enabled) {
+		pr_err("cec not enabled\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	spin_lock_irqsave(&ctl->lock, flags);
+
+	if (ctl->compliance_enabled) {
+		spin_unlock_irqrestore(&ctl->lock, flags);
+		pr_err("Read no allowed in compliance mode\n");
+		ret = -EPERM;
+		goto end;
+	}
+
+	if (list_empty_careful(&ctl->msg_head)) {
+		spin_unlock_irqrestore(&ctl->lock, flags);
+		pr_err("CEC message queue is empty\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	list_for_each_entry_safe(msg_node, tmp, &ctl->msg_head, list) {
+		if ((i + 1) * sizeof(struct cec_msg) > PAGE_SIZE) {
+			pr_debug("Overflowing PAGE_SIZE.\n");
+			break;
+		}
+
+		memcpy(buf + (i * sizeof(struct cec_msg)), &msg_node->msg,
+			sizeof(struct cec_msg));
+		list_del(&msg_node->list);
+		kfree(msg_node);
+		i++;
+	}
+
+	spin_unlock_irqrestore(&ctl->lock, flags);
+
+	ret = i * sizeof(struct cec_msg);
+end:
+	return ret;
+}
+
+static ssize_t cec_wta_msg(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	ssize_t ret;
+	unsigned long flags;
+	struct cec_msg *msg = (struct cec_msg *)buf;
+	struct cec_ctl *ctl = cec_get_ctl(dev);
+
+	if (!ctl) {
+		pr_err("Invalid ctl\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	spin_lock_irqsave(&ctl->lock, flags);
+	if (ctl->compliance_enabled) {
+		spin_unlock_irqrestore(&ctl->lock, flags);
+		pr_err("Write not allowed in compliance mode\n");
+		ret = -EPERM;
+		goto end;
+	}
+
+	if (!ctl->enabled) {
+		spin_unlock_irqrestore(&ctl->lock, flags);
+		pr_err("CEC is not configed.\n");
+		ret = -EPERM;
+		goto end;
+	}
+	spin_unlock_irqrestore(&ctl->lock, flags);
+
+	if (msg->frame_size > MAX_OPERAND_SIZE) {
+		pr_err("msg frame too big!\n");
+		ret = -EINVAL;
+		goto end;
+	}
+	ret = cec_msg_send(ctl, msg);
+	if (ret) {
+		pr_err("cec_msg_send failed\n");
+		goto end;
+	}
+
+	ret = sizeof(struct cec_msg);
+end:
+	return ret;
+}
+
+static DEVICE_ATTR(enable, 0644, cec_rda_enable,
+	cec_wta_enable);
+static DEVICE_ATTR(enable_compliance, 0644,
+	cec_rda_enable_compliance, cec_wta_enable_compliance);
+static DEVICE_ATTR(logical_addr, 0600,
+	cec_rda_logical_addr, cec_wta_logical_addr);
+static DEVICE_ATTR(rd_msg, 0444, cec_rda_msg, NULL);
+static DEVICE_ATTR(wr_msg, 0600, NULL, cec_wta_msg);
+
+static struct attribute *cec_fs_attrs[] = {
+	&dev_attr_enable.attr,
+	&dev_attr_enable_compliance.attr,
+	&dev_attr_logical_addr.attr,
+	&dev_attr_rd_msg.attr,
+	&dev_attr_wr_msg.attr,
+	NULL,
+};
+
+static struct attribute_group cec_fs_attr_group = {
+	.name = "cec",
+	.attrs = cec_fs_attrs,
+};
+
+/**
+ * cec_abstract_deinit() - Release CEC abstract module
+ * @input: CEC abstract data
+ *
+ * This API release all the resources allocated for this
+ * module.
+ *
+ * Return: 0 on success otherwise error code.
+ */
+int cec_abstract_deinit(void *input)
+{
+	struct cec_ctl *ctl = (struct cec_ctl *)input;
+
+	if (!ctl)
+		return -EINVAL;
+
+	sysfs_remove_group(ctl->init_data.kobj, &cec_fs_attr_group);
+
+	kfree(ctl);
+
+	return 0;
+}
+
+/**
+ * cec_abstract_init() - Initialize CEC abstract module
+ * @init_data: data needed to initialize the CEC abstraction module
+ *
+ * This API will initialize the CEC abstract module which connects
+ * CEC client with CEC hardware. It creates sysfs nodes for client
+ * to read and write CEC messages. It interacts with hardware with
+ * provided operation function pointers. Also provides callback
+ * function pointers to let the hardware inform about incoming
+ * CEC message.
+ *
+ * Return: pointer to cec abstract data which needs to be passed
+ * as parameter with callback functions.
+ */
+void *cec_abstract_init(struct cec_abstract_init_data *init_data)
+{
+	struct cec_ctl *ctl = NULL;
+	int ret = 0;
+
+	if (!init_data) {
+		pr_err("invalid input\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
+	if (!ctl) {
+		ret = -ENOMEM;
+		goto end;
+	}
+
+	/* keep a copy of init data */
+	ctl->init_data = *init_data;
+
+	ret = sysfs_create_group(ctl->init_data.kobj, &cec_fs_attr_group);
+	if (ret) {
+		pr_err("cec sysfs group creation failed\n");
+		goto end;
+	}
+
+	spin_lock_init(&ctl->lock);
+
+	/* provide callback function pointers */
+	if (init_data->cbs) {
+		init_data->cbs->msg_recv_notify = cec_msg_recv;
+		init_data->cbs->data = ctl;
+	}
+
+	return ctl;
+end:
+	kfree(ctl);
+	return ERR_PTR(ret);
+}
+
diff --git a/drivers/video/fbdev/msm/mdss_cec_core.h b/drivers/video/fbdev/msm/mdss_cec_core.h
new file mode 100644
index 0000000..f8196a0
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_cec_core.h
@@ -0,0 +1,105 @@
+/* Copyright (c) 2015-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_CEC_CORE_H__
+#define __MDSS_CEC_CORE_H__
+
+#define MAX_OPERAND_SIZE	14
+
+/* total size:  HEADER block (1) + opcode block (1) + operands (14) */
+#define MAX_CEC_FRAME_SIZE      (MAX_OPERAND_SIZE + 2)
+
+/* CEC message set */
+#define CEC_MSG_SET_OSD_STRING		0x64
+#define CEC_MSG_GIVE_PHYS_ADDR		0x83
+#define CEC_MSG_ABORT			0xFF
+#define CEC_MSG_GIVE_OSD_NAME		0x46
+#define CEC_MSG_GIVE_POWER_STATUS	0x8F
+#define CEC_MSG_ROUTE_CHANGE_CMD	0x80
+#define CEC_MSG_SET_STREAM_PATH		0x86
+#define CEC_MSG_USER_CTRL_PRESS		0x44
+#define CEC_MSG_USER_CTRL_RELEASE	0x45
+
+/**
+ * struct cec_msg - CEC message related data
+ * @sender_id: CEC message initiator's id
+ * @recvr_id: CEC message destination's id
+ * @opcode: CEC message opcode
+ * @operand: CEC message operands corresponding to opcode
+ * @frame_size: total CEC frame size
+ * @retransmit: number of re-tries to transmit message
+ *
+ * Basic CEC message structure used by both client and driver.
+ */
+struct cec_msg {
+	u8 sender_id;
+	u8 recvr_id;
+	u8 opcode;
+	u8 operand[MAX_OPERAND_SIZE];
+	u8 frame_size;
+	u8 retransmit;
+};
+
+/**
+ * struct cec_ops - CEC operations function pointers
+ * @enable: function pointer to enable CEC
+ * @send_msg: function pointer to send CEC message
+ * @wt_logical_addr: function pointer to write logical address
+ * @wakeup_en: function pointer to enable wakeup feature
+ * @is_wakeup_en: function pointer to query wakeup feature state
+ * @device_suspend: function pointer to update device suspend state
+ * @data: pointer to the data needed to send with operation functions
+ *
+ * Defines all the operations that abstract module can call
+ * to program the CEC driver.
+ */
+struct cec_ops {
+	int (*enable)(void *data, bool enable);
+	int (*send_msg)(void *data,
+		struct cec_msg *msg);
+	void (*wt_logical_addr)(void *data, u8 addr);
+	void (*wakeup_en)(void *data, bool en);
+	bool (*is_wakeup_en)(void *data);
+	void (*device_suspend)(void *data, bool suspend);
+	void *data;
+};
+
+/**
+ * struct cec_cbs - CEC callback function pointers
+ * @msg_recv_notify: function pointer called by CEC driver to notify incoming msg
+ * @data: pointer to data needed to be send with the callback function
+ *
+ * Defines callback functions which CEC driver can callback to notify any
+ * change in the hardware.
+ */
+struct cec_cbs {
+	int (*msg_recv_notify)(void *data, struct cec_msg *msg);
+	void *data;
+};
+
+/**
+ * struct cec_abstract_init_data - initialization data for abstract module
+ * @ops: pointer to struct containing all operation function pointers
+ * @cbs: pointer to struct containing all callback function pointers
+ * @kobj: pointer to kobject instance associated with CEC driver.
+ *
+ * Defines initialization data needed by init API to initialize the module.
+ */
+struct cec_abstract_init_data {
+	struct cec_ops *ops;
+	struct cec_cbs *cbs;
+	struct kobject *kobj;
+};
+
+void *cec_abstract_init(struct cec_abstract_init_data *init_data);
+int cec_abstract_deinit(void *input);
+#endif /* __MDSS_CEC_CORE_H_*/
diff --git a/drivers/video/fbdev/msm/mdss_compat_utils.c b/drivers/video/fbdev/msm/mdss_compat_utils.c
new file mode 100644
index 0000000..06da395
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_compat_utils.c
@@ -0,0 +1,4317 @@
+/*
+ * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 1994 Martin Schaller
+ *
+ * 2001 - Documented with DocBook
+ * - Brad Douglas <brad@neruo.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/compat.h>
+#include <linux/fb.h>
+
+#include <linux/uaccess.h>
+
+#include "mdss_fb.h"
+#include "mdss_compat_utils.h"
+#include "mdss_mdp_hwio.h"
+#include "mdss_mdp.h"
+
+#define MSMFB_CURSOR32 _IOW(MSMFB_IOCTL_MAGIC, 130, struct fb_cursor32)
+#define MSMFB_SET_LUT32 _IOW(MSMFB_IOCTL_MAGIC, 131, struct fb_cmap32)
+#define MSMFB_HISTOGRAM32 _IOWR(MSMFB_IOCTL_MAGIC, 132,\
+					struct mdp_histogram_data32)
+#define MSMFB_GET_CCS_MATRIX32  _IOWR(MSMFB_IOCTL_MAGIC, 133, struct mdp_ccs32)
+#define MSMFB_SET_CCS_MATRIX32  _IOW(MSMFB_IOCTL_MAGIC, 134, struct mdp_ccs32)
+#define MSMFB_OVERLAY_SET32       _IOWR(MSMFB_IOCTL_MAGIC, 135,\
+					struct mdp_overlay32)
+
+#define MSMFB_OVERLAY_GET32      _IOR(MSMFB_IOCTL_MAGIC, 140,\
+					struct mdp_overlay32)
+#define MSMFB_OVERLAY_BLT32       _IOWR(MSMFB_IOCTL_MAGIC, 142,\
+					struct msmfb_overlay_blt32)
+#define MSMFB_HISTOGRAM_START32	_IOR(MSMFB_IOCTL_MAGIC, 144,\
+					struct mdp_histogram_start_req32)
+
+#define MSMFB_OVERLAY_3D32       _IOWR(MSMFB_IOCTL_MAGIC, 147,\
+					struct msmfb_overlay_3d32)
+
+#define MSMFB_MIXER_INFO32       _IOWR(MSMFB_IOCTL_MAGIC, 148,\
+						struct msmfb_mixer_info_req32)
+#define MSMFB_MDP_PP32 _IOWR(MSMFB_IOCTL_MAGIC, 156, struct msmfb_mdp_pp32)
+#define MSMFB_BUFFER_SYNC32  _IOW(MSMFB_IOCTL_MAGIC, 162, struct mdp_buf_sync32)
+#define MSMFB_OVERLAY_PREPARE32		_IOWR(MSMFB_IOCTL_MAGIC, 169, \
+						struct mdp_overlay_list32)
+#define MSMFB_ATOMIC_COMMIT32	_IOWR(MDP_IOCTL_MAGIC, 128, compat_caddr_t)
+
+#define MSMFB_ASYNC_POSITION_UPDATE_32 _IOWR(MDP_IOCTL_MAGIC, 129, \
+		struct mdp_position_update32)
+
+static int __copy_layer_pp_info_params(struct mdp_input_layer *layer,
+				struct mdp_input_layer32 *layer32);
+
+/*
+ * __do_compat_ioctl_nr() - translate a 32-bit ioctl number to the native one
+ *
+ * Maps each MSMFB_*32 ioctl number (whose struct layout differs between the
+ * 32-bit and 64-bit ABIs) to its native counterpart. Numbers not listed are
+ * returned unchanged: those ioctls need no translation.
+ */
+static unsigned int __do_compat_ioctl_nr(unsigned int cmd32)
+{
+	unsigned int cmd;
+
+	switch (cmd32) {
+	case MSMFB_CURSOR32:
+		cmd = MSMFB_CURSOR;
+		break;
+	case MSMFB_SET_LUT32:
+		cmd = MSMFB_SET_LUT;
+		break;
+	case MSMFB_HISTOGRAM32:
+		cmd = MSMFB_HISTOGRAM;
+		break;
+	case MSMFB_GET_CCS_MATRIX32:
+		cmd = MSMFB_GET_CCS_MATRIX;
+		break;
+	case MSMFB_SET_CCS_MATRIX32:
+		cmd = MSMFB_SET_CCS_MATRIX;
+		break;
+	case MSMFB_OVERLAY_SET32:
+		cmd = MSMFB_OVERLAY_SET;
+		break;
+	case MSMFB_OVERLAY_GET32:
+		cmd = MSMFB_OVERLAY_GET;
+		break;
+	case MSMFB_OVERLAY_BLT32:
+		cmd = MSMFB_OVERLAY_BLT;
+		break;
+	case MSMFB_OVERLAY_3D32:
+		cmd = MSMFB_OVERLAY_3D;
+		break;
+	case MSMFB_MIXER_INFO32:
+		cmd = MSMFB_MIXER_INFO;
+		break;
+	case MSMFB_MDP_PP32:
+		cmd = MSMFB_MDP_PP;
+		break;
+	case MSMFB_BUFFER_SYNC32:
+		cmd = MSMFB_BUFFER_SYNC;
+		break;
+	case MSMFB_OVERLAY_PREPARE32:
+		cmd = MSMFB_OVERLAY_PREPARE;
+		break;
+	case MSMFB_ATOMIC_COMMIT32:
+		cmd = MSMFB_ATOMIC_COMMIT;
+		break;
+	case MSMFB_ASYNC_POSITION_UPDATE_32:
+		cmd = MSMFB_ASYNC_POSITION_UPDATE;
+		break;
+	default:
+		cmd = cmd32;
+		break;
+	}
+
+	return cmd;
+}
+
+/*
+ * __copy_atomic_commit_struct() - convert the fixed part of a 32-bit commit
+ *
+ * Copies the scalar members of mdp_layer_commit32 into the native struct.
+ * Pointer-sized members (layer lists, output layer, frc info) are converted
+ * separately by the caller. The reserved area is copied with the smaller of
+ * the two sizes so a layout drift between the ABIs cannot overrun either.
+ */
+static void  __copy_atomic_commit_struct(struct mdp_layer_commit  *commit,
+	struct mdp_layer_commit32 *commit32)
+{
+	unsigned int destsize = sizeof(commit->commit_v1.reserved);
+	unsigned int srcsize = sizeof(commit32->commit_v1.reserved);
+	unsigned int count = (destsize <= srcsize ? destsize : srcsize);
+
+	commit->version = commit32->version;
+	commit->commit_v1.flags = commit32->commit_v1.flags;
+	commit->commit_v1.input_layer_cnt =
+		commit32->commit_v1.input_layer_cnt;
+	commit->commit_v1.left_roi = commit32->commit_v1.left_roi;
+	commit->commit_v1.right_roi = commit32->commit_v1.right_roi;
+	memcpy(&commit->commit_v1.reserved, &commit32->commit_v1.reserved,
+		count);
+}
+
+/*
+ * __create_layer_list32() - pull the 32-bit input layer array from user space
+ *
+ * Allocates a kernel buffer of @layer_count mdp_input_layer32 entries and
+ * fills it from the user pointer held in @commit32. The caller owns the
+ * returned buffer and must kfree() it.
+ *
+ * Return: buffer pointer on success, ERR_PTR(-ENOMEM or -EFAULT) on failure.
+ */
+static struct mdp_input_layer32 *__create_layer_list32(
+	struct mdp_layer_commit32 *commit32,
+	u32 layer_count)
+{
+	u32 buffer_size32;
+	struct mdp_input_layer32 *layer_list32;
+	int ret;
+
+	buffer_size32 = sizeof(struct mdp_input_layer32) * layer_count;
+
+	layer_list32 = kmalloc(buffer_size32, GFP_KERNEL);
+	if (!layer_list32) {
+		layer_list32 = ERR_PTR(-ENOMEM);
+		goto end;
+	}
+
+	ret = copy_from_user(layer_list32,
+			compat_ptr(commit32->commit_v1.input_layers),
+			sizeof(struct mdp_input_layer32) * layer_count);
+	if (ret) {
+		pr_err("layer list32 copy from user failed, ptr %pK\n",
+			compat_ptr(commit32->commit_v1.input_layers));
+		kfree(layer_list32);
+		ret = -EFAULT;
+		layer_list32 = ERR_PTR(ret);
+	}
+
+end:
+	return layer_list32;
+}
+
+/*
+ * __copy_scale_params() - import per-layer scaling data for a compat commit
+ *
+ * No-op unless the layer has MDP_LAYER_ENABLE_PIXEL_EXT set. On success a
+ * freshly allocated mdp_scale_data is attached to @layer->scale; the caller's
+ * cleanup loop is responsible for freeing it.
+ *
+ * Return: 0 on success or when scaling is disabled, negative errno otherwise.
+ */
+static int __copy_scale_params(struct mdp_input_layer *layer,
+	struct mdp_input_layer32 *layer32)
+{
+	struct mdp_scale_data *scale;
+	int ret;
+
+	if (!(layer->flags & MDP_LAYER_ENABLE_PIXEL_EXT))
+		return 0;
+
+	scale = kmalloc(sizeof(struct mdp_scale_data), GFP_KERNEL);
+	if (!scale) {
+		ret = -ENOMEM;
+		goto end;
+	}
+
+	/* scale structure size is same for compat and 64bit version */
+	ret = copy_from_user(scale, compat_ptr(layer32->scale),
+			sizeof(struct mdp_scale_data));
+	if (ret) {
+		kfree(scale);
+		pr_err("scale param copy from user failed, ptr %pK\n",
+			compat_ptr(layer32->scale));
+		ret = -EFAULT;
+	} else {
+		layer->scale = scale;
+	}
+end:
+	return ret;
+}
+
+/*
+ * __create_layer_list() - build the native input layer array from 32-bit data
+ *
+ * Converts every mdp_input_layer32 entry to a native mdp_input_layer,
+ * including the optional per-layer scale and pp_info payloads. On partial
+ * failure, every payload allocated so far is released before returning.
+ * On success the list is attached to @commit and also returned; the caller
+ * frees the list and its payloads.
+ *
+ * Return: layer array on success, ERR_PTR(negative errno) on failure.
+ */
+static struct mdp_input_layer *__create_layer_list(
+	struct mdp_layer_commit *commit,
+	struct mdp_input_layer32 *layer_list32,
+	u32 layer_count)
+{
+	int i, ret = 0;
+	u32 buffer_size;
+	struct mdp_input_layer *layer, *layer_list;
+	struct mdp_input_layer32 *layer32;
+
+	buffer_size = sizeof(struct mdp_input_layer) * layer_count;
+
+	layer_list = kmalloc(buffer_size, GFP_KERNEL);
+	if (!layer_list) {
+		layer_list = ERR_PTR(-ENOMEM);
+		goto end;
+	}
+
+	commit->commit_v1.input_layers = layer_list;
+
+	for (i = 0; i < layer_count; i++) {
+		layer = &layer_list[i];
+		layer32 = &layer_list32[i];
+
+		layer->flags = layer32->flags;
+		layer->pipe_ndx = layer32->pipe_ndx;
+		layer->horz_deci = layer32->horz_deci;
+		layer->vert_deci = layer32->vert_deci;
+		layer->z_order = layer32->z_order;
+		layer->transp_mask = layer32->transp_mask;
+		layer->bg_color = layer32->bg_color;
+		layer->blend_op = layer32->blend_op;
+		layer->alpha = layer32->alpha;
+		layer->color_space = layer32->color_space;
+		layer->src_rect = layer32->src_rect;
+		layer->dst_rect = layer32->dst_rect;
+		layer->buffer = layer32->buffer;
+		memcpy(&layer->reserved, &layer32->reserved,
+			sizeof(layer->reserved));
+
+		/* NULL first so the error unwind below can free blindly */
+		layer->scale = NULL;
+		ret = __copy_scale_params(layer, layer32);
+		if (ret)
+			break;
+
+		layer->pp_info = NULL;
+		ret = __copy_layer_pp_info_params(layer, layer32);
+		if (ret)
+			break;
+	}
+
+	if (ret) {
+		/* release payloads of the layers converted before the failure */
+		for (i--; i >= 0; i--) {
+			kfree(layer_list[i].scale);
+			mdss_mdp_free_layer_pp_info(&layer_list[i]);
+		}
+		kfree(layer_list);
+		layer_list = ERR_PTR(ret);
+	}
+
+end:
+	return layer_list;
+}
+
+/*
+ * __copy_to_user_atomic_commit() - copy commit results back to user space
+ *
+ * Propagates per-layer error codes, the (optional) output layer and the
+ * release/retire fences into the caller's 32-bit structures.
+ *
+ * Return: 0 on success, non-zero (bytes not copied) on fault.
+ */
+static int __copy_to_user_atomic_commit(struct mdp_layer_commit  *commit,
+	struct mdp_layer_commit32 *commit32,
+	struct mdp_input_layer32 *layer_list32,
+	unsigned long argp, u32 layer_count)
+{
+	int i, ret;
+	struct mdp_input_layer *layer_list;
+
+	layer_list = commit->commit_v1.input_layers;
+
+	for (i = 0; i < layer_count; i++)
+		layer_list32[i].error_code = layer_list[i].error_code;
+
+	ret = copy_to_user(compat_ptr(commit32->commit_v1.input_layers),
+		layer_list32,
+		sizeof(struct mdp_input_layer32) * layer_count);
+	if (ret)
+		goto end;
+
+	/*
+	 * An output layer is optional; copying unconditionally would read a
+	 * NULL kernel pointer and skip the fence copy-back below for commits
+	 * that carry no output layer.
+	 */
+	if (commit32->commit_v1.output_layer &&
+			commit->commit_v1.output_layer) {
+		ret = copy_to_user(
+			compat_ptr(commit32->commit_v1.output_layer),
+			commit->commit_v1.output_layer,
+			sizeof(struct mdp_output_layer));
+		if (ret)
+			goto end;
+	}
+
+	commit32->commit_v1.release_fence =
+		commit->commit_v1.release_fence;
+	commit32->commit_v1.retire_fence =
+		commit->commit_v1.retire_fence;
+
+	ret = copy_to_user((void __user *)argp, commit32,
+		sizeof(struct mdp_layer_commit32));
+
+end:
+	return ret;
+}
+
+/*
+ * __compat_atomic_commit() - 32-bit compat handler for MSMFB_ATOMIC_COMMIT
+ *
+ * Copies the 32-bit commit structure and its nested payloads (output layer,
+ * input layer list, FRC info) into native kernel structures, performs the
+ * commit, then copies fences and per-layer error codes back to user space.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int __compat_atomic_commit(struct fb_info *info, unsigned int cmd,
+			 unsigned long argp, struct file *file)
+{
+	int ret, i;
+	struct mdp_layer_commit  commit;
+	struct mdp_layer_commit32 commit32;
+	u32 layer_count;
+	struct mdp_input_layer *layer_list = NULL;
+	struct mdp_input_layer32 *layer_list32 = NULL;
+	struct mdp_output_layer *output_layer = NULL;
+	struct mdp_frc_info *frc_info = NULL;
+
+	/* copy top level memory from 32 bit structure to kernel memory */
+	ret = copy_from_user(&commit32, (void __user *)argp,
+		sizeof(struct mdp_layer_commit32));
+	if (ret) {
+		pr_err("%s:copy_from_user failed, ptr %pK\n", __func__,
+			(void __user *)argp);
+		ret = -EFAULT;
+		return ret;
+	}
+
+	memset(&commit, 0, sizeof(struct mdp_layer_commit));
+	__copy_atomic_commit_struct(&commit, &commit32);
+
+	if (commit32.commit_v1.output_layer) {
+		int buffer_size = sizeof(struct mdp_output_layer);
+
+		output_layer = kzalloc(buffer_size, GFP_KERNEL);
+		if (!output_layer)
+			return -ENOMEM;
+
+		ret = copy_from_user(output_layer,
+				compat_ptr(commit32.commit_v1.output_layer),
+				buffer_size);
+		if (ret) {
+			pr_err("fail to copy output layer from user, ptr %pK\n",
+				compat_ptr(commit32.commit_v1.output_layer));
+			ret = -EFAULT;
+			goto layer_list_err;
+		}
+
+		commit.commit_v1.output_layer = output_layer;
+	}
+
+	layer_count = commit32.commit_v1.input_layer_cnt;
+	if (layer_count > MAX_LAYER_COUNT) {
+		ret = -EINVAL;
+		goto layer_list_err;
+	} else if (layer_count) {
+		/*
+		 * allocate memory for layer list in 32bit domain and copy it
+		 * from user
+		 */
+		layer_list32 = __create_layer_list32(&commit32, layer_count);
+		if (IS_ERR_OR_NULL(layer_list32)) {
+			ret = layer_list32 ? PTR_ERR(layer_list32) : -ENOMEM;
+			/* never hand an ERR_PTR to kfree() on the unwind path */
+			layer_list32 = NULL;
+			goto layer_list_err;
+		}
+
+		/*
+		 * allocate memory for layer list in kernel memory domain and
+		 * copy layer info from 32bit structures to kernel memory
+		 */
+		layer_list = __create_layer_list(&commit, layer_list32,
+			layer_count);
+		if (IS_ERR_OR_NULL(layer_list)) {
+			ret = layer_list ? PTR_ERR(layer_list) : -ENOMEM;
+			/* per-layer payloads were already freed by the helper */
+			layer_list = NULL;
+			goto layer_list_err;
+		}
+	}
+
+	if (commit32.commit_v1.frc_info) {
+		int buffer_size = sizeof(struct mdp_frc_info);
+
+		frc_info = kzalloc(buffer_size, GFP_KERNEL);
+		if (!frc_info) {
+			ret = -ENOMEM;
+			goto frc_err;
+		}
+
+		ret = copy_from_user(frc_info,
+				compat_ptr(commit32.commit_v1.frc_info),
+				buffer_size);
+		if (ret) {
+			pr_err("fail to copy frc info from user, ptr %pK\n",
+				compat_ptr(commit32.commit_v1.frc_info));
+			kfree(frc_info);
+			ret = -EFAULT;
+			goto frc_err;
+		}
+
+		commit.commit_v1.frc_info = frc_info;
+	}
+
+	ret = mdss_fb_atomic_commit(info, &commit, file);
+	if (ret)
+		pr_err("atomic commit failed ret:%d\n", ret);
+
+	/* copy fences/error codes back even when the commit itself failed */
+	if (layer_count)
+		__copy_to_user_atomic_commit(&commit, &commit32, layer_list32,
+			argp, layer_count);
+
+	kfree(frc_info);
+frc_err:
+	/*
+	 * Free the per-layer scale/pp_info payloads here so they are also
+	 * released when the FRC copy fails after the layer list was built.
+	 * layer_list is never an ERR_PTR on this path (failures jump to
+	 * layer_list_err with layer_list reset to NULL).
+	 */
+	for (i = 0; i < layer_count; i++) {
+		kfree(layer_list[i].scale);
+		mdss_mdp_free_layer_pp_info(&layer_list[i]);
+	}
+	kfree(layer_list);
+layer_list_err:
+	kfree(layer_list32);
+	kfree(output_layer);
+	return ret;
+}
+
+/*
+ * __copy_to_user_async_position_update() - copy async-update results back
+ *
+ * Writes the per-layer results back through the user pointer held in the
+ * 32-bit struct, then copies the whole 32-bit struct back to @argp.
+ *
+ * Return: 0 on success, non-zero (bytes not copied) on fault.
+ */
+static int __copy_to_user_async_position_update(
+		struct mdp_position_update *update_pos,
+		struct mdp_position_update32 *update_pos32,
+		unsigned long argp, u32 layer_cnt)
+{
+	int ret;
+
+	ret = copy_to_user(update_pos32->input_layers,
+			update_pos->input_layers,
+			sizeof(struct mdp_async_layer) * layer_cnt);
+	if (ret)
+		goto end;
+
+	ret = copy_to_user((void __user *) argp, update_pos32,
+			sizeof(struct mdp_position_update32));
+
+end:
+	return ret;
+}
+
+/*
+ * __create_async_layer_list() - pull the async layer array from user space
+ *
+ * Allocates a kernel buffer of @layer_cnt mdp_async_layer entries and fills
+ * it from the user pointer in @update_pos32. The caller owns the returned
+ * buffer and must kfree() it.
+ *
+ * Return: buffer pointer on success, ERR_PTR(-ENOMEM or -EFAULT) on failure.
+ */
+static struct mdp_async_layer *__create_async_layer_list(
+	struct mdp_position_update32 *update_pos32, u32 layer_cnt)
+{
+	u32 buffer_size;
+	struct mdp_async_layer *layer_list;
+	int ret;
+
+	buffer_size = sizeof(struct mdp_async_layer) * layer_cnt;
+
+	layer_list = kmalloc(buffer_size, GFP_KERNEL);
+	if (!layer_list) {
+		layer_list = ERR_PTR(-ENOMEM);
+		goto end;
+	}
+
+	ret = copy_from_user(layer_list,
+			update_pos32->input_layers, buffer_size);
+	if (ret) {
+		pr_err("layer list32 copy from user failed\n");
+		kfree(layer_list);
+		/*
+		 * copy_from_user() returns the positive count of bytes not
+		 * copied; ERR_PTR() of a positive value is not detected by
+		 * IS_ERR_OR_NULL() and the caller would dereference garbage.
+		 * Encode the fault as a proper errno instead.
+		 */
+		layer_list = ERR_PTR(-EFAULT);
+	}
+
+end:
+	return layer_list;
+}
+
+/*
+ * __compat_async_position_update() - 32-bit compat handler for
+ * MSMFB_ASYNC_POSITION_UPDATE
+ *
+ * Converts the 32-bit position update request to the native layout, runs the
+ * update, and copies the per-layer results back to user space.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int __compat_async_position_update(struct fb_info *info,
+		unsigned int cmd, unsigned long argp)
+{
+	struct mdp_position_update update_pos;
+	struct mdp_position_update32 update_pos32;
+	struct mdp_async_layer *layer_list = NULL;
+	u32 layer_cnt;
+	int ret, rc;
+
+	/* copy top level memory from 32 bit structure to kernel memory */
+	rc = copy_from_user(&update_pos32, (void __user *)argp,
+		sizeof(struct mdp_position_update32));
+	if (rc) {
+		pr_err("%s:copy_from_user failed\n", __func__);
+		/* rc is a positive byte count here, not an errno */
+		return -EFAULT;
+	}
+
+	update_pos.input_layer_cnt = update_pos32.input_layer_cnt;
+	layer_cnt = update_pos32.input_layer_cnt;
+	if ((!layer_cnt) || (layer_cnt > MAX_LAYER_COUNT)) {
+		pr_err("invalid async layers :%d to update\n", layer_cnt);
+		return -EINVAL;
+	}
+
+	layer_list = __create_async_layer_list(&update_pos32,
+		layer_cnt);
+	if (IS_ERR_OR_NULL(layer_list))
+		return layer_list ? PTR_ERR(layer_list) : -ENOMEM;
+
+	update_pos.input_layers = layer_list;
+
+	ret = mdss_fb_async_position_update(info, &update_pos);
+	if (ret)
+		pr_err("async position update failed ret:%d\n", ret);
+
+	rc = __copy_to_user_async_position_update(&update_pos, &update_pos32,
+			argp, layer_cnt);
+	if (rc) {
+		pr_err("copy to user of async update position failed\n");
+		/* don't let the copy-back failure mask an update failure */
+		if (!ret)
+			ret = -EFAULT;
+	}
+
+	kfree(layer_list);
+	return ret;
+}
+
+/*
+ * mdss_fb_compat_buf_sync() - compat handler for MSMFB_BUFFER_SYNC
+ *
+ * Rebuilds a native mdp_buf_sync on the compat user stack
+ * (compat_alloc_user_space) by widening the three fence-fd array pointers
+ * with compat_ptr(), forwards the ioctl, then copies the resulting release
+ * and retire fence fds back through the 32-bit struct's pointers.
+ */
+static int mdss_fb_compat_buf_sync(struct fb_info *info, unsigned int cmd,
+			 unsigned long arg, struct file *file)
+{
+	struct mdp_buf_sync32 __user *buf_sync32;
+	struct mdp_buf_sync __user *buf_sync;
+	u32 data;
+	int ret;
+
+	buf_sync = compat_alloc_user_space(sizeof(*buf_sync));
+	if (!buf_sync) {
+		pr_err("%s:%u: compat alloc error [%zu] bytes\n",
+			 __func__, __LINE__, sizeof(*buf_sync));
+		return -EINVAL;
+	}
+	buf_sync32 = compat_ptr(arg);
+
+	/* first three u32 members starting at ->flags are layout-identical */
+	if (copy_in_user(&buf_sync->flags, &buf_sync32->flags,
+			 3 * sizeof(u32)))
+		return -EFAULT;
+
+	/* widen each 32-bit user pointer into the native struct */
+	if (get_user(data, &buf_sync32->acq_fen_fd) ||
+	    put_user(compat_ptr(data), &buf_sync->acq_fen_fd) ||
+	    get_user(data, &buf_sync32->rel_fen_fd) ||
+	    put_user(compat_ptr(data), &buf_sync->rel_fen_fd) ||
+	    get_user(data, &buf_sync32->retire_fen_fd) ||
+	    put_user(compat_ptr(data), &buf_sync->retire_fen_fd))
+		return -EFAULT;
+
+	ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) buf_sync, file);
+	if (ret) {
+		pr_err("%s: failed %d\n", __func__, ret);
+		return ret;
+	}
+
+	if (copy_in_user(compat_ptr(buf_sync32->rel_fen_fd),
+			buf_sync->rel_fen_fd,
+			sizeof(int)))
+		return -EFAULT;
+	/*
+	 * A missing retire fence is only an error when the caller explicitly
+	 * requested one; writeback sessions may legitimately not provide it.
+	 */
+	if (copy_in_user(compat_ptr(buf_sync32->retire_fen_fd),
+			buf_sync->retire_fen_fd,
+			sizeof(int))) {
+		if (buf_sync->flags & MDP_BUF_SYNC_FLAG_RETIRE_FENCE)
+			return -EFAULT;
+		pr_debug("%s: no retire fence fd for wb\n",
+			__func__);
+	}
+
+	return ret;
+}
+
+/*
+ * __from_user_fb_cmap() - widen a 32-bit fb_cmap into a native user struct
+ *
+ * start/len are copied user-to-user; the four color table pointers are
+ * widened one by one with compat_ptr().
+ *
+ * Return: 0 on success, -EFAULT on fault.
+ */
+static int __from_user_fb_cmap(struct fb_cmap __user *cmap,
+				struct fb_cmap32 __user *cmap32)
+{
+	__u32 data;
+
+	if (copy_in_user(&cmap->start, &cmap32->start, 2 * sizeof(__u32)))
+		return -EFAULT;
+
+	if (get_user(data, &cmap32->red) ||
+	    put_user(compat_ptr(data), &cmap->red) ||
+	    get_user(data, &cmap32->green) ||
+	    put_user(compat_ptr(data), &cmap->green) ||
+	    get_user(data, &cmap32->blue) ||
+	    put_user(compat_ptr(data), &cmap->blue) ||
+	    get_user(data, &cmap32->transp) ||
+	    put_user(compat_ptr(data), &cmap->transp))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __to_user_fb_cmap() - narrow a native fb_cmap back into the 32-bit struct
+ *
+ * NOTE(review): the color table pointers are read from user memory as
+ * unsigned long and truncated to compat_caddr_t — assumes they were set via
+ * compat_ptr() earlier and therefore fit in 32 bits; confirm against callers.
+ *
+ * Return: 0 on success, -EFAULT on fault.
+ */
+static int __to_user_fb_cmap(struct fb_cmap __user *cmap,
+				struct fb_cmap32 __user *cmap32)
+{
+	unsigned long data;
+
+	if (copy_in_user(&cmap32->start, &cmap->start, 2 * sizeof(__u32)))
+		return -EFAULT;
+
+	if (get_user(data, (unsigned long *) &cmap->red) ||
+	    put_user((compat_caddr_t) data, &cmap32->red) ||
+	    get_user(data, (unsigned long *) &cmap->green) ||
+	    put_user((compat_caddr_t) data, &cmap32->green) ||
+	    get_user(data, (unsigned long *) &cmap->blue) ||
+	    put_user((compat_caddr_t) data, &cmap32->blue) ||
+	    get_user(data, (unsigned long *) &cmap->transp) ||
+	    put_user((compat_caddr_t) data, &cmap32->transp))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __from_user_fb_image() - widen a 32-bit fb_image into a native user struct
+ *
+ * Copies six u32 members starting at ->dx plus ->depth as-is, widens the
+ * pixel data pointer with compat_ptr(), and converts the embedded cmap.
+ *
+ * Return: 0 on success, -EFAULT on fault.
+ */
+static int __from_user_fb_image(struct fb_image __user *image,
+				struct fb_image32 __user *image32)
+{
+	__u32 data;
+
+	if (copy_in_user(&image->dx, &image32->dx, 6 * sizeof(u32)) ||
+		copy_in_user(&image->depth, &image32->depth, sizeof(u8)))
+		return -EFAULT;
+
+	if (get_user(data, &image32->data) ||
+		put_user(compat_ptr(data), &image->data))
+		return -EFAULT;
+
+	if (__from_user_fb_cmap(&image->cmap, &image32->cmap))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * mdss_fb_compat_cursor() - compat handler for MSMFB_CURSOR
+ *
+ * Rebuilds a native fb_cursor on the compat user stack: fixed-size members
+ * are copied user-to-user, the mask pointer is widened, and the embedded
+ * fb_image is converted, then the ioctl is forwarded.
+ */
+static int mdss_fb_compat_cursor(struct fb_info *info, unsigned int cmd,
+			unsigned long arg, struct file *file)
+{
+	struct fb_cursor32 __user *cursor32;
+	struct fb_cursor __user *cursor;
+	__u32 data;
+	int ret;
+
+	cursor = compat_alloc_user_space(sizeof(*cursor));
+	if (!cursor) {
+		pr_err("%s:%u: compat alloc error [%zu] bytes\n",
+			 __func__, __LINE__, sizeof(*cursor));
+		return -EINVAL;
+	}
+	cursor32 = compat_ptr(arg);
+
+	/* three u16 members starting at ->set */
+	if (copy_in_user(&cursor->set, &cursor32->set, 3 * sizeof(u16)))
+		return -EFAULT;
+
+	if (get_user(data, &cursor32->mask) ||
+			put_user(compat_ptr(data), &cursor->mask))
+		return -EFAULT;
+
+	if (copy_in_user(&cursor->hot, &cursor32->hot, sizeof(struct fbcurpos)))
+		return -EFAULT;
+
+	if (__from_user_fb_image(&cursor->image, &cursor32->image))
+		return -EFAULT;
+
+	ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) cursor, file);
+	return ret;
+}
+
+/*
+ * mdss_fb_compat_set_lut() - compat handler for MSMFB_SET_LUT
+ *
+ * Rebuilds a native fb_cmap on the compat user stack by widening the four
+ * color table pointers of the 32-bit cmap, then forwards the ioctl.
+ */
+static int mdss_fb_compat_set_lut(struct fb_info *info, unsigned long arg,
+	struct file *file)
+{
+	struct fb_cmap_user __user *cmap;
+	struct fb_cmap32 __user *cmap32;
+	__u32 data;
+	int ret;
+
+	cmap = compat_alloc_user_space(sizeof(*cmap));
+	if (!cmap) {
+		/* NULL check was missing; keep consistent with the other
+		 * compat handlers (buf_sync, cursor).
+		 */
+		pr_err("%s:%u: compat alloc error [%zu] bytes\n",
+			 __func__, __LINE__, sizeof(*cmap));
+		return -EINVAL;
+	}
+	cmap32 = compat_ptr(arg);
+
+	if (copy_in_user(&cmap->start, &cmap32->start, 2 * sizeof(__u32)))
+		return -EFAULT;
+
+	if (get_user(data, &cmap32->red) ||
+	    put_user(compat_ptr(data), &cmap->red) ||
+	    get_user(data, &cmap32->green) ||
+	    put_user(compat_ptr(data), &cmap->green) ||
+	    get_user(data, &cmap32->blue) ||
+	    put_user(compat_ptr(data), &cmap->blue) ||
+	    get_user(data, &cmap32->transp) ||
+	    put_user(compat_ptr(data), &cmap->transp))
+		return -EFAULT;
+
+	ret = mdss_fb_do_ioctl(info, MSMFB_SET_LUT, (unsigned long) cmap, file);
+	if (!ret)
+		pr_debug("%s: compat ioctl successful\n", __func__);
+
+	return ret;
+}
+
+/*
+ * __from_user_sharp_cfg() - copy sharpening config, 32-bit -> native
+ *
+ * Both arguments are user pointers; copy_in_user() moves each field
+ * user-to-user since the two struct layouts may differ.
+ * Return: 0 on success, -EFAULT on fault.
+ */
+static int __from_user_sharp_cfg(
+			struct mdp_sharp_cfg32 __user *sharp_cfg32,
+			struct mdp_sharp_cfg __user *sharp_cfg)
+{
+	if (copy_in_user(&sharp_cfg->flags,
+			&sharp_cfg32->flags,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&sharp_cfg->strength,
+			&sharp_cfg32->strength,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&sharp_cfg->edge_thr,
+			&sharp_cfg32->edge_thr,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&sharp_cfg->smooth_thr,
+			&sharp_cfg32->smooth_thr,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&sharp_cfg->noise_thr,
+			&sharp_cfg32->noise_thr,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __to_user_sharp_cfg() - copy sharpening config, native -> 32-bit
+ * (user-to-user, field by field; mirror of __from_user_sharp_cfg()).
+ * Return: 0 on success, -EFAULT on fault.
+ */
+static int __to_user_sharp_cfg(
+			struct mdp_sharp_cfg32 __user *sharp_cfg32,
+			struct mdp_sharp_cfg __user *sharp_cfg)
+{
+	if (copy_in_user(&sharp_cfg32->flags,
+			&sharp_cfg->flags,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&sharp_cfg32->strength,
+			&sharp_cfg->strength,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&sharp_cfg32->edge_thr,
+			&sharp_cfg->edge_thr,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&sharp_cfg32->smooth_thr,
+			&sharp_cfg->smooth_thr,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&sharp_cfg32->noise_thr,
+			&sharp_cfg->noise_thr,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __from_user_histogram_cfg() - copy histogram config, 32-bit -> native
+ * (user-to-user, field by field; note the mixed u32/u8/u16 member widths).
+ * Return: 0 on success, -EFAULT on fault.
+ */
+static int __from_user_histogram_cfg(
+			struct mdp_histogram_cfg32 __user *hist_cfg32,
+			struct mdp_histogram_cfg __user *hist_cfg)
+{
+	if (copy_in_user(&hist_cfg->ops,
+			&hist_cfg32->ops,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&hist_cfg->block,
+			&hist_cfg32->block,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&hist_cfg->frame_cnt,
+			&hist_cfg32->frame_cnt,
+			sizeof(uint8_t)) ||
+	    copy_in_user(&hist_cfg->bit_mask,
+			&hist_cfg32->bit_mask,
+			sizeof(uint8_t)) ||
+	    copy_in_user(&hist_cfg->num_bins,
+			&hist_cfg32->num_bins,
+			sizeof(uint16_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __to_user_histogram_cfg() - copy histogram config, native -> 32-bit
+ * (user-to-user, field by field; mirror of __from_user_histogram_cfg()).
+ * Return: 0 on success, -EFAULT on fault.
+ */
+static int __to_user_histogram_cfg(
+			struct mdp_histogram_cfg32 __user *hist_cfg32,
+			struct mdp_histogram_cfg __user *hist_cfg)
+{
+	if (copy_in_user(&hist_cfg32->ops,
+			&hist_cfg->ops,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&hist_cfg32->block,
+			&hist_cfg->block,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&hist_cfg32->frame_cnt,
+			&hist_cfg->frame_cnt,
+			sizeof(uint8_t)) ||
+	    copy_in_user(&hist_cfg32->bit_mask,
+			&hist_cfg->bit_mask,
+			sizeof(uint8_t)) ||
+	    copy_in_user(&hist_cfg32->num_bins,
+			&hist_cfg->num_bins,
+			sizeof(uint16_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __from_user_pcc_coeff() - copy legacy PCC coefficients, 32-bit -> native
+ * (user-to-user, one uint32_t field at a time).
+ * Return: 0 on success, -EFAULT on fault.
+ */
+static int __from_user_pcc_coeff(
+			struct mdp_pcc_coeff32 __user *pcc_coeff32,
+			struct mdp_pcc_coeff __user *pcc_coeff)
+{
+	if (copy_in_user(&pcc_coeff->c,
+			&pcc_coeff32->c,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff->r,
+			&pcc_coeff32->r,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff->g,
+			&pcc_coeff32->g,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff->b,
+			&pcc_coeff32->b,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff->rr,
+			&pcc_coeff32->rr,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff->gg,
+			&pcc_coeff32->gg,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff->bb,
+			&pcc_coeff32->bb,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff->rg,
+			&pcc_coeff32->rg,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff->gb,
+			&pcc_coeff32->gb,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff->rb,
+			&pcc_coeff32->rb,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff->rgb_0,
+			&pcc_coeff32->rgb_0,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff->rgb_1,
+			&pcc_coeff32->rgb_1,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __to_user_pcc_coeff() - copy legacy PCC coefficients, native -> 32-bit
+ * (user-to-user; mirror of __from_user_pcc_coeff()).
+ * Return: 0 on success, -EFAULT on fault.
+ */
+static int __to_user_pcc_coeff(
+			struct mdp_pcc_coeff32 __user *pcc_coeff32,
+			struct mdp_pcc_coeff __user *pcc_coeff)
+{
+	if (copy_in_user(&pcc_coeff32->c,
+			&pcc_coeff->c,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff32->r,
+			&pcc_coeff->r,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff32->g,
+			&pcc_coeff->g,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff32->b,
+			&pcc_coeff->b,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff32->rr,
+			&pcc_coeff->rr,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff32->gg,
+			&pcc_coeff->gg,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff32->bb,
+			&pcc_coeff->bb,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff32->rg,
+			&pcc_coeff->rg,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff32->gb,
+			&pcc_coeff->gb,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff32->rb,
+			&pcc_coeff->rb,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff32->rgb_0,
+			&pcc_coeff->rgb_0,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_coeff32->rgb_1,
+			&pcc_coeff->rgb_1,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __from_user_pcc_coeff_v17() - convert a v1.7 PCC payload, 32-bit -> native
+ *
+ * Bounces the payload through kernel stack copies: read the 32-bit payload,
+ * widen it member by member, then write the native payload out through
+ * pcc_cfg->cfg_payload.
+ *
+ * NOTE(review): pcc_cfg32->cfg_payload and pcc_cfg->cfg_payload are fetched
+ * by directly dereferencing __user struct pointers — this relies on both
+ * structs living in compat_alloc_user_space() memory; confirm with callers.
+ *
+ * Return: 0 on success, -EFAULT on fault.
+ */
+static int __from_user_pcc_coeff_v17(
+			struct mdp_pcc_cfg_data32 __user *pcc_cfg32,
+			struct mdp_pcc_cfg_data __user *pcc_cfg)
+{
+	struct mdp_pcc_data_v1_7_32 pcc_cfg_payload32;
+	struct mdp_pcc_data_v1_7 pcc_cfg_payload;
+
+	if (copy_from_user(&pcc_cfg_payload32,
+			   compat_ptr(pcc_cfg32->cfg_payload),
+			   sizeof(struct mdp_pcc_data_v1_7_32))) {
+		pr_err("failed to copy payload for pcc from user\n");
+		return -EFAULT;
+	}
+
+	memset(&pcc_cfg_payload, 0, sizeof(pcc_cfg_payload));
+	pcc_cfg_payload.r.b = pcc_cfg_payload32.r.b;
+	pcc_cfg_payload.r.g = pcc_cfg_payload32.r.g;
+	pcc_cfg_payload.r.c = pcc_cfg_payload32.r.c;
+	pcc_cfg_payload.r.r = pcc_cfg_payload32.r.r;
+	pcc_cfg_payload.r.gb = pcc_cfg_payload32.r.gb;
+	pcc_cfg_payload.r.rb = pcc_cfg_payload32.r.rb;
+	pcc_cfg_payload.r.rg = pcc_cfg_payload32.r.rg;
+	pcc_cfg_payload.r.rgb = pcc_cfg_payload32.r.rgb;
+
+	pcc_cfg_payload.g.b = pcc_cfg_payload32.g.b;
+	pcc_cfg_payload.g.g = pcc_cfg_payload32.g.g;
+	pcc_cfg_payload.g.c = pcc_cfg_payload32.g.c;
+	pcc_cfg_payload.g.r = pcc_cfg_payload32.g.r;
+	pcc_cfg_payload.g.gb = pcc_cfg_payload32.g.gb;
+	pcc_cfg_payload.g.rb = pcc_cfg_payload32.g.rb;
+	pcc_cfg_payload.g.rg = pcc_cfg_payload32.g.rg;
+	pcc_cfg_payload.g.rgb = pcc_cfg_payload32.g.rgb;
+
+	pcc_cfg_payload.b.b = pcc_cfg_payload32.b.b;
+	pcc_cfg_payload.b.g = pcc_cfg_payload32.b.g;
+	pcc_cfg_payload.b.c = pcc_cfg_payload32.b.c;
+	pcc_cfg_payload.b.r = pcc_cfg_payload32.b.r;
+	pcc_cfg_payload.b.gb = pcc_cfg_payload32.b.gb;
+	pcc_cfg_payload.b.rb = pcc_cfg_payload32.b.rb;
+	pcc_cfg_payload.b.rg = pcc_cfg_payload32.b.rg;
+	pcc_cfg_payload.b.rgb = pcc_cfg_payload32.b.rgb;
+
+	if (copy_to_user(pcc_cfg->cfg_payload, &pcc_cfg_payload,
+			 sizeof(pcc_cfg_payload))) {
+		pr_err("failed to copy payload for pcc to user\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+/*
+ * __from_user_pcc_cfg_data() - convert a PCC config request, 32-bit -> native
+ *
+ * Copies the common header fields, then dispatches on the payload version:
+ * v1.7 uses the cfg_payload pointer, anything else falls back to the legacy
+ * embedded r/g/b coefficient structs.
+ *
+ * Return: 0 on success, -EFAULT on fault.
+ */
+static int __from_user_pcc_cfg_data(
+			struct mdp_pcc_cfg_data32 __user *pcc_cfg32,
+			struct mdp_pcc_cfg_data __user *pcc_cfg)
+{
+	u32 version;
+
+	if (copy_in_user(&pcc_cfg->block,
+			&pcc_cfg32->block,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_cfg->ops,
+			&pcc_cfg32->ops,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pcc_cfg->version,
+			&pcc_cfg32->version,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	if (copy_from_user(&version, &pcc_cfg32->version, sizeof(u32))) {
+		pr_err("failed to copy version for pcc\n");
+		return -EFAULT;
+	}
+
+	switch (version) {
+	case mdp_pcc_v1_7:
+		if (__from_user_pcc_coeff_v17(pcc_cfg32, pcc_cfg)) {
+			pr_err("failed to copy pcc v17 data\n");
+			return -EFAULT;
+		}
+		break;
+	default:
+		pr_debug("pcc version %d not supported use legacy\n", version);
+		if (__from_user_pcc_coeff(
+				compat_ptr((uintptr_t)&pcc_cfg32->r),
+				&pcc_cfg->r) ||
+		    __from_user_pcc_coeff(
+				compat_ptr((uintptr_t)&pcc_cfg32->g),
+				&pcc_cfg->g) ||
+		    __from_user_pcc_coeff(
+				compat_ptr((uintptr_t)&pcc_cfg32->b),
+				&pcc_cfg->b))
+			return -EFAULT;
+		break;
+	}
+	return 0;
+}
+
+/*
+ * __to_user_pcc_coeff_v1_7() - convert a v1.7 PCC payload, native -> 32-bit
+ *
+ * Mirror of __from_user_pcc_coeff_v17(): read the native payload, narrow it
+ * member by member, write the 32-bit payload back through cfg_payload.
+ *
+ * NOTE(review): pcc_cfg->cfg_payload / pcc_cfg32->cfg_payload are fetched by
+ * directly dereferencing __user struct pointers — assumes both structs live
+ * in compat_alloc_user_space() memory; confirm with callers.
+ *
+ * Return: 0 on success, -EFAULT on fault.
+ */
+static int __to_user_pcc_coeff_v1_7(
+			struct mdp_pcc_cfg_data32 __user *pcc_cfg32,
+			struct mdp_pcc_cfg_data __user *pcc_cfg)
+{
+	struct mdp_pcc_data_v1_7_32 pcc_cfg_payload32;
+	struct mdp_pcc_data_v1_7 pcc_cfg_payload;
+
+	memset(&pcc_cfg_payload32, 0, sizeof(pcc_cfg_payload32));
+	if (copy_from_user(&pcc_cfg_payload,
+			   pcc_cfg->cfg_payload,
+			   sizeof(struct mdp_pcc_data_v1_7))) {
+		pr_err("failed to copy payload for pcc from user\n");
+		return -EFAULT;
+	}
+
+	pcc_cfg_payload32.r.b = pcc_cfg_payload.r.b;
+	pcc_cfg_payload32.r.g = pcc_cfg_payload.r.g;
+	pcc_cfg_payload32.r.c = pcc_cfg_payload.r.c;
+	pcc_cfg_payload32.r.r = pcc_cfg_payload.r.r;
+	pcc_cfg_payload32.r.gb = pcc_cfg_payload.r.gb;
+	pcc_cfg_payload32.r.rb = pcc_cfg_payload.r.rb;
+	pcc_cfg_payload32.r.rg = pcc_cfg_payload.r.rg;
+	pcc_cfg_payload32.r.rgb = pcc_cfg_payload.r.rgb;
+
+	pcc_cfg_payload32.g.b = pcc_cfg_payload.g.b;
+	pcc_cfg_payload32.g.g = pcc_cfg_payload.g.g;
+	pcc_cfg_payload32.g.c = pcc_cfg_payload.g.c;
+	pcc_cfg_payload32.g.r = pcc_cfg_payload.g.r;
+	pcc_cfg_payload32.g.gb = pcc_cfg_payload.g.gb;
+	pcc_cfg_payload32.g.rb = pcc_cfg_payload.g.rb;
+	pcc_cfg_payload32.g.rg = pcc_cfg_payload.g.rg;
+	pcc_cfg_payload32.g.rgb = pcc_cfg_payload.g.rgb;
+
+	pcc_cfg_payload32.b.b = pcc_cfg_payload.b.b;
+	pcc_cfg_payload32.b.g = pcc_cfg_payload.b.g;
+	pcc_cfg_payload32.b.c = pcc_cfg_payload.b.c;
+	pcc_cfg_payload32.b.r = pcc_cfg_payload.b.r;
+	pcc_cfg_payload32.b.gb = pcc_cfg_payload.b.gb;
+	pcc_cfg_payload32.b.rb = pcc_cfg_payload.b.rb;
+	pcc_cfg_payload32.b.rg = pcc_cfg_payload.b.rg;
+	pcc_cfg_payload32.b.rgb = pcc_cfg_payload.b.rgb;
+
+	if (copy_to_user(compat_ptr(pcc_cfg32->cfg_payload),
+			 &pcc_cfg_payload32,
+			 sizeof(pcc_cfg_payload32))) {
+		pr_err("failed to copy payload for pcc to user\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+
+/*
+ * __to_user_pcc_cfg_data() - convert PCC config results, native -> 32-bit
+ *
+ * Only copies data back when the caller requested a read (MDP_PP_OPS_READ);
+ * dispatches on the payload version like __from_user_pcc_cfg_data().
+ *
+ * Return: 0 on success (or when no read was requested), -EFAULT on fault.
+ */
+static int __to_user_pcc_cfg_data(
+			struct mdp_pcc_cfg_data32 __user *pcc_cfg32,
+			struct mdp_pcc_cfg_data __user *pcc_cfg)
+{
+	u32 version;
+	u32 ops;
+
+	if (copy_from_user(&ops, &pcc_cfg->ops, sizeof(u32))) {
+		pr_err("failed to copy op for pcc\n");
+		return -EFAULT;
+	}
+
+	if (!(ops & MDP_PP_OPS_READ)) {
+		pr_debug("Read op is not set. Skipping compat copyback\n");
+		return 0;
+	}
+
+	if (copy_from_user(&version, &pcc_cfg->version, sizeof(u32))) {
+		pr_err("failed to copy version for pcc\n");
+		return -EFAULT;
+	}
+
+	switch (version) {
+	case mdp_pcc_v1_7:
+		if (__to_user_pcc_coeff_v1_7(pcc_cfg32, pcc_cfg)) {
+			pr_err("failed to copy pcc v1_7 data\n");
+			return -EFAULT;
+		}
+		break;
+	default:
+		pr_debug("version invalid, fallback to legacy\n");
+
+		if (__to_user_pcc_coeff(
+				compat_ptr((uintptr_t)&pcc_cfg32->r),
+				&pcc_cfg->r) ||
+		    __to_user_pcc_coeff(
+				compat_ptr((uintptr_t)&pcc_cfg32->g),
+				&pcc_cfg->g) ||
+		    __to_user_pcc_coeff(
+				compat_ptr((uintptr_t)&pcc_cfg32->b),
+				&pcc_cfg->b))
+			return -EFAULT;
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * __from_user_csc_cfg() - copy a CSC matrix config, 32-bit -> native
+ * (user-to-user; flags plus the fixed-size mv/bv/lv coefficient arrays).
+ * Return: 0 on success, -EFAULT on fault.
+ */
+static int __from_user_csc_cfg(
+			struct mdp_csc_cfg32 __user *csc_data32,
+			struct mdp_csc_cfg __user *csc_data)
+{
+	if (copy_in_user(&csc_data->flags,
+			&csc_data32->flags,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&csc_data->csc_mv[0],
+			&csc_data32->csc_mv[0],
+			9 * sizeof(uint32_t)) ||
+	    copy_in_user(&csc_data->csc_pre_bv[0],
+			&csc_data32->csc_pre_bv[0],
+			3 * sizeof(uint32_t)) ||
+	    copy_in_user(&csc_data->csc_post_bv[0],
+			&csc_data32->csc_post_bv[0],
+			3 * sizeof(uint32_t)) ||
+	    copy_in_user(&csc_data->csc_pre_lv[0],
+			&csc_data32->csc_pre_lv[0],
+			6 * sizeof(uint32_t)) ||
+	    copy_in_user(&csc_data->csc_post_lv[0],
+			&csc_data32->csc_post_lv[0],
+			6 * sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+/*
+ * __to_user_csc_cfg() - copy a CSC matrix config, native -> 32-bit
+ * (user-to-user; mirror of __from_user_csc_cfg()).
+ * Return: 0 on success, -EFAULT on fault.
+ */
+static int __to_user_csc_cfg(
+			struct mdp_csc_cfg32 __user *csc_data32,
+			struct mdp_csc_cfg __user *csc_data)
+{
+	if (copy_in_user(&csc_data32->flags,
+			&csc_data->flags,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&csc_data32->csc_mv[0],
+			&csc_data->csc_mv[0],
+			9 * sizeof(uint32_t)) ||
+	    copy_in_user(&csc_data32->csc_pre_bv[0],
+			&csc_data->csc_pre_bv[0],
+			3 * sizeof(uint32_t)) ||
+	    copy_in_user(&csc_data32->csc_post_bv[0],
+			&csc_data->csc_post_bv[0],
+			3 * sizeof(uint32_t)) ||
+	    copy_in_user(&csc_data32->csc_pre_lv[0],
+			&csc_data->csc_pre_lv[0],
+			6 * sizeof(uint32_t)) ||
+	    copy_in_user(&csc_data32->csc_post_lv[0],
+			&csc_data->csc_post_lv[0],
+			6 * sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __from_user_csc_cfg_data() - copy a CSC config request, 32-bit -> native
+ * (block id plus the embedded mdp_csc_cfg).
+ * Return: 0 on success, -EFAULT on fault.
+ */
+static int __from_user_csc_cfg_data(
+			struct mdp_csc_cfg_data32 __user *csc_cfg32,
+			struct mdp_csc_cfg_data __user *csc_cfg)
+{
+	if (copy_in_user(&csc_cfg->block,
+			&csc_cfg32->block,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	if (__from_user_csc_cfg(
+			compat_ptr((uintptr_t)&csc_cfg32->csc_data),
+			&csc_cfg->csc_data))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __to_user_csc_cfg_data() - copy CSC config results, native -> 32-bit
+ * (mirror of __from_user_csc_cfg_data()).
+ * Return: 0 on success, -EFAULT on fault.
+ */
+static int __to_user_csc_cfg_data(
+			struct mdp_csc_cfg_data32 __user *csc_cfg32,
+			struct mdp_csc_cfg_data __user *csc_cfg)
+{
+	if (copy_in_user(&csc_cfg32->block,
+			&csc_cfg->block,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	if (__to_user_csc_cfg(
+			compat_ptr((uintptr_t)&csc_cfg32->csc_data),
+			&csc_cfg->csc_data))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __from_user_igc_lut_data_v17() - convert a v1.7 IGC payload, 32-bit -> native
+ *
+ * Bounces the payload through kernel stack copies, widening the two table
+ * pointers with compat_ptr(), then writes the native payload back out
+ * through igc_lut->cfg_payload.
+ *
+ * Return: 0 on success, -EFAULT on fault.
+ */
+static int __from_user_igc_lut_data_v17(
+		struct mdp_igc_lut_data32 __user *igc_lut32,
+		struct mdp_igc_lut_data __user *igc_lut)
+{
+	struct mdp_igc_lut_data_v1_7_32 igc_cfg_payload_32;
+	struct mdp_igc_lut_data_v1_7 igc_cfg_payload;
+
+	if (copy_from_user(&igc_cfg_payload_32,
+			   compat_ptr(igc_lut32->cfg_payload),
+			   sizeof(igc_cfg_payload_32))) {
+		pr_err("failed to copy payload from user for igc\n");
+		return -EFAULT;
+	}
+
+	memset(&igc_cfg_payload, 0, sizeof(igc_cfg_payload));
+	igc_cfg_payload.c0_c1_data = compat_ptr(igc_cfg_payload_32.c0_c1_data);
+	igc_cfg_payload.c2_data = compat_ptr(igc_cfg_payload_32.c2_data);
+	igc_cfg_payload.len = igc_cfg_payload_32.len;
+	igc_cfg_payload.table_fmt = igc_cfg_payload_32.table_fmt;
+	if (copy_to_user(igc_lut->cfg_payload, &igc_cfg_payload,
+			 sizeof(igc_cfg_payload))) {
+		pr_err("failed to copy payload to user for igc\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
/*
 * __from_user_igc_lut_data() - import an IGC LUT config from its
 * 32-bit compat layout, dispatching on the payload version.
 *
 * Scalar fields are mirrored user-to-user; v1.7 payloads go through
 * __from_user_igc_lut_data_v17(), anything else falls back to the
 * legacy layout where c0_c1_data/c2_data are inline compat pointers.
 *
 * NOTE(review): the version field is read from userspace twice (once
 * via copy_in_user into igc_lut, once via get_user for the switch);
 * a racing writer could make the two reads disagree -- presumably
 * harmless here, but confirm downstream consumers re-validate.
 *
 * Return: 0 on success, -EFAULT on a faulting copy or payload error.
 */
static int __from_user_igc_lut_data(
		struct mdp_igc_lut_data32 __user *igc_lut32,
		struct mdp_igc_lut_data __user *igc_lut)
{
	uint32_t data;
	uint32_t version = mdp_igc_vmax;
	int ret = 0;

	if (copy_in_user(&igc_lut->block,
			&igc_lut32->block,
			sizeof(uint32_t)) ||
	    copy_in_user(&igc_lut->len,
			&igc_lut32->len,
			sizeof(uint32_t)) ||
	    copy_in_user(&igc_lut->ops,
			&igc_lut32->ops,
			sizeof(uint32_t)) ||
	    copy_in_user(&igc_lut->version,
			&igc_lut32->version,
			sizeof(uint32_t)))
		return -EFAULT;

	if (get_user(version, &igc_lut32->version)) {
		pr_err("failed to copy the version for IGC\n");
		return -EFAULT;
	}

	switch (version) {
	case mdp_igc_v1_7:
		ret = __from_user_igc_lut_data_v17(igc_lut32, igc_lut);
		if (ret)
			pr_err("failed to copy payload for igc version %d ret %d\n",
				version, ret);
		break;
	default:
		/* Unknown version: treat the struct as the legacy layout. */
		pr_debug("version not supported fallback to legacy %d\n",
			 version);
		if (get_user(data, &igc_lut32->c0_c1_data) ||
		    put_user(compat_ptr(data), &igc_lut->c0_c1_data) ||
		    get_user(data, &igc_lut32->c2_data) ||
		    put_user(compat_ptr(data), &igc_lut->c2_data))
			return -EFAULT;
		break;
	}
	return ret;
}
+
+static int __to_user_igc_lut_data(
+		struct mdp_igc_lut_data32 __user *igc_lut32,
+		struct mdp_igc_lut_data __user *igc_lut)
+{
+	unsigned long data;
+
+	if (copy_in_user(&igc_lut32->block,
+			&igc_lut->block,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&igc_lut32->len,
+			&igc_lut->len,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&igc_lut32->ops,
+			&igc_lut->ops,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	if (get_user(data, (unsigned long *) &igc_lut->c0_c1_data) ||
+	    put_user((compat_caddr_t) data, &igc_lut32->c0_c1_data) ||
+	    get_user(data, (unsigned long *) &igc_lut->c2_data) ||
+	    put_user((compat_caddr_t) data, &igc_lut32->c2_data))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int __from_user_ar_gc_lut_data(
+			struct mdp_ar_gc_lut_data32 __user *ar_gc_data32,
+			struct mdp_ar_gc_lut_data __user *ar_gc_data)
+{
+	if (copy_in_user(&ar_gc_data->x_start,
+			&ar_gc_data32->x_start,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&ar_gc_data->slope,
+			&ar_gc_data32->slope,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&ar_gc_data->offset,
+			&ar_gc_data32->offset,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int __to_user_ar_gc_lut_data(
+			struct mdp_ar_gc_lut_data32 __user *ar_gc_data32,
+			struct mdp_ar_gc_lut_data __user *ar_gc_data)
+{
+	if (copy_in_user(&ar_gc_data32->x_start,
+			&ar_gc_data->x_start,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&ar_gc_data32->slope,
+			&ar_gc_data->slope,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&ar_gc_data32->offset,
+			&ar_gc_data->offset,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+
/*
 * __from_user_pgc_lut_data_v1_7() - import the v1.7 PGC LUT payload.
 *
 * Reads the 32-bit payload struct from userspace, widens the three
 * per-channel data pointers with compat_ptr(), and writes the
 * native-layout payload back out to pgc_lut->cfg_payload.
 *
 * NOTE(review): cfg_payload is dereferenced through a __user struct
 * pointer without get_user(); presumably both structs live in a
 * compat_alloc_user_space() buffer -- confirm.
 *
 * Return: 0 on success, -EFAULT on a faulting copy.
 */
static int __from_user_pgc_lut_data_v1_7(
			struct mdp_pgc_lut_data32 __user *pgc_lut32,
			struct mdp_pgc_lut_data __user *pgc_lut)
{
	struct mdp_pgc_lut_data_v1_7_32 pgc_cfg_payload_32;
	struct mdp_pgc_lut_data_v1_7 pgc_cfg_payload;

	if (copy_from_user(&pgc_cfg_payload_32,
			   compat_ptr(pgc_lut32->cfg_payload),
			   sizeof(pgc_cfg_payload_32))) {
		pr_err("failed to copy from user the pgc32 payload\n");
		return -EFAULT;
	}
	/* Zero first so padding never leaks kernel stack to userspace. */
	memset(&pgc_cfg_payload, 0, sizeof(pgc_cfg_payload));
	pgc_cfg_payload.c0_data = compat_ptr(pgc_cfg_payload_32.c0_data);
	pgc_cfg_payload.c1_data = compat_ptr(pgc_cfg_payload_32.c1_data);
	pgc_cfg_payload.c2_data = compat_ptr(pgc_cfg_payload_32.c2_data);
	pgc_cfg_payload.len = pgc_cfg_payload_32.len;
	if (copy_to_user(pgc_lut->cfg_payload, &pgc_cfg_payload,
			 sizeof(pgc_cfg_payload))) {
		pr_err("failed to copy to user pgc payload\n");
		return -EFAULT;
	}
	return 0;
}
+
+static int __from_user_pgc_lut_data_legacy(
+			struct mdp_pgc_lut_data32 __user *pgc_lut32,
+			struct mdp_pgc_lut_data __user *pgc_lut)
+{
+	struct mdp_ar_gc_lut_data32 __user *r_data_temp32;
+	struct mdp_ar_gc_lut_data32 __user *g_data_temp32;
+	struct mdp_ar_gc_lut_data32 __user *b_data_temp32;
+	struct mdp_ar_gc_lut_data __user *r_data_temp;
+	struct mdp_ar_gc_lut_data __user *g_data_temp;
+	struct mdp_ar_gc_lut_data __user *b_data_temp;
+	uint8_t num_r_stages, num_g_stages, num_b_stages;
+	int i;
+
+	if (copy_from_user(&num_r_stages,
+			&pgc_lut32->num_r_stages,
+			sizeof(uint8_t)) ||
+	    copy_from_user(&num_g_stages,
+			&pgc_lut32->num_g_stages,
+			sizeof(uint8_t)) ||
+	    copy_from_user(&num_b_stages,
+			&pgc_lut32->num_b_stages,
+			sizeof(uint8_t)))
+		return -EFAULT;
+
+	if (num_r_stages > GC_LUT_SEGMENTS || num_b_stages > GC_LUT_SEGMENTS
+	    || num_r_stages > GC_LUT_SEGMENTS || !num_r_stages || !num_b_stages
+	    || !num_g_stages) {
+		pr_err("invalid number of stages r_stages %d b_stages %d g_stages %d\n",
+		       num_r_stages, num_b_stages, num_r_stages);
+		return -EFAULT;
+	}
+
+	r_data_temp32 = compat_ptr((uintptr_t)pgc_lut32->r_data);
+	r_data_temp = pgc_lut->r_data;
+
+	for (i = 0; i < num_r_stages; i++) {
+		if (__from_user_ar_gc_lut_data(
+				&r_data_temp32[i],
+				&r_data_temp[i]))
+			return -EFAULT;
+	}
+
+	g_data_temp32 = compat_ptr((uintptr_t)pgc_lut32->g_data);
+	g_data_temp = pgc_lut->g_data;
+
+	for (i = 0; i < num_g_stages; i++) {
+		if (__from_user_ar_gc_lut_data(
+				&g_data_temp32[i],
+				&g_data_temp[i]))
+			return -EFAULT;
+	}
+
+	b_data_temp32 = compat_ptr((uintptr_t)pgc_lut32->b_data);
+	b_data_temp = pgc_lut->b_data;
+
+	for (i = 0; i < num_b_stages; i++) {
+		if (__from_user_ar_gc_lut_data(
+				&b_data_temp32[i],
+				&b_data_temp[i]))
+			return -EFAULT;
+	}
+	return 0;
+}
+
/*
 * __from_user_pgc_lut_data() - import a PGC LUT config from its
 * 32-bit compat layout, dispatching on the payload version.
 *
 * Scalar fields are mirrored user-to-user, then the payload is
 * converted by the v1.7 or legacy helper depending on the version
 * read from userspace.
 *
 * Return: 0 on success, -EFAULT/-errno from the payload helpers.
 */
static int __from_user_pgc_lut_data(
			struct mdp_pgc_lut_data32 __user *pgc_lut32,
			struct mdp_pgc_lut_data __user *pgc_lut)
{
	u32 version = mdp_pgc_vmax;
	int ret = 0;

	if (copy_in_user(&pgc_lut->block,
			&pgc_lut32->block,
			sizeof(uint32_t)) ||
	    copy_in_user(&pgc_lut->flags,
			&pgc_lut32->flags,
			sizeof(uint32_t)) ||
	    copy_in_user(&pgc_lut->num_r_stages,
			&pgc_lut32->num_r_stages,
			sizeof(uint8_t)) ||
	    copy_in_user(&pgc_lut->num_g_stages,
			&pgc_lut32->num_g_stages,
			sizeof(uint8_t)) ||
	    copy_in_user(&pgc_lut->num_b_stages,
			&pgc_lut32->num_b_stages,
			sizeof(uint8_t)) ||
	    copy_in_user(&pgc_lut->version,
			&pgc_lut32->version,
			sizeof(uint32_t)))
		return -EFAULT;
	/* Second read of the user-held version, this time into the kernel
	 * for the dispatch below.
	 */
	if (copy_from_user(&version, &pgc_lut32->version, sizeof(u32))) {
		pr_err("version copying failed\n");
		return -EFAULT;
	}
	switch (version) {
	case mdp_pgc_v1_7:
		ret = __from_user_pgc_lut_data_v1_7(pgc_lut32, pgc_lut);
		if (ret)
			pr_err("failed to copy pgc v17\n");
		break;
	default:
		/* Unknown version: treat the struct as the legacy layout. */
		pr_debug("version %d not supported fallback to legacy\n",
			 version);
		ret = __from_user_pgc_lut_data_legacy(pgc_lut32, pgc_lut);
		if (ret)
			pr_err("copy from user pgc lut legacy failed ret %d\n",
				ret);
		break;
	}
	return ret;
}
+
+static int __to_user_pgc_lut_data(
+			struct mdp_pgc_lut_data32 __user *pgc_lut32,
+			struct mdp_pgc_lut_data __user *pgc_lut)
+{
+	struct mdp_ar_gc_lut_data32 __user *r_data_temp32;
+	struct mdp_ar_gc_lut_data32 __user *g_data_temp32;
+	struct mdp_ar_gc_lut_data32 __user *b_data_temp32;
+	struct mdp_ar_gc_lut_data __user *r_data_temp;
+	struct mdp_ar_gc_lut_data __user *g_data_temp;
+	struct mdp_ar_gc_lut_data __user *b_data_temp;
+	uint8_t num_r_stages, num_g_stages, num_b_stages;
+	int i;
+
+	if (copy_in_user(&pgc_lut32->block,
+			&pgc_lut->block,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pgc_lut32->flags,
+			&pgc_lut->flags,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pgc_lut32->num_r_stages,
+			&pgc_lut->num_r_stages,
+			sizeof(uint8_t)) ||
+	    copy_in_user(&pgc_lut32->num_g_stages,
+			&pgc_lut->num_g_stages,
+			sizeof(uint8_t)) ||
+	    copy_in_user(&pgc_lut32->num_b_stages,
+			&pgc_lut->num_b_stages,
+			sizeof(uint8_t)))
+		return -EFAULT;
+
+	if (copy_from_user(&num_r_stages,
+			&pgc_lut->num_r_stages,
+			sizeof(uint8_t)) ||
+	    copy_from_user(&num_g_stages,
+			&pgc_lut->num_g_stages,
+			sizeof(uint8_t)) ||
+	    copy_from_user(&num_b_stages,
+			&pgc_lut->num_b_stages,
+			sizeof(uint8_t)))
+		return -EFAULT;
+
+	r_data_temp32 = compat_ptr((uintptr_t)pgc_lut32->r_data);
+	r_data_temp = pgc_lut->r_data;
+	for (i = 0; i < num_r_stages; i++) {
+		if (__to_user_ar_gc_lut_data(
+				&r_data_temp32[i],
+				&r_data_temp[i]))
+			return -EFAULT;
+	}
+
+	g_data_temp32 = compat_ptr((uintptr_t)pgc_lut32->g_data);
+	g_data_temp = pgc_lut->g_data;
+	for (i = 0; i < num_g_stages; i++) {
+		if (__to_user_ar_gc_lut_data(
+				&g_data_temp32[i],
+				&g_data_temp[i]))
+			return -EFAULT;
+	}
+
+	b_data_temp32 = compat_ptr((uintptr_t)pgc_lut32->b_data);
+	b_data_temp = pgc_lut->b_data;
+	for (i = 0; i < num_b_stages; i++) {
+		if (__to_user_ar_gc_lut_data(
+				&b_data_temp32[i],
+				&b_data_temp[i]))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
/*
 * __from_user_hist_lut_data_v1_7() - import the v1.7 hist LUT payload.
 *
 * Reads the 32-bit payload struct from userspace, widens the data
 * pointer with compat_ptr(), and writes the native-layout payload
 * back out to hist_lut->cfg_payload.
 *
 * NOTE(review): cfg_payload is dereferenced through a __user struct
 * pointer without get_user(); presumably both structs live in a
 * compat_alloc_user_space() buffer -- confirm.
 *
 * Return: 0 on success, -EFAULT on a faulting copy.
 */
static int __from_user_hist_lut_data_v1_7(
			struct mdp_hist_lut_data32 __user *hist_lut32,
			struct mdp_hist_lut_data __user *hist_lut)
{
	struct mdp_hist_lut_data_v1_7_32 hist_lut_cfg_payload32;
	struct mdp_hist_lut_data_v1_7 hist_lut_cfg_payload;

	if (copy_from_user(&hist_lut_cfg_payload32,
			compat_ptr(hist_lut32->cfg_payload),
			sizeof(hist_lut_cfg_payload32))) {
		pr_err("failed to copy the Hist Lut payload from userspace\n");
		return -EFAULT;
	}

	/* Zero first so padding never leaks kernel stack to userspace. */
	memset(&hist_lut_cfg_payload, 0, sizeof(hist_lut_cfg_payload));
	hist_lut_cfg_payload.len = hist_lut_cfg_payload32.len;
	hist_lut_cfg_payload.data = compat_ptr(hist_lut_cfg_payload32.data);

	if (copy_to_user(hist_lut->cfg_payload,
			&hist_lut_cfg_payload,
			sizeof(hist_lut_cfg_payload))) {
		pr_err("Failed to copy to user hist lut cfg payload\n");
		return -EFAULT;
	}

	return 0;
}
+
/*
 * __from_user_hist_lut_data() - import a hist LUT config from its
 * 32-bit compat layout, dispatching on the payload version.
 *
 * Scalar fields are mirrored user-to-user; v1.7 payloads go through
 * __from_user_hist_lut_data_v1_7(), anything else falls back to the
 * legacy layout where 'data' is an inline compat pointer.
 *
 * Return: 0 on success, -EFAULT on a faulting copy or payload error.
 */
static int __from_user_hist_lut_data(
			struct mdp_hist_lut_data32 __user *hist_lut32,
			struct mdp_hist_lut_data __user *hist_lut)
{
	uint32_t version = 0;
	uint32_t data;

	if (copy_in_user(&hist_lut->block,
			&hist_lut32->block,
			sizeof(uint32_t)) ||
	    copy_in_user(&hist_lut->version,
			&hist_lut32->version,
			sizeof(uint32_t)) ||
	    copy_in_user(&hist_lut->hist_lut_first,
			&hist_lut32->hist_lut_first,
			sizeof(uint32_t)) ||
	    copy_in_user(&hist_lut->ops,
			&hist_lut32->ops,
			sizeof(uint32_t)) ||
	    copy_in_user(&hist_lut->len,
			&hist_lut32->len,
			sizeof(uint32_t)))
		return -EFAULT;

	/* Second read of the user-held version for the dispatch below. */
	if (copy_from_user(&version,
			&hist_lut32->version,
			sizeof(uint32_t))) {
		pr_err("failed to copy the version info\n");
		return -EFAULT;
	}

	switch (version) {
	case mdp_hist_lut_v1_7:
		if (__from_user_hist_lut_data_v1_7(hist_lut32, hist_lut)) {
			pr_err("failed to get hist lut data for version %d\n",
				version);
			return -EFAULT;
		}
		break;
	default:
		/* Unknown version: treat the struct as the legacy layout. */
		pr_debug("version invalid, fallback to legacy\n");
		if (get_user(data, &hist_lut32->data) ||
		    put_user(compat_ptr(data), &hist_lut->data))
			return -EFAULT;
		break;
	}

	return 0;
}
+
+static int __to_user_hist_lut_data(
+			struct mdp_hist_lut_data32 __user *hist_lut32,
+			struct mdp_hist_lut_data __user *hist_lut)
+{
+	unsigned long data;
+
+	if (copy_in_user(&hist_lut32->block,
+			&hist_lut->block,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&hist_lut32->ops,
+			&hist_lut->ops,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&hist_lut32->len,
+			&hist_lut->len,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	if (get_user(data, (unsigned long *) &hist_lut->data) ||
+	    put_user((compat_caddr_t) data, &hist_lut32->data))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int __from_user_rgb_lut_data(
+				struct mdp_rgb_lut_data32 __user *rgb_lut32,
+				struct mdp_rgb_lut_data __user *rgb_lut)
+{
+	if (copy_in_user(&rgb_lut->flags, &rgb_lut32->flags,
+		sizeof(uint32_t)) ||
+		copy_in_user(&rgb_lut->lut_type, &rgb_lut32->lut_type,
+		sizeof(uint32_t)))
+		return -EFAULT;
+
+	return __from_user_fb_cmap(&rgb_lut->cmap, &rgb_lut32->cmap);
+}
+
+static int __to_user_rgb_lut_data(
+			struct mdp_rgb_lut_data32 __user *rgb_lut32,
+			struct mdp_rgb_lut_data __user *rgb_lut)
+{
+	if (copy_in_user(&rgb_lut32->flags, &rgb_lut->flags,
+		sizeof(uint32_t)) ||
+		copy_in_user(&rgb_lut32->lut_type, &rgb_lut->lut_type,
+		sizeof(uint32_t)))
+		return -EFAULT;
+
+	return __to_user_fb_cmap(&rgb_lut->cmap, &rgb_lut32->cmap);
+}
+
/*
 * __from_user_lut_cfg_data() - import a LUT config from its 32-bit
 * compat layout, dispatching on the LUT type.
 *
 * Copies lut_type (once into the kernel for the switch, once
 * user-to-user into the native struct), then converts the matching
 * union member. Unknown types are silently accepted unchanged.
 *
 * Return: 0 on success or unknown type, -EFAULT from the helpers.
 */
static int __from_user_lut_cfg_data(
			struct mdp_lut_cfg_data32 __user *lut_cfg32,
			struct mdp_lut_cfg_data __user *lut_cfg)
{
	uint32_t lut_type;
	int ret = 0;

	if (copy_from_user(&lut_type, &lut_cfg32->lut_type,
			sizeof(uint32_t)))
		return -EFAULT;

	if (copy_in_user(&lut_cfg->lut_type,
			&lut_cfg32->lut_type,
			sizeof(uint32_t)))
		return -EFAULT;

	switch (lut_type) {
	case mdp_lut_igc:
		ret = __from_user_igc_lut_data(
			compat_ptr((uintptr_t)&lut_cfg32->data.igc_lut_data),
			&lut_cfg->data.igc_lut_data);
		break;
	case mdp_lut_pgc:
		ret = __from_user_pgc_lut_data(
			compat_ptr((uintptr_t)&lut_cfg32->data.pgc_lut_data),
			&lut_cfg->data.pgc_lut_data);
		break;
	case mdp_lut_hist:
		ret = __from_user_hist_lut_data(
			compat_ptr((uintptr_t)&lut_cfg32->data.hist_lut_data),
			&lut_cfg->data.hist_lut_data);
		break;
	case mdp_lut_rgb:
		ret = __from_user_rgb_lut_data(
			compat_ptr((uintptr_t)&lut_cfg32->data.rgb_lut_data),
			&lut_cfg->data.rgb_lut_data);
		break;
	default:
		break;
	}

	return ret;
}
+
/*
 * __to_user_lut_cfg_data() - export a LUT config back to its 32-bit
 * compat layout, dispatching on the LUT type.
 *
 * Reads lut_type from the native user struct, mirrors it into the
 * compat struct, then converts the matching union member. Unknown
 * types are silently accepted unchanged.
 *
 * Return: 0 on success or unknown type, -EFAULT from the helpers.
 */
static int __to_user_lut_cfg_data(
			struct mdp_lut_cfg_data32 __user *lut_cfg32,
			struct mdp_lut_cfg_data __user *lut_cfg)
{
	uint32_t lut_type;
	int ret = 0;

	if (copy_from_user(&lut_type, &lut_cfg->lut_type,
			sizeof(uint32_t)))
		return -EFAULT;

	if (copy_in_user(&lut_cfg32->lut_type,
			&lut_cfg->lut_type,
			sizeof(uint32_t)))
		return -EFAULT;

	switch (lut_type) {
	case mdp_lut_igc:
		ret = __to_user_igc_lut_data(
			compat_ptr((uintptr_t)&lut_cfg32->data.igc_lut_data),
			&lut_cfg->data.igc_lut_data);
		break;
	case mdp_lut_pgc:
		ret = __to_user_pgc_lut_data(
			compat_ptr((uintptr_t)&lut_cfg32->data.pgc_lut_data),
			&lut_cfg->data.pgc_lut_data);
		break;
	case mdp_lut_hist:
		ret = __to_user_hist_lut_data(
			compat_ptr((uintptr_t)&lut_cfg32->data.hist_lut_data),
			&lut_cfg->data.hist_lut_data);
		break;
	case mdp_lut_rgb:
		ret = __to_user_rgb_lut_data(
			compat_ptr((uintptr_t)&lut_cfg32->data.rgb_lut_data),
			&lut_cfg->data.rgb_lut_data);
		break;
	default:
		break;
	}

	return ret;
}
+
+static int __from_user_qseed_cfg(
+			struct mdp_qseed_cfg32 __user *qseed_data32,
+			struct mdp_qseed_cfg __user *qseed_data)
+{
+	uint32_t data;
+
+	if (copy_in_user(&qseed_data->table_num,
+			&qseed_data32->table_num,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&qseed_data->ops,
+			&qseed_data32->ops,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&qseed_data->len,
+			&qseed_data32->len,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	if (get_user(data, &qseed_data32->data) ||
+	    put_user(compat_ptr(data), &qseed_data->data))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int __to_user_qseed_cfg(
+			struct mdp_qseed_cfg32 __user *qseed_data32,
+			struct mdp_qseed_cfg __user *qseed_data)
+{
+	unsigned long data;
+
+	if (copy_in_user(&qseed_data32->table_num,
+			&qseed_data->table_num,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&qseed_data32->ops,
+			&qseed_data->ops,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&qseed_data32->len,
+			&qseed_data->len,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	if (get_user(data, (unsigned long *) &qseed_data->data) ||
+	    put_user((compat_caddr_t) data, &qseed_data32->data))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int __from_user_qseed_cfg_data(
+			struct mdp_qseed_cfg_data32 __user *qseed_cfg32,
+			struct mdp_qseed_cfg_data __user *qseed_cfg)
+{
+	if (copy_in_user(&qseed_cfg->block,
+			&qseed_cfg32->block,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	if (__from_user_qseed_cfg(
+			compat_ptr((uintptr_t)&qseed_cfg32->qseed_data),
+			&qseed_cfg->qseed_data))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int __to_user_qseed_cfg_data(
+			struct mdp_qseed_cfg_data32 __user *qseed_cfg32,
+			struct mdp_qseed_cfg_data __user *qseed_cfg)
+{
+	if (copy_in_user(&qseed_cfg32->block,
+			&qseed_cfg->block,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	if (__to_user_qseed_cfg(
+			compat_ptr((uintptr_t)&qseed_cfg32->qseed_data),
+			&qseed_cfg->qseed_data))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int __from_user_bl_scale_data(
+			struct mdp_bl_scale_data32 __user *bl_scale32,
+			struct mdp_bl_scale_data __user *bl_scale)
+{
+	if (copy_in_user(&bl_scale->min_lvl,
+			&bl_scale32->min_lvl,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&bl_scale->scale,
+			&bl_scale32->scale,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int __from_user_pa_cfg(
+			struct mdp_pa_cfg32 __user *pa_data32,
+			struct mdp_pa_cfg __user *pa_data)
+{
+	if (copy_in_user(&pa_data->flags,
+			&pa_data32->flags,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pa_data->hue_adj,
+			&pa_data32->hue_adj,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pa_data->sat_adj,
+			&pa_data32->sat_adj,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pa_data->val_adj,
+			&pa_data32->val_adj,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pa_data->cont_adj,
+			&pa_data32->cont_adj,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int __to_user_pa_cfg(
+			struct mdp_pa_cfg32 __user *pa_data32,
+			struct mdp_pa_cfg __user *pa_data)
+{
+	if (copy_in_user(&pa_data32->flags,
+			&pa_data->flags,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pa_data32->hue_adj,
+			&pa_data->hue_adj,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pa_data32->sat_adj,
+			&pa_data->sat_adj,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pa_data32->val_adj,
+			&pa_data->val_adj,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&pa_data32->cont_adj,
+			&pa_data->cont_adj,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int __from_user_pa_cfg_data(
+			struct mdp_pa_cfg_data32 __user *pa_cfg32,
+			struct mdp_pa_cfg_data __user *pa_cfg)
+{
+	if (copy_in_user(&pa_cfg->block,
+			&pa_cfg32->block,
+			sizeof(uint32_t)))
+		return -EFAULT;
+	if (__from_user_pa_cfg(
+			compat_ptr((uintptr_t)&pa_cfg32->pa_data),
+			&pa_cfg->pa_data))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int __to_user_pa_cfg_data(
+			struct mdp_pa_cfg_data32 __user *pa_cfg32,
+			struct mdp_pa_cfg_data __user *pa_cfg)
+{
+	if (copy_in_user(&pa_cfg32->block,
+			&pa_cfg->block,
+			sizeof(uint32_t)))
+		return -EFAULT;
+	if (__to_user_pa_cfg(
+			compat_ptr((uintptr_t)&pa_cfg32->pa_data),
+			&pa_cfg->pa_data))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int __from_user_mem_col_cfg(
+			struct mdp_pa_mem_col_cfg32 __user *mem_col_cfg32,
+			struct mdp_pa_mem_col_cfg __user *mem_col_cfg)
+{
+	if (copy_in_user(&mem_col_cfg->color_adjust_p0,
+			&mem_col_cfg32->color_adjust_p0,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&mem_col_cfg->color_adjust_p1,
+			&mem_col_cfg32->color_adjust_p1,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&mem_col_cfg->hue_region,
+			&mem_col_cfg32->hue_region,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&mem_col_cfg->sat_region,
+			&mem_col_cfg32->sat_region,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&mem_col_cfg->val_region,
+			&mem_col_cfg32->val_region,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int __to_user_mem_col_cfg(
+			struct mdp_pa_mem_col_cfg32 __user *mem_col_cfg32,
+			struct mdp_pa_mem_col_cfg __user *mem_col_cfg)
+{
+	if (copy_in_user(&mem_col_cfg32->color_adjust_p0,
+			&mem_col_cfg->color_adjust_p0,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&mem_col_cfg32->color_adjust_p1,
+			&mem_col_cfg->color_adjust_p1,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&mem_col_cfg32->hue_region,
+			&mem_col_cfg->hue_region,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&mem_col_cfg32->sat_region,
+			&mem_col_cfg->sat_region,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&mem_col_cfg32->val_region,
+			&mem_col_cfg->val_region,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
/*
 * __from_user_pa_v2_data() - import a legacy PA v2 config from its
 * 32-bit compat layout.
 *
 * Mirrors the scalar fields user-to-user, widens the two six-zone
 * curve pointers with compat_ptr(), then converts the three embedded
 * memory-color configs (skin/sky/foliage).
 *
 * Return: 0 on success, -EFAULT on a faulting copy.
 */
static int __from_user_pa_v2_data(
			struct mdp_pa_v2_data32 __user *pa_v2_data32,
			struct mdp_pa_v2_data __user *pa_v2_data)
{
	uint32_t data;

	if (copy_in_user(&pa_v2_data->flags,
			&pa_v2_data32->flags,
			sizeof(uint32_t)) ||
	    copy_in_user(&pa_v2_data->global_hue_adj,
			&pa_v2_data32->global_hue_adj,
			sizeof(uint32_t)) ||
	    copy_in_user(&pa_v2_data->global_sat_adj,
			&pa_v2_data32->global_sat_adj,
			sizeof(uint32_t)) ||
	    copy_in_user(&pa_v2_data->global_val_adj,
			&pa_v2_data32->global_val_adj,
			sizeof(uint32_t)) ||
	    copy_in_user(&pa_v2_data->global_cont_adj,
			&pa_v2_data32->global_cont_adj,
			sizeof(uint32_t)) ||
	    copy_in_user(&pa_v2_data->six_zone_thresh,
			&pa_v2_data32->six_zone_thresh,
			sizeof(uint32_t)) ||
	    copy_in_user(&pa_v2_data->six_zone_len,
			&pa_v2_data32->six_zone_len,
			sizeof(uint32_t)))
		return -EFAULT;

	/* Pointer fields: widen 32-bit user addresses to native ones. */
	if (get_user(data, &pa_v2_data32->six_zone_curve_p0) ||
	    put_user(compat_ptr(data), &pa_v2_data->six_zone_curve_p0) ||
	    get_user(data, &pa_v2_data32->six_zone_curve_p1) ||
	    put_user(compat_ptr(data), &pa_v2_data->six_zone_curve_p1))
		return -EFAULT;

	if (__from_user_mem_col_cfg(
			compat_ptr((uintptr_t)&pa_v2_data32->skin_cfg),
			&pa_v2_data->skin_cfg) ||
	    __from_user_mem_col_cfg(
			compat_ptr((uintptr_t)&pa_v2_data32->sky_cfg),
			&pa_v2_data->sky_cfg) ||
	    __from_user_mem_col_cfg(
			compat_ptr((uintptr_t)&pa_v2_data32->fol_cfg),
			&pa_v2_data->fol_cfg))
		return -EFAULT;

	return 0;
}
+
/*
 * __to_user_pa_v2_data() - export a legacy PA v2 config back to its
 * 32-bit compat layout.
 *
 * Mirrors the scalar fields user-to-user, narrows the two six-zone
 * curve pointers to compat_caddr_t, then converts the three embedded
 * memory-color configs (skin/sky/foliage).
 *
 * Return: 0 on success, -EFAULT on a faulting copy.
 */
static int __to_user_pa_v2_data(
			struct mdp_pa_v2_data32 __user *pa_v2_data32,
			struct mdp_pa_v2_data __user *pa_v2_data)
{
	unsigned long data;

	if (copy_in_user(&pa_v2_data32->flags,
			&pa_v2_data->flags,
			sizeof(uint32_t)) ||
	    copy_in_user(&pa_v2_data32->global_hue_adj,
			&pa_v2_data->global_hue_adj,
			sizeof(uint32_t)) ||
	    copy_in_user(&pa_v2_data32->global_sat_adj,
			&pa_v2_data->global_sat_adj,
			sizeof(uint32_t)) ||
	    copy_in_user(&pa_v2_data32->global_val_adj,
			&pa_v2_data->global_val_adj,
			sizeof(uint32_t)) ||
	    copy_in_user(&pa_v2_data32->global_cont_adj,
			&pa_v2_data->global_cont_adj,
			sizeof(uint32_t)) ||
	    copy_in_user(&pa_v2_data32->six_zone_thresh,
			&pa_v2_data->six_zone_thresh,
			sizeof(uint32_t)) ||
	    copy_in_user(&pa_v2_data32->six_zone_len,
			&pa_v2_data->six_zone_len,
			sizeof(uint32_t)))
		return -EFAULT;

	/* Pointer fields: narrow native user addresses to 32-bit ones. */
	if (get_user(data, (unsigned long *) &pa_v2_data->six_zone_curve_p0) ||
	    put_user((compat_caddr_t) data, &pa_v2_data32->six_zone_curve_p0) ||
	    get_user(data, (unsigned long *) &pa_v2_data->six_zone_curve_p1) ||
	    put_user((compat_caddr_t) data, &pa_v2_data32->six_zone_curve_p1))
		return -EFAULT;

	if (__to_user_mem_col_cfg(
			compat_ptr((uintptr_t)&pa_v2_data32->skin_cfg),
			&pa_v2_data->skin_cfg) ||
	    __to_user_mem_col_cfg(
			compat_ptr((uintptr_t)&pa_v2_data32->sky_cfg),
			&pa_v2_data->sky_cfg) ||
	    __to_user_mem_col_cfg(
			compat_ptr((uintptr_t)&pa_v2_data32->fol_cfg),
			&pa_v2_data->fol_cfg))
		return -EFAULT;

	return 0;
}
+
/*
 * __from_user_pa_mem_col_data_v1_7() - copy a v1.7 memory-color
 * payload from its 32-bit layout to the native layout.
 *
 * Both structs are kernel-resident copies (no uaccess here); every
 * field is a plain scalar, so this is a straight member-wise copy.
 */
static inline void __from_user_pa_mem_col_data_v1_7(
			struct mdp_pa_mem_col_data_v1_7_32 *mem_col_data32,
			struct mdp_pa_mem_col_data_v1_7 *mem_col_data)
{
	mem_col_data->color_adjust_p0 = mem_col_data32->color_adjust_p0;
	mem_col_data->color_adjust_p1 = mem_col_data32->color_adjust_p1;
	mem_col_data->color_adjust_p2 = mem_col_data32->color_adjust_p2;
	mem_col_data->blend_gain = mem_col_data32->blend_gain;
	mem_col_data->sat_hold = mem_col_data32->sat_hold;
	mem_col_data->val_hold = mem_col_data32->val_hold;
	mem_col_data->hue_region = mem_col_data32->hue_region;
	mem_col_data->sat_region = mem_col_data32->sat_region;
	mem_col_data->val_region = mem_col_data32->val_region;
}
+
+
/*
 * __from_user_pa_data_v1_7() - import the v1.7 PA payload.
 *
 * Reads the 32-bit payload struct from userspace, converts scalars
 * and the skin/sky/foliage memory-color blocks, widens the six-zone
 * curve pointers, and writes the native-layout payload back out to
 * pa_v2_cfg->cfg_payload.
 *
 * NOTE(review): cfg_payload is read/written through a __user struct
 * pointer without get_user(); presumably both structs live in a
 * compat_alloc_user_space() buffer -- confirm.
 *
 * Return: 0 on success, -EFAULT on a faulting copy.
 */
static int __from_user_pa_data_v1_7(
			struct mdp_pa_v2_cfg_data32 __user *pa_v2_cfg32,
			struct mdp_pa_v2_cfg_data __user *pa_v2_cfg)
{
	struct mdp_pa_data_v1_7_32 pa_cfg_payload32;
	struct mdp_pa_data_v1_7 pa_cfg_payload;

	if (copy_from_user(&pa_cfg_payload32,
			compat_ptr(pa_v2_cfg32->cfg_payload),
			sizeof(pa_cfg_payload32))) {
		pr_err("failed to copy the PA payload from userspace\n");
		return -EFAULT;
	}

	/* Zero first so padding never leaks kernel stack to userspace. */
	memset(&pa_cfg_payload, 0, sizeof(pa_cfg_payload));
	pa_cfg_payload.mode = pa_cfg_payload32.mode;
	pa_cfg_payload.global_hue_adj = pa_cfg_payload32.global_hue_adj;
	pa_cfg_payload.global_sat_adj = pa_cfg_payload32.global_sat_adj;
	pa_cfg_payload.global_val_adj = pa_cfg_payload32.global_val_adj;
	pa_cfg_payload.global_cont_adj = pa_cfg_payload32.global_cont_adj;

	__from_user_pa_mem_col_data_v1_7(&pa_cfg_payload32.skin_cfg,
					&pa_cfg_payload.skin_cfg);
	__from_user_pa_mem_col_data_v1_7(&pa_cfg_payload32.sky_cfg,
					&pa_cfg_payload.sky_cfg);
	__from_user_pa_mem_col_data_v1_7(&pa_cfg_payload32.fol_cfg,
					&pa_cfg_payload.fol_cfg);

	pa_cfg_payload.six_zone_thresh = pa_cfg_payload32.six_zone_thresh;
	pa_cfg_payload.six_zone_adj_p0 = pa_cfg_payload32.six_zone_adj_p0;
	pa_cfg_payload.six_zone_adj_p1 = pa_cfg_payload32.six_zone_adj_p1;
	pa_cfg_payload.six_zone_sat_hold = pa_cfg_payload32.six_zone_sat_hold;
	pa_cfg_payload.six_zone_val_hold = pa_cfg_payload32.six_zone_val_hold;
	pa_cfg_payload.six_zone_len = pa_cfg_payload32.six_zone_len;

	/* Widen the 32-bit curve pointers to native user pointers. */
	pa_cfg_payload.six_zone_curve_p0 =
		compat_ptr(pa_cfg_payload32.six_zone_curve_p0);
	pa_cfg_payload.six_zone_curve_p1 =
		compat_ptr(pa_cfg_payload32.six_zone_curve_p1);

	if (copy_to_user(pa_v2_cfg->cfg_payload, &pa_cfg_payload,
			sizeof(pa_cfg_payload))) {
		pr_err("Failed to copy to user pa cfg payload\n");
		return -EFAULT;
	}

	return 0;
}
+
/*
 * __from_user_pa_v2_cfg_data() - import a PA v2 config from its
 * 32-bit compat layout, dispatching on the payload version.
 *
 * Scalar fields are mirrored user-to-user; v1.7 payloads go through
 * __from_user_pa_data_v1_7(), anything else falls back to the legacy
 * embedded pa_v2_data layout.
 *
 * Return: 0 on success, -EFAULT on a faulting copy or payload error.
 */
static int __from_user_pa_v2_cfg_data(
			struct mdp_pa_v2_cfg_data32 __user *pa_v2_cfg32,
			struct mdp_pa_v2_cfg_data __user *pa_v2_cfg)
{
	uint32_t version;

	if (copy_in_user(&pa_v2_cfg->block,
			&pa_v2_cfg32->block,
			sizeof(uint32_t)) ||
	    copy_in_user(&pa_v2_cfg->version,
			&pa_v2_cfg32->version,
			sizeof(uint32_t)) ||
	    copy_in_user(&pa_v2_cfg->flags,
			&pa_v2_cfg32->flags,
			sizeof(uint32_t)))
		return -EFAULT;

	/* Second read of the user-held version for the dispatch below. */
	if (copy_from_user(&version,
			&pa_v2_cfg32->version,
			sizeof(uint32_t))) {
		pr_err("failed to copy the version info\n");
		return -EFAULT;
	}

	switch (version) {
	case mdp_pa_v1_7:
		if (__from_user_pa_data_v1_7(pa_v2_cfg32, pa_v2_cfg)) {
			pr_err("failed to get pa data for version %d\n",
				version);
			return -EFAULT;
		}
		break;
	default:
		/* Unknown version: treat the struct as the legacy layout. */
		pr_debug("version invalid, fallback to legacy\n");
		if (__from_user_pa_v2_data(
				compat_ptr((uintptr_t)&pa_v2_cfg32->pa_v2_data),
				&pa_v2_cfg->pa_v2_data))
			return -EFAULT;
		break;
	}

	return 0;
}
+
/*
 * __to_user_pa_mem_col_data_v1_7() - copy a v1.7 memory-color payload
 * from the native layout back into its 32-bit layout.
 *
 * Both structs are kernel-resident copies (no uaccess here); every
 * field is a plain scalar, so this is a straight member-wise copy.
 */
static inline void __to_user_pa_mem_col_data_v1_7(
			struct mdp_pa_mem_col_data_v1_7_32 *mem_col_data32,
			struct mdp_pa_mem_col_data_v1_7 *mem_col_data)
{
	mem_col_data32->color_adjust_p0 = mem_col_data->color_adjust_p0;
	mem_col_data32->color_adjust_p1 = mem_col_data->color_adjust_p1;
	mem_col_data32->color_adjust_p2 = mem_col_data->color_adjust_p2;
	mem_col_data32->blend_gain = mem_col_data->blend_gain;
	mem_col_data32->sat_hold = mem_col_data->sat_hold;
	mem_col_data32->val_hold = mem_col_data->val_hold;
	mem_col_data32->hue_region = mem_col_data->hue_region;
	mem_col_data32->sat_region = mem_col_data->sat_region;
	mem_col_data32->val_region = mem_col_data->val_region;
}
+
/*
 * __to_user_pa_data_v1_7() - export the v1.7 PA payload back to its
 * 32-bit compat layout.
 *
 * Reads the native-layout payload from pa_v2_cfg->cfg_payload,
 * narrows scalars and memory-color blocks into the 32-bit struct,
 * and writes it to the compat cfg_payload pointer. The pointer
 * fields themselves are not copied back here.
 *
 * NOTE(review): pa_v2_cfg->cfg_payload is dereferenced through a
 * __user struct pointer without get_user(); presumably both structs
 * live in a compat_alloc_user_space() buffer -- confirm.
 *
 * Return: 0 on success, -EFAULT on a faulting copy.
 */
static int __to_user_pa_data_v1_7(
			struct mdp_pa_v2_cfg_data32 __user *pa_v2_cfg32,
			struct mdp_pa_v2_cfg_data __user *pa_v2_cfg)
{
	struct mdp_pa_data_v1_7_32 pa_cfg_payload32;
	struct mdp_pa_data_v1_7 pa_cfg_payload;

	/* Zero first so padding never leaks kernel stack to userspace. */
	memset(&pa_cfg_payload32, 0, sizeof(pa_cfg_payload32));
	if (copy_from_user(&pa_cfg_payload,
			pa_v2_cfg->cfg_payload,
			sizeof(pa_cfg_payload))) {
		pr_err("failed to copy the PA payload from userspace\n");
		return -EFAULT;
	}

	pa_cfg_payload32.mode = pa_cfg_payload.mode;
	pa_cfg_payload32.global_hue_adj = pa_cfg_payload.global_hue_adj;
	pa_cfg_payload32.global_sat_adj = pa_cfg_payload.global_sat_adj;
	pa_cfg_payload32.global_val_adj = pa_cfg_payload.global_val_adj;
	pa_cfg_payload32.global_cont_adj = pa_cfg_payload.global_cont_adj;

	__to_user_pa_mem_col_data_v1_7(&pa_cfg_payload32.skin_cfg,
					&pa_cfg_payload.skin_cfg);
	__to_user_pa_mem_col_data_v1_7(&pa_cfg_payload32.sky_cfg,
					&pa_cfg_payload.sky_cfg);
	__to_user_pa_mem_col_data_v1_7(&pa_cfg_payload32.fol_cfg,
					&pa_cfg_payload.fol_cfg);

	pa_cfg_payload32.six_zone_thresh = pa_cfg_payload.six_zone_thresh;
	pa_cfg_payload32.six_zone_adj_p0 = pa_cfg_payload.six_zone_adj_p0;
	pa_cfg_payload32.six_zone_adj_p1 = pa_cfg_payload.six_zone_adj_p1;
	pa_cfg_payload32.six_zone_sat_hold = pa_cfg_payload.six_zone_sat_hold;
	pa_cfg_payload32.six_zone_val_hold = pa_cfg_payload.six_zone_val_hold;
	pa_cfg_payload32.six_zone_len = pa_cfg_payload.six_zone_len;

	if (copy_to_user(compat_ptr(pa_v2_cfg32->cfg_payload),
			&pa_cfg_payload32,
			sizeof(pa_cfg_payload32))) {
		pr_err("Failed to copy to user pa cfg payload\n");
		return -EFAULT;
	}

	return 0;
}
+
/*
 * __to_user_pa_v2_cfg_data() - export a PA v2 config back to its
 * 32-bit compat layout, dispatching on the payload version.
 *
 * In both the v1.7 and legacy paths the copy-back is skipped (with
 * success) unless the userspace flags request MDP_PP_OPS_READ.
 *
 * Return: 0 on success or skipped copy-back, -EFAULT on a faulting
 * copy or payload error.
 */
static int __to_user_pa_v2_cfg_data(
			struct mdp_pa_v2_cfg_data32 __user *pa_v2_cfg32,
			struct mdp_pa_v2_cfg_data __user *pa_v2_cfg)
{
	uint32_t version = 0;
	uint32_t flags = 0;

	if (copy_from_user(&version,
			&pa_v2_cfg32->version,
			sizeof(uint32_t)))
		return -EFAULT;

	switch (version) {
	case mdp_pa_v1_7:
		if (copy_from_user(&flags,
				&pa_v2_cfg32->flags,
				sizeof(uint32_t))) {
			pr_err("failed to get PA v1_7 flags\n");
			return -EFAULT;
		}

		/* Nothing to copy back unless a read was requested. */
		if (!(flags & MDP_PP_OPS_READ)) {
			pr_debug("Read op not set. Skipping compat copyback\n");
			return 0;
		}

		if (__to_user_pa_data_v1_7(pa_v2_cfg32, pa_v2_cfg)) {
			pr_err("failed to set pa data for version %d\n",
				version);
			return -EFAULT;
		}
		break;
	default:
		/* Unknown version: treat the struct as the legacy layout. */
		pr_debug("version invalid, fallback to legacy\n");

		if (copy_from_user(&flags,
				&pa_v2_cfg32->pa_v2_data.flags,
				sizeof(uint32_t))) {
			pr_err("failed to get PAv2 flags\n");
			return -EFAULT;
		}

		if (!(flags & MDP_PP_OPS_READ)) {
			pr_debug("Read op not set. Skipping compat copyback\n");
			return 0;
		}

		if (__to_user_pa_v2_data(
				compat_ptr((uintptr_t)&pa_v2_cfg32->pa_v2_data),
				&pa_v2_cfg->pa_v2_data))
			return -EFAULT;
		break;
	}

	return 0;
}
+
+/*
+ * __from_user_dither_cfg_data() - widen a 32-bit dither config into the
+ * 64-bit shadow structure.  Every field is a plain u32, so a user-to-user
+ * field copy is all that is needed.  Returns 0 or -EFAULT.
+ */
+static int __from_user_dither_cfg_data(
+			struct mdp_dither_cfg_data32 __user *dither_cfg32,
+			struct mdp_dither_cfg_data __user *dither_cfg)
+{
+	/* Stop at the first faulting copy; later fields are then skipped. */
+	if (copy_in_user(&dither_cfg->block, &dither_cfg32->block,
+			sizeof(uint32_t)))
+		return -EFAULT;
+	if (copy_in_user(&dither_cfg->flags, &dither_cfg32->flags,
+			sizeof(uint32_t)))
+		return -EFAULT;
+	if (copy_in_user(&dither_cfg->g_y_depth, &dither_cfg32->g_y_depth,
+			sizeof(uint32_t)))
+		return -EFAULT;
+	if (copy_in_user(&dither_cfg->r_cr_depth, &dither_cfg32->r_cr_depth,
+			sizeof(uint32_t)))
+		return -EFAULT;
+	if (copy_in_user(&dither_cfg->b_cb_depth, &dither_cfg32->b_cb_depth,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __to_user_dither_cfg_data() - copy dither results from the 64-bit shadow
+ * structure back to the caller's 32-bit structure.  Returns 0 or -EFAULT.
+ */
+static int __to_user_dither_cfg_data(
+			struct mdp_dither_cfg_data32 __user *dither_cfg32,
+			struct mdp_dither_cfg_data __user *dither_cfg)
+{
+	/* Mirror of __from_user_dither_cfg_data, direction reversed. */
+	if (copy_in_user(&dither_cfg32->block, &dither_cfg->block,
+			sizeof(uint32_t)))
+		return -EFAULT;
+	if (copy_in_user(&dither_cfg32->flags, &dither_cfg->flags,
+			sizeof(uint32_t)))
+		return -EFAULT;
+	if (copy_in_user(&dither_cfg32->g_y_depth, &dither_cfg->g_y_depth,
+			sizeof(uint32_t)))
+		return -EFAULT;
+	if (copy_in_user(&dither_cfg32->r_cr_depth, &dither_cfg->r_cr_depth,
+			sizeof(uint32_t)))
+		return -EFAULT;
+	if (copy_in_user(&dither_cfg32->b_cb_depth, &dither_cfg->b_cb_depth,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __from_user_gamut_cfg_data_v17() - import the v1.7 gamut payload: copy the
+ * 32-bit payload into kernel stack, widen every table pointer with
+ * compat_ptr(), and write the 64-bit payload to gamut_cfg->cfg_payload.
+ *
+ * NOTE(review): gamut_cfg32->cfg_payload and gamut_cfg->cfg_payload are read
+ * directly through __user pointers rather than via get_user(); this matches
+ * the rest of this file (the 64-bit side is compat_alloc_user_space memory)
+ * but is worth confirming against the platform's uaccess rules.
+ *
+ * Returns 0 or -EFAULT.
+ */
+static int __from_user_gamut_cfg_data_v17(
+			struct mdp_gamut_cfg_data32 __user *gamut_cfg32,
+			struct mdp_gamut_cfg_data __user *gamut_cfg)
+{
+	struct mdp_gamut_data_v1_7 gamut_cfg_payload;
+	struct mdp_gamut_data_v1_7_32 gamut_cfg_payload32;
+	u32 i = 0;
+
+	if (copy_from_user(&gamut_cfg_payload32,
+			   compat_ptr(gamut_cfg32->cfg_payload),
+			   sizeof(gamut_cfg_payload32))) {
+		pr_err("failed to copy the gamut payload from userspace\n");
+		return -EFAULT;
+	}
+
+	memset(&gamut_cfg_payload, 0, sizeof(gamut_cfg_payload));
+	gamut_cfg_payload.mode = gamut_cfg_payload32.mode;
+	/* Widen the per-table sizes and 32-bit data pointers. */
+	for (i = 0; i < MDP_GAMUT_TABLE_NUM_V1_7; i++) {
+		gamut_cfg_payload.tbl_size[i] =
+			gamut_cfg_payload32.tbl_size[i];
+		gamut_cfg_payload.c0_data[i] =
+			compat_ptr(gamut_cfg_payload32.c0_data[i]);
+		gamut_cfg_payload.c1_c2_data[i] =
+			compat_ptr(gamut_cfg_payload32.c1_c2_data[i]);
+	}
+	/* Same treatment for the scale/offset tables. */
+	for (i = 0; i < MDP_GAMUT_SCALE_OFF_TABLE_NUM; i++) {
+		gamut_cfg_payload.tbl_scale_off_sz[i] =
+			gamut_cfg_payload32.tbl_scale_off_sz[i];
+		gamut_cfg_payload.scale_off_data[i] =
+			compat_ptr(gamut_cfg_payload32.scale_off_data[i]);
+	}
+	if (copy_to_user(gamut_cfg->cfg_payload, &gamut_cfg_payload,
+			 sizeof(gamut_cfg_payload))) {
+		pr_err("failed to copy the gamut payload to userspace\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+/*
+ * __from_user_gamut_cfg_data() - import a 32-bit gamut config into the
+ * 64-bit shadow structure used by the native ioctl path.
+ *
+ * Fixed-width fields are copied user-to-user; the payload/table pointers are
+ * widened per version: v1.7 uses the cfg_payload blob, anything else falls
+ * back to the legacy r/g/b table-pointer arrays.  Returns 0, -EFAULT.
+ *
+ * Fix: the fixed-field copy_in_user() chain previously returned 0 on
+ * failure, silently reporting success with an uninitialized shadow struct;
+ * it now returns -EFAULT like every other copy helper in this file.
+ */
+static int __from_user_gamut_cfg_data(
+			struct mdp_gamut_cfg_data32 __user *gamut_cfg32,
+			struct mdp_gamut_cfg_data __user *gamut_cfg)
+{
+	uint32_t data, version;
+	int i;
+
+	if (copy_in_user(&gamut_cfg->block,
+			&gamut_cfg32->block,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&gamut_cfg->flags,
+			&gamut_cfg32->flags,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&gamut_cfg->gamut_first,
+			&gamut_cfg32->gamut_first,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&gamut_cfg->tbl_size[0],
+			&gamut_cfg32->tbl_size[0],
+			MDP_GAMUT_TABLE_NUM * sizeof(uint32_t)) ||
+	    copy_in_user(&gamut_cfg->version,
+			&gamut_cfg32->version,
+			sizeof(uint32_t)))
+		return -EFAULT;	/* was "return 0": a faulted copy is an error */
+
+	if (copy_from_user(&version, &gamut_cfg32->version, sizeof(u32))) {
+		pr_err("failed to copy the version info\n");
+		return -EFAULT;
+	}
+
+	switch (version) {
+	case mdp_gamut_v1_7:
+		if (__from_user_gamut_cfg_data_v17(gamut_cfg32, gamut_cfg)) {
+			pr_err("failed to get the gamut data for version %d\n",
+				version);
+			return -EFAULT;
+		}
+		break;
+	default:
+		pr_debug("version invalid fallback to legacy\n");
+	/* The Gamut LUT data contains 3 static arrays for R, G, and B
+	 * gamut data. Each these arrays contains pointers dynamic arrays
+	 * which hold the gamut LUTs for R, G, and B. Must copy the array of
+	 * pointers from 32 bit to 64 bit addresses.
+	 */
+		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+			if (get_user(data, &gamut_cfg32->r_tbl[i]) ||
+			    put_user(compat_ptr(data), &gamut_cfg->r_tbl[i]))
+				return -EFAULT;
+		}
+
+		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+			if (get_user(data, &gamut_cfg32->g_tbl[i]) ||
+			    put_user(compat_ptr(data), &gamut_cfg->g_tbl[i]))
+				return -EFAULT;
+		}
+
+		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+			if (get_user(data, &gamut_cfg32->b_tbl[i]) ||
+			    put_user(compat_ptr(data), &gamut_cfg->b_tbl[i]))
+				return -EFAULT;
+		}
+		break;
+	}
+	return 0;
+}
+
+/*
+ * __to_user_gamut_cfg_data() - copy gamut results from the 64-bit shadow
+ * structure back to the caller's 32-bit structure, narrowing the legacy
+ * r/g/b table pointers back to compat addresses.  Returns 0 or -EFAULT.
+ *
+ * Fixes:
+ *  - the fixed-field copy_in_user() chain returned 0 (success) on a fault;
+ *    it now returns -EFAULT.
+ *  - the blue-table loop wrote to gamut_cfg32->g_tbl[i] (copy-paste from the
+ *    green loop), clobbering the caller's green pointers and never returning
+ *    the blue ones; it now writes gamut_cfg32->b_tbl[i].
+ */
+static int __to_user_gamut_cfg_data(
+			struct mdp_gamut_cfg_data32 __user *gamut_cfg32,
+			struct mdp_gamut_cfg_data __user *gamut_cfg)
+{
+	unsigned long data;
+	int i;
+
+	if (copy_in_user(&gamut_cfg32->block,
+			&gamut_cfg->block,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&gamut_cfg32->flags,
+			&gamut_cfg->flags,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&gamut_cfg32->gamut_first,
+			&gamut_cfg->gamut_first,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&gamut_cfg32->tbl_size[0],
+			&gamut_cfg->tbl_size[0],
+			MDP_GAMUT_TABLE_NUM * sizeof(uint32_t)))
+		return -EFAULT;	/* was "return 0": a faulted copy is an error */
+
+	/* Narrow each 64-bit table pointer back to a compat address. */
+	for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+		if (get_user(data, (unsigned long *) &gamut_cfg->r_tbl[i]) ||
+		    put_user((compat_caddr_t)data, &gamut_cfg32->r_tbl[i]))
+			return -EFAULT;
+	}
+
+	for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+		if (get_user(data, (unsigned long *) &gamut_cfg->g_tbl[i]) ||
+		    put_user((compat_caddr_t)data, &gamut_cfg32->g_tbl[i]))
+			return -EFAULT;
+	}
+
+	for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+		if (get_user(data, (unsigned long *) &gamut_cfg->b_tbl[i]) ||
+		    put_user((compat_caddr_t)data, &gamut_cfg32->b_tbl[i]))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+/*
+ * __from_user_calib_config_data() - widen a 32-bit register-calibration
+ * request (ops/addr/data, all u32) into the 64-bit shadow structure.
+ * Returns 0 or -EFAULT.
+ */
+static int __from_user_calib_config_data(
+			struct mdp_calib_config_data32 __user *calib_cfg32,
+			struct mdp_calib_config_data __user *calib_cfg)
+{
+	if (copy_in_user(&calib_cfg->ops, &calib_cfg32->ops,
+			sizeof(uint32_t)))
+		return -EFAULT;
+	if (copy_in_user(&calib_cfg->addr, &calib_cfg32->addr,
+			sizeof(uint32_t)))
+		return -EFAULT;
+	if (copy_in_user(&calib_cfg->data, &calib_cfg32->data,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __to_user_calib_config_data() - copy register-calibration results back to
+ * the caller's 32-bit structure.  Returns 0 or -EFAULT.
+ */
+static int __to_user_calib_config_data(
+			struct mdp_calib_config_data32 __user *calib_cfg32,
+			struct mdp_calib_config_data __user *calib_cfg)
+{
+	if (copy_in_user(&calib_cfg32->ops, &calib_cfg->ops,
+			sizeof(uint32_t)))
+		return -EFAULT;
+	if (copy_in_user(&calib_cfg32->addr, &calib_cfg->addr,
+			sizeof(uint32_t)))
+		return -EFAULT;
+	if (copy_in_user(&calib_cfg32->data, &calib_cfg->data,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __from_user_ad_init() - widen the assertive-display (AD) init block from
+ * its 32-bit layout to the 64-bit shadow structure.
+ *
+ * All scalar fields and fixed-size LUT arrays are copied user-to-user in one
+ * short-circuiting chain; the three backlight LUT pointers are widened with
+ * get_user()/compat_ptr()/put_user() at the end.  Returns 0 or -EFAULT.
+ */
+static int __from_user_ad_init(
+			struct mdss_ad_init32 __user *ad_init32,
+			struct mdss_ad_init __user *ad_init)
+{
+	uint32_t data;
+
+	if (copy_in_user(&ad_init->asym_lut[0],
+			&ad_init32->asym_lut[0],
+			33 * sizeof(uint32_t)) ||
+	    copy_in_user(&ad_init->color_corr_lut[0],
+			&ad_init32->color_corr_lut[0],
+			33 * sizeof(uint32_t)) ||
+	    copy_in_user(&ad_init->i_control[0],
+			&ad_init32->i_control[0],
+			2 * sizeof(uint8_t)) ||
+	    copy_in_user(&ad_init->black_lvl,
+			&ad_init32->black_lvl,
+			sizeof(uint16_t)) ||
+	    copy_in_user(&ad_init->white_lvl,
+			&ad_init32->white_lvl,
+			sizeof(uint16_t)) ||
+	    copy_in_user(&ad_init->var,
+			&ad_init32->var,
+			sizeof(uint8_t)) ||
+	    copy_in_user(&ad_init->limit_ampl,
+			&ad_init32->limit_ampl,
+			sizeof(uint8_t)) ||
+	    copy_in_user(&ad_init->i_dither,
+			&ad_init32->i_dither,
+			sizeof(uint8_t)) ||
+	    copy_in_user(&ad_init->slope_max,
+			&ad_init32->slope_max,
+			sizeof(uint8_t)) ||
+	    copy_in_user(&ad_init->slope_min,
+			&ad_init32->slope_min,
+			sizeof(uint8_t)) ||
+	    copy_in_user(&ad_init->dither_ctl,
+			&ad_init32->dither_ctl,
+			sizeof(uint8_t)) ||
+	    copy_in_user(&ad_init->format,
+			&ad_init32->format,
+			sizeof(uint8_t)) ||
+	    copy_in_user(&ad_init->auto_size,
+			&ad_init32->auto_size,
+			sizeof(uint8_t)) ||
+	    copy_in_user(&ad_init->frame_w,
+			&ad_init32->frame_w,
+			sizeof(uint16_t)) ||
+	    copy_in_user(&ad_init->frame_h,
+			&ad_init32->frame_h,
+			sizeof(uint16_t)) ||
+	    copy_in_user(&ad_init->logo_v,
+			&ad_init32->logo_v,
+			sizeof(uint8_t)) ||
+	    copy_in_user(&ad_init->logo_h,
+			&ad_init32->logo_h,
+			sizeof(uint8_t)) ||
+	    copy_in_user(&ad_init->alpha,
+			&ad_init32->alpha,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&ad_init->alpha_base,
+			&ad_init32->alpha_base,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&ad_init->bl_lin_len,
+			&ad_init32->bl_lin_len,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&ad_init->bl_att_len,
+			&ad_init32->bl_att_len,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+
+	/* Widen the three backlight LUT pointers (compat addr -> 64-bit). */
+	if (get_user(data, &ad_init32->bl_lin) ||
+	    put_user(compat_ptr(data), &ad_init->bl_lin) ||
+	    get_user(data, &ad_init32->bl_lin_inv) ||
+	    put_user(compat_ptr(data), &ad_init->bl_lin_inv) ||
+	    get_user(data, &ad_init32->bl_att_lut) ||
+	    put_user(compat_ptr(data), &ad_init->bl_att_lut))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __from_user_ad_cfg() - widen the assertive-display runtime config from its
+ * 32-bit layout to the 64-bit shadow structure.  The struct is pointer-free,
+ * so a single user-to-user field-copy chain suffices.  Returns 0 or -EFAULT.
+ */
+static int __from_user_ad_cfg(
+			struct mdss_ad_cfg32 __user *ad_cfg32,
+			struct mdss_ad_cfg __user *ad_cfg)
+{
+	if (copy_in_user(&ad_cfg->mode,
+			&ad_cfg32->mode,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&ad_cfg->al_calib_lut[0],
+			&ad_cfg32->al_calib_lut[0],
+			33 * sizeof(uint32_t)) ||
+	    copy_in_user(&ad_cfg->backlight_min,
+			&ad_cfg32->backlight_min,
+			sizeof(uint16_t)) ||
+	    copy_in_user(&ad_cfg->backlight_max,
+			&ad_cfg32->backlight_max,
+			sizeof(uint16_t)) ||
+	    copy_in_user(&ad_cfg->backlight_scale,
+			&ad_cfg32->backlight_scale,
+			sizeof(uint16_t)) ||
+	    copy_in_user(&ad_cfg->amb_light_min,
+			&ad_cfg32->amb_light_min,
+			sizeof(uint16_t)) ||
+	    copy_in_user(&ad_cfg->filter[0],
+			&ad_cfg32->filter[0],
+			2 * sizeof(uint16_t)) ||
+	    copy_in_user(&ad_cfg->calib[0],
+			&ad_cfg32->calib[0],
+			4 * sizeof(uint16_t)) ||
+	    copy_in_user(&ad_cfg->strength_limit,
+			&ad_cfg32->strength_limit,
+			sizeof(uint8_t)) ||
+	    copy_in_user(&ad_cfg->t_filter_recursion,
+			&ad_cfg32->t_filter_recursion,
+			sizeof(uint8_t)) ||
+	    copy_in_user(&ad_cfg->stab_itr,
+			&ad_cfg32->stab_itr,
+			sizeof(uint16_t)) ||
+	    copy_in_user(&ad_cfg->bl_ctrl_mode,
+			&ad_cfg32->bl_ctrl_mode,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __from_user_ad_init_cfg() - widen an AD init/config request, dispatching
+ * on the ops field: MDP_PP_AD_INIT selects the init payload union member,
+ * MDP_PP_AD_CFG the runtime-config member.  Returns 0, -EFAULT, or -EINVAL
+ * when neither op bit is set.
+ */
+static int __from_user_ad_init_cfg(
+			struct mdss_ad_init_cfg32 __user *ad_info32,
+			struct mdss_ad_init_cfg __user *ad_info)
+{
+	uint32_t op;
+
+	/* Fetch ops into the kernel to pick the union member below. */
+	if (copy_from_user(&op, &ad_info32->ops,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	/* Also mirror ops into the 64-bit shadow struct for the driver. */
+	if (copy_in_user(&ad_info->ops,
+			&ad_info32->ops,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	if (op & MDP_PP_AD_INIT) {
+		if (__from_user_ad_init(
+				compat_ptr((uintptr_t)&ad_info32->params.init),
+				&ad_info->params.init))
+			return -EFAULT;
+	} else if (op & MDP_PP_AD_CFG) {
+		if (__from_user_ad_cfg(
+				compat_ptr((uintptr_t)&ad_info32->params.cfg),
+				&ad_info->params.cfg))
+			return -EFAULT;
+	} else {
+		pr_err("Invalid AD init/config operation\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * __from_user_ad_input() - widen an AD input sample; the union member to
+ * copy (ambient light, strength, or calibration backlight) is selected by
+ * the mode field.  Unrecognized modes copy only mode/output and succeed.
+ * Returns 0 or -EFAULT.
+ */
+static int __from_user_ad_input(
+			struct mdss_ad_input32 __user *ad_input32,
+			struct mdss_ad_input __user *ad_input)
+{
+	/* NOTE(review): declared int but copied as uint32_t below — the
+	 * sizes match on all supported ABIs, but u32 would be cleaner. */
+	int mode;
+
+	if (copy_from_user(&mode,
+			&ad_input32->mode,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	if (copy_in_user(&ad_input->mode,
+			&ad_input32->mode,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&ad_input->output,
+			&ad_input32->output,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	switch (mode) {
+	case MDSS_AD_MODE_AUTO_BL:
+	case MDSS_AD_MODE_AUTO_STR:
+		if (copy_in_user(&ad_input->in.amb_light,
+				&ad_input32->in.amb_light,
+				sizeof(uint32_t)))
+			return -EFAULT;
+		break;
+	case MDSS_AD_MODE_TARG_STR:
+	case MDSS_AD_MODE_MAN_STR:
+		if (copy_in_user(&ad_input->in.strength,
+				&ad_input32->in.strength,
+				sizeof(uint32_t)))
+			return -EFAULT;
+		break;
+	case MDSS_AD_MODE_CALIB:
+		if (copy_in_user(&ad_input->in.calib_bl,
+				&ad_input32->in.calib_bl,
+				sizeof(uint32_t)))
+			return -EFAULT;
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * __to_user_ad_input() - copy an AD input sample back to the 32-bit caller
+ * after the ioctl has run.  The mode is read from the 64-bit shadow struct
+ * (which the driver may have updated) and selects the union member to copy
+ * back.  Returns 0 or -EFAULT.
+ */
+static int __to_user_ad_input(
+			struct mdss_ad_input32 __user *ad_input32,
+			struct mdss_ad_input __user *ad_input)
+{
+	int mode;
+
+	if (copy_from_user(&mode,
+			&ad_input->mode,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	if (copy_in_user(&ad_input32->mode,
+			&ad_input->mode,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&ad_input32->output,
+			&ad_input->output,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	switch (mode) {
+	case MDSS_AD_MODE_AUTO_BL:
+	case MDSS_AD_MODE_AUTO_STR:
+		if (copy_in_user(&ad_input32->in.amb_light,
+				&ad_input->in.amb_light,
+				sizeof(uint32_t)))
+			return -EFAULT;
+		break;
+	case MDSS_AD_MODE_TARG_STR:
+	case MDSS_AD_MODE_MAN_STR:
+		if (copy_in_user(&ad_input32->in.strength,
+				&ad_input->in.strength,
+				sizeof(uint32_t)))
+			return -EFAULT;
+		break;
+	case MDSS_AD_MODE_CALIB:
+		if (copy_in_user(&ad_input32->in.calib_bl,
+				&ad_input->in.calib_bl,
+				sizeof(uint32_t)))
+			return -EFAULT;
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * __from_user_calib_cfg() - widen the calibration-mode request (ops and
+ * calib_mask, both u32) into the 64-bit shadow structure.
+ * Returns 0 or -EFAULT.
+ */
+static int __from_user_calib_cfg(
+			struct mdss_calib_cfg32 __user *calib_cfg32,
+			struct mdss_calib_cfg __user *calib_cfg)
+{
+	if (copy_in_user(&calib_cfg->ops, &calib_cfg32->ops,
+			sizeof(uint32_t)))
+		return -EFAULT;
+	if (copy_in_user(&calib_cfg->calib_mask, &calib_cfg32->calib_mask,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __from_user_calib_config_buffer() - widen a calibration buffer request:
+ * ops/size are copied user-to-user, the buffer pointer is widened from its
+ * compat address with get_user()/compat_ptr()/put_user().
+ * Returns 0 or -EFAULT.
+ */
+static int __from_user_calib_config_buffer(
+			struct mdp_calib_config_buffer32 __user *calib_buffer32,
+			struct mdp_calib_config_buffer __user *calib_buffer)
+{
+	uint32_t data;
+
+	if (copy_in_user(&calib_buffer->ops,
+			&calib_buffer32->ops,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&calib_buffer->size,
+			&calib_buffer32->size,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	if (get_user(data, &calib_buffer32->buffer) ||
+	    put_user(compat_ptr(data), &calib_buffer->buffer))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __to_user_calib_config_buffer() - copy calibration buffer results back to
+ * the 32-bit caller, narrowing the 64-bit buffer pointer to a compat
+ * address.  Returns 0 or -EFAULT.
+ */
+static int __to_user_calib_config_buffer(
+			struct mdp_calib_config_buffer32 __user *calib_buffer32,
+			struct mdp_calib_config_buffer __user *calib_buffer)
+{
+	unsigned long data;
+
+	if (copy_in_user(&calib_buffer32->ops,
+			&calib_buffer->ops,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&calib_buffer32->size,
+			&calib_buffer->size,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	/* Read the 64-bit pointer as an integer, truncate to compat addr. */
+	if (get_user(data, (unsigned long *) &calib_buffer->buffer) ||
+	    put_user((compat_caddr_t) data, &calib_buffer32->buffer))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __from_user_calib_dcm_state() - widen the DCM-state request (ops and
+ * dcm_state, both u32) into the 64-bit shadow structure.
+ * Returns 0 or -EFAULT.
+ */
+static int __from_user_calib_dcm_state(
+			struct mdp_calib_dcm_state32 __user *calib_dcm32,
+			struct mdp_calib_dcm_state __user *calib_dcm)
+{
+	if (copy_in_user(&calib_dcm->ops, &calib_dcm32->ops,
+			sizeof(uint32_t)))
+		return -EFAULT;
+	if (copy_in_user(&calib_dcm->dcm_state, &calib_dcm32->dcm_state,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/* Size of the IGC payload to allocate; when more payload versions exist
+ * this must return the largest of them. */
+static u32 __pp_compat_size_igc(void)
+{
+	return sizeof(struct mdp_igc_lut_data_v1_7);
+}
+
+/* Size of the histogram-LUT payload to allocate; when more payload
+ * versions exist this must return the largest of them. */
+static u32 __pp_compat_size_hist_lut(void)
+{
+	return sizeof(struct mdp_hist_lut_data_v1_7);
+}
+
+/* Size of the PGC payload to allocate: the v1.7 payload header plus three
+ * (R/G/B) full runs of gamma-correction LUT segments. */
+static u32 __pp_compat_size_pgc(void)
+{
+	return sizeof(struct mdp_pgc_lut_data_v1_7) +
+		3 * GC_LUT_SEGMENTS * sizeof(struct mdp_ar_gc_lut_data);
+}
+
+/* Size of the PCC payload to allocate; return the largest version's
+ * struct size once more than v1.7 exists. */
+static u32 __pp_compat_size_pcc(void)
+{
+	u32 sz = sizeof(struct mdp_pcc_data_v1_7);
+
+	return sz;
+}
+
+/* Size of the PA payload to allocate; return the largest version's
+ * struct size once more than v1.7 exists. */
+static u32 __pp_compat_size_pa(void)
+{
+	u32 sz = sizeof(struct mdp_pa_data_v1_7);
+
+	return sz;
+}
+
+/* Size of the gamut payload to allocate (only v1.7 exists today). */
+static u32 __pp_compat_size_gamut(void)
+{
+	u32 sz = sizeof(struct mdp_gamut_data_v1_7);
+
+	return sz;
+}
+
+/*
+ * __pp_compat_alloc() - allocate the 64-bit shadow msmfb_mdp_pp structure on
+ * the compat user stack, sized for @op's payload, and pre-wire its internal
+ * payload pointers to the extra space that follows the base struct.
+ *
+ * @pp32: the caller's compat structure (used only to read lut_type for
+ *        mdp_op_lut_cfg so the right payload size can be chosen).
+ * @pp:   out-parameter receiving the compat_alloc_user_space() block.
+ * @op:   the mdp_op_* selector already fetched from @pp32.
+ *
+ * The allocation and the direct (*pp)-> stores rely on
+ * compat_alloc_user_space() memory being directly addressable, as the rest
+ * of this file does.  Returns 0, -EFAULT, or -ENOMEM.
+ *
+ * Fix: the gamut and pa_v2 failure messages were copy-pasted from the pcc
+ * case and mislabeled the failing op; they now name the right payload.
+ */
+static int __pp_compat_alloc(struct msmfb_mdp_pp32 __user *pp32,
+					struct msmfb_mdp_pp __user **pp,
+					uint32_t op)
+{
+	uint32_t alloc_size = 0, lut_type, pgc_size = 0;
+
+	alloc_size = sizeof(struct msmfb_mdp_pp);
+	switch (op) {
+	case  mdp_op_lut_cfg:
+		if (copy_from_user(&lut_type,
+			&pp32->data.lut_cfg_data.lut_type,
+			sizeof(uint32_t)))
+			return -EFAULT;
+
+		switch (lut_type)  {
+		case mdp_lut_pgc:
+
+			/* Base struct + 3 legacy R/G/B tables + v1.7 payload. */
+			pgc_size = GC_LUT_SEGMENTS *
+				sizeof(struct mdp_ar_gc_lut_data);
+			alloc_size += __pp_compat_size_pgc();
+
+			*pp = compat_alloc_user_space(alloc_size);
+			if (*pp == NULL)
+				return -ENOMEM;
+			memset(*pp, 0, alloc_size);
+
+			(*pp)->data.lut_cfg_data.data.pgc_lut_data.r_data =
+					(struct mdp_ar_gc_lut_data *)
+					((unsigned long) *pp +
+					sizeof(struct msmfb_mdp_pp));
+			(*pp)->data.lut_cfg_data.data.pgc_lut_data.g_data =
+					(struct mdp_ar_gc_lut_data *)
+					((unsigned long) *pp +
+					sizeof(struct msmfb_mdp_pp) +
+					pgc_size);
+			(*pp)->data.lut_cfg_data.data.pgc_lut_data.b_data =
+					(struct mdp_ar_gc_lut_data *)
+					((unsigned long) *pp +
+					sizeof(struct msmfb_mdp_pp) +
+					(2 * pgc_size));
+			(*pp)->data.lut_cfg_data.data.pgc_lut_data.cfg_payload
+					 = (void *)((unsigned long) *pp +
+					sizeof(struct msmfb_mdp_pp) +
+					(3 * pgc_size));
+			break;
+		case mdp_lut_igc:
+			alloc_size += __pp_compat_size_igc();
+			*pp = compat_alloc_user_space(alloc_size);
+			if (*pp == NULL) {
+				pr_err("failed to alloc from user size %d for igc\n",
+					alloc_size);
+				return -ENOMEM;
+			}
+			memset(*pp, 0, alloc_size);
+			(*pp)->data.lut_cfg_data.data.igc_lut_data.cfg_payload
+					= (void *)((unsigned long)(*pp) +
+					   sizeof(struct msmfb_mdp_pp));
+			break;
+		case mdp_lut_hist:
+			alloc_size += __pp_compat_size_hist_lut();
+			*pp = compat_alloc_user_space(alloc_size);
+			if (*pp == NULL) {
+				pr_err("failed to alloc from user size %d for hist lut\n",
+					alloc_size);
+				return -ENOMEM;
+			}
+			memset(*pp, 0, alloc_size);
+			(*pp)->data.lut_cfg_data.data.hist_lut_data.cfg_payload
+					= (void *)((unsigned long)(*pp) +
+					   sizeof(struct msmfb_mdp_pp));
+			break;
+		default:
+			/* Other LUT types carry no external payload. */
+			*pp = compat_alloc_user_space(alloc_size);
+			if (*pp == NULL) {
+				pr_err("failed to alloc from user size %d for lut_type %d\n",
+					alloc_size, lut_type);
+				return -ENOMEM;
+			}
+			memset(*pp, 0, alloc_size);
+			break;
+		}
+		break;
+	case mdp_op_pcc_cfg:
+		alloc_size += __pp_compat_size_pcc();
+		*pp = compat_alloc_user_space(alloc_size);
+		if (*pp == NULL) {
+			pr_err("alloc from user size %d for pcc fail\n",
+				alloc_size);
+			return -ENOMEM;
+		}
+		memset(*pp, 0, alloc_size);
+		(*pp)->data.pcc_cfg_data.cfg_payload =
+				(void *)((unsigned long)(*pp) +
+				 sizeof(struct msmfb_mdp_pp));
+		break;
+	case mdp_op_gamut_cfg:
+		alloc_size += __pp_compat_size_gamut();
+		*pp = compat_alloc_user_space(alloc_size);
+		if (*pp == NULL) {
+			pr_err("alloc from user size %d for gamut fail\n",
+				alloc_size);
+			return -ENOMEM;
+		}
+		memset(*pp, 0, alloc_size);
+		(*pp)->data.gamut_cfg_data.cfg_payload =
+				(void *)((unsigned long)(*pp) +
+				 sizeof(struct msmfb_mdp_pp));
+		break;
+	case mdp_op_pa_v2_cfg:
+		alloc_size += __pp_compat_size_pa();
+		*pp = compat_alloc_user_space(alloc_size);
+		if (*pp == NULL) {
+			pr_err("alloc from user size %d for pa_v2 fail\n",
+				alloc_size);
+			return -ENOMEM;
+		}
+		memset(*pp, 0, alloc_size);
+		(*pp)->data.pa_v2_cfg_data.cfg_payload =
+				(void *)((unsigned long)(*pp) +
+				sizeof(struct msmfb_mdp_pp));
+		break;
+	default:
+		/* Ops without a versioned payload need only the base struct. */
+		*pp = compat_alloc_user_space(alloc_size);
+		if (*pp == NULL)
+			return -ENOMEM;
+		memset(*pp, 0, alloc_size);
+		break;
+	}
+	return 0;
+}
+
+/*
+ * mdss_compat_pp_ioctl() - 32-bit compat entry point for MSMFB_MDP_PP.
+ *
+ * Allocates a 64-bit shadow msmfb_mdp_pp (via __pp_compat_alloc), then for
+ * each op follows the same three-step pattern:
+ *   1. __from_user_*  - widen the 32-bit request into the shadow struct;
+ *   2. mdss_fb_do_ioctl - run the native ioctl on the shadow struct;
+ *   3. __to_user_*    - narrow results back to the caller (ops that produce
+ *                       no output, e.g. bl_scale/ad_cfg/calib_mode, skip it).
+ * Returns 0 or a negative errno from any step.
+ */
+static int mdss_compat_pp_ioctl(struct fb_info *info, unsigned int cmd,
+			unsigned long arg, struct file *file)
+{
+	uint32_t op;
+	int ret = 0;
+	struct msmfb_mdp_pp32 __user *pp32;
+	struct msmfb_mdp_pp __user *pp;
+
+	pp32 = compat_ptr(arg);
+	if (copy_from_user(&op, &pp32->op, sizeof(uint32_t)))
+		return -EFAULT;
+
+	ret = __pp_compat_alloc(pp32, &pp, op);
+	if (ret)
+		return ret;
+
+	if (copy_in_user(&pp->op, &pp32->op, sizeof(uint32_t)))
+		return -EFAULT;
+
+	switch (op) {
+	case mdp_op_pcc_cfg:
+		ret = __from_user_pcc_cfg_data(
+			compat_ptr((uintptr_t)&pp32->data.pcc_cfg_data),
+			&pp->data.pcc_cfg_data);
+		if (ret)
+			goto pp_compat_exit;
+		ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file);
+		if (ret)
+			goto pp_compat_exit;
+		ret = __to_user_pcc_cfg_data(
+			compat_ptr((uintptr_t)&pp32->data.pcc_cfg_data),
+			&pp->data.pcc_cfg_data);
+		break;
+	case mdp_op_csc_cfg:
+		ret = __from_user_csc_cfg_data(
+			compat_ptr((uintptr_t)&pp32->data.csc_cfg_data),
+			&pp->data.csc_cfg_data);
+		if (ret)
+			goto pp_compat_exit;
+		ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file);
+		if (ret)
+			goto pp_compat_exit;
+		ret = __to_user_csc_cfg_data(
+			compat_ptr((uintptr_t)&pp32->data.csc_cfg_data),
+			&pp->data.csc_cfg_data);
+		break;
+	case mdp_op_lut_cfg:
+		ret = __from_user_lut_cfg_data(
+			compat_ptr((uintptr_t)&pp32->data.lut_cfg_data),
+			&pp->data.lut_cfg_data);
+		if (ret)
+			goto pp_compat_exit;
+		ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file);
+		if (ret)
+			goto pp_compat_exit;
+		ret = __to_user_lut_cfg_data(
+			compat_ptr((uintptr_t)&pp32->data.lut_cfg_data),
+			&pp->data.lut_cfg_data);
+		break;
+	case mdp_op_qseed_cfg:
+		ret = __from_user_qseed_cfg_data(
+			compat_ptr((uintptr_t)&pp32->data.qseed_cfg_data),
+			&pp->data.qseed_cfg_data);
+		if (ret)
+			goto pp_compat_exit;
+		ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file);
+		if (ret)
+			goto pp_compat_exit;
+		ret = __to_user_qseed_cfg_data(
+			compat_ptr((uintptr_t)&pp32->data.qseed_cfg_data),
+			&pp->data.qseed_cfg_data);
+		break;
+	case mdp_bl_scale_cfg:
+		/* Write-only op: no copyback step. */
+		ret = __from_user_bl_scale_data(
+			compat_ptr((uintptr_t)&pp32->data.bl_scale_data),
+			&pp->data.bl_scale_data);
+		if (ret)
+			goto pp_compat_exit;
+		ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file);
+		break;
+	case mdp_op_pa_cfg:
+		ret = __from_user_pa_cfg_data(
+			compat_ptr((uintptr_t)&pp32->data.pa_cfg_data),
+			&pp->data.pa_cfg_data);
+		if (ret)
+			goto pp_compat_exit;
+		ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file);
+		if (ret)
+			goto pp_compat_exit;
+		ret = __to_user_pa_cfg_data(
+			compat_ptr((uintptr_t)&pp32->data.pa_cfg_data),
+			&pp->data.pa_cfg_data);
+		break;
+	case mdp_op_pa_v2_cfg:
+		ret = __from_user_pa_v2_cfg_data(
+			compat_ptr((uintptr_t)&pp32->data.pa_v2_cfg_data),
+			&pp->data.pa_v2_cfg_data);
+		if (ret)
+			goto pp_compat_exit;
+		ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file);
+		if (ret)
+			goto pp_compat_exit;
+		ret = __to_user_pa_v2_cfg_data(
+			compat_ptr((uintptr_t)&pp32->data.pa_v2_cfg_data),
+			&pp->data.pa_v2_cfg_data);
+		break;
+	case mdp_op_dither_cfg:
+		ret = __from_user_dither_cfg_data(
+			compat_ptr((uintptr_t)&pp32->data.dither_cfg_data),
+			&pp->data.dither_cfg_data);
+		if (ret)
+			goto pp_compat_exit;
+		ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file);
+		if (ret)
+			goto pp_compat_exit;
+		ret = __to_user_dither_cfg_data(
+			compat_ptr((uintptr_t)&pp32->data.dither_cfg_data),
+			&pp->data.dither_cfg_data);
+		break;
+	case mdp_op_gamut_cfg:
+		ret = __from_user_gamut_cfg_data(
+			compat_ptr((uintptr_t)&pp32->data.gamut_cfg_data),
+			&pp->data.gamut_cfg_data);
+		if (ret)
+			goto pp_compat_exit;
+		ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file);
+		if (ret)
+			goto pp_compat_exit;
+		ret = __to_user_gamut_cfg_data(
+			compat_ptr((uintptr_t)&pp32->data.gamut_cfg_data),
+			&pp->data.gamut_cfg_data);
+		break;
+	case mdp_op_calib_cfg:
+		ret = __from_user_calib_config_data(
+			compat_ptr((uintptr_t)&pp32->data.calib_cfg),
+			&pp->data.calib_cfg);
+		if (ret)
+			goto pp_compat_exit;
+		ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file);
+		if (ret)
+			goto pp_compat_exit;
+		ret = __to_user_calib_config_data(
+			compat_ptr((uintptr_t)&pp32->data.calib_cfg),
+			&pp->data.calib_cfg);
+		break;
+	case mdp_op_ad_cfg:
+		ret = __from_user_ad_init_cfg(
+			compat_ptr((uintptr_t)&pp32->data.ad_init_cfg),
+			&pp->data.ad_init_cfg);
+		if (ret)
+			goto pp_compat_exit;
+		ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file);
+		break;
+	case mdp_op_ad_input:
+		ret = __from_user_ad_input(
+			compat_ptr((uintptr_t)&pp32->data.ad_input),
+			&pp->data.ad_input);
+		if (ret)
+			goto pp_compat_exit;
+		ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file);
+		if (ret)
+			goto pp_compat_exit;
+		ret = __to_user_ad_input(
+			compat_ptr((uintptr_t)&pp32->data.ad_input),
+			&pp->data.ad_input);
+		break;
+	case mdp_op_calib_mode:
+		ret = __from_user_calib_cfg(
+			compat_ptr((uintptr_t)&pp32->data.mdss_calib_cfg),
+			&pp->data.mdss_calib_cfg);
+		if (ret)
+			goto pp_compat_exit;
+		ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file);
+		break;
+	case mdp_op_calib_buffer:
+		ret = __from_user_calib_config_buffer(
+			compat_ptr((uintptr_t)&pp32->data.calib_buffer),
+			&pp->data.calib_buffer);
+		if (ret)
+			goto pp_compat_exit;
+		ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file);
+		if (ret)
+			goto pp_compat_exit;
+		ret = __to_user_calib_config_buffer(
+			compat_ptr((uintptr_t)&pp32->data.calib_buffer),
+			&pp->data.calib_buffer);
+		break;
+	case mdp_op_calib_dcm_state:
+		ret = __from_user_calib_dcm_state(
+			compat_ptr((uintptr_t)&pp32->data.calib_dcm),
+			&pp->data.calib_dcm);
+		if (ret)
+			goto pp_compat_exit;
+		ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) pp, file);
+		break;
+	default:
+		/* Unknown op: return 0 and let the caller see no change. */
+		break;
+	}
+
+pp_compat_exit:
+	return ret;
+}
+
+/*
+ * __from_user_pp_params() - widen the per-overlay post-processing params by
+ * delegating each sub-config (csc, qseed x2, pa, igc, sharp, hist,
+ * hist_lut, pa_v2) to its dedicated __from_user_* helper.
+ *
+ * NOTE(review): the parameters are not __user-annotated although they are
+ * passed to copy_in_user()/compat_ptr(); presumably both point into
+ * userspace like the rest of this file — confirm with sparse.
+ *
+ * Returns 0 or the first helper's error.
+ */
+static int __from_user_pp_params(struct mdp_overlay_pp_params32 *ppp32,
+				struct mdp_overlay_pp_params *ppp)
+{
+	int ret = 0;
+
+	if (copy_in_user(&ppp->config_ops,
+			&ppp32->config_ops,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	ret = __from_user_csc_cfg(
+			compat_ptr((uintptr_t)&ppp32->csc_cfg),
+			&ppp->csc_cfg);
+	if (ret)
+		return ret;
+	ret = __from_user_qseed_cfg(
+			compat_ptr((uintptr_t)&ppp32->qseed_cfg[0]),
+			&ppp->qseed_cfg[0]);
+	if (ret)
+		return ret;
+	ret = __from_user_qseed_cfg(
+			compat_ptr((uintptr_t)&ppp32->qseed_cfg[1]),
+			&ppp->qseed_cfg[1]);
+	if (ret)
+		return ret;
+	ret = __from_user_pa_cfg(
+			compat_ptr((uintptr_t)&ppp32->pa_cfg),
+			&ppp->pa_cfg);
+	if (ret)
+		return ret;
+	ret = __from_user_igc_lut_data(
+			compat_ptr((uintptr_t)&ppp32->igc_cfg),
+			&ppp->igc_cfg);
+	if (ret)
+		return ret;
+	ret = __from_user_sharp_cfg(
+			compat_ptr((uintptr_t)&ppp32->sharp_cfg),
+			&ppp->sharp_cfg);
+	if (ret)
+		return ret;
+	ret = __from_user_histogram_cfg(
+			compat_ptr((uintptr_t)&ppp32->hist_cfg),
+			&ppp->hist_cfg);
+	if (ret)
+		return ret;
+	ret = __from_user_hist_lut_data(
+			compat_ptr((uintptr_t)&ppp32->hist_lut_cfg),
+			&ppp->hist_lut_cfg);
+	if (ret)
+		return ret;
+	ret = __from_user_pa_v2_data(
+			compat_ptr((uintptr_t)&ppp32->pa_v2_cfg),
+			&ppp->pa_v2_cfg);
+
+	return ret;
+}
+
+/*
+ * __to_user_pp_params() - mirror of __from_user_pp_params(): copy each
+ * overlay post-processing sub-config back to the 32-bit layout via its
+ * dedicated __to_user_* helper.  Returns 0 or the first helper's error.
+ */
+static int __to_user_pp_params(struct mdp_overlay_pp_params *ppp,
+				struct mdp_overlay_pp_params32 *ppp32)
+{
+	int ret = 0;
+
+	if (copy_in_user(&ppp32->config_ops,
+			&ppp->config_ops,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	ret = __to_user_csc_cfg(
+			compat_ptr((uintptr_t)&ppp32->csc_cfg),
+			&ppp->csc_cfg);
+	if (ret)
+		return ret;
+	ret = __to_user_qseed_cfg(
+			compat_ptr((uintptr_t)&ppp32->qseed_cfg[0]),
+			&ppp->qseed_cfg[0]);
+	if (ret)
+		return ret;
+	ret = __to_user_qseed_cfg(
+			compat_ptr((uintptr_t)&ppp32->qseed_cfg[1]),
+			&ppp->qseed_cfg[1]);
+	if (ret)
+		return ret;
+	ret = __to_user_pa_cfg(
+			compat_ptr((uintptr_t)&ppp32->pa_cfg),
+			&ppp->pa_cfg);
+	if (ret)
+		return ret;
+	ret = __to_user_igc_lut_data(
+			compat_ptr((uintptr_t)&ppp32->igc_cfg),
+			&ppp->igc_cfg);
+	if (ret)
+		return ret;
+	ret = __to_user_sharp_cfg(
+			compat_ptr((uintptr_t)&ppp32->sharp_cfg),
+			&ppp->sharp_cfg);
+	if (ret)
+		return ret;
+	ret = __to_user_histogram_cfg(
+			compat_ptr((uintptr_t)&ppp32->hist_cfg),
+			&ppp->hist_cfg);
+	if (ret)
+		return ret;
+	ret = __to_user_hist_lut_data(
+			compat_ptr((uintptr_t)&ppp32->hist_lut_cfg),
+			&ppp->hist_lut_cfg);
+	if (ret)
+		return ret;
+	ret = __to_user_pa_v2_data(
+			compat_ptr((uintptr_t)&ppp32->pa_v2_cfg),
+			&ppp->pa_v2_cfg);
+
+	return ret;
+}
+
+/*
+ * __from_user_hist_start_req() - widen a histogram-start request; the
+ * struct has only fixed-width scalar fields.  Returns 0 or -EFAULT.
+ */
+static int __from_user_hist_start_req(
+			struct mdp_histogram_start_req32 __user *hist_req32,
+			struct mdp_histogram_start_req __user *hist_req)
+{
+	if (copy_in_user(&hist_req->block, &hist_req32->block,
+			sizeof(uint32_t)))
+		return -EFAULT;
+	if (copy_in_user(&hist_req->frame_cnt, &hist_req32->frame_cnt,
+			sizeof(uint8_t)))
+		return -EFAULT;
+	if (copy_in_user(&hist_req->bit_mask, &hist_req32->bit_mask,
+			sizeof(uint8_t)))
+		return -EFAULT;
+	if (copy_in_user(&hist_req->num_bins, &hist_req32->num_bins,
+			sizeof(uint16_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __from_user_hist_data() - widen a histogram-read request: block/bin_cnt
+ * copy user-to-user, then the four output buffer pointers (c0/c1/c2/
+ * extra_info) are widened from compat addresses.  Returns 0 or -EFAULT.
+ */
+static int __from_user_hist_data(
+			struct mdp_histogram_data32 __user *hist_data32,
+			struct mdp_histogram_data __user *hist_data)
+{
+	uint32_t data;
+
+	if (copy_in_user(&hist_data->block,
+			&hist_data32->block,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&hist_data->bin_cnt,
+			&hist_data32->bin_cnt,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	if (get_user(data, &hist_data32->c0) ||
+	    put_user(compat_ptr(data), &hist_data->c0) ||
+	    get_user(data, &hist_data32->c1) ||
+	    put_user(compat_ptr(data), &hist_data->c1) ||
+	    get_user(data, &hist_data32->c2) ||
+	    put_user(compat_ptr(data), &hist_data->c2) ||
+	    get_user(data, &hist_data32->extra_info) ||
+	    put_user(compat_ptr(data), &hist_data->extra_info))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __to_user_hist_data() - copy histogram results back to the 32-bit caller,
+ * narrowing the four buffer pointers to compat addresses.
+ * Returns 0 or -EFAULT.
+ */
+static int __to_user_hist_data(
+			struct mdp_histogram_data32 __user *hist_data32,
+			struct mdp_histogram_data __user *hist_data)
+{
+	unsigned long data;
+
+	if (copy_in_user(&hist_data32->block,
+			&hist_data->block,
+			sizeof(uint32_t)) ||
+	    copy_in_user(&hist_data32->bin_cnt,
+			&hist_data->bin_cnt,
+			sizeof(uint32_t)))
+		return -EFAULT;
+
+	/* Pointers are read as integers and truncated to compat addrs. */
+	if (get_user(data, (unsigned long *) &hist_data->c0) ||
+	    put_user((compat_caddr_t) data, &hist_data32->c0) ||
+	    get_user(data, (unsigned long *) &hist_data->c1) ||
+	    put_user((compat_caddr_t) data, &hist_data32->c1) ||
+	    get_user(data, (unsigned long *) &hist_data->c2) ||
+	    put_user((compat_caddr_t) data, &hist_data32->c2) ||
+	    get_user(data, (unsigned long *) &hist_data->extra_info) ||
+	    put_user((compat_caddr_t) data, &hist_data32->extra_info))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * mdss_histo_compat_ioctl() - 32-bit compat dispatcher for the histogram
+ * ioctls.  START builds a 64-bit request and forwards it; STOP needs no
+ * translation and is forwarded verbatim; HISTOGRAM translates the request,
+ * runs the native ioctl, then copies the results back.  Unknown cmds fall
+ * through and return 0.  Returns 0 or a negative errno.
+ */
+static int mdss_histo_compat_ioctl(struct fb_info *info, unsigned int cmd,
+			unsigned long arg, struct file *file)
+{
+	struct mdp_histogram_data __user *hist;
+	struct mdp_histogram_data32 __user *hist32;
+	struct mdp_histogram_start_req __user *hist_req;
+	struct mdp_histogram_start_req32 __user *hist_req32;
+	int ret = 0;
+
+	switch (cmd) {
+	case MSMFB_HISTOGRAM_START:
+		hist_req32 = compat_ptr(arg);
+		hist_req = compat_alloc_user_space(
+				sizeof(struct mdp_histogram_start_req));
+		if (!hist_req) {
+			pr_err("%s:%u: compat alloc error [%zu] bytes\n",
+				 __func__, __LINE__,
+				 sizeof(struct mdp_histogram_start_req));
+			return -EINVAL;
+		}
+		memset(hist_req, 0, sizeof(struct mdp_histogram_start_req));
+		ret = __from_user_hist_start_req(hist_req32, hist_req);
+		if (ret)
+			goto histo_compat_err;
+		ret = mdss_fb_do_ioctl(info, cmd,
+			(unsigned long) hist_req, file);
+		break;
+	case MSMFB_HISTOGRAM_STOP:
+		/* No payload to translate; pass through unchanged. */
+		ret = mdss_fb_do_ioctl(info, cmd, arg, file);
+		break;
+	case MSMFB_HISTOGRAM:
+		hist32 = compat_ptr(arg);
+		hist = compat_alloc_user_space(
+				sizeof(struct mdp_histogram_data));
+		if (!hist) {
+			pr_err("%s:%u: compat alloc error [%zu] bytes\n",
+				 __func__, __LINE__,
+				 sizeof(struct mdp_histogram_data));
+			return -EINVAL;
+		}
+		memset(hist, 0, sizeof(struct mdp_histogram_data));
+		ret = __from_user_hist_data(hist32, hist);
+		if (ret)
+			goto histo_compat_err;
+		ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) hist, file);
+		if (ret)
+			goto histo_compat_err;
+		ret = __to_user_hist_data(hist32, hist);
+		break;
+	default:
+		break;
+	}
+
+histo_compat_err:
+	return ret;
+}
+
+/*
+ * __copy_layer_pp_info_qseed_params() - mirror both QSEED scaler configs
+ * from the 32-bit layer pp params into the 64-bit struct, widening each
+ * data pointer with compat_ptr().  Always returns 0.
+ */
+static int __copy_layer_pp_info_qseed_params(
+			struct mdp_overlay_pp_params *pp_info,
+			struct mdp_overlay_pp_params32 *pp_info32)
+{
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		pp_info->qseed_cfg[i].table_num =
+			pp_info32->qseed_cfg[i].table_num;
+		pp_info->qseed_cfg[i].ops = pp_info32->qseed_cfg[i].ops;
+		pp_info->qseed_cfg[i].len = pp_info32->qseed_cfg[i].len;
+		pp_info->qseed_cfg[i].data =
+			compat_ptr(pp_info32->qseed_cfg[i].data);
+	}
+
+	return 0;
+}
+
+/*
+ * Pull a v1.7 IGC LUT payload out of userspace: copy the 32-bit struct
+ * into a stack-local, then widen its fields into @cfg_payload, converting
+ * the two data handles with compat_ptr().  Returns 0 or -EFAULT.
+ */
+static int __copy_layer_igc_lut_data_v1_7(
+			struct mdp_igc_lut_data_v1_7 *cfg_payload,
+			struct mdp_igc_lut_data_v1_7_32 __user *cfg_payload32)
+{
+	struct mdp_igc_lut_data_v1_7_32 local_cfg_payload32;
+	int ret = 0;
+
+	/* copy_from_user() returns the number of bytes NOT copied */
+	ret = copy_from_user(&local_cfg_payload32,
+			cfg_payload32,
+			sizeof(struct mdp_igc_lut_data_v1_7_32));
+	if (ret) {
+		pr_err("copy from user failed, IGC cfg payload = %pK\n",
+			cfg_payload32);
+		ret = -EFAULT;
+		goto exit;
+	}
+
+	cfg_payload->table_fmt = local_cfg_payload32.table_fmt;
+	cfg_payload->len = local_cfg_payload32.len;
+	cfg_payload->c0_c1_data = compat_ptr(local_cfg_payload32.c0_c1_data);
+	cfg_payload->c2_data = compat_ptr(local_cfg_payload32.c2_data);
+
+exit:
+	return ret;
+}
+
+/*
+ * Translate the IGC section of 32-bit pp params.  For a versioned payload
+ * (version != 0) a kernel buffer is kmalloc'd and filled from userspace;
+ * on success its ownership passes to pp_info->igc_cfg.cfg_payload (the
+ * caller is responsible for freeing it).  On failure, and on the legacy
+ * (version == 0) path, cfg_payload ends up NULL.
+ */
+static int __copy_layer_pp_info_igc_params(
+			struct mdp_overlay_pp_params *pp_info,
+			struct mdp_overlay_pp_params32 *pp_info32)
+{
+	void *cfg_payload = NULL;
+	uint32_t payload_size = 0;
+	int ret = 0;
+
+	pp_info->igc_cfg.block = pp_info32->igc_cfg.block;
+	pp_info->igc_cfg.version = pp_info32->igc_cfg.version;
+	pp_info->igc_cfg.ops = pp_info32->igc_cfg.ops;
+
+	if (pp_info->igc_cfg.version != 0) {
+		payload_size = __pp_compat_size_igc();
+
+		cfg_payload = kmalloc(payload_size, GFP_KERNEL);
+		if (!cfg_payload) {
+			ret = -ENOMEM;
+			goto exit;
+		}
+	}
+
+	switch (pp_info->igc_cfg.version) {
+	case mdp_igc_v1_7:
+		ret = __copy_layer_igc_lut_data_v1_7(cfg_payload,
+				compat_ptr(pp_info32->igc_cfg.cfg_payload));
+		if (ret) {
+			pr_err("compat copy of IGC cfg payload failed, ret %d\n",
+				ret);
+			kfree(cfg_payload);
+			cfg_payload = NULL;
+			goto exit;
+		}
+		break;
+	default:
+		/* unversioned request: use the legacy inline fields instead */
+		pr_debug("No version set, fallback to legacy IGC version\n");
+		pp_info->igc_cfg.len = pp_info32->igc_cfg.len;
+		pp_info->igc_cfg.c0_c1_data =
+			compat_ptr(pp_info32->igc_cfg.c0_c1_data);
+		pp_info->igc_cfg.c2_data =
+			compat_ptr(pp_info32->igc_cfg.c2_data);
+		kfree(cfg_payload);
+		cfg_payload = NULL;
+		break;
+	}
+exit:
+	/* NULL on every path except a successful versioned copy */
+	pp_info->igc_cfg.cfg_payload = cfg_payload;
+	return ret;
+}
+
+/*
+ * Pull a v1.7 histogram-LUT payload out of userspace and widen it into
+ * @cfg_payload, converting the data handle with compat_ptr().
+ * Returns 0 or -EFAULT.
+ */
+static int __copy_layer_hist_lut_data_v1_7(
+			struct mdp_hist_lut_data_v1_7 *cfg_payload,
+			struct mdp_hist_lut_data_v1_7_32 __user *cfg_payload32)
+{
+	struct mdp_hist_lut_data_v1_7_32 local_cfg_payload32;
+	int ret = 0;
+
+	ret = copy_from_user(&local_cfg_payload32,
+			cfg_payload32,
+			sizeof(struct mdp_hist_lut_data_v1_7_32));
+	if (ret) {
+		pr_err("copy from user failed, hist lut cfg_payload = %pK\n",
+			cfg_payload32);
+		ret = -EFAULT;
+		goto exit;
+	}
+
+	cfg_payload->len = local_cfg_payload32.len;
+	cfg_payload->data = compat_ptr(local_cfg_payload32.data);
+exit:
+	return ret;
+}
+
+/*
+ * Translate the histogram-LUT section of 32-bit pp params.  Mirrors the
+ * IGC variant: a versioned payload is kmalloc'd and filled from userspace
+ * and its ownership passes to pp_info->hist_lut_cfg.cfg_payload; the
+ * legacy path (version == 0) uses the inline len/data fields and leaves
+ * cfg_payload NULL.
+ */
+static int __copy_layer_pp_info_hist_lut_params(
+			struct mdp_overlay_pp_params *pp_info,
+			struct mdp_overlay_pp_params32 *pp_info32)
+{
+	void *cfg_payload = NULL;
+	uint32_t payload_size = 0;
+	int ret = 0;
+
+	pp_info->hist_lut_cfg.block = pp_info32->hist_lut_cfg.block;
+	pp_info->hist_lut_cfg.version = pp_info32->hist_lut_cfg.version;
+	pp_info->hist_lut_cfg.ops = pp_info32->hist_lut_cfg.ops;
+	pp_info->hist_lut_cfg.hist_lut_first =
+			pp_info32->hist_lut_cfg.hist_lut_first;
+
+	if (pp_info->hist_lut_cfg.version != 0) {
+		payload_size = __pp_compat_size_hist_lut();
+
+		cfg_payload = kmalloc(payload_size, GFP_KERNEL);
+		if (!cfg_payload) {
+			ret = -ENOMEM;
+			goto exit;
+		}
+	}
+
+	switch (pp_info->hist_lut_cfg.version) {
+	case mdp_hist_lut_v1_7:
+		ret = __copy_layer_hist_lut_data_v1_7(cfg_payload,
+			compat_ptr(pp_info32->hist_lut_cfg.cfg_payload));
+		if (ret) {
+			pr_err("compat copy of Hist LUT cfg payload failed, ret %d\n",
+				ret);
+			kfree(cfg_payload);
+			cfg_payload = NULL;
+			goto exit;
+		}
+		break;
+	default:
+		pr_debug("version invalid, fallback to legacy\n");
+		pp_info->hist_lut_cfg.len = pp_info32->hist_lut_cfg.len;
+		pp_info->hist_lut_cfg.data =
+				compat_ptr(pp_info32->hist_lut_cfg.data);
+		kfree(cfg_payload);
+		cfg_payload = NULL;
+		break;
+	}
+exit:
+	pp_info->hist_lut_cfg.cfg_payload = cfg_payload;
+	return ret;
+}
+
+/*
+ * Pull a v1.7 picture-adjustment payload out of userspace and widen it
+ * into @cfg_payload.  Scalar fields are assigned one by one; the two
+ * six-zone curve handles are converted with compat_ptr().
+ * Returns 0 or -EFAULT.
+ */
+static int __copy_layer_pa_data_v1_7(
+			struct mdp_pa_data_v1_7 *cfg_payload,
+			struct mdp_pa_data_v1_7_32 __user *cfg_payload32)
+{
+	struct mdp_pa_data_v1_7_32 local_cfg_payload32;
+	int ret = 0;
+
+	ret = copy_from_user(&local_cfg_payload32,
+			cfg_payload32,
+			sizeof(struct mdp_pa_data_v1_7_32));
+	if (ret) {
+		pr_err("copy from user failed, pa cfg_payload = %pK\n",
+			cfg_payload32);
+		ret = -EFAULT;
+		goto exit;
+	}
+
+	cfg_payload->mode = local_cfg_payload32.mode;
+	cfg_payload->global_hue_adj = local_cfg_payload32.global_hue_adj;
+	cfg_payload->global_sat_adj = local_cfg_payload32.global_sat_adj;
+	cfg_payload->global_val_adj = local_cfg_payload32.global_val_adj;
+	cfg_payload->global_cont_adj = local_cfg_payload32.global_cont_adj;
+
+	/*
+	 * assumes the 32- and 64-bit layouts of mdp_pa_mem_col_data_v1_7
+	 * are identical (the _32 variant holds no pointer members) --
+	 * TODO(review): confirm against the native struct definition.
+	 */
+	memcpy(&cfg_payload->skin_cfg, &local_cfg_payload32.skin_cfg,
+			sizeof(struct mdp_pa_mem_col_data_v1_7));
+	memcpy(&cfg_payload->sky_cfg, &local_cfg_payload32.sky_cfg,
+			sizeof(struct mdp_pa_mem_col_data_v1_7));
+	memcpy(&cfg_payload->fol_cfg, &local_cfg_payload32.fol_cfg,
+			sizeof(struct mdp_pa_mem_col_data_v1_7));
+
+	cfg_payload->six_zone_thresh = local_cfg_payload32.six_zone_thresh;
+	cfg_payload->six_zone_adj_p0 = local_cfg_payload32.six_zone_adj_p0;
+	cfg_payload->six_zone_adj_p1 = local_cfg_payload32.six_zone_adj_p1;
+	cfg_payload->six_zone_sat_hold = local_cfg_payload32.six_zone_sat_hold;
+	cfg_payload->six_zone_val_hold = local_cfg_payload32.six_zone_val_hold;
+	cfg_payload->six_zone_len = local_cfg_payload32.six_zone_len;
+
+	cfg_payload->six_zone_curve_p0 =
+			compat_ptr(local_cfg_payload32.six_zone_curve_p0);
+	cfg_payload->six_zone_curve_p1 =
+			compat_ptr(local_cfg_payload32.six_zone_curve_p1);
+exit:
+	return ret;
+}
+
+/*
+ * Translate the PAv2 section of 32-bit pp params.  A versioned payload is
+ * kmalloc'd and filled via __copy_layer_pa_data_v1_7(); ownership passes
+ * to pp_info->pa_v2_cfg_data.cfg_payload on success.  Unlike the IGC and
+ * hist-LUT variants there is no legacy fallback here: an unknown version
+ * simply leaves cfg_payload NULL and returns 0.
+ */
+static int __copy_layer_pp_info_pa_v2_params(
+			struct mdp_overlay_pp_params *pp_info,
+			struct mdp_overlay_pp_params32 *pp_info32)
+{
+	void *cfg_payload = NULL;
+	uint32_t payload_size = 0;
+	int ret = 0;
+
+	pp_info->pa_v2_cfg_data.block = pp_info32->pa_v2_cfg_data.block;
+	pp_info->pa_v2_cfg_data.version = pp_info32->pa_v2_cfg_data.version;
+	pp_info->pa_v2_cfg_data.flags = pp_info32->pa_v2_cfg_data.flags;
+
+	if (pp_info->pa_v2_cfg_data.version != 0) {
+		payload_size = __pp_compat_size_pa();
+
+		cfg_payload = kmalloc(payload_size, GFP_KERNEL);
+		if (!cfg_payload) {
+			ret = -ENOMEM;
+			goto exit;
+		}
+	}
+
+	switch (pp_info->pa_v2_cfg_data.version) {
+	case mdp_pa_v1_7:
+		ret = __copy_layer_pa_data_v1_7(cfg_payload,
+			compat_ptr(pp_info32->pa_v2_cfg_data.cfg_payload));
+		if (ret) {
+			pr_err("compat copy of PA cfg payload failed, ret %d\n",
+				ret);
+			kfree(cfg_payload);
+			cfg_payload = NULL;
+			goto exit;
+		}
+		break;
+	default:
+		pr_debug("version invalid\n");
+		kfree(cfg_payload);
+		cfg_payload = NULL;
+		break;
+	}
+exit:
+	pp_info->pa_v2_cfg_data.cfg_payload = cfg_payload;
+	return ret;
+}
+
+/*
+ * Copy the legacy (inline, unversioned) PAv2 fields from the 32-bit pp
+ * params into the native struct; the two six-zone curve handles are
+ * converted with compat_ptr().  Both arguments are kernel-resident
+ * copies.  Always returns 0.
+ */
+static int __copy_layer_pp_info_legacy_pa_v2_params(
+			struct mdp_overlay_pp_params *pp_info,
+			struct mdp_overlay_pp_params32 *pp_info32)
+{
+	pp_info->pa_v2_cfg.global_hue_adj =
+		pp_info32->pa_v2_cfg.global_hue_adj;
+	pp_info->pa_v2_cfg.global_sat_adj =
+		pp_info32->pa_v2_cfg.global_sat_adj;
+	pp_info->pa_v2_cfg.global_val_adj =
+		pp_info32->pa_v2_cfg.global_val_adj;
+	pp_info->pa_v2_cfg.global_cont_adj =
+		pp_info32->pa_v2_cfg.global_cont_adj;
+
+	memcpy(&pp_info->pa_v2_cfg.skin_cfg,
+			&pp_info32->pa_v2_cfg.skin_cfg,
+			sizeof(struct mdp_pa_mem_col_cfg));
+	memcpy(&pp_info->pa_v2_cfg.sky_cfg,
+			&pp_info32->pa_v2_cfg.sky_cfg,
+			sizeof(struct mdp_pa_mem_col_cfg));
+	memcpy(&pp_info->pa_v2_cfg.fol_cfg,
+			&pp_info32->pa_v2_cfg.fol_cfg,
+			sizeof(struct mdp_pa_mem_col_cfg));
+
+	pp_info->pa_v2_cfg.six_zone_thresh =
+		pp_info32->pa_v2_cfg.six_zone_thresh;
+	pp_info->pa_v2_cfg.six_zone_len =
+		pp_info32->pa_v2_cfg.six_zone_len;
+
+	pp_info->pa_v2_cfg.six_zone_curve_p0 =
+		compat_ptr(pp_info32->pa_v2_cfg.six_zone_curve_p0);
+	pp_info->pa_v2_cfg.six_zone_curve_p1 =
+		compat_ptr(pp_info32->pa_v2_cfg.six_zone_curve_p1);
+
+	return 0;
+}
+
+/*
+ * Translate the PCC section of 32-bit pp params.  The v1.7 PCC payload
+ * contains no pointers, so it is copied straight from userspace into the
+ * kmalloc'd buffer; ownership passes to pp_info->pcc_cfg_data.cfg_payload
+ * on success, NULL otherwise.
+ */
+static int __copy_layer_pp_info_pcc_params(
+			struct mdp_overlay_pp_params *pp_info,
+			struct mdp_overlay_pp_params32 *pp_info32)
+{
+	void *cfg_payload = NULL;
+	uint32_t payload_size = 0;
+	int ret = 0;
+
+	pp_info->pcc_cfg_data.block = pp_info32->pcc_cfg_data.block;
+	pp_info->pcc_cfg_data.version = pp_info32->pcc_cfg_data.version;
+	pp_info->pcc_cfg_data.ops = pp_info32->pcc_cfg_data.ops;
+
+	if (pp_info->pcc_cfg_data.version != 0) {
+		payload_size = __pp_compat_size_pcc();
+
+		cfg_payload = kmalloc(payload_size, GFP_KERNEL);
+		if (!cfg_payload) {
+			ret = -ENOMEM;
+			goto exit;
+		}
+	}
+
+	switch (pp_info->pcc_cfg_data.version) {
+	case mdp_pcc_v1_7:
+		ret = copy_from_user(cfg_payload,
+			compat_ptr(pp_info32->pcc_cfg_data.cfg_payload),
+			sizeof(struct mdp_pcc_data_v1_7));
+		if (ret) {
+			pr_err("compat copy of PCC cfg payload failed, ptr %pK\n",
+				compat_ptr(
+				pp_info32->pcc_cfg_data.cfg_payload));
+			ret = -EFAULT;
+			kfree(cfg_payload);
+			cfg_payload = NULL;
+			goto exit;
+		}
+		break;
+	default:
+		pr_debug("version invalid, fallback to legacy\n");
+		kfree(cfg_payload);
+		cfg_payload = NULL;
+		break;
+	}
+exit:
+	pp_info->pcc_cfg_data.cfg_payload = cfg_payload;
+	return ret;
+}
+
+
+/*
+ * __copy_layer_pp_info_params() - translate a layer's pp params from the
+ * 32-bit layer struct into a freshly allocated native mdp_overlay_pp_params.
+ *
+ * No-op unless MDP_LAYER_PP is set in layer->flags.  On success the
+ * allocated struct (plus any nested IGC/hist-LUT/PAv2/PCC cfg_payload
+ * buffers) is owned by layer->pp_info; the caller must free them.  On
+ * failure the goto ladder unwinds in reverse acquisition order -- the
+ * QSEED and legacy-PAv2 helpers allocate nothing, so they have no labels.
+ */
+static int __copy_layer_pp_info_params(struct mdp_input_layer *layer,
+				struct mdp_input_layer32 *layer32)
+{
+	struct mdp_overlay_pp_params *pp_info;
+	struct mdp_overlay_pp_params32 pp_info32;
+	int ret = 0;
+
+	if (!(layer->flags & MDP_LAYER_PP))
+		return 0;
+
+	ret = copy_from_user(&pp_info32,
+			compat_ptr(layer32->pp_info),
+			sizeof(struct mdp_overlay_pp_params32));
+	if (ret) {
+		pr_err("pp info copy from user failed, pp_info %pK\n",
+			compat_ptr(layer32->pp_info));
+		ret = -EFAULT;
+		goto exit;
+	}
+
+	pp_info = kmalloc(sizeof(struct mdp_overlay_pp_params), GFP_KERNEL);
+	if (!pp_info) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+	memset(pp_info, 0, sizeof(struct mdp_overlay_pp_params));
+
+	pp_info->config_ops = pp_info32.config_ops;
+
+	/* pointer-free sub-structs can be copied wholesale */
+	memcpy(&pp_info->csc_cfg, &pp_info32.csc_cfg,
+		sizeof(struct mdp_csc_cfg));
+	memcpy(&pp_info->sharp_cfg, &pp_info32.sharp_cfg,
+		sizeof(struct mdp_sharp_cfg));
+	memcpy(&pp_info->hist_cfg, &pp_info32.hist_cfg,
+		sizeof(struct mdp_histogram_cfg));
+	memcpy(&pp_info->pa_cfg, &pp_info32.pa_cfg,
+		sizeof(struct mdp_pa_cfg));
+
+	ret = __copy_layer_pp_info_qseed_params(pp_info, &pp_info32);
+	if (ret) {
+		pr_err("compat copy pp_info QSEED params failed, ret %d\n",
+			ret);
+		goto exit_pp_info;
+	}
+	ret = __copy_layer_pp_info_legacy_pa_v2_params(pp_info, &pp_info32);
+	if (ret) {
+		pr_err("compat copy pp_info Legacy PAv2 params failed, ret %d\n",
+			ret);
+		goto exit_pp_info;
+	}
+	ret = __copy_layer_pp_info_igc_params(pp_info, &pp_info32);
+	if (ret) {
+		pr_err("compat copy pp_info IGC params failed, ret %d\n",
+			ret);
+		goto exit_pp_info;
+	}
+	ret = __copy_layer_pp_info_hist_lut_params(pp_info, &pp_info32);
+	if (ret) {
+		pr_err("compat copy pp_info Hist LUT params failed, ret %d\n",
+			ret);
+		goto exit_igc;
+	}
+	ret = __copy_layer_pp_info_pa_v2_params(pp_info, &pp_info32);
+	if (ret) {
+		pr_err("compat copy pp_info PAv2 params failed, ret %d\n",
+			ret);
+		goto exit_hist_lut;
+	}
+	ret = __copy_layer_pp_info_pcc_params(pp_info, &pp_info32);
+	if (ret) {
+		pr_err("compat copy pp_info PCC params failed, ret %d\n",
+			ret);
+		goto exit_pa;
+	}
+
+	/* success: ownership of pp_info and nested payloads moves to layer */
+	layer->pp_info = pp_info;
+
+	return ret;
+
+exit_pa:
+	kfree(pp_info->pa_v2_cfg_data.cfg_payload);
+exit_hist_lut:
+	kfree(pp_info->hist_lut_cfg.cfg_payload);
+exit_igc:
+	kfree(pp_info->igc_cfg.cfg_payload);
+exit_pp_info:
+	kfree(pp_info);
+exit:
+	return ret;
+}
+
+
+/*
+ * __to_user_mdp_overlay() - copy a native mdp_overlay (itself living in
+ * user memory, from compat_alloc_user_space()) back into the 32-bit
+ * userspace struct.  Both pointers are __user, hence the copy_in_user()/
+ * get_user()/put_user() combination; put_user results are OR-accumulated
+ * and checked in batches.  Returns 0 or -EFAULT.
+ */
+static int __to_user_mdp_overlay(struct mdp_overlay32 __user *ov32,
+				 struct mdp_overlay __user *ov)
+{
+	int ret = 0;
+
+	ret = copy_in_user(&ov32->src, &ov->src, sizeof(ov32->src)) ||
+		copy_in_user(&ov32->src_rect,
+			&ov->src_rect, sizeof(ov32->src_rect)) ||
+		copy_in_user(&ov32->dst_rect,
+			&ov->dst_rect, sizeof(ov32->dst_rect));
+	if (ret)
+		return -EFAULT;
+
+	ret |= put_user(ov->z_order, &ov32->z_order);
+	ret |= put_user(ov->is_fg, &ov32->is_fg);
+	ret |= put_user(ov->alpha, &ov32->alpha);
+	ret |= put_user(ov->blend_op, &ov32->blend_op);
+	ret |= put_user(ov->transp_mask, &ov32->transp_mask);
+	ret |= put_user(ov->flags, &ov32->flags);
+	ret |= put_user(ov->id, &ov32->id);
+	ret |= put_user(ov->priority, &ov32->priority);
+	if (ret)
+		return -EFAULT;
+
+	ret = copy_in_user(&ov32->user_data, &ov->user_data,
+		     sizeof(ov32->user_data));
+	if (ret)
+		return -EFAULT;
+
+	ret |= put_user(ov->horz_deci, &ov32->horz_deci);
+	ret |= put_user(ov->vert_deci, &ov32->vert_deci);
+	if (ret)
+		return -EFAULT;
+
+	ret = __to_user_pp_params(
+			&ov->overlay_pp_cfg,
+			compat_ptr((uintptr_t) &ov32->overlay_pp_cfg));
+	if (ret)
+		return -EFAULT;
+
+	ret = copy_in_user(&ov32->scale, &ov->scale,
+			   sizeof(struct mdp_scale_data));
+	if (ret)
+		return -EFAULT;
+
+	ret = put_user(ov->frame_rate, &ov32->frame_rate);
+	if (ret)
+		return -EFAULT;
+
+	return 0;
+}
+
+
+/*
+ * __from_user_mdp_overlay() - fill a native mdp_overlay from the 32-bit
+ * userspace struct.  Each scalar is bounced through a kernel temporary
+ * (get_user then put_user) because @ov also lives in user memory
+ * (compat_alloc_user_space()).  Returns 0 or -EFAULT.
+ *
+ * NOTE(review): @ov is written with put_user()/copy_in_user() yet is not
+ * annotated __user in the signature -- sparse would flag this; consider
+ * adding the annotation.
+ */
+static int __from_user_mdp_overlay(struct mdp_overlay *ov,
+				   struct mdp_overlay32 __user *ov32)
+{
+	__u32 data;
+
+	if (copy_in_user(&ov->src, &ov32->src,
+			 sizeof(ov32->src)) ||
+	    copy_in_user(&ov->src_rect, &ov32->src_rect,
+			 sizeof(ov32->src_rect)) ||
+	    copy_in_user(&ov->dst_rect, &ov32->dst_rect,
+			 sizeof(ov32->dst_rect)))
+		return -EFAULT;
+
+	if (get_user(data, &ov32->z_order) ||
+	    put_user(data, &ov->z_order) ||
+	    get_user(data, &ov32->is_fg) ||
+	    put_user(data, &ov->is_fg) ||
+	    get_user(data, &ov32->alpha) ||
+	    put_user(data, &ov->alpha) ||
+	    get_user(data, &ov32->blend_op) ||
+	    put_user(data, &ov->blend_op) ||
+	    get_user(data, &ov32->transp_mask) ||
+	    put_user(data, &ov->transp_mask) ||
+	    get_user(data, &ov32->flags) ||
+	    put_user(data, &ov->flags) ||
+	    get_user(data, &ov32->pipe_type) ||
+	    put_user(data, &ov->pipe_type) ||
+	    get_user(data, &ov32->id) ||
+	    put_user(data, &ov->id) ||
+	    get_user(data, &ov32->priority) ||
+	    put_user(data, &ov->priority))
+		return -EFAULT;
+
+	if (copy_in_user(&ov->user_data, &ov32->user_data,
+			 sizeof(ov32->user_data)))
+		return -EFAULT;
+
+	if (get_user(data, &ov32->horz_deci) ||
+	    put_user(data, &ov->horz_deci) ||
+	    get_user(data, &ov32->vert_deci) ||
+	    put_user(data, &ov->vert_deci))
+		return -EFAULT;
+
+	if (__from_user_pp_params(
+			compat_ptr((uintptr_t) &ov32->overlay_pp_cfg),
+			&ov->overlay_pp_cfg))
+		return -EFAULT;
+
+	if (copy_in_user(&ov->scale, &ov32->scale,
+			 sizeof(struct mdp_scale_data)))
+		return -EFAULT;
+
+	if (get_user(data, &ov32->frame_rate) ||
+	    put_user(data, &ov->frame_rate))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * __from_user_mdp_overlaylist() - translate a 32-bit overlay list: copy the
+ * list header fields, then convert each 32-bit overlay pointed at by the
+ * userspace pointer array into the pre-allocated native overlays in
+ * @to_list_head.  Returns 0 or a negative errno.
+ *
+ * NOTE(review): the loop condition dereferences ovlist32->num_overlays (a
+ * user pointer) directly instead of using the value fetched via get_user
+ * or copy_in_user above -- userspace can change it between reads (TOCTOU);
+ * the count should be read once with get_user.  Also, the intermediate
+ * `ret` assignments are never returned; validate_exit returns -EFAULT
+ * unconditionally.
+ */
+static int __from_user_mdp_overlaylist(struct mdp_overlay_list *ovlist,
+				   struct mdp_overlay_list32 *ovlist32,
+				   struct mdp_overlay **to_list_head)
+{
+	__u32 i, ret;
+	unsigned long data, from_list_head;
+	struct mdp_overlay32 *iter;
+
+	if (!to_list_head || !ovlist32 || !ovlist) {
+		pr_err("%s:%u: null error\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	if (copy_in_user(&ovlist->num_overlays, &ovlist32->num_overlays,
+			 sizeof(ovlist32->num_overlays)))
+		return -EFAULT;
+
+	if (copy_in_user(&ovlist->flags, &ovlist32->flags,
+			 sizeof(ovlist32->flags)))
+		return -EFAULT;
+
+	if (copy_in_user(&ovlist->processed_overlays,
+			&ovlist32->processed_overlays,
+			 sizeof(ovlist32->processed_overlays)))
+		return -EFAULT;
+
+	if (get_user(data, &ovlist32->overlay_list)) {
+		ret = -EFAULT;
+		goto validate_exit;
+	}
+	for (i = 0; i < ovlist32->num_overlays; i++) {
+		/* data holds the 32-bit array base; fetch the i-th handle */
+		if (get_user(from_list_head, (__u32 *)data + i)) {
+			ret = -EFAULT;
+			goto validate_exit;
+		}
+
+		iter = compat_ptr(from_list_head);
+		if (__from_user_mdp_overlay(to_list_head[i],
+			       (struct mdp_overlay32 *)(iter))) {
+			ret = -EFAULT;
+			goto validate_exit;
+		}
+	}
+	ovlist->overlay_list = to_list_head;
+
+	return 0;
+
+validate_exit:
+	pr_err("%s: %u: copy error\n", __func__, __LINE__);
+	return -EFAULT;
+}
+
+/*
+ * __to_user_mdp_overlaylist() - write results back to the 32-bit overlay
+ * list: copy each native overlay (stored contiguously starting at
+ * l_ptr[0]) into the 32-bit overlay that the userspace pointer array
+ * references, then copy back flags and processed_overlays.
+ *
+ * NOTE(review): same direct user-pointer dereference of
+ * ovlist32->num_overlays in the loop condition as in the from_user
+ * variant -- should be fetched once with get_user.
+ */
+static int __to_user_mdp_overlaylist(struct mdp_overlay_list32 *ovlist32,
+				   struct mdp_overlay_list *ovlist,
+				   struct mdp_overlay **l_ptr)
+{
+	__u32 i, ret;
+	unsigned long data, data1;
+	struct mdp_overlay32 *temp;
+	struct mdp_overlay *l = l_ptr[0];
+
+	if (copy_in_user(&ovlist32->num_overlays, &ovlist->num_overlays,
+			 sizeof(ovlist32->num_overlays)))
+		return -EFAULT;
+
+	if (get_user(data, &ovlist32->overlay_list)) {
+		ret = -EFAULT;
+		pr_err("%s:%u: err\n", __func__, __LINE__);
+		goto validate_exit;
+	}
+
+	for (i = 0; i < ovlist32->num_overlays; i++) {
+		if (get_user(data1, (__u32 *)data + i)) {
+			ret = -EFAULT;
+			goto validate_exit;
+		}
+		temp = compat_ptr(data1);
+		if (__to_user_mdp_overlay(
+				(struct mdp_overlay32 *) temp,
+				l + i)) {
+			ret = -EFAULT;
+			goto validate_exit;
+		}
+	}
+
+	if (copy_in_user(&ovlist32->flags, &ovlist->flags,
+				sizeof(ovlist32->flags)))
+		return -EFAULT;
+
+	if (copy_in_user(&ovlist32->processed_overlays,
+			&ovlist->processed_overlays,
+			sizeof(ovlist32->processed_overlays)))
+		return -EFAULT;
+
+	return 0;
+
+validate_exit:
+	pr_err("%s: %u: copy error\n", __func__, __LINE__);
+	return -EFAULT;
+
+}
+
+/*
+ * mdss_compat_align_list() - point each entry of @list_ptr at its slot in
+ * the single user-space allocation laid out as:
+ *   [mdp_overlay_list][num_ov pointers][num_ov mdp_overlay structs]
+ * (void-pointer arithmetic here relies on the GNU extension used
+ * throughout the kernel.)
+ */
+void mdss_compat_align_list(void __user *total_mem_chunk,
+		struct mdp_overlay __user **list_ptr, u32 num_ov)
+{
+	int i = 0;
+	struct mdp_overlay __user *contig_overlays;
+
+	contig_overlays = total_mem_chunk + sizeof(struct mdp_overlay_list) +
+		 (num_ov * sizeof(struct mdp_overlay *));
+
+	for (i = 0; i < num_ov; i++)
+		list_ptr[i] = contig_overlays + i;
+}
+
+/*
+ * __pp_sspp_size() - total scratch size for one overlay's per-pipe pp
+ * payloads (IGC + PA + PCC + hist LUT), allocated after the overlay in
+ * the compat buffer; see __pp_sspp_set_offsets().
+ */
+static u32 __pp_sspp_size(void)
+{
+	u32 size = 0;
+	/* pick the largest of the revision when multiple revs are supported */
+	size = sizeof(struct mdp_igc_lut_data_v1_7);
+	size += sizeof(struct mdp_pa_data_v1_7);
+	size += sizeof(struct mdp_pcc_data_v1_7);
+	size += sizeof(struct mdp_hist_lut_data_v1_7);
+	return size;
+}
+
+/*
+ * __pp_sspp_set_offsets() - carve the region that follows @ov (sized by
+ * __pp_sspp_size()) into the four pp cfg_payload slots, in the fixed
+ * order IGC, PA, PCC, hist LUT.  @ov must have been allocated with the
+ * extra headroom.  Returns 0, or -EFAULT on a NULL pointer.
+ */
+static int __pp_sspp_set_offsets(struct mdp_overlay *ov)
+{
+	if (!ov) {
+		pr_err("invalid overlay pointer\n");
+		return -EFAULT;
+	}
+	ov->overlay_pp_cfg.igc_cfg.cfg_payload = (void *)((unsigned long)ov +
+				sizeof(struct mdp_overlay));
+	ov->overlay_pp_cfg.pa_v2_cfg_data.cfg_payload =
+		ov->overlay_pp_cfg.igc_cfg.cfg_payload +
+		sizeof(struct mdp_igc_lut_data_v1_7);
+	ov->overlay_pp_cfg.pcc_cfg_data.cfg_payload =
+		ov->overlay_pp_cfg.pa_v2_cfg_data.cfg_payload +
+		sizeof(struct mdp_pa_data_v1_7);
+	ov->overlay_pp_cfg.hist_lut_cfg.cfg_payload =
+		ov->overlay_pp_cfg.pcc_cfg_data.cfg_payload +
+		sizeof(struct mdp_pcc_data_v1_7);
+	return 0;
+}
+
+/*
+ * mdss_compat_overlay_ioctl() - compat dispatcher for the overlay-family
+ * ioctls.  GET/SET translate a single mdp_overlay32; PREPARE translates a
+ * whole overlay list into one contiguous compat_alloc_user_space() chunk;
+ * everything else is forwarded to mdss_fb_do_ioctl() unchanged.
+ */
+int mdss_compat_overlay_ioctl(struct fb_info *info, unsigned int cmd,
+			 unsigned long arg, struct file *file)
+{
+	struct mdp_overlay *ov, **layers_head;
+	struct mdp_overlay32 *ov32;
+	struct mdp_overlay_list __user *ovlist;
+	struct mdp_overlay_list32 __user *ovlist32;
+	size_t layers_refs_sz, layers_sz, prepare_sz;
+	void __user *total_mem_chunk;
+	uint32_t num_overlays;
+	uint32_t alloc_size = 0;
+	int ret;
+
+	if (!info || !info->par)
+		return -EINVAL;
+
+
+	switch (cmd) {
+	case MSMFB_MDP_PP:
+		ret = mdss_compat_pp_ioctl(info, cmd, arg, file);
+		break;
+	case MSMFB_HISTOGRAM_START:
+	case MSMFB_HISTOGRAM_STOP:
+	case MSMFB_HISTOGRAM:
+		ret = mdss_histo_compat_ioctl(info, cmd, arg, file);
+		break;
+	case MSMFB_OVERLAY_GET:
+		/* overlay + trailing pp payload scratch in one chunk */
+		alloc_size += sizeof(*ov) + __pp_sspp_size();
+		ov = compat_alloc_user_space(alloc_size);
+		if (!ov) {
+			pr_err("%s:%u: compat alloc error [%zu] bytes\n",
+				 __func__, __LINE__, sizeof(*ov));
+			return -EINVAL;
+		}
+		ov32 = compat_ptr(arg);
+		ret = __pp_sspp_set_offsets(ov);
+		if (ret) {
+			pr_err("setting the pp offsets failed ret %d\n", ret);
+			return ret;
+		}
+		ret = __from_user_mdp_overlay(ov, ov32);
+		if (ret)
+			pr_err("%s: compat mdp overlay failed\n", __func__);
+		else
+			ret = mdss_fb_do_ioctl(info, cmd,
+				(unsigned long) ov, file);
+		/*
+		 * NOTE(review): this unconditionally overwrites ret, so a
+		 * failure from __from_user_mdp_overlay()/mdss_fb_do_ioctl()
+		 * above is masked if the copy-back succeeds (contrast with
+		 * the SET case, which only copies back on success).
+		 */
+		ret = __to_user_mdp_overlay(ov32, ov);
+		break;
+	case MSMFB_OVERLAY_SET:
+		alloc_size += sizeof(*ov) + __pp_sspp_size();
+		ov = compat_alloc_user_space(alloc_size);
+		if (!ov) {
+			pr_err("%s:%u: compat alloc error [%zu] bytes\n",
+				 __func__, __LINE__, sizeof(*ov));
+			return -EINVAL;
+		}
+		ret = __pp_sspp_set_offsets(ov);
+		if (ret) {
+			pr_err("setting the pp offsets failed ret %d\n", ret);
+			return ret;
+		}
+		ov32 = compat_ptr(arg);
+		ret = __from_user_mdp_overlay(ov, ov32);
+		if (ret) {
+			pr_err("%s: compat mdp overlay failed\n", __func__);
+		} else {
+			ret = mdss_fb_do_ioctl(info, cmd,
+				(unsigned long) ov, file);
+			ret = __to_user_mdp_overlay(ov32, ov);
+		}
+		break;
+	case MSMFB_OVERLAY_PREPARE:
+		ovlist32 = compat_ptr(arg);
+		if (get_user(num_overlays, &ovlist32->num_overlays)) {
+			pr_err("compat mdp prepare failed: invalid arg\n");
+			return -EFAULT;
+		}
+
+		/* bound the count before it sizes the allocation below */
+		if (num_overlays >= OVERLAY_MAX) {
+			pr_err("%s: No: of overlays exceeds max\n", __func__);
+			return -EINVAL;
+		}
+
+		layers_sz = num_overlays * sizeof(struct mdp_overlay);
+		prepare_sz = sizeof(struct mdp_overlay_list);
+		layers_refs_sz = num_overlays * sizeof(struct mdp_overlay *);
+
+		/* layout: [list header][pointer array][overlay structs] */
+		total_mem_chunk = compat_alloc_user_space(
+			prepare_sz + layers_refs_sz + layers_sz);
+		if (!total_mem_chunk) {
+			pr_err("%s:%u: compat alloc error [%zu] bytes\n",
+				 __func__, __LINE__,
+				 layers_refs_sz + layers_sz + prepare_sz);
+			return -EINVAL;
+		}
+
+		layers_head = total_mem_chunk + prepare_sz;
+		mdss_compat_align_list(total_mem_chunk, layers_head,
+					num_overlays);
+		ovlist = (struct mdp_overlay_list *)total_mem_chunk;
+
+		ret = __from_user_mdp_overlaylist(ovlist, ovlist32,
+					layers_head);
+		if (ret) {
+			pr_err("compat mdp overlaylist failed\n");
+		} else {
+			ret = mdss_fb_do_ioctl(info, cmd,
+				(unsigned long) ovlist, file);
+			if (!ret)
+				ret = __to_user_mdp_overlaylist(ovlist32,
+							 ovlist, layers_head);
+		}
+		break;
+	case MSMFB_OVERLAY_UNSET:
+	case MSMFB_OVERLAY_PLAY:
+	case MSMFB_OVERLAY_VSYNC_CTRL:
+	case MSMFB_METADATA_SET:
+	case MSMFB_METADATA_GET:
+	default:
+		/* no 32/64-bit layout difference; pass through untouched */
+		pr_debug("%s: overlay ioctl cmd=[%u]\n", __func__, cmd);
+		ret = mdss_fb_do_ioctl(info, cmd, (unsigned long) arg, file);
+		break;
+	}
+	return ret;
+}
+
+/*
+ * mdss_fb_compat_ioctl() - MDSS Framebuffer compat ioctl function
+ * @info:	pointer to framebuffer info
+ * @cmd:	ioctl command
+ * @arg:	argument to ioctl
+ *
+ * This function adds the compat translation layer for framebuffer
+ * ioctls to allow 32-bit userspace call ioctls on the mdss
+ * framebuffer device driven in 64-bit kernel.
+ */
+int mdss_fb_compat_ioctl(struct fb_info *info, unsigned int cmd,
+			 unsigned long arg, struct file *file)
+{
+	int ret;
+
+	if (!info || !info->par)
+		return -EINVAL;
+
+	/* normalize the compat ioctl number to its native equivalent */
+	cmd = __do_compat_ioctl_nr(cmd);
+	switch (cmd) {
+	case MSMFB_CURSOR:
+		ret = mdss_fb_compat_cursor(info, cmd, arg, file);
+		break;
+	case MSMFB_SET_LUT:
+		ret = mdss_fb_compat_set_lut(info, arg, file);
+		break;
+	case MSMFB_BUFFER_SYNC:
+		ret = mdss_fb_compat_buf_sync(info, cmd, arg, file);
+		break;
+	case MSMFB_ATOMIC_COMMIT:
+		ret = __compat_atomic_commit(info, cmd, arg, file);
+		break;
+	case MSMFB_ASYNC_POSITION_UPDATE:
+		ret = __compat_async_position_update(info, cmd, arg);
+		break;
+	case MSMFB_MDP_PP:
+	case MSMFB_HISTOGRAM_START:
+	case MSMFB_HISTOGRAM_STOP:
+	case MSMFB_HISTOGRAM:
+	case MSMFB_OVERLAY_GET:
+	case MSMFB_OVERLAY_SET:
+	case MSMFB_OVERLAY_UNSET:
+	case MSMFB_OVERLAY_PLAY:
+	case MSMFB_OVERLAY_VSYNC_CTRL:
+	case MSMFB_METADATA_SET:
+	case MSMFB_METADATA_GET:
+	case MSMFB_OVERLAY_PREPARE:
+		ret = mdss_compat_overlay_ioctl(info, cmd, arg, file);
+		break;
+	case MSMFB_NOTIFY_UPDATE:
+	case MSMFB_DISPLAY_COMMIT:
+	default:
+		/* layout-compatible commands go straight through */
+		ret = mdss_fb_do_ioctl(info, cmd, arg, file);
+		break;
+	}
+
+	/*
+	 * NOTE(review): kernel code normally uses ENOTSUPP/EOPNOTSUPP;
+	 * ENOTSUP is the userspace (POSIX) spelling -- confirm which errno
+	 * the handlers above actually return.
+	 */
+	if (ret == -ENOTSUP)
+		pr_err("%s: unsupported ioctl\n", __func__);
+	else if (ret)
+		pr_debug("%s: ioctl err cmd=%u ret=%d\n", __func__, cmd, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL(mdss_fb_compat_ioctl);
diff --git a/drivers/video/fbdev/msm/mdss_compat_utils.h b/drivers/video/fbdev/msm/mdss_compat_utils.h
new file mode 100644
index 0000000..9dcf6d4
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_compat_utils.h
@@ -0,0 +1,555 @@
+/*
+ * Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_COMPAT_UTILS_H
+#define MDSS_COMPAT_UTILS_H
+
+/*
+ * To allow proper structure padding for 64bit/32bit target
+ */
+/*
+ * NOTE(review): compilers predefine __LP64__ (with trailing underscores);
+ * "__LP64" is likely never defined, so this always picks the pad of 3.
+ * Changing it would alter a struct layout shared with userspace, so it
+ * must be fixed in lockstep with the consumers of this macro -- confirm
+ * before touching.
+ */
+#ifdef __LP64
+#define MDP_LAYER_COMMIT_V1_PAD 2
+#else
+#define MDP_LAYER_COMMIT_V1_PAD 3
+#endif
+
+/* 32-bit layout of the buffer-sync request; fd arrays as compat_caddr_t */
+struct mdp_buf_sync32 {
+	u32		flags;
+	u32		acq_fen_fd_cnt;
+	u32		session_id;
+	compat_caddr_t	acq_fen_fd;
+	compat_caddr_t	rel_fen_fd;
+	compat_caddr_t  retire_fen_fd;
+};
+
+/* 32-bit fb_cmap: channel tables carried as compat pointers */
+struct fb_cmap32 {
+	u32		start;
+	u32		len;
+	compat_caddr_t	red;
+	compat_caddr_t	green;
+	compat_caddr_t	blue;
+	compat_caddr_t	transp;
+};
+
+/* 32-bit fb_image for the cursor path */
+struct fb_image32 {
+	u32 dx;
+	u32 dy;
+	u32 width;
+	u32 height;
+	u32 fg_color;
+	u32 bg_color;
+	u8 depth;
+	compat_caddr_t data;
+	struct fb_cmap32 cmap;
+};
+
+/* 32-bit fb_cursor; embeds the 32-bit image/cmap variants above */
+struct fb_cursor32 {
+	u16 set;
+	u16 enable;
+	u16 rop;
+	compat_caddr_t mask;
+	struct fbcurpos	hot;
+	struct fb_image32 image;
+};
+
+/*
+ * NOTE(review): the four empty structs below are placeholders (empty
+ * structs are a GNU extension); presumably these ioctls need no compat
+ * translation -- confirm they are never dereferenced.
+ */
+struct mdp_ccs32 {
+};
+
+struct msmfb_overlay_blt32 {
+};
+
+struct msmfb_overlay_3d32 {
+};
+
+struct msmfb_mixer_info_req32 {
+};
+
+/* 32-bit metadata request; union members contain no pointers */
+struct msmfb_metadata32 {
+	uint32_t op;
+	uint32_t flags;
+	union {
+		struct mdp_misr misr_request;
+		struct mdp_blend_cfg blend_cfg;
+		struct mdp_mixer_cfg mixer_cfg;
+		uint32_t panel_frame_rate;
+		uint32_t video_info_code;
+		struct mdss_hw_caps caps;
+		uint8_t secure_en;
+	} data;
+};
+
+/* 32-bit histogram start request (MSMFB_HISTOGRAM_START) */
+struct mdp_histogram_start_req32 {
+	uint32_t block;
+	uint8_t frame_cnt;
+	uint8_t bit_mask;
+	uint16_t num_bins;
+};
+
+/* 32-bit histogram read-back; per-channel bin buffers as compat pointers */
+struct mdp_histogram_data32 {
+	uint32_t block;
+	uint32_t bin_cnt;
+	compat_caddr_t c0;
+	compat_caddr_t c1;
+	compat_caddr_t c2;
+	compat_caddr_t extra_info;
+};
+
+/* legacy PCC coefficient set (inline in mdp_pcc_cfg_data32) */
+struct mdp_pcc_coeff32 {
+	uint32_t c, r, g, b, rr, gg, bb, rg, gb, rb, rgb_0, rgb_1;
+};
+
+/* v1.7 PCC coefficient set */
+struct mdp_pcc_coeff_v1_7_32 {
+	uint32_t c, r, g, b, rg, gb, rb, rgb;
+};
+
+/* v1.7 PCC payload: one coefficient set per output channel, no pointers */
+struct mdp_pcc_data_v1_7_32 {
+	struct mdp_pcc_coeff_v1_7_32 r, g, b;
+};
+/* 32-bit PCC config: legacy inline coeffs plus versioned payload handle */
+struct mdp_pcc_cfg_data32 {
+	uint32_t version;
+	uint32_t block;
+	uint32_t ops;
+	struct mdp_pcc_coeff32 r, g, b;
+	compat_caddr_t cfg_payload;
+};
+
+struct mdp_csc_cfg32 {
+	/* flags for enable CSC, toggling RGB,YUV input/output */
+	uint32_t flags;
+	uint32_t csc_mv[9];
+	uint32_t csc_pre_bv[3];
+	uint32_t csc_post_bv[3];
+	uint32_t csc_pre_lv[6];
+	uint32_t csc_post_lv[6];
+};
+
+struct mdp_csc_cfg_data32 {
+	uint32_t block;
+	struct mdp_csc_cfg32 csc_data;
+};
+
+/* backlight scaling parameters */
+struct mdp_bl_scale_data32 {
+	uint32_t min_lvl;
+	uint32_t scale;
+};
+
+/* legacy PA memory-color region config */
+struct mdp_pa_mem_col_cfg32 {
+	uint32_t color_adjust_p0;
+	uint32_t color_adjust_p1;
+	uint32_t hue_region;
+	uint32_t sat_region;
+	uint32_t val_region;
+};
+
+/* legacy (inline) PAv2 data; six-zone curves as compat pointers */
+struct mdp_pa_v2_data32 {
+	/* Mask bits for PA features */
+	uint32_t flags;
+	uint32_t global_hue_adj;
+	uint32_t global_sat_adj;
+	uint32_t global_val_adj;
+	uint32_t global_cont_adj;
+	struct mdp_pa_mem_col_cfg32 skin_cfg;
+	struct mdp_pa_mem_col_cfg32 sky_cfg;
+	struct mdp_pa_mem_col_cfg32 fol_cfg;
+	uint32_t six_zone_len;
+	uint32_t six_zone_thresh;
+	compat_caddr_t six_zone_curve_p0;
+	compat_caddr_t six_zone_curve_p1;
+};
+
+/* v1.7 PA memory-color data (pointer-free; memcpy'd by the copy helpers) */
+struct mdp_pa_mem_col_data_v1_7_32 {
+	uint32_t color_adjust_p0;
+	uint32_t color_adjust_p1;
+	uint32_t color_adjust_p2;
+	uint32_t blend_gain;
+	uint8_t sat_hold;
+	uint8_t val_hold;
+	uint32_t hue_region;
+	uint32_t sat_region;
+	uint32_t val_region;
+};
+
+/* v1.7 PA payload; mirrored field-by-field by __copy_layer_pa_data_v1_7() */
+struct mdp_pa_data_v1_7_32 {
+	uint32_t mode;
+	uint32_t global_hue_adj;
+	uint32_t global_sat_adj;
+	uint32_t global_val_adj;
+	uint32_t global_cont_adj;
+	struct mdp_pa_mem_col_data_v1_7_32 skin_cfg;
+	struct mdp_pa_mem_col_data_v1_7_32 sky_cfg;
+	struct mdp_pa_mem_col_data_v1_7_32 fol_cfg;
+	uint32_t six_zone_thresh;
+	uint32_t six_zone_adj_p0;
+	uint32_t six_zone_adj_p1;
+	uint8_t six_zone_sat_hold;
+	uint8_t six_zone_val_hold;
+	uint32_t six_zone_len;
+	compat_caddr_t six_zone_curve_p0;
+	compat_caddr_t six_zone_curve_p1;
+};
+
+/* 32-bit PAv2 config wrapper: legacy inline data plus versioned payload */
+struct mdp_pa_v2_cfg_data32 {
+	uint32_t version;
+	uint32_t block;
+	uint32_t flags;
+	struct mdp_pa_v2_data32 pa_v2_data;
+	compat_caddr_t cfg_payload;
+};
+
+/* original (v1) PA adjustment values */
+struct mdp_pa_cfg32 {
+	uint32_t flags;
+	uint32_t hue_adj;
+	uint32_t sat_adj;
+	uint32_t val_adj;
+	uint32_t cont_adj;
+};
+
+struct mdp_pa_cfg_data32 {
+	uint32_t block;
+	struct mdp_pa_cfg32 pa_data;
+};
+
+/* v1.7 IGC LUT payload; table handles as compat pointers */
+struct mdp_igc_lut_data_v1_7_32 {
+	uint32_t table_fmt;
+	uint32_t len;
+	compat_caddr_t c0_c1_data;
+	compat_caddr_t c2_data;
+};
+
+/* RGB LUT config built on the 32-bit fb_cmap */
+struct mdp_rgb_lut_data32 {
+	uint32_t flags;
+	uint32_t lut_type;
+	struct fb_cmap32 cmap;
+};
+
+/* 32-bit IGC LUT config: legacy inline handles plus versioned payload */
+struct mdp_igc_lut_data32 {
+	uint32_t block;
+	uint32_t version;
+	uint32_t len, ops;
+	compat_caddr_t c0_c1_data;
+	compat_caddr_t c2_data;
+	compat_caddr_t cfg_payload;
+};
+
+/* v1.7 histogram-LUT payload */
+struct mdp_hist_lut_data_v1_7_32 {
+	uint32_t len;
+	compat_caddr_t data;
+};
+
+/* 32-bit histogram-LUT config: legacy inline handle plus versioned payload */
+struct mdp_hist_lut_data32 {
+	uint32_t block;
+	uint32_t version;
+	uint32_t hist_lut_first;
+	uint32_t ops;
+	uint32_t len;
+	compat_caddr_t data;
+	compat_caddr_t cfg_payload;
+};
+
+/* one segment of an area-reduced gamma curve */
+struct mdp_ar_gc_lut_data32 {
+	uint32_t x_start;
+	uint32_t slope;
+	uint32_t offset;
+};
+
+/* v1.7 panel gamma-correction payload */
+struct mdp_pgc_lut_data_v1_7_32 {
+	uint32_t  len;
+	compat_caddr_t c0_data;
+	compat_caddr_t c1_data;
+	compat_caddr_t c2_data;
+};
+
+/* 32-bit PGC config: per-channel stage tables plus versioned payload */
+struct mdp_pgc_lut_data32 {
+	uint32_t version;
+	uint32_t block;
+	uint32_t flags;
+	uint8_t num_r_stages;
+	uint8_t num_g_stages;
+	uint8_t num_b_stages;
+	compat_caddr_t r_data;
+	compat_caddr_t g_data;
+	compat_caddr_t b_data;
+	compat_caddr_t cfg_payload;
+};
+
+/* discriminated union over the LUT config variants; lut_type selects */
+struct mdp_lut_cfg_data32 {
+	uint32_t lut_type;
+	union {
+		struct mdp_igc_lut_data32 igc_lut_data;
+		struct mdp_pgc_lut_data32 pgc_lut_data;
+		struct mdp_hist_lut_data32 hist_lut_data;
+		struct mdp_rgb_lut_data32 rgb_lut_data;
+	} data;
+};
+
+/* QSEED scaler coefficient table config */
+struct mdp_qseed_cfg32 {
+	uint32_t table_num;
+	uint32_t ops;
+	uint32_t len;
+	compat_caddr_t data;
+};
+
+struct mdp_qseed_cfg_data32 {
+	uint32_t block;
+	struct mdp_qseed_cfg32 qseed_data;
+};
+
+/* per-channel dither depth config (pointer-free) */
+struct mdp_dither_cfg_data32 {
+	uint32_t block;
+	uint32_t flags;
+	uint32_t g_y_depth;
+	uint32_t r_cr_depth;
+	uint32_t b_cb_depth;
+};
+
+/* v1.7 gamut payload: per-table sizes plus compat data handles */
+struct mdp_gamut_data_v1_7_32 {
+	uint32_t mode;
+	uint32_t tbl_size[MDP_GAMUT_TABLE_NUM_V1_7];
+	compat_caddr_t c0_data[MDP_GAMUT_TABLE_NUM_V1_7];
+	compat_caddr_t c1_c2_data[MDP_GAMUT_TABLE_NUM_V1_7];
+	uint32_t  tbl_scale_off_sz[MDP_GAMUT_SCALE_OFF_TABLE_NUM];
+	compat_caddr_t scale_off_data[MDP_GAMUT_SCALE_OFF_TABLE_NUM];
+};
+
+/* 32-bit gamut config: legacy inline tables plus versioned payload */
+struct mdp_gamut_cfg_data32 {
+	uint32_t block;
+	uint32_t flags;
+	uint32_t version;
+	uint32_t gamut_first;
+	uint32_t tbl_size[MDP_GAMUT_TABLE_NUM];
+	compat_caddr_t r_tbl[MDP_GAMUT_TABLE_NUM];
+	compat_caddr_t g_tbl[MDP_GAMUT_TABLE_NUM];
+	compat_caddr_t b_tbl[MDP_GAMUT_TABLE_NUM];
+	compat_caddr_t cfg_payload;
+};
+
+/* register-level calibration poke: one address/data pair */
+struct mdp_calib_config_data32 {
+	uint32_t ops;
+	uint32_t addr;
+	uint32_t data;
+};
+
+struct mdp_calib_config_buffer32 {
+	uint32_t ops;
+	uint32_t size;
+	compat_caddr_t buffer;
+};
+
+struct mdp_calib_dcm_state32 {
+	uint32_t ops;
+	uint32_t dcm_state;
+};
+
+/* assertive-display (AD) one-time init parameters, compat layout */
+struct mdss_ad_init32 {
+	uint32_t asym_lut[33];
+	uint32_t color_corr_lut[33];
+	uint8_t i_control[2];
+	uint16_t black_lvl;
+	uint16_t white_lvl;
+	uint8_t var;
+	uint8_t limit_ampl;
+	uint8_t i_dither;
+	uint8_t slope_max;
+	uint8_t slope_min;
+	uint8_t dither_ctl;
+	uint8_t format;
+	uint8_t auto_size;
+	uint16_t frame_w;
+	uint16_t frame_h;
+	uint8_t logo_v;
+	uint8_t logo_h;
+	uint32_t alpha;
+	uint32_t alpha_base;
+	uint32_t bl_lin_len;
+	uint32_t bl_att_len;
+	compat_caddr_t bl_lin;
+	compat_caddr_t bl_lin_inv;
+	compat_caddr_t bl_att_lut;
+};
+
+/* assertive-display runtime configuration, compat layout */
+struct mdss_ad_cfg32 {
+	uint32_t mode;
+	uint32_t al_calib_lut[33];
+	uint16_t backlight_min;
+	uint16_t backlight_max;
+	uint16_t backlight_scale;
+	uint16_t amb_light_min;
+	uint16_t filter[2];
+	uint16_t calib[4];
+	uint8_t strength_limit;
+	uint8_t t_filter_recursion;
+	uint16_t stab_itr;
+	uint32_t bl_ctrl_mode;
+};
+
+/* ops uses standard MDP_PP_* flags */
+struct mdss_ad_init_cfg32 {
+	uint32_t ops;
+	union {
+		struct mdss_ad_init32 init;
+		struct mdss_ad_cfg32 cfg;
+	} params;
+};
+
+struct mdss_ad_input32 {
+	uint32_t mode;
+	union {
+		uint32_t amb_light;
+		uint32_t strength;
+		uint32_t calib_bl;
+	} in;
+	uint32_t output;
+};
+
+struct mdss_calib_cfg32 {
+	uint32_t ops;
+	uint32_t calib_mask;
+};
+
+struct mdp_histogram_cfg32 {
+	uint32_t ops;
+	uint32_t block;
+	uint8_t frame_cnt;
+	uint8_t bit_mask;
+	uint16_t num_bins;
+};
+
+struct mdp_sharp_cfg32 {
+	uint32_t flags;
+	uint32_t strength;
+	uint32_t edge_thr;
+	uint32_t smooth_thr;
+	uint32_t noise_thr;
+};
+
+/* per-pipe post-processing bundle embedded in mdp_overlay32 */
+struct mdp_overlay_pp_params32 {
+	uint32_t config_ops;
+	struct mdp_csc_cfg32 csc_cfg;
+	struct mdp_qseed_cfg32 qseed_cfg[2];
+	struct mdp_pa_cfg32 pa_cfg;
+	struct mdp_pa_v2_data32 pa_v2_cfg;
+	struct mdp_igc_lut_data32 igc_cfg;
+	struct mdp_sharp_cfg32 sharp_cfg;
+	struct mdp_histogram_cfg32 hist_cfg;
+	struct mdp_hist_lut_data32 hist_lut_cfg;
+	struct mdp_pa_v2_cfg_data32 pa_v2_cfg_data;
+	struct mdp_pcc_cfg_data32 pcc_cfg_data;
+};
+
+/* top-level MSMFB_MDP_PP ioctl payload; `op` selects the union arm */
+struct msmfb_mdp_pp32 {
+	uint32_t op;
+	union {
+		struct mdp_pcc_cfg_data32 pcc_cfg_data;
+		struct mdp_csc_cfg_data32 csc_cfg_data;
+		struct mdp_lut_cfg_data32 lut_cfg_data;
+		struct mdp_qseed_cfg_data32 qseed_cfg_data;
+		struct mdp_bl_scale_data32 bl_scale_data;
+		struct mdp_pa_cfg_data32 pa_cfg_data;
+		struct mdp_pa_v2_cfg_data32 pa_v2_cfg_data;
+		struct mdp_dither_cfg_data32 dither_cfg_data;
+		struct mdp_gamut_cfg_data32 gamut_cfg_data;
+		struct mdp_calib_config_data32 calib_cfg;
+		struct mdss_ad_init_cfg32 ad_init_cfg;
+		struct mdss_calib_cfg32 mdss_calib_cfg;
+		struct mdss_ad_input32 ad_input;
+		struct mdp_calib_config_buffer32 calib_buffer;
+		struct mdp_calib_dcm_state32 calib_dcm;
+	} data;
+};
+
+struct mdp_overlay32 {
+	struct msmfb_img src;
+	struct mdp_rect src_rect;
+	struct mdp_rect dst_rect;
+	uint32_t z_order;	/* stage number */
+	uint32_t is_fg;	/* control alpha & transp */
+	uint32_t alpha;
+	uint32_t blend_op;
+	uint32_t transp_mask;
+	uint32_t flags;
+	uint32_t pipe_type;
+	uint32_t id;
+	uint8_t priority;
+	uint32_t user_data[6];
+	uint32_t bg_color;
+	uint8_t horz_deci;
+	uint8_t vert_deci;
+	struct mdp_overlay_pp_params32 overlay_pp_cfg;
+	struct mdp_scale_data scale;
+	uint8_t color_space;
+	uint32_t frame_rate;
+};
+
+struct mdp_overlay_list32 {
+	uint32_t num_overlays;
+	compat_caddr_t overlay_list;
+	uint32_t flags;
+	uint32_t processed_overlays;
+};
+
+/* compat form of mdp_input_layer; scale/pp_info are user pointers */
+struct mdp_input_layer32 {
+	uint32_t		flags;
+	uint32_t		pipe_ndx;
+	uint8_t			horz_deci;
+	uint8_t			vert_deci;
+	uint8_t			alpha;
+	uint16_t		z_order;
+	uint32_t		transp_mask;
+	uint32_t		bg_color;
+	enum mdss_mdp_blend_op	blend_op;
+	enum mdp_color_space    color_space;
+	struct mdp_rect		src_rect;
+	struct mdp_rect		dst_rect;
+	compat_caddr_t		scale;
+	struct mdp_layer_buffer	buffer;
+	compat_caddr_t		pp_info;
+	int			error_code;
+	uint32_t		reserved[6];
+};
+
+struct mdp_output_layer32 {
+	uint32_t			flags;
+	uint32_t			writeback_ndx;
+	struct mdp_layer_buffer		buffer;
+	enum mdp_color_space            color_space;
+	uint32_t			reserved[5];
+};
+/* compat atomic-commit payload (v1); fences are plain fds */
+struct mdp_layer_commit_v1_32 {
+	uint32_t		flags;
+	int			release_fence;
+	struct mdp_rect		left_roi;
+	struct mdp_rect		right_roi;
+	compat_caddr_t		input_layers;
+	uint32_t		input_layer_cnt;
+	compat_caddr_t		output_layer;
+	int			retire_fence;
+	compat_caddr_t		dest_scaler;
+	uint32_t                dest_scaler_cnt;
+	compat_caddr_t		frc_info;
+	uint32_t		reserved[MDP_LAYER_COMMIT_V1_PAD];
+};
+
+struct mdp_layer_commit32 {
+	uint32_t version;
+	union {
+		struct mdp_layer_commit_v1_32 commit_v1;
+	};
+};
+
+struct mdp_position_update32 {
+	compat_caddr_t __user	*input_layers;
+	uint32_t input_layer_cnt;
+};
+
+#endif
diff --git a/drivers/video/fbdev/msm/mdss_dba_utils.c b/drivers/video/fbdev/msm/mdss_dba_utils.c
new file mode 100644
index 0000000..9edf2a8
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_dba_utils.c
@@ -0,0 +1,912 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <video/msm_dba.h>
+#include <linux/switch.h>
+
+#include "mdss_dba_utils.h"
+#include "mdss_hdmi_edid.h"
+#include "mdss_cec_core.h"
+#include "mdss_fb.h"
+
+/* standard cec buf size + 1 byte specific to driver */
+#define CEC_BUF_SIZE    (MAX_CEC_FRAME_SIZE + 1)
+#define MAX_SWITCH_NAME_SIZE        5
+#define MSM_DBA_MAX_PCLK 148500
+#define DEFAULT_VIDEO_RESOLUTION HDMI_VFRMT_640x480p60_4_3
+
+/* per-client state of the DBA (Display Bridge Abstract) utils layer */
+struct mdss_dba_utils_data {
+	/* ops supplied by the bridge driver at msm_dba_register_client() */
+	struct msm_dba_ops ops;
+	/* last cable state seen via the DBA callback */
+	bool hpd_state;
+	/* guards against notifying unregistered switch devices */
+	bool audio_switch_registered;
+	bool display_switch_registered;
+	struct switch_dev sdev_display;
+	struct switch_dev sdev_audio;
+	/* fb kobject under which the sysfs nodes live */
+	struct kobject *kobj;
+	struct mdss_panel_info *pinfo;
+	/* opaque handles owned by the respective modules */
+	void *dba_data;
+	void *edid_data;
+	void *timing_data;
+	void *cec_abst_data;
+	/* EDID buffer owned by the edid parser (see hdmi_edid_init) */
+	u8 *edid_buf;
+	u32 edid_buf_size;
+	/* raw CEC frame + 1 driver-specific trailing byte */
+	u8 cec_buf[CEC_BUF_SIZE];
+	struct cec_ops cops;
+	struct cec_cbs ccbs;
+	char disp_switch_name[MAX_SWITCH_NAME_SIZE];
+	/* video id code currently driven; exposed via sysfs video_mode */
+	u32 current_vic;
+	/* derived from EDID: sink is HDMI and has an audio block */
+	bool support_audio;
+};
+
+/*
+ * Walk device -> fb_info -> mfd -> panel_info to recover the DBA utils
+ * instance stashed in pinfo->dba_data at init time. Returns NULL when
+ * any link in the chain is missing.
+ */
+static struct mdss_dba_utils_data *mdss_dba_utils_get_data(
+	struct device *device)
+{
+	struct fb_info *fbi;
+	struct msm_fb_data_type *mfd;
+	struct mdss_panel_info *pinfo;
+
+	if (!device) {
+		pr_err("Invalid device data\n");
+		return NULL;
+	}
+
+	fbi = dev_get_drvdata(device);
+	if (!fbi) {
+		pr_err("Invalid fbi data\n");
+		return NULL;
+	}
+
+	mfd = (struct msm_fb_data_type *)fbi->par;
+	if (!mfd) {
+		pr_err("Invalid mfd data\n");
+		return NULL;
+	}
+
+	pinfo = mfd->panel_info;
+	if (!pinfo) {
+		pr_err("Invalid pinfo data\n");
+		return NULL;
+	}
+
+	return pinfo->dba_data;
+}
+
+/* Push a new cable state to the "hdmi" display switch device. */
+static void mdss_dba_utils_notify_display(
+	struct mdss_dba_utils_data *udata, int val)
+{
+	int prev_state;
+
+	if (!udata) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	if (!udata->display_switch_registered) {
+		pr_err("display switch not registered\n");
+		return;
+	}
+
+	prev_state = udata->sdev_display.state;
+	switch_set_state(&udata->sdev_display, val);
+
+	pr_debug("cable state %s %d\n",
+		(prev_state == udata->sdev_display.state) ?
+		"is same" : "switched to",
+		udata->sdev_display.state);
+}
+
+/* Push a new audio state to the "hdmi_audio" switch device. */
+static void mdss_dba_utils_notify_audio(
+	struct mdss_dba_utils_data *udata, int val)
+{
+	int prev_state;
+
+	if (!udata) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	if (!udata->audio_switch_registered) {
+		pr_err("audio switch not registered\n");
+		return;
+	}
+
+	prev_state = udata->sdev_audio.state;
+	switch_set_state(&udata->sdev_audio, val);
+
+	pr_debug("audio state %s %d\n",
+		(prev_state == udata->sdev_audio.state) ?
+		"is same" : "switched to",
+		udata->sdev_audio.state);
+}
+
+/* sysfs read: report current HPD (cable connected) state. */
+static ssize_t mdss_dba_utils_sysfs_rda_connected(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct mdss_dba_utils_data *udata;
+
+	if (!dev) {
+		pr_err("invalid device\n");
+		return -EINVAL;
+	}
+
+	udata = mdss_dba_utils_get_data(dev);
+	if (!udata) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	pr_debug("'%d'\n", udata->hpd_state);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", udata->hpd_state);
+}
+
+/* sysfs read: report the video id code currently driven. */
+static ssize_t mdss_dba_utils_sysfs_rda_video_mode(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct mdss_dba_utils_data *udata;
+
+	if (!dev) {
+		pr_debug("invalid device\n");
+		return -EINVAL;
+	}
+
+	udata = mdss_dba_utils_get_data(dev);
+	if (!udata) {
+		pr_debug("invalid input\n");
+		return -EINVAL;
+	}
+
+	pr_debug("'%d'\n", udata->current_vic);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", udata->current_vic);
+}
+
+/*
+ * sysfs write: software HPD control. "0" powers the bridge off;
+ * non-zero powers it on and re-probes the cable state (the result
+ * arrives asynchronously via the DBA callback).
+ */
+static ssize_t mdss_dba_utils_sysfs_wta_hpd(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct mdss_dba_utils_data *udata = NULL;
+	int rc, hpd;
+
+	udata = mdss_dba_utils_get_data(dev);
+	if (!udata) {
+		/* pr_fmt() already prefixes __func__; don't repeat it */
+		pr_debug("invalid input\n");
+		return -EINVAL;
+	}
+
+	rc = kstrtoint(buf, 10, &hpd);
+	if (rc) {
+		pr_debug("kstrtoint failed\n");
+		return -EINVAL;
+	}
+
+	pr_debug("set value: %d hpd state: %d\n", hpd, udata->hpd_state);
+
+	if (!hpd) {
+		if (udata->ops.power_on)
+			udata->ops.power_on(udata->dba_data, false, 0);
+		return count;
+	}
+
+	/* power on downstream device */
+	if (udata->ops.power_on)
+		udata->ops.power_on(udata->dba_data, true, 0);
+
+	/* check if cable is connected to bridge chip */
+	if (udata->ops.check_hpd)
+		udata->ops.check_hpd(udata->dba_data, 0);
+
+	return count;
+}
+
+/* sysfs read: mirror of "connected" — current HPD state. */
+static ssize_t mdss_dba_utils_sysfs_rda_hpd(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct mdss_dba_utils_data *udata;
+
+	if (!dev) {
+		pr_debug("invalid device\n");
+		return -EINVAL;
+	}
+
+	udata = mdss_dba_utils_get_data(dev);
+	if (!udata) {
+		pr_debug("invalid input\n");
+		return -EINVAL;
+	}
+
+	pr_debug("'%d'\n", udata->hpd_state);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", udata->hpd_state);
+}
+
+/* read-only: cable connect state */
+static DEVICE_ATTR(connected, 0444,
+		mdss_dba_utils_sysfs_rda_connected, NULL);
+
+/* read-only: active video id code */
+static DEVICE_ATTR(video_mode, 0444,
+		mdss_dba_utils_sysfs_rda_video_mode, NULL);
+
+/* read/write: software HPD query and control */
+static DEVICE_ATTR(hpd, 0644, mdss_dba_utils_sysfs_rda_hpd,
+		mdss_dba_utils_sysfs_wta_hpd);
+
+static struct attribute *mdss_dba_utils_fs_attrs[] = {
+	&dev_attr_connected.attr,
+	&dev_attr_video_mode.attr,
+	&dev_attr_hpd.attr,
+	NULL,
+};
+
+static struct attribute_group mdss_dba_utils_fs_attrs_group = {
+	.attrs = mdss_dba_utils_fs_attrs,
+};
+
+/* Expose connected/video_mode/hpd nodes under the given kobject. */
+static int mdss_dba_utils_sysfs_create(struct kobject *kobj)
+{
+	int rc;
+
+	if (!kobj) {
+		pr_err("invalid input\n");
+		return -ENODEV;
+	}
+
+	rc = sysfs_create_group(kobj, &mdss_dba_utils_fs_attrs_group);
+	if (rc)
+		pr_err("failed, rc=%d\n", rc);
+
+	return rc;
+}
+
+/* Tear down the attribute group created by mdss_dba_utils_sysfs_create(). */
+static void mdss_dba_utils_sysfs_remove(struct kobject *kobj)
+{
+	if (!kobj)
+		pr_err("invalid input\n");
+	else
+		sysfs_remove_group(kobj, &mdss_dba_utils_fs_attrs_group);
+}
+
+/*
+ * Decide whether the sink supports audio: it must be in HDMI (not DVI)
+ * mode and its EDID must advertise a non-empty audio data block.
+ */
+static bool mdss_dba_check_audio_support(struct mdss_dba_utils_data *udata)
+{
+	bool dvi_mode;
+	struct msm_hdmi_audio_edid_blk audio_blk;
+
+	if (!udata) {
+		/* pr_fmt() already prefixes __func__; don't repeat it */
+		pr_debug("Invalid input\n");
+		return false;
+	}
+	memset(&audio_blk, 0, sizeof(audio_blk));
+
+	/* check if sink is in DVI mode */
+	dvi_mode = !hdmi_edid_get_sink_mode(udata->edid_data);
+
+	/* get the audio block size info from EDID */
+	hdmi_edid_get_audio_blk(udata->edid_data, &audio_blk);
+
+	return !dvi_mode && audio_blk.audio_data_blk_size;
+}
+
+/*
+ * Callback invoked by the DBA framework on bridge-chip events.
+ * Handles hot-plug connect/disconnect (EDID read, audio-capability
+ * detection, switch/video notifications) and pending CEC reads.
+ * NOTE(review): pr_fmt() already prefixes __func__, so the "%s:"
+ * messages below double-print the function name.
+ */
+static void mdss_dba_utils_dba_cb(void *data, enum msm_dba_callback_event event)
+{
+	int ret = -EINVAL;
+	struct mdss_dba_utils_data *udata = data;
+	struct cec_msg msg = {0};
+	bool pluggable = false;
+	bool operands_present = false;
+	u32 no_of_operands, size, i;
+	u32 operands_offset = MAX_CEC_FRAME_SIZE - MAX_OPERAND_SIZE;
+	struct msm_hdmi_audio_edid_blk blk;
+
+	if (!udata) {
+		pr_err("Invalid data\n");
+		return;
+	}
+
+	pr_debug("event: %d\n", event);
+
+	if (udata->pinfo)
+		pluggable = udata->pinfo->is_pluggable;
+
+	switch (event) {
+	case MSM_DBA_CB_HPD_CONNECT:
+		/* ignore duplicate connect events */
+		if (udata->hpd_state)
+			break;
+		/* read and parse EDID; on success, push the audio block
+		 * down to the bridge if the sink supports audio
+		 */
+		if (udata->ops.get_raw_edid) {
+			ret = udata->ops.get_raw_edid(udata->dba_data,
+				udata->edid_buf_size, udata->edid_buf, 0);
+
+			if (!ret) {
+				hdmi_edid_parser(udata->edid_data);
+				/* check whether audio is supported or not */
+				udata->support_audio =
+					mdss_dba_check_audio_support(udata);
+				if (udata->support_audio) {
+					hdmi_edid_get_audio_blk(
+						udata->edid_data, &blk);
+					if (udata->ops.set_audio_block)
+						udata->ops.set_audio_block(
+							udata->dba_data,
+							sizeof(blk), &blk);
+				}
+			} else {
+				pr_err("failed to get edid%d\n", ret);
+			}
+		}
+
+		/* pluggable panels notify userspace via switch devices;
+		 * non-pluggable (bridge-as-primary) start video directly
+		 */
+		if (pluggable) {
+			mdss_dba_utils_notify_display(udata, 1);
+			if (udata->support_audio)
+				mdss_dba_utils_notify_audio(udata, 1);
+		} else {
+			mdss_dba_utils_video_on(udata, udata->pinfo);
+		}
+
+		udata->hpd_state = true;
+		break;
+
+	case MSM_DBA_CB_HPD_DISCONNECT:
+		if (!udata->hpd_state)
+			break;
+		if (pluggable) {
+			if (udata->support_audio)
+				mdss_dba_utils_notify_audio(udata, 0);
+			mdss_dba_utils_notify_display(udata, 0);
+		} else {
+			mdss_dba_utils_video_off(udata);
+		}
+
+		udata->hpd_state = false;
+		break;
+
+	case MSM_DBA_CB_CEC_READ_PENDING:
+		/* NOTE(review): if hdmi_cec_read is NULL we fall through
+		 * and parse stale cec_buf contents — confirm intended
+		 */
+		if (udata->ops.hdmi_cec_read) {
+			ret = udata->ops.hdmi_cec_read(
+				udata->dba_data,
+				&size,
+				udata->cec_buf, 0);
+
+			if (ret || !size || size > CEC_BUF_SIZE) {
+				pr_err("%s: cec read failed\n", __func__);
+				return;
+			}
+		}
+
+		/* prepare cec msg: byte 0 carries the address nibbles,
+		 * the driver-specific trailing byte carries frame size
+		 */
+		msg.recvr_id   = udata->cec_buf[0] & 0x0F;
+		msg.sender_id  = (udata->cec_buf[0] & 0xF0) >> 4;
+		msg.opcode     = udata->cec_buf[1];
+		msg.frame_size = (udata->cec_buf[MAX_CEC_FRAME_SIZE] & 0x1F);
+
+		operands_present = (msg.frame_size > operands_offset) &&
+			(msg.frame_size <= MAX_CEC_FRAME_SIZE);
+
+		if (operands_present) {
+			no_of_operands = msg.frame_size - operands_offset;
+
+			for (i = 0; i < no_of_operands; i++)
+				msg.operand[i] =
+					udata->cec_buf[operands_offset + i];
+		}
+
+		/* hand the frame to the CEC abstraction layer */
+		ret = udata->ccbs.msg_recv_notify(udata->ccbs.data, &msg);
+		if (ret)
+			pr_err("%s: failed to notify cec msg\n", __func__);
+		break;
+
+	default:
+		break;
+	}
+}
+
+/* Forward a CEC enable/disable request to the bridge driver. */
+static int mdss_dba_utils_cec_enable(void *data, bool enable)
+{
+	struct mdss_dba_utils_data *udata = data;
+
+	if (!udata) {
+		pr_err("%s: Invalid data\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!udata->ops.hdmi_cec_on)
+		return -EINVAL;
+
+	return udata->ops.hdmi_cec_on(udata->dba_data, enable, 0);
+}
+
+/*
+ * Serialize a cec_msg into wire format and hand it to the bridge
+ * driver: byte 0 carries initiator/destination nibbles, byte 1 the
+ * opcode, then up to MAX_OPERAND_SIZE operand bytes.
+ */
+static int mdss_dba_utils_send_cec_msg(void *data, struct cec_msg *msg)
+{
+	int ret = -EINVAL, i;
+	u32 operands_offset = MAX_CEC_FRAME_SIZE - MAX_OPERAND_SIZE;
+	struct mdss_dba_utils_data *udata = data;
+
+	u8 buf[MAX_CEC_FRAME_SIZE];
+
+	if (!udata || !msg) {
+		pr_err("%s: Invalid data\n", __func__);
+		return -EINVAL;
+	}
+
+	buf[0] = (msg->sender_id << 4) | msg->recvr_id;
+	buf[1] = msg->opcode;
+
+	/*
+	 * Copy operands only for frames that actually carry them; the
+	 * guard also prevents unsigned underflow of
+	 * (frame_size - operands_offset) for short frames, which would
+	 * otherwise copy uninitialized operand bytes.
+	 */
+	if (msg->frame_size > operands_offset) {
+		for (i = 0; i < MAX_OPERAND_SIZE &&
+			i < msg->frame_size - operands_offset; i++)
+			buf[operands_offset + i] = msg->operand[i];
+	}
+
+	if (udata->ops.hdmi_cec_write)
+		ret = udata->ops.hdmi_cec_write(udata->dba_data,
+			msg->frame_size, (char *)buf, 0);
+
+	return ret;
+}
+
+/*
+ * Register the "hdmi" and "hdmi_audio" switch devices used to notify
+ * userspace of display/audio cable state. Returns 0 on success or the
+ * first registration error (previously an audio registration failure
+ * was dropped and 0 returned). On failure the caller's deinit path
+ * unregisters whatever was registered, tracked via the *_registered
+ * flags.
+ */
+static int mdss_dba_utils_init_switch_dev(struct mdss_dba_utils_data *udata,
+	u32 fb_node)
+{
+	int rc = -EINVAL;
+
+	if (!udata) {
+		pr_err("invalid input\n");
+		goto end;
+	}
+
+	/* create switch device to update display modules */
+	udata->sdev_display.name = "hdmi";
+	rc = switch_dev_register(&udata->sdev_display);
+	if (rc) {
+		pr_err("display switch registration failed\n");
+		goto end;
+	}
+
+	udata->display_switch_registered = true;
+
+	/* create switch device to update audio modules */
+	udata->sdev_audio.name = "hdmi_audio";
+	rc = switch_dev_register(&udata->sdev_audio);
+	if (rc) {
+		pr_err("audio switch registration failed\n");
+		goto end;
+	}
+
+	udata->audio_switch_registered = true;
+end:
+	return rc;
+}
+
+/*
+ * Derive the HDMI video id code (VIC) matching the panel timings.
+ * Returns 0 when no code matches or on invalid input.
+ */
+static int mdss_dba_get_vic_panel_info(struct mdss_dba_utils_data *udata,
+					struct mdss_panel_info *pinfo)
+{
+	struct msm_hdmi_mode_timing_info timing;
+	struct hdmi_util_ds_data ds_data;
+	u32 h_total, v_total, vic = 0;
+
+	if (!udata || !pinfo) {
+		pr_err("%s: invalid input\n", __func__);
+		return 0;
+	}
+
+	/* zero-init: fields not derived from pinfo must be well defined
+	 * before being handed to hdmi_get_video_id_code()
+	 */
+	memset(&timing, 0, sizeof(timing));
+	memset(&ds_data, 0, sizeof(ds_data));
+
+	timing.active_h = pinfo->xres;
+	timing.back_porch_h = pinfo->lcdc.h_back_porch;
+	timing.front_porch_h = pinfo->lcdc.h_front_porch;
+	timing.pulse_width_h = pinfo->lcdc.h_pulse_width;
+	h_total = (timing.active_h + timing.back_porch_h +
+		timing.front_porch_h + timing.pulse_width_h);
+
+	timing.active_v = pinfo->yres;
+	timing.back_porch_v = pinfo->lcdc.v_back_porch;
+	timing.front_porch_v = pinfo->lcdc.v_front_porch;
+	timing.pulse_width_v = pinfo->lcdc.v_pulse_width;
+	v_total = (timing.active_v + timing.back_porch_v +
+		timing.front_porch_v + timing.pulse_width_v);
+
+	/* refresh rate in millihertz, pixel clock in kHz */
+	timing.refresh_rate = pinfo->mipi.frame_rate * 1000;
+	timing.pixel_freq = (h_total * v_total *
+				pinfo->mipi.frame_rate) / 1000;
+
+	ds_data.ds_registered = true;
+	ds_data.ds_max_clk = MSM_DBA_MAX_PCLK;
+
+	vic = hdmi_get_video_id_code(&timing, &ds_data);
+	pr_debug("%s: current vic code is %d\n", __func__, vic);
+
+	return vic;
+}
+
+/**
+ * mdss_dba_utils_video_on() - Allow clients to switch on the video
+ * @data: DBA utils instance which was allocated during registration
+ * @pinfo: detailed panel information like x, y, porch values etc
+ *
+ * This API is used to power on the video on device registered
+ * with DBA.
+ *
+ * Return: returns the result of the video on call on device.
+ */
+int mdss_dba_utils_video_on(void *data, struct mdss_panel_info *pinfo)
+{
+	struct mdss_dba_utils_data *ud = data;
+	struct msm_dba_video_cfg video_cfg;
+	int ret = -EINVAL;
+
+	if (!ud || !pinfo) {
+		pr_err("invalid input\n");
+		goto end;
+	}
+
+	memset(&video_cfg, 0, sizeof(video_cfg));
+
+	/* translate panel timings into the DBA video config */
+	video_cfg.h_active = pinfo->xres;
+	video_cfg.v_active = pinfo->yres;
+	video_cfg.h_front_porch = pinfo->lcdc.h_front_porch;
+	video_cfg.v_front_porch = pinfo->lcdc.v_front_porch;
+	video_cfg.h_back_porch = pinfo->lcdc.h_back_porch;
+	video_cfg.v_back_porch = pinfo->lcdc.v_back_porch;
+	video_cfg.h_pulse_width = pinfo->lcdc.h_pulse_width;
+	video_cfg.v_pulse_width = pinfo->lcdc.v_pulse_width;
+	/* panel clock is in Hz; DBA expects kHz */
+	video_cfg.pclk_khz = (unsigned long)pinfo->clk_rate / 1000;
+	/* HDMI vs DVI mode comes from the parsed EDID */
+	video_cfg.hdmi_mode = hdmi_edid_get_sink_mode(ud->edid_data);
+
+	/* Calculate number of DSI lanes configured */
+	video_cfg.num_of_input_lanes = 0;
+	if (pinfo->mipi.data_lane0)
+		video_cfg.num_of_input_lanes++;
+	if (pinfo->mipi.data_lane1)
+		video_cfg.num_of_input_lanes++;
+	if (pinfo->mipi.data_lane2)
+		video_cfg.num_of_input_lanes++;
+	if (pinfo->mipi.data_lane3)
+		video_cfg.num_of_input_lanes++;
+
+	/* Get scan information from EDID */
+	video_cfg.vic = mdss_dba_get_vic_panel_info(ud, pinfo);
+	ud->current_vic = video_cfg.vic;
+	video_cfg.scaninfo = hdmi_edid_get_sink_scaninfo(ud->edid_data,
+							video_cfg.vic);
+	if (ud->ops.video_on)
+		ret = ud->ops.video_on(ud->dba_data, true, &video_cfg, 0);
+
+end:
+	return ret;
+}
+
+/**
+ * mdss_dba_utils_video_off() - Allow clients to switch off the video
+ * @data: DBA utils instance which was allocated during registration
+ *
+ * This API is used to power off the video on device registered
+ * with DBA.
+ *
+ * Return: returns the result of the video off call on device.
+ */
+int mdss_dba_utils_video_off(void *data)
+{
+	struct mdss_dba_utils_data *ud = data;
+
+	if (!ud) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	if (!ud->ops.video_on)
+		return -EINVAL;
+
+	/* same entry point as video-on, with enable = false */
+	return ud->ops.video_on(ud->dba_data, false, NULL, 0);
+}
+
+/**
+ * mdss_dba_utils_hdcp_enable() - Allow clients to switch on HDCP.
+ * @data: DBA utils instance which was allocated during registration
+ * @enable: flag to enable or disable HDCP authentication
+ *
+ * This API is used to start the HDCP authentication process with the
+ * device registered with DBA.
+ */
+void mdss_dba_utils_hdcp_enable(void *data, bool enable)
+{
+	struct mdss_dba_utils_data *ud = data;
+
+	if (!ud) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	if (!ud->ops.hdcp_enable)
+		return;
+
+	ud->ops.hdcp_enable(ud->dba_data, enable, enable, 0);
+}
+
+/*
+ * Pick the DSI lane configuration for the current mode: look the mode
+ * up in the bridge chip's supported-timing table, falling back to the
+ * default lane count from DT when the table is absent or the mode is
+ * not listed.
+ */
+void mdss_dba_update_lane_cfg(struct mdss_panel_info *pinfo)
+{
+	struct mdss_dba_utils_data *dba_data;
+	struct mdss_dba_timing_info *tbl = NULL;
+	int lanes, i;
+
+	if (pinfo == NULL)
+		return;
+
+	/* default from DT; overridden below on a table match */
+	lanes = pinfo->mipi.default_lanes;
+
+	dba_data = (struct mdss_dba_utils_data *)(pinfo->dba_data);
+	if (dba_data != NULL)
+		tbl = (struct mdss_dba_timing_info *)(dba_data->timing_data);
+
+	if (tbl != NULL) {
+		/* table is terminated by an entry with xres == 0xffff */
+		for (i = 0; tbl[i].xres != 0xffff; i++) {
+			if (tbl[i].xres == pinfo->xres &&
+			    tbl[i].yres == pinfo->yres &&
+			    tbl[i].bpp == pinfo->bpp &&
+			    tbl[i].fps == pinfo->mipi.frame_rate) {
+				lanes = tbl[i].lanes;
+				break;
+			}
+		}
+	}
+
+	/* any value outside 1..3 gets the full 4-lane configuration */
+	if (lanes < 1 || lanes > 4)
+		lanes = 4;
+
+	pinfo->mipi.data_lane0 = 1;
+	pinfo->mipi.data_lane1 = (lanes >= 2) ? 1 : 0;
+	pinfo->mipi.data_lane2 = (lanes >= 3) ? 1 : 0;
+	pinfo->mipi.data_lane3 = (lanes >= 4) ? 1 : 0;
+}
+
+/**
+ * mdss_dba_utils_init() - Allow clients to register with DBA utils
+ * @uid: Initialization data for registration.
+ *
+ * This API lets the client to register with DBA Utils module.
+ * This allocate utils' instance and register with DBA (Display
+ * Bridge Abstract). Creates sysfs nodes and switch nodes to interact
+ * with other modules. Also registers with EDID parser to parse
+ * the EDID buffer.
+ *
+ * Return: Instance of DBA utils which needs to be sent as parameter
+ * when calling DBA utils APIs, or an ERR_PTR on failure.
+ */
+void *mdss_dba_utils_init(struct mdss_dba_utils_init_data *uid)
+{
+	struct hdmi_edid_init_data edid_init_data;
+	struct mdss_dba_utils_data *udata = NULL;
+	struct msm_dba_reg_info info;
+	struct cec_abstract_init_data cec_abst_init_data;
+	int ret = 0;
+
+	if (!uid) {
+		pr_err("invalid input\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	udata = kzalloc(sizeof(*udata), GFP_KERNEL);
+	if (!udata) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	memset(&edid_init_data, 0, sizeof(edid_init_data));
+	memset(&info, 0, sizeof(info));
+
+	/* initialize DBA registration data */
+	strlcpy(info.client_name, uid->client_name, MSM_DBA_CLIENT_NAME_LEN);
+	strlcpy(info.chip_name, uid->chip_name, MSM_DBA_CHIP_NAME_MAX_LEN);
+	info.instance_id = uid->instance_id;
+	info.cb = mdss_dba_utils_dba_cb;
+	info.cb_data = udata;
+
+	/* register client with DBA and get device's ops*/
+	if (IS_ENABLED(CONFIG_MSM_DBA)) {
+		udata->dba_data = msm_dba_register_client(&info, &udata->ops);
+		if (IS_ERR_OR_NULL(udata->dba_data)) {
+			pr_err("ds not configured\n");
+			ret = PTR_ERR(udata->dba_data);
+			/* a NULL return carries no error code; supply one
+			 * so we never hand back ERR_PTR(0) == NULL
+			 */
+			if (!ret)
+				ret = -ENODEV;
+			goto error;
+		}
+	} else {
+		pr_err("DBA not enabled\n");
+		ret = -ENODEV;
+		goto error;
+	}
+
+	/* create sysfs nodes for other modules to intract with utils */
+	ret = mdss_dba_utils_sysfs_create(uid->kobj);
+	if (ret) {
+		pr_err("sysfs creation failed\n");
+		goto error;
+	}
+
+	/* keep init data for future use */
+	udata->kobj = uid->kobj;
+	udata->pinfo = uid->pinfo;
+
+	/* Initialize EDID feature */
+	edid_init_data.kobj = uid->kobj;
+	edid_init_data.ds_data.ds_registered = true;
+	edid_init_data.ds_data.ds_max_clk = MSM_DBA_MAX_PCLK;
+	edid_init_data.max_pclk_khz = MSM_DBA_MAX_PCLK;
+
+	/* register with edid module for parsing edid buffer */
+	udata->edid_data = hdmi_edid_init(&edid_init_data);
+	if (!udata->edid_data) {
+		pr_err("edid parser init failed\n");
+		ret = -ENODEV;
+		goto error;
+	}
+
+	/* update edid data to retrieve it back in edid parser */
+	if (uid->pinfo) {
+		uid->pinfo->edid_data = udata->edid_data;
+		/* Initialize to default resolution */
+		hdmi_edid_set_video_resolution(uid->pinfo->edid_data,
+					DEFAULT_VIDEO_RESOLUTION, true);
+	}
+
+	/* get edid buffer from edid parser */
+	udata->edid_buf = edid_init_data.buf;
+	udata->edid_buf_size = edid_init_data.buf_size;
+
+	/* Initialize cec abstract layer and get callbacks */
+	udata->cops.send_msg = mdss_dba_utils_send_cec_msg;
+	udata->cops.enable   = mdss_dba_utils_cec_enable;
+	udata->cops.data     = udata;
+
+	/* initialize cec abstraction module */
+	cec_abst_init_data.kobj = uid->kobj;
+	cec_abst_init_data.ops  = &udata->cops;
+	cec_abst_init_data.cbs  = &udata->ccbs;
+
+	udata->cec_abst_data = cec_abstract_init(&cec_abst_init_data);
+	if (IS_ERR_OR_NULL(udata->cec_abst_data)) {
+		pr_err("error initializing cec abstract module\n");
+		/* was PTR_ERR(cec_abst_data) — an uninitialized local */
+		ret = PTR_ERR(udata->cec_abst_data);
+		if (!ret)
+			ret = -EINVAL;
+		goto error;
+	}
+
+	/* get the timing data for the adv chip */
+	if (udata->ops.get_supp_timing_info)
+		udata->timing_data = udata->ops.get_supp_timing_info();
+	else
+		udata->timing_data = NULL;
+
+	/* update cec data to retrieve it back in cec abstract module */
+	if (uid->pinfo) {
+		uid->pinfo->is_cec_supported = true;
+		uid->pinfo->cec_data = udata->cec_abst_data;
+
+		/*
+		 * TODO: Currently there is no support from HAL to send
+		 * HPD events to driver for usecase where bridge chip
+		 * is used as primary panel. Once support is added remove
+		 * this explicit calls to bridge chip driver.
+		 */
+		if (!uid->pinfo->is_pluggable) {
+			if (udata->ops.power_on && !(uid->cont_splash_enabled))
+				udata->ops.power_on(udata->dba_data, true, 0);
+			if (udata->ops.check_hpd)
+				udata->ops.check_hpd(udata->dba_data, 0);
+		} else {
+			/* register display and audio switch devices */
+			ret = mdss_dba_utils_init_switch_dev(udata,
+				uid->fb_node);
+			if (ret) {
+				pr_err("switch dev registration failed\n");
+				goto error;
+			}
+		}
+	}
+
+	return udata;
+
+error:
+	mdss_dba_utils_deinit(udata);
+	return ERR_PTR(ret);
+}
+
+/**
+ * mdss_dba_utils_deinit() - Allow clients to de-register with DBA utils
+ * @data: DBA utils data that was allocated during registration.
+ *
+ * This API will release all the resources allocated during registration
+ * and delete the DBA utils instance.
+ *
+ * Also used on the init error path, so every teardown step below must
+ * tolerate a partially-initialized instance.
+ */
+void mdss_dba_utils_deinit(void *data)
+{
+	struct mdss_dba_utils_data *udata = data;
+
+	if (!udata) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	if (!IS_ERR_OR_NULL(udata->cec_abst_data))
+		cec_abstract_deinit(udata->cec_abst_data);
+
+	if (udata->edid_data)
+		hdmi_edid_deinit(udata->edid_data);
+
+	/* clear back-references so the panel stops using freed handles */
+	if (udata->pinfo) {
+		udata->pinfo->edid_data = NULL;
+		udata->pinfo->is_cec_supported = false;
+	}
+
+	if (udata->audio_switch_registered)
+		switch_dev_unregister(&udata->sdev_audio);
+
+	if (udata->display_switch_registered)
+		switch_dev_unregister(&udata->sdev_display);
+
+	if (udata->kobj)
+		mdss_dba_utils_sysfs_remove(udata->kobj);
+
+	/* dba_data may hold an ERR_PTR from a failed registration */
+	if (IS_ENABLED(CONFIG_MSM_DBA)) {
+		if (!IS_ERR_OR_NULL(udata->dba_data))
+			msm_dba_deregister_client(udata->dba_data);
+	}
+
+	kfree(udata);
+}
diff --git a/drivers/video/fbdev/msm/mdss_dba_utils.h b/drivers/video/fbdev/msm/mdss_dba_utils.h
new file mode 100644
index 0000000..be18d2f
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_dba_utils.h
@@ -0,0 +1,50 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_DBA_UTILS__
+#define __MDSS_DBA_UTILS__
+
+#include <linux/types.h>
+
+#include "mdss_panel.h"
+
+/**
+ * struct mdss_dba_utils_init_data - Init data for registering with DBA utils.
+ * @kobj: An instance of Kobject for sysfs creation
+ * @instance_id: Instance ID of device registered with DBA
+ * @fb_node: Framebuffer node id; forwarded to switch-device setup
+ *           (currently unused there — TODO confirm intended use)
+ * @chip_name: Name of the device registered with DBA
+ * @client_name: Name of the client registering with DBA
+ * @pinfo: Detailed panel information
+ * @cont_splash_enabled: Flag to check if cont splash was enabled on bridge
+ *
+ * This structure's instance is needed to be passed as parameter
+ * to register API to let the DBA utils module configure and
+ * allocate an instance of DBA utils for the client.
+ */
+struct mdss_dba_utils_init_data {
+	struct kobject *kobj;
+	u32 instance_id;
+	u32 fb_node;
+	char *chip_name;
+	char *client_name;
+	struct mdss_panel_info *pinfo;
+	bool cont_splash_enabled;
+};
+
+int mdss_dba_utils_video_on(void *data, struct mdss_panel_info *pinfo);
+int mdss_dba_utils_video_off(void *data);
+void mdss_dba_utils_hdcp_enable(void *data, bool enable);
+
+void *mdss_dba_utils_init(struct mdss_dba_utils_init_data *init_data);
+void mdss_dba_utils_deinit(void *data);
+void mdss_dba_update_lane_cfg(struct mdss_panel_info *pinfo);
+#endif /* __MDSS_DBA_UTILS__ */
diff --git a/drivers/video/fbdev/msm/mdss_debug.c b/drivers/video/fbdev/msm/mdss_debug.c
new file mode 100644
index 0000000..19335772
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_debug.c
@@ -0,0 +1,1810 @@
+/* Copyright (c) 2009-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/debugfs.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+
+#include "mdss.h"
+#include "mdss_mdp.h"
+#include "mdss_mdp_hwio.h"
+#include "mdss_debug.h"
+#include "mdss_dsi.h"
+
+#define DEFAULT_BASE_REG_CNT 0x100
+#define GROUP_BYTES 4
+#define ROW_BYTES 16
+#define MAX_VSYNC_COUNT 0xFFFFFFF
+
+#define DEFAULT_READ_PANEL_POWER_MODE_REG 0x0A
+#define PANEL_REG_ADDR_LEN 8
+#define PANEL_REG_FORMAT_LEN 5
+#define PANEL_TX_MAX_BUF 256
+#define PANEL_CMD_MIN_TX_COUNT 2
+#define PANEL_DATA_NODE_LEN 80
+/* MDP3 HW Version */
+#define MDP_CORE_HW_VERSION 0x03050306
+
+/* Hex number + whitespace */
+#define NEXT_VALUE_OFFSET 3
+
+#define INVALID_XIN_ID     0xFF
+
+static DEFINE_MUTEX(mdss_debug_lock);
+
+static char panel_reg[2] = {DEFAULT_READ_PANEL_POWER_MODE_REG, 0x00};
+
+static int panel_debug_base_open(struct inode *inode, struct file *file)
+{
+	/* hand the per-node debug context to the read/write handlers */
+	file->private_data = inode->i_private;
+	/* this debugfs node is non-seekable */
+	file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+	return 0;
+}
+
+static int panel_debug_base_release(struct inode *inode, struct file *file)
+{
+	struct mdss_debug_base *dbg = file->private_data;
+
+	/* drop any dump buffer cached by a previous read */
+	mutex_lock(&mdss_debug_lock);
+	if (dbg && dbg->buf) {
+		kfree(dbg->buf);
+		dbg->buf = NULL;
+		dbg->buf_len = 0;
+	}
+	mutex_unlock(&mdss_debug_lock);
+	return 0;
+}
+
+/*
+ * Set the panel register offset/count used by subsequent "reg" node reads.
+ * Input format: "<hex offset> <decimal count>". The count is clamped so
+ * that off + cnt stays within dbg->max_offset.
+ */
+static ssize_t panel_debug_base_offset_write(struct file *file,
+		    const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mdss_debug_base *dbg = file->private_data;
+	u32 off = 0;
+	u32 cnt = DEFAULT_BASE_REG_CNT;
+	char buf[PANEL_TX_MAX_BUF] = {0x0};
+
+	if (!dbg)
+		return -ENODEV;
+
+	/* reject input that would not fit together with the terminator */
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = 0;	/* end of string */
+
+	if (sscanf(buf, "%x %u", &off, &cnt) != 2)
+		return -EFAULT;
+
+	if (off > dbg->max_offset)
+		return -EINVAL;
+
+	/* clamp count to the remaining range below max_offset */
+	if (cnt > (dbg->max_offset - off))
+		cnt = dbg->max_offset - off;
+
+	mutex_lock(&mdss_debug_lock);
+	dbg->off = off;
+	dbg->cnt = cnt;
+	mutex_unlock(&mdss_debug_lock);
+
+	pr_debug("offset=%x cnt=%d\n", off, cnt);
+
+	return count;
+}
+
+/*
+ * Report the currently configured panel register offset and count as
+ * "0xOFF CNT". Single-shot read: a non-zero *ppos returns 0 (EOF).
+ */
+static ssize_t panel_debug_base_offset_read(struct file *file,
+			char __user *buff, size_t count, loff_t *ppos)
+{
+	struct mdss_debug_base *dbg = file->private_data;
+	int len = 0;
+	char buf[PANEL_TX_MAX_BUF] = {0x0};
+
+	if (!dbg)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;	/* the end */
+
+	mutex_lock(&mdss_debug_lock);
+	len = snprintf(buf, sizeof(buf), "0x%02zx %zx\n", dbg->off, dbg->cnt);
+	if (len < 0 || len >= sizeof(buf)) {
+		mutex_unlock(&mdss_debug_lock);
+		return 0;
+	}
+
+	/* caller must supply a buffer at least sizeof(buf) bytes long */
+	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len)) {
+		mutex_unlock(&mdss_debug_lock);
+		return -EFAULT;
+	}
+
+	*ppos += len;	/* increase offset */
+
+	mutex_unlock(&mdss_debug_lock);
+	return len;
+}
+
+/*
+ * Send a DSI command to the panel. The user buffer is a sequence of
+ * whitespace-separated two-digit hex bytes; at least PANEL_CMD_MIN_TX_COUNT
+ * values are required. The DSI data type comes from the sibling
+ * "cmd_data_type" node (dbg->cmd_data_type).
+ */
+static ssize_t panel_debug_base_reg_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mdss_debug_base *dbg = file->private_data;
+	char buf[PANEL_TX_MAX_BUF] = {0x0};
+	char reg[PANEL_TX_MAX_BUF] = {0x0};
+	u32 len = 0, value = 0;
+	char *bufp;
+
+	struct mdss_data_type *mdata = mdss_res;
+	struct mdss_mdp_ctl *ctl = mdata->ctl_off + 0;
+	struct mdss_panel_data *panel_data = NULL;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+	struct dsi_cmd_desc dsi_write_cmd = {
+		{0/*data type*/, 1, 0, 0, 0, 0/* len */}, reg};
+	struct dcs_cmd_req cmdreq;
+
+	if (!dbg || !mdata)
+		return -ENODEV;
+
+	/* get command string from user */
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	/*
+	 * Pick the panel the same way the read path does: older MDP
+	 * revisions (and MDP3) keep panel data in mdss_res->pdata.
+	 */
+	if ((mdata->mdp_rev <= MDSS_MDP_HW_REV_105) ||
+			(mdata->mdp_rev == MDP_CORE_HW_VERSION))
+		panel_data = mdss_res->pdata;
+	else
+		panel_data = ctl->panel_data;
+
+	ctrl_pdata = container_of(panel_data,
+		struct mdss_dsi_ctrl_pdata, panel_data);
+
+	buf[count] = 0;	/* end of string */
+
+	bufp = buf;
+	/* End of a hex value in given string */
+	bufp[NEXT_VALUE_OFFSET - 1] = 0;
+	while (kstrtouint(bufp, 16, &value) == 0) {
+		reg[len++] = value;
+		if (len >= PANEL_TX_MAX_BUF) {
+			pr_err("wrong input reg len\n");
+			return -EFAULT;
+		}
+		bufp += NEXT_VALUE_OFFSET;
+		if ((bufp >= (buf + count)) || (bufp < buf)) {
+			pr_warn("%s,buffer out-of-bounds\n", __func__);
+			break;
+		}
+		/* End of a hex value in given string */
+		if ((bufp + NEXT_VALUE_OFFSET - 1) < (buf + count))
+			bufp[NEXT_VALUE_OFFSET - 1] = 0;
+	}
+	if (len < PANEL_CMD_MIN_TX_COUNT) {
+		pr_err("wrong input reg len\n");
+		return -EFAULT;
+	}
+
+	/* put command to cmdlist */
+	dsi_write_cmd.dchdr.dtype = dbg->cmd_data_type;
+	dsi_write_cmd.dchdr.dlen = len;
+	dsi_write_cmd.payload = reg;
+
+	/*
+	 * Zero the request first: only a subset of its fields is set below
+	 * and the DSI cmdlist code must not see stack garbage in the rest.
+	 */
+	memset(&cmdreq, 0, sizeof(cmdreq));
+	cmdreq.cmds = &dsi_write_cmd;
+	cmdreq.cmds_cnt = 1;
+	cmdreq.flags = CMD_REQ_COMMIT;
+	cmdreq.rlen = 0;
+	cmdreq.cb = NULL;
+
+	/*
+	 * The original re-derived ctrl_pdata from ctl->panel_data here
+	 * unconditionally, discarding the MDP-revision-dependent selection
+	 * above and diverging from panel_debug_base_reg_read(); keep the
+	 * already-selected controller instead.
+	 */
+	if (mdata->debug_inf.debug_enable_clock)
+		mdata->debug_inf.debug_enable_clock(1);
+
+	if (ctrl_pdata->ctrl_state & CTRL_STATE_PANEL_INIT)
+		mdss_dsi_cmdlist_put(ctrl_pdata, &cmdreq);
+
+	if (mdata->debug_inf.debug_enable_clock)
+		mdata->debug_inf.debug_enable_clock(0);
+
+	return count;
+}
+
+/*
+ * Read dbg->cnt bytes from the panel starting at register dbg->off via a
+ * DCS read, and return them formatted as "0xOFF: 0xV1 0xV2 ...".
+ */
+static ssize_t panel_debug_base_reg_read(struct file *file,
+			char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mdss_debug_base *dbg = file->private_data;
+	u32 i, len = 0, reg_buf_len = 0;
+	char *panel_reg_buf, *rx_buf;
+	struct mdss_data_type *mdata = mdss_res;
+	struct mdss_mdp_ctl *ctl = mdata->ctl_off + 0;
+	struct mdss_panel_data *panel_data = NULL;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	int rc = -EFAULT;
+
+	if (!dbg)
+		return -ENODEV;
+
+	mutex_lock(&mdss_debug_lock);
+	if (!dbg->cnt) {
+		mutex_unlock(&mdss_debug_lock);
+		return 0;
+	}
+
+	if (*ppos) {
+		mutex_unlock(&mdss_debug_lock);
+		return 0;	/* the end */
+	}
+
+	/* '0x' + 2 digit + blank = 5 bytes for each number */
+	reg_buf_len = (dbg->cnt * PANEL_REG_FORMAT_LEN)
+		    + PANEL_REG_ADDR_LEN + 1;
+	rx_buf = kzalloc(dbg->cnt, GFP_KERNEL);
+	panel_reg_buf = kzalloc(reg_buf_len, GFP_KERNEL);
+
+	if (!rx_buf || !panel_reg_buf) {
+		pr_err("not enough memory to hold panel reg dump\n");
+		rc = -ENOMEM;
+		goto read_reg_fail;
+	}
+
+	if (mdata->debug_inf.debug_enable_clock)
+		mdata->debug_inf.debug_enable_clock(1);
+
+	panel_reg[0] = dbg->off;
+	/* older MDP revisions (and MDP3) keep panel data in mdss_res */
+	if ((mdata->mdp_rev <= MDSS_MDP_HW_REV_105) ||
+			(mdata->mdp_rev == MDP_CORE_HW_VERSION))
+		panel_data = mdss_res->pdata;
+	else
+		panel_data = ctl->panel_data;
+
+	ctrl_pdata = container_of(panel_data,
+			struct mdss_dsi_ctrl_pdata, panel_data);
+
+	mdss_dsi_panel_cmd_read(ctrl_pdata, panel_reg[0],
+		panel_reg[1], NULL, rx_buf, dbg->cnt);
+
+	len = scnprintf(panel_reg_buf, reg_buf_len, "0x%02zx: ", dbg->off);
+
+	/*
+	 * Print each byte as unsigned: rx_buf is char, and letting values
+	 * >= 0x80 sign-extend would print "0xffffffXX", overrunning the
+	 * 5-bytes-per-register budget used to size panel_reg_buf above.
+	 */
+	for (i = 0; (len < reg_buf_len) && (i < ctrl_pdata->rx_len); i++)
+		len += scnprintf(panel_reg_buf + len, reg_buf_len - len,
+				"0x%02x ", (u8)rx_buf[i]);
+
+	if (len)
+		panel_reg_buf[len - 1] = '\n';
+
+	if (mdata->debug_inf.debug_enable_clock)
+		mdata->debug_inf.debug_enable_clock(0);
+
+	if ((count < reg_buf_len)
+			|| (copy_to_user(user_buf, panel_reg_buf, len)))
+		goto read_reg_fail;
+
+	kfree(rx_buf);
+	kfree(panel_reg_buf);
+
+	*ppos += len;	/* increase offset */
+	mutex_unlock(&mdss_debug_lock);
+	return len;
+
+read_reg_fail:
+	kfree(rx_buf);
+	kfree(panel_reg_buf);
+	mutex_unlock(&mdss_debug_lock);
+	return rc;
+}
+
+/* debugfs ops for the panel "off" node (offset/count configuration) */
+static const struct file_operations panel_off_fops = {
+	.open = panel_debug_base_open,
+	.release = panel_debug_base_release,
+	.read = panel_debug_base_offset_read,
+	.write = panel_debug_base_offset_write,
+};
+
+/* debugfs ops for the panel "reg" node (DCS register read/write) */
+static const struct file_operations panel_reg_fops = {
+	.open = panel_debug_base_open,
+	.release = panel_debug_base_release,
+	.read = panel_debug_base_reg_read,
+	.write = panel_debug_base_reg_write,
+};
+
+/*
+ * Create the panel debugfs nodes ("<name>_cmd_data_type", "<name>_off",
+ * "<name>_reg") under the mdp debugfs root and register the new debug
+ * base on mdd->base_list.
+ *
+ * Returns 0 on success, -ENODEV if debugfs setup fails or the debug
+ * infrastructure is absent, -ENOMEM on allocation failure.
+ */
+int panel_debug_register_base(const char *name, void __iomem *base,
+			     size_t max_offset)
+{
+	struct mdss_data_type *mdata = mdss_res;
+	struct mdss_debug_data *mdd;
+	struct mdss_debug_base *dbg;
+	struct dentry *ent_off, *ent_reg, *ent_type;
+	char dn[PANEL_DATA_NODE_LEN] = "";
+	int prefix_len = 0;
+
+	if (!mdata || !mdata->debug_inf.debug_data)
+		return -ENODEV;
+
+	mdd = mdata->debug_inf.debug_data;
+
+	dbg = kzalloc(sizeof(*dbg), GFP_KERNEL);
+	if (!dbg)
+		return -ENOMEM;
+
+	/* default: read one byte of the panel power-mode register (0x0a) */
+	dbg->base = base;
+	dbg->max_offset = max_offset;
+	dbg->off = 0x0a;
+	dbg->cnt = 0x01;
+	dbg->cmd_data_type = DTYPE_DCS_LWRITE;
+
+	/* node names are "<name>_<suffix>"; the prefix is reused below */
+	if (name)
+		prefix_len = snprintf(dn, sizeof(dn), "%s_", name);
+
+	strlcpy(dn + prefix_len, "cmd_data_type", sizeof(dn) - prefix_len);
+	ent_type = debugfs_create_x8(dn, 0644, mdd->root,
+		(u8 *)&dbg->cmd_data_type);
+
+	if (IS_ERR_OR_NULL(ent_type)) {
+		pr_err("debugfs_create_file: data_type fail\n");
+		goto type_fail;
+	}
+
+	strlcpy(dn + prefix_len, "off", sizeof(dn) - prefix_len);
+	ent_off = debugfs_create_file(dn, 0644, mdd->root,
+					dbg, &panel_off_fops);
+
+	if (IS_ERR_OR_NULL(ent_off)) {
+		pr_err("debugfs_create_file: offset fail\n");
+		goto off_fail;
+	}
+
+	strlcpy(dn + prefix_len, "reg", sizeof(dn) - prefix_len);
+	ent_reg = debugfs_create_file(dn, 0644, mdd->root,
+					dbg, &panel_reg_fops);
+	if (IS_ERR_OR_NULL(ent_reg)) {
+		pr_err("debugfs_create_file: reg fail\n");
+		goto reg_fail;
+	}
+
+	/* Initialize list to make sure check for null list will be valid */
+	INIT_LIST_HEAD(&dbg->dump_list);
+
+	list_add(&dbg->head, &mdd->base_list);
+
+	return 0;
+
+reg_fail:
+	debugfs_remove(ent_off);
+off_fail:
+	debugfs_remove(ent_type);
+type_fail:
+	kfree(dbg);
+	return -ENODEV;
+}
+
+static int mdss_debug_base_open(struct inode *inode, struct file *file)
+{
+	/* expose the per-block debug context to the read/write handlers */
+	file->private_data = inode->i_private;
+	/* this debugfs node is non-seekable */
+	file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+	return 0;
+}
+
+static int mdss_debug_base_release(struct inode *inode, struct file *file)
+{
+	struct mdss_debug_base *dbg = file->private_data;
+
+	/* discard the formatted dump cached by mdss_debug_base_reg_read() */
+	mutex_lock(&mdss_debug_lock);
+	if (dbg && dbg->buf) {
+		kfree(dbg->buf);
+		dbg->buf = NULL;
+		dbg->buf_len = 0;
+	}
+	mutex_unlock(&mdss_debug_lock);
+	return 0;
+}
+
+/*
+ * Set the MDSS register offset/count for subsequent "reg" node reads.
+ * Input format: "<hex offset> <hex count>"; the offset must be u32-aligned
+ * and the count is clamped to the range below dbg->max_offset.
+ */
+static ssize_t mdss_debug_base_offset_write(struct file *file,
+		    const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mdss_debug_base *dbg = file->private_data;
+	u32 off = 0;
+	u32 cnt = DEFAULT_BASE_REG_CNT;
+	char buf[24];
+
+	if (!dbg)
+		return -ENODEV;
+
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = 0;	/* end of string */
+
+	if (sscanf(buf, "%5x %x", &off, &cnt) != 2)
+		return -EFAULT;
+
+	/*
+	 * The original tested "off % sizeof(u32)" before sscanf() had
+	 * parsed anything, so it always checked the initial 0 and never
+	 * rejected unaligned offsets; validate after parsing instead.
+	 */
+	if (off % sizeof(u32))
+		return -EINVAL;
+
+	if (off > dbg->max_offset)
+		return -EINVAL;
+
+	if (cnt > (dbg->max_offset - off))
+		cnt = dbg->max_offset - off;
+
+	mutex_lock(&mdss_debug_lock);
+	dbg->off = off;
+	dbg->cnt = cnt;
+	mutex_unlock(&mdss_debug_lock);
+
+	pr_debug("offset=%x cnt=%x\n", off, cnt);
+
+	return count;
+}
+
+/*
+ * Report the currently configured register offset and count as
+ * "0xOFFSET CNT". Single-shot read: a non-zero *ppos returns 0 (EOF).
+ */
+static ssize_t mdss_debug_base_offset_read(struct file *file,
+			char __user *buff, size_t count, loff_t *ppos)
+{
+	struct mdss_debug_base *dbg = file->private_data;
+	int len = 0;
+	char buf[24] = {'\0'};
+
+	if (!dbg)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;	/* the end */
+
+	mutex_lock(&mdss_debug_lock);
+	len = snprintf(buf, sizeof(buf), "0x%08zx %zx\n", dbg->off, dbg->cnt);
+	if (len < 0 || len >= sizeof(buf)) {
+		mutex_unlock(&mdss_debug_lock);
+		return 0;
+	}
+
+	/* caller must supply a buffer at least sizeof(buf) bytes long */
+	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len)) {
+		mutex_unlock(&mdss_debug_lock);
+		return -EFAULT;
+	}
+
+	*ppos += len;	/* increase offset */
+
+	mutex_unlock(&mdss_debug_lock);
+	return len;
+}
+
+/*
+ * Write a single u32 to an MDSS register. Input format: "<hex off> <hex val>".
+ * The offset must be u32-aligned and below dbg->max_offset; MDP clocks are
+ * enabled around the access when a clock hook is registered.
+ */
+static ssize_t mdss_debug_base_reg_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mdss_debug_base *dbg = file->private_data;
+	struct mdss_data_type *mdata = mdss_res;
+	size_t off;
+	u32 data, cnt;
+	char buf[24];
+
+	if (!dbg || !mdata)
+		return -ENODEV;
+
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = 0;	/* end of string */
+
+	cnt = sscanf(buf, "%zx %x", &off, &data);
+
+	if (cnt < 2)
+		return -EFAULT;
+
+	/* register writes must be u32-aligned */
+	if (off % sizeof(u32))
+		return -EFAULT;
+
+	if (off >= dbg->max_offset)
+		return -EFAULT;
+
+	if (mdata->debug_inf.debug_enable_clock)
+		mdata->debug_inf.debug_enable_clock(1);
+
+	writel_relaxed(data, dbg->base + off);
+
+	if (mdata->debug_inf.debug_enable_clock)
+		mdata->debug_inf.debug_enable_clock(0);
+
+	pr_debug("addr=%zx data=%x\n", off, data);
+
+	return count;
+}
+
+/*
+ * Hex-dump dbg->cnt bytes of registers starting at dbg->off. The formatted
+ * dump is built once into dbg->buf (freed on release or re-armed via the
+ * "off" node) and then served across successive reads via *ppos.
+ */
+static ssize_t mdss_debug_base_reg_read(struct file *file,
+			char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mdss_debug_base *dbg = file->private_data;
+	struct mdss_data_type *mdata = mdss_res;
+	size_t len;
+
+	if (!dbg || !mdata) {
+		pr_err("invalid handle\n");
+		return -ENODEV;
+	}
+
+	mutex_lock(&mdss_debug_lock);
+
+	if (!dbg->buf) {
+		char dump_buf[64];
+		char *ptr;
+		int cnt, tot;
+
+		dbg->buf_len = sizeof(dump_buf) *
+			DIV_ROUND_UP(dbg->cnt, ROW_BYTES);
+		dbg->buf = kzalloc(dbg->buf_len, GFP_KERNEL);
+
+		if (!dbg->buf) {
+			mutex_unlock(&mdss_debug_lock);
+			return -ENOMEM;
+		}
+
+		if (dbg->off % sizeof(u32)) {
+			/*
+			 * The original returned here while still holding
+			 * mdss_debug_lock (deadlocking every later access)
+			 * and left the freshly allocated, empty dbg->buf
+			 * behind; release both before failing.
+			 */
+			kfree(dbg->buf);
+			dbg->buf = NULL;
+			mutex_unlock(&mdss_debug_lock);
+			return -EFAULT;
+		}
+
+		ptr = dbg->base + dbg->off;
+		tot = 0;
+
+		if (mdata->debug_inf.debug_enable_clock)
+			mdata->debug_inf.debug_enable_clock(1);
+
+		/* one "0xOFFSET: <16 bytes>" line per ROW_BYTES of registers */
+		for (cnt = dbg->cnt; cnt > 0; cnt -= ROW_BYTES) {
+			hex_dump_to_buffer(ptr, min(cnt, ROW_BYTES),
+					   ROW_BYTES, GROUP_BYTES, dump_buf,
+					   sizeof(dump_buf), false);
+			len = scnprintf(dbg->buf + tot, dbg->buf_len - tot,
+					"0x%08x: %s\n",
+					((int) (unsigned long) ptr) -
+					((int) (unsigned long) dbg->base),
+					dump_buf);
+
+			ptr += ROW_BYTES;
+			tot += len;
+			if (tot >= dbg->buf_len)
+				break;
+		}
+		if (mdata->debug_inf.debug_enable_clock)
+			mdata->debug_inf.debug_enable_clock(0);
+
+		dbg->buf_len = tot;
+	}
+
+	if (*ppos >= dbg->buf_len) {
+		mutex_unlock(&mdss_debug_lock);
+		return 0; /* done reading */
+	}
+
+	len = min(count, dbg->buf_len - (size_t) *ppos);
+	if (copy_to_user(user_buf, dbg->buf + *ppos, len)) {
+		pr_err("failed to copy to user\n");
+		mutex_unlock(&mdss_debug_lock);
+		return -EFAULT;
+	}
+
+	*ppos += len; /* increase offset */
+
+	mutex_unlock(&mdss_debug_lock);
+	return len;
+}
+
+/* debugfs ops for the MDSS "off" node (offset/count configuration) */
+static const struct file_operations mdss_off_fops = {
+	.open = mdss_debug_base_open,
+	.release = mdss_debug_base_release,
+	.read = mdss_debug_base_offset_read,
+	.write = mdss_debug_base_offset_write,
+};
+
+/* debugfs ops for the MDSS "reg" node (raw register read/write) */
+static const struct file_operations mdss_reg_fops = {
+	.open = mdss_debug_base_open,
+	.release = mdss_debug_base_release,
+	.read = mdss_debug_base_reg_read,
+	.write = mdss_debug_base_reg_write,
+};
+
+/*
+ * Create "<name>_off"/"<name>_reg" debugfs nodes (plain "off"/"reg" for the
+ * "mdp" block) under the mdp root, register the new debug base on
+ * mdd->base_list, and optionally return it through @dbg_blk.
+ *
+ * Returns 0 on success, -ENODEV on debugfs failure or missing debug
+ * infrastructure, -ENOMEM on allocation failure.
+ */
+int mdss_debug_register_base(const char *name, void __iomem *base,
+	size_t max_offset, struct mdss_debug_base **dbg_blk)
+{
+	struct mdss_data_type *mdata = mdss_res;
+	struct mdss_debug_data *mdd;
+	struct mdss_debug_base *dbg;
+	struct dentry *ent_off, *ent_reg;
+	char dn[80] = "";
+	int prefix_len = 0;
+
+	if (dbg_blk)
+		(*dbg_blk) = NULL;
+
+	if (!mdata || !mdata->debug_inf.debug_data)
+		return -ENODEV;
+
+	mdd = mdata->debug_inf.debug_data;
+
+	dbg = kzalloc(sizeof(*dbg), GFP_KERNEL);
+	if (!dbg)
+		return -ENOMEM;
+
+	if (name)
+		strlcpy(dbg->name, name, sizeof(dbg->name));
+	dbg->base = base;
+	dbg->max_offset = max_offset;
+	dbg->off = 0;
+	dbg->cnt = DEFAULT_BASE_REG_CNT;
+	dbg->reg_dump = NULL;
+
+	/* the top-level "mdp" block gets unprefixed node names */
+	if (name && strcmp(name, "mdp"))
+		prefix_len = snprintf(dn, sizeof(dn), "%s_", name);
+
+	strlcpy(dn + prefix_len, "off", sizeof(dn) - prefix_len);
+	ent_off = debugfs_create_file(dn, 0644, mdd->root, dbg, &mdss_off_fops);
+	if (IS_ERR_OR_NULL(ent_off)) {
+		pr_err("debugfs_create_file: offset fail\n");
+		goto off_fail;
+	}
+
+	strlcpy(dn + prefix_len, "reg", sizeof(dn) - prefix_len);
+	ent_reg = debugfs_create_file(dn, 0644, mdd->root, dbg, &mdss_reg_fops);
+	if (IS_ERR_OR_NULL(ent_reg)) {
+		pr_err("debugfs_create_file: reg fail\n");
+		goto reg_fail;
+	}
+
+	/* Initialize list to make sure check for null list will be valid */
+	INIT_LIST_HEAD(&dbg->dump_list);
+
+	list_add(&dbg->head, &mdd->base_list);
+
+	if (dbg_blk)
+		(*dbg_blk) = dbg;
+
+	return 0;
+reg_fail:
+	debugfs_remove(ent_off);
+off_fail:
+	kfree(dbg);
+	return -ENODEV;
+}
+
+/*
+ * Copy the @index-th string of @name_prop into @range_name; fall back to
+ * the "<no named range>" placeholder when the index is out of range or
+ * the DT read fails.
+ */
+static void parse_dump_range_name(struct device_node *node,
+	int total_names, int index, char *range_name, u32 range_size,
+	const char *name_prop)
+{
+	const char *st;
+	int rc;
+
+	if ((total_names <= 0) || (index >= total_names))
+		goto error;
+
+	rc = of_property_read_string_index(node, name_prop, index, &st);
+	if (rc) {
+		pr_err("error reading name. index=%d, rc=%d\n", index, rc);
+		goto error;
+	}
+
+	snprintf(range_name, range_size, "%s", st);
+	return;
+
+error:
+	snprintf(range_name, range_size, "%s", "<no named range>");
+}
+
+/*
+ * Build range_dump_node entries from the DT range array @arr ({start, end}
+ * pairs) and append them to @xlog_dump_list, attaching the matching range
+ * name (@name_prop) and xin id (@xin_prop) where available.
+ *
+ * Returns 0 on success, -ENOMEM if a node allocation fails (nodes already
+ * added remain on the list).
+ */
+static int parse_dt_xlog_dump_list(const u32 *arr, int count,
+	struct list_head *xlog_dump_list, struct platform_device *pdev,
+	const char *name_prop, const char *xin_prop)
+{
+	struct range_dump_node *xlog_node;
+	u32 len;
+	int i, total_names, rc;
+	/*
+	 * of_find_property() only writes the length when the property
+	 * exists, so total_xin_ids must be pre-initialized; the original
+	 * left it uninitialized and read stack garbage when @xin_prop
+	 * was absent.
+	 */
+	int total_xin_ids = 0;
+	u32 *offsets = NULL;
+
+	/* Get the property with the name of the ranges */
+	total_names = of_property_count_strings(pdev->dev.of_node,
+		name_prop);
+	if (total_names < 0) {
+		pr_warn("dump names not found. rc=%d\n", total_names);
+		total_names = 0;
+	}
+
+	of_find_property(pdev->dev.of_node, xin_prop, &total_xin_ids);
+	if (total_xin_ids > 0) {
+		total_xin_ids /= sizeof(u32);
+		offsets = kcalloc(total_xin_ids, sizeof(u32), GFP_KERNEL);
+		if (offsets) {
+			rc = of_property_read_u32_array(pdev->dev.of_node,
+				xin_prop, offsets, total_xin_ids);
+			if (rc)
+				total_xin_ids = 0;
+		} else {
+			total_xin_ids = 0;
+		}
+	} else {
+		total_xin_ids = 0;
+	}
+
+	for (i = 0, len = count * 2; i < len; i += 2) {
+		xlog_node = kzalloc(sizeof(*xlog_node), GFP_KERNEL);
+		if (!xlog_node) {
+			/* don't leak the xin id array on the error path */
+			kfree(offsets);
+			return -ENOMEM;
+		}
+
+		xlog_node->offset.start = be32_to_cpu(arr[i]);
+		xlog_node->offset.end = be32_to_cpu(arr[i + 1]);
+
+		parse_dump_range_name(pdev->dev.of_node, total_names, i/2,
+			xlog_node->range_name,
+			ARRAY_SIZE(xlog_node->range_name), name_prop);
+
+		if ((i / 2) < total_xin_ids)
+			xlog_node->xin_id = offsets[i / 2];
+		else
+			xlog_node->xin_id = INVALID_XIN_ID;
+
+		list_add_tail(&xlog_node->head, xlog_dump_list);
+	}
+
+	kfree(offsets);
+	return 0;
+}
+
+/*
+ * Attach the DT-described register dump ranges of @ranges_prop to
+ * @blk_base->dump_list. A missing property is tolerated and simply
+ * leaves the list untouched.
+ */
+void mdss_debug_register_dump_range(struct platform_device *pdev,
+	struct mdss_debug_base *blk_base, const char *ranges_prop,
+	const char *name_prop, const char *xin_prop)
+{
+	const u32 *ranges;
+	int nrows;
+
+	if (!blk_base || !ranges_prop || !name_prop)
+		return;
+
+	ranges = of_get_property(pdev->dev.of_node, ranges_prop, &nrows);
+	if (!ranges) {
+		pr_warn("No xlog range dump found, continue\n");
+		return;
+	}
+
+	/* each row holds two u32 entries: {start, end} */
+	nrows /= 2 * sizeof(u32);
+	parse_dt_xlog_dump_list(ranges, nrows, &blk_base->dump_list,
+		pdev, name_prop, xin_prop);
+}
+
+/*
+ * Update a mult_factor from user input: either a fraction "num/den" or a
+ * bare number interpreted as a percentage (denominator 100). A factor
+ * with a zero part is silently ignored.
+ */
+static ssize_t mdss_debug_factor_write(struct file *file,
+		    const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mult_factor *factor  = file->private_data;
+	char buf[32];
+	u32 numer, denom;
+
+	if (!factor)
+		return -ENODEV;
+
+	numer = factor->numer;
+	denom = factor->denom;
+
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = 0;	/* terminate the user string */
+
+	if (!strnchr(buf, count, '/')) {
+		/* plain number: treat it as a percentage */
+		if (kstrtouint(buf, 0, &numer))
+			return -EFAULT;
+		denom = 100;
+	} else if (sscanf(buf, "%u/%u", &numer, &denom) != 2) {
+		/* "a/b" fraction form */
+		return -EFAULT;
+	}
+
+	if (numer && denom) {
+		factor->numer = numer;
+		factor->denom = denom;
+	}
+
+	pr_debug("numer=%d  denom=%d\n", numer, denom);
+
+	return count;
+}
+
+/* Report a mult_factor as "numer/denom"; single-shot read via *ppos. */
+static ssize_t mdss_debug_factor_read(struct file *file,
+			char __user *buff, size_t count, loff_t *ppos)
+{
+	struct mult_factor *factor = file->private_data;
+	char buf[32] = {'\0'};
+	int len;
+
+	if (!factor)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;	/* already consumed */
+
+	len = snprintf(buf, sizeof(buf), "%d/%d\n",
+			factor->numer, factor->denom);
+	if (len < 0 || len >= sizeof(buf))
+		return 0;
+
+	if (count < sizeof(buf))
+		return -EFAULT;
+	if (copy_to_user(buff, buf, len))
+		return -EFAULT;
+
+	*ppos += len;
+
+	return len;
+}
+
+/* debugfs ops for the ab/ib/clk factor nodes (mult_factor read/write) */
+static const struct file_operations mdss_factor_fops = {
+	.open = simple_open,
+	.read = mdss_debug_factor_read,
+	.write = mdss_debug_factor_write,
+};
+
+/*
+ * Toggle performance mode: a non-zero value pins the MDP clock and bus
+ * vote at their maximums; zero resets both tuning parameters.
+ */
+static ssize_t mdss_debug_perf_mode_write(struct file *file,
+		    const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mdss_perf_tune *perf_tune = file->private_data;
+	struct mdss_data_type *mdata = mdss_res;
+	int perf_mode = 0;
+	char buf[10];
+
+	if (!perf_tune)
+		return -EFAULT;
+
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = 0;	/* end of string */
+
+	/*
+	 * kstrtoint() takes a numeric base and returns 0 on success; the
+	 * original passed a format string ("%d") as the base and compared
+	 * the result against 1, so every write failed with -EFAULT.
+	 */
+	if (kstrtoint(buf, 10, &perf_mode))
+		return -EFAULT;
+
+	if (perf_mode) {
+		/* run the driver with max clk and BW vote */
+		mdata->perf_tune.min_mdp_clk = mdata->max_mdp_clk_rate;
+		mdata->perf_tune.min_bus_vote = (u64)mdata->max_bw_high*1000;
+	} else {
+		/* reset the perf tune params to 0 */
+		mdata->perf_tune.min_mdp_clk = 0;
+		mdata->perf_tune.min_bus_vote = 0;
+	}
+	return count;
+}
+
+/*
+ * Report the current perf-tune parameters. Single-shot read via *ppos.
+ * NOTE(review): min_mdp_clk is printed with %lu — confirm its declared
+ * type matches (it is handed to debugfs_create_u32() elsewhere as u32*).
+ */
+static ssize_t mdss_debug_perf_mode_read(struct file *file,
+			char __user *buff, size_t count, loff_t *ppos)
+{
+	struct mdss_perf_tune *perf_tune = file->private_data;
+	int len = 0;
+	char buf[40] = {'\0'};
+
+	if (!perf_tune)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;	/* the end */
+
+	len = snprintf(buf, sizeof(buf), "min_mdp_clk %lu min_bus_vote %llu\n",
+	perf_tune->min_mdp_clk, perf_tune->min_bus_vote);
+	if (len < 0 || len >= sizeof(buf))
+		return 0;
+
+	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
+		return -EFAULT;
+
+	*ppos += len;   /* increase offset */
+
+	return len;
+}
+
+
+/* debugfs ops for the "perf_mode" node (max clk/BW override toggle) */
+static const struct file_operations mdss_perf_mode_fops = {
+	.open = simple_open,
+	.read = mdss_debug_perf_mode_read,
+	.write = mdss_debug_perf_mode_write,
+};
+
+/*
+ * Report whether panic signalling is DISABLED (the node is named
+ * "disable_panic", hence the inverted has_panic_ctrl). Single-shot read.
+ */
+static ssize_t mdss_debug_perf_panic_read(struct file *file,
+			char __user *buff, size_t count, loff_t *ppos)
+{
+	struct mdss_data_type *mdata = file->private_data;
+	int len = 0;
+	char buf[40] = {'\0'};
+
+	if (!mdata)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0; /* the end */
+
+	len = snprintf(buf, sizeof(buf), "%d\n",
+		!mdata->has_panic_ctrl);
+	if (len < 0 || len >= sizeof(buf))
+		return 0;
+
+	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
+		return -EFAULT;
+
+	*ppos += len;   /* increase offset */
+
+	return len;
+}
+
+/*
+ * Toggle the panic signal on every active pipe in @pipe_pool (pipes whose
+ * refcount is non-zero, when the hardware supports panic signalling).
+ * Returns the number of pipes that were reprogrammed.
+ */
+static int mdss_debug_set_panic_signal(struct mdss_mdp_pipe *pipe_pool,
+	u32 pool_size, struct mdss_data_type *mdata, bool enable)
+{
+	int i, cnt = 0;
+	struct mdss_mdp_pipe *pipe;
+
+	for (i = 0; i < pool_size; i++) {
+		pipe = pipe_pool + i;
+		if (pipe && (atomic_read(&pipe->kref.refcount) != 0) &&
+			mdss_mdp_panic_signal_support_mode(mdata)) {
+			mdss_mdp_pipe_panic_signal_ctrl(pipe, enable);
+			pr_debug("pnum:%d count:%d img:%dx%d ",
+				pipe->num, pipe->play_cnt, pipe->img_width,
+				pipe->img_height);
+			pr_cont("src[%d,%d,%d,%d] dst[%d,%d,%d,%d]\n",
+				pipe->src.x, pipe->src.y, pipe->src.w,
+				pipe->src.h, pipe->dst.x, pipe->dst.y,
+				pipe->dst.w, pipe->dst.h);
+			cnt++;
+		} else if (pipe) {
+			/*
+			 * The original format string labeled the refcount
+			 * value as "pipe num"; log both with correct labels.
+			 */
+			pr_debug("Inactive pipe num:%d refcount:%d supported:%d\n",
+			       pipe->num,
+			       atomic_read(&pipe->kref.refcount),
+			       mdss_mdp_panic_signal_support_mode(mdata));
+		}
+	}
+	return cnt;
+}
+
+/*
+ * Apply the panic-signal state to every active pipe in the VIG, RGB and
+ * DMA pools.
+ */
+static void mdss_debug_set_panic_state(struct mdss_data_type *mdata,
+	bool enable)
+{
+	pr_debug("VIG:\n");
+	if (!mdss_debug_set_panic_signal(mdata->vig_pipes, mdata->nvig_pipes,
+		mdata, enable))
+		pr_debug("no active pipes found\n");
+	pr_debug("RGB:\n");
+	if (!mdss_debug_set_panic_signal(mdata->rgb_pipes, mdata->nrgb_pipes,
+		mdata, enable))
+		pr_debug("no active pipes found\n");
+	pr_debug("DMA:\n");
+	/* fix copy-paste bug: the DMA section iterated the VIG pool */
+	if (!mdss_debug_set_panic_signal(mdata->dma_pipes, mdata->ndma_pipes,
+		mdata, enable))
+		pr_debug("no active pipes found\n");
+}
+
+/*
+ * Enable/disable panic signalling: a non-zero value disables it on all
+ * active pipes (the node is "disable_panic"); zero re-enables it.
+ */
+static ssize_t mdss_debug_perf_panic_write(struct file *file,
+		    const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mdss_data_type *mdata = file->private_data;
+	int disable_panic;
+	char buf[10];
+
+	if (!mdata)
+		return -EFAULT;
+
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = 0;	/* end of string */
+
+	/*
+	 * kstrtoint() takes a numeric base and returns 0 on success; the
+	 * original passed a format string ("%d") as the base and compared
+	 * the result against 1, so every write failed with -EFAULT.
+	 */
+	if (kstrtoint(buf, 10, &disable_panic))
+		return -EFAULT;
+
+	if (disable_panic) {
+		/* Disable panic signal for all active pipes */
+		pr_debug("Disabling panic:\n");
+		mdss_debug_set_panic_state(mdata, false);
+		mdata->has_panic_ctrl = false;
+	} else {
+		/* Enable panic signal for all active pipes */
+		pr_debug("Enabling panic:\n");
+		mdata->has_panic_ctrl = true;
+		mdss_debug_set_panic_state(mdata, true);
+	}
+
+	return count;
+}
+
+/* debugfs ops for the "disable_panic" node */
+static const struct file_operations mdss_perf_panic_enable = {
+	.open = simple_open,
+	.read = mdss_debug_perf_panic_read,
+	.write = mdss_debug_perf_panic_write,
+};
+
+/*
+ * Free every registered debug base, remove the whole debugfs tree and
+ * release @mdd itself. Safe to call with NULL.
+ */
+static int mdss_debugfs_cleanup(struct mdss_debug_data *mdd)
+{
+	struct mdss_debug_base *base, *tmp;
+
+	if (!mdd)
+		return 0;
+
+	list_for_each_entry_safe(base, tmp, &mdd->base_list, head) {
+		list_del(&base->head);
+		kfree(base);
+	}
+
+	/* mdd is known non-NULL here; the guard above already returned */
+	debugfs_remove_recursive(mdd->root);
+
+	kfree(mdd);
+
+	return 0;
+}
+
+/*
+ * Dump the per-mode maximum bandwidth table, one "<mode> <value>" line
+ * per entry. Single-shot read via *ppos.
+ */
+static ssize_t mdss_debug_perf_bw_limit_read(struct file *file,
+			char __user *buff, size_t count, loff_t *ppos)
+{
+	struct mdss_data_type *mdata = file->private_data;
+	struct mdss_max_bw_settings *temp_settings;
+	int len = 0, i;
+	char buf[256];
+
+	if (!mdata)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;	/* the end */
+
+	pr_debug("mdata->max_bw_settings_cnt = %d\n",
+			mdata->max_bw_settings_cnt);
+
+	temp_settings = mdata->max_bw_settings;
+	for (i = 0; i < mdata->max_bw_settings_cnt; i++) {
+		/*
+		 * Bound each append by the space remaining; the original
+		 * passed sizeof(buf) as the size on every iteration, which
+		 * lets snprintf() write past the end of buf once len > 0.
+		 */
+		len += snprintf(buf + len, sizeof(buf) - len, "%d %d\n",
+				temp_settings->mdss_max_bw_mode,
+					temp_settings->mdss_max_bw_val);
+		if (len >= sizeof(buf))
+			return 0;
+		temp_settings++;
+	}
+
+	if (len < 0 || len >= sizeof(buf))
+		return 0;
+
+	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
+		return -EFAULT;
+
+	*ppos += len;	/* increase offset */
+
+	return len;
+}
+
+/*
+ * Update the bandwidth limit of an existing mode entry.
+ * Input format: "<mode> <value>".
+ */
+static ssize_t mdss_debug_perf_bw_limit_write(struct file *file,
+		    const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mdss_data_type *mdata = file->private_data;
+	char buf[32];
+	u32 mode = 0, val = 0;
+	u32 cnt;
+	bool found = false;
+	struct mdss_max_bw_settings *temp_settings;
+
+	if (!mdata)
+		return -ENODEV;
+
+	cnt = mdata->max_bw_settings_cnt;
+	temp_settings = mdata->max_bw_settings;
+
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = 0;	/* end of string */
+
+	if (strnchr(buf, count, ' ')) {
+		/* Parsing buf */
+		if (sscanf(buf, "%u %u", &mode, &val) != 2)
+			return -EFAULT;
+	}
+
+	while (cnt--) {
+		if (mode == temp_settings->mdss_max_bw_mode) {
+			temp_settings->mdss_max_bw_val = val;
+			found = true;
+			break;
+		}
+		temp_settings++;
+	}
+
+	/*
+	 * The original tested "cnt == 0" after the post-decrement loop;
+	 * on exhaustion the u32 wraps to UINT_MAX, so the error was never
+	 * reported. Track the match explicitly instead.
+	 */
+	if (!found)
+		pr_err("Input mode is invalid\n");
+
+	return count;
+}
+
+/* debugfs ops for the "threshold_bw_limit" node */
+static const struct file_operations mdss_perf_bw_limit_fops = {
+	.open = simple_open,
+	.read = mdss_debug_perf_bw_limit_read,
+	.write = mdss_debug_perf_bw_limit_write,
+};
+
+/*
+ * Populate the "perf" debugfs directory with the bandwidth/clock tuning
+ * knobs. Creation failures of individual nodes are not checked here;
+ * the directory itself is validated by the caller (mdss_debugfs_init).
+ * Always returns 0.
+ */
+static int mdss_debugfs_perf_init(struct mdss_debug_data *mdd,
+			struct mdss_data_type *mdata) {
+
+	debugfs_create_u32("min_mdp_clk", 0644, mdd->perf,
+		(u32 *)&mdata->perf_tune.min_mdp_clk);
+
+	debugfs_create_u64("min_bus_vote", 0644, mdd->perf,
+		(u64 *)&mdata->perf_tune.min_bus_vote);
+
+	debugfs_create_u32("disable_prefill", 0644, mdd->perf,
+		(u32 *)&mdata->disable_prefill);
+
+	debugfs_create_file("disable_panic", 0644, mdd->perf,
+		(struct mdss_data_type *)mdata, &mdss_perf_panic_enable);
+
+	/*
+	 * NOTE(review): this kernel's debugfs_create_bool() takes a u32
+	 * pointer, hence the casts — confirm the field types match.
+	 */
+	debugfs_create_bool("enable_bw_release", 0644, mdd->perf,
+		(u32 *)&mdata->enable_bw_release);
+
+	debugfs_create_bool("enable_rotator_bw_release", 0644, mdd->perf,
+		(u32 *)&mdata->enable_rotator_bw_release);
+
+	debugfs_create_file("ab_factor", 0644, mdd->perf,
+		&mdata->ab_factor, &mdss_factor_fops);
+
+	debugfs_create_file("ib_factor", 0644, mdd->perf,
+		&mdata->ib_factor, &mdss_factor_fops);
+
+	debugfs_create_file("ib_factor_overlap", 0644, mdd->perf,
+		&mdata->ib_factor_overlap, &mdss_factor_fops);
+
+	debugfs_create_file("clk_factor", 0644, mdd->perf,
+		&mdata->clk_factor, &mdss_factor_fops);
+
+	debugfs_create_u32("threshold_low", 0644, mdd->perf,
+		(u32 *)&mdata->max_bw_low);
+
+	debugfs_create_u32("threshold_high", 0644, mdd->perf,
+		(u32 *)&mdata->max_bw_high);
+
+	debugfs_create_u32("threshold_pipe", 0644, mdd->perf,
+		(u32 *)&mdata->max_bw_per_pipe);
+
+	debugfs_create_file("perf_mode", 0644, mdd->perf,
+		(u32 *)&mdata->perf_tune, &mdss_perf_mode_fops);
+
+	/* Initialize percentage to 0% */
+	mdata->latency_buff_per = 0;
+	debugfs_create_u32("latency_buff_per", 0644, mdd->perf,
+		(u32 *)&mdata->latency_buff_per);
+
+	debugfs_create_file("threshold_bw_limit", 0644, mdd->perf,
+		(struct mdss_data_type *)mdata, &mdss_perf_bw_limit_fops);
+
+	debugfs_create_u32("lines_before_active", 0644, mdd->perf,
+		(u32 *)&mdata->lines_before_active);
+
+	return 0;
+}
+
+/*
+ * Create the "mdp" debugfs tree (perf/bordercolor/postproc subdirs plus
+ * the xlog and frc debug nodes) and publish it via
+ * mdata->debug_inf.debug_data.
+ *
+ * Returns 0 on success, -EBUSY if already initialized, -ENOMEM or
+ * -ENODEV on setup failure (everything created so far is cleaned up).
+ */
+int mdss_debugfs_init(struct mdss_data_type *mdata)
+{
+	struct mdss_debug_data *mdd;
+
+	if (mdata->debug_inf.debug_data) {
+		pr_warn("mdss debugfs already initialized\n");
+		return -EBUSY;
+	}
+
+	mdd = kzalloc(sizeof(*mdd), GFP_KERNEL);
+	if (!mdd)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&mdd->base_list);
+
+	mdd->root = debugfs_create_dir("mdp", NULL);
+	if (IS_ERR_OR_NULL(mdd->root)) {
+		pr_err("debugfs_create_dir for mdp failed, error %ld\n",
+		       PTR_ERR(mdd->root));
+		goto err;
+	}
+
+	mdd->perf = debugfs_create_dir("perf", mdd->root);
+	if (IS_ERR_OR_NULL(mdd->perf)) {
+		pr_err("debugfs_create_dir perf fail, error %ld\n",
+			PTR_ERR(mdd->perf));
+		goto err;
+	}
+
+	mdd->bordercolor = debugfs_create_dir("bordercolor", mdd->root);
+	if (IS_ERR_OR_NULL(mdd->bordercolor)) {
+		pr_err("debugfs_create_dir for bordercolor failed, error %ld\n",
+		       PTR_ERR(mdd->bordercolor));
+		goto err;
+	}
+
+	mdd->postproc = debugfs_create_dir("postproc", mdd->root);
+	if (IS_ERR_OR_NULL(mdd->postproc)) {
+		pr_err("debugfs_create_dir postproc for mdp failed, error %ld\n",
+		       PTR_ERR(mdd->postproc));
+		goto err;
+	}
+	mdss_debugfs_perf_init(mdd, mdata);
+
+	if (mdss_create_xlog_debug(mdd))
+		goto err;
+
+	if (mdss_create_frc_debug(mdd))
+		goto err;
+
+	/* publish only after everything was created successfully */
+	mdata->debug_inf.debug_data = mdd;
+
+	return 0;
+
+err:
+	mdss_debugfs_cleanup(mdd);
+	return -ENODEV;
+}
+
+/* Tear down the mdp debugfs tree and detach it from @mdata. */
+int mdss_debugfs_remove(struct mdss_data_type *mdata)
+{
+	struct mdss_debug_data *mdd = mdata->debug_inf.debug_data;
+
+	mdata->debug_inf.debug_data = NULL;
+	mdss_debugfs_cleanup(mdd);
+
+	return 0;
+}
+
+/* vsync counter shared with the batch-mode CRC collector for tracing */
+int vsync_count;
+/*
+ * Per-display MISR bookkeeping.  ctrl_reg/value_reg are register offsets
+ * relative to mdp_base; for MDP rev >= 105 they are recomputed at runtime
+ * by mdss_misr_get_map().  crc_ping/crc_pong double-buffer signatures
+ * collected in batch mode (MISR_OP_BM).
+ */
+static struct mdss_mdp_misr_map {
+	u32 ctrl_reg;
+	u32 value_reg;
+	u32 crc_op_mode;
+	u32 crc_index;
+	u32 last_misr;	/* most recent signature, cached for sysfs dump */
+	bool use_ping;
+	bool is_ping_full;
+	bool is_pong_full;
+	struct mutex crc_lock;
+	u32 crc_ping[MISR_CRC_BATCH_SIZE];
+	u32 crc_pong[MISR_CRC_BATCH_SIZE];
+} mdss_mdp_misr_table[DISPLAY_MISR_MAX] = {
+	[DISPLAY_MISR_DSI0] = {
+		.ctrl_reg = MDSS_MDP_LP_MISR_CTRL_DSI0,
+		.value_reg = MDSS_MDP_LP_MISR_SIGN_DSI0,
+		.crc_op_mode = 0,
+		.crc_index = 0,
+		.last_misr = 0,
+		.use_ping = true,
+		.is_ping_full = false,
+		.is_pong_full = false,
+	},
+	[DISPLAY_MISR_DSI1] = {
+		.ctrl_reg = MDSS_MDP_LP_MISR_CTRL_DSI1,
+		.value_reg = MDSS_MDP_LP_MISR_SIGN_DSI1,
+		.crc_op_mode = 0,
+		.crc_index = 0,
+		.last_misr = 0,
+		.use_ping = true,
+		.is_ping_full = false,
+		.is_pong_full = false,
+	},
+	[DISPLAY_MISR_EDP] = {
+		.ctrl_reg = MDSS_MDP_LP_MISR_CTRL_EDP,
+		.value_reg = MDSS_MDP_LP_MISR_SIGN_EDP,
+		.crc_op_mode = 0,
+		.crc_index = 0,
+		.last_misr = 0,
+		.use_ping = true,
+		.is_ping_full = false,
+		.is_pong_full = false,
+	},
+	[DISPLAY_MISR_HDMI] = {
+		.ctrl_reg = MDSS_MDP_LP_MISR_CTRL_HDMI,
+		.value_reg = MDSS_MDP_LP_MISR_SIGN_HDMI,
+		.crc_op_mode = 0,
+		.crc_index = 0,
+		.last_misr = 0,
+		.use_ping = true,
+		.is_ping_full = false,
+		.is_pong_full = false,
+	},
+	[DISPLAY_MISR_MDP] = {
+		.ctrl_reg = MDSS_MDP_LP_MISR_CTRL_MDP,
+		.value_reg = MDSS_MDP_LP_MISR_SIGN_MDP,
+		.crc_op_mode = 0,
+		.crc_index = 0,
+		.last_misr = 0,
+		.use_ping = true,
+		.is_ping_full = false,
+		.is_pong_full = false,
+	},
+};
+
+/*
+ * mdss_misr_get_map() - resolve the MISR ctrl/signature registers for a block.
+ * @block_id:      logical MISR block (DISPLAY_MISR_*)
+ * @ctl:           MDP control path, may be NULL (needed only for the MDP/
+ *                 writeback block on rev >= 105)
+ * @mdata:         MDSS data
+ * @is_video_mode: selects the video- vs command-mode MISR registers on DSI
+ *
+ * For MDP rev >= 105 the offsets stored in mdss_mdp_misr_table are
+ * recomputed from the interface/mixer base addresses; older revisions use
+ * the static table values.  Returns NULL for an out-of-range block or when
+ * no register pair could be resolved.
+ */
+static inline struct mdss_mdp_misr_map *mdss_misr_get_map(u32 block_id,
+		struct mdss_mdp_ctl *ctl, struct mdss_data_type *mdata,
+		bool is_video_mode)
+{
+	struct mdss_mdp_misr_map *map;
+	struct mdss_mdp_mixer *mixer;
+	char *ctrl_reg = NULL, *value_reg = NULL;
+	char *intf_base = NULL;
+
+	if (block_id > DISPLAY_MISR_HDMI && block_id != DISPLAY_MISR_MDP) {
+		pr_err("MISR Block id (%d) out of range\n", block_id);
+		return NULL;
+	}
+
+	if (mdata->mdp_rev >= MDSS_MDP_HW_REV_105) {
+		/* Use updated MDP Interface MISR Block address offset */
+		if (block_id == DISPLAY_MISR_MDP) {
+			/* writeback CRC comes from the default layer mixer */
+			if (ctl) {
+				mixer = mdss_mdp_mixer_get(ctl,
+					MDSS_MDP_MIXER_MUX_DEFAULT);
+
+				if (mixer) {
+					ctrl_reg = mixer->base +
+						MDSS_MDP_LAYER_MIXER_MISR_CTRL;
+					value_reg = mixer->base +
+					MDSS_MDP_LAYER_MIXER_MISR_SIGNATURE;
+				}
+			}
+		} else {
+			if (block_id <= DISPLAY_MISR_HDMI) {
+				intf_base = (char *)mdss_mdp_get_intf_base_addr(
+						mdata, block_id);
+
+				/* DSI command mode uses a separate MISR pair */
+				if ((block_id == DISPLAY_MISR_DSI0 ||
+				     block_id == DISPLAY_MISR_DSI1) &&
+				     !is_video_mode) {
+					ctrl_reg = intf_base +
+						MDSS_MDP_INTF_CMD_MISR_CTRL;
+					value_reg = intf_base +
+					    MDSS_MDP_INTF_CMD_MISR_SIGNATURE;
+
+					/*
+					 * extra offset required for
+					 * cmd misr in 8996
+					 */
+					if (IS_MDSS_MAJOR_MINOR_SAME(
+						  mdata->mdp_rev,
+						  MDSS_MDP_HW_REV_107)) {
+						ctrl_reg += 0x8;
+						value_reg += 0x8;
+					}
+
+				} else {
+					ctrl_reg = intf_base +
+						MDSS_MDP_INTF_MISR_CTRL;
+					value_reg = intf_base +
+						MDSS_MDP_INTF_MISR_SIGNATURE;
+				}
+			}
+			/*
+			 * For msm8916/8939, additional offset of 0x10
+			 * is required
+			 */
+			if ((mdata->mdp_rev == MDSS_MDP_HW_REV_106) ||
+				(mdata->mdp_rev == MDSS_MDP_HW_REV_108) ||
+				(mdata->mdp_rev == MDSS_MDP_HW_REV_112)) {
+				ctrl_reg += 0x10;
+				value_reg += 0x10;
+			}
+		}
+		/* store mdp_base-relative offsets back into the table */
+		mdss_mdp_misr_table[block_id].ctrl_reg = (u32)(ctrl_reg -
+							mdata->mdp_base);
+		mdss_mdp_misr_table[block_id].value_reg = (u32)(value_reg -
+							mdata->mdp_base);
+	}
+
+	map = mdss_mdp_misr_table + block_id;
+	/* offsets stay 0 if resolution failed above (e.g. NULL ctl/mixer) */
+	if ((map->ctrl_reg == 0) || (map->value_reg == 0)) {
+		pr_err("MISR Block id (%d) config not found\n", block_id);
+		return NULL;
+	}
+
+	pr_debug("MISR Module(%d) CTRL(0x%x) SIG(0x%x) intf_base(0x%pK)\n",
+			block_id, map->ctrl_reg, map->value_reg, intf_base);
+	return map;
+}
+
+/*
+ * switch_mdp_misr_offset() - Update MDP MISR register offset for MDSS
+ * Hardware Revision 103.
+ * @map: mdss_mdp_misr_map
+ * @mdp_rev: MDSS Hardware Revision
+ * @block_id: Logical MISR Block ID
+ *
+ * Rev 103 routes the layer-mixer CRC through the upper-pipe MISR, so the
+ * map's register offsets are rewritten in place for the MDP block.
+ *
+ * Return: true when MDSS Revision is 103 else false.
+ */
+static bool switch_mdp_misr_offset(struct mdss_mdp_misr_map *map, u32 mdp_rev,
+					u32 block_id)
+{
+	bool use_mdp_up_misr = false;
+
+	if ((IS_MDSS_MAJOR_MINOR_SAME(mdp_rev, MDSS_MDP_HW_REV_103)) &&
+		(block_id == DISPLAY_MISR_MDP)) {
+		/* Use Upper pipe MISR for Layer Mixer CRC */
+		map->ctrl_reg = MDSS_MDP_UP_MISR_CTRL_MDP;
+		map->value_reg = MDSS_MDP_UP_MISR_SIGN_MDP;
+		use_mdp_up_misr = true;
+	}
+	pr_debug("MISR Module(%d) Offset of MISR_CTRL = 0x%x MISR_SIG = 0x%x\n",
+			block_id, map->ctrl_reg, map->value_reg);
+	return use_mdp_up_misr;
+}
+
+/*
+ * mdss_misr_disable() - stop MISR collection for a block and reset state.
+ * @mdata: MDSS data
+ * @req:   request identifying the MISR block
+ * @ctl:   MDP control path for the display
+ *
+ * Clears the cached ping/pong CRC buffers and writes the status-clear bit
+ * to the block's MISR control register.
+ *
+ * NOTE(review): ctl is dereferenced without a NULL check (unlike
+ * mdss_misr_set) — confirm all callers pass a valid ctl.
+ */
+void mdss_misr_disable(struct mdss_data_type *mdata,
+			struct mdp_misr *req,
+			struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_misr_map *map;
+
+	map = mdss_misr_get_map(req->block_id, ctl, mdata,
+		ctl->is_video_mode);
+
+	if (!map)
+		return;
+
+	/* clear the map data */
+	memset(map->crc_ping, 0, sizeof(map->crc_ping));
+	memset(map->crc_pong, 0, sizeof(map->crc_pong));
+	map->crc_index = 0;
+	map->use_ping = true;
+	map->is_ping_full = false;
+	map->is_pong_full = false;
+	map->crc_op_mode = 0;
+	map->last_misr = 0;
+
+	/* disable MISR and clear the status */
+	writel_relaxed(MDSS_MDP_MISR_CTRL_STATUS_CLEAR,
+			mdata->mdp_base + map->ctrl_reg);
+
+	/* make sure status is clear */
+	wmb();
+}
+
+/*
+ * mdss_misr_set() - arm MISR signature collection for a block.
+ * @mdata: MDSS data
+ * @req:   block id, frame count and CRC operating mode
+ * @ctl:   MDP control path for the display
+ *
+ * Selects the layer mixer for writeback CRC where needed, clears stale
+ * status/buffers, then programs the MISR control register (except in
+ * batch mode, which is armed from the vsync collector).
+ *
+ * Returns 0 on success or -EINVAL on invalid arguments/block.
+ */
+int mdss_misr_set(struct mdss_data_type *mdata,
+			struct mdp_misr *req,
+			struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_misr_map *map;
+	struct mdss_mdp_mixer *mixer;
+	u32 config = 0, val = 0;
+	u32 mixer_num = 0;
+	bool is_valid_wb_mixer = true;
+	bool use_mdp_up_misr = false;
+
+	if (!mdata || !req || !ctl) {
+		pr_err("Invalid input params: mdata = %pK req = %pK ctl = %pK",
+			mdata, req, ctl);
+		return -EINVAL;
+	}
+	pr_debug("req[block:%d frame:%d op_mode:%d]\n",
+		req->block_id, req->frame_count, req->crc_op_mode);
+
+	map = mdss_misr_get_map(req->block_id, ctl, mdata,
+		ctl->is_video_mode);
+	if (!map) {
+		pr_err("Invalid MISR Block=%d\n", req->block_id);
+		return -EINVAL;
+	}
+	use_mdp_up_misr = switch_mdp_misr_offset(map, mdata->mdp_rev,
+				req->block_id);
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+	if (req->block_id == DISPLAY_MISR_MDP) {
+		mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_DEFAULT);
+		if (!mixer) {
+			pr_err("failed to get default mixer, Block=%d\n",
+				req->block_id);
+			/*
+			 * Fix: drop the clock vote taken above; the original
+			 * code returned here and leaked the POWER_ON vote.
+			 */
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+			return -EINVAL;
+		}
+		mixer_num = mixer->num;
+		pr_debug("SET MDP MISR BLK to MDSS_MDP_LP_MISR_SEL_LMIX%d_GC\n",
+			mixer_num);
+		switch (mixer_num) {
+		case MDSS_MDP_INTF_LAYERMIXER0:
+			pr_debug("Use Layer Mixer 0 for WB CRC\n");
+			val = MDSS_MDP_LP_MISR_SEL_LMIX0_GC;
+			break;
+		case MDSS_MDP_INTF_LAYERMIXER1:
+			pr_debug("Use Layer Mixer 1 for WB CRC\n");
+			val = MDSS_MDP_LP_MISR_SEL_LMIX1_GC;
+			break;
+		case MDSS_MDP_INTF_LAYERMIXER2:
+			pr_debug("Use Layer Mixer 2 for WB CRC\n");
+			val = MDSS_MDP_LP_MISR_SEL_LMIX2_GC;
+			break;
+		default:
+			pr_err("Invalid Layer Mixer %d selected for WB CRC\n",
+				mixer_num);
+			is_valid_wb_mixer = false;
+			break;
+		}
+		if ((is_valid_wb_mixer) &&
+			(mdata->mdp_rev < MDSS_MDP_HW_REV_106)) {
+			if (use_mdp_up_misr)
+				writel_relaxed((val +
+					MDSS_MDP_UP_MISR_LMIX_SEL_OFFSET),
+					(mdata->mdp_base +
+					 MDSS_MDP_UP_MISR_SEL));
+			else
+				writel_relaxed(val,
+					(mdata->mdp_base +
+					MDSS_MDP_LP_MISR_SEL));
+		}
+	}
+	vsync_count = 0;
+	map->crc_op_mode = req->crc_op_mode;
+	config = (MDSS_MDP_MISR_CTRL_FRAME_COUNT_MASK & req->frame_count) |
+			(MDSS_MDP_MISR_CTRL_ENABLE);
+
+	writel_relaxed(MDSS_MDP_MISR_CTRL_STATUS_CLEAR,
+			mdata->mdp_base + map->ctrl_reg);
+	/* ensure clear is done */
+	wmb();
+
+	memset(map->crc_ping, 0, sizeof(map->crc_ping));
+	memset(map->crc_pong, 0, sizeof(map->crc_pong));
+	map->crc_index = 0;
+	map->use_ping = true;
+	map->is_ping_full = false;
+	map->is_pong_full = false;
+
+	if (map->crc_op_mode != MISR_OP_BM) {
+
+		writel_relaxed(config,
+				mdata->mdp_base + map->ctrl_reg);
+		pr_debug("MISR_CTRL=0x%x [base:0x%pK reg:0x%x config:0x%x]\n",
+				readl_relaxed(mdata->mdp_base + map->ctrl_reg),
+				mdata->mdp_base, map->ctrl_reg, config);
+	}
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	return 0;
+}
+
+/* get_misr_block_name() - human-readable name for a logical MISR block id. */
+char *get_misr_block_name(int misr_block_id)
+{
+	if (misr_block_id == DISPLAY_MISR_EDP)
+		return "eDP";
+	if (misr_block_id == DISPLAY_MISR_DSI0)
+		return "DSI_0";
+	if (misr_block_id == DISPLAY_MISR_DSI1)
+		return "DSI_1";
+	if (misr_block_id == DISPLAY_MISR_HDMI)
+		return "HDMI";
+	if (misr_block_id == DISPLAY_MISR_MDP)
+		return "Writeback";
+	if (misr_block_id == DISPLAY_MISR_DSI_CMD)
+		return "DSI_CMD";
+
+	return "???";
+}
+
+/*
+ * mdss_misr_get() - read back collected MISR signature(s) for a block.
+ * @resp:          block id in, CRC value(s) and op mode out
+ * @ctl:           MDP control path for the display
+ * @is_video_mode: selects video vs command MISR registers on DSI
+ *
+ * Single/multi-frame modes poll the MISR status and return one signature;
+ * batch mode drains whichever ping/pong buffer is full.  Returns 0 on
+ * success, -EINVAL for an unknown block, a poll-timeout error, or -ENOSYS
+ * for an unsupported op mode.
+ */
+int mdss_misr_get(struct mdss_data_type *mdata,
+			struct mdp_misr *resp,
+			struct mdss_mdp_ctl *ctl,
+			bool is_video_mode)
+{
+	struct mdss_mdp_misr_map *map;
+	struct mdss_mdp_mixer *mixer;
+	u32 status;
+	int ret = -1;
+	int i;
+
+	pr_debug("req[block:%d frame:%d op_mode:%d]\n",
+		resp->block_id, resp->frame_count, resp->crc_op_mode);
+
+	map = mdss_misr_get_map(resp->block_id, ctl, mdata,
+		is_video_mode);
+	if (!map) {
+		pr_err("Invalid MISR Block=%d\n", resp->block_id);
+		return -EINVAL;
+	}
+	switch_mdp_misr_offset(map, mdata->mdp_rev, resp->block_id);
+
+	mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_DEFAULT);
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+	switch (map->crc_op_mode) {
+	case MISR_OP_SFM:
+	case MISR_OP_MFM:
+		ret = readl_poll_timeout(mdata->mdp_base + map->ctrl_reg,
+				status, status & MDSS_MDP_MISR_CTRL_STATUS,
+				MISR_POLL_SLEEP, MISR_POLL_TIMEOUT);
+		if (ret == 0) {
+			resp->crc_value[0] = readl_relaxed(mdata->mdp_base +
+				map->value_reg);
+			pr_debug("CRC %s=0x%x\n",
+				get_misr_block_name(resp->block_id),
+				resp->crc_value[0]);
+			writel_relaxed(0, mdata->mdp_base + map->ctrl_reg);
+		} else {
+			/* first poll timed out; retry once before giving up */
+			pr_debug("Get MISR TimeOut %s\n",
+				get_misr_block_name(resp->block_id));
+
+			ret = readl_poll_timeout(mdata->mdp_base +
+					map->ctrl_reg, status,
+					status & MDSS_MDP_MISR_CTRL_STATUS,
+					MISR_POLL_SLEEP, MISR_POLL_TIMEOUT);
+			if (ret == 0) {
+				resp->crc_value[0] =
+					readl_relaxed(mdata->mdp_base +
+					map->value_reg);
+				pr_debug("Retry CRC %s=0x%x\n",
+					get_misr_block_name(resp->block_id),
+					resp->crc_value[0]);
+			} else {
+				pr_err("Get MISR TimeOut %s\n",
+					get_misr_block_name(resp->block_id));
+			}
+			writel_relaxed(0, mdata->mdp_base + map->ctrl_reg);
+		}
+		break;
+	case MISR_OP_BM:
+		if (map->is_ping_full) {
+			for (i = 0; i < MISR_CRC_BATCH_SIZE; i++)
+				resp->crc_value[i] = map->crc_ping[i];
+			memset(map->crc_ping, 0, sizeof(map->crc_ping));
+			map->is_ping_full = false;
+			ret = 0;
+		} else if (map->is_pong_full) {
+			for (i = 0; i < MISR_CRC_BATCH_SIZE; i++)
+				resp->crc_value[i] = map->crc_pong[i];
+			memset(map->crc_pong, 0, sizeof(map->crc_pong));
+			map->is_pong_full = false;
+			ret = 0;
+		} else {
+			pr_debug("mdss_mdp_misr_crc_get PING BUF %s\n",
+				map->is_ping_full ? "FULL" : "EMPTRY");
+			pr_debug("mdss_mdp_misr_crc_get PONG BUF %s\n",
+				map->is_pong_full ? "FULL" : "EMPTRY");
+		}
+		resp->crc_op_mode = map->crc_op_mode;
+		break;
+	default:
+		/*
+		 * Fix: was -ENOTSUP, which is a userspace-only errno alias
+		 * and is not defined in kernel headers (build break); use
+		 * -ENOSYS for the unsupported op mode.
+		 */
+		ret = -ENOSYS;
+		break;
+	}
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	return ret;
+}
+
+/*
+ * mdss_misr_crc_collect() - batch-mode CRC harvester.
+ * This function is expected to be called from interrupt context: it reads
+ * the current signature on each vsync and stores it into the ping/pong
+ * buffers drained by mdss_misr_get().  Only active in MISR_OP_BM mode.
+ */
+void mdss_misr_crc_collect(struct mdss_data_type *mdata, int block_id,
+	bool is_video_mode)
+{
+	struct mdss_mdp_misr_map *map;
+	u32 status = 0;
+	u32 crc = 0x0BAD0BAD;	/* sentinel, visible in trace if no CRC read */
+	bool crc_stored = false;
+
+	map = mdss_misr_get_map(block_id, NULL, mdata, is_video_mode);
+	if (!map || (map->crc_op_mode != MISR_OP_BM))
+		return;
+
+	switch_mdp_misr_offset(map, mdata->mdp_rev, block_id);
+
+	status = readl_relaxed(mdata->mdp_base + map->ctrl_reg);
+
+	if (MDSS_MDP_MISR_CTRL_STATUS & status) {
+
+		crc = readl_relaxed(mdata->mdp_base + map->value_reg);
+		map->last_misr = crc; /* cache crc to get it from sysfs */
+
+		/* store into whichever half of the ping/pong pair is open */
+		if (map->use_ping) {
+			if (map->is_ping_full) {
+				pr_err_once("PING Buffer FULL\n");
+			} else {
+				map->crc_ping[map->crc_index] = crc;
+				crc_stored = true;
+			}
+		} else {
+			if (map->is_pong_full) {
+				pr_err_once("PONG Buffer FULL\n");
+			} else {
+				map->crc_pong[map->crc_index] = crc;
+				crc_stored = true;
+			}
+		}
+
+		if (crc_stored) {
+			map->crc_index = (map->crc_index + 1);
+			/* batch complete: mark buffer full and swap halves */
+			if (map->crc_index == MISR_CRC_BATCH_SIZE) {
+				map->crc_index = 0;
+				if (true == map->use_ping) {
+					map->is_ping_full = true;
+					map->use_ping = false;
+				} else {
+					map->is_pong_full = true;
+					map->use_ping = true;
+				}
+				pr_debug("USE BUFF %s\n", map->use_ping ?
+					"PING" : "PONG");
+				pr_debug("mdss_misr_crc_collect PING BUF %s\n",
+					map->is_ping_full ? "FULL" : "EMPTRY");
+				pr_debug("mdss_misr_crc_collect PONG BUF %s\n",
+					map->is_pong_full ? "FULL" : "EMPTRY");
+			}
+		} else {
+			/* NOTE(review): crc is u32 but printed with %d */
+			pr_err_once("CRC(%d) Not saved\n", crc);
+		}
+
+		/* older HW needs an explicit clear + re-arm each frame */
+		if (mdata->mdp_rev < MDSS_MDP_HW_REV_105) {
+			writel_relaxed(MDSS_MDP_MISR_CTRL_STATUS_CLEAR,
+					mdata->mdp_base + map->ctrl_reg);
+			writel_relaxed(MISR_CRC_BATCH_CFG,
+				mdata->mdp_base + map->ctrl_reg);
+		}
+
+	} else if (status == 0) {
+
+		/* MISR idle: arm batch capture (free-run on newer HW) */
+		if (mdata->mdp_rev < MDSS_MDP_HW_REV_105)
+			writel_relaxed(MISR_CRC_BATCH_CFG,
+					mdata->mdp_base + map->ctrl_reg);
+		else
+			writel_relaxed(MISR_CRC_BATCH_CFG |
+					MDSS_MDP_LP_MISR_CTRL_FREE_RUN_MASK,
+					mdata->mdp_base + map->ctrl_reg);
+
+		pr_debug("$$ Batch CRC Start $$\n");
+	}
+
+	pr_debug("$$ Vsync Count = %d, CRC=0x%x Indx = %d$$\n",
+		vsync_count, crc, map->crc_index);
+	trace_mdp_misr_crc(block_id, vsync_count, crc);
+
+	if (vsync_count == MAX_VSYNC_COUNT) {
+		pr_debug("RESET vsync_count(%d)\n", vsync_count);
+		vsync_count = 0;
+	} else {
+		vsync_count += 1;
+	}
+
+}
+
+/*
+ * mdss_dump_misr_data() - format the last DSI0/DSI1/HDMI MISR signatures.
+ * @buf:  pointer to the destination character buffer
+ * @size: capacity of the destination buffer in bytes
+ *
+ * Returns the number of characters written (excluding the trailing NUL).
+ */
+int mdss_dump_misr_data(char **buf, u32 size)
+{
+	struct mdss_mdp_misr_map  *dsi0_map;
+	struct mdss_mdp_misr_map  *dsi1_map;
+	struct mdss_mdp_misr_map  *hdmi_map;
+	int ret;
+
+	dsi0_map = &mdss_mdp_misr_table[DISPLAY_MISR_DSI0];
+	dsi1_map = &mdss_mdp_misr_table[DISPLAY_MISR_DSI1];
+	hdmi_map = &mdss_mdp_misr_table[DISPLAY_MISR_HDMI];
+
+	/*
+	 * Fix: bound the output by the caller-supplied size; the size
+	 * argument was previously ignored in favor of PAGE_SIZE, which
+	 * could overrun a smaller buffer.
+	 */
+	ret = scnprintf(*buf, size,
+			"\tDSI0 mode:%02d MISR:0x%08x\n"
+			"\tDSI1 mode:%02d MISR:0x%08x\n"
+			"\tHDMI mode:%02d MISR:0x%08x\n",
+			dsi0_map->crc_op_mode, dsi0_map->last_misr,
+			dsi1_map->crc_op_mode, dsi1_map->last_misr,
+			hdmi_map->crc_op_mode, hdmi_map->last_misr
+			);
+
+	return ret;
+}
diff --git a/drivers/video/fbdev/msm/mdss_debug.h b/drivers/video/fbdev/msm/mdss_debug.h
new file mode 100644
index 0000000..01d300e
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_debug.h
@@ -0,0 +1,256 @@
+/* Copyright (c) 2012-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_DEBUG_H
+#define MDSS_DEBUG_H
+
+#include <stdarg.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/mdss_io_util.h>
+
+#include "mdss.h"
+#include "mdss_mdp_trace.h"
+
+/* MISR status polling interval / timeout (passed to readl_poll_timeout) */
+#define MISR_POLL_SLEEP		2000
+#define MISR_POLL_TIMEOUT	32000
+/* control value programmed to arm batch-mode CRC capture */
+#define MISR_CRC_BATCH_CFG	0x101
+/* sentinels terminating the variadic xlog argument lists */
+#define DATA_LIMITER (-1)
+#define XLOG_TOUT_DATA_LIMITER (NULL)
+#define XLOG_FUNC_ENTRY	0x1111
+#define XLOG_FUNC_EXIT	0x2222
+#define MDSS_REG_BLOCK_NAME_LEN (5)
+
+/* where register dumps go: kernel log, memory, or both */
+enum mdss_dbg_reg_dump_flag {
+	MDSS_DBG_DUMP_IN_LOG = BIT(0),
+	MDSS_DBG_DUMP_IN_MEM = BIT(1),
+};
+
+enum mdss_dbg_xlog_flag {
+	MDSS_XLOG_DEFAULT = BIT(0),
+	MDSS_XLOG_IOMMU = BIT(1),
+	MDSS_XLOG_DBG = BIT(6),
+	MDSS_XLOG_ALL = BIT(7)
+};
+
+/* pack a debug-bus block/test selection into its register encoding */
+#define TEST_MASK(id, tp)	((id << 4) | (tp << 1) | BIT(0))
+struct debug_bus {
+	u32 wr_addr;
+	u32 block_id;
+	u32 test_id;
+};
+
+struct vbif_debug_bus {
+	u32 disable_bus_addr;
+	u32 block_bus_addr;
+	u32 bit_offset;
+	u32 block_cnt;
+	u32 test_pnt_cnt;
+};
+
+/* event-log helpers; each variadic list must end with the limiter above */
+#define MDSS_XLOG(...) mdss_xlog(__func__, __LINE__, MDSS_XLOG_DEFAULT, \
+		##__VA_ARGS__, DATA_LIMITER)
+
+#define MDSS_XLOG_TOUT_HANDLER(...)	\
+	mdss_xlog_tout_handler_default(false, __func__, ##__VA_ARGS__, \
+		XLOG_TOUT_DATA_LIMITER)
+
+#define MDSS_XLOG_TOUT_HANDLER_WQ(...)	\
+	mdss_xlog_tout_handler_default(true, __func__, ##__VA_ARGS__, \
+		XLOG_TOUT_DATA_LIMITER)
+
+#define MDSS_XLOG_DBG(...) mdss_xlog(__func__, __LINE__, MDSS_XLOG_DBG, \
+		##__VA_ARGS__, DATA_LIMITER)
+
+#define MDSS_XLOG_ALL(...) mdss_xlog(__func__, __LINE__, MDSS_XLOG_ALL,	\
+		##__VA_ARGS__, DATA_LIMITER)
+
+#define MDSS_XLOG_IOMMU(...) mdss_xlog(__func__, __LINE__, MDSS_XLOG_IOMMU, \
+		##__VA_ARGS__, DATA_LIMITER)
+
+/* systrace-style markers emitted through the mdp trace events */
+#define ATRACE_END(name) trace_tracing_mark_write(current->tgid, name, 0)
+#define ATRACE_BEGIN(name) trace_tracing_mark_write(current->tgid, name, 1)
+#define ATRACE_FUNC() ATRACE_BEGIN(__func__)
+
+#define ATRACE_INT(name, value) \
+	trace_mdp_trace_counter(current->tgid, name, value)
+
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_FB_MSM_MDSS)
+
+#define MDSS_DEBUG_BASE_MAX 10
+
+/* one registered register block exposed through debugfs */
+struct mdss_debug_base {
+	struct list_head head; /* head of this node */
+	struct list_head dump_list; /* head to the list with dump ranges */
+	struct mdss_debug_data *mdd;
+	char name[80];
+	void __iomem *base;
+	size_t off;
+	size_t cnt;
+	u8 cmd_data_type;
+	size_t max_offset;
+	char *buf;
+	size_t buf_len;
+	u32 *reg_dump; /* address for the mem dump if no ranges used */
+};
+
+/* root handle stored in mdata->debug_inf.debug_data */
+struct mdss_debug_data {
+	struct dentry *root;
+	struct dentry *perf;
+	struct dentry *bordercolor;
+	struct dentry *postproc;
+	struct list_head base_list;
+};
+
+struct dump_offset {
+	u32 start;
+	u32 end;
+};
+
+struct range_dump_node {
+	struct list_head head; /* head of this node */
+	u32 *reg_dump; /* address for the mem dump */
+	char range_name[40]; /* name of this range */
+	struct dump_offset offset; /* range to dump */
+	uint32_t xin_id; /* client xin id */
+};
+
+/* boilerplate for read-only single_open debugfs files */
+#define DEFINE_MDSS_DEBUGFS_SEQ_FOPS(__prefix)				\
+static int __prefix ## _open(struct inode *inode, struct file *file)	\
+{									\
+	return single_open(file, __prefix ## _show, inode->i_private);	\
+}									\
+static const struct file_operations __prefix ## _fops = {		\
+	.owner = THIS_MODULE,						\
+	.open = __prefix ## _open,					\
+	.release = single_release,					\
+	.read = seq_read,						\
+	.llseek = seq_lseek,						\
+}
+
+/* debugfs lifecycle and register-block registration */
+int mdss_debugfs_init(struct mdss_data_type *mdata);
+int mdss_debugfs_remove(struct mdss_data_type *mdata);
+int mdss_debug_register_base(const char *name, void __iomem *base,
+	size_t max_offset, struct mdss_debug_base **dbg_blk);
+void mdss_debug_register_dump_range(struct platform_device *pdev,
+	struct mdss_debug_base *blk_base, const char *ranges_prop,
+	const char *name_prop, const char *xin_prop);
+int panel_debug_register_base(const char *name, void __iomem *base,
+				    size_t max_offset);
+/* MISR control (implemented in mdss_debug.c) */
+int mdss_misr_set(struct mdss_data_type *mdata,
+			struct mdp_misr *req,
+			struct mdss_mdp_ctl *ctl);
+int mdss_misr_get(struct mdss_data_type *mdata,
+			struct mdp_misr *resp,
+			struct mdss_mdp_ctl *ctl,
+			bool is_video_mode);
+void mdss_misr_disable(struct mdss_data_type *mdata,
+			struct mdp_misr *req,
+			struct mdss_mdp_ctl *ctl);
+void mdss_misr_crc_collect(struct mdss_data_type *mdata, int block_id,
+	bool is_video_mode);
+
+int mdss_create_xlog_debug(struct mdss_debug_data *mdd);
+#if defined(CONFIG_FB_MSM_MDSS_FRC_DEBUG)
+int mdss_create_frc_debug(struct mdss_debug_data *mdd);
+#else
+static inline int mdss_create_frc_debug(struct mdss_debug_data *mdd)
+	{return 0; }
+#endif
+/* xlog / register dump helpers */
+void mdss_xlog(const char *name, int line, int flag, ...);
+void mdss_xlog_tout_handler_default(bool queue, const char *name, ...);
+u32 get_dump_range(struct dump_offset *range_node, size_t max_offset);
+void mdss_dump_reg(const char *dump_name, u32 reg_dump_flag, char *addr,
+	int len, u32 **dump_mem, bool from_isr);
+void mdss_mdp_debug_mid(u32 mid);
+void mdss_mdp_debug_mid(u32 mid);
+#else
+struct mdss_debug_base;
+struct dump_offset;
+
+/* no-op stubs when debugfs/MDSS debug support is compiled out */
+static inline int mdss_debugfs_init(struct mdss_data_type *mdata) { return 0; }
+static inline int mdss_debugfs_remove(struct mdss_data_type *mdata)
+{
+	return 0;
+}
+static inline int mdss_debug_register_base(const char *name, void __iomem *base,
+	size_t max_offset, struct mdss_debug_base **dbg_blk) { return 0; }
+static inline void mdss_debug_register_dump_range(struct platform_device *pdev,
+	struct mdss_debug_base *blk_base, const char *ranges_prop,
+	const char *name_prop, const char *xin_prop) { }
+static inline int panel_debug_register_base(const char *name,
+					void __iomem *base,
+					size_t max_offset)
+{ return 0; }
+static inline int mdss_misr_set(struct mdss_data_type *mdata,
+					struct mdp_misr *req,
+					struct mdss_mdp_ctl *ctl)
+{ return 0; }
+static inline int mdss_misr_get(struct mdss_data_type *mdata,
+					struct mdp_misr *resp,
+					struct mdss_mdp_ctl *ctl,
+					bool is_video_mode)
+{ return 0; }
+static inline void mdss_misr_disable(struct mdss_data_type *mdata,
+					struct mdp_misr *req,
+					struct mdss_mdp_ctl *ctl)
+{ return; }
+
+static inline void mdss_misr_crc_collect(struct mdss_data_type *mdata,
+					int block_id, bool is_video_mode) { }
+
+static inline int create_xlog_debug(struct mdss_data_type *mdata) { return 0; }
+static inline void mdss_xlog_dump(void) { }
+static inline void mdss_xlog(const char *name, int line, int flag, ...) { }
+
+static inline void mdss_dsi_debug_check_te(struct mdss_panel_data *pdata) { }
+static inline void mdss_xlog_tout_handler_default(bool queue,
+	const char *name, ...) { }
+/*
+ * Fix: the three stubs below were missing "static inline", so every
+ * translation unit including this header would emit an external
+ * definition and the kernel would fail to link with multiple-definition
+ * errors.  Make them static inline like their siblings above.
+ */
+static inline u32 get_dump_range(struct dump_offset *range_node,
+	size_t max_offset)
+	{ return 0; }
+static inline void mdss_dump_reg(const char *dump_name, u32 reg_dump_flag,
+	char *addr, int len, u32 **dump_mem, bool from_isr) { }
+static inline void mdss_mdp_debug_mid(u32 mid) { }
+#endif
+
+int mdss_dump_misr_data(char **buf, u32 size);
+
+/* convenience wrapper: register an already-mapped dss_io_data region */
+static inline int mdss_debug_register_io(const char *name,
+		struct dss_io_data *io_data, struct mdss_debug_base **dbg_blk)
+{
+	return mdss_debug_register_base(name, io_data->base, io_data->len,
+		dbg_blk);
+}
+
+/* FRC (frame-rate conversion) debug hooks; no-ops unless enabled */
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_FB_MSM_MDSS_FRC_DEBUG)
+void mdss_debug_frc_add_vsync_sample(struct mdss_mdp_ctl *ctl,
+	ktime_t vsync_time);
+void mdss_debug_frc_add_kickoff_sample_pre(struct mdss_mdp_ctl *ctl,
+	struct mdss_mdp_frc_info *frc_info, int remaining);
+void mdss_debug_frc_add_kickoff_sample_post(struct mdss_mdp_ctl *ctl,
+	struct mdss_mdp_frc_info *frc_info, int remaining);
+int mdss_debug_frc_frame_repeat_disabled(void);
+#else
+static inline void mdss_debug_frc_add_vsync_sample(
+	struct mdss_mdp_ctl *ctl, ktime_t vsync_time) {}
+static inline void mdss_debug_frc_add_kickoff_sample_pre(
+	struct mdss_mdp_ctl *ctl,
+	struct mdss_mdp_frc_info *frc_info,
+	int remaining) {}
+static inline void mdss_debug_frc_add_kickoff_sample_post(
+	struct mdss_mdp_ctl *ctl,
+	struct mdss_mdp_frc_info *frc_info,
+	int remaining) {}
+static inline int mdss_debug_frc_frame_repeat_disabled(void) {return false; }
+#endif
+
+#endif /* MDSS_DEBUG_H */
diff --git a/drivers/video/fbdev/msm/mdss_debug_frc.c b/drivers/video/fbdev/msm/mdss_debug_frc.c
new file mode 100644
index 0000000..9965d03
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_debug_frc.c
@@ -0,0 +1,574 @@
+/* Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/spinlock.h>
+#include <linux/ktime.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+
+#include "mdss.h"
+#include "mdss_mdp.h"
+#include "mdss_debug.h"
+
+/* debugfs defaults: FRC on, statistics logging off */
+#define FRC_DEFAULT_ENABLE 1
+#define FRC_DEFAULT_LOG_ENABLE 0
+
+/* capacity of each sample ring buffer */
+#define FRC_DEBUG_STAT_MAX_SLOT 1024
+
+/* protects the frc_stat double buffer; taken from vsync IRQ context */
+DEFINE_SPINLOCK(frc_lock);
+
+/* one decoded cadence step: repeat count + indices of its samples */
+struct cadence {
+	int repeat;
+	int kickoff_idx;
+	int vsync_idx;
+};
+
+struct vsync_stat {
+	int vsync_cnt;
+	s64 vsync_ts;	/* vsync timestamp in usec */
+};
+
+struct kick_stat {
+	s64 kickoff_ts;	/* kickoff timestamp in usec */
+	u32 vsync;
+	int remain;
+	struct mdss_mdp_frc_info frc_info;
+};
+
+/* generic circular-buffer header embedded at the top of *_samples */
+struct circ_buf {
+	int index;
+	int size;
+	int cnt;
+};
+
+struct vsync_samples {
+	struct circ_buf cbuf;
+	struct vsync_stat samples[FRC_DEBUG_STAT_MAX_SLOT];
+};
+
+struct kickoff_samples {
+	struct circ_buf cbuf;
+	struct kick_stat samples[FRC_DEBUG_STAT_MAX_SLOT];
+};
+
+/*
+ * Circular-buffer helpers.  The first member of each *_samples struct is a
+ * struct circ_buf, so the cast at the top of every macro is valid.
+ * NOTE(review): cbuf_prev/cbuf_end apply C's % to possibly-negative
+ * operands (result is negative for idx 0 before wrap) — confirm callers
+ * never hit that case.
+ */
+#define cbuf_init(cbuf, len) { \
+	struct circ_buf *cb = (struct circ_buf *)(cbuf); \
+	cb->index = 0; \
+	cb->cnt = 0; \
+	cb->size = (len); }
+
+/* oldest valid slot (+start), accounting for wrap-around */
+#define cbuf_begin(cbuf, start) ({ \
+	struct circ_buf *cb = (struct circ_buf *)(cbuf); \
+	(cb->index > cb->size) ? (cb->index + (start)) % cb->size : (start); })
+
+#define cbuf_end(cbuf, end) ({ \
+	struct circ_buf *cb = (struct circ_buf *)(cbuf); \
+	((cb->index - 1 - (end)) % cb->size); })
+
+#define cbuf_cur(cbuf) ({ \
+	struct circ_buf *cb = (struct circ_buf *)(cbuf); \
+	(cb->index % cb->size); })
+
+#define cbuf_next(cbuf, idx) ({ \
+	struct circ_buf *cb = (struct circ_buf *)(cbuf); \
+	(((idx)+1) % cb->size); })
+
+#define cbuf_prev(cbuf, idx) ({ \
+	struct circ_buf *cb = (struct circ_buf *)(cbuf); \
+	(((idx)-1) % cb->size); })
+
+/* pointer to the slot that will be written next */
+#define current_sample(cbuf) ({ \
+	struct circ_buf *cb = (struct circ_buf *)(cbuf); \
+	int idx = cb->index % cb->size; \
+	&((cbuf)->samples[idx]); })
+
+#define insert_sample(cbuf, sample) { \
+	struct circ_buf *cb = (struct circ_buf *)(cbuf); \
+	int idx = cb->index % cb->size; \
+	(cbuf)->samples[idx] = (sample); \
+	cb->cnt++; \
+	cb->index++; }
+
+/* commit a slot filled in place via current_sample() */
+#define advance_sample(cbuf) { \
+	struct circ_buf *cb = (struct circ_buf *)(cbuf); \
+	cb->cnt++; \
+	cb->index++; }
+
+#define sample_cnt(cbuf) ({ \
+	struct circ_buf *cb = (struct circ_buf *)(cbuf); \
+	(cb->cnt % cb->size); })
+
+/* one complete statistics capture: samples plus decoded cadence steps */
+struct mdss_dbg_frc_stat {
+	int cadence_id;
+	int display_fp1000s;	/* display refresh rate in frames/1000s */
+	struct vsync_samples vs;
+	struct kickoff_samples ks;
+	struct cadence cadence_info[FRC_DEBUG_STAT_MAX_SLOT];
+};
+
+/* global FRC debug state; frc_stat is double-buffered, index selects slot */
+struct mdss_dbg_frc {
+	struct dentry *frc;
+	int frc_enable;
+	int log_enable;
+	struct mdss_dbg_frc_stat frc_stat[2];
+	int index;
+} mdss_dbg_frc;
+
+/* Active slot of the double-buffered FRC statistics. */
+static struct mdss_dbg_frc_stat *__current_frc_stat(
+	struct mdss_dbg_frc *dbg_frc)
+{
+	return dbg_frc->frc_stat + dbg_frc->index;
+}
+
+/* Zero the active stat slot and (re)initialize its sample rings. */
+static void __init_frc_stat(struct mdss_dbg_frc *dbg_frc)
+{
+	struct mdss_dbg_frc_stat *frc_stat = __current_frc_stat(dbg_frc);
+
+	memset(frc_stat, 0, sizeof(struct mdss_dbg_frc_stat));
+
+	/* TODO: increase vsync buffer to avoid wrap around */
+	cbuf_init(&frc_stat->ks, FRC_DEBUG_STAT_MAX_SLOT);
+	cbuf_init(&frc_stat->vs, FRC_DEBUG_STAT_MAX_SLOT/2);
+}
+
+/*
+ * Flip to the other stat slot, reset the new one, and hand back the
+ * completed slot for the caller to read out.
+ */
+static struct mdss_dbg_frc_stat *__swap_frc_stat(
+	struct mdss_dbg_frc *dbg_frc)
+{
+	struct mdss_dbg_frc_stat *done = &dbg_frc->frc_stat[dbg_frc->index];
+
+	dbg_frc->index = dbg_frc->index ? 0 : 1;
+	__init_frc_stat(dbg_frc);
+
+	return done;
+}
+
+/* Record one vsync (count + timestamp) when stats logging is enabled. */
+void mdss_debug_frc_add_vsync_sample(struct mdss_mdp_ctl *ctl,
+	ktime_t vsync_time)
+{
+	if (mdss_dbg_frc.log_enable) {
+		unsigned long flags;
+		struct mdss_dbg_frc_stat *frc_stat;
+		struct vsync_stat vstat;
+
+		spin_lock_irqsave(&frc_lock, flags);
+		frc_stat = __current_frc_stat(&mdss_dbg_frc);
+		vstat.vsync_cnt = ctl->vsync_cnt;
+		vstat.vsync_ts = ktime_to_us(vsync_time);
+		insert_sample(&frc_stat->vs, vstat);
+		spin_unlock_irqrestore(&frc_lock, flags);
+	}
+}
+
+/* collect FRC data for debug ahead of repeat */
+void mdss_debug_frc_add_kickoff_sample_pre(struct mdss_mdp_ctl *ctl,
+	struct mdss_mdp_frc_info *frc_info, int remaining)
+{
+	if (mdss_dbg_frc.log_enable) {
+		unsigned long flags;
+		struct mdss_dbg_frc_stat *frc_stat;
+
+		spin_lock_irqsave(&frc_lock, flags);
+		frc_stat = __current_frc_stat(&mdss_dbg_frc);
+
+		/* Don't update statistics when video repeats */
+		if (frc_info->cur_frc.frame_cnt
+				!= frc_info->last_frc.frame_cnt) {
+			struct kick_stat *kstat = current_sample(&frc_stat->ks);
+
+			kstat->vsync = ctl->vsync_cnt;
+		}
+
+		frc_stat->cadence_id = frc_info->cadence_id;
+		frc_stat->display_fp1000s = frc_info->display_fp1000s;
+		spin_unlock_irqrestore(&frc_lock, flags);
+	}
+}
+
+/* collect FRC data for debug later than repeat */
+void mdss_debug_frc_add_kickoff_sample_post(struct mdss_mdp_ctl *ctl,
+	struct mdss_mdp_frc_info *frc_info, int remaining)
+{
+	if (mdss_dbg_frc.log_enable) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&frc_lock, flags);
+		/* Don't update statistics when video repeats */
+		if (frc_info->cur_frc.frame_cnt
+				!= frc_info->last_frc.frame_cnt) {
+			struct mdss_dbg_frc_stat *frc_stat
+				= __current_frc_stat(&mdss_dbg_frc);
+			struct kick_stat *kstat = current_sample(&frc_stat->ks);
+			ktime_t kickoff_time = ktime_get();
+
+			kstat->kickoff_ts = ktime_to_us(kickoff_time);
+			kstat->frc_info = *frc_info;
+			kstat->remain = remaining;
+
+			advance_sample(&frc_stat->ks);
+		}
+		spin_unlock_irqrestore(&frc_lock, flags);
+	}
+}
+
+/* Non-zero when FRC frame repeat has been turned off via debugfs. */
+int mdss_debug_frc_frame_repeat_disabled(void)
+{
+	return mdss_dbg_frc.frc_enable ? 0 : 1;
+}
+
+/* find the closest vsync right to this kickoff time */
+static int __find_right_vsync(struct vsync_samples *vs, s64 kick)
+{
+	int idx = cbuf_begin(vs, 0);
+
+	for (; idx != cbuf_end(vs, 0); idx = cbuf_next(vs, idx)) {
+		if (vs->samples[idx].vsync_ts >= kick)
+			return idx;
+	}
+
+	return -EBADSLT;
+}
+
+/*
+ * These repeat number might start from any position in the sequence. E.g.,
+ * given cadence 23223, the first repeat might be 3 and the repeating pattern
+ * might be 32232, also, the first repeat could be the 4th 3, so the repeating
+ * pattern will be 32322. Below predefined patterns are going to be used to
+ * find the position of the first repeat in the full sequence, then we can
+ * easily known what the remaining expected repeats.
+ */
+#define CADENCE_22_LEN 2
+static int pattern_22[CADENCE_22_LEN] = {2, 2};
+
+/* every rotation of the 2:3 cadence */
+#define CADENCE_23_LEN 2
+static int pattern_23[CADENCE_23_LEN][CADENCE_23_LEN] = {
+	{2, 3},
+	{3, 2}
+};
+
+/* every rotation of the 2:3:2:2:3 cadence */
+#define CADENCE_23223_LEN 5
+static int pattern_23223[CADENCE_23223_LEN][CADENCE_23223_LEN] = {
+	{2, 3, 2, 2, 3},
+	{3, 2, 2, 3, 2},
+	{2, 2, 3, 2, 3},
+	{2, 3, 2, 3, 2},
+	{3, 2, 3, 2, 2}
+};
+
+/*
+ * Check whether the first observed repeats match the given rotation of the
+ * 23223 pattern; returns non-zero on a full prefix match.
+ * NOTE(review): comparison starts at cadence_info[0] regardless of s_idx —
+ * confirm callers always pass the sequence head.
+ */
+static int __compare_init_pattern(struct mdss_dbg_frc_stat *frc_stat,
+	int *pattern, int s_idx, int e_idx)
+{
+	int i;
+
+	for (i = 0; i < min(CADENCE_23223_LEN, e_idx-s_idx+1); i++) {
+		if (frc_stat->cadence_info[i].repeat != pattern[i])
+			break;
+	}
+
+	return i == min(CADENCE_23223_LEN, e_idx-s_idx+1);
+}
+
+/* Repeat-pattern length for a cadence id; 0 if the cadence is unknown. */
+static int __pattern_len(int cadence_id)
+{
+	if (cadence_id == FRC_CADENCE_22)
+		return CADENCE_22_LEN;
+	if (cadence_id == FRC_CADENCE_23)
+		return CADENCE_23_LEN;
+	if (cadence_id == FRC_CADENCE_23223)
+		return CADENCE_23223_LEN;
+
+	return 0;
+}
+
+/*
+ * Pick the expected repeat pattern (rotation) that matches the observed
+ * start of the sequence; returns NULL when nothing matches.
+ */
+static int *__select_pattern(struct mdss_dbg_frc_stat *frc_stat,
+	int s_idx, int e_idx)
+{
+	int i;
+
+	switch (frc_stat->cadence_id) {
+	case FRC_CADENCE_22:
+		return pattern_22;
+	case FRC_CADENCE_23:
+		/* rotation decided by the first observed repeat */
+		return frc_stat->cadence_info[s_idx].repeat == 2 ?
+			pattern_23[0] : pattern_23[1];
+	case FRC_CADENCE_23223:
+		for (i = 0; i < CADENCE_23223_LEN; i++) {
+			if (__compare_init_pattern(frc_stat,
+					pattern_23223[i], s_idx, e_idx))
+				return pattern_23223[i];
+		}
+	}
+
+	return NULL;
+}
+
+/*
+ * Walk the decoded repeats in [s_idx, e_idx) against the expected pattern,
+ * reporting the first mismatching sample, then recursively re-sync and
+ * continue after the mismatch.
+ */
+static void __check_cadence_pattern(struct mdss_dbg_frc_stat *frc_stat,
+	int s_idx, int e_idx)
+{
+	if (s_idx < e_idx) {
+		int *pattern = __select_pattern(frc_stat, s_idx, e_idx);
+		int pattern_len = __pattern_len(frc_stat->cadence_id);
+		struct vsync_samples *vs = &frc_stat->vs;
+		struct kickoff_samples *ks = &frc_stat->ks;
+		int i;
+
+		if (!pattern) {
+			pr_info("Can't match pattern in the beginning\n");
+			return;
+		}
+
+		for (i = s_idx; i < e_idx; i++) {
+			if (frc_stat->cadence_info[i].repeat !=
+					pattern[i % pattern_len]) {
+				int kidx =
+					frc_stat->cadence_info[i].kickoff_idx;
+				/*
+				 * NOTE(review): vs->samples is indexed here
+				 * with the kickoff index kidx, not
+				 * cadence_info[i].vsync_idx — confirm this
+				 * prints the intended vsync timestamp.
+				 */
+				pr_info("\tUnexpected Sample: repeat=%d, kickoff=%lld, vsync=%lld\n",
+					frc_stat->cadence_info[i].repeat,
+					ks->samples[kidx].kickoff_ts,
+					vs->samples[kidx].vsync_ts);
+				break;
+			}
+		}
+
+		/* init check */
+		if (i < e_idx)
+			__check_cadence_pattern(frc_stat, i+1, e_idx);
+	}
+}
+
+/* Pattern checking is only implemented for the 2:2, 2:3 and 2:3:2:2:3 cadences. */
+static int __is_cadence_check_supported(struct mdss_dbg_frc_stat *frc_stat)
+{
+	switch (frc_stat->cadence_id) {
+	case FRC_CADENCE_22:
+	case FRC_CADENCE_23:
+	case FRC_CADENCE_23223:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+/*
+ * Skip leading kickoff samples whose display vsync precedes them and
+ * return the index of the first kickoff that happened before its vsync.
+ *
+ * NOTE(review): vstat is resolved once from the first kickoff and is not
+ * refreshed inside the loop — confirm whether each iteration should
+ * re-run __find_right_vsync() for ks->samples[i].
+ */
+static int __find_first_valid_sample(struct mdss_dbg_frc_stat *frc_stat)
+{
+	int i = 0;
+	struct kickoff_samples *ks = &frc_stat->ks;
+	struct vsync_samples *vs = &frc_stat->vs;
+	struct kick_stat *cur_kstat = &ks->samples[cbuf_begin(ks, 0)];
+	s64 cur_kick = cur_kstat->kickoff_ts;
+	int cur_disp = __find_right_vsync(vs, cur_kick);
+	struct vsync_stat *vstat = &vs->samples[cur_disp];
+
+	i = cbuf_begin(ks, 0);
+	for (; i != cbuf_end(ks, 1); i = cbuf_next(ks, i)) {
+		if (vstat->vsync_ts < ks->samples[i].kickoff_ts)
+			break;
+	}
+
+	return i;
+}
+
+/*
+ * Walk the kickoff ring buffer from 'start' and, for each consecutive
+ * pair of kickoffs, record how many vsyncs elapsed between their display
+ * times (the "repeat" count) together with the sample indices used.
+ * Returns the number of cadence_info entries filled in.
+ */
+static int __analyze_frc_samples(struct mdss_dbg_frc_stat *frc_stat, int start)
+{
+	struct kickoff_samples *ks = &frc_stat->ks;
+	struct vsync_samples *vs = &frc_stat->vs;
+	int i = start;
+	int cnt = 0;
+
+	/* analyze kickoff & vsync samples */
+	for (; i != cbuf_end(ks, 1); i = cbuf_next(ks, i)) {
+		/*
+		 * TODO: vsync buffer is not enough so it might
+		 * wrap around and drop the samples in the beginning.
+		 * skip the first/last sample.
+		 */
+		s64 cur_kick = ks->samples[i].kickoff_ts;
+		s64 right_kick = ks->samples[cbuf_next(ks, i)].kickoff_ts;
+		int cur_disp = __find_right_vsync(vs, cur_kick);
+		int right_disp = __find_right_vsync(vs, right_kick);
+
+		/* account for vsync ring-buffer wrap when computing repeats */
+		frc_stat->cadence_info[cnt].repeat =
+			right_disp >= cur_disp ? right_disp - cur_disp :
+			right_disp - cur_disp + vs->cbuf.size;
+		frc_stat->cadence_info[cnt].kickoff_idx = i;
+		frc_stat->cadence_info[cnt].vsync_idx = cur_disp;
+		cnt++;
+	}
+
+	return cnt;
+}
+
+/*
+ * Print the 'cnt' analyzed cadence entries: each line pairs a kickoff
+ * with its display vsync, the measured repeat, and the FRC bookkeeping
+ * snapshot captured at kickoff time.
+ */
+static void __dump_frc_samples(struct mdss_dbg_frc_stat *frc_stat, int cnt)
+{
+	struct kickoff_samples *ks = &frc_stat->ks;
+	struct vsync_samples *vs = &frc_stat->vs;
+	int i = 0;
+
+	pr_info("===== Collected FRC statistics: Cadence %d, FPS %d =====\n",
+		frc_stat->cadence_id, frc_stat->display_fp1000s);
+	pr_info("\tKickoff VS. VSYNC:\n");
+	for (i = 0; i < cnt; i++) {
+		struct cadence *p_info = &frc_stat->cadence_info[i];
+		struct kick_stat *kickoff = &ks->samples[p_info->kickoff_idx];
+		struct vsync_stat *vsync = &vs->samples[p_info->vsync_idx];
+
+		pr_info("\t[K: %lld V: (%d)%lld R: %d] c_ts: %lld c_cnt: %d b_ts: %lld b_cnt: %d l_ts: %lld l_cnt: %d b_v: %d l_v: %d l_r: %d pos: %d vs: %d remain: %d\n",
+			kickoff->kickoff_ts,
+			vsync->vsync_cnt,
+			vsync->vsync_ts,
+			p_info->repeat,
+			kickoff->frc_info.cur_frc.timestamp,
+			kickoff->frc_info.cur_frc.frame_cnt,
+			kickoff->frc_info.base_frc.timestamp,
+			kickoff->frc_info.base_frc.frame_cnt,
+			kickoff->frc_info.last_frc.timestamp,
+			kickoff->frc_info.last_frc.frame_cnt,
+			kickoff->frc_info.base_vsync_cnt,
+			kickoff->frc_info.last_vsync_cnt,
+			kickoff->frc_info.last_repeat,
+			kickoff->frc_info.gen.pos,
+			kickoff->vsync,
+			kickoff->remain);
+	}
+
+	pr_info("===== End FRC statistics: =====\n");
+}
+
+/* True when either sample ring buffer holds no data at all. */
+static bool __is_frc_stat_empty(struct mdss_dbg_frc_stat *frc_stat)
+{
+	if (sample_cnt(&frc_stat->vs) == 0)
+		return true;
+
+	return sample_cnt(&frc_stat->ks) == 0;
+}
+
+/*
+ * Dump and analyze the FRC statistics collected so far: swap the
+ * collect/analyze buffers under the lock, then (lock-free) locate the
+ * first valid sample, derive the cadence repeats, print them, and — for
+ * supported cadences — verify the sequence against the expected pattern.
+ */
+static void mdss_frc_dump_debug_stat(struct mdss_dbg_frc *frc_debug)
+{
+	int i = 0;
+	int cnt = 0;
+	struct mdss_dbg_frc_stat *frc_stat = NULL;
+	unsigned long flags;
+
+	/* swap buffer of collect & analyze */
+	spin_lock_irqsave(&frc_lock, flags);
+	frc_stat = __swap_frc_stat(frc_debug);
+	spin_unlock_irqrestore(&frc_lock, flags);
+
+	if (__is_frc_stat_empty(frc_stat))
+		return;
+
+	/* find the first valid kickoff sample */
+	i = __find_first_valid_sample(frc_stat);
+
+	/* analyze kickoff & vsync samples */
+	cnt = __analyze_frc_samples(frc_stat, i);
+
+	/* print collected statistics FRC data */
+	__dump_frc_samples(frc_stat, cnt);
+
+	if (__is_cadence_check_supported(frc_stat)) {
+		pr_info("===== Check Cadence Pattern: =====\n");
+		__check_cadence_pattern(frc_stat, 0, cnt);
+		pr_info("===== Check Cadence Pattern End =====\n");
+	}
+}
+
+/*
+ * debugfs read handler for frc/log: reports the current log_enable value
+ * as a decimal string. Only requires the user buffer to hold the bytes
+ * actually produced; previously any read with a buffer smaller than
+ * sizeof(buf) (32) failed with -EFAULT even though only a few bytes are
+ * ever written.
+ */
+static ssize_t mdss_frc_log_read(struct file *file,
+			char __user *buff, size_t count, loff_t *ppos)
+{
+	int len = 0;
+	char buf[32] = {'\0'};
+
+	if (*ppos)
+		return 0; /* the end */
+
+	len = snprintf(buf, sizeof(buf), "%d\n", mdss_dbg_frc.log_enable);
+	if (len < 0 || len >= sizeof(buf))
+		return 0;
+
+	/* fail only if the user buffer cannot hold the formatted value */
+	if ((count < len) || copy_to_user(buff, buf, len))
+		return -EFAULT;
+
+	*ppos += len;	/* increase offset */
+
+	return len;
+}
+
+/*
+ * debugfs write handler for frc/log: parse an integer enable flag.
+ * Statistics buffers are re-initialized on an off->on transition so a
+ * fresh capture starts cleanly. Returns -EINVAL for oversized or
+ * unparsable input (these are invalid arguments, not address faults;
+ * -EFAULT is reserved for copy_from_user() failure).
+ */
+static ssize_t mdss_frc_log_write(struct file *file,
+		    const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	int enable;
+	unsigned long flags;
+
+	if (count >= sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = '\0';
+	if (kstrtoint(buf, 0, &enable))
+		return -EINVAL;
+
+	/* reset the sample buffers when logging is being turned on */
+	if (enable && !mdss_dbg_frc.log_enable) {
+		spin_lock_irqsave(&frc_lock, flags);
+		__init_frc_stat(&mdss_dbg_frc);
+		spin_unlock_irqrestore(&frc_lock, flags);
+	}
+	mdss_dbg_frc.log_enable = enable;
+
+	pr_info("log_enable = %d\n", mdss_dbg_frc.log_enable);
+
+	return count;
+}
+
+/* File operations for the frc/log debugfs node (read/write enable flag). */
+static const struct file_operations mdss_dbg_frc_log_fops = {
+	.read = mdss_frc_log_read,
+	.write = mdss_frc_log_write,
+};
+
+/* debugfs write handler for frc/dump: any write triggers a stats dump. */
+static ssize_t mdss_frc_dump_write(struct file *file,
+	const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	mdss_frc_dump_debug_stat(&mdss_dbg_frc);
+
+	return count;
+}
+
+/* File operations for the frc/dump debugfs node (write-only trigger). */
+static const struct file_operations mdss_dbg_frc_dump_fops = {
+	.read = NULL,
+	.write = mdss_frc_dump_write,
+};
+
+/*
+ * Create the "frc" debugfs directory and its enable/log/dump nodes under
+ * the MDSS debug root, then set the default enable state. Returns 0 on
+ * success or -ENODEV when the directory cannot be created.
+ */
+int mdss_create_frc_debug(struct mdss_debug_data *mdd)
+{
+	mdss_dbg_frc.frc = debugfs_create_dir("frc", mdd->root);
+	if (IS_ERR_OR_NULL(mdss_dbg_frc.frc)) {
+		pr_err("debugfs_create_dir fail, error %ld\n",
+				PTR_ERR(mdss_dbg_frc.frc));
+		mdss_dbg_frc.frc = NULL;
+		return -ENODEV;
+	}
+
+	debugfs_create_u32("enable", 0644, mdss_dbg_frc.frc,
+			&mdss_dbg_frc.frc_enable);
+	debugfs_create_file("log", 0644, mdss_dbg_frc.frc, NULL,
+						&mdss_dbg_frc_log_fops);
+	debugfs_create_file("dump", 0644, mdss_dbg_frc.frc, NULL,
+						&mdss_dbg_frc_dump_fops);
+
+	mdss_dbg_frc.frc_enable = FRC_DEFAULT_ENABLE;
+	mdss_dbg_frc.log_enable = FRC_DEFAULT_LOG_ENABLE;
+	mdss_dbg_frc.index = 0;
+
+	pr_debug("frc_dbg: frc_enable:%d log_enable:%d\n",
+		mdss_dbg_frc.frc_enable, mdss_dbg_frc.log_enable);
+
+	return 0;
+}
diff --git a/drivers/video/fbdev/msm/mdss_debug_xlog.c b/drivers/video/fbdev/msm/mdss_debug_xlog.c
new file mode 100644
index 0000000..e493dcd
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_debug_xlog.c
@@ -0,0 +1,756 @@
+/* Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/ktime.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/dma-buf.h>
+
+#include "mdss.h"
+#include "mdss_mdp.h"
+#include "mdss_debug.h"
+
+#ifdef CONFIG_FB_MSM_MDSS_XLOG_DEBUG
+#define XLOG_DEFAULT_ENABLE 1
+#else
+#define XLOG_DEFAULT_ENABLE 0
+#endif
+
+#define XLOG_DEFAULT_PANIC 1
+#define XLOG_DEFAULT_REGDUMP 0x2 /* dump in RAM */
+#define XLOG_DEFAULT_DBGBUSDUMP 0x2 /* dump in RAM */
+#define XLOG_DEFAULT_VBIF_DBGBUSDUMP 0x2 /* dump in RAM */
+
+/*
+ * xlog will print this number of entries when it is called through
+ * sysfs node or panic. This prevents kernel log from xlog message
+ * flood.
+ */
+#define MDSS_XLOG_PRINT_ENTRY	256
+
+/*
+ * xlog keeps this number of entries in memory for debug purpose. This
+ * number must be greater than print entry to prevent out of bound xlog
+ * entry array access.
+ */
+#define MDSS_XLOG_ENTRY	(MDSS_XLOG_PRINT_ENTRY * 4)
+#define MDSS_XLOG_MAX_DATA 15
+#define MDSS_XLOG_BUF_MAX 512
+#define MDSS_XLOG_BUF_ALIGN 32
+
+DEFINE_SPINLOCK(xlock);
+
+/* One xlog entry: where it was logged, when, by whom, and its payload. */
+struct tlog {
+	u32 counter;
+	s64 time;
+	const char *name;
+	int line;
+	u32 data[MDSS_XLOG_MAX_DATA];
+	u32 data_cnt;
+	int pid;
+};
+
+/*
+ * Global xlog state: the ring buffer of entries, dump cursors
+ * (first/last/curr), the debugfs handles and runtime knobs, plus the
+ * deferred-dump work item and the in-memory dump destinations.
+ */
+struct mdss_dbg_xlog {
+	struct tlog logs[MDSS_XLOG_ENTRY];
+	u32 first;
+	u32 last;
+	u32 curr;
+	struct dentry *xlog;
+	u32 xlog_enable;
+	u32 panic_on_err;
+	u32 enable_reg_dump;
+	u32 enable_dbgbus_dump;
+	u32 enable_vbif_dbgbus_dump;
+	struct work_struct xlog_dump_work;
+	struct mdss_debug_base *blk_arr[MDSS_DEBUG_BASE_MAX];
+	bool work_panic;
+	bool work_dbgbus;
+	bool work_vbif_dbgbus;
+	u32 *dbgbus_dump; /* address for the debug bus dump */
+	u32 *vbif_dbgbus_dump; /* address for the vbif debug bus dump */
+	u32 *nrt_vbif_dbgbus_dump; /* address for the nrt vbif debug bus dump */
+} mdss_dbg_xlog;
+
+/*
+ * Check whether logging is enabled for the given category flag.
+ * MDSS_XLOG_ALL matches whenever any category at all is enabled.
+ */
+static inline bool mdss_xlog_is_enabled(u32 flag)
+{
+	u32 enabled = mdss_dbg_xlog.xlog_enable;
+
+	if (flag == MDSS_XLOG_ALL)
+		return enabled != 0;
+
+	return (enabled & flag) != 0;
+}
+
+/*
+ * Append one entry to the xlog ring buffer. Variadic ints are recorded
+ * until DATA_LIMITER or MDSS_XLOG_MAX_DATA values. Safe from any
+ * context: the buffer is protected by an irqsave spinlock.
+ */
+void mdss_xlog(const char *name, int line, int flag, ...)
+{
+	unsigned long flags;
+	int i, val = 0;
+	va_list args;
+	struct tlog *log;
+
+	if (!mdss_xlog_is_enabled(flag))
+		return;
+
+	spin_lock_irqsave(&xlock, flags);
+	log = &mdss_dbg_xlog.logs[mdss_dbg_xlog.curr];
+	log->time = ktime_to_us(ktime_get());
+	log->name = name;
+	log->line = line;
+	log->data_cnt = 0;
+	log->pid = current->pid;
+
+	va_start(args, flag);
+	for (i = 0; i < MDSS_XLOG_MAX_DATA; i++) {
+
+		val = va_arg(args, int);
+		if (val == DATA_LIMITER)
+			break;
+
+		log->data[i] = val;
+	}
+	va_end(args);
+	log->data_cnt = i;
+	/* curr wraps around the ring; last counts entries monotonically */
+	mdss_dbg_xlog.curr = (mdss_dbg_xlog.curr + 1) % MDSS_XLOG_ENTRY;
+	mdss_dbg_xlog.last++;
+
+	spin_unlock_irqrestore(&xlock, flags);
+}
+
+/* always dump the last entries which are not dumped yet */
+/*
+ * Advance the dump window by one entry per call, using the static 'next'
+ * cursor to remember where the previous dump stopped. Handles cursor
+ * wrap-around and clamps the window to MDSS_XLOG_PRINT_ENTRY when the
+ * ring has overflowed since the last dump. Returns true while there is
+ * at least one undumped entry.
+ */
+static bool __mdss_xlog_dump_calc_range(void)
+{
+	static u32 next;
+	bool need_dump = true;
+	unsigned long flags;
+	struct mdss_dbg_xlog *xlog = &mdss_dbg_xlog;
+
+	spin_lock_irqsave(&xlock, flags);
+
+	xlog->first = next;
+
+	if (xlog->last == xlog->first) {
+		need_dump = false;
+		goto dump_exit;
+	}
+
+	if (xlog->last < xlog->first) {
+		xlog->first %= MDSS_XLOG_ENTRY;
+		if (xlog->last < xlog->first)
+			xlog->last += MDSS_XLOG_ENTRY;
+	}
+
+	if ((xlog->last - xlog->first) > MDSS_XLOG_PRINT_ENTRY) {
+		pr_warn("xlog buffer overflow before dump: %d\n",
+			xlog->last - xlog->first);
+		xlog->first = xlog->last - MDSS_XLOG_PRINT_ENTRY;
+	}
+	next = xlog->first + 1;
+
+dump_exit:
+	spin_unlock_irqrestore(&xlock, flags);
+
+	return need_dump;
+}
+
+/*
+ * Format the xlog entry at cursor 'first' into xlog_buf and return the
+ * number of bytes written. Uses scnprintf() rather than snprintf() for
+ * the accumulated offset: snprintf() returns the would-be length on
+ * truncation, so 'off' could exceed xlog_buf_size and the subsequent
+ * (xlog_buf_size - off) would underflow when converted to size_t,
+ * allowing a write past the end of the buffer. scnprintf() returns the
+ * bytes actually stored and can never advance past the buffer.
+ */
+static ssize_t mdss_xlog_dump_entry(char *xlog_buf, ssize_t xlog_buf_size)
+{
+	int i;
+	ssize_t off = 0;
+	struct tlog *log, *prev_log;
+	unsigned long flags;
+
+	spin_lock_irqsave(&xlock, flags);
+
+	log = &mdss_dbg_xlog.logs[mdss_dbg_xlog.first %
+		MDSS_XLOG_ENTRY];
+
+	/* previous entry supplies the delta timestamp */
+	prev_log = &mdss_dbg_xlog.logs[(mdss_dbg_xlog.first - 1) %
+		MDSS_XLOG_ENTRY];
+
+	off = scnprintf((xlog_buf + off), (xlog_buf_size - off), "%s:%-4d",
+		log->name, log->line);
+
+	/* pad the location column so the data columns line up */
+	if (off < MDSS_XLOG_BUF_ALIGN) {
+		memset((xlog_buf + off), 0x20, (MDSS_XLOG_BUF_ALIGN - off));
+		off = MDSS_XLOG_BUF_ALIGN;
+	}
+
+	off += scnprintf((xlog_buf + off), (xlog_buf_size - off),
+		"=>[%-8d:%-11llu:%9llu][%-4d]:", mdss_dbg_xlog.first,
+		log->time, (log->time - prev_log->time), log->pid);
+
+	for (i = 0; i < log->data_cnt; i++)
+		off += scnprintf((xlog_buf + off), (xlog_buf_size - off),
+			"%x ", log->data[i]);
+
+	off += scnprintf((xlog_buf + off), (xlog_buf_size - off), "\n");
+
+	spin_unlock_irqrestore(&xlock, flags);
+
+	return off;
+}
+
+/* Print every not-yet-dumped xlog entry to the kernel log, one per line. */
+static void mdss_xlog_dump_all(void)
+{
+	char xlog_buf[MDSS_XLOG_BUF_MAX];
+
+	while (__mdss_xlog_dump_calc_range()) {
+		mdss_xlog_dump_entry(xlog_buf, MDSS_XLOG_BUF_MAX);
+		pr_info("%s", xlog_buf);
+	}
+}
+
+/*
+ * Compute the dump length for a register range node. An unset (0,0) or
+ * inconsistent range falls back to dumping the whole block.
+ */
+u32 get_dump_range(struct dump_offset *range_node, size_t max_offset)
+{
+	if (range_node->start == 0 && range_node->end == 0)
+		return max_offset;
+
+	if (range_node->start > range_node->end ||
+	    range_node->end > max_offset)
+		return max_offset;
+
+	return range_node->end - range_node->start;
+}
+
+/*
+ * Walk the MDP debug-bus test-point list: select each test point, read
+ * its status, and record the result to the kernel log and/or a
+ * DMA-coherent buffer according to bus_dump_flag. The in-memory buffer
+ * is allocated lazily on first use and holds 4 u32 words per test point.
+ */
+static void mdss_dump_debug_bus(u32 bus_dump_flag,
+	u32 **dump_mem)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	bool in_log, in_mem;
+	u32 *dump_addr = NULL;
+	u32 status = 0;
+	struct debug_bus *head;
+	phys_addr_t phys = 0;
+	int list_size = mdata->dbg_bus_size;
+	int i;
+
+	if (!(mdata->dbg_bus && list_size))
+		return;
+
+	/* will keep in memory 4 entries of 4 bytes each */
+	list_size = (list_size * 4 * 4);
+
+	in_log = (bus_dump_flag & MDSS_DBG_DUMP_IN_LOG);
+	in_mem = (bus_dump_flag & MDSS_DBG_DUMP_IN_MEM);
+
+	pr_info("======== Debug bus DUMP =========\n");
+
+	if (in_mem) {
+		if (!(*dump_mem))
+			*dump_mem = dma_alloc_coherent(&mdata->pdev->dev,
+				list_size, &phys, GFP_KERNEL);
+
+		if (*dump_mem) {
+			dump_addr = *dump_mem;
+			pr_info("%s: start_addr:0x%pK end_addr:0x%pK\n",
+				__func__, dump_addr, dump_addr + list_size);
+		} else {
+			/* fall back to log-only when allocation fails */
+			in_mem = false;
+			pr_err("dump_mem: allocation fails\n");
+		}
+	}
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+	for (i = 0; i < mdata->dbg_bus_size; i++) {
+		head = mdata->dbg_bus + i;
+		writel_relaxed(TEST_MASK(head->block_id, head->test_id),
+				mdss_res->mdp_base + head->wr_addr);
+		wmb(); /* make sure test bits were written */
+		status = readl_relaxed(mdss_res->mdp_base +
+			head->wr_addr + 0x4);
+
+		if (in_log)
+			pr_err("waddr=0x%x blk=%d tst=%d val=0x%x\n",
+				head->wr_addr, head->block_id, head->test_id,
+				status);
+
+		if (dump_addr && in_mem) {
+			dump_addr[i*4]     = head->wr_addr;
+			dump_addr[i*4 + 1] = head->block_id;
+			dump_addr[i*4 + 2] = head->test_id;
+			dump_addr[i*4 + 3] = status;
+		}
+
+		/* Disable debug bus once we are done */
+		writel_relaxed(0, mdss_res->mdp_base + head->wr_addr);
+
+	}
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+
+	pr_info("========End Debug bus=========\n");
+}
+
+/*
+ * Read every test point of one VBIF debug-bus block: for each of
+ * block_cnt sub-blocks select it, then iterate its test_pnt_cnt test
+ * points and capture MMSS_VBIF_TEST_BUS_OUT. Results go to dump_addr
+ * (4 words per test point) and/or the kernel log.
+ */
+static void __vbif_debug_bus(struct vbif_debug_bus *head,
+	void __iomem *vbif_base, u32 *dump_addr, bool in_log)
+{
+	int i, j;
+	u32 val;
+
+	if (!dump_addr && !in_log)
+		return;
+
+	for (i = 0; i < head->block_cnt; i++) {
+		writel_relaxed(1 << (i + head->bit_offset),
+				vbif_base + head->block_bus_addr);
+		/* make sure that current bus blcok enable */
+		wmb();
+		for (j = 0; j < head->test_pnt_cnt; j++) {
+			writel_relaxed(j, vbif_base + head->block_bus_addr + 4);
+			/* make sure that test point is enabled */
+			wmb();
+			val = readl_relaxed(vbif_base + MMSS_VBIF_TEST_BUS_OUT);
+			if (dump_addr) {
+				*dump_addr++ = head->block_bus_addr;
+				*dump_addr++ = i;
+				*dump_addr++ = j;
+				*dump_addr++ = val;
+			}
+			if (in_log)
+				pr_err("testpoint:%x arb/xin id=%d index=%d val=0x%x\n",
+					head->block_bus_addr, i, j, val);
+		}
+	}
+}
+
+/*
+ * Dump the (NRT or real-time) VBIF debug bus. Sizes a lazily-allocated
+ * DMA-coherent buffer from the per-block test-point counts (16 bytes per
+ * test point), forces the VBIF core clock on, then reads each block via
+ * __vbif_debug_bus(). Destination(s) chosen by bus_dump_flag.
+ */
+static void mdss_dump_vbif_debug_bus(u32 bus_dump_flag,
+	u32 **dump_mem, bool real_time)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	bool in_log, in_mem;
+	u32 *dump_addr = NULL;
+	u32 value;
+	struct vbif_debug_bus *head;
+	phys_addr_t phys = 0;
+	int i, list_size = 0;
+	void __iomem *vbif_base;
+	struct vbif_debug_bus *dbg_bus;
+	u32 bus_size;
+
+	if (real_time) {
+		pr_info("======== VBIF Debug bus DUMP =========\n");
+		vbif_base = mdata->vbif_io.base;
+		dbg_bus = mdata->vbif_dbg_bus;
+		bus_size = mdata->vbif_dbg_bus_size;
+	} else {
+		pr_info("======== NRT VBIF Debug bus DUMP =========\n");
+		vbif_base = mdata->vbif_nrt_io.base;
+		dbg_bus = mdata->nrt_vbif_dbg_bus;
+		bus_size = mdata->nrt_vbif_dbg_bus_size;
+	}
+
+	if (!dbg_bus || !bus_size)
+		return;
+
+	/* allocate memory for each test point */
+	for (i = 0; i < bus_size; i++) {
+		head = dbg_bus + i;
+		list_size += (head->block_cnt * head->test_pnt_cnt);
+	}
+
+	/* 4 bytes * 4 entries for each test point*/
+	list_size *= 16;
+
+	in_log = (bus_dump_flag & MDSS_DBG_DUMP_IN_LOG);
+	in_mem = (bus_dump_flag & MDSS_DBG_DUMP_IN_MEM);
+
+	if (in_mem) {
+		if (!(*dump_mem))
+			*dump_mem = dma_alloc_coherent(&mdata->pdev->dev,
+				list_size, &phys, GFP_KERNEL);
+
+		if (*dump_mem) {
+			dump_addr = *dump_mem;
+			pr_info("%s: start_addr:0x%pK end_addr:0x%pK\n",
+				__func__, dump_addr, dump_addr + list_size);
+		} else {
+			/* fall back to log-only when allocation fails */
+			in_mem = false;
+			pr_err("dump_mem: allocation fails\n");
+		}
+	}
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+	value = readl_relaxed(vbif_base + MMSS_VBIF_CLKON);
+	writel_relaxed(value | BIT(1), vbif_base + MMSS_VBIF_CLKON);
+
+	/* make sure that vbif core is on */
+	wmb();
+
+	for (i = 0; i < bus_size; i++) {
+		head = dbg_bus + i;
+
+		writel_relaxed(0, vbif_base + head->disable_bus_addr);
+		writel_relaxed(BIT(0), vbif_base + MMSS_VBIF_TEST_BUS_OUT_CTRL);
+		/* make sure that other bus is off */
+		wmb();
+
+		__vbif_debug_bus(head, vbif_base, dump_addr, in_log);
+		if (dump_addr)
+			dump_addr += (head->block_cnt * head->test_pnt_cnt * 4);
+	}
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+
+	pr_info("========End VBIF Debug bus=========\n");
+}
+
+/*
+ * Dump 'len' bytes of registers starting at 'addr' in 16-byte rows,
+ * to the kernel log and/or a lazily-allocated DMA-coherent buffer per
+ * reg_dump_flag. Clocks are gated around the reads unless called from
+ * ISR context (from_isr), where the caller guarantees they are on.
+ */
+void mdss_dump_reg(const char *dump_name, u32 reg_dump_flag, char *addr,
+	int len, u32 **dump_mem, bool from_isr)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	bool in_log, in_mem;
+	u32 *dump_addr = NULL;
+	phys_addr_t phys = 0;
+	int i;
+
+	in_log = (reg_dump_flag & MDSS_DBG_DUMP_IN_LOG);
+	in_mem = (reg_dump_flag & MDSS_DBG_DUMP_IN_MEM);
+
+	pr_debug("reg_dump_flag=%d in_log=%d in_mem=%d\n",
+		reg_dump_flag, in_log, in_mem);
+
+	/* round the byte count up and convert to 16-byte row count */
+	if (len % 16)
+		len += 16;
+	len /= 16;
+
+	if (in_mem) {
+		if (!(*dump_mem))
+			*dump_mem = dma_alloc_coherent(&mdata->pdev->dev,
+				len * 16, &phys, GFP_KERNEL);
+
+		if (*dump_mem) {
+			dump_addr = *dump_mem;
+			pr_info("%s: start_addr:0x%pK end_addr:0x%pK reg_addr=0x%pK\n",
+				dump_name, dump_addr, dump_addr + (u32)len * 16,
+				addr);
+		} else {
+			/* fall back to log-only when allocation fails */
+			in_mem = false;
+			pr_err("dump_mem: kzalloc fails!\n");
+		}
+	}
+
+	if (!from_isr)
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+	for (i = 0; i < len; i++) {
+		u32 x0, x4, x8, xc;
+
+		x0 = readl_relaxed(addr+0x0);
+		x4 = readl_relaxed(addr+0x4);
+		x8 = readl_relaxed(addr+0x8);
+		xc = readl_relaxed(addr+0xc);
+
+		if (in_log)
+			pr_info("%pK : %08x %08x %08x %08x\n", addr, x0, x4, x8,
+				xc);
+
+		if (dump_addr && in_mem) {
+			dump_addr[i*4] = x0;
+			dump_addr[i*4 + 1] = x4;
+			dump_addr[i*4 + 2] = x8;
+			dump_addr[i*4 + 3] = xc;
+		}
+
+		addr += 16;
+	}
+
+	if (!from_isr)
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+}
+
+/*
+ * Dump one debug block's registers. When the block provides a range
+ * list, dump each range into its own per-range buffer; otherwise dump
+ * the whole register space. Also fixes the "Ranges not found" message,
+ * which was missing its trailing newline and merged with the next log
+ * line.
+ */
+static void mdss_dump_reg_by_ranges(struct mdss_debug_base *dbg,
+	u32 reg_dump_flag)
+{
+	char *addr;
+	int len;
+	struct range_dump_node *xlog_node, *xlog_tmp;
+
+	if (!dbg || !dbg->base) {
+		pr_err("dbg base is null!\n");
+		return;
+	}
+
+	pr_info("%s:=========%s DUMP=========\n", __func__, dbg->name);
+
+	/* If there is a list to dump the registers by ranges, use the ranges */
+	if (!list_empty(&dbg->dump_list)) {
+		list_for_each_entry_safe(xlog_node, xlog_tmp,
+			&dbg->dump_list, head) {
+			len = get_dump_range(&xlog_node->offset,
+				dbg->max_offset);
+			addr = dbg->base + xlog_node->offset.start;
+			pr_debug("%s: range_base=0x%pK start=0x%x end=0x%x\n",
+				xlog_node->range_name,
+				addr, xlog_node->offset.start,
+				xlog_node->offset.end);
+			mdss_dump_reg((const char *)xlog_node->range_name,
+				reg_dump_flag, addr, len, &xlog_node->reg_dump,
+				false);
+		}
+	} else {
+		/* If there is no list to dump ranges, dump all registers */
+		pr_info("Ranges not found, will dump full registers\n");
+		pr_info("base:0x%pK len:%zu\n", dbg->base, dbg->max_offset);
+		addr = dbg->base;
+		len = dbg->max_offset;
+		mdss_dump_reg((const char *)dbg->name, reg_dump_flag, addr,
+			len, &dbg->reg_dump, false);
+	}
+}
+
+/*
+ * Find the registered debug block whose name matches blk_name and dump
+ * its registers. Silently does nothing when no block matches.
+ */
+static void mdss_dump_reg_by_blk(const char *blk_name)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_debug_data *mdd = mdata->debug_inf.debug_data;
+	struct mdss_debug_base *blk_base, *tmp;
+
+	if (!mdd)
+		return;
+
+	list_for_each_entry_safe(blk_base, tmp, &mdd->base_list, head) {
+		if (strlen(blk_base->name) &&
+			!strcmp(blk_base->name, blk_name)) {
+			mdss_dump_reg_by_ranges(blk_base,
+				mdss_dbg_xlog.enable_reg_dump);
+			break;
+		}
+	}
+}
+
+/*
+ * Dump registers of every named debug block.
+ * NOTE(review): this re-looks each block up by name via
+ * mdss_dump_reg_by_blk(), making the walk O(n^2); harmless for the
+ * small block list but could call mdss_dump_reg_by_ranges() directly.
+ */
+static void mdss_dump_reg_all(void)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_debug_data *mdd = mdata->debug_inf.debug_data;
+	struct mdss_debug_base *blk_base, *tmp;
+
+	if (!mdd)
+		return;
+
+	list_for_each_entry_safe(blk_base, tmp, &mdd->base_list, head) {
+		if (strlen(blk_base->name))
+			mdss_dump_reg_by_blk(blk_base->name);
+	}
+}
+
+/* Drop stale block pointers left over from a previous dump request. */
+static void clear_dump_blk_arr(struct mdss_debug_base *blk_arr[],
+	u32 blk_len)
+{
+	u32 idx;
+
+	for (idx = 0; idx < blk_len; idx++)
+		blk_arr[idx] = NULL;
+}
+
+/*
+ * Look up a registered debug block by name. Returns the block, or NULL
+ * when the debug data is absent or no name matches.
+ */
+struct mdss_debug_base *get_dump_blk_addr(const char *blk_name)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_debug_data *mdd = mdata->debug_inf.debug_data;
+	struct mdss_debug_base *blk_base, *tmp;
+
+	if (!mdd)
+		return NULL;
+
+	list_for_each_entry_safe(blk_base, tmp, &mdd->base_list, head) {
+		if (strlen(blk_base->name) &&
+			!strcmp(blk_base->name, blk_name))
+			return blk_base;
+	}
+
+	return NULL;
+}
+
+/*
+ * Dump the given debug blocks, the pending xlog entries and (optionally)
+ * the MDP/VBIF debug buses, then panic when requested and enabled.
+ * panic() takes a printk-style format string, so 'name' must be passed
+ * as an argument ("%s") rather than as the format itself — passing a
+ * variable as the format is a format-string defect.
+ */
+static void mdss_xlog_dump_array(struct mdss_debug_base *blk_arr[],
+	u32 len, bool dead, const char *name, bool dump_dbgbus,
+	bool dump_vbif_dbgbus)
+{
+	int i;
+
+	for (i = 0; i < len; i++) {
+		if (blk_arr[i] != NULL)
+			mdss_dump_reg_by_ranges(blk_arr[i],
+				mdss_dbg_xlog.enable_reg_dump);
+	}
+
+	mdss_xlog_dump_all();
+
+	if (dump_dbgbus)
+		mdss_dump_debug_bus(mdss_dbg_xlog.enable_dbgbus_dump,
+			&mdss_dbg_xlog.dbgbus_dump);
+
+	if (dump_vbif_dbgbus) {
+		mdss_dump_vbif_debug_bus(mdss_dbg_xlog.enable_vbif_dbgbus_dump,
+			&mdss_dbg_xlog.vbif_dbgbus_dump, true);
+
+		mdss_dump_vbif_debug_bus(mdss_dbg_xlog.enable_vbif_dbgbus_dump,
+			&mdss_dbg_xlog.nrt_vbif_dbgbus_dump, false);
+	}
+
+	if (dead && mdss_dbg_xlog.panic_on_err)
+		panic("%s", name);
+}
+
+/*
+ * Deferred-dump work handler: performs the dump that
+ * mdss_xlog_tout_handler_default() queued, using the flags it saved
+ * in mdss_dbg_xlog.
+ */
+static void xlog_debug_work(struct work_struct *work)
+{
+
+	mdss_xlog_dump_array(mdss_dbg_xlog.blk_arr,
+		ARRAY_SIZE(mdss_dbg_xlog.blk_arr),
+		mdss_dbg_xlog.work_panic, "xlog_workitem",
+		mdss_dbg_xlog.work_dbgbus,
+		mdss_dbg_xlog.work_vbif_dbgbus);
+}
+
+/*
+ * Timeout/error dump entry point. The variadic arguments are a
+ * NULL-terminated list of block-name strings; the special names
+ * "dbg_bus", "vbif_dbg_bus" and "panic" act as flags instead of blocks.
+ * With 'queue' the dump is deferred to a workqueue (skipped if one is
+ * already pending); otherwise it runs inline.
+ */
+void mdss_xlog_tout_handler_default(bool queue, const char *name, ...)
+{
+	int i, index = 0;
+	bool dead = false;
+	bool dump_dbgbus = false, dump_vbif_dbgbus = false;
+	va_list args;
+	char *blk_name = NULL;
+	struct mdss_debug_base *blk_base = NULL;
+	struct mdss_debug_base **blk_arr;
+	u32 blk_len;
+
+	if (!mdss_xlog_is_enabled(MDSS_XLOG_DEFAULT))
+		return;
+
+	if (queue && work_pending(&mdss_dbg_xlog.xlog_dump_work))
+		return;
+
+	blk_arr = &mdss_dbg_xlog.blk_arr[0];
+	blk_len = ARRAY_SIZE(mdss_dbg_xlog.blk_arr);
+
+	clear_dump_blk_arr(blk_arr, blk_len);
+
+	va_start(args, name);
+	for (i = 0; i < MDSS_XLOG_MAX_DATA; i++) {
+		blk_name = va_arg(args, char*);
+		if (IS_ERR_OR_NULL(blk_name))
+			break;
+
+		blk_base = get_dump_blk_addr(blk_name);
+		if (blk_base && (index < blk_len)) {
+			blk_arr[index] = blk_base;
+			index++;
+		}
+
+		if (!strcmp(blk_name, "dbg_bus"))
+			dump_dbgbus = true;
+
+		if (!strcmp(blk_name, "vbif_dbg_bus"))
+			dump_vbif_dbgbus = true;
+
+		if (!strcmp(blk_name, "panic"))
+			dead = true;
+	}
+	va_end(args);
+
+	if (queue) {
+		/* schedule work to dump later */
+		mdss_dbg_xlog.work_panic = dead;
+		mdss_dbg_xlog.work_dbgbus = dump_dbgbus;
+		mdss_dbg_xlog.work_vbif_dbgbus = dump_vbif_dbgbus;
+		schedule_work(&mdss_dbg_xlog.xlog_dump_work);
+	} else {
+		mdss_xlog_dump_array(blk_arr, blk_len, dead, name, dump_dbgbus,
+			dump_vbif_dbgbus);
+	}
+}
+
+/* debugfs open: mark the xlog dump node non-seekable and stash i_private. */
+static int mdss_xlog_dump_open(struct inode *inode, struct file *file)
+{
+	/* non-seekable */
+	file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+/*
+ * debugfs read: return one formatted xlog entry per read() call, or 0
+ * when no undumped entries remain.
+ *
+ * NOTE(review): 'count' is not checked against 'len' before
+ * copy_to_user(); a read with a buffer smaller than one formatted entry
+ * (up to MDSS_XLOG_BUF_MAX bytes) would overrun it — confirm callers
+ * always pass a large enough buffer or clamp len to count.
+ */
+static ssize_t mdss_xlog_dump_read(struct file *file, char __user *buff,
+		size_t count, loff_t *ppos)
+{
+	ssize_t len = 0;
+	char xlog_buf[MDSS_XLOG_BUF_MAX];
+
+	if (__mdss_xlog_dump_calc_range()) {
+		len = mdss_xlog_dump_entry(xlog_buf, MDSS_XLOG_BUF_MAX);
+		if (copy_to_user(buff, xlog_buf, len))
+			return -EFAULT;
+		*ppos += len;
+	}
+
+	return len;
+}
+
+/*
+ * debugfs write: any write triggers a full register + xlog dump, then
+ * panics when panic_on_err is set (deliberate debug aid).
+ */
+static ssize_t mdss_xlog_dump_write(struct file *file,
+	const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	mdss_dump_reg_all();
+
+	mdss_xlog_dump_all();
+
+	if (mdss_dbg_xlog.panic_on_err)
+		panic("mdss");
+
+	return count;
+}
+
+
+/* File operations for the xlog/dump debugfs node. */
+static const struct file_operations mdss_xlog_fops = {
+	.open = mdss_xlog_dump_open,
+	.read = mdss_xlog_dump_read,
+	.write = mdss_xlog_dump_write,
+};
+
+/*
+ * Create the "xlog" debugfs directory with its dump/enable/panic and
+ * bus-dump control nodes, initialize the dump work item and entry
+ * counters, and apply the compile-time defaults. Returns 0 on success
+ * or -ENODEV when the directory cannot be created.
+ */
+int mdss_create_xlog_debug(struct mdss_debug_data *mdd)
+{
+	int i;
+
+	mdss_dbg_xlog.xlog = debugfs_create_dir("xlog", mdd->root);
+	if (IS_ERR_OR_NULL(mdss_dbg_xlog.xlog)) {
+		pr_err("debugfs_create_dir fail, error %ld\n",
+		       PTR_ERR(mdss_dbg_xlog.xlog));
+		mdss_dbg_xlog.xlog = NULL;
+		return -ENODEV;
+	}
+
+	INIT_WORK(&mdss_dbg_xlog.xlog_dump_work, xlog_debug_work);
+	mdss_dbg_xlog.work_panic = false;
+
+	for (i = 0; i < MDSS_XLOG_ENTRY; i++)
+		mdss_dbg_xlog.logs[i].counter = i;
+
+	debugfs_create_file("dump", 0644, mdss_dbg_xlog.xlog, NULL,
+						&mdss_xlog_fops);
+	debugfs_create_u32("enable", 0644, mdss_dbg_xlog.xlog,
+			    &mdss_dbg_xlog.xlog_enable);
+	debugfs_create_bool("panic", 0644, mdss_dbg_xlog.xlog,
+			    &mdss_dbg_xlog.panic_on_err);
+	debugfs_create_u32("reg_dump", 0644, mdss_dbg_xlog.xlog,
+			    &mdss_dbg_xlog.enable_reg_dump);
+	debugfs_create_u32("dbgbus_dump", 0644, mdss_dbg_xlog.xlog,
+			    &mdss_dbg_xlog.enable_dbgbus_dump);
+	debugfs_create_u32("vbif_dbgbus_dump", 0644, mdss_dbg_xlog.xlog,
+			    &mdss_dbg_xlog.enable_vbif_dbgbus_dump);
+
+	mdss_dbg_xlog.xlog_enable = XLOG_DEFAULT_ENABLE;
+	mdss_dbg_xlog.panic_on_err = XLOG_DEFAULT_PANIC;
+	mdss_dbg_xlog.enable_reg_dump = XLOG_DEFAULT_REGDUMP;
+	mdss_dbg_xlog.enable_dbgbus_dump = XLOG_DEFAULT_DBGBUSDUMP;
+	mdss_dbg_xlog.enable_vbif_dbgbus_dump = XLOG_DEFAULT_VBIF_DBGBUSDUMP;
+
+	pr_info("xlog_status: enable:%d, panic:%d, dump:%d\n",
+		mdss_dbg_xlog.xlog_enable, mdss_dbg_xlog.panic_on_err,
+		mdss_dbg_xlog.enable_reg_dump);
+
+	return 0;
+}
diff --git a/drivers/video/fbdev/msm/mdss_dsi.c b/drivers/video/fbdev/msm/mdss_dsi.c
new file mode 100644
index 0000000..bc6d568
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_dsi.c
@@ -0,0 +1,4382 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/err.h>
+#include <linux/regulator/consumer.h>
+#include <linux/leds-qpnp-wled.h>
+#include <linux/clk.h>
+#include <linux/uaccess.h>
+#include <linux/msm-bus.h>
+#include <linux/pm_qos.h>
+
+#include "mdss.h"
+#include "mdss_panel.h"
+#include "mdss_dsi.h"
+#include "mdss_debug.h"
+#include "mdss_dsi_phy.h"
+#include "mdss_dba_utils.h"
+
+#define XO_CLK_RATE	19200000	/* XO (crystal) clock rate in Hz */
+#define CMDLINE_DSI_CTL_NUM_STRING_LEN 2
+
+/* Master structure to hold all the information about the DSI/panel */
+static struct mdss_dsi_data *mdss_dsi_res;
+
+/* PM QoS CPU DMA latency bounds (presumably usec) — TODO confirm units */
+#define DSI_DISABLE_PC_LATENCY 100
+#define DSI_ENABLE_PC_LATENCY PM_QOS_DEFAULT_VALUE
+
+/* single QoS request shared by all DSI controllers (refcounted below) */
+static struct pm_qos_request mdss_dsi_pm_qos_request;
+
+/*
+ * mdss_dsi_pm_qos_add_request - register IRQ-affined CPU DMA latency QoS
+ * @ctrl_pdata: DSI controller whose hw IRQ the request is affined to
+ *
+ * The request is shared across controllers: only the first caller adds it;
+ * later callers just bump pm_qos_req_cnt under pm_qos_lock.
+ */
+static void mdss_dsi_pm_qos_add_request(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	struct irq_info *irq_info;
+
+	if (!ctrl_pdata || !ctrl_pdata->shared_data)
+		return;
+
+	irq_info = ctrl_pdata->dsi_hw->irq_info;
+
+	if (!irq_info)
+		return;
+
+	mutex_lock(&ctrl_pdata->shared_data->pm_qos_lock);
+	if (!ctrl_pdata->shared_data->pm_qos_req_cnt) {
+		pr_debug("%s: add request irq\n", __func__);
+
+		mdss_dsi_pm_qos_request.type = PM_QOS_REQ_AFFINE_IRQ;
+		mdss_dsi_pm_qos_request.irq = irq_info->irq;
+		pm_qos_add_request(&mdss_dsi_pm_qos_request,
+			PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+	}
+	ctrl_pdata->shared_data->pm_qos_req_cnt++;
+	mutex_unlock(&ctrl_pdata->shared_data->pm_qos_lock);
+}
+
+/*
+ * mdss_dsi_pm_qos_remove_request - drop one reference on the shared QoS
+ * request, removing it when the count reaches zero. Warns on unbalanced
+ * calls instead of underflowing the counter.
+ */
+static void mdss_dsi_pm_qos_remove_request(struct dsi_shared_data *sdata)
+{
+	if (!sdata)
+		return;
+
+	mutex_lock(&sdata->pm_qos_lock);
+	if (sdata->pm_qos_req_cnt) {
+		sdata->pm_qos_req_cnt--;
+		if (!sdata->pm_qos_req_cnt) {
+			pr_debug("%s: remove request", __func__);
+			pm_qos_remove_request(&mdss_dsi_pm_qos_request);
+		}
+	} else {
+		pr_warn("%s: unbalanced pm_qos ref count\n", __func__);
+	}
+	mutex_unlock(&sdata->pm_qos_lock);
+}
+
+/* Update the shared QoS latency target; caller must have added the request */
+static void mdss_dsi_pm_qos_update_request(int val)
+{
+	pr_debug("%s: update request %d", __func__, val);
+	pm_qos_update_request(&mdss_dsi_pm_qos_request, val);
+}
+
+static int mdss_dsi_pinctrl_set_state(struct mdss_dsi_ctrl_pdata *ctrl_pdata,
+					bool active);
+
+/* Look up a controller by index; NULL on bad index or unprobed driver */
+static struct mdss_dsi_ctrl_pdata *mdss_dsi_get_ctrl(u32 ctrl_id)
+{
+	if (ctrl_id >= DSI_CTRL_MAX || !mdss_dsi_res)
+		return NULL;
+
+	return mdss_dsi_res->ctrl_pdata[ctrl_id];
+}
+
+/*
+ * mdss_dsi_config_clk_src - choose byte/pixel RCG parent clocks per PLL
+ * source configuration (default mapping, split-dsi, or forced PLL0/PLL1).
+ * Bails out silently when the external clock handles are absent.
+ */
+static void mdss_dsi_config_clk_src(struct platform_device *pdev)
+{
+	struct mdss_dsi_data *dsi_res = platform_get_drvdata(pdev);
+	struct dsi_shared_data *sdata = dsi_res->shared_data;
+
+	if (!sdata->ext_byte0_clk || !sdata->ext_pixel0_clk) {
+		pr_debug("%s: DSI-0 ext. clocks not present\n", __func__);
+		return;
+	}
+
+	if (mdss_dsi_is_pll_src_default(sdata)) {
+		/*
+		 * Default Mapping:
+		 * 1. dual-dsi/single-dsi:
+		 *     DSI0 <--> PLL0
+		 *     DSI1 <--> PLL1
+		 * 2. split-dsi:
+		 *     DSI0 <--> PLL0
+		 *     DSI1 <--> PLL0
+		 */
+		sdata->byte0_parent = sdata->ext_byte0_clk;
+		sdata->pixel0_parent = sdata->ext_pixel0_clk;
+
+		if (mdss_dsi_is_hw_config_split(sdata)) {
+			sdata->byte1_parent = sdata->byte0_parent;
+			sdata->pixel1_parent = sdata->pixel0_parent;
+		} else if (sdata->ext_byte1_clk && sdata->ext_pixel1_clk) {
+			sdata->byte1_parent = sdata->ext_byte1_clk;
+			sdata->pixel1_parent = sdata->ext_pixel1_clk;
+		} else {
+			pr_debug("%s: DSI-1 external clocks not present\n",
+				__func__);
+			return;
+		}
+
+		pr_debug("%s: default: DSI0 <--> PLL0, DSI1 <--> %s", __func__,
+			mdss_dsi_is_hw_config_split(sdata) ? "PLL0" : "PLL1");
+	} else {
+		/*
+		 * For split-dsi and single-dsi use cases, map the PLL source
+		 * based on the pll source configuration. It is possible that
+		 * for split-dsi case, the only supported config is to source
+		 * the clocks from PLL0. This is not explicitly checked here as
+		 * it should have been already enforced when validating the
+		 * board configuration.
+		 */
+		if (mdss_dsi_is_pll_src_pll0(sdata)) {
+			pr_debug("%s: single source: PLL0", __func__);
+			sdata->byte0_parent = sdata->ext_byte0_clk;
+			sdata->pixel0_parent = sdata->ext_pixel0_clk;
+		} else if (mdss_dsi_is_pll_src_pll1(sdata)) {
+			if (sdata->ext_byte1_clk && sdata->ext_pixel1_clk) {
+				pr_debug("%s: single source: PLL1", __func__);
+				sdata->byte0_parent = sdata->ext_byte1_clk;
+				sdata->pixel0_parent = sdata->ext_pixel1_clk;
+			} else {
+				pr_err("%s: DSI-1 external clocks not present\n",
+					__func__);
+				return;
+			}
+		}
+		/* both controllers share the chosen source in non-default cfg */
+		sdata->byte1_parent = sdata->byte0_parent;
+		sdata->pixel1_parent = sdata->pixel0_parent;
+	}
+}
+
+/*
+ * Return a printable name ("PLL0"/"PLL1") for the PLL feeding this
+ * controller, derived by comparing the configured byte-clock parent with
+ * the DSI-0 external clock handle. "????" on invalid input.
+ */
+static char const *mdss_dsi_get_clk_src(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	struct dsi_shared_data *sdata;
+
+	if (!ctrl) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return "????";
+	}
+
+	sdata = ctrl->shared_data;
+
+	if (mdss_dsi_is_left_ctrl(ctrl)) {
+		if (sdata->byte0_parent == sdata->ext_byte0_clk)
+			return "PLL0";
+		else
+			return "PLL1";
+	} else {
+		if (sdata->byte1_parent == sdata->ext_byte0_clk)
+			return "PLL0";
+		else
+			return "PLL1";
+	}
+}
+
+/*
+ * mdss_dsi_set_clk_src - reparent this controller's byte/pixel RCGs to the
+ * parents chosen by mdss_dsi_config_clk_src(). A controller without RCG
+ * handles needs no reparenting and returns 0. Returns clk_set_parent()
+ * error codes on failure.
+ */
+static int mdss_dsi_set_clk_src(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	int rc;
+	struct dsi_shared_data *sdata;
+	struct clk *byte_parent, *pixel_parent;
+
+	if (!ctrl) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	sdata = ctrl->shared_data;
+
+	if (!ctrl->byte_clk_rcg || !ctrl->pixel_clk_rcg) {
+		pr_debug("%s: set_clk_src not needed\n", __func__);
+		return 0;
+	}
+
+	if (mdss_dsi_is_left_ctrl(ctrl)) {
+		byte_parent = sdata->byte0_parent;
+		pixel_parent = sdata->pixel0_parent;
+	} else {
+		byte_parent = sdata->byte1_parent;
+		pixel_parent = sdata->pixel1_parent;
+	}
+
+	rc = clk_set_parent(ctrl->byte_clk_rcg, byte_parent);
+	if (rc) {
+		pr_err("%s: failed to set parent for byte clk for ctrl%d. rc=%d\n",
+			__func__, ctrl->ndx, rc);
+		goto error;
+	}
+
+	rc = clk_set_parent(ctrl->pixel_clk_rcg, pixel_parent);
+	if (rc) {
+		pr_err("%s: failed to set parent for pixel clk for ctrl%d. rc=%d\n",
+			__func__, ctrl->ndx, rc);
+		goto error;
+	}
+
+	pr_debug("%s: ctrl%d clock source set to %s", __func__, ctrl->ndx,
+		mdss_dsi_get_clk_src(ctrl));
+
+	/* success path falls through with rc == 0 */
+error:
+	return rc;
+}
+
+/*
+ * mdss_dsi_regulator_init - configure vregs for every DSI power module
+ * from DSI_CORE_PM upward; on failure, unwinds the modules configured so
+ * far in reverse order. Returns 0 on success or the failing rc.
+ */
+static int mdss_dsi_regulator_init(struct platform_device *pdev,
+		struct dsi_shared_data *sdata)
+{
+	int rc = 0, i = 0, j = 0;
+
+	if (!pdev || !sdata) {
+		pr_err("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	/* !rc in the condition stops the loop after the first failure */
+	for (i = DSI_CORE_PM; !rc && (i < DSI_MAX_PM); i++) {
+		rc = msm_dss_config_vreg(&pdev->dev,
+			sdata->power_data[i].vreg_config,
+			sdata->power_data[i].num_vreg, 1);
+		if (rc) {
+			pr_err("%s: failed to init vregs for %s\n",
+				__func__, __mdss_dsi_pm_name(i));
+			for (j = i-1; j >= DSI_CORE_PM; j--) {
+				msm_dss_config_vreg(&pdev->dev,
+				sdata->power_data[j].vreg_config,
+				sdata->power_data[j].num_vreg, 0);
+			}
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * mdss_dsi_panel_power_off - reset the panel, release pinctrl, and disable
+ * the panel power rails. A failed panel reset is logged but deliberately
+ * not treated as fatal (power-down proceeds regardless).
+ */
+static int mdss_dsi_panel_power_off(struct mdss_panel_data *pdata)
+{
+	int ret = 0;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	ret = mdss_dsi_panel_reset(pdata, 0);
+	if (ret) {
+		pr_warn("%s: Panel reset failed. rc=%d\n", __func__, ret);
+		ret = 0;
+	}
+
+	if (mdss_dsi_pinctrl_set_state(ctrl_pdata, false))
+		pr_debug("reset disable: pinctrl not enabled\n");
+
+	ret = msm_dss_enable_vreg(
+		ctrl_pdata->panel_power_data.vreg_config,
+		ctrl_pdata->panel_power_data.num_vreg, 0);
+	if (ret)
+		pr_err("%s: failed to disable vregs for %s\n",
+			__func__, __mdss_dsi_pm_name(DSI_PANEL_PM));
+
+end:
+	return ret;
+}
+
+/*
+ * mdss_dsi_panel_power_on - enable panel power rails and, unless reset is
+ * deferred for lp11 init, apply pinctrl and assert the panel reset
+ * sequence. Returns 0 on success or a negative errno.
+ */
+static int mdss_dsi_panel_power_on(struct mdss_panel_data *pdata)
+{
+	int ret = 0;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	ret = msm_dss_enable_vreg(
+		ctrl_pdata->panel_power_data.vreg_config,
+		ctrl_pdata->panel_power_data.num_vreg, 1);
+	if (ret) {
+		pr_err("%s: failed to enable vregs for %s\n",
+			__func__, __mdss_dsi_pm_name(DSI_PANEL_PM));
+		return ret;
+	}
+
+	/*
+	 * If continuous splash screen feature is enabled, then we need to
+	 * request all the GPIOs that have already been configured in the
+	 * bootloader. This needs to be done irresepective of whether
+	 * the lp11_init flag is set or not.
+	 */
+	if (pdata->panel_info.cont_splash_enabled ||
+		!pdata->panel_info.mipi.lp11_init) {
+		if (mdss_dsi_pinctrl_set_state(ctrl_pdata, true))
+			pr_debug("reset enable: pinctrl not enabled\n");
+
+		ret = mdss_dsi_panel_reset(pdata, 1);
+		if (ret)
+			pr_err("%s: Panel reset failed. rc=%d\n",
+					__func__, ret);
+	}
+
+	return ret;
+}
+
+/* Intentional no-op stub: LP entry/exit needs no power rail changes here */
+static int mdss_dsi_panel_power_lp(struct mdss_panel_data *pdata, int enable)
+{
+	/* Panel power control when entering/exiting lp mode */
+	return 0;
+}
+
+/*
+ * mdss_dsi_panel_power_ulp - switch every power module except DSI_CORE_PM
+ * between ULP and normal regulator load mode. On a mid-loop failure the
+ * modules touched so far are switched back to the opposite mode.
+ */
+static int mdss_dsi_panel_power_ulp(struct mdss_panel_data *pdata,
+					int enable)
+{
+	int ret = 0, i;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	u32 mode = enable ? DSS_REG_MODE_ULP : DSS_REG_MODE_ENABLE;
+	struct dsi_shared_data *sdata;
+
+	pr_debug("%s: +\n", __func__);
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+	sdata = ctrl_pdata->shared_data;
+
+	for (i = 0; i < DSI_MAX_PM; i++) {
+		/*
+		 * Core power module need to be controlled along with
+		 * DSI core clocks.
+		 */
+		if (i == DSI_CORE_PM)
+			continue;
+		if (i == DSI_PANEL_PM)
+			ret = msm_dss_config_vreg_opt_mode(
+				ctrl_pdata->panel_power_data.vreg_config,
+				ctrl_pdata->panel_power_data.num_vreg, mode);
+		else
+			ret = msm_dss_config_vreg_opt_mode(
+				sdata->power_data[i].vreg_config,
+				sdata->power_data[i].num_vreg, mode);
+		if (ret) {
+			pr_err("%s: failed to config ulp opt mode for %s.rc=%d\n",
+				__func__, __mdss_dsi_pm_name(i), ret);
+			break;
+		}
+	}
+
+	if (ret) {
+		mode = enable ? DSS_REG_MODE_ENABLE : DSS_REG_MODE_ULP;
+		/*
+		 * NOTE(review): rollback uses ctrl_pdata->power_data[i] and
+		 * does not skip DSI_CORE_PM, while the forward loop used
+		 * sdata->power_data[i] / panel_power_data — confirm these
+		 * arrays are equivalent here, otherwise the unwind is
+		 * asymmetric with the apply path.
+		 */
+		for (; i >= 0; i--)
+			msm_dss_config_vreg_opt_mode(
+				ctrl_pdata->power_data[i].vreg_config,
+				ctrl_pdata->power_data[i].num_vreg, mode);
+	}
+	return ret;
+}
+
+/*
+ * mdss_dsi_panel_power_ctrl - state machine driving panel power transitions
+ * @pdata: panel data (embedded in the controller pdata)
+ * @power_state: requested MDSS_PANEL_POWER_* state
+ *
+ * No-ops when already in the requested state or when a dynamic mode switch
+ * is pending. On success records the new state in panel_info; ULP/LP exit
+ * paths jump to 'end' and intentionally leave panel_power_state unchanged.
+ * Returns 0 on success or a negative errno.
+ */
+int mdss_dsi_panel_power_ctrl(struct mdss_panel_data *pdata,
+	int power_state)
+{
+	/*
+	 * Initialized: the MDSS_PANEL_POWER_LP2 case assigns nothing when
+	 * core_power is set, and 'ret' is read below — leaving it
+	 * uninitialized there was undefined behavior.
+	 */
+	int ret = 0;
+	struct mdss_panel_info *pinfo;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	pinfo = &pdata->panel_info;
+	pr_debug("%pS-->%s: cur_power_state=%d req_power_state=%d\n",
+		__builtin_return_address(0), __func__,
+		pinfo->panel_power_state, power_state);
+
+	if (pinfo->panel_power_state == power_state) {
+		pr_debug("%s: no change needed\n", __func__);
+		return 0;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	/*
+	 * If a dynamic mode switch is pending, the regulators should not
+	 * be turned off or on.
+	 */
+	if (pdata->panel_info.dynamic_switch_pending)
+		return 0;
+
+	switch (power_state) {
+	case MDSS_PANEL_POWER_OFF:
+		ret = mdss_dsi_panel_power_off(pdata);
+		break;
+	case MDSS_PANEL_POWER_ON:
+		if (mdss_dsi_is_panel_on_ulp(pdata)) {
+			ret = mdss_dsi_panel_power_ulp(pdata, false);
+			goto end;
+		} else if (mdss_dsi_is_panel_on_lp(pdata)) {
+			ret = mdss_dsi_panel_power_lp(pdata, false);
+			goto end;
+		} else {
+			ret = mdss_dsi_panel_power_on(pdata);
+		}
+		break;
+	case MDSS_PANEL_POWER_LP1:
+		if (mdss_dsi_is_panel_on_ulp(pdata))
+			ret = mdss_dsi_panel_power_ulp(pdata, false);
+		else
+			ret = mdss_dsi_panel_power_lp(pdata, true);
+		/*
+		 * temp workaround until framework issues pertaining to LP2
+		 * power state transitions are fixed. For now, we internally
+		 * transition to LP2 state whenever core power is turned off
+		 * in LP1 state
+		 */
+		break;
+	case MDSS_PANEL_POWER_LP2:
+		if (!ctrl_pdata->core_power)
+			ret = mdss_dsi_panel_power_ulp(pdata, true);
+		break;
+	default:
+		pr_err("%s: unknown panel power state requested (%d)\n",
+			__func__, power_state);
+		ret = -EINVAL;
+	}
+
+	if (!ret)
+		pinfo->panel_power_state = power_state;
+end:
+	return ret;
+}
+
+/*
+ * Free the devm-allocated vreg config table and zero the count so the
+ * module power struct can be safely reused.
+ */
+static void mdss_dsi_put_dt_vreg_data(struct device *dev,
+	struct dss_module_power *module_power)
+{
+	if (!module_power) {
+		pr_err("%s: invalid input\n", __func__);
+		return;
+	}
+
+	if (module_power->vreg_config) {
+		devm_kfree(dev, module_power->vreg_config);
+		module_power->vreg_config = NULL;
+	}
+	module_power->num_vreg = 0;
+}
+
+/*
+ * mdss_dsi_get_dt_vreg_data - parse one power module's supply list from DT
+ * @dev: device used for devm allocation
+ * @of_node: node containing (or referencing) the supply list
+ * @mp: output module power struct (vreg_config / num_vreg filled in)
+ * @module: which DSI power module's supply node name to look up
+ *
+ * Mandatory per-supply properties: name, min/max voltage, enable/disable
+ * load. ULP load and the pre/post sleep delays are optional and default
+ * to the enable load / zero respectively. On any mandatory-property error
+ * the config table is freed and num_vreg reset to 0.
+ */
+static int mdss_dsi_get_dt_vreg_data(struct device *dev,
+	struct device_node *of_node, struct dss_module_power *mp,
+	enum dsi_pm_type module)
+{
+	int i = 0, rc = 0;
+	u32 tmp = 0;
+	struct device_node *supply_node = NULL;
+	const char *pm_supply_name = NULL;
+	struct device_node *supply_root_node = NULL;
+
+	if (!dev || !mp) {
+		pr_err("%s: invalid input\n", __func__);
+		rc = -EINVAL;
+		return rc;
+	}
+
+	mp->num_vreg = 0;
+	pm_supply_name = __mdss_dsi_pm_supply_node_name(module);
+	supply_root_node = of_get_child_by_name(of_node, pm_supply_name);
+	if (!supply_root_node) {
+		/*
+		 * Try to get the root node for panel power supply using
+		 * of_parse_phandle() API if of_get_child_by_name() API fails.
+		 */
+		supply_root_node = of_parse_phandle(of_node, pm_supply_name, 0);
+		if (!supply_root_node) {
+			pr_err("no supply entry present: %s\n", pm_supply_name);
+			goto novreg;
+		}
+	}
+
+
+	/* first pass: count supplies to size the allocation */
+	for_each_child_of_node(supply_root_node, supply_node) {
+		mp->num_vreg++;
+	}
+
+	if (mp->num_vreg == 0) {
+		pr_debug("%s: no vreg\n", __func__);
+		goto novreg;
+	} else {
+		pr_debug("%s: vreg found. count=%d\n", __func__, mp->num_vreg);
+	}
+
+	mp->vreg_config = devm_kzalloc(dev, sizeof(struct dss_vreg) *
+		mp->num_vreg, GFP_KERNEL);
+	if (!mp->vreg_config) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	/* second pass: fill one dss_vreg entry per supply child node */
+	for_each_child_of_node(supply_root_node, supply_node) {
+		const char *st = NULL;
+		/* vreg-name */
+		rc = of_property_read_string(supply_node,
+			"qcom,supply-name", &st);
+		if (rc) {
+			pr_err("%s: error reading name. rc=%d\n",
+				__func__, rc);
+			goto error;
+		}
+		snprintf(mp->vreg_config[i].vreg_name,
+			ARRAY_SIZE((mp->vreg_config[i].vreg_name)), "%s", st);
+		/* vreg-min-voltage */
+		rc = of_property_read_u32(supply_node,
+			"qcom,supply-min-voltage", &tmp);
+		if (rc) {
+			pr_err("%s: error reading min volt. rc=%d\n",
+				__func__, rc);
+			goto error;
+		}
+		mp->vreg_config[i].min_voltage = tmp;
+
+		/* vreg-max-voltage */
+		rc = of_property_read_u32(supply_node,
+			"qcom,supply-max-voltage", &tmp);
+		if (rc) {
+			pr_err("%s: error reading max volt. rc=%d\n",
+				__func__, rc);
+			goto error;
+		}
+		mp->vreg_config[i].max_voltage = tmp;
+
+		/* enable-load */
+		rc = of_property_read_u32(supply_node,
+			"qcom,supply-enable-load", &tmp);
+		if (rc) {
+			pr_err("%s: error reading enable load. rc=%d\n",
+				__func__, rc);
+			goto error;
+		}
+		mp->vreg_config[i].load[DSS_REG_MODE_ENABLE] = tmp;
+
+		/* disable-load */
+		rc = of_property_read_u32(supply_node,
+			"qcom,supply-disable-load", &tmp);
+		if (rc) {
+			pr_err("%s: error reading disable load. rc=%d\n",
+				__func__, rc);
+			goto error;
+		}
+		mp->vreg_config[i].load[DSS_REG_MODE_DISABLE] = tmp;
+
+		/* ulp-load: optional, falls back to the enable load */
+		rc = of_property_read_u32(supply_node,
+			"qcom,supply-ulp-load", &tmp);
+		if (rc) {
+			pr_warn("%s: error reading ulp load. rc=%d\n",
+				__func__, rc);
+			rc = 0;
+		}
+		mp->vreg_config[i].load[DSS_REG_MODE_ULP] = (!rc ? tmp :
+			mp->vreg_config[i].load[DSS_REG_MODE_ENABLE]);
+
+		/* pre-sleep */
+		rc = of_property_read_u32(supply_node,
+			"qcom,supply-pre-on-sleep", &tmp);
+		if (rc) {
+			pr_debug("%s: error reading supply pre sleep value. rc=%d\n",
+				__func__, rc);
+			rc = 0;
+		} else {
+			mp->vreg_config[i].pre_on_sleep = tmp;
+		}
+
+		rc = of_property_read_u32(supply_node,
+			"qcom,supply-pre-off-sleep", &tmp);
+		if (rc) {
+			pr_debug("%s: error reading supply pre sleep value. rc=%d\n",
+				__func__, rc);
+			rc = 0;
+		} else {
+			mp->vreg_config[i].pre_off_sleep = tmp;
+		}
+
+		/* post-sleep */
+		rc = of_property_read_u32(supply_node,
+			"qcom,supply-post-on-sleep", &tmp);
+		if (rc) {
+			pr_debug("%s: error reading supply post sleep value. rc=%d\n",
+				__func__, rc);
+			rc = 0;
+		} else {
+			mp->vreg_config[i].post_on_sleep = tmp;
+		}
+
+		rc = of_property_read_u32(supply_node,
+			"qcom,supply-post-off-sleep", &tmp);
+		if (rc) {
+			pr_debug("%s: error reading supply post sleep value. rc=%d\n",
+				__func__, rc);
+			rc = 0;
+		} else {
+			mp->vreg_config[i].post_off_sleep = tmp;
+		}
+
+		pr_debug("%s: %s min=%d, max=%d, enable=%d, disable=%d, ulp_load=%d preonsleep=%d, postonsleep=%d, preoffsleep=%d, postoffsleep=%d\n",
+			__func__,
+			mp->vreg_config[i].vreg_name,
+			mp->vreg_config[i].min_voltage,
+			mp->vreg_config[i].max_voltage,
+			mp->vreg_config[i].load[DSS_REG_MODE_ENABLE],
+			mp->vreg_config[i].load[DSS_REG_MODE_DISABLE],
+			mp->vreg_config[i].load[DSS_REG_MODE_ULP],
+			mp->vreg_config[i].pre_on_sleep,
+			mp->vreg_config[i].post_on_sleep,
+			mp->vreg_config[i].pre_off_sleep,
+			mp->vreg_config[i].post_off_sleep
+			);
+		++i;
+	}
+
+	return rc;
+
+error:
+	if (mp->vreg_config) {
+		devm_kfree(dev, mp->vreg_config);
+		mp->vreg_config = NULL;
+	}
+novreg:
+	mp->num_vreg = 0;
+
+	return rc;
+}
+
+/*
+ * mdss_dsi_get_panel_cfg - copy the DSI panel interface config string
+ * into panel_cfg. Returns the copied length, 0 with an empty string when
+ * no config exists, or a negative errno from panel_intf_type().
+ */
+static int mdss_dsi_get_panel_cfg(char *panel_cfg,
+				struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	int rc;
+	struct mdss_panel_cfg *pan_cfg = NULL;
+
+	if (!panel_cfg)
+		return MDSS_PANEL_INTF_INVALID;
+
+	pan_cfg = ctrl->mdss_util->panel_intf_type(MDSS_PANEL_INTF_DSI);
+	if (IS_ERR(pan_cfg)) {
+		return PTR_ERR(pan_cfg);
+	} else if (!pan_cfg) {
+		panel_cfg[0] = 0;
+		return 0;
+	}
+
+	pr_debug("%s:%d: cfg:[%s]\n", __func__, __LINE__,
+		 pan_cfg->arg_cfg);
+	/*
+	 * NOTE(review): bound is sizeof the SOURCE field (arg_cfg), not the
+	 * destination buffer — confirm callers pass a panel_cfg at least
+	 * sizeof(pan_cfg->arg_cfg) bytes long.
+	 */
+	rc = strlcpy(panel_cfg, pan_cfg->arg_cfg,
+		     sizeof(pan_cfg->arg_cfg));
+	return rc;
+}
+
+/* Holds a DCS command buffer plus its hex-string debugfs representation */
+struct buf_data {
+	char *buf; /* cmd buf */
+	int blen; /* cmd buf length */
+	char *string_buf; /* cmd buf as string, 3 bytes per number */
+	int sblen; /* string buffer length */
+	int sync_flag; /* set when buf is shared with the live panel cmds */
+	struct mutex dbg_mutex; /* mutex to synchronize read/write/flush */
+};
+
+/* Per-controller debugfs state: a shadow ctrl_pdata edited via debugfs */
+struct mdss_dsi_debugfs_info {
+	struct dentry *root;				/* debugfs dir */
+	struct mdss_dsi_ctrl_pdata ctrl_pdata;		/* editable shadow copy */
+	struct buf_data on_cmd;				/* editable on cmds */
+	struct buf_data off_cmd;			/* editable off cmds */
+	u32 override_flag;	/* user sets to apply shadow onto live pdata */
+};
+
+/* Stash the link-state pointer from the inode for read/write handlers */
+static int mdss_dsi_cmd_state_open(struct inode *inode, struct file *file)
+{
+	/* non-seekable */
+	file->private_data = inode->i_private;
+	return nonseekable_open(inode, file);
+}
+
+/*
+ * Report the command link state as "dsi_hs_mode" or "dsi_lp_mode".
+ * Single-shot read: a non-zero offset returns EOF immediately.
+ */
+static ssize_t mdss_dsi_cmd_state_read(struct file *file, char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	int *link_state = file->private_data;
+	char buffer[32];
+	int blen = 0;
+
+	if (*ppos)
+		return 0;
+
+	if ((*link_state) == DSI_HS_MODE)
+		blen = snprintf(buffer, sizeof(buffer), "dsi_hs_mode\n");
+	else
+		blen = snprintf(buffer, sizeof(buffer), "dsi_lp_mode\n");
+
+	if (blen < 0)
+		return 0;
+
+	if (copy_to_user(buf, buffer, blen))
+		return -EFAULT;
+
+	*ppos += blen;
+	return blen;
+}
+
+/*
+ * Parse a written string and set the link state: anything not matching
+ * "dsi_hs_mode" falls back to DSI_LP_MODE.
+ */
+static ssize_t mdss_dsi_cmd_state_write(struct file *file,
+			const char __user *p, size_t count, loff_t *ppos)
+{
+	int *link_state = file->private_data;
+	char *input;
+
+	if (!count) {
+		pr_err("%s: Zero bytes to be written\n", __func__);
+		return -EINVAL;
+	}
+
+	input = kmalloc(count, GFP_KERNEL);
+	if (!input)
+		return -ENOMEM;
+
+	if (copy_from_user(input, p, count)) {
+		kfree(input);
+		return -EFAULT;
+	}
+	/* overwrite last byte (usually '\n') to guarantee termination */
+	input[count-1] = '\0';
+
+	/*
+	 * NOTE(review): the strnstr length equals the needle length, so this
+	 * only matches "dsi_hs_mode" at the very start of the input —
+	 * confirm a prefix-only match is the intent.
+	 */
+	if (strnstr(input, "dsi_hs_mode", strlen("dsi_hs_mode")))
+		*link_state = DSI_HS_MODE;
+	else
+		*link_state = DSI_LP_MODE;
+
+	kfree(input);
+	return count;
+}
+
+/* debugfs ops for the dsi_{on,off}_cmd_state link-state files */
+static const struct file_operations mdss_dsi_cmd_state_fop = {
+	.open = mdss_dsi_cmd_state_open,
+	.read = mdss_dsi_cmd_state_read,
+	.write = mdss_dsi_cmd_state_write,
+};
+
+/* Stash the buf_data pointer from the inode for the cmd file handlers */
+static int mdss_dsi_cmd_open(struct inode *inode, struct file *file)
+{
+	/* non-seekable */
+	file->private_data = inode->i_private;
+	return nonseekable_open(inode, file);
+}
+
+/*
+ * mdss_dsi_cmd_read - render the binary DCS command buffer as hex text
+ * ("xx xx ...\n" per command) on the first read at offset 0, then serve
+ * it out chunk by chunk via simple_read_from_buffer(). The string cache
+ * is rebuilt on each new read sequence and freed at EOF.
+ */
+static ssize_t mdss_dsi_cmd_read(struct file *file, char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	struct buf_data *pcmds = file->private_data;
+	char *bp;
+	ssize_t ret = 0;
+
+	mutex_lock(&pcmds->dbg_mutex);
+	if (*ppos == 0) {
+		kfree(pcmds->string_buf);
+		pcmds->string_buf = NULL;
+		pcmds->sblen = 0;
+	}
+
+	if (!pcmds->string_buf) {
+		/*
+		 * Buffer size is the sum of cmd length (3 bytes per number)
+		 * with NULL terminater
+		 */
+		int bsize = ((pcmds->blen)*3 + 1);
+		int blen = 0;
+		char *buffer;
+
+		buffer = kmalloc(bsize, GFP_KERNEL);
+		if (!buffer) {
+			mutex_unlock(&pcmds->dbg_mutex);
+			return -ENOMEM;
+		}
+
+		bp = pcmds->buf;
+		while ((blen < (bsize-1)) &&
+		       (bp < ((pcmds->buf) + (pcmds->blen)))) {
+			struct dsi_ctrl_hdr dchdr =
+					*((struct dsi_ctrl_hdr *)bp);
+			int dhrlen = sizeof(dchdr), dlen;
+			char *tmp = (char *)(&dchdr);
+
+			/* print dlen big-endian so flush can ntohs it back */
+			dlen = dchdr.dlen;
+			dchdr.dlen = htons(dchdr.dlen);
+			while (dhrlen--)
+				blen += snprintf(buffer+blen, bsize-blen,
+						 "%02x ", (*tmp++));
+
+			bp += sizeof(dchdr);
+			while (dlen--)
+				blen += snprintf(buffer+blen, bsize-blen,
+						 "%02x ", (*bp++));
+			/* replace the trailing space with a newline */
+			buffer[blen-1] = '\n';
+		}
+		buffer[blen] = '\0';
+		pcmds->string_buf = buffer;
+		pcmds->sblen = blen;
+	}
+
+	/*
+	 * The max value of count is PAGE_SIZE(4096).
+	 * It may need multiple times of reading if string buf is too large
+	 */
+	if (*ppos >= (pcmds->sblen)) {
+		kfree(pcmds->string_buf);
+		pcmds->string_buf = NULL;
+		pcmds->sblen = 0;
+		mutex_unlock(&pcmds->dbg_mutex);
+		return 0; /* the end */
+	}
+	ret = simple_read_from_buffer(buf, count, ppos, pcmds->string_buf,
+				      pcmds->sblen);
+	mutex_unlock(&pcmds->dbg_mutex);
+	return ret;
+}
+
+/*
+ * mdss_dsi_cmd_write - accumulate written hex text into string_buf;
+ * translation to a binary command buffer happens later in
+ * mdss_dsi_cmd_flush() when the file is closed.
+ */
+static ssize_t mdss_dsi_cmd_write(struct file *file, const char __user *p,
+				  size_t count, loff_t *ppos)
+{
+	struct buf_data *pcmds = file->private_data;
+	ssize_t ret = 0;
+	int blen = 0;
+	char *string_buf;
+
+	mutex_lock(&pcmds->dbg_mutex);
+	if (*ppos == 0) {
+		kfree(pcmds->string_buf);
+		pcmds->string_buf = NULL;
+		pcmds->sblen = 0;
+	}
+
+	/* Allocate memory for the received string */
+	blen = count + (pcmds->sblen);
+	string_buf = krealloc(pcmds->string_buf, blen + 1, GFP_KERNEL);
+	if (!string_buf) {
+		pr_err("%s: Failed to allocate memory\n", __func__);
+		mutex_unlock(&pcmds->dbg_mutex);
+		return -ENOMEM;
+	}
+
+	/* Writing in batches is possible */
+	ret = simple_write_to_buffer(string_buf, blen, ppos, p, count);
+	if (ret < 0) {
+		pr_err("%s: Failed to copy data\n", __func__);
+		mutex_unlock(&pcmds->dbg_mutex);
+		return -EINVAL;
+	}
+
+	string_buf[ret] = '\0';
+	pcmds->string_buf = string_buf;
+	/*
+	 * NOTE(review): records only the LAST chunk size, not the
+	 * accumulated blen — confirm flush's length math tolerates
+	 * multi-chunk writes.
+	 */
+	pcmds->sblen = count;
+	mutex_unlock(&pcmds->dbg_mutex);
+	return ret;
+}
+
+/*
+ * mdss_dsi_cmd_flush - on file close, parse the accumulated hex string
+ * into a binary DCS command buffer, validate each dsi_ctrl_hdr's dlen
+ * against the remaining length, and install it into the buf_data. With
+ * sync_flag set the old shared buffer is NOT freed (it is still owned by
+ * the live panel cmds).
+ */
+static int mdss_dsi_cmd_flush(struct file *file, fl_owner_t id)
+{
+	struct buf_data *pcmds = file->private_data;
+	int blen, len, i;
+	char *buf, *bufp, *bp;
+	struct dsi_ctrl_hdr *dchdr;
+
+	mutex_lock(&pcmds->dbg_mutex);
+
+	if (!pcmds->string_buf) {
+		mutex_unlock(&pcmds->dbg_mutex);
+		return 0;
+	}
+
+	/*
+	 * Allocate memory for command buffer
+	 * 3 bytes per number, and 2 bytes for the last one
+	 */
+	blen = ((pcmds->sblen) + 2) / 3;
+	buf = kcalloc(1, blen, GFP_KERNEL);
+	if (!buf) {
+		pr_err("%s: Failed to allocate memory\n", __func__);
+		kfree(pcmds->string_buf);
+		pcmds->string_buf = NULL;
+		pcmds->sblen = 0;
+		mutex_unlock(&pcmds->dbg_mutex);
+		return -ENOMEM;
+	}
+
+	/* Translate the input string to command array */
+	bufp = pcmds->string_buf;
+	for (i = 0; i < blen; i++) {
+		uint32_t value = 0;
+		int step = 0;
+
+		if (sscanf(bufp, "%02x%n", &value, &step) > 0) {
+			*(buf+i) = (char)value;
+			bufp += step;
+		}
+	}
+
+	/* Scan dcs commands */
+	bp = buf;
+	len = blen;
+	while (len >= sizeof(*dchdr)) {
+		dchdr = (struct dsi_ctrl_hdr *)bp;
+		/* read() emitted dlen big-endian; convert back to host order */
+		dchdr->dlen = ntohs(dchdr->dlen);
+		if (dchdr->dlen > len || dchdr->dlen < 0) {
+			pr_err("%s: dtsi cmd=%x error, len=%d\n",
+				__func__, dchdr->dtype, dchdr->dlen);
+			kfree(buf);
+			mutex_unlock(&pcmds->dbg_mutex);
+			return -EINVAL;
+		}
+		bp += sizeof(*dchdr);
+		len -= sizeof(*dchdr);
+		bp += dchdr->dlen;
+		len -= dchdr->dlen;
+	}
+	if (len != 0) {
+		pr_err("%s: dcs_cmd=%x len=%d error!\n", __func__,
+				bp[0], len);
+		kfree(buf);
+		mutex_unlock(&pcmds->dbg_mutex);
+		return -EINVAL;
+	}
+
+	if (pcmds->sync_flag) {
+		pcmds->buf = buf;
+		pcmds->blen = blen;
+		pcmds->sync_flag = 0;
+	} else {
+		kfree(pcmds->buf);
+		pcmds->buf = buf;
+		pcmds->blen = blen;
+	}
+	mutex_unlock(&pcmds->dbg_mutex);
+	return 0;
+}
+
+/* debugfs ops for dsi_{on,off}_cmd: write buffers text, flush parses it */
+static const struct file_operations mdss_dsi_cmd_fop = {
+	.open = mdss_dsi_cmd_open,
+	.read = mdss_dsi_cmd_read,
+	.write = mdss_dsi_cmd_write,
+	.flush = mdss_dsi_cmd_flush,
+};
+
+/*
+ * Create one editable DCS-command debugfs file. The buf_data initially
+ * aliases the live panel cmds buffer (sync_flag=1 tells flush not to free
+ * the shared buffer on first replacement).
+ */
+struct dentry *dsi_debugfs_create_dcs_cmd(const char *name, umode_t mode,
+				struct dentry *parent, struct buf_data *cmd,
+				struct dsi_panel_cmds ctrl_cmds)
+{
+	mutex_init(&cmd->dbg_mutex);
+	cmd->buf = ctrl_cmds.buf;
+	cmd->blen = ctrl_cmds.blen;
+	cmd->string_buf = NULL;
+	cmd->sblen = 0;
+	cmd->sync_flag = 1;
+
+	return debugfs_create_file(name, mode, parent,
+				   cmd, &mdss_dsi_cmd_fop);
+}
+
+/* Convenience wrapper fixing the file mode at 0644 */
+#define DEBUGFS_CREATE_DCS_CMD(name, node, cmd, ctrl_cmd) \
+	dsi_debugfs_create_dcs_cmd(name, 0644, node, cmd, ctrl_cmd)
+
+/*
+ * mdss_dsi_debugfs_setup - build the dsi_ctrl_pdata debugfs tree: a shadow
+ * copy of ctrl_pdata whose fields are exposed as debugfs files, applied to
+ * the live pdata only when the user sets override_flag.
+ * Returns 0 on success, -ENOMEM/-ENODEV on failure.
+ */
+static int mdss_dsi_debugfs_setup(struct mdss_panel_data *pdata,
+			struct dentry *parent)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata, *dfs_ctrl;
+	struct mdss_dsi_debugfs_info *dfs;
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	dfs = kcalloc(1, sizeof(*dfs), GFP_KERNEL);
+	if (!dfs)
+		return -ENOMEM;
+
+	dfs->root = debugfs_create_dir("dsi_ctrl_pdata", parent);
+	if (IS_ERR_OR_NULL(dfs->root)) {
+		pr_err("%s: debugfs_create_dir dsi fail, error %ld\n",
+			__func__, PTR_ERR(dfs->root));
+		kfree(dfs);
+		return -ENODEV;
+	}
+
+	dfs_ctrl = &dfs->ctrl_pdata;
+	debugfs_create_u32("override_flag", 0644, dfs->root,
+			   &dfs->override_flag);
+
+	debugfs_create_bool("cmd_sync_wait_broadcast", 0644, dfs->root,
+			    (u32 *)&dfs_ctrl->cmd_sync_wait_broadcast);
+	debugfs_create_bool("cmd_sync_wait_trigger", 0644, dfs->root,
+			    (u32 *)&dfs_ctrl->cmd_sync_wait_trigger);
+
+	debugfs_create_file("dsi_on_cmd_state", 0644, dfs->root,
+		&dfs_ctrl->on_cmds.link_state, &mdss_dsi_cmd_state_fop);
+	debugfs_create_file("dsi_off_cmd_state", 0644, dfs->root,
+		&dfs_ctrl->off_cmds.link_state, &mdss_dsi_cmd_state_fop);
+
+	DEBUGFS_CREATE_DCS_CMD("dsi_on_cmd", dfs->root, &dfs->on_cmd,
+				ctrl_pdata->on_cmds);
+	DEBUGFS_CREATE_DCS_CMD("dsi_off_cmd", dfs->root, &dfs->off_cmd,
+				ctrl_pdata->off_cmds);
+
+	debugfs_create_u32("dsi_err_counter", 0644, dfs->root,
+			   &dfs_ctrl->err_cont.max_err_index);
+	debugfs_create_u32("dsi_err_time_delta", 0644, dfs->root,
+			   &dfs_ctrl->err_cont.err_time_delta);
+
+	/* seed the shadow copy from the live controller state */
+	dfs->override_flag = 0;
+	dfs->ctrl_pdata = *ctrl_pdata;
+	ctrl_pdata->debugfs_info = dfs;
+	return 0;
+}
+
+/*
+ * mdss_dsi_debugfs_init - hook this controller's debugfs tree under the
+ * panel's debugfs root. Returns 0 on success or a negative errno.
+ */
+static int mdss_dsi_debugfs_init(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	int rc;
+	struct mdss_panel_data *pdata;
+	struct mdss_panel_info panel_info;
+
+	if (!ctrl_pdata) {
+		pr_warn_once("%s: Invalid pdata!\n", __func__);
+		return -EINVAL;
+	}
+
+	pdata = &ctrl_pdata->panel_data;
+	/* NOTE(review): address of an embedded member is never NULL —
+	 * this check is dead code
+	 */
+	if (!pdata)
+		return -EINVAL;
+
+	panel_info = pdata->panel_info;
+	rc = mdss_dsi_debugfs_setup(pdata, panel_info.debugfs_info->root);
+	if (rc) {
+		pr_err("%s: Error in initilizing dsi ctrl debugfs\n",
+				__func__);
+		return rc;
+	}
+
+	pr_debug("%s: Initialized mdss_dsi_debugfs_init\n", __func__);
+	return 0;
+}
+
+/*
+ * Walk the chained panel_data list and tear down each controller's
+ * debugfs tree and its mdss_dsi_debugfs_info allocation.
+ */
+static void mdss_dsi_debugfs_cleanup(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	struct mdss_panel_data *pdata = &ctrl_pdata->panel_data;
+
+	do {
+		struct mdss_dsi_ctrl_pdata *ctrl = container_of(pdata,
+			struct mdss_dsi_ctrl_pdata, panel_data);
+		struct mdss_dsi_debugfs_info *dfs = ctrl->debugfs_info;
+
+		if (dfs && dfs->root)
+			debugfs_remove_recursive(dfs->root);
+		kfree(dfs);
+		pdata = pdata->next;
+	} while (pdata);
+	pr_debug("%s: Cleaned up mdss_dsi_debugfs_info\n", __func__);
+}
+
+/*
+ * _mdss_dsi_refresh_cmd - rebuild original_pcmds' dsi_cmd_desc array from
+ * a debugfs-edited binary buffer. No-op while sync_flag is set (buffer
+ * unchanged since creation). On success the edited buffer's ownership
+ * transfers to original_pcmds and sync_flag is re-armed.
+ */
+static int _mdss_dsi_refresh_cmd(struct buf_data *new_cmds,
+	struct dsi_panel_cmds *original_pcmds)
+{
+	char *bp;
+	int len, cnt, i;
+	struct dsi_ctrl_hdr *dchdr;
+	struct dsi_cmd_desc *cmds;
+
+	if (new_cmds->sync_flag)
+		return 0;
+
+	bp = new_cmds->buf;
+	len = new_cmds->blen;
+	cnt = 0;
+	/* Scan dcs commands and get dcs command count */
+	while (len >= sizeof(*dchdr)) {
+		dchdr = (struct dsi_ctrl_hdr *)bp;
+		/*
+		 * NOTE(review): unlike mdss_dsi_cmd_flush(), no dlen < 0
+		 * check here — confirm flush always runs first and rejects
+		 * negative lengths.
+		 */
+		if (dchdr->dlen > len) {
+			pr_err("%s: dtsi cmd=%x error, len=%d\n",
+				__func__, dchdr->dtype, dchdr->dlen);
+			return -EINVAL;
+		}
+		bp += sizeof(*dchdr) + dchdr->dlen;
+		len -= sizeof(*dchdr) + dchdr->dlen;
+		cnt++;
+	}
+
+	if (len != 0) {
+		pr_err("%s: dcs_cmd=%x len=%d error!\n", __func__,
+				bp[0], len);
+		return -EINVAL;
+	}
+
+	/* Reallocate space for dcs commands */
+	cmds = kcalloc(cnt, sizeof(struct dsi_cmd_desc), GFP_KERNEL);
+	if (!cmds)
+		return -ENOMEM;
+
+	kfree(original_pcmds->buf);
+	kfree(original_pcmds->cmds);
+	original_pcmds->cmd_cnt = cnt;
+	original_pcmds->cmds = cmds;
+	original_pcmds->buf = new_cmds->buf;
+	original_pcmds->blen = new_cmds->blen;
+
+	/* second pass: point each descriptor at its header and payload */
+	bp = original_pcmds->buf;
+	len = original_pcmds->blen;
+	for (i = 0; i < cnt; i++) {
+		dchdr = (struct dsi_ctrl_hdr *)bp;
+		len -= sizeof(*dchdr);
+		bp += sizeof(*dchdr);
+		original_pcmds->cmds[i].dchdr = *dchdr;
+		original_pcmds->cmds[i].payload = bp;
+		bp += dchdr->dlen;
+		len -= dchdr->dlen;
+	}
+
+	new_cmds->sync_flag = 1;
+	return 0;
+}
+
+/*
+ * Apply the debugfs shadow copy onto the live ctrl_pdata: sync-wait
+ * flags, on/off command buffers, link states, and the error-counter
+ * tuning (clamped to sane ranges, with the shadow reset on rejection).
+ */
+static void mdss_dsi_debugfsinfo_to_dsictrl_info(
+			struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	struct mdss_dsi_debugfs_info *dfs = ctrl_pdata->debugfs_info;
+	struct dsi_err_container *dfs_err_cont = &dfs->ctrl_pdata.err_cont;
+	struct dsi_err_container *err_cont = &ctrl_pdata->err_cont;
+
+	ctrl_pdata->cmd_sync_wait_broadcast =
+			dfs->ctrl_pdata.cmd_sync_wait_broadcast;
+	ctrl_pdata->cmd_sync_wait_trigger =
+			dfs->ctrl_pdata.cmd_sync_wait_trigger;
+
+	_mdss_dsi_refresh_cmd(&dfs->on_cmd, &ctrl_pdata->on_cmds);
+	_mdss_dsi_refresh_cmd(&dfs->off_cmd, &ctrl_pdata->off_cmds);
+
+	ctrl_pdata->on_cmds.link_state =
+			dfs->ctrl_pdata.on_cmds.link_state;
+	ctrl_pdata->off_cmds.link_state =
+			dfs->ctrl_pdata.off_cmds.link_state;
+
+	/* keep error counter between 2 to 10 */
+	if (dfs_err_cont->max_err_index >= 2 &&
+		dfs_err_cont->max_err_index <= MAX_ERR_INDEX) {
+		err_cont->max_err_index = dfs_err_cont->max_err_index;
+	} else {
+		dfs_err_cont->max_err_index = err_cont->max_err_index;
+		pr_warn("resetting the dsi error counter to %d\n",
+			err_cont->max_err_index);
+	}
+
+	/* keep error duration between 16 ms to 100 seconds */
+	if (dfs_err_cont->err_time_delta >= 16 &&
+		dfs_err_cont->err_time_delta <= 100000) {
+		err_cont->err_time_delta = dfs_err_cont->err_time_delta;
+	} else {
+		dfs_err_cont->err_time_delta = err_cont->err_time_delta;
+		pr_warn("resetting the dsi error time delta to %d ms\n",
+			err_cont->err_time_delta);
+	}
+}
+
+/* Apply armed debugfs overrides (one-shot) before a panel power-on. */
+static void mdss_dsi_validate_debugfs_info(
+		struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	struct mdss_dsi_debugfs_info *dfs = ctrl_pdata->debugfs_info;
+
+	if (!dfs->override_flag)
+		return;
+
+	pr_debug("%s: Overriding dsi ctrl_pdata with debugfs data\n",
+		__func__);
+	dfs->override_flag = 0;
+	mdss_dsi_debugfsinfo_to_dsictrl_info(ctrl_pdata);
+}
+
+/*
+ * mdss_dsi_off() - transition the DSI link/panel to a lower power state
+ * @pdata: panel data for the DSI controller
+ * @power_state: target MDSS_PANEL_POWER_* state
+ *
+ * For a full power-off the controller and PHY are disabled (unless ULPS
+ * suspend keeps the PHY alive) before panel supplies are regulated down;
+ * for "panel always on" targets only the supply power-control path runs.
+ * Returns 0 on success or the panel power-control error code.
+ */
+static int mdss_dsi_off(struct mdss_panel_data *pdata, int power_state)
+{
+	int ret = 0;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	struct mdss_panel_info *panel_info = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	panel_info = &ctrl_pdata->panel_data.panel_info;
+
+	pr_debug("%s+: ctrl=%pK ndx=%d power_state=%d\n",
+		__func__, ctrl_pdata, ctrl_pdata->ndx, power_state);
+
+	/* nothing to do if we are already in the requested state */
+	if (power_state == panel_info->panel_power_state) {
+		pr_debug("%s: No change in power state %d -> %d\n", __func__,
+			panel_info->panel_power_state, power_state);
+		goto end;
+	}
+
+	if (mdss_panel_is_power_on(power_state)) {
+		pr_debug("%s: dsi_off with panel always on\n", __func__);
+		goto panel_power_ctrl;
+	}
+
+	/*
+	 * Link clocks should be turned off before PHY can be disabled.
+	 * For command mode panels, all clocks are turned off prior to reaching
+	 * here, so core clocks should be turned on before accessing hardware
+	 * registers. For video mode panel, turn off link clocks and then
+	 * disable PHY
+	 */
+	if (pdata->panel_info.type == MIPI_CMD_PANEL)
+		mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+					MDSS_DSI_CORE_CLK, MDSS_DSI_CLK_ON);
+	else
+		mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+				  MDSS_DSI_LINK_CLK, MDSS_DSI_CLK_OFF);
+
+	if (!pdata->panel_info.ulps_suspend_enabled) {
+		/* disable DSI controller */
+		mdss_dsi_controller_cfg(0, pdata);
+
+		/* disable DSI phy */
+		mdss_dsi_phy_disable(ctrl_pdata);
+	}
+	ctrl_pdata->ctrl_state &= ~CTRL_STATE_DSI_ACTIVE;
+
+	mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+			  MDSS_DSI_CORE_CLK, MDSS_DSI_CLK_OFF);
+
+panel_power_ctrl:
+	ret = mdss_dsi_panel_power_ctrl(pdata, power_state);
+	if (ret) {
+		pr_err("%s: Panel power off failed\n", __func__);
+		goto end;
+	}
+
+	/* latch a pending fps change for suspend/resume dfps mode */
+	if (panel_info->dynamic_fps
+	    && (panel_info->dfps_update == DFPS_SUSPEND_RESUME_MODE)
+	    && (panel_info->new_fps != panel_info->mipi.frame_rate))
+		panel_info->mipi.frame_rate = panel_info->new_fps;
+
+	/* Initialize Max Packet size for DCS reads */
+	ctrl_pdata->cur_max_pkt_size = 0;
+end:
+	pr_debug("%s-:\n", __func__);
+
+	return ret;
+}
+
+/*
+ * mdss_dsi_switch_mode() - perform an immediate dynamic mode switch
+ * @pdata: panel data for the DSI controller
+ * @mode: MIPI_VIDEO_PANEL, MIPI_CMD_PANEL or SWITCH_RESOLUTION
+ *
+ * Translates the requested panel type into the corresponding switch
+ * command and invokes the controller's switch_mode hook with all DSI
+ * clocks enabled (running the controller setup first for a resolution
+ * switch). Returns -EPERM when the panel does not support immediate
+ * dynamic switching.
+ */
+int mdss_dsi_switch_mode(struct mdss_panel_data *pdata, int mode)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl = NULL;
+	struct mipi_panel_info *mipi_info;
+	bool need_ctrl_setup = false;
+
+	if (!pdata) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+	pr_debug("%s, start\n", __func__);
+
+	mipi_info = &pdata->panel_info.mipi;
+	ctrl = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+		panel_data);
+
+	if ((mipi_info->dms_mode != DYNAMIC_MODE_RESOLUTION_SWITCH_IMMEDIATE) &&
+			(mipi_info->dms_mode != DYNAMIC_MODE_SWITCH_IMMEDIATE)) {
+		pr_debug("%s: Dynamic mode switch not enabled.\n", __func__);
+		return -EPERM;
+	}
+
+	if (mode == MIPI_VIDEO_PANEL) {
+		mode = SWITCH_TO_VIDEO_MODE;
+	} else if (mode == MIPI_CMD_PANEL) {
+		mode = SWITCH_TO_CMD_MODE;
+	} else if (mode == SWITCH_RESOLUTION) {
+		need_ctrl_setup = true;
+		pr_debug("Resolution switch mode selected\n");
+	} else {
+		pr_err("Invalid mode selected, mode=%d\n", mode);
+		return -EINVAL;
+	}
+
+	mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle,
+			  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_ON);
+	if (need_ctrl_setup)
+		mdss_dsi_ctrl_setup(ctrl);
+	ctrl->switch_mode(pdata, mode);
+	mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle,
+			  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_OFF);
+
+	pr_debug("%s, end\n", __func__);
+	return 0;
+}
+
+/*
+ * mdss_dsi_reconfig() - reset the DSI host after a dynamic mode switch
+ * @pdata: panel data for the DSI controller
+ * @mode: requested mode (not used by the reset sequence itself)
+ *
+ * For panels in immediate dynamic-switch mode, soft-reset and
+ * reprogram the DSI controller with all clocks held on.
+ */
+static int mdss_dsi_reconfig(struct mdss_panel_data *pdata, int mode)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl = NULL;
+	struct mipi_panel_info *mipi_info;
+
+	if (!pdata) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+	pr_debug("%s, start\n", __func__);
+
+	mipi_info = &pdata->panel_info.mipi;
+	ctrl = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+		panel_data);
+
+	if (mipi_info->dms_mode == DYNAMIC_MODE_SWITCH_IMMEDIATE) {
+		/* reset DSI */
+		mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle,
+				  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_ON);
+		mdss_dsi_sw_reset(ctrl, true);
+		mdss_dsi_ctrl_setup(ctrl);
+		mdss_dsi_controller_cfg(true, pdata);
+		mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle,
+				  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_OFF);
+	}
+
+	pr_debug("%s, end\n", __func__);
+	return 0;
+}
+/*
+ * mdss_dsi_update_panel_config() - retarget panel info to a new DSI mode
+ * @ctrl_pdata: DSI controller data
+ * @mode: DSI_CMD_MODE or (anything else) DSI_VIDEO_MODE
+ *
+ * Rewrites the cached panel info (panel type, vsync configuration,
+ * partial-update capability) for the requested operating mode and
+ * recomputes the destination pixel format.
+ */
+static int mdss_dsi_update_panel_config(struct mdss_dsi_ctrl_pdata *ctrl_pdata,
+				int mode)
+{
+	struct mdss_panel_info *pinfo = &(ctrl_pdata->panel_data.panel_info);
+	bool cmd_mode = (mode == DSI_CMD_MODE);
+
+	pinfo->mipi.mode = cmd_mode ? DSI_CMD_MODE : DSI_VIDEO_MODE;
+	pinfo->type = cmd_mode ? MIPI_CMD_PANEL : MIPI_VIDEO_PANEL;
+	/* hardware vsync (TE) is only meaningful in command mode */
+	pinfo->mipi.vsync_enable = cmd_mode ? 1 : 0;
+	pinfo->mipi.hw_vsync_mode = cmd_mode ? 1 : 0;
+	pinfo->partial_update_enabled = cmd_mode ?
+			pinfo->partial_update_supported : 0;
+
+	ctrl_pdata->panel_mode = pinfo->mipi.mode;
+	mdss_panel_get_dst_fmt(pinfo->bpp, pinfo->mipi.mode,
+			pinfo->mipi.pixel_packing, &(pinfo->mipi.dst_format));
+	return 0;
+}
+
+/*
+ * mdss_dsi_on() - power up supplies, clocks and the DSI controller/PHY
+ * @pdata: panel data for the DSI controller
+ *
+ * Brings the panel out of power-off: applies any armed debugfs
+ * overrides, powers the panel supplies, selects the clock source,
+ * initializes the PHY and host controller (skipped when ULPS suspend
+ * kept the PHY alive), and performs the optional LP11 reset sequence.
+ * Returns 0 on success or a negative error code.
+ */
+int mdss_dsi_on(struct mdss_panel_data *pdata)
+{
+	int ret = 0;
+	struct mdss_panel_info *pinfo;
+	struct mipi_panel_info *mipi;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	int cur_power_state;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	if (ctrl_pdata->debugfs_info)
+		mdss_dsi_validate_debugfs_info(ctrl_pdata);
+
+	cur_power_state = pdata->panel_info.panel_power_state;
+	pr_debug("%s+: ctrl=%pK ndx=%d cur_power_state=%d\n", __func__,
+		ctrl_pdata, ctrl_pdata->ndx, cur_power_state);
+
+	pinfo = &pdata->panel_info;
+	mipi = &pdata->panel_info.mipi;
+
+	if (mdss_dsi_is_panel_on_interactive(pdata)) {
+		/*
+		 * all interrupts are disabled at LK
+		 * for cont_splash case, intr mask bits need
+		 * to be restored to allow dcs command be
+		 * sent to panel
+		 */
+		mdss_dsi_restore_intr_mask(ctrl_pdata);
+		pr_debug("%s: panel already on\n", __func__);
+		goto end;
+	}
+
+	ret = mdss_dsi_panel_power_ctrl(pdata, MDSS_PANEL_POWER_ON);
+	if (ret) {
+		pr_err("%s:Panel power on failed. rc=%d\n", __func__, ret);
+		goto end;
+	}
+
+	/* coming out of a low-power (not full off) state: supplies only */
+	if (mdss_panel_is_power_on(cur_power_state)) {
+		pr_debug("%s: dsi_on from panel low power state\n", __func__);
+		goto end;
+	}
+
+	ret = mdss_dsi_set_clk_src(ctrl_pdata);
+	if (ret) {
+		pr_err("%s: failed to set clk src. rc=%d\n", __func__, ret);
+		goto end;
+	}
+
+	/*
+	 * Enable DSI core clocks prior to resetting and initializing DSI
+	 * Phy. Phy and ctrl setup need to be done before enabling the link
+	 * clocks.
+	 */
+	mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+			  MDSS_DSI_CORE_CLK, MDSS_DSI_CLK_ON);
+
+	/*
+	 * If ULPS during suspend feature is enabled, then DSI PHY was
+	 * left on during suspend. In this case, we do not need to reset/init
+	 * PHY. This would have already been done when the CORE clocks are
+	 * turned on. However, if cont splash is disabled, the first time DSI
+	 * is powered on, phy init needs to be done unconditionally.
+	 */
+	if (!pdata->panel_info.ulps_suspend_enabled || !ctrl_pdata->ulps) {
+		mdss_dsi_phy_sw_reset(ctrl_pdata);
+		mdss_dsi_phy_init(ctrl_pdata);
+		mdss_dsi_ctrl_setup(ctrl_pdata);
+	}
+	ctrl_pdata->ctrl_state |= CTRL_STATE_DSI_ACTIVE;
+
+	/* DSI link clocks need to be on prior to ctrl sw reset */
+	mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+			  MDSS_DSI_LINK_CLK, MDSS_DSI_CLK_ON);
+	mdss_dsi_sw_reset(ctrl_pdata, true);
+
+	/*
+	 * Issue hardware reset line after enabling the DSI clocks and data
+	 * data lanes for LP11 init
+	 */
+	if (mipi->lp11_init) {
+		if (mdss_dsi_pinctrl_set_state(ctrl_pdata, true))
+			pr_debug("reset enable: pinctrl not enabled\n");
+		mdss_dsi_panel_reset(pdata, 1);
+	}
+
+	if (mipi->init_delay)
+		usleep_range(mipi->init_delay, mipi->init_delay + 10);
+
+	if (mipi->force_clk_lane_hs) {
+		u32 tmp;
+
+		/*
+		 * NOTE(review): bit 28 of register 0xac presumably forces
+		 * the clock lane into HS (matches force_clk_lane_hs) —
+		 * confirm against the DSI controller register spec.
+		 */
+		tmp = MIPI_INP((ctrl_pdata->ctrl_base) + 0xac);
+		tmp |= (1<<28);
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0xac, tmp);
+		wmb(); /* ensure write is finished before progressing */
+	}
+
+	if (pdata->panel_info.type == MIPI_CMD_PANEL)
+		mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+				  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_OFF);
+
+end:
+	pr_debug("%s-:\n", __func__);
+	return ret;
+}
+
+/*
+ * mdss_dsi_pinctrl_set_state() - move panel GPIO pins between pin states
+ * @ctrl_pdata: DSI controller data
+ * @active: true selects the "default" (active) state, false the
+ *	"sleep" (suspend) state
+ *
+ * Skipped (returns 0) for the right controller of a split-DSI pair and
+ * for DBA panels, where pin configuration is owned elsewhere.
+ *
+ * Returns 0 on success or a negative error code. Fix: the previous code
+ * returned PTR_ERR() even when the pinctrl handle was NULL, which
+ * evaluates to 0 and falsely reported success; NULL now maps to -EINVAL.
+ */
+static int mdss_dsi_pinctrl_set_state(
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata,
+	bool active)
+{
+	struct pinctrl_state *pin_state;
+	struct mdss_panel_info *pinfo = NULL;
+	int rc = -EFAULT;
+
+	/* PTR_ERR(NULL) is 0; never report a missing handle as success */
+	if (IS_ERR_OR_NULL(ctrl_pdata->pin_res.pinctrl))
+		return ctrl_pdata->pin_res.pinctrl ?
+			PTR_ERR(ctrl_pdata->pin_res.pinctrl) : -EINVAL;
+
+	pinfo = &ctrl_pdata->panel_data.panel_info;
+	if ((mdss_dsi_is_right_ctrl(ctrl_pdata) &&
+		mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data)) ||
+			pinfo->is_dba_panel) {
+		pr_debug("%s:%d, right ctrl pinctrl config not needed\n",
+			__func__, __LINE__);
+		return 0;
+	}
+
+	pin_state = active ? ctrl_pdata->pin_res.gpio_state_active
+				: ctrl_pdata->pin_res.gpio_state_suspend;
+	if (!IS_ERR_OR_NULL(pin_state)) {
+		rc = pinctrl_select_state(ctrl_pdata->pin_res.pinctrl,
+				pin_state);
+		if (rc)
+			pr_err("%s: can not set %s pins\n", __func__,
+			       active ? MDSS_PINCTRL_STATE_DEFAULT
+			       : MDSS_PINCTRL_STATE_SLEEP);
+	} else {
+		pr_err("%s: invalid '%s' pinstate\n", __func__,
+		       active ? MDSS_PINCTRL_STATE_DEFAULT
+		       : MDSS_PINCTRL_STATE_SLEEP);
+	}
+	return rc;
+}
+
+/*
+ * mdss_dsi_pinctrl_init() - acquire pinctrl handle and cache pin states
+ * @pdev: DSI platform device
+ *
+ * Obtains the device's pinctrl handle and looks up the "default" and
+ * "sleep" states; a missing state is only warned about since not all
+ * boards define both.
+ *
+ * Returns 0 on success or a negative error code. Fix: PTR_ERR() on a
+ * NULL handle is 0, which would have been reported as success to the
+ * probe path; a NULL handle now maps to -EINVAL.
+ */
+static int mdss_dsi_pinctrl_init(struct platform_device *pdev)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata;
+
+	ctrl_pdata = platform_get_drvdata(pdev);
+	ctrl_pdata->pin_res.pinctrl = devm_pinctrl_get(&pdev->dev);
+	if (IS_ERR_OR_NULL(ctrl_pdata->pin_res.pinctrl)) {
+		pr_err("%s: failed to get pinctrl\n", __func__);
+		/* PTR_ERR(NULL) is 0; never return "success" here */
+		return ctrl_pdata->pin_res.pinctrl ?
+			PTR_ERR(ctrl_pdata->pin_res.pinctrl) : -EINVAL;
+	}
+
+	ctrl_pdata->pin_res.gpio_state_active
+		= pinctrl_lookup_state(ctrl_pdata->pin_res.pinctrl,
+				MDSS_PINCTRL_STATE_DEFAULT);
+	if (IS_ERR_OR_NULL(ctrl_pdata->pin_res.gpio_state_active))
+		pr_warn("%s: can not get default pinstate\n", __func__);
+
+	ctrl_pdata->pin_res.gpio_state_suspend
+		= pinctrl_lookup_state(ctrl_pdata->pin_res.pinctrl,
+				MDSS_PINCTRL_STATE_SLEEP);
+	if (IS_ERR_OR_NULL(ctrl_pdata->pin_res.gpio_state_suspend))
+		pr_warn("%s: can not get sleep pinstate\n", __func__);
+
+	return 0;
+}
+
+/*
+ * mdss_dsi_unblank() - bring the panel to its interactive (on) state
+ * @pdata: panel data for the DSI controller
+ *
+ * With all DSI clocks held on (including the clock-slave controller on
+ * split display), either exits panel low-power mode through the
+ * low_power_config() hook or runs the panel's on sequence, then enables
+ * hardware TE for command-mode panels. PM QoS latency is tightened for
+ * the duration of the sequence.
+ */
+static int mdss_dsi_unblank(struct mdss_panel_data *pdata)
+{
+	int ret = 0;
+	struct mipi_panel_info *mipi;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	struct mdss_dsi_ctrl_pdata *sctrl = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+	mipi  = &pdata->panel_info.mipi;
+
+	pr_debug("%s+: ctrl=%pK ndx=%d cur_power_state=%d ctrl_state=%x\n",
+			__func__, ctrl_pdata, ctrl_pdata->ndx,
+		pdata->panel_info.panel_power_state, ctrl_pdata->ctrl_state);
+
+	mdss_dsi_pm_qos_update_request(DSI_DISABLE_PC_LATENCY);
+
+	/* on split display, the clock master also drives the slave's clocks */
+	if (mdss_dsi_is_ctrl_clk_master(ctrl_pdata))
+		sctrl = mdss_dsi_get_ctrl_clk_slave();
+
+	mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+			  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_ON);
+	if (sctrl)
+		mdss_dsi_clk_ctrl(sctrl, sctrl->dsi_clk_handle,
+				  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_ON);
+
+	/* panel was left in low-power mode: just undo the LP config */
+	if (ctrl_pdata->ctrl_state & CTRL_STATE_PANEL_LP) {
+		pr_debug("%s: dsi_unblank with panel always on\n", __func__);
+		if (ctrl_pdata->low_power_config)
+			ret = ctrl_pdata->low_power_config(pdata, false);
+		if (!ret)
+			ctrl_pdata->ctrl_state &= ~CTRL_STATE_PANEL_LP;
+		goto error;
+	}
+
+	if (!(ctrl_pdata->ctrl_state & CTRL_STATE_PANEL_INIT)) {
+		if (!pdata->panel_info.dynamic_switch_pending) {
+			ATRACE_BEGIN("dsi_panel_on");
+			ret = ctrl_pdata->on(pdata);
+			if (ret) {
+				pr_err("%s: unable to initialize the panel\n",
+							__func__);
+				goto error;
+			}
+			ATRACE_END("dsi_panel_on");
+		}
+	}
+
+	if ((pdata->panel_info.type == MIPI_CMD_PANEL) &&
+		mipi->vsync_enable && mipi->hw_vsync_mode) {
+		mdss_dsi_set_tear_on(ctrl_pdata);
+	}
+
+	ctrl_pdata->ctrl_state |= CTRL_STATE_PANEL_INIT;
+
+error:
+	mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+			  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_OFF);
+	if (sctrl)
+		mdss_dsi_clk_ctrl(sctrl, sctrl->dsi_clk_handle,
+				  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_OFF);
+
+	mdss_dsi_pm_qos_update_request(DSI_ENABLE_PC_LATENCY);
+
+	pr_debug("%s-:\n", __func__);
+
+	return ret;
+}
+
+/*
+ * mdss_dsi_blank() - take the panel out of its interactive state
+ * @pdata: panel data for the DSI controller
+ * @power_state: requested target state; low-power targets are delegated
+ *	to the panel's low_power_config() hook
+ *
+ * Handles a pending dynamic video<->command mode switch, disables
+ * hardware TE for command-mode panels, and runs the panel's off
+ * sequence when the panel had been initialized. All DSI clocks are
+ * held on for the duration.
+ */
+static int mdss_dsi_blank(struct mdss_panel_data *pdata, int power_state)
+{
+	int ret = 0;
+	struct mipi_panel_info *mipi;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+	mipi = &pdata->panel_info.mipi;
+
+	pr_debug("%s+: ctrl=%pK ndx=%d power_state=%d\n",
+		__func__, ctrl_pdata, ctrl_pdata->ndx, power_state);
+
+	mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+			  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_ON);
+
+	/* low-power target: configure and keep the panel powered */
+	if (mdss_panel_is_power_on_lp(power_state)) {
+		pr_debug("%s: low power state requested\n", __func__);
+		if (ctrl_pdata->low_power_config)
+			ret = ctrl_pdata->low_power_config(pdata, true);
+		if (!ret)
+			ctrl_pdata->ctrl_state |= CTRL_STATE_PANEL_LP;
+		goto error;
+	}
+
+	/* LP-mode off commands on video panels need a re-initialized host */
+	if (pdata->panel_info.type == MIPI_VIDEO_PANEL &&
+			ctrl_pdata->off_cmds.link_state == DSI_LP_MODE) {
+		mdss_dsi_sw_reset(ctrl_pdata, false);
+		mdss_dsi_host_init(pdata);
+	}
+
+	mdss_dsi_op_mode_config(DSI_CMD_MODE, pdata);
+
+	if (pdata->panel_info.dynamic_switch_pending) {
+		pr_info("%s: switching to %s mode\n", __func__,
+			(pdata->panel_info.mipi.mode ? "video" : "command"));
+		if (pdata->panel_info.type == MIPI_CMD_PANEL) {
+			ctrl_pdata->switch_mode(pdata, SWITCH_TO_VIDEO_MODE);
+		} else if (pdata->panel_info.type == MIPI_VIDEO_PANEL) {
+			ctrl_pdata->switch_mode(pdata, SWITCH_TO_CMD_MODE);
+			mdss_dsi_set_tear_off(ctrl_pdata);
+		}
+	}
+
+	if ((pdata->panel_info.type == MIPI_CMD_PANEL) &&
+		mipi->vsync_enable && mipi->hw_vsync_mode) {
+		mdss_dsi_set_tear_off(ctrl_pdata);
+	}
+
+	if (ctrl_pdata->ctrl_state & CTRL_STATE_PANEL_INIT) {
+		if (!pdata->panel_info.dynamic_switch_pending) {
+			ATRACE_BEGIN("dsi_panel_off");
+			ret = ctrl_pdata->off(pdata);
+			if (ret) {
+				pr_err("%s: Panel OFF failed\n", __func__);
+				goto error;
+			}
+			ATRACE_END("dsi_panel_off");
+		}
+		ctrl_pdata->ctrl_state &= ~(CTRL_STATE_PANEL_INIT |
+			CTRL_STATE_PANEL_LP);
+	}
+
+error:
+	mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+			  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_OFF);
+	pr_debug("%s-:End\n", __func__);
+	return ret;
+}
+
+/*
+ * mdss_dsi_post_panel_on() - run the panel's post-on hook with clocks held
+ * @pdata: panel data for the DSI controller
+ *
+ * Invoked after the display pipe is up; bumps all DSI clocks around the
+ * optional post_panel_on callback.
+ */
+static int mdss_dsi_post_panel_on(struct mdss_panel_data *pdata)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	pr_debug("%s+: ctrl=%pK ndx=%d\n", __func__,
+				ctrl, ctrl->ndx);
+
+	mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle,
+			  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_ON);
+
+	if (ctrl->post_panel_on)
+		ctrl->post_panel_on(pdata);
+
+	mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle,
+			  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_OFF);
+	pr_debug("%s-:\n", __func__);
+
+	return 0;
+}
+
+/*
+ * test_hw_vsync_handler() - TE interrupt handler used for vsync testing
+ * @irq: interrupt number
+ * @data: the struct mdss_panel_data registered with the IRQ
+ *
+ * Completes te_done on this panel (and the next chained panel, if any)
+ * so waiters observe the tearing-effect pulse; also recorded in the
+ * MDSS xlog.
+ */
+static irqreturn_t test_hw_vsync_handler(int irq, void *data)
+{
+	struct mdss_panel_data *pdata = data;
+
+	pr_debug("HW VSYNC\n");
+	MDSS_XLOG(0xaaa, irq);
+
+	complete_all(&pdata->te_done);
+	if (pdata->next)
+		complete_all(&pdata->next->te_done);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * mdss_dsi_cont_splash_on() - take over a splash-enabled DSI controller
+ * @pdata: panel data for the DSI controller
+ *
+ * Called when the bootloader left the display running (continuous
+ * splash); reprograms the host controller and soft-resets it so the
+ * kernel driver owns it from here on. Warns if the panel-init state
+ * bit is unexpectedly already set.
+ */
+int mdss_dsi_cont_splash_on(struct mdss_panel_data *pdata)
+{
+	struct mipi_panel_info *mipi;
+	struct mdss_dsi_ctrl_pdata *ctrl = NULL;
+
+	pr_info("%s:%d DSI on for continuous splash.\n", __func__, __LINE__);
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	mipi = &pdata->panel_info.mipi;
+	ctrl = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	pr_debug("%s+: ctrl=%pK ndx=%d\n", __func__,
+				ctrl, ctrl->ndx);
+
+	WARN((ctrl->ctrl_state & CTRL_STATE_PANEL_INIT),
+		"Incorrect Ctrl state=0x%x\n", ctrl->ctrl_state);
+
+	mdss_dsi_ctrl_setup(ctrl);
+	mdss_dsi_sw_reset(ctrl, true);
+	pr_debug("%s-:End\n", __func__);
+	return 0;
+}
+
+/*
+ * __mdss_dsi_mask_dfps_errors() - mask/unmask FIFO-underflow & PLL-unlock
+ * @ctrl: DSI controller data
+ * @mask: true masks the error bits ahead of a dynamic fps update;
+ *	false clears any latched status and restores the mask
+ *
+ * Register 0x10c is treated as the error mask, 0x120 as PLL status and
+ * 0x00c as FIFO status (per the inline comments below) — register-level
+ * semantics should be confirmed against the DSI controller spec.
+ */
+static void __mdss_dsi_mask_dfps_errors(struct mdss_dsi_ctrl_pdata *ctrl,
+		bool mask)
+{
+	u32 data = 0;
+
+	/*
+	 * Assumption is that the DSI clocks will be enabled
+	 * when this API is called from dfps thread
+	 */
+	if (mask) {
+		/* mask FIFO underflow and PLL unlock bits */
+		mdss_dsi_set_reg(ctrl, 0x10c, 0x7c000000, 0x7c000000);
+	} else {
+		data = MIPI_INP((ctrl->ctrl_base) + 0x0120);
+		if (data & BIT(16)) {
+			pr_debug("pll unlocked: 0x%x\n", data);
+			/* clear PLL unlock bit */
+			MIPI_OUTP((ctrl->ctrl_base) + 0x120, BIT(16));
+		}
+
+		data = MIPI_INP((ctrl->ctrl_base) + 0x00c);
+		if (data & 0x88880000) {
+			pr_debug("dsi fifo underflow: 0x%x\n", data);
+			/* clear DSI FIFO underflow and empty */
+			MIPI_OUTP((ctrl->ctrl_base) + 0x00c, 0x99990000);
+		}
+
+		/* restore FIFO underflow and PLL unlock bits */
+		mdss_dsi_set_reg(ctrl, 0x10c, 0x7c000000, 0x0);
+	}
+}
+
+/*
+ * __mdss_dsi_update_video_mode_total() - rewrite video-mode H/V totals
+ * @pdata: panel data for the DSI controller
+ * @new_fps: frame rate being applied (recorded in panel info and xlog)
+ *
+ * Packs (vtotal-1 << 16 | htotal-1) into register 0x2C and triggers the
+ * timing double buffer (0x1e4) when supported.
+ * NOTE(review): bit 27 of 0x2C is toggled around the update — it looks
+ * like a latch/commit bit for the timing value; confirm against the DSI
+ * controller register documentation.
+ */
+static void __mdss_dsi_update_video_mode_total(struct mdss_panel_data *pdata,
+		int new_fps)
+{
+	u32 hsync_period, vsync_period;
+	u32 new_dsi_v_total, current_dsi_v_total;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s Invalid pdata\n", __func__);
+		return;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+			panel_data);
+	if (ctrl_pdata == NULL) {
+		pr_err("%s Invalid ctrl_pdata\n", __func__);
+		return;
+	}
+
+	vsync_period =
+		mdss_panel_get_vtotal(&pdata->panel_info);
+	hsync_period =
+		mdss_panel_get_htotal(&pdata->panel_info, true);
+	current_dsi_v_total =
+		MIPI_INP((ctrl_pdata->ctrl_base) + 0x2C);
+	new_dsi_v_total =
+		((vsync_period - 1) << 16) | (hsync_period - 1);
+
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x2C,
+			(current_dsi_v_total | 0x8000000));
+	if (new_dsi_v_total & 0x8000000) {
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x2C,
+				new_dsi_v_total);
+	} else {
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x2C,
+				(new_dsi_v_total | 0x8000000));
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x2C,
+				(new_dsi_v_total & 0x7ffffff));
+	}
+
+	/* commit through the timing double buffer when available */
+	if (ctrl_pdata->timing_db_mode)
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x1e4, 0x1);
+
+	pr_debug("%s new_fps:%d vsync:%d hsync:%d frame_rate:%d\n",
+			__func__, new_fps, vsync_period, hsync_period,
+			ctrl_pdata->panel_data.panel_info.mipi.frame_rate);
+
+	ctrl_pdata->panel_data.panel_info.current_fps = new_fps;
+	MDSS_XLOG(current_dsi_v_total, new_dsi_v_total, new_fps,
+		ctrl_pdata->timing_db_mode);
+
+}
+
+/*
+ * __mdss_dsi_dyn_refresh_config() - arm dynamic refresh per PHY revision
+ * @ctrl_pdata: DSI controller data
+ *
+ * Programs DSI_DYNAMIC_REFRESH_CTRL: clears BIT(12) for rev 1.0 PHYs
+ * and writes BIT(13) for rev 2.0 PHYs. On split display only the
+ * master control is configured; the clock slave returns early.
+ */
+static void __mdss_dsi_dyn_refresh_config(
+		struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	int reg_data = 0;
+	u32 phy_rev = ctrl_pdata->shared_data->phy_rev;
+
+	/* configure only for master control in split display */
+	if (mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data) &&
+			mdss_dsi_is_ctrl_clk_slave(ctrl_pdata))
+		return;
+
+	switch (phy_rev) {
+	case DSI_PHY_REV_10:
+		reg_data = MIPI_INP((ctrl_pdata->ctrl_base) +
+				DSI_DYNAMIC_REFRESH_CTRL);
+		reg_data &= ~BIT(12);
+		MIPI_OUTP((ctrl_pdata->ctrl_base)
+				+ DSI_DYNAMIC_REFRESH_CTRL, reg_data);
+		break;
+	case DSI_PHY_REV_20:
+		reg_data = BIT(13);
+		MIPI_OUTP((ctrl_pdata->ctrl_base)
+				+ DSI_DYNAMIC_REFRESH_CTRL, reg_data);
+		break;
+	default:
+		pr_err("Phy rev %d unsupported\n", phy_rev);
+		break;
+	}
+
+	pr_debug("Dynamic fps ctrl = 0x%x\n", reg_data);
+}
+
+/*
+ * __mdss_dsi_calc_dfps_delay() - program pipe/PLL delays for dynamic fps
+ * @pdata: panel data for the DSI controller
+ *
+ * Derives the pipe delay, a secondary pipe delay (when the clock lane
+ * is forced HS) and the PLL settle delay in esc-clock cycles, and
+ * writes them to the dynamic-refresh delay registers. The 130 us PLL
+ * delay follows the h/w doc recommendation (see inline comment).
+ * NOTE(review): the pd->timing[] indices are assumed to map onto the
+ * DSI PHY timing parameters used by the hardware delay formula —
+ * confirm against the PHY programming guide.
+ */
+static void __mdss_dsi_calc_dfps_delay(struct mdss_panel_data *pdata)
+{
+	u32 esc_clk_rate = XO_CLK_RATE;
+	u32 pipe_delay, pipe_delay2 = 0, pll_delay;
+	u32 hsync_period = 0;
+	u32 pclk_to_esc_ratio, byte_to_esc_ratio, hr_bit_to_esc_ratio;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	struct mdss_panel_info *pinfo = NULL;
+	struct mdss_dsi_phy_ctrl *pd = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s Invalid pdata\n", __func__);
+		return;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+			panel_data);
+	if (ctrl_pdata == NULL) {
+		pr_err("%s Invalid ctrl_pdata\n", __func__);
+		return;
+	}
+
+	/* delays are programmed on the master only for split display */
+	if (mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data) &&
+		mdss_dsi_is_ctrl_clk_slave(ctrl_pdata))
+		return;
+
+	pinfo = &pdata->panel_info;
+	pd = &(pinfo->mipi.dsi_phy_db);
+
+	pclk_to_esc_ratio = (ctrl_pdata->pclk_rate / esc_clk_rate);
+	byte_to_esc_ratio = (ctrl_pdata->byte_clk_rate / esc_clk_rate);
+	hr_bit_to_esc_ratio = ((ctrl_pdata->byte_clk_rate * 4) / esc_clk_rate);
+
+	hsync_period = mdss_panel_get_htotal(pinfo, true);
+	pipe_delay = (hsync_period + 1) / pclk_to_esc_ratio;
+	if (pinfo->mipi.eof_bllp_power_stop == 0)
+		pipe_delay += (17 / pclk_to_esc_ratio) +
+			((21 + (pinfo->mipi.t_clk_pre + 1) +
+				(pinfo->mipi.t_clk_post + 1)) /
+				byte_to_esc_ratio) +
+			((((pd->timing[8] >> 1) + 1) +
+			((pd->timing[6] >> 1) + 1) +
+			((pd->timing[3] * 4) + (pd->timing[5] >> 1) + 1) +
+			((pd->timing[7] >> 1) + 1) +
+			((pd->timing[1] >> 1) + 1) +
+			((pd->timing[4] >> 1) + 1)) / hr_bit_to_esc_ratio);
+
+	if (pinfo->mipi.force_clk_lane_hs)
+		pipe_delay2 = (6 / byte_to_esc_ratio) +
+			((((pd->timing[1] >> 1) + 1) +
+			((pd->timing[4] >> 1) + 1)) / hr_bit_to_esc_ratio);
+
+	/* 130 us pll delay recommended by h/w doc */
+	pll_delay = ((130 * esc_clk_rate) / 1000000) * 2;
+
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + DSI_DYNAMIC_REFRESH_PIPE_DELAY,
+						pipe_delay);
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + DSI_DYNAMIC_REFRESH_PIPE_DELAY2,
+						pipe_delay2);
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + DSI_DYNAMIC_REFRESH_PLL_DELAY,
+						pll_delay);
+}
+
+/*
+ * __mdss_dsi_dfps_calc_clks() - recompute clock rates for a new fps
+ * @pdata: panel data for the DSI controller
+ * @new_fps: requested frame rate
+ *
+ * Reconfigures the clock dividers for the new fps, programs the
+ * dynamic-refresh control and delay registers (plus the 8996 dfps
+ * config on rev 2.0 PHYs), backs up the current byte/pixel rates for
+ * rollback and caches the newly computed ones.
+ * Returns 0 on success or the divider-config error.
+ */
+static int __mdss_dsi_dfps_calc_clks(struct mdss_panel_data *pdata,
+		int new_fps)
+{
+	int rc = 0;
+	u64 clk_rate;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	struct mdss_panel_info *pinfo;
+	u32 phy_rev;
+
+	if (pdata == NULL) {
+		pr_err("%s Invalid pdata\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+			panel_data);
+	if (ctrl_pdata == NULL) {
+		pr_err("%s Invalid ctrl_pdata\n", __func__);
+		return -EINVAL;
+	}
+
+	pinfo = &pdata->panel_info;
+	phy_rev = ctrl_pdata->shared_data->phy_rev;
+
+	rc = mdss_dsi_clk_div_config
+		(&ctrl_pdata->panel_data.panel_info, new_fps);
+	if (rc) {
+		pr_err("%s: unable to initialize the clk dividers\n",
+				__func__);
+		return rc;
+	}
+
+	__mdss_dsi_dyn_refresh_config(ctrl_pdata);
+
+	if (phy_rev == DSI_PHY_REV_20)
+		mdss_dsi_dfps_config_8996(ctrl_pdata);
+
+	__mdss_dsi_calc_dfps_delay(pdata);
+
+	/* take a backup of current clk rates */
+	ctrl_pdata->pclk_rate_bkp = ctrl_pdata->pclk_rate;
+	ctrl_pdata->byte_clk_rate_bkp = ctrl_pdata->byte_clk_rate;
+
+	ctrl_pdata->pclk_rate = pinfo->mipi.dsi_pclk_rate;
+	clk_rate = pinfo->clk_rate;
+	do_div(clk_rate, 8U);
+	ctrl_pdata->byte_clk_rate = (u32) clk_rate;
+
+	/* NOTE(review): rates are u32, so "%u" would be the exact specifier */
+	pr_debug("byte_rate=%i\n", ctrl_pdata->byte_clk_rate);
+	pr_debug("pclk_rate=%i\n", ctrl_pdata->pclk_rate);
+
+	return rc;
+}
+
+/*
+ * __mdss_dsi_dfps_update_clks() - switch link clocks to the new fps rates
+ * @pdata: panel data for the DSI controller
+ * @new_fps: frame rate being applied
+ *
+ * Parks the byte/pixel muxes on shadow clocks (holding an extra ref on
+ * the main PLL clocks so they are not gated), programs the new link
+ * rates (slave controller too on split display), waits for dynamic
+ * refresh done, verifies the PLL re-locks, then restores the muxes and
+ * drops the extra refs. On failure the error labels unwind in exact
+ * reverse order of acquisition, restoring the backed-up link rates.
+ *
+ * On split display this executes only on the clock-slave path, which
+ * then drives the master (see inline comment below).
+ */
+static int __mdss_dsi_dfps_update_clks(struct mdss_panel_data *pdata,
+		int new_fps)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	struct mdss_dsi_ctrl_pdata *sctrl_pdata = NULL;
+	struct mdss_panel_info *pinfo, *spinfo;
+	int rc = 0;
+
+	if (pdata == NULL) {
+		pr_err("%s Invalid pdata\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+			panel_data);
+	if (IS_ERR_OR_NULL(ctrl_pdata)) {
+		pr_err("Invalid sctrl_pdata = %lu\n", PTR_ERR(ctrl_pdata));
+		return PTR_ERR(ctrl_pdata);
+	}
+
+	pinfo = &ctrl_pdata->panel_data.panel_info;
+
+	/*
+	 * In split display case, configure and enable dynamic refresh
+	 * register only after both the ctrl data is programmed. So,
+	 * ignore enabling dynamic refresh for the master control and
+	 * configure only when it is slave control.
+	 */
+	if (mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data) &&
+			mdss_dsi_is_ctrl_clk_master(ctrl_pdata))
+		return 0;
+
+	if (mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data) &&
+			mdss_dsi_is_ctrl_clk_slave(ctrl_pdata)) {
+		sctrl_pdata = ctrl_pdata;
+		spinfo = pinfo;
+		ctrl_pdata = mdss_dsi_get_ctrl_clk_master();
+		if (IS_ERR_OR_NULL(ctrl_pdata)) {
+			pr_err("Invalid ctrl_pdata = %lu\n",
+					PTR_ERR(ctrl_pdata));
+			return PTR_ERR(ctrl_pdata);
+		}
+
+		pinfo = &ctrl_pdata->panel_data.panel_info;
+	}
+
+	/*
+	 * For programming dynamic refresh registers, we need to change
+	 * the parent to shadow clocks for the software byte and pixel mux.
+	 * After switching to shadow clocks, if there is no ref count on
+	 * main byte and pixel clocks, clock driver may shutdown those
+	 * unreferenced  byte and pixel clocks. Hence add an extra reference
+	 * count to avoid shutting down the main byte and pixel clocks.
+	 */
+	rc = clk_prepare_enable(ctrl_pdata->pll_byte_clk);
+	if (rc) {
+		pr_err("Unable to add extra refcnt for byte clock\n");
+		goto error_byte;
+	}
+
+	rc = clk_prepare_enable(ctrl_pdata->pll_pixel_clk);
+	if (rc) {
+		pr_err("Unable to add extra refcnt for pixel clock\n");
+		goto error_pixel;
+	}
+
+	/* change the parent to shadow clocks*/
+	rc = clk_set_parent(ctrl_pdata->mux_byte_clk,
+			ctrl_pdata->shadow_byte_clk);
+	if (rc) {
+		pr_err("Unable to set parent to shadow byte clock\n");
+		goto error_shadow_byte;
+	}
+
+	rc = clk_set_parent(ctrl_pdata->mux_pixel_clk,
+			ctrl_pdata->shadow_pixel_clk);
+	if (rc) {
+		pr_err("Unable to set parent to shadow pixel clock\n");
+		goto error_shadow_pixel;
+	}
+
+	rc = mdss_dsi_clk_set_link_rate(ctrl_pdata->dsi_clk_handle,
+			MDSS_DSI_LINK_BYTE_CLK, ctrl_pdata->byte_clk_rate, 0);
+	if (rc) {
+		pr_err("%s: dsi_byte_clk - clk_set_rate failed\n",
+				__func__);
+		goto error_byte_link;
+	}
+
+	rc = mdss_dsi_clk_set_link_rate(ctrl_pdata->dsi_clk_handle,
+			MDSS_DSI_LINK_PIX_CLK, ctrl_pdata->pclk_rate, 0);
+	if (rc) {
+		pr_err("%s: dsi_pixel_clk - clk_set_rate failed\n",
+				__func__);
+		goto error_pixel_link;
+	}
+
+	if (sctrl_pdata) {
+		rc = mdss_dsi_clk_set_link_rate(sctrl_pdata->dsi_clk_handle,
+			MDSS_DSI_LINK_BYTE_CLK, sctrl_pdata->byte_clk_rate, 0);
+		if (rc) {
+			pr_err("%s: slv dsi_byte_clk - clk_set_rate failed\n",
+					__func__);
+			goto error_sbyte_link;
+		}
+
+		rc = mdss_dsi_clk_set_link_rate(sctrl_pdata->dsi_clk_handle,
+			MDSS_DSI_LINK_PIX_CLK, sctrl_pdata->pclk_rate, 0);
+		if (rc) {
+			pr_err("%s: slv dsi_pixel_clk - clk_set_rate failed\n",
+					__func__);
+			goto error_spixel_link;
+		}
+	}
+
+	/* wait for the hardware to complete the dynamic refresh */
+	rc = mdss_dsi_en_wait4dynamic_done(ctrl_pdata);
+	if (rc < 0) {
+		pr_err("Unsuccessful dynamic fps change");
+		goto dfps_timeout;
+	}
+
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + DSI_DYNAMIC_REFRESH_CTRL, 0x00);
+	if (sctrl_pdata)
+		MIPI_OUTP((sctrl_pdata->ctrl_base) + DSI_DYNAMIC_REFRESH_CTRL,
+				0x00);
+
+	rc = mdss_dsi_phy_pll_reset_status(ctrl_pdata);
+	if (rc) {
+		pr_err("%s: pll cannot be locked reset core ready failed %d\n",
+			__func__, rc);
+		goto dfps_timeout;
+	}
+
+	__mdss_dsi_mask_dfps_errors(ctrl_pdata, false);
+	if (sctrl_pdata)
+		__mdss_dsi_mask_dfps_errors(sctrl_pdata, false);
+
+	/* Move the mux clocks to main byte and pixel clocks */
+	rc = clk_set_parent(ctrl_pdata->mux_byte_clk,
+			ctrl_pdata->pll_byte_clk);
+	if (rc)
+		pr_err("Unable to set parent back to main byte clock\n");
+
+	rc = clk_set_parent(ctrl_pdata->mux_pixel_clk,
+			ctrl_pdata->pll_pixel_clk);
+	if (rc)
+		pr_err("Unable to set parent back to main pixel clock\n");
+
+	/* Remove extra ref count on parent clocks */
+	clk_disable_unprepare(ctrl_pdata->pll_byte_clk);
+	clk_disable_unprepare(ctrl_pdata->pll_pixel_clk);
+
+	/* update new fps that at this point is already updated in hw */
+	pinfo->current_fps = new_fps;
+	if (sctrl_pdata)
+		spinfo->current_fps = new_fps;
+
+	return rc;
+
+dfps_timeout:
+	if (sctrl_pdata)
+		mdss_dsi_clk_set_link_rate(sctrl_pdata->dsi_clk_handle,
+				MDSS_DSI_LINK_PIX_CLK,
+				sctrl_pdata->pclk_rate_bkp, 0);
+error_spixel_link:
+	if (sctrl_pdata)
+		mdss_dsi_clk_set_link_rate(sctrl_pdata->dsi_clk_handle,
+				MDSS_DSI_LINK_BYTE_CLK,
+				sctrl_pdata->byte_clk_rate_bkp, 0);
+error_sbyte_link:
+	mdss_dsi_clk_set_link_rate(ctrl_pdata->dsi_clk_handle,
+		MDSS_DSI_LINK_PIX_CLK, ctrl_pdata->pclk_rate_bkp, 0);
+error_pixel_link:
+	mdss_dsi_clk_set_link_rate(ctrl_pdata->dsi_clk_handle,
+		MDSS_DSI_LINK_BYTE_CLK, ctrl_pdata->byte_clk_rate_bkp, 0);
+error_byte_link:
+	clk_set_parent(ctrl_pdata->mux_pixel_clk, ctrl_pdata->pll_pixel_clk);
+error_shadow_pixel:
+	clk_set_parent(ctrl_pdata->mux_byte_clk, ctrl_pdata->pll_byte_clk);
+error_shadow_byte:
+	clk_disable_unprepare(ctrl_pdata->pll_pixel_clk);
+error_pixel:
+	clk_disable_unprepare(ctrl_pdata->pll_byte_clk);
+error_byte:
+	return rc;
+}
+
+/*
+ * mdss_dsi_check_params() - decide if a reconfig needs a timing change
+ * @ctrl: DSI controller data
+ * @arg: candidate panel info (struct mdss_panel_info *)
+ *
+ * Only meaningful for pluggable panels. Returns 1 when the requested
+ * resolution or any porch/pulse-width timing differs from the current
+ * panel info, 0 otherwise (including NULL inputs).
+ */
+static int mdss_dsi_check_params(struct mdss_dsi_ctrl_pdata *ctrl, void *arg)
+{
+	struct mdss_panel_info *var_pinfo, *pinfo;
+
+	if (!ctrl || !arg)
+		return 0;
+
+	pinfo = &ctrl->panel_data.panel_info;
+	if (!pinfo->is_pluggable)
+		return 0;
+
+	var_pinfo = arg;
+
+	pr_debug("%s: reconfig xres: %d yres: %d, current xres: %d yres: %d\n",
+			__func__, var_pinfo->xres, var_pinfo->yres,
+					pinfo->xres, pinfo->yres);
+
+	return (var_pinfo->xres != pinfo->xres) ||
+		(var_pinfo->yres != pinfo->yres) ||
+		(var_pinfo->lcdc.h_back_porch != pinfo->lcdc.h_back_porch) ||
+		(var_pinfo->lcdc.h_front_porch != pinfo->lcdc.h_front_porch) ||
+		(var_pinfo->lcdc.h_pulse_width != pinfo->lcdc.h_pulse_width) ||
+		(var_pinfo->lcdc.v_back_porch != pinfo->lcdc.v_back_porch) ||
+		(var_pinfo->lcdc.v_front_porch != pinfo->lcdc.v_front_porch) ||
+		(var_pinfo->lcdc.v_pulse_width != pinfo->lcdc.v_pulse_width);
+}
+
+#ifdef TARGET_HW_MDSS_HDMI
+/*
+ * mdss_dsi_update_params() - forward panel info to the DBA bridge
+ * @ctrl: DSI controller data
+ * @arg: opaque update argument (unused here; presence gates the call)
+ *
+ * Pushes the current lane configuration via mdss_dba_update_lane_cfg()
+ * when the HDMI/DBA target is built in.
+ */
+static void mdss_dsi_update_params(struct mdss_dsi_ctrl_pdata *ctrl, void *arg)
+{
+	struct mdss_panel_info *pinfo;
+
+	if (!ctrl || !arg)
+		return;
+
+	pinfo = &ctrl->panel_data.panel_info;
+	mdss_dba_update_lane_cfg(pinfo);
+}
+#else
+/* No-op stub when the HDMI (DBA) bridge support is not built in. */
+static void mdss_dsi_update_params(struct mdss_dsi_ctrl_pdata *ctrl, void *arg)
+{
+}
+#endif
+
+/*
+ * mdss_dsi_dfps_config() - entry point for a dynamic frame rate change
+ * @pdata: panel data for the DSI controller
+ * @new_fps: requested frame rate
+ *
+ * Dispatches between the porch-update method (rewrite of the video-mode
+ * totals) and the clock-update method (PHY timing recalculation plus
+ * shadow-clock switch), rolling back to the previously configured fps
+ * on failure. Returns 0 on success or a negative error code.
+ */
+static int mdss_dsi_dfps_config(struct mdss_panel_data *pdata, int new_fps)
+{
+	int rc = 0;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	struct mdss_panel_info *pinfo;
+	u32 phy_rev;
+	u32 frame_rate_bkp;
+
+	pr_debug("%s+:\n", __func__);
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+			panel_data);
+
+	if (!ctrl_pdata->panel_data.panel_info.dynamic_fps) {
+		pr_err("Dynamic fps not enabled for this panel\n");
+		return -EINVAL;
+	}
+
+	phy_rev = ctrl_pdata->shared_data->phy_rev;
+	pinfo = &pdata->panel_info;
+
+	/* get the fps configured in HW */
+	frame_rate_bkp = pinfo->current_fps;
+
+	if (new_fps == pinfo->current_fps) {
+		/*
+		 * This is unlikely as mdss driver checks for previously
+		 * configured frame rate.
+		 */
+		pr_debug("Panel is already at this FPS\n");
+		goto end_update;
+	}
+
+	if (pinfo->dfps_update == DFPS_IMMEDIATE_PORCH_UPDATE_MODE_HFP ||
+		pinfo->dfps_update == DFPS_IMMEDIATE_PORCH_UPDATE_MODE_VFP) {
+		/* Porch method */
+		__mdss_dsi_update_video_mode_total(pdata, new_fps);
+	} else if (pinfo->dfps_update == DFPS_IMMEDIATE_CLK_UPDATE_MODE) {
+		/* Clock update method */
+
+		__mdss_dsi_mask_dfps_errors(ctrl_pdata, true);
+
+		if (phy_rev == DSI_PHY_REV_20) {
+			rc = mdss_dsi_phy_calc_timing_param(pinfo, phy_rev,
+					new_fps);
+			if (rc) {
+				pr_err("PHY calculations failed-%d\n", new_fps);
+				goto end_update;
+			}
+		}
+
+		rc = __mdss_dsi_dfps_calc_clks(pdata, new_fps);
+		if (rc) {
+			pr_err("error calculating clocks for %d\n", new_fps);
+			goto error_clks;
+		}
+
+		rc = __mdss_dsi_dfps_update_clks(pdata,	new_fps);
+		if (rc) {
+			pr_err("Dynamic refresh failed-%d\n", new_fps);
+			goto error_dfps;
+		}
+	}
+
+	return rc;
+error_dfps:
+	if (__mdss_dsi_dfps_calc_clks(pdata, frame_rate_bkp))
+		pr_err("error reverting clock calculations for %d\n",
+				frame_rate_bkp);
+error_clks:
+	/*
+	 * NOTE(review): the forward path only calls
+	 * mdss_dsi_phy_calc_timing_param() for DSI_PHY_REV_20, but this
+	 * rollback invokes it for every phy_rev — confirm that is intended.
+	 */
+	if (mdss_dsi_phy_calc_timing_param(pinfo, phy_rev, frame_rate_bkp))
+		pr_err("Unable to revert phy timing-%d\n", frame_rate_bkp);
+end_update:
+	return rc;
+}
+
+/*
+ * mdss_dsi_ctl_partial_roi() - program the panel's partial-update window.
+ * @pdata: panel data of the controller.
+ *
+ * When partial update is enabled, asks the panel driver to send the
+ * column/page address commands for the current ROI (force=false, so the
+ * command is skipped if the ROI is unchanged). Returns 0 when partial
+ * update is disabled, -EINVAL on bad input or missing callback, otherwise
+ * the callback's result.
+ */
+static int mdss_dsi_ctl_partial_roi(struct mdss_panel_data *pdata)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	int rc = -EINVAL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!pdata->panel_info.partial_update_enabled)
+		return 0;
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	if (ctrl_pdata->set_col_page_addr)
+		rc = ctrl_pdata->set_col_page_addr(pdata, false);
+
+	return rc;
+}
+
+/*
+ * mdss_dsi_set_stream_size() - program command-mode MDP stream geometry.
+ * @pdata: panel data of the controller.
+ *
+ * Writes the MDP stream control/total registers for the current ROI (or,
+ * with DSC compression, for the compressed picture size), programs the
+ * horizontal-idle insertion for the ROI width, and re-applies the DSC
+ * configuration when compression is active. Only valid for panels that
+ * support partial update; returns -EINVAL otherwise.
+ */
+static int mdss_dsi_set_stream_size(struct mdss_panel_data *pdata)
+{
+	u32 stream_ctrl, stream_total, idle;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	struct mdss_panel_info *pinfo;
+	struct dsc_desc *dsc = NULL;
+	struct mdss_rect *roi;
+	struct panel_horizontal_idle *pidle;
+	int i;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	pinfo = &pdata->panel_info;
+
+	if (!pinfo->partial_update_supported)
+		return -EINVAL;
+
+	if (pinfo->compression_mode == COMPRESSION_DSC)
+		dsc = &pinfo->dsc;
+
+	roi = &pinfo->roi;
+
+	/* DSI_COMMAND_MODE_MDP_STREAM_CTRL */
+	if (dsc) {
+		u16 byte_num =  dsc->bytes_per_pkt;
+
+		if (pinfo->mipi.insert_dcs_cmd)
+			byte_num++;
+
+		stream_ctrl = (byte_num << 16) | (pinfo->mipi.vc << 8) |
+				DTYPE_DCS_LWRITE;
+		stream_total = dsc->pic_height << 16 | dsc->pclk_per_line;
+	} else  {
+
+		/* uncompressed: 3 bytes/pixel plus the DCS command byte */
+		stream_ctrl = (((roi->w * 3) + 1) << 16) |
+			(pdata->panel_info.mipi.vc << 8) | DTYPE_DCS_LWRITE;
+		stream_total = roi->h << 16 | roi->w;
+	}
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x60, stream_ctrl);
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x58, stream_ctrl);
+
+	/* DSI_COMMAND_MODE_MDP_STREAM_TOTAL */
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x64, stream_total);
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x5C, stream_total);
+
+	/* set idle control -- dsi clk cycle */
+	idle = 0;
+	pidle = ctrl_pdata->line_idle;
+	/* pick the idle value whose (min, max] width range contains roi->w */
+	for (i = 0; i < ctrl_pdata->horizontal_idle_cnt; i++) {
+		if (roi->w > pidle->min && roi->w <= pidle->max) {
+			idle = pidle->idle;
+			pr_debug("%s: ndx=%d w=%d range=%d-%d idle=%d\n",
+				__func__, ctrl_pdata->ndx, roi->w,
+				pidle->min, pidle->max, pidle->idle);
+			break;
+		}
+		pidle++;
+	}
+
+	if (idle)
+		idle |= BIT(12);	/* enable */
+
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x194, idle);
+
+	if (dsc)
+		mdss_dsi_dsc_config(ctrl_pdata, dsc);
+
+	return 0;
+}
+
+#ifdef TARGET_HW_MDSS_HDMI
+/*
+ * mdss_dsi_dba_work() - deferred registration with the DBA (bridge) utils.
+ * @work: delayed work embedded in mdss_dsi_ctrl_pdata (dba_work).
+ *
+ * Fills mdss_dba_utils_init_data from the controller (bridge chip name,
+ * instance, fb node, sysfs kobj, cont-splash state) and calls
+ * mdss_dba_utils_init(). If the bridge device is not ready yet, requeues
+ * itself with a one-second delay until it succeeds.
+ */
+static void mdss_dsi_dba_work(struct work_struct *work)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	struct delayed_work *dw = to_delayed_work(work);
+	struct mdss_dba_utils_init_data utils_init_data;
+	struct mdss_panel_info *pinfo;
+
+	ctrl_pdata = container_of(dw, struct mdss_dsi_ctrl_pdata, dba_work);
+	if (!ctrl_pdata) {
+		pr_err("%s: invalid ctrl data\n", __func__);
+		return;
+	}
+
+	pinfo = &ctrl_pdata->panel_data.panel_info;
+	if (!pinfo) {
+		pr_err("%s: invalid ctrl data\n", __func__);
+		return;
+	}
+
+	memset(&utils_init_data, 0, sizeof(utils_init_data));
+
+	utils_init_data.chip_name = ctrl_pdata->bridge_name;
+	utils_init_data.client_name = "dsi";
+	utils_init_data.instance_id = ctrl_pdata->bridge_index;
+	utils_init_data.fb_node = ctrl_pdata->fb_node;
+	utils_init_data.kobj = ctrl_pdata->kobj;
+	utils_init_data.pinfo = pinfo;
+	if (ctrl_pdata->mdss_util)
+		utils_init_data.cont_splash_enabled =
+			ctrl_pdata->mdss_util->panel_intf_status(
+			ctrl_pdata->panel_data.panel_info.pdest,
+			MDSS_PANEL_INTF_DSI) ? true : false;
+	else
+		utils_init_data.cont_splash_enabled = false;
+
+	pinfo->dba_data = mdss_dba_utils_init(&utils_init_data);
+
+	if (!IS_ERR_OR_NULL(pinfo->dba_data)) {
+		ctrl_pdata->ds_registered = true;
+	} else {
+		pr_debug("%s: dba device not ready, queue again\n", __func__);
+		queue_delayed_work(ctrl_pdata->workq,
+				&ctrl_pdata->dba_work, HZ);
+	}
+}
+#else
+static void mdss_dsi_dba_work(struct work_struct *work)
+{
+	/*
+	 * Suppress the unused-parameter warning without dereferencing the
+	 * pointer; the previous "(void)(*work);" would be undefined
+	 * behavior if work were ever NULL.
+	 */
+	(void)work;
+}
+#endif
+/*
+ * mdss_dsi_reset_write_ptr() - reset the DSI core write pointer.
+ * @pdata: panel data of the controller.
+ *
+ * Called after the pixel stream has stopped: turns all DSI clocks on,
+ * soft-resets the controller, restores the partial-update ROI to the full
+ * panel, reprograms the stream size and column/page addresses (forced),
+ * then releases the clocks. The secondary controller (ndx == 1) skips the
+ * command-send portion when dcs_cmd_by_left is set.
+ */
+static int mdss_dsi_reset_write_ptr(struct mdss_panel_data *pdata)
+{
+
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	struct mdss_panel_info *pinfo;
+	int rc = 0;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+			panel_data);
+
+	pinfo = &ctrl_pdata->panel_data.panel_info;
+	mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+			  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_ON);
+	/* Need to reset the DSI core since the pixel stream was stopped. */
+	mdss_dsi_sw_reset(ctrl_pdata, true);
+
+	/*
+	 * Reset the partial update co-ordinates to the panel height and
+	 * width
+	 */
+	if (pinfo->dcs_cmd_by_left && (ctrl_pdata->ndx == 1))
+		goto skip_cmd_send;
+
+	pinfo->roi.x = 0;
+	pinfo->roi.y = 0;
+	pinfo->roi.w = pinfo->xres;
+	/* NOTE(review): this re-assignment repeats the line above verbatim */
+	if (pinfo->dcs_cmd_by_left)
+		pinfo->roi.w = pinfo->xres;
+	if (pdata->next)
+		pinfo->roi.w += pdata->next->panel_info.xres;
+	pinfo->roi.h = pinfo->yres;
+
+	mdss_dsi_set_stream_size(pdata);
+
+	if (ctrl_pdata->set_col_page_addr)
+		rc = ctrl_pdata->set_col_page_addr(pdata, true);
+
+skip_cmd_send:
+	mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+			  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_OFF);
+
+	pr_debug("%s: DSI%d write ptr reset finished\n", __func__,
+			ctrl_pdata->ndx);
+
+	return rc;
+}
+
+/*
+ * mdss_dsi_register_recovery_handler() - install the interface recovery
+ * callback used when the DSI link needs to be re-established.
+ * @ctrl: DSI controller private data.
+ * @recovery: caller-owned recovery descriptor; stored, not copied.
+ *
+ * Serialized against other users of ctrl->recovery via ctrl->mutex.
+ * Always returns 0.
+ */
+int mdss_dsi_register_recovery_handler(struct mdss_dsi_ctrl_pdata *ctrl,
+	struct mdss_intf_recovery *recovery)
+{
+	mutex_lock(&ctrl->mutex);
+	ctrl->recovery = recovery;
+	mutex_unlock(&ctrl->mutex);
+	return 0;
+}
+
+/*
+ * mdss_dsi_register_mdp_callback() - install the MDP notification callback.
+ * @ctrl: DSI controller private data.
+ * @mdp_callback: caller-owned callback descriptor; stored, not copied.
+ *
+ * Serialized via ctrl->mutex, mirroring
+ * mdss_dsi_register_recovery_handler(). Always returns 0.
+ */
+static int mdss_dsi_register_mdp_callback(struct mdss_dsi_ctrl_pdata *ctrl,
+	struct mdss_intf_recovery *mdp_callback)
+{
+	mutex_lock(&ctrl->mutex);
+	ctrl->mdp_callback = mdp_callback;
+	mutex_unlock(&ctrl->mutex);
+	return 0;
+}
+
+/*
+ * mdss_dsi_get_fb_node_cb() - resolve the framebuffer DT node for a ctrl.
+ * @pdev: platform device of the DSI controller.
+ *
+ * Looks up the DSI master device from the controller's DT parent and parses
+ * the fb phandle named by mdss_dsi_get_fb_name(). Returns the fb node (with
+ * an elevated refcount from of_parse_phandle) or NULL on failure.
+ *
+ * NOTE(review): the reference taken by of_find_device_by_node() on dsi_dev
+ * does not appear to be dropped here — confirm whether a put_device() is
+ * expected on this path.
+ */
+static struct device_node *mdss_dsi_get_fb_node_cb(struct platform_device *pdev)
+{
+	struct device_node *fb_node;
+	struct platform_device *dsi_dev;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata;
+
+	if (pdev == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return NULL;
+	}
+
+	ctrl_pdata = platform_get_drvdata(pdev);
+	dsi_dev = of_find_device_by_node(pdev->dev.of_node->parent);
+	if (!dsi_dev) {
+		pr_err("Unable to find dsi master device: %s\n",
+			pdev->dev.of_node->full_name);
+		return NULL;
+	}
+
+	fb_node = of_parse_phandle(dsi_dev->dev.of_node,
+			mdss_dsi_get_fb_name(ctrl_pdata), 0);
+	if (!fb_node) {
+		pr_err("Unable to find fb node for device: %s\n", pdev->name);
+		return NULL;
+	}
+
+	return fb_node;
+}
+
+/*
+ * mdss_dsi_event_handler() - central MDSS panel-event dispatcher for DSI.
+ * @pdata: panel data of the controller receiving the event.
+ * @event: MDSS_EVENT_* identifier.
+ * @arg: event-specific payload (fps value, power state, fb_info, ...).
+ *
+ * Routes each MDSS framework event to the matching DSI operation
+ * (on/off/blank, clock control, dynamic fps, partial ROI, mode switch,
+ * ESD status, etc.). Returns 0 or an event-specific value; for
+ * MDSS_EVENT_CHECK_PARAMS a return of 1 signals changed panel timing.
+ */
+static int mdss_dsi_event_handler(struct mdss_panel_data *pdata,
+				  int event, void *arg)
+{
+	int rc = 0;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	struct fb_info *fbi;
+	int power_state;
+	u32 mode;
+	struct mdss_panel_info *pinfo;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+	pinfo = &pdata->panel_info;
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+	pr_debug("%s+: ctrl=%d event=%d\n", __func__, ctrl_pdata->ndx, event);
+
+	MDSS_XLOG(event, arg, ctrl_pdata->ndx, 0x3333);
+
+	switch (event) {
+	case MDSS_EVENT_UPDATE_PARAMS:
+		pr_debug("%s:Entered Case MDSS_EVENT_UPDATE_PARAMS\n",
+				__func__);
+		mdss_dsi_update_params(ctrl_pdata, arg);
+		break;
+	case MDSS_EVENT_CHECK_PARAMS:
+		pr_debug("%s:Entered Case MDSS_EVENT_CHECK_PARAMS\n", __func__);
+		if (mdss_dsi_check_params(ctrl_pdata, arg)) {
+			ctrl_pdata->update_phy_timing = true;
+			/*
+			 * Call to MDSS_EVENT_CHECK_PARAMS expects
+			 * the return value of 1, if there is a change
+			 * in panel timing parameters.
+			 */
+			rc = 1;
+		}
+		ctrl_pdata->refresh_clk_rate = true;
+		break;
+	case MDSS_EVENT_LINK_READY:
+		/* refresh clocks first if params changed, then power the link */
+		if (ctrl_pdata->refresh_clk_rate)
+			rc = mdss_dsi_clk_refresh(pdata,
+				ctrl_pdata->update_phy_timing);
+
+		rc = mdss_dsi_on(pdata);
+		mdss_dsi_op_mode_config(pdata->panel_info.mipi.mode,
+							pdata);
+		break;
+	case MDSS_EVENT_UNBLANK:
+		/* LP-mode on-commands are sent before PANEL_ON */
+		if (ctrl_pdata->on_cmds.link_state == DSI_LP_MODE)
+			rc = mdss_dsi_unblank(pdata);
+		break;
+	case MDSS_EVENT_POST_PANEL_ON:
+		rc = mdss_dsi_post_panel_on(pdata);
+		break;
+	case MDSS_EVENT_PANEL_ON:
+		ctrl_pdata->ctrl_state |= CTRL_STATE_MDP_ACTIVE;
+		if (ctrl_pdata->on_cmds.link_state == DSI_HS_MODE)
+			rc = mdss_dsi_unblank(pdata);
+		pdata->panel_info.esd_rdy = true;
+		break;
+	case MDSS_EVENT_BLANK:
+		power_state = (int) (unsigned long) arg;
+		if (ctrl_pdata->off_cmds.link_state == DSI_HS_MODE)
+			rc = mdss_dsi_blank(pdata, power_state);
+		break;
+	case MDSS_EVENT_PANEL_OFF:
+		power_state = (int) (unsigned long) arg;
+		disable_esd_thread();
+		ctrl_pdata->ctrl_state &= ~CTRL_STATE_MDP_ACTIVE;
+		if (ctrl_pdata->off_cmds.link_state == DSI_LP_MODE)
+			rc = mdss_dsi_blank(pdata, power_state);
+		rc = mdss_dsi_off(pdata, power_state);
+		break;
+	case MDSS_EVENT_CONT_SPLASH_FINISH:
+		if (ctrl_pdata->off_cmds.link_state == DSI_LP_MODE)
+			rc = mdss_dsi_blank(pdata, MDSS_PANEL_POWER_OFF);
+		ctrl_pdata->ctrl_state &= ~CTRL_STATE_MDP_ACTIVE;
+		rc = mdss_dsi_cont_splash_on(pdata);
+		break;
+	case MDSS_EVENT_PANEL_CLK_CTRL:
+		mdss_dsi_clk_req(ctrl_pdata,
+			(struct dsi_panel_clk_ctrl *) arg);
+		break;
+	case MDSS_EVENT_DSI_CMDLIST_KOFF:
+		mdss_dsi_cmdlist_commit(ctrl_pdata, 1);
+		break;
+	case MDSS_EVENT_PANEL_UPDATE_FPS:
+		if (arg != NULL) {
+			rc = mdss_dsi_dfps_config(pdata,
+					 (int) (unsigned long) arg);
+			if (rc)
+				pr_err("unable to change fps-%d, error-%d\n",
+						(int) (unsigned long) arg, rc);
+			else
+				pr_debug("panel frame rate changed to %d\n",
+						(int) (unsigned long) arg);
+		}
+		break;
+	case MDSS_EVENT_CONT_SPLASH_BEGIN:
+		if (ctrl_pdata->off_cmds.link_state == DSI_HS_MODE) {
+			/* Panel is Enabled in Bootloader */
+			rc = mdss_dsi_blank(pdata, MDSS_PANEL_POWER_OFF);
+		}
+		break;
+	case MDSS_EVENT_DSC_PPS_SEND:
+		if (pinfo->compression_mode == COMPRESSION_DSC)
+			mdss_dsi_panel_dsc_pps_send(ctrl_pdata, pinfo);
+		break;
+	case MDSS_EVENT_ENABLE_PARTIAL_ROI:
+		rc = mdss_dsi_ctl_partial_roi(pdata);
+		break;
+	case MDSS_EVENT_DSI_RESET_WRITE_PTR:
+		rc = mdss_dsi_reset_write_ptr(pdata);
+		break;
+	case MDSS_EVENT_DSI_STREAM_SIZE:
+		rc = mdss_dsi_set_stream_size(pdata);
+		break;
+	case MDSS_EVENT_DSI_UPDATE_PANEL_DATA:
+		rc = mdss_dsi_update_panel_config(ctrl_pdata,
+					(int)(unsigned long) arg);
+		break;
+	case MDSS_EVENT_REGISTER_RECOVERY_HANDLER:
+		rc = mdss_dsi_register_recovery_handler(ctrl_pdata,
+			(struct mdss_intf_recovery *)arg);
+		break;
+	case MDSS_EVENT_REGISTER_MDP_CALLBACK:
+		rc = mdss_dsi_register_mdp_callback(ctrl_pdata,
+			(struct mdss_intf_recovery *)arg);
+		break;
+	case MDSS_EVENT_DSI_DYNAMIC_SWITCH:
+		mode = (u32)(unsigned long) arg;
+		mdss_dsi_switch_mode(pdata, mode);
+		break;
+	case MDSS_EVENT_DSI_RECONFIG_CMD:
+		mode = (u32)(unsigned long) arg;
+		rc = mdss_dsi_reconfig(pdata, mode);
+		break;
+	case MDSS_EVENT_DSI_PANEL_STATUS:
+		if (ctrl_pdata->check_status)
+			rc = ctrl_pdata->check_status(ctrl_pdata);
+		else
+			rc = true;
+		break;
+	case MDSS_EVENT_PANEL_TIMING_SWITCH:
+		rc = mdss_dsi_panel_timing_switch(ctrl_pdata, arg);
+		break;
+	case MDSS_EVENT_FB_REGISTERED:
+		mdss_dsi_debugfs_init(ctrl_pdata);
+
+		fbi = (struct fb_info *)arg;
+		if (!fbi || !fbi->dev)
+			break;
+
+		/* cache fb sysfs anchors for the deferred DBA registration */
+		ctrl_pdata->kobj = &fbi->dev->kobj;
+		ctrl_pdata->fb_node = fbi->node;
+
+		if (IS_ENABLED(CONFIG_MSM_DBA) &&
+			pdata->panel_info.is_dba_panel) {
+			queue_delayed_work(ctrl_pdata->workq,
+				&ctrl_pdata->dba_work, HZ);
+		}
+		break;
+	default:
+		pr_debug("%s: unhandled event=%d\n", __func__, event);
+		break;
+	}
+	pr_debug("%s-:event=%d, rc=%d\n", __func__, event, rc);
+	return rc;
+}
+
+/*
+ * mdss_dsi_set_override_cfg() - parse ":"-separated cmdline override tokens.
+ * @override_cfg: mutable token string (consumed by strsep).
+ * @ctrl_pdata: controller whose panel_info.sim_panel_mode is set.
+ * @panel_cfg: unused here; kept for signature parity with the caller.
+ *
+ * Recognizes the OVERRIDE_CFG marker plus the simulator panel-mode tokens
+ * (HW-TE / SW-TE / plain sim). Returns 0 on success, -EINVAL on any
+ * unknown token.
+ */
+static int mdss_dsi_set_override_cfg(char *override_cfg,
+		struct mdss_dsi_ctrl_pdata *ctrl_pdata, char *panel_cfg)
+{
+	struct mdss_panel_info *pinfo = &ctrl_pdata->panel_data.panel_info;
+	char *token = NULL;
+
+	pr_debug("%s: override config:%s\n", __func__, override_cfg);
+	while ((token = strsep(&override_cfg, ":"))) {
+		if (!strcmp(token, OVERRIDE_CFG)) {
+			continue;
+		} else if (!strcmp(token, SIM_HW_TE_PANEL)) {
+			pinfo->sim_panel_mode = SIM_HW_TE_MODE;
+		} else if (!strcmp(token, SIM_SW_TE_PANEL)) {
+			pinfo->sim_panel_mode = SIM_SW_TE_MODE;
+		} else if (!strcmp(token, SIM_PANEL)) {
+			pinfo->sim_panel_mode = SIM_MODE;
+		} else {
+			pr_err("%s: invalid override_cfg token: %s\n",
+					__func__, token);
+			return -EINVAL;
+		}
+	}
+	pr_debug("%s:sim_panel_mode:%d\n", __func__, pinfo->sim_panel_mode);
+
+	return 0;
+}
+
+/*
+ * mdss_dsi_pref_prim_panel() - fall back to the DT-preferred primary panel.
+ * @pdev: platform device of the DSI controller.
+ *
+ * Resolves the "qcom,dsi-pref-prim-pan" phandle. Returns the panel node
+ * (refcounted by of_parse_phandle) or NULL if the property is absent.
+ */
+static struct device_node *mdss_dsi_pref_prim_panel(
+		struct platform_device *pdev)
+{
+	struct device_node *dsi_pan_node = NULL;
+
+	pr_debug("%s:%d: Select primary panel from dt\n",
+					__func__, __LINE__);
+	dsi_pan_node = of_parse_phandle(pdev->dev.of_node,
+					"qcom,dsi-pref-prim-pan", 0);
+	if (!dsi_pan_node)
+		pr_err("%s:can't find panel phandle\n", __func__);
+
+	return dsi_pan_node;
+}
+
+/**
+ * mdss_dsi_find_panel_of_node(): find device node of dsi panel
+ * @pdev: platform_device of the dsi ctrl node
+ * @panel_cfg: string containing intf specific config data
+ *
+ * Function finds the panel device node using the interface
+ * specific configuration data. This configuration data is
+ * could be derived from the result of bootloader's GCDB
+ * panel detection mechanism. If such config data doesn't
+ * exist then this panel returns the default panel configured
+ * in the device tree.
+ *
+ * returns pointer to panel node on success, NULL on error.
+ */
+static struct device_node *mdss_dsi_find_panel_of_node(
+		struct platform_device *pdev, char *panel_cfg)
+{
+	int len, i = 0;
+	int ctrl_id = pdev->id - 1;
+	char panel_name[MDSS_MAX_PANEL_LEN] = "";
+	char ctrl_id_stream[3] =  "0:";
+	char *str1 = NULL, *str2 = NULL, *override_cfg = NULL;
+	char cfg_np_name[MDSS_MAX_PANEL_LEN] = "";
+	struct device_node *dsi_pan_node = NULL, *mdss_node = NULL;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = platform_get_drvdata(pdev);
+	struct mdss_panel_info *pinfo = &ctrl_pdata->panel_data.panel_info;
+
+	len = strlen(panel_cfg);
+	ctrl_pdata->panel_data.dsc_cfg_np_name[0] = '\0';
+	if (!len) {
+		/* no panel cfg chg, parse dt */
+		pr_debug("%s:%d: no cmd line cfg present\n",
+			 __func__, __LINE__);
+		goto end;
+	} else {
+		/* check if any override parameters are set */
+		pinfo->sim_panel_mode = 0;
+		override_cfg = strnstr(panel_cfg, "#" OVERRIDE_CFG, len);
+		if (override_cfg) {
+			*override_cfg = '\0';
+			if (mdss_dsi_set_override_cfg(override_cfg + 1,
+					ctrl_pdata, panel_cfg))
+				return NULL;
+			len = strlen(panel_cfg);
+		}
+
+		if (ctrl_id == 1)
+			strlcpy(ctrl_id_stream, "1:", 3);
+
+		/* get controller number */
+		str1 = strnstr(panel_cfg, ctrl_id_stream, len);
+		if (!str1) {
+			pr_err("%s: controller %s is not present in %s\n",
+				__func__, ctrl_id_stream, panel_cfg);
+			goto end;
+		}
+		if ((str1 != panel_cfg) && (*(str1-1) != ':')) {
+			str1 += CMDLINE_DSI_CTL_NUM_STRING_LEN;
+			pr_debug("false match with config node name in \"%s\". search again in \"%s\"\n",
+				panel_cfg, str1);
+			str1 = strnstr(str1, ctrl_id_stream, len);
+			if (!str1) {
+				pr_err("%s: 2. controller %s is not present in %s\n",
+					__func__, ctrl_id_stream, str1);
+				goto end;
+			}
+		}
+		str1 += CMDLINE_DSI_CTL_NUM_STRING_LEN;
+
+		/* get panel name */
+		str2 = strnchr(str1, strlen(str1), ':');
+		if (!str2) {
+			strlcpy(panel_name, str1, MDSS_MAX_PANEL_LEN);
+		} else {
+			for (i = 0; (str1 + i) < str2; i++)
+				panel_name[i] = *(str1 + i);
+			panel_name[i] = 0;
+		}
+		pr_info("%s: cmdline:%s panel_name:%s\n",
+			__func__, panel_cfg, panel_name);
+		if (!strcmp(panel_name, NONE_PANEL))
+			goto exit;
+
+		mdss_node = of_parse_phandle(pdev->dev.of_node,
+			"qcom,mdss-mdp", 0);
+		if (!mdss_node) {
+			pr_err("%s: %d: mdss_node null\n",
+			       __func__, __LINE__);
+			return NULL;
+		}
+		dsi_pan_node = of_find_node_by_name(mdss_node, panel_name);
+		if (!dsi_pan_node) {
+			pr_err("%s: invalid pan node \"%s\"\n",
+			       __func__, panel_name);
+			goto end;
+		} else {
+			/* extract config node name if present */
+			str1 += i;
+			str2 = strnstr(str1, "config", strlen(str1));
+			if (str2) {
+				str1 = strnchr(str2, strlen(str2), ':');
+				if (str1) {
+					for (i = 0; ((str2 + i) < str1) &&
+					     i < (MDSS_MAX_PANEL_LEN - 1); i++)
+						cfg_np_name[i] = *(str2 + i);
+					if ((i >= 0)
+						&& (i < MDSS_MAX_PANEL_LEN))
+						cfg_np_name[i] = 0;
+				} else {
+					strlcpy(cfg_np_name, str2,
+						MDSS_MAX_PANEL_LEN);
+				}
+				strlcpy(ctrl_pdata->panel_data.dsc_cfg_np_name,
+					cfg_np_name, MDSS_MAX_PANEL_LEN);
+			}
+		}
+
+		return dsi_pan_node;
+	}
+end:
+	if (strcmp(panel_name, NONE_PANEL))
+		dsi_pan_node = mdss_dsi_pref_prim_panel(pdev);
+exit:
+	return dsi_pan_node;
+}
+
+/*
+ * mdss_dsi_config_panel() - locate and initialize the panel for a ctrl.
+ * @pdev: platform device of the DSI controller.
+ * @ndx: controller index passed through to the panel init.
+ *
+ * Reads the (optional) boot-time panel config string, resolves the panel
+ * device node and runs mdss_dsi_panel_init() on it. Returns the panel
+ * node on success (caller owns the reference) or NULL on failure.
+ */
+static struct device_node *mdss_dsi_config_panel(struct platform_device *pdev,
+	int ndx)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = platform_get_drvdata(pdev);
+	char panel_cfg[MDSS_MAX_PANEL_LEN];
+	struct device_node *dsi_pan_node = NULL;
+	int rc = 0;
+
+	if (!ctrl_pdata) {
+		pr_err("%s: Unable to get the ctrl_pdata\n", __func__);
+		return NULL;
+	}
+
+	/* DSI panels can be different between controllers */
+	rc = mdss_dsi_get_panel_cfg(panel_cfg, ctrl_pdata);
+	if (!rc)
+		/* dsi panel cfg not present */
+		pr_warn("%s:%d:dsi specific cfg not present\n",
+			__func__, __LINE__);
+
+	/* find panel device node */
+	dsi_pan_node = mdss_dsi_find_panel_of_node(pdev, panel_cfg);
+	if (!dsi_pan_node) {
+		pr_err("%s: can't find panel node %s\n", __func__, panel_cfg);
+		/* of_node_put(NULL) is a harmless no-op here */
+		of_node_put(dsi_pan_node);
+		return NULL;
+	}
+
+	rc = mdss_dsi_panel_init(dsi_pan_node, ctrl_pdata, ndx);
+	if (rc) {
+		pr_err("%s: dsi panel init failed\n", __func__);
+		of_node_put(dsi_pan_node);
+		return NULL;
+	}
+
+	return dsi_pan_node;
+}
+
+/*
+ * mdss_dsi_ctrl_clock_init() - bring up the DSI clock manager for one ctrl.
+ * @ctrl_pdev: controller platform device.
+ * @ctrl_pdata: controller private data (clock handles filled in here).
+ *
+ * Initializes the link clocks, registers the per-controller clock manager
+ * and creates the two clock-vote handles (DSI driver and MDP driver).
+ * Unwinds in reverse order via gotos on failure. Returns 0 or a negative
+ * errno (-EPERM when link clock init fails).
+ */
+static int mdss_dsi_ctrl_clock_init(struct platform_device *ctrl_pdev,
+				    struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	int rc = 0;
+	struct mdss_dsi_clk_info info;
+	struct mdss_dsi_clk_client client1 = {"dsi_clk_client"};
+	struct mdss_dsi_clk_client client2 = {"mdp_event_client"};
+	void *handle;
+
+	if (mdss_dsi_link_clk_init(ctrl_pdev, ctrl_pdata)) {
+		pr_err("%s: unable to initialize Dsi ctrl clks\n", __func__);
+		return -EPERM;
+	}
+
+	memset(&info, 0x0, sizeof(info));
+
+	info.core_clks.mdp_core_clk = ctrl_pdata->shared_data->mdp_core_clk;
+	info.core_clks.ahb_clk = ctrl_pdata->shared_data->ahb_clk;
+	info.core_clks.axi_clk = ctrl_pdata->shared_data->axi_clk;
+	info.core_clks.mmss_misc_ahb_clk =
+		ctrl_pdata->shared_data->mmss_misc_ahb_clk;
+
+	info.link_clks.esc_clk = ctrl_pdata->esc_clk;
+	info.link_clks.byte_clk = ctrl_pdata->byte_clk;
+	info.link_clks.pixel_clk = ctrl_pdata->pixel_clk;
+
+	info.pre_clkoff_cb = mdss_dsi_pre_clkoff_cb;
+	info.post_clkon_cb = mdss_dsi_post_clkon_cb;
+	info.pre_clkon_cb = mdss_dsi_pre_clkon_cb;
+	info.post_clkoff_cb = mdss_dsi_post_clkoff_cb;
+	info.priv_data = ctrl_pdata;
+	snprintf(info.name, DSI_CLK_NAME_LEN, "DSI%d", ctrl_pdata->ndx);
+	ctrl_pdata->clk_mngr = mdss_dsi_clk_init(&info);
+	if (IS_ERR_OR_NULL(ctrl_pdata->clk_mngr)) {
+		rc = PTR_ERR(ctrl_pdata->clk_mngr);
+		ctrl_pdata->clk_mngr = NULL;
+		pr_err("dsi clock registration failed, rc = %d\n", rc);
+		goto error_link_clk_deinit;
+	}
+
+	/*
+	 * There are two clients that control dsi clocks. MDP driver controls
+	 * the clock through MDSS_PANEL_EVENT_CLK_CTRL event and dsi driver
+	 * through clock interface. To differentiate between the votes from the
+	 * two clients, dsi driver will use two different handles to vote for
+	 * clock states from dsi and mdp driver.
+	 */
+	handle = mdss_dsi_clk_register(ctrl_pdata->clk_mngr, &client1);
+	if (IS_ERR_OR_NULL(handle)) {
+		rc = PTR_ERR(handle);
+		pr_err("failed to register %s client, rc = %d\n",
+		       client1.client_name, rc);
+		goto error_clk_deinit;
+	} else {
+		ctrl_pdata->dsi_clk_handle = handle;
+	}
+
+	handle = mdss_dsi_clk_register(ctrl_pdata->clk_mngr, &client2);
+	if (IS_ERR_OR_NULL(handle)) {
+		rc = PTR_ERR(handle);
+		pr_err("failed to register %s client, rc = %d\n",
+		       client2.client_name, rc);
+		goto error_clk_client_deregister;
+	} else {
+		ctrl_pdata->mdp_clk_handle = handle;
+	}
+
+	return rc;
+error_clk_client_deregister:
+	mdss_dsi_clk_deregister(ctrl_pdata->dsi_clk_handle);
+error_clk_deinit:
+	mdss_dsi_clk_deinit(ctrl_pdata->clk_mngr);
+error_link_clk_deinit:
+	mdss_dsi_link_clk_deinit(&ctrl_pdev->dev, ctrl_pdata);
+	return rc;
+}
+
+/*
+ * mdss_dsi_set_clk_rates() - program the DSI link clock rates.
+ * @ctrl_pdata: controller whose byte, pixel and escape clocks are set.
+ *
+ * Each rate is registered with the clock manager using the
+ * UPDATE_CLK_RATE_AT_ON policy, so it takes effect on the next
+ * clock-enable transition. The escape clock is fixed at 19.2 MHz.
+ * Stops at the first failure and returns its error code; 0 on success.
+ */
+static int mdss_dsi_set_clk_rates(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	void *clk_handle = ctrl_pdata->dsi_clk_handle;
+	int rc;
+
+	rc = mdss_dsi_clk_set_link_rate(clk_handle, MDSS_DSI_LINK_BYTE_CLK,
+			ctrl_pdata->byte_clk_rate,
+			MDSS_DSI_CLK_UPDATE_CLK_RATE_AT_ON);
+	if (rc) {
+		pr_err("%s: dsi_byte_clk - clk_set_rate failed\n",
+				__func__);
+		return rc;
+	}
+
+	rc = mdss_dsi_clk_set_link_rate(clk_handle, MDSS_DSI_LINK_PIX_CLK,
+			ctrl_pdata->pclk_rate,
+			MDSS_DSI_CLK_UPDATE_CLK_RATE_AT_ON);
+	if (rc) {
+		pr_err("%s: dsi_pixel_clk - clk_set_rate failed\n",
+			__func__);
+		return rc;
+	}
+
+	rc = mdss_dsi_clk_set_link_rate(clk_handle, MDSS_DSI_LINK_ESC_CLK,
+			19200000, MDSS_DSI_CLK_UPDATE_CLK_RATE_AT_ON);
+	if (rc) {
+		pr_err("%s: dsi_esc_clk - clk_set_rate failed\n",
+			__func__);
+		return rc;
+	}
+
+	return rc;
+}
+
+/*
+ * mdss_dsi_cont_splash_config() - handle continuous-splash handoff at probe.
+ * @pinfo: panel info (cont_splash_enabled decides the path taken).
+ * @ctrl_pdata: controller private data.
+ *
+ * With splash active: keeps the panel powered, marks the controller fully
+ * active, leaves all DSI clocks on (voted through the MDP handle for
+ * command-mode panels, the DSI handle otherwise) and reads the HW/PHY
+ * revisions. Without splash: briefly turns on the core clock just to read
+ * the revisions, then powers the panel state down.
+ */
+static int mdss_dsi_cont_splash_config(struct mdss_panel_info *pinfo,
+				       struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	void *clk_handle;
+	int rc = 0;
+
+	if (pinfo->cont_splash_enabled) {
+		rc = mdss_dsi_panel_power_ctrl(&(ctrl_pdata->panel_data),
+			MDSS_PANEL_POWER_ON);
+		if (rc) {
+			pr_err("%s: Panel power on failed\n", __func__);
+			return rc;
+		}
+		if (ctrl_pdata->bklt_ctrl == BL_PWM)
+			mdss_dsi_panel_pwm_enable(ctrl_pdata);
+		ctrl_pdata->ctrl_state |= (CTRL_STATE_PANEL_INIT |
+			CTRL_STATE_MDP_ACTIVE | CTRL_STATE_DSI_ACTIVE);
+
+		/*
+		 * MDP client removes this extra vote during splash reconfigure
+		 * for command mode panel from interface. DSI removes the vote
+		 * during suspend-resume for video mode panel.
+		 */
+		if (ctrl_pdata->panel_data.panel_info.type == MIPI_CMD_PANEL)
+			clk_handle = ctrl_pdata->mdp_clk_handle;
+		else
+			clk_handle = ctrl_pdata->dsi_clk_handle;
+
+		mdss_dsi_clk_ctrl(ctrl_pdata, clk_handle,
+				  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_ON);
+		mdss_dsi_read_hw_revision(ctrl_pdata);
+		mdss_dsi_read_phy_revision(ctrl_pdata);
+		ctrl_pdata->is_phyreg_enabled = 1;
+		if (pinfo->type == MIPI_CMD_PANEL)
+			mdss_dsi_set_burst_mode(ctrl_pdata);
+	} else {
+		/* Turn on the clocks to read the DSI and PHY revision */
+		mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+				  MDSS_DSI_CORE_CLK, MDSS_DSI_CLK_ON);
+		mdss_dsi_read_hw_revision(ctrl_pdata);
+		mdss_dsi_read_phy_revision(ctrl_pdata);
+		mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+				  MDSS_DSI_CORE_CLK, MDSS_DSI_CLK_OFF);
+		pinfo->panel_power_state = MDSS_PANEL_POWER_OFF;
+	}
+
+	return rc;
+}
+
+/*
+ * mdss_dsi_get_bridge_chip_params() - read DBA bridge config from DT.
+ * @pinfo: panel info (only is_dba_panel is consulted).
+ * @ctrl_pdata: controller whose bridge_index is filled in.
+ * @pdev: controller platform device carrying the DT node.
+ *
+ * For DBA (bridge-chip) panels, reads the mandatory "qcom,bridge-index"
+ * property. Returns 0 on success or when the panel is not a DBA panel,
+ * -EINVAL on NULL arguments, or the of_property_read_u32() error.
+ */
+static int mdss_dsi_get_bridge_chip_params(struct mdss_panel_info *pinfo,
+				       struct mdss_dsi_ctrl_pdata *ctrl_pdata,
+				       struct platform_device *pdev)
+{
+	int rc = 0;
+	u32 temp_val = 0;
+
+	if (!ctrl_pdata || !pdev || !pinfo) {
+		/*
+		 * Report all three checked pointers; the original message
+		 * omitted pinfo even though it is part of the guard.
+		 */
+		pr_err("%s: Invalid Params ctrl_pdata=%pK, pdev=%pK, pinfo=%pK\n",
+			 __func__, ctrl_pdata, pdev, pinfo);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (pinfo->is_dba_panel) {
+		rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,bridge-index", &temp_val);
+		if (rc) {
+			pr_err("%s:%d Unable to read qcom,bridge-index, ret=%d\n",
+				__func__, __LINE__, rc);
+			goto end;
+		}
+		pr_debug("%s: DT property %s is %X\n", __func__,
+			"qcom,bridge-index", temp_val);
+		ctrl_pdata->bridge_index = temp_val;
+	}
+end:
+	return rc;
+}
+
+/*
+ * mdss_dsi_ctrl_probe() - platform probe for one DSI controller.
+ * @pdev: controller platform device ("cell-index" selects DSI0/DSI1).
+ *
+ * Brings up a controller end-to-end: clock manager, panel discovery and
+ * init, backlight parsing, panel device registration, shadow clocks for
+ * dynamic fps, link clock rates, continuous-splash handoff, TE interrupts
+ * for ESD / command-mode panels, bridge-chip params and the deferred DBA
+ * workqueue. Returns 0 on success, negative errno otherwise.
+ *
+ * NOTE(review): the failures after dsi_panel_device_register() that
+ * 'return rc' directly (clk rates, splash config) do not run the
+ * error_shadow_clk_deinit/error_pan_node unwind — confirm whether those
+ * paths intentionally skip cleanup.
+ */
+static int mdss_dsi_ctrl_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	u32 index;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	struct mdss_panel_info *pinfo = NULL;
+	struct device_node *dsi_pan_node = NULL;
+	const char *ctrl_name;
+	struct mdss_util_intf *util;
+	static int te_irq_registered;
+	struct mdss_panel_data *pdata;
+
+	if (!pdev || !pdev->dev.of_node) {
+		pr_err("%s: pdev not found for DSI controller\n", __func__);
+		return -ENODEV;
+	}
+	rc = of_property_read_u32(pdev->dev.of_node,
+				  "cell-index", &index);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: Cell-index not specified, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	/* pdev->id is 1-based; cell-index is 0-based */
+	if (index == 0)
+		pdev->id = 1;
+	else
+		pdev->id = 2;
+
+	ctrl_pdata = mdss_dsi_get_ctrl(index);
+	if (!ctrl_pdata) {
+		pr_err("%s: Unable to get the ctrl_pdata\n", __func__);
+		return -EINVAL;
+	}
+
+	platform_set_drvdata(pdev, ctrl_pdata);
+
+	util = mdss_get_util_intf();
+	if (util == NULL) {
+		pr_err("Failed to get mdss utility functions\n");
+		return -ENODEV;
+	}
+
+	ctrl_pdata->mdss_util = util;
+	atomic_set(&ctrl_pdata->te_irq_ready, 0);
+
+	ctrl_name = of_get_property(pdev->dev.of_node, "label", NULL);
+	if (!ctrl_name)
+		pr_info("%s:%d, DSI Ctrl name not specified\n",
+			__func__, __LINE__);
+	else
+		pr_info("%s: DSI Ctrl name = %s\n",
+			__func__, ctrl_name);
+
+	rc = mdss_dsi_pinctrl_init(pdev);
+	if (rc)
+		pr_warn("%s: failed to get pin resources\n", __func__);
+
+	if (index == 0) {
+		ctrl_pdata->panel_data.panel_info.pdest = DISPLAY_1;
+		ctrl_pdata->ndx = DSI_CTRL_0;
+	} else {
+		ctrl_pdata->panel_data.panel_info.pdest = DISPLAY_2;
+		ctrl_pdata->ndx = DSI_CTRL_1;
+	}
+
+	if (mdss_dsi_ctrl_clock_init(pdev, ctrl_pdata)) {
+		pr_err("%s: unable to initialize dsi clk manager\n", __func__);
+		return -EPERM;
+	}
+
+	dsi_pan_node = mdss_dsi_config_panel(pdev, index);
+	if (!dsi_pan_node) {
+		pr_err("%s: panel configuration failed\n", __func__);
+		return -EINVAL;
+	}
+
+	/* backlight is parsed only on the controller that drives it */
+	if (!mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data) ||
+		(mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data) &&
+		(ctrl_pdata->panel_data.panel_info.pdest == DISPLAY_1))) {
+		rc = mdss_panel_parse_bl_settings(dsi_pan_node, ctrl_pdata);
+		if (rc) {
+			pr_warn("%s: dsi bl settings parse failed\n", __func__);
+			/* Panels like AMOLED and dsi2hdmi chip
+			 * does not need backlight control.
+			 * So we should not fail probe here.
+			 */
+			ctrl_pdata->bklt_ctrl = UNKNOWN_CTRL;
+		}
+	} else {
+		ctrl_pdata->bklt_ctrl = UNKNOWN_CTRL;
+	}
+
+	rc = dsi_panel_device_register(pdev, dsi_pan_node, ctrl_pdata);
+	if (rc) {
+		pr_err("%s: dsi panel dev reg failed\n", __func__);
+		goto error_pan_node;
+	}
+
+	pinfo = &(ctrl_pdata->panel_data.panel_info);
+	/* shadow clocks are only needed for dynamic fps on a non-slave ctrl */
+	if (!(mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data) &&
+		mdss_dsi_is_ctrl_clk_slave(ctrl_pdata)) &&
+		pinfo->dynamic_fps) {
+		rc = mdss_dsi_shadow_clk_init(pdev, ctrl_pdata);
+
+		if (rc) {
+			pr_err("%s: unable to initialize shadow ctrl clks\n",
+					__func__);
+			rc = -EPERM;
+		}
+	}
+
+	rc = mdss_dsi_set_clk_rates(ctrl_pdata);
+	if (rc) {
+		pr_err("%s: Failed to set dsi clk rates\n", __func__);
+		return rc;
+	}
+
+	rc = mdss_dsi_cont_splash_config(pinfo, ctrl_pdata);
+	if (rc) {
+		pr_err("%s: Failed to set dsi splash config\n", __func__);
+		return rc;
+	}
+
+	if (mdss_dsi_is_te_based_esd(ctrl_pdata)) {
+		init_completion(&ctrl_pdata->te_irq_comp);
+		rc = devm_request_irq(&pdev->dev,
+			gpio_to_irq(ctrl_pdata->disp_te_gpio),
+			hw_vsync_handler, IRQF_TRIGGER_FALLING,
+			"VSYNC_GPIO", ctrl_pdata);
+		if (rc) {
+			pr_err("%s: TE request_irq failed for ESD\n", __func__);
+			goto error_shadow_clk_deinit;
+		}
+		te_irq_registered = 1;
+		disable_irq(gpio_to_irq(ctrl_pdata->disp_te_gpio));
+	}
+
+	pdata = &ctrl_pdata->panel_data;
+	init_completion(&pdata->te_done);
+	if (pdata->panel_info.type == MIPI_CMD_PANEL) {
+		if (!te_irq_registered) {
+			rc = devm_request_irq(&pdev->dev,
+				gpio_to_irq(pdata->panel_te_gpio),
+				test_hw_vsync_handler, IRQF_TRIGGER_FALLING,
+				"VSYNC_GPIO", &ctrl_pdata->panel_data);
+			if (rc) {
+				pr_err("%s: TE request_irq failed\n", __func__);
+				goto error_shadow_clk_deinit;
+			}
+			te_irq_registered = 1;
+			disable_irq_nosync(gpio_to_irq(pdata->panel_te_gpio));
+		}
+	}
+
+	rc = mdss_dsi_get_bridge_chip_params(pinfo, ctrl_pdata, pdev);
+	if (rc) {
+		pr_err("%s: Failed to get bridge params\n", __func__);
+		goto error_shadow_clk_deinit;
+	}
+
+	ctrl_pdata->workq = create_workqueue("mdss_dsi_dba");
+	if (!ctrl_pdata->workq) {
+		pr_err("%s: Error creating workqueue\n", __func__);
+		rc = -EPERM;
+		goto error_pan_node;
+	}
+
+	INIT_DELAYED_WORK(&ctrl_pdata->dba_work, mdss_dsi_dba_work);
+
+	pr_info("%s: Dsi Ctrl->%d initialized, DSI rev:0x%x, PHY rev:0x%x\n",
+		__func__, index, ctrl_pdata->shared_data->hw_rev,
+		ctrl_pdata->shared_data->phy_rev);
+	mdss_dsi_pm_qos_add_request(ctrl_pdata);
+
+	if (index == 0)
+		ctrl_pdata->shared_data->dsi0_active = true;
+	else
+		ctrl_pdata->shared_data->dsi1_active = true;
+
+	return 0;
+
+error_shadow_clk_deinit:
+	mdss_dsi_shadow_clk_deinit(&pdev->dev, ctrl_pdata);
+error_pan_node:
+	mdss_dsi_unregister_bl_settings(ctrl_pdata);
+	of_node_put(dsi_pan_node);
+	return rc;
+}
+
+/*
+ * mdss_dsi_bus_scale_init() - register the DSI bus-bandwidth client.
+ * @pdev: DSI platform device whose DT carries the bus-scale table.
+ * @sdata: shared data; bus_scale_table and bus_handle are filled in.
+ *
+ * Returns 0 on success or a negative errno. On table-lookup failure
+ * bus_scale_table is cleared so no ERR_PTR-encoded value is left behind.
+ */
+static int mdss_dsi_bus_scale_init(struct platform_device *pdev,
+			    struct dsi_shared_data  *sdata)
+{
+	int rc = 0;
+
+	sdata->bus_scale_table = msm_bus_cl_get_pdata(pdev);
+	if (IS_ERR_OR_NULL(sdata->bus_scale_table)) {
+		rc = PTR_ERR(sdata->bus_scale_table);
+		/*
+		 * Clear the ERR_PTR before returning; the original placed
+		 * this assignment after the return, where it was unreachable
+		 * and left an error-encoded pointer in sdata.
+		 */
+		sdata->bus_scale_table = NULL;
+		pr_err("%s: msm_bus_cl_get_pdata() failed, rc=%d\n", __func__,
+								     rc);
+		return rc;
+	}
+
+	sdata->bus_handle =
+		msm_bus_scale_register_client(sdata->bus_scale_table);
+
+	if (!sdata->bus_handle) {
+		rc = -EINVAL;
+		pr_err("%sbus_client register failed\n", __func__);
+	}
+
+	return rc;
+}
+
+/*
+ * mdss_dsi_bus_scale_deinit() - drop and unregister the bus-bandwidth vote.
+ * @sdata: shared data holding the bus handle and refcount.
+ *
+ * If any votes are outstanding, requests zero bandwidth first, then
+ * unregisters the client and clears the handle. Safe to call when no
+ * handle was ever registered.
+ */
+static void mdss_dsi_bus_scale_deinit(struct dsi_shared_data *sdata)
+{
+	if (sdata->bus_handle) {
+		if (sdata->bus_refcount)
+			msm_bus_scale_client_update_request(sdata->bus_handle,
+									0);
+
+		sdata->bus_refcount = 0;
+		msm_bus_scale_unregister_client(sdata->bus_handle);
+		sdata->bus_handle = 0;
+	}
+}
+
+/*
+ * mdss_dsi_parse_dt_params() - read optional shared DT properties.
+ * @pdev: DSI platform device.
+ * @sdata: shared data receiving the parsed values.
+ *
+ * Reads the ULP clamp / PHY-reset control offsets (the second only if the
+ * first is present) and the clock-lane recovery flag. All properties are
+ * optional, so lookup failures are deliberately ignored and the function
+ * always returns 0.
+ */
+static int mdss_dsi_parse_dt_params(struct platform_device *pdev,
+		struct dsi_shared_data *sdata)
+{
+	int rc = 0;
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,mmss-ulp-clamp-ctrl-offset",
+			&sdata->ulps_clamp_ctrl_off);
+	if (!rc) {
+		rc = of_property_read_u32(pdev->dev.of_node,
+				"qcom,mmss-phyreset-ctrl-offset",
+				&sdata->ulps_phyrst_ctrl_off);
+	}
+
+	sdata->cmd_clk_ln_recovery_en =
+		of_property_read_bool(pdev->dev.of_node,
+		"qcom,dsi-clk-ln-recovery");
+
+	return 0;
+}
+
+#ifdef TARGET_HW_MDSS_HDMI
+/*
+ * mdss_dsi_res_deinit_hdmi() - tear down DBA utils for one controller.
+ * @pdev: root DSI platform device (drvdata holds mdss_dsi_data).
+ * @val: controller index into ctrl_pdata[].
+ *
+ * Only acts when the controller registered with the display bridge
+ * (ds_registered). Empty stub when TARGET_HW_MDSS_HDMI is not defined.
+ */
+static void mdss_dsi_res_deinit_hdmi(struct platform_device *pdev, int val)
+{
+	struct mdss_dsi_data *dsi_res = platform_get_drvdata(pdev);
+
+	if (dsi_res->ctrl_pdata[val]->ds_registered) {
+		struct mdss_panel_info *pinfo =
+				&dsi_res->ctrl_pdata[val]->
+					panel_data.panel_info;
+		if (pinfo)
+			mdss_dba_utils_deinit(pinfo->dba_data);
+	}
+}
+#else
+static void mdss_dsi_res_deinit_hdmi(struct platform_device *pdev, int val)
+{
+	/*
+	 * Suppress unused-parameter warnings without dereferencing pdev;
+	 * the previous "(void)(*pdev);" would be undefined behavior if
+	 * pdev were ever NULL.
+	 */
+	(void)pdev;
+	(void)(val);
+}
+#endif
+
+/*
+ * mdss_dsi_res_deinit() - release all per-device DSI resources.
+ * @pdev: DSI root platform device.
+ *
+ * Frees each controller's private data, de-initializes the shared
+ * regulators (in reverse order of setup), releases the bus-scaling
+ * client and core clocks, then frees the shared data and the root
+ * structure itself.
+ */
+static void mdss_dsi_res_deinit(struct platform_device *pdev)
+{
+	int i;
+	struct mdss_dsi_data *dsi_res = platform_get_drvdata(pdev);
+	struct dsi_shared_data *sdata;
+
+	if (!dsi_res) {
+		pr_err("%s: DSI root device drvdata not found\n", __func__);
+		return;
+	}
+
+	for (i = 0; i < DSI_CTRL_MAX; i++) {
+		if (dsi_res->ctrl_pdata[i]) {
+			mdss_dsi_res_deinit_hdmi(pdev, i);
+			devm_kfree(&pdev->dev, dsi_res->ctrl_pdata[i]);
+		}
+	}
+
+	sdata = dsi_res->shared_data;
+	if (sdata) {
+		/* Undo regulator setup in reverse order of initialization. */
+		for (i = (DSI_MAX_PM - 1); i >= DSI_CORE_PM; i--) {
+			if (msm_dss_config_vreg(&pdev->dev,
+					sdata->power_data[i].vreg_config,
+					sdata->power_data[i].num_vreg, 1) < 0)
+				pr_err("%s: failed to de-init vregs for %s\n",
+					__func__, __mdss_dsi_pm_name(i));
+			mdss_dsi_put_dt_vreg_data(&pdev->dev,
+				&sdata->power_data[i]);
+		}
+
+		mdss_dsi_bus_scale_deinit(sdata);
+		mdss_dsi_core_clk_deinit(&pdev->dev, sdata);
+		devm_kfree(&pdev->dev, sdata);
+	}
+
+	/* dsi_res was validated above; the old redundant NULL re-checks
+	 * (and the goto they required) have been dropped.
+	 */
+	devm_kfree(&pdev->dev, dsi_res);
+}
+
+/*
+ * mdss_dsi_res_init() - allocate and initialize the DSI root resources.
+ * @pdev: DSI root platform device.
+ *
+ * On the first call this allocates the global mdss_dsi_res, its shared
+ * data and one ctrl_pdata per controller, parses root DT parameters and
+ * sets up core clocks, regulators and bus scaling. Later calls only
+ * refresh the pdev back-pointer. On any failure, everything allocated so
+ * far is torn down via mdss_dsi_res_deinit(). Returns 0 on success.
+ */
+static int mdss_dsi_res_init(struct platform_device *pdev)
+{
+	int rc = 0, i;
+	struct dsi_shared_data *sdata;
+
+	mdss_dsi_res = platform_get_drvdata(pdev);
+	if (!mdss_dsi_res) {
+		mdss_dsi_res = devm_kzalloc(&pdev->dev,
+					  sizeof(struct mdss_dsi_data),
+					  GFP_KERNEL);
+		if (!mdss_dsi_res) {
+			pr_err("%s: FAILED: cannot alloc dsi data\n",
+			       __func__);
+			rc = -ENOMEM;
+			goto mem_fail;
+		}
+
+		mdss_dsi_res->shared_data = devm_kzalloc(&pdev->dev,
+				sizeof(struct dsi_shared_data),
+				GFP_KERNEL);
+		pr_debug("%s Allocated shared_data=%pK\n", __func__,
+				mdss_dsi_res->shared_data);
+		if (!mdss_dsi_res->shared_data) {
+			pr_err("%s Unable to alloc mem for shared_data\n",
+					__func__);
+			rc = -ENOMEM;
+			goto mem_fail;
+		}
+
+		sdata = mdss_dsi_res->shared_data;
+
+		rc = mdss_dsi_parse_dt_params(pdev, sdata);
+		if (rc) {
+			pr_err("%s: failed to parse mdss dsi DT params\n",
+				__func__);
+			goto mem_fail;
+		}
+
+		rc = mdss_dsi_core_clk_init(pdev, sdata);
+		if (rc) {
+			pr_err("%s: failed to initialize DSI core clocks\n",
+				__func__);
+			goto mem_fail;
+		}
+
+		/* Parse the regulator information */
+		for (i = DSI_CORE_PM; i < DSI_MAX_PM; i++) {
+			rc = mdss_dsi_get_dt_vreg_data(&pdev->dev,
+				pdev->dev.of_node, &sdata->power_data[i], i);
+			if (rc) {
+				pr_err("%s: '%s' get_dt_vreg_data failed.rc=%d\n",
+					__func__, __mdss_dsi_pm_name(i), rc);
+				/* Roll back only the vreg data gathered so
+				 * far; mem_fail handles the rest.
+				 */
+				i--;
+				for (; i >= DSI_CORE_PM; i--)
+					mdss_dsi_put_dt_vreg_data(&pdev->dev,
+						&sdata->power_data[i]);
+				goto mem_fail;
+			}
+		}
+		rc = mdss_dsi_regulator_init(pdev, sdata);
+		if (rc) {
+			pr_err("%s: failed to init regulator, rc=%d\n",
+							__func__, rc);
+			goto mem_fail;
+		}
+
+		rc = mdss_dsi_bus_scale_init(pdev, sdata);
+		if (rc) {
+			pr_err("%s: failed to init bus scale settings, rc=%d\n",
+							__func__, rc);
+			goto mem_fail;
+		}
+
+		mutex_init(&sdata->phy_reg_lock);
+		mutex_init(&sdata->pm_qos_lock);
+
+		/* One ctrl_pdata per possible controller; each points back
+		 * at the single shared_data instance.
+		 */
+		for (i = 0; i < DSI_CTRL_MAX; i++) {
+			mdss_dsi_res->ctrl_pdata[i] = devm_kzalloc(&pdev->dev,
+					sizeof(struct mdss_dsi_ctrl_pdata),
+					GFP_KERNEL);
+			if (!mdss_dsi_res->ctrl_pdata[i]) {
+				pr_err("%s Unable to alloc mem for ctrl=%d\n",
+						__func__, i);
+				rc = -ENOMEM;
+				goto mem_fail;
+			}
+			pr_debug("%s Allocated ctrl_pdata[%d]=%pK\n",
+				__func__, i, mdss_dsi_res->ctrl_pdata[i]);
+			mdss_dsi_res->ctrl_pdata[i]->shared_data =
+				mdss_dsi_res->shared_data;
+		}
+
+		platform_set_drvdata(pdev, mdss_dsi_res);
+	}
+
+	mdss_dsi_res->pdev = pdev;
+	pr_debug("%s: Setting up mdss_dsi_res=%pK\n", __func__, mdss_dsi_res);
+
+	return 0;
+
+mem_fail:
+	mdss_dsi_res_deinit(pdev);
+	return rc;
+}
+
+/*
+ * mdss_dsi_parse_hw_cfg() - determine the DSI topology (single/dual/split).
+ * @pdev:    DSI root platform device.
+ * @pan_cfg: optional boot-argument panel config string ("...cfg:<mode>...").
+ *
+ * The mode is taken from @pan_cfg when present, otherwise from the
+ * "hw-config" DT property. An unrecognized string keeps the SINGLE_DSI
+ * default. Returns 0 on success, -EINVAL when no config can be found.
+ */
+static int mdss_dsi_parse_hw_cfg(struct platform_device *pdev, char *pan_cfg)
+{
+	const char *data;
+	struct mdss_dsi_data *dsi_res = platform_get_drvdata(pdev);
+	struct dsi_shared_data *sdata;
+	char dsi_cfg[20];
+	char *cfg_prim = NULL, *cfg_sec = NULL, *ch = NULL;
+	int i = 0;
+
+	if (!dsi_res) {
+		pr_err("%s: DSI root device drvdata not found\n", __func__);
+		return -EINVAL;
+	}
+
+	sdata = mdss_dsi_res->shared_data;
+	if (!sdata) {
+		pr_err("%s: DSI shared data not found\n", __func__);
+		return -EINVAL;
+	}
+
+	sdata->hw_config = SINGLE_DSI;
+
+	if (pan_cfg)
+		cfg_prim = strnstr(pan_cfg, "cfg:", strlen(pan_cfg));
+	if (cfg_prim) {
+		cfg_prim += 4;
+
+		cfg_sec = strnchr(cfg_prim, strlen(cfg_prim), ':');
+		if (!cfg_sec)
+			cfg_sec = cfg_prim + strlen(cfg_prim);
+
+		/* Copy up to the next ':' or '#', now also bounded by the
+		 * local buffer size: the previous loop could overflow
+		 * dsi_cfg[] on an oversized boot argument.
+		 */
+		for (i = 0; (i < ((int)sizeof(dsi_cfg) - 1)) &&
+		     ((cfg_prim + i) < cfg_sec) &&
+		     (*(cfg_prim + i) != '#'); i++)
+			dsi_cfg[i] = *(cfg_prim + i);
+
+		dsi_cfg[i] = '\0';
+		data = dsi_cfg;
+	} else {
+		data = of_get_property(pdev->dev.of_node,
+			"hw-config", NULL);
+	}
+
+	if (data) {
+		/*
+		 * Strip the override parameter (#override:sim) passed for
+		 * simulator panels.
+		 * NOTE(review): when 'data' comes from of_get_property()
+		 * this writes into DT property storage - confirm that is
+		 * acceptable on all targets.
+		 */
+		ch = strnstr(data, "#", strlen(data));
+		if (ch)
+			*ch = '\0';
+
+		if (!strcmp(data, "dual_dsi"))
+			sdata->hw_config = DUAL_DSI;
+		else if (!strcmp(data, "split_dsi"))
+			sdata->hw_config = SPLIT_DSI;
+		else if (!strcmp(data, "single_dsi"))
+			sdata->hw_config = SINGLE_DSI;
+		else
+			pr_err("%s: Incorrect string for DSI config:%s. Setting default as SINGLE_DSI\n",
+				__func__, data);
+	} else {
+		pr_err("%s: Error: No DSI HW config found\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: DSI h/w configuration is %d\n", __func__,
+		sdata->hw_config);
+
+	return 0;
+}
+
+/*
+ * mdss_dsi_parse_pll_src_cfg() - select the DSI PLL clock source.
+ * @pdev:    DSI root platform device.
+ * @pan_cfg: optional boot-argument panel config string.
+ *
+ * Picks PLL0/PLL1 from a ":pll0"/":pll1" token in @pan_cfg, falling back
+ * to the "pll-src-config" DT property, else PLL_SRC_DEFAULT.
+ */
+static void mdss_dsi_parse_pll_src_cfg(struct platform_device *pdev,
+	char *pan_cfg)
+{
+	const char *data;
+	char *pll_ptr, pll_cfg[10] = {'\0'};
+	struct dsi_shared_data *sdata = mdss_dsi_res->shared_data;
+
+	sdata->pll_src_config = PLL_SRC_DEFAULT;
+
+	if (pan_cfg) {
+		/* Bound strlcpy() by the destination capacity. The old code
+		 * passed strlen() of the still-empty destination (0), so
+		 * nothing was ever copied and the boot-argument PLL
+		 * selection was silently ignored.
+		 */
+		pll_ptr = strnstr(pan_cfg, ":pll0", strlen(pan_cfg));
+		if (!pll_ptr) {
+			pll_ptr = strnstr(pan_cfg, ":pll1", strlen(pan_cfg));
+			if (pll_ptr)
+				strlcpy(pll_cfg, "PLL1", sizeof(pll_cfg));
+		} else {
+			strlcpy(pll_cfg, "PLL0", sizeof(pll_cfg));
+		}
+	}
+	data = pll_cfg;
+
+	if (!data || !strcmp(data, ""))
+		data = of_get_property(pdev->dev.of_node,
+			"pll-src-config", NULL);
+	if (data) {
+		if (!strcmp(data, "PLL0"))
+			sdata->pll_src_config = PLL_SRC_0;
+		else if (!strcmp(data, "PLL1"))
+			sdata->pll_src_config = PLL_SRC_1;
+		else
+			pr_err("%s: invalid pll src config %s\n",
+				__func__, data);
+	} else {
+		pr_debug("%s: PLL src config not specified\n", __func__);
+	}
+
+	pr_debug("%s: pll_src_config = %d", __func__, sdata->pll_src_config);
+}
+
+/*
+ * mdss_dsi_validate_pll_src_config() - sanity-check the PLL selection.
+ * @sdata: shared DSI data with hw_config and pll_src_config set.
+ *
+ * DSI PLL1 can only drive DSI PHY1. Therefore:
+ *     - split-dsi must use PLL0 (PLL1 is rejected with -EINVAL)
+ *     - dual-dsi fixes DSI0->PLL0 / DSI1->PLL1, so any explicit choice
+ *       is reset to PLL_SRC_DEFAULT
+ *
+ * Returns 0 when the configuration is usable.
+ */
+static int mdss_dsi_validate_pll_src_config(struct dsi_shared_data *sdata)
+{
+	if (mdss_dsi_is_hw_config_split(sdata) &&
+		mdss_dsi_is_pll_src_pll1(sdata)) {
+		pr_err("%s: unsupported PLL config: using PLL1 for split-dsi\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (mdss_dsi_is_hw_config_dual(sdata) &&
+		!mdss_dsi_is_pll_src_default(sdata)) {
+		pr_debug("%s: pll src config not applicable for dual-dsi\n",
+			__func__);
+		sdata->pll_src_config = PLL_SRC_DEFAULT;
+	}
+
+	return 0;
+}
+
+/* Validate the parsed DSI configuration; currently only the PLL source
+ * selection needs checking. @pdev is kept for interface stability.
+ */
+static int mdss_dsi_validate_config(struct platform_device *pdev)
+{
+	return mdss_dsi_validate_pll_src_config(mdss_dsi_res->shared_data);
+}
+
+/* Match table for the per-controller "qcom,mdss-dsi-ctrl" child nodes. */
+static const struct of_device_id mdss_dsi_ctrl_dt_match[] = {
+	{.compatible = "qcom,mdss-dsi-ctrl"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, mdss_dsi_ctrl_dt_match);
+
+/*
+ * mdss_dsi_probe() - probe the DSI root device.
+ * @pdev: DSI root platform device.
+ *
+ * Defers until MDP has probed, bails out when the display is disabled or
+ * HDMI is the primary interface, then allocates the DSI resources,
+ * parses the h/w and PLL-source configuration, populates the child
+ * controller devices and validates/applies the clock-source setup.
+ */
+static int mdss_dsi_probe(struct platform_device *pdev)
+{
+	struct mdss_panel_cfg *pan_cfg = NULL;
+	struct mdss_util_intf *util;
+	char *panel_cfg;
+	int rc = 0;
+
+	util = mdss_get_util_intf();
+	if (util == NULL) {
+		pr_err("%s: Failed to get mdss utility functions\n", __func__);
+		return -ENODEV;
+	}
+
+	/* DSI depends on MDP; ask to be re-probed once MDP is up. */
+	if (!util->mdp_probe_done) {
+		pr_err("%s: MDP not probed yet!\n", __func__);
+		return -EPROBE_DEFER;
+	}
+
+	if (util->display_disabled) {
+		pr_info("%s: Display is disabled, not progressing with dsi probe\n",
+			__func__);
+		return -ENOTSUPP;
+	}
+
+	if (!pdev || !pdev->dev.of_node) {
+		pr_err("%s: DSI driver only supports device tree probe\n",
+			__func__);
+		return -ENOTSUPP;
+	}
+
+	pan_cfg = util->panel_intf_type(MDSS_PANEL_INTF_HDMI);
+	if (IS_ERR(pan_cfg)) {
+		return PTR_ERR(pan_cfg);
+	} else if (pan_cfg) {
+		pr_debug("%s: HDMI is primary\n", __func__);
+		return -ENODEV;
+	}
+
+	/* NOTE(review): if pan_cfg is NULL here, PTR_ERR(NULL) is 0 and
+	 * probe returns "success" without initializing anything - confirm
+	 * this is the intended behavior for a missing DSI interface.
+	 */
+	pan_cfg = util->panel_intf_type(MDSS_PANEL_INTF_DSI);
+	if (IS_ERR_OR_NULL(pan_cfg)) {
+		rc = PTR_ERR(pan_cfg);
+		goto error;
+	} else {
+		panel_cfg = pan_cfg->arg_cfg;
+	}
+
+	rc = mdss_dsi_res_init(pdev);
+	if (rc) {
+		pr_err("%s Unable to set dsi res\n", __func__);
+		return rc;
+	}
+
+	rc = mdss_dsi_parse_hw_cfg(pdev, panel_cfg);
+	if (rc) {
+		pr_err("%s Unable to parse dsi h/w config\n", __func__);
+		mdss_dsi_res_deinit(pdev);
+		return rc;
+	}
+
+	mdss_dsi_parse_pll_src_cfg(pdev, panel_cfg);
+
+	/* Create the child "qcom,mdss-dsi-ctrl" platform devices. */
+	of_platform_populate(pdev->dev.of_node, mdss_dsi_ctrl_dt_match,
+				NULL, &pdev->dev);
+
+	/* NOTE(review): unlike the parse_hw_cfg failure above, this error
+	 * path does not call mdss_dsi_res_deinit() - verify resources are
+	 * not leaked on an invalid configuration.
+	 */
+	rc = mdss_dsi_validate_config(pdev);
+	if (rc) {
+		pr_err("%s: Invalid DSI hw configuration\n", __func__);
+		goto error;
+	}
+
+	mdss_dsi_config_clk_src(pdev);
+
+error:
+	return rc;
+}
+
+/* Platform driver remove: release all DSI root resources. */
+static int mdss_dsi_remove(struct platform_device *pdev)
+{
+	mdss_dsi_res_deinit(pdev);
+	return 0;
+}
+
+/*
+ * mdss_dsi_ctrl_remove() - tear down one DSI controller instance.
+ * @pdev: controller platform device.
+ *
+ * Removes the PM-QoS vote, de-initializes and releases the panel
+ * regulator data, unmaps the controller/PHY/misc register regions,
+ * cleans up debugfs and destroys the controller workqueue.
+ */
+static int mdss_dsi_ctrl_remove(struct platform_device *pdev)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = platform_get_drvdata(pdev);
+
+	if (!ctrl_pdata) {
+		pr_err("%s: no driver data\n", __func__);
+		return -ENODEV;
+	}
+
+	mdss_dsi_pm_qos_remove_request(ctrl_pdata->shared_data);
+
+	if (msm_dss_config_vreg(&pdev->dev,
+			ctrl_pdata->panel_power_data.vreg_config,
+			ctrl_pdata->panel_power_data.num_vreg, 1) < 0)
+		pr_err("%s: failed to de-init vregs for %s\n",
+				__func__, __mdss_dsi_pm_name(DSI_PANEL_PM));
+	mdss_dsi_put_dt_vreg_data(&pdev->dev, &ctrl_pdata->panel_power_data);
+
+	/* Dropped the unused 'mfd' variable: it re-read drvdata (which
+	 * actually holds a ctrl_pdata, not an msm_fb_data_type) and was
+	 * never used.
+	 */
+	msm_dss_iounmap(&ctrl_pdata->mmss_misc_io);
+	msm_dss_iounmap(&ctrl_pdata->phy_io);
+	msm_dss_iounmap(&ctrl_pdata->ctrl_io);
+	mdss_dsi_debugfs_cleanup(ctrl_pdata);
+
+	if (ctrl_pdata->workq)
+		destroy_workqueue(ctrl_pdata->workq);
+
+	return 0;
+}
+
+struct device dsi_dev;
+
+/*
+ * mdss_dsi_retrieve_ctrl_resources() - map a controller's register spaces.
+ * @pdev: controller platform device.
+ * @mode: expected display destination (DISPLAY_1/DISPLAY_2).
+ * @ctrl: controller data receiving the mapped I/O regions.
+ *
+ * Validates that the DT "cell-index" matches the panel's destination,
+ * then maps the ctrl, PHY, optional PHY-regulator and optional MMSS misc
+ * register regions. Returns 0 on success, negative error otherwise.
+ */
+int mdss_dsi_retrieve_ctrl_resources(struct platform_device *pdev, int mode,
+			struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	int rc = 0;
+	u32 index;
+
+	rc = of_property_read_u32(pdev->dev.of_node, "cell-index", &index);
+	if (rc) {
+		dev_err(&pdev->dev,
+			"%s: Cell-index not specified, rc=%d\n",
+						__func__, rc);
+		return rc;
+	}
+
+	/* cell-index 0 must serve DISPLAY_1, cell-index 1 DISPLAY_2. */
+	if (index == 0) {
+		if (mode != DISPLAY_1) {
+			pr_err("%s:%d Panel->Ctrl mapping is wrong\n",
+				       __func__, __LINE__);
+			return -EPERM;
+		}
+	} else if (index == 1) {
+		if (mode != DISPLAY_2) {
+			pr_err("%s:%d Panel->Ctrl mapping is wrong\n",
+				       __func__, __LINE__);
+			return -EPERM;
+		}
+	} else {
+		pr_err("%s:%d Unknown Ctrl mapped to panel\n",
+			       __func__, __LINE__);
+		return -EPERM;
+	}
+
+	/* Controller and PHY regions are mandatory. */
+	rc = msm_dss_ioremap_byname(pdev, &ctrl->ctrl_io, "dsi_ctrl");
+	if (rc) {
+		pr_err("%s:%d unable to remap dsi ctrl resources\n",
+			       __func__, __LINE__);
+		return rc;
+	}
+
+	ctrl->ctrl_base = ctrl->ctrl_io.base;
+	ctrl->reg_size = ctrl->ctrl_io.len;
+
+	rc = msm_dss_ioremap_byname(pdev, &ctrl->phy_io, "dsi_phy");
+	if (rc) {
+		pr_err("%s:%d unable to remap dsi phy resources\n",
+			       __func__, __LINE__);
+		return rc;
+	}
+
+	/* The PHY regulator region is optional; failure is not fatal. */
+	rc = msm_dss_ioremap_byname(pdev, &ctrl->phy_regulator_io,
+			"dsi_phy_regulator");
+	if (rc)
+		pr_debug("%s:%d unable to remap dsi phy regulator resources\n",
+			       __func__, __LINE__);
+	else
+		pr_info("%s: phy_regulator_base=%pK phy_regulator_size=%x\n",
+			__func__, ctrl->phy_regulator_io.base,
+			ctrl->phy_regulator_io.len);
+
+	pr_info("%s: ctrl_base=%pK ctrl_size=%x phy_base=%pK phy_size=%x\n",
+		__func__, ctrl->ctrl_base, ctrl->reg_size, ctrl->phy_io.base,
+		ctrl->phy_io.len);
+
+	/* MMSS misc region is also optional. */
+	rc = msm_dss_ioremap_byname(pdev, &ctrl->mmss_misc_io,
+		"mmss_misc_phys");
+	if (rc) {
+		pr_debug("%s:%d mmss_misc IO remap failed\n",
+			__func__, __LINE__);
+	}
+
+	return 0;
+}
+
+/*
+ * mdss_dsi_irq_init() - request and park the DSI interrupt line.
+ * @dev:    device used for the managed (devm) IRQ request.
+ * @irq_no: interrupt number to request.
+ * @ctrl:   controller whose dsi_hw irq bookkeeping is populated.
+ *
+ * The IRQ is requested and immediately disabled; it stays masked until
+ * the controller explicitly enables it. Returns 0 on success.
+ */
+static int mdss_dsi_irq_init(struct device *dev, int irq_no,
+			struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	int ret;
+
+	/* NOTE(review): IRQF_DISABLED has long been a deprecated no-op in
+	 * mainline - confirm this tree still defines it before reuse.
+	 */
+	ret = devm_request_irq(dev, irq_no, mdss_dsi_isr,
+				IRQF_DISABLED, "DSI", ctrl);
+	if (ret) {
+		pr_err("msm_dsi_irq_init request_irq() failed!\n");
+		return ret;
+	}
+
+	/* Keep the line masked until the driver is ready for interrupts. */
+	disable_irq(irq_no);
+	ctrl->dsi_hw->irq_info = kcalloc(1, sizeof(struct irq_info),
+					 GFP_KERNEL);
+	if (!ctrl->dsi_hw->irq_info)
+		return -ENOMEM;
+
+	ctrl->dsi_hw->irq_info->irq = irq_no;
+	ctrl->dsi_hw->irq_info->irq_ena = false;
+
+	return ret;
+}
+
+/*
+ * mdss_dsi_parse_lane_swap() - read the "qcom,lane-map" DT property.
+ * @np:         DSI controller device node.
+ * @dlane_swap: output; receives one of the DSI_LANE_MAP_* values.
+ *
+ * Defaults to DSI_LANE_MAP_0123 when the property is absent or does not
+ * match any known mapping.
+ */
+static void mdss_dsi_parse_lane_swap(struct device_node *np, char *dlane_swap)
+{
+	static const struct {
+		const char *name;
+		char map;
+	} lane_map_table[] = {
+		{ "lane_map_3012", DSI_LANE_MAP_3012 },
+		{ "lane_map_2301", DSI_LANE_MAP_2301 },
+		{ "lane_map_1230", DSI_LANE_MAP_1230 },
+		{ "lane_map_0321", DSI_LANE_MAP_0321 },
+		{ "lane_map_1032", DSI_LANE_MAP_1032 },
+		{ "lane_map_2103", DSI_LANE_MAP_2103 },
+		{ "lane_map_3210", DSI_LANE_MAP_3210 },
+	};
+	const char *prop;
+	int i;
+
+	*dlane_swap = DSI_LANE_MAP_0123;
+
+	prop = of_get_property(np, "qcom,lane-map", NULL);
+	if (!prop)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(lane_map_table); i++) {
+		if (!strcmp(prop, lane_map_table[i].name)) {
+			*dlane_swap = lane_map_table[i].map;
+			return;
+		}
+	}
+}
+
+/*
+ * mdss_dsi_parse_ctrl_params() - parse per-controller DT settings.
+ * @ctrl_pdev:  controller platform device.
+ * @pan_node:   selected panel device node.
+ * @ctrl_pdata: controller data to populate.
+ *
+ * Reads the PHY strength/regulator/BIST/lane tables, timing-db and
+ * broadcast-sync flags, lane map, pluggable flag and display id.
+ * Returns 0 on success, -EINVAL when a mandatory PHY table is missing.
+ */
+static int mdss_dsi_parse_ctrl_params(struct platform_device *ctrl_pdev,
+	struct device_node *pan_node, struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	int i, len;
+	struct mdss_panel_info *pinfo = &(ctrl_pdata->panel_data.panel_info);
+	const char *data;
+
+	ctrl_pdata->null_insert_enabled = of_property_read_bool(
+		ctrl_pdev->dev.of_node, "qcom,null-insertion-enabled");
+
+	/* PHY strength settings are mandatory. */
+	data = of_get_property(ctrl_pdev->dev.of_node,
+		"qcom,platform-strength-ctrl", &len);
+	if (!data) {
+		pr_err("%s:%d, Unable to read Phy Strength ctrl settings\n",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+
+	pinfo->mipi.dsi_phy_db.strength_len = len;
+	for (i = 0; i < len; i++)
+		pinfo->mipi.dsi_phy_db.strength[i] = data[i];
+
+	pinfo->mipi.dsi_phy_db.reg_ldo_mode = of_property_read_bool(
+		ctrl_pdev->dev.of_node, "qcom,regulator-ldo-mode");
+
+	/* PHY regulator settings are mandatory. */
+	data = of_get_property(ctrl_pdev->dev.of_node,
+		"qcom,platform-regulator-settings", &len);
+	if (!data) {
+		pr_err("%s:%d, Unable to read Phy regulator settings\n",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+
+	pinfo->mipi.dsi_phy_db.regulator_len = len;
+	for (i = 0; i < len; i++)
+		pinfo->mipi.dsi_phy_db.regulator[i] = data[i];
+
+	/* BIST control is optional and must be exactly 6 bytes when set. */
+	data = of_get_property(ctrl_pdev->dev.of_node,
+		"qcom,platform-bist-ctrl", &len);
+	if ((!data) || (len != 6))
+		pr_debug("%s:%d, Unable to read Phy Bist Ctrl settings\n",
+			__func__, __LINE__);
+	else
+		for (i = 0; i < len; i++)
+			pinfo->mipi.dsi_phy_db.bistctrl[i] = data[i];
+
+	/* PHY lane configuration is mandatory. */
+	data = of_get_property(ctrl_pdev->dev.of_node,
+		"qcom,platform-lane-config", &len);
+	if (!data) {
+		pr_err("%s:%d, Unable to read Phy lane configure settings\n",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+
+	pinfo->mipi.dsi_phy_db.lanecfg_len = len;
+	for (i = 0; i < len; i++)
+		pinfo->mipi.dsi_phy_db.lanecfg[i] = data[i];
+
+	ctrl_pdata->timing_db_mode = of_property_read_bool(
+		ctrl_pdev->dev.of_node, "qcom,timing-db-mode");
+
+	ctrl_pdata->cmd_sync_wait_broadcast = of_property_read_bool(
+		pan_node, "qcom,cmd-sync-wait-broadcast");
+
+	/* In split-DSI broadcast mode, DISPLAY_2 triggers the sync wait. */
+	if (ctrl_pdata->cmd_sync_wait_broadcast &&
+		mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data) &&
+		(pinfo->pdest == DISPLAY_2))
+		ctrl_pdata->cmd_sync_wait_trigger = true;
+
+	pr_debug("%s: cmd_sync_wait_enable=%d trigger=%d\n", __func__,
+				ctrl_pdata->cmd_sync_wait_broadcast,
+				ctrl_pdata->cmd_sync_wait_trigger);
+
+	mdss_dsi_parse_lane_swap(ctrl_pdev->dev.of_node,
+			&(ctrl_pdata->dlane_swap));
+
+	pinfo->is_pluggable = of_property_read_bool(ctrl_pdev->dev.of_node,
+		"qcom,pluggable");
+
+	data = of_get_property(ctrl_pdev->dev.of_node,
+		"qcom,display-id", &len);
+	if (!data || len <= 0)
+		pr_err("%s:%d Unable to read qcom,display-id, data=%pK,len=%d\n",
+			__func__, __LINE__, data, len);
+	else
+		snprintf(ctrl_pdata->panel_data.panel_info.display_id,
+			MDSS_DISPLAY_ID_MAX_LEN, "%s", data);
+
+	return 0;
+
+
+}
+
+/*
+ * mdss_dsi_parse_gpio_params() - read the controller's GPIO bindings.
+ * @ctrl_pdev:  controller platform device.
+ * @ctrl_pdata: controller data receiving the GPIO numbers.
+ *
+ * All GPIOs are optional; invalid ones are only logged. Always returns 0.
+ */
+static int mdss_dsi_parse_gpio_params(struct platform_device *ctrl_pdev,
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	struct mdss_panel_info *pinfo = &(ctrl_pdata->panel_data.panel_info);
+	struct mdss_panel_data *pdata = &ctrl_pdata->panel_data;
+
+	/*
+	 * If disp_en_gpio has been set previously (disp_en_gpio > 0)
+	 *  while parsing the panel node, then do not override it
+	 */
+	if (ctrl_pdata->disp_en_gpio <= 0) {
+		ctrl_pdata->disp_en_gpio = of_get_named_gpio(
+			ctrl_pdev->dev.of_node,
+			"qcom,platform-enable-gpio", 0);
+
+		if (!gpio_is_valid(ctrl_pdata->disp_en_gpio))
+			pr_debug("%s:%d, Disp_en gpio not specified\n",
+					__func__, __LINE__);
+	}
+
+	ctrl_pdata->disp_te_gpio = of_get_named_gpio(ctrl_pdev->dev.of_node,
+		"qcom,platform-te-gpio", 0);
+
+	if (!gpio_is_valid(ctrl_pdata->disp_te_gpio))
+		pr_err("%s:%d, TE gpio not specified\n",
+						__func__, __LINE__);
+	/* Mirror the TE gpio into the generic panel data for consumers. */
+	pdata->panel_te_gpio = ctrl_pdata->disp_te_gpio;
+
+	ctrl_pdata->bklt_en_gpio = of_get_named_gpio(ctrl_pdev->dev.of_node,
+		"qcom,platform-bklight-en-gpio", 0);
+	if (!gpio_is_valid(ctrl_pdata->bklt_en_gpio))
+		pr_info("%s: bklt_en gpio not specified\n", __func__);
+
+	ctrl_pdata->rst_gpio = of_get_named_gpio(ctrl_pdev->dev.of_node,
+			 "qcom,platform-reset-gpio", 0);
+	if (!gpio_is_valid(ctrl_pdata->rst_gpio))
+		pr_err("%s:%d, reset gpio not specified\n",
+						__func__, __LINE__);
+
+	/* The mode gpio is only looked up when the panel declares a
+	 * valid mode-gpio state.
+	 */
+	if (pinfo->mode_gpio_state != MODE_GPIO_NOT_VALID) {
+
+		ctrl_pdata->mode_gpio = of_get_named_gpio(
+					ctrl_pdev->dev.of_node,
+					"qcom,platform-mode-gpio", 0);
+		if (!gpio_is_valid(ctrl_pdata->mode_gpio))
+			pr_info("%s:%d, mode gpio not specified\n",
+							__func__, __LINE__);
+	} else {
+		ctrl_pdata->mode_gpio = -EINVAL;
+	}
+
+	ctrl_pdata->intf_mux_gpio = of_get_named_gpio(ctrl_pdev->dev.of_node,
+			 "qcom,platform-intf-mux-gpio", 0);
+	if (!gpio_is_valid(ctrl_pdata->intf_mux_gpio))
+		pr_debug("%s:%d, intf mux gpio not specified\n",
+						__func__, __LINE__);
+
+	return 0;
+}
+
+/*
+ * mdss_dsi_set_prim_panel() - decide whether this panel is primary.
+ * @ctrl_pdata: controller whose panel info is updated.
+ *
+ * Split and single DSI panels are always primary. For dual DSI:
+ *	1. the controller with a bridge chip is always secondary
+ *	2. with no bridge chip, DSI1 is secondary
+ */
+static void mdss_dsi_set_prim_panel(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	struct mdss_panel_info *pinfo = &ctrl_pdata->panel_data.panel_info;
+	struct mdss_dsi_ctrl_pdata *octrl;
+
+	/* Start from the default; only dual-DSI DSI1 may demote it. */
+	pinfo->is_prim_panel = true;
+
+	if (!mdss_dsi_is_hw_config_dual(ctrl_pdata->shared_data))
+		return;
+
+	if (!mdss_dsi_is_right_ctrl(ctrl_pdata))
+		return;
+
+	octrl = mdss_dsi_get_other_ctrl(ctrl_pdata);
+	if (octrl && octrl->panel_data.panel_info.is_prim_panel)
+		pinfo->is_prim_panel = false;
+}
+
+/*
+ * dsi_panel_device_register() - finish panel bring-up for one controller.
+ * @ctrl_pdev:  controller platform device.
+ * @pan_node:   selected panel device node.
+ * @ctrl_pdata: controller data, already populated by panel parsing.
+ *
+ * Derives the pixel/byte clock rates, sets up the panel regulators,
+ * parses the remaining controller/GPIO DT parameters, maps the register
+ * spaces, wires up status-check and event callbacks, requests the DSI
+ * IRQ when the controller has its own line, and finally registers the
+ * panel with MDSS and debugfs. Returns 0 on success.
+ */
+int dsi_panel_device_register(struct platform_device *ctrl_pdev,
+	struct device_node *pan_node, struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	struct mipi_panel_info *mipi;
+	int rc;
+	struct dsi_shared_data *sdata;
+	struct mdss_panel_info *pinfo = &(ctrl_pdata->panel_data.panel_info);
+	struct resource *res;
+	u64 clk_rate;
+
+	mipi  = &(pinfo->mipi);
+
+	pinfo->type =
+		((mipi->mode == DSI_VIDEO_MODE)
+			? MIPI_VIDEO_PANEL : MIPI_CMD_PANEL);
+
+	rc = mdss_dsi_clk_div_config(pinfo, mipi->frame_rate);
+	if (rc) {
+		pr_err("%s: unable to initialize the clk dividers\n", __func__);
+		return rc;
+	}
+	ctrl_pdata->pclk_rate = mipi->dsi_pclk_rate;
+	/* Byte clock runs at 1/8th of the bit clock. */
+	clk_rate = pinfo->clk_rate;
+	do_div(clk_rate, 8U);
+	ctrl_pdata->byte_clk_rate = (u32)clk_rate;
+	pr_debug("%s: pclk=%d, bclk=%d\n", __func__,
+			ctrl_pdata->pclk_rate, ctrl_pdata->byte_clk_rate);
+
+
+	rc = mdss_dsi_get_dt_vreg_data(&ctrl_pdev->dev, pan_node,
+		&ctrl_pdata->panel_power_data, DSI_PANEL_PM);
+	if (rc) {
+		/* Use pr_err like the rest of this file (was DEV_ERR). */
+		pr_err("%s: '%s' get_dt_vreg_data failed.rc=%d\n",
+			__func__, __mdss_dsi_pm_name(DSI_PANEL_PM), rc);
+		return rc;
+	}
+
+	rc = msm_dss_config_vreg(&ctrl_pdev->dev,
+		ctrl_pdata->panel_power_data.vreg_config,
+		ctrl_pdata->panel_power_data.num_vreg, 1);
+	if (rc) {
+		pr_err("%s: failed to init regulator, rc=%d\n",
+						__func__, rc);
+		return rc;
+	}
+
+	rc = mdss_dsi_parse_ctrl_params(ctrl_pdev, pan_node, ctrl_pdata);
+	if (rc) {
+		pr_err("%s: failed to parse ctrl settings, rc=%d\n",
+						__func__, rc);
+		return rc;
+	}
+
+	pinfo->panel_max_fps = mdss_panel_get_framerate(pinfo,
+				FPS_RESOLUTION_HZ);
+	pinfo->panel_max_vtotal = mdss_panel_get_vtotal(pinfo);
+
+	rc = mdss_dsi_parse_gpio_params(ctrl_pdev, ctrl_pdata);
+	if (rc) {
+		pr_err("%s: failed to parse gpio params, rc=%d\n",
+						__func__, rc);
+		return rc;
+	}
+
+	if (mdss_dsi_retrieve_ctrl_resources(ctrl_pdev,
+					     pinfo->pdest,
+					     ctrl_pdata)) {
+		pr_err("%s: unable to get Dsi controller res\n", __func__);
+		return -EPERM;
+	}
+
+	ctrl_pdata->panel_data.event_handler = mdss_dsi_event_handler;
+	ctrl_pdata->panel_data.get_fb_node = mdss_dsi_get_fb_node_cb;
+
+	/* Select the ESD status-check mechanism; BTA is the fallback. */
+	if (ctrl_pdata->status_mode == ESD_REG ||
+			ctrl_pdata->status_mode == ESD_REG_NT35596)
+		ctrl_pdata->check_status = mdss_dsi_reg_status_check;
+	else if (ctrl_pdata->status_mode == ESD_BTA)
+		ctrl_pdata->check_status = mdss_dsi_bta_status_check;
+
+	if (ctrl_pdata->status_mode == ESD_MAX) {
+		pr_err("%s: Using default BTA for ESD check\n", __func__);
+		ctrl_pdata->check_status = mdss_dsi_bta_status_check;
+	}
+	if (ctrl_pdata->bklt_ctrl == BL_PWM)
+		mdss_dsi_panel_pwm_cfg(ctrl_pdata);
+
+	mdss_dsi_ctrl_init(&ctrl_pdev->dev, ctrl_pdata);
+	mdss_dsi_set_prim_panel(ctrl_pdata);
+
+	ctrl_pdata->dsi_irq_line = of_property_read_bool(
+				ctrl_pdev->dev.of_node, "qcom,dsi-irq-line");
+
+	if (ctrl_pdata->dsi_irq_line) {
+		/* DSI has it's own irq line */
+		res = platform_get_resource(ctrl_pdev, IORESOURCE_IRQ, 0);
+		if (!res || res->start == 0) {
+			pr_err("%s:%d unable to get the MDSS irq resources\n",
+							__func__, __LINE__);
+			return -ENODEV;
+		}
+		rc = mdss_dsi_irq_init(&ctrl_pdev->dev, res->start, ctrl_pdata);
+		if (rc) {
+			dev_err(&ctrl_pdev->dev, "%s: failed to init irq\n",
+							__func__);
+			return rc;
+		}
+	}
+	ctrl_pdata->ctrl_state = CTRL_STATE_UNKNOWN;
+
+	/*
+	 * If ULPS during suspend is enabled, add an extra vote for the
+	 * DSI PHY power module. This keeps the regulator always enabled.
+	 * This is needed for the DSI PHY to maintain ULPS state during
+	 * suspend also.
+	 */
+	sdata = ctrl_pdata->shared_data;
+
+	if (pinfo->ulps_suspend_enabled) {
+		rc = msm_dss_enable_vreg(
+			sdata->power_data[DSI_PHY_PM].vreg_config,
+			sdata->power_data[DSI_PHY_PM].num_vreg, 1);
+		if (rc) {
+			/* Report the module actually enabled (DSI_PHY_PM);
+			 * the old message named DSI_CTRL_PM.
+			 */
+			pr_err("%s: failed to enable vregs for %s\n",
+				__func__, __mdss_dsi_pm_name(DSI_PHY_PM));
+			return rc;
+		}
+	}
+
+	pinfo->cont_splash_enabled =
+		ctrl_pdata->mdss_util->panel_intf_status(pinfo->pdest,
+		MDSS_PANEL_INTF_DSI) ? true : false;
+
+	pr_info("%s: Continuous splash %s\n", __func__,
+		pinfo->cont_splash_enabled ? "enabled" : "disabled");
+
+	rc = mdss_register_panel(ctrl_pdev, &(ctrl_pdata->panel_data));
+	if (rc) {
+		pr_err("%s: unable to register MIPI DSI panel\n", __func__);
+		return rc;
+	}
+
+	/* Expose the mapped register spaces through debugfs. */
+	if (pinfo->pdest == DISPLAY_1) {
+		mdss_debug_register_io("dsi0_ctrl", &ctrl_pdata->ctrl_io, NULL);
+		mdss_debug_register_io("dsi0_phy", &ctrl_pdata->phy_io, NULL);
+		if (ctrl_pdata->phy_regulator_io.len)
+			mdss_debug_register_io("dsi0_phy_regulator",
+				&ctrl_pdata->phy_regulator_io, NULL);
+	} else {
+		mdss_debug_register_io("dsi1_ctrl", &ctrl_pdata->ctrl_io, NULL);
+		mdss_debug_register_io("dsi1_phy", &ctrl_pdata->phy_io, NULL);
+		if (ctrl_pdata->phy_regulator_io.len)
+			mdss_debug_register_io("dsi1_phy_regulator",
+				&ctrl_pdata->phy_regulator_io, NULL);
+	}
+
+	panel_debug_register_base("panel",
+		ctrl_pdata->ctrl_base, ctrl_pdata->reg_size);
+
+	pr_debug("%s: Panel data initialized\n", __func__);
+	return 0;
+}
+
+/* Match table for the DSI root ("qcom,mdss-dsi") device node. */
+static const struct of_device_id mdss_dsi_dt_match[] = {
+	{.compatible = "qcom,mdss-dsi"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, mdss_dsi_dt_match);
+
+/* Platform driver for the DSI root device. */
+static struct platform_driver mdss_dsi_driver = {
+	.probe = mdss_dsi_probe,
+	.remove = mdss_dsi_remove,
+	.shutdown = NULL,
+	.driver = {
+		.name = "mdss_dsi",
+		.of_match_table = mdss_dsi_dt_match,
+	},
+};
+
+/* Platform driver for the individual DSI controller devices. */
+static struct platform_driver mdss_dsi_ctrl_driver = {
+	.probe = mdss_dsi_ctrl_probe,
+	.remove = mdss_dsi_ctrl_remove,
+	.shutdown = NULL,
+	.driver = {
+		.name = "mdss_dsi_ctrl",
+		.of_match_table = mdss_dsi_ctrl_dt_match,
+	},
+};
+
+/* Register the DSI root platform driver. */
+static int mdss_dsi_register_driver(void)
+{
+	return platform_driver_register(&mdss_dsi_driver);
+}
+
+/* Module entry point for the "mdss_dsi" platform driver. */
+static int __init mdss_dsi_driver_init(void)
+{
+	int rc = mdss_dsi_register_driver();
+
+	if (rc)
+		pr_err("mdss_dsi_register_driver() failed!\n");
+
+	return rc;
+}
+
+
+/* Register the per-controller DSI platform driver. */
+static int mdss_dsi_ctrl_register_driver(void)
+{
+	return platform_driver_register(&mdss_dsi_ctrl_driver);
+}
+
+/* Module entry point for the "mdss_dsi_ctrl" platform driver. */
+static int __init mdss_dsi_ctrl_driver_init(void)
+{
+	int rc = mdss_dsi_ctrl_register_driver();
+
+	if (rc)
+		pr_err("mdss_dsi_ctrl_register_driver() failed!\n");
+
+	return rc;
+}
+
+/* Module exit: unregister BOTH platform drivers registered at init.
+ * Previously only the ctrl driver was unregistered, leaking the
+ * "mdss_dsi" root driver on module unload.
+ */
+static void __exit mdss_dsi_driver_cleanup(void)
+{
+	platform_driver_unregister(&mdss_dsi_ctrl_driver);
+	platform_driver_unregister(&mdss_dsi_driver);
+}
+module_exit(mdss_dsi_driver_cleanup);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DSI controller driver");
diff --git a/drivers/video/fbdev/msm/mdss_dsi.h b/drivers/video/fbdev/msm/mdss_dsi.h
new file mode 100644
index 0000000..ea38c0d
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_dsi.h
@@ -0,0 +1,899 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_DSI_H
+#define MDSS_DSI_H
+
+#include <linux/list.h>
+#include <linux/mdss_io_util.h>
+#include <linux/irqreturn.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/gpio.h>
+
+#include "mdss_panel.h"
+#include "mdss_dsi_cmd.h"
+#include "mdss_dsi_clk.h"
+
+#define MMSS_SERDES_BASE_PHY 0x04f01000 /* mmss (De)Serializer CFG */
+
+#define MIPI_OUTP(addr, data) writel_relaxed((data), (addr))
+#define MIPI_INP(addr) readl_relaxed(addr)
+
+#define MIPI_OUTP_SECURE(addr, data) writel_relaxed((data), (addr))
+#define MIPI_INP_SECURE(addr) readl_relaxed(addr)
+
+#define MIPI_DSI_PRIM 1
+#define MIPI_DSI_SECD 2
+
+#define MIPI_DSI_PANEL_VGA	0
+#define MIPI_DSI_PANEL_WVGA	1
+#define MIPI_DSI_PANEL_WVGA_PT	2
+#define MIPI_DSI_PANEL_FWVGA_PT	3
+#define MIPI_DSI_PANEL_WSVGA_PT	4
+#define MIPI_DSI_PANEL_QHD_PT 5
+#define MIPI_DSI_PANEL_WXGA	6
+#define MIPI_DSI_PANEL_WUXGA	7
+#define MIPI_DSI_PANEL_720P_PT	8
+#define DSI_PANEL_MAX	8
+
+#define MDSS_DSI_HW_REV_100		0x10000000	/* 8974    */
+#define MDSS_DSI_HW_REV_100_1		0x10000001	/* 8x26    */
+#define MDSS_DSI_HW_REV_100_2		0x10000002	/* 8x26v2  */
+#define MDSS_DSI_HW_REV_101		0x10010000	/* 8974v2  */
+#define MDSS_DSI_HW_REV_101_1		0x10010001	/* 8974Pro */
+#define MDSS_DSI_HW_REV_102		0x10020000	/* 8084    */
+#define MDSS_DSI_HW_REV_103		0x10030000	/* 8994    */
+#define MDSS_DSI_HW_REV_103_1		0x10030001	/* 8916/8936 */
+#define MDSS_DSI_HW_REV_104             0x10040000      /* 8996   */
+#define MDSS_DSI_HW_REV_104_1           0x10040001      /* 8996   */
+#define MDSS_DSI_HW_REV_104_2           0x10040002      /* 8937   */
+
+#define MDSS_DSI_HW_REV_STEP_0		0x0
+#define MDSS_DSI_HW_REV_STEP_1		0x1
+#define MDSS_DSI_HW_REV_STEP_2		0x2
+
+#define MDSS_STATUS_TE_WAIT_MAX		3
+#define NONE_PANEL "none"
+
+enum {		/* mipi dsi panel */
+	DSI_VIDEO_MODE,
+	DSI_CMD_MODE,
+};
+
+enum {
+	ST_DSI_CLK_OFF,
+	ST_DSI_SUSPEND,
+	ST_DSI_RESUME,
+	ST_DSI_PLAYING,
+	ST_DSI_NUM
+};
+
+enum {
+	EV_DSI_UPDATE,
+	EV_DSI_DONE,
+	EV_DSI_TOUT,
+	EV_DSI_NUM
+};
+
+enum {
+	LANDSCAPE = 1,
+	PORTRAIT = 2,
+};
+
+enum dsi_trigger_type {
+	DSI_CMD_MODE_DMA,
+	DSI_CMD_MODE_MDP,
+};
+
+enum dsi_panel_bl_ctrl {
+	BL_PWM,
+	BL_WLED,
+	BL_DCS_CMD,
+	UNKNOWN_CTRL,
+};
+
+enum dsi_panel_status_mode {
+	ESD_NONE = 0,
+	ESD_BTA,
+	ESD_REG,
+	ESD_REG_NT35596,
+	ESD_TE,
+	ESD_MAX,
+};
+
+enum dsi_ctrl_op_mode {
+	DSI_LP_MODE,
+	DSI_HS_MODE,
+};
+
+enum dsi_lane_map_type {
+	DSI_LANE_MAP_0123,
+	DSI_LANE_MAP_3012,
+	DSI_LANE_MAP_2301,
+	DSI_LANE_MAP_1230,
+	DSI_LANE_MAP_0321,
+	DSI_LANE_MAP_1032,
+	DSI_LANE_MAP_2103,
+	DSI_LANE_MAP_3210,
+};
+
+enum dsi_pm_type {
+	/* PANEL_PM not used as part of power_data in dsi_shared_data */
+	DSI_PANEL_PM,
+	DSI_CORE_PM,
+	DSI_CTRL_PM,
+	DSI_PHY_PM,
+	DSI_MAX_PM
+};
+
+/*
+ * DSI controller states.
+ *	CTRL_STATE_UNKNOWN - Unknown state of DSI controller.
+ *	CTRL_STATE_PANEL_INIT - State specifies that the panel is initialized.
+ *	CTRL_STATE_MDP_ACTIVE - State specifies that MDP is ready to send
+ *				data to DSI.
+ *	CTRL_STATE_DSI_ACTIVE - State specifies that DSI controller/PHY is
+ *				initialized.
+ */
+#define CTRL_STATE_UNKNOWN		0x00
+#define CTRL_STATE_PANEL_INIT		BIT(0)
+#define CTRL_STATE_MDP_ACTIVE		BIT(1)
+#define CTRL_STATE_DSI_ACTIVE		BIT(2)
+#define CTRL_STATE_PANEL_LP		BIT(3)
+
+#define DSI_NON_BURST_SYNCH_PULSE	0
+#define DSI_NON_BURST_SYNCH_EVENT	1
+#define DSI_BURST_MODE			2
+
+#define DSI_RGB_SWAP_RGB	0
+#define DSI_RGB_SWAP_RBG	1
+#define DSI_RGB_SWAP_BGR	2
+#define DSI_RGB_SWAP_BRG	3
+#define DSI_RGB_SWAP_GRB	4
+#define DSI_RGB_SWAP_GBR	5
+
+#define DSI_VIDEO_DST_FORMAT_RGB565		0
+#define DSI_VIDEO_DST_FORMAT_RGB666		1
+#define DSI_VIDEO_DST_FORMAT_RGB666_LOOSE	2
+#define DSI_VIDEO_DST_FORMAT_RGB888		3
+
+#define DSI_CMD_DST_FORMAT_RGB111	0
+#define DSI_CMD_DST_FORMAT_RGB332	3
+#define DSI_CMD_DST_FORMAT_RGB444	4
+#define DSI_CMD_DST_FORMAT_RGB565	6
+#define DSI_CMD_DST_FORMAT_RGB666	7
+#define DSI_CMD_DST_FORMAT_RGB888	8
+
+#define DSI_INTR_DESJEW_MASK			BIT(31)
+#define DSI_INTR_DYNAMIC_REFRESH_MASK		BIT(29)
+#define DSI_INTR_DYNAMIC_REFRESH_DONE		BIT(28)
+#define DSI_INTR_ERROR_MASK		BIT(25)
+#define DSI_INTR_ERROR			BIT(24)
+#define DSI_INTR_BTA_DONE_MASK          BIT(21)
+#define DSI_INTR_BTA_DONE               BIT(20)
+#define DSI_INTR_VIDEO_DONE_MASK	BIT(17)
+#define DSI_INTR_VIDEO_DONE		BIT(16)
+#define DSI_INTR_CMD_MDP_DONE_MASK	BIT(9)
+#define DSI_INTR_CMD_MDP_DONE		BIT(8)
+#define DSI_INTR_CMD_DMA_DONE_MASK	BIT(1)
+#define DSI_INTR_CMD_DMA_DONE		BIT(0)
+/* Update this if more interrupt masks are added in future chipsets */
+#define DSI_INTR_TOTAL_MASK		0x2222AA02
+
+#define DSI_INTR_MASK_ALL	\
+		(DSI_INTR_DESJEW_MASK | \
+		DSI_INTR_DYNAMIC_REFRESH_MASK | \
+		DSI_INTR_ERROR_MASK | \
+		DSI_INTR_BTA_DONE_MASK | \
+		DSI_INTR_VIDEO_DONE_MASK | \
+		DSI_INTR_CMD_MDP_DONE_MASK | \
+		DSI_INTR_CMD_DMA_DONE_MASK)
+
+#define DSI_CMD_TRIGGER_NONE		0x0	/* mdp trigger */
+#define DSI_CMD_TRIGGER_TE		0x02
+#define DSI_CMD_TRIGGER_SW		0x04
+#define DSI_CMD_TRIGGER_SW_SEOF		0x05	/* cmd dma only */
+#define DSI_CMD_TRIGGER_SW_TE		0x06
+
+#define DSI_VIDEO_TERM  BIT(16)
+#define DSI_MDP_TERM    BIT(8)
+#define DSI_DYNAMIC_TERM    BIT(4)
+#define DSI_BTA_TERM    BIT(1)
+#define DSI_CMD_TERM    BIT(0)
+
+#define DSI_DATA_LANES_STOP_STATE	0xF
+#define DSI_CLK_LANE_STOP_STATE		BIT(4)
+#define DSI_DATA_LANES_ENABLED		0xF0
+
+/* offsets for dynamic refresh */
+#define DSI_DYNAMIC_REFRESH_CTRL		0x200
+#define DSI_DYNAMIC_REFRESH_PIPE_DELAY		0x204
+#define DSI_DYNAMIC_REFRESH_PIPE_DELAY2		0x208
+#define DSI_DYNAMIC_REFRESH_PLL_DELAY		0x20C
+
+#define MAX_ERR_INDEX			10
+
+extern struct device dsi_dev;
+extern u32 dsi_irq;
+extern struct mdss_dsi_ctrl_pdata *ctrl_list[];
+
+enum {
+	DSI_CTRL_0,
+	DSI_CTRL_1,
+	DSI_CTRL_MAX,
+};
+
+/*
+ * Common DSI properties for each controller. The DSI root probe will create the
+ * shared_data struct which should be accessible to each controller. The goal is
+ * to only access ctrl_pdata and ctrl_pdata->shared_data during the lifetime of
+ * each controller i.e. mdss_dsi_res should not be used directly.
+ */
+struct dsi_shared_data {
+	u32 hw_config; /* DSI setup configuration i.e. single/dual/split */
+	u32 pll_src_config; /* PLL source selection for DSI link clocks */
+	u32 hw_rev; /* DSI h/w revision */
+	u32 phy_rev; /* DSI PHY revision */
+
+	/* DSI ULPS clamp register offsets */
+	u32 ulps_clamp_ctrl_off;
+	u32 ulps_phyrst_ctrl_off;
+
+	bool cmd_clk_ln_recovery_en;
+	bool dsi0_active;
+	bool dsi1_active;
+
+	/* DSI bus clocks */
+	struct clk *mdp_core_clk;
+	struct clk *ahb_clk;
+	struct clk *axi_clk;
+	struct clk *mmss_misc_ahb_clk;
+
+	/* Other shared clocks */
+	struct clk *ext_byte0_clk;
+	struct clk *ext_pixel0_clk;
+	struct clk *ext_byte1_clk;
+	struct clk *ext_pixel1_clk;
+
+	/* Clock sources for branch clocks */
+	struct clk *byte0_parent;
+	struct clk *pixel0_parent;
+	struct clk *byte1_parent;
+	struct clk *pixel1_parent;
+
+	/* DSI core regulators */
+	struct dss_module_power power_data[DSI_MAX_PM];
+
+	/* Shared mutex for DSI PHY regulator */
+	struct mutex phy_reg_lock;
+
+	/* Data bus(AXI) scale settings */
+	struct msm_bus_scale_pdata *bus_scale_table;
+	u32 bus_handle;
+	u32 bus_refcount;
+
+	/* Shared mutex for pm_qos ref count */
+	struct mutex pm_qos_lock;
+	u32 pm_qos_req_cnt;
+};
+
+struct mdss_dsi_data {
+	bool res_init;
+	struct platform_device *pdev;
+	/* List of controller specific struct data */
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata[DSI_CTRL_MAX];
+	/*
+	 * This structure should hold common data structures like
+	 * mutex, clocks, regulator information, setup information
+	 */
+	struct dsi_shared_data *shared_data;
+};
+
+/*
+ * enum mdss_dsi_hw_config - Supported DSI h/w configurations
+ *
+ * @SINGLE_DSI:		Single DSI panel driven by either DSI0 or DSI1.
+ * @DUAL_DSI:		Two DSI panels driven independently by DSI0 & DSI1.
+ * @SPLIT_DSI:		A split DSI panel driven by both the DSI controllers
+ *			with the DSI link clocks sourced by a single DSI PLL.
+ */
+enum mdss_dsi_hw_config {
+	SINGLE_DSI,
+	DUAL_DSI,
+	SPLIT_DSI,
+};
+
+/*
+ * enum mdss_dsi_pll_src_config - The PLL source for DSI link clocks
+ *
+ * @PLL_SRC_0:		The link clocks are sourced out of PLL0.
+ * @PLL_SRC_1:		The link clocks are sourced out of PLL1.
+ */
+enum mdss_dsi_pll_src_config {
+	PLL_SRC_DEFAULT,
+	PLL_SRC_0,
+	PLL_SRC_1,
+};
+
+struct dsi_panel_cmds {
+	char *buf;
+	int blen;
+	struct dsi_cmd_desc *cmds;
+	int cmd_cnt;
+	int link_state;
+};
+
+struct dsi_panel_timing {
+	struct mdss_panel_timing timing;
+	uint32_t phy_timing[12];
+	uint32_t phy_timing_8996[40];
+	/* DSI_CLKOUT_TIMING_CTRL */
+	char t_clk_post;
+	char t_clk_pre;
+	struct dsi_panel_cmds on_cmds;
+	struct dsi_panel_cmds post_panel_on_cmds;
+	struct dsi_panel_cmds switch_cmds;
+};
+
+struct dsi_kickoff_action {
+	struct list_head act_entry;
+	void (*action)(void *);
+	void *data;
+};
+
+struct dsi_pinctrl_res {
+	struct pinctrl *pinctrl;
+	struct pinctrl_state *gpio_state_active;
+	struct pinctrl_state *gpio_state_suspend;
+};
+
+struct panel_horizontal_idle {
+	int min;
+	int max;
+	int idle;
+};
+
+struct dsi_err_container {
+	u32 fifo_err_cnt;
+	u32 phy_err_cnt;
+	u32 err_cnt;
+	u32 err_time_delta;
+	u32 max_err_index;
+
+	u32 index;
+	s64 err_time[MAX_ERR_INDEX];
+};
+
+#define DSI_CTRL_LEFT		DSI_CTRL_0
+#define DSI_CTRL_RIGHT		DSI_CTRL_1
+#define DSI_CTRL_CLK_SLAVE	DSI_CTRL_RIGHT
+#define DSI_CTRL_CLK_MASTER	DSI_CTRL_LEFT
+
+#define DSI_EV_PLL_UNLOCKED		0x0001
+#define DSI_EV_DLNx_FIFO_UNDERFLOW	0x0002
+#define DSI_EV_DSI_FIFO_EMPTY		0x0004
+#define DSI_EV_DLNx_FIFO_OVERFLOW	0x0008
+#define DSI_EV_LP_RX_TIMEOUT		0x0010
+#define DSI_EV_STOP_HS_CLK_LANE		0x40000000
+#define DSI_EV_MDP_BUSY_RELEASE		0x80000000
+
+#define MDSS_DSI_VIDEO_COMPRESSION_MODE_CTRL	0x02a0
+#define MDSS_DSI_VIDEO_COMPRESSION_MODE_CTRL2	0x02a4
+#define MDSS_DSI_COMMAND_COMPRESSION_MODE_CTRL	0x02a8
+#define MDSS_DSI_COMMAND_COMPRESSION_MODE_CTRL2	0x02ac
+#define MDSS_DSI_COMMAND_COMPRESSION_MODE_CTRL3	0x02b0
+#define MSM_DBA_CHIP_NAME_MAX_LEN				20
+
+struct mdss_dsi_ctrl_pdata {
+	int ndx;	/* panel_num */
+	int (*on)(struct mdss_panel_data *pdata);
+	int (*post_panel_on)(struct mdss_panel_data *pdata);
+	int (*off)(struct mdss_panel_data *pdata);
+	int (*low_power_config)(struct mdss_panel_data *pdata, int enable);
+	int (*set_col_page_addr)(struct mdss_panel_data *pdata, bool force);
+	int (*check_status)(struct mdss_dsi_ctrl_pdata *pdata);
+	int (*check_read_status)(struct mdss_dsi_ctrl_pdata *pdata);
+	int (*cmdlist_commit)(struct mdss_dsi_ctrl_pdata *ctrl, int from_mdp);
+	void (*switch_mode)(struct mdss_panel_data *pdata, int mode);
+	struct mdss_panel_data panel_data;
+	unsigned char *ctrl_base;
+	struct dss_io_data ctrl_io;
+	struct dss_io_data mmss_misc_io;
+	struct dss_io_data phy_io;
+	struct dss_io_data phy_regulator_io;
+	int reg_size;
+	u32 flags;
+	struct clk *byte_clk;
+	struct clk *esc_clk;
+	struct clk *pixel_clk;
+	struct clk *mux_byte_clk;
+	struct clk *mux_pixel_clk;
+	struct clk *pll_byte_clk;
+	struct clk *pll_pixel_clk;
+	struct clk *shadow_byte_clk;
+	struct clk *shadow_pixel_clk;
+	struct clk *byte_clk_rcg;
+	struct clk *pixel_clk_rcg;
+	struct clk *vco_dummy_clk;
+	u8 ctrl_state;
+	int panel_mode;
+	int irq_cnt;
+	int disp_te_gpio;
+	int rst_gpio;
+	int disp_en_gpio;
+	int bklt_en_gpio;
+	int mode_gpio;
+	int intf_mux_gpio;
+	int bklt_ctrl;	/* backlight ctrl */
+	bool pwm_pmi;
+	int pwm_period;
+	int pwm_pmic_gpio;
+	int pwm_lpg_chan;
+	int bklt_max;
+	int new_fps;
+	int pwm_enabled;
+	int clk_lane_cnt;
+	bool dmap_iommu_map;
+	bool dsi_irq_line;
+	bool dcs_cmd_insert;
+	atomic_t te_irq_ready;
+	bool idle;
+
+	bool cmd_sync_wait_broadcast;
+	bool cmd_sync_wait_trigger;
+
+	struct mdss_rect roi;
+	struct pwm_device *pwm_bl;
+	u32 pclk_rate;
+	u32 byte_clk_rate;
+	u32 pclk_rate_bkp;
+	u32 byte_clk_rate_bkp;
+	bool refresh_clk_rate; /* flag to recalculate clk_rate */
+	struct dss_module_power panel_power_data;
+	struct dss_module_power power_data[DSI_MAX_PM]; /* for 8x10 */
+	u32 dsi_irq_mask;
+	struct mdss_hw *dsi_hw;
+	struct mdss_intf_recovery *recovery;
+	struct mdss_intf_recovery *mdp_callback;
+
+	struct dsi_panel_cmds on_cmds;
+	struct dsi_panel_cmds post_dms_on_cmds;
+	struct dsi_panel_cmds post_panel_on_cmds;
+	struct dsi_panel_cmds off_cmds;
+	struct dsi_panel_cmds lp_on_cmds;
+	struct dsi_panel_cmds lp_off_cmds;
+	struct dsi_panel_cmds status_cmds;
+	struct dsi_panel_cmds idle_on_cmds; /* for lp mode */
+	struct dsi_panel_cmds idle_off_cmds;
+	u32 *status_valid_params;
+	u32 *status_cmds_rlen;
+	u32 *status_value;
+	unsigned char *return_buf;
+	u32 groups; /* several alternative values to compare */
+	u32 status_error_count;
+	u32 max_status_error_count;
+
+	struct dsi_panel_cmds video2cmd;
+	struct dsi_panel_cmds cmd2video;
+
+	char pps_buf[DSC_PPS_LEN];	/* dsc pps */
+
+	struct dcs_cmd_list cmdlist;
+	struct completion dma_comp;
+	struct completion mdp_comp;
+	struct completion video_comp;
+	struct completion dynamic_comp;
+	struct completion bta_comp;
+	struct completion te_irq_comp;
+	spinlock_t irq_lock;
+	spinlock_t mdp_lock;
+	int mdp_busy;
+	struct mutex mutex;
+	struct mutex cmd_mutex;
+	struct mutex cmdlist_mutex;
+	struct regulator *lab; /* vreg handle */
+	struct regulator *ibb; /* vreg handle */
+	struct mutex clk_lane_mutex;
+
+	bool null_insert_enabled;
+	bool ulps;
+	bool core_power;
+	bool mmss_clamp;
+	char dlane_swap;	/* data lane swap */
+	bool is_phyreg_enabled;
+	bool burst_mode_enabled;
+
+	struct dsi_buf tx_buf;
+	struct dsi_buf rx_buf;
+	struct dsi_buf status_buf;
+	int status_mode;
+	int rx_len;
+	int cur_max_pkt_size;
+
+	struct dsi_pinctrl_res pin_res;
+
+	unsigned long dma_size;
+	dma_addr_t dma_addr;
+	bool cmd_cfg_restore;
+	bool do_unicast;
+
+	bool idle_enabled;
+	int horizontal_idle_cnt;
+	struct panel_horizontal_idle *line_idle;
+	struct mdss_util_intf *mdss_util;
+	struct dsi_shared_data *shared_data;
+
+	void *clk_mngr;
+	void *dsi_clk_handle;
+	void *mdp_clk_handle;
+	int m_dsi_vote_cnt;
+	int m_mdp_vote_cnt;
+	/* debugfs structure */
+	struct mdss_dsi_debugfs_info *debugfs_info;
+
+	struct dsi_err_container err_cont;
+
+	struct kobject *kobj;
+	int fb_node;
+
+	/* DBA data */
+	struct workqueue_struct *workq;
+	struct delayed_work dba_work;
+	char bridge_name[MSM_DBA_CHIP_NAME_MAX_LEN];
+	uint32_t bridge_index;
+	bool ds_registered;
+
+	bool timing_db_mode;
+	bool update_phy_timing; /* flag to recalculate PHY timings */
+
+	bool phy_power_off;
+};
+
+struct dsi_status_data {
+	struct notifier_block fb_notifier;
+	struct delayed_work check_status;
+	struct msm_fb_data_type *mfd;
+};
+
+void mdss_dsi_read_hw_revision(struct mdss_dsi_ctrl_pdata *ctrl);
+int dsi_panel_device_register(struct platform_device *ctrl_pdev,
+	struct device_node *pan_node, struct mdss_dsi_ctrl_pdata *ctrl_pdata);
+
+int mdss_dsi_cmds_tx(struct mdss_dsi_ctrl_pdata *ctrl,
+		struct dsi_cmd_desc *cmds, int cnt, int use_dma_tpg);
+
+int mdss_dsi_cmds_rx(struct mdss_dsi_ctrl_pdata *ctrl,
+			struct dsi_cmd_desc *cmds, int rlen, int use_dma_tpg);
+
+void mdss_dsi_host_init(struct mdss_panel_data *pdata);
+void mdss_dsi_op_mode_config(int mode,
+				struct mdss_panel_data *pdata);
+void mdss_dsi_restore_intr_mask(struct mdss_dsi_ctrl_pdata *ctrl);
+void mdss_dsi_cmd_mode_ctrl(int enable);
+void mdp4_dsi_cmd_trigger(void);
+void mdss_dsi_cmd_mdp_start(struct mdss_dsi_ctrl_pdata *ctrl);
+void mdss_dsi_cmd_bta_sw_trigger(struct mdss_panel_data *pdata);
+bool mdss_dsi_ack_err_status(struct mdss_dsi_ctrl_pdata *ctrl);
+int mdss_dsi_clk_ctrl(struct mdss_dsi_ctrl_pdata *ctrl, void *clk_handle,
+	enum mdss_dsi_clk_type clk_type, enum mdss_dsi_clk_state clk_state);
+void mdss_dsi_clk_req(struct mdss_dsi_ctrl_pdata *ctrl,
+	struct dsi_panel_clk_ctrl *clk_ctrl);
+void mdss_dsi_controller_cfg(int enable,
+				struct mdss_panel_data *pdata);
+void mdss_dsi_sw_reset(struct mdss_dsi_ctrl_pdata *ctrl_pdata, bool restore);
+int mdss_dsi_wait_for_lane_idle(struct mdss_dsi_ctrl_pdata *ctrl);
+
+irqreturn_t mdss_dsi_isr(int irq, void *ptr);
+irqreturn_t hw_vsync_handler(int irq, void *data);
+void disable_esd_thread(void);
+void mdss_dsi_irq_handler_config(struct mdss_dsi_ctrl_pdata *ctrl_pdata);
+
+void mdss_dsi_set_tx_power_mode(int mode, struct mdss_panel_data *pdata);
+int mdss_dsi_clk_div_config(struct mdss_panel_info *panel_info,
+			    int frame_rate);
+int mdss_dsi_clk_refresh(struct mdss_panel_data *pdata, bool update_phy);
+int mdss_dsi_link_clk_init(struct platform_device *pdev,
+		      struct mdss_dsi_ctrl_pdata *ctrl_pdata);
+void mdss_dsi_link_clk_deinit(struct device *dev,
+			struct mdss_dsi_ctrl_pdata *ctrl_pdata);
+int mdss_dsi_core_clk_init(struct platform_device *pdev,
+			struct dsi_shared_data *sdata);
+void mdss_dsi_core_clk_deinit(struct device *dev,
+			struct dsi_shared_data *sdata);
+int mdss_dsi_shadow_clk_init(struct platform_device *pdev,
+		      struct mdss_dsi_ctrl_pdata *ctrl_pdata);
+void mdss_dsi_shadow_clk_deinit(struct device *dev,
+			struct mdss_dsi_ctrl_pdata *ctrl_pdata);
+int mdss_dsi_pre_clkoff_cb(void *priv,
+			   enum mdss_dsi_clk_type clk_type,
+			   enum mdss_dsi_clk_state new_state);
+int mdss_dsi_post_clkoff_cb(void *priv,
+			    enum mdss_dsi_clk_type clk_type,
+			    enum mdss_dsi_clk_state curr_state);
+int mdss_dsi_post_clkon_cb(void *priv,
+			   enum mdss_dsi_clk_type clk_type,
+			   enum mdss_dsi_clk_state curr_state);
+int mdss_dsi_pre_clkon_cb(void *priv,
+			  enum mdss_dsi_clk_type clk_type,
+			  enum mdss_dsi_clk_state new_state);
+int mdss_dsi_panel_reset(struct mdss_panel_data *pdata, int enable);
+void mdss_dsi_phy_disable(struct mdss_dsi_ctrl_pdata *ctrl);
+void mdss_dsi_cmd_test_pattern(struct mdss_dsi_ctrl_pdata *ctrl);
+void mdss_dsi_video_test_pattern(struct mdss_dsi_ctrl_pdata *ctrl);
+void mdss_dsi_panel_pwm_cfg(struct mdss_dsi_ctrl_pdata *ctrl);
+bool mdss_dsi_panel_pwm_enable(struct mdss_dsi_ctrl_pdata *ctrl);
+void mdss_dsi_ctrl_phy_restore(struct mdss_dsi_ctrl_pdata *ctrl);
+void mdss_dsi_phy_sw_reset(struct mdss_dsi_ctrl_pdata *ctrl);
+void mdss_dsi_phy_init(struct mdss_dsi_ctrl_pdata *ctrl);
+void mdss_dsi_ctrl_init(struct device *ctrl_dev,
+			struct mdss_dsi_ctrl_pdata *ctrl);
+void mdss_dsi_cmd_mdp_busy(struct mdss_dsi_ctrl_pdata *ctrl);
+void mdss_dsi_wait4video_done(struct mdss_dsi_ctrl_pdata *ctrl);
+int mdss_dsi_en_wait4dynamic_done(struct mdss_dsi_ctrl_pdata *ctrl);
+int mdss_dsi_cmdlist_commit(struct mdss_dsi_ctrl_pdata *ctrl, int from_mdp);
+void mdss_dsi_cmdlist_kickoff(int intf);
+int mdss_dsi_bta_status_check(struct mdss_dsi_ctrl_pdata *ctrl);
+int mdss_dsi_reg_status_check(struct mdss_dsi_ctrl_pdata *ctrl);
+bool __mdss_dsi_clk_enabled(struct mdss_dsi_ctrl_pdata *ctrl, u8 clk_type);
+void mdss_dsi_ctrl_setup(struct mdss_dsi_ctrl_pdata *ctrl);
+bool mdss_dsi_dln0_phy_err(struct mdss_dsi_ctrl_pdata *ctrl, bool print_en);
+void mdss_dsi_lp_cd_rx(struct mdss_dsi_ctrl_pdata *ctrl);
+void mdss_dsi_read_phy_revision(struct mdss_dsi_ctrl_pdata *ctrl);
+int mdss_dsi_panel_cmd_read(struct mdss_dsi_ctrl_pdata *ctrl, char cmd0,
+		char cmd1, void (*fxn)(int), char *rbuf, int len);
+int mdss_dsi_panel_init(struct device_node *node,
+		struct mdss_dsi_ctrl_pdata *ctrl_pdata,
+		int ndx);
+int mdss_dsi_panel_timing_switch(struct mdss_dsi_ctrl_pdata *ctrl_pdata,
+			struct mdss_panel_timing *timing);
+
+int mdss_panel_parse_bl_settings(struct device_node *np,
+			struct mdss_dsi_ctrl_pdata *ctrl_pdata);
+int mdss_panel_get_dst_fmt(u32 bpp, char mipi_mode, u32 pixel_packing,
+				char *dst_format);
+
+int mdss_dsi_register_recovery_handler(struct mdss_dsi_ctrl_pdata *ctrl,
+		struct mdss_intf_recovery *recovery);
+void mdss_dsi_unregister_bl_settings(struct mdss_dsi_ctrl_pdata *ctrl_pdata);
+void mdss_dsi_panel_dsc_pps_send(struct mdss_dsi_ctrl_pdata *ctrl,
+				struct mdss_panel_info *pinfo);
+void mdss_dsi_dsc_config(struct mdss_dsi_ctrl_pdata *ctrl,
+	struct dsc_desc *dsc);
+void mdss_dsi_dfps_config_8996(struct mdss_dsi_ctrl_pdata *ctrl);
+void mdss_dsi_set_burst_mode(struct mdss_dsi_ctrl_pdata *ctrl);
+void mdss_dsi_set_reg(struct mdss_dsi_ctrl_pdata *ctrl, int off,
+	u32 mask, u32 val);
+int mdss_dsi_phy_pll_reset_status(struct mdss_dsi_ctrl_pdata *ctrl);
+int mdss_dsi_panel_power_ctrl(struct mdss_panel_data *pdata, int power_state);
+
+static inline const char *__mdss_dsi_pm_name(enum dsi_pm_type module)
+{
+	switch (module) {
+	case DSI_CORE_PM:	return "DSI_CORE_PM";
+	case DSI_CTRL_PM:	return "DSI_CTRL_PM";
+	case DSI_PHY_PM:	return "DSI_PHY_PM";
+	case DSI_PANEL_PM:	return "PANEL_PM";
+	default:		return "???";
+	}
+}
+
+static inline const char *__mdss_dsi_pm_supply_node_name(
+	enum dsi_pm_type module)
+{
+	switch (module) {
+	case DSI_CORE_PM:	return "qcom,core-supply-entries";
+	case DSI_CTRL_PM:	return "qcom,ctrl-supply-entries";
+	case DSI_PHY_PM:	return "qcom,phy-supply-entries";
+	case DSI_PANEL_PM:	return "qcom,panel-supply-entries";
+	default:		return "???";
+	}
+}
+
+static inline u32 mdss_dsi_get_hw_config(struct dsi_shared_data *sdata)
+{
+	return sdata->hw_config;
+}
+
+static inline bool mdss_dsi_is_hw_config_single(struct dsi_shared_data *sdata)
+{
+	return mdss_dsi_get_hw_config(sdata) == SINGLE_DSI;
+}
+
+static inline bool mdss_dsi_is_hw_config_split(struct dsi_shared_data *sdata)
+{
+	return mdss_dsi_get_hw_config(sdata) == SPLIT_DSI;
+}
+
+static inline bool mdss_dsi_is_hw_config_dual(struct dsi_shared_data *sdata)
+{
+	return mdss_dsi_get_hw_config(sdata) == DUAL_DSI;
+}
+
+static inline bool mdss_dsi_get_pll_src_config(struct dsi_shared_data *sdata)
+{
+	return sdata->pll_src_config;
+}
+
+/*
+ * mdss_dsi_is_pll_src_default: Check if the DSI device uses default PLL src
+ * For single-dsi and dual-dsi configuration, PLL source need not be
+ * explicitly specified. In this case, the default PLL source configuration
+ * is assumed.
+ *
+ * @sdata: pointer to DSI shared data structure
+ */
+static inline bool mdss_dsi_is_pll_src_default(struct dsi_shared_data *sdata)
+{
+	return sdata->pll_src_config == PLL_SRC_DEFAULT;
+}
+
+/*
+ * mdss_dsi_is_pll_src_pll0: Check if the PLL source for a DSI device is PLL0
+ * The function is only valid if the DSI configuration is single/split DSI.
+ * Not valid for dual DSI configuration.
+ *
+ * @sdata: pointer to DSI shared data structure
+ */
+static inline bool mdss_dsi_is_pll_src_pll0(struct dsi_shared_data *sdata)
+{
+	return sdata->pll_src_config == PLL_SRC_0;
+}
+
+/*
+ * mdss_dsi_is_pll_src_pll1: Check if the PLL source for a DSI device is PLL1
+ * The function is only valid if the DSI configuration is single/split DSI.
+ * Not valid for dual DSI configuration.
+ *
+ * @sdata: pointer to DSI shared data structure
+ */
+static inline bool mdss_dsi_is_pll_src_pll1(struct dsi_shared_data *sdata)
+{
+	return sdata->pll_src_config == PLL_SRC_1;
+}
+
+static inline bool mdss_dsi_is_dsi0_active(struct dsi_shared_data *sdata)
+{
+	return sdata->dsi0_active;
+}
+
+static inline bool mdss_dsi_is_dsi1_active(struct dsi_shared_data *sdata)
+{
+	return sdata->dsi1_active;
+}
+
+static inline u32 mdss_dsi_get_phy_revision(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	return ctrl->shared_data->phy_rev;
+}
+
+static inline const char *mdss_dsi_get_fb_name(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	struct mdss_panel_info *pinfo = &(ctrl->panel_data.panel_info);
+
+	if (mdss_dsi_is_hw_config_dual(ctrl->shared_data)) {
+		if (pinfo->is_prim_panel)
+			return "qcom,mdss-fb-map-prim";
+		else
+			return "qcom,mdss-fb-map-sec";
+	} else {
+		return "qcom,mdss-fb-map-prim";
+	}
+}
+
+static inline bool mdss_dsi_sync_wait_enable(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	return ctrl->cmd_sync_wait_broadcast;
+}
+
+static inline bool mdss_dsi_sync_wait_trigger(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	return ctrl->cmd_sync_wait_broadcast &&
+				ctrl->cmd_sync_wait_trigger;
+}
+
+static inline bool mdss_dsi_is_left_ctrl(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	return ctrl->ndx == DSI_CTRL_LEFT;
+}
+
+static inline bool mdss_dsi_is_right_ctrl(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	return ctrl->ndx == DSI_CTRL_RIGHT;
+}
+
+static inline struct mdss_dsi_ctrl_pdata *mdss_dsi_get_other_ctrl(
+					struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	if (ctrl->ndx == DSI_CTRL_RIGHT)
+		return ctrl_list[DSI_CTRL_LEFT];
+
+	return ctrl_list[DSI_CTRL_RIGHT];
+}
+
+static inline struct mdss_dsi_ctrl_pdata *mdss_dsi_get_ctrl_by_index(int ndx)
+{
+	if (ndx >= DSI_CTRL_MAX)
+		return NULL;
+
+	return ctrl_list[ndx];
+}
+
+static inline bool mdss_dsi_is_ctrl_clk_master(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	return mdss_dsi_is_hw_config_split(ctrl->shared_data) &&
+		(ctrl->ndx == DSI_CTRL_CLK_MASTER);
+}
+
+static inline bool mdss_dsi_is_ctrl_clk_slave(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	return mdss_dsi_is_hw_config_split(ctrl->shared_data) &&
+		(ctrl->ndx == DSI_CTRL_CLK_SLAVE);
+}
+
+static inline bool mdss_dsi_is_te_based_esd(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	return (ctrl->status_mode == ESD_TE) &&
+		gpio_is_valid(ctrl->disp_te_gpio) &&
+		mdss_dsi_is_left_ctrl(ctrl);
+}
+
+static inline struct mdss_dsi_ctrl_pdata *mdss_dsi_get_ctrl_clk_master(void)
+{
+	return ctrl_list[DSI_CTRL_CLK_MASTER];
+}
+
+static inline struct mdss_dsi_ctrl_pdata *mdss_dsi_get_ctrl_clk_slave(void)
+{
+	return ctrl_list[DSI_CTRL_CLK_SLAVE];
+}
+
+static inline bool mdss_dsi_is_panel_off(struct mdss_panel_data *pdata)
+{
+	return mdss_panel_is_power_off(pdata->panel_info.panel_power_state);
+}
+
+static inline bool mdss_dsi_is_panel_on(struct mdss_panel_data *pdata)
+{
+	return mdss_panel_is_power_on(pdata->panel_info.panel_power_state);
+}
+
+static inline bool mdss_dsi_is_panel_on_interactive(
+	struct mdss_panel_data *pdata)
+{
+	return mdss_panel_is_power_on_interactive(
+		pdata->panel_info.panel_power_state);
+}
+
+static inline bool mdss_dsi_is_panel_on_lp(struct mdss_panel_data *pdata)
+{
+	return mdss_panel_is_power_on_lp(pdata->panel_info.panel_power_state);
+}
+
+static inline bool mdss_dsi_is_panel_on_ulp(struct mdss_panel_data *pdata)
+{
+	return mdss_panel_is_power_on_ulp(pdata->panel_info.panel_power_state);
+}
+
+static inline bool mdss_dsi_ulps_feature_enabled(
+	struct mdss_panel_data *pdata)
+{
+	return pdata->panel_info.ulps_feature_enabled;
+}
+
+static inline bool mdss_dsi_cmp_panel_reg(struct dsi_buf status_buf,
+	u32 *status_val, int i)
+{
+	return status_buf.data[i] == status_val[i];
+}
+
+#endif /* MDSS_DSI_H */
diff --git a/drivers/video/fbdev/msm/mdss_dsi_clk.c b/drivers/video/fbdev/msm/mdss_dsi_clk.c
new file mode 100644
index 0000000..372c93e
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_dsi_clk.c
@@ -0,0 +1,1003 @@
+/* Copyright (c) 2015-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "mdss-dsi-clk:[%s] " fmt, __func__
+#include <linux/clk/msm-clk.h>
+#include <linux/clk.h>
+#include <linux/list.h>
+
+#include "mdss_dsi_clk.h"
+#include "mdss_dsi.h"
+#include "mdss_debug.h"
+
+#define MAX_CLIENT_NAME_LEN 20
+struct dsi_core_clks {
+	struct mdss_dsi_core_clk_info clks;
+	u32 current_clk_state;
+};
+
+struct dsi_link_clks {
+	struct mdss_dsi_link_clk_info clks;
+	u32 current_clk_state;
+	u32 byte_clk_rate;
+	u32 pix_clk_rate;
+	u32 esc_clk_rate;
+};
+
+struct mdss_dsi_clk_mngr {
+	char name[DSI_CLK_NAME_LEN];
+	struct dsi_core_clks core_clks;
+	struct dsi_link_clks link_clks;
+
+	struct reg_bus_client *reg_bus_clt;
+
+	pre_clockoff_cb pre_clkoff_cb;
+	post_clockoff_cb post_clkoff_cb;
+	post_clockon_cb post_clkon_cb;
+	pre_clockon_cb pre_clkon_cb;
+
+	struct list_head client_list;
+	struct mutex clk_mutex;
+
+	void *priv_data;
+};
+
+struct mdss_dsi_clk_client_info {
+	char name[MAX_CLIENT_NAME_LEN];
+	u32 core_refcount;
+	u32 link_refcount;
+	u32 core_clk_state;
+	u32 link_clk_state;
+
+	struct list_head list;
+
+	struct mdss_dsi_clk_mngr *mngr;
+};
+
+static int dsi_core_clk_start(struct dsi_core_clks *c_clks)
+{
+	int rc = 0;
+	struct mdss_dsi_clk_mngr *mngr;
+
+	mngr = container_of(c_clks, struct mdss_dsi_clk_mngr, core_clks);
+
+	rc = clk_prepare_enable(c_clks->clks.mdp_core_clk);
+	if (rc) {
+		pr_err("%s: failed to enable mdp_core_clock. rc=%d\n",
+							 __func__, rc);
+		goto error;
+	}
+
+	rc = clk_prepare_enable(c_clks->clks.ahb_clk);
+	if (rc) {
+		pr_err("%s: failed to enable ahb clock. rc=%d\n", __func__, rc);
+		goto disable_core_clk;
+	}
+
+	rc = clk_prepare_enable(c_clks->clks.axi_clk);
+	if (rc) {
+		pr_err("%s: failed to enable axi clock. rc=%d\n", __func__, rc);
+		goto disable_ahb_clk;
+	}
+
+	if (c_clks->clks.mmss_misc_ahb_clk) {
+		rc = clk_prepare_enable(c_clks->clks.mmss_misc_ahb_clk);
+		if (rc) {
+			pr_err("%s: failed to enable mmss misc ahb clk.rc=%d\n",
+				__func__, rc);
+			goto disable_axi_clk;
+		}
+	}
+
+	rc = mdss_update_reg_bus_vote(mngr->reg_bus_clt, VOTE_INDEX_LOW);
+	if (rc) {
+		pr_err("failed to vote for reg bus\n");
+		goto disable_mmss_misc_clk;
+	}
+
+	pr_debug("%s:CORE CLOCK IS ON\n", mngr->name);
+	return rc;
+
+disable_mmss_misc_clk:
+	if (c_clks->clks.mmss_misc_ahb_clk)
+		clk_disable_unprepare(c_clks->clks.mmss_misc_ahb_clk);
+disable_axi_clk:
+	clk_disable_unprepare(c_clks->clks.axi_clk);
+disable_ahb_clk:
+	clk_disable_unprepare(c_clks->clks.ahb_clk);
+disable_core_clk:
+	clk_disable_unprepare(c_clks->clks.mdp_core_clk);
+error:
+	pr_debug("%s: EXIT, rc = %d\n", mngr->name, rc);
+	return rc;
+}
+
+static int dsi_core_clk_stop(struct dsi_core_clks *c_clks)
+{
+	int rc = 0;
+	struct mdss_dsi_clk_mngr *mngr;
+
+	mngr = container_of(c_clks, struct mdss_dsi_clk_mngr, core_clks);
+
+	mdss_update_reg_bus_vote(mngr->reg_bus_clt, VOTE_INDEX_DISABLE);
+	if (c_clks->clks.mmss_misc_ahb_clk)
+		clk_disable_unprepare(c_clks->clks.mmss_misc_ahb_clk);
+	clk_disable_unprepare(c_clks->clks.axi_clk);
+	clk_disable_unprepare(c_clks->clks.ahb_clk);
+	clk_disable_unprepare(c_clks->clks.mdp_core_clk);
+
+	pr_debug("%s: CORE CLOCK IS OFF\n", mngr->name);
+	return rc;
+}
+
+static int dsi_link_clk_set_rate(struct dsi_link_clks *l_clks)
+{
+	int rc = 0;
+	struct mdss_dsi_clk_mngr *mngr;
+	struct mdss_dsi_ctrl_pdata *ctrl;
+
+	mngr = container_of(l_clks, struct mdss_dsi_clk_mngr, link_clks);
+
+	/*
+	 * In an ideal world, cont_splash_enabled should not be required inside
+	 * the clock manager. But, in the current driver cont_splash_enabled
+	 * flag is set inside mdp driver and there is no interface event
+	 * associated with this flag setting. Also, set rate for clock need not
+	 * be called for every enable call. It should be done only once when
+	 * coming out of suspend.
+	 */
+	ctrl = mngr->priv_data;
+	if (ctrl->panel_data.panel_info.cont_splash_enabled)
+		return 0;
+
+	rc = clk_set_rate(l_clks->clks.esc_clk, l_clks->esc_clk_rate);
+	if (rc) {
+		pr_err("clk_set_rate failed for esc_clk rc = %d\n", rc);
+		goto error;
+	}
+
+	rc = clk_set_rate(l_clks->clks.byte_clk, l_clks->byte_clk_rate);
+	if (rc) {
+		pr_err("clk_set_rate failed for byte_clk rc = %d\n", rc);
+		goto error;
+	}
+
+	rc = clk_set_rate(l_clks->clks.pixel_clk, l_clks->pix_clk_rate);
+	if (rc) {
+		pr_err("clk_set_rate failed for pixel_clk rc = %d\n", rc);
+		goto error;
+	}
+
+error:
+	return rc;
+}
+
+static int dsi_link_clk_prepare(struct dsi_link_clks *l_clks)
+{
+	int rc = 0;
+
+	rc = clk_prepare(l_clks->clks.esc_clk);
+	if (rc) {
+		pr_err("%s: Failed to prepare dsi esc clk\n", __func__);
+		goto esc_clk_err;
+	}
+
+	rc = clk_prepare(l_clks->clks.byte_clk);
+	if (rc) {
+		pr_err("%s: Failed to prepare dsi byte clk\n", __func__);
+		goto byte_clk_err;
+	}
+
+	rc = clk_prepare(l_clks->clks.pixel_clk);
+	if (rc) {
+		pr_err("%s: Failed to prepare dsi pixel clk\n", __func__);
+		goto pixel_clk_err;
+	}
+
+	return rc;
+
+pixel_clk_err:
+	clk_unprepare(l_clks->clks.byte_clk);
+byte_clk_err:
+	clk_unprepare(l_clks->clks.esc_clk);
+esc_clk_err:
+	return rc;
+}
+
+static int dsi_link_clk_unprepare(struct dsi_link_clks *l_clks)
+{
+	int rc = 0;
+
+	clk_unprepare(l_clks->clks.pixel_clk);
+	clk_unprepare(l_clks->clks.byte_clk);
+	clk_unprepare(l_clks->clks.esc_clk);
+
+	return rc;
+}
+
+static int dsi_link_clk_enable(struct dsi_link_clks *l_clks)
+{
+	int rc = 0;
+
+	rc = clk_enable(l_clks->clks.esc_clk);
+	if (rc) {
+		pr_err("%s: Failed to enable dsi esc clk\n", __func__);
+		goto esc_clk_err;
+	}
+
+	rc = clk_enable(l_clks->clks.byte_clk);
+	if (rc) {
+		pr_err("%s: Failed to enable dsi byte clk\n", __func__);
+		goto byte_clk_err;
+	}
+
+	rc = clk_enable(l_clks->clks.pixel_clk);
+	if (rc) {
+		pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
+		goto pixel_clk_err;
+	}
+
+	return rc;
+
+pixel_clk_err:
+	clk_disable(l_clks->clks.byte_clk);
+byte_clk_err:
+	clk_disable(l_clks->clks.esc_clk);
+esc_clk_err:
+	return rc;
+}
+
+static int dsi_link_clk_disable(struct dsi_link_clks *l_clks)
+{
+	int rc = 0;
+
+	clk_disable(l_clks->clks.esc_clk);
+	clk_disable(l_clks->clks.pixel_clk);
+	clk_disable(l_clks->clks.byte_clk);
+
+	return rc;
+}
+
+
+static int dsi_link_clk_start(struct dsi_link_clks *l_clks)
+{
+	int rc = 0;
+	struct mdss_dsi_clk_mngr *mngr;
+
+	mngr = container_of(l_clks, struct mdss_dsi_clk_mngr, link_clks);
+
+	rc = dsi_link_clk_set_rate(l_clks);
+	if (rc) {
+		pr_err("failed to set clk rates, rc = %d\n", rc);
+		goto error;
+	}
+
+	rc = dsi_link_clk_prepare(l_clks);
+	if (rc) {
+		pr_err("failed to prepare link clks, rc = %d\n", rc);
+		goto error;
+	}
+
+	rc = dsi_link_clk_enable(l_clks);
+	if (rc) {
+		pr_err("failed to enable link clks, rc = %d\n", rc);
+		goto error_unprepare;
+	}
+
+	pr_debug("%s: LINK CLOCK IS ON\n", mngr->name);
+	return rc;
+error_unprepare:
+	dsi_link_clk_unprepare(l_clks);
+error:
+	return rc;
+}
+
+static int dsi_link_clk_stop(struct dsi_link_clks *l_clks)
+{
+	int rc = 0;
+	struct mdss_dsi_clk_mngr *mngr;
+
+	mngr = container_of(l_clks, struct mdss_dsi_clk_mngr, link_clks);
+
+	(void)dsi_link_clk_disable(l_clks);
+
+	(void)dsi_link_clk_unprepare(l_clks);
+	pr_debug("%s: LINK CLOCK IS OFF\n", mngr->name);
+
+	return rc;
+}
+
+static int dsi_update_clk_state(struct dsi_core_clks *c_clks, u32 c_state,
+				struct dsi_link_clks *l_clks, u32 l_state)
+{
+	int rc = 0;
+	struct mdss_dsi_clk_mngr *mngr;
+	bool l_c_on = false;
+
+	if (c_clks) {
+		mngr =
+		container_of(c_clks, struct mdss_dsi_clk_mngr, core_clks);
+	} else if (l_clks) {
+		mngr =
+		container_of(l_clks, struct mdss_dsi_clk_mngr, link_clks);
+	} else {
+		mngr = NULL;
+	}
+
+	if (!mngr)
+		return -EINVAL;
+
+	pr_debug("%s: c_state = %d, l_state = %d\n", mngr ? mngr->name : "NA",
+		 c_clks ? c_state : -1, l_clks ? l_state : -1);
+	/*
+	 * Clock toggle order:
+	 *	1. When turning on, Core clocks before link clocks
+	 *	2. When turning off, Link clocks before core clocks.
+	 */
+	if (c_clks && (c_state == MDSS_DSI_CLK_ON)) {
+		if (c_clks->current_clk_state == MDSS_DSI_CLK_OFF) {
+			rc = mngr->pre_clkon_cb(mngr->priv_data,
+						MDSS_DSI_CORE_CLK,
+						MDSS_DSI_CLK_ON);
+			if (rc) {
+				pr_err("failed to turn on MDP FS rc= %d\n", rc);
+				goto error;
+			}
+		}
+		rc = dsi_core_clk_start(c_clks);
+		if (rc) {
+			pr_err("failed to turn on core clks rc = %d\n", rc);
+			goto error;
+		}
+
+		if (mngr->post_clkon_cb) {
+			rc = mngr->post_clkon_cb(mngr->priv_data,
+						 MDSS_DSI_CORE_CLK,
+						 MDSS_DSI_CLK_ON);
+			if (rc)
+				pr_err("post clk on cb failed, rc = %d\n", rc);
+		}
+		c_clks->current_clk_state = MDSS_DSI_CLK_ON;
+	}
+
+	if (l_clks) {
+
+		if (l_state == MDSS_DSI_CLK_ON) {
+			if (mngr->pre_clkon_cb) {
+				rc = mngr->pre_clkon_cb(mngr->priv_data,
+					MDSS_DSI_LINK_CLK, l_state);
+				if (rc)
+					pr_err("pre link clk on cb failed\n");
+			}
+			rc = dsi_link_clk_start(l_clks);
+			if (rc) {
+				pr_err("failed to start link clk rc= %d\n", rc);
+				goto error;
+			}
+			if (mngr->post_clkon_cb) {
+				rc = mngr->post_clkon_cb(mngr->priv_data,
+							MDSS_DSI_LINK_CLK,
+							l_state);
+				if (rc)
+					pr_err("post link clk on cb failed\n");
+			}
+		} else {
+			/*
+			 * Two conditions that need to be checked for Link
+			 * clocks:
+			 * 1. Link clocks need core clocks to be on when
+			 *    transitioning from EARLY_GATE to OFF state.
+			 * 2. ULPS mode might have to be enabled in case of OFF
+			 *    state. For ULPS, Link clocks should be turned ON
+			 *    first before they are turned off again.
+			 *
+			 * If Link is going from EARLY_GATE to OFF state AND
+			 * Core clock is already in EARLY_GATE or OFF state,
+			 * turn on Core clocks and link clocks.
+			 *
+			 * ULPS state is managed as part of the pre_clkoff_cb.
+			 */
+			if ((l_state == MDSS_DSI_CLK_OFF) &&
+			    (l_clks->current_clk_state ==
+			    MDSS_DSI_CLK_EARLY_GATE) &&
+			    (mngr->core_clks.current_clk_state !=
+			    MDSS_DSI_CLK_ON)) {
+				rc = dsi_core_clk_start(&mngr->core_clks);
+				if (rc) {
+					pr_err("core clks did not start\n");
+					goto error;
+				}
+
+				rc = dsi_link_clk_start(l_clks);
+				if (rc) {
+					pr_err("Link clks did not start\n");
+					goto error;
+				}
+				l_c_on = true;
+				pr_debug("ECG: core and Link_on\n");
+			}
+
+			if (mngr->pre_clkoff_cb) {
+				rc = mngr->pre_clkoff_cb(mngr->priv_data,
+					MDSS_DSI_LINK_CLK, l_state);
+				if (rc)
+					pr_err("pre link clk off cb failed\n");
+			}
+
+			rc = dsi_link_clk_stop(l_clks);
+			if (rc) {
+				pr_err("failed to stop link clk, rc = %d\n",
+				       rc);
+				goto error;
+			}
+
+			if (mngr->post_clkoff_cb) {
+				rc = mngr->post_clkoff_cb(mngr->priv_data,
+					MDSS_DSI_LINK_CLK, l_state);
+				if (rc)
+					pr_err("post link clk off cb failed\n");
+			}
+			/*
+			 * This check is to save unnecessary clock state
+			 * change when going from EARLY_GATE to OFF. In the
+			 * case where the request happens for both Core and Link
+			 * clocks in the same call, core clocks need to be
+			 * turned on first before OFF state can be entered.
+			 *
+			 * Core clocks are turned on here for Link clocks to go
+			 * to OFF state. If a core clock request is also present,
+			 * then core clocks can be turned off when Core clocks
+			 * are transitioned to OFF state.
+			 */
+			if (l_c_on && (!(c_clks && (c_state == MDSS_DSI_CLK_OFF)
+					 && (c_clks->current_clk_state ==
+					     MDSS_DSI_CLK_EARLY_GATE)))) {
+				rc = dsi_core_clk_stop(&mngr->core_clks);
+				if (rc) {
+					pr_err("core clks did not stop\n");
+					goto error;
+				}
+
+				l_c_on = false;
+				pr_debug("ECG: core off\n");
+			} else
+				pr_debug("ECG: core off skip\n");
+		}
+
+		l_clks->current_clk_state = l_state;
+	}
+
+	if (c_clks && (c_state != MDSS_DSI_CLK_ON)) {
+
+		/*
+		 * When going to OFF state from EARLY GATE state, Core clocks
+		 * should be turned on first so that the IOs can be clamped.
		 * If the l_c_on flag is set, the core clocks were turned on
		 * before the Link clocks went to OFF state. So Core clocks are
		 * already ON and this step can be skipped.
+		 *
+		 * IOs are clamped in pre_clkoff_cb callback.
+		 */
+		if ((c_state == MDSS_DSI_CLK_OFF) &&
+		    (c_clks->current_clk_state ==
+		    MDSS_DSI_CLK_EARLY_GATE) && !l_c_on) {
+			rc = dsi_core_clk_start(&mngr->core_clks);
+			if (rc) {
+				pr_err("core clks did not start\n");
+				goto error;
+			}
+			pr_debug("ECG: core on\n");
+		} else
+			pr_debug("ECG: core on skip\n");
+
+		if (mngr->pre_clkoff_cb) {
+			rc = mngr->pre_clkoff_cb(mngr->priv_data,
+						 MDSS_DSI_CORE_CLK,
+						 c_state);
+			if (rc)
+				pr_err("pre core clk off cb failed\n");
+		}
+
+		rc = dsi_core_clk_stop(c_clks);
+		if (rc) {
+			pr_err("failed to turn off core clks rc = %d\n", rc);
+			goto error;
+		}
+
+		if (c_state == MDSS_DSI_CLK_OFF) {
+			if (mngr->post_clkoff_cb) {
+				rc = mngr->post_clkoff_cb(mngr->priv_data,
+						MDSS_DSI_CORE_CLK,
+						MDSS_DSI_CLK_OFF);
+				if (rc)
+					pr_err("post clkoff cb fail, rc = %d\n",
+					       rc);
+			}
+		}
+		c_clks->current_clk_state = c_state;
+	}
+
+error:
+	return rc;
+}
+
+static int dsi_recheck_clk_state(struct mdss_dsi_clk_mngr *mngr)
+{
+	int rc = 0;
+	struct list_head *pos = NULL;
+	struct mdss_dsi_clk_client_info *c;
+	u32 new_core_clk_state = MDSS_DSI_CLK_OFF;
+	u32 new_link_clk_state = MDSS_DSI_CLK_OFF;
+	u32 old_c_clk_state = MDSS_DSI_CLK_OFF;
+	u32 old_l_clk_state = MDSS_DSI_CLK_OFF;
+	struct dsi_core_clks *c_clks = NULL;
+	struct dsi_link_clks *l_clks = NULL;
+
+	/*
+	 * Rules to maintain clock state:
+	 *	1. If any client is in ON state, clocks should be ON.
+	 *	2. If any client is in ECG state with rest of them turned OFF,
+	 *	   go to Early gate state.
+	 *	3. If all clients are off, then goto OFF state.
+	 */
+	list_for_each(pos, &mngr->client_list) {
+		c = list_entry(pos, struct mdss_dsi_clk_client_info, list);
+		if (c->core_clk_state == MDSS_DSI_CLK_ON) {
+			new_core_clk_state = MDSS_DSI_CLK_ON;
+			break;
+		} else if (c->core_clk_state == MDSS_DSI_CLK_EARLY_GATE) {
+			new_core_clk_state = MDSS_DSI_CLK_EARLY_GATE;
+		}
+	}
+
+	list_for_each(pos, &mngr->client_list) {
+		c = list_entry(pos, struct mdss_dsi_clk_client_info, list);
+		if (c->link_clk_state == MDSS_DSI_CLK_ON) {
+			new_link_clk_state = MDSS_DSI_CLK_ON;
+			break;
+		} else if (c->link_clk_state == MDSS_DSI_CLK_EARLY_GATE) {
+			new_link_clk_state = MDSS_DSI_CLK_EARLY_GATE;
+		}
+	}
+
+	if (new_core_clk_state != mngr->core_clks.current_clk_state)
+		c_clks = &mngr->core_clks;
+
+	if (new_link_clk_state != mngr->link_clks.current_clk_state)
+		l_clks = &mngr->link_clks;
+
+	old_c_clk_state = mngr->core_clks.current_clk_state;
+	old_l_clk_state = mngr->link_clks.current_clk_state;
+
+	pr_debug("%s: c_clk_state (%d -> %d)\n", mngr->name,
+		 old_c_clk_state, new_core_clk_state);
+	pr_debug("%s: l_clk_state (%d -> %d)\n", mngr->name,
+		 old_l_clk_state, new_link_clk_state);
+
+	MDSS_XLOG(old_c_clk_state, new_core_clk_state, old_l_clk_state,
+		  new_link_clk_state);
+	if (c_clks || l_clks) {
+		rc = dsi_update_clk_state(c_clks, new_core_clk_state,
+					  l_clks, new_link_clk_state);
+		if (rc) {
+			pr_err("failed to update clock state, rc = %d\n", rc);
+			goto error;
+		}
+	}
+
+error:
+	return rc;
+}
+
+static int dsi_set_clk_rate(struct mdss_dsi_clk_mngr *mngr, int clk, u32 rate,
+			    u32 flags)
+{
+	int rc = 0;
+
+	pr_debug("%s: clk = %d, rate = %d, flags = %d\n", mngr->name,
+		 clk, rate, flags);
+
+	MDSS_XLOG(clk, rate, flags);
+	switch (clk) {
+	case MDSS_DSI_LINK_ESC_CLK:
+		mngr->link_clks.esc_clk_rate = rate;
+		if (!flags) {
+			rc = clk_set_rate(mngr->link_clks.clks.esc_clk, rate);
+			if (rc)
+				pr_err("set rate failed for esc clk rc=%d\n",
+				       rc);
+		}
+		break;
+	case MDSS_DSI_LINK_BYTE_CLK:
+		mngr->link_clks.byte_clk_rate = rate;
+		if (!flags) {
+			rc = clk_set_rate(mngr->link_clks.clks.byte_clk, rate);
+			if (rc)
+				pr_err("set rate failed for byte clk rc=%d\n",
+				       rc);
+		}
+		break;
+	case MDSS_DSI_LINK_PIX_CLK:
+		mngr->link_clks.pix_clk_rate = rate;
+		if (!flags) {
+			rc = clk_set_rate(mngr->link_clks.clks.pixel_clk, rate);
+			if (rc)
+				pr_err("failed to set rate for pix clk rc=%d\n",
+				       rc);
+		}
+		break;
+	default:
+		pr_err("Unsupported clock (%d)\n", clk);
+		rc = -ENOTSUPP;
+		break;
+	}
+
+	return rc;
+}
+
+void *mdss_dsi_clk_register(void *clk_mngr, struct mdss_dsi_clk_client *client)
+{
+	void *handle = NULL;
+	struct mdss_dsi_clk_mngr *mngr = clk_mngr;
+	struct mdss_dsi_clk_client_info *c;
+
+	if (!mngr) {
+		pr_err("bad params\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	pr_debug("%s: ENTER\n", mngr->name);
+
+	mutex_lock(&mngr->clk_mutex);
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c) {
+		handle = ERR_PTR(-ENOMEM);
+		goto error;
+	}
+
+	strlcpy(c->name, client->client_name, MAX_CLIENT_NAME_LEN);
+	c->mngr = mngr;
+
+	list_add(&c->list, &mngr->client_list);
+
+	pr_debug("%s: Added new client (%s)\n", mngr->name, c->name);
+	handle = c;
+error:
+	mutex_unlock(&mngr->clk_mutex);
+	pr_debug("%s: EXIT, rc = %ld\n", mngr->name, PTR_ERR(handle));
+	return handle;
+}
+
+int mdss_dsi_clk_deregister(void *client)
+{
+	int rc = 0;
+	struct mdss_dsi_clk_client_info *c = client;
+	struct mdss_dsi_clk_mngr *mngr;
+	struct list_head *pos = NULL;
+	struct list_head *tmp = NULL;
+	struct mdss_dsi_clk_client_info *node;
+
+	if (!client) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mngr = c->mngr;
+	pr_debug("%s: ENTER\n", mngr->name);
+	mutex_lock(&mngr->clk_mutex);
+	c->core_clk_state = MDSS_DSI_CLK_OFF;
+	c->link_clk_state = MDSS_DSI_CLK_OFF;
+
+	rc = dsi_recheck_clk_state(mngr);
+	if (rc) {
+		pr_err("clock state recheck failed rc = %d\n", rc);
+		goto error;
+	}
+
+	list_for_each_safe(pos, tmp, &mngr->client_list) {
+		node = list_entry(pos, struct mdss_dsi_clk_client_info,
+				  list);
+		if (node == c) {
+			list_del(&node->list);
+			pr_debug("Removed device (%s)\n", node->name);
+			kfree(node);
+			break;
+		}
+	}
+
+error:
+	mutex_unlock(&mngr->clk_mutex);
+	pr_debug("%s: EXIT, rc = %d\n", mngr->name, rc);
+	return rc;
+}
+
+bool is_dsi_clk_in_ecg_state(void *client)
+{
+	struct mdss_dsi_clk_client_info *c = client;
+	struct mdss_dsi_clk_mngr *mngr;
+	bool is_ecg = false;
+
+
+	if (!client) {
+		pr_err("Invalid client params\n");
+		goto end;
+	}
+
+	mngr = c->mngr;
+
+	mutex_lock(&mngr->clk_mutex);
+	is_ecg = (c->core_clk_state == MDSS_DSI_CLK_EARLY_GATE);
+	mutex_unlock(&mngr->clk_mutex);
+
+end:
+	return is_ecg;
+}
+
+int mdss_dsi_clk_req_state(void *client, enum mdss_dsi_clk_type clk,
+	enum mdss_dsi_clk_state state, u32 index)
+{
+	int rc = 0;
+	struct mdss_dsi_clk_client_info *c = client;
+	struct mdss_dsi_clk_mngr *mngr;
+	bool changed = false;
+
+	if (!client || !clk || clk > (MDSS_DSI_CORE_CLK | MDSS_DSI_LINK_CLK) ||
+	    state > MDSS_DSI_CLK_EARLY_GATE) {
+		pr_err("Invalid params, client = %pK, clk = 0x%x, state = %d\n",
+		       client, clk, state);
+		return -EINVAL;
+	}
+
+	mngr = c->mngr;
+	mutex_lock(&mngr->clk_mutex);
+
+	pr_debug("[%s]%s: CLK=%d, new_state=%d, core=%d, linkl=%d\n",
+	       c->name, mngr->name, clk, state, c->core_clk_state,
+	       c->link_clk_state);
+
+	MDSS_XLOG(index, clk, state, c->core_clk_state, c->link_clk_state);
+	/*
+	 * Refcount handling rules:
+	 *	1. Increment refcount whenever ON is called
+	 *	2. Do not decrement when going from EARLY_GATE to OFF.
+	 *	3. Decrement refcount when either OFF or EARLY_GATE is called
+	 */
+	if (state == MDSS_DSI_CLK_ON) {
+		if (clk & MDSS_DSI_CORE_CLK) {
+			c->core_refcount++;
+			if (c->core_clk_state != MDSS_DSI_CLK_ON) {
+				c->core_clk_state = MDSS_DSI_CLK_ON;
+				changed = true;
+			}
+		}
+		if (clk & MDSS_DSI_LINK_CLK) {
+			c->link_refcount++;
+			if (c->link_clk_state != MDSS_DSI_CLK_ON) {
+				c->link_clk_state = MDSS_DSI_CLK_ON;
+				changed = true;
+			}
+		}
+	} else if ((state == MDSS_DSI_CLK_EARLY_GATE) ||
+		   (state == MDSS_DSI_CLK_OFF)) {
+		if (clk & MDSS_DSI_CORE_CLK) {
+			if (c->core_refcount == 0) {
+				if ((c->core_clk_state ==
+				    MDSS_DSI_CLK_EARLY_GATE) &&
+				    (state == MDSS_DSI_CLK_OFF)) {
+					changed = true;
+					c->core_clk_state = MDSS_DSI_CLK_OFF;
+				} else {
+					pr_warn("Core refcount is zero for %s",
+						c->name);
+				}
+			} else {
+				c->core_refcount--;
+				if (c->core_refcount == 0) {
+					c->core_clk_state = state;
+					changed = true;
+				}
+			}
+		}
+		if (clk & MDSS_DSI_LINK_CLK) {
+			if (c->link_refcount == 0) {
+				if ((c->link_clk_state ==
+				    MDSS_DSI_CLK_EARLY_GATE) &&
+				    (state == MDSS_DSI_CLK_OFF)) {
+					changed = true;
+					c->link_clk_state = MDSS_DSI_CLK_OFF;
+				} else {
+					pr_warn("Link refcount is zero for %s",
+						c->name);
+				}
+			} else {
+				c->link_refcount--;
+				if (c->link_refcount == 0) {
+					c->link_clk_state = state;
+					changed = true;
+				}
+			}
+		}
+	}
+	pr_debug("[%s]%s: change=%d, Core (ref=%d, state=%d), Link (ref=%d, state=%d)\n",
+		 c->name, mngr->name, changed, c->core_refcount,
+		 c->core_clk_state, c->link_refcount, c->link_clk_state);
+	MDSS_XLOG(index, clk, state, c->core_clk_state, c->link_clk_state);
+
+	if (changed) {
+		rc = dsi_recheck_clk_state(mngr);
+		if (rc)
+			pr_err("Failed to adjust clock state rc = %d\n", rc);
+	}
+
+	mutex_unlock(&mngr->clk_mutex);
+	return rc;
+}
+
+int mdss_dsi_clk_set_link_rate(void *client, enum mdss_dsi_link_clk_type clk,
+			       u32 rate, u32 flags)
+{
+	int rc = 0;
+	struct mdss_dsi_clk_client_info *c = client;
+	struct mdss_dsi_clk_mngr *mngr;
+
+	if (!client || (clk > MDSS_DSI_LINK_CLK_MAX)) {
+		pr_err("Invalid params, client = %pK, clk = 0x%x", client, clk);
+		return -EINVAL;
+	}
+
+	mngr = c->mngr;
+	pr_debug("%s: ENTER\n", mngr->name);
+	mutex_lock(&mngr->clk_mutex);
+
+	rc = dsi_set_clk_rate(mngr, clk, rate, flags);
+	if (rc)
+		pr_err("Failed to set rate for clk %d, rate = %d, rc = %d\n",
+		       clk, rate, rc);
+
+	mutex_unlock(&mngr->clk_mutex);
+	pr_debug("%s: EXIT, rc = %d\n", mngr->name, rc);
+	return rc;
+}
+
+void *mdss_dsi_clk_init(struct mdss_dsi_clk_info *info)
+{
+	struct mdss_dsi_clk_mngr *mngr;
+
+	if (!info) {
+		pr_err("Invalid params\n");
+		return ERR_PTR(-EINVAL);
+	}
+	pr_debug("ENTER %s\n", info->name);
+	mngr = kzalloc(sizeof(*mngr), GFP_KERNEL);
+	if (!mngr) {
+		mngr = ERR_PTR(-ENOMEM);
+		goto error;
+	}
+
+	mutex_init(&mngr->clk_mutex);
+	memcpy(&mngr->core_clks.clks, &info->core_clks, sizeof(struct
+						 mdss_dsi_core_clk_info));
+	memcpy(&mngr->link_clks.clks, &info->link_clks, sizeof(struct
+						 mdss_dsi_link_clk_info));
+
+	INIT_LIST_HEAD(&mngr->client_list);
+	mngr->pre_clkon_cb = info->pre_clkon_cb;
+	mngr->post_clkon_cb = info->post_clkon_cb;
+	mngr->pre_clkoff_cb = info->pre_clkoff_cb;
+	mngr->post_clkoff_cb = info->post_clkoff_cb;
+	mngr->priv_data = info->priv_data;
+	mngr->reg_bus_clt = mdss_reg_bus_vote_client_create(info->name);
+	if (IS_ERR(mngr->reg_bus_clt)) {
+		pr_err("Unable to get handle for reg bus vote\n");
+		kfree(mngr);
+		mngr = ERR_PTR(-EINVAL);
+		goto error;
+	}
+	memcpy(mngr->name, info->name, DSI_CLK_NAME_LEN);
+error:
+	pr_debug("EXIT %s, rc = %ld\n", mngr->name, PTR_ERR(mngr));
+	return mngr;
+}
+
+int mdss_dsi_clk_deinit(void *clk_mngr)
+{
+	int rc = 0;
+	struct mdss_dsi_clk_mngr *mngr = clk_mngr;
+	struct list_head *position = NULL;
+	struct list_head *tmp = NULL;
+	struct mdss_dsi_clk_client_info *node;
+
+	if (!mngr) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	pr_debug("%s: ENTER\n", mngr->name);
+	mutex_lock(&mngr->clk_mutex);
+
+	list_for_each_safe(position, tmp, &mngr->client_list) {
+		node = list_entry(position, struct mdss_dsi_clk_client_info,
+				  list);
+		list_del(&node->list);
+		pr_debug("Removed device (%s)\n", node->name);
+		kfree(node);
+	}
+
+	rc = dsi_recheck_clk_state(mngr);
+	if (rc)
+		pr_err("failed to disable all clocks\n");
+	mdss_reg_bus_vote_client_destroy(mngr->reg_bus_clt);
+	mutex_unlock(&mngr->clk_mutex);
+	pr_debug("%s: EXIT, rc = %d\n", mngr->name, rc);
+	kfree(mngr);
+	return rc;
+}
+
+int mdss_dsi_clk_force_toggle(void *client, u32 clk)
+{
+	int rc = 0;
+	struct mdss_dsi_clk_client_info *c = client;
+	struct mdss_dsi_clk_mngr *mngr;
+
+	if (!client || !clk || clk >= MDSS_DSI_CLKS_MAX) {
+		pr_err("Invalid params, client = %pK, clk = 0x%x\n",
+		       client, clk);
+		return -EINVAL;
+	}
+
+	mngr = c->mngr;
+	mutex_lock(&mngr->clk_mutex);
+
+	if ((clk & MDSS_DSI_CORE_CLK) &&
+	    (mngr->core_clks.current_clk_state == MDSS_DSI_CLK_ON)) {
+
+		rc = dsi_core_clk_stop(&mngr->core_clks);
+		if (rc) {
+			pr_err("failed to stop core clks\n");
+			goto error;
+		}
+
+		rc = dsi_core_clk_start(&mngr->core_clks);
+		if (rc)
+			pr_err("failed to start core clks\n");
+
+	} else if (clk & MDSS_DSI_CORE_CLK) {
+		pr_err("cannot reset, core clock is off\n");
+		rc = -ENOTSUPP;
+		goto error;
+	}
+
+	if ((clk & MDSS_DSI_LINK_CLK) &&
+	    (mngr->link_clks.current_clk_state == MDSS_DSI_CLK_ON)) {
+
+		rc = dsi_link_clk_stop(&mngr->link_clks);
+		if (rc) {
+			pr_err("failed to stop link clks\n");
+			goto error;
+		}
+
+		rc = dsi_link_clk_start(&mngr->link_clks);
+		if (rc)
+			pr_err("failed to start link clks\n");
+
+	} else if (clk & MDSS_DSI_LINK_CLK) {
+		pr_err("cannot reset, link clock is off\n");
+		rc = -ENOTSUPP;
+		goto error;
+	}
+
+error:
+	mutex_unlock(&mngr->clk_mutex);
+	return rc;
+}
diff --git a/drivers/video/fbdev/msm/mdss_dsi_clk.h b/drivers/video/fbdev/msm/mdss_dsi_clk.h
new file mode 100644
index 0000000..837f2f6
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_dsi_clk.h
@@ -0,0 +1,251 @@
+/* Copyright (c) 2015-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MDSS_DSI_CLK_H_
+#define _MDSS_DSI_CLK_H_
+
+#include <linux/mdss_io_util.h>
+#include <linux/list.h>
+
+#define DSI_CLK_NAME_LEN 20
+
+#define MDSS_DSI_CLK_UPDATE_CLK_RATE_AT_ON 0x1
+
+enum mdss_dsi_clk_state {
+	MDSS_DSI_CLK_OFF,
+	MDSS_DSI_CLK_ON,
+	MDSS_DSI_CLK_EARLY_GATE,
+};
+
+enum dsi_clk_req_client {
+	DSI_CLK_REQ_MDP_CLIENT = 0,
+	DSI_CLK_REQ_DSI_CLIENT,
+};
+
+enum mdss_dsi_link_clk_type {
+	MDSS_DSI_LINK_ESC_CLK,
+	MDSS_DSI_LINK_BYTE_CLK,
+	MDSS_DSI_LINK_PIX_CLK,
+	MDSS_DSI_LINK_CLK_MAX,
+};
+
+enum mdss_dsi_clk_type {
+	MDSS_DSI_CORE_CLK = BIT(0),
+	MDSS_DSI_LINK_CLK = BIT(1),
+	MDSS_DSI_ALL_CLKS = (BIT(0) | BIT(1)),
+	MDSS_DSI_CLKS_MAX = BIT(2),
+};
+
+/**
+ * typedef *pre_clockoff_cb() - Callback before clock is turned off
+ * @priv: private data pointer.
+ * @clk_type: clock which is being turned off.
+ * @new_state: next state for the clock.
+ *
+ * @return: error code.
+ */
+typedef int (*pre_clockoff_cb)(void *priv,
+			       enum mdss_dsi_clk_type clk_type,
+			       enum mdss_dsi_clk_state new_state);
+
+/**
+ * typedef *post_clockoff_cb() - Callback after clock is turned off
+ * @priv: private data pointer.
+ * @clk_type: clock which was turned off.
+ * @curr_state: current state for the clock.
+ *
+ * @return: error code.
+ */
+typedef int (*post_clockoff_cb)(void *priv,
+				enum mdss_dsi_clk_type clk_type,
+				enum mdss_dsi_clk_state curr_state);
+
+/**
+ * typedef *post_clockon_cb() - Callback after clock is turned on
+ * @priv: private data pointer.
+ * @clk_type: clock which was turned on.
+ * @curr_state: current state for the clock.
+ *
+ * @return: error code.
+ */
+typedef int (*post_clockon_cb)(void *priv,
+			       enum mdss_dsi_clk_type clk_type,
+			       enum mdss_dsi_clk_state curr_state);
+
+/**
+ * typedef *pre_clockon_cb() - Callback before clock is turned on
+ * @priv: private data pointer.
+ * @clk_type: clock which is being turned on.
+ * @new_state: next state for the clock.
+ *
+ * @return: error code.
+ */
+typedef int (*pre_clockon_cb)(void *priv,
+			      enum mdss_dsi_clk_type clk_type,
+			      enum mdss_dsi_clk_state new_state);
+
+struct mdss_dsi_core_clk_info {
+	struct clk *mdp_core_clk;
+	struct clk *ahb_clk;
+	struct clk *axi_clk;
+	struct clk *mmss_misc_ahb_clk;
+};
+
+struct mdss_dsi_link_clk_info {
+	struct clk *esc_clk;
+	struct clk *byte_clk;
+	struct clk *pixel_clk;
+};
+
+struct dsi_panel_clk_ctrl {
+	enum mdss_dsi_clk_state state;
+	enum dsi_clk_req_client client;
+};
+
+/**
+ * struct mdss_dsi_clk_info - clock information to initialize manager
+ * @name: name for the clocks to identify debug logs.
+ * @core_clks: core clock information.
+ * @link_clks: link clock information.
+ * @pre_clkoff_cb: callback before a clock is turned off.
+ * @post_clkoff_cb: callback after a clock is turned off.
+ * @pre_clkon_cb: callback before a clock is turned on.
+ * @post_clkon_cb: callback after a clock is turned on.
+ * @priv_data: pointer to private data passed to callbacks.
+ */
+struct mdss_dsi_clk_info {
+	char name[DSI_CLK_NAME_LEN];
+	struct mdss_dsi_core_clk_info core_clks;
+	struct mdss_dsi_link_clk_info link_clks;
+	pre_clockoff_cb pre_clkoff_cb;
+	post_clockoff_cb post_clkoff_cb;
+	post_clockon_cb post_clkon_cb;
+	pre_clockon_cb pre_clkon_cb;
+	void *priv_data;
+};
+
+struct mdss_dsi_clk_client {
+	char *client_name;
+};
+
+/**
+ * mdss_dsi_clk_init() - Initializes clock manager
+ * @info: Clock information to be managed by the clock manager.
+ *
+ * The Init API should be called during probe of the dsi driver. DSI driver
+ * provides the clock handles to the core clocks and link clocks that will be
+ * managed by the clock manager.
+ *
+ * returns handle or an error value.
+ */
+void *mdss_dsi_clk_init(struct mdss_dsi_clk_info *info);
+
+/**
+ * mdss_dsi_clk_deinit() - Deinitializes the clock manager
+ * @mngr: handle returned by mdss_dsi_clk_init().
+ *
+ * Deinit will turn off all the clocks and release all the resources acquired
+ * by mdss_dsi_clk_init().
+ *
+ * @return: error code.
+ */
+int mdss_dsi_clk_deinit(void *mngr);
+
+/**
+ * mdss_dsi_clk_register() - Register a client to control clock state
+ * @mngr: handle returned by mdss_dsi_clk_init().
+ * @client: client information.
+ *
+ * Register allows clients for DSI clock manager to acquire a handle which can
+ * be used to request a specific clock state. The clock manager maintains a
+ * reference count of the clock states requested by each client. Client has to
+ * ensure that ON and OFF/EARLY_GATE calls are balanced properly.
+ *
+ * Requesting a particular clock state does not guarantee that physical clock
+ * state. Physical clock state is determined by the states requested by all
+ * clients.
+ *
+ * @return: handle or error code.
+ */
+void *mdss_dsi_clk_register(void *mngr, struct mdss_dsi_clk_client *client);
+
+/**
+ * mdss_dsi_clk_deregister() - Deregister a registered client.
+ * @client: client handle returned by mdss_dsi_clk_register().
+ *
+ * Deregister releases all resources acquired by mdss_dsi_clk_register().
+ *
+ * @return: error code.
+ */
+int mdss_dsi_clk_deregister(void *client);
+
+/**
+ * mdss_dsi_clk_req_state() - Request a specific clock state
+ * @client: client handle.
+ * @clk: Type of clock requested (enum mdss_dsi_clk_type).
+ * @state: clock state requested.
+ * @index: controller index.
+ *
+ * This routine is used to request a new clock state for a specific clock. If
+ * turning ON the clocks, this guarantees that clocks will be on before
+ * returning. Valid state transitions are ON -> EARLY GATE, ON -> OFF,
+ * EARLY GATE -> OFF, EARLY GATE -> ON and OFF -> ON.
+ *
+ * @return: error code.
+ */
+int mdss_dsi_clk_req_state(void *client, enum mdss_dsi_clk_type clk,
+	enum mdss_dsi_clk_state state, u32 index);
+
+/**
+ * mdss_dsi_clk_set_link_rate() - set clock rate for link clocks
+ * @client: client handle.
+ * @clk: type of clock.
+ * @rate: clock rate in Hz.
+ * @flags: flags.
+ *
+ * This routine is used to request a specific clock rate. It supports an
+ * additional flags argument which can change the behavior of the routine. If
+ * MDSS_DSI_CLK_UPDATE_CLK_RATE_AT_ON flag is set, the routine caches the new
+ * clock rate and applies it next time when the clock is turned on.
+ *
+ * @return: error code.
+ */
+int mdss_dsi_clk_set_link_rate(void *client, enum mdss_dsi_link_clk_type clk,
+			       u32 rate, u32 flags);
+
+/**
+ * mdss_dsi_clk_force_toggle() - Turn off and turn on clocks
+ * @client: client handle.
+ * @clk: clock type.
+ *
+ * This routine has to be used in cases where clocks have to be toggled
+ * irrespective of the refcount. This API bypasses the refcount and turns off
+ * and turns on the clocks. This will fail if the clocks are in OFF state
+ * already.
+ *
+ * @return:error code.
+ */
+int mdss_dsi_clk_force_toggle(void *client, u32 clk);
+
+/**
+ * is_dsi_clk_in_ecg_state() - Checks the current state of clocks
+ * @client: client handle.
+ *
+ * This routine checks the clock status for the given client and returns
+ * a result based on it.
+ *
+ * @return:true: if clocks are in ECG state
+ *         false: for all other cases
+ */
+bool is_dsi_clk_in_ecg_state(void *client);
+#endif /* _MDSS_DSI_CLK_H_ */
diff --git a/drivers/video/fbdev/msm/mdss_dsi_cmd.c b/drivers/video/fbdev/msm/mdss_dsi_cmd.c
new file mode 100644
index 0000000..c67fd8a
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_dsi_cmd.c
@@ -0,0 +1,793 @@
+/* Copyright (c) 2012-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/iopoll.h>
+#include <linux/kthread.h>
+
+#include "mdss_dsi_cmd.h"
+#include "mdss_dsi.h"
+#include "mdss_smmu.h"
+
+/*
+ * mipi dsi buf mechanism
+ */
+char *mdss_dsi_buf_reserve(struct dsi_buf *dp, int len)
+{
+	dp->data += len;
+	return dp->data;
+}
+
+char *mdss_dsi_buf_unreserve(struct dsi_buf *dp, int len)
+{
+	dp->data -= len;
+	return dp->data;
+}
+
+char *mdss_dsi_buf_push(struct dsi_buf *dp, int len)
+{
+	dp->data -= len;
+	dp->len += len;
+	return dp->data;
+}
+
+char *mdss_dsi_buf_reserve_hdr(struct dsi_buf *dp, int hlen)
+{
+	dp->hdr = (u32 *)dp->data;
+	return mdss_dsi_buf_reserve(dp, hlen);
+}
+
+char *mdss_dsi_buf_init(struct dsi_buf *dp)
+{
+	int off;
+
+	dp->data = dp->start;
+	off = (int) (unsigned long) dp->data;
+	/* 8 byte align */
+	off &= 0x07;
+	if (off)
+		off = 8 - off;
+	dp->data += off;
+	dp->len = 0;
+	dp->read_cnt = 0;
+	return dp->data;
+}
+
+int mdss_dsi_buf_alloc(struct device *ctrl_dev, struct dsi_buf *dp, int size)
+{
+	dp->start = mdss_smmu_dsi_alloc_buf(ctrl_dev, size, &dp->dmap,
+			GFP_KERNEL);
+	if (dp->start == NULL) {
+		pr_err("%s:%u\n", __func__, __LINE__);
+		return -ENOMEM;
+	}
+	dp->end = dp->start + size;
+	dp->size = size;
+
+	if ((int) (unsigned long) dp->start & 0x07)
+		pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
+
+	dp->data = dp->start;
+	dp->len = 0;
+	dp->read_cnt = 0;
+	return size;
+}
+
+/*
+ * mipi dsi generic long write
+ */
+static int mdss_dsi_generic_lwrite(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	struct dsi_ctrl_hdr *dchdr;
+	char *bp;
+	u32 *hp;
+	int i, len = 0;
+
+	dchdr = &cm->dchdr;
+	bp = mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+
+	/* fill up payload */
+	if (cm->payload) {
+		len = dchdr->dlen;
+		len += 3;
+		len &= ~0x03;	/* round up to a multiple of 4 */
+		for (i = 0; i < dchdr->dlen; i++)
+			*bp++ = cm->payload[i];
+
+		/* append 0xff to the end */
+		for (; i < len; i++)
+			*bp++ = 0xff;
+
+		dp->len += len;
+	}
+
+	/* fill up header */
+	hp = dp->hdr;
+	*hp = 0;
+	*hp = DSI_HDR_WC(dchdr->dlen);
+	*hp |= DSI_HDR_VC(dchdr->vc);
+	*hp |= DSI_HDR_LONG_PKT;
+	*hp |= DSI_HDR_DTYPE(DTYPE_GEN_LWRITE);
+	if (dchdr->last)
+		*hp |= DSI_HDR_LAST;
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+	len += DSI_HOST_HDR_SIZE;
+
+	return len;
+}
+
+/*
+ * mipi dsi generic short write with 0, 1 or 2 parameters
+ */
+static int mdss_dsi_generic_swrite(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	struct dsi_ctrl_hdr *dchdr;
+	u32 *hp;
+	int len;
+
+	dchdr = &cm->dchdr;
+	if (dchdr->dlen && cm->payload == 0) {
+		pr_err("%s: NO payload error\n", __func__);
+		return 0;
+	}
+
+	mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(dchdr->vc);
+	if (dchdr->last)
+		*hp |= DSI_HDR_LAST;
+
+
+	len = (dchdr->dlen > 2) ? 2 : dchdr->dlen;
+
+	if (len == 1) {
+		*hp |= DSI_HDR_DTYPE(DTYPE_GEN_WRITE1);
+		*hp |= DSI_HDR_DATA1(cm->payload[0]);
+		*hp |= DSI_HDR_DATA2(0);
+	} else if (len == 2) {
+		*hp |= DSI_HDR_DTYPE(DTYPE_GEN_WRITE2);
+		*hp |= DSI_HDR_DATA1(cm->payload[0]);
+		*hp |= DSI_HDR_DATA2(cm->payload[1]);
+	} else {
+		*hp |= DSI_HDR_DTYPE(DTYPE_GEN_WRITE);
+		*hp |= DSI_HDR_DATA1(0);
+		*hp |= DSI_HDR_DATA2(0);
+	}
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+	return DSI_HOST_HDR_SIZE; /* 4 bytes */
+}
+
+/*
+ * mipi dsi generic read with 0, 1 or 2 parameters
+ */
+static int mdss_dsi_generic_read(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	struct dsi_ctrl_hdr *dchdr;
+	u32 *hp;
+	int len;
+
+	dchdr = &cm->dchdr;
+	if (dchdr->dlen && cm->payload == 0) {
+		pr_err("%s: NO payload error\n", __func__);
+		return 0;
+	}
+
+	mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(dchdr->vc);
+	*hp |= DSI_HDR_BTA;
+	if (dchdr->last)
+		*hp |= DSI_HDR_LAST;
+
+	len = (dchdr->dlen > 2) ? 2 : dchdr->dlen;
+
+	if (len == 1) {
+		*hp |= DSI_HDR_DTYPE(DTYPE_GEN_READ1);
+		*hp |= DSI_HDR_DATA1(cm->payload[0]);
+		*hp |= DSI_HDR_DATA2(0);
+	} else if (len == 2) {
+		*hp |= DSI_HDR_DTYPE(DTYPE_GEN_READ2);
+		*hp |= DSI_HDR_DATA1(cm->payload[0]);
+		*hp |= DSI_HDR_DATA2(cm->payload[1]);
+	} else {
+		*hp |= DSI_HDR_DTYPE(DTYPE_GEN_READ);
+		*hp |= DSI_HDR_DATA1(0);
+		*hp |= DSI_HDR_DATA2(0);
+	}
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+	return DSI_HOST_HDR_SIZE; /* 4 bytes */
+}
+
+/*
+ * mipi dsi dcs long write
+ */
+static int mdss_dsi_dcs_lwrite(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	struct dsi_ctrl_hdr *dchdr;
+	char *bp;
+	u32 *hp;
+	int i, len = 0;
+
+	dchdr = &cm->dchdr;
+	bp = mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+
+	/*
+	 * fill up payload
+	 * dcs command byte (first byte) followed by payload
+	 */
+	if (cm->payload) {
+		len = dchdr->dlen;
+		len += 3;
+		len &= ~0x03;	/* round up to a multiple of 4 */
+		for (i = 0; i < dchdr->dlen; i++)
+			*bp++ = cm->payload[i];
+
+		/* append 0xff to the end */
+		for (; i < len; i++)
+			*bp++ = 0xff;
+
+		dp->len += len;
+	}
+
+	/* fill up header */
+	hp = dp->hdr;
+	*hp = 0;
+	*hp = DSI_HDR_WC(dchdr->dlen);
+	*hp |= DSI_HDR_VC(dchdr->vc);
+	*hp |= DSI_HDR_LONG_PKT;
+	*hp |= DSI_HDR_DTYPE(DTYPE_DCS_LWRITE);
+	if (dchdr->last)
+		*hp |= DSI_HDR_LAST;
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	len += DSI_HOST_HDR_SIZE;
+	return len;
+}
+
+/*
+ * mipi dsi dcs short write with 0 parameters
+ */
+static int mdss_dsi_dcs_swrite(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	struct dsi_ctrl_hdr *dchdr;
+	u32 *hp;
+	int len;
+
+	dchdr = &cm->dchdr;
+	if (cm->payload == 0) {
+		pr_err("%s: NO payload error\n", __func__);
+		return -EINVAL;
+	}
+
+	mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(dchdr->vc);
+	if (dchdr->ack)		/* ask ACK trigger msg from peripheral */
+		*hp |= DSI_HDR_BTA;
+	if (dchdr->last)
+		*hp |= DSI_HDR_LAST;
+
+	len = (dchdr->dlen > 1) ? 1 : dchdr->dlen;
+
+	*hp |= DSI_HDR_DTYPE(DTYPE_DCS_WRITE);
+	*hp |= DSI_HDR_DATA1(cm->payload[0]);	/* dcs command byte */
+	*hp |= DSI_HDR_DATA2(0);
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+	return DSI_HOST_HDR_SIZE; /* 4 bytes */
+}
+
+/*
+ * mipi dsi dcs short write with 1 parameter
+ */
+static int mdss_dsi_dcs_swrite1(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	struct dsi_ctrl_hdr *dchdr;
+	u32 *hp;
+
+	dchdr = &cm->dchdr;
+	if (dchdr->dlen < 2 || cm->payload == 0) {
+		pr_err("%s: NO payload error\n", __func__);
+		return -EINVAL;
+	}
+
+	mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(dchdr->vc);
+	if (dchdr->ack)		/* ask ACK trigger msg from peripheral */
+		*hp |= DSI_HDR_BTA;
+	if (dchdr->last)
+		*hp |= DSI_HDR_LAST;
+
+	*hp |= DSI_HDR_DTYPE(DTYPE_DCS_WRITE1);
+	*hp |= DSI_HDR_DATA1(cm->payload[0]);	/* dcs command byte */
+	*hp |= DSI_HDR_DATA2(cm->payload[1]);	/* parameter */
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+	return DSI_HOST_HDR_SIZE; /* 4 bytes */
+}
+/*
+ * mipi dsi dcs read with 0 parameters
+ */
+
+static int mdss_dsi_dcs_read(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	struct dsi_ctrl_hdr *dchdr;
+	u32 *hp;
+
+	dchdr = &cm->dchdr;
+	if (cm->payload == 0) {
+		pr_err("%s: NO payload error\n", __func__);
+		return -EINVAL;
+	}
+
+	mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(dchdr->vc);
+	*hp |= DSI_HDR_BTA;
+	*hp |= DSI_HDR_DTYPE(DTYPE_DCS_READ);
+	if (dchdr->last)
+		*hp |= DSI_HDR_LAST;
+
+	*hp |= DSI_HDR_DATA1(cm->payload[0]);	/* dcs command byte */
+	*hp |= DSI_HDR_DATA2(0);
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+	return DSI_HOST_HDR_SIZE; /* 4 bytes */
+}
+
+static int mdss_dsi_cm_on(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	struct dsi_ctrl_hdr *dchdr;
+	u32 *hp;
+
+	dchdr = &cm->dchdr;
+	mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(dchdr->vc);
+	*hp |= DSI_HDR_DTYPE(DTYPE_CM_ON);
+	if (dchdr->last)
+		*hp |= DSI_HDR_LAST;
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+	return DSI_HOST_HDR_SIZE; /* 4 bytes */
+}
+
+static int mdss_dsi_dsc_pps(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	struct dsi_ctrl_hdr *dchdr;
+	char *bp;
+	u32 *hp;
+	int i, len = 0;
+
+	dchdr = &cm->dchdr;
+	bp = mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+
+	/*
+	 * fill up payload
+	 * dcs command byte (first byte) followed by payload
+	 */
+	if (cm->payload) {
+		len = dchdr->dlen;
+		len += 3;
+		len &= ~0x03;	/* round up to a multiple of 4 */
+		for (i = 0; i < dchdr->dlen; i++)
+			*bp++ = cm->payload[i];
+
+		/* append 0xff to the end */
+		for (; i < len; i++)
+			*bp++ = 0xff;
+
+		dp->len += len;
+	}
+
+	/* fill up header */
+	hp = dp->hdr;
+	*hp = 0;
+	*hp = DSI_HDR_WC(dchdr->dlen);
+	*hp |= DSI_HDR_VC(dchdr->vc);
+	*hp |= DSI_HDR_LONG_PKT;
+	*hp |= DSI_HDR_DTYPE(DTYPE_PPS);
+	if (dchdr->last)
+		*hp |= DSI_HDR_LAST;
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	len += DSI_HOST_HDR_SIZE;
+	return len;
+}
+
+static int mdss_dsi_cm_off(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	struct dsi_ctrl_hdr *dchdr;
+	u32 *hp;
+
+	dchdr = &cm->dchdr;
+	mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(dchdr->vc);
+	*hp |= DSI_HDR_DTYPE(DTYPE_CM_OFF);
+	if (dchdr->last)
+		*hp |= DSI_HDR_LAST;
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+	return DSI_HOST_HDR_SIZE; /* 4 bytes */
+}
+
+static int mdss_dsi_peripheral_on(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	struct dsi_ctrl_hdr *dchdr;
+	u32 *hp;
+
+	dchdr = &cm->dchdr;
+	mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(dchdr->vc);
+	*hp |= DSI_HDR_DTYPE(DTYPE_PERIPHERAL_ON);
+	if (dchdr->last)
+		*hp |= DSI_HDR_LAST;
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+	return DSI_HOST_HDR_SIZE; /* 4 bytes */
+}
+
+static int mdss_dsi_peripheral_off(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	struct dsi_ctrl_hdr *dchdr;
+	u32 *hp;
+
+	dchdr = &cm->dchdr;
+	mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(dchdr->vc);
+	*hp |= DSI_HDR_DTYPE(DTYPE_PERIPHERAL_OFF);
+	if (dchdr->last)
+		*hp |= DSI_HDR_LAST;
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+	return DSI_HOST_HDR_SIZE; /* 4 bytes */
+}
+
+static int mdss_dsi_set_max_pktsize(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	struct dsi_ctrl_hdr *dchdr;
+	u32 *hp;
+
+	dchdr = &cm->dchdr;
+	if (cm->payload == 0) {
+		pr_err("%s: NO payload error\n", __func__);
+		return 0;
+	}
+
+	mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(dchdr->vc);
+	*hp |= DSI_HDR_DTYPE(DTYPE_MAX_PKTSIZE);
+	if (dchdr->last)
+		*hp |= DSI_HDR_LAST;
+
+	*hp |= DSI_HDR_DATA1(cm->payload[0]);
+	*hp |= DSI_HDR_DATA2(cm->payload[1]);
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+	return DSI_HOST_HDR_SIZE; /* 4 bytes */
+}
+
+static int mdss_dsi_compression_mode(struct dsi_buf *dp,
+					struct dsi_cmd_desc *cm)
+{
+	struct dsi_ctrl_hdr *dchdr;
+	u32 *hp;
+
+	dchdr = &cm->dchdr;
+	if (cm->payload == 0) {
+		pr_err("%s: NO payload error\n", __func__);
+		return 0;
+	}
+
+	mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(dchdr->vc);
+	*hp |= DSI_HDR_DTYPE(DTYPE_COMPRESSION_MODE);
+	if (dchdr->last)
+		*hp |= DSI_HDR_LAST;
+
+	*hp |= DSI_HDR_DATA1(cm->payload[0]);
+	*hp |= DSI_HDR_DATA2(cm->payload[1]);
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+	return DSI_HOST_HDR_SIZE; /* 4 bytes */
+}
+
+static int mdss_dsi_null_pkt(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	struct dsi_ctrl_hdr *dchdr;
+	u32 *hp;
+
+	dchdr = &cm->dchdr;
+	mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp = DSI_HDR_WC(dchdr->dlen);
+	*hp |= DSI_HDR_LONG_PKT;
+	*hp |= DSI_HDR_VC(dchdr->vc);
+	*hp |= DSI_HDR_DTYPE(DTYPE_NULL_PKT);
+	if (dchdr->last)
+		*hp |= DSI_HDR_LAST;
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+	return DSI_HOST_HDR_SIZE; /* 4 bytes */
+}
+
+static int mdss_dsi_blank_pkt(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	struct dsi_ctrl_hdr *dchdr;
+	u32 *hp;
+
+	dchdr = &cm->dchdr;
+	mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp = DSI_HDR_WC(dchdr->dlen);
+	*hp |= DSI_HDR_LONG_PKT;
+	*hp |= DSI_HDR_VC(dchdr->vc);
+	*hp |= DSI_HDR_DTYPE(DTYPE_BLANK_PKT);
+	if (dchdr->last)
+		*hp |= DSI_HDR_LAST;
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+	return DSI_HOST_HDR_SIZE; /* 4 bytes */
+}
+
+/*
+ * prepare cmd buffer to be txed
+ */
+int mdss_dsi_cmd_dma_add(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	struct dsi_ctrl_hdr *dchdr;
+	int len = 0;
+
+	dchdr = &cm->dchdr;
+
+	switch (dchdr->dtype) {
+	case DTYPE_GEN_WRITE:
+	case DTYPE_GEN_WRITE1:
+	case DTYPE_GEN_WRITE2:
+		len = mdss_dsi_generic_swrite(dp, cm);
+		break;
+	case DTYPE_GEN_LWRITE:
+		len = mdss_dsi_generic_lwrite(dp, cm);
+		break;
+	case DTYPE_GEN_READ:
+	case DTYPE_GEN_READ1:
+	case DTYPE_GEN_READ2:
+		len = mdss_dsi_generic_read(dp, cm);
+		break;
+	case DTYPE_DCS_LWRITE:
+		len = mdss_dsi_dcs_lwrite(dp, cm);
+		break;
+	case DTYPE_DCS_WRITE:
+		len = mdss_dsi_dcs_swrite(dp, cm);
+		break;
+	case DTYPE_DCS_WRITE1:
+		len = mdss_dsi_dcs_swrite1(dp, cm);
+		break;
+	case DTYPE_DCS_READ:
+		len = mdss_dsi_dcs_read(dp, cm);
+		break;
+	case DTYPE_MAX_PKTSIZE:
+		len = mdss_dsi_set_max_pktsize(dp, cm);
+		break;
+	case DTYPE_PPS:
+		len = mdss_dsi_dsc_pps(dp, cm);
+		break;
+	case DTYPE_COMPRESSION_MODE:
+		len = mdss_dsi_compression_mode(dp, cm);
+		break;
+	case DTYPE_NULL_PKT:
+		len = mdss_dsi_null_pkt(dp, cm);
+		break;
+	case DTYPE_BLANK_PKT:
+		len = mdss_dsi_blank_pkt(dp, cm);
+		break;
+	case DTYPE_CM_ON:
+		len = mdss_dsi_cm_on(dp, cm);
+		break;
+	case DTYPE_CM_OFF:
+		len = mdss_dsi_cm_off(dp, cm);
+		break;
+	case DTYPE_PERIPHERAL_ON:
+		len = mdss_dsi_peripheral_on(dp, cm);
+		break;
+	case DTYPE_PERIPHERAL_OFF:
+		len = mdss_dsi_peripheral_off(dp, cm);
+		break;
+	default:
+		pr_debug("%s: dtype=%x NOT supported\n",
+					__func__, dchdr->dtype);
+		break;
+
+	}
+
+	return len;
+}
+
+/*
+ * mdss_dsi_short_read1_resp: 1 parameter
+ */
+int mdss_dsi_short_read1_resp(struct dsi_buf *rp)
+{
+	/* strip out dcs type */
+	rp->data++;
+	rp->len = 1;
+	/* 1 byte for dcs type + 1 byte for ECC + 1 byte for 2nd data byte */
+	rp->read_cnt -= 3;
+	return rp->len;
+}
+
+/*
+ * mdss_dsi_short_read2_resp: 2 parameter
+ */
+int mdss_dsi_short_read2_resp(struct dsi_buf *rp)
+{
+	/* strip out dcs type */
+	rp->data++;
+	rp->len = 2;
+	rp->read_cnt -= 2; /* 1 byte for dcs type + 1 byte for ECC */
+	return rp->len;
+}
+
+int mdss_dsi_long_read_resp(struct dsi_buf *rp)
+{
+	/* strip out dcs header */
+	rp->data += 4;
+	rp->len -= 4;
+	rp->read_cnt -= 6; /* 4 bytes for dcs header + 2 bytes for CRC */
+	return rp->len;
+}
+
+static char set_tear_on[2] = {0x35, 0x00};
+static struct dsi_cmd_desc dsi_tear_on_cmd = {
+	{DTYPE_DCS_WRITE1, 1, 0, 0, 0, sizeof(set_tear_on)}, set_tear_on};
+
+static char set_tear_off[2] = {0x34, 0x00};
+static struct dsi_cmd_desc dsi_tear_off_cmd = {
+	{DTYPE_DCS_WRITE, 1, 0, 0, 0, sizeof(set_tear_off)}, set_tear_off};
+
+void mdss_dsi_set_tear_on(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	struct dcs_cmd_req cmdreq;
+	struct mdss_panel_info *pinfo;
+
+	pinfo = &(ctrl->panel_data.panel_info);
+	if (pinfo->dcs_cmd_by_left && ctrl->ndx != DSI_CTRL_LEFT)
+		return;
+
+	cmdreq.cmds = &dsi_tear_on_cmd;
+	cmdreq.cmds_cnt = 1;
+	cmdreq.flags = CMD_REQ_COMMIT;
+	cmdreq.rlen = 0;
+	cmdreq.cb = NULL;
+
+	mdss_dsi_cmdlist_put(ctrl, &cmdreq);
+}
+
+void mdss_dsi_set_tear_off(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	struct dcs_cmd_req cmdreq;
+	struct mdss_panel_info *pinfo;
+
+	pinfo = &(ctrl->panel_data.panel_info);
+	if (pinfo->dcs_cmd_by_left && ctrl->ndx != DSI_CTRL_LEFT)
+		return;
+
+	cmdreq.cmds = &dsi_tear_off_cmd;
+	cmdreq.cmds_cnt = 1;
+	cmdreq.flags = CMD_REQ_COMMIT;
+	cmdreq.rlen = 0;
+	cmdreq.cb = NULL;
+
+	mdss_dsi_cmdlist_put(ctrl, &cmdreq);
+}
+
+/*
+ * mdss_dsi_cmd_get: ctrl->cmd_mutex acquired by caller
+ */
+struct dcs_cmd_req *mdss_dsi_cmdlist_get(struct mdss_dsi_ctrl_pdata *ctrl,
+				int from_mdp)
+{
+	struct dcs_cmd_list *clist;
+	struct dcs_cmd_req *req = NULL;
+
+	mutex_lock(&ctrl->cmdlist_mutex);
+	clist = &ctrl->cmdlist;
+	if (clist->get != clist->put) {
+		req = &clist->list[clist->get];
+		/* don't let the commit thread steal the
+		 * ESD thread's command
+		 */
+		if (from_mdp && (req->flags & CMD_REQ_COMMIT)) {
+			mutex_unlock(&ctrl->cmdlist_mutex);
+			return NULL;
+		}
+		clist->get++;
+		clist->get %= CMD_REQ_MAX;
+		clist->tot--;
+		pr_debug("%s: tot=%d put=%d get=%d\n", __func__,
+		clist->tot, clist->put, clist->get);
+	}
+	mutex_unlock(&ctrl->cmdlist_mutex);
+	return req;
+}
+
+int mdss_dsi_cmdlist_put(struct mdss_dsi_ctrl_pdata *ctrl,
+				struct dcs_cmd_req *cmdreq)
+{
+	struct dcs_cmd_req *req;
+	struct dcs_cmd_list *clist;
+	int ret = 0;
+
+	mutex_lock(&ctrl->cmd_mutex);
+	mutex_lock(&ctrl->cmdlist_mutex);
+	clist = &ctrl->cmdlist;
+	req = &clist->list[clist->put];
+	*req = *cmdreq;
+	clist->put++;
+	clist->put %= CMD_REQ_MAX;
+	clist->tot++;
+	if (clist->put == clist->get) {
+		/* drop the oldest one */
+		pr_debug("%s: DROP, tot=%d put=%d get=%d\n", __func__,
+			clist->tot, clist->put, clist->get);
+		clist->get++;
+		clist->get %= CMD_REQ_MAX;
+		clist->tot--;
+	}
+
+	pr_debug("%s: tot=%d put=%d get=%d\n", __func__,
+		clist->tot, clist->put, clist->get);
+
+	mutex_unlock(&ctrl->cmdlist_mutex);
+
+	if (req->flags & CMD_REQ_COMMIT) {
+		if (!ctrl->cmdlist_commit)
+			pr_err("cmdlist_commit not implemented!\n");
+		else
+			ret = ctrl->cmdlist_commit(ctrl, 0);
+	}
+	mutex_unlock(&ctrl->cmd_mutex);
+
+	return ret;
+}
+
diff --git a/drivers/video/fbdev/msm/mdss_dsi_cmd.h b/drivers/video/fbdev/msm/mdss_dsi_cmd.h
new file mode 100644
index 0000000..0ec96ec
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_dsi_cmd.h
@@ -0,0 +1,139 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_DSI_CMD_H
+#define MDSS_DSI_CMD_H
+
+#include "mdss.h"
+
+struct mdss_dsi_ctrl_pdata;
+
+#define DSI_HOST_HDR_SIZE	4
+#define DSI_HDR_LAST		BIT(31)
+#define DSI_HDR_LONG_PKT	BIT(30)
+#define DSI_HDR_BTA		BIT(29)
+#define DSI_HDR_VC(vc)		(((vc) & 0x03) << 22)
+#define DSI_HDR_DTYPE(dtype)	(((dtype) & 0x03f) << 16)
+#define DSI_HDR_DATA2(data)	(((data) & 0x0ff) << 8)
+#define DSI_HDR_DATA1(data)	((data) & 0x0ff)
+#define DSI_HDR_WC(wc)		((wc) & 0x0ffff)
+
+#define MDSS_DSI_MRPS	0x04  /* Maximum Return Packet Size */
+
+#define MDSS_DSI_LEN 8 /* 4 x 4 - 6 - 2, bytes dcs header+crc-align  */
+
+struct dsi_buf {
+	u32 *hdr;	/* dsi host header */
+	char *start;	/* buffer start addr */
+	char *end;	/* buffer end addr */
+	int size;	/* size of buffer */
+	char *data;	/* buffer */
+	int len;	/* data length */
+	int read_cnt;	/* DSI read count */
+	dma_addr_t dmap; /* mapped dma addr */
+};
+
+/* dcs read/write */
+#define DTYPE_DCS_WRITE		0x05	/* short write, 0 parameter */
+#define DTYPE_DCS_WRITE1	0x15	/* short write, 1 parameter */
+#define DTYPE_DCS_READ		0x06	/* read */
+#define DTYPE_DCS_LWRITE	0x39	/* long write */
+
+/* generic read/write */
+#define DTYPE_GEN_WRITE		0x03	/* short write, 0 parameter */
+#define DTYPE_GEN_WRITE1	0x13	/* short write, 1 parameter */
+#define DTYPE_GEN_WRITE2	0x23	/* short write, 2 parameter */
+#define DTYPE_GEN_LWRITE	0x29	/* long write */
+#define DTYPE_GEN_READ		0x04	/* long read, 0 parameter */
+#define DTYPE_GEN_READ1		0x14	/* long read, 1 parameter */
+#define DTYPE_GEN_READ2		0x24	/* long read, 2 parameter */
+
+#define DTYPE_COMPRESSION_MODE	0x07	/* compression mode */
+#define DTYPE_PPS		0x0a	/* pps */
+#define DTYPE_MAX_PKTSIZE	0x37	/* set max packet size */
+#define DTYPE_NULL_PKT		0x09	/* null packet, no data */
+#define DTYPE_BLANK_PKT		0x19	/* blanking packet, no data */
+
+#define DTYPE_CM_ON		0x02	/* color mode off per MIPI spec; NOTE: macro name inverted */
+#define DTYPE_CM_OFF		0x12	/* color mode on per MIPI spec; NOTE: macro name inverted */
+#define DTYPE_PERIPHERAL_OFF	0x22
+#define DTYPE_PERIPHERAL_ON	0x32
+
+/*
+ * dcs response
+ */
+#define DTYPE_ACK_ERR_RESP      0x02
+#define DTYPE_EOT_RESP          0x08    /* end of tx */
+#define DTYPE_GEN_READ1_RESP    0x11    /* 1 parameter, short */
+#define DTYPE_GEN_READ2_RESP    0x12    /* 2 parameter, short */
+#define DTYPE_GEN_LREAD_RESP    0x1a
+#define DTYPE_DCS_LREAD_RESP    0x1c
+#define DTYPE_DCS_READ1_RESP    0x21    /* 1 parameter, short */
+#define DTYPE_DCS_READ2_RESP    0x22    /* 2 parameter, short */
+
+struct dsi_ctrl_hdr {
+	char dtype;	/* data type */
+	char last;	/* last in chain */
+	char vc;	/* virtual chan */
+	char ack;	/* ask ACK from peripheral */
+	char wait;	/* ms */
+	short dlen;	/* 16 bits */
+} __packed;
+
+struct dsi_cmd_desc {
+	struct dsi_ctrl_hdr dchdr;
+	char *payload;
+};
+
+#define CMD_REQ_MAX     4
+#define CMD_REQ_RX      0x0001
+#define CMD_REQ_COMMIT  0x0002
+#define CMD_REQ_UNICAST 0x0008
+#define CMD_REQ_DMA_TPG 0x0040
+#define CMD_REQ_NO_MAX_PKT_SIZE 0x0008
+#define CMD_REQ_LP_MODE 0x0010
+#define CMD_REQ_HS_MODE 0x0020
+
+struct dcs_cmd_req {
+	struct dsi_cmd_desc *cmds;
+	int cmds_cnt;
+	u32 flags;
+	int rlen;       /* rx length */
+	char *rbuf;	/* rx buf */
+	void (*cb)(int data);
+};
+
+struct dcs_cmd_list {
+	int put;
+	int get;
+	int tot;
+	struct dcs_cmd_req list[CMD_REQ_MAX];
+};
+
+char *mdss_dsi_buf_reserve(struct dsi_buf *dp, int len);
+char *mdss_dsi_buf_unreserve(struct dsi_buf *dp, int len);
+char *mdss_dsi_buf_push(struct dsi_buf *dp, int len);
+char *mdss_dsi_buf_reserve_hdr(struct dsi_buf *dp, int hlen);
+char *mdss_dsi_buf_init(struct dsi_buf *dp);
+int mdss_dsi_buf_alloc(struct device *ctrl_dev, struct dsi_buf *dp, int size);
+int mdss_dsi_cmd_dma_add(struct dsi_buf *dp, struct dsi_cmd_desc *cm);
+int mdss_dsi_short_read1_resp(struct dsi_buf *rp);
+int mdss_dsi_short_read2_resp(struct dsi_buf *rp);
+int mdss_dsi_long_read_resp(struct dsi_buf *rp);
+void mdss_dsi_set_tear_on(struct mdss_dsi_ctrl_pdata *ctrl);
+void mdss_dsi_set_tear_off(struct mdss_dsi_ctrl_pdata *ctrl);
+struct dcs_cmd_req *mdss_dsi_cmdlist_get(struct mdss_dsi_ctrl_pdata *ctrl,
+				int from_mdp);
+int mdss_dsi_cmdlist_put(struct mdss_dsi_ctrl_pdata *ctrl,
+				struct dcs_cmd_req *cmdreq);
+#endif
diff --git a/drivers/video/fbdev/msm/mdss_dsi_host.c b/drivers/video/fbdev/msm/mdss_dsi_host.c
new file mode 100644
index 0000000..14ac3e1
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_dsi_host.c
@@ -0,0 +1,3239 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/iopoll.h>
+#include <linux/kthread.h>
+
+#include <linux/msm-bus.h>
+
+#include "mdss.h"
+#include "mdss_dsi.h"
+#include "mdss_panel.h"
+#include "mdss_debug.h"
+#include "mdss_smmu.h"
+#include "mdss_dsi_phy.h"
+
+#define VSYNC_PERIOD 17
+#define DMA_TX_TIMEOUT 200
+#define DMA_TPG_FIFO_LEN 64
+
+#define FIFO_STATUS	0x0C
+#define LANE_STATUS	0xA8
+
+#define MDSS_DSI_INT_CTRL	0x0110
+
+#define CEIL(x, y)		(((x) + ((y) - 1)) / (y))
+
+struct mdss_dsi_ctrl_pdata *ctrl_list[DSI_CTRL_MAX];
+
+struct mdss_hw mdss_dsi0_hw = {
+	.hw_ndx = MDSS_HW_DSI0,
+	.ptr = NULL,
+	.irq_handler = mdss_dsi_isr,
+};
+
+struct mdss_hw mdss_dsi1_hw = {
+	.hw_ndx = MDSS_HW_DSI1,
+	.ptr = NULL,
+	.irq_handler = mdss_dsi_isr,
+};
+
+
+#define DSI_EVENT_Q_MAX	4
+
+#define DSI_BTA_EVENT_TIMEOUT (HZ / 10)
+
+/* Mutex common for both the controllers */
+static struct mutex dsi_mtx;
+
+/* event */
+struct dsi_event_q {
+	struct mdss_dsi_ctrl_pdata *ctrl;
+	u32 arg;
+	u32 todo;
+};
+
+struct mdss_dsi_event {
+	int inited;
+	wait_queue_head_t event_q;
+	u32 event_pndx;
+	u32 event_gndx;
+	struct dsi_event_q todo_list[DSI_EVENT_Q_MAX];
+	spinlock_t event_lock;
+};
+
+static struct mdss_dsi_event dsi_event;
+
+static int dsi_event_thread(void *data);
+
+void mdss_dsi_ctrl_init(struct device *ctrl_dev,
+			struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	if (ctrl->panel_data.panel_info.pdest == DISPLAY_1) {
+		mdss_dsi0_hw.ptr = (void *)(ctrl);
+		ctrl->dsi_hw = &mdss_dsi0_hw;
+		ctrl->ndx = DSI_CTRL_0;
+	} else {
+		mdss_dsi1_hw.ptr = (void *)(ctrl);
+		ctrl->dsi_hw = &mdss_dsi1_hw;
+		ctrl->ndx = DSI_CTRL_1;
+	}
+
+	if (!(ctrl->dsi_irq_line))
+		ctrl->dsi_hw->irq_info = mdss_intr_line();
+
+	ctrl->panel_mode = ctrl->panel_data.panel_info.mipi.mode;
+
+	ctrl_list[ctrl->ndx] = ctrl;	/* keep it */
+
+	if (ctrl->mdss_util->register_irq(ctrl->dsi_hw))
+		pr_err("%s: mdss_register_irq failed.\n", __func__);
+
+	pr_debug("%s: ndx=%d base=%pK\n", __func__, ctrl->ndx, ctrl->ctrl_base);
+
+	init_completion(&ctrl->dma_comp);
+	init_completion(&ctrl->mdp_comp);
+	init_completion(&ctrl->video_comp);
+	init_completion(&ctrl->dynamic_comp);
+	init_completion(&ctrl->bta_comp);
+	spin_lock_init(&ctrl->irq_lock);
+	spin_lock_init(&ctrl->mdp_lock);
+	mutex_init(&ctrl->mutex);
+	mutex_init(&ctrl->cmd_mutex);
+	mutex_init(&ctrl->clk_lane_mutex);
+	mutex_init(&ctrl->cmdlist_mutex);
+	mdss_dsi_buf_alloc(ctrl_dev, &ctrl->tx_buf, SZ_4K);
+	mdss_dsi_buf_alloc(ctrl_dev, &ctrl->rx_buf, SZ_4K);
+	mdss_dsi_buf_alloc(ctrl_dev, &ctrl->status_buf, SZ_4K);
+	ctrl->cmdlist_commit = mdss_dsi_cmdlist_commit;
+	ctrl->err_cont.err_time_delta = 100;
+	ctrl->err_cont.max_err_index = MAX_ERR_INDEX;
+
+	if (dsi_event.inited == 0) {
+		kthread_run(dsi_event_thread, (void *)&dsi_event,
+						"mdss_dsi_event");
+		mutex_init(&dsi_mtx);
+		dsi_event.inited  = 1;
+	}
+}
+
+void mdss_dsi_set_reg(struct mdss_dsi_ctrl_pdata *ctrl, int off,
+						u32 mask, u32 val)
+{
+	u32 data;
+
+	off &= ~0x03;
+	val &= mask;    /* set bits indicated at mask only */
+	data = MIPI_INP(ctrl->ctrl_base + off);
+	data &= ~mask;
+	data |= val;
+	pr_debug("%s: ndx=%d off=%x data=%x\n", __func__,
+				ctrl->ndx, off, data);
+	MIPI_OUTP(ctrl->ctrl_base + off, data);
+}
+
+void mdss_dsi_clk_req(struct mdss_dsi_ctrl_pdata *ctrl,
+	struct dsi_panel_clk_ctrl *clk_ctrl)
+{
+	enum dsi_clk_req_client client = clk_ctrl->client;
+	int enable = clk_ctrl->state;
+	void *clk_handle = ctrl->mdp_clk_handle;
+
+	if (clk_ctrl->client == DSI_CLK_REQ_DSI_CLIENT)
+		clk_handle = ctrl->dsi_clk_handle;
+
+	MDSS_XLOG(ctrl->ndx, enable, ctrl->mdp_busy, current->pid,
+		client);
+	if (enable == 0) {
+		/* need wait before disable */
+		mutex_lock(&ctrl->cmd_mutex);
+		mdss_dsi_cmd_mdp_busy(ctrl);
+		mutex_unlock(&ctrl->cmd_mutex);
+	}
+
+	MDSS_XLOG(ctrl->ndx, enable, ctrl->mdp_busy, current->pid,
+		client);
+	mdss_dsi_clk_ctrl(ctrl, clk_handle,
+		  MDSS_DSI_ALL_CLKS, enable);
+}
+
+void mdss_dsi_pll_relock(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	int rc;
+
+	/*
+	 * todo: this code does not work very well with dual
+	 * dsi use cases. Need to fix this eventually.
+	 */
+
+	rc = mdss_dsi_clk_force_toggle(ctrl->dsi_clk_handle, MDSS_DSI_LINK_CLK);
+	if (rc)
+		pr_err("clock toggle failed, rc = %d\n", rc);
+}
+
+void mdss_dsi_enable_irq(struct mdss_dsi_ctrl_pdata *ctrl, u32 term)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctrl->irq_lock, flags);
+	if (ctrl->dsi_irq_mask & term) {
+		spin_unlock_irqrestore(&ctrl->irq_lock, flags);
+		return;
+	}
+	if (ctrl->dsi_irq_mask == 0) {
+		MDSS_XLOG(ctrl->ndx, term);
+		ctrl->mdss_util->enable_irq(ctrl->dsi_hw);
+		pr_debug("%s: IRQ Enable, ndx=%d mask=%x term=%x\n", __func__,
+			ctrl->ndx, (int)ctrl->dsi_irq_mask, (int)term);
+	}
+	ctrl->dsi_irq_mask |= term;
+	spin_unlock_irqrestore(&ctrl->irq_lock, flags);
+}
+
+void mdss_dsi_disable_irq(struct mdss_dsi_ctrl_pdata *ctrl, u32 term)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctrl->irq_lock, flags);
+	if (!(ctrl->dsi_irq_mask & term)) {
+		spin_unlock_irqrestore(&ctrl->irq_lock, flags);
+		return;
+	}
+	ctrl->dsi_irq_mask &= ~term;
+	if (ctrl->dsi_irq_mask == 0) {
+		MDSS_XLOG(ctrl->ndx, term);
+		ctrl->mdss_util->disable_irq(ctrl->dsi_hw);
+		pr_debug("%s: IRQ Disable, ndx=%d mask=%x term=%x\n", __func__,
+			ctrl->ndx, (int)ctrl->dsi_irq_mask, (int)term);
+	}
+	spin_unlock_irqrestore(&ctrl->irq_lock, flags);
+}
+
+/*
+ * mdss_dsi_disable_irq_nosync() should be called
+ * from interrupt context
+ */
+void mdss_dsi_disable_irq_nosync(struct mdss_dsi_ctrl_pdata *ctrl, u32 term)
+{
+	spin_lock(&ctrl->irq_lock);
+	if (!(ctrl->dsi_irq_mask & term)) {
+		spin_unlock(&ctrl->irq_lock);
+		return;
+	}
+	ctrl->dsi_irq_mask &= ~term;
+	if (ctrl->dsi_irq_mask == 0) {
+		MDSS_XLOG(ctrl->ndx, term);
+		ctrl->mdss_util->disable_irq_nosync(ctrl->dsi_hw);
+		pr_debug("%s: IRQ Disable, ndx=%d mask=%x term=%x\n", __func__,
+			ctrl->ndx, (int)ctrl->dsi_irq_mask, (int)term);
+	}
+	spin_unlock(&ctrl->irq_lock);
+}
+
+void mdss_dsi_video_test_pattern(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	int i;
+
+	MIPI_OUTP((ctrl->ctrl_base) + 0x015c, 0x021);
+	MIPI_OUTP((ctrl->ctrl_base) + 0x0164, 0xff0000); /* red */
+	i = 0;
+	while (i++ < 50) {
+		MIPI_OUTP((ctrl->ctrl_base) + 0x0180, 0x1);
+		/* Add sleep to get ~50 fps frame rate*/
+		msleep(20);
+	}
+	MIPI_OUTP((ctrl->ctrl_base) + 0x015c, 0x0);
+}
+
+void mdss_dsi_cmd_test_pattern(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	int i;
+
+	MIPI_OUTP((ctrl->ctrl_base) + 0x015c, 0x201);
+	MIPI_OUTP((ctrl->ctrl_base) + 0x016c, 0xff0000); /* red */
+	i = 0;
+	while (i++ < 50) {
+		MIPI_OUTP((ctrl->ctrl_base) + 0x0184, 0x1);
+		/* Add sleep to get ~50 fps frame rate*/
+		msleep(20);
+	}
+	MIPI_OUTP((ctrl->ctrl_base) + 0x015c, 0x0);
+}
+
+void mdss_dsi_read_hw_revision(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	if (ctrl->shared_data->hw_rev)
+		return;
+
+	/* clock must be on */
+	ctrl->shared_data->hw_rev = MIPI_INP(ctrl->ctrl_base);
+}
+
+void mdss_dsi_read_phy_revision(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	u32 reg_val;
+
+	if (ctrl->shared_data->phy_rev > DSI_PHY_REV_UNKNOWN)
+		return;
+
+	reg_val = MIPI_INP(ctrl->phy_io.base);
+	if (!reg_val) {
+		/*
+		 * DSI_0_PHY_DSIPHY_REVISION_ID3 for phy 1.0
+		 * reset value = 0x10
+		 * 7:4 Major
+		 * 3:0 Minor
+		 */
+		reg_val = MIPI_INP(ctrl->phy_io.base + 0x20c);
+		reg_val = reg_val >> 4;
+	}
+
+	if (reg_val == DSI_PHY_REV_20)
+		ctrl->shared_data->phy_rev = DSI_PHY_REV_20;
+	else if (reg_val == DSI_PHY_REV_10)
+		ctrl->shared_data->phy_rev = DSI_PHY_REV_10;
+	else
+		ctrl->shared_data->phy_rev = DSI_PHY_REV_UNKNOWN;
+}
+
/**
 * mdss_dsi_host_init() - program the DSI controller host configuration
 * @pdata: panel data; the owning controller is recovered via container_of()
 *
 * Programs the mode-specific configuration register (video: 0x10/0x20,
 * command: 0x40/0x44), trigger control, lane swap, clock-out timing, EOT
 * packet handling, HS TX timeout, error-interrupt masks and clock gating,
 * then sets the controller enable bit. Bus clocks must be on. All register
 * offsets are relative to ctrl_base; bit positions follow the MDSS DSI
 * programming model used throughout this file.
 */
void mdss_dsi_host_init(struct mdss_panel_data *pdata)
{
	u32 dsi_ctrl, intr_ctrl;
	u32 data;
	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
	struct mipi_panel_info *pinfo = NULL;

	if (pdata == NULL) {
		pr_err("%s: Invalid input data\n", __func__);
		return;
	}

	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
				panel_data);

	pinfo = &pdata->panel_info.mipi;

	if (pinfo->mode == DSI_VIDEO_MODE) {
		/* DSI_VIDEO_MODE_CTRL: porch power states, traffic mode,
		 * destination format and virtual channel
		 */
		data = 0;
		if (pinfo->last_line_interleave_en)
			data |= BIT(31);
		if (pinfo->pulse_mode_hsa_he)
			data |= BIT(28);
		if (pinfo->hfp_power_stop)
			data |= BIT(24);
		if (pinfo->hbp_power_stop)
			data |= BIT(20);
		if (pinfo->hsa_power_stop)
			data |= BIT(16);
		if (pinfo->eof_bllp_power_stop)
			data |= BIT(15);
		if (pinfo->bllp_power_stop)
			data |= BIT(12);
		data |= ((pinfo->traffic_mode & 0x03) << 8);
		data |= ((pinfo->dst_format & 0x03) << 4); /* 2 bits */
		data |= (pinfo->vc & 0x03);
		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0010, data);

		/* DSI_VIDEO_MODE_DATA_CTRL: RGB component ordering */
		data = 0;
		data |= ((pinfo->rgb_swap & 0x07) << 12);
		if (pinfo->b_sel)
			data |= BIT(8);
		if (pinfo->g_sel)
			data |= BIT(4);
		if (pinfo->r_sel)
			data |= BIT(0);
		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0020, data);
	} else if (pinfo->mode == DSI_CMD_MODE) {
		/* DSI_COMMAND_MODE_MDP_CTRL: interleave, RGB ordering and
		 * destination format for command-mode MDP transfers
		 */
		data = 0;
		data |= ((pinfo->interleave_max & 0x0f) << 20);
		data |= ((pinfo->rgb_swap & 0x07) << 16);
		if (pinfo->b_sel)
			data |= BIT(12);
		if (pinfo->g_sel)
			data |= BIT(8);
		if (pinfo->r_sel)
			data |= BIT(4);
		data |= (pinfo->dst_format & 0x0f);	/* 4 bits */
		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0040, data);

		/* DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL */
		data = pinfo->wr_mem_continue & 0x0ff;
		data <<= 8;
		data |= (pinfo->wr_mem_start & 0x0ff);
		if (pinfo->insert_dcs_cmd)
			data |= BIT(16);
		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0044, data);
	} else
		pr_err("%s: Unknown DSI mode=%d\n", __func__, pinfo->mode);

	dsi_ctrl = BIT(8) | BIT(2);	/* clock enable & cmd mode */
	intr_ctrl = 0;
	intr_ctrl = (DSI_INTR_CMD_DMA_DONE_MASK | DSI_INTR_CMD_MDP_DONE_MASK);

	/* CRC/ECC checking and per-lane enables in DSI_CTRL */
	if (pinfo->crc_check)
		dsi_ctrl |= BIT(24);
	if (pinfo->ecc_check)
		dsi_ctrl |= BIT(20);
	if (pinfo->data_lane3)
		dsi_ctrl |= BIT(7);
	if (pinfo->data_lane2)
		dsi_ctrl |= BIT(6);
	if (pinfo->data_lane1)
		dsi_ctrl |= BIT(5);
	if (pinfo->data_lane0)
		dsi_ctrl |= BIT(4);


	/* TE, MDP and DMA trigger selection plus stream id */
	data = 0;
	if (pinfo->te_sel)
		data |= BIT(31);
	data |= pinfo->mdp_trigger << 4;/* cmd mdp trigger */
	data |= pinfo->dma_trigger;	/* cmd dma trigger */
	data |= (pinfo->stream & 0x01) << 8;
	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0084,
				data); /* DSI_TRIG_CTRL */

	/* DSI_LAN_SWAP_CTRL */
	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x00b0, ctrl_pdata->dlane_swap);

	/* clock out ctrl */
	data = pinfo->t_clk_post & 0x3f;	/* 6 bits */
	data <<= 8;
	data |= pinfo->t_clk_pre & 0x3f;	/*  6 bits */
	/* DSI_CLKOUT_TIMING_CTRL */
	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0xc4, data);

	/* end-of-transmission packet handling on RX and TX */
	data = 0;
	if (pinfo->rx_eot_ignore)
		data |= BIT(4);
	if (pinfo->tx_eot_append)
		data |= BIT(0);
	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x00cc,
				data); /* DSI_EOT_PACKET_CTRL */
	/*
	 * DSI_HS_TIMER_CTRL -> timer resolution = 8 esc clk
	 * HS TX timeout - 16136 (0x3f08) esc clk
	 */
	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x00bc, 0x3fd08);


	/* allow only ack-err-status  to generate interrupt */
	/* DSI_ERR_INT_MASK0 */
	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x010c, 0x03f03fc0);

	intr_ctrl |= DSI_INTR_ERROR_MASK;
	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0110,
				intr_ctrl); /* DSI_INTL_CTRL */

	/* turn esc, byte, dsi, pclk, sclk, hclk on */
	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x11c,
					0x23f); /* DSI_CLK_CTRL */

	/* Reset DSI_LANE_CTRL */
	if (!ctrl_pdata->mmss_clamp)
		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x00ac, 0x0);

	dsi_ctrl |= BIT(0);	/* enable dsi */
	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0004, dsi_ctrl);

	/* enable contention detection for receiving */
	mdss_dsi_lp_cd_rx(ctrl_pdata);

	/* set DMA FIFO read watermark to 15/16 full */
	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x50, 0x30);

	wmb(); /* ensure all DSI host configuration write are finished */
}
+
+void mdss_dsi_set_tx_power_mode(int mode, struct mdss_panel_data *pdata)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	u32 data;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	data = MIPI_INP((ctrl_pdata->ctrl_base) + 0x3c);
+
+	if (mode == 0)
+		data &= ~BIT(26);
+	else
+		data |= BIT(26);
+
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x3c, data);
+}
+
/**
 * mdss_dsi_sw_reset() - soft-reset the DSI controller
 * @ctrl: DSI controller data
 * @restore: when true, restore the saved DSI_CTRL value (re-enable) after
 *           the reset completes
 *
 * The controller enable bit must be cleared and all dynamic clocks forced on
 * before toggling the reset bit; the write barriers enforce that ordering on
 * the bus. Any in-flight MDP transfer is abandoned by the reset, so mdp_busy
 * is cleared and waiters on mdp_comp are released.
 */
void mdss_dsi_sw_reset(struct mdss_dsi_ctrl_pdata *ctrl, bool restore)
{
	u32 data0;
	unsigned long flag;

	if (!ctrl) {
		pr_err("%s: Invalid input data\n", __func__);
		return;
	}

	/* save DSI_CTRL (0x0004) and clear the enable bit (bit 0) */
	data0 = MIPI_INP(ctrl->ctrl_base + 0x0004);
	MIPI_OUTP(ctrl->ctrl_base + 0x0004, (data0 & ~BIT(0)));
	/*
	 * dsi controller need to be disabled before
	 * clocks turned on
	 */
	wmb();	/* make sure dsi controller is disabled */

	/* turn esc, byte, dsi, pclk, sclk, hclk on */
	MIPI_OUTP(ctrl->ctrl_base + 0x11c, 0x23f); /* DSI_CLK_CTRL */
	wmb();	/* make sure clocks enabled */

	/* dsi controller can only be reset while clocks are running */
	MIPI_OUTP(ctrl->ctrl_base + 0x118, 0x01);
	wmb();	/* make sure reset happen */
	MIPI_OUTP(ctrl->ctrl_base + 0x118, 0x00);
	wmb();	/* controller out of reset */

	if (restore) {
		MIPI_OUTP(ctrl->ctrl_base + 0x0004, data0);
		wmb();	/* make sure dsi controller enabled again */
	}

	/* It is safe to clear mdp_busy as reset is happening */
	spin_lock_irqsave(&ctrl->mdp_lock, flag);
	ctrl->mdp_busy = false;
	complete_all(&ctrl->mdp_comp);
	spin_unlock_irqrestore(&ctrl->mdp_lock, flag);
}
+
+/**
+ * mdss_dsi_wait_for_lane_idle() - Wait for DSI lanes to be idle
+ * @ctrl: pointer to DSI controller structure
+ *
+ * This function waits for all the active DSI lanes to be idle by polling all
+ * the *FIFO_EMPTY bits and polling the lane status to ensure that all the lanes
+ * are in stop state. This function assumes that the bus clocks required to
+ * access the registers are already turned on.
+ */
+int mdss_dsi_wait_for_lane_idle(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	int rc;
+	u32 val;
+	u32 fifo_empty_mask = 0;
+	u32 stop_state_mask = 0;
+	struct mipi_panel_info *mipi;
+	u32 const sleep_us = 10;
+	u32 const timeout_us = 100;
+
+	if (!ctrl) {
+		pr_err("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	mipi = &ctrl->panel_data.panel_info.mipi;
+
+	if (mipi->data_lane0) {
+		stop_state_mask |= BIT(0);
+		fifo_empty_mask |= (BIT(12) | BIT(16));
+	}
+	if (mipi->data_lane1) {
+		stop_state_mask |= BIT(1);
+		fifo_empty_mask |= BIT(20);
+	}
+	if (mipi->data_lane2) {
+		stop_state_mask |= BIT(2);
+		fifo_empty_mask |= BIT(24);
+	}
+	if (mipi->data_lane3) {
+		stop_state_mask |= BIT(3);
+		fifo_empty_mask |= BIT(28);
+	}
+
+	pr_debug("%s: polling for fifo empty, mask=0x%08x\n", __func__,
+		fifo_empty_mask);
+	rc = readl_poll_timeout(ctrl->ctrl_base + FIFO_STATUS, val,
+		(val & fifo_empty_mask), sleep_us, timeout_us);
+	if (rc) {
+		pr_err("%s: fifo not empty, FIFO_STATUS=0x%08x\n",
+			__func__, val);
+		goto error;
+	}
+
+	pr_debug("%s: polling for lanes to be in stop state, mask=0x%08x\n",
+		__func__, stop_state_mask);
+	rc = readl_poll_timeout(ctrl->ctrl_base + LANE_STATUS, val,
+		(val & stop_state_mask), sleep_us, timeout_us);
+	if (rc) {
+		pr_err("%s: lanes not in stop state, LANE_STATUS=0x%08x\n",
+			__func__, val);
+		goto error;
+	}
+
+error:
+	return rc;
+}
+
+static void mdss_dsi_cfg_lane_ctrl(struct mdss_dsi_ctrl_pdata *ctrl,
+						u32 bits, int set)
+{
+	u32 data;
+
+	data = MIPI_INP(ctrl->ctrl_base + 0x00ac);
+	if (set)
+		data |= bits;
+	else
+		data &= ~bits;
+	MIPI_OUTP(ctrl->ctrl_base + 0x0ac, data);
+}
+
+
+static inline bool mdss_dsi_poll_clk_lane(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	u32 clk = 0;
+
+	if (readl_poll_timeout(((ctrl->ctrl_base) + 0x00a8),
+				clk,
+				(clk & 0x0010),
+				10, 1000)) {
+		pr_err("%s: ndx=%d clk lane NOT stopped, clk=%x\n",
+					__func__, ctrl->ndx, clk);
+
+		return false;
+	}
+	return true;
+}
+
+static void mdss_dsi_wait_clk_lane_to_stop(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	if (mdss_dsi_poll_clk_lane(ctrl)) /* stopped */
+		return;
+
+	/* clk stuck at hs, start recovery process */
+
+	/* force clk lane tx stop -- bit 20 */
+	mdss_dsi_cfg_lane_ctrl(ctrl, BIT(20), 1);
+
+	if (mdss_dsi_poll_clk_lane(ctrl) == false)
+		pr_err("%s: clk lane recovery failed\n", __func__);
+
+	/* clear clk lane tx stop -- bit 20 */
+	mdss_dsi_cfg_lane_ctrl(ctrl, BIT(20), 0);
+}
+
+static void mdss_dsi_stop_hs_clk_lane(struct mdss_dsi_ctrl_pdata *ctrl);
+
+/*
+ * mdss_dsi_start_hs_clk_lane:
+ * this function is work around solution for 8994 dsi clk lane
+ * may stuck at HS problem
+ */
+static void mdss_dsi_start_hs_clk_lane(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+
+	/* make sure clk lane is stopped */
+	mdss_dsi_stop_hs_clk_lane(ctrl);
+
+	mutex_lock(&ctrl->clk_lane_mutex);
+	mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle, MDSS_DSI_ALL_CLKS,
+			  MDSS_DSI_CLK_ON);
+	if (ctrl->clk_lane_cnt) {
+		pr_err("%s: ndx=%d do-wait, cnt=%d\n",
+				__func__, ctrl->ndx, ctrl->clk_lane_cnt);
+		mdss_dsi_wait_clk_lane_to_stop(ctrl);
+	}
+
+	/* force clk lane hs for next dma or mdp stream */
+	mdss_dsi_cfg_lane_ctrl(ctrl, BIT(28), 1);
+	ctrl->clk_lane_cnt++;
+	pr_debug("%s: ndx=%d, set_hs, cnt=%d\n", __func__,
+				ctrl->ndx, ctrl->clk_lane_cnt);
+	mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle, MDSS_DSI_ALL_CLKS,
+			  MDSS_DSI_CLK_OFF);
+	mutex_unlock(&ctrl->clk_lane_mutex);
+}
+
+/*
+ * mdss_dsi_stop_hs_clk_lane:
+ * this function is work around solution for 8994 dsi clk lane
+ * may stuck at HS problem
+ */
+static void mdss_dsi_stop_hs_clk_lane(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	u32 fifo = 0;
+	u32 lane = 0;
+
+	mutex_lock(&ctrl->clk_lane_mutex);
+	if (ctrl->clk_lane_cnt == 0)	/* stopped already */
+		goto release;
+
+	mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle, MDSS_DSI_ALL_CLKS,
+			  MDSS_DSI_CLK_ON);
+	/* fifo */
+	if (readl_poll_timeout(((ctrl->ctrl_base) + 0x000c),
+			   fifo,
+			   ((fifo & 0x11110000) == 0x11110000),
+			       10, 1000)) {
+		pr_err("%s: fifo NOT empty, fifo=%x\n",
+					__func__, fifo);
+		goto end;
+	}
+
+	/* data lane status */
+	if (readl_poll_timeout(((ctrl->ctrl_base) + 0x00a8),
+			   lane,
+			   ((lane & 0x000f) == 0x000f),
+			       100, 2000)) {
+		pr_err("%s: datalane NOT stopped, lane=%x\n",
+					__func__, lane);
+	}
+end:
+	/* stop force clk lane hs */
+	mdss_dsi_cfg_lane_ctrl(ctrl, BIT(28), 0);
+
+	mdss_dsi_wait_clk_lane_to_stop(ctrl);
+
+	ctrl->clk_lane_cnt = 0;
+release:
+	pr_debug("%s: ndx=%d, cnt=%d\n", __func__,
+			ctrl->ndx, ctrl->clk_lane_cnt);
+
+	mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle, MDSS_DSI_ALL_CLKS,
+			  MDSS_DSI_CLK_OFF);
+	mutex_unlock(&ctrl->clk_lane_mutex);
+}
+
/* Start the HS clock lane for a command transfer. In sync-wait (split) mode
 * only the trigger controller proceeds, and it starts the other controller's
 * clock lane first.
 */
static void mdss_dsi_cmd_start_hs_clk_lane(struct mdss_dsi_ctrl_pdata *ctrl)
{
	if (mdss_dsi_sync_wait_enable(ctrl)) {
		struct mdss_dsi_ctrl_pdata *other;

		/* non-trigger controller defers to its peer */
		if (!mdss_dsi_sync_wait_trigger(ctrl))
			return;

		other = mdss_dsi_get_other_ctrl(ctrl);
		if (other)
			mdss_dsi_start_hs_clk_lane(other);
	}

	mdss_dsi_start_hs_clk_lane(ctrl);
}
+
/* Stop the HS clock lane after a command transfer. In sync-wait (split) mode
 * only the trigger controller proceeds, and it stops the other controller's
 * clock lane first.
 */
static void mdss_dsi_cmd_stop_hs_clk_lane(struct mdss_dsi_ctrl_pdata *ctrl)
{
	if (mdss_dsi_sync_wait_enable(ctrl)) {
		struct mdss_dsi_ctrl_pdata *other;

		/* non-trigger controller defers to its peer */
		if (!mdss_dsi_sync_wait_trigger(ctrl))
			return;

		other = mdss_dsi_get_other_ctrl(ctrl);
		if (other)
			mdss_dsi_stop_hs_clk_lane(other);
	}

	mdss_dsi_stop_hs_clk_lane(ctrl);
}
+
/**
 * mdss_dsi_ctl_phy_reset() - recover the DSI controller(s) and PHY after a
 * FIFO overflow or LP RX timeout
 * @ctrl: controller that reported the error
 * @event: DSI_EV_DLNx_FIFO_OVERFLOW or DSI_EV_LP_RX_TIMEOUT; selects which
 *         lanes get the force-TX-stop toggle (clock lane only for overflow,
 *         clock + enabled data lanes for LP RX timeout)
 *
 * The recovery sequence (performed on both controllers for split display,
 * otherwise on @ctrl alone): notify MDP via the recovery callback, disable
 * PHY contention detection, disable video engine then controller, force all
 * dynamic clocks on, pulse DSI_SW_RESET, undo the clock force, re-enable the
 * controller, toggle force-TX-stop on the masked lanes until the lane status
 * reads 0x1f1f (polled every 200us for up to 2ms as suggested by the HW
 * team), then restore video mode and contention detection. The exact
 * register/barrier ordering below is required by the hardware; do not
 * reorder.
 */
static void mdss_dsi_ctl_phy_reset(struct mdss_dsi_ctrl_pdata *ctrl, u32 event)
{
	u32 data0, data1, mask = 0, data_lane_en = 0;
	struct mdss_dsi_ctrl_pdata *ctrl0, *ctrl1;
	u32 ln0, ln1, ln_ctrl0, ln_ctrl1, i;
	int rc = 0;

	/*
	 * Add 2 ms delay suggested by HW team.
	 * Check clk lane stop state after every 200 us
	 */
	u32 loop = 10, u_dly = 200;

	pr_debug("%s: MDSS DSI CTRL and PHY reset. ctrl-num = %d\n",
					__func__, ctrl->ndx);
	if (event == DSI_EV_DLNx_FIFO_OVERFLOW) {
		mask = BIT(20); /* clock lane only for overflow recovery */
	} else if (event == DSI_EV_LP_RX_TIMEOUT) {
		data_lane_en = (MIPI_INP(ctrl->ctrl_base + 0x0004) &
			DSI_DATA_LANES_ENABLED) >> 4;
		/* clock and data lanes for LP_RX_TO recovery */
		mask = BIT(20) | (data_lane_en << 16);
	}

	if (mdss_dsi_is_hw_config_split(ctrl->shared_data)) {
		pr_debug("%s: Split display enabled\n", __func__);
		ctrl0 = mdss_dsi_get_ctrl_by_index(DSI_CTRL_0);
		ctrl1 = mdss_dsi_get_ctrl_by_index(DSI_CTRL_1);

		/* let MDP prepare for the reset; bail if target is down */
		if (ctrl0->recovery) {
			rc = ctrl0->recovery->fxn(ctrl0->recovery->data,
					MDP_INTF_DSI_VIDEO_FIFO_OVERFLOW);
			if (rc < 0) {
				pr_debug("%s: Target is in suspend/shutdown\n",
					__func__);
				return;
			}
		}
		/*
		 * Disable PHY contention detection and receive.
		 * Configure the strength ctrl 1 register.
		 */
		MIPI_OUTP((ctrl0->phy_io.base) + 0x0188, 0);
		MIPI_OUTP((ctrl1->phy_io.base) + 0x0188, 0);

		data0 = MIPI_INP(ctrl0->ctrl_base + 0x0004);
		data1 = MIPI_INP(ctrl1->ctrl_base + 0x0004);
		/* Disable DSI video mode */
		MIPI_OUTP(ctrl0->ctrl_base + 0x004, (data0 & ~BIT(1)));
		MIPI_OUTP(ctrl1->ctrl_base + 0x004, (data1 & ~BIT(1)));
		/* Disable DSI controller */
		MIPI_OUTP(ctrl0->ctrl_base + 0x004,
					(data0 & ~(BIT(0) | BIT(1))));
		MIPI_OUTP(ctrl1->ctrl_base + 0x004,
					(data1 & ~(BIT(0) | BIT(1))));
		/* "Force On" all dynamic clocks */
		MIPI_OUTP(ctrl0->ctrl_base + 0x11c, 0x100a00);
		MIPI_OUTP(ctrl1->ctrl_base + 0x11c, 0x100a00);

		/* DSI_SW_RESET */
		MIPI_OUTP(ctrl0->ctrl_base + 0x118, 0x1);
		MIPI_OUTP(ctrl1->ctrl_base + 0x118, 0x1);
		wmb(); /* ensure write is finished before progressing */
		MIPI_OUTP(ctrl0->ctrl_base + 0x118, 0x0);
		MIPI_OUTP(ctrl1->ctrl_base + 0x118, 0x0);
		wmb(); /* ensure write is finished before progressing */

		/* Remove "Force On" all dynamic clocks */
		MIPI_OUTP(ctrl0->ctrl_base + 0x11c, 0x00); /* DSI_CLK_CTRL */
		MIPI_OUTP(ctrl1->ctrl_base + 0x11c, 0x00); /* DSI_CLK_CTRL */

		/* Enable DSI controller */
		MIPI_OUTP(ctrl0->ctrl_base + 0x004, (data0 & ~BIT(1)));
		MIPI_OUTP(ctrl1->ctrl_base + 0x004, (data1 & ~BIT(1)));

		/*
		 * Toggle Clk lane Force TX stop so that
		 * clk lane status is no more in stop state
		 */
		ln0 = MIPI_INP(ctrl0->ctrl_base + 0x00a8);
		ln1 = MIPI_INP(ctrl1->ctrl_base + 0x00a8);
		pr_debug("%s: lane status, ctrl0 = 0x%x, ctrl1 = 0x%x\n",
			 __func__, ln0, ln1);
		ln_ctrl0 = MIPI_INP(ctrl0->ctrl_base + 0x00ac);
		ln_ctrl1 = MIPI_INP(ctrl1->ctrl_base + 0x00ac);
		MIPI_OUTP(ctrl0->ctrl_base + 0x0ac, ln_ctrl0 | mask);
		MIPI_OUTP(ctrl1->ctrl_base + 0x0ac, ln_ctrl1 | mask);
		ln_ctrl0 = MIPI_INP(ctrl0->ctrl_base + 0x00ac);
		ln_ctrl1 = MIPI_INP(ctrl1->ctrl_base + 0x00ac);
		for (i = 0; i < loop; i++) {
			ln0 = MIPI_INP(ctrl0->ctrl_base + 0x00a8);
			ln1 = MIPI_INP(ctrl1->ctrl_base + 0x00a8);
			if ((ln0 == 0x1f1f) && (ln1 == 0x1f1f))
				break;
			/* Check clk lane stopState for every 200us */
			udelay(u_dly);
		}
		if (i == loop) {
			MDSS_XLOG(ctrl0->ndx, ln0, 0x1f1f);
			MDSS_XLOG(ctrl1->ndx, ln1, 0x1f1f);
			pr_err("%s: Clock lane still in stop state\n",
					__func__);
			MDSS_XLOG_TOUT_HANDLER("mdp", "dsi0_ctrl", "dsi0_phy",
				"dsi1_ctrl", "dsi1_phy", "panic");
		}
		pr_debug("%s: lane ctrl, ctrl0 = 0x%x, ctrl1 = 0x%x\n",
			 __func__, ln0, ln1);
		MIPI_OUTP(ctrl0->ctrl_base + 0x0ac, ln_ctrl0 & ~mask);
		MIPI_OUTP(ctrl1->ctrl_base + 0x0ac, ln_ctrl1 & ~mask);

		/* Enable Video mode for DSI controller */
		MIPI_OUTP(ctrl0->ctrl_base + 0x004, data0);
		MIPI_OUTP(ctrl1->ctrl_base + 0x004, data1);

		/*
		 * Enable PHY contention detection and receive.
		 * Configure the strength ctrl 1 register.
		 */
		MIPI_OUTP((ctrl0->phy_io.base) + 0x0188, 0x6);
		MIPI_OUTP((ctrl1->phy_io.base) + 0x0188, 0x6);
		/*
		 * Add sufficient delay to make sure
		 * pixel transmission as started
		 */
		udelay(200);
	} else {
		/* single-controller path: same sequence on @ctrl only */
		if (ctrl->recovery) {
			rc = ctrl->recovery->fxn(ctrl->recovery->data,
					MDP_INTF_DSI_VIDEO_FIFO_OVERFLOW);
			if (rc < 0) {
				pr_debug("%s: Target is in suspend/shutdown\n",
					__func__);
				return;
			}
		}
		/* Disable PHY contention detection and receive */
		MIPI_OUTP((ctrl->phy_io.base) + 0x0188, 0);

		data0 = MIPI_INP(ctrl->ctrl_base + 0x0004);
		/* Disable DSI video mode */
		MIPI_OUTP(ctrl->ctrl_base + 0x004, (data0 & ~BIT(1)));
		/* Disable DSI controller */
		MIPI_OUTP(ctrl->ctrl_base + 0x004,
					(data0 & ~(BIT(0) | BIT(1))));
		/* "Force On" all dynamic clocks */
		MIPI_OUTP(ctrl->ctrl_base + 0x11c, 0x100a00);

		/* DSI_SW_RESET */
		MIPI_OUTP(ctrl->ctrl_base + 0x118, 0x1);
		wmb(); /* ensure write is finished before progressing */

		MIPI_OUTP(ctrl->ctrl_base + 0x118, 0x0);
		wmb(); /* ensure write is finished before progressing */

		/* Remove "Force On" all dynamic clocks */
		MIPI_OUTP(ctrl->ctrl_base + 0x11c, 0x00);
		/* Enable DSI controller */
		MIPI_OUTP(ctrl->ctrl_base + 0x004, (data0 & ~BIT(1)));

		/*
		 * Toggle Clk lane Force TX stop so that
		 * clk lane status is no more in stop state
		 */
		ln0 = MIPI_INP(ctrl->ctrl_base + 0x00a8);
		pr_debug("%s: lane status, ctrl = 0x%x\n",
			 __func__, ln0);
		ln_ctrl0 = MIPI_INP(ctrl->ctrl_base + 0x00ac);
		MIPI_OUTP(ctrl->ctrl_base + 0x0ac, ln_ctrl0 | mask);
		ln_ctrl0 = MIPI_INP(ctrl->ctrl_base + 0x00ac);
		for (i = 0; i < loop; i++) {
			ln0 = MIPI_INP(ctrl->ctrl_base + 0x00a8);
			if (ln0 == 0x1f1f)
				break;
			/* Check clk lane stopState for every 200us */
			udelay(u_dly);
		}
		if (i == loop) {
			MDSS_XLOG(ctrl->ndx, ln0, 0x1f1f);
			pr_err("%s: Clock lane still in stop state\n",
					__func__);
			MDSS_XLOG_TOUT_HANDLER("mdp", "dsi0_ctrl", "dsi0_phy",
				"dsi1_ctrl", "dsi1_phy", "panic");
		}
		pr_debug("%s: lane status = 0x%x\n",
			 __func__, ln0);
		MIPI_OUTP(ctrl->ctrl_base + 0x0ac, ln_ctrl0 & ~mask);

		/* Enable Video mode for DSI controller */
		MIPI_OUTP(ctrl->ctrl_base + 0x004, data0);
		/* Enable PHY contention detection and receiver */
		MIPI_OUTP((ctrl->phy_io.base) + 0x0188, 0x6);
		/*
		 * Add sufficient delay to make sure
		 * pixel transmission as started
		 */
		udelay(200);
	}
	pr_debug("Recovery done\n");
}
+
+void mdss_dsi_err_intr_ctrl(struct mdss_dsi_ctrl_pdata *ctrl, u32 mask,
+					int enable)
+{
+	u32 intr;
+
+	intr = MIPI_INP(ctrl->ctrl_base + 0x0110);
+	intr &= DSI_INTR_TOTAL_MASK;
+
+	if (enable)
+		intr |= mask;
+	else
+		intr &= ~mask;
+
+	pr_debug("%s: intr=%x enable=%d\n", __func__, intr, enable);
+
+	MIPI_OUTP(ctrl->ctrl_base + 0x0110, intr); /* DSI_INTL_CTRL */
+}
+
/**
 * mdss_dsi_controller_cfg() - enable or disable the DSI controller
 * @enable: non-zero to set the controller enable bit, zero to clear it
 * @pdata: panel data; owning controller recovered via container_of()
 *
 * Before touching the enable bit, waits (up to 16ms per check) for command
 * DMA to finish, for the per-lane HS FIFOs to drain, and for the video
 * engine to go idle. A stuck video engine is handled with a soft reset
 * (without restoring the old CTRL value, since it is rewritten below).
 */
void mdss_dsi_controller_cfg(int enable,
			     struct mdss_panel_data *pdata)
{

	u32 dsi_ctrl;
	u32 status;
	u32 sleep_us = 1000;
	u32 timeout_us = 16000;
	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;

	if (pdata == NULL) {
		pr_err("%s: Invalid input data\n", __func__);
		return;
	}

	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
				panel_data);

	/* Check for CMD_MODE_DMA_BUSY */
	if (readl_poll_timeout(((ctrl_pdata->ctrl_base) + 0x0008),
			   status,
			   ((status & 0x02) == 0),
			       sleep_us, timeout_us))
		pr_info("%s: DSI status=%x failed\n", __func__, status);

	/* Check for x_HS_FIFO_EMPTY */
	if (readl_poll_timeout(((ctrl_pdata->ctrl_base) + 0x000c),
			   status,
			   ((status & 0x11111000) == 0x11111000),
			       sleep_us, timeout_us))
		pr_info("%s: FIFO status=%x failed\n", __func__, status);

	/* Check for VIDEO_MODE_ENGINE_BUSY */
	if (readl_poll_timeout(((ctrl_pdata->ctrl_base) + 0x0008),
			   status,
			   ((status & 0x08) == 0),
			       sleep_us, timeout_us)) {
		pr_debug("%s: DSI status=%x\n", __func__, status);
		pr_debug("%s: Doing sw reset\n", __func__);
		mdss_dsi_sw_reset(ctrl_pdata, false);
	}

	/* read-modify-write of the DSI_CTRL enable bit only */
	dsi_ctrl = MIPI_INP((ctrl_pdata->ctrl_base) + 0x0004);
	if (enable)
		dsi_ctrl |= 0x01;
	else
		dsi_ctrl &= ~0x01;

	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0004, dsi_ctrl);
	wmb(); /* ensure write is finished before progressing */
}
+
+void mdss_dsi_restore_intr_mask(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	u32 mask;
+
+	mask = MIPI_INP((ctrl->ctrl_base) + 0x0110);
+	mask &= DSI_INTR_TOTAL_MASK;
+	mask |= (DSI_INTR_CMD_DMA_DONE_MASK | DSI_INTR_ERROR_MASK |
+				DSI_INTR_BTA_DONE_MASK);
+	MIPI_OUTP((ctrl->ctrl_base) + 0x0110, mask);
+}
+
/**
 * mdss_dsi_op_mode_config() - select video or command operating mode
 * @mode: DSI_VIDEO_MODE or command mode (anything else)
 * @pdata: panel data; owning controller recovered via container_of()
 *
 * Rewrites the mode-engine bits of DSI_CTRL (0x04), the interrupt mask
 * (0x110) appropriate for that mode, and the command DMA control register
 * (0x3c). If the video engine is currently enabled it is kept enabled while
 * switching, and a video panel asked for command mode keeps the video engine
 * on as well.
 */
void mdss_dsi_op_mode_config(int mode,
			     struct mdss_panel_data *pdata)
{
	u32 dsi_ctrl, intr_ctrl, dma_ctrl;
	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;

	if (pdata == NULL) {
		pr_err("%s: Invalid input data\n", __func__);
		return;
	}

	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
				panel_data);

	dsi_ctrl = MIPI_INP((ctrl_pdata->ctrl_base) + 0x0004);
	/*If Video enabled, Keep Video and Cmd mode ON */
	if (dsi_ctrl & 0x02)
		dsi_ctrl &= ~0x05;
	else
		dsi_ctrl &= ~0x07;

	if (mode == DSI_VIDEO_MODE) {
		dsi_ctrl |= 0x03;
		intr_ctrl = DSI_INTR_CMD_DMA_DONE_MASK | DSI_INTR_BTA_DONE_MASK
			| DSI_INTR_ERROR_MASK;
	} else {		/* command mode */
		dsi_ctrl |= 0x05;
		if (pdata->panel_info.type == MIPI_VIDEO_PANEL)
			dsi_ctrl |= 0x02;

		intr_ctrl = DSI_INTR_CMD_DMA_DONE_MASK | DSI_INTR_ERROR_MASK |
			DSI_INTR_CMD_MDP_DONE_MASK | DSI_INTR_BTA_DONE_MASK;
	}

	dma_ctrl = BIT(28) | BIT(26);	/* embedded mode & LP mode */
	if (mdss_dsi_sync_wait_enable(ctrl_pdata))
		dma_ctrl |= BIT(31);	/* broadcast to both controllers */

	pr_debug("%s: configuring ctrl%d\n", __func__, ctrl_pdata->ndx);
	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0110, intr_ctrl);
	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0004, dsi_ctrl);
	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x003c, dma_ctrl);
	wmb(); /* ensure dsi op mode config is finished */
}
+
/**
 * mdss_dsi_cmd_bta_sw_trigger() - fire a software BTA and wait for it
 * @pdata: panel data; owning controller recovered via container_of()
 *
 * Writes the BTA software trigger (0x98), busy-polls the status register
 * (0x08) for up to 10ms until the BTA-busy bit clears, then services any
 * ack-error status the panel returned during the turnaround.
 */
void mdss_dsi_cmd_bta_sw_trigger(struct mdss_panel_data *pdata)
{
	u32 status;
	int timeout_us = 10000;
	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;

	if (pdata == NULL) {
		pr_err("%s: Invalid input data\n", __func__);
		return;
	}

	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
				panel_data);

	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x098, 0x01);	/* trigger */
	wmb(); /* ensure write is finished before progressing */

	/* Check for CMD_MODE_DMA_BUSY */
	if (readl_poll_timeout(((ctrl_pdata->ctrl_base) + 0x0008),
				status, ((status & 0x0010) == 0),
				0, timeout_us))
		pr_info("%s: DSI status=%x failed\n", __func__, status);

	/* clear/handle any ack-error reported by the panel */
	mdss_dsi_ack_err_status(ctrl_pdata);

	pr_debug("%s: BTA done, status = %d\n", __func__, status);
}
+
+static int mdss_dsi_read_status(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	int i, rc, *lenp;
+	int start = 0;
+	struct dcs_cmd_req cmdreq;
+
+	rc = 1;
+	lenp = ctrl->status_valid_params ?: ctrl->status_cmds_rlen;
+
+	for (i = 0; i < ctrl->status_cmds.cmd_cnt; ++i) {
+		memset(&cmdreq, 0, sizeof(cmdreq));
+		cmdreq.cmds = ctrl->status_cmds.cmds + i;
+		cmdreq.cmds_cnt = 1;
+		cmdreq.flags = CMD_REQ_COMMIT | CMD_REQ_RX;
+		cmdreq.rlen = ctrl->status_cmds_rlen[i];
+		cmdreq.cb = NULL;
+		cmdreq.rbuf = ctrl->status_buf.data;
+
+		if (ctrl->status_cmds.link_state == DSI_LP_MODE)
+			cmdreq.flags  |= CMD_REQ_LP_MODE;
+		else if (ctrl->status_cmds.link_state == DSI_HS_MODE)
+			cmdreq.flags |= CMD_REQ_HS_MODE;
+
+		rc = mdss_dsi_cmdlist_put(ctrl, &cmdreq);
+		if (rc <= 0) {
+			pr_err("%s: get status: fail\n", __func__);
+			return rc;
+		}
+
+		memcpy(ctrl->return_buf + start,
+			ctrl->status_buf.data, lenp[i]);
+		start += lenp[i];
+	}
+
+	return rc;
+}
+
+
+/**
+ * mdss_dsi_reg_status_check() - Check dsi panel status through reg read
+ * @ctrl_pdata: pointer to the dsi controller structure
+ *
+ * This function can be used to check the panel status through reading the
+ * status register from the panel.
+ *
+ * Return: positive value if the panel is in good state, negative value or
+ * zero otherwise.
+ */
+int mdss_dsi_reg_status_check(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	int ret = 0;
+	struct mdss_dsi_ctrl_pdata *sctrl_pdata = NULL;
+
+	if (ctrl_pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return 0;
+	}
+
+	pr_debug("%s: Checking Register status\n", __func__);
+
+	mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+			  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_ON);
+
+	sctrl_pdata = mdss_dsi_get_other_ctrl(ctrl_pdata);
+	if (!mdss_dsi_sync_wait_enable(ctrl_pdata)) {
+		ret = mdss_dsi_read_status(ctrl_pdata);
+	} else {
+		/*
+		 * Read commands to check ESD status are usually sent at
+		 * the same time to both the controllers. However, if
+		 * sync_wait is enabled, we need to ensure that the
+		 * dcs commands are first sent to the non-trigger
+		 * controller so that when the commands are triggered,
+		 * both controllers receive it at the same time.
+		 */
+		if (mdss_dsi_sync_wait_trigger(ctrl_pdata)) {
+			if (sctrl_pdata)
+				ret = mdss_dsi_read_status(sctrl_pdata);
+			ret = mdss_dsi_read_status(ctrl_pdata);
+		} else {
+			ret = mdss_dsi_read_status(ctrl_pdata);
+			if (sctrl_pdata)
+				ret = mdss_dsi_read_status(sctrl_pdata);
+		}
+	}
+
+	/*
+	 * mdss_dsi_read_status returns the number of bytes returned
+	 * by the panel. Success value is greater than zero and failure
+	 * case returns zero.
+	 */
+	if (ret > 0) {
+		if (!mdss_dsi_sync_wait_enable(ctrl_pdata) ||
+			mdss_dsi_sync_wait_trigger(ctrl_pdata))
+			ret = ctrl_pdata->check_read_status(ctrl_pdata);
+		else if (sctrl_pdata)
+			ret = ctrl_pdata->check_read_status(sctrl_pdata);
+	} else {
+		pr_err("%s: Read status register returned error\n", __func__);
+	}
+
+	mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+			  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_OFF);
+	pr_debug("%s: Read register done with ret: %d\n", __func__, ret);
+
+	return ret;
+}
+
/**
 * mdss_dsi_dsc_config() - program the DSI compression (DSC) registers
 * @ctrl: DSI controller data
 * @dsc: DSC descriptor (packet sizing, slice geometry)
 *
 * Programs the video- or command-mode compression control registers with the
 * packet data type, packets-per-line encoding, EOL byte count and the enable
 * bit. Register offsets come from the MDSS_DSI_*_COMPRESSION_MODE_CTRL*
 * macros.
 */
void mdss_dsi_dsc_config(struct mdss_dsi_ctrl_pdata *ctrl, struct dsc_desc *dsc)
{
	u32 data, offset;

	if (dsc->pkt_per_line <= 0) {
		pr_err("%s: Error: pkt_per_line cannot be negative or 0\n",
			__func__);
		return;
	}

	if (ctrl->panel_mode == DSI_VIDEO_MODE) {
		MIPI_OUTP((ctrl->ctrl_base) +
			MDSS_DSI_VIDEO_COMPRESSION_MODE_CTRL2, 0);
		data = dsc->bytes_per_pkt << 16;
		data |= (0x0b << 8);	/*  dtype of compressed image */
		offset = MDSS_DSI_VIDEO_COMPRESSION_MODE_CTRL;
	} else {
		/* stream 0 */
		MIPI_OUTP((ctrl->ctrl_base) +
			MDSS_DSI_COMMAND_COMPRESSION_MODE_CTRL3, 0);

		MIPI_OUTP((ctrl->ctrl_base) +
			MDSS_DSI_COMMAND_COMPRESSION_MODE_CTRL2,
						dsc->bytes_in_slice);

		data = DTYPE_DCS_LWRITE << 8;
		offset = MDSS_DSI_COMMAND_COMPRESSION_MODE_CTRL;
	}

	/*
	 * pkt_per_line:
	 * 0 == 1 pkt
	 * 1 == 2 pkt
	 * 2 == 4 pkt
	 * 3 pkt is not support
	 *
	 * NOTE(review): pkt_per_line == 3 is documented as unsupported but is
	 * not rejected above; it falls into the else branch and encodes as
	 * (3-1)<<6, i.e. the 4-pkt setting — confirm whether callers can ever
	 * pass 3.
	 */
	if (dsc->pkt_per_line == 4)
		data |= (dsc->pkt_per_line - 2) << 6;
	else
		data |= (dsc->pkt_per_line - 1) << 6;
	data |= dsc->eol_byte_num << 4;
	data |= 1;	/* enable */
	MIPI_OUTP((ctrl->ctrl_base) + offset, data);
}
+
+void mdss_dsi_set_burst_mode(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	u32 data;
+
+	if (ctrl->shared_data->hw_rev < MDSS_DSI_HW_REV_103)
+		return;
+
+	data = MIPI_INP(ctrl->ctrl_base + 0x1b8);
+
+	/*
+	 * idle and burst mode are mutually exclusive features,
+	 * so disable burst mode if idle has been configured for
+	 * the panel, otherwise enable the feature.
+	 */
+	if (ctrl->idle_enabled)
+		data &= ~BIT(16); /* disable burst mode */
+	else
+		data |= BIT(16); /* enable burst mode */
+
+	ctrl->burst_mode_enabled = !ctrl->idle_enabled;
+
+	MIPI_OUTP((ctrl->ctrl_base + 0x1b8), data);
+	pr_debug("%s: burst=%d\n", __func__, ctrl->burst_mode_enabled);
+
+}
+
/**
 * mdss_dsi_mode_setup() - program per-mode timing/stream registers
 * @pdata: panel data; owning controller recovered via container_of()
 *
 * Video mode: computes the active/porch geometry (scaled for FBC target bpp
 * and widened by any border/dummy pixels) and writes the H/V active, total
 * and sync-width registers (0x24..0x38), using the timing double-buffer
 * registers (0x1e4/0x1e8) when supported. Command mode: derives the MDP
 * stream control/total words from the destination format, DSC parameters or
 * the partial-update ROI, configures NULL-packet insertion on v1.4+
 * controllers, and writes both stream register pairs (0x58/0x60,
 * 0x5C/0x64). Finally programs the DSC block when compression is enabled.
 */
static void mdss_dsi_mode_setup(struct mdss_panel_data *pdata)
{
	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
	struct mdss_panel_info *pinfo;
	struct mipi_panel_info *mipi;
	struct dsc_desc *dsc = NULL;
	u32 data = 0;
	u32 hbp, hfp, vbp, vfp, hspw, vspw, width, height;
	u32 ystride, bpp, dst_bpp, byte_num;
	u32 stream_ctrl, stream_total;
	u32 dummy_xres = 0, dummy_yres = 0;
	u32 hsync_period, vsync_period;

	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
				panel_data);

	pinfo = &pdata->panel_info;
	if (pinfo->compression_mode == COMPRESSION_DSC)
		dsc = &pinfo->dsc;

	/* with FBC, horizontal sizes are scaled to the compressed bpp */
	dst_bpp = pdata->panel_info.fbc.enabled ?
		(pdata->panel_info.fbc.target_bpp) : (pinfo->bpp);

	hbp = pdata->panel_info.lcdc.h_back_porch;
	hfp = pdata->panel_info.lcdc.h_front_porch;
	vbp = pdata->panel_info.lcdc.v_back_porch;
	vfp = pdata->panel_info.lcdc.v_front_porch;
	hspw = pdata->panel_info.lcdc.h_pulse_width;
	vspw = pdata->panel_info.lcdc.v_pulse_width;
	width = mult_frac(pdata->panel_info.xres, dst_bpp,
			pdata->panel_info.bpp);
	height = pdata->panel_info.yres;
	pr_debug("%s: fbc=%d width=%d height=%d dst_bpp=%d\n", __func__,
			pdata->panel_info.fbc.enabled, width, height, dst_bpp);

	if (dsc)	/* compressed */
		width = dsc->pclk_per_line;

	/* border pixels extend the active region on video panels */
	if (pdata->panel_info.type == MIPI_VIDEO_PANEL) {
		dummy_xres = mult_frac((pdata->panel_info.lcdc.border_left +
				pdata->panel_info.lcdc.border_right),
				dst_bpp, pdata->panel_info.bpp);
		dummy_yres = pdata->panel_info.lcdc.border_top +
				pdata->panel_info.lcdc.border_bottom;
	}

	mipi = &pdata->panel_info.mipi;
	if (pdata->panel_info.type == MIPI_VIDEO_PANEL) {
		vsync_period = vspw + vbp + height + dummy_yres + vfp;
		hsync_period = hspw + hbp + width + dummy_xres + hfp;

		/* arm the timing double buffer before writing timings */
		if (ctrl_pdata->timing_db_mode)
			MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x1e8, 0x1);
		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x24,
			((hspw + hbp + width + dummy_xres) << 16 |
			(hspw + hbp)));
		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x28,
			((vspw + vbp + height + dummy_yres) << 16 |
			(vspw + vbp)));
		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x2C,
				((vsync_period - 1) << 16)
				| (hsync_period - 1));

		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x30, (hspw << 16));
		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x34, 0);
		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x38, (vspw << 16));
		/* latch the double-buffered timing registers */
		if (ctrl_pdata->timing_db_mode)
			MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x1e4, 0x1);
	} else {		/* command mode */
		if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB888)
			bpp = 3;
		else if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB666)
			bpp = 3;
		else if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB565)
			bpp = 2;
		else
			bpp = 3;	/* Default format set to RGB888 */

		/* +1 byte for the DCS command byte in each line packet */
		ystride = width * bpp + 1;

		if (dsc) {
			byte_num =  dsc->bytes_per_pkt;
			if (pinfo->mipi.insert_dcs_cmd)
				byte_num++;

			stream_ctrl = (byte_num << 16) |
					(mipi->vc << 8) | DTYPE_DCS_LWRITE;
			stream_total = dsc->pic_height << 16 |
							dsc->pclk_per_line;
		} else if (pinfo->partial_update_enabled &&
			mdss_dsi_is_panel_on(pdata) && pinfo->roi.w &&
			pinfo->roi.h) {
			stream_ctrl = (((pinfo->roi.w * bpp) + 1) << 16) |
					(mipi->vc << 8) | DTYPE_DCS_LWRITE;
			stream_total = pinfo->roi.h << 16 | pinfo->roi.w;
		} else {
			stream_ctrl = (ystride << 16) | (mipi->vc << 8) |
					DTYPE_DCS_LWRITE;
			stream_total = height << 16 | width;
		}

		/* DSI_COMMAND_MODE_NULL_INSERTION_CTRL */
		if ((ctrl_pdata->shared_data->hw_rev >= MDSS_DSI_HW_REV_104)
			&& ctrl_pdata->null_insert_enabled) {
			data = (mipi->vc << 1); /* Virtual channel ID */
			data |= 0 << 16; /* Word count of the NULL packet */
			data |= 0x1; /* Enable Null insertion */
			MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x2b4, data);
		}

		mdss_dsi_set_burst_mode(ctrl_pdata);

		/* DSI_COMMAND_MODE_MDP_STREAM_CTRL */
		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x60, stream_ctrl);
		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x58, stream_ctrl);

		/* DSI_COMMAND_MODE_MDP_STREAM_TOTAL */
		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x64, stream_total);
		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x5C, stream_total);
	}

	if (dsc)	/* compressed */
		mdss_dsi_dsc_config(ctrl_pdata, dsc);
}
+
+/*
+ * mdss_dsi_ctrl_setup() - run the full DSI host setup sequence for a ctrl
+ * @ctrl: pointer to the dsi controller structure
+ *
+ * Programs the mode (timing/stream) registers, performs host init and
+ * finally applies the video/command operating-mode configuration taken
+ * from the panel info.
+ */
+void mdss_dsi_ctrl_setup(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	struct mdss_panel_data *pdata = &ctrl->panel_data;
+
+	pr_debug("%s: called for ctrl%d\n", __func__, ctrl->ndx);
+
+	mdss_dsi_mode_setup(pdata);
+	mdss_dsi_host_init(pdata);
+	mdss_dsi_op_mode_config(pdata->panel_info.mipi.mode, pdata);
+}
+
+/**
+ * mdss_dsi_bta_status_check() - Check dsi panel status through bta check
+ * @ctrl_pdata: pointer to the dsi controller structure
+ *
+ * This function can be used to check status of the panel using bta check
+ * for the panel.
+ *
+ * Return: positive value if the panel is in good state, negative value or
+ * zero otherwise.
+ */
+int mdss_dsi_bta_status_check(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	int ret = 0;
+	unsigned long flag;
+	int ignore_underflow = 0;
+
+	if (ctrl_pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+
+		/*
+		 * This should not return error otherwise
+		 * BTA status thread will treat it as dead panel scenario
+		 * and request for blank/unblank
+		 */
+		return 0;
+	}
+
+	mutex_lock(&ctrl_pdata->cmd_mutex);
+
+	/* video mode: overflow errors around the BTA are masked below */
+	if (ctrl_pdata->panel_mode == DSI_VIDEO_MODE)
+		ignore_underflow = 1;
+
+	pr_debug("%s: Checking BTA status\n", __func__);
+
+	/* clocks must stay on for the whole trigger/wait sequence */
+	mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+			  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_ON);
+	/* arm the BTA completion/irq before triggering */
+	spin_lock_irqsave(&ctrl_pdata->mdp_lock, flag);
+	reinit_completion(&ctrl_pdata->bta_comp);
+	mdss_dsi_enable_irq(ctrl_pdata, DSI_BTA_TERM);
+	spin_unlock_irqrestore(&ctrl_pdata->mdp_lock, flag);
+	/* mask out overflow errors */
+	if (ignore_underflow)
+		mdss_dsi_set_reg(ctrl_pdata, 0x10c, 0x0f0000, 0x0f0000);
+	MIPI_OUTP(ctrl_pdata->ctrl_base + 0x098, 0x01); /* trigger  */
+	wmb(); /* ensure write is finished before progressing */
+
+	ret = wait_for_completion_killable_timeout(&ctrl_pdata->bta_comp,
+						DSI_BTA_EVENT_TIMEOUT);
+	if (ret <= 0) {
+		/* timeout or interrupted: irq never fired, tear it down */
+		mdss_dsi_disable_irq(ctrl_pdata, DSI_BTA_TERM);
+		pr_err("%s: DSI BTA error: %i\n", __func__, ret);
+	}
+
+	if (ignore_underflow) {
+		/* clear pending overflow status */
+		mdss_dsi_set_reg(ctrl_pdata, 0xc, 0xffffffff, 0x44440000);
+		/* restore overflow isr */
+		mdss_dsi_set_reg(ctrl_pdata, 0x10c, 0x0f0000, 0);
+	}
+
+	mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+			  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_OFF);
+	pr_debug("%s: BTA done with ret: %d\n", __func__, ret);
+
+	mutex_unlock(&ctrl_pdata->cmd_mutex);
+
+	return ret;
+}
+
+/*
+ * mdss_dsi_cmd_reg_tx() - transmit one 32-bit command word via registers
+ * @data: packed command word written to the controller
+ * @ctrl_base: mapped base address of the DSI controller registers
+ *
+ * Writes the command word directly to the controller register at 0x03c
+ * and triggers the transfer, rather than going through the DMA path.
+ * NOTE(review): the fixed udelay(300) and return value 4 (bytes written)
+ * are as-is from the original; callers appear to rely on both.
+ */
+int mdss_dsi_cmd_reg_tx(u32 data,
+			unsigned char *ctrl_base)
+{
+	int i;
+	char *bp;
+
+	/* dump the 4 command bytes for debugging */
+	bp = (char *)&data;
+	pr_debug("%s: ", __func__);
+	for (i = 0; i < 4; i++)
+		pr_debug("%x ", *bp++);
+
+	pr_debug("\n");
+
+	MIPI_OUTP(ctrl_base + 0x0084, 0x04);/* sw trigger */
+	MIPI_OUTP(ctrl_base + 0x0004, 0x135);
+
+	wmb(); /* ensure write is finished before progressing */
+
+	MIPI_OUTP(ctrl_base + 0x03c, data);
+	wmb(); /* ensure write is finished before progressing */
+	MIPI_OUTP(ctrl_base + 0x090, 0x01);	/* trigger */
+	wmb(); /* ensure write is finished before progressing */
+
+	udelay(300);
+
+	return 4;
+}
+
+static int mdss_dsi_wait4video_eng_busy(struct mdss_dsi_ctrl_pdata *ctrl);
+
+static int mdss_dsi_cmd_dma_tx(struct mdss_dsi_ctrl_pdata *ctrl,
+					struct dsi_buf *tp);
+
+static int mdss_dsi_cmd_dma_rx(struct mdss_dsi_ctrl_pdata *ctrl,
+			struct dsi_buf *rp, int rlen);
+
+/*
+ * mdss_dsi_cmd_dma_tpg_tx() - send a packed command through the TPG FIFO
+ * @ctrl: dsi controller
+ * @tp: tx buffer holding the packed command
+ *
+ * Instead of fetching the command from memory over DMA, the command
+ * words are written into the controller's test-pattern-generator FIFO
+ * (DMA_INIT_VAL register) and transmitted from there. Used e.g. when a
+ * memory fetch is not possible. Requires DSI h/w rev >= 1.3.0 and a
+ * command that fits in the FIFO. Mirrors the writes to the other
+ * controller when sync-wait broadcast is in effect.
+ *
+ * Return: tp->len on success, negative error code otherwise.
+ */
+static int mdss_dsi_cmd_dma_tpg_tx(struct mdss_dsi_ctrl_pdata *ctrl,
+					struct dsi_buf *tp)
+{
+	int len, i, ret = 0, data = 0;
+	u32 *bp;
+	struct mdss_dsi_ctrl_pdata *mctrl = NULL;
+
+	if (tp->len > DMA_TPG_FIFO_LEN) {
+		pr_debug("command length more than FIFO length\n");
+		return -EINVAL;
+	}
+
+	if (ctrl->shared_data->hw_rev < MDSS_DSI_HW_REV_103) {
+		pr_err("CMD DMA TPG not supported for this DSI version\n");
+		return -EINVAL;
+	}
+
+	bp = (u32 *)tp->data;
+	len = ALIGN(tp->len, 4);
+
+	reinit_completion(&ctrl->dma_comp);
+
+	if (mdss_dsi_sync_wait_trigger(ctrl))
+		mctrl = mdss_dsi_get_other_ctrl(ctrl);
+
+	data = BIT(16) | BIT(17);	/* select CMD_DMA_PATTERN_SEL to 3 */
+	data |= BIT(2);			/* select CMD_DMA_FIFO_MODE to 1 */
+	data |= BIT(1);			/* enable CMD_DMA_TPG */
+
+	MIPI_OUTP(ctrl->ctrl_base + 0x15c, data);
+	if (mctrl)
+		MIPI_OUTP(mctrl->ctrl_base + 0x15c, data);
+
+	/*
+	 * The DMA command parameters need to be programmed to the DMA_INIT_VAL
+	 * register in the proper order. The 'len' value will be a multiple
+	 * of 4, the padding bytes to make sure of this will be taken care of in
+	 * mdss_dsi_cmd_dma_add API.
+	 */
+	for (i = 0; i < len; i += 4) {
+		MIPI_OUTP(ctrl->ctrl_base + 0x17c, *bp);
+		if (mctrl)
+			MIPI_OUTP(mctrl->ctrl_base + 0x17c, *bp);
+		wmb(); /* make sure write happens before writing next command */
+		bp++;
+	}
+
+	/*
+	 * The number of writes to the DMA_INIT_VAL register should be an even
+	 * number of dwords (32 bits). In case 'len' is not a multiple of 8,
+	 * we need to do make an extra write to the register with 0x00 to
+	 * satisfy this condition.
+	 */
+	if ((len % 8) != 0) {
+		MIPI_OUTP(ctrl->ctrl_base + 0x17c, 0x00);
+		if (mctrl)
+			MIPI_OUTP(mctrl->ctrl_base + 0x17c, 0x00);
+	}
+
+	/* program the other controller first so one trigger serves both */
+	if (mctrl) {
+		MIPI_OUTP(mctrl->ctrl_base + 0x04c, len);
+		MIPI_OUTP(mctrl->ctrl_base + 0x090, 0x01); /* trigger */
+	}
+	MIPI_OUTP(ctrl->ctrl_base + 0x04c, len);
+	wmb(); /* make sure DMA length is programmed */
+
+	MIPI_OUTP(ctrl->ctrl_base + 0x090, 0x01); /* trigger */
+	wmb(); /* make sure DMA trigger happens */
+
+	ret = wait_for_completion_timeout(&ctrl->dma_comp,
+				msecs_to_jiffies(DMA_TX_TIMEOUT));
+	if (ret == 0)
+		ret = -ETIMEDOUT;
+	else
+		ret = tp->len;
+
+	/* Reset the DMA TPG FIFO */
+	MIPI_OUTP(ctrl->ctrl_base + 0x1ec, 0x1);
+	wmb(); /* make sure FIFO reset happens */
+	MIPI_OUTP(ctrl->ctrl_base + 0x1ec, 0x0);
+	wmb(); /* make sure FIFO reset happens */
+	/* Disable CMD_DMA_TPG */
+	MIPI_OUTP(ctrl->ctrl_base + 0x15c, 0x0);
+
+	if (mctrl) {
+		/* Reset the DMA TPG FIFO */
+		MIPI_OUTP(mctrl->ctrl_base + 0x1ec, 0x1);
+		wmb(); /* make sure FIFO reset happens */
+		MIPI_OUTP(mctrl->ctrl_base + 0x1ec, 0x0);
+		wmb(); /* make sure FIFO reset happens */
+		/* Disable CMD_DMA_TPG */
+		MIPI_OUTP(mctrl->ctrl_base + 0x15c, 0x0);
+	}
+
+	return ret;
+}
+
+/*
+ * mdss_dsi_cmds2buf_tx() - pack command descriptors and transmit them
+ * @ctrl: dsi controller
+ * @cmds: array of command descriptors
+ * @cnt: number of descriptors in @cmds
+ * @use_dma_tpg: non-zero to send through the TPG FIFO instead of DMA
+ *
+ * Packs consecutive descriptors into the ctrl tx buffer; whenever a
+ * descriptor marked 'last' is reached the accumulated buffer is sent
+ * as one DMA (or TPG) transfer and the buffer is reset. Each
+ * descriptor's post-transfer wait (dchdr->wait, ms) is honored.
+ *
+ * Return: total number of bytes queued, or 0 on any failure.
+ */
+static int mdss_dsi_cmds2buf_tx(struct mdss_dsi_ctrl_pdata *ctrl,
+			struct dsi_cmd_desc *cmds, int cnt, int use_dma_tpg)
+{
+	struct dsi_buf *tp;
+	struct dsi_cmd_desc *cm;
+	struct dsi_ctrl_hdr *dchdr;
+	int len, wait, tot = 0;
+
+	tp = &ctrl->tx_buf;
+	mdss_dsi_buf_init(tp);
+	cm = cmds;
+	len = 0;
+	while (cnt--) {
+		dchdr = &cm->dchdr;
+		mdss_dsi_buf_reserve(tp, len);
+		len = mdss_dsi_cmd_dma_add(tp, cm);
+		if (!len) {
+			pr_err("%s: failed to add cmd = 0x%x\n",
+				__func__,  cm->payload[0]);
+			return 0;
+		}
+		tot += len;
+		if (dchdr->last) {
+			tp->data = tp->start; /* begin of buf */
+
+			/* in video mode this waits for BLLP; see helper */
+			wait = mdss_dsi_wait4video_eng_busy(ctrl);
+
+			mdss_dsi_enable_irq(ctrl, DSI_CMD_TERM);
+			if (use_dma_tpg)
+				len = mdss_dsi_cmd_dma_tpg_tx(ctrl, tp);
+			else
+				len = mdss_dsi_cmd_dma_tx(ctrl, tp);
+			/*
+			 * NOTE(review): IS_ERR_VALUE() on an int depends on
+			 * sign extension; verify against this kernel tree's
+			 * IS_ERR_VALUE definition.
+			 */
+			if (IS_ERR_VALUE(len)) {
+				mdss_dsi_disable_irq(ctrl, DSI_CMD_TERM);
+				pr_err("%s: failed to call cmd_dma_tx for cmd = 0x%x\n",
+					__func__,  cm->payload[0]);
+				return 0;
+			}
+			pr_debug("%s: cmd_dma_tx for cmd = 0x%x, len = %d\n",
+					__func__,  cm->payload[0], len);
+
+			if (!wait || dchdr->wait > VSYNC_PERIOD)
+				usleep_range(dchdr->wait * 1000,
+					     dchdr->wait * 1000);
+
+			mdss_dsi_buf_init(tp);
+			len = 0;
+		}
+		cm++;
+	}
+	return tot;
+}
+
+/**
+ * __mdss_dsi_cmd_mode_config() - Enable/disable command mode engine
+ * @ctrl: pointer to the dsi controller structure
+ * @enable: true to enable command mode, false to disable command mode
+ *
+ * This function can be used to temporarily enable the command mode
+ * engine (even for video mode panels) so as to transfer any dma commands to
+ * the panel. It can also be used to disable the command mode engine
+ * when no longer needed.
+ *
+ * Return: true, if there was a mode switch to command mode for video mode
+ * panels.
+ */
+static inline bool __mdss_dsi_cmd_mode_config(
+	struct mdss_dsi_ctrl_pdata *ctrl, bool enable)
+{
+	bool mode_changed = false;
+	u32 dsi_ctrl;
+
+	/* read-modify-write DSI_CTRL (0x0004) */
+	dsi_ctrl = MIPI_INP((ctrl->ctrl_base) + 0x0004);
+	/* if currently in video mode, enable command mode */
+	if (enable) {
+		/* BIT(1): video engine active; BIT(2): cmd engine enable */
+		if ((dsi_ctrl) & BIT(1)) {
+			MIPI_OUTP((ctrl->ctrl_base) + 0x0004,
+				dsi_ctrl | BIT(2));
+			mode_changed = true;
+		}
+	} else {
+		/* unconditionally clear the command mode engine bit */
+		MIPI_OUTP((ctrl->ctrl_base) + 0x0004, dsi_ctrl & ~BIT(2));
+	}
+
+	return mode_changed;
+}
+
+/*
+ * mdss_dsi_cmds_tx:
+ * thread context only
+ *
+ * Transmit @cnt commands from @cmds. Honors dual-DSI sync-wait rules:
+ * the trigger controller first enables the command engine on the other
+ * controller; a non-trigger controller in broadcast (non-unicast) mode
+ * returns 0 and lets the trigger-side cmd_trigger do the actual send.
+ *
+ * Return: total bytes sent; 0 on failure or when deferred to trigger.
+ */
+int mdss_dsi_cmds_tx(struct mdss_dsi_ctrl_pdata *ctrl,
+		struct dsi_cmd_desc *cmds, int cnt, int use_dma_tpg)
+{
+	int len = 0;
+	struct mdss_dsi_ctrl_pdata *mctrl = NULL;
+
+	/*
+	 * Turn on cmd mode in order to transmit the commands.
+	 * For video mode, do not send cmds more than one pixel line,
+	 * since it only transmit it during BLLP.
+	 */
+
+	if (mdss_dsi_sync_wait_enable(ctrl)) {
+		if (mdss_dsi_sync_wait_trigger(ctrl)) {
+			mctrl = mdss_dsi_get_other_ctrl(ctrl);
+			if (!mctrl) {
+				pr_warn("%s: sync_wait, NULL at other control\n",
+							__func__);
+				goto do_send;
+			}
+
+			/* remember whether the other ctrl needs a restore */
+			mctrl->cmd_cfg_restore =
+					__mdss_dsi_cmd_mode_config(mctrl, 1);
+		} else if (!ctrl->do_unicast) {
+			/* broadcast cmds, let cmd_trigger do it */
+			return 0;
+
+		}
+	}
+
+	pr_debug("%s: ctrl=%d do_unicast=%d\n", __func__,
+				ctrl->ndx, ctrl->do_unicast);
+
+do_send:
+	ctrl->cmd_cfg_restore = __mdss_dsi_cmd_mode_config(ctrl, 1);
+
+	len = mdss_dsi_cmds2buf_tx(ctrl, cmds, cnt, use_dma_tpg);
+	if (!len)
+		pr_err("%s: failed to call\n", __func__);
+
+	/* unicast path restores elsewhere; otherwise undo mode switches */
+	if (!ctrl->do_unicast) {
+		if (mctrl && mctrl->cmd_cfg_restore) {
+			__mdss_dsi_cmd_mode_config(mctrl, 0);
+			mctrl->cmd_cfg_restore = false;
+		}
+
+		if (ctrl->cmd_cfg_restore) {
+			__mdss_dsi_cmd_mode_config(ctrl, 0);
+			ctrl->cmd_cfg_restore = false;
+		}
+	}
+
+	return len;
+}
+
+/* MIPI_DSI_MRPS, Maximum Return Packet Size */
+/*
+ * max_pktsize[0] is overwritten with the requested MRPS value before
+ * each read (see mdss_dsi_cmds_rx); the {0, 0} initializer is only a
+ * placeholder. pkt_size_cmd wraps it as a DTYPE_MAX_PKTSIZE descriptor.
+ */
+static char max_pktsize[2] = {0x00, 0x00}; /* LSB tx first, 10 bytes */
+
+static struct dsi_cmd_desc pkt_size_cmd = {
+	{DTYPE_MAX_PKTSIZE, 1, 0, 0, 0, sizeof(max_pktsize)},
+	max_pktsize,
+};
+
+/*
+ * mdss_dsi_cmds_rx() - dcs read from panel
+ * @ctrl: dsi controller
+ * @cmds: read command descriptor
+ * @rlen: number of bytes to read back
+ * @use_dma_tpg: non-zero to transmit the read command via the TPG FIFO
+ *
+ * controller have 4 registers can hold 16 bytes of rxed data
+ * dcs packet: 4 bytes header + payload + 2 bytes crc
+ * 1st read: 4 bytes header + 10 bytes payload + 2 crc
+ * 2nd read: 14 bytes payload + 2 crc
+ * 3rd read: 14 bytes payload + 2 crc
+ *
+ * Sends a Set Maximum Return Packet Size command (skipped when the
+ * controller already has the requested size cached), transmits the read
+ * command, then drains the RDBK_DATA registers in as many passes as
+ * needed, de-duplicating bytes that repeat between passes.
+ *
+ * Return: number of bytes read back (rp->read_cnt), 0 on failure.
+ */
+int mdss_dsi_cmds_rx(struct mdss_dsi_ctrl_pdata *ctrl,
+			struct dsi_cmd_desc *cmds, int rlen, int use_dma_tpg)
+{
+	int data_byte, rx_byte, dlen, end;
+	int short_response, diff, pkt_size, ret = 0;
+	struct dsi_buf *tp, *rp;
+	char cmd;
+	struct mdss_dsi_ctrl_pdata *mctrl = NULL;
+
+
+	if (ctrl->panel_data.panel_info.panel_ack_disabled) {
+		pr_err("%s: ACK from Client not supported\n", __func__);
+		return rlen;
+	}
+
+	if (rlen == 0) {
+		pr_debug("%s: Minimum MRPS value should be 1\n", __func__);
+		return 0;
+	}
+
+	/*
+	 * Turn on cmd mode in order to transmit the commands.
+	 * For video mode, do not send cmds more than one pixel line,
+	 * since it only transmit it during BLLP.
+	 */
+	if (mdss_dsi_sync_wait_enable(ctrl)) {
+		if (mdss_dsi_sync_wait_trigger(ctrl)) {
+			mctrl = mdss_dsi_get_other_ctrl(ctrl);
+			if (!mctrl) {
+				pr_warn("%s: sync_wait, NULL at other control\n",
+							__func__);
+				goto do_send;
+			}
+
+			mctrl->cmd_cfg_restore =
+					__mdss_dsi_cmd_mode_config(mctrl, 1);
+		} else {
+			/* skip cmds, let cmd_trigger do it */
+			return 0;
+
+		}
+	}
+
+do_send:
+	ctrl->cmd_cfg_restore = __mdss_dsi_cmd_mode_config(ctrl, 1);
+
+	if (rlen <= 2) {
+		/* short read responses fit in one 4-byte packet */
+		short_response = 1;
+		pkt_size = rlen;
+		rx_byte = 4;
+	} else {
+		short_response = 0;
+		data_byte = 10;	/* first read */
+		if (rlen < data_byte)
+			pkt_size = rlen;
+		else
+			pkt_size = data_byte;
+		rx_byte = data_byte + 6; /* 4 header + 2 crc */
+	}
+
+
+	tp = &ctrl->tx_buf;
+	rp = &ctrl->rx_buf;
+
+	end = 0;
+	mdss_dsi_buf_init(rp);
+	while (!end) {
+		pr_debug("%s:  rlen=%d pkt_size=%d rx_byte=%d\n",
+				__func__, rlen, pkt_size, rx_byte);
+		/*
+		 * Skip max_pkt_size dcs cmd if
+		 * its already been configured
+		 * for the requested pkt_size
+		 */
+		if (pkt_size == ctrl->cur_max_pkt_size)
+			goto skip_max_pkt_size;
+
+		max_pktsize[0] = pkt_size;
+		mdss_dsi_buf_init(tp);
+		ret = mdss_dsi_cmd_dma_add(tp, &pkt_size_cmd);
+		if (!ret) {
+			pr_err("%s: failed to add max_pkt_size\n",
+				__func__);
+			rp->len = 0;
+			rp->read_cnt = 0;
+			goto end;
+		}
+
+		mdss_dsi_wait4video_eng_busy(ctrl);
+
+		mdss_dsi_enable_irq(ctrl, DSI_CMD_TERM);
+		if (use_dma_tpg)
+			ret = mdss_dsi_cmd_dma_tpg_tx(ctrl, tp);
+		else
+			ret = mdss_dsi_cmd_dma_tx(ctrl, tp);
+		if (IS_ERR_VALUE(ret)) {
+			mdss_dsi_disable_irq(ctrl, DSI_CMD_TERM);
+			pr_err("%s: failed to tx max_pkt_size\n",
+				__func__);
+			rp->len = 0;
+			rp->read_cnt = 0;
+			goto end;
+		}
+		ctrl->cur_max_pkt_size = pkt_size;
+		pr_debug("%s: max_pkt_size=%d sent\n",
+					__func__, pkt_size);
+
+skip_max_pkt_size:
+		mdss_dsi_buf_init(tp);
+		ret = mdss_dsi_cmd_dma_add(tp, cmds);
+		if (!ret) {
+			pr_err("%s: failed to add cmd = 0x%x\n",
+				__func__,  cmds->payload[0]);
+			rp->len = 0;
+			rp->read_cnt = 0;
+			goto end;
+		}
+
+		if (ctrl->shared_data->hw_rev >= MDSS_DSI_HW_REV_101) {
+			/* clear the RDBK_DATA registers */
+			MIPI_OUTP(ctrl->ctrl_base + 0x01d4, 0x1);
+			wmb(); /* make sure the RDBK registers are cleared */
+			MIPI_OUTP(ctrl->ctrl_base + 0x01d4, 0x0);
+			wmb(); /* make sure the RDBK registers are cleared */
+		}
+
+		mdss_dsi_wait4video_eng_busy(ctrl);	/* video mode only */
+		mdss_dsi_enable_irq(ctrl, DSI_CMD_TERM);
+		/* transmit read command to client */
+		if (use_dma_tpg)
+			ret = mdss_dsi_cmd_dma_tpg_tx(ctrl, tp);
+		else
+			ret = mdss_dsi_cmd_dma_tx(ctrl, tp);
+		if (IS_ERR_VALUE(ret)) {
+			mdss_dsi_disable_irq(ctrl, DSI_CMD_TERM);
+			pr_err("%s: failed to tx cmd = 0x%x\n",
+				__func__,  cmds->payload[0]);
+			rp->len = 0;
+			rp->read_cnt = 0;
+			goto end;
+		}
+
+		/*
+		 * once cmd_dma_done interrupt received,
+		 * return data from client is ready and stored
+		 * at RDBK_DATA register already
+		 * since rx fifo is 16 bytes, dcs header is kept at first loop,
+		 * after that dcs header lost during shift into registers
+		 */
+		dlen = mdss_dsi_cmd_dma_rx(ctrl, rp, rx_byte);
+
+		if (!dlen)
+			goto end;
+
+		if (short_response)
+			break;
+
+		if (rlen <= data_byte) {
+			diff = data_byte - rlen;
+			end = 1;
+		} else {
+			diff = 0;
+			rlen -= data_byte;
+		}
+
+		dlen -= 2; /* 2 crc */
+		dlen -= diff;
+		rp->data += dlen;	/* next start position */
+		rp->len += dlen;
+		if (!end) {
+			data_byte = 14; /* NOT first read */
+			if (rlen < data_byte)
+				pkt_size += rlen;
+			else
+				pkt_size += data_byte;
+		}
+		pr_debug("%s: rp data=%x len=%d dlen=%d diff=%d\n",
+			 __func__, (int) (unsigned long) rp->data,
+			 rp->len, dlen, diff);
+	}
+
+	/*
+	 * For single Long read, if the requested rlen < 10,
+	 * we need to shift the start position of rx
+	 * data buffer to skip the bytes which are not
+	 * updated.
+	 */
+	if (rp->read_cnt < 16 && !short_response)
+		rp->data = rp->start + (16 - rp->read_cnt);
+	else
+		rp->data = rp->start;
+	cmd = rp->data[0];
+	switch (cmd) {
+	case DTYPE_ACK_ERR_RESP:
+		pr_debug("%s: rx ACK_ERR_PACKAGE\n", __func__);
+		rp->len = 0;
+		rp->read_cnt = 0;
+		/* fall-through */
+	case DTYPE_GEN_READ1_RESP:
+	case DTYPE_DCS_READ1_RESP:
+		mdss_dsi_short_read1_resp(rp);
+		break;
+	case DTYPE_GEN_READ2_RESP:
+	case DTYPE_DCS_READ2_RESP:
+		mdss_dsi_short_read2_resp(rp);
+		break;
+	case DTYPE_GEN_LREAD_RESP:
+	case DTYPE_DCS_LREAD_RESP:
+		mdss_dsi_long_read_resp(rp);
+		break;
+	default:
+		pr_warn("%s:Invalid response cmd\n", __func__);
+		rp->len = 0;
+		rp->read_cnt = 0;
+	}
+end:
+
+	/* undo any temporary command-engine enables */
+	if (mctrl && mctrl->cmd_cfg_restore) {
+		__mdss_dsi_cmd_mode_config(mctrl, 0);
+		mctrl->cmd_cfg_restore = false;
+	}
+
+	if (ctrl->cmd_cfg_restore) {
+		__mdss_dsi_cmd_mode_config(ctrl, 0);
+		ctrl->cmd_cfg_restore = false;
+	}
+
+	if (rp->len && (rp->len != rp->read_cnt))
+		pr_err("Bytes read: %d requested:%d mismatch\n",
+					rp->read_cnt, rp->len);
+
+	return rp->read_cnt;
+}
+
+/*
+ * mdss_dsi_cmd_dma_tx() - transmit the packed tx buffer via command DMA
+ * @ctrl: dsi controller
+ * @tp: tx buffer holding the packed command(s)
+ *
+ * Maps the buffer through the iommu when attached, mirrors the DMA
+ * address/length/trigger to the other controller in sync-wait broadcast
+ * mode, then triggers and waits for DMA completion. In video mode,
+ * overflow errors are masked around the transfer and cleared afterward.
+ * In unicast mode the wait is skipped and cmd_trigger kicks off later.
+ *
+ * Return: tp->len on success, negative error code on map failure or
+ * timeout.
+ */
+static int mdss_dsi_cmd_dma_tx(struct mdss_dsi_ctrl_pdata *ctrl,
+					struct dsi_buf *tp)
+{
+	int len, ret = 0;
+	int domain = MDSS_IOMMU_DOMAIN_UNSECURE;
+	char *bp;
+	struct mdss_dsi_ctrl_pdata *mctrl = NULL;
+	int ignored = 0;	/* overflow ignored */
+
+	bp = tp->data;
+
+	len = ALIGN(tp->len, 4);
+	ctrl->dma_size = ALIGN(tp->len, SZ_4K);
+
+	/* hold the iommu lock for the whole map/transfer/unmap sequence */
+	ctrl->mdss_util->iommu_lock();
+	if (ctrl->mdss_util->iommu_attached()) {
+		ret = mdss_smmu_dsi_map_buffer(tp->dmap, domain, ctrl->dma_size,
+			&(ctrl->dma_addr), tp->start, DMA_TO_DEVICE);
+		if (IS_ERR_VALUE(ret)) {
+			pr_err("unable to map dma memory to iommu(%d)\n", ret);
+			ctrl->mdss_util->iommu_unlock();
+			return -ENOMEM;
+		}
+		ctrl->dmap_iommu_map = true;
+	} else {
+		/* no iommu: use the buffer's physical/dma address directly */
+		ctrl->dma_addr = tp->dmap;
+	}
+
+	reinit_completion(&ctrl->dma_comp);
+
+	if (ctrl->panel_mode == DSI_VIDEO_MODE)
+		ignored = 1;
+
+	if (mdss_dsi_sync_wait_trigger(ctrl)) {
+		/* broadcast same cmd to other panel */
+		mctrl = mdss_dsi_get_other_ctrl(ctrl);
+		if (mctrl && mctrl->dma_addr == 0) {
+			if (ignored) {
+				/* mask out overflow isr */
+				mdss_dsi_set_reg(mctrl, 0x10c,
+						0x0f0000, 0x0f0000);
+			}
+			MIPI_OUTP(mctrl->ctrl_base + 0x048, ctrl->dma_addr);
+			MIPI_OUTP(mctrl->ctrl_base + 0x04c, len);
+			MIPI_OUTP(mctrl->ctrl_base + 0x090, 0x01); /* trigger */
+		}
+	}
+
+	if (ignored) {
+		/* mask out overflow isr */
+		mdss_dsi_set_reg(ctrl, 0x10c, 0x0f0000, 0x0f0000);
+	}
+
+	/* send cmd to its panel */
+	MIPI_OUTP((ctrl->ctrl_base) + 0x048, ctrl->dma_addr);
+	MIPI_OUTP((ctrl->ctrl_base) + 0x04c, len);
+	wmb(); /* ensure write is finished before progressing */
+
+	MIPI_OUTP((ctrl->ctrl_base) + 0x090, 0x01);
+	wmb(); /* ensure write is finished before progressing */
+
+	if (ctrl->do_unicast) {
+		/* let cmd_trigger to kickoff later */
+		pr_debug("%s: SKIP, ndx=%d do_unicast=%d\n", __func__,
+					ctrl->ndx, ctrl->do_unicast);
+		ret = tp->len;
+		goto end;
+	}
+
+	ret = wait_for_completion_timeout(&ctrl->dma_comp,
+				msecs_to_jiffies(DMA_TX_TIMEOUT));
+	if (ret == 0) {
+		u32 reg_val, status;
+
+		/* timeout: check whether hw finished but the irq was lost */
+		reg_val = MIPI_INP(ctrl->ctrl_base + 0x0110);/* DSI_INTR_CTRL */
+		status = reg_val & DSI_INTR_CMD_DMA_DONE;
+		if (status) {
+			reg_val &= DSI_INTR_MASK_ALL;
+			/* clear CMD DMA and BTA_DONE isr only */
+			reg_val |= (DSI_INTR_CMD_DMA_DONE | DSI_INTR_BTA_DONE);
+			MIPI_OUTP(ctrl->ctrl_base + 0x0110, reg_val);
+			mdss_dsi_disable_irq_nosync(ctrl, DSI_CMD_TERM);
+			complete(&ctrl->dma_comp);
+
+			pr_warn("%s: dma tx done but irq not triggered\n",
+				__func__);
+		} else {
+			ret = -ETIMEDOUT;
+		}
+	}
+
+	if (!IS_ERR_VALUE(ret))
+		ret = tp->len;
+
+	/* clean up the mirrored transfer on the other controller */
+	if (mctrl && mctrl->dma_addr) {
+		if (ignored) {
+			/* clear pending overflow status */
+			mdss_dsi_set_reg(mctrl, 0xc, 0xffffffff, 0x44440000);
+			/* restore overflow isr */
+			mdss_dsi_set_reg(mctrl, 0x10c, 0x0f0000, 0);
+		}
+		if (mctrl->dmap_iommu_map) {
+			mdss_smmu_dsi_unmap_buffer(mctrl->dma_addr, domain,
+				mctrl->dma_size, DMA_TO_DEVICE);
+			mctrl->dmap_iommu_map = false;
+		}
+		mctrl->dma_addr = 0;
+		mctrl->dma_size = 0;
+	}
+
+	if (ctrl->dmap_iommu_map) {
+		mdss_smmu_dsi_unmap_buffer(ctrl->dma_addr, domain,
+			ctrl->dma_size, DMA_TO_DEVICE);
+		ctrl->dmap_iommu_map = false;
+	}
+
+	if (ignored) {
+		/* clear pending overflow status */
+		mdss_dsi_set_reg(ctrl, 0xc, 0xffffffff, 0x44440000);
+		/* restore overflow isr */
+		mdss_dsi_set_reg(ctrl, 0x10c, 0x0f0000, 0);
+	}
+	ctrl->dma_addr = 0;
+	ctrl->dma_size = 0;
+end:
+	ctrl->mdss_util->iommu_unlock();
+	return ret;
+}
+
+/*
+ * mdss_dsi_cmd_dma_rx() - copy read-back data out of the RDBK registers
+ * @ctrl: dsi controller
+ * @rp: rx buffer to fill/append
+ * @rx_byte: bytes expected in this pass (4 for short, 16 for long reads)
+ *
+ * Reads up to four RDBK_DATA registers (16 bytes) highest-offset first
+ * and converts each word from network byte order. On multi-pass long
+ * reads, bytes already present in @rp that repeat in the registers are
+ * skipped before appending (see the de-duplication comment below).
+ *
+ * Return: rx_byte on success, 0 when the hw reports nothing received.
+ */
+static int mdss_dsi_cmd_dma_rx(struct mdss_dsi_ctrl_pdata *ctrl,
+			struct dsi_buf *rp, int rx_byte)
+
+{
+	u32 *lp, *temp, data;
+	int i, j = 0, off, cnt;
+	bool ack_error = false;
+	char reg[16];
+	int repeated_bytes = 0;
+
+	lp = (u32 *)rp->data;
+	temp = (u32 *)reg;
+	/* round rx_byte up to whole 32-bit registers */
+	cnt = rx_byte;
+	cnt += 3;
+	cnt >>= 2;
+
+	if (cnt > 4)
+		cnt = 4; /* 4 x 32 bits registers only */
+
+	if (ctrl->shared_data->hw_rev >= MDSS_DSI_HW_REV_101) {
+		/* hw reports actual received byte count in 0x01d4[31:16] */
+		rp->read_cnt = (MIPI_INP((ctrl->ctrl_base) + 0x01d4) >> 16);
+		pr_debug("%s: bytes read:%d\n", __func__, rp->read_cnt);
+
+		/* detect a trailing 4-byte ack-error report in the count */
+		ack_error = (rx_byte == 4) ? (rp->read_cnt == 8) :
+				((rp->read_cnt - 4) == (max_pktsize[0] + 6));
+
+		if (ack_error)
+			rp->read_cnt -= 4; /* 4 byte read err report */
+		if (!rp->read_cnt) {
+			pr_err("%s: Errors detected, no data rxed\n", __func__);
+			return 0;
+		}
+	} else if (rx_byte == 4) {
+		rp->read_cnt = 4;
+	} else {
+		rp->read_cnt = (max_pktsize[0] + 6);
+	}
+
+	/*
+	 * In case of multiple reads from the panel, after the first read, there
+	 * is possibility that there are some bytes in the payload repeating in
+	 * the RDBK_DATA registers. Since we read all the parameters from the
+	 * panel right from the first byte for every pass. We need to skip the
+	 * repeating bytes and then append the new parameters to the rx buffer.
+	 */
+	if (rp->read_cnt > 16) {
+		int bytes_shifted, data_lost = 0, rem_header_bytes = 0;
+		/* Any data more than 16 bytes will be shifted out */
+		bytes_shifted = rp->read_cnt - rx_byte;
+		if (bytes_shifted >= 4)
+			data_lost = bytes_shifted - 4; /* remove dcs header */
+		else
+			rem_header_bytes = 4 - bytes_shifted; /* rem header */
+		/*
+		 * (rp->len - 4) -> current rx buffer data length.
+		 * If data_lost > 0, then ((rp->len - 4) - data_lost) will be
+		 * the number of repeating bytes.
+		 * If data_lost == 0, then ((rp->len - 4) + rem_header_bytes)
+		 * will be the number of bytes repeating in between rx buffer
+		 * and the current RDBK_DATA registers. We need to skip the
+		 * repeating bytes.
+		 */
+		repeated_bytes = (rp->len - 4) - data_lost + rem_header_bytes;
+	}
+
+	off = 0x06c;	/* DSI_RDBK_DATA0 */
+	off += ((cnt - 1) * 4);
+
+	/* read registers highest-offset first, stepping down to DATA0 */
+	for (i = 0; i < cnt; i++) {
+		data = (u32)MIPI_INP((ctrl->ctrl_base) + off);
+		/* to network byte order */
+		if (!repeated_bytes)
+			*lp++ = ntohl(data);
+		else
+			*temp++ = ntohl(data);
+		pr_debug("%s: data = 0x%x and ntohl(data) = 0x%x\n",
+					 __func__, data, ntohl(data));
+		off -= 4;
+	}
+
+	/* Skip duplicates and append other data to the rx buffer */
+	if (repeated_bytes) {
+		for (i = repeated_bytes; i < 16; i++)
+			rp->data[j++] = reg[i];
+	}
+
+	return rx_byte;
+}
+
+/*
+ * mdss_dsi_bus_bandwidth_vote() - refcounted bus bandwidth on/off vote
+ * @sdata: shared dsi data holding the refcount and bus handle
+ * @on: true to add a vote, false to drop one
+ *
+ * The actual bus request is only updated on the 0 -> 1 and 1 -> 0
+ * refcount transitions; unbalanced "off" votes are warned about and
+ * otherwise ignored.
+ *
+ * Return: 0 on success, error code from the bus scale request otherwise.
+ */
+static int mdss_dsi_bus_bandwidth_vote(struct dsi_shared_data *sdata, bool on)
+{
+	bool toggle = false;
+	int ret = 0;
+
+	if (on) {
+		toggle = (sdata->bus_refcount == 0);
+		sdata->bus_refcount++;
+	} else if (sdata->bus_refcount == 0) {
+		pr_warn("%s: bus bw votes are not balanced\n",
+			__func__);
+	} else {
+		sdata->bus_refcount--;
+		toggle = (sdata->bus_refcount == 0);
+	}
+
+	if (!toggle)
+		return ret;
+
+	ret = msm_bus_scale_client_update_request(sdata->bus_handle,
+						  on ? 1 : 0);
+	if (ret)
+		pr_err("%s: Bus bandwidth vote failed\n", __func__);
+
+	return ret;
+}
+
+
+/*
+ * mdss_dsi_en_wait4dynamic_done() - trigger dynamic refresh and wait
+ * @ctrl: dsi controller
+ *
+ * Unmasks the dynamic-refresh interrupt, arms its completion, triggers
+ * DYNAMIC_REFRESH_CTRL (also on the clock-slave controller for split
+ * DSI), then waits up to 4 vsync periods for the done interrupt. On
+ * timeout it checks whether the hw finished but the irq was lost before
+ * declaring -ETIMEDOUT. The interrupt mask is restored on exit.
+ *
+ * Return: 0 on success, -ETIMEDOUT on a genuine timeout.
+ */
+int mdss_dsi_en_wait4dynamic_done(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	unsigned long flag;
+	u32 data;
+	int rc = 0;
+	struct mdss_dsi_ctrl_pdata *sctrl_pdata = NULL;
+
+	/* DSI_INTL_CTRL */
+	data = MIPI_INP((ctrl->ctrl_base) + 0x0110);
+	data &= DSI_INTR_TOTAL_MASK;
+	data |= DSI_INTR_DYNAMIC_REFRESH_MASK;
+	MIPI_OUTP((ctrl->ctrl_base) + 0x0110, data);
+
+	spin_lock_irqsave(&ctrl->mdp_lock, flag);
+	reinit_completion(&ctrl->dynamic_comp);
+	mdss_dsi_enable_irq(ctrl, DSI_DYNAMIC_TERM);
+	spin_unlock_irqrestore(&ctrl->mdp_lock, flag);
+
+	/*
+	 * Ensure that registers are updated before triggering
+	 * dynamic refresh
+	 */
+	wmb();
+
+	MIPI_OUTP((ctrl->ctrl_base) + DSI_DYNAMIC_REFRESH_CTRL,
+		(BIT(13) | BIT(8) | BIT(0)));
+
+	/*
+	 * Configure DYNAMIC_REFRESH_CTRL for second controller only
+	 * for split DSI cases.
+	 */
+	if (mdss_dsi_is_ctrl_clk_master(ctrl))
+		sctrl_pdata = mdss_dsi_get_ctrl_clk_slave();
+
+	if (sctrl_pdata)
+		MIPI_OUTP((sctrl_pdata->ctrl_base) + DSI_DYNAMIC_REFRESH_CTRL,
+				(BIT(13) | BIT(8) | BIT(0)));
+
+	rc = wait_for_completion_timeout(&ctrl->dynamic_comp,
+			msecs_to_jiffies(VSYNC_PERIOD * 4));
+	if (rc == 0) {
+		u32 reg_val, status;
+
+		/* timeout: did the hw complete without firing the irq? */
+		reg_val = MIPI_INP(ctrl->ctrl_base + MDSS_DSI_INT_CTRL);
+		status = reg_val & DSI_INTR_DYNAMIC_REFRESH_DONE;
+		if (status) {
+			reg_val &= DSI_INTR_MASK_ALL;
+			/* clear dfps DONE isr only */
+			reg_val |= DSI_INTR_DYNAMIC_REFRESH_DONE;
+			MIPI_OUTP(ctrl->ctrl_base + MDSS_DSI_INT_CTRL, reg_val);
+			mdss_dsi_disable_irq(ctrl, DSI_DYNAMIC_TERM);
+			pr_warn_ratelimited("%s: dfps done but irq not triggered\n",
+				__func__);
+		} else {
+			pr_err("Dynamic interrupt timedout\n");
+			rc = -ETIMEDOUT;
+		}
+	}
+
+	/* re-mask the dynamic refresh interrupt */
+	data = MIPI_INP((ctrl->ctrl_base) + 0x0110);
+	data &= DSI_INTR_TOTAL_MASK;
+	data &= ~DSI_INTR_DYNAMIC_REFRESH_MASK;
+	MIPI_OUTP((ctrl->ctrl_base) + 0x0110, data);
+
+	return rc;
+}
+
+/*
+ * mdss_dsi_wait4video_done() - block until the next VIDEO_DONE interrupt
+ * @ctrl: dsi controller
+ *
+ * Clears any stale VIDEO_DONE status, arms the completion and the
+ * VIDEO_DONE interrupt enable, then waits up to 4 vsync periods. The
+ * interrupt enable bit is cleared again on exit. A timeout is not
+ * treated as an error here (return value of the wait is ignored).
+ */
+void mdss_dsi_wait4video_done(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	unsigned long flag;
+	u32 data;
+
+	/* DSI_INTL_CTRL */
+	data = MIPI_INP((ctrl->ctrl_base) + 0x0110);
+	/* clear previous VIDEO_DONE interrupt first */
+	data &= DSI_INTR_TOTAL_MASK;
+	MIPI_OUTP((ctrl->ctrl_base) + 0x0110, (data | DSI_INTR_VIDEO_DONE));
+	wmb(); /* make sure write happened */
+
+	spin_lock_irqsave(&ctrl->mdp_lock, flag);
+	reinit_completion(&ctrl->video_comp);
+	mdss_dsi_enable_irq(ctrl, DSI_VIDEO_TERM);
+	spin_unlock_irqrestore(&ctrl->mdp_lock, flag);
+
+	/* set interrupt enable bit for VIDEO_DONE */
+	data |= DSI_INTR_VIDEO_DONE_MASK;
+	MIPI_OUTP((ctrl->ctrl_base) + 0x0110, data);
+	wmb(); /* make sure write happened */
+
+	wait_for_completion_timeout(&ctrl->video_comp,
+			msecs_to_jiffies(VSYNC_PERIOD * 4));
+
+	/* disable the VIDEO_DONE interrupt again */
+	data = MIPI_INP((ctrl->ctrl_base) + 0x0110);
+	data &= DSI_INTR_TOTAL_MASK;
+	data &= ~DSI_INTR_VIDEO_DONE_MASK;
+	MIPI_OUTP((ctrl->ctrl_base) + 0x0110, data);
+}
+
+/*
+ * mdss_dsi_wait4video_eng_busy() - delay a command until the BLLP passes
+ * @ctrl: dsi controller
+ *
+ * For an active video mode panel, waits for VIDEO_DONE and then sleeps
+ * through the vertical blanking (back porch + pulse width) portion of
+ * the frame so a following command transfer does not collide with it.
+ * No-op for command mode panels.
+ *
+ * Return: 1 when a wait was performed, 0 otherwise.
+ */
+static int mdss_dsi_wait4video_eng_busy(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	int ret = 0;
+	u32 v_total = 0, v_blank = 0, sleep_ms = 0, fps = 0;
+	struct mdss_panel_info *pinfo = &ctrl->panel_data.panel_info;
+
+	if (ctrl->panel_mode == DSI_CMD_MODE)
+		return ret;
+
+	if (ctrl->ctrl_state & CTRL_STATE_MDP_ACTIVE) {
+		mdss_dsi_wait4video_done(ctrl);
+		v_total = mdss_panel_get_vtotal(pinfo);
+		v_blank = pinfo->lcdc.v_back_porch + pinfo->lcdc.v_pulse_width;
+		/* honor a dynamic fps override when one is active */
+		if (pinfo->dynamic_fps && pinfo->current_fps)
+			fps = pinfo->current_fps;
+		else
+			fps = pinfo->mipi.frame_rate;
+
+		sleep_ms = CEIL((v_blank * 1000), (v_total * fps));
+		/* delay sleep_ms to skip BLLP */
+		if (sleep_ms)
+			usleep_range((sleep_ms * 1000), (sleep_ms * 1000) + 10);
+		ret = 1;
+	}
+
+	return ret;
+}
+
+/*
+ * mdss_dsi_cmd_mdp_start() - mark the start of an mdp transfer
+ * @ctrl: dsi controller
+ *
+ * Under mdp_lock: enables the MDP-done interrupt, sets mdp_busy and
+ * re-arms the mdp completion that mdss_dsi_cmd_mdp_busy() waits on.
+ */
+void mdss_dsi_cmd_mdp_start(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	unsigned long flag;
+
+	spin_lock_irqsave(&ctrl->mdp_lock, flag);
+	mdss_dsi_enable_irq(ctrl, DSI_MDP_TERM);
+	ctrl->mdp_busy = true;
+	reinit_completion(&ctrl->mdp_comp);
+	MDSS_XLOG(ctrl->ndx, ctrl->mdp_busy, current->pid);
+	spin_unlock_irqrestore(&ctrl->mdp_lock, flag);
+}
+
+/*
+ * mdss_dsi_mdp_busy_tout_check() - recover from a missed MDP-done irq
+ * @ctrl: dsi controller
+ *
+ * Called after a wait for mdp_comp timed out. If the hardware shows
+ * DSI_INTR_CMD_MDP_DONE pending, the irq was missed: clear the status,
+ * mark the controller idle and (for command-mode panels with clock lane
+ * recovery enabled) stop the HS clock lane as the isr would have.
+ * Always completes mdp_comp so other waiters are released.
+ *
+ * Return: 0 when recovery succeeded, 1 when it is a genuine timeout.
+ */
+static int mdss_dsi_mdp_busy_tout_check(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	unsigned long flag;
+	u32 isr;
+	bool stop_hs_clk = false;
+	int tout = 1;
+
+	/*
+	 * two possible scenario:
+	 * 1) DSI_INTR_CMD_MDP_DONE set but isr not fired
+	 * 2) DSI_INTR_CMD_MDP_DONE set and cleared (isr fired)
+	 * but event_thread not wakeup
+	 */
+	mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle, MDSS_DSI_ALL_CLKS,
+			  MDSS_DSI_CLK_ON);
+	spin_lock_irqsave(&ctrl->mdp_lock, flag);
+
+	isr = MIPI_INP(ctrl->ctrl_base + 0x0110);
+	if (isr & DSI_INTR_CMD_MDP_DONE) {
+		pr_warn("INTR_CMD_MDP_DONE set but isr not fired\n");
+		isr &= DSI_INTR_MASK_ALL;
+		isr |= DSI_INTR_CMD_MDP_DONE; /* clear this isr only */
+		MIPI_OUTP(ctrl->ctrl_base + 0x0110, isr);
+		mdss_dsi_disable_irq_nosync(ctrl, DSI_MDP_TERM);
+		ctrl->mdp_busy = false;
+		if (ctrl->shared_data->cmd_clk_ln_recovery_en &&
+			ctrl->panel_mode == DSI_CMD_MODE) {
+			/* has hs_lane_recovery do the work */
+			stop_hs_clk = true;
+		}
+		tout = 0;	/* recovered */
+	}
+
+	spin_unlock_irqrestore(&ctrl->mdp_lock, flag);
+
+	/* must be done outside the spinlock */
+	if (stop_hs_clk)
+		mdss_dsi_stop_hs_clk_lane(ctrl);
+
+	complete_all(&ctrl->mdp_comp);
+
+	mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle, MDSS_DSI_ALL_CLKS,
+			  MDSS_DSI_CLK_OFF);
+
+	return tout;
+}
+
+/*
+ * mdss_dsi_cmd_mdp_busy() - wait until any ongoing mdp transfer finishes
+ * @ctrl: dsi controller
+ *
+ * If mdp_busy is set, waits (up to DMA_TX_TIMEOUT) for mdp_comp. When
+ * the wait times out but mdp_busy was cleared meanwhile the race is
+ * tolerated; otherwise the missed-irq recovery path is attempted and a
+ * genuine timeout is logged.
+ */
+void mdss_dsi_cmd_mdp_busy(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	unsigned long flags;
+	int need_wait = 0;
+	int rc;
+
+	pr_debug("%s: start pid=%d\n",
+				__func__, current->pid);
+
+	MDSS_XLOG(ctrl->ndx, ctrl->mdp_busy, current->pid, XLOG_FUNC_ENTRY);
+	spin_lock_irqsave(&ctrl->mdp_lock, flags);
+	if (ctrl->mdp_busy == true)
+		need_wait++;
+	spin_unlock_irqrestore(&ctrl->mdp_lock, flags);
+
+	if (need_wait) {
+		/* wait until DMA finishes the current job */
+		pr_debug("%s: pending pid=%d\n",
+				__func__, current->pid);
+		rc = wait_for_completion_timeout(&ctrl->mdp_comp,
+					msecs_to_jiffies(DMA_TX_TIMEOUT));
+		/* re-check busy flag: completion may have raced the timeout */
+		spin_lock_irqsave(&ctrl->mdp_lock, flags);
+		if (!ctrl->mdp_busy)
+			rc = 1;
+		spin_unlock_irqrestore(&ctrl->mdp_lock, flags);
+		if (!rc && mdss_dsi_mdp_busy_tout_check(ctrl))
+			pr_err("%s: timeout error\n", __func__);
+	}
+	pr_debug("%s: done pid=%d\n", __func__, current->pid);
+	MDSS_XLOG(ctrl->ndx, ctrl->mdp_busy, current->pid, XLOG_FUNC_EXIT);
+}
+
+/*
+ * mdss_dsi_cmdlist_tx() - transmit the commands of a dcs request
+ * @ctrl: dsi controller
+ * @req: the queued command request (cmds, count, flags, callback)
+ *
+ * In sync-wait mode, a non-trigger controller handling a CMD_REQ_UNICAST
+ * request sends on its own (do_unicast); otherwise the send is left to
+ * the trigger controller. Invokes req->cb with the result when set.
+ *
+ * Return: number of bytes sent (0 on failure or deferral).
+ */
+int mdss_dsi_cmdlist_tx(struct mdss_dsi_ctrl_pdata *ctrl,
+				struct dcs_cmd_req *req)
+{
+	int ret;
+
+	if (mdss_dsi_sync_wait_enable(ctrl))
+		ctrl->do_unicast = (!ctrl->cmd_sync_wait_trigger &&
+				    (req->flags & CMD_REQ_UNICAST));
+
+	ret = mdss_dsi_cmds_tx(ctrl, req->cmds, req->cmds_cnt,
+				(req->flags & CMD_REQ_DMA_TPG));
+
+	if (req->cb)
+		req->cb(ret);
+
+	return ret;
+}
+
+/*
+ * mdss_dsi_cmdlist_rx() - execute the read command of a dcs request
+ * @ctrl: dsi controller
+ * @req: the queued command request (cmds, rlen, rbuf, flags, callback)
+ *
+ * Performs the panel read into the ctrl rx buffer and copies the result
+ * into req->rbuf. A request without an rbuf is rejected with an error
+ * log. Invokes req->cb with the byte count when set.
+ *
+ * Return: number of bytes read (0 on failure or missing rbuf).
+ */
+int mdss_dsi_cmdlist_rx(struct mdss_dsi_ctrl_pdata *ctrl,
+				struct dcs_cmd_req *req)
+{
+	int read_cnt = 0;
+
+	if (!req->rbuf) {
+		pr_err("%s: No rx buffer provided\n", __func__);
+	} else {
+		struct dsi_buf *rbuf = &ctrl->rx_buf;
+
+		read_cnt = mdss_dsi_cmds_rx(ctrl, req->cmds, req->rlen,
+				(req->flags & CMD_REQ_DMA_TPG));
+		memcpy(req->rbuf, rbuf->data, rbuf->len);
+		ctrl->rx_len = read_cnt;
+	}
+
+	if (req->cb)
+		req->cb(read_cnt);
+
+	return read_cnt;
+}
+
+/*
+ * mdss_dsi_delay_cmd() - should a dcs command wait before being sent?
+ * @ctrl: dsi controller
+ * @from_mdp: true when the request originates from an mdp kickoff
+ *
+ * Applies only to split-DSI, command mode panels with burst mode
+ * enabled. A non-mdp command should be delayed when a kickoff has been
+ * scheduled in software (mdp_busy) but the hardware transfer has not
+ * started yet (MDP_BUSY status bit clear).
+ *
+ * Return: true when the caller should delay the command transfer.
+ */
+static inline bool mdss_dsi_delay_cmd(struct mdss_dsi_ctrl_pdata *ctrl,
+	bool from_mdp)
+{
+	unsigned long flags;
+	bool mdp_busy = false;
+	bool need_wait = false;
+
+	if (!ctrl->mdp_callback)
+		goto exit;
+
+	/* delay only for split dsi, cmd mode and burst mode enabled cases */
+	if (!mdss_dsi_is_hw_config_split(ctrl->shared_data) ||
+	    !(ctrl->panel_mode == DSI_CMD_MODE) ||
+	    !ctrl->burst_mode_enabled)
+		goto exit;
+
+	/* delay only if cmd is not from mdp and panel has been initialized */
+	if (from_mdp || !(ctrl->ctrl_state & CTRL_STATE_PANEL_INIT))
+		goto exit;
+
+	/* if broadcast enabled, apply delay only if this is the ctrl trigger */
+	if (mdss_dsi_sync_wait_enable(ctrl) &&
+	   !mdss_dsi_sync_wait_trigger(ctrl))
+		goto exit;
+
+	spin_lock_irqsave(&ctrl->mdp_lock, flags);
+	if (ctrl->mdp_busy == true)
+		mdp_busy = true;
+	spin_unlock_irqrestore(&ctrl->mdp_lock, flags);
+
+	/*
+	 * apply delay only if:
+	 *  mdp_busy bool is set - kickoff is being scheduled by sw
+	 *  MDP_BUSY bit  is not set - transfer is not on-going in hw yet
+	 */
+	if (mdp_busy && !(MIPI_INP(ctrl->ctrl_base + 0x008) & BIT(2)))
+		need_wait = true;
+
+exit:
+	MDSS_XLOG(need_wait, from_mdp, mdp_busy);
+	return need_wait;
+}
+
+/*
+ * mdss_dsi_cmdlist_commit() - dequeue and execute a pending DCS request.
+ * @ctrl:     DSI controller
+ * @from_mdp: non-zero when invoked from the MDP kickoff path
+ *
+ * Serializes against other senders via cmd_mutex (tracked locally with
+ * cmd_mutex_acquired since burst mode defers the lock until a request is
+ * dequeued), waits for MDP to go idle, routes commands through the TPG
+ * FIFO for secure sessions (DSI HW >= 1.3.0 only), manages HS/LP clock
+ * lane state, bus bandwidth and IOMMU votes, and performs the actual
+ * tx/rx. Returns the transfer byte count or a negative error code.
+ *
+ * Fixes vs. previous revision: error paths now unlock cmd_mutex only when
+ * this function actually acquired it (previously `from_mdp` / an
+ * unconditional unlock could release a mutex that was never taken when
+ * burst mode is enabled and no request was dequeued), and the IOMMU
+ * attach failure path now drops the bus bandwidth vote it took.
+ */
+int mdss_dsi_cmdlist_commit(struct mdss_dsi_ctrl_pdata *ctrl, int from_mdp)
+{
+	struct dcs_cmd_req *req;
+	struct mdss_panel_info *pinfo;
+	struct mdss_rect *roi = NULL;
+	bool use_iommu = false;
+	int ret = -EINVAL;
+	int rc = 0;
+	bool hs_req = false;
+	bool cmd_mutex_acquired = false;
+
+	if (from_mdp) {	/* from mdp kickoff */
+		if (!ctrl->burst_mode_enabled) {
+			mutex_lock(&ctrl->cmd_mutex);
+			cmd_mutex_acquired = true;
+		}
+		pinfo = &ctrl->panel_data.panel_info;
+		if (pinfo->partial_update_enabled)
+			roi = &pinfo->roi;
+	}
+
+	req = mdss_dsi_cmdlist_get(ctrl, from_mdp);
+	if (req && from_mdp && ctrl->burst_mode_enabled) {
+		mutex_lock(&ctrl->cmd_mutex);
+		cmd_mutex_acquired = true;
+	}
+
+	MDSS_XLOG(ctrl->ndx, from_mdp, ctrl->mdp_busy, current->pid,
+							XLOG_FUNC_ENTRY);
+
+	if (req && (req->flags & CMD_REQ_HS_MODE))
+		hs_req = true;
+
+	if ((!ctrl->burst_mode_enabled) || from_mdp) {
+		/* make sure dsi_cmd_mdp is idle */
+		mdss_dsi_cmd_mdp_busy(ctrl);
+	}
+
+	/*
+	 * if secure display session is enabled
+	 * and DSI controller version is above 1.3.0,
+	 * then send DSI commands using TPG FIFO.
+	 */
+	if (mdss_get_sd_client_cnt() && req) {
+		if (ctrl->shared_data->hw_rev >= MDSS_DSI_HW_REV_103) {
+			req->flags |= CMD_REQ_DMA_TPG;
+		} else {
+			if (cmd_mutex_acquired)
+				mutex_unlock(&ctrl->cmd_mutex);
+			return -EPERM;
+		}
+	}
+
+	/* For DSI versions less than 1.3.0, CMD DMA TPG is not supported */
+	if (req && (ctrl->shared_data->hw_rev < MDSS_DSI_HW_REV_103))
+		req->flags &= ~CMD_REQ_DMA_TPG;
+
+	pr_debug("%s: ctrl=%d from_mdp=%d pid=%d\n", __func__,
+				ctrl->ndx, from_mdp, current->pid);
+
+	if (from_mdp) { /* from mdp kickoff */
+		/*
+		 * when partial update enabled, the roi of pinfo
+		 * is updated before mdp kickoff. Either width or
+		 * height of roi is non zero, then really kickoff
+		 * will followed.
+		 */
+		if (!roi || (roi->w != 0 || roi->h != 0)) {
+			if (ctrl->shared_data->cmd_clk_ln_recovery_en &&
+					ctrl->panel_mode == DSI_CMD_MODE)
+				mdss_dsi_start_hs_clk_lane(ctrl);
+		}
+	} else {	/* from dcs send */
+		if (ctrl->shared_data->cmd_clk_ln_recovery_en &&
+				ctrl->panel_mode == DSI_CMD_MODE && hs_req)
+			mdss_dsi_cmd_start_hs_clk_lane(ctrl);
+	}
+
+	if (!req)
+		goto need_lock;
+
+	MDSS_XLOG(ctrl->ndx, req->flags, req->cmds_cnt, from_mdp, current->pid);
+
+	pr_debug("%s:  from_mdp=%d pid=%d\n", __func__, from_mdp, current->pid);
+
+	if (!(req->flags & CMD_REQ_DMA_TPG)) {
+		/*
+		 * mdss interrupt is generated in mdp core clock domain
+		 * mdp clock need to be enabled to receive dsi interrupt
+		 * also, axi bus bandwidth need since dsi controller will
+		 * fetch dcs commands from axi bus
+		 */
+		rc = mdss_dsi_bus_bandwidth_vote(ctrl->shared_data, true);
+		if (rc) {
+			pr_err("%s: Bus bw vote failed\n", __func__);
+			/* unlock only if this function took the mutex */
+			if (cmd_mutex_acquired)
+				mutex_unlock(&ctrl->cmd_mutex);
+			return rc;
+		}
+
+		if (ctrl->mdss_util->iommu_ctrl) {
+			rc = ctrl->mdss_util->iommu_ctrl(1);
+			if (IS_ERR_VALUE(rc)) {
+				pr_err("IOMMU attach failed\n");
+				/* drop the bus bw vote taken above */
+				(void)mdss_dsi_bus_bandwidth_vote(
+						ctrl->shared_data, false);
+				/* unlock only if this function took the mutex */
+				if (cmd_mutex_acquired)
+					mutex_unlock(&ctrl->cmd_mutex);
+				return rc;
+			}
+			use_iommu = true;
+		}
+	}
+
+	mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle, MDSS_DSI_ALL_CLKS,
+			  MDSS_DSI_CLK_ON);
+
+	/*
+	 * In ping pong split cases, check if we need to apply a
+	 * delay for any commands that are not coming from
+	 * mdp path
+	 */
+	mutex_lock(&ctrl->mutex);
+	if (mdss_dsi_delay_cmd(ctrl, from_mdp))
+		ctrl->mdp_callback->fxn(ctrl->mdp_callback->data,
+			MDP_INTF_CALLBACK_DSI_WAIT);
+	mutex_unlock(&ctrl->mutex);
+
+	if (req->flags & CMD_REQ_HS_MODE)
+		mdss_dsi_set_tx_power_mode(0, &ctrl->panel_data);
+
+	if (req->flags & CMD_REQ_RX)
+		ret = mdss_dsi_cmdlist_rx(ctrl, req);
+	else
+		ret = mdss_dsi_cmdlist_tx(ctrl, req);
+
+	if (req->flags & CMD_REQ_HS_MODE)
+		mdss_dsi_set_tx_power_mode(1, &ctrl->panel_data);
+
+	if (!(req->flags & CMD_REQ_DMA_TPG)) {
+		if (use_iommu)
+			ctrl->mdss_util->iommu_ctrl(0);
+
+		(void)mdss_dsi_bus_bandwidth_vote(ctrl->shared_data, false);
+	}
+
+	mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle, MDSS_DSI_ALL_CLKS,
+			MDSS_DSI_CLK_OFF);
+need_lock:
+
+	MDSS_XLOG(ctrl->ndx, from_mdp, ctrl->mdp_busy, current->pid,
+							XLOG_FUNC_EXIT);
+
+	if (from_mdp) { /* from mdp kickoff */
+		/*
+		 * when partial update enabled, the roi of pinfo
+		 * is updated before mdp kickoff. Either width or
+		 * height of roi is 0, then it is false kickoff so
+		 * no mdp_busy flag set needed.
+		 * when partial update disabled, mdp_busy flag
+		 * alway set.
+		 */
+		if (!roi || (roi->w != 0 || roi->h != 0))
+			mdss_dsi_cmd_mdp_start(ctrl);
+		if (cmd_mutex_acquired)
+			mutex_unlock(&ctrl->cmd_mutex);
+	} else {	/* from dcs send */
+		if (ctrl->shared_data->cmd_clk_ln_recovery_en &&
+				ctrl->panel_mode == DSI_CMD_MODE &&
+				(req && (req->flags & CMD_REQ_HS_MODE)))
+			mdss_dsi_cmd_stop_hs_clk_lane(ctrl);
+	}
+
+	return ret;
+}
+
+/*
+ * Soft-reset the DSI controller after a FIFO error, with clocks held on
+ * across the reset. Optionally notifies the client recovery callback, and
+ * also resets the other controller when ping-pong split is in use.
+ */
+static void __dsi_fifo_error_handler(struct mdss_dsi_ctrl_pdata *ctrl,
+	bool recovery_needed)
+{
+	struct mdss_dsi_ctrl_pdata *sctrl;
+	bool use_pp_split = false;
+
+	use_pp_split = ctrl->panel_data.panel_info.use_pingpong_split;
+
+	mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle, MDSS_DSI_ALL_CLKS,
+		  MDSS_DSI_CLK_ON);
+	mdss_dsi_sw_reset(ctrl, true);
+	if (recovery_needed)
+		ctrl->recovery->fxn(ctrl->recovery->data,
+			MDP_INTF_DSI_CMD_FIFO_UNDERFLOW);
+	mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle, MDSS_DSI_ALL_CLKS,
+		  MDSS_DSI_CLK_OFF);
+
+	/* mirror the reset on the peer controller for ping-pong split */
+	sctrl = mdss_dsi_get_other_ctrl(ctrl);
+	if (sctrl && use_pp_split) {
+		mdss_dsi_clk_ctrl(sctrl, sctrl->dsi_clk_handle,
+			MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_ON);
+		mdss_dsi_sw_reset(sctrl, true);
+		mdss_dsi_clk_ctrl(sctrl, sctrl->dsi_clk_handle,
+			MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_OFF);
+	}
+}
+
+/*
+ * Queue an event for dsi_event_thread and wake it. Events posted before
+ * dsi_event.inited is set are silently dropped.
+ *
+ * NOTE(review): the todo_list ring has no fullness check; more than
+ * DSI_EVENT_Q_MAX outstanding events would overwrite unprocessed slots.
+ */
+static void dsi_send_events(struct mdss_dsi_ctrl_pdata *ctrl,
+					u32 events, u32 arg)
+{
+	struct dsi_event_q *evq;
+
+	if (!dsi_event.inited)
+		return;
+
+	pr_debug("%s: ev=%x\n", __func__, events);
+
+	spin_lock(&dsi_event.event_lock);
+	evq = &dsi_event.todo_list[dsi_event.event_pndx++];
+	evq->todo = events;
+	evq->arg = arg;
+	evq->ctrl = ctrl;
+	dsi_event.event_pndx %= DSI_EVENT_Q_MAX;
+	wake_up(&dsi_event.event_q);
+	spin_unlock(&dsi_event.event_lock);
+}
+
+/*
+ * dsi_event_thread() - SCHED_FIFO worker draining the DSI event queue.
+ *
+ * Handles, per queued event: PLL relock, data-lane FIFO underflow recovery
+ * (via __dsi_fifo_error_handler), FIFO-empty reset, FIFO overflow PHY
+ * reset (gated on lane stop-state for non-1.3.0 hw), MDP busy release
+ * (which also re-enables the error interrupt mask), stopping the HS clock
+ * lane, and LP RX timeout PHY reset.
+ */
+static int dsi_event_thread(void *data)
+{
+	struct mdss_dsi_event *ev;
+	struct dsi_event_q *evq;
+	struct mdss_dsi_ctrl_pdata *ctrl;
+	unsigned long flag;
+	struct sched_param param;
+	u32 todo = 0, ln_status, force_clk_ln_hs;
+	u32 arg;
+	int ret;
+
+	param.sched_priority = 16;
+	ret = sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
+	if (ret)
+		pr_err("%s: set priority failed\n", __func__);
+
+	ev = (struct mdss_dsi_event *)data;
+	/* event */
+	init_waitqueue_head(&ev->event_q);
+	spin_lock_init(&ev->event_lock);
+
+	while (1) {
+		/* block until a producer advances event_pndx */
+		wait_event(ev->event_q, (ev->event_pndx != ev->event_gndx));
+		spin_lock_irqsave(&ev->event_lock, flag);
+		evq = &ev->todo_list[ev->event_gndx++];
+		todo = evq->todo;
+		ctrl = evq->ctrl;
+		arg = evq->arg;
+		evq->todo = 0;
+		ev->event_gndx %= DSI_EVENT_Q_MAX;
+		spin_unlock_irqrestore(&ev->event_lock, flag);
+
+		pr_debug("%s: ev=%x\n", __func__, todo);
+
+		if (todo & DSI_EV_PLL_UNLOCKED)
+			mdss_dsi_pll_relock(ctrl);
+
+		if (todo & DSI_EV_DLNx_FIFO_UNDERFLOW) {
+			mutex_lock(&ctrl->mutex);
+			if (ctrl->recovery) {
+				pr_debug("%s: Handling underflow event\n",
+							__func__);
+				__dsi_fifo_error_handler(ctrl, true);
+			}
+			mutex_unlock(&ctrl->mutex);
+		}
+
+		if (todo & DSI_EV_DSI_FIFO_EMPTY)
+			__dsi_fifo_error_handler(ctrl, false);
+
+		if (todo & DSI_EV_DLNx_FIFO_OVERFLOW) {
+			mutex_lock(&dsi_mtx);
+			/*
+			 * For targets other than msm8994,
+			 * run the overflow recovery sequence only when
+			 * data lanes are in stop state and
+			 * clock lane is not in Stop State.
+			 */
+			ln_status = MIPI_INP(ctrl->ctrl_base + 0x00a8);
+			force_clk_ln_hs = (MIPI_INP(ctrl->ctrl_base + 0x00ac)
+					& BIT(28));
+			pr_debug("%s: lane_status: 0x%x\n",
+				       __func__, ln_status);
+			if (ctrl->recovery
+					&& (ctrl->shared_data->hw_rev
+						!= MDSS_DSI_HW_REV_103)
+					&& !(force_clk_ln_hs)
+					&& (ln_status
+						& DSI_DATA_LANES_STOP_STATE)
+					&& !(ln_status
+						& DSI_CLK_LANE_STOP_STATE)) {
+				pr_debug("%s: Handling overflow event.\n",
+								__func__);
+				mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle,
+						  MDSS_DSI_ALL_CLKS,
+						  MDSS_DSI_CLK_ON);
+				mdss_dsi_ctl_phy_reset(ctrl,
+						DSI_EV_DLNx_FIFO_OVERFLOW);
+				mdss_dsi_err_intr_ctrl(ctrl,
+						DSI_INTR_ERROR_MASK, 1);
+				mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle,
+						  MDSS_DSI_ALL_CLKS,
+						  MDSS_DSI_CLK_OFF);
+			} else if (ctrl->recovery
+					&& (ctrl->shared_data->hw_rev
+					    == MDSS_DSI_HW_REV_103)) {
+				/* rev 1.3.0: recover without lane-state gating */
+				pr_debug("%s: Handle overflow->Rev_103\n",
+								__func__);
+				mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle,
+						  MDSS_DSI_ALL_CLKS,
+						  MDSS_DSI_CLK_ON);
+				mdss_dsi_ctl_phy_reset(ctrl,
+						DSI_EV_DLNx_FIFO_OVERFLOW);
+				mdss_dsi_err_intr_ctrl(ctrl,
+						DSI_INTR_ERROR_MASK, 1);
+				mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle,
+						  MDSS_DSI_ALL_CLKS,
+						  MDSS_DSI_CLK_OFF);
+			}
+			mutex_unlock(&dsi_mtx);
+		}
+
+		if (todo & DSI_EV_MDP_BUSY_RELEASE) {
+			pr_debug("%s: Handling MDP_BUSY_RELEASE event\n",
+							__func__);
+			spin_lock_irqsave(&ctrl->mdp_lock, flag);
+			ctrl->mdp_busy = false;
+			mdss_dsi_disable_irq_nosync(ctrl, DSI_MDP_TERM);
+			complete(&ctrl->mdp_comp);
+			spin_unlock_irqrestore(&ctrl->mdp_lock, flag);
+
+			/* enable dsi error interrupt */
+			mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle,
+					  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_ON);
+			mdss_dsi_err_intr_ctrl(ctrl, DSI_INTR_ERROR_MASK, 1);
+			mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle,
+					  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_OFF);
+		}
+
+		if (todo & DSI_EV_STOP_HS_CLK_LANE)
+			mdss_dsi_stop_hs_clk_lane(ctrl);
+
+		if (todo & DSI_EV_LP_RX_TIMEOUT) {
+			mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle,
+					  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_ON);
+			mdss_dsi_ctl_phy_reset(ctrl, DSI_EV_LP_RX_TIMEOUT);
+			mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle,
+					  MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_OFF);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Check and clear DSI_ACK_ERR_STATUS (0x0068). Returns true when a real
+ * error was latched; BTA-induced fake errors (status & 0x1008000) are
+ * ignored while ESD BTA checking is active.
+ */
+bool mdss_dsi_ack_err_status(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	u32 status;
+	unsigned char *base;
+	bool ret = false;
+
+	base = ctrl->ctrl_base;
+
+	status = MIPI_INP(base + 0x0068);/* DSI_ACK_ERR_STATUS */
+
+	if (status) {
+		MIPI_OUTP(base + 0x0068, status);
+		/* Writing of an extra 0 needed to clear error bits */
+		MIPI_OUTP(base + 0x0068, 0);
+		/*
+		 * After bta done, h/w may have a fake overflow and
+		 * that overflow may further cause ack_err about 3 ms
+		 * later which is another false alarm. Here the
+		 * warning message is ignored.
+		 */
+		if (ctrl->panel_data.panel_info.esd_check_enabled &&
+			(ctrl->status_mode == ESD_BTA) && (status & 0x1008000))
+			return false;
+
+		pr_err("%s: status=%x\n", __func__, status);
+		ret = true;
+	}
+
+	return ret;
+}
+
+/*
+ * Check and clear DSI_TIMEOUT_STATUS (0x00c0). LP RX timeouts (bits
+ * 0x0110) additionally queue a DSI_EV_LP_RX_TIMEOUT recovery event.
+ */
+static bool mdss_dsi_timeout_status(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	u32 status;
+	unsigned char *base;
+	bool ret = false;
+
+	base = ctrl->ctrl_base;
+
+	status = MIPI_INP(base + 0x00c0);/* DSI_TIMEOUT_STATUS */
+
+	if (status & 0x0111) {
+		MIPI_OUTP(base + 0x00c0, status);
+		if (status & 0x0110)
+			dsi_send_events(ctrl, DSI_EV_LP_RX_TIMEOUT, 0);
+		pr_err("%s: status=%x\n", __func__, status);
+		ret = true;
+	}
+
+	return ret;
+}
+
+/*
+ * Check and clear DSI_DLN0_PHY_ERR (0x00b4). Bumps the phy error counter
+ * and returns true when any error bit in 0x011111 was latched; the log
+ * line is suppressed when @print_en is false.
+ */
+bool mdss_dsi_dln0_phy_err(struct mdss_dsi_ctrl_pdata *ctrl, bool print_en)
+{
+	u32 status;
+	unsigned char *base;
+	bool ret = false;
+
+	base = ctrl->ctrl_base;
+
+	status = MIPI_INP(base + 0x00b4);/* DSI_DLN0_PHY_ERR */
+
+	if (status & 0x011111) {
+		MIPI_OUTP(base + 0x00b4, status);
+		if (print_en)
+			pr_err("%s: status=%x\n", __func__, status);
+		ctrl->err_cont.phy_err_cnt++;
+		ret = true;
+	}
+
+	return ret;
+}
+
+/*
+ * Check and clear DSI_FIFO_STATUS (0x000c) and fan conditions out as
+ * events: HS FIFO overflow -> DSI_EV_DLNx_FIFO_OVERFLOW (unless masked in
+ * reg 0x10c), underflow -> DSI_EV_DLNx_FIFO_UNDERFLOW, FIFO empty ->
+ * DSI_EV_DSI_FIFO_EMPTY. FIFO-empty is suppressed when it coincides with
+ * an overflow. Bumps the fifo error counter.
+ */
+static bool mdss_dsi_fifo_status(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	u32 status;
+	unsigned char *base;
+	bool ret = false;
+
+	base = ctrl->ctrl_base;
+
+	status = MIPI_INP(base + 0x000c);/* DSI_FIFO_STATUS */
+
+	/* fifo underflow, overflow and empty*/
+	if (status & 0xcccc4409) {
+		MIPI_OUTP(base + 0x000c, status);
+
+		pr_err("%s: status=%x\n", __func__, status);
+
+		/*
+		 * if DSI FIFO overflow is masked,
+		 * do not report overflow error
+		 */
+		if (MIPI_INP(base + 0x10c) & 0xf0000)
+			status = status & 0xaaaaffff;
+
+		if (status & 0x44440000) {/* DLNx_HS_FIFO_OVERFLOW */
+			dsi_send_events(ctrl, DSI_EV_DLNx_FIFO_OVERFLOW, 0);
+			/* Ignore FIFO EMPTY when overflow happens */
+			status = status & 0xeeeeffff;
+		}
+		if (status & 0x88880000)  /* DLNx_HS_FIFO_UNDERFLOW */
+			dsi_send_events(ctrl, DSI_EV_DLNx_FIFO_UNDERFLOW, 0);
+		if (status & 0x11110000) /* DLN_FIFO_EMPTY */
+			dsi_send_events(ctrl, DSI_EV_DSI_FIFO_EMPTY, 0);
+		ctrl->err_cont.fifo_err_cnt++;
+		ret = true;
+	}
+
+	return ret;
+}
+
+/* Check DSI_STATUS for interleave-op contention; clear and report if set. */
+static bool mdss_dsi_status(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	unsigned char *regs = ctrl->ctrl_base;
+	u32 dsi_status = MIPI_INP(regs + 0x0008);/* DSI_STATUS */
+	bool contention = false;
+
+	if (dsi_status & 0x80000000) { /* INTERLEAVE_OP_CONTENTION */
+		MIPI_OUTP(regs + 0x0008, dsi_status);
+		pr_err("%s: status=%x\n", __func__, dsi_status);
+		contention = true;
+	}
+
+	return contention;
+}
+
+/*
+ * Check and clear DSI_CLK_STATUS (0x0120). A PLL unlock (bit 16) queues
+ * DSI_EV_PLL_UNLOCKED unless the unlock interrupt is masked
+ * (reg 0x10c BIT(28)).
+ */
+static bool mdss_dsi_clk_status(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	u32 status;
+	unsigned char *base;
+	bool ret = false;
+
+	base = ctrl->ctrl_base;
+	status = MIPI_INP(base + 0x0120);/* DSI_CLK_STATUS */
+
+	if (status & 0x10000) { /* DSI_CLK_PLL_UNLOCKED */
+		MIPI_OUTP(base + 0x0120, status);
+		/* If PLL unlock is masked, do not report error */
+		if (MIPI_INP(base + 0x10c) & BIT(28))
+			return false;
+
+		dsi_send_events(ctrl, DSI_EV_PLL_UNLOCKED, 0);
+		pr_err("%s: status=%x\n", __func__, status);
+		ret = true;
+	}
+
+	return ret;
+}
+
+/*
+ * Record the time of each DSI error interrupt in a small ring buffer and
+ * trigger the XLOG panic handler (via workqueue) when the oldest recorded
+ * entry is less than err_time_delta ms old, i.e. errors are arriving too
+ * fast.
+ */
+static void __dsi_error_counter(struct dsi_err_container *err_container)
+{
+	s64 prev_time, curr_time;
+	int prev_index;
+
+	err_container->err_cnt++;
+
+	err_container->index = (err_container->index + 1) %
+		err_container->max_err_index;
+	curr_time = ktime_to_ms(ktime_get());
+	err_container->err_time[err_container->index] = curr_time;
+
+	/* index + 1 is the slot written longest ago (oldest entry) */
+	prev_index = (err_container->index + 1) % err_container->max_err_index;
+	prev_time = err_container->err_time[prev_index];
+
+	if (prev_time &&
+		((curr_time - prev_time) < err_container->err_time_delta)) {
+		pr_err("%s: panic in WQ as dsi error intrs within:%dms\n",
+				__func__, err_container->err_time_delta);
+		MDSS_XLOG_TOUT_HANDLER_WQ("mdp", "dsi0_ctrl", "dsi0_phy",
+			"dsi1_ctrl", "dsi1_phy");
+	}
+}
+
+/*
+ * Top-level DSI error interrupt handler: bails out if the error mask is
+ * not set, otherwise masks error interrupts, polls every error status
+ * register, clears the latched error interrupt bit, feeds the error rate
+ * counter, and queues MDP_BUSY_RELEASE (whose thread-context handler also
+ * re-enables the error interrupt mask).
+ */
+void mdss_dsi_error(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	u32 intr, mask;
+	bool err_handled = false;
+
+	/* Ignore the interrupt if the error intr mask is not set */
+	mask = MIPI_INP(ctrl->ctrl_base + 0x0110);
+	if (!(mask & DSI_INTR_ERROR_MASK)) {
+		pr_debug("%s: Ignore interrupt as error mask not set, 0x%x\n",
+				__func__, mask);
+		return;
+	}
+
+	/* disable dsi error interrupt */
+	mdss_dsi_err_intr_ctrl(ctrl, DSI_INTR_ERROR_MASK, 0);
+
+	/* DSI_ERR_INT_MASK0 */
+	err_handled |= mdss_dsi_clk_status(ctrl);	/* Mask0, 0x10000000 */
+	err_handled |= mdss_dsi_fifo_status(ctrl);	/* mask0, 0x133d00 */
+	err_handled |= mdss_dsi_ack_err_status(ctrl);	/* mask0, 0x01f */
+	err_handled |= mdss_dsi_timeout_status(ctrl);	/* mask0, 0x0e0 */
+	err_handled |= mdss_dsi_status(ctrl);		/* mask0, 0xc0100 */
+	err_handled |= mdss_dsi_dln0_phy_err(ctrl, true);/* mask0, 0x3e00000 */
+
+	/* clear dsi error interrupt */
+	intr = MIPI_INP(ctrl->ctrl_base + 0x0110);
+	intr &= DSI_INTR_TOTAL_MASK;
+	intr |= DSI_INTR_ERROR;
+	MIPI_OUTP(ctrl->ctrl_base + 0x0110, intr);
+
+	if (err_handled)
+		__dsi_error_counter(&ctrl->err_cont);
+
+	dsi_send_events(ctrl, DSI_EV_MDP_BUSY_RELEASE, 0);
+}
+
+/*
+ * mdss_dsi_isr() - main DSI controller interrupt handler.
+ *
+ * Acks the non-error interrupt bits up front, then dispatches: error
+ * status, BTA done (with fake-overflow suppression for ESD BTA on video
+ * mode panels), video done, command DMA done, command-mode MDP done
+ * (optionally stopping the forced clock-lane HS state), and dynamic
+ * refresh done.
+ */
+irqreturn_t mdss_dsi_isr(int irq, void *ptr)
+{
+	u32 isr;
+	u32 intr;
+	struct mdss_dsi_ctrl_pdata *ctrl =
+			(struct mdss_dsi_ctrl_pdata *)ptr;
+
+	if (!ctrl->ctrl_base) {
+		pr_err("%s:%d DSI base adr no Initialized",
+						__func__, __LINE__);
+		return IRQ_HANDLED;
+	}
+
+	isr = MIPI_INP(ctrl->ctrl_base + 0x0110);/* DSI_INTR_CTRL */
+	MIPI_OUTP(ctrl->ctrl_base + 0x0110, (isr & ~DSI_INTR_ERROR));
+
+	pr_debug("%s: ndx=%d isr=%x\n", __func__, ctrl->ndx, isr);
+
+	if (isr & DSI_INTR_ERROR) {
+		MDSS_XLOG(ctrl->ndx, ctrl->mdp_busy, isr, 0x97);
+		mdss_dsi_error(ctrl);
+	}
+
+	if (isr & DSI_INTR_BTA_DONE) {
+		MDSS_XLOG(ctrl->ndx, ctrl->mdp_busy, isr, 0x96);
+		spin_lock(&ctrl->mdp_lock);
+		mdss_dsi_disable_irq_nosync(ctrl, DSI_BTA_TERM);
+		complete(&ctrl->bta_comp);
+		/*
+		 * When bta done happens, the panel should be in good
+		 * state. However, bta could cause the fake overflow
+		 * error for video mode. The similar issue happens when
+		 * sending dcs cmd. This overflow further causes
+		 * flicking because of phy reset which is unncessary,
+		 * so here overflow error is ignored, and errors are
+		 * cleared.
+		 */
+		if (ctrl->panel_data.panel_info.esd_check_enabled &&
+			(ctrl->status_mode == ESD_BTA) &&
+			(ctrl->panel_mode == DSI_VIDEO_MODE)) {
+			isr &= ~DSI_INTR_ERROR;
+			/* clear only overflow */
+			mdss_dsi_set_reg(ctrl, 0x0c, 0x44440000, 0x44440000);
+		}
+		spin_unlock(&ctrl->mdp_lock);
+	}
+
+	if (isr & DSI_INTR_VIDEO_DONE) {
+		spin_lock(&ctrl->mdp_lock);
+		mdss_dsi_disable_irq_nosync(ctrl, DSI_VIDEO_TERM);
+		complete(&ctrl->video_comp);
+		spin_unlock(&ctrl->mdp_lock);
+	}
+
+	if (isr & DSI_INTR_CMD_DMA_DONE) {
+		MDSS_XLOG(ctrl->ndx, ctrl->mdp_busy, isr, 0x98);
+		spin_lock(&ctrl->mdp_lock);
+		mdss_dsi_disable_irq_nosync(ctrl, DSI_CMD_TERM);
+		complete(&ctrl->dma_comp);
+		spin_unlock(&ctrl->mdp_lock);
+	}
+
+	if (isr & DSI_INTR_CMD_MDP_DONE) {
+		MDSS_XLOG(ctrl->ndx, ctrl->mdp_busy, isr, 0x99);
+		spin_lock(&ctrl->mdp_lock);
+		mdss_dsi_disable_irq_nosync(ctrl, DSI_MDP_TERM);
+		if (ctrl->shared_data->cmd_clk_ln_recovery_en &&
+				ctrl->panel_mode == DSI_CMD_MODE) {
+			/* stop force clk lane hs */
+			mdss_dsi_cfg_lane_ctrl(ctrl, BIT(28), 0);
+			dsi_send_events(ctrl, DSI_EV_STOP_HS_CLK_LANE,
+							DSI_MDP_TERM);
+		}
+		ctrl->mdp_busy = false;
+		complete_all(&ctrl->mdp_comp);
+		spin_unlock(&ctrl->mdp_lock);
+	}
+
+	if (isr & DSI_INTR_DYNAMIC_REFRESH_DONE) {
+		spin_lock(&ctrl->mdp_lock);
+		mdss_dsi_disable_irq_nosync(ctrl, DSI_DYNAMIC_TERM);
+
+		/* clear dfps interrupt */
+		intr = MIPI_INP(ctrl->ctrl_base + 0x0110);
+		intr |= DSI_INTR_DYNAMIC_REFRESH_DONE;
+		MIPI_OUTP(ctrl->ctrl_base + 0x0110, intr);
+
+		complete(&ctrl->dynamic_comp);
+		spin_unlock(&ctrl->mdp_lock);
+	}
+
+	return IRQ_HANDLED;
+}
diff --git a/drivers/video/fbdev/msm/mdss_dsi_panel.c b/drivers/video/fbdev/msm/mdss_dsi_panel.c
new file mode 100644
index 0000000..d84cf5e
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_dsi_panel.c
@@ -0,0 +1,2883 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/qpnp/pin.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/leds.h>
+#include <linux/qpnp/pwm.h>
+#include <linux/err.h>
+#include <linux/string.h>
+
+#include "mdss_dsi.h"
+#ifdef TARGET_HW_MDSS_HDMI
+#include "mdss_dba_utils.h"
+#endif
+#define DT_CMD_HDR 6
+#define MIN_REFRESH_RATE 48
+#define DEFAULT_MDP_TRANSFER_TIME 14000
+
+#define VSYNC_DELAY msecs_to_jiffies(17)
+
+DEFINE_LED_TRIGGER(bl_led_trigger);
+
+/*
+ * Request the LPG PWM channel used for the backlight, unless the PMI PWM
+ * is in use. A failed request is logged; pwm_enabled is always reset.
+ */
+void mdss_dsi_panel_pwm_cfg(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	if (ctrl->pwm_pmi)
+		return;
+
+	ctrl->pwm_bl = pwm_request(ctrl->pwm_lpg_chan, "lcd-bklt");
+	/* pwm_request() may return NULL or an ERR_PTR; check both at once */
+	if (IS_ERR_OR_NULL(ctrl->pwm_bl)) {
+		pr_err("%s: Error: lpg_chan=%d pwm request failed\n",
+				__func__, ctrl->pwm_lpg_chan);
+	}
+	ctrl->pwm_enabled = 0;
+}
+
+/*
+ * Re-enable the backlight PWM. Returns true on success or when the PWM
+ * was not previously enabled (nothing to do), false if pwm_enable fails.
+ *
+ * NOTE(review): the early-out means pwm_enabled is only re-asserted when
+ * it was already set — presumably a resume/re-enable helper; confirm the
+ * intended semantics with callers.
+ */
+bool mdss_dsi_panel_pwm_enable(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	bool status = true;
+
+	if (!ctrl->pwm_enabled)
+		goto end;
+
+	if (pwm_enable(ctrl->pwm_bl)) {
+		pr_err("%s: pwm_enable() failed\n", __func__);
+		status = false;
+	}
+
+	ctrl->pwm_enabled = 1;
+
+end:
+	return status;
+}
+
+/*
+ * Program the backlight PWM duty cycle for @level; level 0 disables the
+ * PWM. Duty is scaled as level * pwm_period / bklt_max, using the us-based
+ * API for periods >= 1s and the ns-based API otherwise.
+ *
+ * NOTE(review): "duty = level * ctrl->pwm_period" is a u32 multiply and
+ * could overflow for large periods/levels — confirm value ranges.
+ */
+static void mdss_dsi_panel_bklt_pwm(struct mdss_dsi_ctrl_pdata *ctrl, int level)
+{
+	int ret;
+	u32 duty;
+	u32 period_ns;
+
+	if (ctrl->pwm_bl == NULL) {
+		pr_err("%s: no PWM\n", __func__);
+		return;
+	}
+
+	if (level == 0) {
+		if (ctrl->pwm_enabled) {
+			ret = pwm_config_us(ctrl->pwm_bl, level,
+					ctrl->pwm_period);
+			if (ret)
+				pr_err("%s: pwm_config_us() failed err=%d.\n",
+						__func__, ret);
+			pwm_disable(ctrl->pwm_bl);
+		}
+		ctrl->pwm_enabled = 0;
+		return;
+	}
+
+	duty = level * ctrl->pwm_period;
+	duty /= ctrl->bklt_max;
+
+	pr_debug("%s: bklt_ctrl=%d pwm_period=%d pwm_gpio=%d pwm_lpg_chan=%d\n",
+			__func__, ctrl->bklt_ctrl, ctrl->pwm_period,
+				ctrl->pwm_pmic_gpio, ctrl->pwm_lpg_chan);
+
+	pr_debug("%s: ndx=%d level=%d duty=%d\n", __func__,
+					ctrl->ndx, level, duty);
+
+	if (ctrl->pwm_period >= USEC_PER_SEC) {
+		ret = pwm_config_us(ctrl->pwm_bl, duty, ctrl->pwm_period);
+		if (ret) {
+			pr_err("%s: pwm_config_us() failed err=%d.\n",
+					__func__, ret);
+			return;
+		}
+	} else {
+		period_ns = ctrl->pwm_period * NSEC_PER_USEC;
+		ret = pwm_config(ctrl->pwm_bl,
+				level * period_ns / ctrl->bklt_max,
+				period_ns);
+		if (ret) {
+			pr_err("%s: pwm_config() failed err=%d.\n",
+					__func__, ret);
+			return;
+		}
+	}
+
+	if (!ctrl->pwm_enabled) {
+		ret = pwm_enable(ctrl->pwm_bl);
+		if (ret)
+			pr_err("%s: pwm_enable() failed err=%d\n", __func__,
+				ret);
+		ctrl->pwm_enabled = 1;
+	}
+}
+
+/* Scratch DCS read command; bytes patched by mdss_dsi_panel_cmd_read(). */
+static char dcs_cmd[2] = {0x54, 0x00}; /* DTYPE_DCS_READ */
+static struct dsi_cmd_desc dcs_read_cmd = {
+	{DTYPE_DCS_READ, 1, 0, 1, 5, sizeof(dcs_cmd)},
+	dcs_cmd
+};
+
+/*
+ * mdss_dsi_panel_cmd_read() - issue a two-byte DCS read request.
+ * @cmd0, @cmd1: bytes patched into the shared dcs_cmd payload
+ * @fxn:  callback invoked with the rx length when the read completes
+ * @rbuf: buffer receiving the response
+ * @len:  expected rx length
+ *
+ * When dcs_cmd_by_left is set, only the left controller may send.
+ *
+ * NOTE(review): uses the file-scope dcs_cmd/dcs_read_cmd buffers, so
+ * concurrent callers would race — presumably serialized by the cmdlist
+ * layer; verify.
+ */
+int mdss_dsi_panel_cmd_read(struct mdss_dsi_ctrl_pdata *ctrl, char cmd0,
+		char cmd1, void (*fxn)(int), char *rbuf, int len)
+{
+	struct dcs_cmd_req cmdreq;
+	struct mdss_panel_info *pinfo;
+
+	pinfo = &(ctrl->panel_data.panel_info);
+	if (pinfo->dcs_cmd_by_left) {
+		if (ctrl->ndx != DSI_CTRL_LEFT)
+			return -EINVAL;
+	}
+
+	dcs_cmd[0] = cmd0;
+	dcs_cmd[1] = cmd1;
+	memset(&cmdreq, 0, sizeof(cmdreq));
+	cmdreq.cmds = &dcs_read_cmd;
+	cmdreq.cmds_cnt = 1;
+	cmdreq.flags = CMD_REQ_RX | CMD_REQ_COMMIT;
+	cmdreq.rlen = len;
+	cmdreq.rbuf = rbuf;
+	cmdreq.cb = fxn; /* call back */
+	/*
+	 * blocked here, until call back called
+	 */
+
+	return mdss_dsi_cmdlist_put(ctrl, &cmdreq);
+}
+
+/*
+ * Queue a panel settings command set for transfer. When dcs_cmd_by_left
+ * is set, only the left controller sends.
+ */
+static void mdss_dsi_panel_apply_settings(struct mdss_dsi_ctrl_pdata *ctrl,
+				struct dsi_panel_cmds *pcmds)
+{
+	struct mdss_panel_info *pinfo = &ctrl->panel_data.panel_info;
+	struct dcs_cmd_req request;
+
+	if (pinfo->dcs_cmd_by_left && ctrl->ndx != DSI_CTRL_LEFT)
+		return;
+
+	memset(&request, 0, sizeof(request));
+	request.cmds = pcmds->cmds;
+	request.cmds_cnt = pcmds->cmd_cnt;
+	request.flags = CMD_REQ_COMMIT;
+	request.rlen = 0;
+	request.cb = NULL;
+	mdss_dsi_cmdlist_put(ctrl, &request);
+}
+
+/*
+ * mdss_dsi_panel_cmds_send() - queue a panel command set for transfer.
+ *
+ * Adds CMD_REQ_LP_MODE or CMD_REQ_HS_MODE to @flags based on the command
+ * set's link_state. When dcs_cmd_by_left is set, only the left controller
+ * sends.
+ */
+static void mdss_dsi_panel_cmds_send(struct mdss_dsi_ctrl_pdata *ctrl,
+			struct dsi_panel_cmds *pcmds, u32 flags)
+{
+	struct dcs_cmd_req cmdreq;
+	struct mdss_panel_info *pinfo;
+
+	pinfo = &(ctrl->panel_data.panel_info);
+	if (pinfo->dcs_cmd_by_left) {
+		if (ctrl->ndx != DSI_CTRL_LEFT)
+			return;
+	}
+
+	memset(&cmdreq, 0, sizeof(cmdreq));
+	cmdreq.cmds = pcmds->cmds;
+	cmdreq.cmds_cnt = pcmds->cmd_cnt;
+	cmdreq.flags = flags;
+
+	/*Panel ON/Off commands should be sent in DSI Low Power Mode*/
+	if (pcmds->link_state == DSI_LP_MODE)
+		cmdreq.flags  |= CMD_REQ_LP_MODE;
+	else if (pcmds->link_state == DSI_HS_MODE)
+		cmdreq.flags |= CMD_REQ_HS_MODE;
+
+	cmdreq.rlen = 0;
+	cmdreq.cb = NULL;
+
+	mdss_dsi_cmdlist_put(ctrl, &cmdreq);
+}
+
+/* DCS 0x51 backlight command; level byte patched by mdss_dsi_panel_bklt_dcs(). */
+static char led_pwm1[2] = {0x51, 0x0};	/* DTYPE_DCS_WRITE1 */
+static struct dsi_cmd_desc backlight_cmd = {
+	{DTYPE_DCS_WRITE1, 1, 0, 0, 1, sizeof(led_pwm1)},
+	led_pwm1
+};
+
+/*
+ * Set the backlight via a DCS 0x51 write; @level is truncated to one
+ * byte. When dcs_cmd_by_left is set, only the left controller sends.
+ */
+static void mdss_dsi_panel_bklt_dcs(struct mdss_dsi_ctrl_pdata *ctrl, int level)
+{
+	struct dcs_cmd_req cmdreq;
+	struct mdss_panel_info *pinfo;
+
+	pinfo = &(ctrl->panel_data.panel_info);
+	if (pinfo->dcs_cmd_by_left) {
+		if (ctrl->ndx != DSI_CTRL_LEFT)
+			return;
+	}
+
+	pr_debug("%s: level=%d\n", __func__, level);
+
+	led_pwm1[1] = (unsigned char)level;
+
+	memset(&cmdreq, 0, sizeof(cmdreq));
+	cmdreq.cmds = &backlight_cmd;
+	cmdreq.cmds_cnt = 1;
+	cmdreq.flags = CMD_REQ_COMMIT;
+	cmdreq.rlen = 0;
+	cmdreq.cb = NULL;
+
+	mdss_dsi_cmdlist_put(ctrl, &cmdreq);
+}
+
+/*
+ * Toggle the panel's idle mode by sending the matching command set; the
+ * cached ctrl->idle state is only updated when commands were actually
+ * sent (i.e. the command set is non-empty).
+ */
+static void mdss_dsi_panel_set_idle_mode(struct mdss_panel_data *pdata,
+							bool enable)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl;
+	struct dsi_panel_cmds *cmds;
+
+	if (!pdata) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	ctrl = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+						panel_data);
+
+	pr_debug("%s: Idle (%d->%d)\n", __func__, ctrl->idle, enable);
+
+	if (ctrl->idle == enable)
+		return;
+
+	cmds = enable ? &ctrl->idle_on_cmds : &ctrl->idle_off_cmds;
+	if (!cmds->cmd_cnt)
+		return;
+
+	mdss_dsi_panel_cmds_send(ctrl, cmds, CMD_REQ_COMMIT);
+	ctrl->idle = enable;
+	if (enable)
+		pr_debug("Idle on\n");
+	else
+		pr_debug("Idle off\n");
+}
+
+/*
+ * mdss_dsi_panel_get_idle_mode() - report whether panel idle mode is
+ * active. Returns false on invalid input (was returning the integer 0
+ * from a bool function).
+ */
+static bool mdss_dsi_panel_get_idle_mode(struct mdss_panel_data *pdata)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return false;
+	}
+	ctrl = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+						panel_data);
+	return ctrl->idle;
+}
+
+/*
+ * Request all panel control GPIOs: display-enable, reset (mandatory),
+ * backlight-enable and mode. On any failure, previously requested GPIOs
+ * are released via the goto unwind chain and the error code is returned.
+ */
+static int mdss_dsi_request_gpios(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	int rc = 0;
+
+	if (gpio_is_valid(ctrl_pdata->disp_en_gpio)) {
+		rc = gpio_request(ctrl_pdata->disp_en_gpio,
+						"disp_enable");
+		if (rc) {
+			pr_err("request disp_en gpio failed, rc=%d\n",
+				       rc);
+			goto disp_en_gpio_err;
+		}
+	}
+	rc = gpio_request(ctrl_pdata->rst_gpio, "disp_rst_n");
+	if (rc) {
+		pr_err("request reset gpio failed, rc=%d\n",
+			rc);
+		goto rst_gpio_err;
+	}
+	if (gpio_is_valid(ctrl_pdata->bklt_en_gpio)) {
+		rc = gpio_request(ctrl_pdata->bklt_en_gpio,
+						"bklt_enable");
+		if (rc) {
+			pr_err("request bklt gpio failed, rc=%d\n",
+				       rc);
+			goto bklt_en_gpio_err;
+		}
+	}
+	if (gpio_is_valid(ctrl_pdata->mode_gpio)) {
+		rc = gpio_request(ctrl_pdata->mode_gpio, "panel_mode");
+		if (rc) {
+			pr_err("request panel mode gpio failed,rc=%d\n",
+								rc);
+			goto mode_gpio_err;
+		}
+	}
+	return rc;
+
+mode_gpio_err:
+	if (gpio_is_valid(ctrl_pdata->bklt_en_gpio))
+		gpio_free(ctrl_pdata->bklt_en_gpio);
+bklt_en_gpio_err:
+	gpio_free(ctrl_pdata->rst_gpio);
+rst_gpio_err:
+	if (gpio_is_valid(ctrl_pdata->disp_en_gpio))
+		gpio_free(ctrl_pdata->disp_en_gpio);
+disp_en_gpio_err:
+	return rc;
+}
+
+/*
+ * mdss_dsi_panel_reset() - assert/deassert the panel control GPIOs.
+ * @enable: 1 to request GPIOs and run the power-on/reset sequence,
+ *          0 to drive them low and release them.
+ *
+ * Skips GPIO toggling for the right controller in split-DSI and for DBA
+ * panels (which only need the interface mux configured). The reset
+ * sequence is skipped while continuous splash is enabled.
+ */
+int mdss_dsi_panel_reset(struct mdss_panel_data *pdata, int enable)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	struct mdss_panel_info *pinfo = NULL;
+	int i, rc = 0;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	pinfo = &(ctrl_pdata->panel_data.panel_info);
+
+	/* need to configure intf mux only for external interface */
+	if (pinfo->is_dba_panel) {
+		if (enable) {
+			if (gpio_is_valid(ctrl_pdata->intf_mux_gpio)) {
+				rc = gpio_request(ctrl_pdata->intf_mux_gpio,
+						"intf_mux");
+				if (rc) {
+					pr_err("request mux gpio failed, rc=%d\n",
+									rc);
+					return rc;
+				}
+				rc = gpio_direction_output(
+					ctrl_pdata->intf_mux_gpio, 0);
+				if (rc) {
+					pr_err("%s: unable to set dir for intf mux gpio\n",
+								__func__);
+					goto exit;
+				}
+				gpio_set_value(ctrl_pdata->intf_mux_gpio, 0);
+			} else {
+				pr_debug("%s:%d, intf mux gpio not specified\n",
+							__func__, __LINE__);
+			}
+		} else {
+			if (gpio_is_valid(ctrl_pdata->intf_mux_gpio))
+				gpio_free(ctrl_pdata->intf_mux_gpio);
+		}
+	}
+
+	if ((mdss_dsi_is_right_ctrl(ctrl_pdata) &&
+		mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data)) ||
+			pinfo->is_dba_panel) {
+		pr_debug("%s:%d, right ctrl gpio configuration not needed\n",
+			__func__, __LINE__);
+		return rc;
+	}
+
+	/* NOTE(review): message says "reset line" but this checks disp_en */
+	if (!gpio_is_valid(ctrl_pdata->disp_en_gpio)) {
+		pr_debug("%s:%d, reset line not configured\n",
+			   __func__, __LINE__);
+	}
+
+	if (!gpio_is_valid(ctrl_pdata->rst_gpio)) {
+		pr_debug("%s:%d, reset line not configured\n",
+			   __func__, __LINE__);
+		return rc;
+	}
+
+	pr_debug("%s: enable = %d\n", __func__, enable);
+
+	if (enable) {
+		rc = mdss_dsi_request_gpios(ctrl_pdata);
+		if (rc) {
+			pr_err("gpio request failed\n");
+			return rc;
+		}
+		if (!pinfo->cont_splash_enabled) {
+			if (gpio_is_valid(ctrl_pdata->disp_en_gpio)) {
+				rc = gpio_direction_output(
+					ctrl_pdata->disp_en_gpio, 1);
+				if (rc) {
+					pr_err("%s: unable to set dir for en gpio\n",
+						__func__);
+					goto exit;
+				}
+			}
+
+			if (pdata->panel_info.rst_seq_len) {
+				rc = gpio_direction_output(ctrl_pdata->rst_gpio,
+					pdata->panel_info.rst_seq[0]);
+				if (rc) {
+					pr_err("%s: unable to set dir for rst gpio\n",
+						__func__);
+					goto exit;
+				}
+			}
+
+			/*
+			 * rst_seq is consumed as level/delay pairs:
+			 * rst_seq[i] is the gpio level, rst_seq[++i] the
+			 * delay in ms (converted to us for usleep_range).
+			 */
+			for (i = 0; i < pdata->panel_info.rst_seq_len; ++i) {
+				gpio_set_value((ctrl_pdata->rst_gpio),
+					pdata->panel_info.rst_seq[i]);
+				if (pdata->panel_info.rst_seq[++i])
+					usleep_range(pinfo->rst_seq[i] * 1000,
+						     pinfo->rst_seq[i] * 1000);
+			}
+
+			if (gpio_is_valid(ctrl_pdata->bklt_en_gpio)) {
+				rc = gpio_direction_output(
+					ctrl_pdata->bklt_en_gpio, 1);
+				if (rc) {
+					pr_err("%s: unable to set dir for bklt gpio\n",
+						__func__);
+					goto exit;
+				}
+			}
+		}
+
+		if (gpio_is_valid(ctrl_pdata->mode_gpio)) {
+			bool out = false;
+
+			if (pinfo->mode_gpio_state == MODE_GPIO_HIGH)
+				out = true;
+			else if (pinfo->mode_gpio_state == MODE_GPIO_LOW)
+				out = false;
+
+			rc = gpio_direction_output(ctrl_pdata->mode_gpio, out);
+			if (rc) {
+				pr_err("%s: unable to set dir for mode gpio\n",
+					__func__);
+				goto exit;
+			}
+		}
+		if (ctrl_pdata->ctrl_state & CTRL_STATE_PANEL_INIT) {
+			pr_debug("%s: Panel Not properly turned OFF\n",
+						__func__);
+			ctrl_pdata->ctrl_state &= ~CTRL_STATE_PANEL_INIT;
+			pr_debug("%s: Reset panel done\n", __func__);
+		}
+	} else {
+		if (gpio_is_valid(ctrl_pdata->bklt_en_gpio)) {
+			gpio_set_value((ctrl_pdata->bklt_en_gpio), 0);
+			gpio_free(ctrl_pdata->bklt_en_gpio);
+		}
+		if (gpio_is_valid(ctrl_pdata->disp_en_gpio)) {
+			gpio_set_value((ctrl_pdata->disp_en_gpio), 0);
+			gpio_free(ctrl_pdata->disp_en_gpio);
+		}
+		gpio_set_value((ctrl_pdata->rst_gpio), 0);
+		gpio_free(ctrl_pdata->rst_gpio);
+		if (gpio_is_valid(ctrl_pdata->mode_gpio))
+			gpio_free(ctrl_pdata->mode_gpio);
+	}
+
+exit:
+	return rc;
+}
+
+/**
+ * mdss_dsi_roi_merge() - merge the left and right panel ROIs into one
+ * @ctrl: DSI controller (left or right)
+ * @roi:  output rectangle covering both halves in merged coordinates
+ *
+ * Used by partial update when a single DSI interface must carry the
+ * 2A/2B (column/page address) DCS commands for both panel halves.
+ *
+ * Return: 1 when the merged ROI includes the left half, 0 when it is
+ * right-only (or when the other controller is missing).
+ */
+static int mdss_dsi_roi_merge(struct mdss_dsi_ctrl_pdata *ctrl,
+					struct mdss_rect *roi)
+{
+	struct mdss_panel_info *l_pinfo;
+	struct mdss_rect *l_roi;
+	struct mdss_rect *r_roi;
+	struct mdss_dsi_ctrl_pdata *other = NULL;
+	int ans = 0;
+
+	if (ctrl->ndx == DSI_CTRL_LEFT) {
+		other = mdss_dsi_get_ctrl_by_index(DSI_CTRL_RIGHT);
+		if (!other)
+			return ans;
+		l_pinfo = &(ctrl->panel_data.panel_info);
+		l_roi = &(ctrl->panel_data.panel_info.roi);
+		r_roi = &(other->panel_data.panel_info.roi);
+	} else  {
+		other = mdss_dsi_get_ctrl_by_index(DSI_CTRL_LEFT);
+		if (!other)
+			return ans;
+		l_pinfo = &(other->panel_data.panel_info);
+		l_roi = &(other->panel_data.panel_info.roi);
+		r_roi = &(ctrl->panel_data.panel_info.roi);
+	}
+
+	if (l_roi->w == 0 && l_roi->h == 0) {
+		/* right only */
+		*roi = *r_roi;
+		roi->x += l_pinfo->xres;/* add left full width to x-offset */
+	} else {
+		/* left only and left+right */
+		*roi = *l_roi;
+		roi->w +=  r_roi->w; /* add right width */
+		ans = 1;
+	}
+
+	return ans;
+}
+
+/*
+ * 2A (column address set) and 2B (page address set) DCS payloads.
+ * Bytes 1..4 of each are patched with the roi bounds by
+ * mdss_dsi_send_col_page_addr() before every transmission.
+ */
+static char caset[] = {0x2a, 0x00, 0x00, 0x03, 0x00};	/* DTYPE_DCS_LWRITE */
+static char paset[] = {0x2b, 0x00, 0x00, 0x05, 0x00};	/* DTYPE_DCS_LWRITE */
+
+/* pack into one frame before sent */
+static struct dsi_cmd_desc set_col_page_addr_cmd[] = {
+	{{DTYPE_DCS_LWRITE, 0, 0, 0, 1, sizeof(caset)}, caset},	/* packed */
+	{{DTYPE_DCS_LWRITE, 1, 0, 0, 1, sizeof(paset)}, paset},
+};
+
+/*
+ * mdss_dsi_send_col_page_addr() - send 2A/2B (column/page address)
+ * DCS commands describing @roi through @ctrl.
+ *
+ * @unicast: set CMD_REQ_UNICAST so only the addressed ctrl transmits.
+ *
+ * NOTE(review): patches the file-scope caset/paset buffers in place,
+ * so concurrent calls would race - presumably serialized by the mdp
+ * cmd-list path; confirm.
+ */
+static void mdss_dsi_send_col_page_addr(struct mdss_dsi_ctrl_pdata *ctrl,
+				struct mdss_rect *roi, int unicast)
+{
+	struct dcs_cmd_req cmdreq;
+
+	/* column range: start = x, end = x + w - 1, big-endian bytes */
+	caset[1] = (((roi->x) & 0xFF00) >> 8);
+	caset[2] = (((roi->x) & 0xFF));
+	caset[3] = (((roi->x - 1 + roi->w) & 0xFF00) >> 8);
+	caset[4] = (((roi->x - 1 + roi->w) & 0xFF));
+	set_col_page_addr_cmd[0].payload = caset;
+
+	/* page range: start = y, end = y + h - 1, big-endian bytes */
+	paset[1] = (((roi->y) & 0xFF00) >> 8);
+	paset[2] = (((roi->y) & 0xFF));
+	paset[3] = (((roi->y - 1 + roi->h) & 0xFF00) >> 8);
+	paset[4] = (((roi->y - 1 + roi->h) & 0xFF));
+	set_col_page_addr_cmd[1].payload = paset;
+
+	memset(&cmdreq, 0, sizeof(cmdreq));
+	cmdreq.cmds_cnt = 2;
+	cmdreq.flags = CMD_REQ_COMMIT;
+	if (unicast)
+		cmdreq.flags |= CMD_REQ_UNICAST;
+	cmdreq.rlen = 0;
+	cmdreq.cb = NULL;
+
+	cmdreq.cmds = set_col_page_addr_cmd;
+	mdss_dsi_cmdlist_put(ctrl, &cmdreq);
+}
+
+/*
+ * mdss_dsi_set_col_page_addr() - update the panel's 2A/2B (column/page
+ * address) window for partial update.
+ *
+ * @pdata:      panel data of the calling ctrl
+ * @force_send: send even when the cached roi is unchanged
+ *
+ * A cached roi (ctrl->roi, or the left ctrl's roi when roi_merge is
+ * enabled) suppresses redundant 2A/2B transmissions. With
+ * sync_wait_broadcast the commands are queued so the right ctrl
+ * triggers both transfers together.
+ *
+ * Return: 0 on success or nothing to do, -EINVAL on bad input.
+ */
+static int mdss_dsi_set_col_page_addr(struct mdss_panel_data *pdata,
+		bool force_send)
+{
+	struct mdss_panel_info *pinfo;
+	struct mdss_rect roi = {0};
+	struct mdss_rect *p_roi;
+	struct mdss_rect *c_roi;
+	struct mdss_dsi_ctrl_pdata *ctrl = NULL;
+	struct mdss_dsi_ctrl_pdata *other = NULL;
+	int left_or_both = 0;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	pinfo = &pdata->panel_info;
+	p_roi = &pinfo->roi;
+
+	/*
+	 * to avoid keep sending same col_page info to panel,
+	 * if roi_merge enabled, the roi of left ctrl is used
+	 * to compare against new merged roi and saved new
+	 * merged roi to it after comparing.
+	 * if roi_merge disabled, then the calling ctrl's roi
+	 * and pinfo's roi are used to compare.
+	 */
+	if (pinfo->partial_update_roi_merge) {
+		left_or_both = mdss_dsi_roi_merge(ctrl, &roi);
+		other = mdss_dsi_get_ctrl_by_index(DSI_CTRL_LEFT);
+		/*
+		 * Fix: mdss_dsi_get_ctrl_by_index() can return NULL
+		 * (mdss_dsi_roi_merge() checks it); the old code
+		 * dereferenced the result unconditionally.
+		 */
+		if (!other) {
+			pr_err("%s: left ctrl not available\n", __func__);
+			return -EINVAL;
+		}
+		c_roi = &other->roi;
+	} else {
+		c_roi = &ctrl->roi;
+		roi = *p_roi;
+	}
+
+	/* roi had changed, do col_page update */
+	if (force_send || !mdss_rect_cmp(c_roi, &roi)) {
+		pr_debug("%s: ndx=%d x=%d y=%d w=%d h=%d\n",
+				__func__, ctrl->ndx, p_roi->x,
+				p_roi->y, p_roi->w, p_roi->h);
+
+		*c_roi = roi; /* keep to ctrl */
+		if (c_roi->w == 0 || c_roi->h == 0) {
+			/* no new frame update */
+			pr_debug("%s: ctrl=%d, no partial roi set\n",
+						__func__, ctrl->ndx);
+			return 0;
+		}
+
+		if (pinfo->dcs_cmd_by_left) {
+			if (left_or_both && ctrl->ndx == DSI_CTRL_RIGHT) {
+				/* 2A/2B sent by left already */
+				return 0;
+			}
+		}
+
+		if (!mdss_dsi_sync_wait_enable(ctrl)) {
+			if (pinfo->dcs_cmd_by_left)
+				ctrl = mdss_dsi_get_ctrl_by_index(
+							DSI_CTRL_LEFT);
+			mdss_dsi_send_col_page_addr(ctrl, &roi, 0);
+		} else {
+			/*
+			 * when sync_wait_broadcast enabled,
+			 * need trigger at right ctrl to
+			 * start both dcs cmd transmission
+			 */
+			other = mdss_dsi_get_other_ctrl(ctrl);
+			if (!other)
+				goto end;
+
+			if (mdss_dsi_is_left_ctrl(ctrl)) {
+				if (pinfo->partial_update_roi_merge) {
+					/*
+					 * roi is the one after merged
+					 * to dsi-1 only
+					 */
+					mdss_dsi_send_col_page_addr(other,
+							&roi, 0);
+				} else {
+					mdss_dsi_send_col_page_addr(ctrl,
+							&ctrl->roi, 1);
+					mdss_dsi_send_col_page_addr(other,
+							&other->roi, 1);
+				}
+			} else {
+				if (pinfo->partial_update_roi_merge) {
+					/*
+					 * roi is the one after merged
+					 * to dsi-1 only
+					 */
+					mdss_dsi_send_col_page_addr(ctrl,
+							&roi, 0);
+				} else {
+					mdss_dsi_send_col_page_addr(other,
+							&other->roi, 1);
+					mdss_dsi_send_col_page_addr(ctrl,
+							&ctrl->roi, 1);
+				}
+			}
+		}
+	}
+
+end:
+	return 0;
+}
+
+/*
+ * mdss_dsi_panel_apply_display_setting() - apply the low-persistence
+ * command set matching @mode (ON or OFF).
+ *
+ * Return: 0 when commands were sent, -EINVAL for an unknown mode, an
+ * empty command set, or bad input.
+ */
+static int mdss_dsi_panel_apply_display_setting(struct mdss_panel_data *pdata,
+							u32 mode)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl;
+	struct dsi_panel_cmds *cmds = NULL;
+
+	if (!pdata) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	/* pick the command set for the requested persistence mode */
+	if (mode == MDSS_PANEL_LOW_PERSIST_MODE_ON)
+		cmds = &ctrl->lp_on_cmds;
+	else if (mode == MDSS_PANEL_LOW_PERSIST_MODE_OFF)
+		cmds = &ctrl->lp_off_cmds;
+
+	if (!cmds || !cmds->cmd_cnt)
+		return -EINVAL;
+
+	mdss_dsi_panel_apply_settings(ctrl, cmds);
+	pr_debug("%s: Persistence mode %d applied\n", __func__, mode);
+	return 0;
+}
+
+/*
+ * mdss_dsi_panel_switch_mode() - send the panel commands for a dynamic
+ * mode switch: video<->command DMS, or an immediate resolution switch
+ * using the current timing's switch command set.
+ *
+ * For DSC panels the PPS is sent before or after the switch commands
+ * depending on send_pps_before_switch. No-op when dms_mode is disabled.
+ */
+static void mdss_dsi_panel_switch_mode(struct mdss_panel_data *pdata,
+							int mode)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	struct mipi_panel_info *mipi;
+	struct dsi_panel_cmds *pcmds;
+	u32 flags = 0;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	mipi  = &pdata->panel_info.mipi;
+
+	if (!mipi->dms_mode)
+		return;
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	/* non-immediate DMS: use the video<->cmd transition command sets */
+	if (mipi->dms_mode != DYNAMIC_MODE_RESOLUTION_SWITCH_IMMEDIATE) {
+		flags |= CMD_REQ_COMMIT;
+		if (mode == SWITCH_TO_CMD_MODE)
+			pcmds = &ctrl_pdata->video2cmd;
+		else
+			pcmds = &ctrl_pdata->cmd2video;
+	} else if ((mipi->dms_mode ==
+				DYNAMIC_MODE_RESOLUTION_SWITCH_IMMEDIATE)
+			&& pdata->current_timing
+			&& !list_empty(&pdata->timings_list)) {
+		/* immediate resolution switch: use the timing's own cmds */
+		struct dsi_panel_timing *pt;
+
+		pt = container_of(pdata->current_timing,
+				struct dsi_panel_timing, timing);
+
+		pr_debug("%s: sending switch commands\n", __func__);
+		pcmds = &pt->switch_cmds;
+		flags |= CMD_REQ_DMA_TPG;
+		flags |= CMD_REQ_COMMIT;
+	} else {
+		pr_warn("%s: Invalid mode switch attempted\n", __func__);
+		return;
+	}
+
+	if ((pdata->panel_info.compression_mode == COMPRESSION_DSC) &&
+			(pdata->panel_info.send_pps_before_switch))
+		mdss_dsi_panel_dsc_pps_send(ctrl_pdata, &pdata->panel_info);
+
+	mdss_dsi_panel_cmds_send(ctrl_pdata, pcmds, flags);
+
+	if ((pdata->panel_info.compression_mode == COMPRESSION_DSC) &&
+			(!pdata->panel_info.send_pps_before_switch))
+		mdss_dsi_panel_dsc_pps_send(ctrl_pdata, &pdata->panel_info);
+}
+
+/*
+ * mdss_dsi_panel_bl_ctrl() - apply a backlight level through the
+ * panel's configured backlight interface (WLED trigger, PWM, or DCS
+ * command). Non-zero levels below bl_min are clamped up to bl_min.
+ */
+static void mdss_dsi_panel_bl_ctrl(struct mdss_panel_data *pdata,
+							u32 bl_level)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	struct mdss_dsi_ctrl_pdata *sctrl = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	/*
+	 * Some backlight controllers specify a minimum duty cycle
+	 * for the backlight brightness. If the brightness is less
+	 * than it, the controller can malfunction.
+	 */
+
+	if ((bl_level < pdata->panel_info.bl_min) && (bl_level != 0))
+		bl_level = pdata->panel_info.bl_min;
+
+	switch (ctrl_pdata->bklt_ctrl) {
+	case BL_WLED:
+		led_trigger_event(bl_led_trigger, bl_level);
+		break;
+	case BL_PWM:
+		mdss_dsi_panel_bklt_pwm(ctrl_pdata, bl_level);
+		break;
+	case BL_DCS_CMD:
+		if (!mdss_dsi_sync_wait_enable(ctrl_pdata)) {
+			mdss_dsi_panel_bklt_dcs(ctrl_pdata, bl_level);
+			break;
+		}
+		/*
+		 * DCS commands to update backlight are usually sent at
+		 * the same time to both the controllers. However, if
+		 * sync_wait is enabled, we need to ensure that the
+		 * dcs commands are first sent to the non-trigger
+		 * controller so that when the commands are triggered,
+		 * both controllers receive it at the same time.
+		 */
+		sctrl = mdss_dsi_get_other_ctrl(ctrl_pdata);
+		if (mdss_dsi_sync_wait_trigger(ctrl_pdata)) {
+			if (sctrl)
+				mdss_dsi_panel_bklt_dcs(sctrl, bl_level);
+			mdss_dsi_panel_bklt_dcs(ctrl_pdata, bl_level);
+		} else {
+			mdss_dsi_panel_bklt_dcs(ctrl_pdata, bl_level);
+			if (sctrl)
+				mdss_dsi_panel_bklt_dcs(sctrl, bl_level);
+		}
+		break;
+	default:
+		pr_err("%s: Unknown bl_ctrl configuration\n",
+			__func__);
+		break;
+	}
+}
+
+#ifdef TARGET_HW_MDSS_HDMI
+/* Notify the DBA (display bridge) layer that video has started. */
+static void mdss_dsi_panel_on_hdmi(struct mdss_dsi_ctrl_pdata *ctrl,
+			struct mdss_panel_info *pinfo)
+{
+	if (ctrl->ds_registered)
+		mdss_dba_utils_video_on(pinfo->dba_data, pinfo);
+}
+#else
+/* Stub when HDMI bridge support is compiled out. */
+static void mdss_dsi_panel_on_hdmi(struct mdss_dsi_ctrl_pdata *ctrl,
+			struct mdss_panel_info *pinfo)
+{
+	/*
+	 * Mark the parameters used without dereferencing them; the old
+	 * (void)(*ctrl) form performed a needless read through the
+	 * pointers.
+	 */
+	(void)ctrl;
+	(void)pinfo;
+}
+#endif
+
+/*
+ * mdss_dsi_panel_on() - panel power-on sequence: send the ON command
+ * set (or the post-DMS ON set after an immediate mode switch), send
+ * PPS for DSC panels, notify the HDMI/DBA path, then restore the
+ * persisted low-persistence mode.
+ *
+ * With dcs_cmd_by_left, only the left ctrl transmits.
+ * Return: always 0 (errors are only logged by callees).
+ */
+static int mdss_dsi_panel_on(struct mdss_panel_data *pdata)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl = NULL;
+	struct mdss_panel_info *pinfo;
+	struct dsi_panel_cmds *on_cmds;
+	int ret = 0;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	pinfo = &pdata->panel_info;
+	ctrl = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	pr_debug("%s: ndx=%d\n", __func__, ctrl->ndx);
+
+	if (pinfo->dcs_cmd_by_left) {
+		if (ctrl->ndx != DSI_CTRL_LEFT)
+			goto end;
+	}
+
+	on_cmds = &ctrl->on_cmds;
+
+	/* after an immediate DMS switch the boot mode no longer matches */
+	if ((pinfo->mipi.dms_mode == DYNAMIC_MODE_SWITCH_IMMEDIATE) &&
+			(pinfo->mipi.boot_mode != pinfo->mipi.mode))
+		on_cmds = &ctrl->post_dms_on_cmds;
+
+	pr_debug("%s: ndx=%d cmd_cnt=%d\n", __func__,
+				ctrl->ndx, on_cmds->cmd_cnt);
+
+	if (on_cmds->cmd_cnt)
+		mdss_dsi_panel_cmds_send(ctrl, on_cmds, CMD_REQ_COMMIT);
+
+	if (pinfo->compression_mode == COMPRESSION_DSC)
+		mdss_dsi_panel_dsc_pps_send(ctrl, pinfo);
+
+	mdss_dsi_panel_on_hdmi(ctrl, pinfo);
+
+	/* Ensure low persistence mode is set as before */
+	mdss_dsi_panel_apply_display_setting(pdata, pinfo->persist_mode);
+
+end:
+	pr_debug("%s:-\n", __func__);
+	return ret;
+}
+
+#ifdef TARGET_HW_MDSS_HDMI
+/*
+ * Enable HDCP once at least one full frame has reached the downstream
+ * device. NOTE(review): assumes pinfo->mipi.frame_rate is non-zero -
+ * confirm against panel parsing.
+ */
+static void mdss_dsi_post_panel_on_hdmi(struct mdss_panel_info *pinfo)
+{
+	u32 vsync_period = 0;
+
+	if (pinfo->is_dba_panel && pinfo->is_pluggable) {
+		/* ensure at least 1 frame transfers to down stream device */
+		vsync_period = (MSEC_PER_SEC / pinfo->mipi.frame_rate) + 1;
+		msleep(vsync_period);
+		mdss_dba_utils_hdcp_enable(pinfo->dba_data, true);
+	}
+}
+#else
+/* Stub when HDMI bridge support is compiled out. */
+static void mdss_dsi_post_panel_on_hdmi(struct mdss_panel_info *pinfo)
+{
+	/* avoid the old (void)(*pinfo) gratuitous dereference */
+	(void)pinfo;
+}
+#endif
+
+/*
+ * mdss_dsi_post_panel_on() - post-on work: after one vsync, send the
+ * post-panel-on command set, then let the HDMI/DBA path enable HDCP.
+ * With dcs_cmd_by_left, only the left ctrl transmits.
+ * Return: always 0.
+ */
+static int mdss_dsi_post_panel_on(struct mdss_panel_data *pdata)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl = NULL;
+	struct mdss_panel_info *pinfo;
+	struct dsi_panel_cmds *cmds;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	pr_debug("%s: ctrl=%pK ndx=%d\n", __func__, ctrl, ctrl->ndx);
+
+	pinfo = &pdata->panel_info;
+	if (pinfo->dcs_cmd_by_left && ctrl->ndx != DSI_CTRL_LEFT)
+		goto end;
+
+	cmds = &ctrl->post_panel_on_cmds;
+	if (cmds->cmd_cnt) {
+		msleep(VSYNC_DELAY);	/* wait for a vsync passed */
+		mdss_dsi_panel_cmds_send(ctrl, cmds, CMD_REQ_COMMIT);
+	}
+
+	mdss_dsi_post_panel_on_hdmi(pinfo);
+
+end:
+	pr_debug("%s:-\n", __func__);
+	return 0;
+}
+
+#ifdef TARGET_HW_MDSS_HDMI
+/* Stop DBA video and disable HDCP for pluggable bridge panels. */
+static void mdss_dsi_panel_off_hdmi(struct mdss_dsi_ctrl_pdata *ctrl,
+			struct mdss_panel_info *pinfo)
+{
+	if (ctrl->ds_registered && pinfo->is_pluggable) {
+		mdss_dba_utils_video_off(pinfo->dba_data);
+		mdss_dba_utils_hdcp_enable(pinfo->dba_data, false);
+	}
+}
+#else
+/* Stub when HDMI bridge support is compiled out. */
+static void mdss_dsi_panel_off_hdmi(struct mdss_dsi_ctrl_pdata *ctrl,
+			struct mdss_panel_info *pinfo)
+{
+	/* avoid the old (void)(*ptr) gratuitous dereferences */
+	(void)ctrl;
+	(void)pinfo;
+}
+#endif
+
+/*
+ * mdss_dsi_panel_off() - panel power-off: send the OFF command set,
+ * notify the HDMI/DBA path, and clear the ctrl's idle flag.
+ * With dcs_cmd_by_left, only the left ctrl transmits.
+ * Return: always 0.
+ */
+static int mdss_dsi_panel_off(struct mdss_panel_data *pdata)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl = NULL;
+	struct mdss_panel_info *pinfo;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	pinfo = &pdata->panel_info;
+	ctrl = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	pr_debug("%s: ctrl=%pK ndx=%d\n", __func__, ctrl, ctrl->ndx);
+
+	if (pinfo->dcs_cmd_by_left) {
+		if (ctrl->ndx != DSI_CTRL_LEFT)
+			goto end;
+	}
+
+	if (ctrl->off_cmds.cmd_cnt)
+		mdss_dsi_panel_cmds_send(ctrl, &ctrl->off_cmds, CMD_REQ_COMMIT);
+
+	mdss_dsi_panel_off_hdmi(ctrl, pinfo);
+
+end:
+	/* clear idle state */
+	ctrl->idle = false;
+	pr_debug("%s:-\n", __func__);
+	return 0;
+}
+
+/*
+ * mdss_dsi_panel_low_power_config() - panel-specific low power hook:
+ * toggles the panel's idle mode to match @enable.
+ * Return: 0 on success, -EINVAL on bad input.
+ */
+static int mdss_dsi_panel_low_power_config(struct mdss_panel_data *pdata,
+	int enable)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl;
+
+	if (!pdata) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	pr_debug("%s: ctrl=%pK ndx=%d enable=%d\n", __func__, ctrl, ctrl->ndx,
+		enable);
+
+	/* Any panel specific low power commands/config */
+	/* Control idle mode for panel */
+	mdss_dsi_panel_set_idle_mode(pdata, enable ? true : false);
+	pr_debug("%s:-\n", __func__);
+	return 0;
+}
+
+/*
+ * mdss_dsi_parse_trigger() - map the DT string property @trigger_key
+ * to a DSI_CMD_TRIGGER_* value; defaults to DSI_CMD_TRIGGER_SW when
+ * the property is absent or unrecognized.
+ */
+static void mdss_dsi_parse_trigger(struct device_node *np, char *trigger,
+		char *trigger_key)
+{
+	static const struct {
+		const char *name;
+		char value;
+	} trigger_map[] = {
+		{ "none",		DSI_CMD_TRIGGER_NONE },
+		{ "trigger_te",		DSI_CMD_TRIGGER_TE },
+		{ "trigger_sw_seof",	DSI_CMD_TRIGGER_SW_SEOF },
+		{ "trigger_sw_te",	DSI_CMD_TRIGGER_SW_TE },
+	};
+	const char *data;
+	int i;
+
+	*trigger = DSI_CMD_TRIGGER_SW;
+
+	data = of_get_property(np, trigger_key, NULL);
+	if (!data)
+		return;
+
+	for (i = 0; i < (int)(sizeof(trigger_map) / sizeof(trigger_map[0]));
+			i++) {
+		if (!strcmp(data, trigger_map[i].name)) {
+			*trigger = trigger_map[i].value;
+			break;
+		}
+	}
+}
+
+
+/*
+ * mdss_dsi_parse_dcs_cmds() - parse a DT blob of packed DCS commands
+ * (@cmd_key) into @pcmds. Each command is a dsi_ctrl_hdr followed by
+ * dlen payload bytes (dlen stored big-endian in DT).
+ *
+ * On success, pcmds->buf owns a copy of the blob and pcmds->cmds
+ * points into it. @link_key (optional) selects DSI_HS_MODE when it
+ * reads "dsi_hs_mode"; default link state is DSI_LP_MODE.
+ *
+ * Return: 0 on success, -ENOMEM on allocation or parse failure.
+ */
+static int mdss_dsi_parse_dcs_cmds(struct device_node *np,
+		struct dsi_panel_cmds *pcmds, char *cmd_key, char *link_key)
+{
+	const char *data;
+	int blen = 0, len;
+	char *buf, *bp;
+	struct dsi_ctrl_hdr *dchdr;
+	int i, cnt;
+
+	data = of_get_property(np, cmd_key, &blen);
+	if (!data) {
+		pr_err("%s: failed, key=%s\n", __func__, cmd_key);
+		return -ENOMEM;
+	}
+
+	buf = kcalloc(blen, sizeof(char), GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	memcpy(buf, data, blen);
+
+	/* scan dcs commands */
+	bp = buf;
+	len = blen;
+	cnt = 0;
+	while (len >= sizeof(*dchdr)) {
+		dchdr = (struct dsi_ctrl_hdr *)bp;
+		dchdr->dlen = ntohs(dchdr->dlen);
+		bp += sizeof(*dchdr);
+		len -= sizeof(*dchdr);
+		/*
+		 * Fix: validate dlen against the bytes remaining AFTER
+		 * the header. The old check compared dlen to the
+		 * pre-header length, letting a malformed blob overrun
+		 * the buffer by up to sizeof(*dchdr) bytes in the
+		 * payload-assignment pass below.
+		 */
+		if (dchdr->dlen > len) {
+			pr_err("%s: dtsi cmd=%x error, len=%d",
+				__func__, dchdr->dtype, dchdr->dlen);
+			goto exit_free;
+		}
+		bp += dchdr->dlen;
+		len -= dchdr->dlen;
+		cnt++;
+	}
+
+	if (len != 0) {
+		pr_err("%s: dcs_cmd=%x len=%d error!",
+				__func__, buf[0], blen);
+		goto exit_free;
+	}
+
+	pcmds->cmds = kcalloc(cnt, sizeof(struct dsi_cmd_desc),
+						GFP_KERNEL);
+	if (!pcmds->cmds)
+		goto exit_free;
+
+	pcmds->cmd_cnt = cnt;
+	pcmds->buf = buf;
+	pcmds->blen = blen;
+
+	/* second pass: point each descriptor at its header and payload */
+	bp = buf;
+	len = blen;
+	for (i = 0; i < cnt; i++) {
+		dchdr = (struct dsi_ctrl_hdr *)bp;
+		len -= sizeof(*dchdr);
+		bp += sizeof(*dchdr);
+		pcmds->cmds[i].dchdr = *dchdr;
+		pcmds->cmds[i].payload = bp;
+		bp += dchdr->dlen;
+		len -= dchdr->dlen;
+	}
+
+	/*Set default link state to LP Mode*/
+	pcmds->link_state = DSI_LP_MODE;
+
+	if (link_key) {
+		data = of_get_property(np, link_key, NULL);
+		if (data && !strcmp(data, "dsi_hs_mode"))
+			pcmds->link_state = DSI_HS_MODE;
+		else
+			pcmds->link_state = DSI_LP_MODE;
+	}
+
+	pr_debug("%s: dcs_cmd=%x len=%d, cmd_cnt=%d link_state=%d\n", __func__,
+		pcmds->buf[0], pcmds->blen, pcmds->cmd_cnt, pcmds->link_state);
+
+	return 0;
+
+exit_free:
+	kfree(buf);
+	return -ENOMEM;
+}
+
+
+/*
+ * mdss_panel_get_dst_fmt() - map a bits-per-pixel value (and, for
+ * 16/18/24 bpp, the mipi mode and pixel packing) to a DSI destination
+ * format constant.
+ *
+ * Unknown mipi modes fall back to the video-mode format, matching the
+ * original behavior.
+ *
+ * Return: 0 on success with *dst_format set, -EINVAL for an
+ * unsupported bpp.
+ */
+int mdss_panel_get_dst_fmt(u32 bpp, char mipi_mode, u32 pixel_packing,
+				char *dst_format)
+{
+	switch (bpp) {
+	case 3:
+		*dst_format = DSI_CMD_DST_FORMAT_RGB111;
+		return 0;
+	case 8:
+		*dst_format = DSI_CMD_DST_FORMAT_RGB332;
+		return 0;
+	case 12:
+		*dst_format = DSI_CMD_DST_FORMAT_RGB444;
+		return 0;
+	case 16:
+		*dst_format = (mipi_mode == DSI_CMD_MODE) ?
+				DSI_CMD_DST_FORMAT_RGB565 :
+				DSI_VIDEO_DST_FORMAT_RGB565;
+		return 0;
+	case 18:
+		if (mipi_mode == DSI_CMD_MODE)
+			*dst_format = DSI_CMD_DST_FORMAT_RGB666;
+		else if (pixel_packing == 0)
+			*dst_format = DSI_VIDEO_DST_FORMAT_RGB666;
+		else
+			*dst_format = DSI_VIDEO_DST_FORMAT_RGB666_LOOSE;
+		return 0;
+	case 24:
+		*dst_format = (mipi_mode == DSI_CMD_MODE) ?
+				DSI_CMD_DST_FORMAT_RGB888 :
+				DSI_VIDEO_DST_FORMAT_RGB888;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * mdss_dsi_parse_fbc_params() - read optional FBC (frame buffer
+ * compression) properties from DT into timing->fbc.
+ *
+ * Missing numeric properties fall back to the inline defaults; when
+ * qcom,mdss-dsi-fbc-enable is absent the block is marked disabled with
+ * target_bpp = 24. Sets timing->compression_mode to COMPRESSION_FBC
+ * only when enabled. Always returns 0.
+ */
+static int mdss_dsi_parse_fbc_params(struct device_node *np,
+			struct mdss_panel_timing *timing)
+{
+	int rc, fbc_enabled = 0;
+	u32 tmp;
+	struct fbc_panel_info *fbc = &timing->fbc;
+
+	fbc_enabled = of_property_read_bool(np,	"qcom,mdss-dsi-fbc-enable");
+	if (fbc_enabled) {
+		pr_debug("%s:%d FBC panel enabled.\n", __func__, __LINE__);
+		fbc->enabled = 1;
+		rc = of_property_read_u32(np, "qcom,mdss-dsi-fbc-bpp", &tmp);
+		fbc->target_bpp = (!rc ? tmp : 24);
+		rc = of_property_read_u32(np, "qcom,mdss-dsi-fbc-packing",
+				&tmp);
+		fbc->comp_mode = (!rc ? tmp : 0);
+		fbc->qerr_enable = of_property_read_bool(np,
+			"qcom,mdss-dsi-fbc-quant-error");
+		rc = of_property_read_u32(np, "qcom,mdss-dsi-fbc-bias", &tmp);
+		fbc->cd_bias = (!rc ? tmp : 0);
+		fbc->pat_enable = of_property_read_bool(np,
+				"qcom,mdss-dsi-fbc-pat-mode");
+		fbc->vlc_enable = of_property_read_bool(np,
+				"qcom,mdss-dsi-fbc-vlc-mode");
+		fbc->bflc_enable = of_property_read_bool(np,
+				"qcom,mdss-dsi-fbc-bflc-mode");
+		rc = of_property_read_u32(np, "qcom,mdss-dsi-fbc-h-line-budget",
+				&tmp);
+		fbc->line_x_budget = (!rc ? tmp : 0);
+		rc = of_property_read_u32(np, "qcom,mdss-dsi-fbc-budget-ctrl",
+				&tmp);
+		fbc->block_x_budget = (!rc ? tmp : 0);
+		rc = of_property_read_u32(np, "qcom,mdss-dsi-fbc-block-budget",
+				&tmp);
+		fbc->block_budget = (!rc ? tmp : 0);
+		rc = of_property_read_u32(np,
+				"qcom,mdss-dsi-fbc-lossless-threshold", &tmp);
+		fbc->lossless_mode_thd = (!rc ? tmp : 0);
+		rc = of_property_read_u32(np,
+				"qcom,mdss-dsi-fbc-lossy-threshold", &tmp);
+		fbc->lossy_mode_thd = (!rc ? tmp : 0);
+		rc = of_property_read_u32(np, "qcom,mdss-dsi-fbc-rgb-threshold",
+				&tmp);
+		fbc->lossy_rgb_thd = (!rc ? tmp : 0);
+		rc = of_property_read_u32(np,
+				"qcom,mdss-dsi-fbc-lossy-mode-idx", &tmp);
+		fbc->lossy_mode_idx = (!rc ? tmp : 0);
+		rc = of_property_read_u32(np,
+				"qcom,mdss-dsi-fbc-slice-height", &tmp);
+		fbc->slice_height = (!rc ? tmp : 0);
+		fbc->pred_mode = of_property_read_bool(np,
+				"qcom,mdss-dsi-fbc-2d-pred-mode");
+		fbc->enc_mode = of_property_read_bool(np,
+				"qcom,mdss-dsi-fbc-ver2-mode");
+		rc = of_property_read_u32(np,
+				"qcom,mdss-dsi-fbc-max-pred-err", &tmp);
+		fbc->max_pred_err = (!rc ? tmp : 0);
+
+		timing->compression_mode = COMPRESSION_FBC;
+	} else {
+		pr_debug("%s:%d Panel does not support FBC.\n",
+				__func__, __LINE__);
+		fbc->enabled = 0;
+		fbc->target_bpp = 24;
+	}
+	return 0;
+}
+
+/*
+ * mdss_dsi_panel_dsc_pps_send() - build a DTYPE_PPS command from the
+ * panel's DSC parameters (written into ctrl->pps_buf) and send it as a
+ * single LP-mode command with a 10 ms post-command wait.
+ *
+ * No-op unless pinfo->compression_mode is COMPRESSION_DSC.
+ * NOTE(review): ctrl is dereferenced without a NULL check - callers
+ * must pass a valid ctrl.
+ */
+void mdss_dsi_panel_dsc_pps_send(struct mdss_dsi_ctrl_pdata *ctrl,
+				struct mdss_panel_info *pinfo)
+{
+	struct dsi_panel_cmds pcmds;
+	struct dsi_cmd_desc cmd;
+
+	if (!pinfo || (pinfo->compression_mode != COMPRESSION_DSC))
+		return;
+
+	memset(&pcmds, 0, sizeof(pcmds));
+	memset(&cmd, 0, sizeof(cmd));
+
+	cmd.dchdr.dlen = mdss_panel_dsc_prepare_pps_buf(&pinfo->dsc,
+		ctrl->pps_buf, 0);
+	cmd.dchdr.dtype = DTYPE_PPS;
+	cmd.dchdr.last = 1;
+	cmd.dchdr.wait = 10;
+	cmd.dchdr.vc = 0;
+	cmd.dchdr.ack = 0;
+	cmd.payload = ctrl->pps_buf;
+
+	pcmds.cmd_cnt = 1;
+	pcmds.cmds = &cmd;
+	pcmds.link_state = DSI_LP_MODE;
+
+	mdss_dsi_panel_cmds_send(ctrl, &pcmds, CMD_REQ_COMMIT);
+}
+
+/*
+ * mdss_dsi_parse_hdr_settings() - read the panel's HDR properties
+ * (color primaries, peak brightness, blackness level) from DT into
+ * pinfo->hdr_properties. If any required property is missing, HDR is
+ * disabled again and the failure only logged.
+ *
+ * Return: 0, or -EINVAL on NULL arguments.
+ */
+static int mdss_dsi_parse_hdr_settings(struct device_node *np,
+		struct mdss_panel_info *pinfo)
+{
+	int rc = 0;
+	struct mdss_panel_hdr_properties *hdr_prop;
+
+	if (!np) {
+		pr_err("%s: device node pointer is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!pinfo) {
+		pr_err("%s: panel info is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	hdr_prop = &pinfo->hdr_properties;
+	hdr_prop->hdr_enabled = of_property_read_bool(np,
+		"qcom,mdss-dsi-panel-hdr-enabled");
+
+	if (hdr_prop->hdr_enabled) {
+		rc = of_property_read_u32_array(np,
+				"qcom,mdss-dsi-panel-hdr-color-primaries",
+				hdr_prop->display_primaries,
+				DISPLAY_PRIMARIES_COUNT);
+		if (rc) {
+			/*
+			 * Fix: the assignment "hdr_prop->hdr_enabled =
+			 * false" was passed as the %u argument, so the
+			 * log always printed 0 instead of the error
+			 * code. Log rc and disable HDR separately,
+			 * matching the branches below.
+			 */
+			pr_info("%s:%d, Unable to read color primaries,rc:%u",
+					__func__, __LINE__, rc);
+			hdr_prop->hdr_enabled = false;
+		}
+
+		rc = of_property_read_u32(np,
+			"qcom,mdss-dsi-panel-peak-brightness",
+			&(hdr_prop->peak_brightness));
+		if (rc) {
+			pr_info("%s:%d, Unable to read hdr brightness, rc:%u",
+				__func__, __LINE__, rc);
+			hdr_prop->hdr_enabled = false;
+		}
+
+		rc = of_property_read_u32(np,
+			"qcom,mdss-dsi-panel-blackness-level",
+			&(hdr_prop->blackness_level));
+		if (rc) {
+			pr_info("%s:%d, Unable to read hdr brightness, rc:%u",
+				__func__, __LINE__, rc);
+			hdr_prop->hdr_enabled = false;
+		}
+	}
+	return 0;
+}
+
+/*
+ * mdss_dsi_parse_dsc_version() - read and validate the DSC version
+ * and SCR revision from DT. Absent properties default to DSC 1.1 /
+ * SCR rev 0.
+ *
+ * Return: 0 on success, -EINVAL for an unsupported version or rev.
+ */
+static int mdss_dsi_parse_dsc_version(struct device_node *np,
+		struct mdss_panel_timing *timing)
+{
+	struct dsc_desc *dsc = &timing->dsc;
+	u32 data;
+
+	if (of_property_read_u32(np, "qcom,mdss-dsc-version", &data)) {
+		/* property absent: default to DSC 1.1 */
+		dsc->version = 0x11;
+	} else {
+		dsc->version = data & 0xff;
+		/* only support DSC 1.1 rev */
+		if (dsc->version != 0x11) {
+			pr_err("%s: DSC version:%d not supported\n", __func__,
+				dsc->version);
+			return -EINVAL;
+		}
+	}
+
+	if (of_property_read_u32(np, "qcom,mdss-dsc-scr-version", &data)) {
+		/* property absent: default to SCR rev 0 */
+		dsc->scr_rev = 0x0;
+	} else {
+		dsc->scr_rev = data & 0xff;
+		/* only one scr rev supported */
+		if (dsc->scr_rev > 0x1) {
+			pr_err("%s: DSC scr version:%d not supported\n",
+				__func__, dsc->scr_rev);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * mdss_dsi_parse_dsc_params() - parse and validate the DSC slice and
+ * bpc/bpp configuration from DT, then derive the remaining DSC
+ * parameters and mark the timing as COMPRESSION_DSC.
+ *
+ * Validation rules: split displays allow only one encoder per panel;
+ * the panel width must be a multiple of slice-width; at most 2 slices
+ * per encoder; slice_per_pkt is either 1 or all slices per interface.
+ *
+ * Return: 0 on success, negative errno on parse/validation failure.
+ */
+static int mdss_dsi_parse_dsc_params(struct device_node *np,
+		struct mdss_panel_timing *timing, bool is_split_display)
+{
+	u32 data, intf_width;
+	int rc = 0;
+	struct dsc_desc *dsc = &timing->dsc;
+
+	if (!np) {
+		pr_err("%s: device node pointer is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32(np, "qcom,mdss-dsc-encoders", &data);
+	if (rc) {
+		if (!of_find_property(np, "qcom,mdss-dsc-encoders", NULL)) {
+			/* property is not defined, default to 1 */
+			data = 1;
+			rc = 0;
+		} else {
+			pr_err("%s: Error parsing qcom,mdss-dsc-encoders\n",
+				__func__);
+			goto end;
+		}
+	}
+
+	timing->dsc_enc_total = data;
+
+	/*
+	 * Fix: the three validation failures below used to "goto end"
+	 * with rc possibly still 0, silently returning success on
+	 * invalid DT data. Each now sets rc = -EINVAL.
+	 */
+	if (is_split_display && (timing->dsc_enc_total > 1)) {
+		pr_err("%s: Error: for split displays, more than 1 dsc encoder per panel is not allowed.\n",
+			__func__);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	rc = of_property_read_u32(np, "qcom,mdss-dsc-slice-height", &data);
+	if (rc)
+		goto end;
+	dsc->slice_height = data;
+
+	rc = of_property_read_u32(np, "qcom,mdss-dsc-slice-width", &data);
+	if (rc)
+		goto end;
+	dsc->slice_width = data;
+	intf_width = timing->xres;
+
+	if (intf_width % dsc->slice_width) {
+		pr_err("%s: Error: multiple of slice-width:%d should match panel-width:%d\n",
+			__func__, dsc->slice_width, intf_width);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	data = intf_width / dsc->slice_width;
+	if (((timing->dsc_enc_total > 1) && ((data != 2) && (data != 4))) ||
+	    ((timing->dsc_enc_total == 1) && (data > 2))) {
+		pr_err("%s: Error: max 2 slice per encoder. slice-width:%d should match panel-width:%d dsc_enc_total:%d\n",
+			__func__, dsc->slice_width,
+			intf_width, timing->dsc_enc_total);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	rc = of_property_read_u32(np, "qcom,mdss-dsc-slice-per-pkt", &data);
+	if (rc)
+		goto end;
+	dsc->slice_per_pkt = data;
+
+	/*
+	 * slice_per_pkt can be either 1 or all slices_per_intf
+	 */
+	if ((dsc->slice_per_pkt > 1) && (dsc->slice_per_pkt !=
+			DIV_ROUND_UP(intf_width, dsc->slice_width))) {
+		pr_err("Error: slice_per_pkt can be either 1 or all slices_per_intf\n");
+		pr_err("%s: slice_per_pkt=%d, slice_width=%d intf_width=%d\n",
+			__func__,
+			dsc->slice_per_pkt, dsc->slice_width, intf_width);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	pr_debug("%s: num_enc:%d :slice h=%d w=%d s_pkt=%d\n", __func__,
+		timing->dsc_enc_total, dsc->slice_height,
+		dsc->slice_width, dsc->slice_per_pkt);
+
+	rc = of_property_read_u32(np, "qcom,mdss-dsc-bit-per-component", &data);
+	if (rc)
+		goto end;
+	dsc->bpc = data;
+
+	rc = of_property_read_u32(np, "qcom,mdss-dsc-bit-per-pixel", &data);
+	if (rc)
+		goto end;
+	dsc->bpp = data;
+
+	pr_debug("%s: bpc=%d bpp=%d\n", __func__,
+		dsc->bpc, dsc->bpp);
+
+	dsc->block_pred_enable = of_property_read_bool(np,
+			"qcom,mdss-dsc-block-prediction-enable");
+
+	dsc->enable_422 = 0;
+	dsc->convert_rgb = 1;
+	dsc->vbr_enable = 0;
+
+	dsc->config_by_manufacture_cmd = of_property_read_bool(np,
+		"qcom,mdss-dsc-config-by-manufacture-cmd");
+
+	mdss_panel_dsc_parameters_calc(&timing->dsc);
+	mdss_panel_dsc_pclk_param_calc(&timing->dsc, intf_width);
+
+	timing->dsc.full_frame_slices =
+		DIV_ROUND_UP(intf_width, timing->dsc.slice_width);
+
+	timing->compression_mode = COMPRESSION_DSC;
+
+end:
+	return rc;
+}
+
+/*
+ * mdss_dsi_panel_get_dsc_cfg_np() - locate the DSC config child node:
+ * for the default timing, try the node named on the command line
+ * (panel_data->dsc_cfg_np_name); otherwise fall back to the
+ * qcom,config-select phandle in DT.
+ *
+ * Return: a device node with an elevated refcount (caller must
+ * of_node_put() it), or NULL when no config node is found.
+ */
+static struct device_node *mdss_dsi_panel_get_dsc_cfg_np(
+		struct device_node *np, struct mdss_panel_data *panel_data,
+		bool default_timing)
+{
+	struct device_node *dsc_cfg_np = NULL;
+
+
+	/* Read the dsc config node specified by command line */
+	if (default_timing) {
+		dsc_cfg_np = of_get_child_by_name(np,
+				panel_data->dsc_cfg_np_name);
+		if (!dsc_cfg_np)
+			pr_warn_once("%s: cannot find dsc config node:%s\n",
+				__func__, panel_data->dsc_cfg_np_name);
+	}
+
+	/*
+	 * Fall back to default from DT as nothing is specified
+	 * in command line.
+	 */
+	if (!dsc_cfg_np && of_find_property(np, "qcom,config-select", NULL)) {
+		dsc_cfg_np = of_parse_phandle(np, "qcom,config-select", 0);
+		if (!dsc_cfg_np)
+			pr_warn_once("%s:err parsing qcom,config-select\n",
+					__func__);
+	}
+
+	return dsc_cfg_np;
+}
+
+/*
+ * mdss_dsi_parse_topology_config() - parse the layer-mixer split /
+ * pingpong-split topology and the compression mode (DSC or FBC) for a
+ * panel timing. The DSC parameters are read from the config child
+ * node selected by mdss_dsi_panel_get_dsc_cfg_np().
+ *
+ * Return: 0 on success, negative errno on invalid configuration.
+ */
+static int mdss_dsi_parse_topology_config(struct device_node *np,
+	struct dsi_panel_timing *pt, struct mdss_panel_data *panel_data,
+	bool default_timing)
+{
+	int rc = 0;
+	bool is_split_display = panel_data->panel_info.is_split_display;
+	const char *data;
+	struct mdss_panel_timing *timing = &pt->timing;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	struct mdss_panel_info *pinfo;
+	struct device_node *cfg_np = NULL;
+
+	ctrl_pdata = container_of(panel_data, struct mdss_dsi_ctrl_pdata,
+							panel_data);
+	pinfo = &ctrl_pdata->panel_data.panel_info;
+
+	/* cfg_np carries a refcount; released via of_node_put() at end */
+	cfg_np = mdss_dsi_panel_get_dsc_cfg_np(np,
+				&ctrl_pdata->panel_data, default_timing);
+
+	if (cfg_np) {
+		if (!of_property_read_u32_array(cfg_np, "qcom,lm-split",
+		    timing->lm_widths, 2)) {
+			if (mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data)
+			    && (timing->lm_widths[1] != 0)) {
+				pr_err("%s: lm-split not allowed with split display\n",
+					__func__);
+				rc = -EINVAL;
+				goto end;
+			}
+		}
+		rc = of_property_read_string(cfg_np, "qcom,split-mode", &data);
+		if (!rc && !strcmp(data, "pingpong-split"))
+			pinfo->use_pingpong_split = true;
+
+		if (((timing->lm_widths[0]) || (timing->lm_widths[1])) &&
+		    pinfo->use_pingpong_split) {
+			pr_err("%s: pingpong_split cannot be used when lm-split[%d,%d] is specified\n",
+				__func__,
+				timing->lm_widths[0], timing->lm_widths[1]);
+			/*
+			 * Fix: was "return -EINVAL", which skipped
+			 * of_node_put(cfg_np) and leaked the OF node
+			 * reference taken above.
+			 */
+			rc = -EINVAL;
+			goto end;
+		}
+
+		pr_info("%s: cfg_node name %s lm_split:%dx%d pp_split:%s\n",
+			__func__, cfg_np->name,
+			timing->lm_widths[0], timing->lm_widths[1],
+			pinfo->use_pingpong_split ? "yes" : "no");
+	}
+
+	if (!pinfo->use_pingpong_split &&
+	    (timing->lm_widths[0] == 0) && (timing->lm_widths[1] == 0))
+		timing->lm_widths[0] = pt->timing.xres;
+
+	data = of_get_property(np, "qcom,compression-mode", NULL);
+	if (data) {
+		if (cfg_np && !strcmp(data, "dsc")) {
+			rc = mdss_dsi_parse_dsc_version(np, &pt->timing);
+			if (rc)
+				goto end;
+
+			pinfo->send_pps_before_switch =
+				of_property_read_bool(np,
+				"qcom,mdss-dsi-send-pps-before-switch");
+
+			rc = mdss_dsi_parse_dsc_params(cfg_np, &pt->timing,
+					is_split_display);
+		} else if (!strcmp(data, "fbc")) {
+			rc = mdss_dsi_parse_fbc_params(np, &pt->timing);
+		}
+	}
+
+end:
+	of_node_put(cfg_np);
+	return rc;
+}
+
+/*
+ * mdss_panel_parse_te_params() - read tear-check (TE) tuning values
+ * from DT into timing->te; each missing property falls back to the
+ * inline default. Tear check is enabled unless
+ * qcom,mdss-tear-check-disable is present.
+ */
+static void mdss_panel_parse_te_params(struct device_node *np,
+		struct mdss_panel_timing *timing)
+{
+	struct mdss_mdp_pp_tear_check *te = &timing->te;
+	u32 tmp;
+	int rc = 0;
+	/*
+	 * TE default: dsi byte clock calculated base on 70 fps;
+	 * around 14 ms to complete a kickoff cycle if te disabled;
+	 * vclk_line base on 60 fps; write is faster than read;
+	 * init == start == rdptr;
+	 */
+	te->tear_check_en =
+		!of_property_read_bool(np, "qcom,mdss-tear-check-disable");
+	rc = of_property_read_u32
+		(np, "qcom,mdss-tear-check-sync-cfg-height", &tmp);
+	te->sync_cfg_height = (!rc ? tmp : 0xfff0);
+	rc = of_property_read_u32
+		(np, "qcom,mdss-tear-check-sync-init-val", &tmp);
+	te->vsync_init_val = (!rc ? tmp : timing->yres);
+	rc = of_property_read_u32
+		(np, "qcom,mdss-tear-check-sync-threshold-start", &tmp);
+	te->sync_threshold_start = (!rc ? tmp : 4);
+	rc = of_property_read_u32
+		(np, "qcom,mdss-tear-check-sync-threshold-continue", &tmp);
+	te->sync_threshold_continue = (!rc ? tmp : 4);
+	rc = of_property_read_u32(np, "qcom,mdss-tear-check-frame-rate", &tmp);
+	te->refx100 = (!rc ? tmp : 6000);
+	rc = of_property_read_u32
+		(np, "qcom,mdss-tear-check-start-pos", &tmp);
+	te->start_pos = (!rc ? tmp : timing->yres);
+	rc = of_property_read_u32
+		(np, "qcom,mdss-tear-check-rd-ptr-trigger-intr", &tmp);
+	te->rd_ptr_irq = (!rc ? tmp : timing->yres + 1);
+	te->wr_ptr_irq = 0;
+}
+
+
+/*
+ * mdss_dsi_parse_reset_seq() - parse a reset sequence (alternating
+ * gpio-state/delay u32 pairs) from DT property @name into @rst_seq,
+ * setting *rst_len to the number of entries.
+ *
+ * The property must hold an even number of u32s, at most
+ * MDSS_DSI_RST_SEQ_LEN; otherwise *rst_len stays 0.
+ * Always returns 0 - parse problems are only logged at debug level.
+ */
+static int mdss_dsi_parse_reset_seq(struct device_node *np,
+		u32 rst_seq[MDSS_DSI_RST_SEQ_LEN], u32 *rst_len,
+		const char *name)
+{
+	int num = 0, i;
+	int rc;
+	struct property *data;
+	u32 tmp[MDSS_DSI_RST_SEQ_LEN];
+	*rst_len = 0;
+	data = of_find_property(np, name, &num);
+	/* num is returned in bytes; convert to u32 element count */
+	num /= sizeof(u32);
+	if (!data || !num || num > MDSS_DSI_RST_SEQ_LEN || num % 2) {
+		pr_debug("%s:%d, error reading %s, length found = %d\n",
+			__func__, __LINE__, name, num);
+	} else {
+		rc = of_property_read_u32_array(np, name, tmp, num);
+		if (rc)
+			pr_debug("%s:%d, error reading %s, rc = %d\n",
+				__func__, __LINE__, name, rc);
+		else {
+			for (i = 0; i < num; ++i)
+				rst_seq[i] = tmp[i];
+			*rst_len = num;
+		}
+	}
+	return 0;
+}
+
+/*
+ * mdss_dsi_cmp_panel_reg_v2() - compare the status bytes read back
+ * from the panel (ctrl->return_buf) against each group of expected
+ * values in ctrl->status_value.
+ *
+ * The compared length is the sum of per-command read lengths
+ * (status_valid_params when set, else status_cmds_rlen).
+ *
+ * Return: true if any expected-value group matches in full.
+ */
+static bool mdss_dsi_cmp_panel_reg_v2(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	int i, j;
+	int len = 0, *lenp;
+	int group = 0;
+
+	lenp = ctrl->status_valid_params ?: ctrl->status_cmds_rlen;
+
+	for (i = 0; i < ctrl->status_cmds.cmd_cnt; i++)
+		len += lenp[i];
+
+	for (j = 0; j < ctrl->groups; ++j) {
+		for (i = 0; i < len; ++i) {
+			if (ctrl->return_buf[i] !=
+				ctrl->status_value[group + i])
+				break;
+		}
+
+		/* inner loop ran to completion => this group matched */
+		if (i == len)
+			return true;
+		group += len;
+	}
+
+	return false;
+}
+
+/*
+ * mdss_dsi_gen_read_status() - generic panel status check: report 1
+ * when the read-back registers match any expected group, -EINVAL
+ * otherwise.
+ */
+static int mdss_dsi_gen_read_status(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	if (mdss_dsi_cmp_panel_reg_v2(ctrl_pdata))
+		return 1;
+
+	pr_err("%s: Read back value from panel is incorrect\n",
+						__func__);
+	return -EINVAL;
+}
+
+static int mdss_dsi_nt35596_read_status(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	/*
+	 * ESD status check specific to the NT35596 panel.  Register 0 must
+	 * always match; a register 3 match clears the error counter,
+	 * otherwise registers 4/5 are consulted and persistent mismatches
+	 * accumulate until max_status_error_count trips the failure path.
+	 *
+	 * Returns 1 when the panel is deemed alive, -EINVAL on failure.
+	 */
+	if (!mdss_dsi_cmp_panel_reg(ctrl_pdata->status_buf,
+		ctrl_pdata->status_value, 0)) {
+		ctrl_pdata->status_error_count = 0;
+		pr_err("%s: Read back value from panel is incorrect\n",
+							__func__);
+		return -EINVAL;
+	}
+
+	if (!mdss_dsi_cmp_panel_reg(ctrl_pdata->status_buf,
+		ctrl_pdata->status_value, 3)) {
+		ctrl_pdata->status_error_count = 0;
+	} else {
+		if (mdss_dsi_cmp_panel_reg(ctrl_pdata->status_buf,
+			ctrl_pdata->status_value, 4) ||
+			mdss_dsi_cmp_panel_reg(ctrl_pdata->status_buf,
+			ctrl_pdata->status_value, 5))
+			ctrl_pdata->status_error_count = 0;
+		else
+			ctrl_pdata->status_error_count++;
+		if (ctrl_pdata->status_error_count >=
+				ctrl_pdata->max_status_error_count) {
+			/*
+			 * Log before resetting the counter; the original
+			 * code zeroed it first so the message always
+			 * reported an error count of 0.
+			 */
+			pr_err("%s: Read value bad. Error_cnt = %i\n",
+				 __func__,
+				ctrl_pdata->status_error_count);
+			ctrl_pdata->status_error_count = 0;
+			return -EINVAL;
+		}
+	}
+	return 1;
+}
+
+static void mdss_dsi_parse_roi_alignment(struct device_node *np,
+		struct dsi_panel_timing *pt)
+{
+	/*
+	 * Parse the 6-cell "qcom,panel-roi-alignment" property into the
+	 * timing's ROI alignment constraints used by partial update.
+	 * Silently keeps defaults when the property is missing/malformed.
+	 */
+	int len = 0;
+	u32 value[6];
+	struct property *data;
+	struct mdss_panel_timing *timing = &pt->timing;
+
+	data = of_find_property(np, "qcom,panel-roi-alignment", &len);
+	len /= sizeof(u32);
+	if (!data || (len != 6)) {
+		pr_debug("%s: Panel roi alignment not found", __func__);
+	} else {
+		int rc = of_property_read_u32_array(np,
+				"qcom,panel-roi-alignment", value, len);
+		if (rc) {
+			pr_debug("%s: Error reading panel roi alignment values",
+					__func__);
+		} else {
+			timing->roi_alignment.xstart_pix_align = value[0];
+			timing->roi_alignment.ystart_pix_align = value[1];
+			timing->roi_alignment.width_pix_align = value[2];
+			timing->roi_alignment.height_pix_align = value[3];
+			timing->roi_alignment.min_width = value[4];
+			timing->roi_alignment.min_height = value[5];
+
+			/*
+			 * Log only values that were actually parsed; the
+			 * original printed stale fields on a failed read.
+			 */
+			pr_debug("%s: ROI alignment: [%d, %d, %d, %d, %d, %d]",
+				__func__, timing->roi_alignment.xstart_pix_align,
+				timing->roi_alignment.width_pix_align,
+				timing->roi_alignment.ystart_pix_align,
+				timing->roi_alignment.height_pix_align,
+				timing->roi_alignment.min_width,
+				timing->roi_alignment.min_height);
+		}
+	}
+}
+
+/*
+ * Parse the dynamic mode switch (DMS) configuration for the panel.
+ *
+ * Resolves pinfo->mipi.dms_mode from DT and, for the immediate-switch
+ * mode, loads the video<->command mode switch command sequences.  The
+ * mode is forced back to DISABLED when required command sequences are
+ * missing.  The final mode is always logged on exit.
+ */
+static void mdss_dsi_parse_dms_config(struct device_node *np,
+	struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	struct mdss_panel_info *pinfo = &ctrl->panel_data.panel_info;
+	const char *data;
+	bool dms_enabled;
+
+	dms_enabled = of_property_read_bool(np,
+		"qcom,dynamic-mode-switch-enabled");
+
+	if (!dms_enabled) {
+		pinfo->mipi.dms_mode = DYNAMIC_MODE_SWITCH_DISABLED;
+		goto exit;
+	}
+
+	/* default mode is suspend_resume */
+	pinfo->mipi.dms_mode = DYNAMIC_MODE_SWITCH_SUSPEND_RESUME;
+	data = of_get_property(np, "qcom,dynamic-mode-switch-type", NULL);
+	if (data && !strcmp(data, "dynamic-resolution-switch-immediate")) {
+		/*
+		 * Resolution switching requires multiple timings; fall back
+		 * to DISABLED when the timings list is empty.  No switch
+		 * commands are needed for this mode, hence the early exit.
+		 */
+		if (!list_empty(&ctrl->panel_data.timings_list))
+			pinfo->mipi.dms_mode =
+				DYNAMIC_MODE_RESOLUTION_SWITCH_IMMEDIATE;
+		else
+			pinfo->mipi.dms_mode =
+				DYNAMIC_MODE_SWITCH_DISABLED;
+		goto exit;
+	}
+
+	if (data && !strcmp(data, "dynamic-switch-immediate"))
+		pinfo->mipi.dms_mode = DYNAMIC_MODE_SWITCH_IMMEDIATE;
+	else
+		pr_debug("%s: default dms suspend/resume\n", __func__);
+
+	/* Command sequences used when switching between video/cmd modes */
+	mdss_dsi_parse_dcs_cmds(np, &ctrl->video2cmd,
+		"qcom,video-to-cmd-mode-switch-commands", NULL);
+
+	mdss_dsi_parse_dcs_cmds(np, &ctrl->cmd2video,
+		"qcom,cmd-to-video-mode-switch-commands", NULL);
+
+	mdss_dsi_parse_dcs_cmds(np, &ctrl->post_dms_on_cmds,
+		"qcom,mdss-dsi-post-mode-switch-on-command",
+		"qcom,mdss-dsi-post-mode-switch-on-command-state");
+
+	/* Immediate switch is unusable without its post-switch commands */
+	if (pinfo->mipi.dms_mode == DYNAMIC_MODE_SWITCH_IMMEDIATE &&
+		!ctrl->post_dms_on_cmds.cmd_cnt) {
+		pr_warn("%s: No post dms on cmd specified\n", __func__);
+		pinfo->mipi.dms_mode = DYNAMIC_MODE_SWITCH_DISABLED;
+	}
+
+	if (!ctrl->video2cmd.cmd_cnt || !ctrl->cmd2video.cmd_cnt) {
+		pr_warn("%s: No commands specified for dynamic switch\n",
+			__func__);
+		pinfo->mipi.dms_mode = DYNAMIC_MODE_SWITCH_DISABLED;
+	}
+exit:
+	pr_info("%s: dynamic switch feature enabled: %d\n", __func__,
+		pinfo->mipi.dms_mode);
+}
+
+/*
+ * Sanity-check the ESD valid-params lengths: each per-command valid
+ * length must not exceed the corresponding read-back length, otherwise
+ * the comparison would run past the data actually read from the panel.
+ */
+static bool
+mdss_dsi_parse_esd_check_valid_params(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	int idx = 0;
+
+	while (idx < ctrl->status_cmds.cmd_cnt) {
+		if (ctrl->status_valid_params[idx] >
+				ctrl->status_cmds_rlen[idx]) {
+			pr_debug("%s: ignore valid params!\n", __func__);
+			return false;
+		}
+		idx++;
+	}
+
+	return true;
+}
+
+static bool mdss_dsi_parse_esd_status_len(struct device_node *np,
+	char *prop_key, u32 **target, u32 cmd_cnt)
+{
+	/*
+	 * Read a u32-array DT property whose element count must equal the
+	 * number of status commands.  On success *target points at a
+	 * freshly kcalloc'ed array (caller frees); on any failure *target
+	 * is left untouched or NULL and false is returned.
+	 */
+	int len;
+
+	if (!of_find_property(np, prop_key, &len))
+		return false;
+
+	len /= sizeof(u32);
+	if (len != cmd_cnt) {
+		pr_err("%s: request property number(%d) not match command count(%d)\n",
+			__func__, len, cmd_cnt);
+		return false;
+	}
+
+	*target = kcalloc(len, sizeof(u32), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(*target)) {
+		pr_err("%s: Error allocating memory for property\n",
+			__func__);
+		return false;
+	}
+
+	if (of_property_read_u32_array(np, prop_key, *target, len)) {
+		pr_err("%s: cannot get values from dts\n", __func__);
+		kfree(*target);
+		*target = NULL;
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * Parse the ESD (panel status check) configuration.
+ *
+ * Selects the status-check mode, and for the register-read modes
+ * allocates status_cmds_rlen / status_valid_params / status_value /
+ * return_buf from DT data.  On any parse failure all partially built
+ * state is released and esd_check_enabled is cleared.
+ */
+static void mdss_dsi_parse_esd_params(struct device_node *np,
+	struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	u32 tmp;
+	u32 i, status_len, *lenp;
+	int rc;
+	struct property *data;
+	const char *string;
+	struct mdss_panel_info *pinfo = &ctrl->panel_data.panel_info;
+
+	pinfo->esd_check_enabled = of_property_read_bool(np,
+		"qcom,esd-check-enabled");
+
+	if (!pinfo->esd_check_enabled)
+		return;
+
+	ctrl->status_mode = ESD_MAX;
+	rc = of_property_read_string(np,
+			"qcom,mdss-dsi-panel-status-check-mode", &string);
+	if (!rc) {
+		if (!strcmp(string, "bta_check")) {
+			ctrl->status_mode = ESD_BTA;
+		} else if (!strcmp(string, "reg_read")) {
+			ctrl->status_mode = ESD_REG;
+			ctrl->check_read_status =
+				mdss_dsi_gen_read_status;
+		} else if (!strcmp(string, "reg_read_nt35596")) {
+			ctrl->status_mode = ESD_REG_NT35596;
+			ctrl->status_error_count = 0;
+			ctrl->check_read_status =
+				mdss_dsi_nt35596_read_status;
+		} else if (!strcmp(string, "te_signal_check")) {
+			if (pinfo->mipi.mode == DSI_CMD_MODE) {
+				ctrl->status_mode = ESD_TE;
+			} else {
+				pr_err("TE-ESD not valid for video mode\n");
+				goto error;
+			}
+		} else {
+			pr_err("No valid panel-status-check-mode string\n");
+			goto error;
+		}
+	}
+
+	/* BTA/TE modes need no register read configuration below */
+	if ((ctrl->status_mode == ESD_BTA) || (ctrl->status_mode == ESD_TE) ||
+			(ctrl->status_mode == ESD_MAX))
+		return;
+
+	mdss_dsi_parse_dcs_cmds(np, &ctrl->status_cmds,
+			"qcom,mdss-dsi-panel-status-command",
+				"qcom,mdss-dsi-panel-status-command-state");
+
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-panel-max-error-count",
+		&tmp);
+	ctrl->max_status_error_count = (!rc ? tmp : 0);
+
+	if (!mdss_dsi_parse_esd_status_len(np,
+		"qcom,mdss-dsi-panel-status-read-length",
+		&ctrl->status_cmds_rlen, ctrl->status_cmds.cmd_cnt)) {
+		pinfo->esd_check_enabled = false;
+		return;
+	}
+
+	if (mdss_dsi_parse_esd_status_len(np,
+		"qcom,mdss-dsi-panel-status-valid-params",
+		&ctrl->status_valid_params, ctrl->status_cmds.cmd_cnt)) {
+		if (!mdss_dsi_parse_esd_check_valid_params(ctrl))
+			goto error1;
+	}
+
+	status_len = 0;
+	lenp = ctrl->status_valid_params ?: ctrl->status_cmds_rlen;
+	for (i = 0; i < ctrl->status_cmds.cmd_cnt; ++i)
+		status_len += lenp[i];
+
+	/* status_len is the modulo divisor below; zero would be UB */
+	if (!status_len) {
+		pr_err("%s: Error parse panel-status-value\n", __func__);
+		goto error1;
+	}
+
+	/*
+	 * of_find_property() leaves *lenp untouched when the property is
+	 * absent, so tmp must be pre-initialized to avoid using an
+	 * indeterminate value in the checks below.
+	 */
+	tmp = 0;
+	data = of_find_property(np, "qcom,mdss-dsi-panel-status-value", &tmp);
+	tmp /= sizeof(u32);
+	if (!IS_ERR_OR_NULL(data) && tmp != 0 && (tmp % status_len) == 0) {
+		ctrl->groups = tmp / status_len;
+	} else {
+		pr_err("%s: Error parse panel-status-value\n", __func__);
+		goto error1;
+	}
+
+	ctrl->status_value = kcalloc(status_len * ctrl->groups, sizeof(u32),
+				GFP_KERNEL);
+	if (!ctrl->status_value)
+		goto error1;
+
+	ctrl->return_buf = kcalloc(status_len * ctrl->groups,
+			sizeof(unsigned char), GFP_KERNEL);
+	if (!ctrl->return_buf)
+		goto error2;
+
+	rc = of_property_read_u32_array(np,
+		"qcom,mdss-dsi-panel-status-value",
+		ctrl->status_value, ctrl->groups * status_len);
+	if (rc) {
+		pr_debug("%s: Error reading panel status values\n",
+				__func__);
+		memset(ctrl->status_value, 0, ctrl->groups * status_len);
+	}
+
+	return;
+
+	/*
+	 * NULL the ctrl fields after freeing: they stay reachable from
+	 * ctrl and would otherwise be dangling pointers (use-after-free
+	 * or double-free hazard on a later teardown path).
+	 */
+error2:
+	kfree(ctrl->status_value);
+	ctrl->status_value = NULL;
+error1:
+	kfree(ctrl->status_valid_params);
+	ctrl->status_valid_params = NULL;
+	kfree(ctrl->status_cmds_rlen);
+	ctrl->status_cmds_rlen = NULL;
+error:
+	pinfo->esd_check_enabled = false;
+}
+
+/*
+ * Parse miscellaneous optional panel features from DT: partial update,
+ * ULPS, dynamic mode switch, ESD checking, the 5V boost GPIO and the
+ * low-power mode command sequences.
+ *
+ * Returns 0 on success, -ENODEV on invalid arguments.
+ */
+static int mdss_dsi_parse_panel_features(struct device_node *np,
+	struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	struct mdss_panel_info *pinfo;
+
+	if (!np || !ctrl) {
+		pr_err("%s: Invalid arguments\n", __func__);
+		return -ENODEV;
+	}
+
+	pinfo = &ctrl->panel_data.panel_info;
+
+	/* Partial update is only meaningful for command-mode panels */
+	pinfo->partial_update_supported = of_property_read_bool(np,
+		"qcom,partial-update-enabled");
+	if (pinfo->mipi.mode == DSI_CMD_MODE) {
+		pinfo->partial_update_enabled = pinfo->partial_update_supported;
+		pr_info("%s: partial_update_enabled=%d\n", __func__,
+					pinfo->partial_update_enabled);
+		ctrl->set_col_page_addr = mdss_dsi_set_col_page_addr;
+		if (pinfo->partial_update_enabled) {
+			pinfo->partial_update_roi_merge =
+					of_property_read_bool(np,
+					"qcom,partial-update-roi-merge");
+		}
+	}
+
+	pinfo->dcs_cmd_by_left = of_property_read_bool(np,
+		"qcom,dcs-cmd-by-left");
+
+	pinfo->ulps_feature_enabled = of_property_read_bool(np,
+		"qcom,ulps-enabled");
+	pr_info("%s: ulps feature %s\n", __func__,
+		(pinfo->ulps_feature_enabled ? "enabled" : "disabled"));
+
+	pinfo->ulps_suspend_enabled = of_property_read_bool(np,
+		"qcom,suspend-ulps-enabled");
+	pr_info("%s: ulps during suspend feature %s", __func__,
+		(pinfo->ulps_suspend_enabled ? "enabled" : "disabled"));
+
+	mdss_dsi_parse_dms_config(np, ctrl);
+
+	/* Simulated panels never ACK, regardless of DT */
+	pinfo->panel_ack_disabled = pinfo->sim_panel_mode ?
+		1 : of_property_read_bool(np, "qcom,panel-ack-disabled");
+
+	pinfo->allow_phy_power_off = of_property_read_bool(np,
+		"qcom,panel-allow-phy-poweroff");
+
+	mdss_dsi_parse_esd_params(np, ctrl);
+
+	/* Register-read ESD relies on panel ACKs, so they must be on */
+	if (pinfo->panel_ack_disabled && pinfo->esd_check_enabled) {
+		pr_warn("ESD should not be enabled if panel ACK is disabled\n");
+		pinfo->esd_check_enabled = false;
+	}
+
+	/* Fall back to the 5V boost GPIO when no display-enable GPIO set */
+	if (ctrl->disp_en_gpio <= 0) {
+		ctrl->disp_en_gpio = of_get_named_gpio(
+			np,
+			"qcom,5v-boost-gpio", 0);
+
+		if (!gpio_is_valid(ctrl->disp_en_gpio))
+			pr_debug("%s:%d, Disp_en gpio not specified\n",
+					__func__, __LINE__);
+	}
+
+	mdss_dsi_parse_dcs_cmds(np, &ctrl->lp_on_cmds,
+			"qcom,mdss-dsi-lp-mode-on", NULL);
+
+	mdss_dsi_parse_dcs_cmds(np, &ctrl->lp_off_cmds,
+			"qcom,mdss-dsi-lp-mode-off", NULL);
+
+	return 0;
+}
+
+/*
+ * Parse "qcom,mdss-dsi-hor-line-idle": a list of <min max idle> u32
+ * triplets describing per-line idle times.  Populates ctrl->line_idle /
+ * horizontal_idle_cnt and sets ctrl->idle_enabled (which disables burst
+ * mode, as the two features are mutually exclusive).
+ */
+static void mdss_dsi_parse_panel_horizintal_line_idle(struct device_node *np,
+	struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	const u32 *src;
+	int i, len, cnt;
+	struct panel_horizontal_idle *kp;
+
+	if (!np || !ctrl) {
+		pr_err("%s: Invalid arguments\n", __func__);
+		return;
+	}
+
+	src = of_get_property(np, "qcom,mdss-dsi-hor-line-idle", &len);
+	if (!src || len == 0)
+		return;
+
+	/*
+	 * The property must be a whole number of u32 cells; without this
+	 * check a truncated property (e.g. 6 bytes) passed the original
+	 * "len % 3" test and the copy loop below read past the property
+	 * data.
+	 */
+	if (len % sizeof(u32)) {
+		pr_err("%s: invalid horizontal idle len=%d\n", __func__, len);
+		return;
+	}
+
+	cnt = len / sizeof(u32);
+	if (cnt % 3) { /* 3 fields per entry */
+		pr_err("%s: invalid horizontal idle len=%d\n", __func__, len);
+		return;
+	}
+
+	kp = kcalloc((cnt / 3), sizeof(*kp), GFP_KERNEL);
+	if (kp == NULL)
+		return;
+
+	ctrl->line_idle = kp;
+	for (i = 0; i < cnt; i += 3) {
+		kp->min = be32_to_cpu(src[i]);
+		kp->max = be32_to_cpu(src[i+1]);
+		kp->idle = be32_to_cpu(src[i+2]);
+		kp++;
+		ctrl->horizontal_idle_cnt++;
+	}
+
+	/*
+	 * idle is enabled for this controller, this will be used to
+	 * enable/disable burst mode since both features are mutually
+	 * exclusive.
+	 */
+	ctrl->idle_enabled = true;
+
+	pr_debug("%s: horizontal_idle_cnt=%d\n", __func__,
+				ctrl->horizontal_idle_cnt);
+}
+
+static int mdss_dsi_set_refresh_rate_range(struct device_node *pan_node,
+		struct mdss_panel_info *pinfo)
+{
+	/*
+	 * Populate the dynamic-fps refresh rate window from DT.  Missing
+	 * bounds are not errors: the minimum defaults to MIN_REFRESH_RATE
+	 * and the maximum to the panel's nominal frame rate.
+	 */
+	int ret;
+
+	ret = of_property_read_u32(pan_node,
+			"qcom,mdss-dsi-min-refresh-rate",
+			&pinfo->min_fps);
+	if (ret) {
+		pr_warn("%s:%d, Unable to read min refresh rate\n",
+				__func__, __LINE__);
+
+		/*
+		 * Since min refresh rate is not specified when dynamic
+		 * fps is enabled, using minimum as 30
+		 */
+		pinfo->min_fps = MIN_REFRESH_RATE;
+		ret = 0;
+	}
+
+	ret = of_property_read_u32(pan_node,
+			"qcom,mdss-dsi-max-refresh-rate",
+			&pinfo->max_fps);
+	if (ret) {
+		pr_warn("%s:%d, Unable to read max refresh rate\n",
+				__func__, __LINE__);
+
+		/*
+		 * Since max refresh rate was not specified when dynamic
+		 * fps is enabled, using the default panel refresh rate
+		 * as max refresh rate supported.
+		 */
+		pinfo->max_fps = pinfo->mipi.frame_rate;
+		ret = 0;
+	}
+
+	pr_info("dyn_fps: min = %d, max = %d\n",
+			pinfo->min_fps, pinfo->max_fps);
+	return ret;
+}
+
+/*
+ * Parse the dynamic frame rate (DFPS) configuration.  When enabled,
+ * selects the update mechanism (suspend/resume, immediate clock, or
+ * immediate porch HFP/VFP) and reads the supported refresh-rate range.
+ * DFPS is disabled again if no update mode is configured.
+ */
+static void mdss_dsi_parse_dfps_config(struct device_node *pan_node,
+			struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	const char *data;
+	bool dynamic_fps;
+	struct mdss_panel_info *pinfo = &(ctrl_pdata->panel_data.panel_info);
+
+	dynamic_fps = of_property_read_bool(pan_node,
+			"qcom,mdss-dsi-pan-enable-dynamic-fps");
+
+	if (!dynamic_fps)
+		return;
+
+	pinfo->dynamic_fps = true;
+	data = of_get_property(pan_node, "qcom,mdss-dsi-pan-fps-update", NULL);
+	if (data) {
+		if (!strcmp(data, "dfps_suspend_resume_mode")) {
+			pinfo->dfps_update = DFPS_SUSPEND_RESUME_MODE;
+			pr_debug("dfps mode: suspend/resume\n");
+		} else if (!strcmp(data, "dfps_immediate_clk_mode")) {
+			pinfo->dfps_update = DFPS_IMMEDIATE_CLK_UPDATE_MODE;
+			pr_debug("dfps mode: Immediate clk\n");
+		} else if (!strcmp(data, "dfps_immediate_porch_mode_hfp")) {
+			pinfo->dfps_update =
+				DFPS_IMMEDIATE_PORCH_UPDATE_MODE_HFP;
+			pr_debug("dfps mode: Immediate porch HFP\n");
+		} else if (!strcmp(data, "dfps_immediate_porch_mode_vfp")) {
+			pinfo->dfps_update =
+				DFPS_IMMEDIATE_PORCH_UPDATE_MODE_VFP;
+			pr_debug("dfps mode: Immediate porch VFP\n");
+		} else {
+			/* Unrecognized strings fall back to suspend/resume */
+			pinfo->dfps_update = DFPS_SUSPEND_RESUME_MODE;
+			pr_debug("default dfps mode: suspend/resume\n");
+		}
+		mdss_dsi_set_refresh_rate_range(pan_node, pinfo);
+	} else {
+		/* An update mechanism is mandatory for DFPS to work */
+		pinfo->dynamic_fps = false;
+		pr_debug("dfps update mode not configured: disable\n");
+	}
+	/* Start from the nominal rate until a switch request arrives */
+	pinfo->new_fps = pinfo->mipi.frame_rate;
+	pinfo->current_fps = pinfo->mipi.frame_rate;
+}
+
+/*
+ * Parse the backlight control configuration from DT.
+ *
+ * Supported types: WLED trigger, PWM (PMI or LPG bank based) and DCS
+ * commands.  Defaults to UNKNOWN_CTRL when the property is absent or
+ * unrecognized.  Returns 0 on success, -EINVAL when a selected PWM
+ * configuration is incomplete.
+ */
+int mdss_panel_parse_bl_settings(struct device_node *np,
+			struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	const char *data;
+	int rc = 0;
+	u32 tmp;
+
+	ctrl_pdata->bklt_ctrl = UNKNOWN_CTRL;
+	data = of_get_property(np, "qcom,mdss-dsi-bl-pmic-control-type", NULL);
+	if (data) {
+		if (!strcmp(data, "bl_ctrl_wled")) {
+			/* paired with led_trigger_unregister_simple() in
+			 * mdss_dsi_unregister_bl_settings()
+			 */
+			led_trigger_register_simple("bkl-trigger",
+				&bl_led_trigger);
+			pr_debug("%s: SUCCESS-> WLED TRIGGER register\n",
+				__func__);
+			ctrl_pdata->bklt_ctrl = BL_WLED;
+		} else if (!strcmp(data, "bl_ctrl_pwm")) {
+			ctrl_pdata->bklt_ctrl = BL_PWM;
+			ctrl_pdata->pwm_pmi = of_property_read_bool(np,
+					"qcom,mdss-dsi-bl-pwm-pmi");
+			/* PWM period is mandatory for any PWM backlight */
+			rc = of_property_read_u32(np,
+				"qcom,mdss-dsi-bl-pmic-pwm-frequency", &tmp);
+			if (rc) {
+				pr_err("%s:%d, Error, panel pwm_period\n",
+						__func__, __LINE__);
+				return -EINVAL;
+			}
+			ctrl_pdata->pwm_period = tmp;
+			if (ctrl_pdata->pwm_pmi) {
+				/* PMI route: PWM device comes from DT */
+				ctrl_pdata->pwm_bl = of_pwm_get(np, NULL);
+				if (IS_ERR(ctrl_pdata->pwm_bl)) {
+					pr_err("%s: Error, pwm device\n",
+								__func__);
+					ctrl_pdata->pwm_bl = NULL;
+					return -EINVAL;
+				}
+			} else {
+				/* LPG route: needs a bank select and GPIO */
+				rc = of_property_read_u32(np,
+					"qcom,mdss-dsi-bl-pmic-bank-select",
+								 &tmp);
+				if (rc) {
+					pr_err("%s:%d, Error, lpg channel\n",
+							__func__, __LINE__);
+					return -EINVAL;
+				}
+				ctrl_pdata->pwm_lpg_chan = tmp;
+				tmp = of_get_named_gpio(np,
+					"qcom,mdss-dsi-pwm-gpio", 0);
+				ctrl_pdata->pwm_pmic_gpio = tmp;
+				pr_debug("%s: Configured PWM bklt ctrl\n",
+								 __func__);
+			}
+		} else if (!strcmp(data, "bl_ctrl_dcs")) {
+			ctrl_pdata->bklt_ctrl = BL_DCS_CMD;
+			pr_debug("%s: Configured DCS_CMD bklt ctrl\n",
+								__func__);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Switch the controller to a new panel timing.
+ *
+ * Copies the timing's DSI parameters (clk pre/post, PHY timing tables,
+ * on/post-on command sets) into the panel info and triggers a clock
+ * refresh.  A no-op (returning 0) when the timing is already current.
+ * Returns -EINVAL on a NULL timing.
+ */
+int mdss_dsi_panel_timing_switch(struct mdss_dsi_ctrl_pdata *ctrl,
+			struct mdss_panel_timing *timing)
+{
+	struct dsi_panel_timing *pt;
+	struct mdss_panel_info *pinfo = &ctrl->panel_data.panel_info;
+	int i;
+
+	if (!timing)
+		return -EINVAL;
+
+	if (timing == ctrl->panel_data.current_timing) {
+		pr_warn("%s: panel timing \"%s\" already set\n", __func__,
+				timing->name);
+		return 0; /* nothing to do */
+	}
+
+	pr_debug("%s: ndx=%d switching to panel timing \"%s\"\n", __func__,
+			ctrl->ndx, timing->name);
+
+	mdss_panel_info_from_timing(timing, pinfo);
+
+	/* Recover the DSI-specific wrapper around the generic timing */
+	pt = container_of(timing, struct dsi_panel_timing, timing);
+	pinfo->mipi.t_clk_pre = pt->t_clk_pre;
+	pinfo->mipi.t_clk_post = pt->t_clk_post;
+
+	for (i = 0; i < ARRAY_SIZE(pt->phy_timing); i++)
+		pinfo->mipi.dsi_phy_db.timing[i] = pt->phy_timing[i];
+
+	for (i = 0; i < ARRAY_SIZE(pt->phy_timing_8996); i++)
+		pinfo->mipi.dsi_phy_db.timing_8996[i] = pt->phy_timing_8996[i];
+
+	ctrl->on_cmds = pt->on_cmds;
+	ctrl->post_panel_on_cmds = pt->post_panel_on_cmds;
+
+	ctrl->panel_data.current_timing = timing;
+	/* Without an explicit clock rate, force a recalculation */
+	if (!timing->clk_rate)
+		ctrl->refresh_clk_rate = true;
+	mdss_dsi_clk_refresh(&ctrl->panel_data, ctrl->update_phy_timing);
+
+	return 0;
+}
+
+void mdss_dsi_unregister_bl_settings(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	/* Undo the WLED trigger registered by mdss_panel_parse_bl_settings */
+	if (ctrl_pdata->bklt_ctrl != BL_WLED)
+		return;
+
+	led_trigger_unregister_simple(bl_led_trigger);
+}
+
+/*
+ * Fill a dsi_panel_timing from one DT timing node.
+ *
+ * Width, height and at least one PHY timing table (legacy or 8996-style
+ * v2) are mandatory; porches, pulse widths, borders, frame rate and
+ * clock rate all fall back to defaults.  Returns 0 on success, -EINVAL
+ * when a mandatory property is missing.
+ */
+static int mdss_dsi_panel_timing_from_dt(struct device_node *np,
+		struct dsi_panel_timing *pt,
+		struct mdss_panel_data *panel_data)
+{
+	u32 tmp;
+	u64 tmp64;
+	int rc, i, len;
+	const char *data;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata;
+	struct mdss_panel_info *pinfo;
+	bool phy_timings_present = false;
+
+	pinfo = &panel_data->panel_info;
+
+	ctrl_pdata = container_of(panel_data, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-panel-width", &tmp);
+	if (rc) {
+		pr_err("%s:%d, panel width not specified\n",
+						__func__, __LINE__);
+		return -EINVAL;
+	}
+	pt->timing.xres = tmp;
+
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-panel-height", &tmp);
+	if (rc) {
+		pr_err("%s:%d, panel height not specified\n",
+						__func__, __LINE__);
+		return -EINVAL;
+	}
+	pt->timing.yres = tmp;
+
+	/* Optional blanking parameters with hardware defaults */
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-h-front-porch", &tmp);
+	pt->timing.h_front_porch = (!rc ? tmp : 6);
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-h-back-porch", &tmp);
+	pt->timing.h_back_porch = (!rc ? tmp : 6);
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-h-pulse-width", &tmp);
+	pt->timing.h_pulse_width = (!rc ? tmp : 2);
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-h-sync-skew", &tmp);
+	pt->timing.hsync_skew = (!rc ? tmp : 0);
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-v-back-porch", &tmp);
+	pt->timing.v_back_porch = (!rc ? tmp : 6);
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-v-front-porch", &tmp);
+	pt->timing.v_front_porch = (!rc ? tmp : 6);
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-v-pulse-width", &tmp);
+	pt->timing.v_pulse_width = (!rc ? tmp : 2);
+
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-h-left-border", &tmp);
+	pt->timing.border_left = !rc ? tmp : 0;
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-h-right-border", &tmp);
+	pt->timing.border_right = !rc ? tmp : 0;
+
+	/* overriding left/right borders for split display cases */
+	if (mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data)) {
+		if (panel_data->next)
+			pt->timing.border_right = 0;
+		else
+			pt->timing.border_left = 0;
+	}
+
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-v-top-border", &tmp);
+	pt->timing.border_top = !rc ? tmp : 0;
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-v-bottom-border", &tmp);
+	pt->timing.border_bottom = !rc ? tmp : 0;
+
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-panel-framerate", &tmp);
+	pt->timing.frame_rate = !rc ? tmp : DEFAULT_FRAME_RATE;
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-panel-clockrate", &tmp64);
+	if (rc == -EOVERFLOW) {
+		/*
+		 * Property stored as a single u32 cell: retry as u32.
+		 * NOTE(review): reading a u32 through (u32 *)&tmp64 fills
+		 * the low half only on little-endian targets — presumably
+		 * fine for these SoCs, but verify if ever built big-endian.
+		 */
+		tmp64 = 0;
+		rc = of_property_read_u32(np,
+			"qcom,mdss-dsi-panel-clockrate", (u32 *)&tmp64);
+	}
+	pt->timing.clk_rate = !rc ? tmp64 : 0;
+
+	/* Legacy 12-byte PHY timing table */
+	data = of_get_property(np, "qcom,mdss-dsi-panel-timings", &len);
+	if ((!data) || (len != 12)) {
+		pr_debug("%s:%d, Unable to read Phy timing settings",
+		       __func__, __LINE__);
+	} else {
+		for (i = 0; i < len; i++)
+			pt->phy_timing[i] = data[i];
+		phy_timings_present = true;
+	}
+
+	/* 40-byte 8996-style (PHY rev v2) timing table */
+	data = of_get_property(np, "qcom,mdss-dsi-panel-timings-phy-v2", &len);
+	if ((!data) || (len != 40)) {
+		pr_debug("%s:%d, Unable to read 8996 Phy lane timing settings",
+		       __func__, __LINE__);
+	} else {
+		for (i = 0; i < len; i++)
+			pt->phy_timing_8996[i] = data[i];
+		phy_timings_present = true;
+	}
+	/* At least one of the two PHY tables must be provided */
+	if (!phy_timings_present) {
+		pr_err("%s: phy timing settings not present\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-t-clk-pre", &tmp);
+	pt->t_clk_pre = (!rc ? tmp : 0x24);
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-t-clk-post", &tmp);
+	pt->t_clk_post = (!rc ? tmp : 0x03);
+
+	/* Timing takes its name from the DT node (freed with the timing) */
+	if (np->name) {
+		pt->timing.name = kstrdup(np->name, GFP_KERNEL);
+		pr_info("%s: found new timing \"%s\" (%pK)\n", __func__,
+				np->name, &pt->timing);
+	}
+
+	return 0;
+}
+
+/*
+ * Parse the per-timing resolution properties: ROI alignment, the
+ * on/post-on/timing-switch command sets, compression (DSC/topology)
+ * configuration and tear-check parameters.
+ *
+ * Returns 0 on success or the topology-parse error code.
+ */
+static int  mdss_dsi_panel_config_res_properties(struct device_node *np,
+		struct dsi_panel_timing *pt,
+		struct mdss_panel_data *panel_data,
+		bool default_timing)
+{
+	int rc = 0;
+
+	mdss_dsi_parse_roi_alignment(np, pt);
+
+	mdss_dsi_parse_dcs_cmds(np, &pt->on_cmds,
+		"qcom,mdss-dsi-on-command",
+		"qcom,mdss-dsi-on-command-state");
+
+	mdss_dsi_parse_dcs_cmds(np, &pt->post_panel_on_cmds,
+		"qcom,mdss-dsi-post-panel-on-command", NULL);
+
+	mdss_dsi_parse_dcs_cmds(np, &pt->switch_cmds,
+		"qcom,mdss-dsi-timing-switch-command",
+		"qcom,mdss-dsi-timing-switch-command-state");
+
+	/* Compression params are the only fatal parse failure here */
+	rc = mdss_dsi_parse_topology_config(np, pt, panel_data, default_timing);
+	if (rc) {
+		pr_err("%s: parsing compression params failed. rc:%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	mdss_panel_parse_te_params(np, &pt->timing);
+	return rc;
+}
+
+/*
+ * Parse all panel timings, either from the optional
+ * "qcom,mdss-dsi-display-timings" container node or, as a fallback,
+ * directly from the panel root node.  Each timing is linked into
+ * panel_data->timings_list and the default (or first) timing is made
+ * current.  Returns 0 on success or a negative errno.
+ */
+static int mdss_panel_parse_display_timings(struct device_node *np,
+		struct mdss_panel_data *panel_data)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl;
+	struct dsi_panel_timing *modedb;
+	struct device_node *timings_np;
+	struct device_node *entry;
+	int num_timings, rc;
+	int i = 0, active_ndx = 0;
+	bool default_timing = false;
+
+	ctrl = container_of(panel_data, struct mdss_dsi_ctrl_pdata, panel_data);
+
+	INIT_LIST_HEAD(&panel_data->timings_list);
+
+	timings_np = of_get_child_by_name(np, "qcom,mdss-dsi-display-timings");
+	if (!timings_np) {
+		struct dsi_panel_timing pt;
+
+		memset(&pt, 0, sizeof(struct dsi_panel_timing));
+
+		/*
+		 * display timings node is not available, fallback to reading
+		 * timings directly from root node instead
+		 */
+		pr_debug("reading display-timings from panel node\n");
+		rc = mdss_dsi_panel_timing_from_dt(np, &pt, panel_data);
+		if (!rc) {
+			mdss_dsi_panel_config_res_properties(np, &pt,
+					panel_data, true);
+			rc = mdss_dsi_panel_timing_switch(ctrl, &pt.timing);
+		}
+		return rc;
+	}
+
+	num_timings = of_get_child_count(timings_np);
+	if (num_timings == 0) {
+		pr_err("no timings found within display-timings\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	modedb = kcalloc(num_timings, sizeof(*modedb), GFP_KERNEL);
+	if (!modedb) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	for_each_child_of_node(timings_np, entry) {
+		rc = mdss_dsi_panel_timing_from_dt(entry, (modedb + i),
+				panel_data);
+		if (rc) {
+			/*
+			 * Breaking out of for_each_child_of_node() leaves
+			 * a reference held on 'entry'; drop it.  Earlier
+			 * iterations linked list nodes that point into
+			 * modedb and kstrdup'ed timing names, so unlink
+			 * the list and free the names before freeing the
+			 * backing store, or they would dangle/leak.
+			 */
+			of_node_put(entry);
+			INIT_LIST_HEAD(&panel_data->timings_list);
+			while (--i >= 0)
+				kfree(modedb[i].timing.name);
+			kfree(modedb);
+			goto exit;
+		}
+
+		default_timing = of_property_read_bool(entry,
+				"qcom,mdss-dsi-timing-default");
+		if (default_timing)
+			active_ndx = i;
+
+		mdss_dsi_panel_config_res_properties(entry, (modedb + i),
+				panel_data, default_timing);
+
+		list_add(&modedb[i].timing.list,
+				&panel_data->timings_list);
+		i++;
+	}
+
+	/* Configure default timing settings */
+	rc = mdss_dsi_panel_timing_switch(ctrl, &modedb[active_ndx].timing);
+	if (rc)
+		pr_err("unable to configure default timing settings\n");
+
+exit:
+	of_node_put(timings_np);
+
+	return rc;
+}
+
+#ifdef TARGET_HW_MDSS_HDMI
+/*
+ * Read the DBA (display bridge abstraction) bridge chip name for
+ * detachable HDMI bridge panels.  Returns -EINVAL when the panel is
+ * marked as a DBA panel but no bridge name is given.
+ */
+static int mdss_panel_parse_dt_hdmi(struct device_node *np,
+			struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	int len = 0;
+	const char *bridge_chip_name;
+	struct mdss_panel_info *pinfo = &(ctrl_pdata->panel_data.panel_info);
+
+	pinfo->is_dba_panel = of_property_read_bool(np,
+			"qcom,dba-panel");
+
+	if (pinfo->is_dba_panel) {
+		bridge_chip_name = of_get_property(np,
+			"qcom,bridge-name", &len);
+		if (!bridge_chip_name || len <= 0) {
+			pr_err("%s:%d Unable to read qcom,bridge_name, data=%pK,len=%d\n",
+				__func__, __LINE__, bridge_chip_name, len);
+			return -EINVAL;
+		}
+		strlcpy(ctrl_pdata->bridge_name, bridge_chip_name,
+			MSM_DBA_CHIP_NAME_MAX_LEN);
+	}
+	return 0;
+}
+#else
+/* Stub when HDMI bridge support is compiled out. */
+static int mdss_panel_parse_dt_hdmi(struct device_node *np,
+			struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	/*
+	 * Silence unused-parameter warnings without formally
+	 * dereferencing the pointers, as "(void)(*np)" did.
+	 */
+	(void)np;
+	(void)ctrl_pdata;
+	return 0;
+}
+#endif
+/*
+ * Top-level DT parser for a DSI panel node.
+ *
+ * Fills the panel info (physical size, bpp, mode, destination, colors,
+ * backlight range, TE, lane states, EOT handling, triggers, reset
+ * sequence, command sets) and delegates to the timing, HDR, feature,
+ * line-idle, DFPS and HDMI sub-parsers.  Returns 0 on success or a
+ * negative errno on the first fatal parse failure.
+ */
+static int mdss_panel_parse_dt(struct device_node *np,
+			struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	u32 tmp;
+	u8 lanes = 0;
+	int rc = 0;
+	const char *data;
+	static const char *pdest;
+	struct mdss_panel_info *pinfo = &(ctrl_pdata->panel_data.panel_info);
+
+	if (mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data))
+		pinfo->is_split_display = true;
+
+	/* Physical panel dimensions (mm); 0 when not provided */
+	rc = of_property_read_u32(np,
+		"qcom,mdss-pan-physical-width-dimension", &tmp);
+	pinfo->physical_width = (!rc ? tmp : 0);
+	rc = of_property_read_u32(np,
+		"qcom,mdss-pan-physical-height-dimension", &tmp);
+	pinfo->physical_height = (!rc ? tmp : 0);
+
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-bpp", &tmp);
+	if (rc) {
+		pr_err("%s:%d, bpp not specified\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+	/* rc is always 0 here, so the 24 fallback is never taken */
+	pinfo->bpp = (!rc ? tmp : 24);
+	pinfo->mipi.mode = DSI_VIDEO_MODE;
+	data = of_get_property(np, "qcom,mdss-dsi-panel-type", NULL);
+	if (data && !strcmp(data, "dsi_cmd_mode"))
+		pinfo->mipi.mode = DSI_CMD_MODE;
+	pinfo->mipi.boot_mode = pinfo->mipi.mode;
+	tmp = 0;
+	data = of_get_property(np, "qcom,mdss-dsi-pixel-packing", NULL);
+	if (data && !strcmp(data, "loose"))
+		pinfo->mipi.pixel_packing = 1;
+	else
+		pinfo->mipi.pixel_packing = 0;
+	rc = mdss_panel_get_dst_fmt(pinfo->bpp,
+		pinfo->mipi.mode, pinfo->mipi.pixel_packing,
+		&(pinfo->mipi.dst_format));
+	if (rc) {
+		pr_debug("%s: problem determining dst format. Set Default\n",
+			__func__);
+		pinfo->mipi.dst_format =
+			DSI_VIDEO_DST_FORMAT_RGB888;
+	}
+	/* NOTE(review): pdest is static and never read after this
+	 * assignment in this function — presumably legacy; confirm
+	 * before removing.
+	 */
+	pdest = of_get_property(np,
+		"qcom,mdss-dsi-panel-destination", NULL);
+
+	rc = of_property_read_u32(np,
+		"qcom,mdss-dsi-underflow-color", &tmp);
+	pinfo->lcdc.underflow_clr = (!rc ? tmp : 0xff);
+	rc = of_property_read_u32(np,
+		"qcom,mdss-dsi-border-color", &tmp);
+	pinfo->lcdc.border_clr = (!rc ? tmp : 0);
+	data = of_get_property(np, "qcom,mdss-dsi-panel-orientation", NULL);
+	if (data) {
+		pr_debug("panel orientation is %s\n", data);
+		if (!strcmp(data, "180"))
+			pinfo->panel_orientation = MDP_ROT_180;
+		else if (!strcmp(data, "hflip"))
+			pinfo->panel_orientation = MDP_FLIP_LR;
+		else if (!strcmp(data, "vflip"))
+			pinfo->panel_orientation = MDP_FLIP_UD;
+	}
+
+	/* Backlight range and brightness mapping */
+	rc = of_property_read_u32(np, "qcom,mdss-brightness-max-level", &tmp);
+	pinfo->brightness_max = (!rc ? tmp : MDSS_MAX_BL_BRIGHTNESS);
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-bl-min-level", &tmp);
+	pinfo->bl_min = (!rc ? tmp : 0);
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-bl-max-level", &tmp);
+	pinfo->bl_max = (!rc ? tmp : 255);
+	ctrl_pdata->bklt_max = pinfo->bl_max;
+
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-interleave-mode", &tmp);
+	pinfo->mipi.interleave_mode = (!rc ? tmp : 0);
+
+	pinfo->mipi.vsync_enable = of_property_read_bool(np,
+		"qcom,mdss-dsi-te-check-enable");
+
+	/* Simulated-TE panels cannot use the hardware TE pin */
+	if (pinfo->sim_panel_mode == SIM_SW_TE_MODE)
+		pinfo->mipi.hw_vsync_mode = false;
+	else
+		pinfo->mipi.hw_vsync_mode = of_property_read_bool(np,
+			"qcom,mdss-dsi-te-using-te-pin");
+
+	rc = of_property_read_u32(np,
+		"qcom,mdss-dsi-h-sync-pulse", &tmp);
+	pinfo->mipi.pulse_mode_hsa_he = (!rc ? tmp : false);
+
+	/* Low-power stop states during the various blanking periods */
+	pinfo->mipi.hfp_power_stop = of_property_read_bool(np,
+		"qcom,mdss-dsi-hfp-power-mode");
+	pinfo->mipi.hsa_power_stop = of_property_read_bool(np,
+		"qcom,mdss-dsi-hsa-power-mode");
+	pinfo->mipi.hbp_power_stop = of_property_read_bool(np,
+		"qcom,mdss-dsi-hbp-power-mode");
+	pinfo->mipi.last_line_interleave_en = of_property_read_bool(np,
+		"qcom,mdss-dsi-last-line-interleave");
+	pinfo->mipi.bllp_power_stop = of_property_read_bool(np,
+		"qcom,mdss-dsi-bllp-power-mode");
+	pinfo->mipi.eof_bllp_power_stop = of_property_read_bool(
+		np, "qcom,mdss-dsi-bllp-eof-power-mode");
+	pinfo->mipi.traffic_mode = DSI_NON_BURST_SYNCH_PULSE;
+	data = of_get_property(np, "qcom,mdss-dsi-traffic-mode", NULL);
+	if (data) {
+		if (!strcmp(data, "non_burst_sync_event"))
+			pinfo->mipi.traffic_mode = DSI_NON_BURST_SYNCH_EVENT;
+		else if (!strcmp(data, "burst_mode"))
+			pinfo->mipi.traffic_mode = DSI_BURST_MODE;
+	}
+	/* Command-mode TE/memory-write DCS configuration */
+	rc = of_property_read_u32(np,
+		"qcom,mdss-dsi-te-dcs-command", &tmp);
+	pinfo->mipi.insert_dcs_cmd =
+			(!rc ? tmp : 1);
+	rc = of_property_read_u32(np,
+		"qcom,mdss-dsi-wr-mem-continue", &tmp);
+	pinfo->mipi.wr_mem_continue =
+			(!rc ? tmp : 0x3c);
+	rc = of_property_read_u32(np,
+		"qcom,mdss-dsi-wr-mem-start", &tmp);
+	pinfo->mipi.wr_mem_start =
+			(!rc ? tmp : 0x2c);
+	rc = of_property_read_u32(np,
+		"qcom,mdss-dsi-te-pin-select", &tmp);
+	pinfo->mipi.te_sel =
+			(!rc ? tmp : 1);
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-virtual-channel-id", &tmp);
+	pinfo->mipi.vc = (!rc ? tmp : 0);
+	pinfo->mipi.rgb_swap = DSI_RGB_SWAP_RGB;
+	data = of_get_property(np, "qcom,mdss-dsi-color-order", NULL);
+	if (data) {
+		if (!strcmp(data, "rgb_swap_rbg"))
+			pinfo->mipi.rgb_swap = DSI_RGB_SWAP_RBG;
+		else if (!strcmp(data, "rgb_swap_bgr"))
+			pinfo->mipi.rgb_swap = DSI_RGB_SWAP_BGR;
+		else if (!strcmp(data, "rgb_swap_brg"))
+			pinfo->mipi.rgb_swap = DSI_RGB_SWAP_BRG;
+		else if (!strcmp(data, "rgb_swap_grb"))
+			pinfo->mipi.rgb_swap = DSI_RGB_SWAP_GRB;
+		else if (!strcmp(data, "rgb_swap_gbr"))
+			pinfo->mipi.rgb_swap = DSI_RGB_SWAP_GBR;
+	}
+	pinfo->mipi.data_lane0 = of_property_read_bool(np,
+		"qcom,mdss-dsi-lane-0-state");
+	pinfo->mipi.data_lane1 = of_property_read_bool(np,
+		"qcom,mdss-dsi-lane-1-state");
+	pinfo->mipi.data_lane2 = of_property_read_bool(np,
+		"qcom,mdss-dsi-lane-2-state");
+	pinfo->mipi.data_lane3 = of_property_read_bool(np,
+		"qcom,mdss-dsi-lane-3-state");
+
+	if (pinfo->mipi.data_lane0)
+		lanes++;
+	if (pinfo->mipi.data_lane1)
+		lanes++;
+	if (pinfo->mipi.data_lane2)
+		lanes++;
+	if (pinfo->mipi.data_lane3)
+		lanes++;
+	/*
+	 * needed to set default lanes during
+	 * resolution switch for bridge chips
+	 */
+	pinfo->mipi.default_lanes = lanes;
+
+	rc = mdss_panel_parse_display_timings(np, &ctrl_pdata->panel_data);
+	if (rc)
+		return rc;
+	rc = mdss_dsi_parse_hdr_settings(np, pinfo);
+	if (rc)
+		return rc;
+
+	pinfo->mipi.rx_eot_ignore = of_property_read_bool(np,
+		"qcom,mdss-dsi-rx-eot-ignore");
+	pinfo->mipi.tx_eot_append = of_property_read_bool(np,
+		"qcom,mdss-dsi-tx-eot-append");
+
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-stream", &tmp);
+	pinfo->mipi.stream = (!rc ? tmp : 0);
+
+	data = of_get_property(np, "qcom,mdss-dsi-panel-mode-gpio-state", NULL);
+	if (data) {
+		if (!strcmp(data, "high"))
+			pinfo->mode_gpio_state = MODE_GPIO_HIGH;
+		else if (!strcmp(data, "low"))
+			pinfo->mode_gpio_state = MODE_GPIO_LOW;
+	} else {
+		pinfo->mode_gpio_state = MODE_GPIO_NOT_VALID;
+	}
+
+	rc = of_property_read_u32(np, "qcom,mdss-mdp-transfer-time-us", &tmp);
+	pinfo->mdp_transfer_time_us = (!rc ? tmp : DEFAULT_MDP_TRANSFER_TIME);
+
+	pinfo->mipi.lp11_init = of_property_read_bool(np,
+					"qcom,mdss-dsi-lp11-init");
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-init-delay-us", &tmp);
+	pinfo->mipi.init_delay = (!rc ? tmp : 0);
+
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-post-init-delay", &tmp);
+	pinfo->mipi.post_init_delay = (!rc ? tmp : 0);
+
+	mdss_dsi_parse_trigger(np, &(pinfo->mipi.mdp_trigger),
+		"qcom,mdss-dsi-mdp-trigger");
+
+	mdss_dsi_parse_trigger(np, &(pinfo->mipi.dma_trigger),
+		"qcom,mdss-dsi-dma-trigger");
+
+	mdss_dsi_parse_reset_seq(np, pinfo->rst_seq, &(pinfo->rst_seq_len),
+		"qcom,mdss-dsi-reset-sequence");
+
+	mdss_dsi_parse_dcs_cmds(np, &ctrl_pdata->off_cmds,
+		"qcom,mdss-dsi-off-command", "qcom,mdss-dsi-off-command-state");
+
+	mdss_dsi_parse_dcs_cmds(np, &ctrl_pdata->idle_on_cmds,
+		"qcom,mdss-dsi-idle-on-command",
+		"qcom,mdss-dsi-idle-on-command-state");
+
+	mdss_dsi_parse_dcs_cmds(np, &ctrl_pdata->idle_off_cmds,
+		"qcom,mdss-dsi-idle-off-command",
+		"qcom,mdss-dsi-idle-off-command-state");
+
+	rc = of_property_read_u32(np, "qcom,mdss-dsi-idle-fps", &tmp);
+	pinfo->mipi.frame_rate_idle = (!rc ? tmp : 60);
+
+	rc = of_property_read_u32(np, "qcom,adjust-timer-wakeup-ms", &tmp);
+	pinfo->adjust_timer_delay_ms = (!rc ? tmp : 0);
+
+	pinfo->mipi.force_clk_lane_hs = of_property_read_bool(np,
+		"qcom,mdss-dsi-force-clock-lane-hs");
+
+	rc = mdss_dsi_parse_panel_features(np, ctrl_pdata);
+	if (rc) {
+		pr_err("%s: failed to parse panel features\n", __func__);
+		goto error;
+	}
+
+	mdss_dsi_parse_panel_horizintal_line_idle(np, ctrl_pdata);
+
+	mdss_dsi_parse_dfps_config(np, ctrl_pdata);
+
+	rc = mdss_panel_parse_dt_hdmi(np, ctrl_pdata);
+	if (rc)
+		goto error;
+
+	return 0;
+
+error:
+	return -EINVAL;
+}
+
+/*
+ * Initialize a DSI panel from its DT node: parse the full panel
+ * configuration and install the panel operation callbacks on the
+ * controller.  'ndx' identifies the controller instance.
+ *
+ * Returns 0 on success, -ENODEV on invalid arguments, or the DT parse
+ * error.
+ */
+int mdss_dsi_panel_init(struct device_node *node,
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata,
+	int ndx)
+{
+	int rc = 0;
+	static const char *panel_name;
+	struct mdss_panel_info *pinfo;
+
+	if (!node || !ctrl_pdata) {
+		pr_err("%s: Invalid arguments\n", __func__);
+		return -ENODEV;
+	}
+
+	pinfo = &ctrl_pdata->panel_data.panel_info;
+
+	pr_debug("%s:%d\n", __func__, __LINE__);
+	pinfo->panel_name[0] = '\0';
+	panel_name = of_get_property(node, "qcom,mdss-dsi-panel-name", NULL);
+	if (!panel_name) {
+		pr_info("%s:%d, Panel name not specified\n",
+						__func__, __LINE__);
+	} else {
+		pr_info("%s: Panel Name = %s\n", __func__, panel_name);
+		strlcpy(&pinfo->panel_name[0], panel_name, MDSS_MAX_PANEL_LEN);
+	}
+	rc = mdss_panel_parse_dt(node, ctrl_pdata);
+	if (rc) {
+		pr_err("%s:%d panel dt parse failed\n", __func__, __LINE__);
+		return rc;
+	}
+
+	/* Reset the runtime state flags for a freshly initialized panel */
+	pinfo->dynamic_switch_pending = false;
+	pinfo->is_lpm_mode = false;
+	pinfo->esd_rdy = false;
+	pinfo->persist_mode = false;
+
+	/* Install the panel operation callbacks on the controller */
+	ctrl_pdata->on = mdss_dsi_panel_on;
+	ctrl_pdata->post_panel_on = mdss_dsi_post_panel_on;
+	ctrl_pdata->off = mdss_dsi_panel_off;
+	ctrl_pdata->low_power_config = mdss_dsi_panel_low_power_config;
+	ctrl_pdata->panel_data.set_backlight = mdss_dsi_panel_bl_ctrl;
+	ctrl_pdata->panel_data.apply_display_setting =
+			mdss_dsi_panel_apply_display_setting;
+	ctrl_pdata->switch_mode = mdss_dsi_panel_switch_mode;
+	ctrl_pdata->panel_data.get_idle = mdss_dsi_panel_get_idle_mode;
+	return 0;
+}
diff --git a/drivers/video/fbdev/msm/mdss_dsi_phy.c b/drivers/video/fbdev/msm/mdss_dsi_phy.c
new file mode 100644
index 0000000..456a8eb
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_dsi_phy.c
@@ -0,0 +1,904 @@
+/* Copyright (c) 2015-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "mdss_dsi_phy.h"
+
+#define ESC_CLK_MHZ 192
+#define ESCCLK_MMSS_CC_PREDIV 10
+
+#define TLPX_NUMER 1000
+#define TR_EOT 20
+#define TA_GO  3
+#define TA_SURE 0
+#define TA_GET 4
+
+#define CLK_PREPARE_SPEC_MIN	38
+#define CLK_PREPARE_SPEC_MAX	95
+#define CLK_TRAIL_SPEC_MIN	60
+#define HS_EXIT_SPEC_MIN	100
+#define HS_EXIT_RECO_MAX	255
+#define HS_RQST_SPEC_MIN	50
+#define CLK_ZERO_RECO_MAX1	511
+#define CLK_ZERO_RECO_MAX2	255
+
+/* No. of timing params for phy rev 2.0 */
+#define TIMING_PARAM_DLANE_COUNT	32
+#define TIMING_PARAM_CLK_COUNT	8
+
+/* One PHY timing parameter: spec limits, recommended range, final value */
+struct timing_entry {
+	int32_t mipi_min;	/* MIPI spec minimum */
+	int32_t mipi_max;	/* MIPI spec maximum */
+	int32_t rec_min;	/* recommended (register-domain) minimum */
+	int32_t rec_max;	/* recommended (register-domain) maximum */
+	int32_t rec;		/* value chosen within [rec_min, rec_max] */
+	char program_value;	/* 8-bit value programmed into the PHY */
+};
+
+/* Full set of D-PHY timing parameters computed by this driver */
+struct dsi_phy_timing {
+	struct timing_entry clk_prepare;
+	struct timing_entry clk_zero;
+	struct timing_entry clk_trail;
+	struct timing_entry hs_prepare;
+	struct timing_entry hs_zero;
+	struct timing_entry hs_trail;
+	struct timing_entry hs_rqst;	/* HS request, data lanes */
+	struct timing_entry hs_rqst_clk;	/* HS request, clock lane */
+	struct timing_entry hs_exit;
+	struct timing_entry ta_go;
+	struct timing_entry ta_sure;
+	struct timing_entry ta_get;
+	struct timing_entry clk_post;
+	struct timing_entry clk_pre;
+};
+
+/* Clock inputs used by the timing calculations */
+struct dsi_phy_t_clk_param {
+	uint32_t bitclk_mbps;	/* DSI bit clock rate, Mbps */
+	uint32_t escclk_numer;	/* escape clock numerator */
+	uint32_t escclk_denom;	/* escape clock denominator */
+	uint32_t tlpx_numer_ns;	/* TLPX numerator, ns */
+	uint32_t treot_ns;	/* TR-EOT allowance, ns */
+};
+
+/*
+ * Validate that a computed timing value fits in 8 bits and latch it as the
+ * programmable value. Returns 0 on success, -EINVAL if the value overflows.
+ */
+static int  mdss_dsi_phy_common_validate_and_set(struct timing_entry *te,
+		char const *te_name)
+{
+	if (te->rec & 0xffffff00) {
+		/* Output value can only be 8 bits */
+		pr_err("Incorrect %s calculations - %d\n", te_name, te->rec);
+		return -EINVAL;
+	}
+	pr_debug("%s program value=%d\n", te_name, te->rec);
+	te->program_value = te->rec;
+	return 0;
+}
+
+/*
+ * Like the common validator, but a negative computed value is clamped to 0
+ * instead of being treated as an error.
+ */
+static int mdss_dsi_phy_validate_and_set(struct timing_entry *te,
+		char const *te_name)
+{
+	if (te->rec < 0)
+		te->program_value = 0;
+	else
+		return mdss_dsi_phy_common_validate_and_set(te, te_name);
+
+	return 0;
+}
+
+/*
+ * Seed the timing parameter structure with MIPI-spec constants and the
+ * revision-specific clk_prepare recommended range. Returns -EINVAL for an
+ * unknown PHY revision.
+ */
+static int mdss_dsi_phy_initialize_defaults(struct dsi_phy_t_clk_param *t_clk,
+		struct dsi_phy_timing *t_param, u32 phy_rev)
+{
+
+	if (phy_rev <= DSI_PHY_REV_UNKNOWN || phy_rev >= DSI_PHY_REV_MAX) {
+		pr_err("Invalid PHY %d revision\n", phy_rev);
+		return -EINVAL;
+	}
+
+	/* MIPI spec bounds common to all revisions */
+	t_param->clk_prepare.mipi_min = CLK_PREPARE_SPEC_MIN;
+	t_param->clk_prepare.mipi_max = CLK_PREPARE_SPEC_MAX;
+	t_param->clk_trail.mipi_min = CLK_TRAIL_SPEC_MIN;
+	t_param->hs_exit.mipi_min = HS_EXIT_SPEC_MIN;
+	t_param->hs_exit.rec_max = HS_EXIT_RECO_MAX;
+
+	if (phy_rev == DSI_PHY_REV_20) {
+		t_param->clk_prepare.rec_min =
+			DIV_ROUND_UP((t_param->clk_prepare.mipi_min
+						* t_clk->bitclk_mbps),
+					(8 * t_clk->tlpx_numer_ns));
+		t_param->clk_prepare.rec_max =
+			rounddown(mult_frac(t_param->clk_prepare.mipi_max
+						* t_clk->bitclk_mbps, 1,
+						(8 * t_clk->tlpx_numer_ns)), 1);
+		t_param->hs_rqst.mipi_min = HS_RQST_SPEC_MIN;
+		t_param->hs_rqst_clk.mipi_min = HS_RQST_SPEC_MIN;
+	} else if (phy_rev == DSI_PHY_REV_10) {
+		t_param->clk_prepare.rec_min =
+			(DIV_ROUND_UP(t_param->clk_prepare.mipi_min *
+				      t_clk->bitclk_mbps,
+				      t_clk->tlpx_numer_ns)) - 2;
+		t_param->clk_prepare.rec_max =
+			(DIV_ROUND_UP(t_param->clk_prepare.mipi_max *
+				      t_clk->bitclk_mbps,
+				      t_clk->tlpx_numer_ns)) - 2;
+	}
+
+	pr_debug("clk_prepare: min=%d, max=%d\n", t_param->clk_prepare.rec_min,
+			t_param->clk_prepare.rec_max);
+
+	return 0;
+}
+
+/*
+ * Compute all D-PHY timing parameters for PHY rev 2.0 (14nm).
+ * Fractional arithmetic is done in 2^20 fixed point (see 'multiplier').
+ * Returns 0 on success, -EINVAL if any computed value fails validation.
+ */
+static int mdss_dsi_phy_calc_param_phy_rev_2(struct dsi_phy_t_clk_param *t_clk,
+		struct dsi_phy_timing *t_param)
+{
+	/* recommended fraction for PHY REV 2.0 */
+	u32 const min_prepare_frac = 50;
+	u32 const hs_exit_min_frac = 10;
+	u32 const phy_timing_frac = 30;
+	u32 const hs_zero_min_frac = 10;
+	u32 const clk_zero_min_frac = 2;
+	int tmp;
+	int t_hs_prep_actual;
+	int teot_clk_lane, teot_data_lane;
+	u64 dividend;
+	u64 temp, rc = 0;
+	u64 multiplier = BIT(20);	/* 2^20 fixed-point scale */
+	u64 temp_multiple;
+	s64 mipi_min, mipi_max, mipi_max_tr, rec_min, rec_prog;
+	s64 clk_prep_actual;
+	s64 actual_intermediate;
+	s32 actual_frac;
+	s64 rec_temp1, rec_temp2, rec_temp3;
+	int tclk_prepare_program, dsiphy_halfbyteclk_en, tclk_zero_program;
+	int ths_request_clk_prepare, hstx_prepare_delay, temp_rec_min;
+	s64 tclk_prepare_theoretical, tclk_zero_theoretical;
+	s64 ths_request_theoretical;
+
+	/* clk_prepare calculations */
+	dividend = ((t_param->clk_prepare.rec_max
+			- t_param->clk_prepare.rec_min)
+			* min_prepare_frac * multiplier);
+	temp = roundup(div_s64(dividend, 100), multiplier);
+	temp += (t_param->clk_prepare.rec_min * multiplier);
+	t_param->clk_prepare.rec = div_s64(temp, multiplier);
+
+	rc = mdss_dsi_phy_common_validate_and_set(&t_param->clk_prepare,
+			"clk prepare");
+	if (rc)
+		goto error;
+
+	/* clk_prepare theoretical value */
+	temp_multiple = (8 * t_param->clk_prepare.program_value
+			* t_clk->tlpx_numer_ns * multiplier);
+	actual_intermediate = div_s64(temp_multiple, t_clk->bitclk_mbps);
+	div_s64_rem(temp_multiple, t_clk->bitclk_mbps, &actual_frac);
+	clk_prep_actual =
+		div_s64((actual_intermediate + actual_frac), multiplier);
+
+	pr_debug("CLK PREPARE: mipi_min=%d, max=%d, rec_min=%d, rec_max=%d",
+			t_param->clk_prepare.mipi_min,
+			t_param->clk_prepare.mipi_max,
+			t_param->clk_prepare.rec_min,
+			t_param->clk_prepare.rec_max);
+	pr_debug("prog value = %d, actual=%lld\n",
+			t_param->clk_prepare.rec, clk_prep_actual);
+
+	/* clk zero calculations */
+	/* Mipi spec min*/
+	mipi_min = (300 * multiplier) - (actual_intermediate + actual_frac);
+	t_param->clk_zero.mipi_min = div_s64(mipi_min, multiplier);
+
+	/* recommended min */
+	rec_temp1 = div_s64(mipi_min * t_clk->bitclk_mbps,
+			t_clk->tlpx_numer_ns);
+	rec_temp2 = rec_temp1 - (11 * multiplier);
+	rec_temp3 = roundup(div_s64(rec_temp2, 8),  multiplier);
+	rec_min = div_s64(rec_temp3, multiplier) - 3;
+	t_param->clk_zero.rec_min = rec_min;
+
+	/* recommended max */
+	t_param->clk_zero.rec_max =
+			((t_param->clk_zero.rec_min > 255) ? 511 : 255);
+
+	/* Programmed value */
+	t_param->clk_zero.rec = DIV_ROUND_UP(
+			(t_param->clk_zero.rec_max - t_param->clk_zero.rec_min)
+				* clk_zero_min_frac
+				+ (t_param->clk_zero.rec_min * 100), 100);
+
+	rc = mdss_dsi_phy_common_validate_and_set(&t_param->clk_zero,
+			"clk zero");
+	if (rc)
+		goto error;
+
+	pr_debug("CLK ZERO: mipi_min=%d, max=%d, rec_min=%d, rec_max=%d, prog value = %d\n",
+			t_param->clk_zero.mipi_min, t_param->clk_zero.mipi_max,
+			t_param->clk_zero.rec_min, t_param->clk_zero.rec_max,
+			t_param->clk_zero.rec);
+
+	/* clk trail calculations */
+	temp_multiple = div_s64(12 * multiplier * t_clk->tlpx_numer_ns,
+						t_clk->bitclk_mbps);
+	div_s64_rem(temp_multiple, multiplier, &actual_frac);
+
+	mipi_max_tr = 105 * multiplier + (temp_multiple + actual_frac);
+	teot_clk_lane = div_s64(mipi_max_tr, multiplier);
+
+	mipi_max = mipi_max_tr - (t_clk->treot_ns * multiplier);
+
+	t_param->clk_trail.mipi_max =  div_s64(mipi_max, multiplier);
+
+	/* recommended min*/
+	temp_multiple = div_s64(t_param->clk_trail.mipi_min * multiplier *
+			t_clk->bitclk_mbps, t_clk->tlpx_numer_ns);
+	div_s64_rem(temp_multiple, multiplier, &actual_frac);
+	rec_temp1 = temp_multiple + actual_frac + 3 * multiplier;
+	rec_temp2 = div_s64(rec_temp1, 8);
+	rec_temp3 = roundup(rec_temp2, multiplier);
+
+	t_param->clk_trail.rec_min = div_s64(rec_temp3, multiplier);
+
+	/* recommended max */
+	rec_temp1 = div_s64(mipi_max * t_clk->bitclk_mbps,
+						t_clk->tlpx_numer_ns);
+	rec_temp2 = rec_temp1 + 3 * multiplier;
+	rec_temp3 = rec_temp2 / 8;
+	t_param->clk_trail.rec_max = div_s64(rec_temp3, multiplier);
+
+	/* Programmed value */
+	t_param->clk_trail.rec = DIV_ROUND_UP(
+		(t_param->clk_trail.rec_max - t_param->clk_trail.rec_min)
+			* phy_timing_frac
+			+ (t_param->clk_trail.rec_min * 100), 100);
+
+	rc = mdss_dsi_phy_common_validate_and_set(&t_param->clk_trail,
+			"clk trail");
+	if (rc)
+		goto error;
+
+	pr_debug("CLK TRAIL: mipi_min=%d, max=%d, rec_min=%d, rec_max=%d, prog value = %d\n",
+			t_param->clk_trail.mipi_min,
+			t_param->clk_trail.mipi_max,
+			t_param->clk_trail.rec_min,
+			t_param->clk_trail.rec_max,
+			t_param->clk_trail.rec);
+
+	/* hs prepare calculations */
+	/* mipi min */
+	temp_multiple = div_s64(4 * t_clk->tlpx_numer_ns * multiplier,
+			t_clk->bitclk_mbps);
+	div_s64_rem(temp_multiple, multiplier, &actual_frac);
+	mipi_min = 40 * multiplier + (temp_multiple + actual_frac);
+	t_param->hs_prepare.mipi_min = div_s64(mipi_min, multiplier);
+
+	/* mipi max */
+	temp_multiple = div_s64(6 * t_clk->tlpx_numer_ns * multiplier,
+			t_clk->bitclk_mbps);
+	div_s64_rem(temp_multiple, multiplier, &actual_frac);
+	mipi_max = 85 * multiplier + temp_multiple;
+	t_param->hs_prepare.mipi_max = div_s64(mipi_max, multiplier);
+
+	/* recommended min */
+	temp_multiple = div_s64(mipi_min * t_clk->bitclk_mbps,
+			t_clk->tlpx_numer_ns);
+	div_s64_rem(temp_multiple, multiplier, &actual_frac);
+	rec_temp1 = roundup((temp_multiple + actual_frac)/8, multiplier);
+	t_param->hs_prepare.rec_min = div_s64(rec_temp1, multiplier);
+
+	/* recommended max*/
+	temp_multiple = div_s64(mipi_max * t_clk->bitclk_mbps,
+			t_clk->tlpx_numer_ns);
+	div_s64_rem(temp_multiple, multiplier, &actual_frac);
+	rec_temp2 = rounddown((temp_multiple + actual_frac)/8, multiplier);
+	t_param->hs_prepare.rec_max = div_s64(rec_temp2, multiplier);
+
+	/* prog value*/
+	dividend = (rec_temp2 - rec_temp1) * min_prepare_frac;
+	temp = roundup(div_u64(dividend, 100), multiplier);
+	rec_prog = temp + rec_temp1;
+	t_param->hs_prepare.rec = div_s64(rec_prog, multiplier);
+
+	rc = mdss_dsi_phy_common_validate_and_set(&t_param->hs_prepare,
+			"HS prepare");
+	if (rc)
+		goto error;
+
+	/* theoretical Value */
+	temp_multiple = div_s64(8 * rec_prog * t_clk->tlpx_numer_ns,
+			t_clk->bitclk_mbps);
+	div_s64_rem(temp_multiple, multiplier, &actual_frac);
+	t_hs_prep_actual = div_s64(temp_multiple, multiplier);
+	pr_debug("HS PREPARE: mipi_min=%d, max=%d, rec_min=%d, rec_max=%d, prog value = %d, actual=%d\n",
+			t_param->hs_prepare.mipi_min,
+			t_param->hs_prepare.mipi_max,
+			t_param->hs_prepare.rec_min,
+			t_param->hs_prepare.rec_max,
+			t_param->hs_prepare.rec, t_hs_prep_actual);
+
+	/* hs zero calculations */
+	/* mipi min*/
+	mipi_min = div_s64(10 * t_clk->tlpx_numer_ns * multiplier,
+			t_clk->bitclk_mbps);
+	rec_temp1 = (145 * multiplier) + mipi_min - temp_multiple;
+	t_param->hs_zero.mipi_min = div_s64(rec_temp1, multiplier);
+
+	/* recommended min */
+	rec_temp1 = div_s64(rec_temp1 * t_clk->bitclk_mbps,
+			t_clk->tlpx_numer_ns);
+	rec_temp2 = rec_temp1 - (11 * multiplier);
+	rec_temp3 = roundup((rec_temp2/8), multiplier);
+	rec_min = rec_temp3 - (3 * multiplier);
+	t_param->hs_zero.rec_min = div_s64(rec_min, multiplier);
+
+	t_param->hs_zero.rec_max =
+			((t_param->hs_zero.rec_min > 255) ? 511 : 255);
+
+	/* prog value */
+	t_param->hs_zero.rec = DIV_ROUND_UP(
+		(t_param->hs_zero.rec_max - t_param->hs_zero.rec_min)
+		* hs_zero_min_frac + (t_param->hs_zero.rec_min * 100),
+		100);
+
+	rc = mdss_dsi_phy_common_validate_and_set(&t_param->hs_zero, "HS zero");
+	if (rc)
+		goto error;
+
+	pr_debug("HS ZERO: mipi_min=%d, max=%d, rec_min=%d, rec_max=%d, prog value = %d\n",
+			t_param->hs_zero.mipi_min, t_param->hs_zero.mipi_max,
+			t_param->hs_zero.rec_min, t_param->hs_zero.rec_max,
+			t_param->hs_zero.rec);
+
+	/* hs_trail calculations */
+	teot_data_lane  = teot_clk_lane;
+	t_param->hs_trail.mipi_min =  60 +
+		mult_frac(t_clk->tlpx_numer_ns, 4, t_clk->bitclk_mbps);
+	t_param->hs_trail.mipi_max =  teot_clk_lane - t_clk->treot_ns;
+	t_param->hs_trail.rec_min = DIV_ROUND_UP(
+		((t_param->hs_trail.mipi_min * t_clk->bitclk_mbps)
+		 + 3 * t_clk->tlpx_numer_ns), (8 * t_clk->tlpx_numer_ns));
+	tmp = ((t_param->hs_trail.mipi_max * t_clk->bitclk_mbps)
+		 + (3 * t_clk->tlpx_numer_ns));
+	t_param->hs_trail.rec_max = tmp/(8 * t_clk->tlpx_numer_ns);
+	tmp = DIV_ROUND_UP((t_param->hs_trail.rec_max
+			- t_param->hs_trail.rec_min) * phy_timing_frac,
+			100);
+	t_param->hs_trail.rec = tmp + t_param->hs_trail.rec_min;
+
+
+	rc = mdss_dsi_phy_common_validate_and_set(&t_param->hs_trail,
+			"HS trail");
+	if (rc)
+		goto error;
+
+	pr_debug("HS TRAIL: mipi_min=%d, max=%d, rec_min=%d, rec_max=%d, prog value = %d\n",
+			t_param->hs_trail.mipi_min, t_param->hs_trail.mipi_max,
+			t_param->hs_trail.rec_min, t_param->hs_trail.rec_max,
+			t_param->hs_trail.rec);
+
+	/* hs rqst calculations for Data lane */
+	t_param->hs_rqst.rec = DIV_ROUND_UP(
+		(t_param->hs_rqst.mipi_min * t_clk->bitclk_mbps)
+		- (8 * t_clk->tlpx_numer_ns), (8 * t_clk->tlpx_numer_ns));
+
+	rc = mdss_dsi_phy_common_validate_and_set(&t_param->hs_rqst, "HS rqst");
+	if (rc)
+		goto error;
+
+	pr_debug("HS RQST-DATA: mipi_min=%d, max=%d, rec_min=%d, rec_max=%d, prog value = %d\n",
+			t_param->hs_rqst.mipi_min, t_param->hs_rqst.mipi_max,
+			t_param->hs_rqst.rec_min, t_param->hs_rqst.rec_max,
+			t_param->hs_rqst.rec);
+
+	/* hs exit calculations */
+	t_param->hs_exit.rec_min = DIV_ROUND_UP(
+		(t_param->hs_exit.mipi_min * t_clk->bitclk_mbps),
+		(8 * t_clk->tlpx_numer_ns)) - 1;
+	t_param->hs_exit.rec = DIV_ROUND_UP(
+		(t_param->hs_exit.rec_max - t_param->hs_exit.rec_min)
+			* hs_exit_min_frac
+			+ (t_param->hs_exit.rec_min * 100), 100);
+
+	rc = mdss_dsi_phy_common_validate_and_set(&t_param->hs_exit, "HS exit");
+	if (rc)
+		goto error;
+
+	pr_debug("HS EXIT: mipi_min=%d, max=%d, rec_min=%d, rec_max=%d, prog value = %d\n",
+			t_param->hs_exit.mipi_min, t_param->hs_exit.mipi_max,
+			t_param->hs_exit.rec_min, t_param->hs_exit.rec_max,
+			t_param->hs_exit.rec);
+
+	/* hs rqst calculations for Clock lane */
+	t_param->hs_rqst_clk.rec = DIV_ROUND_UP(
+		(t_param->hs_rqst_clk.mipi_min * t_clk->bitclk_mbps)
+		- (8 * t_clk->tlpx_numer_ns), (8 * t_clk->tlpx_numer_ns));
+
+	rc = mdss_dsi_phy_common_validate_and_set(&t_param->hs_rqst_clk,
+			"HS rqst clk");
+	if (rc)
+		goto error;
+
+	pr_debug("HS RQST-CLK: mipi_min=%d, max=%d, rec_min=%d, rec_max=%d, prog value = %d\n",
+			t_param->hs_rqst_clk.mipi_min,
+			t_param->hs_rqst_clk.mipi_max,
+			t_param->hs_rqst_clk.rec_min,
+			t_param->hs_rqst_clk.rec_max,
+			t_param->hs_rqst_clk.rec);
+
+	/* clk post and pre value calculation */
+	tmp = ((60 * (int)t_clk->bitclk_mbps) + (52 * 1000) - (43 * 1000));
+
+	/* clk_post minimum value can be a negative number */
+	if (tmp % (8 * 1000) != 0) {
+		if (tmp < 0)
+			tmp = (tmp / (8 * 1000))  - 1;
+		else
+			tmp = (tmp / (8 * 1000)) + 1;
+	} else {
+		tmp = tmp / (8 * 1000);
+	}
+	tmp = tmp - 1;
+
+	t_param->clk_post.program_value =
+		DIV_ROUND_UP((63 - tmp) * hs_exit_min_frac, 100);
+	t_param->clk_post.program_value += tmp;
+
+	if (t_param->clk_post.program_value & 0xffffff00) {
+		pr_err("Invalid clk post calculations - %d\n",
+			   t_param->clk_post.program_value);
+		goto error;
+	}
+
+	t_param->clk_post.rec_min = tmp;
+
+	if (t_param->hs_rqst_clk.rec < 0)
+		ths_request_clk_prepare = 0;
+	else
+		ths_request_clk_prepare = t_param->hs_rqst_clk.program_value;
+
+	ths_request_theoretical = (ths_request_clk_prepare + 1);
+
+	tclk_prepare_program = t_param->clk_prepare.program_value;
+
+	dsiphy_halfbyteclk_en = 0;
+
+	if (t_clk->bitclk_mbps > 100)
+		hstx_prepare_delay = 0;
+	else
+		hstx_prepare_delay = 3;
+
+	tclk_prepare_theoretical = ((tclk_prepare_program * 8)
+					+ (dsiphy_halfbyteclk_en * 4)
+					+ (hstx_prepare_delay * 2));
+
+	tclk_zero_program = t_param->clk_zero.program_value;
+
+	tclk_zero_theoretical = ((tclk_zero_program + 3) * 8) + 11
+						- (hstx_prepare_delay * 2);
+
+	temp_rec_min = (8 * 1000) + (tclk_prepare_theoretical * 1000)
+			+ (tclk_zero_theoretical * 1000)
+			+ (ths_request_theoretical * 8 * 1000);
+
+	t_param->clk_pre.rec_min = DIV_ROUND_UP(temp_rec_min, 8 * 1000) - 1;
+
+	if (t_param->clk_pre.rec_min > 63) {
+		t_param->clk_pre.program_value =
+			DIV_ROUND_UP((2 * 63 - t_param->clk_pre.rec_min)
+						* hs_exit_min_frac, 100);
+		t_param->clk_pre.program_value += t_param->clk_pre.rec_min;
+	} else {
+		t_param->clk_pre.program_value =
+			DIV_ROUND_UP((63 - t_param->clk_pre.rec_min)
+						* hs_exit_min_frac, 100);
+		t_param->clk_pre.program_value += t_param->clk_pre.rec_min;
+	}
+
+	if (t_param->clk_pre.program_value & 0xffffff00) {
+		pr_err("Invalid clk pre calculations - %d\n",
+				t_param->clk_pre.program_value);
+		goto error;
+	}
+	pr_debug("t_clk_post: %d t_clk_pre: %d\n",
+			t_param->clk_post.program_value,
+			t_param->clk_pre.program_value);
+
+	pr_debug("teot_clk=%d, data=%d\n", teot_clk_lane, teot_data_lane);
+	return 0;
+
+error:
+	return -EINVAL;
+}
+
+/*
+ * Compute HS-lane timing parameters (prepare/zero/trail/exit) and the
+ * clk_post/clk_pre values for PHY rev 1.0 (20nm/28nm).
+ * Assumes clk_prepare and clk_zero program values were already computed by
+ * mdss_dsi_phy_calc_param_phy_rev_1(). Returns 0 or -EINVAL.
+ */
+static int mdss_dsi_phy_calc_hs_param_phy_rev_1(
+		struct dsi_phy_t_clk_param *t_clk,
+		struct dsi_phy_timing *t_param)
+{
+	int percent_min = 10;
+	int percent_allowable_phy = 0;
+	int percent_min_ths;
+	int tmp, rc = 0;
+	int tclk_prepare_theoretical, tclk_zero_theoretical;
+	int tlpx, ths_exit_theoretical;
+
+	if (t_clk->bitclk_mbps > 1200)
+		percent_min_ths = 15;
+	else
+		percent_min_ths = 10;
+
+	if (t_clk->bitclk_mbps > 180)
+		percent_allowable_phy = 10;
+	else
+		percent_allowable_phy = 40;
+
+	t_param->hs_prepare.rec_min =
+		DIV_ROUND_UP((40 * t_clk->bitclk_mbps)
+		+ (4 * t_clk->tlpx_numer_ns), t_clk->tlpx_numer_ns) - 2;
+	t_param->hs_prepare.rec_max =
+		DIV_ROUND_UP((85 * t_clk->bitclk_mbps)
+		+ (6 * t_clk->tlpx_numer_ns), t_clk->tlpx_numer_ns) - 2;
+	tmp = DIV_ROUND_UP((t_param->hs_prepare.rec_max
+		- t_param->hs_prepare.rec_min) * percent_min_ths, 100);
+	tmp += t_param->hs_prepare.rec_min;
+	/* rev 1.0 register values must be even; clear the low bit */
+	t_param->hs_prepare.rec = (tmp & ~0x1);
+
+	rc = mdss_dsi_phy_validate_and_set(&t_param->hs_prepare, "HS prepare");
+	if (rc)
+		goto error;
+
+	tmp = (t_param->hs_prepare.program_value / 2) + 1;
+	t_param->hs_zero.rec_min = DIV_ROUND_UP((145 * t_clk->bitclk_mbps)
+		+ ((10 - (2 * (tmp + 1))) * 1000), 1000) - 2;
+	t_param->hs_zero.rec_max = 255;
+	tmp = DIV_ROUND_UP((t_param->hs_zero.rec_max
+		- t_param->hs_zero.rec_min) * percent_min, 100);
+	tmp += t_param->hs_zero.rec_min;
+	t_param->hs_zero.rec = (tmp & ~0x1);
+
+	rc = mdss_dsi_phy_validate_and_set(&t_param->hs_zero, "HS zero");
+	if (rc)
+		goto error;
+
+	t_param->hs_trail.rec_min = DIV_ROUND_UP((60 * t_clk->bitclk_mbps)
+		+ 4000, 1000) - 2;
+	t_param->hs_trail.rec_max = DIV_ROUND_UP((105 - t_clk->treot_ns)
+		* t_clk->bitclk_mbps + 12000, 1000) - 2;
+	tmp = DIV_ROUND_UP((t_param->hs_trail.rec_max
+		- t_param->hs_trail.rec_min) * percent_allowable_phy, 100);
+	tmp += t_param->hs_trail.rec_min;
+	t_param->hs_trail.rec = tmp & ~0x1;
+
+	rc = mdss_dsi_phy_validate_and_set(&t_param->hs_trail, "HS trail");
+	if (rc)
+		goto error;
+
+	t_param->hs_exit.rec_min = DIV_ROUND_UP(100 * t_clk->bitclk_mbps,
+		t_clk->tlpx_numer_ns) - 2;
+	t_param->hs_exit.rec_max = 255;
+	tmp = DIV_ROUND_UP((t_param->hs_exit.rec_max
+		- t_param->hs_exit.rec_min) * percent_min, 100);
+	tmp += t_param->hs_exit.rec_min;
+	t_param->hs_exit.rec = (tmp & ~0x1);
+
+	rc = mdss_dsi_phy_validate_and_set(&t_param->hs_exit, "HS exit");
+	if (rc)
+		goto error;
+
+	/* clk post and pre value calculation */
+	ths_exit_theoretical = (t_param->hs_exit.program_value / 2) + 1;
+	tmp = ((60 * (int)t_clk->bitclk_mbps) + (52 * 1000)
+			- (24 * 1000) - (ths_exit_theoretical * 2 * 1000));
+	/* clk_post minimum value can be a negative number */
+	if (tmp % (8 * 1000) != 0) {
+		if (tmp < 0)
+			tmp = (tmp / (8 * 1000))  - 1;
+		else
+			tmp = (tmp / (8 * 1000)) + 1;
+	} else {
+		tmp = tmp / (8 * 1000);
+	}
+	tmp = tmp - 1;
+
+	t_param->clk_post.program_value =
+		DIV_ROUND_UP((63 - tmp) * percent_min, 100);
+	t_param->clk_post.program_value += tmp;
+
+	if (t_param->clk_post.program_value & 0xffffff00) {
+		pr_err("Invalid clk post calculations - %d\n",
+				t_param->clk_post.program_value);
+		goto error;
+	}
+
+	t_param->clk_post.rec_min = tmp;
+
+	tclk_prepare_theoretical = (t_param->clk_prepare.program_value / 2) + 1;
+	tclk_zero_theoretical = (t_param->clk_zero.program_value / 2) + 1;
+	tlpx = 10000/t_clk->escclk_numer;
+
+	t_param->clk_pre.rec_min =
+		DIV_ROUND_UP((tlpx * t_clk->bitclk_mbps) + (8 * 1000)
+			+ (tclk_prepare_theoretical * 2 * 1000)
+			+ (tclk_zero_theoretical * 2 * 1000), 8 * 1000) - 1;
+	if (t_param->clk_pre.rec_min > 63) {
+		t_param->clk_pre.program_value =
+			DIV_ROUND_UP((2 * 63 - t_param->clk_pre.rec_min)
+						* percent_min, 100);
+		t_param->clk_pre.program_value += t_param->clk_pre.rec_min;
+	} else {
+		t_param->clk_pre.program_value =
+			DIV_ROUND_UP((63 - t_param->clk_pre.rec_min)
+							* percent_min, 100);
+		t_param->clk_pre.program_value += t_param->clk_pre.rec_min;
+	}
+
+	if (t_param->clk_pre.program_value & 0xffffff00) {
+		pr_err("Invalid clk pre calculations - %d\n",
+				t_param->clk_pre.program_value);
+		goto error;
+	}
+	pr_debug("t_clk_post: %d t_clk_pre: %d\n",
+			t_param->clk_post.program_value,
+			t_param->clk_pre.program_value);
+
+	return 0;
+
+error:
+	return -EINVAL;
+
+}
+
+/*
+ * Compute clock-lane timing parameters (clk_prepare/hs_rqst/clk_zero/
+ * clk_trail) for PHY rev 1.0, then delegate HS-lane parameters to
+ * mdss_dsi_phy_calc_hs_param_phy_rev_1(). Returns 0 or a negative errno.
+ */
+static int mdss_dsi_phy_calc_param_phy_rev_1(struct dsi_phy_t_clk_param *t_clk,
+		struct dsi_phy_timing *t_param)
+{
+	int percent_allowable_phy = 0;
+	int percent_min_t_clk = 10;
+	int tmp, rc = 0;
+	int clk_prep_actual;
+	int teot_clk_lane;
+	u32 temp = 0;
+
+	if (t_clk->bitclk_mbps > 180)
+		percent_allowable_phy = 10;
+	else
+		percent_allowable_phy = 40;
+
+	tmp = DIV_ROUND_UP((t_param->clk_prepare.rec_max -
+		t_param->clk_prepare.rec_min) * percent_min_t_clk, 100);
+	tmp += t_param->clk_prepare.rec_min;
+
+	/* register values are kept even on rev 1.0 */
+	t_param->clk_prepare.rec = (tmp & ~0x1);
+
+	rc = mdss_dsi_phy_common_validate_and_set(&t_param->clk_prepare,
+			"clk prepare");
+	if (rc)
+		goto error;
+
+	clk_prep_actual = 2 * ((t_param->clk_prepare.program_value
+				/ 2) + 1) * t_clk->tlpx_numer_ns;
+	clk_prep_actual /= t_clk->bitclk_mbps;
+
+	tmp = t_clk->bitclk_mbps * t_clk->escclk_denom
+		/ t_clk->escclk_numer;
+	t_param->hs_rqst.rec = tmp;
+	if (!(tmp & 0x1))
+		t_param->hs_rqst.rec -= 2;
+
+	rc = mdss_dsi_phy_common_validate_and_set(&t_param->hs_rqst, "HS rqst");
+	if (rc)
+		goto error;
+
+	if (t_param->hs_rqst.program_value < 0)
+		t_param->hs_rqst.program_value = 0;
+
+	/* t_clk_zero calculation */
+	t_param->clk_zero.mipi_min = (300 - clk_prep_actual);
+	t_param->clk_zero.rec_min = (DIV_ROUND_UP(t_param->clk_zero.mipi_min
+			* t_clk->bitclk_mbps, t_clk->tlpx_numer_ns)) - 2;
+
+	if (t_param->clk_zero.rec_min > 255) {
+		t_param->clk_zero.rec_max = CLK_ZERO_RECO_MAX1;
+		t_param->clk_zero.rec =
+			DIV_ROUND_UP(t_param->clk_zero.rec_min * 10
+				+ (t_param->clk_zero.rec_min * 100), 100);
+	} else {
+		t_param->clk_zero.rec_max = CLK_ZERO_RECO_MAX2;
+		temp = t_param->clk_zero.rec_max - t_param->clk_zero.rec_min;
+		t_param->clk_zero.rec = DIV_ROUND_UP(temp * 10
+				+ (t_param->clk_zero.rec_min * 100), 100);
+	}
+
+	t_param->clk_zero.rec &= ~0x1;
+
+	/* pad clk_zero so hs_rqst + clk_zero + clk_prepare is 8-aligned */
+	if (((t_param->hs_rqst.rec + t_param->clk_zero.rec +
+					t_param->clk_prepare.rec) % 8) != 0)
+		t_param->clk_zero.rec +=
+			(8 - ((t_param->hs_rqst.rec + t_param->clk_zero.rec +
+			       t_param->clk_prepare.rec) % 8));
+
+	rc = mdss_dsi_phy_common_validate_and_set(&t_param->clk_zero,
+			"clk zero");
+	if (rc)
+		goto error;
+
+	pr_debug("hs_rqst.rec: %d clk_zero.rec: %d clk_prepare.rec: %d\n",
+				t_param->hs_rqst.rec, t_param->clk_zero.rec,
+				t_param->clk_prepare.rec);
+	teot_clk_lane  = 105 + (12 * t_clk->tlpx_numer_ns
+		/ t_clk->bitclk_mbps);
+	t_param->clk_trail.mipi_max = teot_clk_lane - t_clk->treot_ns;
+	t_param->clk_trail.rec_min = DIV_ROUND_UP(t_param->clk_trail.mipi_min *
+		t_clk->bitclk_mbps, t_clk->tlpx_numer_ns) - 2;
+	t_param->clk_trail.rec_max = DIV_ROUND_UP(t_param->clk_trail.mipi_max *
+		t_clk->bitclk_mbps, t_clk->tlpx_numer_ns) - 2;
+
+	tmp = DIV_ROUND_UP((t_param->clk_trail.rec_max -
+		t_param->clk_trail.rec_min) * percent_allowable_phy, 100);
+	tmp += t_param->clk_trail.rec_min;
+	t_param->clk_trail.rec = (tmp & ~0x1);
+
+	rc = mdss_dsi_phy_validate_and_set(&t_param->clk_trail, "clk trail");
+	if (rc)
+		goto error;
+
+	rc = mdss_dsi_phy_calc_hs_param_phy_rev_1(t_clk, t_param);
+	if (rc)
+		pr_err("Invalid HS param calculations\n");
+
+error:
+	return rc;
+}
+
+/*
+ * Copy computed rev 1.0 timing values into the panel's PHY register table
+ * (mipi.dsi_phy_db.timing[]) and the t_clk_post/t_clk_pre fields.
+ */
+static void mdss_dsi_phy_update_timing_param(struct mdss_panel_info *pinfo,
+		struct dsi_phy_timing *t_param)
+{
+	struct mdss_dsi_phy_ctrl *reg;
+
+	reg = &(pinfo->mipi.dsi_phy_db);
+
+	pinfo->mipi.t_clk_post = t_param->clk_post.program_value;
+	pinfo->mipi.t_clk_pre = t_param->clk_pre.program_value;
+
+	/* clk_zero > 255 needs the extension bit in timing[3] */
+	if (t_param->clk_zero.rec > 255) {
+		reg->timing[0] = t_param->clk_zero.program_value - 255;
+		reg->timing[3] = 1;
+	} else {
+		reg->timing[0] = t_param->clk_zero.program_value;
+		reg->timing[3] = 0;
+	}
+	reg->timing[1] = t_param->clk_trail.program_value;
+	reg->timing[2] = t_param->clk_prepare.program_value;
+	reg->timing[4] = t_param->hs_exit.program_value;
+	reg->timing[5] = t_param->hs_zero.program_value;
+	reg->timing[6] = t_param->hs_prepare.program_value;
+	reg->timing[7] = t_param->hs_trail.program_value;
+	reg->timing[8] = t_param->hs_rqst.program_value;
+	reg->timing[9] = (TA_SURE << 16) + TA_GO;
+	reg->timing[10] = TA_GET;
+	reg->timing[11] = 0;
+
+	pr_debug("[%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x]\n",
+		reg->timing[0], reg->timing[1], reg->timing[2], reg->timing[3],
+		reg->timing[4], reg->timing[5], reg->timing[6], reg->timing[7],
+		reg->timing[8], reg->timing[9], reg->timing[10],
+		reg->timing[11]);
+}
+
+/*
+ * Copy computed rev 2.0 timing values into the 8996-style per-lane register
+ * table: one 8-entry group per data lane, then one group for the clock lane.
+ */
+static void mdss_dsi_phy_update_timing_param_rev_2(
+		struct mdss_panel_info *pinfo,
+		struct dsi_phy_timing *t_param)
+{
+	struct mdss_dsi_phy_ctrl *reg;
+	int i = 0;
+
+	reg = &(pinfo->mipi.dsi_phy_db);
+
+	pinfo->mipi.t_clk_post = t_param->clk_post.program_value;
+	pinfo->mipi.t_clk_pre = t_param->clk_pre.program_value;
+
+	/* data lanes: 4 groups of 8 entries */
+	for (i = 0; i < TIMING_PARAM_DLANE_COUNT; i += 8) {
+		reg->timing_8996[i] = t_param->hs_exit.program_value;
+		reg->timing_8996[i + 1] = t_param->hs_zero.program_value;
+		reg->timing_8996[i + 2] = t_param->hs_prepare.program_value;
+		reg->timing_8996[i + 3] = t_param->hs_trail.program_value;
+		reg->timing_8996[i + 4] = t_param->hs_rqst.program_value;
+		reg->timing_8996[i + 5] = 0x3;
+		reg->timing_8996[i + 6] = 0x4;
+		reg->timing_8996[i + 7] = 0xA0;
+	}
+
+	/* clock lane: one trailing group of 8 entries */
+	for (i = TIMING_PARAM_DLANE_COUNT;
+			i < TIMING_PARAM_DLANE_COUNT + TIMING_PARAM_CLK_COUNT;
+			i += 8) {
+		reg->timing_8996[i] = t_param->hs_exit.program_value;
+		reg->timing_8996[i + 1] = t_param->clk_zero.program_value;
+		reg->timing_8996[i + 2] = t_param->clk_prepare.program_value;
+		reg->timing_8996[i + 3] = t_param->clk_trail.program_value;
+		reg->timing_8996[i + 4] = t_param->hs_rqst_clk.program_value;
+		reg->timing_8996[i + 5] = 0x3;
+		reg->timing_8996[i + 6] = 0x4;
+		reg->timing_8996[i + 7] = 0xA0;
+	}
+}
+
+/*
+ * mdss_dsi_phy_calc_timing_param() - derive bit clock and PHY timings
+ * @pinfo: panel info (timings, bpp, enabled data lanes); updated in place
+ * @phy_rev: PHY revision (DSI_PHY_REV_10 or DSI_PHY_REV_20)
+ * @frate_hz: frame rate to calculate timings for
+ *
+ * Computes the DSI bit clock from the panel geometry, frame rate, bpp and
+ * lane count, then runs the revision-specific timing calculation and writes
+ * the results into @pinfo. Returns 0 on success or a negative errno.
+ */
+int mdss_dsi_phy_calc_timing_param(struct mdss_panel_info *pinfo, u32 phy_rev,
+		u32 frate_hz)
+{
+	struct dsi_phy_t_clk_param t_clk;
+	struct dsi_phy_timing t_param;
+	int hsync_period;
+	int vsync_period;
+	unsigned long inter_num;
+	uint32_t lane_config = 0;
+	unsigned long x, y;
+	int rc = 0;
+
+	if (!pinfo) {
+		pr_err("invalid panel info\n");
+		return -EINVAL;
+	}
+
+	hsync_period = mdss_panel_get_htotal(pinfo, true);
+	vsync_period = mdss_panel_get_vtotal(pinfo);
+
+	inter_num = pinfo->bpp * frate_hz;
+
+	if (pinfo->mipi.data_lane0)
+		lane_config++;
+	if (pinfo->mipi.data_lane1)
+		lane_config++;
+	if (pinfo->mipi.data_lane2)
+		lane_config++;
+	if (pinfo->mipi.data_lane3)
+		lane_config++;
+
+	/* Guard the division below: no enabled lanes would divide by zero */
+	if (!lane_config) {
+		pr_err("%s: no data lanes enabled\n", __func__);
+		return -EINVAL;
+	}
+
+	x = mult_frac(vsync_period * hsync_period, inter_num, lane_config);
+	y = rounddown(x, 1);
+	t_clk.bitclk_mbps = rounddown(mult_frac(y, 1, 1000000), 1);
+	t_clk.escclk_numer = ESC_CLK_MHZ;
+	t_clk.escclk_denom = ESCCLK_MMSS_CC_PREDIV;
+	t_clk.tlpx_numer_ns = TLPX_NUMER;
+	t_clk.treot_ns = TR_EOT;
+	pr_debug("hperiod=%d, vperiod=%d, inter_num=%lu, lane_cfg=%d\n",
+			hsync_period, vsync_period, inter_num, lane_config);
+	pr_debug("x=%lu, y=%lu, bitrate=%d\n", x, y, t_clk.bitclk_mbps);
+
+	switch (phy_rev) {
+	case DSI_PHY_REV_10:
+		rc = mdss_dsi_phy_initialize_defaults(&t_clk, &t_param,
+				phy_rev);
+		if (rc) {
+			pr_err("phy%d initialization failed\n", phy_rev);
+			goto timing_calc_end;
+		}
+		/* Propagate calculation errors, as the rev 2.0 path does */
+		rc = mdss_dsi_phy_calc_param_phy_rev_1(&t_clk, &t_param);
+		if (rc) {
+			pr_err("Phy timing calculations failed\n");
+			goto timing_calc_end;
+		}
+		mdss_dsi_phy_update_timing_param(pinfo, &t_param);
+		break;
+	case DSI_PHY_REV_20:
+		rc = mdss_dsi_phy_initialize_defaults(&t_clk, &t_param,
+				phy_rev);
+		if (rc) {
+			pr_err("phy%d initialization failed\n", phy_rev);
+			goto timing_calc_end;
+		}
+
+		rc = mdss_dsi_phy_calc_param_phy_rev_2(&t_clk, &t_param);
+		if (rc) {
+			pr_err("Phy timing calculations failed\n");
+			goto timing_calc_end;
+		}
+		mdss_dsi_phy_update_timing_param_rev_2(pinfo, &t_param);
+		break;
+	default:
+		pr_err("phy rev %d not supported\n", phy_rev);
+		return -EINVAL;
+	}
+
+timing_calc_end:
+	return rc;
+}
diff --git a/drivers/video/fbdev/msm/mdss_dsi_phy.h b/drivers/video/fbdev/msm/mdss_dsi_phy.h
new file mode 100644
index 0000000..aea42e8
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_dsi_phy.h
@@ -0,0 +1,39 @@
+/* Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MDSS_DSI_PHY_H
+#define MDSS_DSI_PHY_H
+
+#include <linux/types.h>
+
+#include "mdss_panel.h"
+
+/* Supported DSI PHY hardware revisions */
+enum phy_rev {
+	DSI_PHY_REV_UNKNOWN = 0x00,
+	DSI_PHY_REV_10 = 0x01,	/* REV 1.0 - 20nm, 28nm */
+	DSI_PHY_REV_20 = 0x02,	/* REV 2.0 - 14nm */
+	DSI_PHY_REV_MAX,
+};
+
+/*
+ * mdss_dsi_phy_calc_timing_param() - calculates clock timing and hs timing
+ *				parameters for the given phy revision.
+ *
+ * @pinfo - structure containing panel specific information which will be
+ *		used in calculating the phy timing parameters.
+ * @phy_rev - phy revision for which phy timings need to be calculated.
+ * @frate_hz - Frame rate for which phy timing parameters are to be calculated.
+ */
+int mdss_dsi_phy_calc_timing_param(struct mdss_panel_info *pinfo, u32 phy_rev,
+		u32 frate_hz);
+
+#endif /* MDSS_DSI_PHY_H */
diff --git a/drivers/video/fbdev/msm/mdss_dsi_status.c b/drivers/video/fbdev/msm/mdss_dsi_status.c
new file mode 100644
index 0000000..7b6be11
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_dsi_status.c
@@ -0,0 +1,286 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/fb.h>
+#include <linux/notifier.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/iopoll.h>
+#include <linux/kobject.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/interrupt.h>
+
+#include "mdss_fb.h"
+#include "mdss_dsi.h"
+#include "mdss_panel.h"
+#include "mdss_mdp.h"
+
+#define STATUS_CHECK_INTERVAL_MS 5000
+#define STATUS_CHECK_INTERVAL_MIN_MS 50
+#define DSI_STATUS_CHECK_INIT -1
+#define DSI_STATUS_CHECK_DISABLE 1
+
+static uint32_t interval = STATUS_CHECK_INTERVAL_MS;
+static int32_t dsi_status_disable = DSI_STATUS_CHECK_INIT;
+struct dsi_status_data *pstatus_data;
+
+/*
+ * check_dsi_ctrl_status() - Reads MFD structure and
+ * calls platform specific DSI ctrl Status function.
+ * @work  : dsi controller status data
+ */
+static void check_dsi_ctrl_status(struct work_struct *work)
+{
+	struct dsi_status_data *pdsi_status = NULL;
+
+	pdsi_status = container_of(to_delayed_work(work),
+		struct dsi_status_data, check_status);
+
+	if (!pdsi_status) {
+		pr_err("%s: DSI status data not available\n", __func__);
+		return;
+	}
+
+	if (!pdsi_status->mfd) {
+		pr_err("%s: FB data not available\n", __func__);
+		return;
+	}
+
+	if (mdss_panel_is_power_off(pdsi_status->mfd->panel_power_state) ||
+			pdsi_status->mfd->shutdown_pending) {
+		pr_debug("%s: panel off\n", __func__);
+		return;
+	}
+
+	pdsi_status->mfd->mdp.check_dsi_status(work, interval);
+}
+
+/*
+ * hw_vsync_handler() - Interrupt handler for HW VSYNC signal.
+ * @irq		: irq line number
+ * @data	: Pointer to the device structure.
+ *
+ * This function is called whenever a HW vsync signal is received from the
+ * panel. This resets the timer of ESD delayed workqueue back to initial
+ * value.
+ */
+irqreturn_t hw_vsync_handler(int irq, void *data)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata =
+			(struct mdss_dsi_ctrl_pdata *)data;
+	if (!ctrl_pdata) {
+		pr_err("%s: DSI ctrl not available\n", __func__);
+		return IRQ_HANDLED;
+	}
+
+	if (pstatus_data)
+		mod_delayed_work(system_wq, &pstatus_data->check_status,
+			msecs_to_jiffies(interval));
+	else
+		pr_err("Pstatus data is NULL\n");
+
+	if (!atomic_read(&ctrl_pdata->te_irq_ready)) {
+		complete_all(&ctrl_pdata->te_irq_comp);
+		atomic_inc(&ctrl_pdata->te_irq_ready);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * disable_esd_thread() - Cancels work item for the esd check.
+ */
+void disable_esd_thread(void)
+{
+	if (pstatus_data &&
+		cancel_delayed_work_sync(&pstatus_data->check_status))
+		pr_debug("esd thread killed\n");
+}
+
+/*
+ * fb_event_callback() - Call back function for the fb_register_client()
+ *			 notifying events
+ * @self  : notifier block
+ * @event : The event that was triggered
+ * @data  : Of type struct fb_event
+ *
+ * This function listens for FB_BLANK_UNBLANK and FB_BLANK_POWERDOWN events
+ * from frame buffer. DSI status check work is either scheduled again after
+ * PANEL_STATUS_CHECK_INTERVAL or cancelled based on the event.
+ */
+static int fb_event_callback(struct notifier_block *self,
+				unsigned long event, void *data)
+{
+	struct fb_event *evdata = data;
+	struct dsi_status_data *pdata = container_of(self,
+				struct dsi_status_data, fb_notifier);
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	struct mdss_panel_info *pinfo;
+	struct msm_fb_data_type *mfd;
+
+	if (!evdata) {
+		pr_err("%s: event data not available\n", __func__);
+		return NOTIFY_BAD;
+	}
+
+	/* handle only mdss fb device */
+	if (strcmp("mdssfb", evdata->info->fix.id))
+		return NOTIFY_DONE;
+
+	mfd = evdata->info->par;
+	ctrl_pdata = container_of(dev_get_platdata(&mfd->pdev->dev),
+				struct mdss_dsi_ctrl_pdata, panel_data);
+	if (!ctrl_pdata) {
+		pr_err("%s: DSI ctrl not available\n", __func__);
+		return NOTIFY_BAD;
+	}
+
+	pinfo = &ctrl_pdata->panel_data.panel_info;
+
+	if ((!(pinfo->esd_check_enabled) &&
+			dsi_status_disable) ||
+			(dsi_status_disable == DSI_STATUS_CHECK_DISABLE)) {
+		pr_debug("ESD check is disabled.\n");
+		cancel_delayed_work(&pdata->check_status);
+		return NOTIFY_DONE;
+	}
+
+	pdata->mfd = evdata->info->par;
+	if (event == FB_EVENT_BLANK) {
+		int *blank = evdata->data;
+		/* pdata and pdata->mfd are already set up above; the
+		 * former shadowing re-declaration of pdata was removed.
+		 */
+
+		switch (*blank) {
+		case FB_BLANK_UNBLANK:
+			schedule_delayed_work(&pdata->check_status,
+				msecs_to_jiffies(interval));
+			break;
+		case FB_BLANK_VSYNC_SUSPEND:
+		case FB_BLANK_NORMAL:
+			pr_debug("%s : ESD thread running\n", __func__);
+			break;
+		case FB_BLANK_POWERDOWN:
+		case FB_BLANK_HSYNC_SUSPEND:
+			cancel_delayed_work(&pdata->check_status);
+			break;
+		default:
+			pr_err("Unknown case in FB_EVENT_BLANK event\n");
+			break;
+		}
+	}
+	return NOTIFY_DONE;
+}
+
+static int param_dsi_status_disable(const char *val, struct kernel_param *kp)
+{
+	int ret = 0;
+	int int_val;
+
+	ret = kstrtos32(val, 0, &int_val);
+	if (ret)
+		return ret;
+
+	pr_info("%s: Set DSI status disable to %d\n",
+			__func__, int_val);
+	*((int *)kp->arg) = int_val;
+	return ret;
+}
+
+static int param_set_interval(const char *val, struct kernel_param *kp)
+{
+	int ret = 0;
+	int int_val;
+
+	ret = kstrtos32(val, 0, &int_val);
+	if (ret)
+		return ret;
+	if (int_val < STATUS_CHECK_INTERVAL_MIN_MS) {
+		pr_err("%s: Invalid value %d used, ignoring\n",
+						__func__, int_val);
+		ret = -EINVAL;
+	} else {
+		pr_info("%s: Set check interval to %d msecs\n",
+						__func__, int_val);
+		*((int *)kp->arg) = int_val;
+	}
+	return ret;
+}
+
+int __init mdss_dsi_status_init(void)
+{
+	int rc = 0;
+	struct mdss_util_intf *util = mdss_get_util_intf();
+
+	if (!util) {
+		pr_err("%s: Failed to get utility functions\n", __func__);
+		return -ENODEV;
+	}
+
+	if (util->display_disabled) {
+		pr_info("Display is disabled, not progressing with dsi_init\n");
+		return -ENOTSUPP;
+	}
+
+	pstatus_data = kzalloc(sizeof(struct dsi_status_data), GFP_KERNEL);
+	if (!pstatus_data)
+		return -ENOMEM;
+
+	pstatus_data->fb_notifier.notifier_call = fb_event_callback;
+
+	rc = fb_register_client(&pstatus_data->fb_notifier);
+	if (rc < 0) {
+		pr_err("%s: fb_register_client failed, returned with rc=%d\n",
+								__func__, rc);
+		kfree(pstatus_data);
+		return -EPERM;
+	}
+
+	pr_info("%s: DSI status check interval:%d\n", __func__,	interval);
+
+	INIT_DELAYED_WORK(&pstatus_data->check_status, check_dsi_ctrl_status);
+
+	pr_debug("%s: DSI ctrl status work queue initialized\n", __func__);
+
+	return rc;
+}
+
+void __exit mdss_dsi_status_exit(void)
+{
+	fb_unregister_client(&pstatus_data->fb_notifier);
+	cancel_delayed_work_sync(&pstatus_data->check_status);
+	kfree(pstatus_data);
+	pr_debug("%s: DSI ctrl status work queue removed\n", __func__);
+}
+
+module_param_call(interval, param_set_interval, param_get_uint,
+						&interval, 0644);
+MODULE_PARM_DESC(interval,
+	"Duration in milliseconds to send BTA command for DSI status check");
+
+module_param_call(dsi_status_disable, param_dsi_status_disable, param_get_uint,
+						&dsi_status_disable, 0644);
+MODULE_PARM_DESC(dsi_status_disable,
+		"Disable DSI status check");
+
+module_init(mdss_dsi_status_init);
+module_exit(mdss_dsi_status_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/fbdev/msm/mdss_edp.c b/drivers/video/fbdev/msm/mdss_edp.c
new file mode 100644
index 0000000..55d4ab3
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_edp.c
@@ -0,0 +1,1271 @@
+/* Copyright (c) 2012-2015, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/err.h>
+#include <linux/regulator/consumer.h>
+#include <linux/qpnp/pwm.h>
+#include <linux/clk.h>
+#include <linux/spinlock_types.h>
+#include <linux/kthread.h>
+#include <mach/hardware.h>
+#include <mach/dma.h>
+
+#include "mdss.h"
+#include "mdss_edp.h"
+#include "mdss_debug.h"
+
+#define RGB_COMPONENTS		3
+#define VDDA_MIN_UV			1800000	/* uV units */
+#define VDDA_MAX_UV			1800000	/* uV units */
+#define VDDA_UA_ON_LOAD		100000	/* uA units */
+#define VDDA_UA_OFF_LOAD	100		/* uA units */
+
+static int mdss_edp_regulator_on(struct mdss_edp_drv_pdata *edp_drv);
+/*
+ * Init regulator needed for edp, 8974_l12
+ */
+static int mdss_edp_regulator_init(struct mdss_edp_drv_pdata *edp_drv)
+{
+	int ret;
+
+	edp_drv->vdda_vreg = devm_regulator_get(&(edp_drv->pdev->dev), "vdda");
+	if (IS_ERR(edp_drv->vdda_vreg)) {
+		pr_err("%s: Could not get 8941_l12, ret = %ld\n", __func__,
+				PTR_ERR(edp_drv->vdda_vreg));
+		return -ENODEV;
+	}
+
+	ret = regulator_set_voltage(edp_drv->vdda_vreg,
+			VDDA_MIN_UV, VDDA_MAX_UV);
+	if (ret) {
+		pr_err("%s: vdda_vreg set_voltage failed, ret=%d\n", __func__,
+				ret);
+		return -EINVAL;
+	}
+
+	ret = mdss_edp_regulator_on(edp_drv);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+/*
+ * Set uA and enable vdda
+ */
+static int mdss_edp_regulator_on(struct mdss_edp_drv_pdata *edp_drv)
+{
+	int ret;
+
+	ret = regulator_set_optimum_mode(edp_drv->vdda_vreg, VDDA_UA_ON_LOAD);
+	if (ret < 0) {
+		pr_err("%s: vdda_vreg set regulator mode failed.\n", __func__);
+		return ret;
+	}
+
+	ret = regulator_enable(edp_drv->vdda_vreg);
+	if (ret) {
+		pr_err("%s: Failed to enable vdda_vreg regulator.\n", __func__);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Disable vdda and set uA
+ */
+static int mdss_edp_regulator_off(struct mdss_edp_drv_pdata *edp_drv)
+{
+	int ret;
+
+	ret = regulator_disable(edp_drv->vdda_vreg);
+	if (ret) {
+		pr_err("%s: Failed to disable vdda_vreg regulator.\n",
+				__func__);
+		return ret;
+	}
+
+	ret = regulator_set_optimum_mode(edp_drv->vdda_vreg, VDDA_UA_OFF_LOAD);
+	if (ret < 0) {
+		pr_err("%s: vdda_vreg set regulator mode failed.\n",
+				__func__);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Enables the gpio that supply power to the panel and enable the backlight
+ */
+static int mdss_edp_gpio_panel_en(struct mdss_edp_drv_pdata *edp_drv)
+{
+	int ret = 0;
+
+	edp_drv->gpio_panel_en = of_get_named_gpio(edp_drv->pdev->dev.of_node,
+			"gpio-panel-en", 0);
+	if (!gpio_is_valid(edp_drv->gpio_panel_en)) {
+		pr_err("%s: gpio_panel_en=%d not specified\n", __func__,
+				edp_drv->gpio_panel_en);
+		goto gpio_err;
+	}
+
+	ret = gpio_request(edp_drv->gpio_panel_en, "disp_enable");
+	if (ret) {
+		pr_err("%s: Request reset gpio_panel_en failed, ret=%d\n",
+				__func__, ret);
+		return ret;
+	}
+
+	ret = gpio_direction_output(edp_drv->gpio_panel_en, 1);
+	if (ret) {
+		pr_err("%s: Set direction for gpio_panel_en failed, ret=%d\n",
+				__func__, ret);
+		goto gpio_free;
+	}
+
+	return 0;
+
+gpio_free:
+	gpio_free(edp_drv->gpio_panel_en);
+gpio_err:
+	return -ENODEV;
+}
+
+static int mdss_edp_gpio_lvl_en(struct mdss_edp_drv_pdata *edp_drv)
+{
+	int ret = 0;
+
+	edp_drv->gpio_lvl_en = of_get_named_gpio(edp_drv->pdev->dev.of_node,
+			"gpio-lvl-en", 0);
+	if (!gpio_is_valid(edp_drv->gpio_lvl_en)) {
+		pr_err("%s: gpio_lvl_en=%d not specified\n", __func__,
+				edp_drv->gpio_lvl_en);
+		ret = -ENODEV;
+		goto gpio_err;
+	}
+
+	ret = gpio_request(edp_drv->gpio_lvl_en, "lvl_enable");
+	if (ret) {
+		pr_err("%s: Request reset gpio_lvl_en failed, ret=%d\n",
+				__func__, ret);
+		return ret;
+	}
+
+	ret = gpio_direction_output(edp_drv->gpio_lvl_en, 1);
+	if (ret) {
+		pr_err("%s: Set direction for gpio_lvl_en failed, ret=%d\n",
+				__func__, ret);
+		goto gpio_free;
+	}
+
+	return ret;
+
+gpio_free:
+	gpio_free(edp_drv->gpio_lvl_en);
+gpio_err:
+	return ret;
+}
+
+static int mdss_edp_pwm_config(struct mdss_edp_drv_pdata *edp_drv)
+{
+	int ret = 0;
+
+	ret = of_property_read_u32(edp_drv->pdev->dev.of_node,
+			"qcom,panel-pwm-period", &edp_drv->pwm_period);
+	if (ret) {
+		pr_warn("%s: panel pwm period is not specified, %d", __func__,
+				edp_drv->pwm_period);
+		edp_drv->pwm_period = -EINVAL;
+	}
+
+	ret = of_property_read_u32(edp_drv->pdev->dev.of_node,
+			"qcom,panel-lpg-channel", &edp_drv->lpg_channel);
+	if (ret) {
+		pr_warn("%s: panel lpg channel is not specified, %d", __func__,
+				edp_drv->lpg_channel);
+		edp_drv->lpg_channel = -EINVAL;
+	}
+
+	if (edp_drv->pwm_period != -EINVAL &&
+		edp_drv->lpg_channel != -EINVAL) {
+		edp_drv->bl_pwm = pwm_request(edp_drv->lpg_channel,
+				"lcd-backlight");
+		if (edp_drv->bl_pwm == NULL || IS_ERR(edp_drv->bl_pwm)) {
+			pr_err("%s: pwm request failed", __func__);
+			edp_drv->bl_pwm = NULL;
+			return -EIO;
+		}
+	} else {
+		edp_drv->bl_pwm = NULL;
+	}
+
+	return 0;
+}
+
+void mdss_edp_set_backlight(struct mdss_panel_data *pdata, u32 bl_level)
+{
+	int ret = 0;
+	struct mdss_edp_drv_pdata *edp_drv = NULL;
+	int bl_max;
+	int period_ns;
+
+	edp_drv = container_of(pdata, struct mdss_edp_drv_pdata, panel_data);
+	if (!edp_drv) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	if (edp_drv->bl_pwm != NULL) {
+		bl_max = edp_drv->panel_data.panel_info.bl_max;
+		if (bl_level > bl_max)
+			bl_level = bl_max;
+
+		/* In order to avoid overflow, use the microsecond version
+		 * of pwm_config if the pwm_period is greater than or equal
+		 * to 1 second.
+		 */
+		if (edp_drv->pwm_period >= USEC_PER_SEC) {
+			ret = pwm_config_us(edp_drv->bl_pwm,
+					bl_level * edp_drv->pwm_period / bl_max,
+					edp_drv->pwm_period);
+			if (ret) {
+				pr_err("%s: pwm_config_us() failed err=%d.\n",
+						__func__, ret);
+				return;
+			}
+		} else {
+			period_ns = edp_drv->pwm_period * NSEC_PER_USEC;
+			ret = pwm_config(edp_drv->bl_pwm,
+					bl_level * period_ns / bl_max,
+					period_ns);
+			if (ret) {
+				pr_err("%s: pwm_config() failed err=%d.\n",
+						__func__, ret);
+				return;
+			}
+		}
+
+		if (edp_drv->is_pwm_enabled) {
+			pwm_disable(edp_drv->bl_pwm);
+			edp_drv->is_pwm_enabled = 0;
+		}
+
+		ret = pwm_enable(edp_drv->bl_pwm);
+		if (ret) {
+			pr_err("%s: pwm_enable() failed err=%d\n", __func__,
+					ret);
+			return;
+		}
+		edp_drv->is_pwm_enabled = 1;
+	}
+}
+
+int mdss_edp_mainlink_ready(struct mdss_edp_drv_pdata *ep, u32 which)
+{
+	u32 data;
+	int cnt = 10;
+
+	while (--cnt) {
+		data = edp_read(ep->base + 0x84); /* EDP_MAINLINK_READY */
+		if (data & which) {
+			pr_debug("%s: which=%x ready\n", __func__, which);
+			return 1;
+		}
+		usleep_range(1000, 1100);
+	}
+	pr_err("%s: which=%x NOT ready\n", __func__, which);
+
+	return 0;
+}
+
+void mdss_edp_mainlink_reset(struct mdss_edp_drv_pdata *ep)
+{
+	edp_write(ep->base + 0x04, 0x02); /* EDP_MAINLINK_CTRL */
+	usleep_range(1000, 1100);
+	edp_write(ep->base + 0x04, 0); /* EDP_MAINLINK_CTRL */
+}
+
+void mdss_edp_mainlink_ctrl(struct mdss_edp_drv_pdata *ep, int enable)
+{
+	u32 data;
+
+	data = edp_read(ep->base + 0x04);
+	data &= ~BIT(0);
+
+	if (enable)
+		data |= 0x1;
+
+	edp_write(ep->base + 0x04, data);
+}
+
+void mdss_edp_state_ctrl(struct mdss_edp_drv_pdata *ep, u32 state)
+{
+	edp_write(ep->base + EDP_STATE_CTRL, state);
+}
+
+void mdss_edp_aux_reset(struct mdss_edp_drv_pdata *ep)
+{
+	/* reset AUX */
+	edp_write(ep->base + 0x300, BIT(1)); /* EDP_AUX_CTRL */
+	usleep_range(1000, 1100);
+	edp_write(ep->base + 0x300, 0); /* EDP_AUX_CTRL */
+}
+
+void mdss_edp_aux_ctrl(struct mdss_edp_drv_pdata *ep, int enable)
+{
+	u32 data;
+
+	data = edp_read(ep->base + 0x300);
+	if (enable)
+		data |= 0x01;
+	else
+		data &= ~0x01;
+	edp_write(ep->base + 0x300, data); /* EDP_AUX_CTRL */
+}
+
+void mdss_edp_phy_pll_reset(struct mdss_edp_drv_pdata *ep)
+{
+	/* EDP_PHY_CTRL */
+	edp_write(ep->base + 0x74, 0x005); /* bit 0, 2 */
+	usleep_range(1000, 1100);
+	edp_write(ep->base + 0x74, 0x000); /* EDP_PHY_CTRL */
+}
+
+int mdss_edp_phy_pll_ready(struct mdss_edp_drv_pdata *ep)
+{
+	int cnt;
+	u32 status = 0;
+
+	cnt = 100;
+	while (--cnt) {
+		status = edp_read(ep->base + 0x6c0);
+		if (status & 0x01)
+			break;
+		usleep_range(100, 110);
+	}
+
+	pr_debug("%s: PLL cnt=%d status=%x\n", __func__, cnt, (int)status);
+
+	if (cnt <= 0) {
+		pr_err("%s: PLL NOT ready\n", __func__);
+		return 0;
+	} else
+		return 1;
+}
+
+int mdss_edp_phy_ready(struct mdss_edp_drv_pdata *ep)
+{
+	u32 status;
+
+	status = edp_read(ep->base + 0x598);
+	status &= 0x01;
+
+	return status;
+}
+
+void mdss_edp_phy_power_ctrl(struct mdss_edp_drv_pdata *ep, int enable)
+{
+	if (enable) {
+		/* EDP_PHY_EDPPHY_GLB_PD_CTL */
+		edp_write(ep->base + 0x52c, 0x3f);
+		/* EDP_PHY_EDPPHY_GLB_CFG */
+		edp_write(ep->base + 0x528, 0x1);
+		/* EDP_PHY_PLL_UNIPHY_PLL_GLB_CFG */
+		edp_write(ep->base + 0x620, 0xf);
+	} else {
+		/* EDP_PHY_EDPPHY_GLB_PD_CTL */
+		edp_write(ep->base + 0x52c, 0xc0);
+	}
+}
+
+void mdss_edp_lane_power_ctrl(struct mdss_edp_drv_pdata *ep, int up)
+{
+	int i, off, max_lane;
+	u32 data;
+
+	max_lane = ep->lane_cnt;
+
+	if (up)
+		data = 0;	/* power up */
+	else
+		data = 0x7;	/* power down */
+
+	/* EDP_PHY_EDPPHY_LNn_PD_CTL */
+	for (i = 0; i < max_lane; i++) {
+		off = 0x40 * i;
+		edp_write(ep->base + 0x404 + off, data);
+	}
+
+	/* always power down the unused lanes */
+	data = 0x7;	/* power down */
+	for (i = max_lane; i < EDP_MAX_LANE; i++) {
+		off = 0x40 * i;
+		edp_write(ep->base + 0x404 + off, data);
+	}
+}
+
+void mdss_edp_clock_synchrous(struct mdss_edp_drv_pdata *ep, int sync)
+{
+	u32 data;
+	u32 color;
+
+	/* EDP_MISC1_MISC0 */
+	data = edp_read(ep->base + 0x02c);
+
+	if (sync)
+		data |= 0x01;
+	else
+		data &= ~0x01;
+
+	/* only legacy rgb mode supported */
+	color = 0; /* 6 bits */
+	if (ep->edid.color_depth == 8)
+		color = 0x01;
+	else if (ep->edid.color_depth == 10)
+		color = 0x02;
+	else if (ep->edid.color_depth == 12)
+		color = 0x03;
+	else if (ep->edid.color_depth == 16)
+		color = 0x04;
+
+	color <<= 5;    /* bit 5 to bit 7 */
+
+	data |= color;
+	/* EDP_MISC1_MISC0 */
+	edp_write(ep->base + 0x2c, data);
+}
+
+/* voltage mode and pre emphasis cfg */
+void mdss_edp_phy_vm_pe_init(struct mdss_edp_drv_pdata *ep)
+{
+	/* EDP_PHY_EDPPHY_GLB_VM_CFG0 */
+	edp_write(ep->base + 0x510, 0x3);	/* vm only */
+	/* EDP_PHY_EDPPHY_GLB_VM_CFG1 */
+	edp_write(ep->base + 0x514, 0x64);
+	/* EDP_PHY_EDPPHY_GLB_MISC9 */
+	edp_write(ep->base + 0x518, 0x6c);
+}
+
+void mdss_edp_config_ctrl(struct mdss_edp_drv_pdata *ep)
+{
+	struct dpcd_cap *cap;
+	struct display_timing_desc *dp;
+	u32 data = 0;
+
+	dp = &ep->edid.timing[0];
+
+	cap = &ep->dpcd;
+
+	data = ep->lane_cnt - 1;
+	data <<= 4;
+
+	if (cap->enhanced_frame)
+		data |= 0x40;
+
+	if (ep->edid.color_depth == 8) {
+		/* 0 == 6 bits, 1 == 8 bits */
+		data |= 0x100;	/* bit 8 */
+	}
+
+	if (!dp->interlaced)	/* progressive */
+		data |= 0x04;
+
+	data |= 0x03;	/* sync clock & static Mvid */
+
+	edp_write(ep->base + 0xc, data); /* EDP_CONFIGURATION_CTRL */
+}
+
+static void mdss_edp_sw_mvid_nvid(struct mdss_edp_drv_pdata *ep)
+{
+	edp_write(ep->base + 0x14, 0x13b); /* EDP_SOFTWARE_MVID */
+	edp_write(ep->base + 0x18, 0x266); /* EDP_SOFTWARE_NVID */
+}
+
+static void mdss_edp_timing_cfg(struct mdss_edp_drv_pdata *ep)
+{
+	struct mdss_panel_info *pinfo;
+	u32 total_ver, total_hor;
+	u32 data;
+
+	pinfo = &ep->panel_data.panel_info;
+
+	pr_debug("%s: width=%d hporch= %d %d %d\n", __func__,
+		pinfo->xres, pinfo->lcdc.h_back_porch,
+		pinfo->lcdc.h_front_porch, pinfo->lcdc.h_pulse_width);
+
+	pr_debug("%s: height=%d vporch= %d %d %d\n", __func__,
+		pinfo->yres, pinfo->lcdc.v_back_porch,
+		pinfo->lcdc.v_front_porch, pinfo->lcdc.v_pulse_width);
+
+	total_hor = pinfo->xres + pinfo->lcdc.h_back_porch +
+		pinfo->lcdc.h_front_porch + pinfo->lcdc.h_pulse_width;
+
+	total_ver = pinfo->yres + pinfo->lcdc.v_back_porch +
+			pinfo->lcdc.v_front_porch + pinfo->lcdc.v_pulse_width;
+
+	data = total_ver;
+	data <<= 16;
+	data |= total_hor;
+	edp_write(ep->base + 0x1c, data); /* EDP_TOTAL_HOR_VER */
+
+	data = (pinfo->lcdc.v_back_porch + pinfo->lcdc.v_pulse_width);
+	data <<= 16;
+	data |= (pinfo->lcdc.h_back_porch + pinfo->lcdc.h_pulse_width);
+	edp_write(ep->base + 0x20, data); /* EDP_START_HOR_VER_FROM_SYNC */
+
+	data = pinfo->lcdc.v_pulse_width;
+	data <<= 16;
+	data |= pinfo->lcdc.h_pulse_width;
+	edp_write(ep->base + 0x24, data); /* EDP_HSYNC_VSYNC_WIDTH_POLARITY */
+
+	data = pinfo->yres;
+	data <<= 16;
+	data |= pinfo->xres;
+	edp_write(ep->base + 0x28, data); /* EDP_ACTIVE_HOR_VER */
+}
+
+int mdss_edp_wait4train(struct mdss_edp_drv_pdata *edp_drv)
+{
+	int ret = 0;
+
+	if (edp_drv->cont_splash)
+		return ret;
+
+	ret = wait_for_completion_timeout(&edp_drv->video_comp, 30);
+	if (ret <= 0) {
+		pr_err("%s: Link Train timedout\n", __func__);
+		ret = -EINVAL;
+	} else {
+		ret = 0;
+	}
+
+	pr_debug("%s:\n", __func__);
+
+	return ret;
+}
+
+static void mdss_edp_irq_enable(struct mdss_edp_drv_pdata *edp_drv);
+static void mdss_edp_irq_disable(struct mdss_edp_drv_pdata *edp_drv);
+
+int mdss_edp_on(struct mdss_panel_data *pdata)
+{
+	struct mdss_edp_drv_pdata *edp_drv = NULL;
+	int ret = 0;
+
+	if (!pdata) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	edp_drv = container_of(pdata, struct mdss_edp_drv_pdata,
+			panel_data);
+
+	pr_debug("%s:+, cont_splash=%d\n", __func__, edp_drv->cont_splash);
+
+	if (!edp_drv->cont_splash) { /* vote for clocks */
+		mdss_edp_phy_pll_reset(edp_drv);
+		mdss_edp_aux_reset(edp_drv);
+		mdss_edp_mainlink_reset(edp_drv);
+		mdss_edp_aux_ctrl(edp_drv, 1);
+
+		ret = mdss_edp_prepare_clocks(edp_drv);
+		if (ret)
+			return ret;
+
+		mdss_edp_phy_power_ctrl(edp_drv, 1);
+
+		ret = mdss_edp_clk_enable(edp_drv);
+		if (ret) {
+			mdss_edp_unprepare_clocks(edp_drv);
+			return ret;
+		}
+
+		mdss_edp_phy_pll_ready(edp_drv);
+
+		mdss_edp_lane_power_ctrl(edp_drv, 1);
+
+		mdss_edp_clock_synchrous(edp_drv, 1);
+		mdss_edp_phy_vm_pe_init(edp_drv);
+		mdss_edp_config_ctrl(edp_drv);
+		mdss_edp_sw_mvid_nvid(edp_drv);
+		mdss_edp_timing_cfg(edp_drv);
+
+		gpio_set_value(edp_drv->gpio_panel_en, 1);
+		if (gpio_is_valid(edp_drv->gpio_lvl_en))
+			gpio_set_value(edp_drv->gpio_lvl_en, 1);
+
+		reinit_completion(&edp_drv->idle_comp);
+		mdss_edp_mainlink_ctrl(edp_drv, 1);
+	} else {
+		mdss_edp_aux_ctrl(edp_drv, 1);
+	}
+
+	mdss_edp_irq_enable(edp_drv);
+
+	if (edp_drv->delay_link_train) {
+		mdss_edp_link_train(edp_drv);
+		edp_drv->delay_link_train = 0;
+	}
+
+	mdss_edp_wait4train(edp_drv);
+
+	edp_drv->cont_splash = 0;
+
+	pr_debug("%s:-\n", __func__);
+	return ret;
+}
+
+int mdss_edp_off(struct mdss_panel_data *pdata)
+{
+	struct mdss_edp_drv_pdata *edp_drv = NULL;
+	int ret = 0;
+
+	edp_drv = container_of(pdata, struct mdss_edp_drv_pdata,
+				panel_data);
+	if (!edp_drv) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+	pr_debug("%s:+, cont_splash=%d\n", __func__, edp_drv->cont_splash);
+
+	/* wait until link training is completed */
+	mutex_lock(&edp_drv->train_mutex);
+
+	reinit_completion(&edp_drv->idle_comp);
+	mdss_edp_state_ctrl(edp_drv, ST_PUSH_IDLE);
+
+	ret = wait_for_completion_timeout(&edp_drv->idle_comp,
+						msecs_to_jiffies(100));
+	if (ret == 0)
+		pr_err("%s: idle pattern timedout\n", __func__);
+
+	mdss_edp_state_ctrl(edp_drv, 0);
+
+	mdss_edp_sink_power_state(edp_drv, SINK_POWER_OFF);
+
+	mdss_edp_irq_disable(edp_drv);
+
+	gpio_set_value(edp_drv->gpio_panel_en, 0);
+	if (gpio_is_valid(edp_drv->gpio_lvl_en))
+		gpio_set_value(edp_drv->gpio_lvl_en, 0);
+	if (edp_drv->bl_pwm != NULL)
+		pwm_disable(edp_drv->bl_pwm);
+	edp_drv->is_pwm_enabled = 0;
+
+	mdss_edp_mainlink_reset(edp_drv);
+	mdss_edp_mainlink_ctrl(edp_drv, 0);
+
+	mdss_edp_lane_power_ctrl(edp_drv, 0);
+	mdss_edp_phy_power_ctrl(edp_drv, 0);
+
+	mdss_edp_clk_disable(edp_drv);
+	mdss_edp_unprepare_clocks(edp_drv);
+
+	mdss_edp_aux_ctrl(edp_drv, 0);
+
+	pr_debug("%s-: state_ctrl=%x\n", __func__,
+				edp_read(edp_drv->base + 0x8));
+
+	mutex_unlock(&edp_drv->train_mutex);
+	return 0;
+}
+
+static int mdss_edp_event_handler(struct mdss_panel_data *pdata,
+				  int event, void *arg)
+{
+	int rc = 0;
+
+	pr_debug("%s: event=%d\n", __func__, event);
+	switch (event) {
+	case MDSS_EVENT_UNBLANK:
+		rc = mdss_edp_on(pdata);
+		break;
+	case MDSS_EVENT_PANEL_OFF:
+		rc = mdss_edp_off(pdata);
+		break;
+	}
+	return rc;
+}
+
+/*
+ * Converts from EDID struct to mdss_panel_info
+ */
+static void mdss_edp_edid2pinfo(struct mdss_edp_drv_pdata *edp_drv)
+{
+	struct display_timing_desc *dp;
+	struct mdss_panel_info *pinfo;
+
+	dp = &edp_drv->edid.timing[0];
+	pinfo = &edp_drv->panel_data.panel_info;
+
+	pinfo->clk_rate = dp->pclk;
+	pr_debug("%s: pclk=%d\n", __func__, pinfo->clk_rate);
+
+	pinfo->xres = dp->h_addressable + dp->h_border * 2;
+	pinfo->yres = dp->v_addressable + dp->v_border * 2;
+
+	pr_debug("%s: x=%d y=%d\n", __func__, pinfo->xres, pinfo->yres);
+
+	pinfo->lcdc.h_back_porch = dp->h_blank - dp->h_fporch -
+					dp->h_sync_pulse;
+	pinfo->lcdc.h_front_porch = dp->h_fporch;
+	pinfo->lcdc.h_pulse_width = dp->h_sync_pulse;
+
+	pr_debug("%s: hporch= %d %d %d\n", __func__,
+		pinfo->lcdc.h_back_porch, pinfo->lcdc.h_front_porch,
+		pinfo->lcdc.h_pulse_width);
+
+	pinfo->lcdc.v_back_porch = dp->v_blank - dp->v_fporch
+					- dp->v_sync_pulse;
+	pinfo->lcdc.v_front_porch = dp->v_fporch;
+	pinfo->lcdc.v_pulse_width = dp->v_sync_pulse;
+
+	pr_debug("%s: vporch= %d %d %d\n", __func__,
+		pinfo->lcdc.v_back_porch, pinfo->lcdc.v_front_porch,
+		pinfo->lcdc.v_pulse_width);
+
+	pinfo->type = EDP_PANEL;
+	pinfo->pdest = DISPLAY_1;
+	pinfo->wait_cycle = 0;
+	pinfo->bpp = edp_drv->edid.color_depth * RGB_COMPONENTS;
+	pinfo->fb_num = 2;
+
+	pinfo->lcdc.border_clr = 0;	 /* black */
+	pinfo->lcdc.underflow_clr = 0xff; /* blue */
+	pinfo->lcdc.hsync_skew = 0;
+}
+
+static int mdss_edp_remove(struct platform_device *pdev)
+{
+	struct mdss_edp_drv_pdata *edp_drv = NULL;
+
+	edp_drv = platform_get_drvdata(pdev);
+
+	gpio_free(edp_drv->gpio_panel_en);
+	if (gpio_is_valid(edp_drv->gpio_lvl_en))
+		gpio_free(edp_drv->gpio_lvl_en);
+	mdss_edp_regulator_off(edp_drv);
+	iounmap(edp_drv->base);
+	iounmap(edp_drv->mmss_cc_base);
+	edp_drv->base = NULL;
+
+	return 0;
+}
+
+static int mdss_edp_device_register(struct mdss_edp_drv_pdata *edp_drv)
+{
+	int ret;
+	u32 tmp;
+
+	mdss_edp_edid2pinfo(edp_drv);
+	edp_drv->panel_data.panel_info.bl_min = 1;
+	edp_drv->panel_data.panel_info.bl_max = 255;
+	ret = of_property_read_u32(edp_drv->pdev->dev.of_node,
+		"qcom,mdss-brightness-max-level", &tmp);
+	edp_drv->panel_data.panel_info.brightness_max =
+		(!ret ? tmp : MDSS_MAX_BL_BRIGHTNESS);
+
+	edp_drv->panel_data.panel_info.edp.frame_rate =
+				DEFAULT_FRAME_RATE;/* 60 fps */
+
+	edp_drv->panel_data.event_handler = mdss_edp_event_handler;
+	edp_drv->panel_data.set_backlight = mdss_edp_set_backlight;
+
+	edp_drv->panel_data.panel_info.cont_splash_enabled =
+					edp_drv->cont_splash;
+
+	ret = mdss_register_panel(edp_drv->pdev, &edp_drv->panel_data);
+	if (ret) {
+		dev_err(&(edp_drv->pdev->dev), "unable to register eDP\n");
+		return ret;
+	}
+
+	pr_info("%s: eDP initialized\n", __func__);
+
+	return 0;
+}
+
+/*
+ * Retrieve edp base address
+ */
+static int mdss_edp_get_base_address(struct mdss_edp_drv_pdata *edp_drv)
+{
+	struct resource *res;
+
+	res = platform_get_resource_byname(edp_drv->pdev, IORESOURCE_MEM,
+			"edp_base");
+	if (!res) {
+		pr_err("%s: Unable to get the MDSS EDP resources", __func__);
+		return -ENOMEM;
+	}
+
+	edp_drv->base_size = resource_size(res);
+	edp_drv->base = ioremap(res->start, resource_size(res));
+	if (!edp_drv->base) {
+		pr_err("%s: Unable to remap EDP resources",  __func__);
+		return -ENOMEM;
+	}
+
+	pr_debug("%s: drv=%x base=%x size=%x\n", __func__,
+		(int)edp_drv, (int)edp_drv->base, edp_drv->base_size);
+
+	mdss_debug_register_base("edp",
+			edp_drv->base, edp_drv->base_size, NULL);
+
+	return 0;
+}
+
+static int mdss_edp_get_mmss_cc_base_address(struct mdss_edp_drv_pdata
+		*edp_drv)
+{
+	struct resource *res;
+
+	res = platform_get_resource_byname(edp_drv->pdev, IORESOURCE_MEM,
+			"mmss_cc_base");
+	if (!res) {
+		pr_err("%s: Unable to get the MMSS_CC resources", __func__);
+		return -ENOMEM;
+	}
+
+	edp_drv->mmss_cc_base = ioremap(res->start, resource_size(res));
+	if (!edp_drv->mmss_cc_base) {
+		pr_err("%s: Unable to remap MMSS_CC resources",  __func__);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void mdss_edp_video_ready(struct mdss_edp_drv_pdata *ep)
+{
+	pr_debug("%s: edp_video_ready\n", __func__);
+	complete(&ep->video_comp);
+}
+
+static void mdss_edp_idle_patterns_sent(struct mdss_edp_drv_pdata *ep)
+{
+	pr_debug("%s: idle_patterns_sent\n", __func__);
+	complete(&ep->idle_comp);
+}
+
+static void mdss_edp_do_link_train(struct mdss_edp_drv_pdata *ep)
+{
+	if (ep->cont_splash)
+		return;
+
+	if (!ep->inited) {
+		ep->delay_link_train++;
+		return;
+	}
+
+	mdss_edp_link_train(ep);
+}
+
+static int edp_event_thread(void *data)
+{
+	struct mdss_edp_drv_pdata *ep;
+	unsigned long flag;
+	u32 todo = 0;
+
+	ep = (struct mdss_edp_drv_pdata *)data;
+
+	while (1) {
+		wait_event(ep->event_q, (ep->event_pndx != ep->event_gndx));
+		spin_lock_irqsave(&ep->event_lock, flag);
+		if (ep->event_pndx == ep->event_gndx) {
+			spin_unlock_irqrestore(&ep->event_lock, flag);
+			break;
+		}
+		todo = ep->event_todo_list[ep->event_gndx];
+		ep->event_todo_list[ep->event_gndx++] = 0;
+		ep->event_gndx %= HPD_EVENT_MAX;
+		spin_unlock_irqrestore(&ep->event_lock, flag);
+
+		pr_debug("%s: todo=%x\n", __func__, todo);
+
+		if (todo == 0)
+			continue;
+
+		if (todo & EV_EDID_READ)
+			mdss_edp_edid_read(ep, 0);
+
+		if (todo & EV_DPCD_CAP_READ)
+			mdss_edp_dpcd_cap_read(ep);
+
+		if (todo & EV_DPCD_STATUS_READ)
+			mdss_edp_dpcd_status_read(ep);
+
+		if (todo & EV_LINK_TRAIN)
+			mdss_edp_do_link_train(ep);
+
+		if (todo & EV_VIDEO_READY)
+			mdss_edp_video_ready(ep);
+
+		if (todo & EV_IDLE_PATTERNS_SENT)
+			mdss_edp_idle_patterns_sent(ep);
+	}
+
+	return 0;
+}
+
+static void edp_send_events(struct mdss_edp_drv_pdata *ep, u32 events)
+{
+	spin_lock(&ep->event_lock);
+	ep->event_todo_list[ep->event_pndx++] = events;
+	ep->event_pndx %= HPD_EVENT_MAX;
+	wake_up(&ep->event_q);
+	spin_unlock(&ep->event_lock);
+}
+
+/*
+ * edp_isr() - eDP controller interrupt handler
+ * @irq: interrupt number (unused; dispatch comes via mdss_edp_hw)
+ * @ptr: mdss_edp_drv_pdata set up by mdss_edp_irq_setup()
+ *
+ * Reads both interrupt status registers (0x308/0x30c), acknowledges the
+ * pending bits under ep->lock, then fans the sources out: HPD triggers
+ * link training, "ready for video" / "idle patterns sent" are queued to
+ * the event thread, and any remaining status-1 bit during an AUX
+ * transfer is routed to the aux completion handlers.
+ */
+irqreturn_t edp_isr(int irq, void *ptr)
+{
+	struct mdss_edp_drv_pdata *ep = (struct mdss_edp_drv_pdata *)ptr;
+	unsigned char *base = ep->base;
+	u32 isr1, isr2, mask1, mask2;
+	u32 ack;
+
+	spin_lock(&ep->lock);
+	isr1 = edp_read(base + 0x308);
+	isr2 = edp_read(base + 0x30c);
+
+	mask1 = isr1 & ep->mask1;
+	mask2 = isr2 & ep->mask2;
+
+	isr1 &= ~mask1;	/* remove masks bit */
+	isr2 &= ~mask2;
+
+	pr_debug("%s: isr=%x mask=%x isr2=%x mask2=%x\n",
+			__func__, isr1, mask1, isr2, mask2);
+
+	/* ack bits sit one position left of their status bits */
+	ack = isr1 & EDP_INTR_STATUS1;
+	ack <<= 1;	/* ack bits */
+	ack |= mask1;
+	edp_write(base + 0x308, ack);
+
+	ack = isr2 & EDP_INTR_STATUS2;
+	ack <<= 1;	/* ack bits */
+	ack |= mask2;
+	edp_write(base + 0x30c, ack);
+	spin_unlock(&ep->lock);
+
+	if (isr1 & EDP_INTR_HPD) {
+		isr1 &= ~EDP_INTR_HPD;	/* clear */
+		edp_send_events(ep, EV_LINK_TRAIN);
+	}
+
+	if (isr2 & EDP_INTR_READY_FOR_VIDEO)
+		edp_send_events(ep, EV_VIDEO_READY);
+
+	if (isr2 & EDP_INTR_IDLE_PATTERNs_SENT)
+		edp_send_events(ep, EV_IDLE_PATTERNS_SENT);
+
+	/* any leftover status-1 bit during an AUX transfer ends it */
+	if (isr1 && ep->aux_cmd_busy) {
+		/* clear EDP_AUX_TRANS_CTRL */
+		edp_write(base + 0x318, 0);
+		/* read EDP_INTERRUPT_TRANS_NUM */
+		ep->aux_trans_num = edp_read(base + 0x310);
+
+		if (ep->aux_cmd_i2c)
+			edp_aux_i2c_handler(ep, isr1);
+		else
+			edp_aux_native_handler(ep, isr1);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Descriptor handed to the shared MDSS interrupt dispatcher; .ptr is
+ * filled with the driver context in mdss_edp_irq_setup().
+ */
+struct mdss_hw mdss_edp_hw = {
+	.hw_ndx = MDSS_HW_EDP,
+	.ptr = NULL,
+	.irq_handler = edp_isr,
+};
+
+/*
+ * mdss_edp_irq_enable() - unmask controller interrupts and enable the
+ * shared MDSS irq line for this block.  Writes the configured mask
+ * words into both status registers under edp_drv->lock.
+ */
+static void mdss_edp_irq_enable(struct mdss_edp_drv_pdata *edp_drv)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&edp_drv->lock, flags);
+	edp_write(edp_drv->base + 0x308, edp_drv->mask1);
+	edp_write(edp_drv->base + 0x30c, edp_drv->mask2);
+	spin_unlock_irqrestore(&edp_drv->lock, flags);
+
+	edp_drv->mdss_util->enable_irq(&mdss_edp_hw);
+}
+
+/*
+ * mdss_edp_irq_disable() - mask all controller interrupts and disable
+ * the shared MDSS irq line for this block.  Mirror of
+ * mdss_edp_irq_enable().
+ */
+static void mdss_edp_irq_disable(struct mdss_edp_drv_pdata *edp_drv)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&edp_drv->lock, flags);
+	edp_write(edp_drv->base + 0x308, 0x0);
+	edp_write(edp_drv->base + 0x30c, 0x0);
+	spin_unlock_irqrestore(&edp_drv->lock, flags);
+
+	edp_drv->mdss_util->disable_irq(&mdss_edp_hw);
+}
+
+/*
+ * mdss_edp_irq_setup() - claim the HPD gpio and register the eDP ISR
+ * @edp_drv: driver context
+ *
+ * Parses "gpio-panel-hpd" from the device-tree node, configures it as a
+ * 2mA no-pull input via TLMM, and registers edp_isr with the shared
+ * MDSS interrupt dispatcher.  Returns 0 on success, -ENODEV on any
+ * gpio failure (register_irq failure is only logged, matching the
+ * original behavior relied on by mdss_edp_probe()).
+ *
+ * Fix: the gpio_direction_input() error path now releases the gpio
+ * requested above; previously only the gpio_tlmm_config() path freed
+ * it, so a direction failure leaked the gpio claim.
+ */
+static int mdss_edp_irq_setup(struct mdss_edp_drv_pdata *edp_drv)
+{
+	int ret = 0;
+
+	edp_drv->gpio_panel_hpd = of_get_named_gpio_flags(
+			edp_drv->pdev->dev.of_node, "gpio-panel-hpd", 0,
+			&edp_drv->hpd_flags);
+
+	if (!gpio_is_valid(edp_drv->gpio_panel_hpd)) {
+		pr_err("%s gpio_panel_hpd %d is not valid ", __func__,
+				edp_drv->gpio_panel_hpd);
+		return -ENODEV;
+	}
+
+	ret = gpio_request(edp_drv->gpio_panel_hpd, "edp_hpd_irq_gpio");
+	if (ret) {
+		pr_err("%s unable to request gpio_panel_hpd %d", __func__,
+				edp_drv->gpio_panel_hpd);
+		return -ENODEV;
+	}
+
+	ret = gpio_tlmm_config(GPIO_CFG(
+					edp_drv->gpio_panel_hpd,
+					1,
+					GPIO_CFG_INPUT,
+					GPIO_CFG_NO_PULL,
+					GPIO_CFG_2MA),
+					GPIO_CFG_ENABLE);
+	if (ret) {
+		pr_err("%s: unable to config tlmm = %d\n", __func__,
+				edp_drv->gpio_panel_hpd);
+		gpio_free(edp_drv->gpio_panel_hpd);
+		return -ENODEV;
+	}
+
+	ret = gpio_direction_input(edp_drv->gpio_panel_hpd);
+	if (ret) {
+		pr_err("%s unable to set direction for gpio_panel_hpd %d",
+				__func__, edp_drv->gpio_panel_hpd);
+		gpio_free(edp_drv->gpio_panel_hpd);
+		return -ENODEV;
+	}
+
+	mdss_edp_hw.ptr = (void *)(edp_drv);
+
+	if (edp_drv->mdss_util->register_irq(&mdss_edp_hw))
+		pr_err("%s: mdss_register_irq failed.\n", __func__);
+
+
+	return 0;
+}
+
+
+/*
+ * mdss_edp_event_setup() - init the HPD event queue and start the
+ * consumer thread (edp_event_thread) named "mdss_edp_hpd".
+ * NOTE(review): kthread_run() may fail; its return value is not
+ * checked here - confirm whether probe should propagate that error.
+ */
+static void mdss_edp_event_setup(struct mdss_edp_drv_pdata *ep)
+{
+	init_waitqueue_head(&ep->event_q);
+	spin_lock_init(&ep->event_lock);
+
+	kthread_run(edp_event_thread, (void *)ep, "mdss_edp_hpd");
+}
+
+/*
+ * mdss_edp_probe() - bind the eDP controller platform device
+ * @pdev: platform device created from the "qcom,mdss-edp" DT node
+ *
+ * Defers until MDP has probed, checks that eDP is the configured
+ * primary interface, maps register spaces, initializes regulators,
+ * clocks, gpios and the PWM backlight, then performs a one-shot AUX
+ * bring-up to read EDID/DPCD and fill the link configuration before
+ * registering the panel device.  If the bootloader left the panel
+ * running (cont_splash), the main clocks stay voted on.
+ *
+ * Returns 0 on success, -EPROBE_DEFER while MDP is not ready, or a
+ * negative errno; error paths unwind via the goto ladder at the end.
+ * NOTE(review): the mdss_edp_irq_setup() return value is ignored, and
+ * a gpio_lvl_en failure is only logged - confirm both are intentional.
+ */
+static int mdss_edp_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct mdss_edp_drv_pdata *edp_drv;
+	struct mdss_panel_cfg *pan_cfg = NULL;
+
+	if (!mdss_is_ready()) {
+		pr_err("%s: MDP not probed yet!\n", __func__);
+		return -EPROBE_DEFER;
+	}
+
+	pan_cfg = mdss_panel_intf_type(MDSS_PANEL_INTF_EDP);
+	if (IS_ERR(pan_cfg)) {
+		return PTR_ERR(pan_cfg);
+	} else if (!pan_cfg) {
+		pr_debug("%s: not configured as prim\n", __func__);
+		return -ENODEV;
+	}
+
+	if (!pdev->dev.of_node) {
+		pr_err("%s: Failed\n", __func__);
+		return -EPERM;
+	}
+
+	edp_drv = devm_kzalloc(&pdev->dev, sizeof(*edp_drv), GFP_KERNEL);
+	if (edp_drv == NULL)
+		return -ENOMEM;
+
+	edp_drv->mdss_util = mdss_get_util_intf();
+	if (edp_drv->mdss_util == NULL) {
+		pr_err("Failed to get mdss utility functions\n");
+		return -ENODEV;
+	}
+	edp_drv->panel_data.panel_info.is_prim_panel = true;
+
+	mdss_edp_hw.irq_info = mdss_intr_line();
+	if (mdss_edp_hw.irq_info == NULL) {
+		pr_err("Failed to get mdss irq information\n");
+		return -ENODEV;
+	}
+
+	edp_drv->pdev = pdev;
+	edp_drv->pdev->id = 1;
+	edp_drv->clk_on = 0;
+	edp_drv->aux_rate = 19200000;
+	edp_drv->mask1 = EDP_INTR_MASK1;
+	edp_drv->mask2 = EDP_INTR_MASK2;
+	mutex_init(&edp_drv->emutex);
+	spin_lock_init(&edp_drv->lock);
+
+	ret = mdss_edp_get_base_address(edp_drv);
+	if (ret)
+		goto probe_err;
+
+	ret = mdss_edp_get_mmss_cc_base_address(edp_drv);
+	if (ret)
+		goto edp_base_unmap;
+
+	ret = mdss_edp_regulator_init(edp_drv);
+	if (ret)
+		goto mmss_cc_base_unmap;
+
+	ret = mdss_edp_clk_init(edp_drv);
+	if (ret)
+		goto edp_clk_deinit;
+
+	ret = mdss_edp_gpio_panel_en(edp_drv);
+	if (ret)
+		goto edp_clk_deinit;
+
+	ret = mdss_edp_gpio_lvl_en(edp_drv);
+	if (ret)
+		pr_err("%s: No gpio_lvl_en detected\n", __func__);
+
+	ret = mdss_edp_pwm_config(edp_drv);
+	if (ret)
+		goto edp_free_gpio_panel_en;
+
+	mdss_edp_irq_setup(edp_drv);
+
+	mdss_edp_aux_init(edp_drv);
+
+	mdss_edp_event_setup(edp_drv);
+
+	edp_drv->cont_splash = edp_drv->mdss_util->panel_intf_status(DISPLAY_1,
+		MDSS_PANEL_INTF_EDP) ? true : false;
+
+	/* only need aux and ahb clock for aux channel */
+	mdss_edp_prepare_aux_clocks(edp_drv);
+	mdss_edp_aux_clk_enable(edp_drv);
+
+	if (!edp_drv->cont_splash) {
+		mdss_edp_phy_pll_reset(edp_drv);
+		mdss_edp_aux_reset(edp_drv);
+		mdss_edp_mainlink_reset(edp_drv);
+		mdss_edp_phy_power_ctrl(edp_drv, 1);
+		mdss_edp_aux_ctrl(edp_drv, 1);
+	}
+
+	mdss_edp_irq_enable(edp_drv);
+
+	/* one-shot sink interrogation over the aux channel */
+	mdss_edp_edid_read(edp_drv, 0);
+	mdss_edp_dpcd_cap_read(edp_drv);
+	mdss_edp_fill_link_cfg(edp_drv);
+
+	mdss_edp_irq_disable(edp_drv);
+
+	if (!edp_drv->cont_splash) {
+		mdss_edp_aux_ctrl(edp_drv, 0);
+		mdss_edp_phy_power_ctrl(edp_drv, 0);
+	}
+
+	mdss_edp_aux_clk_disable(edp_drv);
+	mdss_edp_unprepare_aux_clocks(edp_drv);
+
+	if (edp_drv->cont_splash) { /* vote for clocks */
+		mdss_edp_prepare_clocks(edp_drv);
+		mdss_edp_clk_enable(edp_drv);
+	}
+
+	mdss_edp_device_register(edp_drv);
+
+	edp_drv->inited = true;
+
+	pr_debug("%s: done\n", __func__);
+
+	return 0;
+
+
+edp_free_gpio_panel_en:
+	gpio_free(edp_drv->gpio_panel_en);
+	if (gpio_is_valid(edp_drv->gpio_lvl_en))
+		gpio_free(edp_drv->gpio_lvl_en);
+edp_clk_deinit:
+	mdss_edp_clk_deinit(edp_drv);
+	mdss_edp_regulator_off(edp_drv);
+mmss_cc_base_unmap:
+	iounmap(edp_drv->mmss_cc_base);
+edp_base_unmap:
+	iounmap(edp_drv->base);
+probe_err:
+	return ret;
+
+}
+
+/* Device-tree match table for the eDP controller node. */
+static const struct of_device_id msm_mdss_edp_dt_match[] = {
+	{.compatible = "qcom,mdss-edp"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, msm_mdss_edp_dt_match);
+
+/*
+ * Platform driver glue.  mdss_edp_remove is defined elsewhere in this
+ * file; .shutdown is intentionally unused.
+ */
+static struct platform_driver mdss_edp_driver = {
+	.probe = mdss_edp_probe,
+	.remove = mdss_edp_remove,
+	.shutdown = NULL,
+	.driver = {
+		.name = "mdss_edp",
+		.of_match_table = msm_mdss_edp_dt_match,
+	},
+};
+
+/*
+ * mdss_edp_init() - module entry point; registers the platform driver
+ * and returns its status.
+ */
+static int __init mdss_edp_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&mdss_edp_driver);
+	if (ret) {
+		pr_err("%s driver register failed", __func__);
+		return ret;
+	}
+
+	return ret;
+}
+module_init(mdss_edp_init);
+
+/* Module exit point; unregisters the platform driver. */
+static void __exit mdss_edp_driver_cleanup(void)
+{
+	platform_driver_unregister(&mdss_edp_driver);
+}
+module_exit(mdss_edp_driver_cleanup);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("eDP controller driver");
diff --git a/drivers/video/fbdev/msm/mdss_edp.h b/drivers/video/fbdev/msm/mdss_edp.h
new file mode 100644
index 0000000..2477f36
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_edp.h
@@ -0,0 +1,380 @@
+/* Copyright (c) 2012-2014, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_EDP_H
+#define MDSS_EDP_H
+
+#include <linux/of_gpio.h>
+
+#define edp_read(offset) readl_relaxed((offset))
+#define edp_write(offset, data) writel_relaxed((data), (offset))
+
+#define AUX_CMD_FIFO_LEN	144
+#define AUX_CMD_MAX		16
+#define AUX_CMD_I2C_MAX		128
+
+#define EDP_PORT_MAX		1
+#define EDP_SINK_CAP_LEN	16
+
+#define EDP_AUX_ERR_NONE	0
+#define EDP_AUX_ERR_ADDR	-1
+#define EDP_AUX_ERR_TOUT	-2
+#define EDP_AUX_ERR_NACK	-3
+
+/* 4 bits of aux command */
+#define EDP_CMD_AUX_WRITE	0x8
+#define EDP_CMD_AUX_READ	0x9
+
+/* 4 bits of i2c command */
+#define EDP_CMD_I2C_MOT		0x4	/* i2c middle of transaction */
+#define EDP_CMD_I2C_WRITE	0x0
+#define EDP_CMD_I2C_READ	0x1
+#define EDP_CMD_I2C_STATUS	0x2	/* i2c write status request */
+
+/* cmd reply: bit 0, 1 for aux */
+#define EDP_AUX_ACK		0x0
+#define EDP_AUX_NACK	0x1
+#define EDP_AUX_DEFER	0x2
+
+/* cmd reply: bit 2, 3 for i2c */
+#define EDP_I2C_ACK		0x0
+#define EDP_I2C_NACK	0x4
+#define EDP_I2C_DEFER	0x8
+
+#define EDP_CMD_TIMEOUT	400	/* us */
+#define EDP_CMD_LEN		16
+
+#define EDP_INTR_ACK_SHIFT	1
+#define EDP_INTR_MASK_SHIFT	2
+
+#define EDP_MAX_LANE		4
+
+/* isr */
+#define EDP_INTR_HPD		BIT(0)
+#define EDP_INTR_AUX_I2C_DONE	BIT(3)
+#define EDP_INTR_WRONG_ADDR	BIT(6)
+#define EDP_INTR_TIMEOUT	BIT(9)
+#define EDP_INTR_NACK_DEFER	BIT(12)
+#define EDP_INTR_WRONG_DATA_CNT	BIT(15)
+#define EDP_INTR_I2C_NACK	BIT(18)
+#define EDP_INTR_I2C_DEFER	BIT(21)
+#define EDP_INTR_PLL_UNLOCKED	BIT(24)
+#define EDP_INTR_AUX_ERROR	BIT(27)
+
+
+#define EDP_INTR_STATUS1 \
+	(EDP_INTR_HPD | EDP_INTR_AUX_I2C_DONE| \
+	EDP_INTR_WRONG_ADDR | EDP_INTR_TIMEOUT | \
+	EDP_INTR_NACK_DEFER | EDP_INTR_WRONG_DATA_CNT | \
+	EDP_INTR_I2C_NACK | EDP_INTR_I2C_DEFER | \
+	EDP_INTR_PLL_UNLOCKED | EDP_INTR_AUX_ERROR)
+
+#define EDP_INTR_MASK1		(EDP_INTR_STATUS1 << 2)
+
+
+#define EDP_INTR_READY_FOR_VIDEO	BIT(0)
+#define EDP_INTR_IDLE_PATTERNs_SENT	BIT(3)
+#define EDP_INTR_FRAME_END		BIT(6)
+#define EDP_INTR_CRC_UPDATED		BIT(9)
+
+#define EDP_INTR_STATUS2 \
+	(EDP_INTR_READY_FOR_VIDEO | EDP_INTR_IDLE_PATTERNs_SENT | \
+	EDP_INTR_FRAME_END | EDP_INTR_CRC_UPDATED)
+
+#define EDP_INTR_MASK2		(EDP_INTR_STATUS2 << 2)
+
+
+#define EDP_MAINLINK_CTRL	0x004
+#define EDP_STATE_CTRL		0x008
+#define EDP_MAINLINK_READY	0x084
+
+#define EDP_AUX_CTRL		0x300
+#define EDP_INTERRUPT_STATUS	0x308
+#define EDP_INTERRUPT_STATUS_2	0x30c
+#define EDP_AUX_DATA		0x314
+#define EDP_AUX_TRANS_CTRL	0x318
+#define EDP_AUX_STATUS		0x324
+
+#define EDP_PHY_EDPPHY_GLB_VM_CFG0	0x510
+#define EDP_PHY_EDPPHY_GLB_VM_CFG1	0x514
+
+/* One aux-channel transaction as staged by edp_buf_add_cmd(). */
+struct edp_cmd {
+	char read;	/* 1 == read, 0 == write */
+	char i2c;	/* 1 == i2c cmd, 0 == native cmd */
+	u32 addr;	/* 20 bits */
+	char *datap;
+	int len;	/* len to be tx OR len to be rx for read */
+	char next;	/* next command */
+};
+
+/* Flat tx/rx staging buffer for the aux command fifo. */
+struct edp_buf {
+	char *start;	/* buffer start addr */
+	char *end;	/* buffer end addr */
+	int size;	/* size of buffer */
+	char *data;	/* data pointer */
+	int len;	/* data length */
+	char trans_num;	/* transaction number */
+	char i2c;	/* 1 == i2c cmd, 0 == native cmd */
+};
+
+#define DPCD_ENHANCED_FRAME	BIT(0)
+#define DPCD_TPS3	BIT(1)
+#define DPCD_MAX_DOWNSPREAD_0_5	BIT(2)
+#define DPCD_NO_AUX_HANDSHAKE	BIT(3)
+#define DPCD_PORT_0_EDID_PRESENTED	BIT(4)
+
+/* event */
+#define EV_EDP_AUX_SETUP		BIT(0)
+#define EV_EDID_READ			BIT(1)
+#define EV_DPCD_CAP_READ		BIT(2)
+#define EV_DPCD_STATUS_READ		BIT(3)
+#define EV_LINK_TRAIN			BIT(4)
+#define EV_IDLE_PATTERNS_SENT		BIT(30)
+#define EV_VIDEO_READY			BIT(31)
+
+/* edp state ctrl */
+#define ST_TRAIN_PATTERN_1		BIT(0)
+#define ST_TRAIN_PATTERN_2		BIT(1)
+#define ST_TRAIN_PATTERN_3		BIT(2)
+#define ST_SYMBOL_ERR_RATE_MEASUREMENT	BIT(3)
+#define ST_PRBS7			BIT(4)
+#define ST_CUSTOM_80_BIT_PATTERN	BIT(5)
+#define ST_SEND_VIDEO			BIT(6)
+#define ST_PUSH_IDLE			BIT(7)
+
+/* sink power state  */
+#define SINK_POWER_ON		1
+#define SINK_POWER_OFF		2
+
+#define EDP_LINK_RATE_162	6	/* 1.62G = 270M * 6 */
+#define EDP_LINK_RATE_270	10	/* 2.70G = 270M * 10 */
+#define EDP_LINK_RATE_MAX	EDP_LINK_RATE_270
+
+struct dpcd_cap {
+	char major;
+	char minor;
+	char max_lane_count;
+	char num_rx_port;
+	char i2c_speed_ctrl;
+	char scrambler_reset;
+	char enhanced_frame;
+	u32 max_link_rate;  /* 162, 270 and 540 Mb, divided by 10 */
+	u32 flags;
+	u32 rx_port0_buf_size;
+	u32 training_read_interval;/* us */
+};
+
+struct dpcd_link_status {
+	char lane_01_status;
+	char lane_23_status;
+	char interlane_align_done;
+	char downstream_port_status_changed;
+	char link_status_updated;
+	char port_0_in_sync;
+	char port_1_in_sync;
+	char req_voltage_swing[4];
+	char req_pre_emphasis[4];
+};
+
+/* Decoded EDID detailed timing descriptor (filled by
+ * edp_extract_edid_detailed_timing_description()).
+ */
+struct display_timing_desc {
+	u32 pclk;
+	u32 h_addressable; /* addressable + border = active */
+	u32 h_border;
+	u32 h_blank;	/* fporch + bporch + sync_pulse = blank */
+	u32 h_fporch;
+	u32 h_sync_pulse;
+	u32 v_addressable; /* addressable + border = active */
+	u32 v_border;
+	u32 v_blank;	/* fporch + bporch + sync_pulse = blank */
+	u32 v_fporch;
+	u32 v_sync_pulse;
+	u32 width_mm;
+	u32 height_mm;
+	u32 interlaced;
+	u32 stereo;
+	u32 sync_type;
+	u32 sync_separate;
+	u32 vsync_pol;
+	u32 hsync_pol;
+};
+
+#define EDID_DISPLAY_PORT_SUPPORT 0x05
+
+struct edp_edid {
+	char id_name[4];
+	short id_product;
+	char version;
+	char revision;
+	char video_intf;	/* edp == 0x5 */
+	char color_depth;	/* 6, 8, 10, 12 and 14 bits */
+	char color_format;	/* RGB 4:4:4, YCrCb 4:4:4, Ycrcb 4:2:2 */
+	char dpm;		/* display power management */
+	char sync_digital;	/* 1 = digital */
+	char sync_separate;	/* 1 = separate */
+	char vsync_pol;		/* 0 = negative, 1 = positive */
+	char hsync_pol;		/* 0 = negative, 1 = positive */
+	char ext_block_cnt;
+	struct display_timing_desc timing[4];
+};
+
+struct edp_statistic {
+	u32 intr_hpd;
+	u32 intr_aux_i2c_done;
+	u32 intr_wrong_addr;
+	u32 intr_tout;
+	u32 intr_nack_defer;
+	u32 intr_wrong_data_cnt;
+	u32 intr_i2c_nack;
+	u32 intr_i2c_defer;
+	u32 intr_pll_unlock;
+	u32 intr_crc_update;
+	u32 intr_frame_end;
+	u32 intr_idle_pattern_sent;
+	u32 intr_ready_for_video;
+	u32 aux_i2c_tx;
+	u32 aux_i2c_rx;
+	u32 aux_native_tx;
+	u32 aux_native_rx;
+};
+
+
+#define DPCD_LINK_VOLTAGE_MAX	4
+#define DPCD_LINK_PRE_EMPHASIS_MAX	4
+
+#define HPD_EVENT_MAX   8
+
+struct mdss_edp_drv_pdata {
+	/* device driver */
+	int (*on)(struct mdss_panel_data *pdata);
+	int (*off)(struct mdss_panel_data *pdata);
+	struct platform_device *pdev;
+
+	struct mutex emutex;
+	int clk_cnt;
+	int cont_splash;
+	bool inited;
+	int delay_link_train;
+
+	/* edp specific */
+	unsigned char *base;
+	int base_size;
+	unsigned char *mmss_cc_base;
+	u32 mask1;
+	u32 mask2;
+
+	struct mdss_panel_data panel_data;
+	struct mdss_util_intf *mdss_util;
+
+	int edp_on_cnt;
+	int edp_off_cnt;
+
+	u32 pixel_rate;
+	u32 aux_rate;
+	char link_rate;	/* X 27000000 for real rate */
+	char lane_cnt;
+	char train_link_rate;	/* X 27000000 for real rate */
+	char train_lane_cnt;
+
+	struct edp_edid edid;
+	struct dpcd_cap dpcd;
+
+	/* regulators */
+	struct regulator *vdda_vreg;
+
+	/* clocks */
+	struct clk *aux_clk;
+	struct clk *pixel_clk;
+	struct clk *ahb_clk;
+	struct clk *link_clk;
+	struct clk *mdp_core_clk;
+	int clk_on;
+
+	/* gpios */
+	int gpio_panel_en;
+	int gpio_lvl_en;
+
+	/* backlight */
+	struct pwm_device *bl_pwm;
+	bool is_pwm_enabled;
+	int lpg_channel;
+	int pwm_period;
+
+	/* hpd */
+	int gpio_panel_hpd;
+	enum of_gpio_flags hpd_flags;
+	int hpd_irq;
+
+	/* aux */
+	struct completion aux_comp;
+	struct completion train_comp;
+	struct completion idle_comp;
+	struct completion video_comp;
+	struct mutex aux_mutex;
+	struct mutex train_mutex;
+	u32 aux_cmd_busy;
+	u32 aux_cmd_i2c;
+	int aux_trans_num;
+	int aux_error_num;
+	u32 aux_ctrl_reg;
+	struct edp_buf txp;
+	struct edp_buf rxp;
+	char txbuf[256];
+	char rxbuf[256];
+	struct dpcd_link_status link_status;
+	char v_level;
+	char p_level;
+	/* transfer unit */
+	char tu_desired;
+	char valid_boundary;
+	char delay_start;
+	u32 bpp;
+	struct edp_statistic edp_stat;
+
+	/* event */
+	wait_queue_head_t event_q;
+	u32 event_pndx;
+	u32 event_gndx;
+	u32 event_todo_list[HPD_EVENT_MAX];
+	spinlock_t event_lock;
+	spinlock_t lock;
+};
+
+int mdss_edp_aux_clk_enable(struct mdss_edp_drv_pdata *edp_drv);
+void mdss_edp_aux_clk_disable(struct mdss_edp_drv_pdata *edp_drv);
+int mdss_edp_clk_enable(struct mdss_edp_drv_pdata *edp_drv);
+void mdss_edp_clk_disable(struct mdss_edp_drv_pdata *edp_drv);
+int mdss_edp_clk_init(struct mdss_edp_drv_pdata *edp_drv);
+void mdss_edp_clk_deinit(struct mdss_edp_drv_pdata *edp_drv);
+int mdss_edp_prepare_aux_clocks(struct mdss_edp_drv_pdata *edp_drv);
+void mdss_edp_unprepare_aux_clocks(struct mdss_edp_drv_pdata *edp_drv);
+int mdss_edp_prepare_clocks(struct mdss_edp_drv_pdata *edp_drv);
+void mdss_edp_unprepare_clocks(struct mdss_edp_drv_pdata *edp_drv);
+
+void mdss_edp_dpcd_cap_read(struct mdss_edp_drv_pdata *edp);
+int mdss_edp_dpcd_status_read(struct mdss_edp_drv_pdata *edp);
+void mdss_edp_edid_read(struct mdss_edp_drv_pdata *edp, int block);
+int mdss_edp_link_train(struct mdss_edp_drv_pdata *edp);
+void edp_aux_i2c_handler(struct mdss_edp_drv_pdata *edp, u32 isr);
+void edp_aux_native_handler(struct mdss_edp_drv_pdata *edp, u32 isr);
+void mdss_edp_aux_init(struct mdss_edp_drv_pdata *ep);
+
+void mdss_edp_fill_link_cfg(struct mdss_edp_drv_pdata *ep);
+void mdss_edp_sink_power_down(struct mdss_edp_drv_pdata *ep);
+void mdss_edp_state_ctrl(struct mdss_edp_drv_pdata *ep, u32 state);
+int mdss_edp_sink_power_state(struct mdss_edp_drv_pdata *ep, char state);
+void mdss_edp_lane_power_ctrl(struct mdss_edp_drv_pdata *ep, int up);
+void mdss_edp_config_ctrl(struct mdss_edp_drv_pdata *ep);
+
+void mdss_edp_clk_debug(unsigned char *edp_base, unsigned char *mmss_cc_base);
+
+#endif /* MDSS_EDP_H */
diff --git a/drivers/video/fbdev/msm/mdss_edp_aux.c b/drivers/video/fbdev/msm/mdss_edp_aux.c
new file mode 100644
index 0000000..dc12f3b
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_edp_aux.c
@@ -0,0 +1,1342 @@
+/* Copyright (c) 2013-2014, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/semaphore.h>
+#include <linux/uaccess.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/bug.h>
+#include <linux/of_gpio.h>
+#include <linux/clk/msm-clk.h>
+
+#include <mach/hardware.h>
+#include <mach/gpio.h>
+#include <mach/dma.h>
+
+#include "mdss_panel.h"
+#include "mdss_edp.h"
+
+/*
+ * edp buffer operation
+ */
+/*
+ * edp_buf_init() - bind an edp_buf to backing storage and reset all
+ * cursors/counters.  Returns the write cursor (== buf).
+ */
+static char *edp_buf_init(struct edp_buf *eb, char *buf, int size)
+{
+	eb->start = buf;
+	eb->size = size;
+	eb->data = eb->start;
+	eb->end = eb->start + eb->size;
+	eb->len = 0;
+	eb->trans_num = 0;
+	eb->i2c = 0;
+	return eb->data;
+}
+
+/*
+ * edp_buf_reset() - rewind a bound edp_buf for reuse without touching
+ * its backing storage.  Returns the (reset) write cursor.
+ */
+static char *edp_buf_reset(struct edp_buf *eb)
+{
+	eb->data = eb->start;
+	eb->len = 0;
+	eb->trans_num = 0;
+	eb->i2c = 0;
+	return eb->data;
+}
+
+/* Advance the write cursor by @len bytes already written into eb->data. */
+static char *edp_buf_push(struct edp_buf *eb, int len)
+{
+	eb->data += len;
+	eb->len += len;
+	return eb->data;
+}
+
+/* Bytes of free space remaining between the write cursor and the end. */
+static int edp_buf_trailing(struct edp_buf *eb)
+{
+	return (int)(eb->end - eb->data);
+}
+
+/*
+ * edp_buf_add_cmd() - serialize one edp_cmd into the tx staging buffer
+ * in the 4-byte-header-plus-payload layout the aux fifo expects.
+ * NO native and i2c command mix allowed within one buffer.
+ *
+ * Returns cmd->len - 1 on success (callers treat <= 0 as "stop"), or 0
+ * when the buffer would overflow.
+ * NOTE(review): a 1-byte command therefore also returns 0 - callers
+ * such as edp_aux_write_cmds() still transmit in that case; confirm
+ * this edge case is intended.
+ */
+static int edp_buf_add_cmd(struct edp_buf *eb, struct edp_cmd *cmd)
+{
+	char data;
+	char *bp, *cp;
+	int i, len;
+
+	if (cmd->read)	/* read */
+		len = 4;
+	else
+		len = cmd->len + 4;
+
+	if (edp_buf_trailing(eb) < len)
+		return 0;
+
+	/*
+	 * cmd fifo only has depth of 144 bytes
+	 * limit buf length to 128 bytes here
+	 */
+	if ((eb->len + len) > 128)
+		return 0;
+
+	/* 4-byte header: addr[19:16] (+read flag), addr[15:8], addr[7:0],
+	 * len - 1 */
+	bp = eb->data;
+	data = cmd->addr >> 16;
+	data &=  0x0f;	/* 4 addr bits */
+	if (cmd->read)
+		data |=  BIT(4);
+	*bp++ = data;
+	*bp++ = cmd->addr >> 8;
+	*bp++ = cmd->addr;
+	*bp++ = cmd->len - 1;
+
+	if (!cmd->read) { /* write */
+		cp = cmd->datap;
+		for (i = 0; i < cmd->len; i++)
+			*bp++ = *cp++;
+	}
+	edp_buf_push(eb, len);
+
+	if (cmd->i2c)
+		eb->i2c++;
+
+	eb->trans_num++;	/* Increase transaction number */
+
+	return cmd->len - 1;
+}
+
+/*
+ * edp_cmd_fifo_tx() - copy the staged command bytes into the hardware
+ * aux data fifo and kick the transfer.
+ * @tp:   staging buffer filled by edp_buf_add_cmd()
+ * @base: controller register base
+ *
+ * The first write carries INDEX_WRITE (bit 31) to reset the fifo index;
+ * the GO bit (bit 9) in EDP_AUX_TRANS_CTRL starts the transaction, with
+ * bit 8 selecting i2c-over-aux.  Returns the number of bytes queued
+ * (0 when the buffer is empty).  Completion arrives via edp_isr().
+ */
+static int edp_cmd_fifo_tx(struct edp_buf *tp, unsigned char *base)
+{
+	u32 data;
+	char *dp;
+	int len, cnt;
+
+	len = tp->len;	/* total byte to cmd fifo */
+	if (len == 0)
+		return 0;
+
+	cnt = 0;
+	dp = tp->start;
+
+	while (cnt < len) {
+		data = *dp; /* data byte */
+		data <<= 8;
+		data &= 0x00ff00; /* index = 0, write */
+		if (cnt == 0)
+			data |= BIT(31);  /* INDEX_WRITE */
+		pr_debug("%s: data=%x\n", __func__, data);
+		edp_write(base + EDP_AUX_DATA, data);
+		cnt++;
+		dp++;
+	}
+
+	data = (tp->trans_num - 1);
+	if (tp->i2c)
+		data |= BIT(8); /* I2C */
+
+	data |= BIT(9); /* GO */
+	pr_debug("%s: data=%x\n", __func__, data);
+	edp_write(base + EDP_AUX_TRANS_CTRL, data);
+
+	return tp->len;
+}
+
+/*
+ * edp_cmd_fifo_rx() - drain @len reply bytes from the hardware aux
+ * data fifo into @rp.
+ *
+ * Resets the fifo read index (INDEX_WRITE + read), discards the first
+ * word (echo of the command byte), then extracts one payload byte from
+ * bits [15:8] of each subsequent word.  Returns @len.
+ */
+static int edp_cmd_fifo_rx(struct edp_buf *rp, int len, unsigned char *base)
+{
+	u32 data;
+	char *dp;
+	int i;
+
+	data = 0; /* index = 0 */
+	data |= BIT(31);  /* INDEX_WRITE */
+	data |= BIT(0);	/* read */
+	edp_write(base + EDP_AUX_DATA, data);
+
+	dp = rp->data;
+
+	/* discard first byte */
+	data = edp_read(base + EDP_AUX_DATA);
+	for (i = 0; i < len; i++) {
+		data = edp_read(base + EDP_AUX_DATA);
+		pr_debug("%s: data=%x\n", __func__, data);
+		*dp++ = (char)((data >> 8) & 0xff);
+	}
+
+	rp->len = len;
+	return len;
+}
+
+/*
+ * edp_aux_write_cmds() - serialize a chain of write commands, transmit
+ * them, and block until the ISR signals completion.
+ * @ep:  driver context (aux_mutex serializes channel users)
+ * @cmd: first command; cm->next != 0 links to the following array slot
+ *
+ * Returns bytes transmitted on success, or the negative EDP_AUX_ERR_*
+ * code recorded by the aux interrupt handlers.
+ * NOTE(review): wait_for_completion() has no timeout - a lost aux
+ * interrupt would block the caller forever; consider the _timeout
+ * variant.
+ */
+static int edp_aux_write_cmds(struct mdss_edp_drv_pdata *ep,
+					struct edp_cmd *cmd)
+{
+	struct edp_cmd *cm;
+	struct edp_buf *tp;
+	int len, ret;
+
+	mutex_lock(&ep->aux_mutex);
+	ep->aux_cmd_busy = 1;
+
+	tp = &ep->txp;
+	edp_buf_reset(tp);
+
+	cm = cmd;
+	while (cm) {
+		pr_debug("%s: i2c=%d read=%d addr=%x len=%d next=%d\n",
+			__func__, cm->i2c, cm->read, cm->addr, cm->len,
+			cm->next);
+		ret = edp_buf_add_cmd(tp, cm);
+		if (ret <= 0)
+			break;
+		if (cm->next == 0)
+			break;
+		cm++;
+	}
+
+	if (tp->i2c)
+		ep->aux_cmd_i2c = 1;
+	else
+		ep->aux_cmd_i2c = 0;
+
+	reinit_completion(&ep->aux_comp);
+
+	len = edp_cmd_fifo_tx(&ep->txp, ep->base);
+
+	/* completed from edp_aux_native_handler()/edp_aux_i2c_handler() */
+	wait_for_completion(&ep->aux_comp);
+
+	if (ep->aux_error_num == EDP_AUX_ERR_NONE)
+		ret = len;
+	else
+		ret = ep->aux_error_num;
+
+	ep->aux_cmd_busy = 0;
+	mutex_unlock(&ep->aux_mutex);
+	return  ret;
+}
+
+/*
+ * edp_aux_read_cmds() - transmit a chain of read commands and collect
+ * the reply bytes into ep->rxp.
+ * @ep:   driver context (aux_mutex serializes channel users)
+ * @cmds: first command; cm->next != 0 links to the following array slot
+ *
+ * Returns the number of bytes read, or the negative EDP_AUX_ERR_* code
+ * recorded by the aux interrupt handlers.
+ * NOTE(review): same untimed wait_for_completion() caveat as
+ * edp_aux_write_cmds().
+ */
+static int edp_aux_read_cmds(struct mdss_edp_drv_pdata *ep,
+				struct edp_cmd *cmds)
+{
+	struct edp_cmd *cm;
+	struct edp_buf *tp;
+	struct edp_buf *rp;
+	int len, ret;
+
+	mutex_lock(&ep->aux_mutex);
+	ep->aux_cmd_busy = 1;
+
+	tp = &ep->txp;
+	rp = &ep->rxp;
+	edp_buf_reset(tp);
+	edp_buf_reset(rp);
+
+	cm = cmds;
+	len = 0;
+	while (cm) {
+		pr_debug("%s: i2c=%d read=%d addr=%x len=%d next=%d\n",
+			__func__, cm->i2c, cm->read, cm->addr, cm->len,
+			cm->next);
+		ret = edp_buf_add_cmd(tp, cm);
+		len += cm->len;
+		if (ret <= 0)
+			break;
+		if (cm->next == 0)
+			break;
+		cm++;
+	}
+
+	if (tp->i2c)
+		ep->aux_cmd_i2c = 1;
+	else
+		ep->aux_cmd_i2c = 0;
+
+	reinit_completion(&ep->aux_comp);
+
+	edp_cmd_fifo_tx(tp, ep->base);
+
+	wait_for_completion(&ep->aux_comp);
+
+	if (ep->aux_error_num == EDP_AUX_ERR_NONE)
+		ret = edp_cmd_fifo_rx(rp, len, ep->base);
+	else
+		ret = ep->aux_error_num;
+
+	ep->aux_cmd_busy = 0;
+	mutex_unlock(&ep->aux_mutex);
+
+	return ret;
+}
+
+/*
+ * edp_aux_native_handler() - classify the result of a native aux
+ * transfer from the isr status bits and wake the waiting requester.
+ * Runs in hard-irq context (called from edp_isr()).
+ *
+ * NOTE(review): the NACK_DEFER check is a plain `if`, not `else if`,
+ * so it overrides any earlier classification - presumably deliberate
+ * (NACK wins); confirm against the original intent.
+ */
+void edp_aux_native_handler(struct mdss_edp_drv_pdata *ep, u32 isr)
+{
+
+	pr_debug("%s: isr=%x\n", __func__, isr);
+
+	if (isr & EDP_INTR_AUX_I2C_DONE)
+		ep->aux_error_num = EDP_AUX_ERR_NONE;
+	else if (isr & EDP_INTR_WRONG_ADDR)
+		ep->aux_error_num = EDP_AUX_ERR_ADDR;
+	else if (isr & EDP_INTR_TIMEOUT)
+		ep->aux_error_num = EDP_AUX_ERR_TOUT;
+	if (isr & EDP_INTR_NACK_DEFER)
+		ep->aux_error_num = EDP_AUX_ERR_NACK;
+
+	complete(&ep->aux_comp);
+}
+
+/*
+ * edp_aux_i2c_handler() - classify the result of an i2c-over-aux
+ * transfer and wake the waiting requester.  A DONE with i2c NACK/DEFER
+ * bits still counts as a NACK.  Runs in hard-irq context.
+ */
+void edp_aux_i2c_handler(struct mdss_edp_drv_pdata *ep, u32 isr)
+{
+
+	pr_debug("%s: isr=%x\n", __func__, isr);
+
+	if (isr & EDP_INTR_AUX_I2C_DONE) {
+		if (isr & (EDP_INTR_I2C_NACK | EDP_INTR_I2C_DEFER))
+			ep->aux_error_num = EDP_AUX_ERR_NACK;
+		else
+			ep->aux_error_num = EDP_AUX_ERR_NONE;
+	} else {
+		if (isr & EDP_INTR_WRONG_ADDR)
+			ep->aux_error_num = EDP_AUX_ERR_ADDR;
+		else if (isr & EDP_INTR_TIMEOUT)
+			ep->aux_error_num = EDP_AUX_ERR_TOUT;
+		if (isr & EDP_INTR_NACK_DEFER)
+			ep->aux_error_num = EDP_AUX_ERR_NACK;
+		if (isr & EDP_INTR_I2C_NACK)
+			ep->aux_error_num = EDP_AUX_ERR_NACK;
+		if (isr & EDP_INTR_I2C_DEFER)
+			ep->aux_error_num = EDP_AUX_ERR_NACK;
+	}
+
+	complete(&ep->aux_comp);
+}
+
+/*
+ * edp_aux_write_buf() - convenience wrapper: write @len bytes from
+ * @buf to aux address @addr as a single (native or i2c) transaction.
+ * Length is clamped to 8 bits.  Returns edp_aux_write_cmds() result.
+ */
+static int edp_aux_write_buf(struct mdss_edp_drv_pdata *ep, u32 addr,
+				char *buf, int len, int i2c)
+{
+	struct edp_cmd	cmd;
+
+	cmd.read = 0;
+	cmd.i2c = i2c;
+	cmd.addr = addr;
+	cmd.datap = buf;
+	cmd.len = len & 0x0ff;
+	cmd.next = 0;
+
+	return edp_aux_write_cmds(ep, &cmd);
+}
+
+/*
+ * edp_aux_read_buf() - convenience wrapper: read @len bytes from aux
+ * address @addr into ep->rxp as a single (native or i2c) transaction.
+ * Length is clamped to 8 bits.  Returns edp_aux_read_cmds() result.
+ */
+static int edp_aux_read_buf(struct mdss_edp_drv_pdata *ep, u32 addr,
+				int len, int i2c)
+{
+	struct edp_cmd cmd;
+
+	cmd.read = 1;
+	cmd.i2c = i2c;
+	cmd.addr = addr;
+	cmd.datap = NULL;
+	cmd.len = len & 0x0ff;
+	cmd.next = 0;
+
+	return edp_aux_read_cmds(ep, &cmd);
+}
+
+/*
+ * edid standard header bytes
+ */
+static char edid_hdr[8] = {0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00};
+
+/*
+ * edp_edid_buf_error() - validate one 128-byte EDID block
+ * @buf: raw block read over the aux channel
+ * @len: number of bytes available in @buf
+ *
+ * Checks the minimum length, the byte checksum (all 128 bytes must sum
+ * to 0 mod 256) and the fixed 8-byte EDID header.  Returns 0 when the
+ * block is valid, -EINVAL otherwise.
+ *
+ * Fix: the header was compared with strcmp(); since the EDID header
+ * begins with 0x00 both operands look like empty strings, so the check
+ * always passed (and passed a non-terminated buffer to strcmp).  Use
+ * memcmp() over the full 8 header bytes instead.
+ */
+int edp_edid_buf_error(char *buf, int len)
+{
+	char *bp;
+	int i;
+	char csum = 0;
+
+	bp = buf;
+	if (len < 128) {
+		pr_err("%s: Error: len=%x\n", __func__, len);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < 128; i++)
+		csum += *bp++;
+
+	if (csum != 0) {
+		pr_err("%s: Error: csum=%x\n", __func__, csum);
+		return -EINVAL;
+	}
+
+	if (memcmp(buf, edid_hdr, sizeof(edid_hdr))) {
+		pr_err("%s: Error: header\n", __func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+
+/*
+ * edp_extract_edid_manufacturer() - decode the 3-letter PNP vendor ID
+ * packed as three 5-bit values ('A' == 1) in EDID bytes 8-9.
+ */
+void edp_extract_edid_manufacturer(struct edp_edid *edid, char *buf)
+{
+	char *bp;
+	char data;
+
+	bp = &buf[8];
+	data = *bp & 0x7f;
+	data >>= 2;
+	edid->id_name[0] = 'A' + data - 1;
+	data = *bp & 0x03;
+	data <<= 3;
+	bp++;
+	data |= (*bp >> 5);
+	edid->id_name[1] = 'A' + data - 1;
+	data = *bp & 0x1f;
+	edid->id_name[2] = 'A' + data - 1;
+	edid->id_name[3] = 0;
+
+	pr_debug("%s: edid manufacturer = %s\n", __func__, edid->id_name);
+}
+
+/*
+ * edp_extract_edid_product() - decode the little-endian 16-bit product
+ * code from EDID bytes 0x0a-0x0b.
+ */
+void edp_extract_edid_product(struct edp_edid *edid, char *buf)
+{
+	char *bp;
+	u32 data;
+
+	bp = &buf[0x0a];
+	data =  *bp;
+	edid->id_product = *bp++;
+	edid->id_product &= 0x0ff;
+	data = *bp & 0x0ff;
+	data <<= 8;
+	edid->id_product |= data;
+
+	pr_debug("%s: edid product = 0x%x\n", __func__, edid->id_product);
+};
+
+/* Decode the EDID structure version/revision from bytes 0x12-0x13. */
+void edp_extract_edid_version(struct edp_edid *edid, char *buf)
+{
+	edid->version = buf[0x12];
+	edid->revision = buf[0x13];
+	pr_debug("%s: edid version = %d.%d\n", __func__, edid->version,
+			edid->revision);
+};
+
+/* Decode the extension-block count from EDID byte 0x7e. */
+void edp_extract_edid_ext_block_cnt(struct edp_edid *edid, char *buf)
+{
+	edid->ext_block_cnt = buf[0x7e];
+	pr_debug("%s: edid extension = %d\n", __func__,
+			edid->ext_block_cnt);
+};
+
+/*
+ * edp_extract_edid_video_support() - decode the video input definition
+ * (EDID byte 0x14).  Bit 7 set means a digital interface; bits 6-4
+ * encode color depth as 4 + 2*n bits per component.  An analog panel
+ * is only logged as an error.
+ */
+void edp_extract_edid_video_support(struct edp_edid *edid, char *buf)
+{
+	char *bp;
+
+	bp = &buf[0x14];
+	if (*bp & 0x80) {
+		edid->video_intf = *bp & 0x0f;
+		/* 6, 8, 10, 12, 14 and 16 bit per component */
+		edid->color_depth = ((*bp & 0x70) >> 4); /* color bit depth */
+		if (edid->color_depth) {
+			edid->color_depth *= 2;
+			edid->color_depth += 4;
+		}
+		pr_debug("%s: Digital Video intf=%d color_depth=%d\n",
+			 __func__, edid->video_intf, edid->color_depth);
+	} else {
+		pr_err("%s: Error, Analog video interface\n", __func__);
+	}
+};
+
+/*
+ * edp_extract_edid_feature() - decode the feature-support byte (EDID
+ * byte 0x18): display power management flag and, for digital panels,
+ * the supported color format bits.
+ */
+void edp_extract_edid_feature(struct edp_edid *edid, char *buf)
+{
+	char *bp;
+	char data;
+
+	bp = &buf[0x18];
+	data = *bp;
+	data &= 0xe0;
+	data >>= 5;
+	if (data == 0x01)
+		edid->dpm = 1; /* display power management */
+
+	if (edid->video_intf) {
+		if (*bp & 0x80) {
+			/* RGB 4:4:4, YcrCb 4:4:4 and YCrCb 4:2:2 */
+			edid->color_format = *bp & 0x18;
+			edid->color_format >>= 3;
+		}
+	}
+
+	pr_debug("%s: edid dpm=%d color_format=%d\n", __func__,
+			edid->dpm, edid->color_format);
+};
+
+/*
+ * edp_extract_edid_detailed_timing_description() - decode the first
+ * detailed timing descriptor (EDID bytes 0x36-0x47) into
+ * edid->timing[0].
+ *
+ * Field bytes carry the low 8 bits; the shared "upper nibble" bytes
+ * supply the high bits of each pair of fields.  Bails out early when
+ * both pclk and h_addressable are zero (descriptor not a timing).
+ */
+void edp_extract_edid_detailed_timing_description(struct edp_edid *edid,
+		char *buf)
+{
+	char *bp;
+	u32 data;
+	struct display_timing_desc *dp;
+
+	dp = &edid->timing[0];
+
+	bp = &buf[0x36];
+	dp->pclk = 0;
+	dp->pclk = *bp++; /* byte 0x36 */
+	dp->pclk |= (*bp++ << 8); /* byte 0x37 */
+
+	dp->h_addressable = *bp++; /* byte 0x38 */
+
+	if (dp->pclk == 0 && dp->h_addressable == 0)
+		return;	/* Not detailed timing definition */
+
+	dp->pclk *= 10000;
+
+	dp->h_blank = *bp++;/* byte 0x39 */
+	data = *bp & 0xf0; /* byte 0x3A */
+	data  <<= 4;
+	dp->h_addressable |= data;
+
+	data = *bp++ & 0x0f;
+	data <<= 8;
+	dp->h_blank |= data;
+
+	dp->v_addressable = *bp++; /* byte 0x3B */
+	dp->v_blank = *bp++; /* byte 0x3C */
+	data = *bp & 0xf0; /* byte 0x3D */
+	data  <<= 4;
+	dp->v_addressable |= data;
+
+	data = *bp++ & 0x0f;
+	data <<= 8;
+	dp->v_blank |= data;
+
+	dp->h_fporch = *bp++; /* byte 0x3E */
+	dp->h_sync_pulse = *bp++; /* byte 0x3F */
+
+	dp->v_fporch = *bp & 0x0f0; /* byte 0x40 */
+	dp->v_fporch  >>= 4;
+	dp->v_sync_pulse = *bp & 0x0f;
+
+	bp++;
+	data = *bp & 0xc0; /* byte 0x41 */
+	data <<= 2;
+	dp->h_fporch |= data;
+
+	data = *bp & 0x30;
+	data <<= 4;
+	dp->h_sync_pulse |= data;
+
+	data = *bp & 0x0c;
+	data <<= 2;
+	dp->v_fporch |= data;
+
+	data = *bp & 0x03;
+	data <<= 4;
+	dp->v_sync_pulse |= data;
+
+	bp++;
+	dp->width_mm = *bp++; /* byte 0x42 */
+	dp->height_mm = *bp++; /* byte 0x43 */
+	data = *bp & 0x0f0; /* byte 0x44 */
+	data <<= 4;
+	dp->width_mm |= data;
+	data = *bp & 0x0f;
+	data <<= 8;
+	dp->height_mm |= data;
+
+	bp++;
+	dp->h_border = *bp++; /* byte 0x45 */
+	dp->v_border = *bp++; /* byte 0x46 */
+
+	/* progressive or interlaced */
+	dp->interlaced = *bp & 0x80; /* byte 0x47 */
+
+	dp->stereo = *bp & 0x60;
+	dp->stereo >>= 5;
+
+	data = *bp & 0x1e; /* bit 4,3,2 1*/
+	data >>= 1;
+	dp->sync_type = data & 0x08;
+	dp->sync_type >>= 3;	/* analog or digital */
+	if (dp->sync_type) {
+		dp->sync_separate = data & 0x04;
+		dp->sync_separate >>= 2;
+		if (dp->sync_separate) {
+			if (data & 0x02)
+				dp->vsync_pol = 1; /* positive */
+			else
+				dp->vsync_pol = 0;/* negative */
+
+			if (data & 0x01)
+				dp->hsync_pol = 1; /* positive */
+			else
+				dp->hsync_pol = 0; /* negative */
+		}
+	}
+
+	pr_debug("%s: pixel_clock = %d\n", __func__, dp->pclk);
+
+	pr_debug("%s: horizontal=%d, blank=%d, porch=%d, sync=%d\n"
+			, __func__, dp->h_addressable, dp->h_blank,
+			dp->h_fporch, dp->h_sync_pulse);
+	pr_debug("%s: vertical=%d, blank=%d, porch=%d, vsync=%d\n"
+			, __func__, dp->v_addressable, dp->v_blank,
+			dp->v_fporch, dp->v_sync_pulse);
+	pr_debug("%s: panel size in mm, width=%d height=%d\n", __func__,
+			dp->width_mm, dp->height_mm);
+	pr_debug("%s: panel border horizontal=%d vertical=%d\n", __func__,
+				dp->h_border, dp->v_border);
+	pr_debug("%s: flags: interlaced=%d stereo=%d sync_type=%d sync_sep=%d\n"
+			, __func__, dp->interlaced, dp->stereo,
+			dp->sync_type, dp->sync_separate);
+	pr_debug("%s: polarity vsync=%d, hsync=%d", __func__,
+			dp->vsync_pol, dp->hsync_pol);
+}
+
+
+/*
+ * EDID structure can be found in VESA standard here:
+ * http://read.pudn.com/downloads110/ebook/456020/E-EDID%20Standard.pdf
+ *
+ * following table contains default edid
+ * static char edid_raw_data[128] = {
+ * 0, 255, 255, 255, 255, 255, 255, 0,
+ * 6, 175, 93, 48, 0, 0, 0, 0, 0, 22,
+ * 1, 4,
+ * 149, 26, 14, 120, 2,
+ * 164, 21,158, 85, 78, 155, 38, 15, 80, 84,
+ * 0, 0, 0,
+ * 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ * 29, 54, 128, 160, 112, 56, 30, 64, 48, 32, 142, 0, 0, 144, 16,0,0,24,
+ * 19, 36, 128, 160, 112, 56, 30, 64, 48, 32, 142, 0, 0, 144, 16,0,0,24,
+ * 0, 0, 0, 254, 0, 65, 85, 79, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32,
+ * 0, 0, 0, 254, 0, 66, 49, 49, 54, 72, 65, 78, 48, 51, 46, 48, 32, 10,
+ * 0, 75 };
+ */
+
+/*
+ * Probe the sink over the AUX channel (address 0x50, I2C-over-AUX
+ * mode per the final argument) up to five times, 100 ms apart, to
+ * confirm the channel responds.
+ *
+ * Returns 1 when the channel is ready, 0 on timeout.
+ */
+static int edp_aux_chan_ready(struct mdss_edp_drv_pdata *ep)
+{
+	int cnt, ret;
+	char data = 0;
+
+	for (cnt = 5; cnt; cnt--) {
+		ret = edp_aux_write_buf(ep, 0x50, &data, 1, 1);
+		pr_debug("%s: ret=%d\n", __func__, ret);
+		if (ret >= 0)
+			break;
+		msleep(100);
+	}
+
+	if (cnt <= 0) {
+		pr_err("%s: aux chan NOT ready\n", __func__);
+		return 0;
+	}
+
+	return 1;
+}
+
+/*
+ * Read one 128-byte EDID block from the sink (address 0x50 over AUX)
+ * into ep->rxp, retrying up to five times while the read fails or
+ * edp_edid_buf_error() rejects the buffer, then decode the individual
+ * EDID fields into ep->edid.
+ *
+ * Returns 128 on success, 0 when the AUX channel is not ready, or
+ * -EINVAL when no clean EDID block could be obtained.
+ *
+ * NOTE(review): the @block argument is unused - only the base EDID
+ * block is ever fetched; confirm whether extension blocks are needed.
+ */
+static int edp_sink_edid_read(struct mdss_edp_drv_pdata *ep, int block)
+{
+	struct edp_buf *rp;
+	int cnt, rlen;
+	int ret = 0;
+
+	ret = edp_aux_chan_ready(ep);
+	if (ret == 0) {
+		pr_err("%s: aux chan NOT ready\n", __func__);
+		return ret;
+	}
+
+	for (cnt = 5; cnt; cnt--) {
+		rlen = edp_aux_read_buf(ep, 0x50, 128, 1);
+		if (rlen > 0) {
+			pr_debug("%s: rlen=%d\n", __func__, rlen);
+
+			/* keep the buffer only if it passes validation */
+			rp = &ep->rxp;
+			if (!edp_edid_buf_error(rp->data, rp->len))
+				break;
+		}
+	}
+
+	if (cnt <= 0) {
+		pr_err("%s: Failed\n", __func__);
+		return -EINVAL;
+	}
+
+	edp_extract_edid_manufacturer(&ep->edid, rp->data);
+	edp_extract_edid_product(&ep->edid, rp->data);
+	edp_extract_edid_version(&ep->edid, rp->data);
+	edp_extract_edid_ext_block_cnt(&ep->edid, rp->data);
+	edp_extract_edid_video_support(&ep->edid, rp->data);
+	edp_extract_edid_feature(&ep->edid, rp->data);
+	edp_extract_edid_detailed_timing_description(&ep->edid, rp->data);
+
+	return 128;
+}
+
+/*
+ * Read the first @len bytes of the sink's DPCD capability area
+ * (starting at address 0) and decode them into ep->dpcd.
+ *
+ * Decoding stops (plain return) as soon as the received byte count
+ * is exhausted, so a short AUX read yields a partially filled
+ * capability structure.
+ */
+static void edp_sink_capability_read(struct mdss_edp_drv_pdata *ep,
+				int len)
+{
+	char *bp;
+	char data;
+	struct dpcd_cap *cap;
+	struct edp_buf *rp;
+	int rlen;
+
+	rlen = edp_aux_read_buf(ep, 0, len, 0);
+	if (rlen <= 0) {
+		pr_err("%s: edp aux read failed\n", __func__);
+		return;
+	}
+	rp = &ep->rxp;
+	cap = &ep->dpcd;
+	bp = rp->data;
+
+	data = *bp++; /* byte 0 */
+	cap->major = (data >> 4) & 0x0f;
+	cap->minor = data & 0x0f;
+	if (--rlen <= 0)
+		return;
+	pr_debug("%s: version: %d.%d\n", __func__, cap->major, cap->minor);
+
+	data = *bp++; /* byte 1 */
+	/* 162, 270 and 540 MB, symbol rate, NOT bit rate */
+	cap->max_link_rate = data;
+	if (--rlen <= 0)
+		return;
+	pr_debug("%s: link_rate=%d\n", __func__, cap->max_link_rate);
+
+	data = *bp++; /* byte 2 */
+	if (data & BIT(7))
+		cap->enhanced_frame++;
+
+	/* bit 6: sink supports training pattern 3 */
+	if (data & 0x40)
+		cap->flags |=  DPCD_TPS3;
+	data &= 0x0f;
+	cap->max_lane_count = data;
+	if (--rlen <= 0)
+		return;
+	pr_debug("%s: lane_count=%d\n", __func__, cap->max_lane_count);
+
+	data = *bp++; /* byte 3 */
+	if (data & BIT(0)) {
+		cap->flags |= DPCD_MAX_DOWNSPREAD_0_5;
+		pr_debug("%s: max_downspread\n", __func__);
+	}
+
+	if (data & BIT(6)) {
+		cap->flags |= DPCD_NO_AUX_HANDSHAKE;
+		pr_debug("%s: NO Link Training\n", __func__);
+	}
+	if (--rlen <= 0)
+		return;
+
+	data = *bp++; /* byte 4 */
+	cap->num_rx_port = (data & BIT(0)) + 1;
+	pr_debug("%s: rx_ports=%d", __func__, cap->num_rx_port);
+	if (--rlen <= 0)
+		return;
+
+	bp += 3;	/* skip 5, 6 and 7 */
+	rlen -= 3;
+	if (rlen <= 0)
+		return;
+
+	data = *bp++; /* byte 8 */
+	if (data & BIT(1)) {
+		cap->flags |= DPCD_PORT_0_EDID_PRESENTED;
+		pr_debug("%s: edid presented\n", __func__);
+	}
+	if (--rlen <= 0)
+		return;
+
+	data = *bp++; /* byte 9 */
+	/* encoded as (N + 1) * 32 bytes */
+	cap->rx_port0_buf_size = (data + 1) * 32;
+	pr_debug("%s: lane_buf_size=%d", __func__, cap->rx_port0_buf_size);
+	if (--rlen <= 0)
+		return;
+
+	bp += 2; /* skip 10, 11 port1 capability */
+	rlen -= 2;
+	if (rlen <= 0)
+		return;
+
+	data = *bp++;	/* byte 12 */
+	cap->i2c_speed_ctrl = data;
+	if (cap->i2c_speed_ctrl > 0)
+		pr_debug("%s: i2c_rate=%d", __func__, cap->i2c_speed_ctrl);
+	if (--rlen <= 0)
+		return;
+
+	data = *bp++;	/* byte 13 */
+	cap->scrambler_reset = data & BIT(0);
+	pr_debug("%s: scrambler_reset=%d\n", __func__,
+					cap->scrambler_reset);
+
+	if (data & BIT(1))
+		cap->enhanced_frame++;
+
+	pr_debug("%s: enhanced_framing=%d\n", __func__,
+					cap->enhanced_frame);
+	if (--rlen <= 0)
+		return;
+
+	data = *bp++; /* byte 14 */
+	/* 0 means the default 4000 us training-read interval */
+	if (data == 0)
+		cap->training_read_interval = 4000; /* us */
+	else
+		cap->training_read_interval = 4000 * data; /* us */
+	pr_debug("%s: training_interval=%d\n", __func__,
+			 cap->training_read_interval);
+}
+
+/*
+ * Read @len bytes of link/sink status starting at DPCD address 0x202
+ * and unpack lane status, alignment/update flags and the per-lane
+ * voltage-swing / pre-emphasis adjust requests into ep->link_status.
+ *
+ * Returns @len on success, 0 when the AUX read came back short.
+ */
+static int edp_link_status_read(struct mdss_edp_drv_pdata *ep, int len)
+{
+	char *bp;
+	char data;
+	struct dpcd_link_status *sp;
+	struct edp_buf *rp;
+	int rlen;
+
+	pr_debug("%s: len=%d", __func__, len);
+	/* skip byte 0x200 and 0x201 */
+	rlen = edp_aux_read_buf(ep, 0x202, len, 0);
+	if (rlen < len) {
+		pr_err("%s: edp aux read failed\n", __func__);
+		return 0;
+	}
+	rp = &ep->rxp;
+	bp = rp->data;
+	sp = &ep->link_status;
+
+	data = *bp++; /* byte 0x202 */
+	sp->lane_01_status = data; /* lane 0, 1 */
+
+	data = *bp++; /* byte 0x203 */
+	sp->lane_23_status = data; /* lane 2, 3 */
+
+	data = *bp++; /* byte 0x204 */
+	sp->interlane_align_done = (data & BIT(0));
+	sp->downstream_port_status_changed = (data & BIT(6));
+	sp->link_status_updated = (data & BIT(7));
+
+	data = *bp++; /* byte 0x205 */
+	sp->port_0_in_sync = (data & BIT(0));
+	sp->port_1_in_sync = (data & BIT(1));
+
+	/* two bits of swing then two bits of pre-emphasis per lane */
+	data = *bp++; /* byte 0x206 */
+	sp->req_voltage_swing[0] = data & 0x03;
+	data >>= 2;
+	sp->req_pre_emphasis[0] = data & 0x03;
+	data >>= 2;
+	sp->req_voltage_swing[1] = data & 0x03;
+	data >>= 2;
+	sp->req_pre_emphasis[1] = data & 0x03;
+
+	data = *bp++; /* byte 0x207 */
+	sp->req_voltage_swing[2] = data & 0x03;
+	data >>= 2;
+	sp->req_pre_emphasis[2] = data & 0x03;
+	data >>= 2;
+	sp->req_voltage_swing[3] = data & 0x03;
+	data >>= 2;
+	sp->req_pre_emphasis[3] = data & 0x03;
+
+	return len;
+}
+
+/*
+ * Program the negotiated link bandwidth and lane count into the sink
+ * (DPCD 0x100/0x101), setting the enhanced-framing bit when the sink
+ * advertised support for it.  Returns the AUX write result.
+ */
+static int edp_cap_lane_rate_set(struct mdss_edp_drv_pdata *ep)
+{
+	char buf[4];
+	int len = 0;
+	struct dpcd_cap *cap;
+
+	cap = &ep->dpcd;
+
+	pr_debug("%s: bw=%x lane=%d\n", __func__, ep->link_rate, ep->lane_cnt);
+	buf[0] = ep->link_rate;
+	buf[1] = ep->lane_cnt;
+	if (cap->enhanced_frame)
+		buf[1] |= 0x80;
+	len = edp_aux_write_buf(ep, 0x100, buf, 2, 0);
+
+	return len;
+}
+
+/*
+ * Write the same voltage-swing / pre-emphasis setting to all four
+ * TRAINING_LANEx_SET registers (DPCD 0x103..0x106).  Bit 2 of each
+ * field flags that the maximum level has been reached.
+ */
+static int edp_lane_set_write(struct mdss_edp_drv_pdata *ep, int voltage_level,
+		int pre_emphasis_level)
+{
+	int i;
+	char buf[4];
+
+	if (voltage_level >= DPCD_LINK_VOLTAGE_MAX)
+		voltage_level |= 0x04;
+
+	if (pre_emphasis_level >= DPCD_LINK_PRE_EMPHASIS_MAX)
+		pre_emphasis_level |= 0x04;
+
+	/* pre-emphasis occupies bits 4:3 of the lane-set byte */
+	pre_emphasis_level <<= 3;
+
+	for (i = 0; i < 4; i++)
+		buf[i] = voltage_level | pre_emphasis_level;
+
+	pr_debug("%s: p|v=0x%x", __func__, voltage_level | pre_emphasis_level);
+	return edp_aux_write_buf(ep, 0x103, buf, 4, 0);
+}
+
+/* Write TRAINING_PATTERN_SET (DPCD 0x102) with the requested pattern. */
+static int edp_train_pattern_set_write(struct mdss_edp_drv_pdata *ep,
+						int pattern)
+{
+	char buf[4];
+
+	pr_debug("%s: pattern=%x\n", __func__, pattern);
+	buf[0] = pattern;
+	return edp_aux_write_buf(ep, 0x102, buf, 1, 0);
+}
+
+/*
+ * Check the CR_DONE bit (bit 0 of each lane's status nibble) for every
+ * active lane in the previously read link status.
+ * Returns 1 when clock recovery completed on all lanes, else 0.
+ */
+static int edp_sink_clock_recovery_done(struct mdss_edp_drv_pdata *ep)
+{
+	u32 mask;
+	u32 data;
+
+	if (ep->lane_cnt == 1) {
+		mask = 0x01;	/* lane 0 */
+		data = ep->link_status.lane_01_status;
+	} else if (ep->lane_cnt == 2) {
+		mask = 0x011; /* lane 0, 1 */
+		data = ep->link_status.lane_01_status;
+	} else {
+		mask = 0x01111; /* lane 0, 1, 2, 3 */
+		data = ep->link_status.lane_23_status;
+		data <<= 8;
+		data |= ep->link_status.lane_01_status;
+	}
+
+	pr_debug("%s: data=%x mask=%x\n", __func__, data, mask);
+	data &= mask;
+	if (data == mask) /* all done */
+		return 1;
+
+	return 0;
+}
+
+/*
+ * Check CR_DONE, CHANNEL_EQ_DONE and SYMBOL_LOCKED (bits 2:0 of each
+ * lane's status nibble) plus interlane alignment for every active
+ * lane.  Returns 1 when channel equalization completed, else 0.
+ */
+static int edp_sink_channel_eq_done(struct mdss_edp_drv_pdata *ep)
+{
+	u32 mask;
+	u32 data;
+
+	pr_debug("%s:\n", __func__);
+
+	if (!ep->link_status.interlane_align_done) { /* not align */
+		pr_err("%s: interlane align failed\n", __func__);
+		return 0;
+	}
+
+	if (ep->lane_cnt == 1) {
+		mask = 0x7;
+		data = ep->link_status.lane_01_status;
+	} else if (ep->lane_cnt == 2) {
+		mask = 0x77;
+		data = ep->link_status.lane_01_status;
+	} else {
+		mask = 0x7777;
+		data = ep->link_status.lane_23_status;
+		data <<= 8;
+		data |= ep->link_status.lane_01_status;
+	}
+
+	pr_debug("%s: data=%x mask=%x\n", __func__, data, mask);
+
+	data &= mask;
+	if (data == mask)/* all done */
+		return 1;
+
+	return 0;
+}
+
+/*
+ * Fold the per-lane voltage-swing and pre-emphasis adjust requests
+ * from the last link-status read into single drive levels: the host
+ * drives all lanes identically, so take the maximum requested level
+ * across the active lanes and store it in ep->v_level / ep->p_level.
+ */
+void edp_sink_train_set_adjust(struct mdss_edp_drv_pdata *ep)
+{
+	int i;
+	int max = 0;
+
+
+	/* use the max level across lanes */
+	for (i = 0; i < ep->lane_cnt; i++) {
+		pr_debug("%s: lane=%d req_voltage_swing=%d",
+			__func__, i, ep->link_status.req_voltage_swing[i]);
+		if (max < ep->link_status.req_voltage_swing[i])
+			max = ep->link_status.req_voltage_swing[i];
+	}
+
+	ep->v_level = max;
+
+	/* use the max level across lanes */
+	max = 0;
+	for (i = 0; i < ep->lane_cnt; i++) {
+		pr_debug(" %s: lane=%d req_pre_emphasis=%d",
+			__func__, i, ep->link_status.req_pre_emphasis[i]);
+		if (max < ep->link_status.req_pre_emphasis[i])
+			max = ep->link_status.req_pre_emphasis[i];
+	}
+
+	ep->p_level = max;
+	pr_debug("%s: v_level=%d, p_level=%d", __func__,
+					ep->v_level, ep->p_level);
+}
+
+/*
+ * Start host-side generation of link-training pattern @train (1-based)
+ * by writing the corresponding bit into EDP_STATE_CTRL, then poll
+ * EDP_MAINLINK_READY for the matching ready bit.
+ */
+static void edp_host_train_set(struct mdss_edp_drv_pdata *ep, int train)
+{
+	int bit, cnt;
+	u32 data;
+
+
+	bit = 1;
+	bit <<= (train - 1);
+	pr_debug("%s: bit=%d train=%d\n", __func__, bit, train);
+	edp_write(ep->base + EDP_STATE_CTRL, bit);
+
+	/* ready bits start at bit 3, one per training pattern */
+	bit = 8;
+	bit <<= (train - 1);
+	cnt = 10;
+	while (cnt--) {
+		data = edp_read(ep->base + EDP_MAINLINK_READY);
+		if (data & bit)
+			break;
+	}
+
+	/*
+	 * while (cnt--) leaves cnt == -1 when the poll loop exhausts all
+	 * attempts, so the previous "cnt == 0" test never fired on a real
+	 * timeout and fired spuriously when the ready bit was seen on the
+	 * final attempt.  cnt < 0 is the actual timeout condition.
+	 */
+	if (cnt < 0)
+		pr_err("%s: set link_train=%d failed\n", __func__, train);
+}
+
+/* PHY drive codes per (level, step); 0xFF marks a disallowed combination */
+char vm_pre_emphasis[4][4] = {
+	{0x03, 0x06, 0x09, 0x0C},	/* pe0, 0 db */
+	{0x03, 0x06, 0x09, 0xFF},	/* pe1, 3.5 db */
+	{0x03, 0x06, 0xFF, 0xFF},	/* pe2, 6.0 db */
+	{0x03, 0xFF, 0xFF, 0xFF}	/* pe3, 9.5 db */
+};
+
+/* voltage swing, 0.2v and 1.0v are not supported */
+char vm_voltage_swing[4][4] = {
+	{0x14, 0x18, 0x1A, 0x1E}, /* sw0, 0.4v  */
+	{0x18, 0x1A, 0x1E, 0xFF}, /* sw1, 0.6 v */
+	{0x1A, 0x1E, 0xFF, 0xFF}, /* sw2, 0.8 v */
+	{0x1E, 0xFF, 0xFF, 0xFF}  /* sw3, 1.2 v, optional */
+};
+
+/*
+ * Apply the current v_level/p_level pair to the PHY (GLB_VM_CFG0/1)
+ * and mirror it to the sink's training-lane registers.  Combinations
+ * the tables mark 0xFF are skipped entirely.
+ *
+ * NOTE(review): both tables are indexed [v_level][p_level] even though
+ * vm_pre_emphasis rows are labelled pe0..pe3 - confirm the intended
+ * table orientation against the PHY programming guide.
+ */
+static void edp_voltage_pre_emphasise_set(struct mdss_edp_drv_pdata *ep)
+{
+	u32 value0 = 0;
+	u32 value1 = 0;
+
+	pr_debug("%s: v=%d p=%d\n", __func__, ep->v_level, ep->p_level);
+
+	value0 = vm_pre_emphasis[(int)(ep->v_level)][(int)(ep->p_level)];
+	value1 = vm_voltage_swing[(int)(ep->v_level)][(int)(ep->p_level)];
+
+	/* Configure host and panel only if both values are allowed */
+	if (value0 != 0xFF && value1 != 0xFF) {
+		edp_write(ep->base + EDP_PHY_EDPPHY_GLB_VM_CFG0, value0);
+		edp_write(ep->base + EDP_PHY_EDPPHY_GLB_VM_CFG1, value1);
+		pr_debug("%s: value0=0x%x value1=0x%x", __func__,
+						value0, value1);
+		edp_lane_set_write(ep, ep->v_level, ep->p_level);
+	}
+
+}
+
+/*
+ * Link-training phase 1 (clock recovery): transmit pattern 1, then
+ * repeatedly wait the DPCD training interval, read link status and
+ * adjust drive levels until the sink reports CR_DONE on all lanes.
+ * Gives up (returns -1) when the max voltage swing is reached or the
+ * sink requests the same swing five times in a row; returns 0 on
+ * success.
+ */
+static int edp_start_link_train_1(struct mdss_edp_drv_pdata *ep)
+{
+	int tries, old_v_level;
+	int ret = 0;
+	int usleep_time;
+
+	pr_debug("%s:", __func__);
+
+	edp_host_train_set(ep, 0x01); /* train_1 */
+	edp_voltage_pre_emphasise_set(ep);
+	edp_train_pattern_set_write(ep, 0x21); /* train_1 */
+
+	tries = 0;
+	old_v_level = ep->v_level;
+	while (1) {
+		usleep_time = ep->dpcd.training_read_interval;
+		usleep_range(usleep_time, usleep_time + 10);
+
+		edp_link_status_read(ep, 6);
+		if (edp_sink_clock_recovery_done(ep)) {
+			ret = 0;
+			break;
+		}
+
+		if (ep->v_level == DPCD_LINK_VOLTAGE_MAX) {
+			ret = -1;
+			break;	/* quit */
+		}
+
+		/* five identical swing requests in a row means no progress */
+		if (old_v_level == ep->v_level) {
+			tries++;
+			if (tries >= 5) {
+				ret = -1;
+				break;	/* quit */
+			}
+		} else {
+			tries = 0;
+			old_v_level = ep->v_level;
+		}
+
+		edp_sink_train_set_adjust(ep);
+		edp_voltage_pre_emphasise_set(ep);
+	}
+
+	return ret;
+}
+
+/*
+ * Link-training phase 2 (channel equalization): transmit pattern 3
+ * when the sink supports TPS3, otherwise pattern 2, and iterate the
+ * status/adjust loop up to five times.  Returns 0 on success, -1 on
+ * failure.
+ */
+static int edp_start_link_train_2(struct mdss_edp_drv_pdata *ep)
+{
+	int tries;
+	int ret = 0;
+	int usleep_time;
+	char pattern;
+
+	pr_debug("%s:", __func__);
+
+	if (ep->dpcd.flags & DPCD_TPS3)
+		pattern = 0x03;
+	else
+		pattern = 0x02;
+
+	edp_host_train_set(ep, pattern); /* train_2 */
+	edp_voltage_pre_emphasise_set(ep);
+	edp_train_pattern_set_write(ep, pattern | 0x20);/* train_2 */
+
+	tries = 0;
+	while (1) {
+		usleep_time = ep->dpcd.training_read_interval;
+		usleep_range(usleep_time, usleep_time + 10);
+
+		edp_link_status_read(ep, 6);
+
+		if (edp_sink_channel_eq_done(ep)) {
+			ret = 0;
+			break;
+		}
+
+		tries++;
+		if (tries > 5) {
+			ret = -1;
+			break;
+		}
+
+		edp_sink_train_set_adjust(ep);
+		edp_voltage_pre_emphasise_set(ep);
+	}
+
+	return ret;
+}
+
+/*
+ * After a training failure, try a lower link rate with more lanes:
+ * keep the configuration only if the resulting link bandwidth still
+ * exceeds the panel's pixel bandwidth.  Returns 0 when a new
+ * rate/lane pair was adopted, -EINVAL otherwise.
+ *
+ * NOTE(review): "rate -= 4" only maps 0x0a (2.7G) down to 0x06
+ * (1.62G); from 0x14 (5.4G) it yields an invalid code - confirm
+ * against the supported link-rate codes.
+ */
+static int edp_link_rate_down_shift(struct mdss_edp_drv_pdata *ep)
+{
+	u32 prate, lrate;
+	int rate, lane, max_lane;
+	int changed = 0;
+
+	rate = ep->link_rate;
+	lane = ep->lane_cnt;
+	max_lane = ep->dpcd.max_lane_count;
+
+	/* required pixel bandwidth in kilobytes/sec */
+	prate = ep->pixel_rate;
+	prate /= 1000;	/* avoid using 64 bits */
+	prate *= ep->bpp;
+	prate /= 8; /* byte */
+
+	if (rate > EDP_LINK_RATE_162 && rate <= EDP_LINK_RATE_MAX) {
+		rate -= 4;		/* reduce rate */
+		changed++;
+	}
+
+	if (changed) {
+		if (lane >= 1 && lane < max_lane)
+			lane <<= 1;	/* increase lane */
+
+		/* available link bandwidth in kilobytes/sec */
+		lrate = 270000000; /* 270M */
+		lrate /= 1000; /* avoid using 64 bits */
+		lrate *= rate;
+		lrate /= 10; /* byte, 10 bits --> 8 bits */
+		lrate *= lane;
+
+		pr_debug("%s: new lrate=%u prate=%u rate=%d lane=%d p=%d b=%d\n",
+		__func__, lrate, prate, rate, lane, ep->pixel_rate, ep->bpp);
+
+		if (lrate > prate) {
+			ep->link_rate = rate;
+			ep->lane_cnt = lane;
+			pr_debug("%s: new rate=%d %d\n", __func__, rate, lane);
+			return 0;
+		}
+	}
+
+	/* add calculation later */
+	return -EINVAL;
+}
+
+/*
+ * Tell the sink to stop emitting training patterns (TRAINING_PATTERN_SET
+ * = 0) and give it one training interval to settle.
+ */
+static void edp_clear_training_pattern(struct mdss_edp_drv_pdata *ep)
+{
+	int usleep_time;
+
+	pr_debug("%s:\n", __func__);
+	edp_train_pattern_set_write(ep, 0);
+	usleep_time = ep->dpcd.training_read_interval;
+	usleep_range(usleep_time, usleep_time + 10);
+}
+
+/*
+ * Full link-training sequence: verify the AUX channel, enable the
+ * mainlink and power the sink, then run training phase 1 (clock
+ * recovery) and phase 2 (channel equalization).  On failure of either
+ * phase the link rate is down-shifted and training restarts from
+ * scratch; when no lower rate is available the function gives up.
+ * Always clears the training pattern and completes train_comp before
+ * returning.  Returns 0 on success, negative on failure.
+ */
+static int edp_aux_link_train(struct mdss_edp_drv_pdata *ep)
+{
+	int ret = 0;
+	int usleep_time;
+
+	ret = edp_aux_chan_ready(ep);
+	if (ret == 0) {
+		pr_err("%s: LINK Train failed: aux chan NOT ready\n", __func__);
+		complete(&ep->train_comp);
+		return ret;
+	}
+
+	edp_write(ep->base + EDP_MAINLINK_CTRL, 0x1);
+
+	mdss_edp_sink_power_state(ep, SINK_POWER_ON);
+
+train_start:
+	ep->v_level = 0; /* start from default level */
+	ep->p_level = 0;
+	edp_cap_lane_rate_set(ep);
+	mdss_edp_config_ctrl(ep);
+	mdss_edp_lane_power_ctrl(ep, 1);
+
+	mdss_edp_state_ctrl(ep, 0);
+	edp_clear_training_pattern(ep);
+	usleep_time = ep->dpcd.training_read_interval;
+	usleep_range(usleep_time, usleep_time + 10);
+
+	ret = edp_start_link_train_1(ep);
+	if (ret < 0) {
+		if (edp_link_rate_down_shift(ep) == 0) {
+			goto train_start;
+		} else {
+			pr_err("%s: Training 1 failed\n", __func__);
+			ret = -1;
+			goto clear;
+		}
+	}
+
+	pr_debug("%s: Training 1 completed successfully\n", __func__);
+
+	mdss_edp_state_ctrl(ep, 0);
+	edp_clear_training_pattern(ep);
+	ret = edp_start_link_train_2(ep);
+	if (ret < 0) {
+		if (edp_link_rate_down_shift(ep) == 0) {
+			goto train_start;
+		} else {
+			pr_err("%s: Training 2 failed\n", __func__);
+			ret = -1;
+			goto clear;
+		}
+	}
+
+	pr_debug("%s: Training 2 completed successfully\n", __func__);
+
+	mdss_edp_state_ctrl(ep, ST_SEND_VIDEO);
+clear:
+	edp_clear_training_pattern(ep);
+
+	complete(&ep->train_comp);
+	return ret;
+}
+
+/* Read the first 16 bytes of the sink's DPCD capability area. */
+void mdss_edp_dpcd_cap_read(struct mdss_edp_drv_pdata *ep)
+{
+	edp_sink_capability_read(ep, 16);
+}
+
+/*
+ * Poll the sink's link status.  Returns non-zero when port 0 reports
+ * it is in sync with the transmitter, 0 otherwise or on read failure.
+ */
+int mdss_edp_dpcd_status_read(struct mdss_edp_drv_pdata *ep)
+{
+	struct dpcd_link_status *sp;
+	int ret = 0; /* not sync */
+
+	ret = edp_link_status_read(ep, 6);
+
+	if (ret) {
+		sp = &ep->link_status;
+		ret = sp->port_0_in_sync; /* 1 == sync */
+	}
+
+	return ret;
+}
+
+/*
+ * Seed the link configuration from the first EDID timing descriptor
+ * and the sink's advertised maximum lane count and link rate.
+ */
+void mdss_edp_fill_link_cfg(struct mdss_edp_drv_pdata *ep)
+{
+	struct display_timing_desc *dp;
+
+	dp = &ep->edid.timing[0];
+	ep->pixel_rate = dp->pclk;
+	ep->lane_cnt = ep->dpcd.max_lane_count;
+	ep->link_rate = ep->dpcd.max_link_rate;
+
+	pr_debug("%s: pclk=%d rate=%d lane=%d\n", __func__,
+		ep->pixel_rate, ep->link_rate, ep->lane_cnt);
+
+}
+
+/* Fetch and parse the sink's EDID; the result code is ignored here. */
+void mdss_edp_edid_read(struct mdss_edp_drv_pdata *ep, int block)
+{
+	edp_sink_edid_read(ep, block);
+}
+
+/* Write the sink power state (DPCD 0x600).  Returns the AUX result. */
+int mdss_edp_sink_power_state(struct mdss_edp_drv_pdata *ep, char state)
+{
+	int ret;
+
+	ret = edp_aux_write_buf(ep, 0x600, &state, 1, 0);
+	pr_debug("%s: state=%d ret=%d\n", __func__, state, ret);
+	return ret;
+}
+
+/* Serialized entry point for link training (train_mutex). */
+int mdss_edp_link_train(struct mdss_edp_drv_pdata *ep)
+{
+	int ret;
+
+	mutex_lock(&ep->train_mutex);
+	ret = edp_aux_link_train(ep);
+	mutex_unlock(&ep->train_mutex);
+	return ret;
+}
+
+/*
+ * One-time initialization of the AUX state: locks, completions and the
+ * tx/rx scratch buffers.  train_comp and video_comp are pre-completed
+ * so the first waiters do not block.
+ */
+void mdss_edp_aux_init(struct mdss_edp_drv_pdata *ep)
+{
+	mutex_init(&ep->aux_mutex);
+	mutex_init(&ep->train_mutex);
+	init_completion(&ep->aux_comp);
+	init_completion(&ep->train_comp);
+	init_completion(&ep->idle_comp);
+	init_completion(&ep->video_comp);
+	complete(&ep->train_comp); /* make non block at first time */
+	complete(&ep->video_comp); /* make non block at first time */
+
+	edp_buf_init(&ep->txp, ep->txbuf, sizeof(ep->txbuf));
+	edp_buf_init(&ep->rxp, ep->rxbuf, sizeof(ep->rxbuf));
+}
diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c
new file mode 100644
index 0000000..64f462f
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_fb.c
@@ -0,0 +1,5124 @@
+/*
+ * Core MDSS framebuffer driver.
+ *
+ * Copyright (C) 2007 Google Incorporated
+ * Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/videodev2.h>
+#include <linux/bootmem.h>
+#include <linux/console.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-buf.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/memory.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/msm_mdp.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/proc_fs.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/version.h>
+#include <linux/vmalloc.h>
+#include <linux/sync.h>
+#include <linux/sw_sync.h>
+#include <linux/file.h>
+#include <linux/kthread.h>
+#include <linux/dma-buf.h>
+#include "mdss_fb.h"
+#include "mdss_mdp_splash_logo.h"
+#define CREATE_TRACE_POINTS
+#include "mdss_debug.h"
+#include "mdss_smmu.h"
+#include "mdss_mdp.h"
+#include "mdp3_ctrl.h"
+
+#ifdef CONFIG_FB_MSM_TRIPLE_BUFFER
+#define MDSS_FB_NUM 3
+#else
+#define MDSS_FB_NUM 2
+#endif
+
+#ifndef EXPORT_COMPAT
+#define EXPORT_COMPAT(x)
+#endif
+
+#define MAX_FBI_LIST 32
+
+#ifndef TARGET_HW_MDSS_MDP3
+#define BLANK_FLAG_LP	FB_BLANK_NORMAL
+#define BLANK_FLAG_ULP	FB_BLANK_VSYNC_SUSPEND
+#else
+#define BLANK_FLAG_LP	FB_BLANK_VSYNC_SUSPEND
+#define BLANK_FLAG_ULP	FB_BLANK_NORMAL
+#endif
+
+/*
+ * Time period for fps calulation in micro seconds.
+ * Default value is set to 1 sec.
+ */
+#define MDP_TIME_PERIOD_CALC_FPS_US	1000000
+
+/* Table of registered fb_info instances; fbi_list_index is the next slot. */
+static struct fb_info *fbi_list[MAX_FBI_LIST];
+static int fbi_list_index;
+
+/* 16-entry pseudo palette exposed through fb_info (entry 0 black). */
+static u32 mdss_fb_pseudo_palette[16] = {
+	0x00000000, 0xffffffff, 0xffffffff, 0xffffffff,
+	0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+	0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+	0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff
+};
+
+/* MDP interface ops supplied by the MDP driver; set at registration. */
+static struct msm_mdp_interface *mdp_instance;
+
+static int mdss_fb_register(struct msm_fb_data_type *mfd);
+static int mdss_fb_open(struct fb_info *info, int user);
+static int mdss_fb_release(struct fb_info *info, int user);
+static int mdss_fb_release_all(struct fb_info *info, bool release_all);
+static int mdss_fb_pan_display(struct fb_var_screeninfo *var,
+			       struct fb_info *info);
+static int mdss_fb_check_var(struct fb_var_screeninfo *var,
+			     struct fb_info *info);
+static int mdss_fb_set_par(struct fb_info *info);
+static int mdss_fb_blank_sub(int blank_mode, struct fb_info *info,
+			     int op_enable);
+static int mdss_fb_suspend_sub(struct msm_fb_data_type *mfd);
+static int mdss_fb_ioctl(struct fb_info *info, unsigned int cmd,
+			 unsigned long arg, struct file *file);
+static int mdss_fb_fbmem_ion_mmap(struct fb_info *info,
+		struct vm_area_struct *vma);
+static int mdss_fb_alloc_fb_ion_memory(struct msm_fb_data_type *mfd,
+		size_t size);
+static void mdss_fb_release_fences(struct msm_fb_data_type *mfd);
+static int __mdss_fb_sync_buf_done_callback(struct notifier_block *p,
+		unsigned long val, void *data);
+
+static int __mdss_fb_display_thread(void *data);
+static int mdss_fb_pan_idle(struct msm_fb_data_type *mfd);
+static int mdss_fb_send_panel_event(struct msm_fb_data_type *mfd,
+					int event, void *arg);
+static void mdss_fb_set_mdp_sync_pt_threshold(struct msm_fb_data_type *mfd,
+		int type);
+/*
+ * Timer callback: no frame update arrived within the timeout, so flag
+ * NOTIFY_TYPE_NO_UPDATE and wake anyone blocked in NOTIFY_UPDATE_STOP.
+ * @data carries the msm_fb_data_type pointer.
+ */
+void mdss_fb_no_update_notify_timer_cb(unsigned long data)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)data;
+
+	if (!mfd) {
+		pr_err("%s mfd NULL\n", __func__);
+		return;
+	}
+	mfd->no_update.value = NOTIFY_TYPE_NO_UPDATE;
+	complete(&mfd->no_update.comp);
+}
+
+/*
+ * Deliver a backlight-change notification: store @notification_type
+ * and complete the update/no_update completions when waiters are
+ * registered (ref_count > 0), then poke the MDP5 or MDP3 sysfs
+ * backlight-event nodes.  Each lock is dropped before complete() and
+ * re-taken afterwards; value is written outside the lock, matching
+ * how the waiters consume it.
+ */
+void mdss_fb_bl_update_notify(struct msm_fb_data_type *mfd,
+		uint32_t notification_type)
+{
+#ifndef TARGET_HW_MDSS_MDP3
+	struct mdss_overlay_private *mdp5_data = NULL;
+#endif
+#ifdef TARGET_HW_MDSS_MDP3
+	struct mdp3_session_data *mdp3_session = NULL;
+#endif
+	if (!mfd) {
+		pr_err("%s mfd NULL\n", __func__);
+		return;
+	}
+	mutex_lock(&mfd->update.lock);
+	if (mfd->update.is_suspend) {
+		mutex_unlock(&mfd->update.lock);
+		return;
+	}
+	if (mfd->update.ref_count > 0) {
+		mutex_unlock(&mfd->update.lock);
+		mfd->update.value = notification_type;
+		complete(&mfd->update.comp);
+		mutex_lock(&mfd->update.lock);
+	}
+	mutex_unlock(&mfd->update.lock);
+
+	mutex_lock(&mfd->no_update.lock);
+	if (mfd->no_update.ref_count > 0) {
+		mutex_unlock(&mfd->no_update.lock);
+		mfd->no_update.value = notification_type;
+		complete(&mfd->no_update.comp);
+		mutex_lock(&mfd->no_update.lock);
+	}
+	mutex_unlock(&mfd->no_update.lock);
+#ifndef TARGET_HW_MDSS_MDP3
+	mdp5_data = mfd_to_mdp5_data(mfd);
+	if (mdp5_data) {
+		if (notification_type == NOTIFY_TYPE_BL_AD_ATTEN_UPDATE) {
+			mdp5_data->ad_bl_events++;
+			sysfs_notify_dirent(mdp5_data->ad_bl_event_sd);
+		} else if (notification_type == NOTIFY_TYPE_BL_UPDATE) {
+			mdp5_data->bl_events++;
+			sysfs_notify_dirent(mdp5_data->bl_event_sd);
+		}
+	}
+#endif
+#ifdef TARGET_HW_MDSS_MDP3
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+	if (mdp3_session) {
+		mdp3_session->bl_events++;
+		sysfs_notify_dirent(mdp3_session->bl_event_sd);
+		pr_debug("bl_event = %u\n", mdp3_session->bl_events);
+	}
+#endif
+}
+
+/*
+ * Handle the display-update notification ioctl: depending on the
+ * requested @notify code, acknowledge init/deinit, or block the caller
+ * until an update / no-update / power-off event fires, then copy the
+ * delivered notification type back to user space.
+ *
+ * Returns >= 0 on success (copy_to_user result or 1 for immediate
+ * acknowledgements), -ETIMEDOUT when the wait expires, -EINVAL on bad
+ * input or start/stop without init.
+ */
+static int mdss_fb_notify_update(struct msm_fb_data_type *mfd,
+							unsigned long *argp)
+{
+	int ret;
+	unsigned int notify = 0x0, to_user = 0x0;
+
+	ret = copy_from_user(&notify, argp, sizeof(unsigned int));
+	if (ret) {
+		pr_err("%s:ioctl failed\n", __func__);
+		return ret;
+	}
+
+	if (notify > NOTIFY_UPDATE_POWER_OFF)
+		return -EINVAL;
+
+	if (notify == NOTIFY_UPDATE_INIT) {
+		mutex_lock(&mfd->update.lock);
+		mfd->update.init_done = true;
+		mutex_unlock(&mfd->update.lock);
+		ret = 1;
+	} else if (notify == NOTIFY_UPDATE_DEINIT) {
+		mutex_lock(&mfd->update.lock);
+		mfd->update.init_done = false;
+		mutex_unlock(&mfd->update.lock);
+		complete(&mfd->update.comp);
+		complete(&mfd->no_update.comp);
+		ret = 1;
+	} else if (mfd->update.is_suspend) {
+		to_user = NOTIFY_TYPE_SUSPEND;
+		mfd->update.is_suspend = 0;
+		ret = 1;
+	} else if (notify == NOTIFY_UPDATE_START) {
+		mutex_lock(&mfd->update.lock);
+		if (mfd->update.init_done)
+			reinit_completion(&mfd->update.comp);
+		else {
+			mutex_unlock(&mfd->update.lock);
+			pr_err("notify update start called without init\n");
+			return -EINVAL;
+		}
+		mfd->update.ref_count++;
+		mutex_unlock(&mfd->update.lock);
+		ret = wait_for_completion_interruptible_timeout(
+						&mfd->update.comp, 4 * HZ);
+		mutex_lock(&mfd->update.lock);
+		mfd->update.ref_count--;
+		mutex_unlock(&mfd->update.lock);
+		to_user = (unsigned int)mfd->update.value;
+		if (mfd->update.type == NOTIFY_TYPE_SUSPEND) {
+			to_user = (unsigned int)mfd->update.type;
+			ret = 1;
+		}
+	} else if (notify == NOTIFY_UPDATE_STOP) {
+		/*
+		 * Lock-imbalance fix: the previous code acquired
+		 * update.lock but released no_update.lock (never taken),
+		 * leaving update.lock held forever.  init_done is checked
+		 * under update.lock; the no_update state is then managed
+		 * under its own no_update.lock, mirroring the START branch.
+		 */
+		mutex_lock(&mfd->update.lock);
+		if (mfd->update.init_done) {
+			mutex_unlock(&mfd->update.lock);
+			mutex_lock(&mfd->no_update.lock);
+			reinit_completion(&mfd->no_update.comp);
+		} else {
+			mutex_unlock(&mfd->update.lock);
+			pr_err("notify update stop called without init\n");
+			return -EINVAL;
+		}
+		mfd->no_update.ref_count++;
+		mutex_unlock(&mfd->no_update.lock);
+		ret = wait_for_completion_interruptible_timeout(
+						&mfd->no_update.comp, 4 * HZ);
+		mutex_lock(&mfd->no_update.lock);
+		mfd->no_update.ref_count--;
+		mutex_unlock(&mfd->no_update.lock);
+		to_user = (unsigned int)mfd->no_update.value;
+	} else {
+		if (mdss_fb_is_power_on(mfd)) {
+			reinit_completion(&mfd->power_off_comp);
+			ret = wait_for_completion_interruptible_timeout(
+						&mfd->power_off_comp, 1 * HZ);
+		}
+	}
+
+	if (ret == 0)
+		ret = -ETIMEDOUT;
+	else if (ret > 0)
+		ret = copy_to_user(argp, &to_user, sizeof(unsigned int));
+	return ret;
+}
+
+/* Non-zero once the lcd-backlight LED class device is registered. */
+static int lcd_backlight_registered;
+
+/*
+ * LED-class brightness hook: clamp @value to brightness_max, map it
+ * onto the panel backlight range (0..bl_max) with rounding, and apply
+ * it under bl_lock.  Skipped in calibration mode, and when external
+ * backlight control owns a non-zero -> non-zero transition.
+ */
+static void mdss_fb_set_bl_brightness(struct led_classdev *led_cdev,
+				      enum led_brightness value)
+{
+	struct msm_fb_data_type *mfd = dev_get_drvdata(led_cdev->dev->parent);
+	int bl_lvl;
+
+	/* first brightness write ends the boot notification trigger */
+	if (mfd->boot_notification_led) {
+		led_trigger_event(mfd->boot_notification_led, 0);
+		mfd->boot_notification_led = NULL;
+	}
+
+	if (value > mfd->panel_info->brightness_max)
+		value = mfd->panel_info->brightness_max;
+
+	/* This maps android backlight level 0 to 255 into
+	 * driver backlight level 0 to bl_max with rounding
+	 */
+	MDSS_BRIGHT_TO_BL(bl_lvl, value, mfd->panel_info->bl_max,
+				mfd->panel_info->brightness_max);
+
+	/* never round a non-zero request down to fully off */
+	if (!bl_lvl && value)
+		bl_lvl = 1;
+
+	if (!IS_CALIB_MODE_BL(mfd) && (!mfd->ext_bl_ctrl || !value ||
+							!mfd->bl_level)) {
+		mutex_lock(&mfd->bl_lock);
+		mdss_fb_set_backlight(mfd, bl_lvl);
+		mutex_unlock(&mfd->bl_lock);
+	}
+}
+
+/* "lcd-backlight" LED device; starts at half of max brightness. */
+static struct led_classdev backlight_led = {
+	.name           = "lcd-backlight",
+	.brightness     = MDSS_MAX_BL_BRIGHTNESS / 2,
+	.brightness_set = mdss_fb_set_bl_brightness,
+	.max_brightness = MDSS_MAX_BL_BRIGHTNESS,
+};
+
+/*
+ * sysfs "msm_fb_type": report the attached panel type as a
+ * human-readable string.
+ */
+static ssize_t mdss_fb_get_type(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	const char *type;
+
+	switch (mfd->panel.type) {
+	case NO_PANEL:
+		type = "no panel";
+		break;
+	case HDMI_PANEL:
+		type = "hdmi panel";
+		break;
+	case LVDS_PANEL:
+		type = "lvds panel";
+		break;
+	case DTV_PANEL:
+		type = "dtv panel";
+		break;
+	case MIPI_VIDEO_PANEL:
+		type = "mipi dsi video panel";
+		break;
+	case MIPI_CMD_PANEL:
+		type = "mipi dsi cmd panel";
+		break;
+	case WRITEBACK_PANEL:
+		type = "writeback panel";
+		break;
+	case EDP_PANEL:
+		type = "edp panel";
+		break;
+	default:
+		type = "unknown panel";
+		break;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", type);
+}
+
+/*
+ * Total horizontal resolution of a (possibly chained) split panel:
+ * walk the pdata->next links while the next panel is active and sum
+ * the per-panel xres values.
+ */
+static int mdss_fb_get_panel_xres(struct mdss_panel_info *pinfo)
+{
+	struct mdss_panel_data *pdata;
+	int xres;
+
+	pdata = container_of(pinfo, struct mdss_panel_data, panel_info);
+
+	xres = pinfo->xres;
+	while (pdata->next && pdata->next->active) {
+		pdata = pdata->next;
+		xres += pdata->panel_info.xres;
+	}
+
+	return xres;
+}
+
+/*
+ * Validate a requested left/right framebuffer split against the total
+ * panel width.  Explicit values must sum to panel_xres; with no values
+ * given, dual-LM dual-display mode splits at the first panel's xres.
+ * Stores the accepted split in mfd and returns 0, or -EINVAL.
+ */
+static inline int mdss_fb_validate_split(int left, int right,
+			struct msm_fb_data_type *mfd)
+{
+	int rc = -EINVAL;
+	u32 panel_xres = mdss_fb_get_panel_xres(mfd->panel_info);
+
+	pr_debug("%pS: split_mode = %d left=%d right=%d panel_xres=%d\n",
+		__builtin_return_address(0), mfd->split_mode,
+		left, right, panel_xres);
+
+	/* more validate condition could be added if needed */
+	if (left && right) {
+		if (panel_xres == left + right) {
+			mfd->split_fb_left = left;
+			mfd->split_fb_right = right;
+			rc = 0;
+		}
+	} else {
+		if (mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
+			mfd->split_fb_left = mfd->panel_info->xres;
+			mfd->split_fb_right = panel_xres - mfd->split_fb_left;
+			rc = 0;
+		} else {
+			mfd->split_fb_left = mfd->split_fb_right = 0;
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * sysfs store: parse "<left> <right>" and apply it via
+ * mdss_fb_validate_split().  Invalid input is logged and ignored;
+ * the write always consumes @len.
+ */
+static ssize_t mdss_fb_store_split(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	int data[2] = {0};
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+
+	if (sscanf(buf, "%d %d", &data[0], &data[1]) != 2)
+		pr_debug("Not able to read split values\n");
+	else if (!mdss_fb_validate_split(data[0], data[1], mfd))
+		pr_debug("split left=%d right=%d\n", data[0], data[1]);
+
+	return len;
+}
+
+/* sysfs show: report the current "<left> <right>" split. */
+static ssize_t mdss_fb_show_split(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t ret = 0;
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+
+	ret = snprintf(buf, PAGE_SIZE, "%d %d\n",
+		       mfd->split_fb_left, mfd->split_fb_right);
+	return ret;
+}
+
+/*
+ * Promote to dual-LM single-display mode when a split was configured
+ * but no split mode was chosen yet.
+ */
+static void mdss_fb_get_split(struct msm_fb_data_type *mfd)
+{
+	if ((mfd->split_mode == MDP_SPLIT_MODE_NONE) &&
+	    (mfd->split_fb_left && mfd->split_fb_right))
+		mfd->split_mode = MDP_DUAL_LM_SINGLE_DISPLAY;
+
+	pr_debug("split fb%d left=%d right=%d mode=%d\n", mfd->index,
+		mfd->split_fb_left, mfd->split_fb_right, mfd->split_mode);
+}
+
+/*
+ * sysfs show: emit "src_split_always" for split-LM portrait panels,
+ * empty otherwise.
+ */
+static ssize_t mdss_fb_get_src_split_info(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	int ret = 0;
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+
+	if (is_split_lm(mfd) && (fbi->var.yres > fbi->var.xres)) {
+		pr_debug("always split mode enabled\n");
+		ret = scnprintf(buf, PAGE_SIZE,
+			"src_split_always\n");
+	}
+
+	return ret;
+}
+
+/* sysfs show: current thermal level. */
+static ssize_t mdss_fb_get_thermal_level(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+	int ret;
+
+	ret = scnprintf(buf, PAGE_SIZE, "thermal_level=%d\n",
+						mfd->thermal_level);
+
+	return ret;
+}
+
+/*
+ * sysfs store: set the thermal level and notify listeners on the
+ * msm_fb_thermal_level node.
+ */
+static ssize_t mdss_fb_set_thermal_level(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+	int rc = 0;
+	int thermal_level = 0;
+
+	rc = kstrtoint(buf, 10, &thermal_level);
+	if (rc) {
+		pr_err("kstrtoint failed. rc=%d\n", rc);
+		return rc;
+	}
+
+	pr_debug("Thermal level set to %d\n", thermal_level);
+	mfd->thermal_level = thermal_level;
+	sysfs_notify(&mfd->fbi->dev->kobj, NULL, "msm_fb_thermal_level");
+
+	return count;
+}
+
+/* sysfs show: current panel power state (raw enum value). */
+static ssize_t mdss_mdp_show_blank_event(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	int ret;
+
+	pr_debug("fb%d panel_power_state = %d\n", mfd->index,
+		mfd->panel_power_state);
+	ret = scnprintf(buf, PAGE_SIZE, "panel_power_on = %d\n",
+						mfd->panel_power_state);
+
+	return ret;
+}
+
+/*
+ * Delayed work: the display has been idle for mfd->idle_time ms.
+ * Notify the "idle_notify" sysfs node (when a timeout is configured)
+ * and mark the device idle.
+ */
+static void __mdss_fb_idle_notify_work(struct work_struct *work)
+{
+	struct delayed_work *dw = to_delayed_work(work);
+	struct msm_fb_data_type *mfd = container_of(dw, struct msm_fb_data_type,
+		idle_notify_work);
+
+	/* Notify idle-ness here */
+	pr_debug("Idle timeout %dms expired!\n", mfd->idle_time);
+	if (mfd->idle_time)
+		sysfs_notify(&mfd->fbi->dev->kobj, NULL, "idle_notify");
+	mfd->idle_state = MDSS_FB_IDLE;
+}
+
+
+static ssize_t mdss_fb_get_fps_info(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+	unsigned int fps_int, fps_float;
+
+	if (mfd->panel_power_state != MDSS_PANEL_POWER_ON)
+		mfd->fps_info.measured_fps = 0;
+	fps_int = (unsigned int) mfd->fps_info.measured_fps;
+	fps_float = do_div(fps_int, 10);
+	return scnprintf(buf, PAGE_SIZE, "%d.%d\n", fps_int, fps_float);
+
+}
+
+static ssize_t mdss_fb_get_idle_time(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+	int ret;
+
+	ret = scnprintf(buf, PAGE_SIZE, "%d", mfd->idle_time);
+
+	return ret;
+}
+
+static ssize_t mdss_fb_set_idle_time(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+	int rc = 0;
+	int idle_time = 0;
+
+	rc = kstrtoint(buf, 10, &idle_time);
+	if (rc) {
+		pr_err("kstrtoint failed. rc=%d\n", rc);
+		return rc;
+	}
+
+	pr_debug("Idle time = %d\n", idle_time);
+	mfd->idle_time = idle_time;
+
+	return count;
+}
+
+static ssize_t mdss_fb_get_idle_notify(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+	int ret;
+
+	ret = scnprintf(buf, PAGE_SIZE, "%s",
+		work_busy(&mfd->idle_notify_work.work) ? "no" : "yes");
+
+	return ret;
+}
+
/*
 * mdss_fb_get_panel_info() - sysfs show for msm_fb_panel_info
 *
 * Dumps one key=value pair per line describing the panel: partial-update
 * support and ROI alignment constraints, dynamic-fps support and range,
 * identity (name/primary/pluggable/display_id), CEC and pingpong-split
 * state, and the HDR properties.  display_primaries[0..7] are emitted as
 * white/red/green/blue chromaticity x,y pairs, matching the order of the
 * format string below.
 */
static ssize_t mdss_fb_get_panel_info(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct fb_info *fbi = dev_get_drvdata(dev);
	struct msm_fb_data_type *mfd = fbi->par;
	struct mdss_panel_info *pinfo = mfd->panel_info;
	int ret;

	ret = scnprintf(buf, PAGE_SIZE,
			"pu_en=%d\nxstart=%d\nwalign=%d\nystart=%d\nhalign=%d\n"
			"min_w=%d\nmin_h=%d\nroi_merge=%d\ndyn_fps_en=%d\n"
			"min_fps=%d\nmax_fps=%d\npanel_name=%s\n"
			"primary_panel=%d\nis_pluggable=%d\ndisplay_id=%s\n"
			"is_cec_supported=%d\nis_pingpong_split=%d\n"
			"is_hdr_enabled=%d\n"
			"peak_brightness=%d\nblackness_level=%d\n"
			"white_chromaticity_x=%d\nwhite_chromaticity_y=%d\n"
			"red_chromaticity_x=%d\nred_chromaticity_y=%d\n"
			"green_chromaticity_x=%d\ngreen_chromaticity_y=%d\n"
			"blue_chromaticity_x=%d\nblue_chromaticity_y=%d\n",
			pinfo->partial_update_enabled,
			pinfo->roi_alignment.xstart_pix_align,
			pinfo->roi_alignment.width_pix_align,
			pinfo->roi_alignment.ystart_pix_align,
			pinfo->roi_alignment.height_pix_align,
			pinfo->roi_alignment.min_width,
			pinfo->roi_alignment.min_height,
			pinfo->partial_update_roi_merge,
			pinfo->dynamic_fps, pinfo->min_fps, pinfo->max_fps,
			pinfo->panel_name, pinfo->is_prim_panel,
			pinfo->is_pluggable, pinfo->display_id,
			pinfo->is_cec_supported, is_pingpong_split(mfd),
			pinfo->hdr_properties.hdr_enabled,
			pinfo->hdr_properties.peak_brightness,
			pinfo->hdr_properties.blackness_level,
			pinfo->hdr_properties.display_primaries[0],
			pinfo->hdr_properties.display_primaries[1],
			pinfo->hdr_properties.display_primaries[2],
			pinfo->hdr_properties.display_primaries[3],
			pinfo->hdr_properties.display_primaries[4],
			pinfo->hdr_properties.display_primaries[5],
			pinfo->hdr_properties.display_primaries[6],
			pinfo->hdr_properties.display_primaries[7]);

	return ret;
}
+
+static ssize_t mdss_fb_get_panel_status(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+	int ret;
+	int panel_status;
+
+	if (mdss_panel_is_power_off(mfd->panel_power_state)) {
+		ret = scnprintf(buf, PAGE_SIZE, "panel_status=%s\n", "suspend");
+	} else {
+		panel_status = mdss_fb_send_panel_event(mfd,
+				MDSS_EVENT_DSI_PANEL_STATUS, NULL);
+		ret = scnprintf(buf, PAGE_SIZE, "panel_status=%s\n",
+			panel_status > 0 ? "alive" : "dead");
+	}
+
+	return ret;
+}
+
+static ssize_t mdss_fb_force_panel_dead(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdss_panel_data *pdata;
+
+	pdata = dev_get_platdata(&mfd->pdev->dev);
+	if (!pdata) {
+		pr_err("no panel connected!\n");
+		return len;
+	}
+
+	if (kstrtouint(buf, 0, &pdata->panel_info.panel_force_dead))
+		pr_err("kstrtouint buf error!\n");
+
+	return len;
+}
+
+/*
+ * mdss_fb_blanking_mode_switch() - Function triggers dynamic mode switch
+ * @mfd:	Framebuffer data structure for display
+ * @mode:	Enabled/Disable LowPowerMode
+ *		1: Enter into LowPowerMode
+ *		0: Exit from LowPowerMode
+ *
+ * This Function dynamically switches to and from video mode. This
+ * swtich involves the panel turning off backlight during trantision.
+ */
+static int mdss_fb_blanking_mode_switch(struct msm_fb_data_type *mfd, int mode)
+{
+	int ret = 0;
+	u32 bl_lvl = 0;
+	struct mdss_panel_info *pinfo = NULL;
+	struct mdss_panel_data *pdata;
+
+	if (!mfd || !mfd->panel_info)
+		return -EINVAL;
+
+	pinfo = mfd->panel_info;
+
+	if (!pinfo->mipi.dms_mode) {
+		pr_warn("Panel does not support dynamic switch!\n");
+		return 0;
+	}
+
+	if (mode == pinfo->mipi.mode) {
+		pr_debug("Already in requested mode!\n");
+		return 0;
+	}
+	pr_debug("Enter mode: %d\n", mode);
+
+	pdata = dev_get_platdata(&mfd->pdev->dev);
+
+	pdata->panel_info.dynamic_switch_pending = true;
+	ret = mdss_fb_pan_idle(mfd);
+	if (ret) {
+		pr_err("mdss_fb_pan_idle for fb%d failed. ret=%d\n",
+			mfd->index, ret);
+		pdata->panel_info.dynamic_switch_pending = false;
+		return ret;
+	}
+
+	mutex_lock(&mfd->bl_lock);
+	bl_lvl = mfd->bl_level;
+	mdss_fb_set_backlight(mfd, 0);
+	mutex_unlock(&mfd->bl_lock);
+
+	lock_fb_info(mfd->fbi);
+	ret = mdss_fb_blank_sub(FB_BLANK_POWERDOWN, mfd->fbi,
+						mfd->op_enable);
+	if (ret) {
+		pr_err("can't turn off display!\n");
+		unlock_fb_info(mfd->fbi);
+		return ret;
+	}
+
+	mfd->op_enable = false;
+
+	ret = mfd->mdp.configure_panel(mfd, mode, 1);
+	mdss_fb_set_mdp_sync_pt_threshold(mfd, mfd->panel.type);
+
+	mfd->op_enable = true;
+
+	ret = mdss_fb_blank_sub(FB_BLANK_UNBLANK, mfd->fbi,
+					mfd->op_enable);
+	if (ret) {
+		pr_err("can't turn on display!\n");
+		unlock_fb_info(mfd->fbi);
+		return ret;
+	}
+	unlock_fb_info(mfd->fbi);
+
+	mutex_lock(&mfd->bl_lock);
+	mfd->allow_bl_update = true;
+	mdss_fb_set_backlight(mfd, bl_lvl);
+	mutex_unlock(&mfd->bl_lock);
+
+	pdata->panel_info.dynamic_switch_pending = false;
+	pdata->panel_info.is_lpm_mode = mode ? 1 : 0;
+
+	if (ret) {
+		pr_err("can't turn on display!\n");
+		return ret;
+	}
+
+	pr_debug("Exit mode: %d\n", mode);
+
+	return 0;
+}
+
+static ssize_t mdss_fb_change_dfps_mode(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdss_panel_data *pdata;
+	struct mdss_panel_info *pinfo;
+	u32 dfps_mode;
+
+	pdata = dev_get_platdata(&mfd->pdev->dev);
+	if (!pdata) {
+		pr_err("no panel connected!\n");
+		return len;
+	}
+	pinfo = &pdata->panel_info;
+
+	if (kstrtouint(buf, 0, &dfps_mode)) {
+		pr_err("kstrtouint buf error!\n");
+		return len;
+	}
+
+	if (dfps_mode >= DFPS_MODE_MAX) {
+		pinfo->dynamic_fps = false;
+		return len;
+	}
+
+	if (mfd->idle_time != 0) {
+		pr_err("ERROR: Idle time is not disabled.\n");
+		return len;
+	}
+
+	if (pinfo->current_fps != pinfo->default_fps) {
+		pr_err("ERROR: panel not configured to default fps\n");
+		return len;
+	}
+
+	pinfo->dynamic_fps = true;
+	pinfo->dfps_update = dfps_mode;
+
+	if (pdata->next)
+		pdata->next->panel_info.dfps_update = dfps_mode;
+
+	return len;
+}
+
+static ssize_t mdss_fb_get_dfps_mode(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdss_panel_data *pdata;
+	struct mdss_panel_info *pinfo;
+	int ret;
+
+	pdata = dev_get_platdata(&mfd->pdev->dev);
+	if (!pdata) {
+		pr_err("no panel connected!\n");
+		return -EINVAL;
+	}
+	pinfo = &pdata->panel_info;
+
+	ret = scnprintf(buf, PAGE_SIZE, "dfps enabled=%d mode=%d\n",
+		pinfo->dynamic_fps, pinfo->dfps_update);
+
+	return ret;
+}
+
+static ssize_t mdss_fb_change_persist_mode(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdss_panel_info *pinfo = NULL;
+	struct mdss_panel_data *pdata;
+	int ret = 0;
+	u32 persist_mode;
+
+	if (!mfd || !mfd->panel_info) {
+		pr_err("%s: Panel info is NULL!\n", __func__);
+	return len;
+	}
+
+	pinfo = mfd->panel_info;
+
+	if (kstrtouint(buf, 0, &persist_mode)) {
+		pr_err("kstrtouint buf error!\n");
+		return len;
+	}
+
+	mutex_lock(&mfd->mdss_sysfs_lock);
+	if (mdss_panel_is_power_off(mfd->panel_power_state)) {
+		pinfo->persist_mode = persist_mode;
+		goto end;
+	}
+
+	mutex_lock(&mfd->bl_lock);
+
+	pdata = dev_get_platdata(&mfd->pdev->dev);
+	if ((pdata) && (pdata->apply_display_setting))
+		ret = pdata->apply_display_setting(pdata, persist_mode);
+
+	mutex_unlock(&mfd->bl_lock);
+
+	if (!ret) {
+		pr_debug("%s: Persist mode %d\n", __func__, persist_mode);
+		pinfo->persist_mode = persist_mode;
+	}
+
+end:
+	mutex_unlock(&mfd->mdss_sysfs_lock);
+	return len;
+}
+
+static ssize_t mdss_fb_get_persist_mode(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdss_panel_data *pdata;
+	struct mdss_panel_info *pinfo;
+	int ret;
+
+	pdata = dev_get_platdata(&mfd->pdev->dev);
+	if (!pdata) {
+		pr_err("no panel connected!\n");
+		return -EINVAL;
+	}
+	pinfo = &pdata->panel_info;
+
+	ret = scnprintf(buf, PAGE_SIZE, "%d\n", pinfo->persist_mode);
+
+	return ret;
+}
+
/* sysfs attributes exported on the fb device (/sys/class/graphics/fbN/) */
static DEVICE_ATTR(msm_fb_type, 0444, mdss_fb_get_type, NULL);
static DEVICE_ATTR(msm_fb_split, 0644, mdss_fb_show_split,
					mdss_fb_store_split);
static DEVICE_ATTR(show_blank_event, 0444, mdss_mdp_show_blank_event, NULL);
static DEVICE_ATTR(idle_time, 0644,
	mdss_fb_get_idle_time, mdss_fb_set_idle_time);
static DEVICE_ATTR(idle_notify, 0444, mdss_fb_get_idle_notify, NULL);
static DEVICE_ATTR(msm_fb_panel_info, 0444, mdss_fb_get_panel_info, NULL);
static DEVICE_ATTR(msm_fb_src_split_info, 0444, mdss_fb_get_src_split_info,
	NULL);
static DEVICE_ATTR(msm_fb_thermal_level, 0644,
	mdss_fb_get_thermal_level, mdss_fb_set_thermal_level);
static DEVICE_ATTR(msm_fb_panel_status, 0644,
	mdss_fb_get_panel_status, mdss_fb_force_panel_dead);
static DEVICE_ATTR(msm_fb_dfps_mode, 0644,
	mdss_fb_get_dfps_mode, mdss_fb_change_dfps_mode);
static DEVICE_ATTR(measured_fps, 0664,
	mdss_fb_get_fps_info, NULL);
static DEVICE_ATTR(msm_fb_persist_mode, 0644,
	mdss_fb_get_persist_mode, mdss_fb_change_persist_mode);
/* NULL-terminated list backing the attribute group below */
static struct attribute *mdss_fb_attrs[] = {
	&dev_attr_msm_fb_type.attr,
	&dev_attr_msm_fb_split.attr,
	&dev_attr_show_blank_event.attr,
	&dev_attr_idle_time.attr,
	&dev_attr_idle_notify.attr,
	&dev_attr_msm_fb_panel_info.attr,
	&dev_attr_msm_fb_src_split_info.attr,
	&dev_attr_msm_fb_thermal_level.attr,
	&dev_attr_msm_fb_panel_status.attr,
	&dev_attr_msm_fb_dfps_mode.attr,
	&dev_attr_measured_fps.attr,
	&dev_attr_msm_fb_persist_mode.attr,
	NULL,
};

/* registered/unregistered as one group in mdss_fb_{create,remove}_sysfs() */
static struct attribute_group mdss_fb_attr_group = {
	.attrs = mdss_fb_attrs,
};
+
+static int mdss_fb_create_sysfs(struct msm_fb_data_type *mfd)
+{
+	int rc;
+
+	rc = sysfs_create_group(&mfd->fbi->dev->kobj, &mdss_fb_attr_group);
+	if (rc)
+		pr_err("sysfs group creation failed, rc=%d\n", rc);
+	return rc;
+}
+
+static void mdss_fb_remove_sysfs(struct msm_fb_data_type *mfd)
+{
+	sysfs_remove_group(&mfd->fbi->dev->kobj, &mdss_fb_attr_group);
+}
+
/*
 * mdss_fb_shutdown() - platform shutdown hook
 *
 * Marks the device as going down, wakes any waiters so they can observe
 * shutdown_pending, then forces release of all fb file handles and
 * notifies "show_blank_event" pollers, all under the fb_info lock.
 */
static void mdss_fb_shutdown(struct platform_device *pdev)
{
	struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);

	if (!mfd)
		return;

	/* must be set before waking waiters so they see the flag */
	mfd->shutdown_pending = true;

	/* wake up threads waiting on idle or kickoff queues */
	wake_up_all(&mfd->idle_wait_q);
	wake_up_all(&mfd->kickoff_wait_q);

	lock_fb_info(mfd->fbi);
	mdss_fb_release_all(mfd->fbi, true);
	sysfs_notify(&mfd->fbi->dev->kobj, NULL, "show_blank_event");
	unlock_fb_info(mfd->fbi);
}
+
+static void mdss_fb_input_event_handler(struct input_handle *handle,
+				    unsigned int type,
+				    unsigned int code,
+				    int value)
+{
+	struct msm_fb_data_type *mfd = handle->handler->private;
+	int rc;
+
+	if ((type != EV_ABS) || !mdss_fb_is_power_on(mfd))
+		return;
+
+	if (mfd->mdp.input_event_handler) {
+		rc = mfd->mdp.input_event_handler(mfd);
+		if (rc)
+			pr_err("mdp input event handler failed\n");
+	}
+}
+
+static int mdss_fb_input_connect(struct input_handler *handler,
+			     struct input_dev *dev,
+			     const struct input_device_id *id)
+{
+	int rc;
+	struct input_handle *handle;
+
+	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+	if (!handle)
+		return -ENOMEM;
+
+	handle->dev = dev;
+	handle->handler = handler;
+	handle->name = handler->name;
+
+	rc = input_register_handle(handle);
+	if (rc) {
+		pr_err("failed to register input handle, rc = %d\n", rc);
+		goto error;
+	}
+
+	rc = input_open_device(handle);
+	if (rc) {
+		pr_err("failed to open input device, rc = %d\n", rc);
+		goto error_unregister;
+	}
+
+	return 0;
+
+error_unregister:
+	input_unregister_handle(handle);
+error:
+	kfree(handle);
+	return rc;
+}
+
/* Inverse of mdss_fb_input_connect(): close, unregister and free. */
static void mdss_fb_input_disconnect(struct input_handle *handle)
{
	input_close_device(handle);
	input_unregister_handle(handle);
	kfree(handle);
}
+
/*
 * Structure for specifying event parameters on which to receive callbacks.
 * This structure will trigger a callback in case of a touch event (specified
 * by EV_ABS) where there is a change in X and Y coordinates.
 */
static const struct input_device_id mdss_fb_input_ids[] = {
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
		.evbit = { BIT_MASK(EV_ABS) },
		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
				BIT_MASK(ABS_MT_POSITION_X) |
				BIT_MASK(ABS_MT_POSITION_Y) },
	},
	{ },	/* terminating entry */
};
+
+static int mdss_fb_register_input_handler(struct msm_fb_data_type *mfd)
+{
+	int rc;
+	struct input_handler *handler;
+
+	if (mfd->input_handler)
+		return -EINVAL;
+
+	handler = kzalloc(sizeof(*handler), GFP_KERNEL);
+	if (!handler)
+		return -ENOMEM;
+
+	handler->event = mdss_fb_input_event_handler;
+	handler->connect = mdss_fb_input_connect;
+	handler->disconnect = mdss_fb_input_disconnect,
+	handler->name = "mdss_fb",
+	handler->id_table = mdss_fb_input_ids;
+	handler->private = mfd;
+
+	rc = input_register_handler(handler);
+	if (rc) {
+		pr_err("Unable to register the input handler\n");
+		kfree(handler);
+	} else {
+		mfd->input_handler = handler;
+	}
+
+	return rc;
+}
+
+static void mdss_fb_unregister_input_handler(struct msm_fb_data_type *mfd)
+{
+	if (!mfd->input_handler)
+		return;
+
+	input_unregister_handler(mfd->input_handler);
+	kfree(mfd->input_handler);
+}
+
/*
 * mdss_fb_videomode_from_panel_timing() - translate an mdss panel timing
 * into the fbdev struct fb_videomode representation.
 *
 * Porch/pulse fields map directly onto fbdev margins/sync lengths.  When
 * the timing advertises a frame rate the pixel clock is derived from
 * h_total * v_total * refresh; otherwise pt->clk_rate is used directly.
 */
static void mdss_fb_videomode_from_panel_timing(struct fb_videomode *videomode,
		struct mdss_panel_timing *pt)
{
	videomode->name = pt->name;
	videomode->xres = pt->xres;
	videomode->yres = pt->yres;
	videomode->left_margin = pt->h_back_porch;
	videomode->right_margin = pt->h_front_porch;
	videomode->hsync_len = pt->h_pulse_width;
	videomode->upper_margin = pt->v_back_porch;
	videomode->lower_margin = pt->v_front_porch;
	videomode->vsync_len = pt->v_pulse_width;
	videomode->refresh = pt->frame_rate;
	videomode->flag = 0;
	videomode->vmode = 0;
	videomode->sync = 0;

	if (videomode->refresh) {
		unsigned long clk_rate, h_total, v_total;

		h_total = videomode->xres + videomode->left_margin
			+ videomode->right_margin + videomode->hsync_len;
		v_total = videomode->yres + videomode->lower_margin
			+ videomode->upper_margin + videomode->vsync_len;
		clk_rate = h_total * v_total * videomode->refresh;
		videomode->pixclock =
			KHZ2PICOS(clk_rate / 1000);
	} else {
		/*
		 * NOTE(review): KHZ2PICOS divides by its argument; a
		 * clk_rate below 1000 would pass 0 here.  Confirm panels
		 * always report either a frame_rate or a >= 1 kHz clk_rate.
		 */
		videomode->pixclock =
			KHZ2PICOS((unsigned long)pt->clk_rate / 1000);
	}
}
+
+static void mdss_fb_set_split_mode(struct msm_fb_data_type *mfd,
+		struct mdss_panel_data *pdata)
+{
+	if (pdata->panel_info.is_split_display) {
+		struct mdss_panel_data *pnext = pdata->next;
+
+		mfd->split_fb_left = pdata->panel_info.lm_widths[0];
+		if (pnext)
+			mfd->split_fb_right = pnext->panel_info.lm_widths[0];
+
+		if (pdata->panel_info.use_pingpong_split)
+			mfd->split_mode = MDP_PINGPONG_SPLIT;
+		else
+			mfd->split_mode = MDP_DUAL_LM_DUAL_DISPLAY;
+	} else if ((pdata->panel_info.lm_widths[0] != 0)
+			&& (pdata->panel_info.lm_widths[1] != 0)) {
+		mfd->split_fb_left = pdata->panel_info.lm_widths[0];
+		mfd->split_fb_right = pdata->panel_info.lm_widths[1];
+		mfd->split_mode = MDP_DUAL_LM_SINGLE_DISPLAY;
+	} else {
+		mfd->split_mode = MDP_SPLIT_MODE_NONE;
+	}
+}
+
/*
 * mdss_fb_init_panel_modes() - build the fbdev modelist from the panel's
 * supported timings.
 *
 * Converts every entry on pdata->timings_list into an fb_videomode,
 * widening xres by the matching timing of a chained split panel when one
 * exists, records the current timing as fbi->mode and regenerates
 * fbi->modelist.  Returns 0 when the panel exposes no mode list (nothing
 * to do) or on success, -ENOMEM on allocation failure.
 */
static int mdss_fb_init_panel_modes(struct msm_fb_data_type *mfd,
		struct mdss_panel_data *pdata)
{
	struct fb_info *fbi = mfd->fbi;
	struct fb_videomode *modedb;
	struct mdss_panel_timing *pt;
	struct list_head *pos;
	int num_timings = 0;
	int i = 0;

	/* check if multiple modes are supported */
	if (!pdata->timings_list.prev || !pdata->timings_list.next)
		INIT_LIST_HEAD(&pdata->timings_list);

	if (!fbi || !pdata->current_timing || list_empty(&pdata->timings_list))
		return 0;

	/* first pass: count timings so the modedb can be sized */
	list_for_each(pos, &pdata->timings_list)
		num_timings++;

	/* device-managed: freed automatically with fbi->dev */
	modedb = devm_kzalloc(fbi->dev, num_timings * sizeof(*modedb),
			GFP_KERNEL);
	if (!modedb)
		return -ENOMEM;

	list_for_each_entry(pt, &pdata->timings_list, list) {
		struct mdss_panel_timing *spt = NULL;

		mdss_fb_videomode_from_panel_timing(modedb + i, pt);
		if (pdata->next) {
			spt = mdss_panel_get_timing_by_name(pdata->next,
					modedb[i].name);
			/* split display: both halves contribute to xres */
			if (!IS_ERR_OR_NULL(spt))
				modedb[i].xres += spt->xres;
			else
				pr_debug("no matching split config for %s\n",
						modedb[i].name);

			/*
			 * if no panel timing found for current, need to
			 * disable it otherwise mark it as active
			 */
			if (pt == pdata->current_timing)
				pdata->next->active = !IS_ERR_OR_NULL(spt);
		}

		if (pt == pdata->current_timing) {
			pr_debug("found current mode: %s\n", pt->name);
			fbi->mode = modedb + i;
		}
		i++;
	}

	fbi->monspecs.modedb = modedb;
	fbi->monspecs.modedb_len = num_timings;

	/* destroy and recreate modelist */
	fb_destroy_modelist(&fbi->modelist);

	if (fbi->mode)
		fb_videomode_to_var(&fbi->var, fbi->mode);
	fb_videomode_to_modelist(modedb, num_timings, &fbi->modelist);

	return 0;
}
+
/*
 * mdss_fb_probe() - platform probe for a "qcom,mdss-fb" node
 *
 * Allocates the fb_info (with msm_fb_data_type as its par data),
 * initializes backlight/split/sync-pt state from the panel data,
 * registers the framebuffer and its sysfs nodes, starts pm_runtime,
 * registers the lcd-backlight LED class device (first fb only), builds
 * the panel mode list, creates the release-fence timeline and optionally
 * hooks into the input subsystem.
 *
 * Defers (-EPROBE_DEFER) until panel platform data is attached.
 */
static int mdss_fb_probe(struct platform_device *pdev)
{
	struct msm_fb_data_type *mfd = NULL;
	struct mdss_panel_data *pdata;
	struct fb_info *fbi;
	int rc;

	if (fbi_list_index >= MAX_FBI_LIST)
		return -ENOMEM;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata)
		return -EPROBE_DEFER;

	if (!mdp_instance) {
		pr_err("mdss mdp resource not initialized yet\n");
		return -ENODEV;
	}

	/*
	 * alloc framebuffer info + par data
	 */
	fbi = framebuffer_alloc(sizeof(struct msm_fb_data_type), NULL);
	if (fbi == NULL) {
		pr_err("can't allocate framebuffer info data!\n");
		return -ENOMEM;
	}

	mfd = (struct msm_fb_data_type *)fbi->par;
	mfd->key = MFD_KEY;
	mfd->fbi = fbi;
	mfd->panel_info = &pdata->panel_info;
	mfd->panel.type = pdata->panel_info.type;
	/*
	 * NOTE(review): mfd->index is still 0 (zeroed allocation) at this
	 * point, so panel.id is always 0; if it is meant to mirror the fb
	 * index, the mfd->index assignment below would need to come first.
	 * Confirm what consumers of panel.id expect before reordering.
	 */
	mfd->panel.id = mfd->index;
	mfd->fb_page = MDSS_FB_NUM;
	mfd->index = fbi_list_index;
	mfd->mdp_fb_page_protection = MDP_FB_PAGE_PROTECTION_WRITECOMBINE;

	/* -1 means no external assertive-display control block */
	mfd->ext_ad_ctrl = -1;
	if (mfd->panel_info && mfd->panel_info->brightness_max > 0)
		MDSS_BRIGHT_TO_BL(mfd->bl_level, backlight_led.brightness,
		mfd->panel_info->bl_max, mfd->panel_info->brightness_max);
	else
		mfd->bl_level = 0;

	/* 1024 == unity backlight scale (x/1024 fraction) */
	mfd->bl_scale = 1024;
	mfd->bl_min_lvl = 30;
	mfd->ad_bl_level = 0;
	mfd->fb_imgType = MDP_RGBA_8888;
	mfd->calib_mode_bl = 0;
	/* U32_MAX sentinel: no pending backlight level to restore */
	mfd->unset_bl_level = U32_MAX;

	mfd->pdev = pdev;

	mfd->split_fb_left = mfd->split_fb_right = 0;

	mdss_fb_set_split_mode(mfd, pdata);
	pr_info("fb%d: split_mode:%d left:%d right:%d\n", mfd->index,
		mfd->split_mode, mfd->split_fb_left, mfd->split_fb_right);

	mfd->mdp = *mdp_instance;

	rc = of_property_read_bool(pdev->dev.of_node,
		"qcom,boot-indication-enabled");

	if (rc) {
		led_trigger_register_simple("boot-indication",
			&(mfd->boot_notification_led));
	}

	INIT_LIST_HEAD(&mfd->file_list);

	mutex_init(&mfd->bl_lock);
	mutex_init(&mfd->mdss_sysfs_lock);
	mutex_init(&mfd->switch_lock);

	fbi_list[fbi_list_index++] = fbi;

	platform_set_drvdata(pdev, mfd);

	rc = mdss_fb_register(mfd);
	if (rc)
		return rc;

	mdss_fb_create_sysfs(mfd);
	mdss_fb_send_panel_event(mfd, MDSS_EVENT_FB_REGISTERED, fbi);

	if (mfd->mdp.init_fnc) {
		rc = mfd->mdp.init_fnc(mfd);
		if (rc) {
			pr_err("init_fnc failed\n");
			return rc;
		}
	}
	mdss_fb_init_fps_info(mfd);

	/*
	 * NOTE(review): rc is not reset after this call, so a negative
	 * pm_runtime_set_active() result becomes probe's return value even
	 * though only a warning is logged — confirm whether that is
	 * intended.
	 */
	rc = pm_runtime_set_active(mfd->fbi->dev);
	if (rc < 0)
		pr_err("pm_runtime: fail to set active.\n");
	pm_runtime_enable(mfd->fbi->dev);

	/* android supports only one lcd-backlight/lcd for now */
	if (!lcd_backlight_registered) {
		backlight_led.brightness = mfd->panel_info->brightness_max;
		backlight_led.max_brightness = mfd->panel_info->brightness_max;
		if (led_classdev_register(&pdev->dev, &backlight_led))
			pr_err("led_classdev_register failed\n");
		else
			lcd_backlight_registered = 1;
	}

	mdss_fb_init_panel_modes(mfd, pdata);

	mfd->mdp_sync_pt_data.fence_name = "mdp-fence";
	if (mfd->mdp_sync_pt_data.timeline == NULL) {
		char timeline_name[16];

		snprintf(timeline_name, sizeof(timeline_name),
			"mdss_fb_%d", mfd->index);
		 mfd->mdp_sync_pt_data.timeline =
				sw_sync_timeline_create(timeline_name);
		if (mfd->mdp_sync_pt_data.timeline == NULL) {
			pr_err("cannot create release fence time line\n");
			return -ENOMEM;
		}
		mfd->mdp_sync_pt_data.notifier.notifier_call =
			__mdss_fb_sync_buf_done_callback;
	}

	mdss_fb_set_mdp_sync_pt_threshold(mfd, mfd->panel.type);

	if (mfd->mdp.splash_init_fnc)
		mfd->mdp.splash_init_fnc(mfd);

	/*
	 * Register with input driver for a callback for command mode panels.
	 * When there is an input event, mdp clocks will be turned on to reduce
	 * latency when a frame update happens.
	 * For video mode panels, idle timeout will be delayed so that userspace
	 * does not get an idle event while new frames are expected. In case of
	 * an idle event, user space tries to fall back to GPU composition which
	 * can lead to increased load when there are new frames.
	 */
	if (mfd->mdp.input_event_handler &&
		((mfd->panel_info->type == MIPI_CMD_PANEL) ||
		(mfd->panel_info->type == MIPI_VIDEO_PANEL)))
		if (mdss_fb_register_input_handler(mfd))
			pr_err("failed to register input handler\n");

	INIT_DELAYED_WORK(&mfd->idle_notify_work, __mdss_fb_idle_notify_work);

	return rc;
}
+
+static void mdss_fb_set_mdp_sync_pt_threshold(struct msm_fb_data_type *mfd,
+		int type)
+{
+	if (!mfd)
+		return;
+
+	switch (type) {
+	case WRITEBACK_PANEL:
+		mfd->mdp_sync_pt_data.threshold = 1;
+		mfd->mdp_sync_pt_data.retire_threshold = 0;
+		break;
+	case MIPI_CMD_PANEL:
+		mfd->mdp_sync_pt_data.threshold = 1;
+		mfd->mdp_sync_pt_data.retire_threshold = 1;
+		break;
+	default:
+		mfd->mdp_sync_pt_data.threshold = 2;
+		mfd->mdp_sync_pt_data.retire_threshold = 0;
+		break;
+	}
+}
+
/*
 * mdss_fb_remove() - platform remove hook
 *
 * Tears down sysfs, pm_runtime, the input handler and panel debugfs,
 * suspends the device, unregisters the framebuffer and, if this driver
 * registered the shared lcd-backlight LED, unregisters that too.
 */
static int mdss_fb_remove(struct platform_device *pdev)
{
	struct msm_fb_data_type *mfd;

	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);

	if (!mfd)
		return -ENODEV;

	mdss_fb_remove_sysfs(mfd);

	pm_runtime_disable(mfd->fbi->dev);

	/* sanity check that drvdata really is an mfd */
	if (mfd->key != MFD_KEY)
		return -EINVAL;

	mdss_fb_unregister_input_handler(mfd);
	mdss_panel_debugfs_cleanup(mfd->panel_info);

	if (mdss_fb_suspend_sub(mfd))
		pr_err("msm_fb_remove: can't stop the device %d\n",
			    mfd->index);

	/* remove /dev/fb* */
	unregister_framebuffer(mfd->fbi);

	if (lcd_backlight_registered) {
		lcd_backlight_registered = 0;
		led_classdev_unregister(&backlight_led);
	}

	return 0;
}
+
+static int mdss_fb_send_panel_event(struct msm_fb_data_type *mfd,
+					int event, void *arg)
+{
+	int ret = 0;
+	struct mdss_panel_data *pdata;
+
+	pdata = dev_get_platdata(&mfd->pdev->dev);
+	if (!pdata) {
+		pr_err("no panel connected\n");
+		return -ENODEV;
+	}
+
+	pr_debug("sending event=%d for fb%d\n", event, mfd->index);
+
+	do {
+		if (pdata->event_handler)
+			ret = pdata->event_handler(pdata, event, arg);
+
+		pdata = pdata->next;
+	} while (!ret && pdata);
+
+	return ret;
+}
+
/*
 * mdss_fb_suspend_sub() - common suspend path (legacy, pm_sleep, remove)
 *
 * Waits for pending pans, sends MDSS_EVENT_SUSPEND to the panel chain,
 * snapshots op_enable/panel_power_state for resume, and if the display
 * is still operational drops it into ULP (display on, interface clocks
 * off) before disabling operations and marking the fbdev suspended.
 */
static int mdss_fb_suspend_sub(struct msm_fb_data_type *mfd)
{
	int ret = 0;

	if ((!mfd) || (mfd->key != MFD_KEY))
		return 0;

	pr_debug("mdss_fb suspend index=%d\n", mfd->index);

	ret = mdss_fb_pan_idle(mfd);
	if (ret) {
		pr_warn("mdss_fb_pan_idle for fb%d failed. ret=%d\n",
			mfd->index, ret);
		goto exit;
	}

	ret = mdss_fb_send_panel_event(mfd, MDSS_EVENT_SUSPEND, NULL);
	if (ret) {
		pr_warn("unable to suspend fb%d (%d)\n", mfd->index, ret);
		goto exit;
	}

	/* snapshot current state so resume can restore it */
	mfd->suspend.op_enable = mfd->op_enable;
	mfd->suspend.panel_power_state = mfd->panel_power_state;

	if (mfd->op_enable) {
		/*
		 * Ideally, display should have either been blanked by now, or
		 * should have transitioned to a low power state. If not, then
		 * as a fall back option, enter ulp state to leave the display
		 * on, but turn off all interface clocks.
		 */
		if (mdss_fb_is_power_on(mfd)) {
			ret = mdss_fb_blank_sub(BLANK_FLAG_ULP, mfd->fbi,
					mfd->suspend.op_enable);
			if (ret) {
				pr_err("can't turn off display!\n");
				goto exit;
			}
		}
		mfd->op_enable = false;
		fb_set_suspend(mfd->fbi, FBINFO_STATE_SUSPENDED);
	}
exit:
	return ret;
}
+
/*
 * mdss_fb_resume_sub() - common resume path (legacy and pm_sleep)
 *
 * Mirrors mdss_fb_suspend_sub(): waits for pending pans, sends
 * MDSS_EVENT_RESUME down the panel chain, restores the saved op_enable,
 * and unblanks to the pre-suspend power state where required.
 * power_set_comp gates waiters on the is_power_setting transition.
 */
static int mdss_fb_resume_sub(struct msm_fb_data_type *mfd)
{
	int ret = 0;

	if ((!mfd) || (mfd->key != MFD_KEY))
		return 0;

	/* signal in-progress power transition to concurrent waiters */
	reinit_completion(&mfd->power_set_comp);
	mfd->is_power_setting = true;
	pr_debug("mdss_fb resume index=%d\n", mfd->index);

	ret = mdss_fb_pan_idle(mfd);
	if (ret) {
		pr_warn("mdss_fb_pan_idle for fb%d failed. ret=%d\n",
			mfd->index, ret);
		return ret;
	}

	ret = mdss_fb_send_panel_event(mfd, MDSS_EVENT_RESUME, NULL);
	if (ret) {
		pr_warn("unable to resume fb%d (%d)\n", mfd->index, ret);
		return ret;
	}

	/* resume state var recover */
	mfd->op_enable = mfd->suspend.op_enable;

	/*
	 * If the fb was explicitly blanked or transitioned to ulp during
	 * suspend, then undo it during resume with the appropriate unblank
	 * flag. If fb was in ulp state when entering suspend, then nothing
	 * needs to be done.
	 */
	if (mdss_panel_is_power_on(mfd->suspend.panel_power_state) &&
		!mdss_panel_is_power_on_ulp(mfd->suspend.panel_power_state)) {
		int unblank_flag = mdss_panel_is_power_on_interactive(
			mfd->suspend.panel_power_state) ? FB_BLANK_UNBLANK :
			BLANK_FLAG_LP;

		ret = mdss_fb_blank_sub(unblank_flag, mfd->fbi, mfd->op_enable);
		if (ret)
			pr_warn("can't turn on display!\n");
		else
			fb_set_suspend(mfd->fbi, FBINFO_STATE_RUNNING);
	}
	mfd->is_power_setting = false;
	complete_all(&mfd->power_set_comp);

	return ret;
}
+
/*
 * Legacy platform suspend/resume callbacks; only compiled when power
 * management is enabled without CONFIG_PM_SLEEP (otherwise the dev_pm_ops
 * below are used and these are stubbed to NULL).
 */
#if defined(CONFIG_PM) && !defined(CONFIG_PM_SLEEP)
static int mdss_fb_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);

	if (!mfd)
		return -ENODEV;

	dev_dbg(&pdev->dev, "display suspend\n");

	return mdss_fb_suspend_sub(mfd);
}

static int mdss_fb_resume(struct platform_device *pdev)
{
	struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);

	if (!mfd)
		return -ENODEV;

	dev_dbg(&pdev->dev, "display resume\n");

	return mdss_fb_resume_sub(mfd);
}
#else
#define mdss_fb_suspend NULL
#define mdss_fb_resume NULL
#endif
+
/* dev_pm_ops system-sleep callbacks used when CONFIG_PM_SLEEP is set. */
#ifdef CONFIG_PM_SLEEP
static int mdss_fb_pm_suspend(struct device *dev)
{
	struct msm_fb_data_type *mfd = dev_get_drvdata(dev);

	if (!mfd)
		return -ENODEV;

	dev_dbg(dev, "display pm suspend\n");

	return mdss_fb_suspend_sub(mfd);
}

static int mdss_fb_pm_resume(struct device *dev)
{
	struct msm_fb_data_type *mfd = dev_get_drvdata(dev);

	if (!mfd)
		return -ENODEV;

	dev_dbg(dev, "display pm resume\n");

	/*
	 * It is possible that the runtime status of the fb device may
	 * have been active when the system was suspended. Reset the runtime
	 * status to suspended state after a complete system resume.
	 */
	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);
	pm_runtime_enable(dev);

	return mdss_fb_resume_sub(mfd);
}
#endif
+
/* system-sleep hooks (no-ops unless CONFIG_PM_SLEEP defines them above) */
static const struct dev_pm_ops mdss_fb_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mdss_fb_pm_suspend, mdss_fb_pm_resume)
};

/* device-tree match table for the mdss framebuffer node */
static const struct of_device_id mdss_fb_dt_match[] = {
	{ .compatible = "qcom,mdss-fb",},
	{}
};
EXPORT_COMPAT("qcom,mdss-fb");

static struct platform_driver mdss_fb_driver = {
	.probe = mdss_fb_probe,
	.remove = mdss_fb_remove,
	/* legacy hooks; NULL when CONFIG_PM_SLEEP provides pm ops */
	.suspend = mdss_fb_suspend,
	.resume = mdss_fb_resume,
	.shutdown = mdss_fb_shutdown,
	.driver = {
		.name = "mdss_fb",
		.of_match_table = mdss_fb_dt_match,
		.pm = &mdss_fb_pm_ops,
	},
};
+
+static void mdss_fb_scale_bl(struct msm_fb_data_type *mfd, u32 *bl_lvl)
+{
+	u32 temp = *bl_lvl;
+
+	pr_debug("input = %d, scale = %d\n", temp, mfd->bl_scale);
+	if (temp >= mfd->bl_min_lvl) {
+		if (temp > mfd->panel_info->bl_max) {
+			pr_warn("%s: invalid bl level\n",
+				__func__);
+			temp = mfd->panel_info->bl_max;
+		}
+		if (mfd->bl_scale > 1024) {
+			pr_warn("%s: invalid bl scale\n",
+				__func__);
+			mfd->bl_scale = 1024;
+		}
+		/*
+		 * bl_scale is the numerator of
+		 * scaling fraction (x/1024)
+		 */
+		temp = (temp * mfd->bl_scale) / 1024;
+
+		/*if less than minimum level, use min level*/
+		if (temp < mfd->bl_min_lvl)
+			temp = mfd->bl_min_lvl;
+	}
+	pr_debug("output = %d\n", temp);
+
+	(*bl_lvl) = temp;
+}
+
/*
 * mdss_fb_set_backlight() - apply a backlight level to the panel
 * @mfd:     framebuffer data
 * @bkl_lvl: requested (unscaled) backlight level
 *
 * Defers the request (stashing it in unset_bl_level) while updates are
 * not allowed — display off outside DCM, cont-splash active — unless in
 * backlight calibration mode.  Otherwise runs the level through the AD
 * attenuation hook and bl_scale before pushing it to the panel, and
 * emits BL/AD notify events as needed.
 *
 * must call this function from within mfd->bl_lock
 */
void mdss_fb_set_backlight(struct msm_fb_data_type *mfd, u32 bkl_lvl)
{
	struct mdss_panel_data *pdata;
	u32 temp = bkl_lvl;
	bool ad_bl_notify_needed = false;
	bool bl_notify_needed = false;

	if ((((mdss_fb_is_power_off(mfd) && mfd->dcm_state != DCM_ENTER)
		|| !mfd->allow_bl_update) && !IS_CALIB_MODE_BL(mfd)) ||
		mfd->panel_info->cont_splash_enabled) {
		/* defer: replayed later by mdss_fb_update_backlight() */
		mfd->unset_bl_level = bkl_lvl;
		return;
	} else if (mdss_fb_is_power_on(mfd) && mfd->panel_info->panel_dead) {
		/* dead panel: remember the current level for recovery */
		mfd->unset_bl_level = mfd->bl_level;
	} else {
		/* U32_MAX sentinel: nothing pending */
		mfd->unset_bl_level = U32_MAX;
	}

	pdata = dev_get_platdata(&mfd->pdev->dev);

	if ((pdata) && (pdata->set_backlight)) {
		if (mfd->mdp.ad_calc_bl)
			(*mfd->mdp.ad_calc_bl)(mfd, temp, &temp,
							&ad_bl_notify_needed);
		if (!IS_CALIB_MODE_BL(mfd))
			mdss_fb_scale_bl(mfd, &temp);
		/*
		 * Even though backlight has been scaled, want to show that
		 * backlight has been set to bkl_lvl to those that read from
		 * sysfs node. Thus, need to set bl_level even if it appears
		 * the backlight has already been set to the level it is at,
		 * as well as setting bl_level to bkl_lvl even though the
		 * backlight has been set to the scaled value.
		 */
		if (mfd->bl_level_scaled == temp) {
			mfd->bl_level = bkl_lvl;
		} else {
			if (mfd->bl_level != bkl_lvl)
				bl_notify_needed = true;
			pr_debug("backlight sent to panel :%d\n", temp);
			pdata->set_backlight(pdata, temp);
			mfd->bl_level = bkl_lvl;
			mfd->bl_level_scaled = temp;
		}
		if (ad_bl_notify_needed)
			mdss_fb_bl_update_notify(mfd,
				NOTIFY_TYPE_BL_AD_ATTEN_UPDATE);
		if (bl_notify_needed)
			mdss_fb_bl_update_notify(mfd,
				NOTIFY_TYPE_BL_UPDATE);
	}
}
+
+/*
+ * mdss_fb_update_backlight() - replay a deferred backlight request
+ * @mfd: framebuffer data for the display
+ *
+ * Pushes the level parked in mfd->unset_bl_level (if any) to the panel,
+ * used once updates were previously blocked by allow_bl_update.  No-op
+ * when nothing is pending (unset_bl_level == U32_MAX) or when regular
+ * updates are already allowed.  Takes mfd->bl_lock internally.
+ */
+void mdss_fb_update_backlight(struct msm_fb_data_type *mfd)
+{
+	struct mdss_panel_data *pdata;
+	u32 temp;
+	bool bl_notify = false;
+
+	if (mfd->unset_bl_level == U32_MAX)
+		return;
+	mutex_lock(&mfd->bl_lock);
+	if (!mfd->allow_bl_update) {
+		pdata = dev_get_platdata(&mfd->pdev->dev);
+		if ((pdata) && (pdata->set_backlight)) {
+			mfd->bl_level = mfd->unset_bl_level;
+			temp = mfd->bl_level;
+			/* apply AD attenuation before sending to the panel */
+			if (mfd->mdp.ad_calc_bl)
+				(*mfd->mdp.ad_calc_bl)(mfd, temp, &temp,
+								&bl_notify);
+			if (bl_notify)
+				mdss_fb_bl_update_notify(mfd,
+					NOTIFY_TYPE_BL_AD_ATTEN_UPDATE);
+			mdss_fb_bl_update_notify(mfd, NOTIFY_TYPE_BL_UPDATE);
+			pdata->set_backlight(pdata, temp);
+			/* scaled level tracks the unscaled request here */
+			mfd->bl_level_scaled = mfd->unset_bl_level;
+			mfd->allow_bl_update = true;
+		}
+	}
+	mutex_unlock(&mfd->bl_lock);
+}
+
+/*
+ * mdss_fb_start_disp_thread() - spawn the per-fb display kthread
+ * @mfd: framebuffer data for the display
+ *
+ * Refreshes the split configuration, clears the pending-commit count and
+ * runs __mdss_fb_display_thread as "mdss_fb<index>".
+ *
+ * Return: 0 on success; negative errno from kthread_run() on failure,
+ * in which case disp_thread is reset to NULL.
+ */
+static int mdss_fb_start_disp_thread(struct msm_fb_data_type *mfd)
+{
+	int ret = 0;
+
+	pr_debug("%pS: start display thread fb%d\n",
+		__builtin_return_address(0), mfd->index);
+
+	/* this is needed for new split request from debugfs */
+	mdss_fb_get_split(mfd);
+
+	atomic_set(&mfd->commits_pending, 0);
+	mfd->disp_thread = kthread_run(__mdss_fb_display_thread,
+				mfd, "mdss_fb%d", mfd->index);
+
+	if (IS_ERR(mfd->disp_thread)) {
+		pr_err("ERROR: unable to start display thread %d\n",
+				mfd->index);
+		ret = PTR_ERR(mfd->disp_thread);
+		mfd->disp_thread = NULL;
+	}
+
+	return ret;
+}
+
+/*
+ * mdss_fb_stop_disp_thread() - stop the per-fb display kthread
+ * @mfd: framebuffer data for the display
+ *
+ * kthread_stop() blocks until the thread exits; disp_thread is cleared
+ * afterwards.  Callers must ensure disp_thread is non-NULL.
+ */
+static void mdss_fb_stop_disp_thread(struct msm_fb_data_type *mfd)
+{
+	pr_debug("%pS: stop display thread fb%d\n",
+		__builtin_return_address(0), mfd->index);
+
+	kthread_stop(mfd->disp_thread);
+	mfd->disp_thread = NULL;
+}
+
+/*
+ * mdss_panel_validate_debugfs_info() - apply a debugfs panel override
+ * @mfd: framebuffer data for the display
+ *
+ * If the debugfs override flag is set, powers the pipeline down (with
+ * panel_reconfig set so the off handler knows a reconfig follows),
+ * copies the debugfs values into panel_info, revalidates split config
+ * and the fb var data, and asks the panel to re-check its parameters.
+ */
+static void mdss_panel_validate_debugfs_info(struct msm_fb_data_type *mfd)
+{
+	struct mdss_panel_info *panel_info = mfd->panel_info;
+	struct fb_info *fbi = mfd->fbi;
+	struct fb_var_screeninfo *var = &fbi->var;
+	struct mdss_panel_data *pdata = container_of(panel_info,
+				struct mdss_panel_data, panel_info);
+
+	if (panel_info->debugfs_info->override_flag) {
+		if (mfd->mdp.off_fnc) {
+			mfd->panel_reconfig = true;
+			mfd->mdp.off_fnc(mfd);
+			mfd->panel_reconfig = false;
+		}
+
+		pr_debug("Overriding panel_info with debugfs_info\n");
+		panel_info->debugfs_info->override_flag = 0;
+		mdss_panel_debugfsinfo_to_panelinfo(panel_info);
+		if (is_panel_split(mfd) && pdata->next)
+			mdss_fb_validate_split(pdata->panel_info.xres,
+					pdata->next->panel_info.xres, mfd);
+		mdss_panelinfo_to_fb_var(panel_info, var);
+		if (mdss_fb_send_panel_event(mfd, MDSS_EVENT_CHECK_PARAMS,
+							panel_info))
+			pr_err("Failed to send panel event CHECK_PARAMS\n");
+	}
+}
+
+/*
+ * mdss_fb_blank_blank() - transition the panel to a blank/low-power state
+ * @mfd:             framebuffer data for the display
+ * @req_power_state: target state (MDSS_PANEL_POWER_OFF/LP1/LP2)
+ *
+ * Wakes anyone waiting on update notifications with a SUSPEND event,
+ * and - when fully powering off - stops the display thread and drives
+ * the backlight to zero, remembering the current level in unset_bl_level
+ * so the next unblank can restore it.  Finally calls the mdp off handler.
+ *
+ * Return: 0 on success or when already in the requested state; the off
+ * handler's error code otherwise (panel_power_state is restored then).
+ */
+static int mdss_fb_blank_blank(struct msm_fb_data_type *mfd,
+	int req_power_state)
+{
+	int ret = 0;
+	int cur_power_state, current_bl;
+
+	if (!mfd)
+		return -EINVAL;
+
+	/* nothing to do if already off or there is no off handler */
+	if (!mdss_fb_is_power_on(mfd) || !mfd->mdp.off_fnc)
+		return 0;
+
+	cur_power_state = mfd->panel_power_state;
+
+	pr_debug("Transitioning from %d --> %d\n", cur_power_state,
+		req_power_state);
+
+	if (cur_power_state == req_power_state) {
+		pr_debug("No change in power state\n");
+		return 0;
+	}
+
+	/* release any waiters on update/no-update notifications */
+	mutex_lock(&mfd->update.lock);
+	mfd->update.type = NOTIFY_TYPE_SUSPEND;
+	mfd->update.is_suspend = 1;
+	mutex_unlock(&mfd->update.lock);
+	complete(&mfd->update.comp);
+	del_timer(&mfd->no_update.timer);
+	mfd->no_update.value = NOTIFY_TYPE_SUSPEND;
+	complete(&mfd->no_update.comp);
+
+	mfd->op_enable = false;
+	if (mdss_panel_is_power_off(req_power_state)) {
+		/* Stop Display thread */
+		if (mfd->disp_thread)
+			mdss_fb_stop_disp_thread(mfd);
+		/*
+		 * Force backlight to 0 for the off transition, but park
+		 * the previous level so unblank can restore it.
+		 */
+		mutex_lock(&mfd->bl_lock);
+		current_bl = mfd->bl_level;
+		mfd->allow_bl_update = true;
+		mdss_fb_set_backlight(mfd, 0);
+		mfd->allow_bl_update = false;
+		mfd->unset_bl_level = current_bl;
+		mutex_unlock(&mfd->bl_lock);
+	}
+	mfd->panel_power_state = req_power_state;
+
+	ret = mfd->mdp.off_fnc(mfd);
+	if (ret)
+		mfd->panel_power_state = cur_power_state;
+	else if (mdss_panel_is_power_off(req_power_state))
+		mdss_fb_release_fences(mfd);
+	mfd->op_enable = true;
+	complete(&mfd->power_off_comp);
+
+	return ret;
+}
+
+/*
+ * mdss_fb_blank_unblank() - transition the panel to full interactive power
+ * @mfd: framebuffer data for the display
+ *
+ * Applies any pending debugfs panel override, ensures the display thread
+ * is running, calls the mdp on handler and re-arms update notification
+ * state.  When coming from a full power-off, a parked backlight level is
+ * restored (or the calibration level, in calibration mode) and backlight
+ * updates are then re-blocked until the first kickoff so the backlight
+ * cannot light up before a frame reaches the panel.
+ *
+ * Return: 0 on success (or when already interactive), negative errno on
+ * failure; the display thread is stopped again if the on handler fails.
+ */
+static int mdss_fb_blank_unblank(struct msm_fb_data_type *mfd)
+{
+	int ret = 0;
+	int cur_power_state;
+
+	if (!mfd)
+		return -EINVAL;
+
+	if (mfd->panel_info->debugfs_info)
+		mdss_panel_validate_debugfs_info(mfd);
+
+	/* Start Display thread */
+	if (mfd->disp_thread == NULL) {
+		ret = mdss_fb_start_disp_thread(mfd);
+		/*
+		 * mdss_fb_start_disp_thread() returns 0 or a negative
+		 * errno in an int; IS_ERR_VALUE() is only well-defined
+		 * for unsigned long values, so test the sign directly.
+		 */
+		if (ret < 0)
+			return ret;
+	}
+
+	cur_power_state = mfd->panel_power_state;
+	pr_debug("Transitioning from %d --> %d\n", cur_power_state,
+		MDSS_PANEL_POWER_ON);
+
+	if (mdss_panel_is_power_on_interactive(cur_power_state)) {
+		pr_debug("No change in power state\n");
+		return 0;
+	}
+
+	if (mfd->mdp.on_fnc) {
+		struct mdss_panel_info *panel_info = mfd->panel_info;
+		struct fb_var_screeninfo *var = &mfd->fbi->var;
+
+		ret = mfd->mdp.on_fnc(mfd);
+		if (ret) {
+			mdss_fb_stop_disp_thread(mfd);
+			goto error;
+		}
+
+		mfd->panel_power_state = MDSS_PANEL_POWER_ON;
+		mfd->panel_info->panel_dead = false;
+		mutex_lock(&mfd->update.lock);
+		mfd->update.type = NOTIFY_TYPE_UPDATE;
+		mfd->update.is_suspend = 0;
+		mutex_unlock(&mfd->update.lock);
+
+		/*
+		 * Panel info can change depending in the information
+		 * programmed in the controller.
+		 * Update this info in the upstream structs.
+		 */
+		mdss_panelinfo_to_fb_var(panel_info, var);
+
+		/* Start the work thread to signal idle time */
+		if (mfd->idle_time)
+			schedule_delayed_work(&mfd->idle_notify_work,
+				msecs_to_jiffies(mfd->idle_time));
+	}
+
+	/* Reset the backlight only if the panel was off */
+	if (mdss_panel_is_power_off(cur_power_state)) {
+		mutex_lock(&mfd->bl_lock);
+		if (!mfd->allow_bl_update) {
+			mfd->allow_bl_update = true;
+			/*
+			 * If in AD calibration mode then frameworks would not
+			 * be allowed to update backlight hence post unblank
+			 * the backlight would remain 0 (0 is set in blank).
+			 * Hence resetting back to calibration mode value
+			 */
+			if (IS_CALIB_MODE_BL(mfd))
+				mdss_fb_set_backlight(mfd, mfd->calib_mode_bl);
+			else if ((!mfd->panel_info->mipi.post_init_delay) &&
+				(mfd->unset_bl_level != U32_MAX))
+				mdss_fb_set_backlight(mfd, mfd->unset_bl_level);
+
+			/*
+			 * it blocks the backlight update between unblank and
+			 * first kickoff to avoid backlight turn on before black
+			 * frame is transferred to panel through unblank call.
+			 */
+			mfd->allow_bl_update = false;
+		}
+		mutex_unlock(&mfd->bl_lock);
+	}
+
+error:
+	return ret;
+}
+
+/*
+ * mdss_fb_blank_sub() - dispatch a blank/unblank request
+ * @blank_mode: FB_BLANK_* mode or MDSS BLANK_FLAG_LP/ULP extension
+ * @info:       fbdev framebuffer info
+ * @op_enable:  operations-enabled gate; request rejected when 0
+ *
+ * Maps the LP/ULP extensions onto full unblank/powerdown for panels
+ * other than command-mode, then routes to mdss_fb_blank_blank() or
+ * mdss_fb_blank_unblank().  Every exit after ATRACE_BEGIN() now pairs
+ * with ATRACE_END() - the previous early returns left the trace marker
+ * unbalanced.
+ *
+ * Return: 0 on success or no-op transitions, negative errno otherwise.
+ */
+static int mdss_fb_blank_sub(int blank_mode, struct fb_info *info,
+			     int op_enable)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	int ret = 0;
+	int cur_power_state, req_power_state = MDSS_PANEL_POWER_OFF;
+	char trace_buffer[32];
+
+	if (!mfd || !op_enable)
+		return -EPERM;
+
+	if (mfd->dcm_state == DCM_ENTER)
+		return -EPERM;
+
+	pr_debug("%pS mode:%d\n", __builtin_return_address(0),
+		blank_mode);
+
+	snprintf(trace_buffer, sizeof(trace_buffer), "fb%d blank %d",
+		mfd->index, blank_mode);
+	ATRACE_BEGIN(trace_buffer);
+
+	cur_power_state = mfd->panel_power_state;
+
+	/*
+	 * Low power (lp) and ultra low power (ulp) modes are currently only
+	 * supported for command mode panels. For all other panel, treat lp
+	 * mode as full unblank and ulp mode as full blank.
+	 */
+	if (mfd->panel_info->type != MIPI_CMD_PANEL) {
+		if (blank_mode == BLANK_FLAG_LP) {
+			pr_debug("lp mode only valid for cmd mode panels\n");
+			if (mdss_fb_is_power_on_interactive(mfd)) {
+				/* balance the trace marker on early exit */
+				ATRACE_END(trace_buffer);
+				return 0;
+			}
+			blank_mode = FB_BLANK_UNBLANK;
+		} else if (blank_mode == BLANK_FLAG_ULP) {
+			pr_debug("ulp mode valid for cmd mode panels\n");
+			if (mdss_fb_is_power_off(mfd)) {
+				/* balance the trace marker on early exit */
+				ATRACE_END(trace_buffer);
+				return 0;
+			}
+			blank_mode = FB_BLANK_POWERDOWN;
+		}
+	}
+
+	switch (blank_mode) {
+	case FB_BLANK_UNBLANK:
+		pr_debug("unblank called. cur pwr state=%d\n", cur_power_state);
+		ret = mdss_fb_blank_unblank(mfd);
+		break;
+	case BLANK_FLAG_ULP:
+		req_power_state = MDSS_PANEL_POWER_LP2;
+		pr_debug("ultra low power mode requested\n");
+		if (mdss_fb_is_power_off(mfd)) {
+			pr_debug("Unsupp transition: off --> ulp\n");
+			/* balance the trace marker on early exit */
+			ATRACE_END(trace_buffer);
+			return 0;
+		}
+
+		ret = mdss_fb_blank_blank(mfd, req_power_state);
+		break;
+	case BLANK_FLAG_LP:
+		req_power_state = MDSS_PANEL_POWER_LP1;
+		pr_debug(" power mode requested\n");
+
+		/*
+		 * If low power mode is requested when panel is already off,
+		 * then first unblank the panel before entering low power mode
+		 */
+		if (mdss_fb_is_power_off(mfd) && mfd->mdp.on_fnc) {
+			pr_debug("off --> lp. switch to on first\n");
+			ret = mdss_fb_blank_unblank(mfd);
+			if (ret)
+				break;
+		}
+
+		ret = mdss_fb_blank_blank(mfd, req_power_state);
+		break;
+	case FB_BLANK_HSYNC_SUSPEND:
+	case FB_BLANK_POWERDOWN:
+	default:
+		req_power_state = MDSS_PANEL_POWER_OFF;
+		pr_debug("blank powerdown called\n");
+		ret = mdss_fb_blank_blank(mfd, req_power_state);
+		break;
+	}
+
+	/* Notify listeners */
+	sysfs_notify(&mfd->fbi->dev->kobj, NULL, "show_blank_event");
+
+	ATRACE_END(trace_buffer);
+
+	return ret;
+}
+
+/*
+ * mdss_fb_blank() - fb_ops blank hook
+ * @blank_mode: FB_BLANK_* mode or MDSS BLANK_FLAG_LP/ULP extension
+ * @info:       fbdev framebuffer info
+ *
+ * Waits for pending pans, records the requested state for resume when
+ * operations are disabled (suspended), pulls the panel out of LPM when
+ * unblanking, and delegates the real work to mdss_fb_blank_sub().
+ * Serialized against sysfs accessors via mdss_sysfs_lock.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int mdss_fb_blank(int blank_mode, struct fb_info *info)
+{
+	int ret;
+	struct mdss_panel_data *pdata;
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+
+	ret = mdss_fb_pan_idle(mfd);
+	if (ret) {
+		pr_warn("mdss_fb_pan_idle for fb%d failed. ret=%d\n",
+			mfd->index, ret);
+		return ret;
+	}
+	mutex_lock(&mfd->mdss_sysfs_lock);
+	if (mfd->op_enable == 0) {
+		/* driver suspended: just remember the state for resume */
+		if (blank_mode == FB_BLANK_UNBLANK)
+			mfd->suspend.panel_power_state = MDSS_PANEL_POWER_ON;
+		else if (blank_mode == BLANK_FLAG_ULP)
+			mfd->suspend.panel_power_state = MDSS_PANEL_POWER_LP2;
+		else if (blank_mode == BLANK_FLAG_LP)
+			mfd->suspend.panel_power_state = MDSS_PANEL_POWER_LP1;
+		else
+			mfd->suspend.panel_power_state = MDSS_PANEL_POWER_OFF;
+		ret = 0;
+		goto end;
+	}
+	pr_debug("mode: %d\n", blank_mode);
+
+	pdata = dev_get_platdata(&mfd->pdev->dev);
+	/*
+	 * Guard against missing platform data before dereferencing it;
+	 * the original code dereferenced pdata unconditionally.
+	 */
+	if (!pdata) {
+		pr_err("no panel data for fb%d\n", mfd->index);
+		ret = -ENODEV;
+		goto end;
+	}
+
+	if (pdata->panel_info.is_lpm_mode &&
+			blank_mode == FB_BLANK_UNBLANK) {
+		pr_debug("panel is in lpm mode\n");
+		mfd->mdp.configure_panel(mfd, 0, 1);
+		mdss_fb_set_mdp_sync_pt_threshold(mfd, mfd->panel.type);
+		pdata->panel_info.is_lpm_mode = false;
+	}
+
+	ret = mdss_fb_blank_sub(blank_mode, info, mfd->op_enable);
+
+end:
+	mutex_unlock(&mfd->mdss_sysfs_lock);
+	return ret;
+}
+
+/*
+ * mdss_fb_create_ion_client() - create the ION client used for fb memory
+ * @mfd: framebuffer data for the display
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static inline int mdss_fb_create_ion_client(struct msm_fb_data_type *mfd)
+{
+	int rc;
+
+	mfd->fb_ion_client  = msm_ion_client_create("mdss_fb_iclient");
+	if (IS_ERR_OR_NULL(mfd->fb_ion_client)) {
+		/*
+		 * Capture the error before clearing the handle.  The old
+		 * code NULLed fb_ion_client and then returned PTR_RET()
+		 * of the already-NULLed pointer, reporting success (0)
+		 * on every failure.
+		 */
+		rc = mfd->fb_ion_client ? PTR_ERR(mfd->fb_ion_client) :
+				-ENOMEM;
+		pr_err("Err:client not created, val %d\n", rc);
+		mfd->fb_ion_client = NULL;
+		return rc;
+	}
+	return 0;
+}
+
+/*
+ * mdss_fb_free_fb_ion_memory() - tear down ION-backed framebuffer memory
+ * @mfd: framebuffer data for the display
+ *
+ * Reverses mdss_fb_alloc_fb_ion_memory(): unmaps the kernel mapping,
+ * detaches and releases the dma-buf from the SMMU domain (when one was
+ * attached) and frees the ION handle.  No-op when nothing is mapped.
+ */
+void mdss_fb_free_fb_ion_memory(struct msm_fb_data_type *mfd)
+{
+	if (!mfd) {
+		pr_err("no mfd\n");
+		return;
+	}
+
+	if (!mfd->fbi->screen_base)
+		return;
+
+	if (!mfd->fb_ion_client || !mfd->fb_ion_handle) {
+		pr_err("invalid input parameters for fb%d\n", mfd->index);
+		return;
+	}
+
+	mfd->fbi->screen_base = NULL;
+	mfd->fbi->fix.smem_start = 0;
+
+	ion_unmap_kernel(mfd->fb_ion_client, mfd->fb_ion_handle);
+
+	/*
+	 * The double-negated condition means: only detach when an iommu
+	 * domain is in use AND attachment, dmabuf and its ops are all
+	 * present (i.e. the attach/map in the alloc path succeeded).
+	 */
+	if (mfd->mdp.fb_mem_get_iommu_domain && !(!mfd->fb_attachment ||
+		!mfd->fb_attachment->dmabuf ||
+		!mfd->fb_attachment->dmabuf->ops)) {
+		dma_buf_unmap_attachment(mfd->fb_attachment, mfd->fb_table,
+				DMA_BIDIRECTIONAL);
+		dma_buf_detach(mfd->fbmem_buf, mfd->fb_attachment);
+		dma_buf_put(mfd->fbmem_buf);
+	}
+
+	ion_free(mfd->fb_ion_client, mfd->fb_ion_handle);
+	mfd->fb_ion_handle = NULL;
+	mfd->fbmem_buf = NULL;
+}
+
+/*
+ * mdss_fb_alloc_fb_ion_memory() - allocate framebuffer memory via ION
+ * @mfd:     framebuffer data for the display
+ * @fb_size: number of bytes to allocate
+ *
+ * Allocates fb_size bytes from the ION system heap, attaches and maps
+ * the resulting dma-buf into the display SMMU domain, and maps it into
+ * the kernel so fbi->screen_base is usable.  Cleaned up with
+ * mdss_fb_free_fb_ion_memory().
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int mdss_fb_alloc_fb_ion_memory(struct msm_fb_data_type *mfd, size_t fb_size)
+{
+	int rc = 0;
+	void *vaddr;
+	int domain;
+
+	if (!mfd) {
+		pr_err("Invalid input param - no mfd\n");
+		return -EINVAL;
+	}
+
+	if (!mfd->fb_ion_client) {
+		rc = mdss_fb_create_ion_client(mfd);
+		if (rc < 0) {
+			pr_err("fb ion client couldn't be created - %d\n", rc);
+			return rc;
+		}
+	}
+
+	pr_debug("size for mmap = %zu\n", fb_size);
+	mfd->fb_ion_handle = ion_alloc(mfd->fb_ion_client, fb_size, SZ_4K,
+			ION_HEAP(ION_SYSTEM_HEAP_ID), 0);
+	if (IS_ERR_OR_NULL(mfd->fb_ion_handle)) {
+		pr_err("unable to alloc fbmem from ion - %ld\n",
+				PTR_ERR(mfd->fb_ion_handle));
+		/*
+		 * PTR_ERR(NULL) is 0 and would be mistaken for success;
+		 * map a NULL handle to -ENOMEM and clear the stale
+		 * error-pointer so later paths don't treat it as valid.
+		 */
+		rc = mfd->fb_ion_handle ? PTR_ERR(mfd->fb_ion_handle) :
+				-ENOMEM;
+		mfd->fb_ion_handle = NULL;
+		return rc;
+	}
+
+	if (mfd->mdp.fb_mem_get_iommu_domain) {
+		mfd->fbmem_buf = ion_share_dma_buf(mfd->fb_ion_client,
+							mfd->fb_ion_handle);
+		if (IS_ERR(mfd->fbmem_buf)) {
+			rc = PTR_ERR(mfd->fbmem_buf);
+			goto fb_mmap_failed;
+		}
+
+		domain = mfd->mdp.fb_mem_get_iommu_domain();
+
+		mfd->fb_attachment = mdss_smmu_dma_buf_attach(mfd->fbmem_buf,
+				&mfd->pdev->dev, domain);
+		if (IS_ERR(mfd->fb_attachment)) {
+			rc = PTR_ERR(mfd->fb_attachment);
+			goto err_put;
+		}
+
+		mfd->fb_table = dma_buf_map_attachment(mfd->fb_attachment,
+				DMA_BIDIRECTIONAL);
+		if (IS_ERR(mfd->fb_table)) {
+			rc = PTR_ERR(mfd->fb_table);
+			goto err_detach;
+		}
+	} else {
+		pr_err("No IOMMU Domain\n");
+		rc = -EINVAL;
+		goto fb_mmap_failed;
+	}
+
+	vaddr  = ion_map_kernel(mfd->fb_ion_client, mfd->fb_ion_handle);
+	if (IS_ERR_OR_NULL(vaddr)) {
+		pr_err("ION memory mapping failed - %ld\n", PTR_ERR(vaddr));
+		/* a NULL mapping must not be reported as success */
+		rc = vaddr ? PTR_ERR(vaddr) : -ENOMEM;
+		goto err_unmap;
+	}
+	pr_debug("alloc 0x%zxB vaddr = %pK for fb%d\n", fb_size,
+			vaddr, mfd->index);
+
+	mfd->fbi->screen_base = (char *) vaddr;
+	mfd->fbi->fix.smem_len = fb_size;
+
+	return rc;
+
+err_unmap:
+	dma_buf_unmap_attachment(mfd->fb_attachment, mfd->fb_table,
+					DMA_BIDIRECTIONAL);
+err_detach:
+	dma_buf_detach(mfd->fbmem_buf, mfd->fb_attachment);
+err_put:
+	dma_buf_put(mfd->fbmem_buf);
+fb_mmap_failed:
+	ion_free(mfd->fb_ion_client, mfd->fb_ion_handle);
+	mfd->fb_attachment = NULL;
+	mfd->fb_table = NULL;
+	mfd->fb_ion_handle = NULL;
+	mfd->fbmem_buf = NULL;
+	return rc;
+}
+
+/**
+ * mdss_fb_fbmem_ion_mmap() -  Custom fb  mmap() function for MSM driver.
+ *
+ * @info -  Framebuffer info.
+ * @vma  -  VM area which is part of the process virtual memory.
+ *
+ * This framebuffer mmap function differs from standard mmap() function by
+ * allowing for customized page-protection and dynamically allocate framebuffer
+ * memory from system heap and map to iommu virtual address.
+ *
+ * Allocates the ION-backed framebuffer on first mmap, then walks the
+ * buffer's scatter-gather list remapping each chunk into the user VMA,
+ * honouring vm_pgoff as a byte offset into the buffer.
+ *
+ * Return: virtual address is returned through vma
+ */
+static int mdss_fb_fbmem_ion_mmap(struct fb_info *info,
+		struct vm_area_struct *vma)
+{
+	int rc = 0;
+	size_t req_size, fb_size;
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	struct sg_table *table;
+	unsigned long addr = vma->vm_start;
+	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
+	struct scatterlist *sg;
+	unsigned int i;
+	struct page *page;
+
+	if (!mfd || !mfd->pdev || !mfd->pdev->dev.of_node) {
+		pr_err("Invalid device node\n");
+		return -ENODEV;
+	}
+
+	req_size = vma->vm_end - vma->vm_start;
+	fb_size = mfd->fbi->fix.smem_len;
+	if (req_size > fb_size) {
+		pr_warn("requested map is greater than framebuffer\n");
+		return -EOVERFLOW;
+	}
+
+	/* lazily allocate the ION framebuffer on the first mmap */
+	if (!mfd->fbi->screen_base) {
+		rc = mdss_fb_alloc_fb_ion_memory(mfd, fb_size);
+		if (rc < 0) {
+			pr_err("fb mmap failed!!!!\n");
+			return rc;
+		}
+	}
+
+	table = mfd->fb_table;
+	if (IS_ERR(table)) {
+		pr_err("Unable to get sg_table from ion:%ld\n", PTR_ERR(table));
+		mfd->fbi->screen_base = NULL;
+		return PTR_ERR(table);
+	} else if (!table) {
+		pr_err("sg_list is NULL\n");
+		mfd->fbi->screen_base = NULL;
+		return -EINVAL;
+	}
+
+	page = sg_page(table->sgl);
+	if (page) {
+		/* map each sg chunk, skipping past the requested offset */
+		for_each_sg(table->sgl, sg, table->nents, i) {
+			unsigned long remainder = vma->vm_end - addr;
+			unsigned long len = sg->length;
+
+			page = sg_page(sg);
+
+			if (offset >= sg->length) {
+				/* whole chunk before the offset: skip it */
+				offset -= sg->length;
+				continue;
+			} else if (offset) {
+				/* offset lands inside this chunk */
+				page += offset / PAGE_SIZE;
+				len = sg->length - offset;
+				offset = 0;
+			}
+			len = min(len, remainder);
+
+			if (mfd->mdp_fb_page_protection ==
+					MDP_FB_PAGE_PROTECTION_WRITECOMBINE)
+				vma->vm_page_prot =
+					pgprot_writecombine(vma->vm_page_prot);
+
+			pr_debug("vma=%pK, addr=%x len=%ld\n",
+					vma, (unsigned int)addr, len);
+			pr_debug("vm_start=%x vm_end=%x vm_page_prot=%ld\n",
+					(unsigned int)vma->vm_start,
+					(unsigned int)vma->vm_end,
+					(unsigned long int)vma->vm_page_prot);
+
+			/*
+			 * NOTE(review): io_remap_pfn_range() return value
+			 * is ignored here - a failed remap would go
+			 * unreported; worth confirming upstream intent.
+			 */
+			io_remap_pfn_range(vma, addr, page_to_pfn(page), len,
+					vma->vm_page_prot);
+			addr += len;
+			if (addr >= vma->vm_end)
+				break;
+		}
+	} else {
+		pr_err("PAGE is null\n");
+		mdss_fb_free_fb_ion_memory(mfd);
+		return -ENOMEM;
+	}
+
+	return rc;
+}
+
+/*
+ * mdss_fb_physical_mmap() - Custom fb mmap() function for MSM driver.
+ *
+ * @info -  Framebuffer info.
+ * @vma  -  VM area which is part of the process virtual memory.
+ *
+ * This framebuffer mmap function differs from standard mmap() function as
+ * map to framebuffer memory from the CMA memory which is allocated during
+ * bootup.
+ *
+ * Return: virtual address is returned through vma
+ */
+static int mdss_fb_physical_mmap(struct fb_info *info,
+		struct vm_area_struct *vma)
+{
+	/* Get frame buffer memory range. */
+	unsigned long start = info->fix.smem_start;
+	u32 len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len);
+	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+
+	if (!start) {
+		pr_warn("No framebuffer memory is allocated\n");
+		return -ENOMEM;
+	}
+
+	/* Set VM flags. */
+	start &= PAGE_MASK;
+	/* reject empty/inverted maps and maps extending past the buffer */
+	if ((vma->vm_end <= vma->vm_start) ||
+			(off >= len) ||
+			((vma->vm_end - vma->vm_start) > (len - off)))
+		return -EINVAL;
+	off += start;
+	/* overflow check: off + start must not wrap around */
+	if (off < start)
+		return -EINVAL;
+	vma->vm_pgoff = off >> PAGE_SHIFT;
+	/* This is an IO map - tell maydump to skip this VMA */
+	vma->vm_flags |= VM_IO;
+
+	if (mfd->mdp_fb_page_protection == MDP_FB_PAGE_PROTECTION_WRITECOMBINE)
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+	/* Remap the frame buffer I/O range */
+	if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
+				vma->vm_end - vma->vm_start,
+				vma->vm_page_prot))
+		return -EAGAIN;
+
+	return 0;
+}
+
+/*
+ * mdss_fb_mmap() - fb_ops mmap hook; routes to the right backing store
+ * @info: fbdev framebuffer info
+ * @vma:  user VM area to map into
+ *
+ * Dispatches on the recorded mmap type.  The very first mmap decides the
+ * scheme: ION allocation when no physical framebuffer was reserved,
+ * otherwise the physical (CMA) mapping; the choice is then sticky.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int mdss_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	int ret;
+
+	switch (mfd->fb_mmap_type) {
+	case MDP_FB_MMAP_ION_ALLOC:
+		ret = mdss_fb_fbmem_ion_mmap(info, vma);
+		break;
+	case MDP_FB_MMAP_PHYSICAL_ALLOC:
+		ret = mdss_fb_physical_mmap(info, vma);
+		break;
+	default:
+		/* first mmap picks and latches the allocation scheme */
+		if (!info->fix.smem_start && !mfd->fb_ion_handle) {
+			ret = mdss_fb_fbmem_ion_mmap(info, vma);
+			mfd->fb_mmap_type = MDP_FB_MMAP_ION_ALLOC;
+		} else {
+			ret = mdss_fb_physical_mmap(info, vma);
+			mfd->fb_mmap_type = MDP_FB_MMAP_PHYSICAL_ALLOC;
+		}
+		break;
+	}
+
+	if (ret < 0)
+		pr_err("fb mmap failed with rc = %d\n", ret);
+
+	return ret;
+}
+
+/* fbdev operations for MDSS framebuffers; installed in mdss_fb_register() */
+static struct fb_ops mdss_fb_ops = {
+	.owner = THIS_MODULE,
+	.fb_open = mdss_fb_open,
+	.fb_release = mdss_fb_release,
+	.fb_check_var = mdss_fb_check_var,	/* vinfo check */
+	.fb_set_par = mdss_fb_set_par,	/* set the video mode */
+	.fb_blank = mdss_fb_blank,	/* blank display */
+	.fb_pan_display = mdss_fb_pan_display,	/* pan display */
+	.fb_ioctl_v2 = mdss_fb_ioctl,	/* perform fb specific ioctl */
+#ifdef CONFIG_COMPAT
+	.fb_compat_ioctl_v2 = mdss_fb_compat_ioctl,
+#endif
+	.fb_mmap = mdss_fb_mmap,
+};
+
+/*
+ * mdss_fb_alloc_fbmem_iommu() - allocate framebuffer memory via SMMU
+ * @mfd: framebuffer data for the display
+ * @dom: iommu domain to map the allocation into
+ *
+ * Reads the reserved-memory size from the "linux,contiguous-region"
+ * phandle, allocates coherent memory mapped through the SMMU and fills
+ * fbi->screen_base/fix.smem_start/fix.smem_len.  Returns success with
+ * no allocation when no region is reserved (deferred to first mmap).
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int mdss_fb_alloc_fbmem_iommu(struct msm_fb_data_type *mfd, int dom)
+{
+	void *virt = NULL;
+	phys_addr_t phys = 0;
+	size_t size = 0;
+	struct platform_device *pdev = mfd->pdev;
+	int rc = 0;
+	struct device_node *fbmem_pnode = NULL;
+
+	if (!pdev || !pdev->dev.of_node) {
+		pr_err("Invalid device node\n");
+		return -ENODEV;
+	}
+
+	fbmem_pnode = of_parse_phandle(pdev->dev.of_node,
+		"linux,contiguous-region", 0);
+	if (!fbmem_pnode) {
+		/* no reserved region: allocation happens later via ION */
+		pr_debug("fbmem is not reserved for %s\n", pdev->name);
+		mfd->fbi->screen_base = NULL;
+		mfd->fbi->fix.smem_start = 0;
+		return 0;
+	}
+	{
+		const u32 *addr;
+		u64 len;
+
+		addr = of_get_address(fbmem_pnode, 0, &len, NULL);
+		if (!addr) {
+			pr_err("fbmem size is not specified\n");
+			of_node_put(fbmem_pnode);
+			return -EINVAL;
+		}
+		size = (size_t)len;
+		of_node_put(fbmem_pnode);
+	}
+
+	pr_debug("%s frame buffer reserve_size=0x%zx\n", __func__, size);
+
+	if (size < PAGE_ALIGN(mfd->fbi->fix.line_length *
+			      mfd->fbi->var.yres_virtual))
+		pr_warn("reserve size is smaller than framebuffer size\n");
+
+	rc = mdss_smmu_dma_alloc_coherent(&pdev->dev, size, &phys, &mfd->iova,
+			&virt, GFP_KERNEL, dom);
+	if (rc) {
+		pr_err("unable to alloc fbmem size=%zx\n", size);
+		return -ENOMEM;
+	}
+
+	/* physical addresses beyond 4GB are not supported by the fb core */
+	if (MDSS_LPAE_CHECK(phys)) {
+		pr_warn("fb mem phys %pa > 4GB is not supported.\n", &phys);
+		mdss_smmu_dma_free_coherent(&pdev->dev, size, &virt,
+				phys, mfd->iova, dom);
+		return -ERANGE;
+	}
+
+	pr_debug("alloc 0x%zxB @ (%pa phys) (0x%pK virt) (%pa iova) for fb%d\n",
+		 size, &phys, virt, &mfd->iova, mfd->index);
+
+	mfd->fbi->screen_base = virt;
+	mfd->fbi->fix.smem_start = phys;
+	mfd->fbi->fix.smem_len = size;
+
+	return 0;
+}
+
+/*
+ * mdss_fb_alloc_fbmem() - allocate framebuffer memory for a display
+ * @mfd: framebuffer data for the display
+ *
+ * Prefers a panel-specific allocator when one is registered; otherwise
+ * falls back to the generic SMMU-backed allocation using the display's
+ * iommu domain.
+ *
+ * Return: 0 on success, -ENOMEM when no allocator is usable.
+ */
+static int mdss_fb_alloc_fbmem(struct msm_fb_data_type *mfd)
+{
+	int dom;
+
+	if (mfd->mdp.fb_mem_alloc_fnc)
+		return mfd->mdp.fb_mem_alloc_fnc(mfd);
+
+	if (!mfd->mdp.fb_mem_get_iommu_domain) {
+		pr_err("no fb memory allocator function defined\n");
+		return -ENOMEM;
+	}
+
+	dom = mfd->mdp.fb_mem_get_iommu_domain();
+	if (dom < 0)
+		return -ENOMEM;
+
+	return mdss_fb_alloc_fbmem_iommu(mfd, dom);
+}
+
+/*
+ * mdss_fb_register() - initialize and register the framebuffer device
+ * @mfd: framebuffer data for the display
+ *
+ * Fills fb_fix_screeninfo/fb_var_screeninfo according to the panel's
+ * pixel format, allocates framebuffer memory, initializes the driver's
+ * timers, completions and wait queues, and registers the device with
+ * the fbdev core.
+ *
+ * Return: 0 on success, -ENODEV for an unknown image type, -EPERM when
+ * register_framebuffer() fails.
+ */
+static int mdss_fb_register(struct msm_fb_data_type *mfd)
+{
+	int ret = -ENODEV;
+	int bpp;
+	char panel_name[20];
+	struct mdss_panel_info *panel_info = mfd->panel_info;
+	struct fb_info *fbi = mfd->fbi;
+	struct fb_fix_screeninfo *fix;
+	struct fb_var_screeninfo *var;
+	int *id;
+
+	/*
+	 * fb info initialization
+	 */
+	fix = &fbi->fix;
+	var = &fbi->var;
+
+	fix->type_aux = 0;	/* if type == FB_TYPE_INTERLEAVED_PLANES */
+	fix->visual = FB_VISUAL_TRUECOLOR;	/* True Color */
+	fix->ywrapstep = 0;	/* No support */
+	fix->mmio_start = 0;	/* No MMIO Address */
+	fix->mmio_len = 0;	/* No MMIO Address */
+	fix->accel = FB_ACCEL_NONE;/* FB_ACCEL_MSM needes to be added in fb.h */
+
+	/*
+	 * These assignments were previously chained with comma operators;
+	 * use plain statements so each one stands on its own.
+	 */
+	var->xoffset = 0;	/* Offset from virtual to visible */
+	var->yoffset = 0;	/* resolution */
+	var->grayscale = 0;	/* No graylevels */
+	var->nonstd = 0;	/* standard pixel format */
+	var->activate = FB_ACTIVATE_VBL;	/* activate it at vsync */
+	var->height = -1;	/* height of picture in mm */
+	var->width = -1;	/* width of picture in mm */
+	var->accel_flags = 0;	/* acceleration flags */
+	var->sync = 0;	/* see FB_SYNC_* */
+	var->rotate = 0;	/* angle we rotate counter clockwise */
+	mfd->op_enable = false;
+
+	/* per-format channel layout and bytes per pixel */
+	switch (mfd->fb_imgType) {
+	case MDP_RGB_565:
+		fix->type = FB_TYPE_PACKED_PIXELS;
+		fix->xpanstep = 1;
+		fix->ypanstep = 1;
+		var->vmode = FB_VMODE_NONINTERLACED;
+		var->blue.offset = 0;
+		var->green.offset = 5;
+		var->red.offset = 11;
+		var->blue.length = 5;
+		var->green.length = 6;
+		var->red.length = 5;
+		var->blue.msb_right = 0;
+		var->green.msb_right = 0;
+		var->red.msb_right = 0;
+		var->transp.offset = 0;
+		var->transp.length = 0;
+		bpp = 2;
+		break;
+
+	case MDP_RGB_888:
+		fix->type = FB_TYPE_PACKED_PIXELS;
+		fix->xpanstep = 1;
+		fix->ypanstep = 1;
+		var->vmode = FB_VMODE_NONINTERLACED;
+		var->blue.offset = 0;
+		var->green.offset = 8;
+		var->red.offset = 16;
+		var->blue.length = 8;
+		var->green.length = 8;
+		var->red.length = 8;
+		var->blue.msb_right = 0;
+		var->green.msb_right = 0;
+		var->red.msb_right = 0;
+		var->transp.offset = 0;
+		var->transp.length = 0;
+		bpp = 3;
+		break;
+
+	case MDP_ARGB_8888:
+		fix->type = FB_TYPE_PACKED_PIXELS;
+		fix->xpanstep = 1;
+		fix->ypanstep = 1;
+		var->vmode = FB_VMODE_NONINTERLACED;
+		var->blue.offset = 24;
+		var->green.offset = 16;
+		var->red.offset = 8;
+		var->blue.length = 8;
+		var->green.length = 8;
+		var->red.length = 8;
+		var->blue.msb_right = 0;
+		var->green.msb_right = 0;
+		var->red.msb_right = 0;
+		var->transp.offset = 0;
+		var->transp.length = 8;
+		bpp = 4;
+		break;
+
+	case MDP_RGBA_8888:
+		fix->type = FB_TYPE_PACKED_PIXELS;
+		fix->xpanstep = 1;
+		fix->ypanstep = 1;
+		var->vmode = FB_VMODE_NONINTERLACED;
+		var->blue.offset = 16;
+		var->green.offset = 8;
+		var->red.offset = 0;
+		var->blue.length = 8;
+		var->green.length = 8;
+		var->red.length = 8;
+		var->blue.msb_right = 0;
+		var->green.msb_right = 0;
+		var->red.msb_right = 0;
+		var->transp.offset = 24;
+		var->transp.length = 8;
+		bpp = 4;
+		break;
+
+	case MDP_YCRYCB_H2V1:
+		fix->type = FB_TYPE_INTERLEAVED_PLANES;
+		fix->xpanstep = 2;
+		fix->ypanstep = 1;
+		var->vmode = FB_VMODE_NONINTERLACED;
+
+		/* how about R/G/B offset? */
+		var->blue.offset = 0;
+		var->green.offset = 5;
+		var->red.offset = 11;
+		var->blue.length = 5;
+		var->green.length = 6;
+		var->red.length = 5;
+		var->blue.msb_right = 0;
+		var->green.msb_right = 0;
+		var->red.msb_right = 0;
+		var->transp.offset = 0;
+		var->transp.length = 0;
+		bpp = 2;
+		break;
+
+	default:
+		pr_err("msm_fb_init: fb %d unknown image type!\n",
+			    mfd->index);
+		return ret;
+	}
+
+	mdss_panelinfo_to_fb_var(panel_info, var);
+
+	/*
+	 * NOTE(review): this overwrites the FB_TYPE_* value chosen in the
+	 * switch above with the 3D-panel flag; looks intentional in MDSS
+	 * but worth confirming against userspace expectations.
+	 */
+	fix->type = panel_info->is_3d_panel;
+	if (mfd->mdp.fb_stride)
+		fix->line_length = mfd->mdp.fb_stride(mfd->index, var->xres,
+							bpp);
+	else
+		fix->line_length = var->xres * bpp;
+
+	var->xres_virtual = var->xres;
+	var->yres_virtual = panel_info->yres * mfd->fb_page;
+	var->bits_per_pixel = bpp * 8;	/* FrameBuffer color depth */
+
+	/*
+	 * Populate smem length here for uspace to get the
+	 * Framebuffer size when FBIO_FSCREENINFO ioctl is called.
+	 */
+	fix->smem_len = PAGE_ALIGN(fix->line_length * var->yres) * mfd->fb_page;
+
+	/* id field for fb app  */
+	id = (int *)&mfd->panel;
+
+	snprintf(fix->id, sizeof(fix->id), "mdssfb_%x", (u32) *id);
+
+	fbi->fbops = &mdss_fb_ops;
+	fbi->flags = FBINFO_FLAG_DEFAULT;
+	fbi->pseudo_palette = mdss_fb_pseudo_palette;
+
+	mfd->ref_cnt = 0;
+	mfd->panel_power_state = MDSS_PANEL_POWER_OFF;
+	mfd->dcm_state = DCM_UNINIT;
+
+	if (mdss_fb_alloc_fbmem(mfd))
+		pr_warn("unable to allocate fb memory in fb register\n");
+
+	mfd->op_enable = true;
+
+	mutex_init(&mfd->update.lock);
+	mutex_init(&mfd->no_update.lock);
+	mutex_init(&mfd->mdp_sync_pt_data.sync_mutex);
+	atomic_set(&mfd->mdp_sync_pt_data.commit_cnt, 0);
+	atomic_set(&mfd->commits_pending, 0);
+	atomic_set(&mfd->ioctl_ref_cnt, 0);
+	atomic_set(&mfd->kickoff_pending, 0);
+
+	init_timer(&mfd->no_update.timer);
+	mfd->no_update.timer.function = mdss_fb_no_update_notify_timer_cb;
+	mfd->no_update.timer.data = (unsigned long)mfd;
+	mfd->update.ref_count = 0;
+	mfd->no_update.ref_count = 0;
+	mfd->update.init_done = false;
+	init_completion(&mfd->update.comp);
+	init_completion(&mfd->no_update.comp);
+	init_completion(&mfd->power_off_comp);
+	init_completion(&mfd->power_set_comp);
+	init_waitqueue_head(&mfd->commit_wait_q);
+	init_waitqueue_head(&mfd->idle_wait_q);
+	init_waitqueue_head(&mfd->ioctl_q);
+	init_waitqueue_head(&mfd->kickoff_wait_q);
+
+	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+	if (ret)
+		pr_err("fb_alloc_cmap() failed!\n");
+
+	if (register_framebuffer(fbi) < 0) {
+		fb_dealloc_cmap(&fbi->cmap);
+
+		mfd->op_enable = false;
+		return -EPERM;
+	}
+
+	snprintf(panel_name, ARRAY_SIZE(panel_name), "mdss_panel_fb%d",
+		mfd->index);
+	mdss_panel_debugfs_init(panel_info, panel_name);
+	pr_info("FrameBuffer[%d] %dx%d registered successfully!\n", mfd->index,
+					fbi->var.xres, fbi->var.yres);
+
+	return 0;
+}
+
+/*
+ * mdss_fb_open() - fb_ops open hook
+ * @info: fbdev framebuffer info
+ * @user: opener identity flag from the fb core (unused here)
+ *
+ * Rejects opens during shutdown, tracks the opening struct file in
+ * mfd->file_list, takes a runtime-PM reference and unblanks the display
+ * on the first open.
+ *
+ * Return: 0 on success, negative errno on failure (the file node and PM
+ * reference are rolled back).
+ */
+static int mdss_fb_open(struct fb_info *info, int user)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	struct mdss_fb_file_info *file_info = NULL;
+	int result;
+	struct task_struct *task = current->group_leader;
+
+	if (mfd->shutdown_pending) {
+		pr_err_once("Shutdown pending. Aborting operation. Request from pid:%d name=%s\n",
+			current->tgid, task->comm);
+		sysfs_notify(&mfd->fbi->dev->kobj, NULL, "show_blank_event");
+		return -ESHUTDOWN;
+	}
+
+	file_info = kmalloc(sizeof(*file_info), GFP_KERNEL);
+	if (!file_info)
+		return -ENOMEM;
+
+	file_info->file = info->file;
+	list_add(&file_info->list, &mfd->file_list);
+
+	result = pm_runtime_get_sync(info->dev);
+
+	if (result < 0) {
+		pr_err("pm_runtime: fail to wake up\n");
+		/*
+		 * pm_runtime_get_sync() increments the usage count even
+		 * on failure; drop it so the count does not leak.
+		 */
+		pm_runtime_put_noidle(info->dev);
+		goto pm_error;
+	}
+
+	if (!mfd->ref_cnt) {
+		/* first opener powers the display on */
+		result = mdss_fb_blank_sub(FB_BLANK_UNBLANK, info,
+					   mfd->op_enable);
+		if (result) {
+			pr_err("can't turn on fb%d! rc=%d\n", mfd->index,
+				result);
+			goto blank_error;
+		}
+	}
+
+	mfd->ref_cnt++;
+	pr_debug("mfd refcount:%d file:%pK\n", mfd->ref_cnt, info->file);
+
+	return 0;
+
+blank_error:
+	pm_runtime_put(info->dev);
+pm_error:
+	list_del(&file_info->list);
+	kfree(file_info);
+	return result;
+}
+
+/*
+ * mdss_fb_release_all() - drop one or all references to a framebuffer
+ * @info:        fbdev framebuffer info
+ * @release_all: true to tear down every open file node, false to drop
+ *               only the node matching info->file
+ *
+ * Waits for in-flight ioctls and pans, removes the matching file node(s)
+ * from mfd->file_list (dropping a PM reference for each), and on the
+ * last release powers the display down, frees ION fb memory and resets
+ * the ioctl reference count.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int mdss_fb_release_all(struct fb_info *info, bool release_all)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	struct mdss_fb_file_info *file_info = NULL, *temp_file_info = NULL;
+	struct file *file = info->file;
+	int ret = 0;
+	bool node_found = false;
+	struct task_struct *task = current->group_leader;
+
+	if (!mfd->ref_cnt) {
+		pr_info("try to close unopened fb %d! from pid:%d name:%s\n",
+			mfd->index, current->tgid, task->comm);
+		return -EINVAL;
+	}
+
+	/* give pending ioctls up to a second to drain on a full release */
+	if (!wait_event_timeout(mfd->ioctl_q,
+		!atomic_read(&mfd->ioctl_ref_cnt) || !release_all,
+		msecs_to_jiffies(1000)))
+		pr_warn("fb%d ioctl could not finish. waited 1 sec.\n",
+			mfd->index);
+
+	/* wait only for the last release */
+	if (release_all || (mfd->ref_cnt == 1)) {
+		ret = mdss_fb_pan_idle(mfd);
+		if (ret && (ret != -ESHUTDOWN))
+			pr_warn("mdss_fb_pan_idle for fb%d failed. ret=%d ignoring.\n",
+				mfd->index, ret);
+	}
+
+	pr_debug("release_all = %s\n", release_all ? "true" : "false");
+
+	list_for_each_entry_safe(file_info, temp_file_info, &mfd->file_list,
+		list) {
+		if (!release_all && file_info->file != file)
+			continue;
+
+		pr_debug("found file node mfd->ref=%d\n", mfd->ref_cnt);
+		list_del(&file_info->list);
+		kfree(file_info);
+
+		mfd->ref_cnt--;
+		pm_runtime_put(info->dev);
+
+		node_found = true;
+
+		if (!release_all)
+			break;
+	}
+
+	if (!node_found || (release_all && mfd->ref_cnt))
+		pr_warn("file node not found or wrong ref cnt: release all:%d refcnt:%d\n",
+			release_all, mfd->ref_cnt);
+
+	pr_debug("current process=%s pid=%d mfd->ref=%d file:%pK\n",
+		task->comm, current->tgid, mfd->ref_cnt, info->file);
+
+	if (!mfd->ref_cnt || release_all) {
+		/* resources (if any) will be released during blank */
+		if (mfd->mdp.release_fnc)
+			mfd->mdp.release_fnc(mfd, NULL);
+
+		if (mfd->mdp.pp_release_fnc) {
+			ret = (*mfd->mdp.pp_release_fnc)(mfd);
+			if (ret)
+				pr_err("PP release failed ret %d\n", ret);
+		}
+
+		/* reset backlight before blank to prevent backlight from
+		 * enabling ahead of unblank. for some special cases like
+		 * adb shell stop/start.
+		 */
+		mdss_fb_set_backlight(mfd, 0);
+
+		ret = mdss_fb_blank_sub(FB_BLANK_POWERDOWN, info,
+			mfd->op_enable);
+		if (ret) {
+			pr_err("can't turn off fb%d! rc=%d current process=%s pid=%d\n",
+			      mfd->index, ret, task->comm, current->tgid);
+			return ret;
+		}
+		if (mfd->fb_ion_handle)
+			mdss_fb_free_fb_ion_memory(mfd);
+
+		atomic_set(&mfd->ioctl_ref_cnt, 0);
+	} else {
+		/* partial release: let mdp drop this file's resources */
+		if (mfd->mdp.release_fnc)
+			ret = mfd->mdp.release_fnc(mfd, file);
+
+		/* display commit is needed to release resources */
+		if (ret)
+			mdss_fb_pan_display(&mfd->fbi->var, mfd->fbi);
+	}
+
+	return ret;
+}
+
+/*
+ * mdss_fb_release() - fb_ops release hook; drops only the caller's
+ * file node (release_all == false).  @user is unused.
+ */
+static int mdss_fb_release(struct fb_info *info, int user)
+{
+	return mdss_fb_release_all(info, false);
+}
+
+/*
+ * mdss_fb_power_setting_idle() - wait for an in-flight power setting
+ * @mfd: framebuffer data for the display
+ *
+ * Blocks up to WAIT_DISP_OP_TIMEOUT ms on power_set_comp.  On timeout,
+ * the power-setting flag is force-cleared and other waiters released.
+ */
+static void mdss_fb_power_setting_idle(struct msm_fb_data_type *mfd)
+{
+	unsigned long rc;
+
+	if (!mfd->is_power_setting)
+		return;
+
+	/*
+	 * wait_for_completion_timeout() returns 0 on timeout or the
+	 * remaining jiffies otherwise; it can never be negative, so the
+	 * old -ERESTARTSYS branch was dead code and has been removed.
+	 */
+	rc = wait_for_completion_timeout(&mfd->power_set_comp,
+			msecs_to_jiffies(WAIT_DISP_OP_TIMEOUT));
+	if (!rc) {
+		pr_err("%s wait for power_set_comp timeout %d %d\n",
+			__func__, (int)rc, mfd->is_power_setting);
+		mfd->is_power_setting = false;
+		complete_all(&mfd->power_set_comp);
+	}
+}
+
+static void __mdss_fb_copy_fence(struct msm_sync_pt_data *sync_pt_data,
+	struct sync_fence **fences, u32 *fence_cnt)
+{
+	pr_debug("%s: wait for fences\n", sync_pt_data->fence_name);
+
+	mutex_lock(&sync_pt_data->sync_mutex);
+	/*
+	 * Assuming that acq_fen_cnt is sanitized in bufsync ioctl
+	 * to check for sync_pt_data->acq_fen_cnt <= MDP_MAX_FENCE_FD
+	 */
+	*fence_cnt = sync_pt_data->acq_fen_cnt;
+	sync_pt_data->acq_fen_cnt = 0;
+	if (*fence_cnt)
+		memcpy(fences, sync_pt_data->acq_fen,
+				*fence_cnt * sizeof(struct sync_fence *));
+	mutex_unlock(&sync_pt_data->sync_mutex);
+}
+
+static int __mdss_fb_wait_for_fence_sub(struct msm_sync_pt_data *sync_pt_data,
+	struct sync_fence **fences, int fence_cnt)
+{
+	int i, ret = 0;
+	unsigned long max_wait = msecs_to_jiffies(WAIT_MAX_FENCE_TIMEOUT);
+	unsigned long timeout = jiffies + max_wait;
+	long wait_ms, wait_jf;
+
+	/* buf sync */
+	for (i = 0; i < fence_cnt && !ret; i++) {
+		wait_jf = timeout - jiffies;
+		wait_ms = jiffies_to_msecs(wait_jf);
+
+		/*
+		 * In this loop, if one of the previous fence took long
+		 * time, give a chance for the next fence to check if
+		 * fence is already signalled. If not signalled it breaks
+		 * in the final wait timeout.
+		 */
+		if (wait_jf < 0)
+			wait_ms = WAIT_MIN_FENCE_TIMEOUT;
+		else
+			wait_ms = min_t(long, WAIT_FENCE_FIRST_TIMEOUT,
+					wait_ms);
+
+		ret = sync_fence_wait(fences[i], wait_ms);
+
+		if (ret == -ETIME) {
+			wait_jf = timeout - jiffies;
+			wait_ms = jiffies_to_msecs(wait_jf);
+			if (wait_jf < 0)
+				break;
+
+				wait_ms = min_t(long, WAIT_FENCE_FINAL_TIMEOUT,
+						wait_ms);
+
+			pr_warn("%s: sync_fence_wait timed out! ",
+					fences[i]->name);
+			pr_cont("Waiting %ld.%ld more seconds\n",
+				(wait_ms/MSEC_PER_SEC), (wait_ms%MSEC_PER_SEC));
+			MDSS_XLOG(sync_pt_data->timeline_value);
+			MDSS_XLOG_TOUT_HANDLER("mdp");
+			ret = sync_fence_wait(fences[i], wait_ms);
+
+			if (ret == -ETIME)
+				break;
+		}
+		sync_fence_put(fences[i]);
+	}
+
+	if (ret < 0) {
+		pr_err("%s: sync_fence_wait failed! ret = %x\n",
+				sync_pt_data->fence_name, ret);
+		for (; i < fence_cnt; i++)
+			sync_fence_put(fences[i]);
+	}
+	return ret;
+}
+
+int mdss_fb_wait_for_fence(struct msm_sync_pt_data *sync_pt_data)
+{
+	struct sync_fence *fences[MDP_MAX_FENCE_FD];
+	int fence_cnt = 0;
+
+	__mdss_fb_copy_fence(sync_pt_data, fences, &fence_cnt);
+
+	if (fence_cnt)
+		__mdss_fb_wait_for_fence_sub(sync_pt_data,
+			fences, fence_cnt);
+
+	return fence_cnt;
+}
+
+/**
+ * mdss_fb_signal_timeline() - signal a single release fence
+ * @sync_pt_data:	Sync point data structure for the timeline which
+ *			should be signaled.
+ *
+ * This is called after a frame has been pushed to display. This signals the
+ * timeline to release the fences associated with this frame.
+ */
+void mdss_fb_signal_timeline(struct msm_sync_pt_data *sync_pt_data)
+{
+	mutex_lock(&sync_pt_data->sync_mutex);
+	if (atomic_add_unless(&sync_pt_data->commit_cnt, -1, 0) &&
+			sync_pt_data->timeline) {
+		sw_sync_timeline_inc(sync_pt_data->timeline, 1);
+		MDSS_XLOG(sync_pt_data->timeline_value);
+		sync_pt_data->timeline_value++;
+
+		pr_debug("%s: buffer signaled! timeline val=%d remaining=%d\n",
+			sync_pt_data->fence_name, sync_pt_data->timeline_value,
+			atomic_read(&sync_pt_data->commit_cnt));
+	} else {
+		pr_debug("%s timeline signaled without commits val=%d\n",
+			sync_pt_data->fence_name, sync_pt_data->timeline_value);
+	}
+	mutex_unlock(&sync_pt_data->sync_mutex);
+}
+
+/**
+ * mdss_fb_release_fences() - signal all pending release fences
+ * @mfd:	Framebuffer data structure for display
+ *
+ * Release all currently pending release fences, including those that are in
+ * the process to be commtted.
+ *
+ * Note: this should only be called during close or suspend sequence.
+ */
+static void mdss_fb_release_fences(struct msm_fb_data_type *mfd)
+{
+	struct msm_sync_pt_data *sync_pt_data = &mfd->mdp_sync_pt_data;
+	int val;
+
+	mutex_lock(&sync_pt_data->sync_mutex);
+	if (sync_pt_data->timeline) {
+		val = sync_pt_data->threshold +
+			atomic_read(&sync_pt_data->commit_cnt);
+		sw_sync_timeline_inc(sync_pt_data->timeline, val);
+		sync_pt_data->timeline_value += val;
+		atomic_set(&sync_pt_data->commit_cnt, 0);
+	}
+	mutex_unlock(&sync_pt_data->sync_mutex);
+}
+
+static void mdss_fb_release_kickoff(struct msm_fb_data_type *mfd)
+{
+	if (mfd->wait_for_kickoff) {
+		atomic_set(&mfd->kickoff_pending, 0);
+		wake_up_all(&mfd->kickoff_wait_q);
+	}
+}
+
+/**
+ * __mdss_fb_sync_buf_done_callback() - process async display events
+ * @p:		Notifier block registered for async events.
+ * @event:	Event enum to identify the event.
+ * @data:	Optional argument provided with the event.
+ *
+ * See enum mdp_notify_event for events handled.
+ */
+static int __mdss_fb_sync_buf_done_callback(struct notifier_block *p,
+		unsigned long event, void *data)
+{
+	struct msm_sync_pt_data *sync_pt_data;
+	struct msm_fb_data_type *mfd;
+	int fence_cnt;
+	int ret = NOTIFY_OK;
+
+	sync_pt_data = container_of(p, struct msm_sync_pt_data, notifier);
+	mfd = container_of(sync_pt_data, struct msm_fb_data_type,
+		mdp_sync_pt_data);
+
+	switch (event) {
+	case MDP_NOTIFY_FRAME_BEGIN:
+		if (mfd->idle_time && !mod_delayed_work(system_wq,
+					&mfd->idle_notify_work,
+					msecs_to_jiffies(WAIT_DISP_OP_TIMEOUT)))
+			pr_debug("fb%d: start idle delayed work\n",
+					mfd->index);
+
+		mfd->idle_state = MDSS_FB_NOT_IDLE;
+		break;
+	case MDP_NOTIFY_FRAME_READY:
+		if (sync_pt_data->async_wait_fences &&
+			sync_pt_data->temp_fen_cnt) {
+			fence_cnt = sync_pt_data->temp_fen_cnt;
+			sync_pt_data->temp_fen_cnt = 0;
+			ret = __mdss_fb_wait_for_fence_sub(sync_pt_data,
+				sync_pt_data->temp_fen, fence_cnt);
+		}
+		if (mfd->idle_time && !mod_delayed_work(system_wq,
+					&mfd->idle_notify_work,
+					msecs_to_jiffies(mfd->idle_time)))
+			pr_debug("fb%d: restarted idle work\n",
+					mfd->index);
+		if (ret == -ETIME)
+			ret = NOTIFY_BAD;
+		mfd->idle_state = MDSS_FB_IDLE_TIMER_RUNNING;
+		break;
+	case MDP_NOTIFY_FRAME_FLUSHED:
+		pr_debug("%s: frame flushed\n", sync_pt_data->fence_name);
+		sync_pt_data->flushed = true;
+		break;
+	case MDP_NOTIFY_FRAME_TIMEOUT:
+		pr_err("%s: frame timeout\n", sync_pt_data->fence_name);
+		mdss_fb_signal_timeline(sync_pt_data);
+		break;
+	case MDP_NOTIFY_FRAME_DONE:
+		pr_debug("%s: frame done\n", sync_pt_data->fence_name);
+		mdss_fb_signal_timeline(sync_pt_data);
+		mdss_fb_calc_fps(mfd);
+		break;
+	case MDP_NOTIFY_FRAME_CFG_DONE:
+		if (sync_pt_data->async_wait_fences)
+			__mdss_fb_copy_fence(sync_pt_data,
+					sync_pt_data->temp_fen,
+					&sync_pt_data->temp_fen_cnt);
+		break;
+	case MDP_NOTIFY_FRAME_CTX_DONE:
+		mdss_fb_release_kickoff(mfd);
+		break;
+	}
+
+	return ret;
+}
+
+/**
+ * mdss_fb_pan_idle() - wait for panel programming to be idle
+ * @mfd:	Framebuffer data structure for display
+ *
+ * Wait for any pending programming to be done if in the process of programming
+ * hardware configuration. After this function returns it is safe to perform
+ * software updates for next frame.
+ */
+static int mdss_fb_pan_idle(struct msm_fb_data_type *mfd)
+{
+	int ret = 0;
+
+	ret = wait_event_timeout(mfd->idle_wait_q,
+			(!atomic_read(&mfd->commits_pending) ||
+			 mfd->shutdown_pending),
+			msecs_to_jiffies(WAIT_DISP_OP_TIMEOUT));
+	if (!ret) {
+		pr_err("%pS: wait for idle timeout commits=%d\n",
+				__builtin_return_address(0),
+				atomic_read(&mfd->commits_pending));
+		MDSS_XLOG_TOUT_HANDLER("mdp", "vbif", "vbif_nrt",
+			"dbg_bus", "vbif_dbg_bus");
+		ret = -ETIMEDOUT;
+	} else if (mfd->shutdown_pending) {
+		pr_debug("Shutdown signalled\n");
+		ret = -ESHUTDOWN;
+	} else {
+		ret = 0;
+	}
+
+	return ret;
+}
+
+static int mdss_fb_wait_for_kickoff(struct msm_fb_data_type *mfd)
+{
+	int ret = 0;
+
+	if (!mfd->wait_for_kickoff)
+		return mdss_fb_pan_idle(mfd);
+
+	ret = wait_event_timeout(mfd->kickoff_wait_q,
+			(!atomic_read(&mfd->kickoff_pending) ||
+			 mfd->shutdown_pending),
+			msecs_to_jiffies(WAIT_DISP_OP_TIMEOUT));
+	if (!ret) {
+		pr_err("%pS: wait for kickoff timeout koff=%d commits=%d\n",
+				__builtin_return_address(0),
+				atomic_read(&mfd->kickoff_pending),
+				atomic_read(&mfd->commits_pending));
+		MDSS_XLOG_TOUT_HANDLER("mdp", "vbif", "vbif_nrt",
+			"dbg_bus", "vbif_dbg_bus");
+		ret = -ETIMEDOUT;
+	} else if (mfd->shutdown_pending) {
+		pr_debug("Shutdown signalled\n");
+		ret = -ESHUTDOWN;
+	} else {
+		ret = 0;
+	}
+
+	return ret;
+}
+
/*
 * mdss_fb_pan_display_ex() - queue a pan/commit request for the display thread
 * @info:	Framebuffer info structure for the display
 * @disp_commit:	Commit parameters (var info, wait flag, ...)
 *
 * Validates panel state and pan offsets, snapshots the commit parameters
 * into mfd->msm_fb_backup and wakes the per-fb display thread which does
 * the actual kickoff. Returns 0 on success or a negative errno.
 */
static int mdss_fb_pan_display_ex(struct fb_info *info,
		struct mdp_display_commit *disp_commit)
{
	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
	struct fb_var_screeninfo *var = &disp_commit->var;
	u32 wait_for_finish = disp_commit->wait_for_finish;
	int ret = 0;

	if (!mfd || (!mfd->op_enable))
		return -EPERM;

	/* panning while powered off is only allowed in DCM on command panels */
	if ((mdss_fb_is_power_off(mfd)) &&
		!((mfd->dcm_state == DCM_ENTER) &&
		(mfd->panel.type == MIPI_CMD_PANEL)))
		return -EPERM;

	/* NOTE(review): the unsigned subtractions assume
	 * xres_virtual >= xres (and likewise for y) — confirm, otherwise
	 * the bound check can underflow and pass bad offsets.
	 */
	if (var->xoffset > (info->var.xres_virtual - info->var.xres))
		return -EINVAL;

	if (var->yoffset > (info->var.yres_virtual - info->var.yres))
		return -EINVAL;

	/* drain any in-flight commit before touching the backup area */
	ret = mdss_fb_pan_idle(mfd);
	if (ret) {
		pr_err("wait_for_kick failed. rc=%d\n", ret);
		return ret;
	}

	if (mfd->mdp.pre_commit_fnc) {
		ret = mfd->mdp.pre_commit_fnc(mfd);
		if (ret) {
			pr_err("fb%d: pre commit failed %d\n",
					mfd->index, ret);
			return ret;
		}
	}

	mutex_lock(&mfd->mdp_sync_pt_data.sync_mutex);
	/* snap the requested offsets to the panning step granularity */
	if (info->fix.xpanstep)
		info->var.xoffset =
		(var->xoffset / info->fix.xpanstep) * info->fix.xpanstep;

	if (info->fix.ypanstep)
		info->var.yoffset =
		(var->yoffset / info->fix.ypanstep) * info->fix.ypanstep;

	/* snapshot state for the display thread to consume */
	mfd->msm_fb_backup.info = *info;
	mfd->msm_fb_backup.disp_commit = *disp_commit;

	/* publish the commit and wake the display thread */
	atomic_inc(&mfd->mdp_sync_pt_data.commit_cnt);
	atomic_inc(&mfd->commits_pending);
	atomic_inc(&mfd->kickoff_pending);
	wake_up_all(&mfd->commit_wait_q);
	mutex_unlock(&mfd->mdp_sync_pt_data.sync_mutex);
	if (wait_for_finish) {
		ret = mdss_fb_pan_idle(mfd);
		if (ret)
			pr_err("mdss_fb_pan_idle failed. rc=%d\n", ret);
	}
	return ret;
}
+
+u32 mdss_fb_get_mode_switch(struct msm_fb_data_type *mfd)
+{
+	/* If there is no attached mfd then there is no pending mode switch */
+	if (!mfd)
+		return 0;
+
+	if (mfd->pending_switch)
+		return mfd->switch_new_mode;
+
+	return 0;
+}
+
+/*
+ * __ioctl_transition_dyn_mode_state() - State machine for mode switch
+ * @mfd:	Framebuffer data structure for display
+ * @cmd:	ioctl that was called
+ * @validate:	used with atomic commit when doing validate layers
+ *
+ * This function assists with dynamic mode switch of DSI panel. States
+ * are used to make sure that panel mode switch occurs on next
+ * prepare/sync/commit (for legacy) and validate/pre_commit (for
+ * atomic commit) pairing. This state machine insure that calculation
+ * and return values (such as buffer release fences) are based on the
+ * panel mode being switching into.
+ */
+static int __ioctl_transition_dyn_mode_state(struct msm_fb_data_type *mfd,
+		unsigned int cmd, bool validate, bool null_commit)
+{
+	if (mfd->switch_state == MDSS_MDP_NO_UPDATE_REQUESTED)
+		return 0;
+
+	mutex_lock(&mfd->switch_lock);
+	switch (cmd) {
+	case MSMFB_ATOMIC_COMMIT:
+		if ((mfd->switch_state == MDSS_MDP_WAIT_FOR_VALIDATE)
+				&& validate) {
+			if (mfd->switch_new_mode != SWITCH_RESOLUTION)
+				mfd->pending_switch = true;
+			mfd->switch_state = MDSS_MDP_WAIT_FOR_COMMIT;
+		} else if (mfd->switch_state == MDSS_MDP_WAIT_FOR_COMMIT) {
+			if (mfd->switch_new_mode != SWITCH_RESOLUTION)
+				mdss_fb_set_mdp_sync_pt_threshold(mfd,
+					mfd->switch_new_mode);
+			mfd->switch_state = MDSS_MDP_WAIT_FOR_KICKOFF;
+		} else if ((mfd->switch_state == MDSS_MDP_WAIT_FOR_VALIDATE)
+				&& null_commit) {
+			mfd->switch_state = MDSS_MDP_WAIT_FOR_KICKOFF;
+		}
+		break;
+	}
+	mutex_unlock(&mfd->switch_lock);
+	return 0;
+}
+
+static inline bool mdss_fb_is_wb_config_same(struct msm_fb_data_type *mfd,
+		struct mdp_output_layer *output_layer)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct msm_mdp_interface *mdp5_interface = &mfd->mdp;
+
+	if (!mdp5_data->wfd
+		|| (mdp5_interface->is_config_same
+		&& !mdp5_interface->is_config_same(mfd, output_layer)))
+		return false;
+	return true;
+}
+
/* update pinfo and var for WB on config change */
static void mdss_fb_update_resolution(struct msm_fb_data_type *mfd,
		u32 xres, u32 yres, u32 format)
{
	struct mdss_panel_info *pinfo = mfd->panel_info;
	struct fb_var_screeninfo *var = &mfd->fbi->var;
	struct fb_fix_screeninfo *fix = &mfd->fbi->fix;
	struct mdss_mdp_format_params *fmt = NULL;

	pinfo->xres = xres;
	pinfo->yres = yres;
	mfd->fb_imgType = format;
	if (mfd->mdp.get_format_params) {
		fmt = mfd->mdp.get_format_params(format);
		if (fmt) {
			pinfo->bpp = fmt->bpp;
			var->bits_per_pixel = fmt->bpp * 8;
		}
		/*
		 * NOTE(review): line_length is computed from var->xres here,
		 * but var->xres is only refreshed from pinfo by the
		 * mdss_panelinfo_to_fb_var() call at the end — confirm the
		 * intended ordering (a stale width would size the stride).
		 */
		if (mfd->mdp.fb_stride)
			fix->line_length = mfd->mdp.fb_stride(mfd->index,
						var->xres,
						var->bits_per_pixel / 8);
		else
			fix->line_length = var->xres * var->bits_per_pixel / 8;

	}
	var->xres_virtual = var->xres;
	var->yres_virtual = pinfo->yres * mfd->fb_page;
	/* propagate the updated panel info back into the fb var info */
	mdss_panelinfo_to_fb_var(pinfo, var);
}
+
/*
 * mdss_fb_atomic_commit() - handle MSMFB_ATOMIC_COMMIT (validate or commit)
 * @info:	Framebuffer info structure for the display
 * @commit:	Commit payload from user space (version 1.0 only)
 * @file:	File node issuing the commit (used for resource accounting)
 *
 * With MDP_VALIDATE_LAYER set, only validates the layer configuration
 * (and, for writeback, temporarily applies a new output resolution,
 * rolled back on failure). Otherwise queues the commit for the display
 * thread. Returns 0 on success or a negative errno.
 */
int mdss_fb_atomic_commit(struct fb_info *info,
	struct mdp_layer_commit  *commit, struct file *file)
{
	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
	struct mdp_layer_commit_v1 *commit_v1;
	struct mdp_output_layer *output_layer;
	struct mdss_panel_info *pinfo;
	bool wait_for_finish, wb_change = false;
	int ret = -EPERM;
	u32 old_xres, old_yres, old_format;

	if (!mfd || (!mfd->op_enable)) {
		pr_err("mfd is NULL or operation not permitted\n");
		return -EPERM;
	}

	/* commits while powered off are only allowed in DCM on cmd panels */
	if ((mdss_fb_is_power_off(mfd)) &&
		!((mfd->dcm_state == DCM_ENTER) &&
		(mfd->panel.type == MIPI_CMD_PANEL))) {
		pr_err("commit is not supported when interface is in off state\n");
		goto end;
	}
	pinfo = mfd->panel_info;

	/* only supports version 1.0 */
	if (commit->version != MDP_COMMIT_VERSION_1_0) {
		pr_err("commit version is not supported\n");
		goto end;
	}

	if (!mfd->mdp.pre_commit || !mfd->mdp.atomic_validate) {
		pr_err("commit callback is not registered\n");
		goto end;
	}

	commit_v1 = &commit->commit_v1;
	if (commit_v1->flags & MDP_VALIDATE_LAYER) {
		/* validate-only path: no frame is queued here */
		ret = mdss_fb_wait_for_kickoff(mfd);
		if (ret) {
			pr_err("wait for kickoff failed\n");
		} else {
			__ioctl_transition_dyn_mode_state(mfd,
				MSMFB_ATOMIC_COMMIT, true, false);
			if (mfd->panel.type == WRITEBACK_PANEL) {
				output_layer = commit_v1->output_layer;
				if (!output_layer) {
					pr_err("Output layer is null\n");
					goto end;
				}
				/* apply a new WB resolution for validation;
				 * saved values allow rollback on failure */
				wb_change = !mdss_fb_is_wb_config_same(mfd,
						commit_v1->output_layer);
				if (wb_change) {
					old_xres = pinfo->xres;
					old_yres = pinfo->yres;
					old_format = mfd->fb_imgType;
					mdss_fb_update_resolution(mfd,
						output_layer->buffer.width,
						output_layer->buffer.height,
						output_layer->buffer.format);
				}
			}
			ret = mfd->mdp.atomic_validate(mfd, file, commit_v1);
			if (!ret)
				mfd->atomic_commit_pending = true;
		}
		goto end;
	} else {
		/* commit path: wait for idle, then run pre-commit */
		ret = mdss_fb_pan_idle(mfd);
		if (ret) {
			pr_err("pan display idle call failed\n");
			goto end;
		}
		__ioctl_transition_dyn_mode_state(mfd,
			MSMFB_ATOMIC_COMMIT, false,
			(commit_v1->input_layer_cnt ? 0 : 1));

		ret = mfd->mdp.pre_commit(mfd, file, commit_v1);
		if (ret) {
			pr_err("atomic pre commit failed\n");
			goto end;
		}
	}

	wait_for_finish = commit_v1->flags & MDP_COMMIT_WAIT_FOR_FINISH;
	mfd->msm_fb_backup.atomic_commit = true;
	mfd->msm_fb_backup.disp_commit.l_roi =  commit_v1->left_roi;
	mfd->msm_fb_backup.disp_commit.r_roi =  commit_v1->right_roi;

	/* publish the commit and wake the display thread */
	mutex_lock(&mfd->mdp_sync_pt_data.sync_mutex);
	atomic_inc(&mfd->mdp_sync_pt_data.commit_cnt);
	atomic_inc(&mfd->commits_pending);
	atomic_inc(&mfd->kickoff_pending);
	wake_up_all(&mfd->commit_wait_q);
	mutex_unlock(&mfd->mdp_sync_pt_data.sync_mutex);

	if (wait_for_finish)
		ret = mdss_fb_pan_idle(mfd);

end:
	/* roll back any tentative writeback resolution change on failure */
	if (ret && (mfd->panel.type == WRITEBACK_PANEL) && wb_change)
		mdss_fb_update_resolution(mfd, old_xres, old_yres, old_format);
	return ret;
}
+
+static int mdss_fb_pan_display(struct fb_var_screeninfo *var,
+		struct fb_info *info)
+{
+	struct mdp_display_commit disp_commit;
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+
+	/*
+	 * during mode switch through mode sysfs node, it will trigger a
+	 * pan_display after switch. This assumes that fb has been adjusted,
+	 * however when using overlays we may not have the right size at this
+	 * point, so it needs to go through PREPARE first. Abort pan_display
+	 * operations until that happens
+	 */
+	if (mfd->switch_state != MDSS_MDP_NO_UPDATE_REQUESTED) {
+		pr_debug("fb%d: pan_display skipped during switch\n",
+				mfd->index);
+		return 0;
+	}
+
+	memset(&disp_commit, 0, sizeof(disp_commit));
+	disp_commit.wait_for_finish = true;
+	memcpy(&disp_commit.var, var, sizeof(struct fb_var_screeninfo));
+	return mdss_fb_pan_display_ex(info, &disp_commit);
+}
+
+static int mdss_fb_pan_display_sub(struct fb_var_screeninfo *var,
+			       struct fb_info *info)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+
+	if (!mfd->op_enable)
+		return -EPERM;
+
+	if ((mdss_fb_is_power_off(mfd)) &&
+		!((mfd->dcm_state == DCM_ENTER) &&
+		(mfd->panel.type == MIPI_CMD_PANEL)))
+		return -EPERM;
+
+	if (var->xoffset > (info->var.xres_virtual - info->var.xres))
+		return -EINVAL;
+
+	if (var->yoffset > (info->var.yres_virtual - info->var.yres))
+		return -EINVAL;
+
+	if (info->fix.xpanstep)
+		info->var.xoffset =
+		(var->xoffset / info->fix.xpanstep) * info->fix.xpanstep;
+
+	if (info->fix.ypanstep)
+		info->var.yoffset =
+		(var->yoffset / info->fix.ypanstep) * info->fix.ypanstep;
+
+	if (mfd->mdp.dma_fnc)
+		mfd->mdp.dma_fnc(mfd);
+	else
+		pr_warn("dma function not set for panel type=%d\n",
+				mfd->panel.type);
+
+	return 0;
+}
+
+static int mdss_grayscale_to_mdp_format(u32 grayscale)
+{
+	switch (grayscale) {
+	case V4L2_PIX_FMT_RGB24:
+		return MDP_RGB_888;
+	case V4L2_PIX_FMT_NV12:
+		return MDP_Y_CBCR_H2V2;
+	default:
+		return -EINVAL;
+	}
+}
+
+static void mdss_fb_var_to_panelinfo(struct fb_var_screeninfo *var,
+	struct mdss_panel_info *pinfo)
+{
+	int format = -EINVAL;
+
+	pinfo->xres = var->xres;
+	pinfo->yres = var->yres;
+	pinfo->lcdc.v_front_porch = var->lower_margin;
+	pinfo->lcdc.v_back_porch = var->upper_margin;
+	pinfo->lcdc.v_pulse_width = var->vsync_len;
+	pinfo->lcdc.h_front_porch = var->right_margin;
+	pinfo->lcdc.h_back_porch = var->left_margin;
+	pinfo->lcdc.h_pulse_width = var->hsync_len;
+
+	if (var->grayscale > 1) {
+		format = mdss_grayscale_to_mdp_format(var->grayscale);
+		if (!IS_ERR_VALUE(format))
+			pinfo->out_format = format;
+		else
+			pr_warn("Failed to map grayscale value (%d) to an MDP format\n",
+					var->grayscale);
+	}
+
+	/*
+	 * if greater than 1M, then rate would fall below 1mhz which is not
+	 * even supported. In this case it means clock rate is actually
+	 * passed directly in hz.
+	 */
+	if (var->pixclock > SZ_1M)
+		pinfo->clk_rate = var->pixclock;
+	else
+		pinfo->clk_rate = PICOS2KHZ(var->pixclock) * 1000;
+
+	/*
+	 * if it is a DBA panel i.e. HDMI TV connected through
+	 * DSI interface, then store the pixel clock value in
+	 * DSI specific variable.
+	 */
+	if (pinfo->is_dba_panel)
+		pinfo->mipi.dsi_pclk_rate = pinfo->clk_rate;
+}
+
/*
 * mdss_panelinfo_to_fb_var() - fold panel info back into fb var screen info
 * @pinfo:	source panel info
 * @var:	destination variable screen info
 *
 * Mirrors geometry and timing fields and derives pixclock either from the
 * panel frame rate and totals or from the panel clock rate.
 */
void mdss_panelinfo_to_fb_var(struct mdss_panel_info *pinfo,
						struct fb_var_screeninfo *var)
{
	u32 frame_rate;

	var->xres = mdss_fb_get_panel_xres(pinfo);
	var->yres = pinfo->yres;
	/*
	 * NOTE(review): prg_fet is shifted from the front porch into the
	 * back porch; assumes v_front_porch >= prg_fet — confirm, otherwise
	 * the unsigned subtraction underflows.
	 */
	var->lower_margin = pinfo->lcdc.v_front_porch -
		pinfo->prg_fet;
	var->upper_margin = pinfo->lcdc.v_back_porch +
		pinfo->prg_fet;
	var->vsync_len = pinfo->lcdc.v_pulse_width;
	var->right_margin = pinfo->lcdc.h_front_porch;
	var->left_margin = pinfo->lcdc.h_back_porch;
	var->hsync_len = pinfo->lcdc.h_pulse_width;

	frame_rate = mdss_panel_get_framerate(pinfo,
					FPS_RESOLUTION_HZ);
	if (frame_rate) {
		unsigned long clk_rate, h_total, v_total;

		/* derive pixclock from active + blanking totals */
		h_total = var->xres + var->left_margin
			+ var->right_margin + var->hsync_len;
		v_total = var->yres + var->lower_margin
			+ var->upper_margin + var->vsync_len;
		clk_rate = h_total * v_total * frame_rate;
		var->pixclock = KHZ2PICOS(clk_rate / 1000);
	} else if (pinfo->clk_rate) {
		/* no frame rate available: fall back to the panel clock */
		var->pixclock = KHZ2PICOS(
				(unsigned long int) pinfo->clk_rate / 1000);
	}

	if (pinfo->physical_width)
		var->width = pinfo->physical_width;
	if (pinfo->physical_height)
		var->height = pinfo->physical_height;

	pr_debug("ScreenInfo: res=%dx%d [%d, %d] [%d, %d]\n",
		var->xres, var->yres, var->left_margin,
		var->right_margin, var->upper_margin,
		var->lower_margin);
}
+
+/**
+ * __mdss_fb_perform_commit() - process a frame to display
+ * @mfd:	Framebuffer data structure for display
+ *
+ * Processes all layers and buffers programmed and ensures all pending release
+ * fences are signaled once the buffer is transferred to display.
+ */
+static int __mdss_fb_perform_commit(struct msm_fb_data_type *mfd)
+{
+	struct msm_sync_pt_data *sync_pt_data = &mfd->mdp_sync_pt_data;
+	struct msm_fb_backup_type *fb_backup = &mfd->msm_fb_backup;
+	int ret = -ENOTSUP;
+	u32 new_dsi_mode, dynamic_dsi_switch = 0;
+
+	if (!sync_pt_data->async_wait_fences)
+		mdss_fb_wait_for_fence(sync_pt_data);
+	sync_pt_data->flushed = false;
+
+	mutex_lock(&mfd->switch_lock);
+	if (mfd->switch_state == MDSS_MDP_WAIT_FOR_KICKOFF) {
+		dynamic_dsi_switch = 1;
+		new_dsi_mode = mfd->switch_new_mode;
+	} else if (mfd->switch_state != MDSS_MDP_NO_UPDATE_REQUESTED) {
+		pr_err("invalid commit on fb%d with state = %d\n",
+			mfd->index, mfd->switch_state);
+		mutex_unlock(&mfd->switch_lock);
+		goto skip_commit;
+	}
+	mutex_unlock(&mfd->switch_lock);
+	if (dynamic_dsi_switch) {
+		MDSS_XLOG(mfd->index, mfd->split_mode, new_dsi_mode,
+			XLOG_FUNC_ENTRY);
+		pr_debug("Triggering dyn mode switch to %d\n", new_dsi_mode);
+		ret = mfd->mdp.mode_switch(mfd, new_dsi_mode);
+		if (ret)
+			pr_err("DSI mode switch has failed");
+		else
+			mfd->pending_switch = false;
+	}
+	if (fb_backup->disp_commit.flags & MDP_DISPLAY_COMMIT_OVERLAY) {
+		if (mfd->mdp.kickoff_fnc)
+			ret = mfd->mdp.kickoff_fnc(mfd,
+					&fb_backup->disp_commit);
+		else
+			pr_warn("no kickoff function setup for fb%d\n",
+					mfd->index);
+	} else if (fb_backup->atomic_commit) {
+		if (mfd->mdp.kickoff_fnc)
+			ret = mfd->mdp.kickoff_fnc(mfd,
+					&fb_backup->disp_commit);
+		else
+			pr_warn("no kickoff function setup for fb%d\n",
+				mfd->index);
+		fb_backup->atomic_commit = false;
+	} else {
+		ret = mdss_fb_pan_display_sub(&fb_backup->disp_commit.var,
+				&fb_backup->info);
+		if (ret)
+			pr_err("pan display failed %x on fb%d\n", ret,
+					mfd->index);
+	}
+
+skip_commit:
+	if (!ret)
+		mdss_fb_update_backlight(mfd);
+
+	if (IS_ERR_VALUE(ret) || !sync_pt_data->flushed) {
+		mdss_fb_release_kickoff(mfd);
+		mdss_fb_signal_timeline(sync_pt_data);
+		if ((mfd->panel.type == MIPI_CMD_PANEL) &&
+			(mfd->mdp.signal_retire_fence))
+			mfd->mdp.signal_retire_fence(mfd, 1);
+	}
+
+	if (dynamic_dsi_switch) {
+		MDSS_XLOG(mfd->index, mfd->split_mode, new_dsi_mode,
+			XLOG_FUNC_EXIT);
+		mfd->mdp.mode_switch_post(mfd, new_dsi_mode);
+		mutex_lock(&mfd->switch_lock);
+		mfd->switch_state = MDSS_MDP_NO_UPDATE_REQUESTED;
+		mutex_unlock(&mfd->switch_lock);
+		if (new_dsi_mode != SWITCH_RESOLUTION)
+			mfd->panel.type = new_dsi_mode;
+		pr_debug("Dynamic mode switch completed\n");
+	}
+
+	return ret;
+}
+
+static int __mdss_fb_display_thread(void *data)
+{
+	struct msm_fb_data_type *mfd = data;
+	int ret;
+	struct sched_param param;
+
+	/*
+	 * this priority was found during empiric testing to have appropriate
+	 * realtime scheduling to process display updates and interact with
+	 * other real time and normal priority tasks
+	 */
+	param.sched_priority = 16;
+	ret = sched_setscheduler(current, SCHED_FIFO, &param);
+	if (ret)
+		pr_warn("set priority failed for fb%d display thread\n",
+				mfd->index);
+
+	while (1) {
+		wait_event(mfd->commit_wait_q,
+				(atomic_read(&mfd->commits_pending) ||
+				 kthread_should_stop()));
+
+		if (kthread_should_stop())
+			break;
+
+		MDSS_XLOG(mfd->index, XLOG_FUNC_ENTRY);
+		ret = __mdss_fb_perform_commit(mfd);
+		MDSS_XLOG(mfd->index, XLOG_FUNC_EXIT);
+
+		atomic_dec(&mfd->commits_pending);
+		wake_up_all(&mfd->idle_wait_q);
+	}
+
+	mdss_fb_release_kickoff(mfd);
+	atomic_set(&mfd->commits_pending, 0);
+	wake_up_all(&mfd->idle_wait_q);
+
+	return ret;
+}
+
/*
 * mdss_fb_check_var() - fb_ops .fb_check_var handler
 * @var:	requested variable screen info to validate
 * @info:	framebuffer info structure for the display
 *
 * Validates rotation, the RGB(A) component layout for 16/24/32 bpp,
 * resolution/offset sanity, and either matches against the mode list or
 * asks the panel (MDSS_EVENT_CHECK_PARAMS) whether the new parameters
 * are acceptable. Returns 0 if the configuration is usable.
 */
static int mdss_fb_check_var(struct fb_var_screeninfo *var,
			     struct fb_info *info)
{
	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;

	if (var->rotate != FB_ROTATE_UR && var->rotate != FB_ROTATE_UD)
		return -EINVAL;

	switch (var->bits_per_pixel) {
	case 16:
		/* accept RGB565 or BGR565 component layouts only */
		if ((var->green.offset != 5) ||
		    !((var->blue.offset == 11)
		      || (var->blue.offset == 0)) ||
		    !((var->red.offset == 11)
		      || (var->red.offset == 0)) ||
		    (var->blue.length != 5) ||
		    (var->green.length != 6) ||
		    (var->red.length != 5) ||
		    (var->blue.msb_right != 0) ||
		    (var->green.msb_right != 0) ||
		    (var->red.msb_right != 0) ||
		    (var->transp.offset != 0) ||
		    (var->transp.length != 0))
			return -EINVAL;
		break;

	case 24:
		/* accept RGB888, optionally with an 8-bit alpha at bit 24 */
		if ((var->blue.offset != 0) ||
		    (var->green.offset != 8) ||
		    (var->red.offset != 16) ||
		    (var->blue.length != 8) ||
		    (var->green.length != 8) ||
		    (var->red.length != 8) ||
		    (var->blue.msb_right != 0) ||
		    (var->green.msb_right != 0) ||
		    (var->red.msb_right != 0) ||
		    !(((var->transp.offset == 0) &&
		       (var->transp.length == 0)) ||
		      ((var->transp.offset == 24) &&
		       (var->transp.length == 8))))
			return -EINVAL;
		break;

	case 32:
		/*
		 * Check user specified color format BGRA/ARGB/RGBA
		 * and verify the position of the RGB components
		 */

		if (!((var->transp.offset == 24) &&
		      (var->blue.offset == 0) &&
		      (var->green.offset == 8) &&
		      (var->red.offset == 16)) &&
		    !((var->transp.offset == 0) &&
		      (var->blue.offset == 24) &&
		      (var->green.offset == 16) &&
		      (var->red.offset == 8)) &&
		    !((var->transp.offset == 24) &&
		      (var->blue.offset == 16) &&
		      (var->green.offset == 8) &&
		      (var->red.offset == 0)))
			return -EINVAL;

		/* Check the common values for both RGBA and ARGB */

		if ((var->blue.length != 8) ||
		    (var->green.length != 8) ||
		    (var->red.length != 8) ||
		    (var->transp.length != 8) ||
		    (var->blue.msb_right != 0) ||
		    (var->green.msb_right != 0) ||
		    (var->red.msb_right != 0))
			return -EINVAL;

		break;

	default:
		return -EINVAL;
	}

	/* NOTE(review): xres_virtual is unsigned, so "<= 0" is just "== 0" */
	if ((var->xres_virtual <= 0) || (var->yres_virtual <= 0))
		return -EINVAL;

	if ((var->xres == 0) || (var->yres == 0))
		return -EINVAL;

	/* NOTE(review): these unsigned subtractions assume
	 * xres_virtual >= xres (and likewise for y); confirm, otherwise
	 * the bound check can underflow. */
	if (var->xoffset > (var->xres_virtual - var->xres))
		return -EINVAL;

	if (var->yoffset > (var->yres_virtual - var->yres))
		return -EINVAL;

	if (info->mode) {
		/* fixed mode list: the request must match an entry exactly */
		const struct fb_videomode *mode;

		mode = fb_match_mode(var, &info->modelist);
		if (mode == NULL)
			return -EINVAL;
	} else if (mfd->panel_info && !(var->activate & FB_ACTIVATE_TEST)) {
		/* ask the panel whether the new parameters are acceptable,
		 * using a scratch copy so the live panel_info is untouched */
		struct mdss_panel_info *panel_info;
		int rc;

		panel_info = kzalloc(sizeof(struct mdss_panel_info),
				GFP_KERNEL);
		if (!panel_info)
			return -ENOMEM;

		memcpy(panel_info, mfd->panel_info,
				sizeof(struct mdss_panel_info));
		mdss_fb_var_to_panelinfo(var, panel_info);
		rc = mdss_fb_send_panel_event(mfd, MDSS_EVENT_CHECK_PARAMS,
			panel_info);
		if (IS_ERR_VALUE(rc)) {
			kfree(panel_info);
			return rc;
		}
		/* a positive rc from the panel means a reconfig is needed */
		mfd->panel_reconfig = rc;
		kfree(panel_info);
	}

	return 0;
}
+
+static int mdss_fb_videomode_switch(struct msm_fb_data_type *mfd,
+		const struct fb_videomode *mode)
+{
+	int ret = 0;
+	struct mdss_panel_data *pdata, *tmp;
+	struct mdss_panel_timing *timing;
+
+	pdata = dev_get_platdata(&mfd->pdev->dev);
+	if (!pdata) {
+		pr_err("no panel connected\n");
+		return -ENODEV;
+	}
+
+	/* make sure that we are idle while switching */
+	mdss_fb_wait_for_kickoff(mfd);
+
+	pr_debug("fb%d: changing display mode to %s\n", mfd->index, mode->name);
+	MDSS_XLOG(mfd->index, mode->name,
+			mdss_fb_get_panel_xres(mfd->panel_info),
+			mfd->panel_info->yres, mfd->split_mode,
+			XLOG_FUNC_ENTRY);
+	tmp = pdata;
+	do {
+		if (!tmp->event_handler) {
+			pr_warn("no event handler for panel\n");
+			continue;
+		}
+		timing = mdss_panel_get_timing_by_name(tmp, mode->name);
+		ret = tmp->event_handler(tmp,
+				MDSS_EVENT_PANEL_TIMING_SWITCH, timing);
+
+		tmp->active = timing != NULL;
+		tmp = tmp->next;
+	} while (tmp && !ret);
+
+	if (!ret)
+		mdss_fb_set_split_mode(mfd, pdata);
+
+	if (!ret && mfd->mdp.configure_panel) {
+		int dest_ctrl = 1;
+
+		/* todo: currently assumes no changes in video/cmd mode */
+		if (!mdss_fb_is_power_off(mfd)) {
+			mutex_lock(&mfd->switch_lock);
+			mfd->switch_state = MDSS_MDP_WAIT_FOR_VALIDATE;
+			mfd->switch_new_mode = SWITCH_RESOLUTION;
+			mutex_unlock(&mfd->switch_lock);
+			dest_ctrl = 0;
+		}
+		ret = mfd->mdp.configure_panel(mfd,
+				pdata->panel_info.mipi.mode, dest_ctrl);
+	}
+
+	MDSS_XLOG(mfd->index, mode->name,
+			mdss_fb_get_panel_xres(mfd->panel_info),
+			mfd->panel_info->yres, mfd->split_mode,
+			XLOG_FUNC_EXIT);
+	pr_debug("fb%d: %s mode change complete\n", mfd->index, mode->name);
+
+	return ret;
+}
+
/*
 * mdss_fb_set_par() - fb_ops .fb_set_par handler
 * @info:	framebuffer info structure for the display
 *
 * Applies the (already validated) var info: derives the MDP image type
 * from the component layout, performs a videomode switch if a mode list
 * is in use, recomputes the line stride and, when the panel config
 * changed, cycles the panel through blank/unblank to apply it.
 */
static int mdss_fb_set_par(struct fb_info *info)
{
	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
	struct fb_var_screeninfo *var = &info->var;
	int old_imgType, old_format;
	int ret = 0;

	/* wait for any in-flight commit before reprogramming */
	ret = mdss_fb_pan_idle(mfd);
	if (ret) {
		pr_err("mdss_fb_pan_idle failed. rc=%d\n", ret);
		return ret;
	}

	/* map the requested component layout to an MDP image type */
	old_imgType = mfd->fb_imgType;
	switch (var->bits_per_pixel) {
	case 16:
		if (var->red.offset == 0)
			mfd->fb_imgType = MDP_BGR_565;
		else
			mfd->fb_imgType	= MDP_RGB_565;
		break;

	case 24:
		if ((var->transp.offset == 0) && (var->transp.length == 0))
			mfd->fb_imgType = MDP_RGB_888;
		else if ((var->transp.offset == 24) &&
			 (var->transp.length == 8)) {
			/* 24bpp with alpha is promoted to 32bpp ARGB */
			mfd->fb_imgType = MDP_ARGB_8888;
			info->var.bits_per_pixel = 32;
		}
		break;

	case 32:
		if ((var->red.offset == 0) &&
		    (var->green.offset == 8) &&
		    (var->blue.offset == 16) &&
		    (var->transp.offset == 24))
			mfd->fb_imgType = MDP_RGBA_8888;
		else if ((var->red.offset == 16) &&
		    (var->green.offset == 8) &&
		    (var->blue.offset == 0) &&
		    (var->transp.offset == 24))
			mfd->fb_imgType = MDP_BGRA_8888;
		else if ((var->red.offset == 8) &&
		    (var->green.offset == 16) &&
		    (var->blue.offset == 24) &&
		    (var->transp.offset == 0))
			mfd->fb_imgType = MDP_ARGB_8888;
		else
			mfd->fb_imgType = MDP_RGBA_8888;
		break;

	default:
		return -EINVAL;
	}

	if (info->mode) {
		/* mode list in use: switch only when the mode changed */
		const struct fb_videomode *mode;

		mode = fb_match_mode(var, &info->modelist);
		if (!mode)
			return -EINVAL;

		pr_debug("found mode: %s\n", mode->name);

		if (fb_mode_is_equal(mode, info->mode)) {
			pr_debug("mode is equal to current mode\n");
			return 0;
		}

		ret = mdss_fb_videomode_switch(mfd, mode);
		if (ret)
			return ret;
	}

	/* recompute the line stride for the (possibly new) geometry */
	if (mfd->mdp.fb_stride)
		mfd->fbi->fix.line_length = mfd->mdp.fb_stride(mfd->index,
						var->xres,
						var->bits_per_pixel / 8);
	else
		mfd->fbi->fix.line_length = var->xres * var->bits_per_pixel / 8;

	/* if memory is not allocated yet, change memory size for fb */
	if (!info->fix.smem_start)
		mfd->fbi->fix.smem_len = PAGE_ALIGN(mfd->fbi->fix.line_length *
				mfd->fbi->var.yres) * mfd->fb_page;

	/* a grayscale-mapped format change forces a panel reconfig */
	old_format = mdss_grayscale_to_mdp_format(var->grayscale);
	if (!IS_ERR_VALUE(old_format)) {
		if (old_format != mfd->panel_info->out_format)
			mfd->panel_reconfig = true;
	}

	/* apply a panel reconfig via a blank/unblank cycle */
	if (mfd->panel_reconfig || (mfd->fb_imgType != old_imgType)) {
		mdss_fb_blank_sub(FB_BLANK_POWERDOWN, info, mfd->op_enable);
		mdss_fb_var_to_panelinfo(var, mfd->panel_info);
		if (mfd->panel_info->is_dba_panel &&
			mdss_fb_send_panel_event(mfd, MDSS_EVENT_UPDATE_PARAMS,
							mfd->panel_info))
			pr_debug("Failed to send panel event UPDATE_PARAMS\n");
		mdss_fb_blank_sub(FB_BLANK_UNBLANK, info, mfd->op_enable);
		mfd->panel_reconfig = false;
	}

	return ret;
}
+
+/*
+ * mdss_fb_dcm() - drive the DCM/DTM (display continuous/test mode) state
+ * machine for a framebuffer device.
+ * @mfd:       frame buffer data for the display.
+ * @req_state: requested state (DCM_UNBLANK/ENTER/EXIT/BLANK, DTM_ENTER/EXIT).
+ *
+ * Only the transitions encoded below are honored; a request that does not
+ * match the current state is silently ignored (ret stays 0).
+ * Returns 0 on success or ignored request, negative errno on power-on/off
+ * failure.
+ */
+int mdss_fb_dcm(struct msm_fb_data_type *mfd, int req_state)
+{
+	int ret = 0;
+
+	if (req_state == mfd->dcm_state) {
+		pr_warn("Already in correct DCM/DTM state\n");
+		return ret;
+	}
+
+	switch (req_state) {
+	case DCM_UNBLANK:
+		/* Power the panel on outside the normal blank path; needs a
+		 * display thread running for subsequent commits.
+		 */
+		if (mfd->dcm_state == DCM_UNINIT &&
+			mdss_fb_is_power_off(mfd) && mfd->mdp.on_fnc) {
+			if (mfd->disp_thread == NULL) {
+				ret = mdss_fb_start_disp_thread(mfd);
+				if (ret < 0)
+					return ret;
+			}
+			ret = mfd->mdp.on_fnc(mfd);
+			if (ret == 0) {
+				mfd->panel_power_state = MDSS_PANEL_POWER_ON;
+				mfd->dcm_state = DCM_UNBLANK;
+			}
+		}
+		break;
+	case DCM_ENTER:
+		if (mfd->dcm_state == DCM_UNBLANK) {
+			/*
+			 * Keep unblank path available for only
+			 * DCM operation
+			 */
+			mfd->panel_power_state = MDSS_PANEL_POWER_OFF;
+			mfd->dcm_state = DCM_ENTER;
+		}
+		break;
+	case DCM_EXIT:
+		if (mfd->dcm_state == DCM_ENTER) {
+			/* Release the unblank path for exit */
+			mfd->panel_power_state = MDSS_PANEL_POWER_ON;
+			mfd->dcm_state = DCM_EXIT;
+		}
+		break;
+	case DCM_BLANK:
+		/* Power off and tear the display thread down; state returns
+		 * to DCM_UNINIT only if the off callback succeeded.
+		 */
+		if ((mfd->dcm_state == DCM_EXIT ||
+			mfd->dcm_state == DCM_UNBLANK) &&
+			mdss_fb_is_power_on(mfd) && mfd->mdp.off_fnc) {
+			mfd->panel_power_state = MDSS_PANEL_POWER_OFF;
+			ret = mfd->mdp.off_fnc(mfd);
+			if (ret == 0)
+				mfd->dcm_state = DCM_UNINIT;
+			else
+				pr_err("DCM_BLANK failed\n");
+
+			if (mfd->disp_thread)
+				mdss_fb_stop_disp_thread(mfd);
+		}
+		break;
+	case DTM_ENTER:
+		if (mfd->dcm_state == DCM_UNINIT)
+			mfd->dcm_state = DTM_ENTER;
+		break;
+	case DTM_EXIT:
+		if (mfd->dcm_state == DTM_ENTER)
+			mfd->dcm_state = DCM_UNINIT;
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * mdss_fb_cursor() - MSMFB_CURSOR ioctl: copy a cursor request from
+ * userspace and hand it to the MDP cursor_update callback.
+ * @info: framebuffer instance.
+ * @p:    user pointer to a struct fb_cursor.
+ *
+ * Returns -ENODEV when the target has no cursor support, -EFAULT on a bad
+ * user pointer, otherwise the callback's result.
+ */
+static int mdss_fb_cursor(struct fb_info *info, void __user *p)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	struct fb_cursor cursor;
+
+	if (!mfd->mdp.cursor_update)
+		return -ENODEV;
+
+	/*
+	 * copy_from_user() returns the number of bytes NOT copied, not an
+	 * errno; translate a partial copy to -EFAULT instead of leaking a
+	 * positive byte count to the ioctl caller.
+	 */
+	if (copy_from_user(&cursor, p, sizeof(cursor)))
+		return -EFAULT;
+
+	return mfd->mdp.cursor_update(mfd, &cursor);
+}
+
+/*
+ * mdss_fb_async_position_update() - apply an asynchronous layer position
+ * update through the MDP backend.
+ * @info:       framebuffer instance.
+ * @update_pos: kernel-space position update descriptor.
+ *
+ * Returns -EINVAL when no input layers are given, -ENODEV when the backend
+ * does not implement async position updates, otherwise the backend result.
+ */
+int mdss_fb_async_position_update(struct fb_info *info,
+		struct mdp_position_update *update_pos)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+
+	if (!update_pos->input_layer_cnt) {
+		pr_err("no input layers for position update\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * The ioctl wrapper checks this callback, but this function is
+	 * non-static and may be reached directly; guard against a NULL
+	 * backend hook rather than dereferencing it.
+	 */
+	if (!mfd->mdp.async_position_update)
+		return -ENODEV;
+
+	return mfd->mdp.async_position_update(mfd, update_pos);
+}
+
+/*
+ * mdss_fb_async_position_update_ioctl() - MSMFB_ASYNC_POSITION_UPDATE ioctl.
+ * @info: framebuffer instance.
+ * @argp: user pointer to a struct mdp_position_update.
+ *
+ * Copies the descriptor and its layer array in, temporarily re-points
+ * update_pos.input_layers at the kernel copy for the update, then copies
+ * per-layer error codes (and the descriptor, with the user pointer
+ * restored) back out. Returns 0 on success or a negative errno; copy-out
+ * failures are logged but do not override the update's return code.
+ */
+static int mdss_fb_async_position_update_ioctl(struct fb_info *info,
+		unsigned long *argp)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	struct mdp_position_update update_pos;
+	int ret, rc;
+	u32 buffer_size, layer_cnt;
+	struct mdp_async_layer *layer_list = NULL;
+	struct mdp_async_layer __user *input_layer_list;
+
+	if (!mfd->mdp.async_position_update)
+		return -ENODEV;
+
+	ret = copy_from_user(&update_pos, argp, sizeof(update_pos));
+	if (ret) {
+		pr_err("copy from user failed\n");
+		return ret;
+	}
+	/* Remember the userspace array so it can be restored before the
+	 * final copy_to_user().
+	 */
+	input_layer_list = update_pos.input_layers;
+
+	layer_cnt = update_pos.input_layer_cnt;
+	/* Bound the count before using it to size the kernel allocation. */
+	if ((!layer_cnt) || (layer_cnt > MAX_LAYER_COUNT)) {
+		pr_err("invalid async layers :%d to update\n", layer_cnt);
+		return -EINVAL;
+	}
+
+	buffer_size = sizeof(struct mdp_async_layer) * layer_cnt;
+	layer_list = kmalloc(buffer_size, GFP_KERNEL);
+	if (!layer_list) {
+		pr_err("unable to allocate memory for layers\n");
+		return -ENOMEM;
+	}
+
+	ret = copy_from_user(layer_list, input_layer_list, buffer_size);
+	if (ret) {
+		pr_err("layer list copy from user failed\n");
+		goto end;
+	}
+	update_pos.input_layers = layer_list;
+
+	ret = mdss_fb_async_position_update(info, &update_pos);
+	if (ret)
+		pr_err("async position update failed ret:%d\n", ret);
+
+	/* Copy layer error codes back even when the update failed. */
+	rc = copy_to_user(input_layer_list, layer_list, buffer_size);
+	if (rc)
+		pr_err("layer error code copy to user failed\n");
+
+	update_pos.input_layers = input_layer_list;
+	rc = copy_to_user(argp, &update_pos,
+			sizeof(struct mdp_position_update));
+	if (rc)
+		pr_err("copy to user for layers failed");
+
+end:
+	kfree(layer_list);
+	return ret;
+}
+
+/*
+ * mdss_fb_set_lut() - MSMFB_SET_LUT ioctl: copy a colormap from userspace
+ * and apply it via the MDP lut_update callback.
+ * @info: framebuffer instance.
+ * @p:    user pointer to a struct fb_cmap.
+ *
+ * Returns -ENODEV when the target has no LUT support, -EFAULT on a bad
+ * user pointer, 0 otherwise.
+ */
+static int mdss_fb_set_lut(struct fb_info *info, void __user *p)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	struct fb_cmap cmap;
+
+	if (!mfd->mdp.lut_update)
+		return -ENODEV;
+
+	/*
+	 * copy_from_user() returns a residual byte count, not an errno;
+	 * report -EFAULT rather than a positive number on failure.
+	 */
+	if (copy_from_user(&cmap, p, sizeof(cmap)))
+		return -EFAULT;
+
+	mfd->mdp.lut_update(mfd, &cmap);
+	return 0;
+}
+
+/**
+ * mdss_fb_sync_get_fence() - get fence from timeline
+ * @timeline:	Timeline to create the fence on
+ * @fence_name:	Name of the fence that will be created for debugging
+ * @val:	Timeline value at which the fence will be signaled
+ *
+ * Create a sync point at @val on @timeline and wrap it in a fence named
+ * @fence_name. The returned fence signals when the timeline advances to
+ * @val; NULL is returned if either allocation fails.
+ */
+struct sync_fence *mdss_fb_sync_get_fence(struct sw_sync_timeline *timeline,
+		const char *fence_name, int val)
+{
+	struct sync_fence *new_fence;
+	struct sync_pt *pt;
+
+	pr_debug("%s: buf sync fence timeline=%d\n", fence_name, val);
+
+	pt = sw_sync_pt_create(timeline, val);
+	if (!pt) {
+		pr_err("%s: cannot create sync point\n", fence_name);
+		return NULL;
+	}
+
+	/* Wrap the sync point; ownership passes to the fence on success. */
+	new_fence = sync_fence_create(fence_name, pt);
+	if (!new_fence) {
+		sync_pt_free(pt);
+		pr_err("%s: cannot create fence\n", fence_name);
+		return NULL;
+	}
+
+	return new_fence;
+}
+
+/*
+ * mdss_fb_handle_buf_sync_ioctl() - MSMFB_BUFFER_SYNC ioctl core.
+ * @sync_pt_data: per-display sync point bookkeeping.
+ * @buf_sync:     kernel copy of the userspace request.
+ *
+ * Collects the caller's acquire fences, creates a release fence (and,
+ * when MDP_BUF_SYNC_FLAG_RETIRE_FENCE is set, a retire fence) on the
+ * display timeline, and returns their fds to userspace. On any failure
+ * all acquire fences taken so far are dropped and partially created
+ * fds/fences are rolled back via the goto ladder.
+ */
+static int mdss_fb_handle_buf_sync_ioctl(struct msm_sync_pt_data *sync_pt_data,
+				 struct mdp_buf_sync *buf_sync)
+{
+	int i, ret = 0;
+	int acq_fen_fd[MDP_MAX_FENCE_FD];
+	struct sync_fence *fence, *rel_fence, *retire_fence;
+	int rel_fen_fd;
+	int retire_fen_fd;
+	int val;
+
+	if ((buf_sync->acq_fen_fd_cnt > MDP_MAX_FENCE_FD) ||
+				(sync_pt_data->timeline == NULL))
+		return -EINVAL;
+
+	if (buf_sync->acq_fen_fd_cnt)
+		ret = copy_from_user(acq_fen_fd, buf_sync->acq_fen_fd,
+				buf_sync->acq_fen_fd_cnt * sizeof(int));
+	if (ret) {
+		pr_err("%s: copy_from_user failed\n", sync_pt_data->fence_name);
+		return ret;
+	}
+
+	/* Drain fences from any previous frame before taking new ones. */
+	i = mdss_fb_wait_for_fence(sync_pt_data);
+	if (i > 0)
+		pr_warn("%s: waited on %d active fences\n",
+				sync_pt_data->fence_name, i);
+
+	mutex_lock(&sync_pt_data->sync_mutex);
+	for (i = 0; i < buf_sync->acq_fen_fd_cnt; i++) {
+		fence = sync_fence_fdget(acq_fen_fd[i]);
+		if (fence == NULL) {
+			pr_err("%s: null fence! i=%d fd=%d\n",
+					sync_pt_data->fence_name, i,
+					acq_fen_fd[i]);
+			ret = -EINVAL;
+			break;
+		}
+		sync_pt_data->acq_fen[i] = fence;
+	}
+	/* Record how many fences were actually taken so the error path can
+	 * release exactly those.
+	 */
+	sync_pt_data->acq_fen_cnt = i;
+	if (ret)
+		goto buf_sync_err_1;
+
+	/* Release point: current timeline value plus threshold plus frames
+	 * already queued for commit.
+	 */
+	val = sync_pt_data->timeline_value + sync_pt_data->threshold +
+			atomic_read(&sync_pt_data->commit_cnt);
+
+	MDSS_XLOG(sync_pt_data->timeline_value, val,
+		atomic_read(&sync_pt_data->commit_cnt));
+	pr_debug("%s: fence CTL%d Commit_cnt%d\n", sync_pt_data->fence_name,
+		sync_pt_data->timeline_value,
+		atomic_read(&sync_pt_data->commit_cnt));
+	/* Set release fence */
+	rel_fence = mdss_fb_sync_get_fence(sync_pt_data->timeline,
+			sync_pt_data->fence_name, val);
+	if (IS_ERR_OR_NULL(rel_fence)) {
+		pr_err("%s: unable to retrieve release fence\n",
+				sync_pt_data->fence_name);
+		ret = rel_fence ? PTR_ERR(rel_fence) : -ENOMEM;
+		goto buf_sync_err_1;
+	}
+
+	/* create fd */
+	rel_fen_fd = get_unused_fd_flags(0);
+	if (rel_fen_fd < 0) {
+		pr_err("%s: get_unused_fd_flags failed error:0x%x\n",
+				sync_pt_data->fence_name, rel_fen_fd);
+		ret = rel_fen_fd;
+		goto buf_sync_err_2;
+	}
+
+	ret = copy_to_user(buf_sync->rel_fen_fd, &rel_fen_fd, sizeof(int));
+	if (ret) {
+		pr_err("%s: copy_to_user failed\n", sync_pt_data->fence_name);
+		goto buf_sync_err_3;
+	}
+
+	if (!(buf_sync->flags & MDP_BUF_SYNC_FLAG_RETIRE_FENCE))
+		goto skip_retire_fence;
+
+	if (sync_pt_data->get_retire_fence)
+		retire_fence = sync_pt_data->get_retire_fence(sync_pt_data);
+	else
+		retire_fence = NULL;
+
+	/* Fall back to a timeline fence when the backend has no dedicated
+	 * retire fence.
+	 */
+	if (IS_ERR_OR_NULL(retire_fence)) {
+		val += sync_pt_data->retire_threshold;
+		retire_fence = mdss_fb_sync_get_fence(
+			sync_pt_data->timeline, "mdp-retire", val);
+	}
+
+	if (IS_ERR_OR_NULL(retire_fence)) {
+		pr_err("%s: unable to retrieve retire fence\n",
+				sync_pt_data->fence_name);
+		/*
+		 * Fix: report the retire fence's own error; the original
+		 * code mistakenly read PTR_ERR(rel_fence), which is a valid
+		 * pointer at this point.
+		 */
+		ret = retire_fence ? PTR_ERR(retire_fence) : -ENOMEM;
+		goto buf_sync_err_3;
+	}
+	retire_fen_fd = get_unused_fd_flags(0);
+
+	if (retire_fen_fd < 0) {
+		pr_err("%s: get_unused_fd_flags failed for retire fence error:0x%x\n",
+				sync_pt_data->fence_name, retire_fen_fd);
+		ret = retire_fen_fd;
+		sync_fence_put(retire_fence);
+		goto buf_sync_err_3;
+	}
+
+	ret = copy_to_user(buf_sync->retire_fen_fd, &retire_fen_fd,
+			sizeof(int));
+	if (ret) {
+		pr_err("%s: copy_to_user failed for retire fence\n",
+				sync_pt_data->fence_name);
+		put_unused_fd(retire_fen_fd);
+		sync_fence_put(retire_fence);
+		goto buf_sync_err_3;
+	}
+
+	sync_fence_install(retire_fence, retire_fen_fd);
+
+skip_retire_fence:
+	sync_fence_install(rel_fence, rel_fen_fd);
+	mutex_unlock(&sync_pt_data->sync_mutex);
+
+	if (buf_sync->flags & MDP_BUF_SYNC_FLAG_WAIT)
+		mdss_fb_wait_for_fence(sync_pt_data);
+
+	return ret;
+buf_sync_err_3:
+	put_unused_fd(rel_fen_fd);
+buf_sync_err_2:
+	sync_fence_put(rel_fence);
+buf_sync_err_1:
+	for (i = 0; i < sync_pt_data->acq_fen_cnt; i++)
+		sync_fence_put(sync_pt_data->acq_fen[i]);
+	sync_pt_data->acq_fen_cnt = 0;
+	mutex_unlock(&sync_pt_data->sync_mutex);
+	return ret;
+}
+/*
+ * mdss_fb_display_commit() - MSMFB_DISPLAY_COMMIT ioctl: copy the commit
+ * descriptor from userspace and run the extended pan-display path.
+ */
+static int mdss_fb_display_commit(struct fb_info *info,
+						unsigned long *argp)
+{
+	struct mdp_display_commit disp_commit;
+	int rc;
+
+	rc = copy_from_user(&disp_commit, argp, sizeof(disp_commit));
+	if (rc) {
+		pr_err("%s:copy_from_user failed\n", __func__);
+		return rc;
+	}
+
+	return mdss_fb_pan_display_ex(info, &disp_commit);
+}
+
+/**
+ * __mdss_fb_copy_pixel_ext() - copy pixel extension payload
+ * @src: pixel extn structure
+ * @dest: Qseed3/pixel extn common payload
+ *
+ * Function copies the pixel extension parameters into the scale data structure,
+ * this is required to allow using the scale_v2 data structure for both
+ * QSEED2 and QSEED3
+ */
+static void __mdss_fb_copy_pixel_ext(struct mdp_scale_data *src,
+					struct mdp_scale_data_v2 *dest)
+{
+	if (!src || !dest)
+		return;
+	/*
+	 * Use each field's own size rather than reusing
+	 * sizeof(src->init_phase_x) for all four phase arrays; the arrays
+	 * are expected to be equally sized, but sizing each copy from its
+	 * actual source is self-evidently correct and survives struct
+	 * changes.
+	 */
+	memcpy(dest->init_phase_x, src->init_phase_x,
+		sizeof(src->init_phase_x));
+	memcpy(dest->phase_step_x, src->phase_step_x,
+		sizeof(src->phase_step_x));
+	memcpy(dest->init_phase_y, src->init_phase_y,
+		sizeof(src->init_phase_y));
+	memcpy(dest->phase_step_y, src->phase_step_y,
+		sizeof(src->phase_step_y));
+
+	memcpy(dest->num_ext_pxls_left, src->num_ext_pxls_left,
+		sizeof(src->num_ext_pxls_left));
+	memcpy(dest->num_ext_pxls_right, src->num_ext_pxls_right,
+		sizeof(src->num_ext_pxls_right));
+	memcpy(dest->num_ext_pxls_top, src->num_ext_pxls_top,
+		sizeof(src->num_ext_pxls_top));
+	memcpy(dest->num_ext_pxls_btm, src->num_ext_pxls_btm,
+		sizeof(src->num_ext_pxls_btm));
+
+	memcpy(dest->left_ftch, src->left_ftch, sizeof(src->left_ftch));
+	memcpy(dest->left_rpt, src->left_rpt, sizeof(src->left_rpt));
+	memcpy(dest->right_ftch, src->right_ftch, sizeof(src->right_ftch));
+	memcpy(dest->right_rpt, src->right_rpt, sizeof(src->right_rpt));
+
+	memcpy(dest->top_rpt, src->top_rpt, sizeof(src->top_rpt));
+	memcpy(dest->btm_rpt, src->btm_rpt, sizeof(src->btm_rpt));
+	memcpy(dest->top_ftch, src->top_ftch, sizeof(src->top_ftch));
+	memcpy(dest->btm_ftch, src->btm_ftch, sizeof(src->btm_ftch));
+
+	memcpy(dest->roi_w, src->roi_w, sizeof(src->roi_w));
+}
+
+/*
+ * __mdss_fb_scaler_handler() - copy a layer's scaling payload from userspace.
+ * @layer: input layer whose ->scale currently holds a __user pointer.
+ *
+ * Replaces layer->scale with a kernel-space mdp_scale_data_v2: either
+ * converted from a legacy mdp_scale_data (MDP_LAYER_ENABLE_PIXEL_EXT) or
+ * copied directly (MDP_LAYER_ENABLE_QSEED3_SCALE). The two flags are
+ * mutually exclusive. On success the caller owns the allocated scale
+ * buffer (freed in the atomic-commit cleanup path); on failure
+ * layer->scale is NULL, the layer's pp_info is released, and a negative
+ * errno is returned.
+ */
+static int __mdss_fb_scaler_handler(struct mdp_input_layer *layer)
+{
+	int ret = 0;
+	struct mdp_scale_data *pixel_ext = NULL;
+	struct mdp_scale_data_v2 *scale = NULL;
+
+	if ((layer->flags & MDP_LAYER_ENABLE_PIXEL_EXT) &&
+			(layer->flags & MDP_LAYER_ENABLE_QSEED3_SCALE)) {
+		pr_err("Invalid flag configuration for scaler, %x\n",
+				layer->flags);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	if (layer->flags & MDP_LAYER_ENABLE_PIXEL_EXT) {
+		/* Legacy path: copy the v1 payload and convert into the
+		 * common v2 structure; the temporary v1 copy is freed in
+		 * both the success and error paths.
+		 */
+		scale = kzalloc(sizeof(struct mdp_scale_data_v2),
+				GFP_KERNEL);
+		pixel_ext = kzalloc(sizeof(struct mdp_scale_data),
+				GFP_KERNEL);
+		if (!scale || !pixel_ext) {
+			mdss_mdp_free_layer_pp_info(layer);
+			ret = -ENOMEM;
+			goto err;
+		}
+		ret = copy_from_user(pixel_ext, layer->scale,
+				sizeof(struct mdp_scale_data));
+		if (ret) {
+			mdss_mdp_free_layer_pp_info(layer);
+			ret = -EFAULT;
+			goto err;
+		}
+		__mdss_fb_copy_pixel_ext(pixel_ext, scale);
+		layer->scale = scale;
+	} else if (layer->flags & MDP_LAYER_ENABLE_QSEED3_SCALE) {
+		scale = kzalloc(sizeof(struct mdp_scale_data_v2),
+				GFP_KERNEL);
+		if (!scale) {
+			mdss_mdp_free_layer_pp_info(layer);
+			ret =  -ENOMEM;
+			goto err;
+		}
+
+		ret = copy_from_user(scale, layer->scale,
+				sizeof(struct mdp_scale_data_v2));
+		if (ret) {
+			mdss_mdp_free_layer_pp_info(layer);
+			ret = -EFAULT;
+			goto err;
+		}
+		layer->scale = scale;
+	} else {
+		layer->scale = NULL;
+	}
+	kfree(pixel_ext);
+	return ret;
+err:
+	kfree(pixel_ext);
+	kfree(scale);
+	layer->scale = NULL;
+	return ret;
+}
+
+/*
+ * mdss_fb_atomic_commit_ioctl() - MSMFB_ATOMIC_COMMIT ioctl handler.
+ * @info: framebuffer instance.
+ * @argp: user pointer to a struct mdp_layer_commit.
+ * @file: originating file, forwarded to the commit core.
+ *
+ * Copies the commit descriptor plus its output layer, input layer array
+ * (including per-layer pp_info and scaler payloads) and FRC info into
+ * kernel memory, runs the atomic commit, then copies per-layer error
+ * codes and the descriptor (with user pointers restored) back out.
+ * The cleanup path frees per-layer scale/pp_info for layers [0, i) only,
+ * i.e. exactly those successfully processed by the copy loop.
+ */
+static int mdss_fb_atomic_commit_ioctl(struct fb_info *info,
+	unsigned long *argp, struct file *file)
+{
+	int ret, i = 0, j = 0, rc;
+	struct mdp_layer_commit  commit;
+	u32 buffer_size, layer_count;
+	struct mdp_input_layer *layer, *layer_list = NULL;
+	struct mdp_input_layer __user *input_layer_list;
+	struct mdp_output_layer *output_layer = NULL;
+	struct mdp_output_layer __user *output_layer_user;
+	struct mdp_frc_info *frc_info = NULL;
+	struct mdp_frc_info __user *frc_info_user;
+	struct msm_fb_data_type *mfd;
+	struct mdss_overlay_private *mdp5_data = NULL;
+
+	ret = copy_from_user(&commit, argp, sizeof(struct mdp_layer_commit));
+	if (ret) {
+		pr_err("%s:copy_from_user failed\n", __func__);
+		return ret;
+	}
+
+	mfd = (struct msm_fb_data_type *)info->par;
+	if (!mfd)
+		return -EINVAL;
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+
+	if (mfd->panel_info->panel_dead) {
+		pr_debug("early commit return\n");
+		MDSS_XLOG(mfd->panel_info->panel_dead);
+		/*
+		 * In case of an ESD attack, since we early return from the
+		 * commits, we need to signal the outstanding fences.
+		 */
+		mdss_fb_release_fences(mfd);
+		if ((mfd->panel.type == MIPI_CMD_PANEL) &&
+			mfd->mdp.signal_retire_fence && mdp5_data)
+			mfd->mdp.signal_retire_fence(mfd,
+						mdp5_data->retire_cnt);
+		return 0;
+	}
+
+	/* Stage 1: optional writeback/output layer. */
+	output_layer_user = commit.commit_v1.output_layer;
+	if (output_layer_user) {
+		buffer_size = sizeof(struct mdp_output_layer);
+		output_layer = kzalloc(buffer_size, GFP_KERNEL);
+		if (!output_layer) {
+			pr_err("unable to allocate memory for output layer\n");
+			return -ENOMEM;
+		}
+
+		ret = copy_from_user(output_layer,
+			output_layer_user, buffer_size);
+		if (ret) {
+			pr_err("layer list copy from user failed\n");
+			goto err;
+		}
+		commit.commit_v1.output_layer = output_layer;
+	}
+
+	/* Stage 2: input layers, each with optional pp_info and scaler
+	 * payloads pulled in separately.
+	 */
+	layer_count = commit.commit_v1.input_layer_cnt;
+	input_layer_list = commit.commit_v1.input_layers;
+
+	if (layer_count > MAX_LAYER_COUNT) {
+		ret = -EINVAL;
+		goto err;
+	} else if (layer_count) {
+		buffer_size = sizeof(struct mdp_input_layer) * layer_count;
+		layer_list = kzalloc(buffer_size, GFP_KERNEL);
+		if (!layer_list) {
+			pr_err("unable to allocate memory for layers\n");
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		ret = copy_from_user(layer_list, input_layer_list, buffer_size);
+		if (ret) {
+			pr_err("layer list copy from user failed\n");
+			goto err;
+		}
+
+		commit.commit_v1.input_layers = layer_list;
+
+		for (i = 0; i < layer_count; i++) {
+			layer = &layer_list[i];
+
+			if (!(layer->flags & MDP_LAYER_PP)) {
+				layer->pp_info = NULL;
+			} else {
+				ret = mdss_mdp_copy_layer_pp_info(layer);
+				if (ret) {
+					pr_err("failure to copy pp_info data for layer %d, ret = %d\n",
+						i, ret);
+					goto err;
+				}
+			}
+
+			if ((layer->flags & MDP_LAYER_ENABLE_PIXEL_EXT) ||
+				(layer->flags &
+				 MDP_LAYER_ENABLE_QSEED3_SCALE)) {
+				ret = __mdss_fb_scaler_handler(layer);
+				if (ret) {
+					pr_err("failure to copy scale params for layer %d, ret = %d\n",
+						i, ret);
+					goto err;
+				}
+			} else {
+				layer->scale = NULL;
+			}
+		}
+	}
+
+	/* Copy Deterministic Frame Rate Control info from userspace */
+	frc_info_user = commit.commit_v1.frc_info;
+	if (frc_info_user) {
+		frc_info = kzalloc(sizeof(struct mdp_frc_info), GFP_KERNEL);
+		if (!frc_info) {
+			pr_err("unable to allocate memory for frc\n");
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		ret = copy_from_user(frc_info, frc_info_user,
+			sizeof(struct mdp_frc_info));
+		if (ret) {
+			pr_err("frc info copy from user failed\n");
+			goto frc_err;
+		}
+
+		commit.commit_v1.frc_info = frc_info;
+	}
+
+	ATRACE_BEGIN("ATOMIC_COMMIT");
+	ret = mdss_fb_atomic_commit(info, &commit, file);
+	if (ret)
+		pr_err("atomic commit failed ret:%d\n", ret);
+	ATRACE_END("ATOMIC_COMMIT");
+
+	/* Stage 3: report per-layer error codes and returned fences; the
+	 * user-space pointers are restored before copying the descriptor.
+	 */
+	if (layer_count) {
+		for (j = 0; j < layer_count; j++) {
+			rc = copy_to_user(&input_layer_list[j].error_code,
+					&layer_list[j].error_code, sizeof(int));
+			if (rc)
+				pr_err("layer error code copy to user failed\n");
+		}
+
+		commit.commit_v1.input_layers = input_layer_list;
+		commit.commit_v1.output_layer = output_layer_user;
+		commit.commit_v1.frc_info = frc_info_user;
+		rc = copy_to_user(argp, &commit,
+			sizeof(struct mdp_layer_commit));
+		if (rc)
+			pr_err("copy to user for release & retire fence failed\n");
+	}
+
+frc_err:
+	kfree(frc_info);
+err:
+	/* Free only the layers the copy loop fully processed ([0, i)). */
+	for (i--; i >= 0; i--) {
+		kfree(layer_list[i].scale);
+		layer_list[i].scale = NULL;
+		mdss_mdp_free_layer_pp_info(&layer_list[i]);
+	}
+	kfree(layer_list);
+	kfree(output_layer);
+
+	return ret;
+}
+
+/*
+ * mdss_fb_switch_check() - validate that an immediate DSI mode switch to
+ * @mode is currently allowed on @mfd.
+ *
+ * Returns 0 when the switch may proceed, -EINVAL on bad arguments, or
+ * -EPERM when the panel is off, does not support immediate switching, is
+ * in DCM, is already in @mode, or is not a MIPI video/cmd panel.
+ */
+int mdss_fb_switch_check(struct msm_fb_data_type *mfd, u32 mode)
+{
+	struct mdss_panel_info *pinfo;
+	int ptype;
+
+	if (!mfd || !mfd->panel_info)
+		return -EINVAL;
+
+	pinfo = mfd->panel_info;
+
+	if (!mfd->op_enable || mdss_fb_is_power_off(mfd))
+		return -EPERM;
+
+	if (pinfo->mipi.dms_mode != DYNAMIC_MODE_SWITCH_IMMEDIATE) {
+		pr_warn("Panel does not support immediate dynamic switch!\n");
+		return -EPERM;
+	}
+
+	if (mfd->dcm_state != DCM_UNINIT) {
+		pr_warn("Switch not supported during DCM!\n");
+		return -EPERM;
+	}
+
+	/* pinfo->type is read under switch_lock to get a stable snapshot. */
+	mutex_lock(&mfd->switch_lock);
+	if (mode == pinfo->type) {
+		pr_debug("Already in requested mode!\n");
+		mutex_unlock(&mfd->switch_lock);
+		return -EPERM;
+	}
+	mutex_unlock(&mfd->switch_lock);
+
+	ptype = mfd->panel.type;
+	if (ptype != MIPI_VIDEO_PANEL && ptype != MIPI_CMD_PANEL) {
+		pr_debug("Panel not in mipi video or cmd mode, cannot change\n");
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+/*
+ * mdss_fb_immediate_mode_switch() - queue an immediate video<->command
+ * mode switch; @mode non-zero requests command mode, zero video mode.
+ * The actual switch is performed on the next validate/commit cycle.
+ */
+static int mdss_fb_immediate_mode_switch(struct msm_fb_data_type *mfd, u32 mode)
+{
+	u32 target_mode = mode ? MIPI_CMD_PANEL : MIPI_VIDEO_PANEL;
+	int rc;
+
+	pr_debug("%s: Request to switch to %d,", __func__, target_mode);
+
+	rc = mdss_fb_switch_check(mfd, target_mode);
+	if (rc)
+		return rc;
+
+	mutex_lock(&mfd->switch_lock);
+	if (mfd->switch_state == MDSS_MDP_NO_UPDATE_REQUESTED) {
+		/* Arm the deferred switch for the next validate. */
+		mfd->switch_state = MDSS_MDP_WAIT_FOR_VALIDATE;
+		mfd->switch_new_mode = target_mode;
+	} else {
+		pr_err("%s: Mode switch already in progress\n", __func__);
+		rc = -EAGAIN;
+	}
+	mutex_unlock(&mfd->switch_lock);
+
+	return rc;
+}
+
+/*
+ * mdss_fb_mode_switch() - Function to change DSI mode
+ * @mfd:	Framebuffer data structure for display
+ * @mode:	Enabled/Disable LowPowerMode
+ *		1: Switch to Command Mode
+ *		0: Switch to video Mode
+ *
+ * Dispatches the switch to the blanking-based or immediate path depending
+ * on the panel's declared dynamic-mode-switch capability.
+ */
+static int mdss_fb_mode_switch(struct msm_fb_data_type *mfd, u32 mode)
+{
+	struct mdss_panel_info *pinfo;
+
+	if (!mfd || !mfd->panel_info)
+		return -EINVAL;
+
+	pinfo = mfd->panel_info;
+	switch (pinfo->mipi.dms_mode) {
+	case DYNAMIC_MODE_SWITCH_SUSPEND_RESUME:
+		return mdss_fb_blanking_mode_switch(mfd, mode);
+	case DYNAMIC_MODE_SWITCH_IMMEDIATE:
+		return mdss_fb_immediate_mode_switch(mfd, mode);
+	default:
+		pr_warn("Panel does not support dynamic mode switch!\n");
+		return -EPERM;
+	}
+}
+
+/*
+ * __ioctl_wait_idle() - before frame-touching ioctls, wait for any pending
+ * kickoff to complete when the target requires it.
+ */
+static int __ioctl_wait_idle(struct msm_fb_data_type *mfd, u32 cmd)
+{
+	int rc = 0;
+
+	if (mfd->wait_for_kickoff) {
+		switch (cmd) {
+		case MSMFB_OVERLAY_PREPARE:
+		case MSMFB_BUFFER_SYNC:
+		case MSMFB_OVERLAY_PLAY:
+		case MSMFB_CURSOR:
+		case MSMFB_METADATA_GET:
+		case MSMFB_METADATA_SET:
+		case MSMFB_OVERLAY_GET:
+		case MSMFB_OVERLAY_UNSET:
+		case MSMFB_OVERLAY_SET:
+			rc = mdss_fb_wait_for_kickoff(mfd);
+			break;
+		default:
+			break;
+		}
+	}
+
+	/* -ESHUTDOWN is an expected teardown result; don't log it. */
+	if (rc && (rc != -ESHUTDOWN))
+		pr_err("wait_idle failed. cmd=0x%x rc=%d\n", cmd, rc);
+
+	return rc;
+}
+
+#ifdef TARGET_HW_MDSS_MDP3
+/* MDP3 targets support the full legacy ioctl set. */
+static bool check_not_supported_ioctl(u32 cmd)
+{
+	return false;
+}
+#else
+/* Legacy overlay/commit ioctls are rejected on non-MDP3 targets. */
+static bool check_not_supported_ioctl(u32 cmd)
+{
+	switch (cmd) {
+	case MSMFB_OVERLAY_SET:
+	case MSMFB_OVERLAY_UNSET:
+	case MSMFB_OVERLAY_GET:
+	case MSMFB_OVERLAY_PREPARE:
+	case MSMFB_DISPLAY_COMMIT:
+	case MSMFB_OVERLAY_PLAY:
+	case MSMFB_BUFFER_SYNC:
+	case MSMFB_OVERLAY_QUEUE:
+	case MSMFB_NOTIFY_UPDATE:
+		return true;
+	default:
+		return false;
+	}
+}
+#endif
+
+/*
+ * mdss_fb_do_ioctl() - MDSS Framebuffer ioctl function
+ * @info:	pointer to framebuffer info
+ * @cmd:	ioctl command
+ * @arg:	argument to ioctl
+ *
+ * This function provides an architecture agnostic implementation
+ * of the mdss framebuffer ioctl. This function can be called
+ * by compat ioctl or regular ioctl to handle the supported commands.
+ *
+ * ioctl_ref_cnt is held for the duration of the call; shutdown waits on
+ * ioctl_q until the count drops to zero.
+ */
+int mdss_fb_do_ioctl(struct fb_info *info, unsigned int cmd,
+			 unsigned long arg, struct file *file)
+{
+	struct msm_fb_data_type *mfd;
+	void __user *argp = (void __user *)arg;
+	/*
+	 * NOTE(review): -ENOTSUP is a userspace errno alias; kernel code
+	 * normally uses -ENOTSUPP/-EOPNOTSUPP — confirm this identifier
+	 * resolves in this tree.
+	 */
+	int ret = -ENOTSUP;
+	struct mdp_buf_sync buf_sync;
+	unsigned int dsi_mode = 0;
+	struct mdss_panel_data *pdata = NULL;
+
+	if (!info || !info->par)
+		return -EINVAL;
+
+	mfd = (struct msm_fb_data_type *)info->par;
+	if (!mfd)
+		return -EINVAL;
+
+	if (mfd->shutdown_pending)
+		return -ESHUTDOWN;
+
+	pdata = dev_get_platdata(&mfd->pdev->dev);
+	if (!pdata || pdata->panel_info.dynamic_switch_pending)
+		return -EPERM;
+
+	if (check_not_supported_ioctl(cmd)) {
+		pr_err("Unsupported ioctl\n");
+		return -EINVAL;
+	}
+
+	/* Block shutdown while this ioctl is in flight. */
+	atomic_inc(&mfd->ioctl_ref_cnt);
+
+	mdss_fb_power_setting_idle(mfd);
+
+	ret = __ioctl_wait_idle(mfd, cmd);
+	if (ret)
+		goto exit;
+
+	switch (cmd) {
+	case MSMFB_CURSOR:
+		ret = mdss_fb_cursor(info, argp);
+		break;
+
+	case MSMFB_SET_LUT:
+		ret = mdss_fb_set_lut(info, argp);
+		break;
+
+	case MSMFB_BUFFER_SYNC:
+		ret = copy_from_user(&buf_sync, argp, sizeof(buf_sync));
+		if (ret)
+			goto exit;
+
+		if ((!mfd->op_enable) || (mdss_fb_is_power_off(mfd))) {
+			ret = -EPERM;
+			goto exit;
+		}
+
+		ret = mdss_fb_handle_buf_sync_ioctl(&mfd->mdp_sync_pt_data,
+				&buf_sync);
+		/* Copy back the fence fds filled in by the handler. */
+		if (!ret)
+			ret = copy_to_user(argp, &buf_sync, sizeof(buf_sync));
+		break;
+
+	case MSMFB_NOTIFY_UPDATE:
+		ret = mdss_fb_notify_update(mfd, argp);
+		break;
+
+	case MSMFB_DISPLAY_COMMIT:
+		ret = mdss_fb_display_commit(info, argp);
+		break;
+
+	case MSMFB_LPM_ENABLE:
+		ret = copy_from_user(&dsi_mode, argp, sizeof(dsi_mode));
+		if (ret) {
+			pr_err("%s: MSMFB_LPM_ENABLE ioctl failed\n", __func__);
+			goto exit;
+		}
+
+		ret = mdss_fb_mode_switch(mfd, dsi_mode);
+		break;
+	case MSMFB_ATOMIC_COMMIT:
+		ret = mdss_fb_atomic_commit_ioctl(info, argp, file);
+		break;
+
+	case MSMFB_ASYNC_POSITION_UPDATE:
+		ret = mdss_fb_async_position_update_ioctl(info, argp);
+		break;
+
+	default:
+		/* Unknown commands fall through to the MDP backend. */
+		if (mfd->mdp.ioctl_handler)
+			ret = mfd->mdp.ioctl_handler(mfd, cmd, argp);
+		break;
+	}
+
+	if (ret == -ENOTSUP)
+		pr_err("unsupported ioctl (%x)\n", cmd);
+
+exit:
+	if (!atomic_dec_return(&mfd->ioctl_ref_cnt))
+		wake_up_all(&mfd->ioctl_q);
+
+	return ret;
+}
+
+/* Thin fbdev entry point: validate arguments, delegate to the core. */
+static int mdss_fb_ioctl(struct fb_info *info, unsigned int cmd,
+			 unsigned long arg, struct file *file)
+{
+	if (!(info && info->par))
+		return -EINVAL;
+
+	return mdss_fb_do_ioctl(info, cmd, arg, file);
+}
+
+/*
+ * mdss_fb_register_extra_panel() - attach @pdata as the second (split)
+ * panel of an already-probed framebuffer platform device.
+ * Returns -EINVAL without panel data, -EEXIST if a split panel is
+ * already chained, 0 on success.
+ */
+static int mdss_fb_register_extra_panel(struct platform_device *pdev,
+	struct mdss_panel_data *pdata)
+{
+	struct mdss_panel_data *primary_pdata = dev_get_platdata(&pdev->dev);
+
+	if (!primary_pdata) {
+		pr_err("framebuffer device %s contains invalid panel data\n",
+				dev_name(&pdev->dev));
+		return -EINVAL;
+	}
+
+	if (primary_pdata->next) {
+		pr_err("split panel already setup for framebuffer device %s\n",
+				dev_name(&pdev->dev));
+		return -EEXIST;
+	}
+
+	primary_pdata->next = pdata;
+
+	return 0;
+}
+
+/*
+ * mdss_register_panel() - register a panel with the MDSS framebuffer layer.
+ * @pdev:  panel platform device (must carry an of_node).
+ * @pdata: panel data to attach.
+ *
+ * Resolves the fb device-tree node (backend hook first, then the
+ * "qcom,mdss-fb-map" phandle), finds or creates the corresponding fb
+ * platform device, and attaches @pdata either as its platform data
+ * (master panel) or as an extra split panel. Notifies the MDP backend
+ * once the master panel is registered. Returns 0 on success,
+ * -EPROBE_DEFER if MDP is not up yet, or a negative errno.
+ */
+int mdss_register_panel(struct platform_device *pdev,
+	struct mdss_panel_data *pdata)
+{
+	struct platform_device *fb_pdev, *mdss_pdev;
+	struct device_node *node = NULL;
+	int rc = 0;
+	bool master_panel = true;
+
+	if (!pdev || !pdev->dev.of_node) {
+		pr_err("Invalid device node\n");
+		return -ENODEV;
+	}
+
+	if (!mdp_instance) {
+		pr_err("mdss mdp resource not initialized yet\n");
+		return -EPROBE_DEFER;
+	}
+
+	/* Backend may provide the fb node directly. */
+	if (pdata->get_fb_node)
+		node = pdata->get_fb_node(pdev);
+
+	if (!node) {
+		node = of_parse_phandle(pdev->dev.of_node,
+			"qcom,mdss-fb-map", 0);
+		if (!node) {
+			pr_err("Unable to find fb node for device: %s\n",
+					pdev->name);
+			return -ENODEV;
+		}
+	}
+	/*
+	 * NOTE(review): the references taken here by of_find_device_by_node()
+	 * (mdss_pdev/fb_pdev) are never dropped with platform_device_put() —
+	 * presumably acceptable because these devices live for the system
+	 * lifetime; confirm.
+	 */
+	mdss_pdev = of_find_device_by_node(node->parent);
+	if (!mdss_pdev) {
+		pr_err("Unable to find mdss for node: %s\n", node->full_name);
+		rc = -ENODEV;
+		goto mdss_notfound;
+	}
+
+	pdata->active = true;
+	fb_pdev = of_find_device_by_node(node);
+	if (fb_pdev) {
+		/* fb device exists: chain as a split (non-master) panel. */
+		rc = mdss_fb_register_extra_panel(fb_pdev, pdata);
+		if (rc == 0)
+			master_panel = false;
+	} else {
+		pr_info("adding framebuffer device %s\n", dev_name(&pdev->dev));
+		fb_pdev = of_platform_device_create(node, NULL,
+				&mdss_pdev->dev);
+		if (fb_pdev)
+			fb_pdev->dev.platform_data = pdata;
+	}
+
+	if (master_panel && mdp_instance->panel_register_done)
+		mdp_instance->panel_register_done(pdata);
+
+mdss_notfound:
+	of_node_put(node);
+	return rc;
+}
+EXPORT_SYMBOL(mdss_register_panel);
+
+/*
+ * mdss_fb_register_mdp_instance() - record the (single) MDP backend.
+ * Returns -EINVAL if a backend has already registered.
+ */
+int mdss_fb_register_mdp_instance(struct msm_mdp_interface *mdp)
+{
+	if (mdp_instance != NULL) {
+		pr_err("multiple MDP instance registration\n");
+		return -EINVAL;
+	}
+
+	mdp_instance = mdp;
+	return 0;
+}
+EXPORT_SYMBOL(mdss_fb_register_mdp_instance);
+
+/*
+ * mdss_fb_get_phys_info() - report the DMA/physical address and length of
+ * framebuffer @fb_num's memory; prefers the SMMU-mapped iova when set.
+ */
+int mdss_fb_get_phys_info(dma_addr_t *start, unsigned long *len, int fb_num)
+{
+	struct fb_info *fbi;
+	struct msm_fb_data_type *mfd;
+
+	if (fb_num >= MAX_FBI_LIST)
+		return -EINVAL;
+
+	fbi = fbi_list[fb_num];
+	if (!fbi)
+		return -ENOENT;
+
+	mfd = (struct msm_fb_data_type *)fbi->par;
+	if (!mfd)
+		return -ENODEV;
+
+	*start = mfd->iova ? mfd->iova : fbi->fix.smem_start;
+	*len = fbi->fix.smem_len;
+
+	return 0;
+}
+EXPORT_SYMBOL(mdss_fb_get_phys_info);
+
+/*
+ * mdss_fb_init() - module init: register the fb platform driver unless
+ * msmfb was disabled on the kernel command line.
+ */
+int __init mdss_fb_init(void)
+{
+	if (fb_get_options("msmfb", NULL))
+		return -ENODEV;
+
+	if (platform_driver_register(&mdss_fb_driver))
+		return -ENODEV;
+
+	return 0;
+}
+
+module_init(mdss_fb_init);
+
+/*
+ * mdss_fb_suspres_panel() - per-device callback forwarding a runtime
+ * suspend/resume event to the panel; @data points to a bool (true =
+ * resume).
+ */
+int mdss_fb_suspres_panel(struct device *dev, void *data)
+{
+	struct msm_fb_data_type *mfd;
+	u32 event;
+	int rc;
+
+	if (!data) {
+		pr_err("Device state not defined\n");
+		return -EINVAL;
+	}
+
+	mfd = dev_get_drvdata(dev);
+	if (!mfd)
+		return 0;
+
+	event = *((bool *) data) ? MDSS_EVENT_RESUME : MDSS_EVENT_SUSPEND;
+
+	/* Do not send runtime suspend/resume for HDMI primary */
+	if (mdss_fb_is_hdmi_primary(mfd))
+		return 0;
+
+	rc = mdss_fb_send_panel_event(mfd, event, NULL);
+	if (rc)
+		pr_warn("unable to %s fb%d (%d)\n",
+			event == MDSS_EVENT_RESUME ? "resume" : "suspend",
+			mfd->index, rc);
+
+	return rc;
+}
+
+/*
+ * mdss_fb_report_panel_dead() - Sends the PANEL_ALIVE=0 status to HAL layer.
+ * @mfd   : frame buffer structure associated with fb device.
+ *
+ * Called when the panel stops responding to register read/BTA checks or
+ * stops producing TE. Marks the panel dead and raises a PANEL_ALIVE=0
+ * uevent so the HAL can recover the display.
+ */
+void mdss_fb_report_panel_dead(struct msm_fb_data_type *mfd)
+{
+	char *envp[2] = {"PANEL_ALIVE=0", NULL};
+	struct mdss_panel_data *pdata = dev_get_platdata(&mfd->pdev->dev);
+
+	if (!pdata) {
+		pr_err("Panel data not available\n");
+		return;
+	}
+
+	pdata->panel_info.panel_dead = true;
+	kobject_uevent_env(&mfd->fbi->dev->kobj, KOBJ_CHANGE, envp);
+	pr_err("Panel has gone bad, sending uevent - %s\n", envp[0]);
+}
+
+
+/*
+ * mdss_fb_calc_fps() - Calculates fps value.
+ * @mfd   : frame buffer structure associated with fb device.
+ *
+ * Called at frame done. Counts frames over (at least) one second windows
+ * and stores 10x the computed fps in measured_fps (e.g. 594 for 59.4 fps).
+ */
+void mdss_fb_calc_fps(struct msm_fb_data_type *mfd)
+{
+	ktime_t now;
+	u64 fps, elapsed_us;
+
+	now = ktime_get();
+	elapsed_us = (u64)ktime_us_delta(now,
+			mfd->fps_info.last_sampled_time_us);
+	mfd->fps_info.frame_count++;
+
+	if (elapsed_us < MDP_TIME_PERIOD_CALC_FPS_US)
+		return;
+
+	/* frames * 10^7 / elapsed_us yields fps scaled by 10. */
+	fps = ((u64)mfd->fps_info.frame_count) * 10000000;
+	do_div(fps, elapsed_us);
+	mfd->fps_info.measured_fps = (unsigned int)fps;
+	pr_debug(" MDP_FPS for fb%d is %d.%d\n",
+		mfd->index, (unsigned int)fps/10, (unsigned int)fps%10);
+	mfd->fps_info.last_sampled_time_us = now;
+	mfd->fps_info.frame_count = 0;
+}
diff --git a/drivers/video/fbdev/msm/mdss_fb.h b/drivers/video/fbdev/msm/mdss_fb.h
new file mode 100644
index 0000000..f8ef48a
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_fb.h
@@ -0,0 +1,474 @@
+/* Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_FB_H
+#define MDSS_FB_H
+
+#include <linux/msm_ion.h>
+#include <linux/list.h>
+#include <linux/msm_mdp_ext.h>
+#include <linux/types.h>
+#include <linux/notifier.h>
+#include <linux/leds.h>
+
+#include "mdss_panel.h"
+#include "mdss_mdp_splash_logo.h"
+
+#define MDSS_LPAE_CHECK(phys)	\
+	((sizeof(phys) > sizeof(unsigned long)) ? ((phys >> 32) & 0xFF) : (0))
+
+#define MSM_FB_DEFAULT_PAGE_SIZE 2
+#define MFD_KEY  0x11161126
+#define MSM_FB_MAX_DEV_LIST 32
+
+#define MSM_FB_ENABLE_DBGFS
+#define WAIT_FENCE_FIRST_TIMEOUT (3 * MSEC_PER_SEC)
+#define WAIT_FENCE_FINAL_TIMEOUT (7 * MSEC_PER_SEC)
+#define WAIT_MAX_FENCE_TIMEOUT (WAIT_FENCE_FIRST_TIMEOUT + \
+					WAIT_FENCE_FINAL_TIMEOUT)
+#define WAIT_MIN_FENCE_TIMEOUT  (1)
+/*
+ * Display op timeout should be greater than total time it can take for
+ * a display thread to commit one frame. One of the largest time consuming
+ * activity performed by display thread is waiting for fences. So keeping
+ * that as a reference and add additional 20s to sustain system holdups.
+ */
+#define WAIT_DISP_OP_TIMEOUT (WAIT_FENCE_FIRST_TIMEOUT + \
+		WAIT_FENCE_FINAL_TIMEOUT + (20 * MSEC_PER_SEC))
+
+#ifndef MAX
+#define  MAX(x, y) (((x) > (y)) ? (x) : (y))
+#endif
+
+#ifndef MIN
+#define  MIN(x, y) (((x) < (y)) ? (x) : (y))
+#endif
+
+#define MDP_PP_AD_BL_LINEAR	0x0
+#define MDP_PP_AD_BL_LINEAR_INV	0x1
+
+/**
+ * enum mdp_notify_event - Different frame events to indicate frame update state
+ *
+ * @MDP_NOTIFY_FRAME_BEGIN:	Frame update has started, the frame is about to
+ *				be programmed into hardware.
+ * @MDP_NOTIFY_FRAME_CFG_DONE:	Frame configuration is done.
+ * @MDP_NOTIFY_FRAME_CTX_DONE:	Frame has finished accessing sw context.
+ *				Next frame can start preparing.
+ * @MDP_NOTIFY_FRAME_READY:	Frame ready to be kicked off, this can be used
+ *				as the last point in time to synchronize with
+ *				source buffers before kickoff.
+ * @MDP_NOTIFY_FRAME_FLUSHED:	Configuration of frame has been flushed and
+ *				DMA transfer has started.
+ * @MDP_NOTIFY_FRAME_DONE:	Frame DMA transfer has completed.
+ *				- For video mode panels this will indicate that
+ *				  previous frame has been replaced by new one.
+ *				- For command mode/writeback frame done happens
+ *				  as soon as the DMA of the frame is done.
+ * @MDP_NOTIFY_FRAME_TIMEOUT:	Frame DMA transfer has failed to complete within
+ *				a fair amount of time.
+ */
+enum mdp_notify_event {
+	MDP_NOTIFY_FRAME_BEGIN = 1,
+	MDP_NOTIFY_FRAME_CFG_DONE,
+	MDP_NOTIFY_FRAME_CTX_DONE,
+	MDP_NOTIFY_FRAME_READY,
+	MDP_NOTIFY_FRAME_FLUSHED,
+	MDP_NOTIFY_FRAME_DONE,
+	MDP_NOTIFY_FRAME_TIMEOUT,
+};
+
+/**
+ * enum mdp_split_mode - Lists the possible split modes in the device
+ *
+ * @MDP_SPLIT_MODE_NONE: Single physical display with single ctl path
+ *                       and single layer mixer.
+ *                       i.e. 1080p single DSI with single LM.
+ * #MDP_DUAL_LM_SINGLE_DISPLAY: Single physical display with signle ctl
+ *                              path but two layer mixers.
+ *                              i.e. WQXGA eDP or 4K HDMI primary or 1080p
+ *                                   single DSI with split LM to reduce power.
+ * @MDP_DUAL_LM_DUAL_DISPLAY: Two physically separate displays with two
+ *                            separate but synchronized ctl paths. Each ctl
+ *                            path with its own layer mixer.
+ *                            i.e. 1440x2560 with two DSI interfaces.
+ * @MDP_PINGPONG_SPLIT: Two physically separate display but single ctl path with
+ *                      single layer mixer. Data is split at pingpong module.
+ *                      i.e. 1440x2560 on chipsets with single DSI interface.
+ */
+enum mdp_split_mode {
+	MDP_SPLIT_MODE_NONE,
+	MDP_DUAL_LM_SINGLE_DISPLAY,
+	MDP_DUAL_LM_DUAL_DISPLAY,
+	MDP_PINGPONG_SPLIT,
+};
+
+/* enum mdp_mmap_type - Lists the possible mmap type in the device
+ *
+ * @MDP_FB_MMAP_NONE: Unknown type.
+ * @MDP_FB_MMAP_ION_ALLOC:   Use ION allocate a buffer for mmap
+ * @MDP_FB_MMAP_PHYSICAL_ALLOC:  Use physical buffer for mmap
+ */
+enum mdp_mmap_type {
+	MDP_FB_MMAP_NONE,
+	MDP_FB_MMAP_ION_ALLOC,
+	MDP_FB_MMAP_PHYSICAL_ALLOC,
+};
+
+/**
+ * enum dyn_mode_switch_state - Lists next stage for dynamic mode switch work
+ *
+ * @MDSS_MDP_NO_UPDATE_REQUESTED: incoming frame is processed normally
+ * @MDSS_MDP_WAIT_FOR_VALIDATE: Waiting for ATOMIC_COMMIT-validate to be called
+ * @MDSS_MDP_WAIT_FOR_COMMIT: Waiting for ATOMIC_COMMIT-commit to be called
+ * @MDSS_MDP_WAIT_FOR_KICKOFF: Waiting for KICKOFF to be called
+ */
+enum dyn_mode_switch_state {
+	MDSS_MDP_NO_UPDATE_REQUESTED,
+	MDSS_MDP_WAIT_FOR_VALIDATE,
+	MDSS_MDP_WAIT_FOR_COMMIT,
+	MDSS_MDP_WAIT_FOR_KICKOFF,
+};
+
+/**
+ * enum mdss_fb_idle_state - idle states based on frame updates
+ * @MDSS_FB_NOT_IDLE: Frame updates have started
+ * @MDSS_FB_IDLE_TIMER_RUNNING: Idle timer has been kicked
+ * @MDSS_FB_IDLE: Currently idle
+ */
+enum mdss_fb_idle_state {
+	MDSS_FB_NOT_IDLE,
+	MDSS_FB_IDLE_TIMER_RUNNING,
+	MDSS_FB_IDLE
+};
+
+struct disp_info_type_suspend {
+	int op_enable;
+	int panel_power_state;
+};
+
+struct disp_info_notify {
+	int type;
+	struct timer_list timer;
+	struct completion comp;
+	struct mutex lock;
+	int value;
+	int is_suspend;
+	int ref_count;
+	bool init_done;
+};
+
+struct msm_sync_pt_data {
+	char *fence_name;
+	u32 acq_fen_cnt;
+	struct sync_fence *acq_fen[MDP_MAX_FENCE_FD];
+	u32 temp_fen_cnt;
+	struct sync_fence *temp_fen[MDP_MAX_FENCE_FD];
+
+	struct sw_sync_timeline *timeline;
+	int timeline_value;
+	u32 threshold;
+	u32 retire_threshold;
+	atomic_t commit_cnt;
+	bool flushed;
+	bool async_wait_fences;
+
+	struct mutex sync_mutex;
+	struct notifier_block notifier;
+
+	struct sync_fence *(*get_retire_fence)
+		(struct msm_sync_pt_data *sync_pt_data);
+};
+
+struct msm_fb_data_type;
+
+struct msm_mdp_interface {
+	int (*fb_mem_alloc_fnc)(struct msm_fb_data_type *mfd);
+	int (*fb_mem_get_iommu_domain)(void);
+	int (*init_fnc)(struct msm_fb_data_type *mfd);
+	int (*on_fnc)(struct msm_fb_data_type *mfd);
+	int (*off_fnc)(struct msm_fb_data_type *mfd);
+	/* called to release resources associated to the process */
+	int (*release_fnc)(struct msm_fb_data_type *mfd, struct file *file);
+	int (*mode_switch)(struct msm_fb_data_type *mfd,
+					u32 mode);
+	int (*mode_switch_post)(struct msm_fb_data_type *mfd,
+					u32 mode);
+	int (*kickoff_fnc)(struct msm_fb_data_type *mfd,
+					struct mdp_display_commit *data);
+	int (*atomic_validate)(struct msm_fb_data_type *mfd, struct file *file,
+				struct mdp_layer_commit_v1 *commit);
+	bool (*is_config_same)(struct msm_fb_data_type *mfd,
+				struct mdp_output_layer *layer);
+	int (*pre_commit)(struct msm_fb_data_type *mfd, struct file *file,
+				struct mdp_layer_commit_v1 *commit);
+	int (*pre_commit_fnc)(struct msm_fb_data_type *mfd);
+	int (*ioctl_handler)(struct msm_fb_data_type *mfd, u32 cmd, void *arg);
+	void (*dma_fnc)(struct msm_fb_data_type *mfd);
+	int (*cursor_update)(struct msm_fb_data_type *mfd,
+				struct fb_cursor *cursor);
+	int (*async_position_update)(struct msm_fb_data_type *mfd,
+				struct mdp_position_update *update_pos);
+	int (*lut_update)(struct msm_fb_data_type *mfd, struct fb_cmap *cmap);
+	int (*do_histogram)(struct msm_fb_data_type *mfd,
+				struct mdp_histogram *hist);
+	int (*ad_calc_bl)(struct msm_fb_data_type *mfd, int bl_in,
+		int *bl_out, bool *bl_out_notify);
+	int (*panel_register_done)(struct mdss_panel_data *pdata);
+	u32 (*fb_stride)(u32 fb_index, u32 xres, int bpp);
+	struct mdss_mdp_format_params *(*get_format_params)(u32 format);
+	int (*splash_init_fnc)(struct msm_fb_data_type *mfd);
+	void (*check_dsi_status)(struct work_struct *work, uint32_t interval);
+	int (*configure_panel)(struct msm_fb_data_type *mfd, int mode,
+				int dest_ctrl);
+	int (*input_event_handler)(struct msm_fb_data_type *mfd);
+	int (*pp_release_fnc)(struct msm_fb_data_type *mfd);
+	void (*signal_retire_fence)(struct msm_fb_data_type *mfd,
+					int retire_cnt);
+	void *private1;
+};
+
+#define IS_CALIB_MODE_BL(mfd) (((mfd)->calib_mode) & MDSS_CALIB_MODE_BL)
+#define MDSS_BRIGHT_TO_BL(out, v, bl_max, max_bright) do {\
+				out = (2 * (v) * (bl_max) + max_bright);\
+				do_div(out, 2 * max_bright);\
+				} while (0)
+
+struct mdss_fb_file_info {
+	struct file *file;
+	struct list_head list;
+};
+
+struct msm_fb_backup_type {
+	struct fb_info info;
+	struct mdp_display_commit disp_commit;
+	bool   atomic_commit;
+};
+
+struct msm_fb_fps_info {
+	u32 frame_count;		/* frames seen in current sample window */
+	ktime_t last_sampled_time_us;	/* start of the current sample window */
+	u32 measured_fps;		/* last computed fps, scaled by 10 */
+};
+
+struct msm_fb_data_type {
+	u32 key;
+	u32 index;
+	u32 ref_cnt;
+	u32 fb_page;
+
+	struct panel_id panel;
+	struct mdss_panel_info *panel_info;
+	struct mdss_panel_info reconfig_panel_info;
+	int split_mode;
+	int split_fb_left;
+	int split_fb_right;
+
+	u32 dest;
+	struct fb_info *fbi;
+
+	int idle_time;
+	u32 idle_state;
+	struct msm_fb_fps_info fps_info;
+	struct delayed_work idle_notify_work;
+
+	bool atomic_commit_pending;
+
+	int op_enable;
+	u32 fb_imgType;
+	int panel_reconfig;
+	u32 panel_orientation;
+
+	u32 dst_format;
+	int panel_power_state;
+	struct disp_info_type_suspend suspend;
+
+	struct dma_buf *dbuf;
+	struct dma_buf_attachment *attachment;
+	struct sg_table *table;
+	dma_addr_t iova;
+	void *cursor_buf;
+	phys_addr_t cursor_buf_phys;
+	dma_addr_t cursor_buf_iova;
+
+	int ext_ad_ctrl;
+	u32 ext_bl_ctrl;
+	u32 calib_mode;
+	u32 calib_mode_bl;
+	u32 ad_bl_level;
+	u32 bl_level;
+	u32 bl_scale;
+	u32 bl_min_lvl;
+	u32 unset_bl_level;
+	bool allow_bl_update;
+	u32 bl_level_scaled;
+	struct mutex bl_lock;
+	struct mutex mdss_sysfs_lock;
+	bool ipc_resume;
+
+	struct platform_device *pdev;
+
+	u32 mdp_fb_page_protection;
+
+	struct disp_info_notify update;
+	struct disp_info_notify no_update;
+	struct completion power_off_comp;
+
+	struct msm_mdp_interface mdp;
+
+	struct msm_sync_pt_data mdp_sync_pt_data;
+
+	/* for non-blocking */
+	struct task_struct *disp_thread;
+	atomic_t commits_pending;
+	atomic_t kickoff_pending;
+	wait_queue_head_t commit_wait_q;
+	wait_queue_head_t idle_wait_q;
+	wait_queue_head_t kickoff_wait_q;
+	bool shutdown_pending;
+
+	struct msm_fb_splash_info splash_info;
+
+	wait_queue_head_t ioctl_q;
+	atomic_t ioctl_ref_cnt;
+
+	struct msm_fb_backup_type msm_fb_backup;
+	struct completion power_set_comp;
+	u32 is_power_setting;
+
+	u32 dcm_state;
+	struct list_head file_list;
+	struct ion_client *fb_ion_client;
+	struct ion_handle *fb_ion_handle;
+	struct dma_buf *fbmem_buf;
+	struct dma_buf_attachment *fb_attachment;
+	struct sg_table *fb_table;
+
+	bool mdss_fb_split_stored;
+
+	u32 wait_for_kickoff;
+	u32 thermal_level;
+
+	int fb_mmap_type;
+	struct led_trigger *boot_notification_led;
+
+	/* Following is used for dynamic mode switch */
+	enum dyn_mode_switch_state switch_state;
+	u32 switch_new_mode;
+	bool pending_switch;
+	struct mutex switch_lock;
+	struct input_handler *input_handler;
+};
+
+/*
+ * Propagate the current update type to waiters and, on a real frame
+ * update, complete the "update" completion and re-arm the 2 second
+ * "no update" timer.
+ */
+static inline void mdss_fb_update_notify_update(struct msm_fb_data_type *mfd)
+{
+	int needs_complete = 0;
+
+	mutex_lock(&mfd->update.lock);
+	mfd->update.value = mfd->update.type;
+	needs_complete = mfd->update.value == NOTIFY_TYPE_UPDATE;
+	mutex_unlock(&mfd->update.lock);
+	if (needs_complete) {
+		complete(&mfd->update.comp);
+		mutex_lock(&mfd->no_update.lock);
+		/* stop any pending no-update timer before re-arming it */
+		if (mfd->no_update.timer.function)
+			del_timer(&(mfd->no_update.timer));
+
+		mfd->no_update.timer.expires = jiffies + (2 * HZ);
+		add_timer(&mfd->no_update.timer);
+		mutex_unlock(&mfd->no_update.lock);
+	}
+}
+
+/* Function returns true for either any kind of dual display */
+static inline bool is_panel_split(struct msm_fb_data_type *mfd)
+{
+	return mfd && mfd->panel_info && mfd->panel_info->is_split_display;
+}
+/* Function returns true, if Layer Mixer split is Set */
+static inline bool is_split_lm(struct msm_fb_data_type *mfd)
+{
+	return mfd &&
+	       (mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY ||
+		mfd->split_mode == MDP_DUAL_LM_SINGLE_DISPLAY);
+}
+/* Function returns true, if Ping pong split is Set*/
+static inline bool is_pingpong_split(struct msm_fb_data_type *mfd)
+{
+	return mfd && (mfd->split_mode == MDP_PINGPONG_SPLIT);
+}
+/* True only for the single-display / dual-layer-mixer topology */
+static inline bool is_dual_lm_single_display(struct msm_fb_data_type *mfd)
+{
+	return mfd && (mfd->split_mode == MDP_DUAL_LM_SINGLE_DISPLAY);
+}
+/* Panel power-state wrappers; see mdss_panel_is_power_* for semantics */
+static inline bool mdss_fb_is_power_off(struct msm_fb_data_type *mfd)
+{
+	return mdss_panel_is_power_off(mfd->panel_power_state);
+}
+
+static inline bool mdss_fb_is_power_on_interactive(
+	struct msm_fb_data_type *mfd)
+{
+	return mdss_panel_is_power_on_interactive(mfd->panel_power_state);
+}
+
+static inline bool mdss_fb_is_power_on(struct msm_fb_data_type *mfd)
+{
+	return mdss_panel_is_power_on(mfd->panel_power_state);
+}
+
+/* Low-power (LP) display state */
+static inline bool mdss_fb_is_power_on_lp(struct msm_fb_data_type *mfd)
+{
+	return mdss_panel_is_power_on_lp(mfd->panel_power_state);
+}
+
+/* Ultra-low-power (ULP) display state */
+static inline bool mdss_fb_is_power_on_ulp(struct msm_fb_data_type *mfd)
+{
+	return mdss_panel_is_power_on_ulp(mfd->panel_power_state);
+}
+
+/* True when fb0 is driven by the DTV (HDMI) interface */
+static inline bool mdss_fb_is_hdmi_primary(struct msm_fb_data_type *mfd)
+{
+	return (mfd && (mfd->index == 0) &&
+		(mfd->panel_info->type == DTV_PANEL));
+}
+
+/* Reset fps statistics; restarts the measurement window */
+static inline void mdss_fb_init_fps_info(struct msm_fb_data_type *mfd)
+{
+	memset(&mfd->fps_info, 0, sizeof(mfd->fps_info));
+}
+int mdss_fb_get_phys_info(dma_addr_t *start, unsigned long *len, int fb_num);
+void mdss_fb_set_backlight(struct msm_fb_data_type *mfd, u32 bkl_lvl);
+void mdss_fb_update_backlight(struct msm_fb_data_type *mfd);
+int mdss_fb_wait_for_fence(struct msm_sync_pt_data *sync_pt_data);
+void mdss_fb_signal_timeline(struct msm_sync_pt_data *sync_pt_data);
+struct sync_fence *mdss_fb_sync_get_fence(struct sw_sync_timeline *timeline,
+				const char *fence_name, int val);
+int mdss_fb_register_mdp_instance(struct msm_mdp_interface *mdp);
+int mdss_fb_dcm(struct msm_fb_data_type *mfd, int req_state);
+int mdss_fb_suspres_panel(struct device *dev, void *data);
+int mdss_fb_do_ioctl(struct fb_info *info, unsigned int cmd,
+		     unsigned long arg, struct file *file);
+int mdss_fb_compat_ioctl(struct fb_info *info, unsigned int cmd,
+			 unsigned long arg, struct file *file);
+int mdss_fb_atomic_commit(struct fb_info *info,
+	struct mdp_layer_commit  *commit, struct file *file);
+int mdss_fb_async_position_update(struct fb_info *info,
+		struct mdp_position_update *update_pos);
+
+u32 mdss_fb_get_mode_switch(struct msm_fb_data_type *mfd);
+void mdss_fb_report_panel_dead(struct msm_fb_data_type *mfd);
+void mdss_panelinfo_to_fb_var(struct mdss_panel_info *pinfo,
+						struct fb_var_screeninfo *var);
+void mdss_fb_calc_fps(struct msm_fb_data_type *mfd);
+#endif /* MDSS_FB_H */
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_audio.c b/drivers/video/fbdev/msm/mdss_hdmi_audio.c
new file mode 100644
index 0000000..e4dc530
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_hdmi_audio.c
@@ -0,0 +1,526 @@
+/* Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/iopoll.h>
+#include <linux/types.h>
+#include <linux/switch.h>
+#include <linux/gcd.h>
+
+#include "mdss_hdmi_audio.h"
+#include "mdss_hdmi_util.h"
+
+#define HDMI_AUDIO_INFO_FRAME_PACKET_HEADER 0x84
+#define HDMI_AUDIO_INFO_FRAME_PACKET_VERSION 0x1
+#define HDMI_AUDIO_INFO_FRAME_PACKET_LENGTH 0x0A
+
+#define HDMI_KHZ_TO_HZ 1000
+#define HDMI_MHZ_TO_HZ 1000000
+#define HDMI_ACR_N_MULTIPLIER 128
+#define DEFAULT_AUDIO_SAMPLE_RATE_HZ 48000
+
+/* Supported HDMI Audio channels */
+enum hdmi_audio_channels {
+	AUDIO_CHANNEL_2 = 2,
+	AUDIO_CHANNEL_3,
+	AUDIO_CHANNEL_4,
+	AUDIO_CHANNEL_5,
+	AUDIO_CHANNEL_6,
+	AUDIO_CHANNEL_7,
+	AUDIO_CHANNEL_8,
+};
+
+/* parameters for clock regeneration */
+struct hdmi_audio_acr {
+	u32 n;
+	u32 cts;
+};
+
+enum hdmi_audio_sample_rates {
+	AUDIO_SAMPLE_RATE_32KHZ,
+	AUDIO_SAMPLE_RATE_44_1KHZ,
+	AUDIO_SAMPLE_RATE_48KHZ,
+	AUDIO_SAMPLE_RATE_88_2KHZ,
+	AUDIO_SAMPLE_RATE_96KHZ,
+	AUDIO_SAMPLE_RATE_176_4KHZ,
+	AUDIO_SAMPLE_RATE_192KHZ,
+	AUDIO_SAMPLE_RATE_MAX
+};
+
+/* Internal context for one HDMI audio instance */
+struct hdmi_audio {
+	struct dss_io_data *io;		/* HDMI core register space */
+	struct msm_hdmi_audio_setup_params params; /* current audio config */
+	struct switch_dev sdev;		/* switch device for audio routing */
+	u32 pclk;			/* pixel clock in kHz (see hdmi_audio_on) */
+	bool ack_enabled;		/* client acknowledges notifications */
+	bool audio_ack_enabled;
+	atomic_t ack_pending;		/* notification sent, ack not yet seen */
+};
+
+/*
+ * Convert a sample rate in Hz to the corresponding
+ * enum hdmi_audio_sample_rates value, in place. Unsupported rates
+ * are left unchanged (caller's default path handles them).
+ */
+static void hdmi_audio_get_audio_sample_rate(u32 *sample_rate_hz)
+{
+	u32 rate = *sample_rate_hz;
+
+	switch (rate) {
+	case 32000:
+		*sample_rate_hz = AUDIO_SAMPLE_RATE_32KHZ;
+		break;
+	case 44100:
+		*sample_rate_hz = AUDIO_SAMPLE_RATE_44_1KHZ;
+		break;
+	case 48000:
+		*sample_rate_hz = AUDIO_SAMPLE_RATE_48KHZ;
+		break;
+	case 88200:
+		*sample_rate_hz = AUDIO_SAMPLE_RATE_88_2KHZ;
+		break;
+	case 96000:
+		*sample_rate_hz = AUDIO_SAMPLE_RATE_96KHZ;
+		break;
+	case 176400:
+		*sample_rate_hz = AUDIO_SAMPLE_RATE_176_4KHZ;
+		break;
+	case 192000:
+		*sample_rate_hz = AUDIO_SAMPLE_RATE_192KHZ;
+		break;
+	default:
+		pr_debug("%d unchanged\n", rate);
+		break;
+	}
+}
+
+/*
+ * Compute the Audio Clock Regeneration N and CTS values for the given
+ * pixel clock (Hz) and audio sample rate (Hz), per the HDMI spec
+ * relation N/CTS = (128*fs)/pclk.
+ */
+static void hdmi_audio_get_acr_param(u32 pclk, u32 fs,
+	struct hdmi_audio_acr *acr)
+{
+	u32 div, mul;
+
+	if (!acr) {
+		pr_err("invalid data\n");
+		return;
+	}
+
+	/*
+	 * as per HDMI specification, N/CTS = (128*fs)/pclk.
+	 * get the ratio using this formula.
+	 */
+	acr->n = HDMI_ACR_N_MULTIPLIER * fs;
+	acr->cts = pclk;
+
+	/* get the greatest common divisor for the ratio */
+	div = gcd(acr->n, acr->cts);
+
+	/* get the n and cts values wrt N/CTS formula */
+	acr->n /= div;
+	acr->cts /= div;
+
+	/*
+	 * as per HDMI specification, 300 <= 128*fs/N <= 1500
+	 * with a target of 128*fs/N = 1000. To get closest
+	 * value without truncating fractional values, find
+	 * the corresponding multiplier
+	 */
+	mul = ((HDMI_ACR_N_MULTIPLIER * fs / HDMI_KHZ_TO_HZ)
+		+ (acr->n - 1)) / acr->n;
+
+	acr->n *= mul;
+	acr->cts *= mul;
+}
+
+/*
+ * Program and enable the Audio Clock Regeneration (ACR) packet for the
+ * currently configured sample rate and pixel clock. Selects the 32/44.1/48
+ * register bank, applies the N multiplier for high sample rates, and
+ * finally sets SEND|CONT in HDMI_ACR_PKT_CTRL.
+ */
+static void hdmi_audio_acr_enable(struct hdmi_audio *audio)
+{
+	struct dss_io_data *io;
+	struct hdmi_audio_acr acr;
+	struct msm_hdmi_audio_setup_params *params;
+	u32 pclk, layout, multiplier = 1, sample_rate;
+	u32 acr_pkt_ctl, aud_pkt_ctl2, acr_reg_cts, acr_reg_n;
+
+	if (!audio) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	io = audio->io;
+	params = &audio->params;
+	pclk = audio->pclk;
+	sample_rate = params->sample_rate_hz;
+
+	/* pclk is stored in kHz; acr math expects Hz */
+	hdmi_audio_get_acr_param(pclk * HDMI_KHZ_TO_HZ, sample_rate, &acr);
+	/* converts sample_rate from Hz to the enum value, in place */
+	hdmi_audio_get_audio_sample_rate(&sample_rate);
+
+	/* layout 0 = 2-channel, layout 1 = multichannel */
+	layout = params->num_of_channels == AUDIO_CHANNEL_2 ? 0 : 1;
+
+	pr_debug("n=%u, cts=%u, layout=%u\n", acr.n, acr.cts, layout);
+
+	/* AUDIO_PRIORITY | SOURCE */
+	acr_pkt_ctl = BIT(31) | BIT(8);
+
+	/*
+	 * Pick the register bank and SELECT field for the base rate family;
+	 * 88.2/96/176.4/192 kHz reuse the 44.1/48 banks with N pre-divided
+	 * and the N_MULTIPLE field set accordingly. The CTS value appears
+	 * to be placed in the upper bits of the register (<< 12) —
+	 * NOTE(review): confirm field position against the HDMI core HPG.
+	 */
+	switch (sample_rate) {
+	case AUDIO_SAMPLE_RATE_44_1KHZ:
+		acr_pkt_ctl |= 0x2 << 4;
+		acr.cts <<= 12;
+
+		acr_reg_cts = HDMI_ACR_44_0;
+		acr_reg_n = HDMI_ACR_44_1;
+		break;
+	case AUDIO_SAMPLE_RATE_48KHZ:
+		acr_pkt_ctl |= 0x3 << 4;
+		acr.cts <<= 12;
+
+		acr_reg_cts = HDMI_ACR_48_0;
+		acr_reg_n = HDMI_ACR_48_1;
+		break;
+	case AUDIO_SAMPLE_RATE_192KHZ:
+		multiplier = 4;
+		acr.n >>= 2;
+
+		acr_pkt_ctl |= 0x3 << 4;
+		acr.cts <<= 12;
+
+		acr_reg_cts = HDMI_ACR_48_0;
+		acr_reg_n = HDMI_ACR_48_1;
+		break;
+	case AUDIO_SAMPLE_RATE_176_4KHZ:
+		multiplier = 4;
+		acr.n >>= 2;
+
+		acr_pkt_ctl |= 0x2 << 4;
+		acr.cts <<= 12;
+
+		acr_reg_cts = HDMI_ACR_44_0;
+		acr_reg_n = HDMI_ACR_44_1;
+		break;
+	case AUDIO_SAMPLE_RATE_96KHZ:
+		multiplier = 2;
+		acr.n >>= 1;
+
+		acr_pkt_ctl |= 0x3 << 4;
+		acr.cts <<= 12;
+
+		acr_reg_cts = HDMI_ACR_48_0;
+		acr_reg_n = HDMI_ACR_48_1;
+		break;
+	case AUDIO_SAMPLE_RATE_88_2KHZ:
+		multiplier = 2;
+		acr.n >>= 1;
+
+		acr_pkt_ctl |= 0x2 << 4;
+		acr.cts <<= 12;
+
+		acr_reg_cts = HDMI_ACR_44_0;
+		acr_reg_n = HDMI_ACR_44_1;
+		break;
+	default:
+		/* 32 kHz and any unrecognized rate use the 32 kHz bank */
+		multiplier = 1;
+
+		acr_pkt_ctl |= 0x1 << 4;
+		acr.cts <<= 12;
+
+		acr_reg_cts = HDMI_ACR_32_0;
+		acr_reg_n = HDMI_ACR_32_1;
+		break;
+	}
+
+	aud_pkt_ctl2 = BIT(0) | (layout << 1);
+
+	/* N_MULTIPLE(multiplier) */
+	acr_pkt_ctl &= ~(7 << 16);
+	acr_pkt_ctl |= (multiplier & 0x7) << 16;
+
+	/* SEND | CONT */
+	acr_pkt_ctl |= BIT(0) | BIT(1);
+
+	DSS_REG_W(io, acr_reg_cts, acr.cts);
+	DSS_REG_W(io, acr_reg_n, acr.n);
+	DSS_REG_W(io, HDMI_ACR_PKT_CTRL, acr_pkt_ctl);
+	DSS_REG_W(io, HDMI_AUDIO_PKT_CTRL2, aud_pkt_ctl2);
+}
+
+/* Enable full ACR programming, or disable by clearing HDMI_ACR_PKT_CTRL */
+static void hdmi_audio_acr_setup(struct hdmi_audio *audio, bool on)
+{
+	if (on)
+		hdmi_audio_acr_enable(audio);
+	else
+		DSS_REG_W(audio->io, HDMI_ACR_PKT_CTRL, 0);
+}
+
+/*
+ * Build and program the HDMI Audio InfoFrame (channel count, allocation,
+ * level shift, down-mix, checksum) from the current audio params, or
+ * disable its transmission when @enabled is false. On disable, only the
+ * transmission-enable bits are cleared and the payload registers are
+ * written with zeros.
+ */
+static void hdmi_audio_infoframe_setup(struct hdmi_audio *audio, bool enabled)
+{
+	struct dss_io_data *io = NULL;
+	u32 channels, channel_allocation, level_shift, down_mix, layout;
+	u32 hdmi_debug_reg = 0, audio_info_0_reg = 0, audio_info_1_reg = 0;
+	u32 audio_info_ctrl_reg, aud_pck_ctrl_2_reg;
+	u32 check_sum, sample_present;
+
+	if (!audio) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	io = audio->io;
+	if (!io->base) {
+		pr_err("core io not inititalized\n");
+		return;
+	}
+
+	/* clear the InfoFrame transmission-enable bits, keep the rest */
+	audio_info_ctrl_reg = DSS_REG_R(io, HDMI_INFOFRAME_CTRL0);
+	audio_info_ctrl_reg &= ~0xF0;
+
+	if (!enabled)
+		goto end;
+
+	/* InfoFrame CC field encodes channel count minus one */
+	channels           = audio->params.num_of_channels - 1;
+	channel_allocation = audio->params.channel_allocation;
+	level_shift        = audio->params.level_shift;
+	down_mix           = audio->params.down_mix;
+	sample_present     = audio->params.sample_present;
+
+	/* layout 0 = 2-channel, layout 1 = multichannel */
+	layout = audio->params.num_of_channels == AUDIO_CHANNEL_2 ? 0 : 1;
+	aud_pck_ctrl_2_reg = BIT(0) | (layout << 1);
+	DSS_REG_W(io, HDMI_AUDIO_PKT_CTRL2, aud_pck_ctrl_2_reg);
+
+	audio_info_1_reg |= channel_allocation & 0xFF;
+	audio_info_1_reg |= ((level_shift & 0xF) << 11);
+	audio_info_1_reg |= ((down_mix & 0x1) << 15);
+
+	/* standard InfoFrame checksum: 256 minus the byte sum, mod 256 */
+	check_sum = 0;
+	check_sum += HDMI_AUDIO_INFO_FRAME_PACKET_HEADER;
+	check_sum += HDMI_AUDIO_INFO_FRAME_PACKET_VERSION;
+	check_sum += HDMI_AUDIO_INFO_FRAME_PACKET_LENGTH;
+	check_sum += channels;
+	check_sum += channel_allocation;
+	check_sum += (level_shift & 0xF) << 3 | (down_mix & 0x1) << 7;
+	check_sum &= 0xFF;
+	check_sum = (u8) (256 - check_sum);
+
+	audio_info_0_reg |= check_sum & 0xFF;
+	audio_info_0_reg |= ((channels & 0x7) << 8);
+
+	/* Enable Audio InfoFrame Transmission */
+	audio_info_ctrl_reg |= 0xF0;
+
+	if (layout) {
+		/* Set the Layout bit */
+		hdmi_debug_reg |= BIT(4);
+
+		/* Set the Sample Present bits */
+		hdmi_debug_reg |= sample_present & 0xF;
+	}
+end:
+	DSS_REG_W(io, HDMI_DEBUG, hdmi_debug_reg);
+	DSS_REG_W(io, HDMI_AUDIO_INFO0, audio_info_0_reg);
+	DSS_REG_W(io, HDMI_AUDIO_INFO1, audio_info_1_reg);
+	DSS_REG_W(io, HDMI_INFOFRAME_CTRL0, audio_info_ctrl_reg);
+}
+
+/*
+ * Client op: enable HDMI audio with the given pixel clock and setup
+ * parameters. Falls back to 48 kHz / 2-channel when the client passes
+ * zero channels. Returns 0 on success, -EINVAL on a NULL context.
+ */
+static int hdmi_audio_on(void *ctx, u32 pclk,
+	struct msm_hdmi_audio_setup_params *params)
+{
+	struct hdmi_audio *audio = ctx;
+	int rc = 0;
+
+	if (!audio) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	audio->pclk = pclk;
+	audio->params = *params;
+
+	/* default configuration when the client supplies none */
+	if (!audio->params.num_of_channels) {
+		audio->params.sample_rate_hz = DEFAULT_AUDIO_SAMPLE_RATE_HZ;
+		audio->params.num_of_channels = AUDIO_CHANNEL_2;
+	}
+
+	hdmi_audio_acr_setup(audio, true);
+	hdmi_audio_infoframe_setup(audio, true);
+
+	pr_debug("HDMI Audio: Enabled\n");
+end:
+	return rc;
+}
+
+/* Client op: disable InfoFrame transmission and ACR packets */
+static void hdmi_audio_off(void *ctx)
+{
+	struct hdmi_audio *audio = ctx;
+
+	if (!audio) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	hdmi_audio_infoframe_setup(audio, false);
+	hdmi_audio_acr_setup(audio, false);
+
+	pr_debug("HDMI Audio: Disabled\n");
+}
+
+/*
+ * Client op: notify the audio switch of a connect (non-zero) or
+ * disconnect (zero) event. No-op if the state is unchanged. When the
+ * ack feature is enabled, a new notification is suppressed while a
+ * previous one is still unacknowledged, and a successful state change
+ * marks an ack as pending.
+ */
+static void hdmi_audio_notify(void *ctx, int val)
+{
+	struct hdmi_audio *audio = ctx;
+	int state = 0;
+	bool switched;
+
+	if (!audio) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	state = audio->sdev.state;
+	if (state == val)
+		return;
+
+	if (audio->ack_enabled &&
+		atomic_read(&audio->ack_pending)) {
+		pr_err("%s ack pending, not notifying %s\n",
+			state ? "connect" : "disconnect",
+			val ? "connect" : "disconnect");
+		return;
+	}
+
+	switch_set_state(&audio->sdev, val);
+	switched = audio->sdev.state != state;
+
+	if (audio->ack_enabled && switched)
+		atomic_set(&audio->ack_pending, 1);
+
+	pr_debug("audio %s %s\n", switched ? "switched to" : "same as",
+		audio->sdev.state ? "HDMI" : "SPKR");
+}
+
+/*
+ * Client op: acknowledge a prior notification, or (when
+ * AUDIO_ACK_SET_ENABLE is set in @ack) toggle the ack feature itself.
+ * If the acknowledged state disagrees with the current hpd state, a
+ * corrective notification is sent.
+ */
+static void hdmi_audio_ack(void *ctx, u32 ack, u32 hpd)
+{
+	struct hdmi_audio *audio = ctx;
+	u32 ack_hpd;
+
+	if (!audio) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	/* configuration path: enable/disable the ack feature, no ack */
+	if (ack & AUDIO_ACK_SET_ENABLE) {
+		audio->ack_enabled = ack & AUDIO_ACK_ENABLE ?
+			true : false;
+
+		pr_debug("audio ack feature %s\n",
+			audio->ack_enabled ? "enabled" : "disabled");
+		return;
+	}
+
+	if (!audio->ack_enabled)
+		return;
+
+	atomic_set(&audio->ack_pending, 0);
+
+	ack_hpd = ack & AUDIO_ACK_CONNECT;
+
+	pr_debug("acknowledging %s\n",
+		ack_hpd ? "connect" : "disconnect");
+
+	/* resynchronize if ack state and hpd state diverged */
+	if (ack_hpd != hpd) {
+		pr_debug("unbalanced audio state, ack %d, hpd %d\n",
+			ack_hpd, hpd);
+
+		hdmi_audio_notify(ctx, hpd);
+	}
+}
+
+/* Client op: drop any outstanding ack so new notifications can flow */
+static void hdmi_audio_reset(void *ctx)
+{
+	struct hdmi_audio *audio = ctx;
+
+	if (!audio) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	atomic_set(&audio->ack_pending, 0);
+}
+
+/* Client op: report current ack feature state, pending ack, and routing */
+static void hdmi_audio_status(void *ctx, struct hdmi_audio_status *status)
+{
+	struct hdmi_audio *audio = ctx;
+
+	if (!audio || !status) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	status->ack_enabled = audio->ack_enabled;
+	status->ack_pending = atomic_read(&audio->ack_pending);
+	status->switched = audio->sdev.state;
+}
+
+/**
+ * hdmi_audio_register() - audio registeration function
+ * @data: registeration initialization data
+ *
+ * This API configures audio module for client to use HDMI audio.
+ * Provides audio functionalities which client can call.
+ * Initializes internal data structures.
+ *
+ * Return: pointer to audio data that client needs to pass on
+ * calling audio functions, or NULL on failure.
+ */
+void *hdmi_audio_register(struct hdmi_audio_init_data *data)
+{
+	struct hdmi_audio *audio = NULL;
+	int rc = 0;
+
+	if (!data)
+		goto end;
+
+	audio = kzalloc(sizeof(*audio), GFP_KERNEL);
+	if (!audio)
+		goto end;
+
+	audio->sdev.name = "hdmi_audio";
+	rc = switch_dev_register(&audio->sdev);
+	if (rc) {
+		pr_err("audio switch registration failed\n");
+		kzfree(audio);
+		/*
+		 * Clear the stale pointer: "end" returns audio, and
+		 * returning the freed block would hand the caller a
+		 * dangling context that looks like success.
+		 */
+		audio = NULL;
+		goto end;
+	}
+
+	audio->io = data->io;
+
+	/* populate the client-visible operation table */
+	data->ops->on     = hdmi_audio_on;
+	data->ops->off    = hdmi_audio_off;
+	data->ops->notify = hdmi_audio_notify;
+	data->ops->ack    = hdmi_audio_ack;
+	data->ops->reset  = hdmi_audio_reset;
+	data->ops->status = hdmi_audio_status;
+end:
+	return audio;
+}
+
+/**
+ * hdmi_audio_unregister() - unregister audio module
+ * @ctx: audio module's data
+ *
+ * Delete audio module's instance and allocated resources.
+ * Safe to call with a NULL @ctx.
+ */
+void hdmi_audio_unregister(void *ctx)
+{
+	struct hdmi_audio *audio = ctx;
+
+	if (audio) {
+		switch_dev_unregister(&audio->sdev);
+		kfree(ctx);
+	}
+}
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_audio.h b/drivers/video/fbdev/msm/mdss_hdmi_audio.h
new file mode 100644
index 0000000..7b33cb8
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_hdmi_audio.h
@@ -0,0 +1,72 @@
+/* Copyright (c) 2016, 2018,The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_HDMI_AUDIO_H__
+#define __MDSS_HDMI_AUDIO_H__
+
+#include <linux/mdss_io_util.h>
+#include <linux/msm_hdmi.h>
+
+#define AUDIO_ACK_SET_ENABLE BIT(5)
+#define AUDIO_ACK_ENABLE BIT(4)
+#define AUDIO_ACK_CONNECT BIT(0)
+
+/**
+ * struct hdmi_audio_status - hdmi audio current status info
+ * @ack_pending: notification acknowledgment status
+ * @ack_enabled: acknowledgment feature is enabled or disabled
+ * @switched: audio notification status for routing
+ *
+ * Data for client to query about the current status of audio
+ */
+struct hdmi_audio_status {
+	bool ack_pending;
+	bool ack_enabled;
+	bool switched;
+};
+
+/**
+ * struct hdmi_audio_ops - audio operations for clients to call
+ * @on: function pointer to enable audio
+ * @reset: function pointer to reset the audio current status to default
+ * @status: function pointer to get the current status of audio
+ * @notify: function pointer to notify other modules for audio routing
+ * @ack: function pointer to acknowledge audio routing change
+ *
+ * Provides client operations for audio functionalities
+ */
+struct hdmi_audio_ops {
+	int (*on)(void *ctx, u32 pclk,
+		struct msm_hdmi_audio_setup_params *params);
+	void (*off)(void *ctx);
+	void (*reset)(void *ctx);
+	void (*status)(void *ctx, struct hdmi_audio_status *status);
+	void (*notify)(void *ctx, int val);
+	void (*ack)(void *ctx, u32 ack, u32 hpd);
+};
+
+/**
+ * struct hdmi_audio_init_data - data needed for initializing audio module
+ * @io: pointer to register access related data
+ * @ops: pointer to populate operation functions.
+ *
+ * Defines the data needed to be provided while initializing audio module
+ */
+struct hdmi_audio_init_data {
+	struct dss_io_data *io;
+	struct hdmi_audio_ops *ops;
+};
+
+void *hdmi_audio_register(struct hdmi_audio_init_data *data);
+void hdmi_audio_unregister(void *data);
+
+#endif /* __MDSS_HDMI_AUDIO_H__ */
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_cec.c b/drivers/video/fbdev/msm/mdss_hdmi_cec.c
new file mode 100644
index 0000000..f1be313
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_hdmi_cec.c
@@ -0,0 +1,504 @@
+/* Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/input.h>
+
+#include "mdss_hdmi_cec.h"
+#include "mdss_panel.h"
+
+#define CEC_STATUS_WR_ERROR	BIT(0)
+#define CEC_STATUS_WR_DONE	BIT(1)
+#define CEC_INTR		(BIT(1) | BIT(3) | BIT(7))
+
+#define CEC_SUPPORTED_HW_VERSION 0x30000001
+
+/* Reference: HDMI 1.4a Specification section 7.1 */
+
+#define CEC_OP_SET_STREAM_PATH  0x86
+#define CEC_OP_KEY_PRESS        0x44
+#define CEC_OP_STANDBY          0x36
+
+/**
+ * struct hdmi_cec_ctrl - private data of the CEC hw module
+ * @cec_enabled: true while the CEC engine is enabled
+ * @cec_wakeup_en: true if CEC wakeup (KEY_POWER injection) is enabled
+ * @cec_device_suspend: last suspend state reported via device_suspend op
+ * @cec_msg_wr_status: CEC_STATUS_WR_* bits for the frame currently in flight
+ * @lock: protects @cec_msg_wr_status between isr and sender
+ * @cec_read_work: work item that drains a received frame out of hw
+ * @cec_msg_wr_done: completed by the isr on frame-write done or error
+ * @init_data: copy of the init data passed to hdmi_cec_init()
+ * @input: virtual input device used to inject power key events
+ */
+struct hdmi_cec_ctrl {
+	bool cec_enabled;
+	bool cec_wakeup_en;
+	bool cec_device_suspend;
+
+	u32 cec_msg_wr_status;
+	spinlock_t lock;
+	struct work_struct cec_read_work;
+	struct completion cec_msg_wr_done;
+	struct hdmi_cec_init_data init_data;
+	struct input_dev *input;
+};
+
+/**
+ * hdmi_cec_msg_send() - write one CEC frame to hw and wait for completion
+ * @data: cec hw module data returned by hdmi_cec_init()
+ * @msg: frame to transmit (sender/receiver ids, opcode, operands)
+ *
+ * Return: 0 on success, -EINVAL on invalid input, -EAGAIN if the CEC
+ * line stays busy after retries, -ETIMEDOUT if no write-done interrupt
+ * arrives within 1s, -ENXIO if the hw flagged a frame error.
+ */
+static int hdmi_cec_msg_send(void *data, struct cec_msg *msg)
+{
+	int i, line_check_retry = 10, rc = 0;
+	u32 frame_retransmit = RETRANSMIT_MAX_NUM;
+	bool frame_type;
+	unsigned long flags;
+	struct dss_io_data *io = NULL;
+	struct hdmi_cec_ctrl *cec_ctrl = (struct hdmi_cec_ctrl *)data;
+
+	if (!cec_ctrl || !cec_ctrl->init_data.io || !msg) {
+		DEV_ERR("%s: Invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	io = cec_ctrl->init_data.io;
+
+	reinit_completion(&cec_ctrl->cec_msg_wr_done);
+	cec_ctrl->cec_msg_wr_status = 0;
+	/* logical address 15 is the CEC broadcast address */
+	frame_type = (msg->recvr_id == 15 ? BIT(0) : 0);
+	if (msg->retransmit > 0 && msg->retransmit < RETRANSMIT_MAX_NUM)
+		frame_retransmit = msg->retransmit;
+
+	/* toggle cec in order to flush out bad hw state, if any */
+	DSS_REG_W(io, HDMI_CEC_CTRL, 0);
+	DSS_REG_W(io, HDMI_CEC_CTRL, BIT(0));
+
+	frame_retransmit = (frame_retransmit & 0xF) << 4;
+	DSS_REG_W(io, HDMI_CEC_RETRANSMIT, BIT(0) | frame_retransmit);
+
+	/* header block */
+	DSS_REG_W_ND(io, HDMI_CEC_WR_DATA,
+		(((msg->sender_id << 4) | msg->recvr_id) << 8) | frame_type);
+
+	/* data block 0 : opcode */
+	DSS_REG_W_ND(io, HDMI_CEC_WR_DATA,
+		((msg->frame_size < 2 ? 0 : msg->opcode) << 8) | frame_type);
+
+	/* data block 1-14 : operand 0-13 */
+	for (i = 0; i < msg->frame_size - 2; i++)
+		DSS_REG_W_ND(io, HDMI_CEC_WR_DATA,
+			(msg->operand[i] << 8) | frame_type);
+
+	/* wait for the CEC line busy bit (STATUS BIT(0)) to clear */
+	while ((DSS_REG_R(io, HDMI_CEC_STATUS) & BIT(0)) &&
+		line_check_retry) {
+		line_check_retry--;
+		DEV_DBG("%s: CEC line is busy(%d)\n", __func__,
+			line_check_retry);
+		schedule();
+	}
+
+	if (!line_check_retry && (DSS_REG_R(io, HDMI_CEC_STATUS) & BIT(0))) {
+		DEV_ERR("%s: CEC line is busy. Retry\n", __func__);
+		return -EAGAIN;
+	}
+
+	/* start transmission */
+	DSS_REG_W(io, HDMI_CEC_CTRL, BIT(0) | BIT(1) |
+		((msg->frame_size & 0x1F) << 4) | BIT(9));
+
+	/* completed by hdmi_cec_isr() on either WR_DONE or frame error */
+	if (!wait_for_completion_timeout(
+		&cec_ctrl->cec_msg_wr_done, HZ)) {
+		DEV_ERR("%s: timedout", __func__);
+		return -ETIMEDOUT;
+	}
+
+	spin_lock_irqsave(&cec_ctrl->lock, flags);
+	/*
+	 * NOTE(review): '==' means a status with both WR_DONE and WR_ERROR
+	 * set is treated as success - confirm this is intended.
+	 */
+	if (cec_ctrl->cec_msg_wr_status == CEC_STATUS_WR_ERROR) {
+		rc = -ENXIO;
+		DEV_ERR("%s: msg write failed.\n", __func__);
+	} else {
+		DEV_DBG("%s: CEC write frame done (frame len=%d)", __func__,
+			msg->frame_size);
+	}
+	spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+	return rc;
+} /* hdmi_cec_msg_send */
+
+/**
+ * hdmi_cec_init_input_event() - register a virtual input device for CEC
+ * @cec_ctrl: cec hw module data
+ *
+ * Allocates and registers an input device capable of reporting KEY_POWER,
+ * used by hdmi_cec_msg_recv() to wake/suspend the device on CEC opcodes.
+ * On any failure the input pointer is left NULL and CEC keeps working
+ * without wakeup support.
+ */
+static void hdmi_cec_init_input_event(struct hdmi_cec_ctrl *cec_ctrl)
+{
+	int rc = 0;
+
+	if (!cec_ctrl) {
+		DEV_ERR("%s: Invalid input\n", __func__);
+		return;
+	}
+
+	/* Initialize CEC input events */
+	if (!cec_ctrl->input)
+		cec_ctrl->input = input_allocate_device();
+	if (!cec_ctrl->input) {
+		DEV_ERR("%s: hdmi input device allocation failed\n", __func__);
+		return;
+	}
+
+	cec_ctrl->input->name = "HDMI CEC User or Deck Control";
+	cec_ctrl->input->phys = "hdmi/input0";
+	cec_ctrl->input->id.bustype = BUS_VIRTUAL;
+
+	input_set_capability(cec_ctrl->input, EV_KEY, KEY_POWER);
+
+	rc = input_register_device(cec_ctrl->input);
+	if (rc) {
+		DEV_ERR("%s: cec input device registeration failed\n",
+				__func__);
+		input_free_device(cec_ctrl->input);
+		cec_ctrl->input = NULL;
+		return;
+	}
+}
+
+/* Unregister (and thereby free) the CEC wakeup input device, if any. */
+static void hdmi_cec_deinit_input_event(struct hdmi_cec_ctrl *cec_ctrl)
+{
+	if (cec_ctrl->input)
+		input_unregister_device(cec_ctrl->input);
+	cec_ctrl->input = NULL;
+}
+
+/**
+ * hdmi_cec_msg_recv() - work handler that drains one received CEC frame
+ * @work: embedded cec_read_work of the cec hw module
+ *
+ * Reads the frame header, opcode and operands from HDMI_CEC_RD_DATA,
+ * optionally injects a KEY_POWER press for wakeup/standby opcodes, and
+ * forwards the parsed message to the client via msg_recv_notify.
+ */
+static void hdmi_cec_msg_recv(struct work_struct *work)
+{
+	int i;
+	u32 data;
+	struct hdmi_cec_ctrl *cec_ctrl = NULL;
+	struct dss_io_data *io = NULL;
+	struct cec_msg msg;
+	struct cec_cbs *cbs;
+
+	cec_ctrl = container_of(work, struct hdmi_cec_ctrl, cec_read_work);
+	/* NOTE(review): container_of() never yields NULL; only the io
+	 * check below is effective.
+	 */
+	if (!cec_ctrl || !cec_ctrl->init_data.io) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	if (!cec_ctrl->cec_enabled) {
+		DEV_ERR("%s: cec not enabled\n", __func__);
+		return;
+	}
+
+	io = cec_ctrl->init_data.io;
+	cbs = cec_ctrl->init_data.cbs;
+
+	/* header word: receiver id, sender id and frame length */
+	data = DSS_REG_R(io, HDMI_CEC_RD_DATA);
+
+	msg.recvr_id   = (data & 0x000F);
+	msg.sender_id  = (data & 0x00F0) >> 4;
+	msg.frame_size = (data & 0x1F00) >> 8;
+	DEV_DBG("%s: Recvd init=[%u] dest=[%u] size=[%u]\n", __func__,
+		msg.sender_id, msg.recvr_id,
+		msg.frame_size);
+
+	if (msg.frame_size < 1 || msg.frame_size > MAX_CEC_FRAME_SIZE) {
+		DEV_ERR("%s: invalid message (frame length = %d)\n",
+			__func__, msg.frame_size);
+		return;
+	} else if (msg.frame_size == 1) {
+		/* header-only frame: a CEC polling message, nothing to relay */
+		DEV_DBG("%s: polling message (dest[%x] <- init[%x])\n",
+			__func__, msg.recvr_id, msg.sender_id);
+		return;
+	}
+
+	/* data block 0 : opcode */
+	data = DSS_REG_R_ND(io, HDMI_CEC_RD_DATA);
+	msg.opcode = data & 0xFF;
+
+	/* data block 1-14 : operand 0-13 */
+	for (i = 0; i < msg.frame_size - 2; i++) {
+		data = DSS_REG_R_ND(io, HDMI_CEC_RD_DATA);
+		msg.operand[i] = data & 0xFF;
+	}
+
+	/* zero the unused tail so clients see deterministic operands */
+	for (; i < MAX_OPERAND_SIZE; i++)
+		msg.operand[i] = 0;
+
+	DEV_DBG("%s: opcode 0x%x, wakup_en %d, device_suspend %d\n", __func__,
+		msg.opcode, cec_ctrl->cec_wakeup_en,
+		cec_ctrl->cec_device_suspend);
+
+	/* wake the device on Set Stream Path / key press while suspended */
+	if ((msg.opcode == CEC_OP_SET_STREAM_PATH ||
+		msg.opcode == CEC_OP_KEY_PRESS) &&
+		cec_ctrl->input && cec_ctrl->cec_wakeup_en &&
+		cec_ctrl->cec_device_suspend) {
+		DEV_DBG("%s: Sending power on at wakeup\n", __func__);
+		input_report_key(cec_ctrl->input, KEY_POWER, 1);
+		input_sync(cec_ctrl->input);
+		input_report_key(cec_ctrl->input, KEY_POWER, 0);
+		input_sync(cec_ctrl->input);
+	}
+
+	/* mirror a Standby opcode as a power key while device is awake */
+	if ((msg.opcode == CEC_OP_STANDBY) &&
+		cec_ctrl->input && cec_ctrl->cec_wakeup_en &&
+		!cec_ctrl->cec_device_suspend) {
+		DEV_DBG("%s: Sending power off on standby\n", __func__);
+		input_report_key(cec_ctrl->input, KEY_POWER, 1);
+		input_sync(cec_ctrl->input);
+		input_report_key(cec_ctrl->input, KEY_POWER, 0);
+		input_sync(cec_ctrl->input);
+	}
+
+	if (cbs && cbs->msg_recv_notify)
+		cbs->msg_recv_notify(cbs->data, &msg);
+}
+
+/**
+ * hdmi_cec_isr() - interrupt handler for cec hw module
+ * @cec_ctrl: pointer to cec hw module's data
+ *
+ * Return: irq error code
+ *
+ * The API can be called by HDMI Tx driver on receiving hw interrupts
+ * to let the CEC related interrupts handled by this module.
+ */
+int hdmi_cec_isr(void *input)
+{
+	int rc = 0;
+	u32 cec_intr, cec_status;
+	unsigned long flags;
+	struct dss_io_data *io = NULL;
+	struct hdmi_cec_ctrl *cec_ctrl = (struct hdmi_cec_ctrl *)input;
+
+	if (!cec_ctrl || !cec_ctrl->init_data.io) {
+		DEV_ERR("%s: Invalid input\n", __func__);
+		return -EPERM;
+	}
+
+	if (!cec_ctrl->cec_enabled) {
+		DEV_DBG("%s: CEC feature not enabled\n", __func__);
+		return 0;
+	}
+
+	io = cec_ctrl->init_data.io;
+
+	cec_intr = DSS_REG_R_ND(io, HDMI_CEC_INT);
+
+	/* read but currently unused; kept for debugging visibility */
+	cec_status = DSS_REG_R_ND(io, HDMI_CEC_STATUS);
+
+	/* each event is handled only when its status AND enable bit are set;
+	 * writing the status bit back acks the interrupt
+	 */
+	if ((cec_intr & BIT(0)) && (cec_intr & BIT(1))) {
+		DEV_DBG("%s: CEC_IRQ_FRAME_WR_DONE\n", __func__);
+		DSS_REG_W(io, HDMI_CEC_INT, cec_intr | BIT(0));
+
+		spin_lock_irqsave(&cec_ctrl->lock, flags);
+		cec_ctrl->cec_msg_wr_status |= CEC_STATUS_WR_DONE;
+		spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+		if (!completion_done(&cec_ctrl->cec_msg_wr_done))
+			complete_all(&cec_ctrl->cec_msg_wr_done);
+	}
+
+	if ((cec_intr & BIT(2)) && (cec_intr & BIT(3))) {
+		DEV_DBG("%s: CEC_IRQ_FRAME_ERROR\n", __func__);
+		DSS_REG_W(io, HDMI_CEC_INT, cec_intr | BIT(2));
+
+		spin_lock_irqsave(&cec_ctrl->lock, flags);
+		cec_ctrl->cec_msg_wr_status |= CEC_STATUS_WR_ERROR;
+		spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+		/* unblock the sender; it inspects cec_msg_wr_status */
+		if (!completion_done(&cec_ctrl->cec_msg_wr_done))
+			complete_all(&cec_ctrl->cec_msg_wr_done);
+	}
+
+	if ((cec_intr & BIT(6)) && (cec_intr & BIT(7))) {
+		DEV_DBG("%s: CEC_IRQ_FRAME_RD_DONE\n", __func__);
+
+		/* defer frame readout to process context */
+		DSS_REG_W(io, HDMI_CEC_INT, cec_intr | BIT(6));
+		queue_work(cec_ctrl->init_data.workq, &cec_ctrl->cec_read_work);
+	}
+
+	return rc;
+}
+
+/* Record the device's suspend state; consulted by hdmi_cec_msg_recv()
+ * to decide whether a wakeup or standby key event should be injected.
+ */
+void hdmi_cec_device_suspend(void *input, bool suspend)
+{
+	struct hdmi_cec_ctrl *cec_ctrl = (struct hdmi_cec_ctrl *)input;
+
+	if (!cec_ctrl) {
+		DEV_WARN("%s: HDMI CEC HW module not initialized.\n", __func__);
+		return;
+	}
+
+	cec_ctrl->cec_device_suspend = suspend;
+}
+
+/* Query whether CEC wakeup is enabled; returns false (0) when the
+ * module is not initialized.
+ */
+bool hdmi_cec_is_wakeup_en(void *input)
+{
+	struct hdmi_cec_ctrl *cec_ctrl = (struct hdmi_cec_ctrl *)input;
+
+	if (!cec_ctrl) {
+		DEV_WARN("%s: HDMI CEC HW module not initialized.\n", __func__);
+		return 0;
+	}
+
+	return cec_ctrl->cec_wakeup_en;
+}
+
+/* Client op (cec_ops.wakeup_en): enable/disable CEC wakeup handling. */
+static void hdmi_cec_wakeup_en(void *input, bool enable)
+{
+	struct hdmi_cec_ctrl *cec_ctrl = (struct hdmi_cec_ctrl *)input;
+
+	if (!cec_ctrl) {
+		DEV_ERR("%s: Invalid input\n", __func__);
+		return;
+	}
+
+	cec_ctrl->cec_wakeup_en = enable;
+}
+
+/* Client op (cec_ops.wt_logical_addr): program the 4-bit CEC logical
+ * address into hw. Silently ignored while the CEC engine is disabled.
+ */
+static void hdmi_cec_write_logical_addr(void *input, u8 addr)
+{
+	struct hdmi_cec_ctrl *cec_ctrl = (struct hdmi_cec_ctrl *)input;
+
+	if (!cec_ctrl || !cec_ctrl->init_data.io) {
+		DEV_ERR("%s: Invalid input\n", __func__);
+		return;
+	}
+
+	if (cec_ctrl->cec_enabled)
+		DSS_REG_W(cec_ctrl->init_data.io, HDMI_CEC_ADDR, addr & 0xF);
+}
+
+/**
+ * hdmi_cec_enable() - enable or disable the CEC engine
+ * @input: cec hw module data
+ * @enable: true to program timing/filters and start the engine,
+ *          false to stop it and mask CEC interrupts
+ *
+ * Return: 0 on success, -EPERM on invalid input or unsupported hw
+ * version.
+ */
+static int hdmi_cec_enable(void *input, bool enable)
+{
+	int ret = 0;
+	u32 hdmi_hw_version, reg_val;
+	struct dss_io_data *io = NULL;
+	struct hdmi_cec_ctrl *cec_ctrl = (struct hdmi_cec_ctrl *)input;
+	struct mdss_panel_info *pinfo;
+
+	if (!cec_ctrl || !cec_ctrl->init_data.io) {
+		DEV_ERR("%s: Invalid input\n", __func__);
+		ret = -EPERM;
+		goto end;
+	}
+
+	io = cec_ctrl->init_data.io;
+	pinfo = cec_ctrl->init_data.pinfo;
+
+	/* NOTE(review): this error path leaves ret at 0, so a missing
+	 * pinfo is reported as success to the caller - confirm intended.
+	 */
+	if (!pinfo) {
+		DEV_ERR("%s: invalid pinfo\n", __func__);
+		goto end;
+	}
+
+	if (enable) {
+		/* 19.2Mhz * 0.00005 us = 950 = 0x3B6 */
+		DSS_REG_W(io, HDMI_CEC_REFTIMER, (0x3B6 & 0xFFF) | BIT(16));
+
+		/* newer hw needs extra line-timing ranges programmed */
+		hdmi_hw_version = DSS_REG_R(io, HDMI_VERSION);
+		if (hdmi_hw_version >= CEC_SUPPORTED_HW_VERSION) {
+			DSS_REG_W(io, HDMI_CEC_RD_RANGE, 0x30AB9888);
+			DSS_REG_W(io, HDMI_CEC_WR_RANGE, 0x888AA888);
+
+			DSS_REG_W(io, HDMI_CEC_RD_START_RANGE, 0x88888888);
+			DSS_REG_W(io, HDMI_CEC_RD_TOTAL_RANGE, 0x99);
+			DSS_REG_W(io, HDMI_CEC_COMPL_CTL, 0xF);
+			DSS_REG_W(io, HDMI_CEC_WR_CHECK_CONFIG, 0x4);
+		} else {
+			DEV_DBG("%s: CEC version %d is not supported.\n",
+				__func__, hdmi_hw_version);
+			ret = -EPERM;
+			goto end;
+		}
+
+		DSS_REG_W(io, HDMI_CEC_RD_FILTER, BIT(0) | (0x7FF << 4));
+		DSS_REG_W(io, HDMI_CEC_TIME, BIT(0) | ((7 * 0x30) << 7));
+
+		/* Enable CEC interrupts */
+		DSS_REG_W(io, HDMI_CEC_INT, CEC_INTR);
+
+		/* Enable Engine */
+		DSS_REG_W(io, HDMI_CEC_CTRL, BIT(0));
+	} else {
+		/* Disable Engine */
+		DSS_REG_W(io, HDMI_CEC_CTRL, 0);
+
+		/* Disable CEC interrupts */
+		reg_val = DSS_REG_R(io, HDMI_CEC_INT);
+		DSS_REG_W(io, HDMI_CEC_INT, reg_val & ~CEC_INTR);
+	}
+
+	cec_ctrl->cec_enabled = enable;
+end:
+	return ret;
+}
+
+/**
+ * hdmi_cec_init() - Initialize the CEC hw module
+ * @init_data: data needed to initialize the cec hw module
+ *
+ * Return: pointer to cec hw modules data that needs to be passed when
+ * calling cec hw modules API or error code.
+ *
+ * The API registers CEC HW modules with the client and provides HW
+ * specific operations.
+ */
+void *hdmi_cec_init(struct hdmi_cec_init_data *init_data)
+{
+	struct hdmi_cec_ctrl *cec_ctrl;
+	struct cec_ops *ops;
+	int ret = 0;
+
+	if (!init_data) {
+		DEV_ERR("%s: Invalid input\n", __func__);
+		ret = -EINVAL;
+		goto error;
+	}
+
+	ops = init_data->ops;
+	if (!ops) {
+		DEV_ERR("%s: no ops provided\n", __func__);
+		ret = -EINVAL;
+		goto error;
+	}
+
+	/* NOTE(review): allocation failure maps to -EINVAL rather than
+	 * the conventional -ENOMEM - confirm callers don't care.
+	 */
+	cec_ctrl = kzalloc(sizeof(*cec_ctrl), GFP_KERNEL);
+	if (!cec_ctrl) {
+		DEV_ERR("%s: FAILED: out of memory\n", __func__);
+		ret = -EINVAL;
+		goto error;
+	}
+
+	/* keep a copy of init data */
+	cec_ctrl->init_data = *init_data;
+
+	spin_lock_init(&cec_ctrl->lock);
+	INIT_WORK(&cec_ctrl->cec_read_work, hdmi_cec_msg_recv);
+	init_completion(&cec_ctrl->cec_msg_wr_done);
+
+	/* populate hardware specific operations to client */
+	ops->send_msg = hdmi_cec_msg_send;
+	ops->wt_logical_addr = hdmi_cec_write_logical_addr;
+	ops->enable = hdmi_cec_enable;
+	ops->data = cec_ctrl;
+	ops->wakeup_en = hdmi_cec_wakeup_en;
+	ops->is_wakeup_en = hdmi_cec_is_wakeup_en;
+	ops->device_suspend = hdmi_cec_device_suspend;
+
+	/* best effort: CEC works without the wakeup input device */
+	hdmi_cec_init_input_event(cec_ctrl);
+
+	return cec_ctrl;
+error:
+	/* callers must check with IS_ERR(); NULL is never returned */
+	return ERR_PTR(ret);
+}
+
+/**
+ * hdmi_cec_deinit() - de-initialize CEC HW module
+ * @data: CEC HW module data
+ *
+ * This API release all resources allocated. Safe to call with NULL.
+ */
+void hdmi_cec_deinit(void *data)
+{
+	struct hdmi_cec_ctrl *cec_ctrl = (struct hdmi_cec_ctrl *)data;
+
+	if (cec_ctrl)
+		hdmi_cec_deinit_input_event(cec_ctrl);
+
+	kfree(cec_ctrl);
+}
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_cec.h b/drivers/video/fbdev/msm/mdss_hdmi_cec.h
new file mode 100644
index 0000000..57a7664
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_hdmi_cec.h
@@ -0,0 +1,90 @@
+/* Copyright (c) 2010-2013, 2015-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_HDMI_CEC_H__
+#define __MDSS_HDMI_CEC_H__
+
+#include "mdss_hdmi_util.h"
+#include "mdss_cec_core.h"
+
+#define RETRANSMIT_MAX_NUM	5
+
+/**
+ * struct hdmi_cec_init_data - data needed for initializing cec hw module
+ * @workq: pointer to workqueue
+ * @io: pointer to register access related data
+ * @pinfo: pointer to panel information data
+ * @cbs: pointer to cec abstract callback functions.
+ * @ops: pointer to cec hw operation functions.
+ *
+ * Defines the data needed to be provided while initializing cec hw module
+ */
+struct hdmi_cec_init_data {
+	struct workqueue_struct *workq;
+	struct dss_io_data *io;
+	struct mdss_panel_info *pinfo;
+	struct cec_cbs *cbs;
+	struct cec_ops *ops;
+};
+
+/**
+ * hdmi_cec_isr() - interrupt handler for cec hw module
+ * @cec_ctrl: pointer to cec hw module's data
+ *
+ * Return: irq error code
+ *
+ * The API can be called by HDMI Tx driver on receiving hw interrupts
+ * to let the CEC related interrupts handled by this module.
+ */
+int hdmi_cec_isr(void *cec_ctrl);
+
+/**
+ * hdmi_cec_init() - Initialize the CEC hw module
+ * @init_data: data needed to initialize the cec hw module
+ *
+ * Return: pointer to cec hw modules data that needs to be passed when
+ * calling cec hw modules API or error code.
+ *
+ * The API registers CEC HW modules with the client and provides HW
+ * specific operations.
+ */
+void *hdmi_cec_init(struct hdmi_cec_init_data *init_data);
+
+/**
+ * hdmi_cec_deinit() - de-initialize CEC HW module
+ * @data: CEC HW module data
+ *
+ * This API release all resources allocated.
+ */
+void hdmi_cec_deinit(void *data);
+
+/**
+ * hdmi_cec_is_wakeup_en() - checks cec wakeup state
+ * @cec_ctrl: pointer to cec hw module's data
+ *
+ * Return: cec wakeup state
+ *
+ * This API is used to query whether the cec wakeup functionality is
+ * enabled or not.
+ */
+bool hdmi_cec_is_wakeup_en(void *cec_ctrl);
+
+/**
+ * hdmi_cec_device_suspend() - updates cec with device suspend state
+ * @cec_ctrl: pointer to cec hw module's data
+ * @suspend: device suspend state
+ *
+ * This API is used to update the CEC HW module of the device's suspend
+ * state.
+ */
+void hdmi_cec_device_suspend(void *cec_ctrl, bool suspend);
+#endif /* __MDSS_HDMI_CEC_H__ */
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_edid.c b/drivers/video/fbdev/msm/mdss_hdmi_edid.c
new file mode 100644
index 0000000..d6e37a1
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_hdmi_edid.c
@@ -0,0 +1,2494 @@
+/* Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include "mdss_fb.h"
+#include "mdss_hdmi_edid.h"
+
+#define DBC_START_OFFSET 4
+#define EDID_DTD_LEN 18
+/*
+ * As per CEA-861-E specification 7.5.2, there can be
+ * upto 31 bytes following any tag (data block type).
+ */
+#define MAX_DATA_BLOCK_SIZE 31
+
+#define HDMI_VSDB_3D_EVF_DATA_OFFSET(vsd) \
+	(!((vsd)[8] & BIT(7)) ? 9 : (!((vsd)[8] & BIT(6)) ? 11 : 13))
+
+/*
+ * As per the CEA-861E spec, there can be a total of 10 short audio
+ * descriptors with each SAD being 3 bytes long.
+ * Thus, the maximum length of the audio data block would be 30 bytes
+ */
+#define MAX_NUMBER_ADB                  5
+#define MAX_AUDIO_DATA_BLOCK_SIZE	30
+#define MAX_SPKR_ALLOC_DATA_BLOCK_SIZE	3
+
+/*
+ * As per the HDMI 2.0 spec, the size of the HF-VSDB cannot exceed 31 bytes and
+ * the minimum size is 7 bytes.
+ */
+#define MAX_HF_VSDB_SIZE    31
+#define MIN_HF_VSDB_SIZE    7
+
+/* IEEE OUI for HDMI Forum. */
+#define HDMI_FORUM_IEEE_OUI 0xD85DC4
+
+/* Support for first 5 EDID blocks */
+#define MAX_EDID_SIZE (EDID_BLOCK_SIZE * MAX_EDID_BLOCKS)
+
+#define BUFF_SIZE_3D 128
+
+#define DTD_MAX			0x04
+#define DTD_OFFSET		0x36
+#define DTD_SIZE		0x12
+#define REVISION_OFFSET		0x13
+#define EDID_REVISION_FOUR	0x04
+
+#define EDID_VENDOR_ID_SIZE     4
+#define EDID_IEEE_REG_ID        0x0c03
+
+/* Sink operating mode derived from the parsed EDID. */
+enum edid_sink_mode {
+	SINK_MODE_DVI,
+	SINK_MODE_HDMI
+};
+
+/* CEA-861 data block tag codes found in the CEA extension block. */
+enum data_block_types {
+	RESERVED,
+	AUDIO_DATA_BLOCK,
+	VIDEO_DATA_BLOCK,
+	VENDOR_SPECIFIC_DATA_BLOCK,
+	SPEAKER_ALLOCATION_DATA_BLOCK,
+	VESA_DTC_DATA_BLOCK,
+	RESERVED2,
+	USE_EXTENDED_TAG
+};
+
+/* Extended tag codes used when the block tag is USE_EXTENDED_TAG. */
+enum extended_data_block_types {
+	VIDEO_CAPABILITY_DATA_BLOCK = 0x0,
+	VENDOR_SPECIFIC_VIDEO_DATA_BLOCK = 0x01,
+	HDMI_VIDEO_DATA_BLOCK = 0x04,
+	Y420_VIDEO_DATA_BLOCK = 0x0E,
+	VIDEO_FORMAT_PREFERENCE_DATA_BLOCK = 0x0D,
+	Y420_CAPABILITY_MAP_DATA_BLOCK = 0x0F,
+	VENDOR_SPECIFIC_AUDIO_DATA_BLOCK = 0x11,
+	INFOFRAME_DATA_BLOCK = 0x20,
+};
+
+/* One display mode discovered in the EDID and its color-format support. */
+struct disp_mode_info {
+	u32 video_format;
+	u32 video_3d_format; /* Flags like SIDE_BY_SIDE_HALF*/
+	bool rgb_support;
+	bool y420_support;
+};
+
+/* Aggregated mode lists parsed out of the sink's EDID. */
+struct hdmi_edid_sink_data {
+	struct disp_mode_info disp_mode_list[HDMI_VFRMT_MAX];
+	u32 disp_multi_3d_mode_list[16];
+	u32 disp_multi_3d_mode_list_cnt;
+	u32 num_of_elements;
+	u32 preferred_video_format;
+};
+
+/* Capabilities advertised by the sink (HF-VSDB / HDMI 2.0 features). */
+struct hdmi_edid_sink_caps {
+	u32 max_pclk_in_hz;
+	bool scdc_present;
+	bool scramble_support; /* scramble support for less than 340Mcsc */
+	bool read_req_support;
+	bool osd_disparity;
+	bool dual_view_support;
+	bool ind_view_support;
+};
+
+/* Debug override written via the edid_modes sysfs node; a vic > 0
+ * makes the sysfs readers report only that single forced mode.
+ */
+struct hdmi_edid_override_data {
+	int scramble;
+	int sink_mode;
+	int format;
+	int vic;
+};
+
+/* Top-level state of the EDID parser module. */
+struct hdmi_edid_ctrl {
+	u8 pt_scan_info;	/* preferred-timing scan info */
+	u8 it_scan_info;	/* IT content scan info */
+	u8 ce_scan_info;	/* CE content scan info */
+	u8 cea_blks;		/* number of CEA extension blocks found */
+	u16 physical_address;	/* CEC physical address from the VSDB */
+	u32 video_resolution; /* selected by user */
+	u32 sink_mode; /* HDMI or DVI */
+	u32 default_vic;
+	u16 audio_latency;
+	u16 video_latency;
+	u32 present_3d;
+	u32 page_id;		/* res_info sysfs paging cursor */
+	u8 audio_data_block[MAX_NUMBER_ADB * MAX_AUDIO_DATA_BLOCK_SIZE];
+	int adb_size;
+	u8 spkr_alloc_data_block[MAX_SPKR_ALLOC_DATA_BLOCK_SIZE];
+	int sadb_size;
+	u8 edid_buf[MAX_EDID_SIZE];
+	char vendor_id[EDID_VENDOR_ID_SIZE];
+	bool keep_resv_timings;	/* skip resetting reserved timings on reparse */
+	bool edid_override;	/* sysfs override active, see override_data */
+
+	struct hdmi_edid_sink_data sink_data;
+	struct hdmi_edid_init_data init_data;
+	struct hdmi_edid_sink_caps sink_caps;
+	struct hdmi_edid_override_data override_data;
+};
+
+/* A timing is usable only if it is marked supported and its pixel clock
+ * does not exceed the platform's configured maximum.
+ */
+static bool hdmi_edid_is_mode_supported(struct hdmi_edid_ctrl *edid_ctrl,
+			struct msm_hdmi_mode_timing_info *timing)
+{
+	if (!timing->supported ||
+		timing->pixel_freq > edid_ctrl->init_data.max_pclk_khz)
+		return false;
+
+	return true;
+}
+
+/**
+ * hdmi_edid_reset_parser() - restore all parser state to defaults
+ * @edid_ctrl: edid module data
+ *
+ * Called before (re)parsing an EDID so no stale sink data survives a
+ * cable/sink change. Resets mode lists, scan info, 3d data, audio
+ * blocks and the selected resolution back to the default vic.
+ *
+ * Return: 0 on success, -EINVAL on invalid input.
+ */
+static int hdmi_edid_reset_parser(struct hdmi_edid_ctrl *edid_ctrl)
+{
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	/* reset res info read page */
+	edid_ctrl->page_id = MSM_HDMI_INIT_RES_PAGE;
+
+	/* reset sink mode to DVI as default */
+	edid_ctrl->sink_mode = SINK_MODE_DVI;
+
+	edid_ctrl->sink_data.num_of_elements = 0;
+
+	/* reset scan info data */
+	edid_ctrl->pt_scan_info = 0;
+	edid_ctrl->it_scan_info = 0;
+	edid_ctrl->ce_scan_info = 0;
+
+	/* reset 3d data */
+	edid_ctrl->present_3d = 0;
+
+	/* reset number of cea extension blocks to 0 */
+	edid_ctrl->cea_blks = 0;
+
+	/* reset resolution related sink data */
+	memset(&edid_ctrl->sink_data, 0, sizeof(edid_ctrl->sink_data));
+
+	/* reset audio related data */
+	memset(edid_ctrl->audio_data_block, 0,
+		sizeof(edid_ctrl->audio_data_block));
+	memset(edid_ctrl->spkr_alloc_data_block, 0,
+		sizeof(edid_ctrl->spkr_alloc_data_block));
+	edid_ctrl->adb_size = 0;
+	edid_ctrl->sadb_size = 0;
+
+	hdmi_edid_set_video_resolution(edid_ctrl, edid_ctrl->default_vic, true);
+
+	/* reset new resolution details */
+	if (!edid_ctrl->keep_resv_timings)
+		hdmi_reset_resv_timing_info();
+
+	return 0;
+}
+
+/**
+ * hdmi_edid_get_ctrl() - fetch the edid module data for a sysfs device
+ * @dev: device whose drvdata is the framebuffer info
+ *
+ * Walks dev -> fb_info -> msm_fb_data_type -> mdss_panel_info to reach
+ * the edid_data pointer stored on the panel.
+ *
+ * Return: pointer to the edid ctrl, or NULL if any link is missing.
+ */
+static struct hdmi_edid_ctrl *hdmi_edid_get_ctrl(struct device *dev)
+{
+	struct fb_info *fbi;
+	struct msm_fb_data_type *mfd;
+	struct mdss_panel_info *pinfo;
+
+	if (!dev) {
+		pr_err("invalid input\n");
+		goto error;
+	}
+
+	fbi = dev_get_drvdata(dev);
+
+	if (!fbi) {
+		pr_err("invalid fbi\n");
+		goto error;
+	}
+
+	mfd = (struct msm_fb_data_type *)fbi->par;
+	if (!mfd) {
+		pr_err("invalid mfd\n");
+		goto error;
+	}
+
+	pinfo = mfd->panel_info;
+	if (!pinfo) {
+		pr_err("invalid pinfo\n");
+		goto error;
+	}
+
+	return pinfo->edid_data;
+
+error:
+	return NULL;
+}
+
+/**
+ * hdmi_edid_sysfs_rda_audio_data_block() - sysfs read of the CEA ADB
+ * @dev: sysfs device
+ * @attr: sysfs attribute
+ * @buf: PAGE_SIZE output buffer
+ *
+ * Emits a binary layout: [int adb_count][int adb_size][adb_size bytes],
+ * currently always a single audio data block.
+ *
+ * Return: bytes written, 0 if the payload would exceed PAGE_SIZE,
+ * -EINVAL on invalid device.
+ */
+static ssize_t hdmi_edid_sysfs_rda_audio_data_block(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	int adb_size, adb_count;
+	ssize_t ret;
+	char *data = buf;
+
+	struct hdmi_edid_ctrl *edid_ctrl = hdmi_edid_get_ctrl(dev);
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	adb_count = 1;
+	adb_size  = edid_ctrl->adb_size;
+	ret       = sizeof(adb_count) + sizeof(adb_size) + adb_size;
+
+	if (ret > PAGE_SIZE) {
+		DEV_DBG("%s: Insufficient buffer size\n", __func__);
+		return 0;
+	}
+
+	/* Currently only extracting one audio data block */
+	memcpy(data, &adb_count, sizeof(adb_count));
+	data += sizeof(adb_count);
+	memcpy(data, &adb_size, sizeof(adb_size));
+	data += sizeof(adb_size);
+	memcpy(data, edid_ctrl->audio_data_block,
+			edid_ctrl->adb_size);
+
+	print_hex_dump(KERN_DEBUG, "AUDIO DATA BLOCK: ", DUMP_PREFIX_NONE,
+			32, 8, buf, ret, false);
+
+	return ret;
+}
+static DEVICE_ATTR(audio_data_block, 0444,
+	hdmi_edid_sysfs_rda_audio_data_block,
+	NULL);
+
+/**
+ * hdmi_edid_sysfs_rda_spkr_alloc_data_block() - sysfs read of the SADB
+ * @dev: sysfs device
+ * @attr: sysfs attribute
+ * @buf: PAGE_SIZE output buffer
+ *
+ * Emits a binary layout: [int sadb_count][int sadb_size][sadb_size
+ * bytes], mirroring the audio_data_block attribute.
+ *
+ * Return: bytes written, 0 if the payload would exceed PAGE_SIZE,
+ * -EINVAL on invalid device.
+ */
+static ssize_t hdmi_edid_sysfs_rda_spkr_alloc_data_block(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	int sadb_size, sadb_count;
+	ssize_t ret;
+	char *data = buf;
+
+	struct hdmi_edid_ctrl *edid_ctrl = hdmi_edid_get_ctrl(dev);
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	sadb_count = 1;
+	sadb_size  = edid_ctrl->sadb_size;
+	ret        = sizeof(sadb_count) + sizeof(sadb_size) + sadb_size;
+
+	if (ret > PAGE_SIZE) {
+		DEV_DBG("%s: Insufficient buffer size\n", __func__);
+		return 0;
+	}
+
+	/* Currently only extracting one speaker allocation data block */
+	memcpy(data, &sadb_count, sizeof(sadb_count));
+	data += sizeof(sadb_count);
+	memcpy(data, &sadb_size, sizeof(sadb_size));
+	data += sizeof(sadb_size);
+	memcpy(data, edid_ctrl->spkr_alloc_data_block,
+			edid_ctrl->sadb_size);
+
+	print_hex_dump(KERN_DEBUG, "SPKR ALLOC DATA BLOCK: ", DUMP_PREFIX_NONE,
+			32, 8, buf, ret, false);
+
+	return ret;
+}
+static DEVICE_ATTR(spkr_alloc_data_block, 0444,
+	hdmi_edid_sysfs_rda_spkr_alloc_data_block, NULL);
+
+/**
+ * hdmi_edid_sysfs_wta_modes() - sysfs write enabling an EDID override
+ * @dev: sysfs device
+ * @attr: sysfs attribute
+ * @buf: user input, expected as "scramble sink_mode format vic"
+ * @count: input length (unused; length is taken via strnlen)
+ *
+ * Parses four integers into override_data and sets edid_override; a
+ * malformed input clears the override instead.
+ *
+ * Return: consumed length, or -EINVAL on invalid ctrl/input.
+ */
+static ssize_t hdmi_edid_sysfs_wta_modes(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	ssize_t ret = strnlen(buf, PAGE_SIZE);
+	struct hdmi_edid_ctrl *edid_ctrl = hdmi_edid_get_ctrl(dev);
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid ctrl\n", __func__);
+		ret = -EINVAL;
+		goto error;
+	}
+
+	if (sscanf(buf, "%d %d %d %d",
+		&edid_ctrl->override_data.scramble,
+		&edid_ctrl->override_data.sink_mode,
+		&edid_ctrl->override_data.format,
+		&edid_ctrl->override_data.vic) != 4) {
+		DEV_ERR("could not read input\n");
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	edid_ctrl->edid_override = true;
+	return ret;
+bail:
+	DEV_DBG("%s: reset edid override\n", __func__);
+	edid_ctrl->edid_override = false;
+error:
+	return ret;
+}
+
+/**
+ * hdmi_edid_sysfs_rda_modes() - sysfs read of supported video formats
+ * @dev: sysfs device
+ * @attr: sysfs attribute
+ * @buf: PAGE_SIZE output buffer
+ *
+ * Prints a comma-separated list of video format ids; falls back to the
+ * currently selected resolution when the mode list is empty.
+ *
+ * Return: bytes written or -EINVAL on invalid device.
+ */
+static ssize_t hdmi_edid_sysfs_rda_modes(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t ret = 0;
+	int i;
+	struct hdmi_edid_ctrl *edid_ctrl = hdmi_edid_get_ctrl(dev);
+	u32 num_of_elements = 0;
+	struct disp_mode_info *video_mode;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	num_of_elements = edid_ctrl->sink_data.num_of_elements;
+	video_mode = edid_ctrl->sink_data.disp_mode_list;
+
+	/* NOTE(review): the override path mutates disp_mode_list[0] in
+	 * place, so the parsed first mode is lost until a reparse.
+	 */
+	if (edid_ctrl->edid_override && (edid_ctrl->override_data.vic > 0)) {
+		num_of_elements = 1;
+		edid_ctrl->sink_data.disp_mode_list[0].video_format =
+			edid_ctrl->override_data.vic;
+	}
+
+	buf[0] = 0;
+	if (num_of_elements) {
+		for (i = 0; i < num_of_elements; i++) {
+			/* comma-separate all entries after the first */
+			if (ret > 0)
+				ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+					",%d", video_mode[i].video_format);
+			else
+				ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+					"%d", video_mode[i].video_format);
+		}
+	} else {
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%d",
+			edid_ctrl->video_resolution);
+	}
+
+	DEV_DBG("%s: '%s'\n", __func__, buf);
+	ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
+
+	return ret;
+} /* hdmi_edid_sysfs_rda_modes */
+static DEVICE_ATTR(edid_modes, 0644, hdmi_edid_sysfs_rda_modes,
+	hdmi_edid_sysfs_wta_modes);
+
+/**
+ * hdmi_edid_sysfs_rda_res_info_data() - sysfs read of timing details
+ * @dev: sysfs device
+ * @attr: sysfs attribute
+ * @buf: PAGE_SIZE output buffer
+ *
+ * Prints one comma-separated line of timing parameters per supported
+ * mode, honoring an active edid override for vic and pixel format.
+ *
+ * Return: bytes written or -EINVAL on invalid device.
+ */
+static ssize_t hdmi_edid_sysfs_rda_res_info_data(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t ret;
+	u32 i, no_of_elem, offset = 0;
+	struct msm_hdmi_mode_timing_info info = {0};
+	struct hdmi_edid_ctrl *edid_ctrl = hdmi_edid_get_ctrl(dev);
+	struct disp_mode_info *minfo = NULL;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	no_of_elem = edid_ctrl->sink_data.num_of_elements;
+	minfo = edid_ctrl->sink_data.disp_mode_list;
+
+	if (edid_ctrl->edid_override && (edid_ctrl->override_data.vic > 0)) {
+		no_of_elem = 1;
+		minfo[0].video_format = edid_ctrl->override_data.vic;
+	}
+
+	for (i = 0; i < no_of_elem; i++) {
+		ret = hdmi_get_supported_mode(&info,
+			&edid_ctrl->init_data.ds_data,
+			minfo->video_format);
+
+		/* pixel format: forced by override, else from parsed caps */
+		if (edid_ctrl->edid_override &&
+			(edid_ctrl->override_data.format > 0))
+			info.pixel_formats = edid_ctrl->override_data.format;
+		else
+			info.pixel_formats =
+			    (minfo->rgb_support ?
+				MSM_HDMI_RGB_888_24BPP_FORMAT : 0) |
+			    (minfo->y420_support ?
+				MSM_HDMI_YUV_420_12BPP_FORMAT : 0);
+
+		minfo++;
+		if (ret || !info.supported)
+			continue;
+
+		offset += scnprintf(buf + offset, PAGE_SIZE - offset,
+			"%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n",
+			info.video_format, info.active_h,
+			info.front_porch_h, info.pulse_width_h,
+			info.back_porch_h, info.active_low_h,
+			info.active_v, info.front_porch_v,
+			info.pulse_width_v, info.back_porch_v,
+			info.active_low_v, info.pixel_freq,
+			info.refresh_rate, info.interlaced,
+			info.supported, info.ar,
+			info.pixel_formats);
+	}
+
+	return offset;
+}
+static DEVICE_ATTR(res_info_data, 0444, hdmi_edid_sysfs_rda_res_info_data,
+	NULL);
+
+/**
+ * hdmi_edid_sysfs_wta_res_info() - sysfs write selecting a res_info page
+ * @dev: sysfs device
+ * @attr: sysfs attribute
+ * @buf: user input holding a decimal page id
+ * @count: input length (unused; length is taken via strnlen)
+ *
+ * Validates the requested page by computing the index of its first mode
+ * (each page holds as many timing-info structs as fit in PAGE_SIZE) and
+ * stores it for the next res_info read if that index is in range.
+ *
+ * Return: consumed length, -EINVAL on invalid device, or the kstrtoint
+ * error code.
+ */
+static ssize_t hdmi_edid_sysfs_wta_res_info(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int rc, page_id;
+	u32 i = 0, j, page;
+	ssize_t ret = strnlen(buf, PAGE_SIZE);
+	struct hdmi_edid_ctrl *edid_ctrl = hdmi_edid_get_ctrl(dev);
+	struct msm_hdmi_mode_timing_info info = {0};
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = kstrtoint(buf, 10, &page_id);
+	if (rc) {
+		DEV_ERR("%s: kstrtoint failed. rc=%d\n", __func__, rc);
+		return rc;
+	}
+
+	/* i = index of the first mode on page_id */
+	if (page_id > MSM_HDMI_INIT_RES_PAGE) {
+		page = MSM_HDMI_INIT_RES_PAGE;
+		while (page < page_id) {
+			j = 1;
+			while (sizeof(info) * j < PAGE_SIZE) {
+				i++;
+				j++;
+			}
+			page++;
+		}
+	}
+
+	if (i < HDMI_VFRMT_MAX)
+		edid_ctrl->page_id = page_id;
+	else
+		DEV_ERR("%s: invalid page id\n", __func__);
+
+	DEV_DBG("%s: %d\n", __func__, edid_ctrl->page_id);
+	return ret;
+}
+
+/**
+ * hdmi_edid_sysfs_rda_res_info() - sysfs read of one page of timing info
+ * @dev: sysfs device
+ * @attr: sysfs attribute
+ * @buf: PAGE_SIZE output buffer
+ *
+ * Writes raw msm_hdmi_mode_timing_info structs for the page previously
+ * selected via the wta handler, skipping unsupported modes, and dumps
+ * each entry to the debug log.
+ *
+ * Return: bytes written or -EINVAL on invalid device.
+ */
+static ssize_t hdmi_edid_sysfs_rda_res_info(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t ret;
+	u32 no_of_elem;
+	u32 i = 0, j, page;
+	char *buf_dbg = buf;
+	struct msm_hdmi_mode_timing_info info = {0};
+	struct hdmi_edid_ctrl *edid_ctrl = hdmi_edid_get_ctrl(dev);
+	u32 size_to_write = sizeof(info);
+	struct disp_mode_info *minfo = NULL;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	minfo = edid_ctrl->sink_data.disp_mode_list;
+	no_of_elem = edid_ctrl->sink_data.num_of_elements;
+
+	/* advance i/minfo to the first mode of the selected page
+	 * (same page math as hdmi_edid_sysfs_wta_res_info)
+	 */
+	if (edid_ctrl->page_id > MSM_HDMI_INIT_RES_PAGE) {
+		page = MSM_HDMI_INIT_RES_PAGE;
+		while (page < edid_ctrl->page_id) {
+			j = 1;
+			while (sizeof(info) * j < PAGE_SIZE) {
+				i++;
+				j++;
+				minfo++;
+			}
+			page++;
+		}
+	}
+
+	if (edid_ctrl->edid_override && (edid_ctrl->override_data.vic > 0)) {
+		no_of_elem = 1;
+		minfo[0].video_format = edid_ctrl->override_data.vic;
+	}
+
+	for (; i < no_of_elem && size_to_write < PAGE_SIZE; i++) {
+		ret = hdmi_get_supported_mode(&info,
+			&edid_ctrl->init_data.ds_data,
+			minfo->video_format);
+
+		if (edid_ctrl->edid_override &&
+			(edid_ctrl->override_data.format > 0))
+			info.pixel_formats = edid_ctrl->override_data.format;
+		else
+			info.pixel_formats =
+			    (minfo->rgb_support ?
+				 MSM_HDMI_RGB_888_24BPP_FORMAT : 0) |
+			    (minfo->y420_support ?
+				MSM_HDMI_YUV_420_12BPP_FORMAT : 0);
+
+		minfo++;
+		if (ret || !info.supported)
+			continue;
+
+		memcpy(buf, &info, sizeof(info));
+
+		buf += sizeof(info);
+		size_to_write += sizeof(info);
+	}
+
+	/* debug dump of everything just copied into buf */
+	for (i = sizeof(info); i < size_to_write; i += sizeof(info)) {
+		struct msm_hdmi_mode_timing_info info_dbg = {0};
+
+		memcpy(&info_dbg, buf_dbg, sizeof(info_dbg));
+
+		DEV_DBG("%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n",
+			info_dbg.video_format, info_dbg.active_h,
+			info_dbg.front_porch_h, info_dbg.pulse_width_h,
+			info_dbg.back_porch_h, info_dbg.active_low_h,
+			info_dbg.active_v, info_dbg.front_porch_v,
+			info_dbg.pulse_width_v, info_dbg.back_porch_v,
+			info_dbg.active_low_v, info_dbg.pixel_freq,
+			info_dbg.refresh_rate, info_dbg.interlaced,
+			info_dbg.supported, info_dbg.ar,
+			info_dbg.pixel_formats);
+
+		buf_dbg += sizeof(info_dbg);
+	}
+
+	/* size_to_write started at sizeof(info); subtract that headroom */
+	return size_to_write - sizeof(info);
+}
+static DEVICE_ATTR(res_info, 0644, hdmi_edid_sysfs_rda_res_info,
+	hdmi_edid_sysfs_wta_res_info);
+
+/* sysfs read: sink's EDID-reported audio latency, one integer per line */
+static ssize_t hdmi_edid_sysfs_rda_audio_latency(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct hdmi_edid_ctrl *edid_ctrl = hdmi_edid_get_ctrl(dev);
+	ssize_t len;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	len = scnprintf(buf, PAGE_SIZE, "%d\n", edid_ctrl->audio_latency);
+
+	DEV_DBG("%s: '%s'\n", __func__, buf);
+
+	return len;
+} /* hdmi_edid_sysfs_rda_audio_latency */
+static DEVICE_ATTR(edid_audio_latency, 0444,
+	hdmi_edid_sysfs_rda_audio_latency, NULL);
+
+/* sysfs read: sink's EDID-reported video latency, one integer per line */
+static ssize_t hdmi_edid_sysfs_rda_video_latency(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct hdmi_edid_ctrl *edid_ctrl = hdmi_edid_get_ctrl(dev);
+	ssize_t len;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	len = scnprintf(buf, PAGE_SIZE, "%d\n", edid_ctrl->video_latency);
+
+	DEV_DBG("%s: '%s'\n", __func__, buf);
+
+	return len;
+} /* hdmi_edid_sysfs_rda_video_latency */
+static DEVICE_ATTR(edid_video_latency, 0444,
+	hdmi_edid_sysfs_rda_video_latency, NULL);
+
+/* sysfs read: CEC physical address extracted from the HDMI VSDB */
+static ssize_t hdmi_edid_sysfs_rda_physical_address(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct hdmi_edid_ctrl *edid_ctrl = hdmi_edid_get_ctrl(dev);
+	ssize_t len;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	len = scnprintf(buf, PAGE_SIZE, "%d\n", edid_ctrl->physical_address);
+	DEV_DBG("%s: '%d'\n", __func__, edid_ctrl->physical_address);
+
+	return len;
+} /* hdmi_edid_sysfs_rda_physical_address */
+static DEVICE_ATTR(pa, 0400, hdmi_edid_sysfs_rda_physical_address, NULL);
+
+/* sysfs read: underscan info as "preferred, IT, CE" scan values */
+static ssize_t hdmi_edid_sysfs_rda_scan_info(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct hdmi_edid_ctrl *edid_ctrl = hdmi_edid_get_ctrl(dev);
+	ssize_t len;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	len = scnprintf(buf, PAGE_SIZE, "%d, %d, %d\n", edid_ctrl->pt_scan_info,
+		edid_ctrl->it_scan_info, edid_ctrl->ce_scan_info);
+	DEV_DBG("%s: '%s'\n", __func__, buf);
+
+	return len;
+} /* hdmi_edid_sysfs_rda_scan_info */
+static DEVICE_ATTR(scan_info, 0444, hdmi_edid_sysfs_rda_scan_info, NULL);
+
+/*
+ * sysfs read: comma-separated "vic=3d-caps" entries for every mode in the
+ * sink's list that advertises at least one 3D format.
+ */
+static ssize_t hdmi_edid_sysfs_rda_3d_modes(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t len = 0;
+	int idx;
+	char buff_3d[BUFF_SIZE_3D];
+
+	struct hdmi_edid_ctrl *edid_ctrl = hdmi_edid_get_ctrl(dev);
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	buf[0] = 0;
+	if (edid_ctrl->sink_data.num_of_elements) {
+		struct disp_mode_info *modes =
+			edid_ctrl->sink_data.disp_mode_list;
+
+		for (idx = 0; idx < edid_ctrl->sink_data.num_of_elements;
+				idx++) {
+			if (!modes[idx].video_3d_format)
+				continue;
+			hdmi_get_video_3d_fmt_2string(
+					modes[idx].video_3d_format,
+					buff_3d,
+					sizeof(buff_3d));
+			/* every entry after the first is comma separated */
+			len += scnprintf(buf + len, PAGE_SIZE - len,
+				len ? ",%d=%s" : "%d=%s",
+				modes[idx].video_format, buff_3d);
+		}
+	}
+
+	DEV_DBG("%s: '%s'\n", __func__, buf);
+	len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+
+	return len;
+} /* hdmi_edid_sysfs_rda_3d_modes */
+static DEVICE_ATTR(edid_3d_modes, 0444, hdmi_edid_sysfs_rda_3d_modes, NULL);
+
+/* sysfs read: raw EDID bytes, capped at one page */
+static ssize_t hdmi_common_rda_edid_raw_data(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct hdmi_edid_ctrl *edid_ctrl = hdmi_edid_get_ctrl(dev);
+	u32 copy_len;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	/* sysfs hands us at most PAGE_SIZE of output space */
+	copy_len = sizeof(edid_ctrl->edid_buf);
+	if (copy_len > PAGE_SIZE)
+		copy_len = PAGE_SIZE;
+
+	memcpy(buf, edid_ctrl->edid_buf, copy_len);
+
+	return copy_len;
+} /* hdmi_common_rda_edid_raw_data */
+static DEVICE_ATTR(edid_raw_data, 0444, hdmi_common_rda_edid_raw_data, NULL);
+
+/*
+ * sysfs write: parse 15 space-separated timing values from userspace and
+ * register them as a reserved custom resolution. On success the reserved
+ * timings are kept across EDID re-reads; on failure they are dropped.
+ */
+static ssize_t hdmi_edid_sysfs_wta_add_resolution(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int rc;
+	ssize_t ret = strnlen(buf, PAGE_SIZE);
+	struct hdmi_edid_ctrl *edid_ctrl = hdmi_edid_get_ctrl(dev);
+	struct msm_hdmi_mode_timing_info timing;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * The timing fields are u32. The previous %lu conversions through
+	 * (unsigned long *) casts stored 8 bytes per field on 64-bit
+	 * builds and corrupted the neighbouring members, so scan with %u
+	 * directly into the fields.
+	 */
+	rc = sscanf(buf,
+		"%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
+		&timing.active_h,
+		&timing.front_porch_h,
+		&timing.pulse_width_h,
+		&timing.back_porch_h,
+		&timing.active_low_h,
+		&timing.active_v,
+		&timing.front_porch_v,
+		&timing.pulse_width_v,
+		&timing.back_porch_v,
+		&timing.active_low_v,
+		&timing.pixel_freq,
+		&timing.refresh_rate,
+		&timing.interlaced,
+		&timing.supported,
+		&timing.ar);
+
+	if (rc != 15) {
+		DEV_ERR("%s: error reading buf\n", __func__);
+		goto err;
+	}
+
+	rc = hdmi_set_resv_timing_info(&timing);
+
+	if (!IS_ERR_VALUE(rc)) {
+		DEV_DBG("%s: added new res %d\n", __func__, rc);
+	} else {
+		DEV_ERR("%s: error adding new res %d\n", __func__, rc);
+		goto err;
+	}
+
+	edid_ctrl->keep_resv_timings = true;
+	return ret;
+
+err:
+	edid_ctrl->keep_resv_timings = false;
+	return -EFAULT;
+}
+static DEVICE_ATTR(add_res, 0200, NULL, hdmi_edid_sysfs_wta_add_resolution);
+
+/* sysfs attributes published for the HDMI EDID device node */
+static struct attribute *hdmi_edid_fs_attrs[] = {
+	&dev_attr_edid_modes.attr,
+	&dev_attr_pa.attr,
+	&dev_attr_scan_info.attr,
+	&dev_attr_edid_3d_modes.attr,
+	&dev_attr_edid_raw_data.attr,
+	&dev_attr_audio_data_block.attr,
+	&dev_attr_spkr_alloc_data_block.attr,
+	&dev_attr_edid_audio_latency.attr,
+	&dev_attr_edid_video_latency.attr,
+	&dev_attr_res_info.attr,
+	&dev_attr_res_info_data.attr,
+	&dev_attr_add_res.attr,
+	NULL,
+};
+
+/* group registered/unregistered as one unit against the device kobject */
+static struct attribute_group hdmi_edid_fs_attrs_group = {
+	.attrs = hdmi_edid_fs_attrs,
+};
+
+/*
+ * hdmi_edid_find_block() - locate a CEA-861 data block by tag code
+ * @in_buf:       CEA-861 EDID extension block; byte 2 holds the offset of
+ *                the first DTD (end of the data block collection)
+ * @start_offset: offset at which to start scanning the collection
+ * @type:         tag code to search for (upper 3 bits of a block header)
+ * @len:          out: payload length of the block found, 0 if none
+ *
+ * Returns a pointer to the header byte of the first matching block, or
+ * NULL when no match is found or the collection boundaries are invalid.
+ */
+static const u8 *hdmi_edid_find_block(const u8 *in_buf, u32 start_offset,
+	u8 type, u8 *len)
+{
+	/* the start of data block collection, start of Video Data Block */
+	u32 offset = start_offset;
+	u32 dbc_offset = in_buf[2];
+
+	if (dbc_offset >= EDID_BLOCK_SIZE - EDID_DTD_LEN)
+		return NULL;
+	*len = 0;
+
+	/*
+	 * * edid buffer 1, byte 2 being 4 means no non-DTD/Data block
+	 *   collection present.
+	 * * edid buffer 1, byte 2 being 0 menas no non-DTD/DATA block
+	 *   collection present and no DTD data present.
+	 */
+	if ((dbc_offset == 0) || (dbc_offset == 4)) {
+		DEV_WARN("EDID: no DTD or non-DTD data present\n");
+		return NULL;
+	}
+
+	while (offset < dbc_offset) {
+		/* header byte: tag in bits 7:5, payload length in bits 4:0 */
+		u8 block_len = in_buf[offset] & 0x1F;
+
+		if ((offset + block_len <= dbc_offset) &&
+		    (in_buf[offset] >> 5) == type) {
+			*len = block_len;
+			DEV_DBG("%s: EDID: block=%d found @ 0x%x w/ len=%d\n",
+				__func__, type, offset, block_len);
+
+			return in_buf + offset;
+		}
+		offset += 1 + block_len;
+	}
+	DEV_WARN("%s: EDID: type=%d block not found in EDID block\n",
+		__func__, type);
+
+	return NULL;
+} /* hdmi_edid_find_block */
+
+/* Mark every mode list entry matching @video_format as YUV420-capable */
+static void hdmi_edid_set_y420_support(struct hdmi_edid_ctrl *edid_ctrl,
+				  u32 video_format)
+{
+	struct disp_mode_info *mode;
+	u32 idx;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: Invalid input\n", __func__);
+		return;
+	}
+
+	for (idx = 0; idx < edid_ctrl->sink_data.num_of_elements; idx++) {
+		mode = &edid_ctrl->sink_data.disp_mode_list[idx];
+		if (mode->video_format != video_format)
+			continue;
+		mode->y420_support = true;
+		DEV_DBG("%s: Yuv420 supported for format %d\n",
+		 __func__,
+		mode->video_format);
+	}
+}
+
+/*
+ * Append @video_format to the sink's mode list as a YUV420-only mode,
+ * provided the VIC is in range and the timing is actually supported.
+ */
+static void hdmi_edid_add_sink_y420_format(struct hdmi_edid_ctrl *edid_ctrl,
+					   u32 video_format)
+{
+	struct msm_hdmi_mode_timing_info timing = {0};
+	struct hdmi_edid_sink_data *sink;
+	u32 ret;
+	u32 supported;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	/* validate the VIC before it is used for the timing lookup */
+	if (video_format >= HDMI_VFRMT_MAX) {
+		DEV_ERR("%s: video format: %s is not supported\n", __func__,
+			msm_hdmi_mode_2string(video_format));
+		return;
+	}
+
+	sink = &edid_ctrl->sink_data;
+	ret = hdmi_get_supported_mode(&timing,
+				&edid_ctrl->init_data.ds_data,
+				video_format);
+	supported = hdmi_edid_is_mode_supported(edid_ctrl, &timing);
+
+	DEV_DBG("%s: EDID: format: %d [%s], %s\n", __func__,
+		video_format, msm_hdmi_mode_2string(video_format),
+		supported ? "Supported" : "Not-Supported");
+
+	if (!ret && supported) {
+		sink->disp_mode_list[sink->num_of_elements].video_format
+			= video_format;
+		sink->disp_mode_list[sink->num_of_elements].y420_support
+			= true;
+		sink->num_of_elements++;
+	}
+}
+
+/*
+ * Parse a YCbCr 4:2:0 Video Data Block: each payload byte after the
+ * extended tag is an SVD whose low 7 bits are a YUV420-only VIC.
+ */
+static void hdmi_edid_parse_Y420VDB(struct hdmi_edid_ctrl *edid_ctrl,
+				    const u8 *in_buf)
+{
+	const u8 *svd;
+	u8 payload_len;
+	u8 idx;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	payload_len = in_buf[0] & 0x1F;
+
+	/* SVDs start at byte 3, right after the extended tag code */
+	svd = in_buf + 2;
+	for (idx = 0; idx < payload_len - 1; idx++)
+		hdmi_edid_add_sink_y420_format(edid_ctrl, svd[idx] & 0x7F);
+}
+
+/*
+ * Parse the YCbCr 4:2:0 Capability Map Data Block: bit N of the bitmap
+ * (starting at payload byte 3) corresponds to the Nth SVD in the video
+ * data block of the first CEA extension.
+ */
+static void hdmi_edid_parse_Y420CMDB(struct hdmi_edid_ctrl *edid_ctrl,
+				     const u8 *in_buf)
+{
+	u32 offset = 2;
+	u8 svd_len = 0;
+	u32 i = 0;
+	u32 video_format = 0;
+	u32 len = 0;
+	const u8 *svd = NULL;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+	 /* Byte 3 to L+1 contain the capability bit map */
+	len = in_buf[0] & 0x1F;
+
+	svd = hdmi_edid_find_block(edid_ctrl->edid_buf+0x80, DBC_START_OFFSET,
+			VIDEO_DATA_BLOCK, &svd_len);
+	if (!svd) {
+		DEV_DBG("%s: no video data block found\n", __func__);
+		return;
+	}
+
+	/* skip the video data block header byte */
+	++svd;
+	for (i = 0; i < svd_len; i++) {
+		/*
+		 * Walk the SVDs in step with the bitmap bits. The previous
+		 * code always read *svd (never advancing) and only moved to
+		 * the next bitmap byte when j reached 128, so SVDs past the
+		 * eighth were never matched.
+		 */
+		video_format = svd[i] & 0x7F;
+		if (in_buf[offset] & (1 << (i % 8)))
+			hdmi_edid_set_y420_support(edid_ctrl, video_format);
+
+		/* next bitmap byte after every 8 SVDs */
+		if ((i % 8) == 7) {
+			offset++;
+			if (offset >= len)
+				break;
+		}
+	}
+}
+
+/*
+ * Parse an HDMI 2.0 HDMI Video Data Block (extended tag) and capture the
+ * sink's HDMI-2.0 capabilities (SCDC, scrambling, 3D OSD disparity, ...).
+ */
+static void hdmi_edid_parse_hvdb(struct hdmi_edid_ctrl *edid_ctrl,
+				 const u8 *in_buf)
+{
+	u32 len = 0;
+	struct hdmi_edid_sink_caps *sink_caps = NULL;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	sink_caps = &edid_ctrl->sink_caps;
+	len = in_buf[0] & 0x1F;
+	if ((in_buf[1] != HDMI_VIDEO_DATA_BLOCK) ||
+	    (len < 5)) {
+		DEV_ERR("%s: Not a HVDB tag code\n", __func__);
+		return;
+	}
+	/* informational, not an error condition */
+	DEV_DBG("FOUND HVDB flags = 0x%x\n", in_buf[4]);
+	/* NOTE(review): HF-VSDB path stores vsd[5]*5000000 (Hz); confirm
+	 * the *5000 scaling used here is intentional for this block.
+	 */
+	sink_caps->max_pclk_in_hz = in_buf[3]*5000;
+	sink_caps->scdc_present = (in_buf[4] & 0x80) ? true : false;
+	sink_caps->read_req_support = (in_buf[4] & 0x40) ? true : false;
+	sink_caps->scramble_support = (in_buf[4] & 0x08) ? true : false;
+	sink_caps->ind_view_support = (in_buf[4] & 0x04) ? true : false;
+	sink_caps->dual_view_support = (in_buf[4] & 0x02) ? true : false;
+	/* was 'in_buf[4] * 0x01' (multiplication): any non-zero flags byte
+	 * set osd_disparity; bit 0 must be tested with a bitwise AND.
+	 */
+	sink_caps->osd_disparity = (in_buf[4] & 0x01) ? true : false;
+}
+
+/*
+ * Walk every extended-tag (tag 7) data block in the CEA extension and
+ * dispatch it to the matching parser: video capability (underscan info),
+ * HDMI 2.0 HVDB, Y420CMDB and Y420VDB. Unknown extended tags are logged
+ * and skipped.
+ */
+static void hdmi_edid_extract_extended_data_blocks(
+	struct hdmi_edid_ctrl *edid_ctrl, const u8 *in_buf)
+{
+	u8 len = 0;
+	u32 start_offset = 0;
+	u8 const *etag = NULL;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	do {
+		/* first pass starts at the collection; later passes resume
+		 * just past the previously found block
+		 */
+		if (!start_offset && !etag)
+			start_offset = DBC_START_OFFSET;
+		else
+			start_offset = etag - in_buf + len + 1;
+
+		etag = hdmi_edid_find_block(in_buf, start_offset,
+			USE_EXTENDED_TAG, &len);
+
+		if (!etag || !len) {
+			DEV_DBG("%s: No more extended block found\n", __func__);
+			break;
+		}
+
+		/* The extended data block should at least be 2 bytes long */
+		if (len < 2) {
+			DEV_DBG("%s: invalid block size\n", __func__);
+			continue;
+		}
+
+		/*
+		 * The second byte of the extended data block has the
+		 * extended tag code
+		 */
+		switch (etag[1]) {
+		case VIDEO_CAPABILITY_DATA_BLOCK:
+			/* Video Capability Data Block */
+			DEV_DBG("%s: EDID: VCDB=%02X %02X\n", __func__,
+				etag[1], etag[2]);
+
+			/*
+			 * Check if the sink specifies underscan
+			 * support for:
+			 * BIT 5: preferred video format
+			 * BIT 3: IT video format
+			 * BIT 1: CE video format
+			 */
+			edid_ctrl->pt_scan_info =
+				(etag[2] & (BIT(4) | BIT(5))) >> 4;
+			edid_ctrl->it_scan_info =
+				(etag[2] & (BIT(3) | BIT(2))) >> 2;
+			edid_ctrl->ce_scan_info =
+				etag[2] & (BIT(1) | BIT(0));
+			DEV_DBG("%s: Scan Info (pt|it|ce): (%d|%d|%d)",
+				__func__,
+				edid_ctrl->pt_scan_info,
+				edid_ctrl->it_scan_info,
+				edid_ctrl->ce_scan_info);
+			break;
+		case HDMI_VIDEO_DATA_BLOCK:
+			/* HDMI Video data block defined in HDMI 2.0 */
+			DEV_DBG("%s: EDID: HVDB found\n", __func__);
+			hdmi_edid_parse_hvdb(edid_ctrl, etag);
+			break;
+		case Y420_CAPABILITY_MAP_DATA_BLOCK:
+			DEV_DBG("%s found Y420CMDB byte 3 = 0x%x",
+				__func__, etag[2]);
+			hdmi_edid_parse_Y420CMDB(edid_ctrl, etag);
+			break;
+		case Y420_VIDEO_DATA_BLOCK:
+			DEV_DBG("%s found Y420VDB byte 3 = 0x%x",
+				__func__, etag[2]);
+			hdmi_edid_parse_Y420VDB(edid_ctrl, etag);
+			break;
+		default:
+			DEV_DBG("%s: Tag Code %d not supported\n",
+				__func__, etag[1]);
+			break;
+		}
+	} while (1);
+} /* hdmi_edid_extract_extended_data_blocks */
+
+/* Latch edid_ctrl->present_3d from the HDMI VSDB 3D_present flag */
+static void hdmi_edid_extract_3d_present(struct hdmi_edid_ctrl *edid_ctrl,
+	const u8 *in_buf)
+{
+	const u8 *vsd;
+	u8 db_len, pos;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	edid_ctrl->present_3d = 0;
+
+	vsd = hdmi_edid_find_block(in_buf, DBC_START_OFFSET,
+		VENDOR_SPECIFIC_DATA_BLOCK, &db_len);
+	if (!vsd || !db_len || db_len > MAX_DATA_BLOCK_SIZE) {
+		DEV_DBG("%s: No/Invalid vendor Specific Data Block\n",
+			__func__);
+		return;
+	}
+
+	pos = HDMI_VSDB_3D_EVF_DATA_OFFSET(vsd);
+	DEV_DBG("%s: EDID: 3D present @ 0x%x = %02x\n", __func__,
+		pos, vsd[pos]);
+
+	/* top bit: 3D format indication present */
+	if (vsd[pos] & 0x80) {
+		DEV_INFO("%s: EDID: 3D present, 3D-len=%d\n", __func__,
+			vsd[pos+1] & 0x1F);
+		edid_ctrl->present_3d = 1;
+	}
+} /* hdmi_edid_extract_3d_present */
+
+/*
+ * Collect up to MAX_NUMBER_ADB audio data block payloads into
+ * edid_ctrl->audio_data_block (concatenated) and record the total size.
+ */
+static void hdmi_edid_extract_audio_data_blocks(
+	struct hdmi_edid_ctrl *edid_ctrl, const u8 *in_buf)
+{
+	u8 len = 0;
+	u8 adb_max = 0;
+	const u8 *adb = NULL;
+	u32 offset = DBC_START_OFFSET;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	edid_ctrl->adb_size = 0;
+
+	memset(edid_ctrl->audio_data_block, 0,
+		sizeof(edid_ctrl->audio_data_block));
+
+	do {
+		len = 0;
+		adb = hdmi_edid_find_block(in_buf, offset, AUDIO_DATA_BLOCK,
+			&len);
+
+		if ((adb == NULL) || (len > MAX_AUDIO_DATA_BLOCK_SIZE ||
+			adb_max >= MAX_NUMBER_ADB)) {
+			if (!edid_ctrl->adb_size) {
+				DEV_DBG("%s: No/Invalid Audio Data Block\n",
+					__func__);
+				return;
+			}
+			DEV_DBG("%s: No more valid ADB found\n",
+				__func__);
+			/*
+			 * 'continue' here re-scanned the same offset forever
+			 * when adb was still non-NULL (oversized block or ADB
+			 * limit reached); stop scanning instead.
+			 */
+			break;
+		}
+
+		/* append payload (skip header byte) and advance past block */
+		memcpy(edid_ctrl->audio_data_block + edid_ctrl->adb_size,
+			adb + 1, len);
+		offset = (adb - in_buf) + 1 + len;
+
+		edid_ctrl->adb_size += len;
+		adb_max++;
+	} while (adb);
+
+} /* hdmi_edid_extract_audio_data_blocks */
+
+/* Capture the speaker allocation data block payload and log the SP byte */
+static void hdmi_edid_extract_speaker_allocation_data(
+	struct hdmi_edid_ctrl *edid_ctrl, const u8 *in_buf)
+{
+	const u8 *sadb;
+	u8 db_len;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	sadb = hdmi_edid_find_block(in_buf, DBC_START_OFFSET,
+		SPEAKER_ALLOCATION_DATA_BLOCK, &db_len);
+	if (!sadb || db_len != MAX_SPKR_ALLOC_DATA_BLOCK_SIZE) {
+		DEV_DBG("%s: No/Invalid Speaker Allocation Data Block\n",
+			__func__);
+		return;
+	}
+
+	/* store the payload (header byte skipped) for the sysfs node */
+	memcpy(edid_ctrl->spkr_alloc_data_block, sadb + 1, db_len);
+	edid_ctrl->sadb_size = db_len;
+
+	DEV_DBG("%s: EDID: speaker alloc data SP byte = %08x %s%s%s%s%s%s%s\n",
+		__func__, sadb[1],
+		(sadb[1] & BIT(0)) ? "FL/FR," : "",
+		(sadb[1] & BIT(1)) ? "LFE," : "",
+		(sadb[1] & BIT(2)) ? "FC," : "",
+		(sadb[1] & BIT(3)) ? "RL/RR," : "",
+		(sadb[1] & BIT(4)) ? "RC," : "",
+		(sadb[1] & BIT(5)) ? "FLC/FRC," : "",
+		(sadb[1] & BIT(6)) ? "RLC/RRC," : "");
+} /* hdmi_edid_extract_speaker_allocation_data */
+
+/*
+ * Scan all vendor specific data blocks for the HDMI Forum VSDB (HF-OUI)
+ * and, if found, populate the sink's HDMI 2.0 capabilities.
+ */
+static void hdmi_edid_extract_sink_caps(struct hdmi_edid_ctrl *edid_ctrl,
+	const u8 *in_buf)
+{
+	u8 len = 0, i = 0;
+	const u8 *vsd = NULL;
+	u32 vsd_offset = DBC_START_OFFSET;
+	u32 hf_ieee_oui = 0;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	/* Find HF-VSDB with HF-OUI */
+	do {
+		vsd = hdmi_edid_find_block(in_buf, vsd_offset,
+			   VENDOR_SPECIFIC_DATA_BLOCK, &len);
+
+		if (!vsd || !len || len > MAX_DATA_BLOCK_SIZE) {
+			if (i == 0)
+				DEV_ERR("%s: VSDB not found\n", __func__);
+			else
+				DEV_DBG("%s: no more VSDB found\n", __func__);
+			break;
+		}
+
+		/* the OUI needs at least 3 payload bytes */
+		hf_ieee_oui = (len >= 3) ?
+			(u32)((vsd[1] << 16) | (vsd[2] << 8) | vsd[3]) : 0;
+
+		if (hf_ieee_oui == HDMI_FORUM_IEEE_OUI) {
+			DEV_DBG("%s: found HF-VSDB\n", __func__);
+			break;
+		}
+
+		DEV_DBG("%s: Not a HF OUI 0x%x\n", __func__, hf_ieee_oui);
+
+		i++;
+		vsd_offset = vsd - in_buf + len + 1;
+	} while (1);
+
+	/*
+	 * Bail out unless a HF-VSDB with at least 7 payload bytes was
+	 * found: bytes 5 and 6 are dereferenced below, and the loop can
+	 * also break out with vsd non-NULL but an invalid length.
+	 */
+	if (!vsd || len < 7 || len > MAX_DATA_BLOCK_SIZE) {
+		DEV_DBG("%s: HF-VSDB not found\n", __func__);
+		return;
+	}
+
+	/* Max pixel clock is in  multiples of 5Mhz. */
+	edid_ctrl->sink_caps.max_pclk_in_hz =
+			vsd[5]*5000000;
+	edid_ctrl->sink_caps.scdc_present =
+			(vsd[6] & 0x80) ? true : false;
+	edid_ctrl->sink_caps.scramble_support =
+			(vsd[6] & 0x08) ? true : false;
+	edid_ctrl->sink_caps.read_req_support =
+			(vsd[6] & 0x40) ? true : false;
+	edid_ctrl->sink_caps.osd_disparity =
+			(vsd[6] & 0x01) ? true : false;
+	edid_ctrl->sink_caps.dual_view_support =
+			(vsd[6] & 0x02) ? true : false;
+	edid_ctrl->sink_caps.ind_view_support =
+			(vsd[6] & 0x04) ? true : false;
+}
+
+/*
+ * Read the optional audio/video latency values from the HDMI VSDB;
+ * (u16)-1 marks "not present".
+ */
+static void hdmi_edid_extract_latency_fields(struct hdmi_edid_ctrl *edid_ctrl,
+	const u8 *in_buf)
+{
+	u8 len;
+	const u8 *vsd = NULL;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	vsd = hdmi_edid_find_block(in_buf, DBC_START_OFFSET,
+		VENDOR_SPECIFIC_DATA_BLOCK, &len);
+
+	/*
+	 * The Latency_Fields_Present flag is payload byte 8 and the
+	 * latency values are bytes 9 and 10; a block shorter than 10
+	 * payload bytes cannot carry them, and reading it anyway ran
+	 * past the end of short VSDBs.
+	 */
+	if (vsd == NULL || len < 10 || len > MAX_DATA_BLOCK_SIZE ||
+		!(vsd[8] & BIT(7))) {
+		edid_ctrl->video_latency = (u16)-1;
+		edid_ctrl->audio_latency = (u16)-1;
+		DEV_DBG("%s: EDID: No audio/video latency present\n", __func__);
+	} else {
+		edid_ctrl->video_latency = vsd[9];
+		edid_ctrl->audio_latency = vsd[10];
+		DEV_DBG("%s: EDID: video-latency=%04x, audio-latency=%04x\n",
+			__func__, edid_ctrl->video_latency,
+			edid_ctrl->audio_latency);
+	}
+} /* hdmi_edid_extract_latency_fields */
+
+/*
+ * Extract the IEEE OUI from the HDMI VSDB and latch the CEC physical
+ * address; returns the OUI (little-endian byte order) or 0 when absent.
+ */
+static u32 hdmi_edid_extract_ieee_reg_id(struct hdmi_edid_ctrl *edid_ctrl,
+	const u8 *in_buf)
+{
+	u8 len;
+	const u8 *vsd = NULL;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return 0;
+	}
+
+	vsd = hdmi_edid_find_block(in_buf, DBC_START_OFFSET,
+		VENDOR_SPECIFIC_DATA_BLOCK, &len);
+
+	/*
+	 * The OUI occupies payload bytes 1-3 and the physical address
+	 * bytes 4-5, so anything shorter than 5 payload bytes cannot be
+	 * dereferenced safely (the old len==0 check allowed OOB reads).
+	 */
+	if (vsd == NULL || len < 5 || len > MAX_DATA_BLOCK_SIZE) {
+		DEV_DBG("%s: No/Invalid Vendor Specific Data Block\n",
+			__func__);
+		return 0;
+	}
+
+	/* Max TMDS clock (byte 7) only exists in longer VSDBs */
+	if (len >= 7)
+		DEV_DBG("%s: EDID: VSD PhyAddr=%04x, MaxTMDS=%dMHz\n", __func__,
+			((u32)vsd[4] << 8) + (u32)vsd[5], (u32)vsd[7] * 5);
+
+	edid_ctrl->physical_address = ((u16)vsd[4] << 8) + (u16)vsd[5];
+
+	return ((u32)vsd[3] << 16) + ((u32)vsd[2] << 8) + (u32)vsd[1];
+} /* hdmi_edid_extract_ieee_reg_id */
+
+/*
+ * Decode the three-letter PNP manufacturer ID packed into EDID bytes 8-9
+ * as three 5-bit codes (code 1 == 'A') into edid_ctrl->vendor_id.
+ */
+static void hdmi_edid_extract_vendor_id(struct hdmi_edid_ctrl *edid_ctrl)
+{
+	char *vendor_id;
+	u32 id_codes;
+	u32 shift = 10;
+	int idx;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	vendor_id = edid_ctrl->vendor_id;
+	id_codes = ((u32)edid_ctrl->edid_buf[8] << 8) +
+			edid_ctrl->edid_buf[9];
+
+	for (idx = 0; idx < 3; idx++, shift -= 5)
+		vendor_id[idx] = 'A' - 1 + ((id_codes >> shift) & 0x1F);
+	vendor_id[3] = 0;
+} /* hdmi_edid_extract_vendor_id */
+
+/* A valid EDID starts with the fixed 00 FF FF FF FF FF FF 00 signature */
+static u32 hdmi_edid_check_header(const u8 *edid_buf)
+{
+	u32 i;
+
+	for (i = 1; i <= 6; i++) {
+		if (edid_buf[i] != 0xff)
+			return 0;
+	}
+
+	return (edid_buf[0] == 0x00) && (edid_buf[7] == 0x00);
+} /* hdmi_edid_check_header */
+
+/*
+ * hdmi_edid_detail_desc() - parse an 18-byte EDID detailed timing descriptor
+ * @edid_ctrl: EDID context (not referenced by this routine)
+ * @data_buf:  start of the 18-byte DTD
+ * @disp_mode: out: reserved VIC assigned to the parsed timing, or
+ *             HDMI_VFRMT_UNKNOWN on failure
+ *
+ * Decodes the split high/low bit fields of the DTD into a
+ * msm_hdmi_mode_timing_info and registers it as a reserved timing.
+ */
+static void hdmi_edid_detail_desc(struct hdmi_edid_ctrl *edid_ctrl,
+	const u8 *data_buf, u32 *disp_mode)
+{
+	u32	aspect_ratio_4_3    = false;
+	u32	aspect_ratio_5_4    = false;
+	u32	interlaced          = false;
+	u32	active_h            = 0;
+	u32	active_v            = 0;
+	u32	blank_h             = 0;
+	u32	blank_v             = 0;
+	u32	img_size_h          = 0;
+	u32	img_size_v          = 0;
+	u32	pixel_clk           = 0;
+	u32	front_porch_h       = 0;
+	u32	front_porch_v       = 0;
+	u32	pulse_width_h       = 0;
+	u32	pulse_width_v       = 0;
+	u32	active_low_h        = 0;
+	u32	active_low_v        = 0;
+	const u32 khz_to_hz         = 1000;
+	u32 frame_data;
+	struct msm_hdmi_mode_timing_info timing = {0};
+	int rc;
+
+	/*
+	 * Pixel clock/ 10,000
+	 * LSB stored in byte 0 and MSB stored in byte 1
+	 */
+	pixel_clk = (u32) (data_buf[0x0] | (data_buf[0x1] << 8));
+
+	/* store pixel clock in /1000 terms */
+	pixel_clk *= 10;
+
+	/*
+	 * byte 0x8 -- Horizontal Front Porch - contains lower 8 bits
+	 * byte 0xb (bits 6, 7) -- contains upper 2 bits
+	 */
+	front_porch_h = (u32) (data_buf[0x8] |
+		(data_buf[0xb] & (0x3 << 6)) << 2);
+
+	/*
+	 * byte 0x9 -- Horizontal pulse width - contains lower 8 bits
+	 * byte 0xb (bits 4, 5) -- contains upper 2 bits
+	 */
+	pulse_width_h = (u32) (data_buf[0x9] |
+		(data_buf[0xb] & (0x3 << 4)) << 4);
+
+	/*
+	 * byte 0xa -- Vertical front porch -- stored in Upper Nibble,
+	 * contains lower 4 bits.
+	 * byte 0xb (bits 2, 3) -- contains upper 2 bits
+	 */
+	front_porch_v = (u32) (((data_buf[0xa] & (0xF << 4)) >> 4) |
+		(data_buf[0xb] & (0x3 << 2)) << 2);
+
+	/*
+	 * byte 0xa -- Vertical pulse width -- stored in Lower Nibble,
+	 * contains lower 4 bits.
+	 * byte 0xb (bits 0, 1) -- contains upper 2 bits
+	 */
+	pulse_width_v = (u32) ((data_buf[0xa] & 0xF) |
+		((data_buf[0xb] & 0x3) << 4));
+
+	/*
+	 * * See VESA Spec
+	 * * EDID_TIMING_DESC_UPPER_H_NIBBLE[0x4]: Relative Offset to the
+	 *   EDID detailed timing descriptors - Upper 4 bit for each H
+	 *   active/blank field
+	 * * EDID_TIMING_DESC_H_ACTIVE[0x2]: Relative Offset to the EDID
+	 *   detailed timing descriptors - H active
+	 */
+	active_h = ((((u32)data_buf[0x4] >> 0x4) & 0xF) << 8)
+		| data_buf[0x2];
+
+	/*
+	 * EDID_TIMING_DESC_H_BLANK[0x3]: Relative Offset to the EDID detailed
+	 *   timing descriptors - H blank
+	 */
+	blank_h = (((u32)data_buf[0x4] & 0xF) << 8)
+		| data_buf[0x3];
+
+	/*
+	 * * EDID_TIMING_DESC_UPPER_V_NIBBLE[0x7]: Relative Offset to the
+	 *   EDID detailed timing descriptors - Upper 4 bit for each V
+	 *   active/blank field
+	 * * EDID_TIMING_DESC_V_ACTIVE[0x5]: Relative Offset to the EDID
+	 *   detailed timing descriptors - V active
+	 */
+	active_v = ((((u32)data_buf[0x7] >> 0x4) & 0xF) << 8)
+		| data_buf[0x5];
+
+	/*
+	 * EDID_TIMING_DESC_V_BLANK[0x6]: Relative Offset to the EDID
+	 * detailed timing descriptors - V blank
+	 */
+	blank_v = (((u32)data_buf[0x7] & 0xF) << 8)
+		| data_buf[0x6];
+
+	/*
+	 * * EDID_TIMING_DESC_IMAGE_SIZE_UPPER_NIBBLE[0xE]: Relative Offset
+	 *   to the EDID detailed timing descriptors - Image Size upper
+	 *   nibble V and H
+	 * * EDID_TIMING_DESC_H_IMAGE_SIZE[0xC]: Relative Offset to the EDID
+	 *   detailed timing descriptors - H image size
+	 * * EDID_TIMING_DESC_V_IMAGE_SIZE[0xD]: Relative Offset to the EDID
+	 *   detailed timing descriptors - V image size
+	 */
+	img_size_h = ((((u32)data_buf[0xE] >> 0x4) & 0xF) << 8)
+		| data_buf[0xC];
+	img_size_v = (((u32)data_buf[0xE] & 0xF) << 8)
+		| data_buf[0xD];
+
+	/*
+	 * aspect ratio as 4:3 if within specificed range, rather than being
+	 * absolute value
+	 */
+	aspect_ratio_4_3 = (abs(img_size_h * 3 - img_size_v * 4) < 5) ? 1 : 0;
+
+	aspect_ratio_5_4 = (abs(img_size_h * 4 - img_size_v * 5) < 5) ? 1 : 0;
+
+	/*
+	 * EDID_TIMING_DESC_INTERLACE[0x11:7]: Relative Offset to the EDID
+	 * detailed timing descriptors - Interlace flag
+	 */
+	DEV_DBG("%s: Interlaced mode byte data_buf[0x11]=[%x]\n", __func__,
+		data_buf[0x11]);
+
+	/*
+	 * CEA 861-D: interlaced bit is bit[7] of byte[0x11]
+	 */
+	interlaced = (data_buf[0x11] & 0x80) >> 7;
+
+	active_low_v = ((data_buf[0x11] & (0x7 << 2)) >> 2) == 0x7 ? 0 : 1;
+
+	active_low_h = ((data_buf[0x11] & BIT(1)) &&
+				(data_buf[0x11] & BIT(4))) ? 0 : 1;
+
+	/* total pixels per frame, used to derive the refresh rate */
+	frame_data = (active_h + blank_h) * (active_v + blank_v);
+
+	if (frame_data) {
+		int refresh_rate_khz = (pixel_clk * khz_to_hz) / frame_data;
+
+		timing.active_h      = active_h;
+		timing.front_porch_h = front_porch_h;
+		timing.pulse_width_h = pulse_width_h;
+		timing.back_porch_h  = blank_h -
+					(front_porch_h + pulse_width_h);
+		timing.active_low_h  = active_low_h;
+		timing.active_v      = active_v;
+		timing.front_porch_v = front_porch_v;
+		timing.pulse_width_v = pulse_width_v;
+		timing.back_porch_v  = blank_v -
+					(front_porch_v + pulse_width_v);
+		timing.active_low_v  = active_low_v;
+		timing.pixel_freq    = pixel_clk;
+		timing.refresh_rate  = refresh_rate_khz * khz_to_hz;
+		timing.interlaced    = interlaced;
+		timing.supported     = true;
+		timing.ar            = aspect_ratio_4_3 ? HDMI_RES_AR_4_3 :
+					(aspect_ratio_5_4 ? HDMI_RES_AR_5_4 :
+					HDMI_RES_AR_16_9);
+
+		DEV_DBG("%s: new res: %dx%d%s@%dHz\n", __func__,
+			timing.active_h, timing.active_v,
+			interlaced ? "i" : "p",
+			timing.refresh_rate / khz_to_hz);
+
+		rc = hdmi_set_resv_timing_info(&timing);
+	} else {
+		DEV_ERR("%s: Invalid frame data\n", __func__);
+		rc = -EINVAL;
+	}
+
+	if (!IS_ERR_VALUE(rc)) {
+		*disp_mode = rc;
+		DEV_DBG("%s: DTD mode found: %d\n", __func__, *disp_mode);
+	} else {
+		*disp_mode = HDMI_VFRMT_UNKNOWN;
+		DEV_ERR("%s: error adding mode from DTD: %d\n", __func__, rc);
+	}
+} /* hdmi_edid_detail_desc */
+
+/* OR a 3D capability flag into the matching 2D mode, if it is listed */
+static void hdmi_edid_add_sink_3d_format(struct hdmi_edid_sink_data *sink_data,
+	u32 video_format, u32 video_3d_format)
+{
+	char fmt_str[BUFF_SIZE_3D];
+	u32 found = false;
+	int idx;
+
+	for (idx = 0; idx < sink_data->num_of_elements; ++idx) {
+		if (sink_data->disp_mode_list[idx].video_format !=
+				video_format)
+			continue;
+		sink_data->disp_mode_list[idx].video_3d_format |=
+			video_3d_format;
+		found = true;
+		break;
+	}
+
+	hdmi_get_video_3d_fmt_2string(video_3d_format, fmt_str,
+		sizeof(fmt_str));
+
+	DEV_DBG("%s: EDID[3D]: format: %d [%s], %s %s\n", __func__,
+		video_format, msm_hdmi_mode_2string(video_format),
+		fmt_str, found ? "added" : "NOT added");
+} /* hdmi_edid_add_sink_3d_format */
+
+/*
+ * Append @video_format to the sink's mode list as an RGB-capable mode,
+ * provided the VIC is in range and the timing is actually supported.
+ */
+static void hdmi_edid_add_sink_video_format(struct hdmi_edid_ctrl *edid_ctrl,
+	u32 video_format)
+{
+	struct msm_hdmi_mode_timing_info timing = {0};
+	struct hdmi_edid_sink_data *sink_data;
+	struct disp_mode_info *disp_mode_list;
+	u32 ret;
+	u32 supported;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	/* validate the VIC before it is used for the timing lookup */
+	if (video_format >= HDMI_VFRMT_MAX) {
+		DEV_ERR("%s: video format: %s is not supported\n", __func__,
+			msm_hdmi_mode_2string(video_format));
+		return;
+	}
+
+	ret = hdmi_get_supported_mode(&timing,
+				&edid_ctrl->init_data.ds_data,
+				video_format);
+	supported = hdmi_edid_is_mode_supported(edid_ctrl, &timing);
+	sink_data = &edid_ctrl->sink_data;
+	disp_mode_list = sink_data->disp_mode_list;
+
+	DEV_DBG("%s: EDID: format: %d [%s], %s\n", __func__,
+		video_format, msm_hdmi_mode_2string(video_format),
+		supported ? "Supported" : "Not-Supported");
+
+	if (!ret && supported) {
+		/* todo: MHL */
+		disp_mode_list[sink_data->num_of_elements].video_format =
+			video_format;
+		disp_mode_list[sink_data->num_of_elements].rgb_support =
+			true;
+		sink_data->num_of_elements++;
+	}
+} /* hdmi_edid_add_sink_video_format */
+
+/*
+ * hdmi_edid_get_display_vsd_3d_mode() - parse 3D modes from the HDMI VSDB
+ * @data_buf:           full EDID buffer; the VSDB is searched in the first
+ *                      CEA extension (offset 0x80)
+ * @sink_data:          sink mode list to attach 3D formats to
+ * @num_of_cea_blocks:  number of CEA extension blocks; 0 skips parsing
+ *
+ * Handles both the 3D_Structure_ALL/3D_MASK multi-mode fields and the
+ * per-VIC 3D structure/detail list. Returns 0 on success, -ENXIO when no
+ * valid VSDB exists, -ETOOSMALL when the block ends mid-field.
+ */
+static int hdmi_edid_get_display_vsd_3d_mode(const u8 *data_buf,
+	struct hdmi_edid_sink_data *sink_data, u32 num_of_cea_blocks)
+{
+	u8 len, offset, present_multi_3d, hdmi_vic_len;
+	int hdmi_3d_len;
+	u16 structure_all, structure_mask;
+	const u8 *vsd = num_of_cea_blocks ?
+		hdmi_edid_find_block(data_buf+0x80, DBC_START_OFFSET,
+			VENDOR_SPECIFIC_DATA_BLOCK, &len) : NULL;
+	int i;
+
+	if (vsd == NULL || len == 0 || len > MAX_DATA_BLOCK_SIZE) {
+		DEV_DBG("%s: No/Invalid Vendor Specific Data Block\n",
+			__func__);
+		return -ENXIO;
+	}
+
+	offset = HDMI_VSDB_3D_EVF_DATA_OFFSET(vsd);
+	if (offset >= len - 1)
+		return -ETOOSMALL;
+
+	/* 3D_Multi_present: bits 6:5 of the 3D/EVF byte */
+	present_multi_3d = (vsd[offset] & 0x60) >> 5;
+
+	offset += 1;
+
+	/* lengths of the HDMI VIC list and the 3D portion that follow */
+	hdmi_vic_len = (vsd[offset] >> 5) & 0x7;
+	hdmi_3d_len = vsd[offset] & 0x1F;
+	DEV_DBG("%s: EDID[3D]: HDMI_VIC_LEN = %d, HDMI_3D_LEN = %d\n", __func__,
+		hdmi_vic_len, hdmi_3d_len);
+
+	offset += (hdmi_vic_len + 1);
+	if (offset >= len - 1)
+		return -ETOOSMALL;
+
+	if (present_multi_3d == 1 || present_multi_3d == 2) {
+		DEV_DBG("%s: EDID[3D]: multi 3D present (%d)\n", __func__,
+			present_multi_3d);
+		/* 3d_structure_all */
+		structure_all = (vsd[offset] << 8) | vsd[offset + 1];
+		offset += 2;
+		if (offset >= len - 1)
+			return -ETOOSMALL;
+		hdmi_3d_len -= 2;
+		if (present_multi_3d == 2) {
+			/* 3d_structure_mask */
+			structure_mask = (vsd[offset] << 8) | vsd[offset + 1];
+			offset += 2;
+			hdmi_3d_len -= 2;
+		} else
+			structure_mask = 0xffff;
+
+		/* bit i of the mask selects the i-th multi-3D mode entry */
+		i = 0;
+		while (i < 16) {
+			if (i >= sink_data->disp_multi_3d_mode_list_cnt)
+				break;
+
+			if (!(structure_mask & BIT(i))) {
+				++i;
+				continue;
+			}
+
+			/* BIT0: FRAME PACKING */
+			if (structure_all & BIT(0))
+				hdmi_edid_add_sink_3d_format(sink_data,
+					sink_data->
+					disp_multi_3d_mode_list[i],
+					FRAME_PACKING);
+
+			/* BIT6: TOP AND BOTTOM */
+			if (structure_all & BIT(6))
+				hdmi_edid_add_sink_3d_format(sink_data,
+					sink_data->
+					disp_multi_3d_mode_list[i],
+					TOP_AND_BOTTOM);
+
+			/* BIT8: SIDE BY SIDE HALF */
+			if (structure_all & BIT(8))
+				hdmi_edid_add_sink_3d_format(sink_data,
+					sink_data->
+					disp_multi_3d_mode_list[i],
+					SIDE_BY_SIDE_HALF);
+
+			++i;
+		}
+	}
+
+	/* per-VIC list: upper nibble is the 2D VIC order, lower the 3D type */
+	i = 0;
+	while (hdmi_3d_len > 0) {
+		if (offset >= len - 1)
+			return -ETOOSMALL;
+		DEV_DBG("%s: EDID: 3D_Structure_%d @ 0x%x: %02x\n",
+			__func__, i + 1, offset, vsd[offset]);
+		if ((vsd[offset] >> 4) >=
+			sink_data->disp_multi_3d_mode_list_cnt) {
+			/* types >= 8 carry an extra 3D_Detail byte */
+			if ((vsd[offset] & 0x0F) >= 8) {
+				offset += 1;
+				hdmi_3d_len -= 1;
+				DEV_DBG("%s:EDID:3D_Detail_%d @ 0x%x: %02x\n",
+					__func__, i + 1, offset,
+					vsd[min_t(u32, offset, (len - 1))]);
+			}
+			i += 1;
+			offset += 1;
+			hdmi_3d_len -= 1;
+			continue;
+		}
+
+		switch (vsd[offset] & 0x0F) {
+		case 0:
+			/* 0000b: FRAME PACKING */
+			hdmi_edid_add_sink_3d_format(sink_data,
+				sink_data->
+				disp_multi_3d_mode_list[vsd[offset] >> 4],
+				FRAME_PACKING);
+			break;
+		case 6:
+			/* 0110b: TOP AND BOTTOM */
+			hdmi_edid_add_sink_3d_format(sink_data,
+				sink_data->
+				disp_multi_3d_mode_list[vsd[offset] >> 4],
+				TOP_AND_BOTTOM);
+			break;
+		case 8:
+			/* 1000b: SIDE BY SIDE HALF */
+			hdmi_edid_add_sink_3d_format(sink_data,
+				sink_data->
+				disp_multi_3d_mode_list[vsd[offset] >> 4],
+				SIDE_BY_SIDE_HALF);
+			break;
+		}
+		if ((vsd[offset] & 0x0F) >= 8) {
+			offset += 1;
+			hdmi_3d_len -= 1;
+			DEV_DBG("%s: EDID[3D]: 3D_Detail_%d @ 0x%x: %02x\n",
+				__func__, i + 1, offset,
+				vsd[min_t(u32, offset, (len - 1))]);
+		}
+		i += 1;
+		offset += 1;
+		hdmi_3d_len -= 1;
+	}
+	return 0;
+} /* hdmi_edid_get_display_vsd_3d_mode */
+
+/*
+ * Add the extended (4K) video formats advertised in the HDMI VSDB to the
+ * sink's mode list, mapped past HDMI_VFRMT_END.
+ */
+static void hdmi_edid_get_extended_video_formats(
+	struct hdmi_edid_ctrl *edid_ctrl, const u8 *in_buf)
+{
+	u8 db_len, offset, i;
+	u8 hdmi_vic_len;
+	u32 video_format;
+	const u8 *vsd = NULL;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	vsd = hdmi_edid_find_block(in_buf, DBC_START_OFFSET,
+		VENDOR_SPECIFIC_DATA_BLOCK, &db_len);
+
+	/*
+	 * The HDMI_Video_present flag lives in payload byte 8; a shorter
+	 * block cannot carry extended formats and dereferencing vsd[8] in
+	 * it would read past the block (the old check only rejected
+	 * db_len == 0).
+	 */
+	if (vsd == NULL || db_len < 8 || db_len > MAX_DATA_BLOCK_SIZE) {
+		DEV_DBG("%s: No/Invalid Vendor Specific Data Block\n",
+			__func__);
+		return;
+	}
+
+	/* check if HDMI_Video_present flag is set or not */
+	if (!(vsd[8] & BIT(5))) {
+		DEV_DBG("%s: extended vfmts are not supported by the sink.\n",
+			__func__);
+		return;
+	}
+
+	offset = HDMI_VSDB_3D_EVF_DATA_OFFSET(vsd);
+
+	hdmi_vic_len = vsd[offset + 1] >> 5;
+	if (hdmi_vic_len) {
+		DEV_DBG("%s: EDID: EVFRMT @ 0x%x of block 3, len = %02x\n",
+			__func__, offset, hdmi_vic_len);
+
+		for (i = 0; i < hdmi_vic_len; i++) {
+			video_format = HDMI_VFRMT_END + vsd[offset + 2 + i];
+			hdmi_edid_add_sink_video_format(edid_ctrl,
+				video_format);
+		}
+	}
+} /* hdmi_edid_get_extended_video_formats */
+
+/*
+ * hdmi_edid_parse_et3() - parse Established Timings III descriptors.
+ * @edid_ctrl: EDID parser context
+ * @edid_blk0: EDID block 0 (128 bytes)
+ *
+ * Applies only to EDID revision 4 (version 1.4). Walks the 18-byte
+ * display descriptors; a descriptor whose header is 0x000000F7 carries
+ * an ET-III support bitmap, and each set bit adds the matching VESA DMT
+ * mode to the sink's mode list.
+ */
+static void hdmi_edid_parse_et3(struct hdmi_edid_ctrl *edid_ctrl,
+	const u8 *edid_blk0)
+{
+	u8  start = DTD_OFFSET, i = 0;
+	struct hdmi_edid_sink_data *sink_data = NULL;
+
+	if (!edid_ctrl || !edid_blk0) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	sink_data = &edid_ctrl->sink_data;
+
+	/* check if the EDID revision is 4 (version 1.4) */
+	if (edid_blk0[REVISION_OFFSET] != EDID_REVISION_FOUR)
+		return;
+
+	/* Check each of 4 - 18 bytes descriptors */
+	while (i < DTD_MAX) {
+		u8  iter = start;
+		u32 header_1 = 0;
+		u8  header_2 = 0;
+
+		/* first 4 bytes + tag byte identify an ET-III descriptor */
+		header_1 = edid_blk0[iter++];
+		header_1 = header_1 << 8 | edid_blk0[iter++];
+		header_1 = header_1 << 8 | edid_blk0[iter++];
+		header_1 = header_1 << 8 | edid_blk0[iter++];
+		header_2 = edid_blk0[iter];
+
+		if (header_1 != 0x000000F7 || header_2 != 0x00)
+			goto loop_end;
+
+		/* VESA DMT Standard Version (0x0A)*/
+		iter++;
+
+		/* First set of supported formats */
+		iter++;
+		if (edid_blk0[iter] & BIT(3)) {
+			pr_debug("%s: DMT 848x480@60\n", __func__);
+			hdmi_edid_add_sink_video_format(edid_ctrl,
+				HDMI_VFRMT_848x480p60_16_9);
+		}
+
+		/* Second set of supported formats */
+		iter++;
+		if (edid_blk0[iter] & BIT(1)) {
+			pr_debug("%s: DMT 1280x1024@60\n", __func__);
+			hdmi_edid_add_sink_video_format(edid_ctrl,
+				HDMI_VFRMT_1280x1024p60_5_4);
+		}
+
+		if (edid_blk0[iter] & BIT(3)) {
+			pr_debug("%s: DMT 1280x960@60\n", __func__);
+			hdmi_edid_add_sink_video_format(edid_ctrl,
+				HDMI_VFRMT_1280x960p60_4_3);
+		}
+
+		/* Third set of supported formats */
+		iter++;
+		if (edid_blk0[iter] & BIT(1)) {
+			pr_debug("%s: DMT 1400x1050@60\n", __func__);
+			hdmi_edid_add_sink_video_format(edid_ctrl,
+				HDMI_VFRMT_1400x1050p60_4_3);
+		}
+
+		if (edid_blk0[iter] & BIT(5)) {
+			pr_debug("%s: DMT 1440x900@60\n", __func__);
+			hdmi_edid_add_sink_video_format(edid_ctrl,
+				HDMI_VFRMT_1440x900p60_16_10);
+		}
+
+		if (edid_blk0[iter] & BIT(7)) {
+			pr_debug("%s: DMT 1360x768@60\n", __func__);
+			hdmi_edid_add_sink_video_format(edid_ctrl,
+				HDMI_VFRMT_1360x768p60_16_9);
+		}
+
+		/* Fourth set of supported formats */
+		iter++;
+		if (edid_blk0[iter] & BIT(2)) {
+			pr_debug("%s: DMT 1600x1200@60\n", __func__);
+			hdmi_edid_add_sink_video_format(edid_ctrl,
+				HDMI_VFRMT_1600x1200p60_4_3);
+		}
+
+		if (edid_blk0[iter] & BIT(5)) {
+			pr_debug("%s: DMT 1680x1050@60\n", __func__);
+			hdmi_edid_add_sink_video_format(edid_ctrl,
+				HDMI_VFRMT_1680x1050p60_16_10);
+		}
+
+		/* Fifth set of supported formats */
+		iter++;
+		if (edid_blk0[iter] & BIT(0)) {
+			pr_debug("%s: DMT 1920x1200@60\n", __func__);
+			hdmi_edid_add_sink_video_format(edid_ctrl,
+				HDMI_VFRMT_1920x1200p60_16_10);
+		}
+
+loop_end:
+		/* advance to the next 18-byte descriptor */
+		i++;
+		start += DTD_SIZE;
+	}
+}
+
+/*
+ * hdmi_edid_get_display_mode() - build the sink's supported mode list.
+ * @edid_ctrl: EDID parser context with raw EDID already read
+ *
+ * Populates edid_ctrl->sink_data from, in order: the CEA Video Data
+ * Block SVDs (or block-0 detailed timing descriptors as fallback),
+ * standard timings, Established Timings I/II/III, HDMI-VIC extended
+ * formats, and - when the sink advertises 3D - the mandatory plus
+ * VSD-described 3D formats. Always guarantees 640x480p60 is present.
+ */
+static void hdmi_edid_get_display_mode(struct hdmi_edid_ctrl *edid_ctrl)
+{
+	u8 i = 0, offset = 0, std_blk = 0;
+	u32 video_format = HDMI_VFRMT_640x480p60_4_3;
+	u32 has480p = false;
+	u8 len = 0;
+	u8 num_of_cea_blocks;
+	u8 *data_buf;
+	int rc;
+	const u8 *edid_blk0 = NULL;
+	const u8 *edid_blk1 = NULL;
+	const u8 *svd = NULL;
+	u32 has60hz_mode = false;
+	u32 has50hz_mode = false;
+	bool read_block0_res = false;
+	struct hdmi_edid_sink_data *sink_data = NULL;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	data_buf = edid_ctrl->edid_buf;
+	num_of_cea_blocks = edid_ctrl->cea_blks;
+
+	edid_blk0 = &data_buf[0x0];
+	edid_blk1 = &data_buf[0x80];
+	/* Video Data Block only exists inside a CEA extension block */
+	svd = num_of_cea_blocks ?
+		hdmi_edid_find_block(data_buf+0x80, DBC_START_OFFSET,
+			VIDEO_DATA_BLOCK, &len) : NULL;
+
+	if (num_of_cea_blocks && (len == 0 || len > MAX_DATA_BLOCK_SIZE)) {
+		DEV_DBG("%s: fall back to block 0 res\n", __func__);
+		svd = NULL;
+		read_block0_res = true;
+	}
+
+	sink_data = &edid_ctrl->sink_data;
+
+	sink_data->disp_multi_3d_mode_list_cnt = 0;
+	if (svd != NULL) {
+		++svd;
+		for (i = 0; i < len; ++i, ++svd) {
+			/*
+			 * Subtract 1 because it is zero based in the driver,
+			 * while the Video identification code is 1 based in the
+			 * CEA_861D spec
+			 */
+			video_format = (*svd & 0x7F);
+			hdmi_edid_add_sink_video_format(edid_ctrl,
+				video_format);
+			/* Make a note of the preferred video format */
+			if (i == 0)
+				sink_data->preferred_video_format =
+					video_format;
+
+			/* only the first 16 SVDs can carry 3D info */
+			if (i < 16) {
+				sink_data->disp_multi_3d_mode_list[i]
+					= video_format;
+				sink_data->disp_multi_3d_mode_list_cnt++;
+			}
+
+			if (video_format <= HDMI_VFRMT_1920x1080p60_16_9 ||
+				video_format == HDMI_VFRMT_2880x480p60_4_3 ||
+				video_format == HDMI_VFRMT_2880x480p60_16_9)
+				has60hz_mode = true;
+
+			if ((video_format >= HDMI_VFRMT_720x576p50_4_3 &&
+				video_format <= HDMI_VFRMT_1920x1080p50_16_9) ||
+				video_format == HDMI_VFRMT_2880x576p50_4_3 ||
+				video_format == HDMI_VFRMT_2880x576p50_16_9 ||
+				video_format == HDMI_VFRMT_1920x1250i50_16_9)
+				has50hz_mode = true;
+
+			if (video_format == HDMI_VFRMT_640x480p60_4_3)
+				has480p = true;
+		}
+	} else if (!num_of_cea_blocks || read_block0_res) {
+		/* Detailed timing descriptors */
+		u32 desc_offset = 0;
+		/*
+		 * * Maximum 4 timing descriptor in block 0 - No CEA
+		 *   extension in this case
+		 * * EDID_FIRST_TIMING_DESC[0x36] - 1st detailed timing
+		 *   descriptor
+		 * * EDID_DETAIL_TIMING_DESC_BLCK_SZ[0x12] - Each detailed
+		 *   timing descriptor has block size of 18
+		 */
+		while (4 > i && 0 != edid_blk0[0x36+desc_offset]) {
+			hdmi_edid_detail_desc(edid_ctrl,
+				edid_blk0+0x36+desc_offset,
+				&video_format);
+
+			DEV_DBG("[%s:%d] Block-0 Adding vid fmt = [%s]\n",
+				__func__, __LINE__,
+				msm_hdmi_mode_2string(video_format));
+
+			hdmi_edid_add_sink_video_format(edid_ctrl,
+				video_format);
+
+			if (video_format == HDMI_VFRMT_640x480p60_4_3)
+				has480p = true;
+
+			/* Make a note of the preferred video format */
+			if (i == 0) {
+				sink_data->preferred_video_format =
+					video_format;
+			}
+			desc_offset += 0x12;
+			++i;
+		}
+	} else if (num_of_cea_blocks == 1) {
+		u32 desc_offset = 0;
+
+		/*
+		 * Read from both block 0 and block 1
+		 * Read EDID block[0] as above
+		 */
+		while (4 > i && 0 != edid_blk0[0x36+desc_offset]) {
+			hdmi_edid_detail_desc(edid_ctrl,
+				edid_blk0+0x36+desc_offset,
+				&video_format);
+
+			DEV_DBG("[%s:%d] Block-0 Adding vid fmt = [%s]\n",
+				__func__, __LINE__,
+				msm_hdmi_mode_2string(video_format));
+
+			hdmi_edid_add_sink_video_format(edid_ctrl,
+				video_format);
+
+			if (video_format == HDMI_VFRMT_640x480p60_4_3)
+				has480p = true;
+
+			/* Make a note of the preferred video format */
+			if (i == 0) {
+				sink_data->preferred_video_format =
+					video_format;
+			}
+			desc_offset += 0x12;
+			++i;
+		}
+
+		/*
+		 * * Parse block 1 - CEA extension byte offset of first
+		 *   detailed timing generation - offset is relevant to
+		 *   the offset of block 1
+		 * * EDID_CEA_EXTENSION_FIRST_DESC[0x82]: Offset to CEA
+		 *   extension first timing desc - indicate the offset of
+		 *   the first detailed timing descriptor
+		 * * EDID_BLOCK_SIZE = 0x80  Each page size in the EDID ROM
+		 */
+		desc_offset = edid_blk1[0x02];
+		while (edid_blk1[desc_offset] != 0) {
+			hdmi_edid_detail_desc(edid_ctrl,
+				edid_blk1+desc_offset,
+				&video_format);
+
+			DEV_DBG("[%s:%d] Block-1 Adding vid fmt = [%s]\n",
+				__func__, __LINE__,
+				msm_hdmi_mode_2string(video_format));
+
+			hdmi_edid_add_sink_video_format(edid_ctrl,
+				video_format);
+			if (video_format == HDMI_VFRMT_640x480p60_4_3)
+				has480p = true;
+
+			/* Make a note of the preferred video format */
+			if (i == 0) {
+				sink_data->preferred_video_format =
+					video_format;
+			}
+			desc_offset += 0x12;
+			++i;
+		}
+	}
+
+	/* Standard timings: 8 two-byte entries starting at offset 0x26 */
+	std_blk = 0;
+	offset  = 0;
+	while (std_blk < 8) {
+		if ((edid_blk0[0x26 + offset] == 0x81) &&
+		    (edid_blk0[0x26 + offset + 1] == 0x80)) {
+			pr_debug("%s: 108MHz: off=[%x] stdblk=[%x]\n",
+				 __func__, offset, std_blk);
+			hdmi_edid_add_sink_video_format(edid_ctrl,
+				HDMI_VFRMT_1280x1024p60_5_4);
+		}
+		if ((edid_blk0[0x26 + offset] == 0x61) &&
+		    (edid_blk0[0x26 + offset + 1] == 0x40)) {
+			pr_debug("%s: 65MHz: off=[%x] stdblk=[%x]\n",
+				 __func__, offset, std_blk);
+			hdmi_edid_add_sink_video_format(edid_ctrl,
+				HDMI_VFRMT_1024x768p60_4_3);
+			break;
+		}
+		offset += 2;
+
+		std_blk++;
+	}
+
+	/* Established Timing I */
+	if (edid_blk0[0x23] & BIT(0)) {
+		pr_debug("%s: DMT: ETI: HDMI_VFRMT_800x600_4_3\n", __func__);
+		hdmi_edid_add_sink_video_format(edid_ctrl,
+				HDMI_VFRMT_800x600p60_4_3);
+	}
+
+	/* Established Timing II */
+	if (edid_blk0[0x24] & BIT(3)) {
+		pr_debug("%s: DMT: ETII: HDMI_VFRMT_1024x768p60_4_3\n",
+			__func__);
+		hdmi_edid_add_sink_video_format(edid_ctrl,
+				HDMI_VFRMT_1024x768p60_4_3);
+	}
+
+	/* Established Timing III */
+	hdmi_edid_parse_et3(edid_ctrl, data_buf);
+
+	hdmi_edid_get_extended_video_formats(edid_ctrl, data_buf+0x80);
+
+	/* mandatory 3D formats (HDMI spec) when the sink advertises 3D */
+	if (edid_ctrl->present_3d) {
+		if (has60hz_mode) {
+			hdmi_edid_add_sink_3d_format(sink_data,
+				HDMI_VFRMT_1920x1080p24_16_9,
+				FRAME_PACKING | TOP_AND_BOTTOM);
+			hdmi_edid_add_sink_3d_format(sink_data,
+				HDMI_VFRMT_1280x720p60_16_9,
+				FRAME_PACKING | TOP_AND_BOTTOM);
+			hdmi_edid_add_sink_3d_format(sink_data,
+				HDMI_VFRMT_1920x1080i60_16_9,
+				SIDE_BY_SIDE_HALF);
+		}
+
+		if (has50hz_mode) {
+			hdmi_edid_add_sink_3d_format(sink_data,
+				HDMI_VFRMT_1920x1080p24_16_9,
+				FRAME_PACKING | TOP_AND_BOTTOM);
+			hdmi_edid_add_sink_3d_format(sink_data,
+				HDMI_VFRMT_1280x720p50_16_9,
+				FRAME_PACKING | TOP_AND_BOTTOM);
+			hdmi_edid_add_sink_3d_format(sink_data,
+				HDMI_VFRMT_1920x1080i50_16_9,
+				SIDE_BY_SIDE_HALF);
+		}
+
+		/* 3d format described in Vendor Specific Data */
+		rc = hdmi_edid_get_display_vsd_3d_mode(data_buf, sink_data,
+			num_of_cea_blocks);
+		if (!rc)
+			pr_debug("%s: 3D formats in VSD\n", __func__);
+	}
+
+	/*
+	 * Need to add default 640 by 480 timings, in case not described
+	 * in the EDID structure.
+	 * All DTV sink devices should support this mode
+	 */
+	if (!has480p)
+		hdmi_edid_add_sink_video_format(edid_ctrl,
+			HDMI_VFRMT_640x480p60_4_3);
+} /* hdmi_edid_get_display_mode */
+
+/*
+ * hdmi_edid_get_raw_data() - copy raw EDID bytes to a caller buffer.
+ * @input: EDID parser context (struct hdmi_edid_ctrl *)
+ * @buf: destination buffer
+ * @size: requested number of bytes; clamped to the internal buffer size
+ *
+ * Returns 0 on success. NOTE(review): on invalid input -EINVAL is
+ * returned through a u32 return type, so callers see a large positive
+ * value - preserved for interface compatibility.
+ */
+u32 hdmi_edid_get_raw_data(void *input, u8 *buf, u32 size)
+{
+	struct hdmi_edid_ctrl *edid_ctrl = (struct hdmi_edid_ctrl *) input;
+
+	if (!edid_ctrl || !buf) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	/* never copy past the end of the internal EDID buffer */
+	if (size > sizeof(edid_ctrl->edid_buf))
+		size = sizeof(edid_ctrl->edid_buf);
+
+	memcpy(buf, edid_ctrl->edid_buf, size);
+
+	return 0;
+}
+
+/*
+ * hdmi_edid_add_resv_timings() - append reserved (vendor) timings.
+ * @edid_ctrl: EDID parser context
+ *
+ * Walks the reserved VIC range and registers each entry until the
+ * first one that is not a valid reserved timing.
+ */
+static void hdmi_edid_add_resv_timings(struct hdmi_edid_ctrl *edid_ctrl)
+{
+	int vic;
+
+	for (vic = HDMI_VFRMT_RESERVE1; vic <= RESERVE_VFRMT_END; vic++) {
+		if (!hdmi_is_valid_resv_timing(vic))
+			break;
+		hdmi_edid_add_sink_video_format(edid_ctrl, vic);
+	}
+}
+
+/*
+ * hdmi_edid_parser() - top-level EDID parse entry point.
+ * @input: EDID parser context (struct hdmi_edid_ctrl *) with a freshly
+ *         read EDID in edid_ctrl->edid_buf
+ *
+ * Validates block 0, determines DVI vs HDMI sink mode from the CEA
+ * extension's IEEE registration id, extracts capability blocks, then
+ * builds the display mode list.
+ *
+ * Returns 0 on success, -EINVAL on bad input, -EPROTO on a bad EDID
+ * header (in which case a single fallback mode is installed).
+ */
+int hdmi_edid_parser(void *input)
+{
+	u8 *edid_buf = NULL;
+	u32 num_of_cea_blocks = 0;
+	u16 ieee_reg_id;
+	int status = 0;
+	u32 i = 0;
+	struct hdmi_edid_ctrl *edid_ctrl = (struct hdmi_edid_ctrl *)input;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		status = -EINVAL;
+		goto err_invalid_data;
+	}
+
+	/* reset edid data for new hdmi connection */
+	hdmi_edid_reset_parser(edid_ctrl);
+
+	edid_buf = edid_ctrl->edid_buf;
+
+	DEV_DBG("%s: === HDMI EDID BLOCK 0 ===\n", __func__);
+	print_hex_dump(KERN_DEBUG, "HDMI EDID: ", DUMP_PREFIX_NONE, 16, 1,
+		edid_buf, EDID_BLOCK_SIZE, false);
+
+	if (!hdmi_edid_check_header(edid_buf)) {
+		status = -EPROTO;
+		goto err_invalid_header;
+	}
+
+	hdmi_edid_extract_vendor_id(edid_ctrl);
+
+	/* EDID_CEA_EXTENSION_FLAG[0x7E] - CEA extension count byte */
+	num_of_cea_blocks = edid_buf[EDID_BLOCK_SIZE - 2];
+	DEV_DBG("%s: No. of CEA blocks is  [%u]\n", __func__,
+		num_of_cea_blocks);
+
+	/* Find out any CEA extension blocks following block 0 */
+	if (num_of_cea_blocks == 0) {
+		/* No CEA extension: the sink is treated as DVI */
+		edid_ctrl->sink_mode = SINK_MODE_DVI;
+		DEV_DBG("HDMI DVI mode: %s\n",
+			edid_ctrl->sink_mode ? "no" : "yes");
+		goto bail;
+	}
+
+	/* Find out if CEA extension blocks exceeding max limit */
+	if (num_of_cea_blocks >= MAX_EDID_BLOCKS) {
+		DEV_WARN("%s: HDMI EDID exceeded max CEA blocks limit\n",
+				__func__);
+		num_of_cea_blocks = MAX_EDID_BLOCKS - 1;
+	}
+
+	/* check for valid CEA block (extension tag must be 2) */
+	if (edid_buf[EDID_BLOCK_SIZE] != 2) {
+		DEV_ERR("%s: Invalid CEA block\n", __func__);
+		num_of_cea_blocks = 0;
+		goto bail;
+	}
+
+	/* go to the CEA extension edid block */
+	edid_buf += EDID_BLOCK_SIZE;
+
+	/* HDMI sinks carry the HDMI IEEE OUI in their VSDB */
+	ieee_reg_id = hdmi_edid_extract_ieee_reg_id(edid_ctrl, edid_buf);
+	if (ieee_reg_id == EDID_IEEE_REG_ID)
+		edid_ctrl->sink_mode = SINK_MODE_HDMI;
+	else
+		edid_ctrl->sink_mode = SINK_MODE_DVI;
+
+	hdmi_edid_extract_sink_caps(edid_ctrl, edid_buf);
+	hdmi_edid_extract_latency_fields(edid_ctrl, edid_buf);
+	hdmi_edid_extract_speaker_allocation_data(edid_ctrl, edid_buf);
+	hdmi_edid_extract_audio_data_blocks(edid_ctrl, edid_buf);
+	hdmi_edid_extract_3d_present(edid_ctrl, edid_buf);
+	hdmi_edid_extract_extended_data_blocks(edid_ctrl, edid_buf);
+
+bail:
+	for (i = 1; i <= num_of_cea_blocks; i++) {
+		DEV_DBG("%s: === HDMI EDID BLOCK %d ===\n", __func__, i);
+		print_hex_dump(KERN_DEBUG, "HDMI EDID: ", DUMP_PREFIX_NONE,
+			16, 1, edid_ctrl->edid_buf + (i * EDID_BLOCK_SIZE),
+			EDID_BLOCK_SIZE, false);
+	}
+
+	edid_ctrl->cea_blks = num_of_cea_blocks;
+
+	hdmi_edid_get_display_mode(edid_ctrl);
+
+	if (edid_ctrl->keep_resv_timings)
+		hdmi_edid_add_resv_timings(edid_ctrl);
+
+	return 0;
+
+err_invalid_header:
+	/* bad header: fall back to a single preset resolution */
+	edid_ctrl->sink_data.num_of_elements = 1;
+	edid_ctrl->sink_data.disp_mode_list[0].video_format =
+		edid_ctrl->video_resolution;
+	edid_ctrl->sink_data.disp_mode_list[0].rgb_support = true;
+err_invalid_data:
+	return status;
+} /* hdmi_edid_read */
+
+/*
+ * If the sink specified support for both underscan/overscan then, by default,
+ * set the underscan bit. Only checking underscan support for preferred
+ * format and cea formats.
+ */
+u8 hdmi_edid_get_sink_scaninfo(void *input, u32 resolution)
+{
+	struct hdmi_edid_ctrl *edid_ctrl = (struct hdmi_edid_ctrl *)input;
+	u8 scaninfo = 0;
+	int use_ce_scan_info = true;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return scaninfo;
+	}
+
+	if (resolution == edid_ctrl->sink_data.preferred_video_format) {
+		use_ce_scan_info = false;
+		if (edid_ctrl->pt_scan_info == 0) {
+			/*
+			 * No info for the preferred format; fall through to
+			 * the corresponding IT or CE format info below.
+			 */
+			DEV_DBG("%s: No underscan info for preferred V fmt\n",
+				__func__);
+			use_ce_scan_info = true;
+		} else if (edid_ctrl->pt_scan_info == 3) {
+			DEV_DBG("%s: Set underscan bit for preferred V fmt\n",
+				__func__);
+			scaninfo = BIT(1);
+		} else {
+			DEV_DBG("%s: Underscan not set for preferred V fmt\n",
+				__func__);
+		}
+	}
+
+	if (use_ce_scan_info) {
+		if (edid_ctrl->ce_scan_info == 3) {
+			DEV_DBG("%s: Setting underscan bit for CE video fmt\n",
+				__func__);
+			scaninfo |= BIT(1);
+		} else {
+			DEV_DBG("%s: Not setting underscan bit for CE V fmt\n",
+				__func__);
+		}
+	}
+
+	return scaninfo;
+} /* hdmi_edid_get_sink_scaninfo */
+
+/*
+ * hdmi_edid_get_sink_mode() - report DVI vs HDMI sink mode.
+ * @input: EDID parser context (struct hdmi_edid_ctrl *)
+ *
+ * An armed sysfs override (sink_mode != -1) wins over the parsed mode.
+ * Returns 0 on invalid input.
+ */
+u32 hdmi_edid_get_sink_mode(void *input)
+{
+	struct hdmi_edid_ctrl *edid_ctrl = (struct hdmi_edid_ctrl *)input;
+	bool sink_mode;
+	bool use_override;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return 0;
+	}
+
+	use_override = edid_ctrl->edid_override &&
+		(edid_ctrl->override_data.sink_mode != -1);
+
+	sink_mode = use_override ? edid_ctrl->override_data.sink_mode :
+		edid_ctrl->sink_mode;
+
+	return sink_mode;
+} /* hdmi_edid_get_sink_mode */
+
+/*
+ * hdmi_edid_is_s3d_mode_supported() - query 3D support for a mode.
+ * @input: EDID parser context (struct hdmi_edid_ctrl *)
+ * @video_mode: VIC to look up in the sink's mode list
+ * @s3d_mode: 3D structure bit index to test (e.g. frame packing)
+ *
+ * Returns true if the sink's mode list entry for @video_mode has the
+ * requested 3D structure bit set, false otherwise (including when the
+ * mode is not in the list or @input is NULL).
+ */
+bool hdmi_edid_is_s3d_mode_supported(void *input, u32 video_mode, u32 s3d_mode)
+{
+	int i;
+	bool ret = false;
+	struct hdmi_edid_ctrl *edid_ctrl = (struct hdmi_edid_ctrl *)input;
+	struct hdmi_edid_sink_data *sink_data;
+
+	/* guard NULL input like every other accessor in this file */
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return false;
+	}
+
+	sink_data = &edid_ctrl->sink_data;
+	for (i = 0; i < sink_data->num_of_elements; ++i) {
+		if (sink_data->disp_mode_list[i].video_format != video_mode)
+			continue;
+		if (sink_data->disp_mode_list[i].video_3d_format &
+			(1 << s3d_mode))
+			ret = true;
+		else
+			DEV_DBG("%s: return false: vic=%d caps=%x s3d=%d\n",
+				__func__, video_mode,
+				sink_data->disp_mode_list[i].video_3d_format,
+				s3d_mode);
+		break;
+	}
+	return ret;
+}
+
+/*
+ * hdmi_edid_get_scdc_support() - report whether the sink exposes SCDC.
+ * @input: EDID parser context (struct hdmi_edid_ctrl *)
+ *
+ * An armed sysfs scramble override (scramble != -1) takes precedence
+ * over the parsed sink capability. Returns false on invalid input.
+ */
+bool hdmi_edid_get_scdc_support(void *input)
+{
+	struct hdmi_edid_ctrl *edid_ctrl = input;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return false;
+	}
+
+	if (edid_ctrl->edid_override &&
+		(edid_ctrl->override_data.scramble != -1))
+		return edid_ctrl->override_data.scramble;
+
+	return edid_ctrl->sink_caps.scdc_present;
+}
+
+/**
+ * hdmi_edid_sink_scramble_override() - check if override has been enabled
+ * @input: edid data (struct hdmi_edid_ctrl *)
+ *
+ * Return true if scrambling override is enabled (edid_override set and
+ * override_data.scramble armed), false otherwise or on NULL input.
+ */
+bool hdmi_edid_sink_scramble_override(void *input)
+{
+	struct hdmi_edid_ctrl *edid_ctrl = (struct hdmi_edid_ctrl *)input;
+
+	/* guard NULL input; siblings dereference only after this check */
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return false;
+	}
+
+	if (edid_ctrl->edid_override &&
+		(edid_ctrl->override_data.scramble != -1))
+		return true;
+
+	return false;
+}
+
+/*
+ * hdmi_edid_get_sink_scrambler_support() - report scrambling support.
+ * @input: EDID parser context (struct hdmi_edid_ctrl *)
+ *
+ * An armed sysfs scramble override (scramble != -1) takes precedence
+ * over the parsed sink capability. Returns false on invalid input.
+ */
+bool hdmi_edid_get_sink_scrambler_support(void *input)
+{
+	struct hdmi_edid_ctrl *edid_ctrl = (struct hdmi_edid_ctrl *)input;
+	bool scramble_support;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return false;	/* bool function: return false, not 0 */
+	}
+
+	if (edid_ctrl->edid_override &&
+		(edid_ctrl->override_data.scramble != -1))
+		scramble_support = edid_ctrl->override_data.scramble;
+	else
+		scramble_support = edid_ctrl->sink_caps.scramble_support;
+
+	return scramble_support;
+}
+
+/*
+ * hdmi_edid_get_audio_blk() - expose the cached audio EDID blocks.
+ * @input: EDID parser context (struct hdmi_edid_ctrl *)
+ * @blk: out-param filled with pointers into the parser's caches
+ *
+ * No data is copied; @blk references buffers owned by the EDID module.
+ * Returns 0 on success, -EINVAL on invalid input.
+ */
+int hdmi_edid_get_audio_blk(void *input, struct msm_hdmi_audio_edid_blk *blk)
+{
+	struct hdmi_edid_ctrl *edid_ctrl = (struct hdmi_edid_ctrl *)input;
+
+	if (!edid_ctrl || !blk) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	blk->audio_data_blk = edid_ctrl->audio_data_block;
+	blk->audio_data_blk_size = edid_ctrl->adb_size;
+	blk->spk_alloc_data_blk = edid_ctrl->spkr_alloc_data_block;
+	blk->spk_alloc_data_blk_size = edid_ctrl->sadb_size;
+
+	return 0;
+} /* hdmi_edid_get_audio_blk */
+
+/*
+ * hdmi_edid_set_video_resolution() - record the active resolution.
+ * @input: EDID parser context (struct hdmi_edid_ctrl *)
+ * @resolution: VIC to record
+ * @reset: when true, also collapse the sink's mode list to just this
+ *         resolution and make it the default VIC
+ */
+void hdmi_edid_set_video_resolution(void *input, u32 resolution, bool reset)
+{
+	struct hdmi_edid_ctrl *edid_ctrl = (struct hdmi_edid_ctrl *)input;
+	struct hdmi_edid_sink_data *sink;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	edid_ctrl->video_resolution = resolution;
+
+	if (!reset)
+		return;
+
+	edid_ctrl->default_vic = resolution;
+	sink = &edid_ctrl->sink_data;
+	sink->num_of_elements = 1;
+	sink->disp_mode_list[0].video_format = resolution;
+	sink->disp_mode_list[0].rgb_support = true;
+} /* hdmi_edid_set_video_resolution */
+
+/*
+ * hdmi_edid_deinit() - tear down an EDID parser context.
+ * @input: context returned by hdmi_edid_init(); NULL is a no-op
+ */
+void hdmi_edid_deinit(void *input)
+{
+	struct hdmi_edid_ctrl *edid_ctrl = (struct hdmi_edid_ctrl *)input;
+
+	if (!edid_ctrl)
+		return;
+
+	/* remove the sysfs group only if one was registered at init */
+	if (edid_ctrl->init_data.kobj)
+		sysfs_remove_group(edid_ctrl->init_data.kobj,
+			&hdmi_edid_fs_attrs_group);
+
+	kfree(edid_ctrl);
+}
+
+/*
+ * hdmi_edid_init() - allocate and set up an EDID parser context.
+ * @idata: init parameters; on success, idata->buf/buf_size are set to
+ *         the module-owned EDID buffer for the client to fill
+ *
+ * Returns an opaque context pointer, or NULL on bad input / OOM.
+ * A sysfs group creation failure is logged but not fatal.
+ */
+void *hdmi_edid_init(struct hdmi_edid_init_data *idata)
+{
+	struct hdmi_edid_ctrl *edid_ctrl;
+
+	if (!idata) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return NULL;
+	}
+
+	edid_ctrl = kzalloc(sizeof(*edid_ctrl), GFP_KERNEL);
+	if (!edid_ctrl)
+		return NULL;
+
+	edid_ctrl->init_data = *idata;
+
+	if (!idata->kobj)
+		DEV_DBG("%s: kobj not provided\n", __func__);
+	else if (sysfs_create_group(idata->kobj, &hdmi_edid_fs_attrs_group))
+		DEV_ERR("%s: EDID sysfs create failed\n", __func__);
+
+	/* provide edid buffer to the client */
+	idata->buf = edid_ctrl->edid_buf;
+	idata->buf_size = sizeof(edid_ctrl->edid_buf);
+
+	return (void *)edid_ctrl;
+}
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_edid.h b/drivers/video/fbdev/msm/mdss_hdmi_edid.h
new file mode 100644
index 0000000..69c3eb6
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_hdmi_edid.h
@@ -0,0 +1,47 @@
+/* Copyright (c) 2010-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __HDMI_EDID_H__
+#define __HDMI_EDID_H__
+
+#include <linux/msm_hdmi.h>
+#include "mdss_hdmi_util.h"
+
+#define EDID_BLOCK_SIZE 0x80
+#define EDID_BLOCK_ADDR 0xA0
+#define MAX_EDID_BLOCKS 5
+
+/* Parameters handed to hdmi_edid_init() by the HDMI tx driver. */
+struct hdmi_edid_init_data {
+	struct kobject *kobj;		/* sysfs parent; may be NULL */
+	struct hdmi_util_ds_data ds_data; /* downstream device data - see mdss_hdmi_util.h */
+	u32 max_pclk_khz;		/* presumably max pixel clock in kHz - confirm with caller */
+	u8 *buf;			/* out: module-owned EDID buffer */
+	u32 buf_size;			/* out: size of @buf in bytes */
+};
+
+int hdmi_edid_parser(void *edid_ctrl);
+u32 hdmi_edid_get_raw_data(void *edid_ctrl, u8 *buf, u32 size);
+u8 hdmi_edid_get_sink_scaninfo(void *edid_ctrl, u32 resolution);
+u32 hdmi_edid_get_sink_mode(void *edid_ctrl);
+bool hdmi_edid_sink_scramble_override(void *input);
+bool hdmi_edid_get_sink_scrambler_support(void *input);
+bool hdmi_edid_get_scdc_support(void *input);
+int hdmi_edid_get_audio_blk(void *edid_ctrl,
+	struct msm_hdmi_audio_edid_blk *blk);
+void hdmi_edid_set_video_resolution(void *edid_ctrl, u32 resolution,
+	bool reset);
+void hdmi_edid_deinit(void *edid_ctrl);
+void *hdmi_edid_init(struct hdmi_edid_init_data *init_data);
+bool hdmi_edid_is_s3d_mode_supported(void *input,
+	u32 video_mode, u32 s3d_mode);
+
+#endif /* __HDMI_EDID_H__ */
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_hdcp.c b/drivers/video/fbdev/msm/mdss_hdmi_hdcp.c
new file mode 100644
index 0000000..bbdf485
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_hdmi_hdcp.c
@@ -0,0 +1,1699 @@
+/* Copyright (c) 2010-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <soc/qcom/scm.h>
+#include <linux/hdcp_qseecom.h>
+#include "mdss_hdmi_hdcp.h"
+#include "video/msm_hdmi_hdcp_mgr.h"
+
+#define HDCP_STATE_NAME (hdcp_state_name(hdcp_ctrl->hdcp_state))
+
+/* HDCP Keys state based on HDMI_HDCP_LINK0_STATUS:KEYS_STATE */
+#define HDCP_KEYS_STATE_NO_KEYS		0
+#define HDCP_KEYS_STATE_NOT_CHECKED	1
+#define HDCP_KEYS_STATE_CHECKING	2
+#define HDCP_KEYS_STATE_VALID		3
+#define HDCP_KEYS_STATE_AKSV_NOT_VALID	4
+#define HDCP_KEYS_STATE_CHKSUM_MISMATCH	5
+#define HDCP_KEYS_STATE_PROD_AKSV	6
+#define HDCP_KEYS_STATE_RESERVED	7
+
+#define TZ_HDCP_CMD_ID 0x00004401
+#define HDCP_REG_ENABLE 0x01
+#define HDCP_REG_DISABLE 0x00
+
+#define HDCP_INT_CLR (BIT(1) | BIT(5) | BIT(7) | BIT(9) | BIT(13))
+
+/* One register entry exchanged with TZ via the SCM HDCP call. */
+struct hdmi_hdcp_reg_data {
+	u32 reg_id;	/* presumably TZ-side register identifier - confirm */
+	u32 off;	/* register offset */
+	char *name;	/* printable name, for logging */
+	u32 reg_val;	/* value to program */
+};
+
+/* Per-instance state of the HDCP 1.x engine. */
+struct hdmi_hdcp_ctrl {
+	u32 auth_retries;	/* authentication retry counter */
+	u32 tp_msgid;		/* topology message id */
+	u32 tz_hdcp;		/* nonzero when TZ programs HDCP registers */
+	enum hdmi_hdcp_state hdcp_state;	/* see hdcp_state_name() */
+	struct HDCP_V2V1_MSG_TOPOLOGY cached_tp;	/* last reported topology */
+	struct HDCP_V2V1_MSG_TOPOLOGY current_tp;	/* topology being built */
+	struct delayed_work hdcp_auth_work;	/* deferred authentication */
+	struct work_struct hdcp_int_work;	/* interrupt bottom half */
+	struct completion r0_checked;	/* signals R0' comparison done */
+	struct hdmi_hdcp_init_data init_data;	/* io regions, callbacks */
+	struct hdmi_hdcp_ops *ops;	/* exported operations */
+	bool hdmi_tx_ver_4;	/* HDMI TX core v4: shifted KSV fuse offsets */
+};
+
+/* Map an authentication state to a printable token for log messages. */
+const char *hdcp_state_name(enum hdmi_hdcp_state hdcp_state)
+{
+	if (hdcp_state == HDCP_STATE_INACTIVE)
+		return "HDCP_STATE_INACTIVE";
+	if (hdcp_state == HDCP_STATE_AUTHENTICATING)
+		return "HDCP_STATE_AUTHENTICATING";
+	if (hdcp_state == HDCP_STATE_AUTHENTICATED)
+		return "HDCP_STATE_AUTHENTICATED";
+	if (hdcp_state == HDCP_STATE_AUTH_FAIL)
+		return "HDCP_STATE_AUTH_FAIL";
+
+	return "???";
+} /* hdcp_state_name */
+
+/* Count the set bits across @len bytes of @array (AKSV validation). */
+static int hdmi_hdcp_count_one(u8 *array, u8 len)
+{
+	int idx, count = 0;
+
+	for (idx = 0; idx < len; idx++) {
+		u8 byte = array[idx];
+
+		while (byte) {
+			count += byte & 0x1;
+			byte >>= 1;
+		}
+	}
+
+	return count;
+} /* hdmi_hdcp_count_one */
+
+/*
+ * reset_hdcp_ddc_failures() - clear latched HDCP DDC error conditions.
+ * @hdcp_ctrl: HDCP context with a valid core_io region
+ *
+ * Reads HDMI_HDCP_DDC_STATUS; on FAIL (bit 16) it disables HDCP DDC,
+ * ACKs the failure, verifies it cleared, and re-enables DDC. On NACK0
+ * (bit 14) it soft-resets the DDC software status and the DDC
+ * controller. Register write ordering here is deliberate.
+ */
+static void reset_hdcp_ddc_failures(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int hdcp_ddc_ctrl1_reg;
+	int hdcp_ddc_status;
+	int failure;
+	int nack0;
+	struct dss_io_data *io;
+
+	if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	io = hdcp_ctrl->init_data.core_io;
+
+	/* Check for any DDC transfer failures */
+	hdcp_ddc_status = DSS_REG_R(io, HDMI_HDCP_DDC_STATUS);
+	failure = (hdcp_ddc_status >> 16) & 0x1;
+	nack0 = (hdcp_ddc_status >> 14) & 0x1;
+	DEV_DBG("%s: %s: On Entry: HDCP_DDC_STATUS=0x%x, FAIL=%d, NACK0=%d\n",
+		__func__, HDCP_STATE_NAME, hdcp_ddc_status, failure, nack0);
+
+	if (failure == 0x1) {
+		/*
+		 * Indicates that the last HDCP HW DDC transfer failed.
+		 * This occurs when a transfer is attempted with HDCP DDC
+		 * disabled (HDCP_DDC_DISABLE=1) or the number of retries
+		 * matches HDCP_DDC_RETRY_CNT.
+		 * Failure occurred,  let's clear it.
+		 */
+		DEV_DBG("%s: %s: DDC failure detected.HDCP_DDC_STATUS=0x%08x\n",
+			 __func__, HDCP_STATE_NAME, hdcp_ddc_status);
+
+		/* First, Disable DDC */
+		DSS_REG_W(io, HDMI_HDCP_DDC_CTRL_0, BIT(0));
+
+		/* ACK the Failure to Clear it */
+		hdcp_ddc_ctrl1_reg = DSS_REG_R(io, HDMI_HDCP_DDC_CTRL_1);
+		DSS_REG_W(io, HDMI_HDCP_DDC_CTRL_1,
+			hdcp_ddc_ctrl1_reg | BIT(0));
+
+		/* Check if the FAILURE got Cleared */
+		hdcp_ddc_status = DSS_REG_R(io, HDMI_HDCP_DDC_STATUS);
+		hdcp_ddc_status = (hdcp_ddc_status >> 16) & BIT(0);
+		if (hdcp_ddc_status == 0x0)
+			DEV_DBG("%s: %s: HDCP DDC Failure cleared\n", __func__,
+				HDCP_STATE_NAME);
+		else
+			DEV_WARN("%s: %s: Unable to clear HDCP DDC Failure",
+				__func__, HDCP_STATE_NAME);
+
+		/* Re-Enable HDCP DDC */
+		DSS_REG_W(io, HDMI_HDCP_DDC_CTRL_0, 0);
+	}
+
+	if (nack0 == 0x1) {
+		DEV_DBG("%s: %s: Before: HDMI_DDC_SW_STATUS=0x%08x\n", __func__,
+			HDCP_STATE_NAME, DSS_REG_R(io, HDMI_DDC_SW_STATUS));
+		/* Reset HDMI DDC software status */
+		DSS_REG_W_ND(io, HDMI_DDC_CTRL,
+			DSS_REG_R(io, HDMI_DDC_CTRL) | BIT(3));
+		msleep(20);
+		DSS_REG_W_ND(io, HDMI_DDC_CTRL,
+			DSS_REG_R(io, HDMI_DDC_CTRL) & ~(BIT(3)));
+
+		/* Reset HDMI DDC Controller */
+		DSS_REG_W_ND(io, HDMI_DDC_CTRL,
+			DSS_REG_R(io, HDMI_DDC_CTRL) | BIT(1));
+		msleep(20);
+		DSS_REG_W_ND(io, HDMI_DDC_CTRL,
+			DSS_REG_R(io, HDMI_DDC_CTRL) & ~BIT(1));
+		DEV_DBG("%s: %s: After: HDMI_DDC_SW_STATUS=0x%08x\n", __func__,
+			HDCP_STATE_NAME, DSS_REG_R(io, HDMI_DDC_SW_STATUS));
+	}
+
+	/* re-read status so the exit log reflects the cleanup result */
+	hdcp_ddc_status = DSS_REG_R(io, HDMI_HDCP_DDC_STATUS);
+
+	failure = (hdcp_ddc_status >> 16) & BIT(0);
+	nack0 = (hdcp_ddc_status >> 14) & BIT(0);
+	DEV_DBG("%s: %s: On Exit: HDCP_DDC_STATUS=0x%x, FAIL=%d, NACK0=%d\n",
+		__func__, HDCP_STATE_NAME, hdcp_ddc_status, failure, nack0);
+} /* reset_hdcp_ddc_failures */
+
+/*
+ * hdmi_hdcp_hw_ddc_clean() - wait for the DDC HW engine to go idle.
+ * @hdcp_ctrl: HDCP context with a mapped core_io region
+ *
+ * Polls HDMI_HDCP_DDC_STATUS and HDMI_DDC_HW_STATUS every 20 ms (up to
+ * 100 iterations) until no transfer is pending and the HW state machine
+ * is idle. Returns silently on timeout; callers only need best-effort
+ * quiescence before reprogramming.
+ */
+static void hdmi_hdcp_hw_ddc_clean(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct dss_io_data *io = NULL;
+	u32 hdcp_ddc_status, ddc_hw_status;
+	u32 ddc_xfer_done, ddc_xfer_req;
+	u32 ddc_hw_req, ddc_hw_not_idle;
+	bool ddc_hw_not_ready, xfer_not_done, hw_not_done;
+	u32 timeout_count;
+
+	if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	io = hdcp_ctrl->init_data.core_io;
+	if (!io->base) {
+		DEV_ERR("%s: core io not inititalized\n", __func__);
+		return;
+	}
+
+	/* Wait to be clean on DDC HW engine */
+	timeout_count = 100;
+	do {
+		hdcp_ddc_status = DSS_REG_R(io, HDMI_HDCP_DDC_STATUS);
+		ddc_xfer_req    = hdcp_ddc_status & BIT(4);
+		ddc_xfer_done   = hdcp_ddc_status & BIT(10);
+
+		ddc_hw_status   = DSS_REG_R(io, HDMI_DDC_HW_STATUS);
+		ddc_hw_req      = ddc_hw_status & BIT(16);
+		ddc_hw_not_idle = ddc_hw_status & (BIT(0) | BIT(1));
+
+		/* ddc transfer was requested but not completed */
+		xfer_not_done = ddc_xfer_req && !ddc_xfer_done;
+
+		/* ddc status is not idle or a hw request pending */
+		hw_not_done = ddc_hw_not_idle || ddc_hw_req;
+
+		ddc_hw_not_ready = xfer_not_done || hw_not_done;
+
+		DEV_DBG("%s: %s: timeout count(%d): ddc hw%sready\n",
+			__func__, HDCP_STATE_NAME, timeout_count,
+				ddc_hw_not_ready ? " not " : " ");
+		DEV_DBG("hdcp_ddc_status[0x%x], ddc_hw_status[0x%x]\n",
+				hdcp_ddc_status, ddc_hw_status);
+		if (ddc_hw_not_ready)
+			msleep(20);
+	} while (ddc_hw_not_ready && --timeout_count);
+} /* hdmi_hdcp_hw_ddc_clean */
+
+/*
+ * hdcp_scm_call() - program HDCP registers through TrustZone.
+ * @req: array of SCM_HDCP_MAX_REG address/value pairs
+ * @resp: out-param receiving the TZ response word
+ *
+ * Uses the legacy scm_call() ABI on pre-ARMv8 SCM, scm_call2() with a
+ * flattened descriptor otherwise. Returns 0 on success or the scm_call
+ * error code; *resp is only valid on success.
+ */
+static int hdcp_scm_call(struct scm_hdcp_req *req, u32 *resp)
+{
+	int ret = 0;
+
+	if (!is_scm_armv8()) {
+		/*
+		 * Pass the response buffer itself, not its address:
+		 * resp is already a pointer to the caller's u32, and
+		 * passing &resp would make TZ write over the local
+		 * pointer instead of the caller's variable.
+		 */
+		ret = scm_call(SCM_SVC_HDCP, SCM_CMD_HDCP, (void *) req,
+			     SCM_HDCP_MAX_REG * sizeof(struct scm_hdcp_req),
+			     resp, sizeof(*resp));
+	} else {
+		struct scm_desc desc;
+
+		/* flatten the 5 reg/value pairs into the 10 scm args */
+		desc.args[0] = req[0].addr;
+		desc.args[1] = req[0].val;
+		desc.args[2] = req[1].addr;
+		desc.args[3] = req[1].val;
+		desc.args[4] = req[2].addr;
+		desc.args[5] = req[2].val;
+		desc.args[6] = req[3].addr;
+		desc.args[7] = req[3].val;
+		desc.args[8] = req[4].addr;
+		desc.args[9] = req[4].val;
+		desc.arginfo = SCM_ARGS(10);
+
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_HDCP, SCM_CMD_HDCP),
+				&desc);
+		*resp = desc.ret[0];
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
+/*
+ * hdmi_hdcp_load_keys() - load the AKSV and start HDCP authentication.
+ * @input: HDCP context (struct hdmi_hdcp_ctrl *)
+ *
+ * Obtains the AKSV either from the secure SW-key path (on hardware
+ * whose SEC_CTRL_HW_VERSION qualifies and the fuse bit 23 is clear) or
+ * from the QFPROM KSV fuses, validates it has exactly 20 set bits,
+ * programs it with the entropy seeds, enables the HDCP engine and
+ * moves the state machine to AUTHENTICATING.
+ *
+ * Returns 0 on success, -EINVAL on bad input, wrong state, SW-key
+ * failure, or AKSV bit-count mismatch.
+ */
+static int hdmi_hdcp_load_keys(void *input)
+{
+	int rc = 0;
+	bool use_sw_keys = false;
+	u32 reg_val;
+	u32 ksv_lsb_addr, ksv_msb_addr;
+	u32 aksv_lsb, aksv_msb;
+	u8 aksv[5];
+	struct dss_io_data *io;
+	struct dss_io_data *qfprom_io;
+	struct hdmi_hdcp_ctrl *hdcp_ctrl = input;
+
+	if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io ||
+		!hdcp_ctrl->init_data.qfprom_io) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* keys may only be (re)loaded from INACTIVE or AUTH_FAIL */
+	if ((hdcp_ctrl->hdcp_state != HDCP_STATE_INACTIVE) &&
+		(hdcp_ctrl->hdcp_state != HDCP_STATE_AUTH_FAIL)) {
+		DEV_ERR("%s: %s: invalid state. returning\n", __func__,
+			HDCP_STATE_NAME);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	io = hdcp_ctrl->init_data.core_io;
+	qfprom_io = hdcp_ctrl->init_data.qfprom_io;
+
+	/* On compatible hardware, use SW keys */
+	reg_val = DSS_REG_R(qfprom_io, SEC_CTRL_HW_VERSION);
+	if (reg_val >= HDCP_SEL_MIN_SEC_VERSION) {
+		reg_val = DSS_REG_R(qfprom_io,
+			QFPROM_RAW_FEAT_CONFIG_ROW0_MSB +
+			QFPROM_RAW_VERSION_4);
+
+		/* fuse bit 23 clear means the SW-key path is allowed */
+		if (!(reg_val & BIT(23)))
+			use_sw_keys = true;
+	}
+
+	if (use_sw_keys) {
+		if (hdcp1_set_keys(&aksv_msb, &aksv_lsb)) {
+			pr_err("%s: setting hdcp SW keys failed\n", __func__);
+			rc = -EINVAL;
+			goto end;
+		}
+	} else {
+		/* Fetch aksv from QFPROM, this info should be public. */
+		ksv_lsb_addr = HDCP_KSV_LSB;
+		ksv_msb_addr = HDCP_KSV_MSB;
+
+		/* v4 TX cores keep the KSV fuses at a shifted offset */
+		if (hdcp_ctrl->hdmi_tx_ver_4) {
+			ksv_lsb_addr += HDCP_KSV_VERSION_4_OFFSET;
+			ksv_msb_addr += HDCP_KSV_VERSION_4_OFFSET;
+		}
+
+		aksv_lsb = DSS_REG_R(qfprom_io, ksv_lsb_addr);
+		aksv_msb = DSS_REG_R(qfprom_io, ksv_msb_addr);
+	}
+
+	DEV_DBG("%s: %s: AKSV=%02x%08x\n", __func__, HDCP_STATE_NAME,
+		aksv_msb, aksv_lsb);
+
+	/* split the 40-bit AKSV into bytes for the ones-count check */
+	aksv[0] =  aksv_lsb        & 0xFF;
+	aksv[1] = (aksv_lsb >> 8)  & 0xFF;
+	aksv[2] = (aksv_lsb >> 16) & 0xFF;
+	aksv[3] = (aksv_lsb >> 24) & 0xFF;
+	aksv[4] =  aksv_msb        & 0xFF;
+
+	/* check there are 20 ones in AKSV */
+	if (hdmi_hdcp_count_one(aksv, 5) != 20) {
+		DEV_ERR("%s: AKSV bit count failed\n", __func__);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	DSS_REG_W(io, HDMI_HDCP_SW_LOWER_AKSV, aksv_lsb);
+	DSS_REG_W(io, HDMI_HDCP_SW_UPPER_AKSV, aksv_msb);
+
+	/* Setup seed values for random number An */
+	DSS_REG_W(io, HDMI_HDCP_ENTROPY_CTRL0, 0xB1FFB0FF);
+	DSS_REG_W(io, HDMI_HDCP_ENTROPY_CTRL1, 0xF00DFACE);
+
+	/* Disable the RngCipher state */
+	DSS_REG_W(io, HDMI_HDCP_DEBUG_CTRL,
+		DSS_REG_R(io, HDMI_HDCP_DEBUG_CTRL) & ~(BIT(2)));
+
+	/* make sure hw is programmed */
+	wmb();
+
+	/* enable the HDCP engine */
+	DSS_REG_W(io, HDMI_HDCP_CTRL, BIT(0));
+
+	hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATING;
+end:
+	return rc;
+}
+
+/*
+ * hdmi_hdcp_authentication_part1() - first part of HDCP 1.x authentication
+ * @hdcp_ctrl: driver context, must be in HDCP_STATE_AUTHENTICATING
+ *
+ * Implements the R0 exchange: read BCAPS/BKSV from the sink over DDC,
+ * hand An/AKSV to the sink, validate BKSV (twenty 1-bits), then compare
+ * the sink's R0' against the locally computed R0.  BCAPS/BKSV are written
+ * to the HDCP core directly, via SCM (tz_hdcp) or via the Tx v4 HLOS
+ * shadow registers depending on the platform.  On success, HDCP
+ * encryption is enabled.
+ *
+ * Return: 0 on success, negative errno on DDC, timeout or validation
+ * failure.
+ */
+static int hdmi_hdcp_authentication_part1(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc;
+	u32 link0_aksv_0, link0_aksv_1;
+	u32 link0_bksv_0, link0_bksv_1;
+	u32 link0_an_0, link0_an_1;
+	u32 timeout_count;
+	bool is_match;
+	struct dss_io_data *io;
+	struct dss_io_data *hdcp_io;
+	u8 aksv[5], *bksv = NULL;
+	u8 an[8];
+	u8 bcaps = 0;
+	struct hdmi_tx_ddc_data ddc_data;
+	u32 link0_status = 0, an_ready, keys_state;
+	u8 buf[0xFF];
+
+	struct scm_hdcp_req scm_buf[SCM_HDCP_MAX_REG];
+	u32 phy_addr;
+	u32 ret  = 0;
+	u32 resp = 0;
+
+	if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io ||
+		!hdcp_ctrl->init_data.qfprom_io) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	phy_addr = hdcp_ctrl->init_data.phy_addr;
+	bksv = hdcp_ctrl->current_tp.bksv;
+	io = hdcp_ctrl->init_data.core_io;
+	hdcp_io = hdcp_ctrl->init_data.hdcp_io;
+
+	if (hdcp_ctrl->hdcp_state != HDCP_STATE_AUTHENTICATING) {
+		DEV_ERR("%s: %s: invalid state. returning\n", __func__,
+			HDCP_STATE_NAME);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	/* Clear any DDC failures from previous tries */
+	reset_hdcp_ddc_failures(hdcp_ctrl);
+
+	/*
+	 * Read BCAPS
+	 * We need to first try to read an HDCP register on the sink to see if
+	 * the sink is ready for HDCP authentication
+	 */
+	memset(&ddc_data, 0, sizeof(ddc_data));
+	ddc_data.dev_addr = 0x74;
+	ddc_data.offset = 0x40;
+	ddc_data.data_buf = &bcaps;
+	ddc_data.data_len = 1;
+	ddc_data.request_len = 1;
+	ddc_data.retry = 5;
+	ddc_data.what = "Bcaps";
+
+	hdcp_ctrl->init_data.ddc_ctrl->ddc_data = ddc_data;
+
+	rc = hdmi_ddc_read(hdcp_ctrl->init_data.ddc_ctrl);
+	if (rc) {
+		DEV_ERR("%s: %s: BCAPS read failed\n", __func__,
+			HDCP_STATE_NAME);
+		goto error;
+	}
+	DEV_DBG("%s: %s: BCAPS=%02x\n", __func__, HDCP_STATE_NAME, bcaps);
+
+	/* receiver (0), repeater (1) */
+	hdcp_ctrl->current_tp.ds_type =
+		(bcaps & BIT(6)) >> 6 ? DS_REPEATER : DS_RECEIVER;
+
+	/* Write BCAPS to the hardware */
+	if (hdcp_ctrl->tz_hdcp) {
+		memset(scm_buf, 0x00, sizeof(scm_buf));
+
+		scm_buf[0].addr = phy_addr + HDMI_HDCP_RCVPORT_DATA12;
+		scm_buf[0].val  = bcaps;
+
+		ret = hdcp_scm_call(scm_buf, &resp);
+		if (ret || resp) {
+			DEV_ERR("%s: error: scm_call ret = %d, resp = %d\n",
+				__func__, ret, resp);
+			rc = -EINVAL;
+			goto error;
+		}
+	} else if (hdcp_ctrl->hdmi_tx_ver_4) {
+		DSS_REG_W(hdcp_io, HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA12,
+				bcaps);
+	} else {
+		DSS_REG_W(io, HDMI_HDCP_RCVPORT_DATA12, bcaps);
+	}
+
+	/*
+	 * Wait for HDCP keys to be checked and validated.
+	 * link0_status starts at 0, so keys_state is initially invalid and
+	 * the register is always polled at least once.
+	 */
+	timeout_count = 100;
+	keys_state = (link0_status >> 28) & 0x7;
+	while ((keys_state != HDCP_KEYS_STATE_VALID) &&
+		--timeout_count) {
+		link0_status = DSS_REG_R(io, HDMI_HDCP_LINK0_STATUS);
+		keys_state = (link0_status >> 28) & 0x7;
+		/*
+		 * Fixed format string: the original read
+		 * "s=%d\n, l0=%0x08x", which put the newline mid-message
+		 * and used a malformed specifier ("%0x" followed by a
+		 * literal "08x"), so link0_status printed garbled.
+		 */
+		DEV_DBG("%s: %s: Keys not ready(%d). s=%d, l0=0x%08x\n",
+			__func__, HDCP_STATE_NAME, timeout_count,
+			keys_state, link0_status);
+		msleep(20);
+	}
+
+	if (!timeout_count) {
+		DEV_ERR("%s: %s: Invalid Keys State: %d\n", __func__,
+			HDCP_STATE_NAME, keys_state);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	/*
+	 * 1.1_Features turned off by default.
+	 * No need to write AInfo since 1.1_Features is disabled.
+	 */
+	DSS_REG_W(io, HDMI_HDCP_RCVPORT_DATA4, 0);
+
+	/* Wait for An0 and An1 bit to be ready */
+	timeout_count = 100;
+	do {
+		link0_status = DSS_REG_R(io, HDMI_HDCP_LINK0_STATUS);
+		an_ready = (link0_status & BIT(8)) && (link0_status & BIT(9));
+		if (!an_ready) {
+			DEV_DBG("%s: %s: An not ready(%d). l0_status=0x%08x\n",
+				__func__, HDCP_STATE_NAME, timeout_count,
+				link0_status);
+			msleep(20);
+		}
+	} while (!an_ready && --timeout_count);
+
+	if (!timeout_count) {
+		rc = -ETIMEDOUT;
+		DEV_ERR("%s: %s: timedout, An0=%ld, An1=%ld\n", __func__,
+			HDCP_STATE_NAME, (link0_status & BIT(8)) >> 8,
+			(link0_status & BIT(9)) >> 9);
+		goto error;
+	}
+
+	/* As per hardware recommendations, wait before reading An */
+	msleep(20);
+
+	/* Read An0 and An1 */
+	link0_an_0 = DSS_REG_R(io, HDMI_HDCP_RCVPORT_DATA5);
+	link0_an_1 = DSS_REG_R(io, HDMI_HDCP_RCVPORT_DATA6);
+
+	/* Read AKSV */
+	link0_aksv_0 = DSS_REG_R(io, HDMI_HDCP_RCVPORT_DATA3);
+	link0_aksv_1 = DSS_REG_R(io, HDMI_HDCP_RCVPORT_DATA4);
+
+	/* Copy An and AKSV to byte arrays for transmission */
+	aksv[0] =  link0_aksv_0        & 0xFF;
+	aksv[1] = (link0_aksv_0 >> 8)  & 0xFF;
+	aksv[2] = (link0_aksv_0 >> 16) & 0xFF;
+	aksv[3] = (link0_aksv_0 >> 24) & 0xFF;
+	aksv[4] =  link0_aksv_1        & 0xFF;
+
+	an[0] =  link0_an_0        & 0xFF;
+	an[1] = (link0_an_0 >> 8)  & 0xFF;
+	an[2] = (link0_an_0 >> 16) & 0xFF;
+	an[3] = (link0_an_0 >> 24) & 0xFF;
+	an[4] =  link0_an_1        & 0xFF;
+	an[5] = (link0_an_1 >> 8)  & 0xFF;
+	an[6] = (link0_an_1 >> 16) & 0xFF;
+	an[7] = (link0_an_1 >> 24) & 0xFF;
+
+	/* Write An to offset 0x18 */
+	memset(&ddc_data, 0, sizeof(ddc_data));
+	ddc_data.dev_addr = 0x74;
+	ddc_data.offset = 0x18;
+	ddc_data.data_buf = an;
+	ddc_data.data_len = 8;
+	ddc_data.what = "An";
+	hdcp_ctrl->init_data.ddc_ctrl->ddc_data = ddc_data;
+
+	rc = hdmi_ddc_write(hdcp_ctrl->init_data.ddc_ctrl);
+	if (rc) {
+		DEV_ERR("%s: %s: An write failed\n", __func__, HDCP_STATE_NAME);
+		goto error;
+	}
+
+	/* Write AKSV to offset 0x10 */
+	memset(&ddc_data, 0, sizeof(ddc_data));
+	ddc_data.dev_addr = 0x74;
+	ddc_data.offset = 0x10;
+	ddc_data.data_buf = aksv;
+	ddc_data.data_len = 5;
+	ddc_data.what = "Aksv";
+	hdcp_ctrl->init_data.ddc_ctrl->ddc_data = ddc_data;
+
+	rc = hdmi_ddc_write(hdcp_ctrl->init_data.ddc_ctrl);
+	if (rc) {
+		DEV_ERR("%s: %s: AKSV write failed\n", __func__,
+			HDCP_STATE_NAME);
+		goto error;
+	}
+	DEV_DBG("%s: %s: Link0-AKSV=%02x%08x\n", __func__,
+		HDCP_STATE_NAME, link0_aksv_1 & 0xFF, link0_aksv_0);
+
+	/* Read BKSV at offset 0x00 */
+	memset(&ddc_data, 0, sizeof(ddc_data));
+	ddc_data.dev_addr = 0x74;
+	ddc_data.offset = 0x00;
+	ddc_data.data_buf = bksv;
+	ddc_data.data_len = 5;
+	ddc_data.request_len = 5;
+	ddc_data.retry = 5;
+	ddc_data.what = "Bksv";
+
+	hdcp_ctrl->init_data.ddc_ctrl->ddc_data = ddc_data;
+
+	rc = hdmi_ddc_read(hdcp_ctrl->init_data.ddc_ctrl);
+	if (rc) {
+		DEV_ERR("%s: %s: BKSV read failed\n", __func__,
+			HDCP_STATE_NAME);
+		goto error;
+	}
+
+	/* check there are 20 ones in BKSV */
+	if (hdmi_hdcp_count_one(bksv, 5) != 20) {
+		DEV_ERR("%s: %s: BKSV doesn't have 20 1's and 20 0's\n",
+			__func__, HDCP_STATE_NAME);
+		DEV_ERR("%s: %s: BKSV chk fail. BKSV=%02x%02x%02x%02x%02x\n",
+			__func__, HDCP_STATE_NAME, bksv[4], bksv[3], bksv[2],
+			bksv[1], bksv[0]);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	/* Pack BKSV bytes back into the 32+8 bit register layout. */
+	link0_bksv_0 = bksv[3];
+	link0_bksv_0 = (link0_bksv_0 << 8) | bksv[2];
+	link0_bksv_0 = (link0_bksv_0 << 8) | bksv[1];
+	link0_bksv_0 = (link0_bksv_0 << 8) | bksv[0];
+	link0_bksv_1 = bksv[4];
+	DEV_DBG("%s: %s: BKSV=%02x%08x\n", __func__, HDCP_STATE_NAME,
+		link0_bksv_1, link0_bksv_0);
+
+	if (hdcp_ctrl->tz_hdcp) {
+		memset(scm_buf, 0x00, sizeof(scm_buf));
+
+		scm_buf[0].addr = phy_addr + HDMI_HDCP_RCVPORT_DATA0;
+		scm_buf[0].val  = link0_bksv_0;
+		scm_buf[1].addr = phy_addr + HDMI_HDCP_RCVPORT_DATA1;
+		scm_buf[1].val  = link0_bksv_1;
+
+		ret = hdcp_scm_call(scm_buf, &resp);
+
+		if (ret || resp) {
+			DEV_ERR("%s: error: scm_call ret = %d, resp = %d\n",
+				__func__, ret, resp);
+			rc = -EINVAL;
+			goto error;
+		}
+	} else if (hdcp_ctrl->hdmi_tx_ver_4) {
+		DSS_REG_W(hdcp_io, HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA0,
+			link0_bksv_0);
+		DSS_REG_W(hdcp_io, HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA1,
+			link0_bksv_1);
+	} else {
+		DSS_REG_W(io, HDMI_HDCP_RCVPORT_DATA0, link0_bksv_0);
+		DSS_REG_W(io, HDMI_HDCP_RCVPORT_DATA1, link0_bksv_1);
+	}
+
+	/* Enable HDCP interrupts and ack/clear any stale interrupts */
+	DSS_REG_W(io, HDMI_HDCP_INT_CTRL, 0xE6);
+
+	/*
+	 * HDCP Compliance Test case 1A-01:
+	 * Wait here at least 100ms before reading R0'
+	 */
+	msleep(125);
+
+	/* Read R0' at offset 0x08 */
+	memset(buf, 0, sizeof(buf));
+	memset(&ddc_data, 0, sizeof(ddc_data));
+	ddc_data.dev_addr = 0x74;
+	ddc_data.offset = 0x08;
+	ddc_data.data_buf = buf;
+	ddc_data.data_len = 2;
+	ddc_data.request_len = 2;
+	ddc_data.retry = 5;
+	ddc_data.what = "R0'";
+
+	hdcp_ctrl->init_data.ddc_ctrl->ddc_data = ddc_data;
+
+	rc = hdmi_ddc_read(hdcp_ctrl->init_data.ddc_ctrl);
+	if (rc) {
+		DEV_ERR("%s: %s: R0' read failed\n", __func__, HDCP_STATE_NAME);
+		goto error;
+	}
+	DEV_DBG("%s: %s: R0'=%02x%02x\n", __func__, HDCP_STATE_NAME,
+		buf[1], buf[0]);
+
+	/* Write R0' to HDCP registers and check to see if it is a match */
+	reinit_completion(&hdcp_ctrl->r0_checked);
+	DSS_REG_W(io, HDMI_HDCP_RCVPORT_DATA2_0, (((u32)buf[1]) << 8) | buf[0]);
+	timeout_count = wait_for_completion_timeout(
+		&hdcp_ctrl->r0_checked, HZ*2);
+	link0_status = DSS_REG_R(io, HDMI_HDCP_LINK0_STATUS);
+	is_match = link0_status & BIT(12);
+	if (!is_match) {
+		DEV_DBG("%s: %s: Link0_Status=0x%08x\n", __func__,
+			HDCP_STATE_NAME, link0_status);
+		if (!timeout_count) {
+			DEV_ERR("%s: %s: Timeout. No R0 mtch. R0'=%02x%02x\n",
+				__func__, HDCP_STATE_NAME, buf[1], buf[0]);
+			rc = -ETIMEDOUT;
+			goto error;
+		} else {
+			DEV_ERR("%s: %s: R0 mismatch. R0'=%02x%02x\n", __func__,
+				HDCP_STATE_NAME, buf[1], buf[0]);
+			rc = -EINVAL;
+			goto error;
+		}
+	} else {
+		DEV_DBG("%s: %s: R0 matches\n", __func__, HDCP_STATE_NAME);
+	}
+
+error:
+	if (rc) {
+		DEV_ERR("%s: %s: Authentication Part I failed\n", __func__,
+			hdcp_ctrl ? HDCP_STATE_NAME : "???");
+	} else {
+		/* Enable HDCP Encryption */
+		DSS_REG_W(io, HDMI_HDCP_CTRL, BIT(0) | BIT(8));
+		DEV_INFO("%s: %s: Authentication Part I successful\n",
+			__func__, HDCP_STATE_NAME);
+	}
+	return rc;
+} /* hdmi_hdcp_authentication_part1 */
+
+/*
+ * read_write_v_h() - read one 4-byte word of V' from the sink over DDC and
+ * optionally write it to an HDCP register
+ * @io:   register block to write @reg into when @wr is true
+ * @off:  DDC offset of the V' word (0x20 .. 0x30)
+ * @name: human-readable label for log messages
+ * @reg:  destination register offset (ignored when @wr is false)
+ * @wr:   when true, also write the assembled word to @reg
+ *
+ * Fixes over the original: the parameter list did not compile
+ * ("dss_io_data" without the struct keyword, and "int off." with a period
+ * instead of a comma), and @name was passed to snprintf() as the format
+ * string, which is a classic format-string hazard.
+ *
+ * NOTE(review): ddc_data, hdcp_ctrl and buf are not parameters and are
+ * not declared locally; they must resolve to file-scope state -- confirm,
+ * the original declaration could never have compiled as written.
+ *
+ * Return: 0 on success, the hdmi_ddc_read() error code otherwise.
+ */
+static int read_write_v_h(struct dss_io_data *io, int off, char *name, u32 reg,
+				    bool wr)
+{
+	char what[20];
+	int rc = 0;
+
+	ddc_data.offset = off;
+	memset(what, 0, sizeof(what));
+	snprintf(what, sizeof(what), "%s", name);
+	hdcp_ctrl->init_data.ddc_ctrl->ddc_data = ddc_data;
+	rc = hdmi_ddc_read(hdcp_ctrl->init_data.ddc_ctrl);
+	if (rc) {
+		DEV_ERR("%s: %s: Read %s failed\n", __func__,
+			HDCP_STATE_NAME, what);
+		return rc;
+	}
+	DEV_DBG("%s: %s: %s: buf[0]=%x, [1]=%x,[2]=%x, [3]=%x\n",
+		__func__, HDCP_STATE_NAME, what, buf[0], buf[1],
+		buf[2], buf[3]);
+	if (wr) {
+		/* Assemble the little-endian DDC bytes into one register word */
+		DSS_REG_W((io), (reg),
+				(buf[3] << 24 | buf[2] << 16 |
+				buf[1] << 8 | buf[0]));
+	}
+	return rc;
+}
+
+/*
+ * hdmi_hdcp_transfer_v_h() - fetch V' (the sink's SHA-1 digest) and hand
+ * it to the HDCP block
+ * @hdcp_ctrl: driver context
+ *
+ * V' is read 4 bytes at a time from DDC offsets 0x20-0x30 and delivered
+ * either via an SCM call (tz_hdcp), via the Tx v4 HLOS shadow registers,
+ * or directly to the HDCP core registers.
+ *
+ * Fixes over the original: the loop bound divided ARRAY_SIZE() (already
+ * an element count) by sizeof(reg_data[0]) again, truncating it to 0 so
+ * the TZ path uploaded an all-zero buffer; and errors returned by
+ * read_write_v_h() were discarded, so the function reported success even
+ * when a DDC read failed.
+ *
+ * Return: 0 on success, negative errno on DDC or SCM failure.
+ */
+static int hdmi_hdcp_transfer_v_h(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	char what[20];
+	int rc = 0;
+	u8 buf[4];
+	struct hdmi_tx_ddc_data ddc_data;
+	struct dss_io_data *io;
+
+	struct scm_hdcp_req scm_buf[SCM_HDCP_MAX_REG];
+	u32 phy_addr;
+
+	struct hdmi_hdcp_reg_data reg_data[]  = {
+		{HDMI_HDCP_RCVPORT_DATA7,  0x20, "V' H0"},
+		{HDMI_HDCP_RCVPORT_DATA8,  0x24, "V' H1"},
+		{HDMI_HDCP_RCVPORT_DATA9,  0x28, "V' H2"},
+		{HDMI_HDCP_RCVPORT_DATA10, 0x2C, "V' H3"},
+		{HDMI_HDCP_RCVPORT_DATA11, 0x30, "V' H4"},
+	};
+	/* ARRAY_SIZE() already yields the element count */
+	u32 size = ARRAY_SIZE(reg_data);
+	u32 iter = 0;
+	u32 ret  = 0;
+	u32 resp = 0;
+
+	if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	phy_addr = hdcp_ctrl->init_data.phy_addr;
+
+	io = hdcp_ctrl->init_data.core_io;
+	memset(&ddc_data, 0, sizeof(ddc_data));
+	ddc_data.dev_addr = 0x74;
+	ddc_data.data_buf = buf;
+	ddc_data.data_len = 4;
+	ddc_data.request_len = 4;
+	ddc_data.retry = 5;
+	ddc_data.what = what;
+
+	if (hdcp_ctrl->tz_hdcp) {
+		memset(scm_buf, 0x00, sizeof(scm_buf));
+
+		/* Collect all five V' words, then upload them in one SCM call */
+		for (iter = 0; iter < size && iter < SCM_HDCP_MAX_REG; iter++) {
+			struct hdmi_hdcp_reg_data *rd = reg_data + iter;
+
+			rc = read_write_v_h(io, rd->off, rd->name, 0, false);
+			if (rc)
+				goto error;
+
+			rd->reg_val = buf[3] << 24 | buf[2] << 16 |
+				buf[1] << 8 | buf[0];
+
+			scm_buf[iter].addr = phy_addr + reg_data[iter].reg_id;
+			scm_buf[iter].val  = reg_data[iter].reg_val;
+		}
+
+		ret = hdcp_scm_call(scm_buf, &resp);
+		if (ret || resp) {
+			DEV_ERR("%s: error: scm_call ret = %d, resp = %d\n",
+				__func__, ret, resp);
+			rc = -EINVAL;
+			goto error;
+		}
+	} else if (hdcp_ctrl->hdmi_tx_ver_4) {
+		struct dss_io_data *hdcp_io = hdcp_ctrl->init_data.hdcp_io;
+
+		/* Read V'.H0 4 Byte at offset 0x20 */
+		rc = read_write_v_h(hdcp_io, 0x20, "V' H0",
+				HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA7, true);
+		if (rc)
+			goto error;
+
+		/* Read V'.H1 4 Byte at offset 0x24 */
+		rc = read_write_v_h(hdcp_io, 0x24, "V' H1",
+				HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA8, true);
+		if (rc)
+			goto error;
+
+		/* Read V'.H2 4 Byte at offset 0x28 */
+		rc = read_write_v_h(hdcp_io, 0x28, "V' H2",
+				HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA9, true);
+		if (rc)
+			goto error;
+
+		/* Read V'.H3 4 Byte at offset 0x2C */
+		rc = read_write_v_h(hdcp_io, 0x2C, "V' H3",
+				HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA10, true);
+		if (rc)
+			goto error;
+
+		/* Read V'.H4 4 Byte at offset 0x30 */
+		rc = read_write_v_h(hdcp_io, 0x30, "V' H4",
+				HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA11, true);
+		if (rc)
+			goto error;
+	} else {
+		/* Read V'.H0 4 Byte at offset 0x20 */
+		rc = read_write_v_h(io, 0x20, "V' H0", HDMI_HDCP_RCVPORT_DATA7,
+				true);
+		if (rc)
+			goto error;
+
+		/* Read V'.H1 4 Byte at offset 0x24 */
+		rc = read_write_v_h(io, 0x24, "V' H1", HDMI_HDCP_RCVPORT_DATA8,
+				true);
+		if (rc)
+			goto error;
+
+		/* Read V'.H2 4 Byte at offset 0x28 */
+		rc = read_write_v_h(io, 0x28, "V' H2", HDMI_HDCP_RCVPORT_DATA9,
+				true);
+		if (rc)
+			goto error;
+
+		/* Read V'.H3 4 Byte at offset 0x2C */
+		rc = read_write_v_h(io, 0x2C, "V' H3", HDMI_HDCP_RCVPORT_DATA10,
+				true);
+		if (rc)
+			goto error;
+
+		/* Read V'.H4 4 Byte at offset 0x30 */
+		rc = read_write_v_h(io, 0x30, "V' H4", HDMI_HDCP_RCVPORT_DATA11,
+				true);
+		if (rc)
+			goto error;
+	}
+
+error:
+	return rc;
+}
+
+/*
+ * hdmi_hdcp_authentication_part2() - second part of HDCP 1.x authentication
+ * (repeater path)
+ * @hdcp_ctrl: driver context, must be in HDCP_STATE_AUTHENTICATING
+ *
+ * Runs only when the downstream device is a repeater: waits for the
+ * repeater's KSV FIFO to become ready (BCAPS READY bit), reads
+ * BSTATUS/KSV FIFO/V' over DDC, feeds the KSV list byte-by-byte into the
+ * hardware SHA engine, then waits for SHA completion and the V == V'
+ * match.  Register writes go directly to the core, through SCM (tz_hdcp)
+ * or through the Tx v4 HLOS shadow registers depending on the platform.
+ * On exit, the discovered topology is stored in current_tp.
+ *
+ * Return: 0 on success, negative errno on DDC/SCM/timeout/topology
+ * failure.
+ */
+static int hdmi_hdcp_authentication_part2(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc, cnt, i;
+	struct hdmi_tx_ddc_data ddc_data;
+	u32 timeout_count, down_stream_devices = 0;
+	u32 repeater_cascade_depth = 0;
+	u8 buf[0xFF];
+	u8 *ksv_fifo = NULL;
+	u8 bcaps;
+	u16 bstatus, max_devs_exceeded = 0, max_cascade_exceeded = 0;
+	u32 link0_status;
+	u32 ksv_bytes;
+	struct dss_io_data *io;
+
+	struct scm_hdcp_req scm_buf[SCM_HDCP_MAX_REG];
+	u32 phy_addr;
+	u32 ret  = 0;
+	u32 resp = 0;
+
+	if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	phy_addr = hdcp_ctrl->init_data.phy_addr;
+
+	if (hdcp_ctrl->hdcp_state != HDCP_STATE_AUTHENTICATING) {
+		DEV_DBG("%s: %s: invalid state. returning\n", __func__,
+			HDCP_STATE_NAME);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	ksv_fifo = hdcp_ctrl->current_tp.ksv_list;
+
+	io = hdcp_ctrl->init_data.core_io;
+
+	memset(buf, 0, sizeof(buf));
+	memset(ksv_fifo, 0,
+		sizeof(hdcp_ctrl->current_tp.ksv_list));
+
+	/*
+	 * Wait until READY bit is set in BCAPS, as per HDCP specifications
+	 * maximum permitted time to check for READY bit is five seconds.
+	 * NOTE(review): if the loop times out the code falls through and
+	 * continues anyway -- confirm this is intentional (a later step
+	 * will then fail instead of erroring out here).
+	 */
+	timeout_count = 50;
+	do {
+		timeout_count--;
+		/* Read BCAPS at offset 0x40 */
+		memset(&ddc_data, 0, sizeof(ddc_data));
+		ddc_data.dev_addr = 0x74;
+		ddc_data.offset = 0x40;
+		ddc_data.data_buf = &bcaps;
+		ddc_data.data_len = 1;
+		ddc_data.request_len = 1;
+		ddc_data.retry = 5;
+		ddc_data.what = "Bcaps";
+		ddc_data.retry_align = true;
+
+		hdcp_ctrl->init_data.ddc_ctrl->ddc_data = ddc_data;
+
+		rc = hdmi_ddc_read(hdcp_ctrl->init_data.ddc_ctrl);
+		if (rc) {
+			DEV_ERR("%s: %s: BCAPS read failed\n", __func__,
+				HDCP_STATE_NAME);
+			goto error;
+		}
+		msleep(100);
+	} while (!(bcaps & BIT(5)) && timeout_count);
+
+	/* Read BSTATUS at offset 0x41 */
+	memset(&ddc_data, 0, sizeof(ddc_data));
+	ddc_data.dev_addr = 0x74;
+	ddc_data.offset = 0x41;
+	ddc_data.data_buf = buf;
+	ddc_data.data_len = 2;
+	ddc_data.request_len = 2;
+	ddc_data.retry = 5;
+	ddc_data.what = "Bstatuss";
+	ddc_data.retry_align = true;
+
+	hdcp_ctrl->init_data.ddc_ctrl->ddc_data = ddc_data;
+
+	rc = hdmi_ddc_read(hdcp_ctrl->init_data.ddc_ctrl);
+	if (rc) {
+		DEV_ERR("%s: %s: BSTATUS read failed\n", __func__,
+			HDCP_STATE_NAME);
+		goto error;
+	}
+	/* BSTATUS arrives little-endian over DDC */
+	bstatus = buf[1];
+	bstatus = (bstatus << 8) | buf[0];
+
+	if (hdcp_ctrl->tz_hdcp) {
+		memset(scm_buf, 0x00, sizeof(scm_buf));
+
+		/* Write BSTATUS and BCAPS to HDCP registers */
+		scm_buf[0].addr = phy_addr + HDMI_HDCP_RCVPORT_DATA12;
+		scm_buf[0].val  = bcaps | (bstatus << 8);
+
+		ret = hdcp_scm_call(scm_buf, &resp);
+		if (ret || resp) {
+			DEV_ERR("%s: error: scm_call ret = %d, resp = %d\n",
+				__func__, ret, resp);
+			rc = -EINVAL;
+			goto error;
+		}
+	} else if (hdcp_ctrl->hdmi_tx_ver_4) {
+		DSS_REG_W(hdcp_ctrl->init_data.hdcp_io,
+				HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA12,
+				bcaps | (bstatus << 8));
+	} else {
+		DSS_REG_W(io, HDMI_HDCP_RCVPORT_DATA12, bcaps | (bstatus << 8));
+	}
+
+	/* DEVICE_COUNT lives in BSTATUS bits [6:0] */
+	down_stream_devices = bstatus & 0x7F;
+	if (down_stream_devices == 0) {
+		/*
+		 * If no downstream devices are attached to the repeater
+		 * then part II fails.
+		 * todo: The other approach would be to continue PART II.
+		 */
+		DEV_ERR("%s: %s: No downstream devices\n", __func__,
+			HDCP_STATE_NAME);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	/* Cascaded repeater depth */
+	repeater_cascade_depth = (bstatus >> 8) & 0x7;
+
+	/*
+	 * HDCP Compliance 1B-05:
+	 * Check if no. of devices connected to repeater
+	 * exceed max_devices_connected from bit 7 of Bstatus.
+	 */
+	max_devs_exceeded = (bstatus & BIT(7)) >> 7;
+	if (max_devs_exceeded == 0x01) {
+		DEV_ERR("%s: %s: no. of devs connected exceeds max allowed",
+			__func__, HDCP_STATE_NAME);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	/*
+	 * HDCP Compliance 1B-06:
+	 * Check if no. of cascade connected to repeater
+	 * exceed max_cascade_connected from bit 11 of Bstatus.
+	 */
+	max_cascade_exceeded = (bstatus & BIT(11)) >> 11;
+	if (max_cascade_exceeded == 0x01) {
+		DEV_ERR("%s: %s: no. of cascade conn exceeds max allowed",
+			__func__, HDCP_STATE_NAME);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	/*
+	 * Read KSV FIFO over DDC
+	 * Key Selection vector FIFO Used to pull downstream KSVs
+	 * from HDCP Repeaters.
+	 * All bytes (DEVICE_COUNT * 5) must be read in a single,
+	 * auto incrementing access.
+	 * All bytes read as 0x00 for HDCP Receivers that are not
+	 * HDCP Repeaters (REPEATER == 0).
+	 */
+	ksv_bytes = 5 * down_stream_devices;
+	memset(&ddc_data, 0, sizeof(ddc_data));
+	ddc_data.dev_addr = 0x74;
+	ddc_data.offset = 0x43;
+	ddc_data.data_buf = ksv_fifo;
+	ddc_data.data_len = ksv_bytes;
+	ddc_data.request_len = ksv_bytes;
+	ddc_data.retry = 5;
+	ddc_data.what = "KSV FIFO";
+
+	hdcp_ctrl->init_data.ddc_ctrl->ddc_data = ddc_data;
+
+	/* Retry the FIFO read up to 20 times, 25ms apart */
+	cnt = 0;
+	do {
+		rc = hdmi_ddc_read(hdcp_ctrl->init_data.ddc_ctrl);
+		if (rc) {
+			DEV_ERR("%s: %s: KSV FIFO read failed\n", __func__,
+				HDCP_STATE_NAME);
+			/*
+			 * HDCP Compliance Test case 1B-01:
+			 * Wait here until all the ksv bytes have been
+			 * read from the KSV FIFO register.
+			 */
+			msleep(25);
+		} else {
+			break;
+		}
+		cnt++;
+	} while (cnt != 20);
+
+	/* rc still holds the last read error here */
+	if (cnt == 20)
+		goto error;
+
+	rc = hdmi_hdcp_transfer_v_h(hdcp_ctrl);
+	if (rc)
+		goto error;
+
+	/*
+	 * Write KSV FIFO to HDCP_SHA_DATA.
+	 * This is done 1 byte at time starting with the LSB.
+	 * On the very last byte write, the HDCP_SHA_DATA_DONE bit[0]
+	 */
+
+	/* First, reset SHA engine */
+	/* Next, enable SHA engine, SEL=DIGA_HDCP */
+	if (hdcp_ctrl->tz_hdcp) {
+		memset(scm_buf, 0x00, sizeof(scm_buf));
+
+		scm_buf[0].addr = phy_addr + HDMI_HDCP_SHA_CTRL;
+		scm_buf[0].val  = HDCP_REG_ENABLE;
+		scm_buf[1].addr = phy_addr + HDMI_HDCP_SHA_CTRL;
+		scm_buf[1].val  = HDCP_REG_DISABLE;
+
+		ret = hdcp_scm_call(scm_buf, &resp);
+		if (ret || resp) {
+			DEV_ERR("%s: error: scm_call ret = %d, resp = %d\n",
+				__func__, ret, resp);
+			rc = -EINVAL;
+			goto error;
+		}
+	} else if (hdcp_ctrl->hdmi_tx_ver_4) {
+		DSS_REG_W(hdcp_ctrl->init_data.hdcp_io,
+				HDCP_SEC_TZ_HV_HLOS_HDCP_SHA_CTRL,
+				HDCP_REG_ENABLE);
+		DSS_REG_W(hdcp_ctrl->init_data.hdcp_io,
+				HDCP_SEC_TZ_HV_HLOS_HDCP_SHA_CTRL,
+				HDCP_REG_DISABLE);
+	} else {
+		DSS_REG_W(io, HDMI_HDCP_SHA_CTRL, HDCP_REG_ENABLE);
+		DSS_REG_W(io, HDMI_HDCP_SHA_CTRL, HDCP_REG_DISABLE);
+	}
+
+	/* Feed all but the final KSV byte; the last one carries the DONE bit */
+	for (i = 0; i < ksv_bytes - 1; i++) {
+		/* Write KSV byte and do not set DONE bit[0] */
+		if (hdcp_ctrl->tz_hdcp) {
+			memset(scm_buf, 0x00, sizeof(scm_buf));
+
+			scm_buf[0].addr = phy_addr + HDMI_HDCP_SHA_DATA;
+			scm_buf[0].val  = ksv_fifo[i] << 16;
+
+			ret = hdcp_scm_call(scm_buf, &resp);
+			if (ret || resp) {
+				DEV_ERR("%s: scm_call ret = %d, resp = %d\n",
+					__func__, ret, resp);
+				rc = -EINVAL;
+				goto error;
+			}
+		} else if (hdcp_ctrl->hdmi_tx_ver_4) {
+			DSS_REG_W_ND(hdcp_ctrl->init_data.hdcp_io,
+					HDCP_SEC_TZ_HV_HLOS_HDCP_SHA_DATA,
+					ksv_fifo[i] << 16);
+		} else {
+			DSS_REG_W_ND(io, HDMI_HDCP_SHA_DATA, ksv_fifo[i] << 16);
+		}
+
+		/*
+		 * Once 64 bytes have been written, we need to poll for
+		 * HDCP_SHA_BLOCK_DONE before writing any further
+		 */
+		if (i && !((i + 1) % 64)) {
+			timeout_count = 100;
+			while (!(DSS_REG_R(io, HDMI_HDCP_SHA_STATUS) & BIT(0))
+				&& (--timeout_count)) {
+				DEV_DBG("%s: %s: Wrote 64 bytes KSV FIFO\n",
+					__func__, HDCP_STATE_NAME);
+				DEV_DBG("%s: %s: HDCP_SHA_STATUS=%08x\n",
+					__func__, HDCP_STATE_NAME,
+					DSS_REG_R(io, HDMI_HDCP_SHA_STATUS));
+				msleep(20);
+			}
+			if (!timeout_count) {
+				rc = -ETIMEDOUT;
+				DEV_ERR("%s: %s: Write KSV FIFO timedout",
+					__func__, HDCP_STATE_NAME);
+				goto error;
+			}
+		}
+
+	}
+
+	/* Write 1 to DONE bit[0] along with the final KSV byte */
+	if (hdcp_ctrl->tz_hdcp) {
+		memset(scm_buf, 0x00, sizeof(scm_buf));
+
+		scm_buf[0].addr = phy_addr + HDMI_HDCP_SHA_DATA;
+		scm_buf[0].val  = (ksv_fifo[ksv_bytes - 1] << 16) | 0x1;
+
+		ret = hdcp_scm_call(scm_buf, &resp);
+		if (ret || resp) {
+			DEV_ERR("%s: error: scm_call ret = %d, resp = %d\n",
+				__func__, ret, resp);
+			rc = -EINVAL;
+			goto error;
+		}
+	} else if (hdcp_ctrl->hdmi_tx_ver_4) {
+		DSS_REG_W_ND(hdcp_ctrl->init_data.hdcp_io,
+				HDCP_SEC_TZ_HV_HLOS_HDCP_SHA_DATA,
+				(ksv_fifo[ksv_bytes - 1] << 16) | 0x1);
+	} else {
+		DSS_REG_W_ND(io, HDMI_HDCP_SHA_DATA,
+			(ksv_fifo[ksv_bytes - 1] << 16) | 0x1);
+	}
+
+	/* Now wait for HDCP_SHA_COMP_DONE */
+	timeout_count = 100;
+	while ((0x10 != (DSS_REG_R(io, HDMI_HDCP_SHA_STATUS)
+		& 0xFFFFFF10)) && --timeout_count)
+		msleep(20);
+	if (!timeout_count) {
+		rc = -ETIMEDOUT;
+		DEV_ERR("%s: %s: SHA computation timedout", __func__,
+			HDCP_STATE_NAME);
+		goto error;
+	}
+
+	/* Wait for V_MATCHES */
+	timeout_count = 100;
+	link0_status = DSS_REG_R(io, HDMI_HDCP_LINK0_STATUS);
+	while (((link0_status & BIT(20)) != BIT(20)) && --timeout_count) {
+		DEV_DBG("%s: %s: Waiting for V_MATCHES(%d). l0_status=0x%08x\n",
+			__func__, HDCP_STATE_NAME, timeout_count, link0_status);
+		msleep(20);
+		link0_status = DSS_REG_R(io, HDMI_HDCP_LINK0_STATUS);
+	}
+	if (!timeout_count) {
+		rc = -ETIMEDOUT;
+		DEV_ERR("%s: %s: HDCP V Match timedout", __func__,
+			HDCP_STATE_NAME);
+		goto error;
+	}
+
+error:
+	if (rc)
+		DEV_ERR("%s: %s: Authentication Part II failed\n", __func__,
+			hdcp_ctrl ? HDCP_STATE_NAME : "???");
+	else
+		DEV_INFO("%s: %s: Authentication Part II successful\n",
+			__func__, HDCP_STATE_NAME);
+
+	if (!hdcp_ctrl) {
+		DEV_ERR("%s: hdcp_ctrl null. Topology not updated\n",
+			__func__);
+		return rc;
+	}
+	/* Update topology information */
+	hdcp_ctrl->current_tp.dev_count = down_stream_devices;
+	hdcp_ctrl->current_tp.max_cascade_exceeded = max_cascade_exceeded;
+	hdcp_ctrl->current_tp.max_dev_exceeded = max_devs_exceeded;
+	hdcp_ctrl->current_tp.depth = repeater_cascade_depth;
+
+	return rc;
+} /* hdmi_hdcp_authentication_part2 */
+
+/*
+ * Snapshot the topology discovered during the last authentication pass
+ * (current_tp) into cached_tp so it can be reported after state changes.
+ */
+static void hdmi_hdcp_cache_topology(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	memcpy(&hdcp_ctrl->cached_tp, &hdcp_ctrl->current_tp,
+			sizeof(hdcp_ctrl->cached_tp));
+}
+
+/*
+ * hdmi_hdcp_notify_topology() - tell userspace the repeater topology is ready
+ * @hdcp_ctrl: driver context (provides the sysfs kobject)
+ *
+ * Sends a KOBJ_CHANGE uevent with HDCP_MGR_EVENT=MSG_READY plus the
+ * message id (DOWN_CHECK_TOPOLOGY) and source id (HDCP_V1_TX) as decimal
+ * strings.  Adds the NULL-pointer guard every sibling in this file has;
+ * the original dereferenced hdcp_ctrl unconditionally.
+ */
+static void hdmi_hdcp_notify_topology(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	char a[16], b[16];
+	char *envp[] = {
+		[0] = "HDCP_MGR_EVENT=MSG_READY",
+		[1] = a,
+		[2] = b,
+		NULL,
+		};
+
+	if (!hdcp_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	snprintf(envp[1], 16, "%d", (int)DOWN_CHECK_TOPOLOGY);
+	snprintf(envp[2], 16, "%d", (int)HDCP_V1_TX);
+	kobject_uevent_env(hdcp_ctrl->init_data.sysfs_kobj, KOBJ_CHANGE, envp);
+
+	DEV_DBG("%s Event Sent: %s msgID = %s srcID = %s\n", __func__,
+			envp[0], envp[1], envp[2]);
+}
+
+/*
+ * Worker queued when an authentication failure must be reported: move
+ * the state machine to AUTH_FAIL under the shared mutex, then inform the
+ * HDMI Tx controller through the registered status callback, if any.
+ */
+static void hdmi_hdcp_int_work(struct work_struct *work)
+{
+	struct hdmi_hdcp_ctrl *ctrl;
+
+	ctrl = container_of(work, struct hdmi_hdcp_ctrl, hdcp_int_work);
+	if (!ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	mutex_lock(ctrl->init_data.mutex);
+	ctrl->hdcp_state = HDCP_STATE_AUTH_FAIL;
+	mutex_unlock(ctrl->init_data.mutex);
+
+	if (ctrl->init_data.notify_status)
+		ctrl->init_data.notify_status(ctrl->init_data.cb_data,
+			ctrl->hdcp_state);
+} /* hdmi_hdcp_int_work */
+
+/*
+ * hdmi_hdcp_auth_work() - delayed worker that runs the full HDCP 1.x
+ * authentication sequence
+ * @work: embedded in struct hdmi_hdcp_ctrl as hdcp_auth_work
+ *
+ * Runs part I (R0 exchange) and, for repeaters, part II (KSV list / V'
+ * verification).  Software DDC arbitration is enabled for the duration
+ * and handed back to hardware afterwards.  The final state transition is
+ * made under the shared mutex and only if the state is still
+ * AUTHENTICATING, so a concurrent deauthenticate/reauthenticate wins.
+ */
+static void hdmi_hdcp_auth_work(struct work_struct *work)
+{
+	int rc;
+	struct delayed_work *dw = to_delayed_work(work);
+	struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(dw,
+		struct hdmi_hdcp_ctrl, hdcp_auth_work);
+	struct dss_io_data *io;
+
+	if (!hdcp_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	if (hdcp_ctrl->hdcp_state != HDCP_STATE_AUTHENTICATING) {
+		DEV_DBG("%s: %s: invalid state. returning\n", __func__,
+			HDCP_STATE_NAME);
+		return;
+	}
+
+	io = hdcp_ctrl->init_data.core_io;
+	/* Enabling Software DDC */
+	DSS_REG_W_ND(io, HDMI_DDC_ARBITRATION, DSS_REG_R(io,
+				HDMI_DDC_ARBITRATION) & ~(BIT(4)));
+
+	rc = hdmi_hdcp_authentication_part1(hdcp_ctrl);
+	if (rc) {
+		DEV_DBG("%s: %s: HDCP Auth Part I failed\n", __func__,
+			HDCP_STATE_NAME);
+		goto error;
+	}
+
+	/* Part II applies only to repeaters (set from BCAPS in part I). */
+	if (hdcp_ctrl->current_tp.ds_type == DS_REPEATER) {
+		rc = hdmi_hdcp_authentication_part2(hdcp_ctrl);
+		if (rc) {
+			DEV_DBG("%s: %s: HDCP Auth Part II failed\n", __func__,
+				HDCP_STATE_NAME);
+			goto error;
+		}
+	} else {
+		DEV_INFO("%s: Downstream device is not a repeater\n", __func__);
+	}
+	/* Disabling software DDC before going into part3 to make sure
+	 * there is no Arbitration between software and hardware for DDCi
+	 */
+	DSS_REG_W_ND(io, HDMI_DDC_ARBITRATION, DSS_REG_R(io,
+				HDMI_DDC_ARBITRATION) | (BIT(4)));
+
+error:
+	/*
+	 * Ensure that the state did not change during authentication.
+	 * If it did, it means that deauthenticate/reauthenticate was
+	 * called. In that case, this function need not notify HDMI Tx
+	 * of the result
+	 */
+	mutex_lock(hdcp_ctrl->init_data.mutex);
+	if (hdcp_ctrl->hdcp_state == HDCP_STATE_AUTHENTICATING) {
+		if (rc) {
+			hdcp_ctrl->hdcp_state = HDCP_STATE_AUTH_FAIL;
+		} else {
+			hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATED;
+			hdcp_ctrl->auth_retries = 0;
+			hdmi_hdcp_cache_topology(hdcp_ctrl);
+			hdmi_hdcp_notify_topology(hdcp_ctrl);
+		}
+		mutex_unlock(hdcp_ctrl->init_data.mutex);
+
+		/* Notify HDMI Tx controller of the result */
+		DEV_DBG("%s: %s: Notifying HDMI Tx of auth result\n",
+			__func__, HDCP_STATE_NAME);
+		if (hdcp_ctrl->init_data.notify_status) {
+			hdcp_ctrl->init_data.notify_status(
+				hdcp_ctrl->init_data.cb_data,
+				hdcp_ctrl->hdcp_state);
+		}
+	} else {
+		DEV_DBG("%s: %s: HDCP state changed during authentication\n",
+			__func__, HDCP_STATE_NAME);
+		mutex_unlock(hdcp_ctrl->init_data.mutex);
+	}
+} /* hdmi_hdcp_auth_work */
+
+/*
+ * hdmi_hdcp_authenticate() - start HDCP 1.x authentication on an idle link
+ * @input: opaque handle, actually a struct hdmi_hdcp_ctrl pointer
+ *
+ * Loads the AKSV keys and, on success, schedules the delayed auth worker;
+ * if key loading fails, queues the failure worker instead so the Tx
+ * controller still gets notified.  A link that is already active (or in
+ * the middle of activating) is left alone.
+ *
+ * Return: 0 when the request was accepted or ignored, -EINVAL on NULL input.
+ */
+int hdmi_hdcp_authenticate(void *input)
+{
+	struct hdmi_hdcp_ctrl *hdcp_ctrl = input;
+
+	if (!hdcp_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	if (hdcp_ctrl->hdcp_state != HDCP_STATE_INACTIVE) {
+		DEV_DBG("%s: %s: already active or activating. returning\n",
+			__func__, HDCP_STATE_NAME);
+		return 0;
+	}
+
+	DEV_DBG("%s: %s: Queuing work to start HDCP authentication", __func__,
+		HDCP_STATE_NAME);
+
+	if (hdmi_hdcp_load_keys(input)) {
+		queue_work(hdcp_ctrl->init_data.workq,
+			&hdcp_ctrl->hdcp_int_work);
+	} else {
+		queue_delayed_work(hdcp_ctrl->init_data.workq,
+			&hdcp_ctrl->hdcp_auth_work, HZ/2);
+	}
+
+	return 0;
+} /* hdmi_hdcp_authenticate */
+
+/*
+ * hdmi_hdcp_reauthenticate() - restart authentication after a failure
+ * @input: opaque handle, actually a struct hdmi_hdcp_ctrl pointer
+ *
+ * Only valid in HDCP_STATE_AUTH_FAIL.  Resets the HDCP hardware (with an
+ * extra core soft-reset on newer HDMI cores), drains the DDC engine,
+ * disables encryption, then reloads keys and re-queues the auth or
+ * failure worker exactly like hdmi_hdcp_authenticate().
+ *
+ * Return: 0 on success or when ignored due to state, -EINVAL on bad input.
+ */
+int hdmi_hdcp_reauthenticate(void *input)
+{
+	struct hdmi_hdcp_ctrl *hdcp_ctrl = (struct hdmi_hdcp_ctrl *)input;
+	struct dss_io_data *io;
+	u32 hdmi_hw_version;
+	u32 ret = 0;
+
+	if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	io = hdcp_ctrl->init_data.core_io;
+
+	if (hdcp_ctrl->hdcp_state != HDCP_STATE_AUTH_FAIL) {
+		DEV_DBG("%s: %s: invalid state. returning\n", __func__,
+			HDCP_STATE_NAME);
+		return 0;
+	}
+
+	/* Newer cores (>= 3.3) need an explicit soft-reset pulse first. */
+	hdmi_hw_version = DSS_REG_R(io, HDMI_VERSION);
+	if (hdmi_hw_version >= 0x30030000) {
+		DSS_REG_W(io, HDMI_CTRL_SW_RESET, BIT(1));
+		DSS_REG_W(io, HDMI_CTRL_SW_RESET, 0);
+	}
+
+	/* Disable HDCP interrupts */
+	DSS_REG_W(io, HDMI_HDCP_INT_CTRL, 0);
+
+	DSS_REG_W(io, HDMI_HDCP_RESET, BIT(0));
+
+	/* Wait to be clean on DDC HW engine */
+	hdmi_hdcp_hw_ddc_clean(hdcp_ctrl);
+
+	/* Disable encryption and disable the HDCP block */
+	DSS_REG_W(io, HDMI_HDCP_CTRL, 0);
+
+	if (!hdmi_hdcp_load_keys(input))
+		queue_delayed_work(hdcp_ctrl->init_data.workq,
+			&hdcp_ctrl->hdcp_auth_work, HZ/2);
+	else
+		queue_work(hdcp_ctrl->init_data.workq,
+			&hdcp_ctrl->hdcp_int_work);
+
+	/* ret is never set after init; this path always reports success */
+	return ret;
+} /* hdmi_hdcp_reauthenticate */
+
+/*
+ * hdmi_hdcp_off() - tear down the HDCP session
+ * @input: opaque handle, actually a struct hdmi_hdcp_ctrl pointer
+ *
+ * Masks HDCP interrupts, marks the state machine inactive (under the
+ * mutex, so in-flight workers observe it), cancels any pending auth or
+ * failure work, then resets the HDCP block and disables encryption.
+ * No-op if the session is already inactive.
+ */
+void hdmi_hdcp_off(void *input)
+{
+	struct hdmi_hdcp_ctrl *hdcp_ctrl = (struct hdmi_hdcp_ctrl *)input;
+	struct dss_io_data *io;
+	int rc = 0;
+
+	if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	io = hdcp_ctrl->init_data.core_io;
+
+	if (hdcp_ctrl->hdcp_state == HDCP_STATE_INACTIVE) {
+		DEV_DBG("%s: %s: inactive. returning\n", __func__,
+			HDCP_STATE_NAME);
+		return;
+	}
+
+	/*
+	 * Disable HDCP interrupts.
+	 * Also, need to set the state to inactive here so that any ongoing
+	 * reauth works will know that the HDCP session has been turned off.
+	 */
+	mutex_lock(hdcp_ctrl->init_data.mutex);
+	DSS_REG_W(io, HDMI_HDCP_INT_CTRL, 0);
+	hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE;
+	mutex_unlock(hdcp_ctrl->init_data.mutex);
+
+	/*
+	 * Cancel any pending auth/reauth attempts.
+	 * If one is ongoing, this will wait for it to finish.
+	 * No more reauthentiaction attempts will be scheduled since we
+	 * set the currect state to inactive.
+	 */
+	rc = cancel_delayed_work_sync(&hdcp_ctrl->hdcp_auth_work);
+	if (rc)
+		DEV_DBG("%s: %s: Deleted hdcp auth work\n", __func__,
+			HDCP_STATE_NAME);
+	rc = cancel_work_sync(&hdcp_ctrl->hdcp_int_work);
+	if (rc)
+		DEV_DBG("%s: %s: Deleted hdcp int work\n", __func__,
+			HDCP_STATE_NAME);
+
+	DSS_REG_W(io, HDMI_HDCP_RESET, BIT(0));
+
+	/* Disable encryption and disable the HDCP block */
+	DSS_REG_W(io, HDMI_HDCP_CTRL, 0);
+
+	DEV_DBG("%s: %s: HDCP: Off\n", __func__, HDCP_STATE_NAME);
+} /* hdmi_hdcp_off */
+
+/*
+ * hdmi_hdcp_isr() - HDCP 1.x interrupt handler
+ * @input: struct hdmi_hdcp_ctrl handle
+ *
+ * Reads HDMI_HDCP_INT_CTRL and services each asserted status bit; each
+ * status bit N is acknowledged by writing back the register value with
+ * the adjacent ack bit N+1 set. Auth success/failure interrupts wake the
+ * R0' check waiter and, on failure after authentication, queue the int
+ * work to notify the HDMI Tx driver.
+ *
+ * Return: 0 on success, -EINVAL on invalid input.
+ */
+int hdmi_hdcp_isr(void *input)
+{
+	struct hdmi_hdcp_ctrl *hdcp_ctrl = (struct hdmi_hdcp_ctrl *)input;
+	int rc = 0;
+	struct dss_io_data *io;
+	u32 hdcp_int_val;
+
+	if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	io = hdcp_ctrl->init_data.core_io;
+
+	hdcp_int_val = DSS_REG_R(io, HDMI_HDCP_INT_CTRL);
+
+	/* Ignore HDCP interrupts if HDCP is disabled */
+	if (hdcp_ctrl->hdcp_state == HDCP_STATE_INACTIVE) {
+		DSS_REG_W(io, HDMI_HDCP_INT_CTRL, HDCP_INT_CLR);
+		return 0;
+	}
+
+	if (hdcp_int_val & BIT(0)) {
+		/* AUTH_SUCCESS_INT: ack via BIT(1) */
+		DSS_REG_W(io, HDMI_HDCP_INT_CTRL, (hdcp_int_val | BIT(1)));
+		DEV_INFO("%s: %s: AUTH_SUCCESS_INT received\n", __func__,
+			HDCP_STATE_NAME);
+		/* Unblock the auth work waiting on the R0' read check */
+		if (hdcp_ctrl->hdcp_state == HDCP_STATE_AUTHENTICATING)
+			complete_all(&hdcp_ctrl->r0_checked);
+	}
+
+	if (hdcp_int_val & BIT(4)) {
+		/* AUTH_FAIL_INT: ack via BIT(5) */
+		u32 link_status = DSS_REG_R(io, HDMI_HDCP_LINK0_STATUS);
+
+		DSS_REG_W(io, HDMI_HDCP_INT_CTRL, (hdcp_int_val | BIT(5)));
+		DEV_INFO("%s: %s: AUTH_FAIL_INT rcvd, LINK0_STATUS=0x%08x\n",
+			__func__, HDCP_STATE_NAME, link_status);
+		if (hdcp_ctrl->hdcp_state == HDCP_STATE_AUTHENTICATED) {
+			/* Inform HDMI Tx of the failure */
+			queue_work(hdcp_ctrl->init_data.workq,
+				&hdcp_ctrl->hdcp_int_work);
+			/* todo: print debug log with auth fail reason */
+		} else if (hdcp_ctrl->hdcp_state == HDCP_STATE_AUTHENTICATING) {
+			complete_all(&hdcp_ctrl->r0_checked);
+		}
+
+		/* Clear AUTH_FAIL_INFO as well */
+		DSS_REG_W(io, HDMI_HDCP_INT_CTRL, (hdcp_int_val | BIT(7)));
+	}
+
+	if (hdcp_int_val & BIT(8)) {
+		/* DDC_XFER_REQ_INT: ack via BIT(9) */
+		DSS_REG_W(io, HDMI_HDCP_INT_CTRL, (hdcp_int_val | BIT(9)));
+		DEV_INFO("%s: %s: DDC_XFER_REQ_INT received\n", __func__,
+			HDCP_STATE_NAME);
+	}
+
+	if (hdcp_int_val & BIT(12)) {
+		/* DDC_XFER_DONE_INT: ack via BIT(13) */
+		DSS_REG_W(io, HDMI_HDCP_INT_CTRL, (hdcp_int_val | BIT(13)));
+		DEV_INFO("%s: %s: DDC_XFER_DONE received\n", __func__,
+			HDCP_STATE_NAME);
+	}
+
+error:
+	return rc;
+} /* hdmi_hdcp_isr */
+
+/*
+ * hdmi_hdcp_sysfs_rda_status() - sysfs read handler for "hdcp/status"
+ *
+ * Prints the current numeric enum hdmi_hdcp_state value; reads it under
+ * the shared mutex so a concurrent state transition is not observed
+ * half-done.
+ */
+static ssize_t hdmi_hdcp_sysfs_rda_status(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	ssize_t ret;
+	struct hdmi_hdcp_ctrl *hdcp_ctrl =
+		hdmi_get_featuredata_from_sysfs_dev(dev, HDMI_TX_FEAT_HDCP);
+
+	if (!hdcp_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(hdcp_ctrl->init_data.mutex);
+	ret = snprintf(buf, PAGE_SIZE, "%d\n", hdcp_ctrl->hdcp_state);
+	DEV_DBG("%s: '%d'\n", __func__, hdcp_ctrl->hdcp_state);
+	mutex_unlock(hdcp_ctrl->init_data.mutex);
+
+	return ret;
+} /* hdmi_hdcp_sysfs_rda_hdcp*/
+
+/*
+ * hdmi_hdcp_sysfs_rda_tp() - sysfs read handler for "hdcp/tp"
+ *
+ * Returns the cached repeater topology in a binary layout: a header
+ * (message id at MSG_ID_IDX, HDCP_AUTHED at RET_CODE_IDX, HEADER_LEN
+ * bytes total) followed by the raw struct HDCP_V2V1_MSG_TOPOLOGY.
+ * Only valid after userspace selected a topology message id via the
+ * matching write handler; the id is consumed (reset to -1) on read.
+ */
+static ssize_t hdmi_hdcp_sysfs_rda_tp(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	ssize_t ret = 0;
+	struct hdmi_hdcp_ctrl *hdcp_ctrl =
+		hdmi_get_featuredata_from_sysfs_dev(dev, HDMI_TX_FEAT_HDCP);
+
+	if (!hdcp_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (hdcp_ctrl->tp_msgid) {
+	case DOWN_CHECK_TOPOLOGY:
+	case DOWN_REQUEST_TOPOLOGY:
+		buf[MSG_ID_IDX]   = hdcp_ctrl->tp_msgid;
+		buf[RET_CODE_IDX] = HDCP_AUTHED;
+		ret = HEADER_LEN;
+
+		memcpy(buf + HEADER_LEN, &hdcp_ctrl->cached_tp,
+			sizeof(struct HDCP_V2V1_MSG_TOPOLOGY));
+
+		ret += sizeof(struct HDCP_V2V1_MSG_TOPOLOGY);
+
+		/* clear the flag once data is read back to user space*/
+		hdcp_ctrl->tp_msgid = -1;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+} /* hdmi_hdcp_sysfs_rda_tp*/
+
+/*
+ * hdmi_hdcp_sysfs_wta_tp() - sysfs write handler for "hdcp/tp"
+ *
+ * The first byte of the write selects which topology message the next
+ * read of "tp" will return; only DOWN_CHECK_TOPOLOGY and
+ * DOWN_REQUEST_TOPOLOGY are accepted, anything else yields -EINVAL.
+ */
+static ssize_t hdmi_hdcp_sysfs_wta_tp(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int msgid = 0;
+	ssize_t ret = count;
+	struct hdmi_hdcp_ctrl *hdcp_ctrl =
+		hdmi_get_featuredata_from_sysfs_dev(dev, HDMI_TX_FEAT_HDCP);
+
+	if (!hdcp_ctrl || !buf) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	msgid = buf[0];
+
+	switch (msgid) {
+	case DOWN_CHECK_TOPOLOGY:
+	case DOWN_REQUEST_TOPOLOGY:
+		hdcp_ctrl->tp_msgid = msgid;
+		break;
+	/* more cases added here */
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+} /* hdmi_tx_sysfs_wta_hpd */
+
+/* sysfs attributes exposed under <sysfs_kobj>/hdcp */
+static DEVICE_ATTR(status, 0444, hdmi_hdcp_sysfs_rda_status, NULL);
+static DEVICE_ATTR(tp, 0644, hdmi_hdcp_sysfs_rda_tp,
+	hdmi_hdcp_sysfs_wta_tp);
+
+
+static struct attribute *hdmi_hdcp_fs_attrs[] = {
+	&dev_attr_status.attr,
+	&dev_attr_tp.attr,
+	NULL,
+};
+
+static struct attribute_group hdmi_hdcp_fs_attr_group = {
+	.name = "hdcp",
+	.attrs = hdmi_hdcp_fs_attrs,
+};
+
+/*
+ * hdmi_hdcp_deinit() - release HDCP 1.x resources
+ * @input: struct hdmi_hdcp_ctrl handle returned by hdmi_hdcp_init()
+ *
+ * Removes the "hdcp" sysfs group and frees the controller. Caller is
+ * expected to have turned the session off first (hdmi_hdcp_off);
+ * outstanding work items are not cancelled here — TODO confirm callers
+ * guarantee this ordering.
+ */
+void hdmi_hdcp_deinit(void *input)
+{
+	struct hdmi_hdcp_ctrl *hdcp_ctrl = (struct hdmi_hdcp_ctrl *)input;
+
+	if (!hdcp_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	sysfs_remove_group(hdcp_ctrl->init_data.sysfs_kobj,
+				&hdmi_hdcp_fs_attr_group);
+
+	kfree(hdcp_ctrl);
+} /* hdmi_hdcp_deinit */
+
+/*
+ * hdmi_hdcp_init() - allocate and initialize the HDCP 1.x feature
+ * @init_data: resources supplied by the HDMI Tx driver (register spaces,
+ *             mutex, workqueue, sysfs kobject, status callback, ...)
+ *
+ * Validates the inputs, allocates the controller, publishes the "hdcp"
+ * sysfs group, sets up the auth/int work items and probes for the TZ
+ * secure HDCP service on pre-v4 controllers.
+ *
+ * Return: opaque controller handle on success, NULL on failure.
+ */
+void *hdmi_hdcp_init(struct hdmi_hdcp_init_data *init_data)
+{
+	struct hdmi_hdcp_ctrl *hdcp_ctrl = NULL;
+	int ret;
+	static struct hdmi_hdcp_ops ops = {
+		.hdmi_hdcp_isr = hdmi_hdcp_isr,
+		.hdmi_hdcp_reauthenticate = hdmi_hdcp_reauthenticate,
+		.hdmi_hdcp_authenticate = hdmi_hdcp_authenticate,
+		.hdmi_hdcp_off = hdmi_hdcp_off
+	};
+
+	if (!init_data || !init_data->core_io || !init_data->qfprom_io ||
+		!init_data->mutex || !init_data->ddc_ctrl ||
+		!init_data->notify_status || !init_data->workq ||
+		!init_data->cb_data) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		goto error;
+	}
+
+	if (init_data->hdmi_tx_ver >= HDMI_TX_VERSION_4
+			&& !init_data->hdcp_io) {
+		DEV_ERR("%s: hdcp_io required for HDMI Tx Ver 4\n", __func__);
+		goto error;
+	}
+
+	hdcp_ctrl = kzalloc(sizeof(*hdcp_ctrl), GFP_KERNEL);
+	if (!hdcp_ctrl) {
+		DEV_ERR("%s: Out of memory\n", __func__);
+		goto error;
+	}
+
+	hdcp_ctrl->init_data = *init_data;
+	hdcp_ctrl->ops = &ops;
+	hdcp_ctrl->hdmi_tx_ver_4 =
+		(init_data->hdmi_tx_ver >= HDMI_TX_VERSION_4);
+
+	if (sysfs_create_group(init_data->sysfs_kobj,
+				&hdmi_hdcp_fs_attr_group)) {
+		DEV_ERR("%s: hdcp sysfs group creation failed\n", __func__);
+		/*
+		 * Free the partially-initialized controller so the caller
+		 * gets NULL instead of a ctrl whose work items were never
+		 * set up.
+		 */
+		kfree(hdcp_ctrl);
+		hdcp_ctrl = NULL;
+		goto error;
+	}
+
+	INIT_DELAYED_WORK(&hdcp_ctrl->hdcp_auth_work, hdmi_hdcp_auth_work);
+	INIT_WORK(&hdcp_ctrl->hdcp_int_work, hdmi_hdcp_int_work);
+
+	hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE;
+	init_completion(&hdcp_ctrl->r0_checked);
+
+	if (!hdcp_ctrl->hdmi_tx_ver_4) {
+		/* Older controllers authenticate through the TZ HDCP SCM call */
+		ret = scm_is_call_available(SCM_SVC_HDCP, SCM_CMD_HDCP);
+		if (ret <= 0) {
+			DEV_ERR("%s: secure hdcp service unavailable, ret = %d",
+				 __func__, ret);
+		} else {
+			DEV_DBG("%s: tz_hdcp = 1\n", __func__);
+			hdcp_ctrl->tz_hdcp = 1;
+		}
+	}
+
+	DEV_DBG("%s: HDCP module initialized. HDCP_STATE=%s", __func__,
+		HDCP_STATE_NAME);
+
+error:
+	return (void *)hdcp_ctrl;
+} /* hdmi_hdcp_init */
+
+/*
+ * hdmi_hdcp_start() - fetch the HDCP 1.x feature ops table
+ * @input: struct hdmi_hdcp_ctrl handle returned by hdmi_hdcp_init()
+ *
+ * Return: the ops table, or NULL on invalid input. Guarded because
+ * every other entry point in this file validates its handle and
+ * hdmi_hdcp_init() can return NULL.
+ */
+struct hdmi_hdcp_ops *hdmi_hdcp_start(void *input)
+{
+	struct hdmi_hdcp_ctrl *hdcp_ctrl = (struct hdmi_hdcp_ctrl *)input;
+
+	if (!hdcp_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return NULL;
+	}
+
+	return hdcp_ctrl->ops;
+}
+
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_hdcp.h b/drivers/video/fbdev/msm/mdss_hdmi_hdcp.h
new file mode 100644
index 0000000..2098943
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_hdmi_hdcp.h
@@ -0,0 +1,64 @@
+/* Copyright (c) 2012, 2014-2015, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_HDMI_HDCP_H__
+#define __MDSS_HDMI_HDCP_H__
+
+#include "mdss_hdmi_util.h"
+#include <video/msm_hdmi_modes.h>
+#include <soc/qcom/scm.h>
+
+/* Lifecycle / encryption-level states reported via notify_status() */
+enum hdmi_hdcp_state {
+	HDCP_STATE_INACTIVE,
+	HDCP_STATE_AUTHENTICATING,
+	HDCP_STATE_AUTHENTICATED,
+	HDCP_STATE_AUTH_FAIL,
+	HDCP_STATE_AUTH_ENC_NONE,
+	HDCP_STATE_AUTH_ENC_1X,
+	HDCP_STATE_AUTH_ENC_2P2
+};
+
+/*
+ * Resources handed from the HDMI Tx driver to the HDCP 1.x / 2.2
+ * feature modules at init time. The feature modules keep a copy, so
+ * every pointer here must outlive the feature (until *_deinit()).
+ */
+struct hdmi_hdcp_init_data {
+	struct dss_io_data *core_io;	/* HDMI controller register space */
+	struct dss_io_data *qfprom_io;
+	struct dss_io_data *hdcp_io;	/* required for HDMI Tx v4+ */
+	struct mutex *mutex;		/* serializes state transitions */
+	struct kobject *sysfs_kobj;	/* parent for "hdcp"/"hdcp2p2" groups */
+	struct workqueue_struct *workq;
+	void *cb_data;			/* opaque arg for notify_status */
+	void (*notify_status)(void *cb_data, enum hdmi_hdcp_state status);
+	struct hdmi_tx_ddc_ctrl *ddc_ctrl;
+	u32 phy_addr;
+	u32 hdmi_tx_ver;
+	struct msm_hdmi_mode_timing_info *timing;
+	bool tethered;			/* run HDCP 2.2 inline, no kthread dispatch */
+};
+
+/*
+ * Per-feature ops table returned by hdmi_hdcp_start() /
+ * hdmi_hdcp2p2_start(); the HDMI Tx driver drives the session
+ * exclusively through these callbacks.
+ */
+struct hdmi_hdcp_ops {
+	int (*hdmi_hdcp_isr)(void *ptr);
+	int (*hdmi_hdcp_reauthenticate)(void *input);
+	int (*hdmi_hdcp_authenticate)(void *hdcp_ctrl);
+	bool (*feature_supported)(void *input);
+	void (*hdmi_hdcp_off)(void *hdcp_ctrl);
+};
+
+/*
+ * Feature lifecycle: *_init() allocates and returns an opaque handle,
+ * *_start() exposes the ops table for that handle, *_deinit() releases
+ * it. The 1.x and 2.2 variants are parallel implementations of the
+ * same contract.
+ */
+void *hdmi_hdcp_init(struct hdmi_hdcp_init_data *init_data);
+void *hdmi_hdcp2p2_init(struct hdmi_hdcp_init_data *init_data);
+void hdmi_hdcp_deinit(void *input);
+void hdmi_hdcp2p2_deinit(void *input);
+
+struct hdmi_hdcp_ops *hdmi_hdcp_start(void *input);
+struct hdmi_hdcp_ops *hdmi_hdcp2p2_start(void *input);
+
+/* Human-readable name for an hdmi_hdcp_state value (for logging) */
+const char *hdcp_state_name(enum hdmi_hdcp_state hdcp_state);
+
+#endif /* __MDSS_HDMI_HDCP_H__ */
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c b/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c
new file mode 100644
index 0000000..fc0c878
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c
@@ -0,0 +1,1108 @@
+/* Copyright (c) 2015-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/types.h>
+#include <linux/kthread.h>
+
+#include <linux/hdcp_qseecom.h>
+#include "mdss_hdmi_hdcp.h"
+#include "video/msm_hdmi_hdcp_mgr.h"
+#include "mdss_hdmi_util.h"
+
+/*
+ * Defined addresses and offsets of standard HDCP 2.2 sink registers
+ * for DDC, as defined in HDCP 2.2 spec section 2.14 table 2.7
+ */
+#define HDCP_SINK_DDC_SLAVE_ADDR 0x74            /* Sink DDC slave address */
+#define HDCP_SINK_DDC_HDCP2_VERSION 0x50         /* Does sink support HDCP2.2 */
+#define HDCP_SINK_DDC_HDCP2_WRITE_MESSAGE 0x60   /* HDCP Tx writes here */
+#define HDCP_SINK_DDC_HDCP2_RXSTATUS 0x70        /* RxStatus, 2 bytes */
+#define HDCP_SINK_DDC_HDCP2_READ_MESSAGE 0x80    /* HDCP Tx reads here */
+
+#define HDCP2P2_DEFAULT_TIMEOUT 500
+
+/*
+ * HDCP 2.2 encryption requires the data encryption block that is present in
+ * HDMI controller version 4.0.0 and above
+ */
+#define MIN_HDMI_TX_MAJOR_VERSION 4
+
+/* Sink hotplug state tracked by the 2.2 controller */
+enum hdmi_hdcp2p2_sink_status {
+	SINK_DISCONNECTED,
+	SINK_CONNECTED
+};
+
+/* Result of the last authentication attempt, set from the TZ wakeup cmd */
+enum hdmi_auth_status {
+	HDMI_HDCP_AUTH_STATUS_FAILURE,
+	HDMI_HDCP_AUTH_STATUS_SUCCESS
+};
+
+/*
+ * Per-instance state for the HDCP 2.2 feature. Work items are either
+ * queued on the dedicated kthread worker (normal mode) or executed
+ * inline from hdmi_hdcp2p2_run() (tethered mode).
+ */
+struct hdmi_hdcp2p2_ctrl {
+	atomic_t auth_state;	/* enum hdmi_hdcp_state, atomic: read from ISR/cb paths */
+	bool tethered;		/* true: run state machine inline, no kthread */
+	enum hdmi_hdcp2p2_sink_status sink_status; /* Is sink connected */
+	struct hdmi_hdcp_init_data init_data; /* Feature data from HDMI drv */
+	struct mutex mutex; /* mutex to protect access to ctrl */
+	struct mutex msg_lock; /* mutex to protect access to msg buffer */
+	struct mutex wakeup_mutex; /* mutex to protect access to wakeup call*/
+	struct hdmi_hdcp_ops *ops;
+	void *lib_ctx; /* Handle to HDCP 2.2 Trustzone library */
+	struct hdcp_txmtr_ops *lib; /* Ops for driver to call into TZ */
+
+	enum hdmi_hdcp_wakeup_cmd wakeup_cmd;	/* last cmd from TZ lib */
+	enum hdmi_auth_status auth_status;
+	char *send_msg_buf;	/* pending message for the sink (msg_lock) */
+	uint32_t send_msg_len;	/* length of send_msg_buf (msg_lock) */
+	uint32_t timeout;	/* ms budget for the next DDC transaction */
+	uint32_t timeout_left;	/* ms remaining after the last transaction */
+
+	struct task_struct *thread;	/* backing thread for 'worker' */
+	struct kthread_worker worker;
+	struct kthread_work status;
+	struct kthread_work auth;
+	struct kthread_work send_msg;
+	struct kthread_work recv_msg;
+	struct kthread_work link;
+	struct kthread_work poll;
+};
+
+static int hdmi_hdcp2p2_auth(struct hdmi_hdcp2p2_ctrl *ctrl);
+static void hdmi_hdcp2p2_send_msg(struct hdmi_hdcp2p2_ctrl *ctrl);
+static void hdmi_hdcp2p2_recv_msg(struct hdmi_hdcp2p2_ctrl *ctrl);
+static void hdmi_hdcp2p2_auth_status(struct hdmi_hdcp2p2_ctrl *ctrl);
+static int hdmi_hdcp2p2_link_check(struct hdmi_hdcp2p2_ctrl *ctrl);
+
+/*
+ * A wakeup command is serviceable if it starts a new authentication, or
+ * if a session is already active; anything else arrives after the
+ * session was torn down and must be ignored.
+ */
+static inline bool hdmi_hdcp2p2_is_valid_state(struct hdmi_hdcp2p2_ctrl *ctrl)
+{
+	if (ctrl->wakeup_cmd == HDMI_HDCP_WKUP_CMD_AUTHENTICATE)
+		return true;
+
+	if (atomic_read(&ctrl->auth_state) != HDCP_STATE_INACTIVE)
+		return true;
+
+	return false;
+}
+
+/*
+ * Snapshot the outgoing message from the wakeup payload into
+ * ctrl->send_msg_buf under msg_lock, replacing any previous buffer.
+ * A zero-length payload is a no-op (not an error).
+ *
+ * Return: 0 on success or nothing-to-copy, -ENOMEM on allocation failure.
+ */
+static int hdmi_hdcp2p2_copy_buf(struct hdmi_hdcp2p2_ctrl *ctrl,
+	struct hdmi_hdcp_wakeup_data *data)
+{
+	mutex_lock(&ctrl->msg_lock);
+
+	if (!data->send_msg_len) {
+		mutex_unlock(&ctrl->msg_lock);
+		return 0;
+	}
+
+	ctrl->send_msg_len = data->send_msg_len;
+
+	/* drop any message that was never sent */
+	kzfree(ctrl->send_msg_buf);
+
+	ctrl->send_msg_buf = kzalloc(data->send_msg_len, GFP_KERNEL);
+
+	if (!ctrl->send_msg_buf) {
+		mutex_unlock(&ctrl->msg_lock);
+		return -ENOMEM;
+	}
+
+	memcpy(ctrl->send_msg_buf, data->send_msg_buf, ctrl->send_msg_len);
+
+	mutex_unlock(&ctrl->msg_lock);
+
+	return 0;
+}
+
+/*
+ * hdmi_hdcp2p2_wakeup() - command entry point from the HDCP 2.2 library
+ * @data: command, context, optional message payload and timeout
+ *
+ * Records the command and timeout, copies out any message payload, and
+ * dispatches the matching kthread work. In tethered mode no work is
+ * queued — hdmi_hdcp2p2_run() consumes ctrl->wakeup_cmd inline instead.
+ * Serialized by wakeup_mutex.
+ *
+ * Return: 0 (errors are logged and swallowed; invalid state only skips
+ * dispatch).
+ */
+static int hdmi_hdcp2p2_wakeup(struct hdmi_hdcp_wakeup_data *data)
+{
+	struct hdmi_hdcp2p2_ctrl *ctrl;
+
+	if (!data) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	ctrl = data->context;
+	if (!ctrl) {
+		pr_err("invalid ctrl\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctrl->wakeup_mutex);
+
+	pr_debug("cmd: %s, timeout %dms, tethered %d\n",
+		hdmi_hdcp_cmd_to_str(data->cmd),
+		data->timeout, ctrl->tethered);
+
+	ctrl->wakeup_cmd = data->cmd;
+
+	/* double the lib's timeout for margin — TODO confirm rationale */
+	if (data->timeout)
+		ctrl->timeout = data->timeout * 2;
+	else
+		ctrl->timeout = HDCP2P2_DEFAULT_TIMEOUT;
+
+	if (!hdmi_hdcp2p2_is_valid_state(ctrl)) {
+		pr_err("invalid state\n");
+		goto exit;
+	}
+
+	if (hdmi_hdcp2p2_copy_buf(ctrl, data))
+		goto exit;
+
+	if (ctrl->wakeup_cmd == HDMI_HDCP_WKUP_CMD_STATUS_SUCCESS)
+		ctrl->auth_status = HDMI_HDCP_AUTH_STATUS_SUCCESS;
+	else if (ctrl->wakeup_cmd == HDMI_HDCP_WKUP_CMD_STATUS_FAILED)
+		ctrl->auth_status = HDMI_HDCP_AUTH_STATUS_FAILURE;
+
+	/* tethered mode: hdmi_hdcp2p2_run() executes the cmd inline */
+	if (ctrl->tethered)
+		goto exit;
+
+	switch (ctrl->wakeup_cmd) {
+	case HDMI_HDCP_WKUP_CMD_SEND_MESSAGE:
+		queue_kthread_work(&ctrl->worker, &ctrl->send_msg);
+		break;
+	case HDMI_HDCP_WKUP_CMD_RECV_MESSAGE:
+		queue_kthread_work(&ctrl->worker, &ctrl->recv_msg);
+		break;
+	case HDMI_HDCP_WKUP_CMD_STATUS_SUCCESS:
+	case HDMI_HDCP_WKUP_CMD_STATUS_FAILED:
+		queue_kthread_work(&ctrl->worker, &ctrl->status);
+		break;
+	case HDMI_HDCP_WKUP_CMD_LINK_POLL:
+		queue_kthread_work(&ctrl->worker, &ctrl->poll);
+		break;
+	case HDMI_HDCP_WKUP_CMD_AUTHENTICATE:
+		queue_kthread_work(&ctrl->worker, &ctrl->auth);
+		break;
+	default:
+		pr_err("invalid wakeup command %d\n", ctrl->wakeup_cmd);
+	}
+exit:
+	mutex_unlock(&ctrl->wakeup_mutex);
+	return 0;
+}
+
+/*
+ * Forward a command to the HDCP 2.2 Trustzone library, if the library
+ * is registered and the command is valid. NULL ctrl/lib/data are
+ * tolerated (no-op, returns 0) so exit paths can call this
+ * unconditionally.
+ */
+static inline int hdmi_hdcp2p2_wakeup_lib(struct hdmi_hdcp2p2_ctrl *ctrl,
+	struct hdcp_lib_wakeup_data *data)
+{
+	int rc = 0;
+
+	if (ctrl && ctrl->lib && ctrl->lib->wakeup &&
+		data && (data->cmd != HDCP_LIB_WKUP_CMD_INVALID)) {
+		rc = ctrl->lib->wakeup(data);
+		if (rc)
+			pr_err("error sending %s to lib\n",
+				hdcp_lib_cmd_to_str(data->cmd));
+	}
+
+	return rc;
+}
+
+/*
+ * hdmi_hdcp2p2_run() - tethered-mode command pump
+ *
+ * Executes wakeup commands inline instead of via kthread works: loops
+ * on send/recv (each of which may trigger the lib to post the next
+ * command into ctrl->wakeup_cmd), and exits after a status or
+ * link-poll command, or when no further command is posted. Always
+ * leaves wakeup_cmd as INVALID.
+ */
+static void hdmi_hdcp2p2_run(struct hdmi_hdcp2p2_ctrl *ctrl)
+{
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	while (1) {
+		switch (ctrl->wakeup_cmd) {
+		case HDMI_HDCP_WKUP_CMD_SEND_MESSAGE:
+			ctrl->wakeup_cmd = HDMI_HDCP_WKUP_CMD_INVALID;
+			hdmi_hdcp2p2_send_msg(ctrl);
+			break;
+		case HDMI_HDCP_WKUP_CMD_RECV_MESSAGE:
+			ctrl->wakeup_cmd = HDMI_HDCP_WKUP_CMD_INVALID;
+			hdmi_hdcp2p2_recv_msg(ctrl);
+			break;
+		case HDMI_HDCP_WKUP_CMD_STATUS_SUCCESS:
+		case HDMI_HDCP_WKUP_CMD_STATUS_FAILED:
+			hdmi_hdcp2p2_auth_status(ctrl);
+			goto exit;
+		case HDMI_HDCP_WKUP_CMD_LINK_POLL:
+			hdmi_hdcp2p2_link_check(ctrl);
+			goto exit;
+		default:
+			goto exit;
+		}
+	}
+exit:
+	ctrl->wakeup_cmd = HDMI_HDCP_WKUP_CMD_INVALID;
+}
+
+/*
+ * Kick off authentication in tethered mode: start the TZ session, then
+ * pump the resulting commands inline via hdmi_hdcp2p2_run().
+ *
+ * Return: 0 on success, negative errno if the start command failed.
+ */
+int hdmi_hdcp2p2_authenticate_tethered(struct hdmi_hdcp2p2_ctrl *ctrl)
+{
+	int rc = 0;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	rc = hdmi_hdcp2p2_auth(ctrl);
+	if (rc) {
+		pr_err("auth failed %d\n", rc);
+		goto exit;
+	}
+
+	hdmi_hdcp2p2_run(ctrl);
+exit:
+	return rc;
+}
+
+/* Return the controller to its idle state: sink gone, session inactive. */
+static void hdmi_hdcp2p2_reset(struct hdmi_hdcp2p2_ctrl *ctrl)
+{
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	ctrl->sink_status = SINK_DISCONNECTED;
+	atomic_set(&ctrl->auth_state, HDCP_STATE_INACTIVE);
+}
+
+/*
+ * hdmi_hdcp2p2_off() - stop the HDCP 2.2 session
+ * @input: struct hdmi_hdcp2p2_ctrl handle
+ *
+ * Marks the session inactive, drains all queued kthread works, disables
+ * the HDCP 2.2 DDC engine, then issues an AUTHENTICATE wakeup — with
+ * state now INACTIVE this drives the TZ lib down its stop path (see
+ * hdmi_hdcp2p2_auth()).
+ */
+static void hdmi_hdcp2p2_off(void *input)
+{
+	struct hdmi_hdcp2p2_ctrl *ctrl = (struct hdmi_hdcp2p2_ctrl *)input;
+	struct hdmi_hdcp_wakeup_data cdata = {HDMI_HDCP_WKUP_CMD_AUTHENTICATE};
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	hdmi_hdcp2p2_reset(ctrl);
+
+	flush_kthread_worker(&ctrl->worker);
+
+	hdmi_hdcp2p2_ddc_disable(ctrl->init_data.ddc_ctrl);
+
+	if (ctrl->tethered) {
+		hdmi_hdcp2p2_auth(ctrl);
+	} else {
+		cdata.context = input;
+		hdmi_hdcp2p2_wakeup(&cdata);
+	}
+}
+
+/*
+ * hdmi_hdcp2p2_authenticate() - begin HDCP 2.2 authentication
+ * @input: struct hdmi_hdcp2p2_ctrl handle
+ *
+ * Enables the controller's auth interrupts, drains stale work, marks
+ * the sink connected / session AUTHENTICATING, quiesces the DDC engines
+ * and then starts the TZ session (inline when tethered, otherwise via
+ * an AUTHENTICATE wakeup onto the kthread worker).
+ *
+ * Return: 0 on success, -EINVAL on invalid input.
+ */
+static int hdmi_hdcp2p2_authenticate(void *input)
+{
+	struct hdmi_hdcp2p2_ctrl *ctrl = input;
+	struct hdmi_hdcp_wakeup_data cdata = {HDMI_HDCP_WKUP_CMD_AUTHENTICATE};
+	u32 regval;
+	int rc = 0;
+
+	/* guard before the register access; all siblings validate input */
+	if (!ctrl || !ctrl->init_data.core_io) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	/* Enable authentication success interrupt */
+	regval = DSS_REG_R(ctrl->init_data.core_io, HDMI_HDCP_INT_CTRL2);
+	regval |= BIT(1) | BIT(2);
+
+	DSS_REG_W(ctrl->init_data.core_io, HDMI_HDCP_INT_CTRL2, regval);
+
+	flush_kthread_worker(&ctrl->worker);
+
+	ctrl->sink_status = SINK_CONNECTED;
+	atomic_set(&ctrl->auth_state, HDCP_STATE_AUTHENTICATING);
+
+	/* make sure ddc is idle before starting hdcp 2.2 authentication */
+	hdmi_scrambler_ddc_disable(ctrl->init_data.ddc_ctrl);
+	hdmi_hdcp2p2_ddc_disable(ctrl->init_data.ddc_ctrl);
+
+	if (ctrl->tethered) {
+		hdmi_hdcp2p2_authenticate_tethered(ctrl);
+	} else {
+		cdata.context = input;
+		hdmi_hdcp2p2_wakeup(&cdata);
+	}
+
+	return rc;
+}
+
+/*
+ * hdmi_hdcp2p2_reauthenticate() - restart HDCP 2.2 after a failure
+ * @input: struct hdmi_hdcp2p2_ctrl handle
+ *
+ * Resets session state and runs the normal authenticate path.
+ *
+ * Return: result of hdmi_hdcp2p2_authenticate(), or -EINVAL on bad input.
+ */
+static int hdmi_hdcp2p2_reauthenticate(void *input)
+{
+	struct hdmi_hdcp2p2_ctrl *ctrl = (struct hdmi_hdcp2p2_ctrl *)input;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	/* reuse the already-cast pointer instead of re-casting input */
+	hdmi_hdcp2p2_reset(ctrl);
+
+	return hdmi_hdcp2p2_authenticate(input);
+}
+
+/* sysfs read handler for "hdcp2p2/tethered": prints the mode flag. */
+static ssize_t hdmi_hdcp2p2_sysfs_rda_tethered(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	ssize_t ret;
+	struct hdmi_hdcp2p2_ctrl *ctrl =
+		hdmi_get_featuredata_from_sysfs_dev(dev, HDMI_TX_FEAT_HDCP2P2);
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctrl->mutex);
+	ret = snprintf(buf, PAGE_SIZE, "%d\n", ctrl->tethered);
+	mutex_unlock(&ctrl->mutex);
+
+	return ret;
+}
+
+/*
+ * sysfs write handler for "hdcp2p2/tethered": sets the mode flag and
+ * propagates the execution type to the TZ library. Note: parse errors
+ * are logged but the write still returns 'count' (success) — NOTE(review):
+ * confirm this is intended.
+ */
+static ssize_t hdmi_hdcp2p2_sysfs_wta_tethered(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct hdmi_hdcp2p2_ctrl *ctrl =
+		hdmi_get_featuredata_from_sysfs_dev(dev, HDMI_TX_FEAT_HDCP2P2);
+	int rc, tethered;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctrl->mutex);
+	rc = kstrtoint(buf, 10, &tethered);
+	if (rc) {
+		pr_err("kstrtoint failed. rc=%d\n", rc);
+		goto exit;
+	}
+
+	ctrl->tethered = !!tethered;
+
+	if (ctrl->lib && ctrl->lib->update_exec_type && ctrl->lib_ctx)
+		ctrl->lib->update_exec_type(ctrl->lib_ctx, ctrl->tethered);
+exit:
+	mutex_unlock(&ctrl->mutex);
+
+	return count;
+}
+
+/*
+ * sysfs write handler for "hdcp2p2/min_level_change"
+ *
+ * Accepts a minimum encryption level (0 = none, 1 = HDCP 1.x,
+ * 2 = HDCP 2.2), asks the TZ library to re-query the stream type, and
+ * notifies the HDMI Tx driver of the new level for recognized values.
+ *
+ * Return: 'count' on success, negative errno on bad input.
+ */
+static ssize_t hdmi_hdcp2p2_sysfs_wta_min_level_change(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct hdmi_hdcp2p2_ctrl *ctrl =
+		hdmi_get_featuredata_from_sysfs_dev(dev, HDMI_TX_FEAT_HDCP2P2);
+	struct hdcp_lib_wakeup_data cdata = {
+		HDCP_LIB_WKUP_CMD_QUERY_STREAM_TYPE};
+	bool enc_notify = true;
+	enum hdmi_hdcp_state enc_lvl;
+	int min_enc_lvl;
+	int rc;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	rc = kstrtoint(buf, 10, &min_enc_lvl);
+	if (rc) {
+		/* use pr_err like the rest of this file (pr_fmt adds __func__) */
+		pr_err("kstrtoint failed. rc=%d\n", rc);
+		goto exit;
+	}
+
+	switch (min_enc_lvl) {
+	case 0:
+		enc_lvl = HDCP_STATE_AUTH_ENC_NONE;
+		break;
+	case 1:
+		enc_lvl = HDCP_STATE_AUTH_ENC_1X;
+		break;
+	case 2:
+		enc_lvl = HDCP_STATE_AUTH_ENC_2P2;
+		break;
+	default:
+		/* unknown level: still query the lib, but skip the notify */
+		enc_notify = false;
+	}
+
+	pr_debug("enc level changed %d\n", min_enc_lvl);
+
+	cdata.context = ctrl->lib_ctx;
+	hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata);
+
+	if (ctrl->tethered)
+		hdmi_hdcp2p2_run(ctrl);
+
+	if (enc_notify && ctrl->init_data.notify_status)
+		ctrl->init_data.notify_status(ctrl->init_data.cb_data, enc_lvl);
+
+	rc = count;
+exit:
+	return rc;
+}
+
+/*
+ * Record an authentication failure: set state to AUTH_FAIL, shut down
+ * the HDCP 2.2 DDC engine, and notify the HDMI Tx driver.
+ */
+static void hdmi_hdcp2p2_auth_failed(struct hdmi_hdcp2p2_ctrl *ctrl)
+{
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	atomic_set(&ctrl->auth_state, HDCP_STATE_AUTH_FAIL);
+
+	hdmi_hdcp2p2_ddc_disable(ctrl->init_data.ddc_ctrl);
+
+	/* notify hdmi tx about HDCP failure */
+	ctrl->init_data.notify_status(ctrl->init_data.cb_data,
+		HDCP_STATE_AUTH_FAIL);
+}
+
+/*
+ * Read an HDCP 2.2 message from the sink's Read_Message DDC offset
+ * (0x80 at slave 0x74, per HDCP 2.2 spec table 2.7) into @buf.
+ * Updates ctrl->timeout_left with the unused portion of @timeout.
+ * Refuses to touch DDC once the session is inactive.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int hdmi_hdcp2p2_ddc_read_message(struct hdmi_hdcp2p2_ctrl *ctrl,
+	u8 *buf, int size, u32 timeout)
+{
+	struct hdmi_tx_ddc_data ddc_data;
+	int rc;
+
+	if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
+		pr_err("hdcp is off\n");
+		return -EINVAL;
+	}
+
+	memset(&ddc_data, 0, sizeof(ddc_data));
+	ddc_data.dev_addr = HDCP_SINK_DDC_SLAVE_ADDR;
+	ddc_data.offset = HDCP_SINK_DDC_HDCP2_READ_MESSAGE;
+	ddc_data.data_buf = buf;
+	ddc_data.data_len = size;
+	ddc_data.request_len = size;
+	ddc_data.retry = 0;
+	ddc_data.hard_timeout = timeout;
+	ddc_data.what = "HDCP2ReadMessage";
+
+	ctrl->init_data.ddc_ctrl->ddc_data = ddc_data;
+
+	pr_debug("read msg timeout %dms\n", timeout);
+
+	rc = hdmi_ddc_read(ctrl->init_data.ddc_ctrl);
+	if (rc)
+		pr_err("Cannot read HDCP message register\n");
+
+	ctrl->timeout_left = ctrl->init_data.ddc_ctrl->ddc_data.timeout_left;
+
+	return rc;
+}
+
+/*
+ * Write an HDCP 2.2 message to the sink's Write_Message DDC offset
+ * (0x60 at slave 0x74) using ctrl->timeout as the hard budget; updates
+ * ctrl->timeout_left. NOTE(review): unlike the read path this is
+ * non-static and does not check for an inactive session — confirm
+ * whether either is intentional.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int hdmi_hdcp2p2_ddc_write_message(struct hdmi_hdcp2p2_ctrl *ctrl,
+	u8 *buf, size_t size)
+{
+	struct hdmi_tx_ddc_data ddc_data;
+	int rc;
+
+	memset(&ddc_data, 0, sizeof(ddc_data));
+	ddc_data.dev_addr = HDCP_SINK_DDC_SLAVE_ADDR;
+	ddc_data.offset = HDCP_SINK_DDC_HDCP2_WRITE_MESSAGE;
+	ddc_data.data_buf = buf;
+	ddc_data.data_len = size;
+	ddc_data.hard_timeout = ctrl->timeout;
+	ddc_data.what = "HDCP2WriteMessage";
+
+	ctrl->init_data.ddc_ctrl->ddc_data = ddc_data;
+
+	rc = hdmi_ddc_write(ctrl->init_data.ddc_ctrl);
+	if (rc)
+		pr_err("Cannot write HDCP message register\n");
+
+	ctrl->timeout_left = ctrl->init_data.ddc_ctrl->ddc_data.timeout_left;
+
+	return rc;
+}
+
+/*
+ * Read the 1-byte HDCP2Version register (DDC offset 0x50) that tells
+ * whether the sink supports HDCP 2.2. NOTE(review): the failure pr_err
+ * is missing a trailing '\n'.
+ *
+ * Return: 0 on success with *hdcp2version filled in, negative errno
+ * on DDC failure.
+ */
+static int hdmi_hdcp2p2_read_version(struct hdmi_hdcp2p2_ctrl *ctrl,
+		u8 *hdcp2version)
+{
+	struct hdmi_tx_ddc_data ddc_data;
+	int rc;
+
+	memset(&ddc_data, 0, sizeof(ddc_data));
+	ddc_data.dev_addr = HDCP_SINK_DDC_SLAVE_ADDR;
+	ddc_data.offset = HDCP_SINK_DDC_HDCP2_VERSION;
+	ddc_data.data_buf = hdcp2version;
+	ddc_data.data_len = 1;
+	ddc_data.request_len = 1;
+	ddc_data.retry = 1;
+	ddc_data.what = "HDCP2Version";
+
+	ctrl->init_data.ddc_ctrl->ddc_data = ddc_data;
+
+	rc = hdmi_ddc_read(ctrl->init_data.ddc_ctrl);
+	if (rc) {
+		pr_err("Cannot read HDCP2Version register");
+		return rc;
+	}
+
+	pr_debug("Read HDCP2Version as %u\n", *hdcp2version);
+	return rc;
+}
+
+/* sysfs attributes exposed under <sysfs_kobj>/hdcp2p2 */
+static DEVICE_ATTR(min_level_change, 0200, NULL,
+		hdmi_hdcp2p2_sysfs_wta_min_level_change);
+static DEVICE_ATTR(tethered, 0644, hdmi_hdcp2p2_sysfs_rda_tethered,
+		hdmi_hdcp2p2_sysfs_wta_tethered);
+
+static struct attribute *hdmi_hdcp2p2_fs_attrs[] = {
+	&dev_attr_min_level_change.attr,
+	&dev_attr_tethered.attr,
+	NULL,
+};
+
+static struct attribute_group hdmi_hdcp2p2_fs_attr_group = {
+	.name = "hdcp2p2",
+	.attrs = hdmi_hdcp2p2_fs_attrs,
+};
+
+/*
+ * Ask the TZ library whether HDCP 2.2 is usable on this device.
+ * Returns false on any missing handle/ops rather than erroring.
+ */
+static bool hdmi_hdcp2p2_feature_supported(void *input)
+{
+	struct hdmi_hdcp2p2_ctrl *ctrl = input;
+	struct hdcp_txmtr_ops *lib = NULL;
+	bool supported = false;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		goto end;
+	}
+
+	lib = ctrl->lib;
+	if (!lib) {
+		pr_err("invalid lib ops data\n");
+		goto end;
+	}
+
+	if (lib->feature_supported)
+		supported = lib->feature_supported(
+			ctrl->lib_ctx);
+end:
+	return supported;
+}
+
+/*
+ * Forward the message staged in ctrl->send_msg_buf to the sink over
+ * DDC. The staged buffer is copied out under msg_lock into a local
+ * buffer so the lock is not held across the (slow) DDC write. The
+ * outcome — send success with remaining timeout, or send failure — is
+ * reported back to the TZ library.
+ */
+static void hdmi_hdcp2p2_send_msg(struct hdmi_hdcp2p2_ctrl *ctrl)
+{
+	int rc = 0;
+	struct hdcp_lib_wakeup_data cdata = {HDCP_LIB_WKUP_CMD_INVALID};
+	uint32_t msglen;
+	char *msg = NULL;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	cdata.context = ctrl->lib_ctx;
+
+	if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
+		pr_err("hdcp is off\n");
+		goto exit;
+	}
+
+	mutex_lock(&ctrl->msg_lock);
+	msglen = ctrl->send_msg_len;
+
+	if (!msglen) {
+		mutex_unlock(&ctrl->msg_lock);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	msg = kzalloc(msglen, GFP_KERNEL);
+	if (!msg) {
+		mutex_unlock(&ctrl->msg_lock);
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	memcpy(msg, ctrl->send_msg_buf, msglen);
+	mutex_unlock(&ctrl->msg_lock);
+
+	/* Forward the message to the sink */
+	rc = hdmi_hdcp2p2_ddc_write_message(ctrl, msg, (size_t)msglen);
+	if (rc) {
+		pr_err("Error sending msg to sink %d\n", rc);
+		cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_SEND_FAILED;
+	} else {
+		cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_SEND_SUCCESS;
+		cdata.timeout = ctrl->timeout_left;
+	}
+exit:
+	kfree(msg);
+
+	hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata);
+}
+
+/* kthread_work thunk for hdmi_hdcp2p2_send_msg() */
+static void hdmi_hdcp2p2_send_msg_work(struct kthread_work *work)
+{
+	struct hdmi_hdcp2p2_ctrl *ctrl = container_of(work,
+		struct hdmi_hdcp2p2_ctrl, send_msg);
+
+	hdmi_hdcp2p2_send_msg(ctrl);
+}
+
+/*
+ * Callback installed as ddc_data->link_cb by hdmi_hdcp2p2_link_check():
+ * invoked when RxStatus activity is detected; queues the link work
+ * unless the session has since gone inactive.
+ */
+static void hdmi_hdcp2p2_link_cb(void *data)
+{
+	struct hdmi_hdcp2p2_ctrl *ctrl = data;
+
+	if (!ctrl) {
+		pr_debug("invalid input\n");
+		return;
+	}
+
+	if (atomic_read(&ctrl->auth_state) != HDCP_STATE_INACTIVE)
+		queue_kthread_work(&ctrl->worker, &ctrl->link);
+}
+
+/*
+ * Receive an HDCP 2.2 message from the sink: arm the RxStatus hardware
+ * monitor (timeout converted from ms to hsync counts), wait for a
+ * message size or a reauth request, then read the message body and
+ * hand it to the TZ library. Any error is translated into a
+ * RECV_TIMEOUT or RECV_FAILED command for the library.
+ */
+static void hdmi_hdcp2p2_recv_msg(struct hdmi_hdcp2p2_ctrl *ctrl)
+{
+	int rc = 0, timeout_hsync;
+	char *recvd_msg_buf = NULL;
+	struct hdmi_tx_hdcp2p2_ddc_data *ddc_data;
+	struct hdmi_tx_ddc_ctrl *ddc_ctrl;
+	struct hdcp_lib_wakeup_data cdata = {HDCP_LIB_WKUP_CMD_INVALID};
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	cdata.context = ctrl->lib_ctx;
+
+	ddc_ctrl = ctrl->init_data.ddc_ctrl;
+	if (!ddc_ctrl) {
+		pr_err("invalid ddc ctrl\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
+		pr_err("hdcp is off\n");
+		goto exit;
+	}
+	hdmi_ddc_config(ddc_ctrl);
+
+	ddc_data = &ddc_ctrl->hdcp2p2_ddc_data;
+
+	memset(ddc_data, 0, sizeof(*ddc_data));
+
+	/* convert the ms budget into hsync counts for the HW timer */
+	timeout_hsync = hdmi_utils_get_timeout_in_hysnc(
+		ctrl->init_data.timing, ctrl->timeout);
+
+	if (timeout_hsync <= 0) {
+		pr_err("err in timeout hsync calc\n");
+		timeout_hsync = HDMI_DEFAULT_TIMEOUT_HSYNC;
+	}
+
+	pr_debug("timeout for rxstatus %dms, %d hsync\n",
+		ctrl->timeout, timeout_hsync);
+
+	ddc_data->intr_mask = RXSTATUS_MESSAGE_SIZE | RXSTATUS_REAUTH_REQ;
+	ddc_data->timeout_ms = ctrl->timeout;
+	ddc_data->timeout_hsync = timeout_hsync;
+	ddc_data->periodic_timer_hsync = timeout_hsync / 20;
+	ddc_data->read_method = HDCP2P2_RXSTATUS_HW_DDC_SW_TRIGGER;
+	ddc_data->wait = true;	/* block until RxStatus fires or times out */
+
+	rc = hdmi_hdcp2p2_ddc_read_rxstatus(ddc_ctrl);
+	if (rc) {
+		pr_err("error reading rxstatus %d\n", rc);
+		goto exit;
+	}
+
+	if (ddc_data->reauth_req) {
+		ddc_data->reauth_req = false;
+
+		pr_debug("reauth triggered by sink\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	ctrl->timeout_left = ddc_data->timeout_left;
+
+	pr_debug("timeout left after rxstatus %dms, msg size %d\n",
+		ctrl->timeout_left, ddc_data->message_size);
+
+	if (!ddc_data->message_size) {
+		pr_err("recvd invalid message size\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	recvd_msg_buf = kzalloc(ddc_data->message_size, GFP_KERNEL);
+	if (!recvd_msg_buf) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	rc = hdmi_hdcp2p2_ddc_read_message(ctrl, recvd_msg_buf,
+		ddc_data->message_size, ctrl->timeout_left);
+	if (rc) {
+		pr_err("error reading message %d\n", rc);
+		goto exit;
+	}
+
+	cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_RECV_SUCCESS;
+	cdata.recvd_msg_buf = recvd_msg_buf;
+	cdata.recvd_msg_len = ddc_data->message_size;
+	cdata.timeout = ctrl->timeout_left;
+exit:
+	if (rc == -ETIMEDOUT)
+		cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_RECV_TIMEOUT;
+	else if (rc)
+		cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_RECV_FAILED;
+
+	/* lib consumes the buffer during wakeup; safe to free after */
+	hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata);
+	kfree(recvd_msg_buf);
+}
+
+/* kthread_work thunk for hdmi_hdcp2p2_recv_msg() */
+static void hdmi_hdcp2p2_recv_msg_work(struct kthread_work *work)
+{
+	struct hdmi_hdcp2p2_ctrl *ctrl = container_of(work,
+		struct hdmi_hdcp2p2_ctrl, recv_msg);
+
+	hdmi_hdcp2p2_recv_msg(ctrl);
+}
+
+/*
+ * Arm periodic hardware RxStatus monitoring of the link: watch for
+ * READY / message-size / reauth-request bits, with
+ * hdmi_hdcp2p2_link_cb() queued when something changes. Non-blocking
+ * (no ddc_data->wait).
+ *
+ * Return: result of hdmi_hdcp2p2_ddc_read_rxstatus(), -EINVAL without
+ * a ddc ctrl.
+ */
+static int hdmi_hdcp2p2_link_check(struct hdmi_hdcp2p2_ctrl *ctrl)
+{
+	struct hdmi_tx_ddc_ctrl *ddc_ctrl;
+	struct hdmi_tx_hdcp2p2_ddc_data *ddc_data;
+	int timeout_hsync;
+
+	ddc_ctrl = ctrl->init_data.ddc_ctrl;
+	if (!ddc_ctrl)
+		return -EINVAL;
+
+	hdmi_ddc_config(ddc_ctrl);
+
+	ddc_data = &ddc_ctrl->hdcp2p2_ddc_data;
+
+	memset(ddc_data, 0, sizeof(*ddc_data));
+
+	/* poll interval derived from half a second, expressed in hsyncs */
+	timeout_hsync = hdmi_utils_get_timeout_in_hysnc(
+		ctrl->init_data.timing, jiffies_to_msecs(HZ / 2));
+
+	if (timeout_hsync <= 0) {
+		pr_err("err in timeout hsync calc\n");
+		timeout_hsync = HDMI_DEFAULT_TIMEOUT_HSYNC;
+	}
+	pr_debug("timeout for rxstatus %d hsyncs\n", timeout_hsync);
+
+	ddc_data->intr_mask = RXSTATUS_READY | RXSTATUS_MESSAGE_SIZE |
+		RXSTATUS_REAUTH_REQ;
+	ddc_data->timeout_hsync = timeout_hsync;
+	ddc_data->periodic_timer_hsync = timeout_hsync;
+	ddc_data->read_method = HDCP2P2_RXSTATUS_HW_DDC_SW_TRIGGER;
+	ddc_data->link_cb = hdmi_hdcp2p2_link_cb;
+	ddc_data->link_data = ctrl;
+
+	return hdmi_hdcp2p2_ddc_read_rxstatus(ddc_ctrl);
+}
+
+/* kthread_work thunk for hdmi_hdcp2p2_link_check() (LINK_POLL cmd) */
+static void hdmi_hdcp2p2_poll_work(struct kthread_work *work)
+{
+	struct hdmi_hdcp2p2_ctrl *ctrl = container_of(work,
+		struct hdmi_hdcp2p2_ctrl, poll);
+
+	hdmi_hdcp2p2_link_check(ctrl);
+}
+
+/*
+ * Act on the auth result recorded by the wakeup handler: on success,
+ * notify the HDMI Tx driver, move to AUTHENTICATED and (in tethered
+ * mode) start link monitoring; on failure, run the failure path.
+ */
+static void hdmi_hdcp2p2_auth_status(struct hdmi_hdcp2p2_ctrl *ctrl)
+{
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
+		pr_err("hdcp is off\n");
+		return;
+	}
+
+	if (ctrl->auth_status == HDMI_HDCP_AUTH_STATUS_SUCCESS) {
+		ctrl->init_data.notify_status(ctrl->init_data.cb_data,
+			HDCP_STATE_AUTHENTICATED);
+
+		atomic_set(&ctrl->auth_state, HDCP_STATE_AUTHENTICATED);
+
+		if (ctrl->tethered)
+			hdmi_hdcp2p2_link_check(ctrl);
+	} else {
+		hdmi_hdcp2p2_auth_failed(ctrl);
+	}
+}
+
+/* kthread_work thunk for hdmi_hdcp2p2_auth_status() */
+static void hdmi_hdcp2p2_auth_status_work(struct kthread_work *work)
+{
+	struct hdmi_hdcp2p2_ctrl *ctrl = container_of(work,
+		struct hdmi_hdcp2p2_ctrl, status);
+
+	hdmi_hdcp2p2_auth_status(ctrl);
+}
+
+/*
+ * Link-maintenance work, queued by hdmi_hdcp2p2_link_cb() when the
+ * RxStatus monitor fires. A sink reauth request stops the TZ session;
+ * a READY + message-size indication (topology change) reads the new
+ * message and forwards it to the library. Any error ends with the
+ * auth-failed path so the HDMI Tx driver can reauthenticate.
+ */
+static void hdmi_hdcp2p2_link_work(struct kthread_work *work)
+{
+	int rc = 0;
+	struct hdmi_hdcp2p2_ctrl *ctrl = container_of(work,
+		struct hdmi_hdcp2p2_ctrl, link);
+	struct hdcp_lib_wakeup_data cdata = {HDCP_LIB_WKUP_CMD_INVALID};
+	char *recvd_msg_buf = NULL;
+	struct hdmi_tx_hdcp2p2_ddc_data *ddc_data;
+	struct hdmi_tx_ddc_ctrl *ddc_ctrl;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	cdata.context = ctrl->lib_ctx;
+
+	ddc_ctrl = ctrl->init_data.ddc_ctrl;
+	if (!ddc_ctrl) {
+		rc = -EINVAL;
+		cdata.cmd = HDCP_LIB_WKUP_CMD_STOP;
+		goto exit;
+	}
+
+	ddc_data = &ddc_ctrl->hdcp2p2_ddc_data;
+
+	if (ddc_data->reauth_req) {
+		pr_debug("reauth triggered by sink\n");
+
+		ddc_data->reauth_req = false;
+		rc = -ENOLINK;
+		cdata.cmd = HDCP_LIB_WKUP_CMD_STOP;
+		goto exit;
+	}
+
+	if (ddc_data->ready && ddc_data->message_size) {
+		pr_debug("topology changed. rxstatus msg size %d\n",
+			ddc_data->message_size);
+
+		ddc_data->ready  = false;
+
+		recvd_msg_buf = kzalloc(ddc_data->message_size, GFP_KERNEL);
+		if (!recvd_msg_buf) {
+			cdata.cmd = HDCP_LIB_WKUP_CMD_STOP;
+			goto exit;
+		}
+
+		rc = hdmi_hdcp2p2_ddc_read_message(ctrl, recvd_msg_buf,
+			ddc_data->message_size, HDCP2P2_DEFAULT_TIMEOUT);
+		if (rc) {
+			cdata.cmd = HDCP_LIB_WKUP_CMD_STOP;
+			pr_err("error reading message %d\n", rc);
+		} else {
+			cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_RECV_SUCCESS;
+			cdata.recvd_msg_buf = recvd_msg_buf;
+			cdata.recvd_msg_len = ddc_data->message_size;
+		}
+
+		ddc_data->message_size = 0;
+	}
+exit:
+	/* lib consumes the buffer during wakeup; safe to free after */
+	hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata);
+	kfree(recvd_msg_buf);
+
+	if (ctrl->tethered)
+		hdmi_hdcp2p2_run(ctrl);
+
+	if (rc) {
+		hdmi_hdcp2p2_auth_failed(ctrl);
+		return;
+	}
+}
+
+/*
+ * Drive the HDCP 2.2 library for an authentication attempt: send
+ * START while the driver is in the authenticating state, STOP
+ * otherwise.  On wakeup failure the auth-failed path is taken.
+ */
+static int hdmi_hdcp2p2_auth(struct hdmi_hdcp2p2_ctrl *ctrl)
+{
+	struct hdcp_lib_wakeup_data cdata = {HDCP_LIB_WKUP_CMD_INVALID};
+	int rc;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	cdata.context = ctrl->lib_ctx;
+	cdata.cmd = (atomic_read(&ctrl->auth_state) ==
+			HDCP_STATE_AUTHENTICATING) ?
+		HDCP_LIB_WKUP_CMD_START : HDCP_LIB_WKUP_CMD_STOP;
+
+	rc = hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata);
+	if (rc)
+		hdmi_hdcp2p2_auth_failed(ctrl);
+
+	return rc;
+}
+
+/* kthread work wrapper for hdmi_hdcp2p2_auth(). */
+static void hdmi_hdcp2p2_auth_work(struct kthread_work *work)
+{
+	struct hdmi_hdcp2p2_ctrl *ctrl;
+
+	ctrl = container_of(work, struct hdmi_hdcp2p2_ctrl, auth);
+	hdmi_hdcp2p2_auth(ctrl);
+}
+
+/*
+ * hdmi_hdcp2p2_deinit() - tear down the HDCP 2.2 feature
+ * @input: control context returned by hdmi_hdcp2p2_init()
+ *
+ * Stops the library session, stops the worker thread, removes the
+ * sysfs group, destroys the locks and frees the context.  The order
+ * matters: the library is stopped before the worker thread so no new
+ * work is queued while the thread is being stopped.
+ */
+void hdmi_hdcp2p2_deinit(void *input)
+{
+	struct hdmi_hdcp2p2_ctrl *ctrl = (struct hdmi_hdcp2p2_ctrl *)input;
+	struct hdcp_lib_wakeup_data cdata = {HDCP_LIB_WKUP_CMD_INVALID};
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	cdata.cmd = HDCP_LIB_WKUP_CMD_STOP;
+	cdata.context = ctrl->lib_ctx;
+	hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata);
+
+	kthread_stop(ctrl->thread);
+
+	sysfs_remove_group(ctrl->init_data.sysfs_kobj,
+				&hdmi_hdcp2p2_fs_attr_group);
+
+	mutex_destroy(&ctrl->mutex);
+	mutex_destroy(&ctrl->msg_lock);
+	mutex_destroy(&ctrl->wakeup_mutex);
+	kfree(ctrl);
+}
+
+/*
+ * hdmi_hdcp2p2_init() - set up the HDCP 2.2 feature
+ * @init_data: resources supplied by the HDMI Tx driver (register io,
+ *             ddc controller, status callback, workqueue, sysfs kobj)
+ *
+ * Registers with the HDCP 2.2 library, creates the sysfs attribute
+ * group and spawns the kthread worker that services all HDCP work
+ * items (auth, send/recv message, status, link, poll).
+ *
+ * Return: control context on success, ERR_PTR() on failure.
+ *
+ * NOTE(review): once sysfs_create_group() has succeeded, the error
+ * path only frees @ctrl -- the sysfs group is not removed, the
+ * mutexes are not destroyed and, on kthread_run() failure, the
+ * library registration is not undone.  Verify whether explicit
+ * unwind is required here.
+ */
+void *hdmi_hdcp2p2_init(struct hdmi_hdcp_init_data *init_data)
+{
+	int rc;
+	struct hdmi_hdcp2p2_ctrl *ctrl;
+	static struct hdmi_hdcp_ops ops = {
+		.hdmi_hdcp_reauthenticate = hdmi_hdcp2p2_reauthenticate,
+		.hdmi_hdcp_authenticate = hdmi_hdcp2p2_authenticate,
+		.feature_supported = hdmi_hdcp2p2_feature_supported,
+		.hdmi_hdcp_off = hdmi_hdcp2p2_off
+	};
+
+	static struct hdcp_client_ops client_ops = {
+		.wakeup = hdmi_hdcp2p2_wakeup,
+	};
+
+	static struct hdcp_txmtr_ops txmtr_ops;
+	struct hdcp_register_data register_data;
+
+	pr_debug("HDCP2P2 feature initialization\n");
+
+	if (!init_data || !init_data->core_io || !init_data->mutex ||
+		!init_data->ddc_ctrl || !init_data->notify_status ||
+		!init_data->workq || !init_data->cb_data) {
+		pr_err("invalid input\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* HDCP 2.2 requires a minimum HDMI Tx hardware revision */
+	if (init_data->hdmi_tx_ver < MIN_HDMI_TX_MAJOR_VERSION) {
+		pr_err("HDMI Tx does not support HDCP 2.2\n");
+		return ERR_PTR(-ENODEV);
+	}
+
+	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+	if (!ctrl)
+		return ERR_PTR(-ENOMEM);
+
+	ctrl->init_data = *init_data;
+	ctrl->lib = &txmtr_ops;
+	ctrl->tethered = init_data->tethered;
+
+	rc = sysfs_create_group(init_data->sysfs_kobj,
+				&hdmi_hdcp2p2_fs_attr_group);
+	if (rc) {
+		pr_err("hdcp2p2 sysfs group creation failed\n");
+		goto error;
+	}
+
+	ctrl->sink_status = SINK_DISCONNECTED;
+
+	atomic_set(&ctrl->auth_state, HDCP_STATE_INACTIVE);
+
+	ctrl->ops = &ops;
+	mutex_init(&ctrl->mutex);
+	mutex_init(&ctrl->msg_lock);
+	mutex_init(&ctrl->wakeup_mutex);
+
+	register_data.hdcp_ctx = &ctrl->lib_ctx;
+	register_data.client_ops = &client_ops;
+	register_data.txmtr_ops = &txmtr_ops;
+	register_data.client_ctx = ctrl;
+	register_data.tethered = ctrl->tethered;
+
+	rc = hdcp_library_register(&register_data);
+	if (rc) {
+		pr_err("Unable to register with HDCP 2.2 library\n");
+		goto error;
+	}
+
+	/* one worker thread services all HDCP 2.2 work items in order */
+	init_kthread_worker(&ctrl->worker);
+
+	init_kthread_work(&ctrl->auth,     hdmi_hdcp2p2_auth_work);
+	init_kthread_work(&ctrl->send_msg, hdmi_hdcp2p2_send_msg_work);
+	init_kthread_work(&ctrl->recv_msg, hdmi_hdcp2p2_recv_msg_work);
+	init_kthread_work(&ctrl->status,   hdmi_hdcp2p2_auth_status_work);
+	init_kthread_work(&ctrl->link,     hdmi_hdcp2p2_link_work);
+	init_kthread_work(&ctrl->poll,     hdmi_hdcp2p2_poll_work);
+
+	ctrl->thread = kthread_run(kthread_worker_fn,
+		&ctrl->worker, "hdmi_hdcp2p2");
+
+	if (IS_ERR(ctrl->thread)) {
+		pr_err("unable to start hdcp2p2 thread\n");
+		rc = PTR_ERR(ctrl->thread);
+		ctrl->thread = NULL;
+		goto error;
+	}
+
+	return ctrl;
+error:
+	kfree(ctrl);
+	return ERR_PTR(rc);
+}
+
+/*
+ * Read the sink's HDCP2Version register over DDC and report whether
+ * the sink advertises HDCP 2.2 capability (bit 2 of the register).
+ */
+static bool hdmi_hdcp2p2_supported(struct hdmi_hdcp2p2_ctrl *ctrl)
+{
+	u8 hdcp2version = 0;
+
+	if (!hdmi_hdcp2p2_read_version(ctrl, &hdcp2version) &&
+	    (hdcp2version & BIT(2))) {
+		pr_debug("Sink is HDCP 2.2 capable\n");
+		return true;
+	}
+
+	pr_debug("Sink is not HDCP 2.2 capable\n");
+	return false;
+}
+
+/*
+ * hdmi_hdcp2p2_start() - probe the sink for HDCP 2.2 support
+ * @input: control context returned by hdmi_hdcp2p2_init()
+ *
+ * Return: the HDCP 2.2 ops table if the sink is capable, NULL
+ * otherwise.
+ *
+ * Fix: validate @input before use -- hdmi_hdcp2p2_supported()
+ * dereferences it unconditionally, while every other entry point in
+ * this file NULL-checks its input.
+ */
+struct hdmi_hdcp_ops *hdmi_hdcp2p2_start(void *input)
+{
+	struct hdmi_hdcp2p2_ctrl *ctrl = input;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return NULL;
+	}
+
+	pr_debug("Checking sink capability\n");
+	if (hdmi_hdcp2p2_supported(ctrl))
+		return ctrl->ops;
+	else
+		return NULL;
+}
+
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_mhl.h b/drivers/video/fbdev/msm/mdss_hdmi_mhl.h
new file mode 100644
index 0000000..924a1a0
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_hdmi_mhl.h
@@ -0,0 +1,27 @@
+/* Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MDSS_HDMI_MHL_H__
+#define __MDSS_HDMI_MHL_H__
+
+#include <linux/platform_device.h>
+
+/**
+ * struct msm_hdmi_mhl_ops - callbacks the HDMI driver exposes to MHL
+ * @tmds_enabled: report whether the TMDS output is currently enabled
+ * @set_mhl_max_pclk: cap the maximum pixel clock usable over the MHL
+ *                    link (units defined by the caller -- TODO confirm)
+ * @set_upstream_hpd: assert/deassert hot plug detect upstream
+ */
+struct msm_hdmi_mhl_ops {
+	u8 (*tmds_enabled)(struct platform_device *pdev);
+	int (*set_mhl_max_pclk)(struct platform_device *pdev, u32 max_val);
+	int (*set_upstream_hpd)(struct platform_device *pdev, uint8_t on);
+};
+
+/* Register MHL client ops with the HDMI Tx driver identified by @pdev. */
+int msm_hdmi_register_mhl(struct platform_device *pdev,
+			  struct msm_hdmi_mhl_ops *ops, void *data);
+#endif /* __MDSS_HDMI_MHL_H__ */
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_panel.c b/drivers/video/fbdev/msm/mdss_hdmi_panel.c
new file mode 100644
index 0000000..9e082b3a
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_hdmi_panel.c
@@ -0,0 +1,932 @@
+/* Copyright (c) 2010-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/types.h>
+
+#include "video/msm_hdmi_modes.h"
+#include "mdss_debug.h"
+#include "mdss_fb.h"
+#include "mdss.h"
+#include "mdss_hdmi_util.h"
+#include "mdss_panel.h"
+#include "mdss_hdmi_panel.h"
+
+/* Pixel clock (kHz) above which HDMI 2.0 scrambling is mandatory */
+#define HDMI_TX_SCRAMBLER_THRESHOLD_RATE_KHZ 340000
+#define HDMI_TX_SCRAMBLER_TIMEOUT_MSEC 200
+
+#define HDMI_TX_KHZ_TO_HZ                  1000U
+
+/* AVI INFOFRAME DATA */
+#define NUM_MODES_AVI 20
+#define AVI_MAX_DATA_BYTES 13
+
+/* Line numbers at which AVI Infoframe and Vendor Infoframe will be sent */
+#define AVI_IFRAME_LINE_NUMBER 1
+#define VENDOR_IFRAME_LINE_NUMBER 3
+
+/*
+ * Sum of the four bytes of a 32-bit word, for infoframe checksums.
+ * NOTE(review): the argument is not parenthesized inside the
+ * expansion, so pass only a simple variable, never an expression.
+ */
+#define IFRAME_CHECKSUM_32(d)			\
+	((d & 0xff) + ((d >> 8) & 0xff) +	\
+	((d >> 16) & 0xff) + ((d >> 24) & 0xff))
+
+#define IFRAME_PACKET_OFFSET 0x80
+/*
+ * InfoFrame Type Code:
+ * 0x0 - Reserved
+ * 0x1 - Vendor Specific
+ * 0x2 - Auxiliary Video Information
+ * 0x3 - Source Product Description
+ * 0x4 - AUDIO
+ * 0x5 - MPEG Source
+ * 0x6 - NTSC VBI
+ * 0x7 - 0xFF - Reserved
+ */
+#define AVI_IFRAME_TYPE 0x2
+#define AVI_IFRAME_VERSION 0x2
+#define LEFT_SHIFT_BYTE(x) ((x) << 8)
+#define LEFT_SHIFT_WORD(x) ((x) << 16)
+#define LEFT_SHIFT_24BITS(x) ((x) << 24)
+
+/* AVI Infoframe data byte 3, bit 7 (msb) represents ITC bit */
+#define SET_ITC_BIT(byte)  (byte = (byte | BIT(7)))
+#define CLR_ITC_BIT(byte)  (byte = (byte & ~BIT(7)))
+
+/*
+ * CN represents IT content type, if ITC bit in infoframe data byte 3
+ * is set, CN bits will represent content type as below:
+ * 0b00 Graphics
+ * 0b01 Photo
+ * 0b10 Cinema
+ * 0b11 Game
+ */
+#define CONFIG_CN_BITS(bits, byte) \
+		(byte = (byte & ~(BIT(4) | BIT(5))) |\
+			((bits & (BIT(0) | BIT(1))) << 4))
+
+/**
+ * struct hdmi_avi_iframe_bar_info - AVI infoframe bar information
+ * @vert_binfo_present: vertical bar info is valid
+ * @horz_binfo_present: horizontal bar info is valid
+ * @end_of_top_bar: last line of the top bar
+ * @start_of_bottom_bar: first line of the bottom bar
+ * @end_of_left_bar: last pixel of the left bar
+ * @start_of_right_bar: first pixel of the right bar
+ */
+struct hdmi_avi_iframe_bar_info {
+	bool vert_binfo_present;
+	bool horz_binfo_present;
+	u32 end_of_top_bar;
+	u32 start_of_bottom_bar;
+	u32 end_of_left_bar;
+	u32 start_of_right_bar;
+};
+
+/**
+ * struct hdmi_avi_infoframe_config - data carried in the AVI infoframe
+ *
+ * Mirrors the fields of the CEA-861 AVI infoframe that this driver
+ * programs (pixel format, scan info, colorimetry, quantization ranges,
+ * scaling, IT content flags, pixel repetition and bar info).
+ */
+struct hdmi_avi_infoframe_config {
+	u32 pixel_format;
+	u32 scan_info;
+	bool act_fmt_info_present;
+	u32 colorimetry_info;
+	u32 ext_colorimetry_info;
+	u32 rgb_quantization_range;
+	u32 yuv_quantization_range;
+	u32 scaling_info;
+	bool is_it_content;
+	u8 content_type;
+	u8 pixel_rpt_factor;
+	struct hdmi_avi_iframe_bar_info bar_info;
+};
+
+/**
+ * struct hdmi_video_config - active video timing plus AVI settings
+ * @timing: mode timing currently in use
+ * @avi_iframe: AVI infoframe content derived from @timing and panel data
+ */
+struct hdmi_video_config {
+	struct msm_hdmi_mode_timing_info *timing;
+	struct hdmi_avi_infoframe_config avi_iframe;
+};
+
+/**
+ * struct hdmi_panel - private context for the HDMI panel driver
+ * @io: HDMI controller register space
+ * @ds_data: downstream (ds) mode filtering data
+ * @data: client-supplied panel data
+ * @vid_cfg: current video/AVI configuration
+ * @ddc: DDC controller used for SCDC accesses
+ * @version: HDMI Tx hardware version
+ * @vic: current video identification code
+ * @spd_vendor_name: SPD infoframe vendor name bytes
+ * @spd_product_description: SPD infoframe product description bytes
+ * @on: panel power state
+ * @scrambler_enabled: whether scrambling is active on the link
+ */
+struct hdmi_panel {
+	struct dss_io_data *io;
+	struct hdmi_util_ds_data *ds_data;
+	struct hdmi_panel_data *data;
+	struct hdmi_video_config vid_cfg;
+	struct hdmi_tx_ddc_ctrl *ddc;
+	u32 version;
+	u32 vic;
+	u8 *spd_vendor_name;
+	u8 *spd_product_description;
+	bool on;
+	bool scrambler_enabled;
+};
+
+/* Indices into the AVI infoframe data-byte array (payload bytes 1..13) */
+enum {
+	DATA_BYTE_1,
+	DATA_BYTE_2,
+	DATA_BYTE_3,
+	DATA_BYTE_4,
+	DATA_BYTE_5,
+	DATA_BYTE_6,
+	DATA_BYTE_7,
+	DATA_BYTE_8,
+	DATA_BYTE_9,
+	DATA_BYTE_10,
+	DATA_BYTE_11,
+	DATA_BYTE_12,
+	DATA_BYTE_13,
+};
+
+/* Quantization range codes programmed into the AVI infoframe */
+enum hdmi_quantization_range {
+	HDMI_QUANTIZATION_DEFAULT,
+	HDMI_QUANTIZATION_LIMITED_RANGE,
+	HDMI_QUANTIZATION_FULL_RANGE
+};
+
+/* Scaling information codes programmed into the AVI infoframe */
+enum hdmi_scaling_info {
+	HDMI_SCALING_NONE,
+	HDMI_SCALING_HORZ,
+	HDMI_SCALING_VERT,
+	HDMI_SCALING_HORZ_VERT,
+};
+
+/*
+ * hdmi_panel_get_vic() - resolve the video identification code
+ * @pinfo: panel timing information
+ * @ds_data: downstream filtering data passed to the mode lookup
+ *
+ * If @pinfo carries an explicit vic it is validated against the
+ * supported mode table; otherwise a timing structure is built from the
+ * raw porch/active values and matched to a known mode.
+ *
+ * Return: vic on success, negative errno on failure.
+ */
+static int hdmi_panel_get_vic(struct mdss_panel_info *pinfo,
+		struct hdmi_util_ds_data *ds_data)
+{
+	int new_vic = -1;
+	u32 h_total, v_total;
+	struct msm_hdmi_mode_timing_info timing;
+
+	if (!pinfo) {
+		pr_err("invalid panel data\n");
+		return -EINVAL;
+	}
+
+	if (pinfo->vic) {
+		struct msm_hdmi_mode_timing_info info = {0};
+		u32 ret = hdmi_get_supported_mode(&info, ds_data, pinfo->vic);
+		u32 supported = info.supported;
+
+		if (!ret && supported) {
+			new_vic = pinfo->vic;
+		} else {
+			pr_err("invalid or not supported vic %d\n",
+				pinfo->vic);
+			return -EPERM;
+		}
+	} else {
+		timing.active_h      = pinfo->xres;
+		timing.back_porch_h  = pinfo->lcdc.h_back_porch;
+		timing.front_porch_h = pinfo->lcdc.h_front_porch;
+		timing.pulse_width_h = pinfo->lcdc.h_pulse_width;
+
+		h_total = timing.active_h + timing.back_porch_h +
+			timing.front_porch_h + timing.pulse_width_h;
+
+		pr_debug("ah=%d bph=%d fph=%d pwh=%d ht=%d\n",
+			timing.active_h, timing.back_porch_h,
+			timing.front_porch_h, timing.pulse_width_h,
+			h_total);
+
+		timing.active_v      = pinfo->yres;
+		timing.back_porch_v  = pinfo->lcdc.v_back_porch;
+		timing.front_porch_v = pinfo->lcdc.v_front_porch;
+		timing.pulse_width_v = pinfo->lcdc.v_pulse_width;
+
+		v_total = timing.active_v + timing.back_porch_v +
+			timing.front_porch_v + timing.pulse_width_v;
+
+		pr_debug("av=%d bpv=%d fpv=%d pwv=%d vt=%d\n",
+			timing.active_v, timing.back_porch_v,
+			timing.front_porch_v, timing.pulse_width_v, v_total);
+
+		/* clk_rate is in Hz; pixel_freq is kept in kHz */
+		timing.pixel_freq = ((unsigned long int)pinfo->clk_rate / 1000);
+		if (h_total && v_total) {
+			/* pixel_freq(kHz)*1000 / pixels-per-frame = fps;
+			 * the final *1000 stores the rate in millihertz
+			 */
+			timing.refresh_rate = ((timing.pixel_freq * 1000) /
+				(h_total * v_total)) * 1000;
+		} else {
+			pr_err("cannot cal refresh rate\n");
+			return -EPERM;
+		}
+
+		pr_debug("pixel_freq=%d refresh_rate=%d\n",
+			timing.pixel_freq, timing.refresh_rate);
+
+		new_vic = hdmi_get_video_id_code(&timing, ds_data);
+	}
+
+	return new_vic;
+}
+
+/* Propagate the active HDMI timing into the panel info used by DFPS. */
+static void hdmi_panel_update_dfps_data(struct hdmi_panel *panel)
+{
+	struct msm_hdmi_mode_timing_info *timing = panel->vid_cfg.timing;
+	struct mdss_panel_info *pinfo = panel->data->pinfo;
+
+	pinfo->saved_total = mdss_panel_get_htotal(pinfo, true);
+	pinfo->saved_fporch = timing->front_porch_h;
+	pinfo->current_fps = timing->refresh_rate;
+	pinfo->default_fps = timing->refresh_rate;
+	pinfo->lcdc.frame_rate = timing->refresh_rate;
+}
+
+/*
+ * hdmi_panel_config_avi() - populate the AVI infoframe configuration
+ * @panel: panel context
+ *
+ * Fills vid_cfg.avi_iframe from the client panel data and the active
+ * timing.  Bars are configured as "no bars" (end-of-bar just past the
+ * active region, start at 0).
+ *
+ * Return: 0 on success, -EPERM if no timing is set for the vic.
+ */
+static int hdmi_panel_config_avi(struct hdmi_panel *panel)
+{
+	struct mdss_panel_info *pinfo = panel->data->pinfo;
+	struct hdmi_video_config *vid_cfg = &panel->vid_cfg;
+	struct hdmi_avi_infoframe_config *avi = &vid_cfg->avi_iframe;
+	struct msm_hdmi_mode_timing_info *timing;
+	u32 ret = 0;
+
+	timing = panel->vid_cfg.timing;
+	if (!timing) {
+		pr_err("fmt not supported: %d\n", panel->vic);
+		ret = -EPERM;
+		goto end;
+	}
+
+	/* Setup AVI Infoframe content */
+	avi->pixel_format  = pinfo->out_format;
+	avi->is_it_content = panel->data->is_it_content;
+	avi->content_type  = panel->data->content_type;
+	avi->scan_info     = panel->data->scan_info;
+
+	avi->bar_info.end_of_top_bar = 0x0;
+	avi->bar_info.start_of_bottom_bar = timing->active_v + 1;
+	avi->bar_info.end_of_left_bar = 0;
+	avi->bar_info.start_of_right_bar = timing->active_h + 1;
+
+	avi->act_fmt_info_present = true;
+	avi->rgb_quantization_range = HDMI_QUANTIZATION_DEFAULT;
+	avi->yuv_quantization_range = HDMI_QUANTIZATION_DEFAULT;
+
+	avi->scaling_info = HDMI_SCALING_NONE;
+
+	avi->colorimetry_info = 0;
+	avi->ext_colorimetry_info = 0;
+
+	avi->pixel_rpt_factor = 0;
+end:
+	return ret;
+}
+
+/*
+ * hdmi_panel_setup_video() - program the HDMI timing registers
+ * @panel: panel context
+ *
+ * Writes total/active H and V timing, field-2 values for interlaced
+ * modes, and frame control (polarity/interlace) registers.  The mask
+ * checks reject values that do not fit the register bit-fields.
+ *
+ * Return: 0 on success, -EPERM on unsupported format or out-of-range
+ * timing values.
+ */
+static int hdmi_panel_setup_video(struct hdmi_panel *panel)
+{
+	u32 total_h, start_h, end_h;
+	u32 total_v, start_v, end_v;
+	u32 div = 0;
+	struct dss_io_data *io = panel->io;
+	struct msm_hdmi_mode_timing_info *timing;
+
+	timing = panel->vid_cfg.timing;
+	if (!timing) {
+		pr_err("fmt not supported: %d\n", panel->vic);
+		return -EPERM;
+	}
+
+	/* reduce horizontal params by half for YUV420 output */
+	if (panel->vid_cfg.avi_iframe.pixel_format == MDP_Y_CBCR_H2V2)
+		div = 1;
+
+	total_h = (hdmi_tx_get_h_total(timing) >> div) - 1;
+	total_v = hdmi_tx_get_v_total(timing) - 1;
+
+	if (((total_v << 16) & 0xE0000000) || (total_h & 0xFFFFE000)) {
+		pr_err("total v=%d or h=%d is larger than supported\n",
+			total_v, total_h);
+		return -EPERM;
+	}
+	DSS_REG_W(io, HDMI_TOTAL, (total_v << 16) | (total_h << 0));
+
+	start_h = (timing->back_porch_h >> div) +
+		  (timing->pulse_width_h >> div);
+	end_h   = (total_h + 1) - (timing->front_porch_h >> div);
+	if (((end_h << 16) & 0xE0000000) || (start_h & 0xFFFFE000)) {
+		pr_err("end_h=%d or start_h=%d is larger than supported\n",
+			end_h, start_h);
+		return -EPERM;
+	}
+	DSS_REG_W(io, HDMI_ACTIVE_H, (end_h << 16) | (start_h << 0));
+
+	start_v = timing->back_porch_v + timing->pulse_width_v - 1;
+	end_v   = total_v - timing->front_porch_v;
+	if (((end_v << 16) & 0xE0000000) || (start_v & 0xFFFFE000)) {
+		pr_err("end_v=%d or start_v=%d is larger than supported\n",
+			end_v, start_v);
+		return -EPERM;
+	}
+	DSS_REG_W(io, HDMI_ACTIVE_V, (end_v << 16) | (start_v << 0));
+
+	/* field 2 registers are only meaningful for interlaced modes */
+	if (timing->interlaced) {
+		DSS_REG_W(io, HDMI_V_TOTAL_F2, (total_v + 1) << 0);
+		DSS_REG_W(io, HDMI_ACTIVE_V_F2,
+			((end_v + 1) << 16) | ((start_v + 1) << 0));
+	} else {
+		DSS_REG_W(io, HDMI_V_TOTAL_F2, 0);
+		DSS_REG_W(io, HDMI_ACTIVE_V_F2, 0);
+	}
+
+	DSS_REG_W(io, HDMI_FRAME_CTRL,
+		((timing->interlaced << 31) & 0x80000000) |
+		((timing->active_low_h << 29) & 0x20000000) |
+		((timing->active_low_v << 28) & 0x10000000));
+
+	hdmi_panel_update_dfps_data(panel);
+
+	return 0;
+}
+
+/*
+ * hdmi_panel_set_avi_infoframe() - build and program the AVI infoframe
+ * @panel: panel context
+ *
+ * Assembles the 13 AVI infoframe data bytes from the current AVI
+ * configuration and timing, computes the CEA-861 checksum, writes the
+ * packed words into HDMI_AVI_INFO0..3 and enables transmission of the
+ * frame on every video frame at AVI_IFRAME_LINE_NUMBER.
+ */
+static void hdmi_panel_set_avi_infoframe(struct hdmi_panel *panel)
+{
+	int i;
+	u8  avi_iframe[AVI_MAX_DATA_BYTES] = {0};
+	u8 checksum;
+	u32 sum, reg_val;
+	struct dss_io_data *io = panel->io;
+	struct hdmi_avi_infoframe_config *avi;
+	struct msm_hdmi_mode_timing_info *timing;
+
+	avi = &panel->vid_cfg.avi_iframe;
+	timing = panel->vid_cfg.timing;
+
+	/*
+	 * BYTE - 1:
+	 *	0:1 - Scan Information
+	 *	2:3 - Bar Info
+	 *	4   - Active Format Info present
+	 *	5:6 - Pixel format type;
+	 *	7   - Reserved;
+	 */
+	avi_iframe[0] = (avi->scan_info & 0x3) |
+			(avi->bar_info.vert_binfo_present ? BIT(2) : 0) |
+			(avi->bar_info.horz_binfo_present ? BIT(3) : 0) |
+			(avi->act_fmt_info_present ? BIT(4) : 0);
+	if (avi->pixel_format == MDP_Y_CBCR_H2V2)
+		avi_iframe[0] |= (0x3 << 5);
+	else if (avi->pixel_format == MDP_Y_CBCR_H2V1)
+		avi_iframe[0] |= (0x1 << 5);
+	else if (avi->pixel_format == MDP_Y_CBCR_H1V1)
+		avi_iframe[0] |= (0x2 << 5);
+
+	/*
+	 * BYTE - 2:
+	 *	0:3 - Active format info
+	 *	4:5 - Picture aspect ratio
+	 *	6:7 - Colorimetry info
+	 */
+	avi_iframe[1] |= 0x08;
+	if (timing->ar == HDMI_RES_AR_4_3)
+		avi_iframe[1] |= (0x1 << 4);
+	else if (timing->ar == HDMI_RES_AR_16_9)
+		avi_iframe[1] |= (0x2 << 4);
+
+	avi_iframe[1] |= (avi->colorimetry_info & 0x3) << 6;
+
+	/*
+	 * BYTE - 3:
+	 *	0:1 - Scaling info
+	 *	2:3 - Quantization range
+	 *	4:6 - Extended Colorimetry
+	 *	7   - IT content
+	 */
+	avi_iframe[2] |= (avi->scaling_info & 0x3) |
+			 ((avi->rgb_quantization_range & 0x3) << 2) |
+			 ((avi->ext_colorimetry_info & 0x7) << 4) |
+			 ((avi->is_it_content ? 0x1 : 0x0) << 7);
+	/*
+	 * BYTE - 4:
+	 *	0:7 - VIC
+	 */
+	if (timing->video_format < HDMI_VFRMT_END)
+		avi_iframe[3] = timing->video_format;
+
+	/*
+	 * BYTE - 5:
+	 *	0:3 - Pixel Repeat factor
+	 *	4:5 - Content type
+	 *	6:7 - YCC Quantization range
+	 */
+	avi_iframe[4] = (avi->pixel_rpt_factor & 0xF) |
+			((avi->content_type & 0x3) << 4) |
+			((avi->yuv_quantization_range & 0x3) << 6);
+
+	/* BYTE - 6,7: End of top bar */
+	avi_iframe[5] = avi->bar_info.end_of_top_bar & 0xFF;
+	avi_iframe[6] = ((avi->bar_info.end_of_top_bar & 0xFF00) >> 8);
+
+	/* BYTE - 8,9: Start of bottom bar */
+	avi_iframe[7] = avi->bar_info.start_of_bottom_bar & 0xFF;
+	avi_iframe[8] = ((avi->bar_info.start_of_bottom_bar & 0xFF00) >>
+			 8);
+
+	/* BYTE - 10,11: Endof of left bar */
+	avi_iframe[9] = avi->bar_info.end_of_left_bar & 0xFF;
+	avi_iframe[10] = ((avi->bar_info.end_of_left_bar & 0xFF00) >> 8);
+
+	/* BYTE - 12,13: Start of right bar */
+	avi_iframe[11] = avi->bar_info.start_of_right_bar & 0xFF;
+	avi_iframe[12] = ((avi->bar_info.start_of_right_bar & 0xFF00) >>
+			  8);
+
+	/* checksum: header + version + length + all payload bytes == 0 mod 256 */
+	sum = IFRAME_PACKET_OFFSET + AVI_IFRAME_TYPE +
+		AVI_IFRAME_VERSION + AVI_MAX_DATA_BYTES;
+
+	for (i = 0; i < AVI_MAX_DATA_BYTES; i++)
+		sum += avi_iframe[i];
+	sum &= 0xFF;
+	sum = 256 - sum;
+	checksum = (u8) sum;
+
+	/* pack checksum + data bytes into the four AVI_INFO registers */
+	reg_val = checksum |
+		LEFT_SHIFT_BYTE(avi_iframe[DATA_BYTE_1]) |
+		LEFT_SHIFT_WORD(avi_iframe[DATA_BYTE_2]) |
+		LEFT_SHIFT_24BITS(avi_iframe[DATA_BYTE_3]);
+	DSS_REG_W(io, HDMI_AVI_INFO0, reg_val);
+
+	reg_val = avi_iframe[DATA_BYTE_4] |
+		LEFT_SHIFT_BYTE(avi_iframe[DATA_BYTE_5]) |
+		LEFT_SHIFT_WORD(avi_iframe[DATA_BYTE_6]) |
+		LEFT_SHIFT_24BITS(avi_iframe[DATA_BYTE_7]);
+	DSS_REG_W(io, HDMI_AVI_INFO1, reg_val);
+
+	reg_val = avi_iframe[DATA_BYTE_8] |
+		LEFT_SHIFT_BYTE(avi_iframe[DATA_BYTE_9]) |
+		LEFT_SHIFT_WORD(avi_iframe[DATA_BYTE_10]) |
+		LEFT_SHIFT_24BITS(avi_iframe[DATA_BYTE_11]);
+	DSS_REG_W(io, HDMI_AVI_INFO2, reg_val);
+
+	reg_val = avi_iframe[DATA_BYTE_12] |
+		LEFT_SHIFT_BYTE(avi_iframe[DATA_BYTE_13]) |
+		LEFT_SHIFT_24BITS(AVI_IFRAME_VERSION);
+	DSS_REG_W(io, HDMI_AVI_INFO3, reg_val);
+
+	/* AVI InfFrame enable (every frame) */
+	DSS_REG_W(io, HDMI_INFOFRAME_CTRL0,
+		DSS_REG_R(io, HDMI_INFOFRAME_CTRL0) | BIT(1) | BIT(0));
+
+	reg_val = DSS_REG_R(io, HDMI_INFOFRAME_CTRL1);
+	reg_val &= ~0x3F;
+	reg_val |= AVI_IFRAME_LINE_NUMBER;
+	DSS_REG_W(io, HDMI_INFOFRAME_CTRL1, reg_val);
+}
+
+/*
+ * hdmi_panel_set_vendor_specific_infoframe() - program the HDMI VSIF
+ * @input: hdmi_panel context (void * to match hdmi_panel_ops.vendor)
+ *
+ * Builds the HDMI 1.4a vendor specific infoframe: 3D structure when a
+ * supported s3d mode is active, otherwise the extended-resolution
+ * HDMI_VIC for the 4K modes, and enables its transmission every frame
+ * at VENDOR_IFRAME_LINE_NUMBER.
+ */
+static void hdmi_panel_set_vendor_specific_infoframe(void *input)
+{
+	int i;
+	u8 vs_iframe[9]; /* two header + length + 6 data */
+	u32 sum, reg_val;
+	u32 hdmi_vic, hdmi_video_format, s3d_struct = 0;
+	struct hdmi_panel *panel = input;
+	struct dss_io_data *io = panel->io;
+
+	/* HDMI Spec 1.4a Table 8-10 */
+	vs_iframe[0] = 0x81; /* type */
+	vs_iframe[1] = 0x1;  /* version */
+	vs_iframe[2] = 0x8;  /* length */
+
+	vs_iframe[3] = 0x0; /* PB0: checksum */
+
+	/* PB1..PB3: 24 Bit IEEE Registration Code 00_0C_03 */
+	vs_iframe[4] = 0x03;
+	vs_iframe[5] = 0x0C;
+	vs_iframe[6] = 0x00;
+
+	if ((panel->data->s3d_mode != HDMI_S3D_NONE) &&
+	    panel->data->s3d_support) {
+		switch (panel->data->s3d_mode) {
+		case HDMI_S3D_SIDE_BY_SIDE:
+			s3d_struct = 0x8;
+			break;
+		case HDMI_S3D_TOP_AND_BOTTOM:
+			s3d_struct = 0x6;
+			break;
+		default:
+			s3d_struct = 0;
+		}
+		hdmi_video_format = 0x2;
+		hdmi_vic = 0;
+		/* PB5: 3D_Structure[7:4], Reserved[3:0] */
+		vs_iframe[8] = s3d_struct << 4;
+	} else {
+		hdmi_video_format = 0x1;
+		switch (panel->vic) {
+		case HDMI_EVFRMT_3840x2160p30_16_9:
+			hdmi_vic = 0x1;
+			break;
+		case HDMI_EVFRMT_3840x2160p25_16_9:
+			hdmi_vic = 0x2;
+			break;
+		case HDMI_EVFRMT_3840x2160p24_16_9:
+			hdmi_vic = 0x3;
+			break;
+		case HDMI_EVFRMT_4096x2160p24_16_9:
+			hdmi_vic = 0x4;
+			break;
+		default:
+			hdmi_video_format = 0x0;
+			hdmi_vic = 0x0;
+		}
+		/* PB5: HDMI_VIC */
+		vs_iframe[8] = hdmi_vic;
+	}
+	/* PB4: HDMI Video Format[7:5],  Reserved[4:0] */
+	vs_iframe[7] = (hdmi_video_format << 5) & 0xE0;
+
+	/* compute checksum */
+	sum = 0;
+	for (i = 0; i < 9; i++)
+		sum += vs_iframe[i];
+
+	sum &= 0xFF;
+	sum = 256 - sum;
+	vs_iframe[3] = (u8)sum;
+
+	reg_val = (s3d_struct << 24) | (hdmi_vic << 16) |
+		  (vs_iframe[3] << 8) | (hdmi_video_format << 5) |
+		  vs_iframe[2];
+	DSS_REG_W(io, HDMI_VENSPEC_INFO0, reg_val);
+
+	/* vendor specific info-frame enable (every frame) */
+	DSS_REG_W(io, HDMI_INFOFRAME_CTRL0,
+		DSS_REG_R(io, HDMI_INFOFRAME_CTRL0) | BIT(13) | BIT(12));
+
+	reg_val = DSS_REG_R(io, HDMI_INFOFRAME_CTRL1);
+	reg_val &= ~0x3F000000;
+	reg_val |= (VENDOR_IFRAME_LINE_NUMBER << 24);
+	DSS_REG_W(io, HDMI_INFOFRAME_CTRL1, reg_val);
+}
+
+/*
+ * hdmi_panel_set_spd_infoframe() - program the SPD infoframe
+ * @panel: panel context
+ *
+ * Packs the vendor name and product description (7-bit ASCII) into
+ * the GENERIC1 packet registers and enables transmission every frame.
+ *
+ * NOTE(review): the code indexes vendor_name[0..7] and
+ * product_description[0..15] without bounds or NULL checks -- assumes
+ * hdmi_panel_init() callers always supply buffers of at least those
+ * sizes; verify.
+ */
+static void hdmi_panel_set_spd_infoframe(struct hdmi_panel *panel)
+{
+	u32 packet_header  = 0;
+	u32 check_sum      = 0;
+	u32 packet_payload = 0;
+	u32 packet_control = 0;
+	u8 *vendor_name = NULL;
+	u8 *product_description = NULL;
+	struct dss_io_data *io = panel->io;
+
+	vendor_name = panel->spd_vendor_name;
+	product_description = panel->spd_product_description;
+
+	/* Setup Packet header and payload */
+	/*
+	 * 0x83 InfoFrame Type Code
+	 * 0x01 InfoFrame Version Number
+	 * 0x19 Length of Source Product Description InfoFrame
+	 */
+	packet_header  = 0x83 | (0x01 << 8) | (0x19 << 16);
+	DSS_REG_W(io, HDMI_GENERIC1_HDR, packet_header);
+	check_sum += IFRAME_CHECKSUM_32(packet_header);
+
+	packet_payload = (vendor_name[3] & 0x7f)
+		| ((vendor_name[4] & 0x7f) << 8)
+		| ((vendor_name[5] & 0x7f) << 16)
+		| ((vendor_name[6] & 0x7f) << 24);
+	DSS_REG_W(io, HDMI_GENERIC1_1, packet_payload);
+	check_sum += IFRAME_CHECKSUM_32(packet_payload);
+
+	/* Product Description (7-bit ASCII code) */
+	packet_payload = (vendor_name[7] & 0x7f)
+		| ((product_description[0] & 0x7f) << 8)
+		| ((product_description[1] & 0x7f) << 16)
+		| ((product_description[2] & 0x7f) << 24);
+	DSS_REG_W(io, HDMI_GENERIC1_2, packet_payload);
+	check_sum += IFRAME_CHECKSUM_32(packet_payload);
+
+	packet_payload = (product_description[3] & 0x7f)
+		| ((product_description[4] & 0x7f) << 8)
+		| ((product_description[5] & 0x7f) << 16)
+		| ((product_description[6] & 0x7f) << 24);
+	DSS_REG_W(io, HDMI_GENERIC1_3, packet_payload);
+	check_sum += IFRAME_CHECKSUM_32(packet_payload);
+
+	packet_payload = (product_description[7] & 0x7f)
+		| ((product_description[8] & 0x7f) << 8)
+		| ((product_description[9] & 0x7f) << 16)
+		| ((product_description[10] & 0x7f) << 24);
+	DSS_REG_W(io, HDMI_GENERIC1_4, packet_payload);
+	check_sum += IFRAME_CHECKSUM_32(packet_payload);
+
+	packet_payload = (product_description[11] & 0x7f)
+		| ((product_description[12] & 0x7f) << 8)
+		| ((product_description[13] & 0x7f) << 16)
+		| ((product_description[14] & 0x7f) << 24);
+	DSS_REG_W(io, HDMI_GENERIC1_5, packet_payload);
+	check_sum += IFRAME_CHECKSUM_32(packet_payload);
+
+	/*
+	 * Source Device Information
+	 * 00h unknown
+	 * 01h Digital STB
+	 * 02h DVD
+	 * 03h D-VHS
+	 * 04h HDD Video
+	 * 05h DVC
+	 * 06h DSC
+	 * 07h Video CD
+	 * 08h Game
+	 * 09h PC general
+	 */
+	packet_payload = (product_description[15] & 0x7f) | 0x00 << 8;
+	DSS_REG_W(io, HDMI_GENERIC1_6, packet_payload);
+	check_sum += IFRAME_CHECKSUM_32(packet_payload);
+
+	/* Vendor Name (7bit ASCII code) */
+	packet_payload = ((vendor_name[0] & 0x7f) << 8)
+		| ((vendor_name[1] & 0x7f) << 16)
+		| ((vendor_name[2] & 0x7f) << 24);
+	check_sum += IFRAME_CHECKSUM_32(packet_payload);
+	packet_payload |= ((0x100 - (0xff & check_sum)) & 0xff);
+	DSS_REG_W(io, HDMI_GENERIC1_0, packet_payload);
+
+	/*
+	 * GENERIC1_LINE | GENERIC1_CONT | GENERIC1_SEND
+	 * Setup HDMI TX generic packet control
+	 * Enable this packet to transmit every frame
+	 * Enable HDMI TX engine to transmit Generic packet 1
+	 */
+	packet_control = DSS_REG_R_ND(io, HDMI_GEN_PKT_CTRL);
+	packet_control |= ((0x1 << 24) | (1 << 5) | (1 << 4));
+	DSS_REG_W(io, HDMI_GEN_PKT_CTRL, packet_control);
+}
+
+/* Program AVI, vendor specific and SPD infoframes when enabled. */
+static int hdmi_panel_setup_infoframe(struct hdmi_panel *panel)
+{
+	if (!panel) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	if (panel->data->infoframe) {
+		hdmi_panel_set_avi_infoframe(panel);
+		hdmi_panel_set_vendor_specific_infoframe(panel);
+		hdmi_panel_set_spd_infoframe(panel);
+	}
+
+	return 0;
+}
+
+/*
+ * hdmi_panel_setup_scrambler() - configure HDMI 2.0 scrambling
+ * @panel: panel context
+ *
+ * Scrambling is forced on (with TMDS clock ratio 1/40) above the
+ * 340 MHz pixel-clock threshold and otherwise follows the sink's
+ * capability.  When enabling, the sink is told first via SCDC, then
+ * the Tx is switched, and a hardware timer is armed to poll the
+ * sink's scrambler-status bit within the 200 ms window.
+ *
+ * Return: 0 on success or when scrambling is unsupported/disabled,
+ * negative errno on SCDC/timer failure.
+ */
+static int hdmi_panel_setup_scrambler(struct hdmi_panel *panel)
+{
+	int rc = 0;
+	int timeout_hsync;
+	u32 reg_val = 0;
+	u32 tmds_clock_ratio = 0;
+	bool scrambler_on = false;
+	struct msm_hdmi_mode_timing_info *timing = NULL;
+
+	if (!panel) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	timing = panel->vid_cfg.timing;
+	if (!timing) {
+		pr_err("Invalid timing info\n");
+		return -EINVAL;
+	}
+
+	/* Scrambling is supported from HDMI TX 4.0 */
+	if (panel->version < HDMI_TX_SCRAMBLER_MIN_TX_VERSION) {
+		pr_debug("scrambling not supported by tx\n");
+		return 0;
+	}
+
+	/* above 340 MHz scrambling is mandatory and clock ratio changes */
+	if (timing->pixel_freq > HDMI_TX_SCRAMBLER_THRESHOLD_RATE_KHZ) {
+		scrambler_on = true;
+		tmds_clock_ratio = 1;
+	} else {
+		scrambler_on = panel->data->scrambler;
+	}
+
+	pr_debug("scrambler %s\n", scrambler_on ? "on" : "off");
+
+	if (scrambler_on) {
+		rc = hdmi_scdc_write(panel->ddc,
+			HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE,
+			tmds_clock_ratio);
+		if (rc) {
+			pr_err("TMDS CLK RATIO ERR\n");
+			return rc;
+		}
+
+		reg_val = DSS_REG_R(panel->io, HDMI_CTRL);
+		reg_val |= BIT(31); /* Enable Update DATAPATH_MODE */
+		reg_val |= BIT(28); /* Set SCRAMBLER_EN bit */
+
+		DSS_REG_W(panel->io, HDMI_CTRL, reg_val);
+
+		rc = hdmi_scdc_write(panel->ddc,
+			HDMI_TX_SCDC_SCRAMBLING_ENABLE, 0x1);
+		if (!rc) {
+			panel->scrambler_enabled = true;
+		} else {
+			pr_err("failed to enable scrambling\n");
+			return rc;
+		}
+
+		/*
+		 * Setup hardware to periodically check for scrambler
+		 * status bit on the sink. Sink should set this bit
+		 * with in 200ms after scrambler is enabled.
+		 */
+		timeout_hsync = hdmi_utils_get_timeout_in_hysnc(
+					panel->vid_cfg.timing,
+					HDMI_TX_SCRAMBLER_TIMEOUT_MSEC);
+
+		if (timeout_hsync <= 0) {
+			pr_err("err in timeout hsync calc\n");
+			timeout_hsync = HDMI_DEFAULT_TIMEOUT_HSYNC;
+		}
+
+		pr_debug("timeout for scrambling en: %d hsyncs\n",
+			timeout_hsync);
+
+		rc = hdmi_setup_ddc_timers(panel->ddc,
+			HDMI_TX_DDC_TIMER_SCRAMBLER_STATUS, timeout_hsync);
+	} else {
+		/* best effort: ignore SCDC failure when disabling */
+		hdmi_scdc_write(panel->ddc,
+			HDMI_TX_SCDC_SCRAMBLING_ENABLE, 0x0);
+
+		panel->scrambler_enabled = false;
+	}
+
+	return rc;
+}
+
+/*
+ * hdmi_panel_update_fps() - apply a dynamic refresh-rate change
+ * @input: hdmi_panel context (void * to match hdmi_panel_ops)
+ * @fps: new refresh rate
+ *
+ * Rebuilds the timing from the current panel info, reprograms the
+ * video registers and, if the new timing matches a different mode,
+ * switches the panel's vic.
+ *
+ * Return: the (possibly updated) vic.
+ *
+ * NOTE(review): a nonzero return from hdmi_panel_setup_video() is
+ * logged as "no change in video timing" and skips the vic lookup --
+ * confirm that treating setup failure as no-change is intended.
+ */
+static int hdmi_panel_update_fps(void *input, u32 fps)
+{
+	struct hdmi_panel *panel = input;
+	struct mdss_panel_info *pinfo = panel->data->pinfo;
+	struct msm_hdmi_mode_timing_info *timing = panel->vid_cfg.timing;
+	u64 pclk;
+	int vic;
+
+	timing->back_porch_h = pinfo->lcdc.h_back_porch;
+	timing->front_porch_h = pinfo->lcdc.h_front_porch;
+	timing->pulse_width_h = pinfo->lcdc.h_pulse_width;
+
+	timing->back_porch_v = pinfo->lcdc.v_back_porch;
+	timing->front_porch_v = pinfo->lcdc.v_front_porch;
+	timing->pulse_width_v = pinfo->lcdc.v_pulse_width;
+
+	timing->refresh_rate = fps;
+
+	/* clk_rate is in Hz; pixel_freq is kept in kHz */
+	pclk = pinfo->clk_rate;
+	do_div(pclk, HDMI_TX_KHZ_TO_HZ);
+	timing->pixel_freq = (unsigned long) pclk;
+
+	if (hdmi_panel_setup_video(panel)) {
+		DEV_DBG("%s: no change in video timing\n", __func__);
+		goto end;
+	}
+
+	vic = hdmi_get_video_id_code(timing, panel->ds_data);
+
+	if (vic > 0 && panel->vic != vic) {
+		panel->vic = vic;
+		DEV_DBG("%s: switched to new resolution id %d\n",
+			__func__, vic);
+	}
+
+	pinfo->dynamic_fps = false;
+end:
+	return panel->vic;
+}
+
+/*
+ * hdmi_panel_power_on() - bring up the HDMI panel
+ * @input: hdmi_panel context (void * to match hdmi_panel_ops.on)
+ *
+ * Handles continuous-splash handoff: if the bootloader already
+ * configured the same resolution, only the vendor and SPD infoframes
+ * are (re)programmed.  Otherwise the full AVI config, video timing,
+ * infoframe and scrambler setup sequence runs.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int hdmi_panel_power_on(void *input)
+{
+	int rc = 0;
+	bool res_changed = false;
+	struct hdmi_panel *panel = input;
+	struct mdss_panel_info *pinfo;
+	struct msm_hdmi_mode_timing_info *info;
+
+	if (!panel) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto err;
+	}
+
+	pinfo = panel->data->pinfo;
+	if (!pinfo) {
+		pr_err("invalid panel data\n");
+		rc = -EINVAL;
+		goto err;
+	}
+
+	if (panel->vic != panel->data->vic) {
+		res_changed = true;
+
+		pr_debug("switching from %d => %d\n",
+			panel->vic, panel->data->vic);
+
+		panel->vic = panel->data->vic;
+	}
+
+	if (pinfo->cont_splash_enabled) {
+		pinfo->cont_splash_enabled = false;
+
+		/* same mode as the bootloader: reuse its video setup */
+		if (!res_changed) {
+			panel->on = true;
+
+			hdmi_panel_set_vendor_specific_infoframe(panel);
+			hdmi_panel_set_spd_infoframe(panel);
+
+			pr_debug("handoff done\n");
+
+			goto end;
+		}
+	}
+
+	rc = hdmi_panel_config_avi(panel);
+	if (rc) {
+		pr_err("avi config failed. rc=%d\n", rc);
+		goto err;
+	}
+
+	rc = hdmi_panel_setup_video(panel);
+	if (rc) {
+		pr_err("video setup failed. rc=%d\n", rc);
+		goto err;
+	}
+
+	rc = hdmi_panel_setup_infoframe(panel);
+	if (rc) {
+		pr_err("infoframe setup failed. rc=%d\n", rc);
+		goto err;
+	}
+
+	rc = hdmi_panel_setup_scrambler(panel);
+	if (rc) {
+		pr_err("scrambler setup failed. rc=%d\n", rc);
+		goto err;
+	}
+end:
+	panel->on = true;
+
+	info = panel->vid_cfg.timing;
+	pr_debug("%dx%d%s@%dHz %dMHz %s (%d)\n",
+		info->active_h, info->active_v,
+		info->interlaced ? "i" : "p",
+		info->refresh_rate / 1000,
+		info->pixel_freq / 1000,
+		pinfo->out_format == MDP_Y_CBCR_H2V2 ? "Y420" : "RGB",
+		info->video_format);
+err:
+	return rc;
+}
+
+/*
+ * hdmi_panel_power_off() - mark the HDMI panel as powered down
+ * @input: hdmi_panel context (void * to match hdmi_panel_ops.off)
+ *
+ * Return: 0 on success, -EINVAL on invalid input.
+ *
+ * Fix: validate @input before dereferencing, matching the check done
+ * in hdmi_panel_power_on().
+ */
+static int hdmi_panel_power_off(void *input)
+{
+	struct hdmi_panel *panel = input;
+
+	if (!panel) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	panel->on = false;
+
+	pr_debug("panel off\n");
+	return 0;
+}
+
+/*
+ * hdmi_panel_init() - allocate and set up the HDMI panel context
+ * @data: initialization data from the HDMI Tx driver
+ *
+ * Copies the supplied resources into a new context and, if an ops
+ * table is provided, fills it with this driver's callbacks.
+ *
+ * Return: panel context on success, NULL on invalid input or
+ * allocation failure.
+ *
+ * NOTE(review): failure is reported as NULL here, while the hdcp2p2
+ * init in this series returns ERR_PTR() -- callers must use the
+ * matching check for each.
+ */
+void *hdmi_panel_init(struct hdmi_panel_init_data *data)
+{
+	struct hdmi_panel *panel = NULL;
+
+	if (!data) {
+		pr_err("invalid input\n");
+		goto end;
+	}
+
+	panel = kzalloc(sizeof(*panel), GFP_KERNEL);
+	if (!panel)
+		goto end;
+
+	panel->io = data->io;
+	panel->ds_data = data->ds_data;
+	panel->data = data->panel_data;
+	panel->spd_vendor_name = data->spd_vendor_name;
+	panel->spd_product_description = data->spd_product_description;
+	panel->version = data->version;
+	panel->ddc = data->ddc;
+	panel->vid_cfg.timing = data->timing;
+
+	if (data->ops) {
+		data->ops->on = hdmi_panel_power_on;
+		data->ops->off = hdmi_panel_power_off;
+		data->ops->get_vic = hdmi_panel_get_vic;
+		data->ops->vendor = hdmi_panel_set_vendor_specific_infoframe;
+		data->ops->update_fps = hdmi_panel_update_fps;
+	}
+end:
+	return panel;
+}
+
+/* Release the panel context allocated by hdmi_panel_init(). */
+void hdmi_panel_deinit(void *input)
+{
+	/* kfree(NULL) is a no-op, so no validity check is needed */
+	kfree(input);
+}
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_panel.h b/drivers/video/fbdev/msm/mdss_hdmi_panel.h
new file mode 100644
index 0000000..4685b4e
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_hdmi_panel.h
@@ -0,0 +1,107 @@
+/* Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_HDMI_PANEL_H__
+#define __MDSS_HDMI_PANEL_H__
+
+#include "mdss_panel.h"
+#include "mdss_hdmi_util.h"
+
+/**
+ * struct hdmi_panel_data - panel related data information
+ *
+ * @pinfo: pointer to mdss panel information
+ * @s3d_mode: 3d mode supported
+ * @vic: video identification code
+ * @scan_info: scan information of the TV
+ * @s3d_support: set to true if 3d supported, false otherwise
+ * @content_type: type of content like game, cinema etc
+ * @infoframe: set to true if infoframes should be sent to sink
+ * @is_it_content: set to true if content is IT
+ * @scrambler: set to true if scrambler needs to be enabled
+ */
+struct hdmi_panel_data {
+	struct mdss_panel_info *pinfo;
+	u32 s3d_mode;
+	u32 vic;
+	u32 scan_info;
+	u8 content_type;
+	bool s3d_support;
+	bool infoframe;
+	bool is_it_content;
+	bool scrambler;
+};
+
+/**
+ * struct hdmi_panel_ops - panel operation for clients
+ *
+ * @on: pointer to a function which powers on the panel
+ * @off: pointer to a function which powers off the panel
+ * @vendor: pointer to a function which programs vendor specific infoframe
+ * @update_fps: pointer to a function which updates fps
+ * @get_vic: pointer to a function which get the vic from panel information.
+ */
+struct hdmi_panel_ops {
+	int (*on)(void *input);
+	int (*off)(void *input);
+	void (*vendor)(void *input);
+	int (*update_fps)(void *input, u32 fps);
+	int (*get_vic)(struct mdss_panel_info *pinfo,
+		struct hdmi_util_ds_data *ds_data);
+};
+
+/**
+ * struct hdmi_panel_init_data - initialization data for hdmi panel
+ *
+ * @io: pointer to logical memory of the hdmi tx core
+ * @ds_data: pointer to down stream data
+ * @panel_data: pointer to panel data
+ * @ddc: pointer to display data channel's data
+ * @ops: pointer to panel ops to be filled by hdmi panel
+ * @timing: pointer to the timing details of current resolution
+ * @spd_vendor_name: pointer to spd vendor infoframe data
+ * @spd_product_description:  pointer to spd product description infoframe data
+ * @version:  hardware version of the hdmi tx
+ */
+struct hdmi_panel_init_data {
+	struct dss_io_data *io;
+	struct hdmi_util_ds_data *ds_data;
+	struct hdmi_panel_data *panel_data;
+	struct hdmi_tx_ddc_ctrl *ddc;
+	struct hdmi_panel_ops *ops;
+	struct msm_hdmi_mode_timing_info *timing;
+	u8 *spd_vendor_name;
+	u8 *spd_product_description;
+	u32 version;
+};
+
+/**
+ * hdmi_panel_init() - initializes hdmi panel
+ *
+ * initializes the hdmi panel, allocates the memory, assign the input
+ * data to local variables and provide the operation function pointers.
+ *
+ * @data: initialization data.
+ * return: hdmi panel data that need to be send with hdmi ops.
+ */
+void *hdmi_panel_init(struct hdmi_panel_init_data *data);
+
+/**
+ * hdmi_panel_deinit() - deinitializes hdmi panel
+ *
+ * releases memory and all resources.
+ *
+ * @input: hdmi panel data.
+ */
+void hdmi_panel_deinit(void *input);
+
+#endif /* __MDSS_HDMI_PANEL_H__ */
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_tx.c b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
new file mode 100644
index 0000000..12aba84
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
@@ -0,0 +1,4639 @@
+/* Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/iopoll.h>
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/types.h>
+#include <linux/hdcp_qseecom.h>
+#include <linux/clk.h>
+
+#define REG_DUMP 0
+
+#include "mdss_debug.h"
+#include "mdss_fb.h"
+#include "mdss_hdmi_cec.h"
+#include "mdss_hdmi_edid.h"
+#include "mdss_hdmi_hdcp.h"
+#include "mdss_hdmi_tx.h"
+#include "mdss_hdmi_audio.h"
+#include "mdss.h"
+#include "mdss_panel.h"
+#include "mdss_hdmi_mhl.h"
+
+#define DRV_NAME "hdmi-tx"
+#define COMPATIBLE_NAME "qcom,hdmi-tx"
+
+#define HDMI_TX_EVT_STR(x) #x
+#define DEFAULT_VIDEO_RESOLUTION HDMI_VFRMT_640x480p60_4_3
+#define DEFAULT_HDMI_PRIMARY_RESOLUTION HDMI_VFRMT_1920x1080p60_16_9
+
+/* HDMI PHY/PLL bit field macros */
+#define SW_RESET BIT(2)
+#define SW_RESET_PLL BIT(0)
+
+#define HPD_DISCONNECT_POLARITY 0
+#define HPD_CONNECT_POLARITY    1
+
+/*
+ * Audio engine may take 1 to 3 sec to shutdown
+ * in normal cases. To handle worst cases, making
+ * timeout for audio engine shutdown as 5 sec.
+ */
+#define AUDIO_POLL_SLEEP_US   (5 * 1000)
+#define AUDIO_POLL_TIMEOUT_US (AUDIO_POLL_SLEEP_US * 1000)
+
+#define HDMI_TX_YUV420_24BPP_PCLK_TMDS_CH_RATE_RATIO 2
+#define HDMI_TX_YUV422_24BPP_PCLK_TMDS_CH_RATE_RATIO 1
+#define HDMI_TX_RGB_24BPP_PCLK_TMDS_CH_RATE_RATIO 1
+
+#define HDMI_TX_SCRAMBLER_THRESHOLD_RATE_KHZ 340000
+#define HDMI_TX_SCRAMBLER_TIMEOUT_MSEC 200
+
+/* Maximum pixel clock rates for hdmi tx */
+#define HDMI_DEFAULT_MAX_PCLK_RATE         148500
+#define HDMI_TX_3_MAX_PCLK_RATE            297000
+#define HDMI_TX_4_MAX_PCLK_RATE            600000
+
+#define hdmi_tx_get_fd(x) (x ? hdmi_ctrl->feature_data[ffs(x) - 1] : 0)
+#define hdmi_tx_set_fd(x, y) {if (x) hdmi_ctrl->feature_data[ffs(x) - 1] = y; }
+
+#define MAX_EDID_READ_RETRY	5
+
+#define HDMI_TX_MIN_FPS 20000
+#define HDMI_TX_MAX_FPS 120000
+
+/* Enable HDCP by default */
+static bool hdcp_feature_on = true;
+
+/*
+ * CN represents IT content type, if ITC bit in infoframe data byte 3
+ * is set, CN bits will represent content type as below:
+ * 0b00 Graphics
+ * 0b01 Photo
+ * 0b10 Cinema
+ * 0b11 Game
+ */
+#define CONFIG_CN_BITS(bits, byte) \
+		(byte = (byte & ~(BIT(4) | BIT(5))) |\
+			((bits & (BIT(0) | BIT(1))) << 4))
+
+/* States written by userspace through the sysfs "hpd" node; see
+ * hdmi_tx_sysfs_wta_hpd() for how each value is handled.
+ */
+enum hdmi_tx_hpd_states {
+	HPD_OFF,
+	HPD_ON,
+	HPD_ON_CONDITIONAL_MTP,
+	HPD_DISABLE,
+	HPD_ENABLE
+};
+
+static int hdmi_tx_set_mhl_hpd(struct platform_device *pdev, uint8_t on);
+static int hdmi_tx_sysfs_enable_hpd(struct hdmi_tx_ctrl *hdmi_ctrl, int on);
+static irqreturn_t hdmi_tx_isr(int irq, void *data);
+static void hdmi_tx_hpd_off(struct hdmi_tx_ctrl *hdmi_ctrl);
+static int hdmi_tx_hpd_on(struct hdmi_tx_ctrl *hdmi_ctrl);
+static int hdmi_tx_enable_power(struct hdmi_tx_ctrl *hdmi_ctrl,
+	enum hdmi_tx_power_module_type module, int enable);
+static int hdmi_tx_setup_tmds_clk_rate(struct hdmi_tx_ctrl *hdmi_ctrl);
+static void hdmi_tx_fps_work(struct work_struct *work);
+
+/* Hook into the MDSS interrupt dispatcher. .ptr is filled with the driver
+ * context at runtime and doubles as a "core ready" flag (checked in
+ * register/unregister_hdmi_cable_notification()).
+ */
+static struct mdss_hw hdmi_tx_hw = {
+	.hw_ndx = MDSS_HW_HDMI,
+	.ptr = NULL,
+	.irq_handler = hdmi_tx_isr,
+};
+
+/* GPIO tables per power module; entries are matched against device tree
+ * pin labels derived from COMPATIBLE_NAME. The gpio numbers (first field)
+ * are resolved at probe time. NOTE(review): second field appears to be the
+ * default/active value — confirm against struct dss_gpio.
+ */
+static struct dss_gpio hpd_gpio_config[] = {
+	{0, 1, COMPATIBLE_NAME "-hpd"},
+	{0, 1, COMPATIBLE_NAME "-mux-en"},
+	{0, 0, COMPATIBLE_NAME "-mux-sel"},
+	{0, 1, COMPATIBLE_NAME "-mux-lpm"}
+};
+
+static struct dss_gpio ddc_gpio_config[] = {
+	{0, 1, COMPATIBLE_NAME "-ddc-mux-sel"},
+	{0, 1, COMPATIBLE_NAME "-ddc-clk"},
+	{0, 1, COMPATIBLE_NAME "-ddc-data"}
+};
+
+/* no dedicated gpios for the core power module */
+static struct dss_gpio core_gpio_config[] = {
+};
+
+static struct dss_gpio cec_gpio_config[] = {
+	{0, 1, COMPATIBLE_NAME "-cec"}
+};
+
+/* Public name lookup for a power module, used for logging. Duplicates the
+ * file-local hdmi_tx_pm_name() below; exported form kept for callers
+ * outside this file.
+ */
+const char *hdmi_pm_name(enum hdmi_tx_power_module_type module)
+{
+	switch (module) {
+	case HDMI_TX_HPD_PM:	return "HDMI_TX_HPD_PM";
+	case HDMI_TX_DDC_PM:	return "HDMI_TX_DDC_PM";
+	case HDMI_TX_CORE_PM:	return "HDMI_TX_CORE_PM";
+	case HDMI_TX_CEC_PM:	return "HDMI_TX_CEC_PM";
+	default: return "???";
+	}
+} /* hdmi_pm_name */
+
+/* Power the HPD module on just long enough to read HDMI_VERSION from the
+ * core block, cache the major version in hdmi_tx_ver, and pick the
+ * matching max pixel clock. Note: the success path also exits through the
+ * "fail" label with rc == 0.
+ */
+static int hdmi_tx_get_version(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	int rc;
+	int reg_val;
+	struct dss_io_data *io;
+
+	rc = hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_HPD_PM, true);
+	if (rc) {
+		DEV_ERR("%s: Failed to read HDMI version\n", __func__);
+		goto fail;
+	}
+
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	if (!io->base) {
+		DEV_ERR("%s: core io not inititalized\n", __func__);
+		rc = -EINVAL;
+		goto fail;
+	}
+
+	/* major version lives in the top nibble of HDMI_VERSION */
+	reg_val = DSS_REG_R(io, HDMI_VERSION);
+	reg_val = (reg_val & 0xF0000000) >> 28;
+	hdmi_ctrl->hdmi_tx_ver = reg_val;
+
+	switch (hdmi_ctrl->hdmi_tx_ver) {
+	case (HDMI_TX_VERSION_3):
+		hdmi_ctrl->max_pclk_khz = HDMI_TX_3_MAX_PCLK_RATE;
+		break;
+	case (HDMI_TX_VERSION_4):
+		hdmi_ctrl->max_pclk_khz = HDMI_TX_4_MAX_PCLK_RATE;
+		break;
+	default:
+		hdmi_ctrl->max_pclk_khz = HDMI_DEFAULT_MAX_PCLK_RATE;
+		break;
+	}
+
+	rc = hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_HPD_PM, false);
+	if (rc) {
+		DEV_ERR("%s: FAILED to disable power\n", __func__);
+		goto fail;
+	}
+
+fail:
+	return rc;
+}
+
+/* Register @handler for HPD cable state notifications. Returns the current
+ * cable state (handler->status) on success, -EPROBE_DEFER if the core is
+ * not up yet, or -ENODEV for a NULL handler.
+ */
+int register_hdmi_cable_notification(struct hdmi_cable_notify *handler)
+{
+	struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+	struct list_head *pos;
+
+	if (!hdmi_tx_hw.ptr) {
+		DEV_WARN("%s: HDMI Tx core not ready\n", __func__);
+		return -EPROBE_DEFER;
+	}
+
+	if (!handler) {
+		DEV_ERR("%s: Empty handler\n", __func__);
+		return -ENODEV;
+	}
+
+	hdmi_ctrl = (struct hdmi_tx_ctrl *) hdmi_tx_hw.ptr;
+
+	mutex_lock(&hdmi_ctrl->tx_lock);
+	handler->status = hdmi_ctrl->hpd_state;
+	/* NOTE(review): the trailing ';' makes this loop body empty, leaving
+	 * pos at the list head, so the add below appends at the tail. The net
+	 * effect equals list_add_tail(&handler->link, &...handlers), but the
+	 * construct reads like an accidental semicolon — confirm intent.
+	 */
+	list_for_each(pos, &hdmi_ctrl->cable_notify_handlers);
+	list_add_tail(&handler->link, pos);
+	mutex_unlock(&hdmi_ctrl->tx_lock);
+
+	return handler->status;
+} /* register_hdmi_cable_notification */
+
+/* Remove @handler from the cable notification list. Returns 0 on success,
+ * -ENODEV if the core is not ready or handler is NULL.
+ */
+int unregister_hdmi_cable_notification(struct hdmi_cable_notify *handler)
+{
+	struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+
+	if (!hdmi_tx_hw.ptr) {
+		DEV_WARN("%s: HDMI Tx core not ready\n", __func__);
+		return -ENODEV;
+	}
+
+	if (!handler) {
+		DEV_ERR("%s: Empty handler\n", __func__);
+		return -ENODEV;
+	}
+
+	hdmi_ctrl = (struct hdmi_tx_ctrl *) hdmi_tx_hw.ptr;
+
+	mutex_lock(&hdmi_ctrl->tx_lock);
+	list_del(&handler->link);
+	mutex_unlock(&hdmi_ctrl->tx_lock);
+
+	return 0;
+} /* unregister_hdmi_cable_notification */
+
+/* Deferred work: fan out the current hpd_state to every registered cable
+ * notification handler whose cached status is stale.
+ */
+static void hdmi_tx_cable_notify_work(struct work_struct *work)
+{
+	struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+	struct hdmi_cable_notify *pos;
+
+	hdmi_ctrl = container_of(work, struct hdmi_tx_ctrl, cable_notify_work);
+
+	/* container_of() on a valid work pointer cannot yield NULL; this
+	 * check is effectively dead but harmless.
+	 */
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid hdmi data\n", __func__);
+		return;
+	}
+
+	mutex_lock(&hdmi_ctrl->tx_lock);
+	list_for_each_entry(pos, &hdmi_ctrl->cable_notify_handlers, link) {
+		if (pos->status != hdmi_ctrl->hpd_state) {
+			pos->status = hdmi_ctrl->hpd_state;
+			pos->hpd_notify(pos);
+		}
+	}
+	mutex_unlock(&hdmi_ctrl->tx_lock);
+} /* hdmi_tx_cable_notify_work */
+
+/* True if @mode falls in the (0, HDMI_EVFRMT_END] range of CEA/extended
+ * video format codes.
+ */
+static bool hdmi_tx_is_cea_format(int mode)
+{
+	bool cea_fmt;
+
+	if ((mode > 0) && (mode <= HDMI_EVFRMT_END))
+		cea_fmt = true;
+	else
+		cea_fmt = false;
+
+	DEV_DBG("%s: %s\n", __func__, cea_fmt ? "Yes" : "No");
+
+	return cea_fmt;
+}
+
+/* HDCP is enabled only when the feature flag is set, at least one HDCP
+ * version (1.4 or 2.2) is present, and ops have been registered.
+ */
+static inline bool hdmi_tx_is_hdcp_enabled(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	return hdmi_ctrl->hdcp_feature_on &&
+		(hdmi_ctrl->hdcp14_present || hdmi_ctrl->hdcp22_present) &&
+		hdmi_ctrl->hdcp_ops;
+}
+
+/* File-local twin of the exported hdmi_pm_name(); used for log messages. */
+static const char *hdmi_tx_pm_name(enum hdmi_tx_power_module_type module)
+{
+	switch (module) {
+	case HDMI_TX_HPD_PM:	return "HDMI_TX_HPD_PM";
+	case HDMI_TX_DDC_PM:	return "HDMI_TX_DDC_PM";
+	case HDMI_TX_CORE_PM:	return "HDMI_TX_CORE_PM";
+	case HDMI_TX_CEC_PM:	return "HDMI_TX_CEC_PM";
+	default: return "???";
+	}
+} /* hdmi_tx_pm_name */
+
+/* Map an io index to its device-tree resource name; NULL for unknown. */
+static const char *hdmi_tx_io_name(u32 type)
+{
+	switch (type) {
+	case HDMI_TX_CORE_IO:	return "core_physical";
+	case HDMI_TX_QFPROM_IO:	return "qfprom_physical";
+	case HDMI_TX_HDCP_IO:	return "hdcp_physical";
+	default:		return NULL;
+	}
+} /* hdmi_tx_io_name */
+
+/* Turn the audio path on (if an audio "on" op is registered) using the
+ * current TMDS clock rate and cached audio parameters.
+ */
+static void hdmi_tx_audio_setup(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	if (hdmi_ctrl && hdmi_ctrl->audio_ops.on) {
+		u32 pclk = hdmi_tx_setup_tmds_clk_rate(hdmi_ctrl);
+
+		hdmi_ctrl->audio_ops.on(hdmi_ctrl->audio_data,
+			pclk, &hdmi_ctrl->audio_params);
+	}
+}
+
+/* 1 when the sink is DVI (no audio/infoframes), 0 when it is HDMI. */
+static inline u32 hdmi_tx_is_dvi_mode(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	return hdmi_edid_get_sink_mode(
+		hdmi_tx_get_fd(HDMI_TX_FEAT_EDID)) ? 0 : 1;
+} /* hdmi_tx_is_dvi_mode */
+
+/* Panel counts as "on" only when the cable is connected AND power is up. */
+static inline bool hdmi_tx_is_panel_on(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	return hdmi_ctrl->hpd_state && hdmi_ctrl->panel_power_on;
+}
+
+/* Query the CEC HW feature (if present) for wakeup capability. */
+static inline bool hdmi_tx_is_cec_wakeup_en(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	void *fd = NULL;
+
+	if (!hdmi_ctrl)
+		return false;
+
+	fd = hdmi_tx_get_fd(HDMI_TX_FEAT_CEC_HW);
+
+	if (!fd)
+		return false;
+
+	return hdmi_cec_is_wakeup_en(fd);
+}
+
+/* Forward the panel suspend state to the CEC HW feature, if present. */
+static inline void hdmi_tx_cec_device_suspend(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	void *fd = NULL;
+
+	if (!hdmi_ctrl)
+		return;
+
+	fd = hdmi_tx_get_fd(HDMI_TX_FEAT_CEC_HW);
+
+	if (!fd)
+		return;
+
+	hdmi_cec_device_suspend(fd, hdmi_ctrl->panel_suspend);
+}
+
+
+/* Push the new cable state to the switch class device and schedule the
+ * deferred work that notifies registered handlers.
+ */
+static inline void hdmi_tx_send_cable_notification(
+	struct hdmi_tx_ctrl *hdmi_ctrl, int val)
+{
+	int state = 0;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+	state = hdmi_ctrl->sdev.state;
+
+	switch_set_state(&hdmi_ctrl->sdev, val);
+
+	DEV_INFO("%s: cable state %s %d\n", __func__,
+		hdmi_ctrl->sdev.state == state ?
+			"is same" : "switched to",
+		hdmi_ctrl->sdev.state);
+
+	/* Notify all registered modules of cable connection status */
+	schedule_work(&hdmi_ctrl->cable_notify_work);
+} /* hdmi_tx_send_cable_notification */
+
+/* Notify the audio side of a state change; suppressed in DVI mode since
+ * DVI sinks take no audio.
+ */
+static inline void hdmi_tx_set_audio_switch_node(
+	struct hdmi_tx_ctrl *hdmi_ctrl, int val)
+{
+	if (hdmi_ctrl && hdmi_ctrl->audio_ops.notify &&
+		!hdmi_tx_is_dvi_mode(hdmi_ctrl))
+		hdmi_ctrl->audio_ops.notify(hdmi_ctrl->audio_data, val);
+}
+
+/* Block until the audio engine releases its vote on the hdmi core (up to
+ * ~5s of 100ms polls), then poll HDMI_AUDIO_PKT_CTRL/HDMI_AUDIO_CFG bit 0
+ * until packet transmission and the engine itself report off. Errors are
+ * logged but not propagated.
+ */
+static void hdmi_tx_wait_for_audio_engine(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	u64 status = 0;
+	u32 wait_for_vote = 50;
+	struct dss_io_data *io = NULL;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	if (!io->base) {
+		DEV_ERR("%s: core io not inititalized\n", __func__);
+		return;
+	}
+
+	/*
+	 * wait for 5 sec max for audio engine to acknowledge if hdmi tx core
+	 * can be safely turned off. Sleep for a reasonable time to make sure
+	 * vote_hdmi_core_on variable is updated properly by audio.
+	 */
+	while (hdmi_ctrl->vote_hdmi_core_on && --wait_for_vote)
+		msleep(100);
+
+
+	if (!wait_for_vote)
+		DEV_ERR("%s: HDMI core still voted for power on\n", __func__);
+
+	if (readl_poll_timeout(io->base + HDMI_AUDIO_PKT_CTRL, status,
+				(status & BIT(0)) == 0, AUDIO_POLL_SLEEP_US,
+				AUDIO_POLL_TIMEOUT_US))
+		DEV_ERR("%s: Error turning off audio packet transmission.\n",
+			__func__);
+
+	if (readl_poll_timeout(io->base + HDMI_AUDIO_CFG, status,
+				(status & BIT(0)) == 0, AUDIO_POLL_SLEEP_US,
+				AUDIO_POLL_TIMEOUT_US))
+		DEV_ERR("%s: Error turning off audio engine.\n", __func__);
+}
+
+/* Recover the driver context embedded around @mpd; NULL (with a log) when
+ * mpd is NULL. Note: container_of() of a non-NULL pointer cannot return
+ * NULL, so the inner check is effectively dead.
+ */
+static struct hdmi_tx_ctrl *hdmi_tx_get_drvdata_from_panel_data(
+	struct mdss_panel_data *mpd)
+{
+	struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+
+	if (mpd) {
+		hdmi_ctrl = container_of(mpd, struct hdmi_tx_ctrl, panel_data);
+		if (!hdmi_ctrl)
+			DEV_ERR("%s: hdmi_ctrl = NULL\n", __func__);
+	} else {
+		DEV_ERR("%s: mdss_panel_data = NULL\n", __func__);
+	}
+	return hdmi_ctrl;
+} /* hdmi_tx_get_drvdata_from_panel_data */
+
+/* Walk device -> fb_info -> msm_fb_data_type -> panel data to reach the
+ * driver context for a sysfs node; NULL if the fb drvdata is missing.
+ */
+static struct hdmi_tx_ctrl *hdmi_tx_get_drvdata_from_sysfs_dev(
+	struct device *device)
+{
+	struct msm_fb_data_type *mfd = NULL;
+	struct mdss_panel_data *panel_data = NULL;
+	struct fb_info *fbi = dev_get_drvdata(device);
+
+	if (fbi) {
+		mfd = (struct msm_fb_data_type *)fbi->par;
+		panel_data = dev_get_platdata(&mfd->pdev->dev);
+
+		return hdmi_tx_get_drvdata_from_panel_data(panel_data);
+	}
+	DEV_ERR("%s: fbi = NULL\n", __func__);
+	return NULL;
+} /* hdmi_tx_get_drvdata_from_sysfs_dev */
+
+/* todo: Fix this. Right now this is declared in hdmi_util.h */
+/* Exported lookup of a feature's private data (EDID, CEC, ...) given a
+ * sysfs device; NULL on bad input or unresolvable device.
+ */
+void *hdmi_get_featuredata_from_sysfs_dev(struct device *device,
+	u32 feature_type)
+{
+	struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+
+	if (!device || feature_type >= HDMI_TX_FEAT_MAX) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return NULL;
+	}
+
+	hdmi_ctrl = hdmi_tx_get_drvdata_from_sysfs_dev(device);
+	if (hdmi_ctrl)
+		return hdmi_tx_get_fd(feature_type);
+	else
+		return NULL;
+
+} /* hdmi_tx_get_featuredata_from_sysfs_dev */
+EXPORT_SYMBOL(hdmi_get_featuredata_from_sysfs_dev);
+
+/* Drive the HPD power module's first gpio (the 5V rail) to @enable.
+ * Note: `pd` is the address of an array element and can never be NULL;
+ * only the gpio_config check is meaningful.
+ */
+static int hdmi_tx_config_5v(struct hdmi_tx_ctrl *hdmi_ctrl, bool enable)
+{
+	struct dss_module_power *pd = NULL;
+	int ret = 0;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	pd = &hdmi_ctrl->pdata.power_data[HDMI_TX_HPD_PM];
+	if (!pd || !pd->gpio_config) {
+		DEV_ERR("%s: Error: invalid power data\n", __func__);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	gpio_set_value(pd->gpio_config->gpio, enable);
+end:
+	return ret;
+}
+
+/* sysfs show: current cable connect state (hpd_state) as "%d\n". */
+static ssize_t hdmi_tx_sysfs_rda_connected(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t ret;
+	struct hdmi_tx_ctrl *hdmi_ctrl =
+		hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hdmi_ctrl->tx_lock);
+	ret = snprintf(buf, PAGE_SIZE, "%d\n", hdmi_ctrl->hpd_state);
+	DEV_DBG("%s: '%d'\n", __func__, hdmi_ctrl->hpd_state);
+	mutex_unlock(&hdmi_ctrl->tx_lock);
+
+	return ret;
+} /* hdmi_tx_sysfs_rda_connected */
+
+/* sysfs store: accept a custom EDID as a hex string (two hex chars per
+ * byte) and decode it into edid_buf, enabling custom-EDID mode. An input
+ * shorter than one EDID block or longer than the buffer disables
+ * custom-EDID mode and returns -EINVAL.
+ * NOTE(review): `char_to_nib` is misnamed — two characters form one byte
+ * (two nibbles), not one nibble.
+ */
+static ssize_t hdmi_tx_sysfs_wta_edid(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int ret = 0;
+	struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+	int i = 0;
+	const char *buf_t = buf;
+	const int char_to_nib = 2;
+	int edid_size = count / char_to_nib;
+
+	hdmi_ctrl = hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+	if (!hdmi_ctrl || !hdmi_ctrl->edid_buf) {
+		DEV_ERR("%s: invalid data\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hdmi_ctrl->tx_lock);
+	if ((edid_size < EDID_BLOCK_SIZE) ||
+		(edid_size > hdmi_ctrl->edid_buf_size)) {
+		DEV_DBG("%s: disabling custom edid\n", __func__);
+
+		ret = -EINVAL;
+		hdmi_ctrl->custom_edid = false;
+		goto end;
+	}
+
+	memset(hdmi_ctrl->edid_buf, 0, hdmi_ctrl->edid_buf_size);
+
+	/* parse two hex characters at a time into one EDID byte */
+	while (edid_size--) {
+		char t[char_to_nib + 1];
+		int d;
+
+		memcpy(t, buf_t, sizeof(char) * char_to_nib);
+		t[char_to_nib] = '\0';
+
+		ret = kstrtoint(t, 16, &d);
+		if (ret) {
+			pr_err("kstrtoint error %d\n", ret);
+			goto end;
+		}
+
+		memcpy(hdmi_ctrl->edid_buf + i++, &d,
+			sizeof(*hdmi_ctrl->edid_buf));
+
+		buf_t += char_to_nib;
+	}
+
+	ret = strnlen(buf, PAGE_SIZE);
+	hdmi_ctrl->custom_edid = true;
+end:
+	mutex_unlock(&hdmi_ctrl->tx_lock);
+	return ret;
+}
+
+/* sysfs show: dump the raw EDID. Size is (CEA extension block count + 1)
+ * blocks, read from the extension-count byte of block 0 and clamped to
+ * PAGE_SIZE; invalid counts return -EINVAL.
+ */
+static ssize_t hdmi_tx_sysfs_rda_edid(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+	u32 size;
+	u32 cea_blks;
+
+	hdmi_ctrl = hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+	if (!hdmi_ctrl || !hdmi_ctrl->edid_buf) {
+		DEV_ERR("%s: invalid data\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hdmi_ctrl->tx_lock);
+	cea_blks = hdmi_ctrl->edid_buf[EDID_BLOCK_SIZE - 2];
+	if (cea_blks >= MAX_EDID_BLOCKS) {
+		DEV_ERR("%s: invalid cea blocks\n", __func__);
+		mutex_unlock(&hdmi_ctrl->tx_lock);
+		return -EINVAL;
+	}
+	size = (cea_blks + 1) * EDID_BLOCK_SIZE;
+	size = min_t(u32, size, PAGE_SIZE);
+
+	DEV_DBG("%s: edid size %d\n", __func__, size);
+
+	memcpy(buf, hdmi_ctrl->edid_buf, size);
+
+	print_hex_dump(KERN_DEBUG, "HDMI EDID: ", DUMP_PREFIX_NONE,
+		16, 1, buf, size, false);
+
+	mutex_unlock(&hdmi_ctrl->tx_lock);
+	return size;
+}
+
+/* sysfs store: forward a decimal ack value from the audio client to the
+ * audio ack op, together with the current hpd state.
+ */
+static ssize_t hdmi_tx_sysfs_wta_audio_cb(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int ack, rc = 0;
+	ssize_t ret = strnlen(buf, PAGE_SIZE);
+	struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+
+	hdmi_ctrl = hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	rc = kstrtoint(buf, 10, &ack);
+	if (rc) {
+		DEV_ERR("%s: kstrtoint failed. rc=%d\n", __func__, rc);
+		goto end;
+	}
+
+	if (hdmi_ctrl->audio_ops.ack)
+		hdmi_ctrl->audio_ops.ack(hdmi_ctrl->audio_data,
+			ack, hdmi_ctrl->hpd_state);
+end:
+	return ret;
+}
+
+/* Re-program the core pixel clock to the panel's clk_rate; halved for
+ * YUV420 output (two pixels per TMDS clock). Returns -EINVAL if the rate
+ * is unchanged (nothing to do) or on bad input. Note: `power_data` is the
+ * address of an array element and can never be NULL.
+ */
+static int hdmi_tx_update_pixel_clk(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	struct dss_module_power *power_data = NULL;
+	struct mdss_panel_info *pinfo;
+	int rc = 0;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	pinfo = &hdmi_ctrl->panel_data.panel_info;
+
+	power_data = &hdmi_ctrl->pdata.power_data[HDMI_TX_CORE_PM];
+	if (!power_data) {
+		DEV_ERR("%s: Error: invalid power data\n", __func__);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (power_data->clk_config->rate == pinfo->clk_rate) {
+		rc = -EINVAL;
+		goto end;
+	}
+
+	power_data->clk_config->rate = pinfo->clk_rate;
+
+	if (pinfo->out_format == MDP_Y_CBCR_H2V2)
+		power_data->clk_config->rate /= 2;
+
+	DEV_DBG("%s: rate %ld\n", __func__, power_data->clk_config->rate);
+
+	msm_dss_clk_set_rate(power_data->clk_config, power_data->num_clk);
+end:
+	return rc;
+}
+
+/* sysfs store: simulate a hot plug event — force hpd_state to the written
+ * boolean and kick the hpd interrupt work to process it.
+ */
+static ssize_t hdmi_tx_sysfs_wta_hot_plug(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int hot_plug, rc;
+	struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+
+	hdmi_ctrl = hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hdmi_ctrl->tx_lock);
+
+	rc = kstrtoint(buf, 10, &hot_plug);
+	if (rc) {
+		DEV_ERR("%s: kstrtoint failed. rc=%d\n", __func__, rc);
+		goto end;
+	}
+
+	hdmi_ctrl->hpd_state = !!hot_plug;
+
+	queue_work(hdmi_ctrl->workq, &hdmi_ctrl->hpd_int_work);
+
+	rc = strnlen(buf, PAGE_SIZE);
+end:
+	mutex_unlock(&hdmi_ctrl->tx_lock);
+	return rc;
+}
+
+/* sysfs show: current simulation mode flag as "%d\n". */
+static ssize_t hdmi_tx_sysfs_rda_sim_mode(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t ret;
+	struct hdmi_tx_ctrl *hdmi_ctrl =
+		hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hdmi_ctrl->tx_lock);
+	ret = snprintf(buf, PAGE_SIZE, "%d\n", hdmi_ctrl->sim_mode);
+	DEV_DBG("%s: '%d'\n", __func__, hdmi_ctrl->sim_mode);
+	mutex_unlock(&hdmi_ctrl->tx_lock);
+
+	return ret;
+}
+
+/* sysfs store: toggle HPD simulation mode. Entering sim mode acks pending
+ * HPD interrupts only; leaving it re-arms the HPD interrupt with polarity
+ * chosen from the current cable sense. Requires hpd to be initialized.
+ * NOTE(review): bit meanings of HDMI_HPD_INT_CTRL (ack/enable/polarity)
+ * are inferred — confirm against the HW programming guide.
+ */
+static ssize_t hdmi_tx_sysfs_wta_sim_mode(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int sim_mode, rc;
+	struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+	struct dss_io_data *io = NULL;
+
+	hdmi_ctrl = hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hdmi_ctrl->tx_lock);
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	if (!io->base) {
+		DEV_ERR("%s: core io is not initialized\n", __func__);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (!hdmi_ctrl->hpd_initialized) {
+		DEV_ERR("%s: hpd not enabled\n", __func__);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	rc = kstrtoint(buf, 10, &sim_mode);
+	if (rc) {
+		DEV_ERR("%s: kstrtoint failed. rc=%d\n", __func__, rc);
+		goto end;
+	}
+
+	hdmi_ctrl->sim_mode = !!sim_mode;
+
+	if (hdmi_ctrl->sim_mode) {
+		DSS_REG_W(io, HDMI_HPD_INT_CTRL, BIT(0));
+	} else {
+		int cable_sense = DSS_REG_R(io, HDMI_HPD_INT_STATUS) & BIT(1);
+
+		DSS_REG_W(io, HDMI_HPD_INT_CTRL, BIT(0) | BIT(2) |
+			(cable_sense ? 0 : BIT(1)));
+	}
+
+	rc = strnlen(buf, PAGE_SIZE);
+end:
+	mutex_unlock(&hdmi_ctrl->tx_lock);
+	return rc;
+}
+
+/* sysfs show: current video identification code (vic) as "%d\n". */
+static ssize_t hdmi_tx_sysfs_rda_video_mode(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t ret;
+	struct hdmi_tx_ctrl *hdmi_ctrl =
+		hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hdmi_ctrl->tx_lock);
+	ret = snprintf(buf, PAGE_SIZE, "%d\n", hdmi_ctrl->vic);
+	DEV_DBG("%s: '%d'\n", __func__, hdmi_ctrl->vic);
+	mutex_unlock(&hdmi_ctrl->tx_lock);
+
+	return ret;
+} /* hdmi_tx_sysfs_rda_video_mode */
+
+/* sysfs show: whether the HPD feature is currently enabled. */
+static ssize_t hdmi_tx_sysfs_rda_hpd(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t ret;
+	struct hdmi_tx_ctrl *hdmi_ctrl =
+		hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hdmi_ctrl->tx_lock);
+	ret = snprintf(buf, PAGE_SIZE, "%d\n", hdmi_ctrl->hpd_feature_on);
+	DEV_DBG("%s: '%d'\n", __func__, hdmi_ctrl->hpd_feature_on);
+	mutex_unlock(&hdmi_ctrl->tx_lock);
+
+	return ret;
+} /* hdmi_tx_sysfs_rda_hpd */
+
+/* sysfs store for the "hpd" node: dispatch on enum hdmi_tx_hpd_states.
+ * OFF/DISABLE tear HPD down (deferred if the panel is still powered),
+ * ON / ON_CONDITIONAL_MTP bring it up subject to the cond_power_on
+ * device-tree policy, ENABLE clears the disabled latch and powers on.
+ * On success the hpd_feature_on flag is toggled (single-bit flip).
+ */
+static ssize_t hdmi_tx_sysfs_wta_hpd(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int hpd, rc = 0;
+	ssize_t ret = strnlen(buf, PAGE_SIZE);
+	struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+
+	hdmi_ctrl = hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hdmi_ctrl->tx_lock);
+
+	rc = kstrtoint(buf, 10, &hpd);
+	if (rc) {
+		DEV_ERR("%s: kstrtoint failed. rc=%d\n", __func__, rc);
+		goto end;
+	}
+
+	DEV_DBG("%s: %d\n", __func__, hpd);
+
+	/* with a downstream device (MHL) registered, its hpd control wins */
+	if (hdmi_ctrl->ds_registered && hpd &&
+	    (!hdmi_ctrl->mhl_hpd_on || hdmi_ctrl->hpd_feature_on)) {
+		DEV_DBG("%s: DS registered, HPD on not allowed\n", __func__);
+		goto end;
+	}
+
+	switch (hpd) {
+	case HPD_OFF:
+	case HPD_DISABLE:
+		if (hpd == HPD_DISABLE)
+			hdmi_ctrl->hpd_disabled = true;
+
+		if (!hdmi_ctrl->hpd_feature_on) {
+			DEV_DBG("%s: HPD is already off\n", __func__);
+			goto end;
+		}
+
+		/* disable audio ack feature */
+		if (hdmi_ctrl->audio_ops.ack)
+			hdmi_ctrl->audio_ops.ack(hdmi_ctrl->audio_data,
+				AUDIO_ACK_SET_ENABLE, hdmi_ctrl->hpd_state);
+
+		if (hdmi_ctrl->panel_power_on) {
+			/* panel still up: cut 5V now, defer full hpd off */
+			hdmi_ctrl->hpd_off_pending = true;
+			hdmi_tx_config_5v(hdmi_ctrl, false);
+		} else {
+			hdmi_tx_hpd_off(hdmi_ctrl);
+
+			hdmi_ctrl->sdev.state = 0;
+			hdmi_tx_set_audio_switch_node(hdmi_ctrl, 0);
+		}
+
+		break;
+	case HPD_ON:
+		if (hdmi_ctrl->hpd_disabled == true) {
+			DEV_ERR("%s: hpd is disabled, state %d not allowed\n",
+				__func__, hpd);
+			goto end;
+		}
+
+		if (hdmi_ctrl->pdata.cond_power_on) {
+			DEV_ERR("%s: hpd state %d not allowed w/ cond. hpd\n",
+				__func__, hpd);
+			goto end;
+		}
+
+		if (hdmi_ctrl->hpd_feature_on) {
+			DEV_DBG("%s: HPD is already on\n", __func__);
+			goto end;
+		}
+
+		rc = hdmi_tx_sysfs_enable_hpd(hdmi_ctrl, true);
+		break;
+	case HPD_ON_CONDITIONAL_MTP:
+		if (hdmi_ctrl->hpd_disabled == true) {
+			DEV_ERR("%s: hpd is disabled, state %d not allowed\n",
+				__func__, hpd);
+			goto end;
+		}
+
+		if (!hdmi_ctrl->pdata.cond_power_on) {
+			DEV_ERR("%s: hpd state %d not allowed w/o cond. hpd\n",
+				__func__, hpd);
+			goto end;
+		}
+
+		if (hdmi_ctrl->hpd_feature_on) {
+			DEV_DBG("%s: HPD is already on\n", __func__);
+			goto end;
+		}
+
+		rc = hdmi_tx_sysfs_enable_hpd(hdmi_ctrl, true);
+		break;
+	case HPD_ENABLE:
+		hdmi_ctrl->hpd_disabled = false;
+
+		rc = hdmi_tx_sysfs_enable_hpd(hdmi_ctrl, true);
+		break;
+	default:
+		DEV_ERR("%s: Invalid HPD state requested\n", __func__);
+		goto end;
+	}
+
+	if (!rc) {
+		/* flip the single-bit feature flag to its new state */
+		hdmi_ctrl->hpd_feature_on =
+			(~hdmi_ctrl->hpd_feature_on) & BIT(0);
+		DEV_DBG("%s: '%d'\n", __func__, hdmi_ctrl->hpd_feature_on);
+	} else {
+		DEV_ERR("%s: failed to '%s' hpd. rc = %d\n", __func__,
+			hpd ? "enable" : "disable", rc);
+		ret = rc;
+	}
+
+end:
+	mutex_unlock(&hdmi_ctrl->tx_lock);
+	return ret;
+} /* hdmi_tx_sysfs_wta_hpd */
+
+/* sysfs store: copy up to 8 printable ASCII chars into the SPD vendor
+ * name; stops at the first control character and always NUL-terminates.
+ * `*s & 0x60 && *s ^ 0x7f` accepts printable chars (>= 0x20 range bits
+ * set) and rejects DEL.
+ * NOTE(review): the `++s > buf + ret` bound allows one iteration past
+ * `ret` characters — confirm spd_vendor_name is sized with headroom for
+ * the terminating write at [sz - 1].
+ */
+static ssize_t hdmi_tx_sysfs_wta_vendor_name(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	ssize_t ret, sz;
+	u8 *s = (u8 *) buf;
+	u8 *d = NULL;
+	struct hdmi_tx_ctrl *hdmi_ctrl =
+		hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hdmi_ctrl->tx_lock);
+	d = hdmi_ctrl->spd_vendor_name;
+	ret = strnlen(buf, PAGE_SIZE);
+	ret = (ret > 8) ? 8 : ret;
+
+	sz = sizeof(hdmi_ctrl->spd_vendor_name);
+	memset(hdmi_ctrl->spd_vendor_name, 0, sz);
+	while (*s) {
+		if (*s & 0x60 && *s ^ 0x7f) {
+			*d = *s;
+		} else {
+			/* stop copying if control character found */
+			break;
+		}
+
+		if (++s > (u8 *) (buf + ret))
+			break;
+
+		d++;
+	}
+	hdmi_ctrl->spd_vendor_name[sz - 1] = 0;
+
+	DEV_DBG("%s: '%s'\n", __func__, hdmi_ctrl->spd_vendor_name);
+	mutex_unlock(&hdmi_ctrl->tx_lock);
+
+	return ret;
+} /* hdmi_tx_sysfs_wta_vendor_name */
+
+/* sysfs show: SPD vendor name as "%s\n". */
+static ssize_t hdmi_tx_sysfs_rda_vendor_name(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t ret;
+	struct hdmi_tx_ctrl *hdmi_ctrl =
+		hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hdmi_ctrl->tx_lock);
+	ret = snprintf(buf, PAGE_SIZE, "%s\n", hdmi_ctrl->spd_vendor_name);
+	DEV_DBG("%s: '%s'\n", __func__, hdmi_ctrl->spd_vendor_name);
+	mutex_unlock(&hdmi_ctrl->tx_lock);
+
+	return ret;
+} /* hdmi_tx_sysfs_rda_vendor_name */
+
+/* sysfs store: copy up to 16 printable ASCII chars into the SPD product
+ * description; same filtering and bound logic as the vendor-name store
+ * (stops at first control character, NUL-terminates).
+ * NOTE(review): the `++s > buf + ret` bound allows one iteration past
+ * `ret` characters — confirm the destination buffer has headroom.
+ */
+static ssize_t hdmi_tx_sysfs_wta_product_description(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	ssize_t ret, sz;
+	u8 *s = (u8 *) buf;
+	u8 *d = NULL;
+	struct hdmi_tx_ctrl *hdmi_ctrl =
+		hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hdmi_ctrl->tx_lock);
+	d = hdmi_ctrl->spd_product_description;
+	ret = strnlen(buf, PAGE_SIZE);
+	ret = (ret > 16) ? 16 : ret;
+
+	sz = sizeof(hdmi_ctrl->spd_product_description);
+	memset(hdmi_ctrl->spd_product_description, 0, sz);
+	while (*s) {
+		if (*s & 0x60 && *s ^ 0x7f) {
+			*d = *s;
+		} else {
+			/* stop copying if control character found */
+			break;
+		}
+
+		if (++s > (u8 *) (buf + ret))
+			break;
+
+		d++;
+	}
+	hdmi_ctrl->spd_product_description[sz - 1] = 0;
+
+	DEV_DBG("%s: '%s'\n", __func__, hdmi_ctrl->spd_product_description);
+	mutex_unlock(&hdmi_ctrl->tx_lock);
+
+	return ret;
+} /* hdmi_tx_sysfs_wta_product_description */
+
+/* sysfs show: SPD product description as "%s\n". */
+static ssize_t hdmi_tx_sysfs_rda_product_description(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t ret;
+	struct hdmi_tx_ctrl *hdmi_ctrl =
+		hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hdmi_ctrl->tx_lock);
+	ret = snprintf(buf, PAGE_SIZE, "%s\n",
+		hdmi_ctrl->spd_product_description);
+	DEV_DBG("%s: '%s'\n", __func__, hdmi_ctrl->spd_product_description);
+	mutex_unlock(&hdmi_ctrl->tx_lock);
+
+	return ret;
+} /* hdmi_tx_sysfs_rda_product_description */
+
+/* sysfs store: set the AVI infoframe ITC (IT content) flag; accepts only
+ * 0 or 1, anything else returns -EINVAL.
+ */
+static ssize_t hdmi_tx_sysfs_wta_avi_itc(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int ret;
+	struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+	int itc = 0;
+
+	hdmi_ctrl = hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hdmi_ctrl->tx_lock);
+
+	ret = kstrtoint(buf, 10, &itc);
+	if (ret) {
+		DEV_ERR("%s: kstrtoint failed. rc =%d\n", __func__, ret);
+		goto end;
+	}
+
+	if (itc < 0 || itc > 1) {
+		DEV_ERR("%s: Invalid ITC %d\n", __func__, itc);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	hdmi_ctrl->panel.is_it_content = itc ? true : false;
+
+	ret = strnlen(buf, PAGE_SIZE);
+end:
+	mutex_unlock(&hdmi_ctrl->tx_lock);
+	return ret;
+} /* hdmi_tx_sysfs_wta_avi_itc */
+
+/*
+ * Sysfs write handler for the AVI infoframe content-type (CN0/CN1) bits.
+ *
+ * Accepts a decimal value 0..3 (CEA-861-E upper bound) and stores it in
+ * the panel data.  Returns the number of bytes consumed on success or a
+ * negative error code.
+ */
+static ssize_t hdmi_tx_sysfs_wta_avi_cn_bits(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int ret;
+	struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+	int cn_bits = 0;
+
+	hdmi_ctrl = hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hdmi_ctrl->tx_lock);
+
+	ret = kstrtoint(buf, 10, &cn_bits);
+	if (ret) {
+		DEV_ERR("%s: kstrtoint failed. rc=%d\n", __func__, ret);
+		goto end;
+	}
+
+	/* As per CEA-861-E, CN is a positive number and can be max 3 */
+	if (cn_bits < 0 || cn_bits > 3) {
+		DEV_ERR("%s: Invalid CN %d\n", __func__, cn_bits);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	hdmi_ctrl->panel.content_type = cn_bits;
+
+	/* report full input length as consumed, per sysfs convention */
+	ret = strnlen(buf, PAGE_SIZE);
+end:
+	mutex_unlock(&hdmi_ctrl->tx_lock);
+
+	return ret;
+} /* hdmi_tx_sysfs_wta_avi_cn_bits */
+
+/*
+ * Sysfs write handler for the stereoscopic (3D) mode.
+ *
+ * Validates the requested mode against the range [HDMI_S3D_NONE,
+ * HDMI_S3D_MAX) and, for real 3D modes, against the EDID capabilities for
+ * the current video format.  On success the panel data is updated and the
+ * vendor infoframe is refreshed via panel_ops.vendor.
+ */
+static ssize_t hdmi_tx_sysfs_wta_s3d_mode(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int ret, s3d_mode;
+	struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+	void *pdata;
+
+	hdmi_ctrl = hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	pdata = hdmi_tx_get_fd(HDMI_TX_FEAT_PANEL);
+
+	mutex_lock(&hdmi_ctrl->tx_lock);
+
+	ret = kstrtoint(buf, 10, &s3d_mode);
+	if (ret) {
+		DEV_ERR("%s: kstrtoint failed. rc=%d\n", __func__, ret);
+		goto end;
+	}
+
+	if (s3d_mode < HDMI_S3D_NONE || s3d_mode >= HDMI_S3D_MAX) {
+		DEV_ERR("%s: invalid s3d mode = %d\n", __func__, s3d_mode);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	/* a real 3D mode must be advertised by the sink for the current vic */
+	if (s3d_mode > HDMI_S3D_NONE &&
+		!hdmi_edid_is_s3d_mode_supported(
+		    hdmi_tx_get_fd(HDMI_TX_FEAT_EDID),
+			hdmi_ctrl->vic, s3d_mode)) {
+		DEV_ERR("%s: s3d mode not supported in current video mode\n",
+			__func__);
+		ret = -EPERM;
+		hdmi_ctrl->panel.s3d_support = false;
+		goto end;
+	}
+
+	hdmi_ctrl->panel.s3d_mode = s3d_mode;
+	hdmi_ctrl->panel.s3d_support = true;
+
+	/* push an updated vendor-specific infoframe carrying the 3D format */
+	if (hdmi_ctrl->panel_ops.vendor)
+		hdmi_ctrl->panel_ops.vendor(pdata);
+
+	ret = strnlen(buf, PAGE_SIZE);
+	/*
+	 * NOTE(review): this logs hdmi_ctrl->s3d_mode while the new value was
+	 * stored in hdmi_ctrl->panel.s3d_mode above; confirm hdmi_ctrl->s3d_mode
+	 * is updated elsewhere, otherwise the log (and the rda handler) show a
+	 * stale value.
+	 */
+	DEV_DBG("%s: %d\n", __func__, hdmi_ctrl->s3d_mode);
+end:
+	mutex_unlock(&hdmi_ctrl->tx_lock);
+	return ret;
+}
+
+/* Sysfs read handler: report the current 3D (stereoscopic) mode. */
+static ssize_t hdmi_tx_sysfs_rda_s3d_mode(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t len;
+	struct hdmi_tx_ctrl *ctrl = hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+	if (!ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctrl->tx_lock);
+	len = snprintf(buf, PAGE_SIZE, "%d\n", ctrl->s3d_mode);
+	DEV_DBG("%s: '%d'\n", __func__, ctrl->s3d_mode);
+	mutex_unlock(&ctrl->tx_lock);
+
+	return len;
+}
+
+/*
+ * Sysfs write handler: switch the HPD 5V supply on or off.
+ *
+ * The written value is normalized against the configured GPIO active
+ * level (XNOR) so that "1" always means "enable 5V" regardless of the
+ * board's polarity.  Returns bytes consumed or a negative error code.
+ */
+static ssize_t hdmi_tx_sysfs_wta_5v(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int read, ret;
+	struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+	struct dss_module_power *pd = NULL;
+
+	hdmi_ctrl = hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+	if (!hdmi_ctrl) {
+		/*
+		 * Must return directly: jumping to the unlock label would
+		 * dereference the NULL hdmi_ctrl via &hdmi_ctrl->tx_lock
+		 * (and unlock a mutex that was never taken).
+		 */
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hdmi_ctrl->tx_lock);
+	pd = &hdmi_ctrl->pdata.power_data[HDMI_TX_HPD_PM];
+	if (!pd->gpio_config) {
+		DEV_ERR("%s: Error: invalid power data\n", __func__);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	ret = kstrtoint(buf, 10, &read);
+	if (ret) {
+		DEV_ERR("%s: kstrtoint failed. rc=%d\n", __func__, ret);
+		goto end;
+	}
+
+	/* XNOR with the active level: yields the raw GPIO value to drive */
+	read = ~(!!read ^ pd->gpio_config->value) & BIT(0);
+
+	ret = hdmi_tx_config_5v(hdmi_ctrl, read);
+	if (ret)
+		goto end;
+
+	ret = strnlen(buf, PAGE_SIZE);
+end:
+	mutex_unlock(&hdmi_ctrl->tx_lock);
+	return ret;
+}
+
+/*
+ * Sysfs attribute declarations for the HDMI TX device node.  Permissions:
+ * 0444 = read-only, 0200 = write-only, 0644 = read/write.  The handlers
+ * are the hdmi_tx_sysfs_rda_* (read) / hdmi_tx_sysfs_wta_* (write)
+ * functions defined above.
+ */
+static DEVICE_ATTR(connected, 0444, hdmi_tx_sysfs_rda_connected, NULL);
+static DEVICE_ATTR(hdmi_audio_cb, 0200, NULL, hdmi_tx_sysfs_wta_audio_cb);
+static DEVICE_ATTR(hot_plug, 0200, NULL, hdmi_tx_sysfs_wta_hot_plug);
+static DEVICE_ATTR(sim_mode, 0644, hdmi_tx_sysfs_rda_sim_mode,
+	hdmi_tx_sysfs_wta_sim_mode);
+static DEVICE_ATTR(edid, 0644, hdmi_tx_sysfs_rda_edid,
+	hdmi_tx_sysfs_wta_edid);
+static DEVICE_ATTR(video_mode, 0444, hdmi_tx_sysfs_rda_video_mode, NULL);
+static DEVICE_ATTR(hpd, 0644, hdmi_tx_sysfs_rda_hpd,
+	hdmi_tx_sysfs_wta_hpd);
+static DEVICE_ATTR(vendor_name, 0644,
+	hdmi_tx_sysfs_rda_vendor_name, hdmi_tx_sysfs_wta_vendor_name);
+static DEVICE_ATTR(product_description, 0644,
+	hdmi_tx_sysfs_rda_product_description,
+	hdmi_tx_sysfs_wta_product_description);
+static DEVICE_ATTR(avi_itc, 0200, NULL, hdmi_tx_sysfs_wta_avi_itc);
+static DEVICE_ATTR(avi_cn0_1, 0200, NULL, hdmi_tx_sysfs_wta_avi_cn_bits);
+static DEVICE_ATTR(s3d_mode, 0644, hdmi_tx_sysfs_rda_s3d_mode,
+	hdmi_tx_sysfs_wta_s3d_mode);
+static DEVICE_ATTR(5v, 0200, NULL, hdmi_tx_sysfs_wta_5v);
+
+/* All attributes registered as one group in hdmi_tx_sysfs_create(). */
+static struct attribute *hdmi_tx_fs_attrs[] = {
+	&dev_attr_connected.attr,
+	&dev_attr_hdmi_audio_cb.attr,
+	&dev_attr_hot_plug.attr,
+	&dev_attr_sim_mode.attr,
+	&dev_attr_edid.attr,
+	&dev_attr_video_mode.attr,
+	&dev_attr_hpd.attr,
+	&dev_attr_vendor_name.attr,
+	&dev_attr_product_description.attr,
+	&dev_attr_avi_itc.attr,
+	&dev_attr_avi_cn0_1.attr,
+	&dev_attr_s3d_mode.attr,
+	&dev_attr_5v.attr,
+	NULL,
+};
+static struct attribute_group hdmi_tx_fs_attrs_group = {
+	.attrs = hdmi_tx_fs_attrs,
+};
+
+/*
+ * Attach the HDMI TX sysfs attribute group to the framebuffer device and
+ * cache its kobject for later removal.  Returns 0 or a negative errno.
+ */
+static int hdmi_tx_sysfs_create(struct hdmi_tx_ctrl *hdmi_ctrl,
+	struct fb_info *fbi)
+{
+	int ret;
+
+	if (!hdmi_ctrl || !fbi) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -ENODEV;
+	}
+
+	ret = sysfs_create_group(&fbi->dev->kobj, &hdmi_tx_fs_attrs_group);
+	if (ret) {
+		DEV_ERR("%s: failed, rc=%d\n", __func__, ret);
+		return ret;
+	}
+
+	hdmi_ctrl->kobj = &fbi->dev->kobj;
+	DEV_DBG("%s: sysfs group %pK\n", __func__, hdmi_ctrl->kobj);
+
+	return 0;
+} /* hdmi_tx_sysfs_create */
+
+/* Detach the HDMI TX sysfs attribute group, if it was ever attached. */
+static void hdmi_tx_sysfs_remove(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	if (hdmi_ctrl->kobj)
+		sysfs_remove_group(hdmi_ctrl->kobj, &hdmi_tx_fs_attrs_group);
+
+	hdmi_ctrl->kobj = NULL;
+} /* hdmi_tx_sysfs_remove */
+
+/*
+ * Set or clear the AV mute flag in the HDMI General Control packet.  When
+ * the flag actually changes, GC packet transmission is enabled so the
+ * sink observes the new state.  Returns 0 on success, -EINVAL otherwise.
+ */
+static int hdmi_tx_config_avmute(struct hdmi_tx_ctrl *hdmi_ctrl, bool set)
+{
+	struct dss_io_data *io;
+	u32 av_mute_status;
+	bool av_pkt_en = false;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	if (!io->base) {
+		DEV_ERR("%s: Core io is not initialized\n", __func__);
+		return -EINVAL;
+	}
+
+	av_mute_status = DSS_REG_R(io, HDMI_GC);
+
+	/* only touch the register (and the packet enable) on a real change */
+	if (set) {
+		if (!(av_mute_status & BIT(0))) {
+			DSS_REG_W(io, HDMI_GC, av_mute_status | BIT(0));
+			av_pkt_en = true;
+		}
+	} else {
+		if (av_mute_status & BIT(0)) {
+			DSS_REG_W(io, HDMI_GC, av_mute_status & ~BIT(0));
+			av_pkt_en = true;
+		}
+	}
+
+	/*
+	 * Enable AV Mute transmission here.  Fix: the original OR'ed in
+	 * (BIT(4) & BIT(5)), which is 0 and never enabled the packet; both
+	 * bits must be set, so combine them with bitwise OR.
+	 */
+	if (av_pkt_en)
+		DSS_REG_W(io, HDMI_VBI_PKT_CTRL,
+			DSS_REG_R(io, HDMI_VBI_PKT_CTRL) | BIT(4) | BIT(5));
+
+	DEV_DBG("%s: AVMUTE %s\n", __func__, set ? "set" : "cleared");
+
+	return 0;
+} /* hdmi_tx_config_avmute */
+
+/*
+ * Return true if HDCP encryption appears to be engaged on the link.
+ *
+ * Conservatively defaults to true (encryption assumed) on any error or
+ * when either the HDCP_CTRL2 encryption bits or the HDMI_CTRL encryption
+ * bit are set; returns false only when all checks pass cleanly.
+ */
+static bool hdmi_tx_is_encryption_set(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	struct dss_io_data *io;
+	bool enc_en = true;
+	u32 reg_val;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		goto end;
+	}
+
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	if (!io->base) {
+		DEV_ERR("%s: Core io is not initialized\n", __func__);
+		goto end;
+	}
+
+	/* both HDCP_CTRL2 low bits set -> encryption engaged (assumption
+	 * based on usage here; confirm against the register spec) */
+	reg_val = DSS_REG_R_ND(io, HDMI_HDCP_CTRL2);
+	if ((reg_val & BIT(0)) && (reg_val & BIT(1)))
+		goto end;
+
+	/* BIT(2) of HDMI_CTRL is the encryption-required bit (see
+	 * hdmi_tx_set_mode) */
+	if (DSS_REG_R_ND(io, HDMI_CTRL) & BIT(2))
+		goto end;
+
+	return false;
+
+end:
+	return enc_en;
+} /* hdmi_tx_is_encryption_set */
+
+/*
+ * HDCP state-change callback: record the new state and schedule the
+ * deferred worker (~250 ms) that reacts to it.
+ */
+static void hdmi_tx_hdcp_cb(void *ptr, enum hdmi_hdcp_state status)
+{
+	struct hdmi_tx_ctrl *ctrl = ptr;
+
+	if (!ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	ctrl->hdcp_status = status;
+
+	queue_delayed_work(ctrl->workq, &ctrl->hdcp_cb_work, HZ / 4);
+}
+
+/*
+ * Decide whether the AV stream may be presented given the required
+ * encryption level: always for ENC_NONE, only with an authenticated
+ * HDCP 1.x session for ENC_1X, and only with an authenticated HDCP 2.2
+ * session for ENC_2P2.
+ */
+static inline bool hdmi_tx_is_stream_shareable(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	if (hdmi_ctrl->enc_lvl == HDCP_STATE_AUTH_ENC_NONE)
+		return true;
+
+	if (hdmi_ctrl->enc_lvl == HDCP_STATE_AUTH_ENC_1X)
+		return hdmi_tx_is_hdcp_enabled(hdmi_ctrl) &&
+			hdmi_ctrl->auth_state;
+
+	if (hdmi_ctrl->enc_lvl == HDCP_STATE_AUTH_ENC_2P2)
+		return hdmi_ctrl->hdcp_feature_on &&
+			hdmi_ctrl->hdcp22_present &&
+			hdmi_ctrl->auth_state;
+
+	return false;
+}
+
+/*
+ * Deferred HDCP state handler, scheduled by hdmi_tx_hdcp_cb().
+ *
+ * Runs under tx_lock and reacts to the last recorded hdcp_status:
+ * unmutes AV and enables audio once authenticated, re-authenticates on
+ * failure (muting first if encryption is still engaged), and adjusts
+ * mute/audio when the required encryption level changes.
+ */
+static void hdmi_tx_hdcp_cb_work(struct work_struct *work)
+{
+	struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+	struct delayed_work *dw = to_delayed_work(work);
+	int rc = 0;
+
+	hdmi_ctrl = container_of(dw, struct hdmi_tx_ctrl, hdcp_cb_work);
+	if (!hdmi_ctrl) {
+		DEV_DBG("%s: invalid input\n", __func__);
+		return;
+	}
+
+	mutex_lock(&hdmi_ctrl->tx_lock);
+
+	switch (hdmi_ctrl->hdcp_status) {
+	case HDCP_STATE_AUTHENTICATED:
+		hdmi_ctrl->auth_state = true;
+
+		/* panel up and encryption policy satisfied: unmute + audio on */
+		if (hdmi_tx_is_panel_on(hdmi_ctrl) &&
+			hdmi_tx_is_stream_shareable(hdmi_ctrl)) {
+			rc = hdmi_tx_config_avmute(hdmi_ctrl, false);
+			hdmi_tx_set_audio_switch_node(hdmi_ctrl, 1);
+		}
+
+		if (hdmi_ctrl->hdcp1_use_sw_keys && hdmi_ctrl->hdcp14_present)
+			hdcp1_set_enc(true);
+		break;
+	case HDCP_STATE_AUTH_FAIL:
+		/* drop sw-key encryption only if we had been authenticated */
+		if (hdmi_ctrl->hdcp1_use_sw_keys && hdmi_ctrl->hdcp14_present) {
+			if (hdmi_ctrl->auth_state)
+				hdcp1_set_enc(false);
+		}
+
+		hdmi_ctrl->auth_state = false;
+
+		if (hdmi_tx_is_panel_on(hdmi_ctrl)) {
+			DEV_DBG("%s: Reauthenticating\n", __func__);
+
+			/* mute before retrying if content must stay protected */
+			if (hdmi_tx_is_encryption_set(hdmi_ctrl) ||
+				!hdmi_tx_is_stream_shareable(hdmi_ctrl)) {
+				hdmi_tx_set_audio_switch_node(hdmi_ctrl, 0);
+				rc = hdmi_tx_config_avmute(hdmi_ctrl, true);
+			}
+
+			rc = hdmi_ctrl->hdcp_ops->hdmi_hdcp_reauthenticate(
+				hdmi_ctrl->hdcp_data);
+			if (rc)
+				DEV_ERR("%s: HDCP reauth failed. rc=%d\n",
+					__func__, rc);
+		} else {
+			DEV_DBG("%s: Not reauthenticating. Cable not conn\n",
+				__func__);
+		}
+
+		break;
+	case HDCP_STATE_AUTH_ENC_NONE:
+		/* encryption no longer required: unmute unconditionally */
+		hdmi_ctrl->enc_lvl = HDCP_STATE_AUTH_ENC_NONE;
+
+		if (hdmi_tx_is_panel_on(hdmi_ctrl)) {
+			rc = hdmi_tx_config_avmute(hdmi_ctrl, false);
+			hdmi_tx_set_audio_switch_node(hdmi_ctrl, 1);
+		}
+		break;
+	case HDCP_STATE_AUTH_ENC_1X:
+	case HDCP_STATE_AUTH_ENC_2P2:
+		/* encryption level raised: mute unless policy is satisfied */
+		hdmi_ctrl->enc_lvl = hdmi_ctrl->hdcp_status;
+
+		if (hdmi_tx_is_panel_on(hdmi_ctrl) &&
+			hdmi_tx_is_stream_shareable(hdmi_ctrl)) {
+			rc = hdmi_tx_config_avmute(hdmi_ctrl, false);
+			hdmi_tx_set_audio_switch_node(hdmi_ctrl, 1);
+		} else {
+			hdmi_tx_set_audio_switch_node(hdmi_ctrl, 0);
+			rc = hdmi_tx_config_avmute(hdmi_ctrl, true);
+		}
+		break;
+	default:
+		break;
+		/* do nothing */
+	}
+
+	mutex_unlock(&hdmi_ctrl->tx_lock);
+}
+
+/*
+ * Read one 128-byte EDID block over DDC into @edid_buf.
+ *
+ * The block is fetched in @block_size chunks; after MAX_EDID_READ_RETRY
+ * failed passes the chunk size is halved (down to 16 bytes) to cope with
+ * marginal links.  Blocks >= 2 need segment-pointer addressing
+ * (hdmi_ddc_read_seg).  Returns 0 on success, non-zero on failure.
+ *
+ * NOTE(review): the return type is u32 yet -EINVAL is returned on bad
+ * input; callers compare against 0 so this works, but a signed return
+ * type would be cleaner — confirm before changing.
+ */
+static u32 hdmi_tx_ddc_read(struct hdmi_tx_ddc_ctrl *ddc_ctrl,
+	u32 block, u8 *edid_buf)
+{
+	u32 block_size = EDID_BLOCK_SIZE;
+	struct hdmi_tx_ddc_data ddc_data;
+	u32 status = 0, retry_cnt = 0, i;
+
+	if (!ddc_ctrl || !edid_buf) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	do {
+		DEV_DBG("EDID: reading block(%d) with block-size=%d\n",
+				block, block_size);
+
+		for (i = 0; i < EDID_BLOCK_SIZE; i += block_size) {
+			memset(&ddc_data, 0, sizeof(ddc_data));
+
+			ddc_data.dev_addr    = EDID_BLOCK_ADDR;
+			ddc_data.offset      = block * EDID_BLOCK_SIZE + i;
+			ddc_data.data_buf    = edid_buf + i;
+			ddc_data.data_len    = block_size;
+			ddc_data.request_len = block_size;
+			ddc_data.retry       = 1;
+			ddc_data.what        = "EDID";
+			ddc_data.retry_align = true;
+
+			ddc_ctrl->ddc_data = ddc_data;
+
+			/* Read EDID twice with 32bit alighnment too */
+			if (block < 2)
+				status = hdmi_ddc_read(ddc_ctrl);
+			else
+				status = hdmi_ddc_read_seg(ddc_ctrl);
+
+			if (status)
+				break;
+		}
+		/* after enough whole-block failures, fall back to smaller
+		 * DDC transfers */
+		if (retry_cnt++ >= MAX_EDID_READ_RETRY)
+			block_size /= 2;
+
+	} while (status && (block_size >= 16));
+
+	return status;
+}
+
+/*
+ * Read EDID block @block into the controller's edid buffer, retrying up
+ * to MAX_EDID_READ_RETRY times.  Returns 0 on success or the last error.
+ */
+static int hdmi_tx_read_edid_retry(struct hdmi_tx_ctrl *hdmi_ctrl, u8 block)
+{
+	struct hdmi_tx_ddc_ctrl *ddc_ctrl;
+	u32 attempt = 0;
+	int ret = 0;
+	u8 *ebuf;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	ebuf = hdmi_ctrl->edid_buf;
+	if (!ebuf) {
+		DEV_ERR("%s: invalid edid buf\n", __func__);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	ddc_ctrl = &hdmi_ctrl->ddc_ctrl;
+
+	do {
+		ret = hdmi_tx_ddc_read(ddc_ctrl, block,
+			ebuf + (block * EDID_BLOCK_SIZE));
+	} while (ret && ++attempt < MAX_EDID_READ_RETRY);
+end:
+	return ret;
+}
+
+/*
+ * Read and validate the sink's complete EDID: block 0 first, then the
+ * CEA extension blocks it advertises (byte 126 of block 0), up to
+ * MAX_EDID_BLOCKS.  Each block is retried via hdmi_tx_read_edid_retry()
+ * and verified by its 8-bit checksum (all 128 bytes must sum to 0).
+ * Returns 0 on success or a negative error code.
+ */
+static int hdmi_tx_read_edid(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	int ndx, check_sum;
+	int cea_blks = 0, block = 0, total_blocks = 0;
+	int ret = 0;
+	u8 *ebuf;
+	struct hdmi_tx_ddc_ctrl *ddc_ctrl;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	ebuf = hdmi_ctrl->edid_buf;
+	if (!ebuf) {
+		DEV_ERR("%s: invalid edid buf\n", __func__);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	memset(ebuf, 0, hdmi_ctrl->edid_buf_size);
+
+	ddc_ctrl = &hdmi_ctrl->ddc_ctrl;
+
+	do {
+		/*
+		 * Fix: account for the size of the block about to be read;
+		 * the old check (block * EDID_BLOCK_SIZE > size) allowed an
+		 * exact-fit write one block past the end of the buffer.
+		 */
+		if ((block + 1) * EDID_BLOCK_SIZE > hdmi_ctrl->edid_buf_size) {
+			DEV_ERR("%s: no mem for block %d, max mem %d\n",
+				__func__, block, hdmi_ctrl->edid_buf_size);
+			ret = -ENOMEM;
+			goto end;
+		}
+
+		ret = hdmi_tx_read_edid_retry(hdmi_ctrl, block);
+		if (ret) {
+			DEV_ERR("%s: edid read failed\n", __func__);
+			goto end;
+		}
+
+		/*
+		 * Verify checksum to validate edid block.  Fix: checksum the
+		 * block that was just read; the old loop always summed bytes
+		 * 0..127 (block 0), so corruption in extension blocks was
+		 * never detected.
+		 */
+		check_sum = 0;
+		for (ndx = 0; ndx < EDID_BLOCK_SIZE; ++ndx)
+			check_sum += ebuf[block * EDID_BLOCK_SIZE + ndx];
+
+		if (check_sum & 0xFF) {
+			DEV_ERR("%s: checksum mismatch\n", __func__);
+			ret = -EINVAL;
+			goto end;
+		}
+
+		/* get number of cea extension blocks as given in block 0*/
+		if (block == 0) {
+			cea_blks = ebuf[EDID_BLOCK_SIZE - 2];
+			if (cea_blks < 0 || cea_blks >= MAX_EDID_BLOCKS) {
+				cea_blks = 0;
+				DEV_ERR("%s: invalid cea blocks %d\n",
+					__func__, cea_blks);
+				ret = -EINVAL;
+				goto end;
+			}
+
+			total_blocks = cea_blks + 1;
+		}
+	} while ((cea_blks-- > 0) && (block++ < MAX_EDID_BLOCKS));
+end:
+
+	return ret;
+}
+
+/* Enable HDMI features */
+/*
+ * Initialize the HDMI panel feature: wire the controller's IO, ops, SPD
+ * strings, DDC and timing data into the panel module and register the
+ * resulting feature data.  Returns 0 or -EINVAL on init failure.
+ */
+static int hdmi_tx_init_panel(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	struct hdmi_panel_init_data init_data = {0};
+	void *panel_fd;
+
+	hdmi_ctrl->panel.pinfo = &hdmi_ctrl->panel_data.panel_info;
+
+	init_data.io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	init_data.ds_data = &hdmi_ctrl->ds_data;
+	init_data.ops = &hdmi_ctrl->panel_ops;
+	init_data.panel_data = &hdmi_ctrl->panel;
+	init_data.spd_vendor_name = hdmi_ctrl->spd_vendor_name;
+	init_data.spd_product_description =
+		hdmi_ctrl->spd_product_description;
+	init_data.version = hdmi_ctrl->hdmi_tx_ver;
+	init_data.ddc = &hdmi_ctrl->ddc_ctrl;
+	init_data.timing = &hdmi_ctrl->timing;
+
+	panel_fd = hdmi_panel_init(&init_data);
+	if (IS_ERR_OR_NULL(panel_fd)) {
+		DEV_ERR("%s: panel init failed\n", __func__);
+		return -EINVAL;
+	}
+
+	hdmi_tx_set_fd(HDMI_TX_FEAT_PANEL, panel_fd);
+	DEV_DBG("%s: panel initialized\n", __func__);
+
+	return 0;
+}
+
+/*
+ * Initialize the EDID parser feature, publish its feature data, keep a
+ * reference to the parser-owned EDID buffer, and seed the parser with
+ * the current video resolution.  Returns 0 or -ENODEV on failure.
+ */
+static int hdmi_tx_init_edid(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	struct hdmi_edid_init_data init_data = {0};
+	void *edid_fd;
+
+	init_data.kobj = hdmi_ctrl->kobj;
+	init_data.ds_data = hdmi_ctrl->ds_data;
+	init_data.max_pclk_khz = hdmi_ctrl->max_pclk_khz;
+
+	edid_fd = hdmi_edid_init(&init_data);
+	if (!edid_fd) {
+		DEV_ERR("%s: edid init failed\n", __func__);
+		return -ENODEV;
+	}
+
+	hdmi_ctrl->panel_data.panel_info.edid_data = edid_fd;
+	hdmi_tx_set_fd(HDMI_TX_FEAT_EDID, edid_fd);
+
+	/* the parser allocates the buffer; cache it for DDC reads */
+	hdmi_ctrl->edid_buf = init_data.buf;
+	hdmi_ctrl->edid_buf_size = init_data.buf_size;
+
+	hdmi_edid_set_video_resolution(edid_fd, hdmi_ctrl->vic, true);
+
+	return 0;
+}
+
+/*
+ * Initialize the HDCP features: HDCP 1.4 (only when the hardware fuse
+ * reports it present) and HDCP 2.2 (always attempted).  Both share the
+ * same init data built from the core/qfprom/hdcp IO regions and the
+ * controller's workqueue and DDC state.  Returns 0 or a negative errno.
+ */
+static int hdmi_tx_init_hdcp(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	struct hdmi_hdcp_init_data hdcp_init_data = {0};
+	struct resource *res;
+	void *hdcp_data;
+	int rc = 0;
+
+	res = platform_get_resource_byname(hdmi_ctrl->pdev,
+		IORESOURCE_MEM, hdmi_tx_io_name(HDMI_TX_CORE_IO));
+	if (!res) {
+		DEV_ERR("%s: Error getting HDMI tx core resource\n", __func__);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	hdcp_init_data.phy_addr      = res->start;
+	hdcp_init_data.core_io       = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	hdcp_init_data.qfprom_io     = &hdmi_ctrl->pdata.io[HDMI_TX_QFPROM_IO];
+	hdcp_init_data.hdcp_io       = &hdmi_ctrl->pdata.io[HDMI_TX_HDCP_IO];
+	hdcp_init_data.mutex         = &hdmi_ctrl->mutex;
+	hdcp_init_data.sysfs_kobj    = hdmi_ctrl->kobj;
+	hdcp_init_data.ddc_ctrl      = &hdmi_ctrl->ddc_ctrl;
+	hdcp_init_data.workq         = hdmi_ctrl->workq;
+	hdcp_init_data.notify_status = hdmi_tx_hdcp_cb;
+	hdcp_init_data.cb_data       = (void *)hdmi_ctrl;
+	hdcp_init_data.hdmi_tx_ver   = hdmi_ctrl->hdmi_tx_ver;
+	hdcp_init_data.timing        = &hdmi_ctrl->timing;
+
+	/* hdcp14_present is derived from the feature fuses in
+	 * hdmi_tx_check_capability() */
+	if (hdmi_ctrl->hdcp14_present) {
+		hdcp_data = hdmi_hdcp_init(&hdcp_init_data);
+
+		if (IS_ERR_OR_NULL(hdcp_data)) {
+			DEV_ERR("%s: hdcp 1.4 init failed\n", __func__);
+			rc = -EINVAL;
+			goto end;
+		} else {
+			hdmi_tx_set_fd(HDMI_TX_FEAT_HDCP, hdcp_data);
+			DEV_DBG("%s: HDCP 1.4 initialized\n", __func__);
+		}
+	}
+
+	hdcp_data = hdmi_hdcp2p2_init(&hdcp_init_data);
+
+	if (IS_ERR_OR_NULL(hdcp_data)) {
+		DEV_ERR("%s: hdcp 2.2 init failed\n", __func__);
+		rc = -EINVAL;
+		goto end;
+	} else {
+		hdmi_tx_set_fd(HDMI_TX_FEAT_HDCP2P2, hdcp_data);
+		DEV_DBG("%s: HDCP 2.2 initialized\n", __func__);
+	}
+end:
+	return rc;
+}
+
+/*
+ * Initialize the CEC hardware feature and mark CEC as supported in the
+ * panel info.  Returns 0 or -EINVAL on failure.
+ */
+static int hdmi_tx_init_cec_hw(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	struct hdmi_cec_init_data init_data = {0};
+	void *cec_fd;
+
+	init_data.io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	init_data.workq = hdmi_ctrl->workq;
+	init_data.pinfo = &hdmi_ctrl->panel_data.panel_info;
+	init_data.ops = &hdmi_ctrl->hdmi_cec_ops;
+	init_data.cbs = &hdmi_ctrl->hdmi_cec_cbs;
+
+	cec_fd = hdmi_cec_init(&init_data);
+	if (IS_ERR_OR_NULL(cec_fd)) {
+		DEV_ERR("%s: cec init failed\n", __func__);
+		return -EINVAL;
+	}
+
+	hdmi_ctrl->panel_data.panel_info.is_cec_supported = true;
+	hdmi_tx_set_fd(HDMI_TX_FEAT_CEC_HW, cec_fd);
+	DEV_DBG("%s: cec hw initialized\n", __func__);
+
+	return 0;
+}
+
+/*
+ * Initialize the CEC abstraction layer on top of the CEC hardware ops
+ * and publish its feature data.  Returns 0 or -EINVAL on failure.
+ */
+static int hdmi_tx_init_cec_abst(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	struct cec_abstract_init_data init_data = {0};
+	void *abst_fd;
+
+	init_data.kobj  = hdmi_ctrl->kobj;
+	init_data.ops   = &hdmi_ctrl->hdmi_cec_ops;
+	init_data.cbs   = &hdmi_ctrl->hdmi_cec_cbs;
+
+	abst_fd = cec_abstract_init(&init_data);
+	if (IS_ERR_OR_NULL(abst_fd)) {
+		DEV_ERR("%s: cec abst init failed\n", __func__);
+		return -EINVAL;
+	}
+
+	hdmi_tx_set_fd(HDMI_TX_FEAT_CEC_ABST, abst_fd);
+	hdmi_ctrl->panel_data.panel_info.cec_data = abst_fd;
+	DEV_DBG("%s: cec abst initialized\n", __func__);
+
+	return 0;
+}
+
+/*
+ * Register the HDMI audio module against the core IO and keep its
+ * handle on the controller.  Returns 0 or -EINVAL on failure.
+ */
+static int hdmi_tx_init_audio(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	struct hdmi_audio_init_data init_data = {0};
+	void *audio_fd;
+
+	init_data.io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	init_data.ops = &hdmi_ctrl->audio_ops;
+
+	audio_fd = hdmi_audio_register(&init_data);
+	if (!audio_fd) {
+		DEV_ERR("%s: audio init failed\n", __func__);
+		return -EINVAL;
+	}
+
+	hdmi_ctrl->audio_data = audio_fd;
+	DEV_DBG("%s: audio initialized\n", __func__);
+
+	return 0;
+}
+
+/*
+ * Tear down the HDMI sub-features selected by the @features bitmask
+ * (CEC abstraction, CEC hw, HDCP 2.2, HDCP 1.4, EDID), clearing the
+ * corresponding feature data pointers as each one is released.
+ */
+static void hdmi_tx_deinit_features(struct hdmi_tx_ctrl *hdmi_ctrl,
+		u32 features)
+{
+	void *fd;
+
+	if (features & HDMI_TX_FEAT_CEC_ABST) {
+		fd = hdmi_tx_get_fd(HDMI_TX_FEAT_CEC_ABST);
+
+		cec_abstract_deinit(fd);
+
+		hdmi_ctrl->panel_data.panel_info.cec_data = NULL;
+		hdmi_tx_set_fd(HDMI_TX_FEAT_CEC_ABST, 0);
+	}
+
+	if (features & HDMI_TX_FEAT_CEC_HW) {
+		fd = hdmi_tx_get_fd(HDMI_TX_FEAT_CEC_HW);
+
+		hdmi_cec_deinit(fd);
+		hdmi_ctrl->panel_data.panel_info.is_cec_supported = false;
+		hdmi_tx_set_fd(HDMI_TX_FEAT_CEC_HW, 0);
+	}
+
+	if (features & HDMI_TX_FEAT_HDCP2P2) {
+		fd = hdmi_tx_get_fd(HDMI_TX_FEAT_HDCP2P2);
+
+		hdmi_hdcp2p2_deinit(fd);
+		hdmi_tx_set_fd(HDMI_TX_FEAT_HDCP2P2, 0);
+	}
+
+	if (features & HDMI_TX_FEAT_HDCP) {
+		fd = hdmi_tx_get_fd(HDMI_TX_FEAT_HDCP);
+
+		hdmi_hdcp_deinit(fd);
+		hdmi_tx_set_fd(HDMI_TX_FEAT_HDCP, 0);
+	}
+
+	if (features & HDMI_TX_FEAT_EDID) {
+		fd = hdmi_tx_get_fd(HDMI_TX_FEAT_EDID);
+
+		hdmi_edid_deinit(fd);
+		hdmi_ctrl->edid_buf = NULL;
+		hdmi_ctrl->edid_buf_size = 0;
+		hdmi_tx_set_fd(HDMI_TX_FEAT_EDID, 0);
+	}
+} /* hdmi_tx_deinit_features */
+
+/*
+ * Bring up the HDMI sub-features in dependency order: panel, EDID,
+ * HDCP, CEC hw, CEC abstraction, audio.  If any stage fails, every
+ * feature initialized so far is torn down before returning the error.
+ */
+static int hdmi_tx_init_features(struct hdmi_tx_ctrl *hdmi_ctrl,
+	struct fb_info *fbi)
+{
+	int ret = 0;
+	u32 deinit_features = 0;
+
+	if (!hdmi_ctrl || !fbi) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	ret = hdmi_tx_init_panel(hdmi_ctrl);
+	if (ret)
+		goto end;
+
+	/*
+	 * Fix: accumulate every successfully initialized feature.  The
+	 * previous code armed only the single feature preceding the failed
+	 * stage, leaking all earlier ones on a late failure.
+	 */
+	deinit_features |= HDMI_TX_FEAT_PANEL;
+
+	ret = hdmi_tx_init_edid(hdmi_ctrl);
+	if (ret)
+		goto err;
+
+	deinit_features |= HDMI_TX_FEAT_EDID;
+
+	ret = hdmi_tx_init_hdcp(hdmi_ctrl);
+	if (ret)
+		goto err;
+
+	/* init_hdcp sets both the 1.4 and 2.2 feature data on success */
+	deinit_features |= HDMI_TX_FEAT_HDCP | HDMI_TX_FEAT_HDCP2P2;
+
+	ret = hdmi_tx_init_cec_hw(hdmi_ctrl);
+	if (ret)
+		goto err;
+
+	deinit_features |= HDMI_TX_FEAT_CEC_HW;
+
+	ret = hdmi_tx_init_cec_abst(hdmi_ctrl);
+	if (ret)
+		goto err;
+
+	deinit_features |= HDMI_TX_FEAT_CEC_ABST;
+
+	ret = hdmi_tx_init_audio(hdmi_ctrl);
+	if (ret)
+		goto err;
+
+	return 0;
+err:
+	hdmi_tx_deinit_features(hdmi_ctrl, deinit_features);
+end:
+	return ret;
+}
+
+/* Non-zero (BIT(0)) iff the HDMI controller enable bit is set. */
+static inline u32 hdmi_tx_is_controller_on(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	struct dss_io_data *core_io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+
+	return DSS_REG_R_ND(core_io, HDMI_CTRL) & BIT(0);
+} /* hdmi_tx_is_controller_on */
+
+/*
+ * Populate the MDSS panel_info structure from the timing data of the
+ * currently selected video format (hdmi_ctrl->vic) and cache the timing
+ * on the controller.  Returns 0 on success, -EINVAL on bad input or an
+ * unsupported/unknown timing.
+ */
+static int hdmi_tx_init_panel_info(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	struct mdss_panel_info *pinfo;
+	struct msm_hdmi_mode_timing_info timing = {0};
+	u32 ret;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	ret = hdmi_get_supported_mode(&timing, &hdmi_ctrl->ds_data,
+		hdmi_ctrl->vic);
+	pinfo = &hdmi_ctrl->panel_data.panel_info;
+
+	if (ret || !timing.supported || !pinfo) {
+		DEV_ERR("%s: invalid timing data\n", __func__);
+		return -EINVAL;
+	}
+
+	/* active area and pixel clock (timing reports kHz, pinfo wants Hz) */
+	pinfo->xres = timing.active_h;
+	pinfo->yres = timing.active_v;
+	pinfo->clk_rate = timing.pixel_freq * 1000;
+
+	/* blanking intervals */
+	pinfo->lcdc.h_back_porch = timing.back_porch_h;
+	pinfo->lcdc.h_front_porch = timing.front_porch_h;
+	pinfo->lcdc.h_pulse_width = timing.pulse_width_h;
+	pinfo->lcdc.v_back_porch = timing.back_porch_v;
+	pinfo->lcdc.v_front_porch = timing.front_porch_v;
+	pinfo->lcdc.v_pulse_width = timing.pulse_width_v;
+	pinfo->lcdc.frame_rate = timing.refresh_rate;
+
+	/* fixed panel characteristics for the DTV/HDMI interface */
+	pinfo->type = DTV_PANEL;
+	pinfo->pdest = DISPLAY_3;
+	pinfo->wait_cycle = 0;
+	pinfo->out_format = MDP_RGB_888;
+	pinfo->bpp = 24;
+	pinfo->fb_num = 1;
+
+	pinfo->min_fps = HDMI_TX_MIN_FPS;
+	pinfo->max_fps = HDMI_TX_MAX_FPS;
+
+	pinfo->lcdc.border_clr = 0; /* blk */
+	pinfo->lcdc.underflow_clr = 0xff; /* blue */
+	pinfo->lcdc.hsync_skew = 0;
+
+	pinfo->is_pluggable = hdmi_ctrl->pdata.pluggable;
+
+	hdmi_ctrl->timing = timing;
+
+	return 0;
+} /* hdmi_tx_init_panel_info */
+
+/*
+ * Read and parse the sink's EDID.
+ *
+ * Skips the DDC read when a custom EDID was injected via sysfs or when
+ * running in simulation mode; parses whenever a valid buffer should
+ * exist (custom EDID, or a real read outside sim mode).  Requires the
+ * HDMI controller to be powered on.  Returns 0 or a negative errno.
+ */
+static int hdmi_tx_read_sink_info(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	int status = 0;
+	void *data;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	data = hdmi_tx_get_fd(HDMI_TX_FEAT_EDID);
+
+	if (!hdmi_tx_is_controller_on(hdmi_ctrl)) {
+		DEV_ERR("%s: failed: HDMI controller is off", __func__);
+		status = -ENXIO;
+		goto error;
+	}
+
+	if (!hdmi_ctrl->custom_edid && !hdmi_ctrl->sim_mode) {
+		hdmi_ddc_config(&hdmi_ctrl->ddc_ctrl);
+
+		status = hdmi_tx_read_edid(hdmi_ctrl);
+		if (status) {
+			DEV_ERR("%s: error reading edid\n", __func__);
+			goto error;
+		}
+	}
+
+	/* parse edid if a valid edid buffer is present */
+	if (hdmi_ctrl->custom_edid || !hdmi_ctrl->sim_mode) {
+		status = hdmi_edid_parser(data);
+		if (status)
+			DEV_ERR("%s: edid parse failed\n", __func__);
+	}
+
+error:
+	return status;
+} /* hdmi_tx_read_sink_info */
+
+/*
+ * Select the HDCP implementation to use for this connection: prefer
+ * HDCP 2.2 when the sink supports it, otherwise fall back to HDCP 1.4
+ * (loading the software-key app first when required).  Records the
+ * chosen feature data and ops on the controller.
+ */
+static void hdmi_tx_update_hdcp_info(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	void *fd = NULL;
+	struct hdmi_hdcp_ops *ops = NULL;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	/* check first if hdcp2p2 is supported */
+	fd = hdmi_tx_get_fd(HDMI_TX_FEAT_HDCP2P2);
+	if (fd)
+		ops = hdmi_hdcp2p2_start(fd);
+
+	if (ops && ops->feature_supported)
+		hdmi_ctrl->hdcp22_present = ops->feature_supported(fd);
+	else
+		hdmi_ctrl->hdcp22_present = false;
+
+	if (!hdmi_ctrl->hdcp22_present) {
+		if (hdmi_ctrl->hdcp1_use_sw_keys)
+			hdmi_ctrl->hdcp14_present =
+				hdcp1_check_if_supported_load_app();
+
+		if (hdmi_ctrl->hdcp14_present) {
+			fd = hdmi_tx_get_fd(HDMI_TX_FEAT_HDCP);
+			ops = hdmi_hdcp_start(fd);
+		}
+	}
+
+	/* update internal data about hdcp */
+	hdmi_ctrl->hdcp_data = fd;
+	hdmi_ctrl->hdcp_ops = ops;
+}
+
+/*
+ * HPD interrupt bottom half, run under tx_lock.
+ *
+ * On connect: power the DDC rail, switch DDC arbitration to software,
+ * read the sink's EDID (retried up to MAX_EDID_READ_RETRY times), power
+ * DDC back down, and send a "connected" notification.  On disconnect:
+ * stop audio, wait for the audio engine to drain, and send a
+ * "disconnected" notification.
+ */
+static void hdmi_tx_hpd_int_work(struct work_struct *work)
+{
+	struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+	struct dss_io_data *io;
+	int rc = -EINVAL;
+	int retry = MAX_EDID_READ_RETRY;
+
+	hdmi_ctrl = container_of(work, struct hdmi_tx_ctrl, hpd_int_work);
+	if (!hdmi_ctrl) {
+		DEV_DBG("%s: invalid input\n", __func__);
+		return;
+	}
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+
+	mutex_lock(&hdmi_ctrl->tx_lock);
+
+	if (!hdmi_ctrl->hpd_initialized) {
+		DEV_DBG("hpd not initialized\n");
+		goto end;
+	}
+
+	DEV_DBG("%s: %s\n", __func__,
+		hdmi_ctrl->hpd_state ? "CONNECT" : "DISCONNECT");
+
+	if (hdmi_ctrl->hpd_state) {
+		if (hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_DDC_PM, true)) {
+			DEV_ERR("%s: Failed to enable ddc power\n", __func__);
+			goto end;
+		}
+
+		/* Enable SW DDC before EDID read */
+		DSS_REG_W_ND(io, HDMI_DDC_ARBITRATION,
+			DSS_REG_R(io, HDMI_DDC_ARBITRATION) & ~(BIT(4)));
+
+		while (rc && retry--)
+			rc = hdmi_tx_read_sink_info(hdmi_ctrl);
+		if (!retry && rc)
+			pr_warn_ratelimited("%s: EDID read failed\n", __func__);
+
+		if (hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_DDC_PM, false))
+			DEV_ERR("%s: Failed to disable ddc power\n", __func__);
+
+		hdmi_tx_send_cable_notification(hdmi_ctrl, true);
+	} else {
+		hdmi_tx_set_audio_switch_node(hdmi_ctrl, 0);
+		hdmi_tx_wait_for_audio_engine(hdmi_ctrl);
+
+		hdmi_tx_send_cable_notification(hdmi_ctrl, false);
+	}
+end:
+	mutex_unlock(&hdmi_ctrl->tx_lock);
+} /* hdmi_tx_hpd_int_work */
+
+/*
+ * Read the QFPROM feature fuses to determine whether HDMI and HDCP are
+ * enabled on this part.  The fuse layout differs between HDMI TX major
+ * versions (<4 vs >=4); on v4+ hardware an additional fuse selects
+ * hardware vs software HDCP 1.x keys.  Fails with -ENODEV when HDMI is
+ * fused off; records hdcp14_present for later HDCP init.
+ */
+static int hdmi_tx_check_capability(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	u32 hdmi_disabled, hdcp_disabled, reg_val;
+	struct dss_io_data *io = NULL;
+	int ret = 0;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_QFPROM_IO];
+	if (!io->base) {
+		DEV_ERR("%s: QFPROM io is not initialized\n", __func__);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	/* check if hdmi and hdcp are disabled */
+	if (hdmi_ctrl->hdmi_tx_ver < HDMI_TX_VERSION_4) {
+		hdcp_disabled = DSS_REG_R_ND(io,
+			QFPROM_RAW_FEAT_CONFIG_ROW0_LSB) & BIT(31);
+
+		hdmi_disabled = DSS_REG_R_ND(io,
+			QFPROM_RAW_FEAT_CONFIG_ROW0_MSB) & BIT(0);
+	} else {
+		/* v4+: both disable bits live in the same fuse row at a
+		 * version-specific offset */
+		reg_val = DSS_REG_R_ND(io,
+			QFPROM_RAW_FEAT_CONFIG_ROW0_LSB + QFPROM_RAW_VERSION_4);
+		hdcp_disabled = reg_val & BIT(12);
+		hdmi_disabled = reg_val & BIT(13);
+
+		reg_val = DSS_REG_R_ND(io, SEC_CTRL_HW_VERSION);
+		/*
+		 * With HDCP enabled on capable hardware, check if HW
+		 * or SW keys should be used.
+		 */
+		if (!hdcp_disabled && (reg_val >= HDCP_SEL_MIN_SEC_VERSION)) {
+			reg_val = DSS_REG_R_ND(io,
+				QFPROM_RAW_FEAT_CONFIG_ROW0_MSB +
+				QFPROM_RAW_VERSION_4);
+			if (!(reg_val & BIT(23)))
+				hdmi_ctrl->hdcp1_use_sw_keys = true;
+		}
+	}
+
+	DEV_DBG("%s: Features <HDMI:%s, HDCP:%s>\n", __func__,
+		hdmi_disabled ? "OFF" : "ON", hdcp_disabled ? "OFF" : "ON");
+
+	if (hdmi_disabled) {
+		DEV_ERR("%s: HDMI disabled\n", __func__);
+		ret = -ENODEV;
+		goto end;
+	}
+
+	hdmi_ctrl->hdcp14_present = !hdcp_disabled;
+end:
+	return ret;
+} /* hdmi_tx_check_capability */
+
+/*
+ * Program the HDMI_CTRL register to enable or disable the controller.
+ *
+ * When enabling: sets the block-enable bit, optionally the legacy
+ * encryption-required bit (pre-v4 hardware with HDCP on, non-primary
+ * panel), downgrades to DVI mode if the sink's EDID is not HDMI-capable,
+ * and always selects the new DATAPATH_MODE (BIT(31)).
+ */
+static void hdmi_tx_set_mode(struct hdmi_tx_ctrl *hdmi_ctrl, u32 power_on)
+{
+	struct dss_io_data *io = NULL;
+	/* Defaults: Disable block, HDMI mode */
+	u32 reg_val = BIT(1);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	if (!io->base) {
+		DEV_ERR("%s: Core io is not initialized\n", __func__);
+		return;
+	}
+
+	if (power_on) {
+		/* Enable the block */
+		reg_val |= BIT(0);
+
+		/**
+		 * HDMI Encryption, if HDCP is enabled
+		 * The ENC_REQUIRED bit is only available on HDMI Tx major
+		 * version less than 4. From 4 onwards, this bit is controlled
+		 * by TZ
+		 */
+		if (hdmi_ctrl->hdmi_tx_ver < 4 &&
+			hdmi_tx_is_hdcp_enabled(hdmi_ctrl) &&
+			!hdmi_ctrl->pdata.primary)
+			reg_val |= BIT(2);
+
+		/* Set transmission mode to DVI based in EDID info */
+		if (!hdmi_edid_get_sink_mode(hdmi_tx_get_fd(HDMI_TX_FEAT_EDID)))
+			reg_val &= ~BIT(1); /* DVI mode */
+
+		/*
+		 * Use DATAPATH_MODE as 1 always, the new mode that also
+		 * supports scrambler and HDCP 2.2. The legacy mode should no
+		 * longer be used
+		 */
+		reg_val |= BIT(31);
+	}
+
+	DSS_REG_W(io, HDMI_CTRL, reg_val);
+
+	DEV_DBG("HDMI Core: %s, HDMI_CTRL=0x%08x\n",
+		power_on ? "Enable" : "Disable", reg_val);
+} /* hdmi_tx_set_mode */
+
+/*
+ * Track which power modules (HPD/DDC/CEC) are active as a bitmask and
+ * select the matching composite pinctrl state.  HPD is the gating
+ * module: with HPD inactive the suspend state is always chosen.
+ * Returns 0 on success (or when no pinctrl is present), negative errno
+ * otherwise; pin_states is only committed after a successful switch.
+ */
+static int hdmi_tx_pinctrl_set_state(struct hdmi_tx_ctrl *hdmi_ctrl,
+			enum hdmi_tx_power_module_type module, bool active)
+{
+	struct pinctrl_state *pin_state = NULL;
+	int rc = -EFAULT;
+	struct dss_module_power *power_data = NULL;
+	u64 cur_pin_states;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -ENODEV;
+	}
+
+	/* no pinctrl configured for this target: nothing to do */
+	if (IS_ERR_OR_NULL(hdmi_ctrl->pin_res.pinctrl))
+		return 0;
+
+	power_data = &hdmi_ctrl->pdata.power_data[module];
+
+	/* tentative bitmask with this module toggled */
+	cur_pin_states = active ? (hdmi_ctrl->pdata.pin_states | BIT(module))
+				: (hdmi_ctrl->pdata.pin_states & ~BIT(module));
+
+	if (cur_pin_states & BIT(HDMI_TX_HPD_PM)) {
+		if (cur_pin_states & BIT(HDMI_TX_DDC_PM)) {
+			if (cur_pin_states & BIT(HDMI_TX_CEC_PM))
+				pin_state = hdmi_ctrl->pin_res.state_active;
+			else
+				pin_state =
+					hdmi_ctrl->pin_res.state_ddc_active;
+		} else if (cur_pin_states & BIT(HDMI_TX_CEC_PM)) {
+			pin_state = hdmi_ctrl->pin_res.state_cec_active;
+		} else {
+			pin_state = hdmi_ctrl->pin_res.state_hpd_active;
+		}
+	} else {
+		pin_state = hdmi_ctrl->pin_res.state_suspend;
+	}
+
+	if (!IS_ERR_OR_NULL(pin_state)) {
+		rc = pinctrl_select_state(hdmi_ctrl->pin_res.pinctrl,
+				pin_state);
+		if (rc)
+			pr_err("%s: cannot set pins\n", __func__);
+		else
+			hdmi_ctrl->pdata.pin_states = cur_pin_states;
+	} else {
+		pr_err("%s: pinstate not found\n", __func__);
+	}
+
+	return rc;
+}
+
+/*
+ * Acquire the device's pinctrl handle and look up the named pin states
+ * used by hdmi_tx_pinctrl_set_state().  Missing individual states are
+ * tolerated (logged at debug level) since not every board defines them.
+ */
+static int hdmi_tx_pinctrl_init(struct platform_device *pdev)
+{
+	struct hdmi_tx_ctrl *hdmi_ctrl;
+
+	hdmi_ctrl = platform_get_drvdata(pdev);
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -ENODEV;
+	}
+
+	hdmi_ctrl->pin_res.pinctrl = devm_pinctrl_get(&pdev->dev);
+	if (IS_ERR_OR_NULL(hdmi_ctrl->pin_res.pinctrl)) {
+		/*
+		 * NOTE(review): if devm_pinctrl_get() returned NULL (e.g.
+		 * CONFIG_PINCTRL disabled), PTR_ERR(NULL) is 0 and this
+		 * reports success — confirm whether that is intended.
+		 */
+		pr_err("%s: failed to get pinctrl\n", __func__);
+		return PTR_ERR(hdmi_ctrl->pin_res.pinctrl);
+	}
+
+	hdmi_ctrl->pin_res.state_active =
+		pinctrl_lookup_state(hdmi_ctrl->pin_res.pinctrl, "hdmi_active");
+	if (IS_ERR_OR_NULL(hdmi_ctrl->pin_res.state_active))
+		pr_debug("%s: cannot get active pinstate\n", __func__);
+
+	hdmi_ctrl->pin_res.state_hpd_active =
+		pinctrl_lookup_state(hdmi_ctrl->pin_res.pinctrl,
+							"hdmi_hpd_active");
+	if (IS_ERR_OR_NULL(hdmi_ctrl->pin_res.state_hpd_active))
+		pr_debug("%s: cannot get hpd active pinstate\n", __func__);
+
+	hdmi_ctrl->pin_res.state_cec_active =
+		pinctrl_lookup_state(hdmi_ctrl->pin_res.pinctrl,
+							"hdmi_cec_active");
+	if (IS_ERR_OR_NULL(hdmi_ctrl->pin_res.state_cec_active))
+		pr_debug("%s: cannot get cec active pinstate\n", __func__);
+
+	hdmi_ctrl->pin_res.state_ddc_active =
+		pinctrl_lookup_state(hdmi_ctrl->pin_res.pinctrl,
+							"hdmi_ddc_active");
+	if (IS_ERR_OR_NULL(hdmi_ctrl->pin_res.state_ddc_active))
+		pr_debug("%s: cannot get ddc active pinstate\n", __func__);
+
+	hdmi_ctrl->pin_res.state_suspend =
+		pinctrl_lookup_state(hdmi_ctrl->pin_res.pinctrl, "hdmi_sleep");
+	if (IS_ERR_OR_NULL(hdmi_ctrl->pin_res.state_suspend))
+		pr_debug("%s: cannot get sleep pinstate\n", __func__);
+
+	return 0;
+}
+
+/*
+ * hdmi_tx_config_power() - acquire or release power resources for @module.
+ * @config: 1 = configure regulators, create a register-bus vote client and
+ *          obtain clock handles (unwinding on failure); 0 = release all of
+ *          them in reverse order.
+ *
+ * NOTE(review): power_data points at an element of a fixed-size array, so
+ * the "!power_data" check below can never fire; kept as-is.
+ */
+static int hdmi_tx_config_power(struct hdmi_tx_ctrl *hdmi_ctrl,
+	enum hdmi_tx_power_module_type module, int config)
+{
+	int rc = 0;
+	struct dss_module_power *power_data = NULL;
+	char name[MAX_CLIENT_NAME_LEN];
+
+	if (!hdmi_ctrl || module >= HDMI_TX_MAX_PM) {
+		DEV_ERR("%s: Error: invalid input\n", __func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	power_data = &hdmi_ctrl->pdata.power_data[module];
+	if (!power_data) {
+		DEV_ERR("%s: Error: invalid power data\n", __func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (config) {
+		rc = msm_dss_config_vreg(&hdmi_ctrl->pdev->dev,
+			power_data->vreg_config, power_data->num_vreg, 1);
+		if (rc) {
+			DEV_ERR("%s: Failed to config %s vreg. Err=%d\n",
+				__func__, hdmi_tx_pm_name(module), rc);
+			goto exit;
+		}
+
+		/* one bus vote client per power module, named "hdmi:<module>" */
+		snprintf(name, MAX_CLIENT_NAME_LEN, "hdmi:%u", module);
+		hdmi_ctrl->pdata.reg_bus_clt[module] =
+			mdss_reg_bus_vote_client_create(name);
+		if (IS_ERR(hdmi_ctrl->pdata.reg_bus_clt[module])) {
+			pr_err("reg bus client create failed\n");
+			msm_dss_config_vreg(&hdmi_ctrl->pdev->dev,
+			power_data->vreg_config, power_data->num_vreg, 0);
+			rc = PTR_ERR(hdmi_ctrl->pdata.reg_bus_clt[module]);
+			goto exit;
+		}
+
+		rc = msm_dss_get_clk(&hdmi_ctrl->pdev->dev,
+			power_data->clk_config, power_data->num_clk);
+		if (rc) {
+			DEV_ERR("%s: Failed to get %s clk. Err=%d\n",
+				__func__, hdmi_tx_pm_name(module), rc);
+
+			/* unwind: drop bus client and vreg config */
+			mdss_reg_bus_vote_client_destroy(
+				hdmi_ctrl->pdata.reg_bus_clt[module]);
+			hdmi_ctrl->pdata.reg_bus_clt[module] = NULL;
+			msm_dss_config_vreg(&hdmi_ctrl->pdev->dev,
+			power_data->vreg_config, power_data->num_vreg, 0);
+		}
+	} else {
+		msm_dss_put_clk(power_data->clk_config, power_data->num_clk);
+		mdss_reg_bus_vote_client_destroy(
+			hdmi_ctrl->pdata.reg_bus_clt[module]);
+		hdmi_ctrl->pdata.reg_bus_clt[module] = NULL;
+
+		rc = msm_dss_config_vreg(&hdmi_ctrl->pdev->dev,
+			power_data->vreg_config, power_data->num_vreg, 0);
+		if (rc)
+			DEV_ERR("%s: Fail to deconfig %s vreg. Err=%d\n",
+				__func__, hdmi_tx_pm_name(module), rc);
+	}
+
+exit:
+	return rc;
+} /* hdmi_tx_config_power */
+
+/*
+ * hdmi_tx_check_clk_state() - sanity-check the clocks of @module.
+ *
+ * Returns 0 when every clock of the module has a handle and reports a
+ * nonzero rate; -EINVAL if any clock is missing or not running.
+ *
+ * NOTE(review): pd is the address of an array element and can never be
+ * NULL, so the "!pd" check is dead code; kept as-is.
+ */
+static int hdmi_tx_check_clk_state(struct hdmi_tx_ctrl *hdmi_ctrl,
+	enum hdmi_tx_power_module_type module)
+{
+	int i;
+	int rc = 0;
+	struct dss_module_power *pd = NULL;
+
+	if (!hdmi_ctrl || module >= HDMI_TX_MAX_PM) {
+		DEV_ERR("%s: Error: invalid input\n", __func__);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	pd = &hdmi_ctrl->pdata.power_data[module];
+	if (!pd) {
+		DEV_ERR("%s: Error: invalid power data\n", __func__);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	for (i = 0; i < pd->num_clk; i++) {
+		struct clk *clk = pd->clk_config[i].clk;
+
+		if (clk) {
+			u32 rate = clk_get_rate(clk);
+
+			DEV_DBG("%s: clk %s: rate %d\n", __func__,
+				pd->clk_config[i].clk_name, rate);
+
+			/* a configured clock with rate 0 is not running */
+			if (!rate) {
+				rc = -EINVAL;
+				goto error;
+			}
+		} else {
+			DEV_ERR("%s: clk %s: not configured\n", __func__,
+				pd->clk_config[i].clk_name);
+
+			rc = -EINVAL;
+			goto error;
+		}
+	}
+
+	return 0;
+error:
+	return rc;
+}
+
+/*
+ * hdmi_tx_enable_power() - enable or disable all power resources of @module.
+ *
+ * Enable order: regulators -> pinctrl state -> gpios -> register-bus vote ->
+ * clock rates -> clocks; disable reverses it.  A disable request is skipped
+ * for the HPD/CEC modules while CEC wakeup is enabled so the wake path stays
+ * powered.  During continuous splash everything was left on by the
+ * bootloader, so the request is a no-op.
+ */
+static int hdmi_tx_enable_power(struct hdmi_tx_ctrl *hdmi_ctrl,
+	enum hdmi_tx_power_module_type module, int enable)
+{
+	int rc = 0;
+	struct dss_module_power *power_data = NULL;
+
+	if (!hdmi_ctrl || module >= HDMI_TX_MAX_PM) {
+		DEV_ERR("%s: Error: invalid input\n", __func__);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	power_data = &hdmi_ctrl->pdata.power_data[module];
+	if (!power_data) {
+		DEV_ERR("%s: Error: invalid power data\n", __func__);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (hdmi_ctrl->panel_data.panel_info.cont_splash_enabled) {
+		DEV_DBG("%s: %s enabled by splash.\n",
+				__func__, hdmi_pm_name(module));
+		return 0;
+	}
+
+	if (enable && !hdmi_ctrl->power_data_enable[module]) {
+		rc = msm_dss_enable_vreg(power_data->vreg_config,
+			power_data->num_vreg, 1);
+		if (rc) {
+			DEV_ERR("%s: Failed to enable %s vreg. Error=%d\n",
+				__func__, hdmi_tx_pm_name(module), rc);
+			goto error;
+		}
+
+		rc = hdmi_tx_pinctrl_set_state(hdmi_ctrl, module, enable);
+		if (rc) {
+			DEV_ERR("%s: Failed to set %s pinctrl state\n",
+				__func__, hdmi_tx_pm_name(module));
+			/*
+			 * Fix: roll back the regulators enabled above.  The
+			 * previous code jumped straight to "error" here and
+			 * leaked the vreg enable vote on this failure path.
+			 */
+			goto disable_vreg;
+		}
+
+		rc = msm_dss_enable_gpio(power_data->gpio_config,
+			power_data->num_gpio, 1);
+		if (rc) {
+			DEV_ERR("%s: Failed to enable %s gpio. Error=%d\n",
+				__func__, hdmi_tx_pm_name(module), rc);
+			goto disable_vreg;
+		}
+		mdss_update_reg_bus_vote(hdmi_ctrl->pdata.reg_bus_clt[module],
+			VOTE_INDEX_LOW);
+
+		rc = msm_dss_clk_set_rate(power_data->clk_config,
+			power_data->num_clk);
+		if (rc) {
+			DEV_ERR("%s: failed to set clks rate for %s. err=%d\n",
+				__func__, hdmi_tx_pm_name(module), rc);
+			goto disable_gpio;
+		}
+
+		rc = msm_dss_enable_clk(power_data->clk_config,
+			power_data->num_clk, 1);
+		if (rc) {
+			DEV_ERR("%s: Failed to enable clks for %s. Error=%d\n",
+				__func__, hdmi_tx_pm_name(module), rc);
+			goto disable_gpio;
+		}
+		hdmi_ctrl->power_data_enable[module] = true;
+	} else if (!enable && hdmi_ctrl->power_data_enable[module] &&
+		(!hdmi_tx_is_cec_wakeup_en(hdmi_ctrl) ||
+		((module != HDMI_TX_HPD_PM) && (module != HDMI_TX_CEC_PM)))) {
+		/* disable in reverse order of the enable sequence */
+		msm_dss_enable_clk(power_data->clk_config,
+			power_data->num_clk, 0);
+		mdss_update_reg_bus_vote(hdmi_ctrl->pdata.reg_bus_clt[module],
+			VOTE_INDEX_DISABLE);
+		msm_dss_enable_gpio(power_data->gpio_config,
+			power_data->num_gpio, 0);
+		hdmi_tx_pinctrl_set_state(hdmi_ctrl, module, 0);
+		msm_dss_enable_vreg(power_data->vreg_config,
+			power_data->num_vreg, 0);
+		hdmi_ctrl->power_data_enable[module] = false;
+	}
+
+	return rc;
+
+disable_gpio:
+	mdss_update_reg_bus_vote(hdmi_ctrl->pdata.reg_bus_clt[module],
+		VOTE_INDEX_DISABLE);
+	msm_dss_enable_gpio(power_data->gpio_config, power_data->num_gpio, 0);
+disable_vreg:
+	msm_dss_enable_vreg(power_data->vreg_config, power_data->num_vreg, 0);
+error:
+	return rc;
+} /* hdmi_tx_enable_power */
+
+/* Power down the HDMI core path: CEC block first, then the core supplies. */
+static void hdmi_tx_core_off(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	if (hdmi_ctrl) {
+		hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_CEC_PM, 0);
+		hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_CORE_PM, 0);
+	} else {
+		DEV_ERR("%s: invalid input\n", __func__);
+	}
+} /* hdmi_tx_core_off */
+
+/*
+ * hdmi_tx_core_on() - power up the HDMI core, then the CEC block.
+ *
+ * On CEC power-up failure the core power is rolled back so the modules
+ * are always enabled as a pair.  Returns 0 on success or a negative
+ * error code.
+ */
+static int hdmi_tx_core_on(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	int rc = 0;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_CORE_PM, 1);
+	if (rc) {
+		DEV_ERR("%s: core hdmi_msm_enable_power failed rc = %d\n",
+			__func__, rc);
+		return rc;
+	}
+	rc = hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_CEC_PM, 1);
+	if (rc) {
+		DEV_ERR("%s: cec hdmi_msm_enable_power failed rc = %d\n",
+			__func__, rc);
+		goto disable_core_power;
+	}
+
+	return rc;
+disable_core_power:
+	hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_CORE_PM, 0);
+	return rc;
+} /* hdmi_tx_core_on */
+
+/*
+ * hdmi_tx_phy_reset() - pulse the PHY and PLL software resets.
+ *
+ * The active polarity of each reset is read from HDMI_PHY_CTRL (bit 3 for
+ * the PHY, bit 1 for the PLL); each reset line is asserted and then
+ * deasserted.  All writes are derived from the initially latched register
+ * value, so the statement order here is deliberate and must be preserved.
+ */
+static void hdmi_tx_phy_reset(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	unsigned int phy_reset_polarity = 0x0;
+	unsigned int pll_reset_polarity = 0x0;
+	unsigned int val;
+	struct dss_io_data *io = NULL;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	if (!io->base) {
+		DEV_ERR("%s: core io not inititalized\n", __func__);
+		return;
+	}
+
+	val = DSS_REG_R_ND(io, HDMI_PHY_CTRL);
+
+	phy_reset_polarity = val >> 3 & 0x1;
+	pll_reset_polarity = val >> 1 & 0x1;
+
+	/* assert PHY reset (direction depends on polarity) */
+	if (phy_reset_polarity == 0)
+		DSS_REG_W_ND(io, HDMI_PHY_CTRL, val | SW_RESET);
+	else
+		DSS_REG_W_ND(io, HDMI_PHY_CTRL, val & (~SW_RESET));
+
+	/* assert PLL reset */
+	if (pll_reset_polarity == 0)
+		DSS_REG_W_ND(io, HDMI_PHY_CTRL, val | SW_RESET_PLL);
+	else
+		DSS_REG_W_ND(io, HDMI_PHY_CTRL, val & (~SW_RESET_PLL));
+
+	/* deassert PHY reset */
+	if (phy_reset_polarity == 0)
+		DSS_REG_W_ND(io, HDMI_PHY_CTRL, val & (~SW_RESET));
+	else
+		DSS_REG_W_ND(io, HDMI_PHY_CTRL, val | SW_RESET);
+
+	/* deassert PLL reset */
+	if (pll_reset_polarity == 0)
+		DSS_REG_W_ND(io, HDMI_PHY_CTRL, val & (~SW_RESET_PLL));
+	else
+		DSS_REG_W_ND(io, HDMI_PHY_CTRL, val | SW_RESET_PLL);
+} /* hdmi_tx_phy_reset */
+
+/*
+ * hdmi_tx_audio_info_setup() - audio codec callback to program the audio path.
+ *
+ * Caches @params and configures audio only while the panel is on and in
+ * HDMI (not DVI) mode; otherwise fails with -EPERM and rate-limit-logs the
+ * hpd/ack/switch state to help debug audio drops.  Serialized by tx_lock.
+ */
+static int hdmi_tx_audio_info_setup(struct platform_device *pdev,
+	struct msm_hdmi_audio_setup_params *params)
+{
+	int rc = 0;
+	struct hdmi_tx_ctrl *hdmi_ctrl = platform_get_drvdata(pdev);
+	u32 is_mode_dvi;
+
+	if (!hdmi_ctrl || !params) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -ENODEV;
+	}
+
+	mutex_lock(&hdmi_ctrl->tx_lock);
+
+	is_mode_dvi = hdmi_tx_is_dvi_mode(hdmi_ctrl);
+
+	if (!is_mode_dvi && hdmi_tx_is_panel_on(hdmi_ctrl)) {
+		memcpy(&hdmi_ctrl->audio_params, params,
+			sizeof(struct msm_hdmi_audio_setup_params));
+
+		hdmi_tx_audio_setup(hdmi_ctrl);
+	} else {
+		rc = -EPERM;
+	}
+
+	if (rc) {
+		struct hdmi_audio_status status = {0};
+
+		if (hdmi_ctrl->audio_ops.status)
+			hdmi_ctrl->audio_ops.status(hdmi_ctrl->audio_data,
+				&status);
+
+		dev_err_ratelimited(&hdmi_ctrl->pdev->dev,
+			"%s: hpd %d, ack %d, switch %d, mode %s, power %d\n",
+			__func__, hdmi_ctrl->hpd_state,
+			status.ack_pending, status.switched,
+			is_mode_dvi ? "dvi" : "hdmi",
+			hdmi_ctrl->panel_power_on);
+	}
+	mutex_unlock(&hdmi_ctrl->tx_lock);
+	return rc;
+}
+
+/* Audio codec callback: fetch the audio data block from the parsed EDID. */
+static int hdmi_tx_get_audio_edid_blk(struct platform_device *pdev,
+	struct msm_hdmi_audio_edid_blk *blk)
+{
+	struct hdmi_tx_ctrl *hdmi_ctrl = platform_get_drvdata(pdev);
+	void *edid_fd;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -ENODEV;
+	}
+
+	edid_fd = hdmi_tx_get_fd(HDMI_TX_FEAT_EDID);
+
+	return hdmi_edid_get_audio_blk(edid_fd, blk);
+} /* hdmi_tx_get_audio_edid_blk */
+
+/*
+ * hdmi_tx_tmds_enabled() - MHL callback reporting whether TMDS is active.
+ *
+ * Returns 1 when the timing generator is on, 0 otherwise (including on
+ * invalid input).
+ */
+static u8 hdmi_tx_tmds_enabled(struct platform_device *pdev)
+{
+	struct hdmi_tx_ctrl *hdmi_ctrl = platform_get_drvdata(pdev);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		/*
+		 * Fix: the return type is u8, so the previous
+		 * "return -ENODEV" was truncated to a nonzero value and
+		 * read by callers as "TMDS enabled".  Report disabled.
+		 */
+		return 0;
+	}
+
+	/* status of tmds */
+	return (hdmi_ctrl->timing_gen_on == true);
+}
+
+/*
+ * MHL callback: record the downstream device's maximum pixel clock and
+ * mark the downstream data as registered.  Zero is rejected.
+ */
+static int hdmi_tx_set_mhl_max_pclk(struct platform_device *pdev, u32 max_val)
+{
+	struct hdmi_tx_ctrl *hdmi_ctrl = platform_get_drvdata(pdev);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -ENODEV;
+	}
+
+	if (!max_val) {
+		DEV_ERR("%s: invalid max pclk val\n", __func__);
+		return -EINVAL;
+	}
+
+	hdmi_ctrl->ds_data.ds_max_clk = max_val;
+	hdmi_ctrl->ds_data.ds_registered = true;
+
+	return 0;
+}
+
+/*
+ * Populate the operation table used by an MHL bridge driver so it can
+ * query TMDS status, cap the pixel clock and drive upstream HPD.
+ * @data is accepted for interface compatibility and not used here.
+ */
+int msm_hdmi_register_mhl(struct platform_device *pdev,
+			  struct msm_hdmi_mhl_ops *ops, void *data)
+{
+	struct hdmi_tx_ctrl *hdmi_ctrl = platform_get_drvdata(pdev);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid pdev\n", __func__);
+		return -ENODEV;
+	}
+	if (!ops) {
+		DEV_ERR("%s: invalid ops\n", __func__);
+		return -EINVAL;
+	}
+
+	ops->set_upstream_hpd = hdmi_tx_set_mhl_hpd;
+	ops->set_mhl_max_pclk = hdmi_tx_set_mhl_max_pclk;
+	ops->tmds_enabled = hdmi_tx_tmds_enabled;
+
+	hdmi_ctrl->ds_registered = true;
+
+	return 0;
+}
+
+/*
+ * hdmi_tx_get_cable_status() - audio codec callback reporting cable state.
+ *
+ * Returns nonzero when the panel is on (sampled under the hpd state lock)
+ * and records @vote as the audio driver's request to keep the HDMI core
+ * powered.  Calling this with the cable disconnected is logged as an error
+ * since it would take down the whole audio path.
+ */
+static int hdmi_tx_get_cable_status(struct platform_device *pdev, u32 vote)
+{
+	struct hdmi_tx_ctrl *hdmi_ctrl = platform_get_drvdata(pdev);
+	unsigned long flags;
+	u32 hpd;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -ENODEV;
+	}
+
+	spin_lock_irqsave(&hdmi_ctrl->hpd_state_lock, flags);
+	hpd = hdmi_tx_is_panel_on(hdmi_ctrl);
+	spin_unlock_irqrestore(&hdmi_ctrl->hpd_state_lock, flags);
+
+	hdmi_ctrl->vote_hdmi_core_on = false;
+
+	if (vote && hpd)
+		hdmi_ctrl->vote_hdmi_core_on = true;
+
+	/*
+	 * if cable is not connected and audio calls this function,
+	 * consider this as an error as it will result in whole
+	 * audio path to fail.
+	 */
+	if (!hpd) {
+		struct hdmi_audio_status status = {0};
+
+		if (hdmi_ctrl->audio_ops.status)
+			hdmi_ctrl->audio_ops.status(hdmi_ctrl->audio_data,
+				&status);
+
+		dev_err_ratelimited(&hdmi_ctrl->pdev->dev,
+			"%s: hpd %d, ack %d, switch %d, power %d\n",
+			__func__, hdmi_ctrl->hpd_state,
+			status.ack_pending, status.switched,
+			hdmi_ctrl->panel_power_on);
+	}
+
+	return hpd;
+}
+
+/*
+ * msm_hdmi_register_audio_codec() - hook up the audio codec callbacks.
+ *
+ * Fills @ops with the audio setup, EDID audio block and cable status
+ * callbacks.  Exported for the audio codec driver.
+ */
+int msm_hdmi_register_audio_codec(struct platform_device *pdev,
+	struct msm_hdmi_audio_codec_ops *ops)
+{
+	struct hdmi_tx_ctrl *hdmi_ctrl = platform_get_drvdata(pdev);
+
+	if (!hdmi_ctrl || !ops) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -ENODEV;
+	}
+
+	ops->audio_info_setup = hdmi_tx_audio_info_setup;
+	ops->get_audio_edid_blk = hdmi_tx_get_audio_edid_blk;
+	ops->hdmi_cable_status = hdmi_tx_get_cable_status;
+
+	return 0;
+} /* hdmi_tx_audio_register */
+EXPORT_SYMBOL(msm_hdmi_register_audio_codec);
+
+/*
+ * hdmi_tx_setup_tmds_clk_rate() - compute the TMDS channel clock rate.
+ *
+ * Derives the rate from the current timing's pixel frequency divided by
+ * the pclk-to-TMDS ratio of the active output format (RGB/YUV422/YUV420).
+ * Returns 0 on invalid input.
+ */
+static int hdmi_tx_setup_tmds_clk_rate(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	u32 rate = 0;
+	struct msm_hdmi_mode_timing_info *timing = NULL;
+	u32 rate_ratio;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: Bad input parameters\n", __func__);
+		goto end;
+	}
+
+	/*
+	 * timing is the address of a member of hdmi_ctrl and can never be
+	 * NULL; the previous "!timing" check was dead code and is removed.
+	 */
+	timing = &hdmi_ctrl->timing;
+
+	switch (hdmi_ctrl->panel_data.panel_info.out_format) {
+	case MDP_Y_CBCR_H2V2:
+		rate_ratio = HDMI_TX_YUV420_24BPP_PCLK_TMDS_CH_RATE_RATIO;
+		break;
+	case MDP_Y_CBCR_H2V1:
+		rate_ratio = HDMI_TX_YUV422_24BPP_PCLK_TMDS_CH_RATE_RATIO;
+		break;
+	default:
+		rate_ratio = HDMI_TX_RGB_24BPP_PCLK_TMDS_CH_RATE_RATIO;
+		break;
+	}
+
+	rate = timing->pixel_freq / rate_ratio;
+
+end:
+	return rate;
+}
+
+/* Read the raw cable-sense bit (BIT(1)) from HDMI_HPD_INT_STATUS. */
+static inline bool hdmi_tx_hw_is_cable_connected(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	u32 status = DSS_REG_R(&hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO],
+			HDMI_HPD_INT_STATUS);
+
+	return !!(status & BIT(1));
+}
+
+/*
+ * hdmi_tx_hpd_polarity_setup() - arm the HPD interrupt for one edge.
+ *
+ * @polarity: true listens for connect, false for disconnect.  Skipped in
+ * simulation mode.  If the cable is already in the state being listened
+ * for, the HPD circuit (bit 28) is toggled so hardware re-runs the HPD
+ * sense and the pending state is not missed.
+ */
+static void hdmi_tx_hpd_polarity_setup(struct hdmi_tx_ctrl *hdmi_ctrl,
+	bool polarity)
+{
+	struct dss_io_data *io = NULL;
+	bool cable_sense;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	if (!io->base) {
+		DEV_ERR("%s: core io is not initialized\n", __func__);
+		return;
+	}
+
+	if (hdmi_ctrl->sim_mode) {
+		DEV_DBG("%s: sim mode enabled\n", __func__);
+		return;
+	}
+
+	/* BIT(2) enables the interrupt, BIT(1) selects connect polarity */
+	if (polarity)
+		DSS_REG_W(io, HDMI_HPD_INT_CTRL, BIT(2) | BIT(1));
+	else
+		DSS_REG_W(io, HDMI_HPD_INT_CTRL, BIT(2));
+
+	cable_sense = hdmi_tx_hw_is_cable_connected(hdmi_ctrl);
+	DEV_DBG("%s: listen = %s, sense = %s\n", __func__,
+		polarity ? "connect" : "disconnect",
+		cable_sense ? "connect" : "disconnect");
+
+	if (cable_sense == polarity) {
+		u32 reg_val = DSS_REG_R(io, HDMI_HPD_CTRL);
+
+		/* Toggle HPD circuit to trigger HPD sense */
+		DSS_REG_W(io, HDMI_HPD_CTRL, reg_val & ~BIT(28));
+		DSS_REG_W(io, HDMI_HPD_CTRL, reg_val | BIT(28));
+	}
+} /* hdmi_tx_hpd_polarity_setup */
+
+/*
+ * hdmi_tx_audio_off() - shut down the audio path and clear cached params.
+ */
+static inline void hdmi_tx_audio_off(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	/*
+	 * Fix: guard the whole body.  The old code NULL-checked hdmi_ctrl
+	 * only for the ops callback and then dereferenced it unconditionally
+	 * in memset(), a NULL pointer dereference on the error path.
+	 */
+	if (!hdmi_ctrl)
+		return;
+
+	if (hdmi_ctrl->audio_ops.off)
+		hdmi_ctrl->audio_ops.off(hdmi_ctrl->audio_data);
+
+	memset(&hdmi_ctrl->audio_params, 0,
+		sizeof(struct msm_hdmi_audio_setup_params));
+}
+
+/*
+ * hdmi_tx_power_off() - power down the HDMI panel path.
+ *
+ * Sequence: audio off (HDMI mode only), panel off, core off.  HPD is also
+ * turned off when an off is pending, the panel is suspending, or the
+ * display is non-pluggable.  Any registered hpd_done callback is notified.
+ *
+ * NOTE(review): this returns 0 and logs "HDMI Core: OFF" even on the
+ * early exits (missing core io / already off) -- confirm intentional.
+ */
+static int hdmi_tx_power_off(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	struct dss_io_data *io = NULL;
+	void *pdata =  NULL;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	pdata = hdmi_tx_get_fd(HDMI_TX_FEAT_PANEL);
+	if (!pdata) {
+		DEV_ERR("%s: invalid panel data\n", __func__);
+		return -EINVAL;
+	}
+
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	if (!io->base) {
+		DEV_ERR("%s: Core io is not initialized\n", __func__);
+		goto end;
+	}
+
+	if (!hdmi_ctrl->panel_power_on) {
+		DEV_DBG("%s: hdmi_ctrl is already off\n", __func__);
+		goto end;
+	}
+
+	if (!hdmi_tx_is_dvi_mode(hdmi_ctrl))
+		hdmi_tx_audio_off(hdmi_ctrl);
+
+	if (hdmi_ctrl->panel_ops.off)
+		hdmi_ctrl->panel_ops.off(pdata);
+
+	hdmi_tx_core_off(hdmi_ctrl);
+
+	hdmi_ctrl->panel_power_on = false;
+
+	if (hdmi_ctrl->hpd_off_pending || hdmi_ctrl->panel_suspend ||
+		!hdmi_ctrl->pdata.pluggable)
+		hdmi_tx_hpd_off(hdmi_ctrl);
+
+	if (hdmi_ctrl->hdmi_tx_hpd_done)
+		hdmi_ctrl->hdmi_tx_hpd_done(
+			hdmi_ctrl->downstream_data);
+end:
+	DEV_INFO("%s: HDMI Core: OFF\n", __func__);
+	return 0;
+} /* hdmi_tx_power_off */
+
+/*
+ * hdmi_tx_power_on() - power up the HDMI panel path.
+ *
+ * Validates the HPD clocks, resolves the video format (vic) and timing,
+ * programs the panel, sets the core pixel clock (halved for YUV420 via
+ * the shift by @div) and powers the core.  When allowed, audio switch and
+ * av-unmute are signalled, HPD is re-armed for disconnect and any
+ * registered hpd_done callback is notified.
+ */
+static int hdmi_tx_power_on(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	int ret;
+	u32 div = 0;
+	struct mdss_panel_data *panel_data = &hdmi_ctrl->panel_data;
+	void *pdata = hdmi_tx_get_fd(HDMI_TX_FEAT_PANEL);
+	void *edata = hdmi_tx_get_fd(HDMI_TX_FEAT_EDID);
+
+	/* non-pluggable displays have no cable event to turn HPD on */
+	if (!hdmi_ctrl->pdata.pluggable)
+		hdmi_tx_hpd_on(hdmi_ctrl);
+
+	ret = hdmi_tx_check_clk_state(hdmi_ctrl, HDMI_TX_HPD_PM);
+	if (ret) {
+		DEV_ERR("%s: clocks not on\n", __func__);
+		return -EINVAL;
+	}
+
+	if (hdmi_ctrl->panel_ops.get_vic)
+		hdmi_ctrl->vic = hdmi_ctrl->panel_ops.get_vic(
+			&panel_data->panel_info, &hdmi_ctrl->ds_data);
+
+	if (hdmi_ctrl->vic <= 0) {
+		DEV_ERR("%s: invalid vic\n", __func__);
+		return -EINVAL;
+	}
+
+	ret = hdmi_get_supported_mode(&hdmi_ctrl->timing,
+		&hdmi_ctrl->ds_data, hdmi_ctrl->vic);
+	if (ret || !hdmi_ctrl->timing.supported) {
+		DEV_ERR("%s: invalid timing data\n", __func__);
+		return -EINVAL;
+	}
+
+	hdmi_ctrl->panel.vic = hdmi_ctrl->vic;
+
+	/* infoframes only make sense for HDMI sinks with CEA formats */
+	if (!hdmi_tx_is_dvi_mode(hdmi_ctrl) &&
+	    hdmi_tx_is_cea_format(hdmi_ctrl->vic))
+		hdmi_ctrl->panel.infoframe = true;
+	else
+		hdmi_ctrl->panel.infoframe = false;
+
+	hdmi_ctrl->panel.scan_info = hdmi_edid_get_sink_scaninfo(edata,
+					hdmi_ctrl->vic);
+	hdmi_ctrl->panel.scrambler = hdmi_edid_get_sink_scrambler_support(
+					edata);
+
+	if (hdmi_ctrl->panel_ops.on)
+		hdmi_ctrl->panel_ops.on(pdata);
+
+	/* YUV420 runs the core clock at half the pixel rate */
+	if (panel_data->panel_info.out_format == MDP_Y_CBCR_H2V2)
+		div = 1;
+
+	hdmi_ctrl->pdata.power_data[HDMI_TX_CORE_PM].clk_config[0].rate =
+		(hdmi_ctrl->timing.pixel_freq * 1000) >> div;
+
+	hdmi_edid_set_video_resolution(hdmi_tx_get_fd(HDMI_TX_FEAT_EDID),
+		hdmi_ctrl->vic, false);
+
+	hdmi_tx_core_on(hdmi_ctrl);
+
+	if (hdmi_ctrl->panel.infoframe &&
+	    !hdmi_tx_is_encryption_set(hdmi_ctrl) &&
+	    hdmi_tx_is_stream_shareable(hdmi_ctrl)) {
+		hdmi_tx_set_audio_switch_node(hdmi_ctrl, 1);
+		hdmi_tx_config_avmute(hdmi_ctrl, false);
+	}
+
+	hdmi_ctrl->panel_power_on = true;
+
+	hdmi_tx_hpd_polarity_setup(hdmi_ctrl, HPD_DISCONNECT_POLARITY);
+
+	if (hdmi_ctrl->hdmi_tx_hpd_done)
+		hdmi_ctrl->hdmi_tx_hpd_done(hdmi_ctrl->downstream_data);
+
+	DEV_DBG("%s: hdmi_ctrl core on\n", __func__);
+	return 0;
+}
+
+/*
+ * hdmi_tx_hpd_off() - disable hot-plug detection.
+ *
+ * Masks HPD interrupts; for pluggable displays with CEC wakeup enabled the
+ * wake irq is kept armed, otherwise the irq and controller are fully
+ * disabled.  5V and HPD module power are dropped and the cached hpd state
+ * is cleared under the state lock.
+ */
+static void hdmi_tx_hpd_off(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	int rc = 0;
+	struct dss_io_data *io = NULL;
+	unsigned long flags;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	if (!hdmi_ctrl->hpd_initialized) {
+		DEV_DBG("%s: HPD is already OFF, returning\n", __func__);
+		return;
+	}
+
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	if (!io->base) {
+		DEV_ERR("%s: core io not inititalized\n", __func__);
+		return;
+	}
+
+	/* Turn off HPD interrupts */
+	DSS_REG_W(io, HDMI_HPD_INT_CTRL, 0);
+
+	/* non pluggable display should not enable wakeup interrupt */
+	if ((hdmi_tx_is_cec_wakeup_en(hdmi_ctrl) &&
+			hdmi_ctrl->pdata.pluggable)) {
+		hdmi_ctrl->mdss_util->enable_wake_irq(&hdmi_tx_hw);
+	} else {
+		hdmi_ctrl->mdss_util->disable_irq(&hdmi_tx_hw);
+		hdmi_tx_set_mode(hdmi_ctrl, false);
+	}
+	hdmi_tx_config_5v(hdmi_ctrl, false);
+	rc = hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_HPD_PM, 0);
+	if (rc)
+		DEV_INFO("%s: Failed to disable hpd power. Error=%d\n",
+			__func__, rc);
+
+	spin_lock_irqsave(&hdmi_ctrl->hpd_state_lock, flags);
+	hdmi_ctrl->hpd_state = false;
+	spin_unlock_irqrestore(&hdmi_ctrl->hpd_state_lock, flags);
+
+	hdmi_ctrl->hpd_initialized = false;
+	hdmi_ctrl->hpd_off_pending = false;
+
+	DEV_DBG("%s: HPD is now OFF\n", __func__);
+} /* hdmi_tx_hpd_off */
+
+/*
+ * hdmi_tx_hpd_on() - enable hot-plug detection.
+ *
+ * Powers the HPD module, resets the PHY (skipped while continuous splash
+ * is active since the bootloader configured it), programs the reference
+ * timer and the maximum hardware debounce, enables the irq and turns on
+ * the HPD hardware circuit (bit 28), then arms a connect-polarity
+ * interrupt.  Idempotent: a second call while HPD is on is a no-op.
+ */
+static int hdmi_tx_hpd_on(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	u32 reg_val;
+	int rc = 0;
+	struct dss_io_data *io = NULL;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	if (!io->base) {
+		DEV_ERR("%s: core io not inititalized\n", __func__);
+		return -EINVAL;
+	}
+
+	if (hdmi_ctrl->hpd_initialized) {
+		DEV_DBG("%s: HPD is already ON\n", __func__);
+	} else {
+		rc = hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_HPD_PM, true);
+		if (rc) {
+			DEV_ERR("%s: Failed to enable hpd power. rc=%d\n",
+				__func__, rc);
+			return rc;
+		}
+
+		dss_reg_dump(io->base, io->len, "HDMI-INIT: ", REG_DUMP);
+
+		if (!hdmi_ctrl->panel_data.panel_info.cont_splash_enabled) {
+			hdmi_tx_set_mode(hdmi_ctrl, false);
+			hdmi_tx_phy_reset(hdmi_ctrl);
+			hdmi_tx_set_mode(hdmi_ctrl, true);
+		}
+
+		DSS_REG_W(io, HDMI_USEC_REFTIMER, 0x0001001B);
+
+		if (hdmi_tx_is_cec_wakeup_en(hdmi_ctrl))
+			hdmi_ctrl->mdss_util->disable_wake_irq(&hdmi_tx_hw);
+
+		hdmi_ctrl->mdss_util->enable_irq(&hdmi_tx_hw);
+
+		hdmi_ctrl->hpd_initialized = true;
+
+		DEV_INFO("%s: HDMI HW version = 0x%x\n", __func__,
+			DSS_REG_R_ND(&hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO],
+				HDMI_VERSION));
+
+		/* set timeout to 4.1ms (max) for hardware debounce */
+		reg_val = DSS_REG_R(io, HDMI_HPD_CTRL) | 0x1FFF;
+
+		/* Turn on HPD HW circuit */
+		DSS_REG_W(io, HDMI_HPD_CTRL, reg_val | BIT(28));
+
+		hdmi_tx_hpd_polarity_setup(hdmi_ctrl, HPD_CONNECT_POLARITY);
+		DEV_DBG("%s: HPD is now ON\n", __func__);
+	}
+
+	return rc;
+} /* hdmi_tx_hpd_on */
+
+/*
+ * hdmi_tx_sysfs_enable_hpd() - sysfs-driven HPD switch.
+ *
+ * Turning HPD off while the panel is still powered is deferred via
+ * hpd_off_pending and completed by hdmi_tx_power_off().
+ */
+static int hdmi_tx_sysfs_enable_hpd(struct hdmi_tx_ctrl *hdmi_ctrl, int on)
+{
+	int rc = 0;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	DEV_DBG("%s: %d\n", __func__, on);
+	if (on) {
+		hdmi_ctrl->hpd_off_pending = false;
+		rc = hdmi_tx_hpd_on(hdmi_ctrl);
+	} else {
+		if (!hdmi_ctrl->panel_power_on)
+			hdmi_tx_hpd_off(hdmi_ctrl);
+		else
+			hdmi_ctrl->hpd_off_pending = true;
+	}
+
+	return rc;
+} /* hdmi_tx_sysfs_enable_hpd */
+
+/*
+ * hdmi_tx_set_mhl_hpd() - MHL callback forcing HPD on or off.
+ *
+ * The MHL request overrides the current hpd_feature_on state; on success
+ * the feature flag is toggled (the BIT(0) arithmetic keeps it 0/1).
+ * A request matching the current state is a logged no-op.  Serialized
+ * by tx_lock.
+ */
+static int hdmi_tx_set_mhl_hpd(struct platform_device *pdev, uint8_t on)
+{
+	int rc = 0;
+	struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+
+	hdmi_ctrl = platform_get_drvdata(pdev);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hdmi_ctrl->tx_lock);
+
+	/* mhl status should override */
+	hdmi_ctrl->mhl_hpd_on = on;
+
+	if (!on && hdmi_ctrl->hpd_feature_on) {
+		rc = hdmi_tx_sysfs_enable_hpd(hdmi_ctrl, false);
+	} else if (on && !hdmi_ctrl->hpd_feature_on) {
+		rc = hdmi_tx_sysfs_enable_hpd(hdmi_ctrl, true);
+	} else {
+		DEV_DBG("%s: hpd is already '%s'. return\n", __func__,
+			hdmi_ctrl->hpd_feature_on ? "enabled" : "disabled");
+		goto end;
+	}
+
+	if (!rc) {
+		/* toggle the 0/1 feature flag to the new state */
+		hdmi_ctrl->hpd_feature_on =
+			(~hdmi_ctrl->hpd_feature_on) & BIT(0);
+		DEV_DBG("%s: '%d'\n", __func__, hdmi_ctrl->hpd_feature_on);
+	} else {
+		DEV_ERR("%s: failed to '%s' hpd. rc = %d\n", __func__,
+			on ? "enable" : "disable", rc);
+	}
+end:
+	mutex_unlock(&hdmi_ctrl->tx_lock);
+	return rc;
+}
+
+/*
+ * hdmi_tx_isr() - top-level HDMI interrupt handler.
+ *
+ * Handles the HPD interrupt first: the new cable state is latched under
+ * the state lock, spurious interrupts (no state change) are acked and
+ * re-armed in place, and real transitions are acked and deferred to
+ * hpd_int_work.  Then fans out to the DDC, CEC and HDCP sub-handlers.
+ * Always returns IRQ_HANDLED.
+ */
+static irqreturn_t hdmi_tx_isr(int irq, void *data)
+{
+	struct dss_io_data *io = NULL;
+	struct hdmi_tx_ctrl *hdmi_ctrl = (struct hdmi_tx_ctrl *)data;
+	unsigned long flags;
+	u32 hpd_current_state;
+	u32 reg_val = 0;
+
+	if (!hdmi_ctrl) {
+		DEV_WARN("%s: invalid input data, ISR ignored\n", __func__);
+		goto end;
+	}
+
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	if (!io->base) {
+		DEV_WARN("%s: core io not initialized, ISR ignored\n",
+			__func__);
+		goto end;
+	}
+
+	if (DSS_REG_R(io, HDMI_HPD_INT_STATUS) & BIT(0)) {
+		spin_lock_irqsave(&hdmi_ctrl->hpd_state_lock, flags);
+		hpd_current_state = hdmi_ctrl->hpd_state;
+		hdmi_ctrl->hpd_state =
+			(DSS_REG_R(io, HDMI_HPD_INT_STATUS) & BIT(1)) >> 1;
+		spin_unlock_irqrestore(&hdmi_ctrl->hpd_state_lock, flags);
+
+		if (!completion_done(&hdmi_ctrl->hpd_int_done))
+			complete_all(&hdmi_ctrl->hpd_int_done);
+
+		/*
+		 * check if this is a spurious interrupt, if yes, reset
+		 * interrupts and return
+		 */
+		if (hpd_current_state == hdmi_ctrl->hpd_state) {
+			DEV_DBG("%s: spurious interrupt %d\n", __func__,
+				hpd_current_state);
+
+			/* enable interrupts */
+			reg_val |= BIT(2);
+
+			/* set polarity, reverse of current state */
+			reg_val |= (~hpd_current_state << 1) & BIT(1);
+
+			/* ack interrupt */
+			reg_val |= BIT(0);
+
+			DSS_REG_W(io, HDMI_HPD_INT_CTRL, reg_val);
+			goto end;
+		}
+
+		/*
+		 * Ack the current hpd interrupt and stop listening to
+		 * new hpd interrupt.
+		 */
+		DSS_REG_W(io, HDMI_HPD_INT_CTRL, BIT(0));
+
+		queue_work(hdmi_ctrl->workq, &hdmi_ctrl->hpd_int_work);
+	}
+
+	if (hdmi_ddc_isr(&hdmi_ctrl->ddc_ctrl,
+		hdmi_ctrl->hdmi_tx_ver))
+		DEV_ERR("%s: hdmi_ddc_isr failed\n", __func__);
+
+	if (hdmi_tx_get_fd(HDMI_TX_FEAT_CEC_HW)) {
+		if (hdmi_cec_isr(hdmi_tx_get_fd(HDMI_TX_FEAT_CEC_HW)))
+			DEV_ERR("%s: hdmi_cec_isr failed\n", __func__);
+	}
+
+	if (hdmi_ctrl->hdcp_ops && hdmi_ctrl->hdcp_data) {
+		if (hdmi_ctrl->hdcp_ops->hdmi_hdcp_isr) {
+			if (hdmi_ctrl->hdcp_ops->hdmi_hdcp_isr(
+				hdmi_ctrl->hdcp_data))
+				DEV_ERR("%s: hdmi_hdcp_isr failed\n",
+					 __func__);
+		}
+	}
+end:
+	return IRQ_HANDLED;
+} /* hdmi_tx_isr */
+
+/*
+ * hdmi_tx_dev_deinit() - tear down everything hdmi_tx_dev_init() set up,
+ * in reverse order: features, hdcp hooks, switch device, workqueue, locks
+ * and the irq back-pointer.
+ */
+static void hdmi_tx_dev_deinit(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	hdmi_tx_deinit_features(hdmi_ctrl, HDMI_TX_FEAT_MAX);
+
+	hdmi_ctrl->hdcp_ops = NULL;
+	hdmi_ctrl->hdcp_data = NULL;
+
+	switch_dev_unregister(&hdmi_ctrl->sdev);
+	if (hdmi_ctrl->workq)
+		destroy_workqueue(hdmi_ctrl->workq);
+	mutex_destroy(&hdmi_ctrl->tx_lock);
+	mutex_destroy(&hdmi_ctrl->mutex);
+
+	hdmi_tx_hw.ptr = NULL;
+} /* hdmi_tx_dev_deinit */
+
+/*
+ * hdmi_tx_dev_init() - one-time controller setup.
+ *
+ * Verifies HDMI capability, wires the irq back-pointer, initializes the
+ * locks, workqueue, completions, work items and initial state flags.
+ * Unwound by hdmi_tx_dev_deinit().
+ */
+static int hdmi_tx_dev_init(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	int rc = 0;
+	struct hdmi_tx_platform_data *pdata = NULL;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	pdata = &hdmi_ctrl->pdata;
+
+	rc = hdmi_tx_check_capability(hdmi_ctrl);
+	if (rc) {
+		DEV_ERR("%s: no HDMI device\n", __func__);
+		goto fail_no_hdmi;
+	}
+
+	/* irq enable/disable will be handled in hpd on/off */
+	hdmi_tx_hw.ptr = (void *)hdmi_ctrl;
+
+	mutex_init(&hdmi_ctrl->mutex);
+	mutex_init(&hdmi_ctrl->tx_lock);
+
+	INIT_LIST_HEAD(&hdmi_ctrl->cable_notify_handlers);
+
+	hdmi_ctrl->workq = create_workqueue("hdmi_tx_workq");
+	if (!hdmi_ctrl->workq) {
+		DEV_ERR("%s: hdmi_tx_workq creation failed.\n", __func__);
+		rc = -EPERM;
+		goto fail_create_workq;
+	}
+
+	hdmi_ctrl->ddc_ctrl.io = &pdata->io[HDMI_TX_CORE_IO];
+	init_completion(&hdmi_ctrl->ddc_ctrl.ddc_sw_done);
+
+	hdmi_ctrl->panel_power_on = false;
+	hdmi_ctrl->panel_suspend = false;
+
+	hdmi_ctrl->hpd_state = false;
+	hdmi_ctrl->hpd_initialized = false;
+	hdmi_ctrl->hpd_off_pending = false;
+	init_completion(&hdmi_ctrl->hpd_int_done);
+
+	INIT_WORK(&hdmi_ctrl->hpd_int_work, hdmi_tx_hpd_int_work);
+	INIT_WORK(&hdmi_ctrl->fps_work, hdmi_tx_fps_work);
+	INIT_WORK(&hdmi_ctrl->cable_notify_work, hdmi_tx_cable_notify_work);
+	INIT_DELAYED_WORK(&hdmi_ctrl->hdcp_cb_work, hdmi_tx_hdcp_cb_work);
+
+	spin_lock_init(&hdmi_ctrl->hpd_state_lock);
+
+	return 0;
+
+fail_create_workq:
+	/*
+	 * Fix: this label is only reached when create_workqueue() failed,
+	 * so workq is always NULL here -- the old "destroy_workqueue if
+	 * set" was dead code.  Also destroy tx_lock, which the old path
+	 * leaked.
+	 */
+	mutex_destroy(&hdmi_ctrl->tx_lock);
+	mutex_destroy(&hdmi_ctrl->mutex);
+fail_no_hdmi:
+	return rc;
+} /* hdmi_tx_dev_init */
+
+/*
+ * hdmi_tx_start_hdcp() - kick off HDCP authentication.
+ *
+ * No-op during continuous splash or when HDCP is disabled.  Asserts
+ * av-mute first if encryption is already set.
+ * NOTE(review): assumes hdmi_tx_is_hdcp_enabled() implies hdcp_ops and
+ * hdcp_data are valid -- confirm against its definition.
+ */
+static int hdmi_tx_start_hdcp(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	int rc;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	if (hdmi_ctrl->panel_data.panel_info.cont_splash_enabled ||
+		!hdmi_tx_is_hdcp_enabled(hdmi_ctrl))
+		return 0;
+
+	if (hdmi_tx_is_encryption_set(hdmi_ctrl))
+		hdmi_tx_config_avmute(hdmi_ctrl, true);
+
+	rc = hdmi_ctrl->hdcp_ops->hdmi_hdcp_authenticate(hdmi_ctrl->hdcp_data);
+	if (rc)
+		DEV_ERR("%s: hdcp auth failed. rc=%d\n", __func__, rc);
+
+	return rc;
+}
+
+/* Register the "hdmi" switch device used for cable state notification. */
+static int hdmi_tx_init_switch_dev(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	int rc;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	hdmi_ctrl->sdev.name = "hdmi";
+
+	rc = switch_dev_register(&hdmi_ctrl->sdev);
+	if (rc)
+		DEV_ERR("%s: display switch registration failed\n", __func__);
+
+	return rc;
+}
+
+/*
+ * hdmi_tx_hdcp_off() - stop HDCP and release the DDC power vote.
+ *
+ * NOTE(review): dereferences hdcp_ops without a NULL check -- callers are
+ * expected to have verified HDCP is enabled first (confirm at call sites).
+ */
+static int hdmi_tx_hdcp_off(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	int rc = 0;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	DEV_DBG("%s: Turning off HDCP\n", __func__);
+	hdmi_ctrl->hdcp_ops->hdmi_hdcp_off(
+		hdmi_ctrl->hdcp_data);
+
+	hdmi_ctrl->hdcp_ops = NULL;
+
+	rc = hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_DDC_PM,
+		false);
+	if (rc)
+		DEV_ERR("%s: Failed to disable ddc power\n",
+			__func__);
+
+	return rc;
+}
+
+/* Map an MDSS panel event code to its printable name for logging. */
+static char *hdmi_tx_get_event_name(int event)
+{
+	switch (event) {
+	case MDSS_EVENT_RESET:
+		return HDMI_TX_EVT_STR(MDSS_EVENT_RESET);
+	case MDSS_EVENT_LINK_READY:
+		return HDMI_TX_EVT_STR(MDSS_EVENT_LINK_READY);
+	case MDSS_EVENT_UNBLANK:
+		return HDMI_TX_EVT_STR(MDSS_EVENT_UNBLANK);
+	case MDSS_EVENT_PANEL_ON:
+		return HDMI_TX_EVT_STR(MDSS_EVENT_PANEL_ON);
+	case MDSS_EVENT_BLANK:
+		return HDMI_TX_EVT_STR(MDSS_EVENT_BLANK);
+	case MDSS_EVENT_PANEL_OFF:
+		return HDMI_TX_EVT_STR(MDSS_EVENT_PANEL_OFF);
+	case MDSS_EVENT_CLOSE:
+		return HDMI_TX_EVT_STR(MDSS_EVENT_CLOSE);
+	case MDSS_EVENT_SUSPEND:
+		return HDMI_TX_EVT_STR(MDSS_EVENT_SUSPEND);
+	case MDSS_EVENT_RESUME:
+		return HDMI_TX_EVT_STR(MDSS_EVENT_RESUME);
+	case MDSS_EVENT_CHECK_PARAMS:
+		return HDMI_TX_EVT_STR(MDSS_EVENT_CHECK_PARAMS);
+	case MDSS_EVENT_CONT_SPLASH_BEGIN:
+		return HDMI_TX_EVT_STR(MDSS_EVENT_CONT_SPLASH_BEGIN);
+	case MDSS_EVENT_CONT_SPLASH_FINISH:
+		return HDMI_TX_EVT_STR(MDSS_EVENT_CONT_SPLASH_FINISH);
+	case MDSS_EVENT_PANEL_UPDATE_FPS:
+		return HDMI_TX_EVT_STR(MDSS_EVENT_PANEL_UPDATE_FPS);
+	case MDSS_EVENT_FB_REGISTERED:
+		return HDMI_TX_EVT_STR(MDSS_EVENT_FB_REGISTERED);
+	case MDSS_EVENT_PANEL_CLK_CTRL:
+		return HDMI_TX_EVT_STR(MDSS_EVENT_PANEL_CLK_CTRL);
+	case MDSS_EVENT_DSI_CMDLIST_KOFF:
+		return HDMI_TX_EVT_STR(MDSS_EVENT_DSI_CMDLIST_KOFF);
+	case MDSS_EVENT_ENABLE_PARTIAL_ROI:
+		return HDMI_TX_EVT_STR(MDSS_EVENT_ENABLE_PARTIAL_ROI);
+	case MDSS_EVENT_DSI_STREAM_SIZE:
+		return HDMI_TX_EVT_STR(MDSS_EVENT_DSI_STREAM_SIZE);
+	case MDSS_EVENT_DSI_DYNAMIC_SWITCH:
+		return HDMI_TX_EVT_STR(MDSS_EVENT_DSI_DYNAMIC_SWITCH);
+	case MDSS_EVENT_REGISTER_RECOVERY_HANDLER:
+		return HDMI_TX_EVT_STR(MDSS_EVENT_REGISTER_RECOVERY_HANDLER);
+	default:
+		return "unknown";
+	}
+}
+
+/*
+ * hdmi_tx_update_fps() - apply a pending dynamic-fps change.
+ *
+ * No-op when dynamic fps is disabled or the panel already runs at the
+ * requested rate.  Otherwise HDCP is torn down, the panel recomputes the
+ * video format (vic) for the new fps, the pixel clock is updated and
+ * HDCP is restarted.
+ */
+static void hdmi_tx_update_fps(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	void *pdata = NULL;
+	struct mdss_panel_info *pinfo;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	pdata = hdmi_tx_get_fd(HDMI_TX_FEAT_PANEL);
+	if (!pdata) {
+		DEV_ERR("%s: invalid panel data\n", __func__);
+		return;
+	}
+
+	pinfo = &hdmi_ctrl->panel_data.panel_info;
+
+	if (!pinfo->dynamic_fps) {
+		DEV_DBG("%s: Dynamic fps not enabled\n", __func__);
+		return;
+	}
+
+	DEV_DBG("%s: current fps %d, new fps %d\n", __func__,
+		pinfo->current_fps, hdmi_ctrl->dynamic_fps);
+
+	if (hdmi_ctrl->dynamic_fps == pinfo->current_fps) {
+		DEV_DBG("%s: Panel is already at this FPS: %d\n",
+			__func__, hdmi_ctrl->dynamic_fps);
+		return;
+	}
+
+	/* HDCP must be re-authenticated after the timing change */
+	if (hdmi_tx_is_hdcp_enabled(hdmi_ctrl))
+		hdmi_tx_hdcp_off(hdmi_ctrl);
+
+	if (hdmi_ctrl->panel_ops.update_fps)
+		hdmi_ctrl->vic = hdmi_ctrl->panel_ops.update_fps(pdata,
+			hdmi_ctrl->dynamic_fps);
+
+	hdmi_tx_update_pixel_clk(hdmi_ctrl);
+
+	hdmi_tx_start_hdcp(hdmi_ctrl);
+}
+
+/* Deferred-work wrapper: apply a pending dynamic-fps change. */
+static void hdmi_tx_fps_work(struct work_struct *work)
+{
+	struct hdmi_tx_ctrl *hdmi_ctrl =
+		container_of(work, struct hdmi_tx_ctrl, fps_work);
+
+	if (!hdmi_ctrl) {
+		DEV_DBG("%s: invalid input\n", __func__);
+		return;
+	}
+
+	hdmi_tx_update_fps(hdmi_ctrl);
+}
+
+/*
+ * MDSS_EVENT_FB_REGISTERED handler: create sysfs nodes, init feature
+ * modules and the switch device, and, for a primary or non-pluggable
+ * panel, force HPD on immediately.
+ *
+ * Error labels unwind in strict reverse order of the setup steps.
+ */
+static int hdmi_tx_evt_handle_register(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	int rc = 0;
+
+	rc = hdmi_tx_sysfs_create(hdmi_ctrl, hdmi_ctrl->evt_arg);
+	if (rc) {
+		DEV_ERR("%s: hdmi_tx_sysfs_create failed.rc=%d\n",
+			__func__, rc);
+		goto sysfs_err;
+	}
+	rc = hdmi_tx_init_features(hdmi_ctrl, hdmi_ctrl->evt_arg);
+	if (rc) {
+		DEV_ERR("%s: init_features failed.rc=%d\n", __func__, rc);
+		goto init_err;
+	}
+
+	rc = hdmi_tx_init_switch_dev(hdmi_ctrl);
+	if (rc) {
+		DEV_ERR("%s: init switch dev failed.rc=%d\n", __func__, rc);
+		goto switch_err;
+	}
+
+	/* non-pluggable/primary panels cannot wait for a cable event */
+	if (hdmi_ctrl->pdata.primary || !hdmi_ctrl->pdata.pluggable) {
+		reinit_completion(&hdmi_ctrl->hpd_int_done);
+		rc = hdmi_tx_sysfs_enable_hpd(hdmi_ctrl, true);
+		if (rc) {
+			DEV_ERR("%s: hpd_enable failed. rc=%d\n", __func__, rc);
+			goto primary_err;
+		} else {
+			hdmi_ctrl->hpd_feature_on = true;
+		}
+	}
+
+	return 0;
+
+primary_err:
+	switch_dev_unregister(&hdmi_ctrl->sdev);
+switch_err:
+	hdmi_tx_deinit_features(hdmi_ctrl, HDMI_TX_FEAT_MAX);
+init_err:
+	hdmi_tx_sysfs_remove(hdmi_ctrl);
+sysfs_err:
+	return rc;
+}
+
+/*
+ * MDSS_EVENT_CHECK_PARAMS handler.
+ *
+ * Returns 1 when the requested resolution differs from the current one
+ * so mdss performs a close/open reconfig cycle, 0 otherwise.
+ */
+static int hdmi_tx_evt_handle_check_param(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	int new_vic = -1;
+	int rc = 0;
+
+	if (hdmi_ctrl->panel_ops.get_vic)
+		new_vic = hdmi_ctrl->panel_ops.get_vic(
+			hdmi_ctrl->evt_arg, &hdmi_ctrl->ds_data);
+
+	/*
+	 * Fix: treat HDMI_VFRMT_MAX itself as invalid (was '>'), matching
+	 * the probe-time validation which rejects vic >= HDMI_VFRMT_MAX.
+	 */
+	if ((new_vic < 0) || (new_vic >= HDMI_VFRMT_MAX)) {
+		DEV_ERR("%s: invalid or not supported vic\n", __func__);
+		goto end;
+	}
+
+	/*
+	 * return value of 1 lets mdss know that panel
+	 * needs a reconfig due to new resolution and
+	 * it will issue close and open subsequently.
+	 */
+	if (new_vic != hdmi_ctrl->vic) {
+		rc = 1;
+		DEV_DBG("%s: res change %d ==> %d\n", __func__,
+			hdmi_ctrl->vic, new_vic);
+	}
+end:
+	return rc;
+}
+
+/*
+ * MDSS_EVENT_RESUME handler: clear the suspend flag, notify CEC, and if
+ * the HPD feature is enabled re-arm HPD. When the switch device still
+ * reports "connected" but the hardware shows no cable, wait briefly for
+ * an HPD interrupt and, failing that, report a disconnect that happened
+ * while suspended.
+ */
+static int hdmi_tx_evt_handle_resume(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	int rc = 0;
+
+	hdmi_ctrl->panel_suspend = false;
+	hdmi_tx_cec_device_suspend(hdmi_ctrl);
+
+	if (!hdmi_ctrl->hpd_feature_on)
+		goto end;
+
+	rc = hdmi_tx_hpd_on(hdmi_ctrl);
+	if (rc) {
+		DEV_ERR("%s: hpd_on failed. rc=%d\n", __func__, rc);
+		goto end;
+	}
+
+	if (hdmi_ctrl->sdev.state &&
+		!hdmi_tx_hw_is_cable_connected(hdmi_ctrl)) {
+		u32 timeout;
+
+		reinit_completion(&hdmi_ctrl->hpd_int_done);
+		/* give the HPD interrupt ~100ms to confirm the cable */
+		timeout = wait_for_completion_timeout(
+			&hdmi_ctrl->hpd_int_done, HZ/10);
+		if (!timeout && !hdmi_ctrl->hpd_state) {
+			DEV_DBG("%s: cable removed during suspend\n", __func__);
+			hdmi_tx_set_audio_switch_node(hdmi_ctrl, 0);
+			hdmi_tx_wait_for_audio_engine(hdmi_ctrl);
+			hdmi_tx_send_cable_notification(hdmi_ctrl, 0);
+		}
+	}
+end:
+	return rc;
+}
+
+/*
+ * MDSS_EVENT_RESET handler: cycle the PHY through a reset unless the
+ * continuous-splash screen is still up or HPD was never initialized.
+ */
+static int hdmi_tx_evt_handle_reset(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	struct mdss_panel_info *pinfo = &hdmi_ctrl->panel_data.panel_info;
+
+	if (pinfo->cont_splash_enabled || !hdmi_ctrl->hpd_initialized)
+		return 0;
+
+	hdmi_tx_set_mode(hdmi_ctrl, false);
+	hdmi_tx_phy_reset(hdmi_ctrl);
+	hdmi_tx_set_mode(hdmi_ctrl, true);
+
+	return 0;
+}
+
+/*
+ * MDSS_EVENT_UNBLANK handler: power the DDC block, then power on the
+ * HDMI TX core.
+ */
+static int hdmi_tx_evt_handle_unblank(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	int ret;
+
+	ret = hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_DDC_PM, true);
+	if (ret) {
+		DEV_ERR("%s: ddc power on failed. rc=%d\n", __func__, ret);
+		return ret;
+	}
+
+	ret = hdmi_tx_power_on(hdmi_ctrl);
+	if (ret)
+		DEV_ERR("%s: hdmi_tx_power_on failed. rc=%d\n", __func__, ret);
+
+	return ret;
+}
+
+/*
+ * MDSS_EVENT_PANEL_ON handler: start HDCP (unless in simulation mode)
+ * and mark the timing generator active. If a suspend raced in while the
+ * panel was turning on, report the disconnect now.
+ */
+static int hdmi_tx_evt_handle_panel_on(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	int rc = 0;
+
+	if (!hdmi_ctrl->sim_mode) {
+		/* refresh hdcp ops for the currently attached sink */
+		hdmi_tx_update_hdcp_info(hdmi_ctrl);
+
+		rc = hdmi_tx_start_hdcp(hdmi_ctrl);
+		if (rc)
+			DEV_ERR("%s: hdcp start failed rc=%d\n", __func__, rc);
+	}
+
+	hdmi_ctrl->timing_gen_on = true;
+
+	if (hdmi_ctrl->panel_suspend) {
+		DEV_DBG("%s: panel suspend has triggered\n", __func__);
+
+		hdmi_tx_set_audio_switch_node(hdmi_ctrl, 0);
+		hdmi_tx_wait_for_audio_engine(hdmi_ctrl);
+		hdmi_tx_send_cable_notification(hdmi_ctrl, 0);
+	}
+
+	return rc;
+}
+
+/*
+ * MDSS_EVENT_SUSPEND handler: drop HPD when it is no longer needed,
+ * then flag the suspend and notify CEC.
+ */
+static int hdmi_tx_evt_handle_suspend(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	bool hpd = hdmi_ctrl->hpd_state;
+
+	if (!hdmi_ctrl->hpd_feature_on || hdmi_ctrl->panel_suspend)
+		return 0;
+
+	if ((!hpd && !hdmi_ctrl->panel_power_on) ||
+	    (hpd && !hdmi_ctrl->pdata.pluggable))
+		hdmi_tx_hpd_off(hdmi_ctrl);
+
+	hdmi_ctrl->panel_suspend = true;
+	hdmi_tx_cec_device_suspend(hdmi_ctrl);
+
+	return 0;
+}
+
+/* MDSS_EVENT_BLANK handler: tear down HDCP before the display blanks. */
+static int hdmi_tx_evt_handle_blank(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	if (!hdmi_tx_is_hdcp_enabled(hdmi_ctrl))
+		return 0;
+
+	hdmi_tx_hdcp_off(hdmi_ctrl);
+
+	return 0;
+}
+
+/*
+ * MDSS_EVENT_PANEL_OFF handler: drop DDC power, AV-mute the sink, power
+ * off the TX core and mark the timing generator idle.
+ */
+static int hdmi_tx_evt_handle_panel_off(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	int rc;
+
+	rc = hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_DDC_PM, false);
+	if (rc) {
+		DEV_ERR("%s: Failed to disable ddc power\n", __func__);
+		goto end;
+	}
+
+	if (hdmi_ctrl->panel_power_on) {
+		/* mute audio/video at the sink before cutting power */
+		hdmi_tx_config_avmute(hdmi_ctrl, 1);
+		rc = hdmi_tx_power_off(hdmi_ctrl);
+		if (rc)
+			DEV_ERR("%s: hdmi_tx_power_off failed.rc=%d\n",
+				__func__, rc);
+	} else {
+		DEV_DBG("%s: hdmi_ctrl is already powered off\n", __func__);
+	}
+
+	hdmi_ctrl->timing_gen_on = false;
+end:
+	return rc;
+}
+
+/*
+ * MDSS_EVENT_CLOSE handler: with HPD enabled and no cable attached,
+ * re-arm the HPD circuit to detect the next connect.
+ */
+static int hdmi_tx_evt_handle_close(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	bool rearm = hdmi_ctrl->hpd_feature_on &&
+		hdmi_ctrl->hpd_initialized && !hdmi_ctrl->hpd_state;
+
+	if (rearm)
+		hdmi_tx_hpd_polarity_setup(hdmi_ctrl, HPD_CONNECT_POLARITY);
+
+	return 0;
+}
+
+/*
+ * Central mdss panel event dispatcher. UPDATE_FPS is handled inline
+ * (atomic context, deferred to a worker); every other event is looked
+ * up in the evt_handler table under tx_lock.
+ */
+static int hdmi_tx_event_handler(struct mdss_panel_data *panel_data,
+	int event, void *arg)
+{
+	int rc = 0;
+	hdmi_tx_evt_handler handler;
+	struct hdmi_tx_ctrl *hdmi_ctrl =
+		hdmi_tx_get_drvdata_from_panel_data(panel_data);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* UPDATE FPS is called from atomic context */
+	if (event == MDSS_EVENT_PANEL_UPDATE_FPS) {
+		hdmi_ctrl->dynamic_fps = (u32) (unsigned long)arg;
+		DEV_DBG("%s: fps %d\n", __func__, hdmi_ctrl->dynamic_fps);
+		queue_work(hdmi_ctrl->workq, &hdmi_ctrl->fps_work);
+		return rc;
+	}
+
+	/*
+	 * Fix: bounds-check before indexing evt_handler[], which holds
+	 * MDSS_EVENT_MAX - 1 entries; an out-of-range event would
+	 * otherwise read past the end of the array.
+	 */
+	if (event < 0 || event >= MDSS_EVENT_MAX - 1) {
+		DEV_ERR("%s: invalid event %d\n", __func__, event);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	mutex_lock(&hdmi_ctrl->tx_lock);
+
+	hdmi_ctrl->evt_arg = arg;
+
+	DEV_DBG("%s: event = %s suspend=%d, hpd_feature=%d\n", __func__,
+		hdmi_tx_get_event_name(event), hdmi_ctrl->panel_suspend,
+		hdmi_ctrl->hpd_feature_on);
+
+	handler = hdmi_ctrl->evt_handler[event];
+	if (handler)
+		rc = handler(hdmi_ctrl);
+
+	mutex_unlock(&hdmi_ctrl->tx_lock);
+end:
+	return rc;
+}
+
+/*
+ * Register this device as an mdss panel: install the event handler,
+ * pick the default resolution for non-primary panels, populate panel
+ * info, register with mdss and hook up the HDMI interrupt.
+ */
+static int hdmi_tx_register_panel(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	int rc = 0;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	hdmi_ctrl->panel_data.event_handler = hdmi_tx_event_handler;
+
+	/* primary panels had their vic chosen at probe from boot args */
+	if (!hdmi_ctrl->pdata.primary)
+		hdmi_ctrl->vic = DEFAULT_VIDEO_RESOLUTION;
+
+	rc = hdmi_tx_init_panel_info(hdmi_ctrl);
+	if (rc) {
+		DEV_ERR("%s: hdmi_init_panel_info failed\n", __func__);
+		return rc;
+	}
+
+	rc = mdss_register_panel(hdmi_ctrl->pdev, &hdmi_ctrl->panel_data);
+	if (rc) {
+		DEV_ERR("%s: FAILED: to register HDMI panel\n", __func__);
+		return rc;
+	}
+
+	rc = hdmi_ctrl->mdss_util->register_irq(&hdmi_tx_hw);
+	if (rc)
+		DEV_ERR("%s: mdss_register_irq failed.\n", __func__);
+
+	return rc;
+} /* hdmi_tx_register_panel */
+
+/*
+ * Release resources acquired by hdmi_tx_init_resource(): deconfigure
+ * power modules and unmap register IO, both in reverse order of setup.
+ */
+static void hdmi_tx_deinit_resource(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	int i;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	/* VREG & CLK */
+	for (i = HDMI_TX_MAX_PM - 1; i >= 0; i--) {
+		if (hdmi_tx_config_power(hdmi_ctrl, i, 0))
+			DEV_ERR("%s: '%s' power deconfig fail\n",
+				__func__, hdmi_tx_pm_name(i));
+	}
+
+	/* IO */
+	for (i = HDMI_TX_MAX_IO - 1; i >= 0; i--) {
+		if (hdmi_ctrl->pdata.io[i].base)
+			msm_dss_iounmap(&hdmi_ctrl->pdata.io[i]);
+	}
+} /* hdmi_tx_deinit_resource */
+
+/*
+ * Acquire hardware resources: pinctrl, register IO mappings (by name)
+ * and power configuration for every power module. IO remap failures are
+ * non-fatal (some register blocks are optional); power config failures
+ * unwind everything.
+ */
+static int hdmi_tx_init_resource(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	int i, rc = 0;
+	struct hdmi_tx_platform_data *pdata = NULL;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	pdata = &hdmi_ctrl->pdata;
+
+	hdmi_tx_pinctrl_init(hdmi_ctrl->pdev);
+
+	/* IO */
+	for (i = 0; i < HDMI_TX_MAX_IO; i++) {
+		rc = msm_dss_ioremap_byname(hdmi_ctrl->pdev, &pdata->io[i],
+			hdmi_tx_io_name(i));
+		if (rc) {
+			/* optional region: log and continue */
+			DEV_DBG("%s: '%s' remap failed or not available\n",
+				__func__, hdmi_tx_io_name(i));
+		}
+		DEV_INFO("%s: '%s': start = 0x%pK, len=0x%x\n", __func__,
+			hdmi_tx_io_name(i), pdata->io[i].base,
+			pdata->io[i].len);
+	}
+
+	/* VREG & CLK */
+	for (i = 0; i < HDMI_TX_MAX_PM; i++) {
+		rc = hdmi_tx_config_power(hdmi_ctrl, i, 1);
+		if (rc) {
+			DEV_ERR("%s: '%s' power config failed.rc=%d\n",
+				__func__, hdmi_tx_pm_name(i), rc);
+			goto error;
+		}
+	}
+
+	return rc;
+
+error:
+	hdmi_tx_deinit_resource(hdmi_ctrl);
+	return rc;
+} /* hdmi_tx_init_resource */
+
+/* Free the clock table parsed from DT and reset its count. */
+static void hdmi_tx_put_dt_clk_data(struct device *dev,
+	struct dss_module_power *mp)
+{
+	if (!mp) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	if (mp->clk_config) {
+		devm_kfree(dev, mp->clk_config);
+		mp->clk_config = NULL;
+	}
+	mp->num_clk = 0;
+}
+
+/* todo: once clk are moved to device tree then change this implementation */
+/*
+ * Build the hard-coded clock table for one power module. Only the HPD
+ * and CORE modules own clocks; DDC and CEC have none.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL for an
+ * unknown module type.
+ */
+static int hdmi_tx_get_dt_clk_data(struct device *dev,
+	struct dss_module_power *mp, u32 module_type)
+{
+	int rc = 0;
+
+	if (!dev || !mp) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	DEV_DBG("%s: module: '%s'\n", __func__, hdmi_tx_pm_name(module_type));
+
+	switch (module_type) {
+	case HDMI_TX_HPD_PM:
+		mp->num_clk = 4;
+		mp->clk_config = devm_kzalloc(dev, sizeof(struct dss_clk) *
+			mp->num_clk, GFP_KERNEL);
+		if (!mp->clk_config) {
+			DEV_ERR("%s: can't alloc '%s' clk mem\n", __func__,
+				hdmi_tx_pm_name(module_type));
+			/* fix: previously returned 0 (success) on OOM */
+			rc = -ENOMEM;
+			goto error;
+		}
+
+		snprintf(mp->clk_config[0].clk_name, 32, "%s", "iface_clk");
+		mp->clk_config[0].type = DSS_CLK_AHB;
+		mp->clk_config[0].rate = 0;
+
+		snprintf(mp->clk_config[1].clk_name, 32, "%s", "core_clk");
+		mp->clk_config[1].type = DSS_CLK_OTHER;
+		mp->clk_config[1].rate = 19200000;
+
+		/*
+		 * This clock is required to clock MDSS interrupt registers
+		 * when HDMI is the only block turned on within MDSS. Since
+		 * rate for this clock is controlled by MDP driver, treat this
+		 * similar to AHB clock and do not set rate for it.
+		 */
+		snprintf(mp->clk_config[2].clk_name, 32, "%s", "mdp_core_clk");
+		mp->clk_config[2].type = DSS_CLK_AHB;
+		mp->clk_config[2].rate = 0;
+
+		snprintf(mp->clk_config[3].clk_name, 32, "%s", "alt_iface_clk");
+		mp->clk_config[3].type = DSS_CLK_AHB;
+		mp->clk_config[3].rate = 0;
+		break;
+
+	case HDMI_TX_CORE_PM:
+		mp->num_clk = 1;
+		mp->clk_config = devm_kzalloc(dev, sizeof(struct dss_clk) *
+			mp->num_clk, GFP_KERNEL);
+		if (!mp->clk_config) {
+			DEV_ERR("%s: can't alloc '%s' clk mem\n", __func__,
+				hdmi_tx_pm_name(module_type));
+			/* fix: previously returned 0 (success) on OOM */
+			rc = -ENOMEM;
+			goto error;
+		}
+
+		snprintf(mp->clk_config[0].clk_name, 32, "%s", "extp_clk");
+		mp->clk_config[0].type = DSS_CLK_PCLK;
+		/* This rate will be overwritten when core is powered on */
+		mp->clk_config[0].rate = 148500000;
+		break;
+
+	case HDMI_TX_DDC_PM:
+	case HDMI_TX_CEC_PM:
+		mp->num_clk = 0;
+		DEV_DBG("%s: no clk\n", __func__);
+		break;
+
+	default:
+		DEV_ERR("%s: invalid module type=%d\n", __func__,
+			module_type);
+		return -EINVAL;
+	}
+
+	return rc;
+
+error:
+	if (mp->clk_config) {
+		devm_kfree(dev, mp->clk_config);
+		mp->clk_config = NULL;
+	}
+	mp->num_clk = 0;
+
+	return rc;
+} /* hdmi_tx_get_dt_clk_data */
+
+/* Free the regulator table parsed from DT and reset its count. */
+static void hdmi_tx_put_dt_vreg_data(struct device *dev,
+	struct dss_module_power *mp)
+{
+	if (!mp) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	if (mp->vreg_config) {
+		devm_kfree(dev, mp->vreg_config);
+		mp->vreg_config = NULL;
+	}
+	mp->num_vreg = 0;
+}
+
+/*
+ * Parse regulator data from DT for one power module. The shared
+ * "qcom,supply-names" list is filtered by module name ("hpd", "ddc",
+ * "core", "cec"); matching entries (tracked in ndx_mask) are copied
+ * into mp->vreg_config together with their voltage/load arrays.
+ *
+ * Returns 0 on success (including "module has no vregs"), negative
+ * errno on failure.
+ */
+static int hdmi_tx_get_dt_vreg_data(struct device *dev,
+	struct dss_module_power *mp, u32 module_type)
+{
+	int i, j, rc = 0;
+	int dt_vreg_total = 0, mod_vreg_total = 0;
+	u32 ndx_mask = 0;
+	u32 *val_array = NULL;
+	const char *mod_name = NULL;
+	struct device_node *of_node = NULL;
+
+	if (!dev || !mp) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (module_type) {
+	case HDMI_TX_HPD_PM:
+		mod_name = "hpd";
+		break;
+	case HDMI_TX_DDC_PM:
+		mod_name = "ddc";
+		break;
+	case HDMI_TX_CORE_PM:
+		mod_name = "core";
+		break;
+	case HDMI_TX_CEC_PM:
+		mod_name = "cec";
+		break;
+	default:
+		DEV_ERR("%s: invalid module type=%d\n", __func__,
+			module_type);
+		return -EINVAL;
+	}
+
+	DEV_DBG("%s: module: '%s'\n", __func__, hdmi_tx_pm_name(module_type));
+
+	of_node = dev->of_node;
+
+	dt_vreg_total = of_property_count_strings(of_node, "qcom,supply-names");
+	if (dt_vreg_total < 0) {
+		DEV_ERR("%s: vreg not found. rc=%d\n", __func__,
+			dt_vreg_total);
+		rc = dt_vreg_total;
+		goto error;
+	}
+
+	/* count how many vreg for particular hdmi module */
+	for (i = 0; i < dt_vreg_total; i++) {
+		const char *st = NULL;
+
+		rc = of_property_read_string_index(of_node,
+			"qcom,supply-names", i, &st);
+		if (rc) {
+			DEV_ERR("%s: error reading name. i=%d, rc=%d\n",
+				__func__, i, rc);
+			goto error;
+		}
+
+		if (strnstr(st, mod_name, strlen(st))) {
+			ndx_mask |= BIT(i);
+			mod_vreg_total++;
+		}
+	}
+
+	if (mod_vreg_total > 0) {
+		mp->num_vreg = mod_vreg_total;
+		mp->vreg_config = devm_kzalloc(dev, sizeof(struct dss_vreg) *
+			mod_vreg_total, GFP_KERNEL);
+		if (!mp->vreg_config) {
+			DEV_ERR("%s: can't alloc '%s' vreg mem\n", __func__,
+				hdmi_tx_pm_name(module_type));
+			/* fix: previously returned 0 (success) on OOM */
+			rc = -ENOMEM;
+			goto error;
+		}
+	} else {
+		DEV_DBG("%s: no vreg\n", __func__);
+		return 0;
+	}
+
+	val_array = devm_kzalloc(dev, sizeof(u32) * dt_vreg_total, GFP_KERNEL);
+	if (!val_array) {
+		DEV_ERR("%s: can't allocate vreg scratch mem\n", __func__);
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	for (i = 0, j = 0; (i < dt_vreg_total) && (j < mod_vreg_total); i++) {
+		const char *st = NULL;
+
+		/* ndx_mask is shifted each pass; bit 0 marks entry i */
+		if (!(ndx_mask & BIT(0))) {
+			ndx_mask >>= 1;
+			continue;
+		}
+
+		/* vreg-name */
+		rc = of_property_read_string_index(of_node,
+			"qcom,supply-names", i, &st);
+		if (rc) {
+			DEV_ERR("%s: error reading name. i=%d, rc=%d\n",
+				__func__, i, rc);
+			goto error;
+		}
+		snprintf(mp->vreg_config[j].vreg_name, 32, "%s", st);
+
+		/* vreg-min-voltage */
+		memset(val_array, 0, sizeof(u32) * dt_vreg_total);
+		rc = of_property_read_u32_array(of_node,
+			"qcom,min-voltage-level", val_array,
+			dt_vreg_total);
+		if (rc) {
+			DEV_ERR("%s: error read '%s' min volt. rc=%d\n",
+				__func__, hdmi_tx_pm_name(module_type), rc);
+			goto error;
+		}
+		mp->vreg_config[j].min_voltage = val_array[i];
+
+		/* vreg-max-voltage */
+		memset(val_array, 0, sizeof(u32) * dt_vreg_total);
+		rc = of_property_read_u32_array(of_node,
+			"qcom,max-voltage-level", val_array,
+			dt_vreg_total);
+		if (rc) {
+			DEV_ERR("%s: error read '%s' max volt. rc=%d\n",
+				__func__, hdmi_tx_pm_name(module_type), rc);
+			goto error;
+		}
+		mp->vreg_config[j].max_voltage = val_array[i];
+
+		/* vreg-op-mode */
+		memset(val_array, 0, sizeof(u32) * dt_vreg_total);
+		rc = of_property_read_u32_array(of_node,
+			"qcom,enable-load", val_array,
+			dt_vreg_total);
+		if (rc) {
+			DEV_ERR("%s: error read '%s' enable load. rc=%d\n",
+				__func__, hdmi_tx_pm_name(module_type), rc);
+			goto error;
+		}
+		mp->vreg_config[j].load[DSS_REG_MODE_ENABLE] = val_array[i];
+
+		memset(val_array, 0, sizeof(u32) * dt_vreg_total);
+		rc = of_property_read_u32_array(of_node,
+			"qcom,disable-load", val_array,
+			dt_vreg_total);
+		if (rc) {
+			DEV_ERR("%s: error read '%s' disable load. rc=%d\n",
+				__func__, hdmi_tx_pm_name(module_type), rc);
+			goto error;
+		}
+		mp->vreg_config[j].load[DSS_REG_MODE_DISABLE] = val_array[i];
+
+		DEV_DBG("%s: %s min=%d, max=%d, enable=%d disable=%d\n",
+			__func__,
+			mp->vreg_config[j].vreg_name,
+			mp->vreg_config[j].min_voltage,
+			mp->vreg_config[j].max_voltage,
+			mp->vreg_config[j].load[DSS_REG_MODE_ENABLE],
+			mp->vreg_config[j].load[DSS_REG_MODE_DISABLE]);
+
+		ndx_mask >>= 1;
+		j++;
+	}
+
+	devm_kfree(dev, val_array);
+
+	return rc;
+
+error:
+	if (mp->vreg_config) {
+		devm_kfree(dev, mp->vreg_config);
+		mp->vreg_config = NULL;
+	}
+	mp->num_vreg = 0;
+
+	if (val_array)
+		devm_kfree(dev, val_array);
+	return rc;
+} /* hdmi_tx_get_dt_vreg_data */
+
+/* Free the gpio table parsed from DT and reset its count. */
+static void hdmi_tx_put_dt_gpio_data(struct device *dev,
+	struct dss_module_power *mp)
+{
+	if (!mp) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	if (mp->gpio_config) {
+		devm_kfree(dev, mp->gpio_config);
+		mp->gpio_config = NULL;
+	}
+	mp->num_gpio = 0;
+}
+
+/*
+ * Parse gpio data from DT for one power module. A per-module template
+ * table (e.g. hpd_gpio_config) lists the candidate gpio names; only the
+ * names actually present in the node are copied into mp->gpio_config
+ * with their resolved gpio numbers.
+ *
+ * Returns 0 on success (including "no gpios"), negative errno otherwise.
+ */
+static int hdmi_tx_get_dt_gpio_data(struct device *dev,
+	struct dss_module_power *mp, u32 module_type)
+{
+	int i, j;
+	int mp_gpio_cnt = 0, gpio_list_size = 0;
+	struct dss_gpio *gpio_list = NULL;
+	struct device_node *of_node = NULL;
+
+	DEV_DBG("%s: module: '%s'\n", __func__, hdmi_tx_pm_name(module_type));
+
+	if (!dev || !mp) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	of_node = dev->of_node;
+
+	switch (module_type) {
+	case HDMI_TX_HPD_PM:
+		gpio_list_size = ARRAY_SIZE(hpd_gpio_config);
+		gpio_list = hpd_gpio_config;
+		break;
+	case HDMI_TX_DDC_PM:
+		gpio_list_size = ARRAY_SIZE(ddc_gpio_config);
+		gpio_list = ddc_gpio_config;
+		break;
+	case HDMI_TX_CORE_PM:
+		gpio_list_size = ARRAY_SIZE(core_gpio_config);
+		gpio_list = core_gpio_config;
+		break;
+	case HDMI_TX_CEC_PM:
+		gpio_list_size = ARRAY_SIZE(cec_gpio_config);
+		gpio_list = cec_gpio_config;
+		break;
+	default:
+		DEV_ERR("%s: invalid module type=%d\n", __func__,
+			module_type);
+		return -EINVAL;
+	}
+
+	/* first pass: count the gpios this node actually defines */
+	for (i = 0; i < gpio_list_size; i++)
+		if (of_find_property(of_node, gpio_list[i].gpio_name, NULL))
+			mp_gpio_cnt++;
+
+	if (!mp_gpio_cnt) {
+		DEV_DBG("%s: no gpio\n", __func__);
+		return 0;
+	}
+
+	DEV_DBG("%s: mp_gpio_cnt = %d\n", __func__, mp_gpio_cnt);
+	mp->num_gpio = mp_gpio_cnt;
+
+	mp->gpio_config = devm_kzalloc(dev, sizeof(struct dss_gpio) *
+		mp_gpio_cnt, GFP_KERNEL);
+	if (!mp->gpio_config) {
+		DEV_ERR("%s: can't alloc '%s' gpio mem\n", __func__,
+			hdmi_tx_pm_name(module_type));
+
+		mp->num_gpio = 0;
+		return -ENOMEM;
+	}
+
+	/* second pass: copy template entries and resolve gpio numbers */
+	for (i = 0, j = 0; i < gpio_list_size; i++) {
+		int gpio = of_get_named_gpio(of_node,
+			gpio_list[i].gpio_name, 0);
+		if (gpio < 0) {
+			DEV_DBG("%s: no gpio named %s\n", __func__,
+				gpio_list[i].gpio_name);
+			continue;
+		}
+		memcpy(&mp->gpio_config[j], &gpio_list[i],
+			sizeof(struct dss_gpio));
+
+		mp->gpio_config[j].gpio = (unsigned int)gpio;
+
+		DEV_DBG("%s: gpio num=%d, name=%s, value=%d\n",
+			__func__, mp->gpio_config[j].gpio,
+			mp->gpio_config[j].gpio_name,
+			mp->gpio_config[j].value);
+		j++;
+	}
+
+	return 0;
+} /* hdmi_tx_get_dt_gpio_data */
+
+/* Undo hdmi_tx_get_dt_data(): free clk, vreg and gpio tables per module. */
+static void hdmi_tx_put_dt_data(struct device *dev,
+	struct hdmi_tx_platform_data *pdata)
+{
+	int mod;
+
+	if (!dev || !pdata) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	for (mod = HDMI_TX_MAX_PM - 1; mod >= 0; mod--)
+		hdmi_tx_put_dt_clk_data(dev, &pdata->power_data[mod]);
+
+	for (mod = HDMI_TX_MAX_PM - 1; mod >= 0; mod--)
+		hdmi_tx_put_dt_vreg_data(dev, &pdata->power_data[mod]);
+
+	for (mod = HDMI_TX_MAX_PM - 1; mod >= 0; mod--)
+		hdmi_tx_put_dt_gpio_data(dev, &pdata->power_data[mod]);
+}
+
+/*
+ * Parse all device-tree data for the HDMI TX node: device id, per-module
+ * gpio/vreg/clk tables, and boolean/string properties. On any failure
+ * everything parsed so far is released via hdmi_tx_put_dt_data().
+ */
+static int hdmi_tx_get_dt_data(struct platform_device *pdev,
+	struct hdmi_tx_platform_data *pdata)
+{
+	int i, rc = 0, len = 0;
+	struct device_node *of_node = NULL;
+	struct hdmi_tx_ctrl *hdmi_ctrl;
+	const char *data;
+
+	if (!pdev || !pdata) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * Fix: fetch drvdata only after validating pdev. The original
+	 * called platform_get_drvdata(pdev) in the declaration, i.e.
+	 * before the NULL check above, and never checked the result
+	 * even though it is dereferenced below.
+	 */
+	hdmi_ctrl = platform_get_drvdata(pdev);
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	of_node = pdev->dev.of_node;
+
+	rc = of_property_read_u32(of_node, "cell-index", &pdev->id);
+	if (rc) {
+		DEV_ERR("%s: dev id from dt not found.rc=%d\n",
+			__func__, rc);
+		goto error;
+	}
+	DEV_DBG("%s: id=%d\n", __func__, pdev->id);
+
+	/* GPIO */
+	for (i = 0; i < HDMI_TX_MAX_PM; i++) {
+		rc = hdmi_tx_get_dt_gpio_data(&pdev->dev,
+			&pdata->power_data[i], i);
+		if (rc) {
+			DEV_ERR("%s: '%s' get_dt_gpio_data failed.rc=%d\n",
+				__func__, hdmi_tx_pm_name(i), rc);
+			goto error;
+		}
+	}
+
+	/* VREG */
+	for (i = 0; i < HDMI_TX_MAX_PM; i++) {
+		rc = hdmi_tx_get_dt_vreg_data(&pdev->dev,
+			&pdata->power_data[i], i);
+		if (rc) {
+			DEV_ERR("%s: '%s' get_dt_vreg_data failed.rc=%d\n",
+				__func__, hdmi_tx_pm_name(i), rc);
+			goto error;
+		}
+	}
+
+	/* CLK */
+	for (i = 0; i < HDMI_TX_MAX_PM; i++) {
+		rc = hdmi_tx_get_dt_clk_data(&pdev->dev,
+			&pdata->power_data[i], i);
+		if (rc) {
+			DEV_ERR("%s: '%s' get_dt_clk_data failed.rc=%d\n",
+				__func__, hdmi_tx_pm_name(i), rc);
+			goto error;
+		}
+	}
+
+	/* primary may already be set from boot args in probe */
+	if (!hdmi_ctrl->pdata.primary)
+		hdmi_ctrl->pdata.primary = of_property_read_bool(
+			pdev->dev.of_node, "qcom,primary_panel");
+
+	pdata->cond_power_on = of_property_read_bool(pdev->dev.of_node,
+		"qcom,conditional-power-on");
+
+	pdata->pluggable = of_property_read_bool(pdev->dev.of_node,
+		"qcom,pluggable");
+
+	data = of_get_property(pdev->dev.of_node, "qcom,display-id", &len);
+	if (!data || len <= 0)
+		pr_err("%s:%d Unable to read qcom,display-id, data=%pK,len=%d\n",
+			__func__, __LINE__, data, len);
+	else
+		snprintf(hdmi_ctrl->panel_data.panel_info.display_id,
+			MDSS_DISPLAY_ID_MAX_LEN, "%s", data);
+
+	return rc;
+
+error:
+	hdmi_tx_put_dt_data(&pdev->dev, pdata);
+	return rc;
+} /* hdmi_tx_get_dt_data */
+
+/*
+ * Populate the mdss-event -> handler dispatch table used by
+ * hdmi_tx_event_handler(). Events without an entry are silently
+ * ignored by the dispatcher.
+ *
+ * NOTE(review): evt_handler[] is declared with MDSS_EVENT_MAX - 1
+ * entries (see mdss_hdmi_tx.h) — confirm event values never reach
+ * MDSS_EVENT_MAX - 1, otherwise indexing overruns the table.
+ */
+static int hdmi_tx_init_event_handler(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	hdmi_tx_evt_handler *handler;
+
+	if (!hdmi_ctrl)
+		return -EINVAL;
+
+	handler = hdmi_ctrl->evt_handler;
+
+	handler[MDSS_EVENT_FB_REGISTERED] = hdmi_tx_evt_handle_register;
+	handler[MDSS_EVENT_CHECK_PARAMS]  = hdmi_tx_evt_handle_check_param;
+	handler[MDSS_EVENT_RESUME]        = hdmi_tx_evt_handle_resume;
+	handler[MDSS_EVENT_RESET]         = hdmi_tx_evt_handle_reset;
+	handler[MDSS_EVENT_UNBLANK]       = hdmi_tx_evt_handle_unblank;
+	handler[MDSS_EVENT_PANEL_ON]      = hdmi_tx_evt_handle_panel_on;
+	handler[MDSS_EVENT_SUSPEND]       = hdmi_tx_evt_handle_suspend;
+	handler[MDSS_EVENT_BLANK]         = hdmi_tx_evt_handle_blank;
+	handler[MDSS_EVENT_PANEL_OFF]     = hdmi_tx_evt_handle_panel_off;
+	handler[MDSS_EVENT_CLOSE]         = hdmi_tx_evt_handle_close;
+
+	return 0;
+}
+
+/*
+ * Platform driver probe: allocate the controller, detect "HDMI as
+ * primary" boot configuration, parse DT, map resources, register the
+ * panel with mdss and, when continuous splash is active, pre-enable all
+ * power rails/clocks so the splash image survives handoff.
+ */
+static int hdmi_tx_probe(struct platform_device *pdev)
+{
+	int rc = 0, i;
+	struct device_node *of_node = pdev->dev.of_node;
+	struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+	struct mdss_panel_cfg *pan_cfg = NULL;
+
+	if (!of_node) {
+		DEV_ERR("%s: FAILED: of_node not found\n", __func__);
+		rc = -ENODEV;
+		return rc;
+	}
+
+	hdmi_ctrl = devm_kzalloc(&pdev->dev, sizeof(*hdmi_ctrl), GFP_KERNEL);
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: FAILED: cannot alloc hdmi tx ctrl\n", __func__);
+		rc = -ENOMEM;
+		goto failed_no_mem;
+	}
+
+	hdmi_ctrl->mdss_util = mdss_get_util_intf();
+	if (hdmi_ctrl->mdss_util == NULL) {
+		pr_err("Failed to get mdss utility functions\n");
+		rc = -ENODEV;
+		goto failed_dt_data;
+	}
+
+	platform_set_drvdata(pdev, hdmi_ctrl);
+	hdmi_ctrl->pdev = pdev;
+	hdmi_ctrl->enc_lvl = HDCP_STATE_AUTH_ENC_NONE;
+
+	/* boot-arg selected HDMI-as-primary: take vic from the cfg string */
+	pan_cfg = mdss_panel_intf_type(MDSS_PANEL_INTF_HDMI);
+	if (IS_ERR(pan_cfg)) {
+		/*
+		 * NOTE(review): early return relies on devm to free
+		 * hdmi_ctrl; drvdata stays set — confirm acceptable.
+		 */
+		return PTR_ERR(pan_cfg);
+	} else if (pan_cfg) {
+		int vic;
+
+		if (kstrtoint(pan_cfg->arg_cfg, 10, &vic) ||
+			vic <= HDMI_VFRMT_UNKNOWN || vic >= HDMI_VFRMT_MAX)
+			vic = DEFAULT_HDMI_PRIMARY_RESOLUTION;
+
+		hdmi_ctrl->pdata.primary = true;
+		hdmi_ctrl->vic = vic;
+		hdmi_ctrl->panel_data.panel_info.is_prim_panel = true;
+		hdmi_ctrl->panel_data.panel_info.cont_splash_enabled =
+			hdmi_ctrl->mdss_util->panel_intf_status(DISPLAY_1,
+					MDSS_PANEL_INTF_HDMI) ? true : false;
+	}
+
+	hdmi_tx_hw.irq_info = mdss_intr_line();
+	if (hdmi_tx_hw.irq_info == NULL) {
+		pr_err("Failed to get mdss irq information\n");
+		return -ENODEV;
+	}
+
+	rc = hdmi_tx_get_dt_data(pdev, &hdmi_ctrl->pdata);
+	if (rc) {
+		DEV_ERR("%s: FAILED: parsing device tree data. rc=%d\n",
+			__func__, rc);
+		goto failed_dt_data;
+	}
+
+	rc = hdmi_tx_init_resource(hdmi_ctrl);
+	if (rc) {
+		DEV_ERR("%s: FAILED: resource init. rc=%d\n",
+			__func__, rc);
+		goto failed_res_init;
+	}
+
+	rc = hdmi_tx_get_version(hdmi_ctrl);
+	if (rc) {
+		DEV_ERR("%s: FAILED: hdmi_tx_get_version. rc=%d\n",
+			__func__, rc);
+		goto failed_reg_panel;
+	}
+
+	rc = hdmi_tx_dev_init(hdmi_ctrl);
+	if (rc) {
+		DEV_ERR("%s: FAILED: hdmi_tx_dev_init. rc=%d\n", __func__, rc);
+		goto failed_dev_init;
+	}
+
+	rc = hdmi_tx_init_event_handler(hdmi_ctrl);
+	if (rc) {
+		DEV_ERR("%s: FAILED: hdmi_tx_init_event_handler. rc=%d\n",
+			__func__, rc);
+		goto failed_dev_init;
+	}
+
+	rc = hdmi_tx_register_panel(hdmi_ctrl);
+	if (rc) {
+		DEV_ERR("%s: FAILED: register_panel. rc=%d\n", __func__, rc);
+		goto failed_reg_panel;
+	}
+
+	rc = of_platform_populate(of_node, NULL, NULL, &pdev->dev);
+	if (rc) {
+		DEV_ERR("%s: Failed to add child devices. rc=%d\n",
+			__func__, rc);
+		goto failed_reg_panel;
+	} else {
+		DEV_DBG("%s: Add child devices.\n", __func__);
+	}
+
+	if (mdss_debug_register_io("hdmi",
+		&hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO], NULL))
+		DEV_WARN("%s: hdmi_tx debugfs register failed\n", __func__);
+
+	/* keep splash alive: turn on every rail/gpio/clk and remember it */
+	if (hdmi_ctrl->panel_data.panel_info.cont_splash_enabled) {
+		for (i = 0; i < HDMI_TX_MAX_PM; i++) {
+			msm_dss_enable_vreg(
+				hdmi_ctrl->pdata.power_data[i].vreg_config,
+				hdmi_ctrl->pdata.power_data[i].num_vreg, 1);
+
+			hdmi_tx_pinctrl_set_state(hdmi_ctrl, i, 1);
+
+			msm_dss_enable_gpio(
+				hdmi_ctrl->pdata.power_data[i].gpio_config,
+				hdmi_ctrl->pdata.power_data[i].num_gpio, 1);
+
+			msm_dss_enable_clk(
+				hdmi_ctrl->pdata.power_data[i].clk_config,
+				hdmi_ctrl->pdata.power_data[i].num_clk, 1);
+
+			hdmi_ctrl->power_data_enable[i] = true;
+		}
+	}
+
+	return rc;
+
+failed_reg_panel:
+	hdmi_tx_dev_deinit(hdmi_ctrl);
+failed_dev_init:
+	hdmi_tx_deinit_resource(hdmi_ctrl);
+failed_res_init:
+	hdmi_tx_put_dt_data(&pdev->dev, &hdmi_ctrl->pdata);
+failed_dt_data:
+	devm_kfree(&pdev->dev, hdmi_ctrl);
+failed_no_mem:
+	return rc;
+} /* hdmi_tx_probe */
+
+/*
+ * Platform driver remove: tear down in reverse order of probe —
+ * sysfs, device state, hardware resources, DT data, then the
+ * controller struct itself.
+ */
+static int hdmi_tx_remove(struct platform_device *pdev)
+{
+	struct hdmi_tx_ctrl *hdmi_ctrl = platform_get_drvdata(pdev);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: no driver data\n", __func__);
+		return -ENODEV;
+	}
+
+	hdmi_tx_sysfs_remove(hdmi_ctrl);
+	hdmi_tx_dev_deinit(hdmi_ctrl);
+	hdmi_tx_deinit_resource(hdmi_ctrl);
+	hdmi_tx_put_dt_data(&pdev->dev, &hdmi_ctrl->pdata);
+	devm_kfree(&hdmi_ctrl->pdev->dev, hdmi_ctrl);
+
+	return 0;
+} /* hdmi_tx_remove */
+
+/* Device-tree match table; COMPATIBLE_NAME binds this driver to its node. */
+static const struct of_device_id hdmi_tx_dt_match[] = {
+	{.compatible = COMPATIBLE_NAME,},
+	{ /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, hdmi_tx_dt_match);
+
+/* Platform driver glue for the HDMI TX device. */
+static struct platform_driver this_driver = {
+	.probe = hdmi_tx_probe,
+	.remove = hdmi_tx_remove,
+	.driver = {
+		.name = DRV_NAME,
+		.of_match_table = hdmi_tx_dt_match,
+	},
+};
+
+/* Module init: register the HDMI TX platform driver. */
+static int __init hdmi_tx_drv_init(void)
+{
+	int rc = platform_driver_register(&this_driver);
+
+	if (rc)
+		DEV_ERR("%s: FAILED: rc=%d\n", __func__, rc);
+
+	return rc;
+}
+
+/* Module exit: unregister the HDMI TX platform driver. */
+static void __exit hdmi_tx_drv_exit(void)
+{
+	platform_driver_unregister(&this_driver);
+} /* hdmi_tx_drv_exit */
+
+/* module_param setter for "hdcp": parse the bool, then log the new state. */
+static int set_hdcp_feature_on(const char *val, const struct kernel_param *kp)
+{
+	int ret = param_set_bool(val, kp);
+
+	if (!ret)
+		pr_debug("%s: HDCP feature = %d\n", __func__, hdcp_feature_on);
+
+	return ret;
+}
+
+/* Custom param ops so writes to /sys/module/.../hdcp are logged. */
+static struct kernel_param_ops hdcp_feature_on_param_ops = {
+	.set = set_hdcp_feature_on,
+	.get = param_get_bool,
+};
+
+/* "hdcp" module parameter (0644): runtime enable/disable of HDCP. */
+module_param_cb(hdcp, &hdcp_feature_on_param_ops, &hdcp_feature_on,
+	0644);
+MODULE_PARM_DESC(hdcp, "Enable or Disable HDCP");
+
+module_init(hdmi_tx_drv_init);
+module_exit(hdmi_tx_drv_exit);
+
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_tx.h b/drivers/video/fbdev/msm/mdss_hdmi_tx.h
new file mode 100644
index 0000000..e55aaea
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_hdmi_tx.h
@@ -0,0 +1,141 @@
+/* Copyright (c) 2010-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_HDMI_TX_H__
+#define __MDSS_HDMI_TX_H__
+
+#include <linux/switch.h>
+#include "mdss_hdmi_util.h"
+#include "mdss_hdmi_panel.h"
+#include "mdss_cec_core.h"
+#include "mdss_hdmi_audio.h"
+
+#define MAX_SWITCH_NAME_SIZE        5
+
+/* Register IO regions mapped by name in hdmi_tx_init_resource(). */
+enum hdmi_tx_io_type {
+	HDMI_TX_CORE_IO,
+	HDMI_TX_QFPROM_IO,
+	HDMI_TX_HDCP_IO,
+	HDMI_TX_MAX_IO
+};
+
+/* Independently switchable power modules (gpio/vreg/clk groups). */
+enum hdmi_tx_power_module_type {
+	HDMI_TX_HPD_PM,
+	HDMI_TX_DDC_PM,
+	HDMI_TX_CORE_PM,
+	HDMI_TX_CEC_PM,
+	HDMI_TX_MAX_PM
+};
+
+/* Data filled from device tree */
+struct hdmi_tx_platform_data {
+	bool primary;
+	bool cont_splash_enabled;
+	bool cond_power_on;
+	struct dss_io_data io[HDMI_TX_MAX_IO];
+	struct dss_module_power power_data[HDMI_TX_MAX_PM];
+	struct reg_bus_client *reg_bus_clt[HDMI_TX_MAX_PM];
+	/* bitfield representing each module's pin state */
+	u64 pin_states;
+	bool pluggable;
+};
+
+/* Cached pinctrl handle and the named pin states the driver toggles. */
+struct hdmi_tx_pinctrl {
+	struct pinctrl *pinctrl;
+	struct pinctrl_state *state_active;
+	struct pinctrl_state *state_hpd_active;
+	struct pinctrl_state *state_cec_active;
+	struct pinctrl_state *state_ddc_active;
+	struct pinctrl_state *state_suspend;
+};
+
+struct hdmi_tx_ctrl;
+/* Per-event handler signature used by the evt_handler dispatch table. */
+typedef int (*hdmi_tx_evt_handler) (struct hdmi_tx_ctrl *);
+
+/* Top-level driver state for one HDMI TX instance. */
+struct hdmi_tx_ctrl {
+	struct platform_device *pdev;
+	struct hdmi_tx_platform_data pdata;
+	struct mdss_panel_data panel_data;
+	struct mdss_util_intf *mdss_util;
+	struct msm_hdmi_mode_timing_info timing;
+	struct hdmi_tx_pinctrl pin_res;
+	struct mutex mutex;
+	struct mutex tx_lock;
+	struct list_head cable_notify_handlers;
+	struct kobject *kobj;
+	struct switch_dev sdev;
+	struct workqueue_struct *workq;
+	struct hdmi_util_ds_data ds_data;
+	struct completion hpd_int_done;
+	struct work_struct hpd_int_work;
+	struct delayed_work hdcp_cb_work;
+	struct work_struct cable_notify_work;
+	struct hdmi_tx_ddc_ctrl ddc_ctrl;
+	struct hdmi_hdcp_ops *hdcp_ops;
+	struct cec_ops hdmi_cec_ops;
+	struct cec_cbs hdmi_cec_cbs;
+	struct hdmi_audio_ops audio_ops;
+	struct msm_hdmi_audio_setup_params audio_params;
+	struct hdmi_panel_data panel;
+	struct hdmi_panel_ops panel_ops;
+	struct work_struct fps_work;
+
+	spinlock_t hpd_state_lock;
+
+	/* runtime state; several of these are 0/1 flags kept as u32 */
+	u32 panel_power_on;
+	u32 panel_suspend;
+	u32 vic;
+	u32 hdmi_tx_ver;
+	u32 max_pclk_khz;
+	u32 hpd_state;
+	u32 hpd_off_pending;
+	u32 hpd_feature_on;
+	u32 hpd_initialized;
+	u32 vote_hdmi_core_on;
+	u32 dynamic_fps;
+	u32 hdcp14_present;
+	u32 enc_lvl;
+	u32 edid_buf_size;
+	u32 s3d_mode;
+
+	u8 timing_gen_on;
+	u8 mhl_hpd_on;
+	u8 hdcp_status;
+	u8 spd_vendor_name[9];
+	u8 spd_product_description[17];
+
+	bool hdcp_feature_on;
+	bool hpd_disabled;
+	bool ds_registered;
+	bool scrambler_enabled;
+	bool hdcp1_use_sw_keys;
+	bool hdcp14_sw_keys;
+	bool auth_state;
+	bool custom_edid;
+	bool sim_mode;
+	bool hdcp22_present;
+	bool power_data_enable[HDMI_TX_MAX_PM];
+
+	void (*hdmi_tx_hpd_done)(void *data);
+	void *downstream_data;
+	void *audio_data;
+	void *feature_data[hweight8(HDMI_TX_FEAT_MAX)];
+	void *hdcp_data;
+	void *evt_arg;
+	u8 *edid_buf;
+
+	char disp_switch_name[MAX_SWITCH_NAME_SIZE];
+
+	/* NOTE(review): sized MDSS_EVENT_MAX - 1 — confirm events never
+	 * take the value MDSS_EVENT_MAX - 1 (would index out of bounds).
+	 */
+	hdmi_tx_evt_handler evt_handler[MDSS_EVENT_MAX - 1];
+};
+
+#endif /* __MDSS_HDMI_TX_H__ */
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_util.c b/drivers/video/fbdev/msm/mdss_hdmi_util.c
new file mode 100644
index 0000000..8942e71
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_hdmi_util.c
@@ -0,0 +1,1694 @@
+/* Copyright (c) 2010-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/io.h>
+#include <linux/delay.h>
+#include "mdss_hdmi_util.h"
+
+#define RESOLUTION_NAME_STR_LEN 30
+#define HDMI_SEC_TO_MS 1000
+#define HDMI_MS_TO_US 1000
+#define HDMI_SEC_TO_US (HDMI_SEC_TO_MS * HDMI_MS_TO_US)
+#define HDMI_KHZ_TO_HZ 1000
+#define HDMI_BUSY_WAIT_DELAY_US 100
+
+#define HDMI_SCDC_UNKNOWN_REGISTER        "Unknown register"
+
+static char res_buf[RESOLUTION_NAME_STR_LEN];
+
+/* Direction of a DDC transfer programmed by hdmi_ddc_trigger(). */
+enum trigger_mode {
+	TRIGGER_WRITE,
+	TRIGGER_READ
+};
+
+/*
+ * Convert a millisecond timeout into an equivalent number of hsync lines
+ * for the given video timing.
+ *
+ * @timing:     active video timing (refresh_rate is in units of Hz * 1000)
+ * @timeout_ms: desired timeout in milliseconds
+ *
+ * Returns the line count on success, -EINVAL on bad arguments or
+ * degenerate timing values that would cause a division by zero.
+ */
+int hdmi_utils_get_timeout_in_hysnc(struct msm_hdmi_mode_timing_info *timing,
+	u32 timeout_ms)
+{
+	u32 fps, v_total;
+	u32 time_taken_by_one_line_us, lines_needed_for_given_time;
+
+	if (!timing || !timeout_ms) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	fps = timing->refresh_rate / HDMI_KHZ_TO_HZ;
+	v_total = hdmi_tx_get_v_total(timing);
+
+	/* guard the divisions below against malformed timing data */
+	if (!fps || !v_total) {
+		pr_err("invalid timing: fps %d v_total %d\n", fps, v_total);
+		return -EINVAL;
+	}
+
+	/*
+	 * pixel clock  = h_total * v_total * fps
+	 * 1 sec = pixel clock number of pixels are transmitted.
+	 * time taken by one line (h_total) = 1 / (v_total * fps).
+	 */
+	time_taken_by_one_line_us = HDMI_SEC_TO_US / (v_total * fps);
+	if (!time_taken_by_one_line_us) {
+		pr_err("invalid timing: line time rounds to zero\n");
+		return -EINVAL;
+	}
+
+	lines_needed_for_given_time = (timeout_ms * HDMI_MS_TO_US) /
+		time_taken_by_one_line_us;
+
+	return lines_needed_for_given_time;
+}
+
+/*
+ * Arm the DDC SW-done interrupt and wait for the DDC engine to be free.
+ *
+ * @ddc_ctrl: DDC controller context (must have a valid io map)
+ * @what:     caller description, used only in the timeout error message
+ *
+ * Returns 0 when the engine is free, -EINVAL on bad arguments, or
+ * -ETIMEDOUT if SW/HW arbitration kept the engine busy for ~10ms.
+ */
+static int hdmi_ddc_clear_irq(struct hdmi_tx_ddc_ctrl *ddc_ctrl,
+	char *what)
+{
+	u32 ddc_int_ctrl, ddc_status, in_use, timeout;
+	u32 sw_done_mask = BIT(2);
+	u32 sw_done_ack  = BIT(1);
+	u32 in_use_by_sw = BIT(0);
+	u32 in_use_by_hw = BIT(1);
+
+	if (!ddc_ctrl || !ddc_ctrl->io) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	/* ack any stale SW-done status and enable the interrupt */
+	ddc_int_ctrl = sw_done_mask | sw_done_ack;
+
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_INT_CTRL, ddc_int_ctrl);
+
+	/* wait until DDC HW is free: up to 100 polls, 100us apart */
+	timeout = 100;
+	do {
+		ddc_status = DSS_REG_R_ND(ddc_ctrl->io, HDMI_DDC_HW_STATUS);
+		in_use = ddc_status & (in_use_by_sw | in_use_by_hw);
+		if (in_use) {
+			pr_debug("ddc is in use by %s, timeout(%d)\n",
+				ddc_status & in_use_by_sw ? "sw" : "hw",
+				timeout);
+			udelay(100);
+		}
+	} while (in_use && --timeout);
+
+	if (!timeout) {
+		pr_err("%s: timedout\n", what);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+/*
+ * Quiesce the scrambler-status DDC machinery: ack/mask its interrupts
+ * and pulse the timer-reset bit.  Called before disabling HW polling.
+ */
+static void hdmi_scrambler_ddc_reset(struct hdmi_tx_ddc_ctrl *ctrl)
+{
+	u32 reg_val;
+
+	if (!ctrl) {
+		pr_err("Invalid parameters\n");
+		return;
+	}
+
+	/* clear ack and disable interrupts */
+	reg_val = BIT(14) | BIT(9) | BIT(5) | BIT(1);
+	DSS_REG_W(ctrl->io, HDMI_DDC_INT_CTRL2, reg_val);
+
+	/* Reset DDC timers: drive BIT(0) high, then release it */
+	reg_val = BIT(0) | DSS_REG_R(ctrl->io, HDMI_SCRAMBLER_STATUS_DDC_CTRL);
+	DSS_REG_W(ctrl->io, HDMI_SCRAMBLER_STATUS_DDC_CTRL, reg_val);
+
+	reg_val = DSS_REG_R(ctrl->io, HDMI_SCRAMBLER_STATUS_DDC_CTRL);
+	reg_val &= ~BIT(0);
+	DSS_REG_W(ctrl->io, HDMI_SCRAMBLER_STATUS_DDC_CTRL, reg_val);
+}
+
+/*
+ * Stop HW polling of the sink's scrambler status over DDC.
+ * Resets the status machinery first, then clears the HW access bits.
+ */
+void hdmi_scrambler_ddc_disable(struct hdmi_tx_ddc_ctrl *ctrl)
+{
+	u32 reg_val;
+
+	if (!ctrl) {
+		pr_err("Invalid parameters\n");
+		return;
+	}
+
+	hdmi_scrambler_ddc_reset(ctrl);
+
+	/* Disable HW DDC access to RxStatus register */
+	reg_val = DSS_REG_R(ctrl->io, HDMI_HW_DDC_CTRL);
+	reg_val &= ~(BIT(8) | BIT(9));
+
+	DSS_REG_W(ctrl->io, HDMI_HW_DDC_CTRL, reg_val);
+}
+
+/*
+ * Read the scrambler-status DDC error flags, ack each one seen, and
+ * report the last error found.
+ *
+ * Returns 0 if no error bits were set, otherwise -ECONNABORTED (abort),
+ * -ETIMEDOUT (timeout) or -EIO (NACK on either transaction half).
+ * Note: if several bits are set, the last checked error wins.
+ */
+static int hdmi_scrambler_ddc_check_status(struct hdmi_tx_ddc_ctrl *ctrl)
+{
+	int rc = 0;
+	u32 reg_val;
+
+	if (!ctrl) {
+		pr_err("invalid ddc ctrl\n");
+		return -EINVAL;
+	}
+
+	/* check for errors and clear status */
+	reg_val = DSS_REG_R(ctrl->io, HDMI_SCRAMBLER_STATUS_DDC_STATUS);
+
+	if (reg_val & BIT(4)) {
+		pr_err("ddc aborted\n");
+		reg_val |= BIT(5);
+		rc = -ECONNABORTED;
+	}
+
+	if (reg_val & BIT(8)) {
+		pr_err("timed out\n");
+		reg_val |= BIT(9);
+		rc = -ETIMEDOUT;
+	}
+
+	if (reg_val & BIT(12)) {
+		pr_err("NACK0\n");
+		reg_val |= BIT(13);
+		rc = -EIO;
+	}
+
+	if (reg_val & BIT(14)) {
+		pr_err("NACK1\n");
+		reg_val |= BIT(15);
+		rc = -EIO;
+	}
+
+	DSS_REG_W(ctrl->io, HDMI_SCRAMBLER_STATUS_DDC_STATUS, reg_val);
+
+	return rc;
+}
+
+/*
+ * Program the scrambler-status polling timer, let HW poll the sink for
+ * 200ms (per HDMI 2.0), then collect and clear the resulting status.
+ *
+ * @ctrl:          DDC controller context
+ * @timeout_hsync: timeout expressed as a count of hsync lines
+ *
+ * Returns 0 on success, or a negative error code if the DDC engine
+ * could not be acquired or the status poll reported an error.
+ */
+static int hdmi_scrambler_status_timer_setup(struct hdmi_tx_ddc_ctrl *ctrl,
+		u32 timeout_hsync)
+{
+	u32 reg_val;
+	int rc;
+	struct dss_io_data *io = NULL;
+
+	if (!ctrl || !ctrl->io) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	io = ctrl->io;
+
+	/* bail out early if the DDC engine is stuck busy */
+	rc = hdmi_ddc_clear_irq(ctrl, "scrambler");
+	if (rc)
+		return rc;
+
+	DSS_REG_W(io, HDMI_SCRAMBLER_STATUS_DDC_TIMER_CTRL, timeout_hsync);
+	DSS_REG_W(io, HDMI_SCRAMBLER_STATUS_DDC_TIMER_CTRL2, timeout_hsync);
+
+	reg_val = DSS_REG_R(io, HDMI_DDC_INT_CTRL5);
+	reg_val |= BIT(10);
+	DSS_REG_W(io, HDMI_DDC_INT_CTRL5, reg_val);
+
+	reg_val = DSS_REG_R(io, HDMI_DDC_INT_CTRL2);
+	/* Trigger interrupt if scrambler status is 0 or DDC failure */
+	reg_val |= BIT(10);
+	reg_val &= ~(BIT(15) | BIT(16));
+	reg_val |= BIT(16);
+	DSS_REG_W(io, HDMI_DDC_INT_CTRL2, reg_val);
+
+	/* Enable HW DDC access to the sink's RxStatus */
+	reg_val = DSS_REG_R(io, HDMI_HW_DDC_CTRL);
+
+	reg_val &= ~(BIT(8) | BIT(9));
+	reg_val |= BIT(8);
+	DSS_REG_W(io, HDMI_HW_DDC_CTRL, reg_val);
+
+	/* WAIT for 200ms as per HDMI 2.0 standard for sink to respond */
+	msleep(200);
+
+	/* collect and clear the scrambler status */
+	rc = hdmi_scrambler_ddc_check_status(ctrl);
+	if (rc)
+		pr_err("scrambling ddc error %d\n", rc);
+
+	hdmi_scrambler_ddc_disable(ctrl);
+
+	return rc;
+}
+
+/*
+ * Map an SCDC register id to its printable name, for log messages.
+ * Unknown ids map to HDMI_SCDC_UNKNOWN_REGISTER.
+ */
+static inline char *hdmi_scdc_reg2string(u32 type)
+{
+	switch (type) {
+	case HDMI_TX_SCDC_SCRAMBLING_STATUS:
+		return "HDMI_TX_SCDC_SCRAMBLING_STATUS";
+	case HDMI_TX_SCDC_SCRAMBLING_ENABLE:
+		return "HDMI_TX_SCDC_SCRAMBLING_ENABLE";
+	case HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE:
+		return "HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE";
+	case HDMI_TX_SCDC_CLOCK_DET_STATUS:
+		return "HDMI_TX_SCDC_CLOCK_DET_STATUS";
+	case HDMI_TX_SCDC_CH0_LOCK_STATUS:
+		return "HDMI_TX_SCDC_CH0_LOCK_STATUS";
+	case HDMI_TX_SCDC_CH1_LOCK_STATUS:
+		return "HDMI_TX_SCDC_CH1_LOCK_STATUS";
+	case HDMI_TX_SCDC_CH2_LOCK_STATUS:
+		return "HDMI_TX_SCDC_CH2_LOCK_STATUS";
+	case HDMI_TX_SCDC_CH0_ERROR_COUNT:
+		return "HDMI_TX_SCDC_CH0_ERROR_COUNT";
+	case HDMI_TX_SCDC_CH1_ERROR_COUNT:
+		return "HDMI_TX_SCDC_CH1_ERROR_COUNT";
+	case HDMI_TX_SCDC_CH2_ERROR_COUNT:
+		return "HDMI_TX_SCDC_CH2_ERROR_COUNT";
+	case HDMI_TX_SCDC_READ_ENABLE:
+		return "HDMI_TX_SCDC_READ_ENABLE";
+	default:
+		return HDMI_SCDC_UNKNOWN_REGISTER;
+	}
+}
+
+/* Runtime-registered timing slots, indexed by (id - HDMI_VFRMT_RESERVE1). */
+static struct msm_hdmi_mode_timing_info hdmi_resv_timings[
+		RESERVE_VFRMT_END - HDMI_VFRMT_RESERVE1 + 1];
+
+/*
+ * Look up a previously reserved timing entry by its video format id.
+ * Copies the entry into @mode on success; returns -EINVAL otherwise.
+ */
+static int hdmi_get_resv_timing_info(
+	struct msm_hdmi_mode_timing_info *mode, int id)
+{
+	int idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(hdmi_resv_timings); idx++) {
+		if (hdmi_resv_timings[idx].video_format == id) {
+			*mode = hdmi_resv_timings[idx];
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * Store @mode in the first free reserved slot (video_format == 0) and
+ * return the reserved format id assigned to it, or -ENOMEM if full.
+ */
+int hdmi_set_resv_timing_info(struct msm_hdmi_mode_timing_info *mode)
+{
+	int idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(hdmi_resv_timings); idx++) {
+		if (!hdmi_resv_timings[idx].video_format) {
+			hdmi_resv_timings[idx] = *mode;
+			hdmi_resv_timings[idx].video_format =
+				HDMI_VFRMT_RESERVE1 + idx;
+			return hdmi_resv_timings[idx].video_format;
+		}
+	}
+
+	return -ENOMEM;
+}
+
+/*
+ * Check whether @mode names a reserved slot that has been populated.
+ * Returns false for ids outside the reserved range or empty slots.
+ */
+bool hdmi_is_valid_resv_timing(int mode)
+{
+	u32 fmt;
+
+	if (mode < HDMI_VFRMT_RESERVE1 || mode > RESERVE_VFRMT_END) {
+		pr_err("invalid mode %d\n", mode);
+		return false;
+	}
+
+	fmt = hdmi_resv_timings[mode - HDMI_VFRMT_RESERVE1].video_format;
+
+	/* a populated slot carries a format id within the reserved range */
+	return fmt >= HDMI_VFRMT_RESERVE1 && fmt <= RESERVE_VFRMT_END;
+}
+
+/* Mark every reserved-timing slot as free (video_format == 0). */
+void hdmi_reset_resv_timing_info(void)
+{
+	int idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(hdmi_resv_timings); idx++)
+		hdmi_resv_timings[idx].video_format = 0;
+}
+
+/*
+ * Fill @mode with the static timing details for video format @id.
+ *
+ * Standard CEA/VESA formats are resolved through the
+ * MSM_HDMI_MODES_GET_DETAILS() table macro; any id not listed here
+ * falls back to the runtime-reserved timing slots.
+ *
+ * Returns 0 on success, -EINVAL if @id is unknown.
+ */
+int msm_hdmi_get_timing_info(
+	struct msm_hdmi_mode_timing_info *mode, int id)
+{
+	int ret = 0;
+
+	switch (id) {
+	case HDMI_VFRMT_640x480p60_4_3:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_640x480p60_4_3);
+		break;
+	case HDMI_VFRMT_720x480p60_4_3:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_720x480p60_4_3);
+		break;
+	case HDMI_VFRMT_720x480p60_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_720x480p60_16_9);
+		break;
+	case HDMI_VFRMT_1280x720p60_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1280x720p60_16_9);
+		break;
+	case HDMI_VFRMT_1920x1080i60_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1920x1080i60_16_9);
+		break;
+	case HDMI_VFRMT_1440x480i60_4_3:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1440x480i60_4_3);
+		break;
+	case HDMI_VFRMT_1440x480i60_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1440x480i60_16_9);
+		break;
+	case HDMI_VFRMT_1920x1080p60_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1920x1080p60_16_9);
+		break;
+	case HDMI_VFRMT_720x576p50_4_3:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_720x576p50_4_3);
+		break;
+	case HDMI_VFRMT_720x576p50_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_720x576p50_16_9);
+		break;
+	case HDMI_VFRMT_1280x720p50_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1280x720p50_16_9);
+		break;
+	case HDMI_VFRMT_1440x576i50_4_3:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1440x576i50_4_3);
+		break;
+	case HDMI_VFRMT_1440x576i50_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1440x576i50_16_9);
+		break;
+	case HDMI_VFRMT_1920x1080p50_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1920x1080p50_16_9);
+		break;
+	case HDMI_VFRMT_1920x1080p24_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1920x1080p24_16_9);
+		break;
+	case HDMI_VFRMT_1920x1080p25_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1920x1080p25_16_9);
+		break;
+	case HDMI_VFRMT_1920x1080p30_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1920x1080p30_16_9);
+		break;
+	case HDMI_EVFRMT_3840x2160p30_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_EVFRMT_3840x2160p30_16_9);
+		break;
+	case HDMI_EVFRMT_3840x2160p25_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_EVFRMT_3840x2160p25_16_9);
+		break;
+	case HDMI_EVFRMT_3840x2160p24_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_EVFRMT_3840x2160p24_16_9);
+		break;
+	case HDMI_EVFRMT_4096x2160p24_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_EVFRMT_4096x2160p24_16_9);
+		break;
+	case HDMI_VFRMT_1024x768p60_4_3:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1024x768p60_4_3);
+		break;
+	case HDMI_VFRMT_1280x1024p60_5_4:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1280x1024p60_5_4);
+		break;
+	case HDMI_VFRMT_2560x1600p60_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_2560x1600p60_16_9);
+		break;
+	case HDMI_VFRMT_800x600p60_4_3:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_800x600p60_4_3);
+		break;
+	case HDMI_VFRMT_848x480p60_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_848x480p60_16_9);
+		break;
+	case HDMI_VFRMT_1280x960p60_4_3:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1280x960p60_4_3);
+		break;
+	case HDMI_VFRMT_1360x768p60_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1360x768p60_16_9);
+		break;
+	case HDMI_VFRMT_1440x900p60_16_10:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1440x900p60_16_10);
+		break;
+	case HDMI_VFRMT_1400x1050p60_4_3:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1400x1050p60_4_3);
+		break;
+	case HDMI_VFRMT_1680x1050p60_16_10:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1680x1050p60_16_10);
+		break;
+	case HDMI_VFRMT_1600x1200p60_4_3:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1600x1200p60_4_3);
+		break;
+	case HDMI_VFRMT_1920x1200p60_16_10:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1920x1200p60_16_10);
+		break;
+	case HDMI_VFRMT_1366x768p60_16_10:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1366x768p60_16_10);
+		break;
+	case HDMI_VFRMT_1280x800p60_16_10:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_1280x800p60_16_10);
+		break;
+	case HDMI_VFRMT_3840x2160p24_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_3840x2160p24_16_9);
+		break;
+	case HDMI_VFRMT_3840x2160p25_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_3840x2160p25_16_9);
+		break;
+	case HDMI_VFRMT_3840x2160p30_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_3840x2160p30_16_9);
+		break;
+	case HDMI_VFRMT_3840x2160p50_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_3840x2160p50_16_9);
+		break;
+	case HDMI_VFRMT_3840x2160p60_16_9:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_3840x2160p60_16_9);
+		break;
+	case HDMI_VFRMT_4096x2160p24_256_135:
+		MSM_HDMI_MODES_GET_DETAILS(mode,
+					HDMI_VFRMT_4096x2160p24_256_135);
+		break;
+	case HDMI_VFRMT_4096x2160p25_256_135:
+		MSM_HDMI_MODES_GET_DETAILS(mode,
+					HDMI_VFRMT_4096x2160p25_256_135);
+		break;
+	case HDMI_VFRMT_4096x2160p30_256_135:
+		MSM_HDMI_MODES_GET_DETAILS(mode,
+					HDMI_VFRMT_4096x2160p30_256_135);
+		break;
+	case HDMI_VFRMT_4096x2160p50_256_135:
+		MSM_HDMI_MODES_GET_DETAILS(mode,
+					HDMI_VFRMT_4096x2160p50_256_135);
+		break;
+	case HDMI_VFRMT_4096x2160p60_256_135:
+		MSM_HDMI_MODES_GET_DETAILS(mode,
+					HDMI_VFRMT_4096x2160p60_256_135);
+		break;
+	case HDMI_VFRMT_3840x2160p24_64_27:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_3840x2160p24_64_27);
+		break;
+	case HDMI_VFRMT_3840x2160p25_64_27:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_3840x2160p25_64_27);
+		break;
+	case HDMI_VFRMT_3840x2160p30_64_27:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_3840x2160p30_64_27);
+		break;
+	case HDMI_VFRMT_3840x2160p50_64_27:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_3840x2160p50_64_27);
+		break;
+	case HDMI_VFRMT_3840x2160p60_64_27:
+		MSM_HDMI_MODES_GET_DETAILS(mode, HDMI_VFRMT_3840x2160p60_64_27);
+		break;
+	default:
+		/* not a table format: try the runtime-reserved slots */
+		ret = hdmi_get_resv_timing_info(mode, id);
+	}
+
+	return ret;
+}
+
+/*
+ * Fetch the timing info for @mode and, when a downstream device with a
+ * pixel-clock limit is registered, demote modes exceeding that limit.
+ *
+ * Returns 0 on success or -EINVAL for bad arguments / unknown modes.
+ */
+int hdmi_get_supported_mode(struct msm_hdmi_mode_timing_info *info,
+	struct hdmi_util_ds_data *ds_data, u32 mode)
+{
+	int rc;
+
+	if (!info || mode >= HDMI_VFRMT_MAX)
+		return -EINVAL;
+
+	rc = msm_hdmi_get_timing_info(info, mode);
+	if (rc)
+		return rc;
+
+	/* downstream device may not handle the full pixel clock range */
+	if (ds_data && ds_data->ds_registered && ds_data->ds_max_clk &&
+		info->pixel_freq > ds_data->ds_max_clk)
+		info->supported = false;
+
+	return rc;
+} /* hdmi_get_supported_mode */
+
+/*
+ * Render a video format id as a human-readable resolution string in the
+ * shared static res_buf (e.g. "1920x1080 p60Hz 16/9").  Unknown modes
+ * yield "???", unsupported ones just the raw number.  Not thread-safe.
+ */
+const char *msm_hdmi_mode_2string(u32 mode)
+{
+	static struct msm_hdmi_mode_timing_info ri = {0};
+	char *ar_str;
+
+	if (mode >= HDMI_VFRMT_MAX)
+		return "???";
+
+	if (hdmi_get_supported_mode(&ri, NULL, mode))
+		return "???";
+
+	memset(res_buf, 0, sizeof(res_buf));
+
+	if (!ri.supported) {
+		snprintf(res_buf, RESOLUTION_NAME_STR_LEN, "%d", mode);
+		return res_buf;
+	}
+
+	switch (ri.ar) {
+	case HDMI_RES_AR_4_3:
+		ar_str = "4/3";
+		break;
+	case HDMI_RES_AR_5_4:
+		ar_str = "5/4";
+		break;
+	case HDMI_RES_AR_16_9:
+		ar_str = "16/9";
+		break;
+	case HDMI_RES_AR_16_10:
+		ar_str = "16/10";
+		break;
+	default:
+		ar_str = "???";
+		break;
+	}
+
+	snprintf(res_buf, RESOLUTION_NAME_STR_LEN, "%dx%d %s%dHz %s",
+		ri.active_h, ri.active_v, ri.interlaced ? "i" : "p",
+		ri.refresh_rate / 1000, ar_str);
+
+	return res_buf;
+}
+
+/*
+ * Find the video format id (VIC) whose timing exactly matches
+ * @timing_in among all supported modes.
+ *
+ * @timing_in: timing to match (porches, pulse widths, pixel clock and
+ *             refresh rate must all match exactly)
+ * @ds_data:   optional downstream constraints applied during lookup
+ *
+ * Returns the matching VIC, or -1 if no supported mode matches.
+ */
+int hdmi_get_video_id_code(struct msm_hdmi_mode_timing_info *timing_in,
+	struct hdmi_util_ds_data *ds_data)
+{
+	int i, vic = -1;
+	struct msm_hdmi_mode_timing_info supported_timing = {0};
+	u32 ret;
+
+	if (!timing_in) {
+		pr_err("invalid input\n");
+		goto exit;
+	}
+
+	/* active_low_h, active_low_v and interlaced are not checked against */
+	for (i = 0; i < HDMI_VFRMT_MAX; i++) {
+		ret = hdmi_get_supported_mode(&supported_timing, ds_data, i);
+
+		if (ret || !supported_timing.supported)
+			continue;
+		if (timing_in->active_h != supported_timing.active_h)
+			continue;
+		if (timing_in->front_porch_h != supported_timing.front_porch_h)
+			continue;
+		if (timing_in->pulse_width_h != supported_timing.pulse_width_h)
+			continue;
+		if (timing_in->back_porch_h != supported_timing.back_porch_h)
+			continue;
+		if (timing_in->active_v != supported_timing.active_v)
+			continue;
+		if (timing_in->front_porch_v != supported_timing.front_porch_v)
+			continue;
+		if (timing_in->pulse_width_v != supported_timing.pulse_width_v)
+			continue;
+		if (timing_in->back_porch_v != supported_timing.back_porch_v)
+			continue;
+		if (timing_in->pixel_freq != supported_timing.pixel_freq)
+			continue;
+		if (timing_in->refresh_rate != supported_timing.refresh_rate)
+			continue;
+
+		vic = (int)supported_timing.video_format;
+		break;
+	}
+
+	if (vic < 0)
+		pr_err("timing is not supported h=%d v=%d\n",
+			timing_in->active_h, timing_in->active_v);
+	else
+		pr_debug("vic = %d timing = %s\n", vic,
+			msm_hdmi_mode_2string((u32)vic));
+exit:
+
+	return vic;
+} /* hdmi_get_video_id_code */
+
+/* Map a single 3D format flag to its short name ("" if not set). */
+static const char *hdmi_get_single_video_3d_fmt_2string(u32 format)
+{
+	if (format == TOP_AND_BOTTOM)
+		return "TAB";
+	if (format == FRAME_PACKING)
+		return "FP";
+	if (format == SIDE_BY_SIDE_HALF)
+		return "SSH";
+
+	return "";
+} /* hdmi_get_single_video_3d_fmt_2string */
+
+/*
+ * Render the 3D formats set in @format into @buf as a ':'-separated
+ * list (e.g. "FP:TAB:SSH").  Returns the number of characters written.
+ */
+ssize_t hdmi_get_video_3d_fmt_2string(u32 format, char *buf, u32 size)
+{
+	u32 masks[] = {TOP_AND_BOTTOM, SIDE_BY_SIDE_HALF};
+	ssize_t len = 0;
+	int i;
+
+	/* frame packing goes first, never preceded by a separator */
+	len += scnprintf(buf, size, "%s",
+		hdmi_get_single_video_3d_fmt_2string(
+			format & FRAME_PACKING));
+
+	/* append remaining formats, separated once output is non-empty */
+	for (i = 0; i < ARRAY_SIZE(masks); i++) {
+		if (len && (format & masks[i]))
+			len += scnprintf(buf + len, size - len, ":%s",
+				hdmi_get_single_video_3d_fmt_2string(
+					format & masks[i]));
+		else
+			len += scnprintf(buf + len, size - len, "%s",
+				hdmi_get_single_video_3d_fmt_2string(
+					format & masks[i]));
+	}
+
+	return len;
+} /* hdmi_get_video_3d_fmt_2string */
+
+/*
+ * Load the DDC data FIFO and transaction registers, then kick off the
+ * I2C transfer.
+ *
+ * @ddc_ctrl: controller whose ddc_data describes the transfer
+ * @mode:     TRIGGER_READ or TRIGGER_WRITE
+ * @seg:      for reads, use E-DDC segment addressing (segment 1)
+ *
+ * Completion is signalled asynchronously via hdmi_ddc_isr().
+ */
+static void hdmi_ddc_trigger(struct hdmi_tx_ddc_ctrl *ddc_ctrl,
+		enum trigger_mode mode, bool seg)
+{
+	struct hdmi_tx_ddc_data *ddc_data = &ddc_ctrl->ddc_data;
+	struct dss_io_data *io = ddc_ctrl->io;
+	u32 const seg_addr = 0x60, seg_num = 0x01;
+	u32 ddc_ctrl_reg_val;
+
+	/* force the write (even) form of the 8-bit I2C address */
+	ddc_data->dev_addr &= 0xFE;
+
+	if (mode == TRIGGER_READ && seg) {
+		DSS_REG_W_ND(io, HDMI_DDC_DATA, BIT(31) | (seg_addr << 8));
+		DSS_REG_W_ND(io, HDMI_DDC_DATA, seg_num << 8);
+	}
+
+	/* handle portion #1: device address (write) */
+	DSS_REG_W_ND(io, HDMI_DDC_DATA, BIT(31) | (ddc_data->dev_addr << 8));
+
+	/* handle portion #2: register offset */
+	DSS_REG_W_ND(io, HDMI_DDC_DATA, ddc_data->offset << 8);
+
+	if (mode == TRIGGER_READ) {
+		/* handle portion #3: device address with read bit set */
+		DSS_REG_W_ND(io, HDMI_DDC_DATA,
+			(ddc_data->dev_addr | BIT(0)) << 8);
+
+		/* HDMI_I2C_TRANSACTION0 */
+		DSS_REG_W_ND(io, HDMI_DDC_TRANS0, BIT(12) | BIT(16));
+
+		/* Write to HDMI_I2C_TRANSACTION1 */
+		if (seg) {
+			DSS_REG_W_ND(io, HDMI_DDC_TRANS1, BIT(12) | BIT(16));
+			DSS_REG_W_ND(io, HDMI_DDC_TRANS2,
+				BIT(0) | BIT(12) | BIT(13) |
+				(ddc_data->request_len << 16));
+
+			ddc_ctrl_reg_val = BIT(0) | BIT(21);
+		} else {
+			DSS_REG_W_ND(io, HDMI_DDC_TRANS1,
+				BIT(0) | BIT(12) | BIT(13) |
+				(ddc_data->request_len << 16));
+
+			ddc_ctrl_reg_val = BIT(0) | BIT(20);
+		}
+	} else {
+		int ndx;
+
+		/* write buffer contents into the data FIFO */
+		for (ndx = 0; ndx < ddc_data->data_len; ++ndx)
+			DSS_REG_W_ND(io, HDMI_DDC_DATA,
+				((u32)ddc_data->data_buf[ndx]) << 8);
+
+		DSS_REG_W_ND(io, HDMI_DDC_TRANS0,
+			(ddc_data->data_len + 1) << 16 | BIT(12) | BIT(13));
+
+		ddc_ctrl_reg_val = BIT(0);
+	}
+
+	/* Trigger the I2C transfer */
+	DSS_REG_W_ND(io, HDMI_DDC_CTRL, ddc_ctrl_reg_val);
+}
+
+/*
+ * Check the DDC SW status for NACKs on the last transfer and, if any
+ * occurred, soft-reset the DDC engine to clear the stuck state.
+ */
+static void hdmi_ddc_clear_status(struct hdmi_tx_ddc_ctrl *ddc_ctrl)
+{
+	u32 reg_val;
+
+	/* Read DDC status; bits 12..15 are the per-transaction NACK flags */
+	reg_val = DSS_REG_R(ddc_ctrl->io, HDMI_DDC_SW_STATUS);
+	reg_val &= BIT(12) | BIT(13) | BIT(14) | BIT(15);
+
+	/* Check if any NACK occurred */
+	if (reg_val) {
+		pr_debug("%s: NACK: HDMI_DDC_SW_STATUS 0x%x\n",
+			ddc_ctrl->ddc_data.what, reg_val);
+
+		/* SW_STATUS_RESET, SOFT_RESET */
+		reg_val = BIT(3) | BIT(1);
+
+		DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_CTRL, reg_val);
+	}
+}
+
+/*
+ * Perform a DDC read described by ddc_ctrl->ddc_data, retrying up to
+ * ddc_data->retry times on timeout/NACK, then copy the received bytes
+ * into ddc_data->data_buf.
+ *
+ * Two completion strategies: with ddc_data->hard_timeout set, the ISR
+ * flips read_busy_wait_done and we busy-wait in 100us steps; otherwise
+ * we sleep on the ddc_sw_done completion for up to HZ/2.
+ * ddc_data->timeout_left reports the unused time budget in ms.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int hdmi_ddc_read_retry(struct hdmi_tx_ddc_ctrl *ddc_ctrl)
+{
+	u32 reg_val, ndx, time_out_count, wait_time;
+	struct hdmi_tx_ddc_data *ddc_data;
+	int status;
+	int busy_wait_us;
+
+	if (!ddc_ctrl || !ddc_ctrl->io) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	ddc_data  = &ddc_ctrl->ddc_data;
+
+	if (!ddc_data->data_buf) {
+		status = -EINVAL;
+		pr_err("%s: invalid buf\n", ddc_data->what);
+		goto error;
+	}
+
+	if (ddc_data->retry < 0) {
+		pr_err("invalid no. of retries %d\n", ddc_data->retry);
+		status = -EINVAL;
+		goto error;
+	}
+
+	do {
+		status = hdmi_ddc_clear_irq(ddc_ctrl, ddc_data->what);
+		if (status)
+			continue;
+
+		if (ddc_data->hard_timeout) {
+			pr_debug("using hard_timeout %dms\n",
+				ddc_data->hard_timeout);
+
+			busy_wait_us = ddc_data->hard_timeout * HDMI_MS_TO_US;
+			atomic_set(&ddc_ctrl->read_busy_wait_done, 0);
+		} else {
+			reinit_completion(&ddc_ctrl->ddc_sw_done);
+			wait_time = HZ / 2;
+		}
+
+		hdmi_ddc_trigger(ddc_ctrl, TRIGGER_READ, false);
+
+		if (ddc_data->hard_timeout) {
+			/* poll until the ISR signals done or budget runs out */
+			while (busy_wait_us > 0 &&
+				!atomic_read(&ddc_ctrl->read_busy_wait_done)) {
+				udelay(HDMI_BUSY_WAIT_DELAY_US);
+				busy_wait_us -= HDMI_BUSY_WAIT_DELAY_US;
+			};
+
+			if (busy_wait_us < 0)
+				busy_wait_us = 0;
+
+			time_out_count = busy_wait_us / HDMI_MS_TO_US;
+
+			ddc_data->timeout_left = time_out_count;
+		} else {
+			time_out_count = wait_for_completion_timeout(
+				&ddc_ctrl->ddc_sw_done, wait_time);
+
+			ddc_data->timeout_left =
+				jiffies_to_msecs(time_out_count);
+		}
+
+		pr_debug("ddc read done at %dms\n", jiffies_to_msecs(jiffies));
+
+		if (!time_out_count) {
+			pr_debug("%s: timedout\n", ddc_data->what);
+
+			status = -ETIMEDOUT;
+		}
+
+		hdmi_ddc_clear_status(ddc_ctrl);
+	} while (status && ddc_data->retry--);
+
+	if (status)
+		goto error;
+
+	/*
+	 * Set up HDMI_DDC_DATA for reading back the FIFO (BIT(31) selects
+	 * index-write, index 3) — NOTE(review): confirm index/flag meaning
+	 * against the register spec; original comment said "write data".
+	 */
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_DATA,
+		BIT(0) | (3 << 16) | BIT(31));
+
+	/* Discard first byte */
+	DSS_REG_R_ND(ddc_ctrl->io, HDMI_DDC_DATA);
+	for (ndx = 0; ndx < ddc_data->data_len; ++ndx) {
+		reg_val = DSS_REG_R_ND(ddc_ctrl->io, HDMI_DDC_DATA);
+		ddc_data->data_buf[ndx] = (u8)((reg_val & 0x0000FF00) >> 8);
+	}
+
+	pr_debug("%s: success\n",  ddc_data->what);
+
+error:
+	return status;
+} /* hdmi_ddc_read_retry */
+
+/*
+ * One-time DDC engine setup: clock prescaler/threshold, stall timeout
+ * and reference timer.  Must run before any DDC transfer is triggered.
+ */
+void hdmi_ddc_config(struct hdmi_tx_ddc_ctrl *ddc_ctrl)
+{
+	if (!ddc_ctrl || !ddc_ctrl->io) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	/* Configure Pre-Scale multiplier & Threshold */
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_SPEED, (10 << 16) | (2 << 0));
+
+	/*
+	 * Setting 31:24 bits : Time units to wait before timeout
+	 * when clock is being stalled by external sink device
+	 */
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_SETUP, 0xFF000000);
+
+	/* Enable reference timer to 19 micro-seconds */
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_REF, (1 << 16) | (19 << 0));
+} /* hdmi_ddc_config */
+
+/*
+ * Log and ack any error bits (abort, timeout, NACK0/NACK1) in the
+ * HDCP 2.2 DDC status register.  Errors are only reported at debug
+ * level; no error code is propagated.
+ */
+static void hdmi_hdcp2p2_ddc_clear_status(struct hdmi_tx_ddc_ctrl *ctrl)
+{
+	u32 reg_val;
+
+	if (!ctrl) {
+		pr_err("invalid ddc ctrl\n");
+		return;
+	}
+
+	/* check for errors and clear status */
+	reg_val = DSS_REG_R(ctrl->io, HDMI_HDCP2P2_DDC_STATUS);
+
+	if (reg_val & BIT(4)) {
+		pr_debug("ddc aborted\n");
+		reg_val |= BIT(5);
+	}
+
+	if (reg_val & BIT(8)) {
+		pr_debug("timed out\n");
+		reg_val |= BIT(9);
+	}
+
+	if (reg_val & BIT(12)) {
+		pr_debug("NACK0\n");
+		reg_val |= BIT(13);
+	}
+
+	if (reg_val & BIT(14)) {
+		pr_debug("NACK1\n");
+		reg_val |= BIT(15);
+	}
+
+	DSS_REG_W(ctrl->io, HDMI_HDCP2P2_DDC_STATUS, reg_val);
+}
+
+/*
+ * HDCP 2.2 portion of the DDC interrupt handler.
+ *
+ * Decodes the encryption-state, RxStatus message-size, ready, reauth,
+ * DDC fail/done/read-req and timeout interrupts, acking each one and
+ * recording the decoded state in ctrl->hdcp2p2_ddc_data.  When a new
+ * message or a reauth request arrives, either wakes the busy-waiting
+ * reader (data->wait) or invokes the registered link callback.
+ *
+ * Returns 0 on success, -EINVAL if an event arrives with no waiter and
+ * no callback registered.
+ */
+static int hdmi_ddc_hdcp2p2_isr(struct hdmi_tx_ddc_ctrl *ddc_ctrl)
+{
+	struct dss_io_data *io = NULL;
+	struct hdmi_tx_hdcp2p2_ddc_data *data;
+	u32 intr0, intr2, intr5;
+	u32 msg_size;
+	int rc = 0;
+
+	if (!ddc_ctrl || !ddc_ctrl->io) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	io = ddc_ctrl->io;
+
+	data = &ddc_ctrl->hdcp2p2_ddc_data;
+
+	intr0 = DSS_REG_R(io, HDMI_DDC_INT_CTRL0);
+	intr2 = DSS_REG_R(io, HDMI_HDCP_INT_CTRL2);
+	intr5 = DSS_REG_R_ND(io, HDMI_DDC_INT_CTRL5);
+
+	pr_debug("intr0: 0x%x, intr2: 0x%x, intr5: 0x%x\n",
+			intr0, intr2, intr5);
+
+	/* check if encryption is enabled */
+	if (intr2 & BIT(0)) {
+		/*
+		 * ack encryption ready interrupt.
+		 * disable encryption ready interrupt.
+		 * enable encryption not ready interrupt.
+		 */
+		intr2 &= ~BIT(2);
+		intr2 |= BIT(1) | BIT(6);
+
+		pr_debug("HDCP 2.2 Encryption enabled\n");
+		data->encryption_ready = true;
+	}
+
+	/* check if encryption is disabled */
+	if (intr2 & BIT(4)) {
+		/*
+		 * ack encryption not ready interrupt.
+		 * disable encryption not ready interrupt.
+		 * enable encryption ready interrupt.
+		 */
+		intr2  &= ~BIT(6);
+		intr2  |= BIT(5) | BIT(2);
+
+		pr_debug("HDCP 2.2 Encryption disabled\n");
+		data->encryption_ready = false;
+	}
+
+	DSS_REG_W_ND(io, HDMI_HDCP_INT_CTRL2, intr2);
+
+	/* get the message size bits 29:20 */
+	msg_size = (intr0 & (0x3FF << 20)) >> 20;
+
+	if (msg_size) {
+		/* ack and disable message size interrupt */
+		intr0 |= BIT(30);
+		intr0 &= ~BIT(31);
+
+		data->message_size = msg_size;
+	}
+
+	/* check and disable ready interrupt */
+	if (intr0 & BIT(16)) {
+		/* ack ready/not ready interrupt */
+		intr0 |= BIT(17);
+
+		intr0 &= ~BIT(18);
+		data->ready = true;
+	}
+
+	/* check for reauth req interrupt */
+	if (intr0 & BIT(12)) {
+		/* ack and disable reauth req interrupt */
+		intr0 |= BIT(13);
+		intr0 &= ~BIT(14);
+
+		data->reauth_req = true;
+	}
+
+	/* check for ddc fail interrupt */
+	if (intr0 & BIT(8)) {
+		/* ack ddc fail interrupt */
+		intr0 |= BIT(9);
+
+		data->ddc_max_retries_fail = true;
+	}
+
+	/* check for ddc done interrupt */
+	if (intr0 & BIT(4)) {
+		/* ack ddc done interrupt */
+		intr0 |= BIT(5);
+
+		data->ddc_done = true;
+	}
+
+	/* check for ddc read req interrupt */
+	if (intr0 & BIT(0)) {
+		/* ack read req interrupt */
+		intr0 |= BIT(1);
+
+		data->ddc_read_req = true;
+	}
+
+	DSS_REG_W_ND(io, HDMI_DDC_INT_CTRL0, intr0);
+
+	if (intr5 & BIT(0)) {
+		pr_err("RXSTATUS_DDC_REQ_TIMEOUT\n");
+
+		/* ack and disable timeout interrupt */
+		intr5 |= BIT(1);
+		intr5 &= ~BIT(2);
+
+		data->ddc_timeout = true;
+	}
+	DSS_REG_W_ND(io, HDMI_DDC_INT_CTRL5, intr5);
+
+	/* dispatch the decoded event to whoever is listening */
+	if (data->message_size || data->ready || data->reauth_req) {
+		if (data->wait) {
+			atomic_set(&ddc_ctrl->rxstatus_busy_wait_done, 1);
+		} else if (data->link_cb && data->link_data) {
+			data->link_cb(data->link_data);
+		} else {
+			pr_err("new msg/reauth not handled\n");
+			rc = -EINVAL;
+		}
+	}
+
+	hdmi_hdcp2p2_ddc_clear_status(ddc_ctrl);
+
+	return rc;
+}
+
+/*
+ * Scrambler-status portion of the DDC interrupt handler.
+ *
+ * Acks the "status not set", "DDC failed" and "DDC request timeout"
+ * interrupts; if any fired, shuts down HW scrambler-status polling.
+ *
+ * Returns 0 on success, -EINVAL on bad arguments.
+ */
+static int hdmi_ddc_scrambling_isr(struct hdmi_tx_ddc_ctrl *ddc_ctrl)
+{
+	struct dss_io_data *io;
+	bool scrambler_timer_off = false;
+	u32 intr2, intr5;
+
+	if (!ddc_ctrl || !ddc_ctrl->io) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	io = ddc_ctrl->io;
+
+	intr2 = DSS_REG_R_ND(io, HDMI_DDC_INT_CTRL2);
+	intr5 = DSS_REG_R_ND(io, HDMI_DDC_INT_CTRL5);
+
+	pr_debug("intr2: 0x%x, intr5: 0x%x\n", intr2, intr5);
+
+	if (intr2 & BIT(12)) {
+		pr_err("SCRAMBLER_STATUS_NOT\n");
+
+		intr2 |= BIT(14);
+
+		scrambler_timer_off = true;
+	}
+
+	if (intr2 & BIT(8)) {
+		pr_err("SCRAMBLER_STATUS_DDC_FAILED\n");
+
+		intr2 |= BIT(9);
+
+		scrambler_timer_off = true;
+	}
+	DSS_REG_W_ND(io, HDMI_DDC_INT_CTRL2, intr2);
+
+	if (intr5 & BIT(8)) {
+		pr_err("SCRAMBLER_STATUS_DDC_REQ_TIMEOUT\n");
+
+		intr5 |= BIT(9);
+		intr5 &= ~BIT(10);
+
+		scrambler_timer_off = true;
+	}
+	DSS_REG_W_ND(io, HDMI_DDC_INT_CTRL5, intr5);
+
+	/* any failure means HW polling is no longer trustworthy */
+	if (scrambler_timer_off)
+		hdmi_scrambler_ddc_disable(ddc_ctrl);
+
+	return 0;
+}
+
+/*
+ * Top-level DDC interrupt handler.
+ *
+ * Acks the SW/HW done interrupts, wakes any waiter (busy-wait flags
+ * for hard-timeout transfers, the completion otherwise), then chains
+ * into the scrambling (HDMI 2.0 capable parts) and HDCP 2.2 handlers.
+ *
+ * @ddc_ctrl: DDC controller context
+ * @version:  HDMI TX hardware version
+ *
+ * Returns 0 on success or a negative error code from a sub-handler.
+ */
+int hdmi_ddc_isr(struct hdmi_tx_ddc_ctrl *ddc_ctrl, u32 version)
+{
+	u32 ddc_int_ctrl;
+	int ret = 0;	/* int, not u32: sub-handlers return -EINVAL etc. */
+
+	if (!ddc_ctrl || !ddc_ctrl->io) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	ddc_int_ctrl = DSS_REG_R_ND(ddc_ctrl->io, HDMI_DDC_INT_CTRL);
+	pr_debug("intr: 0x%x\n", ddc_int_ctrl);
+
+	if (ddc_int_ctrl & BIT(0)) {
+		pr_debug("sw done\n");
+
+		/* ack sw done and wake whichever wait style is in use */
+		ddc_int_ctrl |= BIT(1);
+		if (ddc_ctrl->ddc_data.hard_timeout) {
+			atomic_set(&ddc_ctrl->read_busy_wait_done, 1);
+			atomic_set(&ddc_ctrl->write_busy_wait_done, 1);
+		} else {
+			complete(&ddc_ctrl->ddc_sw_done);
+		}
+	}
+
+	if (ddc_int_ctrl & BIT(4)) {
+		pr_debug("hw done\n");
+		ddc_int_ctrl |= BIT(5);
+	}
+
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_INT_CTRL, ddc_int_ctrl);
+
+	if (version >= HDMI_TX_SCRAMBLER_MIN_TX_VERSION) {
+		ret = hdmi_ddc_scrambling_isr(ddc_ctrl);
+		if (ret)
+			pr_err("err in scrambling isr\n");
+	}
+
+	ret = hdmi_ddc_hdcp2p2_isr(ddc_ctrl);
+	if (ret)
+		pr_err("err in hdcp2p2 isr\n");
+
+	return ret;
+} /* hdmi_ddc_isr */
+
+/*
+ * Perform a DDC read; if it fails and retry_align is set, try once more
+ * with the request length rounded up to a multiple of 32 bytes.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int hdmi_ddc_read(struct hdmi_tx_ddc_ctrl *ddc_ctrl)
+{
+	struct hdmi_tx_ddc_data *ddc_data;
+	int saved_retry;
+	int rc;
+
+	if (!ddc_ctrl) {
+		pr_err("invalid ddc ctrl\n");
+		return -EINVAL;
+	}
+
+	ddc_data = &ddc_ctrl->ddc_data;
+	saved_retry = ddc_data->retry;
+
+	/* first attempt with the caller-provided request length */
+	rc = hdmi_ddc_read_retry(ddc_ctrl);
+	if (!rc || !ddc_data->retry_align)
+		return rc;
+
+	/* restore the retry budget and retry with aligned length */
+	ddc_data->retry = saved_retry;
+	ddc_data->request_len = 32 * ((ddc_data->data_len + 31) / 32);
+
+	return hdmi_ddc_read_retry(ddc_ctrl);
+} /* hdmi_ddc_read */
+
+/*
+ * Perform a segment-addressed (E-DDC) read described by
+ * ddc_ctrl->ddc_data, retrying up to ddc_data->retry times, then copy
+ * the received bytes into ddc_data->data_buf.  Unlike hdmi_ddc_read(),
+ * this always waits on the ddc_sw_done completion (HZ/2 timeout).
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int hdmi_ddc_read_seg(struct hdmi_tx_ddc_ctrl *ddc_ctrl)
+{
+	int status;
+	u32 reg_val, ndx, time_out_count;
+	struct hdmi_tx_ddc_data *ddc_data;
+
+	if (!ddc_ctrl || !ddc_ctrl->io) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	ddc_data = &ddc_ctrl->ddc_data;
+
+	if (!ddc_data->data_buf) {
+		status = -EINVAL;
+		pr_err("%s: invalid buf\n", ddc_data->what);
+		goto error;
+	}
+
+	if (ddc_data->retry < 0) {
+		pr_err("invalid no. of retries %d\n", ddc_data->retry);
+		status = -EINVAL;
+		goto error;
+	}
+
+	do {
+		status = hdmi_ddc_clear_irq(ddc_ctrl, ddc_data->what);
+		if (status)
+			continue;
+
+		reinit_completion(&ddc_ctrl->ddc_sw_done);
+
+		hdmi_ddc_trigger(ddc_ctrl, TRIGGER_READ, true);
+
+		time_out_count = wait_for_completion_timeout(
+			&ddc_ctrl->ddc_sw_done, HZ / 2);
+
+		if (!time_out_count) {
+			pr_debug("%s: timedout\n", ddc_data->what);
+
+			status = -ETIMEDOUT;
+		}
+
+		hdmi_ddc_clear_status(ddc_ctrl);
+	} while (status && ddc_data->retry--);
+
+	if (status)
+		goto error;
+
+	/*
+	 * Set up HDMI_DDC_DATA for reading back the FIFO (BIT(31) selects
+	 * index-write, index 5 for the segmented transaction) —
+	 * NOTE(review): confirm index meaning against the register spec.
+	 */
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_DATA,
+		BIT(0) | (5 << 16) | BIT(31));
+
+	/* Discard first byte */
+	DSS_REG_R_ND(ddc_ctrl->io, HDMI_DDC_DATA);
+
+	for (ndx = 0; ndx < ddc_data->data_len; ++ndx) {
+		reg_val = DSS_REG_R_ND(ddc_ctrl->io, HDMI_DDC_DATA);
+		ddc_data->data_buf[ndx] = (u8) ((reg_val & 0x0000FF00) >> 8);
+	}
+
+	pr_debug("%s: success\n", ddc_data->what);
+
+error:
+	return status;
+} /* hdmi_ddc_read_seg */
+
+/*
+ * Perform a DDC write described by ddc_ctrl->ddc_data, retrying up to
+ * ddc_data->retry times on timeout/NACK.
+ *
+ * Mirrors hdmi_ddc_read_retry(): with ddc_data->hard_timeout set, the
+ * ISR flips write_busy_wait_done and we busy-wait in 100us steps;
+ * otherwise we sleep on the ddc_sw_done completion for up to HZ/2.
+ * ddc_data->timeout_left reports the unused time budget in ms.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int hdmi_ddc_write(struct hdmi_tx_ddc_ctrl *ddc_ctrl)
+{
+	int status;
+	u32 time_out_count;
+	struct hdmi_tx_ddc_data *ddc_data;
+	u32 wait_time;
+	int busy_wait_us;
+
+	if (!ddc_ctrl || !ddc_ctrl->io) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	ddc_data = &ddc_ctrl->ddc_data;
+
+	if (!ddc_data->data_buf) {
+		status = -EINVAL;
+		pr_err("%s: invalid buf\n", ddc_data->what);
+		goto error;
+	}
+
+	if (ddc_data->retry < 0) {
+		pr_err("invalid no. of retries %d\n", ddc_data->retry);
+		status = -EINVAL;
+		goto error;
+	}
+
+	do {
+		status = hdmi_ddc_clear_irq(ddc_ctrl, ddc_data->what);
+		if (status)
+			continue;
+
+		if (ddc_data->hard_timeout) {
+			pr_debug("using hard_timeout %dms\n",
+				ddc_data->hard_timeout);
+
+			busy_wait_us = ddc_data->hard_timeout * HDMI_MS_TO_US;
+			atomic_set(&ddc_ctrl->write_busy_wait_done, 0);
+		} else {
+			reinit_completion(&ddc_ctrl->ddc_sw_done);
+			wait_time = HZ / 2;
+		}
+
+		hdmi_ddc_trigger(ddc_ctrl, TRIGGER_WRITE, false);
+
+		if (ddc_data->hard_timeout) {
+			/* poll until the ISR signals done or budget runs out */
+			while (busy_wait_us > 0 &&
+				!atomic_read(&ddc_ctrl->write_busy_wait_done)) {
+				udelay(HDMI_BUSY_WAIT_DELAY_US);
+				busy_wait_us -= HDMI_BUSY_WAIT_DELAY_US;
+			}
+
+			if (busy_wait_us < 0)
+				busy_wait_us = 0;
+
+			time_out_count = busy_wait_us / HDMI_MS_TO_US;
+
+			ddc_data->timeout_left = time_out_count;
+		} else {
+			time_out_count = wait_for_completion_timeout(
+				&ddc_ctrl->ddc_sw_done, wait_time);
+
+			ddc_data->timeout_left =
+				jiffies_to_msecs(time_out_count);
+		}
+
+		pr_debug("DDC write done at %dms\n", jiffies_to_msecs(jiffies));
+
+		if (!time_out_count) {
+			/* fixed typo and matched the read path's wording */
+			pr_debug("%s: timedout\n", ddc_data->what);
+
+			status = -ETIMEDOUT;
+		}
+
+		hdmi_ddc_clear_status(ddc_ctrl);
+	} while (status && ddc_data->retry--);
+
+	if (status)
+		goto error;
+
+	pr_debug("%s: success\n", ddc_data->what);
+error:
+	return status;
+} /* hdmi_ddc_write */
+
+
+/*
+ * hdmi_ddc_abort_transaction() - abort an in-flight DDC transaction
+ * @ddc_ctrl: DDC controller context
+ *
+ * Clears any pending DDC interrupt state and then writes the abort and
+ * soft-reset bits (12 and 8) into the DDC arbitration register.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int hdmi_ddc_abort_transaction(struct hdmi_tx_ddc_ctrl *ddc_ctrl)
+{
+	struct hdmi_tx_ddc_data *ddc_data;
+	int rc;
+
+	if (!ddc_ctrl || !ddc_ctrl->io) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	ddc_data = &ddc_ctrl->ddc_data;
+
+	rc = hdmi_ddc_clear_irq(ddc_ctrl, ddc_data->what);
+	if (rc)
+		return rc;
+
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_ARBITRATION, BIT(12) | BIT(8));
+
+	return 0;
+}
+
+/*
+ * hdmi_scdc_read() - read a Status and Control Data Channel (SCDC) field
+ * @ctrl: DDC controller used for the transaction
+ * @data_type: SCDC field to read (enum hdmi_tx_scdc_access_type)
+ * @val: output; receives the decoded field value
+ *
+ * Issues a DDC read to the SCDC slave (device address 0xA8) at the
+ * register offset holding the requested field, then extracts the
+ * relevant bit(s) of the raw byte(s) into *val.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int hdmi_scdc_read(struct hdmi_tx_ddc_ctrl *ctrl, u32 data_type, u32 *val)
+{
+	struct hdmi_tx_ddc_data ddc = {0};
+	u8 buf[2] = {0};
+	int ret;
+
+	if (!ctrl || !ctrl->io || !val) {
+		pr_err("Bad Parameters\n");
+		return -EINVAL;
+	}
+
+	if (data_type >= HDMI_TX_SCDC_MAX) {
+		pr_err("Unsupported data type\n");
+		return -EINVAL;
+	}
+
+	ddc.what = hdmi_scdc_reg2string(data_type);
+	ddc.dev_addr = 0xA8;
+	ddc.retry = 1;
+	ddc.data_buf = buf;
+	/* Single-byte read by default; error counters override to 2 below */
+	ddc.data_len = 1;
+	ddc.request_len = 1;
+
+	/* Map the field onto its SCDC register offset (and length) */
+	switch (data_type) {
+	case HDMI_TX_SCDC_SCRAMBLING_STATUS:
+		ddc.offset = HDMI_SCDC_SCRAMBLER_STATUS;
+		break;
+	case HDMI_TX_SCDC_SCRAMBLING_ENABLE:
+	case HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE:
+		ddc.offset = HDMI_SCDC_TMDS_CONFIG;
+		break;
+	case HDMI_TX_SCDC_CLOCK_DET_STATUS:
+	case HDMI_TX_SCDC_CH0_LOCK_STATUS:
+	case HDMI_TX_SCDC_CH1_LOCK_STATUS:
+	case HDMI_TX_SCDC_CH2_LOCK_STATUS:
+		ddc.offset = HDMI_SCDC_STATUS_FLAGS_0;
+		break;
+	case HDMI_TX_SCDC_CH0_ERROR_COUNT:
+		ddc.data_len = 2;
+		ddc.request_len = 2;
+		ddc.offset = HDMI_SCDC_ERR_DET_0_L;
+		break;
+	case HDMI_TX_SCDC_CH1_ERROR_COUNT:
+		ddc.data_len = 2;
+		ddc.request_len = 2;
+		ddc.offset = HDMI_SCDC_ERR_DET_1_L;
+		break;
+	case HDMI_TX_SCDC_CH2_ERROR_COUNT:
+		ddc.data_len = 2;
+		ddc.request_len = 2;
+		ddc.offset = HDMI_SCDC_ERR_DET_2_L;
+		break;
+	case HDMI_TX_SCDC_READ_ENABLE:
+		ddc.offset = HDMI_SCDC_CONFIG_0;
+		break;
+	default:
+		break;
+	}
+
+	ctrl->ddc_data = ddc;
+
+	ret = hdmi_ddc_read(ctrl);
+	if (ret) {
+		pr_err("DDC Read failed for %s\n", ddc.what);
+		return ret;
+	}
+
+	/* Decode the field out of the raw register byte(s) */
+	switch (data_type) {
+	case HDMI_TX_SCDC_SCRAMBLING_STATUS:
+	case HDMI_TX_SCDC_SCRAMBLING_ENABLE:
+	case HDMI_TX_SCDC_CLOCK_DET_STATUS:
+	case HDMI_TX_SCDC_READ_ENABLE:
+		*val = (buf[0] & BIT(0)) ? 1 : 0;
+		break;
+	case HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE:
+	case HDMI_TX_SCDC_CH0_LOCK_STATUS:
+		*val = (buf[0] & BIT(1)) ? 1 : 0;
+		break;
+	case HDMI_TX_SCDC_CH1_LOCK_STATUS:
+		*val = (buf[0] & BIT(2)) ? 1 : 0;
+		break;
+	case HDMI_TX_SCDC_CH2_LOCK_STATUS:
+		*val = (buf[0] & BIT(3)) ? 1 : 0;
+		break;
+	case HDMI_TX_SCDC_CH0_ERROR_COUNT:
+	case HDMI_TX_SCDC_CH1_ERROR_COUNT:
+	case HDMI_TX_SCDC_CH2_ERROR_COUNT:
+		/* Bit 7 of the high byte is the counter-valid flag */
+		if (buf[1] & BIT(7))
+			*val = (buf[0] | ((buf[1] & 0x7F) << 8));
+		else
+			*val = 0;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * hdmi_scdc_write() - write a Status and Control Data Channel (SCDC) field
+ * @ctrl: DDC controller used for the transaction
+ * @data_type: SCDC field to write (enum hdmi_tx_scdc_access_type)
+ * @val: value to write; only the low bit is used
+ *
+ * For the TMDS_CONFIG fields a read-modify-write is performed so the
+ * other bits of the register are preserved. Attempts to write a
+ * read-only field are rejected with -EINVAL.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int hdmi_scdc_write(struct hdmi_tx_ddc_ctrl *ctrl, u32 data_type, u32 val)
+{
+	struct hdmi_tx_ddc_data data = {0};
+	struct hdmi_tx_ddc_data rdata = {0};
+	int rc = 0;
+	u8 data_buf[2] = {0};
+	u8 read_val = 0;
+
+	if (!ctrl || !ctrl->io) {
+		pr_err("Bad Parameters\n");
+		return -EINVAL;
+	}
+
+	if (data_type >= HDMI_TX_SCDC_MAX) {
+		pr_err("Unsupported data type\n");
+		return -EINVAL;
+	}
+
+	data.what = hdmi_scdc_reg2string(data_type);
+	data.dev_addr = 0xA8;
+	data.retry = 1;
+	data.data_buf = data_buf;
+
+	switch (data_type) {
+	case HDMI_TX_SCDC_SCRAMBLING_ENABLE:
+	case HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE:
+		/* Read TMDS_CONFIG first so unrelated bits are preserved */
+		rdata.what = "TMDS CONFIG";
+		rdata.dev_addr = 0xA8;
+		rdata.retry = 2;
+		rdata.data_buf = &read_val;
+		rdata.data_len = 1;
+		rdata.offset = HDMI_SCDC_TMDS_CONFIG;
+		rdata.request_len = 1;
+		ctrl->ddc_data = rdata;
+		rc = hdmi_ddc_read(ctrl);
+		if (rc) {
+			pr_err("scdc read failed\n");
+			return rc;
+		}
+		if (data_type == HDMI_TX_SCDC_SCRAMBLING_ENABLE) {
+			/* scrambling enable lives in bit 0 */
+			data_buf[0] = ((((u8)(read_val & 0xFF)) & (~BIT(0))) |
+				       ((u8)(val & BIT(0))));
+		} else {
+			/* TMDS bit-clock ratio lives in bit 1 */
+			data_buf[0] = ((((u8)(read_val & 0xFF)) & (~BIT(1))) |
+				       (((u8)(val & BIT(0))) << 1));
+		}
+		data.data_len = 1;
+		data.request_len = 1;
+		data.offset = HDMI_SCDC_TMDS_CONFIG;
+		break;
+	case HDMI_TX_SCDC_READ_ENABLE:
+		data.data_len = 1;
+		data.request_len = 1;
+		data.offset = HDMI_SCDC_CONFIG_0;
+		data_buf[0] = (u8)(val & 0x1);
+		break;
+	default:
+		pr_err("Cannot write to read only reg (%d)\n",
+			data_type);
+		return -EINVAL;
+	}
+
+	ctrl->ddc_data = data;
+
+	rc = hdmi_ddc_write(ctrl);
+	if (rc) {
+		/* was "DDC Read failed" - this is the write path */
+		pr_err("DDC Write failed for %s\n", data.what);
+		return rc;
+	}
+
+	return 0;
+}
+
+/*
+ * hdmi_setup_ddc_timers() - program a hardware DDC polling timer
+ * @ctrl: DDC controller
+ * @type: timer to configure (enum hdmi_tx_ddc_timer_type)
+ * @to_in_num_lines: timeout expressed as a count of hsync lines
+ *
+ * Only the scrambler-status timer is currently supported; every other
+ * timer type is rejected.
+ *
+ * Return: 0 on success, -EINVAL on bad input or unsupported timer.
+ */
+int hdmi_setup_ddc_timers(struct hdmi_tx_ddc_ctrl *ctrl,
+			  u32 type, u32 to_in_num_lines)
+{
+	if (!ctrl) {
+		pr_err("Invalid parameters\n");
+		return -EINVAL;
+	}
+
+	if (type >= HDMI_TX_DDC_TIMER_MAX) {
+		pr_err("Invalid timer type %d\n", type);
+		return -EINVAL;
+	}
+
+	if (type != HDMI_TX_DDC_TIMER_SCRAMBLER_STATUS) {
+		pr_err("%d type not supported\n", type);
+		return -EINVAL;
+	}
+
+	hdmi_scrambler_status_timer_setup(ctrl, to_in_num_lines);
+
+	return 0;
+}
+
+/*
+ * hdmi_hdcp2p2_ddc_reset() - reset the HDCP 2.2 DDC state machine
+ * @ctrl: DDC controller
+ *
+ * Acknowledges pending RxStatus-related interrupts and pulses the
+ * timer-reset bit in HDMI_HDCP2P2_DDC_CTRL.
+ * NOTE(review): ctrl->io is dereferenced without a NULL check here;
+ * callers appear to guarantee it - confirm.
+ */
+static void hdmi_hdcp2p2_ddc_reset(struct hdmi_tx_ddc_ctrl *ctrl)
+{
+	u32 reg_val;
+
+	if (!ctrl) {
+		pr_err("Invalid parameters\n");
+		return;
+	}
+
+	/*
+	 * Clear acks for DDC_REQ, DDC_DONE, DDC_FAILED, RXSTATUS_READY,
+	 * RXSTATUS_MSG_SIZE
+	 */
+	reg_val = BIT(30) | BIT(17) | BIT(13) | BIT(9) | BIT(5) | BIT(1);
+	DSS_REG_W(ctrl->io, HDMI_DDC_INT_CTRL0, reg_val);
+
+	/* Reset DDC timers: assert BIT(0), then read back and deassert it */
+	reg_val = BIT(0) | DSS_REG_R(ctrl->io, HDMI_HDCP2P2_DDC_CTRL);
+	DSS_REG_W(ctrl->io, HDMI_HDCP2P2_DDC_CTRL, reg_val);
+	reg_val = DSS_REG_R(ctrl->io, HDMI_HDCP2P2_DDC_CTRL);
+	reg_val &= ~BIT(0);
+	DSS_REG_W(ctrl->io, HDMI_HDCP2P2_DDC_CTRL, reg_val);
+}
+
+/*
+ * hdmi_hdcp2p2_ddc_disable() - stop hardware HDCP 2.2 RxStatus polling
+ * @ctrl: DDC controller
+ *
+ * Resets the HDCP 2.2 DDC state machine, then turns off hardware DDC
+ * access to the sink's RxStatus register.
+ */
+void hdmi_hdcp2p2_ddc_disable(struct hdmi_tx_ddc_ctrl *ctrl)
+{
+	u32 hw_ddc_ctrl;
+
+	if (!ctrl) {
+		pr_err("Invalid parameters\n");
+		return;
+	}
+
+	hdmi_hdcp2p2_ddc_reset(ctrl);
+
+	/* Clear RXSTATUS_DDC_ENABLE (bits 1:0) to disable HW DDC access */
+	hw_ddc_ctrl = DSS_REG_R(ctrl->io, HDMI_HW_DDC_CTRL);
+	hw_ddc_ctrl &= ~(BIT(1) | BIT(0));
+	DSS_REG_W(ctrl->io, HDMI_HW_DDC_CTRL, hw_ddc_ctrl);
+}
+
+/*
+ * hdmi_hdcp2p2_ddc_read_rxstatus() - read the sink's HDCP 2.2 RxStatus
+ * @ctrl: DDC controller; ctrl->hdcp2p2_ddc_data carries parameters and
+ *	  receives results
+ *
+ * Programs the hardware DDC engine to poll the sink's RxStatus register
+ * in SW_TRIGGER mode, then, when data->wait is set, busy-waits up to
+ * data->timeout_ms for the ISR to flag completion and disables the HW
+ * polling again before returning.
+ *
+ * Return: 0 on success, -EINVAL on bad input, -ETIMEDOUT if the
+ * busy-wait expires.
+ */
+int hdmi_hdcp2p2_ddc_read_rxstatus(struct hdmi_tx_ddc_ctrl *ctrl)
+{
+	u32 reg_val;
+	u32 intr_en_mask;
+	u32 timeout;
+	u32 timer;
+	int rc = 0;
+	struct hdmi_tx_hdcp2p2_ddc_data *data;
+	int busy_wait_us;
+
+	if (!ctrl) {
+		pr_err("Invalid ctrl data\n");
+		return -EINVAL;
+	}
+
+	data = &ctrl->hdcp2p2_ddc_data;
+	/* NOTE(review): this check can never fail - the address of an
+	 * embedded struct member is always non-NULL.
+	 */
+	if (!data) {
+		pr_err("Invalid ddc data\n");
+		return -EINVAL;
+	}
+
+	rc = hdmi_ddc_clear_irq(ctrl, "rxstatus");
+	if (rc)
+		return rc;
+
+	intr_en_mask = data->intr_mask;
+	intr_en_mask |= BIT(HDCP2P2_RXSTATUS_DDC_FAILED_INTR_MASK);
+
+	/* Disable short read for now, sinks don't support it */
+	reg_val = DSS_REG_R(ctrl->io, HDMI_HDCP2P2_DDC_CTRL);
+	reg_val |= BIT(4);
+	DSS_REG_W(ctrl->io, HDMI_HDCP2P2_DDC_CTRL, reg_val);
+
+	/*
+	 * Setup the DDC timers for HDMI_HDCP2P2_DDC_TIMER_CTRL1 and
+	 *  HDMI_HDCP2P2_DDC_TIMER_CTRL2.
+	 * Following are the timers:
+	 * 1. DDC_REQUEST_TIMER: Timeout in hsyncs in which to wait for the
+	 *	HDCP 2.2 sink to respond to an RxStatus request
+	 * 2. DDC_URGENT_TIMER: Time period in hsyncs to issue an urgent flag
+	 *	when an RxStatus DDC request is made but not accepted by I2C
+	 *	engine
+	 * 3. DDC_TIMEOUT_TIMER: Timeout in hsyncs which starts counting when
+	 *	a request is made and stops when it is accepted by DDC arbiter
+	 */
+	timeout = data->timeout_hsync;
+	timer = data->periodic_timer_hsync;
+	pr_debug("timeout: %d hsyncs, timer %d hsync\n", timeout, timer);
+
+	DSS_REG_W(ctrl->io, HDMI_HDCP2P2_DDC_TIMER_CTRL, timer);
+
+	/* Set both urgent and hw-timeout fields to the same value */
+	DSS_REG_W(ctrl->io, HDMI_HDCP2P2_DDC_TIMER_CTRL2,
+		(timeout << 16 | timeout));
+
+	/* enable interrupts */
+	reg_val = intr_en_mask;
+	/* Clear interrupt status bits */
+	reg_val |= intr_en_mask >> 1;
+
+	pr_debug("writing HDMI_DDC_INT_CTRL0 0x%x\n", reg_val);
+	DSS_REG_W(ctrl->io, HDMI_DDC_INT_CTRL0, reg_val);
+
+	reg_val = DSS_REG_R(ctrl->io, HDMI_DDC_INT_CTRL5);
+	/* clear and enable RxStatus read timeout */
+	reg_val |= BIT(2) | BIT(1);
+
+	DSS_REG_W(ctrl->io, HDMI_DDC_INT_CTRL5, reg_val);
+
+	/*
+	 * Enable hardware DDC access to RxStatus register
+	 *
+	 * HDMI_HW_DDC_CTRL:Bits 1:0 (RXSTATUS_DDC_ENABLE) read like this:
+	 *
+	 * 0 = disable HW controlled DDC access to RxStatus
+	 * 1 = automatic on when HDCP 2.2 is authenticated and loop based on
+	 * request timer (i.e. the hardware will loop automatically)
+	 * 2 = force on and loop based on request timer (hardware will loop)
+	 * 3 = enable by sw trigger and loop until interrupt is generated for
+	 * RxStatus.reauth_req, RxStatus.ready or RxStatus.message_Size.
+	 *
+	 * Depending on the value of ddc_data::poll_sink, we make the decision
+	 * to use either SW_TRIGGER(3) (poll_sink = false) which means that the
+	 * hardware will poll sink and generate interrupt when sink responds,
+	 * or use AUTOMATIC_LOOP(1) (poll_sink = true) which will poll the sink
+	 * based on request timer
+	 */
+	reg_val = DSS_REG_R(ctrl->io, HDMI_HW_DDC_CTRL);
+	reg_val &= ~(BIT(1) | BIT(0));
+
+	busy_wait_us = data->timeout_ms * HDMI_MS_TO_US;
+	atomic_set(&ctrl->rxstatus_busy_wait_done, 0);
+
+	/* read method: HDCP2P2_RXSTATUS_HW_DDC_SW_TRIGGER */
+	reg_val |= BIT(1) | BIT(0);
+	DSS_REG_W(ctrl->io, HDMI_HW_DDC_CTRL, reg_val);
+	DSS_REG_W(ctrl->io, HDMI_HDCP2P2_DDC_SW_TRIGGER, 1);
+
+	if (data->wait) {
+		/* Busy-wait until the ISR flags completion or the ms
+		 * budget runs out; remaining time is reported back in
+		 * data->timeout_left (ms).
+		 */
+		while (busy_wait_us > 0 &&
+			!atomic_read(&ctrl->rxstatus_busy_wait_done)) {
+			udelay(HDMI_BUSY_WAIT_DELAY_US);
+			busy_wait_us -= HDMI_BUSY_WAIT_DELAY_US;
+		};
+
+		if (busy_wait_us < 0)
+			busy_wait_us = 0;
+
+		data->timeout_left = busy_wait_us / HDMI_MS_TO_US;
+
+		if (!data->timeout_left) {
+			pr_err("sw ddc rxstatus timeout\n");
+			rc = -ETIMEDOUT;
+		}
+
+		hdmi_hdcp2p2_ddc_disable(ctrl);
+	}
+
+	return rc;
+}
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_util.h b/drivers/video/fbdev/msm/mdss_hdmi_util.h
new file mode 100644
index 0000000..d26be99
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_hdmi_util.h
@@ -0,0 +1,514 @@
+/* Copyright (c) 2010-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __HDMI_UTIL_H__
+#define __HDMI_UTIL_H__
+#include <linux/mdss_io_util.h>
+#include "video/msm_hdmi_modes.h"
+
+/* HDMI_TX Registers */
+#define HDMI_CTRL                        (0x00000000)
+#define HDMI_TEST_PATTERN                (0x00000010)
+#define HDMI_RANDOM_PATTERN              (0x00000014)
+#define HDMI_PKT_BLK_CTRL                (0x00000018)
+#define HDMI_STATUS                      (0x0000001C)
+#define HDMI_AUDIO_PKT_CTRL              (0x00000020)
+#define HDMI_ACR_PKT_CTRL                (0x00000024)
+#define HDMI_VBI_PKT_CTRL                (0x00000028)
+#define HDMI_INFOFRAME_CTRL0             (0x0000002C)
+#define HDMI_INFOFRAME_CTRL1             (0x00000030)
+#define HDMI_GEN_PKT_CTRL                (0x00000034)
+#define HDMI_ACP                         (0x0000003C)
+#define HDMI_GC                          (0x00000040)
+#define HDMI_AUDIO_PKT_CTRL2             (0x00000044)
+#define HDMI_ISRC1_0                     (0x00000048)
+#define HDMI_ISRC1_1                     (0x0000004C)
+#define HDMI_ISRC1_2                     (0x00000050)
+#define HDMI_ISRC1_3                     (0x00000054)
+#define HDMI_ISRC1_4                     (0x00000058)
+#define HDMI_ISRC2_0                     (0x0000005C)
+#define HDMI_ISRC2_1                     (0x00000060)
+#define HDMI_ISRC2_2                     (0x00000064)
+#define HDMI_ISRC2_3                     (0x00000068)
+#define HDMI_AVI_INFO0                   (0x0000006C)
+#define HDMI_AVI_INFO1                   (0x00000070)
+#define HDMI_AVI_INFO2                   (0x00000074)
+#define HDMI_AVI_INFO3                   (0x00000078)
+#define HDMI_MPEG_INFO0                  (0x0000007C)
+#define HDMI_MPEG_INFO1                  (0x00000080)
+#define HDMI_GENERIC0_HDR                (0x00000084)
+#define HDMI_GENERIC0_0                  (0x00000088)
+#define HDMI_GENERIC0_1                  (0x0000008C)
+#define HDMI_GENERIC0_2                  (0x00000090)
+#define HDMI_GENERIC0_3                  (0x00000094)
+#define HDMI_GENERIC0_4                  (0x00000098)
+#define HDMI_GENERIC0_5                  (0x0000009C)
+#define HDMI_GENERIC0_6                  (0x000000A0)
+#define HDMI_GENERIC1_HDR                (0x000000A4)
+#define HDMI_GENERIC1_0                  (0x000000A8)
+#define HDMI_GENERIC1_1                  (0x000000AC)
+#define HDMI_GENERIC1_2                  (0x000000B0)
+#define HDMI_GENERIC1_3                  (0x000000B4)
+#define HDMI_GENERIC1_4                  (0x000000B8)
+#define HDMI_GENERIC1_5                  (0x000000BC)
+#define HDMI_GENERIC1_6                  (0x000000C0)
+#define HDMI_ACR_32_0                    (0x000000C4)
+#define HDMI_ACR_32_1                    (0x000000C8)
+#define HDMI_ACR_44_0                    (0x000000CC)
+#define HDMI_ACR_44_1                    (0x000000D0)
+#define HDMI_ACR_48_0                    (0x000000D4)
+#define HDMI_ACR_48_1                    (0x000000D8)
+#define HDMI_ACR_STATUS_0                (0x000000DC)
+#define HDMI_ACR_STATUS_1                (0x000000E0)
+#define HDMI_AUDIO_INFO0                 (0x000000E4)
+#define HDMI_AUDIO_INFO1                 (0x000000E8)
+#define HDMI_CS_60958_0                  (0x000000EC)
+#define HDMI_CS_60958_1                  (0x000000F0)
+#define HDMI_RAMP_CTRL0                  (0x000000F8)
+#define HDMI_RAMP_CTRL1                  (0x000000FC)
+#define HDMI_RAMP_CTRL2                  (0x00000100)
+#define HDMI_RAMP_CTRL3                  (0x00000104)
+#define HDMI_CS_60958_2                  (0x00000108)
+#define HDMI_HDCP_CTRL2                  (0x0000010C)
+#define HDMI_HDCP_CTRL                   (0x00000110)
+#define HDMI_HDCP_DEBUG_CTRL             (0x00000114)
+#define HDMI_HDCP_INT_CTRL               (0x00000118)
+#define HDMI_HDCP_LINK0_STATUS           (0x0000011C)
+#define HDMI_HDCP_DDC_CTRL_0             (0x00000120)
+#define HDMI_HDCP_DDC_CTRL_1             (0x00000124)
+#define HDMI_HDCP_DDC_STATUS             (0x00000128)
+#define HDMI_HDCP_ENTROPY_CTRL0          (0x0000012C)
+#define HDMI_HDCP_RESET                  (0x00000130)
+#define HDMI_HDCP_RCVPORT_DATA0          (0x00000134)
+#define HDMI_HDCP_RCVPORT_DATA1          (0x00000138)
+#define HDMI_HDCP_RCVPORT_DATA2_0        (0x0000013C)
+#define HDMI_HDCP_RCVPORT_DATA2_1        (0x00000140)
+#define HDMI_HDCP_RCVPORT_DATA3          (0x00000144)
+#define HDMI_HDCP_RCVPORT_DATA4          (0x00000148)
+#define HDMI_HDCP_RCVPORT_DATA5          (0x0000014C)
+#define HDMI_HDCP_RCVPORT_DATA6          (0x00000150)
+#define HDMI_HDCP_RCVPORT_DATA7          (0x00000154)
+#define HDMI_HDCP_RCVPORT_DATA8          (0x00000158)
+#define HDMI_HDCP_RCVPORT_DATA9          (0x0000015C)
+#define HDMI_HDCP_RCVPORT_DATA10         (0x00000160)
+#define HDMI_HDCP_RCVPORT_DATA11         (0x00000164)
+#define HDMI_HDCP_RCVPORT_DATA12         (0x00000168)
+#define HDMI_VENSPEC_INFO0               (0x0000016C)
+#define HDMI_VENSPEC_INFO1               (0x00000170)
+#define HDMI_VENSPEC_INFO2               (0x00000174)
+#define HDMI_VENSPEC_INFO3               (0x00000178)
+#define HDMI_VENSPEC_INFO4               (0x0000017C)
+#define HDMI_VENSPEC_INFO5               (0x00000180)
+#define HDMI_VENSPEC_INFO6               (0x00000184)
+#define HDMI_HDCP_DEBUG                  (0x00000194)
+#define HDMI_TMDS_CTRL_CHAR              (0x0000019C)
+#define HDMI_TMDS_CTRL_SEL               (0x000001A4)
+#define HDMI_TMDS_SYNCCHAR01             (0x000001A8)
+#define HDMI_TMDS_SYNCCHAR23             (0x000001AC)
+#define HDMI_TMDS_DEBUG                  (0x000001B4)
+#define HDMI_TMDS_CTL_BITS               (0x000001B8)
+#define HDMI_TMDS_DCBAL_CTRL             (0x000001BC)
+#define HDMI_TMDS_DCBAL_CHAR             (0x000001C0)
+#define HDMI_TMDS_CTL01_GEN              (0x000001C8)
+#define HDMI_TMDS_CTL23_GEN              (0x000001CC)
+#define HDMI_AUDIO_CFG                   (0x000001D0)
+#define HDMI_DEBUG                       (0x00000204)
+#define HDMI_USEC_REFTIMER               (0x00000208)
+#define HDMI_DDC_CTRL                    (0x0000020C)
+#define HDMI_DDC_ARBITRATION             (0x00000210)
+#define HDMI_DDC_INT_CTRL                (0x00000214)
+#define HDMI_DDC_SW_STATUS               (0x00000218)
+#define HDMI_DDC_HW_STATUS               (0x0000021C)
+#define HDMI_DDC_SPEED                   (0x00000220)
+#define HDMI_DDC_SETUP                   (0x00000224)
+#define HDMI_DDC_TRANS0                  (0x00000228)
+#define HDMI_DDC_TRANS1                  (0x0000022C)
+#define HDMI_DDC_TRANS2                  (0x00000230)
+#define HDMI_DDC_TRANS3                  (0x00000234)
+#define HDMI_DDC_DATA                    (0x00000238)
+#define HDMI_HDCP_SHA_CTRL               (0x0000023C)
+#define HDMI_HDCP_SHA_STATUS             (0x00000240)
+#define HDMI_HDCP_SHA_DATA               (0x00000244)
+#define HDMI_HDCP_SHA_DBG_M0_0           (0x00000248)
+#define HDMI_HDCP_SHA_DBG_M0_1           (0x0000024C)
+#define HDMI_HPD_INT_STATUS              (0x00000250)
+#define HDMI_HPD_INT_CTRL                (0x00000254)
+#define HDMI_HPD_CTRL                    (0x00000258)
+#define HDMI_HDCP_ENTROPY_CTRL1          (0x0000025C)
+#define HDMI_HDCP_SW_UPPER_AN            (0x00000260)
+#define HDMI_HDCP_SW_LOWER_AN            (0x00000264)
+#define HDMI_CRC_CTRL                    (0x00000268)
+#define HDMI_VID_CRC                     (0x0000026C)
+#define HDMI_AUD_CRC                     (0x00000270)
+#define HDMI_VBI_CRC                     (0x00000274)
+#define HDMI_DDC_REF                     (0x0000027C)
+#define HDMI_HDCP_SW_UPPER_AKSV          (0x00000284)
+#define HDMI_HDCP_SW_LOWER_AKSV          (0x00000288)
+#define HDMI_CEC_CTRL                    (0x0000028C)
+#define HDMI_CEC_WR_DATA                 (0x00000290)
+#define HDMI_CEC_RETRANSMIT              (0x00000294)
+#define HDMI_CEC_STATUS                  (0x00000298)
+#define HDMI_CEC_INT                     (0x0000029C)
+#define HDMI_CEC_ADDR                    (0x000002A0)
+#define HDMI_CEC_TIME                    (0x000002A4)
+#define HDMI_CEC_REFTIMER                (0x000002A8)
+#define HDMI_CEC_RD_DATA                 (0x000002AC)
+#define HDMI_CEC_RD_FILTER               (0x000002B0)
+#define HDMI_ACTIVE_H                    (0x000002B4)
+#define HDMI_ACTIVE_V                    (0x000002B8)
+#define HDMI_ACTIVE_V_F2                 (0x000002BC)
+#define HDMI_TOTAL                       (0x000002C0)
+#define HDMI_V_TOTAL_F2                  (0x000002C4)
+#define HDMI_FRAME_CTRL                  (0x000002C8)
+#define HDMI_AUD_INT                     (0x000002CC)
+#define HDMI_DEBUG_BUS_CTRL              (0x000002D0)
+#define HDMI_PHY_CTRL                    (0x000002D4)
+#define HDMI_CEC_WR_RANGE                (0x000002DC)
+#define HDMI_CEC_RD_RANGE                (0x000002E0)
+#define HDMI_VERSION                     (0x000002E4)
+#define HDMI_BIST_ENABLE                 (0x000002F4)
+#define HDMI_TIMING_ENGINE_EN            (0x000002F8)
+#define HDMI_INTF_CONFIG                 (0x000002FC)
+#define HDMI_HSYNC_CTL                   (0x00000300)
+#define HDMI_VSYNC_PERIOD_F0             (0x00000304)
+#define HDMI_VSYNC_PERIOD_F1             (0x00000308)
+#define HDMI_VSYNC_PULSE_WIDTH_F0        (0x0000030C)
+#define HDMI_VSYNC_PULSE_WIDTH_F1        (0x00000310)
+#define HDMI_DISPLAY_V_START_F0          (0x00000314)
+#define HDMI_DISPLAY_V_START_F1          (0x00000318)
+#define HDMI_DISPLAY_V_END_F0            (0x0000031C)
+#define HDMI_DISPLAY_V_END_F1            (0x00000320)
+#define HDMI_ACTIVE_V_START_F0           (0x00000324)
+#define HDMI_ACTIVE_V_START_F1           (0x00000328)
+#define HDMI_ACTIVE_V_END_F0             (0x0000032C)
+#define HDMI_ACTIVE_V_END_F1             (0x00000330)
+#define HDMI_DISPLAY_HCTL                (0x00000334)
+#define HDMI_ACTIVE_HCTL                 (0x00000338)
+#define HDMI_HSYNC_SKEW                  (0x0000033C)
+#define HDMI_POLARITY_CTL                (0x00000340)
+#define HDMI_TPG_MAIN_CONTROL            (0x00000344)
+#define HDMI_TPG_VIDEO_CONFIG            (0x00000348)
+#define HDMI_TPG_COMPONENT_LIMITS        (0x0000034C)
+#define HDMI_TPG_RECTANGLE               (0x00000350)
+#define HDMI_TPG_INITIAL_VALUE           (0x00000354)
+#define HDMI_TPG_BLK_WHT_PATTERN_FRAMES  (0x00000358)
+#define HDMI_TPG_RGB_MAPPING             (0x0000035C)
+#define HDMI_CEC_COMPL_CTL               (0x00000360)
+#define HDMI_CEC_RD_START_RANGE          (0x00000364)
+#define HDMI_CEC_RD_TOTAL_RANGE          (0x00000368)
+#define HDMI_CEC_RD_ERR_RESP_LO          (0x0000036C)
+#define HDMI_CEC_WR_CHECK_CONFIG         (0x00000370)
+#define HDMI_INTERNAL_TIMING_MODE        (0x00000374)
+#define HDMI_CTRL_SW_RESET               (0x00000378)
+#define HDMI_CTRL_AUDIO_RESET            (0x0000037C)
+#define HDMI_SCRATCH                     (0x00000380)
+#define HDMI_CLK_CTRL                    (0x00000384)
+#define HDMI_CLK_ACTIVE                  (0x00000388)
+#define HDMI_VBI_CFG                     (0x0000038C)
+#define HDMI_DDC_INT_CTRL0               (0x00000430)
+#define HDMI_DDC_INT_CTRL1               (0x00000434)
+#define HDMI_DDC_INT_CTRL2               (0x00000438)
+#define HDMI_DDC_INT_CTRL3               (0x0000043C)
+#define HDMI_DDC_INT_CTRL4               (0x00000440)
+#define HDMI_DDC_INT_CTRL5               (0x00000444)
+#define HDMI_HDCP2P2_DDC_CTRL            (0x0000044C)
+#define HDMI_HDCP2P2_DDC_TIMER_CTRL      (0x00000450)
+#define HDMI_HDCP2P2_DDC_TIMER_CTRL2     (0x00000454)
+#define HDMI_HDCP2P2_DDC_STATUS          (0x00000458)
+#define HDMI_SCRAMBLER_STATUS_DDC_CTRL   (0x00000464)
+#define HDMI_SCRAMBLER_STATUS_DDC_TIMER_CTRL    (0x00000468)
+#define HDMI_SCRAMBLER_STATUS_DDC_TIMER_CTRL2   (0x0000046C)
+#define HDMI_SCRAMBLER_STATUS_DDC_STATUS        (0x00000470)
+#define HDMI_SCRAMBLER_STATUS_DDC_TIMER_STATUS  (0x00000474)
+#define HDMI_SCRAMBLER_STATUS_DDC_TIMER_STATUS2 (0x00000478)
+#define HDMI_HW_DDC_CTRL                 (0x000004CC)
+#define HDMI_HDCP2P2_DDC_SW_TRIGGER      (0x000004D0)
+#define HDMI_HDCP_STATUS                 (0x00000500)
+#define HDMI_HDCP_INT_CTRL2              (0x00000504)
+
+/* HDMI PHY Registers */
+#define HDMI_PHY_ANA_CFG0                (0x00000000)
+#define HDMI_PHY_ANA_CFG1                (0x00000004)
+#define HDMI_PHY_PD_CTRL0                (0x00000010)
+#define HDMI_PHY_PD_CTRL1                (0x00000014)
+#define HDMI_PHY_BIST_CFG0               (0x00000034)
+#define HDMI_PHY_BIST_PATN0              (0x0000003C)
+#define HDMI_PHY_BIST_PATN1              (0x00000040)
+#define HDMI_PHY_BIST_PATN2              (0x00000044)
+#define HDMI_PHY_BIST_PATN3              (0x00000048)
+
+/* QFPROM Registers for HDMI/HDCP */
+#define QFPROM_RAW_FEAT_CONFIG_ROW0_LSB  (0x000000F8)
+#define QFPROM_RAW_FEAT_CONFIG_ROW0_MSB  (0x000000FC)
+#define QFPROM_RAW_VERSION_4             (0x000000A8)
+#define SEC_CTRL_HW_VERSION              (0x00006000)
+#define HDCP_KSV_LSB                     (0x000060D8)
+#define HDCP_KSV_MSB                     (0x000060DC)
+#define HDCP_KSV_VERSION_4_OFFSET        (0x00000014)
+
+/* SEC_CTRL version that supports HDCP SEL */
+#define HDCP_SEL_MIN_SEC_VERSION         (0x50010000)
+
+#define TOP_AND_BOTTOM		(1 << HDMI_S3D_TOP_AND_BOTTOM)
+#define FRAME_PACKING		(1 << HDMI_S3D_FRAME_PACKING)
+#define SIDE_BY_SIDE_HALF	(1 << HDMI_S3D_SIDE_BY_SIDE)
+
+#define LPASS_LPAIF_RDDMA_CTL0           (0xFE152000)
+#define LPASS_LPAIF_RDDMA_PER_CNT0       (0x00000014)
+
+/* TX major version that supports scrambling */
+#define HDMI_TX_SCRAMBLER_MIN_TX_VERSION 0x04
+
+/* TX major versions */
+#define HDMI_TX_VERSION_4         4
+#define HDMI_TX_VERSION_3         3
+
+/* HDMI SCDC register offsets */
+#define HDMI_SCDC_UPDATE_0              0x10
+#define HDMI_SCDC_UPDATE_1              0x11
+#define HDMI_SCDC_TMDS_CONFIG           0x20
+#define HDMI_SCDC_SCRAMBLER_STATUS      0x21
+#define HDMI_SCDC_CONFIG_0              0x30
+#define HDMI_SCDC_STATUS_FLAGS_0        0x40
+#define HDMI_SCDC_STATUS_FLAGS_1        0x41
+#define HDMI_SCDC_ERR_DET_0_L           0x50
+#define HDMI_SCDC_ERR_DET_0_H           0x51
+#define HDMI_SCDC_ERR_DET_1_L           0x52
+#define HDMI_SCDC_ERR_DET_1_H           0x53
+#define HDMI_SCDC_ERR_DET_2_L           0x54
+#define HDMI_SCDC_ERR_DET_2_H           0x55
+#define HDMI_SCDC_ERR_DET_CHECKSUM      0x56
+
+/* HDCP secure registers directly accessible to HLOS since HDMI controller
+ * version major version 4.0
+ */
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA0  (0x00000004)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA1  (0x00000008)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA7  (0x0000000C)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA8  (0x00000010)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA9  (0x00000014)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA10 (0x00000018)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA11 (0x0000001C)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA12 (0x00000020)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_SHA_CTRL       (0x00000024)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_SHA_DATA       (0x00000028)
+
+/*
+ * Offsets in HDMI_DDC_INT_CTRL0 register
+ *
+ * The HDMI_DDC_INT_CTRL0 register is intended for HDCP 2.2 RxStatus
+ * register manipulation. It reads like this:
+ *
+ * Bit 31: RXSTATUS_MESSAGE_SIZE_MASK (1 = generate interrupt when size > 0)
+ * Bit 30: RXSTATUS_MESSAGE_SIZE_ACK  (1 = Acknowledge message size intr)
+ * Bits 29-20: RXSTATUS_MESSAGE_SIZE  (Actual size of message available)
+ * Bits 19-18: RXSTATUS_READY_MASK    (1 = generate interrupt when ready = 1
+ *				       2 = generate interrupt when ready = 0)
+ * Bit 17: RXSTATUS_READY_ACK         (1 = Acknowledge ready bit interrupt)
+ * Bit 16: RXSTATUS_READY	      (1 = Rxstatus ready bit read is 1)
+ * Bit 15: RXSTATUS_READY_NOT         (1 = Rxstatus ready bit read is 0)
+ * Bit 14: RXSTATUS_REAUTH_REQ_MASK   (1 = generate interrupt when reauth is
+ *					   requested by sink)
+ * Bit 13: RXSTATUS_REAUTH_REQ_ACK    (1 = Acknowledge Reauth req interrupt)
+ * Bit 12: RXSTATUS_REAUTH_REQ        (1 = Rxstatus reauth req bit read is 1)
+ * Bit 10: RXSTATUS_DDC_FAILED_MASK   (1 = generate interrupt when DDC
+ *					   transaction fails)
+ * Bit 9:  RXSTATUS_DDC_FAILED_ACK    (1 = Acknowledge ddc failure interrupt)
+ * Bit 8:  RXSTATUS_DDC_FAILED	      (1 = DDC transaction failed)
+ * Bit 6:  RXSTATUS_DDC_DONE_MASK     (1 = generate interrupt when DDC
+ *					   transaction completes)
+ * Bit 5:  RXSTATUS_DDC_DONE_ACK      (1 = Acknowledge ddc done interrupt)
+ * Bit 4:  RXSTATUS_DDC_DONE	      (1 = DDC transaction is done)
+ * Bit 2:  RXSTATUS_DDC_REQ_MASK      (1 = generate interrupt when DDC Read
+ *					   request for RXstatus is made)
+ * Bit 1:  RXSTATUS_DDC_REQ_ACK       (1 = Acknowledge Rxstatus read interrupt)
+ * Bit 0:  RXSTATUS_DDC_REQ           (1 = RXStatus DDC read request is made)
+ *
+ */
+#define HDCP2P2_RXSTATUS_MESSAGE_SIZE_SHIFT         20
+#define HDCP2P2_RXSTATUS_MESSAGE_SIZE_MASK          0x3ff00000
+#define HDCP2P2_RXSTATUS_MESSAGE_SIZE_ACK_SHIFT     30
+#define HDCP2P2_RXSTATUS_MESSAGE_SIZE_INTR_SHIFT    31
+
+#define HDCP2P2_RXSTATUS_REAUTH_REQ_SHIFT           12
+#define HDCP2P2_RXSTATUS_REAUTH_REQ_MASK             1
+#define HDCP2P2_RXSTATUS_REAUTH_REQ_ACK_SHIFT	    13
+#define HDCP2P2_RXSTATUS_REAUTH_REQ_INTR_SHIFT	    14
+
+#define HDCP2P2_RXSTATUS_READY_SHIFT		    16
+#define HDCP2P2_RXSTATUS_READY_MASK                  1
+#define HDCP2P2_RXSTATUS_READY_ACK_SHIFT            17
+#define HDCP2P2_RXSTATUS_READY_INTR_SHIFT           18
+#define HDCP2P2_RXSTATUS_READY_INTR_MASK            18
+
+#define HDCP2P2_RXSTATUS_DDC_FAILED_SHIFT           8
+#define HDCP2P2_RXSTATUS_DDC_FAILED_ACKSHIFT        9
+#define HDCP2P2_RXSTATUS_DDC_FAILED_INTR_MASK       10
+#define HDCP2P2_RXSTATUS_DDC_DONE                   6
+
+/*
+ * Bits 1:0 in HDMI_HW_DDC_CTRL that dictate how the HDCP 2.2 RxStatus will be
+ * read by the hardware
+ */
+#define HDCP2P2_RXSTATUS_HW_DDC_DISABLE             0
+#define HDCP2P2_RXSTATUS_HW_DDC_AUTOMATIC_LOOP      1
+#define HDCP2P2_RXSTATUS_HW_DDC_FORCE_LOOP          2
+#define HDCP2P2_RXSTATUS_HW_DDC_SW_TRIGGER          3
+
+/* default hsyncs for 4k@60 for 200ms */
+#define HDMI_DEFAULT_TIMEOUT_HSYNC 28571
+
+/* Bit flags for optional HDMI TX sub-features; HDMI_TX_FEAT_MAX is the
+ * union of all of them.
+ */
+enum hdmi_tx_feature_type {
+	HDMI_TX_FEAT_EDID     = BIT(0),
+	HDMI_TX_FEAT_HDCP     = BIT(1),
+	HDMI_TX_FEAT_HDCP2P2  = BIT(2),
+	HDMI_TX_FEAT_CEC_HW   = BIT(3),
+	HDMI_TX_FEAT_CEC_ABST = BIT(4),
+	HDMI_TX_FEAT_PANEL    = BIT(5),
+	HDMI_TX_FEAT_MAX      = HDMI_TX_FEAT_EDID | HDMI_TX_FEAT_HDCP |
+				HDMI_TX_FEAT_HDCP2P2 | HDMI_TX_FEAT_CEC_HW |
+				HDMI_TX_FEAT_CEC_ABST | HDMI_TX_FEAT_PANEL
+};
+
+/* SCDC fields accessible through hdmi_scdc_read()/hdmi_scdc_write() */
+enum hdmi_tx_scdc_access_type {
+	HDMI_TX_SCDC_SCRAMBLING_STATUS,
+	HDMI_TX_SCDC_SCRAMBLING_ENABLE,
+	HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE,
+	HDMI_TX_SCDC_CLOCK_DET_STATUS,
+	HDMI_TX_SCDC_CH0_LOCK_STATUS,
+	HDMI_TX_SCDC_CH1_LOCK_STATUS,
+	HDMI_TX_SCDC_CH2_LOCK_STATUS,
+	HDMI_TX_SCDC_CH0_ERROR_COUNT,
+	HDMI_TX_SCDC_CH1_ERROR_COUNT,
+	HDMI_TX_SCDC_CH2_ERROR_COUNT,
+	HDMI_TX_SCDC_READ_ENABLE,
+	HDMI_TX_SCDC_MAX,
+};
+
+/* Hardware DDC timer types accepted by hdmi_setup_ddc_timers() */
+enum hdmi_tx_ddc_timer_type {
+	HDMI_TX_DDC_TIMER_HDCP2P2_RD_MSG,
+	HDMI_TX_DDC_TIMER_SCRAMBLER_STATUS,
+	HDMI_TX_DDC_TIMER_UPDATE_FLAGS,
+	HDMI_TX_DDC_TIMER_STATUS_FLAGS,
+	HDMI_TX_DDC_TIMER_CED,
+	HDMI_TX_DDC_TIMER_MAX,
+};
+
+/* Parameters for a single DDC (I2C-over-HDMI) transaction */
+struct hdmi_tx_ddc_data {
+	char *what;		/* human-readable tag used in log messages */
+	u8 *data_buf;		/* payload buffer (read into / written from) */
+	u32 data_len;		/* number of payload bytes */
+	u32 dev_addr;		/* I2C slave address (e.g. 0xA8 for SCDC) */
+	u32 offset;		/* register offset within the slave device */
+	u32 request_len;	/* bytes requested per transaction */
+	u32 retry_align;	/* not used in visible code - TODO confirm */
+	u32 hard_timeout;	/* busy-wait budget in ms (0 = use completion) */
+	u32 timeout_left;	/* ms remaining when the transfer finished */
+	int retry;		/* number of retries on failure */
+};
+
+/* HDMI_DDC_INT_CTRL0 interrupt-enable bits for HDCP 2.2 RxStatus fields */
+enum hdmi_tx_hdcp2p2_rxstatus_intr_mask {
+	RXSTATUS_MESSAGE_SIZE = BIT(31),
+	RXSTATUS_READY = BIT(18),
+	RXSTATUS_REAUTH_REQ = BIT(14),
+};
+
+/* Parameters and results for hardware-assisted HDCP 2.2 RxStatus polling */
+struct hdmi_tx_hdcp2p2_ddc_data {
+	enum hdmi_tx_hdcp2p2_rxstatus_intr_mask intr_mask; /* intrs to enable */
+	u32 timeout_ms;		/* software busy-wait budget in ms */
+	u32 timeout_hsync;	/* hardware timeout in hsync counts */
+	u32 periodic_timer_hsync; /* polling period in hsync counts */
+	u32 timeout_left;	/* ms remaining after the wait loop */
+	u32 read_method;	/* HDCP2P2_RXSTATUS_HW_DDC_* - TODO confirm */
+	u32 message_size;	/* presumably RxStatus message size - verify */
+	/* The flags below look like ISR-reported status - confirm against
+	 * the DDC ISR (not visible here).
+	 */
+	bool encryption_ready;
+	bool ready;
+	bool reauth_req;
+	bool ddc_max_retries_fail;
+	bool ddc_done;
+	bool ddc_read_req;
+	bool ddc_timeout;
+	bool wait;		/* busy-wait for completion in rxstatus read */
+	int irq_wait_count;
+	void (*link_cb)(void *data);	/* presumably link-event callback */
+	void *link_data;	/* opaque argument passed to link_cb */
+};
+
+/* Per-controller DDC context shared between API callers and the ISR */
+struct hdmi_tx_ddc_ctrl {
+	atomic_t write_busy_wait_done;	/* cleared before a hard-timeout
+					 * write; presumably set by the ISR
+					 * on completion - verify
+					 */
+	atomic_t read_busy_wait_done;	/* same, for hard-timeout reads */
+	atomic_t rxstatus_busy_wait_done; /* same, for RxStatus polling */
+	struct dss_io_data *io;		/* mapped HDMI controller registers */
+	struct completion ddc_sw_done;	/* signaled on DDC transfer done */
+	struct hdmi_tx_ddc_data ddc_data; /* current transaction parameters */
+	struct hdmi_tx_hdcp2p2_ddc_data hdcp2p2_ddc_data; /* HDCP 2.2 state */
+};
+
+
+/* Downstream device constraints - presumably from a bridge/converter;
+ * TODO confirm against callers of hdmi_get_video_id_code().
+ */
+struct hdmi_util_ds_data {
+	bool ds_registered;	/* a downstream device is registered */
+	u32 ds_max_clk;		/* maximum pixel clock it supports */
+};
+
+/* Total vertical lines per frame for timing @t, or 0 when @t is NULL. */
+static inline int hdmi_tx_get_v_total(const struct msm_hdmi_mode_timing_info *t)
+{
+	if (!t)
+		return 0;
+
+	return t->active_v + t->front_porch_v + t->pulse_width_v +
+		t->back_porch_v;
+}
+
+/* Total horizontal pixels per line for timing @t, or 0 when @t is NULL. */
+static inline int hdmi_tx_get_h_total(const struct msm_hdmi_mode_timing_info *t)
+{
+	if (!t)
+		return 0;
+
+	return t->active_h + t->front_porch_h + t->pulse_width_h +
+		t->back_porch_h;
+}
+
+/* video timing related utility routines */
+int hdmi_get_video_id_code(struct msm_hdmi_mode_timing_info *timing_in,
+	struct hdmi_util_ds_data *ds_data);
+int hdmi_get_supported_mode(struct msm_hdmi_mode_timing_info *info,
+	struct hdmi_util_ds_data *ds_data, u32 mode);
+ssize_t hdmi_get_video_3d_fmt_2string(u32 format, char *buf, u32 size);
+const char *msm_hdmi_mode_2string(u32 mode);
+int hdmi_set_resv_timing_info(struct msm_hdmi_mode_timing_info *mode);
+bool hdmi_is_valid_resv_timing(int mode);
+void hdmi_reset_resv_timing_info(void);
+
+/* todo: Fix this. Right now this is defined in mdss_hdmi_tx.c */
+void *hdmi_get_featuredata_from_sysfs_dev(struct device *device, u32 type);
+
+/* DDC */
+void hdmi_ddc_config(struct hdmi_tx_ddc_ctrl *ctrl);
+int hdmi_ddc_isr(struct hdmi_tx_ddc_ctrl *ctrl, u32 version);
+int hdmi_ddc_write(struct hdmi_tx_ddc_ctrl *ctrl);
+int hdmi_ddc_read_seg(struct hdmi_tx_ddc_ctrl *ctrl);
+int hdmi_ddc_read(struct hdmi_tx_ddc_ctrl *ctrl);
+int hdmi_ddc_abort_transaction(struct hdmi_tx_ddc_ctrl *ctrl);
+
+int hdmi_scdc_read(struct hdmi_tx_ddc_ctrl *ctrl, u32 data_type, u32 *val);
+int hdmi_scdc_write(struct hdmi_tx_ddc_ctrl *ctrl, u32 data_type, u32 val);
+int hdmi_setup_ddc_timers(struct hdmi_tx_ddc_ctrl *ctrl,
+			  u32 type, u32 to_in_num_lines);
+void hdmi_scrambler_ddc_disable(struct hdmi_tx_ddc_ctrl *ctrl);
+void hdmi_hdcp2p2_ddc_disable(struct hdmi_tx_ddc_ctrl *ctrl);
+int hdmi_hdcp2p2_ddc_read_rxstatus(struct hdmi_tx_ddc_ctrl *ctrl);
+int hdmi_utils_get_timeout_in_hysnc(struct msm_hdmi_mode_timing_info *timing,
+	u32 timeout_ms);
+
+#endif /* __HDMI_UTIL_H__ */
diff --git a/drivers/video/fbdev/msm/mdss_io_util.c b/drivers/video/fbdev/msm/mdss_io_util.c
new file mode 100644
index 0000000..bd96c605
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_io_util.c
@@ -0,0 +1,552 @@
+/* Copyright (c) 2012-2015, 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/mdss_io_util.h>
+
+#define MAX_I2C_CMDS  16
+/*
+ * dss_reg_w() - write @value to the MMIO register at @io->base + @offset.
+ * When @debug is non-zero, read the register back and log both the written
+ * and read-back values.  Silently returns on NULL io/base or a bad offset.
+ */
+void dss_reg_w(struct dss_io_data *io, u32 offset, u32 value, u32 debug)
+{
+	u32 in_val;
+
+	if (!io || !io->base) {
+		DEV_ERR("%pS->%s: invalid input\n",
+			__builtin_return_address(0), __func__);
+		return;
+	}
+
+	/* NOTE(review): '>' allows offset == io->len, i.e. a write one word
+	 * past the mapped length -- confirm whether '>=' was intended. */
+	if (offset > io->len) {
+		DEV_ERR("%pS->%s: offset out of range\n",
+			__builtin_return_address(0), __func__);
+		return;
+	}
+
+	writel_relaxed(value, io->base + offset);
+	if (debug) {
+		in_val = readl_relaxed(io->base + offset);
+		DEV_DBG("[%08x] => %08x [%08x]\n",
+			(u32)(unsigned long)(io->base + offset),
+
+			value, in_val);
+	}
+} /* dss_reg_w */
+EXPORT_SYMBOL(dss_reg_w);
+
+/*
+ * dss_reg_r() - read and return the MMIO register at @io->base + @offset,
+ * logging the address/value when @debug is non-zero.
+ * NOTE(review): on bad input this returns -EINVAL cast to u32 (0xFFFFFFEA),
+ * which a caller cannot distinguish from a legitimate register value.
+ */
+u32 dss_reg_r(struct dss_io_data *io, u32 offset, u32 debug)
+{
+	u32 value;
+
+	if (!io || !io->base) {
+		DEV_ERR("%pS->%s: invalid input\n",
+			__builtin_return_address(0), __func__);
+		return -EINVAL;
+	}
+
+	/* NOTE(review): same off-by-one question as dss_reg_w -- '>' permits
+	 * a read at offset == io->len. */
+	if (offset > io->len) {
+		DEV_ERR("%pS->%s: offset out of range\n",
+			__builtin_return_address(0), __func__);
+		return -EINVAL;
+	}
+
+	value = readl_relaxed(io->base + offset);
+	if (debug)
+		DEV_DBG("[%08x] <= %08x\n",
+			(u32)(unsigned long)(io->base + offset), value);
+
+	return value;
+} /* dss_reg_r */
+EXPORT_SYMBOL(dss_reg_r);
+
+/*
+ * dss_reg_dump() - hex-dump @length bytes of register space at @base with
+ * the given line @prefix (32 bytes per row, 4-byte groups).  No-op unless
+ * @debug is non-zero.
+ */
+void dss_reg_dump(void __iomem *base, u32 length, const char *prefix,
+	u32 debug)
+{
+	if (debug)
+		print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 32, 4,
+			(void *)base, length, false);
+} /* dss_reg_dump */
+EXPORT_SYMBOL(dss_reg_dump);
+
+/*
+ * msm_dss_get_res_byname() - look up the platform resource of @type named
+ * @name on @pdev; logs and returns NULL when not found.
+ * NOTE(review): EXPORT_SYMBOL on a static (file-local) function is unusual
+ * -- confirm whether the export or the static qualifier is intended.
+ */
+static struct resource *msm_dss_get_res_byname(struct platform_device *pdev,
+	unsigned int type, const char *name)
+{
+	struct resource *res = NULL;
+
+	res = platform_get_resource_byname(pdev, type, name);
+	if (!res)
+		DEV_ERR("%s: '%s' resource not found\n", __func__, name);
+
+	return res;
+} /* msm_dss_get_res_byname */
+EXPORT_SYMBOL(msm_dss_get_res_byname);
+
+/*
+ * msm_dss_ioremap_byname() - ioremap the IORESOURCE_MEM region named @name
+ * on @pdev and fill @io_data->base / @io_data->len with the mapping.
+ * Returns 0 on success, -EINVAL on bad arguments, -ENODEV when the
+ * resource is missing, -EIO when ioremap fails.  The caller releases the
+ * mapping with msm_dss_iounmap().
+ */
+int msm_dss_ioremap_byname(struct platform_device *pdev,
+	struct dss_io_data *io_data, const char *name)
+{
+	struct resource *res = NULL;
+
+	if (!pdev || !io_data) {
+		DEV_ERR("%pS->%s: invalid input\n",
+			__builtin_return_address(0), __func__);
+		return -EINVAL;
+	}
+
+	res = msm_dss_get_res_byname(pdev, IORESOURCE_MEM, name);
+	if (!res) {
+		DEV_ERR("%pS->%s: '%s' msm_dss_get_res_byname failed\n",
+			__builtin_return_address(0), __func__, name);
+		return -ENODEV;
+	}
+
+	io_data->len = (u32)resource_size(res);
+	io_data->base = ioremap(res->start, io_data->len);
+	if (!io_data->base) {
+		DEV_ERR("%pS->%s: '%s' ioremap failed\n",
+			__builtin_return_address(0), __func__, name);
+		return -EIO;
+	}
+
+	return 0;
+} /* msm_dss_ioremap_byname */
+EXPORT_SYMBOL(msm_dss_ioremap_byname);
+
+/*
+ * msm_dss_iounmap() - undo msm_dss_ioremap_byname(): unmap @io_data->base
+ * (if mapped) and clear base/len.  Safe to call on an already-unmapped
+ * descriptor; only a NULL @io_data is rejected.
+ */
+void msm_dss_iounmap(struct dss_io_data *io_data)
+{
+	if (!io_data) {
+		DEV_ERR("%pS->%s: invalid input\n",
+			__builtin_return_address(0), __func__);
+		return;
+	}
+
+	if (io_data->base) {
+		iounmap(io_data->base);
+		io_data->base = NULL;
+	}
+	io_data->len = 0;
+} /* msm_dss_iounmap */
+EXPORT_SYMBOL(msm_dss_iounmap);
+
+/*
+ * msm_dss_config_vreg() - acquire (@config != 0) or release (@config == 0)
+ * the @num_vreg regulators described in @in_vreg.
+ * On acquire, supplies that report selectable voltages (treated as LDOs)
+ * also get their min/max voltage programmed; on any failure, supplies
+ * acquired so far are unwound.  Release walks the array in reverse,
+ * resetting LDO voltage floors to 0 before regulator_put().
+ * Returns 0 on success or the first regulator error code.
+ * NOTE(review): the unwind path jumps from the vreg_get_fail loop to
+ * vreg_unconfig, then falls through vreg_set_voltage_fail back into the
+ * loop -- verify each supply is released exactly once.
+ */
+int msm_dss_config_vreg(struct device *dev, struct dss_vreg *in_vreg,
+	int num_vreg, int config)
+{
+	int i = 0, rc = 0;
+	struct dss_vreg *curr_vreg = NULL;
+	enum dss_vreg_type type;
+
+	if (!in_vreg || !num_vreg)
+		return rc;
+
+	if (config) {
+		for (i = 0; i < num_vreg; i++) {
+			curr_vreg = &in_vreg[i];
+			curr_vreg->vreg = regulator_get(dev,
+				curr_vreg->vreg_name);
+			rc = PTR_RET(curr_vreg->vreg);
+			if (rc) {
+				DEV_ERR("%pS->%s: %s get failed. rc=%d\n",
+					 __builtin_return_address(0), __func__,
+					 curr_vreg->vreg_name, rc);
+				curr_vreg->vreg = NULL;
+				goto vreg_get_fail;
+			}
+			type = (regulator_count_voltages(curr_vreg->vreg) > 0)
+					? DSS_REG_LDO : DSS_REG_VS;
+			if (type == DSS_REG_LDO) {
+				rc = regulator_set_voltage(
+					curr_vreg->vreg,
+					curr_vreg->min_voltage,
+					curr_vreg->max_voltage);
+				if (rc < 0) {
+					DEV_ERR("%pS->%s: %s set vltg fail\n",
+						__builtin_return_address(0),
+						__func__,
+						curr_vreg->vreg_name);
+					goto vreg_set_voltage_fail;
+				}
+			}
+		}
+	} else {
+		for (i = num_vreg-1; i >= 0; i--) {
+			curr_vreg = &in_vreg[i];
+			if (curr_vreg->vreg) {
+				type = (regulator_count_voltages(
+					curr_vreg->vreg) > 0)
+					? DSS_REG_LDO : DSS_REG_VS;
+				if (type == DSS_REG_LDO) {
+					regulator_set_voltage(curr_vreg->vreg,
+						0, curr_vreg->max_voltage);
+				}
+				regulator_put(curr_vreg->vreg);
+				curr_vreg->vreg = NULL;
+			}
+		}
+	}
+	return 0;
+
+vreg_unconfig:
+if (type == DSS_REG_LDO)
+	regulator_set_optimum_mode(curr_vreg->vreg, 0);
+
+vreg_set_voltage_fail:
+	regulator_put(curr_vreg->vreg);
+	curr_vreg->vreg = NULL;
+
+vreg_get_fail:
+	for (i--; i >= 0; i--) {
+		curr_vreg = &in_vreg[i];
+		type = (regulator_count_voltages(curr_vreg->vreg) > 0)
+			? DSS_REG_LDO : DSS_REG_VS;
+		goto vreg_unconfig;
+	}
+	return rc;
+} /* msm_dss_config_vreg */
+EXPORT_SYMBOL(msm_dss_config_vreg);
+
+/*
+ * msm_dss_config_vreg_opt_mode() - apply the load vote (uA) stored for
+ * @mode to every regulator in @in_vreg[0..@num_vreg-1].
+ * Returns 0 on success, -EINVAL for an out-of-range mode, or the first
+ * regulator error encountered (no unwind of earlier votes).
+ */
+int msm_dss_config_vreg_opt_mode(struct dss_vreg *in_vreg, int num_vreg,
+				 enum dss_vreg_mode mode)
+{
+	int i = 0, rc = 0;
+
+	if (mode >= DSS_REG_MODE_MAX) {
+		pr_err("%pS->%s: invalid mode %d\n",
+			 __builtin_return_address(0), __func__, mode);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	for (i = 0; i < num_vreg; i++) {
+		rc = PTR_RET(in_vreg[i].vreg);
+		if (rc) {
+			DEV_ERR("%pS->%s: %s regulator error. rc=%d\n",
+				__builtin_return_address(0), __func__,
+				in_vreg[i].vreg_name, rc);
+			goto error;
+		}
+
+		DEV_DBG("%s: Setting optimum mode %d for %s (load=%d)\n",
+			__func__, mode, in_vreg[i].vreg_name,
+			in_vreg[i].load[mode]);
+		rc = regulator_set_optimum_mode(in_vreg[i].vreg,
+					in_vreg[i].load[mode]);
+		if (rc < 0) {
+			DEV_ERR("%pS->%s: %s set opt mode failed. rc=%d\n",
+				__builtin_return_address(0), __func__,
+				in_vreg[i].vreg_name, rc);
+			goto error;
+		} else {
+			/*
+			 * regulator_set_optimum_mode can return non-zero
+			 * value for success. However, this API is expected
+			 * to return 0 for success.
+			 */
+			rc = 0;
+		}
+	}
+
+error:
+	return rc;
+}
+EXPORT_SYMBOL(msm_dss_config_vreg_opt_mode);
+
+/*
+ * msm_dss_enable_vreg() - enable (@enable != 0) or disable the supplies in
+ * @in_vreg, applying per-supply pre/post sleep delays (milliseconds) and
+ * the DSS_REG_MODE_ENABLE / DSS_REG_MODE_DISABLE load votes.
+ * Enable walks forward; disable walks in reverse.  On an enable failure,
+ * supplies enabled so far are voted down and disabled again.
+ * Sleeps are only taken on enable when the supply was actually off.
+ */
+int msm_dss_enable_vreg(struct dss_vreg *in_vreg, int num_vreg, int enable)
+{
+	int i = 0, rc = 0;
+	bool need_sleep;
+
+	if (enable) {
+		for (i = 0; i < num_vreg; i++) {
+			rc = PTR_RET(in_vreg[i].vreg);
+			if (rc) {
+				DEV_ERR("%pS->%s: %s regulator error. rc=%d\n",
+					__builtin_return_address(0), __func__,
+					in_vreg[i].vreg_name, rc);
+				goto vreg_set_opt_mode_fail;
+			}
+			need_sleep = !regulator_is_enabled(in_vreg[i].vreg);
+			if (in_vreg[i].pre_on_sleep && need_sleep)
+				usleep_range(in_vreg[i].pre_on_sleep * 1000,
+					in_vreg[i].pre_on_sleep * 1000);
+			rc = regulator_set_optimum_mode(in_vreg[i].vreg,
+				in_vreg[i].load[DSS_REG_MODE_ENABLE]);
+			if (rc < 0) {
+				DEV_ERR("%pS->%s: %s set opt m fail\n",
+					__builtin_return_address(0), __func__,
+					in_vreg[i].vreg_name);
+				goto vreg_set_opt_mode_fail;
+			}
+			rc = regulator_enable(in_vreg[i].vreg);
+			if (in_vreg[i].post_on_sleep && need_sleep)
+				usleep_range(in_vreg[i].post_on_sleep * 1000,
+					in_vreg[i].post_on_sleep * 1000);
+			if (rc < 0) {
+				DEV_ERR("%pS->%s: %s enable failed\n",
+					__builtin_return_address(0), __func__,
+					in_vreg[i].vreg_name);
+				goto disable_vreg;
+			}
+		}
+	} else {
+		for (i = num_vreg-1; i >= 0; i--) {
+			if (in_vreg[i].pre_off_sleep)
+				usleep_range(in_vreg[i].pre_off_sleep * 1000,
+					in_vreg[i].pre_off_sleep * 1000);
+			regulator_set_optimum_mode(in_vreg[i].vreg,
+				in_vreg[i].load[DSS_REG_MODE_DISABLE]);
+
+			if (regulator_is_enabled(in_vreg[i].vreg))
+				regulator_disable(in_vreg[i].vreg);
+
+			if (in_vreg[i].post_off_sleep)
+				usleep_range(in_vreg[i].post_off_sleep * 1000,
+					in_vreg[i].post_off_sleep * 1000);
+		}
+	}
+	return rc;
+
+disable_vreg:
+	regulator_set_optimum_mode(in_vreg[i].vreg,
+					in_vreg[i].load[DSS_REG_MODE_DISABLE]);
+
+vreg_set_opt_mode_fail:
+	for (i--; i >= 0; i--) {
+		if (in_vreg[i].pre_off_sleep)
+			usleep_range(in_vreg[i].pre_off_sleep * 1000,
+				in_vreg[i].pre_off_sleep * 1000);
+		regulator_set_optimum_mode(in_vreg[i].vreg,
+			in_vreg[i].load[DSS_REG_MODE_DISABLE]);
+		regulator_disable(in_vreg[i].vreg);
+		if (in_vreg[i].post_off_sleep)
+			usleep_range(in_vreg[i].post_off_sleep * 1000,
+				in_vreg[i].post_off_sleep * 1000);
+	}
+
+	return rc;
+} /* msm_dss_enable_vreg */
+EXPORT_SYMBOL(msm_dss_enable_vreg);
+
+/*
+ * msm_dss_enable_gpio() - request each GPIO in @in_gpio and drive its
+ * configured value (@enable != 0), or free all of them in reverse order
+ * (@enable == 0).  On a request failure the already-requested GPIOs are
+ * freed before returning the error.
+ * NOTE(review): gpio number 0 is treated as "unset" on the free paths, and
+ * no gpio_direction_output() call precedes gpio_set_value() -- confirm
+ * callers configure direction (e.g. via pinctrl) elsewhere.
+ */
+int msm_dss_enable_gpio(struct dss_gpio *in_gpio, int num_gpio, int enable)
+{
+	int i = 0, rc = 0;
+
+	if (enable) {
+		for (i = 0; i < num_gpio; i++) {
+			DEV_DBG("%pS->%s: %s enable\n",
+				__builtin_return_address(0), __func__,
+				in_gpio[i].gpio_name);
+
+			rc = gpio_request(in_gpio[i].gpio,
+				in_gpio[i].gpio_name);
+			if (rc < 0) {
+				DEV_ERR("%pS->%s: %s enable failed\n",
+					__builtin_return_address(0), __func__,
+					in_gpio[i].gpio_name);
+				goto disable_gpio;
+			}
+			gpio_set_value(in_gpio[i].gpio, in_gpio[i].value);
+		}
+	} else {
+		for (i = num_gpio-1; i >= 0; i--) {
+			DEV_DBG("%pS->%s: %s disable\n",
+				__builtin_return_address(0), __func__,
+				in_gpio[i].gpio_name);
+			if (in_gpio[i].gpio)
+				gpio_free(in_gpio[i].gpio);
+		}
+	}
+	return rc;
+
+disable_gpio:
+	for (i--; i >= 0; i--)
+		if (in_gpio[i].gpio)
+			gpio_free(in_gpio[i].gpio);
+
+	return rc;
+} /* msm_dss_enable_gpio */
+EXPORT_SYMBOL(msm_dss_enable_gpio);
+
+/*
+ * msm_dss_put_clk() - release every clock handle in @clk_arry (reverse
+ * order) and NULL the pointers so a later put/get is safe.
+ */
+void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk)
+{
+	int i;
+
+	for (i = num_clk - 1; i >= 0; i--) {
+		if (clk_arry[i].clk)
+			clk_put(clk_arry[i].clk);
+		clk_arry[i].clk = NULL;
+	}
+} /* msm_dss_put_clk */
+EXPORT_SYMBOL(msm_dss_put_clk);
+
+/*
+ * msm_dss_get_clk() - look up each named clock in @clk_arry via clk_get().
+ * On any failure, all entries (including ones not yet acquired, which are
+ * NULL-safe) are released with msm_dss_put_clk() and the error returned.
+ */
+int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk)
+{
+	int i, rc = 0;
+
+	for (i = 0; i < num_clk; i++) {
+		clk_arry[i].clk = clk_get(dev, clk_arry[i].clk_name);
+		rc = PTR_RET(clk_arry[i].clk);
+		if (rc) {
+			DEV_ERR("%pS->%s: '%s' get failed. rc=%d\n",
+				__builtin_return_address(0), __func__,
+				clk_arry[i].clk_name, rc);
+			goto error;
+		}
+	}
+
+	return rc;
+
+error:
+	msm_dss_put_clk(clk_arry, num_clk);
+
+	return rc;
+} /* msm_dss_get_clk */
+EXPORT_SYMBOL(msm_dss_get_clk);
+
+/*
+ * msm_dss_clk_set_rate() - program the stored rate on every non-AHB clock
+ * in @clk_arry (AHB bus clocks have no meaningful rate here and are
+ * skipped).  Stops at the first failure; returns 0, the clk_set_rate()
+ * error, or -EPERM when a handle is missing.
+ */
+int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk)
+{
+	int i, rc = 0;
+
+	for (i = 0; i < num_clk; i++) {
+		if (clk_arry[i].clk) {
+			if (clk_arry[i].type != DSS_CLK_AHB) {
+				DEV_DBG("%pS->%s: '%s' rate %ld\n",
+					__builtin_return_address(0), __func__,
+					clk_arry[i].clk_name,
+					clk_arry[i].rate);
+				rc = clk_set_rate(clk_arry[i].clk,
+					clk_arry[i].rate);
+				if (rc) {
+					DEV_ERR("%pS->%s: %s failed. rc=%d\n",
+						__builtin_return_address(0),
+						__func__,
+						clk_arry[i].clk_name, rc);
+					break;
+				}
+			}
+		} else {
+			DEV_ERR("%pS->%s: '%s' is not available\n",
+				__builtin_return_address(0), __func__,
+				clk_arry[i].clk_name);
+			rc = -EPERM;
+			break;
+		}
+	}
+
+	return rc;
+} /* msm_dss_clk_set_rate */
+EXPORT_SYMBOL(msm_dss_clk_set_rate);
+
+/*
+ * msm_dss_enable_clk() - prepare+enable (@enable != 0) or
+ * disable+unprepare (@enable == 0) the clocks in @clk_arry.
+ * Enable walks forward and, on failure at index i, recurses with
+ * (@clk_arry, i, false) to unwind the clocks already enabled.  Disable
+ * walks in reverse and only logs missing handles.
+ */
+int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable)
+{
+	int i, rc = 0;
+
+	if (enable) {
+		for (i = 0; i < num_clk; i++) {
+			DEV_DBG("%pS->%s: enable '%s'\n",
+				__builtin_return_address(0), __func__,
+				clk_arry[i].clk_name);
+			if (clk_arry[i].clk) {
+				rc = clk_prepare_enable(clk_arry[i].clk);
+				if (rc)
+					DEV_ERR("%pS->%s: %s en fail. rc=%d\n",
+						__builtin_return_address(0),
+						__func__,
+						clk_arry[i].clk_name, rc);
+			} else {
+				DEV_ERR("%pS->%s: '%s' is not available\n",
+					__builtin_return_address(0), __func__,
+					clk_arry[i].clk_name);
+				rc = -EPERM;
+			}
+
+			if (rc) {
+				msm_dss_enable_clk(&clk_arry[i],
+					i, false);
+				break;
+			}
+		}
+	} else {
+		for (i = num_clk - 1; i >= 0; i--) {
+			DEV_DBG("%pS->%s: disable '%s'\n",
+				__builtin_return_address(0), __func__,
+				clk_arry[i].clk_name);
+
+			if (clk_arry[i].clk)
+				clk_disable_unprepare(clk_arry[i].clk);
+			else
+				DEV_ERR("%pS->%s: '%s' is not available\n",
+					__builtin_return_address(0), __func__,
+					clk_arry[i].clk_name);
+		}
+	}
+
+	return rc;
+} /* msm_dss_enable_clk */
+EXPORT_SYMBOL(msm_dss_enable_clk);
+
+
+/*
+ * mdss_i2c_byte_read() - read one byte at register @reg_offset from the
+ * device at 8-bit address @slave_addr (shifted right once to get the
+ * 7-bit i2c address), using a write-offset/read-byte message pair.
+ * Returns 0 and stores the byte in *@read_buf, or -EACCES on transfer
+ * failure.
+ */
+int mdss_i2c_byte_read(struct i2c_client *client, uint8_t slave_addr,
+			uint8_t reg_offset, uint8_t *read_buf)
+{
+	struct i2c_msg msgs[2];
+	int ret = -1;
+
+	pr_debug("%s: reading from slave_addr=[%x] and offset=[%x]\n",
+		 __func__, slave_addr, reg_offset);
+
+	msgs[0].addr = slave_addr >> 1;
+	msgs[0].flags = 0;
+	msgs[0].buf = &reg_offset;
+	msgs[0].len = 1;
+
+	msgs[1].addr = slave_addr >> 1;
+	msgs[1].flags = I2C_M_RD;
+	msgs[1].buf = read_buf;
+	msgs[1].len = 1;
+
+	ret = i2c_transfer(client->adapter, msgs, 2);
+	if (ret < 1) {
+		pr_err("%s: I2C READ FAILED=[%d]\n", __func__, ret);
+		return -EACCES;
+	}
+	pr_debug("%s: i2c buf is [%x]\n", __func__, *read_buf);
+	return 0;
+}
+EXPORT_SYMBOL(mdss_i2c_byte_read);
+
+/*
+ * mdss_i2c_byte_write() - write the byte *@value to register @reg_offset
+ * of the device at 8-bit address @slave_addr (>> 1 for the 7-bit i2c
+ * address) as a single two-byte message.
+ * NOTE(review): on success this returns the i2c_transfer() count (1),
+ * while mdss_i2c_byte_read() returns 0 -- confirm callers only test for
+ * negative values.
+ */
+int mdss_i2c_byte_write(struct i2c_client *client, uint8_t slave_addr,
+			uint8_t reg_offset, uint8_t *value)
+{
+	struct i2c_msg msgs[1];
+	uint8_t data[2];
+	int status = -EACCES;
+
+	pr_debug("%s: writing from slave_addr=[%x] and offset=[%x]\n",
+		 __func__, slave_addr, reg_offset);
+
+	data[0] = reg_offset;
+	data[1] = *value;
+
+	msgs[0].addr = slave_addr >> 1;
+	msgs[0].flags = 0;
+	msgs[0].len = 2;
+	msgs[0].buf = data;
+
+	status = i2c_transfer(client->adapter, msgs, 1);
+	if (status < 1) {
+		pr_err("I2C WRITE FAILED=[%d]\n", status);
+		return -EACCES;
+	}
+	pr_debug("%s: I2C write status=%x\n", __func__, status);
+	return status;
+}
+EXPORT_SYMBOL(mdss_i2c_byte_write);
diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c
new file mode 100644
index 0000000..73324489
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp.c
@@ -0,0 +1,5116 @@
+/*
+ * MDSS MDP Interface (used by framebuffer core)
+ *
+ * Copyright (c) 2007-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2007 Google Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/hrtimer.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/rpm-smd-regulator.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/spinlock.h>
+#include <linux/semaphore.h>
+#include <linux/uaccess.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/rpm-smd.h>
+
+#include "mdss.h"
+#include "mdss_fb.h"
+#include "mdss_mdp.h"
+#include "mdss_panel.h"
+#include "mdss_debug.h"
+#include "mdss_mdp_debug.h"
+#include "mdss_smmu.h"
+
+#include "mdss_mdp_trace.h"
+
+#define AXI_HALT_TIMEOUT_US	0x4000
+#define AUTOSUSPEND_TIMEOUT_MS	200
+#define DEFAULT_MDP_PIPE_WIDTH	2048
+#define RES_1080p		(1088*1920)
+#define RES_UHD			(3840*2160)
+
+struct mdss_data_type *mdss_res;
+static u32 mem_protect_sd_ctrl_id;
+
+/* Return the SMMU domain id used for unsecure framebuffer memory. */
+static int mdss_fb_mem_get_iommu_domain(void)
+{
+	return mdss_smmu_get_domain_id(MDSS_IOMMU_DOMAIN_UNSECURE);
+}
+
+struct msm_mdp_interface mdp5 = {
+	.init_fnc = mdss_mdp_overlay_init,
+	.fb_mem_get_iommu_domain = mdss_fb_mem_get_iommu_domain,
+	.fb_stride = mdss_mdp_fb_stride,
+	.check_dsi_status = mdss_check_dsi_ctrl_status,
+	.get_format_params = mdss_mdp_get_format_params,
+};
+
+#define IB_QUOTA 2000000000
+#define AB_QUOTA 2000000000
+
+#define MAX_AXI_PORT_COUNT 3
+
+#define MEM_PROTECT_SD_CTRL 0xF
+#define MEM_PROTECT_SD_CTRL_FLAT 0x14
+
+static DEFINE_SPINLOCK(mdp_lock);
+static DEFINE_SPINLOCK(mdss_mdp_intr_lock);
+static DEFINE_MUTEX(mdp_clk_lock);
+static DEFINE_MUTEX(mdp_iommu_ref_cnt_lock);
+static DEFINE_MUTEX(mdp_fs_idle_pc_lock);
+
+static struct mdss_panel_intf pan_types[] = {
+	{"dsi", MDSS_PANEL_INTF_DSI},
+	{"edp", MDSS_PANEL_INTF_EDP},
+	{"hdmi", MDSS_PANEL_INTF_HDMI},
+};
+static char mdss_mdp_panel[MDSS_MAX_PANEL_LEN];
+
+struct mdss_hw mdss_mdp_hw = {
+	.hw_ndx = MDSS_HW_MDP,
+	.ptr = NULL,
+	.irq_handler = mdss_mdp_isr,
+};
+
+/* define for h/w block with external driver */
+struct mdss_hw mdss_misc_hw = {
+	.hw_ndx = MDSS_HW_MISC,
+	.ptr = NULL,
+	.irq_handler = NULL,
+};
+
+#ifdef CONFIG_MSM_BUS_SCALING
+#define MDP_REG_BUS_VECTOR_ENTRY(ab_val, ib_val)	\
+	{						\
+		.src = MSM_BUS_MASTER_AMPSS_M0,		\
+		.dst = MSM_BUS_SLAVE_DISPLAY_CFG,	\
+		.ab = (ab_val),				\
+		.ib = (ib_val),				\
+	}
+
+#define BUS_VOTE_19_MHZ 153600000
+#define BUS_VOTE_40_MHZ 320000000
+#define BUS_VOTE_80_MHZ 640000000
+
+static struct msm_bus_vectors mdp_reg_bus_vectors[] = {
+	MDP_REG_BUS_VECTOR_ENTRY(0, 0),
+	MDP_REG_BUS_VECTOR_ENTRY(0, BUS_VOTE_19_MHZ),
+	MDP_REG_BUS_VECTOR_ENTRY(0, BUS_VOTE_40_MHZ),
+	MDP_REG_BUS_VECTOR_ENTRY(0, BUS_VOTE_80_MHZ),
+};
+static struct msm_bus_paths mdp_reg_bus_usecases[ARRAY_SIZE(
+		mdp_reg_bus_vectors)];
+static struct msm_bus_scale_pdata mdp_reg_bus_scale_table = {
+	.usecase = mdp_reg_bus_usecases,
+	.num_usecases = ARRAY_SIZE(mdp_reg_bus_usecases),
+	.name = "mdss_reg",
+	.active_only = true,
+};
+#endif
+
+u32 invalid_mdp107_wb_output_fmts[] = {
+	MDP_XRGB_8888,
+	MDP_RGBX_8888,
+	MDP_BGRX_8888,
+};
+
+/*
+ * struct intr_call - array of intr handlers
+ * @func: intr handler
+ * @arg: requested argument to the handler
+ */
+struct intr_callback {
+	void (*func)(void *);
+	void *arg;
+};
+
+/*
+ * struct mdss_mdp_intr_reg - array of MDP intr register sets
+ * @clr_off: offset to CLEAR reg
+ * @en_off: offset to ENABLE reg
+ * @status_off: offset to STATUS reg
+ */
+struct mdss_mdp_intr_reg {
+	u32 clr_off;
+	u32 en_off;
+	u32 status_off;
+};
+
+/*
+ * struct mdss_mdp_irq - maps each irq with i/f
+ * @intr_type: type of interface
+ * @intf_num: i/f the irq is associated with
+ * @irq_mask: corresponding bit in the reg set
+ * @reg_idx: which reg set to program
+ */
+struct mdss_mdp_irq {
+	u32 intr_type;
+	u32 intf_num;
+	u32 irq_mask;
+	u32 reg_idx;
+};
+
+static struct mdss_mdp_intr_reg mdp_intr_reg[] = {
+	{ MDSS_MDP_REG_INTR_CLEAR, MDSS_MDP_REG_INTR_EN,
+		MDSS_MDP_REG_INTR_STATUS },
+	{ MDSS_MDP_REG_INTR2_CLEAR, MDSS_MDP_REG_INTR2_EN,
+		MDSS_MDP_REG_INTR2_STATUS }
+};
+
+/*
+ * Table mapping each (interrupt type, interface number) pair to its mask
+ * bit and the index into mdp_intr_reg of the register set that carries it.
+ * NOTE(review): the CWB_OVERFLOW entry for interface 3 reuses
+ * MDSS_MDP_INTR2_PING_PONG_2_CWB_OVERFLOW -- if a _3_ variant exists this
+ * looks like a copy/paste slip; confirm against the HW interrupt map.
+ */
+static struct mdss_mdp_irq mdp_irq_map[] =  {
+	{ MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN, 1,
+		MDSS_MDP_INTR_INTF_0_UNDERRUN, 0},
+	{ MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN, 2,
+		MDSS_MDP_INTR_INTF_1_UNDERRUN, 0},
+	{ MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN, 3,
+		MDSS_MDP_INTR_INTF_2_UNDERRUN, 0},
+	{ MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN, 4,
+		MDSS_MDP_INTR_INTF_3_UNDERRUN, 0},
+	{ MDSS_MDP_IRQ_TYPE_INTF_VSYNC, 1,
+		MDSS_MDP_INTR_INTF_0_VSYNC, 0},
+	{ MDSS_MDP_IRQ_TYPE_INTF_VSYNC, 2,
+		MDSS_MDP_INTR_INTF_1_VSYNC, 0},
+	{ MDSS_MDP_IRQ_TYPE_INTF_VSYNC, 3,
+		MDSS_MDP_INTR_INTF_2_VSYNC, 0},
+	{ MDSS_MDP_IRQ_TYPE_INTF_VSYNC, 4,
+		MDSS_MDP_INTR_INTF_3_VSYNC, 0},
+	{ MDSS_MDP_IRQ_TYPE_PING_PONG_COMP, 0,
+		MDSS_MDP_INTR_PING_PONG_0_DONE, 0},
+	{ MDSS_MDP_IRQ_TYPE_PING_PONG_COMP, 1,
+		MDSS_MDP_INTR_PING_PONG_1_DONE, 0},
+	{ MDSS_MDP_IRQ_TYPE_PING_PONG_COMP, 2,
+		MDSS_MDP_INTR_PING_PONG_2_DONE, 0},
+	{ MDSS_MDP_IRQ_TYPE_PING_PONG_COMP, 3,
+		MDSS_MDP_INTR_PING_PONG_3_DONE, 0},
+	{ MDSS_MDP_IRQ_TYPE_PING_PONG_RD_PTR, 0,
+		MDSS_MDP_INTR_PING_PONG_0_RD_PTR, 0},
+	{ MDSS_MDP_IRQ_TYPE_PING_PONG_RD_PTR, 1,
+		MDSS_MDP_INTR_PING_PONG_1_RD_PTR, 0},
+	{ MDSS_MDP_IRQ_TYPE_PING_PONG_RD_PTR, 2,
+		MDSS_MDP_INTR_PING_PONG_2_RD_PTR, 0},
+	{ MDSS_MDP_IRQ_TYPE_PING_PONG_RD_PTR, 3,
+		MDSS_MDP_INTR_PING_PONG_3_RD_PTR, 0},
+	{ MDSS_MDP_IRQ_TYPE_PING_PONG_WR_PTR, 0,
+		MDSS_MDP_INTR_PING_PONG_0_WR_PTR, 0},
+	{ MDSS_MDP_IRQ_TYPE_PING_PONG_WR_PTR, 1,
+		MDSS_MDP_INTR_PING_PONG_1_WR_PTR, 0},
+	{ MDSS_MDP_IRQ_TYPE_PING_PONG_WR_PTR, 2,
+		MDSS_MDP_INTR_PING_PONG_2_WR_PTR, 0},
+	{ MDSS_MDP_IRQ_TYPE_PING_PONG_WR_PTR, 3,
+		MDSS_MDP_INTR_PING_PONG_3_WR_PTR, 0},
+	{ MDSS_MDP_IRQ_TYPE_WB_ROT_COMP, 0,
+		MDSS_MDP_INTR_WB_0_DONE, 0},
+	{ MDSS_MDP_IRQ_TYPE_WB_ROT_COMP, 1,
+		MDSS_MDP_INTR_WB_1_DONE, 0},
+	{ MDSS_MDP_IRQ_TYPE_WB_WFD_COMP, 0,
+		MDSS_MDP_INTR_WB_2_DONE, 0},
+	{ MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF, 0,
+		MDSS_MDP_INTR_PING_PONG_0_AUTOREFRESH_DONE, 0},
+	{ MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF, 1,
+		MDSS_MDP_INTR_PING_PONG_1_AUTOREFRESH_DONE, 0},
+	{ MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF, 2,
+		MDSS_MDP_INTR_PING_PONG_2_AUTOREFRESH_DONE, 0},
+	{ MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF, 3,
+		MDSS_MDP_INTR_PING_PONG_3_AUTOREFRESH_DONE, 0},
+	{ MDSS_MDP_IRQ_TYPE_CWB_OVERFLOW, 2,
+		MDSS_MDP_INTR2_PING_PONG_2_CWB_OVERFLOW, 1},
+	{ MDSS_MDP_IRQ_TYPE_CWB_OVERFLOW, 3,
+		MDSS_MDP_INTR2_PING_PONG_2_CWB_OVERFLOW, 1}
+};
+
+static struct intr_callback *mdp_intr_cb;
+
+static void mdss_mdp_footswitch_ctrl(struct mdss_data_type *mdata, int on);
+static int mdss_mdp_parse_dt(struct platform_device *pdev);
+static int mdss_mdp_parse_dt_pipe(struct platform_device *pdev);
+static int mdss_mdp_parse_dt_mixer(struct platform_device *pdev);
+static int mdss_mdp_parse_dt_wb(struct platform_device *pdev);
+static int mdss_mdp_parse_dt_ctl(struct platform_device *pdev);
+static int mdss_mdp_parse_dt_video_intf(struct platform_device *pdev);
+static int mdss_mdp_parse_dt_handler(struct platform_device *pdev,
+				char *prop_name, u32 *offsets, int len);
+static int mdss_mdp_parse_dt_prop_len(struct platform_device *pdev,
+				char *prop_name);
+static int mdss_mdp_parse_dt_smp(struct platform_device *pdev);
+static int mdss_mdp_parse_dt_prefill(struct platform_device *pdev);
+static int mdss_mdp_parse_dt_misc(struct platform_device *pdev);
+static int mdss_mdp_parse_dt_ad_cfg(struct platform_device *pdev);
+static int mdss_mdp_parse_dt_bus_scale(struct platform_device *pdev);
+static int mdss_mdp_parse_dt_ppb_off(struct platform_device *pdev);
+static int mdss_mdp_parse_dt_cdm(struct platform_device *pdev);
+static int mdss_mdp_parse_dt_dsc(struct platform_device *pdev);
+
+/*
+ * is_mdp_irq_enabled() - return 1 when any MDP, histogram, or interface
+ * interrupt mask bit is currently set in the global mdata, else 0.
+ */
+static inline u32 is_mdp_irq_enabled(void)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(mdp_intr_reg); i++)
+		if (mdata->mdp_irq_mask[i] != 0)
+			return 1;
+
+	if (mdata->mdp_hist_irq_mask)
+		return 1;
+
+	if (mdata->mdp_intf_irq_mask)
+		return 1;
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_fb_stride() - compute the framebuffer line stride in bytes for
+ * @xres pixels at @bpp bytes per pixel; fb0 is padded to a 32-pixel
+ * multiple for the GPU, other fb indices use the exact width.
+ */
+u32 mdss_mdp_fb_stride(u32 fb_index, u32 xres, int bpp)
+{
+	/* The adreno GPU hardware requires that the pitch be aligned to
+	 *  32 pixels for color buffers, so for the cases where the GPU
+	 *  is writing directly to fb0, the framebuffer pitch
+	 *  also needs to be 32 pixel aligned
+	 */
+
+	if (fb_index == 0)
+		return ALIGN(xres, 32) * bpp;
+	else
+		return xres * bpp;
+}
+
+/*
+ * mdss_irq_mask() - irq_chip mask hook for the MDSS misc irq domain.
+ * Only hwirqs below 32 are handled; masking disables the shared MISC
+ * hardware block interrupt under mdp_lock.
+ */
+static void mdss_irq_mask(struct irq_data *data)
+{
+	struct mdss_data_type *mdata = irq_data_get_irq_chip_data(data);
+	unsigned long irq_flags;
+
+	if (!mdata)
+		return;
+
+	pr_debug("irq_domain_mask %lu\n", data->hwirq);
+
+	if (data->hwirq < 32) {
+		spin_lock_irqsave(&mdp_lock, irq_flags);
+		mdata->mdss_util->disable_irq(&mdss_misc_hw);
+		spin_unlock_irqrestore(&mdp_lock, irq_flags);
+	}
+}
+
+/*
+ * mdss_irq_unmask() - irq_chip unmask hook; mirror of mdss_irq_mask(),
+ * re-enabling the shared MISC hardware interrupt for hwirqs below 32.
+ */
+static void mdss_irq_unmask(struct irq_data *data)
+{
+	struct mdss_data_type *mdata = irq_data_get_irq_chip_data(data);
+	unsigned long irq_flags;
+
+	if (!mdata)
+		return;
+
+	pr_debug("irq_domain_unmask %lu\n", data->hwirq);
+
+	if (data->hwirq < 32) {
+		spin_lock_irqsave(&mdp_lock, irq_flags);
+		mdata->mdss_util->enable_irq(&mdss_misc_hw);
+		spin_unlock_irqrestore(&mdp_lock, irq_flags);
+	}
+}
+
+static struct irq_chip mdss_irq_chip = {
+	.name		= "mdss",
+	.irq_mask	= mdss_irq_mask,
+	.irq_unmask	= mdss_irq_unmask,
+};
+
+/*
+ * mdss_irq_domain_map() - irq_domain map callback: bind @virq to the MDSS
+ * irq_chip with level handling, attach mdata as chip data, and mark the
+ * descriptor valid.  Always returns 0.
+ */
+static int mdss_irq_domain_map(struct irq_domain *d,
+		unsigned int virq, irq_hw_number_t hw)
+{
+	struct mdss_data_type *mdata = d->host_data;
+	/* check here if virq is a valid interrupt line */
+	irq_set_chip_and_handler(virq, &mdss_irq_chip, handle_level_irq);
+	irq_set_chip_data(virq, mdata);
+	set_irq_flags(virq, IRQF_VALID);
+	return 0;
+}
+
+const struct irq_domain_ops mdss_irq_domain_ops = {
+	.map = mdss_irq_domain_map,
+	.xlate = irq_domain_xlate_onecell,
+};
+
+/*
+ * mdss_irq_handler() - top-level MDSS interrupt handler.  Reads the HW
+ * interrupt status, dispatches the known blocks (MDP under mdp_lock,
+ * then DSI0/DSI1/EDP/HDMI), and forwards any remaining status bits to
+ * external drivers through the misc irq domain, highest bit first.
+ * irq_buzy brackets the dispatch window for the power code to observe.
+ */
+static irqreturn_t mdss_irq_handler(int irq, void *ptr)
+{
+	struct mdss_data_type *mdata = ptr;
+	u32 intr;
+
+	if (!mdata)
+		return IRQ_NONE;
+	else if (!mdss_get_irq_enable_state(&mdss_mdp_hw))
+		return IRQ_HANDLED;
+
+	intr = MDSS_REG_READ(mdata, MDSS_REG_HW_INTR_STATUS);
+
+	mdss_mdp_hw.irq_info->irq_buzy = true;
+
+	if (intr & MDSS_INTR_MDP) {
+		spin_lock(&mdp_lock);
+		mdata->mdss_util->irq_dispatch(MDSS_HW_MDP, irq, ptr);
+		spin_unlock(&mdp_lock);
+		intr &= ~MDSS_INTR_MDP;
+	}
+
+	if (intr & MDSS_INTR_DSI0) {
+		mdata->mdss_util->irq_dispatch(MDSS_HW_DSI0, irq, ptr);
+		intr &= ~MDSS_INTR_DSI0;
+	}
+
+	if (intr & MDSS_INTR_DSI1) {
+		mdata->mdss_util->irq_dispatch(MDSS_HW_DSI1, irq, ptr);
+		intr &= ~MDSS_INTR_DSI1;
+	}
+
+	if (intr & MDSS_INTR_EDP) {
+		mdata->mdss_util->irq_dispatch(MDSS_HW_EDP, irq, ptr);
+		intr &= ~MDSS_INTR_EDP;
+	}
+
+	if (intr & MDSS_INTR_HDMI) {
+		mdata->mdss_util->irq_dispatch(MDSS_HW_HDMI, irq, ptr);
+		intr &= ~MDSS_INTR_HDMI;
+	}
+
+	/* route misc. interrupts to external drivers */
+	while (intr) {
+		irq_hw_number_t hwirq = fls(intr) - 1;
+
+		generic_handle_irq(irq_find_mapping(
+				mdata->irq_domain, hwirq));
+		intr &= ~(1 << hwirq);
+	}
+
+	mdss_mdp_hw.irq_info->irq_buzy = false;
+
+	return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_MSM_BUS_SCALING
+/*
+ * mdss_mdp_bus_scale_register() - one-time registration of the three MDSS
+ * bus-scaling clients: the data (AXI) bus from DT, the register (AHB) bus
+ * from the static mdp_reg_bus_scale_table, and the optional hw-rt bus.
+ * reg/hw-rt registration failures are tolerated with a warning; only the
+ * data-bus client is mandatory.  Finishes by recording (not yet voting)
+ * the initial AB/IB quota -- the real vote happens at splash handoff.
+ */
+static int mdss_mdp_bus_scale_register(struct mdss_data_type *mdata)
+{
+	struct msm_bus_scale_pdata *reg_bus_pdata;
+	int i, rc;
+
+	if (!mdata->bus_hdl) {
+		rc = mdss_mdp_parse_dt_bus_scale(mdata->pdev);
+		if (rc) {
+			pr_err("Error in device tree : bus scale\n");
+			return rc;
+		}
+
+		mdata->bus_hdl =
+			msm_bus_scale_register_client(mdata->bus_scale_table);
+		if (!mdata->bus_hdl) {
+			pr_err("bus_client register failed\n");
+			return -EINVAL;
+		}
+
+		pr_debug("register bus_hdl=%x\n", mdata->bus_hdl);
+	}
+
+	if (!mdata->reg_bus_scale_table) {
+		reg_bus_pdata = &mdp_reg_bus_scale_table;
+		for (i = 0; i < reg_bus_pdata->num_usecases; i++) {
+			mdp_reg_bus_usecases[i].num_paths = 1;
+			mdp_reg_bus_usecases[i].vectors =
+				&mdp_reg_bus_vectors[i];
+		}
+		mdata->reg_bus_scale_table = reg_bus_pdata;
+	}
+
+	if (!mdata->reg_bus_hdl) {
+		mdata->reg_bus_hdl =
+			msm_bus_scale_register_client(
+			      mdata->reg_bus_scale_table);
+		if (!mdata->reg_bus_hdl)
+			/* Continue without reg_bus scaling */
+			pr_warn("reg_bus_client register failed\n");
+		else
+			pr_debug("register reg_bus_hdl=%x\n",
+					mdata->reg_bus_hdl);
+	}
+
+	if (mdata->hw_rt_bus_scale_table && !mdata->hw_rt_bus_hdl) {
+		mdata->hw_rt_bus_hdl =
+			msm_bus_scale_register_client(
+			      mdata->hw_rt_bus_scale_table);
+		if (!mdata->hw_rt_bus_hdl)
+			/* Continue without reg_bus scaling */
+			pr_warn("hw_rt_bus client register failed\n");
+		else
+			pr_debug("register hw_rt_bus=%x\n",
+					mdata->hw_rt_bus_hdl);
+	}
+
+	/*
+	 * Following call will not result in actual vote rather update the
+	 * current index and ab/ib value. When continuous splash is enabled,
+	 * actual vote will happen when splash handoff is done.
+	 */
+	return mdss_bus_scale_set_quota(MDSS_MDP_RT, AB_QUOTA, IB_QUOTA);
+}
+
+/*
+ * mdss_mdp_bus_scale_unregister() - unregister the data, register, and
+ * hw-rt bus-scaling clients created by mdss_mdp_bus_scale_register().
+ * NOTE(review): bus_hdl is not zeroed after unregistering, unlike
+ * reg_bus_hdl/hw_rt_bus_hdl -- a later re-register would be skipped;
+ * confirm this function is only called on teardown.
+ */
+static void mdss_mdp_bus_scale_unregister(struct mdss_data_type *mdata)
+{
+	pr_debug("unregister bus_hdl=%x\n", mdata->bus_hdl);
+
+	if (mdata->bus_hdl)
+		msm_bus_scale_unregister_client(mdata->bus_hdl);
+
+	pr_debug("unregister reg_bus_hdl=%x\n", mdata->reg_bus_hdl);
+
+	if (mdata->reg_bus_hdl) {
+		msm_bus_scale_unregister_client(mdata->reg_bus_hdl);
+		mdata->reg_bus_hdl = 0;
+	}
+
+	if (mdata->hw_rt_bus_hdl) {
+		msm_bus_scale_unregister_client(mdata->hw_rt_bus_hdl);
+		mdata->hw_rt_bus_hdl = 0;
+	}
+}
+
+/*
+ * Caller needs to hold mdata->bus_lock lock before calling this function.
+ */
+/*
+ * Caller needs to hold mdata->bus_lock lock before calling this function.
+ *
+ * Distribute the RT and NRT AB/IB bandwidth quotas across the AXI ports,
+ * update the next ping-pong usecase slot in the bus table, and vote it
+ * via msm_bus_scale_client_update_request().  IB is divided by the number
+ * of bus channels; AB is split per RT/NRT port when NRT ports (or fixed
+ * QoS arbitration) exist, otherwise spread evenly.  The vote is skipped
+ * (index recorded only) when every vector already matches, or when
+ * bus_ref_cnt is zero with a non-zero usecase index (deferred until the
+ * bus refcount rises, e.g. splash handoff).
+ */
+static int mdss_mdp_bus_scale_set_quota(u64 ab_quota_rt, u64 ab_quota_nrt,
+		u64 ib_quota_rt, u64 ib_quota_nrt)
+{
+	int new_uc_idx;
+	u64 ab_quota[MAX_AXI_PORT_COUNT] = {0, 0};
+	u64 ib_quota[MAX_AXI_PORT_COUNT] = {0, 0};
+	int rc;
+
+	if (mdss_res->bus_hdl < 1) {
+		pr_err("invalid bus handle %d\n", mdss_res->bus_hdl);
+		return -EINVAL;
+	}
+
+	if (!ab_quota_rt && !ab_quota_nrt && !ib_quota_rt && !ib_quota_nrt)  {
+		new_uc_idx = 0;
+	} else {
+		int i;
+		struct msm_bus_vectors *vect = NULL;
+		struct msm_bus_scale_pdata *bw_table =
+			mdss_res->bus_scale_table;
+		u32 nrt_axi_port_cnt = mdss_res->nrt_axi_port_cnt;
+		u32 total_axi_port_cnt = mdss_res->axi_port_cnt;
+		u32 rt_axi_port_cnt = total_axi_port_cnt - nrt_axi_port_cnt;
+		int match_cnt = 0;
+
+		if (!bw_table || !total_axi_port_cnt ||
+		    total_axi_port_cnt > MAX_AXI_PORT_COUNT) {
+			pr_err("invalid input\n");
+			return -EINVAL;
+		}
+
+		if (mdss_res->bus_channels) {
+			ib_quota_rt = div_u64(ib_quota_rt,
+						mdss_res->bus_channels);
+			ib_quota_nrt = div_u64(ib_quota_nrt,
+						mdss_res->bus_channels);
+		}
+
+		if (mdss_res->has_fixed_qos_arbiter_enabled ||
+			nrt_axi_port_cnt) {
+
+			ab_quota_rt = div_u64(ab_quota_rt, rt_axi_port_cnt);
+			ab_quota_nrt = div_u64(ab_quota_nrt, nrt_axi_port_cnt);
+
+			for (i = 0; i < total_axi_port_cnt; i++) {
+				if (i < rt_axi_port_cnt) {
+					ab_quota[i] = ab_quota_rt;
+					ib_quota[i] = ib_quota_rt;
+				} else {
+					ab_quota[i] = ab_quota_nrt;
+					ib_quota[i] = ib_quota_nrt;
+				}
+			}
+		} else {
+			ab_quota[0] = div_u64(ab_quota_rt + ab_quota_nrt,
+					total_axi_port_cnt);
+			ib_quota[0] = ib_quota_rt + ib_quota_nrt;
+
+			for (i = 1; i < total_axi_port_cnt; i++) {
+				ab_quota[i] = ab_quota[0];
+				ib_quota[i] = ib_quota[0];
+			}
+		}
+
+		for (i = 0; i < total_axi_port_cnt; i++) {
+			vect = &bw_table->usecase
+				[mdss_res->curr_bw_uc_idx].vectors[i];
+			/* avoid performing updates for small changes */
+			if ((ab_quota[i] == vect->ab) &&
+				(ib_quota[i] == vect->ib))
+				match_cnt++;
+		}
+
+		if (match_cnt == total_axi_port_cnt) {
+			pr_debug("skip BW vote\n");
+			return 0;
+		}
+
+		new_uc_idx = (mdss_res->curr_bw_uc_idx %
+			(bw_table->num_usecases - 1)) + 1;
+
+		for (i = 0; i < total_axi_port_cnt; i++) {
+			vect = &bw_table->usecase[new_uc_idx].vectors[i];
+			vect->ab = ab_quota[i];
+			vect->ib = ib_quota[i];
+
+			pr_debug("uc_idx=%d %s path idx=%d ab=%llu ib=%llu\n",
+				new_uc_idx, (i < rt_axi_port_cnt) ? "rt" : "nrt"
+				, i, vect->ab, vect->ib);
+		}
+	}
+	mdss_res->curr_bw_uc_idx = new_uc_idx;
+	mdss_res->ao_bw_uc_idx = new_uc_idx;
+
+	if ((mdss_res->bus_ref_cnt == 0) && mdss_res->curr_bw_uc_idx) {
+		rc = 0;
+	} else { /* vote BW if bus_bw_cnt > 0 or uc_idx is zero */
+		ATRACE_BEGIN("msm_bus_scale_req");
+		rc = msm_bus_scale_client_update_request(mdss_res->bus_hdl,
+			new_uc_idx);
+		ATRACE_END("msm_bus_scale_req");
+	}
+	return rc;
+}
+
+/*
+ * mdss_reg_bus_vote_client_create() - allocate and register a reg-bus
+ * vote client.
+ * @client_name: human-readable name copied into the client (truncated to
+ *		 MAX_CLIENT_NAME_LEN).
+ *
+ * The new client starts at VOTE_INDEX_DISABLE and is linked into
+ * mdss_res->reg_bus_clist under reg_bus_lock. Returns the client, or an
+ * ERR_PTR on invalid name / allocation failure (callers must use IS_ERR).
+ */
+struct reg_bus_client *mdss_reg_bus_vote_client_create(char *client_name)
+{
+	struct reg_bus_client *client;
+	static u32 id;
+
+	if (client_name == NULL) {
+		pr_err("client name is null\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* kzalloc is the idiomatic zeroed allocation for a single object */
+	client = kzalloc(sizeof(struct reg_bus_client), GFP_KERNEL);
+	if (!client)
+		return ERR_PTR(-ENOMEM);
+
+	mutex_lock(&mdss_res->reg_bus_lock);
+	strlcpy(client->name, client_name, MAX_CLIENT_NAME_LEN);
+	client->usecase_ndx = VOTE_INDEX_DISABLE;
+	client->id = id;
+	pr_debug("bus vote client %s created:%pK id :%d\n", client_name,
+		client, id);
+	id++;	/* id generation serialized by reg_bus_lock */
+	list_add(&client->list, &mdss_res->reg_bus_clist);
+	mutex_unlock(&mdss_res->reg_bus_lock);
+
+	return client;
+}
+
+/*
+ * mdss_reg_bus_vote_client_destroy() - unlink and free a reg-bus vote
+ * client previously created by mdss_reg_bus_vote_client_create().
+ * A NULL handle is logged and ignored.
+ */
+void mdss_reg_bus_vote_client_destroy(struct reg_bus_client *client)
+{
+	if (!client) {
+		pr_err("reg bus vote: invalid client handle\n");
+		return;
+	}
+
+	pr_debug("bus vote client %s destroyed:%pK id:%u\n",
+		client->name, client, client->id);
+
+	mutex_lock(&mdss_res->reg_bus_lock);
+	list_del_init(&client->list);
+	mutex_unlock(&mdss_res->reg_bus_lock);
+
+	kfree(client);
+}
+
+/*
+ * mdss_update_reg_bus_vote() - record one client's reg-bus vote and apply
+ * the maximum vote held across all registered clients.
+ * @bus_client:  client whose vote is being updated.
+ * @usecase_ndx: new vote index for this client.
+ *
+ * Returns 0 when there is nothing to do (no mdss_res/handle/client, or the
+ * aggregate vote did not change); otherwise the bus-scale request result.
+ */
+int mdss_update_reg_bus_vote(struct reg_bus_client *bus_client, u32 usecase_ndx)
+{
+	int ret = 0;
+	bool changed = false;
+	u32 max_usecase_ndx = VOTE_INDEX_DISABLE;
+	struct reg_bus_client *client, *temp_client;
+
+	if (!mdss_res || !mdss_res->reg_bus_hdl || !bus_client)
+		return 0;
+
+	mutex_lock(&mdss_res->reg_bus_lock);
+	bus_client->usecase_ndx = usecase_ndx;
+	/* aggregate: highest valid vote among all clients wins */
+	list_for_each_entry_safe(client, temp_client, &mdss_res->reg_bus_clist,
+		list) {
+
+		if (client->usecase_ndx < VOTE_INDEX_MAX &&
+		    client->usecase_ndx > max_usecase_ndx)
+			max_usecase_ndx = client->usecase_ndx;
+	}
+
+	if (mdss_res->reg_bus_usecase_ndx != max_usecase_ndx) {
+		changed = true;
+		mdss_res->reg_bus_usecase_ndx = max_usecase_ndx;
+	}
+
+	pr_debug("%pS: changed=%d current idx=%d request client %s id:%u idx:%d\n",
+		__builtin_return_address(0), changed, max_usecase_ndx,
+		bus_client->name, bus_client->id, usecase_ndx);
+	MDSS_XLOG(changed, max_usecase_ndx, bus_client->id, usecase_ndx);
+	/* only hit the bus driver when the aggregate actually moved */
+	if (changed)
+		ret = msm_bus_scale_client_update_request(mdss_res->reg_bus_hdl,
+			max_usecase_ndx);
+
+	mutex_unlock(&mdss_res->reg_bus_lock);
+	return ret;
+}
+
+/*
+ * mdss_bus_scale_set_quota() - record one client's ab/ib bandwidth quota
+ * and re-aggregate across all MDSS bus clients.
+ * @client:   index into mdss_res->ab/ib (one of the MDSS bus clients).
+ * @ab_quota: average bandwidth for this client.
+ * @ib_quota: instantaneous bandwidth for this client.
+ *
+ * The MDSS_MDP_NRT client is kept separate (non-real-time); for the
+ * real-time clients ab is summed while ib takes the maximum. The combined
+ * totals are pushed to mdss_mdp_bus_scale_set_quota() under bus_lock.
+ */
+int mdss_bus_scale_set_quota(int client, u64 ab_quota, u64 ib_quota)
+{
+	int rc = 0;
+	int i;
+	u64 total_ab_rt = 0, total_ib_rt = 0;
+	u64 total_ab_nrt = 0, total_ib_nrt = 0;
+
+	mutex_lock(&mdss_res->bus_lock);
+
+	mdss_res->ab[client] = ab_quota;
+	mdss_res->ib[client] = ib_quota;
+	trace_mdp_perf_update_bus(client, ab_quota, ib_quota);
+
+	for (i = 0; i < MDSS_MAX_BUS_CLIENTS; i++) {
+		if (i == MDSS_MDP_NRT) {
+			total_ab_nrt = mdss_res->ab[i];
+			total_ib_nrt = mdss_res->ib[i];
+		} else {
+			/* RT: ab accumulates, ib is a peak requirement */
+			total_ab_rt += mdss_res->ab[i];
+			total_ib_rt = max(total_ib_rt, mdss_res->ib[i]);
+		}
+	}
+
+	rc = mdss_mdp_bus_scale_set_quota(total_ab_rt, total_ab_nrt,
+			total_ib_rt, total_ib_nrt);
+
+	mutex_unlock(&mdss_res->bus_lock);
+
+	return rc;
+}
+#else
+/* Stub when bus scaling support is not compiled in: nothing to register. */
+static int mdss_mdp_bus_scale_register(struct mdss_data_type *mdata)
+{
+	return 0;
+}
+
+/* Stub when bus scaling support is not compiled in: nothing to undo. */
+static void mdss_mdp_bus_scale_unregister(struct mdss_data_type *mdata)
+{
+}
+
+/* Stub when bus scaling support is not compiled in: log and succeed. */
+int mdss_bus_scale_set_quota(int client, u64 ab_quota, u64 ib_quota)
+{
+	pr_debug("No bus scaling! client=%d ab=%llu ib=%llu\n",
+			client, ab_quota, ib_quota);
+
+	return 0;
+}
+
+/*
+ * Stub when bus scaling support is not compiled in.
+ * NOTE(review): returns NULL rather than an ERR_PTR like the real
+ * implementation — callers checking only IS_ERR() treat this as success.
+ */
+struct reg_bus_client *mdss_reg_bus_vote_client_create(char *client_name)
+{
+	return NULL;
+}
+
+/* Stub when bus scaling support is not compiled in: nothing to free. */
+void mdss_reg_bus_vote_client_destroy(struct reg_bus_client *client)
+{
+}
+
+/* Stub when bus scaling support is not compiled in: log and succeed. */
+int mdss_update_reg_bus_vote(struct reg_bus_client *bus_client, u32 usecase_ndx)
+{
+	pr_debug("%pS: No reg scaling! usecase=%u\n",
+			__builtin_return_address(0), usecase_ndx);
+
+	return 0;
+}
+#endif
+
+
+/*
+ * mdss_mdp_intr2index() - map an (intr_type, intf_num) pair to its slot in
+ * the static mdp_irq_map table. Returns the index, or -EINVAL if the pair
+ * is not present.
+ */
+static int mdss_mdp_intr2index(u32 intr_type, u32 intf_num)
+{
+	int idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(mdp_irq_map); idx++) {
+		if (mdp_irq_map[idx].intr_type == intr_type &&
+		    mdp_irq_map[idx].intf_num == intf_num)
+			return idx;
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * mdss_mdp_get_irq_mask() - return the irq mask for an interrupt
+ * type/interface pair, or 0 when the pair is unknown.
+ */
+u32 mdss_mdp_get_irq_mask(u32 intr_type, u32 intf_num)
+{
+	int idx = mdss_mdp_intr2index(intr_type, intf_num);
+
+	if (idx < 0)
+		return 0;
+
+	return mdp_irq_map[idx].irq_mask;
+}
+
+/* Enable the MDP hardware irq line via the mdss_util irq helpers. */
+void mdss_mdp_enable_hw_irq(struct mdss_data_type *mdata)
+{
+	mdata->mdss_util->enable_irq(&mdss_mdp_hw);
+}
+
+/*
+ * Disable the MDP hardware irq line, but only when no individual MDP irq
+ * is still enabled (is_mdp_irq_enabled() checks the per-register masks).
+ */
+void mdss_mdp_disable_hw_irq(struct mdss_data_type *mdata)
+{
+	if (!is_mdp_irq_enabled())
+		mdata->mdss_util->disable_irq(&mdss_mdp_hw);
+}
+
+/* function assumes that mdp is clocked to access hw registers */
+/* function assumes that mdp is clocked to access hw registers */
+/*
+ * mdss_mdp_irq_clear() - write the irq mask for (intr_type, intf_num) to
+ * its clear register, acking any latched status. Invalid pairs are logged
+ * and ignored. Taken under mdp_lock to serialize with the ISR.
+ */
+void mdss_mdp_irq_clear(struct mdss_data_type *mdata,
+		u32 intr_type, u32 intf_num)
+{
+	unsigned long irq_flags;
+	int irq_idx;
+	struct mdss_mdp_intr_reg reg;
+	struct mdss_mdp_irq irq;
+
+	irq_idx = mdss_mdp_intr2index(intr_type, intf_num);
+	if (irq_idx < 0) {
+		pr_err("invalid irq request\n");
+		return;
+	}
+
+	irq = mdp_irq_map[irq_idx];
+	reg = mdp_intr_reg[irq.reg_idx];
+
+	pr_debug("clearing mdp irq mask=%x\n", irq.irq_mask);
+	spin_lock_irqsave(&mdp_lock, irq_flags);
+	writel_relaxed(irq.irq_mask, mdata->mdp_base + reg.clr_off);
+	spin_unlock_irqrestore(&mdp_lock, irq_flags);
+}
+
+/*
+ * mdss_mdp_irq_enable() - enable one MDP interrupt source.
+ * @intr_type: mdp interrupt type.
+ * @intf_num:  mdp interface number.
+ *
+ * Clears any stale status, sets the bit in the cached enable mask, writes
+ * it to hardware and enables the top-level irq line. Returns -EINVAL for
+ * an unknown pair and -EBUSY if the source is already enabled.
+ *
+ * Fix: the "already set" warning previously printed mdp_irq_mask[idx]
+ * with 'idx' never initialized (undefined behavior); it must index by
+ * irq.reg_idx like every other access in this function.
+ */
+int mdss_mdp_irq_enable(u32 intr_type, u32 intf_num)
+{
+	int irq_idx;
+	unsigned long irq_flags;
+	int ret = 0;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_intr_reg reg;
+	struct mdss_mdp_irq irq;
+
+	irq_idx = mdss_mdp_intr2index(intr_type, intf_num);
+	if (irq_idx < 0) {
+		pr_err("invalid irq request\n");
+		return -EINVAL;
+	}
+
+	irq = mdp_irq_map[irq_idx];
+	reg = mdp_intr_reg[irq.reg_idx];
+
+	spin_lock_irqsave(&mdp_lock, irq_flags);
+	if (mdata->mdp_irq_mask[irq.reg_idx] & irq.irq_mask) {
+		pr_warn("MDSS MDP IRQ-0x%x is already set, mask=%x\n",
+				irq.irq_mask, mdata->mdp_irq_mask[irq.reg_idx]);
+		ret = -EBUSY;
+	} else {
+		pr_debug("MDP IRQ mask old=%x new=%x\n",
+				mdata->mdp_irq_mask[irq.reg_idx], irq.irq_mask);
+		mdata->mdp_irq_mask[irq.reg_idx] |= irq.irq_mask;
+		/* ack stale status before enabling to avoid a spurious irq */
+		writel_relaxed(irq.irq_mask, mdata->mdp_base + reg.clr_off);
+		writel_relaxed(mdata->mdp_irq_mask[irq.reg_idx],
+				mdata->mdp_base + reg.en_off);
+		mdata->mdss_util->enable_irq(&mdss_mdp_hw);
+	}
+	spin_unlock_irqrestore(&mdp_lock, irq_flags);
+
+	return ret;
+}
+/*
+ * mdss_mdp_hist_irq_enable() - enable histogram interrupt bit(s) @irq.
+ * Returns -EBUSY if already enabled, 0 otherwise.
+ *
+ * NOTE(review): unlike mdss_mdp_irq_enable() this does not take mdp_lock
+ * while updating mdp_hist_irq_mask — confirm callers serialize access.
+ */
+int mdss_mdp_hist_irq_enable(u32 irq)
+{
+	int ret = 0;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (mdata->mdp_hist_irq_mask & irq) {
+		pr_warn("MDSS MDP Hist IRQ-0x%x is already set, mask=%x\n",
+				irq, mdata->mdp_hist_irq_mask);
+		ret = -EBUSY;
+	} else {
+		pr_debug("mask old=%x new=%x\n",
+				mdata->mdp_hist_irq_mask, irq);
+		mdata->mdp_hist_irq_mask |= irq;
+		/* ack any stale status before enabling */
+		writel_relaxed(irq, mdata->mdp_base +
+			MDSS_MDP_REG_HIST_INTR_CLEAR);
+		writel_relaxed(mdata->mdp_hist_irq_mask, mdata->mdp_base +
+			MDSS_MDP_REG_HIST_INTR_EN);
+		mdata->mdss_util->enable_irq(&mdss_mdp_hw);
+	}
+
+	return ret;
+}
+
+/*
+ * mdss_mdp_irq_disable() - disable one MDP interrupt source, and drop the
+ * top-level irq line once no source remains enabled. Warns (but does not
+ * fail) if the source was not enabled. Serialized by mdp_lock.
+ */
+void mdss_mdp_irq_disable(u32 intr_type, u32 intf_num)
+{
+	int irq_idx;
+	unsigned long irq_flags;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_intr_reg reg;
+	struct mdss_mdp_irq irq;
+
+	irq_idx = mdss_mdp_intr2index(intr_type, intf_num);
+	if (irq_idx < 0) {
+		pr_err("invalid irq request\n");
+		return;
+	}
+
+	irq =  mdp_irq_map[irq_idx];
+	reg = mdp_intr_reg[irq.reg_idx];
+
+	spin_lock_irqsave(&mdp_lock, irq_flags);
+	if (!(mdata->mdp_irq_mask[irq.reg_idx] & irq.irq_mask)) {
+		pr_warn("MDSS MDP IRQ-%x is NOT set, mask=%x\n",
+				irq.irq_mask, mdata->mdp_irq_mask[irq.reg_idx]);
+	} else {
+		mdata->mdp_irq_mask[irq.reg_idx] &= ~irq.irq_mask;
+		writel_relaxed(mdata->mdp_irq_mask[irq.reg_idx],
+				mdata->mdp_base + reg.en_off);
+		if (!is_mdp_irq_enabled())
+			mdata->mdss_util->disable_irq(&mdss_mdp_hw);
+	}
+	spin_unlock_irqrestore(&mdp_lock, irq_flags);
+}
+
+/* This function is used to check and clear the status of MDP interrupts */
+/* This function is used to check and clear the status of MDP interrupts */
+/*
+ * Reads the status register for the (intr_type, intf_num) source and, if
+ * its bit is latched, acks it via the clear register. Runs under mdp_lock;
+ * assumes the MDP clocks are on so the registers are accessible.
+ */
+void mdss_mdp_intr_check_and_clear(u32 intr_type, u32 intf_num)
+{
+	u32 status;
+	int irq_idx;
+	unsigned long irq_flags;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_intr_reg reg;
+	struct mdss_mdp_irq irq;
+
+	irq_idx = mdss_mdp_intr2index(intr_type, intf_num);
+	if (irq_idx < 0) {
+		pr_err("invalid irq request\n");
+		return;
+	}
+
+	irq =  mdp_irq_map[irq_idx];
+	reg = mdp_intr_reg[irq.reg_idx];
+
+	spin_lock_irqsave(&mdp_lock, irq_flags);
+	status = irq.irq_mask & readl_relaxed(mdata->mdp_base +
+			reg.status_off);
+	if (status) {
+		pr_debug("clearing irq: intr_type:%d, intf_num:%d\n",
+				intr_type, intf_num);
+		writel_relaxed(irq.irq_mask, mdata->mdp_base + reg.clr_off);
+	}
+	spin_unlock_irqrestore(&mdp_lock, irq_flags);
+}
+
+/*
+ * mdss_mdp_hist_irq_disable() - disable histogram interrupt bit(s) @irq
+ * and drop the top-level irq line once nothing remains enabled.
+ *
+ * NOTE(review): mdp_hist_irq_mask is updated without mdp_lock here, as in
+ * mdss_mdp_hist_irq_enable() — confirm callers serialize access.
+ */
+void mdss_mdp_hist_irq_disable(u32 irq)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!(mdata->mdp_hist_irq_mask & irq)) {
+		pr_warn("MDSS MDP IRQ-%x is NOT set, mask=%x\n",
+				irq, mdata->mdp_hist_irq_mask);
+	} else {
+		mdata->mdp_hist_irq_mask &= ~irq;
+		writel_relaxed(mdata->mdp_hist_irq_mask, mdata->mdp_base +
+			MDSS_MDP_REG_HIST_INTR_EN);
+		if (!is_mdp_irq_enabled())
+			mdata->mdss_util->disable_irq(&mdss_mdp_hw);
+	}
+}
+
+/**
+ * mdss_mdp_irq_disable_nosync() - disable mdp irq
+ * @intr_type:	mdp interface type
+ * @intf_num:	mdp interface num
+ *
+ * This function is called from interrupt context
+ * mdp_lock is already held at up stream (mdss_irq_handler)
+ * therefore spin_lock(&mdp_lock) is not allowed here
+ *
+ */
+void mdss_mdp_irq_disable_nosync(u32 intr_type, u32 intf_num)
+{
+	int irq_idx;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_intr_reg reg;
+	struct mdss_mdp_irq irq;
+
+	irq_idx = mdss_mdp_intr2index(intr_type, intf_num);
+	if (irq_idx < 0) {
+		pr_err("invalid irq request\n");
+		return;
+	}
+
+	irq = mdp_irq_map[irq_idx];
+	reg = mdp_intr_reg[irq.reg_idx];
+
+	if (!(mdata->mdp_irq_mask[irq.reg_idx] & irq.irq_mask)) {
+		pr_warn("MDSS MDP IRQ-%x is NOT set, mask=%x\n",
+				irq.irq_mask, mdata->mdp_irq_mask[irq.reg_idx]);
+	} else {
+		mdata->mdp_irq_mask[irq.reg_idx] &= ~irq.irq_mask;
+		writel_relaxed(mdata->mdp_irq_mask[irq.reg_idx],
+				mdata->mdp_base + reg.en_off);
+		/* _nosync variant: caller already holds mdp_lock */
+		if (!is_mdp_irq_enabled())
+			mdata->mdss_util->disable_irq_nosync(&mdss_mdp_hw);
+	}
+}
+
+/*
+ * mdss_mdp_set_intr_callback() - install (or clear, with NULL) the handler
+ * invoked when the (intr_type, intf_num) interrupt fires.
+ * @fnc_ptr: callback, or NULL to remove the current one.
+ * @arg:     opaque pointer passed back to the callback.
+ *
+ * Returns -EINVAL for an unknown pair. The table update is done under
+ * mdss_mdp_intr_lock so it is safe against the ISR.
+ *
+ * Fix: the warning string was garbled ("intf_numf_num").
+ */
+int mdss_mdp_set_intr_callback(u32 intr_type, u32 intf_num,
+		       void (*fnc_ptr)(void *), void *arg)
+{
+	unsigned long flags;
+	int index;
+
+	index = mdss_mdp_intr2index(intr_type, intf_num);
+	if (index < 0) {
+		pr_warn("invalid intr type=%u intf_num=%u\n",
+				intr_type, intf_num);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&mdss_mdp_intr_lock, flags);
+	WARN(mdp_intr_cb[index].func && fnc_ptr,
+			"replacing current intr callback for ndx=%d\n", index);
+	mdp_intr_cb[index].func = fnc_ptr;
+	mdp_intr_cb[index].arg = arg;
+	spin_unlock_irqrestore(&mdss_mdp_intr_lock, flags);
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_set_intr_callback_nosync() - lock-free variant of
+ * mdss_mdp_set_intr_callback() for contexts where mdss_mdp_intr_lock is
+ * already held or interrupts are otherwise excluded.
+ *
+ * Fix: garbled log strings ("intr Typee", "callbackack").
+ */
+int mdss_mdp_set_intr_callback_nosync(u32 intr_type, u32 intf_num,
+		       void	  (*fnc_ptr)(void *), void *arg)
+{
+	int index;
+
+	index = mdss_mdp_intr2index(intr_type, intf_num);
+	if (index < 0) {
+		pr_warn("invalid intr type=%u intf_num=%u\n",
+				intr_type, intf_num);
+		return -EINVAL;
+	}
+
+	WARN(mdp_intr_cb[index].func && fnc_ptr,
+			"replacing current intr callback for ndx=%d\n",
+			index);
+	mdp_intr_cb[index].func = fnc_ptr;
+	mdp_intr_cb[index].arg = arg;
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_intr_done() - dispatch the callback registered for irq slot
+ * @index. The func/arg pair is snapshotted under mdss_mdp_intr_lock and
+ * invoked outside the lock so the callback may re-register safely.
+ */
+static inline void mdss_mdp_intr_done(int index)
+{
+	void (*fnc)(void *);
+	void *arg;
+
+	spin_lock(&mdss_mdp_intr_lock);
+	fnc = mdp_intr_cb[index].func;
+	arg = mdp_intr_cb[index].arg;
+	spin_unlock(&mdss_mdp_intr_lock);
+	if (fnc)
+		fnc(arg);
+}
+
+/*
+ * mdss_mdp_isr() - top-level MDP interrupt handler.
+ *
+ * For each interrupt status register: read, ack, mask against the enable
+ * register, dispatch per-source callbacks, and (for register 0) feed the
+ * relevant vsync/done bits into MISR CRC collection. Histogram interrupts
+ * and video-interface interrupts are handled at the end. Returns
+ * IRQ_HANDLED unconditionally, including when clocks are off.
+ */
+irqreturn_t mdss_mdp_isr(int irq, void *ptr)
+{
+	struct mdss_data_type *mdata = ptr;
+	u32 isr, mask, hist_isr, hist_mask;
+	int i, j;
+
+	/* registers are unreadable with clocks gated; nothing to do */
+	if (!mdata->clk_ena)
+		return IRQ_HANDLED;
+
+	for (i = 0; i < ARRAY_SIZE(mdp_intr_reg); i++) {
+		struct mdss_mdp_intr_reg reg = mdp_intr_reg[i];
+
+		isr = readl_relaxed(mdata->mdp_base + reg.status_off);
+		if (isr == 0)
+			continue;
+
+		mask = readl_relaxed(mdata->mdp_base + reg.en_off);
+		/* ack everything that latched, even unmasked bits */
+		writel_relaxed(isr, mdata->mdp_base + reg.clr_off);
+
+		pr_debug("%s: reg:%d isr=%x mask=%x\n",
+				__func__, i+1, isr, mask);
+
+		isr &= mask;
+		if (isr == 0)
+			continue;
+
+		for (j = 0; j < ARRAY_SIZE(mdp_irq_map); j++)
+			if (mdp_irq_map[j].reg_idx == i &&
+					(isr & mdp_irq_map[j].irq_mask))
+				mdss_mdp_intr_done(j);
+		/* MISR CRC sources all live in interrupt register 0 */
+		if (!i) {
+			if (isr & MDSS_MDP_INTR_PING_PONG_0_DONE)
+				mdss_misr_crc_collect(mdata, DISPLAY_MISR_DSI0,
+						      false);
+
+			if (isr & MDSS_MDP_INTR_PING_PONG_1_DONE)
+				mdss_misr_crc_collect(mdata, DISPLAY_MISR_DSI1,
+						      false);
+
+			if (isr & MDSS_MDP_INTR_INTF_0_VSYNC)
+				mdss_misr_crc_collect(mdata, DISPLAY_MISR_EDP,
+						      true);
+
+			if (isr & MDSS_MDP_INTR_INTF_1_VSYNC)
+				mdss_misr_crc_collect(mdata, DISPLAY_MISR_DSI0,
+						      true);
+
+			if (isr & MDSS_MDP_INTR_INTF_2_VSYNC)
+				mdss_misr_crc_collect(mdata, DISPLAY_MISR_DSI1,
+						      true);
+
+			if (isr & MDSS_MDP_INTR_INTF_3_VSYNC)
+				mdss_misr_crc_collect(mdata, DISPLAY_MISR_HDMI,
+						      true);
+
+			if (isr & MDSS_MDP_INTR_WB_0_DONE)
+				mdss_misr_crc_collect(mdata, DISPLAY_MISR_MDP,
+						      true);
+
+			if (isr & MDSS_MDP_INTR_WB_1_DONE)
+				mdss_misr_crc_collect(mdata, DISPLAY_MISR_MDP,
+						      true);
+
+			if (isr &  MDSS_MDP_INTR_WB_2_DONE)
+				mdss_misr_crc_collect(mdata, DISPLAY_MISR_MDP,
+						      true);
+		}
+	}
+
+	hist_isr = readl_relaxed(mdata->mdp_base +
+			MDSS_MDP_REG_HIST_INTR_STATUS);
+	if (hist_isr != 0) {
+		hist_mask = readl_relaxed(mdata->mdp_base +
+				MDSS_MDP_REG_HIST_INTR_EN);
+		writel_relaxed(hist_isr, mdata->mdp_base +
+				MDSS_MDP_REG_HIST_INTR_CLEAR);
+		hist_isr &= hist_mask;
+		if (hist_isr != 0)
+			mdss_mdp_hist_intr_done(hist_isr);
+	}
+
+	mdss_mdp_video_isr(mdata->video_intf, mdata->nintf);
+	return IRQ_HANDLED;
+}
+
+/*
+ * mdss_mdp_clk_update() - prepare+enable or disable+unprepare one of the
+ * MDSS clocks by index. The vsync clock is pinned to 19.2 MHz before it is
+ * enabled. Returns -ENODEV when the clock was never registered.
+ */
+static int mdss_mdp_clk_update(u32 clk_idx, u32 enable)
+{
+	struct clk *clk = mdss_mdp_get_clk(clk_idx);
+
+	if (!clk)
+		return -ENODEV;
+
+	pr_debug("clk=%d en=%d\n", clk_idx, enable);
+
+	if (!enable) {
+		clk_disable_unprepare(clk);
+		return 0;
+	}
+
+	if (clk_idx == MDSS_CLK_MDP_VSYNC)
+		clk_set_rate(clk, 19200000);
+
+	return clk_prepare_enable(clk);
+}
+
+/*
+ * mdss_mdp_vsync_clk_enable() - toggle the vsync clock, tracking the
+ * current state in mdss_res->vsync_ena to avoid redundant toggles.
+ * @locked: true when the caller already holds mdp_clk_lock.
+ */
+int mdss_mdp_vsync_clk_enable(int enable, bool locked)
+{
+	int ret = 0;
+
+	pr_debug("clk enable=%d\n", enable);
+
+	if (!locked)
+		mutex_lock(&mdp_clk_lock);
+
+	if (mdss_res->vsync_ena != enable) {
+		mdss_res->vsync_ena = enable;
+		ret = mdss_mdp_clk_update(MDSS_CLK_MDP_VSYNC, enable);
+	}
+
+	if (!locked)
+		mutex_unlock(&mdp_clk_lock);
+	return ret;
+}
+
+/*
+ * mdss_mdp_set_clk_rate() - set the MDP core clock rate, clamped between
+ * the perf-tune minimum and the target's max_mdp_clk_rate, rounding via
+ * clk_round_rate() when below the cap.
+ *
+ * NOTE(review): IS_ERR_VALUE() on the unsigned long returned by
+ * clk_round_rate() relies on errors coming back as small negative longs —
+ * confirm this matches the kernel version's clk API semantics.
+ */
+void mdss_mdp_set_clk_rate(unsigned long rate)
+{
+	struct mdss_data_type *mdata = mdss_res;
+	unsigned long clk_rate;
+	struct clk *clk = mdss_mdp_get_clk(MDSS_CLK_MDP_CORE);
+	unsigned long min_clk_rate;
+
+	min_clk_rate = max(rate, mdata->perf_tune.min_mdp_clk);
+
+	if (clk) {
+		mutex_lock(&mdp_clk_lock);
+		if (min_clk_rate < mdata->max_mdp_clk_rate)
+			clk_rate = clk_round_rate(clk, min_clk_rate);
+		else
+			clk_rate = mdata->max_mdp_clk_rate;
+		if (IS_ERR_VALUE(clk_rate)) {
+			pr_err("unable to round rate err=%ld\n", clk_rate);
+		} else if (clk_rate != clk_get_rate(clk)) {
+			/* only touch hardware when the rate actually moves */
+			if (IS_ERR_VALUE(clk_set_rate(clk, clk_rate)))
+				pr_err("clk_set_rate failed\n");
+			else
+				pr_debug("mdp clk rate=%lu\n", clk_rate);
+		}
+		mutex_unlock(&mdp_clk_lock);
+	} else {
+		pr_err("mdp src clk not setup properly\n");
+	}
+}
+
+/*
+ * mdss_mdp_get_clk_rate() - read the current rate of an MDSS clock.
+ * @locked: true when the caller already holds mdp_clk_lock.
+ *
+ * Returns 0 when the clock was never registered.
+ */
+unsigned long mdss_mdp_get_clk_rate(u32 clk_idx, bool locked)
+{
+	unsigned long rate;
+	struct clk *clk = mdss_mdp_get_clk(clk_idx);
+
+	if (!clk)
+		return 0;
+
+	if (locked)
+		return clk_get_rate(clk);
+
+	mutex_lock(&mdp_clk_lock);
+	rate = clk_get_rate(clk);
+	mutex_unlock(&mdp_clk_lock);
+
+	return rate;
+}
+
+/**
+ * mdss_bus_rt_bw_vote() -- place bus bandwidth request
+ * @enable: value of enable or disable
+ *
+ * hw_rt table has two entries, 0 and Min Vote (1Mhz)
+ * while attaching SMMU and for few TZ operations which
+ * happen at very early stage, we will request Min Vote
+ * thru this handle.
+ *
+ * Reference-counted: the bus request is only updated on the 0->1 and 1->0
+ * transitions of hw_rt_bus_ref_cnt. No-op without a hw_rt bus handle or
+ * while continuous-splash handoff is still pending.
+ */
+static int mdss_bus_rt_bw_vote(bool enable)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	int rc = 0;
+	bool changed = false;
+
+	if (!mdata->hw_rt_bus_hdl || mdata->handoff_pending)
+		return 0;
+
+	if (enable) {
+		if (mdata->hw_rt_bus_ref_cnt == 0)
+			changed = true;
+		mdata->hw_rt_bus_ref_cnt++;
+	} else {
+		if (mdata->hw_rt_bus_ref_cnt != 0) {
+			mdata->hw_rt_bus_ref_cnt--;
+			if (mdata->hw_rt_bus_ref_cnt == 0)
+				changed = true;
+		} else {
+			pr_warn("%s: bus bw votes are not balanced\n",
+				__func__);
+		}
+	}
+
+	pr_debug("%pS: task:%s bw_cnt=%d changed=%d enable=%d\n",
+		__builtin_return_address(0), current->group_leader->comm,
+		mdata->hw_rt_bus_ref_cnt, changed, enable);
+
+	if (changed) {
+		rc = msm_bus_scale_client_update_request(mdata->hw_rt_bus_hdl,
+							 enable ? 1 : 0);
+		if (rc)
+			pr_err("%s: Bus bandwidth vote failed\n", __func__);
+	}
+
+	return rc;
+}
+
+/**
+ * __mdss_mdp_reg_access_clk_enable - Enable minimum MDSS clocks required
+ * for register access
+ *
+ * The disable path releases resources in the reverse of the enable order
+ * (clocks first, then the RT bandwidth and reg-bus votes).
+ */
+static inline void __mdss_mdp_reg_access_clk_enable(
+		struct mdss_data_type *mdata, bool enable)
+{
+	if (enable) {
+		mdss_update_reg_bus_vote(mdata->reg_bus_clt,
+				VOTE_INDEX_LOW);
+		mdss_bus_rt_bw_vote(true);
+		mdss_mdp_clk_update(MDSS_CLK_AHB, 1);
+		mdss_mdp_clk_update(MDSS_CLK_AXI, 1);
+		mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, 1);
+	} else {
+		mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, 0);
+		mdss_mdp_clk_update(MDSS_CLK_AXI, 0);
+		mdss_mdp_clk_update(MDSS_CLK_AHB, 0);
+		mdss_bus_rt_bw_vote(false);
+		mdss_update_reg_bus_vote(mdata->reg_bus_clt,
+				VOTE_INDEX_DISABLE);
+	}
+}
+
+/*
+ * __mdss_mdp_vbif_halt() - request an AXI halt on one VBIF (real-time or
+ * non-real-time) and poll for the halt acknowledgment.
+ * @is_nrt: true to halt the non-real-time VBIF.
+ *
+ * Returns 0 on success or missing base (targets without an NRT port),
+ * -ETIMEDOUT if the ack bit never sets. Caller must ensure register
+ * clocks are enabled.
+ */
+int __mdss_mdp_vbif_halt(struct mdss_data_type *mdata, bool is_nrt)
+{
+	int rc = 0;
+	void __iomem *base;
+	u32 halt_ack_mask = BIT(0), status;
+
+	/* if not real time vbif */
+	if (is_nrt)
+		base = mdata->vbif_nrt_io.base;
+	else
+		base = mdata->vbif_io.base;
+
+	if (!base) {
+		/* some targets might not have a nrt port */
+		goto vbif_done;
+	}
+
+	/* force vbif clock on */
+	MDSS_VBIF_WRITE(mdata, MMSS_VBIF_CLKON, 1, is_nrt);
+
+	/* request halt */
+	MDSS_VBIF_WRITE(mdata, MMSS_VBIF_AXI_HALT_CTRL0, 1, is_nrt);
+
+	/* poll every 1us until the ack bit sets or the timeout expires */
+	rc = readl_poll_timeout(base +
+			MMSS_VBIF_AXI_HALT_CTRL1, status, (status &
+				halt_ack_mask),
+			1000, AXI_HALT_TIMEOUT_US);
+	if (rc == -ETIMEDOUT) {
+		pr_err("VBIF axi is not halting. TIMEDOUT.\n");
+		goto vbif_done;
+	}
+
+	pr_debug("VBIF axi is halted\n");
+
+vbif_done:
+	return rc;
+}
+
+/**
+ * mdss_mdp_vbif_axi_halt() - Halt MDSS AXI ports
+ * @mdata: pointer to the global mdss data structure.
+ *
+ * This function can be called during deep suspend, display off or for
+ * debugging purposes. On success it should be assumed that AXI ports connected
+ * to RT VBIF are in idle state and would not fetch any more data.
+ */
+static void mdss_mdp_vbif_axi_halt(struct mdss_data_type *mdata)
+{
+	/* register access requires the minimal clock/bus votes */
+	__mdss_mdp_reg_access_clk_enable(mdata, true);
+
+	/* real time ports */
+	__mdss_mdp_vbif_halt(mdata, false);
+	/* non-real time ports */
+	__mdss_mdp_vbif_halt(mdata, true);
+
+	__mdss_mdp_reg_access_clk_enable(mdata, false);
+}
+
+/*
+ * mdss_iommu_ctrl() - reference-counted SMMU attach/detach.
+ * @enable: non-zero takes a reference (attaching on the first), zero drops
+ *	    one (detaching on the last).
+ *
+ * Attach is deferred while continuous-splash handoff is pending, since the
+ * display may still be scanning out from physical addresses. Returns the
+ * new reference count on success or a negative errno from attach/detach.
+ */
+int mdss_iommu_ctrl(int enable)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	int rc = 0;
+
+	mutex_lock(&mdp_iommu_ref_cnt_lock);
+	pr_debug("%pS: enable:%d ref_cnt:%d attach:%d hoff:%d\n",
+		__builtin_return_address(0), enable, mdata->iommu_ref_cnt,
+		mdata->iommu_attached, mdata->handoff_pending);
+
+	if (enable) {
+		/*
+		 * delay iommu attach until continuous splash screen has
+		 * finished handoff, as it may still be working with phys addr
+		 */
+		if (!mdata->iommu_attached && !mdata->handoff_pending) {
+			mdss_bus_rt_bw_vote(true);
+			rc = mdss_smmu_attach(mdata);
+		}
+		mdata->iommu_ref_cnt++;
+	} else {
+		if (mdata->iommu_ref_cnt) {
+			mdata->iommu_ref_cnt--;
+			if (mdata->iommu_ref_cnt == 0) {
+				rc = mdss_smmu_detach(mdata);
+				mdss_bus_rt_bw_vote(false);
+			}
+		} else {
+			pr_err("unbalanced iommu ref\n");
+		}
+	}
+	mutex_unlock(&mdp_iommu_ref_cnt_lock);
+
+	if (IS_ERR_VALUE(rc))
+		return rc;
+	else
+		return mdata->iommu_ref_cnt;
+}
+
+/*
+ * mdss_mdp_memory_retention_enter() - configure the MDP core clock's
+ * parent so memories are retained but the periphery is powered off
+ * (RETAIN_MEM + PERIPH_OFF_SET + NORETAIN_PERIPH) before power collapse.
+ */
+static void mdss_mdp_memory_retention_enter(void)
+{
+	struct clk *mdss_mdp_clk = NULL;
+	struct clk *mdp_vote_clk = mdss_mdp_get_clk(MDSS_CLK_MDP_CORE);
+
+	if (mdp_vote_clk) {
+		mdss_mdp_clk = clk_get_parent(mdp_vote_clk);
+		if (mdss_mdp_clk) {
+			clk_set_flags(mdss_mdp_clk, CLKFLAG_RETAIN_MEM);
+			clk_set_flags(mdss_mdp_clk, CLKFLAG_PERIPH_OFF_SET);
+			clk_set_flags(mdss_mdp_clk, CLKFLAG_NORETAIN_PERIPH);
+		}
+	}
+}
+
+/*
+ * mdss_mdp_memory_retention_exit() - undo retention-enter: keep memories
+ * retained and restore the periphery (RETAIN_PERIPH + PERIPH_OFF_CLEAR)
+ * after the GDSC comes back up.
+ */
+static void mdss_mdp_memory_retention_exit(void)
+{
+	struct clk *mdss_mdp_clk = NULL;
+	struct clk *mdp_vote_clk = mdss_mdp_get_clk(MDSS_CLK_MDP_CORE);
+
+	if (mdp_vote_clk) {
+		mdss_mdp_clk = clk_get_parent(mdp_vote_clk);
+		if (mdss_mdp_clk) {
+			clk_set_flags(mdss_mdp_clk, CLKFLAG_RETAIN_MEM);
+			clk_set_flags(mdss_mdp_clk, CLKFLAG_RETAIN_PERIPH);
+			clk_set_flags(mdss_mdp_clk, CLKFLAG_PERIPH_OFF_CLEAR);
+		}
+	}
+}
+
+/**
+ * mdss_mdp_idle_pc_restore() - Restore MDSS settings when exiting idle pc
+ *
+ * MDSS GDSC can be voted off during idle-screen usecase for MIPI DSI command
+ * mode displays, referred to as MDSS idle power collapse. Upon subsequent
+ * frame update, MDSS GDSC needs to turned back on and hw state needs to be
+ * restored.
+ */
+static int mdss_mdp_idle_pc_restore(void)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	int rc = 0;
+
+	mutex_lock(&mdp_fs_idle_pc_lock);
+	if (!mdata->idle_pc) {
+		pr_debug("no idle pc, no need to restore\n");
+		goto end;
+	}
+
+	pr_debug("called from %pS\n", __builtin_return_address(0));
+	rc = mdss_iommu_ctrl(1);
+	if (IS_ERR_VALUE(rc)) {
+		pr_err("mdss iommu attach failed rc=%d\n", rc);
+		goto end;
+	}
+	mdss_hw_init(mdata);
+	mdss_iommu_ctrl(0);
+
+	/*
+	 * busy-wait 10 microseconds to make sure AD auto-reinitialization
+	 * is done before leaving memory retention
+	 */
+	udelay(10);
+	mdss_mdp_memory_retention_exit();
+
+	mdss_mdp_ctl_restore(true);
+	mdata->idle_pc = false;
+
+end:
+	mutex_unlock(&mdp_fs_idle_pc_lock);
+	return rc;
+}
+
+/**
+ * mdss_bus_bandwidth_ctrl() -- place bus bandwidth request
+ * @enable:	value of enable or disable
+ *
+ * Function place bus bandwidth request to allocate saved bandwidth
+ * if enabled or free bus bandwidth allocation if disabled.
+ * Bus bandwidth is required by mdp. For dsi, it only requires to send
+ * DCS command. It returns error if bandwidth request fails.
+ *
+ * Reference-counted: the bus vote and the runtime-PM reference change only
+ * on the 0->1 / 1->0 transitions of bus_ref_cnt.
+ */
+void mdss_bus_bandwidth_ctrl(int enable)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	int changed = 0;
+
+	mutex_lock(&mdata->bus_lock);
+	if (enable) {
+		if (mdata->bus_ref_cnt == 0)
+			changed++;
+		mdata->bus_ref_cnt++;
+	} else {
+		if (mdata->bus_ref_cnt) {
+			mdata->bus_ref_cnt--;
+			if (mdata->bus_ref_cnt == 0)
+				changed++;
+		} else {
+			pr_err("Can not be turned off\n");
+		}
+	}
+
+	pr_debug("%pS: task:%s bw_cnt=%d changed=%d enable=%d\n",
+		__builtin_return_address(0), current->group_leader->comm,
+		mdata->bus_ref_cnt, changed, enable);
+
+	if (changed) {
+		MDSS_XLOG(mdata->bus_ref_cnt, enable);
+
+		if (!enable) {
+			/* keep the vote during splash handoff */
+			if (!mdata->handoff_pending) {
+				msm_bus_scale_client_update_request(
+						mdata->bus_hdl, 0);
+				mdata->ao_bw_uc_idx = 0;
+			}
+			pm_runtime_mark_last_busy(&mdata->pdev->dev);
+			pm_runtime_put_autosuspend(&mdata->pdev->dev);
+		} else {
+			pm_runtime_get_sync(&mdata->pdev->dev);
+			msm_bus_scale_client_update_request(
+				mdata->bus_hdl, mdata->curr_bw_uc_idx);
+		}
+	}
+
+	mutex_unlock(&mdata->bus_lock);
+}
+EXPORT_SYMBOL(mdss_bus_bandwidth_ctrl);
+
+/*
+ * mdss_mdp_clk_ctrl() - reference-counted enable/disable of all MDP core
+ * clocks, together with the runtime-PM, reg-bus, IOMMU and bus-context
+ * votes that must bracket them. Work happens only on the 0->1 / 1->0
+ * transitions of the static mdp_clk_cnt.
+ *
+ * Fix: the debug print claimed "clk_cnt=%d" but passed
+ * mdata->bus_ref_cnt (copy-paste from mdss_bus_bandwidth_ctrl); it now
+ * prints mdp_clk_cnt as the format string says.
+ */
+void mdss_mdp_clk_ctrl(int enable)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	static int mdp_clk_cnt;
+	unsigned long flags;
+	int changed = 0;
+	int rc = 0;
+
+	mutex_lock(&mdp_clk_lock);
+	if (enable) {
+		if (mdp_clk_cnt == 0)
+			changed++;
+		mdp_clk_cnt++;
+	} else {
+		if (mdp_clk_cnt) {
+			mdp_clk_cnt--;
+			if (mdp_clk_cnt == 0)
+				changed++;
+		} else {
+			pr_err("Can not be turned off\n");
+		}
+	}
+
+	if (changed)
+		MDSS_XLOG(mdp_clk_cnt, enable, current->pid);
+
+	pr_debug("%pS: task:%s clk_cnt=%d changed=%d enable=%d\n",
+		__builtin_return_address(0), current->group_leader->comm,
+		mdp_clk_cnt, changed, enable);
+
+	if (changed) {
+		if (enable) {
+			pm_runtime_get_sync(&mdata->pdev->dev);
+
+			mdss_update_reg_bus_vote(mdata->reg_bus_clt,
+				VOTE_INDEX_LOW);
+
+			rc = mdss_iommu_ctrl(1);
+			if (IS_ERR_VALUE(rc))
+				pr_err("IOMMU attach failed\n");
+
+			/* Active+Sleep */
+			msm_bus_scale_client_update_context(mdata->bus_hdl,
+				false, mdata->curr_bw_uc_idx);
+		}
+
+		/* clk_ena is read by the ISR under mdp_lock */
+		spin_lock_irqsave(&mdp_lock, flags);
+		mdata->clk_ena = enable;
+		spin_unlock_irqrestore(&mdp_lock, flags);
+
+		mdss_mdp_clk_update(MDSS_CLK_AHB, enable);
+		mdss_mdp_clk_update(MDSS_CLK_AXI, enable);
+		mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, enable);
+		mdss_mdp_clk_update(MDSS_CLK_MDP_LUT, enable);
+		if (mdata->vsync_ena)
+			mdss_mdp_clk_update(MDSS_CLK_MDP_VSYNC, enable);
+
+		if (!enable) {
+			/* release iommu control */
+			mdss_iommu_ctrl(0);
+
+			/* Active-Only */
+			msm_bus_scale_client_update_context(mdata->bus_hdl,
+				true, mdata->ao_bw_uc_idx);
+
+			mdss_update_reg_bus_vote(mdata->reg_bus_clt,
+				VOTE_INDEX_DISABLE);
+
+			pm_runtime_mark_last_busy(&mdata->pdev->dev);
+			pm_runtime_put_autosuspend(&mdata->pdev->dev);
+		}
+	}
+
+	if (enable && changed)
+		mdss_mdp_idle_pc_restore();
+
+	mutex_unlock(&mdp_clk_lock);
+}
+
+/*
+ * mdss_mdp_irq_clk_register() - look up a named clock via devm_clk_get()
+ * and store it at mdp_clk[clk_idx]. Returns -EINVAL for an out-of-range
+ * index, the devm_clk_get() error if the clock is absent, 0 on success.
+ */
+static inline int mdss_mdp_irq_clk_register(struct mdss_data_type *mdata,
+					    char *clk_name, int clk_idx)
+{
+	struct clk *tmp;
+
+	if (clk_idx >= MDSS_MAX_CLK) {
+		pr_err("invalid clk index %d\n", clk_idx);
+		return -EINVAL;
+	}
+
+	tmp = devm_clk_get(&mdata->pdev->dev, clk_name);
+	if (IS_ERR(tmp)) {
+		pr_err("unable to get clk: %s\n", clk_name);
+		return PTR_ERR(tmp);
+	}
+
+	mdata->mdp_clk[clk_idx] = tmp;
+	return 0;
+}
+
+/* SCM device id used when asking TZ to restore MDSS secure config */
+#define SEC_DEVICE_MDSS		1
+
+/*
+ * __mdss_restore_sec_cfg() - ask TrustZone to restore the MDSS secure
+ * configuration after a power collapse; skipped on targets that set
+ * MDSS_CAPS_SCM_RESTORE_NOT_REQUIRED. Register clocks are enabled around
+ * the SCM call. Failures are logged but not propagated.
+ */
+static void __mdss_restore_sec_cfg(struct mdss_data_type *mdata)
+{
+	int ret, scm_ret = 0;
+
+	if (test_bit(MDSS_CAPS_SCM_RESTORE_NOT_REQUIRED, mdata->mdss_caps_map))
+		return;
+
+	pr_debug("restoring mdss secure config\n");
+
+	__mdss_mdp_reg_access_clk_enable(mdata, true);
+
+	ret = scm_restore_sec_cfg(SEC_DEVICE_MDSS, 0, &scm_ret);
+	if (ret || scm_ret)
+		pr_warn("scm_restore_sec_cfg failed %d %d\n",
+				ret, scm_ret);
+
+	__mdss_mdp_reg_access_clk_enable(mdata, false);
+}
+
+/*
+ * mdss_mdp_gdsc_notifier_call() - regulator notifier for the MDSS GDSC.
+ * On ENABLE, re-applies the TZ secure config (unless the target keeps SMMU
+ * powered); on PRE_DISABLE, halts the VBIF AXI ports so no transactions
+ * are in flight when power drops. Always returns NOTIFY_OK.
+ */
+static int mdss_mdp_gdsc_notifier_call(struct notifier_block *self,
+		unsigned long event, void *data)
+{
+	struct mdss_data_type *mdata;
+
+	mdata = container_of(self, struct mdss_data_type, gdsc_cb);
+
+	if (event & REGULATOR_EVENT_ENABLE) {
+		/*
+		 * As SMMU in low tier targets is not power collapsible,
+		 * hence we don't need to restore sec configuration.
+		 */
+		if (!mdss_mdp_req_init_restore_cfg(mdata))
+			__mdss_restore_sec_cfg(mdata);
+	} else if (event & REGULATOR_EVENT_PRE_DISABLE) {
+		pr_debug("mdss gdsc is getting disabled\n");
+		/* halt the vbif transactions */
+		mdss_mdp_vbif_axi_halt(mdata);
+	}
+
+	return NOTIFY_OK;
+}
+
+/*
+ * mdss_mdp_irq_clk_setup() - probe-time setup of the MDP irq line, GDSC
+ * and CX regulators, reg-bus client and all MDSS clocks, then program the
+ * default (max) core clock rate. Returns 0 on success or a negative errno;
+ * optional resources (venus GDSC, CX, lut/vsync clocks) only log on
+ * failure.
+ *
+ * NOTE(review): IRQF_DISABLED is a no-op/deprecated flag in modern
+ * kernels — confirm the target kernel version still defines it.
+ */
+static int mdss_mdp_irq_clk_setup(struct mdss_data_type *mdata)
+{
+	int ret;
+
+	ret = of_property_read_u32(mdata->pdev->dev.of_node,
+			"qcom,max-clk-rate", &mdata->max_mdp_clk_rate);
+	if (ret) {
+		pr_err("failed to get max mdp clock rate\n");
+		return ret;
+	}
+
+	pr_debug("max mdp clk rate=%d\n", mdata->max_mdp_clk_rate);
+
+	ret = devm_request_irq(&mdata->pdev->dev, mdss_mdp_hw.irq_info->irq,
+				mdss_irq_handler, IRQF_DISABLED, "MDSS", mdata);
+	if (ret) {
+		pr_err("mdp request_irq() failed!\n");
+		return ret;
+	}
+	/* keep the line off until a client enables an interrupt source */
+	disable_irq(mdss_mdp_hw.irq_info->irq);
+
+	mdata->fs = devm_regulator_get(&mdata->pdev->dev, "vdd");
+	if (IS_ERR_OR_NULL(mdata->fs)) {
+		mdata->fs = NULL;
+		pr_err("unable to get gdsc regulator\n");
+		return -EINVAL;
+	}
+
+	mdata->venus = devm_regulator_get_optional(&mdata->pdev->dev,
+		"gdsc-venus");
+	if (IS_ERR_OR_NULL(mdata->venus)) {
+		mdata->venus = NULL;
+		pr_debug("unable to get venus gdsc regulator\n");
+	}
+
+	mdata->fs_ena = false;
+
+	mdata->gdsc_cb.notifier_call = mdss_mdp_gdsc_notifier_call;
+	mdata->gdsc_cb.priority = 5;
+	if (regulator_register_notifier(mdata->fs, &(mdata->gdsc_cb)))
+		pr_warn("GDSC notification registration failed!\n");
+	else
+		mdata->regulator_notif_register = true;
+
+	mdata->vdd_cx = devm_regulator_get_optional(&mdata->pdev->dev,
+				"vdd-cx");
+	if (IS_ERR_OR_NULL(mdata->vdd_cx)) {
+		pr_debug("unable to get CX reg. rc=%d\n",
+					PTR_RET(mdata->vdd_cx));
+		mdata->vdd_cx = NULL;
+	}
+
+	/* the trailing "\0" in the literal is redundant; kept as-is */
+	mdata->reg_bus_clt = mdss_reg_bus_vote_client_create("mdp\0");
+	if (IS_ERR(mdata->reg_bus_clt)) {
+		pr_err("bus client register failed\n");
+		return PTR_ERR(mdata->reg_bus_clt);
+	}
+
+	if (mdss_mdp_irq_clk_register(mdata, "bus_clk", MDSS_CLK_AXI) ||
+	    mdss_mdp_irq_clk_register(mdata, "iface_clk", MDSS_CLK_AHB) ||
+	    mdss_mdp_irq_clk_register(mdata, "core_clk",
+				      MDSS_CLK_MDP_CORE))
+		return -EINVAL;
+
+	/* lut_clk is not present on all MDSS revisions */
+	mdss_mdp_irq_clk_register(mdata, "lut_clk", MDSS_CLK_MDP_LUT);
+
+	/* vsync_clk is optional for non-smart panels */
+	mdss_mdp_irq_clk_register(mdata, "vsync_clk", MDSS_CLK_MDP_VSYNC);
+
+	/* Setting the default clock rate to the max supported.*/
+	mdss_mdp_set_clk_rate(mdata->max_mdp_clk_rate);
+	pr_debug("mdp clk rate=%ld\n",
+		mdss_mdp_get_clk_rate(MDSS_CLK_MDP_CORE, false));
+
+	return 0;
+}
+
+/* Debugfs hook: translate an on/off flag into an MDP clock vote. */
+static void mdss_debug_enable_clock(int on)
+{
+	mdss_mdp_clk_ctrl(on ? MDP_BLOCK_POWER_ON : MDP_BLOCK_POWER_OFF);
+}
+
+/*
+ * mdss_mdp_debug_init() - set up the MDSS debugfs tree, register the MDP
+ * register space for dump/inspection, and the VBIF spaces when present.
+ * Unwinds the base debugfs on mdp-debugfs failure.
+ */
+static int mdss_mdp_debug_init(struct platform_device *pdev,
+	struct mdss_data_type *mdata)
+{
+	int rc;
+	struct mdss_debug_base *dbg_blk;
+
+	mdata->debug_inf.debug_enable_clock = mdss_debug_enable_clock;
+
+	rc = mdss_debugfs_init(mdata);
+	if (rc)
+		return rc;
+
+	rc = mdss_mdp_debugfs_init(mdata);
+	if (rc) {
+		mdss_debugfs_remove(mdata);
+		return rc;
+	}
+
+	mdss_debug_register_io("mdp", &mdata->mdss_io, &dbg_blk);
+	mdss_debug_register_dump_range(pdev, dbg_blk, "qcom,regs-dump-mdp",
+		"qcom,regs-dump-names-mdp", "qcom,regs-dump-xin-id-mdp");
+
+	if (mdata->vbif_io.base)
+		mdss_debug_register_io("vbif", &mdata->vbif_io, NULL);
+	if (mdata->vbif_nrt_io.base)
+		mdss_debug_register_io("vbif_nrt", &mdata->vbif_nrt_io, NULL);
+
+	return 0;
+}
+
+/*
+ * mdss_get_props() - read a 32-bit properties word from a fixed physical
+ * address; returns 0 if the mapping fails.
+ * NOTE(review): 0xFC4B8114 is a hard-coded SoC register address
+ * (presumably a QFPROM/efuse word on specific targets) — confirm it is
+ * only reached on those targets.
+ */
+static u32 mdss_get_props(void)
+{
+	u32 props = 0;
+	void __iomem *props_base = ioremap(0xFC4B8114, 4);
+
+	if (props_base) {
+		props = readl_relaxed(props_base);
+		iounmap(props_base);
+	}
+	return props;
+}
+
+/*
+ * mdss_mdp_init_default_prefill_factors() - seed the default per-format
+ * prefill multipliers, plus the traffic-shaper prefill parameters on
+ * targets that advertise MDSS_QOS_TS_PREFILL.
+ */
+void mdss_mdp_init_default_prefill_factors(struct mdss_data_type *mdata)
+{
+	mdata->prefill_data.prefill_factors.fmt_mt_nv12_factor = 8;
+	mdata->prefill_data.prefill_factors.fmt_mt_factor = 4;
+	mdata->prefill_data.prefill_factors.fmt_linear_factor = 1;
+	mdata->prefill_data.prefill_factors.scale_factor = 1;
+	mdata->prefill_data.prefill_factors.xtra_ff_factor = 2;
+
+	if (test_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map)) {
+		mdata->prefill_data.ts_threshold = 25;
+		mdata->prefill_data.ts_end = 8;
+		/* ts_rate = 1/4 */
+		mdata->prefill_data.ts_rate.numer = 1;
+		mdata->prefill_data.ts_rate.denom = 4;
+		mdata->prefill_data.ts_overhead = 2;
+	}
+}
+
/*
 * mdss_mdp_hw_rev_caps_init() - set capabilities/quirks per MDP hardware rev
 * @mdata: MDP private data (mdata->mdp_rev must already be latched)
 *
 * First programs conservative defaults, then overrides them per hardware
 * revision in the switch below.  Several cases intentionally fall through
 * to share the settings of a related revision.
 */
static void mdss_mdp_hw_rev_caps_init(struct mdss_data_type *mdata)
{

	/* conservative defaults; revision cases below override these */
	mdata->per_pipe_ib_factor.numer = 0;
	mdata->per_pipe_ib_factor.denom = 0;
	mdata->apply_post_scale_bytes = true;
	mdata->hflip_buffer_reused = true;
	/* prevent disable of prefill calculations */
	mdata->min_prefill_lines = 0xffff;
	/* clock gating feature is disabled by default */
	mdata->enable_gate = false;
	mdata->pixel_ram_size = 0;
	mem_protect_sd_ctrl_id = MEM_PROTECT_SD_CTRL_FLAT;

	mdss_mdp_hw_rev_caps_init(mdata);

	switch (mdata->mdp_rev) {
	case MDSS_MDP_HW_REV_107:
		mdss_set_quirk(mdata, MDSS_QUIRK_ROTCDP);
		/* fall-through */
	case MDSS_MDP_HW_REV_107_1:
		mdss_mdp_format_flag_removal(invalid_mdp107_wb_output_fmts,
			ARRAY_SIZE(invalid_mdp107_wb_output_fmts),
			VALID_MDP_WB_INTF_FORMAT);
		/* fall-through */
	case MDSS_MDP_HW_REV_107_2:
		mdata->max_target_zorder = 7; /* excluding base layer */
		mdata->max_cursor_size = 128;
		mdata->per_pipe_ib_factor.numer = 8;
		mdata->per_pipe_ib_factor.denom = 5;
		mdata->apply_post_scale_bytes = false;
		mdata->hflip_buffer_reused = false;
		mdata->min_prefill_lines = 21;
		mdata->has_ubwc = true;
		mdata->pixel_ram_size = 50 * 1024;
		set_bit(MDSS_QOS_PER_PIPE_IB, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_OVERHEAD_FACTOR, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_CDP, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_PER_PIPE_LUT, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map);
		set_bit(MDSS_CAPS_YUV_CONFIG, mdata->mdss_caps_map);
		set_bit(MDSS_CAPS_SCM_RESTORE_NOT_REQUIRED,
			mdata->mdss_caps_map);
		set_bit(MDSS_CAPS_3D_MUX_UNDERRUN_RECOVERY_SUPPORTED,
			mdata->mdss_caps_map);
		mdss_mdp_init_default_prefill_factors(mdata);
		mdss_set_quirk(mdata, MDSS_QUIRK_DSC_RIGHT_ONLY_PU);
		mdss_set_quirk(mdata, MDSS_QUIRK_DSC_2SLICE_PU_THRPUT);
		mdss_set_quirk(mdata, MDSS_QUIRK_HDR_SUPPORT_ENABLED);
		break;
	case MDSS_MDP_HW_REV_105:
	case MDSS_MDP_HW_REV_109:
		mdss_set_quirk(mdata, MDSS_QUIRK_BWCPANIC);
		mdata->max_target_zorder = 7; /* excluding base layer */
		mdata->max_cursor_size = 128;
		set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
		set_bit(MDSS_CAPS_3D_MUX_UNDERRUN_RECOVERY_SUPPORTED,
			mdata->mdss_caps_map);
		break;
	case MDSS_MDP_HW_REV_110:
		mdss_set_quirk(mdata, MDSS_QUIRK_BWCPANIC);
		mdata->max_target_zorder = 4; /* excluding base layer */
		mdata->max_cursor_size = 128;
		set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
		mdata->min_prefill_lines = 12;
		mdata->props = mdss_get_props();
		break;
	case MDSS_MDP_HW_REV_112:
		mdata->max_target_zorder = 4; /* excluding base layer */
		mdata->max_cursor_size = 64;
		mdata->min_prefill_lines = 12;
		set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
		break;
	case MDSS_MDP_HW_REV_114:
		/* disable ECG for 28nm PHY platform */
		mdata->enable_gate = false;
		/* fall-through */
	case MDSS_MDP_HW_REV_116:
		mdata->max_target_zorder = 4; /* excluding base layer */
		mdata->max_cursor_size = 128;
		mdata->min_prefill_lines = 14;
		mdata->has_ubwc = true;
		mdata->pixel_ram_size = 40 * 1024;
		mdata->apply_post_scale_bytes = false;
		mdata->hflip_buffer_reused = false;
		mem_protect_sd_ctrl_id = MEM_PROTECT_SD_CTRL;
		set_bit(MDSS_QOS_OVERHEAD_FACTOR, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_PER_PIPE_LUT, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map);
		set_bit(MDSS_CAPS_YUV_CONFIG, mdata->mdss_caps_map);
		mdss_mdp_init_default_prefill_factors(mdata);
		set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
		mdss_set_quirk(mdata, MDSS_QUIRK_DMA_BI_DIR);
		mdss_set_quirk(mdata, MDSS_QUIRK_NEED_SECURE_MAP);
		break;
	case MDSS_MDP_HW_REV_115:
		mdata->max_target_zorder = 4; /* excluding base layer */
		mdata->max_cursor_size = 128;
		mdata->min_prefill_lines = 14;
		mdata->has_ubwc = false;
		mdata->pixel_ram_size = 16 * 1024;
		mdata->apply_post_scale_bytes = false;
		mdata->hflip_buffer_reused = false;
		/* disable ECG for 28nm PHY platform */
		mdata->enable_gate = false;
		mem_protect_sd_ctrl_id = MEM_PROTECT_SD_CTRL;
		set_bit(MDSS_QOS_PER_PIPE_LUT, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map);
		set_bit(MDSS_CAPS_YUV_CONFIG, mdata->mdss_caps_map);
		set_bit(MDSS_CAPS_MIXER_1_FOR_WB, mdata->mdss_caps_map);
		mdss_mdp_init_default_prefill_factors(mdata);
		set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
		mdss_set_quirk(mdata, MDSS_QUIRK_DMA_BI_DIR);
		mdss_set_quirk(mdata, MDSS_QUIRK_NEED_SECURE_MAP);
		break;
	case MDSS_MDP_HW_REV_300:
	case MDSS_MDP_HW_REV_301:
		mdata->max_target_zorder = 7; /* excluding base layer */
		mdata->max_cursor_size = 384;
		mdata->per_pipe_ib_factor.numer = 8;
		mdata->per_pipe_ib_factor.denom = 5;
		mdata->apply_post_scale_bytes = false;
		mdata->hflip_buffer_reused = false;
		mdata->min_prefill_lines = 25;
		mdata->has_ubwc = true;
		mdata->pixel_ram_size = 50 * 1024;
		mdata->rects_per_sspp[MDSS_MDP_PIPE_TYPE_DMA] = 2;

		set_bit(MDSS_QOS_PER_PIPE_IB, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_OVERHEAD_FACTOR, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_CDP, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_PER_PIPE_LUT, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map);
		/*
		 * NOTE(review): MDSS_QOS_TS_PREFILL was already set a few
		 * lines above — this repeat is harmless but looks redundant;
		 * confirm intent.
		 */
		set_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_IB_NOCR, mdata->mdss_qos_map);
		set_bit(MDSS_CAPS_YUV_CONFIG, mdata->mdss_caps_map);
		set_bit(MDSS_CAPS_SCM_RESTORE_NOT_REQUIRED,
			mdata->mdss_caps_map);
		set_bit(MDSS_CAPS_3D_MUX_UNDERRUN_RECOVERY_SUPPORTED,
			mdata->mdss_caps_map);
		set_bit(MDSS_CAPS_QSEED3, mdata->mdss_caps_map);
		set_bit(MDSS_CAPS_DEST_SCALER, mdata->mdss_caps_map);
		mdss_mdp_init_default_prefill_factors(mdata);
		mdss_set_quirk(mdata, MDSS_QUIRK_DSC_RIGHT_ONLY_PU);
		mdss_set_quirk(mdata, MDSS_QUIRK_DSC_2SLICE_PU_THRPUT);
		mdss_set_quirk(mdata, MDSS_QUIRK_SRC_SPLIT_ALWAYS);
		mdata->has_wb_ubwc = true;
		set_bit(MDSS_CAPS_10_BIT_SUPPORTED, mdata->mdss_caps_map);
		break;
	default:
		mdata->max_target_zorder = 4; /* excluding base layer */
		mdata->max_cursor_size = 64;
	}

	/* older revisions hang on downscale; quirk routes around it */
	if (mdata->mdp_rev < MDSS_MDP_HW_REV_103)
		mdss_set_quirk(mdata, MDSS_QUIRK_DOWNSCALE_HANG);

	if (mdata->mdp_rev < MDSS_MDP_HW_REV_102 ||
			mdata->mdp_rev == MDSS_MDP_HW_REV_200)
		mdss_set_quirk(mdata, MDSS_QUIRK_FMT_PACK_PATTERN);
}
+
+static void mdss_hw_rev_init(struct mdss_data_type *mdata)
+{
+	if (mdata->mdp_rev)
+		return;
+
+	mdata->mdp_rev = MDSS_REG_READ(mdata, MDSS_REG_HW_VERSION);
+	mdss_mdp_hw_rev_caps_init(mdata);
+}
+
+/**
+ * mdss_hw_init() - Initialize MDSS target specific register settings
+ * @mdata: MDP private data
+ *
+ * Initialize basic MDSS hardware settings based on the board specific
+ * parameters. This function does not explicitly turn on the MDP clocks
+ * and so it must be called with the MDP clocks already enabled.
+ */
+void mdss_hw_init(struct mdss_data_type *mdata)
+{
+	struct mdss_mdp_pipe *vig;
+
+	mdss_hw_rev_init(mdata);
+
+	/* Disable hw underrun recovery only for older mdp reversions. */
+	if (mdata->mdp_rev < MDSS_MDP_HW_REV_105)
+		writel_relaxed(0x0, mdata->mdp_base +
+			MDSS_MDP_REG_VIDEO_INTF_UNDERFLOW_CTL);
+
+	if (mdata->hw_settings) {
+		struct mdss_hw_settings *hws = mdata->hw_settings;
+
+		while (hws->reg) {
+			writel_relaxed(hws->val, hws->reg);
+			hws++;
+		}
+	}
+
+	vig = mdata->vig_pipes;
+
+	mdata->nmax_concurrent_ad_hw =
+		(mdata->mdp_rev < MDSS_MDP_HW_REV_103) ? 1 : 2;
+
+	pr_debug("MDP hw init done\n");
+}
+
+static u32 mdss_mdp_res_init(struct mdss_data_type *mdata)
+{
+	u32 rc = 0;
+
+	if (mdata->res_init) {
+		pr_err("mdss resources already initialized\n");
+		return -EPERM;
+	}
+
+	mdata->res_init = true;
+	mdata->clk_ena = false;
+	mdss_mdp_hw.irq_info->irq_mask = MDSS_MDP_DEFAULT_INTR_MASK;
+	mdss_mdp_hw.irq_info->irq_ena = false;
+
+	rc = mdss_mdp_irq_clk_setup(mdata);
+	if (rc)
+		return rc;
+
+	mdata->hist_intr.req = 0;
+	mdata->hist_intr.curr = 0;
+	mdata->hist_intr.state = 0;
+	spin_lock_init(&mdata->hist_intr.lock);
+
+	mdata->iclient = msm_ion_client_create(mdata->pdev->name);
+	if (IS_ERR_OR_NULL(mdata->iclient)) {
+		pr_err("msm_ion_client_create() return error (%pK)\n",
+				mdata->iclient);
+		mdata->iclient = NULL;
+	}
+
+	return rc;
+}
+
+static u32 mdss_mdp_scaler_init(struct mdss_data_type *mdata,
+				struct device *dev)
+{
+	int ret;
+	struct device_node *node;
+	u32 prop_val;
+
+	if (!dev)
+		return -EPERM;
+
+	node = of_get_child_by_name(dev->of_node, "qcom,mdss-scaler-offsets");
+	if (!node)
+		return 0;
+
+	if (mdata->scaler_off)
+		return -EFAULT;
+
+	mdata->scaler_off = devm_kzalloc(&mdata->pdev->dev,
+			sizeof(*mdata->scaler_off), GFP_KERNEL);
+	if (!mdata->scaler_off)
+		return -ENOMEM;
+
+	ret = of_property_read_u32(node,
+			"qcom,mdss-vig-scaler-off",
+			&prop_val);
+	if (ret) {
+		pr_err("read property %s failed ret %d\n",
+				"qcom,mdss-vig-scaler-off", ret);
+		return -EINVAL;
+	}
+	mdata->scaler_off->vig_scaler_off = prop_val;
+	ret = of_property_read_u32(node,
+			"qcom,mdss-vig-scaler-lut-off",
+			&prop_val);
+	if (ret) {
+		pr_err("read property %s failed ret %d\n",
+				"qcom,mdss-vig-scaler-lut-off", ret);
+		return -EINVAL;
+	}
+	mdata->scaler_off->vig_scaler_lut_off = prop_val;
+	mdata->scaler_off->has_dest_scaler =
+		of_property_read_bool(mdata->pdev->dev.of_node,
+				"qcom,mdss-has-dest-scaler");
+	if (mdata->scaler_off->has_dest_scaler) {
+		ret = of_property_read_u32(node,
+				"qcom,mdss-dest-block-off",
+				&prop_val);
+		if (ret) {
+			pr_err("read property %s failed ret %d\n",
+					"qcom,mdss-dest-block-off", ret);
+			return -EINVAL;
+		}
+		mdata->scaler_off->dest_base = mdata->mdss_io.base +
+			prop_val;
+		mdata->scaler_off->ndest_scalers =
+			mdss_mdp_parse_dt_prop_len(mdata->pdev,
+					"qcom,mdss-dest-scalers-off");
+		mdata->scaler_off->dest_scaler_off =
+			devm_kzalloc(&mdata->pdev->dev, sizeof(u32) *
+					mdata->scaler_off->ndest_scalers,
+					GFP_KERNEL);
+		if  (!mdata->scaler_off->dest_scaler_off) {
+			kfree(mdata->scaler_off->dest_scaler_off);
+			return -ENOMEM;
+		}
+		ret = mdss_mdp_parse_dt_handler(mdata->pdev,
+				"qcom,mdss-dest-scaler-off",
+				mdata->scaler_off->dest_scaler_off,
+				mdata->scaler_off->ndest_scalers);
+		if (ret)
+			return -EINVAL;
+		mdata->scaler_off->dest_scaler_lut_off =
+			devm_kzalloc(&mdata->pdev->dev, sizeof(u32) *
+					mdata->scaler_off->ndest_scalers,
+					GFP_KERNEL);
+		if  (!mdata->scaler_off->dest_scaler_lut_off) {
+			kfree(mdata->scaler_off->dest_scaler_lut_off);
+			return -ENOMEM;
+		}
+		ret = mdss_mdp_parse_dt_handler(mdata->pdev,
+				"qcom,mdss-dest-scalers-lut-off",
+				mdata->scaler_off->dest_scaler_lut_off,
+				mdata->scaler_off->ndest_scalers);
+		if (ret)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * mdss_mdp_footswitch_ctrl_splash() - clocks handoff for cont. splash screen
+ * @on: 1 to start handoff, 0 to complete the handoff after first frame update
+ *
+ * MDSS Clocks and GDSC are already on during continuous splash screen, but
+ * increasing ref count will keep clocks from being turned off until handoff
+ * has properly happened after frame update.
+ */
+void mdss_mdp_footswitch_ctrl_splash(int on)
+{
+	int ret;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (mdata != NULL) {
+		if (on) {
+			mdata->handoff_pending = true;
+			pr_debug("Enable MDP FS for splash.\n");
+			if (mdata->venus) {
+				ret = regulator_enable(mdata->venus);
+				if (ret)
+					pr_err("venus failed to enable\n");
+			}
+
+			ret = regulator_enable(mdata->fs);
+			if (ret)
+				pr_err("Footswitch failed to enable\n");
+
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+			mdss_bus_bandwidth_ctrl(true);
+		} else {
+			pr_debug("Disable MDP FS for splash.\n");
+			mdss_bus_bandwidth_ctrl(false);
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+			regulator_disable(mdata->fs);
+			if (mdata->venus)
+				regulator_disable(mdata->venus);
+			mdata->handoff_pending = false;
+		}
+	} else {
+		pr_warn("mdss mdata not initialized\n");
+	}
+}
+
+static int mdss_mdp_get_pan_intf(const char *pan_intf)
+{
+	int i, rc = MDSS_PANEL_INTF_INVALID;
+
+	if (!pan_intf)
+		return rc;
+
+	for (i = 0; i < ARRAY_SIZE(pan_types); i++) {
+		if (!strcmp(pan_intf, pan_types[i].name)) {
+			rc = pan_types[i].type;
+			break;
+		}
+	}
+	return rc;
+}
+
/*
 * mdss_mdp_get_pan_cfg() - parse the bootloader panel override string
 * @pan_cfg: panel configuration to populate
 *
 * Parses the module-level mdss_mdp_panel string, expected in the form
 * "<lk_cfg>:<intf>:<panel name...>" where <lk_cfg> is '0' or '1'
 * (presumably whether LK configured the panel — confirm against the
 * bootloader contract).  On success the interface type and panel-name
 * argument are stored in @pan_cfg and 0 is returned; any malformed
 * input yields -EINVAL with pan_intf set to MDSS_PANEL_INTF_INVALID.
 */
static int mdss_mdp_get_pan_cfg(struct mdss_panel_cfg *pan_cfg)
{
	char *t = NULL;
	char pan_intf_str[MDSS_MAX_PANEL_LEN];
	int rc, i, panel_len;
	char pan_name[MDSS_MAX_PANEL_LEN] = {'\0'};

	if (!pan_cfg)
		return -EINVAL;

	if (mdss_mdp_panel[0] == '0') {
		pr_debug("panel name is not set\n");
		pan_cfg->lk_cfg = false;
		pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID;
		return -EINVAL;
	} else if (mdss_mdp_panel[0] == '1') {
		pan_cfg->lk_cfg = true;
	} else {
		/* read from dt */
		pan_cfg->lk_cfg = true;
		pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID;
		return -EINVAL;
	}

	/* skip lk cfg and delimiter; ex: "1:" */
	strlcpy(pan_name, &mdss_mdp_panel[2], MDSS_MAX_PANEL_LEN);
	t = strnstr(pan_name, ":", MDSS_MAX_PANEL_LEN);
	if (!t) {
		pr_err("pan_name=[%s] invalid\n", pan_name);
		pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID;
		return -EINVAL;
	}

	/* copy at most 4 chars of the interface token (e.g. "dsi") */
	for (i = 0; ((pan_name + i) < t) && (i < 4); i++)
		pan_intf_str[i] = *(pan_name + i);
	pan_intf_str[i] = 0;
	pr_debug("%d panel intf %s\n", __LINE__, pan_intf_str);
	/* point to the start of panel name */
	t = t + 1;
	strlcpy(&pan_cfg->arg_cfg[0], t, sizeof(pan_cfg->arg_cfg));
	pr_debug("%d: t=[%s] panel name=[%s]\n", __LINE__,
		t, pan_cfg->arg_cfg);

	panel_len = strlen(pan_cfg->arg_cfg);
	if (!panel_len) {
		pr_err("Panel name is invalid\n");
		pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID;
		return -EINVAL;
	}

	/* unknown interface token degrades to INVALID but still succeeds */
	rc = mdss_mdp_get_pan_intf(pan_intf_str);
	pan_cfg->pan_intf = (rc < 0) ?  MDSS_PANEL_INTF_INVALID : rc;
	return 0;
}
+
+static int mdss_mdp_parse_dt_pan_intf(struct platform_device *pdev)
+{
+	int rc;
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+	const char *prim_intf = NULL;
+
+	rc = of_property_read_string(pdev->dev.of_node,
+				"qcom,mdss-pref-prim-intf", &prim_intf);
+	if (rc)
+		return -ENODEV;
+
+	rc = mdss_mdp_get_pan_intf(prim_intf);
+	if (rc < 0) {
+		mdata->pan_cfg.pan_intf = MDSS_PANEL_INTF_INVALID;
+	} else {
+		mdata->pan_cfg.pan_intf = rc;
+		rc = 0;
+	}
+	return rc;
+}
+
+static int mdss_mdp_get_cmdline_config(struct platform_device *pdev)
+{
+	int rc, len = 0;
+	int *intf_type;
+	char *panel_name;
+	struct mdss_panel_cfg *pan_cfg;
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+
+	mdata->pan_cfg.arg_cfg[MDSS_MAX_PANEL_LEN] = 0;
+	pan_cfg = &mdata->pan_cfg;
+	panel_name = &pan_cfg->arg_cfg[0];
+	intf_type = &pan_cfg->pan_intf;
+
+	/* reads from dt by default */
+	pan_cfg->lk_cfg = true;
+
+	len = strlen(mdss_mdp_panel);
+
+	if (len > 0) {
+		rc = mdss_mdp_get_pan_cfg(pan_cfg);
+		if (!rc) {
+			pan_cfg->init_done = true;
+			return rc;
+		}
+	}
+
+	rc = mdss_mdp_parse_dt_pan_intf(pdev);
+	/* if pref pan intf is not present */
+	if (rc)
+		pr_warn("unable to parse device tree for pan intf\n");
+
+	pan_cfg->init_done = true;
+
+	return 0;
+}
+
+static void __update_sspp_info(struct mdss_mdp_pipe *pipe,
+	int pipe_cnt, char *type, char *buf, int *cnt)
+{
+	int i;
+	int j;
+	size_t len = PAGE_SIZE;
+	int num_bytes = BITS_TO_BYTES(MDP_IMGTYPE_LIMIT1);
+
+#define SPRINT(fmt, ...) \
+		(*cnt += scnprintf(buf + *cnt, len - *cnt, fmt, ##__VA_ARGS__))
+
+	for (i = 0; i < pipe_cnt && pipe; i++) {
+		SPRINT("pipe_num:%d pipe_type:%s pipe_ndx:%d rects:%d pipe_is_handoff:%d display_id:%d ",
+			pipe->num, type, pipe->ndx, pipe->multirect.max_rects,
+			pipe->is_handed_off, mdss_mdp_get_display_id(pipe));
+		SPRINT("fmts_supported:");
+		for (j = 0; j < num_bytes; j++)
+			SPRINT("%d,", pipe->supported_formats[j]);
+		SPRINT("\n");
+		pipe += pipe->multirect.max_rects;
+	}
+#undef SPRINT
+}
+
+static void mdss_mdp_update_sspp_info(struct mdss_data_type *mdata,
+	char *buf, int *cnt)
+{
+	__update_sspp_info(mdata->vig_pipes, mdata->nvig_pipes,
+		"vig", buf, cnt);
+	__update_sspp_info(mdata->rgb_pipes, mdata->nrgb_pipes,
+		"rgb", buf, cnt);
+	__update_sspp_info(mdata->dma_pipes, mdata->ndma_pipes,
+		"dma", buf, cnt);
+	__update_sspp_info(mdata->cursor_pipes, mdata->ncursor_pipes,
+		"cursor", buf, cnt);
+}
+
/*
 * mdss_mdp_update_wb_info() - dump writeback/rotator format lists into @buf
 * @mdata: MDP private data (mdata->wb may be NULL; loops then emit nothing)
 * @buf:   sysfs output buffer
 * @cnt:   running byte count, updated in place
 */
static void mdss_mdp_update_wb_info(struct mdss_data_type *mdata,
	char *buf, int *cnt)
{
#define SPRINT(fmt, ...) \
		(*cnt += scnprintf(buf + *cnt, len - *cnt, fmt, ##__VA_ARGS__))
	size_t len = PAGE_SIZE;
	int i;
	int num_bytes = BITS_TO_BYTES(MDP_IMGTYPE_LIMIT1);

	SPRINT("rot_input_fmts=");
	for (i = 0; i < num_bytes && mdata->wb; i++)
		SPRINT("%d ", mdata->wb->supported_input_formats[i]);
	SPRINT("\nrot_output_fmts=");
	/*
	 * NOTE(review): rot_output_fmts prints supported_input_formats,
	 * same as rot_input_fmts above — looks like a copy-paste slip;
	 * confirm whether a rotator-output format bitmap exists and was
	 * intended here.
	 */
	for (i = 0; i < num_bytes && mdata->wb; i++)
		SPRINT("%d ", mdata->wb->supported_input_formats[i]);
	SPRINT("\nwb_output_fmts=");
	for (i = 0; i < num_bytes && mdata->wb; i++)
		SPRINT("%d ", mdata->wb->supported_output_formats[i]);
	SPRINT("\n");
#undef SPRINT
}
+
/*
 * mdss_mdp_show_capabilities() - sysfs "caps" read handler
 * @dev:  MDP device
 * @attr: sysfs attribute (unused)
 * @buf:  PAGE_SIZE output buffer
 *
 * Emits a key=value description of the MDP hardware: revision, pipe
 * inventory, prefill factors, bandwidth limits and a space-separated
 * "features=" list.  Output is truncated at PAGE_SIZE by scnprintf.
 * Returns the number of bytes written.
 */
ssize_t mdss_mdp_show_capabilities(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mdss_data_type *mdata = dev_get_drvdata(dev);
	size_t len = PAGE_SIZE;
	int cnt = 0;

#define SPRINT(fmt, ...) \
		(cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))

	SPRINT("mdp_version=5\n");
	SPRINT("hw_rev=%d\n", mdata->mdp_rev);
	SPRINT("pipe_count:%d\n", mdata->nvig_pipes + mdata->nrgb_pipes +
		mdata->ndma_pipes + mdata->ncursor_pipes);
	mdss_mdp_update_sspp_info(mdata, buf, &cnt);
	mdss_mdp_update_wb_info(mdata, buf, &cnt);
	/* TODO : need to remove num pipes info */
	SPRINT("rgb_pipes=%d\n", mdata->nrgb_pipes);
	SPRINT("vig_pipes=%d\n", mdata->nvig_pipes);
	SPRINT("dma_pipes=%d\n", mdata->ndma_pipes);
	SPRINT("blending_stages=%d\n", mdata->max_target_zorder);
	SPRINT("cursor_pipes=%d\n", mdata->ncursor_pipes);
	SPRINT("max_cursor_size=%d\n", mdata->max_cursor_size);
	SPRINT("smp_count=%d\n", mdata->smp_mb_cnt);
	SPRINT("smp_size=%d\n", mdata->smp_mb_size);
	SPRINT("smp_mb_per_pipe=%d\n", mdata->smp_mb_per_pipe);
	SPRINT("max_downscale_ratio=%d\n", MAX_DOWNSCALE_RATIO);
	SPRINT("max_upscale_ratio=%d\n", MAX_UPSCALE_RATIO);

	/* the last writeback interface is exposed for WFD use */
	if (mdata->nwb)
		SPRINT("wb_intf_index=%d\n", mdata->nwb - 1);

	if (test_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map)) {
		SPRINT("fmt_mt_nv12_factor=%d\n",
			mdata->prefill_data.prefill_factors.fmt_mt_nv12_factor);
		SPRINT("fmt_mt_factor=%d\n",
			mdata->prefill_data.prefill_factors.fmt_mt_factor);
		SPRINT("fmt_linear_factor=%d\n",
			mdata->prefill_data.prefill_factors.fmt_linear_factor);
		SPRINT("scale_factor=%d\n",
			mdata->prefill_data.prefill_factors.scale_factor);
		SPRINT("xtra_ff_factor=%d\n",
			mdata->prefill_data.prefill_factors.xtra_ff_factor);
	}

	if (test_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map)) {
		SPRINT("amortizable_threshold=%d\n",
			mdata->prefill_data.ts_threshold);
		SPRINT("system_overhead_lines=%d\n",
			mdata->prefill_data.ts_overhead);
	}

	/* optional fields are only emitted when non-zero */
	if (mdata->props)
		SPRINT("props=%d\n", mdata->props);
	if (mdata->max_bw_low)
		SPRINT("max_bandwidth_low=%u\n", mdata->max_bw_low);
	if (mdata->max_bw_high)
		SPRINT("max_bandwidth_high=%u\n", mdata->max_bw_high);
	if (mdata->max_pipe_width)
		SPRINT("max_pipe_width=%d\n", mdata->max_pipe_width);
	if (mdata->max_mixer_width)
		SPRINT("max_mixer_width=%d\n", mdata->max_mixer_width);
	if (mdata->max_bw_per_pipe)
		SPRINT("max_pipe_bw=%u\n", mdata->max_bw_per_pipe);
	if (mdata->max_mdp_clk_rate)
		SPRINT("max_mdp_clk=%u\n", mdata->max_mdp_clk_rate);
	if (mdata->clk_factor.numer)
		SPRINT("clk_fudge_factor=%u,%u\n", mdata->clk_factor.numer,
			mdata->clk_factor.denom);
	if (mdata->has_rot_dwnscale) {
		if (mdata->rot_dwnscale_min)
			SPRINT("rot_dwnscale_min=%u\n",
				mdata->rot_dwnscale_min);
		if (mdata->rot_dwnscale_max)
			SPRINT("rot_dwnscale_max=%u\n",
				mdata->rot_dwnscale_max);
	}
	SPRINT("features=");
	if (mdata->has_bwc)
		SPRINT(" bwc");
	if (mdata->has_ubwc)
		SPRINT(" ubwc");
	if (mdata->has_wb_ubwc)
		SPRINT(" wb_ubwc");
	if (mdata->has_decimation)
		SPRINT(" decimation");
	if (mdata->highest_bank_bit && !mdss_mdp_is_ubwc_supported(mdata))
		SPRINT(" tile_format");
	if (mdata->has_non_scalar_rgb)
		SPRINT(" non_scalar_rgb");
	if (mdata->has_src_split)
		SPRINT(" src_split");
	if (mdata->has_rot_dwnscale)
		SPRINT(" rotator_downscale");
	if (mdata->max_bw_settings_cnt)
		SPRINT(" dynamic_bw_limit");
	if (test_bit(MDSS_CAPS_QSEED3, mdata->mdss_caps_map))
		SPRINT(" qseed3");
	if (test_bit(MDSS_CAPS_DEST_SCALER, mdata->mdss_caps_map))
		SPRINT(" dest_scaler");
	if (mdata->has_separate_rotator)
		SPRINT(" separate_rotator");
	if (mdss_has_quirk(mdata, MDSS_QUIRK_HDR_SUPPORT_ENABLED))
		SPRINT(" hdr");
	SPRINT("\n");
#undef SPRINT

	return cnt;
}
+
+static ssize_t mdss_mdp_read_max_limit_bw(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mdss_data_type *mdata = dev_get_drvdata(dev);
+	size_t len = PAGE_SIZE;
+	u32 cnt = 0;
+	int i;
+
+	char bw_names[4][8] = {"default", "camera", "hflip", "vflip"};
+	char pipe_bw_names[4][16] = {"default_pipe", "camera_pipe",
+				"hflip_pipe", "vflip_pipe"};
+	struct mdss_max_bw_settings *bw_settings;
+	struct mdss_max_bw_settings *pipe_bw_settings;
+
+	bw_settings = mdata->max_bw_settings;
+	pipe_bw_settings = mdata->max_per_pipe_bw_settings;
+
+#define SPRINT(fmt, ...) \
+		(cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
+
+	SPRINT("bw_mode_bitmap=%d\n", mdata->bw_mode_bitmap);
+	SPRINT("bw_limit_pending=%d\n", mdata->bw_limit_pending);
+
+	for (i = 0; i < mdata->max_bw_settings_cnt; i++) {
+		SPRINT("%s=%d\n", bw_names[i], bw_settings->mdss_max_bw_val);
+		bw_settings++;
+	}
+
+	for (i = 0; i < mdata->mdss_per_pipe_bw_cnt; i++) {
+		SPRINT("%s=%d\n", pipe_bw_names[i],
+					pipe_bw_settings->mdss_max_bw_val);
+		pipe_bw_settings++;
+	}
+
+	return cnt;
+}
+
+static ssize_t mdss_mdp_store_max_limit_bw(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct mdss_data_type *mdata = dev_get_drvdata(dev);
+	u32 data = 0;
+
+	if (kstrtouint(buf, 0, &data)) {
+		pr_info("Not able scan to bw_mode_bitmap\n");
+	} else {
+		mdata->bw_mode_bitmap = data;
+		mdata->bw_limit_pending = true;
+		pr_debug("limit use case, bw_mode_bitmap = %d\n", data);
+	}
+
+	return len;
+}
+
/* "caps": read-only MDP capability dump (mdss_mdp_show_capabilities) */
static DEVICE_ATTR(caps, 0444, mdss_mdp_show_capabilities, NULL);
/* "bw_mode_bitmap": read/write bandwidth-limit use-case bitmap */
static DEVICE_ATTR(bw_mode_bitmap, 0664,
		mdss_mdp_read_max_limit_bw, mdss_mdp_store_max_limit_bw);

/* attribute group registered on the MDP device by mdss_mdp_register_sysfs() */
static struct attribute *mdp_fs_attrs[] = {
	&dev_attr_caps.attr,
	&dev_attr_bw_mode_bitmap.attr,
	NULL
};

static struct attribute_group mdp_fs_attr_group = {
	.attrs = mdp_fs_attrs
};
+
+static int mdss_mdp_register_sysfs(struct mdss_data_type *mdata)
+{
+	struct device *dev = &mdata->pdev->dev;
+	int rc;
+
+	rc = sysfs_create_group(&dev->kobj, &mdp_fs_attr_group);
+
+	return rc;
+}
+
+int mdss_panel_get_intf_status(u32 disp_num, u32 intf_type)
+{
+	int rc, intf_status = 0;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!mdss_res || !mdss_res->pan_cfg.init_done)
+		return -EPROBE_DEFER;
+
+	if (mdss_res->handoff_pending) {
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+		intf_status = readl_relaxed(mdata->mdp_base +
+			MDSS_MDP_REG_DISP_INTF_SEL);
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+		if (intf_type == MDSS_PANEL_INTF_DSI) {
+			if (disp_num == DISPLAY_1)
+				rc = (intf_status & MDSS_MDP_INTF_DSI0_SEL);
+			else if (disp_num == DISPLAY_2)
+				rc = (intf_status & MDSS_MDP_INTF_DSI1_SEL);
+			else
+				rc = 0;
+		} else if (intf_type == MDSS_PANEL_INTF_EDP) {
+			intf_status &= MDSS_MDP_INTF_EDP_SEL;
+			rc = (intf_status == MDSS_MDP_INTF_EDP_SEL);
+		} else if (intf_type == MDSS_PANEL_INTF_HDMI) {
+			intf_status &= MDSS_MDP_INTF_HDMI_SEL;
+			rc = (intf_status == MDSS_MDP_INTF_HDMI_SEL);
+		} else {
+			rc = 0;
+		}
+	} else {
+		rc = 0;
+	}
+
+	return rc;
+}
+
/*
 * mdss_mdp_probe() - platform probe for the MDP device
 * @pdev: MDP platform device (device-tree only)
 *
 * Maps the MDP/VBIF register spaces, sets up IRQs, clocks, bus scaling,
 * sysfs, SMMU and power handling, then inspects DISP_INTF_SEL to decide
 * whether the bootloader left a display running (continuous splash) and
 * balances the splash clock/regulator votes accordingly.
 *
 * Error handling: most failures jump to probe_done, which unwinds state;
 * NOTE(review): a few allocation failures return -ENOMEM directly and
 * skip that unwinding (irq_info, mdp_intr_cb, mdp_irq_mask) — confirm
 * whether those paths intentionally leave mdss_res et al. initialized.
 */
static int mdss_mdp_probe(struct platform_device *pdev)
{
	struct resource *res;
	int rc;
	struct mdss_data_type *mdata;
	uint32_t intf_sel = 0;
	uint32_t split_display = 0;
	int num_of_display_on = 0;
	int i = 0;

	if (!pdev->dev.of_node) {
		pr_err("MDP driver only supports device tree probe\n");
		return -ENOTSUPP;
	}

	/* mdss_res doubles as the "already probed" flag */
	if (mdss_res) {
		pr_err("MDP already initialized\n");
		return -EINVAL;
	}

	mdata = devm_kzalloc(&pdev->dev, sizeof(*mdata), GFP_KERNEL);
	if (mdata == NULL)
		return -ENOMEM;

	pdev->id = 0;
	mdata->pdev = pdev;
	platform_set_drvdata(pdev, mdata);
	mdss_res = mdata;
	mutex_init(&mdata->reg_lock);
	mutex_init(&mdata->reg_bus_lock);
	mutex_init(&mdata->bus_lock);
	INIT_LIST_HEAD(&mdata->reg_bus_clist);
	atomic_set(&mdata->sd_client_count, 0);
	atomic_set(&mdata->active_intf_cnt, 0);

	mdss_res->mdss_util = mdss_get_util_intf();
	if (mdss_res->mdss_util == NULL) {
		pr_err("Failed to get mdss utility functions\n");
		return -ENODEV;
	}

	/* publish driver callbacks through the shared utility interface */
	mdss_res->mdss_util->get_iommu_domain = mdss_smmu_get_domain_id;
	mdss_res->mdss_util->iommu_attached = is_mdss_iommu_attached;
	mdss_res->mdss_util->iommu_ctrl = mdss_iommu_ctrl;
	mdss_res->mdss_util->bus_scale_set_quota = mdss_bus_scale_set_quota;
	mdss_res->mdss_util->bus_bandwidth_ctrl = mdss_bus_bandwidth_ctrl;
	mdss_res->mdss_util->panel_intf_type = mdss_panel_intf_type;
	mdss_res->mdss_util->panel_intf_status = mdss_panel_get_intf_status;

	/* display disabled via boot parameter: finish probe as a no-op */
	if (mdss_res->mdss_util->param_check(mdss_mdp_panel)) {
		mdss_res->mdss_util->display_disabled = true;
		mdss_res->mdss_util->mdp_probe_done = true;
		return 0;
	}

	rc = msm_dss_ioremap_byname(pdev, &mdata->mdss_io, "mdp_phys");
	if (rc) {
		pr_err("unable to map MDP base\n");
		goto probe_done;
	}
	pr_debug("MDSS HW Base addr=0x%x len=0x%x\n",
		(int) (unsigned long) mdata->mdss_io.base,
		mdata->mdss_io.len);

	rc = msm_dss_ioremap_byname(pdev, &mdata->vbif_io, "vbif_phys");
	if (rc) {
		pr_err("unable to map MDSS VBIF base\n");
		goto probe_done;
	}
	pr_debug("MDSS VBIF HW Base addr=0x%x len=0x%x\n",
		(int) (unsigned long) mdata->vbif_io.base,
		mdata->vbif_io.len);

	/* the non-realtime VBIF is optional on some targets */
	rc = msm_dss_ioremap_byname(pdev, &mdata->vbif_nrt_io, "vbif_nrt_phys");
	if (rc)
		pr_debug("unable to map MDSS VBIF non-realtime base\n");
	else
		pr_debug("MDSS VBIF NRT HW Base addr=%pK len=0x%x\n",
			mdata->vbif_nrt_io.base, mdata->vbif_nrt_io.len);

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		pr_err("unable to get MDSS irq\n");
		rc = -ENOMEM;
		goto probe_done;
	}

	/* NOTE(review): direct return bypasses probe_done cleanup */
	mdss_mdp_hw.irq_info = kcalloc(1, sizeof(struct irq_info), GFP_KERNEL);
	if (!mdss_mdp_hw.irq_info)
		return -ENOMEM;

	mdss_mdp_hw.irq_info->irq = res->start;
	mdss_mdp_hw.ptr = mdata;

	/* export misc. interrupts to external driver */
	mdata->irq_domain = irq_domain_add_linear(pdev->dev.of_node, 32,
			&mdss_irq_domain_ops, mdata);
	if (!mdata->irq_domain) {
		pr_err("unable to add linear domain\n");
		rc = -ENOMEM;
		goto probe_done;
	}

	mdss_misc_hw.irq_info = mdss_intr_line();
	rc = mdss_res->mdss_util->register_irq(&mdss_misc_hw);
	if (rc)
		pr_err("mdss_register_irq failed.\n");

	rc = mdss_mdp_res_init(mdata);
	if (rc) {
		pr_err("unable to initialize mdss mdp resources\n");
		goto probe_done;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT_MS);
	if (mdata->idle_pc_enabled)
		pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	/* without runtime PM, keep the footswitch on manually */
	if (!pm_runtime_enabled(&pdev->dev))
		mdss_mdp_footswitch_ctrl(mdata, true);

	rc = mdss_mdp_bus_scale_register(mdata);
	if (rc) {
		pr_err("unable to register bus scaling\n");
		goto probe_done;
	}

	/*
	 * enable clocks and read mdp_rev as soon as possible once
	 * kernel is up.
	 */
	mdss_mdp_footswitch_ctrl_splash(true);
	mdss_hw_rev_init(mdata);

	/*populate hw iomem base info from device tree*/
	rc = mdss_mdp_parse_dt(pdev);
	if (rc) {
		pr_err("unable to parse device tree\n");
		goto probe_done;
	}

	rc = mdss_mdp_get_cmdline_config(pdev);
	if (rc) {
		pr_err("Error in panel override:rc=[%d]\n", rc);
		goto probe_done;
	}

	rc = mdss_mdp_debug_init(pdev, mdata);
	if (rc) {
		pr_err("unable to initialize mdp debugging\n");
		goto probe_done;
	}
	rc = mdss_mdp_scaler_init(mdata, &pdev->dev);
	if (rc)
		goto probe_done;

	/* the following registrations are non-fatal; log and continue */
	rc = mdss_mdp_register_sysfs(mdata);
	if (rc)
		pr_err("unable to register mdp sysfs nodes\n");

	rc = mdss_fb_register_mdp_instance(&mdp5);
	if (rc)
		pr_err("unable to register mdp instance\n");

	rc = mdss_res->mdss_util->register_irq(&mdss_mdp_hw);
	if (rc)
		pr_err("mdss_register_irq failed.\n");

	rc = mdss_smmu_init(mdata, &pdev->dev);
	if (rc)
		pr_err("mdss smmu init failed\n");

	mdss_mdp_set_supported_formats(mdata);

	mdss_res->mdss_util->mdp_probe_done = true;

	mdss_hw_init(mdata);

	rc = mdss_mdp_pp_init(&pdev->dev);
	if (rc)
		pr_err("unable to initialize mdss pp resources\n");

	/* Restoring Secure configuration during boot-up */
	if (mdss_mdp_req_init_restore_cfg(mdata))
		__mdss_restore_sec_cfg(mdata);

	/* snapshot the reset values so BWC panic mode can be restored */
	if (mdss_has_quirk(mdata, MDSS_QUIRK_BWCPANIC)) {
		mdata->default_panic_lut0 = readl_relaxed(mdata->mdp_base +
			MMSS_MDP_PANIC_LUT0);
		mdata->default_panic_lut1 = readl_relaxed(mdata->mdp_base +
			MMSS_MDP_PANIC_LUT1);
		mdata->default_robust_lut = readl_relaxed(mdata->mdp_base +
			MMSS_MDP_ROBUST_LUT);
	}

	/*
	 * Read the DISP_INTF_SEL register to check if display was enabled in
	 * bootloader or not. If yes, let handoff handle removing the extra
	 * clk/regulator votes else turn off clk/regulators because purpose
	 * here is to get mdp_rev.
	 */
	intf_sel = readl_relaxed(mdata->mdp_base +
		MDSS_MDP_REG_DISP_INTF_SEL);
	split_display = readl_relaxed(mdata->mdp_base +
		MDSS_MDP_REG_SPLIT_DISPLAY_EN);
	mdata->splash_intf_sel = intf_sel;
	mdata->splash_split_disp = split_display;

	/* each non-zero byte of DISP_INTF_SEL is one active interface */
	if (intf_sel != 0) {
		for (i = 0; i < 4; i++)
			if ((intf_sel >> i*8) & 0x000000FF)
				num_of_display_on++;

		/*
		 * For split display enabled - DSI0, DSI1 interfaces are
		 * considered as single display. So decrement
		 * 'num_of_display_on' by 1
		 */
		if (split_display)
			num_of_display_on--;
	}
	if (!num_of_display_on) {
		mdss_mdp_footswitch_ctrl_splash(false);
		msm_bus_scale_client_update_request(
					mdata->bus_hdl, 0);
		mdata->ao_bw_uc_idx = 0;
	} else {
		mdata->handoff_pending = true;
		/*
		 * If multiple displays are enabled in LK, ctrl_splash off will
		 * be called multiple times during splash_cleanup. Need to
		 * enable it symmetrically
		 */
		for (i = 1; i < num_of_display_on; i++)
			mdss_mdp_footswitch_ctrl_splash(true);
	}

	/* NOTE(review): direct returns below bypass probe_done cleanup */
	mdp_intr_cb  = kcalloc(ARRAY_SIZE(mdp_irq_map),
			sizeof(struct intr_callback), GFP_KERNEL);
	if (mdp_intr_cb == NULL)
		return -ENOMEM;

	mdss_res->mdp_irq_mask = kcalloc(ARRAY_SIZE(mdp_intr_reg),
			sizeof(u32), GFP_KERNEL);
	if (mdss_res->mdp_irq_mask == NULL)
		return -ENOMEM;

	pr_info("mdss version = 0x%x, bootloader display is %s, num %d, intf_sel=0x%08x\n",
		mdata->mdp_rev, num_of_display_on ? "on" : "off",
		num_of_display_on, intf_sel);

probe_done:
	if (IS_ERR_VALUE(rc)) {
		if (!num_of_display_on)
			mdss_mdp_footswitch_ctrl_splash(false);

		if (mdata->regulator_notif_register)
			regulator_unregister_notifier(mdata->fs,
						&(mdata->gdsc_cb));
		mdss_mdp_hw.ptr = NULL;
		mdss_mdp_pp_term(&pdev->dev);
		mutex_destroy(&mdata->reg_lock);
		mdss_res = NULL;
	}

	return rc;
}
+
+static void mdss_mdp_parse_dt_regs_array(const u32 *arr, struct dss_io_data *io,
+	struct mdss_hw_settings *hws, int count)
+{
+	u32 len, reg;
+	int i;
+
+	if (!arr)
+		return;
+
+	for (i = 0, len = count * 2; i < len; i += 2) {
+		reg = be32_to_cpu(arr[i]);
+		if (reg >= io->len)
+			continue;
+
+		hws->reg = io->base + reg;
+		hws->val = be32_to_cpu(arr[i + 1]);
+		pr_debug("reg: 0x%04x=0x%08x\n", reg, hws->val);
+		hws++;
+	}
+}
+
+int mdss_mdp_parse_dt_hw_settings(struct platform_device *pdev)
+{
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+	struct mdss_hw_settings *hws;
+	const u32 *vbif_arr, *mdp_arr, *vbif_nrt_arr;
+	int vbif_len, mdp_len, vbif_nrt_len;
+
+	vbif_arr = of_get_property(pdev->dev.of_node, "qcom,vbif-settings",
+			&vbif_len);
+	if (!vbif_arr || (vbif_len & 1)) {
+		pr_debug("MDSS VBIF settings not found\n");
+		vbif_len = 0;
+	}
+	vbif_len /= 2 * sizeof(u32);
+
+	vbif_nrt_arr = of_get_property(pdev->dev.of_node,
+				"qcom,vbif-nrt-settings", &vbif_nrt_len);
+	if (!vbif_nrt_arr || (vbif_nrt_len & 1)) {
+		pr_debug("MDSS VBIF non-realtime settings not found\n");
+		vbif_nrt_len = 0;
+	}
+	vbif_nrt_len /= 2 * sizeof(u32);
+
+	mdp_arr = of_get_property(pdev->dev.of_node, "qcom,mdp-settings",
+			&mdp_len);
+	if (!mdp_arr || (mdp_len & 1)) {
+		pr_debug("MDSS MDP settings not found\n");
+		mdp_len = 0;
+	}
+	mdp_len /= 2 * sizeof(u32);
+
+	if (!(mdp_len + vbif_len + vbif_nrt_len))
+		return 0;
+
+	hws = devm_kzalloc(&pdev->dev, sizeof(*hws) * (vbif_len + mdp_len +
+			vbif_nrt_len + 1), GFP_KERNEL);
+	if (!hws)
+		return -ENOMEM;
+
+	mdss_mdp_parse_dt_regs_array(vbif_arr, &mdata->vbif_io,
+			hws, vbif_len);
+	mdss_mdp_parse_dt_regs_array(vbif_nrt_arr, &mdata->vbif_nrt_io,
+			hws, vbif_nrt_len);
+	mdss_mdp_parse_dt_regs_array(mdp_arr, &mdata->mdss_io,
+		hws + vbif_len, mdp_len);
+
+	mdata->hw_settings = hws;
+
+	return 0;
+}
+
+static int mdss_mdp_parse_dt(struct platform_device *pdev)
+{
+	int rc, data;
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+
+	rc = mdss_mdp_parse_dt_hw_settings(pdev);
+	if (rc) {
+		pr_err("Error in device tree : hw settings\n");
+		return rc;
+	}
+
+	rc = mdss_mdp_parse_dt_pipe(pdev);
+	if (rc) {
+		pr_err("Error in device tree : pipes\n");
+		return rc;
+	}
+
+	rc = mdss_mdp_parse_dt_mixer(pdev);
+	if (rc) {
+		pr_err("Error in device tree : mixers\n");
+		return rc;
+	}
+
+	rc = mdss_mdp_parse_dt_misc(pdev);
+	if (rc) {
+		pr_err("Error in device tree : misc\n");
+		return rc;
+	}
+
+	rc = mdss_mdp_parse_dt_wb(pdev);
+	if (rc) {
+		pr_err("Error in device tree : wb\n");
+		return rc;
+	}
+
+	rc = mdss_mdp_parse_dt_ctl(pdev);
+	if (rc) {
+		pr_err("Error in device tree : ctl\n");
+		return rc;
+	}
+
+	rc = mdss_mdp_parse_dt_video_intf(pdev);
+	if (rc) {
+		pr_err("Error in device tree : ctl\n");
+		return rc;
+	}
+
+	rc = mdss_mdp_parse_dt_smp(pdev);
+	if (rc) {
+		pr_err("Error in device tree : smp\n");
+		return rc;
+	}
+
+	rc = mdss_mdp_parse_dt_prefill(pdev);
+	if (rc) {
+		pr_err("Error in device tree : prefill\n");
+		return rc;
+	}
+
+	rc = mdss_mdp_parse_dt_ad_cfg(pdev);
+	if (rc) {
+		pr_err("Error in device tree : ad\n");
+		return rc;
+	}
+
+	rc = mdss_mdp_parse_dt_cdm(pdev);
+	if (rc)
+		pr_debug("CDM offset not found in device tree\n");
+
+	rc = mdss_mdp_parse_dt_dsc(pdev);
+	if (rc)
+		pr_debug("DSC offset not found in device tree\n");
+
+	/* Parse the mdp specific register base offset*/
+	rc = of_property_read_u32(pdev->dev.of_node,
+		"qcom,mdss-mdp-reg-offset", &data);
+	if (rc) {
+		pr_err("Error in device tree : mdp reg base\n");
+		return rc;
+	}
+	mdata->mdp_base = mdata->mdss_io.base + data;
+	return 0;
+}
+
+static void mdss_mdp_parse_dt_pipe_sw_reset(struct platform_device *pdev,
+	u32 reg_off, char *prop_name, struct mdss_mdp_pipe *pipe_list,
+	u32 npipes)
+{
+	int len;
+	const u32 *arr;
+
+	arr = of_get_property(pdev->dev.of_node, prop_name, &len);
+	if (arr) {
+		int i;
+
+		len /= sizeof(u32);
+		if (len != npipes) {
+			pr_err("%s: invalid sw_reset entries req:%d found:%d\n",
+				prop_name, len, npipes);
+			return;
+		}
+
+		for (i = 0; i < len; i++) {
+			pipe_list[i].sw_reset.reg_off = reg_off;
+			pipe_list[i].sw_reset.bit_off = be32_to_cpu(arr[i]);
+
+			pr_debug("%s[%d]: sw_reset: reg_off:0x%x bit_off:%d\n",
+				prop_name, i, reg_off, be32_to_cpu(arr[i]));
+		}
+	}
+}
+
+static int  mdss_mdp_parse_dt_pipe_clk_ctrl(struct platform_device *pdev,
+	char *prop_name, struct mdss_mdp_pipe *pipe_list, u32 npipes)
+{
+	int rc = 0, len;
+	const u32 *arr;
+
+	arr = of_get_property(pdev->dev.of_node, prop_name, &len);
+	if (arr) {
+		int i, j;
+
+		len /= sizeof(u32);
+		for (i = 0, j = 0; i < len; j++) {
+			struct mdss_mdp_pipe *pipe = NULL;
+
+			if (j >= npipes) {
+				pr_err("invalid clk ctrl enries for prop: %s\n",
+					prop_name);
+				return -EINVAL;
+			}
+
+			pipe = &pipe_list[j];
+
+			pipe->clk_ctrl.reg_off = be32_to_cpu(arr[i++]);
+			pipe->clk_ctrl.bit_off = be32_to_cpu(arr[i++]);
+
+			/* status register is next in line to ctrl register */
+			pipe->clk_status.reg_off = pipe->clk_ctrl.reg_off + 4;
+			pipe->clk_status.bit_off = be32_to_cpu(arr[i++]);
+
+			pr_debug("%s[%d]: ctrl: reg_off: 0x%x bit_off: %d\n",
+				prop_name, j, pipe->clk_ctrl.reg_off,
+				pipe->clk_ctrl.bit_off);
+			pr_debug("%s[%d]: status: reg_off: 0x%x bit_off: %d\n",
+				prop_name, j, pipe->clk_status.reg_off,
+				pipe->clk_status.bit_off);
+		}
+		if (j != npipes) {
+			pr_err("%s: %d entries found. required %d\n",
+				prop_name, j, npipes);
+			for (i = 0; i < npipes; i++) {
+				memset(&pipe_list[i].clk_ctrl, 0,
+					sizeof(pipe_list[i].clk_ctrl));
+				memset(&pipe_list[i].clk_status, 0,
+					sizeof(pipe_list[i].clk_status));
+			}
+			rc = -EINVAL;
+		}
+	} else {
+		pr_err("error mandatory property '%s' not found\n", prop_name);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static void mdss_mdp_parse_dt_pipe_panic_ctrl(struct platform_device *pdev,
+	char *prop_name, struct mdss_mdp_pipe *pipe_list, u32 npipes)
+{
+	int i, j;
+	int len;
+	const u32 *arr;
+	struct mdss_mdp_pipe *pipe = NULL;
+
+	arr = of_get_property(pdev->dev.of_node, prop_name, &len);
+	if (arr) {
+		len /= sizeof(u32);
+		for (i = 0, j = 0; i < len; j++) {
+			if (j >= npipes) {
+				pr_err("invalid panic ctrl enries for prop: %s\n",
+					prop_name);
+				return;
+			}
+
+			pipe = &pipe_list[j];
+			pipe->panic_ctrl_ndx = be32_to_cpu(arr[i++]);
+		}
+		if (j != npipes)
+			pr_err("%s: %d entries found. required %d\n",
+				prop_name, j, npipes);
+	} else {
+		pr_debug("panic ctrl enabled but property '%s' not found\n",
+								prop_name);
+	}
+}
+
/*
 * mdss_mdp_parse_dt_pipe_helper() - parse DT properties for one pipe type
 * @pdev: mdss platform device
 * @ptype: pipe type being parsed (VIG/RGB/DMA/CURSOR)
 * @ptypestr: pipe type name used to build the per-type DT property names
 * @out_plist: receives the devm-allocated pipe array (NULL if none found)
 * @len: number of pipes of this type expected from the DT
 * @priority_base: first priority index assigned to these pipes
 *
 * Builds "qcom,mdss-pipe-<type>-..." property names with snprintf and
 * reads fetch ids, xin ids and register offsets before handing everything
 * to mdss_mdp_pipe_addr_setup() and the clk-ctrl parser.
 *
 * Return: number of pipes set up on success (may be less than @len when
 * the hardware exposes fewer pipe numbers of this type), or a negative
 * error code; on failure the pipe array is freed again.
 */
static int mdss_mdp_parse_dt_pipe_helper(struct platform_device *pdev,
		u32 ptype, char *ptypestr,
		struct mdss_mdp_pipe **out_plist,
		size_t len,
		u8 priority_base)
{
	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
	u32 offsets[MDSS_MDP_MAX_SSPP];
	u32 ftch_id[MDSS_MDP_MAX_SSPP];
	u32 xin_id[MDSS_MDP_MAX_SSPP];
	u32 pnums[MDSS_MDP_MAX_SSPP];
	struct mdss_mdp_pipe *pipe_list;
	char prop_name[64];
	int i, cnt, rc;
	u32 rects_per_sspp;

	if (!out_plist)
		return -EINVAL;

	/* collect the hardware pipe numbers that belong to this pipe type */
	for (i = 0, cnt = 0; i < MDSS_MDP_MAX_SSPP && cnt < len; i++) {
		if (ptype == get_pipe_type_from_num(i)) {
			pnums[cnt] = i;
			cnt++;
		}
	}

	if (cnt < len)
		pr_warn("Invalid %s pipe count: %zu, max supported: %d\n",
				ptypestr, len, cnt);
	if (cnt == 0) {
		*out_plist = NULL;

		return 0;
	}

	/* by default works in single rect mode unless otherwise noted */
	rects_per_sspp = mdata->rects_per_sspp[ptype] ? : 1;

	pipe_list = devm_kzalloc(&pdev->dev,
			(sizeof(struct mdss_mdp_pipe) * cnt * rects_per_sspp),
			GFP_KERNEL);
	if (!pipe_list)
		return -ENOMEM;

	/*
	 * Pixel-RAM targets and cursor pipes have no SMP fetch ids; mark
	 * them unused instead of reading the fetch-id property.
	 */
	if (mdata->has_pixel_ram || (ptype == MDSS_MDP_PIPE_TYPE_CURSOR)) {
		for (i = 0; i < cnt; i++)
			ftch_id[i] = -1;
	} else {
		snprintf(prop_name, sizeof(prop_name),
				"qcom,mdss-pipe-%s-fetch-id", ptypestr);
		rc = mdss_mdp_parse_dt_handler(pdev, prop_name, ftch_id,
				cnt);
		if (rc)
			goto parse_fail;
	}

	snprintf(prop_name, sizeof(prop_name),
			"qcom,mdss-pipe-%s-xin-id", ptypestr);
	rc = mdss_mdp_parse_dt_handler(pdev, prop_name, xin_id, cnt);
	if (rc)
		goto parse_fail;

	snprintf(prop_name, sizeof(prop_name),
			"qcom,mdss-pipe-%s-off", ptypestr);
	rc = mdss_mdp_parse_dt_handler(pdev, prop_name, offsets, cnt);
	if (rc)
		goto parse_fail;

	rc = mdss_mdp_pipe_addr_setup(mdata, pipe_list, offsets, ftch_id,
			xin_id, ptype, pnums, cnt, rects_per_sspp,
			priority_base);
	if (rc)
		goto parse_fail;

	snprintf(prop_name, sizeof(prop_name),
			"qcom,mdss-pipe-%s-clk-ctrl-offsets", ptypestr);
	rc = mdss_mdp_parse_dt_pipe_clk_ctrl(pdev, prop_name,
			pipe_list, cnt);
	if (rc)
		goto parse_fail;

	*out_plist = pipe_list;

	return cnt;
parse_fail:
	/* release the pipe array; callers only see the error code */
	devm_kfree(&pdev->dev, pipe_list);

	return rc;
}
+
+static int mdss_mdp_parse_dt_pipe(struct platform_device *pdev)
+{
+	int rc = 0;
+	u32 nfids = 0, len, nxids = 0, npipes = 0;
+	u32 sw_reset_offset = 0;
+	u32 data[4];
+
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+
+	mdata->has_pixel_ram = !mdss_mdp_parse_dt_prop_len(pdev,
+						"qcom,mdss-smp-data");
+
+	mdata->nvig_pipes = mdss_mdp_parse_dt_prop_len(pdev,
+				"qcom,mdss-pipe-vig-off");
+	mdata->nrgb_pipes = mdss_mdp_parse_dt_prop_len(pdev,
+				"qcom,mdss-pipe-rgb-off");
+	mdata->ndma_pipes = mdss_mdp_parse_dt_prop_len(pdev,
+				"qcom,mdss-pipe-dma-off");
+	mdata->ncursor_pipes = mdss_mdp_parse_dt_prop_len(pdev,
+				"qcom,mdss-pipe-cursor-off");
+
+	npipes = mdata->nvig_pipes + mdata->nrgb_pipes + mdata->ndma_pipes;
+
+	if (!mdata->has_pixel_ram) {
+		nfids  += mdss_mdp_parse_dt_prop_len(pdev,
+				"qcom,mdss-pipe-vig-fetch-id");
+		nfids  += mdss_mdp_parse_dt_prop_len(pdev,
+				"qcom,mdss-pipe-rgb-fetch-id");
+		nfids  += mdss_mdp_parse_dt_prop_len(pdev,
+				"qcom,mdss-pipe-dma-fetch-id");
+		if (npipes != nfids) {
+			pr_err("device tree err: unequal number of pipes and smp ids");
+			return -EINVAL;
+		}
+	}
+
+	if (mdata->nvig_pipes)
+		nxids += mdss_mdp_parse_dt_prop_len(pdev,
+				"qcom,mdss-pipe-vig-xin-id");
+	if (mdata->nrgb_pipes)
+		nxids += mdss_mdp_parse_dt_prop_len(pdev,
+				"qcom,mdss-pipe-rgb-xin-id");
+	if (mdata->ndma_pipes)
+		nxids += mdss_mdp_parse_dt_prop_len(pdev,
+				"qcom,mdss-pipe-dma-xin-id");
+	if (npipes != nxids) {
+		pr_err("device tree err: unequal number of pipes and xin ids\n");
+		return -EINVAL;
+	}
+
+	rc = mdss_mdp_parse_dt_pipe_helper(pdev, MDSS_MDP_PIPE_TYPE_VIG, "vig",
+			&mdata->vig_pipes, mdata->nvig_pipes, 0);
+	if (IS_ERR_VALUE(rc))
+		goto parse_fail;
+	mdata->nvig_pipes = rc;
+
+	rc = mdss_mdp_parse_dt_pipe_helper(pdev, MDSS_MDP_PIPE_TYPE_RGB, "rgb",
+			&mdata->rgb_pipes, mdata->nrgb_pipes,
+			mdata->nvig_pipes);
+	if (IS_ERR_VALUE(rc))
+		goto parse_fail;
+	mdata->nrgb_pipes = rc;
+
+	rc = mdss_mdp_parse_dt_pipe_helper(pdev, MDSS_MDP_PIPE_TYPE_DMA, "dma",
+			&mdata->dma_pipes, mdata->ndma_pipes,
+			mdata->nvig_pipes + mdata->nrgb_pipes);
+	if (IS_ERR_VALUE(rc))
+		goto parse_fail;
+	mdata->ndma_pipes = rc;
+
+	rc = mdss_mdp_parse_dt_pipe_helper(pdev, MDSS_MDP_PIPE_TYPE_CURSOR,
+			"cursor", &mdata->cursor_pipes, mdata->ncursor_pipes,
+			0);
+	if (IS_ERR_VALUE(rc))
+		goto parse_fail;
+	mdata->ncursor_pipes = rc;
+
+	rc = 0;
+
+	mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pipe-sw-reset-off",
+		&sw_reset_offset, 1);
+	if (sw_reset_offset) {
+		if (mdata->vig_pipes)
+			mdss_mdp_parse_dt_pipe_sw_reset(pdev, sw_reset_offset,
+				"qcom,mdss-pipe-vig-sw-reset-map",
+				mdata->vig_pipes, mdata->nvig_pipes);
+		if (mdata->rgb_pipes)
+			mdss_mdp_parse_dt_pipe_sw_reset(pdev, sw_reset_offset,
+				"qcom,mdss-pipe-rgb-sw-reset-map",
+				mdata->rgb_pipes, mdata->nrgb_pipes);
+		if (mdata->dma_pipes)
+			mdss_mdp_parse_dt_pipe_sw_reset(pdev, sw_reset_offset,
+				"qcom,mdss-pipe-dma-sw-reset-map",
+				mdata->dma_pipes, mdata->ndma_pipes);
+	}
+
+	mdata->has_panic_ctrl = of_property_read_bool(pdev->dev.of_node,
+		"qcom,mdss-has-panic-ctrl");
+	if (mdata->has_panic_ctrl) {
+		if (mdata->vig_pipes)
+			mdss_mdp_parse_dt_pipe_panic_ctrl(pdev,
+				"qcom,mdss-pipe-vig-panic-ctrl-offsets",
+				mdata->vig_pipes, mdata->nvig_pipes);
+		if (mdata->rgb_pipes)
+			mdss_mdp_parse_dt_pipe_panic_ctrl(pdev,
+				"qcom,mdss-pipe-rgb-panic-ctrl-offsets",
+				mdata->rgb_pipes, mdata->nrgb_pipes);
+		if (mdata->dma_pipes)
+			mdss_mdp_parse_dt_pipe_panic_ctrl(pdev,
+				"qcom,mdss-pipe-dma-panic-ctrl-offsets",
+				mdata->dma_pipes, mdata->ndma_pipes);
+	}
+
+	len = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-per-pipe-panic-luts");
+	if (len != 4) {
+		pr_debug("Unable to read per-pipe-panic-luts\n");
+	} else {
+		rc = mdss_mdp_parse_dt_handler(pdev,
+			"qcom,mdss-per-pipe-panic-luts", data, len);
+		mdata->default_panic_lut_per_pipe_linear = data[0];
+		mdata->default_panic_lut_per_pipe_tile = data[1];
+		mdata->default_robust_lut_per_pipe_linear = data[2];
+		mdata->default_robust_lut_per_pipe_tile = data[3];
+		pr_debug("per pipe panic lut [0]:0x%x [1]:0x%x [2]:0x%x [3]:0x%x\n",
+			data[0], data[1], data[2], data[3]);
+	}
+
+parse_fail:
+	return rc;
+}
+
+static int mdss_mdp_parse_dt_mixer(struct platform_device *pdev)
+{
+
+	u32 nmixers, npingpong;
+	int rc = 0;
+	u32 *mixer_offsets = NULL, *dspp_offsets = NULL,
+	    *pingpong_offsets = NULL;
+	u32 is_virtual_mixer_req = false;
+
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+
+	mdata->nmixers_intf = mdss_mdp_parse_dt_prop_len(pdev,
+				"qcom,mdss-mixer-intf-off");
+	mdata->nmixers_wb = mdss_mdp_parse_dt_prop_len(pdev,
+				"qcom,mdss-mixer-wb-off");
+	mdata->ndspp = mdss_mdp_parse_dt_prop_len(pdev,
+				"qcom,mdss-dspp-off");
+	npingpong = mdss_mdp_parse_dt_prop_len(pdev,
+				"qcom,mdss-pingpong-off");
+	nmixers = mdata->nmixers_intf + mdata->nmixers_wb;
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,max-mixer-width", &mdata->max_mixer_width);
+	if (rc) {
+		pr_err("device tree err: failed to get max mixer width\n");
+		return -EINVAL;
+	}
+
+	if (mdata->nmixers_intf < mdata->ndspp) {
+		pr_err("device tree err: no of dspp are greater than intf mixers\n");
+		return -EINVAL;
+	}
+
+	if (mdata->nmixers_intf != npingpong) {
+		pr_err("device tree err: unequal no of pingpong and intf mixers\n");
+		return -EINVAL;
+	}
+
+	mixer_offsets = kcalloc(nmixers, sizeof(u32), GFP_KERNEL);
+	if (!mixer_offsets)
+		return -ENOMEM;
+
+	dspp_offsets = kcalloc(mdata->ndspp, sizeof(u32), GFP_KERNEL);
+	if (!dspp_offsets) {
+		rc = -ENOMEM;
+		goto dspp_alloc_fail;
+	}
+	pingpong_offsets = kcalloc(npingpong, sizeof(u32), GFP_KERNEL);
+	if (!pingpong_offsets) {
+		rc = -ENOMEM;
+		goto pingpong_alloc_fail;
+	}
+
+	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-mixer-intf-off",
+		mixer_offsets, mdata->nmixers_intf);
+	if (rc)
+		goto parse_done;
+
+	mdata->has_separate_rotator = of_property_read_bool(pdev->dev.of_node,
+		"qcom,mdss-has-separate-rotator");
+	if (mdata->nmixers_wb) {
+		rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-mixer-wb-off",
+				mixer_offsets + mdata->nmixers_intf,
+				mdata->nmixers_wb);
+		if (rc)
+			goto parse_done;
+	} else if (!mdata->has_separate_rotator) {
+		/*
+		 * If writeback mixers are not available, put the number of
+		 * writeback mixers equal to number of DMA pipes so that
+		 * later same number of virtual writeback mixers can be
+		 * allocated.
+		 */
+		mdata->nmixers_wb = mdata->ndma_pipes;
+		is_virtual_mixer_req = true;
+	}
+
+	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-dspp-off",
+		dspp_offsets, mdata->ndspp);
+	if (rc)
+		goto parse_done;
+
+	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pingpong-off",
+		pingpong_offsets, npingpong);
+	if (rc)
+		goto parse_done;
+
+	rc = mdss_mdp_mixer_addr_setup(mdata, mixer_offsets,
+			dspp_offsets, pingpong_offsets,
+			MDSS_MDP_MIXER_TYPE_INTF, mdata->nmixers_intf);
+	if (rc)
+		goto parse_done;
+
+	if (mdata->nmixers_wb) {
+		if (is_virtual_mixer_req) {
+			/*
+			 * Replicate last interface mixers based on number of
+			 * dma pipes available as virtual writeback mixers.
+			 */
+			rc = mdss_mdp_mixer_addr_setup(mdata, mixer_offsets +
+				mdata->nmixers_intf - mdata->ndma_pipes,
+				NULL, NULL, MDSS_MDP_MIXER_TYPE_WRITEBACK,
+				mdata->nmixers_wb);
+			if (rc)
+				goto parse_done;
+		} else {
+			rc = mdss_mdp_mixer_addr_setup(mdata, mixer_offsets +
+				mdata->nmixers_intf, NULL, NULL,
+				MDSS_MDP_MIXER_TYPE_WRITEBACK,
+				mdata->nmixers_wb);
+			if (rc)
+				goto parse_done;
+		}
+	}
+
+parse_done:
+	kfree(pingpong_offsets);
+pingpong_alloc_fail:
+	kfree(dspp_offsets);
+dspp_alloc_fail:
+	kfree(mixer_offsets);
+
+	return rc;
+}
+
+static int mdss_mdp_cdm_addr_setup(struct mdss_data_type *mdata,
+				   u32 *cdm_offsets, u32 len)
+{
+	struct mdss_mdp_cdm *head;
+	u32 i = 0;
+
+	head = devm_kzalloc(&mdata->pdev->dev, sizeof(struct mdss_mdp_cdm) *
+				len, GFP_KERNEL);
+	if (!head)
+		return -ENOMEM;
+
+	for (i = 0; i < len; i++) {
+		head[i].num = i;
+		head[i].base = (mdata->mdss_io.base) + cdm_offsets[i];
+		atomic_set(&head[i].kref.refcount, 0);
+		mutex_init(&head[i].lock);
+		init_completion(&head[i].free_comp);
+		pr_debug("%s: cdm off (%d) = %pK\n", __func__, i, head[i].base);
+	}
+
+	mdata->cdm_off = head;
+	mutex_init(&mdata->cdm_lock);
+	return 0;
+}
+
+static int mdss_mdp_parse_dt_cdm(struct platform_device *pdev)
+{
+	int rc = 0;
+	u32 *cdm_offsets = NULL;
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+
+	mdata->ncdm = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-cdm-off");
+
+	if (!mdata->ncdm) {
+		rc = 0;
+		pr_debug("%s: No CDM offsets present in DT\n", __func__);
+		goto end;
+	}
+	pr_debug("%s: cdm len == %d\n", __func__, mdata->ncdm);
+	cdm_offsets = kcalloc(mdata->ncdm, sizeof(u32), GFP_KERNEL);
+	if (!cdm_offsets) {
+		rc = -ENOMEM;
+		mdata->ncdm = 0;
+		goto end;
+	}
+
+	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-cdm-off", cdm_offsets,
+				       mdata->ncdm);
+	if (rc) {
+		pr_err("device tree err: failed to get cdm offsets\n");
+		goto fail;
+	}
+
+	rc = mdss_mdp_cdm_addr_setup(mdata, cdm_offsets, mdata->ncdm);
+	if (rc) {
+		pr_err("%s: CDM address setup failed\n", __func__);
+		goto fail;
+	}
+
+fail:
+	kfree(cdm_offsets);
+	if (rc)
+		mdata->ncdm = 0;
+end:
+	return rc;
+}
+
+static int mdss_mdp_dsc_addr_setup(struct mdss_data_type *mdata,
+				   u32 *dsc_offsets, u32 len)
+{
+	struct mdss_mdp_dsc *head;
+	u32 i = 0;
+
+	head = devm_kzalloc(&mdata->pdev->dev, sizeof(struct mdss_mdp_dsc) *
+				len, GFP_KERNEL);
+	if (!head)
+		return -ENOMEM;
+
+	for (i = 0; i < len; i++) {
+		head[i].num = i;
+		head[i].base = (mdata->mdss_io.base) + dsc_offsets[i];
+		pr_debug("dsc off (%d) = %pK\n", i, head[i].base);
+	}
+
+	mdata->dsc_off = head;
+	return 0;
+}
+
+static int mdss_mdp_parse_dt_dsc(struct platform_device *pdev)
+{
+	int rc = 0;
+	u32 *dsc_offsets = NULL;
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+
+	mdata->ndsc = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-dsc-off");
+	if (!mdata->ndsc) {
+		rc = 0;
+		pr_debug("No DSC offsets present in DT\n");
+		goto end;
+	}
+	pr_debug("dsc len == %d\n", mdata->ndsc);
+
+	dsc_offsets = kcalloc(mdata->ndsc, sizeof(u32), GFP_KERNEL);
+	if (!dsc_offsets) {
+		rc = -ENOMEM;
+		mdata->ndsc = 0;
+		goto end;
+	}
+
+	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-dsc-off", dsc_offsets,
+				       mdata->ndsc);
+	if (rc) {
+		pr_err("device tree err: failed to get cdm offsets\n");
+		goto fail;
+	}
+
+	rc = mdss_mdp_dsc_addr_setup(mdata, dsc_offsets, mdata->ndsc);
+	if (rc) {
+		pr_err("%s: DSC address setup failed\n", __func__);
+		goto fail;
+	}
+
+fail:
+	kfree(dsc_offsets);
+	if (rc)
+		mdata->ndsc = 0;
+end:
+	return rc;
+}
+
+static int mdss_mdp_parse_dt_wb(struct platform_device *pdev)
+{
+	int rc = 0;
+	u32 *wb_offsets = NULL;
+	u32 num_wb_mixer, nwb_offsets, num_intf_wb = 0;
+	const char *wfd_data;
+	struct mdss_data_type *mdata;
+
+	mdata = platform_get_drvdata(pdev);
+
+	num_wb_mixer = mdata->nmixers_wb;
+
+	wfd_data = of_get_property(pdev->dev.of_node,
+					"qcom,mdss-wfd-mode", NULL);
+	if (wfd_data && strcmp(wfd_data, "shared") != 0)
+		num_intf_wb = 1;
+
+	nwb_offsets =  mdss_mdp_parse_dt_prop_len(pdev,
+			"qcom,mdss-wb-off");
+
+	wb_offsets = kcalloc(nwb_offsets, sizeof(u32), GFP_KERNEL);
+	if (!wb_offsets)
+		return -ENOMEM;
+
+	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-wb-off",
+		wb_offsets, nwb_offsets);
+	if (rc)
+		goto wb_parse_done;
+
+	rc = mdss_mdp_wb_addr_setup(mdata, num_wb_mixer, num_intf_wb);
+	if (rc)
+		goto wb_parse_done;
+
+	mdata->nwb_offsets = nwb_offsets;
+	mdata->wb_offsets = wb_offsets;
+
+	return 0;
+
+wb_parse_done:
+	kfree(wb_offsets);
+	return rc;
+}
+
+static int mdss_mdp_parse_dt_ctl(struct platform_device *pdev)
+{
+	int rc = 0;
+	u32 *ctl_offsets = NULL;
+
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+
+	mdata->nctl = mdss_mdp_parse_dt_prop_len(pdev,
+			"qcom,mdss-ctl-off");
+
+	if (mdata->nctl < mdata->nwb) {
+		pr_err("device tree err: number of ctl greater than wb\n");
+		rc = -EINVAL;
+		goto parse_done;
+	}
+
+	ctl_offsets = kcalloc(mdata->nctl, sizeof(u32), GFP_KERNEL);
+	if (!ctl_offsets)
+		return -ENOMEM;
+
+	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-ctl-off",
+		ctl_offsets, mdata->nctl);
+	if (rc)
+		goto parse_done;
+
+	rc = mdss_mdp_ctl_addr_setup(mdata, ctl_offsets, mdata->nctl);
+	if (rc)
+		goto parse_done;
+
+parse_done:
+	kfree(ctl_offsets);
+
+	return rc;
+}
+
+static int mdss_mdp_parse_dt_video_intf(struct platform_device *pdev)
+{
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+	u32 count;
+	u32 *offsets;
+	int rc;
+
+
+	count = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-intf-off");
+	if (count == 0)
+		return -EINVAL;
+
+	offsets = kcalloc(count, sizeof(u32), GFP_KERNEL);
+	if (!offsets)
+		return -ENOMEM;
+
+	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-intf-off",
+			offsets, count);
+	if (rc)
+		goto parse_fail;
+
+	rc = mdss_mdp_video_addr_setup(mdata, offsets, count);
+	if (rc)
+		pr_err("unable to setup video interfaces\n");
+
+parse_fail:
+	kfree(offsets);
+
+	return rc;
+}
+
+static int mdss_mdp_update_smp_map(struct platform_device *pdev,
+		const u32 *data, int len, int pipe_cnt,
+		struct mdss_mdp_pipe *pipes)
+{
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+	int i, j, k;
+	u32 cnt, mmb;
+
+	len /= sizeof(u32);
+	for (i = 0, k = 0; i < len; k++) {
+		struct mdss_mdp_pipe *pipe = NULL;
+
+		if (k >= pipe_cnt) {
+			pr_err("invalid fixed mmbs\n");
+			return -EINVAL;
+		}
+
+		pipe = &pipes[k];
+
+		cnt = be32_to_cpu(data[i++]);
+		if (cnt == 0)
+			continue;
+
+		for (j = 0; j < cnt; j++) {
+			mmb = be32_to_cpu(data[i++]);
+			if (mmb > mdata->smp_mb_cnt) {
+				pr_err("overflow mmb:%d pipe:%d: max:%d\n",
+						mmb, k, mdata->smp_mb_cnt);
+				return -EINVAL;
+			}
+			set_bit(mmb, pipe->smp_map[0].fixed);
+		}
+		if (bitmap_intersects(pipe->smp_map[0].fixed,
+					mdata->mmb_alloc_map,
+					mdata->smp_mb_cnt)) {
+			pr_err("overlapping fixed mmb map\n");
+			return -EINVAL;
+		}
+		bitmap_or(mdata->mmb_alloc_map, pipe->smp_map[0].fixed,
+				mdata->mmb_alloc_map, mdata->smp_mb_cnt);
+	}
+	return 0;
+}
+
+static int mdss_mdp_parse_dt_smp(struct platform_device *pdev)
+{
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+	u32 num;
+	u32 data[2];
+	int rc, len;
+	const u32 *arr;
+
+	num = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-smp-data");
+	/*
+	 * This property is optional for targets with fix pixel ram. Rest
+	 * must provide no. of smp and size of each block.
+	 */
+	if (!num)
+		return 0;
+	else if (num != 2)
+		return -EINVAL;
+
+	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-smp-data", data, num);
+	if (rc)
+		return rc;
+
+	rc = mdss_mdp_smp_setup(mdata, data[0], data[1]);
+
+	if (rc) {
+		pr_err("unable to setup smp data\n");
+		return rc;
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+		"qcom,mdss-smp-mb-per-pipe", data);
+	mdata->smp_mb_per_pipe = (!rc ? data[0] : 0);
+
+	rc = 0;
+	arr = of_get_property(pdev->dev.of_node,
+			"qcom,mdss-pipe-rgb-fixed-mmb", &len);
+	if (arr) {
+		rc = mdss_mdp_update_smp_map(pdev, arr, len,
+				mdata->nrgb_pipes, mdata->rgb_pipes);
+
+		if (rc)
+			pr_warn("unable to update smp map for RGB pipes\n");
+	}
+
+	arr = of_get_property(pdev->dev.of_node,
+			"qcom,mdss-pipe-vig-fixed-mmb", &len);
+	if (arr) {
+		rc = mdss_mdp_update_smp_map(pdev, arr, len,
+				mdata->nvig_pipes, mdata->vig_pipes);
+
+		if (rc)
+			pr_warn("unable to update smp map for VIG pipes\n");
+	}
+	return rc;
+}
+
+static void mdss_mdp_parse_dt_fudge_factors(struct platform_device *pdev,
+	char *prop_name, struct mult_factor *ff)
+{
+	int rc;
+	u32 data[2] = {1, 1};
+
+	rc = mdss_mdp_parse_dt_handler(pdev, prop_name, data, 2);
+	if (rc) {
+		pr_debug("err reading %s\n", prop_name);
+	} else {
+		ff->numer = data[0];
+		ff->denom = data[1];
+	}
+}
+
+static int mdss_mdp_parse_dt_prefill(struct platform_device *pdev)
+{
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+	struct mdss_prefill_data *prefill = &mdata->prefill_data;
+	int rc;
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+		"qcom,mdss-prefill-outstanding-buffer-bytes",
+		&prefill->ot_bytes);
+	if (rc) {
+		pr_err("prefill outstanding buffer bytes not specified\n");
+		return rc;
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+		"qcom,mdss-prefill-y-buffer-bytes", &prefill->y_buf_bytes);
+	if (rc) {
+		pr_err("prefill y buffer bytes not specified\n");
+		return rc;
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+		"qcom,mdss-prefill-scaler-buffer-lines-bilinear",
+		&prefill->y_scaler_lines_bilinear);
+	if (rc) {
+		pr_err("prefill scaler lines for bilinear not specified\n");
+		return rc;
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+		"qcom,mdss-prefill-scaler-buffer-lines-caf",
+		&prefill->y_scaler_lines_caf);
+	if (rc) {
+		pr_debug("prefill scaler lines for caf not specified\n");
+		return rc;
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+		"qcom,mdss-prefill-post-scaler-buffer-pixels",
+		&prefill->post_scaler_pixels);
+	if (rc) {
+		pr_err("prefill post scaler buffer pixels not specified\n");
+		return rc;
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+		"qcom,mdss-prefill-pingpong-buffer-pixels",
+		&prefill->pp_pixels);
+	if (rc) {
+		pr_err("prefill pingpong buffer lines not specified\n");
+		return rc;
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+		"qcom,mdss-prefill-fbc-lines", &prefill->fbc_lines);
+	if (rc)
+		pr_debug("prefill FBC lines not specified\n");
+
+	return 0;
+}
+
+static void mdss_mdp_parse_vbif_qos(struct platform_device *pdev)
+{
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+	int rc;
+
+	mdata->npriority_lvl = mdss_mdp_parse_dt_prop_len(pdev,
+			"qcom,mdss-vbif-qos-rt-setting");
+	if (mdata->npriority_lvl == MDSS_VBIF_QOS_REMAP_ENTRIES) {
+		mdata->vbif_rt_qos = kcalloc(mdata->npriority_lvl,
+					    sizeof(u32), GFP_KERNEL);
+		if (!mdata->vbif_rt_qos)
+			return;
+
+		rc = mdss_mdp_parse_dt_handler(pdev,
+			"qcom,mdss-vbif-qos-rt-setting",
+				mdata->vbif_rt_qos, mdata->npriority_lvl);
+		if (rc) {
+			pr_debug("rt setting not found\n");
+			return;
+		}
+	} else {
+		mdata->npriority_lvl = 0;
+		pr_debug("Invalid or no vbif qos rt setting\n");
+		return;
+	}
+
+	mdata->npriority_lvl = mdss_mdp_parse_dt_prop_len(pdev,
+			"qcom,mdss-vbif-qos-nrt-setting");
+	if (mdata->npriority_lvl == MDSS_VBIF_QOS_REMAP_ENTRIES) {
+		mdata->vbif_nrt_qos = kcalloc(mdata->npriority_lvl,
+					      sizeof(u32), GFP_KERNEL);
+		if (!mdata->vbif_nrt_qos)
+			return;
+
+		rc = mdss_mdp_parse_dt_handler(pdev,
+			"qcom,mdss-vbif-qos-nrt-setting", mdata->vbif_nrt_qos,
+				mdata->npriority_lvl);
+		if (rc) {
+			pr_debug("nrt setting not found\n");
+			return;
+		}
+	} else {
+		mdata->npriority_lvl = 0;
+		pr_debug("Invalid or no vbif qos nrt seting\n");
+	}
+}
+
+static void mdss_mdp_parse_max_bw_array(const u32 *arr,
+		struct mdss_max_bw_settings *max_bw_settings, int count)
+{
+	int i;
+
+	for (i = 0; i < count; i++) {
+		max_bw_settings->mdss_max_bw_mode = be32_to_cpu(arr[i*2]);
+		max_bw_settings->mdss_max_bw_val = be32_to_cpu(arr[(i*2)+1]);
+		max_bw_settings++;
+	}
+}
+
+static void mdss_mdp_parse_max_bandwidth(struct platform_device *pdev)
+{
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+	struct mdss_max_bw_settings *max_bw_settings;
+	int max_bw_settings_cnt = 0;
+	const u32 *max_bw;
+
+	max_bw = of_get_property(pdev->dev.of_node, "qcom,max-bw-settings",
+			&max_bw_settings_cnt);
+
+	if (!max_bw || !max_bw_settings_cnt) {
+		pr_debug("MDSS max bandwidth settings not found\n");
+		return;
+	}
+
+	max_bw_settings_cnt /= 2 * sizeof(u32);
+
+	max_bw_settings = devm_kzalloc(&pdev->dev, sizeof(*max_bw_settings)
+			* max_bw_settings_cnt, GFP_KERNEL);
+	if (!max_bw_settings)
+		return;
+
+	mdss_mdp_parse_max_bw_array(max_bw, max_bw_settings,
+			max_bw_settings_cnt);
+
+	mdata->max_bw_settings = max_bw_settings;
+	mdata->max_bw_settings_cnt = max_bw_settings_cnt;
+}
+
+static void mdss_mdp_parse_per_pipe_bandwidth(struct platform_device *pdev)
+{
+
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+	struct mdss_max_bw_settings *max_bw_per_pipe_settings;
+	int max_bw_settings_cnt = 0;
+	const u32 *max_bw_settings;
+	u32 max_bw, min_bw, threshold, i = 0;
+
+	max_bw_settings = of_get_property(pdev->dev.of_node,
+			"qcom,max-bandwidth-per-pipe-kbps",
+			&max_bw_settings_cnt);
+
+	if (!max_bw_settings || !max_bw_settings_cnt) {
+		pr_debug("MDSS per pipe max bandwidth settings not found\n");
+		return;
+	}
+
+	/* Support targets where a common per pipe max bw is provided */
+	if ((max_bw_settings_cnt / sizeof(u32)) == 1) {
+		mdata->max_bw_per_pipe = be32_to_cpu(max_bw_settings[0]);
+		mdata->max_per_pipe_bw_settings = NULL;
+		pr_debug("Common per pipe max bandwidth provided\n");
+		return;
+	}
+
+	max_bw_settings_cnt /= 2 * sizeof(u32);
+
+	max_bw_per_pipe_settings = devm_kzalloc(&pdev->dev,
+		    sizeof(struct mdss_max_bw_settings) * max_bw_settings_cnt,
+		    GFP_KERNEL);
+	if (!max_bw_per_pipe_settings) {
+		pr_err("Memory allocation failed for max_bw_settings\n");
+		return;
+	}
+
+	mdss_mdp_parse_max_bw_array(max_bw_settings, max_bw_per_pipe_settings,
+					max_bw_settings_cnt);
+	mdata->max_per_pipe_bw_settings = max_bw_per_pipe_settings;
+	mdata->mdss_per_pipe_bw_cnt = max_bw_settings_cnt;
+
+	/* Calculate min and max allowed per pipe BW */
+	min_bw = mdata->max_bw_high;
+	max_bw = 0;
+
+	while (i < max_bw_settings_cnt) {
+		threshold = mdata->max_per_pipe_bw_settings[i].mdss_max_bw_val;
+		if (threshold > max_bw)
+			max_bw = threshold;
+		if (threshold < min_bw)
+			min_bw = threshold;
+		++i;
+	}
+	mdata->max_bw_per_pipe = max_bw;
+	mdata->min_bw_per_pipe = min_bw;
+}
+
+/**
+ * mdss_mdp_parse_dt_misc() - parse miscellaneous MDSS MDP DT properties
+ * @pdev: MDP platform device whose of_node carries the properties
+ *
+ * Reads optional feature flags, bandwidth/clock fudge factors and
+ * limits, applying documented defaults when a property is absent.
+ * Returns 0 on success, -ENOMEM if the clock-level table cannot be
+ * allocated, or a parse error when pingpong split is enabled but its
+ * dependent offsets are missing.
+ */
+static int mdss_mdp_parse_dt_misc(struct platform_device *pdev)
+{
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+	u32 data, slave_pingpong_off;
+	const char *wfd_data;
+	int rc;
+	struct property *prop = NULL;
+
+	/* Optional tunables: fall back to safe defaults when absent. */
+	rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-rot-block-size",
+		&data);
+	mdata->rot_block_size = (!rc ? data : 128);
+
+	/* 0 means "no default OT limit configured from DT". */
+	rc = of_property_read_u32(pdev->dev.of_node,
+		"qcom,mdss-default-ot-rd-limit", &data);
+	mdata->default_ot_rd_limit = (!rc ? data : 0);
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+		"qcom,mdss-default-ot-wr-limit", &data);
+	mdata->default_ot_wr_limit = (!rc ? data : 0);
+
+	mdata->has_non_scalar_rgb = of_property_read_bool(pdev->dev.of_node,
+		"qcom,mdss-has-non-scalar-rgb");
+	mdata->has_bwc = of_property_read_bool(pdev->dev.of_node,
+					       "qcom,mdss-has-bwc");
+	mdata->has_decimation = of_property_read_bool(pdev->dev.of_node,
+		"qcom,mdss-has-decimation");
+	mdata->has_no_lut_read = of_property_read_bool(pdev->dev.of_node,
+		"qcom,mdss-no-lut-read");
+	mdata->needs_hist_vote = !(of_property_read_bool(pdev->dev.of_node,
+		"qcom,mdss-no-hist-vote"));
+	/* Writeback (WFD) topology: "intf", "shared" or "dedicated". */
+	wfd_data = of_get_property(pdev->dev.of_node,
+					"qcom,mdss-wfd-mode", NULL);
+	if (wfd_data) {
+		pr_debug("wfd mode: %s\n", wfd_data);
+		if (!strcmp(wfd_data, "intf")) {
+			mdata->wfd_mode = MDSS_MDP_WFD_INTERFACE;
+		} else if (!strcmp(wfd_data, "shared")) {
+			mdata->wfd_mode = MDSS_MDP_WFD_SHARED;
+		} else if (!strcmp(wfd_data, "dedicated")) {
+			mdata->wfd_mode = MDSS_MDP_WFD_DEDICATED;
+		} else {
+			pr_debug("wfd default mode: Shared\n");
+			mdata->wfd_mode = MDSS_MDP_WFD_SHARED;
+		}
+	} else {
+		pr_warn("wfd mode not configured. Set to default: Shared\n");
+		mdata->wfd_mode = MDSS_MDP_WFD_SHARED;
+	}
+
+	mdata->has_src_split = of_property_read_bool(pdev->dev.of_node,
+		 "qcom,mdss-has-source-split");
+	mdata->has_fixed_qos_arbiter_enabled =
+			of_property_read_bool(pdev->dev.of_node,
+		 "qcom,mdss-has-fixed-qos-arbiter-enabled");
+	mdata->idle_pc_enabled = of_property_read_bool(pdev->dev.of_node,
+		 "qcom,mdss-idle-power-collapse-enabled");
+
+	/* Batfet control is needed only when a supply is wired up in DT. */
+	prop = of_find_property(pdev->dev.of_node, "batfet-supply", NULL);
+	mdata->batfet_required = prop ? true : false;
+	mdata->en_svs_high = of_property_read_bool(pdev->dev.of_node,
+		"qcom,mdss-en-svs-high");
+	if (!mdata->en_svs_high)
+		pr_debug("%s: svs_high is not enabled\n", __func__);
+	rc = of_property_read_u32(pdev->dev.of_node,
+		 "qcom,mdss-highest-bank-bit", &(mdata->highest_bank_bit));
+	if (rc)
+		pr_debug("Could not read optional property: highest bank bit\n");
+
+	/*
+	 * Pingpong split additionally requires the slave pingpong base
+	 * and the PPB register offsets; fail hard if either is missing.
+	 */
+	mdata->has_pingpong_split = of_property_read_bool(pdev->dev.of_node,
+		 "qcom,mdss-has-pingpong-split");
+
+	if (mdata->has_pingpong_split) {
+		rc = of_property_read_u32(pdev->dev.of_node,
+				"qcom,mdss-slave-pingpong-off",
+				&slave_pingpong_off);
+		if (rc) {
+			pr_err("Error in device tree: slave pingpong offset\n");
+			return rc;
+		}
+		mdata->slave_pingpong_base = mdata->mdss_io.base +
+			slave_pingpong_off;
+		rc = mdss_mdp_parse_dt_ppb_off(pdev);
+		if (rc) {
+			pr_err("Error in device tree: ppb offset not configured\n");
+			return rc;
+		}
+	}
+
+	/*
+	 * 2x factor on AB because bus driver will divide by 2
+	 * due to 2x ports to BIMC
+	 */
+	mdata->ab_factor.numer = 2;
+	mdata->ab_factor.denom = 1;
+	mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-ab-factor",
+		&mdata->ab_factor);
+
+	/*
+	 * 1.2 factor on ib as default value. This value is
+	 * experimentally determined and should be tuned in device
+	 * tree.
+	 */
+	mdata->ib_factor.numer = 6;
+	mdata->ib_factor.denom = 5;
+	mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-ib-factor",
+		&mdata->ib_factor);
+
+	/*
+	 * Set overlap ib value equal to ib by default. This value can
+	 * be tuned in device tree to be different from ib.
+	 * This factor apply when the max bandwidth per pipe
+	 * is the overlap BW.
+	 */
+	mdata->ib_factor_overlap.numer = mdata->ib_factor.numer;
+	mdata->ib_factor_overlap.denom = mdata->ib_factor.denom;
+	mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-ib-factor-overlap",
+		&mdata->ib_factor_overlap);
+
+	mdata->clk_factor.numer = 1;
+	mdata->clk_factor.denom = 1;
+	mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-clk-factor",
+		&mdata->clk_factor);
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,max-bandwidth-low-kbps", &mdata->max_bw_low);
+	if (rc)
+		pr_debug("max bandwidth (low) property not specified\n");
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,max-bandwidth-high-kbps", &mdata->max_bw_high);
+	if (rc)
+		pr_debug("max bandwidth (high) property not specified\n");
+
+	mdss_mdp_parse_per_pipe_bandwidth(pdev);
+
+	mdss_mdp_parse_max_bandwidth(pdev);
+
+	mdata->nclk_lvl = mdss_mdp_parse_dt_prop_len(pdev,
+					"qcom,mdss-clk-levels");
+
+	if (mdata->nclk_lvl) {
+		mdata->clock_levels = kcalloc(mdata->nclk_lvl, sizeof(u32),
+							GFP_KERNEL);
+		if (!mdata->clock_levels)
+			return -ENOMEM;
+
+		rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-clk-levels",
+			mdata->clock_levels, mdata->nclk_lvl);
+		if (rc)
+			pr_debug("clock levels not found\n");
+	}
+
+	mdss_mdp_parse_vbif_qos(pdev);
+	mdata->traffic_shaper_en = of_property_read_bool(pdev->dev.of_node,
+		 "qcom,mdss-traffic-shaper-enabled");
+	mdata->has_rot_dwnscale = of_property_read_bool(pdev->dev.of_node,
+		"qcom,mdss-has-rotator-downscale");
+	if (mdata->has_rot_dwnscale) {
+		rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,mdss-rot-downscale-min",
+			&mdata->rot_dwnscale_min);
+		if (rc)
+			pr_err("Min rotator downscale property not specified\n");
+
+		rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,mdss-rot-downscale-max",
+			&mdata->rot_dwnscale_max);
+		if (rc)
+			pr_err("Max rotator downscale property not specified\n");
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+		"qcom,mdss-dram-channels", &mdata->bus_channels);
+	if (rc)
+		pr_debug("number of channels property not specified\n");
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,max-pipe-width", &mdata->max_pipe_width);
+	if (rc) {
+		pr_debug("max pipe width not specified. Using default value\n");
+		mdata->max_pipe_width = DEFAULT_MDP_PIPE_WIDTH;
+	}
+	return 0;
+}
+
+/**
+ * mdss_mdp_parse_dt_ad_cfg() - parse assertive display (AD) offsets
+ * @pdev: MDP platform device
+ *
+ * Reads the optional "qcom,mdss-ad-off" block offsets and hands them to
+ * mdss_mdp_ad_addr_setup(). Returns 0 when AD is absent or configured,
+ * -EINVAL when more AD blocks than interface mixers are declared,
+ * -ENOMEM on allocation failure, or the parse/setup error code.
+ */
+static int mdss_mdp_parse_dt_ad_cfg(struct platform_device *pdev)
+{
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+	u32 *ad_offsets = NULL;
+	int rc;
+
+	mdata->nad_cfgs = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-ad-off");
+
+	if (mdata->nad_cfgs == 0) {
+		mdata->ad_cfgs = NULL;
+		return 0;
+	}
+
+	/* Each AD block must be backed by an interface mixer. */
+	if (mdata->nad_cfgs > mdata->nmixers_intf)
+		return -EINVAL;
+
+	mdata->has_wb_ad = of_property_read_bool(pdev->dev.of_node,
+		"qcom,mdss-has-wb-ad");
+
+	ad_offsets = kcalloc(mdata->nad_cfgs, sizeof(u32), GFP_KERNEL);
+	if (!ad_offsets)
+		return -ENOMEM;
+
+	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-ad-off", ad_offsets,
+					mdata->nad_cfgs);
+	if (rc)
+		goto parse_done;
+
+	rc = mdss_mdp_ad_addr_setup(mdata, ad_offsets);
+	if (rc)
+		pr_err("unable to setup assertive display\n");
+
+parse_done:
+	kfree(ad_offsets);
+	return rc;
+}
+
+/*
+ * Parse the optional pingpong-buffer control/config register offset
+ * lists from DT into mdata. Returns 0 on success or -ENOMEM when a
+ * table allocation fails; absent properties are simply skipped.
+ */
+static int mdss_mdp_parse_dt_ppb_off(struct platform_device *pdev)
+{
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+	const u32 *arr;
+	u32 len, i;
+
+	/* Pingpong buffer control register offsets (big-endian in DT). */
+	arr = of_get_property(pdev->dev.of_node, "qcom,mdss-ppb-ctl-off", &len);
+	if (arr) {
+		mdata->nppb_ctl = len / sizeof(u32);
+		mdata->ppb_ctl = devm_kzalloc(&mdata->pdev->dev,
+				sizeof(u32) * mdata->nppb_ctl, GFP_KERNEL);
+		if (!mdata->ppb_ctl)
+			return -ENOMEM;
+		for (i = 0; i < mdata->nppb_ctl; i++)
+			mdata->ppb_ctl[i] = be32_to_cpu(arr[i]);
+	}
+
+	/* Pingpong buffer config register offsets. */
+	arr = of_get_property(pdev->dev.of_node, "qcom,mdss-ppb-cfg-off", &len);
+	if (arr) {
+		mdata->nppb_cfg = len / sizeof(u32);
+		mdata->ppb_cfg = devm_kzalloc(&mdata->pdev->dev,
+				sizeof(u32) * mdata->nppb_cfg, GFP_KERNEL);
+		if (!mdata->ppb_cfg)
+			return -ENOMEM;
+		for (i = 0; i < mdata->nppb_cfg; i++)
+			mdata->ppb_cfg[i] = be32_to_cpu(arr[i]);
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_MSM_BUS_SCALING
+/**
+ * mdss_mdp_parse_dt_bus_scale() - parse bus-scaling tables from DT
+ * @pdev: MDP platform device
+ *
+ * Parses the mandatory AXI path count and main bus-scale table, plus
+ * the optional register-bus and hw-rt bus child tables. Failures on
+ * the optional child tables are logged and treated as non-fatal.
+ * Returns 0 on success or a negative errno on mandatory-table failure.
+ */
+static int mdss_mdp_parse_dt_bus_scale(struct platform_device *pdev)
+{
+	int rc, paths;
+	struct device_node *node;
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,msm-bus,num-paths", &paths);
+	if (rc) {
+		pr_err("Error. qcom,msm-bus,num-paths prop not found.rc=%d\n",
+			rc);
+		return rc;
+	}
+	mdss_res->axi_port_cnt = paths;
+
+	/* NRT path count is mandatory only with the fixed QoS arbiter. */
+	rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,mdss-num-nrt-paths", &mdata->nrt_axi_port_cnt);
+	if (rc && mdata->has_fixed_qos_arbiter_enabled) {
+		pr_err("Error. qcom,mdss-num-nrt-paths prop not found.rc=%d\n",
+			rc);
+		return rc;
+	}
+	rc = 0;
+
+	mdata->bus_scale_table = msm_bus_cl_get_pdata(pdev);
+	if (IS_ERR_OR_NULL(mdata->bus_scale_table)) {
+		rc = PTR_ERR(mdata->bus_scale_table);
+		if (!rc)
+			rc = -EINVAL;
+		pr_err("msm_bus_cl_get_pdata failed. rc=%d\n", rc);
+		mdata->bus_scale_table = NULL;
+		return rc;
+	}
+
+	/*
+	 * if mdss-reg-bus is not found then default table is picked
+	 * hence below code wont return error.
+	 */
+	node = of_get_child_by_name(pdev->dev.of_node, "qcom,mdss-reg-bus");
+	if (node) {
+		mdata->reg_bus_scale_table =
+			msm_bus_pdata_from_node(pdev, node);
+		if (IS_ERR_OR_NULL(mdata->reg_bus_scale_table)) {
+			rc = PTR_ERR(mdata->reg_bus_scale_table);
+			/*
+			 * NOTE(review): only the NULL (rc == 0) case is
+			 * logged here; an ERR_PTR failure is cleared
+			 * silently. Confirm this is intentional.
+			 */
+			if (!rc)
+				pr_err("bus_pdata reg_bus failed rc=%d\n", rc);
+			rc = 0;
+			mdata->reg_bus_scale_table = NULL;
+		}
+	} else {
+		rc = 0;
+		mdata->reg_bus_scale_table = NULL;
+		pr_debug("mdss-reg-bus not found\n");
+	}
+
+	/* Optional hw-rt bus table, same non-fatal handling as above. */
+	node = of_get_child_by_name(pdev->dev.of_node, "qcom,mdss-hw-rt-bus");
+	if (node) {
+		mdata->hw_rt_bus_scale_table =
+			msm_bus_pdata_from_node(pdev, node);
+		if (IS_ERR_OR_NULL(mdata->hw_rt_bus_scale_table)) {
+			rc = PTR_ERR(mdata->hw_rt_bus_scale_table);
+			if (!rc)
+				pr_err("hw_rt_bus_scale failed rc=%d\n", rc);
+			rc = 0;
+			mdata->hw_rt_bus_scale_table = NULL;
+		}
+	} else {
+		rc = 0;
+		mdata->hw_rt_bus_scale_table = NULL;
+		pr_debug("mdss-hw-rt-bus not found\n");
+	}
+
+	return rc;
+}
+#else
+/* Stub used when bus scaling (CONFIG_MSM_BUS_SCALING) is disabled. */
+static int mdss_mdp_parse_dt_bus_scale(struct platform_device *pdev)
+{
+	return 0;
+}
+
+#endif
+
+/*
+ * Read @len u32 values from DT property @prop_name into @offsets.
+ * Returns 0 on success, -EINVAL when the property is missing or
+ * shorter than requested.
+ */
+static int mdss_mdp_parse_dt_handler(struct platform_device *pdev,
+		char *prop_name, u32 *offsets, int len)
+{
+	if (of_property_read_u32_array(pdev->dev.of_node, prop_name,
+				       offsets, len)) {
+		pr_err("Error from prop %s : u32 array read\n", prop_name);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Return the number of u32 elements in DT property @prop_name, or 0
+ * when the property is missing or empty.
+ */
+static int mdss_mdp_parse_dt_prop_len(struct platform_device *pdev,
+				      char *prop_name)
+{
+	int len = 0;
+
+	of_find_property(pdev->dev.of_node, prop_name, &len);
+	if (len < 1) {
+		pr_debug("prop %s : doesn't exist in device tree\n",
+			prop_name);
+		return 0;
+	}
+
+	return len / sizeof(u32);
+}
+
+/* Accessor for the global MDP driver data (populated at probe time). */
+struct mdss_data_type *mdss_mdp_get_mdata(void)
+{
+	return mdss_res;
+}
+
+/**
+ * mdss_mdp_batfet_ctrl() - enable/disable the batfet regulator vote
+ * @mdata: MDP driver data
+ * @enable: non-zero to enable the regulator, zero to disable it
+ *
+ * No-op unless a "batfet" supply was declared in DT (batfet_required).
+ * The regulator is acquired lazily on the first enable; a disable
+ * request before any enable is silently ignored.
+ */
+void mdss_mdp_batfet_ctrl(struct mdss_data_type *mdata, int enable)
+{
+	int ret;
+
+	if (!mdata->batfet_required)
+		return;
+
+	if (!mdata->batfet) {
+		if (enable) {
+			mdata->batfet = devm_regulator_get(&mdata->pdev->dev,
+				"batfet");
+			if (IS_ERR_OR_NULL(mdata->batfet)) {
+				pr_debug("unable to get batfet reg. rc=%d\n",
+					PTR_RET(mdata->batfet));
+				mdata->batfet = NULL;
+				return;
+			}
+		} else {
+			pr_debug("Batfet regulator disable w/o enable\n");
+			return;
+		}
+	}
+
+	if (enable) {
+		ret = regulator_enable(mdata->batfet);
+		if (ret)
+			pr_err("regulator_enable failed\n");
+	} else {
+		regulator_disable(mdata->batfet);
+	}
+}
+
+/**
+ * mdss_is_ready() - checks if mdss is probed and ready
+ *
+ * Checks if mdss resources have been initialized
+ *
+ * returns true if mdss is ready, else returns false
+ */
+bool mdss_is_ready(void)
+{
+	return mdss_mdp_get_mdata() ? true : false;
+}
+/*
+ * NOTE(review): this exports mdss_mdp_get_mdata(), not mdss_is_ready();
+ * presumably intentional (the accessor above carries no EXPORT_SYMBOL),
+ * but confirm all callers of mdss_is_ready() are built-in.
+ */
+EXPORT_SYMBOL(mdss_mdp_get_mdata);
+
+/**
+ * mdss_panel_intf_type() - look up the primary panel configuration
+ * @intf_val: panel interface type of the querying controller
+ *
+ * Returns the shared panel configuration when @intf_val matches the
+ * interface configured as primary, NULL when it does not, and
+ * ERR_PTR(-EPROBE_DEFER) while MDP has not finished initializing the
+ * panel configuration yet.
+ */
+struct mdss_panel_cfg *mdss_panel_intf_type(int intf_val)
+{
+	if (!mdss_res || !mdss_res->pan_cfg.init_done)
+		return ERR_PTR(-EPROBE_DEFER);
+
+	if (mdss_res->pan_cfg.pan_intf != intf_val)
+		return NULL;
+
+	return &mdss_res->pan_cfg;
+}
+EXPORT_SYMBOL(mdss_panel_intf_type);
+
+/*
+ * mdss_intr_line() - return the MDSS interrupt line descriptor held in
+ * mdss_mdp_hw, shared with other MDSS sub-drivers.
+ *
+ * Fix: an empty parameter list "()" declares an unprototyped function
+ * in C; "(void)" is the correct prototype (kernel checkpatch error).
+ */
+struct irq_info *mdss_intr_line(void)
+{
+	return mdss_mdp_hw.irq_info;
+}
+EXPORT_SYMBOL(mdss_intr_line);
+
+/**
+ * mdss_mdp_wait_for_xin_halt() - poll VBIF until a client is halted
+ * @xin_id: VBIF xin client id whose halt-status bit is polled
+ * @is_vbif_nrt: true to poll the non-realtime VBIF, false for realtime
+ *
+ * Polls MMSS_VBIF_XIN_HALT_CTRL1 every 1ms up to XIN_HALT_TIMEOUT_US.
+ * Returns 0 once the client's halt bit is set; on -ETIMEDOUT the
+ * VBIF/debug-bus state is dumped via MDSS_XLOG_TOUT_HANDLER.
+ */
+int mdss_mdp_wait_for_xin_halt(u32 xin_id, bool is_vbif_nrt)
+{
+	void __iomem *vbif_base;
+	u32 status;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 idle_mask = BIT(xin_id);
+	int rc;
+
+	vbif_base = is_vbif_nrt ? mdata->vbif_nrt_io.base :
+				mdata->vbif_io.base;
+
+	rc = readl_poll_timeout(vbif_base + MMSS_VBIF_XIN_HALT_CTRL1,
+		status, (status & idle_mask),
+		1000, XIN_HALT_TIMEOUT_US);
+	if (rc == -ETIMEDOUT) {
+		pr_err("VBIF client %d not halting. TIMEDOUT.\n",
+			xin_id);
+		MDSS_XLOG_TOUT_HANDLER("mdp", "vbif", "vbif_nrt",
+			"dbg_bus", "vbif_dbg_bus", "panic");
+	} else {
+		pr_debug("VBIF client %d is halted\n", xin_id);
+	}
+
+	return rc;
+}
+
+/**
+ * force_on_xin_clk() - enable/disable the force-on bit of a pipe clock
+ * @bit_off: bit position of the force-on control in the register.
+ * @clk_ctl_reg_off: register offset of the clock control.
+ * @enable: true to set the force-on bit, false to clear it.
+ *
+ * Returns true when this call is the one forcing the clock on (the bit
+ * was previously clear); false when the clock was already forced on.
+ * The caller must undo the force-on only when this returns true.
+ * Clocks must be on when calling this function.
+ */
+bool force_on_xin_clk(u32 bit_off, u32 clk_ctl_reg_off, bool enable)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 mask = BIT(bit_off);
+	u32 val;
+	bool forced_here;
+
+	val = readl_relaxed(mdata->mdp_base + clk_ctl_reg_off);
+	/* Record whether the force-on bit was clear before we touch it. */
+	forced_here = !(val & mask);
+
+	if (enable)
+		val |= mask;
+	else
+		val &= ~mask;
+
+	writel_relaxed(val, mdata->mdp_base + clk_ctl_reg_off);
+
+	return forced_here;
+}
+
+/*
+ * apply_dynamic_ot_limit() - tighten the outstanding-transaction (OT)
+ * limit based on resolution and frame rate. Only applies to rotator
+ * YUV and writeback use cases on targets with MDSS_QOS_OTLIM support;
+ * otherwise *ot_lim is left untouched.
+ */
+static void apply_dynamic_ot_limit(u32 *ot_lim,
+	struct mdss_mdp_set_ot_params *params)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 res, read_vbif_ot;
+	u32 rot_ot = 4;
+
+	if (false == test_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map))
+		return;
+
+	/* Dynamic OT setting done only for rotator and WFD */
+	if (!((params->is_rot && params->is_yuv) || params->is_wb))
+		return;
+
+	res = params->width * params->height;
+
+	pr_debug("w:%d h:%d rot:%d yuv:%d wb:%d res:%d fps:%d\n",
+		params->width, params->height, params->is_rot,
+		params->is_yuv, params->is_wb, res, params->frame_rate);
+
+	switch (mdata->mdp_rev) {
+	case MDSS_MDP_HW_REV_114:
+		/*
+		 * MDP rev is same for msm8937 and msm8940, but rotator OT
+		 * recommendations are different. Setting it based on AXI OT.
+		 */
+		read_vbif_ot = MDSS_VBIF_READ(mdata, MMSS_VBIF_OUT_RD_LIM_CONF0,
+					false);
+		rot_ot  = (read_vbif_ot == 0x10) ? 4 : 8;
+		/* fall-through */
+	case MDSS_MDP_HW_REV_115:
+	case MDSS_MDP_HW_REV_116:
+		if ((res <= RES_1080p) && (params->frame_rate <= 30))
+			*ot_lim = 2;
+		else if (params->is_rot && params->is_yuv)
+			*ot_lim = rot_ot;
+		else
+			*ot_lim = 6;
+		break;
+	default:
+		/* >UHD keeps the caller-provided default untouched. */
+		if (res <= RES_1080p) {
+			*ot_lim = 2;
+		} else if (res <= RES_UHD) {
+			if (params->is_rot && params->is_yuv)
+				*ot_lim = 8;
+			else
+				*ot_lim = 16;
+		}
+		break;
+	}
+}
+
+/*
+ * get_ot_limit() - compute the OT limit to program for this request.
+ * Starts from the DT default read/write limit, applies the dynamic
+ * per-use-case adjustment, and returns 0 when nothing needs to be
+ * programmed (no default configured, or the register already holds
+ * the desired value).
+ */
+static u32 get_ot_limit(u32 reg_off, u32 bit_off,
+	struct mdss_mdp_set_ot_params *params)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 ot_lim = 0;
+	u32 is_vbif_nrt, val;
+
+	if (mdata->default_ot_wr_limit &&
+		(params->reg_off_vbif_lim_conf == MMSS_VBIF_WR_LIM_CONF))
+		ot_lim = mdata->default_ot_wr_limit;
+	else if (mdata->default_ot_rd_limit &&
+		(params->reg_off_vbif_lim_conf == MMSS_VBIF_RD_LIM_CONF))
+		ot_lim = mdata->default_ot_rd_limit;
+
+	/*
+	 * If default ot is not set from dt,
+	 * then do not configure it.
+	 */
+	if (ot_lim == 0)
+		goto exit;
+
+	/* Modify the limits if the target and the use case requires it */
+	apply_dynamic_ot_limit(&ot_lim, params);
+
+	/* Extract the client's current 8-bit OT field from the register. */
+	is_vbif_nrt = params->is_vbif_nrt;
+	val = MDSS_VBIF_READ(mdata, reg_off, is_vbif_nrt);
+	val &= (0xFF << bit_off);
+	val = val >> bit_off;
+
+	/* Already programmed to the desired value: nothing to do. */
+	if (val == ot_lim)
+		ot_lim = 0;
+
+exit:
+	pr_debug("ot_lim=%d\n", ot_lim);
+	return ot_lim;
+}
+
+/*
+ * mdss_mdp_set_ot_limit() - program the VBIF OT limit for one client.
+ * Forces the client's MDP clock on if needed, writes the new 8-bit OT
+ * field, then halts and un-halts the xin client so the new limit takes
+ * effect. All register accesses are serialized by mdata->reg_lock.
+ */
+void mdss_mdp_set_ot_limit(struct mdss_mdp_set_ot_params *params)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 ot_lim;
+	/* Four clients per LIM_CONF register, 8 bits per client. */
+	u32 reg_off_vbif_lim_conf = (params->xin_id / 4) * 4 +
+		params->reg_off_vbif_lim_conf;
+	u32 bit_off_vbif_lim_conf = (params->xin_id % 4) * 8;
+	bool is_vbif_nrt = params->is_vbif_nrt;
+	u32 reg_val;
+	bool forced_on;
+
+	ot_lim = get_ot_limit(
+		reg_off_vbif_lim_conf,
+		bit_off_vbif_lim_conf,
+		params) & 0xFF;
+
+	/* 0 means "no change needed" (see get_ot_limit()). */
+	if (ot_lim == 0)
+		goto exit;
+
+	trace_mdp_perf_set_ot(params->num, params->xin_id, ot_lim,
+		is_vbif_nrt);
+
+	mutex_lock(&mdata->reg_lock);
+
+	forced_on = force_on_xin_clk(params->bit_off_mdp_clk_ctrl,
+		params->reg_off_mdp_clk_ctrl, true);
+
+	reg_val = MDSS_VBIF_READ(mdata, reg_off_vbif_lim_conf,
+		is_vbif_nrt);
+	reg_val &= ~(0xFF << bit_off_vbif_lim_conf);
+	reg_val |= (ot_lim) << bit_off_vbif_lim_conf;
+	MDSS_VBIF_WRITE(mdata, reg_off_vbif_lim_conf, reg_val,
+		is_vbif_nrt);
+
+	/* Halt the client (dropping the lock while waiting) ... */
+	reg_val = MDSS_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
+		is_vbif_nrt);
+	MDSS_VBIF_WRITE(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
+		reg_val | BIT(params->xin_id), is_vbif_nrt);
+
+	mutex_unlock(&mdata->reg_lock);
+	mdss_mdp_wait_for_xin_halt(params->xin_id, is_vbif_nrt);
+	mutex_lock(&mdata->reg_lock);
+
+	/* ... then release it so the new OT limit takes effect. */
+	reg_val = MDSS_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
+		is_vbif_nrt);
+	MDSS_VBIF_WRITE(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
+		reg_val & ~BIT(params->xin_id), is_vbif_nrt);
+
+	if (forced_on)
+		force_on_xin_clk(params->bit_off_mdp_clk_ctrl,
+			params->reg_off_mdp_clk_ctrl, false);
+
+	mutex_unlock(&mdata->reg_lock);
+
+exit:
+	return;
+}
+
+#define RPM_MISC_REQ_TYPE 0x6373696d
+#define RPM_MISC_REQ_SVS_PLUS_KEY 0x2B737673
+
+/*
+ * mdss_mdp_config_cx_voltage() - vote or unvote SVS-high with the RPM.
+ * Only active on targets that set qcom,mdss-en-svs-high. Votes are
+ * placed on both the ACTIVE and SLEEP RPM sets. rpm_kvp and svs_en are
+ * static because the message carries a pointer to the payload;
+ * NOTE(review): confirm msm_rpm_send_message() copies the payload
+ * before returning.
+ */
+static void mdss_mdp_config_cx_voltage(struct mdss_data_type *mdata, int enable)
+{
+	int ret = 0;
+	static struct msm_rpm_kvp rpm_kvp;
+	static uint8_t svs_en;
+
+	if (!mdata->en_svs_high)
+		return;
+
+	/* One-time lazy init of the key/length fields. */
+	if (!rpm_kvp.key) {
+		rpm_kvp.key = RPM_MISC_REQ_SVS_PLUS_KEY;
+		rpm_kvp.length = sizeof(uint64_t);
+		pr_debug("%s: Initialized rpm_kvp structure\n", __func__);
+	}
+
+	if (enable) {
+		svs_en = 1;
+		rpm_kvp.data = &svs_en;
+		pr_debug("%s: voting for svs high\n", __func__);
+		ret = msm_rpm_send_message(MSM_RPM_CTX_ACTIVE_SET,
+					RPM_MISC_REQ_TYPE, 0,
+					&rpm_kvp, 1);
+		if (ret)
+			pr_err("vote for active_set svs high failed: %d\n",
+					ret);
+		ret = msm_rpm_send_message(MSM_RPM_CTX_SLEEP_SET,
+					RPM_MISC_REQ_TYPE, 0,
+					&rpm_kvp, 1);
+		if (ret)
+			pr_err("vote for sleep_set svs high failed: %d\n",
+					ret);
+	} else {
+		svs_en = 0;
+		rpm_kvp.data = &svs_en;
+		pr_debug("%s: Removing vote for svs high\n", __func__);
+		ret = msm_rpm_send_message(MSM_RPM_CTX_ACTIVE_SET,
+					RPM_MISC_REQ_TYPE, 0,
+					&rpm_kvp, 1);
+		if (ret)
+			pr_err("Remove vote:active_set svs high failed: %d\n",
+					ret);
+		ret = msm_rpm_send_message(MSM_RPM_CTX_SLEEP_SET,
+					RPM_MISC_REQ_TYPE, 0,
+					&rpm_kvp, 1);
+		if (ret)
+			pr_err("Remove vote:sleep_set svs high failed: %d\n",
+					ret);
+	}
+}
+
+/*
+ * mdss_mdp_cx_ctrl() - enable/disable the CX power rail for MDP.
+ * On enable, the rail is raised to at least the SVS_SOC corner before
+ * being turned on; on disable, it is turned off and the voltage vote
+ * dropped to CORNER_NONE. No-op when no vdd-cx supply is present.
+ * Returns 0 on success or the regulator error code.
+ */
+static int mdss_mdp_cx_ctrl(struct mdss_data_type *mdata, int enable)
+{
+	int rc = 0;
+
+	if (!mdata->vdd_cx)
+		return rc;
+
+	if (enable) {
+		rc = regulator_set_voltage(
+				mdata->vdd_cx,
+				RPM_REGULATOR_CORNER_SVS_SOC,
+				RPM_REGULATOR_CORNER_SUPER_TURBO);
+		if (rc < 0)
+			goto vreg_set_voltage_fail;
+
+		pr_debug("Enabling CX power rail\n");
+		rc = regulator_enable(mdata->vdd_cx);
+		if (rc) {
+			pr_err("Failed to enable regulator.\n");
+			return rc;
+		}
+	} else {
+		pr_debug("Disabling CX power rail\n");
+		rc = regulator_disable(mdata->vdd_cx);
+		if (rc) {
+			pr_err("Failed to disable regulator.\n");
+			return rc;
+		}
+		rc = regulator_set_voltage(
+				mdata->vdd_cx,
+				RPM_REGULATOR_CORNER_NONE,
+				RPM_REGULATOR_CORNER_SUPER_TURBO);
+		if (rc < 0)
+			goto vreg_set_voltage_fail;
+	}
+
+	return rc;
+
+vreg_set_voltage_fail:
+	pr_err("Set vltg fail\n");
+	return rc;
+}
+
+/**
+ * mdss_mdp_footswitch_ctrl() - Disable/enable MDSS GDSC and CX/Batfet rails
+ * @mdata: MDP private data
+ * @on: 1 to turn on footswitch, 0 to turn off footswitch
+ *
+ * When no active references to the MDP device node and its child nodes are
+ * held, the MDSS GDSC can be turned off. However, if any panels are still
+ * active (but likely in an idle state), the vote for the CX and the batfet
+ * rails should not be released.
+ */
+static void mdss_mdp_footswitch_ctrl(struct mdss_data_type *mdata, int on)
+{
+	int ret;
+	int active_cnt = 0;
+
+	if (!mdata->fs)
+		return;
+
+	MDSS_XLOG(on, mdata->fs_ena, mdata->idle_pc, mdata->en_svs_high,
+		atomic_read(&mdata->active_intf_cnt));
+
+	if (on) {
+		if (!mdata->fs_ena) {
+			pr_debug("Enable MDP FS\n");
+			if (mdata->venus) {
+				ret = regulator_enable(mdata->venus);
+				if (ret)
+					pr_err("venus failed to enable\n");
+			}
+
+			ret = regulator_enable(mdata->fs);
+			if (ret)
+				pr_warn("Footswitch failed to enable\n");
+			/* CX/batfet votes persist across idle power collapse. */
+			if (!mdata->idle_pc) {
+				mdss_mdp_cx_ctrl(mdata, true);
+				mdss_mdp_batfet_ctrl(mdata, true);
+			}
+		}
+		if (mdata->en_svs_high)
+			mdss_mdp_config_cx_voltage(mdata, true);
+		mdata->fs_ena = true;
+	} else {
+		if (mdata->fs_ena) {
+			pr_debug("Disable MDP FS\n");
+			active_cnt = atomic_read(&mdata->active_intf_cnt);
+			if (active_cnt != 0) {
+				/*
+				 * Turning off GDSC while overlays are still
+				 * active.
+				 */
+				mdata->idle_pc = true;
+				pr_debug("idle pc. active overlays=%d\n",
+					active_cnt);
+				mdss_mdp_memory_retention_enter();
+			} else {
+				mdss_mdp_cx_ctrl(mdata, false);
+				mdss_mdp_batfet_ctrl(mdata, false);
+			}
+			if (mdata->en_svs_high)
+				mdss_mdp_config_cx_voltage(mdata, false);
+			regulator_disable(mdata->fs);
+			if (mdata->venus)
+				regulator_disable(mdata->venus);
+		}
+		mdata->fs_ena = false;
+	}
+}
+
+/**
+ * mdss_mdp_secure_display_ctrl() - switch secure display on or off
+ * @mdata: MDP driver data
+ * @enable: 1 to enter secure display, 0 to exit
+ *
+ * Skips the SCM call while other secure-display clients remain active
+ * and only updates the client refcount. Otherwise issues the
+ * MEM_PROTECT_SD_CTRL SCM call (legacy or armv8 calling convention)
+ * and returns the secure world's response, or the SCM transport error.
+ */
+int mdss_mdp_secure_display_ctrl(struct mdss_data_type *mdata,
+	unsigned int enable)
+{
+	struct sd_ctrl_req {
+		unsigned int enable;
+	} __attribute__ ((__packed__)) request;
+	unsigned int resp = -1;
+	int ret = 0;
+	struct scm_desc desc;
+
+	/* Refcount-only transition: other clients keep SD state as-is. */
+	if ((enable && (mdss_get_sd_client_cnt() > 0)) ||
+		(!enable && (mdss_get_sd_client_cnt() > 1))) {
+		mdss_update_sd_client(mdata, enable);
+		return ret;
+	}
+
+	desc.args[0] = request.enable = enable;
+	desc.arginfo = SCM_ARGS(1);
+
+	if (!is_scm_armv8()) {
+		ret = scm_call(SCM_SVC_MP, MEM_PROTECT_SD_CTRL,
+			&request, sizeof(request), &resp, sizeof(resp));
+	} else {
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
+				mem_protect_sd_ctrl_id), &desc);
+		resp = desc.ret[0];
+	}
+
+	pr_debug("scm_call MEM_PROTECT_SD_CTRL(%u): ret=%d, resp=%x",
+				enable, ret, resp);
+	if (ret)
+		return ret;
+
+	mdss_update_sd_client(mdata, enable);
+	return resp;
+}
+
+/* Save the footswitch state and power it down for system suspend. */
+static inline int mdss_mdp_suspend_sub(struct mdss_data_type *mdata)
+{
+	mdata->suspend_fs_ena = mdata->fs_ena;
+	mdss_mdp_footswitch_ctrl(mdata, false);
+
+	pr_debug("suspend done fs=%d\n", mdata->suspend_fs_ena);
+
+	return 0;
+}
+
+/* Re-enable the footswitch on resume if it was enabled at suspend. */
+static inline int mdss_mdp_resume_sub(struct mdss_data_type *mdata)
+{
+	if (mdata->suspend_fs_ena)
+		mdss_mdp_footswitch_ctrl(mdata, true);
+
+	pr_debug("resume done fs=%d\n", mdata->suspend_fs_ena);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/* System-sleep suspend callback: delegate to the common suspend path. */
+static int mdss_mdp_pm_suspend(struct device *dev)
+{
+	struct mdss_data_type *mdata = dev_get_drvdata(dev);
+
+	if (!mdata)
+		return -ENODEV;
+
+	dev_dbg(dev, "display pm suspend\n");
+	return mdss_mdp_suspend_sub(mdata);
+}
+
+/* System-sleep resume callback: reset runtime PM state, then resume. */
+static int mdss_mdp_pm_resume(struct device *dev)
+{
+	struct mdss_data_type *mdata;
+
+	mdata = dev_get_drvdata(dev);
+	if (!mdata)
+		return -ENODEV;
+
+	dev_dbg(dev, "display pm resume\n");
+
+	/*
+	 * It is possible that the runtime status of the mdp device may
+	 * have been active when the system was suspended. Reset the runtime
+	 * status to suspended state after a complete system resume.
+	 */
+	pm_runtime_disable(dev);
+	pm_runtime_set_suspended(dev);
+	pm_runtime_enable(dev);
+
+	return mdss_mdp_resume_sub(mdata);
+}
+#endif
+
+#if defined(CONFIG_PM) && !defined(CONFIG_PM_SLEEP)
+/* Legacy (non-CONFIG_PM_SLEEP) platform suspend hook. */
+static int mdss_mdp_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+
+	if (!mdata)
+		return -ENODEV;
+
+	dev_dbg(&pdev->dev, "display suspend\n");
+
+	return mdss_mdp_suspend_sub(mdata);
+}
+
+/* Legacy (non-CONFIG_PM_SLEEP) platform resume hook. */
+static int mdss_mdp_resume(struct platform_device *pdev)
+{
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+
+	if (!mdata)
+		return -ENODEV;
+
+	dev_dbg(&pdev->dev, "display resume\n");
+
+	return mdss_mdp_resume_sub(mdata);
+}
+#else
+#define mdss_mdp_suspend NULL
+#define mdss_mdp_resume NULL
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+/*
+ * Runtime-PM resume: re-enable the footswitch and, unless we are only
+ * coming out of idle power collapse, resume the child framebuffer
+ * panels as well.
+ */
+static int mdss_mdp_runtime_resume(struct device *dev)
+{
+	struct mdss_data_type *mdata = dev_get_drvdata(dev);
+	bool device_on = true;
+
+	if (!mdata)
+		return -ENODEV;
+
+	dev_dbg(dev, "pm_runtime: resuming. active overlay cnt=%d\n",
+		atomic_read(&mdata->active_intf_cnt));
+
+	/* do not resume panels when coming out of idle power collapse */
+	if (!mdata->idle_pc)
+		device_for_each_child(dev, &device_on, mdss_fb_suspres_panel);
+	mdss_mdp_footswitch_ctrl(mdata, true);
+
+	return 0;
+}
+
+/* Runtime-PM idle callback: nothing to veto, just log and return 0. */
+static int mdss_mdp_runtime_idle(struct device *dev)
+{
+	struct mdss_data_type *mdata = dev_get_drvdata(dev);
+
+	if (!mdata)
+		return -ENODEV;
+
+	dev_dbg(dev, "pm_runtime: idling...\n");
+	return 0;
+}
+
+/*
+ * Runtime-PM suspend: refuse with -EBUSY while MDP clocks are still
+ * enabled; otherwise drop the footswitch and, unless entering idle
+ * power collapse, suspend the child framebuffer panels.
+ */
+static int mdss_mdp_runtime_suspend(struct device *dev)
+{
+	struct mdss_data_type *mdata = dev_get_drvdata(dev);
+	bool device_on = false;
+
+	if (!mdata)
+		return -ENODEV;
+	dev_dbg(dev, "pm_runtime: suspending. active overlay cnt=%d\n",
+		atomic_read(&mdata->active_intf_cnt));
+
+	if (mdata->clk_ena) {
+		pr_err("MDP suspend failed\n");
+		return -EBUSY;
+	}
+
+	mdss_mdp_footswitch_ctrl(mdata, false);
+	/* do not suspend panels when going in to idle power collapse */
+	if (!mdata->idle_pc)
+		device_for_each_child(dev, &device_on, mdss_fb_suspres_panel);
+
+	return 0;
+}
+#endif
+
+/* System-sleep and runtime PM callbacks for the MDP platform device. */
+static const struct dev_pm_ops mdss_mdp_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(mdss_mdp_pm_suspend, mdss_mdp_pm_resume)
+	SET_RUNTIME_PM_OPS(mdss_mdp_runtime_suspend,
+			mdss_mdp_runtime_resume,
+			mdss_mdp_runtime_idle)
+};
+
+/* Platform remove: tear down PM, PP, bus scaling, debugfs and the
+ * GDSC regulator notifier (only if one was registered at probe).
+ */
+static int mdss_mdp_remove(struct platform_device *pdev)
+{
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+
+	if (!mdata)
+		return -ENODEV;
+	pm_runtime_disable(&pdev->dev);
+	mdss_mdp_pp_term(&pdev->dev);
+	mdss_mdp_bus_scale_unregister(mdata);
+	mdss_debugfs_remove(mdata);
+	if (mdata->regulator_notif_register)
+		regulator_unregister_notifier(mdata->fs, &(mdata->gdsc_cb));
+	return 0;
+}
+
+/* Device-tree match table: binds the driver to "qcom,mdss_mdp" nodes. */
+static const struct of_device_id mdss_mdp_dt_match[] = {
+	{ .compatible = "qcom,mdss_mdp",},
+	{}
+};
+MODULE_DEVICE_TABLE(of, mdss_mdp_dt_match);
+
+/*
+ * Platform driver descriptor. The legacy .suspend/.resume hooks are
+ * NULL stubs when CONFIG_PM_SLEEP is set (see the #else above); the
+ * dev_pm_ops table then provides the PM callbacks instead.
+ */
+static struct platform_driver mdss_mdp_driver = {
+	.probe = mdss_mdp_probe,
+	.remove = mdss_mdp_remove,
+	.suspend = mdss_mdp_suspend,
+	.resume = mdss_mdp_resume,
+	.shutdown = NULL,
+	.driver = {
+		/*
+		 * Driver name must match the device name added in
+		 * platform.c.
+		 */
+		.name = "mdp",
+		.of_match_table = mdss_mdp_dt_match,
+		.pm = &mdss_mdp_pm_ops,
+	},
+};
+
+/* Register the MDP platform driver with the driver core. */
+static int mdss_mdp_register_driver(void)
+{
+	return platform_driver_register(&mdss_mdp_driver);
+}
+
+/*
+ * Module init: register the MDP platform driver, logging on failure.
+ * (Also drops the stray blank line before the closing brace flagged by
+ * checkpatch.)
+ */
+static int __init mdss_mdp_driver_init(void)
+{
+	int ret;
+
+	ret = mdss_mdp_register_driver();
+	if (ret) {
+		pr_err("mdp_register_driver() failed!\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+module_param_string(panel, mdss_mdp_panel, MDSS_MAX_PANEL_LEN, 0600);
+/*
+ * panel=<lk_cfg>:<pan_intf>:<pan_intf_cfg>:<panel_topology_cfg>
+ * where <lk_cfg> is "1"-lk/gcdb config or "0" non-lk/non-gcdb
+ * config; <pan_intf> is dsi:<ctrl_id> or hdmi or edp
+ * <pan_intf_cfg> is panel interface specific string
+ * Ex: This string is panel's device node name from DT
+ * for DSI interface
+ * hdmi/edp interface does not use this string
+ * <panel_topology_cfg> is an optional string. Currently it is
+ * only valid for DSI panels. In dual-DSI case, it needs to be
+ * used on both panels or none. When used, format is config%d
+ * where %d is one of the configuration found in device node of
+ * panel selected by <pan_intf_cfg>
+ */
+/*
+ * Fix: the original had two MODULE_PARM_DESC(panel, ...) invocations,
+ * producing duplicate modinfo "parm:panel" entries; merged into one.
+ */
+MODULE_PARM_DESC(panel,
+	"lk supplied panel selection string: panel=<lk_cfg>:<pan_intf>:<pan_intf_cfg>:<panel_topology_cfg>");
+module_init(mdss_mdp_driver_init);
diff --git a/drivers/video/fbdev/msm/mdss_mdp.h b/drivers/video/fbdev/msm/mdss_mdp.h
new file mode 100644
index 0000000..eb85ceb
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp.h
@@ -0,0 +1,1980 @@
+/*
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_MDP_H
+#define MDSS_MDP_H
+
+#include <linux/io.h>
+#include <linux/msm_mdp.h>
+#include <linux/msm_mdp_ext.h>
+#include <linux/platform_device.h>
+#include <linux/notifier.h>
+#include <linux/irqreturn.h>
+#include <linux/kref.h>
+#include <linux/kthread.h>
+
+#include "mdss.h"
+#include "mdss_mdp_hwio.h"
+#include "mdss_fb.h"
+#include "mdss_mdp_cdm.h"
+
+#define MDSS_MDP_DEFAULT_INTR_MASK 0
+
+#define PHASE_STEP_SHIFT	21
+#define PHASE_STEP_UNIT_SCALE   ((int) (1 << PHASE_STEP_SHIFT))
+#define PHASE_RESIDUAL		15
+#define MAX_LINE_BUFFER_WIDTH	2048
+#define MAX_MIXER_HEIGHT	0xFFFF
+#define MAX_IMG_WIDTH		0x3FFF
+#define MAX_IMG_HEIGHT		0x3FFF
+#define AHB_CLK_OFFSET		0x2B4
+#define MAX_DST_H		MAX_MIXER_HEIGHT
+#define MAX_DOWNSCALE_RATIO	4
+#define MAX_UPSCALE_RATIO	20
+#define MAX_DECIMATION		4
+#define MDP_MIN_VBP		4
+#define MAX_FREE_LIST_SIZE	12
+#define OVERLAY_MAX		10
+
+#define VALID_ROT_WB_FORMAT BIT(0)
+#define VALID_MDP_WB_INTF_FORMAT BIT(1)
+#define VALID_MDP_CURSOR_FORMAT BIT(2)
+
+#define C3_ALPHA	3	/* alpha */
+#define C2_R_Cr		2	/* R/Cr */
+#define C1_B_Cb		1	/* B/Cb */
+#define C0_G_Y		0	/* G/luma */
+
+/* wait for at most 2 vsync for lowest refresh rate (24hz) */
+#define KOFF_TIMEOUT_MS 84
+#define KOFF_TIMEOUT msecs_to_jiffies(KOFF_TIMEOUT_MS)
+
+#define OVERFETCH_DISABLE_TOP		BIT(0)
+#define OVERFETCH_DISABLE_BOTTOM	BIT(1)
+#define OVERFETCH_DISABLE_LEFT		BIT(2)
+#define OVERFETCH_DISABLE_RIGHT		BIT(3)
+
+#define MDSS_MDP_CDP_ENABLE		BIT(0)
+#define MDSS_MDP_CDP_ENABLE_UBWCMETA	BIT(1)
+#define MDSS_MDP_CDP_AMORTIZED		BIT(2)
+#define MDSS_MDP_CDP_AHEAD_64		BIT(3)
+
+#define PERF_STATUS_DONE 0
+#define PERF_STATUS_BUSY 1
+
+#define PERF_CALC_PIPE_APPLY_CLK_FUDGE	BIT(0)
+#define PERF_CALC_PIPE_SINGLE_LAYER	BIT(1)
+#define PERF_CALC_PIPE_CALC_SMP_SIZE	BIT(2)
+
+#define PERF_SINGLE_PIPE_BW_FLOOR 1200000000
+#define CURSOR_PIPE_LEFT 0
+#define CURSOR_PIPE_RIGHT 1
+
+#define MASTER_CTX 0
+#define SLAVE_CTX 1
+
+#define XIN_HALT_TIMEOUT_US	0x4000
+
+#define MAX_LAYER_COUNT		0xC
+
+/* hw cursor can only be setup in highest mixer stage */
+#define HW_CURSOR_STAGE(mdata) \
+	(((mdata)->max_target_zorder + MDSS_MDP_STAGE_0) - 1)
+
+#define BITS_TO_BYTES(x) DIV_ROUND_UP(x, BITS_PER_BYTE)
+
+enum mdss_mdp_perf_state_type {
+	PERF_SW_COMMIT_STATE = 0,
+	PERF_HW_MDP_STATE,
+};
+
+enum mdss_mdp_block_power_state {
+	MDP_BLOCK_POWER_OFF = 0,
+	MDP_BLOCK_POWER_ON = 1,
+};
+
+enum mdss_mdp_mixer_type {
+	MDSS_MDP_MIXER_TYPE_UNUSED,
+	MDSS_MDP_MIXER_TYPE_INTF,
+	MDSS_MDP_MIXER_TYPE_WRITEBACK,
+};
+
+enum mdss_mdp_mixer_mux {
+	MDSS_MDP_MIXER_MUX_DEFAULT,
+	MDSS_MDP_MIXER_MUX_LEFT,
+	MDSS_MDP_MIXER_MUX_RIGHT,
+};
+
+enum mdss_sd_transition {
+	SD_TRANSITION_NONE,
+	SD_TRANSITION_SECURE_TO_NON_SECURE,
+	SD_TRANSITION_NON_SECURE_TO_SECURE
+};
+
+static inline enum mdss_mdp_sspp_index get_pipe_num_from_ndx(u32 ndx)
+{
+	u32 id;
+
+	if (unlikely(!ndx))
+		return MDSS_MDP_MAX_SSPP;
+
+	id = fls(ndx) - 1;
+
+	if (unlikely(ndx ^ BIT(id)))
+		return MDSS_MDP_MAX_SSPP;
+
+	return id;
+}
+
+static inline enum mdss_mdp_pipe_type
+get_pipe_type_from_num(enum mdss_mdp_sspp_index pnum)
+{
+	enum mdss_mdp_pipe_type ptype;
+
+	switch (pnum) {
+	case MDSS_MDP_SSPP_VIG0:
+	case MDSS_MDP_SSPP_VIG1:
+	case MDSS_MDP_SSPP_VIG2:
+	case MDSS_MDP_SSPP_VIG3:
+		ptype = MDSS_MDP_PIPE_TYPE_VIG;
+		break;
+	case MDSS_MDP_SSPP_RGB0:
+	case MDSS_MDP_SSPP_RGB1:
+	case MDSS_MDP_SSPP_RGB2:
+	case MDSS_MDP_SSPP_RGB3:
+		ptype = MDSS_MDP_PIPE_TYPE_RGB;
+		break;
+	case MDSS_MDP_SSPP_DMA0:
+	case MDSS_MDP_SSPP_DMA1:
+	case MDSS_MDP_SSPP_DMA2:
+	case MDSS_MDP_SSPP_DMA3:
+		ptype = MDSS_MDP_PIPE_TYPE_DMA;
+		break;
+	case MDSS_MDP_SSPP_CURSOR0:
+	case MDSS_MDP_SSPP_CURSOR1:
+		ptype = MDSS_MDP_PIPE_TYPE_CURSOR;
+		break;
+	default:
+		ptype = MDSS_MDP_PIPE_TYPE_INVALID;
+		break;
+	}
+
+	return ptype;
+}
+
+static inline enum mdss_mdp_pipe_type get_pipe_type_from_ndx(u32 ndx)
+{
+	enum mdss_mdp_sspp_index pnum;
+
+	pnum = get_pipe_num_from_ndx(ndx);
+
+	return get_pipe_type_from_num(pnum);
+}
+
+enum mdss_mdp_block_type {
+	MDSS_MDP_BLOCK_UNUSED,
+	MDSS_MDP_BLOCK_SSPP,
+	MDSS_MDP_BLOCK_MIXER,
+	MDSS_MDP_BLOCK_DSPP,
+	MDSS_MDP_BLOCK_WB,
+	MDSS_MDP_BLOCK_CDM,
+	MDSS_MDP_BLOCK_SSPP_10,
+	MDSS_MDP_BLOCK_MAX
+};
+
+enum mdss_mdp_csc_type {
+	MDSS_MDP_CSC_YUV2RGB_601L,
+	MDSS_MDP_CSC_YUV2RGB_601FR,
+	MDSS_MDP_CSC_YUV2RGB_709L,
+	MDSS_MDP_CSC_YUV2RGB_2020L,
+	MDSS_MDP_CSC_YUV2RGB_2020FR,
+	MDSS_MDP_CSC_RGB2YUV_601L,
+	MDSS_MDP_CSC_RGB2YUV_601FR,
+	MDSS_MDP_CSC_RGB2YUV_709L,
+	MDSS_MDP_CSC_RGB2YUV_2020L,
+	MDSS_MDP_CSC_RGB2YUV_2020FR,
+	MDSS_MDP_CSC_YUV2YUV,
+	MDSS_MDP_CSC_RGB2RGB,
+	MDSS_MDP_MAX_CSC
+};
+
+enum mdp_wfd_blk_type {
+	MDSS_MDP_WFD_SHARED = 0,
+	MDSS_MDP_WFD_INTERFACE,
+	MDSS_MDP_WFD_DEDICATED,
+};
+
+enum mdss_mdp_reg_bus_cfg {
+	REG_CLK_CFG_OFF,
+	REG_CLK_CFG_LOW,
+	REG_CLK_CFG_HIGH,
+};
+
+enum mdss_mdp_panic_signal_type {
+	MDSS_MDP_PANIC_NONE,
+	MDSS_MDP_PANIC_COMMON_REG_CFG,
+	MDSS_MDP_PANIC_PER_PIPE_CFG,
+};
+
+enum mdss_mdp_fetch_type {
+	MDSS_MDP_FETCH_LINEAR,
+	MDSS_MDP_FETCH_TILE,
+	MDSS_MDP_FETCH_UBWC,
+};
+
+/**
+ * enum mdp_commit_stage_type - Indicate different commit stages
+ *
+ * @MDP_COMMIT_STAGE_SETUP_DONE:	At the stage where the commit
+ *			setup has been completed.
+ * @MDP_COMMIT_STAGE_READY_FOR_KICKOFF:	At the stage where the
+ *			frame is ready for kickoff.
+ */
+enum mdp_commit_stage_type {
+	MDP_COMMIT_STAGE_SETUP_DONE,
+	MDP_COMMIT_STAGE_READY_FOR_KICKOFF,
+};
+
+struct mdss_mdp_ctl;
+typedef void (*mdp_vsync_handler_t)(struct mdss_mdp_ctl *, ktime_t);
+
+struct mdss_mdp_vsync_handler {
+	bool enabled;
+	bool cmd_post_flush;
+	mdp_vsync_handler_t vsync_handler;
+	struct list_head list;
+};
+
+struct mdss_mdp_lineptr_handler {
+	bool enabled;
+	mdp_vsync_handler_t lineptr_handler;
+	struct list_head list;
+};
+
+enum mdss_mdp_wb_ctl_type {
+	MDSS_MDP_WB_CTL_TYPE_BLOCK = 1,
+	MDSS_MDP_WB_CTL_TYPE_LINE
+};
+
+enum mdss_mdp_bw_vote_mode {
+	MDSS_MDP_BW_MODE_SINGLE_LAYER,
+	MDSS_MDP_BW_MODE_SINGLE_IF,
+	MDSS_MDP_BW_MODE_MAX
+};
+
+enum mdp_wb_blk_caps {
+	MDSS_MDP_WB_WFD = BIT(0),
+	MDSS_MDP_WB_ROTATOR = BIT(1),
+	MDSS_MDP_WB_INTF = BIT(2),
+	MDSS_MDP_WB_UBWC = BIT(3),
+};
+
+/**
+ * enum perf_calc_vote_mode - enum to decide if mdss_mdp_get_bw_vote_mode
+ *		function needs an extra efficiency factor.
+ *
+ * @PERF_CALC_VOTE_MODE_PER_PIPE: used to check if efficiency factor is needed
+ *		based on the pipe properties.
+ * @PERF_CALC_VOTE_MODE_CTL: used to check if efficiency factor is needed based
+ *		on the controller properties.
+ * @PERF_CALC_VOTE_MODE_MAX: used to check if efficiency factor is needed to vote
+ *		max MDP bandwidth.
+ *
+ * Depending upon the properties of each specific object (determined
+ * by this enum), driver decides if the mode to vote needs an
+ * extra factor.
+ */
+enum perf_calc_vote_mode {
+	PERF_CALC_VOTE_MODE_PER_PIPE,
+	PERF_CALC_VOTE_MODE_CTL,
+	PERF_CALC_VOTE_MODE_MAX,
+};
+
+struct mdss_mdp_perf_params {
+	u64 bw_overlap;
+	u64 bw_overlap_nocr;
+	u64 bw_writeback;
+	u64 bw_prefill;
+	u64 max_per_pipe_ib;
+	u32 prefill_bytes;
+	u64 bw_ctl;
+	u32 mdp_clk_rate;
+	DECLARE_BITMAP(bw_vote_mode, MDSS_MDP_BW_MODE_MAX);
+};
+
+struct mdss_mdp_writeback {
+	u32 num;
+	char __iomem *base;
+	u32 caps;
+	struct kref kref;
+	u8 supported_input_formats[BITS_TO_BYTES(MDP_IMGTYPE_LIMIT1)];
+	u8 supported_output_formats[BITS_TO_BYTES(MDP_IMGTYPE_LIMIT1)];
+};
+
+struct mdss_mdp_ctl_intfs_ops {
+	int (*start_fnc)(struct mdss_mdp_ctl *ctl);
+	int (*stop_fnc)(struct mdss_mdp_ctl *ctl, int panel_power_state);
+	int (*prepare_fnc)(struct mdss_mdp_ctl *ctl, void *arg);
+	int (*display_fnc)(struct mdss_mdp_ctl *ctl, void *arg);
+	int (*wait_fnc)(struct mdss_mdp_ctl *ctl, void *arg);
+	int (*wait_vsync_fnc)(struct mdss_mdp_ctl *ctl);
+	int (*wait_pingpong)(struct mdss_mdp_ctl *ctl, void *arg);
+	u32 (*read_line_cnt_fnc)(struct mdss_mdp_ctl *);
+	int (*add_vsync_handler)(struct mdss_mdp_ctl *,
+					struct mdss_mdp_vsync_handler *);
+	int (*remove_vsync_handler)(struct mdss_mdp_ctl *,
+					struct mdss_mdp_vsync_handler *);
+	int (*config_fps_fnc)(struct mdss_mdp_ctl *ctl, int new_fps);
+	int (*restore_fnc)(struct mdss_mdp_ctl *ctl, bool locked);
+	int (*early_wake_up_fnc)(struct mdss_mdp_ctl *ctl);
+
+	/*
+	 * reconfigure interface for new resolution, called before (pre=1)
+	 * and after interface has been reconfigured (pre=0)
+	 */
+	int (*reconfigure)(struct mdss_mdp_ctl *ctl,
+			enum dynamic_switch_modes mode, bool pre);
+	/* called before do any register programming  from commit thread */
+	void (*pre_programming)(struct mdss_mdp_ctl *ctl);
+
+	/* to update lineptr, [1..yres] - enable, 0 - disable */
+	int (*update_lineptr)(struct mdss_mdp_ctl *ctl, bool enable);
+};
+
+/* FRC info used for Deterministic Frame Rate Control */
+#define FRC_CADENCE_22_RATIO 2000000000u /* 30fps -> 60fps, 29.97 -> 59.94 */
+#define FRC_CADENCE_22_RATIO_LOW 1940000000u
+#define FRC_CADENCE_22_RATIO_HIGH 2060000000u
+
+#define FRC_CADENCE_23_RATIO 2500000000u /* 24fps -> 60fps, 23.976 -> 59.94 */
+#define FRC_CADENCE_23_RATIO_LOW 2450000000u
+#define FRC_CADENCE_23_RATIO_HIGH 2550000000u
+
+#define FRC_CADENCE_23223_RATIO 2400000000u /* 25fps -> 60fps */
+#define FRC_CADENCE_23223_RATIO_LOW 2360000000u
+#define FRC_CADENCE_23223_RATIO_HIGH 2440000000u
+
+#define FRC_VIDEO_TS_DELTA_THRESHOLD_US (16666 * 10) /* 10 frames at 60fps */
+
+/*
+ * In current FRC design, the minimum video fps change we can support is 24fps
+ * to 25fps, so the timestamp delta per frame is 1667. Use this threshold to
+ * catch this case and ignore more trivial video fps variations.
+ */
+#define FRC_VIDEO_FPS_CHANGE_THRESHOLD_US 1667
+
+/* how many samples we need for video
+ * fps calculation
+ */
+#define FRC_VIDEO_FPS_DETECT_WINDOW 32
+
+/*
+ * Experimental value. Minimum vsync counts during video's single update could
+ * be thought of as pause. If video fps is 10fps and display is 60fps, every
+ * video frame should arrive per 6 vsync, and add 2 more vsync delay, each frame
+ * should arrive in at most 8 vsync interval, otherwise it's considered as a
+ * pause. This value might need tuning in some cases.
+ */
+#define FRC_VIDEO_PAUSE_THRESHOLD 8
+
+#define FRC_MAX_VIDEO_DROPPING_CNT 10 /* how many drops before we disable FRC */
+#define FRC_VIDEO_DROP_TOLERANCE_WINDOW 1000 /* how many frames to count drop */
+
+/* DO NOT change the definition order. __check_known_cadence depends on it */
+enum {
+	FRC_CADENCE_NONE = 0, /* Waiting for samples to compute cadence */
+	FRC_CADENCE_23,
+	FRC_CADENCE_22,
+	FRC_CADENCE_23223,
+	FRC_CADENCE_FREE_RUN, /* No extra repeat, but wait for changes */
+	FRC_CADENCE_DISABLE, /* FRC disabled, no extra repeat */
+};
+#define FRC_MAX_SUPPORT_CADENCE FRC_CADENCE_FREE_RUN
+
+#define FRC_CADENCE_SEQUENCE_MAX_LEN 5 /* 5 -> 23223 */
+#define FRC_CADENCE_SEQUENCE_MAX_RETRY 5 /* max retry of matching sequence */
+
+/* sequence generator for pre-defined cadence */
+struct mdss_mdp_frc_seq_gen {
+	int seq[FRC_CADENCE_SEQUENCE_MAX_LEN];
+	int cache[FRC_CADENCE_SEQUENCE_MAX_LEN]; /* 0 -> this slot is empty */
+	int len;
+	int pos; /* current position in seq, < 0 -> pattern not matched */
+	int base;
+	int retry;
+};
+
+struct mdss_mdp_frc_data {
+	u32 frame_cnt; /* video frame count */
+	s64 timestamp; /* video timestamp in millisecond */
+};
+
+struct mdss_mdp_frc_video_stat {
+	u32 frame_cnt; /* video frame count */
+	s64 timestamp; /* video timestamp in millisecond */
+	s64 last_delta;
+};
+
+struct mdss_mdp_frc_drop_stat {
+	u32 drop_cnt; /* how many video buffer drop */
+	u32 frame_cnt; /* the first frame cnt where drop happens */
+};
+
+/* how many samples at least we need for
+ * cadence detection
+ */
+#define FRC_CADENCE_DETECT_WINDOW 6
+
+struct mdss_mdp_frc_cadence_calc {
+	struct mdss_mdp_frc_data samples[FRC_CADENCE_DETECT_WINDOW];
+	int sample_cnt;
+};
+
+struct mdss_mdp_frc_info {
+	u32 cadence_id; /* patterns such as 22/23/23223 */
+	u32 display_fp1000s;
+	u32 last_vsync_cnt; /* vsync when we kicked off last frame */
+	u32 last_repeat; /* how many times last frame was repeated */
+	u32 base_vsync_cnt;
+	struct mdss_mdp_frc_data cur_frc;
+	struct mdss_mdp_frc_data last_frc;
+	struct mdss_mdp_frc_data base_frc;
+	struct mdss_mdp_frc_video_stat video_stat;
+	struct mdss_mdp_frc_drop_stat drop_stat;
+	struct mdss_mdp_frc_cadence_calc calc;
+	struct mdss_mdp_frc_seq_gen gen;
+};
+
+/*
+ * FSM used in deterministic frame rate control:
+ *
+ *                +----------------+                      +----------------+
+ *                | +------------+ |    too many drops    | +------------+ |
+ *       +--------> |  INIT      | +----------------------> |   DISABLE  | |
+ *       |        | +------------+ <-----------+          | +------------+ |
+ *       |        +----------------+           |          +----------------+
+ *       |           |        |                |
+ *       |           |        |                | change
+ *       |      frame|        |change          +----------------+
+ *       |           |        |                                 |
+ *       |           |        |                                 |
+ *       |        +--v--------+----+                      +-----+----------+
+ * change|        |                |      not supported   |                |
+ *       |        | CADENCE_DETECT +---------------------->    FREE_RUN    |
+ *       |        |                |                      |                |
+ *       |        +-------+--------+                      +----------------+
+ *       |                |
+ *       |                |
+ *       |                |cadence detected
+ *       |                |
+ *       |                |
+ *       |        +-------v--------+             +----------------------------+
+ *       |        |                |             |Events:                     |
+ *       +--------+  SEQ_MATCH     |             |  1. change: some changes   |
+ *       |        |                |             |  might change cadence like |
+ *       |        +-------+--------+             |  video/display fps.        |
+ *       |                |                      |  2. frame: video frame with|
+ *       |                |sequence matched      |  correct FRC info.         |
+ *       |                |                      |  3. in other states than   |
+ *       |        +-------v--------+             |  INIT frame event doesn't  |
+ *       |        |                |             |  make any state change.    |
+ *       |        |                |             +----------------------------+
+ *       +--------+   READY        |
+ *                |                |
+ *                +----------------+
+ */
+enum mdss_mdp_frc_state_type {
+	FRC_STATE_INIT = 0, /* INIT state waiting for frames */
+	FRC_STATE_CADENCE_DETECT, /* state to detect cadence ID */
+	FRC_STATE_SEQ_MATCH, /* state to find start pos in cadence sequence */
+	FRC_STATE_FREERUN, /* state has no extra repeat but might be changed */
+	FRC_STATE_READY, /* state ready to do FRC */
+	FRC_STATE_DISABLE, /* state in which FRC is disabled */
+	FRC_STATE_MAX,
+};
+
+struct mdss_mdp_frc_fsm;
+
+struct mdss_mdp_frc_fsm_ops {
+	/* preprocess incoming FRC info like checking fps changes */
+	void (*pre_frc)(struct mdss_mdp_frc_fsm *frc_fsm, void *arg);
+	/* deterministic frame rate control like delaying frame's display */
+	void (*do_frc)(struct mdss_mdp_frc_fsm *frc_fsm, void *arg);
+	/* post-operations after FRC like saving past info */
+	void (*post_frc)(struct mdss_mdp_frc_fsm *frc_fsm, void *arg);
+};
+
+struct mdss_mdp_frc_fsm_cbs {
+	/* callback used once updating FRC FSM's state */
+	void (*update_state_cb)(struct mdss_mdp_frc_fsm *frc_fsm);
+};
+
+struct mdss_mdp_frc_fsm_state {
+	char *name; /* debug name of current state */
+	enum mdss_mdp_frc_state_type state; /* current state type */
+	struct mdss_mdp_frc_fsm_ops ops; /* operations of current state */
+};
+
+struct mdss_mdp_frc_fsm {
+	bool enable; /* whether FRC is running */
+	struct mdss_mdp_frc_fsm_state state; /* current state */
+	struct mdss_mdp_frc_fsm_state to_state; /* state to set */
+	struct mdss_mdp_frc_fsm_cbs cbs;
+	struct mdss_mdp_frc_info frc_info;
+};
+
+struct mdss_mdp_ctl {
+	u32 num;
+	char __iomem *base;
+
+	u32 ref_cnt;
+	int power_state;
+
+	u32 intf_num;
+	u32 slave_intf_num; /* ping-pong split */
+	u32 intf_type;
+
+	/*
+	 * false: for sctl in DUAL_LM_DUAL_DISPLAY
+	 * true: everything else
+	 */
+	bool is_master;
+
+	u32 opmode;
+	u32 flush_bits;
+	u32 flush_reg_data;
+
+	bool split_flush_en;
+	bool is_video_mode;
+	u32 play_cnt;
+	u32 vsync_cnt;
+	u32 underrun_cnt;
+
+	struct work_struct cpu_pm_work;
+	int autorefresh_frame_cnt;
+
+	u16 width;
+	u16 height;
+	u16 border_x_off;
+	u16 border_y_off;
+	bool is_secure;
+
+	/* used for WFD */
+	u32 dst_format;
+	enum mdss_mdp_csc_type csc_type;
+	struct mult_factor dst_comp_ratio;
+
+	u32 clk_rate;
+	int force_screen_state;
+	struct mdss_mdp_perf_params cur_perf;
+	struct mdss_mdp_perf_params new_perf;
+	u32 perf_transaction_status;
+	bool perf_release_ctl_bw;
+	u64 bw_pending;
+	bool disable_prefill;
+
+	bool traffic_shaper_enabled;
+	u32  traffic_shaper_mdp_clk;
+
+	struct mdss_data_type *mdata;
+	struct msm_fb_data_type *mfd;
+	struct mdss_mdp_mixer *mixer_left;
+	struct mdss_mdp_mixer *mixer_right;
+	struct mdss_mdp_cdm *cdm;
+	struct mutex lock;
+	struct mutex offlock;
+	struct mutex flush_lock;
+	struct mutex *shared_lock;
+	struct mutex rsrc_lock;
+	spinlock_t spin_lock;
+
+	struct mdss_panel_data *panel_data;
+	struct mdss_mdp_vsync_handler vsync_handler;
+	struct mdss_mdp_vsync_handler recover_underrun_handler;
+	struct work_struct recover_work;
+	struct work_struct remove_underrun_handler;
+
+	struct mdss_mdp_lineptr_handler lineptr_handler;
+
+	/*
+	 * This ROI is aligned to as per following guidelines and
+	 * sent to the panel driver.
+	 *
+	 * 1. DUAL_LM_DUAL_DISPLAY
+	 *    Panel = 1440x2560
+	 *    CTL0 = 720x2560 (LM0=720x2560)
+	 *    CTL1 = 720x2560 (LM1=720x2560)
+	 *    Both CTL's ROI will be (0-719)x(0-2559)
+	 * 2. DUAL_LM_SINGLE_DISPLAY
+	 *    Panel = 1440x2560
+	 *    CTL0 = 1440x2560 (LM0=720x2560 and LM1=720x2560)
+	 *    CTL0's ROI will be (0-1439)x(0-2559)
+	 * 3. SINGLE_LM_SINGLE_DISPLAY
+	 *    Panel = 1080x1920
+	 *    CTL0 = 1080x1920 (LM0=1080x1920)
+	 *    CTL0's ROI will be (0-1079)x(0-1919)
+	 */
+	struct mdss_rect roi;
+	struct mdss_rect roi_bkup;
+
+	struct blocking_notifier_head notifier_head;
+
+	void *priv_data;
+	void *intf_ctx[2];
+	u32 wb_type;
+
+	struct mdss_mdp_writeback *wb;
+
+	struct mdss_mdp_ctl_intfs_ops ops;
+	bool force_ctl_start;
+
+	u64 last_input_time;
+	int pending_mode_switch;
+	u16 frame_rate;
+
+	/* dynamic resolution switch during cont-splash handoff */
+	bool switch_with_handoff;
+
+	/* vsync handler for FRC */
+	struct mdss_mdp_vsync_handler frc_vsync_handler;
+	bool commit_in_progress;
+};
+
+struct mdss_mdp_mixer {
+	u32 num;
+	u32 ref_cnt;
+	char __iomem *base;
+	char __iomem *dspp_base;
+	char __iomem *pingpong_base;
+	u8 type;
+	u8 params_changed;
+	u16 width;
+	u16 height;
+
+	bool valid_roi;
+	bool roi_changed;
+	struct mdss_rect roi;
+
+	u8 cursor_enabled;
+	u16 cursor_hotx;
+	u16 cursor_hoty;
+	u8 rotator_mode;
+
+	/*
+	 * src_split_req is valid only for right layer mixer.
+	 *
+	 * VIDEO mode panels: Always true if source split is enabled.
+	 * CMD mode panels: Only true if source split is enabled and
+	 *                  for a given commit left and right both ROIs
+	 *                  are valid.
+	 */
+	bool src_split_req;
+	bool is_right_mixer;
+	struct mdss_mdp_ctl *ctl;
+	struct mdss_mdp_pipe *stage_pipe[MAX_PIPES_PER_LM];
+	u32 next_pipe_map;
+	u32 pipe_mapped;
+};
+
+struct mdss_mdp_format_params {
+	u32 format;
+	u32 flag;
+	u8 is_yuv;
+
+	u8 frame_format;
+	u8 chroma_sample;
+	u8 solid_fill;
+	u8 fetch_planes;
+	u8 unpack_align_msb;	/* 0 to LSB, 1 to MSB */
+	u8 unpack_tight;	/* 0 for loose, 1 for tight */
+	u8 unpack_count;	/* 0 = 1 component, 1 = 2 component ... */
+	u8 bpp;
+	u8 alpha_enable;	/*  source has alpha */
+	u8 fetch_mode;
+	u8 bits[MAX_PLANES];
+	u8 element[MAX_PLANES];
+	u8 unpack_dx_format;	/*1 for 10 bit format otherwise 0 */
+};
+
+struct mdss_mdp_format_ubwc_tile_info {
+	u16 tile_height;
+	u16 tile_width;
+};
+
+struct mdss_mdp_format_params_ubwc {
+	struct mdss_mdp_format_params mdp_format;
+	struct mdss_mdp_format_ubwc_tile_info micro;
+};
+
+struct mdss_mdp_plane_sizes {
+	u32 num_planes;
+	u32 plane_size[MAX_PLANES];
+	u32 total_size;
+	u32 ystride[MAX_PLANES];
+	u32 rau_cnt;
+	u32 rau_h[2];
+};
+
+struct mdss_mdp_img_data {
+	dma_addr_t addr;
+	unsigned long len;
+	u32 offset;
+	u32 flags;
+	u32 dir;
+	u32 domain;
+	bool mapped;
+	bool skip_detach;
+	struct fd srcp_f;
+	struct dma_buf *srcp_dma_buf;
+	struct dma_buf_attachment *srcp_attachment;
+	struct sg_table *srcp_table;
+};
+
+enum mdss_mdp_data_state {
+	MDP_BUF_STATE_UNUSED,
+	MDP_BUF_STATE_READY,
+	MDP_BUF_STATE_ACTIVE,
+	MDP_BUF_STATE_CLEANUP,
+};
+
+struct mdss_mdp_data {
+	enum mdss_mdp_data_state state;
+	u8 num_planes;
+	struct mdss_mdp_img_data p[MAX_PLANES];
+	struct list_head buf_list;
+	struct list_head pipe_list;
+	struct list_head chunk_list;
+	u64 last_alloc;
+	u64 last_freed;
+	struct mdss_mdp_pipe *last_pipe;
+};
+
+struct pp_hist_col_info {
+	u32 col_state;
+	u32 col_en;
+	u32 hist_cnt_read;
+	u32 hist_cnt_sent;
+	u32 hist_cnt_time;
+	u32 frame_cnt;
+	u32 data[HIST_V_SIZE];
+	struct mutex hist_mutex;
+	spinlock_t hist_lock;
+	char __iomem *base;
+	u32 intr_shift;
+	u32 disp_num;
+	struct mdss_mdp_ctl *ctl;
+};
+
+struct mdss_mdp_ad {
+	char __iomem *base;
+	u8 num;
+};
+
+struct mdss_ad_info {
+	u8 num;
+	u8 calc_hw_num;
+	u32 ops;
+	u32 sts;
+	u32 reg_sts;
+	u32 state;
+	u32 ad_data;
+	u32 ad_data_mode;
+	struct mdss_ad_init init;
+	struct mdss_ad_cfg cfg;
+	struct mutex lock;
+	struct work_struct calc_work;
+	struct msm_fb_data_type *mfd;
+	struct msm_fb_data_type *bl_mfd;
+	struct mdss_mdp_vsync_handler handle;
+	u32 last_str;
+	u32 last_bl;
+	u32 last_ad_data;
+	u16 last_calib[4];
+	bool last_ad_data_valid;
+	bool last_calib_valid;
+	u32 ipc_frame_count;
+	u32 bl_data;
+	u32 calc_itr;
+	uint32_t bl_lin[AD_BL_LIN_LEN];
+	uint32_t bl_lin_inv[AD_BL_LIN_LEN];
+	uint32_t bl_att_lut[AD_BL_ATT_LUT_LEN];
+};
+
+struct pp_sts_type {
+	u32 pa_sts;
+	u32 pcc_sts;
+	u32 igc_sts;
+	u32 igc_tbl_idx;
+	u32 argc_sts;
+	u32 enhist_sts;
+	u32 dither_sts;
+	u32 gamut_sts;
+	u32 pgc_sts;
+	u32 sharp_sts;
+	u32 hist_sts;
+	u32 side_sts;
+};
+
+struct mdss_pipe_pp_res {
+	u32 igc_c0_c1[IGC_LUT_ENTRIES];
+	u32 igc_c2[IGC_LUT_ENTRIES];
+	u32 hist_lut[ENHIST_LUT_ENTRIES];
+	struct pp_hist_col_info hist;
+	struct pp_sts_type pp_sts;
+	void *pa_cfg_payload;
+	void *pcc_cfg_payload;
+	void *igc_cfg_payload;
+	void *hist_lut_cfg_payload;
+};
+
+struct mdss_mdp_pipe_smp_map {
+	DECLARE_BITMAP(reserved, MAX_DRV_SUP_MMB_BLKS);
+	DECLARE_BITMAP(allocated, MAX_DRV_SUP_MMB_BLKS);
+	DECLARE_BITMAP(fixed, MAX_DRV_SUP_MMB_BLKS);
+};
+
+struct mdss_mdp_shared_reg_ctrl {
+	u32 reg_off;
+	u32 bit_off;
+};
+
+enum mdss_mdp_pipe_rect {
+	MDSS_MDP_PIPE_RECT0, /* default */
+	MDSS_MDP_PIPE_RECT1,
+	MDSS_MDP_PIPE_MAX_RECTS,
+};
+
+/**
+ * enum mdss_mdp_pipe_multirect_mode - pipe multirect mode
+ * @MDSS_MDP_PIPE_MULTIRECT_NONE:	pipe is not working in multirect mode
+ * @MDSS_MDP_PIPE_MULTIRECT_PARALLEL:	rectangles are being fetched at the
+ *					same time in time multiplexed fashion
+ * @MDSS_MDP_PIPE_MULTIRECT_SERIAL:	rectangles are fetched serially, where
+ *					one is only fetched after the other one
+ *					is complete
+ */
+enum mdss_mdp_pipe_multirect_mode {
+	MDSS_MDP_PIPE_MULTIRECT_NONE,
+	MDSS_MDP_PIPE_MULTIRECT_PARALLEL,
+	MDSS_MDP_PIPE_MULTIRECT_SERIAL,
+};
+
+/**
+ * struct mdss_mdp_pipe_multirect_params - multirect info for layer or pipe
+ * @num:	rectangle being operated, default is RECT0 if pipe doesn't
+ *		support multirect
+ * @mode:	mode of multirect operation, default is NONE
+ * @next:	pointer to sibling pipe/layer which is also operating in
+ *		multirect mode
+ */
+struct mdss_mdp_pipe_multirect_params {
+	enum mdss_mdp_pipe_rect num; /* RECT0 or RECT1 */
+	int max_rects;
+	enum mdss_mdp_pipe_multirect_mode mode;
+	void *next; /* pointer to next pipe or layer */
+};
+
+struct mdss_mdp_pipe {
+	u32 num;
+	u32 type;
+	u32 ndx;
+	u8 priority;
+	char __iomem *base;
+	u32 ftch_id;
+	u32 xin_id;
+	u32 panic_ctrl_ndx;
+	struct mdss_mdp_shared_reg_ctrl clk_ctrl;
+	struct mdss_mdp_shared_reg_ctrl clk_status;
+	struct mdss_mdp_shared_reg_ctrl sw_reset;
+
+	struct kref kref;
+
+	u32 play_cnt;
+	struct file *file;
+	bool is_handed_off;
+
+	u32 flags;
+	u32 bwc_mode;
+
+	/* valid only when pipe's output is crossing both layer mixers */
+	bool src_split_req;
+	bool is_right_blend;
+
+	u16 img_width;
+	u16 img_height;
+	u8 horz_deci;
+	u8 vert_deci;
+	struct mdss_rect src;
+	struct mdss_rect dst;
+	struct mdss_mdp_format_params *src_fmt;
+	struct mdss_mdp_plane_sizes src_planes;
+
+	/* compression ratio from the source format */
+	struct mult_factor comp_ratio;
+
+	enum mdss_mdp_stage_index mixer_stage;
+	u8 is_fg;
+	u8 alpha;
+	u8 blend_op;
+	u8 overfetch_disable;
+	u32 transp;
+	u32 bg_color;
+
+	struct msm_fb_data_type *mfd;
+	struct mdss_mdp_mixer *mixer_left;
+	struct mdss_mdp_mixer *mixer_right;
+
+	struct mdp_overlay req_data;
+	struct mdp_input_layer layer;
+	u32 params_changed;
+	bool dirty;
+	bool unhalted;
+	bool async_update;
+
+	struct mdss_mdp_pipe_smp_map smp_map[MAX_PLANES];
+
+	struct list_head buf_queue;
+	struct list_head list;
+
+	struct mdp_overlay_pp_params pp_cfg;
+	struct mdss_pipe_pp_res pp_res;
+	struct mdp_scale_data_v2 scaler;
+	u8 chroma_sample_h;
+	u8 chroma_sample_v;
+
+	wait_queue_head_t free_waitq;
+	u32 frame_rate;
+	u8 csc_coeff_set;
+	u8 supported_formats[BITS_TO_BYTES(MDP_IMGTYPE_LIMIT1)];
+
+	struct mdss_mdp_pipe_multirect_params multirect;
+};
+
+struct mdss_mdp_writeback_arg {
+	struct mdss_mdp_data *data;
+	void *priv_data;
+};
+
+struct mdss_mdp_wfd;
+
+struct mdss_overlay_private {
+	ktime_t vsync_time;
+	ktime_t lineptr_time;
+	struct kernfs_node *vsync_event_sd;
+	struct kernfs_node *lineptr_event_sd;
+	struct kernfs_node *hist_event_sd;
+	struct kernfs_node *bl_event_sd;
+	struct kernfs_node *ad_event_sd;
+	struct kernfs_node *ad_bl_event_sd;
+	int borderfill_enable;
+	int hw_refresh;
+	void *cpu_pm_hdl;
+
+	struct mdss_data_type *mdata;
+	struct mutex ov_lock;
+	struct mutex dfps_lock;
+	struct mdss_mdp_ctl *ctl;
+	struct mdss_mdp_wfd *wfd;
+
+	struct mutex list_lock;
+	struct list_head pipes_used;
+	struct list_head pipes_cleanup;
+	struct list_head pipes_destroy;
+	struct list_head rot_proc_list;
+	bool mixer_swap;
+	u32 resources_state;
+
+	/* list of buffers that can be reused */
+	struct list_head bufs_chunks;
+	struct list_head bufs_pool;
+	struct list_head bufs_used;
+	/* list of buffers which should be freed during cleanup stage */
+	struct list_head bufs_freelist;
+
+	int ad_state;
+	int dyn_pu_state;
+
+	bool handoff;
+	u32 splash_mem_addr;
+	u32 splash_mem_size;
+	u32 sd_enabled;
+
+	struct sw_sync_timeline *vsync_timeline;
+	struct mdss_mdp_vsync_handler vsync_retire_handler;
+	int retire_cnt;
+	bool kickoff_released;
+	u32 cursor_ndx[2];
+	u32 hist_events;
+	u32 bl_events;
+	u32 ad_events;
+	u32 ad_bl_events;
+
+	bool allow_kickoff;
+
+	/* video frame info used by deterministic frame rate control */
+	struct mdss_mdp_frc_fsm *frc_fsm;
+	u8 sd_transition_state;
+	struct kthread_worker worker;
+	struct kthread_work vsync_work;
+	struct task_struct *thread;
+};
+
+struct mdss_mdp_set_ot_params {
+	u32 xin_id;
+	u32 num;
+	u32 width;
+	u32 height;
+	u16 frame_rate;
+	bool is_rot;
+	bool is_wb;
+	bool is_yuv;
+	bool is_vbif_nrt;
+	u32 reg_off_vbif_lim_conf;
+	u32 reg_off_mdp_clk_ctrl;
+	u32 bit_off_mdp_clk_ctrl;
+};
+
+struct mdss_mdp_commit_cb {
+	void *data;
+	int (*commit_cb_fnc)(enum mdp_commit_stage_type commit_state,
+		void *data);
+};
+
+/**
+ * enum mdss_screen_state - Screen states that MDP can be forced into
+ *
+ * @MDSS_SCREEN_DEFAULT:	Do not force MDP into any screen state.
+ * @MDSS_SCREEN_FORCE_BLANK:	Force MDP to generate blank color fill screen.
+ */
+enum mdss_screen_state {
+	MDSS_SCREEN_DEFAULT,
+	MDSS_SCREEN_FORCE_BLANK,
+};
+
+/**
+ * enum mdss_mdp_clt_intf_event_flags - flags specifying how an event should
+ *                                      be sent to panel drivers.
+ *
+ * @CTL_INTF_EVENT_FLAG_DEFAULT: this flag denotes default behaviour where
+ *                              event will be sent to all panels attached to this
+ *                              display, recursively in split-DSI.
+ * @CTL_INTF_EVENT_FLAG_SKIP_BROADCAST: this flag sends event only to panel
+ *                                     associated with this ctl.
+ * @CTL_INTF_EVENT_FLAG_SLAVE_INTF: this flag sends event only to slave panel
+ *                                  associated with this ctl, i.e pingpong-split
+ */
+enum mdss_mdp_clt_intf_event_flags {
+	CTL_INTF_EVENT_FLAG_DEFAULT = 0,
+	CTL_INTF_EVENT_FLAG_SKIP_BROADCAST = BIT(1),
+	CTL_INTF_EVENT_FLAG_SLAVE_INTF = BIT(2),
+};
+
+#define mfd_to_mdp5_data(mfd) (mfd->mdp.private1)
+#define mfd_to_mdata(mfd) (((struct mdss_overlay_private *)\
+				(mfd->mdp.private1))->mdata)
+#define mfd_to_ctl(mfd) (((struct mdss_overlay_private *)\
+				(mfd->mdp.private1))->ctl)
+#define mfd_to_wb(mfd) (((struct mdss_overlay_private *)\
+				(mfd->mdp.private1))->wb)
+
+/**
+ * - mdss_mdp_is_roi_changed
+ * @mfd - pointer to mfd
+ *
+ * Function returns true if roi is changed for any layer mixer of a given
+ * display, false otherwise.
+ */
+static inline bool mdss_mdp_is_roi_changed(struct msm_fb_data_type *mfd)
+{
+	struct mdss_mdp_ctl *ctl;
+
+	if (!mfd)
+		return false;
+
+	ctl = mfd_to_ctl(mfd); /* returns master ctl */
+
+	return ctl->mixer_left->roi_changed ||
+	      (is_split_lm(mfd) ? ctl->mixer_right->roi_changed : false);
+}
+
+/**
+ * - mdss_mdp_is_both_lm_valid
+ * @main_ctl - pointer to a main ctl
+ *
+ * Function checks if both layer mixers are active or not. This can be useful
+ * when partial update is enabled on either MDP_DUAL_LM_SINGLE_DISPLAY or
+ * MDP_DUAL_LM_DUAL_DISPLAY.
+ */
+static inline bool mdss_mdp_is_both_lm_valid(struct mdss_mdp_ctl *main_ctl)
+{
+	return (main_ctl && main_ctl->is_master &&
+		main_ctl->mixer_left && main_ctl->mixer_left->valid_roi &&
+		main_ctl->mixer_right && main_ctl->mixer_right->valid_roi);
+}
+
+enum mdss_mdp_pu_type {
+	MDSS_MDP_INVALID_UPDATE = -1,
+	MDSS_MDP_DEFAULT_UPDATE,
+	MDSS_MDP_LEFT_ONLY_UPDATE,	/* only valid for split_lm */
+	MDSS_MDP_RIGHT_ONLY_UPDATE,	/* only valid for split_lm */
+};
+
+/* only call from master ctl */
+static inline enum mdss_mdp_pu_type mdss_mdp_get_pu_type(
+	struct mdss_mdp_ctl *mctl)
+{
+	enum mdss_mdp_pu_type pu_type = MDSS_MDP_INVALID_UPDATE;
+
+	if (!mctl || !mctl->is_master)
+		return pu_type;
+
+	if (!is_split_lm(mctl->mfd) || mdss_mdp_is_both_lm_valid(mctl))
+		pu_type = MDSS_MDP_DEFAULT_UPDATE;
+	else if (mctl->mixer_left->valid_roi)
+		pu_type = MDSS_MDP_LEFT_ONLY_UPDATE;
+	else if (mctl->mixer_right->valid_roi)
+		pu_type = MDSS_MDP_RIGHT_ONLY_UPDATE;
+	else
+		pr_err("%s: invalid pu_type\n", __func__);
+
+	return pu_type;
+}
+
+/* Return the slave (right) ctl of a split display, or NULL if none. */
+static inline struct mdss_mdp_ctl *mdss_mdp_get_split_ctl(
+	struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_ctl *split = NULL;
+
+	if (ctl && ctl->mixer_right && ctl->mixer_right->ctl != ctl)
+		split = ctl->mixer_right->ctl;
+
+	return split;
+}
+
+/* Return the master ctl for a slave ctl, or NULL if sctl is not a slave. */
+static inline struct mdss_mdp_ctl *mdss_mdp_get_main_ctl(
+	struct mdss_mdp_ctl *sctl)
+{
+	struct mdss_mdp_ctl *main_ctl = NULL;
+
+	if (sctl && sctl->mfd && sctl->mixer_left &&
+	    sctl->mixer_left->is_right_mixer)
+		main_ctl = mfd_to_ctl(sctl->mfd);
+
+	return main_ctl;
+}
+
+/* VIG pipes are the pipe type used for YUV content. */
+static inline bool mdss_mdp_pipe_is_yuv(struct mdss_mdp_pipe *pipe)
+{
+	if (!pipe)
+		return false;
+
+	return pipe->type == MDSS_MDP_PIPE_TYPE_VIG;
+}
+
+/* True when the pipe is an RGB-only source pipe. */
+static inline bool mdss_mdp_pipe_is_rgb(struct mdss_mdp_pipe *pipe)
+{
+	if (!pipe)
+		return false;
+
+	return pipe->type == MDSS_MDP_PIPE_TYPE_RGB;
+}
+
+/* True when the pipe is a DMA pipe. */
+static inline bool mdss_mdp_pipe_is_dma(struct mdss_mdp_pipe *pipe)
+{
+	if (!pipe)
+		return false;
+
+	return pipe->type == MDSS_MDP_PIPE_TYPE_DMA;
+}
+
+/* Write @val to the ctl register at offset @reg (relaxed MMIO, no barrier). */
+static inline void mdss_mdp_ctl_write(struct mdss_mdp_ctl *ctl,
+				      u32 reg, u32 val)
+{
+	writel_relaxed(val, ctl->base + reg);
+}
+
+/* Read the ctl register at offset @reg (relaxed MMIO, no barrier). */
+static inline u32 mdss_mdp_ctl_read(struct mdss_mdp_ctl *ctl, u32 reg)
+{
+	return readl_relaxed(ctl->base + reg);
+}
+
+/* Write @val to the mixer register at offset @reg (relaxed MMIO). */
+static inline void mdp_mixer_write(struct mdss_mdp_mixer *mixer,
+	u32 reg, u32 val)
+{
+	writel_relaxed(val, mixer->base + reg);
+}
+
+/* Read the mixer register at offset @reg (relaxed MMIO). */
+static inline u32 mdp_mixer_read(struct mdss_mdp_mixer *mixer, u32 reg)
+{
+	return readl_relaxed(mixer->base + reg);
+}
+
+/* Write @val to a ping-pong block register at offset @reg (relaxed MMIO). */
+static inline void mdss_mdp_pingpong_write(char __iomem *pingpong_base,
+				      u32 reg, u32 val)
+{
+	writel_relaxed(val, pingpong_base + reg);
+}
+
+/* Read a ping-pong block register at offset @reg (relaxed MMIO). */
+static inline u32 mdss_mdp_pingpong_read(char __iomem *pingpong_base, u32 reg)
+{
+	return readl_relaxed(pingpong_base + reg);
+}
+
+/*
+ * Pipe SW reset is only present on MDP HW revisions 101.2 and 103.1.
+ * Returns int for historical reasons; the value is boolean.
+ */
+static inline int mdss_mdp_pipe_is_sw_reset_available(
+	struct mdss_data_type *mdata)
+{
+	return (mdata->mdp_rev == MDSS_MDP_HW_REV_101_2) ||
+	       (mdata->mdp_rev == MDSS_MDP_HW_REV_103_1);
+}
+
+/* Dynamic IOMMU attach is supported from MDP HW rev 103 onwards. */
+static inline int mdss_mdp_iommu_dyn_attach_supported(
+	struct mdss_data_type *mdata)
+{
+	return (mdata->mdp_rev >= MDSS_MDP_HW_REV_103);
+}
+
+/* Maximum source line buffer width supported by the scaler hardware. */
+static inline int mdss_mdp_line_buffer_width(void)
+{
+	return MAX_LINE_BUFFER_WIDTH;
+}
+
+/* Panel height in lines, including top/bottom border pixels. */
+static inline u32 get_panel_yres(struct mdss_panel_info *pinfo)
+{
+	return pinfo->yres + pinfo->lcdc.border_top +
+	       pinfo->lcdc.border_bottom;
+}
+
+/* Panel width in pixels, including left/right border pixels. */
+static inline u32 get_panel_xres(struct mdss_panel_info *pinfo)
+{
+	return pinfo->xres + pinfo->lcdc.border_left +
+	       pinfo->lcdc.border_right;
+}
+
+/*
+ * Total panel width driven by this ctl. For ping-pong split the second
+ * (next) panel's width is added, since one ctl feeds both halves.
+ */
+static inline u32 get_panel_width(struct mdss_mdp_ctl *ctl)
+{
+	u32 width;
+
+	width = get_panel_xres(&ctl->panel_data->panel_info);
+	if (ctl->panel_data->next && is_pingpong_split(ctl->mfd))
+		width += get_panel_xres(&ctl->panel_data->next->panel_info);
+
+	return width;
+}
+
+/*
+ * True on MDP revisions (106/108/112/114/115/116 families) that need the
+ * restore configuration to be re-applied at init.
+ */
+static inline bool mdss_mdp_req_init_restore_cfg(struct mdss_data_type *mdata)
+{
+	if (IS_MDSS_MAJOR_MINOR_SAME(mdata->mdp_rev,
+				MDSS_MDP_HW_REV_106) ||
+	    IS_MDSS_MAJOR_MINOR_SAME(mdata->mdp_rev,
+				MDSS_MDP_HW_REV_108) ||
+	    IS_MDSS_MAJOR_MINOR_SAME(mdata->mdp_rev,
+				MDSS_MDP_HW_REV_112) ||
+	    IS_MDSS_MAJOR_MINOR_SAME(mdata->mdp_rev,
+				MDSS_MDP_HW_REV_114) ||
+	    IS_MDSS_MAJOR_MINOR_SAME(mdata->mdp_rev,
+				MDSS_MDP_HW_REV_115) ||
+	    IS_MDSS_MAJOR_MINOR_SAME(mdata->mdp_rev,
+				MDSS_MDP_HW_REV_116))
+		return true;
+
+	return false;
+}
+
+/*
+ * Select the panic-signal programming scheme for this MDP revision:
+ * common-register config on the 105/108/109/110 families, per-pipe config
+ * on 107/114/115/116, and MDSS_MDP_PANIC_NONE elsewhere.
+ */
+static inline int mdss_mdp_panic_signal_support_mode(
+	struct mdss_data_type *mdata)
+{
+	uint32_t signal_mode = MDSS_MDP_PANIC_NONE;
+
+	if (IS_MDSS_MAJOR_MINOR_SAME(mdata->mdp_rev,
+				MDSS_MDP_HW_REV_105) ||
+		IS_MDSS_MAJOR_MINOR_SAME(mdata->mdp_rev,
+				MDSS_MDP_HW_REV_108) ||
+		IS_MDSS_MAJOR_MINOR_SAME(mdata->mdp_rev,
+				MDSS_MDP_HW_REV_109) ||
+		IS_MDSS_MAJOR_MINOR_SAME(mdata->mdp_rev,
+				MDSS_MDP_HW_REV_110))
+		signal_mode = MDSS_MDP_PANIC_COMMON_REG_CFG;
+	else if (IS_MDSS_MAJOR_MINOR_SAME(mdata->mdp_rev,
+				MDSS_MDP_HW_REV_107) ||
+		IS_MDSS_MAJOR_MINOR_SAME(mdata->mdp_rev,
+				MDSS_MDP_HW_REV_114) ||
+		IS_MDSS_MAJOR_MINOR_SAME(mdata->mdp_rev,
+				MDSS_MDP_HW_REV_115) ||
+		IS_MDSS_MAJOR_MINOR_SAME(mdata->mdp_rev,
+				MDSS_MDP_HW_REV_116))
+		signal_mode = MDSS_MDP_PANIC_PER_PIPE_CFG;
+
+	return signal_mode;
+}
+
+/* Return the MDP clock handle for @clk_idx, or NULL when out of range. */
+static inline struct clk *mdss_mdp_get_clk(u32 clk_idx)
+{
+	if (clk_idx >= MDSS_MAX_CLK)
+		return NULL;
+
+	return mdss_res->mdp_clk[clk_idx];
+}
+
+/*
+ * mdss_update_sd_client() - track the number of secure-display clients
+ * @mdata: mdss data whose counter is updated
+ * @status: non-zero when a secure session starts, zero when one ends
+ *
+ * Fix: the decrement path previously operated on the global
+ * mdss_res->sd_client_count while the increment used @mdata, so the two
+ * could diverge; both now use the counter of the @mdata passed in.
+ * The decrement saturates at zero to tolerate unbalanced calls.
+ */
+static inline void mdss_update_sd_client(struct mdss_data_type *mdata,
+							unsigned int status)
+{
+	if (status)
+		atomic_inc(&mdata->sd_client_count);
+	else
+		atomic_add_unless(&mdata->sd_client_count, -1, 0);
+}
+
+/*
+ * Lowest ctl index a writeback session may use; rotator sessions are
+ * restricted to the trailing ctls (see HW restriction below).
+ */
+static inline int mdss_mdp_get_wb_ctl_support(struct mdss_data_type *mdata,
+							bool rotator_session)
+{
+	/*
+	 * Any control path can be routed to any of the hardware datapaths.
+	 * But there is a HW restriction for 3D Mux block. As the 3D Mux
+	 * settings in the CTL registers are double buffered, if an interface
+	 * uses it and disconnects, then the subsequent interface which gets
+	 * connected should use the same control path in order to clear the
+	 * 3D MUX settings.
+	 * To handle this restriction, we are allowing WB also, to loop through
+	 * all the available control paths, so that it can reuse the control
+	 * path left by the external interface, thereby clearing the 3D Mux
+	 * settings.
+	 * The initial control paths can be used by Primary, External and WB.
+	 * The rotator can use the remaining available control paths.
+	 */
+	return rotator_session ? (mdata->nctl - mdata->nmixers_wb) :
+		MDSS_MDP_CTL0;
+}
+
+/* Pipe is a non-realtime VBIF client: NRT VBIF exists and pipe rotates. */
+static inline bool mdss_mdp_is_nrt_vbif_client(struct mdss_data_type *mdata,
+					struct mdss_mdp_pipe *pipe)
+{
+	if (!mdata->vbif_nrt_io.base)
+		return false;
+
+	return pipe->mixer_left && pipe->mixer_left->rotator_mode;
+}
+
+/* A ctl path is non-realtime when it has no interface or feeds the rotator. */
+static inline bool mdss_mdp_is_nrt_ctl_path(struct mdss_mdp_ctl *ctl)
+{
+	if (ctl->intf_num == MDSS_MDP_NO_INTF)
+		return true;
+
+	return ctl->mixer_left && ctl->mixer_left->rotator_mode;
+}
+
+/* True when a dedicated non-realtime VBIF base address was mapped. */
+static inline bool mdss_mdp_is_nrt_vbif_base_defined(
+		struct mdss_data_type *mdata)
+{
+	return !!mdata->vbif_nrt_io.base;
+}
+
+/* True when the ctl's panel power state is fully off. */
+static inline bool mdss_mdp_ctl_is_power_off(struct mdss_mdp_ctl *ctl)
+{
+	return mdss_panel_is_power_off(ctl->power_state);
+}
+
+/* True when the panel is fully on (interactive, not low-power). */
+static inline bool mdss_mdp_ctl_is_power_on_interactive(
+	struct mdss_mdp_ctl *ctl)
+{
+	return mdss_panel_is_power_on_interactive(ctl->power_state);
+}
+
+/* True when the panel is on in any mode (interactive or low-power). */
+static inline bool mdss_mdp_ctl_is_power_on(struct mdss_mdp_ctl *ctl)
+{
+	return mdss_panel_is_power_on(ctl->power_state);
+}
+
+/* True when the panel is on in a low-power (e.g. doze) mode. */
+static inline bool mdss_mdp_ctl_is_power_on_lp(struct mdss_mdp_ctl *ctl)
+{
+	return mdss_panel_is_power_on_lp(ctl->power_state);
+}
+
+/*
+ * Width of the left layer mixer for this display, minus the panel's
+ * left/right border pixels. Returns 0 when no ctl/mixer is attached.
+ */
+static inline u32 left_lm_w_from_mfd(struct msm_fb_data_type *mfd)
+{
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+	struct mdss_panel_info *pinfo = mfd->panel_info;
+	int width = 0;
+
+	if (ctl && ctl->mixer_left) {
+		width =  ctl->mixer_left->width;
+		width -= (pinfo->lcdc.border_left + pinfo->lcdc.border_right);
+		pr_debug("ctl=%d mw=%d l=%d r=%d w=%d\n",
+			ctl->num, ctl->mixer_left->width,
+			pinfo->lcdc.border_left, pinfo->lcdc.border_right,
+			width);
+	}
+	return width;
+}
+
+/* True when the format is fetched in tile mode. */
+static inline bool mdss_mdp_is_tile_format(struct mdss_mdp_format_params *fmt)
+{
+	if (!fmt)
+		return false;
+
+	return fmt->fetch_mode == MDSS_MDP_FETCH_TILE;
+}
+
+/* True when the format is fetched as UBWC (compressed). */
+static inline bool mdss_mdp_is_ubwc_format(struct mdss_mdp_format_params *fmt)
+{
+	if (!fmt)
+		return false;
+
+	return fmt->fetch_mode == MDSS_MDP_FETCH_UBWC;
+}
+
+/* True when the format is fetched linearly. */
+static inline bool mdss_mdp_is_linear_format(struct mdss_mdp_format_params *fmt)
+{
+	if (!fmt)
+		return false;
+
+	return fmt->fetch_mode == MDSS_MDP_FETCH_LINEAR;
+}
+
+/* NV12: 4:2:0 chroma subsampling with pseudo-planar (interleaved UV) plane. */
+static inline bool mdss_mdp_is_nv12_format(struct mdss_mdp_format_params *fmt)
+{
+	if (!fmt)
+		return false;
+
+	return (fmt->chroma_sample == MDSS_MDP_CHROMA_420) &&
+	       (fmt->fetch_planes == MDSS_MDP_PLANE_PSEUDO_PLANAR);
+}
+
+/* True when this MDP hardware supports UBWC formats. */
+static inline bool mdss_mdp_is_ubwc_supported(struct mdss_data_type *mdata)
+{
+	return mdata->has_ubwc;
+}
+
+/* WB rotator is available only when there is no separate rotator block. */
+static inline bool mdss_mdp_is_wb_rotator_supported(
+		struct mdss_data_type *mdata)
+{
+	return mdata && !mdata->has_separate_rotator;
+}
+
+static inline int mdss_mdp_is_cdm_supported(struct mdss_data_type *mdata,
+					    u32 intf_type, u32 mixer_type)
+{
+	int support = mdata->ncdm;
+
+	/*
+	 * CDM is supported when:
+	 * 1. the device tree declared at least one CDM block, AND
+	 * 2. the output interface is HDMI, or there is no MDP interface
+	 *    (NO_INTF) with an INTF- or WRITEBACK-type mixer.
+	 * NOTE(review): the old comment said only "HDMI OR WB2", but the
+	 * code also accepts NO_INTF + MIXER_TYPE_INTF — confirm intent.
+	 */
+	return support && ((intf_type == MDSS_INTF_HDMI) ||
+			   ((intf_type == MDSS_MDP_NO_INTF) &&
+			    ((mixer_type == MDSS_MDP_MIXER_TYPE_INTF) ||
+			     (mixer_type == MDSS_MDP_MIXER_TYPE_WRITEBACK))));
+}
+
+/* Bytes in one maximum-size cursor frame (4 bytes per pixel). */
+static inline u32 mdss_mdp_get_cursor_frame_size(struct mdss_data_type *mdata)
+{
+	u32 side = mdata->max_cursor_size;
+
+	return side * side * 4;
+}
+
+/*
+ * Map a pipe's requested CSC coefficient set to the hardware YUV->RGB
+ * matrix selector; unknown values fall back to BT.709 limited range.
+ */
+static inline uint8_t pp_vig_csc_pipe_val(struct mdss_mdp_pipe *pipe)
+{
+	switch (pipe->csc_coeff_set) {
+	case MDP_CSC_ITU_R_601:
+		return MDSS_MDP_CSC_YUV2RGB_601L;
+	case MDP_CSC_ITU_R_601_FR:
+		return MDSS_MDP_CSC_YUV2RGB_601FR;
+	case MDP_CSC_ITU_R_2020:
+		return MDSS_MDP_CSC_YUV2RGB_2020L;
+	case MDP_CSC_ITU_R_2020_FR:
+		return MDSS_MDP_CSC_YUV2RGB_2020FR;
+	case MDP_CSC_ITU_R_709:
+	default:
+		return  MDSS_MDP_CSC_YUV2RGB_709L;
+	}
+}
+
+/*
+ * when split_lm topology is used without 3D_Mux, either DSC_MERGE or
+ * split_panel is used during full frame updates. Now when we go from
+ * full frame update to right-only update, we need to disable DSC_MERGE or
+ * split_panel. However, those are controlled through DSC0_COMMON_MODE
+ * register which is double buffered, and this double buffer update is tied to
+ * LM0. Now for right-only update, LM0 will not get double buffer update signal.
+ * So DSC_MERGE or split_panel is not disabled for right-only update which is
+ * a wrong HW state and leads to ping-pong timeout. Workaround for this is to
+ * use LM0->DSC0 pair for right-only update and disable DSC_MERGE or
+ * split_panel.
+ *
+ * However using LM0->DSC0 pair for right-only update requires many changes
+ * at various levels of SW. To lower the SW impact and still support
+ * right-only partial update, keep SW state as it is but swap mixer register
+ * writes such that we instruct HW to use LM0->DSC0 pair.
+ *
+ * Return true when such a layer-mixer swap is needed for this commit.
+ */
+static inline bool mdss_mdp_is_lm_swap_needed(struct mdss_data_type *mdata,
+	struct mdss_mdp_ctl *mctl)
+{
+	if (!mdata || !mctl || !mctl->is_master ||
+	    !mctl->panel_data || !mctl->mfd)
+		return false;
+
+	return (is_dsc_compression(&mctl->panel_data->panel_info)) &&
+	       (mctl->panel_data->panel_info.partial_update_enabled) &&
+	       (mdss_has_quirk(mdata, MDSS_QUIRK_DSC_RIGHT_ONLY_PU)) &&
+	       ((mctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) ||
+		((mctl->mfd->split_mode == MDP_DUAL_LM_SINGLE_DISPLAY) &&
+		 (mctl->panel_data->panel_info.dsc_enc_total == 2))) &&
+	       (!mctl->mixer_left->valid_roi) &&
+	       (mctl->mixer_right->valid_roi);
+}
+
+/* Return the fb index of the display this pipe belongs to, or -1. */
+static inline int mdss_mdp_get_display_id(struct mdss_mdp_pipe *pipe)
+{
+	if (!pipe || !pipe->mfd)
+		return -1;
+
+	return pipe->mfd->index;
+}
+
+/*
+ * True when the pending update covers the full frame: the PU type is the
+ * default one and every mixer with a valid ROI spans its whole width and
+ * height starting at the origin.
+ */
+static inline bool mdss_mdp_is_full_frame_update(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_mixer *lm;
+
+	if (mdss_mdp_get_pu_type(ctl) != MDSS_MDP_DEFAULT_UPDATE)
+		return false;
+
+	lm = ctl->mixer_left;
+	if (lm->valid_roi &&
+	    (lm->roi.x || lm->roi.y ||
+	     (lm->roi.w != lm->width) || (lm->roi.h != lm->height)))
+		return false;
+
+	lm = ctl->mixer_right;
+	if (lm && lm->valid_roi &&
+	    (lm->roi.x || lm->roi.y ||
+	     (lm->roi.w != lm->width) || (lm->roi.h != lm->height)))
+		return false;
+
+	return true;
+}
+
+/*
+ * Line-pointer interrupts are supported on video-mode panels and on
+ * command-mode panels with tear check enabled. Master ctl only.
+ * NOTE(review): assumes ctl->panel_data is non-NULL here — confirm callers.
+ */
+static inline bool mdss_mdp_is_lineptr_supported(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_panel_info *pinfo;
+
+	if (!ctl || !ctl->mixer_left || !ctl->is_master)
+		return false;
+
+	pinfo = &ctl->panel_data->panel_info;
+
+	return (ctl->is_video_mode || ((pinfo->type == MIPI_CMD_PANEL)
+			&& (pinfo->te.tear_check_en)) ? true : false);
+}
+
+/*
+ * Decide whether an SMMU mapping is required for this buffer. Secure UI
+ * buffers normally skip the map; ULT targets with the NEED_SECURE_MAP
+ * quirk must map even secure-display buffers.
+ */
+static inline bool mdss_mdp_is_map_needed(struct mdss_data_type *mdata,
+						struct mdss_mdp_img_data *data)
+{
+	u32 is_secure_ui = data->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION;
+
+	if (is_secure_ui && !mdss_has_quirk(mdata, MDSS_QUIRK_NEED_SECURE_MAP))
+		return false;
+
+	return true;
+}
+
+/*
+ * mdss_mdp_get_rotator_dst_format() - destination format of a rotator run
+ * @in_format: source pixel format
+ * @in_rot90: non-zero when a 90-degree rotation is applied
+ * @bwc: non-zero when bandwidth compression is enabled
+ *
+ * Most formats pass through unchanged; a few are rewritten because the
+ * rotator emits them differently (565 rotated becomes 888, planar YUV
+ * becomes pseudo-planar CrCb).
+ */
+static inline u32 mdss_mdp_get_rotator_dst_format(u32 in_format, u32 in_rot90,
+	u32 bwc)
+{
+	u32 out_format = in_format;
+
+	switch (in_format) {
+	case MDP_RGB_565:
+	case MDP_BGR_565:
+		if (in_rot90)
+			out_format = MDP_RGB_888;
+		break;
+	case MDP_RGBA_8888:
+		if (bwc)
+			out_format = MDP_BGRA_8888;
+		break;
+	case MDP_Y_CBCR_H2V2_VENUS:
+	case MDP_Y_CRCB_H2V2_VENUS:
+	case MDP_Y_CBCR_H2V2:
+		if (in_rot90)
+			out_format = MDP_Y_CRCB_H2V2;
+		break;
+	case MDP_Y_CB_CR_H2V2:
+	case MDP_Y_CR_CB_GH2V2:
+	case MDP_Y_CR_CB_H2V2:
+		out_format = MDP_Y_CRCB_H2V2;
+		break;
+	default:
+		break;
+	}
+
+	return out_format;
+}
+
+irqreturn_t mdss_mdp_isr(int irq, void *ptr);
+void mdss_mdp_irq_clear(struct mdss_data_type *mdata,
+		u32 intr_type, u32 intf_num);
+int mdss_mdp_irq_enable(u32 intr_type, u32 intf_num);
+void mdss_mdp_irq_disable(u32 intr_type, u32 intf_num);
+void mdss_mdp_intr_check_and_clear(u32 intr_type, u32 intf_num);
+int mdss_mdp_hist_irq_enable(u32 irq);
+void mdss_mdp_hist_irq_disable(u32 irq);
+void mdss_mdp_irq_disable_nosync(u32 intr_type, u32 intf_num);
+int mdss_mdp_set_intr_callback(u32 intr_type, u32 intf_num,
+			       void (*fnc_ptr)(void *), void *arg);
+int mdss_mdp_set_intr_callback_nosync(u32 intr_type, u32 intf_num,
+			       void (*fnc_ptr)(void *), void *arg);
+u32 mdss_mdp_get_irq_mask(u32 intr_type, u32 intf_num);
+
+void mdss_mdp_footswitch_ctrl_splash(int on);
+void mdss_mdp_batfet_ctrl(struct mdss_data_type *mdata, int enable);
+void mdss_mdp_set_clk_rate(unsigned long min_clk_rate);
+unsigned long mdss_mdp_get_clk_rate(u32 clk_idx, bool locked);
+int mdss_mdp_vsync_clk_enable(int enable, bool locked);
+void mdss_mdp_clk_ctrl(int enable);
+struct mdss_data_type *mdss_mdp_get_mdata(void);
+int mdss_mdp_secure_display_ctrl(struct mdss_data_type *mdata,
+	unsigned int enable);
+
+int mdss_mdp_overlay_init(struct msm_fb_data_type *mfd);
+int mdss_mdp_dfps_update_params(struct msm_fb_data_type *mfd,
+	struct mdss_panel_data *pdata, struct dynamic_fps_data *data);
+int mdss_mdp_layer_atomic_validate(struct msm_fb_data_type *mfd,
+	struct file *file, struct mdp_layer_commit_v1 *ov_commit);
+int mdss_mdp_layer_pre_commit(struct msm_fb_data_type *mfd,
+	struct file *file, struct mdp_layer_commit_v1 *ov_commit);
+
+int mdss_mdp_layer_atomic_validate_wfd(struct msm_fb_data_type *mfd,
+	struct file *file, struct mdp_layer_commit_v1 *ov_commit);
+int mdss_mdp_layer_pre_commit_wfd(struct msm_fb_data_type *mfd,
+	struct file *file, struct mdp_layer_commit_v1 *ov_commit);
+bool mdss_mdp_wfd_is_config_same(struct msm_fb_data_type *mfd,
+	struct mdp_output_layer *layer);
+
+int mdss_mdp_async_position_update(struct msm_fb_data_type *mfd,
+		struct mdp_position_update *update_pos);
+
+int mdss_mdp_overlay_req_check(struct msm_fb_data_type *mfd,
+			       struct mdp_overlay *req,
+			       struct mdss_mdp_format_params *fmt);
+int mdss_mdp_overlay_vsync_ctrl(struct msm_fb_data_type *mfd, int en);
+int mdss_mdp_overlay_pipe_setup(struct msm_fb_data_type *mfd,
+	struct mdp_overlay *req, struct mdss_mdp_pipe **ppipe,
+	struct mdss_mdp_pipe *left_blend_pipe, bool is_single_layer);
+void mdss_mdp_handoff_cleanup_pipes(struct msm_fb_data_type *mfd,
+							u32 type);
+int mdss_mdp_overlay_release(struct msm_fb_data_type *mfd, int ndx);
+int mdss_mdp_overlay_start(struct msm_fb_data_type *mfd);
+void mdss_mdp_overlay_set_chroma_sample(
+	struct mdss_mdp_pipe *pipe);
+int mdp_pipe_tune_perf(struct mdss_mdp_pipe *pipe,
+	u32 flags);
+int mdss_mdp_overlay_setup_scaling(struct mdss_mdp_pipe *pipe);
+struct mdss_mdp_pipe *mdss_mdp_pipe_assign(struct mdss_data_type *mdata,
+	struct mdss_mdp_mixer *mixer, u32 ndx,
+	enum mdss_mdp_pipe_rect rect_num);
+struct mdss_mdp_pipe *mdss_mdp_overlay_pipe_reuse(
+	struct msm_fb_data_type *mfd, int pipe_ndx);
+void mdss_mdp_pipe_position_update(struct mdss_mdp_pipe *pipe,
+		struct mdss_rect *src, struct mdss_rect *dst);
+int mdss_mdp_video_addr_setup(struct mdss_data_type *mdata,
+		u32 *offsets,  u32 count);
+int mdss_mdp_video_start(struct mdss_mdp_ctl *ctl);
+void mdss_mdp_switch_roi_reset(struct mdss_mdp_ctl *ctl);
+void mdss_mdp_switch_to_cmd_mode(struct mdss_mdp_ctl *ctl, int prep);
+void mdss_mdp_switch_to_vid_mode(struct mdss_mdp_ctl *ctl, int prep);
+void *mdss_mdp_get_intf_base_addr(struct mdss_data_type *mdata,
+		u32 interface_id);
+int mdss_mdp_cmd_start(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_writeback_start(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_overlay_kickoff(struct msm_fb_data_type *mfd,
+		struct mdp_display_commit *data);
+struct mdss_mdp_data *mdss_mdp_overlay_buf_alloc(struct msm_fb_data_type *mfd,
+		struct mdss_mdp_pipe *pipe);
+void mdss_mdp_overlay_buf_free(struct msm_fb_data_type *mfd,
+		struct mdss_mdp_data *buf);
+
+int mdss_mdp_ctl_reconfig(struct mdss_mdp_ctl *ctl,
+		struct mdss_panel_data *pdata);
+struct mdss_mdp_ctl *mdss_mdp_ctl_init(struct mdss_panel_data *pdata,
+					struct msm_fb_data_type *mfd);
+int mdss_mdp_video_reconfigure_splash_done(struct mdss_mdp_ctl *ctl,
+		bool handoff);
+int mdss_mdp_cmd_reconfigure_splash_done(struct mdss_mdp_ctl *ctl,
+		bool handoff);
+int mdss_mdp_ctl_splash_finish(struct mdss_mdp_ctl *ctl, bool handoff);
+void mdss_mdp_check_ctl_reset_status(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_ctl_setup(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_ctl_split_display_setup(struct mdss_mdp_ctl *ctl,
+		struct mdss_panel_data *pdata);
+int mdss_mdp_ctl_destroy(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_ctl_start(struct mdss_mdp_ctl *ctl, bool handoff);
+int mdss_mdp_ctl_stop(struct mdss_mdp_ctl *ctl, int panel_power_mode);
+int mdss_mdp_ctl_intf_event(struct mdss_mdp_ctl *ctl, int event, void *arg,
+	u32 flags);
+int mdss_mdp_get_prefetch_lines(struct mdss_panel_info *pinfo);
+int mdss_mdp_perf_bw_check(struct mdss_mdp_ctl *ctl,
+		struct mdss_mdp_pipe **left_plist, int left_cnt,
+		struct mdss_mdp_pipe **right_plist, int right_cnt);
+int mdss_mdp_perf_bw_check_pipe(struct mdss_mdp_perf_params *perf,
+		struct mdss_mdp_pipe *pipe);
+int mdss_mdp_get_pipe_overlap_bw(struct mdss_mdp_pipe *pipe,
+	struct mdss_rect *roi, u64 *quota, u64 *quota_nocr, u32 flags);
+int mdss_mdp_get_panel_params(struct mdss_mdp_pipe *pipe,
+	struct mdss_mdp_mixer *mixer, u32 *fps, u32 *v_total,
+	u32 *h_total, u32 *xres);
+int mdss_mdp_perf_calc_pipe(struct mdss_mdp_pipe *pipe,
+	struct mdss_mdp_perf_params *perf, struct mdss_rect *roi,
+	u32 flags);
+bool mdss_mdp_is_amortizable_pipe(struct mdss_mdp_pipe *pipe,
+	struct mdss_mdp_mixer *mixer, struct mdss_data_type *mdata);
+u32 mdss_mdp_calc_latency_buf_bytes(bool is_yuv, bool is_bwc,
+	bool is_tile, u32 src_w, u32 bpp, bool use_latency_buf_percentage,
+	u32 smp_bytes, bool is_ubwc, bool is_nv12, bool is_hflip);
+u32 mdss_mdp_get_mdp_clk_rate(struct mdss_data_type *mdata);
+int mdss_mdp_ctl_notify(struct mdss_mdp_ctl *ctl, int event);
+void mdss_mdp_ctl_notifier_register(struct mdss_mdp_ctl *ctl,
+	struct notifier_block *notifier);
+void mdss_mdp_ctl_notifier_unregister(struct mdss_mdp_ctl *ctl,
+	struct notifier_block *notifier);
+u32 mdss_mdp_ctl_perf_get_transaction_status(struct mdss_mdp_ctl *ctl);
+u32 apply_comp_ratio_factor(u32 quota, struct mdss_mdp_format_params *fmt,
+	struct mult_factor *factor);
+
+int mdss_mdp_scan_pipes(void);
+
+int mdss_mdp_mixer_handoff(struct mdss_mdp_ctl *ctl, u32 num,
+	struct mdss_mdp_pipe *pipe);
+
+void mdss_mdp_ctl_perf_set_transaction_status(struct mdss_mdp_ctl *ctl,
+	enum mdss_mdp_perf_state_type component, bool new_status);
+void mdss_mdp_ctl_perf_release_bw(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_async_ctl_flush(struct msm_fb_data_type *mfd,
+		u32 flush_bits);
+int mdss_mdp_get_pipe_flush_bits(struct mdss_mdp_pipe *pipe);
+struct mdss_mdp_mixer *mdss_mdp_block_mixer_alloc(void);
+int mdss_mdp_block_mixer_destroy(struct mdss_mdp_mixer *mixer);
+struct mdss_mdp_mixer *mdss_mdp_mixer_get(struct mdss_mdp_ctl *ctl, int mux);
+struct mdss_mdp_pipe *mdss_mdp_get_staged_pipe(struct mdss_mdp_ctl *ctl,
+	int mux, int stage, bool is_right_blend);
+int mdss_mdp_mixer_pipe_update(struct mdss_mdp_pipe *pipe,
+	struct mdss_mdp_mixer *mixer, int params_changed);
+int mdss_mdp_mixer_pipe_unstage(struct mdss_mdp_pipe *pipe,
+	struct mdss_mdp_mixer *mixer);
+void mdss_mdp_mixer_unstage_all(struct mdss_mdp_mixer *mixer);
+void mdss_mdp_reset_mixercfg(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_display_commit(struct mdss_mdp_ctl *ctl, void *arg,
+	struct mdss_mdp_commit_cb *commit_cb);
+int mdss_mdp_display_wait4comp(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_display_wait4pingpong(struct mdss_mdp_ctl *ctl, bool use_lock);
+int mdss_mdp_display_wakeup_time(struct mdss_mdp_ctl *ctl,
+				 ktime_t *wakeup_time);
+
+int mdss_mdp_csc_setup(u32 block, u32 blk_idx, u32 csc_type);
+int mdss_mdp_csc_setup_data(u32 block, u32 blk_idx, struct mdp_csc_cfg *data);
+
+int mdss_mdp_pp_init(struct device *dev);
+void mdss_mdp_pp_term(struct device *dev);
+int mdss_mdp_pp_overlay_init(struct msm_fb_data_type *mfd);
+
+int mdss_mdp_pp_resume(struct msm_fb_data_type *mfd);
+
+int mdss_mdp_pp_setup(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_pp_setup_locked(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_pipe_pp_setup(struct mdss_mdp_pipe *pipe, u32 *op);
+void mdss_mdp_pipe_pp_clear(struct mdss_mdp_pipe *pipe);
+int mdss_mdp_pipe_sspp_setup(struct mdss_mdp_pipe *pipe, u32 *op);
+int mdss_mdp_pp_sspp_config(struct mdss_mdp_pipe *pipe);
+int mdss_mdp_copy_layer_pp_info(struct mdp_input_layer *layer);
+void mdss_mdp_free_layer_pp_info(struct mdp_input_layer *layer);
+
+int mdss_mdp_smp_setup(struct mdss_data_type *mdata, u32 cnt, u32 size);
+
+void mdss_hw_init(struct mdss_data_type *mdata);
+
+int mdss_mdp_mfd_valid_dspp(struct msm_fb_data_type *mfd);
+
+int mdss_mdp_pa_config(struct msm_fb_data_type *mfd,
+			struct mdp_pa_cfg_data *config, u32 *copyback);
+int mdss_mdp_pa_v2_config(struct msm_fb_data_type *mfd,
+			struct mdp_pa_v2_cfg_data *config, u32 *copyback);
+int mdss_mdp_pcc_config(struct msm_fb_data_type *mfd,
+			struct mdp_pcc_cfg_data *cfg_ptr, u32 *copyback);
+int mdss_mdp_igc_lut_config(struct msm_fb_data_type *mfd,
+			struct mdp_igc_lut_data *config, u32 *copyback,
+				u32 copy_from_kernel);
+int mdss_mdp_argc_config(struct msm_fb_data_type *mfd,
+			struct mdp_pgc_lut_data *config, u32 *copyback);
+int mdss_mdp_hist_lut_config(struct msm_fb_data_type *mfd,
+			struct mdp_hist_lut_data *config, u32 *copyback);
+int mdss_mdp_pp_default_overlay_config(struct msm_fb_data_type *mfd,
+					struct mdss_panel_data *pdata,
+					bool enable);
+int mdss_mdp_dither_config(struct msm_fb_data_type *mfd,
+			struct mdp_dither_cfg_data *config, u32 *copyback,
+			   int copy_from_kernel);
+int mdss_mdp_gamut_config(struct msm_fb_data_type *mfd,
+			struct mdp_gamut_cfg_data *config, u32 *copyback);
+
+int mdss_mdp_hist_intr_req(struct mdss_intr *intr, u32 bits, bool en);
+int mdss_mdp_hist_intr_setup(struct mdss_intr *intr, int state);
+int mdss_mdp_hist_start(struct mdp_histogram_start_req *req);
+int mdss_mdp_hist_stop(u32 block);
+int mdss_mdp_hist_collect(struct mdp_histogram_data *hist);
+void mdss_mdp_hist_intr_done(u32 isr);
+
+int mdss_mdp_ad_config(struct msm_fb_data_type *mfd,
+				struct mdss_ad_init_cfg *init_cfg);
+int mdss_mdp_ad_input(struct msm_fb_data_type *mfd,
+				struct mdss_ad_input *input, int wait);
+int mdss_mdp_ad_addr_setup(struct mdss_data_type *mdata, u32 *ad_offsets);
+int mdss_mdp_calib_mode(struct msm_fb_data_type *mfd,
+				struct mdss_calib_cfg *cfg);
+
+int mdss_mdp_pipe_handoff(struct mdss_mdp_pipe *pipe);
+int mdss_mdp_smp_handoff(struct mdss_data_type *mdata);
+struct mdss_mdp_pipe *mdss_mdp_pipe_alloc(struct mdss_mdp_mixer *mixer,
+	u32 type, struct mdss_mdp_pipe *left_blend_pipe);
+struct mdss_mdp_pipe *mdss_mdp_pipe_get(u32 ndx,
+	enum mdss_mdp_pipe_rect rect_num);
+struct mdss_mdp_pipe *mdss_mdp_pipe_search(struct mdss_data_type *mdata,
+	u32 ndx, enum mdss_mdp_pipe_rect rect_num);
+int mdss_mdp_pipe_map(struct mdss_mdp_pipe *pipe);
+void mdss_mdp_pipe_unmap(struct mdss_mdp_pipe *pipe);
+
+u32 mdss_mdp_smp_calc_num_blocks(struct mdss_mdp_pipe *pipe);
+u32 mdss_mdp_smp_get_size(struct mdss_mdp_pipe *pipe);
+int mdss_mdp_smp_reserve(struct mdss_mdp_pipe *pipe);
+void mdss_mdp_smp_unreserve(struct mdss_mdp_pipe *pipe);
+void mdss_mdp_smp_release(struct mdss_mdp_pipe *pipe);
+
+int mdss_mdp_pipe_addr_setup(struct mdss_data_type *mdata,
+	struct mdss_mdp_pipe *head, u32 *offsets, u32 *ftch_id, u32 *xin_id,
+	u32 type, const int *pnums, u32 len, u32 rects_per_sspp,
+	u8 priority_base);
+int mdss_mdp_mixer_addr_setup(struct mdss_data_type *mdata, u32 *mixer_offsets,
+		u32 *dspp_offsets, u32 *pingpong_offsets, u32 type, u32 len);
+int mdss_mdp_ctl_addr_setup(struct mdss_data_type *mdata, u32 *ctl_offsets,
+	u32 len);
+int mdss_mdp_wb_addr_setup(struct mdss_data_type *mdata,
+	u32 num_wb, u32 num_intf_wb);
+
+void mdss_mdp_pipe_clk_force_off(struct mdss_mdp_pipe *pipe);
+int mdss_mdp_pipe_fetch_halt(struct mdss_mdp_pipe *pipe, bool is_recovery);
+int mdss_mdp_pipe_panic_signal_ctrl(struct mdss_mdp_pipe *pipe, bool enable);
+void mdss_mdp_bwcpanic_ctrl(struct mdss_data_type *mdata, bool enable);
+int mdss_mdp_pipe_destroy(struct mdss_mdp_pipe *pipe);
+int mdss_mdp_pipe_queue_data(struct mdss_mdp_pipe *pipe,
+			     struct mdss_mdp_data *src_data);
+
+int mdss_mdp_data_check(struct mdss_mdp_data *data,
+			struct mdss_mdp_plane_sizes *ps,
+			struct mdss_mdp_format_params *fmt);
+int mdss_mdp_get_plane_sizes(struct mdss_mdp_format_params *fmt, u32 w, u32 h,
+	     struct mdss_mdp_plane_sizes *ps, u32 bwc_mode, bool rotation);
+int mdss_mdp_get_rau_strides(u32 w, u32 h, struct mdss_mdp_format_params *fmt,
+			       struct mdss_mdp_plane_sizes *ps);
+void mdss_mdp_data_calc_offset(struct mdss_mdp_data *data, u16 x, u16 y,
+	struct mdss_mdp_plane_sizes *ps, struct mdss_mdp_format_params *fmt);
+void mdss_mdp_format_flag_removal(u32 *table, u32 num, u32 remove_bits);
+struct mdss_mdp_format_params *mdss_mdp_get_format_params(u32 format);
+int mdss_mdp_validate_offset_for_ubwc_format(
+	struct mdss_mdp_format_params *fmt, u16 x, u16 y);
+void mdss_mdp_get_v_h_subsample_rate(u8 chroma_samp,
+	u8 *v_sample, u8 *h_sample);
+struct mult_factor *mdss_mdp_get_comp_factor(u32 format,
+	bool rt_factor);
+int mdss_mdp_data_map(struct mdss_mdp_data *data, bool rotator, int dir);
+void mdss_mdp_data_free(struct mdss_mdp_data *data, bool rotator, int dir);
+int mdss_mdp_data_get_and_validate_size(struct mdss_mdp_data *data,
+	struct msmfb_data *planes, int num_planes, u32 flags,
+	struct device *dev, bool rotator, int dir,
+	struct mdp_layer_buffer *buffer);
+u32 mdss_get_panel_framerate(struct msm_fb_data_type *mfd);
+int mdss_mdp_calc_phase_step(u32 src, u32 dst, u32 *out_phase);
+
+void mdss_mdp_intersect_rect(struct mdss_rect *res_rect,
+	const struct mdss_rect *dst_rect,
+	const struct mdss_rect *sci_rect);
+void mdss_mdp_crop_rect(struct mdss_rect *src_rect,
+	struct mdss_rect *dst_rect,
+	const struct mdss_rect *sci_rect);
+void rect_copy_mdss_to_mdp(struct mdp_rect *user, struct mdss_rect *kernel);
+void rect_copy_mdp_to_mdss(struct mdp_rect *user, struct mdss_rect *kernel);
+bool mdss_rect_overlap_check(struct mdss_rect *rect1, struct mdss_rect *rect2);
+void mdss_rect_split(struct mdss_rect *in_roi, struct mdss_rect *l_roi,
+	struct mdss_rect *r_roi, u32 splitpoint);
+
+
+int mdss_mdp_get_ctl_mixers(u32 fb_num, u32 *mixer_id);
+bool mdss_mdp_mixer_reg_has_pipe(struct mdss_mdp_mixer *mixer,
+		struct mdss_mdp_pipe *pipe);
+u32 mdss_mdp_fb_stride(u32 fb_index, u32 xres, int bpp);
+void mdss_check_dsi_ctrl_status(struct work_struct *work, uint32_t interval);
+
+int mdss_mdp_calib_config(struct mdp_calib_config_data *cfg, u32 *copyback);
+int mdss_mdp_calib_config_buffer(struct mdp_calib_config_buffer *cfg,
+						u32 *copyback);
+int mdss_mdp_ctl_update_fps(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_pipe_is_staged(struct mdss_mdp_pipe *pipe);
+int mdss_mdp_writeback_display_commit(struct mdss_mdp_ctl *ctl, void *arg);
+struct mdss_mdp_ctl *mdss_mdp_ctl_mixer_switch(struct mdss_mdp_ctl *ctl,
+					       u32 return_type);
+void mdss_mdp_set_roi(struct mdss_mdp_ctl *ctl,
+	struct mdss_rect *l_roi, struct mdss_rect *r_roi);
+void mdss_mdp_mixer_update_pipe_map(struct mdss_mdp_ctl *master_ctl,
+		int mixer_mux);
+
+void mdss_mdp_pipe_calc_pixel_extn(struct mdss_mdp_pipe *pipe);
+void mdss_mdp_pipe_calc_qseed3_cfg(struct mdss_mdp_pipe *pipe);
+void mdss_mdp_ctl_restore(bool locked);
+int  mdss_mdp_ctl_reset(struct mdss_mdp_ctl *ctl, bool is_recovery);
+int mdss_mdp_wait_for_xin_halt(u32 xin_id, bool is_vbif_nrt);
+void mdss_mdp_set_ot_limit(struct mdss_mdp_set_ot_params *params);
+int mdss_mdp_cmd_set_autorefresh_mode(struct mdss_mdp_ctl *ctl, int frame_cnt);
+int mdss_mdp_cmd_get_autorefresh_mode(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_ctl_cmd_set_autorefresh(struct mdss_mdp_ctl *ctl, int frame_cnt);
+int mdss_mdp_ctl_cmd_get_autorefresh(struct mdss_mdp_ctl *ctl);
+void mdss_mdp_ctl_event_timer(void *data);
+int mdss_mdp_pp_get_version(struct mdp_pp_feature_version *version);
+
+struct mdss_mdp_ctl *mdss_mdp_ctl_alloc(struct mdss_data_type *mdata,
+					       u32 off);
+int mdss_mdp_ctl_free(struct mdss_mdp_ctl *ctl);
+
+struct mdss_mdp_mixer *mdss_mdp_mixer_assign(u32 id, bool wb, bool rot);
+struct mdss_mdp_mixer *mdss_mdp_mixer_alloc(
+		struct mdss_mdp_ctl *ctl, u32 type, int mux, int rotator);
+int mdss_mdp_mixer_free(struct mdss_mdp_mixer *mixer);
+
+bool mdss_mdp_is_wb_mdp_intf(u32 num, u32 reg_index);
+struct mdss_mdp_writeback *mdss_mdp_wb_assign(u32 id, u32 reg_index);
+struct mdss_mdp_writeback *mdss_mdp_wb_alloc(u32 caps, u32 reg_index);
+void mdss_mdp_wb_free(struct mdss_mdp_writeback *wb);
+
+void mdss_mdp_ctl_dsc_setup(struct mdss_mdp_ctl *ctl,
+	struct mdss_panel_info *pinfo);
+
+void mdss_mdp_video_isr(void *ptr, u32 count);
+void mdss_mdp_enable_hw_irq(struct mdss_data_type *mdata);
+void mdss_mdp_disable_hw_irq(struct mdss_data_type *mdata);
+
+void mdss_mdp_set_supported_formats(struct mdss_data_type *mdata);
+
+void mdss_mdp_frc_fsm_init_state(struct mdss_mdp_frc_fsm *frc_fsm);
+void mdss_mdp_frc_fsm_change_state(struct mdss_mdp_frc_fsm *frc_fsm,
+	enum mdss_mdp_frc_state_type state,
+	void (*cb)(struct mdss_mdp_frc_fsm *frc_fsm));
+void mdss_mdp_frc_fsm_update_state(struct mdss_mdp_frc_fsm *frc_fsm);
+
+#ifdef CONFIG_FB_MSM_MDP_NONE
+/* Stub implementations used when MDP hardware support is compiled out. */
+
+/* No MDP: there is no mdss data to return. */
+struct mdss_data_type *mdss_mdp_get_mdata(void)
+{
+	return NULL;
+}
+
+/* No MDP: post-processing info cannot be copied; report failure. */
+int mdss_mdp_copy_layer_pp_info(struct mdp_input_layer *layer)
+{
+	return -EFAULT;
+}
+
+/* No MDP: nothing was allocated, so nothing to free. */
+void mdss_mdp_free_layer_pp_info(struct mdp_input_layer *layer)
+{
+}
+
+#endif /* CONFIG_FB_MSM_MDP_NONE */
+#endif /* MDSS_MDP_H */
diff --git a/drivers/video/fbdev/msm/mdss_mdp_cdm.c b/drivers/video/fbdev/msm/mdss_mdp_cdm.c
new file mode 100644
index 0000000..ab680f5
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_cdm.c
@@ -0,0 +1,384 @@
+/* Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/errno.h>
+#include <linux/mutex.h>
+
+#include "mdss_fb.h"
+#include "mdss_mdp.h"
+#include "mdss_mdp_trace.h"
+#include "mdss_debug.h"
+
+/*
+ * Fixed filter coefficients for the CDWN2 chroma downscaler, programmed
+ * into the COSITE/OFFSITE H/V coefficient registers in cdwn_setup below.
+ */
+static u32 cdm_cdwn2_cosite_h_coeff[] = {0x00000016, 0x000001cc, 0x0100009e};
+static u32 cdm_cdwn2_offsite_h_coeff[] = {0x000b0005, 0x01db01eb, 0x00e40046};
+static u32 cdm_cdwn2_cosite_v_coeff[] = {0x00080004};
+static u32 cdm_cdwn2_offsite_v_coeff[] = {0x00060002};
+
+/* One vsync period at ~60fps, used when waiting for a CDM block to free. */
+#define VSYNC_TIMEOUT_US 16000
+
+/**
+ * @mdss_mdp_cdm_alloc() - Allocates a cdm block by parsing the list of
+ *			     available cdm blocks.
+ *
+ * @mdata - structure containing the list of cdm blocks
+ */
+static struct mdss_mdp_cdm *mdss_mdp_cdm_alloc(struct mdss_data_type *mdata)
+{
+	struct mdss_mdp_cdm *cdm = NULL;
+	u32 i = 0;
+
+	mutex_lock(&mdata->cdm_lock);
+
+	for (i = 0; i < mdata->ncdm; i++) {
+		cdm = mdata->cdm_off + i;
+		/*
+		 * refcount == 0 marks a free block; kref_init() raises it
+		 * to 1, which reserves the block for this caller.
+		 */
+		if (atomic_read(&cdm->kref.refcount) == 0) {
+			kref_init(&cdm->kref);
+			cdm->mdata = mdata;
+			pr_debug("alloc cdm=%d\n", cdm->num);
+			break;
+		}
+		cdm = NULL;
+	}
+
+	mutex_unlock(&mdata->cdm_lock);
+
+	/* NULL if every CDM block is currently in use. */
+	return cdm;
+}
+
+/**
+ *  @mdss_mdp_cdm_free() - Adds the CDM block back to the available list
+ *  @kref: Reference count structure
+ */
+static void mdss_mdp_cdm_free(struct kref *kref)
+{
+	struct mdss_mdp_cdm *cdm = container_of(kref, struct mdss_mdp_cdm,
+						 kref);
+	/*
+	 * NOTE(review): container_of() on a valid kref cannot yield NULL,
+	 * so this check is effectively dead code; kept as defensive style.
+	 */
+	if (!cdm)
+		return;
+
+	/* Wake any waiter in mdss_mdp_cdm_init() polling for a free block. */
+	complete_all(&cdm->free_comp);
+	pr_debug("free cdm_num = %d\n", cdm->num);
+
+}
+
+/**
+ * @mdss_mdp_cdm_init() - Allocates a CDM block and initializes the hardware
+ *			  and software context. This should be called once at
+ *			  when setting up the usecase and released when done.
+ * @ctl:		 Pointer to the control structure.
+ * @intf_type:		 Output interface which will be connected to CDM.
+ */
+struct mdss_mdp_cdm *mdss_mdp_cdm_init(struct mdss_mdp_ctl *ctl, u32 intf_type)
+{
+	struct mdss_data_type *mdata = ctl->mdata;
+	struct mdss_mdp_cdm *cdm = NULL;
+
+	cdm = mdss_mdp_cdm_alloc(mdata);
+
+	/**
+	 * give hdmi interface priority to alloc the cdm block. It will wait
+	 * for one vsync cycle to allow wfd to finish its job and try to reserve
+	 * the block the again.
+	 */
+	if (!cdm && (intf_type == MDP_CDM_CDWN_OUTPUT_HDMI)) {
+		/* always wait for first cdm block */
+		cdm = mdata->cdm_off;
+		if (cdm) {
+			reinit_completion(&cdm->free_comp);
+			/*
+			 * no need to check the return status of completion
+			 * timeout. Next cdm_alloc call will try to reserve
+			 * the cdm block and returns failure if allocation
+			 * fails.
+			 */
+			wait_for_completion_timeout(&cdm->free_comp,
+				usecs_to_jiffies(VSYNC_TIMEOUT_US));
+			cdm = mdss_mdp_cdm_alloc(mdata);
+		}
+	}
+
+	if (!cdm) {
+		pr_err("%s: Unable to allocate cdm\n", __func__);
+		/* All CDM blocks busy even after the HDMI retry window. */
+		return ERR_PTR(-EBUSY);
+	}
+
+	/* Reset software state; hardware is programmed in cdm_setup(). */
+	cdm->out_intf = intf_type;
+	cdm->is_bypassed = true;
+	memset(&cdm->setup, 0x0, sizeof(struct mdp_cdm_cfg));
+
+	return cdm;
+}
+
+/**
+ * @mdss_mdp_cdm_csc_setup - Programs the CSC block.
+ * @cdm:		     Pointer to the CDM structure.
+ * @data:                    Pointer to the structure containing configuration
+ *			     data.
+ */
+static int mdss_mdp_cdm_csc_setup(struct mdss_mdp_cdm *cdm,
+				  struct mdp_cdm_cfg *data)
+{
+	int rc = 0;
+	u32 op_mode = 0;
+
+	/* Program the CSC matrix coefficients for the requested conversion. */
+	mdss_mdp_csc_setup(MDSS_MDP_BLOCK_CDM, cdm->num, data->csc_type);
+
+	/* Enable CSC only for RGB->YUV conversions; otherwise bypass it. */
+	if ((data->csc_type == MDSS_MDP_CSC_RGB2YUV_601L) ||
+		(data->csc_type == MDSS_MDP_CSC_RGB2YUV_601FR) ||
+		(data->csc_type == MDSS_MDP_CSC_RGB2YUV_709L)) {
+		op_mode |= BIT(2);  /* DST_DATA_FORMAT = YUV */
+		op_mode &= ~BIT(1); /* SRC_DATA_FORMAT = RGB */
+		op_mode |= BIT(0);  /* EN = 1 */
+		cdm->is_bypassed = false;
+	} else {
+		op_mode = 0;
+		cdm->is_bypassed = true;
+	}
+
+	writel_relaxed(op_mode, cdm->base + MDSS_MDP_REG_CDM_CSC_10_OPMODE);
+
+	return rc;
+}
+
+/**
+ * @mdss_mdp_cdm_cdwn_setup - Programs the chroma down block.
+ * @cdm:		      Pointer to the CDM structure.
+ * @data:                     Pointer to the structure containing configuration
+ *			      data.
+ */
+static int mdss_mdp_cdm_cdwn_setup(struct mdss_mdp_cdm *cdm,
+			       struct mdp_cdm_cfg *data)
+{
+	int rc = 0;
+	u32 opmode = 0;
+	u32 out_size = 0;
+
+	/* BIT(7): input bit depth select (clear = 10-bit, set = 8-bit). */
+	if (data->mdp_csc_bit_depth == MDP_CDM_CSC_10BIT)
+		opmode &= ~BIT(7);
+	else
+		opmode |= BIT(7);
+
+	/* ENABLE DWNS_H bit */
+	opmode |= BIT(1);
+
+	switch (data->horz_downsampling_type) {
+	case MDP_CDM_CDWN_DISABLE:
+		/* CLEAR METHOD_H field */
+		opmode &= ~(0x18);
+		/* CLEAR DWNS_H bit */
+		opmode &= ~BIT(1);
+		break;
+	case MDP_CDM_CDWN_PIXEL_DROP:
+		/* Clear METHOD_H field (pixel drop is 0) */
+		opmode &= ~(0x18);
+		break;
+	case MDP_CDM_CDWN_AVG:
+		/* Set METHOD_H field (average is 0x1) */
+		opmode &= ~(0x18);
+		opmode |= (0x1 << 0x3);
+		break;
+	case MDP_CDM_CDWN_COSITE:
+		/* Set METHOD_H field (co-site is 0x2) */
+		opmode &= ~(0x18);
+		opmode |= (0x2 << 0x3);
+		/* Co-site horizontal coefficients */
+		writel_relaxed(cdm_cdwn2_cosite_h_coeff[0], cdm->base +
+			       MDSS_MDP_REG_CDM_CDWN2_COEFF_COSITE_H_0);
+		writel_relaxed(cdm_cdwn2_cosite_h_coeff[1], cdm->base +
+			       MDSS_MDP_REG_CDM_CDWN2_COEFF_COSITE_H_1);
+		writel_relaxed(cdm_cdwn2_cosite_h_coeff[2], cdm->base +
+			       MDSS_MDP_REG_CDM_CDWN2_COEFF_COSITE_H_2);
+		break;
+	case MDP_CDM_CDWN_OFFSITE:
+		/* Set METHOD_H field (off-site is 0x3) */
+		opmode &= ~(0x18);
+		opmode |= (0x3 << 0x3);
+
+		/* Off-site horizontal coefficients */
+		writel_relaxed(cdm_cdwn2_offsite_h_coeff[0], cdm->base +
+			       MDSS_MDP_REG_CDM_CDWN2_COEFF_OFFSITE_H_0);
+		writel_relaxed(cdm_cdwn2_offsite_h_coeff[1], cdm->base +
+			       MDSS_MDP_REG_CDM_CDWN2_COEFF_OFFSITE_H_1);
+		writel_relaxed(cdm_cdwn2_offsite_h_coeff[2], cdm->base +
+			       MDSS_MDP_REG_CDM_CDWN2_COEFF_OFFSITE_H_2);
+		break;
+	default:
+		pr_err("%s invalid horz down sampling type\n", __func__);
+		return -EINVAL;
+	}
+
+	/* ENABLE DWNS_V bit */
+	opmode |= BIT(2);
+
+	switch (data->vert_downsampling_type) {
+	case MDP_CDM_CDWN_DISABLE:
+		/* CLEAR METHOD_V field */
+		opmode &= ~(0x60);
+		/* CLEAR DWNS_V bit */
+		opmode &= ~BIT(2);
+		break;
+	case MDP_CDM_CDWN_PIXEL_DROP:
+		/* Clear METHOD_V field (pixel drop is 0) */
+		opmode &= ~(0x60);
+		break;
+	case MDP_CDM_CDWN_AVG:
+		/* Set METHOD_V field (average is 0x1) */
+		opmode &= ~(0x60);
+		opmode |= (0x1 << 0x5);
+		break;
+	case MDP_CDM_CDWN_COSITE:
+		/* Set METHOD_V field (co-site is 0x2) */
+		opmode &= ~(0x60);
+		opmode |= (0x2 << 0x5);
+		/* Co-site vertical coefficients */
+		writel_relaxed(cdm_cdwn2_cosite_v_coeff[0], cdm->base +
+			       MDSS_MDP_REG_CDM_CDWN2_COEFF_COSITE_V);
+		break;
+	case MDP_CDM_CDWN_OFFSITE:
+		/* Set METHOD_V field (off-site is 0x3) */
+		opmode &= ~(0x60);
+		opmode |= (0x3 << 0x5);
+
+		/* Off-site vertical coefficients */
+		writel_relaxed(cdm_cdwn2_offsite_v_coeff[0], cdm->base +
+			       MDSS_MDP_REG_CDM_CDWN2_COEFF_OFFSITE_V);
+		break;
+	default:
+		pr_err("%s invalid vert down sampling type\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Enable the CDWN module only if at least one direction downsamples. */
+	if (data->vert_downsampling_type || data->horz_downsampling_type)
+		opmode |= BIT(0); /* EN CDWN module */
+	else
+		opmode &= ~BIT(0);
+
+	/* Output size register packs height in the high and width in the
+	 * low 16 bits.
+	 */
+	out_size = (data->output_width & 0xFFFF) |
+			((data->output_height & 0xFFFF) << 16);
+	writel_relaxed(out_size, cdm->base + MDSS_MDP_REG_CDM_CDWN2_OUT_SIZE);
+	writel_relaxed(opmode, cdm->base + MDSS_MDP_REG_CDM_CDWN2_OP_MODE);
+	writel_relaxed(((0x3FF << 16) | 0x0),
+		       cdm->base + MDSS_MDP_REG_CDM_CDWN2_CLAMP_OUT);
+	return rc;
+
+}
+
+/**
+ * @mdss_mdp_cdm_out_packer_setup - Programs the output packer block.
+ * @cdm:			    Pointer to the CDM structure.
+ * @data:			    Pointer to the structure containing
+ *				    configuration data.
+ */
+static int mdss_mdp_cdm_out_packer_setup(struct mdss_mdp_cdm *cdm,
+					 struct mdp_cdm_cfg *data)
+{
+	int rc = 0;
+	u32 opmode = 0;
+	u32 cdm_enable = 0;
+	struct mdss_mdp_format_params *fmt;
+
+	if (cdm->out_intf == MDP_CDM_CDWN_OUTPUT_HDMI) {
+		/* Enable HDMI packer */
+		opmode |= BIT(0);
+		fmt = mdss_mdp_get_format_params(data->out_format);
+		if (!fmt) {
+			pr_err("cdm format = %d, not supported\n",
+			       data->out_format);
+			return -EINVAL;
+		}
+		/* Bits [2:1]: chroma sample pattern of the output format. */
+		opmode &= ~0x6;
+		opmode |= (fmt->chroma_sample << 1);
+		if (!cdm->is_bypassed)
+			cdm_enable |= BIT(19);
+
+	} else {
+		/* Disable HDMI packer for WB */
+		opmode = 0;
+		if (!cdm->is_bypassed)
+			cdm_enable |= BIT(24);
+	}
+	/* Route MDP output through the CDM (bit 19: HDMI, bit 24: WB). */
+	writel_relaxed(cdm_enable, cdm->mdata->mdp_base +
+					MDSS_MDP_MDP_OUT_CTL_0);
+	writel_relaxed(opmode, cdm->base + MDSS_MDP_REG_CDM_HDMI_PACK_OP_MODE);
+
+	return rc;
+}
+
+/**
+ * @mdss_mdp_cdm_setup - Sets up the CDM block based on the usecase. The CDM
+ *			 block should be initialized before calling this
+ *			 function.
+ * @cdm:	         Pointer to the CDM structure.
+ * @data:                Pointer to the structure containing configuration
+ *			 data.
+ */
+int mdss_mdp_cdm_setup(struct mdss_mdp_cdm *cdm, struct mdp_cdm_cfg *data)
+{
+	int rc = 0;
+
+	if (!cdm || !data) {
+		pr_err("%s: invalid arguments\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Clocks must be on while touching CDM registers. */
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+	mutex_lock(&cdm->lock);
+	/* Setup CSC block */
+	rc = mdss_mdp_cdm_csc_setup(cdm, data);
+	if (rc) {
+		pr_err("%s: csc configuration failure\n", __func__);
+		goto fail;
+	}
+
+	/* Setup chroma down sampler */
+	rc = mdss_mdp_cdm_cdwn_setup(cdm, data);
+	if (rc) {
+		pr_err("%s: cdwn configuration failure\n", __func__);
+		goto fail;
+	}
+
+	/* Setup HDMI packer */
+	rc = mdss_mdp_cdm_out_packer_setup(cdm, data);
+	if (rc) {
+		pr_err("%s: out packer configuration failure\n", __func__);
+		goto fail;
+	}
+
+	/* Cache the applied configuration for later reference. */
+	memcpy(&cdm->setup, data, sizeof(struct mdp_cdm_cfg));
+
+fail:
+	mutex_unlock(&cdm->lock);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	return rc;
+}
+
+/**
+ * @mdss_mdp_cdm_destroy - Destroys the CDM configuration and return it to
+ *			   default state.
+ * @cdm:                   Pointer to the CDM structure
+ */
+int mdss_mdp_cdm_destroy(struct mdss_mdp_cdm *cdm)
+{
+	int rc = 0;
+
+	if (!cdm) {
+		pr_err("%s: invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * Drop the reference taken in mdss_mdp_cdm_init(); when it reaches
+	 * zero, mdss_mdp_cdm_free() marks the block free and wakes waiters.
+	 */
+	kref_put(&cdm->kref, mdss_mdp_cdm_free);
+
+	return rc;
+}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_cdm.h b/drivers/video/fbdev/msm/mdss_mdp_cdm.h
new file mode 100644
index 0000000..c494720
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_cdm.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2014,2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_MDP_CDM_H
+#define MDSS_MDP_CDM_H
+
+#include <linux/msm_mdp.h>
+#include <linux/kref.h>
+
+/* Chroma downsampling filter methods supported by the CDWN2 block. */
+enum mdp_cdm_cdwn_method_type {
+	MDP_CDM_CDWN_DISABLE,
+	MDP_CDM_CDWN_PIXEL_DROP,
+	MDP_CDM_CDWN_AVG,
+	MDP_CDM_CDWN_COSITE,
+	MDP_CDM_CDWN_OFFSITE,
+};
+
+/* Interfaces a CDM block can feed. */
+enum mdp_cdm_cdwn_output_type {
+	MDP_CDM_CDWN_OUTPUT_HDMI,
+	MDP_CDM_CDWN_OUTPUT_WB,
+};
+
+/* Input bit depth for the CDM CSC stage. */
+enum mdp_cdm_csc_bit_depth {
+	MDP_CDM_CSC_8BIT,
+	MDP_CDM_CSC_10BIT,
+};
+
+/* User-supplied configuration applied by mdss_mdp_cdm_setup(). */
+struct mdp_cdm_cfg {
+	/* CSC block configuration */
+	u32 mdp_csc_bit_depth;
+	u32 csc_type;
+	/* CDWN block configuration */
+	u32 horz_downsampling_type;
+	u32 vert_downsampling_type;
+	/* Output packer configuration */
+	u32 output_width;
+	u32 output_height;
+	u32 out_format;
+};
+
+/* Software context for one hardware CDM block. */
+struct mdss_mdp_cdm {
+	u32 num;		/* hardware instance index */
+	char __iomem *base;	/* mapped register base of this block */
+	struct kref kref;	/* refcount: 0 means the block is free */
+	struct mutex lock;	/* serializes register programming */
+
+	struct mdss_data_type *mdata;
+	u32 out_intf;		/* enum mdp_cdm_cdwn_output_type */
+	bool is_bypassed;	/* true when CSC is not converting */
+	struct mdp_cdm_cfg setup;	/* last applied configuration */
+	struct completion free_comp;	/* signaled when block is released */
+};
+
+struct mdss_mdp_cdm *mdss_mdp_cdm_init(struct mdss_mdp_ctl *ctl,
+				       u32 intf_type);
+int mdss_mdp_cdm_destroy(struct mdss_mdp_cdm *cdm);
+int mdss_mdp_cdm_setup(struct mdss_mdp_cdm *cdm, struct mdp_cdm_cfg *data);
+
+#endif /* MDSS_MDP_CDM_H */
diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
new file mode 100644
index 0000000..6e4a6b3
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
@@ -0,0 +1,5992 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sort.h>
+#include <linux/clk.h>
+#include <linux/bitmap.h>
+
+#include <soc/qcom/event_timer.h>
+#include "mdss_fb.h"
+#include "mdss_mdp.h"
+#include "mdss_mdp_trace.h"
+#include "mdss_debug.h"
+
+#define MDSS_MDP_QSEED3_VER_DOWNSCALE_LIM 2
+#define NUM_MIXERCFG_REGS 3
+#define MDSS_MDP_WB_OUTPUT_BPP	3
+/* Accumulated mixer blend configuration before it is written to HW. */
+struct mdss_mdp_mixer_cfg {
+	u32 config_masks[NUM_MIXERCFG_REGS];
+	bool border_enabled;
+	bool cursor_enabled;
+};
+
+/*
+ * Per-SSPP hardware layout: CTL flush bit plus the bit-field position of
+ * the pipe in the mixer cfg base/ext/ext2 registers (each hwio_cfg names
+ * an offset/size/register triple -- TODO confirm field order against
+ * struct mdss_mdp_hwio_cfg).
+ */
+static struct {
+	u32 flush_bit;
+	struct mdss_mdp_hwio_cfg base;
+	struct mdss_mdp_hwio_cfg ext;
+	struct mdss_mdp_hwio_cfg ext2;
+} mdp_pipe_hwio[MDSS_MDP_MAX_SSPP] = {
+	[MDSS_MDP_SSPP_VIG0]    = {  0, {  0, 3, 0 }, {  0, 1, 3 } },
+	[MDSS_MDP_SSPP_VIG1]    = {  1, {  3, 3, 0 }, {  2, 1, 3 } },
+	[MDSS_MDP_SSPP_VIG2]    = {  2, {  6, 3, 0 }, {  4, 1, 3 } },
+	[MDSS_MDP_SSPP_VIG3]    = { 18, { 26, 3, 0 }, {  6, 1, 3 } },
+	[MDSS_MDP_SSPP_RGB0]    = {  3, {  9, 3, 0 }, {  8, 1, 3 } },
+	[MDSS_MDP_SSPP_RGB1]    = {  4, { 12, 3, 0 }, { 10, 1, 3 } },
+	[MDSS_MDP_SSPP_RGB2]    = {  5, { 15, 3, 0 }, { 12, 1, 3 } },
+	[MDSS_MDP_SSPP_RGB3]    = { 19, { 29, 3, 0 }, { 14, 1, 3 } },
+	[MDSS_MDP_SSPP_DMA0]    = { 11, { 18, 3, 0 }, { 16, 1, 3 } },
+	[MDSS_MDP_SSPP_DMA1]    = { 12, { 21, 3, 0 }, { 18, 1, 3 } },
+	[MDSS_MDP_SSPP_DMA2]    = { 24, .ext2 = {  0, 4, 0 } },
+	[MDSS_MDP_SSPP_DMA3]    = { 25, .ext2 = {  4, 4, 0 } },
+	[MDSS_MDP_SSPP_CURSOR0] = { 22, .ext  = { 20, 4, 0 } },
+	[MDSS_MDP_SSPP_CURSOR1] = { 23, .ext  = { 26, 4, 0 } },
+};
+
+/* ext2-register positions for the second rectangle of multirect DMA pipes. */
+static struct {
+	struct mdss_mdp_hwio_cfg ext2;
+} mdp_pipe_rec1_hwio[MDSS_MDP_MAX_SSPP] = {
+	[MDSS_MDP_SSPP_DMA0]    = { .ext2 = {  8, 4, 0 } },
+	[MDSS_MDP_SSPP_DMA1]    = { .ext2 = { 12, 4, 0 } },
+	[MDSS_MDP_SSPP_DMA2]    = { .ext2 = { 16, 4, 0 } },
+	[MDSS_MDP_SSPP_DMA3]    = { .ext2 = { 20, 4, 0 } },
+};
+
+static void __mdss_mdp_mixer_write_cfg(struct mdss_mdp_mixer *mixer,
+		struct mdss_mdp_mixer_cfg *cfg);
+
+/*
+ * Scale @val by numer/denom using 64-bit math, guarding against overflow
+ * of val * numer; returns @val unscaled when the product would overflow.
+ */
+static inline u64 fudge_factor(u64 val, u32 numer, u32 denom)
+{
+	u64 result = val;
+
+	if (val) {
+		/*
+		 * NOTE(review): -1UL is ULONG_MAX, which on 32-bit kernels
+		 * is only 2^32-1; the overflow guard is therefore stricter
+		 * than necessary there. -1ULL would use the full u64 range.
+		 */
+		u64 temp = -1UL;
+
+		do_div(temp, val);
+		if (temp > numer) {
+			/* no overflow, so we can do the operation*/
+			result = (val * (u64)numer);
+			do_div(result, denom);
+		}
+	}
+	return result;
+}
+
+/* Scale @val up by factor->numer / factor->denom. */
+static inline u64 apply_fudge_factor(u64 val,
+	struct mult_factor *factor)
+{
+	return fudge_factor(val, factor->numer, factor->denom);
+}
+
+/* Scale @val by the reciprocal of @factor (denom / numer). */
+static inline u64 apply_inverse_fudge_factor(u64 val,
+	struct mult_factor *factor)
+{
+	return fudge_factor(val, factor->denom, factor->numer);
+}
+
+static DEFINE_MUTEX(mdss_mdp_ctl_lock);
+
+/* Return the panel pixel clock rate: DSI panels keep it in mipi config. */
+static inline u64 mdss_mdp_get_pclk_rate(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_panel_info *pinfo = &ctl->panel_data->panel_info;
+
+	return (ctl->intf_type == MDSS_INTF_DSI) ?
+		pinfo->mipi.dsi_pclk_rate :
+		pinfo->clk_rate;
+}
+
+/* Apply the global clock fudge factor (twice for small-VBP video panels). */
+static inline u32 mdss_mdp_clk_fudge_factor(struct mdss_mdp_mixer *mixer,
+						u32 rate)
+{
+	struct mdss_panel_info *pinfo = &mixer->ctl->panel_data->panel_info;
+
+	rate = apply_fudge_factor(rate, &mdss_res->clk_factor);
+
+	/*
+	 * If the panel is video mode and its back porch period is
+	 * small, the workaround of increasing mdp clk is needed to
+	 * avoid underrun.
+	 */
+	/*
+	 * NOTE(review): pinfo points at an embedded struct member and can
+	 * never be NULL, so the "pinfo &&" test below is vacuous.
+	 */
+	if (mixer->ctl->is_video_mode && pinfo &&
+		(pinfo->lcdc.v_back_porch < MDP_MIN_VBP))
+		rate = apply_fudge_factor(rate, &mdss_res->clk_factor);
+
+	return rate;
+}
+
+/* Per-pipe inputs for the prefill (latency buffer) byte calculations. */
+struct mdss_mdp_prefill_params {
+	u32 smp_bytes;	/* SMP bytes allocated/fixed for the pipe */
+	u32 xres;	/* panel active width in pixels */
+	u32 src_w;	/* source rect width */
+	u32 dst_w;	/* destination rect width */
+	u32 src_h;	/* source rect height (post-decimation) */
+	u32 dst_h;	/* destination rect height */
+	u32 dst_y;	/* destination rect top line */
+	u32 bpp;	/* bytes per pixel of the source format */
+	u32 pnum;	/* pipe number (for tracing) */
+	bool is_yuv;	/* source format is YUV */
+	bool is_caf;	/* content-adaptive filter in use */
+	bool is_fbc;	/* frame buffer compression enabled */
+	bool is_bwc;	/* bandwidth compression enabled */
+	bool is_tile;	/* tiled format */
+	bool is_hflip;	/* horizontal flip requested */
+	bool is_cmd;	/* command-mode panel */
+	bool is_ubwc;	/* universal BWC format */
+	bool is_nv12;	/* NV12 format (affects UBWC latency lines) */
+};
+
+static inline bool mdss_mdp_perf_is_caf(struct mdss_mdp_pipe *pipe)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	/*
+	 * CAF mode filter is enabled when format is yuv and
+	 * upscaling. Post processing had the decision to use CAF
+	 * under these conditions.
+	 */
+	/* Decimated source height <= dst height means not downscaling. */
+	return ((mdata->mdp_rev >= MDSS_MDP_HW_REV_102) &&
+		pipe->src_fmt->is_yuv && ((pipe->src.h >> pipe->vert_deci) <=
+			pipe->dst.h));
+}
+
+/*
+ * Bytes buffered by the vertical scaler; nonzero only when src and dst
+ * heights differ. YUV uses CAF or bilinear line counts from prefill data.
+ */
+static inline u32 mdss_mdp_calc_y_scaler_bytes(struct mdss_mdp_prefill_params
+	*params, struct mdss_prefill_data *prefill)
+{
+	u32 y_scaler_bytes = 0, y_scaler_lines = 0;
+
+	if (params->is_yuv) {
+		if (params->src_h != params->dst_h) {
+			y_scaler_lines = (params->is_caf) ?
+				prefill->y_scaler_lines_caf :
+				prefill->y_scaler_lines_bilinear;
+			/*
+			 * y is src_width, u is src_width/2 and v is
+			 * src_width/2, so the total is scaler_lines *
+			 * src_w * 2
+			 */
+			y_scaler_bytes = y_scaler_lines * params->src_w * 2;
+		}
+	} else {
+		if (params->src_h != params->dst_h) {
+			y_scaler_lines = prefill->y_scaler_lines_bilinear;
+			y_scaler_bytes = y_scaler_lines * params->src_w *
+				params->bpp;
+		}
+	}
+
+	return y_scaler_bytes;
+}
+
+/*
+ * Add @percentage of the slack between the allocated SMP size and the
+ * computed latency bytes; assumes smp_bytes >= latency_buf_bytes
+ * (TODO confirm -- the subtraction would wrap otherwise).
+ */
+static inline u32 mdss_mdp_align_latency_buf_bytes(
+		u32 latency_buf_bytes, u32 percentage,
+		u32 smp_bytes)
+{
+	u32 aligned_bytes;
+
+	aligned_bytes = ((smp_bytes - latency_buf_bytes) * percentage) / 100;
+
+	pr_debug("percentage=%d, extra_bytes(per)=%d smp_bytes=%d latency=%d\n",
+		percentage, aligned_bytes, smp_bytes, latency_buf_bytes);
+	return latency_buf_bytes + aligned_bytes;
+}
+
+/**
+ * @ mdss_mdp_calc_latency_buf_bytes() -
+ *                             Get the number of bytes for the
+ *                             latency lines.
+ * @is_yuv - true if format is yuv
+ * @is_bwc - true if BWC is enabled
+ * @is_tile - true if it is Tile format
+ * @src_w - source rectangle width
+ * @bpp - Bytes per pixel of source rectangle
+ * @use_latency_buf_percentage - use an extra percentage for
+ *				the latency bytes calculation.
+ * @smp_bytes - size of the smp for alignment
+ * @is_ubwc - true if UBWC is enabled
+ * @is_nv12 - true if NV12 format is used
+ * @is_hflip - true if HFLIP is enabled
+ *
+ * Return:
+ * The amount of bytes to consider for the latency lines, where:
+ *	If use_latency_buf_percentate is  TRUE:
+ *		Function will return the amount of bytes for the
+ *		latency lines plus a percentage of the
+ *		additional bytes allocated to align with the
+ *		SMP size. Percentage is determined by
+ *		"latency_buff_per", which can be modified
+ *		through debugfs.
+ *	If use_latency_buf_percentage is FALSE:
+ *		Function will return only the the amount of bytes
+ *		for the latency lines without any
+ *		extra bytes.
+ */
+u32 mdss_mdp_calc_latency_buf_bytes(bool is_yuv, bool is_bwc,
+	bool is_tile, u32 src_w, u32 bpp, bool use_latency_buf_percentage,
+	u32 smp_bytes, bool is_ubwc, bool is_nv12, bool is_hflip)
+{
+	u32 latency_lines = 0, latency_buf_bytes;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	/* HFLIP needs one extra buffered line unless HW reuses the buffer. */
+	if (is_hflip && !mdata->hflip_buffer_reused)
+		latency_lines = 1;
+
+	if (is_yuv) {
+		if (is_ubwc) {
+			/* NV12 UBWC decodes in 8-line tiles, others in 4. */
+			if (is_nv12)
+				latency_lines += 8;
+			else
+				latency_lines += 4;
+			latency_buf_bytes = src_w * bpp * latency_lines;
+		} else if (is_bwc) {
+			latency_lines += 4;
+			latency_buf_bytes = src_w * bpp * latency_lines;
+		} else {
+			if (!mdata->hflip_buffer_reused)
+				latency_lines += 1;
+			else
+				latency_lines = 2;
+			/* multiply * 2 for the two YUV planes */
+			latency_buf_bytes = mdss_mdp_align_latency_buf_bytes(
+				src_w * bpp * latency_lines,
+				use_latency_buf_percentage ?
+				mdata->latency_buff_per : 0, smp_bytes) * 2;
+		}
+	} else {
+		if (is_ubwc) {
+			latency_lines += 4;
+			latency_buf_bytes = src_w * bpp * latency_lines;
+		} else if (is_tile) {
+			latency_lines += 8;
+			latency_buf_bytes = src_w * bpp * latency_lines;
+		} else if (is_bwc) {
+			latency_lines += 4;
+			latency_buf_bytes = src_w * bpp * latency_lines;
+		} else {
+			if (!mdata->hflip_buffer_reused)
+				latency_lines += 1;
+			else
+				latency_lines = 2;
+			latency_buf_bytes = mdss_mdp_align_latency_buf_bytes(
+				src_w * bpp * latency_lines,
+				use_latency_buf_percentage ?
+				mdata->latency_buff_per : 0, smp_bytes);
+		}
+	}
+
+	return latency_buf_bytes;
+}
+
+/* Scale @val by src/dst ratios in both dimensions, skipping zero divisors. */
+static inline u32 mdss_mdp_calc_scaling_w_h(u32 val, u32 src_h, u32 dst_h,
+	u32 src_w, u32 dst_w)
+{
+	if (dst_h)
+		val = mult_frac(val, src_h, dst_h);
+	if (dst_w)
+		val = mult_frac(val, src_w, dst_w);
+
+	return val;
+}
+
+/*
+ * Prefill bytes a pipe must fetch before vsync on a video-mode panel:
+ * OT + latency buffer + YUV line buffer + scaler + post-scaler +
+ * ping-pong and FBC contributions.
+ */
+static u32 mdss_mdp_perf_calc_pipe_prefill_video(struct mdss_mdp_prefill_params
+	*params)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_prefill_data *prefill = &mdata->prefill_data;
+	u32 prefill_bytes = 0;
+	u32 latency_buf_bytes = 0;
+	u32 y_buf_bytes = 0;
+	u32 y_scaler_bytes = 0;
+	u32 pp_bytes = 0, pp_lines = 0;
+	u32 post_scaler_bytes = 0;
+	u32 fbc_bytes = 0;
+
+	prefill_bytes = prefill->ot_bytes;
+
+	latency_buf_bytes = mdss_mdp_calc_latency_buf_bytes(params->is_yuv,
+		params->is_bwc, params->is_tile, params->src_w, params->bpp,
+		true, params->smp_bytes, params->is_ubwc, params->is_nv12,
+		params->is_hflip);
+	prefill_bytes += latency_buf_bytes;
+	pr_debug("latency_buf_bytes bw_calc=%d actual=%d\n", latency_buf_bytes,
+		params->smp_bytes);
+
+	if (params->is_yuv)
+		y_buf_bytes = prefill->y_buf_bytes;
+
+	y_scaler_bytes = mdss_mdp_calc_y_scaler_bytes(params, prefill);
+
+	prefill_bytes += y_buf_bytes + y_scaler_bytes;
+
+	/* Post-scaler bytes apply whenever any scaling is happening. */
+	if (mdata->apply_post_scale_bytes || (params->src_h != params->dst_h) ||
+			(params->src_w != params->dst_w)) {
+		post_scaler_bytes = prefill->post_scaler_pixels * params->bpp;
+		post_scaler_bytes = mdss_mdp_calc_scaling_w_h(post_scaler_bytes,
+			params->src_h, params->dst_h, params->src_w,
+			params->dst_w);
+		prefill_bytes += post_scaler_bytes;
+	}
+
+	/* Ping-pong buffer cost only if the layer starts within pp_lines. */
+	if (params->xres)
+		pp_lines = DIV_ROUND_UP(prefill->pp_pixels, params->xres);
+	if (params->xres && params->dst_h && (params->dst_y <= pp_lines))
+		pp_bytes = ((params->src_w * params->bpp * prefill->pp_pixels /
+				params->xres) * params->src_h) / params->dst_h;
+	prefill_bytes += pp_bytes;
+
+	if (params->is_fbc) {
+		fbc_bytes = prefill->fbc_lines * params->bpp;
+		fbc_bytes = mdss_mdp_calc_scaling_w_h(fbc_bytes, params->src_h,
+			params->dst_h, params->src_w, params->dst_w);
+	}
+	prefill_bytes += fbc_bytes;
+
+	trace_mdp_perf_prefill_calc(params->pnum, latency_buf_bytes,
+		prefill->ot_bytes, y_buf_bytes, y_scaler_bytes, pp_lines,
+		pp_bytes, post_scaler_bytes, fbc_bytes, prefill_bytes);
+
+	pr_debug("ot=%d y_buf=%d pp_lines=%d pp=%d post_sc=%d fbc_bytes=%d\n",
+		prefill->ot_bytes, y_buf_bytes, pp_lines, pp_bytes,
+		post_scaler_bytes, fbc_bytes);
+
+	return prefill_bytes;
+}
+
+/*
+ * Prefill bytes for a command-mode panel. Layers starting on the first
+ * display line (second when FBC) use a cheaper formula since only format
+ * latency and FBC/command lines must be buffered; later layers also pay
+ * the OT, latency-buffer, YUV and post-scaler costs.
+ */
+static u32 mdss_mdp_perf_calc_pipe_prefill_cmd(struct mdss_mdp_prefill_params
+	*params)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_prefill_data *prefill = &mdata->prefill_data;
+	u32 prefill_bytes;
+	u32 ot_bytes = 0;
+	u32 latency_lines, latency_buf_bytes;
+	u32 y_buf_bytes = 0;
+	u32 y_scaler_bytes;
+	u32 fbc_cmd_lines = 0, fbc_cmd_bytes = 0;
+	u32 post_scaler_bytes = 0;
+
+	/* y_scaler_bytes are same for the first or non first line */
+	y_scaler_bytes = mdss_mdp_calc_y_scaler_bytes(params, prefill);
+	prefill_bytes = y_scaler_bytes;
+
+	/* 1st line if fbc is not enabled and 2nd line if fbc is enabled */
+	if (((params->dst_y == 0) && !params->is_fbc) ||
+		((params->dst_y <= 1) && params->is_fbc)) {
+		if (params->is_ubwc) {
+			if (params->is_nv12)
+				latency_lines = 8;
+			else
+				latency_lines = 4;
+		} else if (params->is_bwc || params->is_tile) {
+			latency_lines = 4;
+		} else if (params->is_hflip) {
+			latency_lines = 1;
+		} else {
+			latency_lines = 0;
+		}
+		latency_buf_bytes = params->src_w * params->bpp * latency_lines;
+		prefill_bytes += latency_buf_bytes;
+
+		/* One command-mode line, plus one more when FBC is on. */
+		fbc_cmd_lines++;
+		if (params->is_fbc)
+			fbc_cmd_lines++;
+		fbc_cmd_bytes = params->bpp * params->dst_w * fbc_cmd_lines;
+		fbc_cmd_bytes = mdss_mdp_calc_scaling_w_h(fbc_cmd_bytes,
+			params->src_h, params->dst_h, params->src_w,
+			params->dst_w);
+		prefill_bytes += fbc_cmd_bytes;
+	} else {
+		ot_bytes = prefill->ot_bytes;
+		prefill_bytes += ot_bytes;
+
+		latency_buf_bytes = mdss_mdp_calc_latency_buf_bytes(
+			params->is_yuv, params->is_bwc, params->is_tile,
+			params->src_w, params->bpp, true, params->smp_bytes,
+			params->is_ubwc, params->is_nv12, params->is_hflip);
+		prefill_bytes += latency_buf_bytes;
+
+		if (params->is_yuv)
+			y_buf_bytes = prefill->y_buf_bytes;
+		prefill_bytes += y_buf_bytes;
+
+		if (mdata->apply_post_scale_bytes ||
+				(params->src_h != params->dst_h) ||
+				(params->src_w != params->dst_w)) {
+			post_scaler_bytes = prefill->post_scaler_pixels *
+				params->bpp;
+			post_scaler_bytes = mdss_mdp_calc_scaling_w_h(
+				post_scaler_bytes, params->src_h,
+				params->dst_h, params->src_w,
+				params->dst_w);
+			prefill_bytes += post_scaler_bytes;
+		}
+	}
+
+	pr_debug("ot=%d bwc=%d smp=%d y_buf=%d fbc=%d\n", ot_bytes,
+		params->is_bwc, latency_buf_bytes, y_buf_bytes, fbc_cmd_bytes);
+
+	return prefill_bytes;
+}
+
+/*
+ * Prefill bytes for the simplified "single prefill" model: format latency
+ * lines + vertical scaler bytes + command/FBC line overhead.
+ */
+u32 mdss_mdp_perf_calc_pipe_prefill_single(struct mdss_mdp_prefill_params
+	*params)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_prefill_data *prefill = &mdata->prefill_data;
+	u32 prefill_bytes;
+	u32 latency_lines, latency_buf_bytes;
+	u32 y_scaler_bytes;
+	u32 fbc_cmd_lines = 0, fbc_cmd_bytes = 0;
+
+	if (params->is_ubwc) {
+		if (params->is_nv12)
+			latency_lines = 8;
+		else
+			latency_lines = 4;
+	} else if (params->is_bwc || params->is_tile)
+		/* can start processing after receiving 4 lines */
+		latency_lines = 4;
+	else if (params->is_hflip)
+		/* need oneline before reading backwards */
+		latency_lines = 1;
+	else
+		latency_lines = 0;
+	latency_buf_bytes = params->src_w * params->bpp * latency_lines;
+	prefill_bytes = latency_buf_bytes;
+
+	y_scaler_bytes = mdss_mdp_calc_y_scaler_bytes(params, prefill);
+	prefill_bytes += y_scaler_bytes;
+
+	/* One extra line each for command-mode output and FBC. */
+	if (params->is_cmd)
+		fbc_cmd_lines++;
+	if (params->is_fbc)
+		fbc_cmd_lines++;
+
+	if (fbc_cmd_lines) {
+		fbc_cmd_bytes = params->bpp * params->dst_w * fbc_cmd_lines;
+		fbc_cmd_bytes = mdss_mdp_calc_scaling_w_h(fbc_cmd_bytes,
+			params->src_h, params->dst_h, params->src_w,
+			params->dst_w);
+		prefill_bytes += fbc_cmd_bytes;
+	}
+
+	return prefill_bytes;
+}
+
+/*
+ * Return the SMP byte size to use for @pipe in perf calculations.
+ * Cursor pipes use no SMP. When @calc_smp_size is set and the target has
+ * no pixel RAM, also account for blocks not yet allocated.
+ */
+u32 mdss_mdp_perf_calc_smp_size(struct mdss_mdp_pipe *pipe,
+	bool calc_smp_size)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 smp_bytes;
+
+	if (pipe->type == MDSS_MDP_PIPE_TYPE_CURSOR)
+		return 0;
+
+	/* Get allocated or fixed smp bytes */
+	smp_bytes = mdss_mdp_smp_get_size(pipe);
+
+	/*
+	 * We need to calculate the SMP size for scenarios where
+	 * allocation have not happened yet (i.e. during prepare IOCTL).
+	 */
+	if (calc_smp_size && !mdata->has_pixel_ram) {
+		u32 calc_smp_total;
+
+		calc_smp_total = mdss_mdp_smp_calc_num_blocks(pipe);
+		calc_smp_total *= mdata->smp_mb_size;
+
+		/*
+		 * If the pipe has fixed SMPs, then we must consider
+		 * the max smp size.
+		 */
+		if (calc_smp_total > smp_bytes)
+			smp_bytes = calc_smp_total;
+	}
+
+	pr_debug("SMP size (bytes) %d for pnum=%d calc=%d\n",
+		smp_bytes, pipe->num, calc_smp_size);
+	WARN_ON(smp_bytes == 0);
+
+	return smp_bytes;
+}
+
+/*
+ * Set bandwidth vote-mode bits in @perf for MDP revs 1.5/1.9:
+ * SINGLE_LAYER for a lone layer on a non-rotator interface mixer
+ * (per-pipe mode), SINGLE_IF for unsplit video-mode panels (per-ctl
+ * mode). @data is a mixer or ctl pointer depending on @calc_mode.
+ */
+static void mdss_mdp_get_bw_vote_mode(void *data,
+	u32 mdp_rev, struct mdss_mdp_perf_params *perf,
+	enum perf_calc_vote_mode calc_mode, u32 flags)
+{
+
+	if (!data)
+		goto exit;
+
+	switch (mdp_rev) {
+	case MDSS_MDP_HW_REV_105:
+	case MDSS_MDP_HW_REV_109:
+		if (calc_mode == PERF_CALC_VOTE_MODE_PER_PIPE) {
+			struct mdss_mdp_mixer *mixer =
+				(struct mdss_mdp_mixer *)data;
+
+			if ((flags & PERF_CALC_PIPE_SINGLE_LAYER) &&
+				!mixer->rotator_mode &&
+				(mixer->type == MDSS_MDP_MIXER_TYPE_INTF))
+				set_bit(MDSS_MDP_BW_MODE_SINGLE_LAYER,
+					perf->bw_vote_mode);
+		} else if (calc_mode == PERF_CALC_VOTE_MODE_CTL) {
+			struct mdss_mdp_ctl *ctl = (struct mdss_mdp_ctl *)data;
+
+			if (ctl->is_video_mode &&
+				(ctl->mfd->split_mode == MDP_SPLIT_MODE_NONE))
+				set_bit(MDSS_MDP_BW_MODE_SINGLE_IF,
+					perf->bw_vote_mode);
+		}
+		break;
+	default:
+		break;
+	};	/* NOTE(review): stray ';' after switch is harmless */
+
+	pr_debug("mode=0x%lx\n", *(perf->bw_vote_mode));
+
+exit:
+	return;
+}
+
+/*
+ * MDP clock rate needed by a QSEED3-scaled pipe: cycles per output line
+ * (active fetch plus backfill beyond the downscale limit, at 4 pix/clk,
+ * never fewer than dst.w cycles) times lines per second.
+ */
+static u32 __calc_qseed3_mdp_clk_rate(struct mdss_mdp_pipe *pipe,
+	struct mdss_rect src, struct mdss_rect dst, u32 src_h,
+	u32 fps, u32 v_total)
+{
+	u32 active_line_cycle, backfill_cycle, total_cycle;
+	u32 ver_dwnscale;
+	u32 active_line;
+	u32 backfill_line;
+
+	/* Vertical downscale ratio in PHASE_STEP fixed-point. */
+	ver_dwnscale = (src_h << PHASE_STEP_SHIFT) / dst.h;
+
+	if (ver_dwnscale > (MDSS_MDP_QSEED3_VER_DOWNSCALE_LIM
+			<< PHASE_STEP_SHIFT)) {
+		active_line = MDSS_MDP_QSEED3_VER_DOWNSCALE_LIM
+			<< PHASE_STEP_SHIFT;
+		backfill_line = ver_dwnscale - active_line;
+	} else {
+		/* active line same as downscale and no backfill */
+		active_line = ver_dwnscale;
+		backfill_line = 0;
+	}
+
+	active_line_cycle = mult_frac(active_line, src.w,
+		4) >> PHASE_STEP_SHIFT; /* 4pix/clk */
+	if (active_line_cycle < dst.w)
+		active_line_cycle = dst.w;
+
+	backfill_cycle = mult_frac(backfill_line, src.w, 4) /* 4pix/clk */
+		>> PHASE_STEP_SHIFT;
+
+	total_cycle = active_line_cycle + backfill_cycle;
+
+	pr_debug("line: active=%d backfill=%d vds=%d\n",
+		active_line, backfill_line, ver_dwnscale);
+	pr_debug("cycle: total=%d active=%d backfill=%d\n",
+		total_cycle, active_line_cycle, backfill_cycle);
+
+	return total_cycle * (fps * v_total);
+}
+
+/* True when the pipe scales down vertically (src taller than dst). */
+static inline bool __is_vert_downscaling(u32 src_h,
+	struct mdss_rect dst){
+
+	return (src_h > dst.h);
+}
+
+/*
+ * Compute the MDP clock rate a pipe requires: rotator block-fetch rate,
+ * QSEED3 downscale rate, or the generic dst-width based rate (bumped for
+ * BWC decode), optionally with the clock fudge factor applied.
+ */
+static u32 get_pipe_mdp_clk_rate(struct mdss_mdp_pipe *pipe,
+	struct mdss_rect src, struct mdss_rect dst,
+	u32 fps, u32 v_total, u32 flags)
+{
+	struct mdss_mdp_mixer *mixer;
+	u32 rate, src_h;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	/*
+	 * when doing vertical decimation lines will be skipped, hence there is
+	 * no need to account for these lines in MDP clock or request bus
+	 * bandwidth to fetch them.
+	 */
+	mixer = pipe->mixer_left;
+	src_h = DECIMATED_DIMENSION(src.h, pipe->vert_deci);
+
+	if (mixer->rotator_mode) {
+
+		rate = pipe->src.w * pipe->src.h * fps;
+		rate /= 4; /* block mode fetch at 4 pix/clk */
+	} else if (test_bit(MDSS_CAPS_QSEED3, mdata->mdss_caps_map) &&
+		pipe->scaler.enable && __is_vert_downscaling(src_h, dst)) {
+
+		rate = __calc_qseed3_mdp_clk_rate(pipe, src, dst, src_h,
+			fps, v_total);
+	} else {
+
+		rate = dst.w;
+		if (src_h > dst.h)
+			rate = (rate * src_h) / dst.h;
+
+		rate *= v_total * fps;
+
+		/* pipes decoding BWC content have different clk requirement */
+		if (pipe->bwc_mode && !pipe->src_fmt->is_yuv &&
+		    pipe->src_fmt->bpp == 4) {
+			u32 bwc_rate =
+			mult_frac((src.w * src_h * fps), v_total, dst.h << 1);
+			pr_debug("src: w:%d h:%d fps:%d vtotal:%d dst h:%d\n",
+				src.w, src_h, fps, v_total, dst.h);
+			pr_debug("pipe%d: bwc_rate=%d normal_rate=%d\n",
+				pipe->num, bwc_rate, rate);
+			rate = max(bwc_rate, rate);
+		}
+	}
+
+	if (flags & PERF_CALC_PIPE_APPLY_CLK_FUDGE)
+		rate = mdss_mdp_clk_fudge_factor(mixer, rate);
+
+	return rate;
+}
+
+/*
+ * mdss_mdp_get_rotator_fps() - effective frame rate for rotator sessions
+ *
+ * Large (>= 4K dimension) sources are throttled to the low rotator rate;
+ * traffic shaping forces the default rotator rate; otherwise the pipe's
+ * own frame rate is used, falling back to the global default.
+ */
+static u32 mdss_mdp_get_rotator_fps(struct mdss_mdp_pipe *pipe)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 rot_fps = DEFAULT_FRAME_RATE;
+
+	if (pipe->src.w >= 3840 || pipe->src.h >= 3840)
+		rot_fps = ROTATOR_LOW_FRAME_RATE;
+	else if (mdata->traffic_shaper_en)
+		rot_fps = DEFAULT_ROTATOR_FRAME_RATE;
+	else if (pipe->frame_rate)
+		rot_fps = pipe->frame_rate;
+
+	pr_debug("rotator fps:%d\n", rot_fps);
+
+	return rot_fps;
+}
+
+/*
+ * mdss_mdp_get_panel_params() - fetch timing parameters for perf calc
+ * @pipe:	pipe (used only for the rotator fps lookup)
+ * @mixer:	mixer whose path determines where the timings come from
+ * @fps:	output, frame rate
+ * @v_total:	output, total vertical lines
+ * @h_total:	output, total horizontal pixels
+ * @xres:	output, active horizontal resolution
+ *
+ * NOTE(review): in rotator mode only *fps is written; v_total/h_total/
+ * xres keep whatever the caller initialized them to — confirm callers
+ * always pre-zero these.
+ *
+ * Returns 0 on success, -EINVAL if an INTF mixer has no ctl.
+ */
+int mdss_mdp_get_panel_params(struct mdss_mdp_pipe *pipe,
+	struct mdss_mdp_mixer *mixer, u32 *fps, u32 *v_total,
+	u32 *h_total, u32 *xres)
+{
+
+	if (mixer->rotator_mode) {
+		*fps = mdss_mdp_get_rotator_fps(pipe);
+	} else if (mixer->type == MDSS_MDP_MIXER_TYPE_INTF) {
+		struct mdss_panel_info *pinfo;
+
+		if (!mixer->ctl)
+			return -EINVAL;
+
+		pinfo = &mixer->ctl->panel_data->panel_info;
+		/* video panels use the max-fps timing for worst case */
+		if (pinfo->type == MIPI_VIDEO_PANEL) {
+			*fps = pinfo->panel_max_fps;
+			*v_total = pinfo->panel_max_vtotal;
+		} else {
+			*fps = mdss_panel_get_framerate(pinfo,
+					FPS_RESOLUTION_HZ);
+			*v_total = mdss_panel_get_vtotal(pinfo);
+		}
+		*xres = get_panel_width(mixer->ctl);
+		*h_total = mdss_panel_get_htotal(pinfo, false);
+
+		/* pingpong split drives two panels; sum both htotals */
+		if (is_pingpong_split(mixer->ctl->mfd))
+			*h_total += mdss_panel_get_htotal(
+				&mixer->ctl->panel_data->next->panel_info,
+				false);
+	} else {
+		/* writeback / non-INTF mixer: use the mixer geometry */
+		*v_total = mixer->height;
+		*xres = mixer->width;
+		*h_total = mixer->width;
+		*fps = DEFAULT_FRAME_RATE;
+	}
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_get_pipe_overlap_bw() - fetch bandwidth quotas for a pipe
+ * @pipe:	pipe to calculate bandwidth for
+ * @roi:	optional ROI used to crop src/dst for command mode panels
+ * @quota:	output, bandwidth with compression-ratio factor applied
+ * @quota_nocr:	output, bandwidth without the compression-ratio factor
+ * @flags:	perf calculation flags (unused in this function)
+ *
+ * Returns 0 on success or -EINVAL if panel params cannot be fetched.
+ * Braces were added around the chroma-420 branch: the else previously
+ * relied on dangling-else binding, which is a readability hazard.
+ */
+int mdss_mdp_get_pipe_overlap_bw(struct mdss_mdp_pipe *pipe,
+	struct mdss_rect *roi, u64 *quota, u64 *quota_nocr, u32 flags)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_mixer *mixer = pipe->mixer_left;
+	struct mdss_rect src, dst;
+	u32 v_total = 0, h_total = 0, xres = 0, src_h = 0;
+	u32 fps = DEFAULT_FRAME_RATE;
+
+	*quota = 0;
+	*quota_nocr = 0;
+
+	if (mdss_mdp_get_panel_params(pipe, mixer, &fps, &v_total,
+			&h_total, &xres)) {
+		pr_err("error retrieving the panel params!\n");
+		return -EINVAL;
+	}
+
+	dst = pipe->dst;
+	src = pipe->src;
+
+	/* crop rectangles */
+	if (roi && !mixer->ctl->is_video_mode && !pipe->src_split_req)
+		mdss_mdp_crop_rect(&src, &dst, roi);
+
+	/*
+	 * when doing vertical decimation lines will be skipped, hence there is
+	 * no need to account for these lines in MDP clock or request bus
+	 * bandwidth to fetch them.
+	 */
+	src_h = DECIMATED_DIMENSION(src.h, pipe->vert_deci);
+
+	*quota = fps * src.w * src_h;
+
+	if (pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_420) {
+		/*
+		 * with decimation, chroma is not downsampled, this means we
+		 * need to allocate bw for extra lines that will be fetched
+		 */
+		if (pipe->vert_deci)
+			*quota *= 2;
+		else
+			*quota = (*quota * 3) / 2;
+	} else {
+		*quota *= pipe->src_fmt->bpp;
+	}
+
+	if (mixer->rotator_mode) {
+		if (test_bit(MDSS_QOS_OVERHEAD_FACTOR,
+				mdata->mdss_qos_map)) {
+			/* rotator read */
+			*quota_nocr += (*quota * 2);
+			*quota = apply_comp_ratio_factor(*quota,
+				pipe->src_fmt, &pipe->comp_ratio);
+			/*
+			 * rotator write: here we are using src_fmt since
+			 * current implementation only supports calculate
+			 * bandwidth based in the source parameters.
+			 * The correct fine-tuned calculation should use
+			 * destination format and destination rectangles to
+			 * calculate the bandwidth, but leaving this
+			 * calculation as per current support.
+			 */
+			*quota += apply_comp_ratio_factor(*quota,
+				pipe->src_fmt, &pipe->comp_ratio);
+		} else {
+			*quota *= 2; /* bus read + write */
+		}
+	} else {
+
+		*quota = DIV_ROUND_UP_ULL(*quota * v_total, dst.h);
+		if (!mixer->ctl->is_video_mode)
+			*quota = DIV_ROUND_UP_ULL(*quota * h_total, xres);
+
+		*quota_nocr = *quota;
+
+		if (test_bit(MDSS_QOS_OVERHEAD_FACTOR,
+				mdata->mdss_qos_map))
+			*quota = apply_comp_ratio_factor(*quota,
+				pipe->src_fmt, &pipe->comp_ratio);
+	}
+
+	pr_debug("quota:%llu nocr:%llu src.w:%d src.h%d comp:[%d, %d]\n",
+		*quota, *quota_nocr, src.w, src_h, pipe->comp_ratio.numer,
+		pipe->comp_ratio.denom);
+
+	return 0;
+}
+
+/* A compression ratio is usable only if both terms are non-zero. */
+static inline bool validate_comp_ratio(struct mult_factor *factor)
+{
+	return (factor->numer != 0) && (factor->denom != 0);
+}
+
+/*
+ * apply_comp_ratio_factor() - scale a bw quota by the compression ratio
+ * @quota:	bandwidth quota to adjust
+ * @fmt:	pixel format of the surface
+ * @factor:	compression ratio (numerator/denominator)
+ *
+ * Quota is only adjusted for UBWC formats with a valid ratio, and only
+ * when the target supports the overhead-factor QoS feature.
+ */
+u32 apply_comp_ratio_factor(u32 quota,
+	struct mdss_mdp_format_params *fmt,
+	struct mult_factor *factor)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!mdata)
+		return quota;
+
+	if (!test_bit(MDSS_QOS_OVERHEAD_FACTOR, mdata->mdss_qos_map))
+		return quota;
+
+	/* apply compression ratio, only for compressed formats */
+	if (!mdss_mdp_is_ubwc_format(fmt) || !validate_comp_ratio(factor))
+		return quota;
+
+	return apply_inverse_fudge_factor(quota, factor);
+}
+
+/*
+ * mdss_mdp_get_vbp_factor() - vertical blanking factor for one ctl
+ *
+ * Factor is fps * v_total / vbp (in Hz), where vbp covers back porch,
+ * pulse width and the programmable fetch lines. Returns 0 when the ctl
+ * is invalid or the blanking interval is zero.
+ */
+static u32 mdss_mdp_get_vbp_factor(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_panel_info *pinfo;
+	u32 fps, v_total, vbp;
+	u32 factor = 0;
+
+	if (!ctl || !ctl->panel_data)
+		return 0;
+
+	pinfo = &ctl->panel_data->panel_info;
+	fps = mdss_panel_get_framerate(pinfo, FPS_RESOLUTION_HZ);
+	v_total = mdss_panel_get_vtotal(pinfo);
+
+	vbp = pinfo->lcdc.v_back_porch + pinfo->lcdc.v_pulse_width +
+		pinfo->prg_fet;
+	if (vbp)
+		factor = fps * v_total / vbp;
+
+	pr_debug("vbp_fac=%d vbp=%d v_total=%d\n", factor, vbp, v_total);
+
+	return factor;
+}
+
+/*
+ * mdss_mdp_get_vbp_factor_max() - largest vbp factor across active ctls
+ * @ctl: any valid ctl, used to reach the global mdata
+ *
+ * Walks every ctl and returns the maximum vbp factor of the powered-on
+ * ones. The loop-local variable is renamed to tmp_ctl: it previously
+ * shadowed the @ctl parameter (-Wshadow), and this now matches the
+ * naming in __get_min_prefill_line_time_us().
+ */
+static u32 mdss_mdp_get_vbp_factor_max(struct mdss_mdp_ctl *ctl)
+{
+	u32 vbp_max = 0;
+	int i;
+	struct mdss_data_type *mdata;
+
+	if (!ctl || !ctl->mdata)
+		return 0;
+
+	mdata = ctl->mdata;
+	for (i = 0; i < mdata->nctl; i++) {
+		struct mdss_mdp_ctl *tmp_ctl = mdata->ctl_off + i;
+		u32 vbp_fac;
+
+		/* skip command mode interfaces */
+		if (test_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map)
+				&& !tmp_ctl->is_video_mode)
+			continue;
+
+		if (mdss_mdp_ctl_is_power_on(tmp_ctl)) {
+			vbp_fac = mdss_mdp_get_vbp_factor(tmp_ctl);
+			vbp_max = max(vbp_max, vbp_fac);
+		}
+	}
+
+	return vbp_max;
+}
+
+/*
+ * __calc_prefill_line_time_us() - vertical blanking time in microseconds
+ *
+ * Converts the blanking interval (back porch + pulse width + programmable
+ * fetch lines) into microseconds of a frame period.
+ */
+static u32 __calc_prefill_line_time_us(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_panel_info *pinfo;
+	u32 fps, v_total, vbp, blanking_us;
+
+	if (!ctl || !ctl->panel_data)
+		return 0;
+
+	pinfo = &ctl->panel_data->panel_info;
+	fps = mdss_panel_get_framerate(pinfo, FPS_RESOLUTION_HZ);
+	v_total = mdss_panel_get_vtotal(pinfo);
+
+	vbp = pinfo->lcdc.v_back_porch + pinfo->lcdc.v_pulse_width +
+		pinfo->prg_fet;
+
+	/* fraction of the frame period spent in vertical blanking, in us */
+	blanking_us = mult_frac(USEC_PER_SEC, vbp, fps * v_total);
+	pr_debug("vbp_fac=%d vbp=%d v_total=%d fps=%d\n",
+		blanking_us, vbp, v_total, fps);
+
+	return blanking_us;
+}
+
+/*
+ * __get_min_prefill_line_time_us() - smallest blanking time of active ctls
+ * @ctl: any valid ctl, used to reach the global mdata
+ *
+ * Returns the minimum prefill line time (us) across powered-on video
+ * mode interfaces, or 0 when none are active.
+ *
+ * Fix: vbp_min started at 0, so min(vbp_min, vbp_fac) could never rise
+ * above 0 and the function always returned 0. Seed the minimum with the
+ * first powered-on ctl's value instead.
+ */
+static u32 __get_min_prefill_line_time_us(struct mdss_mdp_ctl *ctl)
+{
+	u32 vbp_min = 0;
+	int i;
+	struct mdss_data_type *mdata;
+
+	if (!ctl || !ctl->mdata)
+		return 0;
+
+	mdata = ctl->mdata;
+	for (i = 0; i < mdata->nctl; i++) {
+		struct mdss_mdp_ctl *tmp_ctl = mdata->ctl_off + i;
+		u32 vbp_fac;
+
+		/* skip command mode interfaces */
+		if (!tmp_ctl->is_video_mode)
+			continue;
+
+		if (mdss_mdp_ctl_is_power_on(tmp_ctl)) {
+			vbp_fac = __calc_prefill_line_time_us(tmp_ctl);
+
+			/* seed with the first value; do not return zero */
+			if (!vbp_min)
+				vbp_min = vbp_fac;
+			else
+				vbp_min = min(vbp_min, vbp_fac);
+		}
+	}
+
+	return vbp_min;
+}
+
+/*
+ * mdss_mdp_calc_prefill_line_time() - prefill time budget for a pipe (us)
+ * @ctl:	control path for the display
+ * @pipe:	pipe contributing to the prefill
+ *
+ * Base budget is the minimum blanking line time across active video
+ * interfaces; amortizable pipes get extra time proportional to their
+ * destination y offset within the frame period.
+ *
+ * Fix: mdata was declared but never assigned before being passed to
+ * mdss_mdp_is_amortizable_pipe() — an uninitialized read.
+ */
+static u32 mdss_mdp_calc_prefill_line_time(struct mdss_mdp_ctl *ctl,
+	struct mdss_mdp_pipe *pipe)
+{
+	u32 prefill_us = 0;
+	u32 prefill_amortized = 0;
+	struct mdss_data_type *mdata;
+	struct mdss_mdp_mixer *mixer;
+	struct mdss_panel_info *pinfo;
+	u32 fps, v_total;
+
+	if (!ctl || !ctl->mdata)
+		return 0;
+
+	/* previously left uninitialized; read by the amortizable check */
+	mdata = ctl->mdata;
+
+	mixer = pipe->mixer_left;
+	if (!mixer)
+		return -EINVAL;
+
+	pinfo = &ctl->panel_data->panel_info;
+	fps = mdss_panel_get_framerate(pinfo,
+		FPS_RESOLUTION_HZ);
+	v_total = mdss_panel_get_vtotal(pinfo);
+
+	/* calculate the minimum prefill */
+	prefill_us = __get_min_prefill_line_time_us(ctl);
+
+	/* if pipe is amortizable, add the amortized prefill contribution */
+	if (mdss_mdp_is_amortizable_pipe(pipe, mixer, mdata)) {
+		prefill_amortized = mult_frac(USEC_PER_SEC, pipe->src.y,
+			fps * v_total);
+		prefill_us += prefill_amortized;
+	}
+
+	return prefill_us;
+}
+
+/* True when this rect sits further from the origin than its paired rect. */
+static inline bool __is_multirect_high_pipe(struct mdss_mdp_pipe *pipe)
+{
+	struct mdss_mdp_pipe *paired = pipe->multirect.next;
+
+	return pipe->src.y > paired->src.y;
+}
+
+/*
+ * mdss_mdp_apply_prefill_factor() - convert prefill bytes into bandwidth
+ * @prefill_bw:	prefill data amount (bytes) for the pipe
+ * @ctl:	control path for the display
+ * @pipe:	pipe the prefill belongs to
+ *
+ * With the time-based QoS feature the bytes are divided by the prefill
+ * time budget; otherwise the legacy vbp-factor scaling is applied.
+ */
+static u64 mdss_mdp_apply_prefill_factor(u64 prefill_bw,
+	struct mdss_mdp_ctl *ctl, struct mdss_mdp_pipe *pipe)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 prefill_time_us;
+
+	if (!test_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map))
+		return prefill_bw * mdss_mdp_get_vbp_factor_max(ctl);
+
+	/*
+	 * for multi-rect serial mode, only take the contribution from
+	 * pipe that belongs to the rect closest to the origin.
+	 */
+	if (pipe->multirect.mode == MDSS_MDP_PIPE_MULTIRECT_SERIAL &&
+	    __is_multirect_high_pipe(pipe))
+		return 0;
+
+	prefill_time_us = mdss_mdp_calc_prefill_line_time(ctl, pipe);
+	if (!prefill_time_us)
+		return 0;
+
+	return DIV_ROUND_UP_ULL(USEC_PER_SEC * prefill_bw, prefill_time_us);
+}
+
+/*
+ * mdss_mdp_perf_calc_simplified_prefill() - simplified prefill bw for a pipe
+ * @pipe:	pipe to compute prefill for
+ * @v_total:	total vertical lines (currently unused in the body)
+ * @fps:	frame rate (currently unused in the body)
+ * @ctl:	control path; command mode returns 0 immediately
+ *
+ * Prefill bytes-per-line are scaled by a line count accumulated from
+ * format and scaling factors, then converted into bandwidth via
+ * mdss_mdp_apply_prefill_factor().
+ */
+u64 mdss_mdp_perf_calc_simplified_prefill(struct mdss_mdp_pipe *pipe,
+	u32 v_total, u32 fps, struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct simplified_prefill_factors *pfactors =
+			&mdata->prefill_data.prefill_factors;
+	u64 prefill_per_pipe = 0;
+	u32 prefill_lines = pfactors->xtra_ff_factor;
+
+
+	/* do not calculate prefill for command mode */
+	if (!ctl->is_video_mode)
+		goto exit;
+
+	/* bytes fetched per source line */
+	prefill_per_pipe = pipe->src.w * pipe->src_fmt->bpp;
+
+	/* format factors */
+	if (mdss_mdp_is_tile_format(pipe->src_fmt)) {
+		if (mdss_mdp_is_nv12_format(pipe->src_fmt))
+			prefill_lines += pfactors->fmt_mt_nv12_factor;
+		else
+			prefill_lines += pfactors->fmt_mt_factor;
+	} else {
+		prefill_lines += pfactors->fmt_linear_factor;
+	}
+
+	/* scaling factors */
+	if (pipe->src.h > pipe->dst.h) {
+		prefill_lines += pfactors->scale_factor;
+
+		prefill_per_pipe = fudge_factor(prefill_per_pipe,
+			DECIMATED_DIMENSION(pipe->src.h, pipe->vert_deci),
+			pipe->dst.h);
+	}
+
+	prefill_per_pipe *= prefill_lines;
+	prefill_per_pipe = mdss_mdp_apply_prefill_factor(prefill_per_pipe,
+		ctl, pipe);
+
+	pr_debug("pipe src: %dx%d bpp:%d\n",
+		pipe->src.w, pipe->src.h, pipe->src_fmt->bpp);
+	pr_debug("ff_factor:%d mt_nv12:%d mt:%d\n",
+		pfactors->xtra_ff_factor,
+		(mdss_mdp_is_tile_format(pipe->src_fmt) &&
+		mdss_mdp_is_nv12_format(pipe->src_fmt)) ?
+		pfactors->fmt_mt_nv12_factor : 0,
+		mdss_mdp_is_tile_format(pipe->src_fmt) ?
+		pfactors->fmt_mt_factor : 0);
+	pr_debug("pipe prefill:%llu lines:%d\n",
+		prefill_per_pipe, prefill_lines);
+
+exit:
+	return prefill_per_pipe;
+}
+
+/**
+ * mdss_mdp_perf_calc_pipe() - calculate performance numbers required by pipe
+ * @pipe:	Source pipe struct containing updated pipe params
+ * @perf:	Structure containing values that should be updated for
+ *		performance tuning
+ * @flags: flags to determine how to perform some of the
+ *		calculations, supported flags:
+ *
+ *	PERF_CALC_PIPE_APPLY_CLK_FUDGE:
+ *		Determine if mdp clock fudge is applicable.
+ *	PERF_CALC_PIPE_SINGLE_LAYER:
+ *		Indicate if the calculation is for a single pipe staged
+ *		in the layer mixer
+ *	PERF_CALC_PIPE_CALC_SMP_SIZE:
+ *		Indicate if the SMP size needs to be calculated; this is
+ *		for cases where SMPs have not been allocated yet, so the
+ *		SMP size must be estimated here (i.e. the PREPARE IOCTL).
+ *
+ * Function calculates the minimum required performance calculations in order
+ * to avoid MDP underflow. The calculations are based on the way MDP
+ * fetches (bandwidth requirement) and processes data through MDP pipeline
+ * (MDP clock requirement) based on frame size and scaling requirements.
+ */
+
+int mdss_mdp_perf_calc_pipe(struct mdss_mdp_pipe *pipe,
+	struct mdss_mdp_perf_params *perf, struct mdss_rect *roi,
+	u32 flags)
+{
+	struct mdss_mdp_mixer *mixer;
+	/* u32: matches the u32 *fps parameter of mdss_mdp_get_panel_params */
+	u32 fps = DEFAULT_FRAME_RATE;
+	u32 v_total = 0, src_h, xres = 0, h_total = 0;
+	struct mdss_rect src, dst;
+	bool is_fbc = false;
+	struct mdss_mdp_prefill_params prefill_params;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	bool calc_smp_size = false;
+
+	if (!pipe || !perf || !pipe->mixer_left)
+		return -EINVAL;
+
+	mixer = pipe->mixer_left;
+
+	dst = pipe->dst;
+	src = pipe->src;
+
+	/*
+	 * when doing vertical decimation lines will be skipped, hence there is
+	 * no need to account for these lines in MDP clock or request bus
+	 * bandwidth to fetch them.
+	 */
+	src_h = DECIMATED_DIMENSION(src.h, pipe->vert_deci);
+
+	if (mdss_mdp_get_panel_params(pipe, mixer, &fps, &v_total,
+			&h_total, &xres)) {
+		pr_err("error retrieving the panel params!\n");
+		return -EINVAL;
+	}
+
+	if (mixer->type == MDSS_MDP_MIXER_TYPE_INTF) {
+		if (!mixer->ctl)
+			return -EINVAL;
+		is_fbc = mixer->ctl->panel_data->panel_info.fbc.enabled;
+	}
+
+	mixer->ctl->frame_rate = fps;
+
+	/* crop rectangles */
+	if (roi && !mixer->ctl->is_video_mode && !pipe->src_split_req)
+		mdss_mdp_crop_rect(&src, &dst, roi);
+
+	pr_debug("v_total=%d, xres=%d fps=%d\n", v_total, xres, fps);
+	pr_debug("src(w,h)(%d,%d) dst(w,h)(%d,%d) dst_y=%d bpp=%d yuv=%d\n",
+		 pipe->src.w, src_h, pipe->dst.w, pipe->dst.h, pipe->dst.y,
+		 pipe->src_fmt->bpp, pipe->src_fmt->is_yuv);
+
+	/* overlap bandwidth (with and without compression ratio) */
+	if (mdss_mdp_get_pipe_overlap_bw(pipe, roi, &perf->bw_overlap,
+			&perf->bw_overlap_nocr, flags))
+		pr_err("failure calculating overlap bw!\n");
+
+	perf->mdp_clk_rate = get_pipe_mdp_clk_rate(pipe, src, dst,
+		fps, v_total, flags);
+
+	pr_debug("bw:%llu bw_nocr:%llu clk:%d\n", perf->bw_overlap,
+		perf->bw_overlap_nocr, perf->mdp_clk_rate);
+
+	/* solid fill fetches no pixel data */
+	if (pipe->flags & MDP_SOLID_FILL)
+		perf->bw_overlap = 0;
+
+	if (mixer->ctl->intf_num == MDSS_MDP_NO_INTF ||
+		mdata->disable_prefill ||
+		mixer->ctl->disable_prefill ||
+		(pipe->flags & MDP_SOLID_FILL)) {
+		perf->prefill_bytes = 0;
+		perf->bw_prefill = 0;
+		goto exit;
+	}
+
+	if (test_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map)) {
+		perf->bw_prefill = mdss_mdp_perf_calc_simplified_prefill(pipe,
+			v_total, fps, mixer->ctl);
+		goto exit;
+	}
+
+	/* legacy prefill model: fill in all prefill parameters */
+	calc_smp_size = (flags & PERF_CALC_PIPE_CALC_SMP_SIZE) ? true : false;
+	prefill_params.smp_bytes = mdss_mdp_perf_calc_smp_size(pipe,
+			calc_smp_size);
+	prefill_params.xres = xres;
+	prefill_params.src_w = src.w;
+	prefill_params.src_h = src_h;
+	prefill_params.dst_w = dst.w;
+	prefill_params.dst_h = dst.h;
+	prefill_params.dst_y = dst.y;
+	prefill_params.bpp = pipe->src_fmt->bpp;
+	prefill_params.is_yuv = pipe->src_fmt->is_yuv;
+	prefill_params.is_caf = mdss_mdp_perf_is_caf(pipe);
+	prefill_params.is_fbc = is_fbc;
+	prefill_params.is_bwc = pipe->bwc_mode;
+	prefill_params.is_tile = mdss_mdp_is_tile_format(pipe->src_fmt);
+	prefill_params.is_hflip = pipe->flags & MDP_FLIP_LR;
+	prefill_params.is_cmd = !mixer->ctl->is_video_mode;
+	prefill_params.pnum = pipe->num;
+	prefill_params.is_ubwc = mdss_mdp_is_ubwc_format(pipe->src_fmt);
+	prefill_params.is_nv12 = mdss_mdp_is_nv12_format(pipe->src_fmt);
+
+	mdss_mdp_get_bw_vote_mode(mixer, mdata->mdp_rev, perf,
+		PERF_CALC_VOTE_MODE_PER_PIPE, flags);
+
+	if (flags & PERF_CALC_PIPE_SINGLE_LAYER)
+		perf->prefill_bytes =
+			mdss_mdp_perf_calc_pipe_prefill_single(&prefill_params);
+	else if (!prefill_params.is_cmd)
+		perf->prefill_bytes =
+			mdss_mdp_perf_calc_pipe_prefill_video(&prefill_params);
+	else
+		perf->prefill_bytes =
+			mdss_mdp_perf_calc_pipe_prefill_cmd(&prefill_params);
+
+exit:
+	pr_debug("mixer=%d pnum=%d clk_rate=%u bw_overlap=%llu bw_prefill=%llu (%d) %s\n",
+		 mixer->num, pipe->num, perf->mdp_clk_rate, perf->bw_overlap,
+		 perf->bw_prefill, perf->prefill_bytes, mdata->disable_prefill ?
+		 "prefill is disabled" : "");
+
+	return 0;
+}
+
+/* Does span [y10, y11) intersect the region [y00, y01)? */
+static inline int mdss_mdp_perf_is_overlap(u32 y00, u32 y01, u32 y10, u32 y11)
+{
+	/* second span starts inside the region */
+	if (y10 >= y00 && y10 < y01)
+		return 1;
+
+	/* second span starts above and ends at/past the region */
+	return (y10 < y00 && y11 >= y01);
+}
+
+/*
+ * cmpu32() - three-way u32 comparator for sort()
+ *
+ * The previous version only ever returned -1 or 0, conflating "equal"
+ * and "greater"; make it a proper tri-state comparator per the sort()
+ * contract (sorting order for this caller is unchanged).
+ */
+static inline int cmpu32(const void *a, const void *b)
+{
+	u32 x = *(const u32 *)a;
+	u32 y = *(const u32 *)b;
+
+	if (x < y)
+		return -1;
+	return (x > y) ? 1 : 0;
+}
+
+/*
+ * mdss_mdp_perf_calc_mixer() - aggregate performance numbers per mixer
+ * @mixer:	mixer to compute performance for
+ * @perf:	output performance struct (zeroed first)
+ * @pipe_list:	pipes staged on this mixer
+ * @num_pipes:	number of entries in @pipe_list
+ * @flags:	PERF_CALC_* flags forwarded to per-pipe calculation
+ *
+ * Computes the mixer clock requirement, then per-pipe bandwidths, and
+ * finally the worst-case overlap bandwidth by slicing the display into
+ * vertical regions and summing the pipes that fetch in each region.
+ */
+static void mdss_mdp_perf_calc_mixer(struct mdss_mdp_mixer *mixer,
+		struct mdss_mdp_perf_params *perf,
+		struct mdss_mdp_pipe **pipe_list, int num_pipes,
+		u32 flags)
+{
+	struct mdss_mdp_pipe *pipe;
+	struct mdss_panel_info *pinfo = NULL;
+	int fps = DEFAULT_FRAME_RATE;
+	u32 v_total = 0, bpp = MDSS_MDP_WB_OUTPUT_BPP;
+	int i;
+	u32 max_clk_rate = 0;
+	u64 bw_overlap_max = 0;
+	u64 bw_overlap[MAX_PIPES_PER_LM] = { 0 };
+	u64 bw_overlap_async = 0;
+	u32 v_region[MAX_PIPES_PER_LM * 2] = { 0 };
+	u32 prefill_val = 0;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	bool apply_fudge = true;
+	struct mdss_mdp_format_params *fmt = NULL;
+
+	WARN_ON(num_pipes > MAX_PIPES_PER_LM);
+
+	memset(perf, 0, sizeof(*perf));
+
+	if (!mixer->rotator_mode) {
+		pinfo = &mixer->ctl->panel_data->panel_info;
+		/* NOTE(review): pinfo is address-of, so this check can
+		 * never fire; kept for safety/byte-compatibility. */
+		if (!pinfo) {
+			pr_err("pinfo is NULL\n");
+			goto exit;
+		}
+
+		if (mixer->type == MDSS_MDP_MIXER_TYPE_INTF) {
+			if (pinfo->type == MIPI_VIDEO_PANEL) {
+				fps = pinfo->panel_max_fps;
+				v_total = pinfo->panel_max_vtotal;
+			} else {
+				fps = mdss_panel_get_framerate(pinfo,
+						FPS_RESOLUTION_HZ);
+				v_total = mdss_panel_get_vtotal(pinfo);
+			}
+		} else {
+			v_total = mixer->height;
+		}
+
+		/* For writeback panel, mixer type can be other than intf */
+		if (pinfo->type == WRITEBACK_PANEL) {
+			fmt = mdss_mdp_get_format_params(
+				mixer->ctl->dst_format);
+			if (fmt)
+				bpp = fmt->bpp;
+			pinfo = NULL;
+		}
+
+		/* baseline mixer clock for scanning out the full width */
+		perf->mdp_clk_rate = mixer->width * v_total * fps;
+		perf->mdp_clk_rate =
+			mdss_mdp_clk_fudge_factor(mixer, perf->mdp_clk_rate);
+
+		if (!pinfo) { /* perf for bus writeback */
+			perf->bw_writeback =
+				fps * mixer->width * mixer->height * bpp;
+
+			if (test_bit(MDSS_QOS_OVERHEAD_FACTOR,
+					mdata->mdss_qos_map))
+				perf->bw_writeback = apply_comp_ratio_factor(
+						perf->bw_writeback, fmt,
+						&mixer->ctl->dst_comp_ratio);
+
+		} else if (pinfo->type == MIPI_CMD_PANEL) {
+			u32 dsi_transfer_rate = mixer->width * v_total;
+
+			/* adjust transfer time from micro seconds */
+			dsi_transfer_rate = mult_frac(dsi_transfer_rate,
+				1000000, pinfo->mdp_transfer_time_us);
+
+			if (dsi_transfer_rate > perf->mdp_clk_rate)
+				perf->mdp_clk_rate = dsi_transfer_rate;
+		}
+
+		if (is_dsc_compression(pinfo) &&
+		    mixer->ctl->opmode & MDSS_MDP_CTL_OP_PACK_3D_ENABLE)
+			perf->mdp_clk_rate *= 2;
+	}
+
+	/*
+	 * In case of border color, we still need enough mdp clock
+	 * to avoid under-run. Clock requirement for border color is
+	 * based on mixer width.
+	 */
+	if (num_pipes == 0)
+		goto exit;
+
+	memset(bw_overlap, 0, sizeof(u64) * MAX_PIPES_PER_LM);
+	memset(v_region, 0, sizeof(u32) * MAX_PIPES_PER_LM * 2);
+
+	/*
+	 * Apply this logic only for 8x26 to reduce clock rate
+	 * for single video playback use case
+	 */
+	if (IS_MDSS_MAJOR_MINOR_SAME(mdata->mdp_rev, MDSS_MDP_HW_REV_101)
+		 && mixer->type == MDSS_MDP_MIXER_TYPE_INTF) {
+		u32 npipes = 0;
+
+		for (i = 0; i < num_pipes; i++) {
+			pipe = pipe_list[i];
+			if (pipe) {
+				if (npipes) {
+					apply_fudge = true;
+					break;
+				}
+				npipes++;
+				apply_fudge = !(pipe->src_fmt->is_yuv)
+					|| !(pipe->flags
+					& MDP_SOURCE_ROTATED_90);
+			}
+		}
+	}
+
+	if (apply_fudge)
+		flags |= PERF_CALC_PIPE_APPLY_CLK_FUDGE;
+	if (num_pipes == 1)
+		flags |= PERF_CALC_PIPE_SINGLE_LAYER;
+
+	/* accumulate per-pipe bandwidth, clock and prefill contributions */
+	for (i = 0; i < num_pipes; i++) {
+		struct mdss_mdp_perf_params tmp;
+
+		memset(&tmp, 0, sizeof(tmp));
+
+		pipe = pipe_list[i];
+		if (pipe == NULL)
+			continue;
+
+		/*
+		 * if is pipe used across two LMs in source split configuration
+		 * then it is staged on both LMs. In such cases skip BW calc
+		 * for such pipe on right LM to prevent adding BW twice.
+		 */
+		if (pipe->src_split_req && mixer->is_right_mixer)
+			continue;
+
+		if (mdss_mdp_perf_calc_pipe(pipe, &tmp, &mixer->roi,
+			flags))
+			continue;
+
+		if (!mdss_mdp_is_nrt_ctl_path(mixer->ctl)) {
+			u64 per_pipe_ib =
+			    test_bit(MDSS_QOS_IB_NOCR, mdata->mdss_qos_map) ?
+			    tmp.bw_overlap_nocr : tmp.bw_overlap;
+
+			perf->max_per_pipe_ib = max(perf->max_per_pipe_ib,
+			    per_pipe_ib);
+		}
+
+		bitmap_or(perf->bw_vote_mode, perf->bw_vote_mode,
+			tmp.bw_vote_mode, MDSS_MDP_BW_MODE_MAX);
+
+		/*
+		 * for async layers, the overlap calculation is skipped
+		 * and the bandwidth is added at the end, accounting for
+		 * worst case, that async layer might overlap with
+		 * all the other layers.
+		 */
+		if (pipe->async_update) {
+			bw_overlap[i] = 0;
+			v_region[2*i] = 0;
+			v_region[2*i + 1] = 0;
+			bw_overlap_async += tmp.bw_overlap;
+		} else {
+			bw_overlap[i] = tmp.bw_overlap;
+			v_region[2*i] = pipe->dst.y;
+			v_region[2*i + 1] = pipe->dst.y + pipe->dst.h;
+		}
+
+		if (tmp.mdp_clk_rate > max_clk_rate)
+			max_clk_rate = tmp.mdp_clk_rate;
+
+		if (test_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map))
+			prefill_val += tmp.bw_prefill;
+		else
+			prefill_val += tmp.prefill_bytes;
+	}
+
+	/*
+	 * Sort the v_region array so the total display area can be
+	 * divided into individual regions. Check how many pipes fetch
+	 * data for each region and sum them up, then the worst case
+	 * of all regions is ib request.
+	 */
+	sort(v_region, num_pipes * 2, sizeof(u32), cmpu32, NULL);
+	for (i = 1; i < num_pipes * 2; i++) {
+		int j;
+		u64 bw_max_region = 0;
+		u32 y0, y1;
+
+		pr_debug("v_region[%d]%d\n", i, v_region[i]);
+		if (v_region[i] == v_region[i-1])
+			continue;
+		y0 = v_region[i-1];
+		y1 = v_region[i];
+		for (j = 0; j < num_pipes; j++) {
+			if (!bw_overlap[j])
+				continue;
+			pipe = pipe_list[j];
+			if (mdss_mdp_perf_is_overlap(y0, y1, pipe->dst.y,
+				(pipe->dst.y + pipe->dst.h)))
+				bw_max_region += bw_overlap[j];
+			pr_debug("pipe%d rect%d: v[%d](%d,%d)pipe[%d](%d,%d)bw(%llu %llu)\n",
+				pipe->num, pipe->multirect.num,
+				i, y0, y1, j, pipe->dst.y,
+				pipe->dst.y + pipe->dst.h, bw_overlap[j],
+				bw_max_region);
+		}
+		bw_overlap_max = max(bw_overlap_max, bw_max_region);
+	}
+
+	perf->bw_overlap += bw_overlap_max + bw_overlap_async;
+
+	if (test_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map))
+		perf->bw_prefill += prefill_val;
+	else
+		perf->prefill_bytes += prefill_val;
+
+	if (max_clk_rate > perf->mdp_clk_rate)
+		perf->mdp_clk_rate = max_clk_rate;
+
+exit:
+	pr_debug("final mixer=%d video=%d clk_rate=%u bw=%llu prefill=%d mode=0x%lx\n",
+		mixer->num, mixer->ctl->is_video_mode, perf->mdp_clk_rate,
+		perf->bw_overlap, prefill_val,
+		*(perf->bw_vote_mode));
+}
+
+/*
+ * is_mdp_prefetch_needed() - decide if MDP prefetch should be enabled
+ * @pinfo: panel timing information
+ *
+ * On HW rev >= 1.5 prefetch is enabled unless the blanking interval is
+ * already large enough (>= MDSS_MDP_MAX_PREFILL_FETCH). Older HW never
+ * enables it. Also adds the missing newline to the second warning so
+ * both warnings log consistently.
+ */
+static bool is_mdp_prefetch_needed(struct mdss_panel_info *pinfo)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	bool enable_prefetch = false;
+
+	if (mdata->mdp_rev >= MDSS_MDP_HW_REV_105) {
+		if ((pinfo->lcdc.v_back_porch + pinfo->lcdc.v_pulse_width +
+			pinfo->lcdc.v_front_porch) < mdata->min_prefill_lines)
+			pr_warn_once("low vbp+vfp may lead to perf issues in some cases\n");
+
+		enable_prefetch = true;
+
+		if ((pinfo->lcdc.v_back_porch + pinfo->lcdc.v_pulse_width) >=
+				MDSS_MDP_MAX_PREFILL_FETCH)
+			enable_prefetch = false;
+	} else {
+		if ((pinfo->lcdc.v_back_porch + pinfo->lcdc.v_pulse_width) <
+				mdata->min_prefill_lines)
+			pr_warn_once("low vbp may lead to display performance issues\n");
+	}
+
+	return enable_prefetch;
+}
+
+/**
+ * mdss_mdp_get_prefetch_lines: - Number of fetch lines in vertical front porch
+ * @pinfo: Pointer to the panel information.
+ *
+ * Returns the number of fetch lines in vertical front porch at which mdp
+ * can start fetching the next frame.
+ *
+ * In some cases the vertical front porch is too high. In such cases,
+ * limit the mdp fetch lines to the last (25 - vbp - vpw) lines of the
+ * vertical front porch.
+ */
+int mdss_mdp_get_prefetch_lines(struct mdss_panel_info *pinfo)
+{
+	int prefetch_avail = 0;
+	int v_total, vfp_start;
+	u32 prefetch_needed;
+
+	if (!is_mdp_prefetch_needed(pinfo))
+		return 0;
+
+	v_total = mdss_panel_get_vtotal(pinfo);
+	/* first line of the vertical front porch */
+	vfp_start = (pinfo->lcdc.v_back_porch + pinfo->lcdc.v_pulse_width +
+			pinfo->yres);
+
+	prefetch_avail = v_total - vfp_start;
+	/*
+	 * Lines still needed after vbp+vpw; cannot underflow because
+	 * is_mdp_prefetch_needed() already rejected vbp+vpw >= the max.
+	 */
+	prefetch_needed = MDSS_MDP_MAX_PREFILL_FETCH -
+		pinfo->lcdc.v_back_porch -
+		pinfo->lcdc.v_pulse_width;
+
+	/*
+	 * NOTE(review): signed/unsigned comparison — prefetch_avail (int)
+	 * is promoted to u32 here; assumes v_total >= vfp_start (confirm).
+	 */
+	if (prefetch_avail > prefetch_needed)
+		prefetch_avail = prefetch_needed;
+
+	return prefetch_avail;
+}
+
+/*
+ * mdss_mdp_video_mode_intf_connected() - any powered-on video interface?
+ * @ctl: any valid ctl, used to reach the global mdata
+ *
+ * Fixes: the bool function returned integer 0, and the loop variable
+ * shadowed the ctl parameter (-Wshadow).
+ */
+static bool mdss_mdp_video_mode_intf_connected(struct mdss_mdp_ctl *ctl)
+{
+	int i;
+	struct mdss_data_type *mdata;
+
+	if (!ctl || !ctl->mdata)
+		return false;
+
+	mdata = ctl->mdata;
+	for (i = 0; i < mdata->nctl; i++) {
+		struct mdss_mdp_ctl *tmp_ctl = mdata->ctl_off + i;
+
+		if (tmp_ctl->is_video_mode &&
+				mdss_mdp_ctl_is_power_on(tmp_ctl)) {
+			pr_debug("video interface connected ctl:%d\n",
+				tmp_ctl->num);
+			return true;
+		}
+	}
+
+	return false;
+}
+
+/*
+ * __mdss_mdp_perf_calc_ctl_helper() - combine both mixers' perf numbers
+ * @ctl:		control path being calculated
+ * @perf:		output, aggregated performance values (zeroed first)
+ * @left_plist:		pipes staged on the left mixer
+ * @left_cnt:		number of left pipes
+ * @right_plist:	pipes staged on the right mixer
+ * @right_cnt:		number of right pipes
+ * @flags:		PERF_CALC_* flags forwarded to the mixer calc
+ *
+ * Sums/maxes the per-mixer results, enforces a bandwidth floor, and
+ * converts prefill bytes to prefill bandwidth for the legacy model.
+ */
+static void __mdss_mdp_perf_calc_ctl_helper(struct mdss_mdp_ctl *ctl,
+		struct mdss_mdp_perf_params *perf,
+		struct mdss_mdp_pipe **left_plist, int left_cnt,
+		struct mdss_mdp_pipe **right_plist, int right_cnt,
+		u32 flags)
+{
+	struct mdss_mdp_perf_params tmp;
+	struct mdss_data_type *mdata = ctl->mdata;
+
+	memset(perf, 0, sizeof(*perf));
+
+	if (ctl->mixer_left) {
+		mdss_mdp_perf_calc_mixer(ctl->mixer_left, &tmp,
+				left_plist, left_cnt, flags);
+
+		bitmap_or(perf->bw_vote_mode, perf->bw_vote_mode,
+			tmp.bw_vote_mode, MDSS_MDP_BW_MODE_MAX);
+
+		perf->max_per_pipe_ib = tmp.max_per_pipe_ib;
+		perf->bw_overlap += tmp.bw_overlap;
+		perf->mdp_clk_rate = tmp.mdp_clk_rate;
+		perf->bw_writeback += tmp.bw_writeback;
+
+		if (test_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map))
+			perf->bw_prefill += tmp.bw_prefill;
+		else
+			perf->prefill_bytes += tmp.prefill_bytes;
+	}
+
+	if (ctl->mixer_right) {
+		mdss_mdp_perf_calc_mixer(ctl->mixer_right, &tmp,
+				right_plist, right_cnt, flags);
+
+		bitmap_or(perf->bw_vote_mode, perf->bw_vote_mode,
+			tmp.bw_vote_mode, MDSS_MDP_BW_MODE_MAX);
+
+		perf->max_per_pipe_ib = max(perf->max_per_pipe_ib,
+			tmp.max_per_pipe_ib);
+		perf->bw_overlap += tmp.bw_overlap;
+		perf->bw_writeback += tmp.bw_writeback;
+		if (tmp.mdp_clk_rate > perf->mdp_clk_rate)
+			perf->mdp_clk_rate = tmp.mdp_clk_rate;
+
+		if (test_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map))
+			perf->bw_prefill += tmp.bw_prefill;
+		else
+			perf->prefill_bytes += tmp.prefill_bytes;
+
+		if (ctl->intf_type) {
+			u64 clk_rate = mdss_mdp_get_pclk_rate(ctl);
+			/* minimum clock rate due to inefficiency in 3dmux */
+			clk_rate = DIV_ROUND_UP_ULL((clk_rate >> 1) * 9, 8);
+			if (clk_rate > perf->mdp_clk_rate)
+				perf->mdp_clk_rate = clk_rate;
+		}
+	}
+
+	/* request minimum bandwidth to have bus clock on when display is on */
+	if (perf->bw_overlap == 0)
+		perf->bw_overlap = SZ_16M;
+
+	if (!test_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map) &&
+		(ctl->intf_type != MDSS_MDP_NO_INTF)) {
+		u32 vbp_fac = mdss_mdp_get_vbp_factor_max(ctl);
+
+		perf->bw_prefill = perf->prefill_bytes;
+		/*
+		 * Prefill bandwidth equals the amount of data (number of
+		 * prefill_bytes) divided by the amount of time available
+		 * (blanking period). This is equivalent to multiplying
+		 * the prefill bytes by a factor in Hz, the reciprocal
+		 * of time.
+		 */
+		perf->bw_prefill *= vbp_fac;
+	}
+
+	perf->bw_ctl = max(perf->bw_prefill, perf->bw_overlap);
+	pr_debug("ctl=%d prefill bw=%llu overlap bw=%llu mode=0x%lx writeback:%llu\n",
+			ctl->num, perf->bw_prefill, perf->bw_overlap,
+			*(perf->bw_vote_mode), perf->bw_writeback);
+}
+
+/*
+ * mdss_check_for_flip() - collect H/V flip bandwidth-limit flags
+ * @ctl: control path whose panel orientation and staged pipes are checked
+ *
+ * Returns a bitmask of MDSS_MAX_BW_LIMIT_HFLIP/VFLIP built from the
+ * panel orientation and from every staged pipe on both mixers; stops
+ * scanning once both flags are already set.
+ */
+static u32 mdss_check_for_flip(struct mdss_mdp_ctl *ctl)
+{
+	const u32 both = MDSS_MAX_BW_LIMIT_HFLIP | MDSS_MAX_BW_LIMIT_VFLIP;
+	struct mdss_mdp_pipe *pipe;
+	u32 panel_orientation;
+	u32 flags = 0;
+	u32 i;
+
+	panel_orientation = ctl->mfd->panel_orientation;
+	if (panel_orientation & MDP_FLIP_LR)
+		flags |= MDSS_MAX_BW_LIMIT_HFLIP;
+	if (panel_orientation & MDP_FLIP_UD)
+		flags |= MDSS_MAX_BW_LIMIT_VFLIP;
+
+	for (i = 0; i < MAX_PIPES_PER_LM; i++) {
+		/* nothing more can be added once both limits are set */
+		if ((flags & both) == both)
+			break;
+
+		if (ctl->mixer_left && ctl->mixer_left->stage_pipe[i]) {
+			pipe = ctl->mixer_left->stage_pipe[i];
+			if (pipe->flags & MDP_FLIP_LR)
+				flags |= MDSS_MAX_BW_LIMIT_HFLIP;
+			if (pipe->flags & MDP_FLIP_UD)
+				flags |= MDSS_MAX_BW_LIMIT_VFLIP;
+		}
+
+		if (ctl->mixer_right && ctl->mixer_right->stage_pipe[i]) {
+			pipe = ctl->mixer_right->stage_pipe[i];
+			if (pipe->flags & MDP_FLIP_LR)
+				flags |= MDSS_MAX_BW_LIMIT_HFLIP;
+			if (pipe->flags & MDP_FLIP_UD)
+				flags |= MDSS_MAX_BW_LIMIT_VFLIP;
+		}
+	}
+
+	return flags;
+}
+
+/*
+ * mdss_mdp_set_threshold_max_bandwidth() - lowest matching max-bw limit
+ * @ctl: control path whose mode/flip flags select applicable limits
+ *
+ * Scans the max_bw_settings table and returns the smallest limit whose
+ * mode bits intersect the effective mode. Returns 0 when no table is
+ * configured.
+ *
+ * NOTE(review): when the table exists but no entry matches, INT_MAX is
+ * returned; the caller's "max_bw < threshold" check makes that benign,
+ * but confirm that is the intended sentinel.
+ */
+static int mdss_mdp_set_threshold_max_bandwidth(struct mdss_mdp_ctl *ctl)
+{
+	u32 mode, threshold = 0, max = INT_MAX;
+	u32 i = 0;
+	struct mdss_max_bw_settings *max_bw_settings =
+		ctl->mdata->max_bw_settings;
+
+	if (!ctl->mdata->max_bw_settings_cnt && !ctl->mdata->max_bw_settings)
+		return 0;
+
+	mode = ctl->mdata->bw_mode_bitmap;
+
+	/* only scan pipes for flips not already forced by the bitmap */
+	if (!((mode & MDSS_MAX_BW_LIMIT_HFLIP) &&
+				(mode & MDSS_MAX_BW_LIMIT_VFLIP)))
+		mode |= mdss_check_for_flip(ctl);
+
+	pr_debug("final mode = %d, bw_mode_bitmap = %d\n", mode,
+			ctl->mdata->bw_mode_bitmap);
+
+	/* Return minimum bandwidth limit */
+	for (i = 0; i < ctl->mdata->max_bw_settings_cnt; i++) {
+		if (max_bw_settings[i].mdss_max_bw_mode & mode) {
+			threshold = max_bw_settings[i].mdss_max_bw_val;
+			if (threshold < max)
+				max = threshold;
+		}
+	}
+
+	return max;
+}
+
+/*
+ * mdss_mdp_perf_bw_check() - validate total bandwidth against thresholds
+ * @ctl:		control path to validate
+ * @left_plist:		pipes staged on the left mixer
+ * @left_cnt:		number of left pipes
+ * @right_plist:	pipes staged on the right mixer
+ * @right_cnt:		number of right pipes
+ *
+ * Sums the pending bandwidth of all powered-on interface ctls and
+ * compares against the mode-dependent threshold.
+ *
+ * Returns 0 when within limits, -E2BIG when the request exceeds them.
+ */
+int mdss_mdp_perf_bw_check(struct mdss_mdp_ctl *ctl,
+		struct mdss_mdp_pipe **left_plist, int left_cnt,
+		struct mdss_mdp_pipe **right_plist, int right_cnt)
+{
+	struct mdss_data_type *mdata = ctl->mdata;
+	struct mdss_mdp_perf_params perf;
+	u32 bw, threshold, i, mode_switch, max_bw;
+	u64 bw_sum_of_intfs = 0;
+	bool is_video_mode;
+
+	/* we only need bandwidth check on real-time clients (interfaces) */
+	if (ctl->intf_type == MDSS_MDP_NO_INTF)
+		return 0;
+
+	__mdss_mdp_perf_calc_ctl_helper(ctl, &perf,
+			left_plist, left_cnt, right_plist, right_cnt,
+			PERF_CALC_PIPE_CALC_SMP_SIZE);
+	ctl->bw_pending = perf.bw_ctl;
+
+	/* sum pending bandwidth across every powered-on interface ctl */
+	for (i = 0; i < mdata->nctl; i++) {
+		struct mdss_mdp_ctl *temp = mdata->ctl_off + i;
+
+		if (temp->power_state == MDSS_PANEL_POWER_ON  &&
+				(temp->intf_type != MDSS_MDP_NO_INTF))
+			bw_sum_of_intfs += temp->bw_pending;
+	}
+
+	/* convert bandwidth to kb */
+	bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
+	pr_debug("calculated bandwidth=%uk\n", bw);
+
+	/* mfd validation happens in func */
+	mode_switch = mdss_fb_get_mode_switch(ctl->mfd);
+	if (mode_switch)
+		is_video_mode = (mode_switch == MIPI_VIDEO_PANEL);
+	else
+		is_video_mode = ctl->is_video_mode;
+	/* video mode paths get the stricter (low) threshold */
+	threshold = (is_video_mode ||
+		mdss_mdp_video_mode_intf_connected(ctl)) ?
+		mdata->max_bw_low : mdata->max_bw_high;
+
+	max_bw = mdss_mdp_set_threshold_max_bandwidth(ctl);
+
+	if (max_bw && (max_bw < threshold))
+		threshold = max_bw;
+
+	pr_debug("final threshold bw limit = %d\n", threshold);
+
+	if (bw > threshold) {
+		/* reject: clear the pending vote so it is not summed later */
+		ctl->bw_pending = 0;
+		pr_debug("exceeds bandwidth: %ukb > %ukb\n", bw, threshold);
+		return -E2BIG;
+	}
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_get_max_pipe_bw() - lowest per-pipe bw limit matching flags
+ * @pipe: pipe whose flip flags select the applicable limits
+ *
+ * Combines panel orientation, pipe flip flags and the global bw mode
+ * bitmap, then returns the smallest matching per-pipe bandwidth limit.
+ * Returns 0 when no settings table exists; INT_MAX when the table has
+ * no matching entry. Removed a duplicated panel_orientation read.
+ */
+static u32 mdss_mdp_get_max_pipe_bw(struct mdss_mdp_pipe *pipe)
+{
+	struct mdss_mdp_ctl *ctl = pipe->mixer_left->ctl;
+	struct mdss_max_bw_settings *max_per_pipe_bw_settings;
+	u32 flags = 0, threshold = 0, panel_orientation;
+	u32 i, max = INT_MAX;
+
+	if (!ctl->mdata->mdss_per_pipe_bw_cnt
+			&& !ctl->mdata->max_per_pipe_bw_settings)
+		return 0;
+
+	max_per_pipe_bw_settings = ctl->mdata->max_per_pipe_bw_settings;
+
+	/* Check for panel orientation */
+	panel_orientation = ctl->mfd->panel_orientation;
+	if (panel_orientation & MDP_FLIP_LR)
+		flags |= MDSS_MAX_BW_LIMIT_HFLIP;
+	if (panel_orientation & MDP_FLIP_UD)
+		flags |= MDSS_MAX_BW_LIMIT_VFLIP;
+
+	/* check for Hflip/Vflip in pipe */
+	if (pipe->flags & MDP_FLIP_LR)
+		flags |= MDSS_MAX_BW_LIMIT_HFLIP;
+	if (pipe->flags & MDP_FLIP_UD)
+		flags |= MDSS_MAX_BW_LIMIT_VFLIP;
+
+	flags |= ctl->mdata->bw_mode_bitmap;
+
+	/* pick the most restrictive (lowest) matching limit */
+	for (i = 0; i < ctl->mdata->mdss_per_pipe_bw_cnt; i++) {
+		if (max_per_pipe_bw_settings[i].mdss_max_bw_mode & flags) {
+			threshold = max_per_pipe_bw_settings[i].mdss_max_bw_val;
+			if (threshold < max)
+				max = threshold;
+		}
+	}
+
+	return max;
+}
+
+/*
+ * mdss_mdp_perf_bw_check_pipe() - validate a single pipe's bandwidth demand.
+ * @perf: perf numbers previously calculated for @pipe
+ * @pipe: pipe being validated
+ *
+ * Compares the pipe's worst-case bandwidth (max of prefill and overlap,
+ * in kB) against the per-pipe threshold, tightening the threshold with
+ * any flip/orientation specific cap.  Returns 0 when acceptable, or
+ * -E2BIG when the pipe exceeds the limit.
+ */
+int mdss_mdp_perf_bw_check_pipe(struct mdss_mdp_perf_params *perf,
+		struct mdss_mdp_pipe *pipe)
+{
+	struct mdss_data_type *mdata = pipe->mixer_left->ctl->mdata;
+	struct mdss_mdp_ctl *ctl = pipe->mixer_left->ctl;
+	u32 vbp_fac = 0, threshold = 0;
+	u64 prefill_bw, pipe_bw, max_pipe_bw;
+
+	/* we only need bandwidth check on real-time clients (interfaces) */
+	if (ctl->intf_type == MDSS_MDP_NO_INTF)
+		return 0;
+
+	if (test_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map)) {
+		prefill_bw = perf->bw_prefill;
+	} else {
+		/* scale prefill bytes by the worst vertical back porch factor */
+		vbp_fac = mdss_mdp_get_vbp_factor_max(ctl);
+		prefill_bw = perf->prefill_bytes * vbp_fac;
+	}
+	pipe_bw = max(prefill_bw, perf->bw_overlap);
+	pr_debug("prefill=%llu, vbp_fac=%u, overlap=%llu\n",
+			prefill_bw, vbp_fac, perf->bw_overlap);
+
+	/* convert bandwidth to kb */
+	pipe_bw = DIV_ROUND_UP_ULL(pipe_bw, 1000);
+
+	threshold = mdata->max_bw_per_pipe;
+	max_pipe_bw = mdss_mdp_get_max_pipe_bw(pipe);
+
+	/* a flip/orientation specific cap may be tighter than the default */
+	if (max_pipe_bw && (max_pipe_bw < threshold))
+		threshold = max_pipe_bw;
+
+	pr_debug("bw=%llu threshold=%u\n", pipe_bw, threshold);
+
+	/* a zero threshold means no per-pipe limit is configured */
+	if (threshold && pipe_bw > threshold) {
+		pr_debug("pipe exceeds bandwidth: %llukb > %ukb\n", pipe_bw,
+				threshold);
+		return -E2BIG;
+	}
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_perf_calc_ctl() - compute the perf requirements of a ctl path.
+ * @ctl: controller to evaluate
+ * @perf: output perf parameters (bandwidth votes and mdp clock)
+ *
+ * Collects the pipes staged on both mixers and delegates to the perf
+ * helper; for video-mode style paths the resulting votes are inflated
+ * by the configured ib fudge factors.
+ */
+static void mdss_mdp_perf_calc_ctl(struct mdss_mdp_ctl *ctl,
+		struct mdss_mdp_perf_params *perf)
+{
+	struct mdss_mdp_pipe *left_plist[MAX_PIPES_PER_LM];
+	struct mdss_mdp_pipe *right_plist[MAX_PIPES_PER_LM];
+	int i, left_cnt = 0, right_cnt = 0;
+
+	/* gather the pipes currently staged on each mixer */
+	for (i = 0; i < MAX_PIPES_PER_LM; i++) {
+		if (ctl->mixer_left && ctl->mixer_left->stage_pipe[i]) {
+			left_plist[left_cnt] =
+					ctl->mixer_left->stage_pipe[i];
+			left_cnt++;
+		}
+
+		if (ctl->mixer_right && ctl->mixer_right->stage_pipe[i]) {
+			right_plist[right_cnt] =
+					ctl->mixer_right->stage_pipe[i];
+			right_cnt++;
+		}
+	}
+
+	__mdss_mdp_perf_calc_ctl_helper(ctl, perf,
+		left_plist, left_cnt, right_plist, right_cnt, 0);
+
+	/* real-time (video style) paths carry ib fudge factors on the votes */
+	if (ctl->is_video_mode || ((ctl->intf_type != MDSS_MDP_NO_INTF) &&
+		mdss_mdp_video_mode_intf_connected(ctl))) {
+		perf->bw_ctl =
+			max(apply_fudge_factor(perf->bw_overlap,
+				&mdss_res->ib_factor_overlap),
+			apply_fudge_factor(perf->bw_prefill,
+				&mdss_res->ib_factor));
+		perf->bw_writeback = apply_fudge_factor(perf->bw_writeback,
+				&mdss_res->ib_factor);
+	}
+	pr_debug("ctl=%d clk_rate=%u\n", ctl->num, perf->mdp_clk_rate);
+	pr_debug("bw_overlap=%llu bw_prefill=%llu prefill_bytes=%d\n",
+		 perf->bw_overlap, perf->bw_prefill, perf->prefill_bytes);
+}
+
+/* Set (status true) or clear (status false) bit @bit_num in @value. */
+static void set_status(u32 *value, bool status, u32 bit_num)
+{
+	u32 mask = BIT(bit_num);
+
+	*value = status ? (*value | mask) : (*value & ~mask);
+}
+
+/**
+ * @ mdss_mdp_ctl_perf_set_transaction_status() -
+ *                             Set the status of the on-going operations
+ *                             for the command mode panels.
+ * @ctl - pointer to a ctl
+ * @component - which transaction bit to update (SW commit or HW state)
+ * @new_status - PERF_STATUS_BUSY to mark busy, PERF_STATUS_DONE when done
+ *
+ * This function is called to set the status bit in the perf_transaction_status
+ * according to the operation that it is on-going for the command mode
+ * panels, where:
+ *
+ * PERF_SW_COMMIT_STATE:
+ *           1 - If SW operation has been committed and bw
+ *               has been requested (HW transaction have not started yet).
+ *           0 - If there is no SW operation pending
+ * PERF_HW_MDP_STATE:
+ *           1 - If HW transaction is on-going
+ *           0 - If there is no HW transaction on going (ping-pong interrupt
+ *               has finished)
+ * Only if both states are zero there are no pending operations and
+ * BW could be released.
+ * State can be queried calling "mdss_mdp_ctl_perf_get_transaction_status"
+ */
+void mdss_mdp_ctl_perf_set_transaction_status(struct mdss_mdp_ctl *ctl,
+	enum mdss_mdp_perf_state_type component, bool new_status)
+{
+	u32  previous_transaction;
+	bool previous_status;
+	unsigned long flags;
+
+	/* only command-mode panels track transaction status */
+	if (!ctl || !ctl->panel_data ||
+		(ctl->panel_data->panel_info.type != MIPI_CMD_PANEL))
+		return;
+
+	/* status word is shared with interrupt context; guard with spinlock */
+	spin_lock_irqsave(&ctl->spin_lock, flags);
+
+	previous_transaction = ctl->perf_transaction_status;
+	previous_status = previous_transaction & BIT(component) ?
+		PERF_STATUS_BUSY : PERF_STATUS_DONE;
+
+	/*
+	 * If we set "done" state when previous state was not "busy",
+	 * we want to print a warning since maybe there is a state
+	 * that we are not considering
+	 */
+	WARN((new_status == PERF_STATUS_DONE) &&
+		(previous_status != PERF_STATUS_BUSY),
+		"unexpected previous state for component: %d\n", component);
+
+	set_status(&ctl->perf_transaction_status, new_status,
+		(u32)component);
+
+	pr_debug("ctl:%d component:%d previous:%d status:%d\n",
+		ctl->num, component, previous_transaction,
+		ctl->perf_transaction_status);
+	pr_debug("ctl:%d new_status:%d prev_status:%d\n",
+		ctl->num, new_status, previous_status);
+
+	spin_unlock_irqrestore(&ctl->spin_lock, flags);
+}
+
+/**
+ * @ mdss_mdp_ctl_perf_get_transaction_status() -
+ *                             Get the status of the on-going operations
+ *                             for the command mode panels.
+ * @ctl - pointer to a ctl
+ *
+ * Return:
+ * The status of the transactions for the command mode panels,
+ * note that the bandwidth can be released only if all transaction
+ * status bits are zero.
+ */
+u32 mdss_mdp_ctl_perf_get_transaction_status(struct mdss_mdp_ctl *ctl)
+{
+	unsigned long flags;
+	u32 transaction_status;
+
+	if (!ctl)
+		return PERF_STATUS_BUSY;
+
+	/*
+	 * If Rotator mode and bandwidth has been released; return STATUS_DONE
+	 * so the bandwidth is re-calculated.
+	 */
+	if (ctl->mixer_left && ctl->mixer_left->rotator_mode &&
+		!ctl->perf_release_ctl_bw)
+		return PERF_STATUS_DONE;
+
+	/*
+	 * If Video Mode or not valid data to determine the status, return busy
+	 * status, so the bandwidth cannot be freed by the caller
+	 * (ctl itself was already NULL-checked above).
+	 */
+	if (!ctl->panel_data ||
+		(ctl->panel_data->panel_info.type != MIPI_CMD_PANEL)) {
+		return PERF_STATUS_BUSY;
+	}
+
+	spin_lock_irqsave(&ctl->spin_lock, flags);
+	transaction_status = ctl->perf_transaction_status;
+	spin_unlock_irqrestore(&ctl->spin_lock, flags);
+	pr_debug("ctl:%d status:%d\n", ctl->num,
+		transaction_status);
+
+	return transaction_status;
+}
+
+/**
+ * @ mdss_mdp_ctl_perf_update_traffic_shaper_bw  -
+ *				Apply BW fudge factor to rotator
+ *				if mdp clock increased during
+ *				rotation session.
+ * @ctl - pointer to the controller
+ * @mdp_clk - new mdp clock
+ *
+ * If mdp clock increased and traffic shaper is enabled, we need to
+ * account for the additional bandwidth that will be requested by
+ * the rotator when running at a higher clock, so we apply a fudge
+ * factor proportional to the mdp clock increment.
+ */
+static void mdss_mdp_ctl_perf_update_traffic_shaper_bw(struct mdss_mdp_ctl *ctl,
+		u32 mdp_clk)
+{
+	/* only scale the vote up; a lower clock leaves the vote untouched */
+	if ((mdp_clk > 0) && (mdp_clk > ctl->traffic_shaper_mdp_clk)) {
+		ctl->cur_perf.bw_ctl = fudge_factor(ctl->cur_perf.bw_ctl,
+			mdp_clk, ctl->traffic_shaper_mdp_clk);
+		pr_debug("traffic shaper bw:%llu, clk: %d,  mdp_clk:%d\n",
+			ctl->cur_perf.bw_ctl, ctl->traffic_shaper_mdp_clk,
+				mdp_clk);
+	}
+}
+
+/*
+ * mdss_mdp_ctl_calc_client_vote() - sum bandwidth votes for one client class.
+ * @mdata: global mdp data
+ * @perf: output; the max per-pipe ib vote is accumulated here
+ * @nrt_client: true to aggregate non-realtime (writeback output) clients,
+ *              false for realtime interface clients
+ * @mdp_clk: new mdp clock, used to scale the vote when the traffic
+ *           shaper is enabled
+ *
+ * Walks every powered-on ctl and accumulates the bandwidth votes of the
+ * requested client class.  Rotator sessions are skipped since their
+ * bandwidth is voted by the rotator driver.
+ */
+static u64 mdss_mdp_ctl_calc_client_vote(struct mdss_data_type *mdata,
+	struct mdss_mdp_perf_params *perf, bool nrt_client, u32 mdp_clk)
+{
+	u64 bw_sum_of_intfs = 0;
+	int i;
+	struct mdss_mdp_ctl *ctl;
+	struct mdss_mdp_perf_params perf_temp;
+
+	bitmap_zero(perf_temp.bw_vote_mode, MDSS_MDP_BW_MODE_MAX);
+
+	for (i = 0; i < mdata->nctl; i++) {
+		ctl = mdata->ctl_off + i;
+		if (mdss_mdp_ctl_is_power_on(ctl) &&
+		    /* RealTime clients */
+		    ((!nrt_client && ctl->mixer_left &&
+			!ctl->mixer_left->rotator_mode) ||
+		    /* Non-RealTime clients */
+		    (nrt_client && mdss_mdp_is_nrt_ctl_path(ctl)))) {
+			/* Skip rotation layers as bw calc by rot driver */
+			if (ctl->mixer_left && ctl->mixer_left->rotator_mode)
+				continue;
+			/*
+			 * If traffic shaper is enabled we must check
+			 * if additional bandwidth is required.
+			 */
+			if (ctl->traffic_shaper_enabled)
+				mdss_mdp_ctl_perf_update_traffic_shaper_bw
+					(ctl, mdp_clk);
+
+			mdss_mdp_get_bw_vote_mode(ctl, mdata->mdp_rev,
+				&perf_temp, PERF_CALC_VOTE_MODE_CTL, 0);
+
+			bitmap_or(perf_temp.bw_vote_mode,
+				perf_temp.bw_vote_mode,
+				ctl->cur_perf.bw_vote_mode,
+				MDSS_MDP_BW_MODE_MAX);
+
+			/* nrt interface paths vote their writeback bw only */
+			if (nrt_client && ctl->mixer_left &&
+				!ctl->mixer_left->rotator_mode) {
+				bw_sum_of_intfs += ctl->cur_perf.bw_writeback;
+				continue;
+			}
+
+			perf->max_per_pipe_ib = max(perf->max_per_pipe_ib,
+				ctl->cur_perf.max_per_pipe_ib);
+
+			bw_sum_of_intfs += ctl->cur_perf.bw_ctl;
+
+			pr_debug("ctl_num=%d bw=%llu mode=0x%lx\n", ctl->num,
+				ctl->cur_perf.bw_ctl,
+				*(ctl->cur_perf.bw_vote_mode));
+		}
+	}
+
+	return bw_sum_of_intfs;
+}
+
+/*
+ * mdss_mdp_ctl_update_client_vote() - program a bus bandwidth vote.
+ * @mdata: global mdp data
+ * @perf: perf parameters carrying the vote mode and per-pipe ib numbers
+ * @nrt_client: true to vote on the non-realtime bus client
+ * @bw_vote: aggregated ab bandwidth for this client class
+ *
+ * Derives the ab/ib quotas from the aggregated vote, applies the
+ * configured fudge factors and hands the result to the bus scale driver.
+ */
+static void mdss_mdp_ctl_update_client_vote(struct mdss_data_type *mdata,
+	struct mdss_mdp_perf_params *perf, bool nrt_client, u64 bw_vote)
+{
+	u64 bus_ab_quota, bus_ib_quota;
+
+	/* never vote below the tuned floor */
+	bus_ab_quota = max(bw_vote, mdata->perf_tune.min_bus_vote);
+
+	if (test_bit(MDSS_QOS_PER_PIPE_IB, mdata->mdss_qos_map)) {
+		if (!nrt_client)
+			bus_ib_quota = perf->max_per_pipe_ib;
+		else
+			bus_ib_quota = 0;
+	} else {
+		bus_ib_quota = bw_vote;
+	}
+
+	/* double the ib vote for large single-layer compositions */
+	if (test_bit(MDSS_MDP_BW_MODE_SINGLE_LAYER,
+		perf->bw_vote_mode) &&
+		(bus_ib_quota >= PERF_SINGLE_PIPE_BW_FLOOR)) {
+		struct mult_factor ib_factor_vscaling;
+
+		ib_factor_vscaling.numer = 2;
+		ib_factor_vscaling.denom = 1;
+		bus_ib_quota = apply_fudge_factor(bus_ib_quota,
+			&ib_factor_vscaling);
+	}
+
+	if (test_bit(MDSS_QOS_PER_PIPE_IB, mdata->mdss_qos_map) &&
+			!nrt_client)
+		bus_ib_quota = apply_fudge_factor(bus_ib_quota,
+			&mdata->per_pipe_ib_factor);
+
+	bus_ab_quota = apply_fudge_factor(bus_ab_quota, &mdss_res->ab_factor);
+	ATRACE_INT("bus_quota", bus_ib_quota);
+
+	mdss_bus_scale_set_quota(nrt_client ? MDSS_MDP_NRT : MDSS_MDP_RT,
+		bus_ab_quota, bus_ib_quota);
+	pr_debug("client:%s ab=%llu ib=%llu\n", nrt_client ? "nrt" : "rt",
+		bus_ab_quota, bus_ib_quota);
+}
+
+/*
+ * mdss_mdp_ctl_perf_update_bus() - refresh bus votes for rt/nrt clients.
+ * @mdata: global mdp data
+ * @ctl: controller triggering the update; its path type decides which
+ *       client classes get re-aggregated
+ * @mdp_clk: new mdp clock, forwarded for traffic-shaper scaling
+ */
+static void mdss_mdp_ctl_perf_update_bus(struct mdss_data_type *mdata,
+	struct mdss_mdp_ctl *ctl, u32 mdp_clk)
+{
+	u64 bw_sum_of_rt_intfs = 0, bw_sum_of_nrt_intfs = 0;
+	struct mdss_mdp_perf_params perf = {0};
+
+	ATRACE_BEGIN(__func__);
+
+	/*
+	 * non-real time client
+	 * 1. rotator path
+	 * 2. writeback output path
+	 */
+	if (mdss_mdp_is_nrt_ctl_path(ctl)) {
+		bitmap_zero(perf.bw_vote_mode, MDSS_MDP_BW_MODE_MAX);
+		bw_sum_of_nrt_intfs = mdss_mdp_ctl_calc_client_vote(mdata,
+			&perf, true, mdp_clk);
+		mdss_mdp_ctl_update_client_vote(mdata, &perf, true,
+			bw_sum_of_nrt_intfs);
+	}
+
+	/*
+	 * real time client
+	 * 1. any realtime interface - primary or secondary interface
+	 * 2. writeback input path
+	 */
+	if (!mdss_mdp_is_nrt_ctl_path(ctl) ||
+		(ctl->intf_num ==  MDSS_MDP_NO_INTF)) {
+		bitmap_zero(perf.bw_vote_mode, MDSS_MDP_BW_MODE_MAX);
+		bw_sum_of_rt_intfs = mdss_mdp_ctl_calc_client_vote(mdata,
+			&perf, false, mdp_clk);
+		mdss_mdp_ctl_update_client_vote(mdata, &perf, false,
+			bw_sum_of_rt_intfs);
+	}
+
+	ATRACE_END(__func__);
+}
+
+/**
+ * @mdss_mdp_ctl_perf_release_bw() - request zero bandwidth
+ * @ctl - pointer to a ctl
+ *
+ * Function checks a state variable for the ctl, if all pending commit
+ * requests are done, meaning no more bandwidth is needed, release
+ * bandwidth request.
+ */
+void mdss_mdp_ctl_perf_release_bw(struct mdss_mdp_ctl *ctl)
+{
+	int transaction_status;
+	struct mdss_data_type *mdata;
+	int i;
+
+	/* only do this for command panel */
+	if (!ctl || !ctl->mdata || !ctl->panel_data ||
+		(ctl->panel_data->panel_info.type != MIPI_CMD_PANEL))
+		return;
+
+	mutex_lock(&mdss_mdp_ctl_lock);
+	mdata = ctl->mdata;
+	/*
+	 * If video interface present, cmd panel bandwidth cannot be
+	 * released.
+	 */
+	for (i = 0; i < mdata->nctl; i++) {
+		struct mdss_mdp_ctl *ctl_local = mdata->ctl_off + i;
+
+		if (mdss_mdp_ctl_is_power_on(ctl_local) &&
+			ctl_local->is_video_mode)
+			goto exit;
+	}
+
+	transaction_status = mdss_mdp_ctl_perf_get_transaction_status(ctl);
+	pr_debug("transaction_status=0x%x\n", transaction_status);
+
+	/*Release the bandwidth only if there are no transactions pending*/
+	if (!transaction_status && mdata->enable_bw_release) {
+		/*
+		 * for splitdisplay if release_bw is called using secondary
+		 * then find the main ctl and release BW for main ctl because
+		 * BW is always calculated/stored using main ctl.
+		 */
+		struct mdss_mdp_ctl *ctl_local =
+			mdss_mdp_get_main_ctl(ctl) ? : ctl;
+
+		trace_mdp_cmd_release_bw(ctl_local->num);
+		ctl_local->cur_perf.bw_ctl = 0;
+		ctl_local->new_perf.bw_ctl = 0;
+		pr_debug("Release BW ctl=%d\n", ctl_local->num);
+		/*
+		 * NOTE(review): the bus update is issued with the original
+		 * ctl (not ctl_local); the update routine only uses it to
+		 * pick the client class, so this appears intentional -
+		 * confirm.
+		 */
+		mdss_mdp_ctl_perf_update_bus(mdata, ctl, 0);
+	}
+exit:
+	mutex_unlock(&mdss_mdp_ctl_lock);
+}
+
+/*
+ * Snap @clk_rate up to the first supported clock level that can carry
+ * it; the rate is returned unchanged when it exceeds every level.
+ */
+static int mdss_mdp_select_clk_lvl(struct mdss_data_type *mdata,
+			u32 clk_rate)
+{
+	int lvl;
+
+	for (lvl = 0; lvl < mdata->nclk_lvl; lvl++) {
+		if (clk_rate <= mdata->clock_levels[lvl]) {
+			clk_rate = mdata->clock_levels[lvl];
+			break;
+		}
+	}
+
+	return clk_rate;
+}
+
+/*
+ * mdss_mdp_perf_release_ctl_bw() - zero out a ctl's perf request.
+ * @ctl: controller whose one-shot release flag is consumed here
+ * @perf: perf parameters to clear
+ */
+static void mdss_mdp_perf_release_ctl_bw(struct mdss_mdp_ctl *ctl,
+	struct mdss_mdp_perf_params *perf)
+{
+	/* Set to zero controller bandwidth. */
+	memset(perf, 0, sizeof(*perf));
+	ctl->perf_release_ctl_bw = false;
+}
+
+/*
+ * mdss_mdp_get_mdp_clk_rate() - pick the mdp core clock for current load.
+ * @mdata: global mdp data
+ *
+ * Takes the highest clock requested by any powered-on ctl, rounds it via
+ * the clock framework, then snaps it to the next supported clock level.
+ */
+u32 mdss_mdp_get_mdp_clk_rate(struct mdss_data_type *mdata)
+{
+	u32 clk_rate = 0;
+	uint i;
+	struct clk *clk = mdss_mdp_get_clk(MDSS_CLK_MDP_CORE);
+
+	for (i = 0; i < mdata->nctl; i++) {
+		struct mdss_mdp_ctl *ctl;
+
+		ctl = mdata->ctl_off + i;
+		if (mdss_mdp_ctl_is_power_on(ctl)) {
+			clk_rate = max(ctl->cur_perf.mdp_clk_rate,
+							clk_rate);
+			clk_rate = clk_round_rate(clk, clk_rate);
+		}
+	}
+	clk_rate  = mdss_mdp_select_clk_lvl(mdata, clk_rate);
+
+	pr_debug("clk:%u nctl:%d\n", clk_rate, mdata->nctl);
+	return clk_rate;
+}
+
+/* Return true when any powered-on ctl has the traffic shaper enabled. */
+static bool is_traffic_shaper_enabled(struct mdss_data_type *mdata)
+{
+	uint idx;
+
+	for (idx = 0; idx < mdata->nctl; idx++) {
+		struct mdss_mdp_ctl *ctl = mdata->ctl_off + idx;
+
+		if (mdss_mdp_ctl_is_power_on(ctl) &&
+		    ctl->traffic_shaper_enabled)
+			return true;
+	}
+
+	return false;
+}
+
+/*
+ * mdss_mdp_ctl_perf_update() - re-evaluate and apply bus/clock votes.
+ * @ctl: controller whose perf state may have changed
+ * @params_changed: non-zero when a new configuration was committed
+ * @stop_req: true when called from a stop path (last chance to unvote)
+ *
+ * Recomputes new_perf when needed, compares it against cur_perf and
+ * updates the bus bandwidth and mdp clock votes accordingly; a
+ * powered-off ctl has both votes cleared.  Runs under the global
+ * ctl mutex.
+ */
+static void mdss_mdp_ctl_perf_update(struct mdss_mdp_ctl *ctl,
+		int params_changed, bool stop_req)
+{
+	struct mdss_mdp_perf_params *new, *old;
+	int update_bus = 0, update_clk = 0;
+	struct mdss_data_type *mdata;
+	bool is_bw_released;
+	u32 clk_rate = 0;
+
+	if (!ctl || !ctl->mdata)
+		return;
+	ATRACE_BEGIN(__func__);
+	mutex_lock(&mdss_mdp_ctl_lock);
+
+	mdata = ctl->mdata;
+	old = &ctl->cur_perf;
+	new = &ctl->new_perf;
+
+	/*
+	 * We could have released the bandwidth if there were no transactions
+	 * pending, so we want to re-calculate the bandwidth in this situation.
+	 */
+	is_bw_released = !mdss_mdp_ctl_perf_get_transaction_status(ctl);
+
+	if (mdss_mdp_ctl_is_power_on(ctl)) {
+		/* Skip perf update if ctl is used for rotation */
+		if (ctl->mixer_left && ctl->mixer_left->rotator_mode)
+			goto end;
+
+		if (ctl->perf_release_ctl_bw &&
+			mdata->enable_rotator_bw_release)
+			mdss_mdp_perf_release_ctl_bw(ctl, new);
+		else if (is_bw_released || params_changed)
+			mdss_mdp_perf_calc_ctl(ctl, new);
+
+		/*
+		 * three cases for bus bandwidth update.
+		 * 1. new bandwidth vote or writeback output vote
+		 *    are higher than current vote for update request.
+		 * 2. new bandwidth vote or writeback output vote are
+		 *    lower than current vote at end of commit or stop.
+		 * 3. end of writeback/rotator session - last chance to
+		 *    non-realtime remove vote.
+		 */
+		if ((params_changed && ((new->bw_ctl > old->bw_ctl) ||
+			(new->bw_writeback > old->bw_writeback))) ||
+		    (!params_changed && ((new->bw_ctl < old->bw_ctl) ||
+			(new->bw_writeback < old->bw_writeback))) ||
+			(stop_req && mdss_mdp_is_nrt_ctl_path(ctl))) {
+
+			pr_debug("c=%d p=%d new_bw=%llu,old_bw=%llu\n",
+				ctl->num, params_changed, new->bw_ctl,
+				old->bw_ctl);
+			if (stop_req) {
+				old->bw_writeback = 0;
+				old->bw_ctl = 0;
+				old->max_per_pipe_ib = 0;
+			} else {
+				old->bw_ctl = new->bw_ctl;
+				old->max_per_pipe_ib = new->max_per_pipe_ib;
+				old->bw_writeback = new->bw_writeback;
+			}
+			bitmap_copy(old->bw_vote_mode, new->bw_vote_mode,
+				MDSS_MDP_BW_MODE_MAX);
+			update_bus = 1;
+		}
+
+		/*
+		 * If traffic shaper is enabled, we do not decrease the clock,
+		 * otherwise we would increase traffic shaper latency. Clock
+		 * would be decreased after traffic shaper is done.
+		 */
+		if ((params_changed && (new->mdp_clk_rate > old->mdp_clk_rate))
+			 || (!params_changed &&
+			 (new->mdp_clk_rate < old->mdp_clk_rate) &&
+			(false == is_traffic_shaper_enabled(mdata)))) {
+			old->mdp_clk_rate = new->mdp_clk_rate;
+			update_clk = 1;
+		}
+	} else {
+		/* powered off: drop all votes */
+		memset(old, 0, sizeof(*old));
+		memset(new, 0, sizeof(*new));
+		update_bus = 1;
+		update_clk = 1;
+	}
+
+	/*
+	 * Calculate mdp clock before bandwidth calculation. If traffic shaper
+	 * is enabled and clock increased, the bandwidth calculation can
+	 * use the new clock for the rotator bw calculation.
+	 */
+	if (update_clk)
+		clk_rate = mdss_mdp_get_mdp_clk_rate(mdata);
+
+	if (update_bus)
+		mdss_mdp_ctl_perf_update_bus(mdata, ctl, clk_rate);
+
+	/*
+	 * Update the clock after bandwidth vote to ensure
+	 * bandwidth is available before clock rate is increased.
+	 */
+	if (update_clk) {
+		ATRACE_INT("mdp_clk", clk_rate);
+		mdss_mdp_set_clk_rate(clk_rate);
+		pr_debug("update clk rate = %d HZ\n", clk_rate);
+	}
+
+end:
+	mutex_unlock(&mdss_mdp_ctl_lock);
+	ATRACE_END(__func__);
+}
+
+/*
+ * mdss_mdp_ctl_alloc() - reserve a free ctl starting at index @off.
+ * @mdata: global mdp data
+ * @off: first ctl index eligible for this caller
+ *
+ * Scans for an unreferenced ctl under the global ctl lock, initializes
+ * its locks and notifier head, and returns it with ref_cnt = 1.
+ * Returns NULL when every eligible ctl is in use.
+ */
+struct mdss_mdp_ctl *mdss_mdp_ctl_alloc(struct mdss_data_type *mdata,
+					       u32 off)
+{
+	struct mdss_mdp_ctl *ctl = NULL;
+	u32 cnum;
+	u32 nctl = mdata->nctl;
+
+	mutex_lock(&mdss_mdp_ctl_lock);
+	/* shared wfd mode exposes one extra virtual ctl */
+	if (mdata->wfd_mode == MDSS_MDP_WFD_SHARED)
+		nctl++;
+
+	for (cnum = off; cnum < nctl; cnum++) {
+		ctl = mdata->ctl_off + cnum;
+		if (ctl->ref_cnt == 0) {
+			ctl->ref_cnt++;
+			ctl->mdata = mdata;
+			mutex_init(&ctl->lock);
+			mutex_init(&ctl->offlock);
+			mutex_init(&ctl->flush_lock);
+			mutex_init(&ctl->rsrc_lock);
+			spin_lock_init(&ctl->spin_lock);
+			BLOCKING_INIT_NOTIFIER_HEAD(&ctl->notifier_head);
+			pr_debug("alloc ctl_num=%d\n", ctl->num);
+			break;
+		}
+		ctl = NULL;
+	}
+	mutex_unlock(&mdss_mdp_ctl_lock);
+
+	return ctl;
+}
+
+/*
+ * mdss_mdp_ctl_free() - release a ctl and its attached resources.
+ * @ctl: controller to release
+ *
+ * Frees any referenced mixers and the writeback block, then drops the
+ * ctl reference and resets its fields under the global ctl lock.  The
+ * ctl structure itself is not deallocated, only returned to the pool.
+ * Returns -ENODEV for a NULL ctl, -EINVAL when it is not referenced.
+ */
+int mdss_mdp_ctl_free(struct mdss_mdp_ctl *ctl)
+{
+	if (!ctl)
+		return -ENODEV;
+
+	pr_debug("free ctl_num=%d ref_cnt=%d\n", ctl->num, ctl->ref_cnt);
+
+	if (!ctl->ref_cnt) {
+		pr_err("called with ref_cnt=0\n");
+		return -EINVAL;
+	}
+
+	if (ctl->mixer_left && ctl->mixer_left->ref_cnt)
+		mdss_mdp_mixer_free(ctl->mixer_left);
+
+	if (ctl->mixer_right && ctl->mixer_right->ref_cnt)
+		mdss_mdp_mixer_free(ctl->mixer_right);
+
+	if (ctl->wb)
+		mdss_mdp_wb_free(ctl->wb);
+
+	mutex_lock(&mdss_mdp_ctl_lock);
+	ctl->ref_cnt--;
+	ctl->intf_num = MDSS_MDP_NO_INTF;
+	ctl->intf_type = MDSS_MDP_NO_INTF;
+	ctl->is_secure = false;
+	ctl->power_state = MDSS_PANEL_POWER_OFF;
+	ctl->mixer_left = NULL;
+	ctl->mixer_right = NULL;
+	ctl->wb = NULL;
+	ctl->cdm = NULL;
+	memset(&ctl->ops, 0, sizeof(ctl->ops));
+	mutex_unlock(&mdss_mdp_ctl_lock);
+
+	return 0;
+}
+
+/**
+ * mdss_mdp_mixer_alloc() - allocate mdp mixer.
+ * @ctl: mdp controller.
+ * @type: specifying type of mixer requested. interface or writeback.
+ * @mux: specifies if mixer allocation is for split_fb cases.
+ * @rotator: specifies if the mixer requested for rotator operations.
+ *
+ * This function is called to request allocation of mdp mixer
+ * during mdp controller path setup.
+ *
+ * Return: mdp mixer structure that is allocated.
+ *	   NULL if mixer allocation fails.
+ */
+struct mdss_mdp_mixer *mdss_mdp_mixer_alloc(
+		struct mdss_mdp_ctl *ctl, u32 type, int mux, int rotator)
+{
+	struct mdss_mdp_mixer *mixer = NULL, *alt_mixer = NULL;
+	u32 nmixers_intf;
+	u32 nmixers_wb;
+	u32 i;
+	u32 nmixers;
+	struct mdss_mdp_mixer *mixer_pool = NULL;
+
+	if (!ctl || !ctl->mdata)
+		return NULL;
+
+	mutex_lock(&mdss_mdp_ctl_lock);
+	nmixers_intf = ctl->mdata->nmixers_intf;
+	nmixers_wb = ctl->mdata->nmixers_wb;
+
+	/* pick the mixer pool matching the requested type */
+	switch (type) {
+	case MDSS_MDP_MIXER_TYPE_INTF:
+		mixer_pool = ctl->mdata->mixer_intf;
+		nmixers = nmixers_intf;
+
+		/*
+		 * try to reserve first layer mixer for write back if
+		 * assertive display needs to be supported through wfd
+		 */
+		if (ctl->mdata->has_wb_ad && ctl->intf_num &&
+			((ctl->panel_data->panel_info.type != MIPI_CMD_PANEL) ||
+			!mux)) {
+			alt_mixer = mixer_pool;
+			mixer_pool++;
+			nmixers--;
+		} else if ((ctl->panel_data->panel_info.type == WRITEBACK_PANEL)
+			&& (ctl->mdata->ndspp < nmixers)) {
+			/* writeback panels skip past the dspp-backed mixers */
+			mixer_pool += ctl->mdata->ndspp;
+			nmixers -= ctl->mdata->ndspp;
+		}
+		break;
+
+	case MDSS_MDP_MIXER_TYPE_WRITEBACK:
+		mixer_pool = ctl->mdata->mixer_wb;
+		nmixers = nmixers_wb;
+		if ((ctl->mdata->wfd_mode == MDSS_MDP_WFD_DEDICATED) && rotator)
+			mixer_pool = mixer_pool + nmixers;
+		break;
+
+	default:
+		nmixers = 0;
+		pr_err("invalid pipe type %d\n", type);
+		break;
+	}
+
+	/*Allocate virtual wb mixer if no dedicated wfd wb blk is present*/
+	if ((ctl->mdata->wfd_mode == MDSS_MDP_WFD_SHARED) &&
+			(type == MDSS_MDP_MIXER_TYPE_WRITEBACK))
+		nmixers += 1;
+
+	/* first unreferenced mixer in the selected pool wins */
+	for (i = 0; i < nmixers; i++) {
+		mixer = mixer_pool + i;
+		if (mixer->ref_cnt == 0)
+			break;
+		mixer = NULL;
+	}
+
+	/* pool exhausted: fall back to the mixer reserved for wb ad */
+	if (!mixer && alt_mixer && (alt_mixer->ref_cnt == 0))
+		mixer = alt_mixer;
+
+	if (mixer) {
+		mixer->ref_cnt++;
+		mixer->params_changed++;
+		mixer->ctl = ctl;
+		mixer->next_pipe_map = 0;
+		mixer->pipe_mapped = 0;
+		pr_debug("alloc mixer num %d for ctl=%d\n",
+				mixer->num, ctl->num);
+	}
+	mutex_unlock(&mdss_mdp_ctl_lock);
+
+	return mixer;
+}
+
+/*
+ * mdss_mdp_mixer_assign() - claim a specific mixer by id.
+ * @id: mixer index within the chosen pool
+ * @wb: select the writeback mixer pool instead of the interface pool
+ * @rot: in dedicated wfd mode, pick the rotator's dedicated wb mixer
+ *
+ * Returns the mixer with its ref/params counters bumped, or NULL when
+ * the id is out of range or the mixer is already in use.
+ */
+struct mdss_mdp_mixer *mdss_mdp_mixer_assign(u32 id, bool wb, bool rot)
+{
+	struct mdss_mdp_mixer *mixer = NULL;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	mutex_lock(&mdss_mdp_ctl_lock);
+
+	if (rot && (mdata->wfd_mode == MDSS_MDP_WFD_DEDICATED))
+		mixer = mdata->mixer_wb + mdata->nmixers_wb;
+	else if (wb && id < mdata->nmixers_wb)
+		mixer = mdata->mixer_wb + id;
+	else if (!wb && id < mdata->nmixers_intf)
+		mixer = mdata->mixer_intf + id;
+
+	if (mixer && mixer->ref_cnt == 0) {
+		mixer->ref_cnt++;
+		mixer->params_changed++;
+	} else {
+		pr_err("mixer is in use already = %d\n", id);
+		mixer = NULL;
+	}
+	mutex_unlock(&mdss_mdp_ctl_lock);
+	return mixer;
+}
+
+/*
+ * Drop one reference on @mixer and clear its right-mixer role.
+ * Returns -ENODEV for a NULL mixer, -EINVAL when it is unreferenced.
+ */
+int mdss_mdp_mixer_free(struct mdss_mdp_mixer *mixer)
+{
+	int rc = 0;
+
+	if (!mixer)
+		return -ENODEV;
+
+	pr_debug("free mixer_num=%d ref_cnt=%d\n", mixer->num, mixer->ref_cnt);
+
+	if (!mixer->ref_cnt) {
+		pr_err("called with ref_cnt=0\n");
+		rc = -EINVAL;
+	} else {
+		mutex_lock(&mdss_mdp_ctl_lock);
+		mixer->ref_cnt--;
+		mixer->is_right_mixer = false;
+		mutex_unlock(&mdss_mdp_ctl_lock);
+	}
+
+	return rc;
+}
+
+/*
+ * mdss_mdp_block_mixer_alloc() - set up a writeback ctl/mixer pair for
+ * rotation.
+ *
+ * Allocates a wb ctl, a writeback mixer in rotator mode and a rotator
+ * writeback block, wires them together and starts the ctl.  On any
+ * failure every partially acquired resource is unwound.  Returns the
+ * mixer, or NULL on failure.
+ */
+struct mdss_mdp_mixer *mdss_mdp_block_mixer_alloc(void)
+{
+	struct mdss_mdp_ctl *ctl = NULL;
+	struct mdss_mdp_mixer *mixer = NULL;
+	struct mdss_mdp_writeback *wb = NULL;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 offset = mdss_mdp_get_wb_ctl_support(mdata, true);
+	int ret = 0;
+
+	ctl = mdss_mdp_ctl_alloc(mdss_res, offset);
+	if (!ctl) {
+		pr_debug("unable to allocate wb ctl\n");
+		return NULL;
+	}
+
+	mixer = mdss_mdp_mixer_alloc(ctl, MDSS_MDP_MIXER_TYPE_WRITEBACK,
+							false, true);
+	if (!mixer) {
+		pr_debug("unable to allocate wb mixer\n");
+		goto error;
+	}
+
+	mixer->rotator_mode = 1;
+
+	/* select the rotator opmode matching the mixer we were given */
+	switch (mixer->num) {
+	case MDSS_MDP_WB_LAYERMIXER0:
+		ctl->opmode = MDSS_MDP_CTL_OP_ROT0_MODE;
+		break;
+	case MDSS_MDP_WB_LAYERMIXER1:
+		ctl->opmode = MDSS_MDP_CTL_OP_ROT1_MODE;
+		break;
+	default:
+		pr_err("invalid layer mixer=%d\n", mixer->num);
+		goto error;
+	}
+
+	wb = mdss_mdp_wb_alloc(MDSS_MDP_WB_ROTATOR, ctl->num);
+	if (!wb) {
+		pr_err("Unable to allocate writeback block\n");
+		goto error;
+	}
+
+	ctl->mixer_left = mixer;
+
+	ctl->ops.start_fnc = mdss_mdp_writeback_start;
+	ctl->power_state = MDSS_PANEL_POWER_ON;
+	ctl->wb_type = MDSS_MDP_WB_CTL_TYPE_BLOCK;
+	mixer->ctl = ctl;
+	ctl->wb = wb;
+
+	if (ctl->ops.start_fnc)
+		ret = ctl->ops.start_fnc(ctl);
+
+	if (!ret)
+		return mixer;
+error:
+	/* unwind whatever was acquired before the failure */
+	if (wb)
+		mdss_mdp_wb_free(wb);
+	if (mixer)
+		mdss_mdp_mixer_free(mixer);
+	if (ctl)
+		mdss_mdp_ctl_free(ctl);
+
+	return NULL;
+}
+
+/*
+ * mdss_mdp_block_mixer_destroy() - tear down a rotator ctl/mixer pair.
+ * @mixer: mixer previously returned by mdss_mdp_block_mixer_alloc()
+ *
+ * Stops the ctl, releases it back to the pool and refreshes the perf
+ * votes.  Returns 0 on success, -ENODEV on an invalid handle.
+ */
+int mdss_mdp_block_mixer_destroy(struct mdss_mdp_mixer *mixer)
+{
+	struct mdss_mdp_ctl *ctl;
+
+	if (!mixer || !mixer->ctl) {
+		pr_err("invalid ctl handle\n");
+		return -ENODEV;
+	}
+
+	ctl = mixer->ctl;
+	mixer->rotator_mode = 0;
+
+	pr_debug("destroy ctl=%d mixer=%d\n", ctl->num, mixer->num);
+
+	if (ctl->ops.stop_fnc)
+		ctl->ops.stop_fnc(ctl, MDSS_PANEL_POWER_OFF);
+
+	mdss_mdp_ctl_free(ctl);
+
+	/*
+	 * NOTE(review): the perf update runs after mdss_mdp_ctl_free();
+	 * free only drops the refcount and resets fields (it does not
+	 * deallocate the ctl), but confirm this ordering is intentional.
+	 */
+	mdss_mdp_ctl_perf_update(ctl, 0, true);
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_display_wakeup_time() - estimate the time of the next vsync.
+ * @ctl: controller driving the display
+ * @wakeup_time: output; absolute time when the next vsync is expected
+ *
+ * Uses the pixel clock and the panel's horizontal/vertical timings to
+ * convert the current line counter into a time-to-vsync, optionally
+ * trimmed by the panel's adjust_timer_delay_ms.  Returns 0 on success,
+ * -ENOTSUPP when no line-count hook exists, -ENODEV/-EINVAL on bad
+ * panel data or degenerate timing values.
+ */
+int mdss_mdp_display_wakeup_time(struct mdss_mdp_ctl *ctl,
+				 ktime_t *wakeup_time)
+{
+	struct mdss_panel_info *pinfo;
+	u64 clk_rate;
+	u32 clk_period;
+	u32 current_line, total_line;
+	u32 time_of_line, time_to_vsync, adjust_line_ns;
+
+	ktime_t current_time = ktime_get();
+
+	/* -ENOTSUPP: ENOTSUP is not defined by kernel errno headers */
+	if (!ctl->ops.read_line_cnt_fnc)
+		return -ENOTSUPP;
+
+	pinfo = &ctl->panel_data->panel_info;
+	if (!pinfo)
+		return -ENODEV;
+
+	clk_rate = mdss_mdp_get_pclk_rate(ctl);
+
+	clk_rate = DIV_ROUND_UP_ULL(clk_rate, 1000); /* in kHz */
+	if (!clk_rate)
+		return -EINVAL;
+
+	/*
+	 * calculate clk_period as pico second to maintain good
+	 * accuracy with high pclk rate and this number is in 17 bit
+	 * range.
+	 */
+	clk_period = DIV_ROUND_UP_ULL(1000000000, clk_rate);
+	if (!clk_period)
+		return -EINVAL;
+
+	time_of_line = (pinfo->lcdc.h_back_porch +
+		 pinfo->lcdc.h_front_porch +
+		 pinfo->lcdc.h_pulse_width +
+		 pinfo->xres) * clk_period;
+
+	time_of_line /= 1000;	/* in nano second */
+	if (!time_of_line)
+		return -EINVAL;
+
+	current_line = ctl->ops.read_line_cnt_fnc(ctl);
+
+	total_line = pinfo->lcdc.v_back_porch +
+		pinfo->lcdc.v_front_porch +
+		pinfo->lcdc.v_pulse_width +
+		pinfo->yres;
+
+	if (current_line >= total_line)
+		time_to_vsync = time_of_line * total_line;
+	else
+		time_to_vsync = time_of_line * (total_line - current_line);
+
+	if (pinfo->adjust_timer_delay_ms) {
+		adjust_line_ns = pinfo->adjust_timer_delay_ms
+			* 1000000; /* convert to ns */
+
+		/* Ignore large values of adjust_line_ns */
+		if (time_to_vsync > adjust_line_ns)
+			time_to_vsync -= adjust_line_ns;
+	}
+
+	if (!time_to_vsync)
+		return -EINVAL;
+
+	*wakeup_time = ktime_add_ns(current_time, time_to_vsync);
+
+	pr_debug("clk_rate=%lldkHz clk_period=%d cur_line=%d tot_line=%d\n",
+		clk_rate, clk_period, current_line, total_line);
+	pr_debug("time_to_vsync=%d current_time=%d wakeup_time=%d\n",
+		time_to_vsync, (int)ktime_to_ms(current_time),
+		(int)ktime_to_ms(*wakeup_time));
+
+	return 0;
+}
+
+/*
+ * Worker: program the cpu-pm event timer to fire at the next estimated
+ * vsync; bails out silently when the wakeup time cannot be computed.
+ */
+static void __cpu_pm_work_handler(struct work_struct *work)
+{
+	struct mdss_mdp_ctl *ctl =
+		container_of(work, typeof(*ctl), cpu_pm_work);
+	ktime_t wakeup_time;
+	struct mdss_overlay_private *mdp5_data;
+
+	/* NOTE(review): container_of() cannot yield NULL; check kept as-is */
+	if (!ctl)
+		return;
+
+	if (mdss_mdp_display_wakeup_time(ctl, &wakeup_time))
+		return;
+
+	mdp5_data = mfd_to_mdp5_data(ctl->mfd);
+	activate_event_timer(mdp5_data->cpu_pm_hdl, wakeup_time);
+}
+
+/* Event-timer callback: kick the cpu-pm worker while autorefresh runs. */
+void mdss_mdp_ctl_event_timer(void *data)
+{
+	struct mdss_overlay_private *mdp5_data = data;
+	struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
+
+	if (!mdp5_data->cpu_pm_hdl || !ctl || !ctl->autorefresh_frame_cnt)
+		return;
+
+	schedule_work(&ctl->cpu_pm_work);
+}
+
+/*
+ * mdss_mdp_ctl_cmd_set_autorefresh() - configure the autorefresh count.
+ * @ctl: controller (must drive a command-mode panel)
+ * @frame_cnt: refresh period in frames; 0 disables autorefresh
+ *
+ * Returns 0 on success, -EINVAL for non command-mode panels, or the
+ * error from the command-mode autorefresh setup.
+ */
+int mdss_mdp_ctl_cmd_set_autorefresh(struct mdss_mdp_ctl *ctl, int frame_cnt)
+{
+	int ret = 0;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(ctl->mfd);
+
+	if (ctl->panel_data->panel_info.type == MIPI_CMD_PANEL) {
+		ret = mdss_mdp_cmd_set_autorefresh_mode(ctl, frame_cnt);
+		if (!ret) {
+			ctl->autorefresh_frame_cnt = frame_cnt;
+			/* arm the wakeup timer while autorefresh is active */
+			if (frame_cnt)
+				mdss_mdp_ctl_event_timer(mdp5_data);
+		}
+	} else {
+		pr_err("Mode not supported for this panel\n");
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/* Return the autorefresh mode for command-mode panels, 0 otherwise. */
+int mdss_mdp_ctl_cmd_get_autorefresh(struct mdss_mdp_ctl *ctl)
+{
+	if (ctl->panel_data->panel_info.type != MIPI_CMD_PANEL)
+		return 0;
+
+	return mdss_mdp_cmd_get_autorefresh_mode(ctl);
+}
+
+/*
+ * Finish the bootloader splash handoff for the panel type driven by
+ * @ctl; panels without a handoff path simply return 0.
+ */
+int mdss_mdp_ctl_splash_finish(struct mdss_mdp_ctl *ctl, bool handoff)
+{
+	int ptype = ctl->panel_data->panel_info.type;
+
+	if (ptype == MIPI_CMD_PANEL)
+		return mdss_mdp_cmd_reconfigure_splash_done(ctl, handoff);
+
+	if (ptype == MIPI_VIDEO_PANEL || ptype == EDP_PANEL ||
+	    ptype == DTV_PANEL)
+		return mdss_mdp_video_reconfigure_splash_done(ctl, handoff);
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_set_split_ctl() - attach @split_ctl as the right half of @ctl.
+ * @ctl: main (left) controller
+ * @split_ctl: controller driving the right half of a split panel
+ *
+ * Returns 0 on success, -ENODEV on invalid arguments.
+ */
+static inline int mdss_mdp_set_split_ctl(struct mdss_mdp_ctl *ctl,
+		struct mdss_mdp_ctl *split_ctl)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_panel_info *pinfo;
+
+
+	if (!ctl || !split_ctl || !mdata)
+		return -ENODEV;
+
+	/* setup split ctl mixer as right mixer of original ctl so that
+	 * original ctl can work the same way as dual pipe solution
+	 */
+	ctl->mixer_right = split_ctl->mixer_left;
+	pinfo = &ctl->panel_data->panel_info;
+
+	/* add x offset from left ctl's border */
+	split_ctl->border_x_off += (pinfo->lcdc.border_left +
+					pinfo->lcdc.border_right);
+
+	return 0;
+}
+
+/* Enable DSC at the pingpong block feeding @mixer. */
+static inline void __dsc_enable(struct mdss_mdp_mixer *mixer)
+{
+	mdss_mdp_pingpong_write(mixer->pingpong_base,
+			MDSS_MDP_REG_PP_DSC_MODE, 1);
+}
+
+/*
+ * Disable DSC for @mixer: clear the pingpong DSC mode and the DSC
+ * block's common-mode register.  Only layer mixers 0/1 have DSC blocks.
+ */
+static inline void __dsc_disable(struct mdss_mdp_mixer *mixer)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	char __iomem *offset = mdata->mdp_base;
+
+	mdss_mdp_pingpong_write(mixer->pingpong_base,
+			MDSS_MDP_REG_PP_DSC_MODE, 0);
+
+	if (mixer->num == MDSS_MDP_INTF_LAYERMIXER0) {
+		offset += MDSS_MDP_DSC_0_OFFSET;
+	} else if (mixer->num == MDSS_MDP_INTF_LAYERMIXER1) {
+		offset += MDSS_MDP_DSC_1_OFFSET;
+	} else {
+		pr_err("invalid mixer numer=%d\n", mixer->num);
+		return;
+	}
+	writel_relaxed(0, offset + MDSS_MDP_REG_DSC_COMMON_MODE);
+}
+
+/*
+ * Program one DSC encoder block with the parameters held in @dsc.
+ *
+ * @mixer: layer mixer whose paired DSC block (DSC0/DSC1) is programmed
+ * @dsc:   pre-computed DSC parameters for the current picture
+ * @mode:  DSC common-mode word (split/merge/video bits)
+ * @ich_reset_override: force ich_reset at end of slice line (see
+ *                      __is_ich_reset_override_needed())
+ */
+static void __dsc_config(struct mdss_mdp_mixer *mixer,
+	struct dsc_desc *dsc, u32 mode, bool ich_reset_override)
+{
+	u32 data;
+	int bpp, lsb;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	char __iomem *offset = mdata->mdp_base;
+	u32 initial_lines = dsc->initial_lines;
+	bool is_cmd_mode = !(mode & BIT(2));
+
+	data = mdss_mdp_pingpong_read(mixer->pingpong_base,
+			MDSS_MDP_REG_PP_DCE_DATA_OUT_SWAP);
+	data |= BIT(18); /* endian flip */
+	mdss_mdp_pingpong_write(mixer->pingpong_base,
+		MDSS_MDP_REG_PP_DCE_DATA_OUT_SWAP, data);
+
+	if (mixer->num == MDSS_MDP_INTF_LAYERMIXER0) {
+		offset += MDSS_MDP_DSC_0_OFFSET;
+	} else if (mixer->num == MDSS_MDP_INTF_LAYERMIXER1) {
+		offset += MDSS_MDP_DSC_1_OFFSET;
+	} else {
+		pr_err("invalid mixer number=%d\n", mixer->num);
+		return;
+	}
+
+	writel_relaxed(mode, offset + MDSS_MDP_REG_DSC_COMMON_MODE);
+
+	data = 0;
+	if (ich_reset_override)
+		data = 3 << 28;
+
+	/* command mode needs one extra line of initial latency */
+	if (is_cmd_mode)
+		initial_lines += 1;
+
+	data |= (initial_lines << 20);
+	data |= ((dsc->slice_last_group_size - 1) << 18);
+	/* bpp is 6.4 format, 4 LSBs bits are for fractional part */
+	lsb = dsc->bpp % 4;
+	bpp = dsc->bpp / 4;
+	bpp *= 4;	/* either 8 or 12 */
+	bpp <<= 4;
+	bpp |= lsb;
+	data |= (bpp << 8);
+	data |= (dsc->block_pred_enable << 7);
+	data |= (dsc->line_buf_depth << 3);
+	data |= (dsc->enable_422 << 2);
+	data |= (dsc->convert_rgb << 1);
+	data |= dsc->input_10_bits;
+
+	pr_debug("%d %d %d %d %d %d %d %d %d, data=%x\n",
+		ich_reset_override,
+		initial_lines, dsc->slice_last_group_size,
+		dsc->bpp, dsc->block_pred_enable, dsc->line_buf_depth,
+		dsc->enable_422, dsc->convert_rgb, dsc->input_10_bits, data);
+
+	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_ENC);
+
+	data = dsc->pic_width << 16;
+	data |= dsc->pic_height;
+	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_PICTURE);
+
+	data = dsc->slice_width << 16;
+	data |= dsc->slice_height;
+	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_SLICE);
+
+	data = dsc->chunk_size << 16;
+	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_CHUNK_SIZE);
+
+	pr_debug("mix%d pic_w=%d pic_h=%d, slice_w=%d slice_h=%d, chunk=%d\n",
+		mixer->num, dsc->pic_width, dsc->pic_height,
+		dsc->slice_width, dsc->slice_height, dsc->chunk_size);
+	MDSS_XLOG(mixer->num, dsc->pic_width, dsc->pic_height,
+		dsc->slice_width, dsc->slice_height, dsc->chunk_size);
+
+	data = dsc->initial_dec_delay << 16;
+	data |= dsc->initial_xmit_delay;
+	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_DELAY);
+
+	data = dsc->initial_scale_value;
+	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_SCALE_INITIAL);
+
+	data = dsc->scale_decrement_interval;
+	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_SCALE_DEC_INTERVAL);
+
+	data = dsc->scale_increment_interval;
+	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_SCALE_INC_INTERVAL);
+
+	data = dsc->first_line_bpg_offset;
+	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_FIRST_LINE_BPG_OFFSET);
+
+	data = dsc->nfl_bpg_offset << 16;
+	data |= dsc->slice_bpg_offset;
+	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_BPG_OFFSET);
+
+	data = dsc->initial_offset << 16;
+	data |= dsc->final_offset;
+	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_DSC_OFFSET);
+
+	data = dsc->det_thresh_flatness << 10;
+	data |= dsc->max_qp_flatness << 5;
+	data |= dsc->min_qp_flatness;
+	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_FLATNESS);
+	/*
+	 * NOTE(review): the computed flatness word above is immediately
+	 * overwritten by the fixed value 0x983, making the first write
+	 * dead - confirm whether both writes are really required.
+	 */
+	writel_relaxed(0x983, offset + MDSS_MDP_REG_DSC_FLATNESS);
+
+	data = dsc->rc_model_size;	/* rate_buffer_size */
+	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_RC_MODEL_SIZE);
+
+	data = dsc->tgt_offset_lo << 18;
+	data |= dsc->tgt_offset_hi << 14;
+	data |= dsc->quant_incr_limit1 << 9;
+	data |= dsc->quant_incr_limit0 << 4;
+	data |= dsc->edge_factor;
+	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_RC);
+}
+
+/*
+ * Program the DSC rate-control lookup tables for the DSC block paired
+ * with this mixer: the buffer-fullness thresholds and the per-range
+ * min/max QP and BPG offset tables. Table lengths (14 thresholds,
+ * 15 ranges) follow the DSC rate-control model; registers are spaced
+ * 4 bytes apart.
+ */
+static void __dsc_config_thresh(struct mdss_mdp_mixer *mixer,
+	struct dsc_desc *dsc)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	char __iomem *offset, *off;
+	u32 *lp;
+	char *cp;
+	int i;
+
+	offset = mdata->mdp_base;
+
+	if (mixer->num == MDSS_MDP_INTF_LAYERMIXER0) {
+		offset += MDSS_MDP_DSC_0_OFFSET;
+	} else if (mixer->num == MDSS_MDP_INTF_LAYERMIXER1) {
+		offset += MDSS_MDP_DSC_1_OFFSET;
+	} else {
+		pr_err("invalid mixer number=%d\n", mixer->num);
+		return;
+	}
+
+	lp = dsc->buf_thresh;
+	off = offset + MDSS_MDP_REG_DSC_RC_BUF_THRESH;
+	for (i = 0; i < 14; i++) {
+		writel_relaxed(*lp++, off);
+		off += 4;
+	}
+
+	cp = dsc->range_min_qp;
+	off = offset + MDSS_MDP_REG_DSC_RANGE_MIN_QP;
+	for (i = 0; i < 15; i++) {
+		writel_relaxed(*cp++, off);
+		off += 4;
+	}
+
+	cp = dsc->range_max_qp;
+	off = offset + MDSS_MDP_REG_DSC_RANGE_MAX_QP;
+	for (i = 0; i < 15; i++) {
+		writel_relaxed(*cp++, off);
+		off += 4;
+	}
+
+	cp = dsc->range_bpg_offset;
+	off = offset + MDSS_MDP_REG_DSC_RANGE_BPG_OFFSET;
+	for (i = 0; i < 15; i++) {
+		writel_relaxed(*cp++, off);
+		off += 4;
+	}
+}
+
+/* DSC merge is signalled by bit 1 of the DSC common-mode word. */
+static bool __is_dsc_merge_enabled(u32 common_mode)
+{
+	return (common_mode & BIT(1)) != 0;
+}
+
+/*
+ * 3D mux is in use when one display drives both layer mixers of a
+ * single ctl but only one DSC encoder compresses the merged output.
+ */
+static bool __dsc_is_3d_mux_enabled(struct mdss_mdp_ctl *ctl,
+	struct mdss_panel_info *pinfo)
+{
+	if (!ctl || !pinfo)
+		return false;
+
+	return is_dual_lm_single_display(ctl->mfd) &&
+	       (pinfo->dsc_enc_total == 1);
+}
+
+/* must be called from master ctl */
+static u32 __dsc_get_common_mode(struct mdss_mdp_ctl *ctl, bool mux_3d)
+{
+	u32 common_mode = 0;
+
+	if (ctl->is_video_mode)
+		common_mode = BIT(2);
+
+	if (mdss_mdp_is_both_lm_valid(ctl))
+		common_mode |= BIT(0);
+
+	if (is_dual_lm_single_display(ctl->mfd)) {
+		if (mux_3d)
+			common_mode &= ~BIT(0);
+		else if (mdss_mdp_is_both_lm_valid(ctl)) /* dsc_merge */
+			common_mode |= BIT(1);
+	}
+
+	return common_mode;
+}
+
+/*
+ * Compute the composite picture dimensions from the active mixer ROIs:
+ * widths of valid mixers accumulate, height comes from whichever mixer
+ * is valid (the right one wins when both are).
+ */
+static void __dsc_get_pic_dim(struct mdss_mdp_mixer *mixer_l,
+	struct mdss_mdp_mixer *mixer_r, u32 *pic_w, u32 *pic_h)
+{
+	u32 w = 0, h = 0;
+
+	if (mixer_l && mixer_l->valid_roi) {
+		w = mixer_l->roi.w;
+		h = mixer_l->roi.h;
+	}
+
+	if (mixer_r && mixer_r->valid_roi) {
+		w += mixer_r->roi.w;
+		h = mixer_r->roi.h;
+	}
+
+	*pic_w = w;
+	*pic_h = h;
+}
+
+/*
+ * Decide whether HW's ich_reset placement must be overridden.
+ *
+ * Per the DSC spec, ICH_RESET may occur at the end of each slice line
+ * or at the end of the slice. HW generates it at end of slice line
+ * when DSC_MERGE is used or the encoder has two soft slices, but at
+ * end of slice when a single encoder runs one soft slice. Within one
+ * PPS session the position must not change: a full frame with more
+ * than one soft slice uses end-of-line, so a partial update driving
+ * one encoder with one slice (slice width == pic width) would flip to
+ * end-of-slice. Override HW's decision in that case.
+ */
+static bool __is_ich_reset_override_needed(bool pu_en, struct dsc_desc *dsc)
+{
+	if (!pu_en || !dsc)
+		return false;
+
+	return (dsc->full_frame_slices > 1) &&
+	       (dsc->slice_width == dsc->pic_width);
+}
+
+/*
+ * Configure DSC for the dual-LM single-display topology: one panel fed
+ * by two layer mixers of the master ctl. Handles 3D mux vs DSC merge,
+ * pp-split selection for partial update, parameter recalculation for
+ * the current ROI, and per-mixer DSC enable/disable.
+ */
+static void __dsc_setup_dual_lm_single_display(struct mdss_mdp_ctl *ctl,
+	struct mdss_panel_info *pinfo)
+{
+	u32 pic_width = 0, pic_height = 0;
+	u32 intf_ip_w, enc_ip_w, common_mode, this_frame_slices;
+	bool valid_l, valid_r;
+	bool enable_right_dsc;
+	bool mux_3d, ich_reset_override;
+	struct dsc_desc *dsc;
+	struct mdss_mdp_mixer *mixer_l, *mixer_r;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!pinfo || !ctl || !ctl->is_master ||
+	    !is_dual_lm_single_display(ctl->mfd))
+		return;
+
+	dsc = &pinfo->dsc;
+	mixer_l = ctl->mixer_left;
+	mixer_r = ctl->mixer_right;
+
+	mux_3d = __dsc_is_3d_mux_enabled(ctl, pinfo);
+	common_mode = __dsc_get_common_mode(ctl, mux_3d);
+	__dsc_get_pic_dim(mixer_l, mixer_r, &pic_width, &pic_height);
+
+	valid_l = mixer_l->valid_roi;
+	valid_r = mixer_r->valid_roi;
+	if (mdss_mdp_is_lm_swap_needed(mdata, ctl)) {
+		valid_l = true;
+		valid_r = false;
+	}
+
+	this_frame_slices = pic_width / dsc->slice_width;
+
+	/* enable or disable pp_split + DSC_Merge based on partial update */
+	if ((pinfo->partial_update_enabled) && !mux_3d &&
+	    (dsc->full_frame_slices == 4) &&
+	    (mdss_has_quirk(mdata, MDSS_QUIRK_DSC_2SLICE_PU_THRPUT))) {
+
+		if (valid_l && valid_r) {
+			/* left + right */
+			pr_debug("full line (4 slices) or middle 2 slice partial update\n");
+			writel_relaxed(0x0,
+				mdata->mdp_base + mdata->ppb_ctl[0]);
+			writel_relaxed(0x0,
+				mdata->mdp_base + MDSS_MDP_REG_DCE_SEL);
+		} else if (valid_l || valid_r) {
+			/* left-only or right-only */
+			if (this_frame_slices == 2) {
+				pr_debug("2 slice partial update, use merge\n");
+
+				/* tandem + merge */
+				common_mode = BIT(1) | BIT(0);
+
+				valid_r = true;
+				valid_l = true;
+
+				writel_relaxed(0x2 << 4, mdata->mdp_base +
+					mdata->ppb_ctl[0]);
+				writel_relaxed(BIT(0),
+					mdata->mdp_base + MDSS_MDP_REG_DCE_SEL);
+			} else {
+				pr_debug("only one slice partial update\n");
+				writel_relaxed(0x0, mdata->mdp_base +
+					mdata->ppb_ctl[0]);
+				writel_relaxed(0x0, mdata->mdp_base +
+					MDSS_MDP_REG_DCE_SEL);
+			}
+		}
+	} else {
+		writel_relaxed(0x0, mdata->mdp_base + MDSS_MDP_REG_DCE_SEL);
+	}
+
+	mdss_panel_dsc_update_pic_dim(dsc, pic_width, pic_height);
+
+	intf_ip_w = this_frame_slices * dsc->slice_width;
+	mdss_panel_dsc_pclk_param_calc(dsc, intf_ip_w);
+
+	enc_ip_w = intf_ip_w;
+	/* if dsc_merge, both encoders work on same number of slices */
+	if (__is_dsc_merge_enabled(common_mode))
+		enc_ip_w /= 2;
+	mdss_panel_dsc_initial_line_calc(dsc, enc_ip_w);
+
+	/*
+	 * __is_ich_reset_override_needed should be called only after
+	 * updating pic dimension, mdss_panel_dsc_update_pic_dim.
+	 */
+	ich_reset_override = __is_ich_reset_override_needed(
+					pinfo->partial_update_enabled, dsc);
+	if (valid_l) {
+		__dsc_config(mixer_l, dsc, common_mode, ich_reset_override);
+		__dsc_config_thresh(mixer_l, dsc);
+		__dsc_enable(mixer_l);
+	} else {
+		__dsc_disable(mixer_l);
+	}
+
+	/* with 3D mux, the left DSC alone covers both mixers */
+	enable_right_dsc = valid_r;
+	if (mux_3d && valid_l)
+		enable_right_dsc = false;
+
+	if (enable_right_dsc) {
+		__dsc_config(mixer_r, dsc, common_mode, ich_reset_override);
+		__dsc_config_thresh(mixer_r, dsc);
+		__dsc_enable(mixer_r);
+	} else {
+		__dsc_disable(mixer_r);
+	}
+
+	pr_debug("mix%d: valid_l=%d mix%d: valid_r=%d mode=%d, pic_dim:%dx%d mux_3d=%d intf_ip_w=%d enc_ip_w=%d ich_ovrd=%d\n",
+		mixer_l->num, valid_l, mixer_r->num, valid_r,
+		common_mode, pic_width, pic_height,
+		mux_3d, intf_ip_w, enc_ip_w, ich_reset_override);
+
+	MDSS_XLOG(mixer_l->num, valid_l, mixer_r->num, valid_r,
+		  common_mode, pic_width, pic_height,
+		  mux_3d, intf_ip_w, enc_ip_w, ich_reset_override);
+}
+
+/*
+ * Configure DSC for the dual-LM dual-display topology: master and
+ * slave ctl each drive their own panel/DSC encoder. Both encoders
+ * share the same picture dimensions but keep separate dsc_desc state
+ * (one per panel info).
+ */
+static void __dsc_setup_dual_lm_dual_display(
+	struct mdss_mdp_ctl *ctl, struct mdss_panel_info *pinfo,
+	struct mdss_mdp_ctl *sctl, struct mdss_panel_info *spinfo)
+{
+	u32 pic_width = 0, pic_height = 0;
+	u32 intf_ip_w, enc_ip_w, common_mode, this_frame_slices;
+	bool valid_l, valid_r;
+	bool ich_reset_override;
+	struct dsc_desc *dsc_l, *dsc_r;
+	struct mdss_mdp_mixer *mixer_l, *mixer_r;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!pinfo || !ctl || !sctl || !spinfo ||
+	    !ctl->is_master || !ctl->mfd ||
+	    (ctl->mfd->split_mode != MDP_DUAL_LM_DUAL_DISPLAY))
+		return;
+
+	dsc_l = &pinfo->dsc;
+	dsc_r = &spinfo->dsc;
+
+	mixer_l = ctl->mixer_left;
+	mixer_r = ctl->mixer_right;
+
+	common_mode = __dsc_get_common_mode(ctl, false);
+	/*
+	 * In this topology, both DSC use same pic dimension. So no need to
+	 * maintain two separate local copies.
+	 */
+	__dsc_get_pic_dim(mixer_l, mixer_r, &pic_width, &pic_height);
+
+	valid_l = mixer_l->valid_roi;
+	valid_r = mixer_r->valid_roi;
+	if (mdss_mdp_is_lm_swap_needed(mdata, ctl)) {
+		valid_l = true;
+		valid_r = false;
+	}
+
+	/*
+	 * Since both DSC use same pic dimension, set same pic dimension
+	 * to both DSC structures.
+	 */
+	mdss_panel_dsc_update_pic_dim(dsc_l, pic_width, pic_height);
+	mdss_panel_dsc_update_pic_dim(dsc_r, pic_width, pic_height);
+
+	this_frame_slices = pic_width / dsc_l->slice_width;
+	intf_ip_w = this_frame_slices * dsc_l->slice_width;
+	/* when both interfaces are active each carries half the width */
+	if (valid_l && valid_r)
+		intf_ip_w /= 2;
+	/*
+	 * In this topology when both interfaces are active, they have same
+	 * load so intf_ip_w will be same.
+	 */
+	mdss_panel_dsc_pclk_param_calc(dsc_l, intf_ip_w);
+	mdss_panel_dsc_pclk_param_calc(dsc_r, intf_ip_w);
+
+	/*
+	 * In this topology, since there is no dsc_merge, uncompressed input
+	 * to encoder and interface is same.
+	 */
+	enc_ip_w = intf_ip_w;
+	mdss_panel_dsc_initial_line_calc(dsc_l, enc_ip_w);
+	mdss_panel_dsc_initial_line_calc(dsc_r, enc_ip_w);
+
+	/*
+	 * __is_ich_reset_override_needed should be called only after
+	 * updating pic dimension, mdss_panel_dsc_update_pic_dim.
+	 */
+	ich_reset_override = __is_ich_reset_override_needed(
+					pinfo->partial_update_enabled, dsc_l);
+
+	if (valid_l) {
+		__dsc_config(mixer_l, dsc_l, common_mode, ich_reset_override);
+		__dsc_config_thresh(mixer_l, dsc_l);
+		__dsc_enable(mixer_l);
+	} else {
+		__dsc_disable(mixer_l);
+	}
+
+	if (valid_r) {
+		__dsc_config(mixer_r, dsc_r, common_mode, ich_reset_override);
+		__dsc_config_thresh(mixer_r, dsc_r);
+		__dsc_enable(mixer_r);
+	} else {
+		__dsc_disable(mixer_r);
+	}
+
+	pr_debug("mix%d: valid_l=%d mix%d: valid_r=%d mode=%d, pic_dim:%dx%d intf_ip_w=%d enc_ip_w=%d ich_ovrd=%d\n",
+		mixer_l->num, valid_l, mixer_r->num, valid_r,
+		common_mode, pic_width, pic_height,
+		intf_ip_w, enc_ip_w, ich_reset_override);
+
+	MDSS_XLOG(mixer_l->num, valid_l, mixer_r->num, valid_r,
+		  common_mode, pic_width, pic_height,
+		  intf_ip_w, enc_ip_w, ich_reset_override);
+}
+
+/*
+ * Configure DSC for the simplest topology: one layer mixer driving one
+ * display with one DSC encoder. Recomputes the DSC parameters for the
+ * current mixer ROI and enables/disables the encoder accordingly.
+ */
+static void __dsc_setup_single_lm_single_display(struct mdss_mdp_ctl *ctl,
+	struct mdss_panel_info *pinfo)
+{
+	u32 pic_width = 0, pic_height = 0;
+	u32 intf_ip_w, enc_ip_w, common_mode, this_frame_slices;
+	bool valid;
+	bool ich_reset_override;
+	struct dsc_desc *dsc;
+	struct mdss_mdp_mixer *mixer;
+
+	if (!pinfo || !ctl || !ctl->is_master)
+		return;
+
+	dsc = &pinfo->dsc;
+	mixer = ctl->mixer_left;
+	valid = mixer->valid_roi;
+
+	common_mode = __dsc_get_common_mode(ctl, false);
+	__dsc_get_pic_dim(mixer, NULL, &pic_width, &pic_height);
+
+	mdss_panel_dsc_update_pic_dim(dsc, pic_width, pic_height);
+
+	this_frame_slices = pic_width / dsc->slice_width;
+	intf_ip_w = this_frame_slices * dsc->slice_width;
+	mdss_panel_dsc_pclk_param_calc(dsc, intf_ip_w);
+
+	/* no dsc_merge here, so encoder input equals interface input */
+	enc_ip_w = intf_ip_w;
+	mdss_panel_dsc_initial_line_calc(dsc, enc_ip_w);
+
+	/*
+	 * __is_ich_reset_override_needed should be called only after
+	 * updating pic dimension, mdss_panel_dsc_update_pic_dim.
+	 */
+	ich_reset_override = __is_ich_reset_override_needed(
+					pinfo->partial_update_enabled, dsc);
+	if (valid) {
+		__dsc_config(mixer, dsc, common_mode, ich_reset_override);
+		__dsc_config_thresh(mixer, dsc);
+		__dsc_enable(mixer);
+	} else {
+		__dsc_disable(mixer);
+	}
+
+	pr_debug("mix%d: valid=%d mode=%d, pic_dim:%dx%d intf_ip_w=%d enc_ip_w=%d ich_ovrd=%d\n",
+		mixer->num, valid, common_mode, pic_width, pic_height,
+		intf_ip_w, enc_ip_w, ich_reset_override);
+
+	MDSS_XLOG(mixer->num, valid, common_mode, pic_width, pic_height,
+		  intf_ip_w, enc_ip_w, ich_reset_override);
+}
+
+/*
+ * mdss_mdp_ctl_dsc_setup() - program DSC for the display topology
+ * @ctl:   mdp controller; only the master programs DSC (it covers the
+ *         slave too), slave calls are ignored.
+ * @pinfo: panel info carrying the DSC parameters.
+ *
+ * Dispatches to the topology-specific setup based on the fb split
+ * mode. No-op if the panel does not use DSC compression.
+ */
+void mdss_mdp_ctl_dsc_setup(struct mdss_mdp_ctl *ctl,
+	struct mdss_panel_info *pinfo)
+{
+	struct mdss_mdp_ctl *sctl;
+	struct mdss_panel_info *spinfo;
+
+	if (!is_dsc_compression(pinfo))
+		return;
+
+	if (!ctl->is_master) {
+		pr_debug("skip slave ctl because master will program for both\n");
+		return;
+	}
+
+	switch (ctl->mfd->split_mode) {
+	case MDP_DUAL_LM_SINGLE_DISPLAY:
+		__dsc_setup_dual_lm_single_display(ctl, pinfo);
+		break;
+	case MDP_DUAL_LM_DUAL_DISPLAY:
+		sctl = mdss_mdp_get_split_ctl(ctl);
+		if (sctl) {
+			spinfo = &sctl->panel_data->panel_info;
+			__dsc_setup_dual_lm_dual_display(ctl, pinfo, sctl,
+					spinfo);
+		}
+		break;
+	default:
+		/* pp_split is not supported yet */
+		__dsc_setup_single_lm_single_display(ctl, pinfo);
+		break;
+	}
+}
+
+/*
+ * Enable or disable FBC (frame buffer compression) on a mixer's
+ * pingpong block. When @enable is 0 the mode/budget/lossy registers
+ * are cleared (local variables stay 0). Returns 0 on success, -EINVAL
+ * if pdata is missing, FBC is not enabled in the panel info, or the
+ * mixer is not LM0/LM1.
+ */
+static int mdss_mdp_ctl_fbc_enable(int enable,
+		struct mdss_mdp_mixer *mixer, struct mdss_panel_info *pdata)
+{
+	struct fbc_panel_info *fbc;
+	u32 mode = 0, budget_ctl = 0, lossy_mode = 0, width;
+
+	if (!pdata) {
+		pr_err("Invalid pdata\n");
+		return -EINVAL;
+	}
+
+	fbc = &pdata->fbc;
+
+	if (!fbc->enabled) {
+		pr_debug("FBC not enabled\n");
+		return -EINVAL;
+	}
+
+	/* FBC hardware exists only behind layer mixers 0 and 1 */
+	if (mixer->num == MDSS_MDP_INTF_LAYERMIXER0 ||
+			mixer->num == MDSS_MDP_INTF_LAYERMIXER1) {
+		pr_debug("Mixer supports FBC.\n");
+	} else {
+		pr_debug("Mixer doesn't support FBC.\n");
+		return -EINVAL;
+	}
+
+	if (enable) {
+		if (fbc->enc_mode && pdata->bpp) {
+			/* width is the compressed width */
+			width = mult_frac(pdata->xres, fbc->target_bpp,
+					pdata->bpp);
+		} else {
+			/* width is the source width */
+			width = pdata->xres;
+		}
+
+		mode = ((width) << 16) | ((fbc->slice_height) << 11) |
+			((fbc->pred_mode) << 10) | ((fbc->enc_mode) << 9) |
+			((fbc->comp_mode) << 8) | ((fbc->qerr_enable) << 7) |
+			((fbc->cd_bias) << 4) | ((fbc->pat_enable) << 3) |
+			((fbc->vlc_enable) << 2) | ((fbc->bflc_enable) << 1) |
+			enable;
+
+		budget_ctl = ((fbc->line_x_budget) << 12) |
+			((fbc->block_x_budget) << 8) | fbc->block_budget;
+
+		lossy_mode = ((fbc->max_pred_err) << 28) |
+			((fbc->lossless_mode_thd) << 16) |
+			((fbc->lossy_mode_thd) << 8) |
+			((fbc->lossy_rgb_thd) << 4) | fbc->lossy_mode_idx;
+	}
+
+	/* on disable these write zeros, turning FBC off in hardware */
+	mdss_mdp_pingpong_write(mixer->pingpong_base,
+		MDSS_MDP_REG_PP_FBC_MODE, mode);
+	mdss_mdp_pingpong_write(mixer->pingpong_base,
+		MDSS_MDP_REG_PP_FBC_BUDGET_CTL, budget_ctl);
+	mdss_mdp_pingpong_write(mixer->pingpong_base,
+		MDSS_MDP_REG_PP_FBC_LOSSY_MODE, lossy_mode);
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_ctl_setup() - size the ctl path and allocate its mixers
+ * @ctl: mdp controller backed by valid panel data.
+ *
+ * Derives ctl/mixer dimensions and ROIs from the panel info, allocates
+ * the left (and if needed right) layer mixer, and sets/clears the 3D
+ * pack opmode bits. Writeback panels are ignored. Returns 0 on
+ * success or a negative errno on allocation/validation failure.
+ */
+int mdss_mdp_ctl_setup(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_ctl *split_ctl;
+	u32 width, height;
+	int split_fb, rc = 0;
+	u32 max_mixer_width;
+	struct mdss_panel_info *pinfo;
+
+	if (!ctl || !ctl->panel_data) {
+		pr_err("invalid ctl handle\n");
+		return -ENODEV;
+	}
+
+	pinfo = &ctl->panel_data->panel_info;
+	if (pinfo->type == WRITEBACK_PANEL) {
+		pr_err("writeback panel, ignore\n");
+		return 0;
+	}
+
+	split_ctl = mdss_mdp_get_split_ctl(ctl);
+
+	width = get_panel_width(ctl);
+	height = get_panel_yres(pinfo);
+
+	max_mixer_width = ctl->mdata->max_mixer_width;
+
+	/* split_fb: one fb split across two mixers, each within HW limit */
+	split_fb = ((is_dual_lm_single_display(ctl->mfd)) &&
+		    (ctl->mfd->split_fb_left <= max_mixer_width) &&
+		    (ctl->mfd->split_fb_right <= max_mixer_width)) ? 1 : 0;
+	pr_debug("max=%d xres=%d left=%d right=%d\n", max_mixer_width,
+		 width, ctl->mfd->split_fb_left, ctl->mfd->split_fb_right);
+
+	if ((split_ctl && (width > max_mixer_width)) ||
+			(width > (2 * max_mixer_width))) {
+		pr_err("Unsupported panel resolution: %dx%d\n", width, height);
+		return -ENOTSUPP;
+	}
+
+	ctl->width = width;
+	ctl->height = height;
+	ctl->roi = (struct mdss_rect) {0, 0, width, height};
+
+	if (!ctl->mixer_left) {
+		ctl->mixer_left =
+			mdss_mdp_mixer_alloc(ctl, MDSS_MDP_MIXER_TYPE_INTF,
+			 ((width > max_mixer_width) || split_fb), 0);
+		if (!ctl->mixer_left) {
+			pr_err("unable to allocate layer mixer\n");
+			return -ENOMEM;
+		} else if (split_fb && ctl->mixer_left->num >= 1 &&
+			(ctl->panel_data->panel_info.type == MIPI_CMD_PANEL)) {
+			/*
+			 * NOTE(review): the mixer allocated above is not
+			 * freed on this error path - confirm whether that
+			 * is intentional.
+			 */
+			pr_err("use only DSPP0 and DSPP1 with cmd split\n");
+			return -EPERM;
+		}
+	}
+
+	if (split_fb) {
+		width = ctl->mfd->split_fb_left;
+		width += (pinfo->lcdc.border_left +
+				pinfo->lcdc.border_right);
+	} else if (width > max_mixer_width) {
+		width /= 2;
+	}
+
+	ctl->mixer_left->width = width;
+	ctl->mixer_left->height = height;
+	ctl->mixer_left->roi = (struct mdss_rect) {0, 0, width, height};
+	ctl->mixer_left->valid_roi = true;
+	ctl->mixer_left->roi_changed = true;
+
+	if (ctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
+		/* right mixer belongs to the slave ctl in this mode */
+		pr_debug("dual display detected\n");
+	} else {
+		if (split_fb)
+			width = ctl->mfd->split_fb_right;
+
+		if (width < ctl->width) {
+			if (ctl->mixer_right == NULL) {
+				ctl->mixer_right = mdss_mdp_mixer_alloc(ctl,
+					MDSS_MDP_MIXER_TYPE_INTF, true, 0);
+				if (!ctl->mixer_right) {
+					pr_err("unable to allocate right mixer\n");
+					if (ctl->mixer_left)
+						mdss_mdp_mixer_free(
+							ctl->mixer_left);
+					return -ENOMEM;
+				}
+			}
+			ctl->mixer_right->is_right_mixer = true;
+			ctl->mixer_right->width = width;
+			ctl->mixer_right->height = height;
+			ctl->mixer_right->roi = (struct mdss_rect)
+						{0, 0, width, height};
+			ctl->mixer_right->valid_roi = true;
+			ctl->mixer_right->roi_changed = true;
+		} else if (ctl->mixer_right) {
+			/* full width fits one mixer; release the right one */
+			ctl->mixer_right->valid_roi = false;
+			ctl->mixer_right->roi_changed = false;
+			mdss_mdp_mixer_free(ctl->mixer_right);
+			ctl->mixer_right = NULL;
+		}
+
+		if (ctl->mixer_right) {
+			/* 3D pack merges the two mixers unless DSC does it */
+			if (!is_dsc_compression(pinfo) ||
+				(pinfo->dsc_enc_total == 1))
+				ctl->opmode |= MDSS_MDP_CTL_OP_PACK_3D_ENABLE |
+				       MDSS_MDP_CTL_OP_PACK_3D_H_ROW_INT;
+		} else {
+			ctl->opmode &= ~(MDSS_MDP_CTL_OP_PACK_3D_ENABLE |
+				  MDSS_MDP_CTL_OP_PACK_3D_H_ROW_INT);
+		}
+	}
+
+	rc = mdss_mdp_pp_default_overlay_config(ctl->mfd, ctl->panel_data,
+						true);
+	/*
+	 * Ignore failure of PP config, ctl set-up can succeed.
+	 */
+	if (rc) {
+		pr_err("failed to set the pp config rc %dfb %d\n", rc,
+			ctl->mfd->index);
+		rc = 0;
+	}
+	return 0;
+}
+
+/**
+ * mdss_mdp_ctl_reconfig() - re-configure ctl for new mode
+ * @ctl: mdp controller.
+ * @pdata: panel data
+ *
+ * This function is called when we are trying to dynamically change
+ * the DSI mode (video <-> command) or resolution. We need to change
+ * various mdp_ctl properties to the new mode of operation.
+ *
+ * Return: 0 on success, -EINVAL for unsupported panel types.
+ */
+int mdss_mdp_ctl_reconfig(struct mdss_mdp_ctl *ctl,
+		struct mdss_panel_data *pdata)
+{
+	void *tmp;
+	int ret = 0;
+
+	/*
+	 * Switch first to prevent deleting important data in the case
+	 * where panel type is not supported in reconfig
+	 */
+	if ((pdata->panel_info.type != MIPI_VIDEO_PANEL) &&
+			(pdata->panel_info.type != MIPI_CMD_PANEL)) {
+		pr_err("unsupported panel type (%d)\n", pdata->panel_info.type);
+		return -EINVAL;
+	}
+
+	/* if only changing resolution there is no need for intf reconfig */
+	if (!ctl->is_video_mode == (pdata->panel_info.type == MIPI_CMD_PANEL))
+		goto skip_intf_reconfig;
+
+	/*
+	 * Intentionally not clearing stop function, as stop will
+	 * be called after panel is instructed mode switch is happening
+	 */
+	tmp = ctl->ops.stop_fnc;
+	memset(&ctl->ops, 0, sizeof(ctl->ops));
+	ctl->ops.stop_fnc = tmp;
+
+	switch (pdata->panel_info.type) {
+	case MIPI_VIDEO_PANEL:
+		ctl->is_video_mode = true;
+		ctl->intf_type = MDSS_INTF_DSI;
+		ctl->opmode = MDSS_MDP_CTL_OP_VIDEO_MODE;
+		ctl->ops.start_fnc = mdss_mdp_video_start;
+		break;
+	case MIPI_CMD_PANEL:
+		ctl->is_video_mode = false;
+		ctl->intf_type = MDSS_INTF_DSI;
+		ctl->opmode = MDSS_MDP_CTL_OP_CMD_MODE;
+		ctl->ops.start_fnc = mdss_mdp_cmd_start;
+		break;
+	}
+
+	/* reset per-mode state that the new interface must rebuild */
+	ctl->is_secure = false;
+	ctl->split_flush_en = false;
+	ctl->perf_release_ctl_bw = false;
+	ctl->play_cnt = 0;
+
+	ctl->opmode |= (ctl->intf_num << 4);
+
+skip_intf_reconfig:
+	ctl->width = get_panel_xres(&pdata->panel_info);
+	ctl->height = get_panel_yres(&pdata->panel_info);
+
+	if (ctl->mfd->split_mode == MDP_DUAL_LM_SINGLE_DISPLAY) {
+		if (ctl->mixer_left) {
+			ctl->mixer_left->width = ctl->width / 2;
+			ctl->mixer_left->height = ctl->height;
+		}
+		if (ctl->mixer_right) {
+			ctl->mixer_right->width = ctl->width / 2;
+			ctl->mixer_right->height = ctl->height;
+		}
+	} else {
+		/*
+		 * Handles MDP_SPLIT_MODE_NONE, MDP_DUAL_LM_DUAL_DISPLAY and
+		 * MDP_PINGPONG_SPLIT case.
+		 */
+		if (ctl->mixer_left) {
+			ctl->mixer_left->width = ctl->width;
+			ctl->mixer_left->height = ctl->height;
+		}
+	}
+	ctl->roi = (struct mdss_rect) {0, 0, ctl->width, ctl->height};
+
+	ctl->border_x_off = pdata->panel_info.lcdc.border_left;
+	ctl->border_y_off = pdata->panel_info.lcdc.border_top;
+
+	return ret;
+}
+
+/*
+ * mdss_mdp_ctl_init() - allocate and initialize a ctl for a panel
+ * @pdata: panel data describing the panel type/interface.
+ * @mfd:   framebuffer device the ctl will serve.
+ *
+ * Allocates a ctl and fills in interface number/type, opmode, start
+ * function and destination format based on the panel type.
+ *
+ * Return: the new ctl, or an ERR_PTR() on failure (never NULL) -
+ * callers must check with IS_ERR().
+ */
+struct mdss_mdp_ctl *mdss_mdp_ctl_init(struct mdss_panel_data *pdata,
+				       struct msm_fb_data_type *mfd)
+{
+	int ret = 0, offset;
+	struct mdss_mdp_ctl *ctl;
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_panel_info *pinfo;
+
+	/* writeback ctls come from a dedicated pool */
+	if (pdata->panel_info.type == WRITEBACK_PANEL)
+		offset = mdss_mdp_get_wb_ctl_support(mdata, false);
+	else
+		offset = MDSS_MDP_CTL0;
+
+	if (is_pingpong_split(mfd) && !mdata->has_pingpong_split) {
+		pr_err("Error: pp_split cannot be enabled on fb%d if HW doesn't support it\n",
+			mfd->index);
+		return ERR_PTR(-EINVAL);
+	}
+
+	ctl = mdss_mdp_ctl_alloc(mdata, offset);
+	if (!ctl) {
+		pr_err("unable to allocate ctl\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	pinfo = &pdata->panel_info;
+	ctl->mfd = mfd;
+	ctl->panel_data = pdata;
+	ctl->is_video_mode = false;
+	ctl->perf_release_ctl_bw = false;
+	ctl->border_x_off = pinfo->lcdc.border_left;
+	ctl->border_y_off = pinfo->lcdc.border_top;
+	ctl->disable_prefill = false;
+
+	switch (pdata->panel_info.type) {
+	case EDP_PANEL:
+		ctl->is_video_mode = true;
+		ctl->intf_num = MDSS_MDP_INTF0;
+		ctl->intf_type = MDSS_INTF_EDP;
+		ctl->opmode = MDSS_MDP_CTL_OP_VIDEO_MODE;
+		ctl->ops.start_fnc = mdss_mdp_video_start;
+		break;
+	case MIPI_VIDEO_PANEL:
+		ctl->is_video_mode = true;
+		/* mixer_swap exchanges the DSI interfaces of the displays */
+		if (pdata->panel_info.pdest == DISPLAY_1)
+			ctl->intf_num = mdp5_data->mixer_swap ? MDSS_MDP_INTF2 :
+				MDSS_MDP_INTF1;
+		else
+			ctl->intf_num = mdp5_data->mixer_swap ? MDSS_MDP_INTF1 :
+				MDSS_MDP_INTF2;
+		ctl->intf_type = MDSS_INTF_DSI;
+		ctl->opmode = MDSS_MDP_CTL_OP_VIDEO_MODE;
+		ctl->ops.start_fnc = mdss_mdp_video_start;
+		break;
+	case MIPI_CMD_PANEL:
+		if (pdata->panel_info.pdest == DISPLAY_1)
+			ctl->intf_num = mdp5_data->mixer_swap ? MDSS_MDP_INTF2 :
+				MDSS_MDP_INTF1;
+		else
+			ctl->intf_num = mdp5_data->mixer_swap ? MDSS_MDP_INTF1 :
+				MDSS_MDP_INTF2;
+		ctl->intf_type = MDSS_INTF_DSI;
+		ctl->opmode = MDSS_MDP_CTL_OP_CMD_MODE;
+		ctl->ops.start_fnc = mdss_mdp_cmd_start;
+		INIT_WORK(&ctl->cpu_pm_work, __cpu_pm_work_handler);
+		break;
+	case DTV_PANEL:
+		ctl->is_video_mode = true;
+		ctl->intf_num = MDSS_MDP_INTF3;
+		ctl->intf_type = MDSS_INTF_HDMI;
+		ctl->opmode = MDSS_MDP_CTL_OP_VIDEO_MODE;
+		ctl->ops.start_fnc = mdss_mdp_video_start;
+		break;
+	case WRITEBACK_PANEL:
+		ctl->intf_num = MDSS_MDP_NO_INTF;
+		ctl->ops.start_fnc = mdss_mdp_writeback_start;
+		break;
+	default:
+		pr_err("unsupported panel type (%d)\n", pdata->panel_info.type);
+		ret = -EINVAL;
+		goto ctl_init_fail;
+	}
+
+	ctl->opmode |= (ctl->intf_num << 4);
+
+	if (ctl->intf_num == MDSS_MDP_NO_INTF) {
+		ctl->dst_format = pdata->panel_info.out_format;
+	} else {
+		switch (pdata->panel_info.bpp) {
+		case 18:
+			if (ctl->intf_type == MDSS_INTF_DSI)
+				ctl->dst_format = MDSS_MDP_PANEL_FORMAT_RGB666 |
+					MDSS_MDP_PANEL_FORMAT_PACK_ALIGN_MSB;
+			else
+				ctl->dst_format = MDSS_MDP_PANEL_FORMAT_RGB666;
+			break;
+		case 24:
+		default:
+			ctl->dst_format = MDSS_MDP_PANEL_FORMAT_RGB888;
+			break;
+		}
+	}
+
+	return ctl;
+ctl_init_fail:
+	mdss_mdp_ctl_free(ctl);
+
+	return ERR_PTR(ret);
+}
+
+/*
+ * mdss_mdp_ctl_split_display_setup() - create the slave ctl of a
+ * split display and attach its layer mixer.
+ * @ctl:   master ctl.
+ * @pdata: panel data for the second (slave) panel.
+ *
+ * Return: 0 on success or a negative errno.
+ */
+int mdss_mdp_ctl_split_display_setup(struct mdss_mdp_ctl *ctl,
+		struct mdss_panel_data *pdata)
+{
+	struct mdss_mdp_ctl *sctl;
+	struct mdss_mdp_mixer *mixer;
+
+	if (!ctl || !pdata)
+		return -ENODEV;
+
+	if (pdata->panel_info.xres > ctl->mdata->max_mixer_width) {
+		pr_err("Unsupported second panel resolution: %dx%d\n",
+				pdata->panel_info.xres, pdata->panel_info.yres);
+		return -ENOTSUPP;
+	}
+
+	if (ctl->mixer_right) {
+		pr_err("right mixer already setup for ctl=%d\n", ctl->num);
+		return -EPERM;
+	}
+
+	sctl = mdss_mdp_ctl_init(pdata, ctl->mfd);
+	/* mdss_mdp_ctl_init() returns ERR_PTR on failure, never NULL */
+	if (IS_ERR_OR_NULL(sctl)) {
+		pr_err("unable to setup split display\n");
+		return -ENODEV;
+	}
+
+	sctl->width = get_panel_xres(&pdata->panel_info);
+	sctl->height = get_panel_yres(&pdata->panel_info);
+
+	sctl->roi = (struct mdss_rect){0, 0, sctl->width, sctl->height};
+
+	if (!ctl->mixer_left) {
+		ctl->mixer_left = mdss_mdp_mixer_alloc(ctl,
+				MDSS_MDP_MIXER_TYPE_INTF,
+				false, 0);
+		if (!ctl->mixer_left) {
+			pr_err("unable to allocate layer mixer\n");
+			mdss_mdp_ctl_destroy(sctl);
+			return -ENOMEM;
+		}
+	}
+
+	mixer = mdss_mdp_mixer_alloc(sctl, MDSS_MDP_MIXER_TYPE_INTF, false, 0);
+	if (!mixer) {
+		pr_err("unable to allocate layer mixer\n");
+		mdss_mdp_ctl_destroy(sctl);
+		return -ENOMEM;
+	}
+
+	mixer->is_right_mixer = true;
+	mixer->width = sctl->width;
+	mixer->height = sctl->height;
+	mixer->roi = (struct mdss_rect)
+				{0, 0, mixer->width, mixer->height};
+	mixer->valid_roi = true;
+	mixer->roi_changed = true;
+	sctl->mixer_left = mixer;
+
+	return mdss_mdp_set_split_ctl(ctl, sctl);
+}
+
+/*
+ * Program the split-display pipe-control registers so the two
+ * interfaces of a split panel stay synchronized; one interface acts as
+ * the sw-trigger master (upper/lower pipe control select which).
+ * Also enables split flush on MDP rev >= 1.3 video mode when mixers
+ * are not swapped.
+ */
+static void mdss_mdp_ctl_split_display_enable(int enable,
+	struct mdss_mdp_ctl *main_ctl, struct mdss_mdp_ctl *slave_ctl)
+{
+	u32 upper = 0, lower = 0;
+
+	pr_debug("split main ctl=%d intf=%d\n",
+			main_ctl->num, main_ctl->intf_num);
+
+	if (slave_ctl)
+		pr_debug("split slave ctl=%d intf=%d\n",
+			slave_ctl->num, slave_ctl->intf_num);
+
+	if (enable) {
+		if (main_ctl->opmode & MDSS_MDP_CTL_OP_CMD_MODE) {
+			/* interface controlling sw trigger (cmd mode) */
+			lower |= BIT(1);
+			if (main_ctl->intf_num == MDSS_MDP_INTF2)
+				lower |= BIT(4);
+			else
+				lower |= BIT(8);
+			/*
+			 * Enable SMART_PANEL_FREE_RUN if ping pong split
+			 * is enabled.
+			 */
+			if (is_pingpong_split(main_ctl->mfd))
+				lower |= BIT(2);
+			upper = lower;
+		} else {
+			/* interface controlling sw trigger (video mode) */
+			if (main_ctl->intf_num == MDSS_MDP_INTF2) {
+				lower |= BIT(4);
+				upper |= BIT(8);
+			} else {
+				lower |= BIT(8);
+				upper |= BIT(4);
+			}
+		}
+	}
+	/* when disabling, upper/lower stay 0 and the registers are cleared */
+	writel_relaxed(upper, main_ctl->mdata->mdp_base +
+		MDSS_MDP_REG_SPLIT_DISPLAY_UPPER_PIPE_CTRL);
+	writel_relaxed(lower, main_ctl->mdata->mdp_base +
+		MDSS_MDP_REG_SPLIT_DISPLAY_LOWER_PIPE_CTRL);
+	writel_relaxed(enable, main_ctl->mdata->mdp_base +
+		MDSS_MDP_REG_SPLIT_DISPLAY_EN);
+
+	if ((main_ctl->mdata->mdp_rev >= MDSS_MDP_HW_REV_103)
+		&& main_ctl->is_video_mode) {
+		struct mdss_overlay_private *mdp5_data;
+		bool mixer_swap = false;
+
+		if (main_ctl->mfd) {
+			mdp5_data = mfd_to_mdp5_data(main_ctl->mfd);
+			mixer_swap = mdp5_data->mixer_swap;
+		}
+
+		/* split flush cannot be used with swapped mixers */
+		main_ctl->split_flush_en = !mixer_swap;
+		if (main_ctl->split_flush_en)
+			writel_relaxed(enable ? 0x1 : 0x0,
+				main_ctl->mdata->mdp_base +
+				MMSS_MDP_MDP_SSPP_SPARE_0);
+	}
+}
+
+/*
+ * Enable/disable pingpong split: one ctl feeding two interfaces via a
+ * shared pingpong buffer. Programs the PPB config/control registers
+ * on top of the generic split-display enable.
+ */
+static void mdss_mdp_ctl_pp_split_display_enable(bool enable,
+		struct mdss_mdp_ctl *ctl)
+{
+	u32 cfg = 0, cntl = 0;
+
+	if (!ctl->mdata->nppb_ctl || !ctl->mdata->nppb_cfg) {
+		pr_err("No PPB to enable PP split\n");
+		WARN_ON(1);
+		/* without a PPB, ppb_cfg[0]/ppb_ctl[0] are not valid */
+		return;
+	}
+
+	mdss_mdp_ctl_split_display_enable(enable, ctl, NULL);
+
+	if (enable) {
+		cfg = ctl->slave_intf_num << 20; /* Set slave intf */
+		cfg |= BIT(16);			 /* Set horizontal split */
+		cntl = BIT(5);			 /* enable dst split */
+	}
+
+	writel_relaxed(cfg, ctl->mdata->mdp_base + ctl->mdata->ppb_cfg[0]);
+	writel_relaxed(cntl, ctl->mdata->mdp_base + ctl->mdata->ppb_ctl[0]);
+}
+
+/*
+ * mdss_mdp_ctl_destroy() - tear down a ctl and its split partner
+ * @ctl: mdp controller to destroy.
+ *
+ * Sends MDSS_EVENT_CLOSE to the panel, removes the default PP overlay
+ * config, then frees the split (slave) ctl if present and the ctl
+ * itself. Always returns 0.
+ */
+int mdss_mdp_ctl_destroy(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_ctl *sctl;
+	int rc;
+
+	rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_CLOSE, NULL,
+				     CTL_INTF_EVENT_FLAG_DEFAULT);
+	WARN(rc, "unable to close panel for intf=%d\n", ctl->intf_num);
+
+	(void) mdss_mdp_pp_default_overlay_config(ctl->mfd, ctl->panel_data,
+							false);
+
+	sctl = mdss_mdp_get_split_ctl(ctl);
+	if (sctl) {
+		pr_debug("destroying split display ctl=%d\n", sctl->num);
+		mdss_mdp_ctl_free(sctl);
+	}
+
+	mdss_mdp_ctl_free(ctl);
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_ctl_intf_event() - broadcast an event to the panel chain
+ * @ctl:   mdp controller whose panel_data list receives the event.
+ * @event: MDSS_EVENT_* code.
+ * @arg:   event-specific payload passed to each handler.
+ * @flags: CTL_INTF_EVENT_FLAG_SLAVE_INTF starts at the second panel;
+ *         CTL_INTF_EVENT_FLAG_SKIP_BROADCAST stops after the first.
+ *
+ * Walks the panel_data->next chain while handlers return 0 and the
+ * next panel is active. Returns the first non-zero handler result,
+ * or 0.
+ */
+int mdss_mdp_ctl_intf_event(struct mdss_mdp_ctl *ctl, int event, void *arg,
+	u32 flags)
+{
+	struct mdss_panel_data *pdata;
+	int rc = 0;
+
+	if (!ctl || !ctl->panel_data)
+		return -ENODEV;
+
+	pdata = ctl->panel_data;
+
+	if (flags & CTL_INTF_EVENT_FLAG_SLAVE_INTF) {
+		pdata = pdata->next;
+		if (!pdata) {
+			pr_err("Error: event=%d flags=0x%x, ctl%d slave intf is not present\n",
+				event, flags, ctl->num);
+			return -EINVAL;
+		}
+	}
+
+	pr_debug("sending ctl=%d event=%d flag=0x%x\n", ctl->num, event, flags);
+
+	do {
+		if (pdata->event_handler)
+			rc = pdata->event_handler(pdata, event, arg);
+		pdata = pdata->next;
+	} while (rc == 0 && pdata && pdata->active &&
+		!(flags & CTL_INTF_EVENT_FLAG_SKIP_BROADCAST));
+
+	return rc;
+}
+
+/*
+ * Restore one ctl's hardware state after power collapse: re-program
+ * the interface select register, resume post-processing, and re-apply
+ * DSC or FBC compression configuration as appropriate.
+ */
+static void mdss_mdp_ctl_restore_sub(struct mdss_mdp_ctl *ctl)
+{
+	u32 temp;
+	int ret = 0;
+
+	temp = readl_relaxed(ctl->mdata->mdp_base +
+			MDSS_MDP_REG_DISP_INTF_SEL);
+	/* each interface owns an 8-bit field in DISP_INTF_SEL */
+	temp |= (ctl->intf_type << ((ctl->intf_num - MDSS_MDP_INTF0) * 8));
+	writel_relaxed(temp, ctl->mdata->mdp_base +
+			MDSS_MDP_REG_DISP_INTF_SEL);
+
+	if (ctl->mfd && ctl->panel_data) {
+		ctl->mfd->ipc_resume = true;
+		mdss_mdp_pp_resume(ctl->mfd);
+
+		if (is_dsc_compression(&ctl->panel_data->panel_info)) {
+			/*
+			 * Avoid redundant call to dsc_setup when mode switch
+			 * is in progress. During the switch, dsc_setup is
+			 * handled in mdss_mode_switch() function.
+			 */
+			if (ctl->pending_mode_switch != SWITCH_RESOLUTION)
+				mdss_mdp_ctl_dsc_setup(ctl,
+					&ctl->panel_data->panel_info);
+		} else if (ctl->panel_data->panel_info.compression_mode ==
+				COMPRESSION_FBC) {
+			ret = mdss_mdp_ctl_fbc_enable(1, ctl->mixer_left,
+					&ctl->panel_data->panel_info);
+			if (ret)
+				pr_err("Failed to restore FBC mode\n");
+		}
+	}
+}
+
+/*
+ * mdss_mdp_ctl_restore() - restore mdp ctl path
+ * @locked - boolean to signal that clock lock is already acquired
+ *
+ * This function is called whenever MDP comes out of a power collapse as
+ * a result of a screen update. It restores the MDP controller's software
+ * state to the hardware registers.
+ * Function does not enable the clocks, so caller must make sure
+ * clocks are enabled before calling.
+ * The locked boolean in the parameters signals that synchronization
+ * with mdp clocks access is not required downstream.
+ * Only call this function setting this value to true if the clocks access
+ * synchronization is guaranteed by the caller.
+ */
+void mdss_mdp_ctl_restore(bool locked)
+{
+	struct mdss_mdp_ctl *ctl = NULL;
+	struct mdss_mdp_ctl *sctl;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 cnum;
+
+	/* walk every ctl and restore only the ones that are powered on */
+	for (cnum = MDSS_MDP_CTL0; cnum < mdata->nctl; cnum++) {
+		ctl = mdata->ctl_off + cnum;
+		if (!mdss_mdp_ctl_is_power_on(ctl))
+			continue;
+
+		pr_debug("restoring ctl%d, intf_type=%d\n", cnum,
+			ctl->intf_type);
+		/* frame counter restarts after a power collapse */
+		ctl->play_cnt = 0;
+		sctl = mdss_mdp_get_split_ctl(ctl);
+		mdss_mdp_ctl_restore_sub(ctl);
+		if (sctl) {
+			mdss_mdp_ctl_restore_sub(sctl);
+			mdss_mdp_ctl_split_display_enable(1, ctl, sctl);
+		} else if (is_pingpong_split(ctl->mfd)) {
+			mdss_mdp_ctl_pp_split_display_enable(1, ctl);
+		}
+
+		/* interface-specific (video/cmd) restore hook */
+		if (ctl->ops.restore_fnc)
+			ctl->ops.restore_fnc(ctl, locked);
+	}
+}
+
+/*
+ * mdss_mdp_ctl_start_sub() - bring up one ctl path: run the interface
+ * start hook, clear stale layer-mixer config, select the interface and
+ * program the left mixer's output size and compression setup.
+ * Returns 0 on success or the error from the start/FBC hooks.
+ */
+static int mdss_mdp_ctl_start_sub(struct mdss_mdp_ctl *ctl, bool handoff)
+{
+	struct mdss_mdp_mixer *mixer;
+	u32 outsize, temp;
+	int ret = 0;
+	int i, nmixers;
+
+	pr_debug("ctl_num=%d\n", ctl->num);
+
+	/*
+	 * Need start_fnc in 2 cases:
+	 * (1) handoff
+	 * (2) continuous splash finished.
+	 */
+	if (handoff || !ctl->panel_data->panel_info.cont_splash_enabled) {
+		if (ctl->ops.start_fnc)
+			ret = ctl->ops.start_fnc(ctl);
+		else
+			pr_warn("no start function for ctl=%d type=%d\n",
+					ctl->num,
+					ctl->panel_data->panel_info.type);
+
+		if (ret) {
+			pr_err("unable to start intf\n");
+			return ret;
+		}
+	}
+
+	/* without cont-splash, zero out all LAYER regs to drop stale state */
+	if (!ctl->panel_data->panel_info.cont_splash_enabled) {
+		nmixers = MDSS_MDP_INTF_MAX_LAYERMIXER +
+			MDSS_MDP_WB_MAX_LAYERMIXER;
+		for (i = 0; i < nmixers; i++)
+			mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_LAYER(i), 0);
+	}
+
+	/* select intf type in this ctl's byte (and the slave's for pp-split) */
+	temp = readl_relaxed(ctl->mdata->mdp_base +
+		MDSS_MDP_REG_DISP_INTF_SEL);
+	temp |= (ctl->intf_type << ((ctl->intf_num - MDSS_MDP_INTF0) * 8));
+	if (is_pingpong_split(ctl->mfd))
+		temp |= (ctl->intf_type << (ctl->intf_num * 8));
+
+	writel_relaxed(temp, ctl->mdata->mdp_base +
+		MDSS_MDP_REG_DISP_INTF_SEL);
+
+	mixer = ctl->mixer_left;
+	if (mixer) {
+		struct mdss_panel_info *pinfo = &ctl->panel_data->panel_info;
+
+		mixer->params_changed++;
+
+		/* LM_OUT_SIZE packs height in the upper 16 bits */
+		outsize = (mixer->height << 16) | mixer->width;
+		mdp_mixer_write(mixer, MDSS_MDP_REG_LM_OUT_SIZE, outsize);
+
+		if (is_dsc_compression(pinfo)) {
+			mdss_mdp_ctl_dsc_setup(ctl, pinfo);
+		} else if (pinfo->compression_mode == COMPRESSION_FBC) {
+			ret = mdss_mdp_ctl_fbc_enable(1, ctl->mixer_left,
+					pinfo);
+		}
+	}
+	return ret;
+}
+
+/*
+ * mdss_mdp_ctl_start() - power up and start a ctl path (and its split /
+ * pingpong-split companion when present)
+ * @ctl:     mdp controller to start
+ * @handoff: true when taking over a path initialized by the bootloader
+ *
+ * Returns 0 if already on or started successfully, otherwise an error
+ * from setup/start.
+ */
+int mdss_mdp_ctl_start(struct mdss_mdp_ctl *ctl, bool handoff)
+{
+	struct mdss_mdp_ctl *sctl;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	int ret = 0;
+
+	pr_debug("ctl_num=%d, power_state=%d\n", ctl->num, ctl->power_state);
+
+	if (mdss_mdp_ctl_is_power_on_interactive(ctl)
+			&& !(ctl->pending_mode_switch)) {
+		pr_debug("%d: panel already on!\n", __LINE__);
+		return 0;
+	}
+
+	if (mdss_mdp_ctl_is_power_off(ctl)) {
+		ret = mdss_mdp_ctl_setup(ctl);
+		if (ret)
+			return ret;
+	}
+
+	sctl = mdss_mdp_get_split_ctl(ctl);
+
+	mutex_lock(&ctl->lock);
+
+	/* coming from full off: drop any stale perf accounting */
+	if (mdss_mdp_ctl_is_power_off(ctl))
+		memset(&ctl->cur_perf, 0, sizeof(ctl->cur_perf));
+
+	/*
+	 * keep power_on false during handoff to avoid unexpected
+	 * operations to overlay.
+	 */
+	if (!handoff || ctl->pending_mode_switch)
+		ctl->power_state = MDSS_PANEL_POWER_ON;
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+	ret = mdss_mdp_ctl_start_sub(ctl, handoff);
+	if (ret == 0) {
+		if (sctl && ctl->mfd &&
+		    ctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
+			/*split display available */
+			ret = mdss_mdp_ctl_start_sub(sctl, handoff);
+			if (!ret)
+				mdss_mdp_ctl_split_display_enable(1, ctl, sctl);
+		} else if (ctl->mixer_right) {
+			/* single ctl driving two mixers: program right LM */
+			struct mdss_mdp_mixer *mixer = ctl->mixer_right;
+			u32 out;
+
+			mixer->params_changed++;
+			out = (mixer->height << 16) | mixer->width;
+			mdp_mixer_write(mixer, MDSS_MDP_REG_LM_OUT_SIZE, out);
+			mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_PACK_3D, 0);
+		} else if (is_pingpong_split(ctl->mfd)) {
+			ctl->slave_intf_num = (ctl->intf_num + 1);
+			mdss_mdp_ctl_pp_split_display_enable(true, ctl);
+		}
+	}
+
+	mdss_mdp_hist_intr_setup(&mdata->hist_intr, MDSS_IRQ_RESUME);
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	mutex_unlock(&ctl->lock);
+
+	return ret;
+}
+
+/*
+ * mdss_mdp_ctl_stop() - stop a ctl path and move it to @power_state
+ * @ctl:         mdp controller to stop
+ * @power_state: target MDSS_PANEL_POWER_* state
+ *
+ * Invokes the interface stop hook (and the split ctl's, if any), tears
+ * down FBC, and - only when the panel really goes off - clears the ctl
+ * TOP register and the mixer configuration.
+ * Returns 0 on success or the stop hook's error.
+ */
+int mdss_mdp_ctl_stop(struct mdss_mdp_ctl *ctl, int power_state)
+{
+	struct mdss_mdp_ctl *sctl;
+	int ret = 0;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	pr_debug("ctl_num=%d, power_state=%d\n", ctl->num, ctl->power_state);
+
+	if (!ctl->mfd->panel_reconfig && !mdss_mdp_ctl_is_power_on(ctl)) {
+		pr_debug("%s %d already off!\n", __func__, __LINE__);
+		return 0;
+	}
+
+	sctl = mdss_mdp_get_split_ctl(ctl);
+
+	mutex_lock(&ctl->lock);
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+	mdss_mdp_hist_intr_setup(&mdata->hist_intr, MDSS_IRQ_SUSPEND);
+
+	if (ctl->ops.stop_fnc) {
+		ret = ctl->ops.stop_fnc(ctl, power_state);
+		if (ctl->panel_data->panel_info.compression_mode ==
+				COMPRESSION_FBC) {
+			mdss_mdp_ctl_fbc_enable(0, ctl->mixer_left,
+					&ctl->panel_data->panel_info);
+		}
+	} else {
+		pr_warn("no stop func for ctl=%d\n", ctl->num);
+	}
+
+	/*
+	 * NOTE(review): a failure from the master's stop_fnc above is
+	 * overwritten here if the slave's stop_fnc succeeds - confirm
+	 * this is intended.
+	 */
+	if (sctl && sctl->ops.stop_fnc) {
+		ret = sctl->ops.stop_fnc(sctl, power_state);
+		if (sctl->panel_data->panel_info.compression_mode ==
+				COMPRESSION_FBC) {
+			mdss_mdp_ctl_fbc_enable(0, sctl->mixer_left,
+					&sctl->panel_data->panel_info);
+		}
+	}
+	if (ret) {
+		pr_warn("error powering off intf ctl=%d\n", ctl->num);
+		goto end;
+	}
+
+	/* partial power states (e.g. LP modes) keep the ctl programmed */
+	if (mdss_panel_is_power_on(power_state)) {
+		pr_debug("panel is not off, leaving ctl power on\n");
+		goto end;
+	}
+
+	if (sctl)
+		mdss_mdp_ctl_split_display_enable(0, ctl, sctl);
+
+	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_TOP, 0);
+	if (sctl) {
+		mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_TOP, 0);
+		mdss_mdp_reset_mixercfg(sctl);
+	}
+
+	mdss_mdp_reset_mixercfg(ctl);
+
+	ctl->play_cnt = 0;
+
+end:
+	if (!ret) {
+		ctl->power_state = power_state;
+		if (!ctl->pending_mode_switch)
+			mdss_mdp_ctl_perf_update(ctl, 0, true);
+	}
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+
+	mutex_unlock(&ctl->lock);
+
+	return ret;
+}
+
+/*
+ * mdss_mdp_pipe_reset() - Halts all the pipes during ctl reset.
+ * @mixer: Mixer from which to reset all pipes.
+ * @is_recovery: true when called from an error-recovery path; passed
+ *		 through to the per-pipe fetch halt.
+ * This function is called during control path reset and will halt
+ * all the pipes staged on the mixer.
+ */
+static void mdss_mdp_pipe_reset(struct mdss_mdp_mixer *mixer, bool is_recovery)
+{
+	unsigned long pipe_map;
+	u32 bit = 0;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	bool sw_rst_avail = mdss_mdp_pipe_is_sw_reset_available(mdata);
+
+	if (!mixer)
+		return;
+
+	/* pipe_mapped is a bitmask of pipes currently programmed on the LM */
+	pipe_map = mixer->pipe_mapped;
+	pr_debug("pipe_map=0x%lx\n", pipe_map);
+	for_each_set_bit_from(bit, &pipe_map, MAX_PIPES_PER_LM) {
+		struct mdss_mdp_pipe *pipe;
+
+		/*
+		 * this assumes that within lm there can be either rect0+rect1
+		 * or rect0 only. Thus to find the hardware pipe to halt only
+		 * check for rect 0 is sufficient.
+		 */
+		pipe = mdss_mdp_pipe_search(mdata, 1 << bit,
+				MDSS_MDP_PIPE_RECT0);
+		if (pipe) {
+			mdss_mdp_pipe_fetch_halt(pipe, is_recovery);
+			if (sw_rst_avail)
+				mdss_mdp_pipe_clk_force_off(pipe);
+		}
+	}
+}
+
+/*
+ * Poll the ctl SW_RESET bit until it clears or @cnt polls are exhausted.
+ * Returns the final (masked) reset status: 0 means reset completed.
+ */
+static u32 mdss_mdp_poll_ctl_reset_status(struct mdss_mdp_ctl *ctl, u32 cnt)
+{
+	u32 status;
+
+	/*
+	 * The ctl path reset completes in roughly 30us; polling every
+	 * 50us normally observes completion on the very first read.
+	 */
+	for (;;) {
+		udelay(50);
+		status = mdss_mdp_ctl_read(ctl,
+				MDSS_MDP_REG_CTL_SW_RESET) & 0x01;
+		pr_debug("status=%x, count=%d\n", status, cnt);
+		cnt--;
+		if (!(cnt > 0 && status))
+			break;
+	}
+
+	return status;
+}
+
+/*
+ * mdss_mdp_check_ctl_reset_status() - checks ctl reset status
+ * @ctl: mdp controller
+ *
+ * This function checks the ctl reset status before every frame update.
+ * If the reset bit is set, it keeps polling the status till the hw
+ * reset is complete. And does a panic if hw fails to complete the reset
+ * within the max poll interval.
+ */
+void mdss_mdp_check_ctl_reset_status(struct mdss_mdp_ctl *ctl)
+{
+	u32 status;
+
+	if (!ctl)
+		return;
+
+	/* fast path: reset bit not set, nothing to wait for */
+	status = mdss_mdp_ctl_read(ctl, MDSS_MDP_REG_CTL_SW_RESET);
+	status &= 0x01;
+	if (!status)
+		return;
+
+	pr_debug("hw ctl reset is set for ctl:%d\n", ctl->num);
+	/* 5 polls x 50us gives hw well over the ~30us it needs */
+	status = mdss_mdp_poll_ctl_reset_status(ctl, 5);
+	if (status) {
+		pr_err("hw recovery is not complete for ctl:%d\n", ctl->num);
+		MDSS_XLOG_TOUT_HANDLER("mdp", "vbif", "vbif_nrt", "dbg_bus",
+			"vbif_dbg_bus", "panic");
+	}
+}
+
+/*
+ * mdss_mdp_ctl_reset() - reset mdp ctl path.
+ * @ctl: mdp controller.
+ * @is_recovery: true when invoked from an error-recovery path; forwarded
+ *		 to the pipe halt logic.
+ * this function called when underflow happen,
+ * it will reset mdp ctl path and poll for its completion
+ *
+ * Note: called within atomic context.
+ *
+ * Returns 0 on success, -EINVAL for a NULL ctl, -EAGAIN if the reset
+ * did not complete within the poll budget.
+ */
+int mdss_mdp_ctl_reset(struct mdss_mdp_ctl *ctl, bool is_recovery)
+{
+	u32 status;
+	struct mdss_mdp_mixer *mixer;
+
+	if (!ctl) {
+		pr_err("ctl not initialized\n");
+		return -EINVAL;
+	}
+
+	mixer = ctl->mixer_left;
+	/* kick off the sw reset, then poll up to 20 times (50us each) */
+	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_SW_RESET, 1);
+
+	status = mdss_mdp_poll_ctl_reset_status(ctl, 20);
+	if (status)
+		pr_err("sw ctl:%d reset timedout\n", ctl->num);
+
+	if (mixer) {
+		mdss_mdp_pipe_reset(mixer, is_recovery);
+
+		/* dual-LM single display: right mixer pipes need halting too */
+		if (is_dual_lm_single_display(ctl->mfd) &&
+				ctl->mixer_right)
+			mdss_mdp_pipe_reset(ctl->mixer_right, is_recovery);
+	}
+
+	return (status) ? -EAGAIN : 0;
+}
+
+/*
+ * mdss_mdp_mixer_update_pipe_map() - track the pipe configuration in a mixer
+ * @master_ctl: mdp controller.
+ * @mixer_mux:  which mixer (left/right) of the master ctl to update
+ *
+ * Latches the staged pipe set (next_pipe_map) as the configuration that
+ * is now programmed in hardware (pipe_mapped). It is the caller's
+ * responsibility to call this with the master control.
+ */
+void mdss_mdp_mixer_update_pipe_map(struct mdss_mdp_ctl *master_ctl,
+		       int mixer_mux)
+{
+	struct mdss_mdp_mixer *mixer;
+
+	mixer = mdss_mdp_mixer_get(master_ctl, mixer_mux);
+	if (!mixer)
+		return;
+
+	pr_debug("mixer%d pipe_mapped=0x%x next_pipes=0x%x\n",
+		mixer->num, mixer->pipe_mapped, mixer->next_pipe_map);
+	mixer->pipe_mapped = mixer->next_pipe_map;
+}
+
+/*
+ * Record the ROI for one mixer: mark it valid only when it has non-zero
+ * area, and bump params_changed when the rectangle actually changed.
+ */
+static void mdss_mdp_set_mixer_roi(struct mdss_mdp_mixer *mixer,
+	struct mdss_rect *roi)
+{
+	bool changed = !mdss_rect_cmp(roi, &mixer->roi);
+
+	mixer->valid_roi = (roi->w && roi->h);
+	mixer->roi_changed = changed;
+
+	if (changed) {
+		mixer->roi = *roi;
+		mixer->params_changed++;
+	}
+
+	pr_debug("mixer%d ROI %s: [%d, %d, %d, %d]\n",
+		mixer->num, mixer->roi_changed ? "changed" : "not changed",
+		mixer->roi.x, mixer->roi.y, mixer->roi.w, mixer->roi.h);
+	MDSS_XLOG(mixer->num, mixer->roi_changed, mixer->valid_roi,
+		mixer->roi.x, mixer->roi.y, mixer->roi.w, mixer->roi.h);
+}
+
+/* only call from master ctl */
+/*
+ * mdss_mdp_set_roi() - program left/right ROIs for a (possibly split) ctl
+ * @ctl:   master mdp controller
+ * @l_roi: requested left-mixer ROI (may be rewritten to full screen)
+ * @r_roi: requested right-mixer ROI (may be rewritten to full screen)
+ *
+ * Falls back to full-frame ROIs when partial update is disabled or the
+ * requested rectangles are degenerate, then derives ctl->roi per the
+ * split-mode rules and forces reprogramming when the partial-update type
+ * changed between consecutive frames.
+ */
+void mdss_mdp_set_roi(struct mdss_mdp_ctl *ctl,
+	struct mdss_rect *l_roi, struct mdss_rect *r_roi)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	enum mdss_mdp_pu_type previous_frame_pu_type, current_frame_pu_type;
+
+	/* Reset ROI when we have (1) invalid ROI (2) feature disabled */
+	if ((!l_roi->w && l_roi->h) || (l_roi->w && !l_roi->h) ||
+	    (!r_roi->w && r_roi->h) || (r_roi->w && !r_roi->h) ||
+	    (!l_roi->w && !l_roi->h && !r_roi->w && !r_roi->h) ||
+	    !ctl->panel_data->panel_info.partial_update_enabled) {
+
+		if (ctl->mixer_left) {
+			*l_roi = (struct mdss_rect) {0, 0,
+					ctl->mixer_left->width,
+					ctl->mixer_left->height};
+		}
+
+		if (ctl->mixer_right) {
+			*r_roi = (struct mdss_rect) {0, 0,
+					ctl->mixer_right->width,
+					ctl->mixer_right->height};
+		}
+	}
+
+	/* sample pu type before the update so a change can be detected */
+	previous_frame_pu_type = mdss_mdp_get_pu_type(ctl);
+	if (ctl->mixer_left) {
+		mdss_mdp_set_mixer_roi(ctl->mixer_left, l_roi);
+		ctl->roi = ctl->mixer_left->roi;
+	}
+
+	if (ctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
+		/* right ROI belongs to the slave ctl's left mixer */
+		struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
+
+		if (sctl && sctl->mixer_left) {
+			mdss_mdp_set_mixer_roi(sctl->mixer_left, r_roi);
+			sctl->roi = sctl->mixer_left->roi;
+		}
+	} else if (is_dual_lm_single_display(ctl->mfd) && ctl->mixer_right) {
+
+		mdss_mdp_set_mixer_roi(ctl->mixer_right, r_roi);
+
+		/* in this case, CTL_ROI is a union of left+right ROIs. */
+		ctl->roi.w += ctl->mixer_right->roi.w;
+
+		/* right_only, update roi.x as per CTL ROI guidelines */
+		if (ctl->mixer_left && !ctl->mixer_left->valid_roi) {
+			ctl->roi = ctl->mixer_right->roi;
+			ctl->roi.x = left_lm_w_from_mfd(ctl->mfd) +
+				ctl->mixer_right->roi.x;
+		}
+	}
+
+	current_frame_pu_type = mdss_mdp_get_pu_type(ctl);
+
+	/*
+	 * Force HW programming whenever partial update type changes
+	 * between two consecutive frames to avoid incorrect HW programming.
+	 */
+	if (is_split_lm(ctl->mfd) && mdata->has_src_split &&
+	    (previous_frame_pu_type != current_frame_pu_type)) {
+		if (ctl->mixer_left)
+			ctl->mixer_left->roi_changed = true;
+		if (ctl->mixer_right)
+			ctl->mixer_right->roi_changed = true;
+	}
+}
+
+/*
+ * Accumulate into @cfg the register masks that stage pipe @pnum
+ * (rect @rect_num) at blend @stage. RECT0 pipes contribute to all three
+ * mixercfg registers; RECT1 pipes are programmed via the ext2 register
+ * only.
+ */
+static void __mdss_mdp_mixer_update_cfg_masks(u32 pnum,
+		enum mdss_mdp_pipe_rect rect_num,
+		u32 stage, struct mdss_mdp_mixer_cfg *cfg)
+{
+	u32 masks[NUM_MIXERCFG_REGS] = { 0 };
+	int i;
+
+	if (pnum >= MDSS_MDP_MAX_SSPP)
+		return;
+
+	if (rect_num == MDSS_MDP_PIPE_RECT0) {
+		masks[0] = mdss_mdp_hwio_mask(&mdp_pipe_hwio[pnum].base, stage);
+		masks[1] = mdss_mdp_hwio_mask(&mdp_pipe_hwio[pnum].ext, stage);
+		masks[2] = mdss_mdp_hwio_mask(&mdp_pipe_hwio[pnum].ext2, stage);
+	} else { /* RECT1 */
+		masks[2] = mdss_mdp_hwio_mask(&mdp_pipe_rec1_hwio[pnum].ext2,
+				stage);
+	}
+
+	for (i = 0; i < NUM_MIXERCFG_REGS; i++)
+		cfg->config_masks[i] |= masks[i];
+
+	/* NOTE(review): only base and ext masks are printed; ext2 is not */
+	pr_debug("pnum=%d stage=%d cfg=0x%08x ext=0x%08x\n",
+			pnum, stage, masks[0], masks[1]);
+}
+
+/*
+ * Fill @offsets with the ctl-relative addresses of the three layer-mixer
+ * configuration registers (LAYER, LAYER_EXTN, LAYER_EXTN2) for
+ * @mixer_num. @count must be at least NUM_MIXERCFG_REGS.
+ */
+static void __mdss_mdp_mixer_get_offsets(u32 mixer_num,
+		u32 *offsets, size_t count)
+{
+	WARN_ON(count < NUM_MIXERCFG_REGS);
+
+	offsets[0] = MDSS_MDP_REG_CTL_LAYER(mixer_num);
+	offsets[1] = MDSS_MDP_REG_CTL_LAYER_EXTN(mixer_num);
+	offsets[2] = MDSS_MDP_REG_CTL_LAYER_EXTN2(mixer_num);
+}
+
+/*
+ * Map a logical mixer to the hardware register instance it programs.
+ * Hardware expects actual mixer programming on:
+ *  INTF: 0, 1, 2, 5
+ *  WB: 3, 4
+ * with some exceptions on certain revisions (WB may start at mixer 1).
+ */
+static inline int __mdss_mdp_mixer_get_hw_num(struct mdss_mdp_mixer *mixer)
+{
+	if (mixer->type == MDSS_MDP_MIXER_TYPE_WRITEBACK) {
+		bool wb_on_lm1 = test_bit(MDSS_CAPS_MIXER_1_FOR_WB,
+				mixer->ctl->mdata->mdss_caps_map);
+
+		return mixer->num + (wb_on_lm1 ?
+				MDSS_MDP_INTF_LAYERMIXER1 :
+				MDSS_MDP_INTF_LAYERMIXER3);
+	}
+
+	if (mixer->num == MDSS_MDP_INTF_LAYERMIXER3)
+		return 5;
+
+	return mixer->num;
+}
+
+/*
+ * Write @count values into the layer-mixer cfg registers of @mixer_num
+ * on @ctl. @values must hold at least NUM_MIXERCFG_REGS entries.
+ */
+static inline void __mdss_mdp_mixer_write_layer(struct mdss_mdp_ctl *ctl,
+		u32 mixer_num, u32 *values, size_t count)
+{
+	u32 offsets[NUM_MIXERCFG_REGS];
+	size_t idx;
+
+	WARN_ON(!values || count < NUM_MIXERCFG_REGS);
+
+	__mdss_mdp_mixer_get_offsets(mixer_num, offsets,
+			ARRAY_SIZE(offsets));
+
+	for (idx = 0; idx < count; idx++)
+		mdss_mdp_ctl_write(ctl, offsets[idx], values[idx]);
+}
+
+/*
+ * Program a mixer's layer configuration registers from @cfg. A NULL
+ * @cfg writes zeros, detaching every pipe from the mixer.
+ */
+static void __mdss_mdp_mixer_write_cfg(struct mdss_mdp_mixer *mixer,
+		struct mdss_mdp_mixer_cfg *cfg)
+{
+	u32 vals[NUM_MIXERCFG_REGS] = {0};
+	int i, mixer_num;
+
+	if (!mixer)
+		return;
+
+	mixer_num = __mdss_mdp_mixer_get_hw_num(mixer);
+
+	if (cfg) {
+		for (i = 0; i < NUM_MIXERCFG_REGS; i++)
+			vals[i] = cfg->config_masks[i];
+
+		/* border color and cursor flags live in the base register */
+		if (cfg->border_enabled)
+			vals[0] |= MDSS_MDP_LM_BORDER_COLOR;
+		if (cfg->cursor_enabled)
+			vals[0] |= MDSS_MDP_LM_CURSOR_OUT;
+	}
+
+	__mdss_mdp_mixer_write_layer(mixer->ctl, mixer_num,
+			vals, ARRAY_SIZE(vals));
+
+	pr_debug("mixer=%d cfg=0%08x cfg_extn=0x%08x cfg_extn2=0x%08x\n",
+		mixer->num, vals[0], vals[1], vals[2]);
+	MDSS_XLOG(mixer->num, vals[0], vals[1], vals[2]);
+}
+
+/*
+ * Zero the layer configuration registers of every mixer (interface and
+ * writeback) reachable through @ctl, detaching all staged pipes.
+ */
+void mdss_mdp_reset_mixercfg(struct mdss_mdp_ctl *ctl)
+{
+	u32 zeroes[NUM_MIXERCFG_REGS] = {0};
+	int mixer_id;
+	int total = MDSS_MDP_INTF_MAX_LAYERMIXER +
+			MDSS_MDP_WB_MAX_LAYERMIXER;
+
+	if (!ctl)
+		return;
+
+	for (mixer_id = 0; mixer_id < total; mixer_id++)
+		__mdss_mdp_mixer_write_layer(ctl, mixer_id, zeroes,
+				ARRAY_SIZE(zeroes));
+}
+
+/*
+ * mdss_mdp_mixer_reg_has_pipe() - check if @pipe is currently programmed
+ * in @mixer's hardware layer registers.
+ *
+ * Reads back the mixercfg registers and tests them against the pipe's
+ * full stage mask (stage -1 selects all stage bits for the pipe).
+ * Returns true when any of the pipe's bits are set in hardware.
+ */
+bool mdss_mdp_mixer_reg_has_pipe(struct mdss_mdp_mixer *mixer,
+		struct mdss_mdp_pipe *pipe)
+{
+	u32 offs[NUM_MIXERCFG_REGS];
+	u32 cfgs[NUM_MIXERCFG_REGS];
+	struct mdss_mdp_mixer_cfg mixercfg;
+	int i, mixer_num;
+
+	if (!mixer)
+		return false;
+
+	memset(&mixercfg, 0, sizeof(mixercfg));
+
+	mixer_num = __mdss_mdp_mixer_get_hw_num(mixer);
+	__mdss_mdp_mixer_get_offsets(mixer_num, offs, NUM_MIXERCFG_REGS);
+
+	for (i = 0; i < NUM_MIXERCFG_REGS; i++)
+		cfgs[i] = mdss_mdp_ctl_read(mixer->ctl, offs[i]);
+
+	/* stage -1 (all ones) builds a mask covering every stage bit */
+	__mdss_mdp_mixer_update_cfg_masks(pipe->num, pipe->multirect.num, -1,
+			&mixercfg);
+	for (i = 0; i < NUM_MIXERCFG_REGS; i++) {
+		if (cfgs[i] & mixercfg.config_masks[i]) {
+			MDSS_XLOG(mixer->num, cfgs[0], cfgs[1]);
+			return true;
+		}
+	}
+
+	return false;
+}
+
+/*
+ * mdss_mdp_mixer_setup() - program one layer mixer for the next frame
+ * @master_ctl: master mdp controller
+ * @mixer_mux:  MDSS_MDP_MIXER_MUX_LEFT/RIGHT selecting the hw mixer
+ * @lm_swap:    true to source content from the opposite logical mixer
+ *		while programming this hw mixer
+ *
+ * Walks the staged pipes of the (possibly swapped) logical mixer,
+ * derives per-stage blend ops and alpha values, accumulates the
+ * mixercfg masks and writes the result plus op-mode/border-color
+ * registers, setting the mixer's flush bit on the owning ctl.
+ */
+static void mdss_mdp_mixer_setup(struct mdss_mdp_ctl *master_ctl,
+	int mixer_mux, bool lm_swap)
+{
+	int i, mixer_num;
+	int stage, screen_state, outsize;
+	u32 off, blend_op, blend_stage;
+	u32 mixer_op_mode = 0, bg_alpha_enable = 0;
+	struct mdss_mdp_mixer_cfg mixercfg;
+	u32 fg_alpha = 0, bg_alpha = 0;
+	struct mdss_mdp_pipe *pipe;
+	struct mdss_mdp_ctl *ctl, *ctl_hw;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_mixer *mixer_hw = mdss_mdp_mixer_get(master_ctl,
+		mixer_mux);
+	struct mdss_mdp_mixer *mixer;
+
+	if (!mixer_hw)
+		return;
+
+	ctl = mixer_hw->ctl;
+	if (!ctl)
+		return;
+
+	ctl_hw = ctl;
+	mixer_hw->params_changed = 0;
+
+	/* check if mixer setup for rotator is needed */
+	if (mixer_hw->rotator_mode) {
+		__mdss_mdp_mixer_write_cfg(mixer_hw, NULL);
+		return;
+	}
+
+	memset(&mixercfg, 0, sizeof(mixercfg));
+
+	/*
+	 * NOTE(review): mdss_mdp_mixer_get() can return NULL; on the
+	 * lm_swap path the result is dereferenced without a check -
+	 * presumably lm_swap is only set when both mixers exist; confirm.
+	 */
+	if (lm_swap) {
+		if (mixer_mux == MDSS_MDP_MIXER_MUX_RIGHT)
+			mixer = mdss_mdp_mixer_get(master_ctl,
+				MDSS_MDP_MIXER_MUX_LEFT);
+		else
+			mixer = mdss_mdp_mixer_get(master_ctl,
+				MDSS_MDP_MIXER_MUX_RIGHT);
+		ctl_hw = mixer->ctl;
+	} else {
+		mixer = mixer_hw;
+	}
+
+	/*
+	 * if lm_swap was used on MDP_DUAL_LM_DUAL_DISPLAY then we need to
+	 * reset mixercfg every frame because there might be a stale value
+	 * in mixerfcfg register.
+	 */
+	if ((ctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) &&
+	    is_dsc_compression(&ctl->panel_data->panel_info) &&
+	    ctl->panel_data->panel_info.partial_update_enabled &&
+	    mdss_has_quirk(mdata, MDSS_QUIRK_DSC_RIGHT_ONLY_PU))
+		mdss_mdp_reset_mixercfg(ctl_hw);
+
+	if (!mixer->valid_roi) {
+		/*
+		 * resetting mixer config is specifically needed when split
+		 * mode is MDP_DUAL_LM_SINGLE_DISPLAY but update is only on
+		 * one side.
+		 */
+		__mdss_mdp_mixer_write_cfg(mixer_hw, NULL);
+
+		MDSS_XLOG(mixer->num, mixer_hw->num, XLOG_FUNC_EXIT);
+		return;
+	}
+
+	trace_mdp_mixer_update(mixer_hw->num);
+	pr_debug("setup mixer=%d hw=%d\n", mixer->num, mixer_hw->num);
+	screen_state = ctl->force_screen_state;
+
+	/* LM output is sized to the ROI, height in the upper 16 bits */
+	outsize = (mixer->roi.h << 16) | mixer->roi.w;
+	mdp_mixer_write(mixer_hw, MDSS_MDP_REG_LM_OUT_SIZE, outsize);
+
+	if (screen_state == MDSS_SCREEN_FORCE_BLANK) {
+		mixercfg.border_enabled = true;
+		goto update_mixer;
+	}
+
+	/* base stage: border color when nothing is staged there */
+	pipe = mixer->stage_pipe[MDSS_MDP_STAGE_BASE * MAX_PIPES_PER_STAGE];
+	if (pipe == NULL) {
+		mixercfg.border_enabled = true;
+	} else {
+		__mdss_mdp_mixer_update_cfg_masks(pipe->num,
+				pipe->multirect.num, MDSS_MDP_STAGE_BASE,
+				&mixercfg);
+
+		if (pipe->src_fmt->alpha_enable)
+			bg_alpha_enable = 1;
+	}
+
+	/* program every staged pipe from stage 0 upwards */
+	i = MDSS_MDP_STAGE_0 * MAX_PIPES_PER_STAGE;
+	for (; i < MAX_PIPES_PER_LM; i++) {
+		pipe = mixer->stage_pipe[i];
+		if (pipe == NULL)
+			continue;
+
+		stage = i / MAX_PIPES_PER_STAGE;
+		if (stage != pipe->mixer_stage) {
+			pr_warn("pipe%d rec%d mixer:%d stage mismatch. pipe->mixer_stage=%d, mixer->stage_pipe=%d multirect_mode=%d. skip staging it\n",
+			    pipe->num, pipe->multirect.num, mixer->num,
+			    pipe->mixer_stage, stage, pipe->multirect.mode);
+			mixer->stage_pipe[i] = NULL;
+			continue;
+		}
+
+		/*
+		 * pipe which is staged on both LMs will be tracked through
+		 * left mixer only.
+		 */
+		if (!pipe->src_split_req || !mixer->is_right_mixer)
+			mixer->next_pipe_map |= pipe->ndx;
+
+		blend_stage = stage - MDSS_MDP_STAGE_0;
+		off = MDSS_MDP_REG_LM_BLEND_OFFSET(blend_stage);
+
+		/*
+		 * Account for additional blending stages
+		 * from MDP v1.5 onwards
+		 */
+		if (blend_stage > 3)
+			off += MDSS_MDP_REG_LM_BLEND_STAGE4;
+		blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_CONST |
+			    MDSS_MDP_BLEND_BG_ALPHA_BG_CONST);
+		fg_alpha = pipe->alpha;
+		bg_alpha = 0xFF - pipe->alpha;
+		/* keep fg alpha */
+		mixer_op_mode |= 1 << (blend_stage + 1);
+
+		switch (pipe->blend_op) {
+		case BLEND_OP_OPAQUE:
+
+			blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_CONST |
+				    MDSS_MDP_BLEND_BG_ALPHA_BG_CONST);
+
+			pr_debug("pnum=%d stg=%d op=OPAQUE\n", pipe->num,
+					stage);
+			break;
+
+		case BLEND_OP_PREMULTIPLIED:
+			if (pipe->src_fmt->alpha_enable) {
+				blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_CONST |
+					    MDSS_MDP_BLEND_BG_ALPHA_FG_PIXEL);
+				if (fg_alpha != 0xff) {
+					bg_alpha = fg_alpha;
+					blend_op |=
+						MDSS_MDP_BLEND_BG_MOD_ALPHA |
+						MDSS_MDP_BLEND_BG_INV_MOD_ALPHA;
+				} else {
+					blend_op |= MDSS_MDP_BLEND_BG_INV_ALPHA;
+				}
+			}
+			pr_debug("pnum=%d stg=%d op=PREMULTIPLIED\n", pipe->num,
+					stage);
+			break;
+
+		case BLEND_OP_COVERAGE:
+			if (pipe->src_fmt->alpha_enable) {
+				blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_PIXEL |
+					    MDSS_MDP_BLEND_BG_ALPHA_FG_PIXEL);
+				if (fg_alpha != 0xff) {
+					bg_alpha = fg_alpha;
+					blend_op |=
+					       MDSS_MDP_BLEND_FG_MOD_ALPHA |
+					       MDSS_MDP_BLEND_FG_INV_MOD_ALPHA |
+					       MDSS_MDP_BLEND_BG_MOD_ALPHA |
+					       MDSS_MDP_BLEND_BG_INV_MOD_ALPHA;
+				} else {
+					blend_op |= MDSS_MDP_BLEND_BG_INV_ALPHA;
+				}
+			}
+			pr_debug("pnum=%d stg=%d op=COVERAGE\n", pipe->num,
+					stage);
+			break;
+
+		default:
+			blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_CONST |
+				    MDSS_MDP_BLEND_BG_ALPHA_BG_CONST);
+			pr_debug("pnum=%d stg=%d op=NONE\n", pipe->num,
+					stage);
+			break;
+		}
+
+		if (!pipe->src_fmt->alpha_enable && bg_alpha_enable)
+			mixer_op_mode = 0;
+
+		__mdss_mdp_mixer_update_cfg_masks(pipe->num,
+				pipe->multirect.num, stage, &mixercfg);
+
+		trace_mdp_sspp_change(pipe);
+
+		pr_debug("stg=%d op=%x fg_alpha=%x bg_alpha=%x\n", stage,
+					blend_op, fg_alpha, bg_alpha);
+		mdp_mixer_write(mixer_hw,
+			off + MDSS_MDP_REG_LM_OP_MODE, blend_op);
+		mdp_mixer_write(mixer_hw,
+			off + MDSS_MDP_REG_LM_BLEND_FG_ALPHA, fg_alpha);
+		mdp_mixer_write(mixer_hw,
+			off + MDSS_MDP_REG_LM_BLEND_BG_ALPHA, bg_alpha);
+	}
+
+	if (mixer->cursor_enabled)
+		mixercfg.cursor_enabled = true;
+
+update_mixer:
+	/* mixers 0-4 flush via bits 6-10, others via bit 20 */
+	mixer_num = __mdss_mdp_mixer_get_hw_num(mixer_hw);
+	ctl_hw->flush_bits |= BIT(mixer_num < 5 ? 6 + mixer_num : 20);
+
+	/* Read GC enable/disable status on LM */
+	mixer_op_mode |=
+		(mdp_mixer_read(mixer_hw, MDSS_MDP_REG_LM_OP_MODE) & BIT(0));
+
+	if (mixer->src_split_req && mixer_mux == MDSS_MDP_MIXER_MUX_RIGHT)
+		mixer_op_mode |= BIT(31);
+
+	mdp_mixer_write(mixer_hw, MDSS_MDP_REG_LM_OP_MODE, mixer_op_mode);
+
+	mdp_mixer_write(mixer_hw, MDSS_MDP_REG_LM_BORDER_COLOR_0,
+		(mdata->bcolor0 & 0xFFF) | ((mdata->bcolor1 & 0xFFF) << 16));
+	mdp_mixer_write(mixer_hw, MDSS_MDP_REG_LM_BORDER_COLOR_1,
+		mdata->bcolor2 & 0xFFF);
+
+	__mdss_mdp_mixer_write_cfg(mixer_hw, &mixercfg);
+
+	pr_debug("mixer=%d hw=%d op_mode=0x%08x w=%d h=%d bc0=0x%x bc1=0x%x\n",
+		mixer->num, mixer_hw->num,
+		mixer_op_mode, mixer->roi.w, mixer->roi.h,
+		(mdata->bcolor0 & 0xFFF) | ((mdata->bcolor1 & 0xFFF) << 16),
+		mdata->bcolor2 & 0xFFF);
+	MDSS_XLOG(mixer->num, mixer_hw->num,
+		mixer_op_mode, mixer->roi.h, mixer->roi.w);
+}
+
+/*
+ * mdss_mdp_mixer_addr_setup() - allocate and initialize mixer descriptors
+ * @mdata:            mdss global data
+ * @mixer_offsets:    per-mixer register offsets from the mdss io base
+ * @dspp_offsets:     dspp register offsets (INTF mixers only, may be NULL)
+ * @pingpong_offsets: pingpong register offsets (INTF mixers only)
+ * @type:             MDSS_MDP_MIXER_TYPE_INTF or _WRITEBACK
+ * @len:              number of mixers of this type
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure or -EINVAL for an
+ * unknown @type.
+ */
+int mdss_mdp_mixer_addr_setup(struct mdss_data_type *mdata,
+	 u32 *mixer_offsets, u32 *dspp_offsets, u32 *pingpong_offsets,
+	 u32 type, u32 len)
+{
+	struct mdss_mdp_mixer *head;
+	u32 i;
+	int rc = 0;
+	u32 size = len;
+
+	/* shared WFD mode needs one extra (duplicated) writeback mixer */
+	if ((type == MDSS_MDP_MIXER_TYPE_WRITEBACK) &&
+			(mdata->wfd_mode == MDSS_MDP_WFD_SHARED))
+		size++;
+
+	/*
+	 * devm_kcalloc() zero-initializes and checks the count * size
+	 * multiplication for overflow, unlike the open-coded multiply
+	 * previously passed to devm_kzalloc().
+	 */
+	head = devm_kcalloc(&mdata->pdev->dev, size,
+			sizeof(struct mdss_mdp_mixer), GFP_KERNEL);
+
+	if (!head) {
+		pr_err("unable to setup mixer type=%d :kcalloc fail\n",
+			type);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < len; i++) {
+		head[i].type = type;
+		head[i].base = mdata->mdss_io.base + mixer_offsets[i];
+		head[i].ref_cnt = 0;
+		head[i].num = i;
+		if (type == MDSS_MDP_MIXER_TYPE_INTF && dspp_offsets
+				&& pingpong_offsets) {
+			/* not every interface mixer has a dspp behind it */
+			if (mdata->ndspp > i)
+				head[i].dspp_base = mdata->mdss_io.base +
+						dspp_offsets[i];
+			head[i].pingpong_base = mdata->mdss_io.base +
+					pingpong_offsets[i];
+		}
+	}
+
+	/*
+	 * Duplicate the last writeback mixer for concurrent line and block mode
+	 * operations
+	 */
+	if ((type == MDSS_MDP_MIXER_TYPE_WRITEBACK) &&
+			(mdata->wfd_mode == MDSS_MDP_WFD_SHARED))
+		head[len] = head[len - 1];
+
+	switch (type) {
+
+	case MDSS_MDP_MIXER_TYPE_INTF:
+		mdata->mixer_intf = head;
+		break;
+
+	case MDSS_MDP_MIXER_TYPE_WRITEBACK:
+		mdata->mixer_wb = head;
+		break;
+
+	default:
+		pr_err("Invalid mixer type=%d\n", type);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+/*
+ * mdss_mdp_ctl_addr_setup() - allocate and initialize ctl descriptors
+ * @mdata:       mdss global data
+ * @ctl_offsets: per-ctl register offsets from the mdss io base
+ * @len:         number of hardware ctls
+ *
+ * In shared WFD mode one extra virtual ctl is appended, sharing the last
+ * hardware ctl's registers and a dedicated mutex.
+ * Returns 0 on success or -ENOMEM on allocation failure.
+ */
+int mdss_mdp_ctl_addr_setup(struct mdss_data_type *mdata,
+	u32 *ctl_offsets,  u32 len)
+{
+	struct mdss_mdp_ctl *head;
+	struct mutex *shared_lock = NULL;
+	u32 i;
+	u32 size = len;
+
+	if (mdata->wfd_mode == MDSS_MDP_WFD_SHARED) {
+		size++;
+		shared_lock = devm_kzalloc(&mdata->pdev->dev,
+					   sizeof(struct mutex),
+					   GFP_KERNEL);
+		if (!shared_lock) {
+			pr_err("unable to allocate mem for mutex\n");
+			return -ENOMEM;
+		}
+		mutex_init(shared_lock);
+	}
+
+	/*
+	 * devm_kcalloc() zero-initializes and checks the count * size
+	 * multiplication for overflow, unlike the open-coded multiply
+	 * previously passed to devm_kzalloc().
+	 */
+	head = devm_kcalloc(&mdata->pdev->dev, size,
+			sizeof(struct mdss_mdp_ctl), GFP_KERNEL);
+
+	if (!head) {
+		pr_err("unable to setup ctl and wb: kcalloc fail\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < len; i++) {
+		head[i].num = i;
+		head[i].base = (mdata->mdss_io.base) + ctl_offsets[i];
+		head[i].ref_cnt = 0;
+	}
+
+	if (mdata->wfd_mode == MDSS_MDP_WFD_SHARED) {
+		head[len - 1].shared_lock = shared_lock;
+		/*
+		 * Allocate a virtual ctl to be able to perform simultaneous
+		 * line mode and block mode operations on the same
+		 * writeback block
+		 */
+		head[len] = head[len - 1];
+		/* the virtual ctl deliberately reuses the hw ctl's number */
+		head[len].num = head[len - 1].num;
+	}
+	mdata->ctl_off = head;
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_wb_addr_setup() - allocate and initialize writeback descriptors
+ * @mdata:        mdss global data
+ * @num_block_wb: number of block-mode (rotator/WFD) writeback units
+ * @num_intf_wb:  number of interface-mode writeback units
+ *
+ * Block-mode units come first and get rotator/WFD (and UBWC, when
+ * supported) capabilities; the remaining units are WFD interface
+ * writebacks. Returns 0 on success or -ENOMEM on allocation failure.
+ */
+int mdss_mdp_wb_addr_setup(struct mdss_data_type *mdata,
+	u32 num_block_wb, u32 num_intf_wb)
+{
+	struct mdss_mdp_writeback *wb;
+	u32 total, i;
+
+	total = num_block_wb + num_intf_wb;
+	/*
+	 * devm_kcalloc() checks the count * size multiplication for
+	 * overflow, unlike the open-coded multiply previously passed
+	 * to devm_kzalloc().
+	 */
+	wb = devm_kcalloc(&mdata->pdev->dev, total,
+			sizeof(struct mdss_mdp_writeback), GFP_KERNEL);
+	if (!wb)
+		return -ENOMEM;
+
+	for (i = 0; i < total; i++) {
+		wb[i].num = i;
+		if (i < num_block_wb) {
+			wb[i].caps = MDSS_MDP_WB_ROTATOR | MDSS_MDP_WB_WFD;
+			if (mdss_mdp_is_ubwc_supported(mdata))
+				wb[i].caps |= MDSS_MDP_WB_UBWC;
+		} else {
+			wb[i].caps = MDSS_MDP_WB_WFD | MDSS_MDP_WB_INTF;
+		}
+	}
+
+	mdata->wb = wb;
+	mdata->nwb = total;
+	mutex_init(&mdata->wb_lock);
+
+	return 0;
+}
+
+/*
+ * Resolve @mux to one of the ctl's mixers. DEFAULT and LEFT both map to
+ * the left mixer; an unknown mux (or a NULL ctl) yields NULL.
+ */
+struct mdss_mdp_mixer *mdss_mdp_mixer_get(struct mdss_mdp_ctl *ctl, int mux)
+{
+	if (!ctl) {
+		pr_err("ctl not initialized\n");
+		return NULL;
+	}
+
+	if (mux == MDSS_MDP_MIXER_MUX_RIGHT)
+		return ctl->mixer_right;
+
+	if (mux == MDSS_MDP_MIXER_MUX_DEFAULT ||
+	    mux == MDSS_MDP_MIXER_MUX_LEFT)
+		return ctl->mixer_left;
+
+	return NULL;
+}
+
+/*
+ * mdss_mdp_get_staged_pipe() - look up the pipe staged at a blend slot
+ * @ctl:            mdp controller
+ * @mux:            mixer selector (MDSS_MDP_MIXER_MUX_*)
+ * @stage:          blend stage index
+ * @is_right_blend: select the right-blend slot of the stage
+ *
+ * Returns the staged pipe, or NULL when the ctl/mixer is missing or the
+ * computed index is out of range.
+ */
+struct mdss_mdp_pipe *mdss_mdp_get_staged_pipe(struct mdss_mdp_ctl *ctl,
+	int mux, int stage, bool is_right_blend)
+{
+	struct mdss_mdp_pipe *pipe = NULL;
+	struct mdss_mdp_mixer *mixer;
+	int index = (stage * MAX_PIPES_PER_STAGE) + (int)is_right_blend;
+
+	if (!ctl)
+		return NULL;
+
+	/*
+	 * Valid slots are 0..MAX_PIPES_PER_LM-1, so index ==
+	 * MAX_PIPES_PER_LM is already out of range: warn with >=
+	 * (the previous "> MAX_PIPES_PER_LM" check missed that case,
+	 * which the access guard below already treats as invalid).
+	 */
+	WARN_ON(index >= MAX_PIPES_PER_LM);
+
+	mixer = mdss_mdp_mixer_get(ctl, mux);
+	if (mixer && (index < MAX_PIPES_PER_LM))
+		pipe = mixer->stage_pipe[index];
+
+	pr_debug("%pS index=%d pipe%d\n", __builtin_return_address(0),
+		index, pipe ? pipe->num : -1);
+	return pipe;
+}
+
+/*
+ * Return the ctl FLUSH register bit for @pipe, or 0 (with a warning) for
+ * a NULL/out-of-range pipe.
+ */
+int mdss_mdp_get_pipe_flush_bits(struct mdss_mdp_pipe *pipe)
+{
+	if (WARN_ON(!pipe || pipe->num >= MDSS_MDP_MAX_SSPP))
+		return 0;
+
+	return BIT(mdp_pipe_hwio[pipe->num].flush_bit);
+}
+
+/*
+ * Write @flush_bits to the ctl FLUSH register under flush_lock; when
+ * split-flush is not enabled in hardware, mirror the write to the split
+ * ctl as well. Always returns 0.
+ */
+int mdss_mdp_async_ctl_flush(struct msm_fb_data_type *mfd,
+		u32 flush_bits)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
+	struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
+
+	mutex_lock(&ctl->flush_lock);
+
+	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, flush_bits);
+	if (sctl && !ctl->split_flush_en)
+		mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_FLUSH, flush_bits);
+
+	mutex_unlock(&ctl->flush_lock);
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_mixer_pipe_update() - (re)stage a pipe on a mixer
+ * @pipe:           pipe to stage
+ * @mixer:          mixer to stage it on
+ * @params_changed: non-zero when staging parameters changed and the
+ *                  stage_pipe table must be rebuilt for this pipe
+ *
+ * Clears the pipe from every blend container and re-inserts it at its
+ * current mixer_stage/blend side, then accumulates the pipe's flush bit
+ * on the ctl. Returns 0 on success or -EINVAL on bad arguments.
+ */
+int mdss_mdp_mixer_pipe_update(struct mdss_mdp_pipe *pipe,
+			 struct mdss_mdp_mixer *mixer, int params_changed)
+{
+	struct mdss_mdp_ctl *ctl;
+	int i, j, k;
+
+	if (!pipe)
+		return -EINVAL;
+	if (!mixer)
+		return -EINVAL;
+	ctl = mixer->ctl;
+	if (!ctl)
+		return -EINVAL;
+
+	if (pipe->mixer_stage >= MDSS_MDP_MAX_STAGE) {
+		pr_err("invalid mixer stage\n");
+		return -EINVAL;
+	}
+
+	pr_debug("pnum=%x mixer=%d stage=%d\n", pipe->num, mixer->num,
+			pipe->mixer_stage);
+
+	mutex_lock(&ctl->flush_lock);
+
+	if (params_changed) {
+		mixer->params_changed++;
+		for (i = MDSS_MDP_STAGE_UNUSED; i < MDSS_MDP_MAX_STAGE; i++) {
+			j = i * MAX_PIPES_PER_STAGE;
+
+			/*
+			 * this could lead to cases where left blend index is
+			 * not populated. For instance, where pipe is spanning
+			 * across layer mixers. But this is handled properly
+			 * within mixer programming code.
+			 */
+			if (pipe->is_right_blend)
+				j++;
+
+			/* First clear all blend containers for current stage */
+			for (k = 0; k < MAX_PIPES_PER_STAGE; k++) {
+				u32 ndx = (i * MAX_PIPES_PER_STAGE) + k;
+
+				if (mixer->stage_pipe[ndx] == pipe)
+					mixer->stage_pipe[ndx] = NULL;
+			}
+
+			/* then stage actual pipe on specific blend container */
+			if (i == pipe->mixer_stage)
+				mixer->stage_pipe[j] = pipe;
+		}
+	}
+
+	ctl->flush_bits |= mdss_mdp_get_pipe_flush_bits(pipe);
+
+	mutex_unlock(&ctl->flush_lock);
+
+	return 0;
+}
+
+/**
+ * mdss_mdp_mixer_unstage_all() - Unstage all pipes from mixer
+ * @mixer:	Mixer from which to unstage all pipes
+ *
+ * Unstage any pipes that are currently attached to mixer; both the
+ * mixer and each detached pipe are flagged as changed.
+ *
+ * NOTE: this will not update the pipe structure, and thus a full
+ * deinitialization or reconfiguration of all pipes is expected after this call.
+ */
+void mdss_mdp_mixer_unstage_all(struct mdss_mdp_mixer *mixer)
+{
+	int idx;
+
+	if (!mixer)
+		return;
+
+	for (idx = 0; idx < MAX_PIPES_PER_LM; idx++) {
+		struct mdss_mdp_pipe *staged = mixer->stage_pipe[idx];
+
+		if (!staged)
+			continue;
+
+		mixer->stage_pipe[idx] = NULL;
+		mixer->params_changed++;
+		staged->params_changed++;
+	}
+}
+
+/*
+ * mdss_mdp_mixer_pipe_unstage() - remove a pipe from a mixer's stage table
+ * @pipe:  pipe to unstage
+ * @mixer: mixer it is expected to be staged on
+ *
+ * Looks up the pipe at its expected stage/blend slot; if it is found
+ * elsewhere (stale bookkeeping), warns and removes it from where it
+ * actually is. A pipe that is not staged at all is not an error.
+ * Returns 0 on success or -EINVAL on NULL arguments.
+ */
+int mdss_mdp_mixer_pipe_unstage(struct mdss_mdp_pipe *pipe,
+	struct mdss_mdp_mixer *mixer)
+{
+	int i, right_blend;
+
+	if (!pipe)
+		return -EINVAL;
+	if (!mixer)
+		return -EINVAL;
+
+	right_blend = pipe->is_right_blend ? 1 : 0;
+	i = (pipe->mixer_stage * MAX_PIPES_PER_STAGE) + right_blend;
+	if ((i < MAX_PIPES_PER_LM) && (pipe == mixer->stage_pipe[i])) {
+		pr_debug("unstage p%d from %s side of stage=%d lm=%d ndx=%d\n",
+				pipe->num, right_blend ? "right" : "left",
+				pipe->mixer_stage, mixer->num, i);
+	} else {
+		int stage;
+
+		/* expected slot empty or holding another pipe: scan all */
+		for (i = 0; i < MAX_PIPES_PER_LM; i++) {
+			if (pipe != mixer->stage_pipe[i])
+				continue;
+
+			stage = i / MAX_PIPES_PER_STAGE;
+			right_blend = i & 1;
+
+			pr_warn("lm=%d pipe #%d stage=%d with %s blend, unstaged from %s side of stage=%d!\n",
+				mixer->num, pipe->num, pipe->mixer_stage,
+				pipe->is_right_blend ? "right" : "left",
+				right_blend ? "right" : "left", stage);
+			break;
+		}
+
+		/* pipe not found, not a failure */
+		if (i == MAX_PIPES_PER_LM)
+			return 0;
+	}
+
+	mixer->params_changed++;
+	mixer->stage_pipe[i] = NULL;
+
+	return 0;
+}
+
+/*
+ * Apply a pending dynamic-fps (DFPS) change to the hardware.
+ *
+ * No-op unless the panel supports dynamic fps, a config_fps_fnc is
+ * wired up, and an fps update has been requested (default_fps set).
+ * For the immediate porch/clock update modes the target rate is read
+ * back from the panel timing; otherwise pinfo->new_fps is used.
+ * Returns 0 on success or when nothing needs changing, -ENODEV when
+ * panel/mfd data is missing, or the config_fps_fnc error code.
+ */
+int mdss_mdp_ctl_update_fps(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_panel_info *pinfo;
+	struct mdss_overlay_private *mdp5_data;
+	int ret = 0;
+	int new_fps;
+
+	if (!ctl->panel_data || !ctl->mfd)
+		return -ENODEV;
+
+	pinfo = &ctl->panel_data->panel_info;
+
+	if (!pinfo->dynamic_fps || !ctl->ops.config_fps_fnc)
+		return 0;
+
+	if (!pinfo->default_fps) {
+		/* we haven't got any call to update the fps */
+		return 0;
+	}
+
+	mdp5_data = mfd_to_mdp5_data(ctl->mfd);
+	if (!mdp5_data)
+		return -ENODEV;
+
+	/*
+	 * Panel info is already updated with the new fps info,
+	 * so we need to lock the data to make sure the panel info
+	 * is not updated while we reconfigure the HW.
+	 */
+	mutex_lock(&mdp5_data->dfps_lock);
+
+	/* porch/clock based modes derive the rate from the panel timing */
+	if ((pinfo->dfps_update == DFPS_IMMEDIATE_PORCH_UPDATE_MODE_VFP) ||
+		(pinfo->dfps_update == DFPS_IMMEDIATE_PORCH_UPDATE_MODE_HFP) ||
+		(pinfo->dfps_update ==
+			DFPS_IMMEDIATE_MULTI_UPDATE_MODE_CLK_HFP) ||
+		(pinfo->dfps_update ==
+			DFPS_IMMEDIATE_MULTI_MODE_HFP_CALC_CLK) ||
+		pinfo->dfps_update == DFPS_IMMEDIATE_CLK_UPDATE_MODE) {
+		new_fps = mdss_panel_get_framerate(pinfo,
+				FPS_RESOLUTION_DEFAULT);
+	} else {
+		new_fps = pinfo->new_fps;
+	}
+
+	pr_debug("fps new:%d old:%d\n", new_fps,
+		pinfo->current_fps);
+
+	if (new_fps == pinfo->current_fps) {
+		pr_debug("FPS is already %d\n", new_fps);
+		ret = 0;
+		goto exit;
+	}
+
+	ret = ctl->ops.config_fps_fnc(ctl, new_fps);
+	if (!ret)
+		pr_debug("fps set to %d\n", new_fps);
+	else
+		pr_err("Failed to configure %d fps rc=%d\n",
+			new_fps, ret);
+
+exit:
+	mutex_unlock(&mdp5_data->dfps_lock);
+	return ret;
+}
+
+/*
+ * Block until the in-flight display frame completes, then drop perf votes.
+ *
+ * Runs the ctl's wait_fnc under ctl->lock. The lock is taken
+ * interruptibly, so this can return early (e.g. -EINTR) without waiting.
+ * On MDP rev 1.3.x, when no flush bits remain pending, BIT(28) of the
+ * AHB clock register is additionally cleared — NOTE(review): this looks
+ * like a revision-specific clock-gating workaround; confirm against the
+ * HW programming guide before modifying.
+ */
+int mdss_mdp_display_wait4comp(struct mdss_mdp_ctl *ctl)
+{
+	int ret;
+	u32 reg_data, flush_data;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!ctl) {
+		pr_err("invalid ctl\n");
+		return -ENODEV;
+	}
+
+	ret = mutex_lock_interruptible(&ctl->lock);
+	if (ret)
+		return ret;
+
+	if (!mdss_mdp_ctl_is_power_on(ctl)) {
+		mutex_unlock(&ctl->lock);
+		return 0;
+	}
+
+	ATRACE_BEGIN("wait_fnc");
+	if (ctl->ops.wait_fnc)
+		ret = ctl->ops.wait_fnc(ctl, NULL);
+	ATRACE_END("wait_fnc");
+
+	trace_mdp_commit(ctl);
+
+	/* frame done: release the bandwidth/perf vote taken at commit */
+	mdss_mdp_ctl_perf_update(ctl, 0, false);
+	mdata->bw_limit_pending = false;
+
+	if (IS_MDSS_MAJOR_MINOR_SAME(mdata->mdp_rev, MDSS_MDP_HW_REV_103)) {
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+		reg_data = mdss_mdp_ctl_read(ctl, MDSS_MDP_REG_CTL_FLUSH);
+		flush_data = readl_relaxed(mdata->mdp_base + AHB_CLK_OFFSET);
+		if ((flush_data & BIT(28)) &&
+		    !(ctl->flush_reg_data & reg_data)) {
+
+			flush_data &= ~(BIT(28));
+			writel_relaxed(flush_data,
+					 mdata->mdp_base + AHB_CLK_OFFSET);
+			ctl->flush_reg_data = 0;
+		}
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	}
+
+	mutex_unlock(&ctl->lock);
+	return ret;
+}
+
+/*
+ * Wait for the command-mode pingpong-done on @ctl and its split ctl.
+ *
+ * @use_lock selects whether ctl->lock is taken here (pass false when the
+ * caller already holds it). If either wait fails, both ctls are reset
+ * and the DSI write pointer is re-armed to recover the panel. Note the
+ * returned status is the last wait's result, so the split ctl's outcome
+ * overwrites the main ctl's.
+ */
+int mdss_mdp_display_wait4pingpong(struct mdss_mdp_ctl *ctl, bool use_lock)
+{
+	struct mdss_mdp_ctl *sctl = NULL;
+	int ret;
+	bool recovery_needed = false;
+
+	if (use_lock) {
+		ret = mutex_lock_interruptible(&ctl->lock);
+		if (ret)
+			return ret;
+	}
+
+	if (!mdss_mdp_ctl_is_power_on(ctl) || !ctl->ops.wait_pingpong) {
+		if (use_lock)
+			mutex_unlock(&ctl->lock);
+		return 0;
+	}
+
+	ATRACE_BEGIN("wait_pingpong");
+	ret = ctl->ops.wait_pingpong(ctl, NULL);
+	ATRACE_END("wait_pingpong");
+	if (ret)
+		recovery_needed = true;
+
+	sctl = mdss_mdp_get_split_ctl(ctl);
+
+	if (sctl && sctl->ops.wait_pingpong) {
+		ATRACE_BEGIN("wait_pingpong sctl");
+		ret = sctl->ops.wait_pingpong(sctl, NULL);
+		ATRACE_END("wait_pingpong sctl");
+		if (ret)
+			recovery_needed = true;
+	}
+
+	ctl->mdata->bw_limit_pending = false;
+	if (recovery_needed) {
+		/* timeout path: hard-reset both ctls and re-arm DSI */
+		mdss_mdp_ctl_reset(ctl, true);
+		if (sctl)
+			mdss_mdp_ctl_reset(sctl, true);
+
+		mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_DSI_RESET_WRITE_PTR,
+			NULL, CTL_INTF_EVENT_FLAG_DEFAULT);
+
+		pr_debug("pingpong timeout recovery finished\n");
+	}
+
+	if (use_lock)
+		mutex_unlock(&ctl->lock);
+
+	return ret;
+}
+
+/*
+ * Blank both layer mixers by forcing border-color output.
+ *
+ * Used when fence waits time out and the layer buffers can no longer be
+ * trusted. The forced-blank state is applied only for this mixer
+ * programming pass; afterwards params_changed is bumped so the next
+ * frame restores the normal mixer configuration.
+ */
+static void mdss_mdp_force_border_color(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
+	bool lm_swap = mdss_mdp_is_lm_swap_needed(mdata, ctl);
+
+	ctl->force_screen_state = MDSS_SCREEN_FORCE_BLANK;
+	if (sctl)
+		sctl->force_screen_state = MDSS_SCREEN_FORCE_BLANK;
+
+	mdss_mdp_mixer_setup(ctl, MDSS_MDP_MIXER_MUX_LEFT, lm_swap);
+	mdss_mdp_mixer_setup(ctl, MDSS_MDP_MIXER_MUX_RIGHT, lm_swap);
+
+	ctl->force_screen_state = MDSS_SCREEN_DEFAULT;
+	if (sctl)
+		sctl->force_screen_state = MDSS_SCREEN_DEFAULT;
+
+	/* ensure the following frame reprograms the mixers with content */
+	if (ctl->mixer_left)
+		ctl->mixer_left->params_changed++;
+	if (ctl->mixer_right)
+		ctl->mixer_right->params_changed++;
+}
+
+/*
+ * mdss_mdp_display_commit() - program and kick off one display frame
+ * @ctl:       ctl of the primary (left) display path
+ * @arg:       opaque argument forwarded to the prepare/display ops
+ * @commit_cb: optional callbacks fired at SETUP_DONE / READY_FOR_KICKOFF
+ *
+ * Sequence: take perf votes, reprogram mixers and CTL_TOP when params
+ * changed, run postprocessing (before kickoff for video mode, after
+ * pingpong for command mode), update partial-update ROIs for DSI, then
+ * write the accumulated flush bits and invoke the display op to start
+ * the frame. Split-display (sctl) and pingpong-split topologies are
+ * handled along the way. Returns 0 on success or a negative errno.
+ */
+int mdss_mdp_display_commit(struct mdss_mdp_ctl *ctl, void *arg,
+	struct mdss_mdp_commit_cb *commit_cb)
+{
+	struct mdss_mdp_ctl *sctl = NULL;
+	int ret = 0;
+	bool is_bw_released, split_lm_valid;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 ctl_flush_bits = 0, sctl_flush_bits = 0;
+
+	if (!ctl) {
+		pr_err("display function not set\n");
+		return -ENODEV;
+	}
+
+	mutex_lock(&ctl->lock);
+	pr_debug("commit ctl=%d play_cnt=%d\n", ctl->num, ctl->play_cnt);
+
+	if (!mdss_mdp_ctl_is_power_on(ctl)) {
+		mutex_unlock(&ctl->lock);
+		return 0;
+	}
+
+	split_lm_valid = mdss_mdp_is_both_lm_valid(ctl);
+
+	sctl = mdss_mdp_get_split_ctl(ctl);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+	/* flush_lock serializes flush-bit accumulation with pipe staging */
+	mutex_lock(&ctl->flush_lock);
+
+	/*
+	 * We could have released the bandwidth if there were no transactions
+	 * pending, so we want to re-calculate the bandwidth in this situation
+	 */
+	is_bw_released = !mdss_mdp_ctl_perf_get_transaction_status(ctl);
+	if (is_bw_released) {
+		if (sctl)
+			is_bw_released =
+				!mdss_mdp_ctl_perf_get_transaction_status(sctl);
+	}
+
+	/*
+	 * left update on any topology or
+	 * any update on MDP_DUAL_LM_SINGLE_DISPLAY topology.
+	 */
+	if (ctl->mixer_left->valid_roi ||
+	    (is_dual_lm_single_display(ctl->mfd) &&
+	     ctl->mixer_right->valid_roi))
+		mdss_mdp_ctl_perf_set_transaction_status(ctl,
+				PERF_SW_COMMIT_STATE, PERF_STATUS_BUSY);
+
+	/* right update on MDP_DUAL_LM_DUAL_DISPLAY */
+	if (sctl && sctl->mixer_left->valid_roi)
+		mdss_mdp_ctl_perf_set_transaction_status(sctl,
+			PERF_SW_COMMIT_STATE, PERF_STATUS_BUSY);
+
+	if (ctl->mixer_right)
+		ctl->mixer_right->src_split_req =
+			mdata->has_src_split && split_lm_valid;
+
+	/* mixers need reprogramming when params changed or bw was dropped */
+	if (is_bw_released || ctl->force_screen_state ||
+	    (ctl->mixer_left->params_changed) ||
+	    (ctl->mixer_right && ctl->mixer_right->params_changed)) {
+		bool lm_swap = mdss_mdp_is_lm_swap_needed(mdata, ctl);
+
+		ATRACE_BEGIN("prepare_fnc");
+		if (ctl->ops.prepare_fnc)
+			ret = ctl->ops.prepare_fnc(ctl, arg);
+		ATRACE_END("prepare_fnc");
+		if (ret) {
+			pr_err("error preparing display\n");
+			mutex_unlock(&ctl->flush_lock);
+			goto done;
+		}
+
+		ATRACE_BEGIN("mixer_programming");
+		mdss_mdp_ctl_perf_update(ctl, 1, false);
+
+		mdss_mdp_mixer_setup(ctl, MDSS_MDP_MIXER_MUX_LEFT, lm_swap);
+		mdss_mdp_mixer_setup(ctl, MDSS_MDP_MIXER_MUX_RIGHT, lm_swap);
+
+		mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_TOP, ctl->opmode);
+		ctl->flush_bits |= BIT(17);	/* CTL */
+
+		if (sctl) {
+			mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_TOP,
+					sctl->opmode);
+			sctl->flush_bits |= BIT(17);
+			sctl_flush_bits = sctl->flush_bits;
+		}
+		ATRACE_END("mixer_programming");
+	}
+
+	/*
+	 * With partial frame update, enable split display bit only
+	 * when validity of ROI's on both the DSI's are identical.
+	 */
+	if (sctl)
+		mdss_mdp_ctl_split_display_enable(split_lm_valid, ctl, sctl);
+
+	ATRACE_BEGIN("postproc_programming");
+	if (ctl->is_video_mode && ctl->mfd && ctl->mfd->dcm_state != DTM_ENTER)
+		/* postprocessing setup, including dspp */
+		mdss_mdp_pp_setup_locked(ctl);
+
+	if (sctl) {
+		if (ctl->split_flush_en) {
+			/* single flush: fold sctl bits into the main ctl */
+			ctl->flush_bits |= sctl->flush_bits;
+			sctl->flush_bits = 0;
+			sctl_flush_bits = 0;
+		} else {
+			sctl_flush_bits = sctl->flush_bits;
+		}
+		sctl->commit_in_progress = true;
+	}
+	ctl->commit_in_progress = true;
+	ctl_flush_bits = ctl->flush_bits;
+
+	ATRACE_END("postproc_programming");
+
+	mutex_unlock(&ctl->flush_lock);
+
+	ATRACE_BEGIN("frame_ready");
+	mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_CFG_DONE);
+	if (commit_cb)
+		commit_cb->commit_cb_fnc(
+			MDP_COMMIT_STAGE_SETUP_DONE,
+			commit_cb->data);
+	ret = mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_READY);
+
+	/*
+	 * When wait for fence timed out, driver ignores the fences
+	 * for signalling. Hardware needs to access only on the buffers
+	 * that are valid and driver needs to ensure it. This function
+	 * would set the mixer state to border when there is timeout.
+	 */
+	if (ret == NOTIFY_BAD) {
+		mdss_mdp_force_border_color(ctl);
+		ctl_flush_bits |= (ctl->flush_bits | BIT(17));
+		if (sctl && (!ctl->split_flush_en))
+			sctl_flush_bits |= (sctl->flush_bits | BIT(17));
+		ret = 0;
+	}
+
+	ATRACE_END("frame_ready");
+
+	if (ctl->ops.wait_pingpong && !mdata->serialize_wait4pp)
+		mdss_mdp_display_wait4pingpong(ctl, false);
+
+	/* Moved pp programming to post ping pong */
+	if (!ctl->is_video_mode && ctl->mfd &&
+			ctl->mfd->dcm_state != DTM_ENTER) {
+		/* postprocessing setup, including dspp */
+		mutex_lock(&ctl->flush_lock);
+		mdss_mdp_pp_setup_locked(ctl);
+		if (sctl) {
+			if (ctl->split_flush_en) {
+				ctl->flush_bits |= sctl->flush_bits;
+				sctl->flush_bits = 0;
+				sctl_flush_bits = 0;
+			} else {
+				sctl_flush_bits = sctl->flush_bits;
+			}
+		}
+		ctl_flush_bits = ctl->flush_bits;
+		mutex_unlock(&ctl->flush_lock);
+	}
+	/*
+	 * if serialize_wait4pp is false then roi_bkup used in wait4pingpong
+	 * will be of previous frame as expected.
+	 */
+	ctl->roi_bkup.w = ctl->roi.w;
+	ctl->roi_bkup.h = ctl->roi.h;
+
+	/*
+	 * update roi of panel_info which will be
+	 * used by dsi to set col_page addr of panel.
+	 */
+	if (ctl->panel_data &&
+	    ctl->panel_data->panel_info.partial_update_enabled) {
+
+		if (is_pingpong_split(ctl->mfd)) {
+			bool pp_split = false;
+			struct mdss_rect l_roi, r_roi, temp = {0};
+			u32 opmode = mdss_mdp_ctl_read(ctl,
+			     MDSS_MDP_REG_CTL_TOP) & ~0xF0; /* clear OUT_SEL */
+			/*
+			 * with pp split enabled, it is a requirement that both
+			 * panels share equal load, so split-point is center.
+			 */
+			u32 left_panel_w = left_lm_w_from_mfd(ctl->mfd) / 2;
+
+			mdss_rect_split(&ctl->roi, &l_roi, &r_roi,
+				left_panel_w);
+
+			/*
+			 * If update is only on left panel then we still send
+			 * zeroed out right panel ROIs to DSI driver. Based on
+			 * zeroed ROI, DSI driver identifies which panel is not
+			 * transmitting.
+			 */
+			ctl->panel_data->panel_info.roi = l_roi;
+			ctl->panel_data->next->panel_info.roi = r_roi;
+
+			/* based on the roi, update ctl topology */
+			if (!mdss_rect_cmp(&temp, &l_roi) &&
+			    !mdss_rect_cmp(&temp, &r_roi)) {
+				/* left + right */
+				opmode |= (ctl->intf_num << 4);
+				pp_split = true;
+			} else if (mdss_rect_cmp(&temp, &l_roi)) {
+				/* right only */
+				opmode |= (ctl->slave_intf_num << 4);
+				pp_split = false;
+			} else {
+				/* left only */
+				opmode |= (ctl->intf_num << 4);
+				pp_split = false;
+			}
+
+			mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_TOP, opmode);
+
+			mdss_mdp_ctl_pp_split_display_enable(pp_split, ctl);
+		} else {
+			/*
+			 * if single lm update on 3D mux topology, clear it.
+			 */
+			if ((is_dual_lm_single_display(ctl->mfd)) &&
+			    (ctl->opmode & MDSS_MDP_CTL_OP_PACK_3D_ENABLE) &&
+			    (!mdss_mdp_is_both_lm_valid(ctl))) {
+
+				u32 opmode = mdss_mdp_ctl_read(ctl,
+					MDSS_MDP_REG_CTL_TOP);
+			       opmode &= ~(0xF << 19); /* clear 3D Mux */
+
+				mdss_mdp_ctl_write(ctl,
+					MDSS_MDP_REG_CTL_TOP, opmode);
+			}
+
+			ctl->panel_data->panel_info.roi = ctl->roi;
+			if (sctl && sctl->panel_data)
+				sctl->panel_data->panel_info.roi = sctl->roi;
+		}
+	}
+
+	if (commit_cb)
+		commit_cb->commit_cb_fnc(MDP_COMMIT_STAGE_READY_FOR_KICKOFF,
+			commit_cb->data);
+
+	if (mdss_has_quirk(mdata, MDSS_QUIRK_BWCPANIC) &&
+	    !bitmap_empty(mdata->bwc_enable_map, MAX_DRV_SUP_PIPES))
+		mdss_mdp_bwcpanic_ctrl(mdata, true);
+
+	ATRACE_BEGIN("flush_kickoff");
+	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, ctl_flush_bits);
+	if (sctl) {
+		if (sctl_flush_bits) {
+			mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_FLUSH,
+				sctl_flush_bits);
+			sctl->flush_bits = 0;
+		}
+		sctl->commit_in_progress = false;
+	}
+	ctl->commit_in_progress = false;
+
+	MDSS_XLOG(ctl->intf_num, ctl_flush_bits, sctl_flush_bits,
+		split_lm_valid);
+	wmb(); /* ensure write is finished before progressing */
+	ctl->flush_reg_data = ctl_flush_bits;
+	ctl->flush_bits = 0;
+
+	mdss_mdp_mixer_update_pipe_map(ctl, MDSS_MDP_MIXER_MUX_LEFT);
+	mdss_mdp_mixer_update_pipe_map(ctl, MDSS_MDP_MIXER_MUX_RIGHT);
+
+	/* right-only kickoff */
+	if (!ctl->mixer_left->valid_roi &&
+	    sctl && sctl->mixer_left->valid_roi) {
+		/*
+		 * Separate kickoff on DSI1 is needed only when we have
+		 * ONLY right half updating on a dual DSI panel
+		 */
+		if (sctl->ops.display_fnc)
+			ret = sctl->ops.display_fnc(sctl, arg);
+	} else {
+		if (ctl->ops.display_fnc)
+			ret = ctl->ops.display_fnc(ctl, arg); /* DSI0 kickoff */
+	}
+
+	if (ret)
+		pr_warn("ctl %d error displaying frame\n", ctl->num);
+
+	ctl->play_cnt++;
+	ATRACE_END("flush_kickoff");
+
+done:
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+
+	mutex_unlock(&ctl->lock);
+
+	return ret;
+}
+
+/*
+ * Register @notifier on @ctl's event chain and, when the ctl has a
+ * split-display peer, on the peer's chain as well.
+ */
+void mdss_mdp_ctl_notifier_register(struct mdss_mdp_ctl *ctl,
+	struct notifier_block *notifier)
+{
+	struct mdss_mdp_ctl *split;
+
+	blocking_notifier_chain_register(&ctl->notifier_head, notifier);
+
+	split = mdss_mdp_get_split_ctl(ctl);
+	if (!split)
+		return;
+
+	blocking_notifier_chain_register(&split->notifier_head, notifier);
+}
+
+/*
+ * Remove @notifier from @ctl's event chain and, when the ctl has a
+ * split-display peer, from the peer's chain as well.
+ */
+void mdss_mdp_ctl_notifier_unregister(struct mdss_mdp_ctl *ctl,
+	struct notifier_block *notifier)
+{
+	struct mdss_mdp_ctl *split;
+
+	blocking_notifier_chain_unregister(&ctl->notifier_head, notifier);
+
+	split = mdss_mdp_get_split_ctl(ctl);
+	if (!split)
+		return;
+
+	blocking_notifier_chain_unregister(&split->notifier_head, notifier);
+}
+
+/* Broadcast @event to all listeners on @ctl's notifier chain. */
+int mdss_mdp_ctl_notify(struct mdss_mdp_ctl *ctl, int event)
+{
+	struct blocking_notifier_head *head = &ctl->notifier_head;
+
+	return blocking_notifier_call_chain(head, event, ctl);
+}
+
+/*
+ * Collect the mixer numbers driving framebuffer @fb_num.
+ *
+ * Scans all ctls for a powered-on one attached to the requested fb and
+ * fills @mixer_id with the left (and, if present, right) mixer numbers.
+ * Returns the number of entries written, or 0 when no match is found.
+ * The right mixer is reported only after a left mixer, preserving the
+ * left-first ordering of @mixer_id.
+ */
+int mdss_mdp_get_ctl_mixers(u32 fb_num, u32 *mixer_id)
+{
+	struct mdss_data_type *mdata;
+	struct mdss_mdp_ctl *ctl;
+	u32 found = 0;
+	int idx;
+
+	mutex_lock(&mdss_mdp_ctl_lock);
+	mdata = mdss_mdp_get_mdata();
+	for (idx = 0; idx < mdata->nctl; idx++) {
+		ctl = mdata->ctl_off + idx;
+
+		if (!mdss_mdp_ctl_is_power_on(ctl) || !ctl->mfd ||
+			(ctl->mfd->index != fb_num))
+			continue;
+
+		if (ctl->mixer_left)
+			mixer_id[found++] = ctl->mixer_left->num;
+		if (found && ctl->mixer_right)
+			mixer_id[found++] = ctl->mixer_right->num;
+		if (found)
+			break;
+	}
+	mutex_unlock(&mdss_mdp_ctl_lock);
+	return found;
+}
+
+/**
+ * @mdss_mdp_ctl_mixer_switch() - return ctl mixer of @return_type
+ * @ctl: Pointer to ctl structure to be switched.
+ * @return_type: wb_type of the ctl to be switched to.
+ *
+ * Virtual mixer switch should be performed only when there is no
+ * dedicated wfd block and writeback block is shared.
+ *
+ * Returns the ctl whose wb_type matches @return_type, or NULL when no
+ * such ctl exists.
+ */
+struct mdss_mdp_ctl *mdss_mdp_ctl_mixer_switch(struct mdss_mdp_ctl *ctl,
+					       u32 return_type)
+{
+	int i;
+	struct mdss_data_type *mdata = ctl->mdata;
+
+	if (ctl->wb_type == return_type) {
+		mdata->mixer_switched = false;
+		return ctl;
+	}
+	/*
+	 * Fix: the scan previously used "i <= mdata->nctl", reading one
+	 * element past the end of ctl_off[]. Valid indices are
+	 * 0 .. nctl - 1.
+	 */
+	for (i = 0; i < mdata->nctl; i++) {
+		if (mdata->ctl_off[i].wb_type == return_type) {
+			pr_debug("switching mixer from ctl=%d to ctl=%d\n",
+				 ctl->num, mdata->ctl_off[i].num);
+			mdata->mixer_switched = true;
+			return mdata->ctl_off + i;
+		}
+	}
+	pr_err("unable to switch mixer to type=%d\n", return_type);
+	return NULL;
+}
+
+/*
+ * Stage a bootloader-handed-off pipe on @mixer at MDSS_MDP_STAGE_UNUSED.
+ *
+ * HW revisions >= 1.3 allow two pipes (left + right blend) at the
+ * handoff stage; older revisions allow only one. Returns 0 on success,
+ * -EINVAL when @mixer is NULL or the handoff slots are already full.
+ */
+static int __mdss_mdp_mixer_handoff_helper(struct mdss_mdp_mixer *mixer,
+	struct mdss_mdp_pipe *pipe)
+{
+	int rc = 0;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 right_blend = 0;
+
+	if (!mixer) {
+		rc = -EINVAL;
+		goto error;
+	}
+
+	/*
+	 * It is possible to have more the one pipe staged on a single
+	 * layer mixer at same staging level.
+	 */
+	if (mixer->stage_pipe[MDSS_MDP_STAGE_UNUSED] != NULL) {
+		if (mdata->mdp_rev < MDSS_MDP_HW_REV_103) {
+			pr_err("More than one pipe staged on mixer num %d\n",
+				mixer->num);
+			rc = -EINVAL;
+			goto error;
+		} else if (mixer->stage_pipe[MDSS_MDP_STAGE_UNUSED + 1] !=
+			NULL) {
+			pr_err("More than two pipe staged on mixer num %d\n",
+				mixer->num);
+			rc = -EINVAL;
+			goto error;
+		} else {
+			/* first slot taken: use the right-blend slot */
+			right_blend = 1;
+		}
+	}
+
+	pr_debug("Staging pipe num %d on mixer num %d\n",
+		pipe->num, mixer->num);
+	mixer->stage_pipe[MDSS_MDP_STAGE_UNUSED + right_blend] = pipe;
+	pipe->mixer_left = mixer;
+	pipe->mixer_stage = MDSS_MDP_STAGE_UNUSED;
+
+error:
+	return rc;
+}
+
+/**
+ * mdss_mdp_mixer_handoff() - Stages a given pipe on the appropriate mixer
+ * @ctl:  pointer to the control structure associated with the overlay device.
+ * @num:  the mixer number on which the pipe needs to be staged.
+ * @pipe: pointer to the pipe to be staged.
+ *
+ * Stages @pipe on whichever of the control structure's mixers (left or
+ * right) carries the requested mixer number; when neither matches,
+ * -EINVAL is returned. Called during overlay handoff when certain pipes
+ * were already staged by the bootloader. For performance calculations
+ * the pipe is staged at MDSS_MDP_STAGE_UNUSED.
+ */
+int mdss_mdp_mixer_handoff(struct mdss_mdp_ctl *ctl, u32 num,
+	struct mdss_mdp_pipe *pipe)
+{
+	struct mdss_mdp_mixer *target = NULL;
+
+	if (ctl->mixer_left && (ctl->mixer_left->num == num))
+		target = ctl->mixer_left;
+	else if (ctl->mixer_right && (ctl->mixer_right->num == num))
+		target = ctl->mixer_right;
+
+	if (!target) {
+		pr_err("pipe num %d staged on unallocated mixer num %d\n",
+			pipe->num, num);
+		return -EINVAL;
+	}
+
+	return __mdss_mdp_mixer_handoff_helper(target, pipe);
+}
+
+/*
+ * Allocate a free writeback block matching @caps.
+ *
+ * Scans the wb array under wb_lock for an unreferenced block with the
+ * requested capabilities, takes the initial kref, and maps its register
+ * base. With "virtual" writeback (one wb offset per ctl) the base
+ * offset comes from @reg_index, otherwise from the block's own index.
+ * Returns the claimed block, or NULL when none is free or @reg_index is
+ * out of range.
+ */
+struct mdss_mdp_writeback *mdss_mdp_wb_alloc(u32 caps, u32 reg_index)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_writeback *wb = NULL;
+	int i;
+	bool wb_virtual_on;
+
+	wb_virtual_on = (mdata->nctl == mdata->nwb_offsets);
+
+	if (wb_virtual_on && reg_index >= mdata->nwb_offsets)
+		return NULL;
+
+	mutex_lock(&mdata->wb_lock);
+
+	for (i = 0; i < mdata->nwb; i++) {
+		wb = mdata->wb + i;
+		/*
+		 * NOTE(review): raw kref.refcount read relies on wb_lock
+		 * for exclusion against concurrent alloc/assign/free.
+		 */
+		if ((wb->caps & caps) &&
+			(atomic_read(&wb->kref.refcount) == 0)) {
+			kref_init(&wb->kref);
+			break;
+		}
+		wb = NULL;
+	}
+	mutex_unlock(&mdata->wb_lock);
+
+	if (wb) {
+		wb->base = mdata->mdss_io.base;
+		if (wb_virtual_on)
+			wb->base += mdata->wb_offsets[reg_index];
+		else
+			wb->base += mdata->wb_offsets[i];
+	}
+
+	return wb;
+}
+
+/*
+ * mdss_mdp_is_wb_mdp_intf() - check if a writeback block is an MDP intf
+ * @num: writeback block index to query
+ * @reg_index: writeback register offset index (used with virtual wb)
+ *
+ * Returns true when writeback block @num advertises MDSS_MDP_WB_INTF,
+ * false for out-of-range indices or a missing wb array.
+ */
+bool mdss_mdp_is_wb_mdp_intf(u32 num, u32 reg_index)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_writeback *wb = NULL;
+	bool wb_virtual_on;
+
+	/* virtual wb mode: one register offset per ctl */
+	wb_virtual_on = (mdata->nctl == mdata->nwb_offsets);
+
+	if (num >= mdata->nwb || (wb_virtual_on && reg_index >=
+			mdata->nwb_offsets))
+		return false;
+
+	/*
+	 * Fix: validate the array base before indexing. The previous
+	 * NULL test on "mdata->wb + num" could never trigger for a
+	 * non-zero num, so a missing wb array dereferenced a bad pointer.
+	 */
+	if (!mdata->wb)
+		return false;
+
+	wb = mdata->wb + num;
+	return (wb->caps & MDSS_MDP_WB_INTF) ? true : false;
+}
+
+/*
+ * Claim writeback block @num and map its register base.
+ *
+ * Takes the initial kref on the block if it is currently unreferenced,
+ * otherwise returns NULL (block busy). With virtual writeback (one wb
+ * offset per ctl) the base offset comes from @reg_index, otherwise
+ * from @num. Both indices are bounds-checked before use.
+ */
+struct mdss_mdp_writeback *mdss_mdp_wb_assign(u32 num, u32 reg_index)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_writeback *wb = NULL;
+	bool wb_virtual_on;
+
+	wb_virtual_on = (mdata->nctl == mdata->nwb_offsets);
+
+	if (num >= mdata->nwb)
+		return NULL;
+
+	if (wb_virtual_on && reg_index >= mdata->nwb_offsets)
+		return NULL;
+
+	mutex_lock(&mdata->wb_lock);
+	wb = mdata->wb + num;
+	/* claim only if unreferenced; wb_lock guards the refcount check */
+	if (atomic_read(&wb->kref.refcount) == 0)
+		kref_init(&wb->kref);
+	else
+		wb = NULL;
+	mutex_unlock(&mdata->wb_lock);
+
+	if (!wb)
+		return NULL;
+
+	wb->base = mdata->mdss_io.base;
+	if (wb_virtual_on)
+		wb->base += mdata->wb_offsets[reg_index];
+	else
+		wb->base += mdata->wb_offsets[num];
+
+	return wb;
+}
+
+/*
+ * kref release callback for a writeback block: clear the mapped base so
+ * a stale pointer is never reused after the last reference is dropped.
+ */
+static void mdss_mdp_wb_release(struct kref *kref)
+{
+	struct mdss_mdp_writeback *wb;
+
+	wb = container_of(kref, struct mdss_mdp_writeback, kref);
+	if (!wb)
+		return;
+
+	wb->base = NULL;
+}
+
+/* Drop a reference on @wb; the final put clears its base under wb_lock. */
+void mdss_mdp_wb_free(struct mdss_mdp_writeback *wb)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mutex *lock = &mdata->wb_lock;
+
+	if (kref_put_mutex(&wb->kref, mdss_mdp_wb_release, lock))
+		mutex_unlock(lock);
+}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_debug.c b/drivers/video/fbdev/msm/mdss_mdp_debug.c
new file mode 100644
index 0000000..b641ccb
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_debug.c
@@ -0,0 +1,1515 @@
+/*
+ * Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/time.h>
+#include <linux/seq_file.h>
+
+#include "mdss_mdp.h"
+#include "mdss_panel.h"
+#include "mdss_debug.h"
+#include "mdss_mdp_debug.h"
+
+#define BUF_DUMP_LAST_N 10
+
+static struct debug_bus dbg_bus_8996[] = {
+
+	/*
+	 * sspp0  - 0x188
+	 * sspp1  - 0x298
+	 * dspp   - 0x348
+	 * periph - 0x418
+	 */
+
+	/* Unpack 0 sspp 0*/
+	{ 0x188, 50, 2 },
+	{ 0x188, 60, 2 },
+	{ 0x188, 54, 2 },
+	{ 0x188, 64, 2 },
+	{ 0x188, 70, 2 },
+	{ 0x188, 85, 2 },
+	/* Upack 0 sspp 1*/
+	{ 0x298, 50, 2 },
+	{ 0x298, 60, 2 },
+	{ 0x298, 54, 2 },
+	{ 0x298, 64, 2 },
+	{ 0x298, 70, 2 },
+	{ 0x298, 85, 2 },
+	/* scheduler */
+	{ 0x348, 130, 0 },
+	{ 0x348, 130, 1 },
+	{ 0x348, 130, 2 },
+	{ 0x348, 130, 3 },
+	{ 0x348, 130, 4 },
+	{ 0x348, 130, 5 },
+
+	/* qseed */
+	{0x188, 6, 0},
+	{0x188, 6, 1},
+	{0x188, 26, 0},
+	{0x188, 26, 1},
+	{0x298, 6, 0},
+	{0x298, 6, 1},
+	{0x298, 26, 0},
+	{0x298, 26, 1},
+
+	/* scale */
+	{0x188, 16, 0},
+	{0x188, 16, 1},
+	{0x188, 36, 0},
+	{0x188, 36, 1},
+	{0x298, 16, 0},
+	{0x298, 16, 1},
+	{0x298, 36, 0},
+	{0x298, 36, 1},
+
+	/* fetch sspp0 */
+
+	/* vig 0 */
+	{ 0x188, 0, 0 },
+	{ 0x188, 0, 1 },
+	{ 0x188, 0, 2 },
+	{ 0x188, 0, 3 },
+	{ 0x188, 0, 4 },
+	{ 0x188, 0, 5 },
+	{ 0x188, 0, 6 },
+	{ 0x188, 0, 7 },
+
+	{ 0x188, 1, 0 },
+	{ 0x188, 1, 1 },
+	{ 0x188, 1, 2 },
+	{ 0x188, 1, 3 },
+	{ 0x188, 1, 4 },
+	{ 0x188, 1, 5 },
+	{ 0x188, 1, 6 },
+	{ 0x188, 1, 7 },
+
+	{ 0x188, 2, 0 },
+	{ 0x188, 2, 1 },
+	{ 0x188, 2, 2 },
+	{ 0x188, 2, 3 },
+	{ 0x188, 2, 4 },
+	{ 0x188, 2, 5 },
+	{ 0x188, 2, 6 },
+	{ 0x188, 2, 7 },
+
+	{ 0x188, 4, 0 },
+	{ 0x188, 4, 1 },
+	{ 0x188, 4, 2 },
+	{ 0x188, 4, 3 },
+	{ 0x188, 4, 4 },
+	{ 0x188, 4, 5 },
+	{ 0x188, 4, 6 },
+	{ 0x188, 4, 7 },
+
+	{ 0x188, 5, 0 },
+	{ 0x188, 5, 1 },
+	{ 0x188, 5, 2 },
+	{ 0x188, 5, 3 },
+	{ 0x188, 5, 4 },
+	{ 0x188, 5, 5 },
+	{ 0x188, 5, 6 },
+	{ 0x188, 5, 7 },
+
+	/* vig 2 */
+	{ 0x188, 20, 0 },
+	{ 0x188, 20, 1 },
+	{ 0x188, 20, 2 },
+	{ 0x188, 20, 3 },
+	{ 0x188, 20, 4 },
+	{ 0x188, 20, 5 },
+	{ 0x188, 20, 6 },
+	{ 0x188, 20, 7 },
+
+	{ 0x188, 21, 0 },
+	{ 0x188, 21, 1 },
+	{ 0x188, 21, 2 },
+	{ 0x188, 21, 3 },
+	{ 0x188, 21, 4 },
+	{ 0x188, 21, 5 },
+	{ 0x188, 21, 6 },
+	{ 0x188, 21, 7 },
+
+	{ 0x188, 22, 0 },
+	{ 0x188, 22, 1 },
+	{ 0x188, 22, 2 },
+	{ 0x188, 22, 3 },
+	{ 0x188, 22, 4 },
+	{ 0x188, 22, 5 },
+	{ 0x188, 22, 6 },
+	{ 0x188, 22, 7 },
+
+	{ 0x188, 24, 0 },
+	{ 0x188, 24, 1 },
+	{ 0x188, 24, 2 },
+	{ 0x188, 24, 3 },
+	{ 0x188, 24, 4 },
+	{ 0x188, 24, 5 },
+	{ 0x188, 24, 6 },
+	{ 0x188, 24, 7 },
+
+	{ 0x188, 25, 0 },
+	{ 0x188, 25, 1 },
+	{ 0x188, 25, 2 },
+	{ 0x188, 25, 3 },
+	{ 0x188, 25, 4 },
+	{ 0x188, 25, 5 },
+	{ 0x188, 25, 6 },
+	{ 0x188, 25, 7 },
+
+	/* rgb 0 */
+	{ 0x188, 10, 0 },
+	{ 0x188, 10, 1 },
+	{ 0x188, 10, 2 },
+	{ 0x188, 10, 3 },
+	{ 0x188, 10, 4 },
+	{ 0x188, 10, 5 },
+	{ 0x188, 10, 6 },
+	{ 0x188, 10, 7 },
+
+	{ 0x188, 11, 0 },
+	{ 0x188, 11, 1 },
+	{ 0x188, 11, 2 },
+	{ 0x188, 11, 3 },
+	{ 0x188, 11, 4 },
+	{ 0x188, 11, 5 },
+	{ 0x188, 11, 6 },
+	{ 0x188, 11, 7 },
+
+	{ 0x188, 12, 0 },
+	{ 0x188, 12, 1 },
+	{ 0x188, 12, 2 },
+	{ 0x188, 12, 3 },
+	{ 0x188, 12, 4 },
+	{ 0x188, 12, 5 },
+	{ 0x188, 12, 6 },
+	{ 0x188, 12, 7 },
+
+	{ 0x188, 14, 0 },
+	{ 0x188, 14, 1 },
+	{ 0x188, 14, 2 },
+	{ 0x188, 14, 3 },
+	{ 0x188, 14, 4 },
+	{ 0x188, 14, 5 },
+	{ 0x188, 14, 6 },
+	{ 0x188, 14, 7 },
+
+	{ 0x188, 15, 0 },
+	{ 0x188, 15, 1 },
+	{ 0x188, 15, 2 },
+	{ 0x188, 15, 3 },
+	{ 0x188, 15, 4 },
+	{ 0x188, 15, 5 },
+	{ 0x188, 15, 6 },
+	{ 0x188, 15, 7 },
+
+	/* rgb 2 */
+	{ 0x188, 30, 0 },
+	{ 0x188, 30, 1 },
+	{ 0x188, 30, 2 },
+	{ 0x188, 30, 3 },
+	{ 0x188, 30, 4 },
+	{ 0x188, 30, 5 },
+	{ 0x188, 30, 6 },
+	{ 0x188, 30, 7 },
+
+	{ 0x188, 31, 0 },
+	{ 0x188, 31, 1 },
+	{ 0x188, 31, 2 },
+	{ 0x188, 31, 3 },
+	{ 0x188, 31, 4 },
+	{ 0x188, 31, 5 },
+	{ 0x188, 31, 6 },
+	{ 0x188, 31, 7 },
+
+	{ 0x188, 32, 0 },
+	{ 0x188, 32, 1 },
+	{ 0x188, 32, 2 },
+	{ 0x188, 32, 3 },
+	{ 0x188, 32, 4 },
+	{ 0x188, 32, 5 },
+	{ 0x188, 32, 6 },
+	{ 0x188, 32, 7 },
+
+	{ 0x188, 34, 0 },
+	{ 0x188, 34, 1 },
+	{ 0x188, 34, 2 },
+	{ 0x188, 34, 3 },
+	{ 0x188, 34, 4 },
+	{ 0x188, 34, 5 },
+	{ 0x188, 34, 6 },
+	{ 0x188, 34, 7 },
+
+	{ 0x188, 35, 0 },
+	{ 0x188, 35, 1 },
+	{ 0x188, 35, 2 },
+	{ 0x188, 35, 3 },
+	{ 0x188, 35, 4 },
+	{ 0x188, 35, 5 },
+	{ 0x188, 35, 6 },
+	{ 0x188, 35, 7 },
+
+	/* dma 0 */
+	{ 0x188, 40, 0 },
+	{ 0x188, 40, 1 },
+	{ 0x188, 40, 2 },
+	{ 0x188, 40, 3 },
+	{ 0x188, 40, 4 },
+	{ 0x188, 40, 5 },
+	{ 0x188, 40, 6 },
+	{ 0x188, 40, 7 },
+
+	{ 0x188, 41, 0 },
+	{ 0x188, 41, 1 },
+	{ 0x188, 41, 2 },
+	{ 0x188, 41, 3 },
+	{ 0x188, 41, 4 },
+	{ 0x188, 41, 5 },
+	{ 0x188, 41, 6 },
+	{ 0x188, 41, 7 },
+
+	{ 0x188, 42, 0 },
+	{ 0x188, 42, 1 },
+	{ 0x188, 42, 2 },
+	{ 0x188, 42, 3 },
+	{ 0x188, 42, 4 },
+	{ 0x188, 42, 5 },
+	{ 0x188, 42, 6 },
+	{ 0x188, 42, 7 },
+
+	{ 0x188, 44, 0 },
+	{ 0x188, 44, 1 },
+	{ 0x188, 44, 2 },
+	{ 0x188, 44, 3 },
+	{ 0x188, 44, 4 },
+	{ 0x188, 44, 5 },
+	{ 0x188, 44, 6 },
+	{ 0x188, 44, 7 },
+
+	{ 0x188, 45, 0 },
+	{ 0x188, 45, 1 },
+	{ 0x188, 45, 2 },
+	{ 0x188, 45, 3 },
+	{ 0x188, 45, 4 },
+	{ 0x188, 45, 5 },
+	{ 0x188, 45, 6 },
+	{ 0x188, 45, 7 },
+
+	/* cursor 0 */
+	{ 0x188, 80, 0 },
+	{ 0x188, 80, 1 },
+	{ 0x188, 80, 2 },
+	{ 0x188, 80, 3 },
+	{ 0x188, 80, 4 },
+	{ 0x188, 80, 5 },
+	{ 0x188, 80, 6 },
+	{ 0x188, 80, 7 },
+
+	{ 0x188, 81, 0 },
+	{ 0x188, 81, 1 },
+	{ 0x188, 81, 2 },
+	{ 0x188, 81, 3 },
+	{ 0x188, 81, 4 },
+	{ 0x188, 81, 5 },
+	{ 0x188, 81, 6 },
+	{ 0x188, 81, 7 },
+
+	{ 0x188, 82, 0 },
+	{ 0x188, 82, 1 },
+	{ 0x188, 82, 2 },
+	{ 0x188, 82, 3 },
+	{ 0x188, 82, 4 },
+	{ 0x188, 82, 5 },
+	{ 0x188, 82, 6 },
+	{ 0x188, 82, 7 },
+
+	{ 0x188, 83, 0 },
+	{ 0x188, 83, 1 },
+	{ 0x188, 83, 2 },
+	{ 0x188, 83, 3 },
+	{ 0x188, 83, 4 },
+	{ 0x188, 83, 5 },
+	{ 0x188, 83, 6 },
+	{ 0x188, 83, 7 },
+
+	{ 0x188, 84, 0 },
+	{ 0x188, 84, 1 },
+	{ 0x188, 84, 2 },
+	{ 0x188, 84, 3 },
+	{ 0x188, 84, 4 },
+	{ 0x188, 84, 5 },
+	{ 0x188, 84, 6 },
+	{ 0x188, 84, 7 },
+
+	/* fetch sspp1 */
+	/* vig 1 */
+	{ 0x298, 0, 0 },
+	{ 0x298, 0, 1 },
+	{ 0x298, 0, 2 },
+	{ 0x298, 0, 3 },
+	{ 0x298, 0, 4 },
+	{ 0x298, 0, 5 },
+	{ 0x298, 0, 6 },
+	{ 0x298, 0, 7 },
+
+	{ 0x298, 1, 0 },
+	{ 0x298, 1, 1 },
+	{ 0x298, 1, 2 },
+	{ 0x298, 1, 3 },
+	{ 0x298, 1, 4 },
+	{ 0x298, 1, 5 },
+	{ 0x298, 1, 6 },
+	{ 0x298, 1, 7 },
+
+	{ 0x298, 2, 0 },
+	{ 0x298, 2, 1 },
+	{ 0x298, 2, 2 },
+	{ 0x298, 2, 3 },
+	{ 0x298, 2, 4 },
+	{ 0x298, 2, 5 },
+	{ 0x298, 2, 6 },
+	{ 0x298, 2, 7 },
+
+	{ 0x298, 4, 0 },
+	{ 0x298, 4, 1 },
+	{ 0x298, 4, 2 },
+	{ 0x298, 4, 3 },
+	{ 0x298, 4, 4 },
+	{ 0x298, 4, 5 },
+	{ 0x298, 4, 6 },
+	{ 0x298, 4, 7 },
+
+	{ 0x298, 5, 0 },
+	{ 0x298, 5, 1 },
+	{ 0x298, 5, 2 },
+	{ 0x298, 5, 3 },
+	{ 0x298, 5, 4 },
+	{ 0x298, 5, 5 },
+	{ 0x298, 5, 6 },
+	{ 0x298, 5, 7 },
+
+	/* vig 3 */
+	{ 0x298, 20, 0 },
+	{ 0x298, 20, 1 },
+	{ 0x298, 20, 2 },
+	{ 0x298, 20, 3 },
+	{ 0x298, 20, 4 },
+	{ 0x298, 20, 5 },
+	{ 0x298, 20, 6 },
+	{ 0x298, 20, 7 },
+
+	{ 0x298, 21, 0 },
+	{ 0x298, 21, 1 },
+	{ 0x298, 21, 2 },
+	{ 0x298, 21, 3 },
+	{ 0x298, 21, 4 },
+	{ 0x298, 21, 5 },
+	{ 0x298, 21, 6 },
+	{ 0x298, 21, 7 },
+
+	{ 0x298, 22, 0 },
+	{ 0x298, 22, 1 },
+	{ 0x298, 22, 2 },
+	{ 0x298, 22, 3 },
+	{ 0x298, 22, 4 },
+	{ 0x298, 22, 5 },
+	{ 0x298, 22, 6 },
+	{ 0x298, 22, 7 },
+
+	{ 0x298, 24, 0 },
+	{ 0x298, 24, 1 },
+	{ 0x298, 24, 2 },
+	{ 0x298, 24, 3 },
+	{ 0x298, 24, 4 },
+	{ 0x298, 24, 5 },
+	{ 0x298, 24, 6 },
+	{ 0x298, 24, 7 },
+
+	{ 0x298, 25, 0 },
+	{ 0x298, 25, 1 },
+	{ 0x298, 25, 2 },
+	{ 0x298, 25, 3 },
+	{ 0x298, 25, 4 },
+	{ 0x298, 25, 5 },
+	{ 0x298, 25, 6 },
+	{ 0x298, 25, 7 },
+
+	/* rgb 1 */
+	{ 0x298, 10, 0 },
+	{ 0x298, 10, 1 },
+	{ 0x298, 10, 2 },
+	{ 0x298, 10, 3 },
+	{ 0x298, 10, 4 },
+	{ 0x298, 10, 5 },
+	{ 0x298, 10, 6 },
+	{ 0x298, 10, 7 },
+
+	{ 0x298, 11, 0 },
+	{ 0x298, 11, 1 },
+	{ 0x298, 11, 2 },
+	{ 0x298, 11, 3 },
+	{ 0x298, 11, 4 },
+	{ 0x298, 11, 5 },
+	{ 0x298, 11, 6 },
+	{ 0x298, 11, 7 },
+
+	{ 0x298, 12, 0 },
+	{ 0x298, 12, 1 },
+	{ 0x298, 12, 2 },
+	{ 0x298, 12, 3 },
+	{ 0x298, 12, 4 },
+	{ 0x298, 12, 5 },
+	{ 0x298, 12, 6 },
+	{ 0x298, 12, 7 },
+
+	{ 0x298, 14, 0 },
+	{ 0x298, 14, 1 },
+	{ 0x298, 14, 2 },
+	{ 0x298, 14, 3 },
+	{ 0x298, 14, 4 },
+	{ 0x298, 14, 5 },
+	{ 0x298, 14, 6 },
+	{ 0x298, 14, 7 },
+
+	{ 0x298, 15, 0 },
+	{ 0x298, 15, 1 },
+	{ 0x298, 15, 2 },
+	{ 0x298, 15, 3 },
+	{ 0x298, 15, 4 },
+	{ 0x298, 15, 5 },
+	{ 0x298, 15, 6 },
+	{ 0x298, 15, 7 },
+
+	/* rgb 3 */
+	{ 0x298, 30, 0 },
+	{ 0x298, 30, 1 },
+	{ 0x298, 30, 2 },
+	{ 0x298, 30, 3 },
+	{ 0x298, 30, 4 },
+	{ 0x298, 30, 5 },
+	{ 0x298, 30, 6 },
+	{ 0x298, 30, 7 },
+
+	{ 0x298, 31, 0 },
+	{ 0x298, 31, 1 },
+	{ 0x298, 31, 2 },
+	{ 0x298, 31, 3 },
+	{ 0x298, 31, 4 },
+	{ 0x298, 31, 5 },
+	{ 0x298, 31, 6 },
+	{ 0x298, 31, 7 },
+
+	{ 0x298, 32, 0 },
+	{ 0x298, 32, 1 },
+	{ 0x298, 32, 2 },
+	{ 0x298, 32, 3 },
+	{ 0x298, 32, 4 },
+	{ 0x298, 32, 5 },
+	{ 0x298, 32, 6 },
+	{ 0x298, 32, 7 },
+
+	{ 0x298, 34, 0 },
+	{ 0x298, 34, 1 },
+	{ 0x298, 34, 2 },
+	{ 0x298, 34, 3 },
+	{ 0x298, 34, 4 },
+	{ 0x298, 34, 5 },
+	{ 0x298, 34, 6 },
+	{ 0x298, 34, 7 },
+
+	{ 0x298, 35, 0 },
+	{ 0x298, 35, 1 },
+	{ 0x298, 35, 2 },
+	{ 0x298, 35, 3 },
+	{ 0x298, 35, 4 },
+	{ 0x298, 35, 5 },
+	{ 0x298, 35, 6 },
+	{ 0x298, 35, 7 },
+
+	/* dma 1 */
+	{ 0x298, 40, 0 },
+	{ 0x298, 40, 1 },
+	{ 0x298, 40, 2 },
+	{ 0x298, 40, 3 },
+	{ 0x298, 40, 4 },
+	{ 0x298, 40, 5 },
+	{ 0x298, 40, 6 },
+	{ 0x298, 40, 7 },
+
+	{ 0x298, 41, 0 },
+	{ 0x298, 41, 1 },
+	{ 0x298, 41, 2 },
+	{ 0x298, 41, 3 },
+	{ 0x298, 41, 4 },
+	{ 0x298, 41, 5 },
+	{ 0x298, 41, 6 },
+	{ 0x298, 41, 7 },
+
+	{ 0x298, 42, 0 },
+	{ 0x298, 42, 1 },
+	{ 0x298, 42, 2 },
+	{ 0x298, 42, 3 },
+	{ 0x298, 42, 4 },
+	{ 0x298, 42, 5 },
+	{ 0x298, 42, 6 },
+	{ 0x298, 42, 7 },
+
+	{ 0x298, 44, 0 },
+	{ 0x298, 44, 1 },
+	{ 0x298, 44, 2 },
+	{ 0x298, 44, 3 },
+	{ 0x298, 44, 4 },
+	{ 0x298, 44, 5 },
+	{ 0x298, 44, 6 },
+	{ 0x298, 44, 7 },
+
+	{ 0x298, 45, 0 },
+	{ 0x298, 45, 1 },
+	{ 0x298, 45, 2 },
+	{ 0x298, 45, 3 },
+	{ 0x298, 45, 4 },
+	{ 0x298, 45, 5 },
+	{ 0x298, 45, 6 },
+	{ 0x298, 45, 7 },
+
+	/* cursor 1 */
+	{ 0x298, 80, 0 },
+	{ 0x298, 80, 1 },
+	{ 0x298, 80, 2 },
+	{ 0x298, 80, 3 },
+	{ 0x298, 80, 4 },
+	{ 0x298, 80, 5 },
+	{ 0x298, 80, 6 },
+	{ 0x298, 80, 7 },
+
+	{ 0x298, 81, 0 },
+	{ 0x298, 81, 1 },
+	{ 0x298, 81, 2 },
+	{ 0x298, 81, 3 },
+	{ 0x298, 81, 4 },
+	{ 0x298, 81, 5 },
+	{ 0x298, 81, 6 },
+	{ 0x298, 81, 7 },
+
+	{ 0x298, 82, 0 },
+	{ 0x298, 82, 1 },
+	{ 0x298, 82, 2 },
+	{ 0x298, 82, 3 },
+	{ 0x298, 82, 4 },
+	{ 0x298, 82, 5 },
+	{ 0x298, 82, 6 },
+	{ 0x298, 82, 7 },
+
+	{ 0x298, 83, 0 },
+	{ 0x298, 83, 1 },
+	{ 0x298, 83, 2 },
+	{ 0x298, 83, 3 },
+	{ 0x298, 83, 4 },
+	{ 0x298, 83, 5 },
+	{ 0x298, 83, 6 },
+	{ 0x298, 83, 7 },
+
+	{ 0x298, 84, 0 },
+	{ 0x298, 84, 1 },
+	{ 0x298, 84, 2 },
+	{ 0x298, 84, 3 },
+	{ 0x298, 84, 4 },
+	{ 0x298, 84, 5 },
+	{ 0x298, 84, 6 },
+	{ 0x298, 84, 7 },
+
+	/* dspp */
+	{ 0x348, 13, 0 },
+	{ 0x348, 19, 0 },
+	{ 0x348, 14, 0 },
+	{ 0x348, 14, 1 },
+	{ 0x348, 14, 3 },
+	{ 0x348, 20, 0 },
+	{ 0x348, 20, 1 },
+	{ 0x348, 20, 3 },
+
+	/* dither */
+	{ 0x348, 18, 1 },
+	{ 0x348, 24, 1 },
+
+	/* ppb_0 */
+	{ 0x348, 31, 0 },
+	{ 0x348, 33, 0 },
+	{ 0x348, 35, 0 },
+	{ 0x348, 42, 0 },
+
+	/* ppb_1 */
+	{ 0x348, 32, 0 },
+	{ 0x348, 34, 0 },
+	{ 0x348, 36, 0 },
+	{ 0x348, 43, 0 },
+
+	/* lm_lut */
+	{ 0x348, 109, 0 },
+	{ 0x348, 105, 0 },
+	{ 0x348, 103, 0 },
+	{ 0x348, 101, 0 },
+	{ 0x348,  99, 0 },
+
+	/* tear-check */
+	{ 0x418, 63, 0 },
+	{ 0x418, 64, 0 },
+	{ 0x418, 65, 0 },
+	{ 0x418, 73, 0 },
+	{ 0x418, 74, 0 },
+
+	/* crossbar */
+	{ 0x348, 0, 0},
+
+	/* blend */
+	/* LM0 */
+	{ 0x348, 63, 0},
+	{ 0x348, 63, 1},
+	{ 0x348, 63, 2},
+	{ 0x348, 63, 3},
+	{ 0x348, 63, 4},
+	{ 0x348, 63, 5},
+	{ 0x348, 63, 6},
+	{ 0x348, 63, 7},
+
+	{ 0x348, 64, 0},
+	{ 0x348, 64, 1},
+	{ 0x348, 64, 2},
+	{ 0x348, 64, 3},
+	{ 0x348, 64, 4},
+	{ 0x348, 64, 5},
+	{ 0x348, 64, 6},
+	{ 0x348, 64, 7},
+
+	{ 0x348, 65, 0},
+	{ 0x348, 65, 1},
+	{ 0x348, 65, 2},
+	{ 0x348, 65, 3},
+	{ 0x348, 65, 4},
+	{ 0x348, 65, 5},
+	{ 0x348, 65, 6},
+	{ 0x348, 65, 7},
+
+	{ 0x348, 66, 0},
+	{ 0x348, 66, 1},
+	{ 0x348, 66, 2},
+	{ 0x348, 66, 3},
+	{ 0x348, 66, 4},
+	{ 0x348, 66, 5},
+	{ 0x348, 66, 6},
+	{ 0x348, 66, 7},
+
+	{ 0x348, 67, 0},
+	{ 0x348, 67, 1},
+	{ 0x348, 67, 2},
+	{ 0x348, 67, 3},
+	{ 0x348, 67, 4},
+	{ 0x348, 67, 5},
+	{ 0x348, 67, 6},
+	{ 0x348, 67, 7},
+
+	{ 0x348, 68, 0},
+	{ 0x348, 68, 1},
+	{ 0x348, 68, 2},
+	{ 0x348, 68, 3},
+	{ 0x348, 68, 4},
+	{ 0x348, 68, 5},
+	{ 0x348, 68, 6},
+	{ 0x348, 68, 7},
+
+	{ 0x348, 69, 0},
+	{ 0x348, 69, 1},
+	{ 0x348, 69, 2},
+	{ 0x348, 69, 3},
+	{ 0x348, 69, 4},
+	{ 0x348, 69, 5},
+	{ 0x348, 69, 6},
+	{ 0x348, 69, 7},
+
+	/* LM1 */
+	{ 0x348, 70, 0},
+	{ 0x348, 70, 1},
+	{ 0x348, 70, 2},
+	{ 0x348, 70, 3},
+	{ 0x348, 70, 4},
+	{ 0x348, 70, 5},
+	{ 0x348, 70, 6},
+	{ 0x348, 70, 7},
+
+	{ 0x348, 71, 0},
+	{ 0x348, 71, 1},
+	{ 0x348, 71, 2},
+	{ 0x348, 71, 3},
+	{ 0x348, 71, 4},
+	{ 0x348, 71, 5},
+	{ 0x348, 71, 6},
+	{ 0x348, 71, 7},
+
+	{ 0x348, 72, 0},
+	{ 0x348, 72, 1},
+	{ 0x348, 72, 2},
+	{ 0x348, 72, 3},
+	{ 0x348, 72, 4},
+	{ 0x348, 72, 5},
+	{ 0x348, 72, 6},
+	{ 0x348, 72, 7},
+
+	{ 0x348, 73, 0},
+	{ 0x348, 73, 1},
+	{ 0x348, 73, 2},
+	{ 0x348, 73, 3},
+	{ 0x348, 73, 4},
+	{ 0x348, 73, 5},
+	{ 0x348, 73, 6},
+	{ 0x348, 73, 7},
+
+	{ 0x348, 74, 0},
+	{ 0x348, 74, 1},
+	{ 0x348, 74, 2},
+	{ 0x348, 74, 3},
+	{ 0x348, 74, 4},
+	{ 0x348, 74, 5},
+	{ 0x348, 74, 6},
+	{ 0x348, 74, 7},
+
+	{ 0x348, 75, 0},
+	{ 0x348, 75, 1},
+	{ 0x348, 75, 2},
+	{ 0x348, 75, 3},
+	{ 0x348, 75, 4},
+	{ 0x348, 75, 5},
+	{ 0x348, 75, 6},
+	{ 0x348, 75, 7},
+
+	{ 0x348, 76, 0},
+	{ 0x348, 76, 1},
+	{ 0x348, 76, 2},
+	{ 0x348, 76, 3},
+	{ 0x348, 76, 4},
+	{ 0x348, 76, 5},
+	{ 0x348, 76, 6},
+	{ 0x348, 76, 7},
+
+	/* LM2 */
+	{ 0x348, 77, 0},
+	{ 0x348, 77, 1},
+	{ 0x348, 77, 2},
+	{ 0x348, 77, 3},
+	{ 0x348, 77, 4},
+	{ 0x348, 77, 5},
+	{ 0x348, 77, 6},
+	{ 0x348, 77, 7},
+
+	{ 0x348, 78, 0},
+	{ 0x348, 78, 1},
+	{ 0x348, 78, 2},
+	{ 0x348, 78, 3},
+	{ 0x348, 78, 4},
+	{ 0x348, 78, 5},
+	{ 0x348, 78, 6},
+	{ 0x348, 78, 7},
+
+	{ 0x348, 79, 0},
+	{ 0x348, 79, 1},
+	{ 0x348, 79, 2},
+	{ 0x348, 79, 3},
+	{ 0x348, 79, 4},
+	{ 0x348, 79, 5},
+	{ 0x348, 79, 6},
+	{ 0x348, 79, 7},
+
+	{ 0x348, 80, 0},
+	{ 0x348, 80, 1},
+	{ 0x348, 80, 2},
+	{ 0x348, 80, 3},
+	{ 0x348, 80, 4},
+	{ 0x348, 80, 5},
+	{ 0x348, 80, 6},
+	{ 0x348, 80, 7},
+
+	{ 0x348, 81, 0},
+	{ 0x348, 81, 1},
+	{ 0x348, 81, 2},
+	{ 0x348, 81, 3},
+	{ 0x348, 81, 4},
+	{ 0x348, 81, 5},
+	{ 0x348, 81, 6},
+	{ 0x348, 81, 7},
+
+	{ 0x348, 82, 0},
+	{ 0x348, 82, 1},
+	{ 0x348, 82, 2},
+	{ 0x348, 82, 3},
+	{ 0x348, 82, 4},
+	{ 0x348, 82, 5},
+	{ 0x348, 82, 6},
+	{ 0x348, 82, 7},
+
+	{ 0x348, 83, 0},
+	{ 0x348, 83, 1},
+	{ 0x348, 83, 2},
+	{ 0x348, 83, 3},
+	{ 0x348, 83, 4},
+	{ 0x348, 83, 5},
+	{ 0x348, 83, 6},
+	{ 0x348, 83, 7},
+
+	/* csc */
+	{0x188, 7, 0},
+	{0x188, 7, 1},
+	{0x188, 27, 0},
+	{0x188, 27, 1},
+	{0x298, 7, 0},
+	{0x298, 7, 1},
+	{0x298, 27, 0},
+	{0x298, 27, 1},
+
+	/* pcc */
+	{ 0x188, 3,  3},
+	{ 0x188, 23, 3},
+	{ 0x188, 13, 3},
+	{ 0x188, 33, 3},
+	{ 0x188, 43, 3},
+	{ 0x298, 3,  3},
+	{ 0x298, 23, 3},
+	{ 0x298, 13, 3},
+	{ 0x298, 33, 3},
+	{ 0x298, 43, 3},
+
+	/* spa */
+	{ 0x188, 8,  0},
+	{ 0x188, 28, 0},
+	{ 0x298, 8,  0},
+	{ 0x298, 28, 0},
+	{ 0x348, 13, 0},
+	{ 0x348, 19, 0},
+
+	/* igc */
+	{ 0x188, 9,  0},
+	{ 0x188, 9,  1},
+	{ 0x188, 9,  3},
+	{ 0x188, 29, 0},
+	{ 0x188, 29, 1},
+	{ 0x188, 29, 3},
+	{ 0x188, 17, 0},
+	{ 0x188, 17, 1},
+	{ 0x188, 17, 3},
+	{ 0x188, 37, 0},
+	{ 0x188, 37, 1},
+	{ 0x188, 37, 3},
+	{ 0x188, 46, 0},
+	{ 0x188, 46, 1},
+	{ 0x188, 46, 3},
+
+	{ 0x298, 9,  0},
+	{ 0x298, 9,  1},
+	{ 0x298, 9,  3},
+	{ 0x298, 29, 0},
+	{ 0x298, 29, 1},
+	{ 0x298, 29, 3},
+	{ 0x298, 17, 0},
+	{ 0x298, 17, 1},
+	{ 0x298, 17, 3},
+	{ 0x298, 37, 0},
+	{ 0x298, 37, 1},
+	{ 0x298, 37, 3},
+	{ 0x298, 46, 0},
+	{ 0x298, 46, 1},
+	{ 0x298, 46, 3},
+
+	{ 0x348, 14, 0},
+	{ 0x348, 14, 1},
+	{ 0x348, 14, 3},
+	{ 0x348, 20, 0},
+	{ 0x348, 20, 1},
+	{ 0x348, 20, 3},
+
+	{ 0x418, 60, 0},
+};
+
+/*
+ * Real-time VBIF debug bus test points for the 8996 (MDSS 1.7.x) family.
+ * NOTE(review): entries appear to be {disable reg, block-select reg,
+ * first block, block count, test-point count} -- confirm against the
+ * struct vbif_debug_bus definition before relying on field order.
+ */
+static struct vbif_debug_bus vbif_dbg_bus_8996[] = {
+	{0x214, 0x21c, 16, 2, 0x10}, /* arb clients */
+	{0x214, 0x21c, 0, 14, 0x13}, /* xin blocks - axi side */
+	{0x21c, 0x214, 0, 14, 0xc}, /* xin blocks - clock side */
+};
+
+/*
+ * Non-real-time VBIF debug bus test points for 8996; same layout as
+ * vbif_dbg_bus_8996 but with fewer clients/xin blocks on the NRT port.
+ */
+static struct vbif_debug_bus nrt_vbif_dbg_bus_8996[] = {
+	{0x214, 0x21c, 16, 1, 0x10}, /* arb clients */
+	{0x214, 0x21c, 0, 12, 0x13}, /* xin blocks - axi side */
+	{0x21c, 0x214, 0, 12, 0xc}, /* xin blocks - clock side */
+};
+
+/*
+ * mdss_mdp_hw_rev_debug_caps_init() - attach debug-bus tables for the
+ * detected MDP hardware revision.
+ * @mdata: MDSS driver data to populate.
+ *
+ * Revisions without a matching table are left with a NULL table pointer
+ * and a zero size so callers can tell no debug-bus data is available.
+ */
+void mdss_mdp_hw_rev_debug_caps_init(struct mdss_data_type *mdata)
+{
+	mdata->dbg_bus = NULL;
+	mdata->dbg_bus_size = 0;
+
+	/* All MDSS 1.7.x (8996 family) revisions share the same tables */
+	if ((mdata->mdp_rev == MDSS_MDP_HW_REV_107) ||
+	    (mdata->mdp_rev == MDSS_MDP_HW_REV_107_1) ||
+	    (mdata->mdp_rev == MDSS_MDP_HW_REV_107_2)) {
+		mdata->dbg_bus = dbg_bus_8996;
+		mdata->dbg_bus_size = ARRAY_SIZE(dbg_bus_8996);
+		mdata->vbif_dbg_bus = vbif_dbg_bus_8996;
+		mdata->vbif_dbg_bus_size = ARRAY_SIZE(vbif_dbg_bus_8996);
+		mdata->nrt_vbif_dbg_bus = nrt_vbif_dbg_bus_8996;
+		mdata->nrt_vbif_dbg_bus_size =
+			ARRAY_SIZE(nrt_vbif_dbg_bus_8996);
+	}
+}
+
+/*
+ * mdss_mdp_debug_mid() - dump all register ranges registered for a
+ * master (xin) id.
+ * @mid: master id to match against each range's xin_id.
+ *
+ * Walks every registered debug base block and, for each dump range
+ * whose xin_id matches @mid, logs the range boundaries and dumps its
+ * registers into the kernel log.
+ */
+void mdss_mdp_debug_mid(u32 mid)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_debug_data *mdd = mdata->debug_inf.debug_data;
+	struct range_dump_node *xlog_node;
+	struct mdss_debug_base *blk_base;
+	char *addr;
+	u32 len;
+
+	list_for_each_entry(blk_base, &mdd->base_list, head) {
+		list_for_each_entry(xlog_node, &blk_base->dump_list, head) {
+			if (xlog_node->xin_id != mid)
+				continue;
+
+			/* clamp the range against the block's last offset */
+			len = get_dump_range(&xlog_node->offset,
+				blk_base->max_offset);
+			addr = blk_base->base + xlog_node->offset.start;
+			pr_info("%s: mid:%d range_base=0x%pK start=0x%x end=0x%x\n",
+				xlog_node->range_name, mid, addr,
+				xlog_node->offset.start, xlog_node->offset.end);
+
+			/*
+			 * Next instruction assumes that MDP clocks are ON
+			 * because it is called from interrupt context
+			 */
+			mdss_dump_reg((const char *)xlog_node->range_name,
+				MDSS_DBG_DUMP_IN_LOG, addr, len,
+				&xlog_node->reg_dump, true);
+		}
+	}
+}
+
+/*
+ * __print_time() - format a nanosecond timestamp as "<sec>.<usec>".
+ * @buf:  destination string buffer.
+ * @size: size of @buf in bytes.
+ * @ts:   timestamp in nanoseconds.
+ *
+ * The "%06lu" field expects a six-digit microsecond value, so the
+ * nanosecond remainder must be scaled down first (same idiom as the
+ * printk timestamp). Printing rem_ns directly would render e.g.
+ * 1234 ns as ".001234" (1.234 us) instead of ".000001".
+ */
+static void __print_time(char *buf, u32 size, u64 ts)
+{
+	unsigned long rem_ns = do_div(ts, NSEC_PER_SEC);
+
+	snprintf(buf, size, "%llu.%06lu", ts, rem_ns / 1000);
+}
+
+/*
+ * __print_buf() - print one line describing an MDP buffer.
+ * @s:         seq_file to print into.
+ * @buf:       buffer to describe.
+ * @show_pipe: also print the number of the pipe that last used @buf.
+ *
+ * Prints state, iova, size and allocation time; for unused buffers the
+ * free time is shown, otherwise the per-plane IOMMU domain and DMA
+ * access permission are listed.
+ */
+static void __print_buf(struct seq_file *s, struct mdss_mdp_data *buf,
+		bool show_pipe)
+{
+	char tmpbuf[20];
+	int i;
+	const char * const buf_stat_stmap[] = {
+		[MDP_BUF_STATE_UNUSED]  = "UNUSED ",
+		[MDP_BUF_STATE_READY]   = "READY  ",
+		[MDP_BUF_STATE_ACTIVE]  = "ACTIVE ",
+		[MDP_BUF_STATE_CLEANUP] = "CLEANUP",
+	};
+	const char * const domain_stmap[] = {
+		[MDSS_IOMMU_DOMAIN_UNSECURE]     = "mdp_unsecure",
+		[MDSS_IOMMU_DOMAIN_ROT_UNSECURE] = "rot_unsecure",
+		[MDSS_IOMMU_DOMAIN_SECURE]       = "mdp_secure",
+		[MDSS_IOMMU_DOMAIN_ROT_SECURE]   = "rot_secure",
+		[MDSS_IOMMU_MAX_DOMAIN]          = "undefined",
+	};
+	/*
+	 * Device access permission per DMA direction: DMA_TO_DEVICE means
+	 * the device only reads the buffer, DMA_FROM_DEVICE means it only
+	 * writes it. DMA_FROM_DEVICE previously aliased the "read/write"
+	 * string, making write-only mappings indistinguishable from
+	 * bidirectional ones.
+	 */
+	const char * const dma_data_dir_stmap[] = {
+		[DMA_BIDIRECTIONAL] = "read/write",
+		[DMA_TO_DEVICE]     = "read",
+		[DMA_FROM_DEVICE]   = "write",
+		[DMA_NONE]          = "????",
+	};
+
+	seq_puts(s, "\t");
+	if (show_pipe && buf->last_pipe)
+		seq_printf(s, "pnum=%d ", buf->last_pipe->num);
+
+	/* guard against states outside the map; print "?" instead */
+	seq_printf(s, "state=%s addr=%pa size=%lu ",
+		buf->state < ARRAY_SIZE(buf_stat_stmap) &&
+		buf_stat_stmap[buf->state] ? buf_stat_stmap[buf->state] : "?",
+		&buf->p[0].addr, buf->p[0].len);
+
+	__print_time(tmpbuf, sizeof(tmpbuf), buf->last_alloc);
+	seq_printf(s, "alloc_time=%s ", tmpbuf);
+	if (buf->state == MDP_BUF_STATE_UNUSED) {
+		__print_time(tmpbuf, sizeof(tmpbuf), buf->last_freed);
+		seq_printf(s, "freed_time=%s ", tmpbuf);
+	} else {
+		for (i = 0; i < buf->num_planes; i++) {
+			seq_puts(s, "\n\t\t");
+			seq_printf(s, "plane[%d] domain=%s ", i,
+				domain_stmap[buf->p[i].domain]);
+			seq_printf(s, "permission=%s ",
+				dma_data_dir_stmap[buf->p[i].dir]);
+		}
+	}
+	seq_puts(s, "\n");
+}
+
+/*
+ * __dump_pipe() - print the full state of one SSPP (source pipe).
+ * @s:    seq_file to print into.
+ * @pipe: pipe to describe.
+ *
+ * Dumps identity/blend state, multirect mode (when supported), source
+ * format and geometry, SMP usage, the supported-format bitmap and every
+ * buffer queued on the pipe.
+ */
+static void __dump_pipe(struct seq_file *s, struct mdss_mdp_pipe *pipe)
+{
+	struct mdss_mdp_data *buf;
+	int format;
+	int smps[4];
+	int i;
+
+	seq_printf(s, "\nSSPP #%d type=%s ndx=%x flags=0x%08x play_cnt=%u xin_id=%d\n",
+			pipe->num, mdss_mdp_pipetype2str(pipe->type),
+			pipe->ndx, pipe->flags, pipe->play_cnt, pipe->xin_id);
+	seq_printf(s, "\tstage=%d alpha=0x%x transp=0x%x blend_op=%d\n",
+			pipe->mixer_stage, pipe->alpha,
+			pipe->transp, pipe->blend_op);
+	/* multirect info is only meaningful on pipes with >1 rectangle */
+	if (pipe->multirect.max_rects > 1) {
+		const char * const fmodes[] = {
+			[MDSS_MDP_PIPE_MULTIRECT_PARALLEL]	= "parallel",
+			[MDSS_MDP_PIPE_MULTIRECT_SERIAL]	= "serial",
+			[MDSS_MDP_PIPE_MULTIRECT_NONE]		= "single",
+		};
+		const char *mode = NULL;
+
+		if (pipe->multirect.mode < ARRAY_SIZE(fmodes))
+			mode = fmodes[pipe->multirect.mode];
+		if (!mode)
+			mode = "invalid";
+
+		seq_printf(s, "\trect=%d/%d fetch_mode=%s\n",
+				pipe->multirect.num, pipe->multirect.max_rects,
+				mode);
+	}
+
+	format = pipe->src_fmt->format;
+	seq_printf(s, "\tsrc w=%d h=%d format=%d (%s)\n",
+			pipe->img_width, pipe->img_height, format,
+			mdss_mdp_format2str(format));
+	seq_printf(s, "\tsrc_rect x=%d y=%d w=%d h=%d H.dec=%d V.dec=%d\n",
+			pipe->src.x, pipe->src.y, pipe->src.w, pipe->src.h,
+			pipe->horz_deci, pipe->vert_deci);
+	seq_printf(s, "\tdst_rect x=%d y=%d w=%d h=%d\n",
+			pipe->dst.x, pipe->dst.y, pipe->dst.w, pipe->dst.h);
+
+	/* count allocated/reserved SMP memory blocks per plane */
+	smps[0] = bitmap_weight(pipe->smp_map[0].allocated,
+			MAX_DRV_SUP_MMB_BLKS);
+	smps[1] = bitmap_weight(pipe->smp_map[1].allocated,
+			MAX_DRV_SUP_MMB_BLKS);
+	smps[2] = bitmap_weight(pipe->smp_map[0].reserved,
+			MAX_DRV_SUP_MMB_BLKS);
+	smps[3] = bitmap_weight(pipe->smp_map[1].reserved,
+			MAX_DRV_SUP_MMB_BLKS);
+
+	seq_printf(s, "\tSMP allocated=[%d %d] reserved=[%d %d]\n",
+			smps[0], smps[1], smps[2], smps[3]);
+
+	/* raw supported-format bitmap, one byte per group of 8 formats */
+	seq_puts(s, "\tSupported formats = ");
+	for (i = 0; i < BITS_TO_BYTES(MDP_IMGTYPE_LIMIT1); i++)
+		seq_printf(s, "0x%02X ", pipe->supported_formats[i]);
+	seq_puts(s, "\n");
+
+	seq_puts(s, "Data:\n");
+
+	list_for_each_entry(buf, &pipe->buf_queue, pipe_list)
+		__print_buf(s, buf, false);
+}
+
+/*
+ * __dump_mixer() - print a mixer summary and every pipe staged on it.
+ * @s:     seq_file to print into.
+ * @mixer: mixer to describe; NULL is silently ignored.
+ */
+static void __dump_mixer(struct seq_file *s, struct mdss_mdp_mixer *mixer)
+{
+	struct mdss_mdp_pipe *staged;
+	int idx, npipes = 0;
+
+	if (!mixer)
+		return;
+
+	/* one-line summary: type, geometry, active ROI, cursor state */
+	seq_printf(s, "\n%s Mixer #%d  res=%dx%d roi[%d, %d, %d, %d] %s\n",
+		mixer->type == MDSS_MDP_MIXER_TYPE_INTF ? "Intf" : "Writeback",
+		mixer->num, mixer->width, mixer->height,
+		mixer->roi.x, mixer->roi.y, mixer->roi.w, mixer->roi.h,
+		mixer->cursor_enabled ? "w/cursor" : "");
+
+	/* walk all blend stages; empty stages are skipped */
+	for (idx = 0; idx < ARRAY_SIZE(mixer->stage_pipe); idx++) {
+		staged = mixer->stage_pipe[idx];
+		if (!staged)
+			continue;
+		__dump_pipe(s, staged);
+		npipes++;
+	}
+
+	seq_printf(s, "\nTotal pipes=%d\n", npipes);
+}
+
+/*
+ * __dump_timings() - print panel resolution, porch/pulse timings and
+ * any configured border for a control path.
+ * @s:   seq_file to print into.
+ * @ctl: control path; ignored when NULL or without panel data.
+ */
+static void __dump_timings(struct seq_file *s, struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_panel_info *pinfo;
+
+	if (!ctl || !ctl->panel_data)
+		return;
+
+	pinfo = &ctl->panel_data->panel_info;
+	seq_printf(s, "Panel #%d %dx%dp%d\n",
+			pinfo->pdest, pinfo->xres, pinfo->yres,
+			mdss_panel_get_framerate(pinfo, FPS_RESOLUTION_HZ));
+	seq_printf(s, "\tvbp=%d vfp=%d vpw=%d hbp=%d hfp=%d hpw=%d\n",
+			pinfo->lcdc.v_back_porch,
+			pinfo->lcdc.v_front_porch,
+			pinfo->lcdc.v_pulse_width,
+			pinfo->lcdc.h_back_porch,
+			pinfo->lcdc.h_front_porch,
+			pinfo->lcdc.h_pulse_width);
+
+	/* only print border info when at least one edge is configured */
+	if (pinfo->lcdc.border_bottom || pinfo->lcdc.border_top ||
+			pinfo->lcdc.border_left ||
+			pinfo->lcdc.border_right) {
+		seq_printf(s, "\tborder (l,t,r,b):[%d,%d,%d,%d] off xy:%d,%d\n",
+				pinfo->lcdc.border_left,
+				pinfo->lcdc.border_top,
+				pinfo->lcdc.border_right,
+				pinfo->lcdc.border_bottom,
+				ctl->border_x_off,
+				ctl->border_y_off);
+	}
+}
+
+/*
+ * __dump_ctl() - print everything known about one powered-on control
+ * path: panel/writeback identity, timings, performance counters and
+ * both mixers.
+ * @s:   seq_file to print into.
+ * @ctl: control path; skipped when not powered on.
+ */
+static void __dump_ctl(struct seq_file *s, struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_perf_params *perf;
+
+	if (!mdss_mdp_ctl_is_power_on(ctl))
+		return;
+
+	seq_printf(s, "\n--[ Control path #%d - ", ctl->num);
+
+	if (ctl->panel_data) {
+		/* display path: note split (DUAL) configs and panel type */
+		struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
+
+		seq_printf(s, "%s%s]--\n",
+			sctl && sctl->panel_data ? "DUAL " : "",
+			mdss_panel2str(ctl->panel_data->panel_info.type));
+		__dump_timings(s, ctl);
+		__dump_timings(s, sctl);
+	} else {
+		/* no panel: writeback or rotator path */
+		struct mdss_mdp_mixer *mixer;
+
+		mixer = ctl->mixer_left;
+		if (mixer) {
+			seq_printf(s, "%s%d",
+					(mixer->rotator_mode ? "rot" : "wb"),
+					mixer->num);
+		} else {
+			seq_puts(s, "unknown");
+		}
+		seq_puts(s, "]--\n");
+	}
+	perf = &ctl->cur_perf;
+	seq_printf(s, "MDP Clk=%u  Final BW=%llu\n",
+			perf->mdp_clk_rate,
+			perf->bw_ctl);
+	seq_printf(s, "Play Count=%u  Underrun Count=%u\n",
+			ctl->play_cnt, ctl->underrun_cnt);
+
+	__dump_mixer(s, ctl->mixer_left);
+	__dump_mixer(s, ctl->mixer_right);
+}
+
+/*
+ * __dump_mdp() - dump every control path in the system.
+ * @s:     seq_file to print into.
+ * @mdata: MDSS driver data holding the ctl array.
+ *
+ * In a split-display configuration the right mixer belongs to a slave
+ * ctl; its index is remembered so it is not dumped a second time on
+ * its own. Always returns 0.
+ */
+static int __dump_mdp(struct seq_file *s, struct mdss_data_type *mdata)
+{
+	int idx, slave_ndx = -1;
+	struct mdss_mdp_ctl *ctl;
+
+	for (idx = 0; idx < mdata->nctl; idx++) {
+		ctl = mdata->ctl_off + idx;
+		if (ctl->num == slave_ndx)
+			continue;
+		if (ctl->mixer_right && (ctl->mixer_right->ctl != ctl))
+			slave_ndx = ctl->mixer_right->ctl->num;
+		__dump_ctl(s, ctl);
+	}
+	return 0;
+}
+
+#define DUMP_CHUNK 256
+#define DUMP_SIZE SZ_32K
+/*
+ * mdss_mdp_dump() - dump all control paths into the kernel log.
+ * @mdata: MDSS driver data.
+ *
+ * Renders the same report as the "dump" debugfs file into a temporary
+ * buffer via a hand-rolled seq_file, then emits it through pr_cont()
+ * in DUMP_CHUNK-sized pieces (printk limits how much can be printed
+ * per call). Silently returns if the buffer allocation fails.
+ */
+void mdss_mdp_dump(struct mdss_data_type *mdata)
+{
+	struct seq_file s = {
+		.size = DUMP_SIZE - 1,
+	};
+	int i;
+
+	s.buf = kzalloc(DUMP_SIZE, GFP_KERNEL);
+	if (!s.buf)
+		return;
+
+	__dump_mdp(&s, mdata);
+	seq_puts(&s, "\n");
+
+	pr_info("MDP DUMP\n------------------------\n");
+	for (i = 0; i < s.count; i += DUMP_CHUNK) {
+		if ((s.count - i) > DUMP_CHUNK) {
+			/* temporarily NUL-terminate the chunk in place */
+			char c = s.buf[i + DUMP_CHUNK];
+
+			s.buf[i + DUMP_CHUNK] = 0;
+			pr_cont("%s", s.buf + i);
+			s.buf[i + DUMP_CHUNK] = c;
+		} else {
+			/* last (partial) chunk: count < size, so in bounds */
+			s.buf[s.count] = 0;
+			pr_cont("%s", s.buf + i);
+		}
+	}
+
+	kfree(s.buf);
+}
+
+#ifdef CONFIG_DEBUG_FS
+/*
+ * __dump_buf_data() - list the buffers tracked for one framebuffer:
+ * those in use, those on the free list, and the most recently freed
+ * entries from the pool.
+ * @s:   seq_file to print into.
+ * @mfd: framebuffer whose buffer lists are dumped.
+ */
+static void __dump_buf_data(struct seq_file *s, struct msm_fb_data_type *mfd)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_data *buf;
+	int i = 0;
+
+	seq_printf(s, "List of buffers for fb%d\n", mfd->index);
+
+	mutex_lock(&mdp5_data->list_lock);
+	if (!list_empty(&mdp5_data->bufs_used)) {
+		seq_puts(s, " Buffers used:\n");
+		list_for_each_entry(buf, &mdp5_data->bufs_used, buf_list)
+			__print_buf(s, buf, true);
+	}
+
+	if (!list_empty(&mdp5_data->bufs_freelist)) {
+		seq_puts(s, " Buffers in free list:\n");
+		list_for_each_entry(buf, &mdp5_data->bufs_freelist, buf_list)
+			__print_buf(s, buf, true);
+	}
+
+	if (!list_empty(&mdp5_data->bufs_pool)) {
+		seq_printf(s, " Last %d buffers used:\n", BUF_DUMP_LAST_N);
+
+		/*
+		 * Walk the pool newest-first; stop at the first never-freed
+		 * entry or after BUF_DUMP_LAST_N buffers.
+		 */
+		list_for_each_entry_reverse(buf, &mdp5_data->bufs_pool,
+				buf_list) {
+			if (buf->last_freed == 0 || i == BUF_DUMP_LAST_N)
+				break;
+			__print_buf(s, buf, true);
+			i++;
+		}
+	}
+	mutex_unlock(&mdp5_data->list_lock);
+}
+
+/*
+ * __dump_buffers() - dump buffer lists for every framebuffer-backed
+ * control path. Mirrors the slave-ctl skipping logic of __dump_mdp().
+ * @s:     seq_file to print into.
+ * @mdata: MDSS driver data.
+ *
+ * Always returns 0.
+ */
+static int __dump_buffers(struct seq_file *s, struct mdss_data_type *mdata)
+{
+	struct mdss_mdp_ctl *ctl;
+	int i, ignore_ndx = -1;
+
+	for (i = 0; i < mdata->nctl; i++) {
+		ctl = mdata->ctl_off + i;
+		/* ignore slave ctl in split display case */
+		if (ctl->num == ignore_ndx)
+			continue;
+		if (ctl->mixer_right && (ctl->mixer_right->ctl != ctl))
+			ignore_ndx = ctl->mixer_right->ctl->num;
+
+		if (ctl->mfd)
+			__dump_buf_data(s, ctl->mfd);
+	}
+	return 0;
+}
+
+/* debugfs "dump" file: show every control path (see __dump_mdp()) */
+static int mdss_debugfs_dump_show(struct seq_file *s, void *v)
+{
+	struct mdss_data_type *mdata = (struct mdss_data_type *)s->private;
+
+	return __dump_mdp(s, mdata);
+}
+DEFINE_MDSS_DEBUGFS_SEQ_FOPS(mdss_debugfs_dump);
+
+/* debugfs "buffers" file: show per-fb buffer lists (__dump_buffers()) */
+static int mdss_debugfs_buffers_show(struct seq_file *s, void *v)
+{
+	struct mdss_data_type *mdata = (struct mdss_data_type *)s->private;
+
+	return __dump_buffers(s, mdata);
+}
+DEFINE_MDSS_DEBUGFS_SEQ_FOPS(mdss_debugfs_buffers);
+
+/*
+ * __danger_safe_signal_status() - read and print the MDP danger or
+ * safe signal status register.
+ * @s:             seq_file to print into.
+ * @danger_status: true for the DANGER register, false for SAFE.
+ *
+ * Clocks are enabled around the single register read; each client
+ * (MDP core, VIG/RGB/DMA/cursor pipes) owns a 2-bit field extracted
+ * with DANGER_SAFE_STATUS(). Always returns 0.
+ */
+static int __danger_safe_signal_status(struct seq_file *s, bool danger_status)
+{
+	struct mdss_data_type *mdata = (struct mdss_data_type *)s->private;
+	u32 status;
+	int i, j;
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+	if (danger_status) {
+		seq_puts(s, "\nDanger signal status:\n");
+		status = readl_relaxed(mdata->mdp_base +
+			MDSS_MDP_DANGER_STATUS);
+	} else {
+		seq_puts(s, "\nSafe signal status:\n");
+		status = readl_relaxed(mdata->mdp_base +
+			MDSS_MDP_SAFE_STATUS);
+	}
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+
+	seq_printf(s, "MDP     :  0x%lx\n",
+		DANGER_SAFE_STATUS(status, MDP_DANGER_SAFE_BIT_OFFSET));
+
+	/* j advances by 2 bits per pipe within the status word */
+	for (i = 0, j = VIG_DANGER_SAFE_BIT_OFFSET; i < mdata->nvig_pipes;
+			i++, j += 2)
+		seq_printf(s, "VIG%d    :  0x%lx  \t", i,
+			DANGER_SAFE_STATUS(status, j));
+	seq_puts(s, "\n");
+
+	for (i = 0, j = RGB_DANGER_SAFE_BIT_OFFSET; i < mdata->nrgb_pipes;
+			i++, j += 2)
+		seq_printf(s, "RGB%d    :  0x%lx  \t", i,
+			DANGER_SAFE_STATUS(status, j));
+	seq_puts(s, "\n");
+	for (i = 0, j = DMA_DANGER_SAFE_BIT_OFFSET; i < mdata->ndma_pipes;
+			i++, j += 2)
+		seq_printf(s, "DMA%d    :  0x%lx  \t", i,
+			DANGER_SAFE_STATUS(status, j));
+	seq_puts(s, "\n");
+
+	for (i = 0, j = CURSOR_DANGER_SAFE_BIT_OFFSET; i < mdata->ncursor_pipes;
+			i++, j += 2)
+		seq_printf(s, "CURSOR%d :  0x%lx  \t", i,
+			DANGER_SAFE_STATUS(status, j));
+	seq_puts(s, "\n");
+
+	return 0;
+}
+
+/* debugfs "danger_stat" file: DANGER register view */
+static int mdss_debugfs_danger_stats_show(struct seq_file *s, void *v)
+{
+	return __danger_safe_signal_status(s, true);
+}
+DEFINE_MDSS_DEBUGFS_SEQ_FOPS(mdss_debugfs_danger_stats);
+
+/* debugfs "safe_stat" file: SAFE register view */
+static int mdss_debugfs_safe_stats_show(struct seq_file *s, void *v)
+{
+	return __danger_safe_signal_status(s, false);
+}
+DEFINE_MDSS_DEBUGFS_SEQ_FOPS(mdss_debugfs_safe_stats);
+
+/*
+ * __stats_ctl_dump() - print the counters of one control path.
+ * @ctl: control path; unreferenced (ref_cnt == 0) paths are skipped.
+ * @s:   seq_file to print into.
+ */
+static void __stats_ctl_dump(struct mdss_mdp_ctl *ctl, struct seq_file *s)
+{
+	if (!ctl->ref_cnt)
+		return;
+
+	if (!ctl->intf_num) {
+		/* writeback path: just the opmode and play counter */
+		seq_printf(s, "wb: \tmode=%x \tplay: %08u\n",
+				ctl->opmode, ctl->play_cnt);
+		return;
+	}
+
+	/* interface path: frame, vsync and underrun counters ... */
+	seq_printf(s, "intf%d: play: %08u \t",
+			ctl->intf_num, ctl->play_cnt);
+	seq_printf(s, "vsync: %08u \tunderrun: %08u\n",
+			ctl->vsync_cnt, ctl->underrun_cnt);
+	/* ... plus backlight levels when a framebuffer is attached */
+	if (ctl->mfd)
+		seq_printf(s, "user_bl: %08u \tmod_bl: %08u\n",
+			ctl->mfd->bl_level, ctl->mfd->bl_level_scaled);
+}
+
+/*
+ * __dump_stat() - print play counts for an array of pipes.
+ * @s:         seq_file to print into.
+ * @ptypestr:  pipe type prefix ("VIG", "RGB", ...).
+ * @pipe_list: first pipe of the array.
+ * @count:     number of logical pipes in the array.
+ *
+ * Multirect-capable pipes occupy max_rects consecutive array entries;
+ * they are labeled "TYPEi.r" while single-rect pipes are just "TYPEi".
+ * A newline is emitted after every fourth entry.
+ */
+static void __dump_stat(struct seq_file *s, char *ptypestr,
+		struct mdss_mdp_pipe *pipe_list, int count)
+{
+	struct mdss_mdp_pipe *pipe;
+	int i = 0, ndx = 0;
+	u32 rects_per_pipe = 1;
+
+	while (i < count) {
+		pipe = pipe_list + ndx;
+		rects_per_pipe = pipe->multirect.max_rects;
+
+		if (rects_per_pipe == 1)
+			seq_printf(s, "%s%d", ptypestr, i);
+		else
+			seq_printf(s, "%s%d.%d", ptypestr, i,
+					ndx % rects_per_pipe);
+
+		seq_printf(s, " :   %08u\t", pipe->play_cnt);
+
+		/* advance the logical pipe index after its last rect */
+		if ((++ndx % rects_per_pipe) == 0)
+			i++;
+
+		if ((ndx % 4) == 0)
+			seq_puts(s, "\n");
+	}
+
+	/* close a partial line of entries */
+	if ((ndx % 4) != 0)
+		seq_puts(s, "\n");
+}
+
+/*
+ * debugfs "stat" file: per-ctl counters followed by per-pipe play
+ * counts for every pipe class.
+ */
+static int mdss_debugfs_stats_show(struct seq_file *s, void *v)
+{
+	struct mdss_data_type *mdata = (struct mdss_data_type *)s->private;
+	int i;
+
+	seq_puts(s, "\nmdp:\n");
+
+	for (i = 0; i < mdata->nctl; i++)
+		__stats_ctl_dump(mdata->ctl_off + i, s);
+	seq_puts(s, "\n");
+
+	__dump_stat(s, "VIG", mdata->vig_pipes, mdata->nvig_pipes);
+	__dump_stat(s, "RGB", mdata->rgb_pipes, mdata->nrgb_pipes);
+	__dump_stat(s, "DMA", mdata->dma_pipes, mdata->ndma_pipes);
+	__dump_stat(s, "CURSOR", mdata->cursor_pipes, mdata->ncursor_pipes);
+
+	return 0;
+}
+DEFINE_MDSS_DEBUGFS_SEQ_FOPS(mdss_debugfs_stats);
+
+/*
+ * mdss_mdp_debugfs_init() - create the MDP debugfs files under the
+ * previously-created debug root/bordercolor/postproc directories.
+ * @mdata: MDSS driver data, stored as each file's private data.
+ *
+ * Return: 0 on success, -ENODEV without mdata, -ENOENT when the debug
+ * infrastructure has not been set up. Creation failures of individual
+ * files are not treated as fatal (kernel debugfs convention).
+ */
+int mdss_mdp_debugfs_init(struct mdss_data_type *mdata)
+{
+	struct mdss_debug_data *mdd;
+
+	if (!mdata)
+		return -ENODEV;
+
+	mdd = mdata->debug_inf.debug_data;
+	if (!mdd)
+		return -ENOENT;
+
+	debugfs_create_file("dump", 0644, mdd->root, mdata,
+			&mdss_debugfs_dump_fops);
+	debugfs_create_file("buffers", 0644, mdd->root, mdata,
+			&mdss_debugfs_buffers_fops);
+	debugfs_create_file("stat", 0644, mdd->root, mdata,
+			&mdss_debugfs_stats_fops);
+	debugfs_create_file("danger_stat", 0644, mdd->root, mdata,
+			&mdss_debugfs_danger_stats_fops);
+	debugfs_create_file("safe_stat", 0644, mdd->root, mdata,
+			&mdss_debugfs_safe_stats_fops);
+	/* runtime tunables exposed as boolean knobs */
+	debugfs_create_bool("serialize_wait4pp", 0644, mdd->root,
+		(u32 *)&mdata->serialize_wait4pp);
+	debugfs_create_bool("wait4autorefresh", 0644, mdd->root,
+		(u32 *)&mdata->wait4autorefresh);
+	debugfs_create_bool("enable_gate", 0644, mdd->root,
+		(u32 *)&mdata->enable_gate);
+
+	/* border color components and AD debug toggle */
+	debugfs_create_u32("color0", 0644, mdd->bordercolor,
+		(u32 *)&mdata->bcolor0);
+	debugfs_create_u32("color1", 0644, mdd->bordercolor,
+		(u32 *)&mdata->bcolor1);
+	debugfs_create_u32("color2", 0644, mdd->bordercolor,
+		(u32 *)&mdata->bcolor2);
+	debugfs_create_u32("ad_debugen", 0644, mdd->postproc,
+		(u32 *)&mdata->ad_debugen);
+
+	return 0;
+}
+#endif
diff --git a/drivers/video/fbdev/msm/mdss_mdp_debug.h b/drivers/video/fbdev/msm/mdss_mdp_debug.h
new file mode 100644
index 0000000..fe7ff09
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_debug.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2014-2015, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_MDP_DEBUG_H
+#define MDSS_MDP_DEBUG_H
+
+#include <linux/msm_mdp.h>
+#include <linux/stringify.h>
+
+#include "mdss.h"
+#include "mdss_mdp.h"
+
+#define MDP_DANGER_SAFE_BIT_OFFSET	0
+#define VIG_DANGER_SAFE_BIT_OFFSET	4
+#define RGB_DANGER_SAFE_BIT_OFFSET	12
+#define DMA_DANGER_SAFE_BIT_OFFSET	20
+#define CURSOR_DANGER_SAFE_BIT_OFFSET	24
+
+#define DANGER_SAFE_STATUS(X, Y) (((X) & (BIT(Y) | BIT((Y)+1))) >> (Y))
+
+/*
+ * mdss_mdp_pipetype2str() - map an MDSS_MDP_PIPE_TYPE_* value to its
+ * name; returns "UNKNOWN" for values outside the table.
+ */
+static inline const char *mdss_mdp_pipetype2str(u32 ptype)
+{
+	static const char * const strings[] = {
+#define PIPE_TYPE(t)[MDSS_MDP_PIPE_TYPE_ ## t] = __stringify(t)
+		PIPE_TYPE(VIG),
+		PIPE_TYPE(RGB),
+		PIPE_TYPE(DMA),
+		PIPE_TYPE(CURSOR),
+#undef PIPE_TYPE
+	};
+
+	/* gaps in the enum leave NULL entries -- guard both cases */
+	if (ptype >= ARRAY_SIZE(strings) || !strings[ptype])
+		return "UNKNOWN";
+
+	return strings[ptype];
+}
+
+/*
+ * mdss_mdp_format2str() - map an MDP_* pixel format value to its name;
+ * returns "UNKNOWN" for values outside the table.
+ */
+static inline const char *mdss_mdp_format2str(u32 format)
+{
+	static const char * const strings[] = {
+#define FORMAT_NAME(f)[MDP_ ## f] = __stringify(f)
+		FORMAT_NAME(RGB_565),
+		FORMAT_NAME(BGR_565),
+		FORMAT_NAME(RGB_888),
+		FORMAT_NAME(BGR_888),
+		FORMAT_NAME(RGBX_8888),
+		FORMAT_NAME(RGBA_8888),
+		FORMAT_NAME(ARGB_8888),
+		FORMAT_NAME(XRGB_8888),
+		FORMAT_NAME(BGRA_8888),
+		FORMAT_NAME(BGRX_8888),
+		FORMAT_NAME(Y_CBCR_H2V2_VENUS),
+		FORMAT_NAME(Y_CBCR_H2V2),
+		FORMAT_NAME(Y_CRCB_H2V2),
+		FORMAT_NAME(Y_CB_CR_H2V2),
+		FORMAT_NAME(Y_CR_CB_H2V2),
+		FORMAT_NAME(Y_CR_CB_GH2V2),
+		FORMAT_NAME(YCBYCR_H2V1),
+		FORMAT_NAME(YCRYCB_H2V1),
+		FORMAT_NAME(RGBA_8888_UBWC),
+		FORMAT_NAME(RGBX_8888_UBWC),
+		FORMAT_NAME(RGB_565_UBWC),
+		FORMAT_NAME(Y_CBCR_H2V2_UBWC)
+#undef FORMAT_NAME
+	};
+
+	/* sparse table: unknown or unnamed formats map to "UNKNOWN" */
+	if (format >= ARRAY_SIZE(strings) || !strings[format])
+		return "UNKNOWN";
+
+	return strings[format];
+}
+void mdss_mdp_dump(struct mdss_data_type *mdata);
+void mdss_mdp_hw_rev_debug_caps_init(struct mdss_data_type *mdata);
+
+
+#ifdef CONFIG_DEBUG_FS
+int mdss_mdp_debugfs_init(struct mdss_data_type *mdata);
+#else
+static inline int mdss_mdp_debugfs_init(struct mdss_data_type *mdata)
+{
+	return 0;
+}
+#endif
+
+#endif /* MDSS_MDP_DEBUG_H */
diff --git a/drivers/video/fbdev/msm/mdss_mdp_formats.h b/drivers/video/fbdev/msm/mdss_mdp_formats.h
new file mode 100644
index 0000000..cdb9547
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_formats.h
@@ -0,0 +1,504 @@
+/* Copyright (c) 2012-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_MDP_FORMATS_H
+#define MDSS_MDP_FORMATS_H
+
+#include <linux/msm_mdp.h>
+
+#include "mdss_mdp.h"
+
+	/*
+	 * Value of enum chosen to fit the number of bits
+	 * expected by the HW programming.
+	 */
+enum {
+	COLOR_4BIT,				/* encodes as 0 in the HW bits field */
+	COLOR_5BIT,				/* encodes as 1 */
+	COLOR_6BIT,				/* encodes as 2 */
+	COLOR_8BIT,				/* encodes as 3 */
+	COLOR_ALPHA_1BIT = 0,			/* alpha widths reuse encodings 0/1 on purpose */
+	COLOR_ALPHA_4BIT = 1,			/* (alpha field has its own 2-bit HW encoding space) */
+};
+
+#define UBWC_META_MACRO_W_H 16
+#define UBWC_META_BLOCK_SIZE 256
+
+#define FMT_RGB_565(fmt, fetch_type, flag_arg, e0, e1, e2)		\
+	{							\
+		.format = (fmt),				\
+		.flag = flag_arg,					\
+		.fetch_planes = MDSS_MDP_PLANE_INTERLEAVED,	\
+		.unpack_tight = 1,				\
+		.unpack_align_msb = 0,				\
+		.alpha_enable = 0,				\
+		.unpack_count = 3,				\
+		.bpp = 2,					\
+		.fetch_mode = (fetch_type),			\
+		.element = { (e0), (e1), (e2) },		\
+		.bits = {					\
+			[C2_R_Cr] = COLOR_5BIT,			\
+			[C0_G_Y] = COLOR_6BIT,			\
+			[C1_B_Cb] = COLOR_5BIT,			\
+		},						\
+	}
+
+#define FMT_RGB_888(fmt, fetch_type, flag_arg, e0, e1, e2)		\
+	{							\
+		.format = (fmt),				\
+		.flag = flag_arg,					\
+		.fetch_planes = MDSS_MDP_PLANE_INTERLEAVED,	\
+		.unpack_tight = 1,				\
+		.unpack_align_msb = 0,				\
+		.alpha_enable = 0,				\
+		.unpack_count = 3,				\
+		.bpp = 3,					\
+		.fetch_mode = (fetch_type),			\
+		.element = { (e0), (e1), (e2) },		\
+		.bits = {					\
+			[C2_R_Cr] = COLOR_8BIT,			\
+			[C0_G_Y] = COLOR_8BIT,			\
+			[C1_B_Cb] = COLOR_8BIT,			\
+		},						\
+	}
+
+#define FMT_RGB_8888(fmt, fetch_type, flag_arg,	\
+		alpha_en, e0, e1, e2, e3)	\
+	{							\
+		.format = (fmt),				\
+		.flag = flag_arg,					\
+		.fetch_planes = MDSS_MDP_PLANE_INTERLEAVED,	\
+		.unpack_tight = 1,				\
+		.unpack_align_msb = 0,				\
+		.alpha_enable = (alpha_en),			\
+		.unpack_count = 4,				\
+		.bpp = 4,					\
+		.fetch_mode = (fetch_type),			\
+		.element = { (e0), (e1), (e2), (e3) },		\
+		.bits = {					\
+			[C3_ALPHA] = COLOR_8BIT,		\
+			[C2_R_Cr] = COLOR_8BIT,			\
+			[C0_G_Y] = COLOR_8BIT,			\
+			[C1_B_Cb] = COLOR_8BIT,			\
+		},						\
+	}
+
+#define FMT_YUV_COMMON(fmt)					\
+		.format = (fmt),				\
+		.is_yuv = 1,					\
+		.bits = {					\
+			[C2_R_Cr] = COLOR_8BIT,			\
+			[C0_G_Y] = COLOR_8BIT,			\
+			[C1_B_Cb] = COLOR_8BIT,			\
+		},						\
+		.alpha_enable = 0,				\
+		.unpack_tight = 1,				\
+		.unpack_align_msb = 0
+
+#define FMT_YUV_PSEUDO(fmt, fetch_type, samp, \
+		flag_arg, e0, e1)		\
+	{							\
+		FMT_YUV_COMMON(fmt),				\
+		.flag = flag_arg,					\
+		.fetch_planes = MDSS_MDP_PLANE_PSEUDO_PLANAR,	\
+		.chroma_sample = samp,				\
+		.unpack_count = 2,				\
+		.bpp = 2,					\
+		.fetch_mode = (fetch_type),			\
+		.element = { (e0), (e1) },			\
+	}
+
+#define FMT_YUV_PLANR(fmt, fetch_type, samp, \
+		flag_arg, e0, e1)		\
+	{							\
+		FMT_YUV_COMMON(fmt),				\
+		.flag = flag_arg,					\
+		.fetch_planes = MDSS_MDP_PLANE_PLANAR,		\
+		.chroma_sample = samp,				\
+		.bpp = 1,					\
+		.unpack_count = 1,				\
+		.fetch_mode = (fetch_type),			\
+		.element = { (e0), (e1) }			\
+	}
+
+#define FMT_RGB_1555(fmt, alpha_en, flag_arg, e0, e1, e2, e3)		\
+	{							\
+		.format = (fmt),				\
+		.flag = flag_arg,					\
+		.fetch_planes = MDSS_MDP_PLANE_INTERLEAVED,	\
+		.unpack_tight = 1,				\
+		.unpack_align_msb = 0,				\
+		.alpha_enable = (alpha_en),			\
+		.unpack_count = 4,				\
+		.bpp = 2,					\
+		.element = { (e0), (e1), (e2), (e3) },		\
+		.fetch_mode = MDSS_MDP_FETCH_LINEAR,		\
+		.bits = {					\
+			[C3_ALPHA] = COLOR_ALPHA_1BIT,		\
+			[C2_R_Cr] = COLOR_5BIT,			\
+			[C0_G_Y] = COLOR_5BIT,			\
+			[C1_B_Cb] = COLOR_5BIT,			\
+		},						\
+	}
+
+#define FMT_RGB_4444(fmt, alpha_en, flag_arg, e0, e1, e2, e3)		\
+	{							\
+		.format = (fmt),				\
+		.flag = flag_arg,					\
+		.fetch_planes = MDSS_MDP_PLANE_INTERLEAVED,	\
+		.unpack_tight = 1,				\
+		.unpack_align_msb = 0,				\
+		.alpha_enable = (alpha_en),			\
+		.unpack_count = 4,				\
+		.bpp = 2,					\
+		.fetch_mode = MDSS_MDP_FETCH_LINEAR,		\
+		.element = { (e0), (e1), (e2), (e3) },		\
+		.bits = {					\
+			[C3_ALPHA] = COLOR_ALPHA_4BIT,		\
+			[C2_R_Cr] = COLOR_4BIT,			\
+			[C0_G_Y] = COLOR_4BIT,			\
+			[C1_B_Cb] = COLOR_4BIT,			\
+		},						\
+	}
+
+#define FMT_RGB_2101010(fmt, fetch_type, flag_arg,	\
+		alpha_en, e0, e1, e2, e3)	\
+	{							\
+		.format = (fmt),				\
+		.flag = flag_arg,					\
+		.fetch_planes = MDSS_MDP_PLANE_INTERLEAVED,	\
+		.unpack_tight = 1,				\
+		.unpack_align_msb = 0,				\
+		.alpha_enable = (alpha_en),			\
+		.unpack_count = 4,				\
+		.bpp = 4,					\
+		.fetch_mode = (fetch_type),			\
+		.element = { (e0), (e1), (e2), (e3) },		\
+		.bits = {					\
+			[C3_ALPHA] = COLOR_8BIT,		\
+			[C2_R_Cr] = COLOR_8BIT,			\
+			[C0_G_Y] = COLOR_8BIT,			\
+			[C1_B_Cb] = COLOR_8BIT,			\
+		},						\
+		.unpack_dx_format = 1,	\
+	}
+
+#define FMT_YUV_PSEUDO_10(fmt, fetch_type, samp, \
+		flag_arg, e0, e1, unpack_type, unpack_align)		\
+	{							\
+		FMT_YUV_COMMON(fmt),				\
+		.flag = flag_arg,					\
+		.fetch_planes = MDSS_MDP_PLANE_PSEUDO_PLANAR,	\
+		.chroma_sample = samp,				\
+		.unpack_count = 2,				\
+		.bpp = 2,					\
+		.fetch_mode = (fetch_type),			\
+		.element = { (e0), (e1) },			\
+		.unpack_dx_format = 1,	\
+		.unpack_tight = unpack_type,	\
+		.unpack_align_msb = unpack_align,	\
+	}
+
+/*
+ * UBWC formats table:
+ * This table holds the UBWC formats supported.
+ * If a compression ratio needs to be used for this or any other format,
+ * the data will be passed by user-space.
+ */
+static struct mdss_mdp_format_params_ubwc mdss_mdp_format_ubwc_map[] = {
+	{
+		.mdp_format = FMT_RGB_565(MDP_RGB_565_UBWC,
+			MDSS_MDP_FETCH_UBWC,
+			VALID_ROT_WB_FORMAT | VALID_MDP_WB_INTF_FORMAT,
+			C2_R_Cr, C0_G_Y, C1_B_Cb),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 16,
+		},
+	},
+	{
+		.mdp_format = FMT_RGB_8888(MDP_RGBA_8888_UBWC,
+			MDSS_MDP_FETCH_UBWC,
+			VALID_ROT_WB_FORMAT | VALID_MDP_WB_INTF_FORMAT, 1,
+			C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 16,
+		},
+	},
+	{
+		.mdp_format = FMT_RGB_8888(MDP_RGBX_8888_UBWC,
+			MDSS_MDP_FETCH_UBWC,
+			VALID_ROT_WB_FORMAT | VALID_MDP_WB_INTF_FORMAT, 0,
+			C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 16,
+		},
+	},
+	{
+		.mdp_format = FMT_YUV_PSEUDO(MDP_Y_CBCR_H2V2_UBWC,
+			MDSS_MDP_FETCH_UBWC, MDSS_MDP_CHROMA_420,
+			VALID_ROT_WB_FORMAT | VALID_MDP_WB_INTF_FORMAT,
+			C1_B_Cb, C2_R_Cr),
+		.micro = {
+			.tile_height = 8,
+			.tile_width = 32,
+		},
+	},
+	{
+		.mdp_format = FMT_RGB_2101010(MDP_RGBA_1010102_UBWC,
+			MDSS_MDP_FETCH_UBWC,
+			VALID_ROT_WB_FORMAT | VALID_MDP_WB_INTF_FORMAT,
+			1, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 16,
+		},
+	},
+	{
+		.mdp_format = FMT_RGB_2101010(MDP_RGBX_1010102_UBWC,
+			MDSS_MDP_FETCH_UBWC,
+			VALID_ROT_WB_FORMAT | VALID_MDP_WB_INTF_FORMAT,
+			0, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 16,
+		},
+	},
+	{
+		.mdp_format = FMT_YUV_PSEUDO_10(MDP_Y_CBCR_H2V2_TP10_UBWC,
+			MDSS_MDP_FETCH_UBWC, MDSS_MDP_CHROMA_420,
+			VALID_ROT_WB_FORMAT | VALID_MDP_WB_INTF_FORMAT,
+			C1_B_Cb, C2_R_Cr, 1, 0),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 48,
+		},
+	},
+};
+
+static struct mdss_mdp_format_params mdss_mdp_format_map[] = {
+	FMT_RGB_565(MDP_RGB_565, MDSS_MDP_FETCH_LINEAR, VALID_ROT_WB_FORMAT |
+		VALID_MDP_WB_INTF_FORMAT, C1_B_Cb, C0_G_Y, C2_R_Cr),
+	FMT_RGB_565(MDP_BGR_565, MDSS_MDP_FETCH_LINEAR, VALID_ROT_WB_FORMAT |
+		VALID_MDP_WB_INTF_FORMAT, C2_R_Cr, C0_G_Y, C1_B_Cb),
+	FMT_RGB_565(MDP_RGB_565_TILE, MDSS_MDP_FETCH_TILE, VALID_ROT_WB_FORMAT,
+		C1_B_Cb, C0_G_Y, C2_R_Cr),
+	FMT_RGB_565(MDP_BGR_565_TILE, MDSS_MDP_FETCH_TILE, VALID_ROT_WB_FORMAT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb),
+	FMT_RGB_888(MDP_RGB_888, MDSS_MDP_FETCH_LINEAR, VALID_ROT_WB_FORMAT |
+		VALID_MDP_WB_INTF_FORMAT, C2_R_Cr, C0_G_Y, C1_B_Cb),
+	FMT_RGB_888(MDP_BGR_888, MDSS_MDP_FETCH_LINEAR, VALID_ROT_WB_FORMAT |
+		VALID_MDP_WB_INTF_FORMAT, C1_B_Cb, C0_G_Y, C2_R_Cr),
+
+	FMT_RGB_8888(MDP_XRGB_8888, MDSS_MDP_FETCH_LINEAR, VALID_ROT_WB_FORMAT |
+		VALID_MDP_WB_INTF_FORMAT | VALID_MDP_CURSOR_FORMAT, 0,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb),
+	FMT_RGB_8888(MDP_ARGB_8888, MDSS_MDP_FETCH_LINEAR, VALID_ROT_WB_FORMAT |
+		VALID_MDP_CURSOR_FORMAT | VALID_MDP_WB_INTF_FORMAT,
+		1, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb),
+	FMT_RGB_8888(MDP_ABGR_8888, MDSS_MDP_FETCH_LINEAR, VALID_ROT_WB_FORMAT |
+		VALID_MDP_WB_INTF_FORMAT, 1,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr),
+	FMT_RGB_8888(MDP_RGBA_8888, MDSS_MDP_FETCH_LINEAR, VALID_ROT_WB_FORMAT |
+		VALID_MDP_CURSOR_FORMAT | VALID_MDP_WB_INTF_FORMAT, 1,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA),
+	FMT_RGB_8888(MDP_RGBX_8888, MDSS_MDP_FETCH_LINEAR, VALID_ROT_WB_FORMAT |
+		VALID_MDP_WB_INTF_FORMAT, 0, C2_R_Cr, C0_G_Y, C1_B_Cb,
+		C3_ALPHA),
+	FMT_RGB_8888(MDP_BGRA_8888, MDSS_MDP_FETCH_LINEAR, VALID_ROT_WB_FORMAT |
+		VALID_MDP_CURSOR_FORMAT | VALID_MDP_WB_INTF_FORMAT,
+		1, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA),
+	FMT_RGB_8888(MDP_BGRX_8888, MDSS_MDP_FETCH_LINEAR, VALID_ROT_WB_FORMAT |
+		VALID_MDP_WB_INTF_FORMAT, 0, C1_B_Cb, C0_G_Y, C2_R_Cr,
+		C3_ALPHA),
+	FMT_RGB_8888(MDP_XBGR_8888, MDSS_MDP_FETCH_LINEAR, VALID_ROT_WB_FORMAT |
+		VALID_MDP_WB_INTF_FORMAT, 0, C3_ALPHA, C1_B_Cb, C0_G_Y,
+		C2_R_Cr),
+	FMT_RGB_8888(MDP_RGBA_8888_TILE, MDSS_MDP_FETCH_TILE,
+		VALID_ROT_WB_FORMAT, 1, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA),
+	FMT_RGB_8888(MDP_ARGB_8888_TILE, MDSS_MDP_FETCH_TILE,
+		VALID_ROT_WB_FORMAT, 1, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb),
+	FMT_RGB_8888(MDP_ABGR_8888_TILE, MDSS_MDP_FETCH_TILE,
+		VALID_ROT_WB_FORMAT, 1, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr),
+	FMT_RGB_8888(MDP_BGRA_8888_TILE, MDSS_MDP_FETCH_TILE,
+		VALID_ROT_WB_FORMAT, 1, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA),
+	FMT_RGB_8888(MDP_RGBX_8888_TILE, MDSS_MDP_FETCH_TILE,
+		VALID_ROT_WB_FORMAT, 0, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA),
+	FMT_RGB_8888(MDP_XRGB_8888_TILE, MDSS_MDP_FETCH_TILE,
+		VALID_ROT_WB_FORMAT, 0, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb),
+	FMT_RGB_8888(MDP_XBGR_8888_TILE, MDSS_MDP_FETCH_TILE,
+		VALID_ROT_WB_FORMAT, 0, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr),
+	FMT_RGB_8888(MDP_BGRX_8888_TILE, MDSS_MDP_FETCH_TILE,
+		VALID_ROT_WB_FORMAT, 0, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA),
+
+	FMT_YUV_PSEUDO(MDP_Y_CRCB_H1V1, MDSS_MDP_FETCH_LINEAR,
+		MDSS_MDP_CHROMA_RGB, 0, C2_R_Cr, C1_B_Cb),
+	FMT_YUV_PSEUDO(MDP_Y_CBCR_H1V1, MDSS_MDP_FETCH_LINEAR,
+		MDSS_MDP_CHROMA_RGB, 0, C1_B_Cb, C2_R_Cr),
+	FMT_YUV_PSEUDO(MDP_Y_CRCB_H2V1, MDSS_MDP_FETCH_LINEAR,
+		MDSS_MDP_CHROMA_H2V1, VALID_ROT_WB_FORMAT, C2_R_Cr, C1_B_Cb),
+	FMT_YUV_PSEUDO(MDP_Y_CBCR_H2V1, MDSS_MDP_FETCH_LINEAR,
+		MDSS_MDP_CHROMA_H2V1, VALID_ROT_WB_FORMAT |
+		VALID_MDP_WB_INTF_FORMAT, C1_B_Cb, C2_R_Cr),
+	FMT_YUV_PSEUDO(MDP_Y_CRCB_H1V2, MDSS_MDP_FETCH_LINEAR,
+		MDSS_MDP_CHROMA_H1V2, VALID_ROT_WB_FORMAT |
+		VALID_MDP_WB_INTF_FORMAT, C2_R_Cr, C1_B_Cb),
+	FMT_YUV_PSEUDO(MDP_Y_CBCR_H1V2, MDSS_MDP_FETCH_LINEAR,
+		MDSS_MDP_CHROMA_H1V2, VALID_ROT_WB_FORMAT |
+		VALID_MDP_WB_INTF_FORMAT, C1_B_Cb, C2_R_Cr),
+	FMT_YUV_PSEUDO(MDP_Y_CRCB_H2V2, MDSS_MDP_FETCH_LINEAR,
+		MDSS_MDP_CHROMA_420, VALID_ROT_WB_FORMAT |
+		VALID_MDP_WB_INTF_FORMAT, C2_R_Cr, C1_B_Cb),
+	FMT_YUV_PSEUDO(MDP_Y_CBCR_H2V2, MDSS_MDP_FETCH_LINEAR,
+		MDSS_MDP_CHROMA_420, VALID_ROT_WB_FORMAT |
+		VALID_MDP_WB_INTF_FORMAT, C1_B_Cb, C2_R_Cr),
+	FMT_YUV_PSEUDO(MDP_Y_CBCR_H2V2_VENUS, MDSS_MDP_FETCH_LINEAR,
+		MDSS_MDP_CHROMA_420, VALID_ROT_WB_FORMAT |
+		VALID_MDP_WB_INTF_FORMAT, C1_B_Cb, C2_R_Cr),
+	FMT_YUV_PSEUDO(MDP_Y_CRCB_H2V2_VENUS, MDSS_MDP_FETCH_LINEAR,
+		MDSS_MDP_CHROMA_420, VALID_ROT_WB_FORMAT |
+		VALID_MDP_WB_INTF_FORMAT, C2_R_Cr, C1_B_Cb),
+
+	FMT_YUV_PLANR(MDP_Y_CB_CR_H2V2, MDSS_MDP_FETCH_LINEAR,
+		MDSS_MDP_CHROMA_420, VALID_ROT_WB_FORMAT |
+		VALID_MDP_WB_INTF_FORMAT, C2_R_Cr, C1_B_Cb),
+	FMT_YUV_PLANR(MDP_Y_CR_CB_H2V2, MDSS_MDP_FETCH_LINEAR,
+		MDSS_MDP_CHROMA_420, VALID_ROT_WB_FORMAT |
+		VALID_MDP_WB_INTF_FORMAT, C1_B_Cb, C2_R_Cr),
+	FMT_YUV_PLANR(MDP_Y_CR_CB_GH2V2, MDSS_MDP_FETCH_LINEAR,
+		MDSS_MDP_CHROMA_420, VALID_ROT_WB_FORMAT |
+		VALID_MDP_WB_INTF_FORMAT, C1_B_Cb, C2_R_Cr),
+
+	{
+		FMT_YUV_COMMON(MDP_YCBCR_H1V1),
+		.fetch_planes = MDSS_MDP_PLANE_INTERLEAVED,
+		.chroma_sample = MDSS_MDP_CHROMA_RGB,
+		.unpack_count = 3,
+		.bpp = 3,
+		.fetch_mode = MDSS_MDP_FETCH_LINEAR,
+		.element = { C2_R_Cr, C1_B_Cb, C0_G_Y },
+	},
+	{
+		FMT_YUV_COMMON(MDP_YCRCB_H1V1),
+		.fetch_planes = MDSS_MDP_PLANE_INTERLEAVED,
+		.chroma_sample = MDSS_MDP_CHROMA_RGB,
+		.unpack_count = 3,
+		.bpp = 3,
+		.fetch_mode = MDSS_MDP_FETCH_LINEAR,
+		.element = { C1_B_Cb, C2_R_Cr, C0_G_Y },
+	},
+	{
+		FMT_YUV_COMMON(MDP_YCRYCB_H2V1),
+		.fetch_planes = MDSS_MDP_PLANE_INTERLEAVED,
+		.chroma_sample = MDSS_MDP_CHROMA_H2V1,
+		.unpack_count = 4,
+		.bpp = 2,
+		.fetch_mode = MDSS_MDP_FETCH_LINEAR,
+		.element = { C1_B_Cb, C0_G_Y, C2_R_Cr, C0_G_Y },
+	},
+	{
+		FMT_YUV_COMMON(MDP_YCBYCR_H2V1),
+		.flag = VALID_MDP_WB_INTF_FORMAT,
+		.fetch_planes = MDSS_MDP_PLANE_INTERLEAVED,
+		.chroma_sample = MDSS_MDP_CHROMA_H2V1,
+		.unpack_count = 4,
+		.bpp = 2,
+		.fetch_mode = MDSS_MDP_FETCH_LINEAR,
+		.element = { C2_R_Cr, C0_G_Y, C1_B_Cb, C0_G_Y },
+	},
+	{
+		FMT_YUV_COMMON(MDP_CRYCBY_H2V1),
+		.fetch_planes = MDSS_MDP_PLANE_INTERLEAVED,
+		.chroma_sample = MDSS_MDP_CHROMA_H2V1,
+		.unpack_count = 4,
+		.bpp = 2,
+		.fetch_mode = MDSS_MDP_FETCH_LINEAR,
+		.element = { C0_G_Y, C1_B_Cb, C0_G_Y, C2_R_Cr },
+	},
+	{
+		FMT_YUV_COMMON(MDP_CBYCRY_H2V1),
+		.fetch_planes = MDSS_MDP_PLANE_INTERLEAVED,
+		.chroma_sample = MDSS_MDP_CHROMA_H2V1,
+		.unpack_count = 4,
+		.bpp = 2,
+		.fetch_mode = MDSS_MDP_FETCH_LINEAR,
+		.element = { C1_B_Cb, C0_G_Y, C2_R_Cr, C0_G_Y},
+	},
+	FMT_RGB_1555(MDP_RGBA_5551, 1, VALID_ROT_WB_FORMAT |
+		VALID_MDP_CURSOR_FORMAT | VALID_MDP_WB_INTF_FORMAT,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr),
+	FMT_RGB_1555(MDP_ARGB_1555, 1, VALID_ROT_WB_FORMAT |
+		VALID_MDP_CURSOR_FORMAT | VALID_MDP_WB_INTF_FORMAT,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA),
+	FMT_RGB_1555(MDP_ABGR_1555, 1, VALID_ROT_WB_FORMAT |
+		VALID_MDP_WB_INTF_FORMAT, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA),
+	FMT_RGB_1555(MDP_BGRA_5551, 1, VALID_ROT_WB_FORMAT |
+		VALID_MDP_WB_INTF_FORMAT, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb),
+	FMT_RGB_1555(MDP_BGRX_5551, 0, VALID_ROT_WB_FORMAT |
+		VALID_MDP_WB_INTF_FORMAT, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb),
+	FMT_RGB_1555(MDP_RGBX_5551, 0, VALID_ROT_WB_FORMAT |
+		VALID_MDP_WB_INTF_FORMAT, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr),
+	FMT_RGB_1555(MDP_XBGR_1555, 0, VALID_ROT_WB_FORMAT |
+		VALID_MDP_WB_INTF_FORMAT, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA),
+	FMT_RGB_1555(MDP_XRGB_1555, 0, VALID_ROT_WB_FORMAT |
+		VALID_MDP_WB_INTF_FORMAT, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA),
+	FMT_RGB_4444(MDP_ABGR_4444, 1, VALID_ROT_WB_FORMAT |
+		VALID_MDP_WB_INTF_FORMAT, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA),
+	FMT_RGB_4444(MDP_BGRA_4444, 1, VALID_ROT_WB_FORMAT |
+		VALID_MDP_CURSOR_FORMAT | VALID_MDP_WB_INTF_FORMAT,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb),
+	FMT_RGB_4444(MDP_BGRX_4444, 0, VALID_ROT_WB_FORMAT |
+		VALID_MDP_WB_INTF_FORMAT, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb),
+	FMT_RGB_4444(MDP_RGBX_4444, 0, VALID_ROT_WB_FORMAT |
+		VALID_MDP_WB_INTF_FORMAT, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr),
+	FMT_RGB_4444(MDP_XBGR_4444, 0, VALID_ROT_WB_FORMAT |
+		VALID_MDP_WB_INTF_FORMAT, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA),
+	FMT_RGB_4444(MDP_XRGB_4444, 0, VALID_ROT_WB_FORMAT |
+		VALID_MDP_WB_INTF_FORMAT, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA),
+
+	FMT_RGB_4444(MDP_RGBA_4444, 1, VALID_ROT_WB_FORMAT |
+		VALID_MDP_CURSOR_FORMAT | VALID_MDP_WB_INTF_FORMAT,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr),
+	FMT_RGB_4444(MDP_ARGB_4444, 1, VALID_ROT_WB_FORMAT |
+		VALID_MDP_CURSOR_FORMAT | VALID_MDP_WB_INTF_FORMAT,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA),
+
+	FMT_RGB_2101010(MDP_RGBA_1010102, MDSS_MDP_FETCH_LINEAR,
+		VALID_ROT_WB_FORMAT | VALID_MDP_WB_INTF_FORMAT, 1,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA),
+	FMT_RGB_2101010(MDP_ARGB_2101010, MDSS_MDP_FETCH_LINEAR, 0, 1,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb),
+	FMT_RGB_2101010(MDP_RGBX_1010102, MDSS_MDP_FETCH_LINEAR,
+		VALID_ROT_WB_FORMAT | VALID_MDP_WB_INTF_FORMAT, 0,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA),
+	FMT_RGB_2101010(MDP_XRGB_2101010, MDSS_MDP_FETCH_LINEAR, 0, 0,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb),
+	FMT_RGB_2101010(MDP_BGRA_1010102, MDSS_MDP_FETCH_LINEAR,
+		VALID_ROT_WB_FORMAT | VALID_MDP_WB_INTF_FORMAT, 1,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA),
+	FMT_RGB_2101010(MDP_ABGR_2101010, MDSS_MDP_FETCH_LINEAR, 0, 1,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr),
+	FMT_RGB_2101010(MDP_BGRX_1010102, MDSS_MDP_FETCH_LINEAR,
+		VALID_ROT_WB_FORMAT | VALID_MDP_WB_INTF_FORMAT, 0,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA),
+	FMT_RGB_2101010(MDP_XBGR_2101010, MDSS_MDP_FETCH_LINEAR, 0, 0,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr),
+
+	FMT_YUV_PSEUDO_10(MDP_Y_CBCR_H2V2_P010, MDSS_MDP_FETCH_LINEAR,
+		MDSS_MDP_CHROMA_420, VALID_ROT_WB_FORMAT,
+		C1_B_Cb, C2_R_Cr, 0, 1),
+
+};
+#endif
diff --git a/drivers/video/fbdev/msm/mdss_mdp_hwio.h b/drivers/video/fbdev/msm/mdss_mdp_hwio.h
new file mode 100644
index 0000000..54ceb19
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_hwio.h
@@ -0,0 +1,844 @@
+/* Copyright (c) 2012-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_MDP_HWIO_H
+#define MDSS_MDP_HWIO_H
+
+#include <linux/bitops.h>
+
+/*
+ * struct mdss_mdp_hwio_cfg - used to define a register bitfield
+ * @start: bitfield offset start from lsb
+ * @len: number of lsb bits that can be taken from field value
+ * @shift: number of lsb bits to truncate from field value
+ */
+struct mdss_mdp_hwio_cfg {
+	u32 start, len, shift;
+};
+
+static inline u32 mdss_mdp_hwio_mask(struct mdss_mdp_hwio_cfg *cfg, u32 val) /* place (val >> shift) into bits [start, start+len) */
+{
+	u32 mask = (1U << cfg->len) - 1; /* 1U: signed "1 << 31" is UB; unsigned shift is well-defined for len <= 31 */
+
+	return ((val >> cfg->shift) & mask) << cfg->start;
+}
+
+#define IGC_LUT_ENTRIES	256
+#define GC_LUT_SEGMENTS	16
+#define ENHIST_LUT_ENTRIES 256
+#define HIST_V_SIZE	256
+
+/* QSEED3 LUT sizes */
+#define DIR_LUT_IDX		1
+#define DIR_LUT_COEFFS		200
+#define CIR_LUT_IDX		9
+#define CIR_LUT_COEFFS		60
+#define SEP_LUT_IDX		10
+#define SEP_LUT_COEFFS		60
+
+
+#define MDSS_MDP_FETCH_CONFIG_RESET_VALUE	0x00000087
+
+#define MDSS_REG_HW_VERSION				0x0
+#define MDSS_REG_HW_INTR_STATUS				0x10
+
+#define MDSS_INTR_MDP				BIT(0)
+#define MDSS_INTR_DSI0				BIT(4)
+#define MDSS_INTR_DSI1				BIT(5)
+#define MDSS_INTR_HDMI				BIT(8)
+#define MDSS_INTR_EDP				BIT(12)
+
+#define MDSS_MDP_REG_HW_VERSION				0x0
+#define MDSS_MDP_REG_DISP_INTF_SEL			0x00004
+#define MDSS_MDP_REG_INTR2_EN				0x00008
+#define MDSS_MDP_REG_INTR2_STATUS			0x0000C
+#define MDSS_MDP_REG_INTR2_CLEAR			0x0002C
+#define MDSS_MDP_REG_INTR_EN				0x00010
+#define MDSS_MDP_REG_INTR_STATUS			0x00014
+#define MDSS_MDP_REG_INTR_CLEAR				0x00018
+#define MDSS_MDP_REG_HIST_INTR_EN			0x0001C
+#define MDSS_MDP_REG_HIST_INTR_STATUS			0x00020
+#define MDSS_MDP_REG_HIST_INTR_CLEAR			0x00024
+#define MMSS_MDP_MDP_SSPP_SPARE_0			0x00028
+
+#define MMSS_MDP_PANIC_ROBUST_CTRL			0x00178
+#define MMSS_MDP_PANIC_LUT0				0x0017C
+#define MMSS_MDP_PANIC_LUT1				0x00180
+#define MMSS_MDP_ROBUST_LUT				0x00184
+#define MMSS_MDP_RT_NRT_VBIF_CLIENT_SEL			0x00190
+
+#define MDSS_MDP_REG_VIDEO_INTF_UNDERFLOW_CTL		0x002E0
+#define MDSS_MDP_REG_SPLIT_DISPLAY_EN			0x002F4
+#define MDSS_MDP_REG_SPLIT_DISPLAY_UPPER_PIPE_CTRL	0x002F8
+#define MDSS_MDP_DANGER_STATUS				0x00360
+#define MDSS_MDP_SAFE_STATUS				0x00364
+#define MDSS_MDP_REG_SPLIT_DISPLAY_LOWER_PIPE_CTRL	0x003F0
+#define MDSS_MDP_REG_DCE_SEL				0x00450
+
+#define MDSS_INTF_DSI	0x1
+#define MDSS_INTF_HDMI	0x3
+#define MDSS_INTF_LCDC	0x5
+#define MDSS_INTF_EDP	0x9
+
+#define MDSS_MDP_INTR_WB_0_DONE				BIT(0)
+#define MDSS_MDP_INTR_WB_1_DONE				BIT(1)
+#define MDSS_MDP_INTR_WB_2_DONE				BIT(4)
+#define MDSS_MDP_INTR_PING_PONG_0_DONE			BIT(8)
+#define MDSS_MDP_INTR_PING_PONG_1_DONE			BIT(9)
+#define MDSS_MDP_INTR_PING_PONG_2_DONE			BIT(10)
+#define MDSS_MDP_INTR_PING_PONG_3_DONE			BIT(11)
+#define MDSS_MDP_INTR_PING_PONG_0_RD_PTR		BIT(12)
+#define MDSS_MDP_INTR_PING_PONG_1_RD_PTR		BIT(13)
+#define MDSS_MDP_INTR_PING_PONG_2_RD_PTR		BIT(14)
+#define MDSS_MDP_INTR_PING_PONG_3_RD_PTR		BIT(15)
+#define MDSS_MDP_INTR_PING_PONG_0_WR_PTR		BIT(16)
+#define MDSS_MDP_INTR_PING_PONG_1_WR_PTR		BIT(17)
+#define MDSS_MDP_INTR_PING_PONG_2_WR_PTR		BIT(18)
+#define MDSS_MDP_INTR_PING_PONG_3_WR_PTR		BIT(19)
+#define MDSS_MDP_INTR_PING_PONG_0_AUTOREFRESH_DONE	BIT(20)
+#define MDSS_MDP_INTR_PING_PONG_1_AUTOREFRESH_DONE	BIT(21)
+#define MDSS_MDP_INTR_PING_PONG_2_AUTOREFRESH_DONE	BIT(22)
+#define MDSS_MDP_INTR_PING_PONG_3_AUTOREFRESH_DONE	BIT(23)
+#define MDSS_MDP_INTR_INTF_0_UNDERRUN			BIT(24)
+#define MDSS_MDP_INTR_INTF_0_VSYNC			BIT(25)
+#define MDSS_MDP_INTR_INTF_1_UNDERRUN			BIT(26)
+#define MDSS_MDP_INTR_INTF_1_VSYNC			BIT(27)
+#define MDSS_MDP_INTR_INTF_2_UNDERRUN			BIT(28)
+#define MDSS_MDP_INTR_INTF_2_VSYNC			BIT(29)
+#define MDSS_MDP_INTR_INTF_3_UNDERRUN			BIT(30)
+#define MDSS_MDP_INTR_INTF_3_VSYNC			BIT(31)
+
+#define MDSS_MDP_INTR2_PING_PONG_2_CWB_OVERFLOW	        BIT(14)
+#define MDSS_MDP_INTR2_PING_PONG_3_CWB_OVERFLOW	        BIT(15)
+
+#define MDSS_MDP_HIST_INTR_VIG_0_DONE			BIT(0)
+#define MDSS_MDP_HIST_INTR_VIG_0_RESET_DONE		BIT(1)
+#define MDSS_MDP_HIST_INTR_VIG_1_DONE			BIT(4)
+#define MDSS_MDP_HIST_INTR_VIG_1_RESET_DONE		BIT(5)
+#define MDSS_MDP_HIST_INTR_VIG_2_DONE			BIT(8)
+#define MDSS_MDP_HIST_INTR_VIG_2_RESET_DONE		BIT(9)
+#define MDSS_MDP_HIST_INTR_VIG_3_DONE			BIT(10)
+#define MDSS_MDP_HIST_INTR_VIG_3_RESET_DONE		BIT(11)
+#define MDSS_MDP_HIST_INTR_DSPP_0_DONE			BIT(12)
+#define MDSS_MDP_HIST_INTR_DSPP_0_RESET_DONE		BIT(13)
+#define MDSS_MDP_HIST_INTR_DSPP_1_DONE			BIT(16)
+#define MDSS_MDP_HIST_INTR_DSPP_1_RESET_DONE		BIT(17)
+#define MDSS_MDP_HIST_INTR_DSPP_2_DONE			BIT(20)
+#define MDSS_MDP_HIST_INTR_DSPP_2_RESET_DONE		BIT(21)
+#define MDSS_MDP_HIST_INTR_DSPP_3_DONE			BIT(22)
+#define MDSS_MDP_HIST_INTR_DSPP_3_RESET_DONE		BIT(23)
+
+enum mdss_mdp_intr_type {
+	MDSS_MDP_IRQ_TYPE_WB_ROT_COMP,
+	MDSS_MDP_IRQ_TYPE_WB_WFD_COMP,
+	MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+	MDSS_MDP_IRQ_TYPE_PING_PONG_RD_PTR,
+	MDSS_MDP_IRQ_TYPE_PING_PONG_WR_PTR,
+	MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF,
+	MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN,
+	MDSS_MDP_IRQ_TYPE_INTF_VSYNC,
+	MDSS_MDP_IRQ_TYPE_CWB_OVERFLOW,
+};
+
+#define MDSS_MDP_INTF_INTR_PROG_LINE		BIT(8)
+
+enum mdss_mdp_intf_intr_type {
+	MDSS_MDP_INTF_IRQ_PROG_LINE = 8,
+};
+
+#define MDSS_MDP_REG_IGC_VIG_BASE			0x200
+#define MDSS_MDP_REG_IGC_RGB_BASE			0x210
+#define MDSS_MDP_REG_IGC_DMA_BASE			0x220
+#define MDSS_MDP_REG_IGC_DSPP_BASE			0x300
+
+enum mdss_mdp_ctl_index {
+	MDSS_MDP_CTL0,
+	MDSS_MDP_CTL1,
+	MDSS_MDP_CTL2,
+	MDSS_MDP_CTL3,
+	MDSS_MDP_CTL4,
+	MDSS_MDP_CTL5,
+	MDSS_MDP_MAX_CTL
+};
+
+
+#define MDSS_MDP_REG_CTL_LAYER_EXTN_OFFSET		0x40
+#define MDSS_MDP_REG_CTL_LAYER_EXTN2_OFFSET		0x70
+#define MDSS_MDP_CTL_X_LAYER_5				0x24
+
+/* mixer 5 has different offset than others */
+#define MDSS_MDP_REG_CTL_LAYER(lm)	\
+	(((lm) == 5) ? MDSS_MDP_CTL_X_LAYER_5 : ((lm) * 0x004))
+
+#define MDSS_MDP_REG_CTL_LAYER_EXTN(lm)	\
+	 (MDSS_MDP_REG_CTL_LAYER_EXTN_OFFSET + ((lm) * 0x004))
+
+#define MDSS_MDP_REG_CTL_LAYER_EXTN2(lm)	\
+	 (MDSS_MDP_REG_CTL_LAYER_EXTN2_OFFSET + ((lm) * 0x004))
+
+#define MDSS_MDP_REG_CTL_TOP				0x014
+#define MDSS_MDP_REG_CTL_FLUSH				0x018
+#define MDSS_MDP_REG_CTL_START				0x01C
+#define MDSS_MDP_REG_CTL_PACK_3D			0x020
+#define MDSS_MDP_REG_CTL_SW_RESET			0x030
+
+#define MDSS_MDP_CTL_OP_VIDEO_MODE		(0 << 17)
+#define MDSS_MDP_CTL_OP_CMD_MODE		(1 << 17)
+
+#define MDSS_MDP_CTL_OP_ROT0_MODE		0x1
+#define MDSS_MDP_CTL_OP_ROT1_MODE		0x2
+#define MDSS_MDP_CTL_OP_WB0_MODE		0x3
+#define MDSS_MDP_CTL_OP_WB1_MODE		0x4
+#define MDSS_MDP_CTL_OP_WFD_MODE		0x5
+
+#define MDSS_MDP_CTL_OP_PACK_3D_ENABLE		BIT(19)
+#define MDSS_MDP_CTL_OP_PACK_3D_FRAME_INT	(0 << 20)
+#define MDSS_MDP_CTL_OP_PACK_3D_H_ROW_INT	(1 << 20)
+#define MDSS_MDP_CTL_OP_PACK_3D_V_ROW_INT	(2 << 20)
+#define MDSS_MDP_CTL_OP_PACK_3D_COL_INT		(3 << 20)
+
+enum mdss_mdp_sspp_index {
+	MDSS_MDP_SSPP_VIG0,
+	MDSS_MDP_SSPP_VIG1,
+	MDSS_MDP_SSPP_VIG2,
+	MDSS_MDP_SSPP_RGB0,
+	MDSS_MDP_SSPP_RGB1,
+	MDSS_MDP_SSPP_RGB2,
+	MDSS_MDP_SSPP_DMA0,
+	MDSS_MDP_SSPP_DMA1,
+	MDSS_MDP_SSPP_VIG3,
+	MDSS_MDP_SSPP_RGB3,
+	MDSS_MDP_SSPP_CURSOR0,
+	MDSS_MDP_SSPP_CURSOR1,
+	MDSS_MDP_SSPP_DMA2,
+	MDSS_MDP_SSPP_DMA3,
+	MDSS_MDP_MAX_SSPP,
+};
+
+enum mdss_mdp_sspp_fetch_type {
+	MDSS_MDP_PLANE_INTERLEAVED,
+	MDSS_MDP_PLANE_PLANAR,
+	MDSS_MDP_PLANE_PSEUDO_PLANAR,
+};
+
+enum mdss_mdp_sspp_chroma_samp_type {
+	MDSS_MDP_CHROMA_RGB,
+	MDSS_MDP_CHROMA_H2V1,
+	MDSS_MDP_CHROMA_H1V2,
+	MDSS_MDP_CHROMA_420
+};
+
+#define MDSS_MDP_REG_SSPP_SRC_SIZE			0x000
+#define MDSS_MDP_REG_SSPP_SRC_IMG_SIZE			0x004
+#define MDSS_MDP_REG_SSPP_SRC_XY			0x008
+#define MDSS_MDP_REG_SSPP_OUT_SIZE			0x00C
+#define MDSS_MDP_REG_SSPP_OUT_XY			0x010
+#define MDSS_MDP_REG_SSPP_SRC0_ADDR			0x014
+#define MDSS_MDP_REG_SSPP_SRC1_ADDR			0x018
+#define MDSS_MDP_REG_SSPP_SRC2_ADDR			0x01C
+#define MDSS_MDP_REG_SSPP_SRC3_ADDR			0x020
+#define MDSS_MDP_REG_SSPP_SRC_YSTRIDE0			0x024
+#define MDSS_MDP_REG_SSPP_SRC_YSTRIDE1			0x028
+#define MDSS_MDP_REG_SSPP_STILE_FRAME_SIZE		0x02C
+#define MDSS_MDP_REG_SSPP_SRC_FORMAT			0x030
+#define MDSS_MDP_REG_SSPP_SRC_UNPACK_PATTERN		0x034
+#define MDSS_MDP_REG_SSPP_SRC_OP_MODE			0x038
+#define MDSS_MDP_REG_SSPP_SRC_CONSTANT_COLOR		0x03C
+#define MDSS_MDP_REG_SSPP_REQPRIO_FIFO_WM_0		0x050
+#define MDSS_MDP_REG_SSPP_REQPRIO_FIFO_WM_1		0x054
+#define MDSS_MDP_REG_SSPP_REQPRIO_FIFO_WM_2		0x058
+#define MDSS_MDP_REG_SSPP_DANGER_LUT			0x060
+#define MDSS_MDP_REG_SSPP_SAFE_LUT			0x064
+#define MDSS_MDP_REG_SSPP_CREQ_LUT			0x068
+#define MDSS_MDP_REG_SSPP_QOS_CTRL			0x06C
+#define MDSS_MDP_REG_SSPP_CDP_CTRL			0x134
+#define MDSS_MDP_REG_SSPP_UBWC_ERROR_STATUS		0x138
+#define MDSS_MDP_REG_SSPP_TRAFFIC_SHAPER		0x130
+#define MDSS_MDP_REG_SSPP_TRAFFIC_SHAPER_PREFILL	0x150
+#define MDSS_MDP_REG_SSPP_TRAFFIC_SHAPER_REC1_PREFILL	0x154
+
+#define MDSS_MDP_REG_SSPP_MULTI_REC_OP_MODE		0x170
+#define MDSS_MDP_REG_SSPP_OUT_SIZE_REC1			0x160
+#define MDSS_MDP_REG_SSPP_OUT_XY_REC1			0x164
+#define MDSS_MDP_REG_SSPP_SRC_XY_REC1			0x168
+#define MDSS_MDP_REG_SSPP_SRC_SIZE_REC1			0x16C
+#define MDSS_MDP_REG_SSPP_SRC_FORMAT_REC1		0x174
+#define MDSS_MDP_REG_SSPP_SRC_UNPACK_PATTERN_REC1	0x178
+#define MDSS_MDP_REG_SSPP_SRC_OP_MODE_REC1		0x17C
+#define MDSS_MDP_REG_SSPP_SRC_CONSTANT_COLOR_REC1	0x180
+
+#define MDSS_MDP_OP_DEINTERLACE			BIT(22)
+#define MDSS_MDP_OP_DEINTERLACE_ODD		BIT(23)
+#define MDSS_MDP_OP_IGC_ROM_1			BIT(18)
+#define MDSS_MDP_OP_IGC_ROM_0			BIT(17)
+#define MDSS_MDP_OP_IGC_EN			BIT(16)
+#define MDSS_MDP_OP_FLIP_UD			BIT(14)
+#define MDSS_MDP_OP_FLIP_LR			BIT(13)
+#define MDSS_MDP_OP_BWC_EN			BIT(0)
+#define MDSS_MDP_OP_BWC_LOSSLESS		(0 << 1)
+#define MDSS_MDP_OP_BWC_Q_HIGH			(1 << 1)
+#define MDSS_MDP_OP_BWC_Q_MED			(2 << 1)
+
+#define MDSS_MDP_REG_SSPP_SRC_CONSTANT_COLOR		0x03C /* NOTE(review): duplicate of the identical define above (0x03C); harmless, but one copy should be dropped */
+#define MDSS_MDP_REG_SSPP_FETCH_CONFIG			0x048
+#define MDSS_MDP_REG_SSPP_VC1_RANGE			0x04C
+#define MDSS_MDP_REG_SSPP_SRC_ADDR_SW_STATUS		0x070
+
+#define MDSS_MDP_REG_SSPP_CURRENT_SRC0_ADDR		0x0A4
+#define MDSS_MDP_REG_SSPP_CURRENT_SRC1_ADDR		0x0A8
+#define MDSS_MDP_REG_SSPP_CURRENT_SRC2_ADDR		0x0AC
+#define MDSS_MDP_REG_SSPP_CURRENT_SRC3_ADDR		0x0B0
+#define MDSS_MDP_REG_SSPP_DECIMATION_CONFIG		0x0B4
+#define MDSS_MDP_REG_SSPP_SW_PIX_EXT_C0_LR		0x100
+#define MDSS_MDP_REG_SSPP_SW_PIX_EXT_C0_TB		0x104
+#define MDSS_MDP_REG_SSPP_SW_PIX_EXT_C0_REQ_PIXELS	0x108
+
+#define MDSS_MDP_REG_VIG_OP_MODE			0x200
+#define MDSS_MDP_REG_VIG_QSEED2_CONFIG			0x204
+#define MDSS_MDP_REG_VIG_QSEED2_C03_PHASESTEPX		0x210
+#define MDSS_MDP_REG_VIG_QSEED2_C03_PHASESTEPY		0x214
+#define MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPX		0x218
+#define MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPY		0x21C
+#define MDSS_MDP_REG_VIG_QSEED2_C03_INIT_PHASEX		0x220
+#define MDSS_MDP_REG_VIG_QSEED2_C03_INIT_PHASEY		0x224
+#define MDSS_MDP_REG_VIG_QSEED2_C12_INIT_PHASEX		0x228
+#define MDSS_MDP_REG_VIG_QSEED2_C12_INIT_PHASEY		0x22C
+#define MDSS_MDP_REG_VIG_QSEED2_SHARP			0x230
+#define MDSS_MDP_REG_VIG_MEM_COL_BASE			0x288
+#define MDSS_MDP_REG_VIG_PA_BASE			0x310
+
+/* QSEED3 registers shared by VIG and Destination Scaler */
+#define MDSS_MDP_REG_SCALER_HW_VERSION			0x00
+#define MDSS_MDP_REG_SCALER_OP_MODE			0x04
+#define MDSS_MDP_REG_SCALER_RGB2Y_COEFF			0x08
+#define MDSS_MDP_REG_SCALER_PHASE_INIT			0x0C
+#define MDSS_MDP_REG_SCALER_PHASE_STEP_Y_H		0x10
+#define MDSS_MDP_REG_SCALER_PHASE_STEP_Y_V		0x14
+#define MDSS_MDP_REG_SCALER_PHASE_STEP_UV_H		0x18
+#define MDSS_MDP_REG_SCALER_PHASE_STEP_UV_V		0x1C
+#define MDSS_MDP_REG_SCALER_PRELOAD			0x20
+#define MDSS_MDP_REG_SCALER_DE_SHARPEN			0x24
+#define MDSS_MDP_REG_SCALER_DE_SHARPEN_CTL		0x28
+#define MDSS_MDP_REG_SCALER_DE_SHAPE_CTL		0x2C
+#define MDSS_MDP_REG_SCALER_DE_THRESHOLD		0x30
+#define MDSS_MDP_REG_SCALER_DE_ADJUST_DATA_0		0x34
+#define MDSS_MDP_REG_SCALER_DE_ADJUST_DATA_1		0x38
+#define MDSS_MDP_REG_SCALER_DE_ADJUST_DATA_2		0x3C
+#define MDSS_MDP_REG_SCALER_SRC_SIZE_Y_RGB_A		0x40
+#define MDSS_MDP_REG_SCALER_SRC_SIZE_UV			0x44
+#define MDSS_MDP_REG_SCALER_DST_SIZE			0x48
+#define MDSS_MDP_REG_SCALER_COEF_LUT_CTRL		0x4C
+#define MDSS_MDP_REG_SCALER_BUFFER_CTRL			0x50
+#define MDSS_MDP_REG_SCALER_CLK_CTRL0			0x54
+#define MDSS_MDP_REG_SCALER_CLK_CTRL1			0x58
+#define MDSS_MDP_REG_SCALER_CLK_STATUS			0x5C
+#define MDSS_MDP_REG_SCALER_MISR_CTRL			0x70
+#define MDSS_MDP_REG_SCALER_MISR_SIGNATURE_0		0x74
+#define MDSS_MDP_REG_SCALER_MISR_SIGNATURE_1		0x78
+
+#define SCALER_EN			BIT(0)
+#define SCALER_DIR_EN			BIT(4)
+#define SCALER_DE_EN			BIT(8)
+#define SCALER_ALPHA_EN			BIT(10)
+#define SCALER_COLOR_SPACE		12
+#define SCALER_BIT_WIDTH		14
+#define Y_FILTER_CFG			16
+#define UV_FILTER_CFG			24
+#define ALPHA_FILTER_CFG		30
+#define SCALER_BLEND_CFG		31
+
+#define PHASE_BITS			0x3F
+#define PHASE_STEP_BITS			0xFFFFFF
+#define PRELOAD_BITS			0x7F
+
+#define Y_PHASE_INIT_H			0
+#define Y_PHASE_INIT_V			8
+#define UV_PHASE_INIT_H			16
+#define UV_PHASE_INIT_V			24
+#define Y_PRELOAD_H			0
+#define Y_PRELOAD_V			8
+#define UV_PRELOAD_H			16
+#define UV_PRELOAD_V			24
+/* supported filters */
+#define EDGE_DIRECTED_2D		0x0
+#define CIRCULAR_2D			0x1
+#define SEPERABLE_1D			0x2
+#define BILINEAR			0x3
+#define ALPHA_DROP_REPEAT		0x0
+#define ALPHA_BILINEAR			0x1
+
+
+/* in mpq product */
+#define MDSS_MDP_REG_VIG_FLUSH_SEL			0x204
+
+#define MDSS_MDP_VIG_OP_PA_SAT_ZERO_EXP_EN		BIT(2)
+#define MDSS_MDP_VIG_OP_PA_MEM_PROTECT_EN		BIT(3)
+#define MDSS_MDP_VIG_OP_PA_EN				BIT(4)
+#define MDSS_MDP_VIG_OP_PA_MEM_COL_SKIN_MASK		BIT(5)
+#define MDSS_MDP_VIG_OP_PA_MEM_COL_FOL_MASK		BIT(6)
+#define MDSS_MDP_VIG_OP_PA_MEM_COL_SKY_MASK		BIT(7)
+#define MDSS_MDP_VIG_OP_HIST_LUTV_EN			BIT(10)
+#define MDSS_MDP_VIG_OP_PA_HUE_MASK			BIT(25)
+#define MDSS_MDP_VIG_OP_PA_SAT_MASK			BIT(26)
+#define MDSS_MDP_VIG_OP_PA_VAL_MASK			BIT(27)
+#define MDSS_MDP_VIG_OP_PA_CONT_MASK			BIT(28)
+
+#define MDSS_MDP_REG_SCALE_CONFIG			0x204
+#define MDSS_MDP_REG_SCALE_PHASE_STEP_X			0x210
+#define MDSS_MDP_REG_SCALE_PHASE_STEP_Y			0x214
+#define MDSS_MDP_REG_SCALE_INIT_PHASE_X			0x220
+#define MDSS_MDP_REG_SCALE_INIT_PHASE_Y			0x224
+
+#define MDSS_MDP_REG_VIG_CSC_1_BASE			0x320
+
+#define MDSS_MDP_REG_VIG_CSC_10_BASE			0x1A04
+#define MDSS_MDP_REG_VIG_CSC_10_OP_MODE			0x1A00
+
+#define MDSS_MDP_REG_VIG_HIST_CTL_BASE			0x2C4
+#define MDSS_MDP_REG_VIG_HIST_DATA_BASE			0x2E0
+#define MDSS_MDP_REG_VIG_HIST_LUT_BASE			0x2F0
+
+#define MDSS_MDP_SCALE_FILTER_NEAREST		0x0
+#define MDSS_MDP_SCALE_FILTER_BIL		0x1
+#define MDSS_MDP_SCALE_FILTER_PCMN		0x2
+#define MDSS_MDP_SCALE_FILTER_CA		0x3
+#define MDSS_MDP_SCALEY_EN			BIT(1)
+#define MDSS_MDP_SCALEX_EN			BIT(0)
+#define MDSS_MDP_FMT_SOLID_FILL			0x4037FF
+
+#define MDSS_MDP_INTF_EDP_SEL	(BIT(3) | BIT(1))
+#define MDSS_MDP_INTF_HDMI_SEL	(BIT(25) | BIT(24))
+#define MDSS_MDP_INTF_DSI0_SEL	BIT(8)
+#define MDSS_MDP_INTF_DSI1_SEL	BIT(16)
+
+enum mdss_mdp_mixer_intf_index {
+	MDSS_MDP_INTF_LAYERMIXER0,
+	MDSS_MDP_INTF_LAYERMIXER1,
+	MDSS_MDP_INTF_LAYERMIXER2,
+	MDSS_MDP_INTF_LAYERMIXER3,
+	MDSS_MDP_INTF_MAX_LAYERMIXER,
+};
+
+enum mdss_mdp_mixer_wb_index {
+	MDSS_MDP_WB_LAYERMIXER0,
+	MDSS_MDP_WB_LAYERMIXER1,
+	MDSS_MDP_WB_MAX_LAYERMIXER,
+};
+
+enum mdss_mdp_stage_index {
+	MDSS_MDP_STAGE_UNUSED,
+	MDSS_MDP_STAGE_BASE,
+	MDSS_MDP_STAGE_0,
+	MDSS_MDP_STAGE_1,
+	MDSS_MDP_STAGE_2,
+	MDSS_MDP_STAGE_3,
+	MDSS_MDP_STAGE_4,
+	MDSS_MDP_STAGE_5,
+	MDSS_MDP_STAGE_6,
+	MDSS_MDP_MAX_STAGE
+};
+#define MAX_PIPES_PER_STAGE	0x2
+#define MAX_PIPES_PER_LM	(MDSS_MDP_MAX_STAGE*MAX_PIPES_PER_STAGE)
+
+#define MDSS_MDP_REG_LM_OP_MODE				0x000
+#define MDSS_MDP_REG_LM_OUT_SIZE			0x004
+#define MDSS_MDP_REG_LM_BORDER_COLOR_0			0x008
+#define MDSS_MDP_REG_LM_BORDER_COLOR_1			0x010
+
+#define MDSS_MDP_REG_LM_BLEND_OFFSET(stage)	(0x20 + ((stage) * 0x30))
+#define MDSS_MDP_REG_LM_BLEND_OP			0x00
+#define MDSS_MDP_REG_LM_BLEND_FG_ALPHA			0x04
+#define MDSS_MDP_REG_LM_BLEND_BG_ALPHA			0x08
+#define MDSS_MDP_REG_LM_BLEND_FG_TRANSP_LOW0		0x0C
+#define MDSS_MDP_REG_LM_BLEND_FG_TRANSP_LOW1		0x10
+#define MDSS_MDP_REG_LM_BLEND_FG_TRANSP_HIGH0		0x14
+#define MDSS_MDP_REG_LM_BLEND_FG_TRANSP_HIGH1		0x18
+#define MDSS_MDP_REG_LM_BLEND_BG_TRANSP_LOW0		0x1C
+#define MDSS_MDP_REG_LM_BLEND_BG_TRANSP_LOW1		0x20
+#define MDSS_MDP_REG_LM_BLEND_BG_TRANSP_HIGH0		0x24
+#define MDSS_MDP_REG_LM_BLEND_BG_TRANSP_HIGH1		0x28
+#define MDSS_MDP_REG_LM_BLEND_STAGE4	0x150
+
+#define MDSS_MDP_REG_LM_CURSOR_IMG_SIZE			0xE0
+#define MDSS_MDP_REG_LM_CURSOR_SIZE			0xE4
+#define MDSS_MDP_REG_LM_CURSOR_XY			0xE8
+#define MDSS_MDP_REG_LM_CURSOR_STRIDE			0xDC
+#define MDSS_MDP_REG_LM_CURSOR_FORMAT			0xEC
+#define MDSS_MDP_REG_LM_CURSOR_BASE_ADDR		0xF0
+#define MDSS_MDP_REG_LM_CURSOR_START_XY			0xF4
+#define MDSS_MDP_REG_LM_CURSOR_BLEND_CONFIG		0xF8
+#define MDSS_MDP_REG_LM_CURSOR_BLEND_PARAM		0xFC
+#define MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_LOW0	0x100
+#define MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_LOW1	0x104
+#define MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_HIGH0	0x108
+#define MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_HIGH1	0x10C
+
+#define MDSS_MDP_REG_LM_GC_LUT_BASE	0x110
+
+#define MDSS_MDP_LM_BORDER_COLOR		(1 << 24)
+#define MDSS_MDP_LM_CURSOR_OUT			(1 << 25)
+#define MDSS_MDP_BLEND_FG_ALPHA_FG_CONST	(0 << 0)
+#define MDSS_MDP_BLEND_FG_ALPHA_BG_CONST	(1 << 0)
+#define MDSS_MDP_BLEND_FG_ALPHA_FG_PIXEL	(2 << 0)
+#define MDSS_MDP_BLEND_FG_ALPHA_BG_PIXEL	(3 << 0)
+#define MDSS_MDP_BLEND_FG_INV_ALPHA		(1 << 2)
+#define MDSS_MDP_BLEND_FG_MOD_ALPHA		(1 << 3)
+#define MDSS_MDP_BLEND_FG_INV_MOD_ALPHA		(1 << 4)
+#define MDSS_MDP_BLEND_FG_TRANSP_EN		(1 << 5)
+#define MDSS_MDP_BLEND_BG_ALPHA_FG_CONST	(0 << 8)
+#define MDSS_MDP_BLEND_BG_ALPHA_BG_CONST	(1 << 8)
+#define MDSS_MDP_BLEND_BG_ALPHA_FG_PIXEL	(2 << 8)
+#define MDSS_MDP_BLEND_BG_ALPHA_BG_PIXEL	(3 << 8)
+#define MDSS_MDP_BLEND_BG_INV_ALPHA		(1 << 10)
+#define MDSS_MDP_BLEND_BG_MOD_ALPHA		(1 << 11)
+#define MDSS_MDP_BLEND_BG_INV_MOD_ALPHA		(1 << 12)
+#define MDSS_MDP_BLEND_BG_TRANSP_EN		(1 << 13)
+
+enum mdss_mdp_writeback_index {
+	MDSS_MDP_WRITEBACK0,
+	MDSS_MDP_WRITEBACK1,
+	MDSS_MDP_WRITEBACK2,
+	MDSS_MDP_WRITEBACK3,
+	MDSS_MDP_WRITEBACK4,
+	MDSS_MDP_MAX_WRITEBACK
+};
+
+#define MDSS_MDP_REG_WB_DST_FORMAT			0x000
+#define MDSS_MDP_REG_WB_DST_OP_MODE			0x004
+#define MDSS_MDP_REG_WB_DST_PACK_PATTERN		0x008
+#define MDSS_MDP_REG_WB_DST0_ADDR			0x00C
+#define MDSS_MDP_REG_WB_DST1_ADDR			0x010
+#define MDSS_MDP_REG_WB_DST2_ADDR			0x014
+#define MDSS_MDP_REG_WB_DST3_ADDR			0x018
+#define MDSS_MDP_REG_WB_DST_YSTRIDE0			0x01C
+#define MDSS_MDP_REG_WB_DST_YSTRIDE1			0x020
+#define MDSS_MDP_REG_WB_DST_DITHER_BITDEPTH		0x024
+#define MDSS_MDP_REG_WB_DST_MATRIX_ROW0			0x030
+#define MDSS_MDP_REG_WB_DST_MATRIX_ROW1			0x034
+#define MDSS_MDP_REG_WB_DST_MATRIX_ROW2			0x038
+#define MDSS_MDP_REG_WB_DST_MATRIX_ROW3			0x03C
+#define MDSS_MDP_REG_WB_DST_WRITE_CONFIG		0x048
+#define MDSS_MDP_REG_WB_ROTATION_DNSCALER		0x050
+#define MDSS_MDP_REG_WB_ROTATOR_PIPE_DOWNSCALER		0x054
+#define MDSS_MDP_REG_WB_N16_INIT_PHASE_X_C03		0x060
+#define MDSS_MDP_REG_WB_N16_INIT_PHASE_X_C12		0x064
+#define MDSS_MDP_REG_WB_N16_INIT_PHASE_Y_C03		0x068
+#define MDSS_MDP_REG_WB_N16_INIT_PHASE_Y_C12		0x06C
+#define MDSS_MDP_REG_WB_OUT_SIZE			0x074
+#define MDSS_MDP_REG_WB_ALPHA_X_VALUE			0x078
+#define MDSS_MDP_REG_WB_CSC_BASE			0x260
+#define MDSS_MDP_REG_WB_DST_ADDR_SW_STATUS		0x2B0
+#define MDSS_MDP_REG_WB_CDP_CTRL			0x2B4
+
+#define MDSS_MDP_MAX_AD_AL	65535
+#define MDSS_MDP_MAX_AD_STR	255
+#define MDSS_MDP_AD_BL_SCALE	4095
+
+#define MDSS_MDP_REG_AD_BYPASS				0x000
+#define MDSS_MDP_REG_AD_CTRL_0				0x004
+#define MDSS_MDP_REG_AD_CTRL_1				0x008
+#define MDSS_MDP_REG_AD_FRAME_SIZE			0x00C
+#define MDSS_MDP_REG_AD_CON_CTRL_0			0x010
+#define MDSS_MDP_REG_AD_CON_CTRL_1			0x014
+#define MDSS_MDP_REG_AD_STR_MAN				0x018
+#define MDSS_MDP_REG_AD_VAR				0x01C
+#define MDSS_MDP_REG_AD_DITH				0x020
+#define MDSS_MDP_REG_AD_DITH_CTRL			0x024
+#define MDSS_MDP_REG_AD_AMP_LIM				0x028
+#define MDSS_MDP_REG_AD_SLOPE				0x02C
+#define MDSS_MDP_REG_AD_BW_LVL				0x030
+#define MDSS_MDP_REG_AD_LOGO_POS			0x034
+#define MDSS_MDP_REG_AD_LUT_FI				0x038
+#define MDSS_MDP_REG_AD_LUT_CC				0x07C
+#define MDSS_MDP_REG_AD_STR_LIM				0x0C8
+#define MDSS_MDP_REG_AD_CALIB_AB			0x0CC
+#define MDSS_MDP_REG_AD_CALIB_CD			0x0D0
+#define MDSS_MDP_REG_AD_MODE_SEL			0x0D4
+#define MDSS_MDP_REG_AD_TFILT_CTRL			0x0D8
+#define MDSS_MDP_REG_AD_BL_MINMAX			0x0DC
+#define MDSS_MDP_REG_AD_BL				0x0E0
+#define MDSS_MDP_REG_AD_BL_MAX				0x0E8
+#define MDSS_MDP_REG_AD_AL				0x0EC
+#define MDSS_MDP_REG_AD_AL_MIN				0x0F0
+#define MDSS_MDP_REG_AD_AL_FILT				0x0F4
+#define MDSS_MDP_REG_AD_CFG_BUF				0x0F8
+#define MDSS_MDP_REG_AD_LUT_AL				0x100
+#define MDSS_MDP_REG_AD_TARG_STR			0x144
+#define MDSS_MDP_REG_AD_START_CALC			0x148
+#define MDSS_MDP_REG_AD_STR_OUT				0x14C
+#define MDSS_MDP_REG_AD_BL_OUT				0x154
+#define MDSS_MDP_REG_AD_CALC_DONE			0x158
+#define MDSS_MDP_REG_AD_FRAME_END			0x15C
+#define MDSS_MDP_REG_AD_PROCS_END			0x160
+#define MDSS_MDP_REG_AD_FRAME_START			0x164
+#define MDSS_MDP_REG_AD_PROCS_START			0x168
+#define MDSS_MDP_REG_AD_TILE_CTRL			0x16C
+
+enum mdss_mdp_dspp_index {
+	MDSS_MDP_DSPP0,
+	MDSS_MDP_DSPP1,
+	MDSS_MDP_DSPP2,
+	MDSS_MDP_DSPP3,
+	MDSS_MDP_MAX_DSPP
+};
+
+#define MDSS_MDP_REG_DSPP_OP_MODE			0x000
+#define MDSS_MDP_REG_DSPP_PCC_BASE			0x030
+#define MDSS_MDP_REG_DSPP_DITHER_DEPTH			0x150
+#define MDSS_MDP_REG_DSPP_HIST_CTL_BASE			0x210
+#define MDSS_MDP_REG_DSPP_HIST_DATA_BASE		0x22C
+#define MDSS_MDP_REG_DSPP_HIST_LUT_BASE			0x230
+#define MDSS_MDP_REG_DSPP_PA_BASE			0x238
+#define MDSS_MDP_REG_DSPP_SIX_ZONE_BASE			0x248
+#define MDSS_MDP_REG_DSPP_GAMUT_BASE			0x2DC
+#define MDSS_MDP_REG_DSPP_GC_BASE			0x2B0
+
+#define MDSS_MDP_DSPP_OP_IGC_LUT_EN			BIT(0)
+#define MDSS_MDP_DSPP_OP_PA_SAT_ZERO_EXP_EN		BIT(1)
+#define MDSS_MDP_DSPP_OP_PA_MEM_PROTECT_EN		BIT(2)
+#define MDSS_MDP_DSPP_OP_PCC_EN				BIT(4)
+#define MDSS_MDP_DSPP_OP_PA_MEM_COL_SKIN_MASK		BIT(5)
+#define MDSS_MDP_DSPP_OP_PA_MEM_COL_FOL_MASK		BIT(6)
+#define MDSS_MDP_DSPP_OP_PA_MEM_COL_SKY_MASK		BIT(7)
+#define MDSS_MDP_DSPP_OP_DST_DITHER_EN			BIT(8)
+#define MDSS_MDP_DSPP_OP_HIST_EN			BIT(16)
+#define MDSS_MDP_DSPP_OP_HIST_LUTV_EN			BIT(19)
+#define MDSS_MDP_DSPP_OP_PA_EN				BIT(20)
+#define MDSS_MDP_DSPP_OP_ARGC_LUT_EN			BIT(22)
+#define MDSS_MDP_DSPP_OP_GAMUT_EN			BIT(23)
+#define MDSS_MDP_DSPP_OP_GAMUT_PCC_ORDER		BIT(24)
+#define MDSS_MDP_DSPP_OP_PA_HUE_MASK			BIT(25)
+#define MDSS_MDP_DSPP_OP_PA_SAT_MASK			BIT(26)
+#define MDSS_MDP_DSPP_OP_PA_VAL_MASK			BIT(27)
+#define MDSS_MDP_DSPP_OP_PA_CONT_MASK			BIT(28)
+#define MDSS_MDP_DSPP_OP_PA_SIX_ZONE_HUE_MASK		BIT(29)
+#define MDSS_MDP_DSPP_OP_PA_SIX_ZONE_SAT_MASK		BIT(30)
+#define MDSS_MDP_DSPP_OP_PA_SIX_ZONE_VAL_MASK		BIT(31)
+
+enum mdss_mpd_intf_index {
+	MDSS_MDP_NO_INTF,
+	MDSS_MDP_INTF0,
+	MDSS_MDP_INTF1,
+	MDSS_MDP_INTF2,
+	MDSS_MDP_INTF3,
+	MDSS_MDP_MAX_INTF
+};
+
+#define MDSS_MDP_REG_INTF_TIMING_ENGINE_EN		0x000
+#define MDSS_MDP_REG_INTF_CONFIG			0x004
+#define MDSS_MDP_REG_INTF_HSYNC_CTL			0x008
+#define MDSS_MDP_REG_INTF_VSYNC_PERIOD_F0		0x00C
+#define MDSS_MDP_REG_INTF_VSYNC_PERIOD_F1		0x010
+#define MDSS_MDP_REG_INTF_VSYNC_PULSE_WIDTH_F0		0x014
+#define MDSS_MDP_REG_INTF_VSYNC_PULSE_WIDTH_F1		0x018
+#define MDSS_MDP_REG_INTF_DISPLAY_V_START_F0		0x01C
+#define MDSS_MDP_REG_INTF_DISPLAY_V_START_F1		0x020
+#define MDSS_MDP_REG_INTF_DISPLAY_V_END_F0		0x024
+#define MDSS_MDP_REG_INTF_DISPLAY_V_END_F1		0x028
+#define MDSS_MDP_REG_INTF_ACTIVE_V_START_F0		0x02C
+#define MDSS_MDP_REG_INTF_ACTIVE_V_START_F1		0x030
+#define MDSS_MDP_REG_INTF_ACTIVE_V_END_F0		0x034
+#define MDSS_MDP_REG_INTF_ACTIVE_V_END_F1		0x038
+#define MDSS_MDP_REG_INTF_DISPLAY_HCTL			0x03C
+#define MDSS_MDP_REG_INTF_ACTIVE_HCTL			0x040
+#define MDSS_MDP_REG_INTF_BORDER_COLOR			0x044
+#define MDSS_MDP_REG_INTF_UNDERFLOW_COLOR		0x048
+#define MDSS_MDP_REG_INTF_HSYNC_SKEW			0x04C
+#define MDSS_MDP_REG_INTF_POLARITY_CTL			0x050
+#define MDSS_MDP_REG_INTF_TEST_CTL			0x054
+#define MDSS_MDP_REG_INTF_TP_COLOR0			0x058
+#define MDSS_MDP_REG_INTF_TP_COLOR1			0x05C
+#define MDSS_MDP_REG_INTF_FRAME_LINE_COUNT_EN           0x0A8
+#define MDSS_MDP_REG_INTF_FRAME_COUNT                   0x0AC
+#define MDSS_MDP_REG_INTF_LINE_COUNT                    0x0B0
+
+#define MDSS_MDP_REG_INTF_DEFLICKER_CONFIG		0x0F0
+#define MDSS_MDP_REG_INTF_DEFLICKER_STRNG_COEFF		0x0F4
+#define MDSS_MDP_REG_INTF_DEFLICKER_WEAK_COEFF		0x0F8
+
+#define MDSS_MDP_REG_INTF_DSI_CMD_MODE_TRIGGER_EN	0x084
+#define MDSS_MDP_REG_INTF_PANEL_FORMAT			0x090
+#define MDSS_MDP_REG_INTF_TPG_ENABLE			0x100
+#define MDSS_MDP_REG_INTF_TPG_MAIN_CONTROL		0x104
+#define MDSS_MDP_REG_INTF_TPG_VIDEO_CONFIG		0x108
+#define MDSS_MDP_REG_INTF_TPG_COMPONENT_LIMITS		0x10C
+#define MDSS_MDP_REG_INTF_TPG_RECTANGLE			0x110
+#define MDSS_MDP_REG_INTF_TPG_INITIAL_VALUE		0x114
+#define MDSS_MDP_REG_INTF_TPG_BLK_WHITE_PATTERN_FRAMES	0x118
+#define MDSS_MDP_REG_INTF_TPG_RGB_MAPPING		0x11C
+#define MDSS_MDP_REG_INTF_PROG_FETCH_START		0x170
+#define MDSS_MDP_REG_INTF_INTR_EN			0x1C0
+#define MDSS_MDP_REG_INTF_INTR_STATUS			0x1C4
+#define MDSS_MDP_REG_INTF_INTR_CLEAR			0x1C8
+#define MDSS_MDP_REG_INTF_PROG_LINE_INTR_CONF		0x250
+#define MDSS_MDP_REG_INTF_VBLANK_END_CONF		0x264
+
+/*
+ * FRAME_LINE_COUNT_EN/FRAME_COUNT/LINE_COUNT are defined once above
+ * (0x0A8/0x0AC/0x0B0); the identical re-definitions were removed.
+ */
+#define MDSS_MDP_PANEL_FORMAT_RGB888			0x213F
+#define MDSS_MDP_PANEL_FORMAT_RGB666			0x212A
+
+#define MDSS_MDP_PANEL_FORMAT_PACK_ALIGN_MSB		BIT(7)
+
+enum mdss_mdp_pingpong_index {
+	MDSS_MDP_PINGPONG0,
+	MDSS_MDP_PINGPONG1,
+	MDSS_MDP_PINGPONG2,
+	MDSS_MDP_PINGPONG3,
+	MDSS_MDP_MAX_PINGPONG
+};
+
+#define MDSS_MDP_REG_PP_TEAR_CHECK_EN			0x000
+#define MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC		0x004
+#define MDSS_MDP_REG_PP_SYNC_CONFIG_HEIGHT		0x008
+#define MDSS_MDP_REG_PP_SYNC_WRCOUNT			0x00C
+#define MDSS_MDP_REG_PP_VSYNC_INIT_VAL			0x010
+#define MDSS_MDP_REG_PP_INT_COUNT_VAL			0x014
+#define MDSS_MDP_REG_PP_SYNC_THRESH			0x018
+#define MDSS_MDP_REG_PP_START_POS			0x01C
+#define MDSS_MDP_REG_PP_RD_PTR_IRQ			0x020
+#define MDSS_MDP_REG_PP_WR_PTR_IRQ			0x024
+#define MDSS_MDP_REG_PP_OUT_LINE_COUNT			0x028
+#define MDSS_MDP_REG_PP_LINE_COUNT			0x02C
+#define MDSS_MDP_REG_PP_AUTOREFRESH_CONFIG		0x030
+
+#define MDSS_MDP_REG_PP_FBC_MODE			0x034
+#define MDSS_MDP_REG_PP_FBC_BUDGET_CTL			0x038
+#define MDSS_MDP_REG_PP_FBC_LOSSY_MODE			0x03C
+#define MDSS_MDP_REG_PP_DSC_MODE			0x0a0
+#define MDSS_MDP_REG_PP_DCE_DATA_IN_SWAP		0x0ac
+#define MDSS_MDP_REG_PP_DCE_DATA_OUT_SWAP		0x0c8
+
+#define MDSS_MDP_DSC_0_OFFSET				0x80000
+#define MDSS_MDP_DSC_1_OFFSET				0x80400
+
+#define MDSS_MDP_REG_DSC_COMMON_MODE			0x000
+#define MDSS_MDP_REG_DSC_ENC				0x004
+#define MDSS_MDP_REG_DSC_PICTURE			0x008
+#define MDSS_MDP_REG_DSC_SLICE				0x00c
+#define MDSS_MDP_REG_DSC_CHUNK_SIZE			0x010
+#define MDSS_MDP_REG_DSC_DELAY				0x014
+#define MDSS_MDP_REG_DSC_SCALE_INITIAL			0x018
+#define MDSS_MDP_REG_DSC_SCALE_DEC_INTERVAL		0x01c
+#define MDSS_MDP_REG_DSC_SCALE_INC_INTERVAL		0x020
+#define MDSS_MDP_REG_DSC_FIRST_LINE_BPG_OFFSET		0x024
+#define MDSS_MDP_REG_DSC_BPG_OFFSET			0x028
+#define MDSS_MDP_REG_DSC_DSC_OFFSET			0x02c
+#define MDSS_MDP_REG_DSC_FLATNESS			0x030
+#define MDSS_MDP_REG_DSC_RC_MODEL_SIZE			0x034
+#define MDSS_MDP_REG_DSC_RC				0x038
+#define MDSS_MDP_REG_DSC_RC_BUF_THRESH			0x03c
+#define MDSS_MDP_REG_DSC_RANGE_MIN_QP			0x074
+#define MDSS_MDP_REG_DSC_RANGE_MAX_QP			0x0b0
+#define MDSS_MDP_REG_DSC_RANGE_BPG_OFFSET		0x0ec
+
+#define MDSS_MDP_REG_SMP_ALLOC_W0			0x00080
+#define MDSS_MDP_REG_SMP_ALLOC_R0			0x00130
+
+#define MDSS_MDP_UP_MISR_SEL			0x2A0
+#define MDSS_MDP_UP_MISR_CTRL_MDP		0x2A4
+#define MDSS_MDP_UP_MISR_SIGN_MDP		0x2A8
+#define MDSS_MDP_UP_MISR_LMIX_SEL_OFFSET	0x4C
+
+#define MDSS_MDP_LP_MISR_SEL			0x350
+#define MDSS_MDP_LP_MISR_CTRL_MDP		0x354
+#define MDSS_MDP_LP_MISR_CTRL_HDMI		0x358
+#define MDSS_MDP_LP_MISR_CTRL_EDP		0x35C
+#define MDSS_MDP_LP_MISR_CTRL_DSI0		0x360
+#define MDSS_MDP_LP_MISR_CTRL_DSI1		0x364
+
+#define MDSS_MDP_LP_MISR_SIGN_MDP		0x368
+#define MDSS_MDP_LP_MISR_SIGN_EDP		0x36C
+#define MDSS_MDP_LP_MISR_SIGN_HDMI		0x370
+#define MDSS_MDP_LP_MISR_SIGN_DSI0		0x374
+#define MDSS_MDP_LP_MISR_SIGN_DSI1		0x378
+
+#define MDSS_MDP_MISR_CTRL_FRAME_COUNT_MASK	0xFF
+#define MDSS_MDP_MISR_CTRL_ENABLE		BIT(8)
+#define MDSS_MDP_MISR_CTRL_STATUS		BIT(9)
+#define MDSS_MDP_MISR_CTRL_STATUS_CLEAR	BIT(10)
+#define MDSS_MDP_LP_MISR_CTRL_FREE_RUN_MASK	BIT(31)
+
+#define MDSS_MDP_LP_MISR_SEL_LMIX0_BLEND	0x08
+#define MDSS_MDP_LP_MISR_SEL_LMIX0_GC		0x09
+#define MDSS_MDP_LP_MISR_SEL_LMIX1_BLEND	0x0A
+#define MDSS_MDP_LP_MISR_SEL_LMIX1_GC		0x0B
+#define MDSS_MDP_LP_MISR_SEL_LMIX2_BLEND	0x0C
+#define MDSS_MDP_LP_MISR_SEL_LMIX2_GC		0x0D
+#define MDSS_MDP_LP_MISR_SEL_LMIX3_BLEND	0x0E
+#define MDSS_MDP_LP_MISR_SEL_LMIX3_GC		0x0F
+#define MDSS_MDP_LP_MISR_SEL_LMIX4_BLEND	0x10
+#define MDSS_MDP_LP_MISR_SEL_LMIX4_GC		0x11
+
+#define MDSS_MDP_LAYER_MIXER_MISR_CTRL		0x380
+#define MDSS_MDP_LAYER_MIXER_MISR_SIGNATURE	\
+		(MDSS_MDP_LAYER_MIXER_MISR_CTRL + 0x4)
+
+#define MDSS_MDP_INTF_MISR_CTRL		0x180
+#define MDSS_MDP_INTF_MISR_SIGNATURE		(MDSS_MDP_INTF_MISR_CTRL + 0x4)
+#define MDSS_MDP_INTF_CMD_MISR_CTRL		(MDSS_MDP_INTF_MISR_CTRL + 0x8)
+#define MDSS_MDP_INTF_CMD_MISR_SIGNATURE	(MDSS_MDP_INTF_MISR_CTRL + 0xC)
+
+#define MDSS_MDP_REG_CDM_CSC_10_OPMODE                  0x000
+#define MDSS_MDP_REG_CDM_CSC_10_BASE                    0x004
+
+#define MDSS_MDP_REG_CDM_CDWN2_OP_MODE                  0x100
+#define MDSS_MDP_REG_CDM_CDWN2_CLAMP_OUT                0x104
+#define MDSS_MDP_REG_CDM_CDWN2_PARAMS_3D_0              0x108
+#define MDSS_MDP_REG_CDM_CDWN2_PARAMS_3D_1              0x10C
+#define MDSS_MDP_REG_CDM_CDWN2_COEFF_COSITE_H_0         0x110
+#define MDSS_MDP_REG_CDM_CDWN2_COEFF_COSITE_H_1         0x114
+#define MDSS_MDP_REG_CDM_CDWN2_COEFF_COSITE_H_2         0x118
+#define MDSS_MDP_REG_CDM_CDWN2_COEFF_OFFSITE_H_0        0x11C
+#define MDSS_MDP_REG_CDM_CDWN2_COEFF_OFFSITE_H_1        0x120
+#define MDSS_MDP_REG_CDM_CDWN2_COEFF_OFFSITE_H_2        0x124
+#define MDSS_MDP_REG_CDM_CDWN2_COEFF_COSITE_V           0x128
+#define MDSS_MDP_REG_CDM_CDWN2_COEFF_OFFSITE_V          0x12C
+#define MDSS_MDP_REG_CDM_CDWN2_OUT_SIZE                 0x130
+
+#define MDSS_MDP_REG_CDM_HDMI_PACK_OP_MODE              0x200
+
+/* Following offsets are with respect to MDP base */
+#define MDSS_MDP_MDP_OUT_CTL_0                          0x410
+/*
+ * Note: MDSS_MDP_INTF_CMD_MISR_CTRL/SIGNATURE are INTF-relative and are
+ * already defined above; the stray duplicate definitions here (which did
+ * not belong under the "MDP base" comment) were removed.
+ */
+/* following offsets are with respect to MDP VBIF base */
+#define MMSS_VBIF_CLKON			0x4
+#define MMSS_VBIF_RD_LIM_CONF			0x0B0
+#define MMSS_VBIF_WR_LIM_CONF			0x0C0
+#define MMSS_VBIF_OUT_RD_LIM_CONF0		0x0D0
+
+#define MMSS_VBIF_XIN_HALT_CTRL0	0x200
+#define MMSS_VBIF_XIN_HALT_CTRL1	0x204
+#define MMSS_VBIF_AXI_HALT_CTRL0	0x208
+#define MMSS_VBIF_AXI_HALT_CTRL1	0x20C
+#define MMSS_VBIF_TEST_BUS_OUT_CTRL	0x210
+#define MMSS_VBIF_TEST_BUS_OUT		0x230
+
+#define MDSS_VBIF_QOS_REMAP_BASE	0x020
+#define MDSS_VBIF_QOS_REMAP_ENTRIES	0x4
+
+#define MDSS_VBIF_QOS_RP_REMAP_BASE	0x550
+#define MDSS_VBIF_QOS_LVL_REMAP_BASE	0x570
+
+#define MDSS_VBIF_FIXED_SORT_EN	0x30
+#define MDSS_VBIF_FIXED_SORT_SEL0	0x34
+
+#define MDSS_MDP_REG_TRAFFIC_SHAPER_EN			BIT(31)
+#define MDSS_MDP_REG_TRAFFIC_SHAPER_RD_CLIENT(num)	(0x030 + (num * 4))
+#define MDSS_MDP_REG_TRAFFIC_SHAPER_WR_CLIENT(num)	(0x060 + (num * 4))
+#define MDSS_MDP_REG_TRAFFIC_SHAPER_FIXPOINT_FACTOR	4
+
+#endif
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
new file mode 100644
index 0000000..84218e3
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
@@ -0,0 +1,3528 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/pm_runtime.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+
+#include "mdss_mdp.h"
+#include "mdss_panel.h"
+#include "mdss_debug.h"
+#include "mdss_mdp_trace.h"
+#include "mdss_dsi_clk.h"
+#include <linux/interrupt.h>
+
+#define MAX_RECOVERY_TRIALS 10
+#define MAX_SESSIONS 2
+
+#define SPLIT_MIXER_OFFSET 0x800
+
+#define STOP_TIMEOUT(hz) msecs_to_jiffies((1000 / hz) * (6 + 2))
+#define POWER_COLLAPSE_TIME msecs_to_jiffies(100)
+#define CMD_MODE_IDLE_TIMEOUT msecs_to_jiffies(16 * 4)
+#define INPUT_EVENT_HANDLER_DELAY_USECS (16000 * 4)
+#define AUTOREFRESH_MAX_FRAME_CNT 6
+
+static DEFINE_MUTEX(cmd_clk_mtx);
+
+static DEFINE_MUTEX(cmd_off_mtx);
+
+enum mdss_mdp_cmd_autorefresh_state {
+	MDP_AUTOREFRESH_OFF,
+	MDP_AUTOREFRESH_ON_REQUESTED,
+	MDP_AUTOREFRESH_ON,
+	MDP_AUTOREFRESH_OFF_REQUESTED
+};
+
+/*
+ * struct mdss_mdp_cmd_ctx - per-session state for a command-mode interface.
+ *
+ * One context exists per session (see MAX_SESSIONS). It tracks the
+ * pingpong blocks in use, kickoff/read-pointer bookkeeping, clock
+ * gating work items and the autorefresh state machine.
+ */
+struct mdss_mdp_cmd_ctx {
+	struct mdss_mdp_ctl *ctl;	/* owning control path */
+
+	u32 default_pp_num;	/* pingpong normally driven by this ctx */
+	u32 current_pp_num;	/* pingpong in use for the current frame */
+	/*
+	 * aux_pp_num will be set only when topology is using split-lm.
+	 * aux_pp_num will be used only when MDSS_QUIRK_DSC_RIGHT_ONLY_PU
+	 * quirk is set and on following partial updates.
+	 *
+	 * right-only update on DUAL_LM_SINGLE_DISPLAY with DSC_MERGE
+	 * right-only update on DUAL_LM_DUAL_DISPLAY with DSC
+	 */
+	u32 aux_pp_num;
+
+	u8 ref_cnt;	/* non-zero while the ctx slot is in use */
+	struct completion stop_comp;
+	atomic_t rdptr_cnt;	/* outstanding read-pointer events */
+	wait_queue_head_t rdptr_waitq;
+	struct completion pp_done;
+	wait_queue_head_t pp_waitq;
+	struct list_head vsync_handlers;
+	struct list_head lineptr_handlers;
+	int panel_power_state;
+	atomic_t koff_cnt;	/* kickoffs issued but not yet pp_done */
+	u32 intf_stopped;
+	struct mutex mdp_rdptr_lock;
+	struct mutex mdp_wrptr_lock;
+	struct mutex clk_mtx;
+	spinlock_t clk_lock;
+	spinlock_t koff_lock;
+	struct work_struct gate_clk_work;	/* gate clocks after pp_done */
+	struct delayed_work delayed_off_clk_work;	/* full off after idle */
+	struct work_struct pp_done_work;
+	struct work_struct early_wakeup_clk_work;
+	atomic_t pp_done_cnt;
+	struct completion rdptr_done;
+
+	/*
+	 * While autorefresh is on, partial update is not supported. So
+	 * autorefresh state machine is always maintained through master ctx.
+	 */
+	struct mutex autorefresh_lock;
+	struct completion autorefresh_ppdone;
+	enum mdss_mdp_cmd_autorefresh_state autorefresh_state;
+	int autorefresh_frame_cnt;
+	bool ignore_external_te;
+	struct completion autorefresh_done;
+
+	int vsync_irq_cnt;
+	int lineptr_irq_cnt;
+	bool lineptr_enabled;
+	u32 prev_wr_ptr_irq;
+
+	struct mdss_intf_recovery intf_recovery;
+	struct mdss_intf_recovery intf_mdp_callback;
+	struct mdss_mdp_cmd_ctx *sync_ctx; /* for partial update */
+	u32 pp_timeout_report_cnt;	/* consecutive pingpong timeouts */
+	bool pingpong_split_slave;	/* ctx drives the slave PP block */
+};
+
+/*
+ * Pool of command-mode contexts, one slot per session.
+ * NOTE(review): file-scope but not static -- confirm nothing externs it;
+ * if not, it should be made static.
+ */
+struct mdss_mdp_cmd_ctx mdss_mdp_cmd_ctx_list[MAX_SESSIONS];
+
+static int mdss_mdp_cmd_do_notifier(struct mdss_mdp_cmd_ctx *ctx);
+static inline void mdss_mdp_cmd_clk_on(struct mdss_mdp_cmd_ctx *ctx);
+static inline void mdss_mdp_cmd_clk_off(struct mdss_mdp_cmd_ctx *ctx);
+static int mdss_mdp_cmd_wait4pingpong(struct mdss_mdp_ctl *ctl, void *arg);
+static int mdss_mdp_disable_autorefresh(struct mdss_mdp_ctl *ctl,
+	struct mdss_mdp_ctl *sctl);
+static int mdss_mdp_setup_vsync(struct mdss_mdp_cmd_ctx *ctx, bool enable);
+
+/*
+ * __mdss_mdp_cmd_is_aux_pp_needed() - decide if the auxiliary pingpong
+ * must be used for the current partial update.
+ *
+ * True only for a master ctl on hardware with the
+ * MDSS_QUIRK_DSC_RIGHT_ONLY_PU quirk, when DSC compression is active,
+ * the topology is DUAL_LM_DUAL_DISPLAY (or DUAL_LM_SINGLE_DISPLAY with a
+ * single DSC encoder), and the update touches only the right mixer
+ * (left ROI invalid, right ROI valid).
+ */
+static bool __mdss_mdp_cmd_is_aux_pp_needed(struct mdss_data_type *mdata,
+	struct mdss_mdp_ctl *mctl)
+{
+	return (mdata && mctl && mctl->is_master &&
+		mdss_has_quirk(mdata, MDSS_QUIRK_DSC_RIGHT_ONLY_PU) &&
+		is_dsc_compression(&mctl->panel_data->panel_info) &&
+		((mctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) ||
+		 ((mctl->mfd->split_mode == MDP_DUAL_LM_SINGLE_DISPLAY) &&
+		  (mctl->panel_data->panel_info.dsc_enc_total == 1))) &&
+		!mctl->mixer_left->valid_roi &&
+		mctl->mixer_right->valid_roi);
+}
+
+/* Return true if this context's panel power state is fully off. */
+static bool __mdss_mdp_cmd_is_panel_power_off(struct mdss_mdp_cmd_ctx *ctx)
+{
+	return mdss_panel_is_power_off(ctx->panel_power_state);
+}
+
+/* Return true if this context's panel is powered on in interactive mode. */
+static bool __mdss_mdp_cmd_is_panel_power_on_interactive(
+		struct mdss_mdp_cmd_ctx *ctx)
+{
+	return mdss_panel_is_power_on_interactive(ctx->panel_power_state);
+}
+
+/*
+ * mdss_mdp_cmd_line_count() - read the current pingpong line counter.
+ *
+ * Reads the PP interrupt count register of the left (or, as fallback,
+ * right) mixer and normalizes it against the vsync init value so the
+ * result counts lines from the start of the frame. Returns 0xffff when
+ * no mixer is available or the register values are inconsistent
+ * (height < init). Clocks are bumped on for the register reads.
+ */
+static inline u32 mdss_mdp_cmd_line_count(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_mixer *mixer;
+	u32 cnt = 0xffff;	/* init it to an invalid value */
+	u32 init;
+	u32 height;
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+	mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT);
+	if (!mixer) {
+		mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_RIGHT);
+		if (!mixer) {
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+			goto exit;
+		}
+	}
+
+	init = mdss_mdp_pingpong_read(mixer->pingpong_base,
+		MDSS_MDP_REG_PP_VSYNC_INIT_VAL) & 0xffff;
+	height = mdss_mdp_pingpong_read(mixer->pingpong_base,
+		MDSS_MDP_REG_PP_SYNC_CONFIG_HEIGHT) & 0xffff;
+
+	if (height < init) {
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+		goto exit;
+	}
+
+	cnt = mdss_mdp_pingpong_read(mixer->pingpong_base,
+		MDSS_MDP_REG_PP_INT_COUNT_VAL) & 0xffff;
+
+	if (cnt < init)		/* wrap around happened at height */
+		cnt += (height - init);
+	else
+		cnt -= init;
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+
+	pr_debug("cnt=%d init=%d height=%d\n", cnt, init, height);
+exit:
+	return cnt;
+}
+
+/*
+ * mdss_mdp_tearcheck_enable() - enable/disable HW tear check.
+ *
+ * Writes PP_TEAR_CHECK_EN for every pingpong block involved in this
+ * display: the left mixer's PP, the split-ctl's PP (dual-ctl split),
+ * the slave PP (pingpong split) and the right PP (dual-LM single
+ * display with partial update). The bit is set only when both the
+ * panel requests tear check (te->tear_check_en) and @enable is true.
+ *
+ * Returns 0 on success, -ENODEV if panel data or the left mixer is
+ * missing.
+ */
+static int mdss_mdp_tearcheck_enable(struct mdss_mdp_ctl *ctl, bool enable)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_ctl *sctl;
+	struct mdss_mdp_pp_tear_check *te;
+	struct mdss_mdp_mixer *mixer =
+		mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT);
+
+	if (IS_ERR_OR_NULL(ctl->panel_data)) {
+		pr_err("no panel data\n");
+		return -ENODEV;
+	}
+
+	if (IS_ERR_OR_NULL(mixer)) {
+		pr_err("mixer not configured\n");
+		return -ENODEV;
+	}
+
+	sctl = mdss_mdp_get_split_ctl(ctl);
+	te = &ctl->panel_data->panel_info.te;
+
+	pr_debug("%s: enable=%d\n", __func__, enable);
+
+	mdss_mdp_pingpong_write(mixer->pingpong_base,
+		MDSS_MDP_REG_PP_TEAR_CHECK_EN,
+		(te ? te->tear_check_en : 0) && enable);
+
+	/*
+	 * When there are two controls, driver needs to enable
+	 * tear check configuration for both.
+	 */
+	if (sctl) {
+		mixer = mdss_mdp_mixer_get(sctl, MDSS_MDP_MIXER_MUX_LEFT);
+		te = &sctl->panel_data->panel_info.te;
+		mdss_mdp_pingpong_write(mixer->pingpong_base,
+				MDSS_MDP_REG_PP_TEAR_CHECK_EN,
+				(te ? te->tear_check_en : 0) && enable);
+	}
+
+	/*
+	 * In the case of pingpong split, there is no second
+	 * control and enables only slave tear check block as
+	 * defined in slave_pingpong_base.
+	 *
+	 * NOTE(review): if sctl was non-NULL above, 'te' now points at the
+	 * slave ctl's settings; presumably sctl and pingpong-split are
+	 * mutually exclusive -- confirm.
+	 */
+	if (is_pingpong_split(ctl->mfd))
+		mdss_mdp_pingpong_write(mdata->slave_pingpong_base,
+				MDSS_MDP_REG_PP_TEAR_CHECK_EN,
+				(te ? te->tear_check_en : 0) && enable);
+
+	/*
+	 * In case of DUAL_LM_SINGLE_DISPLAY, always keep right PP enabled
+	 * if partial update is enabled. So when right-only update comes then
+	 * by changing CTL topology, HW switches directly to right PP.
+	 *
+	 * NOTE(review): unlike mdss_mdp_cmd_tearcheck_setup(), the right
+	 * mixer is not NULL-checked here before dereference -- confirm it
+	 * always exists for this topology.
+	 */
+	if (ctl->panel_data->panel_info.partial_update_enabled &&
+	    is_dual_lm_single_display(ctl->mfd)) {
+
+		mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_RIGHT);
+		mdss_mdp_pingpong_write(mixer->pingpong_base,
+				MDSS_MDP_REG_PP_TEAR_CHECK_EN,
+				(te ? te->tear_check_en : 0) && enable);
+
+	}
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_cmd_tearcheck_cfg() - program the tear-check block for one PP.
+ *
+ * Derives the vsync counter rate (vclks per line) from the vsync clock,
+ * the panel's vertical total and frame rate, scaled by the panel TE
+ * refresh ratio (te->refx100, defaulting to 60.00 Hz when zero), then
+ * programs the PP sync/threshold/pointer-IRQ registers. For a
+ * pingpong-split slave context the slave PP register block is written
+ * instead of the mixer's own.
+ *
+ * @locked: passed through to the clock helpers to indicate the caller
+ *          already holds the relevant lock.
+ *
+ * Returns 0 on success, -ENODEV if panel data is missing.
+ *
+ * NOTE(review): 'te' is the address of an embedded member, so it can
+ * never be NULL; it is dereferenced unconditionally (te->refx100) before
+ * the later 'te ?' guards, which are therefore dead code.
+ */
+static int mdss_mdp_cmd_tearcheck_cfg(struct mdss_mdp_mixer *mixer,
+		struct mdss_mdp_cmd_ctx *ctx, bool locked)
+{
+	struct mdss_mdp_pp_tear_check *te = NULL;
+	struct mdss_panel_info *pinfo;
+	u32 vsync_clk_speed_hz, total_lines, vclks_line, cfg = 0;
+	char __iomem *pingpong_base;
+	struct mdss_mdp_ctl *ctl = ctx->ctl;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (IS_ERR_OR_NULL(ctl->panel_data)) {
+		pr_err("no panel data\n");
+		return -ENODEV;
+	}
+
+	pinfo = &ctl->panel_data->panel_info;
+	te = &ctl->panel_data->panel_info.te;
+
+	mdss_mdp_vsync_clk_enable(1, locked);
+
+	vsync_clk_speed_hz =
+		mdss_mdp_get_clk_rate(MDSS_CLK_MDP_VSYNC, locked);
+
+	total_lines = mdss_panel_get_vtotal(pinfo);
+
+	total_lines *= pinfo->mipi.frame_rate;
+
+	/* counter ticks per display line; 0 if total_lines is 0 */
+	vclks_line = (total_lines) ? vsync_clk_speed_hz/total_lines : 0;
+
+	cfg = BIT(19);
+	if (pinfo->mipi.hw_vsync_mode)
+		cfg |= BIT(20);
+
+	/* rescale by panel TE rate (refx100 = refresh rate * 100) */
+	if (te->refx100) {
+		vclks_line = vclks_line * pinfo->mipi.frame_rate *
+			100 / te->refx100;
+	} else {
+		pr_warn("refx100 cannot be zero! Use 6000 as default\n");
+		vclks_line = vclks_line * pinfo->mipi.frame_rate *
+			100 / 6000;
+	}
+
+	cfg |= vclks_line;
+
+	pr_debug("%s: yres=%d vclks=%x height=%d init=%d rd=%d start=%d wr=%d\n",
+		__func__, pinfo->yres, vclks_line, te->sync_cfg_height,
+		te->vsync_init_val, te->rd_ptr_irq, te->start_pos,
+		te->wr_ptr_irq);
+	pr_debug("thrd_start =%d thrd_cont=%d pp_split=%d\n",
+		te->sync_threshold_start, te->sync_threshold_continue,
+		ctx->pingpong_split_slave);
+
+	pingpong_base = mixer->pingpong_base;
+
+	/* slave context programs the dedicated slave PP block */
+	if (ctx->pingpong_split_slave)
+		pingpong_base = mdata->slave_pingpong_base;
+
+	mdss_mdp_pingpong_write(pingpong_base,
+		MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC, cfg);
+	mdss_mdp_pingpong_write(pingpong_base,
+		MDSS_MDP_REG_PP_SYNC_CONFIG_HEIGHT,
+		te ? te->sync_cfg_height : 0);
+	mdss_mdp_pingpong_write(pingpong_base,
+		MDSS_MDP_REG_PP_VSYNC_INIT_VAL,
+		te ? te->vsync_init_val : 0);
+	mdss_mdp_pingpong_write(pingpong_base,
+		MDSS_MDP_REG_PP_RD_PTR_IRQ,
+		te ? te->rd_ptr_irq : 0);
+	mdss_mdp_pingpong_write(pingpong_base,
+		MDSS_MDP_REG_PP_WR_PTR_IRQ,
+		te ? te->wr_ptr_irq : 0);
+	mdss_mdp_pingpong_write(pingpong_base,
+		MDSS_MDP_REG_PP_START_POS,
+		te ? te->start_pos : 0);
+	mdss_mdp_pingpong_write(pingpong_base,
+		MDSS_MDP_REG_PP_SYNC_THRESH,
+		te ? ((te->sync_threshold_continue << 16) |
+		 te->sync_threshold_start) : 0);
+	mdss_mdp_pingpong_write(pingpong_base,
+		MDSS_MDP_REG_PP_SYNC_WRCOUNT,
+		te ? (te->start_pos + te->sync_threshold_start + 1) : 0);
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_cmd_tearcheck_setup() - configure tear check for a cmd context.
+ *
+ * First clears any autorefresh left enabled by the bootloader/splash
+ * (BIT(31) of PP_AUTOREFRESH_CONFIG) on the left PP and, where present,
+ * the slave PP (pingpong split) and right PP (split LM), to avoid
+ * corruption. Then programs tear check on the left mixer and, for
+ * DUAL_LM_SINGLE_DISPLAY with partial update, also on the right mixer
+ * so the HW can switch directly to the right PP on right-only updates.
+ *
+ * Returns 0 on success or the error from mdss_mdp_cmd_tearcheck_cfg().
+ */
+static int mdss_mdp_cmd_tearcheck_setup(struct mdss_mdp_cmd_ctx *ctx,
+		bool locked)
+{
+	int rc = 0;
+	struct mdss_mdp_mixer *mixer = NULL, *mixer_right = NULL;
+	struct mdss_mdp_ctl *ctl = ctx->ctl;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 offset = 0;
+
+	mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT);
+	if (mixer) {
+		/*
+		 * Disable auto refresh mode, if enabled in splash to
+		 * avoid corruption.
+		 */
+		if (mdss_mdp_pingpong_read(mixer->pingpong_base,
+			MDSS_MDP_REG_PP_AUTOREFRESH_CONFIG) & BIT(31)) {
+			offset = MDSS_MDP_REG_PP_AUTOREFRESH_CONFIG;
+			if (is_pingpong_split(ctl->mfd))
+				writel_relaxed(0x0,
+					(mdata->slave_pingpong_base + offset));
+			if (is_split_lm(ctl->mfd)) {
+				mixer_right =
+					mdss_mdp_mixer_get(ctl,
+						MDSS_MDP_MIXER_MUX_RIGHT);
+				if (mixer_right)
+					writel_relaxed(0x0,
+					(mixer_right->pingpong_base + offset));
+			}
+			mdss_mdp_pingpong_write(mixer->pingpong_base,
+				MDSS_MDP_REG_PP_AUTOREFRESH_CONFIG, 0x0);
+			pr_debug("%s: disabling auto refresh\n", __func__);
+		}
+		rc = mdss_mdp_cmd_tearcheck_cfg(mixer, ctx, locked);
+		if (rc)
+			goto err;
+	}
+
+	/*
+	 * In case of DUAL_LM_SINGLE_DISPLAY, always keep right PP enabled
+	 * if partial update is enabled. So when right-only update comes then
+	 * by changing CTL topology, HW switches directly to right PP.
+	 */
+	if (ctl->panel_data->panel_info.partial_update_enabled &&
+	    is_dual_lm_single_display(ctl->mfd)) {
+
+		mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_RIGHT);
+		if (mixer)
+			rc = mdss_mdp_cmd_tearcheck_cfg(mixer, ctx, locked);
+	}
+err:
+	return rc;
+}
+
+/**
+ * enum mdp_rsrc_ctl_events - events for the resource control state machine
+ * @MDP_RSRC_CTL_EVENT_KICKOFF:
+ *	This event happens at NORMAL priority.
+ *	Event that signals the start of the transfer, regardless of the
+ *	state at which we enter this state (ON/OFF or GATE),
+ *	we must ensure that power state is ON when we return from this
+ *	event.
+ *
+ * @MDP_RSRC_CTL_EVENT_PP_DONE:
+ *	This event happens at INTERRUPT level.
+ *	Event signals the end of the data transfer, when getting this
+ *	event we should have been in ON state, since a transfer was
+ *	ongoing (if this is not the case, then
+ *	there is a bug).
+ *	Since this event is received at interrupt level, by the end of
+ *	the event we haven't changed the power state, but scheduled
+ *	work items to do the transition, so by the end of this event:
+ *	1. A work item is scheduled to go to gate state as soon as
+ *		possible (as soon as scheduler give us the chance)
+ *	2. A delayed work is scheduled to go to OFF after
+ *		CMD_MODE_IDLE_TIMEOUT time. Power State will be updated
+ *		at the end of each work item, to make sure we update
+ *		the status once the transition is fully done.
+ *
+ * @MDP_RSRC_CTL_EVENT_STOP:
+ *	This event happens at NORMAL priority.
+ *	When we get this event, we are expected to wait to finish any
+ *	pending data transfer and turn off all the clocks/resources,
+ *	so after return from this event we must be in off
+ *	state.
+ *
+ * @MDP_RSRC_CTL_EVENT_EARLY_WAKE_UP:
+ *	This event happens at NORMAL priority from a work item.
+ *	Event signals that there will be a frame update soon and mdp should wake
+ *	up early to update the frame with little latency.
+ */
+enum mdp_rsrc_ctl_events {
+	MDP_RSRC_CTL_EVENT_KICKOFF = 1,
+	MDP_RSRC_CTL_EVENT_PP_DONE,
+	MDP_RSRC_CTL_EVENT_STOP,
+	MDP_RSRC_CTL_EVENT_EARLY_WAKE_UP
+};
+
+/* Clock/power states for the command-mode resource-control state machine. */
+enum {
+	MDP_RSRC_CTL_STATE_OFF,
+	MDP_RSRC_CTL_STATE_ON,
+	MDP_RSRC_CTL_STATE_GATE,
+};
+
+/* helper functions for debugging */
+/* Map an mdp_rsrc_ctl_events value to a printable name (debug logging). */
+static char *get_sw_event_name(u32 sw_event)
+{
+	switch (sw_event) {
+	case MDP_RSRC_CTL_EVENT_KICKOFF:
+		return "KICKOFF";
+	case MDP_RSRC_CTL_EVENT_PP_DONE:
+		return "PP_DONE";
+	case MDP_RSRC_CTL_EVENT_STOP:
+		return "STOP";
+	case MDP_RSRC_CTL_EVENT_EARLY_WAKE_UP:
+		return "EARLY_WAKE_UP";
+	default:
+		return "UNKNOWN";
+	}
+}
+
+static char *get_clk_pwr_state_name(u32 pwr_state)
+{
+	/* map a resource power state to a printable name */
+	if (pwr_state == MDP_RSRC_CTL_STATE_ON)
+		return "STATE_ON";
+	if (pwr_state == MDP_RSRC_CTL_STATE_OFF)
+		return "STATE_OFF";
+	if (pwr_state == MDP_RSRC_CTL_STATE_GATE)
+		return "STATE_GATE";
+
+	return "UNKNOWN";
+}
+
+/**
+ * mdss_mdp_get_split_display_ctls() - get the display controllers
+ * @ctl: Pointer to pointer to the controller used to do the operation.
+ *	This can be the pointer to the master or slave of a display with
+ *	the MDP_DUAL_LM_DUAL_DISPLAY split mode.
+ * @sctl: Pointer to pointer where it is expected to be set the slave
+ *	controller. Function does not expect any input parameter here.
+ *
+ * This function will populate the pointers to pointers with the controllers of
+ * the split display ordered in such a way that the first input parameter will be
+ * populated with the master controller and second parameter will be populated
+ * with the slave controller, so the caller can assume both controllers are set
+ * in the right order after return.
+ *
+ * This function can only be called for split configuration that uses two
+ * controllers, it expects that first pointer is the one passed to do the
+ * operation and it can be either the pointer of the master or slave,
+ * since is the job of this function to find and accommodate the master/slave
+ * controllers accordingly.
+ *
+ * Return: 0 - succeed, otherwise - fail
+ */
+int mdss_mdp_get_split_display_ctls(struct mdss_mdp_ctl **ctl,
+	struct mdss_mdp_ctl **sctl)
+{
+	int rc = 0;
+	*sctl = NULL;
+
+	if (*ctl == NULL) {
+		pr_err("%s invalid ctl\n", __func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if ((*ctl)->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
+		*sctl = mdss_mdp_get_split_ctl(*ctl);
+		if (*sctl) {
+			/* pointers are in the correct order */
+			pr_debug("%s ctls in correct order ctl:%d sctl:%d\n",
+				__func__, (*ctl)->num, (*sctl)->num);
+			goto exit;
+		} else {
+			/*
+			 * If we have a split display and we didn't find the
+			 * Slave controller from the Master, this means that
+			 * ctl is the slave controller, so look for the Master
+			 */
+			*sctl = mdss_mdp_get_main_ctl(*ctl);
+			if (!(*sctl)) {
+				/*
+				 * Bad state, this shouldn't happen, we should
+				 * be having both controllers since we are in
+				 * dual-lm, dual-display. Fail here instead of
+				 * swapping a NULL master into @ctl, which
+				 * would make callers dereference NULL.
+				 */
+				pr_err("%s cannot find master ctl\n",
+					__func__);
+				WARN_ON(1);
+				rc = -EINVAL;
+				goto exit;
+			}
+			/*
+			 * We have both controllers but sctl has the Master,
+			 * swap the pointers so we can keep the master in the
+			 * ctl pointer and control the order in the power
+			 * sequence.
+			 */
+			pr_debug("ctl is not the master, swap pointers\n");
+			swap(*ctl, *sctl);
+		}
+	} else {
+		pr_debug("%s no split mode:%d\n", __func__,
+			(*ctl)->mfd->split_mode);
+	}
+exit:
+	return rc;
+}
+
+/**
+ * mdss_mdp_resource_control() - control the state of mdp resources
+ * @ctl: pointer to controller to notify the event.
+ * @sw_event: software event to modify the state of the resources.
+ *
+ * This function implements a state machine to control the state of
+ * the mdp resources (clocks, bw, mmu), the states can be ON, OFF and GATE,
+ * transition between each state is controlled through the MDP_RSRC_CTL_EVENT_
+ * events.
+ *
+ * Return: 0 - succeed, otherwise - fail
+ */
+int mdss_mdp_resource_control(struct mdss_mdp_ctl *ctl, u32 sw_event)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(ctl->mfd);
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_ctl *sctl = NULL;
+	struct mdss_mdp_cmd_ctx *ctx, *sctx = NULL;
+	struct dsi_panel_clk_ctrl clk_ctrl;
+	u32 status;
+	int rc = 0;
+	bool schedule_off = false;
+
+	/* Get both controllers in the correct order for dual displays */
+	mdss_mdp_get_split_display_ctls(&ctl, &sctl);
+
+	ctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx) {
+		pr_err("%s invalid ctx\n", __func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (sctl)
+		sctx = (struct mdss_mdp_cmd_ctx *) sctl->intf_ctx[MASTER_CTX];
+
+	/* In pingpong split we have single controller, dual context */
+	if (is_pingpong_split(ctl->mfd))
+		sctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[SLAVE_CTX];
+
+	pr_debug("%pS-->%s: task:%s ctl:%d pwr_state:%s event:%s\n",
+		__builtin_return_address(0), __func__,
+		current->group_leader->comm, ctl->num,
+		get_clk_pwr_state_name(mdp5_data->resources_state),
+		get_sw_event_name(sw_event));
+
+	MDSS_XLOG(ctl->num, mdp5_data->resources_state, sw_event,
+		XLOG_FUNC_ENTRY);
+
+	switch (sw_event) {
+	case MDP_RSRC_CTL_EVENT_KICKOFF:
+		/*
+		 * Cancel any pending work items:
+		 * If only the POWER-OFF work was canceled:
+		 *	Only UNGATE the clocks (resources should be ON)
+		 * If GATE && POWER-OFF were canceled:
+		 *	UNGATE and POWER-ON
+		 * If only GATE was canceled:
+		 *	something can be wrong, OFF should have been
+		 *	canceled as well.
+		 */
+
+		/* update the active only vote */
+		mdata->ao_bw_uc_idx = mdata->curr_bw_uc_idx;
+
+		/* Cancel GATE Work Item */
+		if (cancel_work_sync(&ctx->gate_clk_work)) {
+			pr_debug("%s gate work canceled\n", __func__);
+
+			if (mdp5_data->resources_state !=
+					MDP_RSRC_CTL_STATE_ON)
+				pr_debug("%s unexpected power state\n",
+					__func__);
+		}
+
+		/* Cancel OFF Work Item  */
+		if (cancel_delayed_work_sync(&ctx->delayed_off_clk_work)) {
+			pr_debug("%s off work canceled\n", __func__);
+
+			if (mdp5_data->resources_state ==
+					MDP_RSRC_CTL_STATE_OFF)
+				pr_debug("%s unexpected OFF state\n",
+					__func__);
+		}
+
+		mutex_lock(&ctl->rsrc_lock);
+		MDSS_XLOG(ctl->num, mdp5_data->resources_state, sw_event, 0x11);
+		/* Transition OFF->ON || GATE->ON (enable clocks) */
+		if ((mdp5_data->resources_state == MDP_RSRC_CTL_STATE_OFF) ||
+			(mdp5_data->resources_state ==
+			MDP_RSRC_CTL_STATE_GATE)) {
+			u32 flags = CTL_INTF_EVENT_FLAG_SKIP_BROADCAST;
+
+			/* Enable/Ungate DSI clocks and resources */
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+			clk_ctrl.state = MDSS_DSI_CLK_ON;
+			clk_ctrl.client = DSI_CLK_REQ_MDP_CLIENT;
+			mdss_mdp_ctl_intf_event /* enable master */
+				(ctx->ctl, MDSS_EVENT_PANEL_CLK_CTRL,
+				(void *)&clk_ctrl, flags);
+
+			if (sctx) { /* then slave */
+				if (sctx->pingpong_split_slave)
+					flags |= CTL_INTF_EVENT_FLAG_SLAVE_INTF;
+
+				mdss_mdp_ctl_intf_event(sctx->ctl,
+					MDSS_EVENT_PANEL_CLK_CTRL,
+					 (void *)&clk_ctrl, flags);
+			}
+
+			if (mdp5_data->resources_state ==
+					MDP_RSRC_CTL_STATE_GATE)
+				mdp5_data->resources_state =
+					MDP_RSRC_CTL_STATE_ON;
+		}
+
+		/* Transition OFF->ON (enable resources)*/
+		if (mdp5_data->resources_state ==
+				MDP_RSRC_CTL_STATE_OFF) {
+			/* Add an extra vote for the ahb bus */
+			mdss_update_reg_bus_vote(mdata->reg_bus_clt,
+				VOTE_INDEX_LOW);
+
+			/* Enable MDP resources */
+			mdss_mdp_cmd_clk_on(ctx);
+			if (sctx)
+				mdss_mdp_cmd_clk_on(sctx);
+
+			mdp5_data->resources_state = MDP_RSRC_CTL_STATE_ON;
+		}
+
+		if (mdp5_data->resources_state != MDP_RSRC_CTL_STATE_ON) {
+			/* we must be ON by the end of kickoff */
+			pr_err("%s unexpected power state during:%s\n",
+				__func__, get_sw_event_name(sw_event));
+			WARN_ON(1);
+		}
+		mutex_unlock(&ctl->rsrc_lock);
+		break;
+	case MDP_RSRC_CTL_EVENT_PP_DONE:
+		if (mdp5_data->resources_state != MDP_RSRC_CTL_STATE_ON) {
+			pr_err("%s unexpected power state during:%s\n",
+				__func__, get_sw_event_name(sw_event));
+			WARN_ON(1);
+		}
+
+		/* Check that no pending kickoff is on-going */
+		 status = mdss_mdp_ctl_perf_get_transaction_status(ctl);
+
+		/*
+		 * Same for the slave controller. for cases where
+		 * transaction is only pending in the slave controller.
+		 */
+		if (sctl)
+			status |= mdss_mdp_ctl_perf_get_transaction_status(
+				sctl);
+
+		/*
+		 * Schedule the work items to shut down only if
+		 * 1. no kickoff has been scheduled
+		 * 2. no stop command has been started
+		 * 3. no autorefresh is enabled
+		 * 4. no commit is pending
+		 */
+		if ((status == PERF_STATUS_DONE) &&
+			!ctx->intf_stopped &&
+			(ctx->autorefresh_state == MDP_AUTOREFRESH_OFF) &&
+			!ctl->mfd->atomic_commit_pending) {
+			pr_debug("schedule release after:%d ms\n",
+				jiffies_to_msecs
+				(CMD_MODE_IDLE_TIMEOUT));
+
+			MDSS_XLOG(ctl->num, mdp5_data->resources_state,
+				sw_event, 0x22);
+
+			/* start work item to gate */
+			if (mdata->enable_gate)
+				schedule_work(&ctx->gate_clk_work);
+
+			/* start work item to shut down after delay */
+			schedule_delayed_work(
+					&ctx->delayed_off_clk_work,
+					CMD_MODE_IDLE_TIMEOUT);
+		}
+
+		break;
+	case MDP_RSRC_CTL_EVENT_STOP:
+
+		/* Cancel early wakeup Work Item */
+		if (cancel_work_sync(&ctx->early_wakeup_clk_work))
+			pr_debug("early wakeup work canceled\n");
+
+		/* If we are already OFF, just return */
+		if (mdp5_data->resources_state ==
+				MDP_RSRC_CTL_STATE_OFF) {
+			pr_debug("resources already off\n");
+			goto exit;
+		}
+
+		/* If pp_done is on-going, wait for it to finish */
+		mdss_mdp_cmd_wait4pingpong(ctl, NULL);
+		if (sctl)
+			mdss_mdp_cmd_wait4pingpong(sctl, NULL);
+
+		mutex_lock(&ctx->autorefresh_lock);
+		if (ctx->autorefresh_state != MDP_AUTOREFRESH_OFF) {
+			pr_debug("move autorefresh to disable state\n");
+			mdss_mdp_disable_autorefresh(ctl, sctl);
+		}
+		mutex_unlock(&ctx->autorefresh_lock);
+
+		/*
+		 * If a pp_done happened just before the stop,
+		 * we can still have some work items running;
+		 * cancel any pending works.
+		 */
+
+		/* Cancel GATE Work Item */
+		if (cancel_work_sync(&ctx->gate_clk_work)) {
+			pr_debug("gate work canceled\n");
+
+			if (mdp5_data->resources_state !=
+				MDP_RSRC_CTL_STATE_ON)
+				pr_debug("%s power state is not ON\n",
+					__func__);
+		}
+
+		/* Cancel OFF Work Item  */
+		if (cancel_delayed_work_sync(&ctx->delayed_off_clk_work)) {
+			pr_debug("off work canceled\n");
+
+
+			if (mdp5_data->resources_state ==
+					MDP_RSRC_CTL_STATE_OFF)
+				pr_debug("%s unexpected OFF state\n",
+					__func__);
+		}
+
+		mutex_lock(&ctl->rsrc_lock);
+		MDSS_XLOG(ctl->num, mdp5_data->resources_state, sw_event, 0x33);
+		if ((mdp5_data->resources_state == MDP_RSRC_CTL_STATE_ON) ||
+				(mdp5_data->resources_state
+				== MDP_RSRC_CTL_STATE_GATE)) {
+
+			/* Enable MDP clocks if gated */
+			if (mdp5_data->resources_state ==
+					MDP_RSRC_CTL_STATE_GATE)
+				mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+			/* First Power off slave DSI (if present) */
+			if (sctx)
+				mdss_mdp_cmd_clk_off(sctx);
+
+			/* Now Power off master DSI */
+			mdss_mdp_cmd_clk_off(ctx);
+
+			/* Remove extra vote for the ahb bus */
+			mdss_update_reg_bus_vote(mdata->reg_bus_clt,
+				VOTE_INDEX_DISABLE);
+
+
+			/* we are done accessing the resources */
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+
+			/* update the state, now we are in off */
+			mdp5_data->resources_state = MDP_RSRC_CTL_STATE_OFF;
+		}
+		mutex_unlock(&ctl->rsrc_lock);
+		break;
+	case MDP_RSRC_CTL_EVENT_EARLY_WAKE_UP:
+		/*
+		 * Cancel any work item pending and:
+		 * 1. If the current state is ON, stay in ON.
+		 * 2. If the current state is GATED, stay at GATED.
+		 * 3. If the current state is POWER-OFF, POWER-ON and
+		 *	schedule a work item to POWER-OFF if no
+		 *	kickoffs get scheduled.
+		 */
+
+		/* if panels are off, do not process early wake up */
+		if ((ctx && __mdss_mdp_cmd_is_panel_power_off(ctx)) ||
+			(sctx && __mdss_mdp_cmd_is_panel_power_off(sctx)))
+			break;
+
+		/* Cancel GATE Work Item */
+		if (cancel_work_sync(&ctx->gate_clk_work)) {
+			pr_debug("%s: %s - gate_work cancelled\n",
+				 __func__, get_sw_event_name(sw_event));
+			schedule_off = true;
+		}
+
+		/* Cancel OFF Work Item */
+		if (cancel_delayed_work_sync(
+				&ctx->delayed_off_clk_work)) {
+			pr_debug("%s: %s - off work cancelled\n",
+				 __func__, get_sw_event_name(sw_event));
+			schedule_off = true;
+		}
+
+		mutex_lock(&ctl->rsrc_lock);
+		MDSS_XLOG(ctl->num, mdp5_data->resources_state, sw_event,
+			schedule_off, 0x44);
+		if (mdp5_data->resources_state == MDP_RSRC_CTL_STATE_OFF) {
+			u32 flags = CTL_INTF_EVENT_FLAG_SKIP_BROADCAST;
+
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+			clk_ctrl.state = MDSS_DSI_CLK_ON;
+			clk_ctrl.client = DSI_CLK_REQ_MDP_CLIENT;
+			mdss_mdp_ctl_intf_event(ctx->ctl,
+				MDSS_EVENT_PANEL_CLK_CTRL,
+				(void *)&clk_ctrl, flags);
+
+			if (sctx) { /* then slave */
+				if (sctx->pingpong_split_slave)
+					flags |= CTL_INTF_EVENT_FLAG_SLAVE_INTF;
+
+				mdss_mdp_ctl_intf_event(sctx->ctl,
+					MDSS_EVENT_PANEL_CLK_CTRL,
+					(void *)&clk_ctrl, flags);
+			}
+
+			mdss_mdp_cmd_clk_on(ctx);
+			if (sctx)
+				mdss_mdp_cmd_clk_on(sctx);
+
+			mdp5_data->resources_state = MDP_RSRC_CTL_STATE_ON;
+			schedule_off = true;
+		}
+
+		/*
+		 * Driver will schedule off work under three cases:
+		 * 1. Early wakeup cancelled the gate work.
+		 * 2. Early wakeup cancelled the off work.
+		 * 3. Early wakeup changed the state to ON.
+		 *
+		 * Driver will not allow off work under one condition:
+		 * 1. Kickoff is pending.
+		 */
+		if (schedule_off && !ctl->mfd->atomic_commit_pending) {
+			/*
+			 * Schedule off work after cmd mode idle timeout is
+			 * reached. This is to prevent the case where early wake
+			 * up is called but no frame update is sent.
+			 */
+			schedule_delayed_work(&ctx->delayed_off_clk_work,
+				      CMD_MODE_IDLE_TIMEOUT);
+			pr_debug("off work scheduled\n");
+		}
+		mutex_unlock(&ctl->rsrc_lock);
+		break;
+	default:
+		pr_warn("%s unexpected event (%d)\n", __func__, sw_event);
+		break;
+	}
+	MDSS_XLOG(sw_event, mdp5_data->resources_state, XLOG_FUNC_EXIT);
+
+	/* early-outs (invalid ctx, already-off STOP) jump directly here */
+exit:
+	return rc;
+}
+
+/*
+ * Return true when autorefresh is currently ON for the master context of
+ * @mctl. Takes autorefresh_lock so the state is read consistently.
+ */
+static bool mdss_mdp_cmd_is_autorefresh_enabled(struct mdss_mdp_ctl *mctl)
+{
+	struct mdss_mdp_cmd_ctx *ctx = mctl->intf_ctx[MASTER_CTX];
+	bool enabled = false;
+
+	/* check the ctx to make sure the lock was initialized */
+	if (!ctx || !ctx->ctl)
+		return false;
+
+	mutex_lock(&ctx->autorefresh_lock);
+	if (ctx->autorefresh_state == MDP_AUTOREFRESH_ON)
+		enabled = true;
+	mutex_unlock(&ctx->autorefresh_lock);
+
+	return enabled;
+}
+
+/*
+ * Enable the bus bandwidth vote and resume histogram interrupts for this
+ * command-mode context. Counterpart of mdss_mdp_cmd_clk_off().
+ */
+static inline void mdss_mdp_cmd_clk_on(struct mdss_mdp_cmd_ctx *ctx)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	pr_debug("%pS-->%s: task:%s ctx%d\n", __builtin_return_address(0),
+		__func__, current->group_leader->comm, ctx->current_pp_num);
+
+	mutex_lock(&ctx->clk_mtx);
+	MDSS_XLOG(ctx->current_pp_num, atomic_read(&ctx->koff_cnt),
+		mdata->bus_ref_cnt);
+
+	mdss_bus_bandwidth_ctrl(true);
+
+	mdss_mdp_hist_intr_setup(&mdata->hist_intr, MDSS_IRQ_RESUME);
+
+	mutex_unlock(&ctx->clk_mtx);
+}
+
+/*
+ * Suspend histogram interrupts, turn off the DSI clocks for this context
+ * and drop the bus bandwidth vote. Callers must power off a slave context
+ * before its master (see comment below).
+ */
+static inline void mdss_mdp_cmd_clk_off(struct mdss_mdp_cmd_ctx *ctx)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct dsi_panel_clk_ctrl clk_ctrl;
+
+	pr_debug("%pS-->%s: task:%s ctx%d\n", __builtin_return_address(0),
+		__func__, current->group_leader->comm, ctx->current_pp_num);
+
+	mutex_lock(&ctx->clk_mtx);
+	MDSS_XLOG(ctx->current_pp_num, atomic_read(&ctx->koff_cnt),
+		mdata->bus_ref_cnt);
+
+	mdss_mdp_hist_intr_setup(&mdata->hist_intr, MDSS_IRQ_SUSPEND);
+
+	/* Power off DSI, is caller responsibility to do slave then master  */
+	if (ctx->ctl) {
+		u32 flags = CTL_INTF_EVENT_FLAG_SKIP_BROADCAST;
+
+		if (ctx->pingpong_split_slave)
+			flags |= CTL_INTF_EVENT_FLAG_SLAVE_INTF;
+
+		clk_ctrl.state = MDSS_DSI_CLK_OFF;
+		clk_ctrl.client = DSI_CLK_REQ_MDP_CLIENT;
+		mdss_mdp_ctl_intf_event
+			(ctx->ctl, MDSS_EVENT_PANEL_CLK_CTRL,
+			(void *)&clk_ctrl, flags);
+	} else {
+		pr_err("OFF with ctl:NULL\n");
+	}
+
+	mdss_bus_bandwidth_ctrl(false);
+
+	mutex_unlock(&ctx->clk_mtx);
+}
+
+/*
+ * Read-pointer done callback: counts the vsync, completes rdptr_done,
+ * wakes any waiter accounted in rdptr_cnt, and invokes the registered
+ * vsync handlers that are not marked cmd_post_flush.
+ */
+static void mdss_mdp_cmd_readptr_done(void *arg)
+{
+	struct mdss_mdp_ctl *ctl = arg;
+	struct mdss_mdp_cmd_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
+	struct mdss_mdp_vsync_handler *tmp;
+	ktime_t vsync_time;
+
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return;
+	}
+
+	vsync_time = ktime_get();
+	ctl->vsync_cnt++;
+	MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt));
+	complete_all(&ctx->rdptr_done);
+
+	/* If caller is waiting for the read pointer, notify. */
+	if (atomic_read(&ctx->rdptr_cnt)) {
+		if (atomic_add_unless(&ctx->rdptr_cnt, -1, 0)) {
+			MDSS_XLOG(atomic_read(&ctx->rdptr_cnt));
+			if (atomic_read(&ctx->rdptr_cnt))
+				pr_warn("%s: too many rdptrs=%d!\n",
+				  __func__, atomic_read(&ctx->rdptr_cnt));
+		}
+		wake_up_all(&ctx->rdptr_waitq);
+	}
+
+	spin_lock(&ctx->clk_lock);
+	list_for_each_entry(tmp, &ctx->vsync_handlers, list) {
+		if (tmp->enabled && !tmp->cmd_post_flush)
+			tmp->vsync_handler(ctl, vsync_time);
+	}
+	spin_unlock(&ctx->clk_lock);
+}
+
+/*
+ * Block (up to KOFF_TIMEOUT) until rdptr_cnt drops to zero.
+ * Returns >0 on success; <=0 when the wait timed out with read
+ * pointers still pending. If the count cleared exactly at the
+ * timeout, rc is forced to 1 (success).
+ */
+static int mdss_mdp_cmd_wait4readptr(struct mdss_mdp_cmd_ctx *ctx)
+{
+	int rc = 0;
+
+	rc = wait_event_timeout(ctx->rdptr_waitq,
+			atomic_read(&ctx->rdptr_cnt) == 0,
+			KOFF_TIMEOUT);
+	if (rc <= 0) {
+		if (atomic_read(&ctx->rdptr_cnt))
+			pr_err("timed out waiting for rdptr irq\n");
+		else
+			rc = 1;
+	}
+	return rc;
+}
+
+/*
+ * Callback invoked by the DSI interface. Only MDP_INTF_CALLBACK_DSI_WAIT
+ * is handled (pingpong split only): enable the rd_ptr interrupt, wait for
+ * a read pointer, then poll the pingpong line counter until we are inside
+ * the frame before returning.
+ */
+static int mdss_mdp_cmd_intf_callback(void *data, int event)
+{
+	struct mdss_mdp_cmd_ctx *ctx = data;
+	struct mdss_mdp_pp_tear_check *te = NULL;
+	u32 timeout_us = 3000, val = 0;
+	struct mdss_mdp_mixer *mixer;
+
+	if (!data) {
+		pr_err("%s: invalid ctx\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!ctx->ctl)
+		return -EINVAL;
+
+	switch (event) {
+	case MDP_INTF_CALLBACK_DSI_WAIT:
+		pr_debug("%s: wait for frame cnt:%d event:%d\n",
+			__func__, atomic_read(&ctx->rdptr_cnt), event);
+
+		/*
+		 * if we are going to suspend or pp split is not enabled,
+		 * just return
+		 */
+		if (ctx->intf_stopped || !is_pingpong_split(ctx->ctl->mfd))
+			return -EINVAL;
+		atomic_inc(&ctx->rdptr_cnt);
+
+		/* enable clks and rd_ptr interrupt */
+		mdss_mdp_setup_vsync(ctx, true);
+
+		mixer = mdss_mdp_mixer_get(ctx->ctl, MDSS_MDP_MIXER_MUX_LEFT);
+		if (!mixer) {
+			pr_err("%s: null mixer\n", __func__);
+			return -EINVAL;
+		}
+
+		/* wait for read pointer */
+		MDSS_XLOG(atomic_read(&ctx->rdptr_cnt));
+		pr_debug("%s: wait for frame cnt:%d\n",
+			__func__, atomic_read(&ctx->rdptr_cnt));
+		mdss_mdp_cmd_wait4readptr(ctx);
+
+		/* wait for 3ms to make sure we are within the frame */
+		te = &ctx->ctl->panel_data->panel_info.te;
+		readl_poll_timeout(mixer->pingpong_base +
+			MDSS_MDP_REG_PP_INT_COUNT_VAL, val,
+			(val & 0xffff) > (te->start_pos +
+			te->sync_threshold_start), 10, timeout_us);
+
+		/* disable rd_ptr interrupt */
+		mdss_mdp_setup_vsync(ctx, false);
+
+		break;
+	default:
+		pr_debug("%s: unhandled event=%d\n", __func__, event);
+		break;
+	}
+	return 0;
+}
+
+/* wr_ptr (lineptr) done callback: invoke all enabled lineptr handlers */
+static void mdss_mdp_cmd_lineptr_done(void *arg)
+{
+	struct mdss_mdp_ctl *ctl = arg;
+	struct mdss_mdp_cmd_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
+	struct mdss_mdp_lineptr_handler *tmp;
+	ktime_t lineptr_time;
+
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return;
+	}
+
+	lineptr_time = ktime_get();
+	pr_debug("intr lineptr_time=%lld\n", ktime_to_ms(lineptr_time));
+
+	spin_lock(&ctx->clk_lock);
+	list_for_each_entry(tmp, &ctx->lineptr_handlers, list) {
+		if (tmp->enabled)
+			tmp->lineptr_handler(ctl, lineptr_time);
+	}
+	spin_unlock(&ctx->clk_lock);
+}
+
+/*
+ * Recovery hook for the DSI interface in command mode. Only
+ * MDP_INTF_DSI_CMD_FIFO_UNDERFLOW is handled: reset the ctl if a kickoff
+ * is outstanding, tear down the pp-done irq for it, and notify a frame
+ * timeout when this was the last synchronized context.
+ */
+static int mdss_mdp_cmd_intf_recovery(void *data, int event)
+{
+	struct mdss_mdp_cmd_ctx *ctx = data;
+	unsigned long flags;
+	bool reset_done = false, notify_frame_timeout = false;
+
+	if (!data) {
+		pr_err("%s: invalid ctx\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!ctx->ctl)
+		return -EINVAL;
+
+	/*
+	 * Currently, only intf_fifo_underflow is
+	 * supported for recovery sequence for command
+	 * mode DSI interface
+	 */
+	if (event != MDP_INTF_DSI_CMD_FIFO_UNDERFLOW) {
+		pr_warn("%s: unsupported recovery event:%d\n",
+					__func__, event);
+		return -EPERM;
+	}
+
+	if (atomic_read(&ctx->koff_cnt)) {
+		mdss_mdp_ctl_reset(ctx->ctl, true);
+		reset_done = true;
+	}
+
+	spin_lock_irqsave(&ctx->koff_lock, flags);
+	if (reset_done && atomic_add_unless(&ctx->koff_cnt, -1, 0)) {
+		pr_debug("%s: intf_num=%d\n", __func__, ctx->ctl->intf_num);
+		mdss_mdp_irq_disable_nosync(MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+			ctx->current_pp_num);
+		mdss_mdp_set_intr_callback_nosync(
+				MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+				ctx->current_pp_num, NULL, NULL);
+		if (mdss_mdp_cmd_do_notifier(ctx))
+			notify_frame_timeout = true;
+	}
+	spin_unlock_irqrestore(&ctx->koff_lock, flags);
+
+	if (notify_frame_timeout)
+		mdss_mdp_ctl_notify(ctx->ctl, MDP_NOTIFY_FRAME_TIMEOUT);
+
+	return 0;
+}
+
+/*
+ * Pingpong-done (frame transfer complete) callback: marks the MDP
+ * transaction done, runs post-flush vsync handlers, tears down the
+ * pp-done irq, and decrements the kickoff count, scheduling the
+ * frame-done work once all synchronized contexts have completed.
+ */
+static void mdss_mdp_cmd_pingpong_done(void *arg)
+{
+	struct mdss_mdp_ctl *ctl = arg;
+	struct mdss_mdp_cmd_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
+	struct mdss_mdp_vsync_handler *tmp;
+	ktime_t vsync_time;
+	bool sync_ppdone;
+
+	if (!ctx) {
+		pr_err("%s: invalid ctx\n", __func__);
+		return;
+	}
+
+	mdss_mdp_ctl_perf_set_transaction_status(ctl,
+		PERF_HW_MDP_STATE, PERF_STATUS_DONE);
+
+	/*
+	 * Timestamp before invoking the handlers; vsync_time was
+	 * previously passed to the handlers uninitialized.
+	 */
+	vsync_time = ktime_get();
+
+	spin_lock(&ctx->clk_lock);
+	list_for_each_entry(tmp, &ctx->vsync_handlers, list) {
+		if (tmp->enabled && tmp->cmd_post_flush)
+			tmp->vsync_handler(ctl, vsync_time);
+	}
+	spin_unlock(&ctx->clk_lock);
+
+	spin_lock(&ctx->koff_lock);
+
+	mdss_mdp_irq_disable_nosync(MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+		ctx->current_pp_num);
+	mdss_mdp_set_intr_callback_nosync(MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+		ctx->current_pp_num, NULL, NULL);
+
+	MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt), ctx->current_pp_num);
+
+	/*
+	 * check state of sync ctx before decrementing koff_cnt to avoid race
+	 * condition. That is, once both koff_cnt have been served and new koff
+	 * can be triggered (sctx->koff_cnt could change)
+	 */
+	sync_ppdone = mdss_mdp_cmd_do_notifier(ctx);
+
+	if (atomic_add_unless(&ctx->koff_cnt, -1, 0)) {
+		if (atomic_read(&ctx->koff_cnt))
+			pr_err("%s: too many kickoffs=%d!\n", __func__,
+			       atomic_read(&ctx->koff_cnt));
+		if (sync_ppdone) {
+			atomic_inc(&ctx->pp_done_cnt);
+			if (!ctl->commit_in_progress)
+				schedule_work(&ctx->pp_done_work);
+
+			mdss_mdp_resource_control(ctl,
+				MDP_RSRC_CTL_EVENT_PP_DONE);
+		}
+		wake_up_all(&ctx->pp_waitq);
+	} else {
+		pr_err("%s: should not have pingpong interrupt!\n", __func__);
+	}
+
+	pr_debug("%s: ctl_num=%d intf_num=%d ctx=%d cnt=%d\n", __func__,
+			ctl->num, ctl->intf_num, ctx->current_pp_num,
+			atomic_read(&ctx->koff_cnt));
+
+	trace_mdp_cmd_pingpong_done(ctl, ctx->current_pp_num,
+		atomic_read(&ctx->koff_cnt));
+
+	spin_unlock(&ctx->koff_lock);
+}
+
+/*
+ * Reference-counted enable/disable of the pingpong wr_ptr (lineptr)
+ * interrupt; clocks are turned on/off together with the irq on the
+ * 0<->1 refcount transitions. Returns the updated refcount.
+ */
+static int mdss_mdp_setup_lineptr(struct mdss_mdp_cmd_ctx *ctx,
+	bool enable)
+{
+	int changed = 0;
+
+	mutex_lock(&ctx->mdp_wrptr_lock);
+
+	if (enable) {
+		if (ctx->lineptr_irq_cnt == 0)
+			changed++;
+		ctx->lineptr_irq_cnt++;
+	} else {
+		if (ctx->lineptr_irq_cnt) {
+			ctx->lineptr_irq_cnt--;
+			if (ctx->lineptr_irq_cnt == 0)
+				changed++;
+		} else {
+			pr_warn("%pS->%s: wr_ptr can not be turned off\n",
+				__builtin_return_address(0), __func__);
+		}
+	}
+
+	if (changed)
+		MDSS_XLOG(ctx->lineptr_irq_cnt, enable, current->pid);
+
+	pr_debug("%pS->%s: lineptr_irq_cnt=%d changed=%d enable=%d ctl:%d pp:%d\n",
+			__builtin_return_address(0), __func__,
+			ctx->lineptr_irq_cnt, changed, enable,
+			ctx->ctl->num, ctx->default_pp_num);
+
+	if (changed) {
+		if (enable) {
+			/* enable clocks and irq */
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+			mdss_mdp_irq_enable(MDSS_MDP_IRQ_TYPE_PING_PONG_WR_PTR,
+				ctx->default_pp_num);
+		} else {
+			/* disable clocks and irq */
+			mdss_mdp_irq_disable(MDSS_MDP_IRQ_TYPE_PING_PONG_WR_PTR,
+				ctx->default_pp_num);
+			/*
+			 * check the intr status and clear the irq before
+			 * disabling the clocks
+			 */
+			mdss_mdp_intr_check_and_clear(
+				MDSS_MDP_IRQ_TYPE_PING_PONG_WR_PTR,
+				ctx->default_pp_num);
+
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+		}
+	}
+
+	mutex_unlock(&ctx->mdp_wrptr_lock);
+	return ctx->lineptr_irq_cnt;
+}
+
+/*
+ * Register @handle and enable the lineptr (wr_ptr) interrupt. Only valid
+ * on a master ctl with an initialized context.
+ */
+static int mdss_mdp_cmd_add_lineptr_handler(struct mdss_mdp_ctl *ctl,
+		struct mdss_mdp_lineptr_handler *handle)
+{
+	struct mdss_mdp_cmd_ctx *ctx;
+	unsigned long flags;
+	int ret = 0;
+
+	mutex_lock(&cmd_off_mtx);
+	ctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx || !ctl->is_master) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	pr_debug("%pS->%s: ctl=%d\n",
+		__builtin_return_address(0), __func__, ctl->num);
+
+	MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt));
+
+	spin_lock_irqsave(&ctx->clk_lock, flags);
+	if (!handle->enabled) {
+		handle->enabled = true;
+		list_add(&handle->list, &ctx->lineptr_handlers);
+	}
+	spin_unlock_irqrestore(&ctx->clk_lock, flags);
+
+	/* serialize lineptr setup across both ctls of a dual display */
+	if (ctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY)
+		mutex_lock(&cmd_clk_mtx);
+
+	mdss_mdp_setup_lineptr(ctx, true);
+	ctx->lineptr_enabled = true;
+
+	if (ctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY)
+		mutex_unlock(&cmd_clk_mtx);
+done:
+	mutex_unlock(&cmd_off_mtx);
+
+	return ret;
+}
+
+/* Unregister @handle and disable the lineptr (wr_ptr) interrupt. */
+static int mdss_mdp_cmd_remove_lineptr_handler(struct mdss_mdp_ctl *ctl,
+		struct mdss_mdp_lineptr_handler *handle)
+{
+	struct mdss_mdp_cmd_ctx *ctx;
+	unsigned long flags;
+	bool disabled = true;
+
+	ctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx || !ctl->is_master || !ctx->lineptr_enabled)
+		return -EINVAL;
+
+	pr_debug("%pS->%s: ctl=%d\n",
+		__builtin_return_address(0), __func__, ctl->num);
+
+	MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt));
+
+	spin_lock_irqsave(&ctx->clk_lock, flags);
+	if (handle->enabled) {
+		handle->enabled = false;
+		list_del_init(&handle->list);
+	} else {
+		disabled = false;
+	}
+	spin_unlock_irqrestore(&ctx->clk_lock, flags);
+
+	if (disabled)
+		mdss_mdp_setup_lineptr(ctx, false);
+	/*
+	 * NOTE(review): lineptr_enabled is cleared even when the handle was
+	 * not enabled (disabled == false) — confirm this asymmetry is intended.
+	 */
+	ctx->lineptr_enabled = false;
+	ctx->prev_wr_ptr_irq = 0;
+
+	return 0;
+}
+
+/*
+ * Apply the panel's wr_ptr_irq setting: program the new lineptr value if
+ * it changed, and add/remove the lineptr handler so the interrupt is only
+ * active while a non-zero wr_ptr value is requested.
+ */
+static int mdss_mdp_cmd_lineptr_ctrl(struct mdss_mdp_ctl *ctl, bool enable)
+{
+	struct mdss_mdp_pp_tear_check *te;
+	struct mdss_mdp_cmd_ctx *ctx;
+	int rc = 0;
+
+	ctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx || !ctl->is_master)
+		return -EINVAL;
+
+	te = &ctl->panel_data->panel_info.te;
+	pr_debug("%pS->%s: ctl=%d en=%d, prev_lineptr=%d, lineptr=%d\n",
+			__builtin_return_address(0), __func__, ctl->num,
+			enable, ctx->prev_wr_ptr_irq, te->wr_ptr_irq);
+
+	if (enable) {
+		/* update reg only if the value has changed */
+		if (ctx->prev_wr_ptr_irq != te->wr_ptr_irq) {
+			ctx->prev_wr_ptr_irq = te->wr_ptr_irq;
+			mdss_mdp_pingpong_write(ctl->mixer_left->pingpong_base,
+				MDSS_MDP_REG_PP_WR_PTR_IRQ, te->wr_ptr_irq);
+		}
+
+		/*
+		 * add handler only when lineptr is not enabled
+		 * and wr ptr is non zero
+		 */
+		if (!ctx->lineptr_enabled && te->wr_ptr_irq)
+			rc = mdss_mdp_cmd_add_lineptr_handler(ctl,
+				&ctl->lineptr_handler);
+		/* Disable handler when the value is zero */
+		else if (ctx->lineptr_enabled && !te->wr_ptr_irq)
+			rc = mdss_mdp_cmd_remove_lineptr_handler(ctl,
+				&ctl->lineptr_handler);
+	} else {
+		if (ctx->lineptr_enabled)
+			rc = mdss_mdp_cmd_remove_lineptr_handler(ctl,
+				&ctl->lineptr_handler);
+	}
+
+	return rc;
+}
+
+/*
+ * Interface used to update the new lineptr value set by the sysfs node.
+ * Value is instantly updated only when autorefresh is enabled, else
+ * new value would be set in the next kickoff.
+ */
+static int mdss_mdp_cmd_update_lineptr(struct mdss_mdp_ctl *ctl, bool enable)
+{
+	/* only apply immediately while autorefresh is running */
+	if (!mdss_mdp_cmd_is_autorefresh_enabled(ctl))
+		return 0;
+
+	return mdss_mdp_cmd_lineptr_ctrl(ctl, enable);
+}
+
+/**
+ * mdss_mdp_cmd_autorefresh_pp_done() - pp done irq callback for autorefresh
+ * @arg: void pointer to the controller context.
+ *
+ * This function is the pp_done interrupt callback while disabling
+ * autorefresh. This function does not modify the kickoff count (koff_cnt).
+ */
+static void mdss_mdp_cmd_autorefresh_pp_done(void *arg)
+{
+	struct mdss_mdp_ctl *ctl = arg;
+	struct mdss_mdp_cmd_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
+
+	if (!ctx) {
+		pr_err("%s: invalid ctx\n", __func__);
+		return;
+	}
+
+	/* done with this frame: tear down the pp-done irq and its callback */
+	mdss_mdp_irq_disable_nosync(MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+		ctx->current_pp_num);
+	mdss_mdp_set_intr_callback_nosync(MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+		ctx->current_pp_num, NULL, NULL);
+
+	MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt), ctx->current_pp_num);
+	/* unblock anyone waiting for the autorefresh pp_done */
+	complete_all(&ctx->autorefresh_ppdone);
+
+	pr_debug("%s: ctl_num=%d intf_num=%d ctx=%d cnt=%d\n", __func__,
+		ctl->num, ctl->intf_num, ctx->current_pp_num,
+		atomic_read(&ctx->koff_cnt));
+}
+
+/*
+ * Work item scheduled from pp-done: sends FRAME_DONE notifications for
+ * every completed kickoff, releases bandwidth once no transaction is
+ * pending, and disables lineptr on the master (kept on while
+ * autorefresh is active).
+ */
+static void pingpong_done_work(struct work_struct *work)
+{
+	u32 status;
+	struct mdss_mdp_cmd_ctx *ctx =
+		container_of(work, typeof(*ctx), pp_done_work);
+	struct mdss_mdp_ctl *ctl = ctx->ctl;
+
+	if (ctl) {
+		while (atomic_add_unless(&ctx->pp_done_cnt, -1, 0))
+			mdss_mdp_ctl_notify(ctx->ctl, MDP_NOTIFY_FRAME_DONE);
+
+		status = mdss_mdp_ctl_perf_get_transaction_status(ctx->ctl);
+		if (status == 0)
+			mdss_mdp_ctl_perf_release_bw(ctx->ctl);
+
+		if (!ctl->is_master)
+			ctl = mdss_mdp_get_main_ctl(ctl);
+
+		/* do not disable lineptr when autorefresh is enabled */
+		if (mdss_mdp_is_lineptr_supported(ctl)
+			&& !mdss_mdp_cmd_is_autorefresh_enabled(ctl))
+			mdss_mdp_cmd_lineptr_ctrl(ctl, false);
+	}
+}
+
+/*
+ * Delayed work: turn off the mdp/DSI clocks, drop the ahb bus vote and
+ * move the resource state machine to STATE_OFF, unless a kickoff is
+ * already queued or autorefresh raced in.
+ */
+static void clk_ctrl_delayed_off_work(struct work_struct *work)
+{
+	struct mdss_overlay_private *mdp5_data;
+	struct delayed_work *dw = to_delayed_work(work);
+	struct mdss_mdp_cmd_ctx *ctx = container_of(dw,
+		struct mdss_mdp_cmd_ctx, delayed_off_clk_work);
+	struct mdss_mdp_ctl *ctl, *sctl;
+	struct mdss_mdp_cmd_ctx *sctx = NULL;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!ctx) {
+		pr_err("%s: invalid ctx\n", __func__);
+		return;
+	}
+
+	ctl = ctx->ctl;
+	if (!ctl || !ctl->panel_data) {
+		pr_err("NULL ctl||panel_data\n");
+		return;
+	}
+
+	if (ctl->mfd->atomic_commit_pending) {
+		pr_debug("leave clocks on for queued kickoff\n");
+		return;
+	}
+
+	mdp5_data = mfd_to_mdp5_data(ctl->mfd);
+	ATRACE_BEGIN(__func__);
+
+	/*
+	 * Ideally we should not wait for the gate work item to finish, since
+	 * this work happens CMD_MODE_IDLE_TIMEOUT time after,
+	 * but if the system is laggy, prevent from a race condition
+	 * between both work items by waiting for the gate to finish.
+	 */
+	if (mdata->enable_gate)
+		flush_work(&ctx->gate_clk_work);
+
+	pr_debug("ctl:%d pwr_state:%s\n", ctl->num,
+		get_clk_pwr_state_name
+		(mdp5_data->resources_state));
+
+	mutex_lock(&ctl->rsrc_lock);
+	MDSS_XLOG(ctl->num, mdp5_data->resources_state, XLOG_FUNC_ENTRY);
+
+	if (ctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
+		mutex_lock(&cmd_clk_mtx);
+
+		if (mdss_mdp_get_split_display_ctls(&ctl, &sctl)) {
+			/* error when getting both controllers, just return */
+			pr_err("cannot get both controllers for the split display\n");
+			goto exit;
+		}
+
+		/* re-assign to have the correct order in the context */
+		ctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[MASTER_CTX];
+		sctx = (struct mdss_mdp_cmd_ctx *) sctl->intf_ctx[MASTER_CTX];
+		if (!ctx || !sctx) {
+			pr_err("invalid %s %s\n",
+				ctx?"":"ctx", sctx?"":"sctx");
+			goto exit;
+		}
+	} else if (is_pingpong_split(ctl->mfd)) {
+		mutex_lock(&cmd_clk_mtx);
+		sctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[SLAVE_CTX];
+		if (!sctx) {
+			pr_err("invalid sctx\n");
+			goto exit;
+		}
+	}
+
+	if (ctx->autorefresh_state != MDP_AUTOREFRESH_OFF) {
+		/*
+		 * Driver shouldn't have scheduled this work item if
+		 * autorefresh was enabled, but if any race
+		 * condition happens between this work queue and
+		 * the enable of the feature, make sure we do not
+		 * process this request and mark this error.
+		 */
+		pr_err("cannot disable clks while autorefresh is not off\n");
+		goto exit;
+	}
+
+	/* Enable clocks if Gate feature is enabled and we are in this state */
+	if (mdata->enable_gate && (mdp5_data->resources_state
+			== MDP_RSRC_CTL_STATE_GATE))
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+	/* first power off the slave DSI (if present) */
+	if (sctx)
+		mdss_mdp_cmd_clk_off(sctx);
+
+	/* now power off the master DSI */
+	mdss_mdp_cmd_clk_off(ctx);
+
+	/* Remove extra vote for the ahb bus */
+	mdss_update_reg_bus_vote(mdata->reg_bus_clt,
+		VOTE_INDEX_DISABLE);
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+
+	/* update state machine that power off transition is done */
+	mdp5_data->resources_state = MDP_RSRC_CTL_STATE_OFF;
+
+exit:
+	/* do this at the end, so we can also protect the global power state */
+	if ((ctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) ||
+	    is_pingpong_split(ctl->mfd))
+		mutex_unlock(&cmd_clk_mtx);
+
+	MDSS_XLOG(ctl->num, mdp5_data->resources_state, XLOG_FUNC_EXIT);
+	mutex_unlock(&ctl->rsrc_lock);
+
+	ATRACE_END(__func__);
+}
+
+/*
+ * Work item that "gates" the display clocks after an idle period: the DSI
+ * clocks of the master (and of the slave, for split / pingpong-split
+ * displays) are placed in early-gate state and the MDP clock vote is
+ * dropped, moving the resource state machine to MDP_RSRC_CTL_STATE_GATE.
+ * Bails out (leaving clocks on) if autorefresh raced in and is not off.
+ */
+static void clk_ctrl_gate_work(struct work_struct *work)
+{
+	struct mdss_overlay_private *mdp5_data;
+	struct mdss_mdp_cmd_ctx *ctx =
+		container_of(work, typeof(*ctx), gate_clk_work);
+	struct mdss_mdp_ctl *ctl, *sctl;
+	struct mdss_mdp_cmd_ctx *sctx = NULL;
+	struct dsi_panel_clk_ctrl clk_ctrl;
+
+	if (!ctx) {
+		pr_err("%s: invalid ctx\n", __func__);
+		return;
+	}
+
+	ATRACE_BEGIN(__func__);
+	ctl = ctx->ctl;
+	if (!ctl) {
+		pr_err("%s: invalid ctl\n", __func__);
+		/* balance the ATRACE_BEGIN above on this early return */
+		ATRACE_END(__func__);
+		return;
+	}
+
+	mdp5_data = mfd_to_mdp5_data(ctl->mfd);
+	if (!mdp5_data) {
+		pr_err("%s: invalid mdp data\n", __func__);
+		/* balance the ATRACE_BEGIN above on this early return */
+		ATRACE_END(__func__);
+		return;
+	}
+
+	pr_debug("%s ctl:%d pwr_state:%s\n", __func__,
+		ctl->num, get_clk_pwr_state_name
+		(mdp5_data->resources_state));
+
+	mutex_lock(&ctl->rsrc_lock);
+	MDSS_XLOG(ctl->num, mdp5_data->resources_state, XLOG_FUNC_ENTRY);
+
+
+	if (ctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
+		mutex_lock(&cmd_clk_mtx);
+
+		if (mdss_mdp_get_split_display_ctls(&ctl, &sctl)) {
+			/* error when getting both controllers, just return */
+			pr_err("%s cannot get both cts for the split display\n",
+				__func__);
+			goto exit;
+		}
+
+		/* re-assign to have the correct order in the context */
+		ctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[MASTER_CTX];
+		sctx = (struct mdss_mdp_cmd_ctx *) sctl->intf_ctx[MASTER_CTX];
+		if (!ctx || !sctx) {
+			pr_err("%s ERROR invalid %s %s\n", __func__,
+				ctx?"":"ctx", sctx?"":"sctx");
+			goto exit;
+		}
+	} else if (is_pingpong_split(ctl->mfd)) {
+		mutex_lock(&cmd_clk_mtx);
+		sctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[SLAVE_CTX];
+		if (!sctx) {
+			pr_err("invalid sctx\n");
+			goto exit;
+		}
+	}
+
+	if (ctx->autorefresh_state != MDP_AUTOREFRESH_OFF) {
+		/*
+		 * Driver shouldn't have scheduled this work item if
+		 * autorefresh was enabled, but if any race
+		 * condition happens between this work queue and
+		 * the enable of the feature, make sure we do not
+		 * process this request and mark this error.
+		 */
+		pr_err("cannot gate clocks with autorefresh\n");
+		goto exit;
+	}
+
+	clk_ctrl.state = MDSS_DSI_CLK_EARLY_GATE;
+	clk_ctrl.client = DSI_CLK_REQ_MDP_CLIENT;
+	/* First gate the DSI clocks for the slave controller (if present) */
+	if (sctx) {
+		u32 flags = CTL_INTF_EVENT_FLAG_SKIP_BROADCAST;
+
+		if (sctx->pingpong_split_slave)
+			flags |= CTL_INTF_EVENT_FLAG_SLAVE_INTF;
+
+		mdss_mdp_ctl_intf_event(sctx->ctl,
+			MDSS_EVENT_PANEL_CLK_CTRL,
+			(void *)&clk_ctrl, flags);
+	}
+
+	/* Now gate DSI clocks for the master */
+	mdss_mdp_ctl_intf_event
+		(ctx->ctl, MDSS_EVENT_PANEL_CLK_CTRL,
+		(void *)&clk_ctrl, CTL_INTF_EVENT_FLAG_SKIP_BROADCAST);
+
+	/* Gate mdp clocks */
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+
+	/* update state machine that gate transition is done */
+	mdp5_data->resources_state = MDP_RSRC_CTL_STATE_GATE;
+
+exit:
+	/* unlock mutex needed for split display */
+	if ((ctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) ||
+	    is_pingpong_split(ctl->mfd))
+		mutex_unlock(&cmd_clk_mtx);
+
+	MDSS_XLOG(ctl->num, mdp5_data->resources_state, XLOG_FUNC_EXIT);
+	mutex_unlock(&ctl->rsrc_lock);
+
+	ATRACE_END(__func__);
+}
+
+/*
+ * Reference-counted enable/disable of the pingpong rd_ptr (vsync)
+ * interrupt.  Clocks and the irq are only touched on the 0 -> 1 and
+ * 1 -> 0 transitions of vsync_irq_cnt; every other call just adjusts
+ * the refcount.  Returns the updated reference count.
+ */
+static int mdss_mdp_setup_vsync(struct mdss_mdp_cmd_ctx *ctx,
+	bool enable)
+{
+	int changed = 0;
+
+	mutex_lock(&ctx->mdp_rdptr_lock);
+
+	if (enable) {
+		/* 0 -> 1 transition: hardware must be turned on below */
+		if (ctx->vsync_irq_cnt == 0)
+			changed++;
+		ctx->vsync_irq_cnt++;
+	} else {
+		if (ctx->vsync_irq_cnt) {
+			ctx->vsync_irq_cnt--;
+			/* 1 -> 0 transition: hardware must be turned off */
+			if (ctx->vsync_irq_cnt == 0)
+				changed++;
+		} else {
+			/* unbalanced disable: warn and leave count at 0 */
+			pr_warn("%pS->%s: rd_ptr can not be turned off\n",
+				__builtin_return_address(0), __func__);
+		}
+	}
+
+	if (changed)
+		MDSS_XLOG(ctx->vsync_irq_cnt, enable, current->pid);
+
+	pr_debug("%pS->%s: vsync_cnt=%d changed=%d enable=%d ctl:%d pp:%d\n",
+			__builtin_return_address(0), __func__,
+			ctx->vsync_irq_cnt, changed, enable,
+			ctx->ctl->num, ctx->default_pp_num);
+
+	if (changed) {
+		if (enable) {
+			/* enable clocks and irq */
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+			mdss_mdp_irq_enable(MDSS_MDP_IRQ_TYPE_PING_PONG_RD_PTR,
+				ctx->default_pp_num);
+		} else {
+			/* disable clocks and irq */
+			mdss_mdp_irq_disable(MDSS_MDP_IRQ_TYPE_PING_PONG_RD_PTR,
+				ctx->default_pp_num);
+			/*
+			 * check the intr status and clear the irq before
+			 * disabling the clocks
+			 */
+			mdss_mdp_intr_check_and_clear(
+				MDSS_MDP_IRQ_TYPE_PING_PONG_RD_PTR,
+				ctx->default_pp_num);
+
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+		}
+	}
+
+	mutex_unlock(&ctx->mdp_rdptr_lock);
+	return ctx->vsync_irq_cnt;
+}
+
+/*
+ * Register a vsync handler on the master context.  The handler list is
+ * protected by clk_lock (irq-safe); the rd_ptr irq refcount is bumped
+ * only for handlers that want pre-flush callbacks (!cmd_post_flush).
+ * cmd_off_mtx serializes against the interface being torn down.
+ * Returns 0 on success or -ENODEV when the master context is missing.
+ */
+static int mdss_mdp_cmd_add_vsync_handler(struct mdss_mdp_ctl *ctl,
+		struct mdss_mdp_vsync_handler *handle)
+{
+	struct mdss_mdp_ctl *sctl = NULL;
+	struct mdss_mdp_cmd_ctx *ctx, *sctx = NULL;
+	unsigned long flags;
+	bool enable_rdptr = false;
+	int ret = 0;
+
+	mutex_lock(&cmd_off_mtx);
+	ctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx) {
+		pr_err("%s: invalid ctx\n", __func__);
+		ret = -ENODEV;
+		goto done;
+	}
+
+	pr_debug("%pS->%s ctl:%d\n",
+		__builtin_return_address(0), __func__, ctl->num);
+
+	MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt));
+	sctl = mdss_mdp_get_split_ctl(ctl);
+	if (sctl)
+		sctx = (struct mdss_mdp_cmd_ctx *) sctl->intf_ctx[MASTER_CTX];
+
+	spin_lock_irqsave(&ctx->clk_lock, flags);
+	if (!handle->enabled) {
+		handle->enabled = true;
+		list_add(&handle->list, &ctx->vsync_handlers);
+
+		enable_rdptr = !handle->cmd_post_flush;
+	}
+	spin_unlock_irqrestore(&ctx->clk_lock, flags);
+
+	if (enable_rdptr) {
+		/* cmd_clk_mtx guards shared clock state for split display */
+		if (ctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY)
+			mutex_lock(&cmd_clk_mtx);
+
+		/* enable rd_ptr interrupt and clocks */
+		mdss_mdp_setup_vsync(ctx, true);
+
+		if (ctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY)
+			mutex_unlock(&cmd_clk_mtx);
+	}
+
+done:
+	mutex_unlock(&cmd_off_mtx);
+
+	return ret;
+}
+
+/*
+ * Unregister a vsync handler from the master context and, when the
+ * handler had the rd_ptr irq enabled, drop the irq refcount and signal
+ * stop_comp so a pending stop sequence can proceed.
+ * Returns 0 on success or -ENODEV when the master context is missing.
+ */
+static int mdss_mdp_cmd_remove_vsync_handler(struct mdss_mdp_ctl *ctl,
+		struct mdss_mdp_vsync_handler *handle)
+{
+	struct mdss_mdp_ctl *sctl;
+	struct mdss_mdp_cmd_ctx *ctx, *sctx = NULL;
+	unsigned long flags;
+	bool disable_vsync_irq = false;
+
+	ctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx) {
+		pr_err("%s: invalid ctx\n", __func__);
+		return -ENODEV;
+	}
+
+	pr_debug("%pS->%s ctl:%d\n",
+		__builtin_return_address(0), __func__, ctl->num);
+
+	MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt), 0x88888);
+	sctl = mdss_mdp_get_split_ctl(ctl);
+	if (sctl)
+		sctx = (struct mdss_mdp_cmd_ctx *) sctl->intf_ctx[MASTER_CTX];
+
+	/* list and enabled flag are protected by the irq-safe clk_lock */
+	spin_lock_irqsave(&ctx->clk_lock, flags);
+	if (handle->enabled) {
+		handle->enabled = false;
+		list_del_init(&handle->list);
+		disable_vsync_irq = !handle->cmd_post_flush;
+	}
+	spin_unlock_irqrestore(&ctx->clk_lock, flags);
+
+	if (disable_vsync_irq) {
+		/* disable rd_ptr interrupt and clocks */
+		mdss_mdp_setup_vsync(ctx, false);
+		complete(&ctx->stop_comp);
+	}
+
+	return 0;
+}
+
+/*
+ * Hand over from the bootloader continuous-splash screen: turn off the
+ * DSI clocks that splash left running (slave first, then master) and
+ * clear the cont_splash_enabled flag on every involved panel.
+ * Always returns 0; the handoff parameter is currently unused here.
+ */
+int mdss_mdp_cmd_reconfigure_splash_done(struct mdss_mdp_ctl *ctl,
+	bool handoff)
+{
+	struct mdss_panel_data *pdata;
+	struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
+	struct dsi_panel_clk_ctrl clk_ctrl;
+	int ret = 0;
+
+	pdata = ctl->panel_data;
+
+	clk_ctrl.state = MDSS_DSI_CLK_OFF;
+	clk_ctrl.client = DSI_CLK_REQ_MDP_CLIENT;
+	if (sctl) {
+		u32 flags = CTL_INTF_EVENT_FLAG_SKIP_BROADCAST;
+
+		if (is_pingpong_split(sctl->mfd))
+			flags |= CTL_INTF_EVENT_FLAG_SLAVE_INTF;
+
+		mdss_mdp_ctl_intf_event(sctl, MDSS_EVENT_PANEL_CLK_CTRL,
+			(void *)&clk_ctrl, flags);
+	}
+
+	mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_PANEL_CLK_CTRL,
+		(void *)&clk_ctrl, CTL_INTF_EVENT_FLAG_SKIP_BROADCAST);
+
+	/* splash is done for master, split-display slave or pp-split slave */
+	pdata->panel_info.cont_splash_enabled = 0;
+	if (sctl)
+		sctl->panel_data->panel_info.cont_splash_enabled = 0;
+	else if (pdata->next && is_pingpong_split(ctl->mfd))
+		pdata->next->panel_info.cont_splash_enabled = 0;
+
+	return ret;
+}
+
+/*
+ * Block until all outstanding kickoffs complete (koff_cnt reaches 0) or
+ * an absolute deadline of KOFF_TIMEOUT_MS from now passes.  The loop
+ * re-waits after spurious/early wakeups as long as kickoffs remain
+ * pending and the deadline has not been reached.
+ * Returns the last wait_event_timeout() result: 0 means timed out.
+ */
+static int __mdss_mdp_wait4pingpong(struct mdss_mdp_cmd_ctx *ctx)
+{
+	int rc = 0;
+	s64 expected_time = ktime_to_ms(ktime_get()) + KOFF_TIMEOUT_MS;
+	s64 time;
+
+	do {
+		rc = wait_event_timeout(ctx->pp_waitq,
+				atomic_read(&ctx->koff_cnt) == 0,
+				KOFF_TIMEOUT);
+		time = ktime_to_ms(ktime_get());
+
+		MDSS_XLOG(rc, time, expected_time, atomic_read(&ctx->koff_cnt));
+		/*
+		 * If we time out, counter is valid and time is less,
+		 * wait again.
+		 */
+	} while (atomic_read(&ctx->koff_cnt) && (rc == 0) &&
+			(time < expected_time));
+
+	return rc;
+}
+
+/*
+ * Wait for the pingpong-done of the last kickoff and run recovery when
+ * it times out: first re-check the irq status register for a missed
+ * interrupt, then probe the panel TE to decide between reporting the
+ * panel dead and dumping debug buses.  Also drains pending frame-done
+ * notifications.  Returns 0 on success, -EPERM on timeout.
+ */
+static int mdss_mdp_cmd_wait4pingpong(struct mdss_mdp_ctl *ctl, void *arg)
+{
+	struct mdss_mdp_cmd_ctx *ctx;
+	struct mdss_panel_data *pdata;
+	unsigned long flags;
+	int rc = 0, te_irq;
+
+	ctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return -ENODEV;
+	}
+
+	pdata = ctl->panel_data;
+
+	MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt), ctl->roi_bkup.w,
+			ctl->roi_bkup.h);
+
+	pr_debug("%s: intf_num=%d ctx=%pK koff_cnt=%d\n", __func__,
+			ctl->intf_num, ctx, atomic_read(&ctx->koff_cnt));
+
+	rc = __mdss_mdp_wait4pingpong(ctx);
+
+	trace_mdp_cmd_wait_pingpong(ctl->num,
+				atomic_read(&ctx->koff_cnt));
+
+	if (rc <= 0) {
+		u32 status, mask;
+
+		/*
+		 * The wait timed out; the interrupt may have fired without
+		 * being serviced.  If the status bit is set, run the done
+		 * handler by hand and treat the frame as completed.
+		 */
+		mask = mdss_mdp_get_irq_mask(MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+				ctx->current_pp_num);
+		status = mask & readl_relaxed(ctl->mdata->mdp_base +
+				MDSS_MDP_REG_INTR_STATUS);
+		MDSS_XLOG(status, rc, atomic_read(&ctx->koff_cnt));
+		if (status) {
+			pr_warn("pp done but irq not triggered\n");
+			mdss_mdp_irq_clear(ctl->mdata,
+				MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+				ctx->current_pp_num);
+			local_irq_save(flags);
+			mdss_mdp_cmd_pingpong_done(ctl);
+			local_irq_restore(flags);
+			rc = 1;
+		}
+
+		rc = atomic_read(&ctx->koff_cnt) == 0;
+	}
+
+	if (rc <= 0) {
+		pr_err("%s:wait4pingpong timed out ctl=%d rc=%d cnt=%d koff_cnt=%d\n",
+				__func__,
+				ctl->num, rc, ctx->pp_timeout_report_cnt,
+				atomic_read(&ctx->koff_cnt));
+
+		/* enable TE irq to check if it is coming from the panel */
+		te_irq = gpio_to_irq(pdata->panel_te_gpio);
+		enable_irq(te_irq);
+
+		/* wait for 20ms to ensure we are getting the next TE */
+		usleep_range(20000, 20010);
+
+		reinit_completion(&pdata->te_done);
+		rc = wait_for_completion_timeout(&pdata->te_done, KOFF_TIMEOUT);
+
+		if (!rc) {
+			/* no TE at all: panel is not responding */
+			MDSS_XLOG(0xbac);
+			mdss_fb_report_panel_dead(ctl->mfd);
+		} else if (ctx->pp_timeout_report_cnt == 0) {
+			/* first timeout with a live panel: dump debug state */
+			MDSS_XLOG(0xbad);
+			MDSS_XLOG_TOUT_HANDLER("mdp", "dsi0_ctrl", "dsi0_phy",
+				"dsi1_ctrl", "dsi1_phy", "vbif", "vbif_nrt",
+				"dbg_bus", "vbif_dbg_bus", "panic");
+		} else if (ctx->pp_timeout_report_cnt == MAX_RECOVERY_TRIALS) {
+			/* recovery retries exhausted: give up on the panel */
+			MDSS_XLOG(0xbad2);
+			MDSS_XLOG_TOUT_HANDLER("mdp", "dsi0_ctrl", "dsi0_phy",
+				"dsi1_ctrl", "dsi1_phy", "vbif", "vbif_nrt",
+				"dbg_bus", "vbif_dbg_bus", "panic");
+			mdss_fb_report_panel_dead(ctl->mfd);
+		}
+
+		/* disable te irq */
+		disable_irq_nosync(te_irq);
+
+		ctx->pp_timeout_report_cnt++;
+		rc = -EPERM;
+
+		mdss_mdp_irq_disable_nosync(MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+			ctx->current_pp_num);
+		mdss_mdp_set_intr_callback_nosync(
+				MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+				ctx->current_pp_num, NULL, NULL);
+		if (atomic_add_unless(&ctx->koff_cnt, -1, 0)
+			&& mdss_mdp_cmd_do_notifier(ctx))
+			mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_TIMEOUT);
+
+	} else {
+		rc = 0;
+		ctx->pp_timeout_report_cnt = 0;
+	}
+
+	cancel_work_sync(&ctx->pp_done_work);
+
+	/* signal any pending ping pong done events */
+	while (atomic_add_unless(&ctx->pp_done_cnt, -1, 0))
+		mdss_mdp_ctl_notify(ctx->ctl, MDP_NOTIFY_FRAME_DONE);
+
+	MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt), rc);
+
+	return rc;
+}
+
+/*
+ * Decide whether this context should emit the frame notification.
+ * Returns 1 when there is no synchronized partner context, or when the
+ * partner has no kickoffs outstanding; 0 otherwise (partner notifies).
+ */
+static int mdss_mdp_cmd_do_notifier(struct mdss_mdp_cmd_ctx *ctx)
+{
+	struct mdss_mdp_cmd_ctx *partner = ctx->sync_ctx;
+
+	if (partner && atomic_read(&partner->koff_cnt) != 0)
+		return 0;
+
+	return 1;
+}
+
+/*
+ * Pair (or unpair) the master and slave command contexts for split
+ * display, so that only the last finishing side triggers notification.
+ */
+static void mdss_mdp_cmd_set_sync_ctx(
+		struct mdss_mdp_ctl *ctl, struct mdss_mdp_ctl *sctl)
+{
+	struct mdss_mdp_cmd_ctx *mctx, *sctx;
+
+	mctx = (struct mdss_mdp_cmd_ctx *)ctl->intf_ctx[MASTER_CTX];
+
+	/* no split controller: nothing to synchronize with */
+	if (!sctl) {
+		mctx->sync_ctx = NULL;
+		return;
+	}
+
+	sctx = (struct mdss_mdp_cmd_ctx *)sctl->intf_ctx[MASTER_CTX];
+
+	if (sctl->roi.w || sctl->roi.h) {
+		/* left + right update: link the two contexts */
+		mctx->sync_ctx = sctx;
+		sctx->sync_ctx = mctx;
+	} else {
+		/* left-only update: clear any previous pairing */
+		mctx->sync_ctx = NULL;
+		sctx->sync_ctx = NULL;
+	}
+}
+
+/* only master ctl is valid and pingpong split with DSC is pending */
+static void mdss_mdp_cmd_dsc_reconfig(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_panel_info *pinfo;
+	bool roi_dirty;
+
+	/* nothing to do unless called with a valid master ctl */
+	if (!ctl || !ctl->is_master)
+		return;
+
+	pinfo = &ctl->panel_data->panel_info;
+	/* DSC setup is only relevant for DSC-compressed panels */
+	if (pinfo->compression_mode != COMPRESSION_DSC)
+		return;
+
+	/* reprogram only when some layer-mixer ROI actually changed */
+	roi_dirty = ctl->mixer_left->roi_changed;
+	if (is_split_lm(ctl->mfd))
+		roi_dirty = roi_dirty || ctl->mixer_right->roi_changed;
+
+	if (roi_dirty)
+		mdss_mdp_ctl_dsc_setup(ctl, pinfo);
+}
+
+/*
+ * Program the panel column/page addresses for a partial update.
+ * Returns -EINVAL when partial update is not enabled on the panel,
+ * otherwise the panel event result.
+ */
+static int mdss_mdp_cmd_set_partial_roi(struct mdss_mdp_ctl *ctl)
+{
+	if (!ctl->panel_data->panel_info.partial_update_enabled)
+		return -EINVAL;
+
+	/* set panel col and page addr */
+	return mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_ENABLE_PARTIAL_ROI,
+				       NULL, CTL_INTF_EVENT_FLAG_DEFAULT);
+}
+
+/*
+ * Program the DSI controller stream size for a partial update.
+ * Returns -EINVAL when partial update is not enabled on the panel,
+ * otherwise the panel event result.
+ */
+static int mdss_mdp_cmd_set_stream_size(struct mdss_mdp_ctl *ctl)
+{
+	if (!ctl->panel_data->panel_info.partial_update_enabled)
+		return -EINVAL;
+
+	/* set dsi controller stream size */
+	return mdss_mdp_ctl_intf_event(ctl,
+		MDSS_EVENT_DSI_STREAM_SIZE, NULL, CTL_INTF_EVENT_FLAG_DEFAULT);
+}
+
+/*
+ * Power the panel up for command mode: send LINK_READY, UNBLANK and
+ * PANEL_ON in order (skipped during a resolution switch), enable
+ * tearcheck, register recovery/MDP callbacks and mark both contexts
+ * interactive.  Returns the last failing event's code, 0 on success.
+ */
+static int mdss_mdp_cmd_panel_on(struct mdss_mdp_ctl *ctl,
+	struct mdss_mdp_ctl *sctl)
+{
+	struct mdss_mdp_cmd_ctx *ctx, *sctx = NULL;
+	int rc = 0;
+
+	ctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return -ENODEV;
+	}
+
+	if (sctl)
+		sctx = (struct mdss_mdp_cmd_ctx *) sctl->intf_ctx[MASTER_CTX];
+
+	/* In pingpong split we have single controller, dual context */
+	if (is_pingpong_split(ctl->mfd))
+		sctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[SLAVE_CTX];
+
+	if (!__mdss_mdp_cmd_is_panel_power_on_interactive(ctx)) {
+		/* a resolution switch keeps the link up: skip power events */
+		if (ctl->pending_mode_switch != SWITCH_RESOLUTION) {
+			rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_LINK_READY,
+					NULL, CTL_INTF_EVENT_FLAG_DEFAULT);
+			WARN(rc, "intf %d link ready error (%d)\n",
+					ctl->intf_num, rc);
+
+			rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_UNBLANK,
+					NULL, CTL_INTF_EVENT_FLAG_DEFAULT);
+			WARN(rc, "intf %d unblank error (%d)\n",
+					ctl->intf_num, rc);
+
+			rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_PANEL_ON,
+					NULL, CTL_INTF_EVENT_FLAG_DEFAULT);
+			WARN(rc, "intf %d panel on error (%d)\n",
+					ctl->intf_num, rc);
+
+		}
+
+		rc = mdss_mdp_tearcheck_enable(ctl, true);
+		WARN(rc, "intf %d tearcheck enable error (%d)\n",
+				ctl->intf_num, rc);
+
+		ctx->panel_power_state = MDSS_PANEL_POWER_ON;
+		if (sctx)
+			sctx->panel_power_state = MDSS_PANEL_POWER_ON;
+
+		/* let the DSI driver call back into us for error recovery */
+		mdss_mdp_ctl_intf_event(ctl,
+			MDSS_EVENT_REGISTER_RECOVERY_HANDLER,
+			(void *)&ctx->intf_recovery,
+			CTL_INTF_EVENT_FLAG_DEFAULT);
+
+		mdss_mdp_ctl_intf_event(ctl,
+			MDSS_EVENT_REGISTER_MDP_CALLBACK,
+			(void *)&ctx->intf_mdp_callback,
+			CTL_INTF_EVENT_FLAG_DEFAULT);
+
+		ctx->intf_stopped = 0;
+		if (sctx)
+			sctx->intf_stopped = 0;
+	} else {
+		pr_err("%s: Panel already on\n", __func__);
+	}
+
+	return rc;
+}
+
+/*
+ * This function will be called from the sysfs node to enable and disable the
+ * feature with master ctl only.
+ *
+ * frame_cnt == 0 requests disable; any other value (within range)
+ * requests autorefresh every frame_cnt frames.  The actual hardware
+ * enable/disable happens later in commit context; here we only drive
+ * the autorefresh state machine under autorefresh_lock.
+ * Returns 0 on success or a negative errno.
+ */
+int mdss_mdp_cmd_set_autorefresh_mode(struct mdss_mdp_ctl *mctl, int frame_cnt)
+{
+	int rc = 0;
+	struct mdss_mdp_cmd_ctx *ctx;
+	struct mdss_panel_info *pinfo;
+
+	if (!mctl || !mctl->is_master || !mctl->panel_data) {
+		pr_err("invalid ctl mctl:%pK pdata:%pK\n",
+			mctl, mctl ? mctl->panel_data : 0);
+		return -ENODEV;
+	}
+
+	ctx = mctl->intf_ctx[MASTER_CTX];
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return -ENODEV;
+	}
+
+	/* autorefresh is driven by the hardware TE, so it must be enabled */
+	pinfo = &mctl->panel_data->panel_info;
+	if (!pinfo->mipi.hw_vsync_mode) {
+		pr_err("hw vsync disabled, cannot handle autorefresh\n");
+		return -ENODEV;
+	}
+
+	if (frame_cnt < 0 || frame_cnt >= AUTOREFRESH_MAX_FRAME_CNT) {
+		pr_err("frame cnt %d is out of range (16 bits).\n", frame_cnt);
+		return -EINVAL;
+	}
+
+	if (ctx->intf_stopped) {
+		pr_debug("autorefresh cannot be changed when display is off\n");
+		return -EPERM;
+	}
+
+	mutex_lock(&ctx->autorefresh_lock);
+
+	if (frame_cnt == ctx->autorefresh_frame_cnt) {
+		pr_debug("No change to the refresh count\n");
+		goto exit;
+	}
+
+	MDSS_XLOG(ctx->autorefresh_state,
+		  ctx->autorefresh_frame_cnt, frame_cnt);
+
+	pr_debug("current autorefresh state=%d, frame_cnt: old=%d new=%d\n",
+			ctx->autorefresh_state,
+			ctx->autorefresh_frame_cnt, frame_cnt);
+
+	switch (ctx->autorefresh_state) {
+	case MDP_AUTOREFRESH_OFF:
+		if (frame_cnt == 0) {
+			pr_debug("oops autorefresh is already disabled. We shouldn't get here\n");
+			rc = -EINVAL;
+			goto exit;
+		}
+
+		/*
+		 * actual enable will happen in commit context when
+		 * next update is kicked off.
+		 */
+		ctx->autorefresh_state = MDP_AUTOREFRESH_ON_REQUESTED;
+		ctx->autorefresh_frame_cnt = frame_cnt;
+		mctl->mdata->serialize_wait4pp = true;
+
+		/* Cancel GATE Work Item */
+		if (cancel_work_sync(&ctx->gate_clk_work))
+			pr_debug("%s: gate work canceled\n", __func__);
+
+		/* Cancel OFF Work Item  */
+		if (cancel_delayed_work_sync(&ctx->delayed_off_clk_work))
+			pr_debug("%s: off work canceled\n", __func__);
+		break;
+	case MDP_AUTOREFRESH_ON_REQUESTED:
+		if (frame_cnt == 0) {
+			/* not enabled in hw yet: just revert the request */
+			ctx->autorefresh_state = MDP_AUTOREFRESH_OFF;
+			ctx->autorefresh_frame_cnt = 0;
+			mctl->mdata->serialize_wait4pp = false;
+		} else {
+			ctx->autorefresh_frame_cnt = frame_cnt;
+			mctl->mdata->serialize_wait4pp = true;
+		}
+		break;
+	case MDP_AUTOREFRESH_ON:
+		if (frame_cnt == 0) {
+			/*
+			 * actual disable will happen in commit context when
+			 * next update is kicked off.
+			 */
+			ctx->autorefresh_state = MDP_AUTOREFRESH_OFF_REQUESTED;
+		} else {
+			ctx->autorefresh_frame_cnt = frame_cnt;
+			mctl->mdata->serialize_wait4pp = true;
+		}
+		break;
+	case MDP_AUTOREFRESH_OFF_REQUESTED:
+		if (frame_cnt == 0) {
+			pr_debug("autorefresh off is already requested\n");
+		} else {
+			pr_debug("cancelling autorefresh off request\n");
+			ctx->autorefresh_state = MDP_AUTOREFRESH_ON;
+			ctx->autorefresh_frame_cnt = frame_cnt;
+			mctl->mdata->serialize_wait4pp = true;
+		}
+		break;
+	default:
+		pr_err("invalid autorefresh state\n");
+	}
+
+	MDSS_XLOG(ctx->autorefresh_state,
+		ctx->autorefresh_frame_cnt);
+
+exit:
+	mutex_unlock(&ctx->autorefresh_lock);
+	return rc;
+}
+
+/*
+ * Return the autorefresh frame count currently configured on the master
+ * context, or 0 when autorefresh is off / the context is not ready.
+ */
+int mdss_mdp_cmd_get_autorefresh_mode(struct mdss_mdp_ctl *mctl)
+{
+	struct mdss_mdp_cmd_ctx *ctx = mctl->intf_ctx[MASTER_CTX];
+	int autorefresh_frame_cnt;
+
+	/* check the ctl to make sure the lock was initialized */
+	if (!ctx || !ctx->ctl)
+		return 0;
+
+	mutex_lock(&ctx->autorefresh_lock);
+	autorefresh_frame_cnt = ctx->autorefresh_frame_cnt;
+	mutex_unlock(&ctx->autorefresh_lock);
+
+	/*
+	 * Return the snapshot taken under the lock; re-reading
+	 * ctx->autorefresh_frame_cnt after unlocking would race with a
+	 * concurrent mdss_mdp_cmd_set_autorefresh_mode() and made the
+	 * locked read dead code.
+	 */
+	return autorefresh_frame_cnt;
+}
+
+/*
+ * Called before programming a new frame while autorefresh is active:
+ * clear the EXTERNAL_TE bit (BIT 20) in PP_SYNC_CONFIG_VSYNC so MDP
+ * ignores the panel TE and the next auto-refresh is held off until the
+ * flush bits are set.  Undone by mdss_mdp_cmd_post_programming().
+ */
+static void mdss_mdp_cmd_pre_programming(struct mdss_mdp_ctl *mctl)
+{
+	struct mdss_mdp_cmd_ctx *ctx = mctl->intf_ctx[MASTER_CTX];
+	char __iomem *pp_base;
+	u32 autorefresh_state;
+	u32 cfg;
+
+	if (!mctl->is_master)
+		return;
+
+	mutex_lock(&ctx->autorefresh_lock);
+
+	autorefresh_state = ctx->autorefresh_state;
+	MDSS_XLOG(autorefresh_state);
+	pr_debug("pre_programming state: %d\n", autorefresh_state);
+
+	if ((autorefresh_state == MDP_AUTOREFRESH_ON) ||
+		(autorefresh_state == MDP_AUTOREFRESH_OFF_REQUESTED)) {
+
+		pp_base = mctl->mixer_left->pingpong_base;
+
+		/*
+		 * instruct MDP to ignore the panel TE so the next auto-refresh
+		 * is delayed until flush bits are set.
+		 */
+		cfg = mdss_mdp_pingpong_read(pp_base,
+			MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC);
+		cfg &= ~BIT(20);
+		mdss_mdp_pingpong_write(pp_base,
+			MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC, cfg);
+		ctx->ignore_external_te = true;
+
+	}
+	mutex_unlock(&ctx->autorefresh_lock);
+}
+
+/* this function assumes that autorefresh_lock is held by the caller  */
+static void mdss_mdp_cmd_post_programming(struct mdss_mdp_ctl *mctl)
+{
+	struct mdss_mdp_cmd_ctx *ctx = mctl->intf_ctx[MASTER_CTX];
+	char __iomem *pp_base;
+	u32 cfg;
+
+	/* only the master programs the shared pingpong TE config */
+	if (!mctl->is_master)
+		return;
+
+	/*
+	 * If listening to the external panel TE was disabled
+	 * (this happens when we get a kickoff with
+	 * autorefresh enabled), enable the panel TE back.
+	 */
+	if (ctx->ignore_external_te) {
+
+		MDSS_XLOG(ctx->ignore_external_te);
+		pr_debug("post_programming TE status: %d\n",
+			ctx->ignore_external_te);
+
+		pp_base = mctl->mixer_left->pingpong_base;
+
+		/* enable MDP to listen to the TE (BIT 20 of SYNC_CONFIG) */
+		cfg = mdss_mdp_pingpong_read(pp_base,
+			MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC);
+		cfg |= BIT(20);
+		mdss_mdp_pingpong_write(pp_base,
+			MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC, cfg);
+		ctx->ignore_external_te = false;
+	}
+}
+
+/*
+ * If an autorefresh-triggered transfer is mid-frame (line counter is
+ * between 1 and ROI height), arm the pingpong-done interrupt and wait
+ * for it so the frame finishes before autorefresh is disabled.  On
+ * timeout, tolerate the "already at last line" case; otherwise dump
+ * debug buses.
+ */
+static void mdss_mdp_cmd_wait4_autorefresh_pp(struct mdss_mdp_ctl *ctl)
+{
+	int rc;
+	u32 val, line_out, intr_type = MDSS_MDP_IRQ_TYPE_PING_PONG_COMP;
+	char __iomem *pp_base = ctl->mixer_left->pingpong_base;
+	struct mdss_mdp_cmd_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
+
+	line_out = mdss_mdp_pingpong_read(pp_base, MDSS_MDP_REG_PP_LINE_COUNT);
+
+	MDSS_XLOG(ctl->num, line_out, ctl->mixer_left->roi.h);
+
+	/* a transfer is in flight only while 0 < line_out < roi.h */
+	if ((line_out < ctl->mixer_left->roi.h) && line_out) {
+		reinit_completion(&ctx->autorefresh_ppdone);
+
+		/* enable ping pong done */
+		mdss_mdp_set_intr_callback(intr_type, ctx->current_pp_num,
+					mdss_mdp_cmd_autorefresh_pp_done, ctl);
+		mdss_mdp_irq_enable(intr_type, ctx->current_pp_num);
+
+		/* wait for ping pong done */
+		rc = wait_for_completion_timeout(&ctx->autorefresh_ppdone,
+				KOFF_TIMEOUT);
+		if (rc <= 0) {
+			val = mdss_mdp_pingpong_read(pp_base,
+				MDSS_MDP_REG_PP_LINE_COUNT);
+			if (val == ctl->mixer_left->roi.h) {
+				/* frame actually completed; irq was missed */
+				mdss_mdp_irq_clear(ctl->mdata,
+					MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+					ctx->current_pp_num);
+				mdss_mdp_irq_disable_nosync(intr_type,
+					ctx->current_pp_num);
+				mdss_mdp_set_intr_callback(intr_type,
+					ctx->current_pp_num, NULL, NULL);
+			} else {
+				pr_err("timedout waiting for ctl%d autorefresh pp done\n",
+					ctl->num);
+				MDSS_XLOG(0xbad3);
+				MDSS_XLOG_TOUT_HANDLER("mdp",
+					"vbif", "dbg_bus", "vbif_dbg_bus",
+					"panic");
+			}
+		}
+	}
+}
+
+/*
+ * Interrupt callback for the pingpong AUTO_REF irq: tear down the irq
+ * and its callback, then complete autorefresh_done for any waiter in
+ * mdss_mdp_cmd_wait4_autorefresh_done().  Runs in irq context.
+ */
+static void mdss_mdp_cmd_autorefresh_done(void *arg)
+{
+	struct mdss_mdp_ctl *ctl = arg;
+	struct mdss_mdp_cmd_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
+
+	if (!ctx) {
+		pr_err("%s: invalid ctx\n", __func__);
+		return;
+	}
+
+	mdss_mdp_irq_disable_nosync(MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF,
+		ctx->current_pp_num);
+	mdss_mdp_set_intr_callback_nosync(MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF,
+		ctx->current_pp_num, NULL, NULL);
+
+	MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt), ctx->current_pp_num);
+	complete_all(&ctx->autorefresh_done);
+}
+
+/*
+ * Compute a jiffies timeout for waiting on the autorefresh-done irq:
+ * line_count * frame_cnt line-times, doubled for margin and clamped to
+ * at least CMD_MODE_IDLE_TIMEOUT.
+ *
+ * NOTE(review): the error path returns -EINVAL through a u32 return
+ * type, which a caller would see as a huge timeout value — confirm this
+ * is intended.  Also, pinfo is the address of an embedded member and so
+ * can never be NULL here; only the mixer check is effective.
+ */
+static u32 get_autorefresh_timeout(struct mdss_mdp_ctl *ctl,
+	struct mdss_mdp_cmd_ctx *ctx, u32 frame_cnt)
+{
+	struct mdss_mdp_mixer *mixer;
+	struct mdss_panel_info *pinfo;
+	u32 line_count;
+	u32 fps, v_total;
+	unsigned long autorefresh_timeout;
+
+	pinfo = &ctl->panel_data->panel_info;
+	mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT);
+
+	if (!mixer || !pinfo)
+		return -EINVAL;
+
+	/*
+	 * when ignoring the external TE, the refresh period is set by the
+	 * internal sync counter height rather than the ROI height
+	 */
+	if (!ctx->ignore_external_te)
+		line_count = ctl->mixer_left->roi.h;
+	else
+		line_count = mdss_mdp_pingpong_read(mixer->pingpong_base,
+			MDSS_MDP_REG_PP_SYNC_CONFIG_HEIGHT) & 0xffff;
+
+	fps = mdss_panel_get_framerate(pinfo, FPS_RESOLUTION_HZ);
+	v_total = mdss_panel_get_vtotal(pinfo);
+
+	/*
+	 * calculate the expected delay for the autorefresh to happen,
+	 * this should be:
+	 * autorefresh_done = line_count * frame_cnt * line_time
+	 */
+	frame_cnt *= 1000; /* to use mS */
+	autorefresh_timeout = mult_frac(line_count, frame_cnt,
+		(fps * v_total));
+
+	/* multiply by two to consider worst case scenario */
+	autorefresh_timeout *= 2;
+	autorefresh_timeout = msecs_to_jiffies(autorefresh_timeout);
+
+	pr_debug("lines:%d fps:%d v_total:%d frames:%d timeout=%lu\n",
+		line_count, fps, v_total, frame_cnt, autorefresh_timeout);
+
+	autorefresh_timeout = (autorefresh_timeout > CMD_MODE_IDLE_TIMEOUT) ?
+		autorefresh_timeout : CMD_MODE_IDLE_TIMEOUT;
+
+	return autorefresh_timeout;
+}
+
+/*
+ * Arm the pingpong AUTO_REF irq and wait for the autorefresh-done
+ * signal (RD_PTR reset after autorefresh_frame_cnt frames).  On timeout
+ * re-check the irq status register for a missed interrupt and clean up
+ * by hand; otherwise dump debug buses.
+ */
+static void mdss_mdp_cmd_wait4_autorefresh_done(struct mdss_mdp_ctl *ctl)
+{
+	int rc;
+	u32 val, line_out;
+	char __iomem *pp_base = ctl->mixer_left->pingpong_base;
+	struct mdss_mdp_cmd_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
+	unsigned long flags;
+	unsigned long autorefresh_timeout;
+
+	line_out = mdss_mdp_pingpong_read(pp_base, MDSS_MDP_REG_PP_LINE_COUNT);
+
+	MDSS_XLOG(ctl->num, line_out, ctl->mixer_left->roi.h);
+
+	reinit_completion(&ctx->autorefresh_done);
+
+	/* enable autorefresh done */
+	mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF,
+		ctx->current_pp_num, mdss_mdp_cmd_autorefresh_done, ctl);
+	mdss_mdp_irq_enable(MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF,
+		ctx->current_pp_num);
+
+	/*
+	 * Wait for autorefresh done, note that this interrupt would happen
+	 * once the RD_PTR is reset to init value for the number of frames
+	 * programmed with "autorefresh_frame_cnt", so this wait would take
+	 * one RD_PTR reset, if autorefresh_frame_cnt = 1
+	 * or the number of RD_PTR resets set by "autorefresh_frame_cnt".
+	 */
+	autorefresh_timeout = get_autorefresh_timeout(ctl,
+		ctx, ctx->autorefresh_frame_cnt);
+	rc = wait_for_completion_timeout(&ctx->autorefresh_done,
+			autorefresh_timeout);
+
+	if (rc <= 0) {
+		u32 status, mask;
+
+		/* the irq may have fired without being serviced: re-check */
+		mask = mdss_mdp_get_irq_mask(
+				MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF,
+				ctx->current_pp_num);
+		status = mask & readl_relaxed(ctl->mdata->mdp_base +
+				MDSS_MDP_REG_INTR_STATUS);
+
+		if (status) {
+			pr_warn("autorefresh done but irq not triggered\n");
+			mdss_mdp_irq_clear(ctl->mdata,
+				MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF,
+				ctx->current_pp_num);
+			local_irq_save(flags);
+			mdss_mdp_irq_disable_nosync(
+				MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF,
+				ctx->current_pp_num);
+			mdss_mdp_set_intr_callback_nosync(
+				MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF,
+				ctx->current_pp_num, NULL, NULL);
+			local_irq_restore(flags);
+			rc = 1;
+		}
+	}
+
+	if (rc <= 0) {
+		val = mdss_mdp_pingpong_read(pp_base,
+			MDSS_MDP_REG_PP_LINE_COUNT);
+
+		pr_err("timedout waiting for ctl%d autorefresh done line_cnt:%d frames:%d\n",
+			ctl->num, val, ctx->autorefresh_frame_cnt);
+		MDSS_XLOG(0xbad4, val);
+		MDSS_XLOG_TOUT_HANDLER("mdp", "dsi0_ctrl", "dsi0_phy",
+			"dsi1_ctrl", "dsi1_phy", "vbif", "vbif_nrt",
+			"dbg_bus", "vbif_dbg_bus", "panic");
+	}
+}
+
+/* caller needs to hold autorefresh_lock before calling this function */
+static int mdss_mdp_disable_autorefresh(struct mdss_mdp_ctl *ctl,
+	struct mdss_mdp_ctl *sctl)
+{
+	u32 cfg;
+	struct mdss_mdp_cmd_ctx *ctx;
+	char __iomem *pp_base = ctl->mixer_left->pingpong_base;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	ctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return -ENODEV;
+	}
+
+	MDSS_XLOG(ctx->autorefresh_state, ctx->autorefresh_frame_cnt);
+
+	/*
+	 * This can happen if driver gets sysfs request to enable autorefresh,
+	 * and a CMD_STOP is received before autorefresh is turned on by
+	 * the atomic commit.
+	 */
+	if (ctx->autorefresh_state == MDP_AUTOREFRESH_ON_REQUESTED) {
+		/* hw was never programmed: just reset the sw state */
+		ctx->autorefresh_state = MDP_AUTOREFRESH_OFF;
+		ctx->autorefresh_frame_cnt = 0;
+		return 0;
+	}
+
+	pr_debug("%pS->%s: disabling autorefresh\n",
+		__builtin_return_address(0), __func__);
+
+	/*
+	 * Wait for autorefresh done before disabling it.
+	 * This is intended for debug only; if enabled it would cause a large
+	 * delay during disable due RD_PTR is program to wait for
+	 * wrapping around, which can take hundreds of ms
+	 */
+	if (mdata->wait4autorefresh)
+		mdss_mdp_cmd_wait4_autorefresh_done(ctl);
+
+	/*
+	 * To disable auto-refresh we need to make sure that no transfer
+	 * is on-going when we write the bit to disable it.
+	 * But since when autorefresh is enabled the HW automatically
+	 * will trigger a transfer whenever external TE is received and
+	 * the hw frame_cnt matches the programmed autorefresh frame_cnt,
+	 * in order to have enough time to disable the feature we will instruct
+	 * MDP to ignore the panel TE first; when doing this, the hw frame_cnt
+	 * will be increased only when the internal counter wraps-around
+	 * (instead of each time that the external panel TE is genarated),
+	 * this gives us enough margin to disable autorefresh.
+	 */
+	cfg = mdss_mdp_pingpong_read(pp_base,
+				     MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC);
+	cfg &= ~BIT(20);
+	mdss_mdp_pingpong_write(pp_base,
+				MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC, cfg);
+	MDSS_XLOG(cfg);
+
+	/* wait for previous transfer to finish */
+	mdss_mdp_cmd_wait4_autorefresh_pp(ctl);
+	if (sctl)
+		mdss_mdp_cmd_wait4_autorefresh_pp(sctl);
+
+	/* disable autorefresh */
+	mdss_mdp_pingpong_write(pp_base, MDSS_MDP_REG_PP_AUTOREFRESH_CONFIG, 0);
+
+	/* pingpong split shares the slave pingpong: clear it too */
+	if (is_pingpong_split(ctl->mfd))
+		mdss_mdp_pingpong_write(mdata->slave_pingpong_base,
+				MDSS_MDP_REG_PP_AUTOREFRESH_CONFIG, 0);
+
+	ctx->autorefresh_state = MDP_AUTOREFRESH_OFF;
+	ctx->autorefresh_frame_cnt = 0;
+
+	/* enable MDP to listen to the TE */
+	cfg = mdss_mdp_pingpong_read(pp_base,
+				     MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC);
+	cfg |= BIT(20);
+	mdss_mdp_pingpong_write(pp_base,
+				MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC, cfg);
+
+	ctl->mdata->serialize_wait4pp = false;
+	return 0;
+}
+
+
+/*
+ * Start the frame transfer: either program the pingpong autorefresh
+ * config (hardware kicks off on TE, every autorefresh_frame_cnt frames)
+ * or write CTL_START for a one-shot software kickoff.
+ */
+static void __mdss_mdp_kickoff(struct mdss_mdp_ctl *ctl,
+	struct mdss_mdp_cmd_ctx *ctx)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	bool is_pp_split = is_pingpong_split(ctl->mfd);
+
+	MDSS_XLOG(ctx->autorefresh_state);
+
+	if ((ctx->autorefresh_state == MDP_AUTOREFRESH_ON_REQUESTED) ||
+		(ctx->autorefresh_state == MDP_AUTOREFRESH_ON)) {
+
+		pr_debug("enabling autorefresh for every %d frames state %d\n",
+			ctx->autorefresh_frame_cnt, ctx->autorefresh_state);
+
+		/* Program HW to take care of Kickoff */
+		mdss_mdp_pingpong_write(ctl->mixer_left->pingpong_base,
+			MDSS_MDP_REG_PP_AUTOREFRESH_CONFIG,
+			BIT(31) | ctx->autorefresh_frame_cnt);
+
+		/* pingpong split shares the slave pingpong: program it too */
+		if (is_pp_split)
+			mdss_mdp_pingpong_write(mdata->slave_pingpong_base,
+				MDSS_MDP_REG_PP_AUTOREFRESH_CONFIG,
+				BIT(31) | ctx->autorefresh_frame_cnt);
+
+		MDSS_XLOG(0x11, ctx->autorefresh_frame_cnt,
+			ctx->autorefresh_state, is_pp_split);
+		ctx->autorefresh_state = MDP_AUTOREFRESH_ON;
+
+	} else {
+		/* SW Kickoff */
+		mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_START, 1);
+		MDSS_XLOG(0x11, ctx->autorefresh_state);
+	}
+}
+
+/*
+ * There are 3 partial update possibilities
+ * left only ==> enable left pingpong_done
+ * left + right ==> enable both pingpong_done
+ * right only ==> enable right pingpong_done
+ *
+ * notification is triggered at pingpong_done which will
+ * signal timeline to release source buffer
+ *
+ * for left+right case, pingpong_done is enabled for both and
+ * only the last pingpong_done should trigger the notification
+ */
+static int mdss_mdp_cmd_kickoff(struct mdss_mdp_ctl *ctl, void *arg)
+{
+	struct mdss_mdp_ctl *sctl = NULL, *mctl = ctl;
+	struct mdss_mdp_cmd_ctx *ctx, *sctx = NULL;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	ctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return -ENODEV;
+	}
+
+	/* reject kickoffs once the interface has been stopped */
+	if (ctx->intf_stopped) {
+		pr_err("ctx=%d stopped already\n", ctx->current_pp_num);
+		return -EPERM;
+	}
+
+	/* resolve the master ctl and, for split display, the split ctl */
+	if (!ctl->is_master) {
+		mctl = mdss_mdp_get_main_ctl(ctl);
+	} else {
+		sctl = mdss_mdp_get_split_ctl(ctl);
+		if (sctl && (sctl->roi.w == 0 || sctl->roi.h == 0)) {
+			/* left update only */
+			sctl = NULL;
+		}
+	}
+
+	mdss_mdp_ctl_perf_set_transaction_status(ctl,
+		PERF_HW_MDP_STATE, PERF_STATUS_BUSY);
+
+	if (sctl) {
+		sctx = (struct mdss_mdp_cmd_ctx *) sctl->intf_ctx[MASTER_CTX];
+		mdss_mdp_ctl_perf_set_transaction_status(sctl,
+			PERF_HW_MDP_STATE, PERF_STATUS_BUSY);
+	}
+
+	/*
+	 * Turn on the panel, if not already. This is because the panel is
+	 * turned on only when we send the first frame and not during cmd
+	 * start. This is to ensure that no artifacts are seen on the panel.
+	 */
+	if (__mdss_mdp_cmd_is_panel_power_off(ctx))
+		mdss_mdp_cmd_panel_on(ctl, sctl);
+
+	/* select which pingpong block completion to wait on for this frame */
+	ctx->current_pp_num = ctx->default_pp_num;
+	if (sctx)
+		sctx->current_pp_num = sctx->default_pp_num;
+
+	if (__mdss_mdp_cmd_is_aux_pp_needed(mdata, mctl))
+		ctx->current_pp_num = ctx->aux_pp_num;
+
+	MDSS_XLOG(ctl->num, ctx->current_pp_num,
+		ctl->roi.x, ctl->roi.y, ctl->roi.w, ctl->roi.h);
+
+	/* account for one more outstanding kickoff per context */
+	atomic_inc(&ctx->koff_cnt);
+	if (sctx)
+		atomic_inc(&sctx->koff_cnt);
+
+	trace_mdp_cmd_kickoff(ctl->num, atomic_read(&ctx->koff_cnt));
+
+	/*
+	 * Call state machine with kickoff event, we just do it for
+	 * current CTL, but internally state machine will check and
+	 * if this is a dual dsi, it will enable the power resources
+	 * for both DSIs
+	 */
+	mdss_mdp_resource_control(ctl, MDP_RSRC_CTL_EVENT_KICKOFF);
+
+	if (!ctl->is_master)
+		mctl = mdss_mdp_get_main_ctl(ctl);
+	mdss_mdp_cmd_dsc_reconfig(mctl);
+
+	mdss_mdp_cmd_set_partial_roi(ctl);
+
+	/*
+	 * tx dcs command if had any
+	 */
+	mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_DSI_CMDLIST_KOFF, NULL,
+		CTL_INTF_EVENT_FLAG_DEFAULT);
+
+	mdss_mdp_cmd_set_stream_size(ctl);
+
+	mdss_mdp_cmd_set_sync_ctx(ctl, sctl);
+
+	/* autorefresh state transitions are serialized by this lock */
+	mutex_lock(&ctx->autorefresh_lock);
+	if (ctx->autorefresh_state == MDP_AUTOREFRESH_OFF_REQUESTED) {
+		pr_debug("%s: disable autorefresh ctl%d\n", __func__, ctl->num);
+		mdss_mdp_disable_autorefresh(ctl, sctl);
+	}
+
+	/* arm pingpong-done interrupt(s) before triggering the transfer */
+	mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+		ctx->current_pp_num, mdss_mdp_cmd_pingpong_done, ctl);
+	mdss_mdp_irq_enable(MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+			ctx->current_pp_num);
+	if (sctx) {
+		mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+			sctx->current_pp_num, mdss_mdp_cmd_pingpong_done, sctl);
+		mdss_mdp_irq_enable(MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+			sctx->current_pp_num);
+	}
+
+	mdss_mdp_ctl_perf_set_transaction_status(ctl,
+		PERF_SW_COMMIT_STATE, PERF_STATUS_DONE);
+	if (sctl) {
+		mdss_mdp_ctl_perf_set_transaction_status(sctl,
+			PERF_SW_COMMIT_STATE, PERF_STATUS_DONE);
+	}
+
+	/* lineptr irq is only kept enabled for full frame updates */
+	if (mdss_mdp_is_lineptr_supported(ctl)) {
+		if (mdss_mdp_is_full_frame_update(ctl))
+			mdss_mdp_cmd_lineptr_ctrl(ctl, true);
+		else if (ctx->lineptr_enabled)
+			mdss_mdp_cmd_lineptr_ctrl(ctl, false);
+	}
+
+	/* Kickoff */
+	__mdss_mdp_kickoff(ctl, ctx);
+
+	mdss_mdp_cmd_post_programming(ctl);
+
+	/*
+	 * If auto-refresh is enabled, wait for an autorefresh done,
+	 * to make sure configuration has taken effect.
+	 * Do this after post-programming, so TE is enabled.
+	 */
+	if (ctx->autorefresh_state == MDP_AUTOREFRESH_ON)
+		mdss_mdp_cmd_wait4_autorefresh_done(ctl);
+
+	mb(); /* make sure everything is written before enable */
+	mutex_unlock(&ctx->autorefresh_lock);
+
+	MDSS_XLOG(ctl->num, ctx->current_pp_num,
+		sctx ? sctx->current_pp_num : -1, atomic_read(&ctx->koff_cnt));
+	return 0;
+}
+
+/*
+ * mdss_mdp_cmd_restore() - re-arm tearcheck after a restore/resume
+ * @ctl: display controller to restore
+ * @locked: passed through to the tearcheck setup helper
+ *
+ * Reprograms the tearcheck logic for the master context and, for
+ * pingpong-split or dual-LM dual-display topologies, for the slave
+ * context too, then re-enables tearcheck. Setup failures are only
+ * warned about, matching the original behavior.
+ *
+ * Return: 0 on success, -EINVAL if @ctl is NULL, -ENODEV if no master
+ * context is attached (previously this case dereferenced a NULL ctx).
+ */
+int mdss_mdp_cmd_restore(struct mdss_mdp_ctl *ctl, bool locked)
+{
+	struct mdss_mdp_cmd_ctx *ctx, *sctx = NULL;
+
+	if (!ctl)
+		return -EINVAL;
+
+	pr_debug("%s: called for ctl%d\n", __func__, ctl->num);
+
+	ctx = (struct mdss_mdp_cmd_ctx *)ctl->intf_ctx[MASTER_CTX];
+	if (!ctx) {
+		pr_err("%s: invalid master ctx for ctl%d\n", __func__,
+			ctl->num);
+		return -ENODEV;
+	}
+
+	if (is_pingpong_split(ctl->mfd)) {
+		sctx = (struct mdss_mdp_cmd_ctx *)ctl->intf_ctx[SLAVE_CTX];
+	} else if (ctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
+		struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
+
+		if (sctl)
+			sctx = (struct mdss_mdp_cmd_ctx *)
+					sctl->intf_ctx[MASTER_CTX];
+	}
+
+	if (mdss_mdp_cmd_tearcheck_setup(ctx, locked)) {
+		pr_warn("%s: ctx%d tearcheck setup failed\n", __func__,
+			ctx->current_pp_num);
+	} else {
+		/* only enable tearcheck once both contexts are programmed */
+		if (sctx && mdss_mdp_cmd_tearcheck_setup(sctx, locked))
+			pr_warn("%s: ctx%d tearcheck setup failed\n", __func__,
+				sctx->current_pp_num);
+		else
+			mdss_mdp_tearcheck_enable(ctl, true);
+	}
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_cmd_ctx_stop() - tear down one command mode context
+ * @ctl: controlling ctl for this context
+ * @ctx: the context being stopped (master or pingpong-split slave)
+ * @panel_power_state: power state the panel is transitioning to
+ *
+ * Blocks further kickoffs, drains pending rd-ptr waits, force-disables
+ * any leftover vsync/lineptr interrupts, unregisters the recovery and
+ * MDP callbacks, releases MDP/DSI resources and, unless the panel is
+ * staying on, clears the interrupt callbacks and wipes the context.
+ *
+ * Return: always 0.
+ */
+int mdss_mdp_cmd_ctx_stop(struct mdss_mdp_ctl *ctl,
+		struct mdss_mdp_cmd_ctx *ctx, int panel_power_state)
+{
+	/* NOTE(review): sctx is computed below but never used here */
+	struct mdss_mdp_cmd_ctx *sctx = NULL;
+	struct mdss_mdp_ctl *sctl = NULL;
+
+	sctl = mdss_mdp_get_split_ctl(ctl);
+	if (sctl)
+		sctx = (struct mdss_mdp_cmd_ctx *) sctl->intf_ctx[MASTER_CTX];
+
+	/* intf stopped,  no more kickoff */
+	ctx->intf_stopped = 1;
+
+	/* Make sure any rd ptr for dsi callback is done before disable vsync */
+	if (is_pingpong_split(ctl->mfd)) {
+		pr_debug("%s will wait for rd ptr:%d\n", __func__,
+			atomic_read(&ctx->rdptr_cnt));
+		MDSS_XLOG(atomic_read(&ctx->rdptr_cnt));
+		mdss_mdp_cmd_wait4readptr(ctx);
+	}
+
+	/*
+	 * if any vsyncs are still enabled, loop until the refcount
+	 * goes to zero, so the rd ptr interrupt is disabled.
+	 * Ideally this shouldn't be the case since vsync handlers
+	 * has been flushed by now, so issue a warning in case
+	 * that we hit this condition.
+	 */
+	if (ctx->vsync_irq_cnt) {
+		WARN(1, "vsync still enabled\n");
+		while (mdss_mdp_setup_vsync(ctx, false))
+			;
+	}
+	if (ctx->lineptr_irq_cnt) {
+		WARN(1, "lineptr irq still enabled\n");
+		while (mdss_mdp_setup_lineptr(ctx, false))
+			;
+	}
+
+	/* passing NULL unregisters the recovery/MDP callbacks */
+	if (!ctl->pending_mode_switch) {
+		mdss_mdp_ctl_intf_event(ctl,
+			MDSS_EVENT_REGISTER_RECOVERY_HANDLER,
+			NULL, CTL_INTF_EVENT_FLAG_DEFAULT);
+
+		mdss_mdp_ctl_intf_event(ctl,
+			MDSS_EVENT_REGISTER_MDP_CALLBACK,
+			NULL, CTL_INTF_EVENT_FLAG_DEFAULT);
+	}
+
+	/* shut down the MDP/DSI resources if still enabled */
+	mdss_mdp_resource_control(ctl, MDP_RSRC_CTL_EVENT_STOP);
+
+	/* wait for any in-flight pp-done work before touching state */
+	flush_work(&ctx->pp_done_work);
+
+	if (mdss_panel_is_power_off(panel_power_state) ||
+	    mdss_panel_is_power_on_ulp(panel_power_state))
+		mdss_mdp_tearcheck_enable(ctl, false);
+
+	/* panel stays on (e.g. LP1): keep the context contents around */
+	if (mdss_panel_is_power_on(panel_power_state)) {
+		pr_debug("%s: intf stopped with panel on\n", __func__);
+		return 0;
+	}
+
+	mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_TYPE_PING_PONG_RD_PTR,
+		ctx->default_pp_num, NULL, NULL);
+	mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_TYPE_PING_PONG_WR_PTR,
+		ctx->default_pp_num, NULL, NULL);
+	mdss_mdp_set_intr_callback_nosync(MDSS_MDP_IRQ_TYPE_PING_PONG_COMP,
+		ctx->default_pp_num, NULL, NULL);
+
+	/* wipe the context; re-set intf_stopped since memset cleared it */
+	memset(ctx, 0, sizeof(*ctx));
+	/* intf stopped,  no more kickoff */
+	ctx->intf_stopped = 1;
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_cmd_intfs_stop() - stop the cmd context(s) for a session
+ * @ctl: display controller
+ * @session: zero-based cmd session index (intf number - INTF1)
+ * @panel_power_state: power state the panel is transitioning to
+ *
+ * Stops the master context and, for pingpong split, the slave context
+ * of the following session as well.
+ *
+ * Return: 0 on success (including an out-of-range session, matching the
+ * original behavior), -ENODEV when a context is missing or unused.
+ * Fix: guard against a NULL intf_ctx pointer before reading ref_cnt.
+ */
+static int mdss_mdp_cmd_intfs_stop(struct mdss_mdp_ctl *ctl, int session,
+	int panel_power_state)
+{
+	struct mdss_mdp_cmd_ctx *ctx;
+
+	if (session >= MAX_SESSIONS)
+		return 0;
+
+	ctx = ctl->intf_ctx[MASTER_CTX];
+	if (!ctx || !ctx->ref_cnt) {
+		pr_err("invalid ctx session: %d\n", session);
+		return -ENODEV;
+	}
+
+	mdss_mdp_cmd_ctx_stop(ctl, ctx, panel_power_state);
+
+	if (is_pingpong_split(ctl->mfd)) {
+		session += 1;
+
+		if (session >= MAX_SESSIONS)
+			return 0;
+
+		ctx = ctl->intf_ctx[SLAVE_CTX];
+		if (!ctx || !ctx->ref_cnt) {
+			pr_err("invalid ctx session: %d\n", session);
+			return -ENODEV;
+		}
+		mdss_mdp_cmd_ctx_stop(ctl, ctx, panel_power_state);
+	}
+	pr_debug("%s:-\n", __func__);
+	return 0;
+}
+
+/*
+ * mdss_mdp_cmd_stop_sub() - per-ctl teardown helper for cmd mode stop
+ * @ctl: display controller being stopped
+ * @panel_power_state: power state the panel is transitioning to
+ *
+ * Removes all registered vsync handlers, turns off the lineptr irq when
+ * supported, then stops the cmd context(s) of this ctl's session.
+ *
+ * Return: result of mdss_mdp_cmd_intfs_stop(), -ENODEV with no ctx.
+ */
+static int mdss_mdp_cmd_stop_sub(struct mdss_mdp_ctl *ctl,
+		int panel_power_state)
+{
+	struct mdss_mdp_cmd_ctx *ctx;
+	struct mdss_mdp_vsync_handler *tmp, *handle;
+	int session;
+
+	ctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return -ENODEV;
+	}
+
+	/* _safe variant: the remove callback unlinks entries as we walk */
+	list_for_each_entry_safe(handle, tmp, &ctx->vsync_handlers, list)
+		mdss_mdp_cmd_remove_vsync_handler(ctl, handle);
+	if (mdss_mdp_is_lineptr_supported(ctl))
+		mdss_mdp_cmd_lineptr_ctrl(ctl, false);
+	MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt), XLOG_FUNC_ENTRY);
+
+	/* Command mode is supported only starting at INTF1 */
+	session = ctl->intf_num - MDSS_MDP_INTF1;
+	return mdss_mdp_cmd_intfs_stop(ctl, session, panel_power_state);
+}
+
+/*
+ * mdss_mdp_cmd_stop() - stop/transition the command mode interface
+ * @ctl: display controller
+ * @panel_power_state: target panel power state (off, LP1, LP2/ULP, ...)
+ *
+ * Drives all transitions out of the current panel power state:
+ * - to OFF: stop interfaces, send blank/panel-off events, detach ops
+ * - interactive to low power: send panel events; stop clocks for ULP
+ * - LP -> ULP: stop interface clocks
+ * - ULP -> LP: just re-register callbacks and re-enable tearcheck
+ *
+ * Fixes: the slave ctl's stop result is now captured in @ret (it was
+ * previously discarded, so slave failures went undetected), and the
+ * WARN messages name the event that actually failed.
+ *
+ * Return: 0 on success or a negative error code.
+ */
+int mdss_mdp_cmd_stop(struct mdss_mdp_ctl *ctl, int panel_power_state)
+{
+	struct mdss_mdp_cmd_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
+	struct mdss_mdp_cmd_ctx *sctx = NULL;
+	struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
+	bool panel_off = false;
+	bool turn_off_clocks = false;
+	bool send_panel_events = false;
+	int ret = 0;
+
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return -ENODEV;
+	}
+
+	if (__mdss_mdp_cmd_is_panel_power_off(ctx)) {
+		pr_debug("%s: panel already off\n", __func__);
+		return 0;
+	}
+
+	if (ctx->panel_power_state == panel_power_state) {
+		pr_debug("%s: no transition needed %d --> %d\n", __func__,
+			ctx->panel_power_state, panel_power_state);
+		return 0;
+	}
+
+	pr_debug("%s: transition from %d --> %d\n", __func__,
+		ctx->panel_power_state, panel_power_state);
+
+	if (sctl)
+		sctx = (struct mdss_mdp_cmd_ctx *) sctl->intf_ctx[MASTER_CTX];
+
+	MDSS_XLOG(ctx->panel_power_state, panel_power_state);
+
+	mutex_lock(&ctl->offlock);
+	mutex_lock(&cmd_off_mtx);
+	if (mdss_panel_is_power_off(panel_power_state)) {
+		/* Transition to display off */
+		send_panel_events = true;
+		turn_off_clocks = true;
+		panel_off = true;
+	} else if (__mdss_mdp_cmd_is_panel_power_on_interactive(ctx)) {
+		/*
+		 * If we are transitioning from interactive to low
+		 * power, then we need to send events to the interface
+		 * so that the panel can be configured in low power
+		 * mode.
+		 */
+		send_panel_events = true;
+		if (mdss_panel_is_power_on_ulp(panel_power_state))
+			turn_off_clocks = true;
+	} else {
+		/* Transitions between low power and ultra low power */
+		if (mdss_panel_is_power_on_ulp(panel_power_state)) {
+			/*
+			 * If we are transitioning from low power to ultra low
+			 * power mode, no more display updates are expected.
+			 * Turn off the interface clocks.
+			 */
+			pr_debug("%s: turn off clocks\n", __func__);
+			turn_off_clocks = true;
+		} else {
+			/*
+			 * Transition from ultra low power to low power does
+			 * not require any special handling. Just rest the
+			 * intf_stopped flag so that the clocks would
+			 * get turned on when the first update comes.
+			 */
+			pr_debug("%s: reset intf_stopped flag.\n", __func__);
+			mdss_mdp_ctl_intf_event(ctl,
+				MDSS_EVENT_REGISTER_RECOVERY_HANDLER,
+				(void *)&ctx->intf_recovery,
+				CTL_INTF_EVENT_FLAG_DEFAULT);
+
+			mdss_mdp_ctl_intf_event(ctl,
+				MDSS_EVENT_REGISTER_MDP_CALLBACK,
+				(void *)&ctx->intf_mdp_callback,
+				CTL_INTF_EVENT_FLAG_DEFAULT);
+
+			ctx->intf_stopped = 0;
+			if (sctx)
+				sctx->intf_stopped = 0;
+			/*
+			 * Tearcheck was disabled while entering LP2 state.
+			 * Enable it back to allow updates in LP1 state.
+			 */
+			mdss_mdp_tearcheck_enable(ctl, true);
+			goto end;
+		}
+	}
+
+	if (!turn_off_clocks)
+		goto panel_events;
+
+	if (ctl->pending_mode_switch)
+		send_panel_events = false;
+
+	pr_debug("%s: turn off interface clocks\n", __func__);
+	ret = mdss_mdp_cmd_stop_sub(ctl, panel_power_state);
+	if (IS_ERR_VALUE(ret)) {
+		pr_err("%s: unable to stop interface: %d\n",
+				__func__, ret);
+		goto end;
+	}
+
+	if (sctl) {
+		/* capture the slave result; it was previously discarded */
+		ret = mdss_mdp_cmd_stop_sub(sctl, panel_power_state);
+		if (IS_ERR_VALUE(ret)) {
+			pr_err("%s: unable to stop slave intf: %d\n",
+					__func__, ret);
+			goto end;
+		}
+	}
+
+panel_events:
+	if ((!is_panel_split(ctl->mfd) || is_pingpong_split(ctl->mfd) ||
+	    (is_panel_split(ctl->mfd) && sctl)) && send_panel_events) {
+		pr_debug("%s: send panel events\n", __func__);
+		ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_BLANK,
+				(void *) (long int) panel_power_state,
+				CTL_INTF_EVENT_FLAG_DEFAULT);
+		WARN(ret, "intf %d blank error (%d)\n", ctl->intf_num, ret);
+
+		ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_PANEL_OFF,
+				(void *) (long int) panel_power_state,
+				CTL_INTF_EVENT_FLAG_DEFAULT);
+		WARN(ret, "intf %d panel off error (%d)\n",
+				ctl->intf_num, ret);
+	}
+
+	if (!panel_off) {
+		pr_debug("%s: cmd_stop with panel always on\n", __func__);
+		goto end;
+	}
+
+	/* full power off: detach the cmd mode ops from this ctl */
+	pr_debug("%s: turn off panel\n", __func__);
+	ctl->intf_ctx[MASTER_CTX] = NULL;
+	ctl->intf_ctx[SLAVE_CTX] = NULL;
+	ctl->ops.stop_fnc = NULL;
+	ctl->ops.display_fnc = NULL;
+	ctl->ops.wait_pingpong = NULL;
+	ctl->ops.add_vsync_handler = NULL;
+	ctl->ops.remove_vsync_handler = NULL;
+	ctl->ops.reconfigure = NULL;
+
+end:
+	if (!IS_ERR_VALUE(ret)) {
+		/* local name differs from outer sctx to avoid shadowing */
+		struct mdss_mdp_cmd_ctx *pp_sctx = NULL;
+
+		ctx->panel_power_state = panel_power_state;
+		/* In pingpong split we have single controller, dual context */
+		if (is_pingpong_split(ctl->mfd))
+			pp_sctx = (struct mdss_mdp_cmd_ctx *)
+					ctl->intf_ctx[SLAVE_CTX];
+		if (pp_sctx)
+			pp_sctx->panel_power_state = panel_power_state;
+	}
+
+	MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt), XLOG_FUNC_EXIT);
+	mutex_unlock(&cmd_off_mtx);
+	mutex_unlock(&ctl->offlock);
+	pr_debug("%s:-\n", __func__);
+
+	return ret;
+}
+
+/*
+ * early_wakeup_work() - worker that votes MDP resources awake early
+ *
+ * Scheduled from mdss_mdp_cmd_early_wake_up() on input activity so the
+ * resource state machine can power up before the next frame arrives.
+ * NOTE(review): container_of() on a valid work pointer cannot yield
+ * NULL, so the !ctx check below is effectively dead defensive code.
+ */
+static void early_wakeup_work(struct work_struct *work)
+{
+	int rc = 0;
+	struct mdss_mdp_cmd_ctx *ctx =
+		container_of(work, typeof(*ctx), early_wakeup_clk_work);
+	struct mdss_mdp_ctl *ctl;
+
+	if (!ctx) {
+		pr_err("%s: invalid ctx\n", __func__);
+		return;
+	}
+
+	ATRACE_BEGIN(__func__);
+	ctl = ctx->ctl;
+
+	if (!ctl) {
+		pr_err("%s: invalid ctl\n", __func__);
+		goto fail;
+	}
+
+	rc = mdss_mdp_resource_control(ctl, MDP_RSRC_CTL_EVENT_EARLY_WAKE_UP);
+	if (rc)
+		pr_err("%s: failed to control resources\n", __func__);
+
+fail:
+	ATRACE_END(__func__);
+}
+
+/*
+ * mdss_mdp_cmd_early_wake_up() - input-event hint to wake up resources
+ * @ctl: display controller receiving the input event
+ *
+ * Rate-limited to one request per INPUT_EVENT_HANDLER_DELAY_USECS.
+ * The actual resource vote is deferred to early_wakeup_work() because
+ * this runs in interrupt context and the work may cancel queued items.
+ *
+ * Return: always 0.
+ */
+static int mdss_mdp_cmd_early_wake_up(struct mdss_mdp_ctl *ctl)
+{
+	u64 curr_time;
+	struct mdss_mdp_cmd_ctx *ctx;
+
+	curr_time = ktime_to_us(ktime_get());
+
+	/* debounce: ignore events arriving within one vsync of the last */
+	if ((curr_time - ctl->last_input_time) <
+			INPUT_EVENT_HANDLER_DELAY_USECS)
+		return 0;
+	ctl->last_input_time = curr_time;
+
+	ctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[MASTER_CTX];
+	/*
+	 * Early wake up event is called from an interrupt context and
+	 * involves cancelling queued work items. So this will be
+	 * scheduled in a work item.
+	 * Only schedule if the interface has not been stopped.
+	 */
+	if (ctx && !ctx->intf_stopped)
+		schedule_work(&ctx->early_wakeup_clk_work);
+	return 0;
+}
+
+/*
+ * mdss_mdp_cmd_ctx_setup() - initialize one command mode context
+ * @ctl: owning display controller
+ * @ctx: context to initialize
+ * @default_pp_num: pingpong block used for normal updates
+ * @aux_pp_num: alternate pingpong block (right mixer on split-LM)
+ * @pingpong_split_slave: true when this ctx drives the slave pingpong
+ *
+ * Initializes all locks, waitqueues, completions, work items and lists,
+ * registers the rd-ptr/wr-ptr interrupt callbacks and programs the
+ * tearcheck logic.
+ *
+ * Return: 0 on success, or the tearcheck setup error code.
+ */
+static int mdss_mdp_cmd_ctx_setup(struct mdss_mdp_ctl *ctl,
+	struct mdss_mdp_cmd_ctx *ctx, int default_pp_num, int aux_pp_num,
+	bool pingpong_split_slave)
+{
+	int ret = 0;
+
+	/*
+	 * Initialize the mutex before the ctl is assigned,
+	 * so we can prevent any race condition with the
+	 * initialization of the the mutex and the autorefresh
+	 * sysfs.
+	 */
+	mutex_init(&ctx->autorefresh_lock);
+
+	ctx->ctl = ctl;
+	ctx->default_pp_num = default_pp_num;
+	ctx->aux_pp_num = aux_pp_num;
+	ctx->pingpong_split_slave = pingpong_split_slave;
+	ctx->pp_timeout_report_cnt = 0;
+	init_waitqueue_head(&ctx->pp_waitq);
+	init_waitqueue_head(&ctx->rdptr_waitq);
+	init_completion(&ctx->stop_comp);
+	init_completion(&ctx->autorefresh_ppdone);
+	init_completion(&ctx->rdptr_done);
+	init_completion(&ctx->pp_done);
+	init_completion(&ctx->autorefresh_done);
+	spin_lock_init(&ctx->clk_lock);
+	spin_lock_init(&ctx->koff_lock);
+	mutex_init(&ctx->clk_mtx);
+	mutex_init(&ctx->mdp_rdptr_lock);
+	mutex_init(&ctx->mdp_wrptr_lock);
+	INIT_WORK(&ctx->gate_clk_work, clk_ctrl_gate_work);
+	INIT_DELAYED_WORK(&ctx->delayed_off_clk_work,
+		clk_ctrl_delayed_off_work);
+	INIT_WORK(&ctx->pp_done_work, pingpong_done_work);
+	INIT_WORK(&ctx->early_wakeup_clk_work, early_wakeup_work);
+	atomic_set(&ctx->pp_done_cnt, 0);
+	ctx->autorefresh_state = MDP_AUTOREFRESH_OFF;
+	ctx->autorefresh_frame_cnt = 0;
+	INIT_LIST_HEAD(&ctx->vsync_handlers);
+	INIT_LIST_HEAD(&ctx->lineptr_handlers);
+
+	/* callbacks invoked by the DSI layer for recovery/MDP events */
+	ctx->intf_recovery.fxn = mdss_mdp_cmd_intf_recovery;
+	ctx->intf_recovery.data = ctx;
+
+	ctx->intf_mdp_callback.fxn = mdss_mdp_cmd_intf_callback;
+	ctx->intf_mdp_callback.data = ctx;
+
+	ctx->intf_stopped = 0;
+
+	pr_debug("%s: ctx=%pK num=%d aux=%d\n", __func__, ctx,
+		default_pp_num, aux_pp_num);
+	MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt));
+
+	mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_TYPE_PING_PONG_RD_PTR,
+		ctx->default_pp_num, mdss_mdp_cmd_readptr_done, ctl);
+
+	mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_TYPE_PING_PONG_WR_PTR,
+		ctx->default_pp_num, mdss_mdp_cmd_lineptr_done, ctl);
+
+	ret = mdss_mdp_cmd_tearcheck_setup(ctx, false);
+	if (ret)
+		pr_err("tearcheck setup failed\n");
+
+	return ret;
+}
+
+/*
+ * mdss_mdp_cmd_intfs_setup() - claim and set up cmd context(s) for a ctl
+ * @ctl: display controller
+ * @session: zero-based cmd session index (intf number - INTF1)
+ *
+ * Reserves the per-session context, resolves the default and auxiliary
+ * pingpong blocks from the mixer topology, and initializes the context.
+ * For pingpong split, the slave context of the next session is claimed
+ * and initialized as well. If a context is already in use with the
+ * panel on, the interface is restored instead of re-created.
+ *
+ * Fix: every error path after the master ref_cnt is taken now releases
+ * the reservation and clears intf_ctx (previously the refcount leaked
+ * and a failed ctx_setup left a dangling intf_ctx pointer).
+ *
+ * Return: 0 on success or a negative error code.
+ */
+static int mdss_mdp_cmd_intfs_setup(struct mdss_mdp_ctl *ctl,
+			int session)
+{
+	struct mdss_mdp_cmd_ctx *ctx;
+	struct mdss_mdp_ctl *sctl = NULL;
+	struct mdss_mdp_mixer *mixer;
+	int ret;
+	u32 default_pp_num, aux_pp_num;
+
+	if (session >= MAX_SESSIONS)
+		return 0;
+
+	sctl = mdss_mdp_get_split_ctl(ctl);
+	ctx = &mdss_mdp_cmd_ctx_list[session];
+	if (ctx->ref_cnt) {
+		if (mdss_panel_is_power_on(ctx->panel_power_state)) {
+			pr_debug("%s: cmd_start with panel always on\n",
+				__func__);
+			/*
+			 * It is possible that the resume was called from the
+			 * panel always on state without MDSS every
+			 * power-collapsed (such as a case with any other
+			 * interfaces connected). In such cases, we need to
+			 * explicitly call the restore function to enable
+			 * tearcheck logic.
+			 */
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+			mdss_mdp_cmd_restore(ctl, false);
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+
+			/* Turn on panel so that it can exit low power mode */
+			return mdss_mdp_cmd_panel_on(ctl, sctl);
+		}
+		pr_err("Intf %d already in use\n", session);
+		return -EBUSY;
+	}
+	ctx->ref_cnt++;
+	ctl->intf_ctx[MASTER_CTX] = ctx;
+
+	mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT);
+	if (!mixer) {
+		pr_err("mixer not setup correctly\n");
+		ret = -ENODEV;
+		goto err_release_master;
+	}
+	default_pp_num = mixer->num;
+
+	/* derive the aux pingpong from the right/peer mixer on split-LM */
+	if (is_split_lm(ctl->mfd)) {
+		if (is_dual_lm_single_display(ctl->mfd)) {
+			mixer = mdss_mdp_mixer_get(ctl,
+				MDSS_MDP_MIXER_MUX_RIGHT);
+			if (!mixer) {
+				pr_err("right mixer not setup correctly for dual_lm_single_display\n");
+				ret = -ENODEV;
+				goto err_release_master;
+			}
+			aux_pp_num = mixer->num;
+		} else { /* DUAL_LM_DUAL_DISPLAY */
+			struct mdss_mdp_ctl *mctl = ctl;
+
+			if (!mctl->is_master) {
+				mctl = mdss_mdp_get_main_ctl(ctl);
+				if (!mctl) {
+					pr_err("%s master ctl cannot be NULL\n",
+						__func__);
+					ret = -EINVAL;
+					goto err_release_master;
+				}
+			}
+
+			if (ctl->is_master) /* setup is called for master */
+				mixer = mdss_mdp_mixer_get(mctl,
+					MDSS_MDP_MIXER_MUX_RIGHT);
+			else
+				mixer = mdss_mdp_mixer_get(mctl,
+					MDSS_MDP_MIXER_MUX_LEFT);
+
+			if (!mixer) {
+				pr_err("right mixer not setup correctly for dual_lm_dual_display\n");
+				ret = -ENODEV;
+				goto err_release_master;
+			}
+			aux_pp_num = mixer->num;
+		}
+	} else {
+		aux_pp_num = default_pp_num;
+	}
+
+	ret = mdss_mdp_cmd_ctx_setup(ctl, ctx,
+		default_pp_num, aux_pp_num, false);
+	if (ret) {
+		pr_err("mdss_mdp_cmd_ctx_setup failed for default_pp:%d aux_pp:%d\n",
+			default_pp_num, aux_pp_num);
+		ret = -ENODEV;
+		goto err_release_master;
+	}
+
+	if (is_pingpong_split(ctl->mfd)) {
+		session += 1;
+		if (session >= MAX_SESSIONS)
+			return 0;
+		ctx = &mdss_mdp_cmd_ctx_list[session];
+		if (ctx->ref_cnt) {
+			if (mdss_panel_is_power_on(ctx->panel_power_state)) {
+				pr_debug("%s: cmd_start with panel always on\n",
+						__func__);
+				mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+				mdss_mdp_cmd_restore(ctl, false);
+				mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+				return mdss_mdp_cmd_panel_on(ctl, sctl);
+			}
+			pr_err("Intf %d already in use\n", session);
+			return -EBUSY;
+		}
+		ctx->ref_cnt++;
+
+		ctl->intf_ctx[SLAVE_CTX] = ctx;
+
+		ret = mdss_mdp_cmd_ctx_setup(ctl, ctx, session, session, true);
+		if (ret) {
+			pr_err("mdss_mdp_cmd_ctx_setup failed for slave ping pong block");
+			ctx->ref_cnt--;
+			ctl->intf_ctx[SLAVE_CTX] = NULL;
+			return -EPERM;
+		}
+	}
+	return 0;
+
+err_release_master:
+	/* undo the master context reservation taken above */
+	ctx->ref_cnt--;
+	ctl->intf_ctx[MASTER_CTX] = NULL;
+	return ret;
+}
+
+/*
+ * mdss_mdp_switch_roi_reset() - resync panel ROI after a mode switch
+ * @ctl: display controller
+ *
+ * No-op unless the panel supports partial update. Copies the current
+ * ctl ROI (and the split ctl's ROI, when present) into the panel info,
+ * then reprograms DSC and the partial ROI through the master ctl.
+ */
+void mdss_mdp_switch_roi_reset(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_ctl *mctl = ctl;
+	struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
+
+	if (!ctl->panel_data ||
+	  !ctl->panel_data->panel_info.partial_update_supported)
+		return;
+
+	ctl->panel_data->panel_info.roi = ctl->roi;
+	if (sctl && sctl->panel_data)
+		sctl->panel_data->panel_info.roi = sctl->roi;
+
+	/* DSC reconfig must go through the master ctl */
+	if (!ctl->is_master)
+		mctl = mdss_mdp_get_main_ctl(ctl);
+	mdss_mdp_cmd_dsc_reconfig(mctl);
+
+	mdss_mdp_cmd_set_partial_roi(ctl);
+}
+
+/*
+ * mdss_mdp_switch_to_vid_mode() - dynamic cmd -> video mode switch
+ * @ctl: display controller
+ * @prep: non-zero for the prepare phase, zero for the final switch
+ *
+ * In the prepare phase only the DSI clock vote is rebalanced; the
+ * actual DSI reconfiguration to video mode happens in the second call.
+ */
+void mdss_mdp_switch_to_vid_mode(struct mdss_mdp_ctl *ctl, int prep)
+{
+	struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
+	struct dsi_panel_clk_ctrl clk_ctrl;
+	long int mode = MIPI_VIDEO_PANEL;
+
+	pr_debug("%s start, prep = %d\n", __func__, prep);
+
+	if (prep) {
+		/*
+		 * In dsi_on there is an explicit decrement to dsi clk refcount
+		 * if we are in cmd mode, using the dsi client handle. We need
+		 * to rebalance clock in order to properly enable vid mode
+		 * compnents.
+		 */
+		clk_ctrl.state = MDSS_DSI_CLK_ON;
+		clk_ctrl.client = DSI_CLK_REQ_DSI_CLIENT;
+		/* split ctl first, without broadcasting to its peer */
+		if (sctl)
+			mdss_mdp_ctl_intf_event(sctl,
+				MDSS_EVENT_PANEL_CLK_CTRL, (void *)&clk_ctrl,
+				CTL_INTF_EVENT_FLAG_SKIP_BROADCAST);
+
+		mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_PANEL_CLK_CTRL,
+			(void *)&clk_ctrl, CTL_INTF_EVENT_FLAG_SKIP_BROADCAST);
+
+		return;
+	}
+
+	mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_DSI_RECONFIG_CMD,
+			(void *) mode, CTL_INTF_EVENT_FLAG_DEFAULT);
+}
+
+/*
+ * mdss_mdp_cmd_reconfigure() - handle dynamic mode/resolution switches
+ * @ctl: display controller
+ * @mode: requested switch (SWITCH_TO_VIDEO_MODE or SWITCH_RESOLUTION)
+ * @prep: true for the prepare phase, false for the completion phase
+ *
+ * For a resolution switch, the prepare phase reprograms DSC, stops the
+ * ctl and notifies DSI; the completion phase merely drops the clock
+ * vote taken during prepare.
+ *
+ * Return: always 0 (including when the ctl is already powered off).
+ */
+static int mdss_mdp_cmd_reconfigure(struct mdss_mdp_ctl *ctl,
+		enum dynamic_switch_modes mode, bool prep)
+{
+	if (mdss_mdp_ctl_is_power_off(ctl))
+		return 0;
+
+	pr_debug("%s: ctl=%d mode=%d prep=%d\n", __func__,
+			ctl->num, mode, prep);
+
+	if (mode == SWITCH_TO_VIDEO_MODE) {
+		mdss_mdp_switch_to_vid_mode(ctl, prep);
+	} else if (mode == SWITCH_RESOLUTION) {
+		if (prep) {
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+			/*
+			 * Setup DSC conifg early, as DSI configuration during
+			 * resolution switch would rely on DSC params for
+			 * stream configs.
+			 */
+			mdss_mdp_cmd_dsc_reconfig(ctl);
+
+			/*
+			 * Make explicit cmd_panel_on call, when dynamic
+			 * resolution switch request comes before cont-splash
+			 * handoff, to match the ctl_stop/ctl_start done
+			 * during the reconfiguration.
+			 */
+			if (ctl->switch_with_handoff) {
+				struct mdss_mdp_cmd_ctx *ctx;
+				struct mdss_mdp_ctl *sctl;
+
+				ctx = (struct mdss_mdp_cmd_ctx *)
+					ctl->intf_ctx[MASTER_CTX];
+				if (ctx &&
+				     __mdss_mdp_cmd_is_panel_power_off(ctx)) {
+					sctl = mdss_mdp_get_split_ctl(ctl);
+					mdss_mdp_cmd_panel_on(ctl, sctl);
+				}
+				ctl->switch_with_handoff = false;
+			}
+
+			mdss_mdp_ctl_stop(ctl, MDSS_PANEL_POWER_OFF);
+			mdss_mdp_ctl_intf_event(ctl,
+				MDSS_EVENT_DSI_DYNAMIC_SWITCH,
+				(void *) mode, CTL_INTF_EVENT_FLAG_DEFAULT);
+		} else {
+			/* completion phase: drop the vote taken in prepare */
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_cmd_start() - attach command mode operation to a ctl
+ * @ctl: display controller to start in command mode
+ *
+ * Sets up the cmd context(s) for the ctl's session and installs the
+ * command mode function pointers into ctl->ops.
+ *
+ * Return: 0 on success, or the error from mdss_mdp_cmd_intfs_setup().
+ */
+int mdss_mdp_cmd_start(struct mdss_mdp_ctl *ctl)
+{
+	int ret, session = 0;
+
+	pr_debug("%s:+\n", __func__);
+
+	/* Command mode is supported only starting at INTF1 */
+	session = ctl->intf_num - MDSS_MDP_INTF1;
+	ret = mdss_mdp_cmd_intfs_setup(ctl, session);
+	if (IS_ERR_VALUE(ret)) {
+		pr_err("unable to set cmd interface: %d\n", ret);
+		return ret;
+	}
+
+	ctl->ops.stop_fnc = mdss_mdp_cmd_stop;
+	ctl->ops.display_fnc = mdss_mdp_cmd_kickoff;
+	ctl->ops.wait_pingpong = mdss_mdp_cmd_wait4pingpong;
+	ctl->ops.add_vsync_handler = mdss_mdp_cmd_add_vsync_handler;
+	ctl->ops.remove_vsync_handler = mdss_mdp_cmd_remove_vsync_handler;
+	ctl->ops.read_line_cnt_fnc = mdss_mdp_cmd_line_count;
+	ctl->ops.restore_fnc = mdss_mdp_cmd_restore;
+	ctl->ops.early_wake_up_fnc = mdss_mdp_cmd_early_wake_up;
+	ctl->ops.reconfigure = mdss_mdp_cmd_reconfigure;
+	ctl->ops.pre_programming = mdss_mdp_cmd_pre_programming;
+	ctl->ops.update_lineptr = mdss_mdp_cmd_update_lineptr;
+	pr_debug("%s:-\n", __func__);
+
+	return 0;
+}
+
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_video.c b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
new file mode 100644
index 0000000..453fe28
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
@@ -0,0 +1,2195 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/memblock.h>
+#include <video/msm_hdmi_modes.h>
+
+#include "mdss_fb.h"
+#include "mdss_mdp.h"
+#include "mdss_panel.h"
+#include "mdss_debug.h"
+#include "mdss_mdp_trace.h"
+
+/* wait for at least 2 vsyncs for lowest refresh rate (24hz) */
+#define VSYNC_TIMEOUT_US 100000
+
+/* Poll time to do recovery during active region */
+#define POLL_TIME_USEC_FOR_LN_CNT 500
+
+/* Filter out input events for 1 vsync time after receiving an input event*/
+#define INPUT_EVENT_HANDLER_DELAY_USECS 16000
+
+/* Index into mdss_mdp_video_ctx.mdp_intf_intr_cb[] per intf interrupt */
+enum {
+	MDP_INTF_INTR_PROG_LINE,
+	MDP_INTF_INTR_MAX,
+};
+
+/* Registered handler for one interface-level interrupt */
+struct intr_callback {
+	void (*func)(void *);	/* handler; NULL when unregistered */
+	void *arg;		/* opaque argument passed back to func */
+};
+
+/* intf timing settings */
+struct intf_timing_params {
+	/* panel dimensions vs active resolution — programmed separately */
+	u32 width;
+	u32 height;
+	u32 xres;
+	u32 yres;
+
+	/* blanking intervals and sync pulse widths */
+	u32 h_back_porch;
+	u32 h_front_porch;
+	u32 v_back_porch;
+	u32 v_front_porch;
+	u32 hsync_pulse_width;
+	u32 vsync_pulse_width;
+
+	/* fill colors and hsync skew programmed into the INTF block */
+	u32 border_clr;
+	u32 underflow_clr;
+	u32 hsync_skew;
+};
+
+/* Runtime state for one video mode interface (INTF) block */
+struct mdss_mdp_video_ctx {
+	struct mdss_mdp_ctl *ctl;
+	u32 intf_num;		/* MDSS_MDP_INTF0-based interface id */
+	char __iomem *base;	/* mapped base of this INTF's registers */
+	u32 intf_type;
+	u8 ref_cnt;		/* non-zero while this intf is claimed */
+
+	/* timing generator and vsync state */
+	u8 timegen_en;
+	bool polling_en;
+	u32 poll_cnt;
+	struct completion vsync_comp;
+	int wait_pending;
+
+	atomic_t vsync_ref;	/* refcount of enabled vsync clients */
+	spinlock_t vsync_lock;
+	spinlock_t dfps_lock;
+	struct mutex vsync_mtx;
+	struct list_head vsync_handlers;
+	struct mdss_intf_recovery intf_recovery;
+	struct work_struct early_wakeup_dfps_work;
+
+	/* programmable lineptr interrupt state */
+	atomic_t lineptr_ref;
+	spinlock_t lineptr_lock;
+	struct mutex lineptr_mtx;
+	struct list_head lineptr_handlers;
+
+	struct intf_timing_params itp;	/* cached timing programmed to HW */
+	bool lineptr_enabled;
+	u32 prev_wr_ptr_irq;
+
+	/* per-intf interrupt dispatch (see mdss_mdp_video_isr) */
+	struct intr_callback mdp_intf_intr_cb[MDP_INTF_INTR_MAX];
+	u32 intf_irq_mask;	/* currently enabled INTF_INTR bits */
+	spinlock_t mdss_mdp_video_lock;
+	spinlock_t mdss_mdp_intf_intr_lock;
+};
+
+static void mdss_mdp_fetch_start_config(struct mdss_mdp_video_ctx *ctx,
+		struct mdss_mdp_ctl *ctl);
+
+static void mdss_mdp_fetch_end_config(struct mdss_mdp_video_ctx *ctx,
+		struct mdss_mdp_ctl *ctl);
+
+static void early_wakeup_dfps_update_work(struct work_struct *work);
+
+/* Write @val to the INTF register at byte offset @reg. */
+static inline void mdp_video_write(struct mdss_mdp_video_ctx *ctx,
+				   u32 reg, u32 val)
+{
+	char __iomem *addr = ctx->base + reg;
+
+	writel_relaxed(val, addr);
+}
+
+/* Read and return the INTF register at byte offset @reg. */
+static inline u32 mdp_video_read(struct mdss_mdp_video_ctx *ctx,
+				   u32 reg)
+{
+	char __iomem *addr = ctx->base + reg;
+
+	return readl_relaxed(addr);
+}
+
+/*
+ * Return the current scanline of the master interface, or 0 when the
+ * ctl or its master context is not available.
+ */
+static inline u32 mdss_mdp_video_line_count(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_video_ctx *ctx;
+
+	if (!ctl || !ctl->intf_ctx[MASTER_CTX])
+		return 0;
+
+	ctx = ctl->intf_ctx[MASTER_CTX];
+	return mdp_video_read(ctx, MDSS_MDP_REG_INTF_LINE_COUNT);
+}
+
+/*
+ * Map an MDSS_MDP_INTF_IRQ_* type to its callback table index, or -1
+ * for an unknown interrupt type.
+ */
+static int mdss_mdp_intf_intr2index(u32 intr_type)
+{
+	if (intr_type == MDSS_MDP_INTF_IRQ_PROG_LINE)
+		return MDP_INTF_INTR_PROG_LINE;
+
+	return -1;
+}
+
+/*
+ * mdss_mdp_set_intf_intr_callback() - (un)register an intf irq handler
+ * @ctx: video interface context
+ * @intr_type: MDSS_MDP_INTF_IRQ_* interrupt to attach the handler to
+ * @fnc_ptr: handler invoked from the ISR, or NULL to unregister
+ * @arg: opaque pointer passed back to @fnc_ptr
+ *
+ * Return: 0 on success, -EINVAL for an unknown @intr_type.
+ */
+int mdss_mdp_set_intf_intr_callback(struct mdss_mdp_video_ctx *ctx,
+		u32 intr_type, void (*fnc_ptr)(void *), void *arg)
+{
+	unsigned long flags;
+	int index;
+
+	index = mdss_mdp_intf_intr2index(intr_type);
+	if (index < 0) {
+		pr_warn("invalid intr type=%u\n", intr_type);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&ctx->mdss_mdp_intf_intr_lock, flags);
+	/* warn if a handler is being silently replaced */
+	WARN(ctx->mdp_intf_intr_cb[index].func && fnc_ptr,
+		"replacing current intr callback for ndx=%d\n", index);
+	ctx->mdp_intf_intr_cb[index].func = fnc_ptr;
+	ctx->mdp_intf_intr_cb[index].arg = arg;
+	spin_unlock_irqrestore(&ctx->mdss_mdp_intf_intr_lock, flags);
+
+	return 0;
+}
+
+/*
+ * Invoke the handler registered for interrupt @index, if any. The
+ * func/arg pair is snapshotted under the lock but the callback itself
+ * runs with the lock released.
+ */
+static inline void mdss_mdp_intf_intr_done(struct mdss_mdp_video_ctx *ctx,
+	int index)
+{
+	void (*fnc)(void *);
+	void *arg;
+
+	spin_lock(&ctx->mdss_mdp_intf_intr_lock);
+	fnc = ctx->mdp_intf_intr_cb[index].func;
+	arg = ctx->mdp_intf_intr_cb[index].arg;
+	spin_unlock(&ctx->mdss_mdp_intf_intr_lock);
+	if (fnc)
+		fnc(arg);
+}
+
+/*
+ * mdss_mdp_video_isr() - ISR handler for video mode interfaces
+ *
+ * @ptr: pointer to all the video ctx
+ * @count: number of interfaces which should match ctx
+ *
+ * The video isr is meant to handle all the interrupts in video interface,
+ * in MDSS_MDP_REG_INTF_INTR_EN register. Currently it handles only the
+ * programmable lineptr interrupt.
+ */
+void mdss_mdp_video_isr(void *ptr, u32 count)
+{
+	struct mdss_mdp_video_ctx *head = (struct mdss_mdp_video_ctx *) ptr;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		struct mdss_mdp_video_ctx *ctx = &head[i];
+		u32 intr, mask;
+
+		/* skip interfaces with no interrupts enabled */
+		if (!ctx->intf_irq_mask)
+			continue;
+
+		/* read and ack everything asserted on this interface */
+		intr = mdp_video_read(ctx, MDSS_MDP_REG_INTF_INTR_STATUS);
+		mask = mdp_video_read(ctx, MDSS_MDP_REG_INTF_INTR_EN);
+		mdp_video_write(ctx, MDSS_MDP_REG_INTF_INTR_CLEAR, intr);
+
+		pr_debug("%s: intf=%d intr=%x mask=%x\n", __func__,
+				i, intr, mask);
+
+		/* dispatch only interrupts that are actually enabled */
+		if (!(intr & mask))
+			continue;
+
+		if (intr & MDSS_MDP_INTF_INTR_PROG_LINE)
+			mdss_mdp_intf_intr_done(ctx, MDP_INTF_INTR_PROG_LINE);
+	}
+}
+
+/*
+ * mdss_mdp_video_intf_irq_enable() - enable one intf-level interrupt
+ * @ctl: display controller owning the master interface
+ * @intr_type: bit position of the interrupt in INTF_INTR_EN
+ *
+ * Clears any stale status, enables the bit in INTF_INTR_EN and makes
+ * sure the top-level MDP hw irq for this interface is on.
+ *
+ * Return: 0 on success, -ENODEV without a master ctx, -EBUSY if the
+ * interrupt was already enabled.
+ */
+static int mdss_mdp_video_intf_irq_enable(struct mdss_mdp_ctl *ctl,
+		u32 intr_type)
+{
+	struct mdss_mdp_video_ctx *ctx;
+	unsigned long irq_flags;
+	int ret = 0;
+	u32 irq;
+
+	if (!ctl || !ctl->intf_ctx[MASTER_CTX])
+		return -ENODEV;
+
+	ctx = ctl->intf_ctx[MASTER_CTX];
+
+	irq = 1 << intr_type;
+
+	spin_lock_irqsave(&ctx->mdss_mdp_video_lock, irq_flags);
+	if (ctx->intf_irq_mask & irq) {
+		pr_warn("MDSS MDP Intf IRQ-0x%x is already set, mask=%x\n",
+				irq, ctx->intf_irq_mask);
+		ret = -EBUSY;
+	} else {
+		pr_debug("MDSS MDP Intf IRQ mask old=%x new=%x\n",
+				ctx->intf_irq_mask, irq);
+		ctx->intf_irq_mask |= irq;
+		/* clear stale status before unmasking */
+		mdp_video_write(ctx, MDSS_MDP_REG_INTF_INTR_CLEAR, irq);
+		mdp_video_write(ctx, MDSS_MDP_REG_INTF_INTR_EN,
+				ctx->intf_irq_mask);
+		ctl->mdata->mdp_intf_irq_mask |=
+				(1 << (ctx->intf_num - MDSS_MDP_INTF0));
+		mdss_mdp_enable_hw_irq(ctl->mdata);
+	}
+	spin_unlock_irqrestore(&ctx->mdss_mdp_video_lock, irq_flags);
+
+	return ret;
+}
+
+/*
+ * mdss_mdp_video_intf_irq_disable() - disable one intf-level interrupt
+ * @ctl: display controller owning the master interface
+ * @intr_type: bit position of the interrupt in INTF_INTR_EN
+ *
+ * Masks the bit in INTF_INTR_EN; when this was the last enabled intf
+ * interrupt, the top-level MDP hw irq for the interface is dropped too.
+ */
+void mdss_mdp_video_intf_irq_disable(struct mdss_mdp_ctl *ctl, u32 intr_type)
+{
+	struct mdss_mdp_video_ctx *ctx;
+	unsigned long irq_flags;
+	u32 irq;
+
+	if (!ctl || !ctl->intf_ctx[MASTER_CTX])
+		return;
+
+	ctx = ctl->intf_ctx[MASTER_CTX];
+
+	irq = 1 << intr_type;
+
+	spin_lock_irqsave(&ctx->mdss_mdp_video_lock, irq_flags);
+	if (!(ctx->intf_irq_mask & irq)) {
+		pr_warn("MDSS MDP Intf IRQ-%x is NOT set, mask=%x\n",
+				irq, ctx->intf_irq_mask);
+	} else {
+		ctx->intf_irq_mask &= ~irq;
+		/* ack any pending status while masking */
+		mdp_video_write(ctx, MDSS_MDP_REG_INTF_INTR_CLEAR, irq);
+		mdp_video_write(ctx, MDSS_MDP_REG_INTF_INTR_EN,
+				ctx->intf_irq_mask);
+		if (ctx->intf_irq_mask == 0) {
+			ctl->mdata->mdp_intf_irq_mask &=
+				~(1 << (ctx->intf_num - MDSS_MDP_INTF0));
+			mdss_mdp_disable_hw_irq(ctl->mdata);
+		}
+	}
+	spin_unlock_irqrestore(&ctx->mdss_mdp_video_lock, irq_flags);
+}
+
+/*
+ * Allocate and initialize the array of video interface contexts.
+ * @offsets holds, per interface, the register offset from the MDSS IO
+ * base; @count is the number of interfaces. The array is device-managed
+ * (freed automatically on driver detach) and published through
+ * mdata->video_intf / mdata->nintf.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure.
+ */
+int mdss_mdp_video_addr_setup(struct mdss_data_type *mdata,
+				u32 *offsets,  u32 count)
+{
+	struct mdss_mdp_video_ctx *head;
+	u32 i;
+
+	/*
+	 * devm_kcalloc() checks the count * size multiplication for
+	 * overflow, unlike the open-coded multiply previously passed
+	 * to devm_kzalloc(). Memory is zeroed either way.
+	 */
+	head = devm_kcalloc(&mdata->pdev->dev, count,
+			sizeof(struct mdss_mdp_video_ctx), GFP_KERNEL);
+	if (!head)
+		return -ENOMEM;
+
+	for (i = 0; i < count; i++) {
+		head[i].base = mdata->mdss_io.base + offsets[i];
+		pr_debug("adding Video Intf #%d offset=0x%x virt=%pK\n", i,
+				offsets[i], head[i].base);
+		head[i].ref_cnt = 0;
+		head[i].intf_num = i + MDSS_MDP_INTF0;
+		INIT_LIST_HEAD(&head[i].vsync_handlers);
+		INIT_LIST_HEAD(&head[i].lineptr_handlers);
+	}
+
+	mdata->video_intf = head;
+	mdata->nintf = count;
+	return 0;
+}
+
+/*
+ * Recovery callback for video-mode DSI interfaces. Only the
+ * MDP_INTF_DSI_VIDEO_FIFO_OVERFLOW event is handled: the function
+ * busy-waits (bounded per iteration by POLL_TIME_USEC_FOR_LN_CNT)
+ * until the scanout line counter falls inside the active region,
+ * which is the safe window for the caller to perform its recovery.
+ *
+ * Returns 0 once the active region is reached, -EINVAL on bad
+ * arguments/clock data, -EPERM for unsupported events or if the
+ * target is suspending/shutting down.
+ */
+static int mdss_mdp_video_intf_recovery(void *data, int event)
+{
+	struct mdss_mdp_video_ctx *ctx;
+	struct mdss_mdp_ctl *ctl = data;
+	struct mdss_panel_info *pinfo;
+	u32 line_cnt, min_ln_cnt, active_lns_cnt;
+	u64 clk_rate;
+	u32 clk_period, time_of_line;
+	u32 delay;
+
+	if (!data) {
+		pr_err("%s: invalid ctl\n", __func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * Currently, only intf_fifo_overflow is
+	 * supported for recovery sequence for video
+	 * mode DSI interface
+	 */
+	if (event != MDP_INTF_DSI_VIDEO_FIFO_OVERFLOW) {
+		pr_warn("%s: unsupported recovery event:%d\n",
+					__func__, event);
+		return -EPERM;
+	}
+
+	ctx = ctl->intf_ctx[MASTER_CTX];
+	pr_debug("%s: ctl num = %d, event = %d\n",
+				__func__, ctl->num, event);
+
+	pinfo = &ctl->panel_data->panel_info;
+	/* DSI panels report pixel clock separately from other interfaces */
+	clk_rate = ((ctl->intf_type == MDSS_INTF_DSI) ?
+			pinfo->mipi.dsi_pclk_rate :
+			pinfo->clk_rate);
+
+	clk_rate = DIV_ROUND_UP_ULL(clk_rate, 1000); /* in kHz */
+	if (!clk_rate) {
+		pr_err("Unable to get proper clk_rate\n");
+		return -EINVAL;
+	}
+	/*
+	 * calculate clk_period as pico second to maintain good
+	 * accuracy with high pclk rate and this number is in 17 bit
+	 * range.
+	 */
+	clk_period = DIV_ROUND_UP_ULL(1000000000, clk_rate);
+	if (!clk_period) {
+		pr_err("Unable to calculate clock period\n");
+		return -EINVAL;
+	}
+	/* First line of the active region, in lines from vsync */
+	min_ln_cnt = pinfo->lcdc.v_back_porch + pinfo->lcdc.v_pulse_width;
+	active_lns_cnt = pinfo->yres;
+	/* One horizontal line, in pico seconds */
+	time_of_line = (pinfo->lcdc.h_back_porch +
+		 pinfo->lcdc.h_front_porch +
+		 pinfo->lcdc.h_pulse_width +
+		 pinfo->xres) * clk_period;
+
+	/* delay in micro seconds */
+	delay = (time_of_line * (min_ln_cnt +
+			pinfo->lcdc.v_front_porch)) / 1000000;
+
+	/*
+	 * Wait for max delay before
+	 * polling to check active region
+	 */
+	if (delay > POLL_TIME_USEC_FOR_LN_CNT)
+		delay = POLL_TIME_USEC_FOR_LN_CNT;
+
+	mutex_lock(&ctl->offlock);
+	while (1) {
+		/*
+		 * NOTE(review): !ctl is always false here (already
+		 * dereferenced above); the real exits are shutdown
+		 * pending or the timing generator being off.
+		 */
+		if (!ctl || ctl->mfd->shutdown_pending || !ctx ||
+				!ctx->timegen_en) {
+			pr_warn("Target is in suspend or shutdown pending\n");
+			mutex_unlock(&ctl->offlock);
+			return -EPERM;
+		}
+
+		line_cnt = mdss_mdp_video_line_count(ctl);
+
+		/* Success: scanout is within [min_ln_cnt, min+active) */
+		if ((line_cnt >= min_ln_cnt) && (line_cnt <
+			(active_lns_cnt + min_ln_cnt))) {
+			pr_debug("%s, Needed lines left line_cnt=%d\n",
+						__func__, line_cnt);
+			mutex_unlock(&ctl->offlock);
+			return 0;
+		}
+		pr_warn("line count is less. line_cnt = %d\n",
+							line_cnt);
+		/* Add delay so that line count is in active region */
+		udelay(delay);
+	}
+}
+
+/*
+ * Program the interface timing generator registers (sync pulse widths,
+ * porches, active/display windows, polarities, colors) from the timing
+ * parameters in @p, and accumulate the matching timing-engine flush bit
+ * into ctl->flush_bits. Always returns 0.
+ */
+static int mdss_mdp_video_timegen_setup(struct mdss_mdp_ctl *ctl,
+					struct intf_timing_params *p,
+					struct mdss_mdp_video_ctx *ctx)
+{
+	u32 hsync_period, vsync_period;
+	u32 hsync_start_x, hsync_end_x, display_v_start, display_v_end;
+	u32 active_h_start, active_h_end, active_v_start, active_v_end;
+	u32 den_polarity, hsync_polarity, vsync_polarity;
+	u32 display_hctl, active_hctl, hsync_ctl, polarity_ctl;
+	struct mdss_data_type *mdata;
+
+	mdata = ctl->mdata;
+	/* Totals in pixels (h) and lines (v), including blanking */
+	hsync_period = p->hsync_pulse_width + p->h_back_porch +
+			p->width + p->h_front_porch;
+	vsync_period = p->vsync_pulse_width + p->v_back_porch +
+			p->height + p->v_front_porch;
+
+	MDSS_XLOG(p->vsync_pulse_width, p->v_back_porch,
+			p->height, p->v_front_porch);
+
+	/* Vertical display window expressed in pixel-clock counts */
+	display_v_start = ((p->vsync_pulse_width + p->v_back_porch) *
+			hsync_period) + p->hsync_skew;
+	display_v_end = ((vsync_period - p->v_front_porch) * hsync_period) +
+			p->hsync_skew - 1;
+
+	if (ctx->intf_type == MDSS_INTF_EDP) {
+		display_v_start += p->hsync_pulse_width + p->h_back_porch;
+		display_v_end -= p->h_front_porch;
+	}
+
+	/* TIMING_2 flush bit on 8939 is BIT 31 */
+	if (mdata->mdp_rev == MDSS_MDP_HW_REV_108 &&
+				ctx->intf_num == MDSS_MDP_INTF2)
+		ctl->flush_bits |= BIT(31);
+	else
+		ctl->flush_bits |= BIT(31) >>
+			(ctx->intf_num - MDSS_MDP_INTF0);
+
+	hsync_start_x = p->h_back_porch + p->hsync_pulse_width;
+	hsync_end_x = hsync_period - p->h_front_porch - 1;
+
+	/*
+	 * Program an explicit active window only when the fetch size
+	 * differs from the panel resolution; 0 means "same as display".
+	 */
+	if (p->width != p->xres) {
+		active_h_start = hsync_start_x;
+		active_h_end = active_h_start + p->xres - 1;
+	} else {
+		active_h_start = 0;
+		active_h_end = 0;
+	}
+
+	if (p->height != p->yres) {
+		active_v_start = display_v_start;
+		active_v_end = active_v_start + (p->yres * hsync_period) - 1;
+	} else {
+		active_v_start = 0;
+		active_v_end = 0;
+	}
+
+
+	if (active_h_end) {
+		active_hctl = (active_h_end << 16) | active_h_start;
+		active_hctl |= BIT(31);	/* ACTIVE_H_ENABLE */
+	} else {
+		active_hctl = 0;
+	}
+
+	if (active_v_end)
+		active_v_start |= BIT(31); /* ACTIVE_V_ENABLE */
+
+	hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width;
+	display_hctl = (hsync_end_x << 16) | hsync_start_x;
+
+	den_polarity = 0;
+	if (ctx->intf_type == MDSS_INTF_HDMI) {
+		/* HDMI: active-low sync below 720 lines, active-high above */
+		hsync_polarity = p->yres >= 720 ? 0 : 1;
+		vsync_polarity = p->yres >= 720 ? 0 : 1;
+	} else {
+		hsync_polarity = 0;
+		vsync_polarity = 0;
+	}
+	polarity_ctl = (den_polarity << 2)   | /*  DEN Polarity  */
+		       (vsync_polarity << 1) | /* VSYNC Polarity */
+		       (hsync_polarity << 0);  /* HSYNC Polarity */
+
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_HSYNC_CTL, hsync_ctl);
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_VSYNC_PERIOD_F0,
+			vsync_period * hsync_period);
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_VSYNC_PULSE_WIDTH_F0,
+			   p->vsync_pulse_width * hsync_period);
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_DISPLAY_HCTL, display_hctl);
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_DISPLAY_V_START_F0,
+			   display_v_start);
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_DISPLAY_V_END_F0, display_v_end);
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_ACTIVE_HCTL, active_hctl);
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_ACTIVE_V_START_F0,
+			   active_v_start);
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_ACTIVE_V_END_F0, active_v_end);
+
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_BORDER_COLOR, p->border_clr);
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_UNDERFLOW_COLOR,
+			   p->underflow_clr);
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_HSYNC_SKEW, p->hsync_skew);
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_POLARITY_CTL, polarity_ctl);
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_FRAME_LINE_COUNT_EN, 0x3);
+	MDSS_XLOG(hsync_period, vsync_period);
+
+	/*
+	 * If CDM is present Interface should have destination
+	 * format set to RGB
+	 */
+	if (ctl->cdm) {
+		u32 reg = mdp_video_read(ctx, MDSS_MDP_REG_INTF_CONFIG);
+
+		reg &= ~BIT(18); /* CSC_DST_DATA_FORMAT = RGB */
+		reg &= ~BIT(17); /* CSC_SRC_DATA_FROMAT = RGB */
+		mdp_video_write(ctx, MDSS_MDP_REG_INTF_CONFIG, reg);
+	}
+	return 0;
+}
+
+/*
+ * Write the CTL flush register with the timing-engine flush bit(s) for
+ * the master interface and, when present, the slave interface @sctx, so
+ * that double-buffered INTF register updates take effect.
+ */
+static void mdss_mdp_video_timegen_flush(struct mdss_mdp_ctl *ctl,
+					struct mdss_mdp_video_ctx *sctx)
+{
+	u32 ctl_flush;
+	struct mdss_data_type *mdata;
+
+	mdata = ctl->mdata;
+	/* Timing-engine flush bits count down from BIT(31) per interface */
+	ctl_flush = (BIT(31) >> (ctl->intf_num - MDSS_MDP_INTF0));
+	if (sctx) {
+		/* For 8939, sctx is always INTF2 and the flush bit is BIT 31 */
+		if (mdata->mdp_rev == MDSS_MDP_HW_REV_108)
+			ctl_flush |= BIT(31);
+		else
+			ctl_flush |= (BIT(31) >>
+					(sctx->intf_num - MDSS_MDP_INTF0));
+	}
+	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, ctl_flush);
+	MDSS_XLOG(ctl->intf_num, sctx?sctx->intf_num:0xf00, ctl_flush);
+}
+
+/*
+ * Take a reference on the vsync interrupt; the HW IRQ is enabled only
+ * on the 0->1 transition. When @clear is set and the IRQ was already
+ * enabled, any latched vsync status is cleared so the caller does not
+ * see a stale, pre-existing vsync.
+ */
+static inline void video_vsync_irq_enable(struct mdss_mdp_ctl *ctl, bool clear)
+{
+	struct mdss_mdp_video_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
+
+	mutex_lock(&ctx->vsync_mtx);
+	if (atomic_inc_return(&ctx->vsync_ref) == 1)
+		mdss_mdp_irq_enable(MDSS_MDP_IRQ_TYPE_INTF_VSYNC,
+				ctl->intf_num);
+	else if (clear)
+		mdss_mdp_irq_clear(ctl->mdata, MDSS_MDP_IRQ_TYPE_INTF_VSYNC,
+				ctl->intf_num);
+	mutex_unlock(&ctx->vsync_mtx);
+}
+
+/*
+ * Drop a reference on the vsync interrupt; the HW IRQ is disabled only
+ * when the last reference goes away (1->0 transition).
+ */
+static inline void video_vsync_irq_disable(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_video_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
+
+	mutex_lock(&ctx->vsync_mtx);
+	if (atomic_dec_return(&ctx->vsync_ref) == 0)
+		mdss_mdp_irq_disable(MDSS_MDP_IRQ_TYPE_INTF_VSYNC,
+				ctl->intf_num);
+	mutex_unlock(&ctx->vsync_mtx);
+}
+
+/*
+ * Register @handle on the master context's vsync handler list and, if
+ * it was not already enabled, vote for MDP clocks and the vsync IRQ.
+ * The handler will be invoked from mdss_mdp_video_vsync_intr_done().
+ *
+ * Returns 0 on success, -EINVAL for a bad handle, -ENODEV without a
+ * master context.
+ */
+static int mdss_mdp_video_add_vsync_handler(struct mdss_mdp_ctl *ctl,
+		struct mdss_mdp_vsync_handler *handle)
+{
+	struct mdss_mdp_video_ctx *ctx;
+	unsigned long flags;
+	int ret = 0;
+	bool irq_en = false;
+
+	if (!handle || !(handle->vsync_handler)) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	ctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx) {
+		pr_err("invalid ctx for ctl=%d\n", ctl->num);
+		ret = -ENODEV;
+		goto exit;
+	}
+
+	MDSS_XLOG(ctl->num, ctl->vsync_cnt, handle->enabled);
+
+	spin_lock_irqsave(&ctx->vsync_lock, flags);
+	if (!handle->enabled) {
+		handle->enabled = true;
+		list_add(&handle->list, &ctx->vsync_handlers);
+		irq_en = true;
+	}
+	spin_unlock_irqrestore(&ctx->vsync_lock, flags);
+	/* Clock/IRQ voting may sleep, so it is done outside the spinlock */
+	if (irq_en) {
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+		video_vsync_irq_enable(ctl, false);
+	}
+exit:
+	return ret;
+}
+
+/*
+ * Unregister @handle from the vsync handler list and release the IRQ
+ * and clock votes taken by mdss_mdp_video_add_vsync_handler(). Safe to
+ * call for a handle that is not currently enabled.
+ */
+static int mdss_mdp_video_remove_vsync_handler(struct mdss_mdp_ctl *ctl,
+		struct mdss_mdp_vsync_handler *handle)
+{
+	struct mdss_mdp_video_ctx *ctx;
+	unsigned long flags;
+	bool irq_dis = false;
+
+	ctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx) {
+		pr_err("invalid ctx for ctl=%d\n", ctl->num);
+		return -ENODEV;
+	}
+
+	MDSS_XLOG(ctl->num, ctl->vsync_cnt, handle->enabled);
+
+	spin_lock_irqsave(&ctx->vsync_lock, flags);
+	if (handle->enabled) {
+		handle->enabled = false;
+		list_del_init(&handle->list);
+		irq_dis = true;
+	}
+	spin_unlock_irqrestore(&ctx->vsync_lock, flags);
+	/* Release votes outside the spinlock; these calls may sleep */
+	if (irq_dis) {
+		video_vsync_irq_disable(ctl);
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	}
+	return 0;
+}
+
+/*
+ * Register @handle on the lineptr (programmable line interrupt) handler
+ * list. On the first enabled handler this votes for MDP clocks and
+ * enables the per-interface PROG_LINE interrupt (refcounted through
+ * ctx->lineptr_ref).
+ *
+ * Returns 0 on success, -EINVAL for a bad handle, -ENODEV without a
+ * master context.
+ */
+static int mdss_mdp_video_add_lineptr_handler(struct mdss_mdp_ctl *ctl,
+		struct mdss_mdp_lineptr_handler *handle)
+{
+	struct mdss_mdp_video_ctx *ctx;
+	unsigned long flags;
+	int ret = 0;
+	bool irq_en = false;
+
+	if (!handle || !(handle->lineptr_handler)) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	ctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx) {
+		pr_err("invalid ctx for ctl=%d\n", ctl->num);
+		ret = -ENODEV;
+		goto exit;
+	}
+
+	spin_lock_irqsave(&ctx->lineptr_lock, flags);
+	if (!handle->enabled) {
+		handle->enabled = true;
+		list_add(&handle->list, &ctx->lineptr_handlers);
+		irq_en = true;
+	}
+	spin_unlock_irqrestore(&ctx->lineptr_lock, flags);
+
+	/* IRQ/clock voting may sleep; done outside the spinlock */
+	if (irq_en) {
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+		mutex_lock(&ctx->lineptr_mtx);
+		if (atomic_inc_return(&ctx->lineptr_ref) == 1)
+			mdss_mdp_video_intf_irq_enable(ctl,
+				MDSS_MDP_INTF_IRQ_PROG_LINE);
+		mutex_unlock(&ctx->lineptr_mtx);
+	}
+	ctx->lineptr_enabled = true;
+
+exit:
+	return ret;
+}
+
+/*
+ * Unregister @handle from the lineptr handler list; on the last
+ * reference the PROG_LINE interrupt is disabled and the clock vote is
+ * released. Also clears the cached write-pointer IRQ line so the next
+ * enable reprograms it.
+ */
+static int mdss_mdp_video_remove_lineptr_handler(struct mdss_mdp_ctl *ctl,
+		struct mdss_mdp_lineptr_handler *handle)
+{
+	struct mdss_mdp_video_ctx *ctx;
+	unsigned long flags;
+	bool irq_dis = false;
+
+	ctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx || !ctx->lineptr_enabled)
+		return -EINVAL;
+
+	spin_lock_irqsave(&ctx->lineptr_lock, flags);
+	if (handle->enabled) {
+		handle->enabled = false;
+		list_del_init(&handle->list);
+		irq_dis = true;
+	}
+	spin_unlock_irqrestore(&ctx->lineptr_lock, flags);
+
+	/* Release IRQ/clock votes outside the spinlock; may sleep */
+	if (irq_dis) {
+		mutex_lock(&ctx->lineptr_mtx);
+		if (atomic_dec_return(&ctx->lineptr_ref) == 0)
+			mdss_mdp_video_intf_irq_disable(ctl,
+				MDSS_MDP_INTF_IRQ_PROG_LINE);
+		mutex_unlock(&ctx->lineptr_mtx);
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	}
+	ctx->lineptr_enabled = false;
+	ctx->prev_wr_ptr_irq = 0;
+
+	return 0;
+}
+
+/*
+ * Program the line at which the PROG_LINE interrupt fires.
+ * @new_lineptr is a 1-based line number within the active region;
+ * 0 effectively disables the interrupt by programming UINT_MAX.
+ * The register takes a pixel-clock count, so the line number is
+ * converted using the cached timing parameters in ctx->itp.
+ *
+ * Returns 0 on success, -EINVAL if @new_lineptr exceeds the panel
+ * height, -ENODEV without a master context.
+ */
+static int mdss_mdp_video_set_lineptr(struct mdss_mdp_ctl *ctl,
+	u32 new_lineptr)
+{
+	struct mdss_mdp_video_ctx *ctx;
+	u32 pixel_start, offset, hsync_period;
+
+	ctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx) {
+		pr_err("invalid ctx for ctl=%d\n", ctl->num);
+		return -ENODEV;
+	}
+
+	if (new_lineptr == 0) {
+		/* Push the trigger out of range so it never fires */
+		mdp_video_write(ctx,
+			MDSS_MDP_REG_INTF_PROG_LINE_INTR_CONF, UINT_MAX);
+	} else if (new_lineptr <= ctx->itp.yres) {
+		hsync_period = ctx->itp.hsync_pulse_width
+			+ ctx->itp.h_back_porch + ctx->itp.width
+			+ ctx->itp.h_front_porch;
+
+		/* Pixel count from frame start to first active line */
+		offset = ((ctx->itp.vsync_pulse_width + ctx->itp.v_back_porch)
+				* hsync_period) + ctx->itp.hsync_skew;
+
+		/* convert from line to pixel */
+		pixel_start = offset + (hsync_period * (new_lineptr - 1));
+		mdp_video_write(ctx, MDSS_MDP_REG_INTF_PROG_LINE_INTR_CONF,
+			pixel_start);
+
+		/* Flush so the double-buffered INTF register takes effect */
+		mdss_mdp_video_timegen_flush(ctl, ctx);
+	} else {
+		pr_err("invalid new lineptr_value: new=%d yres=%d\n",
+				new_lineptr, ctx->itp.yres);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Enable or disable lineptr (write-pointer) interrupt handling on the
+ * master control, driven by the panel's tear-check wr_ptr_irq value:
+ * reprogram the trigger line when it changed, add the handler when a
+ * non-zero line is requested, and remove it when the value drops to 0
+ * or @enable is false. Master-only; returns -EINVAL otherwise.
+ */
+static int mdss_mdp_video_lineptr_ctrl(struct mdss_mdp_ctl *ctl, bool enable)
+{
+	struct mdss_mdp_pp_tear_check *te;
+	struct mdss_mdp_video_ctx *ctx;
+	int rc = 0;
+
+	ctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx || !ctl->is_master)
+		return -EINVAL;
+
+	te = &ctl->panel_data->panel_info.te;
+	pr_debug("%pS->%s: ctl=%d en=%d, prev_lineptr=%d, lineptr=%d\n",
+			__builtin_return_address(0), __func__, ctl->num,
+			enable, ctx->prev_wr_ptr_irq, te->wr_ptr_irq);
+
+	if (enable) {
+		/* update reg only if the value has changed */
+		if (ctx->prev_wr_ptr_irq != te->wr_ptr_irq) {
+			if (mdss_mdp_video_set_lineptr(ctl,
+						te->wr_ptr_irq) < 0) {
+				/* invalid new value, so restore the previous */
+				te->wr_ptr_irq = ctx->prev_wr_ptr_irq;
+				goto end;
+			}
+			ctx->prev_wr_ptr_irq = te->wr_ptr_irq;
+		}
+
+		/*
+		 * add handler only when lineptr is not enabled
+		 * and wr ptr is non zero
+		 */
+		if (!ctx->lineptr_enabled && te->wr_ptr_irq)
+			rc = mdss_mdp_video_add_lineptr_handler(ctl,
+				&ctl->lineptr_handler);
+		/* Disable handler when the value is zero */
+		else if (ctx->lineptr_enabled && !te->wr_ptr_irq)
+			rc = mdss_mdp_video_remove_lineptr_handler(ctl,
+				&ctl->lineptr_handler);
+	} else {
+		if (ctx->lineptr_enabled)
+			rc = mdss_mdp_video_remove_lineptr_handler(ctl,
+				&ctl->lineptr_handler);
+	}
+
+end:
+	return rc;
+}
+
+/*
+ * Turn off the interface timing generator: disable the engine, wait
+ * @sleep_time ms (at least one vsync period) for it to stop cleanly,
+ * then release the IOMMU/clock votes and the underrun interrupts for
+ * both the master and, if present, the split-display slave control.
+ */
+void mdss_mdp_turn_off_time_engine(struct mdss_mdp_ctl *ctl,
+		struct mdss_mdp_video_ctx *ctx, u32 sleep_time)
+{
+	struct mdss_mdp_ctl *sctl;
+
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_TIMING_ENGINE_EN, 0);
+	/* wait for at least one VSYNC for proper TG OFF */
+	msleep(sleep_time);
+
+	mdss_iommu_ctrl(0);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	ctx->timegen_en = false;
+
+	mdss_mdp_irq_disable(MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN, ctl->intf_num);
+
+	sctl = mdss_mdp_get_split_ctl(ctl);
+	if (sctl)
+		mdss_mdp_irq_disable(MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN,
+			sctl->intf_num);
+}
+
+/*
+ * Stop one video interface context: blank the panel, turn the timing
+ * generator off (waiting roughly one frame, derived from the panel
+ * frame rate), send PANEL_OFF, drop the bandwidth vote, clear the
+ * interrupt callbacks and release the context reference.
+ *
+ * Returns 0 on success; -EBUSY from the BLANK event leaves the
+ * interface running and is passed through to the caller.
+ */
+static int mdss_mdp_video_ctx_stop(struct mdss_mdp_ctl *ctl,
+		struct mdss_panel_info *pinfo, struct mdss_mdp_video_ctx *ctx)
+{
+	int rc = 0;
+	u32 frame_rate = 0;
+
+	mutex_lock(&ctl->offlock);
+	if (ctx->timegen_en) {
+		rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_BLANK, NULL,
+			CTL_INTF_EVENT_FLAG_DEFAULT);
+		if (rc == -EBUSY) {
+			pr_debug("intf #%d busy don't turn off\n",
+				 ctl->intf_num);
+			goto end;
+		}
+		WARN(rc, "intf %d blank error (%d)\n", ctl->intf_num, rc);
+
+		/* Clamp to a sane fps range before deriving the TG-off wait */
+		frame_rate = mdss_panel_get_framerate(pinfo,
+				FPS_RESOLUTION_HZ);
+		if (!(frame_rate >= 24 && frame_rate <= 240))
+			frame_rate = 24;
+
+		/* One frame period in ms, rounded up */
+		frame_rate = (1000/frame_rate) + 1;
+		mdss_mdp_turn_off_time_engine(ctl, ctx, frame_rate);
+
+		rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_PANEL_OFF, NULL,
+			CTL_INTF_EVENT_FLAG_DEFAULT);
+		WARN(rc, "intf %d timegen off error (%d)\n", ctl->intf_num, rc);
+
+		mdss_bus_bandwidth_ctrl(false);
+	}
+
+	/* Detach interrupt callbacks now that the interface is down */
+	mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_TYPE_INTF_VSYNC,
+		ctx->intf_num, NULL, NULL);
+	mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN,
+		ctx->intf_num, NULL, NULL);
+	mdss_mdp_set_intf_intr_callback(ctx, MDSS_MDP_INTF_IRQ_PROG_LINE,
+		NULL, NULL);
+
+	ctx->ref_cnt--;
+end:
+	mutex_unlock(&ctl->offlock);
+	return rc;
+}
+
+/*
+ * Stop the video interface(s) backing @ctl: the master context and,
+ * for pingpong-split panels, the slave context as well. Afterwards all
+ * registered vsync handlers are removed and lineptr handling disabled.
+ *
+ * Returns 0 on success (or if @pdata is NULL), -ENODEV if a context is
+ * missing/unused, -EPERM if a context failed to stop.
+ */
+static int mdss_mdp_video_intfs_stop(struct mdss_mdp_ctl *ctl,
+	struct mdss_panel_data *pdata, int inum)
+{
+	struct mdss_panel_info *pinfo;
+	struct mdss_mdp_video_ctx *ctx, *sctx = NULL;
+	struct mdss_mdp_vsync_handler *tmp, *handle;
+	int ret = 0;
+
+	if (pdata == NULL)
+		return 0;
+
+	pinfo = &pdata->panel_info;
+
+	ctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[MASTER_CTX];
+	/* Guard against a NULL context before touching ref_cnt */
+	if (!ctx || !ctx->ref_cnt) {
+		pr_err("Intf %d not in use\n", (inum + MDSS_MDP_INTF0));
+		return -ENODEV;
+	}
+	pr_debug("stop ctl=%d video Intf #%d base=%pK\n", ctl->num,
+			ctx->intf_num, ctx->base);
+
+	ret = mdss_mdp_video_ctx_stop(ctl, pinfo, ctx);
+	if (ret) {
+		pr_err("mdss_mdp_video_ctx_stop failed for intf: %d\n",
+				ctx->intf_num);
+		return -EPERM;
+	}
+
+	if (is_pingpong_split(ctl->mfd)) {
+		/* Slave interface uses the next panel's timing info */
+		pinfo = &pdata->next->panel_info;
+
+		sctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[SLAVE_CTX];
+		if (!sctx || !sctx->ref_cnt) {
+			pr_err("Intf %d not in use\n", (inum + MDSS_MDP_INTF0));
+			return -ENODEV;
+		}
+		pr_debug("stop ctl=%d video Intf #%d base=%pK\n", ctl->num,
+				sctx->intf_num, sctx->base);
+
+		ret = mdss_mdp_video_ctx_stop(ctl, pinfo, sctx);
+		if (ret) {
+			pr_err("mdss_mdp_video_ctx_stop failed for intf: %d\n",
+					sctx->intf_num);
+			return -EPERM;
+		}
+	}
+
+	list_for_each_entry_safe(handle, tmp, &ctx->vsync_handlers, list)
+		mdss_mdp_video_remove_vsync_handler(ctl, handle);
+
+	if (mdss_mdp_is_lineptr_supported(ctl))
+		mdss_mdp_video_lineptr_ctrl(ctl, false);
+
+	return 0;
+}
+
+
+/*
+ * Top-level stop for a video-mode control path: stop the interface(s),
+ * reset the control, detach the master context and destroy any CDM
+ * block attached to the control.
+ *
+ * Returns 0 on success or the error from mdss_mdp_video_intfs_stop().
+ */
+static int mdss_mdp_video_stop(struct mdss_mdp_ctl *ctl, int panel_power_state)
+{
+	int intfs_num, ret = 0;
+
+	intfs_num = ctl->intf_num - MDSS_MDP_INTF0;
+	ret = mdss_mdp_video_intfs_stop(ctl, ctl->panel_data, intfs_num);
+	/*
+	 * The callee returns 0 or a negative errno; a plain truth test is
+	 * the correct check. IS_ERR_VALUE() is for unsigned long
+	 * pointer-error encodings and is wrong on a plain int
+	 * (checkpatch-flagged).
+	 */
+	if (ret) {
+		pr_err("unable to stop video interface: %d\n", ret);
+		return ret;
+	}
+
+	MDSS_XLOG(ctl->num, ctl->vsync_cnt);
+
+	mdss_mdp_ctl_reset(ctl, false);
+	ctl->intf_ctx[MASTER_CTX] = NULL;
+
+	if (ctl->cdm) {
+		mdss_mdp_cdm_destroy(ctl->cdm);
+		ctl->cdm = NULL;
+	}
+	return 0;
+}
+
+/*
+ * Vsync interrupt callback (IRQ context). Timestamps the vsync, bumps
+ * the counter, wakes anyone blocked on vsync_comp and invokes every
+ * registered vsync handler under the vsync spinlock.
+ */
+static void mdss_mdp_video_vsync_intr_done(void *arg)
+{
+	struct mdss_mdp_ctl *ctl = arg;
+	struct mdss_mdp_video_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
+	struct mdss_mdp_vsync_handler *tmp;
+	ktime_t vsync_time;
+
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return;
+	}
+
+	vsync_time = ktime_get();
+	ctl->vsync_cnt++;
+
+	mdss_debug_frc_add_vsync_sample(ctl, vsync_time);
+
+	MDSS_XLOG(ctl->num, ctl->vsync_cnt, ctl->vsync_cnt);
+
+	pr_debug("intr ctl=%d vsync cnt=%u vsync_time=%d\n",
+		 ctl->num, ctl->vsync_cnt, (int)ktime_to_ms(vsync_time));
+
+	/* A real vsync arrived, so the poll fallback is no longer needed */
+	ctx->polling_en = false;
+	complete_all(&ctx->vsync_comp);
+	spin_lock(&ctx->vsync_lock);
+	list_for_each_entry(tmp, &ctx->vsync_handlers, list) {
+		tmp->vsync_handler(ctl, vsync_time);
+	}
+	spin_unlock(&ctx->vsync_lock);
+}
+
+/*
+ * PROG_LINE (lineptr) interrupt callback (IRQ context). Timestamps the
+ * event and invokes every registered lineptr handler under the lineptr
+ * spinlock.
+ */
+static void mdss_mdp_video_lineptr_intr_done(void *arg)
+{
+	struct mdss_mdp_ctl *ctl = arg;
+	struct mdss_mdp_video_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
+	struct mdss_mdp_lineptr_handler *tmp;
+	ktime_t lineptr_time;
+
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return;
+	}
+
+	lineptr_time = ktime_get();
+	pr_debug("intr lineptr_time=%lld\n", ktime_to_ms(lineptr_time));
+
+	spin_lock(&ctx->lineptr_lock);
+	list_for_each_entry(tmp, &ctx->lineptr_handlers, list) {
+		tmp->lineptr_handler(ctl, lineptr_time);
+	}
+	spin_unlock(&ctx->lineptr_lock);
+}
+
+/*
+ * Fallback vsync wait: poll the MDP interrupt status register (up to
+ * VSYNC_TIMEOUT_US, sampling every 1000us) for the vsync bit, or bail
+ * early if the vsync completion was signalled by the ISR. On success
+ * the registered vsync handlers are invoked manually, since the IRQ
+ * path did not run them.
+ *
+ * Returns 0 on success or the readl_poll_timeout() error on timeout.
+ */
+static int mdss_mdp_video_pollwait(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_video_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
+	u32 mask, status;
+	int rc;
+
+	mask = mdss_mdp_get_irq_mask(MDSS_MDP_IRQ_TYPE_INTF_VSYNC,
+			ctl->intf_num);
+
+	/* Keep MDP clocked while polling its status register */
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+	rc = readl_poll_timeout(ctl->mdata->mdp_base + MDSS_MDP_REG_INTR_STATUS,
+		status,
+		(status & mask) || try_wait_for_completion(&ctx->vsync_comp),
+		1000,
+		VSYNC_TIMEOUT_US);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+
+	if (rc == 0) {
+		MDSS_XLOG(ctl->num, ctl->vsync_cnt);
+		pr_debug("vsync poll successful! rc=%d status=0x%x\n",
+				rc, status);
+		ctx->poll_cnt++;
+		/* status != 0 means we saw the bit ourselves (no ISR ran) */
+		if (status) {
+			struct mdss_mdp_vsync_handler *tmp;
+			unsigned long flags;
+			ktime_t vsync_time = ktime_get();
+
+			spin_lock_irqsave(&ctx->vsync_lock, flags);
+			list_for_each_entry(tmp, &ctx->vsync_handlers, list)
+				tmp->vsync_handler(ctl, vsync_time);
+			spin_unlock_irqrestore(&ctx->vsync_lock, flags);
+		}
+	} else {
+		pr_warn("vsync poll timed out! rc=%d status=0x%x mask=0x%x\n",
+				rc, status, mask);
+	}
+
+	return rc;
+}
+
+/*
+ * Wait for the current frame to complete (one vsync). Uses the vsync
+ * completion normally, dropping ctl->lock across the sleep; falls back
+ * to register polling on timeout and stays in polling mode for
+ * subsequent waits. Notifies FRAME_DONE or FRAME_TIMEOUT and releases
+ * the vsync IRQ vote taken at commit time.
+ */
+static int mdss_mdp_video_wait4comp(struct mdss_mdp_ctl *ctl, void *arg)
+{
+	struct mdss_mdp_video_ctx *ctx;
+	int rc;
+
+	ctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return -ENODEV;
+	}
+
+	WARN(!ctx->wait_pending, "waiting without commit! ctl=%d", ctl->num);
+
+	if (ctx->polling_en) {
+		rc = mdss_mdp_video_pollwait(ctl);
+	} else {
+		/* Drop ctl->lock so other ctl operations can run while we sleep */
+		mutex_unlock(&ctl->lock);
+		rc = wait_for_completion_timeout(&ctx->vsync_comp,
+				usecs_to_jiffies(VSYNC_TIMEOUT_US));
+		mutex_lock(&ctl->lock);
+		if (rc == 0) {
+			pr_warn("vsync wait timeout %d, fallback to poll mode\n",
+					ctl->num);
+			ctx->polling_en++;
+			rc = mdss_mdp_video_pollwait(ctl);
+		} else {
+			rc = 0;
+		}
+	}
+	mdss_mdp_ctl_notify(ctl,
+			rc ? MDP_NOTIFY_FRAME_TIMEOUT : MDP_NOTIFY_FRAME_DONE);
+
+	/* Balance the vsync IRQ vote taken when the commit was queued */
+	if (ctx->wait_pending) {
+		ctx->wait_pending = 0;
+		video_vsync_irq_disable(ctl);
+	}
+
+	return rc;
+}
+
+/*
+ * Workqueue body scheduled from the underrun ISR: registers the
+ * recover_underrun_handler as a vsync handler so recovery happens on
+ * the next vsync, in sleepable context.
+ *
+ * NOTE(review): container_of() cannot yield NULL here, so the !ctl
+ * half of the check is dead; the add_vsync_handler check is the one
+ * that matters.
+ */
+static void recover_underrun_work(struct work_struct *work)
+{
+	struct mdss_mdp_ctl *ctl =
+		container_of(work, typeof(*ctl), recover_work);
+
+	if (!ctl || !ctl->ops.add_vsync_handler) {
+		pr_err("ctl or vsync handler is NULL\n");
+		return;
+	}
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+	ctl->ops.add_vsync_handler(ctl, &ctl->recover_underrun_handler);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+}
+
+/*
+ * Underrun interrupt callback (IRQ context). Counts and traces the
+ * underrun; for 3D-mux configurations without HW recovery support it
+ * schedules recover_underrun_work to re-sync on the next vsync.
+ */
+static void mdss_mdp_video_underrun_intr_done(void *arg)
+{
+	struct mdss_mdp_ctl *ctl = arg;
+
+	if (unlikely(!ctl))
+		return;
+
+	ctl->underrun_cnt++;
+	MDSS_XLOG(ctl->num, ctl->underrun_cnt);
+	trace_mdp_video_underrun_done(ctl->num, ctl->underrun_cnt);
+	pr_debug("display underrun detected for ctl=%d count=%d\n", ctl->num,
+			ctl->underrun_cnt);
+
+	/* Recovery work only needed when HW cannot recover 3D-mux itself */
+	if (!test_bit(MDSS_CAPS_3D_MUX_UNDERRUN_RECOVERY_SUPPORTED,
+		ctl->mdata->mdss_caps_map) &&
+		(ctl->opmode & MDSS_MDP_CTL_OP_PACK_3D_ENABLE))
+		schedule_work(&ctl->recover_work);
+}
+
+/**
+ * mdss_mdp_video_hfp_fps_update() - configure mdp with new fps.
+ * @ctx: pointer to the master context.
+ * @pdata: panel information data.
+ *
+ * This function configures the hardware to modify the fps.
+ * within mdp for the hfp method.
+ * Function assumes that timings for the new fps configuration
+ * are already updated in the panel data passed as parameter.
+ *
+ * Return: 0 - succeed, otherwise - fail
+ */
+static int mdss_mdp_video_hfp_fps_update(struct mdss_mdp_video_ctx *ctx,
+					struct mdss_panel_data *pdata)
+{
+	u32 hsync_period, vsync_period;
+	u32 hsync_start_x, hsync_end_x, display_v_start, display_v_end;
+	u32 display_hctl, hsync_ctl;
+	struct mdss_panel_info *pinfo = &pdata->panel_info;
+
+	/* New totals already reflect the updated horizontal front porch */
+	hsync_period = mdss_panel_get_htotal(pinfo, true);
+	vsync_period = mdss_panel_get_vtotal(pinfo);
+
+	display_v_start = ((pinfo->lcdc.v_pulse_width +
+			pinfo->lcdc.v_back_porch) * hsync_period) +
+					pinfo->lcdc.hsync_skew;
+	display_v_end = ((vsync_period - pinfo->lcdc.v_front_porch) *
+				hsync_period) + pinfo->lcdc.hsync_skew - 1;
+
+	hsync_start_x = pinfo->lcdc.h_back_porch + pinfo->lcdc.h_pulse_width;
+	hsync_end_x = hsync_period - pinfo->lcdc.h_front_porch - 1;
+
+	hsync_ctl = (hsync_period << 16) | pinfo->lcdc.h_pulse_width;
+	display_hctl = (hsync_end_x << 16) | hsync_start_x;
+
+	/* Reprogram all timing registers affected by the new h-total */
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_HSYNC_CTL, hsync_ctl);
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_VSYNC_PERIOD_F0,
+				vsync_period * hsync_period);
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_VSYNC_PULSE_WIDTH_F0,
+			pinfo->lcdc.v_pulse_width * hsync_period);
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_DISPLAY_HCTL, display_hctl);
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_DISPLAY_V_START_F0,
+						display_v_start);
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_DISPLAY_V_END_F0, display_v_end);
+	MDSS_XLOG(ctx->intf_num, hsync_ctl, vsync_period, hsync_period);
+
+	return 0;
+}
+
+/**
+ * mdss_mdp_video_vfp_fps_update() - configure mdp with new fps.
+ * @ctx: pointer to the master context.
+ * @pdata: panel information data.
+ *
+ * This function configures the hardware to modify the fps.
+ * within mdp for the vfp method.
+ * Function assumes that timings for the new fps configuration
+ * are already updated in the panel data passed as parameter.
+ *
+ * Return: 0 - succeed, otherwise - fail
+ */
+static int mdss_mdp_video_vfp_fps_update(struct mdss_mdp_video_ctx *ctx,
+				 struct mdss_panel_data *pdata)
+{
+	u32 current_vsync_period_f0, new_vsync_period_f0;
+	int vsync_period, hsync_period;
+
+	/*
+	 * Change in the blanking times are already in the
+	 * panel info, so just get the vtotal and htotal expected
+	 * for this panel to configure those in hw.
+	 */
+	vsync_period = mdss_panel_get_vtotal(&pdata->panel_info);
+	hsync_period = mdss_panel_get_htotal(&pdata->panel_info, true);
+
+	current_vsync_period_f0 = mdp_video_read(ctx,
+		MDSS_MDP_REG_INTF_VSYNC_PERIOD_F0);
+	new_vsync_period_f0 = (vsync_period * hsync_period);
+
+	/*
+	 * NOTE(review): 0x800000 (bit 23) is toggled around the period
+	 * update; from the usage here it appears to act as a latch/commit
+	 * control for VSYNC_PERIOD_F0 — confirm against the MDP HW
+	 * programming guide. The write sequence below ensures the bit ends
+	 * up matching the new period value while forcing a transition.
+	 */
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_VSYNC_PERIOD_F0,
+			current_vsync_period_f0 | 0x800000);
+	if (new_vsync_period_f0 & 0x800000) {
+		mdp_video_write(ctx, MDSS_MDP_REG_INTF_VSYNC_PERIOD_F0,
+			new_vsync_period_f0);
+	} else {
+		mdp_video_write(ctx, MDSS_MDP_REG_INTF_VSYNC_PERIOD_F0,
+			new_vsync_period_f0 | 0x800000);
+		mdp_video_write(ctx, MDSS_MDP_REG_INTF_VSYNC_PERIOD_F0,
+			new_vsync_period_f0 & 0x7fffff);
+	}
+
+	pr_debug("if:%d vtotal:%d htotal:%d f0:0x%x nw_f0:0x%x\n",
+		ctx->intf_num, vsync_period, hsync_period,
+		current_vsync_period_f0, new_vsync_period_f0);
+
+	MDSS_XLOG(ctx->intf_num, current_vsync_period_f0,
+		hsync_period, vsync_period, new_vsync_period_f0);
+
+	return 0;
+}
+
+/*
+ * Dispatch the fps update to the porch-adjustment method selected by
+ * the panel: vertical front porch (VFP) when dfps_update requests it,
+ * horizontal front porch (HFP) otherwise.
+ */
+static int mdss_mdp_video_fps_update(struct mdss_mdp_video_ctx *ctx,
+				 struct mdss_panel_data *pdata, int new_fps)
+{
+	bool use_vfp = (pdata->panel_info.dfps_update ==
+				DFPS_IMMEDIATE_PORCH_UPDATE_MODE_VFP);
+
+	return use_vfp ? mdss_mdp_video_vfp_fps_update(ctx, pdata) :
+			 mdss_mdp_video_hfp_fps_update(ctx, pdata);
+}
+
+/*
+ * Block until the next vsync on @ctl's master interface. Enables the
+ * vsync IRQ (clearing any stale latched status), waits on the vsync
+ * completion, and falls back to register polling on timeout; a poll
+ * failure dumps the debug buses. The IRQ vote is always released.
+ *
+ * Returns 0 on success, negative error on timeout/poll failure.
+ */
+static int mdss_mdp_video_wait4vsync(struct mdss_mdp_ctl *ctl)
+{
+	int rc = 0;
+	struct mdss_mdp_video_ctx *ctx;
+
+	ctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return -ENODEV;
+	}
+
+	MDSS_XLOG(ctl->num, ctl->vsync_cnt, XLOG_FUNC_ENTRY);
+
+	/* clear=true discards a latched vsync so we wait for a fresh one */
+	video_vsync_irq_enable(ctl, true);
+	reinit_completion(&ctx->vsync_comp);
+	rc = wait_for_completion_timeout(&ctx->vsync_comp,
+		usecs_to_jiffies(VSYNC_TIMEOUT_US));
+
+	if (rc <= 0) {
+		pr_warn("vsync timeout %d fallback to poll mode\n",
+			ctl->num);
+		rc = mdss_mdp_video_pollwait(ctl);
+		MDSS_XLOG(ctl->num, ctl->vsync_cnt);
+		if (rc) {
+			pr_err("error polling for vsync\n");
+			MDSS_XLOG_TOUT_HANDLER("mdp", "dsi0_ctrl", "dsi0_phy",
+				"dsi1_ctrl", "dsi1_phy", "vbif", "dbg_bus",
+				"vbif_dbg_bus", "panic");
+		}
+	} else {
+		rc = 0;
+	}
+	video_vsync_irq_disable(ctl);
+
+	MDSS_XLOG(ctl->num, ctl->vsync_cnt, XLOG_FUNC_EXIT);
+
+	return rc;
+}
+
+/*
+ * Check whether the scanout position leaves enough lines in the frame
+ * to apply a dynamic fps change safely. Succeeds only while the line
+ * counter is in the first half of the panel's vertical resolution.
+ *
+ * Returns 0 when it is safe to proceed, -EPERM when too few lines
+ * remain, -EINVAL without panel data.
+ */
+static int mdss_mdp_video_dfps_check_line_cnt(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_panel_data *pdata = ctl->panel_data;
+	u32 line_cnt, half_yres;
+
+	if (!pdata) {
+		pr_err("%s: Invalid panel data\n", __func__);
+		return -EINVAL;
+	}
+
+	half_yres = pdata->panel_info.yres / 2;
+	line_cnt = mdss_mdp_video_line_count(ctl);
+	if (line_cnt < half_yres)
+		return 0;
+
+	pr_debug("Too few lines left line_cnt=%d yres/2=%d\n",
+		line_cnt, half_yres);
+	return -EPERM;
+}
+
+/**
+ * mdss_mdp_video_config_fps() - modify the fps.
+ * @ctl: pointer to the master controller.
+ * @new_fps: new fps to be set.
+ *
+ * This function configures the hardware to modify the fps.
+ * Note that this function will flush the DSI and MDP
+ * to reconfigure the fps in VFP and HFP methods.
+ * Given above statement, is callers responsibility to call
+ * this function at the beginning of the frame, so it can be
+ * guaranteed that flush of both (DSI and MDP) happen within
+ * the same frame.
+ *
+ * Return: 0 - succeed, otherwise - fail
+ */
+static int mdss_mdp_video_config_fps(struct mdss_mdp_ctl *ctl, int new_fps)
+{
+	struct mdss_mdp_video_ctx *ctx, *sctx = NULL;
+	struct mdss_panel_data *pdata;
+	int rc = 0;
+	struct mdss_data_type *mdata = ctl->mdata;
+	struct mdss_mdp_ctl *sctl = NULL;
+
+	ctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx || !ctx->timegen_en || !ctx->ref_cnt) {
+		pr_err("invalid ctx or interface is powered off\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Resolve the secondary context: split-display uses the slave
+	 * ctl's master context, pingpong-split uses our own slave context.
+	 */
+	sctl = mdss_mdp_get_split_ctl(ctl);
+	if (sctl) {
+		sctx = (struct mdss_mdp_video_ctx *) sctl->intf_ctx[MASTER_CTX];
+		if (!sctx) {
+			pr_err("invalid ctx\n");
+			return -ENODEV;
+		}
+	} else if (is_pingpong_split(ctl->mfd)) {
+		sctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[SLAVE_CTX];
+		if (!sctx || !sctx->ref_cnt) {
+			pr_err("invalid sctx or interface is powered off\n");
+			return -EINVAL;
+		}
+	}
+
+	mutex_lock(&ctl->offlock);
+	pdata = ctl->panel_data;
+	if (pdata == NULL) {
+		pr_err("%s: Invalid panel data\n", __func__);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	pr_debug("ctl:%d dfps_update:%d fps:%d\n",
+		ctl->num, pdata->panel_info.dfps_update, new_fps);
+	MDSS_XLOG(ctl->num, pdata->panel_info.dfps_update,
+		new_fps, XLOG_FUNC_ENTRY);
+
+	if (pdata->panel_info.dfps_update
+			!= DFPS_SUSPEND_RESUME_MODE) {
+		if (pdata->panel_info.dfps_update
+				== DFPS_IMMEDIATE_CLK_UPDATE_MODE) {
+			/* Clock-only update: panel driver handles it all */
+			if (!ctx->timegen_en) {
+				pr_err("TG is OFF. DFPS mode invalid\n");
+				rc = -EINVAL;
+				goto end;
+			}
+			rc = mdss_mdp_ctl_intf_event(ctl,
+					MDSS_EVENT_PANEL_UPDATE_FPS,
+					(void *) (unsigned long) new_fps,
+					CTL_INTF_EVENT_FLAG_DEFAULT);
+
+			WARN(rc, "intf %d panel fps update error (%d)\n",
+							ctl->intf_num, rc);
+		} else if (pdata->panel_info.dfps_update
+				== DFPS_IMMEDIATE_PORCH_UPDATE_MODE_VFP ||
+				pdata->panel_info.dfps_update
+				== DFPS_IMMEDIATE_PORCH_UPDATE_MODE_HFP ||
+				pdata->panel_info.dfps_update
+				== DFPS_IMMEDIATE_MULTI_UPDATE_MODE_CLK_HFP ||
+				pdata->panel_info.dfps_update
+				== DFPS_IMMEDIATE_MULTI_MODE_HFP_CALC_CLK) {
+			/* Porch-based update: reprogram INTF timing live */
+			unsigned long flags;
+
+			if (!ctx->timegen_en) {
+				pr_err("TG is OFF. DFPS mode invalid\n");
+				rc = -EINVAL;
+				goto end;
+			}
+
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+			spin_lock_irqsave(&ctx->dfps_lock, flags);
+
+			/*
+			 * Pre-v1.5 HW has no double-buffered INTF timing
+			 * registers, so only update early in the frame.
+			 */
+			if (mdata->mdp_rev < MDSS_MDP_HW_REV_105) {
+				rc = mdss_mdp_video_dfps_check_line_cnt(ctl);
+				if (rc < 0)
+					goto exit_dfps;
+			}
+
+			rc = mdss_mdp_video_fps_update(ctx, pdata, new_fps);
+			if (rc < 0) {
+				pr_err("%s: Error during DFPS: %d\n", __func__,
+					new_fps);
+				goto exit_dfps;
+			}
+			if (sctx) {
+				rc = mdss_mdp_video_fps_update(sctx,
+							pdata->next, new_fps);
+				if (rc < 0) {
+					pr_err("%s: DFPS error fps:%d\n",
+						__func__, new_fps);
+					goto exit_dfps;
+				}
+			}
+			rc = mdss_mdp_ctl_intf_event(ctl,
+					MDSS_EVENT_PANEL_UPDATE_FPS,
+					(void *) (unsigned long) new_fps,
+					CTL_INTF_EVENT_FLAG_DEFAULT);
+			WARN(rc, "intf %d panel fps update error (%d)\n",
+							ctl->intf_num, rc);
+
+			rc = 0;
+			/* Fetch-start depends on blanking; recompute it */
+			mdss_mdp_fetch_start_config(ctx, ctl);
+			if (sctx)
+				mdss_mdp_fetch_start_config(sctx, ctl);
+
+			if (test_bit(MDSS_QOS_VBLANK_PANIC_CTRL,
+					mdata->mdss_qos_map)) {
+				mdss_mdp_fetch_end_config(ctx, ctl);
+				if (sctx)
+					mdss_mdp_fetch_end_config(sctx, ctl);
+			}
+
+			/*
+			 * MDP INTF registers support DB on targets
+			 * starting from MDP v1.5.
+			 */
+			if (mdata->mdp_rev >= MDSS_MDP_HW_REV_105)
+				mdss_mdp_video_timegen_flush(ctl, sctx);
+
+exit_dfps:
+			spin_unlock_irqrestore(&ctx->dfps_lock, flags);
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+
+			/*
+			 * Wait for one vsync to make sure these changes
+			 * are applied as part of one single frame and
+			 * no mixer changes happen at the same time.
+			 * A potential optimization would be not to wait
+			 * here, but next mixer programming would need
+			 * to wait before programming the flush bits.
+			 */
+			if (!rc) {
+				rc = mdss_mdp_video_wait4vsync(ctl);
+				if (rc < 0)
+					pr_err("Error in dfps_wait: %d\n", rc);
+			}
+
+		} else {
+			pr_err("intf %d panel, unknown FPS mode\n",
+							ctl->intf_num);
+			rc = -EINVAL;
+			goto end;
+		}
+	} else {
+		/* Suspend/resume mode: forward the event to the panel only */
+		rc = mdss_mdp_ctl_intf_event(ctl,
+				MDSS_EVENT_PANEL_UPDATE_FPS,
+				(void *) (unsigned long) new_fps,
+				CTL_INTF_EVENT_FLAG_DEFAULT);
+		WARN(rc, "intf %d panel fps update error (%d)\n",
+						ctl->intf_num, rc);
+	}
+
+end:
+	MDSS_XLOG(ctl->num, new_fps, XLOG_FUNC_EXIT);
+	mutex_unlock(&ctl->offlock);
+	return rc;
+}
+
+/*
+ * Display/kickoff handler for video-mode interfaces (ctl->ops.display_fnc).
+ * Arms a one-frame vsync wait for this commit and, on the very first
+ * kickoff (timing generator still off), performs full panel bring-up:
+ * LINK_READY -> UNBLANK -> enable INTF timing engine -> PANEL_ON ->
+ * POST_PANEL_ON. Returns 0 on success or a negative errno.
+ */
+static int mdss_mdp_video_display(struct mdss_mdp_ctl *ctl, void *arg)
+{
+	struct mdss_mdp_video_ctx *ctx;
+	struct mdss_mdp_ctl *sctl;
+	struct mdss_panel_data *pdata = ctl->panel_data;
+	int rc;
+
+	pr_debug("kickoff ctl=%d\n", ctl->num);
+
+	ctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return -ENODEV;
+	}
+
+	/* Arm the vsync wait exactly once per commit; a second display
+	 * call without an intervening wait indicates a caller bug.
+	 */
+	if (!ctx->wait_pending) {
+		ctx->wait_pending++;
+		video_vsync_irq_enable(ctl, true);
+		reinit_completion(&ctx->vsync_comp);
+	} else {
+		WARN(1, "commit without wait! ctl=%d", ctl->num);
+	}
+
+	MDSS_XLOG(ctl->num, ctl->underrun_cnt);
+
+	/* First kickoff: timing generator is off, bring the panel up. */
+	if (!ctx->timegen_en) {
+		rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_LINK_READY, NULL,
+			CTL_INTF_EVENT_FLAG_DEFAULT);
+		if (rc) {
+			pr_warn("intf #%d link ready error (%d)\n",
+					ctl->intf_num, rc);
+			/* undo the vsync wait armed above before bailing */
+			video_vsync_irq_disable(ctl);
+			ctx->wait_pending = 0;
+			return rc;
+		}
+
+		rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_UNBLANK, NULL,
+			CTL_INTF_EVENT_FLAG_DEFAULT);
+		WARN(rc, "intf %d unblank error (%d)\n", ctl->intf_num, rc);
+
+		pr_debug("enabling timing gen for intf=%d\n", ctl->intf_num);
+
+		/*
+		 * Continuous splash without a splash logo: wait one vsync
+		 * before taking over, presumably to let the bootloader's
+		 * last frame complete — TODO confirm intent.
+		 */
+		if (pdata->panel_info.cont_splash_enabled &&
+			!ctl->mfd->splash_info.splash_logo_enabled) {
+			rc = wait_for_completion_timeout(&ctx->vsync_comp,
+					usecs_to_jiffies(VSYNC_TIMEOUT_US));
+		}
+
+		rc = mdss_iommu_ctrl(1);
+		if (IS_ERR_VALUE(rc)) {
+			pr_err("IOMMU attach failed\n");
+			return rc;
+		}
+
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+		/* enable underrun detection on master (and slave, if split) */
+		mdss_mdp_irq_enable(MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN,
+				ctl->intf_num);
+		sctl = mdss_mdp_get_split_ctl(ctl);
+		if (sctl)
+			mdss_mdp_irq_enable(MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN,
+				sctl->intf_num);
+
+		mdss_bus_bandwidth_ctrl(true);
+
+		mdp_video_write(ctx, MDSS_MDP_REG_INTF_TIMING_ENGINE_EN, 1);
+		/* make sure MDP timing engine is enabled */
+		wmb();
+
+		/* wait one vsync to confirm the timing engine started */
+		rc = wait_for_completion_timeout(&ctx->vsync_comp,
+				usecs_to_jiffies(VSYNC_TIMEOUT_US));
+		WARN(rc == 0, "timeout (%d) enabling timegen on ctl=%d\n",
+				rc, ctl->num);
+
+		ctx->timegen_en = true;
+		rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_PANEL_ON, NULL,
+			CTL_INTF_EVENT_FLAG_DEFAULT);
+		WARN(rc, "intf %d panel on error (%d)\n", ctl->intf_num, rc);
+		mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_POST_PANEL_ON, NULL,
+			CTL_INTF_EVENT_FLAG_DEFAULT);
+	}
+
+	if (mdss_mdp_is_lineptr_supported(ctl))
+		mdss_mdp_video_lineptr_ctrl(ctl, true);
+
+	return 0;
+}
+
+/*
+ * Tear down the continuous-splash state handed over from the bootloader.
+ * Clears cont_splash_enabled on the panel (and its split/pingpong peer),
+ * and when @handoff is false also unstages the bootloader's layers
+ * (mixer0/1 -> border color), disables the timing engine and brackets
+ * the sequence with CONT_SPLASH_BEGIN/FINISH panel events.
+ * Returns 0 on success or a negative errno.
+ */
+int mdss_mdp_video_reconfigure_splash_done(struct mdss_mdp_ctl *ctl,
+	bool handoff)
+{
+	struct mdss_panel_data *pdata;
+	int i, ret = 0, off;
+	u32 data, flush;
+	struct mdss_mdp_video_ctx *ctx;
+	struct mdss_mdp_ctl *sctl;
+
+	if (!ctl) {
+		pr_err("invalid ctl\n");
+		return -ENODEV;
+	}
+
+	off = 0;
+	ctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx) {
+		pr_err("invalid ctx for ctl=%d\n", ctl->num);
+		return -ENODEV;
+	}
+
+	pdata = ctl->panel_data;
+	if (!pdata) {
+		pr_err("invalid pdata\n");
+		return -ENODEV;
+	}
+
+	pdata->panel_info.cont_splash_enabled = 0;
+	sctl = mdss_mdp_get_split_ctl(ctl);
+
+	/* also clear splash state on the split or pingpong-split peer */
+	if (sctl)
+		sctl->panel_data->panel_info.cont_splash_enabled = 0;
+	else if (ctl->panel_data->next && is_pingpong_split(ctl->mfd))
+		ctl->panel_data->next->panel_info.cont_splash_enabled = 0;
+
+	if (!handoff) {
+		ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_CONT_SPLASH_BEGIN,
+				      NULL, CTL_INTF_EVENT_FLAG_DEFAULT);
+		if (ret) {
+			pr_err("%s: Failed to handle 'CONT_SPLASH_BEGIN' event\n"
+				, __func__);
+			return ret;
+		}
+
+		/* clear up mixer0 and mixer1 */
+		flush = 0;
+		for (i = 0; i < 2; i++) {
+			data = mdss_mdp_ctl_read(ctl,
+				MDSS_MDP_REG_CTL_LAYER(i));
+			if (data) {
+				/* replace staged content with border color */
+				mdss_mdp_ctl_write(ctl,
+					MDSS_MDP_REG_CTL_LAYER(i),
+					MDSS_MDP_LM_BORDER_COLOR);
+				flush |= (0x40 << i);
+			}
+		}
+		mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, flush);
+
+		mdp_video_write(ctx, MDSS_MDP_REG_INTF_TIMING_ENGINE_EN, 0);
+		/* wait for 1 VSYNC for the pipe to be unstaged */
+		msleep(20);
+
+		ret = mdss_mdp_ctl_intf_event(ctl,
+			MDSS_EVENT_CONT_SPLASH_FINISH, NULL,
+			CTL_INTF_EVENT_FLAG_DEFAULT);
+	}
+
+	return ret;
+}
+
+/*
+ * Set ctl->disable_prefill when the programmable-fetch lines plus the
+ * vertical blanking (back porch + pulse width) exceed the minimum
+ * prefill lines the HW requires — presumably because the blanking
+ * interval alone is then long enough to fill the pipeline, making the
+ * extra prefill bandwidth accounting unnecessary (TODO confirm).
+ */
+static void mdss_mdp_disable_prefill(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_panel_info *pinfo = &ctl->panel_data->panel_info;
+	struct mdss_data_type *mdata = ctl->mdata;
+
+	if ((pinfo->prg_fet + pinfo->lcdc.v_back_porch +
+			pinfo->lcdc.v_pulse_width) > mdata->min_prefill_lines) {
+		ctl->disable_prefill = true;
+		pr_debug("disable prefill vbp:%d vpw:%d prg_fet:%d\n",
+			pinfo->lcdc.v_back_porch, pinfo->lcdc.v_pulse_width,
+			pinfo->prg_fet);
+	}
+}
+
+/*
+ * Program the "fetch end" point: stop the INTF fetch a fixed number of
+ * lines (mdata->lines_before_active, default 2) before the active region
+ * starts, by writing a pixel-clock count to VBLANK_END_CONF and setting
+ * the enable bit (BIT(22)) in INTF_CONFIG. Silently skipped when the
+ * vblank region is too short to leave room before active start.
+ */
+static void mdss_mdp_fetch_end_config(struct mdss_mdp_video_ctx *ctx,
+		struct mdss_mdp_ctl *ctl)
+{
+	int fetch_stop, h_total;
+	struct mdss_panel_info *pinfo = &ctl->panel_data->panel_info;
+	/* fall back to 2 lines when the target does not specify a value */
+	u32 lines_before_active = ctl->mdata->lines_before_active ? : 2;
+	u32 vblank_lines = pinfo->lcdc.v_back_porch + pinfo->lcdc.v_pulse_width;
+	u32 vblank_end_enable;
+
+	if (vblank_lines <= lines_before_active) {
+		pr_debug("cannot support fetch end vblank:%d lines:%d\n",
+			vblank_lines, lines_before_active);
+		return;
+	}
+
+	/* Fetch should always be stopped before the active start */
+	h_total = mdss_panel_get_htotal(pinfo, true);
+	fetch_stop = (vblank_lines - lines_before_active) * h_total;
+
+	vblank_end_enable = mdp_video_read(ctx, MDSS_MDP_REG_INTF_CONFIG);
+	vblank_end_enable |= BIT(22);
+
+	pr_debug("ctl:%d fetch_stop:%d lines:%d\n",
+		ctl->num, fetch_stop, lines_before_active);
+
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_VBLANK_END_CONF, fetch_stop);
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_CONFIG, vblank_end_enable);
+	MDSS_XLOG(ctx->intf_num, fetch_stop, vblank_end_enable);
+}
+
+/*
+ * Program the "programmable fetch" start point so MDP begins fetching
+ * pinfo->prg_fet lines before the end of the frame (i.e. within the
+ * vertical blanking of the previous frame). Writes the pixel-clock
+ * count to PROG_FETCH_START and sets the enable bit (BIT(31)) in
+ * INTF_CONFIG. No-op when prefetch is not needed/supported.
+ */
+static void mdss_mdp_fetch_start_config(struct mdss_mdp_video_ctx *ctx,
+		struct mdss_mdp_ctl *ctl)
+{
+	int fetch_start, fetch_enable, v_total, h_total;
+	struct mdss_data_type *mdata;
+	struct mdss_panel_info *pinfo = &ctl->panel_data->panel_info;
+
+	mdata = ctl->mdata;
+
+	pinfo->prg_fet = mdss_mdp_get_prefetch_lines(pinfo);
+	if (!pinfo->prg_fet) {
+		pr_debug("programmable fetch is not needed/supported\n");
+		return;
+	}
+
+	/*
+	 * Fetch should always be outside the active lines. If the fetching
+	 * is programmed within active region, hardware behavior is unknown.
+	 */
+	v_total = mdss_panel_get_vtotal(pinfo);
+	h_total = mdss_panel_get_htotal(pinfo, true);
+
+	fetch_start = (v_total - pinfo->prg_fet) * h_total + 1;
+	fetch_enable = BIT(31);
+
+	/* BIT(23) is set for immediate-clock DFPS panels — presumably a
+	 * HW mode tied to dynamic clock updates; TODO confirm semantics.
+	 */
+	if (pinfo->dynamic_fps && (pinfo->dfps_update ==
+			DFPS_IMMEDIATE_CLK_UPDATE_MODE))
+		fetch_enable |= BIT(23);
+
+	pr_debug("ctl:%d fetch_start:%d lines:%d\n",
+		ctl->num, fetch_start, pinfo->prg_fet);
+
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_PROG_FETCH_START, fetch_start);
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_CONFIG, fetch_enable);
+	MDSS_XLOG(ctx->intf_num, fetch_enable, fetch_start);
+}
+
+/*
+ * True for the two 4096x2160 50/60Hz formats that require pixel-drop
+ * (rather than cosite/offsite) chroma downsampling in the CDM path.
+ */
+static inline bool mdss_mdp_video_need_pixel_drop(u32 vic)
+{
+	switch (vic) {
+	case HDMI_VFRMT_4096x2160p50_256_135:
+	case HDMI_VFRMT_4096x2160p60_256_135:
+		return true;
+	default:
+		return false;
+	}
+}
+
+/*
+ * Build an mdp_cdm_cfg from the panel info and output format and hand
+ * it to mdss_mdp_cdm_setup(). Chooses RGB->YUV (601 full-range) CSC
+ * for YUV output formats and per-chroma-sample downsampling types;
+ * certain 4k formats need pixel-drop instead of cosite/offsite.
+ * Returns the cdm-setup result, or -EINVAL for unsupported chroma.
+ */
+static int mdss_mdp_video_cdm_setup(struct mdss_mdp_cdm *cdm,
+	struct mdss_panel_info *pinfo, struct mdss_mdp_format_params *fmt)
+{
+	struct mdp_cdm_cfg setup;
+
+	if (fmt->is_yuv)
+		setup.csc_type = MDSS_MDP_CSC_RGB2YUV_601FR;
+	else
+		setup.csc_type = MDSS_MDP_CSC_RGB2RGB;
+
+	switch (fmt->chroma_sample) {
+	case MDSS_MDP_CHROMA_RGB:
+		setup.horz_downsampling_type = MDP_CDM_CDWN_DISABLE;
+		setup.vert_downsampling_type = MDP_CDM_CDWN_DISABLE;
+		break;
+	case MDSS_MDP_CHROMA_H2V1:
+		setup.horz_downsampling_type = MDP_CDM_CDWN_COSITE;
+		setup.vert_downsampling_type = MDP_CDM_CDWN_DISABLE;
+		break;
+	case MDSS_MDP_CHROMA_420:
+		if (mdss_mdp_video_need_pixel_drop(pinfo->vic)) {
+			setup.horz_downsampling_type = MDP_CDM_CDWN_PIXEL_DROP;
+			setup.vert_downsampling_type = MDP_CDM_CDWN_PIXEL_DROP;
+		} else {
+			setup.horz_downsampling_type = MDP_CDM_CDWN_COSITE;
+			setup.vert_downsampling_type = MDP_CDM_CDWN_OFFSITE;
+		}
+		break;
+	case MDSS_MDP_CHROMA_H1V2:
+	default:
+		pr_err("%s: unsupported chroma sampling type\n", __func__);
+		return -EINVAL;
+	}
+
+	setup.out_format = pinfo->out_format;
+	setup.mdp_csc_bit_depth = MDP_CDM_CSC_8BIT;
+	/* output size includes any panel padding */
+	setup.output_width = pinfo->xres + pinfo->lcdc.xres_pad;
+	setup.output_height = pinfo->yres + pinfo->lcdc.yres_pad;
+	return mdss_mdp_cdm_setup(cdm, &setup);
+}
+
+/*
+ * Reconstruct pinfo->prg_fet (the programmable-fetch line count) from
+ * the INTF registers that the bootloader programmed, as part of
+ * continuous-splash handoff. prg_fet stays 0 when programmable fetch
+ * is disabled (INTF_CONFIG BIT(31) clear) or when the handed-off
+ * horizontal total reads back as zero.
+ */
+static void mdss_mdp_handoff_programmable_fetch(struct mdss_mdp_ctl *ctl,
+	struct mdss_mdp_video_ctx *ctx)
+{
+	struct mdss_panel_info *pinfo = &ctl->panel_data->panel_info;
+	u32 fetch_start_handoff, v_total_handoff = 0, h_total_handoff;
+
+	pinfo->prg_fet = 0;
+	if (mdp_video_read(ctx, MDSS_MDP_REG_INTF_CONFIG) & BIT(31)) {
+		fetch_start_handoff = mdp_video_read(ctx,
+			MDSS_MDP_REG_INTF_PROG_FETCH_START);
+		h_total_handoff = mdp_video_read(ctx,
+			MDSS_MDP_REG_INTF_HSYNC_CTL) >> 16;
+		/*
+		 * Guard the zero case before dividing: the previous code
+		 * computed v_total_handoff unconditionally and would
+		 * divide by zero on a bogus (zero) HSYNC_CTL read-back.
+		 */
+		if (h_total_handoff) {
+			v_total_handoff = mdp_video_read(ctx,
+				MDSS_MDP_REG_INTF_VSYNC_PERIOD_F0) /
+				h_total_handoff;
+			pinfo->prg_fet = v_total_handoff -
+				((fetch_start_handoff - 1)/h_total_handoff);
+		}
+		pr_debug("programmable fetch lines %d start:%d\n",
+			pinfo->prg_fet, fetch_start_handoff);
+		MDSS_XLOG(pinfo->prg_fet, fetch_start_handoff,
+			h_total_handoff, v_total_handoff);
+	}
+}
+
+/*
+ * One-time setup of a video interface context: initializes locks,
+ * completions and work items, registers the DSI recovery handler and
+ * IRQ callbacks, configures the CDM for YUV outputs, derives the INTF
+ * timing parameters from the panel info (with DSC/YUV420 adjustments),
+ * and programs the timing generator unless continuous splash is active
+ * (in which case the bootloader's programming is handed off instead).
+ * Returns 0 on success or -EINVAL on any setup failure.
+ */
+static int mdss_mdp_video_ctx_setup(struct mdss_mdp_ctl *ctl,
+		struct mdss_mdp_video_ctx *ctx, struct mdss_panel_info *pinfo)
+{
+	struct intf_timing_params *itp = &ctx->itp;
+	u32 dst_bpp;
+	struct mdss_mdp_format_params *fmt;
+	struct mdss_data_type *mdata = ctl->mdata;
+	struct dsc_desc *dsc = NULL;
+
+	ctx->ctl = ctl;
+	ctx->intf_type = ctl->intf_type;
+	init_completion(&ctx->vsync_comp);
+	spin_lock_init(&ctx->vsync_lock);
+	spin_lock_init(&ctx->dfps_lock);
+	mutex_init(&ctx->vsync_mtx);
+	atomic_set(&ctx->vsync_ref, 0);
+	spin_lock_init(&ctx->lineptr_lock);
+	spin_lock_init(&ctx->mdss_mdp_video_lock);
+	spin_lock_init(&ctx->mdss_mdp_intf_intr_lock);
+	mutex_init(&ctx->lineptr_mtx);
+	atomic_set(&ctx->lineptr_ref, 0);
+	INIT_WORK(&ctl->recover_work, recover_underrun_work);
+
+	/* DSI panels get an underrun-recovery callback; others do not */
+	if (ctl->intf_type == MDSS_INTF_DSI) {
+		ctx->intf_recovery.fxn = mdss_mdp_video_intf_recovery;
+		ctx->intf_recovery.data = ctl;
+		if (mdss_mdp_ctl_intf_event(ctl,
+					MDSS_EVENT_REGISTER_RECOVERY_HANDLER,
+					(void *)&ctx->intf_recovery,
+					CTL_INTF_EVENT_FLAG_DEFAULT)) {
+			pr_err("Failed to register intf recovery handler\n");
+			return -EINVAL;
+		}
+	} else {
+		ctx->intf_recovery.fxn = NULL;
+		ctx->intf_recovery.data = NULL;
+	}
+
+	/* CDM is required only for YUV output on CDM-capable interfaces */
+	if (mdss_mdp_is_cdm_supported(mdata, ctl->intf_type, 0)) {
+
+		fmt = mdss_mdp_get_format_params(pinfo->out_format);
+		if (!fmt) {
+			pr_err("%s: format %d not supported\n", __func__,
+			       pinfo->out_format);
+			return -EINVAL;
+		}
+		if (fmt->is_yuv) {
+			ctl->cdm =
+			mdss_mdp_cdm_init(ctl, MDP_CDM_CDWN_OUTPUT_HDMI);
+			if (!IS_ERR_OR_NULL(ctl->cdm)) {
+				if (mdss_mdp_video_cdm_setup(ctl->cdm,
+					pinfo, fmt)) {
+					pr_err("%s: setting up cdm failed\n",
+					       __func__);
+					return -EINVAL;
+				}
+				/* BIT(26) flushes the CDM block */
+				ctl->flush_bits |= BIT(26);
+			} else {
+				pr_err("%s: failed to initialize cdm\n",
+					__func__);
+				return -EINVAL;
+			}
+		} else {
+			pr_debug("%s: Format is not YUV,cdm not required\n",
+				 __func__);
+		}
+	} else {
+		pr_debug("%s: cdm not supported\n", __func__);
+	}
+
+	if (pinfo->compression_mode == COMPRESSION_DSC)
+		dsc = &pinfo->dsc;
+
+	mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_TYPE_INTF_VSYNC,
+			ctx->intf_num, mdss_mdp_video_vsync_intr_done,
+			ctl);
+	mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN,
+				ctx->intf_num,
+				mdss_mdp_video_underrun_intr_done, ctl);
+	mdss_mdp_set_intf_intr_callback(ctx, MDSS_MDP_INTF_IRQ_PROG_LINE,
+			mdss_mdp_video_lineptr_intr_done, ctl);
+
+	/* FBC compresses the stream, so scale widths by the target bpp */
+	dst_bpp = pinfo->fbc.enabled ? (pinfo->fbc.target_bpp) : (pinfo->bpp);
+
+	memset(itp, 0, sizeof(struct intf_timing_params));
+	itp->width = mult_frac((pinfo->xres + pinfo->lcdc.border_left +
+			pinfo->lcdc.border_right), dst_bpp, pinfo->bpp);
+	itp->height = pinfo->yres + pinfo->lcdc.border_top +
+					pinfo->lcdc.border_bottom;
+	itp->border_clr = pinfo->lcdc.border_clr;
+	itp->underflow_clr = pinfo->lcdc.underflow_clr;
+	itp->hsync_skew = pinfo->lcdc.hsync_skew;
+
+	/* TG active-area programming is not used, so xres/yres must match
+	 * width/height exactly.
+	 */
+	itp->xres = mult_frac((pinfo->xres + pinfo->lcdc.border_left +
+			pinfo->lcdc.border_right), dst_bpp, pinfo->bpp);
+
+	itp->yres = pinfo->yres + pinfo->lcdc.border_top +
+				pinfo->lcdc.border_bottom;
+
+	if (dsc) {	/* compressed */
+		itp->width = dsc->pclk_per_line;
+		itp->xres = dsc->pclk_per_line;
+	}
+
+	itp->h_back_porch = pinfo->lcdc.h_back_porch;
+	itp->h_front_porch = pinfo->lcdc.h_front_porch;
+	itp->v_back_porch = pinfo->lcdc.v_back_porch;
+	itp->v_front_porch = pinfo->lcdc.v_front_porch;
+	itp->hsync_pulse_width = pinfo->lcdc.h_pulse_width;
+	itp->vsync_pulse_width = pinfo->lcdc.v_pulse_width;
+	/*
+	 * In case of YUV420 output, MDP outputs data at half the rate. So
+	 * reduce all horizontal parameters by half
+	 */
+	if (ctl->cdm && pinfo->out_format == MDP_Y_CBCR_H2V2) {
+		itp->width >>= 1;
+		itp->hsync_skew >>= 1;
+		itp->xres >>= 1;
+		itp->h_back_porch >>= 1;
+		itp->h_front_porch >>= 1;
+		itp->hsync_pulse_width >>= 1;
+	}
+	if (!ctl->panel_data->panel_info.cont_splash_enabled) {
+		if (mdss_mdp_video_timegen_setup(ctl, itp, ctx)) {
+			pr_err("unable to set timing parameters intfs: %d\n",
+				ctx->intf_num);
+			return -EINVAL;
+		}
+		mdss_mdp_fetch_start_config(ctx, ctl);
+
+		if (test_bit(MDSS_QOS_VBLANK_PANIC_CTRL, mdata->mdss_qos_map))
+			mdss_mdp_fetch_end_config(ctx, ctl);
+
+	} else {
+		/* bootloader already programmed the TG; read back prg_fet */
+		mdss_mdp_handoff_programmable_fetch(ctl, ctx);
+	}
+
+	mdss_mdp_disable_prefill(ctl);
+
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_PANEL_FORMAT, ctl->dst_format);
+
+	return 0;
+}
+
+/*
+ * Claim and configure the video interface context(s) for @ctl: the
+ * master context at index @inum, plus the adjacent slave context when
+ * the fb is in pingpong-split mode. Returns 0 on success, -EBUSY when
+ * an interface is already claimed, -EINVAL for a bad index, -EPERM
+ * when context setup fails.
+ */
+static int mdss_mdp_video_intfs_setup(struct mdss_mdp_ctl *ctl,
+	struct mdss_panel_data *pdata, int inum)
+{
+	struct mdss_data_type *mdata;
+	struct mdss_panel_info *pinfo;
+	struct mdss_mdp_video_ctx *ctx;
+	int ret = 0;
+
+	if (pdata == NULL)
+		return 0;
+
+	mdata = ctl->mdata;
+	pinfo = &pdata->panel_info;
+
+	if (inum < mdata->nintf) {
+		ctx = ((struct mdss_mdp_video_ctx *) mdata->video_intf) + inum;
+		if (ctx->ref_cnt) {
+			pr_err("Intf %d already in use\n",
+					(inum + MDSS_MDP_INTF0));
+			return -EBUSY;
+		}
+		pr_debug("video Intf #%d base=%pK", ctx->intf_num, ctx->base);
+		ctx->ref_cnt++;
+	} else {
+		pr_err("Invalid intf number: %d\n", (inum + MDSS_MDP_INTF0));
+		return -EINVAL;
+	}
+
+	ctl->intf_ctx[MASTER_CTX] = ctx;
+	ret = mdss_mdp_video_ctx_setup(ctl, ctx, pinfo);
+	if (ret) {
+		pr_err("Video context setup failed for interface: %d\n",
+				ctx->intf_num);
+		ctx->ref_cnt--;
+		return -EPERM;
+	}
+
+	/* Initialize early wakeup for the master ctx */
+	INIT_WORK(&ctx->early_wakeup_dfps_work, early_wakeup_dfps_update_work);
+
+	if (is_pingpong_split(ctl->mfd)) {
+		if ((inum + 1) >= mdata->nintf) {
+			pr_err("Intf not available for ping pong split: (%d)\n",
+					(inum + 1 + MDSS_MDP_INTF0));
+			return -EINVAL;
+		}
+
+		ctx = ((struct mdss_mdp_video_ctx *) mdata->video_intf) +
+			inum + 1;
+		if (ctx->ref_cnt) {
+			/*
+			 * Report the slave's interface number; the
+			 * original message printed the master's.
+			 */
+			pr_err("Intf %d already in use\n",
+					(inum + 1 + MDSS_MDP_INTF0));
+			return -EBUSY;
+		}
+		pr_debug("video Intf #%d base=%pK", ctx->intf_num, ctx->base);
+		ctx->ref_cnt++;
+
+		ctl->intf_ctx[SLAVE_CTX] = ctx;
+		pinfo = &pdata->next->panel_info;
+		ret = mdss_mdp_video_ctx_setup(ctl, ctx, pinfo);
+		if (ret) {
+			pr_err("Video context setup failed for interface: %d\n",
+					ctx->intf_num);
+			ctx->ref_cnt--;
+			return -EPERM;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Dynamic video->command mode switch. With @prep set, sends the DSI
+ * dynamic-switch command, waits out any pending commit plus one more
+ * frame, then turns off the timing engine and drops the bus bandwidth
+ * vote. With @prep clear, only issues the DSI reconfig event.
+ */
+void mdss_mdp_switch_to_cmd_mode(struct mdss_mdp_ctl *ctl, int prep)
+{
+	struct mdss_mdp_video_ctx *ctx;
+	long int mode = MIPI_CMD_PANEL;
+	u32 frame_rate = 0;
+	int rc;
+
+	pr_debug("start, prep = %d\n", prep);
+
+	if (!prep) {
+		mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_DSI_RECONFIG_CMD,
+			(void *) mode, CTL_INTF_EVENT_FLAG_DEFAULT);
+		return;
+	}
+
+	ctx = (struct mdss_mdp_video_ctx *) ctl->intf_ctx[MASTER_CTX];
+
+	if (!ctx->timegen_en) {
+		pr_err("Time engine not enabled, cannot switch from vid\n");
+		return;
+	}
+
+	/* Start off by sending command to initial cmd mode */
+	rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_DSI_DYNAMIC_SWITCH,
+			     (void *) mode, CTL_INTF_EVENT_FLAG_DEFAULT);
+	if (rc) {
+		pr_err("intf #%d busy don't turn off, rc=%d\n",
+			 ctl->intf_num, rc);
+		return;
+	}
+
+	if (ctx->wait_pending) {
+		/* wait for at least one commit to complete */
+		wait_for_completion_interruptible_timeout(&ctx->vsync_comp,
+			  usecs_to_jiffies(VSYNC_TIMEOUT_US));
+	}
+	frame_rate = mdss_panel_get_framerate(&(ctl->panel_data->panel_info),
+			FPS_RESOLUTION_HZ);
+	/* clamp implausible rates, then reuse frame_rate as the frame
+	 * period in milliseconds (rounded up)
+	 */
+	if (!(frame_rate >= 24 && frame_rate <= 240))
+		frame_rate = 24;
+	frame_rate = ((1000/frame_rate) + 1);
+	/*
+	 * In order for panel to switch to cmd mode, we need
+	 * to wait for one more video frame to be sent after
+	 * issuing the switch command. We do this before
+	 * turning off the timing engine.
+	 */
+	msleep(frame_rate);
+	mdss_mdp_turn_off_time_engine(ctl, ctx, frame_rate);
+	mdss_bus_bandwidth_ctrl(false);
+}
+
+/*
+ * Work item scheduled on early-wakeup (input) events: restores the
+ * panel's cached default fps ahead of the next commit so the first
+ * frame after wakeup is not rendered at a lowered dynamic fps.
+ * No-op when dfps is disabled or fps is already at the default.
+ */
+static void early_wakeup_dfps_update_work(struct work_struct *work)
+{
+	struct mdss_mdp_video_ctx *ctx =
+		container_of(work, typeof(*ctx), early_wakeup_dfps_work);
+	struct mdss_panel_data *pdata;
+	struct mdss_panel_info *pinfo;
+	struct msm_fb_data_type *mfd;
+	struct mdss_mdp_ctl *ctl;
+	struct dynamic_fps_data data = {0};
+	int ret = 0;
+	int dfps;
+
+	if (!ctx) {
+		pr_err("%s: invalid ctx\n", __func__);
+		return;
+	}
+
+	ctl = ctx->ctl;
+
+	if (!ctl || !ctl->panel_data || !ctl->mfd || !ctl->mfd->fbi) {
+		pr_err("%s: invalid ctl\n", __func__);
+		return;
+	}
+
+	pdata = ctl->panel_data;
+	pinfo = &ctl->panel_data->panel_info;
+	mfd =	ctl->mfd;
+
+	if (!pinfo->dynamic_fps || !ctl->ops.config_fps_fnc ||
+		!pdata->panel_info.default_fps) {
+		pr_debug("%s: dfps not enabled on this panel\n", __func__);
+		return;
+	}
+
+	/* get the default fps that was cached before any dfps update */
+	dfps = pdata->panel_info.default_fps;
+
+	ATRACE_BEGIN(__func__);
+
+	if (dfps == pinfo->mipi.frame_rate) {
+		pr_debug("%s: FPS is already %d\n",
+			__func__, dfps);
+		goto exit;
+	}
+
+	data.fps = dfps;
+	if (mdss_mdp_dfps_update_params(mfd, pdata, &data))
+		pr_err("failed to set dfps params!\n");
+
+	/* update the HW with the new fps */
+	ATRACE_BEGIN("fps_update_wq");
+	ret = mdss_mdp_ctl_update_fps(ctl);
+	ATRACE_END("fps_update_wq");
+	if (ret)
+		pr_err("early wakeup failed to set %d fps ret=%d\n",
+			dfps, ret);
+
+exit:
+	ATRACE_END(__func__);
+}
+
+/*
+ * Early-wakeup hook (ctl->ops.early_wake_up_fnc), called from input
+ * event (interrupt) context. Rate-limits itself via last_input_time,
+ * pushes back any pending idle-fallback notification so userspace does
+ * not see an idle event while new frames are expected, and schedules
+ * the dfps-restore work for dynamic-fps panels. Always returns 0.
+ */
+static int mdss_mdp_video_early_wake_up(struct mdss_mdp_ctl *ctl)
+{
+	u64 curr_time;
+
+	curr_time = ktime_to_us(ktime_get());
+
+	/* debounce: ignore input events arriving too close together */
+	if ((curr_time - ctl->last_input_time) <
+			INPUT_EVENT_HANDLER_DELAY_USECS)
+		return 0;
+	ctl->last_input_time = curr_time;
+
+	/*
+	 * If the idle timer is running when input event happens, the timeout
+	 * will be delayed by idle_time again to ensure user space does not get
+	 * an idle event when new frames are expected.
+	 *
+	 * It would be nice to have this logic in mdss_fb.c itself by
+	 * implementing a new frame notification event. But input event handler
+	 * is called from interrupt context and scheduling a work item adds a
+	 * lot of latency rendering the input events useless in preventing the
+	 * idle time out.
+	 */
+	if ((ctl->mfd->idle_state == MDSS_FB_IDLE_TIMER_RUNNING) ||
+				(ctl->mfd->idle_state == MDSS_FB_IDLE)) {
+		/*
+		 * Modify the idle time so that an idle fallback can be
+		 * triggered for those cases, where we have no update
+		 * despite of a touch event and idle time is 0.
+		 * (70ms/200ms appear to be empirically chosen — TODO
+		 * confirm against the idle-notify consumer.)
+		 */
+		if (!ctl->mfd->idle_time) {
+			ctl->mfd->idle_time = 70;
+			schedule_delayed_work(&ctl->mfd->idle_notify_work,
+							msecs_to_jiffies(200));
+		} else {
+			mod_delayed_work(system_wq, &ctl->mfd->idle_notify_work,
+					 msecs_to_jiffies(ctl->mfd->idle_time));
+		}
+		pr_debug("Delayed idle time\n");
+	} else {
+		pr_debug("Nothing to done for this state (%d)\n",
+			 ctl->mfd->idle_state);
+	}
+
+	/*
+	 * Schedule an fps update, so we can go to default fps before
+	 * commit. Early wake up event is called from an interrupt
+	 * context, so do this from work queue
+	 */
+	if (ctl->panel_data && ctl->panel_data->panel_info.dynamic_fps) {
+		struct mdss_mdp_video_ctx *ctx;
+
+		ctx = ctl->intf_ctx[MASTER_CTX];
+		if (ctx)
+			schedule_work(&ctx->early_wakeup_dfps_work);
+	}
+
+	return 0;
+}
+
+/*
+ * Entry point for starting a video-mode ctl: claims/configures the
+ * interface context(s) and wires all video-mode ops into ctl->ops.
+ * Returns 0 on success or the negative errno from interface setup.
+ */
+int mdss_mdp_video_start(struct mdss_mdp_ctl *ctl)
+{
+	int intfs_num, ret = 0;
+
+	/* convert the ctl's absolute intf number to an array index */
+	intfs_num = ctl->intf_num - MDSS_MDP_INTF0;
+	ret = mdss_mdp_video_intfs_setup(ctl, ctl->panel_data, intfs_num);
+	if (IS_ERR_VALUE(ret)) {
+		pr_err("unable to set video interface: %d\n", ret);
+		return ret;
+	}
+
+	ctl->ops.stop_fnc = mdss_mdp_video_stop;
+	ctl->ops.display_fnc = mdss_mdp_video_display;
+	ctl->ops.wait_fnc = mdss_mdp_video_wait4comp;
+	ctl->ops.wait_vsync_fnc = mdss_mdp_video_wait4vsync;
+	ctl->ops.read_line_cnt_fnc = mdss_mdp_video_line_count;
+	ctl->ops.add_vsync_handler = mdss_mdp_video_add_vsync_handler;
+	ctl->ops.remove_vsync_handler = mdss_mdp_video_remove_vsync_handler;
+	ctl->ops.config_fps_fnc = mdss_mdp_video_config_fps;
+	ctl->ops.early_wake_up_fnc = mdss_mdp_video_early_wake_up;
+	ctl->ops.update_lineptr = mdss_mdp_video_lineptr_ctrl;
+
+	return 0;
+}
+
+/*
+ * Return the register base address of the video interface context at
+ * @interface_id in mdata->video_intf. No bounds checking is performed;
+ * callers must pass a valid interface index.
+ */
+void *mdss_mdp_get_intf_base_addr(struct mdss_data_type *mdata,
+		u32 interface_id)
+{
+	struct mdss_mdp_video_ctx *intf_ctx =
+		(struct mdss_mdp_video_ctx *) mdata->video_intf;
+
+	return (void *)(intf_ctx[interface_id].base);
+}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c b/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
new file mode 100644
index 0000000..b155870
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
@@ -0,0 +1,917 @@
+/* Copyright (c) 2012-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include "mdss.h"
+#include "mdss_mdp.h"
+#include "mdss_rotator_internal.h"
+#include "mdss_panel.h"
+#include "mdss_mdp_trace.h"
+#include "mdss_debug.h"
+
+/*
+ * if BWC enabled and format is H1V2 or 420, do not use site C or I.
+ * Hence, set the bits 29:26 in format register, as zero.
+ */
+#define BWC_FMT_MASK	0xC3FFFFFF
+#define MDSS_DEFAULT_OT_SETTING    0x10
+
+/* Writeback usage classes; ordering matches the layout of wb_ctx_list. */
+enum mdss_mdp_writeback_type {
+	MDSS_MDP_WRITEBACK_TYPE_ROTATOR,	/* block-mode rotator output */
+	MDSS_MDP_WRITEBACK_TYPE_LINE,		/* line-mode writeback */
+	MDSS_MDP_WRITEBACK_TYPE_WFD,		/* WFD/memory-out writeback */
+};
+
+/*
+ * Per-writeback-engine runtime state. One instance per entry of
+ * wb_ctx_list; claimed via ref_cnt and attached to a ctl as priv_data.
+ */
+struct mdss_mdp_writeback_ctx {
+	u32 wb_num;		/* writeback engine index */
+	char __iomem *base;	/* register base of this WB block */
+	u8 ref_cnt;		/* non-zero when claimed by a ctl */
+	u8 type;		/* enum mdss_mdp_writeback_type */
+	struct completion wb_comp;	/* signalled on WB done IRQ */
+	int comp_cnt;		/* outstanding completions to wait for */
+
+	u32 intr_type;		/* MDSS IRQ type for this WB engine */
+	u32 intf_num;
+
+	u32 xin_id;		/* bus client id, for QoS/OT programming */
+	u32 wr_lim;
+	struct mdss_mdp_shared_reg_ctrl clk_ctrl;
+
+	u32 opmode;
+	struct mdss_mdp_format_params *dst_fmt;	/* destination format */
+	u16 img_width;		/* full destination buffer dimensions */
+	u16 img_height;
+	u16 width;		/* written region dimensions */
+	u16 height;
+	u16 frame_rate;
+	enum mdss_mdp_csc_type csc_type;
+	struct mdss_rect dst_rect;	/* offset+size within the buffer */
+
+	u32 dnsc_factor_w;	/* rotator downscale factors */
+	u32 dnsc_factor_h;
+
+	u8 rot90;		/* non-zero when a 90-degree rotate is set */
+	u32 bwc_mode;
+	int initialized;	/* format/addr setup already done once */
+
+	struct mdss_mdp_plane_sizes dst_planes;
+
+	spinlock_t wb_lock;	/* protects vsync_handlers list */
+	struct list_head vsync_handlers;
+
+	ktime_t start_time;	/* kickoff/done timestamps for tracing */
+	ktime_t end_time;
+};
+
+/*
+ * Static descriptors for every writeback engine, indexed by writeback
+ * number: two rotator, two line-mode, one WFD. The xin_id and clk_ctrl
+ * values are target register/bus constants — presumably common across
+ * the supported SoCs; verify against the target's HW catalog.
+ */
+static struct mdss_mdp_writeback_ctx wb_ctx_list[MDSS_MDP_MAX_WRITEBACK] = {
+	{
+		.type = MDSS_MDP_WRITEBACK_TYPE_ROTATOR,
+		.intr_type = MDSS_MDP_IRQ_TYPE_WB_ROT_COMP,
+		.intf_num = 0,
+		.xin_id = 3,
+		.clk_ctrl.reg_off = 0x2BC,
+		.clk_ctrl.bit_off = 0x8,
+	},
+	{
+		.type = MDSS_MDP_WRITEBACK_TYPE_ROTATOR,
+		.intr_type = MDSS_MDP_IRQ_TYPE_WB_ROT_COMP,
+		.intf_num = 1,
+		.xin_id = 11,
+		.clk_ctrl.reg_off = 0x2BC,
+		.clk_ctrl.bit_off = 0xC,
+	},
+	{
+		.type = MDSS_MDP_WRITEBACK_TYPE_LINE,
+		.intr_type = MDSS_MDP_IRQ_TYPE_WB_ROT_COMP,
+		.intf_num = 0,
+		.xin_id = 3,
+		.clk_ctrl.reg_off = 0x2BC,
+		.clk_ctrl.bit_off = 0x8,
+	},
+	{
+		.type = MDSS_MDP_WRITEBACK_TYPE_LINE,
+		.intr_type = MDSS_MDP_IRQ_TYPE_WB_ROT_COMP,
+		.intf_num = 1,
+		.xin_id = 11,
+		.clk_ctrl.reg_off = 0x2BC,
+		.clk_ctrl.bit_off = 0xC,
+	},
+	{
+		.type = MDSS_MDP_WRITEBACK_TYPE_WFD,
+		.intr_type = MDSS_MDP_IRQ_TYPE_WB_WFD_COMP,
+		.intf_num = 0,
+		.xin_id = 6,
+		.clk_ctrl.reg_off = 0x2BC,
+		.clk_ctrl.bit_off = 0x10,
+	},
+};
+
+/* Relaxed (no-barrier) write of @val to WB register @reg of @ctx. */
+static inline void mdp_wb_write(struct mdss_mdp_writeback_ctx *ctx,
+				u32 reg, u32 val)
+{
+	writel_relaxed(val, ctx->base + reg);
+}
+
+/*
+ * Program the four destination plane addresses for a writeback job.
+ * Works on a local copy of @in_data so the caller's buffer descriptor
+ * is never modified. Returns 0 on success or a negative errno from
+ * the plane-size validation.
+ */
+static int mdss_mdp_writeback_addr_setup(struct mdss_mdp_writeback_ctx *ctx,
+					 const struct mdss_mdp_data *in_data)
+{
+	int ret;
+	struct mdss_mdp_data data;
+
+	if (!in_data)
+		return -EINVAL;
+	data = *in_data;
+
+	pr_debug("wb_num=%d addr=0x%pa\n", ctx->wb_num, &data.p[0].addr);
+
+	ret = mdss_mdp_data_check(&data, &ctx->dst_planes, ctx->dst_fmt);
+	if (ret)
+		return ret;
+
+	/* apply the dst_rect x/y offset to each plane address */
+	mdss_mdp_data_calc_offset(&data, ctx->dst_rect.x, ctx->dst_rect.y,
+			&ctx->dst_planes, ctx->dst_fmt);
+
+	/* planar formats with Cb in element[0] store Cr/Cb swapped */
+	if ((ctx->dst_fmt->fetch_planes == MDSS_MDP_PLANE_PLANAR) &&
+			(ctx->dst_fmt->element[0] == C1_B_Cb))
+		swap(data.p[1].addr, data.p[2].addr);
+
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST0_ADDR, data.p[0].addr);
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST1_ADDR, data.p[1].addr);
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST2_ADDR, data.p[2].addr);
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST3_ADDR, data.p[3].addr);
+
+	return 0;
+}
+
+/*
+ * Configure the CDM block for a writeback destination format: selects
+ * per-chroma-sample downsampling and passes the ctx's size/CSC type
+ * through to mdss_mdp_cdm_setup(). Returns its result, or -EINVAL for
+ * unsupported chroma sampling (H1V2).
+ */
+static int mdss_mdp_writeback_cdm_setup(struct mdss_mdp_writeback_ctx *ctx,
+	struct mdss_mdp_cdm *cdm, struct mdss_mdp_format_params *fmt)
+{
+	struct mdp_cdm_cfg setup;
+
+	switch (fmt->chroma_sample) {
+	case MDSS_MDP_CHROMA_RGB:
+		setup.horz_downsampling_type = MDP_CDM_CDWN_DISABLE;
+		setup.vert_downsampling_type = MDP_CDM_CDWN_DISABLE;
+		break;
+	case MDSS_MDP_CHROMA_H2V1:
+		setup.horz_downsampling_type = MDP_CDM_CDWN_COSITE;
+		setup.vert_downsampling_type = MDP_CDM_CDWN_DISABLE;
+		break;
+	case MDSS_MDP_CHROMA_420:
+		setup.horz_downsampling_type = MDP_CDM_CDWN_COSITE;
+		setup.vert_downsampling_type = MDP_CDM_CDWN_OFFSITE;
+		break;
+	case MDSS_MDP_CHROMA_H1V2:
+	default:
+		pr_err("%s: unsupported chroma sampling type\n", __func__);
+		return -EINVAL;
+	}
+
+	setup.out_format = fmt->format;
+	setup.mdp_csc_bit_depth = MDP_CDM_CSC_8BIT;
+	setup.output_width = ctx->width;
+	setup.output_height = ctx->height;
+	setup.csc_type = ctx->csc_type;
+	return mdss_mdp_cdm_setup(cdm, &setup);
+}
+
+/*
+ * Program the client-driven-prefetch (CDP) control register for a
+ * writeback engine. CDP stays disabled (register written as 0) for the
+ * rotator on targets with the ROTCDP quirk; otherwise it is enabled,
+ * with UBWC-metadata prefetch for non-linear formats and a deeper
+ * 64-transaction ahead setting for non-rotator (line/WFD) clients.
+ */
+void mdss_mdp_set_wb_cdp(struct mdss_mdp_writeback_ctx *ctx,
+	struct mdss_mdp_format_params *fmt)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 cdp_settings = 0x0;
+
+	/* Disable CDP for rotator in v1 */
+	if (ctx->type == MDSS_MDP_WRITEBACK_TYPE_ROTATOR &&
+			mdss_has_quirk(mdata, MDSS_QUIRK_ROTCDP))
+		goto exit;
+
+	cdp_settings = MDSS_MDP_CDP_ENABLE;
+
+	if (!mdss_mdp_is_linear_format(fmt))
+		cdp_settings |= MDSS_MDP_CDP_ENABLE_UBWCMETA;
+
+	/* 64-transactions for line mode otherwise we keep 32 */
+	if (ctx->type != MDSS_MDP_WRITEBACK_TYPE_ROTATOR)
+		cdp_settings |= MDSS_MDP_CDP_AHEAD_64;
+
+exit:
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_CDP_CTRL, cdp_settings);
+}
+
+/*
+ * Program all destination-format related WB registers for @format:
+ * plane sizes, pack pattern, strides, output size, RGB->YUV CSC for
+ * non-rotator YUV paths without a CDM, UBWC write config, rotator
+ * downscale factors, and CDP. Returns 0 on success, -EINVAL for an
+ * unsupported format or chroma sampling, or the CDM setup error.
+ */
+static int mdss_mdp_writeback_format_setup(struct mdss_mdp_writeback_ctx *ctx,
+		u32 format, struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_format_params *fmt;
+	u32 dst_format, pattern, ystride0, ystride1, outsize, chroma_samp;
+	u32 dnsc_factor, write_config = 0;
+	u32 opmode = ctx->opmode;
+	bool rotation = false;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	int rc;
+
+	pr_debug("wb_num=%d format=%d\n", ctx->wb_num, format);
+
+	if (ctx->rot90)
+		rotation = true;
+
+	fmt = mdss_mdp_get_format_params(format);
+	if (!fmt) {
+		pr_err("wb format=%d not supported\n", format);
+		return -EINVAL;
+	}
+
+	mdss_mdp_get_plane_sizes(fmt, ctx->img_width, ctx->img_height,
+				 &ctx->dst_planes,
+				 ctx->opmode & MDSS_MDP_OP_BWC_EN, rotation);
+
+	ctx->dst_fmt = fmt;
+
+	chroma_samp = fmt->chroma_sample;
+
+	/* with a CDM attached, color conversion happens there, not in WB */
+	if (ctl->cdm) {
+		rc = mdss_mdp_writeback_cdm_setup(ctx, ctl->cdm, fmt);
+		if (rc) {
+			pr_err("%s: CDM config failed with error %d\n",
+				__func__, rc);
+			return rc;
+		}
+		ctl->flush_bits |= BIT(26);
+	}
+	/* no CDM: enable the WB block's own RGB->YUV CSC for YUV output */
+	if (ctx->type != MDSS_MDP_WRITEBACK_TYPE_ROTATOR &&
+		fmt->is_yuv && !ctl->cdm) {
+		mdss_mdp_csc_setup(MDSS_MDP_BLOCK_WB, ctx->wb_num,
+				   MDSS_MDP_CSC_RGB2YUV_601L);
+		opmode |= (1 << 8) |	/* CSC_EN */
+			  (0 << 9) |	/* SRC_DATA=RGB */
+			  (1 << 10);	/* DST_DATA=YCBCR */
+
+		switch (chroma_samp) {
+		case MDSS_MDP_CHROMA_RGB:
+		case MDSS_MDP_CHROMA_420:
+		case MDSS_MDP_CHROMA_H2V1:
+			opmode |= (chroma_samp << 11);
+			break;
+		case MDSS_MDP_CHROMA_H1V2:
+		default:
+			pr_err("unsupported wb chroma samp=%d\n", chroma_samp);
+			return -EINVAL;
+		}
+	}
+
+	/* assemble the DST_FORMAT register from the format descriptor */
+	dst_format = (chroma_samp << 23) |
+		     (fmt->fetch_planes << 19) |
+		     (fmt->bits[C3_ALPHA] << 6) |
+		     (fmt->bits[C2_R_Cr] << 4) |
+		     (fmt->bits[C1_B_Cb] << 2) |
+		     (fmt->bits[C0_G_Y] << 0);
+
+	/* see BWC_FMT_MASK: clear bits 29:26 for BWC-incompatible sites */
+	dst_format &= BWC_FMT_MASK;
+
+	if (fmt->bits[C3_ALPHA] || fmt->alpha_enable) {
+		dst_format |= BIT(8); /* DSTC3_EN */
+		if (!fmt->alpha_enable)
+			dst_format |= BIT(14); /* DST_ALPHA_X */
+	}
+
+	if (fmt->is_yuv && test_bit(MDSS_CAPS_YUV_CONFIG, mdata->mdss_caps_map))
+		dst_format |= BIT(15);
+
+	/* some targets place element[2] at bit 15 instead of 16 (quirk) */
+	if (mdss_has_quirk(mdata, MDSS_QUIRK_FMT_PACK_PATTERN)) {
+		pattern = (fmt->element[3] << 24) |
+			  (fmt->element[2] << 15) |
+			  (fmt->element[1] << 8)  |
+			  (fmt->element[0] << 0);
+	} else {
+		pattern = (fmt->element[3] << 24) |
+			  (fmt->element[2] << 16) |
+			  (fmt->element[1] << 8)  |
+			  (fmt->element[0] << 0);
+	}
+
+	dst_format |= (fmt->unpack_align_msb << 18) |
+		      (fmt->unpack_tight << 17) |
+		      ((fmt->unpack_count - 1) << 12) |
+		      ((fmt->bpp - 1) << 9);
+
+	dst_format |= (fmt->unpack_dx_format << 21);
+
+	ystride0 = (ctx->dst_planes.ystride[0]) |
+		   (ctx->dst_planes.ystride[1] << 16);
+	ystride1 = (ctx->dst_planes.ystride[2]) |
+		   (ctx->dst_planes.ystride[3] << 16);
+	outsize = (ctx->dst_rect.h << 16) | ctx->dst_rect.w;
+
+	/* UBWC: enable compression and bank/format write config bits */
+	if (mdss_mdp_is_ubwc_format(fmt)) {
+		opmode |= BIT(0);
+		dst_format |= BIT(31);
+		if (mdata->highest_bank_bit)
+			write_config |= (mdata->highest_bank_bit << 8);
+		if (fmt->format == MDP_RGB_565_UBWC)
+			write_config |= 0x8;
+	}
+
+	if (ctx->type == MDSS_MDP_WRITEBACK_TYPE_ROTATOR
+			&& mdata->has_rot_dwnscale) {
+		dnsc_factor = (ctx->dnsc_factor_h) | (ctx->dnsc_factor_w << 16);
+		mdp_wb_write(ctx, MDSS_MDP_REG_WB_ROTATOR_PIPE_DOWNSCALER,
+								dnsc_factor);
+	}
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_ALPHA_X_VALUE, 0xFF);
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST_FORMAT, dst_format);
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST_OP_MODE, opmode);
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST_PACK_PATTERN, pattern);
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST_YSTRIDE0, ystride0);
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST_YSTRIDE1, ystride1);
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_OUT_SIZE, outsize);
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST_WRITE_CONFIG, write_config);
+
+	/* configure CDP */
+	if (test_bit(MDSS_QOS_CDP, mdata->mdss_qos_map))
+		mdss_mdp_set_wb_cdp(ctx, fmt);
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_writeback_prepare_wfd() - configure writeback ctx for WFD/line mode
+ * @ctl: control path that owns this writeback block
+ * @arg: unused in WFD mode
+ *
+ * Copies the output geometry from the ctl into the writeback context and
+ * programs the destination format registers. The setup runs once per
+ * session (guarded by ctx->initialized) unless the ctl is shared between
+ * clients (shared_lock set), in which case it is redone every commit.
+ *
+ * Return: 0 on success, -ENODEV if no context is attached, or the error
+ * from mdss_mdp_writeback_format_setup().
+ */
+static int mdss_mdp_writeback_prepare_wfd(struct mdss_mdp_ctl *ctl, void *arg)
+{
+	struct mdss_mdp_writeback_ctx *ctx;
+	int ret;
+
+	ctx = (struct mdss_mdp_writeback_ctx *) ctl->priv_data;
+	if (!ctx)
+		return -ENODEV;
+
+	if (ctx->initialized && !ctl->shared_lock) /* already set */
+		return 0;
+
+	pr_debug("wfd setup ctl=%d\n", ctl->num);
+
+	/* output frame covers the full ctl resolution, origin at (0,0) */
+	ctx->opmode = 0;
+	ctx->img_width = ctl->width;
+	ctx->img_height = ctl->height;
+	ctx->width = ctl->width;
+	ctx->height = ctl->height;
+	ctx->frame_rate = ctl->frame_rate;
+	ctx->csc_type = ctl->csc_type;
+	ctx->dst_rect.x = 0;
+	ctx->dst_rect.y = 0;
+	ctx->dst_rect.w = ctx->width;
+	ctx->dst_rect.h = ctx->height;
+
+	ret = mdss_mdp_writeback_format_setup(ctx, ctl->dst_format, ctl);
+	if (ret) {
+		pr_err("format setup failed\n");
+		return ret;
+	}
+
+	ctx->initialized = true;
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_writeback_prepare_rot() - configure writeback ctx for a rotator job
+ * @ctl: control path that owns this writeback block
+ * @arg: struct mdss_mdp_writeback_arg carrying the rotator entry
+ *
+ * Extracts the rotation item and performance parameters from the rotator
+ * entry, derives the writeback opmode (rotator enable, block size, 90-degree
+ * rotation) and output geometry, then programs the destination format.
+ *
+ * Return: 0 on success, -ENOENT/-ENODEV when required state is missing, or
+ * the error from mdss_mdp_writeback_format_setup().
+ */
+static int mdss_mdp_writeback_prepare_rot(struct mdss_mdp_ctl *ctl, void *arg)
+{
+	struct mdss_mdp_writeback_ctx *ctx;
+	struct mdss_mdp_writeback_arg *wb_args;
+	struct mdss_rot_entry *entry;
+	struct mdp_rotation_item *item;
+	struct mdss_rot_perf *perf;
+	struct mdss_data_type *mdata;
+	u32 format;
+
+	ctx = (struct mdss_mdp_writeback_ctx *) ctl->priv_data;
+	if (!ctx)
+		return -ENODEV;
+	wb_args = (struct mdss_mdp_writeback_arg *) arg;
+	if (!wb_args)
+		return -ENOENT;
+
+	entry = (struct mdss_rot_entry *) wb_args->priv_data;
+	if (!entry) {
+		pr_err("unable to retrieve rot session ctl=%d\n", ctl->num);
+		return -ENODEV;
+	}
+	item = &entry->item;
+	perf = entry->perf;
+	mdata = ctl->mdata;
+	if (!mdata) {
+		/* fix: message was missing the terminating newline */
+		pr_err("no mdata attached to ctl=%d\n", ctl->num);
+		return -ENODEV;
+	}
+	pr_debug("rot setup wb_num=%d\n", ctx->wb_num);
+
+	ctx->opmode = BIT(6); /* ROT EN */
+	if (ctl->mdata->rot_block_size == 128)
+		ctx->opmode |= BIT(4); /* block size 128 */
+
+	/* BWC is not used on the rotator writeback path */
+	ctx->bwc_mode = 0;
+	ctx->opmode |= ctx->bwc_mode;
+
+	ctx->img_width = item->output.width;
+	ctx->img_height = item->output.height;
+	ctx->width = ctx->dst_rect.w = item->dst_rect.w;
+	ctx->height = ctx->dst_rect.h = item->dst_rect.h;
+	ctx->dst_rect.x = item->dst_rect.x;
+	ctx->dst_rect.y = item->dst_rect.y;
+	ctx->frame_rate = perf->config.frame_rate;
+	ctx->dnsc_factor_w = entry->dnsc_factor_w;
+	ctx->dnsc_factor_h = entry->dnsc_factor_h;
+
+	ctx->rot90 = !!(item->flags & MDP_ROTATION_90);
+
+	format = item->output.format;
+
+	if (ctx->rot90)
+		ctx->opmode |= BIT(5); /* ROT 90 */
+
+	return mdss_mdp_writeback_format_setup(ctx, format, ctl);
+}
+
+/*
+ * mdss_mdp_wb_add_vsync_handler() - register a writeback-done notifier
+ * @ctl:    control path that owns this writeback block
+ * @handle: handler with a non-NULL vsync_handler callback
+ *
+ * Adds @handle to the context's vsync list under wb_lock; the list is
+ * walked from mdss_mdp_writeback_intr_done() when a frame completes.
+ * Adding an already-enabled handler is a no-op.
+ *
+ * Return: 0 on success, -EINVAL for a bad handle, -ENODEV without a ctx.
+ */
+static int mdss_mdp_wb_add_vsync_handler(struct mdss_mdp_ctl *ctl,
+		struct mdss_mdp_vsync_handler *handle)
+{
+	struct mdss_mdp_writeback_ctx *ctx;
+	unsigned long flags;
+	int ret = 0;
+
+	if (!handle || !(handle->vsync_handler)) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	ctx = (struct mdss_mdp_writeback_ctx *) ctl->priv_data;
+	if (!ctx) {
+		pr_err("invalid ctx for ctl=%d\n", ctl->num);
+		ret = -ENODEV;
+		goto exit;
+	}
+
+	/* wb_lock serializes against the completion irq walking this list */
+	spin_lock_irqsave(&ctx->wb_lock, flags);
+	if (!handle->enabled) {
+		handle->enabled = true;
+		list_add(&handle->list, &ctx->vsync_handlers);
+	}
+	spin_unlock_irqrestore(&ctx->wb_lock, flags);
+exit:
+	return ret;
+}
+
+/*
+ * mdss_mdp_wb_remove_vsync_handler() - unregister a writeback-done notifier
+ * @ctl:    control path that owns this writeback block
+ * @handle: handler previously added via mdss_mdp_wb_add_vsync_handler()
+ *
+ * Removes @handle from the context's vsync list under wb_lock. Removing a
+ * handler that is not enabled is a no-op.
+ *
+ * Return: 0 on success, -EINVAL for a bad handle, -ENODEV without a ctx.
+ */
+static int mdss_mdp_wb_remove_vsync_handler(struct mdss_mdp_ctl *ctl,
+		struct mdss_mdp_vsync_handler *handle)
+{
+	struct mdss_mdp_writeback_ctx *ctx;
+	unsigned long flags;
+	int ret = 0;
+
+	if (!handle || !(handle->vsync_handler)) {
+		ret = -EINVAL;
+		goto exit;
+	}
+	ctx = (struct mdss_mdp_writeback_ctx *) ctl->priv_data;
+	if (!ctx) {
+		pr_err("invalid ctx for ctl=%d\n", ctl->num);
+		ret = -ENODEV;
+		goto exit;
+	}
+	spin_lock_irqsave(&ctx->wb_lock, flags);
+	if (handle->enabled) {
+		handle->enabled = false;
+		list_del_init(&handle->list);
+	}
+	spin_unlock_irqrestore(&ctx->wb_lock, flags);
+exit:
+	return ret;
+}
+
+/*
+ * mdss_mdp_writeback_stop() - tear down the writeback session for a ctl
+ * @ctl: control path being stopped
+ * @panel_power_state: unused for writeback
+ *
+ * Drops all registered vsync handlers, detaches the interrupt callback,
+ * releases any waiter stuck on wb_comp, drops the context refcount taken
+ * in mdss_mdp_writeback_start(), and destroys the CDM block if one was
+ * allocated for this session.
+ *
+ * Return: always 0.
+ */
+static int mdss_mdp_writeback_stop(struct mdss_mdp_ctl *ctl,
+	int panel_power_state)
+{
+	struct mdss_mdp_writeback_ctx *ctx;
+	struct mdss_mdp_vsync_handler *t, *handle;
+
+	pr_debug("stop ctl=%d\n", ctl->num);
+
+	ctx = (struct mdss_mdp_writeback_ctx *) ctl->priv_data;
+	if (ctx) {
+		list_for_each_entry_safe(handle, t, &ctx->vsync_handlers, list)
+			mdss_mdp_wb_remove_vsync_handler(ctl, handle);
+
+		mdss_mdp_set_intr_callback(ctx->intr_type, ctx->intf_num,
+				NULL, NULL);
+
+		/* unblock anyone still waiting in mdss_mdp_wb_wait4comp() */
+		complete_all(&ctx->wb_comp);
+
+		ctl->priv_data = NULL;
+		ctx->ref_cnt--;
+	}
+
+	if (ctl->cdm) {
+		mdss_mdp_cdm_destroy(ctl->cdm);
+		ctl->cdm = NULL;
+	}
+	return 0;
+}
+
+/*
+ * mdss_mdp_writeback_intr_done() - writeback-done interrupt callback
+ * @arg: the mdss_mdp_ctl this interrupt belongs to
+ *
+ * Runs in irq context. Disables the writeback interrupt, invokes every
+ * registered vsync handler with the completion timestamp, and signals
+ * wb_comp so mdss_mdp_wb_wait4comp() can proceed.
+ */
+static void mdss_mdp_writeback_intr_done(void *arg)
+{
+	struct mdss_mdp_ctl *ctl = arg;
+	struct mdss_mdp_writeback_ctx *ctx = ctl->priv_data;
+	struct mdss_mdp_vsync_handler *tmp;
+	ktime_t vsync_time;
+
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return;
+	}
+	vsync_time = ktime_get();
+
+	pr_debug("intr wb_num=%d\n", ctx->wb_num);
+
+	/* one-shot: re-enabled per kickoff in mdss_mdp_writeback_display() */
+	mdss_mdp_irq_disable_nosync(ctx->intr_type, ctx->intf_num);
+
+	/* plain spin_lock: already in irq context, no need to save flags */
+	spin_lock(&ctx->wb_lock);
+	list_for_each_entry(tmp, &ctx->vsync_handlers, list) {
+		tmp->vsync_handler(ctl, vsync_time);
+	}
+	spin_unlock(&ctx->wb_lock);
+
+	complete_all(&ctx->wb_comp);
+	MDSS_XLOG(ctx->wb_num, ctx->type, ctx->xin_id, ctx->intf_num);
+}
+
+/*
+ * mdss_mdp_traffic_shaper_helper() - program per-client traffic shaper
+ * @ctl:    control path (rotator session expected)
+ * @ctx:    writeback context identifying the write client
+ * @enable: true to compute and enable shaping, false to clear it
+ *
+ * For each staged rotator pipe, derives a bytes-per-clock budget from the
+ * pipe's bandwidth vote and the MDP clock, and writes it to the client's
+ * traffic shaper register. Applies only when the left mixer is in rotator
+ * mode; display paths are left unshaped.
+ *
+ * Return: true if shaping was enabled for at least one pipe.
+ */
+static bool mdss_mdp_traffic_shaper_helper(struct mdss_mdp_ctl *ctl,
+					 struct mdss_mdp_writeback_ctx *ctx,
+					 bool enable)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	bool traffic_shaper_enabled = false;
+	struct mdss_mdp_mixer *mixer = ctl->mixer_left;
+	int i;
+	u32 clk_rate;
+	u64 bw_rate;
+
+	if (!mixer)
+		return traffic_shaper_enabled;
+
+	/* currently only for rotator pipes */
+	if (!mixer->rotator_mode)
+		return traffic_shaper_enabled;
+
+	for (i = 0; i < MDSS_MDP_MAX_STAGE; i++) {
+		struct mdss_mdp_pipe *pipe;
+		struct mdss_mdp_perf_params perf;
+		u32 traffic_shaper;
+
+		pipe = mixer->stage_pipe[i];
+
+		memset(&perf, 0, sizeof(perf));
+
+		if (pipe == NULL)
+			continue;
+
+		if (enable) {
+			if (mdss_mdp_perf_calc_pipe(pipe, &perf, &mixer->roi,
+				PERF_CALC_PIPE_SINGLE_LAYER))
+				continue;
+
+			clk_rate = max(mdss_mdp_get_mdp_clk_rate(ctl->mdata),
+					perf.mdp_clk_rate);
+			ctl->traffic_shaper_mdp_clk = clk_rate;
+			bw_rate = perf.bw_overlap;
+
+			/*
+			 * Bandwidth vote accounts for both read and write
+			 * rotator, divide by 2 to get only the write bandwidth.
+			 */
+			do_div(bw_rate, 2);
+
+			/*
+			 * Calculating bytes per clock in 4.4 form
+			 * allowing up to 1/16 granularity.
+			 */
+			do_div(bw_rate,
+				(clk_rate >>
+				 MDSS_MDP_REG_TRAFFIC_SHAPER_FIXPOINT_FACTOR));
+
+			/* +1 rounds up so the budget never starves the client */
+			traffic_shaper = lower_32_bits(bw_rate) + 1;
+			traffic_shaper |= MDSS_MDP_REG_TRAFFIC_SHAPER_EN;
+			traffic_shaper_enabled = true;
+
+			pr_debug("pnum=%d inum:%d bw=%lld clk_rate=%u shaper=0x%x ena:%d\n",
+				pipe->num, ctx->intf_num, perf.bw_overlap,
+				clk_rate, traffic_shaper, enable);
+
+		} else {
+			/* writing 0 clears the enable bit and the budget */
+			traffic_shaper = 0;
+
+			pr_debug("inum:%d shaper=0x%x, ena:%d\n",
+				ctx->intf_num, traffic_shaper, enable);
+		}
+
+		writel_relaxed(traffic_shaper, mdata->mdp_base +
+			MDSS_MDP_REG_TRAFFIC_SHAPER_WR_CLIENT(ctx->intf_num));
+	}
+
+	return traffic_shaper_enabled;
+}
+
+/*
+ * mdss_mdp_traffic_shaper() - enable/disable traffic shaping for a ctl
+ * @ctl:    control path
+ * @ctx:    writeback context identifying the write client
+ * @enable: true to enable shaping, false to clear it
+ *
+ * Thin wrapper around mdss_mdp_traffic_shaper_helper() that only touches
+ * hardware when the ctl is powered on, and records the resulting state in
+ * ctl->traffic_shaper_enabled.
+ *
+ * Fixes: bool initialized with 0 instead of false, and "shapper" typo in
+ * the debug message.
+ */
+static void mdss_mdp_traffic_shaper(struct mdss_mdp_ctl *ctl,
+		struct mdss_mdp_writeback_ctx *ctx, bool enable)
+{
+	bool traffic_shaper_enabled = false;
+
+	if (mdss_mdp_ctl_is_power_on(ctl)) {
+		traffic_shaper_enabled = mdss_mdp_traffic_shaper_helper
+			(ctl, ctx, enable);
+	}
+
+	ctl->traffic_shaper_enabled = traffic_shaper_enabled;
+
+	pr_debug("traffic shaper ctl:%d ena:%d\n", ctl->num,
+		ctl->traffic_shaper_enabled);
+}
+
+/*
+ * mdss_mdp_wb_wait4comp() - wait for the kicked-off writeback frame
+ * @ctl: control path
+ * @arg: unused
+ *
+ * Blocks until the writeback-done interrupt signals wb_comp or
+ * KOFF_TIMEOUT expires. On timeout it re-reads the interrupt status
+ * register: if the done bit is set the irq was simply lost, so the
+ * completion path is run by hand; otherwise the frame is reported as a
+ * timeout. In all cases the iommu/bus/clock votes taken at kickoff are
+ * released and comp_cnt is decremented.
+ *
+ * Return: 0 on (eventual) completion, -ENODEV on a real timeout or
+ * missing context.
+ */
+static int mdss_mdp_wb_wait4comp(struct mdss_mdp_ctl *ctl, void *arg)
+{
+	struct mdss_mdp_writeback_ctx *ctx;
+	int rc = 0;
+	u64 rot_time;
+	u32 status, mask, isr;
+
+	ctx = (struct mdss_mdp_writeback_ctx *) ctl->priv_data;
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return -ENODEV;
+	}
+
+	/* nothing in flight: no votes to drop, return immediately */
+	if (ctx->comp_cnt == 0)
+		return rc;
+
+	rc = wait_for_completion_timeout(&ctx->wb_comp,
+			KOFF_TIMEOUT);
+	mdss_mdp_set_intr_callback(ctx->intr_type, ctx->intf_num,
+		NULL, NULL);
+
+	if (rc == 0) {
+		/* timed out: check whether the hw actually finished */
+		mask = BIT(ctx->intr_type + ctx->intf_num);
+
+		isr = readl_relaxed(ctl->mdata->mdp_base +
+					MDSS_MDP_REG_INTR_STATUS);
+		status = mask & isr;
+
+		pr_info_once("mask: 0x%x, isr: 0x%x, status: 0x%x\n",
+				mask, isr, status);
+
+		if (status) {
+			/* done bit set but irq never fired: recover by hand */
+			pr_warn_once("wb done but irq not triggered\n");
+			mdss_mdp_irq_clear(ctl->mdata,
+					ctx->intr_type,
+					ctx->intf_num);
+
+			mdss_mdp_writeback_intr_done(ctl);
+			rc = 0;
+		} else {
+			mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_TIMEOUT);
+			rc = -ENODEV;
+			WARN(1, "writeback kickoff timed out (%d) ctl=%d\n",
+							rc, ctl->num);
+		}
+	} else {
+		/* completion arrived in time: normalize rc to success */
+		rc = 0;
+	}
+
+	if (rc == 0) {
+		ctx->end_time = ktime_get();
+		mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_DONE);
+	}
+
+	/* once operation is done, disable traffic shaper */
+	if (ctl->traffic_shaper_enabled)
+		mdss_mdp_traffic_shaper(ctl, ctx, false);
+
+	/* drop the votes taken in mdss_mdp_writeback_display() */
+	mdss_iommu_ctrl(0);
+	mdss_bus_bandwidth_ctrl(false);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+
+	/* Set flag to release Controller Bandwidth */
+	ctl->perf_release_ctl_bw = true;
+
+	ctx->comp_cnt--;
+
+	if (!rc) {
+		rot_time = (u64)ktime_to_us(ctx->end_time) -
+				(u64)ktime_to_us(ctx->start_time);
+		pr_debug("ctx%d type:%d xin_id:%d intf_num:%d took %llu microsecs\n",
+			ctx->wb_num, ctx->type, ctx->xin_id,
+				ctx->intf_num, rot_time);
+	}
+
+	return rc;
+}
+
+/*
+ * mdss_mdp_set_ot_limit_wb() - apply VBIF outstanding-transaction limit
+ * @ctx: writeback context describing the write client
+ *
+ * Collects the write-client parameters (xin port, geometry, frame rate,
+ * clock-control register location) and hands them to the common OT-limit
+ * programming helper.
+ *
+ * Fix: ot_params was left uninitialized, so any member of
+ * struct mdss_mdp_set_ot_params not explicitly assigned below was passed
+ * to mdss_mdp_set_ot_limit() as stack garbage; zero-initialize it.
+ */
+static void mdss_mdp_set_ot_limit_wb(struct mdss_mdp_writeback_ctx *ctx)
+{
+	struct mdss_mdp_set_ot_params ot_params = {0};
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	ot_params.xin_id = ctx->xin_id;
+	ot_params.num = ctx->wb_num;
+	ot_params.width = ctx->width;
+	ot_params.height = ctx->height;
+	ot_params.frame_rate = ctx->frame_rate;
+	ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_WR_LIM_CONF;
+	ot_params.reg_off_mdp_clk_ctrl = ctx->clk_ctrl.reg_off;
+	ot_params.bit_off_mdp_clk_ctrl = ctx->clk_ctrl.bit_off;
+	ot_params.is_rot = (ctx->type == MDSS_MDP_WRITEBACK_TYPE_ROTATOR);
+	ot_params.is_wb = true;
+	ot_params.is_yuv = ctx->dst_fmt->is_yuv;
+	ot_params.is_vbif_nrt = mdss_mdp_is_nrt_vbif_base_defined(mdata);
+
+	mdss_mdp_set_ot_limit(&ot_params);
+}
+
+/*
+ * mdss_mdp_writeback_display() - kick off one writeback frame
+ * @ctl: control path
+ * @arg: struct mdss_mdp_writeback_arg carrying the output buffer
+ *
+ * Programs the destination addresses, arms the writeback-done interrupt,
+ * flushes the ctl, takes iommu/bus/clock votes and starts the hardware.
+ * The votes and comp_cnt taken here are released in
+ * mdss_mdp_wb_wait4comp().
+ *
+ * Return: 0 on success; -ENODEV/-ENOENT/-EPERM on bad state, or an
+ * address-setup/iommu error.
+ */
+static int mdss_mdp_writeback_display(struct mdss_mdp_ctl *ctl, void *arg)
+{
+	struct mdss_mdp_writeback_ctx *ctx;
+	struct mdss_mdp_writeback_arg *wb_args;
+	u32 flush_bits = 0;
+	int ret;
+
+	if (!ctl || !ctl->mdata)
+		return -ENODEV;
+
+	ctx = (struct mdss_mdp_writeback_ctx *) ctl->priv_data;
+	if (!ctx)
+		return -ENODEV;
+
+	/* only one frame may be in flight per writeback context */
+	if (ctx->comp_cnt) {
+		pr_err("previous kickoff not completed yet, ctl=%d\n",
+					ctl->num);
+		return -EPERM;
+	}
+
+	if (ctl->mdata->default_ot_wr_limit ||
+			ctl->mdata->default_ot_rd_limit)
+		mdss_mdp_set_ot_limit_wb(ctx);
+
+	wb_args = (struct mdss_mdp_writeback_arg *) arg;
+	if (!wb_args)
+		return -ENOENT;
+
+	if (ctx->type == MDSS_MDP_WRITEBACK_TYPE_ROTATOR
+			&& ctl->mdata->traffic_shaper_en)
+		mdss_mdp_traffic_shaper(ctl, ctx, true);
+
+	ret = mdss_mdp_writeback_addr_setup(ctx, wb_args->data);
+	if (ret) {
+		pr_err("writeback data setup error ctl=%d\n", ctl->num);
+		return ret;
+	}
+
+	mdss_mdp_set_intr_callback(ctx->intr_type, ctx->intf_num,
+		   mdss_mdp_writeback_intr_done, ctl);
+
+	flush_bits |= ctl->flush_reg_data;
+	flush_bits |= BIT(16); /* WB */
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST_ADDR_SW_STATUS, ctl->is_secure);
+	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, flush_bits);
+	MDSS_XLOG(ctl->intf_num, flush_bits);
+
+	reinit_completion(&ctx->wb_comp);
+	mdss_mdp_irq_enable(ctx->intr_type, ctx->intf_num);
+
+	/*
+	 * NOTE(review): if this iommu attach fails, the function returns
+	 * with the WB interrupt enabled and the intr callback still
+	 * registered - confirm the caller tears this down on error.
+	 */
+	ret = mdss_iommu_ctrl(1);
+	if (IS_ERR_VALUE(ret)) {
+		pr_err("IOMMU attach failed\n");
+		return ret;
+	}
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+	mdss_bus_bandwidth_ctrl(true);
+	ctx->start_time = ktime_get();
+	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_START, 1);
+	/* make sure MDP writeback is enabled */
+	wmb();
+
+	MDSS_XLOG(ctx->wb_num, ctx->type, ctx->xin_id, ctx->intf_num,
+		ctx->dst_rect.w, ctx->dst_rect.h);
+	pr_debug("ctx%d type:%d xin_id:%d intf_num:%d start\n",
+		ctx->wb_num, ctx->type, ctx->xin_id, ctx->intf_num);
+
+	ctx->comp_cnt++;
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_writeback_start() - claim a writeback context and wire up a ctl
+ * @ctl: control path to attach writeback operations to
+ *
+ * Selects the writeback context from the ctl opmode, takes a reference on
+ * it, optionally allocates a CDM block for YUV output, and installs the
+ * prepare/stop/display/wait/vsync ops on the ctl. The reference is dropped
+ * in mdss_mdp_writeback_stop().
+ *
+ * Fix: the error paths after ctx->ref_cnt++ (unknown destination format,
+ * CDM block busy) returned without dropping the reference, leaking the
+ * writeback context and making every later start fail with -EBUSY.
+ *
+ * Return: 0 on success, -EINVAL for a bad mode/format, -EBUSY if the
+ * context or CDM block is already in use.
+ */
+int mdss_mdp_writeback_start(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_writeback_ctx *ctx;
+	struct mdss_mdp_writeback *wb;
+	u32 mem_sel;
+	u32 mixer_type = MDSS_MDP_MIXER_TYPE_UNUSED;
+	struct mdss_mdp_format_params *fmt = NULL;
+	bool is_rot;
+
+	pr_debug("start ctl=%d\n", ctl->num);
+
+	if (!ctl->wb) {
+		pr_debug("wb not setup in the ctl\n");
+		return 0;
+	}
+
+	wb = ctl->wb;
+	/* low nibble of the ctl opmode selects the writeback block (1-based) */
+	mem_sel = (ctl->opmode & 0xF) - 1;
+	if (mem_sel < MDSS_MDP_MAX_WRITEBACK) {
+		ctx = &wb_ctx_list[mem_sel];
+		if (ctx->ref_cnt) {
+			pr_err("writeback in use %d\n", mem_sel);
+			return -EBUSY;
+		}
+		ctx->ref_cnt++;
+	} else {
+		pr_err("invalid writeback mode %d\n", mem_sel);
+		return -EINVAL;
+	}
+
+	fmt = mdss_mdp_get_format_params(ctl->dst_format);
+	if (!fmt) {
+		/* release the reference taken above */
+		ctx->ref_cnt--;
+		return -EINVAL;
+	}
+
+	is_rot = (ctx->type == MDSS_MDP_WRITEBACK_TYPE_ROTATOR) ? true : false;
+
+	if (ctl->mixer_left) {
+		mixer_type = ctl->mixer_left->type;
+		/*
+		 * If the WB mixer is dedicated, the rotator uses a virtual
+		 * mixer. Mark the mixer_type as UNUSED in such cases.
+		 */
+		if ((mixer_type == MDSS_MDP_MIXER_TYPE_WRITEBACK) && is_rot)
+			mixer_type = MDSS_MDP_MIXER_TYPE_UNUSED;
+	}
+
+	if (mdss_mdp_is_cdm_supported(ctl->mdata, ctl->intf_type,
+		mixer_type) && fmt->is_yuv) {
+		ctl->cdm = mdss_mdp_cdm_init(ctl, MDP_CDM_CDWN_OUTPUT_WB);
+		if (IS_ERR_OR_NULL(ctl->cdm)) {
+			pr_err("cdm block already in use\n");
+			ctl->cdm = NULL;
+			/* release the reference taken above */
+			ctx->ref_cnt--;
+			return -EBUSY;
+		}
+	}
+	ctl->priv_data = ctx;
+	ctx->wb_num = wb->num;
+	ctx->base = wb->base;
+	ctx->initialized = false;
+	init_completion(&ctx->wb_comp);
+	spin_lock_init(&ctx->wb_lock);
+	INIT_LIST_HEAD(&ctx->vsync_handlers);
+
+	if (ctx->type == MDSS_MDP_WRITEBACK_TYPE_ROTATOR)
+		ctl->ops.prepare_fnc = mdss_mdp_writeback_prepare_rot;
+	else {  /* wfd or line mode */
+		ctl->ops.prepare_fnc = mdss_mdp_writeback_prepare_wfd;
+
+		/* WB2 Intr Enable is BIT(2) in MDSS 1.8.0 */
+		if (ctl->mdata->mdp_rev == MDSS_MDP_HW_REV_108) {
+			ctx->intr_type = MDSS_MDP_IRQ_TYPE_WB_ROT_COMP;
+			ctx->intf_num = 2;
+		}
+	}
+	ctl->ops.stop_fnc = mdss_mdp_writeback_stop;
+	ctl->ops.display_fnc = mdss_mdp_writeback_display;
+	ctl->ops.wait_fnc = mdss_mdp_wb_wait4comp;
+	ctl->ops.add_vsync_handler = mdss_mdp_wb_add_vsync_handler;
+	ctl->ops.remove_vsync_handler = mdss_mdp_wb_remove_vsync_handler;
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_writeback_display_commit() - commit a frame on a writeback ctl
+ * @ctl: control path
+ * @arg: passed through to mdss_mdp_display_commit()
+ *
+ * Sanity-checks that a shared ctl is committed under its shared mutex,
+ * bumps the mixer params_changed counters when the mixers were switched
+ * so the stage programming is refreshed, then delegates to the common
+ * commit path.
+ *
+ * Return: -EINVAL if the shared lock is not held, otherwise the result
+ * of mdss_mdp_display_commit().
+ */
+int mdss_mdp_writeback_display_commit(struct mdss_mdp_ctl *ctl, void *arg)
+{
+	if (ctl->shared_lock && !mutex_is_locked(ctl->shared_lock)) {
+		pr_err("shared mutex is not locked before commit on ctl=%d\n",
+			ctl->num);
+		return -EINVAL;
+	}
+
+	if (ctl->mdata->mixer_switched) {
+		if (ctl->mixer_left)
+			ctl->mixer_left->params_changed++;
+		if (ctl->mixer_right)
+			ctl->mixer_right->params_changed++;
+	}
+
+	return mdss_mdp_display_commit(ctl, arg, NULL);
+}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_layer.c b/drivers/video/fbdev/msm/mdss_mdp_layer.c
new file mode 100644
index 0000000..49a7bf4
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_layer.c
@@ -0,0 +1,2364 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/pm_runtime.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/msm_mdp.h>
+#include <linux/memblock.h>
+#include <linux/sync.h>
+#include <linux/sw_sync.h>
+#include <linux/file.h>
+
+#include <soc/qcom/event_timer.h>
+#include "mdss.h"
+#include "mdss_debug.h"
+#include "mdss_fb.h"
+#include "mdss_mdp.h"
+#include "mdss_mdp_wfd.h"
+
+/*
+ * True when [offset, offset + size) does not fit inside max_size; written
+ * as a subtraction so (offset + size) cannot overflow.
+ */
+#define CHECK_LAYER_BOUNDS(offset, size, max_size) \
+	(((size) > (max_size)) || ((offset) > ((max_size) - (size))))
+
+/* layer flags that indicate user-supplied scaler/pixel-ext data */
+#define SCALER_ENABLED \
+	(MDP_LAYER_ENABLE_PIXEL_EXT | MDP_LAYER_ENABLE_QSEED3_SCALE)
+
+/* fence roles attached to a commit */
+enum {
+	MDSS_MDP_RELEASE_FENCE = 0,
+	MDSS_MDP_RETIRE_FENCE,
+};
+
+/* which pipe list a validated layer's pipe comes from */
+enum layer_pipe_q {
+	LAYER_USES_NEW_PIPE_Q = 0,
+	LAYER_USES_USED_PIPE_Q,
+	LAYER_USES_DESTROY_PIPE_Q,
+};
+
+/* bitmask of mixers a z-order slot already occupies */
+enum layer_zorder_used {
+	LAYER_ZORDER_NONE = 0,
+	LAYER_ZORDER_LEFT = 1,
+	LAYER_ZORDER_RIGHT = 2,
+	LAYER_ZORDER_BOTH = 3,
+};
+
+/* per-layer validation state: the input layer plus its multirect params */
+struct mdss_mdp_validate_info_t {
+	struct mdp_input_layer *layer;
+	struct mdss_mdp_pipe_multirect_params multirect;
+};
+
+/*
+ * __layer_needs_src_split() - check needs source split configuration
+ * @layer:	input layer
+ *
+ * A layer is staged on both mixers (source split) when it is an async
+ * layer or when the hardware quirk forces src-split for all layers.
+ *
+ * Return: true if the layer should be used as source split
+ */
+static bool __layer_needs_src_split(struct mdp_input_layer *layer)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	return (layer->flags & MDP_LAYER_ASYNC) ||
+		mdss_has_quirk(mdata, MDSS_QUIRK_SRC_SPLIT_ALWAYS);
+}
+
+/*
+ * __async_update_position_check() - validate an async position update
+ * @mfd:  framebuffer device
+ * @pipe: pipe being repositioned (must have async_update set)
+ * @src:  new source origin
+ * @dst:  new destination origin
+ *
+ * The pipe's width/height are unchanged by a position update, so only the
+ * new origins need bounds-checking: src against the pipe's source image,
+ * dst against the panel resolution.
+ *
+ * Return: 0 if the update is valid, -EINVAL otherwise.
+ */
+static int __async_update_position_check(struct msm_fb_data_type *mfd,
+		struct mdss_mdp_pipe *pipe, struct mdp_point *src,
+		struct mdp_point *dst)
+{
+	struct fb_var_screeninfo *var = &mfd->fbi->var;
+	u32 xres = var->xres;
+	u32 yres = var->yres;
+
+	if (!pipe->async_update
+		|| CHECK_LAYER_BOUNDS(src->x, pipe->src.w, pipe->img_width)
+		|| CHECK_LAYER_BOUNDS(src->y, pipe->src.h, pipe->img_height)
+		|| CHECK_LAYER_BOUNDS(dst->x, pipe->dst.w, xres)
+		|| CHECK_LAYER_BOUNDS(dst->y, pipe->dst.h, yres)) {
+		pr_err("invalid configs: async_update=%d, src:{%d,%d}, dst:{%d,%d}\n",
+			pipe->async_update, src->x, src->y, dst->x, dst->y);
+		pr_err("pipe:- src:{%d,%d,%d,%d}, dst:{%d,%d,%d,%d}\n",
+			pipe->src.x, pipe->src.y, pipe->src.w, pipe->src.h,
+			pipe->dst.x, pipe->dst.y, pipe->dst.w, pipe->dst.h);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * __cursor_layer_check() - validate a layer destined for a cursor pipe
+ * @mfd:   framebuffer device
+ * @layer: input layer using a cursor pipe
+ *
+ * Cursor pipes cannot scale and are size-limited, so the layer must use
+ * the dedicated cursor z-order stage, fit within max_cursor_size, and
+ * have identical source and destination dimensions; the target must also
+ * actually have cursor pipes.
+ *
+ * Return: 0 if the cursor configuration is valid, -EINVAL otherwise.
+ */
+static int __cursor_layer_check(struct msm_fb_data_type *mfd,
+		struct mdp_input_layer *layer)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if ((layer->z_order != HW_CURSOR_STAGE(mdata))
+			|| layer->src_rect.w > mdata->max_cursor_size
+			|| layer->src_rect.h > mdata->max_cursor_size
+			|| layer->src_rect.w != layer->dst_rect.w
+			|| layer->src_rect.h != layer->dst_rect.h
+			|| !mdata->ncursor_pipes) {
+		pr_err("Incorrect cursor configs for pipe:%d, cursor_pipes:%d, z_order:%d\n",
+				layer->pipe_ndx, mdata->ncursor_pipes,
+				layer->z_order);
+		pr_err("src:{%d,%d,%d,%d}, dst:{%d,%d,%d,%d}\n",
+				layer->src_rect.x, layer->src_rect.y,
+				layer->src_rect.w, layer->src_rect.h,
+				layer->dst_rect.x, layer->dst_rect.y,
+				layer->dst_rect.w, layer->dst_rect.h);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * __layer_xres_check() - validate a layer's horizontal placement
+ * @mfd:   framebuffer device
+ * @layer: input layer to check
+ *
+ * Determines the horizontal span available to the layer based on which
+ * mixer(s) it lands on. Without src-split, a layer placed past the left
+ * mixer is rebased onto the right mixer (dst_rect.x is adjusted in
+ * place); with src-split the layer may span both mixers and the widths
+ * are summed.
+ *
+ * Return: 0 if the layer fits, -EPERM if the needed mixer is absent,
+ * -EINVAL if the destination rect exceeds the available width.
+ */
+static int __layer_xres_check(struct msm_fb_data_type *mfd,
+	struct mdp_input_layer *layer)
+{
+	u32 xres = 0;
+	u32 left_lm_w = left_lm_w_from_mfd(mfd);
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+
+	if (layer->dst_rect.x >= left_lm_w) {
+		if (mdata->has_src_split)
+			xres = left_lm_w;
+		else
+			layer->dst_rect.x -= left_lm_w;
+
+		if (ctl->mixer_right) {
+			xres += ctl->mixer_right->width;
+		} else {
+			pr_err("ov cannot be placed on right mixer\n");
+			return -EPERM;
+		}
+	} else {
+		if (ctl->mixer_left) {
+			xres = ctl->mixer_left->width;
+		} else {
+			pr_err("ov cannot be placed on left mixer\n");
+			return -EPERM;
+		}
+
+		if (mdata->has_src_split && ctl->mixer_right)
+			xres += ctl->mixer_right->width;
+	}
+
+	if (CHECK_LAYER_BOUNDS(layer->dst_rect.x, layer->dst_rect.w, xres)) {
+		pr_err("dst_xres is invalid. dst_x:%d, dst_w:%d, xres:%d\n",
+			layer->dst_rect.x, layer->dst_rect.w, xres);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * __layer_param_check() - validate all per-layer parameters
+ * @mfd:      framebuffer device
+ * @layer:    input layer to validate
+ * @fmt:      resolved format parameters for layer->buffer.format
+ * @rect_num: multirect rectangle the layer maps to
+ *
+ * Checks, in order: secure-session permission, z-order range, pipe index,
+ * source rect vs buffer bounds (with deinterlace doubling width and
+ * halving height), minimum destination size, decimation limits and its
+ * incompatibilities (BWC, tiled formats), vertical destination bounds,
+ * up/downscale ratio limits after decimation, BWC constraints,
+ * deinterlace alignment, and YUV even-coordinate alignment.
+ *
+ * Return: 0 if valid; -EPERM, -EINVAL, -EOVERFLOW or -E2BIG describing
+ * the first violated constraint.
+ */
+static int __layer_param_check(struct msm_fb_data_type *mfd,
+	struct mdp_input_layer *layer, struct mdss_mdp_format_params *fmt,
+	enum mdss_mdp_pipe_rect rect_num)
+{
+	u32 yres;
+	u32 min_src_size, min_dst_size = 1;
+	int content_secure;
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+	u32 src_w, src_h, dst_w, dst_h, width, height;
+
+	if (!ctl) {
+		pr_err("ctl is null\n");
+		return -EINVAL;
+	}
+
+	if (ctl->mixer_left) {
+		yres = ctl->mixer_left->height;
+	} else {
+		pr_debug("Using fb var screen infor for height\n");
+		yres = mfd->fbi->var.yres;
+	}
+
+	/* secure content may not go to a non-secure writeback session */
+	content_secure = (layer->flags & MDP_LAYER_SECURE_SESSION);
+	if (!ctl->is_secure && content_secure &&
+				 (mfd->panel.type == WRITEBACK_PANEL)) {
+		pr_debug("return due to security concerns\n");
+		return -EPERM;
+	}
+	/* YUV needs even dimensions, so the minimum source size is 2 */
+	min_src_size = fmt->is_yuv ? 2 : 1;
+
+	if (layer->z_order >= (mdata->max_target_zorder + MDSS_MDP_STAGE_0)) {
+		pr_err("zorder %d out of range\n", layer->z_order);
+		return -EINVAL;
+	}
+
+	if (!mdss_mdp_pipe_search(mdata, layer->pipe_ndx, rect_num)) {
+		pr_err("layer pipe is invalid: 0x%x rect:%d\n",
+				layer->pipe_ndx, rect_num);
+		return -EINVAL;
+	}
+
+	/* deinterlacing reads two fields: effective source is 2w x h/2 */
+	width = layer->buffer.width;
+	height = layer->buffer.height;
+	if (layer->flags & MDP_LAYER_DEINTERLACE) {
+		width *= 2;
+		height /= 2;
+	}
+
+	if (layer->buffer.width > MAX_IMG_WIDTH ||
+	    layer->buffer.height > MAX_IMG_HEIGHT ||
+	    layer->src_rect.w < min_src_size ||
+	    layer->src_rect.h < min_src_size ||
+	    CHECK_LAYER_BOUNDS(layer->src_rect.x, layer->src_rect.w, width) ||
+	    CHECK_LAYER_BOUNDS(layer->src_rect.y, layer->src_rect.h, height)) {
+		pr_err("invalid source image img flag=%d wh=%dx%d rect=%d,%d,%d,%d\n",
+		       layer->flags, width, height,
+		       layer->src_rect.x, layer->src_rect.y,
+		       layer->src_rect.w, layer->src_rect.h);
+		return -EINVAL;
+	}
+
+	if (layer->dst_rect.w < min_dst_size ||
+		layer->dst_rect.h < min_dst_size) {
+		pr_err("invalid destination resolution (%dx%d)",
+		       layer->dst_rect.w, layer->dst_rect.h);
+		return -EINVAL;
+	}
+
+	if (layer->horz_deci || layer->vert_deci) {
+		if (!mdata->has_decimation) {
+			pr_err("No Decimation in MDP V=%x\n", mdata->mdp_rev);
+			return -EINVAL;
+		} else if ((layer->horz_deci > MAX_DECIMATION) ||
+				(layer->vert_deci > MAX_DECIMATION))  {
+			pr_err("Invalid decimation factors horz=%d vert=%d\n",
+					layer->horz_deci, layer->vert_deci);
+			return -EINVAL;
+		} else if (layer->flags & MDP_LAYER_BWC) {
+			pr_err("Decimation can't be enabled with BWC\n");
+			return -EINVAL;
+		} else if (fmt->fetch_mode != MDSS_MDP_FETCH_LINEAR) {
+			pr_err("Decimation can't be enabled with MacroTile format\n");
+			return -EINVAL;
+		}
+	}
+
+	if (CHECK_LAYER_BOUNDS(layer->dst_rect.y, layer->dst_rect.h, yres)) {
+		pr_err("invalid vertical destination: y=%d, h=%d, yres=%d\n",
+			layer->dst_rect.y, layer->dst_rect.h, yres);
+		return -EOVERFLOW;
+	}
+
+	dst_w = layer->dst_rect.w;
+	dst_h = layer->dst_rect.h;
+
+	/* scale checks use the post-decimation source size */
+	src_w = layer->src_rect.w >> layer->horz_deci;
+	src_h = layer->src_rect.h >> layer->vert_deci;
+
+	if (src_w > mdata->max_mixer_width) {
+		pr_err("invalid source width=%d HDec=%d\n",
+			layer->src_rect.w, layer->horz_deci);
+		return -EINVAL;
+	}
+
+	if ((src_w * MAX_UPSCALE_RATIO) < dst_w) {
+		pr_err("too much upscaling Width %d->%d\n",
+		       layer->src_rect.w, layer->dst_rect.w);
+		return -E2BIG;
+	}
+
+	if ((src_h * MAX_UPSCALE_RATIO) < dst_h) {
+		pr_err("too much upscaling. Height %d->%d\n",
+		       layer->src_rect.h, layer->dst_rect.h);
+		return -E2BIG;
+	}
+
+	if (src_w > (dst_w * MAX_DOWNSCALE_RATIO)) {
+		pr_err("too much downscaling. Width %d->%d H Dec=%d\n",
+		       src_w, layer->dst_rect.w, layer->horz_deci);
+		return -E2BIG;
+	}
+
+	if (src_h > (dst_h * MAX_DOWNSCALE_RATIO)) {
+		pr_err("too much downscaling. Height %d->%d V Dec=%d\n",
+		       src_h, layer->dst_rect.h, layer->vert_deci);
+		return -E2BIG;
+	}
+
+	if (layer->flags & MDP_LAYER_BWC) {
+		/* BWC requires the source rect to cover the whole buffer */
+		if ((layer->buffer.width != layer->src_rect.w) ||
+		    (layer->buffer.height != layer->src_rect.h)) {
+			pr_err("BWC: mismatch of src img=%dx%d rect=%dx%d\n",
+				layer->buffer.width, layer->buffer.height,
+				layer->src_rect.w, layer->src_rect.h);
+			return -EINVAL;
+		}
+
+		if (layer->horz_deci || layer->vert_deci) {
+			pr_err("Can't enable BWC decode && decimate\n");
+			return -EINVAL;
+		}
+	}
+
+	if ((layer->flags & MDP_LAYER_DEINTERLACE) &&
+		!(layer->flags & SCALER_ENABLED)) {
+		if (layer->flags & MDP_SOURCE_ROTATED_90) {
+			if ((layer->src_rect.w % 4) != 0) {
+				pr_err("interlaced rect not h/4\n");
+				return -EINVAL;
+			}
+		} else if ((layer->src_rect.h % 4) != 0) {
+			pr_err("interlaced rect not h/4\n");
+			return -EINVAL;
+		}
+	}
+
+	if (fmt->is_yuv) {
+		/* chroma subsampling needs even offsets and dimensions */
+		if ((layer->src_rect.x & 0x1) || (layer->src_rect.y & 0x1) ||
+		    (layer->src_rect.w & 0x1) || (layer->src_rect.h & 0x1)) {
+			pr_err("invalid odd src resolution or coordinates\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * __validate_layer_reconfig() - validate reconfiguration of a staged pipe
+ * @layer: new layer parameters
+ * @pipe:  pipe already in use that the layer wants to reuse
+ *
+ * The CSC registers are not double buffered, so changing the color space
+ * of a staged pipe while both old and new formats are YUV would glitch
+ * the frame in flight; reject that combination.
+ *
+ * Return: 0 if the reconfiguration is allowed, -EPERM otherwise.
+ */
+static int __validate_layer_reconfig(struct mdp_input_layer *layer,
+	struct mdss_mdp_pipe *pipe)
+{
+	int status = 0;
+	struct mdss_mdp_format_params *src_fmt;
+
+	/*
+	 * csc registers are not double buffered. It is not permitted
+	 * to change them on staged pipe with YUV layer.
+	 */
+	if (pipe->csc_coeff_set != layer->color_space) {
+		src_fmt = mdss_mdp_get_format_params(layer->buffer.format);
+		if (pipe->src_fmt->is_yuv && src_fmt && src_fmt->is_yuv) {
+			status = -EPERM;
+			pr_err("csc change is not permitted on used pipe\n");
+		}
+	}
+
+	return status;
+}
+
+/*
+ * __validate_single_layer() - validate one input layer against a mixer
+ * @mfd:        framebuffer device
+ * @layer_info: layer plus its multirect parameters
+ * @mixer_mux:  MDSS_MDP_MIXER_MUX_LEFT/RIGHT mixer to validate against
+ *
+ * Resolves the layer's format (remapping it for BWC), then runs the
+ * cursor, xres and parameter checks, verifies the target mixer exists,
+ * and enforces pipe-type capabilities (VIG required for YUV/scaling;
+ * DMA and cursor pipes cannot scale).
+ *
+ * Fix: the DMA/cursor scaling check returned -EINVAL directly instead of
+ * going through the function's exit_fail label like every other error
+ * path; made the error handling consistent (no behavior change).
+ *
+ * Return: 0 if the layer is valid, negative errno otherwise.
+ */
+static int __validate_single_layer(struct msm_fb_data_type *mfd,
+	struct mdss_mdp_validate_info_t *layer_info, u32 mixer_mux)
+{
+	u32 bwc_enabled;
+	int ret;
+	bool is_vig_needed = false;
+	struct mdss_mdp_format_params *fmt;
+	struct mdss_mdp_mixer *mixer = NULL;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+	struct mdp_input_layer *layer = layer_info->layer;
+	int ptype = get_pipe_type_from_ndx(layer->pipe_ndx);
+
+	if (ptype == MDSS_MDP_PIPE_TYPE_INVALID) {
+		pr_err("Invalid pipe ndx=%d\n", layer->pipe_ndx);
+		return -EINVAL;
+	}
+
+	if ((layer->dst_rect.w > mdata->max_mixer_width) ||
+		(layer->dst_rect.h > MAX_DST_H)) {
+		pr_err("exceeded max mixer supported resolution %dx%d\n",
+				layer->dst_rect.w, layer->dst_rect.h);
+		ret = -EINVAL;
+		goto exit_fail;
+	}
+
+	pr_debug("ctl=%u mux=%d z_order=%d flags=0x%x dst_x:%d\n",
+		mdp5_data->ctl->num, mixer_mux, layer->z_order,
+		layer->flags, layer->dst_rect.x);
+
+	fmt = mdss_mdp_get_format_params(layer->buffer.format);
+	if (!fmt) {
+		pr_err("invalid layer format %d\n", layer->buffer.format);
+		ret = -EINVAL;
+		goto exit_fail;
+	}
+
+	bwc_enabled = layer->flags & MDP_LAYER_BWC;
+
+	if (bwc_enabled) {
+		if (!mdp5_data->mdata->has_bwc) {
+			pr_err("layer uses bwc format but MDP does not support it\n");
+			ret = -EINVAL;
+			goto exit_fail;
+		}
+
+		/* BWC layers carry the rotator destination format */
+		layer->buffer.format =
+			mdss_mdp_get_rotator_dst_format(
+				layer->buffer.format, false, bwc_enabled);
+		fmt = mdss_mdp_get_format_params(layer->buffer.format);
+		if (!fmt) {
+			pr_err("invalid layer format %d\n",
+				layer->buffer.format);
+			ret = -EINVAL;
+			goto exit_fail;
+		}
+	}
+
+	if (ptype == MDSS_MDP_PIPE_TYPE_CURSOR) {
+		ret = __cursor_layer_check(mfd, layer);
+		if (ret)
+			goto exit_fail;
+	}
+
+	ret = __layer_xres_check(mfd, layer);
+	if (ret)
+		goto exit_fail;
+
+	ret = __layer_param_check(mfd, layer, fmt, layer_info->multirect.num);
+	if (ret)
+		goto exit_fail;
+
+	mixer = mdss_mdp_mixer_get(mdp5_data->ctl, mixer_mux);
+	if (!mixer) {
+		pr_err("unable to get %s mixer\n",
+			(mixer_mux == MDSS_MDP_MIXER_MUX_RIGHT) ?
+			"right" : "left");
+		ret = -EPERM;
+		goto exit_fail;
+	}
+
+	/* YUV, and scaling on non-scalar-RGB targets, require a VIG pipe */
+	if (fmt->is_yuv || (mdata->has_non_scalar_rgb &&
+		((layer->src_rect.w != layer->dst_rect.w) ||
+			(layer->src_rect.h != layer->dst_rect.h))))
+		is_vig_needed = true;
+
+	if (is_vig_needed && ptype != MDSS_MDP_PIPE_TYPE_VIG) {
+		pr_err("pipe is non-scalar ndx=%x\n", layer->pipe_ndx);
+		ret = -EINVAL;
+		goto exit_fail;
+	}
+
+	if (((ptype == MDSS_MDP_PIPE_TYPE_DMA) ||
+		(ptype == MDSS_MDP_PIPE_TYPE_CURSOR)) &&
+		(layer->dst_rect.h != layer->src_rect.h ||
+		 layer->dst_rect.w != layer->src_rect.w)) {
+		pr_err("no scaling supported on dma/cursor pipe, pipe num:%d\n",
+				layer->pipe_ndx);
+		ret = -EINVAL;
+		goto exit_fail;
+	}
+
+exit_fail:
+	return ret;
+}
+
+static int __configure_pipe_params(struct msm_fb_data_type *mfd,
+	struct mdss_mdp_validate_info_t *vinfo, struct mdss_mdp_pipe *pipe,
+	struct mdss_mdp_pipe *left_blend_pipe, bool is_single_layer,
+	u32 mixer_mux)
+{
+	int ret = 0;
+	u32 left_lm_w = left_lm_w_from_mfd(mfd);
+	u32 flags;
+	bool is_right_blend = false;
+
+	struct mdss_mdp_mixer *mixer = NULL;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+	struct mdp_input_layer *layer = vinfo->layer;
+
+	mixer = mdss_mdp_mixer_get(mdp5_data->ctl, mixer_mux);
+	pipe->src_fmt = mdss_mdp_get_format_params(layer->buffer.format);
+	if (!pipe->src_fmt || !mixer) {
+		pr_err("invalid layer format:%d or mixer:%pK\n",
+				layer->buffer.format, pipe->mixer_left);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	pipe->comp_ratio = layer->buffer.comp_ratio;
+
+	if (mfd->panel_orientation)
+		layer->flags ^= mfd->panel_orientation;
+
+	pipe->mixer_left = mixer;
+	pipe->mfd = mfd;
+	pipe->play_cnt = 0;
+	pipe->flags = 0;
+
+	if (layer->flags & MDP_LAYER_FLIP_LR)
+		pipe->flags = MDP_FLIP_LR;
+	if (layer->flags & MDP_LAYER_FLIP_UD)
+		pipe->flags |= MDP_FLIP_UD;
+	if (layer->flags & MDP_LAYER_SECURE_SESSION)
+		pipe->flags |= MDP_SECURE_OVERLAY_SESSION;
+	if (layer->flags & MDP_LAYER_SECURE_DISPLAY_SESSION)
+		pipe->flags |= MDP_SECURE_DISPLAY_OVERLAY_SESSION;
+	if (layer->flags & MDP_LAYER_SOLID_FILL)
+		pipe->flags |= MDP_SOLID_FILL;
+	if (layer->flags & MDP_LAYER_DEINTERLACE)
+		pipe->flags |= MDP_DEINTERLACE;
+	if (layer->flags & MDP_LAYER_BWC)
+		pipe->flags |= MDP_BWC_EN;
+	if (layer->flags & MDP_LAYER_PP)
+		pipe->flags |= MDP_OVERLAY_PP_CFG_EN;
+
+	pipe->is_fg = layer->flags & MDP_LAYER_FORGROUND;
+	pipe->img_width = layer->buffer.width & 0x3fff;
+	pipe->img_height = layer->buffer.height & 0x3fff;
+	pipe->src.x = layer->src_rect.x;
+	pipe->src.y = layer->src_rect.y;
+	pipe->src.w = layer->src_rect.w;
+	pipe->src.h = layer->src_rect.h;
+	pipe->dst.x = layer->dst_rect.x;
+	pipe->dst.y = layer->dst_rect.y;
+	pipe->dst.w = layer->dst_rect.w;
+	pipe->dst.h = layer->dst_rect.h;
+	pipe->horz_deci = layer->horz_deci;
+	pipe->vert_deci = layer->vert_deci;
+	pipe->bg_color = layer->bg_color;
+	pipe->alpha = layer->alpha;
+	pipe->transp = layer->transp_mask;
+	pipe->blend_op = layer->blend_op;
+	pipe->is_handed_off = false;
+	pipe->async_update = (layer->flags & MDP_LAYER_ASYNC) ? true : false;
+	pipe->csc_coeff_set = layer->color_space;
+
+	if (mixer->ctl) {
+		pipe->dst.x += mixer->ctl->border_x_off;
+		pipe->dst.y += mixer->ctl->border_y_off;
+		pr_debug("border{%d,%d}\n", mixer->ctl->border_x_off,
+				mixer->ctl->border_y_off);
+	}
+	pr_debug("src{%d,%d,%d,%d}, dst{%d,%d,%d,%d}\n",
+		pipe->src.x, pipe->src.y, pipe->src.w, pipe->src.h,
+		pipe->dst.x, pipe->dst.y, pipe->dst.w, pipe->dst.h);
+
+	if (layer->flags & SCALER_ENABLED)
+		memcpy(&pipe->scaler, layer->scale,
+			sizeof(struct mdp_scale_data_v2));
+
+	pipe->scaler.enable = (layer->flags & SCALER_ENABLED);
+
+	flags = pipe->flags;
+	if (is_single_layer)
+		flags |= PERF_CALC_PIPE_SINGLE_LAYER;
+
+	/*
+	 * async update is allowed only in video mode panels with single LM
+	 * or dual LM with src_split enabled.
+	 */
+	if (pipe->async_update && ((is_split_lm(mfd) && !mdata->has_src_split)
+			|| (!mdp5_data->ctl->is_video_mode))) {
+		pr_err("async update allowed only in video mode panel with src_split\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	/*
+	 * unstage the pipe if it's current z_order does not match with new
+	 * z_order because client may only call the validate.
+	 */
+	if (pipe->mixer_stage != layer->z_order)
+		mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
+
+	/*
+	 * check if overlay span across two mixers and if source split is
+	 * available. If yes, enable src_split_req flag so that during mixer
+	 * staging, same pipe will be stagged on both layer mixers.
+	 */
+	if (mdata->has_src_split) {
+		is_right_blend = pipe->is_right_blend;
+		if (left_blend_pipe) {
+			if (pipe->priority <= left_blend_pipe->priority) {
+				pr_err("priority limitation. left:%d right%d\n",
+					left_blend_pipe->priority,
+					pipe->priority);
+				ret = -EPERM;
+				goto end;
+			} else {
+				pr_debug("pipe%d is a right_pipe\n", pipe->num);
+				is_right_blend = true;
+			}
+		} else if (pipe->is_right_blend) {
+			/*
+			 * pipe used to be right blend. So need to update mixer
+			 * configuration to remove it as a right blend.
+			 */
+			mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
+			mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_right);
+			is_right_blend = false;
+		}
+
+		if (is_split_lm(mfd) && __layer_needs_src_split(layer)) {
+			pipe->src_split_req = true;
+		} else if ((mixer_mux == MDSS_MDP_MIXER_MUX_LEFT) &&
+		    ((layer->dst_rect.x + layer->dst_rect.w) > mixer->width)) {
+			if (layer->dst_rect.x >= mixer->width) {
+				pr_err("%pS: err dst_x can't lie in right half",
+					__builtin_return_address(0));
+				pr_cont(" flags:0x%x dst x:%d w:%d lm_w:%d\n",
+					layer->flags, layer->dst_rect.x,
+					layer->dst_rect.w, mixer->width);
+				ret = -EINVAL;
+				goto end;
+			} else {
+				pipe->src_split_req = true;
+			}
+		} else {
+			if (pipe->src_split_req) {
+				mdss_mdp_mixer_pipe_unstage(pipe,
+					pipe->mixer_right);
+				pipe->mixer_right = NULL;
+			}
+			pipe->src_split_req = false;
+		}
+		pipe->is_right_blend = is_right_blend;
+	}
+
+	pipe->multirect.mode = vinfo->multirect.mode;
+	pipe->mixer_stage = layer->z_order;
+
+	if (mfd->panel_orientation & MDP_FLIP_LR)
+		pipe->dst.x = pipe->mixer_left->width - pipe->dst.x -
+			pipe->dst.w;
+	if (mfd->panel_orientation & MDP_FLIP_UD)
+		pipe->dst.y = pipe->mixer_left->height - pipe->dst.y -
+			pipe->dst.h;
+
+	memcpy(&pipe->layer, layer, sizeof(struct mdp_input_layer));
+
+	mdss_mdp_overlay_set_chroma_sample(pipe);
+
+	if (pipe->blend_op == BLEND_OP_NOT_DEFINED)
+		pipe->blend_op = pipe->src_fmt->alpha_enable ?
+			BLEND_OP_PREMULTIPLIED : BLEND_OP_OPAQUE;
+
+	if (pipe->src_fmt->is_yuv && !(pipe->flags & MDP_SOURCE_ROTATED_90) &&
+			!pipe->scaler.enable) {
+		pipe->overfetch_disable = OVERFETCH_DISABLE_BOTTOM;
+
+	if (pipe->dst.x >= left_lm_w)
+		pipe->overfetch_disable |= OVERFETCH_DISABLE_RIGHT;
+		pr_debug("overfetch flags=%x\n", pipe->overfetch_disable);
+	} else {
+		pipe->overfetch_disable = 0;
+	}
+
+	/*
+	 * When scaling is enabled src crop and image
+	 * width and height is modified by user
+	 */
+	if ((pipe->flags & MDP_DEINTERLACE) && !pipe->scaler.enable) {
+		if (pipe->flags & MDP_SOURCE_ROTATED_90) {
+			pipe->src.x = DIV_ROUND_UP(pipe->src.x, 2);
+			pipe->src.x &= ~1;
+			pipe->src.w /= 2;
+			pipe->img_width /= 2;
+		} else {
+			pipe->src.h /= 2;
+			pipe->src.y = DIV_ROUND_UP(pipe->src.y, 2);
+			pipe->src.y &= ~1;
+		}
+	}
+
+	ret = mdss_mdp_overlay_setup_scaling(pipe);
+	if (ret) {
+		pr_err("scaling setup failed %d\n", ret);
+		goto end;
+	}
+
+	if (layer->flags & MDP_LAYER_PP) {
+		memcpy(&pipe->pp_cfg, layer->pp_info,
+				sizeof(struct mdp_overlay_pp_params));
+		ret = mdss_mdp_pp_sspp_config(pipe);
+		if (ret) {
+			pr_err("pp setup failed %d\n", ret);
+			goto end;
+		}
+	}
+
+	if (pipe->type == MDSS_MDP_PIPE_TYPE_CURSOR)
+		goto end;
+
+	ret = mdp_pipe_tune_perf(pipe, flags);
+	if (ret) {
+		pr_err("unable to satisfy performance. ret=%d\n", ret);
+		goto end;
+	}
+
+	ret = mdss_mdp_smp_reserve(pipe);
+	if (ret) {
+		pr_err("mdss_mdp_smp_reserve failed. pnum:%d ret=%d\n",
+			pipe->num, ret);
+		goto end;
+	}
+end:
+	return ret;
+}
+
+/*
+ * __create_fence() - create a named release/retire fence for a commit
+ *
+ * Retire fences for command-mode panels are created on the vsync
+ * timeline, with the signal point pushed one vsync further out per
+ * pending retire; every other case uses @sync_pt_data's timeline at
+ * @value. On success a free fd is reserved in *@fence_fd (installing
+ * the fence into it is left to the caller). Returns the fence, an
+ * ERR_PTR on precondition failures, or NULL if fence creation or fd
+ * reservation fails.
+ */
+static struct sync_fence *__create_fence(struct msm_fb_data_type *mfd,
+	struct msm_sync_pt_data *sync_pt_data, u32 fence_type,
+	int *fence_fd, int value)
+{
+	struct mdss_overlay_private *mdp5_data;
+	struct mdss_mdp_ctl *ctl;
+	struct sync_fence *sync_fence = NULL;
+	char fence_name[32];
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+
+	ctl = mdp5_data->ctl;
+	/* per the message, vsync ops are not hooked up before first update */
+	if (!ctl->ops.add_vsync_handler) {
+		pr_err("fb%d vsync pending first update\n", mfd->index);
+		return ERR_PTR(-EOPNOTSUPP);
+	}
+
+	if (!mdss_mdp_ctl_is_power_on(ctl)) {
+		pr_err("fb%d ctl power on failed\n", mfd->index);
+		return ERR_PTR(-EPERM);
+	}
+
+	if (fence_type == MDSS_MDP_RETIRE_FENCE)
+		snprintf(fence_name, sizeof(fence_name), "fb%d_retire",
+			mfd->index);
+	else
+		snprintf(fence_name, sizeof(fence_name), "fb%d_release",
+			mfd->index);
+
+	if ((fence_type == MDSS_MDP_RETIRE_FENCE) &&
+		(mfd->panel.type == MIPI_CMD_PANEL)) {
+		/*
+		 * command-mode retire fences ride the vsync timeline;
+		 * retire_cnt pending retires push the signal point that
+		 * many vsyncs past the current timeline value.
+		 */
+		if (mdp5_data->vsync_timeline) {
+			value = mdp5_data->vsync_timeline->value + 1 +
+				mdp5_data->retire_cnt++;
+			sync_fence = mdss_fb_sync_get_fence(
+				mdp5_data->vsync_timeline, fence_name, value);
+		} else {
+			return ERR_PTR(-EPERM);
+		}
+	} else {
+		sync_fence = mdss_fb_sync_get_fence(sync_pt_data->timeline,
+			fence_name, value);
+	}
+
+	if (IS_ERR_OR_NULL(sync_fence)) {
+		pr_err("%s: unable to retrieve release fence\n", fence_name);
+		goto end;
+	}
+
+	/* get fence fd */
+	*fence_fd = get_unused_fd_flags(0);
+	if (*fence_fd < 0) {
+		pr_err("%s: get_unused_fd_flags failed error:0x%x\n",
+			fence_name, *fence_fd);
+		sync_fence_put(sync_fence);
+		/* NOTE: NULL (not ERR_PTR) is returned on this path */
+		sync_fence = NULL;
+		goto end;
+	}
+
+end:
+	return sync_fence;
+}
+
+/*
+ * __handle_buffer_fences() - copy sync fences and return release/retire
+ * fence to caller.
+ *
+ * This function copies all input sync fences to acquire fence array and
+ * returns release/retire fences to caller. It acts like buff_sync ioctl.
+ * Returns 0 on success; on failure both output fds in @commit are set
+ * to -1 and all collected acquire fences are dropped.
+ */
+static int __handle_buffer_fences(struct msm_fb_data_type *mfd,
+	struct mdp_layer_commit_v1 *commit, struct mdp_input_layer *layer_list)
+{
+	struct sync_fence *fence, *release_fence, *retire_fence;
+	struct msm_sync_pt_data *sync_pt_data = NULL;
+	struct mdp_input_layer *layer;
+	int value;
+	/* ret carries negative error codes, so it must stay signed */
+	int ret = 0;
+
+	u32 acq_fen_count, i;
+	u32 layer_count = commit->input_layer_cnt;
+
+	sync_pt_data = &mfd->mdp_sync_pt_data;
+	if (!sync_pt_data) {
+		pr_err("sync point data are NULL\n");
+		return -EINVAL;
+	}
+
+	/* drain acquire fences still pending from the previous commit */
+	i = mdss_fb_wait_for_fence(sync_pt_data);
+	if (i > 0)
+		pr_warn("%s: waited on %d active fences\n",
+			sync_pt_data->fence_name, i);
+
+	mutex_lock(&sync_pt_data->sync_mutex);
+	/* collect per-layer acquire fences; fd < 0 means "no fence" */
+	for (i = 0, acq_fen_count = 0; i < layer_count; i++) {
+		layer = &layer_list[i];
+
+		if (layer->buffer.fence < 0)
+			continue;
+
+		fence = sync_fence_fdget(layer->buffer.fence);
+		if (!fence) {
+			pr_err("%s: sync fence get failed! fd=%d\n",
+				sync_pt_data->fence_name, layer->buffer.fence);
+			ret = -EINVAL;
+			break;
+		}
+		sync_pt_data->acq_fen[acq_fen_count++] = fence;
+	}
+	sync_pt_data->acq_fen_cnt = acq_fen_count;
+	if (ret)
+		goto sync_fence_err;
+
+	/* signal point: current value + threshold + commits in flight */
+	value = sync_pt_data->timeline_value + sync_pt_data->threshold +
+			atomic_read(&sync_pt_data->commit_cnt);
+
+	release_fence = __create_fence(mfd, sync_pt_data,
+		MDSS_MDP_RELEASE_FENCE, &commit->release_fence, value);
+	if (IS_ERR_OR_NULL(release_fence)) {
+		pr_err("unable to retrieve release fence\n");
+		/*
+		 * __create_fence() returns NULL (not an ERR_PTR) when fence
+		 * or fd allocation fails; map that to -ENOMEM instead of
+		 * letting PTR_ERR(NULL) report success.
+		 */
+		ret = release_fence ? PTR_ERR(release_fence) : -ENOMEM;
+		goto release_fence_err;
+	}
+
+	retire_fence = __create_fence(mfd, sync_pt_data,
+		MDSS_MDP_RETIRE_FENCE, &commit->retire_fence, value);
+	if (IS_ERR_OR_NULL(retire_fence)) {
+		pr_err("unable to retrieve retire fence\n");
+		ret = retire_fence ? PTR_ERR(retire_fence) : -ENOMEM;
+		goto retire_fence_err;
+	}
+
+	sync_fence_install(release_fence, commit->release_fence);
+	sync_fence_install(retire_fence, commit->retire_fence);
+
+	mutex_unlock(&sync_pt_data->sync_mutex);
+	return ret;
+
+retire_fence_err:
+	/* release fd was reserved but never installed; recycle it */
+	put_unused_fd(commit->release_fence);
+	sync_fence_put(release_fence);
+release_fence_err:
+	commit->retire_fence = -1;
+	commit->release_fence = -1;
+sync_fence_err:
+	/* drop every acquire fence collected above */
+	for (i = 0; i < sync_pt_data->acq_fen_cnt; i++)
+		sync_fence_put(sync_pt_data->acq_fen[i]);
+	sync_pt_data->acq_fen_cnt = 0;
+
+	mutex_unlock(&sync_pt_data->sync_mutex);
+
+	return ret;
+}
+
+/*
+ * __map_layer_buffer() - map input layer buffer
+ *
+ * This function maps input layer buffer. It supports only single layer
+ * buffer mapping right now. This is case for all formats including UBWC.
+ */
+static struct mdss_mdp_data *__map_layer_buffer(struct msm_fb_data_type *mfd,
+	struct mdss_mdp_pipe *pipe,
+	struct mdss_mdp_validate_info_t *validate_info_list,
+	u32 layer_count)
+{
+	struct mdss_mdp_data *src_data;
+	struct mdp_input_layer *layer = NULL;
+	struct mdp_layer_buffer *buffer;
+	struct msmfb_data image;
+	int i, ret;
+	u32 flags;
+	struct mdss_mdp_validate_info_t *vitem;
+
+	/* locate the validated layer that belongs to this pipe+rect */
+	for (i = 0; i < layer_count; i++) {
+		vitem = &validate_info_list[i];
+		layer = vitem->layer;
+		if ((layer->pipe_ndx == pipe->ndx) &&
+		    (vitem->multirect.num == pipe->multirect.num))
+			break;
+	}
+
+	if (i == layer_count) {
+		pr_err("layer count index is out of bound\n");
+		src_data = ERR_PTR(-EINVAL);
+		goto end;
+	}
+
+	buffer = &layer->buffer;
+
+	/* solid-fill pipes fetch no pixel data; a buffer is a client error */
+	if (pipe->flags & MDP_SOLID_FILL) {
+		pr_err("Unexpected buffer queue to a solid fill pipe\n");
+		src_data = ERR_PTR(-EINVAL);
+		goto end;
+	}
+
+	/* only the secure-session flags are relevant to the mapping */
+	flags = (pipe->flags & (MDP_SECURE_OVERLAY_SESSION |
+				MDP_SECURE_DISPLAY_OVERLAY_SESSION));
+
+	if (buffer->planes[0].fd < 0) {
+		pr_err("invalid file descriptor for layer buffer\n");
+		src_data = ERR_PTR(-EINVAL);
+		goto end;
+	}
+
+	src_data = mdss_mdp_overlay_buf_alloc(mfd, pipe);
+	if (!src_data) {
+		pr_err("unable to allocate source buffer\n");
+		src_data = ERR_PTR(-ENOMEM);
+		goto end;
+	}
+	memset(&image, 0, sizeof(image));
+
+	/* only plane 0 is mapped (single-plane limitation, see above) */
+	image.memory_id = buffer->planes[0].fd;
+	image.offset = buffer->planes[0].offset;
+	ret = mdss_mdp_data_get_and_validate_size(src_data, &image, 1,
+			flags, &mfd->pdev->dev, false, DMA_TO_DEVICE,
+			buffer);
+	if (ret)
+		goto end_buf_free;
+
+	src_data->num_planes = 1;
+	return src_data;
+
+end_buf_free:
+	mdss_mdp_overlay_buf_free(mfd, src_data);
+	src_data = ERR_PTR(ret);
+end:
+	return src_data;
+}
+
+/*
+ * __compare_layer_config() - check whether a validated layer still matches
+ * the configuration staged on @pipe.
+ *
+ * Compares every attribute whose change would require re-validation
+ * (geometry, flags, decimation, blending, buffer dimensions/format and,
+ * when enabled, the scaler data); buffer handles, offsets and fences are
+ * deliberately ignored so buffer-only updates can skip validation.
+ */
+static inline bool __compare_layer_config(struct mdp_input_layer *validate,
+	struct mdss_mdp_pipe *pipe)
+{
+	struct mdp_input_layer *staged = &pipe->layer;
+
+	if (memcmp(&validate->src_rect, &staged->src_rect,
+			sizeof(validate->src_rect)))
+		return false;
+	if (memcmp(&validate->dst_rect, &staged->dst_rect,
+			sizeof(validate->dst_rect)))
+		return false;
+	if (validate->flags != staged->flags ||
+	    validate->horz_deci != staged->horz_deci ||
+	    validate->vert_deci != staged->vert_deci ||
+	    validate->alpha != staged->alpha ||
+	    validate->color_space != staged->color_space)
+		return false;
+	/* the staged z_order is offset by the base mixer stage */
+	if (validate->z_order != (staged->z_order - MDSS_MDP_STAGE_0))
+		return false;
+	if (validate->transp_mask != staged->transp_mask ||
+	    validate->bg_color != staged->bg_color ||
+	    validate->blend_op != staged->blend_op ||
+	    validate->buffer.width != staged->buffer.width ||
+	    validate->buffer.height != staged->buffer.height ||
+	    validate->buffer.format != staged->buffer.format)
+		return false;
+
+	/* scaler data lives on the pipe, not in the staged layer copy */
+	if (validate->flags & SCALER_ENABLED)
+		return !memcmp(validate->scale, &pipe->scaler,
+			sizeof(pipe->scaler));
+
+	return true;
+}
+
+/*
+ * __find_layer_in_validate_q() - Search layer in validation queue
+ *
+ * This functions helps to skip validation for layers where only buffer is
+ * changing. For ex: video playback case. In order to skip validation, it
+ * compares all input layer params except buffer handle, offset, fences.
+ */
+static struct mdss_mdp_pipe *__find_layer_in_validate_q(
+	struct mdss_mdp_validate_info_t *vinfo,
+	struct mdss_overlay_private *mdp5_data)
+{
+	bool found = false;
+	struct mdss_mdp_pipe *pipe;
+	struct mdp_input_layer *layer = vinfo->layer;
+
+	mutex_lock(&mdp5_data->list_lock);
+	list_for_each_entry(pipe, &mdp5_data->pipes_used, list) {
+		if ((pipe->ndx == layer->pipe_ndx) &&
+		    (pipe->multirect.num == vinfo->multirect.num)) {
+			/* same pipe+rect: reusable only if config matches */
+			if (__compare_layer_config(layer, pipe))
+				found = true;
+			break;
+		}
+	}
+	mutex_unlock(&mdp5_data->list_lock);
+
+	/* pipe only points at a real entry when found was set in the loop */
+	return found ? pipe : NULL;
+}
+
+/*
+ * __find_pipe_in_list() - look up a pipe by ndx and rect number
+ *
+ * Scans @head for a pipe matching both @pipe_ndx and @rect_num. On a hit
+ * the pipe is stored in *@out_pipe and true is returned; otherwise
+ * *@out_pipe is left untouched and false is returned.
+ */
+static bool __find_pipe_in_list(struct list_head *head,
+	int pipe_ndx, struct mdss_mdp_pipe **out_pipe,
+	enum mdss_mdp_pipe_rect rect_num)
+{
+	struct mdss_mdp_pipe *cur;
+
+	list_for_each_entry(cur, head, list) {
+		if ((cur->ndx != pipe_ndx) ||
+		    (cur->multirect.num != rect_num))
+			continue;
+
+		*out_pipe = cur;
+		return true;
+	}
+
+	return false;
+}
+
+/*
+ * Search pipe from destroy and cleanup list to avoid validation failure.
+ * It is caller responsibility to hold the list lock before calling this API.
+ */
+static struct mdss_mdp_pipe *__find_and_move_cleanup_pipe(
+	struct mdss_overlay_private *mdp5_data, u32 pipe_ndx,
+	enum mdss_mdp_pipe_rect rect_num)
+{
+	struct mdss_mdp_pipe *pipe = NULL;
+
+	if (__find_pipe_in_list(&mdp5_data->pipes_destroy,
+				pipe_ndx, &pipe, rect_num)) {
+		pr_debug("reuse destroy pipe id:%d ndx:%d rect:%d\n",
+				pipe->num, pipe_ndx, rect_num);
+		list_move(&pipe->list, &mdp5_data->pipes_used);
+	} else if (__find_pipe_in_list(&mdp5_data->pipes_cleanup,
+				pipe_ndx, &pipe, rect_num)) {
+		pr_debug("reuse cleanup pipe id:%d ndx:%d rect:%d\n",
+				pipe->num, pipe_ndx, rect_num);
+		/* cleanup pipes are still staged; unstage before reuse */
+		mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
+		mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_right);
+		pipe->mixer_stage = MDSS_MDP_STAGE_UNUSED;
+		list_move(&pipe->list, &mdp5_data->pipes_used);
+	}
+
+	/* NULL when the pipe was found on neither list */
+	return pipe;
+}
+
+/*
+ * __assign_pipe_for_layer() - get a pipe for layer
+ *
+ * This function first searches the pipe from used list, cleanup list and
+ * destroy list. On successful search, it returns the same pipe for current
+ * layer. It also un-stage the pipe from current mixer for used, cleanup,
+ * destroy pipes if they switches the mixer. On failure search, it returns
+ * the null pipe.
+ */
+static struct mdss_mdp_pipe *__assign_pipe_for_layer(
+	struct msm_fb_data_type *mfd,
+	struct mdss_mdp_mixer *mixer, u32 pipe_ndx,
+	enum layer_pipe_q *pipe_q_type,
+	enum mdss_mdp_pipe_rect rect_num)
+{
+	struct mdss_mdp_pipe *pipe = NULL;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+
+	/* classify where the pipe came from so the caller can track reuse */
+	mutex_lock(&mdp5_data->list_lock);
+	__find_pipe_in_list(&mdp5_data->pipes_used, pipe_ndx, &pipe, rect_num);
+	if (IS_ERR_OR_NULL(pipe)) {
+		pipe = __find_and_move_cleanup_pipe(mdp5_data,
+				pipe_ndx, rect_num);
+		if (IS_ERR_OR_NULL(pipe))
+			*pipe_q_type = LAYER_USES_NEW_PIPE_Q;
+		else
+			*pipe_q_type = LAYER_USES_DESTROY_PIPE_Q;
+	} else {
+		*pipe_q_type = LAYER_USES_USED_PIPE_Q;
+	}
+	mutex_unlock(&mdp5_data->list_lock);
+
+	/* found the pipe from used, destroy or cleanup list */
+	if (!IS_ERR_OR_NULL(pipe)) {
+		if (pipe->mixer_left != mixer) {
+			/* a pipe may only switch mixers within the same fb */
+			if (!mixer->ctl || (mixer->ctl->mfd != mfd)) {
+				pr_err("Can't switch mixer %d->%d pnum %d!\n",
+					pipe->mixer_left->num, mixer->num,
+						pipe->num);
+				pipe = ERR_PTR(-EINVAL);
+				goto end;
+			}
+			pr_debug("switching pipe%d mixer %d->%d\n",
+				pipe->num,
+				pipe->mixer_left ? pipe->mixer_left->num : -1,
+				mixer->num);
+			mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
+			pipe->mixer_left = mixer;
+		}
+		goto end;
+	}
+
+	/* no reusable pipe found: reserve a fresh one from the pool */
+	pipe = mdss_mdp_pipe_assign(mdata, mixer, pipe_ndx, rect_num);
+	if (IS_ERR_OR_NULL(pipe)) {
+		pr_err("error reserving pipe. pipe_ndx=0x%x rect_num=%d mfd ndx=%d\n",
+			pipe_ndx, rect_num, mfd->index);
+		goto end;
+	}
+
+	mutex_lock(&mdp5_data->list_lock);
+	list_add(&pipe->list, &mdp5_data->pipes_used);
+	mutex_unlock(&mdp5_data->list_lock);
+
+end:
+	if (!IS_ERR_OR_NULL(pipe)) {
+		/* mark clean; bumping params_changed forces reprogramming */
+		pipe->dirty = false;
+		pipe->params_changed++;
+	}
+	return pipe;
+}
+
+/*
+ * __is_sd_state_valid() - validate secure display state
+ *
+ * A non-secure pipe may not be staged while a secure-display context is
+ * active. For command mode panels the blocking conditions are: another
+ * secure client besides this one (when secure display is enabled for the
+ * current client), any secure client (when it is disabled), or secure
+ * pipes already staged. For all other panels any secure client or any
+ * staged secure pipe blocks non-secure pipes.
+ *
+ * Returns true when the requested combination is allowed.
+ */
+static inline bool __is_sd_state_valid(uint32_t sd_pipes, uint32_t nonsd_pipes,
+	int panel_type, u32 sd_enabled)
+{
+	bool secure_blocked;
+
+	/* with no non-secure pipes there is nothing that can conflict */
+	if (!nonsd_pipes)
+		return true;
+
+	if (panel_type == MIPI_CMD_PANEL) {
+		if (sd_enabled)
+			secure_blocked = (mdss_get_sd_client_cnt() > 1) ||
+					sd_pipes;
+		else
+			secure_blocked = mdss_get_sd_client_cnt() ||
+					sd_pipes;
+	} else {
+		secure_blocked = sd_pipes || mdss_get_sd_client_cnt();
+	}
+
+	return !secure_blocked;
+}
+
+/*
+ * __validate_secure_display() - validate secure display
+ *
+ * This function travers through used pipe list and checks if any pipe
+ * is with secure display enabled flag. It fails if client tries to stage
+ * unsecure content with secure display session.
+ *
+ */
+static int __validate_secure_display(struct mdss_overlay_private *mdp5_data)
+{
+	struct mdss_mdp_pipe *pipe, *tmp;
+	uint32_t sd_pipes = 0, nonsd_pipes = 0;
+	int panel_type = mdp5_data->ctl->panel_data->panel_info.type;
+	int ret = 0;
+
+	/* count secure vs non-secure pipes staged for this commit */
+	mutex_lock(&mdp5_data->list_lock);
+	list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_used, list) {
+		if (pipe->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION)
+			sd_pipes++;
+		else
+			nonsd_pipes++;
+	}
+	mutex_unlock(&mdp5_data->list_lock);
+
+	pr_debug("pipe count:: secure display:%d non-secure:%d\n",
+		sd_pipes, nonsd_pipes);
+
+	mdp5_data->sd_transition_state = SD_TRANSITION_NONE;
+	if (!__is_sd_state_valid(sd_pipes, nonsd_pipes, panel_type,
+		mdp5_data->sd_enabled)) {
+		pr_err("non-secure layer validation request during secure display session\n");
+		pr_err(" secure client cnt:%d secure pipe cnt:%d non-secure pipe cnt:%d\n",
+			mdss_get_sd_client_cnt(), sd_pipes, nonsd_pipes);
+		ret = -EINVAL;
+	} else if (!mdp5_data->sd_enabled && sd_pipes) {
+		/* first secure pipe staged: entering secure display */
+		mdp5_data->sd_transition_state =
+			SD_TRANSITION_NON_SECURE_TO_SECURE;
+	} else if (mdp5_data->sd_enabled && !sd_pipes) {
+		/* last secure pipe removed: leaving secure display */
+		mdp5_data->sd_transition_state =
+			SD_TRANSITION_SECURE_TO_NON_SECURE;
+	}
+	return ret;
+}
+
+/*
+ * __handle_free_list() - updates free pipe list
+ *
+ * This function travers through used pipe list and checks if any pipe
+ * is not staged in current validation cycle. It moves the pipe to cleanup
+ * list if no layer is attached for that pipe.
+ *
+ * This should be called after validation is successful for current cycle.
+ * Moving pipes before can affects staged pipe for previous cycle.
+ */
+static void __handle_free_list(struct mdss_overlay_private *mdp5_data,
+	struct mdss_mdp_validate_info_t *validate_info_list, u32 layer_count)
+{
+	int i;
+	struct mdp_input_layer *layer;
+	struct mdss_mdp_validate_info_t *vinfo;
+	struct mdss_mdp_pipe *pipe, *tmp;
+
+	mutex_lock(&mdp5_data->list_lock);
+	list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_used, list) {
+		/* does any validated layer still claim this pipe+rect? */
+		for (i = 0; i < layer_count; i++) {
+			vinfo = &validate_info_list[i];
+			layer = vinfo->layer;
+
+			if ((pipe->ndx == layer->pipe_ndx) &&
+			    (pipe->multirect.num == vinfo->multirect.num))
+				break;
+		}
+
+		/*
+		 * if validate cycle is not attaching any layer for this
+		 * pipe then move it to cleanup list. It does overlay_unset
+		 * task.
+		 */
+		if (i == layer_count)
+			list_move(&pipe->list, &mdp5_data->pipes_cleanup);
+	}
+	mutex_unlock(&mdp5_data->list_lock);
+}
+
+/*
+ * __multirect_validate_flip() - reject flip combinations that multirect
+ * fetch cannot honor.
+ *
+ * Horizontal flip is rejected outright for either rect, and both rects
+ * must agree on vertical flip. Returns true when the pair is acceptable.
+ */
+static bool __multirect_validate_flip(struct mdp_input_layer **layers,
+		size_t count)
+{
+	u32 f0, f1;
+
+	/* not supporting more than 2 layers */
+	if (count != 2)
+		return false;
+
+	f0 = layers[0]->flags;
+	f1 = layers[1]->flags;
+
+	if ((f0 | f1) & MDP_LAYER_FLIP_LR) {
+		pr_err("multirect and HFLIP is not allowed. input layer flags=0x%x paired layer flags=0x%x\n",
+			f0, f1);
+		return false;
+	}
+
+	if ((f0 & MDP_LAYER_FLIP_UD) != (f1 & MDP_LAYER_FLIP_UD)) {
+		pr_err("multirect VLFIP mismatch is not allowed\n");
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * __multirect_validate_format() - check format compatibility of a
+ * multirect pair.
+ *
+ * Both rects share one SSPP, so YUV formats are rejected and the two
+ * layers must match in fetch mode, bpp, bit depth (and, for UBWC, the
+ * exact format) as well as the solid-fill flag. Returns true when the
+ * pair is compatible.
+ */
+static bool __multirect_validate_format(struct mdp_input_layer **layers,
+		size_t count)
+{
+	struct mdss_mdp_format_params *rec0_fmt, *rec1_fmt;
+	bool is_ubwc;
+
+	/* not supporting more than 2 layers */
+	if (count != 2)
+		return false;
+
+	/* format related validation */
+	rec0_fmt = mdss_mdp_get_format_params(layers[0]->buffer.format);
+	if (!rec0_fmt) {
+		pr_err("invalid input layer format %d\n",
+			layers[0]->buffer.format);
+		return false;
+	}
+	rec1_fmt = mdss_mdp_get_format_params(layers[1]->buffer.format);
+	if (!rec1_fmt) {
+		pr_err("invalid paired layer format %d\n",
+			layers[1]->buffer.format);
+		return false;
+	}
+	if (rec0_fmt->is_yuv || rec1_fmt->is_yuv) {
+		pr_err("multirect on YUV format is not supported. input=%d paired=%d\n",
+			rec0_fmt->is_yuv, rec1_fmt->is_yuv);
+		return false;
+	}
+	if (rec0_fmt->fetch_mode != rec1_fmt->fetch_mode) {
+		pr_err("multirect fetch_mode mismatch is not allowed. input=%d paired=%d\n",
+			rec0_fmt->fetch_mode, rec1_fmt->fetch_mode);
+		return false;
+	}
+	/* UBWC demands identical formats; linear only matching bpp/depth */
+	is_ubwc = mdss_mdp_is_ubwc_format(rec0_fmt);
+	if (is_ubwc && (rec0_fmt != rec1_fmt)) {
+		pr_err("multirect UBWC format mismatch is not allowed\n");
+		return false;
+	} else if (rec0_fmt->bpp != rec1_fmt->bpp) {
+		pr_err("multirect linear format bpp mismatch is not allowed. input=%d paired=%d\n",
+			rec0_fmt->bpp, rec1_fmt->bpp);
+		return false;
+	} else if (rec0_fmt->unpack_dx_format != rec1_fmt->unpack_dx_format) {
+		pr_err("multirect linear format 10bit vs 8bit mismatch is not allowed. input=%d paired=%d\n",
+			rec0_fmt->unpack_dx_format, rec1_fmt->unpack_dx_format);
+		return false;
+	}
+
+	if ((layers[0]->flags & MDP_LAYER_SOLID_FILL) !=
+			(layers[1]->flags & MDP_LAYER_SOLID_FILL)) {
+		pr_err("solid fill mismatch between multirect layers\n");
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * __multirect_validate_rects() - reject scaled or overlapping rect pairs.
+ *
+ * Multirect pipes cannot scale, so each layer's src and dst sizes must
+ * match, and the two destination rectangles must not overlap. Returns
+ * true when the pair is usable.
+ */
+static bool __multirect_validate_rects(struct mdp_input_layer **layers,
+		size_t count)
+{
+	struct mdss_rect dst[MDSS_MDP_PIPE_MAX_RECTS];
+	int i;
+
+	/* not supporting more than 2 layers */
+	if (count != 2)
+		return false;
+
+	for (i = 0; i < count; i++) {
+		if ((layers[i]->src_rect.w != layers[i]->dst_rect.w) ||
+		    (layers[i]->src_rect.h != layers[i]->dst_rect.h)) {
+			pr_err("multirect layers cannot have scaling: src: %dx%d dst: %dx%d\n",
+				layers[i]->src_rect.w, layers[i]->src_rect.h,
+				layers[i]->dst_rect.w, layers[i]->dst_rect.h);
+			return false;
+		}
+
+		dst[i] = (struct mdss_rect) {layers[i]->dst_rect.x,
+					     layers[i]->dst_rect.y,
+					     layers[i]->dst_rect.w,
+					     layers[i]->dst_rect.h};
+	}
+
+	/* resolution related validation */
+	if (mdss_rect_overlap_check(&dst[0], &dst[1])) {
+		/* log x,y,w,h per rect (previously printed .y where .h belongs) */
+		pr_err("multirect dst overlap is not allowed. input: %d,%d,%d,%d paired %d,%d,%d,%d\n",
+			dst[0].x, dst[0].y, dst[0].w, dst[0].h,
+			dst[1].x, dst[1].y, dst[1].w, dst[1].h);
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * __multirect_validate_properties() - misc constraints on a multirect pair.
+ *
+ * Async updates are incompatible with multirect, and the two rects must
+ * occupy distinct z-orders. Returns true when both constraints hold.
+ */
+static bool __multirect_validate_properties(struct mdp_input_layer **layers,
+		size_t count)
+{
+	/* only rect pairs are supported */
+	if (count != 2)
+		return false;
+
+	if ((layers[0]->flags | layers[1]->flags) & MDP_LAYER_ASYNC) {
+		pr_err("ASYNC update is not allowed with multirect\n");
+		return false;
+	}
+
+	if (layers[0]->z_order == layers[1]->z_order) {
+		pr_err("multirect layers cannot have same z_order=%d\n",
+			layers[0]->z_order);
+		return false;
+	}
+
+	return true;
+}
+
+/* every check below must pass for a multirect pair to be accepted */
+static bool (*__multirect_validators[])(struct mdp_input_layer **layers,
+		size_t count) = {
+	__multirect_validate_flip,
+	__multirect_validate_format,
+	__multirect_validate_rects,
+	__multirect_validate_properties,
+};
+
+/*
+ * __multirect_layer_flags_to_mode() - decode the multirect mode from
+ * layer flags.
+ *
+ * Returns MDSS_MDP_PIPE_MULTIRECT_{NONE,PARALLEL,SERIAL}, or -EINVAL
+ * when the parallel-mode flag is set without multirect being enabled.
+ */
+static inline int __multirect_layer_flags_to_mode(u32 flags)
+{
+	bool parallel = flags & MDP_LAYER_MULTIRECT_PARALLEL_MODE;
+
+	if (!(flags & MDP_LAYER_MULTIRECT_ENABLE)) {
+		if (parallel) {
+			pr_err("Invalid parallel mode flag set without multirect enabled\n");
+			return -EINVAL;
+		}
+		return MDSS_MDP_PIPE_MULTIRECT_NONE;
+	}
+
+	return parallel ? MDSS_MDP_PIPE_MULTIRECT_PARALLEL :
+			MDSS_MDP_PIPE_MULTIRECT_SERIAL;
+}
+
+/*
+ * __multirect_validate_mode() - validate serial/parallel multirect geometry.
+ *
+ * Serial (time-multiplexed) fetch requires the two destination rects to
+ * be separated vertically by at least 2 lines (2 UBWC tile heights for
+ * UBWC formats) so the hardware can buffer ahead. Parallel fetch limits
+ * UBWC rect width to half the max mixer width and requires both rects to
+ * land on the same layer mixer. Returns 0 on success or -EINVAL.
+ */
+static int __multirect_validate_mode(struct msm_fb_data_type *mfd,
+		struct mdp_input_layer **layers,
+		size_t count)
+{
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+	struct mdss_mdp_format_params *rec0_fmt;
+	bool is_ubwc;
+	int i, mode;
+	struct mdp_rect *dst[MDSS_MDP_PIPE_MAX_RECTS];
+
+	/*
+	 * not supporting more than 2 layers; return a real error code
+	 * (returning false here would read as success to the caller)
+	 */
+	if (count != 2)
+		return -EINVAL;
+
+	for (i = 0; i < count; i++)
+		dst[i] = &layers[i]->dst_rect;
+
+	mode = __multirect_layer_flags_to_mode(layers[0]->flags);
+	if (IS_ERR_VALUE(mode))
+		return mode;
+
+	/* format related validation */
+	rec0_fmt = mdss_mdp_get_format_params(layers[0]->buffer.format);
+	if (!rec0_fmt) {
+		pr_err("invalid input layer format %d\n",
+			layers[0]->buffer.format);
+		return -EINVAL;
+	}
+
+	is_ubwc = mdss_mdp_is_ubwc_format(rec0_fmt);
+
+	if (mode == MDSS_MDP_PIPE_MULTIRECT_SERIAL) {
+		int threshold, yoffset;
+
+		if (dst[0]->y < dst[1]->y)
+			yoffset = dst[1]->y - (dst[0]->y + dst[0]->h);
+		else if (dst[1]->y < dst[0]->y)
+			yoffset = dst[0]->y - (dst[1]->y + dst[1]->h);
+		else
+			yoffset = 0;
+
+		/*
+		 * time multiplexed is possible only if the y position of layers
+		 * is not overlapping and there is sufficient time to buffer
+		 * 2 lines/tiles.  Otherwise use parallel fetch mode
+		 */
+		threshold = 2;
+		if (is_ubwc) {
+			struct mdss_mdp_format_params_ubwc *uf;
+
+			/* in ubwc all layers would need to be same format */
+			uf = (struct mdss_mdp_format_params_ubwc *)rec0_fmt;
+			threshold *= uf->micro.tile_height;
+		}
+
+		if (yoffset < threshold) {
+			pr_err("Unable to operate in serial fetch mode with yoffset=%d dst[0]=%d,%d dst[1]=%d,%d\n",
+					yoffset, dst[0]->y, dst[0]->h,
+					dst[1]->y, dst[1]->h);
+			return -EINVAL;
+		}
+	} else if (mode == MDSS_MDP_PIPE_MULTIRECT_PARALLEL) {
+		u32 left_lm_w, rec0_mixer, rec1_mixer;
+
+		/*
+		 * For UBWC, 5 lines worth of buffering is needed in to meet
+		 * the performance which requires 2560w*4bpp*5lines = 50KB,
+		 * where 2560 is max width. Now let's say pixel ram is fixed to
+		 * 50KB then in UBWC parellel fetch, maximum width of each
+		 * rectangle would be 2560/2 = 1280.
+		 *
+		 * For Linear, this restriction is avoided because maximum
+		 * buffering of 2 lines is enough which yields to
+		 * 2560w*4bpp*2lines=20KB. Based on this, we can have 2 max
+		 * width rectangles in parrellel fetch mode.
+		 */
+		if (is_ubwc &&
+			((dst[0]->w > (mdata->max_mixer_width / 2)) ||
+			(dst[1]->w > (mdata->max_mixer_width / 2)))) {
+			pr_err("in UBWC multirect parallel mode, max dst_w cannot be greater than %d. rec0_w=%d rec1_w=%d\n",
+				mdata->max_mixer_width / 2,
+				dst[0]->w, dst[1]->w);
+			return -EINVAL;
+		}
+
+		left_lm_w = left_lm_w_from_mfd(mfd);
+		if (dst[0]->x < left_lm_w) {
+			if (dst[0]->w > (left_lm_w - dst[0]->x)) {
+				pr_err("multirect parallel mode, rec0 dst (%d,%d) cannot cross lm boundary (%d)\n",
+					dst[0]->x, dst[0]->w, left_lm_w);
+				return -EINVAL;
+			}
+			rec0_mixer = MDSS_MDP_MIXER_MUX_LEFT;
+		} else {
+			rec0_mixer = MDSS_MDP_MIXER_MUX_RIGHT;
+		}
+
+		if (dst[1]->x < left_lm_w) {
+			/* check rec1's own geometry (was testing rec0's) */
+			if (dst[1]->w > (left_lm_w - dst[1]->x)) {
+				pr_err("multirect parallel mode, rec1 dst (%d,%d) cannot cross lm boundary (%d)\n",
+					dst[1]->x, dst[1]->w, left_lm_w);
+				return -EINVAL;
+			}
+			rec1_mixer = MDSS_MDP_MIXER_MUX_LEFT;
+		} else {
+			rec1_mixer = MDSS_MDP_MIXER_MUX_RIGHT;
+		}
+
+		if (rec0_mixer != rec1_mixer) {
+			pr_err("multirect parallel mode mixer mismatch. rec0_mix=%d rec1_mix=%d\n",
+				rec0_mixer, rec1_mixer);
+			return -EINVAL;
+		}
+	} else {
+		pr_err("Invalid multirect mode %d\n", mode);
+	}
+
+	pr_debug("layer->pndx:%d mode=%d\n", layers[0]->pipe_ndx, mode);
+
+	return 0;
+}
+
+/*
+ * __update_multirect_info() - group layers that share a pipe into rects.
+ *
+ * Fills the validate_info_list entry for layer @ndx and for any later
+ * layer with the same pipe_ndx, chaining the rects via multirect.next
+ * and numbering them in discovery order. Returns the number of rects
+ * found (1 when multirect is disabled), or a negative error code on
+ * invalid/mismatched multirect flags or when more layers share the pipe
+ * than the hardware supports.
+ */
+static int __update_multirect_info(struct msm_fb_data_type *mfd,
+		struct mdss_mdp_validate_info_t *validate_info_list,
+		struct mdp_input_layer *layer_list, int ndx, int layer_cnt)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_validate_info_t *vinfo[MDSS_MDP_PIPE_MAX_RECTS];
+	int i, ptype, max_rects, mode;
+	int cnt = 1;
+
+	mode = __multirect_layer_flags_to_mode(layer_list[ndx].flags);
+	if (IS_ERR_VALUE(mode))
+		return mode;
+
+	pr_debug("layer #%d pipe_ndx=%d multirect mode=%d\n",
+			ndx, layer_list[ndx].pipe_ndx, mode);
+
+	/* layer @ndx always becomes rect 0 of the group */
+	vinfo[0] = &validate_info_list[ndx];
+	vinfo[0]->layer = &layer_list[ndx];
+	vinfo[0]->multirect.mode = mode;
+	vinfo[0]->multirect.num = MDSS_MDP_PIPE_RECT0;
+	vinfo[0]->multirect.next = NULL;
+
+	/* nothing to be done if multirect is disabled */
+	if (mode == MDSS_MDP_PIPE_MULTIRECT_NONE)
+		return cnt;
+
+	ptype = get_pipe_type_from_ndx(layer_list[ndx].pipe_ndx);
+	if (ptype == MDSS_MDP_PIPE_TYPE_INVALID) {
+		pr_err("invalid pipe ndx %d\n", layer_list[ndx].pipe_ndx);
+		return -EINVAL;
+	}
+
+	/* hardware limit; 0 in the table means a single rect */
+	max_rects = mdata->rects_per_sspp[ptype] ? : 1;
+
+	/* scan the remaining layers for others sharing the same pipe */
+	for (i = ndx + 1; i < layer_cnt; i++) {
+		if (layer_list[ndx].pipe_ndx == layer_list[i].pipe_ndx) {
+			if (cnt >= max_rects) {
+				pr_err("more than %d layers of type %d with same pipe_ndx=%d indexes=%d %d\n",
+					max_rects, ptype,
+					layer_list[ndx].pipe_ndx, ndx, i);
+				return -EINVAL;
+			}
+
+			mode = __multirect_layer_flags_to_mode(
+					layer_list[i].flags);
+			if (IS_ERR_VALUE(mode))
+				return mode;
+
+			/* all rects of one pipe must request the same mode */
+			if (mode != vinfo[0]->multirect.mode) {
+				pr_err("unable to set different multirect modes for pipe_ndx=%d (%d %d)\n",
+					layer_list[ndx].pipe_ndx, ndx, i);
+				return -EINVAL;
+			}
+
+			pr_debug("found matching pair for pipe_ndx=%d (%d %d)\n",
+					layer_list[i].pipe_ndx, ndx, i);
+
+			vinfo[cnt] = &validate_info_list[i];
+			vinfo[cnt]->multirect.num = cnt;
+			vinfo[cnt]->multirect.next = vinfo[0]->layer;
+			vinfo[cnt]->multirect.mode = mode;
+			vinfo[cnt]->layer = &layer_list[i];
+
+			/* link the previous rect forward to this one */
+			vinfo[cnt - 1]->multirect.next = vinfo[cnt]->layer;
+			cnt++;
+		}
+	}
+
+	if (cnt == 1) {
+		pr_err("multirect mode enabled but unable to find extra rects for pipe_ndx=%x\n",
+			layer_list[ndx].pipe_ndx);
+		return -EINVAL;
+	}
+
+	return cnt;
+}
+
+/*
+ * __validate_multirect() - validate the multirect pairing for one layer
+ * @mfd:		Framebuffer data structure for display
+ * @validate_info_list:	per-layer validation info array, filled in by
+ *			__update_multirect_info()
+ * @layer_list:		input layer array from the commit request
+ * @ndx:		index of the layer to start pairing from
+ * @layer_cnt:		number of entries in @layer_list
+ *
+ * Builds the multirect pairing info for layer @ndx and, when two rects
+ * share one pipe, runs every generic checker in __multirect_validators
+ * plus the mode-specific check. Returns 0 on success or a negative
+ * error code on failure.
+ */
+static int __validate_multirect(struct msm_fb_data_type *mfd,
+	struct mdss_mdp_validate_info_t *validate_info_list,
+	struct mdp_input_layer *layer_list, int ndx, int layer_cnt)
+{
+	struct mdp_input_layer *layers[MDSS_MDP_PIPE_MAX_RECTS] = { 0 };
+	int i, cnt, rc;
+
+	/* fill multirect info; cnt is the number of rects sharing the pipe */
+	cnt = __update_multirect_info(mfd, validate_info_list,
+			layer_list, ndx, layer_cnt);
+	if (IS_ERR_VALUE(cnt))
+		return cnt;
+
+	if (cnt <= 1) {
+		/* nothing to validate in single rect mode */
+		return 0;
+	} else if (cnt > 2) {
+		/* hardware pairs at most two rects on one pipe */
+		pr_err("unsupported multirect configuration, multirect cnt=%d\n",
+				cnt);
+		return -EINVAL;
+	}
+
+	layers[0] = validate_info_list[ndx].layer;
+	layers[1] = validate_info_list[ndx].multirect.next;
+
+	/* any single failing validator rejects the whole pair */
+	for (i = 0; i < ARRAY_SIZE(__multirect_validators); i++) {
+		if (!__multirect_validators[i](layers, cnt))
+			return -EINVAL;
+	}
+
+	rc = __multirect_validate_mode(mfd, layers, cnt);
+	if (IS_ERR_VALUE(rc))
+		return rc;
+
+	return 0;
+}
+
+/*
+ * __validate_layers() - validate input layers
+ * @mfd:	Framebuffer data structure for display
+ * @file:	File handle of the process issuing the commit; attached to
+ *		each successfully validated pipe for ownership tracking
+ * @commit:	Commit version-1 structure for display
+ *
+ * This function validates all input layers present in layer_list. In case
+ * of failure, it updates the "error_code" for failed layer. It is possible
+ * to find failed layer from layer_list based on "error_code".
+ */
+static int __validate_layers(struct msm_fb_data_type *mfd,
+	struct file *file, struct mdp_layer_commit_v1 *commit)
+{
+	int ret, i = 0;
+	/* per-rect bitmasks of pipe indexes: seen / newly acquired / reused */
+	int rec_ndx[MDSS_MDP_PIPE_MAX_RECTS] = { 0 };
+	int rec_release_ndx[MDSS_MDP_PIPE_MAX_RECTS] = { 0 };
+	int rec_destroy_ndx[MDSS_MDP_PIPE_MAX_RECTS] = { 0 };
+	u32 left_lm_layers = 0, right_lm_layers = 0;
+	u32 left_cnt = 0, right_cnt = 0;
+	u32 left_lm_w = left_lm_w_from_mfd(mfd);
+	u32 mixer_mux, dst_x;
+	int layer_count = commit->input_layer_cnt;
+
+	struct mdss_mdp_pipe *pipe, *tmp, *left_blend_pipe;
+	struct mdss_mdp_pipe *right_plist[MAX_PIPES_PER_LM] = {0};
+	struct mdss_mdp_pipe *left_plist[MAX_PIPES_PER_LM] = {0};
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+
+	struct mdss_mdp_mixer *mixer = NULL;
+	struct mdp_input_layer *layer, *prev_layer, *layer_list;
+	struct mdss_mdp_validate_info_t *validate_info_list = NULL;
+	bool is_single_layer = false, force_validate;
+	enum layer_pipe_q pipe_q_type;
+	enum layer_zorder_used zorder_used[MDSS_MDP_MAX_STAGE] = {0};
+	enum mdss_mdp_pipe_rect rect_num;
+
+	ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
+	if (ret)
+		return ret;
+
+	/* null commit: skip layer validation, still run free-list/secure */
+	if (!layer_count)
+		goto validate_skip;
+
+	layer_list = commit->input_layers;
+
+	validate_info_list = kcalloc(layer_count, sizeof(*validate_info_list),
+				     GFP_KERNEL);
+	if (!validate_info_list) {
+		ret = -ENOMEM;
+		goto end;
+	}
+
+	/*
+	 * First pass: count layers per mixer, resolve multirect pairings
+	 * and reject duplicate pipe usage within the same rect.
+	 */
+	for (i = 0; i < layer_count; i++) {
+		if (layer_list[i].dst_rect.x >= left_lm_w)
+			right_lm_layers++;
+		else
+			left_lm_layers++;
+
+		if (right_lm_layers >= MAX_PIPES_PER_LM ||
+		    left_lm_layers >= MAX_PIPES_PER_LM) {
+			pr_err("too many pipes stagged mixer left: %d mixer right:%d\n",
+				left_lm_layers, right_lm_layers);
+			ret = -EINVAL;
+			goto end;
+		}
+
+		/* entries already linked by an earlier pairing are skipped */
+		if (!validate_info_list[i].layer) {
+			ret = __validate_multirect(mfd, validate_info_list,
+						   layer_list, i, layer_count);
+			if (ret) {
+				pr_err("error validating multirect config. ret=%d i=%d\n",
+					ret, i);
+				goto end;
+			}
+		}
+
+		rect_num = validate_info_list[i].multirect.num;
+		WARN_ON(rect_num >= MDSS_MDP_PIPE_MAX_RECTS);
+
+		if (rec_ndx[rect_num] & layer_list[i].pipe_ndx) {
+			pr_err("duplicate layer found pipe_ndx=%d rect=%d (0x%x)\n",
+					layer_list[i].pipe_ndx, rect_num,
+					rec_ndx[rect_num]);
+			ret = -EINVAL;
+			goto end;
+		}
+
+		rec_ndx[rect_num] |= layer_list[i].pipe_ndx;
+	}
+
+	/*
+	 * Force all layers to go through full validation after
+	 * dynamic resolution switch, immaterial of the configs in
+	 * the layer.
+	 */
+	mutex_lock(&mfd->switch_lock);
+	force_validate = (mfd->switch_state != MDSS_MDP_NO_UPDATE_REQUESTED);
+	mutex_unlock(&mfd->switch_lock);
+
+	/* Second pass: per-layer zorder checks, pipe assignment and setup. */
+	for (i = 0; i < layer_count; i++) {
+		enum layer_zorder_used z = LAYER_ZORDER_NONE;
+
+		layer = &layer_list[i];
+		dst_x = layer->dst_rect.x;
+		left_blend_pipe = NULL;
+
+		prev_layer = (i > 0) ? &layer_list[i - 1] : NULL;
+		/*
+		 * check if current layer is at same z_order as
+		 * previous one, and fail if any or both are async layers,
+		 * as async layers should have unique z_order.
+		 *
+		 * If it has same z_order and qualifies as a right blend,
+		 * pass a pointer to the pipe representing previous overlay or
+		 * in other terms left blend layer.
+		 *
+		 * Following logic of selecting left_blend has an inherent
+		 * assumption that layer list is sorted on dst_x within a
+		 * same z_order. Otherwise it will fail based on z_order checks.
+		 */
+		if (prev_layer && (prev_layer->z_order == layer->z_order)) {
+			struct mdp_rect *left = &prev_layer->dst_rect;
+			struct mdp_rect *right = &layer->dst_rect;
+
+			if ((layer->flags & MDP_LAYER_ASYNC)
+				|| (prev_layer->flags & MDP_LAYER_ASYNC)) {
+				ret = -EINVAL;
+				layer->error_code = ret;
+				pr_err("async layer should have unique z_order\n");
+				goto validate_exit;
+			}
+
+			/*
+			 * check if layer is right blend by checking it's
+			 * directly to the right.
+			 */
+			if (((left->x + left->w) == right->x) &&
+			    (left->y == right->y) && (left->h == right->h))
+				left_blend_pipe = pipe;
+
+			/*
+			 * if the layer is right at the left lm boundary and
+			 * src split is not required then right blend is not
+			 * required as it will lie only on the left mixer
+			 */
+			if (!__layer_needs_src_split(prev_layer) &&
+			    ((left->x + left->w) == left_lm_w))
+				left_blend_pipe = NULL;
+		}
+
+		/* determine which mixer(s) this layer's z_order slot spans */
+		if (!is_split_lm(mfd) || __layer_needs_src_split(layer))
+			z = LAYER_ZORDER_BOTH;
+		else if (dst_x >= left_lm_w)
+			z = LAYER_ZORDER_RIGHT;
+		else if ((dst_x + layer->dst_rect.w) <= left_lm_w)
+			z = LAYER_ZORDER_LEFT;
+		else
+			z = LAYER_ZORDER_BOTH;
+
+		if (!left_blend_pipe && (layer->z_order >= MDSS_MDP_MAX_STAGE ||
+				(z & zorder_used[layer->z_order]))) {
+			pr_err("invalid z_order=%d or already in use %x\n",
+					layer->z_order, z);
+			ret = -EINVAL;
+			layer->error_code = ret;
+			goto validate_exit;
+		} else {
+			zorder_used[layer->z_order] |= z;
+		}
+
+		if ((layer->dst_rect.x < left_lm_w) ||
+				__layer_needs_src_split(layer)) {
+			is_single_layer = (left_lm_layers == 1);
+			mixer_mux = MDSS_MDP_MIXER_MUX_LEFT;
+		} else {
+			is_single_layer = (right_lm_layers == 1);
+			mixer_mux = MDSS_MDP_MIXER_MUX_RIGHT;
+		}
+
+		/**
+		 * search pipe in current used list to find if parameters
+		 * are same. validation can be skipped if only buffer handle
+		 * is changed.
+		 */
+		pipe = (force_validate) ? NULL :
+				__find_layer_in_validate_q(
+					&validate_info_list[i], mdp5_data);
+		if (pipe) {
+			if (mixer_mux == MDSS_MDP_MIXER_MUX_RIGHT)
+				right_plist[right_cnt++] = pipe;
+			else
+				left_plist[left_cnt++] = pipe;
+
+			if (layer->flags & MDP_LAYER_PP) {
+				memcpy(&pipe->pp_cfg, layer->pp_info,
+					sizeof(struct mdp_overlay_pp_params));
+				ret = mdss_mdp_pp_sspp_config(pipe);
+				if (ret)
+					pr_err("pp setup failed %d\n", ret);
+				else
+					pipe->params_changed++;
+			}
+			pipe->dirty = false;
+			continue;
+		}
+
+		mixer = mdss_mdp_mixer_get(mdp5_data->ctl, mixer_mux);
+		if (!mixer) {
+			pr_err("unable to get %s mixer\n",
+				(mixer_mux == MDSS_MDP_MIXER_MUX_RIGHT) ?
+				"right" : "left");
+			ret = -EINVAL;
+			layer->error_code = ret;
+			goto validate_exit;
+		}
+
+		/* client z_order is relative; offset to the HW stage range */
+		layer->z_order += MDSS_MDP_STAGE_0;
+		ret = __validate_single_layer(mfd, &validate_info_list[i],
+				mixer_mux);
+		if (ret) {
+			pr_err("layer:%d validation failed ret=%d\n", i, ret);
+			layer->error_code = ret;
+			goto validate_exit;
+		}
+
+		rect_num = validate_info_list[i].multirect.num;
+
+		pipe = __assign_pipe_for_layer(mfd, mixer, layer->pipe_ndx,
+			&pipe_q_type, rect_num);
+		if (IS_ERR_OR_NULL(pipe)) {
+			pr_err("error assigning pipe id=0x%x rc:%ld\n",
+				layer->pipe_ndx, PTR_ERR(pipe));
+			ret = PTR_ERR(pipe);
+			layer->error_code = ret;
+			goto validate_exit;
+		}
+
+		/* remember how the pipe was obtained for failure cleanup */
+		if (pipe_q_type == LAYER_USES_NEW_PIPE_Q)
+			rec_release_ndx[rect_num] |= pipe->ndx;
+		if (pipe_q_type == LAYER_USES_DESTROY_PIPE_Q)
+			rec_destroy_ndx[rect_num] |= pipe->ndx;
+
+		ret = mdss_mdp_pipe_map(pipe);
+		if (IS_ERR_VALUE(ret)) {
+			pr_err("Unable to map used pipe%d ndx=%x\n",
+				pipe->num, pipe->ndx);
+			layer->error_code = ret;
+			goto validate_exit;
+		}
+
+		if (pipe_q_type == LAYER_USES_USED_PIPE_Q) {
+			/*
+			 * reconfig is allowed on new/destroy pipes. Only used
+			 * pipe needs this extra validation.
+			 */
+			ret = __validate_layer_reconfig(layer, pipe);
+			if (ret) {
+				pr_err("layer reconfig validation failed=%d\n",
+					ret);
+				mdss_mdp_pipe_unmap(pipe);
+				layer->error_code = ret;
+				goto validate_exit;
+			}
+		}
+
+		ret = __configure_pipe_params(mfd, &validate_info_list[i], pipe,
+			left_blend_pipe, is_single_layer, mixer_mux);
+		if (ret) {
+			pr_err("configure pipe param failed: pipe index= %d\n",
+				pipe->ndx);
+			mdss_mdp_pipe_unmap(pipe);
+			layer->error_code = ret;
+			goto validate_exit;
+		}
+
+		mdss_mdp_pipe_unmap(pipe);
+
+		/* keep the original copy of dst_x */
+		pipe->layer.dst_rect.x = layer->dst_rect.x = dst_x;
+
+		if (mixer_mux == MDSS_MDP_MIXER_MUX_RIGHT)
+			right_plist[right_cnt++] = pipe;
+		else
+			left_plist[left_cnt++] = pipe;
+
+		pr_debug("id:0x%x flags:0x%x dst_x:%d\n",
+			layer->pipe_ndx, layer->flags, layer->dst_rect.x);
+		layer->z_order -= MDSS_MDP_STAGE_0;
+	}
+
+	ret = mdss_mdp_perf_bw_check(mdp5_data->ctl, left_plist, left_cnt,
+		right_plist, right_cnt);
+	if (ret) {
+		pr_err("bw validation check failed: %d\n", ret);
+		goto validate_exit;
+	}
+
+validate_skip:
+	__handle_free_list(mdp5_data, validate_info_list, layer_count);
+
+	ret = __validate_secure_display(mdp5_data);
+
+validate_exit:
+	pr_debug("err=%d total_layer:%d left:%d right:%d rec0_rel_ndx=0x%x rec1_rel_ndx=0x%x rec0_destroy_ndx=0x%x rec1_destroy_ndx=0x%x processed=%d\n",
+		ret, layer_count, left_lm_layers, right_lm_layers,
+		rec_release_ndx[0], rec_release_ndx[1],
+		rec_destroy_ndx[0], rec_destroy_ndx[1], i);
+	MDSS_XLOG(rec_ndx[0], rec_ndx[1], layer_count,
+			left_lm_layers, right_lm_layers,
+			rec_release_ndx[0], rec_release_ndx[1],
+			rec_destroy_ndx[0], rec_destroy_ndx[1], ret);
+	/*
+	 * On failure: destroy pipes that were newly acquired this pass and
+	 * park reused ones back on the destroy list. On success: record the
+	 * owning file on every in-use pipe.
+	 */
+	mutex_lock(&mdp5_data->list_lock);
+	list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_used, list) {
+		if (IS_ERR_VALUE(ret)) {
+			if (((pipe->ndx & rec_release_ndx[0]) &&
+						(pipe->multirect.num == 0)) ||
+					((pipe->ndx & rec_release_ndx[1]) &&
+					 (pipe->multirect.num == 1))) {
+				mdss_mdp_smp_unreserve(pipe);
+				pipe->params_changed = 0;
+				pipe->dirty = true;
+				if (!list_empty(&pipe->list))
+					list_del_init(&pipe->list);
+				mdss_mdp_pipe_destroy(pipe);
+			} else if (((pipe->ndx & rec_destroy_ndx[0]) &&
+						(pipe->multirect.num == 0)) ||
+					((pipe->ndx & rec_destroy_ndx[1]) &&
+					 (pipe->multirect.num == 1))) {
+				/*
+				 * cleanup/destroy list pipes should move back
+				 * to destroy list. Next/current kickoff cycle
+				 * will release the pipe because validate also
+				 * acquires ov_lock.
+				 */
+				list_move(&pipe->list,
+					&mdp5_data->pipes_destroy);
+			}
+		} else {
+			pipe->file = file;
+			pr_debug("file pointer attached with pipe is %pK\n",
+				file);
+		}
+	}
+	mutex_unlock(&mdp5_data->list_lock);
+end:
+	kfree(validate_info_list);
+	mutex_unlock(&mdp5_data->ov_lock);
+
+	pr_debug("fb%d validated layers =%d\n", mfd->index, i);
+
+	return ret;
+}
+
+/*
+ * __parse_frc_info() - parse frc info from userspace
+ * @mdp5_data: mdss data per FB device
+ * @input_frc: frc info from user space
+ *
+ * This function fills the FRC info of current device which will be used
+ * during following kickoff. It also manages the vsync handler: the
+ * handler is registered on the enable transition and removed on the
+ * disable transition.
+ */
+static void __parse_frc_info(struct mdss_overlay_private *mdp5_data,
+	struct mdp_frc_info *input_frc)
+{
+	struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
+	struct mdss_mdp_frc_fsm *frc_fsm = mdp5_data->frc_fsm;
+
+	if (input_frc->flags & MDP_VIDEO_FRC_ENABLE) {
+		struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
+
+		if (!frc_fsm->enable) {
+			/* init frc_fsm when first entry */
+			mdss_mdp_frc_fsm_init_state(frc_fsm);
+			/* keep vsync on when FRC is enabled */
+			ctl->ops.add_vsync_handler(ctl,
+					&ctl->frc_vsync_handler);
+		}
+
+		/* latch the frame count/timestamp for the next kickoff */
+		frc_info->cur_frc.frame_cnt = input_frc->frame_cnt;
+		frc_info->cur_frc.timestamp = input_frc->timestamp;
+	} else if (frc_fsm->enable) {
+		/* remove vsync handler when FRC is disabled */
+		ctl->ops.remove_vsync_handler(ctl, &ctl->frc_vsync_handler);
+	}
+
+	/* record the new enable state for the next transition check */
+	frc_fsm->enable = input_frc->flags & MDP_VIDEO_FRC_ENABLE;
+
+	pr_debug("frc_enable=%d\n", frc_fsm->enable);
+}
+
+/*
+ * mdss_mdp_layer_pre_commit() - pre commit validation for input layers
+ * @mfd:	Framebuffer data structure for display
+ * @file:	File handle of the process issuing the commit
+ * @commit:	Commit version-1 structure for display
+ *
+ * This function checks if layers present in commit request are already
+ * validated or not. If there is mismatch in validate and commit layers
+ * then it validate all input layers again. On successful validation, it
+ * maps the input layer buffer and creates release/retire fences.
+ *
+ * This function is called from client context and can return the error.
+ */
+int mdss_mdp_layer_pre_commit(struct msm_fb_data_type *mfd,
+	struct file *file, struct mdp_layer_commit_v1 *commit)
+{
+	int ret, i;
+	int layer_count = commit->input_layer_cnt;
+	bool validate_failed = false;
+
+	struct mdss_mdp_pipe *pipe, *tmp;
+	struct mdp_input_layer *layer_list;
+	struct mdss_overlay_private *mdp5_data;
+	struct mdss_mdp_data *src_data[MDSS_MDP_MAX_SSPP];
+	struct mdss_mdp_validate_info_t *validate_info_list;
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+
+	if (!mdp5_data || !mdp5_data->ctl)
+		return -EINVAL;
+
+	layer_list = commit->input_layers;
+
+	/* handle null commit */
+	if (!layer_count) {
+		__handle_free_list(mdp5_data, NULL, layer_count);
+		/* Check for secure state transition. */
+		return __validate_secure_display(mdp5_data);
+	}
+
+	validate_info_list = kcalloc(layer_count, sizeof(*validate_info_list),
+				     GFP_KERNEL);
+	if (!validate_info_list)
+		return -ENOMEM;
+
+	/* resolve multirect pairings; paired entries are filled in one go */
+	for (i = 0; i < layer_count; i++) {
+		if (!validate_info_list[i].layer) {
+			ret = __update_multirect_info(mfd, validate_info_list,
+						   layer_list, i, layer_count);
+			if (IS_ERR_VALUE(ret)) {
+				pr_err("error updating multirect config. ret=%d i=%d\n",
+					ret, i);
+				goto end;
+			}
+		}
+	}
+
+	/* any layer without an already-validated pipe forces re-validation */
+	for (i = 0; i < layer_count; i++) {
+		pipe =  __find_layer_in_validate_q(&validate_info_list[i],
+						   mdp5_data);
+		if (!pipe) {
+			validate_failed = true;
+			break;
+		}
+	}
+
+	if (validate_failed) {
+		ret = __validate_layers(mfd, file, commit);
+		if (ret) {
+			pr_err("__validate_layers failed. rc=%d\n", ret);
+			goto end;
+		}
+	} else {
+		/*
+		 * move unassigned pipes to cleanup list since commit
+		 * supports validate+commit operation.
+		 */
+		__handle_free_list(mdp5_data, validate_info_list, layer_count);
+	}
+
+	i = 0;
+
+	/*
+	 * Map the source buffer of every in-use pipe. Solid-fill pipes have
+	 * no buffer and do not advance 'i'.
+	 * NOTE(review): there is no explicit bound check of 'i' against
+	 * MDSS_MDP_MAX_SSPP here -- presumably the validated pipe count can
+	 * never exceed it; confirm against __validate_layers() limits.
+	 */
+	mutex_lock(&mdp5_data->list_lock);
+	list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_used, list) {
+		if (pipe->flags & MDP_SOLID_FILL) {
+			src_data[i] = NULL;
+			continue;
+		}
+		src_data[i] = __map_layer_buffer(mfd, pipe, validate_info_list,
+			layer_count);
+		if (IS_ERR_OR_NULL(src_data[i++])) {
+			i--;
+			mutex_unlock(&mdp5_data->list_lock);
+			ret =  PTR_ERR(src_data[i]);
+			goto map_err;
+		}
+	}
+	mutex_unlock(&mdp5_data->list_lock);
+
+	ret = mdss_mdp_overlay_start(mfd);
+	if (ret) {
+		pr_err("unable to start overlay %d (%d)\n", mfd->index, ret);
+		goto map_err;
+	}
+
+	if (commit->frc_info)
+		__parse_frc_info(mdp5_data, commit->frc_info);
+
+	ret = __handle_buffer_fences(mfd, commit, layer_list);
+
+map_err:
+	/* on any failure, release every buffer mapped above */
+	if (ret) {
+		mutex_lock(&mdp5_data->list_lock);
+		for (i--; i >= 0; i--)
+			if (src_data[i])
+				mdss_mdp_overlay_buf_free(mfd, src_data[i]);
+		mutex_unlock(&mdp5_data->list_lock);
+	}
+end:
+	kfree(validate_info_list);
+
+	return ret;
+}
+
+/*
+ * mdss_mdp_layer_atomic_validate() - validate input layers
+ * @mfd:	Framebuffer data structure for display
+ * @file:	File handle of the process issuing the commit
+ * @commit:	Commit version-1 structure for display
+ *
+ * Sanity-checks the request and display state, then validates only the
+ * input layers received from the client. It does not perform any
+ * validation for the mdp_output_layer defined for writeback display.
+ */
+int mdss_mdp_layer_atomic_validate(struct msm_fb_data_type *mfd,
+	struct file *file, struct mdp_layer_commit_v1 *commit)
+{
+	struct mdss_overlay_private *ov_priv;
+
+	/* reject a null framebuffer or commit request outright */
+	if (!mfd || !commit) {
+		pr_err("invalid input params\n");
+		return -EINVAL;
+	}
+
+	/* overlay private data and its control path must both exist */
+	ov_priv = mfd_to_mdp5_data(mfd);
+	if (!ov_priv || !ov_priv->ctl) {
+		pr_err("invalid input params\n");
+		return -ENODEV;
+	}
+
+	/* validation is meaningless while the interface is powered down */
+	if (mdss_fb_is_power_off(mfd)) {
+		pr_err("display interface is in off state fb:%d\n",
+			mfd->index);
+		return -EPERM;
+	}
+
+	return __validate_layers(mfd, file, commit);
+}
+
+/*
+ * mdss_mdp_layer_pre_commit_wfd() - pre-commit for writeback (WFD) display
+ * @mfd:	Framebuffer data structure for display
+ * @file:	File handle of the process issuing the commit
+ * @commit:	Commit version-1 structure for display
+ *
+ * Queues the output buffer (when one is provided), takes a reference on
+ * its acquire fence, then runs the common input-layer pre-commit. On
+ * success the output fence is appended to the sync_pt acquire-fence list
+ * so kickoff waits on it; on failure the fence reference and the queued
+ * output data are released. Returns 0 on success or a negative error.
+ */
+int mdss_mdp_layer_pre_commit_wfd(struct msm_fb_data_type *mfd,
+	struct file *file, struct mdp_layer_commit_v1 *commit)
+{
+	int rc, count;
+	struct mdss_overlay_private *mdp5_data;
+	struct mdss_mdp_wfd *wfd = NULL;
+	struct mdp_output_layer *output_layer = NULL;
+	struct mdss_mdp_wfd_data *data = NULL;
+	struct sync_fence *fence = NULL;
+	struct msm_sync_pt_data *sync_pt_data = NULL;
+
+	if (!mfd || !commit)
+		return -EINVAL;
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+
+	if (!mdp5_data || !mdp5_data->ctl || !mdp5_data->wfd) {
+		pr_err("invalid wfd state\n");
+		return -ENODEV;
+	}
+
+	if (commit->output_layer) {
+		wfd = mdp5_data->wfd;
+		output_layer = commit->output_layer;
+
+		if (output_layer->buffer.plane_count > MAX_PLANES) {
+			pr_err("Output buffer plane_count exceeds MAX_PLANES limit:%d\n",
+					output_layer->buffer.plane_count);
+			return -EINVAL;
+		}
+
+		data = mdss_mdp_wfd_add_data(wfd, output_layer);
+		if (IS_ERR_OR_NULL(data))
+			return PTR_ERR(data);
+
+		/* a negative fd means no acquire fence was supplied */
+		if (output_layer->buffer.fence >= 0) {
+			fence = sync_fence_fdget(output_layer->buffer.fence);
+			if (!fence) {
+				pr_err("fail to get output buffer fence\n");
+				rc = -EINVAL;
+				goto fence_get_err;
+			}
+		}
+	} else {
+		/* commit without an output layer relies on a prior validate */
+		wfd = mdp5_data->wfd;
+		if (!wfd->ctl || !wfd->ctl->wb) {
+			pr_err("wfd commit with null out layer and no validate\n");
+			return -EINVAL;
+		}
+	}
+
+	rc = mdss_mdp_layer_pre_commit(mfd, file, commit);
+	if (rc) {
+		pr_err("fail to import input layer buffers. rc=%d\n", rc);
+		goto input_layer_err;
+	}
+
+	/* hand the output fence to the sync_pt machinery for kickoff */
+	if (fence) {
+		sync_pt_data = &mfd->mdp_sync_pt_data;
+		mutex_lock(&sync_pt_data->sync_mutex);
+		count = sync_pt_data->acq_fen_cnt;
+
+		if (count >= MDP_MAX_FENCE_FD) {
+			pr_err("Reached maximum possible value for fence count\n");
+			mutex_unlock(&sync_pt_data->sync_mutex);
+			rc = -EINVAL;
+			goto input_layer_err;
+		}
+
+		sync_pt_data->acq_fen[count] = fence;
+		sync_pt_data->acq_fen_cnt++;
+		mutex_unlock(&sync_pt_data->sync_mutex);
+	}
+	return rc;
+
+input_layer_err:
+	if (fence)
+		sync_fence_put(fence);
+fence_get_err:
+	if (data)
+		mdss_mdp_wfd_remove_data(wfd, data);
+	return rc;
+}
+
+/*
+ * mdss_mdp_layer_atomic_validate_wfd() - validate a writeback (WFD) commit
+ * @mfd:	Framebuffer data structure for display
+ * @file:	File handle of the process issuing the commit
+ * @commit:	Commit version-1 structure for display
+ *
+ * Validates the mandatory output layer, prepares the writeback path
+ * (with MDP clocks held only across the setup call), then validates the
+ * input layers. Returns 0 on success or a negative error code.
+ *
+ * Note: the original implementation funnelled every failure through a
+ * degenerate "validate_failed:" label that only returned rc (including a
+ * goto on the line immediately before the label); the gotos are replaced
+ * with equivalent direct returns.
+ */
+int mdss_mdp_layer_atomic_validate_wfd(struct msm_fb_data_type *mfd,
+	struct file *file, struct mdp_layer_commit_v1 *commit)
+{
+	int rc = 0;
+	struct mdss_overlay_private *mdp5_data;
+	struct mdss_mdp_wfd *wfd;
+	struct mdp_output_layer *output_layer;
+
+	if (!mfd || !commit)
+		return -EINVAL;
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+
+	if (!mdp5_data || !mdp5_data->ctl || !mdp5_data->wfd) {
+		pr_err("invalid wfd state\n");
+		return -ENODEV;
+	}
+
+	if (!commit->output_layer) {
+		pr_err("no output layer defined\n");
+		return -EINVAL;
+	}
+
+	wfd = mdp5_data->wfd;
+	output_layer = commit->output_layer;
+
+	rc = mdss_mdp_wfd_validate(wfd, output_layer);
+	if (rc) {
+		pr_err("fail to validate the output layer = %d\n", rc);
+		return rc;
+	}
+
+	/* clocks are needed only while programming the writeback setup */
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+	rc = mdss_mdp_wfd_setup(wfd, output_layer);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	if (rc) {
+		pr_err("fail to prepare wfd = %d\n", rc);
+		return rc;
+	}
+
+	rc = mdss_mdp_layer_atomic_validate(mfd, file, commit);
+	if (rc)
+		pr_err("fail to validate the input layers = %d\n", rc);
+
+	return rc;
+}
+
+/*
+ * mdss_mdp_async_position_update() - apply async position updates to pipes
+ * @mfd:	Framebuffer data structure for display
+ * @update_pos:	list of async layers carrying the new src/dst positions
+ *
+ * For each async layer, looks up the in-use pipe by pipe_ndx, validates
+ * the requested position, programs it and finally flushes all touched
+ * pipes in one shot. On failure the offending layer's error_code is set
+ * and a negative error code is returned.
+ *
+ * NOTE(review): the pipe lookup is limited to MDSS_MDP_PIPE_RECT0 --
+ * presumably multirect pipes are not expected on this path; confirm.
+ */
+int mdss_mdp_async_position_update(struct msm_fb_data_type *mfd,
+		struct mdp_position_update *update_pos)
+{
+	int i, rc = 0;
+	struct mdss_mdp_pipe *pipe = NULL;
+	struct mdp_async_layer *layer;
+	struct mdss_rect dst, src;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	u32 flush_bits = 0, inputndx = 0;
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+	for (i = 0; i < update_pos->input_layer_cnt; i++) {
+		layer = &update_pos->input_layers[i];
+		mutex_lock(&mdp5_data->list_lock);
+		__find_pipe_in_list(&mdp5_data->pipes_used, layer->pipe_ndx,
+			&pipe, MDSS_MDP_PIPE_RECT0);
+		mutex_unlock(&mdp5_data->list_lock);
+		if (!pipe) {
+			pr_err("invalid pipe ndx=0x%x for async update\n",
+					layer->pipe_ndx);
+			rc = -ENODEV;
+			layer->error_code = rc;
+			goto done;
+		}
+
+		rc =  __async_update_position_check(mfd, pipe, &layer->src,
+				&layer->dst);
+		if (rc) {
+			layer->error_code = rc;
+			goto done;
+		}
+
+		/* only the position moves; width/height stay as programmed */
+		src = (struct mdss_rect) {layer->src.x, layer->src.y,
+				pipe->src.w, pipe->src.h};
+		dst = (struct mdss_rect) {layer->dst.x, layer->dst.y,
+				pipe->src.w, pipe->src.h};
+
+		pr_debug("src:{%d,%d,%d,%d}, dst:{%d,%d,%d,%d}\n",
+				src.x, src.y, src.w, src.h,
+				dst.x, dst.y, dst.w, dst.h);
+
+		mdss_mdp_pipe_position_update(pipe, &src, &dst);
+
+		/* accumulate flush bits so all pipes flush together below */
+		flush_bits |= mdss_mdp_get_pipe_flush_bits(pipe);
+		inputndx |= layer->pipe_ndx;
+	}
+	mdss_mdp_async_ctl_flush(mfd, flush_bits);
+
+done:
+	MDSS_XLOG(inputndx, update_pos->input_layer_cnt, flush_bits, rc);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	return rc;
+}
+
diff --git a/drivers/video/fbdev/msm/mdss_mdp_overlay.c b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
new file mode 100644
index 0000000..f69f5b3
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
@@ -0,0 +1,6898 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/msm_mdp.h>
+#include <linux/memblock.h>
+#include <linux/sort.h>
+#include <linux/sw_sync.h>
+#include <linux/kmemleak.h>
+#include <asm/div64.h>
+
+#include <soc/qcom/event_timer.h>
+#include <linux/msm-bus.h>
+#include "mdss.h"
+#include "mdss_debug.h"
+#include "mdss_fb.h"
+#include "mdss_mdp.h"
+#include "mdss_smmu.h"
+#include "mdss_mdp_wfd.h"
+#include "mdss_dsi_clk.h"
+
+#define VSYNC_PERIOD 16
+#define BORDERFILL_NDX	0x0BF000BF
+#define CHECK_BOUNDS(offset, size, max_size) \
+	(((size) > (max_size)) || ((offset) > ((max_size) - (size))))
+
+#define IS_RIGHT_MIXER_OV(flags, dst_x, left_lm_w)	\
+	((flags & MDSS_MDP_RIGHT_MIXER) || (dst_x >= left_lm_w))
+
+#define BUF_POOL_SIZE 32
+
+#define DFPS_DATA_MAX_HFP 8192
+#define DFPS_DATA_MAX_HBP 8192
+#define DFPS_DATA_MAX_HPW 8192
+#define DFPS_DATA_MAX_FPS 0x7fffffff
+#define DFPS_DATA_MAX_CLK_RATE 250000
+
+static int mdss_mdp_overlay_free_fb_pipe(struct msm_fb_data_type *mfd);
+static int mdss_mdp_overlay_fb_parse_dt(struct msm_fb_data_type *mfd);
+static int mdss_mdp_overlay_off(struct msm_fb_data_type *mfd);
+static void __overlay_kickoff_requeue(struct msm_fb_data_type *mfd);
+static void __vsync_retire_signal(struct msm_fb_data_type *mfd, int val);
+static int __vsync_set_vsync_handler(struct msm_fb_data_type *mfd);
+static int mdss_mdp_update_panel_info(struct msm_fb_data_type *mfd,
+		int mode, int dest_ctrl);
+static int mdss_mdp_set_cfg(struct msm_fb_data_type *mfd,
+		struct mdp_set_cfg *cfg);
+
+static inline bool is_ov_right_blend(struct mdp_rect *left_blend,
+	struct mdp_rect *right_blend, u32 left_lm_w)
+{
+	u32 left_edge = left_blend->x + left_blend->w;
+
+	/*
+	 * The right rect qualifies as a right blend when it starts exactly
+	 * where the left rect ends (and that edge is not the left mixer
+	 * boundary), the two rects are not co-located horizontally, and
+	 * they share the same vertical offset and height.
+	 */
+	return (left_edge == right_blend->x) &&
+	       (left_edge != left_lm_w) &&
+	       (left_blend->x != right_blend->x) &&
+	       (left_blend->y == right_blend->y) &&
+	       (left_blend->h == right_blend->h);
+}
+
+/**
+ * __is_more_decimation_doable() -
+ * @pipe: pointer to pipe data structure
+ *
+ * if per pipe BW exceeds the limit and user
+ * has not requested decimation then return
+ * -E2BIG error back to user else try more
+ * decimation based on following table config.
+ *
+ * ----------------------------------------------------------
+ * error | split mode | src_split | v_deci |     action     |
+ * ------|------------|-----------|--------|----------------|
+ *       |            |           |   00   | return error   |
+ *       |            |  enabled  |--------|----------------|
+ *       |            |           |   >1   | more decmation |
+ *       |     yes    |-----------|--------|----------------|
+ *       |            |           |   00   | return error   |
+ *       |            | disabled  |--------|----------------|
+ *       |            |           |   >1   | return error   |
+ * E2BIG |------------|-----------|--------|----------------|
+ *       |            |           |   00   | return error   |
+ *       |            |  enabled  |--------|----------------|
+ *       |            |           |   >1   | more decmation |
+ *       |     no     |-----------|--------|----------------|
+ *       |            |           |   00   | return error   |
+ *       |            | disabled  |--------|----------------|
+ *       |            |           |   >1   | more decmation |
+ * ----------------------------------------------------------
+ */
+static inline bool __is_more_decimation_doable(struct mdss_mdp_pipe *pipe)
+{
+	struct mdss_data_type *mdata = pipe->mixer_left->ctl->mdata;
+	struct msm_fb_data_type *mfd = pipe->mixer_left->ctl->mfd;
+
+	/* no vertical decimation requested: always return error (table) */
+	if (!pipe->vert_deci)
+		return false;
+
+	/*
+	 * Decimation already in use: more is doable unless the display is
+	 * in split mode on a chipset without source-split support. This is
+	 * the simplified form of the original condition, whose
+	 * "(!has_src_split || (has_src_split && !vert_deci))" term reduces
+	 * to "(!has_src_split || !vert_deci)" by absorption.
+	 */
+	return !mfd->split_mode || mdata->has_src_split;
+}
+
+/* Look up an in-use pipe by its index; returns NULL when not found. */
+static struct mdss_mdp_pipe *__overlay_find_pipe(
+		struct msm_fb_data_type *mfd, u32 ndx)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_pipe *cur, *found = NULL;
+
+	/* scan the used-pipe list under the list lock */
+	mutex_lock(&mdp5_data->list_lock);
+	list_for_each_entry(cur, &mdp5_data->pipes_used, list) {
+		if (cur->ndx == ndx) {
+			found = cur;
+			break;
+		}
+	}
+	mutex_unlock(&mdp5_data->list_lock);
+
+	return found;
+}
+
+/*
+ * mdss_mdp_overlay_get() - return the overlay request data for a pipe
+ * @mfd:	Framebuffer data structure for display
+ * @req:	in: req->id carries the pipe index; out: the saved request
+ *
+ * Looks up @req->id among the currently used pipes and copies back the
+ * mdp_overlay request that configured it. Returns 0 on success or
+ * -ENODEV when no such pipe is in use.
+ */
+static int mdss_mdp_overlay_get(struct msm_fb_data_type *mfd,
+				struct mdp_overlay *req)
+{
+	struct mdss_mdp_pipe *pipe;
+
+	pipe = __overlay_find_pipe(mfd, req->id);
+	if (!pipe) {
+		pr_err("invalid pipe ndx=%x\n", req->id);
+		/*
+		 * __overlay_find_pipe() returns NULL (never an ERR_PTR), so
+		 * the old "pipe ? PTR_ERR(pipe) : -ENODEV" ternary here was
+		 * dead code that always produced -ENODEV; return it directly.
+		 */
+		return -ENODEV;
+	}
+
+	*req = pipe->req_data;
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_ov_xres_check() - check overlay horizontal destination bounds
+ * @mfd:	Framebuffer data structure for display
+ * @req:	overlay request; dst_rect.x and flags may be rewritten in
+ *		place to normalize legacy MDSS_MDP_RIGHT_MIXER usage
+ *
+ * Computes the horizontal extent available to this overlay (left mixer,
+ * right mixer, or both on source-split capable chipsets) and verifies
+ * the destination rect fits. Returns 0 on success, -EPERM if the needed
+ * mixer does not exist, or -EOVERFLOW when the rect is out of bounds.
+ */
+static int mdss_mdp_ov_xres_check(struct msm_fb_data_type *mfd,
+	struct mdp_overlay *req)
+{
+	u32 xres = 0;
+	u32 left_lm_w = left_lm_w_from_mfd(mfd);
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+
+	if (IS_RIGHT_MIXER_OV(req->flags, req->dst_rect.x, left_lm_w)) {
+		if (mdata->has_src_split) {
+			xres = left_lm_w;
+
+			if (req->flags & MDSS_MDP_RIGHT_MIXER) {
+				pr_warn("invalid use of RIGHT_MIXER flag.\n");
+				/*
+				 * if chip-set is capable of source split then
+				 * all layers which are only on right LM should
+				 * have their x offset relative to left LM's
+				 * left-top or in other words relative to
+				 * panel width.
+				 * By modifying dst_x below, we are assuming
+				 * that client is running in legacy mode
+				 * chipset capable of source split.
+				 */
+				if (req->dst_rect.x < left_lm_w)
+					req->dst_rect.x += left_lm_w;
+
+				req->flags &= ~MDSS_MDP_RIGHT_MIXER;
+			}
+		} else if (req->dst_rect.x >= left_lm_w) {
+			/*
+			 * this is a step towards removing a reliance on
+			 * MDSS_MDP_RIGHT_MIXER flags. With the new src split
+			 * code, some clients of non-src-split chipsets have
+			 * stopped sending MDSS_MDP_RIGHT_MIXER flag and
+			 * modified their xres relative to full panel
+			 * dimensions. In such cases, we need to deduct left
+			 * layer mixer width before we program this HW.
+			 */
+			req->dst_rect.x -= left_lm_w;
+			req->flags |= MDSS_MDP_RIGHT_MIXER;
+		}
+
+		if (ctl->mixer_right) {
+			xres += ctl->mixer_right->width;
+		} else {
+			pr_err("ov cannot be placed on right mixer\n");
+			return -EPERM;
+		}
+	} else {
+		if (ctl->mixer_left) {
+			xres = ctl->mixer_left->width;
+		} else {
+			pr_err("ov cannot be placed on left mixer\n");
+			return -EPERM;
+		}
+
+		/* with src split, a left-side overlay may span both mixers */
+		if (mdata->has_src_split && ctl->mixer_right)
+			xres += ctl->mixer_right->width;
+	}
+
+	if (CHECK_BOUNDS(req->dst_rect.x, req->dst_rect.w, xres)) {
+		pr_err("dst_xres is invalid. dst_x:%d, dst_w:%d, xres:%d\n",
+			req->dst_rect.x, req->dst_rect.w, xres);
+		return -EOVERFLOW;
+	}
+
+	return 0;
+}
+
+int mdss_mdp_overlay_req_check(struct msm_fb_data_type *mfd,
+			       struct mdp_overlay *req,
+			       struct mdss_mdp_format_params *fmt)
+{
+	u32 yres;
+	u32 min_src_size, min_dst_size;
+	int content_secure;
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+
+	yres = mfd->fbi->var.yres;
+
+	content_secure = (req->flags & MDP_SECURE_OVERLAY_SESSION);
+	if (!ctl->is_secure && content_secure &&
+				 (mfd->panel.type == WRITEBACK_PANEL)) {
+		pr_debug("return due to security concerns\n");
+		return -EPERM;
+	}
+	if (mdata->mdp_rev >= MDSS_MDP_HW_REV_102) {
+		min_src_size = fmt->is_yuv ? 2 : 1;
+		min_dst_size = 1;
+	} else {
+		min_src_size = fmt->is_yuv ? 10 : 5;
+		min_dst_size = 2;
+	}
+
+	if (req->z_order >= (mdata->max_target_zorder + MDSS_MDP_STAGE_0)) {
+		pr_err("zorder %d out of range\n", req->z_order);
+		return -ERANGE;
+	}
+
+	/*
+	 * Cursor overlays are only supported for targets
+	 * with dedicated cursors within VP
+	 */
+	if ((req->pipe_type == MDSS_MDP_PIPE_TYPE_CURSOR) &&
+		((req->z_order != HW_CURSOR_STAGE(mdata)) ||
+		 !mdata->ncursor_pipes ||
+		 (req->src_rect.w > mdata->max_cursor_size))) {
+		pr_err("Incorrect cursor overlay cursor_pipes=%d zorder=%d\n",
+			mdata->ncursor_pipes, req->z_order);
+		return -EINVAL;
+	}
+
+	if (req->src.width > MAX_IMG_WIDTH ||
+	    req->src.height > MAX_IMG_HEIGHT ||
+	    req->src_rect.w < min_src_size || req->src_rect.h < min_src_size ||
+	    CHECK_BOUNDS(req->src_rect.x, req->src_rect.w, req->src.width) ||
+	    CHECK_BOUNDS(req->src_rect.y, req->src_rect.h, req->src.height)) {
+		pr_err("invalid source image img wh=%dx%d rect=%d,%d,%d,%d\n",
+		       req->src.width, req->src.height,
+		       req->src_rect.x, req->src_rect.y,
+		       req->src_rect.w, req->src_rect.h);
+		return -EOVERFLOW;
+	}
+
+	if (req->dst_rect.w < min_dst_size || req->dst_rect.h < min_dst_size) {
+		pr_err("invalid destination resolution (%dx%d)",
+		       req->dst_rect.w, req->dst_rect.h);
+		return -EOVERFLOW;
+	}
+
+	if (req->horz_deci || req->vert_deci) {
+		if (!mdata->has_decimation) {
+			pr_err("No Decimation in MDP V=%x\n", mdata->mdp_rev);
+			return -EINVAL;
+		} else if ((req->horz_deci > MAX_DECIMATION) ||
+				(req->vert_deci > MAX_DECIMATION))  {
+			pr_err("Invalid decimation factors horz=%d vert=%d\n",
+					req->horz_deci, req->vert_deci);
+			return -EINVAL;
+		} else if (req->flags & MDP_BWC_EN) {
+			pr_err("Decimation can't be enabled with BWC\n");
+			return -EINVAL;
+		} else if (fmt->fetch_mode != MDSS_MDP_FETCH_LINEAR) {
+			pr_err("Decimation can't be enabled with MacroTile format\n");
+			return -EINVAL;
+		}
+	}
+
+	if (!(req->flags & MDSS_MDP_ROT_ONLY)) {
+		u32 src_w, src_h, dst_w, dst_h;
+
+		if (CHECK_BOUNDS(req->dst_rect.y, req->dst_rect.h, yres)) {
+			pr_err("invalid vertical destination: y=%d, h=%d\n",
+				req->dst_rect.y, req->dst_rect.h);
+			return -EOVERFLOW;
+		}
+
+		if (req->flags & MDP_ROT_90) {
+			dst_h = req->dst_rect.w;
+			dst_w = req->dst_rect.h;
+		} else {
+			dst_w = req->dst_rect.w;
+			dst_h = req->dst_rect.h;
+		}
+
+		src_w = DECIMATED_DIMENSION(req->src_rect.w, req->horz_deci);
+		src_h = DECIMATED_DIMENSION(req->src_rect.h, req->vert_deci);
+
+		if (src_w > mdata->max_pipe_width) {
+			pr_err("invalid source width=%d HDec=%d\n",
+					req->src_rect.w, req->horz_deci);
+			return -EINVAL;
+		}
+
+		if ((src_w * MAX_UPSCALE_RATIO) < dst_w) {
+			pr_err("too much upscaling Width %d->%d\n",
+			       req->src_rect.w, req->dst_rect.w);
+			return -EINVAL;
+		}
+
+		if ((src_h * MAX_UPSCALE_RATIO) < dst_h) {
+			pr_err("too much upscaling. Height %d->%d\n",
+			       req->src_rect.h, req->dst_rect.h);
+			return -EINVAL;
+		}
+
+		if (src_w > (dst_w * MAX_DOWNSCALE_RATIO)) {
+			pr_err("too much downscaling. Width %d->%d H Dec=%d\n",
+			       src_w, req->dst_rect.w, req->horz_deci);
+			return -EINVAL;
+		}
+
+		if (src_h > (dst_h * MAX_DOWNSCALE_RATIO)) {
+			pr_err("too much downscaling. Height %d->%d V Dec=%d\n",
+			       src_h, req->dst_rect.h, req->vert_deci);
+			return -EINVAL;
+		}
+
+		if (req->flags & MDP_BWC_EN) {
+			if ((req->src.width != req->src_rect.w) ||
+			    (req->src.height != req->src_rect.h)) {
+				pr_err("BWC: mismatch of src img=%dx%d rect=%dx%d\n",
+					req->src.width, req->src.height,
+					req->src_rect.w, req->src_rect.h);
+				return -EINVAL;
+			}
+
+			if ((req->flags & MDP_DECIMATION_EN) ||
+					req->vert_deci || req->horz_deci) {
+				pr_err("Can't enable BWC and decimation\n");
+				return -EINVAL;
+			}
+		}
+
+		if ((req->flags & MDP_DEINTERLACE) &&
+					!req->scale.enable_pxl_ext) {
+			if (req->flags & MDP_SOURCE_ROTATED_90) {
+				if ((req->src_rect.w % 4) != 0) {
+					pr_err("interlaced rect not h/4\n");
+					return -EINVAL;
+				}
+			} else if ((req->src_rect.h % 4) != 0) {
+				pr_err("interlaced rect not h/4\n");
+				return -EINVAL;
+			}
+		}
+	} else {
+		if (req->flags & MDP_DEINTERLACE) {
+			if ((req->src_rect.h % 4) != 0) {
+				pr_err("interlaced rect h not multiple of 4\n");
+				return -EINVAL;
+			}
+		}
+	}
+
+	if (fmt->is_yuv) {
+		if ((req->src_rect.x & 0x1) || (req->src_rect.y & 0x1) ||
+		    (req->src_rect.w & 0x1) || (req->src_rect.h & 0x1)) {
+			pr_err("invalid odd src resolution or coordinates\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * mdp_pipe_tune_perf() - fit a pipe's bandwidth/clock needs into HW limits.
+ * @pipe:  source pipe to tune
+ * @flags: base perf-calculation flags; clock fudge and SMP-size
+ *         calculation are always added here
+ *
+ * Recomputes the pipe's performance requirements in a loop, applying one
+ * extra step of vertical decimation each time the required MDP clock or
+ * per-pipe bandwidth exceeds what the hardware can do. Returns 0 once the
+ * requirements fit, or a negative error (-E2BIG) when no further
+ * decimation is possible.
+ */
+int mdp_pipe_tune_perf(struct mdss_mdp_pipe *pipe,
+	u32 flags)
+{
+	struct mdss_data_type *mdata = pipe->mixer_left->ctl->mdata;
+	struct mdss_mdp_perf_params perf;
+	int ret;
+
+	memset(&perf, 0, sizeof(perf));
+
+	flags |= PERF_CALC_PIPE_APPLY_CLK_FUDGE |
+		PERF_CALC_PIPE_CALC_SMP_SIZE;
+
+	while (true) {
+		bool can_decimate;
+
+		ret = mdss_mdp_perf_calc_pipe(pipe, &perf, NULL,
+			flags);
+
+		if (!ret && (perf.mdp_clk_rate <= mdata->max_mdp_clk_rate)) {
+			ret = mdss_mdp_perf_bw_check_pipe(&perf, pipe);
+			if (!ret)
+				return 0;
+			if ((ret == -E2BIG) &&
+			    !__is_more_decimation_doable(pipe)) {
+				pr_debug("pipe%d exceeded per pipe BW\n",
+					pipe->num);
+				return ret;
+			}
+		}
+
+		/*
+		 * Requirements not met yet: try one more step of vertical
+		 * decimation to reduce the MDP clock requirement. This is
+		 * only possible for unscaled, non-BWC, linear formats.
+		 */
+		can_decimate = mdata->has_decimation &&
+			(pipe->vert_deci < MAX_DECIMATION) &&
+			!pipe->bwc_mode && !pipe->scaler.enable &&
+			mdss_mdp_is_linear_format(pipe->src_fmt);
+		if (!can_decimate)
+			return -E2BIG;
+
+		pipe->vert_deci++;
+	}
+}
+
+/*
+ * __mdss_mdp_validate_pxl_extn() - validate user-supplied pixel extension
+ * settings against the pipe's (possibly decimated) source geometry.
+ *
+ * For each colour plane, checks that the total pixel count implied by the
+ * scaler's pixel-extension fields exactly matches what the fetch logic
+ * will read, and that the overfetch stays within the source image bounds.
+ * On any mismatch the scaler is disabled and -EINVAL is returned;
+ * otherwise returns 0.
+ */
+static int __mdss_mdp_validate_pxl_extn(struct mdss_mdp_pipe *pipe)
+{
+	int plane;
+
+	for (plane = 0; plane < MAX_PLANES; plane++) {
+		u32 hor_req_pixels, hor_fetch_pixels;
+		u32 hor_ov_fetch, vert_ov_fetch;
+		u32 vert_req_pixels, vert_fetch_pixels;
+		u32 src_w = DECIMATED_DIMENSION(pipe->src.w, pipe->horz_deci);
+		u32 src_h = DECIMATED_DIMENSION(pipe->src.h, pipe->vert_deci);
+
+		/*
+		 * plane 1 and 2 are for chroma and are same. While configuring
+		 * HW, programming only one of the chroma components is
+		 * sufficient.
+		 */
+		if (plane == 2)
+			continue;
+
+		/*
+		 * For chroma plane, width is half for the following sub sampled
+		 * formats. Except in case of decimation, where hardware avoids
+		 * 1 line of decimation instead of downsampling.
+		 */
+		if (plane == 1 && !pipe->horz_deci &&
+		    ((pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_420) ||
+		     (pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_H2V1))) {
+			src_w >>= 1;
+		}
+
+		/* same halving rule for chroma height on V-subsampled formats */
+		if (plane == 1 && !pipe->vert_deci &&
+		    ((pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_420) ||
+		     (pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_H1V2)))
+			src_h >>= 1;
+
+		/* pixels userspace claims the scaler needs horizontally */
+		hor_req_pixels = pipe->scaler.roi_w[plane] +
+			pipe->scaler.num_ext_pxls_left[plane] +
+			pipe->scaler.num_ext_pxls_right[plane];
+
+		/*
+		 * pixels the HW will actually fetch/repeat; fetch counts are
+		 * scaled down by the decimation factor on each axis.
+		 */
+		hor_fetch_pixels = src_w +
+			(pipe->scaler.left_ftch[plane] >> pipe->horz_deci) +
+			pipe->scaler.left_rpt[plane] +
+			(pipe->scaler.right_ftch[plane] >> pipe->horz_deci) +
+			pipe->scaler.right_rpt[plane];
+
+		/* overfetch (no repeats) must stay within the source image */
+		hor_ov_fetch = src_w +
+			(pipe->scaler.left_ftch[plane] >> pipe->horz_deci)+
+			(pipe->scaler.right_ftch[plane] >> pipe->horz_deci);
+
+		vert_req_pixels = pipe->scaler.num_ext_pxls_top[plane] +
+			pipe->scaler.num_ext_pxls_btm[plane];
+
+		vert_fetch_pixels =
+			(pipe->scaler.top_ftch[plane] >> pipe->vert_deci) +
+			pipe->scaler.top_rpt[plane] +
+			(pipe->scaler.btm_ftch[plane] >> pipe->vert_deci)+
+			pipe->scaler.btm_rpt[plane];
+
+		vert_ov_fetch = src_h +
+			(pipe->scaler.top_ftch[plane] >> pipe->vert_deci)+
+			(pipe->scaler.btm_ftch[plane] >> pipe->vert_deci);
+
+		/* reject inconsistent extensions or out-of-image overfetch */
+		if ((hor_req_pixels != hor_fetch_pixels) ||
+			(hor_ov_fetch > pipe->img_width) ||
+			(vert_req_pixels != vert_fetch_pixels) ||
+			(vert_ov_fetch > pipe->img_height)) {
+			pr_err("err: plane=%d h_req:%d h_fetch:%d v_req:%d v_fetch:%d\n",
+					plane,
+					hor_req_pixels, hor_fetch_pixels,
+					vert_req_pixels, vert_fetch_pixels);
+			pr_err("roi_w[%d]=%d, src_img:[%d, %d]\n",
+					plane, pipe->scaler.roi_w[plane],
+					pipe->img_width, pipe->img_height);
+			pipe->scaler.enable = 0;
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_overlay_setup_scaling() - compute (or validate) scaler config
+ * for a pipe.
+ *
+ * When userspace supplied a scaler configuration, only validate its pixel
+ * extensions (non-QSEED3 targets). Otherwise derive phase steps from the
+ * decimated source vs destination dimensions and compute either QSEED3
+ * config or pixel extensions. Horizontal phase-step overflow is tolerated;
+ * vertical overflow is only tolerated on VIG pipes (QSEED2), otherwise the
+ * layer is pushed back to GPU composition via -ECANCELED.
+ */
+int mdss_mdp_overlay_setup_scaling(struct mdss_mdp_pipe *pipe)
+{
+	u32 dim;
+	int ret = 0;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	bool has_qseed3 = test_bit(MDSS_CAPS_QSEED3, mdata->mdss_caps_map);
+
+	if (pipe->scaler.enable) {
+		if (!has_qseed3)
+			ret = __mdss_mdp_validate_pxl_extn(pipe);
+		return ret;
+	}
+
+	memset(&pipe->scaler, 0, sizeof(struct mdp_scale_data_v2));
+
+	dim = DECIMATED_DIMENSION(pipe->src.w, pipe->horz_deci);
+	ret = mdss_mdp_calc_phase_step(dim, pipe->dst.w,
+			&pipe->scaler.phase_step_x[0]);
+	if (ret == -EOVERFLOW) {
+		/* overflow on horizontal direction is acceptable */
+		ret = 0;
+	} else if (ret) {
+		pr_err("Horizontal scaling calculation failed=%d! %d->%d\n",
+				ret, dim, pipe->dst.w);
+		return ret;
+	}
+
+	dim = DECIMATED_DIMENSION(pipe->src.h, pipe->vert_deci);
+	ret = mdss_mdp_calc_phase_step(dim, pipe->dst.h,
+			&pipe->scaler.phase_step_y[0]);
+
+	if (ret == -EOVERFLOW) {
+		if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG) {
+			/* overflow on Qseed2 scaler is acceptable */
+			ret = 0;
+		} else {
+			/* overflow expected and should fallback to GPU */
+			ret = -ECANCELED;
+		}
+	} else if (ret) {
+		pr_err("Vertical scaling calculation failed=%d! %d->%d\n",
+				ret, dim, pipe->dst.h);
+	}
+
+	if (has_qseed3)
+		mdss_mdp_pipe_calc_qseed3_cfg(pipe);
+	else
+		mdss_mdp_pipe_calc_pixel_extn(pipe);
+
+	return ret;
+}
+
+/*
+ * Derive the pipe's per-axis chroma subsampling flags from its source
+ * format. Decimation on an axis cancels chroma subsampling there, since
+ * the hardware already drops pixels/lines in that direction.
+ */
+inline void mdss_mdp_overlay_set_chroma_sample(
+	struct mdss_mdp_pipe *pipe)
+{
+	pipe->chroma_sample_h = 0;
+	pipe->chroma_sample_v = 0;
+
+	if ((pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_420) ||
+	    (pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_H2V1))
+		pipe->chroma_sample_h = 1;
+
+	if ((pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_420) ||
+	    (pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_H1V2))
+		pipe->chroma_sample_v = 1;
+
+	if (pipe->horz_deci)
+		pipe->chroma_sample_h = 0;
+	if (pipe->vert_deci)
+		pipe->chroma_sample_v = 0;
+}
+
+/*
+ * mdss_mdp_overlay_pipe_setup() - validate an overlay request and program
+ * a source pipe for it.
+ * @mfd:             framebuffer device owning the overlay
+ * @req:             userspace overlay request (format, rects, flags, id)
+ * @ppipe:           set to the configured pipe on success
+ * @left_blend_pipe: pipe this overlay right-blends with, or NULL
+ * @is_single_layer: true when this is the only layer being staged
+ *
+ * For MSMFB_NEW_REQUEST a pipe of the requested (or auto-selected) type
+ * is allocated; otherwise the existing pipe is looked up via req->id.
+ * The request is validated and its parameters copied into the pipe,
+ * scaling and performance settings are computed, and SMP resources are
+ * reserved. On failure a pipe that was never displayed is destroyed and
+ * every overlay on this fb is marked dirty so it gets reconfigured.
+ */
+int mdss_mdp_overlay_pipe_setup(struct msm_fb_data_type *mfd,
+	struct mdp_overlay *req, struct mdss_mdp_pipe **ppipe,
+	struct mdss_mdp_pipe *left_blend_pipe, bool is_single_layer)
+{
+	struct mdss_mdp_format_params *fmt;
+	struct mdss_mdp_pipe *pipe;
+	struct mdss_mdp_mixer *mixer = NULL;
+	u32 pipe_type, mixer_mux;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+	int ret;
+	u32 bwc_enabled;
+	u32 rot90;
+	bool is_vig_needed = false;
+	u32 left_lm_w = left_lm_w_from_mfd(mfd);
+	u32 flags = 0;
+
+	if (mdp5_data->ctl == NULL)
+		return -ENODEV;
+
+	/* inline (non-rotator) 90 degree rotation is not supported here */
+	if (req->flags & MDP_ROT_90) {
+		pr_err("unsupported inline rotation\n");
+		return -EOPNOTSUPP;
+	}
+
+	if ((req->dst_rect.w > mdata->max_mixer_width) ||
+		(req->dst_rect.h > MAX_DST_H)) {
+		pr_err("exceeded max mixer supported resolution %dx%d\n",
+				req->dst_rect.w, req->dst_rect.h);
+		return -EOVERFLOW;
+	}
+
+	/* pick left/right layer mixer based on where the dst rect lands */
+	if (IS_RIGHT_MIXER_OV(req->flags, req->dst_rect.x, left_lm_w))
+		mixer_mux = MDSS_MDP_MIXER_MUX_RIGHT;
+	else
+		mixer_mux = MDSS_MDP_MIXER_MUX_LEFT;
+
+	pr_debug("ctl=%u req id=%x mux=%d z_order=%d flags=0x%x dst_x:%d\n",
+		mdp5_data->ctl->num, req->id, mixer_mux, req->z_order,
+		req->flags, req->dst_rect.x);
+
+	fmt = mdss_mdp_get_format_params(req->src.format);
+	if (!fmt) {
+		pr_err("invalid pipe format %d\n", req->src.format);
+		return -EINVAL;
+	}
+
+	bwc_enabled = req->flags & MDP_BWC_EN;
+	rot90 = req->flags & MDP_SOURCE_ROTATED_90;
+
+	/*
+	 * Always set yuv rotator output to pseudo planar.
+	 */
+	if (bwc_enabled || rot90) {
+		/* re-resolve the format after mapping to rotator output */
+		req->src.format =
+			mdss_mdp_get_rotator_dst_format(req->src.format, rot90,
+				bwc_enabled);
+		fmt = mdss_mdp_get_format_params(req->src.format);
+		if (!fmt) {
+			pr_err("invalid pipe format %d\n", req->src.format);
+			return -EINVAL;
+		}
+	}
+
+	ret = mdss_mdp_ov_xres_check(mfd, req);
+	if (ret)
+		return ret;
+
+	ret = mdss_mdp_overlay_req_check(mfd, req, fmt);
+	if (ret)
+		return ret;
+
+	mixer = mdss_mdp_mixer_get(mdp5_data->ctl, mixer_mux);
+	if (!mixer) {
+		pr_err("unable to get mixer\n");
+		return -ENODEV;
+	}
+
+	/* targets whose RGB pipes can't scale must use a VIG pipe here */
+	if ((mdata->has_non_scalar_rgb) &&
+		((req->src_rect.w != req->dst_rect.w) ||
+			(req->src_rect.h != req->dst_rect.h)))
+		is_vig_needed = true;
+
+	if (req->id == MSMFB_NEW_REQUEST) {
+		/* map the requested pipe type to a HW pipe class */
+		switch (req->pipe_type) {
+		case PIPE_TYPE_VIG:
+			pipe_type = MDSS_MDP_PIPE_TYPE_VIG;
+			break;
+		case PIPE_TYPE_RGB:
+			pipe_type = MDSS_MDP_PIPE_TYPE_RGB;
+			break;
+		case PIPE_TYPE_DMA:
+			pipe_type = MDSS_MDP_PIPE_TYPE_DMA;
+			break;
+		case PIPE_TYPE_CURSOR:
+			pipe_type = MDSS_MDP_PIPE_TYPE_CURSOR;
+			break;
+		case PIPE_TYPE_AUTO:
+		default:
+			if (req->flags & MDP_OV_PIPE_FORCE_DMA)
+				pipe_type = MDSS_MDP_PIPE_TYPE_DMA;
+			else if (fmt->is_yuv ||
+				(req->flags & MDP_OV_PIPE_SHARE) ||
+				is_vig_needed)
+				pipe_type = MDSS_MDP_PIPE_TYPE_VIG;
+			else
+				pipe_type = MDSS_MDP_PIPE_TYPE_RGB;
+			break;
+		}
+
+		pipe = mdss_mdp_pipe_alloc(mixer, pipe_type, left_blend_pipe);
+
+		/* RGB pipes can be used instead of DMA */
+		if (IS_ERR_OR_NULL(pipe) &&
+		    (req->pipe_type == PIPE_TYPE_AUTO) &&
+		    (pipe_type == MDSS_MDP_PIPE_TYPE_DMA)) {
+			pr_debug("giving RGB pipe for fb%d. flags:0x%x\n",
+				mfd->index, req->flags);
+			pipe_type = MDSS_MDP_PIPE_TYPE_RGB;
+			pipe = mdss_mdp_pipe_alloc(mixer, pipe_type,
+				left_blend_pipe);
+		}
+
+		/* VIG pipes can also support RGB format */
+		if (IS_ERR_OR_NULL(pipe) &&
+		    (req->pipe_type == PIPE_TYPE_AUTO) &&
+		    (pipe_type == MDSS_MDP_PIPE_TYPE_RGB)) {
+			pr_debug("giving ViG pipe for fb%d. flags:0x%x\n",
+				mfd->index, req->flags);
+			pipe_type = MDSS_MDP_PIPE_TYPE_VIG;
+			pipe = mdss_mdp_pipe_alloc(mixer, pipe_type,
+				left_blend_pipe);
+		}
+
+		if (IS_ERR(pipe)) {
+			return PTR_ERR(pipe);
+		} else if (!pipe) {
+			pr_err("error allocating pipe. flags=0x%x req->pipe_type=%d pipe_type=%d\n",
+				req->flags, req->pipe_type, pipe_type);
+			return -ENODEV;
+		}
+
+		ret = mdss_mdp_pipe_map(pipe);
+		if (ret) {
+			pr_err("unable to map pipe=%d\n", pipe->num);
+			return ret;
+		}
+
+		mutex_lock(&mdp5_data->list_lock);
+		list_add(&pipe->list, &mdp5_data->pipes_used);
+		mutex_unlock(&mdp5_data->list_lock);
+		pipe->mixer_left = mixer;
+		pipe->mfd = mfd;
+		pipe->play_cnt = 0;
+	} else {
+		/* reconfiguring an existing overlay: look the pipe up */
+		pipe = __overlay_find_pipe(mfd, req->id);
+		if (!pipe) {
+			pr_err("invalid pipe ndx=%x\n", req->id);
+			return -ENODEV;
+		}
+
+		ret = mdss_mdp_pipe_map(pipe);
+		if (IS_ERR_VALUE(ret)) {
+			pr_err("Unable to map used pipe%d ndx=%x\n",
+					pipe->num, pipe->ndx);
+			return ret;
+		}
+
+		if (is_vig_needed && (pipe->type != MDSS_MDP_PIPE_TYPE_VIG)) {
+			pr_err("pipe is non-scalar ndx=%x\n", req->id);
+			ret = -EINVAL;
+			goto exit_fail;
+		}
+
+		/*
+		 * Moving a pipe to the other mixer is only allowed while it
+		 * still belongs to this fb's ctl (cursor pipes excepted).
+		 */
+		if ((pipe->mixer_left != mixer) &&
+				(pipe->type != MDSS_MDP_PIPE_TYPE_CURSOR)) {
+			if (!mixer->ctl || (mixer->ctl->mfd != mfd)) {
+				pr_err("Can't switch mixer %d->%d pnum %d!\n",
+					pipe->mixer_left->num, mixer->num,
+						pipe->num);
+				ret = -EINVAL;
+				goto exit_fail;
+			}
+			pr_debug("switching pipe%d mixer %d->%d stage%d\n",
+				pipe->num,
+				pipe->mixer_left ? pipe->mixer_left->num : -1,
+				mixer->num, req->z_order);
+			mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
+			pipe->mixer_left = mixer;
+		}
+	}
+
+	/* a right-blend pipe must have higher priority than its left peer */
+	if (left_blend_pipe) {
+		if (pipe->priority <= left_blend_pipe->priority) {
+			pr_err("priority limitation. left:%d right%d\n",
+				left_blend_pipe->priority, pipe->priority);
+			ret = -EBADSLT;
+			goto exit_fail;
+		} else {
+			pr_debug("pipe%d is a right_pipe\n", pipe->num);
+			pipe->is_right_blend = true;
+		}
+	} else if (pipe->is_right_blend) {
+		/*
+		 * pipe used to be right blend need to update mixer
+		 * configuration to remove it as a right blend
+		 */
+		mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
+		mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_right);
+		pipe->is_right_blend = false;
+	}
+
+	/* fold the panel's flip orientation into the request flags */
+	if (mfd->panel_orientation)
+		req->flags ^= mfd->panel_orientation;
+
+	req->priority = pipe->priority;
+	/* identical request on a clean pipe: skip reprogramming entirely */
+	if (!pipe->dirty && !memcmp(req, &pipe->req_data, sizeof(*req))) {
+		pr_debug("skipping pipe_reconfiguration\n");
+		goto skip_reconfigure;
+	}
+
+	pipe->flags = req->flags;
+	if (bwc_enabled  &&  !mdp5_data->mdata->has_bwc) {
+		pr_err("BWC is not supported in MDP version %x\n",
+			mdp5_data->mdata->mdp_rev);
+		pipe->bwc_mode = 0;
+	} else {
+		/* BWC cannot be used while the mixer is in rotator mode */
+		pipe->bwc_mode = pipe->mixer_left->rotator_mode ?
+			0 : (bwc_enabled ? 1 : 0);
+	}
+	/* image dimensions are limited to 14 bits by the register fields */
+	pipe->img_width = req->src.width & 0x3fff;
+	pipe->img_height = req->src.height & 0x3fff;
+	pipe->src.x = req->src_rect.x;
+	pipe->src.y = req->src_rect.y;
+	pipe->src.w = req->src_rect.w;
+	pipe->src.h = req->src_rect.h;
+	pipe->dst.x = req->dst_rect.x;
+	pipe->dst.y = req->dst_rect.y;
+	pipe->dst.w = req->dst_rect.w;
+	pipe->dst.h = req->dst_rect.h;
+
+	if (mixer->ctl) {
+		pipe->dst.x += mixer->ctl->border_x_off;
+		pipe->dst.y += mixer->ctl->border_y_off;
+	}
+
+	/* mirror the destination rect for flipped panel orientations */
+	if (mfd->panel_orientation & MDP_FLIP_LR)
+		pipe->dst.x = pipe->mixer_left->width
+			- pipe->dst.x - pipe->dst.w;
+	if (mfd->panel_orientation & MDP_FLIP_UD)
+		pipe->dst.y = pipe->mixer_left->height
+			- pipe->dst.y - pipe->dst.h;
+
+	pipe->horz_deci = req->horz_deci;
+	pipe->vert_deci = req->vert_deci;
+
+	/*
+	 * check if overlay span across two mixers and if source split is
+	 * available. If yes, enable src_split_req flag so that during mixer
+	 * staging, same pipe will be stagged on both layer mixers.
+	 */
+	if (mdata->has_src_split) {
+		if ((pipe->type == MDSS_MDP_PIPE_TYPE_CURSOR) &&
+				is_split_lm(mfd)) {
+			pipe->src_split_req = true;
+		} else if ((mixer_mux == MDSS_MDP_MIXER_MUX_LEFT) &&
+		    ((req->dst_rect.x + req->dst_rect.w) > mixer->width)) {
+			if (req->dst_rect.x >= mixer->width) {
+				pr_err("%pS: err dst_x can't lie in right half",
+					__builtin_return_address(0));
+				pr_cont(" flags:0x%x dst x:%d w:%d lm_w:%d\n",
+					req->flags, req->dst_rect.x,
+					req->dst_rect.w, mixer->width);
+				ret = -EINVAL;
+				goto exit_fail;
+			} else {
+				pipe->src_split_req = true;
+			}
+		} else {
+			/* overlay no longer spans both LMs: undo the split */
+			if (pipe->src_split_req) {
+				mdss_mdp_mixer_pipe_unstage(pipe,
+					pipe->mixer_right);
+				pipe->mixer_right = NULL;
+			}
+			pipe->src_split_req = false;
+		}
+	}
+
+	/*
+	 * NOTE(review): copies only sizeof(struct mdp_scale_data) into the
+	 * mdp_scale_data_v2 scaler field — presumably the legacy prefix of
+	 * the v2 layout; confirm the structs share that prefix.
+	 */
+	memcpy(&pipe->scaler, &req->scale, sizeof(struct mdp_scale_data));
+	pipe->src_fmt = fmt;
+	mdss_mdp_overlay_set_chroma_sample(pipe);
+
+	pipe->mixer_stage = req->z_order;
+	pipe->is_fg = req->is_fg;
+	pipe->alpha = req->alpha;
+	pipe->transp = req->transp_mask;
+	pipe->blend_op = req->blend_op;
+	/* choose a default blend op based on the format's alpha support */
+	if (pipe->blend_op == BLEND_OP_NOT_DEFINED)
+		pipe->blend_op = fmt->alpha_enable ?
+					BLEND_OP_PREMULTIPLIED :
+					BLEND_OP_OPAQUE;
+
+	if (!fmt->alpha_enable && (pipe->blend_op != BLEND_OP_OPAQUE))
+		pr_debug("Unintended blend_op %d on layer with no alpha plane\n",
+			pipe->blend_op);
+
+	/* limit overfetch for unscaled, unrotated YUV sources */
+	if (fmt->is_yuv && !(pipe->flags & MDP_SOURCE_ROTATED_90) &&
+			!pipe->scaler.enable) {
+		pipe->overfetch_disable = OVERFETCH_DISABLE_BOTTOM;
+
+		if (!(pipe->flags & MDSS_MDP_DUAL_PIPE) ||
+		    IS_RIGHT_MIXER_OV(pipe->flags, pipe->dst.x, left_lm_w))
+			pipe->overfetch_disable |= OVERFETCH_DISABLE_RIGHT;
+		pr_debug("overfetch flags=%x\n", pipe->overfetch_disable);
+	} else {
+		pipe->overfetch_disable = 0;
+	}
+	pipe->bg_color = req->bg_color;
+
+	/* cursor pipes need none of the pp/scaling setup below */
+	if (pipe->type == MDSS_MDP_PIPE_TYPE_CURSOR)
+		goto cursor_done;
+
+	mdss_mdp_pipe_pp_clear(pipe);
+	if (pipe->flags & MDP_OVERLAY_PP_CFG_EN) {
+		memcpy(&pipe->pp_cfg, &req->overlay_pp_cfg,
+					sizeof(struct mdp_overlay_pp_params));
+		ret = mdss_mdp_pp_sspp_config(pipe);
+		if (ret) {
+			pr_err("failed to configure pp params ret %d\n", ret);
+			goto exit_fail;
+		}
+	}
+
+	/*
+	 * Populate Color Space.
+	 */
+	if (pipe->src_fmt->is_yuv && (pipe->type == MDSS_MDP_PIPE_TYPE_VIG))
+		pipe->csc_coeff_set = req->color_space;
+	/*
+	 * When scaling is enabled src crop and image
+	 * width and height is modified by user
+	 */
+	if ((pipe->flags & MDP_DEINTERLACE) && !pipe->scaler.enable) {
+		/* deinterlacing halves the source along the field axis */
+		if (pipe->flags & MDP_SOURCE_ROTATED_90) {
+			pipe->src.x = DIV_ROUND_UP(pipe->src.x, 2);
+			pipe->src.x &= ~1;
+			pipe->src.w /= 2;
+			pipe->img_width /= 2;
+		} else {
+			pipe->src.h /= 2;
+			pipe->src.y = DIV_ROUND_UP(pipe->src.y, 2);
+			pipe->src.y &= ~1;
+		}
+	}
+
+	if (is_single_layer)
+		flags |= PERF_CALC_PIPE_SINGLE_LAYER;
+
+	/* may bump pipe->vert_deci to satisfy clock/bandwidth limits */
+	ret = mdp_pipe_tune_perf(pipe, flags);
+	if (ret) {
+		pr_debug("unable to satisfy performance. ret=%d\n", ret);
+		goto exit_fail;
+	}
+
+	ret = mdss_mdp_overlay_setup_scaling(pipe);
+	if (ret)
+		goto exit_fail;
+
+	/* shared-SMP WFD targets release SMPs before re-reserving */
+	if ((mixer->type == MDSS_MDP_MIXER_TYPE_WRITEBACK) &&
+		(mdp5_data->mdata->wfd_mode == MDSS_MDP_WFD_SHARED))
+		mdss_mdp_smp_release(pipe);
+
+	ret = mdss_mdp_smp_reserve(pipe);
+	if (ret) {
+		pr_debug("mdss_mdp_smp_reserve failed. pnum:%d ret=%d\n",
+			pipe->num, ret);
+		goto exit_fail;
+	}
+
+
+	req->id = pipe->ndx;
+
+cursor_done:
+	/* report the (possibly adjusted) decimation back to userspace */
+	req->vert_deci = pipe->vert_deci;
+
+	pipe->req_data = *req;
+	pipe->dirty = false;
+
+	pipe->params_changed++;
+skip_reconfigure:
+	*ppipe = pipe;
+
+	mdss_mdp_pipe_unmap(pipe);
+
+	return ret;
+exit_fail:
+	mdss_mdp_pipe_unmap(pipe);
+
+	mutex_lock(&mdp5_data->list_lock);
+	/* destroy the pipe only if it never made it to the display */
+	if (pipe->play_cnt == 0) {
+		pr_debug("failed for pipe %d\n", pipe->num);
+		if (!list_empty(&pipe->list))
+			list_del_init(&pipe->list);
+		mdss_mdp_pipe_destroy(pipe);
+	}
+
+	/* invalidate any overlays in this framebuffer after failure */
+	list_for_each_entry(pipe, &mdp5_data->pipes_used, list) {
+		pr_debug("freeing allocations for pipe %d\n", pipe->num);
+		mdss_mdp_smp_unreserve(pipe);
+		pipe->params_changed = 0;
+		pipe->dirty = true;
+	}
+	mutex_unlock(&mdp5_data->list_lock);
+	return ret;
+}
+
+/*
+ * mdss_mdp_overlay_set() - MSMFB_OVERLAY_SET entry point.
+ *
+ * Takes the overlay lock, rejects the call while the panel is powered
+ * off, then either flags the request as a border-fill layer or runs the
+ * full pipe setup with the z-order translated to hardware stages.
+ */
+static int mdss_mdp_overlay_set(struct msm_fb_data_type *mfd,
+				struct mdp_overlay *req)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_pipe *pipe;
+	int ret;
+
+	ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
+	if (ret)
+		return ret;
+
+	if (mdss_fb_is_power_off(mfd)) {
+		mutex_unlock(&mdp5_data->ov_lock);
+		return -EPERM;
+	}
+
+	if (req->src.format != MDP_RGB_BORDERFILL) {
+		/* userspace zorder start with stage 0 */
+		req->z_order += MDSS_MDP_STAGE_0;
+		ret = mdss_mdp_overlay_pipe_setup(mfd, req, &pipe, NULL, false);
+		req->z_order -= MDSS_MDP_STAGE_0;
+	} else {
+		req->id = BORDERFILL_NDX;
+	}
+
+	mutex_unlock(&mdp5_data->ov_lock);
+
+	return ret;
+}
+
+/*
+ * it's caller responsibility to acquire mdp5_data->list_lock while calling
+ * this function
+ */
+struct mdss_mdp_data *mdss_mdp_overlay_buf_alloc(struct msm_fb_data_type *mfd,
+		struct mdss_mdp_pipe *pipe)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_data *buf;
+	int i;
+
+	/*
+	 * Replenish the buffer pool with a fresh chunk of BUF_POOL_SIZE
+	 * entries whenever it runs dry.
+	 */
+	if (list_empty(&mdp5_data->bufs_pool)) {
+		pr_debug("allocating %u bufs for fb%d\n",
+					BUF_POOL_SIZE, mfd->index);
+
+		buf = kcalloc(BUF_POOL_SIZE, sizeof(*buf), GFP_KERNEL);
+		if (!buf)
+			return NULL;
+
+		/* track the chunk itself so deinit can kfree() it later */
+		list_add(&buf->chunk_list, &mdp5_data->bufs_chunks);
+		kmemleak_not_leak(buf);
+
+		for (i = 0; i < BUF_POOL_SIZE; i++) {
+			/*
+			 * Fix: mark every entry unused, not just the first
+			 * one. Previously only buf[0].state was written and
+			 * the remaining entries relied on kcalloc zeroing
+			 * coinciding with MDP_BUF_STATE_UNUSED.
+			 */
+			buf[i].state = MDP_BUF_STATE_UNUSED;
+			list_add(&buf[i].buf_list, &mdp5_data->bufs_pool);
+		}
+	}
+
+	/* hand out the first free pool entry and stamp its bookkeeping */
+	buf = list_first_entry(&mdp5_data->bufs_pool,
+			struct mdss_mdp_data, buf_list);
+	WARN_ON(buf->state != MDP_BUF_STATE_UNUSED);
+	buf->state = MDP_BUF_STATE_READY;
+	buf->last_alloc = local_clock();
+	buf->last_pipe = pipe;
+
+	/* move to the in-use list and attach to the pipe's buffer queue */
+	list_move_tail(&buf->buf_list, &mdp5_data->bufs_used);
+	list_add_tail(&buf->pipe_list, &pipe->buf_queue);
+
+	pr_debug("buffer alloc: %pK\n", buf);
+
+	return buf;
+}
+
+static
+struct mdss_mdp_data *__mdp_overlay_buf_alloc(struct msm_fb_data_type *mfd,
+		struct mdss_mdp_pipe *pipe)
+{
+	struct mdss_mdp_data *ret;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+
+	/* locked wrapper: mdss_mdp_overlay_buf_alloc() needs list_lock held */
+	mutex_lock(&mdp5_data->list_lock);
+	ret = mdss_mdp_overlay_buf_alloc(mfd, pipe);
+	mutex_unlock(&mdp5_data->list_lock);
+
+	return ret;
+}
+
+/* Tear down the per-fb buffer pool and free its backing chunks. */
+static void mdss_mdp_overlay_buf_deinit(struct msm_fb_data_type *mfd)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_data *cur, *tmp;
+
+	pr_debug("performing cleanup of buffers pool on fb%d\n", mfd->index);
+
+	/* every buffer should have been returned to the pool by now */
+	WARN_ON(!list_empty(&mdp5_data->bufs_used));
+
+	/* unlink pool entries first; their storage lives inside the chunks */
+	list_for_each_entry_safe(cur, tmp, &mdp5_data->bufs_pool, buf_list)
+		list_del(&cur->buf_list);
+
+	/* now the chunk allocations themselves can be released */
+	list_for_each_entry_safe(cur, tmp, &mdp5_data->bufs_chunks, chunk_list) {
+		list_del(&cur->chunk_list);
+		kfree(cur);
+	}
+}
+
+/*
+ * it's caller responsibility to acquire mdp5_data->list_lock while calling
+ * this function
+ */
+void mdss_mdp_overlay_buf_free(struct msm_fb_data_type *mfd,
+		struct mdss_mdp_data *buf)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+
+	/* detach from the owning pipe's queue, if still attached */
+	if (!list_empty(&buf->pipe_list))
+		list_del_init(&buf->pipe_list);
+
+	mdss_mdp_data_free(buf, false, DMA_TO_DEVICE);
+
+	/* mark unused and record the release time for debugging */
+	buf->state = MDP_BUF_STATE_UNUSED;
+	buf->last_freed = local_clock();
+
+	pr_debug("buffer freed: %pK\n", buf);
+
+	/* return the entry to the free pool */
+	list_move_tail(&buf->buf_list, &mdp5_data->bufs_pool);
+}
+
+static void __mdp_overlay_buf_free(struct msm_fb_data_type *mfd,
+		struct mdss_mdp_data *buf)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+
+	/* locked wrapper: mdss_mdp_overlay_buf_free() needs list_lock held */
+	mutex_lock(&mdp5_data->list_lock);
+	mdss_mdp_overlay_buf_free(mfd, buf);
+	mutex_unlock(&mdp5_data->list_lock);
+}
+
+static inline void __pipe_buf_mark_cleanup(struct msm_fb_data_type *mfd,
+		struct mdss_mdp_data *buf)
+{
+	/*
+	 * Detach the buffer from its pipe; it remains on bufs_used, and
+	 * the CLEANUP state lets the post-commit pass reclaim it.
+	 */
+	list_del_init(&buf->pipe_list);
+	buf->state = MDP_BUF_STATE_CLEANUP;
+}
+
+/**
+ * __mdss_mdp_overlay_free_list_purge() - release all buffers pending free
+ * @mfd:	Msm frame buffer data structure for the associated fb
+ *
+ * Walks the framebuffer's free list and returns every buffer on it to
+ * the pool, releasing the underlying mapped memory in the process.
+ */
+static void __mdss_mdp_overlay_free_list_purge(struct msm_fb_data_type *mfd)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_data *cur, *next;
+
+	pr_debug("purging fb%d free list\n", mfd->index);
+
+	list_for_each_entry_safe(cur, next, &mdp5_data->bufs_freelist, buf_list)
+		mdss_mdp_overlay_buf_free(mfd, cur);
+}
+
+/* Queue a destroyed pipe's buffers for freeing and release the pipe. */
+static void __overlay_pipe_cleanup(struct msm_fb_data_type *mfd,
+		struct mdss_mdp_pipe *pipe)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_data *buf, *next;
+
+	list_for_each_entry_safe(buf, next, &pipe->buf_queue, pipe_list) {
+		__pipe_buf_mark_cleanup(mfd, buf);
+		list_move(&buf->buf_list, &mdp5_data->bufs_freelist);
+
+		/*
+		 * Secure UI buffers must not outlive their session, so
+		 * release them immediately instead of deferring to the
+		 * free list.
+		 */
+		if (pipe->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION)
+			mdss_mdp_overlay_buf_free(mfd, buf);
+	}
+
+	mdss_mdp_pipe_destroy(pipe);
+}
+
+/**
+ * mdss_mdp_overlay_cleanup() - handles cleanup after frame commit
+ * @mfd:           Msm frame buffer data structure for the associated fb
+ * @destroy_pipes: list of pipes that should be destroyed as part of cleanup
+ *
+ * Goes through destroy_pipes list and ensures they are ready to be
+ * destroyed and cleaned up: fetch is halted (unless the pipe's other
+ * multirect still holds a reference), unhaltable pipes trigger a
+ * recovery requeue, the pending free list is purged, and buffers marked
+ * for cleanup are moved to the free list before the pipes are released.
+ */
+static void mdss_mdp_overlay_cleanup(struct msm_fb_data_type *mfd,
+		struct list_head *destroy_pipes)
+{
+	struct mdss_mdp_pipe *pipe, *tmp;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+	bool recovery_mode = false;
+	bool skip_fetch_halt, pair_found;
+	struct mdss_mdp_data *buf, *tmpbuf;
+
+	mutex_lock(&mdp5_data->list_lock);
+	list_for_each_entry(pipe, destroy_pipes, list) {
+		pair_found = false;
+		skip_fetch_halt = false;
+		tmp = pipe;
+
+		/*
+		 * Find if second rect is in the destroy list from the current
+		 * position. So if both rects are part of the destroy list then
+		 * fetch halt will be skipped for the 1st rect.
+		 */
+		list_for_each_entry_from(tmp, destroy_pipes, list) {
+			if (tmp->num == pipe->num) {
+				pair_found = true;
+				break;
+			}
+		}
+
+		/* skip fetch halt if pipe's other rect is still in use */
+		if (!pair_found) {
+			tmp = (struct mdss_mdp_pipe *)pipe->multirect.next;
+			if (tmp)
+				skip_fetch_halt =
+					atomic_read(&tmp->kref.refcount);
+		}
+
+		/* make sure pipe fetch has been halted before freeing buffer */
+		if (!skip_fetch_halt && mdss_mdp_pipe_fetch_halt(pipe, false)) {
+			/*
+			 * if pipe is not able to halt. Enter recovery mode,
+			 * by un-staging any pipes that are attached to mixer
+			 * so that any freed pipes that are not able to halt
+			 * can be staged in solid fill mode and be reset
+			 * with next vsync
+			 */
+			if (!recovery_mode) {
+				recovery_mode = true;
+				mdss_mdp_mixer_unstage_all(ctl->mixer_left);
+				mdss_mdp_mixer_unstage_all(ctl->mixer_right);
+			}
+			pipe->params_changed++;
+			pipe->unhalted = true;
+			mdss_mdp_pipe_queue_data(pipe, NULL);
+		}
+	}
+
+	/* re-commit so the unstaged/solid-fill state reaches the HW */
+	if (recovery_mode) {
+		pr_warn("performing recovery sequence for fb%d\n", mfd->index);
+		__overlay_kickoff_requeue(mfd);
+	}
+
+	__mdss_mdp_overlay_free_list_purge(mfd);
+
+	/* collect buffers already marked for cleanup onto the free list */
+	list_for_each_entry_safe(buf, tmpbuf, &mdp5_data->bufs_used, buf_list) {
+		if (buf->state == MDP_BUF_STATE_CLEANUP)
+			list_move(&buf->buf_list, &mdp5_data->bufs_freelist);
+	}
+
+	list_for_each_entry_safe(pipe, tmp, destroy_pipes, list) {
+		list_del_init(&pipe->list);
+		if (recovery_mode) {
+			mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
+			mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_right);
+			pipe->mixer_stage = MDSS_MDP_STAGE_UNUSED;
+		}
+		__overlay_pipe_cleanup(mfd, pipe);
+
+		/*
+		 * NOTE(review): pipe is still dereferenced after
+		 * __overlay_pipe_cleanup(); presumably safe because pipes
+		 * live in static mdata arrays rather than being freed by
+		 * mdss_mdp_pipe_destroy() — confirm.
+		 */
+		if (pipe->multirect.num == MDSS_MDP_PIPE_RECT0) {
+			/*
+			 * track only RECT0, since at any given point there
+			 * can only be RECT0 only or RECT0 + RECT1
+			 */
+			ctl->mixer_left->next_pipe_map &= ~pipe->ndx;
+			if (ctl->mixer_right)
+				ctl->mixer_right->next_pipe_map &= ~pipe->ndx;
+		}
+	}
+	mutex_unlock(&mdp5_data->list_lock);
+}
+
+/*
+ * mdss_mdp_handoff_cleanup_pipes() - release pipes handed off from the
+ * bootloader for one pipe class (VIG/RGB/DMA), unstaging each and
+ * queuing it for cleanup.
+ */
+void mdss_mdp_handoff_cleanup_pipes(struct msm_fb_data_type *mfd,
+	u32 type)
+{
+	u32 i, npipes;
+	struct mdss_mdp_pipe *pipe;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+
+	/* select the pipe array matching the requested type */
+	if (type == MDSS_MDP_PIPE_TYPE_VIG) {
+		pipe = mdata->vig_pipes;
+		npipes = mdata->nvig_pipes;
+	} else if (type == MDSS_MDP_PIPE_TYPE_RGB) {
+		pipe = mdata->rgb_pipes;
+		npipes = mdata->nrgb_pipes;
+	} else if (type == MDSS_MDP_PIPE_TYPE_DMA) {
+		pipe = mdata->dma_pipes;
+		npipes = mdata->ndma_pipes;
+	} else {
+		return;
+	}
+
+	/* walk base rects only; stride past any additional rect entries */
+	for (i = 0; i < npipes; i++, pipe += pipe->multirect.max_rects) {
+		if (!pipe->is_handed_off)
+			continue;
+
+		pr_debug("Unmapping handed off pipe %d\n", pipe->num);
+		list_move(&pipe->list, &mdp5_data->pipes_cleanup);
+		mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
+		pipe->is_handed_off = false;
+	}
+}
+
+/**
+ * mdss_mdp_overlay_start() - Programs the MDP control data path to hardware
+ * @mfd: Msm frame buffer structure associated with fb device.
+ *
+ * Program the MDP hardware with the control settings for the framebuffer
+ * device. In addition to this, this function also handles the transition
+ * from the splash screen to the android boot animation when the
+ * continuous splash screen feature is enabled.
+ */
+int mdss_mdp_overlay_start(struct msm_fb_data_type *mfd)
+{
+	int rc;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+
+	/* already running: just release any leftover splash pipe */
+	if (mdss_mdp_ctl_is_power_on(ctl)) {
+		if (!mdp5_data->mdata->batfet)
+			mdss_mdp_batfet_ctrl(mdp5_data->mdata, true);
+		mdss_mdp_release_splash_pipe(mfd);
+		return 0;
+	} else if (mfd->panel_info->cont_splash_enabled) {
+		if (mdp5_data->allow_kickoff) {
+			mdp5_data->allow_kickoff = false;
+		} else {
+			/* reject empty commits while splash is still up */
+			mutex_lock(&mdp5_data->list_lock);
+			rc = list_empty(&mdp5_data->pipes_used);
+			mutex_unlock(&mdp5_data->list_lock);
+			if (rc) {
+				pr_debug("empty kickoff on fb%d during cont splash\n",
+					mfd->index);
+				return -EPERM;
+			}
+		}
+	} else if (mdata->handoff_pending) {
+		pr_warn("fb%d: commit while splash handoff pending\n",
+				mfd->index);
+		return -EPERM;
+	}
+
+	pr_debug("starting fb%d overlay\n", mfd->index);
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+	/*
+	 * If idle pc feature is not enabled, then get a reference to the
+	 * runtime device which will be released when overlay is turned off
+	 */
+	if (!mdp5_data->mdata->idle_pc_enabled ||
+		(mfd->panel_info->type != MIPI_CMD_PANEL)) {
+		rc = pm_runtime_get_sync(&mfd->pdev->dev);
+		if (IS_ERR_VALUE(rc)) {
+			pr_err("unable to resume with pm_runtime_get_sync rc=%d\n",
+				rc);
+			goto end;
+		}
+	}
+
+	/*
+	 * We need to do hw init before any hw programming.
+	 * Also, hw init involves programming the VBIF registers which
+	 * should be done only after attaching IOMMU which in turn would call
+	 * in to TZ to restore security configs on the VBIF registers.
+	 * This is not needed when continuous splash screen is enabled since
+	 * we would have called in to TZ to restore security configs from LK.
+	 */
+	if (!mfd->panel_info->cont_splash_enabled) {
+		rc = mdss_iommu_ctrl(1);
+		if (IS_ERR_VALUE(rc)) {
+			pr_err("iommu attach failed rc=%d\n", rc);
+			goto end;
+		}
+		mdss_hw_init(mdss_res);
+		mdss_iommu_ctrl(0);
+	}
+
+	/*
+	 * Increment the overlay active count prior to calling ctl_start.
+	 * This is needed to ensure that if idle power collapse kicks in
+	 * right away, it would be handled correctly.
+	 */
+	atomic_inc(&mdp5_data->mdata->active_intf_cnt);
+	rc = mdss_mdp_ctl_start(ctl, false);
+	if (rc == 0) {
+		mdss_mdp_ctl_notifier_register(mdp5_data->ctl,
+				&mfd->mdp_sync_pt_data.notifier);
+	} else {
+		pr_err("mdp ctl start failed.\n");
+		goto ctl_error;
+	}
+
+	/* Restore any previously configured PP features by resetting the dirty
+	 * bits for enabled features. The dirty bits will be consumed during the
+	 * first display commit when the PP hardware blocks are updated
+	 */
+	rc = mdss_mdp_pp_resume(mfd);
+	if (rc && (rc != -EPERM) && (rc != -ENODEV))
+		pr_err("PP resume err %d\n", rc);
+
+	rc = mdss_mdp_splash_cleanup(mfd, true);
+	if (!rc)
+		goto end;
+
+ctl_error:
+	/* undo ctl start and the active-interface count taken above */
+	mdss_mdp_ctl_destroy(ctl);
+	atomic_dec(&mdp5_data->mdata->active_intf_cnt);
+	mdp5_data->ctl = NULL;
+end:
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	return rc;
+}
+
+/*
+ * Arm the CPU PM event timer with the next expected display wakeup time.
+ * No-op when there is no event-timer handle or no wakeup time available.
+ */
+static void mdss_mdp_overlay_update_pm(struct mdss_overlay_private *mdp5_data)
+{
+	ktime_t wakeup_time;
+
+	if (mdp5_data->cpu_pm_hdl &&
+	    !mdss_mdp_display_wakeup_time(mdp5_data->ctl, &wakeup_time))
+		activate_event_timer(mdp5_data->cpu_pm_hdl, wakeup_time);
+}
+
+/*
+ * Remove @pipe from both mixers, mark it dirty so it is reprogrammed on
+ * next use, and (when present) mark @buf for cleanup.
+ */
+static void __unstage_pipe_and_clean_buf(struct msm_fb_data_type *mfd,
+		struct mdss_mdp_pipe *pipe, struct mdss_mdp_data *buf)
+{
+	pr_debug("unstaging pipe:%d rect:%d buf:%d\n",
+			pipe->num, pipe->multirect.num, !buf);
+	MDSS_XLOG(pipe->num, pipe->multirect.num, !buf);
+
+	/* detach from both mixers and force reconfiguration next commit */
+	mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
+	mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_right);
+	pipe->dirty = true;
+
+	if (buf != NULL)
+		__pipe_buf_mark_cleanup(mfd, buf);
+}
+
+/*
+ * __overlay_queue_pipes() - program every staged pipe for the next kickoff.
+ *
+ * Walks mdp5_data->pipes_used, selects the buffer each pipe should display
+ * (first ready buffer, or a flip to the next queued one), maps it and
+ * queues it to the pipe. Pipes that fail queueing are unstaged and marked
+ * dirty instead of failing the whole commit; only DMA-pipe reset or mixer
+ * switch failures cause an early error return.
+ */
+static int __overlay_queue_pipes(struct msm_fb_data_type *mfd)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_pipe *pipe;
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+	struct mdss_mdp_ctl *tmp;
+	int ret = 0;
+
+	list_for_each_entry(pipe, &mdp5_data->pipes_used, list) {
+		struct mdss_mdp_data *buf;
+
+		/* dirty pipes were unstaged earlier; leave them untouched */
+		if (pipe->dirty) {
+			pr_err("fb%d: pipe %d dirty! skipping configuration\n",
+					mfd->index, pipe->num);
+			continue;
+		}
+
+		/*
+		 * When secure display is enabled, if there is a non secure
+		 * display pipe, skip that
+		 */
+		if (mdss_get_sd_client_cnt() &&
+			!(pipe->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION)) {
+			pr_warn("Non secure pipe during secure display: %u: %08X, skip\n",
+					pipe->num, pipe->flags);
+			continue;
+		}
+		/*
+		 * When external is connected and no dedicated wfd is present,
+		 * reprogram DMA pipe before kickoff to clear out any previous
+		 * block mode configuration.
+		 */
+		if ((pipe->type == MDSS_MDP_PIPE_TYPE_DMA) &&
+		    (ctl->shared_lock &&
+		    (ctl->mdata->wfd_mode == MDSS_MDP_WFD_SHARED))) {
+			if (ctl->mdata->mixer_switched) {
+				ret = mdss_mdp_overlay_pipe_setup(mfd,
+					&pipe->req_data, &pipe, NULL, false);
+				pr_debug("resetting DMA pipe for ctl=%d",
+					 ctl->num);
+			}
+			if (ret) {
+				pr_err("can't reset DMA pipe ret=%d ctl=%d\n",
+					ret, ctl->num);
+				return ret;
+			}
+
+			tmp = mdss_mdp_ctl_mixer_switch(ctl,
+					MDSS_MDP_WB_CTL_TYPE_LINE);
+			if (!tmp)
+				return -EINVAL;
+			pipe->mixer_left = mdss_mdp_mixer_get(tmp,
+					MDSS_MDP_MIXER_MUX_DEFAULT);
+		}
+
+		/* pick the buffer to display for this pipe, if any queued */
+		buf = list_first_entry_or_null(&pipe->buf_queue,
+				struct mdss_mdp_data, pipe_list);
+		if (buf) {
+			switch (buf->state) {
+			case MDP_BUF_STATE_READY:
+				pr_debug("pnum=%d buf=%pK first buffer ready\n",
+						pipe->num, buf);
+				break;
+			case MDP_BUF_STATE_ACTIVE:
+				if (list_is_last(&buf->pipe_list,
+						&pipe->buf_queue)) {
+					pr_debug("pnum=%d no buf update\n",
+							pipe->num);
+				} else {
+					/* note: shadows the outer ctl 'tmp' */
+					struct mdss_mdp_data *tmp = buf;
+					/*
+					 * buffer flip, new buffer will
+					 * replace currently active one,
+					 * mark currently active for cleanup
+					 */
+					buf = list_next_entry(tmp, pipe_list);
+					__pipe_buf_mark_cleanup(mfd, tmp);
+				}
+				break;
+			default:
+				pr_err("invalid state of buf %pK=%d\n",
+						buf, buf->state);
+				WARN_ON(1);
+				break;
+			}
+		}
+
+		/* ensure pipes are reconfigured after power off/on */
+		if (ctl->play_cnt == 0)
+			pipe->params_changed++;
+
+		if (buf && (buf->state == MDP_BUF_STATE_READY)) {
+			buf->state = MDP_BUF_STATE_ACTIVE;
+			ret = mdss_mdp_data_map(buf, false, DMA_TO_DEVICE);
+		} else if (!pipe->params_changed &&
+			   !mdss_mdp_is_roi_changed(pipe->mfd)) {
+
+			/*
+			 * no update for the given pipe nor any change in the
+			 * ROI so skip pipe programming and continue with next.
+			 */
+			continue;
+		} else if (buf) {
+			WARN_ON(buf->state != MDP_BUF_STATE_ACTIVE);
+			pr_debug("requeueing active buffer on pnum=%d\n",
+					pipe->num);
+		} else if ((pipe->flags & MDP_SOLID_FILL) == 0) {
+			pr_warn("commit without buffer on pipe %d\n",
+				pipe->num);
+			ret = -EINVAL;
+		}
+		/*
+		 * if we reach here without errors and buf == NULL
+		 * then solid fill will be set
+		 */
+		if (!IS_ERR_VALUE(ret))
+			ret = mdss_mdp_pipe_queue_data(pipe, buf);
+
+		if (IS_ERR_VALUE(ret)) {
+			pr_warn("Unable to queue data for pnum=%d rect=%d\n",
+					pipe->num, pipe->multirect.num);
+
+			/*
+			 * If we fail for a multi-rect pipe, unstage both rects
+			 * so we don't leave the pipe configured in multi-rect
+			 * mode with only one rectangle staged.
+			 */
+			if (pipe->multirect.mode !=
+					MDSS_MDP_PIPE_MULTIRECT_NONE) {
+				struct mdss_mdp_pipe *next_pipe =
+					(struct mdss_mdp_pipe *)
+					pipe->multirect.next;
+
+				if (next_pipe) {
+					struct mdss_mdp_data *next_buf =
+						list_first_entry_or_null(
+							&next_pipe->buf_queue,
+							struct mdss_mdp_data,
+							pipe_list);
+
+					__unstage_pipe_and_clean_buf(mfd,
+							next_pipe, next_buf);
+				} else {
+					pr_warn("cannot find rect pnum=%d\n",
+							pipe->num);
+				}
+			}
+
+			__unstage_pipe_and_clean_buf(mfd, pipe, buf);
+		}
+	}
+
+	/*
+	 * Per-pipe queue failures were handled above by unstaging, so
+	 * success is reported even if individual pipes were dropped.
+	 */
+	return 0;
+}
+
+/*
+ * __overlay_kickoff_requeue() - recovery path: flush the current frame,
+ * unstage everything, re-queue the used pipes and commit once more.
+ */
+static void __overlay_kickoff_requeue(struct msm_fb_data_type *mfd)
+{
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+
+	/* flush whatever is currently configured and wait for it */
+	mdss_mdp_display_commit(ctl, NULL, NULL);
+	mdss_mdp_display_wait4comp(ctl);
+
+	/* unstage any recovery pipes and re-queue used pipes */
+	mdss_mdp_mixer_unstage_all(ctl->mixer_left);
+	mdss_mdp_mixer_unstage_all(ctl->mixer_right);
+
+	__overlay_queue_pipes(mfd);
+
+	/* commit again with the freshly queued pipes and wait */
+	mdss_mdp_display_commit(ctl, NULL,  NULL);
+	mdss_mdp_display_wait4comp(ctl);
+}
+
+/*
+ * mdss_mdp_commit_cb() - per-stage callback invoked during display commit.
+ * @commit_stage: which point of the commit sequence is being signalled.
+ * @data: opaque pointer; actually the owning msm_fb_data_type.
+ *
+ * Takes ov_lock when the frame is ready for kickoff and releases it once
+ * HW setup is done (also notifying FRAME_CTX_DONE). Always returns 0.
+ */
+static int mdss_mdp_commit_cb(enum mdp_commit_stage_type commit_stage,
+	void *data)
+{
+	int ret = 0;
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)data;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_ctl *ctl;
+
+	switch (commit_stage) {
+	case MDP_COMMIT_STAGE_SETUP_DONE:
+		ctl = mfd_to_ctl(mfd);
+		mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_CTX_DONE);
+		mdp5_data->kickoff_released = true;
+		/* release the lock taken at READY_FOR_KICKOFF */
+		mutex_unlock(&mdp5_data->ov_lock);
+		break;
+	case MDP_COMMIT_STAGE_READY_FOR_KICKOFF:
+		mutex_lock(&mdp5_data->ov_lock);
+		break;
+	default:
+		/* fix: message was missing its terminating newline */
+		pr_err("Invalid commit stage %x\n", commit_stage);
+		break;
+	}
+
+	return ret;
+}
+
+/**
+ * __is_roi_valid() - Check if ctl roi is valid for a given pipe.
+ * @pipe: pipe to check against.
+ * @l_roi: roi of the left ctl path.
+ * @r_roi: roi of the right ctl path.
+ *
+ * Validate roi against pipe's destination rectangle by checking following
+ * conditions. If any of these conditions are met then return failure,
+ * success otherwise.
+ *
+ * 1. Pipe has scaling and pipe's destination is intersecting with roi.
+ * 2. Pipe's destination and roi do not overlap, In such cases, pipe should
+ *    not be part of used list and should have been omitted by user program.
+ *
+ * Return: true when the ROI is valid for @pipe, false otherwise.
+ */
+static bool __is_roi_valid(struct mdss_mdp_pipe *pipe,
+	struct mdss_rect *l_roi, struct mdss_rect *r_roi)
+{
+	bool ret = true;
+	bool is_right_mixer = pipe->mixer_left->is_right_mixer;
+	struct mdss_rect roi = is_right_mixer ? *r_roi : *l_roi;
+	struct mdss_rect dst = pipe->dst;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 left_lm_w = left_lm_w_from_mfd(pipe->mfd);
+
+	if (pipe->src_split_req) {
+		if (roi.w) {
+			/* left_roi is valid */
+			roi.w += r_roi->w;
+		} else {
+			/*
+			 * if we come here then left_roi is zero but pipe's
+			 * output is crossing LM boundary if it was Full Screen
+			 * update. In such case, if right ROI's (x+w) is less
+			 * than pipe's dst_x then #2 check will fail even
+			 * though in full coordinate system it is valid.
+			 * ex:
+			 *    left_lm_w = 800;
+			 *    pipe->dst.x = 400;
+			 *    pipe->dst.w = 800;
+			 *    r_roi.x + r_roi.w = 300;
+			 * To avoid such pitfall, extend ROI for comparison.
+			 */
+			roi.w += left_lm_w + r_roi->w;
+		}
+	}
+
+	if (mdata->has_src_split && is_right_mixer)
+		dst.x -= left_lm_w;
+
+	/* condition #1 above */
+	if ((pipe->scaler.enable) ||
+	    (pipe->src.w != dst.w) || (pipe->src.h != dst.h)) {
+		struct mdss_rect res;
+
+		mdss_mdp_intersect_rect(&res, &dst, &roi);
+
+		/* fix: corrected "interesecting" typo in the error message */
+		if (!mdss_rect_cmp(&res, &dst)) {
+			pr_err("error. pipe%d has scaling and its output is intersecting with roi.\n",
+				pipe->num);
+			pr_err("pipe_dst:-> %d %d %d %d roi:-> %d %d %d %d\n",
+				dst.x, dst.y, dst.w, dst.h,
+				roi.x, roi.y, roi.w, roi.h);
+			ret = false;
+			goto end;
+		}
+	}
+
+	/* condition #2 above */
+	if (!mdss_rect_overlap_check(&dst, &roi)) {
+		pr_err("error. pipe%d's output is outside of ROI.\n",
+			pipe->num);
+		ret = false;
+	}
+end:
+	return ret;
+}
+
+/*
+ * mdss_mode_switch() - switch panel operating mode for fb device @mfd.
+ * @mode: SWITCH_RESOLUTION, MIPI_CMD_PANEL or MIPI_VIDEO_PANEL.
+ *
+ * Stops (or reconfigures) the ctl path for the requested mode, updates the
+ * panel info, and restarts the ctl at the end. Mode validation is expected
+ * to have been done in the ioctl path before this is called.
+ */
+int mdss_mode_switch(struct msm_fb_data_type *mfd, u32 mode)
+{
+	struct mdss_rect l_roi, r_roi;
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_ctl *sctl;
+	int rc = 0;
+
+	pr_debug("fb%d switch to mode=%x\n", mfd->index, mode);
+	ATRACE_FUNC();
+
+	/* record the pending switch on both ctl paths for split displays */
+	ctl->pending_mode_switch = mode;
+	sctl = mdss_mdp_get_split_ctl(ctl);
+	if (sctl)
+		sctl->pending_mode_switch = mode;
+
+	/* No need for mode validation. It has been done in ioctl call */
+	if (mode == SWITCH_RESOLUTION) {
+		if (ctl->ops.reconfigure) {
+			/* wait for previous frame to complete before switch */
+			if (ctl->ops.wait_pingpong)
+				rc = ctl->ops.wait_pingpong(ctl, NULL);
+			if (!rc && sctl && sctl->ops.wait_pingpong)
+				rc = sctl->ops.wait_pingpong(sctl, NULL);
+			if (rc) {
+				pr_err("wait for pp failed before resolution switch\n");
+				return rc;
+			}
+
+			/*
+			 * Configure the mixer parameters before the switch as
+			 * the DSC parameter calculation is based on the mixer
+			 * ROI. And set it to full ROI as driver expects the
+			 * first frame after the resolution switch to be a
+			 * full frame update.
+			 */
+			if (ctl->mixer_left) {
+				l_roi = (struct mdss_rect) {0, 0,
+					ctl->mixer_left->width,
+					ctl->mixer_left->height};
+				ctl->mixer_left->roi_changed = true;
+				ctl->mixer_left->valid_roi = true;
+			}
+			if (ctl->mixer_right) {
+				r_roi = (struct mdss_rect) {0, 0,
+					ctl->mixer_right->width,
+					ctl->mixer_right->height};
+				ctl->mixer_right->roi_changed = true;
+				ctl->mixer_right->valid_roi = true;
+			}
+			mdss_mdp_set_roi(ctl, &l_roi, &r_roi);
+
+			mutex_lock(&mdp5_data->ov_lock);
+			ctl->ops.reconfigure(ctl, mode, 1);
+			mutex_unlock(&mdp5_data->ov_lock);
+		/*
+		 * For Video mode panels, reconfigure is not defined.
+		 * So doing an explicit ctrl stop during resolution switch
+		 * to balance the ctrl start at the end of this function.
+		 */
+		} else {
+			mdss_mdp_ctl_stop(ctl, MDSS_PANEL_POWER_OFF);
+		}
+	} else if (mode == MIPI_CMD_PANEL) {
+		/*
+		 * Need to reset roi if there was partial update in previous
+		 * Command frame
+		 */
+		l_roi = (struct mdss_rect){0, 0,
+				ctl->mixer_left->width,
+				ctl->mixer_left->height};
+		if (ctl->mixer_right) {
+			r_roi = (struct mdss_rect) {0, 0,
+				ctl->mixer_right->width,
+				ctl->mixer_right->height};
+		}
+		mdss_mdp_set_roi(ctl, &l_roi, &r_roi);
+		mdss_mdp_switch_roi_reset(ctl);
+
+		mdss_mdp_switch_to_cmd_mode(ctl, 1);
+		mdss_mdp_update_panel_info(mfd, 1, 0);
+		mdss_mdp_switch_to_cmd_mode(ctl, 0);
+		mdss_mdp_ctl_stop(ctl, MDSS_PANEL_POWER_OFF);
+	} else if (mode == MIPI_VIDEO_PANEL) {
+		/*
+		 * NOTE(review): rc from wait_pingpong is not checked in this
+		 * path before proceeding — confirm this is intentional.
+		 */
+		if (ctl->ops.wait_pingpong)
+			rc = ctl->ops.wait_pingpong(ctl, NULL);
+		mdss_mdp_update_panel_info(mfd, 0, 0);
+		mdss_mdp_switch_to_vid_mode(ctl, 1);
+		mdss_mdp_ctl_stop(ctl, MDSS_PANEL_POWER_OFF);
+		mdss_mdp_switch_to_vid_mode(ctl, 0);
+	} else {
+		pr_err("Invalid mode switch arg %d\n", mode);
+		return -EINVAL;
+	}
+
+	/* restart the ctl that was stopped/reconfigured above */
+	mdss_mdp_ctl_start(ctl, true);
+	ATRACE_END(__func__);
+
+	return 0;
+}
+
+/*
+ * mdss_mode_switch_post() - finish a panel mode switch started by
+ * mdss_mode_switch(): send the DCS switch command (video), rebalance DSI
+ * clock refcounts (command), or complete the reconfigure (resolution),
+ * then clear the pending_mode_switch flags.
+ */
+int mdss_mode_switch_post(struct msm_fb_data_type *mfd, u32 mode)
+{
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+	struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
+	struct dsi_panel_clk_ctrl clk_ctrl;
+	int rc = 0;
+	u32 frame_rate = 0;
+
+	if (mode == MIPI_VIDEO_PANEL) {
+		/*
+		 * Need to make sure one frame has been sent in
+		 * video mode prior to issuing the mode switch
+		 * DCS to panel.
+		 */
+		frame_rate = mdss_panel_get_framerate
+			(&(ctl->panel_data->panel_info),
+			FPS_RESOLUTION_HZ);
+		/* clamp to a sane fps, then sleep one frame period in ms */
+		if (!(frame_rate >= 24 && frame_rate <= 240))
+			frame_rate = 24;
+		frame_rate = ((1000/frame_rate) + 1);
+		msleep(frame_rate);
+
+		pr_debug("%s, start\n", __func__);
+		rc = mdss_mdp_ctl_intf_event(ctl,
+			MDSS_EVENT_DSI_DYNAMIC_SWITCH,
+			(void *) MIPI_VIDEO_PANEL, CTL_INTF_EVENT_FLAG_DEFAULT);
+		pr_debug("%s, end\n", __func__);
+	} else if (mode == MIPI_CMD_PANEL) {
+		/*
+		 * Needed to balance out clk refcount when going
+		 * from video to command. This allows for idle
+		 * power collapse to work as intended.
+		 */
+		clk_ctrl.state = MDSS_DSI_CLK_OFF;
+		clk_ctrl.client = DSI_CLK_REQ_DSI_CLIENT;
+		if (sctl)
+			mdss_mdp_ctl_intf_event(sctl,
+				MDSS_EVENT_PANEL_CLK_CTRL, (void *)&clk_ctrl,
+				CTL_INTF_EVENT_FLAG_SKIP_BROADCAST);
+
+		mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_PANEL_CLK_CTRL,
+			(void *)&clk_ctrl, CTL_INTF_EVENT_FLAG_SKIP_BROADCAST);
+	} else if (mode == SWITCH_RESOLUTION) {
+		if (ctl->ops.reconfigure)
+			rc = ctl->ops.reconfigure(ctl, mode, 0);
+	}
+	/* the switch is complete on both ctl paths */
+	ctl->pending_mode_switch = 0;
+	if (sctl)
+		sctl->pending_mode_switch = 0;
+
+	return rc;
+}
+
+/*
+ * __validate_and_set_roi() - derive and program the ctl ROIs for a commit.
+ *
+ * Takes the user-supplied left/right ROIs from @commit, validates them
+ * against partial-update constraints and every staged pipe; any violation
+ * (or a NULL/zero commit, disabled partial update, pending resolution
+ * switch) falls back to full-screen ROIs before programming the ctl.
+ */
+static void __validate_and_set_roi(struct msm_fb_data_type *mfd,
+	struct mdp_display_commit *commit)
+{
+	struct mdss_mdp_pipe *pipe;
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_rect l_roi = {0}, r_roi = {0};
+	struct mdp_rect tmp_roi = {0};
+	bool skip_partial_update = true;
+
+	if (!commit)
+		goto set_roi;
+
+	/* both ROIs all-zero means the caller requested no partial update */
+	if (!memcmp(&commit->l_roi, &tmp_roi, sizeof(tmp_roi)) &&
+	    !memcmp(&commit->r_roi, &tmp_roi, sizeof(tmp_roi)))
+		goto set_roi;
+
+	rect_copy_mdp_to_mdss(&commit->l_roi, &l_roi);
+	rect_copy_mdp_to_mdss(&commit->r_roi, &r_roi);
+
+	pr_debug("input: l_roi:-> %d %d %d %d r_roi:-> %d %d %d %d\n",
+		l_roi.x, l_roi.y, l_roi.w, l_roi.h,
+		r_roi.x, r_roi.y, r_roi.w, r_roi.h);
+
+	/*
+	 * Configure full ROI
+	 * - If partial update is disabled
+	 * - If it is the first frame update after dynamic resolution switch
+	 */
+	if (!ctl->panel_data->panel_info.partial_update_enabled
+			|| (ctl->pending_mode_switch == SWITCH_RESOLUTION))
+		goto set_roi;
+
+	skip_partial_update = false;
+
+	if (is_split_lm(mfd) && mdp5_data->mdata->has_src_split) {
+		u32 left_lm_w = left_lm_w_from_mfd(mfd);
+		struct mdss_rect merged_roi = l_roi;
+
+		/*
+		 * When source split is enabled on split LM displays,
+		 * user program merges left and right ROI and sends
+		 * it through l_roi. Split this merged ROI into
+		 * left/right ROI for validation.
+		 */
+		mdss_rect_split(&merged_roi, &l_roi, &r_roi, left_lm_w);
+
+		/*
+		 * When source split is enabled on split LM displays,
+		 * it is a HW requirement that both LM have same width
+		 * if update is on both sides. Since ROIs are
+		 * generated by user-land program, validate against
+		 * this requirement.
+		 */
+		if (l_roi.w && r_roi.w && (l_roi.w != r_roi.w)) {
+			pr_err("error. ROI's do not match. violating src_split requirement\n");
+			pr_err("l_roi:-> %d %d %d %d r_roi:-> %d %d %d %d\n",
+				l_roi.x, l_roi.y, l_roi.w, l_roi.h,
+				r_roi.x, r_roi.y, r_roi.w, r_roi.h);
+			skip_partial_update = true;
+			goto set_roi;
+		}
+	}
+
+	/* every staged pipe must be compatible with the requested ROIs */
+	list_for_each_entry(pipe, &mdp5_data->pipes_used, list) {
+		if (!__is_roi_valid(pipe, &l_roi, &r_roi)) {
+			skip_partial_update = true;
+			pr_err("error. invalid pu config for pipe%d: %d,%d,%d,%d\n",
+				pipe->num,
+				pipe->dst.x, pipe->dst.y,
+				pipe->dst.w, pipe->dst.h);
+			break;
+		}
+	}
+
+set_roi:
+	/* fallback: full-mixer ROIs on both paths */
+	if (skip_partial_update) {
+		l_roi = (struct mdss_rect){0, 0,
+				ctl->mixer_left->width,
+				ctl->mixer_left->height};
+		if (ctl->mixer_right) {
+			r_roi = (struct mdss_rect) {0, 0,
+					ctl->mixer_right->width,
+					ctl->mixer_right->height};
+		}
+	}
+
+	pr_debug("after processing: %s l_roi:-> %d %d %d %d r_roi:-> %d %d %d %d\n",
+		(l_roi.w && l_roi.h && r_roi.w && r_roi.h) ? "left+right" :
+			((l_roi.w && l_roi.h) ? "left-only" : "right-only"),
+		l_roi.x, l_roi.y, l_roi.w, l_roi.h,
+		r_roi.x, r_roi.y, r_roi.w, r_roi.h);
+
+	mdss_mdp_set_roi(ctl, &l_roi, &r_roi);
+}
+
+/* Return true when @cadence is one of the FRC cadence patterns we handle. */
+static bool __is_supported_candence(int cadence)
+{
+	switch (cadence) {
+	case FRC_CADENCE_22:
+	case FRC_CADENCE_23:
+	case FRC_CADENCE_23223:
+		return true;
+	default:
+		return false;
+	}
+}
+
+/* compute how many vsyncs between these 2 timestamp */
+static int __compute_vsync_diff(s64 cur_ts,
+	s64 base_ts, int display_fp1000s)
+{
+	int nvsync;
+	s64 scaled_delta = (cur_ts - base_ts) * display_fp1000s;
+
+	/* scaled_delta is now the vsync count in units of 1/1000 vsync */
+	do_div(scaled_delta, 1000000);
+	nvsync = (int)scaled_delta;
+	/*
+	 * In most case DIV_ROUND_UP_ULL is enough, but calculation might be
+	 * impacted by possible jitter when vsync_diff is close to boundaries.
+	 * E.g., we have 30fps like 12.0->13.998->15.999->18.0->19.998->21.999
+	 * and 7460.001->7462.002->7464.0->7466.001->7468.002. DIV_ROUND_UP_ULL
+	 * fails in the later case.
+	 */
+	if ((nvsync % 1000) >= 900)
+		nvsync = nvsync / 1000 + 1;
+	else
+		nvsync = nvsync / 1000;
+
+	/* extra +1 rounds up to accommodate fractions: base & diff */
+	return nvsync + 1;
+}
+
+/* Return false for inputs FRC should ignore (e.g. repeated frames). */
+static bool __validate_frc_info(struct mdss_mdp_frc_info *frc_info)
+{
+	struct mdss_mdp_frc_data *cur_frc = &frc_info->cur_frc;
+	struct mdss_mdp_frc_data *last_frc = &frc_info->last_frc;
+
+	pr_debug("frc: cur_fcnt=%d, cur_ts=%lld, last_fcnt=%d, last_ts=%lld, base_fcnt=%d, base_ts=%lld last_v_cnt=%d, last_repeat=%d base_v_cnt=%d\n",
+		cur_frc->frame_cnt, cur_frc->timestamp,
+		last_frc->frame_cnt, last_frc->timestamp,
+		frc_info->base_frc.frame_cnt, frc_info->base_frc.timestamp,
+		frc_info->last_vsync_cnt, frc_info->last_repeat,
+		frc_info->base_vsync_cnt);
+
+	/* ignore repeated frame: video w/ UI layers */
+	if ((cur_frc->frame_cnt == last_frc->frame_cnt) &&
+			(cur_frc->timestamp == last_frc->timestamp)) {
+		pr_debug("repeated frame input\n");
+		return false;
+	}
+
+	return true;
+}
+
+/* Reset the cadence-detection sample collection state. */
+static void __init_cadence_calc(struct mdss_mdp_frc_cadence_calc *calc)
+{
+	memset(calc, 0, sizeof(*calc));
+}
+
+/*
+ * __calculate_cadence_id() - classify the video cadence from @cnt samples.
+ *
+ * Computes the average display-vsync/video-frame ratio (scaled by 1000)
+ * across the collected samples and maps it to one of the known cadence
+ * ids. Returns FRC_CADENCE_NONE when no known cadence matches.
+ */
+static int __calculate_cadence_id(struct mdss_mdp_frc_info *frc_info, int cnt)
+{
+	struct mdss_mdp_frc_cadence_calc *calc = &frc_info->calc;
+	struct mdss_mdp_frc_data *first = &calc->samples[0];
+	struct mdss_mdp_frc_data *last = &calc->samples[cnt-1];
+	s64 ts_diff =
+		(last->timestamp - first->timestamp)
+				* frc_info->display_fp1000s;
+	u32 fcnt_diff =
+		last->frame_cnt - first->frame_cnt;
+	u32 fps_ratio;
+	u32 cadence_id = FRC_CADENCE_NONE;
+
+	/* fix: identical frame counts would make do_div divide by zero */
+	if (!fcnt_diff)
+		return FRC_CADENCE_NONE;
+
+	do_div(ts_diff, fcnt_diff);
+	fps_ratio = (u32)ts_diff;
+
+	if ((fps_ratio > FRC_CADENCE_23_RATIO_LOW) &&
+			(fps_ratio < FRC_CADENCE_23_RATIO_HIGH))
+		cadence_id = FRC_CADENCE_23;
+	else if ((fps_ratio > FRC_CADENCE_22_RATIO_LOW) &&
+			(fps_ratio < FRC_CADENCE_22_RATIO_HIGH))
+		cadence_id = FRC_CADENCE_22;
+	else if ((fps_ratio > FRC_CADENCE_23223_RATIO_LOW) &&
+			(fps_ratio < FRC_CADENCE_23223_RATIO_HIGH))
+		cadence_id = FRC_CADENCE_23223;
+
+	pr_debug("frc: first=%lld, last=%lld, cnt=%d, fps_ratio=%u, cadence_id=%d\n",
+			first->timestamp, last->timestamp, fcnt_diff,
+			fps_ratio, cadence_id);
+
+	return cadence_id;
+}
+
+/* Program the repeat-sequence generator for the detected cadence id. */
+static void __init_seq_gen(struct mdss_mdp_frc_seq_gen *gen, int cadence_id)
+{
+	static const int seq22[] = {2, 2};
+	static const int seq23[] = {2, 3};
+	static const int seq23223[] = {2, 3, 2, 2, 3};
+	const int *seq = NULL;
+	int len = 0;
+
+	memset(gen, 0, sizeof(*gen));
+	gen->pos = -EBADSLT;
+	gen->base = -1;
+
+	if (cadence_id == FRC_CADENCE_22) {
+		seq = seq22;
+		len = 2;
+	} else if (cadence_id == FRC_CADENCE_23) {
+		seq = seq23;
+		len = 2;
+	} else if (cadence_id == FRC_CADENCE_23223) {
+		seq = seq23223;
+		len = 5;
+	}
+
+	if (len > 0) {
+		memcpy(gen->seq, seq, len * sizeof(int));
+		gen->len = len;
+		gen->retry = 0;
+	}
+
+	pr_debug("init sequence, cadence=%d len=%d\n", cadence_id, len);
+}
+
+/*
+ * Find where the cached repeat samples line up inside the cadence
+ * sequence. Returns the matched starting position, 0 after too many
+ * failed attempts (default position), or -EBADSLT when no rotation of
+ * the sequence matches the cache.
+ */
+static int __match_sequence(struct mdss_mdp_frc_seq_gen *gen)
+{
+	int len = gen->len;
+	int start, idx;
+
+	/* use default position if many attempts have failed */
+	if (gen->retry++ >= FRC_CADENCE_SEQUENCE_MAX_RETRY)
+		return 0;
+
+	for (start = 0; start < len; start++) {
+		bool matched = true;
+
+		for (idx = 0; idx < len; idx++) {
+			if (gen->cache[(idx + len - 1) % len] !=
+					gen->seq[(start + idx) % len]) {
+				matched = false;
+				break;
+			}
+		}
+		if (matched)
+			return start;
+	}
+
+	return -EBADSLT;
+}
+
+/* Drop the collected repeat samples and the vsync base. */
+static void __reset_cache(struct mdss_mdp_frc_seq_gen *gen)
+{
+	memset(gen->cache, 0, gen->len * sizeof(gen->cache[0]));
+	gen->base = -1;
+}
+
+/*
+ * __cache_last() - record the repeat interval of the last frame so the
+ * cadence sequence position can be matched once enough samples exist.
+ *
+ * Fix: the original scan `while (gen->cache[i] && (i < gen->len))` tested
+ * the cache entry before the bounds check, reading one element past the
+ * cache when it was full; the write below was likewise unguarded. The
+ * bounds check now short-circuits first and a full cache bails out.
+ */
+static void __cache_last(struct mdss_mdp_frc_seq_gen *gen, int expected_vsync)
+{
+	int i = 0;
+
+	/* only cache last in case of pre-defined cadence */
+	if ((gen->pos >= 0) || (gen->len <= 0))
+		return;
+
+	/* set first sample's expected vsync as base */
+	if (gen->base < 0) {
+		gen->base = expected_vsync;
+		return;
+	}
+
+	/* find the first unfilled slot (cache is 0 if not filled) */
+	while ((i < gen->len) && gen->cache[i])
+		i++;
+	if (i >= gen->len)
+		return;	/* cache unexpectedly full; nothing safe to store */
+
+	gen->cache[i] = expected_vsync - gen->base;
+	gen->base = expected_vsync;
+
+	if (i == (gen->len - 1)) {
+		/* find init pos in sequence when cache is full */
+		gen->pos = __match_sequence(gen);
+		/* reset cache and re-collect samples for matching */
+		if (gen->pos < 0)
+			__reset_cache(gen);
+	}
+}
+
+/* True once a cadence sequence exists and a starting position was found. */
+static inline bool __is_seq_gen_matched(struct mdss_mdp_frc_seq_gen *gen)
+{
+	if (gen->len <= 0)
+		return false;
+	return gen->pos >= 0;
+}
+
+/*
+ * Return the next repeat count from the matched cadence sequence and
+ * advance the sequence position; -1 when no sequence has been matched.
+ */
+static int __expected_repeat(struct mdss_mdp_frc_seq_gen *gen)
+{
+	int repeat;
+
+	if (!__is_seq_gen_matched(gen))
+		return -1;
+
+	repeat = gen->seq[gen->pos];
+	gen->pos = (gen->pos + 1) % gen->len;
+	return repeat;
+}
+
+/* Detect a change in the panel frame rate (in frames per 1000 seconds). */
+static bool __is_display_fps_changed(struct msm_fb_data_type *mfd,
+	struct mdss_mdp_frc_info *frc_info)
+{
+	u32 fp1000s = mdss_panel_get_framerate(mfd->panel_info,
+						FPS_RESOLUTION_KHZ);
+
+	if (frc_info->display_fp1000s == fp1000s)
+		return false;
+
+	pr_debug("fps changes from %d to %d\n",
+		frc_info->display_fp1000s, fp1000s);
+	return true;
+}
+
+/*
+ * __is_video_fps_changed() - detect a video frame-rate change by comparing
+ * the elapsed time of consecutive FRC_VIDEO_FPS_DETECT_WINDOW-frame
+ * windows. Statistics (frame_cnt/timestamp/last_delta) are only refreshed
+ * when a full window has elapsed, so the check is made once per window.
+ */
+static bool __is_video_fps_changed(struct mdss_mdp_frc_info *frc_info)
+{
+	bool video_fps_changed = false;
+
+	if ((frc_info->cur_frc.frame_cnt - frc_info->video_stat.frame_cnt)
+			== FRC_VIDEO_FPS_DETECT_WINDOW) {
+		s64 delta_t = frc_info->cur_frc.timestamp -
+			frc_info->video_stat.timestamp;
+
+		/* first window after a re-base has last_delta == 0: skip */
+		if (frc_info->video_stat.last_delta) {
+			video_fps_changed =
+				abs64(delta_t - frc_info->video_stat.last_delta)
+				> (FRC_VIDEO_FPS_CHANGE_THRESHOLD_US *
+					FRC_VIDEO_FPS_DETECT_WINDOW);
+
+			if (video_fps_changed)
+				pr_info("video fps changed from [%d]%lld to [%d]%lld\n",
+					frc_info->video_stat.frame_cnt,
+					frc_info->video_stat.last_delta,
+					frc_info->cur_frc.frame_cnt,
+					delta_t);
+		}
+
+		frc_info->video_stat.frame_cnt = frc_info->cur_frc.frame_cnt;
+		frc_info->video_stat.timestamp = frc_info->cur_frc.timestamp;
+		frc_info->video_stat.last_delta = delta_t;
+	}
+
+	return video_fps_changed;
+}
+
+/*
+ * Treat a negative or abnormally large timestamp jump between consecutive
+ * frames as a seek (fast-forward/rewind).
+ */
+static bool __is_video_seeking(struct mdss_mdp_frc_info *frc_info)
+{
+	s64 delta =
+		frc_info->cur_frc.timestamp - frc_info->last_frc.timestamp;
+
+	if ((delta >= 0) && (delta <= FRC_VIDEO_TS_DELTA_THRESHOLD_US))
+		return false;
+
+	pr_debug("video seeking: %lld -> %lld\n",
+		frc_info->last_frc.timestamp,
+		frc_info->cur_frc.timestamp);
+	return true;
+}
+
+/*
+ * Detect a skipped frame count (buffer drop) and accumulate the drop
+ * statistics used later by __is_too_many_drops().
+ */
+static bool __is_buffer_dropped(struct mdss_mdp_frc_info *frc_info)
+{
+	struct mdss_mdp_frc_drop_stat *drop_stat;
+	int dropped = frc_info->cur_frc.frame_cnt -
+			frc_info->last_frc.frame_cnt;
+
+	if (dropped <= 1)
+		return false;
+
+	/* collect dropping statistics */
+	drop_stat = &frc_info->drop_stat;
+	if (!drop_stat->drop_cnt)
+		drop_stat->frame_cnt = frc_info->last_frc.frame_cnt;
+	drop_stat->drop_cnt++;
+
+	pr_info("video buffer drop from %d to %d\n",
+		frc_info->last_frc.frame_cnt,
+		frc_info->cur_frc.frame_cnt);
+	return true;
+}
+
+/*
+ * True when more than FRC_MAX_VIDEO_DROPPING_CNT drops happened within a
+ * short frame window; resets the drop counter whenever the limit is hit.
+ */
+static bool __is_too_many_drops(struct mdss_mdp_frc_info *frc_info)
+{
+	struct mdss_mdp_frc_drop_stat *drop_stat = &frc_info->drop_stat;
+
+	if (drop_stat->drop_cnt <= FRC_MAX_VIDEO_DROPPING_CNT)
+		return false;
+
+	frc_info->drop_stat.drop_cnt = 0;
+	return (frc_info->cur_frc.frame_cnt - drop_stat->frame_cnt
+		< FRC_VIDEO_DROP_TOLERANCE_WINDOW);
+}
+
+static bool __is_video_cnt_rollback(struct mdss_mdp_frc_info *frc_info)
+{
+	/* video frame_cnt is assumed to increase monotonically */
+	if ((frc_info->cur_frc.frame_cnt >= frc_info->last_frc.frame_cnt) &&
+	    (frc_info->cur_frc.frame_cnt >= frc_info->base_frc.frame_cnt))
+		return false;
+
+	pr_info("video frame_cnt rolls back from %d to %d\n",
+		frc_info->last_frc.frame_cnt,
+		frc_info->cur_frc.frame_cnt);
+	return true;
+}
+
+/*
+ * A pause shows up as exactly one new frame after an unusually long
+ * stretch of display vsyncs.
+ */
+static bool __is_video_pause(struct msm_fb_data_type *mfd,
+	struct mdss_mdp_frc_info *frc_info)
+{
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+
+	if ((frc_info->cur_frc.frame_cnt - frc_info->last_frc.frame_cnt) != 1)
+		return false;
+	if ((ctl->vsync_cnt - frc_info->last_vsync_cnt) <=
+			FRC_VIDEO_PAUSE_THRESHOLD)
+		return false;
+
+	pr_debug("video paused: vsync elapsed %d\n",
+		ctl->vsync_cnt - frc_info->last_vsync_cnt);
+	return true;
+}
+
+/*
+ * Workaround for some cases that video has the same timestamp for
+ * different frame. E.g., video player might provide the same frame
+ * twice to codec when seeking/flushing.
+ */
+static bool __is_timestamp_duplicated(struct mdss_mdp_frc_info *frc_info)
+{
+	if (frc_info->cur_frc.frame_cnt == frc_info->last_frc.frame_cnt)
+		return false;
+	if (frc_info->cur_frc.timestamp != frc_info->last_frc.timestamp)
+		return false;
+
+	pr_info("timestamp of frame %d and %d are duplicated\n",
+		frc_info->last_frc.frame_cnt,
+		frc_info->cur_frc.frame_cnt);
+	return true;
+}
+
+/*
+ * __set_frc_base() - re-baseline FRC tracking at the current frame.
+ * Resets cadence detection and video statistics so subsequent vsync and
+ * timestamp math is computed relative to this frame.
+ */
+static void __set_frc_base(struct msm_fb_data_type *mfd,
+	struct mdss_mdp_frc_info *frc_info)
+{
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+
+	/* anchor vsync count and frame data to "now" */
+	frc_info->base_vsync_cnt = ctl->vsync_cnt;
+	frc_info->base_frc = frc_info->cur_frc;
+	frc_info->last_frc = frc_info->cur_frc;
+	frc_info->last_repeat = 0;
+	frc_info->last_vsync_cnt = 0;
+	frc_info->cadence_id = FRC_CADENCE_NONE;
+
+	/* restart video fps statistics from the current frame */
+	frc_info->video_stat.last_delta = 0;
+	frc_info->video_stat.frame_cnt = frc_info->cur_frc.frame_cnt;
+	frc_info->video_stat.timestamp = frc_info->cur_frc.timestamp;
+	frc_info->display_fp1000s =
+		mdss_panel_get_framerate(mfd->panel_info, FPS_RESOLUTION_KHZ);
+
+	pr_debug("frc_base: vsync_cnt=%d frame_cnt=%d timestamp=%lld\n",
+		frc_info->base_vsync_cnt, frc_info->cur_frc.frame_cnt,
+		frc_info->cur_frc.timestamp);
+}
+
+/* calculate when we'd like to kickoff current frame based on its timestamp */
+static int __calculate_remaining_vsync(struct msm_fb_data_type *mfd,
+	struct mdss_mdp_frc_info *frc_info)
+{
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+	int nvsync, target_vsync_cnt, remaining;
+
+	/* vsync intervals between the current frame and the FRC base */
+	nvsync = __compute_vsync_diff(frc_info->cur_frc.timestamp,
+			frc_info->base_frc.timestamp,
+			frc_info->display_fp1000s);
+
+	/* absolute vsync count at which this frame should kick off */
+	target_vsync_cnt = frc_info->base_vsync_cnt + nvsync;
+	/* vsyncs still to elapse before that point */
+	remaining = target_vsync_cnt - ctl->vsync_cnt;
+
+	pr_debug("frc: expected_vsync_cnt=%d, cur_vsync_cnt=%d, remaining=%d\n",
+		target_vsync_cnt, ctl->vsync_cnt, remaining);
+
+	return remaining;
+}
+
+/* tune latency computed previously if possible jitter exists */
+static int __tune_possible_jitter(struct msm_fb_data_type *mfd,
+	struct mdss_mdp_frc_info *frc_info, int remaining_vsync)
+{
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+	int remaining = remaining_vsync;
+	/* always consumed: advances the cadence sequence position */
+	int expected_repeat = __expected_repeat(&frc_info->gen);
+	int expected_vsync_cnt, cur_repeat;
+
+	if (!frc_info->cadence_id || (expected_repeat <= 0))
+		return remaining;
+
+	expected_vsync_cnt = remaining + ctl->vsync_cnt;
+	/* how many times current frame will be repeated */
+	cur_repeat = expected_vsync_cnt - frc_info->last_vsync_cnt;
+
+	remaining -= cur_repeat - expected_repeat;
+	pr_debug("frc: tune vsync, input=%d, output=%d, last_repeat=%d, cur_repeat=%d, expected_repeat=%d\n",
+		remaining_vsync, remaining, frc_info->last_repeat,
+		cur_repeat, expected_repeat);
+
+	return remaining;
+}
+
+/* compute how many vsync we still need to wait for keeping cadence */
+static int __calculate_remaining_repeat(struct msm_fb_data_type *mfd,
+	struct mdss_mdp_frc_info *frc_info)
+{
+	/* ideal wait from timestamps, then corrected for cadence jitter */
+	return __tune_possible_jitter(mfd, frc_info,
+			__calculate_remaining_vsync(mfd, frc_info));
+}
+
+/*
+ * __repeat_current_frame() - block until the current frame has been shown
+ * for @repeat more vsyncs.
+ *
+ * Fix: the original loop spun on ctl->vsync_cnt even when no
+ * wait_vsync_fnc hook was installed, busy-waiting the CPU indefinitely;
+ * bail out early in that case instead.
+ */
+static int __repeat_current_frame(struct mdss_mdp_ctl *ctl, int repeat)
+{
+	int expected_vsync = ctl->vsync_cnt + repeat;
+	int cnt = 0;
+	int ret = 0;
+
+	/* nothing to block on without a vsync wait hook */
+	if (!ctl->ops.wait_vsync_fnc)
+		return 0;
+
+	while (ctl->vsync_cnt < expected_vsync) {
+		cnt++;
+		ret = ctl->ops.wait_vsync_fnc(ctl);
+		if (ret < 0)
+			break;
+	}
+
+	if (ret)
+		pr_err("wrong waiting: repeat %d, actual: %d\n", repeat, cnt);
+
+	return ret;
+}
+
+/* Record the just-kicked frame and its vsync bookkeeping for next time. */
+static void __save_last_frc_info(struct mdss_mdp_ctl *ctl,
+	struct mdss_mdp_frc_info *frc_info)
+{
+	/* save last data */
+	frc_info->last_frc = frc_info->cur_frc;
+	/* repeat count must be computed before last_vsync_cnt is updated */
+	frc_info->last_repeat = ctl->vsync_cnt - frc_info->last_vsync_cnt;
+	frc_info->last_vsync_cnt = ctl->vsync_cnt;
+}
+
+/* FSM state-entry hook: restart cadence sample collection. */
+static void cadence_detect_callback(struct mdss_mdp_frc_fsm *frc_fsm)
+{
+	__init_cadence_calc(&frc_fsm->frc_info.calc);
+}
+
+/* FSM state-entry hook: build the repeat sequence for the cadence. */
+static void seq_match_callback(struct mdss_mdp_frc_fsm *frc_fsm)
+{
+	__init_seq_gen(&frc_fsm->frc_info.gen, frc_fsm->frc_info.cadence_id);
+}
+
+/* FSM state-entry hook: mark the cadence as disabled. */
+static void frc_disable_callback(struct mdss_mdp_frc_fsm *frc_fsm)
+{
+	frc_fsm->frc_info.cadence_id = FRC_CADENCE_DISABLE;
+}
+
+/* default behavior of FRC FSM */
+static bool __is_frc_state_changed_in_default(struct msm_fb_data_type *mfd,
+	struct mdss_mdp_frc_info *frc_info)
+{
+	/*
+	 * Need change to INIT state in case of any of these changes:
+	 *
+	 * 1) video frame_cnt has been rolled back by codec.
+	 * 2) video fast-forward or rewind. Sometimes video seeking might
+	 *    cause buffer drop as well, so check seek ahead of buffer drop
+	 *    in order to avoid duplicated check.
+	 * 3) buffer drop.
+	 * 4) display fps has changed.
+	 * 5) video frame rate has changed.
+	 * 6) video pauses. it could be considered as lag case.
+	 * 7) duplicated timestamp of different frames which breaks FRC.
+	 */
+	return (__is_video_cnt_rollback(frc_info) ||
+		__is_video_seeking(frc_info) ||
+		__is_buffer_dropped(frc_info) ||
+		__is_display_fps_changed(mfd, frc_info) ||
+		__is_video_fps_changed(frc_info) ||
+		__is_video_pause(mfd, frc_info) ||
+		__is_timestamp_duplicated(frc_info));
+}
+
+/*
+ * Default pre-frc hook: permanently disable FRC on excessive buffer drops,
+ * otherwise fall back to INIT whenever the default state-change checks fire.
+ * Both transitions are latched immediately via update_state.
+ */
+static void __pre_frc_in_default(struct mdss_mdp_frc_fsm *frc_fsm, void *arg)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)arg;
+	struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
+
+	if (__is_too_many_drops(frc_info)) {
+		/*
+		 * disable frc when dropping too many buffers, this might happen
+		 * in some extreme cases like video is heavily loaded so any
+		 * extra latency could make things worse.
+		 */
+		pr_info("disable frc because there're too many drops\n");
+		mdss_mdp_frc_fsm_change_state(frc_fsm,
+			FRC_STATE_DISABLE, frc_disable_callback);
+		mdss_mdp_frc_fsm_update_state(frc_fsm);
+	} else if (__is_frc_state_changed_in_default(mfd, frc_info)) {
+		/* FRC status changed so reset to INIT state */
+		mdss_mdp_frc_fsm_change_state(frc_fsm, FRC_STATE_INIT, NULL);
+		mdss_mdp_frc_fsm_update_state(frc_fsm);
+	}
+}
+
+/* no-op do_frc placeholder for states with no per-frame work (see ops table) */
+static void __do_frc_in_default(struct mdss_mdp_frc_fsm *frc_fsm, void *arg)
+{
+	/* do nothing */
+}
+
+/*
+ * Default post-frc hook: record the committed frame info and latch any
+ * state transition requested earlier in this round.
+ */
+static void __post_frc_in_default(struct mdss_mdp_frc_fsm *frc_fsm, void *arg)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)arg;
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+	struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
+
+	__save_last_frc_info(ctl, frc_info);
+
+	/* update frc_fsm state to new state for the next round */
+	mdss_mdp_frc_fsm_update_state(frc_fsm);
+}
+
+/* behavior of FRC FSM in INIT state */
+static void __do_frc_in_init_state(struct mdss_mdp_frc_fsm *frc_fsm, void *arg)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)arg;
+	struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
+
+	/* capture the base frame/vsync reference, then start cadence detect */
+	__set_frc_base(mfd, frc_info);
+
+	mdss_mdp_frc_fsm_change_state(frc_fsm,
+		FRC_STATE_CADENCE_DETECT, cadence_detect_callback);
+}
+
+/* behavior of FRC FSM in CADENCE_DETECT state */
+static void __do_frc_in_cadence_detect_state(struct mdss_mdp_frc_fsm *frc_fsm,
+	void *arg)
+{
+	struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
+	struct mdss_mdp_frc_cadence_calc *calc = &frc_info->calc;
+
+	if (calc->sample_cnt < FRC_CADENCE_DETECT_WINDOW) {
+		calc->samples[calc->sample_cnt++] = frc_info->cur_frc;
+	} else {
+		/*
+		 * Get enough samples and check candence. FRC_CADENCE_23
+		 * and FRC_CADENCE_22 need >= 2 deltas, and >= 5 deltas
+		 * are necessary for computing FRC_CADENCE_23223.
+		 */
+		u32 cadence_id = FRC_CADENCE_23;
+		/* per-cadence sample count, indexed by cadence id */
+		u32 sample_cnt[FRC_MAX_SUPPORT_CADENCE] = {0, 5, 5, 6};
+
+		/* try each candidate cadence id in order */
+		while (cadence_id < FRC_CADENCE_FREE_RUN) {
+			if (cadence_id ==
+					__calculate_cadence_id(frc_info,
+						sample_cnt[cadence_id]))
+				break;
+			cadence_id++;
+		}
+
+		frc_info->cadence_id = cadence_id;
+		pr_info("frc: cadence_id=%d\n", cadence_id);
+
+		/* detected supported cadence, start sequence match */
+		if (__is_supported_candence(frc_info->cadence_id))
+			mdss_mdp_frc_fsm_change_state(frc_fsm,
+				FRC_STATE_SEQ_MATCH, seq_match_callback);
+		else
+			mdss_mdp_frc_fsm_change_state(frc_fsm,
+					FRC_STATE_FREERUN, NULL);
+	}
+}
+
+/* behavior of FRC FSM in SEQ_MATCH state */
+static void __do_frc_in_seq_match_state(struct mdss_mdp_frc_fsm *frc_fsm,
+	void *arg)
+{
+	struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
+	struct mdss_mdp_frc_data *cur_frc = &frc_info->cur_frc;
+	struct mdss_mdp_frc_data *base_frc = &frc_info->base_frc;
+	int vsync_diff;
+
+	/* how many vsync intervals between current & base */
+	vsync_diff = __compute_vsync_diff(cur_frc->timestamp,
+			base_frc->timestamp, frc_info->display_fp1000s);
+
+	/* cache vsync diff to compute start pos in cadence */
+	__cache_last(&frc_info->gen, vsync_diff);
+
+	/* once the generator locks onto the cadence, FRC is ready to run */
+	if (__is_seq_gen_matched(&frc_info->gen))
+		mdss_mdp_frc_fsm_change_state(frc_fsm, FRC_STATE_READY, NULL);
+}
+
+/* behavior of FRC FSM in FREE_RUN state */
+/* returns true if FREERUN should fall back to INIT and re-detect cadence */
+static bool __is_frc_state_changed_in_freerun_state(
+	struct msm_fb_data_type *mfd,
+	struct mdss_mdp_frc_info *frc_info)
+{
+	/*
+	 * Only need change to INIT state in case of 2 changes:
+	 *
+	 * 1) display fps has changed.
+	 * 2) video frame rate has changed.
+	 */
+	return (__is_display_fps_changed(mfd, frc_info) ||
+		__is_video_fps_changed(frc_info));
+}
+
+/* FREERUN pre-frc hook: only fps changes can pull the FSM back to INIT */
+static void __pre_frc_in_freerun_state(struct mdss_mdp_frc_fsm *frc_fsm,
+	void *arg)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)arg;
+	struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
+
+	/* FRC status changed so reset to INIT state */
+	if (__is_frc_state_changed_in_freerun_state(mfd, frc_info)) {
+		/* update state to INIT immediately */
+		mdss_mdp_frc_fsm_change_state(frc_fsm, FRC_STATE_INIT, NULL);
+		mdss_mdp_frc_fsm_update_state(frc_fsm);
+	}
+}
+
+/* behavior of FRC FSM in READY state */
+static void __do_frc_in_ready_state(struct mdss_mdp_frc_fsm *frc_fsm, void *arg)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)arg;
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+	struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
+	struct mdss_mdp_frc_data *cur_frc = &frc_info->cur_frc;
+
+	/* how many extra vsyncs the previous frame must stay on screen */
+	int remaining_repeat =
+		__calculate_remaining_repeat(mfd, frc_info);
+
+	mdss_debug_frc_add_kickoff_sample_pre(ctl, frc_info, remaining_repeat);
+
+	/* video arrives later than expected */
+	if (remaining_repeat < 0) {
+		pr_info("Frame %d lags behind %d vsync\n",
+				cur_frc->frame_cnt, -remaining_repeat);
+		/* cadence is broken; restart detection and don't wait */
+		mdss_mdp_frc_fsm_change_state(frc_fsm, FRC_STATE_INIT, NULL);
+		remaining_repeat = 0;
+	}
+
+	/* debugfs knob can force frame repeating off */
+	if (mdss_debug_frc_frame_repeat_disabled())
+		remaining_repeat = 0;
+
+	__repeat_current_frame(ctl, remaining_repeat);
+
+	mdss_debug_frc_add_kickoff_sample_post(ctl, frc_info, remaining_repeat);
+}
+
+/* behavior of FRC FSM in DISABLE state */
+/* no-op pre_frc: a disabled FSM performs no checks (see ops table) */
+static void __pre_frc_in_disable_state(struct mdss_mdp_frc_fsm *frc_fsm,
+	void *arg)
+{
+	/* do nothing */
+}
+
+/* no-op post_frc: no bookkeeping needed while FRC is disabled */
+static void __post_frc_in_disable_state(struct mdss_mdp_frc_fsm *frc_fsm,
+	void *arg)
+{
+	/* do nothing */
+}
+
+/*
+ * Enable or disable the secure display session according to the pending
+ * transition state. Returns 0 on success or the error code from
+ * mdss_mdp_secure_display_ctrl(); mdp5_data->sd_enabled is updated only
+ * when the control call succeeds.
+ */
+static int __config_secure_display(struct mdss_overlay_private *mdp5_data)
+{
+	int panel_type = mdp5_data->ctl->panel_data->panel_info.type;
+	int sd_enable = -1; /* Since 0 is a valid state, initialize with -1 */
+	int ret = 0;
+
+	/* ensure the previous command-mode frame transfer has completed */
+	if (panel_type == MIPI_CMD_PANEL)
+		mdss_mdp_display_wait4pingpong(mdp5_data->ctl, true);
+
+	/*
+	 * Start secure display session if we are transitioning from non secure
+	 * to secure display.
+	 */
+	if (mdp5_data->sd_transition_state ==
+			SD_TRANSITION_NON_SECURE_TO_SECURE)
+		sd_enable = 1;
+
+	/*
+	 * For command mode panels, if we are trasitioning from secure to
+	 * non secure session, disable the secure display, as we've already
+	 * waited for the previous frame transfer.
+	 */
+	if ((panel_type == MIPI_CMD_PANEL) &&
+			(mdp5_data->sd_transition_state ==
+			 SD_TRANSITION_SECURE_TO_NON_SECURE))
+		sd_enable = 0;
+
+	if (sd_enable != -1) {
+		ret = mdss_mdp_secure_display_ctrl(mdp5_data->mdata, sd_enable);
+		if (!ret)
+			mdp5_data->sd_enabled = sd_enable;
+	}
+
+	return ret;
+}
+
+/*
+ * predefined state table of FRC FSM
+ *
+ * NOTE: the array is indexed directly by enum mdss_mdp_frc_state_type
+ * (see frc_fsm_states[state] in mdss_mdp_frc_fsm_change_state), so each
+ * entry's position must match its .state value.
+ */
+static struct mdss_mdp_frc_fsm_state frc_fsm_states[FRC_STATE_MAX] = {
+	{
+		.name = "FRC_FSM_INIT",
+		.state = FRC_STATE_INIT,
+		.ops = {
+			.pre_frc = __pre_frc_in_default,
+			.do_frc = __do_frc_in_init_state,
+			.post_frc = __post_frc_in_default,
+		},
+	},
+
+	{
+		.name = "FRC_FSM_CADENCE_DETECT",
+		.state = FRC_STATE_CADENCE_DETECT,
+		.ops = {
+			.pre_frc = __pre_frc_in_default,
+			.do_frc = __do_frc_in_cadence_detect_state,
+			.post_frc = __post_frc_in_default,
+		},
+	},
+
+	{
+		.name = "FRC_FSM_SEQ_MATCH",
+		.state = FRC_STATE_SEQ_MATCH,
+		.ops = {
+			.pre_frc = __pre_frc_in_default,
+			.do_frc = __do_frc_in_seq_match_state,
+			.post_frc = __post_frc_in_default,
+		},
+	},
+
+	{
+		.name = "FRC_FSM_FREERUN",
+		.state = FRC_STATE_FREERUN,
+		.ops = {
+			.pre_frc = __pre_frc_in_freerun_state,
+			.do_frc = __do_frc_in_default,
+			.post_frc = __post_frc_in_default,
+		},
+	},
+
+	{
+		.name = "FRC_FSM_READY",
+		.state = FRC_STATE_READY,
+		.ops = {
+			.pre_frc = __pre_frc_in_default,
+			.do_frc = __do_frc_in_ready_state,
+			.post_frc = __post_frc_in_default,
+		},
+	},
+
+	{
+		.name = "FRC_FSM_DISABLE",
+		.state = FRC_STATE_DISABLE,
+		.ops = {
+			.pre_frc = __pre_frc_in_disable_state,
+			.do_frc = __do_frc_in_default,
+			.post_frc = __post_frc_in_disable_state,
+		},
+	},
+};
+
+/*
+ * FRC FSM operations:
+ * mdss_mdp_frc_fsm_init_state: Init FSM state.
+ * mdss_mdp_frc_fsm_change_state: Change FSM state. The desired state will not
+ *                                be effective till update_state is called.
+ * mdss_mdp_frc_fsm_update_state: Update FSM state. Changed state is effective
+ *                                immediately once this function is called.
+ */
+void mdss_mdp_frc_fsm_init_state(struct mdss_mdp_frc_fsm *frc_fsm)
+{
+	pr_debug("frc_fsm: init frc fsm state\n");
+	/* state == to_state, i.e. no transition is pending after init */
+	frc_fsm->state = frc_fsm->to_state = frc_fsm_states[FRC_STATE_INIT];
+	memset(&frc_fsm->frc_info, 0, sizeof(struct mdss_mdp_frc_info));
+}
+
+/*
+ * Request a transition to @state; it takes effect only when
+ * mdss_mdp_frc_fsm_update_state() is called. @cb, if non-NULL, is invoked
+ * at that point. Requesting the current state is a no-op.
+ */
+void mdss_mdp_frc_fsm_change_state(struct mdss_mdp_frc_fsm *frc_fsm,
+	enum mdss_mdp_frc_state_type state,
+	void (*cb)(struct mdss_mdp_frc_fsm *frc_fsm))
+{
+	if (state != frc_fsm->state.state) {
+		pr_debug("frc_fsm: state changes from %s to %s\n",
+				frc_fsm->state.name,
+				frc_fsm_states[state].name);
+		frc_fsm->to_state = frc_fsm_states[state];
+		frc_fsm->cbs.update_state_cb = cb;
+	}
+}
+
+/*
+ * Latch a pending transition: run the registered callback (if any), then
+ * make to_state the current state. No-op when no transition is pending.
+ */
+void mdss_mdp_frc_fsm_update_state(struct mdss_mdp_frc_fsm *frc_fsm)
+{
+	if (frc_fsm->to_state.state != frc_fsm->state.state) {
+		pr_debug("frc_fsm: state updates from %s to %s\n",
+				frc_fsm->state.name,
+				frc_fsm->to_state.name);
+
+		if (frc_fsm->cbs.update_state_cb)
+			frc_fsm->cbs.update_state_cb(frc_fsm);
+
+		frc_fsm->state = frc_fsm->to_state;
+	}
+}
+
+/*
+ * Run one FRC round (pre/do/post hooks of the current FSM state) for this
+ * frame buffer, provided the current frc_info passes validation.
+ */
+static void mdss_mdp_overlay_update_frc(struct msm_fb_data_type *mfd)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_frc_fsm *frc_fsm = mdp5_data->frc_fsm;
+	struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
+
+	if (__validate_frc_info(frc_info)) {
+		struct mdss_mdp_frc_fsm_state *state = &frc_fsm->state;
+
+		state->ops.pre_frc(frc_fsm, mfd);
+		state->ops.do_frc(frc_fsm, mfd);
+		state->ops.post_frc(frc_fsm, mfd);
+	}
+}
+
+/*
+ * Commit the currently queued overlay pipes for @mfd and kick off the
+ * display (or writeback) transfer, then wait for completion and clean up.
+ * Returns 0 on success or a negative error code.
+ *
+ * Lock ordering here: ctl->shared_lock (if any) -> mdp5_data->ov_lock ->
+ * mdp5_data->list_lock. All failure paths after clocks are enabled funnel
+ * through the commit_fail label so cleanup/unlock stays balanced.
+ */
+int mdss_mdp_overlay_kickoff(struct msm_fb_data_type *mfd,
+				struct mdp_display_commit *data)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_pipe *pipe, *tmp;
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+	int ret = 0;
+	struct mdss_mdp_commit_cb commit_cb;
+	u8 sd_transition_state = 0;
+
+	if (!ctl || !ctl->mixer_left)
+		return -ENODEV;
+
+	ATRACE_BEGIN(__func__);
+	/* FRAME_BEGIN is signalled before taking the shared lock ... */
+	if (ctl->shared_lock) {
+		mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_BEGIN);
+		mutex_lock(ctl->shared_lock);
+	}
+
+	mutex_lock(&mdp5_data->ov_lock);
+	ctl->bw_pending = 0;
+	ret = mdss_mdp_overlay_start(mfd);
+	if (ret) {
+		pr_err("unable to start overlay %d (%d)\n", mfd->index, ret);
+		mutex_unlock(&mdp5_data->ov_lock);
+		if (ctl->shared_lock)
+			mutex_unlock(ctl->shared_lock);
+		return ret;
+	}
+
+	ret = mdss_iommu_ctrl(1);
+	if (IS_ERR_VALUE(ret)) {
+		pr_err("iommu attach failed rc=%d\n", ret);
+		mutex_unlock(&mdp5_data->ov_lock);
+		if (ctl->shared_lock)
+			mutex_unlock(ctl->shared_lock);
+		return ret;
+	}
+	mutex_lock(&mdp5_data->list_lock);
+
+	/* ... otherwise it is signalled here, exactly once per commit */
+	if (!ctl->shared_lock)
+		mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_BEGIN);
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+	mdss_mdp_check_ctl_reset_status(ctl);
+	__validate_and_set_roi(mfd, data);
+
+	if (ctl->ops.wait_pingpong && mdp5_data->mdata->serialize_wait4pp)
+		mdss_mdp_display_wait4pingpong(ctl, true);
+
+	/* cache the transition state; it is re-checked after wait4comp */
+	sd_transition_state = mdp5_data->sd_transition_state;
+	if (sd_transition_state != SD_TRANSITION_NONE) {
+		ret = __config_secure_display(mdp5_data);
+		if (IS_ERR_VALUE(ret)) {
+			pr_err("Secure session config failed\n");
+			goto commit_fail;
+		}
+	}
+
+	/*
+	 * Setup pipe in solid fill before unstaging,
+	 * to ensure no fetches are happening after dettach or reattach.
+	 */
+	list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_cleanup, list) {
+		mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
+		mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_right);
+		pipe->mixer_stage = MDSS_MDP_STAGE_UNUSED;
+		list_move(&pipe->list, &mdp5_data->pipes_destroy);
+	}
+
+	/* call this function before any registers programming */
+	if (ctl->ops.pre_programming)
+		ctl->ops.pre_programming(ctl);
+
+	ATRACE_BEGIN("sspp_programming");
+	ret = __overlay_queue_pipes(mfd);
+	ATRACE_END("sspp_programming");
+	mutex_unlock(&mdp5_data->list_lock);
+
+	mdp5_data->kickoff_released = false;
+
+	/* deterministic frame rate control: may delay kickoff by repeats */
+	if (mdp5_data->frc_fsm->enable)
+		mdss_mdp_overlay_update_frc(mfd);
+
+	if (mfd->panel.type == WRITEBACK_PANEL) {
+		ATRACE_BEGIN("wb_kickoff");
+		commit_cb.commit_cb_fnc = mdss_mdp_commit_cb;
+		commit_cb.data = mfd;
+		ret = mdss_mdp_wfd_kickoff(mdp5_data->wfd, &commit_cb);
+		ATRACE_END("wb_kickoff");
+	} else {
+		ATRACE_BEGIN("display_commit");
+		commit_cb.commit_cb_fnc = mdss_mdp_commit_cb;
+		commit_cb.data = mfd;
+		ret = mdss_mdp_display_commit(mdp5_data->ctl, NULL,
+			&commit_cb);
+		ATRACE_END("display_commit");
+	}
+	__vsync_set_vsync_handler(mfd);
+
+	/*
+	 * release the commit pending flag; we are releasing this flag
+	 * after the commit, since now the transaction status
+	 * in the cmd mode controllers is busy.
+	 */
+	mfd->atomic_commit_pending = false;
+
+	if (!mdp5_data->kickoff_released)
+		mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_CTX_DONE);
+
+	if (IS_ERR_VALUE(ret))
+		goto commit_fail;
+
+	mutex_unlock(&mdp5_data->ov_lock);
+	mdss_mdp_overlay_update_pm(mdp5_data);
+
+	ATRACE_BEGIN("display_wait4comp");
+	ret = mdss_mdp_display_wait4comp(mdp5_data->ctl);
+	ATRACE_END("display_wait4comp");
+	mdss_mdp_splash_cleanup(mfd, true);
+
+	/*
+	 * Configure Timing Engine, if new fps was set.
+	 * We need to do this after the wait for vsync
+	 * to guarantee that mdp flush bit and dsi flush
+	 * bit are set within the same vsync period
+	 * regardless of  mdp revision.
+	 */
+	ATRACE_BEGIN("fps_update");
+	ret = mdss_mdp_ctl_update_fps(ctl);
+	ATRACE_END("fps_update");
+
+	if (IS_ERR_VALUE(ret)) {
+		pr_err("failed to update fps!\n");
+		goto commit_fail;
+	}
+
+	mutex_lock(&mdp5_data->ov_lock);
+	/*
+	 * If we are transitioning from secure to non-secure display,
+	 * disable the secure display.
+	 */
+	if (mdp5_data->sd_enabled && (sd_transition_state ==
+			SD_TRANSITION_SECURE_TO_NON_SECURE)) {
+		ret = mdss_mdp_secure_display_ctrl(mdp5_data->mdata, 0);
+		if (!ret)
+			mdp5_data->sd_enabled = 0;
+	}
+
+	mdss_fb_update_notify_update(mfd);
+commit_fail:
+	/* common exit: cleanup destroyed pipes, drop clocks and locks */
+	ATRACE_BEGIN("overlay_cleanup");
+	mdss_mdp_overlay_cleanup(mfd, &mdp5_data->pipes_destroy);
+	ATRACE_END("overlay_cleanup");
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_FLUSHED);
+	if (!mdp5_data->kickoff_released)
+		mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_CTX_DONE);
+
+	mutex_unlock(&mdp5_data->ov_lock);
+	if (ctl->shared_lock)
+		mutex_unlock(ctl->shared_lock);
+	mdss_iommu_ctrl(0);
+	ATRACE_END(__func__);
+
+	return ret;
+}
+
+/*
+ * Move every used pipe whose index bit is set in @ndx onto the cleanup
+ * list. Returns 0 when all requested pipes were found, -ENOENT otherwise.
+ */
+int mdss_mdp_overlay_release(struct msm_fb_data_type *mfd, int ndx)
+{
+	struct mdss_mdp_pipe *pipe, *tmp;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	u32 unset_ndx = 0;
+
+	mutex_lock(&mdp5_data->list_lock);
+	list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_used, list) {
+		if (pipe->ndx & ndx) {
+			if (mdss_mdp_pipe_map(pipe)) {
+				pr_err("Unable to map used pipe%d ndx=%x\n",
+						pipe->num, pipe->ndx);
+				continue;
+			}
+
+			unset_ndx |= pipe->ndx;
+
+			pipe->file = NULL;
+			list_move(&pipe->list, &mdp5_data->pipes_cleanup);
+
+			mdss_mdp_pipe_unmap(pipe);
+
+			/* stop early once every requested bit is covered */
+			if (unset_ndx == ndx)
+				break;
+		}
+	}
+	mutex_unlock(&mdp5_data->list_lock);
+
+	if (unset_ndx != ndx) {
+		pr_warn("Unable to unset pipe(s) ndx=0x%x unset=0x%x\n",
+				ndx, unset_ndx);
+		return -ENOENT;
+	}
+
+	return 0;
+}
+
+/*
+ * Userspace-facing unset of overlay(s) identified by @ndx. BORDERFILL_NDX
+ * just disables borderfill; otherwise the pipes are released. Requires the
+ * panel to be powered on.
+ */
+static int mdss_mdp_overlay_unset(struct msm_fb_data_type *mfd, int ndx)
+{
+	int ret = 0;
+	struct mdss_overlay_private *mdp5_data;
+
+	if (!mfd)
+		return -ENODEV;
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+
+	if (!mdp5_data || !mdp5_data->ctl)
+		return -ENODEV;
+
+	/* interruptible: a pending signal aborts the ioctl path */
+	ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
+	if (ret)
+		return ret;
+
+	if (ndx == BORDERFILL_NDX) {
+		pr_debug("borderfill disable\n");
+		mdp5_data->borderfill_enable = false;
+		ret = 0;
+		goto done;
+	}
+
+	if (mdss_fb_is_power_off(mfd)) {
+		ret = -EPERM;
+		goto done;
+	}
+
+	pr_debug("unset ndx=%x\n", ndx);
+
+	ret = mdss_mdp_overlay_release(mfd, ndx);
+
+done:
+	mutex_unlock(&mdp5_data->ov_lock);
+
+	return ret;
+}
+
+/**
+ * __mdss_mdp_overlay_release_all() - release any overlays associated with fb dev
+ * @mfd:	Msm frame buffer structure associated with fb device
+ * @file:	file pointer whose pipes should be released; NULL releases
+ *		every pipe regardless of owner
+ *
+ * Release any resources allocated by calling process, this can be called
+ * on fb_release to release any overlays/rotator sessions left open.
+ *
+ * Return number of resources released
+ */
+static int __mdss_mdp_overlay_release_all(struct msm_fb_data_type *mfd,
+	struct file *file)
+{
+	struct mdss_mdp_pipe *pipe, *tmp;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	u32 unset_ndx = 0;
+	int cnt = 0;
+
+	pr_debug("releasing all resources for fb%d file:%pK\n",
+		mfd->index, file);
+
+	mutex_lock(&mdp5_data->ov_lock);
+	mutex_lock(&mdp5_data->list_lock);
+	/* pipes already queued for cleanup still count as work to flush */
+	if (!mfd->ref_cnt && !list_empty(&mdp5_data->pipes_cleanup)) {
+		pr_debug("fb%d:: free pipes present in cleanup list",
+			mfd->index);
+		cnt++;
+	}
+
+	list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_used, list) {
+		if (!file || pipe->file == file) {
+			unset_ndx |= pipe->ndx;
+			pipe->file = NULL;
+			list_move(&pipe->list, &mdp5_data->pipes_cleanup);
+			cnt++;
+		}
+	}
+
+	pr_debug("mfd->ref_cnt=%d unset_ndx=0x%x cnt=%d\n",
+		mfd->ref_cnt, unset_ndx, cnt);
+
+	mutex_unlock(&mdp5_data->list_lock);
+	mutex_unlock(&mdp5_data->ov_lock);
+
+	return cnt;
+}
+
+/*
+ * Attach the user-supplied buffer in @req to the already-set-up pipe
+ * identified by req->id, validating and mapping it for display. Returns 0
+ * on success or a negative error code.
+ */
+static int mdss_mdp_overlay_queue(struct msm_fb_data_type *mfd,
+				  struct msmfb_overlay_data *req)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_pipe *pipe;
+	struct mdss_mdp_data *src_data;
+	struct mdp_layer_buffer buffer;
+	int ret;
+	u32 flags;
+
+	pipe = __overlay_find_pipe(mfd, req->id);
+	if (!pipe) {
+		pr_err("pipe ndx=%x doesn't exist\n", req->id);
+		return -ENODEV;
+	}
+
+	if (pipe->dirty) {
+		pr_warn("dirty pipe, will not queue pipe pnum=%d\n", pipe->num);
+		return -ENODEV;
+	}
+
+	/* hold a reference so the pipe can't vanish while we queue */
+	ret = mdss_mdp_pipe_map(pipe);
+	if (IS_ERR_VALUE(ret)) {
+		pr_err("Unable to map used pipe%d ndx=%x\n",
+				pipe->num, pipe->ndx);
+		return ret;
+	}
+
+	pr_debug("ov queue pnum=%d\n", pipe->num);
+
+	if (pipe->flags & MDP_SOLID_FILL)
+		pr_warn("Unexpected buffer queue to a solid fill pipe\n");
+
+	/* carry only the secure-session flags over to buffer validation */
+	flags = (pipe->flags & (MDP_SECURE_OVERLAY_SESSION |
+		MDP_SECURE_DISPLAY_OVERLAY_SESSION));
+
+	mutex_lock(&mdp5_data->list_lock);
+	src_data = mdss_mdp_overlay_buf_alloc(mfd, pipe);
+	if (!src_data) {
+		pr_err("unable to allocate source buffer\n");
+		ret = -ENOMEM;
+	} else {
+		buffer.width = pipe->img_width;
+		buffer.height = pipe->img_height;
+		buffer.format = pipe->src_fmt->format;
+		ret = mdss_mdp_data_get_and_validate_size(src_data, &req->data,
+			1, flags, &mfd->pdev->dev, false, DMA_TO_DEVICE,
+			&buffer);
+		if (IS_ERR_VALUE(ret)) {
+			mdss_mdp_overlay_buf_free(mfd, src_data);
+			pr_err("src_data pmem error\n");
+		}
+	}
+	mutex_unlock(&mdp5_data->list_lock);
+
+	mdss_mdp_pipe_unmap(pipe);
+
+	return ret;
+}
+
+/*
+ * Userspace-facing "play" entry: BORDERFILL_NDX enables borderfill and
+ * frees the base framebuffer pipes, anything else queues the buffer on
+ * its pipe. Requires the panel to be powered on.
+ */
+static int mdss_mdp_overlay_play(struct msm_fb_data_type *mfd,
+				 struct msmfb_overlay_data *req)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	int ret = 0;
+
+	pr_debug("play req id=%x\n", req->id);
+
+	ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
+	if (ret)
+		return ret;
+
+	if (mdss_fb_is_power_off(mfd)) {
+		ret = -EPERM;
+		goto done;
+	}
+
+	if (req->id == BORDERFILL_NDX) {
+		pr_debug("borderfill enable\n");
+		mdp5_data->borderfill_enable = true;
+		ret = mdss_mdp_overlay_free_fb_pipe(mfd);
+	} else {
+		ret = mdss_mdp_overlay_queue(mfd, req);
+	}
+
+done:
+	mutex_unlock(&mdp5_data->ov_lock);
+
+	return ret;
+}
+
+/*
+ * Release the base-stage framebuffer pipe(s) staged on the left and/or
+ * right mixers, if any. Always returns 0.
+ */
+static int mdss_mdp_overlay_free_fb_pipe(struct msm_fb_data_type *mfd)
+{
+	struct mdss_mdp_pipe *pipe;
+	u32 fb_ndx = 0;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+
+	pipe = mdss_mdp_get_staged_pipe(mdp5_data->ctl,
+		MDSS_MDP_MIXER_MUX_LEFT, MDSS_MDP_STAGE_BASE, false);
+	if (pipe)
+		fb_ndx |= pipe->ndx;
+
+	pipe = mdss_mdp_get_staged_pipe(mdp5_data->ctl,
+		MDSS_MDP_MIXER_MUX_RIGHT, MDSS_MDP_STAGE_BASE, false);
+	if (pipe)
+		fb_ndx |= pipe->ndx;
+
+	if (fb_ndx) {
+		pr_debug("unstaging framebuffer pipes %x\n", fb_ndx);
+		mdss_mdp_overlay_release(mfd, fb_ndx);
+	}
+	return 0;
+}
+
+/*
+ * Get (or lazily create) the base-stage framebuffer pipe for @mixer_mux.
+ * On success *ppipe is set; *pipe_allocated reports whether a new pipe was
+ * set up here (caller must release it on later failures). Returns 0 on
+ * success or a negative error code.
+ */
+static int mdss_mdp_overlay_get_fb_pipe(struct msm_fb_data_type *mfd,
+					struct mdss_mdp_pipe **ppipe,
+					int mixer_mux, bool *pipe_allocated)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_pipe *pipe;
+	int ret = 0;
+	struct mdp_overlay *req = NULL;
+
+	*pipe_allocated = false;
+	pipe = mdss_mdp_get_staged_pipe(mdp5_data->ctl, mixer_mux,
+		MDSS_MDP_STAGE_BASE, false);
+
+	/* nothing staged yet: build an overlay request from the fb geometry */
+	if (pipe == NULL) {
+		struct fb_info *fbi = mfd->fbi;
+		struct mdss_mdp_mixer *mixer;
+		int bpp;
+		bool rotate_180 = (fbi->var.rotate == FB_ROTATE_UD);
+		struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+		bool split_lm = (fbi->var.xres > mdata->max_mixer_width ||
+			is_split_lm(mfd));
+		struct mdp_rect left_rect, right_rect;
+
+		mixer = mdss_mdp_mixer_get(mdp5_data->ctl,
+					MDSS_MDP_MIXER_MUX_LEFT);
+		if (!mixer) {
+			pr_err("unable to retrieve mixer\n");
+			return -ENODEV;
+		}
+
+		req = kcalloc(1, sizeof(struct mdp_overlay), GFP_KERNEL);
+		if (!req)
+			return -ENOMEM;
+
+		bpp = fbi->var.bits_per_pixel / 8;
+		req->id = MSMFB_NEW_REQUEST;
+		req->src.format = mfd->fb_imgType;
+		req->src.height = fbi->var.yres;
+		/* stride in pixels, which may exceed the visible xres */
+		req->src.width = fbi->fix.line_length / bpp;
+
+		left_rect.x = 0;
+		left_rect.w = MIN(fbi->var.xres, mixer->width);
+		left_rect.y = 0;
+		left_rect.h = req->src.height;
+
+		right_rect.x = mixer->width;
+		right_rect.w = fbi->var.xres - mixer->width;
+		right_rect.y = 0;
+		right_rect.h = req->src.height;
+
+		if (mixer_mux == MDSS_MDP_MIXER_MUX_RIGHT) {
+			if (req->src.width <= mixer->width) {
+				pr_warn("right fb pipe not needed\n");
+				ret = -EINVAL;
+				goto done;
+			}
+			req->src_rect = req->dst_rect = right_rect;
+			/* 180-degree rotation swaps which half each LM fetches */
+			if (split_lm && rotate_180)
+				req->src_rect = left_rect;
+		} else {
+			req->src_rect = req->dst_rect = left_rect;
+			if (split_lm && rotate_180)
+				req->src_rect = right_rect;
+		}
+
+		req->z_order = MDSS_MDP_STAGE_BASE;
+		if (rotate_180)
+			req->flags |= (MDP_FLIP_LR | MDP_FLIP_UD);
+
+		pr_debug("allocating base pipe mux=%d\n", mixer_mux);
+
+		ret = mdss_mdp_overlay_pipe_setup(mfd, req, &pipe, NULL,
+			false);
+		if (ret)
+			goto done;
+
+		*pipe_allocated = true;
+	}
+	pr_debug("ctl=%d pnum=%d\n", mdp5_data->ctl->num, pipe->num);
+
+	*ppipe = pipe;
+
+done:
+	/* req is NULL when a staged pipe was found; kfree(NULL) is a no-op */
+	kfree(req);
+	return ret;
+}
+
+/*
+ * Legacy fb pan_display path: stage the framebuffer memory on the base
+ * pipe(s) (left, plus right for split-LM / wide panels) and kick off a
+ * display update.
+ */
+static void mdss_mdp_overlay_pan_display(struct msm_fb_data_type *mfd)
+{
+	struct mdss_mdp_data *buf_l = NULL, *buf_r = NULL;
+	struct mdss_mdp_pipe *l_pipe, *r_pipe, *pipe, *tmp;
+	struct fb_info *fbi;
+	struct mdss_overlay_private *mdp5_data;
+	struct mdss_data_type *mdata;
+	u32 offset;
+	int bpp, ret;
+	bool l_pipe_allocated = false, r_pipe_allocated = false;
+
+	if (!mfd || !mfd->mdp.private1)
+		return;
+
+	mdata = mfd_to_mdata(mfd);
+	fbi = mfd->fbi;
+	mdp5_data = mfd_to_mdp5_data(mfd);
+
+	if (!mdp5_data || !mdp5_data->ctl)
+		return;
+
+	/*
+	 * Ignore writeback updates through pan_display as output
+	 * buffer is not available.
+	 */
+	if (mfd->panel_info->type == WRITEBACK_PANEL) {
+		pr_err_once("writeback update not supported through pan display\n");
+		return;
+	}
+
+	/* no fb memory (or borderfill): just kick off whatever is staged */
+	if (IS_ERR_OR_NULL(mfd->fbmem_buf) || fbi->fix.smem_len == 0 ||
+		mdp5_data->borderfill_enable) {
+		if (mdata->handoff_pending) {
+			/*
+			 * Move pipes to cleanup queue and avoid kickoff if
+			 * pan display is called before handoff is completed.
+			 */
+			mutex_lock(&mdp5_data->list_lock);
+			list_for_each_entry_safe(pipe, tmp,
+			    &mdp5_data->pipes_used, list) {
+				list_move(&pipe->list,
+					&mdp5_data->pipes_cleanup);
+			}
+			mutex_unlock(&mdp5_data->list_lock);
+		}
+		mfd->mdp.kickoff_fnc(mfd, NULL);
+		return;
+	}
+
+	if (mutex_lock_interruptible(&mdp5_data->ov_lock))
+		return;
+
+	/* allow updates while powered off only for DCM-enter on cmd panels */
+	if ((mdss_fb_is_power_off(mfd)) &&
+		!((mfd->dcm_state == DCM_ENTER) &&
+		(mfd->panel.type == MIPI_CMD_PANEL))) {
+		mutex_unlock(&mdp5_data->ov_lock);
+		return;
+	}
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+	bpp = fbi->var.bits_per_pixel / 8;
+	offset = fbi->var.xoffset * bpp +
+		 fbi->var.yoffset * fbi->fix.line_length;
+
+	if (offset > fbi->fix.smem_len) {
+		pr_err("invalid fb offset=%u total length=%u\n",
+		       offset, fbi->fix.smem_len);
+		goto clk_disable;
+	}
+
+	/*
+	 * NOTE(review): failures from here until mdss_iommu_ctrl(1) below
+	 * reach iommu_disable (directly or via pipe_release) and call
+	 * mdss_iommu_ctrl(0) without a matching enable — looks unbalanced;
+	 * confirm mdss_iommu_ctrl() tolerates an extra decrement.
+	 */
+	ret = mdss_mdp_overlay_get_fb_pipe(mfd, &l_pipe,
+		MDSS_MDP_MIXER_MUX_LEFT, &l_pipe_allocated);
+	if (ret) {
+		pr_err("unable to allocate base pipe\n");
+		goto iommu_disable;
+	}
+
+	if (mdss_mdp_pipe_map(l_pipe)) {
+		pr_err("unable to map base pipe\n");
+		goto pipe_release;
+	}
+
+	ret = mdss_mdp_overlay_start(mfd);
+	if (ret) {
+		pr_err("unable to start overlay %d (%d)\n", mfd->index, ret);
+		goto clk_disable;
+	}
+
+	ret = mdss_iommu_ctrl(1);
+	if (IS_ERR_VALUE(ret)) {
+		pr_err("IOMMU attach failed\n");
+		goto clk_disable;
+	}
+
+	buf_l = __mdp_overlay_buf_alloc(mfd, l_pipe);
+	if (!buf_l) {
+		pr_err("unable to allocate memory for fb buffer\n");
+		mdss_mdp_pipe_unmap(l_pipe);
+		goto pipe_release;
+	}
+
+	/* point the plane at the fb memory; detach is skipped (fb owns it) */
+	buf_l->p[0].srcp_table = mfd->fb_table;
+	buf_l->p[0].srcp_dma_buf = mfd->fbmem_buf;
+	buf_l->p[0].len = 0;
+	buf_l->p[0].addr = 0;
+	buf_l->p[0].offset = offset;
+	buf_l->p[0].skip_detach = true;
+	buf_l->p[0].mapped = false;
+	buf_l->num_planes = 1;
+
+	mdss_mdp_pipe_unmap(l_pipe);
+
+	if (fbi->var.xres > mdata->max_pipe_width || is_split_lm(mfd)) {
+		/*
+		 * TODO: Need to revisit the function for panels with width more
+		 * than max_pipe_width and less than max_mixer_width.
+		 */
+		ret = mdss_mdp_overlay_get_fb_pipe(mfd, &r_pipe,
+			MDSS_MDP_MIXER_MUX_RIGHT, &r_pipe_allocated);
+		if (ret) {
+			pr_err("unable to allocate right base pipe\n");
+			goto pipe_release;
+		}
+
+		if (mdss_mdp_pipe_map(r_pipe)) {
+			pr_err("unable to map right base pipe\n");
+			goto pipe_release;
+		}
+
+		buf_r = __mdp_overlay_buf_alloc(mfd, r_pipe);
+		if (!buf_r) {
+			pr_err("unable to allocate memory for fb buffer\n");
+			mdss_mdp_pipe_unmap(r_pipe);
+			goto pipe_release;
+		}
+
+		/* right pipe shares the same fb plane description */
+		buf_r->p[0] = buf_l->p[0];
+		buf_r->num_planes = 1;
+
+		mdss_mdp_pipe_unmap(r_pipe);
+	}
+	mutex_unlock(&mdp5_data->ov_lock);
+
+	if ((fbi->var.activate & FB_ACTIVATE_VBL) ||
+	    (fbi->var.activate & FB_ACTIVATE_FORCE))
+		mfd->mdp.kickoff_fnc(mfd, NULL);
+
+	mdss_iommu_ctrl(0);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	return;
+
+pipe_release:
+	if (r_pipe_allocated)
+		mdss_mdp_overlay_release(mfd, r_pipe->ndx);
+	if (buf_l)
+		__mdp_overlay_buf_free(mfd, buf_l);
+	if (l_pipe_allocated)
+		mdss_mdp_overlay_release(mfd, l_pipe->ndx);
+iommu_disable:
+	mdss_iommu_ctrl(0);
+clk_disable:
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	mutex_unlock(&mdp5_data->ov_lock);
+}
+
+/*
+ * Deferred work: detach the underrun-recovery vsync handler outside irq
+ * context (scheduled from mdss_mdp_recover_underrun_handler).
+ */
+static void remove_underrun_vsync_handler(struct work_struct *work)
+{
+	int rc;
+	struct mdss_mdp_ctl *ctl =
+		container_of(work, typeof(*ctl), remove_underrun_handler);
+
+	/* container_of never yields NULL here; the check guards the op only */
+	if (!ctl || !ctl->ops.remove_vsync_handler) {
+		pr_err("ctl or vsync handler is NULL\n");
+		return;
+	}
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+	rc = ctl->ops.remove_vsync_handler(ctl,
+			&ctl->recover_underrun_handler);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+}
+
+/*
+ * Underrun-recovery vsync callback: reset the ctl and defer removal of
+ * this handler to the workqueue (cannot be done from irq context).
+ */
+static void mdss_mdp_recover_underrun_handler(struct mdss_mdp_ctl *ctl,
+						ktime_t t)
+{
+	if (!ctl) {
+		pr_err("ctl is NULL\n");
+		return;
+	}
+
+	mdss_mdp_ctl_reset(ctl, true);
+	schedule_work(&ctl->remove_underrun_handler);
+}
+
+/* do nothing in case of deterministic frame rate control, only keep vsync on */
+static void mdss_mdp_overlay_frc_handler(struct mdss_mdp_ctl *ctl,
+						ktime_t t)
+{
+	pr_debug("vsync on ctl%d vsync_cnt=%d\n", ctl->num, ctl->vsync_cnt);
+}
+
+/* function is called in irq context should have minimum processing */
+static void mdss_mdp_overlay_handle_vsync(struct mdss_mdp_ctl *ctl,
+						ktime_t t)
+{
+	struct msm_fb_data_type *mfd = NULL;
+	struct mdss_overlay_private *mdp5_data = NULL;
+
+	if (!ctl) {
+		pr_err("ctl is NULL\n");
+		return;
+	}
+
+	mfd = ctl->mfd;
+	if (!mfd || !mfd->mdp.private1) {
+		pr_warn("Invalid handle for vsync\n");
+		return;
+	}
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+	if (!mdp5_data) {
+		pr_err("mdp5_data is NULL\n");
+		return;
+	}
+
+	pr_debug("vsync on fb%d play_cnt=%d\n", mfd->index, ctl->play_cnt);
+
+	/* record the timestamp and wake sysfs pollers on the vsync node */
+	mdp5_data->vsync_time = t;
+	sysfs_notify_dirent(mdp5_data->vsync_event_sd);
+}
+
+/* function is called in irq context should have minimum processing */
+static void mdss_mdp_overlay_handle_lineptr(struct mdss_mdp_ctl *ctl,
+						ktime_t t)
+{
+	struct mdss_overlay_private *mdp5_data = NULL;
+
+	if (!ctl || !ctl->mfd) {
+		pr_warn("Invalid handle for lineptr\n");
+		return;
+	}
+
+	mdp5_data = mfd_to_mdp5_data(ctl->mfd);
+	if (!mdp5_data) {
+		pr_err("mdp5_data is NULL\n");
+		return;
+	}
+
+	pr_debug("lineptr irq on fb%d play_cnt=%d\n",
+			ctl->mfd->index, ctl->play_cnt);
+
+	/* record the timestamp and wake sysfs pollers on the lineptr node */
+	mdp5_data->lineptr_time = t;
+	sysfs_notify_dirent(mdp5_data->lineptr_event_sd);
+}
+
+/*
+ * Enable (@en != 0) or disable vsync event delivery for @mfd. Returns 0
+ * on success, -EOPNOTSUPP when the ctl lacks vsync handler ops, or -EPERM
+ * when the display is not in a state that can produce vsyncs.
+ */
+int mdss_mdp_overlay_vsync_ctrl(struct msm_fb_data_type *mfd, int en)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+	int rc;
+
+	if (!ctl)
+		return -ENODEV;
+
+	mutex_lock(&mdp5_data->ov_lock);
+	if (!ctl->ops.add_vsync_handler || !ctl->ops.remove_vsync_handler) {
+		rc = -EOPNOTSUPP;
+		pr_err_once("fb%d vsync handlers are not registered\n",
+			mfd->index);
+		goto end;
+	}
+
+	/* cont-splash still owns the pipeline; otherwise require power on */
+	if (!ctl->panel_data->panel_info.cont_splash_enabled
+		&& (!mdss_mdp_ctl_is_power_on(ctl) ||
+		mdss_panel_is_power_on_ulp(ctl->power_state))) {
+		pr_debug("fb%d vsync pending first update en=%d, ctl power state:%d\n",
+				mfd->index, en, ctl->power_state);
+		rc = -EPERM;
+		goto end;
+	}
+
+	pr_debug("fb%d vsync en=%d\n", mfd->index, en);
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+	if (en)
+		rc = ctl->ops.add_vsync_handler(ctl, &ctl->vsync_handler);
+	else
+		rc = ctl->ops.remove_vsync_handler(ctl, &ctl->vsync_handler);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+
+end:
+	mutex_unlock(&mdp5_data->ov_lock);
+	return rc;
+}
+
+/*
+ * sysfs read handler for dynamic fps: reports the panel's current mipi
+ * frame rate, or 0 bytes when the ctl is absent/powered off.
+ */
+static ssize_t dynamic_fps_sysfs_rda_dfps(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t ret;
+	struct mdss_panel_data *pdata;
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+
+	if (!mdp5_data->ctl || !mdss_mdp_ctl_is_power_on(mdp5_data->ctl))
+		return 0;
+
+	pdata = dev_get_platdata(&mfd->pdev->dev);
+	if (!pdata) {
+		pr_err("no panel connected for fb%d\n", mfd->index);
+		return -ENODEV;
+	}
+
+	/* dfps_lock serializes against concurrent fps updates */
+	mutex_lock(&mdp5_data->dfps_lock);
+	ret = snprintf(buf, PAGE_SIZE, "%d\n",
+		       pdata->panel_info.mipi.frame_rate);
+	pr_debug("%s: '%d'\n", __func__,
+		pdata->panel_info.mipi.frame_rate);
+	mutex_unlock(&mdp5_data->dfps_lock);
+
+	return ret;
+} /* dynamic_fps_sysfs_rda_dfps */
+
+/*
+ * Extra blanking needed to reach new_fps: lines for the vfp method,
+ * pixels for the hfp methods, derived from the cached initial timings.
+ */
+static int calc_extra_blanking(struct mdss_panel_data *pdata, u32 new_fps)
+{
+	int fps_delta = abs(pdata->panel_info.default_fps - new_fps);
+
+	return mult_frac(pdata->panel_info.saved_total, fps_delta, new_fps);
+}
+
+/*
+ * cache_initial_timings() - snapshot the panel's pre-dfps fps, total
+ * blanking and front-porch values so later dfps math always starts
+ * from the original timings.
+ *
+ * Runs only once: default_fps doubles as the "already cached" flag, so
+ * every call after the first is a no-op.
+ */
+static void cache_initial_timings(struct mdss_panel_data *pdata)
+{
+	if (!pdata->panel_info.default_fps) {
+
+		/*
+		 * This value will change dynamically once the
+		 * actual dfps update happen in hw.
+		 */
+		pdata->panel_info.current_fps =
+			mdss_panel_get_framerate(&pdata->panel_info,
+				FPS_RESOLUTION_DEFAULT);
+
+		/*
+		 * Keep the initial fps and porch values for this panel before
+		 * any dfps update happen, this is to prevent losing precision
+		 * in further calculations.
+		 */
+		pdata->panel_info.default_fps =
+			mdss_panel_get_framerate(&pdata->panel_info,
+				FPS_RESOLUTION_DEFAULT);
+
+		/* vfp method saves vertical totals; hfp methods horizontal */
+		if (pdata->panel_info.dfps_update ==
+					DFPS_IMMEDIATE_PORCH_UPDATE_MODE_VFP) {
+			pdata->panel_info.saved_total =
+				mdss_panel_get_vtotal(&pdata->panel_info);
+			pdata->panel_info.saved_fporch =
+				pdata->panel_info.lcdc.v_front_porch;
+
+		} else if (pdata->panel_info.dfps_update ==
+				DFPS_IMMEDIATE_PORCH_UPDATE_MODE_HFP ||
+			pdata->panel_info.dfps_update ==
+				DFPS_IMMEDIATE_MULTI_UPDATE_MODE_CLK_HFP ||
+			pdata->panel_info.dfps_update ==
+				DFPS_IMMEDIATE_MULTI_MODE_HFP_CALC_CLK) {
+			pdata->panel_info.saved_total =
+				mdss_panel_get_htotal(&pdata->panel_info, true);
+			pdata->panel_info.saved_fporch =
+				pdata->panel_info.lcdc.h_front_porch;
+		}
+	}
+}
+
+/* Store the new frame rate where the panel type keeps it. */
+static inline void dfps_update_fps(struct mdss_panel_info *pinfo, u32 fps)
+{
+	if (pinfo->type != DTV_PANEL)
+		pinfo->mipi.frame_rate = fps;
+	else
+		pinfo->lcdc.frame_rate = fps;
+}
+
+/*
+ * dfps_update_panel_params() - apply a dynamic-fps request to the cached
+ * panel timing parameters, dispatching on panel_info.dfps_update mode:
+ * vfp/hfp porch-recalculation modes, the two "caller supplies porches
+ * and clock" multi-update modes, or plain clock-rate update.
+ */
+static void dfps_update_panel_params(struct mdss_panel_data *pdata,
+	struct dynamic_fps_data *data)
+{
+	u32 new_fps = data->fps;
+
+	/* Keep initial values before any dfps update */
+	cache_initial_timings(pdata);
+
+	if (pdata->panel_info.dfps_update ==
+			DFPS_IMMEDIATE_PORCH_UPDATE_MODE_VFP) {
+		int add_v_lines;
+
+		/* calculate extra vfp lines */
+		add_v_lines = calc_extra_blanking(pdata, new_fps);
+
+		/* update panel info with new values */
+		pdata->panel_info.lcdc.v_front_porch =
+			pdata->panel_info.saved_fporch + add_v_lines;
+
+		dfps_update_fps(&pdata->panel_info, new_fps);
+
+		/* taller frame changes how many lines may be prefetched */
+		pdata->panel_info.prg_fet =
+			mdss_mdp_get_prefetch_lines(&pdata->panel_info);
+
+	} else if (pdata->panel_info.dfps_update ==
+			DFPS_IMMEDIATE_PORCH_UPDATE_MODE_HFP) {
+		int add_h_pixels;
+
+		/* calculate extra hfp pixels */
+		add_h_pixels = calc_extra_blanking(pdata, new_fps);
+
+		/* update panel info: widen porch to slow down, shrink to speed up */
+		if (pdata->panel_info.default_fps > new_fps)
+			pdata->panel_info.lcdc.h_front_porch =
+				pdata->panel_info.saved_fporch + add_h_pixels;
+		else
+			pdata->panel_info.lcdc.h_front_porch =
+				pdata->panel_info.saved_fporch - add_h_pixels;
+
+		dfps_update_fps(&pdata->panel_info, new_fps);
+	} else if (pdata->panel_info.dfps_update ==
+		DFPS_IMMEDIATE_MULTI_UPDATE_MODE_CLK_HFP) {
+
+		pr_debug("hfp=%d, hbp=%d, hpw=%d, clk=%d, fps=%d\n",
+			data->hfp, data->hbp, data->hpw,
+			data->clk_rate, data->fps);
+
+		pdata->panel_info.lcdc.h_front_porch = data->hfp;
+		pdata->panel_info.lcdc.h_back_porch  = data->hbp;
+		pdata->panel_info.lcdc.h_pulse_width = data->hpw;
+
+		pdata->panel_info.clk_rate = data->clk_rate;
+		/* presumably DTV clk_rate arrives in kHz — TODO confirm */
+		if (pdata->panel_info.type == DTV_PANEL)
+			pdata->panel_info.clk_rate *= 1000;
+
+		dfps_update_fps(&pdata->panel_info, new_fps);
+	} else if (pdata->panel_info.dfps_update ==
+		DFPS_IMMEDIATE_MULTI_MODE_HFP_CALC_CLK) {
+
+		pr_debug("hfp=%d, hbp=%d, hpw=%d, clk=%d, fps=%d\n",
+			data->hfp, data->hbp, data->hpw,
+			data->clk_rate, data->fps);
+
+		pdata->panel_info.lcdc.h_front_porch = data->hfp;
+		pdata->panel_info.lcdc.h_back_porch  = data->hbp;
+		pdata->panel_info.lcdc.h_pulse_width = data->hpw;
+
+		pdata->panel_info.clk_rate = data->clk_rate;
+
+		dfps_update_fps(&pdata->panel_info, new_fps);
+		mdss_panel_update_clk_rate(&pdata->panel_info, new_fps);
+	} else {
+		dfps_update_fps(&pdata->panel_info, new_fps);
+		mdss_panel_update_clk_rate(&pdata->panel_info, new_fps);
+	}
+}
+
+/*
+ * mdss_mdp_dfps_update_params() - validate a dynamic-fps request and
+ * push it into the panel data.
+ * @mfd:       framebuffer device
+ * @pdata:     panel data (pdata->next is updated too for split panels)
+ * @dfps_data: requested fps plus optional porch/clock values
+ *
+ * Requests below min_fps are rejected with -EINVAL; requests above
+ * max_fps are clamped and dfps_data->fps is updated in place.  The
+ * fb_var screeninfo is refreshed so later screen-info queries see the
+ * new timings.  Returns 0 on success.
+ */
+int mdss_mdp_dfps_update_params(struct msm_fb_data_type *mfd,
+	struct mdss_panel_data *pdata, struct dynamic_fps_data *dfps_data)
+{
+	struct fb_var_screeninfo *var = &mfd->fbi->var;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	u32 dfps = dfps_data->fps;
+
+	mutex_lock(&mdp5_data->dfps_lock);
+
+	pr_debug("new_fps:%d\n", dfps);
+
+	if (dfps < pdata->panel_info.min_fps) {
+		pr_err("Unsupported FPS. min_fps = %d\n",
+				pdata->panel_info.min_fps);
+		mutex_unlock(&mdp5_data->dfps_lock);
+		return -EINVAL;
+	} else if (dfps > pdata->panel_info.max_fps) {
+		pr_warn("Unsupported FPS. Configuring to max_fps = %d\n",
+				pdata->panel_info.max_fps);
+		dfps = pdata->panel_info.max_fps;
+		dfps_data->fps = dfps;
+	}
+
+	dfps_update_panel_params(pdata, dfps_data);
+	if (pdata->next)
+		dfps_update_panel_params(pdata->next, dfps_data);
+
+	/*
+	 * Update the panel info in the upstream
+	 * data, so any further call to get the screen
+	 * info has the updated timings.
+	 */
+	mdss_panelinfo_to_fb_var(&pdata->panel_info, var);
+
+	MDSS_XLOG(dfps);
+	mutex_unlock(&mdp5_data->dfps_lock);
+
+	return 0;
+}
+
+
+/*
+ * dynamic_fps_sysfs_wta_dfps() - sysfs write handler for "dynamic_fps".
+ *
+ * Input format depends on the panel's dfps mode: the two multi-update
+ * clk/hfp modes take "hfp hbp hpw clk_rate fps", every other mode takes
+ * a bare fps value.  Writes are silently accepted (count returned) when
+ * the panel is off or the fps is unchanged.
+ */
+static ssize_t dynamic_fps_sysfs_wta_dfps(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int panel_fps, rc = 0;
+	struct mdss_panel_data *pdata;
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct dynamic_fps_data data = {0};
+
+	if (!mdp5_data->ctl || !mdss_mdp_ctl_is_power_on(mdp5_data->ctl)) {
+		pr_debug("panel is off\n");
+		return count;
+	}
+
+	pdata = dev_get_platdata(&mfd->pdev->dev);
+	if (!pdata) {
+		pr_err("no panel connected for fb%d\n", mfd->index);
+		return -ENODEV;
+	}
+
+	if (!pdata->panel_info.dynamic_fps) {
+		pr_err_once("%s: Dynamic fps not enabled for this panel\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (pdata->panel_info.dfps_update ==
+		DFPS_IMMEDIATE_MULTI_UPDATE_MODE_CLK_HFP ||
+		pdata->panel_info.dfps_update ==
+		DFPS_IMMEDIATE_MULTI_MODE_HFP_CALC_CLK) {
+		if (sscanf(buf, "%u %u %u %u %u",
+		    &data.hfp, &data.hbp, &data.hpw,
+		    &data.clk_rate, &data.fps) != 5) {
+			pr_err("could not read input\n");
+			return -EINVAL;
+		}
+	} else {
+		rc = kstrtoint(buf, 10, &data.fps);
+		if (rc) {
+			pr_err("%s: kstrtoint failed. rc=%d\n", __func__, rc);
+			return rc;
+		}
+	}
+
+	panel_fps = mdss_panel_get_framerate(&pdata->panel_info,
+			FPS_RESOLUTION_DEFAULT);
+
+	if (data.fps == panel_fps) {
+		pr_debug("%s: FPS is already %d\n",
+			__func__, data.fps);
+		return count;
+	}
+
+	/*
+	 * NOTE(review): this range check runs after the "fps unchanged"
+	 * early return above, so out-of-bound porch values paired with
+	 * the current fps are silently accepted — confirm intended.
+	 */
+	if (data.hfp > DFPS_DATA_MAX_HFP || data.hbp > DFPS_DATA_MAX_HBP ||
+		data.hpw > DFPS_DATA_MAX_HPW || data.fps > DFPS_DATA_MAX_FPS ||
+		data.clk_rate > DFPS_DATA_MAX_CLK_RATE){
+		pr_err("Data values out of bound.\n");
+		return -EINVAL;
+	}
+
+	rc = mdss_mdp_dfps_update_params(mfd, pdata, &data);
+	if (rc) {
+		pr_err("failed to set dfps params\n");
+		return rc;
+	}
+
+	return count;
+} /* dynamic_fps_sysfs_wta_dfps */
+
+
+/* sysfs node "dynamic_fps" (rw): read or change the panel refresh rate */
+static DEVICE_ATTR(dynamic_fps, 0644, dynamic_fps_sysfs_rda_dfps,
+	dynamic_fps_sysfs_wta_dfps);
+
+static struct attribute *dynamic_fps_fs_attrs[] = {
+	&dev_attr_dynamic_fps.attr,
+	NULL,
+};
+static struct attribute_group dynamic_fps_fs_attrs_group = {
+	.attrs = dynamic_fps_fs_attrs,
+};
+
+/* sysfs read handler for "vsync_event": last vsync timestamp in ns */
+static ssize_t mdss_mdp_vsync_show_event(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
+	u64 ticks;
+
+	if (!ctl || (!ctl->panel_data->panel_info.cont_splash_enabled
+			&& !mdss_mdp_ctl_is_power_on(ctl)))
+		return -EAGAIN;
+
+	ticks = ktime_to_ns(mdp5_data->vsync_time);
+	pr_debug("fb%d vsync=%llu\n", mfd->index, ticks);
+
+	return scnprintf(buf, PAGE_SIZE, "VSYNC=%llu\n", ticks);
+}
+
+/* sysfs read handler for "lineptr_event": last lineptr timestamp in ns */
+static ssize_t mdss_mdp_lineptr_show_event(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
+	u64 ticks;
+
+	if (!ctl || (!ctl->panel_data->panel_info.cont_splash_enabled
+			&& !mdss_mdp_ctl_is_power_on(ctl)))
+		return -EPERM;
+
+	ticks = ktime_to_ns(mdp5_data->lineptr_time);
+	pr_debug("fb%d lineptr=%llu\n", mfd->index, ticks);
+
+	return scnprintf(buf, PAGE_SIZE, "LINEPTR=%llu\n", ticks);
+}
+
+/* sysfs read handler for "lineptr_value": configured wr_ptr_irq line */
+static ssize_t mdss_mdp_lineptr_show_value(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
+
+	if (!ctl || (!ctl->panel_data->panel_info.cont_splash_enabled
+			&& !mdss_mdp_ctl_is_power_on(ctl)))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n",
+			mfd->panel_info->te.wr_ptr_irq);
+}
+
+/*
+ * sysfs write handler for "lineptr_value": program a new write-pointer
+ * irq line (0..yres) and reprogram the HW if a lineptr updater exists.
+ */
+static ssize_t mdss_mdp_lineptr_set_value(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
+	int rc, new_lineptr;
+
+	if (!ctl || (!ctl->panel_data->panel_info.cont_splash_enabled
+		&& !mdss_mdp_ctl_is_power_on(ctl)))
+		return -EAGAIN;
+
+	rc = kstrtoint(buf, 10, &new_lineptr);
+	if (rc || (new_lineptr < 0)
+		|| (new_lineptr > mfd->panel_info->yres)) {
+		pr_err("Invalid input for lineptr\n");
+		return -EINVAL;
+	}
+
+	if (!mdss_mdp_is_lineptr_supported(ctl)) {
+		pr_err("lineptr not supported\n");
+		return -ENOTSUPP;
+	}
+
+	mutex_lock(&mdp5_data->ov_lock);
+	mfd->panel_info->te.wr_ptr_irq = new_lineptr;
+	if (ctl && ctl->ops.update_lineptr)
+		ctl->ops.update_lineptr(ctl, true);
+	mutex_unlock(&mdp5_data->ov_lock);
+
+	return count;
+}
+
+/* sysfs read handler for "bl_event": backlight event counter */
+static ssize_t mdss_mdp_bl_show_event(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", mdp5_data->bl_events);
+}
+
+/* sysfs read handler for "hist_event": histogram event counter */
+static ssize_t mdss_mdp_hist_show_event(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", mdp5_data->hist_events);
+}
+
+/* sysfs read handler for "ad_event": assertive display event counter */
+static ssize_t mdss_mdp_ad_show_event(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", mdp5_data->ad_events);
+}
+
+/* sysfs read handler for "ad_bl_event": AD backlight event counter */
+static ssize_t mdss_mdp_ad_bl_show_event(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", mdp5_data->ad_bl_events);
+}
+
+/*
+ * mdss_mdp_ad_is_supported() - check whether assertive display can run
+ * on the mixers currently attached to this fb.
+ *
+ * Returns 1 when every attached mixer has an AD block (mixer num within
+ * nad_cfgs), 0 otherwise.  Fix: the original guarded the left-mixer
+ * checks with "mixer &&", making the inner !mixer warning dead code and
+ * treating a missing left mixer as supported; a missing left mixer now
+ * correctly reports unsupported.
+ */
+static inline int mdss_mdp_ad_is_supported(struct msm_fb_data_type *mfd)
+{
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+	struct mdss_mdp_mixer *mixer;
+
+	if (!ctl) {
+		pr_debug("there is no ctl attached to fb\n");
+		return 0;
+	}
+
+	mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT);
+	if (!mixer) {
+		pr_warn("there is no mixer attached to fb\n");
+		return 0;
+	}
+	if (mixer->num > ctl->mdata->nad_cfgs) {
+		pr_debug("mixer attached (%d) doesn't support ad\n",
+			 mixer->num);
+		return 0;
+	}
+
+	mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_RIGHT);
+	if (mixer && (mixer->num > ctl->mdata->nad_cfgs))
+		return 0;
+
+	return 1;
+}
+
+/* sysfs read handler for "ad": AD state, or -1 when AD is unsupported */
+static ssize_t mdss_mdp_ad_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	int state = -1;
+
+	if (mdss_mdp_ad_is_supported(mfd))
+		state = mdp5_data->ad_state;
+
+	return scnprintf(buf, PAGE_SIZE, "%d", state);
+}
+
+/* sysfs write handler for "ad": set AD state and notify readers */
+static ssize_t mdss_mdp_ad_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	int ad;
+
+	if (kstrtoint(buf, 10, &ad)) {
+		pr_err("Invalid input for ad\n");
+		return -EINVAL;
+	}
+
+	mdp5_data->ad_state = ad;
+	sysfs_notify(&dev->kobj, NULL, "ad");
+
+	return count;
+}
+
+/* sysfs read handler for "dyn_pu": partial-update state, -1 when unset */
+static ssize_t mdss_mdp_dyn_pu_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	int state = -1;
+
+	if (mdp5_data->dyn_pu_state >= 0)
+		state = mdp5_data->dyn_pu_state;
+
+	return scnprintf(buf, PAGE_SIZE, "%d", state);
+}
+
+/* sysfs write handler for "dyn_pu": set partial-update state, notify */
+static ssize_t mdss_mdp_dyn_pu_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = fbi->par;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	int rc, req_state;
+
+	rc = kstrtoint(buf, 10, &req_state);
+	if (rc) {
+		pr_err("Invalid input for partial update: ret = %d\n", rc);
+		return rc;
+	}
+
+	mdp5_data->dyn_pu_state = req_state;
+	sysfs_notify(&dev->kobj, NULL, "dyn_pu");
+
+	return count;
+}
+/* sysfs read handler for "msm_cmd_autorefresh_en": current frame count */
+static ssize_t mdss_mdp_cmd_autorefresh_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdss_mdp_ctl *ctl;
+
+	if (!mfd) {
+		pr_err("Invalid mfd structure\n");
+		return -EINVAL;
+	}
+
+	ctl = mfd_to_ctl(mfd);
+	if (!ctl) {
+		pr_err("Invalid ctl structure\n");
+		return -EINVAL;
+	}
+
+	/* autorefresh only exists on MIPI command-mode panels */
+	if (mfd->panel_info->type != MIPI_CMD_PANEL) {
+		pr_err("Panel doesn't support autorefresh\n");
+		return -EINVAL;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+		mdss_mdp_ctl_cmd_get_autorefresh(ctl));
+}
+
+/*
+ * mdss_mdp_cmd_autorefresh_store() - sysfs write handler for
+ * "msm_cmd_autorefresh_en".
+ *
+ * Parses a frame count and programs cmd-mode autorefresh with it; a
+ * count of 0 disables autorefresh.  Sync-point thresholds are switched
+ * between autorefresh (2/0) and normal (1/1) operation to match.
+ * Only valid for MIPI command-mode panels.
+ */
+static ssize_t mdss_mdp_cmd_autorefresh_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t len)
+{
+	int frame_cnt, rc;
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdss_mdp_ctl *ctl;
+
+	if (!mfd) {
+		pr_err("Invalid mfd structure\n");
+		rc = -EINVAL;
+		return rc;
+	}
+
+	ctl = mfd_to_ctl(mfd);
+	if (!ctl) {
+		pr_err("Invalid ctl structure\n");
+		rc = -EINVAL;
+		return rc;
+	}
+
+	if (mfd->panel_info->type != MIPI_CMD_PANEL) {
+		pr_err("Panel doesn't support autorefresh\n");
+		rc = -EINVAL;
+		return rc;
+	}
+
+	rc = kstrtoint(buf, 10, &frame_cnt);
+	if (rc) {
+		pr_err("kstrtoint failed. rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = mdss_mdp_ctl_cmd_set_autorefresh(ctl, frame_cnt);
+	if (rc) {
+		pr_err("cmd_set_autorefresh failed, rc=%d, frame_cnt=%d\n",
+			rc, frame_cnt);
+		return rc;
+	}
+
+	if (frame_cnt) {
+		/* enable/reconfig autorefresh */
+		mfd->mdp_sync_pt_data.threshold = 2;
+		mfd->mdp_sync_pt_data.retire_threshold = 0;
+	} else {
+		/* disable autorefresh */
+		mfd->mdp_sync_pt_data.threshold = 1;
+		mfd->mdp_sync_pt_data.retire_threshold = 1;
+	}
+
+	pr_debug("setting cmd autorefresh to cnt=%d\n", frame_cnt);
+
+	return len;
+}
+
+
+/* Show the most recently captured MISR (CRC) values for batch mode. */
+static ssize_t mdss_mdp_misr_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+
+	if (!mfd) {
+		pr_err("Invalid mfd structure\n");
+		return -EINVAL;
+	}
+
+	if (!mfd_to_ctl(mfd)) {
+		pr_err("Invalid ctl structure\n");
+		return -EINVAL;
+	}
+
+	return mdss_dump_misr_data(&buf, PAGE_SIZE);
+}
+
+/*
+ * mdss_mdp_misr_store() - enable/disable MISR (CRC) batch mode.
+ *
+ * While enabled the driver keeps collecting MISR values in ftrace from
+ * interrupt context until the mode is disabled again.  DSI (including
+ * split-panel) and HDMI interfaces are supported.
+ *
+ * Fixes: zero-initialize req/sreq so fields not set below do not carry
+ * stack garbage into mdss_misr_set()/mdss_misr_disable(); corrected
+ * "fo" typo in the unsupported-interface error message.
+ */
+static ssize_t mdss_mdp_misr_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t len)
+{
+	int enable_misr, rc;
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_ctl *ctl;
+	struct mdp_misr req = {0}, sreq = {0};
+
+	if (!mfd) {
+		pr_err("Invalid mfd structure\n");
+		return -EINVAL;
+	}
+
+	ctl = mfd_to_ctl(mfd);
+	if (!ctl) {
+		pr_err("Invalid ctl structure\n");
+		return -EINVAL;
+	}
+
+	rc = kstrtoint(buf, 10, &enable_misr);
+	if (rc) {
+		pr_err("kstrtoint failed. rc=%d\n", rc);
+		return rc;
+	}
+
+	req.block_id = DISPLAY_MISR_MAX;
+	sreq.block_id = DISPLAY_MISR_MAX;
+
+	pr_debug("intf_type:%d enable:%d\n", ctl->intf_type, enable_misr);
+	if (ctl->intf_type == MDSS_INTF_DSI) {
+
+		req.block_id = DISPLAY_MISR_DSI0;
+		req.crc_op_mode = MISR_OP_BM;
+		req.frame_count = 1;
+		if (is_panel_split(mfd)) {
+
+			sreq.block_id = DISPLAY_MISR_DSI1;
+			sreq.crc_op_mode = MISR_OP_BM;
+			sreq.frame_count = 1;
+		}
+	} else if (ctl->intf_type == MDSS_INTF_HDMI) {
+
+		req.block_id = DISPLAY_MISR_HDMI;
+		req.crc_op_mode = MISR_OP_BM;
+		req.frame_count = 1;
+	} else {
+		pr_err("misr not supported for this fb:%d\n", mfd->index);
+		return -ENODEV;
+	}
+
+	if (enable_misr) {
+		mdss_misr_set(mdata, &req, ctl);
+
+		if ((ctl->intf_type == MDSS_INTF_DSI) && is_panel_split(mfd))
+			mdss_misr_set(mdata, &sreq, ctl);
+
+	} else {
+		mdss_misr_disable(mdata, &req, ctl);
+
+		if ((ctl->intf_type == MDSS_INTF_DSI) && is_panel_split(mfd))
+			mdss_misr_disable(mdata, &sreq, ctl);
+	}
+
+	pr_debug("misr %s\n", enable_misr ? "enabled" : "disabled");
+
+	return len;
+}
+
+/* per-fb sysfs nodes exported by the MDP5 overlay driver */
+static DEVICE_ATTR(msm_misr_en, 0644,
+	mdss_mdp_misr_show, mdss_mdp_misr_store);
+static DEVICE_ATTR(msm_cmd_autorefresh_en, 0644,
+	mdss_mdp_cmd_autorefresh_show, mdss_mdp_cmd_autorefresh_store);
+static DEVICE_ATTR(vsync_event, 0444, mdss_mdp_vsync_show_event, NULL);
+static DEVICE_ATTR(lineptr_event, 0444, mdss_mdp_lineptr_show_event, NULL);
+static DEVICE_ATTR(lineptr_value, 0664,
+		mdss_mdp_lineptr_show_value, mdss_mdp_lineptr_set_value);
+static DEVICE_ATTR(ad, 0664, mdss_mdp_ad_show,
+	mdss_mdp_ad_store);
+static DEVICE_ATTR(dyn_pu, 0664, mdss_mdp_dyn_pu_show,
+	mdss_mdp_dyn_pu_store);
+static DEVICE_ATTR(hist_event, 0444, mdss_mdp_hist_show_event, NULL);
+static DEVICE_ATTR(bl_event, 0444, mdss_mdp_bl_show_event, NULL);
+static DEVICE_ATTR(ad_event, 0444, mdss_mdp_ad_show_event, NULL);
+static DEVICE_ATTR(ad_bl_event, 0444, mdss_mdp_ad_bl_show_event, NULL);
+
+static struct attribute *mdp_overlay_sysfs_attrs[] = {
+	&dev_attr_vsync_event.attr,
+	&dev_attr_lineptr_event.attr,
+	&dev_attr_lineptr_value.attr,
+	&dev_attr_ad.attr,
+	&dev_attr_dyn_pu.attr,
+	&dev_attr_msm_misr_en.attr,
+	&dev_attr_msm_cmd_autorefresh_en.attr,
+	&dev_attr_hist_event.attr,
+	&dev_attr_bl_event.attr,
+	&dev_attr_ad_event.attr,
+	&dev_attr_ad_bl_event.attr,
+	NULL,
+};
+
+static struct attribute_group mdp_overlay_sysfs_group = {
+	.attrs = mdp_overlay_sysfs_attrs,
+};
+
+/* Program the layer-mixer cursor position/size registers. */
+static void mdss_mdp_hw_cursor_setpos(struct mdss_mdp_mixer *mixer,
+		struct mdss_rect *roi, u32 start_x, u32 start_y)
+{
+	/* registers pack the two coordinates as (y << 16) | x */
+	int packed_roi_xy = (roi->y << 16) | roi->x;
+	int packed_start_xy = (start_y << 16) | start_x;
+	int packed_roi_wh = (roi->h << 16) | roi->w;
+
+	if (!mixer) {
+		pr_err("mixer not available\n");
+		return;
+	}
+
+	mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_XY, packed_roi_xy);
+	mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_START_XY,
+			packed_start_xy);
+	mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_SIZE, packed_roi_wh);
+}
+
+/*
+ * mdss_mdp_hw_cursor_setimage() - program the layer-mixer cursor
+ * registers for a new cursor image: image/ROI sizes, stride, base
+ * address, and the blend configuration (constant vs per-pixel alpha,
+ * transparency color key).
+ */
+static void mdss_mdp_hw_cursor_setimage(struct mdss_mdp_mixer *mixer,
+	struct fb_cursor *cursor, u32 cursor_addr, struct mdss_rect *roi)
+{
+	int calpha_en, transp_en, alpha, size;
+	struct fb_image *img = &cursor->image;
+	u32 blendcfg;
+	int roi_size = 0;
+
+	if (!mixer) {
+		pr_err("mixer not available\n");
+		return;
+	}
+
+	/* a bg_color of 0xffffffff disables transparency matching */
+	if (img->bg_color == 0xffffffff)
+		transp_en = 0;
+	else
+		transp_en = 1;
+
+	/* constant alpha comes from the top byte of fg_color */
+	alpha = (img->fg_color & 0xff000000) >> 24;
+
+	if (alpha)
+		calpha_en = 0x0; /* xrgb */
+	else
+		calpha_en = 0x2; /* argb */
+
+	/* sizes are packed as (height << 16) | width */
+	roi_size = (roi->h << 16) | roi->w;
+	size = (img->height << 16) | img->width;
+	mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_IMG_SIZE, size);
+	mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_SIZE, roi_size);
+	mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_STRIDE,
+				img->width * 4);
+	mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_BASE_ADDR,
+				cursor_addr);
+	blendcfg = mdp_mixer_read(mixer, MDSS_MDP_REG_LM_CURSOR_BLEND_CONFIG);
+	blendcfg &= ~0x1;
+	blendcfg |= (transp_en << 3) | (calpha_en << 1);
+	mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_BLEND_CONFIG,
+				blendcfg);
+	if (calpha_en)
+		mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_BLEND_PARAM,
+				alpha);
+
+	if (transp_en) {
+		/*
+		 * NOTE(review): LOW0/HIGH0 and LOW1/HIGH1 are written with
+		 * identical values, i.e. an exact-match color key rather
+		 * than a range — confirm a range key was not intended.
+		 */
+		mdp_mixer_write(mixer,
+				MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_LOW0,
+				((img->bg_color & 0xff00) << 8) |
+				(img->bg_color & 0xff));
+		mdp_mixer_write(mixer,
+				MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_LOW1,
+				((img->bg_color & 0xff0000) >> 16));
+		mdp_mixer_write(mixer,
+				MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_HIGH0,
+				((img->bg_color & 0xff00) << 8) |
+				(img->bg_color & 0xff));
+		mdp_mixer_write(mixer,
+				MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_HIGH1,
+				((img->bg_color & 0xff0000) >> 16));
+	}
+}
+
+/*
+ * mdss_mdp_hw_cursor_blend_config() - toggle the layer-mixer cursor
+ * blend enable bit when the requested enable state differs from what
+ * is currently programmed.  Fix: corrected "availbale" typo in the
+ * error message.
+ */
+static void mdss_mdp_hw_cursor_blend_config(struct mdss_mdp_mixer *mixer,
+		struct fb_cursor *cursor)
+{
+	u32 blendcfg;
+
+	if (!mixer) {
+		pr_err("mixer not available\n");
+		return;
+	}
+
+	blendcfg = mdp_mixer_read(mixer, MDSS_MDP_REG_LM_CURSOR_BLEND_CONFIG);
+	/* "!a != !b" compares both as booleans: only touch HW on change */
+	if (!cursor->enable != !(blendcfg & 0x1)) {
+		if (cursor->enable) {
+			pr_debug("enable hw cursor on mixer=%d\n", mixer->num);
+			blendcfg |= 0x1;
+		} else {
+			pr_debug("disable hw cursor on mixer=%d\n", mixer->num);
+			blendcfg &= ~0x1;
+		}
+
+		mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_BLEND_CONFIG,
+				   blendcfg);
+		mixer->cursor_enabled = cursor->enable;
+		mixer->params_changed++;
+	}
+}
+
+/* Convenience helper: fill an mdp_rect in a single call. */
+static void mdss_mdp_set_rect(struct mdp_rect *rect, u16 x, u16 y, u16 w,
+		u16 h)
+{
+	rect->x = x;
+	rect->w = w;
+	rect->y = y;
+	rect->h = h;
+}
+
+/*
+ * Release the overlay session backing the given cursor pipe and mark
+ * the slot free again.  (Function name keeps the historical "curor"
+ * spelling because external callers reference it.)
+ */
+static void mdss_mdp_curor_pipe_cleanup(struct msm_fb_data_type *mfd,
+		int cursor_pipe)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+
+	if (mdp5_data->cursor_ndx[cursor_pipe] == MSMFB_NEW_REQUEST)
+		return;
+
+	mdss_mdp_overlay_release(mfd, mdp5_data->cursor_ndx[cursor_pipe]);
+	mdp5_data->cursor_ndx[cursor_pipe] = MSMFB_NEW_REQUEST;
+}
+
+/*
+ * mdss_mdp_cursor_flush() - flush the cursor pipe's bits on the ctl,
+ * and on the split ctl when the cursor also sits on the right mixer
+ * and HW split-flush is unavailable.
+ *
+ * Fix: the error path for a missing split ctl returned without
+ * dropping the MDP clock vote taken above, leaking a power reference.
+ *
+ * Returns 0 on success or -ENODEV when the split ctl cannot be found.
+ */
+int mdss_mdp_cursor_flush(struct msm_fb_data_type *mfd,
+		struct mdss_mdp_pipe *pipe, int cursor_pipe)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
+	struct mdss_mdp_ctl *sctl = NULL;
+	int ret = 0;
+	u32 flush_bits = BIT(22 + pipe->num - MDSS_MDP_SSPP_CURSOR0);
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, flush_bits);
+	MDSS_XLOG(ctl->intf_num, flush_bits);
+	if ((!ctl->split_flush_en) && pipe->mixer_right) {
+		sctl = mdss_mdp_get_split_ctl(ctl);
+		if (!sctl) {
+			pr_err("not able to get the other ctl\n");
+			ret = -ENODEV;
+			goto end;
+		}
+		mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_FLUSH, flush_bits);
+		MDSS_XLOG(sctl->intf_num, flush_bits);
+	}
+
+end:
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+
+	return ret;
+}
+
+/*
+ * mdss_mdp_cursor_pipe_setup() - stage one cursor SSPP for the given
+ * slot (CURSOR_PIPE_LEFT/RIGHT): set up the overlay pipe, resolve the
+ * cursor buffer address (iova or phys), queue the buffer (or NULL for
+ * solid fill) and flush the pipe.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int mdss_mdp_cursor_pipe_setup(struct msm_fb_data_type *mfd,
+		struct mdp_overlay *req, int cursor_pipe) {
+	struct mdss_mdp_pipe *pipe;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	int ret = 0;
+	u32 cursor_addr;
+	struct mdss_mdp_data *buf = NULL;
+
+	/* reuse the existing overlay session for this slot, if any */
+	req->id = mdp5_data->cursor_ndx[cursor_pipe];
+	ret = mdss_mdp_overlay_pipe_setup(mfd, req, &pipe, NULL, false);
+	if (ret) {
+		pr_err("cursor pipe setup failed, cursor_pipe:%d, ret:%d\n",
+				cursor_pipe, ret);
+		mdp5_data->cursor_ndx[cursor_pipe] = MSMFB_NEW_REQUEST;
+		return ret;
+	}
+
+	pr_debug("req id:%d cursor_pipe:%d pnum:%d\n",
+		req->id, cursor_pipe, pipe->ndx);
+
+	if (mdata->mdss_util->iommu_attached()) {
+		cursor_addr = mfd->cursor_buf_iova;
+	} else {
+		/* without an IOMMU the 32-bit HW address cannot reach >4GB */
+		if (MDSS_LPAE_CHECK(mfd->cursor_buf_phys)) {
+			pr_err("can't access phy mem >4GB w/o iommu\n");
+			ret = -ERANGE;
+			goto done;
+		}
+		cursor_addr = mfd->cursor_buf_phys;
+	}
+
+	buf = __mdp_overlay_buf_alloc(mfd, pipe);
+	if (!buf) {
+		pr_err("unable to allocate memory for cursor buffer\n");
+		ret = -ENOMEM;
+		goto done;
+	}
+	mdp5_data->cursor_ndx[cursor_pipe] = pipe->ndx;
+	buf->p[0].addr = cursor_addr;
+	buf->p[0].len = mdss_mdp_get_cursor_frame_size(mdata);
+	buf->num_planes = 1;
+
+	buf->state = MDP_BUF_STATE_ACTIVE;
+	if (!(req->flags & MDP_SOLID_FILL))
+		ret = mdss_mdp_pipe_queue_data(pipe, buf);
+	else
+		ret = mdss_mdp_pipe_queue_data(pipe, NULL);
+
+	/*
+	 * NOTE(review): this early return bypasses the done: release
+	 * path and leaves buf attached to the pipe — confirm ownership
+	 * is taken over by the pipe on failure as well.
+	 */
+	if (ret) {
+		pr_err("cursor pipe queue data failed in async mode\n");
+		return ret;
+	}
+
+	ret = mdss_mdp_cursor_flush(mfd, pipe, cursor_pipe);
+done:
+	if (ret && mdp5_data->cursor_ndx[cursor_pipe] == MSMFB_NEW_REQUEST)
+		mdss_mdp_overlay_release(mfd, pipe->ndx);
+
+	return ret;
+}
+
+/*
+ * mdss_mdp_hw_cursor_pipe_update() - update the HW cursor using cursor
+ * SSPPs (overlay pipes) rather than the layer-mixer cursor registers.
+ *
+ * Validates the cursor image and hotspot against panel and HW limits,
+ * lazily allocates the shared cursor buffer on first SETIMAGE, and
+ * stages one or two cursor pipes depending on source-split support and
+ * which layer mixer(s) the cursor ROI touches.  Returns 0 on success
+ * or a negative errno; on failure both cursor pipes are cleaned up.
+ */
+static int mdss_mdp_hw_cursor_pipe_update(struct msm_fb_data_type *mfd,
+				     struct fb_cursor *cursor)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_mixer *mixer;
+	struct fb_image *img = &cursor->image;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdp_overlay *req = NULL;
+	struct mdss_rect roi;
+	int ret = 0;
+	struct fb_var_screeninfo *var = &mfd->fbi->var;
+	u32 xres = var->xres;
+	u32 yres = var->yres;
+	u32 start_x = img->dx;
+	u32 start_y = img->dy;
+	u32 left_lm_w = left_lm_w_from_mfd(mfd);
+	struct platform_device *pdev = mfd->pdev;
+	u32 cursor_frame_size = mdss_mdp_get_cursor_frame_size(mdata);
+
+	ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
+	if (ret)
+		return ret;
+
+	if (mdss_fb_is_power_off(mfd)) {
+		ret = -EPERM;
+		goto done;
+	}
+
+	/* disable request: release both cursor pipes and we are done */
+	if (!cursor->enable) {
+		mdss_mdp_curor_pipe_cleanup(mfd, CURSOR_PIPE_LEFT);
+		mdss_mdp_curor_pipe_cleanup(mfd, CURSOR_PIPE_RIGHT);
+		goto done;
+	}
+
+	mixer = mdss_mdp_mixer_get(mdp5_data->ctl, MDSS_MDP_MIXER_MUX_DEFAULT);
+	if (!mixer) {
+		ret = -ENODEV;
+		goto done;
+	}
+
+	/* first SETIMAGE: allocate the shared cursor buffer once */
+	if (!mfd->cursor_buf && (cursor->set & FB_CUR_SETIMAGE)) {
+		ret = mdss_smmu_dma_alloc_coherent(&pdev->dev,
+			cursor_frame_size, (dma_addr_t *) &mfd->cursor_buf_phys,
+			&mfd->cursor_buf_iova, &mfd->cursor_buf,
+			GFP_KERNEL, MDSS_IOMMU_DOMAIN_UNSECURE);
+		if (ret) {
+			pr_err("can't allocate cursor buffer rc:%d\n", ret);
+			goto done;
+		}
+
+		mixer->cursor_hotx = 0;
+		mixer->cursor_hoty = 0;
+	}
+
+	pr_debug("mixer=%d enable=%x set=%x\n", mixer->num, cursor->enable,
+			cursor->set);
+
+	/* hotspot must lie inside the cursor image */
+	if (cursor->set & FB_CUR_SETHOT) {
+		if ((cursor->hot.x < img->width) &&
+			(cursor->hot.y < img->height)) {
+			mixer->cursor_hotx = cursor->hot.x;
+			mixer->cursor_hoty = cursor->hot.y;
+			 /* Update cursor position */
+			cursor->set |= FB_CUR_SETPOS;
+		} else {
+			pr_err("Invalid cursor hotspot coordinates\n");
+			ret = -EINVAL;
+			goto done;
+		}
+	}
+
+	/* shift start position by the hotspot; clip the ROI instead of
+	 * going negative when the hotspot exceeds the position
+	 */
+	memset(&roi, 0, sizeof(struct mdss_rect));
+	if (start_x > mixer->cursor_hotx) {
+		start_x -= mixer->cursor_hotx;
+	} else {
+		roi.x = mixer->cursor_hotx - start_x;
+		start_x = 0;
+	}
+	if (start_y > mixer->cursor_hoty) {
+		start_y -= mixer->cursor_hoty;
+	} else {
+		roi.y = mixer->cursor_hoty - start_y;
+		start_y = 0;
+	}
+
+	if ((img->width > mdata->max_cursor_size) ||
+		(img->height > mdata->max_cursor_size) ||
+		(img->depth != 32) || (start_x >= xres) ||
+		(start_y >= yres)) {
+		pr_err("Invalid cursor image coordinates\n");
+		ret = -EINVAL;
+		goto done;
+	}
+
+	roi.w = min(xres - start_x, img->width - roi.x);
+	roi.h = min(yres - start_y, img->height - roi.y);
+
+	if ((roi.w > mdata->max_cursor_size) ||
+		(roi.h > mdata->max_cursor_size)) {
+		pr_err("Invalid cursor ROI size\n");
+		ret = -EINVAL;
+		goto done;
+	}
+
+	req = kcalloc(1, sizeof(struct mdp_overlay), GFP_KERNEL);
+	if (!req) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	req->pipe_type = PIPE_TYPE_CURSOR;
+	req->z_order = HW_CURSOR_STAGE(mdata);
+
+	req->src.width = img->width;
+	req->src.height = img->height;
+	req->src.format = mfd->fb_imgType;
+
+	mdss_mdp_set_rect(&req->src_rect, roi.x, roi.y, roi.w, roi.h);
+	mdss_mdp_set_rect(&req->dst_rect, start_x, start_y, roi.w, roi.h);
+
+	req->bg_color = img->bg_color;
+	req->alpha = (img->fg_color >> ((32 - var->transp.offset) - 8)) & 0xff;
+	if (req->alpha)
+		req->blend_op = BLEND_OP_PREMULTIPLIED;
+	else
+		req->blend_op = BLEND_OP_COVERAGE;
+	req->transp_mask = img->bg_color & ~(0xff << var->transp.offset);
+
+	if (mfd->cursor_buf && (cursor->set & FB_CUR_SETIMAGE)) {
+		/*
+		 * NOTE(review): copy_from_user() returns the number of
+		 * bytes NOT copied (a positive count, not -errno); that
+		 * count is what gets logged and propagated as "ret".
+		 */
+		ret = copy_from_user(mfd->cursor_buf, img->data,
+				     img->width * img->height * 4);
+		if (ret) {
+			pr_err("copy_from_user error. rc=%d\n", ret);
+			goto done;
+		}
+
+		mixer->cursor_hotx = 0;
+		mixer->cursor_hoty = 0;
+	}
+
+	/*
+	 * When source split is enabled, only CURSOR_PIPE_LEFT is used,
+	 * with both mixers of the pipe staged all the time.
+	 * When source split is disabled, 2 pipes are staged, with one
+	 * pipe containing the actual data and another one a transparent
+	 * solid fill when the data falls only in left or right dsi.
+	 * Both are done to support async cursor functionality.
+	 */
+	if (mdata->has_src_split || (!is_split_lm(mfd))
+			|| (mdata->ncursor_pipes == 1)) {
+		ret = mdss_mdp_cursor_pipe_setup(mfd, req, CURSOR_PIPE_LEFT);
+	} else if ((start_x + roi.w) <= left_lm_w) {
+		ret = mdss_mdp_cursor_pipe_setup(mfd, req, CURSOR_PIPE_LEFT);
+		if (ret)
+			goto done;
+		req->bg_color = 0;
+		req->flags |= MDP_SOLID_FILL;
+		req->dst_rect.x = left_lm_w;
+		ret = mdss_mdp_cursor_pipe_setup(mfd, req, CURSOR_PIPE_RIGHT);
+	} else if (start_x >= left_lm_w) {
+		ret = mdss_mdp_cursor_pipe_setup(mfd, req, CURSOR_PIPE_RIGHT);
+		if (ret)
+			goto done;
+		req->bg_color = 0;
+		req->flags |= MDP_SOLID_FILL;
+		req->dst_rect.x = 0;
+		ret = mdss_mdp_cursor_pipe_setup(mfd, req, CURSOR_PIPE_LEFT);
+	} else if ((start_x <= left_lm_w) && ((start_x + roi.w) >= left_lm_w)) {
+		/* cursor straddles both mixers: split src/dst rects */
+		mdss_mdp_set_rect(&req->dst_rect, start_x, start_y,
+				(left_lm_w - start_x), roi.h);
+		mdss_mdp_set_rect(&req->src_rect, 0, 0, (left_lm_w -
+				start_x), roi.h);
+		ret = mdss_mdp_cursor_pipe_setup(mfd, req, CURSOR_PIPE_LEFT);
+		if (ret)
+			goto done;
+
+		mdss_mdp_set_rect(&req->dst_rect, left_lm_w, start_y, ((start_x
+				+ roi.w) - left_lm_w), roi.h);
+		mdss_mdp_set_rect(&req->src_rect, (left_lm_w - start_x), 0,
+				(roi.w - (left_lm_w - start_x)), roi.h);
+		ret = mdss_mdp_cursor_pipe_setup(mfd, req, CURSOR_PIPE_RIGHT);
+	} else {
+		pr_err("Invalid case for cursor pipe setup\n");
+		ret = -EINVAL;
+	}
+
+done:
+	if (ret) {
+		mdss_mdp_curor_pipe_cleanup(mfd, CURSOR_PIPE_LEFT);
+		mdss_mdp_curor_pipe_cleanup(mfd, CURSOR_PIPE_RIGHT);
+	}
+
+	kfree(req);
+	mutex_unlock(&mdp5_data->ov_lock);
+	return ret;
+}
+
+/*
+ * mdss_mdp_hw_cursor_update() - program the dedicated hardware cursor.
+ * @mfd:	framebuffer device owning the cursor.
+ * @cursor:	fbdev cursor request (image, hotspot, position, enable).
+ *
+ * Lazily allocates the cursor image buffer on first image update,
+ * validates the requested image and position, copies fresh image data
+ * from user space when FB_CUR_SETIMAGE is set, and programs cursor
+ * position/blend state on the left mixer and, for split-LM panels, the
+ * right mixer as well. Returns 0 on success or a negative errno.
+ */
+static int mdss_mdp_hw_cursor_update(struct msm_fb_data_type *mfd,
+				     struct fb_cursor *cursor)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_mixer *mixer_left = NULL;
+	struct mdss_mdp_mixer *mixer_right = NULL;
+	struct fb_image *img = &cursor->image;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct fbcurpos cursor_hot;
+	struct mdss_rect roi;
+	int ret = 0;
+	u32 xres = mfd->fbi->var.xres;
+	u32 yres = mfd->fbi->var.yres;
+	u32 start_x = img->dx;
+	u32 start_y = img->dy;
+	u32 left_lm_w = left_lm_w_from_mfd(mfd);
+	struct platform_device *pdev = mfd->pdev;
+	u32 cursor_frame_size = mdss_mdp_get_cursor_frame_size(mdata);
+
+	/* The default (left) mixer must exist; split panels need both. */
+	mixer_left = mdss_mdp_mixer_get(mdp5_data->ctl,
+			MDSS_MDP_MIXER_MUX_DEFAULT);
+	if (!mixer_left)
+		return -ENODEV;
+	if (is_split_lm(mfd)) {
+		mixer_right = mdss_mdp_mixer_get(mdp5_data->ctl,
+				MDSS_MDP_MIXER_MUX_RIGHT);
+		if (!mixer_right)
+			return -ENODEV;
+	}
+
+	/* First image update: allocate the shared cursor image buffer. */
+	if (!mfd->cursor_buf && (cursor->set & FB_CUR_SETIMAGE)) {
+		ret = mdss_smmu_dma_alloc_coherent(&pdev->dev,
+			cursor_frame_size, (dma_addr_t *) &mfd->cursor_buf_phys,
+			&mfd->cursor_buf_iova, &mfd->cursor_buf,
+			GFP_KERNEL, MDSS_IOMMU_DOMAIN_UNSECURE);
+		if (ret) {
+			pr_err("can't allocate cursor buffer rc:%d\n", ret);
+			return ret;
+		}
+	}
+
+	/*
+	 * Reject images larger than the HW limit, non-32bpp depth, or a
+	 * start position that is entirely off screen.
+	 */
+	if ((img->width > mdata->max_cursor_size) ||
+		(img->height > mdata->max_cursor_size) ||
+		(img->depth != 32) || (start_x >= xres) || (start_y >= yres))
+		return -EINVAL;
+
+	pr_debug("enable=%x set=%x\n", cursor->enable, cursor->set);
+
+	memset(&cursor_hot, 0, sizeof(struct fbcurpos));
+	memset(&roi, 0, sizeof(struct mdss_rect));
+	/* A new hotspot must lie inside the image and implies a move. */
+	if (cursor->set & FB_CUR_SETHOT) {
+		if ((cursor->hot.x < img->width) &&
+			(cursor->hot.y < img->height)) {
+			cursor_hot.x = cursor->hot.x;
+			cursor_hot.y = cursor->hot.y;
+			 /* Update cursor position */
+			cursor->set |= FB_CUR_SETPOS;
+		} else {
+			pr_err("Invalid cursor hotspot coordinates\n");
+			return -EINVAL;
+		}
+	}
+
+	/*
+	 * Anchor the image at (dx,dy) minus the hotspot; when the hotspot
+	 * pushes the image past the top/left edge, clip the ROI instead of
+	 * using a negative start coordinate.
+	 */
+	if (start_x > cursor_hot.x) {
+		start_x -= cursor_hot.x;
+	} else {
+		roi.x = cursor_hot.x - start_x;
+		start_x = 0;
+	}
+	if (start_y > cursor_hot.y) {
+		start_y -= cursor_hot.y;
+	} else {
+		roi.y = cursor_hot.y - start_y;
+		start_y = 0;
+	}
+
+	/* Clip the visible region to both the screen and the image. */
+	roi.w = min(xres - start_x, img->width - roi.x);
+	roi.h = min(yres - start_y, img->height - roi.y);
+
+	/* Clocks must be on while cursor registers are touched. */
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+	if (mfd->cursor_buf && (cursor->set & FB_CUR_SETIMAGE)) {
+		u32 cursor_addr;
+
+		/* assumes img->data holds width*height 32bpp pixels */
+		ret = copy_from_user(mfd->cursor_buf, img->data,
+				     img->width * img->height * 4);
+		if (ret) {
+			pr_err("copy_from_user error. rc=%d\n", ret);
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+			return ret;
+		}
+
+		/*
+		 * Use the IOVA when the IOMMU is up; otherwise fall back to
+		 * the physical address, which must be below 4GB.
+		 */
+		if (mdata->mdss_util->iommu_attached()) {
+			cursor_addr = mfd->cursor_buf_iova;
+		} else {
+			if (MDSS_LPAE_CHECK(mfd->cursor_buf_phys)) {
+				pr_err("can't access phy mem >4GB w/o iommu\n");
+				mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+				return -ERANGE;
+			}
+			cursor_addr = mfd->cursor_buf_phys;
+		}
+		mdss_mdp_hw_cursor_setimage(mixer_left, cursor, cursor_addr,
+				&roi);
+		if (is_split_lm(mfd))
+			mdss_mdp_hw_cursor_setimage(mixer_right, cursor,
+					cursor_addr, &roi);
+	}
+
+	/*
+	 * Route the cursor to the mixer(s) it overlaps: fully left, fully
+	 * right, or straddling the LM boundary (split the ROI in two).
+	 * The mixer that does not show the cursor gets enable=false.
+	 *
+	 * NOTE(review): on non-split panels mixer_right is NULL yet it is
+	 * still passed to mdss_mdp_hw_cursor_blend_config() on the first
+	 * branch — confirm the callee tolerates a NULL mixer.
+	 */
+	if ((start_x + roi.w) <= left_lm_w) {
+		if (cursor->set & FB_CUR_SETPOS)
+			mdss_mdp_hw_cursor_setpos(mixer_left, &roi, start_x,
+					start_y);
+		mdss_mdp_hw_cursor_blend_config(mixer_left, cursor);
+		cursor->enable = false;
+		mdss_mdp_hw_cursor_blend_config(mixer_right, cursor);
+	} else if (start_x >= left_lm_w) {
+		start_x -= left_lm_w;
+		if (cursor->set & FB_CUR_SETPOS)
+			mdss_mdp_hw_cursor_setpos(mixer_right, &roi, start_x,
+					start_y);
+		mdss_mdp_hw_cursor_blend_config(mixer_right, cursor);
+		cursor->enable = false;
+		mdss_mdp_hw_cursor_blend_config(mixer_left, cursor);
+	} else {
+		struct mdss_rect roi_right = roi;
+
+		/* Left mixer shows the part up to the LM boundary. */
+		roi.w = left_lm_w - start_x;
+		if (cursor->set & FB_CUR_SETPOS)
+			mdss_mdp_hw_cursor_setpos(mixer_left, &roi, start_x,
+					start_y);
+		mdss_mdp_hw_cursor_blend_config(mixer_left, cursor);
+
+		/* Right mixer shows the remainder, starting at x = 0. */
+		roi_right.x = 0;
+		roi_right.w = (start_x + roi_right.w) - left_lm_w;
+		start_x = 0;
+		if (cursor->set & FB_CUR_SETPOS)
+			mdss_mdp_hw_cursor_setpos(mixer_right, &roi_right,
+					start_x, start_y);
+		mdss_mdp_hw_cursor_blend_config(mixer_right, cursor);
+	}
+
+	/* Arm flush bits so the new cursor state latches on next kickoff. */
+	mixer_left->ctl->flush_bits |= BIT(6) << mixer_left->num;
+	if (is_split_lm(mfd))
+		mixer_right->ctl->flush_bits |= BIT(6) << mixer_right->num;
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	return 0;
+}
+
+/*
+ * mdss_bl_scale_config() - apply new backlight scaling parameters.
+ * @mfd:  framebuffer device whose backlight is scaled.
+ * @data: requested scale factor and minimum level.
+ *
+ * Latches the scale/min level under the backlight lock and, if the
+ * backlight is currently on, re-applies the present level so the new
+ * scaling takes effect immediately. Always returns 0.
+ */
+static int mdss_bl_scale_config(struct msm_fb_data_type *mfd,
+						struct mdp_bl_scale_data *data)
+{
+	int bl_lvl;
+
+	mutex_lock(&mfd->bl_lock);
+
+	bl_lvl = mfd->bl_level;
+	mfd->bl_scale = data->scale;
+	mfd->bl_min_lvl = data->min_lvl;
+	pr_debug("update scale = %d, min_lvl = %d\n", mfd->bl_scale,
+							mfd->bl_min_lvl);
+
+	/* Re-program a non-zero backlight so the new scale is visible now */
+	if (bl_lvl)
+		mdss_fb_set_backlight(mfd, bl_lvl);
+
+	mutex_unlock(&mfd->bl_lock);
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_pp_ioctl() - dispatch MSMFB_MDP_PP picture-processing ioctls.
+ * @mfd:  framebuffer device.
+ * @argp: user pointer to a struct msmfb_mdp_pp selecting the operation.
+ *
+ * Copies the request in, routes it to the matching PP handler
+ * (PA/PCC/LUT/dither/gamut/backlight-scale/AD/calibration) and copies
+ * the (possibly updated) request back when the handler sets @copyback.
+ *
+ * NOTE(review): a partial copy_from_user()/copy_to_user() propagates
+ * the positive leftover-byte count to user space as the ioctl return;
+ * most kernel code maps this to -EFAULT — confirm intended.
+ */
+static int mdss_mdp_pp_ioctl(struct msm_fb_data_type *mfd,
+				void __user *argp)
+{
+	int ret;
+	struct msmfb_mdp_pp mdp_pp;
+	u32 copyback = 0;
+	u32 copy_from_kernel = 0;
+
+	ret = copy_from_user(&mdp_pp, argp, sizeof(mdp_pp));
+	if (ret)
+		return ret;
+
+	/* Support only MDP register read/write and
+	 * exit_dcm in DCM state
+	 */
+	if (mfd->dcm_state == DCM_ENTER &&
+			(mdp_pp.op != mdp_op_calib_buffer &&
+			mdp_pp.op != mdp_op_calib_dcm_state))
+		return -EPERM;
+
+	switch (mdp_pp.op) {
+	case mdp_op_pa_cfg:
+		ret = mdss_mdp_pa_config(mfd, &mdp_pp.data.pa_cfg_data,
+					&copyback);
+		break;
+
+	case mdp_op_pa_v2_cfg:
+		ret = mdss_mdp_pa_v2_config(mfd, &mdp_pp.data.pa_v2_cfg_data,
+					&copyback);
+		break;
+
+	case mdp_op_pcc_cfg:
+		ret = mdss_mdp_pcc_config(mfd, &mdp_pp.data.pcc_cfg_data,
+					&copyback);
+		break;
+
+	case mdp_op_lut_cfg:
+		/* LUT requests carry a sub-type selecting IGC/PGC/hist LUT */
+		switch (mdp_pp.data.lut_cfg_data.lut_type) {
+		case mdp_lut_igc:
+			ret = mdss_mdp_igc_lut_config(mfd,
+					(struct mdp_igc_lut_data *)
+					&mdp_pp.data.lut_cfg_data.data,
+					&copyback, copy_from_kernel);
+			break;
+
+		case mdp_lut_pgc:
+			ret = mdss_mdp_argc_config(mfd,
+				&mdp_pp.data.lut_cfg_data.data.pgc_lut_data,
+				&copyback);
+			break;
+
+		case mdp_lut_hist:
+			ret = mdss_mdp_hist_lut_config(mfd,
+				(struct mdp_hist_lut_data *)
+				&mdp_pp.data.lut_cfg_data.data, &copyback);
+			break;
+
+		default:
+			ret = -ENOTSUPP;
+			break;
+		}
+		break;
+	case mdp_op_dither_cfg:
+		ret = mdss_mdp_dither_config(mfd,
+				&mdp_pp.data.dither_cfg_data,
+				&copyback,
+				false);
+		break;
+	case mdp_op_gamut_cfg:
+		ret = mdss_mdp_gamut_config(mfd,
+				&mdp_pp.data.gamut_cfg_data,
+				&copyback);
+		break;
+	case mdp_bl_scale_cfg:
+		ret = mdss_bl_scale_config(mfd, (struct mdp_bl_scale_data *)
+						&mdp_pp.data.bl_scale_data);
+		break;
+	case mdp_op_ad_cfg:
+		ret = mdss_mdp_ad_config(mfd, &mdp_pp.data.ad_init_cfg);
+		break;
+	case mdp_op_ad_input:
+		/* positive return means updated input should be copied back */
+		ret = mdss_mdp_ad_input(mfd, &mdp_pp.data.ad_input, 1);
+		if (ret > 0) {
+			ret = 0;
+			copyback = 1;
+		}
+		break;
+	case mdp_op_calib_cfg:
+		ret = mdss_mdp_calib_config((struct mdp_calib_config_data *)
+					 &mdp_pp.data.calib_cfg, &copyback);
+		break;
+	case mdp_op_calib_mode:
+		ret = mdss_mdp_calib_mode(mfd, &mdp_pp.data.mdss_calib_cfg);
+		break;
+	case mdp_op_calib_buffer:
+		ret = mdss_mdp_calib_config_buffer(
+				(struct mdp_calib_config_buffer *)
+				 &mdp_pp.data.calib_buffer, &copyback);
+		break;
+	case mdp_op_calib_dcm_state:
+		ret = mdss_fb_dcm(mfd, mdp_pp.data.calib_dcm.dcm_state);
+		break;
+	default:
+		pr_err("Unsupported request to MDP_PP IOCTL. %d = op\n",
+								mdp_pp.op);
+		ret = -EINVAL;
+		break;
+	}
+	/* Hand results (e.g. read-back LUTs, MISR values) to user space */
+	if ((ret == 0) && copyback)
+		ret = copy_to_user(argp, &mdp_pp, sizeof(struct msmfb_mdp_pp));
+	return ret;
+}
+
+static int mdss_mdp_histo_ioctl(struct msm_fb_data_type *mfd, u32 cmd,
+				void __user *argp)
+{
+	int ret = -ENOTSUP;
+	struct mdp_histogram_data hist;
+	struct mdp_histogram_start_req hist_req;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 block;
+
+	if (!mdata)
+		return -EPERM;
+
+	switch (cmd) {
+	case MSMFB_HISTOGRAM_START:
+		if (mdss_fb_is_power_off(mfd))
+			return -EPERM;
+
+		ret = copy_from_user(&hist_req, argp, sizeof(hist_req));
+		if (ret)
+			return ret;
+
+		ret = mdss_mdp_hist_start(&hist_req);
+		break;
+
+	case MSMFB_HISTOGRAM_STOP:
+		ret = copy_from_user(&block, argp, sizeof(int));
+		if (ret)
+			return ret;
+
+		ret = mdss_mdp_hist_stop(block);
+		if (ret)
+			return ret;
+		break;
+
+	case MSMFB_HISTOGRAM:
+		if (mdss_fb_is_power_off(mfd)) {
+			pr_err("mfd is turned off MSMFB_HISTOGRAM failed\n");
+			return -EPERM;
+		}
+
+		ret = copy_from_user(&hist, argp, sizeof(hist));
+		if (ret)
+			return ret;
+
+		ret = mdss_mdp_hist_collect(&hist);
+		if (!ret)
+			ret = copy_to_user(argp, &hist, sizeof(hist));
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+/*
+ * mdss_fb_set_metadata() - handle MSMFB_METADATA_SET requests.
+ * @mfd:      framebuffer device.
+ * @metadata: decoded metadata request from user space.
+ *
+ * Supports storing the HDMI video identification code on the panel and
+ * configuring MISR (CRC) capture. Returns 0 or a negative errno.
+ */
+static int mdss_fb_set_metadata(struct msm_fb_data_type *mfd,
+				struct msmfb_metadata *metadata)
+{
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+	int rc = 0;
+
+	if (!ctl)
+		return -EPERM;
+
+	switch (metadata->op) {
+	case metadata_op_vic:
+		/* Cache the video identification code on the panel info */
+		if (!mfd->panel_info) {
+			rc = -EINVAL;
+			break;
+		}
+		mfd->panel_info->vic = metadata->data.video_info_code;
+		break;
+	case metadata_op_crc:
+		/* MISR capture requires the interface to be powered on */
+		if (mdss_fb_is_power_off(mfd))
+			return -EPERM;
+		rc = mdss_misr_set(mdata, &metadata->data.misr_request, ctl);
+		break;
+	default:
+		pr_warn("unsupported request to MDP META IOCTL\n");
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+/*
+ * mdss_fb_get_hw_caps() - report MDP hardware capabilities to user space.
+ * @mfd:  framebuffer device.
+ * @caps: capability structure to fill in.
+ *
+ * Fills in revision, pipe counts, feature flags and SMP pool geometry.
+ * Always returns 0.
+ */
+static int mdss_fb_get_hw_caps(struct msm_fb_data_type *mfd,
+		struct mdss_hw_caps *caps)
+{
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+
+	/* MDP revision and per-type pipe inventory */
+	caps->mdp_rev = mdata->mdp_rev;
+	caps->dma_pipes = mdata->ndma_pipes;
+	caps->rgb_pipes = mdata->nrgb_pipes;
+	caps->vig_pipes = mdata->nvig_pipes;
+
+	/* Optional features are advertised as flag bits */
+	if (mdata->has_bwc)
+		caps->features |= MDP_BWC_EN;
+	if (mdata->has_decimation)
+		caps->features |= MDP_DECIMATION_EN;
+
+	/* Shared memory pool geometry, when the target has one */
+	if (mdata->smp_mb_cnt) {
+		caps->max_smp_cnt = mdata->smp_mb_cnt;
+		caps->smp_per_pipe = mdata->smp_mb_per_pipe;
+	}
+
+	return 0;
+}
+
+/*
+ * mdss_fb_get_metadata() - handle MSMFB_METADATA_GET requests.
+ * @mfd:      framebuffer device.
+ * @metadata: request/response structure; result fields are filled in.
+ *
+ * Supports querying panel frame rate, HW capabilities, an ion fd for
+ * the framebuffer memory, and MISR (CRC) read-back. Returns 0 or a
+ * negative errno.
+ */
+static int mdss_fb_get_metadata(struct msm_fb_data_type *mfd,
+				struct msmfb_metadata *metadata)
+{
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+	struct mdss_mdp_ctl *ctl = NULL;
+	int ret = 0;
+
+	switch (metadata->op) {
+	case metadata_op_frame_rate:
+		metadata->data.panel_frame_rate =
+			mdss_panel_get_framerate(mfd->panel_info,
+				FPS_RESOLUTION_DEFAULT);
+		pr_debug("current fps:%d\n", metadata->data.panel_frame_rate);
+		break;
+	case metadata_op_get_caps:
+		ret = mdss_fb_get_hw_caps(mfd, &metadata->data.caps);
+		break;
+	case metadata_op_get_ion_fd:
+		/*
+		 * Take an extra dma_buf reference before sharing; it is
+		 * dropped again if fd allocation fails. On success the fd
+		 * owns the reference (presumably released when user space
+		 * closes the fd — confirm against ion semantics).
+		 */
+		if (mfd->fb_ion_handle && mfd->fb_ion_client) {
+			get_dma_buf(mfd->fbmem_buf);
+			metadata->data.fbmem_ionfd =
+				ion_share_dma_buf_fd(mfd->fb_ion_client,
+					mfd->fb_ion_handle);
+			if (metadata->data.fbmem_ionfd < 0) {
+				dma_buf_put(mfd->fbmem_buf);
+				pr_err("fd allocation failed. fd = %d\n",
+						metadata->data.fbmem_ionfd);
+			}
+		}
+		break;
+	case metadata_op_crc:
+		/* MISR read-back needs an active ctl and a powered panel */
+		ctl = mfd_to_ctl(mfd);
+		if (!ctl || mdss_fb_is_power_off(mfd))
+			return -EPERM;
+		ret = mdss_misr_get(mdata, &metadata->data.misr_request, ctl,
+			ctl->is_video_mode);
+		break;
+	default:
+		pr_warn("Unsupported request to MDP META IOCTL.\n");
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+/*
+ * __mdss_mdp_clean_dirty_pipes() - release all pipes flagged dirty.
+ * @mfd: framebuffer device.
+ *
+ * Scans the in-use pipe list under the list lock, accumulates the ndx
+ * mask of dirty pipes, then releases them outside the lock. Returns the
+ * released ndx mask (0 when nothing was dirty).
+ */
+static int __mdss_mdp_clean_dirty_pipes(struct msm_fb_data_type *mfd)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_pipe *p;
+	int dirty_ndx = 0;
+
+	mutex_lock(&mdp5_data->list_lock);
+	list_for_each_entry(p, &mdp5_data->pipes_used, list)
+		if (p->dirty)
+			dirty_ndx |= p->ndx;
+	mutex_unlock(&mdp5_data->list_lock);
+
+	if (dirty_ndx)
+		mdss_mdp_overlay_release(mfd, dirty_ndx);
+
+	return dirty_ndx;
+}
+
+/*
+ * mdss_mdp_overlay_precommit() - validate state before an atomic commit.
+ * @mfd: framebuffer device about to commit.
+ *
+ * Cleans up pipes user space lost track of and validates the display
+ * mode-switch state. Returns 0 when the commit may proceed, -EPIPE when
+ * dirty pipes had to be reclaimed, or another negative errno.
+ */
+static int mdss_mdp_overlay_precommit(struct msm_fb_data_type *mfd)
+{
+	struct mdss_overlay_private *mdp5_data;
+	int ret;
+
+	if (!mfd)
+		return -ENODEV;
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+	if (!mdp5_data)
+		return -ENODEV;
+
+	/* Interruptible: the caller is a user-space commit path */
+	ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
+	if (ret)
+		return ret;
+
+	/*
+	 * we can assume that any pipes that are still dirty at this point are
+	 * not properly tracked by user land. This could be for any reason,
+	 * mark them for cleanup at this point.
+	 */
+	ret = __mdss_mdp_clean_dirty_pipes(mfd);
+	if (ret) {
+		pr_warn("fb%d: dirty pipes remaining %x\n",
+				mfd->index, ret);
+		ret = -EPIPE;
+	}
+
+	/*
+	 * If we are in process of mode switch we may have an invalid state.
+	 * We can allow commit to happen if there are no pipes attached as only
+	 * border color will be seen regardless of resolution or mode.
+	 */
+	if ((mfd->switch_state != MDSS_MDP_NO_UPDATE_REQUESTED) &&
+			(mfd->switch_state != MDSS_MDP_WAIT_FOR_COMMIT)) {
+		if (list_empty(&mdp5_data->pipes_used)) {
+			mfd->switch_state = MDSS_MDP_WAIT_FOR_COMMIT;
+		} else {
+			pr_warn("Invalid commit on fb%d with state=%d\n",
+					mfd->index, mfd->switch_state);
+			ret = -EINVAL;
+		}
+	}
+	mutex_unlock(&mdp5_data->ov_lock);
+
+	return ret;
+}
+
+/*
+ * __mdss_overlay_map() - map results from the sorted overlay list back
+ * to the caller's original list.
+ *
+ * Serves two purposes:
+ * 1. Propagates the overlay_id/priority assigned while processing the
+ *    sorted list into the original (user-space ordered) list.
+ * 2. On a processing error, translates the failing index in the sorted
+ *    list to the matching index in the original list, since user space
+ *    never sees the sorted order.
+ *
+ * Entries are matched by (dst_rect.x, z_order). Returns the translated
+ * processed-count/failure index.
+ */
+static int __mdss_overlay_map(struct mdp_overlay *ovs,
+	struct mdp_overlay *op_ovs, int num_ovs, int num_ovs_processed)
+{
+	int mapped = num_ovs_processed;
+	int s, o;
+
+	for (s = 0; s < num_ovs; s++) {
+		/* locate the original entry matching this sorted one */
+		for (o = 0; o < num_ovs; o++) {
+			if ((ovs[s].dst_rect.x == op_ovs[o].dst_rect.x) &&
+			    (ovs[s].z_order == op_ovs[o].z_order)) {
+				op_ovs[o].id = ovs[s].id;
+				op_ovs[o].priority = ovs[s].priority;
+				break;
+			}
+		}
+
+		/* translate the failing sorted index to its original slot */
+		if ((mapped != num_ovs) && (mapped == s)) {
+			pr_debug("mapped %d->%d\n", mapped, o);
+			mapped = o;
+		}
+	}
+
+	return mapped;
+}
+
+/* sort() callback: exchange two overlay descriptors in place. */
+static inline void __overlay_swap_func(void *a, void *b, int size)
+{
+	struct mdp_overlay tmp = *(struct mdp_overlay *)a;
+
+	*(struct mdp_overlay *)a = *(struct mdp_overlay *)b;
+	*(struct mdp_overlay *)b = tmp;
+}
+
+/*
+ * sort() comparator: order overlays by z_order first, then by
+ * destination x within the same z_order. Never reports "greater",
+ * matching the original comparator's 0/-1 contract.
+ */
+static inline int __zorder_dstx_cmp_func(const void *a, const void *b)
+{
+	const struct mdp_overlay *l = a;
+	const struct mdp_overlay *r = b;
+
+	if (l->z_order < r->z_order)
+		return -1;
+	if ((l->z_order == r->z_order) && (l->dst_rect.x < r->dst_rect.x))
+		return -1;
+
+	return 0;
+}
+
+/*
+ * __mdss_overlay_src_split_sort() - sort overlays for source split and
+ * validate per-stage occupancy.
+ *
+ * Sorts @ovs by z_order, then by dst_x within equal z_order, and checks
+ * that each blend stage is used by at most two overlays per layer
+ * mixer. Returns 0 on success or -EINVAL on an invalid stage/overload.
+ */
+static int __mdss_overlay_src_split_sort(struct msm_fb_data_type *mfd,
+	struct mdp_overlay *ovs, int num_ovs)
+{
+	int i;
+	int left_lm_zo_cnt[MDSS_MDP_MAX_STAGE] = {0};
+	int right_lm_zo_cnt[MDSS_MDP_MAX_STAGE] = {0};
+	u32 left_lm_w = left_lm_w_from_mfd(mfd);
+
+	sort(ovs, num_ovs, sizeof(struct mdp_overlay), __zorder_dstx_cmp_func,
+		__overlay_swap_func);
+
+	for (i = 0; i < num_ovs; i++) {
+		const struct mdp_overlay *ov = &ovs[i];
+
+		if (ov->z_order >= MDSS_MDP_MAX_STAGE) {
+			pr_err("invalid stage:%u\n", ov->z_order);
+			return -EINVAL;
+		}
+
+		/* a stage may hold at most two overlays on each mixer */
+		if (ov->dst_rect.x < left_lm_w) {
+			if (left_lm_zo_cnt[ov->z_order] == 2) {
+				pr_err("more than 2 ov @ stage%u on left lm\n",
+					ov->z_order);
+				return -EINVAL;
+			}
+			left_lm_zo_cnt[ov->z_order]++;
+		} else {
+			if (right_lm_zo_cnt[ov->z_order] == 2) {
+				pr_err("more than 2 ov @ stage%u on right lm\n",
+					ov->z_order);
+				return -EINVAL;
+			}
+			right_lm_zo_cnt[ov->z_order]++;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * __handle_overlay_prepare() - validate and stage a list of overlays.
+ * @mfd:    framebuffer device.
+ * @ovlist: overlay-list header; processed_overlays is updated for the
+ *          caller even on error.
+ * @ip_ovs: the overlay array in user-space order.
+ *
+ * When source split is supported and more than one overlay is given,
+ * the list is first sorted (z_order, then dst_x) so right-blend pairs
+ * can be detected. Each overlay is staged on a pipe; on any failure all
+ * newly-created pipes are released. Returns 0 or a negative errno.
+ *
+ * Fix vs. original: the -ENOMEM and sort-failure paths returned while
+ * still holding mdp5_data->ov_lock, deadlocking every later overlay
+ * ioctl; both paths now drop the lock first.
+ */
+static int __handle_overlay_prepare(struct msm_fb_data_type *mfd,
+	struct mdp_overlay_list *ovlist, struct mdp_overlay *ip_ovs)
+{
+	int ret, i;
+	int new_reqs = 0, left_cnt = 0, right_cnt = 0;
+	int num_ovs = ovlist->num_overlays;
+	u32 left_lm_w = left_lm_w_from_mfd(mfd);
+	u32 left_lm_ovs = 0, right_lm_ovs = 0;
+	bool is_single_layer = false;
+
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+
+	struct mdp_overlay *sorted_ovs = NULL;
+	struct mdp_overlay *req, *prev_req;
+
+	/* NULL-init: 'pipe' is read as the previous iteration's pipe */
+	struct mdss_mdp_pipe *pipe = NULL, *left_blend_pipe;
+	struct mdss_mdp_pipe *right_plist[MAX_PIPES_PER_LM] = { 0 };
+	struct mdss_mdp_pipe *left_plist[MAX_PIPES_PER_LM] = { 0 };
+
+	bool sort_needed = mdata->has_src_split && (num_ovs > 1);
+
+	ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
+	if (ret)
+		return ret;
+
+	if (mdss_fb_is_power_off(mfd)) {
+		mutex_unlock(&mdp5_data->ov_lock);
+		return -EPERM;
+	}
+
+	if (sort_needed) {
+		sorted_ovs = kcalloc(num_ovs, sizeof(*ip_ovs), GFP_KERNEL);
+		if (!sorted_ovs) {
+			pr_err("error allocating ovlist mem\n");
+			mutex_unlock(&mdp5_data->ov_lock);
+			return -ENOMEM;
+		}
+		memcpy(sorted_ovs, ip_ovs, num_ovs * sizeof(*ip_ovs));
+		ret = __mdss_overlay_src_split_sort(mfd, sorted_ovs, num_ovs);
+		if (ret) {
+			pr_err("src_split_sort failed. ret=%d\n", ret);
+			kfree(sorted_ovs);
+			mutex_unlock(&mdp5_data->ov_lock);
+			return ret;
+		}
+	}
+
+	pr_debug("prepare fb%d num_ovs=%d\n", mfd->index, num_ovs);
+
+	/* Count overlays per mixer to detect the single-layer case */
+	for (i = 0; i < num_ovs; i++) {
+		if (IS_RIGHT_MIXER_OV(ip_ovs[i].flags, ip_ovs[i].dst_rect.x,
+			left_lm_w))
+			right_lm_ovs++;
+		else
+			left_lm_ovs++;
+
+		if ((left_lm_ovs > 1) && (right_lm_ovs > 1))
+			break;
+	}
+
+	for (i = 0; i < num_ovs; i++) {
+		left_blend_pipe = NULL;
+
+		if (sort_needed) {
+			req = &sorted_ovs[i];
+			prev_req = (i > 0) ? &sorted_ovs[i - 1] : NULL;
+
+			/*
+			 * check if current overlay is at same z_order as
+			 * previous one and qualifies as a right blend. If yes,
+			 * pass a pointer to the pipe representing previous
+			 * overlay or in other terms left blend overlay.
+			 */
+			if (prev_req && (prev_req->z_order == req->z_order) &&
+			    is_ov_right_blend(&prev_req->dst_rect,
+				    &req->dst_rect, left_lm_w)) {
+				left_blend_pipe = pipe;
+			}
+		} else {
+			req = &ip_ovs[i];
+		}
+
+		if (IS_RIGHT_MIXER_OV(ip_ovs[i].flags, ip_ovs[i].dst_rect.x,
+			left_lm_w))
+			is_single_layer = (right_lm_ovs == 1);
+		else
+			is_single_layer = (left_lm_ovs == 1);
+
+		/* setup works on absolute stages; restore the user value */
+		req->z_order += MDSS_MDP_STAGE_0;
+		ret = mdss_mdp_overlay_pipe_setup(mfd, req, &pipe,
+			left_blend_pipe, is_single_layer);
+		req->z_order -= MDSS_MDP_STAGE_0;
+
+		if (IS_ERR_VALUE(ret))
+			goto validate_exit;
+
+		pr_debug("pnum:%d id:0x%x flags:0x%x dst_x:%d l_blend_pnum%d\n",
+			pipe->num, req->id, req->flags, req->dst_rect.x,
+			left_blend_pipe ? left_blend_pipe->num : -1);
+
+		/* keep track of the new overlays to unset in case of errors */
+		if (pipe->play_cnt == 0)
+			new_reqs |= pipe->ndx;
+
+		if (IS_RIGHT_MIXER_OV(pipe->flags, pipe->dst.x, left_lm_w)) {
+			if (right_cnt >= MAX_PIPES_PER_LM) {
+				pr_err("too many pipes on right mixer\n");
+				ret = -EINVAL;
+				goto validate_exit;
+			}
+			right_plist[right_cnt] = pipe;
+			right_cnt++;
+		} else {
+			if (left_cnt >= MAX_PIPES_PER_LM) {
+				pr_err("too many pipes on left mixer\n");
+				ret = -EINVAL;
+				goto validate_exit;
+			}
+			left_plist[left_cnt] = pipe;
+			left_cnt++;
+		}
+	}
+
+	ret = mdss_mdp_perf_bw_check(mdp5_data->ctl, left_plist, left_cnt,
+			right_plist, right_cnt);
+
+validate_exit:
+	/* report progress in the caller's (unsorted) index space */
+	if (sort_needed)
+		ovlist->processed_overlays =
+			__mdss_overlay_map(sorted_ovs, ip_ovs, num_ovs, i);
+	else
+		ovlist->processed_overlays = i;
+
+	if (IS_ERR_VALUE(ret)) {
+		pr_debug("err=%d total_ovs:%d processed:%d left:%d right:%d\n",
+			ret, num_ovs, ovlist->processed_overlays, left_lm_ovs,
+			right_lm_ovs);
+		mdss_mdp_overlay_release(mfd, new_reqs);
+	}
+	mutex_unlock(&mdp5_data->ov_lock);
+
+	kfree(sorted_ovs);
+
+	return ret;
+}
+
+/*
+ * __handle_ioctl_overlay_prepare() - MSMFB_OVERLAY_PREPARE entry point.
+ * @mfd:  framebuffer device.
+ * @argp: user pointer to a struct mdp_overlay_list.
+ *
+ * Copies the overlay pointer list and each overlay in, runs the
+ * validation/staging pass, then copies the (id/priority-updated)
+ * overlays and the list header back out. Returns 0 or a negative errno.
+ *
+ * NOTE(review): num_overlays == 0 is not rejected; presumably
+ * kmalloc_array(0, ...) and the empty loops make this a harmless no-op
+ * prepare — confirm.
+ */
+static int __handle_ioctl_overlay_prepare(struct msm_fb_data_type *mfd,
+		void __user *argp)
+{
+	struct mdp_overlay_list ovlist;
+	struct mdp_overlay *req_list[OVERLAY_MAX];
+	struct mdp_overlay *overlays;
+	int i, ret;
+
+	if (!mfd_to_ctl(mfd))
+		return -ENODEV;
+
+	if (copy_from_user(&ovlist, argp, sizeof(ovlist)))
+		return -EFAULT;
+
+	/* bound the user-supplied count before sizing allocations */
+	if (ovlist.num_overlays > OVERLAY_MAX) {
+		pr_err("Number of overlays exceeds max\n");
+		return -EINVAL;
+	}
+
+	overlays = kmalloc_array(ovlist.num_overlays, sizeof(*overlays),
+				 GFP_KERNEL);
+	if (!overlays)
+		return -ENOMEM;
+
+	/* first the array of user pointers ... */
+	if (copy_from_user(req_list, ovlist.overlay_list,
+				sizeof(struct mdp_overlay *) *
+				ovlist.num_overlays)) {
+		ret = -EFAULT;
+		goto validate_exit;
+	}
+
+	/* ... then each overlay they point at */
+	for (i = 0; i < ovlist.num_overlays; i++) {
+		if (copy_from_user(overlays + i, req_list[i],
+				sizeof(struct mdp_overlay))) {
+			ret = -EFAULT;
+			goto validate_exit;
+		}
+	}
+
+	ret = __handle_overlay_prepare(mfd, &ovlist, overlays);
+	if (!IS_ERR_VALUE(ret)) {
+		/* propagate assigned ids/priorities back to user space */
+		for (i = 0; i < ovlist.num_overlays; i++) {
+			if (copy_to_user(req_list[i], overlays + i,
+					sizeof(struct mdp_overlay))) {
+				ret = -EFAULT;
+				goto validate_exit;
+			}
+		}
+	}
+
+	/* header carries processed_overlays even on prepare failure */
+	if (copy_to_user(argp, &ovlist, sizeof(ovlist)))
+		ret = -EFAULT;
+
+validate_exit:
+	kfree(overlays);
+
+	return ret;
+}
+
+/*
+ * mdss_mdp_overlay_ioctl_handler() - top-level overlay ioctl dispatcher.
+ * @mfd:  framebuffer device.
+ * @cmd:  MSMFB_* ioctl command.
+ * @argp: user argument pointer for @cmd.
+ *
+ * Routes overlay, post-processing, histogram and metadata ioctls to
+ * their handlers. Returns 0/-errno; unknown commands yield -ENOTSUPP.
+ *
+ * Fixes vs. original: -ENOTSUP is not a kernel errno (use -ENOTSUPP as
+ * elsewhere in this file), and MSMFB_OVERLAY_UNSET wrongly wrapped
+ * copy_from_user() in IS_ERR_VALUE(), which treated a partial copy
+ * (small positive leftover count) as success and passed an
+ * uninitialized 'val' to the unset path.
+ */
+static int mdss_mdp_overlay_ioctl_handler(struct msm_fb_data_type *mfd,
+					  u32 cmd, void __user *argp)
+{
+	struct mdp_overlay *req = NULL;
+	int val, ret = -ENOTSUPP;
+	struct msmfb_metadata metadata;
+	struct mdp_pp_feature_version pp_feature_version;
+	struct msmfb_overlay_data data;
+	struct mdp_set_cfg cfg;
+
+	switch (cmd) {
+	case MSMFB_MDP_PP:
+		ret = mdss_mdp_pp_ioctl(mfd, argp);
+		break;
+	case MSMFB_MDP_PP_GET_FEATURE_VERSION:
+		ret = copy_from_user(&pp_feature_version, argp,
+				     sizeof(pp_feature_version));
+		if (ret) {
+			pr_err("copy_from_user failed for pp_feature_version\n");
+			ret = -EFAULT;
+		} else {
+			ret = mdss_mdp_pp_get_version(&pp_feature_version);
+			if (!ret) {
+				ret = copy_to_user(argp, &pp_feature_version,
+						sizeof(pp_feature_version));
+				if (ret) {
+					pr_err("copy_to_user failed for pp_feature_version\n");
+					ret = -EFAULT;
+				}
+			} else {
+				pr_err("get pp version failed ret %d\n", ret);
+			}
+		}
+		break;
+	case MSMFB_HISTOGRAM_START:
+	case MSMFB_HISTOGRAM_STOP:
+	case MSMFB_HISTOGRAM:
+		ret = mdss_mdp_histo_ioctl(mfd, cmd, argp);
+		break;
+
+	case MSMFB_OVERLAY_GET:
+		/* struct mdp_overlay is large; keep it off the stack */
+		req = kmalloc(sizeof(struct mdp_overlay), GFP_KERNEL);
+		if (!req)
+			return -ENOMEM;
+		ret = copy_from_user(req, argp, sizeof(*req));
+		if (!ret) {
+			ret = mdss_mdp_overlay_get(mfd, req);
+
+			if (!IS_ERR_VALUE(ret))
+				ret = copy_to_user(argp, req, sizeof(*req));
+		}
+
+		if (ret)
+			pr_debug("OVERLAY_GET failed (%d)\n", ret);
+		break;
+
+	case MSMFB_OVERLAY_SET:
+		req = kmalloc(sizeof(struct mdp_overlay), GFP_KERNEL);
+		if (!req)
+			return -ENOMEM;
+		ret = copy_from_user(req, argp, sizeof(*req));
+		if (!ret) {
+			ret = mdss_mdp_overlay_set(mfd, req);
+
+			/* copy back so user space sees the assigned id */
+			if (!IS_ERR_VALUE(ret))
+				ret = copy_to_user(argp, req, sizeof(*req));
+		}
+		if (ret)
+			pr_debug("OVERLAY_SET failed (%d)\n", ret);
+		break;
+
+	case MSMFB_OVERLAY_UNSET:
+		/*
+		 * copy_from_user() returns the count of bytes NOT copied,
+		 * never an errno-range value, so only a return of zero
+		 * means 'val' is fully populated.
+		 */
+		if (!copy_from_user(&val, argp, sizeof(val)))
+			ret = mdss_mdp_overlay_unset(mfd, val);
+		break;
+
+	case MSMFB_OVERLAY_PLAY:
+		ret = copy_from_user(&data, argp, sizeof(data));
+		if (!ret)
+			ret = mdss_mdp_overlay_play(mfd, &data);
+
+		if (ret)
+			pr_debug("OVERLAY_PLAY failed (%d)\n", ret);
+		break;
+
+	case MSMFB_OVERLAY_VSYNC_CTRL:
+		if (!copy_from_user(&val, argp, sizeof(val))) {
+			ret = mdss_mdp_overlay_vsync_ctrl(mfd, val);
+		} else {
+			pr_err("MSMFB_OVERLAY_VSYNC_CTRL failed (%d)\n", ret);
+			ret = -EFAULT;
+		}
+		break;
+
+	case MSMFB_METADATA_SET:
+		ret = copy_from_user(&metadata, argp, sizeof(metadata));
+		if (ret)
+			return ret;
+		ret = mdss_fb_set_metadata(mfd, &metadata);
+		break;
+
+	case MSMFB_METADATA_GET:
+		ret = copy_from_user(&metadata, argp, sizeof(metadata));
+		if (ret)
+			return ret;
+		ret = mdss_fb_get_metadata(mfd, &metadata);
+		if (!ret)
+			ret = copy_to_user(argp, &metadata, sizeof(metadata));
+		break;
+
+	case MSMFB_OVERLAY_PREPARE:
+		ret = __handle_ioctl_overlay_prepare(mfd, argp);
+		break;
+	case MSMFB_MDP_SET_CFG:
+		ret = copy_from_user(&cfg, argp, sizeof(cfg));
+		if (ret) {
+			pr_err("copy failed MSMFB_MDP_SET_CFG ret %d\n", ret);
+			ret = -EFAULT;
+			break;
+		}
+		ret = mdss_mdp_set_cfg(mfd, &cfg);
+		break;
+
+	default:
+		break;
+	}
+
+	/* req is only allocated by the GET/SET paths; kfree(NULL) is ok */
+	kfree(req);
+	return ret;
+}
+
+/**
+ * __mdss_mdp_overlay_ctl_init - Helper function to initialize control structure
+ * @mfd: msm frame buffer data structure associated with the fb device.
+ *
+ * Helper function that allocates and initializes the mdp control structure
+ * for a frame buffer device. Whenever applicable, this function will also setup
+ * the control for the split display path as well.
+ *
+ * Return: pointer to the newly allocated control structure.
+ */
+static struct mdss_mdp_ctl *__mdss_mdp_overlay_ctl_init(
+	struct msm_fb_data_type *mfd)
+{
+	int rc = 0;
+	struct mdss_mdp_ctl *ctl;
+	struct mdss_panel_data *pdata;
+	struct mdss_overlay_private *mdp5_data;
+
+	if (!mfd)
+		return ERR_PTR(-EINVAL);
+
+	pdata = dev_get_platdata(&mfd->pdev->dev);
+	if (!pdata) {
+		pr_err("no panel connected for fb%d\n", mfd->index);
+		rc = -ENODEV;
+		goto error;
+	}
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+	if (!mdp5_data) {
+		rc = -EINVAL;
+		goto error;
+	}
+
+	/*
+	 * NOTE(review): if mdss_mdp_ctl_init() ever returned NULL (rather
+	 * than an ERR_PTR), PTR_ERR(NULL) == 0 and this function would
+	 * return the NULL pointer through the rc == 0 exit path — confirm
+	 * ctl_init never returns plain NULL.
+	 */
+	ctl = mdss_mdp_ctl_init(pdata, mfd);
+	if (IS_ERR_OR_NULL(ctl)) {
+		pr_err("Unable to initialize ctl for fb%d\n",
+			mfd->index);
+		rc = PTR_ERR(ctl);
+		goto error;
+	}
+	ctl->is_master = true;
+	/* wire up the per-ctl callbacks used by the overlay path */
+	ctl->vsync_handler.vsync_handler =
+					mdss_mdp_overlay_handle_vsync;
+	ctl->vsync_handler.cmd_post_flush = false;
+
+	ctl->recover_underrun_handler.vsync_handler =
+			mdss_mdp_recover_underrun_handler;
+	ctl->recover_underrun_handler.cmd_post_flush = false;
+
+	ctl->frc_vsync_handler.vsync_handler =
+			mdss_mdp_overlay_frc_handler;
+	ctl->frc_vsync_handler.cmd_post_flush = false;
+
+	ctl->lineptr_handler.lineptr_handler =
+					mdss_mdp_overlay_handle_lineptr;
+
+	INIT_WORK(&ctl->remove_underrun_handler,
+				remove_underrun_vsync_handler);
+
+	if (mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
+		/* enable split display */
+		rc = mdss_mdp_ctl_split_display_setup(ctl, pdata->next);
+		if (rc) {
+			mdss_mdp_ctl_destroy(ctl);
+			goto error;
+		}
+	}
+
+	mdp5_data->ctl = ctl;
+error:
+	if (rc)
+		return ERR_PTR(rc);
+	else
+		return ctl;
+}
+
+/*
+ * mdss_mdp_set_lm_flag() - pick the layer-mixer split mode for a panel.
+ * @mfd: framebuffer device being (re)configured.
+ *
+ * Chooses between single-LM and dual-LM-single-display based on whether
+ * the framebuffer width exceeds the maximum mixer width (typical for
+ * HDMI mode changes). Panels with explicit per-LM widths are left
+ * untouched, since their split mode was already decided.
+ */
+static void mdss_mdp_set_lm_flag(struct msm_fb_data_type *mfd)
+{
+	struct mdss_data_type *mdata;
+	u32 fb_width;
+
+	/* explicit lm_widths imply split_mode is already configured */
+	if (mfd->panel_info->lm_widths[0] && mfd->panel_info->lm_widths[1])
+		return;
+
+	mdata = mdss_mdp_get_mdata();
+	fb_width = mfd->fbi->var.xres;
+
+	if ((mfd->split_mode == MDP_SPLIT_MODE_NONE ||
+			mfd->split_mode == MDP_DUAL_LM_SINGLE_DISPLAY) &&
+			(fb_width > mdata->max_mixer_width)) {
+		/* too wide for one mixer: split evenly across two LMs */
+		fb_width /= 2;
+		mfd->split_mode = MDP_DUAL_LM_SINGLE_DISPLAY;
+		mfd->split_fb_left = fb_width;
+		mfd->split_fb_right = fb_width;
+	} else if (is_dual_lm_single_display(mfd) &&
+		   (fb_width <= mdata->max_mixer_width)) {
+		/* fits in a single mixer again: drop back to one LM */
+		mfd->split_mode = MDP_SPLIT_MODE_NONE;
+		mfd->split_fb_left = 0;
+		mfd->split_fb_right = 0;
+	}
+}
+
+/*
+ * mdss_mdp_handle_invalid_switch_state() - recover from an aborted mode
+ * switch.
+ * @mfd: framebuffer device stuck mid-switch.
+ *
+ * Clears the switch state and, for command-mode panels, waits for the
+ * outstanding pingpong done and moves all in-flight buffers to the free
+ * list so they are not leaked.
+ */
+static void mdss_mdp_handle_invalid_switch_state(struct msm_fb_data_type *mfd)
+{
+	int rc = 0;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
+	struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
+	struct mdss_mdp_data *buf, *tmpbuf;
+
+	mfd->switch_state = MDSS_MDP_NO_UPDATE_REQUESTED;
+
+	/*
+	 * Handle only for cmd mode panels as for video mode, buffers
+	 * cannot be freed at this point. Needs revisting to handle the
+	 * use case for video mode panels.
+	 */
+	if (mfd->panel_info->type == MIPI_CMD_PANEL) {
+		/* wait on both ctls of a split display before touching lists */
+		if (ctl->ops.wait_pingpong)
+			rc = ctl->ops.wait_pingpong(ctl, NULL);
+		if (!rc && sctl && sctl->ops.wait_pingpong)
+			rc = sctl->ops.wait_pingpong(sctl, NULL);
+		if (rc) {
+			pr_err("wait for pp failed\n");
+			return;
+		}
+
+		/* reclaim every buffer that was queued for the display */
+		mutex_lock(&mdp5_data->list_lock);
+		list_for_each_entry_safe(buf, tmpbuf,
+				&mdp5_data->bufs_used, buf_list)
+			list_move(&buf->buf_list, &mdp5_data->bufs_freelist);
+		mutex_unlock(&mdp5_data->list_lock);
+	}
+}
+
+/*
+ * mdss_mdp_overlay_on() - power on the overlay path for a framebuffer.
+ * @mfd: framebuffer device being turned on.
+ *
+ * Lazily creates the mdp ctl (and writeback context for WB panels),
+ * resets the interface, and either starts the overlay pipeline or only
+ * sets up the ctl when continuous splash / handoff is still pending.
+ * On failure the overlay path is turned back off. Returns 0 or a
+ * negative errno.
+ */
+static int mdss_mdp_overlay_on(struct msm_fb_data_type *mfd)
+{
+	int rc;
+	struct mdss_overlay_private *mdp5_data;
+	struct mdss_mdp_ctl *ctl = NULL;
+	struct mdss_data_type *mdata;
+
+	if (!mfd)
+		return -ENODEV;
+
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+	if (!mdp5_data)
+		return -EINVAL;
+
+	mdata = mfd_to_mdata(mfd);
+	if (!mdata)
+		return -EINVAL;
+
+	/* re-evaluate single vs dual LM before bringing the path up */
+	mdss_mdp_set_lm_flag(mfd);
+
+	/* first power-on: allocate and wire up the ctl */
+	if (!mdp5_data->ctl) {
+		ctl = __mdss_mdp_overlay_ctl_init(mfd);
+		if (IS_ERR_OR_NULL(ctl))
+			return PTR_ERR(ctl);
+	} else {
+		ctl = mdp5_data->ctl;
+	}
+
+	/* writeback panels additionally need a wfd context */
+	if (mfd->panel_info->type == WRITEBACK_PANEL && !mdp5_data->wfd) {
+		mdp5_data->wfd = mdss_mdp_wfd_init(&mfd->pdev->dev, ctl);
+		if (IS_ERR_OR_NULL(mdp5_data->wfd)) {
+			rc = PTR_ERR(mdp5_data->wfd);
+			goto panel_on;
+		}
+	}
+
+	/* doze/LP exit: panel never lost power, just restart the ctl */
+	if (mdss_fb_is_power_on(mfd)) {
+		pr_debug("panel was never turned off\n");
+		rc = mdss_mdp_ctl_start(ctl, false);
+		goto panel_on;
+	}
+
+	rc = mdss_mdp_ctl_intf_event(mdp5_data->ctl, MDSS_EVENT_RESET,
+		NULL, false);
+	if (rc)
+		goto panel_on;
+
+	/* Skip the overlay start and kickoff for all displays
+	 * if handoff is pending. Previously we skipped it for DTV
+	 * panel and pluggable panels (bridge chip hdmi case). But
+	 * it does not cover the case where there is a non pluggable
+	 * tertiary display. Using the flag handoff_pending to skip
+	 * overlay start and kickoff should cover all cases
+	 * TODO: In the long run, the overlay start and kickoff
+	 * should not be skipped, instead, the handoff can be done
+	 */
+	if (!mfd->panel_info->cont_splash_enabled &&
+		!mdata->handoff_pending) {
+		rc = mdss_mdp_overlay_start(mfd);
+		if (rc)
+			goto end;
+		if (mfd->panel_info->type != WRITEBACK_PANEL) {
+			atomic_inc(&mfd->mdp_sync_pt_data.commit_cnt);
+			rc = mdss_mdp_overlay_kickoff(mfd, NULL);
+		}
+	} else {
+		/* splash/handoff active: only program the ctl topology */
+		rc = mdss_mdp_ctl_setup(ctl);
+		if (rc)
+			goto end;
+	}
+
+panel_on:
+	/* unwind a failed power-on so the path is left consistent */
+	if (IS_ERR_VALUE(rc)) {
+		pr_err("Failed to turn on fb%d\n", mfd->index);
+		mdss_mdp_overlay_off(mfd);
+		goto end;
+	}
+
+end:
+	return rc;
+}
+
+/*
+ * mdss_mdp_handoff_cleanup_ctl() - tear down a ctl inherited from the
+ * bootloader.
+ * @mfd: framebuffer device whose handoff state is being cleaned up.
+ *
+ * Frees the splash framebuffer pipe, drains any remaining staged or
+ * cleanup-pending pipes via a kickoff, stops the ctl, purges the free
+ * list if the panel is fully off, and finally runs splash cleanup.
+ * Returns the splash-cleanup status (the ctl-stop status is only used
+ * to gate the purge, matching the original behavior).
+ */
+static int mdss_mdp_handoff_cleanup_ctl(struct msm_fb_data_type *mfd)
+{
+	int rc;
+	int cleanup_needed;
+	struct mdss_overlay_private *mdp5_data;
+
+	if (!mfd)
+		return -ENODEV;
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+
+	mdss_mdp_overlay_free_fb_pipe(mfd);
+
+	/* kick off once if any pipe is still staged or queued for cleanup */
+	mutex_lock(&mdp5_data->list_lock);
+	cleanup_needed = !list_empty(&mdp5_data->pipes_cleanup) ||
+		!list_empty(&mdp5_data->pipes_used);
+	mutex_unlock(&mdp5_data->list_lock);
+	if (cleanup_needed)
+		mdss_mdp_overlay_kickoff(mfd, NULL);
+
+	rc = mdss_mdp_ctl_stop(mdp5_data->ctl, mfd->panel_power_state);
+	if (!rc && mdss_fb_is_power_off(mfd)) {
+		mutex_lock(&mdp5_data->list_lock);
+		__mdss_mdp_overlay_free_list_purge(mfd);
+		mutex_unlock(&mdp5_data->list_lock);
+	}
+
+	rc = mdss_mdp_splash_cleanup(mfd, false);
+	if (rc)
+		pr_err("%s: failed splash clean up %d\n", __func__, rc);
+
+	return rc;
+}
+
+/*
+ * mdss_mdp_overlay_off() - blank-time shutdown of the overlay path.
+ *
+ * Flushes pipes queued for cleanup, waits up to one vsync period for
+ * outstanding retire fences (signalling them as a last resort), stops the
+ * ctl and, once the panel is fully powered off, releases buffers and the
+ * commit notifier, optionally destroying the ctl. A runtime-PM reference
+ * is held for the whole sequence to distinguish this path from idle power
+ * collapse (see comment below).
+ *
+ * Return: 0 on success or a negative errno from ctl stop / runtime PM.
+ */
+static int mdss_mdp_overlay_off(struct msm_fb_data_type *mfd)
+{
+	int rc;
+	struct mdss_overlay_private *mdp5_data;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_mixer *mixer;
+	int need_cleanup;
+	int retire_cnt;
+	bool destroy_ctl = false;
+
+	if (!mfd)
+		return -ENODEV;
+
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+
+	if (!mdp5_data || !mdp5_data->ctl) {
+		pr_err("ctl not initialized\n");
+		return -ENODEV;
+	}
+
+	/*
+	 * Keep a reference to the runtime pm until the overlay is turned
+	 * off, and then release this last reference at the end. This will
+	 * help in distinguishing between idle power collapse versus suspend
+	 * power collapse
+	 */
+	pm_runtime_get_sync(&mfd->pdev->dev);
+
+	/* Low-power panel state: skip pipe/fence teardown, only stop ctl */
+	if (mdss_fb_is_power_on_lp(mfd)) {
+		pr_debug("panel not turned off. keeping overlay on\n");
+		goto ctl_stop;
+	}
+
+	mutex_lock(&mdp5_data->ov_lock);
+
+	mdss_mdp_overlay_free_fb_pipe(mfd);
+
+	mixer = mdss_mdp_mixer_get(mdp5_data->ctl, MDSS_MDP_MIXER_MUX_LEFT);
+	if (mixer)
+		mixer->cursor_enabled = 0;
+
+	mixer = mdss_mdp_mixer_get(mdp5_data->ctl, MDSS_MDP_MIXER_MUX_RIGHT);
+	if (mixer)
+		mixer->cursor_enabled = 0;
+
+	mutex_lock(&mdp5_data->list_lock);
+	need_cleanup = !list_empty(&mdp5_data->pipes_cleanup);
+	mutex_unlock(&mdp5_data->list_lock);
+	mutex_unlock(&mdp5_data->ov_lock);
+
+	destroy_ctl = !mfd->ref_cnt || mfd->panel_reconfig;
+
+	/* An in-flight mode switch cannot survive a blank: force ctl
+	 * destruction and skip the cleanup kickoff.
+	 */
+	mutex_lock(&mfd->switch_lock);
+	if (mfd->switch_state != MDSS_MDP_NO_UPDATE_REQUESTED) {
+		destroy_ctl = true;
+		need_cleanup = false;
+		pr_warn("fb%d blank while mode switch (%d) in progress\n",
+				mfd->index, mfd->switch_state);
+		mdss_mdp_handle_invalid_switch_state(mfd);
+	}
+	mutex_unlock(&mfd->switch_lock);
+
+	if (need_cleanup) {
+		pr_debug("cleaning up pipes on fb%d\n", mfd->index);
+		if (mdata->handoff_pending)
+			mdp5_data->allow_kickoff = true;
+
+		mdss_mdp_overlay_kickoff(mfd, NULL);
+	} else if (!mdss_mdp_ctl_is_power_on(mdp5_data->ctl)) {
+		/* ctl already off: nothing to stop, just drop handed-off
+		 * splash state and the ctl itself on a panel reconfig.
+		 */
+		if (mfd->panel_reconfig) {
+			if (mfd->panel_info->cont_splash_enabled)
+				mdss_mdp_handoff_cleanup_ctl(mfd);
+
+			mdp5_data->borderfill_enable = false;
+			mdss_mdp_ctl_destroy(mdp5_data->ctl);
+			mdp5_data->ctl = NULL;
+		}
+		goto end;
+	}
+
+	/*
+	 * If retire fences are still active wait for a vsync time
+	 * for retire fence to be updated.
+	 * As a last resort signal the timeline if vsync doesn't arrive.
+	 */
+	mutex_lock(&mfd->mdp_sync_pt_data.sync_mutex);
+	retire_cnt = mdp5_data->retire_cnt;
+	mutex_unlock(&mfd->mdp_sync_pt_data.sync_mutex);
+	if (retire_cnt) {
+		u32 fps = mdss_panel_get_framerate(mfd->panel_info,
+					FPS_RESOLUTION_HZ);
+		u32 vsync_time = 1000 / (fps ? : DEFAULT_FRAME_RATE);
+
+		msleep(vsync_time);
+
+		mutex_lock(&mfd->mdp_sync_pt_data.sync_mutex);
+		retire_cnt = mdp5_data->retire_cnt;
+		mutex_unlock(&mfd->mdp_sync_pt_data.sync_mutex);
+		__vsync_retire_signal(mfd, retire_cnt);
+
+		/*
+		 * the retire work can still schedule after above retire_signal
+		 * api call. Flush workqueue guarantees that current caller
+		 * context is blocked till retire_work finishes. Any work
+		 * schedule after flush call should not cause any issue because
+		 * retire_signal api checks for retire_cnt with sync_mutex lock.
+		 */
+
+		flush_kthread_work(&mdp5_data->vsync_work);
+	}
+
+ctl_stop:
+	mutex_lock(&mdp5_data->ov_lock);
+	/* set the correct pipe_mapped before ctl_stop */
+	mdss_mdp_mixer_update_pipe_map(mdp5_data->ctl,
+			MDSS_MDP_MIXER_MUX_LEFT);
+	mdss_mdp_mixer_update_pipe_map(mdp5_data->ctl,
+			MDSS_MDP_MIXER_MUX_RIGHT);
+	rc = mdss_mdp_ctl_stop(mdp5_data->ctl, mfd->panel_power_state);
+	if (rc == 0) {
+		if (mdss_fb_is_power_off(mfd)) {
+			mutex_lock(&mdp5_data->list_lock);
+			__mdss_mdp_overlay_free_list_purge(mfd);
+			if (!mfd->ref_cnt)
+				mdss_mdp_overlay_buf_deinit(mfd);
+			mutex_unlock(&mdp5_data->list_lock);
+			mdss_mdp_ctl_notifier_unregister(mdp5_data->ctl,
+					&mfd->mdp_sync_pt_data.notifier);
+
+			if (destroy_ctl) {
+				mdp5_data->borderfill_enable = false;
+				mdss_mdp_ctl_destroy(mdp5_data->ctl);
+				mdp5_data->ctl = NULL;
+			}
+
+			atomic_dec(&mdp5_data->mdata->active_intf_cnt);
+
+			/* drop the active-display PM vote, unless idle power
+			 * collapse keeps command-mode panels runtime-managed
+			 */
+			if (!mdp5_data->mdata->idle_pc_enabled ||
+				(mfd->panel_info->type != MIPI_CMD_PANEL)) {
+				rc = pm_runtime_put(&mfd->pdev->dev);
+				if (rc)
+					pr_err("unable to suspend w/pm_runtime_put (%d)\n",
+						rc);
+			}
+		}
+	}
+	mutex_unlock(&mdp5_data->ov_lock);
+
+	if (mdp5_data->wfd) {
+		mdss_mdp_wfd_deinit(mdp5_data->wfd);
+		mdp5_data->wfd = NULL;
+	}
+
+end:
+	/* Release the last reference to the runtime device */
+	rc = pm_runtime_put(&mfd->pdev->dev);
+	if (rc)
+		pr_err("unable to suspend w/pm_runtime_put (%d)\n", rc);
+
+	return rc;
+}
+
+/*
+ * __mdss_mdp_ctl_handoff() - reconstruct software state for pipes the
+ * bootloader left staged on @ctl's interface mixers.
+ *
+ * Reads each CTL_LAYER register, decodes the per-pipe 3-bit stage field
+ * (VIG3/RGB3 are shifted by 2 extra bits for the cursor/border bits, per
+ * the comment below), and for every staged pipe performs a pipe handoff,
+ * adds it to the pipes_used list and hands it off to its mixer. Only
+ * RECT0 is considered since the bootloader display uses RECT0.
+ *
+ * Return: 0 on success, or the first pipe/mixer handoff error.
+ */
+static int __mdss_mdp_ctl_handoff(struct msm_fb_data_type *mfd,
+	struct mdss_mdp_ctl *ctl, struct mdss_data_type *mdata)
+{
+	int rc = 0;
+	int i, j;
+	u32 mixercfg;
+	struct mdss_mdp_pipe *pipe = NULL;
+	struct mdss_overlay_private *mdp5_data;
+
+	if (!ctl || !mdata)
+		return -EINVAL;
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+
+	for (i = 0; i < mdata->nmixers_intf; i++) {
+		mixercfg = mdss_mdp_ctl_read(ctl, MDSS_MDP_REG_CTL_LAYER(i));
+		pr_debug("for lm%d mixercfg = 0x%09x\n", i, mixercfg);
+
+		j = MDSS_MDP_SSPP_VIG0;
+		/* loop terminates early once all staged bits are consumed */
+		for (; j < MDSS_MDP_SSPP_CURSOR0 && mixercfg; j++) {
+			u32 cfg = j * 3;
+
+			if ((j == MDSS_MDP_SSPP_VIG3) ||
+			    (j == MDSS_MDP_SSPP_RGB3)) {
+				/* Add 2 to account for Cursor & Border bits */
+				cfg += 2;
+			}
+			if (mixercfg & (0x7 << cfg)) {
+				pr_debug("Pipe %d staged\n", j);
+				/* bootloader display always uses RECT0 */
+				pipe = mdss_mdp_pipe_search(mdata, BIT(j),
+					MDSS_MDP_PIPE_RECT0);
+				if (!pipe) {
+					pr_warn("Invalid pipe %d staged\n", j);
+					continue;
+				}
+
+				rc = mdss_mdp_pipe_handoff(pipe);
+				if (rc) {
+					pr_err("Failed to handoff pipe%d\n",
+						pipe->num);
+					goto exit;
+				}
+
+				pipe->mfd = mfd;
+				mutex_lock(&mdp5_data->list_lock);
+				list_add(&pipe->list, &mdp5_data->pipes_used);
+				mutex_unlock(&mdp5_data->list_lock);
+
+				rc = mdss_mdp_mixer_handoff(ctl, i, pipe);
+				if (rc) {
+					pr_err("failed to handoff mix%d\n", i);
+					goto exit;
+				}
+			}
+		}
+	}
+exit:
+	return rc;
+}
+
+/**
+ * mdss_mdp_overlay_handoff() - Read MDP registers to handoff an active ctl path
+ * @mfd: Msm frame buffer structure associated with the fb device.
+ *
+ * This function populates the MDP software structures with the current state of
+ * the MDP hardware to handoff any active control path for the framebuffer
+ * device. This is needed to identify any ctl, mixers and pipes being set up by
+ * the bootloader to display the splash screen when the continuous splash screen
+ * feature is enabled in kernel.
+ */
+static int mdss_mdp_overlay_handoff(struct msm_fb_data_type *mfd)
+{
+	int rc = 0;
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_ctl *ctl = NULL;
+	struct mdss_mdp_ctl *sctl = NULL;
+
+	/* Reuse an existing ctl if one was already created for this fb */
+	if (!mdp5_data->ctl) {
+		ctl = __mdss_mdp_overlay_ctl_init(mfd);
+		if (IS_ERR_OR_NULL(ctl)) {
+			rc = PTR_ERR(ctl);
+			goto error;
+		}
+	} else {
+		ctl = mdp5_data->ctl;
+	}
+
+	/*
+	 * vsync interrupt needs on during continuous splash, this is
+	 * to initialize necessary ctl members here.
+	 */
+	rc = mdss_mdp_ctl_start(ctl, true);
+	if (rc) {
+		pr_err("Failed to initialize ctl\n");
+		goto error;
+	}
+
+	ctl->clk_rate = mdss_mdp_get_clk_rate(MDSS_CLK_MDP_CORE, false);
+	pr_debug("Set the ctl clock rate to %d Hz\n", ctl->clk_rate);
+
+	rc = __mdss_mdp_ctl_handoff(mfd, ctl, mdata);
+	if (rc) {
+		pr_err("primary ctl handoff failed. rc=%d\n", rc);
+		goto error;
+	}
+
+	/* Dual-LM dual-display also requires the slave ctl handed off */
+	if (mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
+		sctl = mdss_mdp_get_split_ctl(ctl);
+		if (!sctl) {
+			pr_err("cannot get secondary ctl. fail the handoff\n");
+			rc = -EPERM;
+			goto error;
+		}
+		rc = __mdss_mdp_ctl_handoff(mfd, sctl, mdata);
+		if (rc) {
+			pr_err("secondary ctl handoff failed. rc=%d\n", rc);
+			goto error;
+		}
+	}
+
+	rc = mdss_mdp_smp_handoff(mdata);
+	if (rc)
+		pr_err("Failed to handoff smps\n");
+
+	mdp5_data->handoff = true;
+
+error:
+	/* On failure, undo everything: release pipes of every type and
+	 * destroy the (possibly freshly created) ctl.
+	 */
+	if (rc && ctl) {
+		mdss_mdp_handoff_cleanup_pipes(mfd, MDSS_MDP_PIPE_TYPE_RGB);
+		mdss_mdp_handoff_cleanup_pipes(mfd, MDSS_MDP_PIPE_TYPE_VIG);
+		mdss_mdp_handoff_cleanup_pipes(mfd, MDSS_MDP_PIPE_TYPE_DMA);
+		mdss_mdp_ctl_destroy(ctl);
+		mdp5_data->ctl = NULL;
+		mdp5_data->handoff = false;
+	}
+
+	return rc;
+}
+
+/*
+ * Vsync callback for the retire fence path: runs in interrupt-ish ctl
+ * context, so it only defers the actual fence signalling to the
+ * dedicated kthread worker.
+ */
+static void __vsync_retire_handle_vsync(struct mdss_mdp_ctl *ctl, ktime_t t)
+{
+	struct msm_fb_data_type *fb_data = ctl->mfd;
+	struct mdss_overlay_private *ov_priv;
+
+	if (!fb_data || !fb_data->mdp.private1) {
+		pr_warn("Invalid handle for vsync\n");
+		return;
+	}
+
+	ov_priv = mfd_to_mdp5_data(fb_data);
+	queue_kthread_work(&ov_priv->worker, &ov_priv->vsync_work);
+}
+
+/*
+ * Kthread worker body: signal exactly one retire fence per queued vsync,
+ * provided the ctl still exists and supports vsync handler removal.
+ */
+static void __vsync_retire_work_handler(struct kthread_work *work)
+{
+	struct mdss_overlay_private *ov_priv =
+		container_of(work, typeof(*ov_priv), vsync_work);
+	struct mdss_mdp_ctl *ctl = ov_priv->ctl;
+
+	if (ctl && ctl->mfd && ctl->ops.remove_vsync_handler)
+		__vsync_retire_signal(ctl->mfd, 1);
+}
+
+/*
+ * __vsync_retire_signal() - advance the retire sw_sync timeline by @val.
+ *
+ * Increments the timeline by @val while decrementing retire_cnt by at
+ * most retire_cnt (the min() guards against signalling more than is
+ * pending). Once no retire fences remain, the vsync handler is removed
+ * under an explicit MDP clock vote. Serialized against fence creation by
+ * mdp_sync_pt_data.sync_mutex.
+ */
+static void __vsync_retire_signal(struct msm_fb_data_type *mfd, int val)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+
+	mutex_lock(&mfd->mdp_sync_pt_data.sync_mutex);
+	if (mdp5_data->retire_cnt > 0) {
+		sw_sync_timeline_inc(mdp5_data->vsync_timeline, val);
+		mdp5_data->retire_cnt -= min(val, mdp5_data->retire_cnt);
+		pr_debug("Retire signaled! timeline val=%d remaining=%d\n",
+				mdp5_data->vsync_timeline->value,
+				mdp5_data->retire_cnt);
+
+		if (mdp5_data->retire_cnt == 0) {
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+			mdp5_data->ctl->ops.remove_vsync_handler(mdp5_data->ctl,
+					&mdp5_data->vsync_retire_handler);
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+		}
+	}
+	mutex_unlock(&mfd->mdp_sync_pt_data.sync_mutex);
+}
+
+/*
+ * __vsync_retire_get_fence() - create a retire fence for the next frame.
+ *
+ * The fence fires when the retire timeline reaches its current value plus
+ * all previously pending retires plus one, i.e. when this frame has
+ * retired. retire_cnt is bumped here without taking sync_mutex --
+ * presumably the mdss_fb caller already holds it (it is the lock used
+ * everywhere else for retire_cnt); verify against the caller.
+ *
+ * Return: new fence, or ERR_PTR(-ENODEV/-EOPNOTSUPP/-EPERM) when there is
+ * no ctl, no vsync support, or the ctl is not yet powered on.
+ */
+static struct sync_fence *
+__vsync_retire_get_fence(struct msm_sync_pt_data *sync_pt_data)
+{
+	struct msm_fb_data_type *mfd;
+	struct mdss_overlay_private *mdp5_data;
+	struct mdss_mdp_ctl *ctl;
+	int value;
+
+	mfd = container_of(sync_pt_data, typeof(*mfd), mdp_sync_pt_data);
+	mdp5_data = mfd_to_mdp5_data(mfd);
+
+	if (!mdp5_data || !mdp5_data->ctl)
+		return ERR_PTR(-ENODEV);
+
+	ctl = mdp5_data->ctl;
+	if (!ctl->ops.add_vsync_handler)
+		return ERR_PTR(-EOPNOTSUPP);
+
+	if (!mdss_mdp_ctl_is_power_on(ctl)) {
+		pr_debug("fb%d vsync pending first update\n", mfd->index);
+		return ERR_PTR(-EPERM);
+	}
+
+	value = mdp5_data->vsync_timeline->value + 1 + mdp5_data->retire_cnt;
+	mdp5_data->retire_cnt++;
+
+	return mdss_fb_sync_get_fence(mdp5_data->vsync_timeline,
+			"mdp-retire", value);
+}
+
+/*
+ * __vsync_set_vsync_handler() - arm the retire vsync handler when there
+ * are pending retire fences and it is not already enabled.
+ *
+ * Return: 0 if nothing to do or on successful registration,
+ * -EOPNOTSUPP if the ctl cannot register vsync handlers,
+ * -EPERM if the ctl has not been powered on yet.
+ */
+static int __vsync_set_vsync_handler(struct msm_fb_data_type *mfd)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_ctl *ctl;
+	int rc;
+	int retire_cnt;
+
+	ctl = mdp5_data->ctl;
+	/* read retire_cnt under its lock; a stale read just means we try
+	 * again on the next call
+	 */
+	mutex_lock(&mfd->mdp_sync_pt_data.sync_mutex);
+	retire_cnt = mdp5_data->retire_cnt;
+	mutex_unlock(&mfd->mdp_sync_pt_data.sync_mutex);
+	if (!retire_cnt || mdp5_data->vsync_retire_handler.enabled)
+		return 0;
+
+	if (!ctl->ops.add_vsync_handler)
+		return -EOPNOTSUPP;
+
+	if (!mdss_mdp_ctl_is_power_on(ctl)) {
+		pr_debug("fb%d vsync pending first update\n", mfd->index);
+		return -EPERM;
+	}
+
+	rc = ctl->ops.add_vsync_handler(ctl,
+			&mdp5_data->vsync_retire_handler);
+	return rc;
+}
+
+/*
+ * __vsync_retire_setup() - create the per-fb retire sw_sync timeline and
+ * the SCHED_FIFO kthread worker that signals it on vsync, then hook the
+ * retire-fence getter into mdp_sync_pt_data.
+ *
+ * Return: 0 on success, -ENOMEM if the timeline or worker thread could
+ * not be created.
+ */
+static int __vsync_retire_setup(struct msm_fb_data_type *mfd)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	char name[24];
+	struct sched_param param = { .sched_priority = 5 };
+
+	snprintf(name, sizeof(name), "mdss_fb%d_retire", mfd->index);
+	mdp5_data->vsync_timeline = sw_sync_timeline_create(name);
+	if (mdp5_data->vsync_timeline == NULL) {
+		pr_err("cannot vsync create time line");
+		return -ENOMEM;
+	}
+
+	init_kthread_worker(&mdp5_data->worker);
+	init_kthread_work(&mdp5_data->vsync_work, __vsync_retire_work_handler);
+
+	mdp5_data->thread = kthread_run(kthread_worker_fn,
+					&mdp5_data->worker,
+					"vsync_retire_work");
+
+	if (IS_ERR(mdp5_data->thread)) {
+		pr_err("unable to start vsync thread\n");
+		mdp5_data->thread = NULL;
+		/* NOTE(review): vsync_timeline created above is not
+		 * destroyed on this path and appears to leak -- confirm
+		 * and release it here if so.
+		 */
+		return -ENOMEM;
+	}
+
+	/* best-effort priority bump; return value intentionally ignored */
+	sched_setscheduler(mdp5_data->thread, SCHED_FIFO, &param);
+
+	mfd->mdp_sync_pt_data.get_retire_fence = __vsync_retire_get_fence;
+
+	mdp5_data->vsync_retire_handler.vsync_handler =
+		__vsync_retire_handle_vsync;
+	mdp5_data->vsync_retire_handler.cmd_post_flush = false;
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_update_panel_info() - apply a dynamic panel mode switch.
+ * @mode: requested mode forwarded to the panel driver (non-zero means
+ *        command mode, zero video mode, per the error message below).
+ * @dest_ctrl: non-zero destroys the current ctl so it is rebuilt for the
+ *        new mode; zero reconfigures the existing ctl in place.
+ *
+ * Return: 0 when no ctl exists yet or on success, otherwise the panel
+ * event / split-display setup error.
+ */
+static int mdss_mdp_update_panel_info(struct msm_fb_data_type *mfd,
+		int mode, int dest_ctrl)
+{
+	int ret = 0;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_panel_data *pdata;
+	struct mdss_mdp_ctl *sctl;
+
+	if (ctl == NULL) {
+		pr_debug("ctl not initialized\n");
+		return 0;
+	}
+
+	/* tell the panel driver first; even on failure we proceed below */
+	ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_DSI_UPDATE_PANEL_DATA,
+		(void *)(unsigned long)mode, CTL_INTF_EVENT_FLAG_DEFAULT);
+	if (ret)
+		pr_err("Dynamic switch to %s mode failed!\n",
+					mode ? "command" : "video");
+
+	if (dest_ctrl) {
+		/*
+		 * Destroy current ctrl structure as this is
+		 * going to be re-initialized with the requested mode.
+		 */
+		mdss_mdp_ctl_destroy(mdp5_data->ctl);
+		mdp5_data->ctl = NULL;
+	} else {
+		pdata = dev_get_platdata(&mfd->pdev->dev);
+
+		if (mdp5_data->mdata->has_pingpong_split &&
+			pdata->panel_info.use_pingpong_split)
+			mfd->split_mode = MDP_PINGPONG_SPLIT;
+		/*
+		 * Dynamic change so we need to reconfig instead of
+		 * destroying current ctrl structure.
+		 */
+		mdss_mdp_ctl_reconfig(ctl, pdata);
+
+		/*
+		 * Set flag when dynamic resolution switch happens before
+		 * handoff of cont-splash
+		 */
+		if (mdata->handoff_pending)
+			ctl->switch_with_handoff = true;
+
+		sctl = mdss_mdp_get_split_ctl(ctl);
+		if (sctl) {
+			if (mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
+				mdss_mdp_ctl_reconfig(sctl, pdata->next);
+				sctl->border_x_off +=
+					pdata->panel_info.lcdc.border_left +
+					pdata->panel_info.lcdc.border_right;
+			} else {
+				/*
+				 * todo: need to revisit this and properly
+				 * cleanup slave resources
+				 */
+				mdss_mdp_ctl_destroy(sctl);
+				ctl->mixer_right = NULL;
+			}
+		} else if (mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
+			/* enable split display for the first time */
+			ret = mdss_mdp_ctl_split_display_setup(ctl,
+					pdata->next);
+			if (ret) {
+				mdss_mdp_ctl_destroy(ctl);
+				mdp5_data->ctl = NULL;
+			}
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Forward an input event to the ctl's early wake-up hook so the display
+ * pipeline can begin ramping up before the next frame arrives.
+ */
+int mdss_mdp_input_event_handler(struct msm_fb_data_type *mfd)
+{
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+
+	if (!ctl || !mdss_panel_is_power_on(ctl->power_state) ||
+	    !ctl->ops.early_wake_up_fnc)
+		return 0;
+
+	return ctl->ops.early_wake_up_fnc(ctl);
+}
+
+/*
+ * mdss_mdp_signal_retire_fence() - mdp interface hook that force-signals
+ * @retire_cnt pending retire fences (thin wrapper over
+ * __vsync_retire_signal()).
+ */
+static void mdss_mdp_signal_retire_fence(struct msm_fb_data_type *mfd,
+						int retire_cnt)
+{
+	__vsync_retire_signal(mfd, retire_cnt);
+	pr_debug("Signaled (%d) pending retire fence\n", retire_cnt);
+}
+
+/*
+ * mdss_mdp_overlay_init() - per-framebuffer overlay setup for MDP5.
+ *
+ * Allocates the overlay private data, wires up the msm_mdp_interface
+ * function table, creates the overlay sysfs nodes/links, sets up the
+ * retire-fence machinery for DMS/command-mode panels, enables runtime PM
+ * and, when continuous splash is active, hands off the bootloader ctl
+ * path (handoff failure is non-fatal).
+ *
+ * Return: 0 on success or a negative errno; on failure the private data
+ * allocations are released.
+ */
+int mdss_mdp_overlay_init(struct msm_fb_data_type *mfd)
+{
+	struct device *dev = mfd->fbi->dev;
+	struct msm_mdp_interface *mdp5_interface = &mfd->mdp;
+	struct mdss_overlay_private *mdp5_data = NULL;
+	struct irq_info *mdss_irq;
+	int rc;
+
+	mdp5_data = kcalloc(1, sizeof(struct mdss_overlay_private), GFP_KERNEL);
+	if (!mdp5_data)
+		return -ENOMEM;
+
+	mdp5_data->frc_fsm
+		= kcalloc(1, sizeof(struct mdss_mdp_frc_fsm), GFP_KERNEL);
+	if (!mdp5_data->frc_fsm) {
+		rc = -ENOMEM;
+		pr_err("fail to allocate mdp5 frc fsm structure\n");
+		goto init_fail1;
+	}
+
+	mdp5_data->mdata = dev_get_drvdata(mfd->pdev->dev.parent);
+	if (!mdp5_data->mdata) {
+		pr_err("unable to initialize overlay for fb%d\n", mfd->index);
+		rc = -ENODEV;
+		goto init_fail;
+	}
+
+	mdp5_interface->on_fnc = mdss_mdp_overlay_on;
+	mdp5_interface->off_fnc = mdss_mdp_overlay_off;
+	mdp5_interface->release_fnc = __mdss_mdp_overlay_release_all;
+	mdp5_interface->do_histogram = NULL;
+	if (mdp5_data->mdata->ncursor_pipes)
+		mdp5_interface->cursor_update = mdss_mdp_hw_cursor_pipe_update;
+	else
+		mdp5_interface->cursor_update = mdss_mdp_hw_cursor_update;
+	mdp5_interface->async_position_update =
+		mdss_mdp_async_position_update;
+	mdp5_interface->dma_fnc = mdss_mdp_overlay_pan_display;
+	mdp5_interface->ioctl_handler = mdss_mdp_overlay_ioctl_handler;
+	mdp5_interface->kickoff_fnc = mdss_mdp_overlay_kickoff;
+	mdp5_interface->mode_switch = mdss_mode_switch;
+	mdp5_interface->mode_switch_post = mdss_mode_switch_post;
+	mdp5_interface->pre_commit_fnc = mdss_mdp_overlay_precommit;
+	mdp5_interface->splash_init_fnc = mdss_mdp_splash_init;
+	mdp5_interface->configure_panel = mdss_mdp_update_panel_info;
+	mdp5_interface->input_event_handler = mdss_mdp_input_event_handler;
+	mdp5_interface->signal_retire_fence = mdss_mdp_signal_retire_fence;
+
+	/* writeback targets validate/commit through the WFD variants */
+	if (mfd->panel_info->type == WRITEBACK_PANEL) {
+		mdp5_interface->atomic_validate =
+			mdss_mdp_layer_atomic_validate_wfd;
+		mdp5_interface->pre_commit = mdss_mdp_layer_pre_commit_wfd;
+		mdp5_interface->is_config_same = mdss_mdp_wfd_is_config_same;
+	} else {
+		mdp5_interface->atomic_validate =
+			mdss_mdp_layer_atomic_validate;
+		mdp5_interface->pre_commit = mdss_mdp_layer_pre_commit;
+	}
+
+	INIT_LIST_HEAD(&mdp5_data->pipes_used);
+	INIT_LIST_HEAD(&mdp5_data->pipes_cleanup);
+	INIT_LIST_HEAD(&mdp5_data->pipes_destroy);
+	INIT_LIST_HEAD(&mdp5_data->bufs_pool);
+	INIT_LIST_HEAD(&mdp5_data->bufs_chunks);
+	INIT_LIST_HEAD(&mdp5_data->bufs_used);
+	INIT_LIST_HEAD(&mdp5_data->bufs_freelist);
+	INIT_LIST_HEAD(&mdp5_data->rot_proc_list);
+	mutex_init(&mdp5_data->list_lock);
+	mutex_init(&mdp5_data->ov_lock);
+	mutex_init(&mdp5_data->dfps_lock);
+	mdp5_data->hw_refresh = true;
+	mdp5_data->cursor_ndx[CURSOR_PIPE_LEFT] = MSMFB_NEW_REQUEST;
+	mdp5_data->cursor_ndx[CURSOR_PIPE_RIGHT] = MSMFB_NEW_REQUEST;
+	mdp5_data->allow_kickoff = false;
+
+	mfd->mdp.private1 = mdp5_data;
+	mfd->wait_for_kickoff = true;
+
+	rc = mdss_mdp_overlay_fb_parse_dt(mfd);
+	if (rc)
+		goto init_fail; /* bare "return rc" here leaked mdp5_data */
+
+	/*
+	 * disable BWC if primary panel is video mode on specific
+	 * chipsets to workaround HW problem.
+	 */
+	if (mdss_has_quirk(mdp5_data->mdata, MDSS_QUIRK_BWCPANIC) &&
+	    mfd->panel_info->type == MIPI_VIDEO_PANEL && (mfd->index == 0))
+		mdp5_data->mdata->has_bwc = false;
+
+	mfd->panel_orientation = mfd->panel_info->panel_orientation;
+
+	if ((mfd->panel_info->panel_orientation & MDP_FLIP_LR) &&
+	    (mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY))
+		mdp5_data->mixer_swap = true;
+
+	rc = sysfs_create_group(&dev->kobj, &mdp_overlay_sysfs_group);
+	if (rc) {
+		pr_err("vsync sysfs group creation failed, ret=%d\n", rc);
+		goto init_fail;
+	}
+
+	mdp5_data->vsync_event_sd = sysfs_get_dirent(dev->kobj.sd,
+						     "vsync_event");
+	if (!mdp5_data->vsync_event_sd) {
+		pr_err("vsync_event sysfs lookup failed\n");
+		rc = -ENODEV;
+		goto init_fail;
+	}
+
+	mdp5_data->lineptr_event_sd = sysfs_get_dirent(dev->kobj.sd,
+						     "lineptr_event");
+	if (!mdp5_data->lineptr_event_sd) {
+		pr_err("lineptr_event sysfs lookup failed\n");
+		rc = -ENODEV;
+		goto init_fail;
+	}
+
+	mdp5_data->hist_event_sd = sysfs_get_dirent(dev->kobj.sd,
+						    "hist_event");
+	if (!mdp5_data->hist_event_sd) {
+		pr_err("hist_event sysfs lookup failed\n");
+		rc = -ENODEV;
+		goto init_fail;
+	}
+
+	mdp5_data->bl_event_sd = sysfs_get_dirent(dev->kobj.sd,
+							 "bl_event");
+	if (!mdp5_data->bl_event_sd) {
+		pr_err("bl_event sysfs lookup failed\n");
+		rc = -ENODEV;
+		goto init_fail;
+	}
+
+	mdp5_data->ad_event_sd = sysfs_get_dirent(dev->kobj.sd,
+							 "ad_event");
+	if (!mdp5_data->ad_event_sd) {
+		pr_err("ad_event sysfs lookup failed\n");
+		rc = -ENODEV;
+		goto init_fail;
+	}
+
+	mdp5_data->ad_bl_event_sd = sysfs_get_dirent(dev->kobj.sd,
+							 "ad_bl_event");
+	if (!mdp5_data->ad_bl_event_sd) {
+		pr_err("ad_bl_event sysfs lookup failed\n");
+		rc = -ENODEV;
+		goto init_fail;
+	}
+
+	rc = sysfs_create_link_nowarn(&dev->kobj,
+			&mdp5_data->mdata->pdev->dev.kobj, "mdp");
+	if (rc)
+		pr_warn("problem creating link to mdp sysfs\n");
+
+	rc = sysfs_create_link_nowarn(&dev->kobj,
+			&mfd->pdev->dev.kobj, "mdss_fb");
+	if (rc)
+		pr_warn("problem creating link to mdss_fb sysfs\n");
+
+	if (mfd->panel_info->type == MIPI_VIDEO_PANEL ||
+	    mfd->panel_info->type == DTV_PANEL) {
+		rc = sysfs_create_group(&dev->kobj,
+			&dynamic_fps_fs_attrs_group);
+		if (rc) {
+			pr_err("Error dfps sysfs creation ret=%d\n", rc);
+			goto init_fail;
+		}
+	}
+
+	if (mfd->panel_info->mipi.dms_mode ||
+			mfd->panel_info->type == MIPI_CMD_PANEL) {
+		rc = __vsync_retire_setup(mfd);
+		if (IS_ERR_VALUE(rc)) {
+			pr_err("unable to create vsync timeline\n");
+			goto init_fail;
+		}
+	}
+	mfd->mdp_sync_pt_data.async_wait_fences = true;
+
+	pm_runtime_set_suspended(&mfd->pdev->dev);
+	pm_runtime_enable(&mfd->pdev->dev);
+
+	kobject_uevent(&dev->kobj, KOBJ_ADD);
+	pr_debug("vsync kobject_uevent(KOBJ_ADD)\n");
+
+	mdss_irq = mdss_intr_line();
+
+	/* Adding event timer only for primary panel */
+	if ((mfd->index == 0) && (mfd->panel_info->type != WRITEBACK_PANEL)) {
+		mdp5_data->cpu_pm_hdl = add_event_timer(mdss_irq->irq,
+				mdss_mdp_ctl_event_timer, (void *)mdp5_data);
+		if (!mdp5_data->cpu_pm_hdl)
+			pr_warn("%s: unable to add event timer\n", __func__);
+	}
+
+	if (mfd->panel_info->cont_splash_enabled) {
+		rc = mdss_mdp_overlay_handoff(mfd);
+		if (rc) {
+			/*
+			 * Even though handoff failed, it is not fatal.
+			 * MDP can continue, just that we would have a longer
+			 * delay in transitioning from splash screen to boot
+			 * animation
+			 */
+			pr_warn("Overlay handoff failed for fb%d. rc=%d\n",
+				mfd->index, rc);
+			rc = 0;
+		}
+	}
+	mdp5_data->dyn_pu_state = mfd->panel_info->partial_update_enabled;
+
+	if (mdss_mdp_pp_overlay_init(mfd))
+		pr_warn("Failed to initialize pp overlay data.\n");
+	return rc;
+init_fail:
+	/*
+	 * NOTE(review): sysfs groups/dirent references created above are
+	 * not released on this path, and mfd->mdp.private1 still points at
+	 * the freed data. Pre-existing behavior -- confirm before relying
+	 * on re-initialization after a failure.
+	 */
+	kfree(mdp5_data->frc_fsm);
+init_fail1:
+	kfree(mdp5_data);
+	return rc;
+}
+
+/*
+ * Read the fb-level overlay properties from this framebuffer's device
+ * tree node (currently only the optional mixer-swap flag).
+ */
+static int mdss_mdp_overlay_fb_parse_dt(struct msm_fb_data_type *mfd)
+{
+	struct mdss_overlay_private *ov_priv = mfd_to_mdp5_data(mfd);
+	struct platform_device *pdev = mfd->pdev;
+
+	ov_priv->mixer_swap = of_property_read_bool(pdev->dev.of_node,
+					   "qcom,mdss-mixer-swap");
+	if (ov_priv->mixer_swap)
+		pr_info("mixer swap is enabled for fb device=%s\n",
+			pdev->name);
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_scaler_lut_init() - validate and load the QSEED3 scaler LUTs
+ * (directional, circular and separable) supplied from userspace.
+ *
+ * The three device-managed buffers are allocated on first use and reused
+ * on later calls. The table is marked invalid for the duration of the
+ * update and only marked valid once all three copies have succeeded.
+ *
+ * Return: 0 on success, -EFAULT if the target has no scaler block,
+ * -EINVAL on a size mismatch or failed user copy, -ENOMEM on allocation
+ * failure.
+ */
+static int mdss_mdp_scaler_lut_init(struct mdss_data_type *mdata,
+		struct mdp_scale_luts_info *lut_tbl)
+{
+	struct mdss_mdp_qseed3_lut_tbl *qseed3_lut_tbl;
+	int ret;
+
+	if (!mdata->scaler_off)
+		return -EFAULT;
+
+	qseed3_lut_tbl = &mdata->scaler_off->lut_tbl;
+	/* Userspace must supply exactly the sizes the HW tables expect */
+	if ((lut_tbl->dir_lut_size !=
+		DIR_LUT_IDX * DIR_LUT_COEFFS * sizeof(uint32_t)) ||
+		(lut_tbl->cir_lut_size !=
+		 CIR_LUT_IDX * CIR_LUT_COEFFS * sizeof(uint32_t)) ||
+		(lut_tbl->sep_lut_size !=
+		 SEP_LUT_IDX * SEP_LUT_COEFFS * sizeof(uint32_t)))
+		return -EINVAL;
+
+	if (!qseed3_lut_tbl->dir_lut) {
+		qseed3_lut_tbl->dir_lut = devm_kzalloc(&mdata->pdev->dev,
+				lut_tbl->dir_lut_size,
+				GFP_KERNEL);
+		if (!qseed3_lut_tbl->dir_lut) {
+			ret = -ENOMEM;
+			goto fail;
+		}
+	}
+
+	if (!qseed3_lut_tbl->cir_lut) {
+		qseed3_lut_tbl->cir_lut = devm_kzalloc(&mdata->pdev->dev,
+				lut_tbl->cir_lut_size,
+				GFP_KERNEL);
+		if (!qseed3_lut_tbl->cir_lut) {
+			ret = -ENOMEM;
+			goto fail;
+		}
+	}
+
+	if (!qseed3_lut_tbl->sep_lut) {
+		qseed3_lut_tbl->sep_lut = devm_kzalloc(&mdata->pdev->dev,
+				lut_tbl->sep_lut_size,
+				GFP_KERNEL);
+		if (!qseed3_lut_tbl->sep_lut) {
+			ret = -ENOMEM;
+			goto fail;
+		}
+	}
+
+	/* Invalidate before updating */
+	qseed3_lut_tbl->valid = false;
+
+
+	if (copy_from_user(qseed3_lut_tbl->dir_lut,
+				(void *)(unsigned long)lut_tbl->dir_lut,
+				lut_tbl->dir_lut_size)) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	if (copy_from_user(qseed3_lut_tbl->cir_lut,
+				(void *)(unsigned long)lut_tbl->cir_lut,
+				lut_tbl->cir_lut_size)) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	if (copy_from_user(qseed3_lut_tbl->sep_lut,
+				(void *)(unsigned long)lut_tbl->sep_lut,
+				lut_tbl->sep_lut_size)) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	qseed3_lut_tbl->valid = true;
+	return ret;
+
+fail:
+	/*
+	 * These buffers are device-managed: freeing them with plain
+	 * kfree() would make devres free them a second time at device
+	 * teardown. Release them through devm_kfree() and clear the
+	 * pointers so a retry reallocates instead of reusing freed
+	 * memory.
+	 */
+	if (qseed3_lut_tbl->dir_lut) {
+		devm_kfree(&mdata->pdev->dev, qseed3_lut_tbl->dir_lut);
+		qseed3_lut_tbl->dir_lut = NULL;
+	}
+	if (qseed3_lut_tbl->cir_lut) {
+		devm_kfree(&mdata->pdev->dev, qseed3_lut_tbl->cir_lut);
+		qseed3_lut_tbl->cir_lut = NULL;
+	}
+	if (qseed3_lut_tbl->sep_lut) {
+		devm_kfree(&mdata->pdev->dev, qseed3_lut_tbl->sep_lut);
+		qseed3_lut_tbl->sep_lut = NULL;
+	}
+err:
+	/* buffers (if any) are kept for reuse; table stays invalid */
+	qseed3_lut_tbl->valid = false;
+	return ret;
+}
+
+/*
+ * mdss_mdp_set_cfg() - handle an MDP_SET_CFG request carrying a
+ * userspace payload pointer.
+ *
+ * Only MDP_QSEED3_LUT_CFG is handled: the payload length is validated,
+ * the LUT descriptor is copied from userspace and the scaler LUTs are
+ * programmed via mdss_mdp_scaler_lut_init().
+ *
+ * Return: 0 on success; -EINVAL for unknown flags or a bad length;
+ * -EFAULT if the descriptor copy fails; otherwise the LUT init status.
+ */
+static int mdss_mdp_set_cfg(struct msm_fb_data_type *mfd,
+		struct mdp_set_cfg *cfg)
+{
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+	int ret = -EINVAL;
+	struct mdp_scale_luts_info luts_info;
+
+	switch (cfg->flags) {
+	case MDP_QSEED3_LUT_CFG:
+		if (cfg->len != sizeof(luts_info)) {
+			pr_err("invalid length %d expected %zd\n", cfg->len,
+				sizeof(luts_info));
+			ret = -EINVAL;
+			break;
+		}
+		/* copy_from_user returns bytes NOT copied; any non-zero
+		 * value means the copy failed
+		 */
+		ret = copy_from_user(&luts_info,
+				(void *)(unsigned long)cfg->payload, cfg->len);
+		if (ret) {
+			pr_err("qseed3 lut copy failed ret %d\n", ret);
+			ret = -EFAULT;
+			break;
+		}
+		ret = mdss_mdp_scaler_lut_init(mdata, &luts_info);
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pipe.c b/drivers/video/fbdev/msm/mdss_mdp_pipe.c
new file mode 100644
index 0000000..3a8df20
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_pipe.c
@@ -0,0 +1,3137 @@
+/* Copyright (c) 2012-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/bitmap.h>
+#include <linux/errno.h>
+#include <linux/iopoll.h>
+#include <linux/mutex.h>
+
+#include "mdss_mdp.h"
+#include "mdss_mdp_trace.h"
+#include "mdss_debug.h"
+
+#define SMP_MB_SIZE		(mdss_res->smp_mb_size)
+#define SMP_MB_CNT		(mdss_res->smp_mb_cnt)
+#define SMP_MB_ENTRY_SIZE	16
+#define MAX_BPP 4
+
+#define PIPE_CLEANUP_TIMEOUT_US 100000
+
+/* following offsets are relative to ctrl register bit offset */
+#define CLK_FORCE_ON_OFFSET	0x0
+#define CLK_FORCE_OFF_OFFSET	0x1
+/* following offsets are relative to status register bit offset */
+#define CLK_STATUS_OFFSET	0x0
+
+#define QOS_LUT_NRT_READ	0x0
+#define PANIC_LUT_NRT_READ	0x0
+#define ROBUST_LUT_NRT_READ	0xFFFF
+
+#define VBLANK_PANIC_DEFAULT_CONFIG 0x200000 /* Priority 2, no panic */
+#define VBLANK_PANIC_CREQ_MASK 0x300030
+
+#define QSEED3_DEFAULT_PRELAOD_H  0x4
+#define QSEED3_DEFAULT_PRELAOD_V  0x3
+
+static DEFINE_MUTEX(mdss_mdp_sspp_lock);
+static DEFINE_MUTEX(mdss_mdp_smp_lock);
+
+static void mdss_mdp_pipe_free(struct kref *kref);
+static int mdss_mdp_smp_mmb_set(int client_id, unsigned long *smp);
+static void mdss_mdp_smp_mmb_free(unsigned long *smp, bool write);
+static struct mdss_mdp_pipe *mdss_mdp_pipe_search_by_client_id(
+	struct mdss_data_type *mdata, int client_id,
+	enum mdss_mdp_pipe_rect rect_num);
+static int mdss_mdp_calc_stride(struct mdss_mdp_pipe *pipe,
+	struct mdss_mdp_plane_sizes *ps);
+static u32 mdss_mdp_calc_per_plane_num_blks(u32 ystride,
+	struct mdss_mdp_pipe *pipe);
+static int mdss_mdp_pipe_program_pixel_extn(struct mdss_mdp_pipe *pipe);
+
+/**
+ * enum mdss_mdp_pipe_qos - Different qos configurations for each pipe
+ *
+ * @MDSS_MDP_PIPE_QOS_VBLANK_CTRL: Setup VBLANK qos for the pipe.
+ * @MDSS_MDP_PIPE_QOS_VBLANK_AMORTIZE: Enables Amortization within pipe.
+ *	this configuration is mutually exclusive from VBLANK_CTRL.
+ * @MDSS_MDP_PIPE_QOS_PANIC_CTRL: Setup panic for the pipe.
+ */
+enum mdss_mdp_pipe_qos {
+	MDSS_MDP_PIPE_QOS_VBLANK_CTRL = BIT(0),
+	MDSS_MDP_PIPE_QOS_VBLANK_AMORTIZE = BIT(1),
+	MDSS_MDP_PIPE_QOS_PANIC_CTRL = BIT(2),
+};
+
+static inline void mdss_mdp_pipe_write(struct mdss_mdp_pipe *pipe,
+				       u32 reg, u32 val)
+{
+	writel_relaxed(val, pipe->base + reg);
+}
+
+/**
+ * This function is used to decide if certain register programming can be
+ * delayed or not. This is useful when multirect is used where two pipe
+ * structures access same set of registers.
+ */
+static inline bool is_pipe_programming_delay_needed(
+	struct mdss_mdp_pipe *pipe)
+{
+	struct mdss_mdp_pipe *next_pipe = pipe->multirect.next;
+
+	return (pipe->multirect.mode != MDSS_MDP_PIPE_MULTIRECT_NONE) &&
+	       next_pipe->params_changed;
+}
+
+static inline u32 mdss_mdp_pipe_read(struct mdss_mdp_pipe *pipe, u32 reg)
+{
+	return readl_relaxed(pipe->base + reg);
+}
+
+static inline int mdss_calc_fill_level(struct mdss_mdp_format_params *fmt,
+	u32 src_width)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 fixed_buff_size = mdata->pixel_ram_size;
+	u32 total_fl;
+
+	if (fmt->fetch_planes == MDSS_MDP_PLANE_PSEUDO_PLANAR) {
+		if (fmt->chroma_sample == MDSS_MDP_CHROMA_420) {
+			/* NV12 */
+			total_fl = (fixed_buff_size / 2) /
+				((src_width + 32) * fmt->bpp);
+		} else {
+			/* non NV12 */
+			total_fl = (fixed_buff_size) /
+				((src_width + 32) * fmt->bpp);
+		}
+	} else {
+		total_fl = (fixed_buff_size * 2) /
+			((src_width + 32) * fmt->bpp);
+	}
+
+	return total_fl;
+}
+
+static inline u32 get_qos_lut_linear(u32 total_fl)
+{
+	u32 qos_lut;
+
+	if (total_fl <= 4)
+		qos_lut = 0x1B;
+	else if (total_fl <= 5)
+		qos_lut = 0x5B;
+	else if (total_fl <= 6)
+		qos_lut = 0x15B;
+	else if (total_fl <= 7)
+		qos_lut = 0x55B;
+	else if (total_fl <= 8)
+		qos_lut = 0x155B;
+	else if (total_fl <= 9)
+		qos_lut = 0x555B;
+	else if (total_fl <= 10)
+		qos_lut = 0x1555B;
+	else if (total_fl <= 11)
+		qos_lut = 0x5555B;
+	else if (total_fl <= 12)
+		qos_lut = 0x15555B;
+	else
+		qos_lut = 0x55555B;
+
+	return qos_lut;
+}
+
+static inline u32 get_qos_lut_macrotile(u32 total_fl)
+{
+	u32 qos_lut;
+
+	if (total_fl <= 10)
+		qos_lut = 0x1AAff;
+	else if (total_fl <= 11)
+		qos_lut = 0x5AAFF;
+	else if (total_fl <= 12)
+		qos_lut = 0x15AAFF;
+	else
+		qos_lut = 0x55AAFF;
+
+	return qos_lut;
+}
+
+static void mdss_mdp_pipe_qos_lut(struct mdss_mdp_pipe *pipe)
+{
+	struct mdss_mdp_ctl *ctl = pipe->mixer_left->ctl;
+	u32 qos_lut;
+	u32 total_fl = 0;
+
+	if ((ctl->intf_num == MDSS_MDP_NO_INTF) ||
+			pipe->mixer_left->rotator_mode) {
+		qos_lut = QOS_LUT_NRT_READ; /* low priority for nrt */
+	} else {
+		total_fl = mdss_calc_fill_level(pipe->src_fmt,
+			pipe->src.w);
+
+		if (mdss_mdp_is_linear_format(pipe->src_fmt))
+			qos_lut = get_qos_lut_linear(total_fl);
+		else
+			qos_lut = get_qos_lut_macrotile(total_fl);
+	}
+
+	trace_mdp_perf_set_qos_luts(pipe->num, pipe->src_fmt->format,
+		ctl->intf_num, pipe->mixer_left->rotator_mode, total_fl,
+		qos_lut, mdss_mdp_is_linear_format(pipe->src_fmt));
+
+	pr_debug("pnum:%d fmt:%d intf:%d rot:%d fl:%d lut:0x%x\n",
+		pipe->num, pipe->src_fmt->format, ctl->intf_num,
+		pipe->mixer_left->rotator_mode, total_fl, qos_lut);
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_CREQ_LUT,
+		qos_lut);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+}
+
+bool is_rt_pipe(struct mdss_mdp_pipe *pipe)
+{
+	return pipe && pipe->mixer_left &&
+		pipe->mixer_left->type == MDSS_MDP_MIXER_TYPE_INTF;
+}
+
+static void mdss_mdp_config_pipe_panic_lut(struct mdss_mdp_pipe *pipe)
+{
+	u32 panic_lut, robust_lut;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!is_rt_pipe(pipe)) {
+		panic_lut = PANIC_LUT_NRT_READ;
+		robust_lut = ROBUST_LUT_NRT_READ;
+	} else if (mdss_mdp_is_linear_format(pipe->src_fmt)) {
+		panic_lut = mdata->default_panic_lut_per_pipe_linear;
+		robust_lut = mdata->default_robust_lut_per_pipe_linear;
+	} else {
+		panic_lut = mdata->default_panic_lut_per_pipe_tile;
+		robust_lut = mdata->default_robust_lut_per_pipe_tile;
+	}
+
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_DANGER_LUT,
+		panic_lut);
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SAFE_LUT,
+		robust_lut);
+
+	trace_mdp_perf_set_panic_luts(pipe->num, pipe->src_fmt->format,
+		pipe->src_fmt->fetch_mode, panic_lut, robust_lut);
+
+	pr_debug("pnum:%d fmt:%d mode:%d luts[0x%x, 0x%x]\n",
+		pipe->num, pipe->src_fmt->format, pipe->src_fmt->fetch_mode,
+		panic_lut, robust_lut);
+}
+
+static void mdss_mdp_pipe_qos_ctrl(struct mdss_mdp_pipe *pipe,
+	bool enable, u32 flags)
+{
+	u32 per_pipe_qos;
+
+	per_pipe_qos = mdss_mdp_pipe_read(pipe, MDSS_MDP_REG_SSPP_QOS_CTRL);
+
+	if (flags & MDSS_MDP_PIPE_QOS_VBLANK_CTRL) {
+		per_pipe_qos |= VBLANK_PANIC_DEFAULT_CONFIG;
+
+		if (enable)
+			per_pipe_qos |= BIT(16);
+		else
+			per_pipe_qos &= ~BIT(16);
+	}
+
+	if (flags & MDSS_MDP_PIPE_QOS_VBLANK_AMORTIZE) {
+		/* this feature overrules previous VBLANK_CTRL */
+		per_pipe_qos &= ~BIT(16);
+		per_pipe_qos &= ~VBLANK_PANIC_CREQ_MASK; /* clear vblank bits */
+	}
+
+	if (flags & MDSS_MDP_PIPE_QOS_PANIC_CTRL) {
+		if (enable)
+			per_pipe_qos |= BIT(0);
+		else
+			per_pipe_qos &= ~BIT(0);
+	}
+
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_QOS_CTRL,
+		per_pipe_qos);
+}
+
+/**
+ * @mdss_mdp_pipe_panic_vblank_signal_ctrl -
+ * @pipe: pointer to a pipe
+ * @enable: TRUE - enables feature FALSE - disables feature
+ *
+ * This function assumes that clocks are enabled, so it is the caller's
+ * responsibility to enable clocks before calling this function.
+ */
+static int mdss_mdp_pipe_panic_vblank_signal_ctrl(struct mdss_mdp_pipe *pipe,
+	bool enable)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!mdata->has_panic_ctrl)
+		goto end;
+
+	if (!is_rt_pipe(pipe))
+		goto end;
+
+	if (!test_bit(MDSS_QOS_VBLANK_PANIC_CTRL, mdata->mdss_qos_map))
+		goto end;
+
+	mutex_lock(&mdata->reg_lock);
+
+	mdss_mdp_pipe_qos_ctrl(pipe, enable, MDSS_MDP_PIPE_QOS_VBLANK_CTRL);
+
+	mutex_unlock(&mdata->reg_lock);
+
+end:
+	return 0;
+}
+
+int mdss_mdp_pipe_panic_signal_ctrl(struct mdss_mdp_pipe *pipe, bool enable)
+{
+	uint32_t panic_robust_ctrl;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!mdata->has_panic_ctrl)
+		goto end;
+
+	if (!is_rt_pipe(pipe))
+		goto end;
+
+	mutex_lock(&mdata->reg_lock);
+	switch (mdss_mdp_panic_signal_support_mode(mdata)) {
+	case MDSS_MDP_PANIC_COMMON_REG_CFG:
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+		panic_robust_ctrl = readl_relaxed(mdata->mdp_base +
+				MMSS_MDP_PANIC_ROBUST_CTRL);
+		if (enable)
+			panic_robust_ctrl |= BIT(pipe->panic_ctrl_ndx);
+		else
+			panic_robust_ctrl &= ~BIT(pipe->panic_ctrl_ndx);
+		writel_relaxed(panic_robust_ctrl,
+				mdata->mdp_base + MMSS_MDP_PANIC_ROBUST_CTRL);
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+		break;
+	case MDSS_MDP_PANIC_PER_PIPE_CFG:
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+		mdss_mdp_pipe_qos_ctrl(pipe, enable,
+			MDSS_MDP_PIPE_QOS_PANIC_CTRL);
+
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+		break;
+	}
+	mutex_unlock(&mdata->reg_lock);
+
+end:
+	return 0;
+}
+
+void mdss_mdp_bwcpanic_ctrl(struct mdss_data_type *mdata, bool enable)
+{
+	if (!mdata)
+		return;
+
+	mutex_lock(&mdata->reg_lock);
+	if (enable) {
+		writel_relaxed(0x0, mdata->mdp_base + MMSS_MDP_PANIC_LUT0);
+		writel_relaxed(0x0, mdata->mdp_base + MMSS_MDP_PANIC_LUT1);
+		writel_relaxed(0x0, mdata->mdp_base + MMSS_MDP_ROBUST_LUT);
+	} else {
+		writel_relaxed(mdata->default_panic_lut0,
+			mdata->mdp_base + MMSS_MDP_PANIC_LUT0);
+		writel_relaxed(mdata->default_panic_lut1,
+			mdata->mdp_base + MMSS_MDP_PANIC_LUT1);
+		writel_relaxed(mdata->default_robust_lut,
+			mdata->mdp_base + MMSS_MDP_ROBUST_LUT);
+	}
+	mutex_unlock(&mdata->reg_lock);
+}
+
+/**
+ * @mdss_mdp_pipe_nrt_vbif_setup -
+ * @mdata: pointer to global driver data.
+ * @pipe: pointer to a pipe
+ *
+ * This function assumes that clocks are enabled, so it is the caller's
+ * responsibility to enable clocks before calling this function.
+ */
+static void mdss_mdp_pipe_nrt_vbif_setup(struct mdss_data_type *mdata,
+					struct mdss_mdp_pipe *pipe)
+{
+	uint32_t nrt_vbif_client_sel;
+
+	if (pipe->type != MDSS_MDP_PIPE_TYPE_DMA)
+		return;
+
+	mutex_lock(&mdata->reg_lock);
+	nrt_vbif_client_sel = readl_relaxed(mdata->mdp_base +
+				MMSS_MDP_RT_NRT_VBIF_CLIENT_SEL);
+	if (mdss_mdp_is_nrt_vbif_client(mdata, pipe))
+		nrt_vbif_client_sel |= BIT(pipe->num - MDSS_MDP_SSPP_DMA0);
+	else
+		nrt_vbif_client_sel &= ~BIT(pipe->num - MDSS_MDP_SSPP_DMA0);
+	writel_relaxed(nrt_vbif_client_sel,
+			mdata->mdp_base + MMSS_MDP_RT_NRT_VBIF_CLIENT_SEL);
+	mutex_unlock(&mdata->reg_lock);
+}
+
+static inline bool is_unused_smp_allowed(void)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	switch (MDSS_GET_MAJOR_MINOR(mdata->mdp_rev)) {
+	case MDSS_GET_MAJOR_MINOR(MDSS_MDP_HW_REV_103):
+	case MDSS_GET_MAJOR_MINOR(MDSS_MDP_HW_REV_105):
+	case MDSS_GET_MAJOR_MINOR(MDSS_MDP_HW_REV_109):
+	case MDSS_GET_MAJOR_MINOR(MDSS_MDP_HW_REV_110):
+		return true;
+	default:
+		return false;
+	}
+}
+
+static u32 mdss_mdp_smp_mmb_reserve(struct mdss_mdp_pipe_smp_map *smp_map,
+	size_t n, bool force_alloc)
+{
+	u32 i, mmb;
+	u32 fixed_cnt = bitmap_weight(smp_map->fixed, SMP_MB_CNT);
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (n <= fixed_cnt)
+		return fixed_cnt;
+
+	n -= fixed_cnt;
+
+	i = bitmap_weight(smp_map->allocated, SMP_MB_CNT);
+
+	/*
+	 * SMP programming is not double buffered. Fail any request
+	 * that calls for a change in smp configuration (addition/removal
+	 * of smp blocks), so that the fallback solution happens.
+	 */
+	if (i != 0 && !force_alloc &&
+	    (((n < i) && !is_unused_smp_allowed()) || (n > i))) {
+		pr_debug("Can't change mmb config, num_blks: %zu alloc: %d\n",
+			n, i);
+		return 0;
+	}
+
+	/*
+	 * Clear previous SMP reservations and reserve according to the
+	 * latest configuration
+	 */
+	mdss_mdp_smp_mmb_free(smp_map->reserved, false);
+
+	/* Reserve mmb blocks */
+	for (; i < n; i++) {
+		if (bitmap_full(mdata->mmb_alloc_map, SMP_MB_CNT))
+			break;
+
+		mmb = find_first_zero_bit(mdata->mmb_alloc_map, SMP_MB_CNT);
+		set_bit(mmb, smp_map->reserved);
+		set_bit(mmb, mdata->mmb_alloc_map);
+	}
+
+	return i + fixed_cnt;
+}
+
+static int mdss_mdp_smp_mmb_set(int client_id, unsigned long *smp)
+{
+	u32 mmb, off, data, s;
+	int cnt = 0;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	for_each_set_bit(mmb, smp, SMP_MB_CNT) {
+		off = (mmb / 3) * 4;
+		s = (mmb % 3) * 8;
+		data = readl_relaxed(mdata->mdp_base +
+			MDSS_MDP_REG_SMP_ALLOC_W0 + off);
+		data &= ~(0xFF << s);
+		data |= client_id << s;
+		writel_relaxed(data, mdata->mdp_base +
+			MDSS_MDP_REG_SMP_ALLOC_W0 + off);
+		writel_relaxed(data, mdata->mdp_base +
+			MDSS_MDP_REG_SMP_ALLOC_R0 + off);
+		cnt++;
+	}
+	return cnt;
+}
+
+static void mdss_mdp_smp_mmb_amend(unsigned long *smp, unsigned long *extra)
+{
+	bitmap_or(smp, smp, extra, SMP_MB_CNT);
+	bitmap_zero(extra, SMP_MB_CNT);
+}
+
+static void mdss_mdp_smp_mmb_free(unsigned long *smp, bool write)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!bitmap_empty(smp, SMP_MB_CNT)) {
+		if (write)
+			mdss_mdp_smp_mmb_set(0, smp);
+		bitmap_andnot(mdata->mmb_alloc_map, mdata->mmb_alloc_map,
+			      smp, SMP_MB_CNT);
+		bitmap_zero(smp, SMP_MB_CNT);
+	}
+}
+
+u32 mdss_mdp_smp_calc_num_blocks(struct mdss_mdp_pipe *pipe)
+{
+	struct mdss_mdp_plane_sizes ps;
+	int rc = 0;
+	int i, num_blks = 0;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (mdata->has_pixel_ram)
+		return 0;
+
+	rc = mdss_mdp_calc_stride(pipe, &ps);
+	if (rc) {
+		pr_err("wrong stride calc\n");
+		return 0;
+	}
+
+	for (i = 0; i < ps.num_planes; i++) {
+		num_blks += mdss_mdp_calc_per_plane_num_blks(ps.ystride[i],
+			pipe);
+		pr_debug("SMP for BW %d mmb for pnum=%d plane=%d\n",
+			num_blks, pipe->num, i);
+	}
+
+	pr_debug("SMP blks %d mb_cnt for pnum=%d\n",
+		num_blks, pipe->num);
+	return num_blks;
+}
+
+/**
+ * @mdss_mdp_smp_get_size - get allocated smp size for a pipe
+ * @pipe: pointer to a pipe
+ *
+ * Function counts number of blocks that are currently allocated for a
+ * pipe, then smp buffer size is number of blocks multiplied by block
+ * size.
+ */
+u32 mdss_mdp_smp_get_size(struct mdss_mdp_pipe *pipe)
+{
+	int i, mb_cnt = 0, smp_size;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (mdata->has_pixel_ram) {
+		smp_size = mdata->pixel_ram_size;
+	} else {
+		for (i = 0; i < MAX_PLANES; i++) {
+			mb_cnt += bitmap_weight(pipe->smp_map[i].allocated,
+								SMP_MB_CNT);
+			mb_cnt += bitmap_weight(pipe->smp_map[i].fixed,
+								SMP_MB_CNT);
+		}
+
+		smp_size = mb_cnt * SMP_MB_SIZE;
+	}
+
+	pr_debug("SMP size %d for pnum=%d\n",
+		smp_size, pipe->num);
+
+	return smp_size;
+}
+
+static void mdss_mdp_smp_set_wm_levels(struct mdss_mdp_pipe *pipe, int mb_cnt)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 useable_space, latency_bytes, val, wm[3];
+	struct mdss_mdp_mixer *mixer = pipe->mixer_left;
+
+	useable_space = mb_cnt * SMP_MB_SIZE;
+
+	/*
+	 * For 1.3.x version, when source format is macrotile then useable
+	 * space within total allocated SMP space is limited to src_w *
+	 * bpp * nlines. Unlike linear format, any extra space left over is
+	 * not filled.
+	 *
+	 * All other versions, in case of linear we calculate the latency
+	 * bytes as the bytes to be used for the latency buffer lines, so the
+	 * transactions when filling the full SMPs have the lowest priority.
+	 */
+
+	latency_bytes = mdss_mdp_calc_latency_buf_bytes(pipe->src_fmt->is_yuv,
+		pipe->bwc_mode, mdss_mdp_is_tile_format(pipe->src_fmt),
+		pipe->src.w, pipe->src_fmt->bpp, false, useable_space,
+		mdss_mdp_is_ubwc_format(pipe->src_fmt),
+		mdss_mdp_is_nv12_format(pipe->src_fmt),
+		(pipe->flags & MDP_FLIP_LR));
+
+	if ((pipe->flags & MDP_FLIP_LR) &&
+		!mdss_mdp_is_tile_format(pipe->src_fmt)) {
+		/*
+		 * when doing hflip, one line is reserved to be consumed down
+		 * the pipeline. This line will always be marked as full even
+		 * if it doesn't have any data. In order to generate proper
+		 * priority levels ignore this region while setting up
+		 * watermark levels
+		 */
+		u8 bpp = pipe->src_fmt->is_yuv ? 1 :
+			pipe->src_fmt->bpp;
+		latency_bytes -= (pipe->src.w * bpp);
+	}
+
+	if (IS_MDSS_MAJOR_MINOR_SAME(mdata->mdp_rev, MDSS_MDP_HW_REV_103) &&
+		mdss_mdp_is_tile_format(pipe->src_fmt)) {
+		val = latency_bytes / SMP_MB_ENTRY_SIZE;
+
+		wm[0] = (val * 5) / 8;
+		wm[1] = (val * 6) / 8;
+		wm[2] = (val * 7) / 8;
+	} else if (mixer->rotator_mode ||
+		(mixer->ctl->intf_num == MDSS_MDP_NO_INTF)) {
+		/* any non real time pipe */
+		wm[0]  = 0xffff;
+		wm[1]  = 0xffff;
+		wm[2]  = 0xffff;
+	} else {
+		/*
+		 *  1/3 of the latency buffer bytes from the
+		 *  SMP pool that is being fetched
+		 */
+		val = (latency_bytes / SMP_MB_ENTRY_SIZE) / 3;
+
+		wm[0] = val;
+		wm[1] = wm[0] + val;
+		wm[2] = wm[1] + val;
+	}
+
+	trace_mdp_perf_set_wm_levels(pipe->num, useable_space, latency_bytes,
+		wm[0], wm[1], wm[2], mb_cnt, SMP_MB_SIZE);
+
+	pr_debug("pnum=%d useable_space=%u watermarks %u,%u,%u\n", pipe->num,
+			useable_space, wm[0], wm[1], wm[2]);
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_REQPRIO_FIFO_WM_0, wm[0]);
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_REQPRIO_FIFO_WM_1, wm[1]);
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_REQPRIO_FIFO_WM_2, wm[2]);
+}
+
+static void mdss_mdp_smp_free(struct mdss_mdp_pipe *pipe)
+{
+	int i;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (mdata->has_pixel_ram)
+		return;
+
+	mutex_lock(&mdss_mdp_smp_lock);
+	for (i = 0; i < MAX_PLANES; i++) {
+		mdss_mdp_smp_mmb_free(pipe->smp_map[i].reserved, false);
+		mdss_mdp_smp_mmb_free(pipe->smp_map[i].allocated, true);
+	}
+	mutex_unlock(&mdss_mdp_smp_lock);
+}
+
+void mdss_mdp_smp_unreserve(struct mdss_mdp_pipe *pipe)
+{
+	int i;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (mdata->has_pixel_ram)
+		return;
+
+	mutex_lock(&mdss_mdp_smp_lock);
+	for (i = 0; i < MAX_PLANES; i++)
+		mdss_mdp_smp_mmb_free(pipe->smp_map[i].reserved, false);
+	mutex_unlock(&mdss_mdp_smp_lock);
+}
+
+static int mdss_mdp_calc_stride(struct mdss_mdp_pipe *pipe,
+	struct mdss_mdp_plane_sizes *ps)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u16 width;
+	int rc = 0;
+	u32 format, seg_w = 0;
+
+	if (mdata->has_pixel_ram)
+		return 0;
+
+	width = DECIMATED_DIMENSION(pipe->src.w, pipe->horz_deci);
+
+	if (pipe->bwc_mode) {
+		rc = mdss_mdp_get_rau_strides(pipe->src.w, pipe->src.h,
+			pipe->src_fmt, ps);
+		if (rc)
+			return rc;
+		/*
+		 * Override fetch strides with SMP buffer size for both the
+		 * planes. BWC line buffer needs to be divided into 16
+		 * segments and every segment is aligned to format
+		 * specific RAU size
+		 */
+		seg_w = DIV_ROUND_UP(pipe->src.w, 16);
+		if (pipe->src_fmt->fetch_planes == MDSS_MDP_PLANE_INTERLEAVED) {
+			ps->ystride[0] = ALIGN(seg_w, 32) * 16 * ps->rau_h[0] *
+					pipe->src_fmt->bpp;
+			ps->ystride[1] = 0;
+		} else {
+			u32 bwc_width = ALIGN(seg_w, 64) * 16;
+
+			ps->ystride[0] = bwc_width * ps->rau_h[0];
+			ps->ystride[1] = bwc_width * ps->rau_h[1];
+			/*
+			 * Since chroma for H1V2 is not subsampled it needs
+			 * to be accounted for with bpp factor
+			 */
+			if (pipe->src_fmt->chroma_sample ==
+				MDSS_MDP_CHROMA_H1V2)
+				ps->ystride[1] *= 2;
+		}
+		pr_debug("BWC SMP strides ystride0=%x ystride1=%x\n",
+			ps->ystride[0], ps->ystride[1]);
+	} else {
+		format = pipe->src_fmt->format;
+		/*
+		 * when decimation block is present, all chroma planes
+		 * are fetched on a single SMP plane for chroma pixels
+		 */
+		if (mdata->has_decimation) {
+			switch (pipe->src_fmt->chroma_sample) {
+			case MDSS_MDP_CHROMA_H2V1:
+				format = MDP_Y_CRCB_H2V1;
+				break;
+			case MDSS_MDP_CHROMA_420:
+				format = MDP_Y_CBCR_H2V2;
+				break;
+			default:
+				break;
+			}
+		}
+		rc = mdss_mdp_get_plane_sizes(pipe->src_fmt, width, pipe->src.h,
+			ps, 0, 0);
+		if (rc)
+			return rc;
+
+		if (pipe->mixer_left && (ps->num_planes == 1)) {
+			ps->ystride[0] = MAX_BPP *
+				max(pipe->mixer_left->width, width);
+		} else if (mdata->has_decimation) {
+			/*
+			 * To avoid quality loss, MDP does one less decimation
+			 * on chroma components if they are subsampled.
+			 * Account for this to have enough SMPs for latency
+			 */
+			switch (pipe->src_fmt->chroma_sample) {
+			case MDSS_MDP_CHROMA_H2V1:
+			case MDSS_MDP_CHROMA_420:
+				ps->ystride[1] <<= 1;
+				break;
+			}
+		}
+	}
+
+	return rc;
+}
+
+static u32 mdss_mdp_calc_per_plane_num_blks(u32 ystride,
+	struct mdss_mdp_pipe *pipe)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 num_blks = 0;
+	u32 nlines = 0;
+
+	if (pipe->mixer_left && (pipe->mixer_left->rotator_mode ||
+		(pipe->mixer_left->type == MDSS_MDP_MIXER_TYPE_WRITEBACK))) {
+		if (mdss_mdp_is_tile_format(pipe->src_fmt))
+			num_blks = 4;
+		else
+			num_blks = 1;
+	} else {
+		if (mdss_mdp_is_tile_format(pipe->src_fmt))
+			nlines = 8;
+		else
+			nlines = pipe->bwc_mode ? 1 : 2;
+
+		num_blks = DIV_ROUND_UP(ystride * nlines,
+				SMP_MB_SIZE);
+
+		if (mdata->mdp_rev == MDSS_MDP_HW_REV_100)
+			num_blks = roundup_pow_of_two(num_blks);
+
+		if (mdata->smp_mb_per_pipe &&
+			(num_blks > mdata->smp_mb_per_pipe) &&
+			!(pipe->flags & MDP_FLIP_LR))
+			num_blks = mdata->smp_mb_per_pipe;
+	}
+
+	pr_debug("pipenum:%d tile:%d bwc:%d ystride%d pipeblks:%d blks:%d\n",
+		pipe->num, mdss_mdp_is_tile_format(pipe->src_fmt),
+		pipe->bwc_mode, ystride, mdata->smp_mb_per_pipe, num_blks);
+
+	return num_blks;
+}
+
+int mdss_mdp_smp_reserve(struct mdss_mdp_pipe *pipe)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 num_blks = 0, reserved = 0;
+	struct mdss_mdp_plane_sizes ps;
+	int i, rc = 0;
+	bool force_alloc = 0;
+
+	if (mdata->has_pixel_ram)
+		return 0;
+
+	rc = mdss_mdp_calc_stride(pipe, &ps);
+	if (rc)
+		return rc;
+
+	force_alloc = pipe->flags & MDP_SMP_FORCE_ALLOC;
+
+	mutex_lock(&mdss_mdp_smp_lock);
+	if (!is_unused_smp_allowed()) {
+		for (i = (MAX_PLANES - 1); i >= ps.num_planes; i--) {
+			if (bitmap_weight(pipe->smp_map[i].allocated,
+					  SMP_MB_CNT)) {
+				pr_debug("unsed mmb for pipe%d plane%d not allowed\n",
+					pipe->num, i);
+				mutex_unlock(&mdss_mdp_smp_lock);
+				return -EAGAIN;
+			}
+		}
+	}
+
+	for (i = 0; i < ps.num_planes; i++) {
+		num_blks = mdss_mdp_calc_per_plane_num_blks(ps.ystride[i],
+			pipe);
+		pr_debug("reserving %d mmb for pnum=%d plane=%d\n",
+				num_blks, pipe->num, i);
+		reserved = mdss_mdp_smp_mmb_reserve(&pipe->smp_map[i],
+			num_blks, force_alloc);
+		if (reserved < num_blks)
+			break;
+	}
+
+	if (reserved < num_blks) {
+		pr_debug("insufficient MMB blocks. pnum:%d\n", pipe->num);
+		for (; i >= 0; i--)
+			mdss_mdp_smp_mmb_free(pipe->smp_map[i].reserved,
+				false);
+		rc = -ENOBUFS;
+	}
+	mutex_unlock(&mdss_mdp_smp_lock);
+
+	return rc;
+}
+/*
+ * mdss_mdp_smp_alloc() -- set smp mmb and wm levels for a staged pipe
+ * @pipe: pointer to a pipe
+ *
+ * Function amends reserved smp mmbs to allocated bitmap and ties respective
+ * mmbs to their pipe fetch_ids. Based on the number of total allocated mmbs
+ * for a staged pipe, it also sets the watermark levels (wm).
+ *
+ * This function will be called on every commit where pipe params might not
+ * have changed. In such cases, we need to ensure that wm levels are not
+ * wiped out. Also in some rare situations hw might have reset and wiped out
+ * smp mmb programming but new smp reservation is not done. In such cases we
+ * need to ensure that for a staged pipes, mmbs are set properly based on
+ * allocated bitmap.
+ */
+static int mdss_mdp_smp_alloc(struct mdss_mdp_pipe *pipe)
+{
+	int i;
+	int cnt = 0;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (mdata->has_pixel_ram)
+		return 0;
+
+	mutex_lock(&mdss_mdp_smp_lock);
+	for (i = 0; i < MAX_PLANES; i++) {
+		cnt += bitmap_weight(pipe->smp_map[i].fixed, SMP_MB_CNT);
+
+		if (bitmap_empty(pipe->smp_map[i].reserved, SMP_MB_CNT)) {
+			cnt += mdss_mdp_smp_mmb_set(pipe->ftch_id + i,
+				pipe->smp_map[i].allocated);
+			continue;
+		}
+
+		mdss_mdp_smp_mmb_amend(pipe->smp_map[i].allocated,
+			pipe->smp_map[i].reserved);
+		cnt += mdss_mdp_smp_mmb_set(pipe->ftch_id + i,
+			pipe->smp_map[i].allocated);
+	}
+	mdss_mdp_smp_set_wm_levels(pipe, cnt);
+	mutex_unlock(&mdss_mdp_smp_lock);
+	return 0;
+}
+
+void mdss_mdp_smp_release(struct mdss_mdp_pipe *pipe)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (mdata->has_pixel_ram)
+		return;
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+	mdss_mdp_smp_free(pipe);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+}
+
+int mdss_mdp_smp_setup(struct mdss_data_type *mdata, u32 cnt, u32 size)
+{
+	if (!mdata)
+		return -EINVAL;
+
+	mdata->smp_mb_cnt = cnt;
+	mdata->smp_mb_size = size;
+
+	return 0;
+}
+
+/**
+ * mdss_mdp_smp_handoff() - Handoff SMP MMBs in use by staged pipes
+ * @mdata: pointer to the global mdss data structure.
+ *
+ * Iterate through the list of all SMP MMBs and check to see if any
+ * of them are assigned to a pipe being marked as being handed-off.
+ * If so, update the corresponding software allocation map to reflect
+ * this.
+ *
+ * This function would typically be called during MDP probe for the case
+ * when certain pipes might be programmed in the bootloader to display
+ * the splash screen.
+ */
+int mdss_mdp_smp_handoff(struct mdss_data_type *mdata)
+{
+	int rc = 0;
+	int i, client_id, prev_id = 0;
+	u32 off, s, data;
+	struct mdss_mdp_pipe *pipe = NULL;
+
+	if (mdata->has_pixel_ram)
+		return 0;
+
+	/*
+	 * figure out what SMP MMBs are allocated for each of the pipes
+	 * that need to be handed off.
+	 */
+	for (i = 0; i < SMP_MB_CNT; i++) {
+		off = (i / 3) * 4;
+		s = (i % 3) * 8;
+		data = readl_relaxed(mdata->mdp_base +
+			MDSS_MDP_REG_SMP_ALLOC_W0 + off);
+		client_id = (data >> s) & 0xFF;
+		if (test_bit(i, mdata->mmb_alloc_map)) {
+			/*
+			 * Certain pipes may have a dedicated set of
+			 * SMP MMBs statically allocated to them. In
+			 * such cases, we do not need to do anything
+			 * here.
+			 */
+			pr_debug("smp mmb %d already assigned to pipe %d (client_id %d)\n"
+				, i, pipe ? pipe->num : -1, client_id);
+			continue;
+		}
+
+		if (client_id) {
+			if (client_id != prev_id) {
+				pipe = mdss_mdp_pipe_search_by_client_id(mdata,
+					client_id, MDSS_MDP_PIPE_RECT0);
+				prev_id = client_id;
+			}
+
+			if (!pipe) {
+				pr_warn("Invalid client id %d for SMP MMB %d\n",
+					client_id, i);
+				continue;
+			}
+
+			if (!pipe->is_handed_off) {
+				pr_warn("SMP MMB %d assigned to a pipe not marked for handoff (client id %d)\n"
+					, i, client_id);
+				continue;
+			}
+
+			/*
+			 * Assume that the source format only has
+			 * one plane
+			 */
+			pr_debug("Assigning smp mmb %d to pipe %d (client_id %d)\n"
+				, i, pipe->num, client_id);
+			set_bit(i, pipe->smp_map[0].allocated);
+			set_bit(i, mdata->mmb_alloc_map);
+		}
+	}
+
+	return rc;
+}
+
+void mdss_mdp_pipe_unmap(struct mdss_mdp_pipe *pipe)
+{
+	if (kref_put_mutex(&pipe->kref, mdss_mdp_pipe_free,
+			&mdss_mdp_sspp_lock)) {
+		WARN(1, "Unexpected free pipe during unmap\n");
+		mutex_unlock(&mdss_mdp_sspp_lock);
+	}
+}
+
+int mdss_mdp_pipe_map(struct mdss_mdp_pipe *pipe)
+{
+	if (!kref_get_unless_zero(&pipe->kref))
+		return -EINVAL;
+	return 0;
+}
+
+/**
+ * mdss_mdp_qos_vbif_remapper_setup - Program the VBIF QoS remapper
+ *		registers based on real or non real time clients
+ * @mdata:	Pointer to the global mdss data structure.
+ * @pipe:	Pointer to source pipe struct to get xin id's.
+ * @is_realtime:	To determine if pipe's client is real or
+ *			non real time.
+ * This function assumes that clocks are on, so it is the caller's responsibility to
+ * call this function with clocks enabled.
+ */
+static void mdss_mdp_qos_vbif_remapper_setup(struct mdss_data_type *mdata,
+			struct mdss_mdp_pipe *pipe, bool is_realtime)
+{
+	u32 mask, reg_val, reg_val_lvl, i, vbif_qos;
+	u32 reg_high;
+	bool is_nrt_vbif = mdss_mdp_is_nrt_vbif_client(mdata, pipe);
+
+	if (mdata->npriority_lvl == 0)
+		return;
+
+	if (test_bit(MDSS_QOS_REMAPPER, mdata->mdss_qos_map)) {
+		mutex_lock(&mdata->reg_lock);
+		for (i = 0; i < mdata->npriority_lvl; i++) {
+			reg_high = ((pipe->xin_id & 0x8) >> 3) * 4 + (i * 8);
+
+			reg_val = MDSS_VBIF_READ(mdata,
+				MDSS_VBIF_QOS_RP_REMAP_BASE +
+				reg_high, is_nrt_vbif);
+			reg_val_lvl = MDSS_VBIF_READ(mdata,
+				MDSS_VBIF_QOS_LVL_REMAP_BASE + reg_high,
+				is_nrt_vbif);
+
+			mask = 0x3 << (pipe->xin_id * 4);
+			vbif_qos = is_realtime ?
+				mdata->vbif_rt_qos[i] : mdata->vbif_nrt_qos[i];
+
+			reg_val &= ~(mask);
+			reg_val |= vbif_qos << (pipe->xin_id * 4);
+
+			reg_val_lvl &= ~(mask);
+			reg_val_lvl |= vbif_qos << (pipe->xin_id * 4);
+
+			pr_debug("idx:%d xin:%d reg:0x%x val:0x%x lvl:0x%x\n",
+			   i, pipe->xin_id, reg_high, reg_val, reg_val_lvl);
+			MDSS_VBIF_WRITE(mdata, MDSS_VBIF_QOS_RP_REMAP_BASE +
+				reg_high, reg_val, is_nrt_vbif);
+			MDSS_VBIF_WRITE(mdata, MDSS_VBIF_QOS_LVL_REMAP_BASE +
+				reg_high, reg_val_lvl, is_nrt_vbif);
+		}
+		mutex_unlock(&mdata->reg_lock);
+	} else {
+		mutex_lock(&mdata->reg_lock);
+		for (i = 0; i < mdata->npriority_lvl; i++) {
+			reg_val = MDSS_VBIF_READ(mdata,
+				MDSS_VBIF_QOS_REMAP_BASE + i*4, is_nrt_vbif);
+
+			mask = 0x3 << (pipe->xin_id * 2);
+			reg_val &= ~(mask);
+			vbif_qos = is_realtime ?
+				mdata->vbif_rt_qos[i] : mdata->vbif_nrt_qos[i];
+			reg_val |= vbif_qos << (pipe->xin_id * 2);
+			MDSS_VBIF_WRITE(mdata, MDSS_VBIF_QOS_REMAP_BASE + i*4,
+				reg_val, is_nrt_vbif);
+		}
+		mutex_unlock(&mdata->reg_lock);
+	}
+}
+
+/**
+ * mdss_mdp_fixed_qos_arbiter_setup - Program the RT/NRT registers based on
+ *              real or non real time clients
+ * @mdata:      Pointer to the global mdss data structure.
+ * @pipe:       Pointer to source pipe struct to get xin id's.
+ * @is_realtime:        To determine if pipe's client is real or
+ *                      non real time.
+ * This function assumes that clocks are on, so it is the caller's responsibility to
+ * call this function with clocks enabled.
+ */
+static void mdss_mdp_fixed_qos_arbiter_setup(struct mdss_data_type *mdata,
+		struct mdss_mdp_pipe *pipe, bool is_realtime)
+{
+	u32 mask, reg_val;
+	bool is_nrt_vbif = mdss_mdp_is_nrt_vbif_client(mdata, pipe);
+
+	if (!mdata->has_fixed_qos_arbiter_enabled)
+		return;
+
+	mutex_lock(&mdata->reg_lock);
+	reg_val = MDSS_VBIF_READ(mdata, MDSS_VBIF_FIXED_SORT_EN, is_nrt_vbif);
+	mask = 0x1 << pipe->xin_id;
+	reg_val |= mask;
+
+	/* Enable the fixed sort for the client */
+	MDSS_VBIF_WRITE(mdata, MDSS_VBIF_FIXED_SORT_EN, reg_val, is_nrt_vbif);
+	reg_val = MDSS_VBIF_READ(mdata, MDSS_VBIF_FIXED_SORT_SEL0, is_nrt_vbif);
+	mask = 0x1 << (pipe->xin_id * 2);
+	if (is_realtime) {
+		reg_val &= ~mask;
+		pr_debug("Real time traffic on pipe type=%x  pnum=%d\n",
+				pipe->type, pipe->num);
+	} else {
+		reg_val |= mask;
+		pr_debug("Non real time traffic on pipe type=%x  pnum=%d\n",
+				pipe->type, pipe->num);
+	}
+	/* Set the fixed_sort regs as per RT/NRT client */
+	MDSS_VBIF_WRITE(mdata, MDSS_VBIF_FIXED_SORT_SEL0, reg_val, is_nrt_vbif);
+	mutex_unlock(&mdata->reg_lock);
+}
+
+/*
+ * mdss_mdp_init_pipe_params() - reset the software state of a pipe being
+ * (re)claimed: reference count, free-wait queue, buffer queue and the
+ * per-session fields (flags, blend/mixer assignment, scaler and layer
+ * config, multirect mode).
+ */
+static void mdss_mdp_init_pipe_params(struct mdss_mdp_pipe *pipe)
+{
+	kref_init(&pipe->kref);
+	init_waitqueue_head(&pipe->free_waitq);
+	INIT_LIST_HEAD(&pipe->buf_queue);
+
+	pipe->flags = 0;
+	pipe->is_right_blend = false;
+	pipe->src_split_req = false;
+	pipe->bwc_mode = 0;
+
+	pipe->mfd = NULL;
+	pipe->mixer_left = pipe->mixer_right = NULL;
+	pipe->mixer_stage = MDSS_MDP_STAGE_UNUSED;
+	memset(&pipe->scaler, 0, sizeof(struct mdp_scale_data));
+	memset(&pipe->layer, 0, sizeof(struct mdp_input_layer));
+
+	pipe->multirect.mode = MDSS_MDP_PIPE_MULTIRECT_NONE;
+}
+
+/*
+ * mdss_mdp_pipe_init_config() - prepare @pipe for a new client. If @pipe is
+ * NULL and @pipe_share is set, reuse the DMA pipe attached to @mixer (shared
+ * writeback case) by taking an extra reference instead.
+ *
+ * Returns 0 on success, the mdss_mdp_pipe_fetch_halt() error if an unhalted
+ * pipe could not be brought to a sane state, or -EINVAL if a shared pipe is
+ * not attached to a writeback mixer.
+ */
+static int mdss_mdp_pipe_init_config(struct mdss_mdp_pipe *pipe,
+	struct mdss_mdp_mixer *mixer, bool pipe_share)
+{
+	int rc = 0;
+	struct mdss_data_type *mdata;
+
+	/* halt the pipe first if it was left fetching by a previous client */
+	if (pipe && pipe->unhalted) {
+		rc = mdss_mdp_pipe_fetch_halt(pipe, false);
+		if (rc) {
+			pr_err("%d failed because pipe is in bad state\n",
+				pipe->num);
+			goto end;
+		}
+	}
+
+	mdata = mixer->ctl->mdata;
+
+	if (pipe) {
+		pr_debug("type=%x   pnum=%d  rect=%d\n",
+				pipe->type, pipe->num, pipe->multirect.num);
+		mdss_mdp_init_pipe_params(pipe);
+	} else if (pipe_share) {
+		/*
+		 * when there is no dedicated wfd blk, DMA pipe can be
+		 * shared as long as its attached to a writeback mixer
+		 */
+		pipe = mdata->dma_pipes + mixer->num;
+		if (pipe->mixer_left->type != MDSS_MDP_MIXER_TYPE_WRITEBACK) {
+			rc = -EINVAL;
+			goto end;
+		}
+		kref_get(&pipe->kref);
+		pr_debug("pipe sharing for pipe=%d\n", pipe->num);
+	}
+
+end:
+	return rc;
+}
+
+/*
+ * mdss_mdp_pipe_init() - claim the first unreferenced pipe of @type from the
+ * matching pipe pool, searching from index @off. When @left_blend_pipe is
+ * given and of the same type, the search start is moved past it so the right
+ * blend pipe always has lower priority than the left one.
+ *
+ * Returns the claimed pipe, NULL if none is free, ERR_PTR(-EBADSLT) when the
+ * priority constraint cannot be met, or ERR_PTR(-EINVAL) on config failure.
+ * Called with mdss_mdp_sspp_lock held (see mdss_mdp_pipe_alloc()).
+ */
+static struct mdss_mdp_pipe *mdss_mdp_pipe_init(struct mdss_mdp_mixer *mixer,
+	u32 type, u32 off, struct mdss_mdp_pipe *left_blend_pipe)
+{
+	struct mdss_mdp_pipe *pipe = NULL;
+	struct mdss_data_type *mdata;
+	struct mdss_mdp_pipe *pipe_pool = NULL;
+	u32 npipes;
+	bool pipe_share = false;
+	u32 i;
+	int rc;
+
+	if (!mixer || !mixer->ctl || !mixer->ctl->mdata)
+		return NULL;
+
+	mdata = mixer->ctl->mdata;
+
+	/* select the pool for the requested pipe type */
+	switch (type) {
+	case MDSS_MDP_PIPE_TYPE_VIG:
+		pipe_pool = mdata->vig_pipes;
+		npipes = mdata->nvig_pipes;
+		break;
+
+	case MDSS_MDP_PIPE_TYPE_RGB:
+		pipe_pool = mdata->rgb_pipes;
+		npipes = mdata->nrgb_pipes;
+		break;
+
+	case MDSS_MDP_PIPE_TYPE_DMA:
+		pipe_pool = mdata->dma_pipes;
+		npipes = mdata->ndma_pipes;
+		/* shared-WFD targets reuse the DMA pipe on writeback mixers */
+		if ((mdata->wfd_mode == MDSS_MDP_WFD_SHARED) &&
+		   (mixer->type == MDSS_MDP_MIXER_TYPE_WRITEBACK))
+			pipe_share = true;
+		break;
+
+	case MDSS_MDP_PIPE_TYPE_CURSOR:
+		pipe_pool = mdata->cursor_pipes;
+		npipes = mdata->ncursor_pipes;
+		break;
+
+	default:
+		npipes = 0;
+		pr_err("invalid pipe type %d\n", type);
+		break;
+	}
+
+	/* allocate lower priority right blend pipe */
+	if (left_blend_pipe && (left_blend_pipe->type == type) && pipe_pool) {
+		struct mdss_mdp_pipe *pool_head = pipe_pool + off;
+
+		off += left_blend_pipe->priority - pool_head->priority + 1;
+		if (off >= npipes) {
+			pr_warn("priority limitation. l_pipe:%d. no low priority %d pipe type available.\n",
+				left_blend_pipe->num, type);
+			pipe = ERR_PTR(-EBADSLT);
+			return pipe;
+		}
+	}
+
+	/* first pipe with a zero refcount is free to claim */
+	for (i = off; i < npipes; i++) {
+		pipe = pipe_pool + i;
+		if (pipe && atomic_read(&pipe->kref.refcount) == 0) {
+			pipe->mixer_left = mixer;
+			break;
+		}
+		pipe = NULL;
+	}
+
+	/* cursor pipes skip the halt/share handling in init_config */
+	if (pipe && type == MDSS_MDP_PIPE_TYPE_CURSOR) {
+		mdss_mdp_init_pipe_params(pipe);
+		pr_debug("cursor: type=%x pnum=%d\n",
+			pipe->type, pipe->num);
+		goto cursor_done;
+	}
+
+	rc = mdss_mdp_pipe_init_config(pipe, mixer, pipe_share);
+	if (rc)
+		return ERR_PTR(-EINVAL);
+cursor_done:
+	if (!pipe)
+		pr_err("no %d type pipes available\n", type);
+
+	return pipe;
+}
+
+/*
+ * mdss_mdp_pipe_alloc() - locked wrapper around mdss_mdp_pipe_init(),
+ * allocating from the start of the @type pool (offset 0).
+ */
+struct mdss_mdp_pipe *mdss_mdp_pipe_alloc(struct mdss_mdp_mixer *mixer,
+	u32 type, struct mdss_mdp_pipe *left_blend_pipe)
+{
+	struct mdss_mdp_pipe *pipe;
+
+	mutex_lock(&mdss_mdp_sspp_lock);
+	pipe = mdss_mdp_pipe_init(mixer, type, 0, left_blend_pipe);
+	mutex_unlock(&mdss_mdp_sspp_lock);
+	return pipe;
+}
+
+/*
+ * mdss_mdp_pipe_get() - look up the pipe with index mask @ndx / rect
+ * @rect_num and take a reference on it via mdss_mdp_pipe_map().
+ *
+ * Returns the referenced pipe, ERR_PTR(-EINVAL) if @ndx is 0 or not found,
+ * or ERR_PTR(-EACCES) if the map (refcount get) fails.
+ */
+struct mdss_mdp_pipe *mdss_mdp_pipe_get(u32 ndx,
+	enum mdss_mdp_pipe_rect rect_num)
+{
+	struct mdss_mdp_pipe *pipe = NULL;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!ndx)
+		return ERR_PTR(-EINVAL);
+
+	mutex_lock(&mdss_mdp_sspp_lock);
+
+	pipe = mdss_mdp_pipe_search(mdata, ndx, rect_num);
+	if (!pipe) {
+		pipe = ERR_PTR(-EINVAL);
+		goto error;
+	}
+
+	if (mdss_mdp_pipe_map(pipe))
+		pipe = ERR_PTR(-EACCES);
+
+error:
+	mutex_unlock(&mdss_mdp_sspp_lock);
+	return pipe;
+}
+
+/*
+ * mdss_mdp_pipe_assign() - assign a specific pipe (by @ndx/@rect_num) to
+ * @mixer. If the pipe is still referenced, drop the sspp lock and wait
+ * (interruptibly, with timeout and up to 5 signal retries) for it to be
+ * freed before re-acquiring the lock and configuring it.
+ *
+ * Returns the pipe, ERR_PTR(-EINVAL) on bad ndx/search/config failure, or
+ * ERR_PTR(-EBUSY) if the pipe never became free.
+ *
+ * NOTE: the -EBUSY path jumps to "end" (not "error") because the lock was
+ * already released before waiting; only the "error" label unlocks.
+ */
+struct mdss_mdp_pipe *mdss_mdp_pipe_assign(struct mdss_data_type *mdata,
+	struct mdss_mdp_mixer *mixer, u32 ndx, enum mdss_mdp_pipe_rect rect_num)
+{
+	struct mdss_mdp_pipe *pipe = NULL;
+	int rc;
+	int retry_count = 0;
+
+	if (!ndx)
+		return ERR_PTR(-EINVAL);
+
+	mutex_lock(&mdss_mdp_sspp_lock);
+	pipe = mdss_mdp_pipe_search(mdata, ndx, rect_num);
+	if (!pipe) {
+		pr_err("pipe search failed\n");
+		pipe = ERR_PTR(-EINVAL);
+		goto error;
+	}
+
+	if (atomic_read(&pipe->kref.refcount) != 0) {
+		/* drop the lock so the current owner can free the pipe */
+		mutex_unlock(&mdss_mdp_sspp_lock);
+		do {
+			rc = wait_event_interruptible_timeout(pipe->free_waitq,
+				!atomic_read(&pipe->kref.refcount),
+				usecs_to_jiffies(PIPE_CLEANUP_TIMEOUT_US));
+			if (rc == 0 || retry_count == 5) {
+				/* timed out or too many signal interruptions */
+				pr_err("pipe ndx:%d free wait failed, mfd ndx:%d rc=%d\n",
+					pipe->ndx,
+					pipe->mfd ? pipe->mfd->index : -1, rc);
+				pipe = ERR_PTR(-EBUSY);
+				goto end;
+			} else if (rc == -ERESTARTSYS) {
+				pr_debug("interrupt signal received\n");
+				retry_count++;
+				continue;
+			} else {
+				break;
+			}
+		} while (true);
+
+		mutex_lock(&mdss_mdp_sspp_lock);
+	}
+	pipe->mixer_left = mixer;
+
+	rc = mdss_mdp_pipe_init_config(pipe, mixer, false);
+	if (rc)
+		pipe = ERR_PTR(rc);
+
+error:
+	mutex_unlock(&mdss_mdp_sspp_lock);
+end:
+	return pipe;
+}
+
+/*
+ * __pipe_lookup() - scan a pipe pool of @count hardware pipes, visiting
+ * every rect of each pipe (the pool stores max_rects consecutive entries
+ * per SSPP), and return the first entry matching @rect_num for which
+ * @cmp(pipe, @data) is true, or NULL if none matches.
+ */
+static struct mdss_mdp_pipe *__pipe_lookup(struct mdss_mdp_pipe *pipe_list,
+		int count, enum mdss_mdp_pipe_rect rect_num,
+		bool (*cmp)(struct mdss_mdp_pipe *, void *), void *data)
+{
+	struct mdss_mdp_pipe *pipe;
+	int i, j, max_rects;
+
+	for (i = 0, pipe = pipe_list; i < count; i++) {
+		max_rects = pipe->multirect.max_rects;
+		for (j = 0; j < max_rects; j++, pipe++)
+			if ((rect_num == pipe->multirect.num) &&
+					cmp(pipe, data))
+				return pipe;
+	}
+
+	return NULL;
+}
+
+/*
+ * mdss_mdp_pipe_lookup() - search all pipe pools (VIG, RGB, DMA, cursor, in
+ * that order) for the first pipe matching @rect_num and @cmp/@data.
+ * Returns NULL when no pool contains a match.
+ */
+static struct mdss_mdp_pipe *mdss_mdp_pipe_lookup(
+		struct mdss_data_type *mdata, enum mdss_mdp_pipe_rect rect_num,
+		bool (*cmp)(struct mdss_mdp_pipe *, void *), void *data)
+{
+	struct mdss_mdp_pipe *pipe;
+
+	pipe = __pipe_lookup(mdata->vig_pipes, mdata->nvig_pipes,
+			rect_num, cmp, data);
+	if (pipe)
+		return pipe;
+
+	pipe = __pipe_lookup(mdata->rgb_pipes, mdata->nrgb_pipes,
+			rect_num, cmp, data);
+	if (pipe)
+		return pipe;
+
+	pipe = __pipe_lookup(mdata->dma_pipes, mdata->ndma_pipes,
+			rect_num, cmp, data);
+	if (pipe)
+		return pipe;
+
+	pipe = __pipe_lookup(mdata->cursor_pipes, mdata->ncursor_pipes,
+			rect_num, cmp, data);
+	if (pipe)
+		return pipe;
+
+	return NULL;
+}
+
+/* lookup comparator: match a pipe by its hardware fetch (client) id */
+static bool __pipe_cmp_fetch_id(struct mdss_mdp_pipe *pipe, void *data)
+{
+	u32 *fetch_id = data;
+
+	return pipe->ftch_id == *fetch_id;
+}
+
+/*
+ * mdss_mdp_pipe_search_by_client_id() - find the pipe whose fetch id equals
+ * @client_id for the given @rect_num, or NULL if not found.
+ */
+static struct mdss_mdp_pipe *mdss_mdp_pipe_search_by_client_id(
+	struct mdss_data_type *mdata, int client_id,
+	enum mdss_mdp_pipe_rect rect_num)
+{
+	return mdss_mdp_pipe_lookup(mdata, rect_num,
+			__pipe_cmp_fetch_id, &client_id);
+}
+
+/* lookup comparator: match a pipe by its index bitmask (ndx) */
+static bool __pipe_cmp_ndx(struct mdss_mdp_pipe *pipe, void *data)
+{
+	u32 *ndx = data;
+
+	return pipe->ndx == *ndx;
+}
+
+/*
+ * mdss_mdp_pipe_search() - find the pipe whose ndx bitmask equals @ndx for
+ * the given @rect_num, or NULL if not found.
+ */
+struct mdss_mdp_pipe *mdss_mdp_pipe_search(struct mdss_data_type *mdata,
+	u32 ndx, enum mdss_mdp_pipe_rect rect_num)
+{
+	return mdss_mdp_pipe_lookup(mdata, rect_num, __pipe_cmp_ndx, &ndx);
+}
+
+/*
+ * This API checks if the pipe is still staged on a mixer. If any pipe is
+ * still staged on the mixer when it is being freed, the error is logged
+ * and the xlog panic/dump handler is triggered.
+ *
+ * Only the pipe_free API can call this API.
+ */
+static void mdss_mdp_pipe_check_stage(struct mdss_mdp_pipe *pipe,
+	struct mdss_mdp_mixer *mixer)
+{
+	int index;
+
+	if (pipe->mixer_stage == MDSS_MDP_STAGE_UNUSED || !mixer)
+		return;
+
+	/* each stage holds MAX_PIPES_PER_STAGE slots; right blend is slot+1 */
+	index = (pipe->mixer_stage * MAX_PIPES_PER_STAGE);
+	if (pipe->is_right_blend)
+		index++;
+	if (index < MAX_PIPES_PER_LM && pipe == mixer->stage_pipe[index]) {
+		pr_err("pipe%d mixer:%d pipe->mixer_stage=%d src_split:%d right blend:%d\n",
+			pipe->num, mixer->num, pipe->mixer_stage,
+			pipe->src_split_req, pipe->is_right_blend);
+		MDSS_XLOG_TOUT_HANDLER("mdp", "dbg_bus", "panic");
+	}
+}
+
+/*
+ * mdss_mdp_pipe_hw_cleanup() - tear down the hardware state of a freed
+ * pipe: disable panic/vblank signals, halt the fetch and release SMP blocks
+ * (or just unreserve SMP if the pipe never played), clear BWC panic state
+ * when this was the last BWC pipe, and reset the multirect opmode register.
+ * Clocks are turned on for the duration of the register accesses.
+ */
+static void mdss_mdp_pipe_hw_cleanup(struct mdss_mdp_pipe *pipe)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+	mdss_mdp_pipe_panic_vblank_signal_ctrl(pipe, false);
+	mdss_mdp_pipe_panic_signal_ctrl(pipe, false);
+
+	if (pipe->play_cnt) {
+		mdss_mdp_pipe_fetch_halt(pipe, false);
+		mdss_mdp_pipe_pp_clear(pipe);
+		mdss_mdp_smp_free(pipe);
+	} else {
+		/* pipe never displayed anything; just drop the reservation */
+		mdss_mdp_smp_unreserve(pipe);
+	}
+
+	if (mdss_has_quirk(mdata, MDSS_QUIRK_BWCPANIC) && pipe->bwc_mode) {
+		unsigned long pnum_bitmap = BIT(pipe->num);
+
+		bitmap_andnot(mdata->bwc_enable_map, mdata->bwc_enable_map,
+			&pnum_bitmap, MAX_DRV_SUP_PIPES);
+
+		/* last BWC user gone: turn the global BWC panic ctrl off */
+		if (bitmap_empty(mdata->bwc_enable_map, MAX_DRV_SUP_PIPES))
+			mdss_mdp_bwcpanic_ctrl(mdata, false);
+	}
+
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_MULTI_REC_OP_MODE, 0);
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+}
+
+/*
+ * mdss_mdp_pipe_free() - kref release callback for a pipe. Performs the
+ * hardware cleanup only if the sibling multirect rect (multirect.next) is
+ * not still referenced, since the two rects share one SSPP. Finally checks
+ * that the pipe is no longer staged on either mixer.
+ */
+static void mdss_mdp_pipe_free(struct kref *kref)
+{
+	struct mdss_mdp_pipe *pipe, *next_pipe;
+
+	pipe = container_of(kref, struct mdss_mdp_pipe, kref);
+
+	pr_debug("ndx=%x pnum=%d rect=%d\n",
+			pipe->ndx, pipe->num, pipe->multirect.num);
+
+	next_pipe = (struct mdss_mdp_pipe *) pipe->multirect.next;
+	if (!next_pipe || (atomic_read(&next_pipe->kref.refcount) == 0)) {
+		mdss_mdp_pipe_hw_cleanup(pipe);
+	} else {
+		pr_debug("skip hw cleanup on pnum=%d rect=%d, rect%d still in use\n",
+				pipe->num, pipe->multirect.num,
+				next_pipe->multirect.num);
+	}
+
+	mdss_mdp_pipe_check_stage(pipe, pipe->mixer_left);
+	mdss_mdp_pipe_check_stage(pipe, pipe->mixer_right);
+}
+
+/*
+ * mdss_mdp_check_pipe_in_use() - scan every active (ref-counted) ctl's left
+ * and right mixers for @pipe still being programmed in the mixer registers.
+ * Rotator-mode mixers are skipped. Logs and triggers the xlog dump handler
+ * for each hit; returns true if the pipe was found in use anywhere.
+ */
+static bool mdss_mdp_check_pipe_in_use(struct mdss_mdp_pipe *pipe)
+{
+	int i;
+	bool in_use = false;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_ctl *ctl;
+	struct mdss_mdp_mixer *mixer;
+
+	for (i = 0; i < mdata->nctl; i++) {
+		ctl = mdata->ctl_off + i;
+		if (!ctl || !ctl->ref_cnt)
+			continue;
+
+		mixer = ctl->mixer_left;
+		if (!mixer || mixer->rotator_mode)
+			continue;
+
+		if (mdss_mdp_mixer_reg_has_pipe(mixer, pipe)) {
+			in_use = true;
+			pr_err("IN USE: pipe=%d mixer=%d\n",
+					pipe->num, mixer->num);
+			MDSS_XLOG_TOUT_HANDLER("mdp", "vbif", "vbif_nrt",
+				"dbg_bus", "vbif_dbg_bus", "panic");
+		}
+
+		mixer = ctl->mixer_right;
+		if (mixer && mdss_mdp_mixer_reg_has_pipe(mixer, pipe)) {
+			in_use = true;
+			pr_err("IN USE: pipe=%d mixer=%d\n",
+					pipe->num, mixer->num);
+			MDSS_XLOG_TOUT_HANDLER("mdp", "vbif", "vbif_nrt",
+				"dbg_bus", "vbif_dbg_bus", "panic");
+		}
+	}
+
+	return in_use;
+}
+
+/*
+ * mdss_mdp_is_pipe_idle() - determine whether @pipe's clock and VBIF client
+ * are idle. A pipe whose clock is forced on is only considered "idle" when
+ * @ignore_force_on is set. Cursor pipes skip the VBIF check because their
+ * xin-id is shared with other clients. Returns non-zero (true) when idle.
+ */
+static int mdss_mdp_is_pipe_idle(struct mdss_mdp_pipe *pipe,
+	bool ignore_force_on, bool is_nrt_vbif)
+{
+	u32 reg_val;
+	u32 vbif_idle_mask, forced_on_mask, clk_status_idle_mask;
+	bool is_idle = false, is_forced_on;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	forced_on_mask = BIT(pipe->clk_ctrl.bit_off + CLK_FORCE_ON_OFFSET);
+	reg_val = readl_relaxed(mdata->mdp_base + pipe->clk_ctrl.reg_off);
+	is_forced_on = (reg_val & forced_on_mask) ? true : false;
+
+	pr_debug("pipe#:%d clk_ctrl: 0x%x forced_on_mask: 0x%x\n", pipe->num,
+		reg_val, forced_on_mask);
+	/* if forced on then no need to check status */
+	if (!is_forced_on) {
+		clk_status_idle_mask =
+			BIT(pipe->clk_status.bit_off + CLK_STATUS_OFFSET);
+		reg_val = readl_relaxed(mdata->mdp_base +
+			pipe->clk_status.reg_off);
+
+		/* clock status bit clear means the pipe clock is gated/idle */
+		if ((reg_val & clk_status_idle_mask) == 0)
+			is_idle = true;
+
+		pr_debug("pipe#:%d clk_status:0x%x clk_status_idle_mask:0x%x\n",
+			pipe->num, reg_val, clk_status_idle_mask);
+	}
+
+	if (!ignore_force_on && (is_forced_on || !is_idle))
+		goto exit;
+
+	/*
+	 * skip vbif check for cursor pipes as the same xin-id is shared
+	 * between cursor0, cursor1 and dsi
+	 */
+	if (pipe->type == MDSS_MDP_PIPE_TYPE_CURSOR) {
+		if (ignore_force_on && is_forced_on)
+			is_idle = true;
+		goto exit;
+	}
+
+	/* halt-status bits live in the upper half of XIN_HALT_CTRL1 */
+	vbif_idle_mask = BIT(pipe->xin_id + 16);
+	reg_val = MDSS_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL1, is_nrt_vbif);
+
+	if (reg_val & vbif_idle_mask)
+		is_idle = true;
+
+	pr_debug("pipe#:%d XIN_HALT_CTRL1: 0x%x, vbif_idle_mask: 0x%x\n",
+			pipe->num, reg_val, vbif_idle_mask);
+
+exit:
+	return is_idle;
+}
+
+/*
+ * mdss_mdp_pipe_clk_force_off() - check force off mask and reset for the pipe.
+ * @pipe: pointer to the pipe data structure which needs to be checked for clk.
+ *
+ * This function would be called where software reset is available for pipe
+ * clocks. It clears the CLK_FORCE_OFF bit for the pipe, if set, under
+ * reg_lock with MDP clocks enabled for the register access.
+ */
+
+void mdss_mdp_pipe_clk_force_off(struct mdss_mdp_pipe *pipe)
+{
+	u32 reg_val, force_off_mask;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	force_off_mask =
+		BIT(pipe->clk_ctrl.bit_off + CLK_FORCE_OFF_OFFSET);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+	mutex_lock(&mdata->reg_lock);
+	reg_val = readl_relaxed(mdata->mdp_base +
+			pipe->clk_ctrl.reg_off);
+	/* only write back if the force-off bit was actually set */
+	if (reg_val & force_off_mask) {
+		reg_val &= ~force_off_mask;
+		writel_relaxed(reg_val,
+				mdata->mdp_base + pipe->clk_ctrl.reg_off);
+	}
+	mutex_unlock(&mdata->reg_lock);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+}
+
+/**
+ * mdss_mdp_pipe_fetch_halt() - Halt VBIF client corresponding to specified pipe
+ * @pipe: pointer to the pipe data structure which needs to be halted.
+ * @is_recovery: true when called from a recovery path, where pipes may still
+ *               be staged and the in-use check must be skipped.
+ *
+ * Check if VBIF client corresponding to specified pipe is idle or not. If not
+ * send a halt request for the client in question and wait for it be idle.
+ *
+ * This function would typically be called after pipe is unstaged or before it
+ * is initialized. On success it should be assumed that pipe is in idle state
+ * and would not fetch any more data. This function cannot be called from
+ * interrupt context.
+ */
+int mdss_mdp_pipe_fetch_halt(struct mdss_mdp_pipe *pipe, bool is_recovery)
+{
+	bool is_idle, forced_on = false, in_use = false;
+	int rc = 0;
+	u32 reg_val, idle_mask, clk_val, clk_mask;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	bool sw_reset_avail = mdss_mdp_pipe_is_sw_reset_available(mdata);
+	bool is_nrt_vbif = mdss_mdp_is_nrt_vbif_client(mdata, pipe);
+	u32 sw_reset_off = pipe->sw_reset.reg_off;
+	u32 clk_ctrl_off = pipe->clk_ctrl.reg_off;
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+	is_idle = mdss_mdp_is_pipe_idle(pipe, true, is_nrt_vbif);
+	/*
+	 * avoid pipe_in_use check in recovery path as the pipes would not
+	 * have been unstaged at this point.
+	 */
+	if (!is_idle && !is_recovery)
+		in_use = mdss_mdp_check_pipe_in_use(pipe);
+
+	/* only halt when the pipe is busy but no mixer is still driving it */
+	if (!is_idle && !in_use) {
+
+		pr_err("%pS: pipe%d is not idle. xin_id=%d\n",
+			__builtin_return_address(0), pipe->num, pipe->xin_id);
+
+		mutex_lock(&mdata->reg_lock);
+		idle_mask = BIT(pipe->xin_id + 16);
+
+		/*
+		 * make sure client clock is not gated while halting by forcing
+		 * it ON only if it was not previously forced on
+		 */
+		clk_val = readl_relaxed(mdata->mdp_base + clk_ctrl_off);
+		clk_mask = BIT(pipe->clk_ctrl.bit_off + CLK_FORCE_ON_OFFSET);
+		if (!(clk_val & clk_mask)) {
+			clk_val |= clk_mask;
+			writel_relaxed(clk_val, mdata->mdp_base + clk_ctrl_off);
+			wmb(); /* ensure write is finished before progressing */
+			forced_on = true;
+		}
+
+		/* request halt for this xin client */
+		reg_val = MDSS_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
+								is_nrt_vbif);
+		MDSS_VBIF_WRITE(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
+				reg_val | BIT(pipe->xin_id), is_nrt_vbif);
+
+		/* assert software reset while the halt is pending, if present */
+		if (sw_reset_avail) {
+			reg_val = readl_relaxed(mdata->mdp_base + sw_reset_off);
+			writel_relaxed(reg_val | BIT(pipe->sw_reset.bit_off),
+					mdata->mdp_base + sw_reset_off);
+			wmb(); /* ensure write is finished before progressing */
+		}
+		mutex_unlock(&mdata->reg_lock);
+
+		/* wait outside reg_lock for the halt ack */
+		rc = mdss_mdp_wait_for_xin_halt(pipe->xin_id, is_nrt_vbif);
+
+		mutex_lock(&mdata->reg_lock);
+		/* unwind: clear the halt request again */
+		reg_val = MDSS_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
+								is_nrt_vbif);
+		MDSS_VBIF_WRITE(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
+				reg_val & ~BIT(pipe->xin_id), is_nrt_vbif);
+
+		clk_val = readl_relaxed(mdata->mdp_base + clk_ctrl_off);
+		/* drop the force-on only if this function set it */
+		if (forced_on)
+			clk_val &= ~clk_mask;
+
+		if (sw_reset_avail) {
+			reg_val = readl_relaxed(mdata->mdp_base + sw_reset_off);
+			writel_relaxed(reg_val & ~BIT(pipe->sw_reset.bit_off),
+				mdata->mdp_base + sw_reset_off);
+			wmb(); /* ensure write is finished before progressing */
+
+			clk_val |= BIT(pipe->clk_ctrl.bit_off +
+				CLK_FORCE_OFF_OFFSET);
+		}
+		writel_relaxed(clk_val, mdata->mdp_base + clk_ctrl_off);
+		mutex_unlock(&mdata->reg_lock);
+	}
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	return rc;
+}
+
+/*
+ * mdss_mdp_pipe_destroy() - drop the caller's reference on @pipe. If this
+ * was the last reference, mdss_mdp_pipe_free() runs under mdss_mdp_sspp_lock
+ * and waiters on free_waitq are woken. Returns -EBUSY if other references
+ * remain (in which case no wakeup and no unlock are needed).
+ */
+int mdss_mdp_pipe_destroy(struct mdss_mdp_pipe *pipe)
+{
+	if (!kref_put_mutex(&pipe->kref, mdss_mdp_pipe_free,
+			&mdss_mdp_sspp_lock)) {
+		pr_err("unable to free pipe %d while still in use\n",
+				pipe->num);
+		return -EBUSY;
+	}
+
+	/* pipe is now free; unblock mdss_mdp_pipe_assign() waiters */
+	wake_up_all(&pipe->free_waitq);
+	mutex_unlock(&mdss_mdp_sspp_lock);
+
+	return 0;
+}
+
+/**
+ * mdss_mdp_pipe_handoff() - Handoff staged pipes during bootup
+ * @pipe: pointer to the pipe to be handed-off
+ *
+ * Populate the software structures for the pipe based on the current
+ * configuration of the hardware pipe by the reading the appropriate MDP
+ * registers.
+ *
+ * This function would typically be called during MDP probe for the case
+ * when certain pipes might be programmed in the bootloader to display
+ * the splash screen.
+ *
+ * Return: 0 on success, -EINVAL for an unrecognized bpp or unknown format.
+ */
+int mdss_mdp_pipe_handoff(struct mdss_mdp_pipe *pipe)
+{
+	int rc = 0;
+	u32 src_fmt, reg = 0, bpp = 0;
+
+	/*
+	 * todo: for now, only reading pipe src and dest size details
+	 * from the registers. This is needed for appropriately
+	 * calculating perf metrics for the handed off pipes.
+	 * We may need to parse some more details at a later date.
+	 */
+	reg = mdss_mdp_pipe_read(pipe, MDSS_MDP_REG_SSPP_SRC_SIZE);
+	pipe->src.h = reg >> 16;
+	pipe->src.w = reg & 0xFFFF;
+	reg = mdss_mdp_pipe_read(pipe, MDSS_MDP_REG_SSPP_OUT_SIZE);
+	pipe->dst.h = reg >> 16;
+	pipe->dst.w = reg & 0xFFFF;
+
+	/* Assume that the source format is RGB */
+	reg = mdss_mdp_pipe_read(pipe, MDSS_MDP_REG_SSPP_SRC_FORMAT);
+	/* bits [10:9] of SRC_FORMAT encode bytes-per-pixel minus one */
+	bpp = ((reg >> 9) & 0x3) + 1;
+	switch (bpp) {
+	case 4:
+		src_fmt = MDP_RGBA_8888;
+		break;
+	case 3:
+		src_fmt = MDP_RGB_888;
+		break;
+	case 2:
+		src_fmt = MDP_RGB_565;
+		break;
+	default:
+		pr_err("Invalid bpp=%d found\n", bpp);
+		rc = -EINVAL;
+		goto error;
+	}
+	pipe->src_fmt = mdss_mdp_get_format_params(src_fmt);
+	if (!pipe->src_fmt) {
+		pr_err("%s: failed to retrieve format parameters\n",
+			__func__);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	pr_debug("Pipe settings: src.h=%d src.w=%d dst.h=%d dst.w=%d bpp=%d\n"
+		, pipe->src.h, pipe->src.w, pipe->dst.h, pipe->dst.w,
+		pipe->src_fmt->bpp);
+
+	/* mark as active so it is accounted for until userspace takes over */
+	pipe->is_handed_off = true;
+	pipe->play_cnt = 1;
+	mdss_mdp_init_pipe_params(pipe);
+
+error:
+	return rc;
+}
+
+/*
+ * mdss_mdp_pipe_position_update() - program the pipe's source and
+ * destination size/position registers (rect0 or rect1 variants depending on
+ * multirect.num). Handles source-split base-layer coordinate adjustment,
+ * software overfetch offsets, and a BWC AHB clock workaround on HW rev 103.
+ */
+void mdss_mdp_pipe_position_update(struct mdss_mdp_pipe *pipe,
+		struct mdss_rect *src, struct mdss_rect *dst)
+{
+	u32 src_size, src_xy, dst_size, dst_xy;
+	u32 tmp_src_size, tmp_src_xy, reg_data;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	/* registers pack height in the upper 16 bits, width/x in the lower */
+	src_size = (src->h << 16) | src->w;
+	src_xy = (src->y << 16) | src->x;
+	dst_size = (dst->h << 16) | dst->w;
+
+	/*
+	 * base layer requirements are different compared to other layers
+	 * located at different stages. If source split is enabled and base
+	 * layer is used, base layer on the right LM's x offset is relative
+	 * to right LM's co-ordinate system unlike other layers which are
+	 * relative to left LM's top-left.
+	 */
+	if (pipe->mixer_stage == MDSS_MDP_STAGE_BASE && mdata->has_src_split
+			&& dst->x >= left_lm_w_from_mfd(pipe->mfd))
+		dst->x -= left_lm_w_from_mfd(pipe->mfd);
+	dst_xy = (dst->y << 16) | dst->x;
+
+	/*
+	 * Software overfetch is used when scalar pixel extension is
+	 * not enabled
+	 */
+	if (pipe->overfetch_disable && !pipe->scaler.enable) {
+		if (pipe->overfetch_disable & OVERFETCH_DISABLE_LEFT)
+			src_xy &= ~0xFFFF;
+		if (pipe->overfetch_disable & OVERFETCH_DISABLE_TOP)
+			src_xy &= ~(0xFFFF << 16);
+	}
+
+	/* rev-103 BWC quirk: kick AHB clk bit 28 when src geometry changes */
+	if (IS_MDSS_MAJOR_MINOR_SAME(mdata->mdp_rev, MDSS_MDP_HW_REV_103) &&
+		pipe->bwc_mode) {
+		/* check source dimensions change */
+		tmp_src_size = mdss_mdp_pipe_read(pipe,
+						 MDSS_MDP_REG_SSPP_SRC_SIZE);
+		tmp_src_xy = mdss_mdp_pipe_read(pipe,
+						 MDSS_MDP_REG_SSPP_SRC_XY);
+		if (src_xy != tmp_src_xy || tmp_src_size != src_size) {
+			reg_data = readl_relaxed(mdata->mdp_base +
+							 AHB_CLK_OFFSET);
+			reg_data |= BIT(28);
+			writel_relaxed(reg_data,
+					 mdata->mdp_base + AHB_CLK_OFFSET);
+		}
+	}
+
+	if (pipe->multirect.num == MDSS_MDP_PIPE_RECT0) {
+		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_SIZE, src_size);
+		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_XY, src_xy);
+		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_OUT_SIZE, dst_size);
+		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_OUT_XY, dst_xy);
+	} else {
+		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_SIZE_REC1,
+				    src_size);
+		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_XY_REC1,
+				    src_xy);
+		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_OUT_SIZE_REC1,
+				    dst_size);
+		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_OUT_XY_REC1,
+				    dst_xy);
+	}
+
+	MDSS_XLOG(pipe->num, pipe->multirect.num, src_size, src_xy,
+		  dst_size, dst_xy, pipe->multirect.mode);
+}
+
+/*
+ * mdss_mdp_pipe_stride_update() - program the YSTRIDE0/1 and secure-session
+ * registers. In multirect mode the stride registers carry both rects'
+ * strides (rect0 in ystride[0]/[2], rect1 in ystride[1]/[3]) and secure
+ * flags are merged (0x5 for rect0, 0xA for rect1), so programming may be
+ * delayed until both rects are ready.
+ */
+static void mdss_mdp_pipe_stride_update(struct mdss_mdp_pipe *pipe)
+{
+	u32 reg0, reg1;
+	u32 ystride[MAX_PLANES] = {0};
+	struct mdss_mdp_pipe *rec0_pipe, *rec1_pipe;
+	u32 secure = 0;
+
+	/*
+	 * since stride registers are shared between both rectangles in
+	 * multirect mode, delayed programming allows programming of both
+	 * together
+	 */
+	if (is_pipe_programming_delay_needed(pipe)) {
+		pr_debug("skip stride programming for pipe%d rec%d\n",
+			pipe->num, pipe->multirect.num);
+		return;
+	}
+
+	if (pipe->multirect.mode == MDSS_MDP_PIPE_MULTIRECT_NONE) {
+		memcpy(&ystride, &pipe->src_planes.ystride,
+		       sizeof(u32) * MAX_PLANES);
+		if (pipe->flags & MDP_SECURE_OVERLAY_SESSION)
+			secure = 0xF;
+	} else {
+		/* resolve which rect is rect0/rect1 via multirect.next */
+		if (pipe->multirect.num == MDSS_MDP_PIPE_RECT0) {
+			rec0_pipe = pipe;
+			rec1_pipe = pipe->multirect.next;
+		} else {
+			rec1_pipe = pipe;
+			rec0_pipe = pipe->multirect.next;
+		}
+
+		ystride[0] = rec0_pipe->src_planes.ystride[0];
+		ystride[2] = rec0_pipe->src_planes.ystride[2];
+		if (rec0_pipe->flags & MDP_SECURE_OVERLAY_SESSION)
+			secure |= 0x5;
+
+		ystride[1] = rec1_pipe->src_planes.ystride[0];
+		ystride[3] = rec1_pipe->src_planes.ystride[2];
+		if (rec1_pipe->flags & MDP_SECURE_OVERLAY_SESSION)
+			secure |= 0xA;
+	}
+
+	reg0 =  (ystride[0]) | (ystride[1] << 16);
+	reg1 =  (ystride[2]) | (ystride[3] << 16);
+
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_ADDR_SW_STATUS, secure);
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_YSTRIDE0, reg0);
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_YSTRIDE1, reg1);
+
+	pr_debug("pipe%d multirect:num%d mode=%d, ystride0=0x%x ystride1=0x%x\n",
+		pipe->num, pipe->multirect.num, pipe->multirect.mode,
+		reg0, reg1);
+	MDSS_XLOG(pipe->num, pipe->multirect.num,
+		  pipe->multirect.mode, reg0, reg1);
+}
+
+/*
+ * mdss_mdp_image_setup() - compute and program the pipe's image geometry:
+ * plane sizes, decimation, ROI crop for command-mode displays, source-split
+ * and flip adjustments, software overfetch, and (for solid fill, when @data
+ * is NULL) a src rect mirrored from dst. Returns 0 on success or a negative
+ * error from buffer validation / missing main ctl.
+ */
+static int mdss_mdp_image_setup(struct mdss_mdp_pipe *pipe,
+					struct mdss_mdp_data *data)
+{
+	u32 img_size;
+	u32 width, height, decimation;
+	int ret = 0;
+	struct mdss_rect dst, src;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	bool rotation = false;
+
+	pr_debug("ctl: %d pnum=%d wh=%dx%d src={%d,%d,%d,%d} dst={%d,%d,%d,%d}\n",
+			pipe->mixer_left->ctl->num, pipe->num,
+			pipe->img_width, pipe->img_height,
+			pipe->src.x, pipe->src.y, pipe->src.w, pipe->src.h,
+			pipe->dst.x, pipe->dst.y, pipe->dst.w, pipe->dst.h);
+
+	width = pipe->img_width;
+	height = pipe->img_height;
+
+	if (pipe->flags & MDP_SOURCE_ROTATED_90)
+		rotation = true;
+
+	mdss_mdp_get_plane_sizes(pipe->src_fmt, width, height,
+			&pipe->src_planes, pipe->bwc_mode, rotation);
+
+	/* data == NULL means solid fill; no buffer to validate */
+	if (data != NULL) {
+		ret = mdss_mdp_data_check(data, &pipe->src_planes,
+			pipe->src_fmt);
+		if (ret)
+			return ret;
+	}
+
+	/* deinterlace fetches every other line: double stride, halve height */
+	if ((pipe->flags & MDP_DEINTERLACE) &&
+			!(pipe->flags & MDP_SOURCE_ROTATED_90)) {
+		int i;
+
+		for (i = 0; i < pipe->src_planes.num_planes; i++)
+			pipe->src_planes.ystride[i] *= 2;
+		width *= 2;
+		height /= 2;
+	}
+
+	/* horizontal factor in bits [15:8], vertical in [7:0] */
+	decimation = ((1 << pipe->horz_deci) - 1) << 8;
+	decimation |= ((1 << pipe->vert_deci) - 1);
+	if (decimation)
+		pr_debug("Image decimation h=%d v=%d\n",
+				pipe->horz_deci, pipe->vert_deci);
+
+	dst = pipe->dst;
+	src = pipe->src;
+
+	/* command-mode (non-video, non-writeback): crop to the mixer ROI */
+	if (!pipe->mixer_left->ctl->is_video_mode &&
+	    (pipe->mixer_left->type != MDSS_MDP_MIXER_TYPE_WRITEBACK)) {
+
+		struct mdss_rect roi = pipe->mixer_left->roi;
+		bool is_right_mixer = pipe->mixer_left->is_right_mixer;
+		struct mdss_mdp_ctl *main_ctl;
+
+		if (pipe->mixer_left->ctl->is_master)
+			main_ctl = pipe->mixer_left->ctl;
+		else
+			main_ctl = mdss_mdp_get_main_ctl(pipe->mixer_left->ctl);
+
+		if (!main_ctl) {
+			pr_err("Error: couldn't find main_ctl for pipe%d\n",
+				pipe->num);
+			return -EINVAL;
+		}
+
+		if (pipe->src_split_req && main_ctl->mixer_right->valid_roi) {
+			/*
+			 * pipe is staged on both mixers, expand roi to span
+			 * both mixers before cropping pipe's dimensions.
+			 */
+			roi.w += main_ctl->mixer_right->roi.w;
+		} else if (mdata->has_src_split && is_right_mixer) {
+			/*
+			 * pipe is only on right mixer but since source-split
+			 * is enabled, its dst_x is full panel coordinate
+			 * aligned where as ROI is mixer coordinate aligned.
+			 * Modify dst_x before applying ROI crop.
+			 */
+			dst.x -= left_lm_w_from_mfd(pipe->mfd);
+		}
+
+		mdss_mdp_crop_rect(&src, &dst, &roi);
+
+		if (mdata->has_src_split && is_right_mixer) {
+			/*
+			 * re-adjust dst_x only if both mixers are active,
+			 * meaning right mixer will be working in source
+			 * split mode.
+			 */
+			if (mdss_mdp_is_both_lm_valid(main_ctl))
+				dst.x += main_ctl->mixer_left->roi.w;
+		}
+
+		/* mirror the cropped src rect about the original src rect */
+		if (pipe->flags & MDP_FLIP_LR) {
+			src.x = pipe->src.x + (pipe->src.x + pipe->src.w)
+				- (src.x + src.w);
+		}
+		if (pipe->flags & MDP_FLIP_UD) {
+			src.y = pipe->src.y + (pipe->src.y + pipe->src.h)
+				- (src.y + src.h);
+		}
+	}
+
+	/*
+	 * Software overfetch is used when scalar pixel extension is
+	 * not enabled
+	 */
+	if (pipe->overfetch_disable && !pipe->scaler.enable) {
+		if (pipe->overfetch_disable & OVERFETCH_DISABLE_BOTTOM) {
+			height = pipe->src.h;
+			if (!(pipe->overfetch_disable & OVERFETCH_DISABLE_TOP))
+				height += pipe->src.y;
+		}
+		if (pipe->overfetch_disable & OVERFETCH_DISABLE_RIGHT) {
+			width = pipe->src.w;
+			if (!(pipe->overfetch_disable & OVERFETCH_DISABLE_LEFT))
+				width += pipe->src.x;
+		}
+
+		pr_debug("overfetch w=%d/%d h=%d/%d\n", width,
+			pipe->img_width, height, pipe->img_height);
+	}
+	img_size = (height << 16) | width;
+
+	/*
+	 * in solid fill, there is no src rectangle, but hardware needs to
+	 * be programmed same as dst to avoid issues in scaling blocks
+	 */
+	if (data == NULL) {
+		src = (struct mdss_rect) {0, 0, dst.w, dst.h};
+		decimation = 0;
+	}
+
+	mdss_mdp_pipe_position_update(pipe, &src, &dst);
+	mdss_mdp_pipe_stride_update(pipe);
+
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_IMG_SIZE, img_size);
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_DECIMATION_CONFIG,
+			decimation);
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_set_pipe_cdp() - program the pipe's client-driven prefetch (CDP)
+ * control register: disabled for rotator pipes on targets with the ROTCDP
+ * quirk, UBWC-meta + amortized for non-linear formats, and 64-transaction
+ * read-ahead for linear non-rotator (line mode) fetches.
+ */
+static void mdss_mdp_set_pipe_cdp(struct mdss_mdp_pipe *pipe)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 cdp_settings = 0x0;
+	bool is_rotator = (pipe->mixer_left && pipe->mixer_left->rotator_mode);
+
+	/* Disable CDP for rotator pipe in v1 */
+	if (is_rotator && mdss_has_quirk(mdata, MDSS_QUIRK_ROTCDP))
+		goto exit;
+
+	cdp_settings = MDSS_MDP_CDP_ENABLE;
+
+	if (!mdss_mdp_is_linear_format(pipe->src_fmt)) {
+		/* Enable Amortized for non-linear formats */
+		cdp_settings |= MDSS_MDP_CDP_ENABLE_UBWCMETA;
+		cdp_settings |= MDSS_MDP_CDP_AMORTIZED;
+	} else {
+		/* 64-transactions for line mode otherwise we keep 32 */
+		if (!is_rotator)
+			cdp_settings |= MDSS_MDP_CDP_AHEAD_64;
+	}
+
+exit:
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_CDP_CTRL, cdp_settings);
+}
+
+/*
+ * mdss_mdp_format_setup() - build and program the pipe's SRC_FORMAT,
+ * UNPACK_PATTERN and OP_MODE registers (rect0 or rect1 variants) from
+ * pipe->src_fmt and pipe->flags, then clear any latched UBWC error and
+ * configure CDP when the target supports it. Always returns 0.
+ */
+static int mdss_mdp_format_setup(struct mdss_mdp_pipe *pipe)
+{
+	struct mdss_mdp_format_params *fmt;
+	u32 chroma_samp, unpack, src_format;
+	u32 opmode;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	fmt = pipe->src_fmt;
+
+	opmode = pipe->bwc_mode;
+	if (pipe->flags & MDP_FLIP_LR)
+		opmode |= MDSS_MDP_OP_FLIP_LR;
+	if (pipe->flags & MDP_FLIP_UD)
+		opmode |= MDSS_MDP_OP_FLIP_UD;
+
+	pr_debug("pnum=%d format=%d opmode=%x\n", pipe->num, fmt->format,
+			opmode);
+
+	/* 90-degree rotated sources swap H2V1 <-> H1V2 chroma sampling */
+	chroma_samp = fmt->chroma_sample;
+	if (pipe->flags & MDP_SOURCE_ROTATED_90) {
+		if (chroma_samp == MDSS_MDP_CHROMA_H2V1)
+			chroma_samp = MDSS_MDP_CHROMA_H1V2;
+		else if (chroma_samp == MDSS_MDP_CHROMA_H1V2)
+			chroma_samp = MDSS_MDP_CHROMA_H2V1;
+	}
+
+	/* assemble SRC_FORMAT: chroma, plane layout, per-component depths */
+	src_format = (chroma_samp << 23) |
+		     (fmt->fetch_planes << 19) |
+		     (fmt->bits[C3_ALPHA] << 6) |
+		     (fmt->bits[C2_R_Cr] << 4) |
+		     (fmt->bits[C1_B_Cb] << 2) |
+		     (fmt->bits[C0_G_Y] << 0);
+
+	if (mdss_mdp_is_tile_format(fmt))
+		src_format |= BIT(30);
+
+	if (pipe->flags & MDP_ROT_90)
+		src_format |= BIT(11); /* ROT90 */
+
+	if (fmt->alpha_enable &&
+			fmt->fetch_planes != MDSS_MDP_PLANE_INTERLEAVED)
+		src_format |= BIT(8); /* SRCC3_EN */
+
+	unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
+			(fmt->element[1] << 8) | (fmt->element[0] << 0);
+	src_format |= ((fmt->unpack_count - 1) << 12) |
+			(fmt->unpack_tight << 17) |
+			(fmt->unpack_align_msb << 18) |
+			((fmt->bpp - 1) << 9);
+
+	if (mdss_mdp_is_ubwc_format(fmt)) {
+		opmode |= BIT(0);
+		src_format |= BIT(31);
+	}
+
+	if (fmt->is_yuv && test_bit(MDSS_CAPS_YUV_CONFIG, mdata->mdss_caps_map))
+		src_format |= BIT(15);
+
+	src_format |= (fmt->unpack_dx_format << 14);
+
+	mdss_mdp_pipe_sspp_setup(pipe, &opmode);
+	/* non-linear fetch needs the DDR highest-bank-bit in FETCH_CONFIG */
+	if (fmt->fetch_mode != MDSS_MDP_FETCH_LINEAR
+		&& mdata->highest_bank_bit) {
+		u32 fetch_config = MDSS_MDP_FETCH_CONFIG_RESET_VALUE;
+
+		fetch_config |= (mdata->highest_bank_bit << 18);
+		if (fmt->format == MDP_Y_CBCR_H2V2_TP10_UBWC)
+			fetch_config |= (2 << 16);
+
+		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_FETCH_CONFIG,
+			fetch_config);
+	}
+	if (pipe->scaler.enable)
+		opmode |= (1 << 31);
+
+	if (pipe->multirect.num == MDSS_MDP_PIPE_RECT0) {
+		mdss_mdp_pipe_write(pipe,
+			MDSS_MDP_REG_SSPP_SRC_FORMAT, src_format);
+		mdss_mdp_pipe_write(pipe,
+			MDSS_MDP_REG_SSPP_SRC_UNPACK_PATTERN, unpack);
+		mdss_mdp_pipe_write(pipe,
+			MDSS_MDP_REG_SSPP_SRC_OP_MODE, opmode);
+	} else {
+		mdss_mdp_pipe_write(pipe,
+			MDSS_MDP_REG_SSPP_SRC_FORMAT_REC1, src_format);
+		mdss_mdp_pipe_write(pipe,
+			MDSS_MDP_REG_SSPP_SRC_UNPACK_PATTERN_REC1, unpack);
+		mdss_mdp_pipe_write(pipe,
+			MDSS_MDP_REG_SSPP_SRC_OP_MODE_REC1, opmode);
+	}
+
+	/* clear UBWC error */
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_UBWC_ERROR_STATUS, BIT(31));
+
+	/* configure CDP */
+	if (test_bit(MDSS_QOS_CDP, mdata->mdss_qos_map))
+		mdss_mdp_set_pipe_cdp(pipe);
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_pipe_addr_setup() - populate the software pipe structures for one
+ * pipe pool at probe time. @head points to len * rects_per_sspp entries; the
+ * first entry of each SSPP gets its register base, ids and priority, and
+ * each additional rect is cloned from rect0 with multirect.num incremented
+ * and next pointers linking the rects in a ring.
+ *
+ * Returns 0 on success or -EINVAL when @head or @mdata is NULL.
+ */
+int mdss_mdp_pipe_addr_setup(struct mdss_data_type *mdata,
+	struct mdss_mdp_pipe *head, u32 *offsets, u32 *ftch_id, u32 *xin_id,
+	u32 type, const int *pnums, u32 len, u32 rects_per_sspp,
+	u8 priority_base)
+{
+	u32 i, j;
+
+	if (!head || !mdata) {
+		pr_err("unable to setup pipe type=%d: invalid input\n", type);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < len; i++) {
+		struct mdss_mdp_pipe *pipe = head + (i * rects_per_sspp);
+
+		pipe->type = type;
+		pipe->ftch_id  = ftch_id[i];
+		pipe->xin_id = xin_id[i];
+		pipe->num = pnums[i];
+		pipe->ndx = BIT(pnums[i]);
+		pipe->priority = i + priority_base;
+		pipe->base = mdata->mdss_io.base + offsets[i];
+		pipe->multirect.num = MDSS_MDP_PIPE_RECT0;
+		pipe->multirect.mode = MDSS_MDP_PIPE_MULTIRECT_NONE;
+		pipe->multirect.max_rects = rects_per_sspp;
+		pipe->multirect.next = NULL;
+
+		pr_info("type:%d ftchid:%d xinid:%d num:%d rect:%d ndx:0x%x prio:%d\n",
+			pipe->type, pipe->ftch_id, pipe->xin_id, pipe->num,
+			pipe->multirect.num, pipe->ndx, pipe->priority);
+
+		/* clone rect0 into the remaining rects and link them */
+		for (j = 1; j < rects_per_sspp; j++) {
+			struct mdss_mdp_pipe *next = pipe + j;
+
+			pipe[j-1].multirect.next = next;
+			*next = pipe[j-1];
+			next->multirect.num++;
+			next->multirect.next = pipe;
+
+			pr_info("type:%d ftchid:%d xinid:%d num:%d rect:%d ndx:0x%x prio:%d\n",
+				next->type, next->ftch_id, next->xin_id,
+				next->num, next->multirect.num, next->ndx,
+				next->priority);
+		}
+
+	}
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_src_addr_setup() - program the SSPP per-plane source addresses
+ * for the buffer about to be fetched.
+ * @pipe:	pipe being programmed
+ * @src_data:	buffer descriptor holding the per-plane addresses
+ *
+ * Returns 0 on success, or the error from mdss_mdp_data_check().
+ */
+static int mdss_mdp_src_addr_setup(struct mdss_mdp_pipe *pipe,
+				   struct mdss_mdp_data *src_data)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	int i, ret = 0;
+	u32 addr[MAX_PLANES] = { 0 };
+
+	pr_debug("pnum=%d\n", pipe->num);
+
+	ret = mdss_mdp_data_check(src_data, &pipe->src_planes, pipe->src_fmt);
+	if (ret)
+		return ret;
+
+	/*
+	 * with overfetch disabled, the crop offset must be folded into the
+	 * fetch addresses in sw since the hw will not fetch past the edges
+	 */
+	if (pipe->overfetch_disable && !pipe->scaler.enable) {
+		u32 x = 0, y = 0;
+
+		if (pipe->overfetch_disable & OVERFETCH_DISABLE_LEFT)
+			x = pipe->src.x;
+		if (pipe->overfetch_disable & OVERFETCH_DISABLE_TOP)
+			y = pipe->src.y;
+
+		mdss_mdp_data_calc_offset(src_data, x, y,
+			&pipe->src_planes, pipe->src_fmt);
+	}
+
+	for (i = 0; i < MAX_PLANES; i++)
+		addr[i] = src_data->p[i].addr;
+
+	/* planar format expects YCbCr, swap chroma planes if YCrCb */
+	if (mdata->mdp_rev < MDSS_MDP_HW_REV_102 &&
+			(pipe->src_fmt->fetch_planes == MDSS_MDP_PLANE_PLANAR)
+				&& (pipe->src_fmt->element[0] == C1_B_Cb))
+		swap(addr[1], addr[2]);
+
+	/*
+	 * in multirect mode the four address registers are split between
+	 * the rects: RECT0 uses SRC0/SRC2 and RECT1 uses SRC1/SRC3
+	 */
+	if (pipe->multirect.mode == MDSS_MDP_PIPE_MULTIRECT_NONE) {
+		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC0_ADDR, addr[0]);
+		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC1_ADDR, addr[1]);
+		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC2_ADDR, addr[2]);
+		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC3_ADDR, addr[3]);
+	} else if (pipe->multirect.num == MDSS_MDP_PIPE_RECT0) {
+		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC0_ADDR, addr[0]);
+		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC2_ADDR, addr[2]);
+	} else { /* RECT1 */
+		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC1_ADDR, addr[0]);
+		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC3_ADDR, addr[2]);
+	}
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_pipe_solidfill_setup() - configure the pipe to output a constant
+ * ARGB color (pipe->bg_color) instead of fetching from memory.
+ * @pipe:	pipe to configure
+ *
+ * Returns 0 on success or the error from mdss_mdp_image_setup().
+ */
+static int mdss_mdp_pipe_solidfill_setup(struct mdss_mdp_pipe *pipe)
+{
+	int ret;
+	u32 secure, format, unpack, opmode = 0;
+
+	pr_debug("solid fill setup on pnum=%d\n", pipe->num);
+
+	ret = mdss_mdp_image_setup(pipe, NULL);
+	if (ret) {
+		pr_err("image setup error for pnum=%d\n", pipe->num);
+		return ret;
+	}
+
+	format = MDSS_MDP_FMT_SOLID_FILL;
+	/* all-planes secure flag when the session is marked secure */
+	secure = (pipe->flags & MDP_SECURE_OVERLAY_SESSION ? 0xF : 0x0);
+
+	/* support ARGB color format only */
+	unpack = (C3_ALPHA << 24) | (C2_R_Cr << 16) |
+		(C1_B_Cb << 8) | (C0_G_Y << 0);
+	if (pipe->scaler.enable)
+		opmode |= (1 << 31);
+
+	if (pipe->multirect.num == MDSS_MDP_PIPE_RECT0) {
+		/*
+		 * rect0 will drive whether to secure the pipeline, even though
+		 * no secure content is being fetched
+		 */
+		mdss_mdp_pipe_write(pipe,
+			MDSS_MDP_REG_SSPP_SRC_ADDR_SW_STATUS, secure);
+		mdss_mdp_pipe_write(pipe,
+			MDSS_MDP_REG_SSPP_SRC_FORMAT, format);
+		mdss_mdp_pipe_write(pipe,
+			MDSS_MDP_REG_SSPP_SRC_CONSTANT_COLOR, pipe->bg_color);
+		mdss_mdp_pipe_write(pipe,
+			MDSS_MDP_REG_SSPP_SRC_UNPACK_PATTERN, unpack);
+		mdss_mdp_pipe_write(pipe,
+			MDSS_MDP_REG_SSPP_SRC_OP_MODE, opmode);
+	} else {
+		/* RECT1 has its own shadow copies of the format registers */
+		mdss_mdp_pipe_write(pipe,
+			MDSS_MDP_REG_SSPP_SRC_FORMAT_REC1, format);
+		mdss_mdp_pipe_write(pipe,
+			MDSS_MDP_REG_SSPP_SRC_CONSTANT_COLOR_REC1,
+			pipe->bg_color);
+		mdss_mdp_pipe_write(pipe,
+			MDSS_MDP_REG_SSPP_SRC_UNPACK_PATTERN_REC1, unpack);
+		mdss_mdp_pipe_write(pipe,
+			MDSS_MDP_REG_SSPP_SRC_OP_MODE_REC1, opmode);
+	}
+
+	/* solid fill needs no scaling/CSC; disable leftover state */
+	if (pipe->type != MDSS_MDP_PIPE_TYPE_DMA) {
+		mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SCALE_CONFIG, 0);
+		if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG)
+			mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_VIG_OP_MODE, 0);
+	}
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_set_ot_limit_pipe() - gather this pipe's geometry, clock control
+ * and VBIF routing info and apply the outstanding-transaction (OT) limit.
+ * @pipe:	pipe whose read client is being limited
+ */
+static void mdss_mdp_set_ot_limit_pipe(struct mdss_mdp_pipe *pipe)
+{
+	struct mdss_mdp_ctl *ctl = pipe->mixer_left->ctl;
+	struct mdss_mdp_set_ot_params params;
+
+	params.xin_id = pipe->xin_id;
+	params.num = pipe->num;
+	params.width = pipe->src.w;
+	params.height = pipe->src.h;
+	params.reg_off_vbif_lim_conf = MMSS_VBIF_RD_LIM_CONF;
+	params.reg_off_mdp_clk_ctrl = pipe->clk_ctrl.reg_off;
+	params.bit_off_mdp_clk_ctrl = pipe->clk_ctrl.bit_off +
+		CLK_FORCE_ON_OFFSET;
+	params.is_rot = pipe->mixer_left->rotator_mode;
+	params.is_wb = ctl->intf_num == MDSS_MDP_NO_INTF;
+	params.is_yuv = pipe->src_fmt->is_yuv;
+	params.frame_rate = pipe->frame_rate;
+
+	/* rotator read uses nrt vbif, when the nrt base is defined */
+	params.is_vbif_nrt = mdss_mdp_is_nrt_vbif_base_defined(ctl->mdata) &&
+			pipe->mixer_left->rotator_mode;
+
+	mdss_mdp_set_ot_limit(&params);
+}
+
+/*
+ * mdss_mdp_is_amortizable_pipe() - decide whether prefill bandwidth for this
+ * pipe may be amortized over the blanking time.
+ * @pipe:	pipe under consideration
+ * @mixer:	mixer the pipe is staged on
+ * @mdata:	global MDP data holding the prefill thresholds
+ */
+bool mdss_mdp_is_amortizable_pipe(struct mdss_mdp_pipe *pipe,
+	struct mdss_mdp_mixer *mixer, struct mdss_data_type *mdata)
+{
+	/* do not apply for rotator or WB */
+	if (mixer->type != MDSS_MDP_MIXER_TYPE_INTF)
+		return false;
+
+	/* only pipes starting below the timestamp threshold qualify */
+	return pipe->src.y > mdata->prefill_data.ts_threshold;
+}
+
+/*
+ * __get_ordered_rects() - return the two rects of a multirect SSPP ordered
+ * by vertical position; *high_pipe is NULL for non-multirect pipes.
+ * @pipe:	either rect of the SSPP
+ * @low_pipe:	out: rect with the smaller src.y
+ * @high_pipe:	out: rect with the larger src.y, or NULL
+ */
+static inline void __get_ordered_rects(struct mdss_mdp_pipe *pipe,
+	struct mdss_mdp_pipe **low_pipe,
+	struct mdss_mdp_pipe **high_pipe)
+{
+	struct mdss_mdp_pipe *partner;
+
+	if (pipe->multirect.mode == MDSS_MDP_PIPE_MULTIRECT_NONE) {
+		*low_pipe = pipe;
+		*high_pipe = NULL;
+		return;
+	}
+
+	partner = pipe->multirect.next;
+
+	/* order by position: the rect starting further down is "high" */
+	if (pipe->src.y > partner->src.y) {
+		*low_pipe = partner;
+		*high_pipe = pipe;
+	} else {
+		*low_pipe = pipe;
+		*high_pipe = partner;
+	}
+}
+
+/*
+ * __get_ts_count() - compute the traffic shaper prefill count for one rect,
+ * in 19.2MHz clock ticks.
+ * @pipe:	rect being programmed (low or high rect of the SSPP)
+ * @mixer:	mixer the pipe is staged on
+ * @is_low_pipe: true for the vertically-lower rect
+ *
+ * Returns 0 when no amortization applies.
+ * NOTE(review): returns -EINVAL through a u32 on panel-param failure, which
+ * the caller stores as a huge positive count — verify this is intentional.
+ */
+static u32 __get_ts_count(struct mdss_mdp_pipe *pipe,
+	struct mdss_mdp_mixer *mixer, bool is_low_pipe)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 ts_diff, ts_ypos;
+	struct mdss_mdp_pipe *low_pipe, *high_pipe;
+	u32 ts_count = 0;
+	u32 v_total, fps, h_total, xres;
+
+	if (mdss_mdp_get_panel_params(pipe, mixer, &fps, &v_total,
+			&h_total, &xres)) {
+		pr_err(" error retreiving the panel params!\n");
+		return -EINVAL;
+	}
+
+	if (is_low_pipe) {
+		/* only calculate count if lower pipe is amortizable */
+		if (mdss_mdp_is_amortizable_pipe(pipe, mixer, mdata)) {
+			/* lines available for amortization above the rect */
+			ts_diff = mdata->prefill_data.ts_threshold -
+					mdata->prefill_data.ts_end;
+			ts_ypos = pipe->src.y - ts_diff;
+			/* convert line position to 19.2MHz ticks */
+			ts_count = mult_frac(ts_ypos, 19200000, fps * v_total);
+		}
+	} else { /* high pipe */
+
+		/* only calculate count for high pipe in serial mode */
+		if (pipe &&
+		    pipe->multirect.mode == MDSS_MDP_PIPE_MULTIRECT_SERIAL) {
+			/* ticks spanned by the gap between the two rects */
+			__get_ordered_rects(pipe, &low_pipe, &high_pipe);
+			ts_count = high_pipe->src.y - low_pipe->src.y - 1;
+			ts_count = mult_frac(ts_count, 19200000, fps * v_total);
+		}
+	}
+
+	return ts_count;
+}
+
+/*
+ * __calc_ts_bytes() - compute the traffic shaper byte budget for one rect,
+ * scaled by the platform timestamp rate and expressed per 19.2MHz tick.
+ * @src:	source rect geometry
+ * @fps:	panel refresh rate
+ * @bpp:	bytes per pixel of the source format
+ */
+static u32 __calc_ts_bytes(struct mdss_rect *src, u32 fps, u32 bpp)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 bytes_per_sec;
+
+	/* raw fetch bandwidth of the rect, in bytes per second */
+	bytes_per_sec = src->h * src->w * bpp * fps;
+
+	/* apply the platform-specific timestamp rate ratio */
+	bytes_per_sec = mult_frac(bytes_per_sec,
+		mdata->prefill_data.ts_rate.numer,
+		mdata->prefill_data.ts_rate.denom);
+
+	/* express per 19.2MHz clock tick */
+	return bytes_per_sec / 19200000;
+}
+
+/*
+ * __get_ts_bytes() - compute the SSPP traffic shaper bytes register value
+ * for the given pipe, depending on its multirect mode.
+ * @pipe:	pipe being programmed
+ * @mixer:	mixer the pipe is staged on
+ *
+ * Returns the 8-bit byte count with the enable bits BIT(27)|BIT(31) set,
+ * or 0 when amortization does not apply.
+ * NOTE(review): the panel-param error path returns -EINVAL through a u32,
+ * matching __get_ts_count() — verify callers tolerate this.
+ */
+static u32 __get_ts_bytes(struct mdss_mdp_pipe *pipe,
+	struct mdss_mdp_mixer *mixer)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_pipe *low_pipe, *high_pipe;
+	u32 v_total, fps, h_total, xres;
+	u64 low_pipe_bw, high_pipe_bw, temp;
+	u32 ts_bytes_low, ts_bytes_high;
+	u64 ts_bytes = 0;
+
+	if (mdss_mdp_get_panel_params(pipe, mixer, &fps, &v_total,
+			&h_total, &xres)) {
+		pr_err(" error retreiving the panel params!\n");
+		return -EINVAL;
+	}
+
+	switch (pipe->multirect.mode) {
+	case MDSS_MDP_PIPE_MULTIRECT_NONE:
+
+		/* do not amortize if pipe is not amortizable */
+		if (!mdss_mdp_is_amortizable_pipe(pipe, mixer, mdata)) {
+			ts_bytes = 0;
+			goto exit;
+		}
+
+		ts_bytes = __calc_ts_bytes(&pipe->src, fps,
+			pipe->src_fmt->bpp);
+
+		break;
+	case MDSS_MDP_PIPE_MULTIRECT_PARALLEL:
+
+		__get_ordered_rects(pipe, &low_pipe, &high_pipe);
+
+		/* do not amortize if low_pipe is not amortizable */
+		if (!mdss_mdp_is_amortizable_pipe(low_pipe, mixer, mdata)) {
+			ts_bytes = 0;
+			goto exit;
+		}
+
+		/*
+		 * calculate ts bytes as the sum of both rects; each rect
+		 * uses its own geometry and format (the previous code passed
+		 * &low_pipe->src for the high rect, pairing the low rect's
+		 * dimensions with the high rect's bpp)
+		 */
+		ts_bytes_low = __calc_ts_bytes(&low_pipe->src, fps,
+			low_pipe->src_fmt->bpp);
+		ts_bytes_high = __calc_ts_bytes(&high_pipe->src, fps,
+			high_pipe->src_fmt->bpp);
+
+		ts_bytes = ts_bytes_low + ts_bytes_high;
+		break;
+	case MDSS_MDP_PIPE_MULTIRECT_SERIAL:
+
+		__get_ordered_rects(pipe, &low_pipe, &high_pipe);
+
+		/* calculate amortization using per-pipe bw */
+		mdss_mdp_get_pipe_overlap_bw(low_pipe,
+			&low_pipe->mixer_left->roi,
+			&low_pipe_bw, &temp, 0);
+		mdss_mdp_get_pipe_overlap_bw(high_pipe,
+			&high_pipe->mixer_left->roi,
+			&high_pipe_bw, &temp, 0);
+
+		/* amortize depending on the lower pipe amortization */
+		if (mdss_mdp_is_amortizable_pipe(low_pipe, mixer, mdata))
+			ts_bytes = DIV_ROUND_UP_ULL(max(low_pipe_bw,
+				high_pipe_bw), 19200000);
+		else
+			ts_bytes = DIV_ROUND_UP_ULL(high_pipe_bw, 19200000);
+		break;
+	default:
+		pr_err("unknown multirect mode!\n");
+		goto exit;
+	}
+
+	/* bits 0-7 hold the count; BIT(27)/BIT(31) enable the shaper */
+	ts_bytes &= 0xFF;
+	ts_bytes |= BIT(27) | BIT(31);
+exit:
+	return (u32) ts_bytes;
+}
+
+/*
+ * mdss_mdp_set_ts_pipe() - program the SSPP traffic shaper registers for
+ * vblank prefill amortization; no-op if MDSS_QOS_TS_PREFILL is not set.
+ * @pipe:	pipe (either rect) being programmed
+ *
+ * Returns 0 on success or -EINVAL on missing mixer/ctl or bad ts data.
+ */
+static int mdss_mdp_set_ts_pipe(struct mdss_mdp_pipe *pipe)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_mixer *mixer;
+	u32 ts_count_low = 0, ts_count_high = 0;
+	u32 ts_rec0, ts_rec1;
+	u32 ts_bytes = 0;
+	struct mdss_mdp_pipe *low_pipe = NULL;
+	struct mdss_mdp_pipe *high_pipe = NULL;
+
+	if (!test_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map))
+		return 0;
+
+	mixer = pipe->mixer_left;
+	if (!mixer)
+		return -EINVAL;
+
+	if (!mixer->ctl)
+		return -EINVAL;
+
+	if (!mdata->prefill_data.ts_threshold ||
+	    (mdata->prefill_data.ts_threshold < mdata->prefill_data.ts_end)) {
+		pr_err("invalid ts data!\n");
+		return -EINVAL;
+	}
+
+	/* high pipe will be null for non-multi rect cases */
+	__get_ordered_rects(pipe, &low_pipe, &high_pipe);
+
+	ts_count_low  = __get_ts_count(low_pipe, mixer, true);
+	if (high_pipe != NULL)
+		ts_count_high = __get_ts_count(high_pipe, mixer, false);
+	ts_bytes = __get_ts_bytes(pipe, mixer);
+
+	/* map low/high counts onto the REC0/REC1 prefill registers */
+	if (low_pipe->multirect.num == MDSS_MDP_PIPE_RECT0) {
+		ts_rec0 = ts_count_low;
+		ts_rec1 = ts_count_high;
+	} else {
+		ts_rec0 = ts_count_high;
+		ts_rec1 = ts_count_low;
+	}
+
+	mdss_mdp_pipe_qos_ctrl(pipe, false, MDSS_MDP_PIPE_QOS_VBLANK_AMORTIZE);
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_TRAFFIC_SHAPER, ts_bytes);
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_TRAFFIC_SHAPER_PREFILL,
+		ts_rec0);
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_TRAFFIC_SHAPER_REC1_PREFILL,
+		ts_rec1);
+	MDSS_XLOG(pipe->num, ts_bytes, ts_rec0, ts_rec1);
+	pr_debug("ts: pipe:%d bytes=0x%x count0=0x%x count1=0x%x\n",
+		pipe->num, ts_bytes, ts_rec0, ts_rec1);
+	return 0;
+}
+
+/*
+ * mdss_mdp_pipe_queue_data() - queue a buffer on a pipe and program all
+ * pipe state (QoS, format, scaling, addresses) needed to display it.
+ * @pipe:	destination pipe; must have mixer_left and ctl set up
+ * @src_data:	buffer to fetch, or NULL for solid fill / VPU-driven cases
+ *
+ * Returns 0 on success, -ENODEV on missing pipe/mixer setup, -EINVAL on
+ * invalid src-split requests, or the error from the setup stages.
+ */
+int mdss_mdp_pipe_queue_data(struct mdss_mdp_pipe *pipe,
+			     struct mdss_mdp_data *src_data)
+{
+	int ret = 0;
+	struct mdss_mdp_ctl *ctl;
+	u32 params_changed;
+	u32 opmode = 0, multirect_opmode = 0;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	bool roi_changed = false;
+	bool delayed_programming;
+
+	if (!pipe) {
+		pr_err("pipe not setup properly for queue\n");
+		return -ENODEV;
+	}
+
+	if (!pipe->mixer_left || !pipe->mixer_left->ctl) {
+		if (src_data)
+			pr_err("pipe%d mixer not setup properly\n", pipe->num);
+		return -ENODEV;
+	}
+
+	if (pipe->src_split_req && !mdata->has_src_split) {
+		pr_err("src split can't be requested on mdp:0x%x\n",
+			mdata->mdp_rev);
+		return -EINVAL;
+	}
+
+	pr_debug("pnum=%x mixer=%d play_cnt=%u\n", pipe->num,
+		 pipe->mixer_left->num, pipe->play_cnt);
+
+	/* keep MDP clocks on for the register programming below */
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+	ctl = pipe->mixer_left->ctl;
+	roi_changed = pipe->mixer_left->roi_changed;
+
+	/*
+	 * if pipe is staged on 2 mixers then it is possible that only
+	 * right mixer roi has changed.
+	 */
+	if (pipe->mixer_right)
+		roi_changed |= pipe->mixer_right->roi_changed;
+
+	delayed_programming = is_pipe_programming_delay_needed(pipe);
+
+	/*
+	 * Reprogram the pipe when there is no dedicated wfd blk and
+	 * virtual mixer is allocated for the DMA pipe during concurrent
+	 * line and block mode operations
+	 */
+	params_changed = (pipe->params_changed) ||
+		((pipe->type == MDSS_MDP_PIPE_TYPE_DMA) &&
+		 (pipe->mixer_left->type == MDSS_MDP_MIXER_TYPE_WRITEBACK) &&
+		 (ctl->mdata->mixer_switched)) || roi_changed;
+
+	/* apply changes that are common in case of multi rects only once */
+	if (params_changed && !delayed_programming) {
+		bool is_realtime = !((ctl->intf_num == MDSS_MDP_NO_INTF)
+				|| pipe->mixer_left->rotator_mode);
+
+		mdss_mdp_pipe_panic_vblank_signal_ctrl(pipe, false);
+		mdss_mdp_pipe_panic_signal_ctrl(pipe, false);
+
+		mdss_mdp_qos_vbif_remapper_setup(mdata, pipe, is_realtime);
+		mdss_mdp_fixed_qos_arbiter_setup(mdata, pipe, is_realtime);
+
+		if (mdata->vbif_nrt_io.base)
+			mdss_mdp_pipe_nrt_vbif_setup(mdata, pipe);
+
+		/* NOTE(review): pipe already dereferenced; check redundant */
+		if (pipe && mdss_mdp_pipe_is_sw_reset_available(mdata))
+			mdss_mdp_pipe_clk_force_off(pipe);
+
+		if (pipe->scaler.enable)
+			mdss_mdp_pipe_program_pixel_extn(pipe);
+	}
+
+	/* solid fill / no-buffer path: skip format and address programming */
+	if ((!(pipe->flags & MDP_VPU_PIPE) && (src_data == NULL)) ||
+	    (pipe->flags & MDP_SOLID_FILL)) {
+		pipe->params_changed = 0;
+		mdss_mdp_pipe_solidfill_setup(pipe);
+
+		MDSS_XLOG(pipe->num, pipe->multirect.num,
+			pipe->mixer_left->num, pipe->play_cnt, 0x111);
+
+		goto update_nobuf;
+	}
+
+	MDSS_XLOG(pipe->num, pipe->multirect.num, pipe->mixer_left->num,
+						pipe->play_cnt, 0x222);
+
+	if (params_changed) {
+		pipe->params_changed = 0;
+
+		ret = mdss_mdp_pipe_pp_setup(pipe, &opmode);
+		if (ret) {
+			pr_err("pipe pp setup error for pnum=%d rect=%d\n",
+					pipe->num, pipe->multirect.num);
+			goto done;
+		}
+
+		ret = mdss_mdp_image_setup(pipe, src_data);
+		if (ret) {
+			pr_err("image setup error for pnum=%d\n", pipe->num);
+			goto done;
+		}
+
+		ret = mdss_mdp_format_setup(pipe);
+		if (ret) {
+			pr_err("format %d setup error pnum=%d\n",
+			       pipe->src_fmt->format, pipe->num);
+			goto done;
+		}
+
+		if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG)
+			mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_VIG_OP_MODE,
+			opmode);
+
+		/* per-pipe QoS/panic/OT/ts setup, done once per SSPP */
+		if (!delayed_programming) {
+			if (test_bit(MDSS_QOS_PER_PIPE_LUT,
+				     mdata->mdss_qos_map))
+				mdss_mdp_pipe_qos_lut(pipe);
+
+			if (mdss_mdp_panic_signal_support_mode(mdata) ==
+					MDSS_MDP_PANIC_PER_PIPE_CFG)
+				mdss_mdp_config_pipe_panic_lut(pipe);
+
+			if (pipe->type != MDSS_MDP_PIPE_TYPE_CURSOR) {
+				mdss_mdp_pipe_panic_vblank_signal_ctrl(pipe, 1);
+				mdss_mdp_pipe_panic_signal_ctrl(pipe, true);
+				mdss_mdp_set_ot_limit_pipe(pipe);
+				mdss_mdp_set_ts_pipe(pipe);
+			}
+		}
+	}
+
+	/*
+	 * enable multirect only when both RECT0 and RECT1 are enabled,
+	 * othwerise expect to work in non-multirect only in RECT0
+	 */
+	if (pipe->multirect.mode != MDSS_MDP_PIPE_MULTIRECT_NONE) {
+		/* BIT(0)|BIT(1) enable both rects; BIT(2) selects serial */
+		multirect_opmode = BIT(0) | BIT(1);
+
+		if (pipe->multirect.mode == MDSS_MDP_PIPE_MULTIRECT_SERIAL)
+			multirect_opmode |= BIT(2);
+	}
+
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_MULTI_REC_OP_MODE,
+			    multirect_opmode);
+	if (src_data == NULL) {
+		pr_debug("src_data=%pK pipe num=%dx\n",
+				src_data, pipe->num);
+		goto update_nobuf;
+	}
+
+	if (pipe->type != MDSS_MDP_PIPE_TYPE_CURSOR)
+		mdss_mdp_smp_alloc(pipe);
+
+	ret = mdss_mdp_src_addr_setup(pipe, src_data);
+	if (ret) {
+		pr_err("addr setup error for pnum=%d\n", pipe->num);
+		goto done;
+	}
+
+update_nobuf:
+	/* stage the pipe on one or both mixers depending on src split */
+	if (pipe->src_split_req) {
+		pr_debug("src_split_enabled. pnum:%d\n", pipe->num);
+		mdss_mdp_mixer_pipe_update(pipe, ctl->mixer_left,
+			params_changed);
+		mdss_mdp_mixer_pipe_update(pipe, ctl->mixer_right,
+			params_changed);
+		pipe->mixer_right = ctl->mixer_right;
+	} else {
+		mdss_mdp_mixer_pipe_update(pipe, pipe->mixer_left,
+			params_changed);
+	}
+
+	pipe->play_cnt++;
+
+	/* track which pipes run BWC, for the BWC panic workaround */
+	if (mdss_has_quirk(mdata, MDSS_QUIRK_BWCPANIC)) {
+		unsigned long pnum_bitmap = BIT(pipe->num);
+
+		if (pipe->bwc_mode)
+			bitmap_or(mdata->bwc_enable_map, mdata->bwc_enable_map,
+				&pnum_bitmap, MAX_DRV_SUP_PIPES);
+		else
+			bitmap_andnot(mdata->bwc_enable_map,
+				mdata->bwc_enable_map, &pnum_bitmap,
+				MAX_DRV_SUP_PIPES);
+	}
+
+done:
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+
+	return ret;
+}
+
+/*
+ * mdss_mdp_pipe_is_staged() - check whether the pipe still occupies its
+ * stage slot on its left mixer.
+ * @pipe:	pipe to check; mixer_left must be valid
+ */
+int mdss_mdp_pipe_is_staged(struct mdss_mdp_pipe *pipe)
+{
+	struct mdss_mdp_mixer *mixer = pipe->mixer_left;
+
+	return mixer->stage_pipe[pipe->mixer_stage] == pipe;
+}
+
+/*
+ * __mdss_mdp_pipe_program_pixel_extn_helper() - pack and write the sw pixel
+ * extension registers for one color component.
+ * @pipe:	pipe holding the computed extension values
+ * @plane:	component index (0=Y/RGB, 1=CbCr, 3=alpha)
+ * @off:	register offset of this component's extension bank
+ */
+static inline void __mdss_mdp_pipe_program_pixel_extn_helper(
+	struct mdss_mdp_pipe *pipe, u32 plane, u32 off)
+{
+	u32 src_h = DECIMATED_DIMENSION(pipe->src.h, pipe->vert_deci);
+	u32 mask = 0xFF;
+	u32 lr_pe, tb_pe, tot_req_pixels;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	/*
+	 * CB CR plane required pxls need to be accounted
+	 * for chroma decimation.
+	 */
+	if (plane == 1)
+		src_h >>= pipe->chroma_sample_v;
+
+	/* pack right/left fetch and repeat counts, 8 bits each */
+	lr_pe = ((pipe->scaler.right_ftch[plane] & mask) << 24)|
+		((pipe->scaler.right_rpt[plane] & mask) << 16)|
+		((pipe->scaler.left_ftch[plane] & mask) << 8)|
+		(pipe->scaler.left_rpt[plane] & mask);
+
+	/* pack bottom/top fetch and repeat counts, 8 bits each */
+	tb_pe = ((pipe->scaler.btm_ftch[plane] & mask) << 24)|
+		((pipe->scaler.btm_rpt[plane] & mask) << 16)|
+		((pipe->scaler.top_ftch[plane] & mask) << 8)|
+		(pipe->scaler.top_rpt[plane] & mask);
+
+	writel_relaxed(lr_pe, pipe->base +
+			MDSS_MDP_REG_SSPP_SW_PIX_EXT_C0_LR + off);
+	writel_relaxed(tb_pe, pipe->base +
+			MDSS_MDP_REG_SSPP_SW_PIX_EXT_C0_TB + off);
+
+	/* total required pixels: height in high half, width in low half */
+	mask = 0xFFFF;
+	if (test_bit(MDSS_CAPS_QSEED3, mdata->mdss_caps_map))
+		tot_req_pixels = ((pipe->scaler.num_ext_pxls_top[plane] &
+				mask) << 16 |
+			(pipe->scaler.num_ext_pxls_left[plane] & mask));
+	else
+		tot_req_pixels =
+			(((src_h + pipe->scaler.num_ext_pxls_top[plane] +
+			pipe->scaler.num_ext_pxls_btm[plane]) & mask) << 16) |
+			((pipe->scaler.roi_w[plane] +
+			pipe->scaler.num_ext_pxls_left[plane] +
+			pipe->scaler.num_ext_pxls_right[plane]) & mask);
+
+	writel_relaxed(tot_req_pixels, pipe->base +
+			MDSS_MDP_REG_SSPP_SW_PIX_EXT_C0_REQ_PIXELS + off);
+
+	MDSS_XLOG(pipe->num, plane, lr_pe, tb_pe, tot_req_pixels);
+	pr_debug("pipe num=%d, plane=%d, LR PE=0x%x, TB PE=0x%x, req_pixels=0x0%x\n",
+		pipe->num, plane, lr_pe, tb_pe, tot_req_pixels);
+}
+
+/**
+ * mdss_mdp_pipe_program_pixel_extn - Program the source pipe's
+ *				      sw pixel extension
+ * @pipe:	Source pipe struct containing pixel extn values
+ *
+ * Writes the pixel extension values calculated during scale setup for
+ * the Y (plane 0), CbCr (plane 1) and alpha (plane 3) components.
+ */
+static int mdss_mdp_pipe_program_pixel_extn(struct mdss_mdp_pipe *pipe)
+{
+	/* component plane index and its register bank offset */
+	const u32 plane[3] = {0, 1, 3};
+	const u32 reg_off[3] = {0, 16, 32};
+	int i;
+
+	for (i = 0; i < 3; i++)
+		__mdss_mdp_pipe_program_pixel_extn_helper(pipe, plane[i],
+				reg_off[i]);
+
+	return 0;
+}
+
+
+/*
+ * __pxl_extn_helper() - convert a fixed-point phase residue into a signed
+ * whole-pixel extension count.
+ * @residue:	fixed-point residue (PHASE_STEP_SHIFT fractional bits)
+ *
+ * Positive residue truncates toward zero and is returned negated (repeat
+ * pixels); negative residue is rounded up in magnitude (overfetch pixels).
+ */
+static int __pxl_extn_helper(int residue)
+{
+	uint32_t whole;
+
+	if (residue == 0)
+		return 0;
+
+	if (residue > 0) {
+		/* truncate the fractional part, repeat direction */
+		whole = (uint32_t) residue;
+		whole >>= PHASE_STEP_SHIFT;
+		return -((int) whole);
+	}
+
+	/* negative: round up so the extension fully covers the residue */
+	whole = (uint32_t)(-residue);
+	whole >>= PHASE_STEP_SHIFT;
+	if ((whole << PHASE_STEP_SHIFT) != (uint32_t)(-residue))
+		whole++;
+	return (int) whole;
+}
+
+/**
+ * mdss_mdp_pipe_calc_pixel_extn - Calculate source pipe's sw pixel extension
+ *
+ * @pipe:	Source pipe struct containing pixel extn values
+ *
+ * Function calculates the pixel extn values during scale setup.
+ */
+void mdss_mdp_pipe_calc_pixel_extn(struct mdss_mdp_pipe *pipe)
+{
+	int caf, i;
+	uint32_t src_h;
+	bool unity_scale_x = false, upscale_x = false;
+	bool unity_scale_y, upscale_y;
+
+	/* YUV always goes through the scaler, so never unity in X */
+	if (!(pipe->src_fmt->is_yuv))
+		unity_scale_x = (pipe->src.w == pipe->dst.w);
+
+	if (!unity_scale_x)
+		upscale_x = (pipe->src.w <= pipe->dst.w);
+
+	pr_debug("pipe=%d, src(%d, %d, %d, %d), dest(%d, %d, %d, %d)\n",
+			pipe->num,
+			pipe->src.x, pipe->src.y, pipe->src.w, pipe->src.h,
+			pipe->dst.x, pipe->dst.y, pipe->dst.w, pipe->dst.h);
+
+	for (i = 0; i < MAX_PLANES; i++) {
+		int64_t left = 0, right = 0, top = 0, bottom = 0;
+
+		caf = 0;
+
+		/*
+		 * phase step x,y for 0 plane should be calculated before
+		 * this
+		 */
+		if (pipe->src_fmt->is_yuv && (i == 1 || i == 2)) {
+			pipe->scaler.phase_step_x[i] =
+				pipe->scaler.phase_step_x[0]
+					>> pipe->chroma_sample_h;
+			pipe->scaler.phase_step_y[i] =
+				pipe->scaler.phase_step_y[0]
+					>> pipe->chroma_sample_v;
+		} else if (i > 0) {
+			pipe->scaler.phase_step_x[i] =
+				pipe->scaler.phase_step_x[0];
+			pipe->scaler.phase_step_y[i] =
+				pipe->scaler.phase_step_y[0];
+		}
+		/* Pixel extension calculations for X direction */
+		pipe->scaler.roi_w[i] = DECIMATED_DIMENSION(pipe->src.w,
+			pipe->horz_deci);
+
+		/* YUV roi width must stay even */
+		if (pipe->src_fmt->is_yuv)
+			pipe->scaler.roi_w[i] &= ~0x1;
+
+		/* CAF filtering on only luma plane */
+		if (i == 0 && pipe->src_fmt->is_yuv)
+			caf = 1;
+		if (i == 1 || i == 2)
+			pipe->scaler.roi_w[i] >>= pipe->chroma_sample_h;
+
+		pr_debug("roi_w[%d]=%d, caf=%d\n", i, pipe->scaler.roi_w[i],
+			caf);
+		if (unity_scale_x) {
+			left = 0;
+			right = 0;
+		} else if (!upscale_x) {
+			/* downscale: residual phase accumulated at the right */
+			left = 0;
+			right = (pipe->dst.w - 1) *
+				pipe->scaler.phase_step_x[i];
+			right -= (pipe->scaler.roi_w[i] - 1) *
+				PHASE_STEP_UNIT_SCALE;
+			right += pipe->scaler.phase_step_x[i];
+			right = -(right);
+		} else {
+			/* upscale: half-unit initial phase, caf adjusted */
+			left = (1 << PHASE_RESIDUAL);
+			left -= (caf * PHASE_STEP_UNIT_SCALE);
+
+			right = (1 << PHASE_RESIDUAL);
+			right += (pipe->dst.w - 1) *
+				pipe->scaler.phase_step_x[i];
+			right -= ((pipe->scaler.roi_w[i] - 1) *
+				PHASE_STEP_UNIT_SCALE);
+			right += (caf * PHASE_STEP_UNIT_SCALE);
+			right = -(right);
+		}
+		pr_debug("left=%lld, right=%lld\n", left, right);
+		pipe->scaler.num_ext_pxls_left[i] = __pxl_extn_helper(left);
+		pipe->scaler.num_ext_pxls_right[i] = __pxl_extn_helper(right);
+
+		/* Pixel extension calculations for Y direction */
+		unity_scale_y = false;
+		upscale_y = false;
+
+		src_h = DECIMATED_DIMENSION(pipe->src.h, pipe->vert_deci);
+
+		/* Subsampling of chroma components is factored */
+		if (i == 1 || i == 2)
+			src_h >>= pipe->chroma_sample_v;
+
+		if (!(pipe->src_fmt->is_yuv))
+			unity_scale_y = (src_h == pipe->dst.h);
+
+		if (!unity_scale_y)
+			upscale_y = (src_h <= pipe->dst.h);
+
+		/* caf set during the X calculation also applies here */
+		if (unity_scale_y) {
+			top = 0;
+			bottom = 0;
+		} else if (!upscale_y) {
+			top = 0;
+			bottom = (pipe->dst.h - 1) *
+				pipe->scaler.phase_step_y[i];
+			bottom -= (src_h - 1) * PHASE_STEP_UNIT_SCALE;
+			bottom += pipe->scaler.phase_step_y[i];
+			bottom = -(bottom);
+		} else {
+			top = (1 << PHASE_RESIDUAL);
+			top -= (caf * PHASE_STEP_UNIT_SCALE);
+
+			bottom = (1 << PHASE_RESIDUAL);
+			bottom += (pipe->dst.h - 1) *
+				pipe->scaler.phase_step_y[i];
+			bottom -= (src_h - 1) * PHASE_STEP_UNIT_SCALE;
+			bottom += (caf * PHASE_STEP_UNIT_SCALE);
+			bottom = -(bottom);
+		}
+
+		pipe->scaler.num_ext_pxls_top[i] = __pxl_extn_helper(top);
+		pipe->scaler.num_ext_pxls_btm[i] = __pxl_extn_helper(bottom);
+
+		/* Single pixel rgb scale adjustment */
+		if ((!(pipe->src_fmt->is_yuv)) &&
+			((pipe->src.h - pipe->dst.h) == 1)) {
+
+			uint32_t residue = pipe->scaler.phase_step_y[i] -
+				PHASE_STEP_UNIT_SCALE;
+			uint32_t result = (pipe->dst.h * residue) + residue;
+
+			if (result < PHASE_STEP_UNIT_SCALE)
+				pipe->scaler.num_ext_pxls_btm[i] -= 1;
+		}
+
+		/*
+		 * positive extension counts become repeat values, negative
+		 * ones become fetch values (masked to 8 bits when written
+		 * to hw by the program helper)
+		 */
+		if (pipe->scaler.num_ext_pxls_left[i] >= 0)
+			pipe->scaler.left_rpt[i] =
+				pipe->scaler.num_ext_pxls_left[i];
+		else
+			pipe->scaler.left_ftch[i] =
+				pipe->scaler.num_ext_pxls_left[i];
+
+		if (pipe->scaler.num_ext_pxls_right[i] >= 0)
+			pipe->scaler.right_rpt[i] =
+				pipe->scaler.num_ext_pxls_right[i];
+		else
+			pipe->scaler.right_ftch[i] =
+				pipe->scaler.num_ext_pxls_right[i];
+
+		if (pipe->scaler.num_ext_pxls_top[i] >= 0)
+			pipe->scaler.top_rpt[i] =
+				pipe->scaler.num_ext_pxls_top[i];
+		else
+			pipe->scaler.top_ftch[i] =
+				pipe->scaler.num_ext_pxls_top[i];
+
+		if (pipe->scaler.num_ext_pxls_btm[i] >= 0)
+			pipe->scaler.btm_rpt[i] =
+				pipe->scaler.num_ext_pxls_btm[i];
+		else
+			pipe->scaler.btm_ftch[i] =
+				pipe->scaler.num_ext_pxls_btm[i];
+
+		pr_debug("plane repeat=%d, left=%d, right=%d, top=%d, btm=%d\n",
+				i, pipe->scaler.left_rpt[i],
+				pipe->scaler.right_rpt[i],
+				pipe->scaler.top_rpt[i],
+				pipe->scaler.btm_rpt[i]);
+		pr_debug("plane overfetch=%d, left=%d, right=%d, top=%d, btm=%d\n",
+				i, pipe->scaler.left_ftch[i],
+				pipe->scaler.right_ftch[i],
+				pipe->scaler.top_ftch[i],
+				pipe->scaler.btm_ftch[i]);
+	}
+
+	pipe->scaler.enable = 1;
+}
+
+/**
+ * mdss_mdp_pipe_calc_qseed3_cfg - Calculate source pipe's sw qseed3 filter
+ * configuration
+ *
+ * @pipe:	Source pipe struct
+ *
+ * Function sets the qseed3 filter configuration to bilinear configuration
+ * and also calculates pixel extension for qseed3
+ */
+void mdss_mdp_pipe_calc_qseed3_cfg(struct mdss_mdp_pipe *pipe)
+{
+	int i;
+	int roi_h;
+
+	/* calculate qseed3 pixel extension values */
+	for (i = 0; i < MAX_PLANES; i++) {
+
+		/* Pixel extension calculations for X direction */
+		pipe->scaler.roi_w[i] = DECIMATED_DIMENSION(pipe->src.w,
+			pipe->horz_deci);
+
+		/* YUV roi width must stay even */
+		if (pipe->src_fmt->is_yuv)
+			pipe->scaler.roi_w[i] &= ~0x1;
+		/*
+		 * phase step x,y for 0 plane should be calculated before
+		 * this
+		 */
+		if (pipe->src_fmt->is_yuv && (i == 1 || i == 2)) {
+			pipe->scaler.phase_step_x[i] =
+				pipe->scaler.phase_step_x[0]
+					>> pipe->chroma_sample_h;
+
+			pipe->scaler.phase_step_y[i] =
+				pipe->scaler.phase_step_y[0]
+					>> pipe->chroma_sample_v;
+
+			pipe->scaler.roi_w[i] >>= pipe->chroma_sample_h;
+		}
+
+		pipe->scaler.preload_x[i] = QSEED3_DEFAULT_PRELAOD_H;
+		pipe->scaler.src_width[i] = pipe->scaler.roi_w[i];
+		pipe->scaler.num_ext_pxls_left[i] = pipe->scaler.roi_w[i];
+
+		/* Pixel extension calculations for Y direction */
+		roi_h = DECIMATED_DIMENSION(pipe->src.h, pipe->vert_deci);
+
+		/* Subsampling of chroma components is factored */
+		if (i == 1 || i == 2)
+			roi_h >>= pipe->chroma_sample_v;
+
+		pipe->scaler.preload_y[i] = QSEED3_DEFAULT_PRELAOD_V;
+		pipe->scaler.src_height[i] = roi_h;
+		pipe->scaler.num_ext_pxls_top[i] = roi_h;
+
+		pr_debug("QSEED3 params=%d, preload_x=%d, preload_y=%d,src_w=%d,src_h=%d\n",
+				i, pipe->scaler.preload_x[i],
+				pipe->scaler.preload_y[i],
+				pipe->scaler.src_width[i],
+				pipe->scaler.src_height[i]);
+	}
+
+	pipe->scaler.dst_width = pipe->dst.w;
+	pipe->scaler.dst_height = pipe->dst.h;
+	/* assign filters: bilinear for color, dedicated alpha filter */
+	pipe->scaler.y_rgb_filter_cfg = FILTER_BILINEAR;
+	pipe->scaler.uv_filter_cfg = FILTER_BILINEAR;
+	pipe->scaler.alpha_filter_cfg = FILTER_ALPHA_BILINEAR;
+	pipe->scaler.lut_flag = 0;
+	pipe->scaler.enable = ENABLE_SCALE;
+}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp.c b/drivers/video/fbdev/msm/mdss_mdp_pp.c
new file mode 100644
index 0000000..6131ed1
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp.c
@@ -0,0 +1,7592 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include "mdss_fb.h"
+#include "mdss_mdp.h"
+#include "mdss_mdp_pp.h"
+#include <linux/uaccess.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include "mdss_mdp_pp_cache_config.h"
+
+/*
+ * Preset 8-bit colour-space-conversion matrices, indexed by MDSS_MDP_CSC_*
+ * type. Field order per entry (see mdss_mdp_csc_setup_data() below for how
+ * each field is programmed): flags, 3x3 coefficient matrix (csc_mv, Q-format
+ * fixed point where 0x0200 == 1.0 -- NOTE(review): exact Q-format inferred
+ * from identity entries, confirm against HW docs), pre-bias vector,
+ * post-bias vector, pre-clamp low/high pairs, post-clamp low/high pairs.
+ * "L" variants are limited-range (clamps 0x10..0xeb/0xf0), "FR" full range.
+ */
+struct mdp_csc_cfg mdp_csc_8bit_convert[MDSS_MDP_MAX_CSC] = {
+	[MDSS_MDP_CSC_YUV2RGB_601L] = {
+		0,
+		{
+			0x0254, 0x0000, 0x0331,
+			0x0254, 0xff37, 0xfe60,
+			0x0254, 0x0409, 0x0000,
+		},
+		{ 0xfff0, 0xff80, 0xff80,},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,},
+		{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+	},
+	[MDSS_MDP_CSC_YUV2RGB_601FR] = {
+		0,
+		{
+			0x0200, 0x0000, 0x02ce,
+			0x0200, 0xff50, 0xfe92,
+			0x0200, 0x038b, 0x0000,
+		},
+		{ 0x0000, 0xff80, 0xff80,},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+		{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+	},
+	[MDSS_MDP_CSC_YUV2RGB_709L] = {
+		0,
+		{
+			0x0254, 0x0000, 0x0396,
+			0x0254, 0xff93, 0xfeef,
+			0x0254, 0x043e, 0x0000,
+		},
+		{ 0xfff0, 0xff80, 0xff80,},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,},
+		{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+	},
+	[MDSS_MDP_CSC_YUV2RGB_2020L] = {
+		0,
+		{
+			0x0256, 0x0000, 0x035e,
+			0x0256, 0xffa0, 0xfeb2,
+			0x0256, 0x044c, 0x0000,
+		},
+		{ 0xfff0, 0xff80, 0xff80,},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,},
+		{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+	},
+	[MDSS_MDP_CSC_YUV2RGB_2020FR] = {
+		0,
+		{
+			0x0200, 0x0000, 0x02f3,
+			0x0200, 0xffac, 0xfedb,
+			0x0200, 0x03c3, 0x0000,
+		},
+		{ 0x0000, 0xff80, 0xff80,},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+		{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+	},
+	[MDSS_MDP_CSC_RGB2YUV_601L] = {
+		0,
+		{
+			0x0083, 0x0102, 0x0032,
+			0xffb4, 0xff6b, 0x00e1,
+			0x00e1, 0xff44, 0xffdb
+		},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0010, 0x0080, 0x0080,},
+		{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+		{ 0x0010, 0x00eb, 0x0010, 0x00f0, 0x0010, 0x00f0,},
+	},
+	[MDSS_MDP_CSC_RGB2YUV_601FR] = {
+		0,
+		{
+			0x0099, 0x012d, 0x003a,
+			0xffaa, 0xff56, 0x0100,
+			0x0100, 0xff2a, 0xffd6
+		},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0000, 0x0080, 0x0080,},
+		{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+		{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+	},
+	[MDSS_MDP_CSC_RGB2YUV_709L] = {
+		0,
+		{
+			0x005d, 0x013a, 0x0020,
+			0xffcc, 0xff53, 0x00e1,
+			0x00e1, 0xff34, 0xffeb
+		},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0010, 0x0080, 0x0080,},
+		{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+		{ 0x0010, 0x00eb, 0x0010, 0x00f0, 0x0010, 0x00f0,},
+	},
+	[MDSS_MDP_CSC_RGB2YUV_2020L] = {
+		0,
+		{
+			0x0073, 0x0129, 0x001a,
+			0xffc1, 0xff5e, 0x00e0,
+			0x00e0, 0xff32, 0xffee
+		},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0010, 0x0080, 0x0080,},
+		{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+		{ 0x0010, 0x00eb, 0x0010, 0x00f0, 0x0010, 0x00f0,},
+	},
+	[MDSS_MDP_CSC_RGB2YUV_2020FR] = {
+		0,
+		{
+			0x0086, 0x015b, 0x001e,
+			0xffb9, 0xff47, 0x0100,
+			0x0100, 0xff15, 0xffeb
+		},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0, 0x0080, 0x0080,},
+		{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+		{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+	},
+	/* Identity matrices: pass-through, clamps only */
+	[MDSS_MDP_CSC_YUV2YUV] = {
+		0,
+		{
+			0x0200, 0x0000, 0x0000,
+			0x0000, 0x0200, 0x0000,
+			0x0000, 0x0000, 0x0200,
+		},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+		{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+	},
+	[MDSS_MDP_CSC_RGB2RGB] = {
+		0,
+		{
+			0x0200, 0x0000, 0x0000,
+			0x0000, 0x0200, 0x0000,
+			0x0000, 0x0000, 0x0200,
+		},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+		{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+	},
+};
+
+/*
+ * 10-bit counterparts of mdp_csc_8bit_convert, used for CDM and the 10-bit
+ * SSPP CSC block (see MDSS_MDP_BLOCK_CDM / MDSS_MDP_BLOCK_SSPP_10 cases in
+ * mdss_mdp_csc_setup_data()). Same field layout; bias and clamp values are
+ * scaled for a 10-bit pipeline (e.g. limited-range clamps 0x40..0x3ac/0x3c0,
+ * full-range 0x0..0x3ff).
+ */
+struct mdp_csc_cfg mdp_csc_10bit_convert[MDSS_MDP_MAX_CSC] = {
+	[MDSS_MDP_CSC_YUV2RGB_601L] = {
+		0,
+		{
+			0x0254, 0x0000, 0x0331,
+			0x0254, 0xff37, 0xfe60,
+			0x0254, 0x0409, 0x0000,
+		},
+		{ 0xffc0, 0xfe00, 0xfe00,},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,},
+		{ 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+	},
+	[MDSS_MDP_CSC_YUV2RGB_601FR] = {
+		0,
+		{
+			0x0200, 0x0000, 0x02ce,
+			0x0200, 0xff50, 0xfe92,
+			0x0200, 0x038b, 0x0000,
+		},
+		{ 0x0000, 0xfe00, 0xfe00,},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+		{ 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+	},
+	/*
+	 * NOTE(review): coefficient [2][1] here is 0x043a while the 8-bit
+	 * 709L table uses 0x043e -- verify which is intended.
+	 */
+	[MDSS_MDP_CSC_YUV2RGB_709L] = {
+		0,
+		{
+			0x0254, 0x0000, 0x0396,
+			0x0254, 0xff93, 0xfeef,
+			0x0254, 0x043a, 0x0000,
+		},
+		{ 0xffc0, 0xfe00, 0xfe00,},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,},
+		{ 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+	},
+	[MDSS_MDP_CSC_YUV2RGB_2020L] = {
+		0,
+		{
+			0x0256, 0x0000, 0x035e,
+			0x0256, 0xffa0, 0xfeb2,
+			0x0256, 0x044c, 0x0000,
+		},
+		{ 0xffc0, 0xfe00, 0xfe00,},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,},
+		{ 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+	},
+	[MDSS_MDP_CSC_YUV2RGB_2020FR] = {
+		0,
+		{
+			0x0200, 0x0000, 0x02f3,
+			0x0200, 0xffac, 0xfedb,
+			0x0200, 0x03c3, 0x0000,
+		},
+		{ 0x0000, 0xfe00, 0xfe00,},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+		{ 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+	},
+	[MDSS_MDP_CSC_RGB2YUV_601L] = {
+		0,
+		{
+			0x0083, 0x0102, 0x0032,
+			0xffb4, 0xff6b, 0x00e1,
+			0x00e1, 0xff44, 0xffdb
+		},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0040, 0x0200, 0x0200,},
+		{ 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+		{ 0x0040, 0x03ac, 0x0040, 0x03c0, 0x0040, 0x03c0,},
+	},
+	[MDSS_MDP_CSC_RGB2YUV_601FR] = {
+		0,
+		{
+			0x0099, 0x012d, 0x003a,
+			0xffaa, 0xff56, 0x0100,
+			0x0100, 0xff2a, 0xffd6
+		},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0000, 0x0200, 0x0200,},
+		{ 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+		{ 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+	},
+	[MDSS_MDP_CSC_RGB2YUV_709L] = {
+		0,
+		{
+			0x005d, 0x013a, 0x0020,
+			0xffcc, 0xff53, 0x00e1,
+			0x00e1, 0xff34, 0xffeb
+		},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0040, 0x0200, 0x0200,},
+		{ 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+		{ 0x0040, 0x03ac, 0x0040, 0x03c0, 0x0040, 0x03c0,},
+	},
+	[MDSS_MDP_CSC_RGB2YUV_2020L] = {
+		0,
+		{
+			0x0073, 0x0129, 0x001a,
+			0xffc1, 0xff5e, 0x00e0,
+			0x00e0, 0xff32, 0xffee
+		},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0040, 0x0200, 0x0200,},
+		{ 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+		{ 0x0040, 0x03ac, 0x0040, 0x03c0, 0x0040, 0x03c0,},
+	},
+	[MDSS_MDP_CSC_RGB2YUV_2020FR] = {
+		0,
+		{
+			0x0086, 0x015b, 0x001e,
+			0xffb9, 0xff47, 0x0100,
+			0x0100, 0xff15, 0xffeb
+		},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0, 0x0200, 0x0200,},
+		{ 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+		{ 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+	},
+	/* Identity matrices: pass-through, clamps only */
+	[MDSS_MDP_CSC_YUV2YUV] = {
+		0,
+		{
+			0x0200, 0x0000, 0x0000,
+			0x0000, 0x0200, 0x0000,
+			0x0000, 0x0000, 0x0200,
+		},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+		{ 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+	},
+	[MDSS_MDP_CSC_RGB2RGB] = {
+		0,
+		{
+			0x0200, 0x0000, 0x0000,
+			0x0000, 0x0200, 0x0000,
+			0x0000, 0x0000, 0x0200,
+		},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+		{ 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+	},
+};
+
+/*
+ * Byte offsets of the CSC sub-register groups relative to a CSC block base:
+ * matrix coefficients (MV), bias vectors (BV), limit/clamp values (LV).
+ * CSC_POST_OFF is the additional offset from a "pre" register to its "post"
+ * counterpart. The LV shift packs the low/high clamp pair into one register
+ * (high word width differs for 8-bit vs 10-bit pipelines).
+ */
+#define CSC_MV_OFF	0x0
+#define CSC_BV_OFF	0x2C
+#define CSC_LV_OFF	0x14
+#define CSC_POST_OFF	0xC
+#define CSC_10BIT_LV_SHIFT	16
+#define CSC_8BIT_LV_SHIFT	8
+
+
+/* Histogram interrupt masks and wait timing (75 ms per frame budget) */
+#define HIST_INTR_DSPP_MASK		0xFFF000
+#define HIST_V2_INTR_BIT_MASK		0xF33000
+#define HIST_V1_INTR_BIT_MASK		0X333333
+#define HIST_WAIT_TIMEOUT(frame) ((75 * HZ * (frame)) / 1000)
+#define HIST_KICKOFF_WAIT_FRACTION 4
+
+/* hist collect state */
+enum {
+	HIST_UNKNOWN,
+	HIST_IDLE,
+	HIST_READY,
+};
+
+/* 4x4 ordered-dither threshold matrix, stored row-major */
+static u32 dither_matrix[16] = {
+	15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10};
+/* Maps bits-per-component (index) to a HW dither depth code -- TODO confirm */
+static u32 dither_depth_map[9] = {
+	0, 0, 0, 0, 0, 1, 2, 3, 3};
+
+/*
+ * Precomputed IGC LUT used for limited-range output (consumed by
+ * mdss_mdp_limited_lut_igc_config()). Each of the IGC_LUT_ENTRIES packed
+ * u32 values holds per-channel codes -- NOTE(review): exact bit packing of
+ * the three colour components per word is not visible here; confirm against
+ * the IGC register layout before editing these values.
+ */
+static u32 igc_limited[IGC_LUT_ENTRIES] = {
+	16777472, 17826064, 18874656, 19923248,
+	19923248, 20971840, 22020432, 23069024,
+	24117616, 25166208, 26214800, 26214800,
+	27263392, 28311984, 29360576, 30409168,
+	31457760, 32506352, 32506352, 33554944,
+	34603536, 35652128, 36700720, 37749312,
+	38797904, 38797904, 39846496, 40895088,
+	41943680, 42992272, 44040864, 45089456,
+	45089456, 46138048, 47186640, 48235232,
+	49283824, 50332416, 51381008, 51381008,
+	52429600, 53478192, 54526784, 55575376,
+	56623968, 57672560, 58721152, 58721152,
+	59769744, 60818336, 61866928, 62915520,
+	63964112, 65012704, 65012704, 66061296,
+	67109888, 68158480, 69207072, 70255664,
+	71304256, 71304256, 72352848, 73401440,
+	74450032, 75498624, 76547216, 77595808,
+	77595808, 78644400, 79692992, 80741584,
+	81790176, 82838768, 83887360, 83887360,
+	84935952, 85984544, 87033136, 88081728,
+	89130320, 90178912, 90178912, 91227504,
+	92276096, 93324688, 94373280, 95421872,
+	96470464, 96470464, 97519056, 98567648,
+	99616240, 100664832, 101713424, 102762016,
+	102762016, 103810608, 104859200, 105907792,
+	106956384, 108004976, 109053568, 109053568,
+	110102160, 111150752, 112199344, 113247936,
+	114296528, 115345120, 115345120, 116393712,
+	117442304, 118490896, 119539488, 120588080,
+	121636672, 121636672, 122685264, 123733856,
+	124782448, 125831040, 126879632, 127928224,
+	127928224, 128976816, 130025408, 131074000,
+	132122592, 133171184, 134219776, 135268368,
+	135268368, 136316960, 137365552, 138414144,
+	139462736, 140511328, 141559920, 141559920,
+	142608512, 143657104, 144705696, 145754288,
+	146802880, 147851472, 147851472, 148900064,
+	149948656, 150997248, 152045840, 153094432,
+	154143024, 154143024, 155191616, 156240208,
+	157288800, 158337392, 159385984, 160434576,
+	160434576, 161483168, 162531760, 163580352,
+	164628944, 165677536, 166726128, 166726128,
+	167774720, 168823312, 169871904, 170920496,
+	171969088, 173017680, 173017680, 174066272,
+	175114864, 176163456, 177212048, 178260640,
+	179309232, 179309232, 180357824, 181406416,
+	182455008, 183503600, 184552192, 185600784,
+	185600784, 186649376, 187697968, 188746560,
+	189795152, 190843744, 191892336, 191892336,
+	192940928, 193989520, 195038112, 196086704,
+	197135296, 198183888, 198183888, 199232480,
+	200281072, 201329664, 202378256, 203426848,
+	204475440, 204475440, 205524032, 206572624,
+	207621216, 208669808, 209718400, 210766992,
+	211815584, 211815584, 212864176, 213912768,
+	214961360, 216009952, 217058544, 218107136,
+	218107136, 219155728, 220204320, 221252912,
+	222301504, 223350096, 224398688, 224398688,
+	225447280, 226495872, 227544464, 228593056,
+	229641648, 230690240, 230690240, 231738832,
+	232787424, 233836016, 234884608, 235933200,
+	236981792, 236981792, 238030384, 239078976,
+	240127568, 241176160, 242224752, 243273344,
+	243273344, 244321936, 245370528, 246419120};
+
+
+/* Register-block sizes (bytes) for the various PP sub-blocks */
+#define MDSS_MDP_PA_SIZE		0xC
+#define MDSS_MDP_SIX_ZONE_SIZE		0xC
+#define MDSS_MDP_MEM_COL_SIZE		0x3C
+#define MDSS_MDP_GC_SIZE		0x28
+#define MDSS_MDP_PCC_SIZE		0xB8
+#define MDSS_MDP_GAMUT_SIZE		0x5C
+#define MDSS_MDP_IGC_DSPP_SIZE		0x28
+#define MDSS_MDP_IGC_SSPP_SIZE		0x88
+#define MDSS_MDP_VIG_QSEED2_SHARP_SIZE	0x0C
+#define TOTAL_BLEND_STAGES		0x4
+
+/* Per-feature dirty bits: marks which PP config must be (re)programmed */
+#define PP_FLAGS_DIRTY_PA	0x1
+#define PP_FLAGS_DIRTY_PCC	0x2
+#define PP_FLAGS_DIRTY_IGC	0x4
+#define PP_FLAGS_DIRTY_ARGC	0x8
+#define PP_FLAGS_DIRTY_ENHIST	0x10
+#define PP_FLAGS_DIRTY_DITHER	0x20
+#define PP_FLAGS_DIRTY_GAMUT	0x40
+#define PP_FLAGS_DIRTY_HIST_COL	0x80
+#define PP_FLAGS_DIRTY_PGC	0x100
+#define PP_FLAGS_DIRTY_SHARP	0x200
+/* Leave space for future features */
+#define PP_FLAGS_RESUME_COMMIT	0x10000000
+
+#define IS_PP_RESUME_COMMIT(x)	((x) & PP_FLAGS_RESUME_COMMIT)
+/* Features whose state lives in LUTs and must be replayed on resume */
+#define PP_FLAGS_LUT_BASED (PP_FLAGS_DIRTY_IGC | PP_FLAGS_DIRTY_GAMUT | \
+		PP_FLAGS_DIRTY_PGC | PP_FLAGS_DIRTY_ARGC)
+#define IS_PP_LUT_DIRTY(x)	((x) & PP_FLAGS_LUT_BASED)
+#define IS_SIX_ZONE_DIRTY(d, pa)	(((d) & PP_FLAGS_DIRTY_PA) && \
+		((pa) & MDP_PP_PA_SIX_ZONE_ENABLE))
+
+/* mdp_location values: feature sits in the source pipe (SSPP) or DSPP */
+#define PP_SSPP		0
+#define PP_DSPP		1
+
+/* Sentinel "no AD block assigned" hw number */
+#define PP_AD_BAD_HW_NUM 255
+
+/* Assertive Display (AD) state machine bits */
+#define PP_AD_STATE_INIT	0x2
+#define PP_AD_STATE_CFG		0x4
+#define PP_AD_STATE_DATA	0x8
+#define PP_AD_STATE_RUN		0x10
+#define PP_AD_STATE_VSYNC	0x20
+#define PP_AD_STATE_BL_LIN	0x40
+#define PP_AD_STATE_IPC_RESUME	0x80
+#define PP_AD_STATE_IPC_RESET	0x100
+
+#define PP_AD_STATE_IS_INITCFG(st)	(((st) & PP_AD_STATE_INIT) &&\
+						((st) & PP_AD_STATE_CFG))
+
+#define PP_AD_STATE_IS_READY(st)	(((st) & PP_AD_STATE_INIT) &&\
+						((st) & PP_AD_STATE_CFG) &&\
+						((st) & PP_AD_STATE_DATA))
+
+/* AD dirty bits mirroring the state bits above */
+#define PP_AD_STS_DIRTY_INIT	0x2
+#define PP_AD_STS_DIRTY_CFG	0x4
+#define PP_AD_STS_DIRTY_DATA	0x8
+#define PP_AD_STS_DIRTY_VSYNC	0x10
+#define PP_AD_STS_DIRTY_ENABLE	0x20
+
+#define PP_AD_STS_IS_DIRTY(sts) (((sts) & PP_AD_STS_DIRTY_INIT) ||\
+					((sts) & PP_AD_STS_DIRTY_CFG))
+
+/* Bits 0 and 1 and 5 */
+#define MDSS_AD_INPUT_AMBIENT	(0x23)
+/* Bits 3 and 7 */
+#define MDSS_AD_INPUT_STRENGTH	(0x88)
+/*
+ * Check data by shifting by mode to see if it matches to the
+ * MDSS_AD_INPUT_* bitfields
+ */
+#define MDSS_AD_MODE_DATA_MATCH(mode, data) ((1 << (mode)) & (data))
+#define MDSS_AD_RUNNING_AUTO_BL(ad) (((ad)->state & PP_AD_STATE_RUN) &&\
+				((ad)->cfg.mode == MDSS_AD_MODE_AUTO_BL))
+#define MDSS_AD_RUNNING_AUTO_STR(ad) (((ad)->state & PP_AD_STATE_RUN) &&\
+				((ad)->cfg.mode == MDSS_AD_MODE_AUTO_STR))
+#define MDSS_AD_AUTO_TRIGGER 0x80
+#define MDSS_AD_T_FILTER_CTRL_0 0
+#define MDSS_AD_IPC_FRAME_COUNT 2
+#define MDSS_AD_MODE_IPC_BIT	0x4
+#define MDSS_AD_MODE_MAN_IPC	0x5
+
+/* Default sharpening parameters programmed by pp_sharp_config() */
+#define SHARP_STRENGTH_DEFAULT	32
+#define SHARP_EDGE_THR_DEFAULT	112
+#define SHARP_SMOOTH_THR_DEFAULT	8
+#define SHARP_NOISE_THR_DEFAULT	2
+
+static struct mdp_pp_driver_ops pp_driver_ops;
+static struct mdp_pp_feature_ops *pp_ops;
+
+static DEFINE_MUTEX(mdss_pp_mutex);
+static struct mdss_pp_res_type *mdss_pp_res;
+
+static u32 pp_hist_read(char __iomem *v_addr,
+				struct pp_hist_col_info *hist_info);
+static int pp_hist_setup(u32 *op, u32 block, struct mdss_mdp_mixer *mix,
+				struct pp_sts_type *pp_sts);
+static int pp_hist_disable(struct pp_hist_col_info *hist_info);
+static void pp_update_pcc_regs(char __iomem *addr,
+				struct mdp_pcc_cfg_data *cfg_ptr);
+static void pp_update_igc_lut(struct mdp_igc_lut_data *cfg,
+				char __iomem *addr, u32 blk_idx,
+				u32 total_idx);
+static void pp_update_gc_one_lut(char __iomem *addr,
+				struct mdp_ar_gc_lut_data *lut_data,
+				uint8_t num_stages);
+static void pp_update_argc_lut(char __iomem *addr,
+				struct mdp_pgc_lut_data *config);
+static void pp_update_hist_lut(char __iomem *base,
+				struct mdp_hist_lut_data *cfg);
+static int pp_gm_has_invalid_lut_size(struct mdp_gamut_cfg_data *config);
+static void pp_gamut_config(struct mdp_gamut_cfg_data *gamut_cfg,
+				char __iomem *base,
+				struct pp_sts_type *pp_sts);
+static void pp_pa_config(unsigned long flags, char __iomem *addr,
+				struct pp_sts_type *pp_sts,
+				struct mdp_pa_cfg *pa_config);
+static void pp_pa_v2_config(unsigned long flags, char __iomem *addr,
+				struct pp_sts_type *pp_sts,
+				struct mdp_pa_v2_data *pa_v2_config,
+				int mdp_location);
+static void pp_pcc_config(unsigned long flags, char __iomem *addr,
+				struct pp_sts_type *pp_sts,
+				struct mdp_pcc_cfg_data *pcc_config);
+static void pp_igc_config(unsigned long flags, char __iomem *addr,
+				struct pp_sts_type *pp_sts,
+				struct mdp_igc_lut_data *igc_config,
+				u32 pipe_num, u32 pipe_cnt);
+static void pp_enhist_config(unsigned long flags, char __iomem *addr,
+				struct pp_sts_type *pp_sts,
+				struct mdp_hist_lut_data *enhist_cfg);
+static void pp_dither_config(char __iomem *addr,
+				struct pp_sts_type *pp_sts,
+				struct mdp_dither_cfg_data *dither_cfg);
+static void pp_dspp_opmode_config(struct mdss_mdp_ctl *ctl, u32 num,
+					struct pp_sts_type *pp_sts, int mdp_rev,
+					u32 *opmode);
+static void pp_sharp_config(char __iomem *addr,
+				struct pp_sts_type *pp_sts,
+				struct mdp_sharp_cfg *sharp_config);
+static void pp_update_pa_v2_vig_opmode(struct pp_sts_type *pp_sts,
+				u32 *opmode);
+static int pp_copy_pa_six_zone_lut(struct mdp_pa_v2_cfg_data *pa_v2_config,
+				u32 disp_num);
+static void pp_update_pa_v2_global_adj_regs(char __iomem *addr,
+				struct mdp_pa_v2_data *pa_config);
+static void pp_update_pa_v2_mem_col(char __iomem *addr,
+				struct mdp_pa_v2_data *pa_v2_config);
+static void pp_update_pa_v2_mem_col_regs(char __iomem *addr,
+				struct mdp_pa_mem_col_cfg *cfg);
+static void pp_update_pa_v2_six_zone_regs(char __iomem *addr,
+				struct mdp_pa_v2_data *pa_v2_config);
+static void pp_update_pa_v2_sts(struct pp_sts_type *pp_sts,
+				struct mdp_pa_v2_data *pa_v2_config);
+static int pp_read_pa_v2_regs(char __iomem *addr,
+				struct mdp_pa_v2_data *pa_v2_config,
+				u32 disp_num);
+static void pp_read_pa_mem_col_regs(char __iomem *addr,
+				struct mdp_pa_mem_col_cfg *mem_col_cfg);
+static struct msm_fb_data_type *mdss_get_mfd_from_index(int index);
+static int mdss_mdp_mfd_valid_ad(struct msm_fb_data_type *mfd);
+static int mdss_mdp_get_ad(struct msm_fb_data_type *mfd,
+					struct mdss_ad_info **ad);
+static int pp_ad_invalidate_input(struct msm_fb_data_type *mfd);
+static void pp_ad_vsync_handler(struct mdss_mdp_ctl *ctl, ktime_t t);
+static void pp_ad_cfg_write(struct mdss_mdp_ad *ad_hw,
+						struct mdss_ad_info *ad);
+static void pp_ad_init_write(struct mdss_mdp_ad *ad_hw,
+			struct mdss_ad_info *ad, struct mdss_mdp_ctl *ctl);
+static void pp_ad_input_write(struct mdss_mdp_ad *ad_hw,
+						struct mdss_ad_info *ad);
+static int pp_ad_setup_hw_nums(struct msm_fb_data_type *mfd,
+						struct mdss_ad_info *ad);
+static void pp_ad_bypass_config(struct mdss_ad_info *ad,
+				struct mdss_mdp_ctl *ctl, u32 num, u32 *opmode);
+static int mdss_mdp_ad_setup(struct msm_fb_data_type *mfd);
+static void pp_ad_cfg_lut(char __iomem *addr, u32 *data);
+static int pp_ad_attenuate_bl(struct mdss_ad_info *ad, u32 bl, u32 *bl_out);
+static int pp_ad_linearize_bl(struct mdss_ad_info *ad, u32 bl, u32 *bl_out,
+		int inv);
+static int pp_ad_calc_bl(struct msm_fb_data_type *mfd, int bl_in, int *bl_out,
+		bool *bl_out_notify);
+static int pp_num_to_side(struct mdss_mdp_ctl *ctl, u32 num);
+static int pp_update_pcc_pipe_setup(struct mdss_mdp_pipe *pipe, u32 location);
+static void mdss_mdp_hist_irq_set_mask(u32 irq);
+static void mdss_mdp_hist_irq_clear_mask(u32 irq);
+static void mdss_mdp_hist_intr_notify(u32 disp);
+static int mdss_mdp_panel_default_dither_config(struct msm_fb_data_type *mfd,
+					u32 panel_bpp, bool enable);
+static int mdss_mdp_limited_lut_igc_config(struct msm_fb_data_type *mfd,
+					bool enable);
+static inline int pp_validate_dspp_mfd_block(struct msm_fb_data_type *mfd,
+					int block);
+static int pp_mfd_release_all(struct msm_fb_data_type *mfd);
+static int pp_mfd_ad_release_all(struct msm_fb_data_type *mfd);
+static int mdss_mdp_ad_ipc_reset(struct msm_fb_data_type *mfd);
+static int pp_get_driver_ops(struct mdp_pp_driver_ops *ops);
+static int pp_ppb_setup(struct mdss_mdp_mixer *mixer);
+
+static u32 last_sts, last_state;
+
+/*
+ * Fetch the DCM state of the framebuffer device that owns this pipe's left
+ * mixer. *dcm_state is left unmodified if any link in the
+ * pipe -> mixer_left -> ctl -> mfd chain is NULL, so callers should
+ * pre-initialize it (callers here seed it with DCM_UNINIT).
+ */
+static inline void mdss_mdp_pp_get_dcm_state(struct mdss_mdp_pipe *pipe,
+	u32 *dcm_state)
+{
+	if (pipe && pipe->mixer_left && pipe->mixer_left->ctl &&
+		pipe->mixer_left->ctl->mfd)
+		*dcm_state = pipe->mixer_left->ctl->mfd->dcm_state;
+}
+
+/*
+ * Linearly rescale 'in' from the range [0, in_max] to [0, out_max] using
+ * round-to-nearest integer math (the "+ in_max" term before dividing by
+ * 2*in_max implements the rounding). A strictly positive input is never
+ * mapped to zero: the result is clamped up to 1 in that case.
+ *
+ * Returns 0 on success, -EINVAL on negative input, NULL out pointer, or
+ * non-positive range bounds.
+ */
+inline int linear_map(int in, int *out, int in_max, int out_max)
+{
+	if (in < 0 || !out || in_max <= 0 || out_max <= 0)
+		return -EINVAL;
+	*out = ((2 * (in * out_max) + in_max) / (2 * in_max));
+	pr_debug("in = %d, out = %d, in_max = %d, out_max = %d\n",
+		in, *out, in_max, out_max);
+	if ((in > 0) && (*out == 0))
+		*out = 1;
+	return 0;
+
+}
+
+/**
+ * __get_hist_pipe() - get a pipe only if histogram is supported on it
+ * @pnum: pipe number desired
+ *
+ * returns the pipe with id only if the pipe supports sspp histogram,
+ * NULL otherwise. On success the pipe's refcount is taken via
+ * mdss_mdp_pipe_get(); the caller is responsible for releasing it.
+ */
+static inline struct mdss_mdp_pipe *__get_hist_pipe(int pnum)
+{
+	enum mdss_mdp_pipe_type ptype;
+
+	ptype = get_pipe_type_from_num(pnum);
+
+	/* only VIG pipes support histogram */
+	if (ptype != MDSS_MDP_PIPE_TYPE_VIG)
+		return NULL;
+
+	/* VIG pipes do not support multirect, so RECT0 is always used */
+	return mdss_mdp_pipe_get(BIT(pnum), MDSS_MDP_PIPE_RECT0);
+}
+
+/*
+ * Program a CSC (colour space conversion) register block with the supplied
+ * matrix/bias/clamp data.
+ *
+ * @block:   which HW unit owns the CSC (SSPP, WB, CDM, or 10-bit SSPP)
+ * @blk_idx: index of the unit within its type (pipe number, wb/cdm index)
+ * @data:    matrix coefficients, pre/post bias vectors and clamp values
+ *
+ * Resolves the unit's register base, then writes in order: the 3x3
+ * coefficient matrix (packed two 16-bit coefficients per 32-bit register),
+ * the pre/post bias vectors, and the pre/post clamp pairs (low/high packed
+ * per register, shifted by 8 or 16 bits depending on pipeline depth).
+ * Returns 0 on success or -EINVAL for an unsupported/invalid block.
+ */
+int mdss_mdp_csc_setup_data(u32 block, u32 blk_idx, struct mdp_csc_cfg *data)
+{
+	int i, ret = 0;
+	char __iomem *base, *addr;
+	u32 val = 0, lv_shift = 0;
+	struct mdss_data_type *mdata;
+	struct mdss_mdp_pipe *pipe;
+	struct mdss_mdp_cdm *cdm;
+	struct mdss_mdp_writeback *wb;
+
+	if (data == NULL) {
+		pr_err("no csc matrix specified\n");
+		return -EINVAL;
+	}
+
+	mdata = mdss_mdp_get_mdata();
+	switch (block) {
+	case MDSS_MDP_BLOCK_SSPP:
+		lv_shift = CSC_8BIT_LV_SHIFT;
+		/*
+		 * CSC is used on VIG pipes and currently VIG pipes do not
+		 * support multirect so always use RECT0.
+		 */
+		pipe = mdss_mdp_pipe_search(mdata, BIT(blk_idx),
+				MDSS_MDP_PIPE_RECT0);
+		if (!pipe) {
+			pr_err("invalid blk index=%d\n", blk_idx);
+			ret = -EINVAL;
+			break;
+		}
+		if (mdss_mdp_pipe_is_yuv(pipe)) {
+			base = pipe->base + MDSS_MDP_REG_VIG_CSC_1_BASE;
+		} else {
+			pr_err("non ViG pipe %d for CSC is not allowed\n",
+				blk_idx);
+			ret = -EINVAL;
+		}
+		break;
+	case MDSS_MDP_BLOCK_WB:
+		lv_shift = CSC_8BIT_LV_SHIFT;
+		if (blk_idx < mdata->nwb) {
+			wb = mdata->wb + blk_idx;
+			if (wb->base)
+				base = wb->base + MDSS_MDP_REG_WB_CSC_BASE;
+			else
+				ret = -EINVAL;
+		} else {
+			ret = -EINVAL;
+		}
+		break;
+	case MDSS_MDP_BLOCK_CDM:
+		/* CDM runs a 10-bit pipeline: clamp pairs packed at 16 bits */
+		lv_shift = CSC_10BIT_LV_SHIFT;
+		if (blk_idx < mdata->ncdm) {
+			cdm = mdata->cdm_off + blk_idx;
+			if (cdm->base)
+				base = cdm->base +
+					MDSS_MDP_REG_CDM_CSC_10_BASE;
+			else
+				ret = -EINVAL;
+		} else {
+			ret = -EINVAL;
+		}
+		break;
+	case MDSS_MDP_BLOCK_SSPP_10:
+		lv_shift = CSC_10BIT_LV_SHIFT;
+
+		/* CSC can be applied only on VIG which RECT0 only */
+		pipe = mdss_mdp_pipe_search(mdata, BIT(blk_idx),
+				MDSS_MDP_PIPE_RECT0);
+		if (!pipe) {
+			pr_err("invalid blk index=%d\n", blk_idx);
+			ret = -EINVAL;
+			break;
+		}
+		if (mdss_mdp_pipe_is_yuv(pipe)) {
+			base = pipe->base + MDSS_MDP_REG_VIG_CSC_10_BASE;
+		} else {
+			pr_err("non ViG pipe %d for CSC is not allowed\n",
+			blk_idx);
+			ret = -EINVAL;
+		}
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	if (ret != 0) {
+		pr_err("unsupported block id %d for csc\n", blk_idx);
+		return ret;
+	}
+
+	/* Matrix: coefficients are packed two per register (odd i -> hi 16) */
+	addr = base + CSC_MV_OFF;
+	for (i = 0; i < 9; i++) {
+		if (i & 0x1) {
+			val |= data->csc_mv[i] << 16;
+			writel_relaxed(val, addr);
+			addr += sizeof(u32);
+		} else {
+			val = data->csc_mv[i];
+		}
+	}
+	writel_relaxed(val, addr); /* COEFF_33 */
+
+	/* Bias vectors: pre and post registers sit CSC_POST_OFF apart */
+	addr = base + CSC_BV_OFF;
+	for (i = 0; i < 3; i++) {
+		writel_relaxed(data->csc_pre_bv[i], addr);
+		writel_relaxed(data->csc_post_bv[i], addr + CSC_POST_OFF);
+		addr += sizeof(u32);
+	}
+
+	/* Clamp pairs: low value in high bits (lv_shift), high in low bits */
+	addr = base + CSC_LV_OFF;
+	for (i = 0; i < 6; i += 2) {
+		val = (data->csc_pre_lv[i] << lv_shift) | data->csc_pre_lv[i+1];
+		writel_relaxed(val, addr);
+
+		val = (data->csc_post_lv[i] << lv_shift) |
+			data->csc_post_lv[i+1];
+		writel_relaxed(val, addr + CSC_POST_OFF);
+		addr += sizeof(u32);
+	}
+
+	return ret;
+}
+
+/*
+ * Program a CSC block with one of the preset conversion matrices.
+ * Selects the 10-bit preset table for CDM and 10-bit SSPP blocks, the
+ * 8-bit table otherwise, then delegates to mdss_mdp_csc_setup_data().
+ * Returns -ERANGE for an out-of-range csc_type, otherwise the result of
+ * mdss_mdp_csc_setup_data().
+ */
+int mdss_mdp_csc_setup(u32 block, u32 blk_idx, u32 csc_type)
+{
+	struct mdp_csc_cfg *data;
+
+	if (csc_type >= MDSS_MDP_MAX_CSC) {
+		pr_err("invalid csc matrix index %d\n", csc_type);
+		return -ERANGE;
+	}
+
+	pr_debug("csc type=%d blk=%d idx=%d\n", csc_type,
+		 block, blk_idx);
+
+	if (block == MDSS_MDP_BLOCK_CDM || block == MDSS_MDP_BLOCK_SSPP_10)
+		data = &mdp_csc_10bit_convert[csc_type];
+	else
+		data = &mdp_csc_8bit_convert[csc_type];
+	return mdss_mdp_csc_setup_data(block, blk_idx, data);
+}
+
+/*
+ * Program DSPP gamut-mapping LUTs and update the feature status bits.
+ * For each of the MDP_GAMUT_TABLE_NUM tables, the R, G and B entries are
+ * written (masked to 13 bits) to the same register before addr advances --
+ * NOTE(review): this relies on the gamut LUT register auto-incrementing per
+ * write; confirm against the DSPP register spec.
+ */
+static void pp_gamut_config(struct mdp_gamut_cfg_data *gamut_cfg,
+				char __iomem *base, struct pp_sts_type *pp_sts)
+{
+	char __iomem *addr;
+	int i, j;
+
+	if (gamut_cfg->flags & MDP_PP_OPS_WRITE) {
+		addr = base + MDSS_MDP_REG_DSPP_GAMUT_BASE;
+		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+			for (j = 0; j < gamut_cfg->tbl_size[i]; j++)
+				writel_relaxed((u32)gamut_cfg->r_tbl[i][j]
+						& 0x1FFF, addr);
+			addr += 4;
+		}
+		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+			for (j = 0; j < gamut_cfg->tbl_size[i]; j++)
+				writel_relaxed((u32)gamut_cfg->g_tbl[i][j]
+						& 0x1FFF, addr);
+			addr += 4;
+		}
+		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+			for (j = 0; j < gamut_cfg->tbl_size[i]; j++)
+				writel_relaxed((u32)gamut_cfg->b_tbl[i][j]
+						& 0x1FFF, addr);
+			addr += 4;
+		}
+		/* gamut_first selects gamut-before-PCC ordering in the DSPP */
+		if (gamut_cfg->gamut_first)
+			pp_sts->gamut_sts |= PP_STS_GAMUT_FIRST;
+	}
+
+	if (gamut_cfg->flags & MDP_PP_OPS_DISABLE)
+		pp_sts->gamut_sts &= ~PP_STS_ENABLE;
+	else if (gamut_cfg->flags & MDP_PP_OPS_ENABLE)
+		pp_sts->gamut_sts |= PP_STS_ENABLE;
+	pp_sts_set_split_bits(&pp_sts->gamut_sts, gamut_cfg->flags);
+}
+
+/*
+ * Program legacy (v1) Picture Adjustment registers: hue, saturation, value
+ * and contrast, laid out as four consecutive 32-bit registers at addr.
+ * Only acts when the PA dirty flag is set; the enable/disable status bit
+ * is updated from pa_config->flags.
+ */
+static void pp_pa_config(unsigned long flags, char __iomem *addr,
+				struct pp_sts_type *pp_sts,
+				struct mdp_pa_cfg *pa_config)
+{
+	if (flags & PP_FLAGS_DIRTY_PA) {
+		if (pa_config->flags & MDP_PP_OPS_WRITE) {
+			writel_relaxed(pa_config->hue_adj, addr);
+			addr += 4;
+			writel_relaxed(pa_config->sat_adj, addr);
+			addr += 4;
+			writel_relaxed(pa_config->val_adj, addr);
+			addr += 4;
+			writel_relaxed(pa_config->cont_adj, addr);
+		}
+		if (pa_config->flags & MDP_PP_OPS_DISABLE)
+			pp_sts->pa_sts &= ~PP_STS_ENABLE;
+		else if (pa_config->flags & MDP_PP_OPS_ENABLE)
+			pp_sts->pa_sts |= PP_STS_ENABLE;
+	}
+}
+
+/*
+ * Program PA v2 registers. Global hue/sat/val/cont adjustments are written
+ * first; the remaining layout differs by location: DSPP has six-zone
+ * registers at +0x10 then memory-colour at a further +0xC, while on SSPP
+ * the memory-colour block lives at MDSS_MDP_REG_VIG_MEM_COL_BASE (addr is
+ * rebased from the PA base to reach it). Status bits are refreshed last.
+ * @mdp_location: PP_DSPP or PP_SSPP.
+ */
+static void pp_pa_v2_config(unsigned long flags, char __iomem *addr,
+				struct pp_sts_type *pp_sts,
+				struct mdp_pa_v2_data *pa_v2_config,
+				int mdp_location)
+{
+	if ((flags & PP_FLAGS_DIRTY_PA) &&
+			(pa_v2_config->flags & MDP_PP_OPS_WRITE)) {
+		pp_update_pa_v2_global_adj_regs(addr,
+				pa_v2_config);
+		/* Update PA DSPP Regs */
+		if (mdp_location == PP_DSPP) {
+			addr += 0x10;
+			pp_update_pa_v2_six_zone_regs(addr, pa_v2_config);
+			addr += 0xC;
+			pp_update_pa_v2_mem_col(addr, pa_v2_config);
+		} else if (mdp_location == PP_SSPP) { /* Update PA SSPP Regs */
+			addr -= MDSS_MDP_REG_VIG_PA_BASE;
+			addr += MDSS_MDP_REG_VIG_MEM_COL_BASE;
+			pp_update_pa_v2_mem_col(addr, pa_v2_config);
+		}
+		pp_update_pa_v2_sts(pp_sts, pa_v2_config);
+	}
+}
+
+/*
+ * Write the four PA v2 global adjustment registers (hue, sat, val, cont),
+ * each gated by its own enable flag. addr always advances so disabled
+ * fields keep their previous register contents rather than being zeroed.
+ */
+static void pp_update_pa_v2_global_adj_regs(char __iomem *addr,
+				struct mdp_pa_v2_data *pa_v2_config)
+{
+	if (pa_v2_config->flags & MDP_PP_PA_HUE_ENABLE)
+		writel_relaxed(pa_v2_config->global_hue_adj, addr);
+	addr += 4;
+	if (pa_v2_config->flags & MDP_PP_PA_SAT_ENABLE)
+		/* Sat Global Adjust reg includes Sat Threshold */
+		writel_relaxed(pa_v2_config->global_sat_adj, addr);
+	addr += 4;
+	if (pa_v2_config->flags & MDP_PP_PA_VAL_ENABLE)
+		writel_relaxed(pa_v2_config->global_val_adj, addr);
+	addr += 4;
+	if (pa_v2_config->flags & MDP_PP_PA_CONT_ENABLE)
+		writel_relaxed(pa_v2_config->global_cont_adj, addr);
+}
+
+/*
+ * Program the three PA v2 memory-colour zones (skin, sky, foliage), each
+ * occupying a 0x14-byte register group and gated by its own enable flag.
+ * As in the global-adjust writer, addr advances unconditionally so a
+ * disabled zone's registers are left untouched.
+ */
+static void pp_update_pa_v2_mem_col(char __iomem *addr,
+				struct mdp_pa_v2_data *pa_v2_config)
+{
+	/* Update skin zone memory color registers */
+	if (pa_v2_config->flags & MDP_PP_PA_SKIN_ENABLE)
+		pp_update_pa_v2_mem_col_regs(addr, &pa_v2_config->skin_cfg);
+	addr += 0x14;
+	/* Update sky zone memory color registers */
+	if (pa_v2_config->flags & MDP_PP_PA_SKY_ENABLE)
+		pp_update_pa_v2_mem_col_regs(addr, &pa_v2_config->sky_cfg);
+	addr += 0x14;
+	/* Update foliage zone memory color registers */
+	if (pa_v2_config->flags & MDP_PP_PA_FOL_ENABLE)
+		pp_update_pa_v2_mem_col_regs(addr, &pa_v2_config->fol_cfg);
+}
+
+/*
+ * Write one memory-colour zone's five consecutive registers: two colour
+ * adjust words followed by the hue/sat/val region bounds.
+ */
+static void pp_update_pa_v2_mem_col_regs(char __iomem *addr,
+				struct mdp_pa_mem_col_cfg *cfg)
+{
+	writel_relaxed(cfg->color_adjust_p0, addr);
+	addr += 4;
+	writel_relaxed(cfg->color_adjust_p1, addr);
+	addr += 4;
+	writel_relaxed(cfg->hue_region, addr);
+	addr += 4;
+	writel_relaxed(cfg->sat_region, addr);
+	addr += 4;
+	writel_relaxed(cfg->val_region, addr);
+}
+
+/*
+ * Load the PA v2 six-zone LUT. Register layout: p0 curve register at addr,
+ * p1 at addr+4, threshold at addr+8. The p1 value must be written before
+ * p0 for each LUT entry; the first p0 write carries bit 26 ("index
+ * update") to arm the HW's auto-incrementing LUT index, after which the
+ * remaining entries are streamed without it.
+ */
+static void pp_update_pa_v2_six_zone_regs(char __iomem *addr,
+				struct mdp_pa_v2_data *pa_v2_config)
+{
+	int i;
+	u32 data;
+	/* Update six zone memory color registers */
+	if (pa_v2_config->flags & MDP_PP_PA_SIX_ZONE_ENABLE) {
+		addr += 4;
+		writel_relaxed(pa_v2_config->six_zone_curve_p1[0], addr);
+		addr -= 4;
+		/* Index Update to trigger auto-incrementing LUT accesses */
+		data = (1 << 26);
+		writel_relaxed((pa_v2_config->six_zone_curve_p0[0] & 0xFFF) |
+				data, addr);
+
+		/* Remove Index Update */
+		for (i = 1; i < MDP_SIX_ZONE_LUT_SIZE; i++) {
+			addr += 4;
+			writel_relaxed(pa_v2_config->six_zone_curve_p1[i],
+					addr);
+			addr -= 4;
+			writel_relaxed(pa_v2_config->six_zone_curve_p0[i] &
+					0xFFF, addr);
+		}
+		addr += 8;
+		writel_relaxed(pa_v2_config->six_zone_thresh, addr);
+	}
+}
+
+/*
+ * Rebuild pp_sts->pa_sts from scratch out of the user-supplied PA v2 flag
+ * word: enable bit, global HSV masks, memory-colour masks, six-zone masks,
+ * then the left/right split bits. Note the status is zeroed first, so any
+ * flag not present in pa_v2_config->flags is cleared.
+ */
+static void pp_update_pa_v2_sts(struct pp_sts_type *pp_sts,
+				struct mdp_pa_v2_data *pa_v2_config)
+{
+	pp_sts->pa_sts = 0;
+	/* PA STS update */
+	if (pa_v2_config->flags & MDP_PP_OPS_ENABLE)
+		pp_sts->pa_sts |= PP_STS_ENABLE;
+	else
+		pp_sts->pa_sts &= ~PP_STS_ENABLE;
+
+	/* Global HSV STS update */
+	if (pa_v2_config->flags & MDP_PP_PA_HUE_MASK)
+		pp_sts->pa_sts |= PP_STS_PA_HUE_MASK;
+	if (pa_v2_config->flags & MDP_PP_PA_SAT_MASK)
+		pp_sts->pa_sts |= PP_STS_PA_SAT_MASK;
+	if (pa_v2_config->flags & MDP_PP_PA_VAL_MASK)
+		pp_sts->pa_sts |= PP_STS_PA_VAL_MASK;
+	if (pa_v2_config->flags & MDP_PP_PA_CONT_MASK)
+		pp_sts->pa_sts |= PP_STS_PA_CONT_MASK;
+	if (pa_v2_config->flags & MDP_PP_PA_MEM_PROTECT_EN)
+		pp_sts->pa_sts |= PP_STS_PA_MEM_PROTECT_EN;
+	if (pa_v2_config->flags & MDP_PP_PA_SAT_ZERO_EXP_EN)
+		pp_sts->pa_sts |= PP_STS_PA_SAT_ZERO_EXP_EN;
+
+	/* Memory Color STS update */
+	if (pa_v2_config->flags & MDP_PP_PA_MEM_COL_SKIN_MASK)
+		pp_sts->pa_sts |= PP_STS_PA_MEM_COL_SKIN_MASK;
+	if (pa_v2_config->flags & MDP_PP_PA_MEM_COL_SKY_MASK)
+		pp_sts->pa_sts |= PP_STS_PA_MEM_COL_SKY_MASK;
+	if (pa_v2_config->flags & MDP_PP_PA_MEM_COL_FOL_MASK)
+		pp_sts->pa_sts |= PP_STS_PA_MEM_COL_FOL_MASK;
+
+	/* Six Zone STS update */
+	if (pa_v2_config->flags & MDP_PP_PA_SIX_ZONE_HUE_MASK)
+		pp_sts->pa_sts |= PP_STS_PA_SIX_ZONE_HUE_MASK;
+	if (pa_v2_config->flags & MDP_PP_PA_SIX_ZONE_SAT_MASK)
+		pp_sts->pa_sts |= PP_STS_PA_SIX_ZONE_SAT_MASK;
+	if (pa_v2_config->flags & MDP_PP_PA_SIX_ZONE_VAL_MASK)
+		pp_sts->pa_sts |= PP_STS_PA_SIX_ZONE_VAL_MASK;
+
+	pp_sts_set_split_bits(&pp_sts->pa_sts, pa_v2_config->flags);
+}
+
+/*
+ * Program Polynomial Colour Correction (PCC) registers when the PCC dirty
+ * flag is set: writes the coefficient registers via pp_update_pcc_regs(),
+ * then updates enable/disable and split status bits from pcc_config->ops.
+ */
+static void pp_pcc_config(unsigned long flags, char __iomem *addr,
+				struct pp_sts_type *pp_sts,
+				struct mdp_pcc_cfg_data *pcc_config)
+{
+	if (flags & PP_FLAGS_DIRTY_PCC) {
+		if (pcc_config->ops & MDP_PP_OPS_WRITE)
+			pp_update_pcc_regs(addr, pcc_config);
+
+		if (pcc_config->ops & MDP_PP_OPS_DISABLE)
+			pp_sts->pcc_sts &= ~PP_STS_ENABLE;
+		else if (pcc_config->ops & MDP_PP_OPS_ENABLE)
+			pp_sts->pcc_sts |= PP_STS_ENABLE;
+		pp_sts_set_split_bits(&pp_sts->pcc_sts, pcc_config->ops);
+	}
+}
+
+/*
+ * Program the Inverse Gamma Correction (IGC) LUT and status. Selects the
+ * active table index: 1 for ROM0, 2 for ROM1 (either implies enable),
+ * 0 for the RAM table. Enable/disable and split bits then follow
+ * igc_config->ops.
+ *
+ * NOTE(review): unlike its siblings, this function never tests 'flags'
+ * (PP_FLAGS_DIRTY_IGC) -- the dirty gating presumably happens at the
+ * caller; confirm before relying on it.
+ */
+static void pp_igc_config(unsigned long flags, char __iomem *addr,
+				struct pp_sts_type *pp_sts,
+				struct mdp_igc_lut_data *igc_config,
+				u32 pipe_num, u32 pipe_cnt)
+{
+	u32 tbl_idx;
+
+	if (igc_config->ops & MDP_PP_OPS_WRITE)
+		pp_update_igc_lut(igc_config, addr, pipe_num,
+				 pipe_cnt);
+
+	if (igc_config->ops & MDP_PP_IGC_FLAG_ROM0) {
+		pp_sts->igc_sts |= PP_STS_ENABLE;
+		tbl_idx = 1;
+	} else if (igc_config->ops & MDP_PP_IGC_FLAG_ROM1) {
+		pp_sts->igc_sts |= PP_STS_ENABLE;
+		tbl_idx = 2;
+	} else {
+		tbl_idx = 0;
+	}
+	pp_sts->igc_tbl_idx = tbl_idx;
+	if (igc_config->ops & MDP_PP_OPS_DISABLE)
+		pp_sts->igc_sts &= ~PP_STS_ENABLE;
+	else if (igc_config->ops & MDP_PP_OPS_ENABLE)
+		pp_sts->igc_sts |= PP_STS_ENABLE;
+	pp_sts_set_split_bits(&pp_sts->igc_sts, igc_config->ops);
+}
+
+/* Apply a histogram-LUT (enhancement) configuration when marked dirty. */
+static void pp_enhist_config(unsigned long flags, char __iomem *addr,
+				struct pp_sts_type *pp_sts,
+				struct mdp_hist_lut_data *enhist_cfg)
+{
+	if (!(flags & PP_FLAGS_DIRTY_ENHIST))
+		return;
+
+	if (enhist_cfg->ops & MDP_PP_OPS_WRITE)
+		pp_update_hist_lut(addr, enhist_cfg);
+
+	/* DISABLE beats ENABLE when both ops bits are present. */
+	if (enhist_cfg->ops & MDP_PP_OPS_DISABLE)
+		pp_sts->enhist_sts &= ~PP_STS_ENABLE;
+	else if (enhist_cfg->ops & MDP_PP_OPS_ENABLE)
+		pp_sts->enhist_sts |= PP_STS_ENABLE;
+}
+
+/*
+ * Program the sharpening block and track its enable state.
+ * Note: this function does no error checking on its input parameters.
+ */
+static void pp_sharp_config(char __iomem *addr,
+				struct pp_sts_type *pp_sts,
+				struct mdp_sharp_cfg *sharp_config)
+{
+	u32 ops = sharp_config->flags;
+
+	if (ops & MDP_PP_OPS_WRITE) {
+		/* Four consecutive 32-bit registers: strength first,
+		 * then the three thresholds.
+		 */
+		writel_relaxed(sharp_config->strength, addr + 0x0);
+		writel_relaxed(sharp_config->edge_thr, addr + 0x4);
+		writel_relaxed(sharp_config->smooth_thr, addr + 0x8);
+		writel_relaxed(sharp_config->noise_thr, addr + 0xC);
+	}
+
+	if (ops & MDP_PP_OPS_DISABLE)
+		pp_sts->sharp_sts &= ~PP_STS_ENABLE;
+	else if (ops & MDP_PP_OPS_ENABLE)
+		pp_sts->sharp_sts |= PP_STS_ENABLE;
+}
+
+/* Derive the VIG pipe opmode bits from the current PA / hist-LUT status. */
+static void pp_vig_pipe_opmode_config(struct pp_sts_type *pp_sts, u32 *opmode)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	/* Rev 103+ hardware carries the full PA v2 mask set in the opmode
+	 * register; older revisions only have the plain PA enable bit.
+	 */
+	if (mdata->mdp_rev >= MDSS_MDP_HW_REV_103)
+		pp_update_pa_v2_vig_opmode(pp_sts, opmode);
+	else if (pp_sts->pa_sts & PP_STS_ENABLE)
+		*opmode |= MDSS_MDP_VIG_OP_PA_EN;
+
+	/* The histogram LUT requires PA to be enabled alongside it. */
+	if (pp_sts->enhist_sts & PP_STS_ENABLE)
+		*opmode |= MDSS_MDP_VIG_OP_HIST_LUTV_EN |
+			   MDSS_MDP_VIG_OP_PA_EN;
+}
+
+/*
+ * pp_vig_pipe_setup() - per-VIG-pipe post-processing configuration.
+ *
+ * Programs the CSC path (three mutually exclusive schemes depending on HW
+ * revision / overlay flags), then histogram, PA (v1 or v2), hist-LUT and
+ * PCC blocks, and finally folds the resulting enable bits into *op, the
+ * VIG opmode accumulator. Returns 0; PCC errors are logged but not fatal.
+ */
+static int pp_vig_pipe_setup(struct mdss_mdp_pipe *pipe, u32 *op)
+{
+	unsigned long flags = 0;
+	char __iomem *offset;
+	struct mdss_data_type *mdata;
+	u32 dcm_state = DCM_UNINIT, current_opmode, csc_reset;
+	int ret = 0;
+	u32 csc_op;
+
+	pr_debug("pnum=%x\n", pipe->num);
+
+	mdss_mdp_pp_get_dcm_state(pipe, &dcm_state);
+
+	mdata = mdss_mdp_get_mdata();
+	/* Rev 3.0.x uses the dedicated CSC_10 block with its own opmode
+	 * register instead of bits in the shared VIG opmode word.
+	 */
+	if (IS_MDSS_MAJOR_MINOR_SAME(mdata->mdp_rev, MDSS_MDP_HW_REV_301) ||
+	    IS_MDSS_MAJOR_MINOR_SAME(mdata->mdp_rev, MDSS_MDP_HW_REV_300)) {
+		if (pipe->src_fmt->is_yuv) {
+			/* TODO: check csc cfg from PP block */
+			mdss_mdp_csc_setup(MDSS_MDP_BLOCK_SSPP_10, pipe->num,
+			pp_vig_csc_pipe_val(pipe));
+			csc_op = ((0 << 2) |	/* DST_DATA=RGB */
+					  (1 << 1) |	/* SRC_DATA=YCBCR*/
+					  (1 << 0));	/* CSC_10_EN */
+		} else {
+				csc_op = 0; /* CSC_10_DISABLE */
+		}
+		writel_relaxed(csc_op, pipe->base +
+		MDSS_MDP_REG_VIG_CSC_10_OP_MODE);
+	} else if ((pipe->flags & MDP_OVERLAY_PP_CFG_EN) &&
+	    (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_CSC_CFG)) {
+		/* User-supplied CSC: translate the cfg flags into opmode
+		 * bits 17..19 (enable / YUV-in / YUV-out).
+		 */
+		*op |= !!(pipe->pp_cfg.csc_cfg.flags &
+				MDP_CSC_FLAG_ENABLE) << 17;
+		*op |= !!(pipe->pp_cfg.csc_cfg.flags &
+				MDP_CSC_FLAG_YUV_IN) << 18;
+		*op |= !!(pipe->pp_cfg.csc_cfg.flags &
+				MDP_CSC_FLAG_YUV_OUT) << 19;
+		/*
+		 * TODO: Allow pipe to be programmed whenever new CSC is
+		 * applied (i.e. dirty bit)
+		 */
+		mdss_mdp_csc_setup_data(MDSS_MDP_BLOCK_SSPP, pipe->num,
+				&pipe->pp_cfg.csc_cfg);
+	} else if (pipe->src_fmt->is_yuv) {
+		/* Default YUV->RGB conversion when no explicit CSC given. */
+		*op |= (0 << 19) |	/* DST_DATA=RGB */
+			(1 << 18) |	/* SRC_DATA=YCBCR */
+			(1 << 17);	/* CSC_1_EN */
+		/*
+		 * TODO: Needs to be part of dirty bit logic: if there
+		 * is a previously configured pipe need to re-configure
+		 * CSC matrix
+		 */
+		mdss_mdp_csc_setup(MDSS_MDP_BLOCK_SSPP, pipe->num,
+			   pp_vig_csc_pipe_val(pipe));
+	}
+
+	/* Update CSC state only if tuning mode is enable */
+	if (dcm_state == DTM_ENTER) {
+		/* Reset bit 16 to 19 for CSC_STATE in VIG_OP_MODE */
+		csc_reset = 0xFFF0FFFF;
+		current_opmode = readl_relaxed(pipe->base +
+						MDSS_MDP_REG_VIG_OP_MODE);
+		*op |= (current_opmode & csc_reset);
+		/* In tuning mode, skip all remaining PP programming. */
+		return 0;
+	}
+
+	/* Histogram collection enabled checked inside pp_hist_setup */
+	pp_hist_setup(op, MDSS_PP_SSPP_CFG | pipe->num, pipe->mixer_left,
+			&pipe->pp_res.pp_sts);
+
+	if (!(pipe->flags & MDP_OVERLAY_PP_CFG_EN)) {
+		pr_debug("Overlay PP CFG enable not set\n");
+		return 0;
+	}
+
+	/* Legacy PA (v1) only on pre-103 hardware. */
+	if ((pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_PA_CFG) &&
+			(mdata->mdp_rev < MDSS_MDP_HW_REV_103)) {
+		flags = PP_FLAGS_DIRTY_PA;
+		pp_pa_config(flags,
+				pipe->base + MDSS_MDP_REG_VIG_PA_BASE,
+				&pipe->pp_res.pp_sts,
+				&pipe->pp_cfg.pa_cfg);
+	}
+	/* PA v2 on 103+; a driver-specific hook takes precedence if set. */
+	if ((pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_PA_V2_CFG) &&
+			(mdata->mdp_rev >= MDSS_MDP_HW_REV_103)) {
+		flags = PP_FLAGS_DIRTY_PA;
+		if (!pp_ops[PA].pp_set_config)
+			pp_pa_v2_config(flags,
+				pipe->base + MDSS_MDP_REG_VIG_PA_BASE,
+				&pipe->pp_res.pp_sts,
+				&pipe->pp_cfg.pa_v2_cfg,
+				PP_SSPP);
+		else
+			pp_ops[PA].pp_set_config(pipe->base,
+				&pipe->pp_res.pp_sts,
+				&pipe->pp_cfg.pa_v2_cfg_data,
+				SSPP_VIG);
+	}
+
+	if (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_HIST_LUT_CFG) {
+		flags = PP_FLAGS_DIRTY_ENHIST;
+		if (!pp_ops[HIST_LUT].pp_set_config) {
+			pp_enhist_config(flags,
+				pipe->base + MDSS_MDP_REG_VIG_HIST_LUT_BASE,
+				&pipe->pp_res.pp_sts,
+				&pipe->pp_cfg.hist_lut_cfg);
+			/* Hist LUT needs PA; if PA is off, zero the first
+			 * four PA registers so it runs with neutral values.
+			 */
+			if ((pipe->pp_res.pp_sts.enhist_sts & PP_STS_ENABLE) &&
+			    !(pipe->pp_res.pp_sts.pa_sts & PP_STS_ENABLE)) {
+				/* Program default value */
+				offset = pipe->base + MDSS_MDP_REG_VIG_PA_BASE;
+				writel_relaxed(0, offset);
+				writel_relaxed(0, offset + 4);
+				writel_relaxed(0, offset + 8);
+				writel_relaxed(0, offset + 12);
+			}
+		} else {
+			pp_ops[HIST_LUT].pp_set_config(pipe->base,
+				&pipe->pp_res.pp_sts,
+				&pipe->pp_cfg.hist_lut_cfg,
+				SSPP_VIG);
+		}
+	}
+
+	if (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_PCC_CFG) {
+		ret = pp_update_pcc_pipe_setup(pipe, SSPP_VIG);
+		if (ret)
+			pr_err("error in enabling the pcc ret %d pipe type %d pipe num %d\n",
+				ret, pipe->type, pipe->num);
+	}
+	/* Fold all PP status bits into the caller's opmode accumulator. */
+	if (pp_driver_ops.pp_opmode_config)
+		pp_driver_ops.pp_opmode_config(SSPP_VIG, &pipe->pp_res.pp_sts,
+					       op, 0);
+	else
+		pp_vig_pipe_opmode_config(&pipe->pp_res.pp_sts, op);
+
+	return 0;
+}
+
+/* Translate PA v2 status bits into the matching VIG opmode bits. */
+static void pp_update_pa_v2_vig_opmode(struct pp_sts_type *pp_sts,
+				u32 *opmode)
+{
+	/* One-to-one status-bit -> opmode-bit mapping table. */
+	static const struct {
+		u32 sts_bit;
+		u32 op_bit;
+	} pa_map[] = {
+		{ PP_STS_ENABLE, MDSS_MDP_VIG_OP_PA_EN },
+		{ PP_STS_PA_HUE_MASK, MDSS_MDP_VIG_OP_PA_HUE_MASK },
+		{ PP_STS_PA_SAT_MASK, MDSS_MDP_VIG_OP_PA_SAT_MASK },
+		{ PP_STS_PA_VAL_MASK, MDSS_MDP_VIG_OP_PA_VAL_MASK },
+		{ PP_STS_PA_CONT_MASK, MDSS_MDP_VIG_OP_PA_CONT_MASK },
+		{ PP_STS_PA_MEM_PROTECT_EN,
+				MDSS_MDP_VIG_OP_PA_MEM_PROTECT_EN },
+		{ PP_STS_PA_SAT_ZERO_EXP_EN,
+				MDSS_MDP_VIG_OP_PA_SAT_ZERO_EXP_EN },
+		{ PP_STS_PA_MEM_COL_SKIN_MASK,
+				MDSS_MDP_VIG_OP_PA_MEM_COL_SKIN_MASK },
+		{ PP_STS_PA_MEM_COL_SKY_MASK,
+				MDSS_MDP_VIG_OP_PA_MEM_COL_SKY_MASK },
+		{ PP_STS_PA_MEM_COL_FOL_MASK,
+				MDSS_MDP_VIG_OP_PA_MEM_COL_FOL_MASK },
+	};
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pa_map); i++)
+		if (pp_sts->pa_sts & pa_map[i].sts_bit)
+			*opmode |= pa_map[i].op_bit;
+}
+
+/* Post-processing setup for an RGB pipe: only PCC is supported here. */
+static int pp_rgb_pipe_setup(struct mdss_mdp_pipe *pipe, u32 *op)
+{
+	int rc;
+
+	if (!pipe) {
+		pr_err("invalid param pipe %pK\n", pipe);
+		return -EINVAL;
+	}
+
+	if (!(pipe->flags & MDP_OVERLAY_PP_CFG_EN) ||
+	    !(pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_PCC_CFG))
+		return 0;
+
+	/* PCC failure is logged but does not fail the pipe setup. */
+	rc = pp_update_pcc_pipe_setup(pipe, SSPP_RGB);
+	if (rc)
+		pr_err("error in enabling the pcc ret %d pipe type %d pipe num %d\n",
+			rc, pipe->type, pipe->num);
+	return 0;
+}
+
+/* Post-processing setup for a DMA pipe: only PCC is supported here. */
+static int pp_dma_pipe_setup(struct mdss_mdp_pipe *pipe, u32 *op)
+{
+	int rc;
+
+	if (!pipe) {
+		pr_err("invalid param pipe %pK\n", pipe);
+		return -EINVAL;
+	}
+
+	if (!(pipe->flags & MDP_OVERLAY_PP_CFG_EN) ||
+	    !(pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_PCC_CFG))
+		return 0;
+
+	/* PCC failure is logged but does not fail the pipe setup. */
+	rc = pp_update_pcc_pipe_setup(pipe, SSPP_DMA);
+	if (rc)
+		pr_err("error in enabling the pcc ret %d pipe type %d pipe num %d\n",
+			rc, pipe->type, pipe->num);
+	return 0;
+}
+
+/*
+ * mdss_mdp_qseed2_setup() - configure legacy QSEED2 scaling for one pipe.
+ *
+ * Decides per-axis whether scaling is needed (size mismatch, chroma
+ * subsampling, or active sharpening on YUV), picks filter modes, and
+ * programs phase-init/phase-step registers. Returns 0 on success,
+ * -EINVAL on unsupported scaling (DMA/cursor pipes, excess downscale).
+ */
+static int mdss_mdp_qseed2_setup(struct mdss_mdp_pipe *pipe)
+{
+	u32 scale_config = 0;
+	int init_phasex = 0, init_phasey = 0;
+	int phasex_step = 0, phasey_step = 0;
+	u32 chroma_sample;
+	u32 filter_mode;
+	struct mdss_data_type *mdata;
+	u32 src_w, src_h;
+	u32 dcm_state = DCM_UNINIT;
+	u32 chroma_shift_x = 0, chroma_shift_y = 0;
+
+	pr_debug("pipe=%d, change pxl ext=%d\n", pipe->num,
+			pipe->scaler.enable);
+	mdata = mdss_mdp_get_mdata();
+
+	/* DMA and cursor pipes have no scaler: reject any size change. */
+	if (pipe->type == MDSS_MDP_PIPE_TYPE_DMA ||
+	    pipe->type == MDSS_MDP_PIPE_TYPE_CURSOR) {
+		if (pipe->dst.h != pipe->src.h || pipe->dst.w != pipe->src.w) {
+			pr_err("no scaling supported on dma/cursor pipe, num:%d\n",
+					pipe->num);
+			return -EINVAL;
+		} else {
+			return 0;
+		}
+	}
+
+	mdss_mdp_pp_get_dcm_state(pipe, &dcm_state);
+
+	/* Content-adaptive filtering for YUV on rev 102+; else bilinear. */
+	if (mdata->mdp_rev >= MDSS_MDP_HW_REV_102 && pipe->src_fmt->is_yuv)
+		filter_mode = MDSS_MDP_SCALE_FILTER_CA;
+	else
+		filter_mode = MDSS_MDP_SCALE_FILTER_BIL;
+
+	/* Effective source size after decimation. */
+	src_w = DECIMATED_DIMENSION(pipe->src.w, pipe->horz_deci);
+	src_h = DECIMATED_DIMENSION(pipe->src.h, pipe->vert_deci);
+
+	/* A 90-degree rotation swaps the chroma subsampling orientation. */
+	chroma_sample = pipe->src_fmt->chroma_sample;
+	if (pipe->flags & MDP_SOURCE_ROTATED_90) {
+		if (chroma_sample == MDSS_MDP_CHROMA_H1V2)
+			chroma_sample = MDSS_MDP_CHROMA_H2V1;
+		else if (chroma_sample == MDSS_MDP_CHROMA_H2V1)
+			chroma_sample = MDSS_MDP_CHROMA_H1V2;
+	}
+
+	/* Fall back to default sharpening when none was supplied. */
+	if (!(pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_SHARP_CFG)) {
+		pipe->pp_cfg.sharp_cfg.flags = MDP_PP_OPS_ENABLE |
+			MDP_PP_OPS_WRITE;
+		pipe->pp_cfg.sharp_cfg.strength = SHARP_STRENGTH_DEFAULT;
+		pipe->pp_cfg.sharp_cfg.edge_thr = SHARP_EDGE_THR_DEFAULT;
+		pipe->pp_cfg.sharp_cfg.smooth_thr = SHARP_SMOOTH_THR_DEFAULT;
+		pipe->pp_cfg.sharp_cfg.noise_thr = SHARP_NOISE_THR_DEFAULT;
+	}
+
+	/* Sharpen only YUV content that is not being downscaled, and never
+	 * while display tuning mode is being entered.
+	 */
+	if (dcm_state != DTM_ENTER &&
+		((pipe->src_fmt->is_yuv) &&
+		!((pipe->dst.w < src_w) || (pipe->dst.h < src_h)))) {
+		pp_sharp_config(pipe->base +
+				MDSS_MDP_REG_VIG_QSEED2_SHARP,
+				&pipe->pp_res.pp_sts,
+				&pipe->pp_cfg.sharp_cfg);
+	}
+
+	/* Vertical scaling path. */
+	if ((src_h != pipe->dst.h) ||
+	    (pipe->src_fmt->is_yuv &&
+			(pipe->pp_res.pp_sts.sharp_sts & PP_STS_ENABLE)) ||
+	    (chroma_sample == MDSS_MDP_CHROMA_420) ||
+	    (chroma_sample == MDSS_MDP_CHROMA_H1V2) ||
+	    (pipe->scaler.enable && (src_h != pipe->dst.h))) {
+		pr_debug("scale y - src_h=%d dst_h=%d\n", src_h, pipe->dst.h);
+
+		if ((src_h / MAX_DOWNSCALE_RATIO) > pipe->dst.h) {
+			pr_err("too much downscaling height=%d->%d\n",
+			       src_h, pipe->dst.h);
+			return -EINVAL;
+		}
+
+		scale_config |= MDSS_MDP_SCALEY_EN;
+		phasey_step = pipe->scaler.phase_step_y[0];
+		init_phasey = pipe->scaler.init_phase_y[0];
+
+		if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG) {
+			/* Subsampled chroma is upsampled 2x vertically. */
+			if (!pipe->vert_deci &&
+			    ((chroma_sample == MDSS_MDP_CHROMA_420) ||
+			    (chroma_sample == MDSS_MDP_CHROMA_H1V2)))
+				chroma_shift_y = 1; /* 2x upsample chroma */
+
+			/* Upscale: bilinear/CA; downscale: PCMN filter. */
+			if (src_h <= pipe->dst.h)
+				scale_config |= /* G/Y, A */
+					(filter_mode << 10) |
+					(MDSS_MDP_SCALE_FILTER_BIL << 18);
+			else
+				scale_config |= /* G/Y, A */
+					(MDSS_MDP_SCALE_FILTER_PCMN << 10) |
+					(MDSS_MDP_SCALE_FILTER_PCMN << 18);
+
+			if ((src_h >> chroma_shift_y) <= pipe->dst.h)
+				scale_config |= /* CrCb */
+					(MDSS_MDP_SCALE_FILTER_BIL << 14);
+			else
+				scale_config |= /* CrCb */
+					(MDSS_MDP_SCALE_FILTER_PCMN << 14);
+
+			writel_relaxed(init_phasey, pipe->base +
+				MDSS_MDP_REG_VIG_QSEED2_C12_INIT_PHASEY);
+			writel_relaxed(phasey_step >> chroma_shift_y,
+				pipe->base +
+				MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPY);
+		} else {
+			if (src_h <= pipe->dst.h)
+				scale_config |= /* RGB, A */
+					(MDSS_MDP_SCALE_FILTER_BIL << 10) |
+					(MDSS_MDP_SCALE_FILTER_BIL << 18);
+			else
+				scale_config |= /* RGB, A */
+					(MDSS_MDP_SCALE_FILTER_PCMN << 10) |
+					(MDSS_MDP_SCALE_FILTER_PCMN << 18);
+		}
+	}
+
+	/* Horizontal scaling path (mirror of the vertical logic above). */
+	if ((src_w != pipe->dst.w) ||
+	    (pipe->src_fmt->is_yuv &&
+			(pipe->pp_res.pp_sts.sharp_sts & PP_STS_ENABLE)) ||
+	    (chroma_sample == MDSS_MDP_CHROMA_420) ||
+	    (chroma_sample == MDSS_MDP_CHROMA_H2V1) ||
+	    (pipe->scaler.enable && (src_w != pipe->dst.w))) {
+		pr_debug("scale x - src_w=%d dst_w=%d\n", src_w, pipe->dst.w);
+
+		if ((src_w / MAX_DOWNSCALE_RATIO) > pipe->dst.w) {
+			pr_err("too much downscaling width=%d->%d\n",
+			       src_w, pipe->dst.w);
+			return -EINVAL;
+		}
+
+		scale_config |= MDSS_MDP_SCALEX_EN;
+		init_phasex = pipe->scaler.init_phase_x[0];
+		phasex_step = pipe->scaler.phase_step_x[0];
+
+		if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG) {
+			if (!pipe->horz_deci &&
+			    ((chroma_sample == MDSS_MDP_CHROMA_420) ||
+			    (chroma_sample == MDSS_MDP_CHROMA_H2V1)))
+				chroma_shift_x = 1; /* 2x upsample chroma */
+
+			if (src_w <= pipe->dst.w)
+				scale_config |= /* G/Y, A */
+					(filter_mode << 8) |
+					(MDSS_MDP_SCALE_FILTER_BIL << 16);
+			else
+				scale_config |= /* G/Y, A */
+					(MDSS_MDP_SCALE_FILTER_PCMN << 8) |
+					(MDSS_MDP_SCALE_FILTER_PCMN << 16);
+
+			if ((src_w >> chroma_shift_x) <= pipe->dst.w)
+				scale_config |= /* CrCb */
+					(MDSS_MDP_SCALE_FILTER_BIL << 12);
+			else
+				scale_config |= /* CrCb */
+					(MDSS_MDP_SCALE_FILTER_PCMN << 12);
+
+			writel_relaxed(init_phasex, pipe->base +
+				MDSS_MDP_REG_VIG_QSEED2_C12_INIT_PHASEX);
+			writel_relaxed(phasex_step >> chroma_shift_x,
+				pipe->base +
+				MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPX);
+		} else {
+			if (src_w <= pipe->dst.w)
+				scale_config |= /* RGB, A */
+					(MDSS_MDP_SCALE_FILTER_BIL << 8) |
+					(MDSS_MDP_SCALE_FILTER_BIL << 16);
+			else
+				scale_config |= /* RGB, A */
+					(MDSS_MDP_SCALE_FILTER_PCMN << 8) |
+					(MDSS_MDP_SCALE_FILTER_PCMN << 16);
+		}
+	}
+
+	/* Program phase registers: caller-provided values when pixel-ext
+	 * scaling is enabled, otherwise the values computed above.
+	 */
+	if (pipe->scaler.enable) {
+		if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG) {
+			/*program x,y initial phase and phase step*/
+			writel_relaxed(pipe->scaler.init_phase_x[0],
+				pipe->base +
+				MDSS_MDP_REG_VIG_QSEED2_C03_INIT_PHASEX);
+			writel_relaxed(pipe->scaler.phase_step_x[0],
+				pipe->base +
+				MDSS_MDP_REG_VIG_QSEED2_C03_PHASESTEPX);
+			writel_relaxed(pipe->scaler.init_phase_x[1],
+				pipe->base +
+				MDSS_MDP_REG_VIG_QSEED2_C12_INIT_PHASEX);
+			writel_relaxed(pipe->scaler.phase_step_x[1],
+				pipe->base +
+				MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPX);
+
+			writel_relaxed(pipe->scaler.init_phase_y[0],
+				pipe->base +
+				MDSS_MDP_REG_VIG_QSEED2_C03_INIT_PHASEY);
+			writel_relaxed(pipe->scaler.phase_step_y[0],
+				pipe->base +
+				MDSS_MDP_REG_VIG_QSEED2_C03_PHASESTEPY);
+			writel_relaxed(pipe->scaler.init_phase_y[1],
+				pipe->base +
+				MDSS_MDP_REG_VIG_QSEED2_C12_INIT_PHASEY);
+			writel_relaxed(pipe->scaler.phase_step_y[1],
+				pipe->base +
+				MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPY);
+		} else {
+
+			writel_relaxed(pipe->scaler.phase_step_x[0],
+				pipe->base +
+				MDSS_MDP_REG_SCALE_PHASE_STEP_X);
+			writel_relaxed(pipe->scaler.phase_step_y[0],
+				pipe->base +
+				MDSS_MDP_REG_SCALE_PHASE_STEP_Y);
+			writel_relaxed(pipe->scaler.init_phase_x[0],
+				pipe->base +
+				MDSS_MDP_REG_SCALE_INIT_PHASE_X);
+			writel_relaxed(pipe->scaler.init_phase_y[0],
+				pipe->base +
+				MDSS_MDP_REG_SCALE_INIT_PHASE_Y);
+		}
+	} else {
+		if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG) {
+			/*program x,y initial phase and phase step*/
+			writel_relaxed(0,
+				pipe->base +
+				MDSS_MDP_REG_VIG_QSEED2_C03_INIT_PHASEX);
+			writel_relaxed(init_phasex,
+				pipe->base +
+				MDSS_MDP_REG_VIG_QSEED2_C12_INIT_PHASEX);
+			writel_relaxed(phasex_step,
+				pipe->base +
+				MDSS_MDP_REG_VIG_QSEED2_C03_PHASESTEPX);
+			writel_relaxed(phasex_step >> chroma_shift_x,
+				pipe->base +
+				MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPX);
+
+			writel_relaxed(0,
+				pipe->base +
+				MDSS_MDP_REG_VIG_QSEED2_C03_INIT_PHASEY);
+			writel_relaxed(init_phasey,
+				pipe->base +
+				MDSS_MDP_REG_VIG_QSEED2_C12_INIT_PHASEY);
+			writel_relaxed(phasey_step,
+				pipe->base +
+				MDSS_MDP_REG_VIG_QSEED2_C03_PHASESTEPY);
+			writel_relaxed(phasey_step >> chroma_shift_y,
+				pipe->base +
+				MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPY);
+		} else {
+
+			writel_relaxed(phasex_step,
+				pipe->base +
+				MDSS_MDP_REG_SCALE_PHASE_STEP_X);
+			writel_relaxed(phasey_step,
+				pipe->base +
+				MDSS_MDP_REG_SCALE_PHASE_STEP_Y);
+			writel_relaxed(0,
+				pipe->base +
+				MDSS_MDP_REG_SCALE_INIT_PHASE_X);
+			writel_relaxed(0,
+				pipe->base +
+				MDSS_MDP_REG_SCALE_INIT_PHASE_Y);
+		}
+	}
+
+	/* Commit the accumulated filter/enable bits last. */
+	writel_relaxed(scale_config, pipe->base +
+	   MDSS_MDP_REG_SCALE_CONFIG);
+
+	return 0;
+}
+
+/**
+ * mdss_mdp_scaler_lut_cfg() - program the QSEED3 scaler coefficient LUTs.
+ * @scaler: scaler configuration selecting which LUTs to write and which
+ *          table indices to use
+ * @offset: base of the scaler's LUT register region
+ *
+ * Writes the direction / circular / separable filter tables requested by
+ * scaler->lut_flag and optionally triggers a LUT bank swap.
+ * Returns 0 on success, -EINVAL when no valid LUT table is available.
+ */
+int mdss_mdp_scaler_lut_cfg(struct mdp_scale_data_v2 *scaler,
+						char __iomem *offset)
+{
+	int i, filter;
+	struct mdss_data_type *mdata;
+	char __iomem *lut_addr;
+	uint32_t *lut_type[QSEED3_FILTERS] = {NULL, NULL, NULL, NULL, NULL};
+	uint32_t j, lut_offset, lut_len;
+	struct mdss_mdp_qseed3_lut_tbl *lut_tbl;
+	/* for each filter, per-region {entry count / 4, register offset} */
+	static uint32_t offset_tbl[QSEED3_FILTERS][QSEED3_LUT_REGIONS][2] = {
+		{{18, 0x000}, {12, 0x120}, {12, 0x1E0}, {8, 0x2A0} },
+		{{6, 0x320}, {3, 0x3E0}, {3, 0x440}, {3, 0x4A0} },
+		{{6, 0x380}, {3, 0x410}, {3, 0x470}, {3, 0x4d0} },
+		{{6, 0x500}, {3, 0x5c0}, {3, 0x620}, {3, 0x680} },
+		{{6, 0x560}, {3, 0x5f0}, {3, 0x650}, {3, 0x6b0} },
+	};
+
+	/* NOTE(review): assumes mdata->scaler_off is non-NULL here; callers
+	 * reach this only when MDSS_CAPS_QSEED3 is set - confirm.
+	 */
+	mdata = mdss_mdp_get_mdata();
+	lut_tbl = &mdata->scaler_off->lut_tbl;
+	if ((!lut_tbl) || (!lut_tbl->valid)) {
+		pr_err("%s:Invalid QSEED3 LUT TABLE\n", __func__);
+		return -EINVAL;
+	}
+
+	if (scaler->lut_flag & (SCALER_LUT_DIR_WR | SCALER_LUT_Y_CIR_WR |
+				SCALER_LUT_UV_CIR_WR | SCALER_LUT_Y_SEP_WR |
+				SCALER_LUT_UV_SEP_WR)) {
+
+		/* Resolve a source table for each requested filter plane. */
+		if (scaler->lut_flag & SCALER_LUT_DIR_WR)
+			lut_type[0] = lut_tbl->dir_lut;
+		if (scaler->lut_flag & SCALER_LUT_Y_CIR_WR)
+			lut_type[1] =
+				lut_tbl->cir_lut + scaler->y_rgb_cir_lut_idx *
+				CIR_LUT_COEFFS;
+		if (scaler->lut_flag & SCALER_LUT_UV_CIR_WR)
+			lut_type[2] = lut_tbl->cir_lut +
+				scaler->uv_cir_lut_idx * CIR_LUT_COEFFS;
+		if (scaler->lut_flag & SCALER_LUT_Y_SEP_WR)
+			lut_type[3] =
+				lut_tbl->sep_lut + scaler->y_rgb_sep_lut_idx *
+				SEP_LUT_COEFFS;
+		if (scaler->lut_flag & SCALER_LUT_UV_SEP_WR)
+			lut_type[4] =
+				lut_tbl->sep_lut + scaler->uv_sep_lut_idx *
+				SEP_LUT_COEFFS;
+
+		/* for each filter per plane */
+		for (filter = 0; filter < QSEED3_FILTERS; filter++) {
+			if (!lut_type[filter])
+				continue;
+			lut_offset = 0;
+			/* for each lut region of this filter */
+			for (i = 0; i < QSEED3_LUT_REGIONS; i++) {
+				lut_addr = offset + offset_tbl[filter][i][1];
+				/* entry count is stored divided by four */
+				lut_len = offset_tbl[filter][i][0] << 2;
+				for (j = 0; j < lut_len; j++) {
+					writel_relaxed(
+						lut_type[filter][lut_offset++],
+						lut_addr);
+					lut_addr += 4;
+				}
+			}
+		}
+	}
+
+	/* Ask the HW to switch to the freshly written LUT bank. */
+	if (scaler->lut_flag & SCALER_LUT_SWAP)
+		writel_relaxed(BIT(0), MDSS_MDP_REG_SCALER_COEF_LUT_CTRL +
+				offset);
+
+	return 0;
+}
+
+/* Pack and program the QSEED3 detail-enhancement registers; a disabled
+ * configuration leaves the hardware untouched.
+ */
+static void  mdss_mdp_scaler_detail_enhance_cfg(
+				struct mdp_det_enhance_data *detail_en,
+				char __iomem *offset)
+{
+	uint32_t sharpen_levels, sharpen_ctrl, shape_ctrl;
+	uint32_t thresholds;
+	uint32_t adj_a, adj_b, adj_c;
+
+	if (!detail_en->enable)
+		return;
+
+	/* Pack each register image from its bit fields first. */
+	sharpen_levels = (detail_en->sharpen_level1 & 0x1FF) |
+		((detail_en->sharpen_level2 & 0x1FF) << 16);
+
+	sharpen_ctrl = ((detail_en->limit & 0xF) << 9) |
+		((detail_en->prec_shift & 0x7) << 13) |
+		((detail_en->clip & 0x7) << 16);
+
+	shape_ctrl = (detail_en->thr_quiet & 0xFF)  |
+		((detail_en->thr_dieout & 0x3FF) << 16);
+
+	thresholds = (detail_en->thr_low & 0x3FF)  |
+		((detail_en->thr_high & 0x3FF) << 16);
+
+	adj_a = (detail_en->adjust_a[0] & 0x3FF) |
+		((detail_en->adjust_a[1] & 0x3FF) << 10) |
+		((detail_en->adjust_a[2] & 0x3FF) << 20);
+
+	adj_b = (detail_en->adjust_b[0] & 0x3FF) |
+		((detail_en->adjust_b[1] & 0x3FF) << 10) |
+		((detail_en->adjust_b[2] & 0x3FF) << 20);
+
+	adj_c = (detail_en->adjust_c[0] & 0x3FF) |
+		((detail_en->adjust_c[1] & 0x3FF) << 10) |
+		((detail_en->adjust_c[2] & 0x3FF) << 20);
+
+	/* Then write them out in the original programming order. */
+	writel_relaxed(sharpen_levels,
+			offset + MDSS_MDP_REG_SCALER_DE_SHARPEN);
+	writel_relaxed(sharpen_ctrl,
+			offset + MDSS_MDP_REG_SCALER_DE_SHARPEN_CTL);
+	writel_relaxed(shape_ctrl,
+			offset + MDSS_MDP_REG_SCALER_DE_SHAPE_CTL);
+	writel_relaxed(thresholds,
+			offset + MDSS_MDP_REG_SCALER_DE_THRESHOLD);
+	writel_relaxed(adj_a,
+			offset + MDSS_MDP_REG_SCALER_DE_ADJUST_DATA_0);
+	writel_relaxed(adj_b,
+			offset + MDSS_MDP_REG_SCALER_DE_ADJUST_DATA_1);
+	writel_relaxed(adj_c,
+			offset + MDSS_MDP_REG_SCALER_DE_ADJUST_DATA_2);
+}
+
+/**
+ * mdss_mdp_qseed3_setup() - program the QSEED3 scaler for a pipe.
+ * @pipe: source pipe whose scaler is configured (used when @location is
+ *        SSPP_VIG)
+ * @location: SSPP_VIG for source-pipe scaling, DSPP for destination scaler
+ * @id: destination scaler index (DSPP only)
+ *
+ * Returns 0 on success, -EINVAL on bad arguments or LUT programming
+ * failure, -ENOTSUPP for the not-yet-implemented destination scaler path.
+ */
+int mdss_mdp_qseed3_setup(struct mdss_mdp_pipe *pipe,
+					int location, int id)
+{
+	int rc = 0;
+	struct mdp_scale_data_v2 *scaler;
+	struct mdss_data_type *mdata;
+	char __iomem *offset, *lut_offset;
+	struct mdss_mdp_format_params *fmt;
+	uint32_t op_mode;
+	uint32_t phase_init, preload, src_y_rgb, src_uv, dst;
+
+	mdata = mdss_mdp_get_mdata();
+	/* SRC pipe QSEED3 Configuration */
+	if (location == SSPP_VIG) {
+		scaler = &pipe->scaler;
+		offset = pipe->base + mdata->scaler_off->vig_scaler_off;
+		lut_offset = pipe->base + mdata->scaler_off->vig_scaler_lut_off;
+		fmt = pipe->src_fmt;
+	} else if (location == DSPP) {
+		/* Destination scaler QSEED3 Configuration */
+		if ((mdata->scaler_off->has_dest_scaler) &&
+				(id < mdata->scaler_off->ndest_scalers)) {
+			/* TODO :point to the destination params */
+			scaler = NULL;
+			offset = mdata->scaler_off->dest_base +
+				mdata->scaler_off->dest_scaler_off[id];
+			lut_offset = mdata->scaler_off->dest_base +
+				mdata->scaler_off->dest_scaler_lut_off[id];
+			/*TODO : set pixel fmt to RGB101010 */
+			/* Use the kernel-internal ENOTSUPP code; POSIX
+			 * ENOTSUP is not defined in kernel headers.
+			 */
+			return -ENOTSUPP;
+		} else {
+			return -EINVAL;
+		}
+	} else {
+		return -EINVAL;
+	}
+
+	pr_debug("scaler->enable=%d\n", scaler->enable);
+	/* Read-modify-write: preserve unrelated opmode bits. */
+	op_mode = readl_relaxed(MDSS_MDP_REG_SCALER_OP_MODE +
+			offset);
+
+	if (scaler->enable) {
+		op_mode |= SCALER_EN;
+		op_mode |= (scaler->y_rgb_filter_cfg & 0x3) <<
+			Y_FILTER_CFG;
+
+		/* Chroma plane filtering only applies to YUV sources. */
+		if (fmt->is_yuv) {
+			op_mode |= (1 << SCALER_COLOR_SPACE);
+			op_mode |= (scaler->uv_filter_cfg & 0x3) <<
+				UV_FILTER_CFG;
+		}
+
+		if (fmt->alpha_enable) {
+			op_mode |= SCALER_ALPHA_EN;
+			op_mode |= (scaler->alpha_filter_cfg & 1) <<
+				ALPHA_FILTER_CFG;
+		}
+
+		/* TODO:if src_fmt is 10 bits program the bitwidth
+		 * accordingly
+		 */
+		if (!fmt->unpack_dx_format)
+			op_mode |= 0x1 << SCALER_BIT_WIDTH;
+
+		op_mode |= (scaler->blend_cfg & 1) <<
+			SCALER_BLEND_CFG;
+
+		op_mode |= (scaler->enable & ENABLE_DIRECTION_DETECTION) ?
+			 (1 << SCALER_DIR_EN) : 0;
+
+		/* Pack per-plane initial phases into one register image. */
+		phase_init =
+			((scaler->init_phase_x[0] & PHASE_BITS)
+			 << Y_PHASE_INIT_H) |
+			((scaler->init_phase_y[0] & PHASE_BITS) <<
+			 Y_PHASE_INIT_V) |
+			((scaler->init_phase_x[1] & PHASE_BITS) <<
+			 UV_PHASE_INIT_H) |
+			((scaler->init_phase_y[1] & PHASE_BITS) <<
+			 UV_PHASE_INIT_V);
+
+		preload =
+			((scaler->preload_x[0] & PRELOAD_BITS)
+			 << Y_PRELOAD_H) |
+			((scaler->preload_y[0] & PRELOAD_BITS) <<
+			 Y_PRELOAD_V) |
+			((scaler->preload_x[1] & PRELOAD_BITS) <<
+			 UV_PRELOAD_H) |
+			((scaler->preload_y[1] & PRELOAD_BITS) <<
+			 UV_PRELOAD_V);
+
+		src_y_rgb = (scaler->src_width[0] & 0x1FFFF) |
+			((scaler->src_height[0] & 0x1FFFF) << 16);
+
+		src_uv = (scaler->src_width[1] & 0x1FFFF) |
+			((scaler->src_height[1] & 0x1FFFF) << 16);
+
+		dst = (scaler->dst_width & 0x1FFFF) |
+			((scaler->dst_height & 0x1FFFF) << 16);
+
+		if (scaler->detail_enhance.enable) {
+			mdss_mdp_scaler_detail_enhance_cfg(
+						&scaler->detail_enhance,
+						offset);
+			op_mode |= SCALER_DE_EN;
+		}
+
+		/* LUT Config */
+		if (scaler->lut_flag) {
+			rc = mdss_mdp_scaler_lut_cfg(scaler, lut_offset);
+			if (rc) {
+				pr_err("%s:Failed QSEED3 LUT cfg\n",
+						__func__);
+				return -EINVAL;
+			}
+		}
+
+		writel_relaxed(phase_init,
+				MDSS_MDP_REG_SCALER_PHASE_INIT +
+				offset);
+		writel_relaxed(scaler->phase_step_x[0] &
+				PHASE_STEP_BITS,
+				MDSS_MDP_REG_SCALER_PHASE_STEP_Y_H +
+				offset);
+
+		writel_relaxed(scaler->phase_step_y[0] &
+				PHASE_STEP_BITS,
+				MDSS_MDP_REG_SCALER_PHASE_STEP_Y_V + offset);
+
+		writel_relaxed(scaler->phase_step_x[1] &
+				PHASE_STEP_BITS,
+				MDSS_MDP_REG_SCALER_PHASE_STEP_UV_H + offset);
+
+		writel_relaxed(scaler->phase_step_y[1] &
+				PHASE_STEP_BITS,
+				MDSS_MDP_REG_SCALER_PHASE_STEP_UV_V + offset);
+
+		writel_relaxed(preload, MDSS_MDP_REG_SCALER_PRELOAD +
+				offset);
+		writel_relaxed(src_y_rgb,
+				MDSS_MDP_REG_SCALER_SRC_SIZE_Y_RGB_A +
+				offset);
+		writel_relaxed(src_uv, MDSS_MDP_REG_SCALER_SRC_SIZE_UV
+				+ offset);
+
+		writel_relaxed(dst, MDSS_MDP_REG_SCALER_DST_SIZE +
+				offset);
+	} else {
+		op_mode &= ~SCALER_EN;
+	}
+
+	/* Commit the opmode last so all other registers are in place. */
+	writel_relaxed(op_mode, MDSS_MDP_REG_SCALER_OP_MODE +
+			offset);
+	return rc;
+}
+
+/* Dispatch pipe scaling setup to QSEED3 or legacy QSEED2 per HW caps. */
+static int mdss_mdp_scale_setup(struct mdss_mdp_pipe *pipe)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	return test_bit(MDSS_CAPS_QSEED3, mdata->mdss_caps_map) ?
+		mdss_mdp_qseed3_setup(pipe, SSPP_VIG, 0) :
+		mdss_mdp_qseed2_setup(pipe);
+}
+
+/* Top-level per-pipe PP setup: configure scaling first, then the
+ * pipe-type specific post-processing blocks.
+ */
+int mdss_mdp_pipe_pp_setup(struct mdss_mdp_pipe *pipe, u32 *op)
+{
+	int rc;
+
+	if (!pipe)
+		return -ENODEV;
+
+	rc = mdss_mdp_scale_setup(pipe);
+	if (rc) {
+		pr_err("scale setup on pipe %d type %d failed ret %d\n",
+			pipe->num, pipe->type, rc);
+		return -EINVAL;
+	}
+
+	switch (pipe->type) {
+	case MDSS_MDP_PIPE_TYPE_VIG:
+		return pp_vig_pipe_setup(pipe, op);
+	case MDSS_MDP_PIPE_TYPE_RGB:
+		return pp_rgb_pipe_setup(pipe, op);
+	case MDSS_MDP_PIPE_TYPE_DMA:
+		return pp_dma_pipe_setup(pipe, op);
+	default:
+		pr_debug("no PP setup for pipe type %d\n",
+			 pipe->type);
+		return 0;
+	}
+}
+
+/*
+ * mdss_mdp_pipe_pp_clear() - release per-pipe post-processing state.
+ *
+ * Disables histogram collection on YUV-capable pipes, frees every cached
+ * PP payload (PA / IGC / PCC / hist-LUT), and clears both the owning
+ * pointer and its pp_cfg alias so no stale reference survives, then
+ * zeroes the PP status block for a clean pipe reuse.
+ */
+void mdss_mdp_pipe_pp_clear(struct mdss_mdp_pipe *pipe)
+{
+	struct pp_hist_col_info *hist_info;
+
+	if (!pipe) {
+		pr_err("Invalid pipe context passed, %pK\n",
+			pipe);
+		return;
+	}
+
+	/* Histogram hardware is only present on YUV (VIG) pipes. */
+	if (mdss_mdp_pipe_is_yuv(pipe)) {
+		hist_info = &pipe->pp_res.hist;
+		pp_hist_disable(hist_info);
+	}
+
+	/* Each payload is freed once; the pp_cfg pointer is an alias of
+	 * the pp_res pointer, so both must be NULLed together.
+	 */
+	kfree(pipe->pp_res.pa_cfg_payload);
+	pipe->pp_res.pa_cfg_payload = NULL;
+	pipe->pp_cfg.pa_v2_cfg_data.cfg_payload = NULL;
+	kfree(pipe->pp_res.igc_cfg_payload);
+	pipe->pp_res.igc_cfg_payload = NULL;
+	pipe->pp_cfg.igc_cfg.cfg_payload = NULL;
+	kfree(pipe->pp_res.pcc_cfg_payload);
+	pipe->pp_res.pcc_cfg_payload = NULL;
+	pipe->pp_cfg.pcc_cfg_data.cfg_payload = NULL;
+	kfree(pipe->pp_res.hist_lut_cfg_payload);
+	pipe->pp_res.hist_lut_cfg_payload = NULL;
+	pipe->pp_cfg.hist_lut_cfg.cfg_payload = NULL;
+
+	memset(&pipe->pp_res.pp_sts, 0, sizeof(struct pp_sts_type));
+	pipe->pp_cfg.config_ops = 0;
+}
+
+/*
+ * mdss_mdp_pipe_sspp_setup() - program the SSPP IGC LUT for a pipe.
+ *
+ * Resolves the per-pipe-type IGC register base, locates the pipe's index
+ * within its pipe list, applies the IGC configuration (legacy path or
+ * driver hook), and sets the IGC_LUT_EN bit (bit 16) in *op when IGC is
+ * enabled. Returns 0 on success or a negative errno on invalid input.
+ */
+int mdss_mdp_pipe_sspp_setup(struct mdss_mdp_pipe *pipe, u32 *op)
+{
+	int i, ret = 0;
+	unsigned long flags = 0;
+	char __iomem *pipe_base;
+	u32 pipe_num, pipe_cnt;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 current_opmode, location;
+	u32 dcm_state = DCM_UNINIT;
+	struct mdss_mdp_pipe *pipe_list;
+
+	if (pipe == NULL)
+		return -EINVAL;
+
+	mdss_mdp_pp_get_dcm_state(pipe, &dcm_state);
+
+	/* Read IGC state and update the same if tuning mode is enable */
+	if (dcm_state == DTM_ENTER) {
+		current_opmode = readl_relaxed(pipe->base +
+						MDSS_MDP_REG_SSPP_SRC_OP_MODE);
+		/* Bit 16 is the IGC_LUT_EN bit of SRC_OP_MODE. */
+		*op |= (current_opmode & BIT(16));
+		return ret;
+	}
+
+	/*
+	 * TODO: should this function be responsible for masking multiple
+	 * pipes to be written in dual pipe case?
+	 * if so, requires rework of update_igc_lut
+	 */
+	/* Select the IGC register base and pipe list for this pipe type. */
+	switch (pipe->type) {
+	case MDSS_MDP_PIPE_TYPE_VIG:
+		pipe_base = mdata->mdp_base + MDSS_MDP_REG_IGC_VIG_BASE;
+		pipe_cnt = mdata->nvig_pipes;
+		pipe_list = mdata->vig_pipes;
+		location = SSPP_VIG;
+		break;
+	case MDSS_MDP_PIPE_TYPE_RGB:
+		pipe_base = mdata->mdp_base + MDSS_MDP_REG_IGC_RGB_BASE;
+		pipe_cnt = mdata->nrgb_pipes;
+		pipe_list = mdata->rgb_pipes;
+		location = SSPP_RGB;
+		break;
+	case MDSS_MDP_PIPE_TYPE_DMA:
+		pipe_base = mdata->mdp_base + MDSS_MDP_REG_IGC_DMA_BASE;
+		pipe_cnt = mdata->ndma_pipes;
+		pipe_list = mdata->dma_pipes;
+		location = SSPP_DMA;
+		break;
+	case MDSS_MDP_PIPE_TYPE_CURSOR:
+		/* cursor does not support the feature */
+		return 0;
+	default:
+		pr_err("Invalid pipe type %d\n", pipe->type);
+		return -EINVAL;
+	}
+
+	/* Find this pipe's logical index; the list is strided by the
+	 * number of multirect rectangles per physical pipe.
+	 */
+	for (i = 0, pipe_num = 0; pipe_num < pipe_cnt; pipe_num++) {
+		if (pipe->num == pipe_list[i].num)
+			break;
+		i += pipe->multirect.max_rects;
+	}
+
+	if (pipe_num == pipe_cnt) {
+		pr_err("Invalid pipe num %d pipe type %d\n",
+				pipe->num, pipe->type);
+		return -EINVAL;
+	}
+
+	if (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_IGC_CFG) {
+		flags |= PP_FLAGS_DIRTY_IGC;
+		/* Prefer the driver-specific IGC hook when registered. */
+		if (!pp_ops[IGC].pp_set_config) {
+			pp_igc_config(flags, pipe_base, &pipe->pp_res.pp_sts,
+			      &pipe->pp_cfg.igc_cfg, pipe_num, pipe_cnt);
+		} else {
+			pipe->pp_cfg.igc_cfg.block = pipe_num;
+			pipe_base = mdata->mdp_base +
+				    mdata->pp_block_off.sspp_igc_lut_off;
+			pp_ops[IGC].pp_set_config(pipe_base,
+				 &pipe->pp_res.pp_sts, &pipe->pp_cfg.igc_cfg,
+				 location);
+		}
+	}
+
+	if (pipe->pp_res.pp_sts.igc_sts & PP_STS_ENABLE)
+		*op |= (1 << 16); /* IGC_LUT_EN */
+
+	return ret;
+}
+
+/*
+ * pp_mixer_setup() - apply layer-mixer post-processing (ARGC / GC LUT).
+ *
+ * Programs the per-mixer GC LUT when the display's ARGC dirty flag is
+ * set, records the mixer's flush bit on the controlling CTL, and updates
+ * the mixer opmode GC enable bit whenever the mixer is flushed with ARGC
+ * active. Returns 0 on success, -EINVAL on invalid parameters.
+ */
+static int pp_mixer_setup(struct mdss_mdp_mixer *mixer)
+{
+	u32 flags, disp_num, opmode = 0, lm_bitmask = 0;
+	struct mdp_pgc_lut_data *pgc_config;
+	struct pp_sts_type *pp_sts;
+	struct mdss_mdp_ctl *ctl;
+	char __iomem *addr;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!mixer || !mixer->ctl || !mixer->ctl->mfd || !mdata) {
+		pr_err("invalid parameters, mixer %pK ctl %pK mfd %pK mdata %pK\n",
+			mixer, (mixer ? mixer->ctl : NULL),
+			(mixer ? (mixer->ctl ? mixer->ctl->mfd : NULL) : NULL),
+			mdata);
+		return -EINVAL;
+	}
+	ctl = mixer->ctl;
+	disp_num = ctl->mfd->index;
+
+	/* Out-of-range displays carry no dirty flags. */
+	if (disp_num < MDSS_BLOCK_DISP_NUM)
+		flags = mdss_pp_res->pp_disp_flags[disp_num];
+	else
+		flags = 0;
+
+	/* Pick the CTL flush bit for this mixer: LM3 has a dedicated bit,
+	 * writeback mixers start at bit 9, interface mixers at bit 6.
+	 */
+	if (mixer->num == MDSS_MDP_INTF_LAYERMIXER3)
+		lm_bitmask = BIT(20);
+	else if (mixer->type == MDSS_MDP_MIXER_TYPE_WRITEBACK)
+		lm_bitmask = BIT(9) << mixer->num;
+	else
+		lm_bitmask = BIT(6) << mixer->num;
+
+	pp_sts = &mdss_pp_res->pp_disp_sts[disp_num];
+	/* GC_LUT is in layer mixer */
+	if (flags & PP_FLAGS_DIRTY_ARGC) {
+		/* Driver hook path needs a valid per-HW pgc offset. */
+		if (pp_ops[GC].pp_set_config) {
+			if (mdata->pp_block_off.lm_pgc_off == U32_MAX) {
+				pr_err("invalid pgc offset %d\n", U32_MAX);
+			} else {
+				addr = mixer->base +
+					mdata->pp_block_off.lm_pgc_off;
+				pp_ops[GC].pp_set_config(addr, pp_sts,
+				   &mdss_pp_res->argc_disp_cfg[disp_num], LM);
+			}
+		} else {
+			pgc_config = &mdss_pp_res->argc_disp_cfg[disp_num];
+			if (pgc_config->flags & MDP_PP_OPS_WRITE) {
+				addr = mixer->base +
+					MDSS_MDP_REG_LM_GC_LUT_BASE;
+				pp_update_argc_lut(addr, pgc_config);
+			}
+			if (pgc_config->flags & MDP_PP_OPS_DISABLE)
+				pp_sts->argc_sts &= ~PP_STS_ENABLE;
+			else if (pgc_config->flags & MDP_PP_OPS_ENABLE)
+				pp_sts->argc_sts |= PP_STS_ENABLE;
+		}
+		ctl->flush_bits |= lm_bitmask;
+	}
+
+	/* update LM opmode if LM needs flush */
+	if ((pp_sts->argc_sts & PP_STS_ENABLE) &&
+		(ctl->flush_bits & lm_bitmask)) {
+		if (pp_driver_ops.pp_opmode_config) {
+			pp_driver_ops.pp_opmode_config(LM, pp_sts,
+					&opmode, 0);
+		} else {
+			addr = mixer->base + MDSS_MDP_REG_LM_OP_MODE;
+			opmode = readl_relaxed(addr);
+			opmode |= (1 << 0); /* GC_LUT_EN */
+			writel_relaxed(opmode, addr);
+		}
+	}
+	return 0;
+}
+
+/* Return the register base of interface layer mixer @mixer_num, or an
+ * ERR_PTR when the index is out of range.
+ */
+static char __iomem *mdss_mdp_get_mixer_addr_off(u32 mixer_num)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (mixer_num >= mdata->nmixers_intf) {
+		pr_err("Invalid mixer_num=%d\n", mixer_num);
+		return ERR_PTR(-EINVAL);
+	}
+	return mdata->mixer_intf[mixer_num].base;
+}
+
+/* Return the DSPP register base for @dspp_num, or an ERR_PTR when the
+ * hardware does not provide that DSPP instance.
+ */
+static char __iomem *mdss_mdp_get_dspp_addr_off(u32 dspp_num)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (dspp_num >= mdata->ndspp) {
+		pr_debug("destination not supported dspp_num=%d\n",
+			  dspp_num);
+		return ERR_PTR(-EINVAL);
+	}
+	return mdata->mixer_intf[dspp_num].dspp_base;
+}
+
+/* Assumes that function will be called from within clock enabled space*/
+/*
+ * pp_hist_setup() - arm histogram collection for one DSPP or VIG pipe.
+ * @op:     legacy DSPP opmode accumulator; the hist-enable bit is OR'd
+ *          into it when no versioned HIST op exists.
+ * @block:  PP location (MDSS_PP_DSPP_CFG / MDSS_PP_SSPP_CFG) combined
+ *          with the dspp or pipe number.
+ * @mix:    mixer being programmed; required for the DSPP path.
+ * @pp_sts: per-display PP status handed to the versioned HIST op.
+ *
+ * Returns 0 on success, or a negative errno when the target block does
+ * not exist or does not support histogram collection.
+ */
+static int pp_hist_setup(u32 *op, u32 block, struct mdss_mdp_mixer *mix,
+			struct pp_sts_type *pp_sts)
+{
+	int ret = 0;
+	char __iomem *base;
+	u32 op_flags = 0, block_type = 0;
+	struct mdss_mdp_pipe *pipe;
+	struct pp_hist_col_info *hist_info;
+	unsigned long flag;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 intr_mask;
+
+	if (!mdata)
+		return -EPERM;
+
+	intr_mask = 1;
+	if (mix && (PP_LOCAT(block) == MDSS_PP_DSPP_CFG)) {
+		/* HIST_EN */
+		block_type = DSPP;
+		op_flags = BIT(16);
+		hist_info = &mdss_pp_res->dspp_hist[mix->num];
+		base = mdss_mdp_get_dspp_addr_off(PP_BLOCK(block));
+		if (IS_ERR(base)) {
+			ret = -EPERM;
+			goto error;
+		}
+	} else if (PP_LOCAT(block) == MDSS_PP_SSPP_CFG &&
+		(pp_driver_ops.is_sspp_hist_supp) &&
+		(pp_driver_ops.is_sspp_hist_supp())) {
+		/* source-pipe histogram, only on targets that support it */
+		block_type = SSPP_VIG;
+		pipe = __get_hist_pipe(PP_BLOCK(block));
+		if (IS_ERR_OR_NULL(pipe)) {
+			pr_debug("pipe DNE (%d)\n",
+					(u32) PP_BLOCK(block));
+			ret = -ENODEV;
+			goto error;
+		}
+		op_flags = BIT(8);
+		hist_info = &pipe->pp_res.hist;
+		base = pipe->base;
+		mdss_mdp_pipe_unmap(pipe);
+	} else {
+		ret = -EINVAL;
+		goto error;
+	}
+
+	mutex_lock(&hist_info->hist_mutex);
+	spin_lock_irqsave(&hist_info->hist_lock, flag);
+	/*
+	 * Set histogram interrupt if histogram collection is enabled. The
+	 * interrupt register offsets are the same across different mdss
+	 * versions so far, hence mdss_mdp_hist_irq_set_mask is used for
+	 * all the mdss versions.
+	 */
+	if (hist_info->col_en)
+		mdss_mdp_hist_irq_set_mask(intr_mask << hist_info->intr_shift);
+	/*
+	 * Starting from msmcobalt, the histogram enable bit has been moved
+	 * from DSPP opmode register to PA_HIST opmode register, hence we need
+	 * to update the histogram enable bit differently based on mdss version.
+	 * If HIST pp_set_config is defined, we will enable or disable the
+	 * hist_en bit in PA_HIST opmode register inside HIST pp_set_config
+	 * function; else, we only need to add the hist_en bit to the *op when
+	 * histogram collection is enable, and *op will be passed to
+	 * pp_dspp_setup to update the DSPP opmode register.
+	 */
+	if (pp_ops[HIST].pp_set_config)
+		ret = pp_ops[HIST].pp_set_config(base, pp_sts, hist_info,
+							block_type);
+	else if (hist_info->col_en)
+		*op |= op_flags;
+
+	spin_unlock_irqrestore(&hist_info->hist_lock, flag);
+	mutex_unlock(&hist_info->hist_mutex);
+error:
+	return ret;
+}
+
+/*
+ * Program the legacy DSPP dither block. @addr points at the dither
+ * depth register; the 4x4 dither matrix registers start 0x14 bytes
+ * later. The enable/disable status in @pp_sts is updated from
+ * @dither_cfg->flags.
+ */
+static void pp_dither_config(char __iomem *addr,
+			struct pp_sts_type *pp_sts,
+			struct mdp_dither_cfg_data *dither_cfg)
+{
+	u32 val;
+	int row;
+
+	if (dither_cfg->flags & MDP_PP_OPS_WRITE) {
+		/* per-component dither depth, 2 bits each */
+		val = dither_depth_map[dither_cfg->g_y_depth] |
+		      (dither_depth_map[dither_cfg->b_cb_depth] << 2) |
+		      (dither_depth_map[dither_cfg->r_cr_depth] << 4);
+		writel_relaxed(val, addr);
+		addr += 0x14;
+		/* pack each 4-entry matrix row into one register, 4 bits/entry */
+		for (row = 0; row < 4; row++) {
+			val = dither_matrix[row * 4] |
+			      (dither_matrix[row * 4 + 1] << 4) |
+			      (dither_matrix[row * 4 + 2] << 8) |
+			      (dither_matrix[row * 4 + 3] << 12);
+			writel_relaxed(val, addr);
+			addr += 4;
+		}
+	}
+	if (dither_cfg->flags & MDP_PP_OPS_DISABLE)
+		pp_sts->dither_sts &= ~PP_STS_ENABLE;
+	else if (dither_cfg->flags & MDP_PP_OPS_ENABLE)
+		pp_sts->dither_sts |= PP_STS_ENABLE;
+	pp_sts_set_split_bits(&pp_sts->dither_sts, dither_cfg->flags);
+}
+
+/*
+ * Translate the per-feature status bits in @pp_sts into the DSPP
+ * OP_MODE value accumulated in *@opmode. Targets with a versioned
+ * driver hook delegate entirely to pp_opmode_config.
+ */
+static void pp_dspp_opmode_config(struct mdss_mdp_ctl *ctl, u32 num,
+					struct pp_sts_type *pp_sts, int mdp_rev,
+					u32 *opmode)
+{
+	/* PA sub-feature status bit -> DSPP opmode bit */
+	static const u32 pa_mask_map[][2] = {
+		{ PP_STS_PA_HUE_MASK, MDSS_MDP_DSPP_OP_PA_HUE_MASK },
+		{ PP_STS_PA_SAT_MASK, MDSS_MDP_DSPP_OP_PA_SAT_MASK },
+		{ PP_STS_PA_VAL_MASK, MDSS_MDP_DSPP_OP_PA_VAL_MASK },
+		{ PP_STS_PA_CONT_MASK, MDSS_MDP_DSPP_OP_PA_CONT_MASK },
+		{ PP_STS_PA_MEM_PROTECT_EN, MDSS_MDP_DSPP_OP_PA_MEM_PROTECT_EN },
+		{ PP_STS_PA_SAT_ZERO_EXP_EN, MDSS_MDP_DSPP_OP_PA_SAT_ZERO_EXP_EN },
+		{ PP_STS_PA_MEM_COL_SKIN_MASK, MDSS_MDP_DSPP_OP_PA_MEM_COL_SKIN_MASK },
+		{ PP_STS_PA_MEM_COL_FOL_MASK, MDSS_MDP_DSPP_OP_PA_MEM_COL_FOL_MASK },
+		{ PP_STS_PA_MEM_COL_SKY_MASK, MDSS_MDP_DSPP_OP_PA_MEM_COL_SKY_MASK },
+		{ PP_STS_PA_SIX_ZONE_HUE_MASK, MDSS_MDP_DSPP_OP_PA_SIX_ZONE_HUE_MASK },
+		{ PP_STS_PA_SIX_ZONE_SAT_MASK, MDSS_MDP_DSPP_OP_PA_SIX_ZONE_SAT_MASK },
+		{ PP_STS_PA_SIX_ZONE_VAL_MASK, MDSS_MDP_DSPP_OP_PA_SIX_ZONE_VAL_MASK },
+	};
+	int side, i;
+
+	side = pp_num_to_side(ctl, num);
+	if (side < 0)
+		return;
+
+	/* revision-specific hook takes over completely when present */
+	if (pp_driver_ops.pp_opmode_config) {
+		pp_driver_ops.pp_opmode_config(DSPP, pp_sts, opmode, side);
+		return;
+	}
+
+	if (pp_sts_is_enabled(pp_sts->pa_sts, side)) {
+		*opmode |= MDSS_MDP_DSPP_OP_PA_EN; /* PA_EN */
+		/* PA sub-feature mask bits only exist on rev 1.3+ hardware */
+		if (mdp_rev >= MDSS_MDP_HW_REV_103) {
+			for (i = 0; i < ARRAY_SIZE(pa_mask_map); i++)
+				if (pp_sts->pa_sts & pa_mask_map[i][0])
+					*opmode |= pa_mask_map[i][1];
+		}
+	}
+	if (pp_sts_is_enabled(pp_sts->pcc_sts, side))
+		*opmode |= MDSS_MDP_DSPP_OP_PCC_EN; /* PCC_EN */
+
+	if (pp_sts_is_enabled(pp_sts->igc_sts, side))
+		*opmode |= MDSS_MDP_DSPP_OP_IGC_LUT_EN | /* IGC_LUT_EN */
+			      (pp_sts->igc_tbl_idx << 1);
+	/* histogram LUT also requires PA to be running */
+	if (pp_sts->enhist_sts & PP_STS_ENABLE)
+		*opmode |= MDSS_MDP_DSPP_OP_HIST_LUTV_EN |
+				  MDSS_MDP_DSPP_OP_PA_EN;
+	if (pp_sts_is_enabled(pp_sts->dither_sts, side))
+		*opmode |= MDSS_MDP_DSPP_OP_DST_DITHER_EN; /* DITHER_EN */
+	if (pp_sts_is_enabled(pp_sts->gamut_sts, side)) {
+		*opmode |= MDSS_MDP_DSPP_OP_GAMUT_EN; /* GAMUT_EN */
+		if (pp_sts->gamut_sts & PP_STS_GAMUT_FIRST)
+			*opmode |= MDSS_MDP_DSPP_OP_GAMUT_PCC_ORDER;
+	}
+	if (pp_sts_is_enabled(pp_sts->pgc_sts, side))
+		*opmode |= MDSS_MDP_DSPP_OP_ARGC_LUT_EN;
+}
+
+/*
+ * pp_dspp_setup() - program every dirty DSPP feature for one display.
+ * @disp_num: logical display (fb) index.
+ * @mixer:    interface mixer whose DSPP is being programmed.
+ *
+ * Runs histogram setup, then each dirty PP feature (PA/PCC/IGC/HIST
+ * LUT/dither/gamut/PGC) through either the versioned pp_ops hook or
+ * the legacy register path, applies AD register writes, and finally
+ * commits the accumulated OP_MODE value and flush bits.
+ * Must be called with MDP clocks available; takes its own clock vote.
+ */
+static int pp_dspp_setup(u32 disp_num, struct mdss_mdp_mixer *mixer)
+{
+	u32 ad_flags, flags, dspp_num, opmode = 0, ad_bypass;
+	struct mdp_pgc_lut_data *pgc_config;
+	struct pp_sts_type *pp_sts = NULL;
+	char __iomem *base, *addr = NULL;
+	int ret = 0;
+	struct mdss_data_type *mdata;
+	struct mdss_ad_info *ad = NULL;
+	struct mdss_mdp_ad *ad_hw = NULL;
+	struct mdp_pa_v2_cfg_data *pa_v2_cfg_data = NULL;
+	struct mdss_mdp_ctl *ctl;
+	u32 mixer_cnt;
+	u32 mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
+	int side;
+
+	if (!mixer || !mixer->ctl || !mixer->ctl->mdata)
+		return -EINVAL;
+	ctl = mixer->ctl;
+	mdata = ctl->mdata;
+	dspp_num = mixer->num;
+	/* no corresponding dspp */
+	if ((mixer->type != MDSS_MDP_MIXER_TYPE_INTF) ||
+		(dspp_num >= mdata->ndspp))
+		return -EINVAL;
+	base = mdss_mdp_get_dspp_addr_off(dspp_num);
+	if (IS_ERR(base))
+		return -EINVAL;
+
+	side = pp_num_to_side(ctl, dspp_num);
+	if (side < 0) {
+		pr_err("invalid side information for dspp_num %d", dspp_num);
+		return -EINVAL;
+	}
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+	if ((mdata->pp_block_off.dspp_gamut_off != U32_MAX) &&
+			(pp_driver_ops.gamut_clk_gate_en))
+		pp_driver_ops.gamut_clk_gate_en(base +
+					mdata->pp_block_off.dspp_gamut_off);
+
+	if (disp_num < MDSS_BLOCK_DISP_NUM) {
+		pp_sts = &mdss_pp_res->pp_disp_sts[disp_num];
+		pp_sts->side_sts = side;
+
+		ret = pp_hist_setup(&opmode, MDSS_PP_DSPP_CFG | dspp_num, mixer,
+				pp_sts);
+		if (ret)
+			goto dspp_exit;
+
+		flags = mdss_pp_res->pp_disp_flags[disp_num];
+	} else {
+		/* out-of-range display: nothing cached, nothing dirty */
+		flags = 0;
+	}
+
+	mixer_cnt = mdss_mdp_get_ctl_mixers(disp_num, mixer_id);
+	if (dspp_num < mdata->nad_cfgs && disp_num < mdata->nad_cfgs &&
+				(mixer_cnt <= mdata->nmax_concurrent_ad_hw)) {
+		ad = &mdata->ad_cfgs[disp_num];
+		ad_flags = ad->reg_sts;
+		ad_hw = &mdata->ad_off[dspp_num];
+	} else {
+		ad_flags = 0;
+	}
+
+	/* nothing to update */
+	if ((!flags) && (!(opmode)) && (!ad_flags))
+		goto dspp_exit;
+
+	if (flags & PP_FLAGS_DIRTY_PA) {
+		if (!pp_ops[PA].pp_set_config) {
+			if (mdata->mdp_rev >= MDSS_MDP_HW_REV_103) {
+				pa_v2_cfg_data =
+					&mdss_pp_res->pa_v2_disp_cfg[disp_num];
+				pp_pa_v2_config(flags,
+					base + MDSS_MDP_REG_DSPP_PA_BASE,
+					pp_sts,
+					&pa_v2_cfg_data->pa_v2_data,
+					PP_DSPP);
+			} else
+				pp_pa_config(flags,
+					base + MDSS_MDP_REG_DSPP_PA_BASE,
+					pp_sts,
+					&mdss_pp_res->pa_disp_cfg[disp_num]);
+		} else {
+			pp_ops[PA].pp_set_config(base, pp_sts,
+					&mdss_pp_res->pa_v2_disp_cfg[disp_num],
+					DSPP);
+		}
+	}
+	if (flags & PP_FLAGS_DIRTY_PCC) {
+		if (!pp_ops[PCC].pp_set_config)
+			pp_pcc_config(flags, base + MDSS_MDP_REG_DSPP_PCC_BASE,
+					pp_sts,
+					&mdss_pp_res->pcc_disp_cfg[disp_num]);
+		else {
+			if (mdata->pp_block_off.dspp_pcc_off == U32_MAX) {
+				pr_err("invalid pcc off %d\n", U32_MAX);
+			} else {
+				addr = base + mdata->pp_block_off.dspp_pcc_off;
+				pp_ops[PCC].pp_set_config(addr, pp_sts,
+					&mdss_pp_res->pcc_disp_cfg[disp_num],
+					DSPP);
+			}
+		}
+	}
+
+	if (flags & PP_FLAGS_DIRTY_IGC) {
+		if (!pp_ops[IGC].pp_set_config) {
+			pp_igc_config(flags,
+			      mdata->mdp_base + MDSS_MDP_REG_IGC_DSPP_BASE,
+			      pp_sts, &mdss_pp_res->igc_disp_cfg[disp_num],
+			      dspp_num, mdata->ndspp);
+		} else {
+			addr = mdata->mdp_base + MDSS_MDP_REG_IGC_DSPP_BASE;
+			/* Pass dspp num using block */
+			mdss_pp_res->igc_disp_cfg[disp_num].block = dspp_num;
+			pp_ops[IGC].pp_set_config(addr, pp_sts,
+				&mdss_pp_res->igc_disp_cfg[disp_num],
+				DSPP);
+		}
+	}
+
+	if (flags & PP_FLAGS_DIRTY_ENHIST) {
+		if (!pp_ops[HIST_LUT].pp_set_config) {
+			pp_enhist_config(flags,
+				base + MDSS_MDP_REG_DSPP_HIST_LUT_BASE,
+				pp_sts,
+				&mdss_pp_res->enhist_disp_cfg[disp_num]);
+
+			if ((pp_sts->enhist_sts & PP_STS_ENABLE) &&
+			    !(pp_sts->pa_sts & PP_STS_ENABLE)) {
+				/* Program default value */
+				addr = base + MDSS_MDP_REG_DSPP_PA_BASE;
+				writel_relaxed(0, addr);
+				writel_relaxed(0, addr + 4);
+				writel_relaxed(0, addr + 8);
+				writel_relaxed(0, addr + 12);
+			}
+		} else {
+			/* Pass dspp num using block */
+			mdss_pp_res->enhist_disp_cfg[disp_num].block = dspp_num;
+			pp_ops[HIST_LUT].pp_set_config(base, pp_sts,
+				&mdss_pp_res->enhist_disp_cfg[disp_num], DSPP);
+		}
+	}
+
+	if (flags & PP_FLAGS_DIRTY_DITHER) {
+		if (!pp_ops[DITHER].pp_set_config) {
+			/*
+			 * Fix: compute the dither register address here
+			 * instead of reusing whatever 'addr' was left over
+			 * from an earlier feature block (it is NULL when
+			 * dither is the only dirty feature).
+			 */
+			addr = base + MDSS_MDP_REG_DSPP_DITHER_DEPTH;
+			pp_dither_config(addr, pp_sts,
+				&mdss_pp_res->dither_disp_cfg[disp_num]);
+		} else {
+			addr = base + MDSS_MDP_REG_DSPP_DITHER_DEPTH;
+			pp_ops[DITHER].pp_set_config(addr, pp_sts,
+			      &mdss_pp_res->dither_disp_cfg[disp_num], DSPP);
+		}
+	}
+	if (flags & PP_FLAGS_DIRTY_GAMUT) {
+		if (!pp_ops[GAMUT].pp_set_config) {
+			pp_gamut_config(&mdss_pp_res->gamut_disp_cfg[disp_num],
+					 base, pp_sts);
+		} else {
+			if (mdata->pp_block_off.dspp_gamut_off == U32_MAX) {
+				pr_err("invalid gamut off %d\n", U32_MAX);
+			} else {
+				addr = base +
+				       mdata->pp_block_off.dspp_gamut_off;
+				pp_ops[GAMUT].pp_set_config(addr, pp_sts,
+				      &mdss_pp_res->gamut_disp_cfg[disp_num],
+				      DSPP);
+			}
+		}
+	}
+
+	if (flags & PP_FLAGS_DIRTY_PGC) {
+		pgc_config = &mdss_pp_res->pgc_disp_cfg[disp_num];
+		if (pp_ops[GC].pp_set_config) {
+			if (mdata->pp_block_off.dspp_pgc_off == U32_MAX) {
+				pr_err("invalid pgc offset %d\n", U32_MAX);
+			} else {
+				addr = base +
+					mdata->pp_block_off.dspp_pgc_off;
+				pp_ops[GC].pp_set_config(addr, pp_sts,
+					&mdss_pp_res->pgc_disp_cfg[disp_num],
+					DSPP);
+			}
+		} else {
+			if (pgc_config->flags & MDP_PP_OPS_WRITE) {
+				addr = base + MDSS_MDP_REG_DSPP_GC_BASE;
+				pp_update_argc_lut(addr, pgc_config);
+			}
+			if (pgc_config->flags & MDP_PP_OPS_DISABLE)
+				pp_sts->pgc_sts &= ~PP_STS_ENABLE;
+			else if (pgc_config->flags & MDP_PP_OPS_ENABLE)
+				pp_sts->pgc_sts |= PP_STS_ENABLE;
+			pp_sts_set_split_bits(&pp_sts->pgc_sts,
+					      pgc_config->flags);
+		}
+	}
+
+	if (pp_sts != NULL)
+		pp_dspp_opmode_config(ctl, dspp_num, pp_sts, mdata->mdp_rev,
+					&opmode);
+
+	if (ad_hw) {
+		mutex_lock(&ad->lock);
+		ad_flags = ad->reg_sts;
+		if (ad_flags & PP_AD_STS_DIRTY_DATA)
+			pp_ad_input_write(ad_hw, ad);
+		if (ad_flags & PP_AD_STS_DIRTY_INIT)
+			pp_ad_init_write(ad_hw, ad, ctl);
+		if (ad_flags & PP_AD_STS_DIRTY_CFG)
+			pp_ad_cfg_write(ad_hw, ad);
+
+		if (ad->state & PP_AD_STATE_IPC_RESET) {
+			writel_relaxed(ad->cfg.t_filter_recursion,
+				ad_hw->base + MDSS_MDP_REG_AD_TFILT_CTRL);
+			writel_relaxed(ad->cfg.mode | MDSS_AD_AUTO_TRIGGER,
+				ad_hw->base + MDSS_MDP_REG_AD_MODE_SEL);
+		}
+
+		pp_ad_bypass_config(ad, ctl, ad_hw->num, &ad_bypass);
+		writel_relaxed(ad_bypass, ad_hw->base);
+		mutex_unlock(&ad->lock);
+	}
+
+	writel_relaxed(opmode, base + MDSS_MDP_REG_DSPP_OP_MODE);
+
+	/* DSPP3 has a dedicated, non-contiguous flush bit */
+	if (dspp_num == MDSS_MDP_DSPP3)
+		ctl->flush_bits |= BIT(21);
+	else
+		ctl->flush_bits |= BIT(13 + dspp_num);
+
+	wmb(); /* ensure write is finished before progressing */
+dspp_exit:
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	return ret;
+}
+
+/*
+ * Public entry point for post-processing setup: validates the control
+ * path, then runs mdss_mdp_pp_setup_locked() under ctl->lock while the
+ * display is powered on.
+ */
+int mdss_mdp_pp_setup(struct mdss_mdp_ctl *ctl)
+{
+	int ret;
+
+	if (!ctl->mfd || !mdss_pp_res)
+		return -EINVAL;
+
+	/* TODO: have some sort of reader/writer lock to prevent unclocked
+	 * access while display power is toggled
+	 */
+	mutex_lock(&ctl->lock);
+	if (mdss_mdp_ctl_is_power_on(ctl))
+		ret = mdss_mdp_pp_setup_locked(ctl);
+	else
+		ret = -EPERM;
+	mutex_unlock(&ctl->lock);
+
+	return ret;
+}
+
+/*
+ * mdss_mdp_pp_setup_locked() - core per-commit PP programming.
+ * @ctl: control path; caller must hold ctl->lock with the ctl powered.
+ *
+ * Runs AD setup when the mixer configuration allows it, programs the
+ * mixer/DSPP/PPB features on both layer-mixer paths, then clears the
+ * display's dirty flags. Temporarily raises the register bus vote for
+ * resume commits that rewrite large LUTs.
+ */
+int mdss_mdp_pp_setup_locked(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_data_type *mdata;
+	int ret = 0, i;
+	u32 flags, pa_v2_flags;
+	u32 max_bw_needed;
+	u32 mixer_cnt;
+	u32 mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
+	u32 disp_num;
+	bool valid_mixers = true;
+	bool valid_ad_panel = true;
+
+	if ((!ctl) || (!ctl->mfd) || (!mdss_pp_res) || (!ctl->mdata))
+		return -EINVAL;
+
+	mdata = ctl->mdata;
+	/* treat fb_num the same as block logical id*/
+	disp_num = ctl->mfd->index;
+
+	mixer_cnt = mdss_mdp_get_ctl_mixers(disp_num, mixer_id);
+	if (!mixer_cnt) {
+		valid_mixers = false;
+		ret = -EINVAL;
+		pr_warn("Configuring post processing without mixers, err = %d\n",
+									ret);
+		goto exit;
+	}
+	/* AD is only possible when every mixer has AD hardware behind it */
+	if (mdata->nad_cfgs == 0)
+		valid_mixers = false;
+	for (i = 0; i < mixer_cnt && valid_mixers; i++) {
+		if (mixer_id[i] >= mdata->nad_cfgs)
+			valid_mixers = false;
+	}
+	/* AD never runs on DTV; on writeback only for rev < 1.3 */
+	valid_ad_panel = (ctl->mfd->panel_info->type != DTV_PANEL) &&
+		(((mdata->mdp_rev < MDSS_MDP_HW_REV_103) &&
+			(ctl->mfd->panel_info->type == WRITEBACK_PANEL)) ||
+		(ctl->mfd->panel_info->type != WRITEBACK_PANEL));
+
+	if (valid_mixers && (mixer_cnt <= mdata->nmax_concurrent_ad_hw) &&
+		valid_ad_panel) {
+		ret = mdss_mdp_ad_setup(ctl->mfd);
+		if (ret < 0)
+			pr_warn("ad_setup(disp%d) returns %d\n", disp_num, ret);
+	}
+
+	mutex_lock(&mdss_pp_mutex);
+
+	flags = mdss_pp_res->pp_disp_flags[disp_num];
+	if (pp_ops[PA].pp_set_config)
+		pa_v2_flags = mdss_pp_res->pa_v2_disp_cfg[disp_num].flags;
+	else
+		pa_v2_flags =
+			mdss_pp_res->pa_v2_disp_cfg[disp_num].pa_v2_data.flags;
+	/*
+	 * If a LUT based PP feature needs to be reprogrammed during resume,
+	 * increase the register bus bandwidth to maximum frequency
+	 * in order to speed up the register reprogramming.
+	 */
+	max_bw_needed = (IS_PP_RESUME_COMMIT(flags) &&
+				(IS_PP_LUT_DIRTY(flags) ||
+				IS_SIX_ZONE_DIRTY(flags, pa_v2_flags)));
+	if (mdata->pp_reg_bus_clt && max_bw_needed) {
+		ret = mdss_update_reg_bus_vote(mdata->pp_reg_bus_clt,
+				VOTE_INDEX_HIGH);
+		if (ret)
+			pr_err("Updated reg_bus_scale failed, ret = %d", ret);
+	}
+
+	if (ctl->mixer_left) {
+		pp_mixer_setup(ctl->mixer_left);
+		pp_dspp_setup(disp_num, ctl->mixer_left);
+		pp_ppb_setup(ctl->mixer_left);
+	}
+	if (ctl->mixer_right) {
+		pp_mixer_setup(ctl->mixer_right);
+		pp_dspp_setup(disp_num, ctl->mixer_right);
+		pp_ppb_setup(ctl->mixer_right);
+	}
+
+	if (valid_mixers && (mixer_cnt <= mdata->nmax_concurrent_ad_hw) &&
+		valid_ad_panel) {
+		ret = mdss_mdp_ad_ipc_reset(ctl->mfd);
+		if (ret < 0)
+			pr_warn("ad_setup(disp%d) returns %d\n", disp_num, ret);
+	}
+
+	/* clear dirty flag */
+	if (disp_num < MDSS_BLOCK_DISP_NUM) {
+		mdss_pp_res->pp_disp_flags[disp_num] = 0;
+		if (disp_num < mdata->nad_cfgs)
+			mdata->ad_cfgs[disp_num].reg_sts = 0;
+	}
+
+	/* drop the temporary register-bus vote taken above */
+	if (mdata->pp_reg_bus_clt && max_bw_needed) {
+		ret = mdss_update_reg_bus_vote(mdata->pp_reg_bus_clt,
+				VOTE_INDEX_DISABLE);
+		if (ret)
+			pr_err("Updated reg_bus_scale failed, ret = %d", ret);
+	}
+	if (IS_PP_RESUME_COMMIT(flags))
+		mdss_pp_res->pp_disp_flags[disp_num] &=
+			~PP_FLAGS_RESUME_COMMIT;
+	mutex_unlock(&mdss_pp_mutex);
+exit:
+	return ret;
+}
+
+/*
+ * Set dirty and write bits on features that were enabled so they will be
+ * reconfigured
+ */
+/*
+ * mdss_mdp_pp_resume() - re-arm PP and AD state after a panel resume.
+ * @mfd: framebuffer device being resumed.
+ *
+ * Every feature whose cached status is ENABLE gets its DIRTY flag set
+ * and, unless the cached config asks for DISABLE, its WRITE bit, so the
+ * next commit reprograms the hardware. AD dirty state is restored as
+ * well. Returns 0, -EINVAL on bad input, or -EPERM when the display
+ * has no DSPP.
+ */
+int mdss_mdp_pp_resume(struct msm_fb_data_type *mfd)
+{
+	u32 flags = 0, disp_num, ret = 0;
+	struct pp_sts_type pp_sts;
+	struct mdss_ad_info *ad;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdp_pa_v2_cfg_data *pa_v2_cache_cfg = NULL;
+
+	if (!mfd) {
+		pr_err("invalid input: mfd = 0x%pK\n", mfd);
+		return -EINVAL;
+	}
+
+	if (!mdss_mdp_mfd_valid_dspp(mfd)) {
+		pr_debug("PP not supported on display num %d hw config\n",
+			mfd->index);
+		return -EPERM;
+	}
+
+	disp_num = mfd->index;
+	pp_sts = mdss_pp_res->pp_disp_sts[disp_num];
+
+	/* PA keeps its flags in different fields depending on hw rev */
+	if (pp_sts.pa_sts & PP_STS_ENABLE) {
+		flags |= PP_FLAGS_DIRTY_PA;
+		pa_v2_cache_cfg = &mdss_pp_res->pa_v2_disp_cfg[disp_num];
+		if (pp_ops[PA].pp_set_config) {
+			if (!(pa_v2_cache_cfg->flags & MDP_PP_OPS_DISABLE))
+				pa_v2_cache_cfg->flags |= MDP_PP_OPS_WRITE;
+		} else if (mdata->mdp_rev >= MDSS_MDP_HW_REV_103) {
+			if (!(pa_v2_cache_cfg->pa_v2_data.flags
+						& MDP_PP_OPS_DISABLE))
+				pa_v2_cache_cfg->pa_v2_data.flags |=
+					MDP_PP_OPS_WRITE;
+		} else {
+			if (!(mdss_pp_res->pa_disp_cfg[disp_num].flags
+						& MDP_PP_OPS_DISABLE))
+				mdss_pp_res->pa_disp_cfg[disp_num].flags |=
+					MDP_PP_OPS_WRITE;
+		}
+	}
+	if (pp_sts.pcc_sts & PP_STS_ENABLE) {
+		flags |= PP_FLAGS_DIRTY_PCC;
+		if (!(mdss_pp_res->pcc_disp_cfg[disp_num].ops
+					& MDP_PP_OPS_DISABLE))
+			mdss_pp_res->pcc_disp_cfg[disp_num].ops |=
+				MDP_PP_OPS_WRITE;
+	}
+	if (pp_sts.igc_sts & PP_STS_ENABLE) {
+		flags |= PP_FLAGS_DIRTY_IGC;
+		if (!(mdss_pp_res->igc_disp_cfg[disp_num].ops
+					& MDP_PP_OPS_DISABLE))
+			mdss_pp_res->igc_disp_cfg[disp_num].ops |=
+				MDP_PP_OPS_WRITE;
+	}
+	if (pp_sts.argc_sts & PP_STS_ENABLE) {
+		flags |= PP_FLAGS_DIRTY_ARGC;
+		if (!(mdss_pp_res->argc_disp_cfg[disp_num].flags
+					& MDP_PP_OPS_DISABLE))
+			mdss_pp_res->argc_disp_cfg[disp_num].flags |=
+				MDP_PP_OPS_WRITE;
+	}
+	if (pp_sts.enhist_sts & PP_STS_ENABLE) {
+		flags |= PP_FLAGS_DIRTY_ENHIST;
+		if (!(mdss_pp_res->enhist_disp_cfg[disp_num].ops
+					& MDP_PP_OPS_DISABLE))
+			mdss_pp_res->enhist_disp_cfg[disp_num].ops |=
+				MDP_PP_OPS_WRITE;
+	}
+	if (pp_sts.dither_sts & PP_STS_ENABLE) {
+		flags |= PP_FLAGS_DIRTY_DITHER;
+		if (!(mdss_pp_res->dither_disp_cfg[disp_num].flags
+					& MDP_PP_OPS_DISABLE))
+			mdss_pp_res->dither_disp_cfg[disp_num].flags |=
+				MDP_PP_OPS_WRITE;
+	}
+	if (pp_sts.gamut_sts & PP_STS_ENABLE) {
+		flags |= PP_FLAGS_DIRTY_GAMUT;
+		if (!(mdss_pp_res->gamut_disp_cfg[disp_num].flags
+					& MDP_PP_OPS_DISABLE))
+			mdss_pp_res->gamut_disp_cfg[disp_num].flags |=
+				MDP_PP_OPS_WRITE;
+	}
+	if (pp_sts.pgc_sts & PP_STS_ENABLE) {
+		flags |= PP_FLAGS_DIRTY_PGC;
+		if (!(mdss_pp_res->pgc_disp_cfg[disp_num].flags
+					& MDP_PP_OPS_DISABLE))
+			mdss_pp_res->pgc_disp_cfg[disp_num].flags |=
+				MDP_PP_OPS_WRITE;
+	}
+
+	mdss_pp_res->pp_disp_flags[disp_num] |= flags;
+	/* tag a resume commit so setup can raise the register-bus vote */
+	mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_RESUME_COMMIT;
+
+	/* restore AD dirty state; bail quietly when AD is unsupported */
+	ret = mdss_mdp_get_ad(mfd, &ad);
+	if (ret == -ENODEV || ret == -EPERM) {
+		pr_debug("AD not supported on device, disp num %d\n",
+			mfd->index);
+		return 0;
+	} else if (ret || !ad) {
+		pr_err("Failed to get ad info: ret = %d, ad = 0x%pK\n",
+			ret, ad);
+		return ret;
+	}
+
+	mutex_lock(&ad->lock);
+	if (mfd->ipc_resume) {
+		mfd->ipc_resume = false;
+		if (PP_AD_STATE_RUN & ad->state) {
+			ad->ipc_frame_count = 0;
+			ad->state |= PP_AD_STATE_IPC_RESUME;
+			ad->cfg.mode |= MDSS_AD_MODE_IPC_BIT;
+			pr_debug("switch mode to %d, last_ad_data = %d\n",
+				 ad->cfg.mode, ad->last_ad_data);
+		}
+	}
+
+	if (PP_AD_STATE_CFG & ad->state)
+		ad->sts |= PP_AD_STS_DIRTY_CFG;
+	if (PP_AD_STATE_INIT & ad->state)
+		ad->sts |= PP_AD_STS_DIRTY_INIT;
+	if ((PP_AD_STATE_DATA & ad->state) &&
+			(ad->sts & PP_STS_ENABLE))
+		ad->sts |= PP_AD_STS_DIRTY_DATA;
+
+	if (PP_AD_STATE_RUN & ad->state)
+		ad->state &= ~PP_AD_STATE_VSYNC;
+	mutex_unlock(&ad->lock);
+
+	return 0;
+}
+
+/*
+ * Parse the "qcom,mdss-pp-offsets" DT node into mdata->pp_block_off.
+ * All offsets are mandatory except the DSPP gamut offset, which falls
+ * back to U32_MAX (meaning "not present"). Returns 0 on success (or
+ * when the offsets node is absent entirely), negative errno otherwise.
+ */
+static int mdss_mdp_pp_dt_parse(struct device *dev)
+{
+	int ret = -EINVAL;
+	struct device_node *node;
+	struct mdss_data_type *mdata;
+	u32 prop_val;
+
+	mdata = mdss_mdp_get_mdata();
+	if (!dev || !mdata) {
+		pr_err("invalid dev %pK mdata %pK\n", dev, mdata);
+		goto bail_out;
+	}
+
+	/* initialize offsets to U32_MAX */
+	memset(&mdata->pp_block_off, U8_MAX, sizeof(mdata->pp_block_off));
+	node = of_get_child_by_name(dev->of_node, "qcom,mdss-pp-offsets");
+	if (!node) {
+		pr_debug("offsets are not supported\n");
+		ret = 0;
+		goto bail_out;
+	}
+
+	{
+		const struct {
+			const char *name;
+			u32 *dest;
+			bool optional;
+		} props[] = {
+			{ "qcom,mdss-sspp-mdss-igc-lut-off",
+			  &mdata->pp_block_off.sspp_igc_lut_off, false },
+			{ "qcom,mdss-sspp-vig-pcc-off",
+			  &mdata->pp_block_off.vig_pcc_off, false },
+			{ "qcom,mdss-sspp-rgb-pcc-off",
+			  &mdata->pp_block_off.rgb_pcc_off, false },
+			{ "qcom,mdss-sspp-dma-pcc-off",
+			  &mdata->pp_block_off.dma_pcc_off, false },
+			{ "qcom,mdss-lm-pgc-off",
+			  &mdata->pp_block_off.lm_pgc_off, false },
+			{ "qcom,mdss-dspp-gamut-off",
+			  &mdata->pp_block_off.dspp_gamut_off, true },
+			{ "qcom,mdss-dspp-pcc-off",
+			  &mdata->pp_block_off.dspp_pcc_off, false },
+			{ "qcom,mdss-dspp-pgc-off",
+			  &mdata->pp_block_off.dspp_pgc_off, false },
+		};
+		int i;
+
+		for (i = 0; i < ARRAY_SIZE(props); i++) {
+			ret = of_property_read_u32(node, props[i].name,
+						   &prop_val);
+			if (!ret) {
+				*props[i].dest = prop_val;
+			} else if (props[i].optional) {
+				/* a missing gamut offset just disables it */
+				pr_debug("Could not read/find %s prop ret %d\n",
+					 props[i].name, ret);
+				*props[i].dest = U32_MAX;
+			} else {
+				pr_err("read property %s failed ret %d\n",
+				       props[i].name, ret);
+				goto bail_out;
+			}
+		}
+		ret = 0;
+	}
+bail_out:
+	return ret;
+}
+
+/*
+ * mdss_mdp_pp_init() - one-time post-processing initialization.
+ * @dev: device owning the devres allocations.
+ *
+ * Allocates the global PP resource block (first call only), resolves
+ * the revision-specific driver ops, and initializes the DSPP and VIG
+ * histogram collection contexts. On failure only state allocated by
+ * THIS call is unwound, so a previously-initialized mdss_pp_res is
+ * never freed out from under its users.
+ */
+int mdss_mdp_pp_init(struct device *dev)
+{
+	int i, ret = 0;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_pipe *vig;
+	struct pp_hist_col_info *hist = NULL;
+	u32 ctl_off = 0;
+	bool alloc_res = false;	/* did this call allocate mdss_pp_res? */
+
+	if (!mdata)
+		return -EPERM;
+
+	mdata->pp_reg_bus_clt = mdss_reg_bus_vote_client_create("pp\0");
+	if (IS_ERR(mdata->pp_reg_bus_clt))
+		pr_err("bus client register failed\n");
+
+	mutex_lock(&mdss_pp_mutex);
+	if (!mdss_pp_res) {
+		mdss_pp_res = devm_kzalloc(dev, sizeof(*mdss_pp_res),
+				GFP_KERNEL);
+		if (mdss_pp_res == NULL) {
+			ret = -ENOMEM;
+		} else {
+			alloc_res = true;
+			if (mdss_mdp_pp_dt_parse(dev))
+				pr_info("No PP info in device tree\n");
+
+			ret = pp_get_driver_ops(&pp_driver_ops);
+			if (ret) {
+				pr_err("pp_get_driver_ops failed, ret=%d\n",
+						ret);
+				goto pp_exit;
+			}
+			pp_ops = pp_driver_ops.pp_ops;
+			/* one histogram collection context per DSPP */
+			hist = devm_kzalloc(dev,
+					sizeof(struct pp_hist_col_info) *
+					mdata->ndspp,
+					GFP_KERNEL);
+			if (hist == NULL) {
+				pr_err("dspp histogram allocation failed!\n");
+				ret = -ENOMEM;
+				goto pp_exit;
+			}
+			for (i = 0; i < mdata->ndspp; i++) {
+				mutex_init(&hist[i].hist_mutex);
+				spin_lock_init(&hist[i].hist_lock);
+				/* DSPP hist interrupts start at bit 12 */
+				hist[i].intr_shift = (i * 4) + 12;
+				if (pp_driver_ops.get_hist_offset) {
+					ret = pp_driver_ops.get_hist_offset(
+						DSPP, &ctl_off);
+					if (ret) {
+						pr_err("get_hist_offset ret %d\n",
+							ret);
+						goto hist_exit;
+					}
+					hist[i].base =
+						i < mdata->ndspp ?
+						mdss_mdp_get_dspp_addr_off(i) +
+						ctl_off : NULL;
+				} else {
+					hist[i].base = i < mdata->ndspp ?
+						mdss_mdp_get_dspp_addr_off(i) +
+						MDSS_MDP_REG_DSPP_HIST_CTL_BASE
+						: NULL;
+				}
+			}
+			/* the fourth DSPP uses a non-contiguous intr bit */
+			if (mdata->ndspp == 4)
+				hist[3].intr_shift = 22;
+
+			mdss_pp_res->dspp_hist = hist;
+		}
+	}
+	if (mdata && mdata->vig_pipes) {
+		vig = mdata->vig_pipes;
+		for (i = 0; i < mdata->nvig_pipes; i++) {
+			mutex_init(&vig[i].pp_res.hist.hist_mutex);
+			spin_lock_init(&vig[i].pp_res.hist.hist_lock);
+			vig[i].pp_res.hist.intr_shift = (vig[i].num * 4);
+			if (i == 3)
+				vig[i].pp_res.hist.intr_shift = 10;
+			if (pp_driver_ops.get_hist_offset) {
+				ret = pp_driver_ops.get_hist_offset(
+					SSPP_VIG, &ctl_off);
+				if (ret) {
+					pr_err("get_hist_offset ret %d\n",
+						ret);
+					goto hist_exit;
+				}
+				vig[i].pp_res.hist.base = vig[i].base +
+					ctl_off;
+			} else {
+				vig[i].pp_res.hist.base = vig[i].base +
+					MDSS_MDP_REG_VIG_HIST_CTL_BASE;
+			}
+		}
+	}
+	mutex_unlock(&mdss_pp_mutex);
+	return ret;
+hist_exit:
+	/* hist is NULL when mdss_pp_res pre-existed; nothing to free then */
+	if (hist)
+		devm_kfree(dev, hist);
+pp_exit:
+	/*
+	 * Fix: only unwind what this call allocated, and clear the global
+	 * so a stale pointer is never left behind after devm_kfree().
+	 */
+	if (alloc_res) {
+		devm_kfree(dev, mdss_pp_res);
+		mdss_pp_res = NULL;
+	}
+	mutex_unlock(&mdss_pp_mutex);
+	return ret;
+}
+
+/*
+ * mdss_mdp_pp_term() - tear down post-processing state.
+ * @dev: device that owns the devres allocations made in pp_init.
+ *
+ * Releases the DSPP histogram array and the global PP resource block,
+ * then destroys the register-bus vote client.
+ */
+void mdss_mdp_pp_term(struct device *dev)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (mdss_pp_res) {
+		mutex_lock(&mdss_pp_mutex);
+		devm_kfree(dev, mdss_pp_res->dspp_hist);
+		devm_kfree(dev, mdss_pp_res);
+		mdss_pp_res = NULL;
+		mutex_unlock(&mdss_pp_mutex);
+	}
+
+	/* guard mdata like mdss_mdp_pp_init() does */
+	if (mdata) {
+		mdss_reg_bus_vote_client_destroy(mdata->pp_reg_bus_clt);
+		mdata->pp_reg_bus_clt = NULL;
+	}
+}
+
+/*
+ * Hook PP callbacks into the framebuffer device: AD backlight
+ * calculation (when AD hardware exists) and the PP release handler.
+ * Displays beyond the logical PP block range are silently skipped.
+ */
+int mdss_mdp_pp_overlay_init(struct msm_fb_data_type *mfd)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!mfd || !mdata) {
+		pr_err("Invalid mfd %pK mdata %pK\n", mfd, mdata);
+		return -EPERM;
+	}
+
+	if (mfd->index < (MDP_BLOCK_MAX - MDP_LOGICAL_BLOCK_DISP_0)) {
+		if (mdata->nad_cfgs)
+			mfd->mdp.ad_calc_bl = pp_ad_calc_bl;
+		mfd->mdp.pp_release_fnc = pp_mfd_release_all;
+	}
+	return 0;
+}
+
+/*
+ * Apply panel-default PP programming: default dither based on the
+ * panel bpp and, for DTV panels only, the limited-range IGC LUT.
+ * Failures are logged; the last failure code is returned.
+ */
+int mdss_mdp_pp_default_overlay_config(struct msm_fb_data_type *mfd,
+					struct mdss_panel_data *pdata,
+					bool enable)
+{
+	int ret;
+
+	if (!mfd || !pdata) {
+		pr_err("Invalid parameters mfd %pK pdata %pK\n", mfd, pdata);
+		return -EINVAL;
+	}
+
+	ret = mdss_mdp_panel_default_dither_config(mfd,
+			pdata->panel_info.bpp, enable);
+	if (ret)
+		pr_err("Unable to configure default dither on fb%d ret %d\n",
+			mfd->index, ret);
+
+	if (pdata->panel_info.type == DTV_PANEL) {
+		ret = mdss_mdp_limited_lut_igc_config(mfd, enable);
+		if (ret)
+			pr_err("Unable to configure DTV panel default IGC ret %d\n",
+				ret);
+	}
+
+	return ret;
+}
+
+/*
+ * Decide whether the backlight moved far enough from the last value
+ * fed to AD to warrant a new AD input. The threshold scales with the
+ * current backlight: curr_bl * al_thresh / (base * 4).
+ */
+static bool pp_ad_bl_threshold_check(int al_thresh, int base, int prev_bl,
+					 int curr_bl)
+{
+	int thresh, delta;
+	bool crossed;
+
+	pr_debug("al_thresh = %d, base = %d\n", al_thresh, base);
+	if (base <= 0) {
+		pr_debug("Invalid base for threshold calculation %d\n", base);
+		return false;
+	}
+
+	thresh = (curr_bl * al_thresh) / (base * 4);
+	delta = abs(curr_bl - prev_bl);
+	crossed = delta > thresh;
+	pr_debug("prev_bl =%d, curr_bl = %d, bl_thresh = %d, diff = %d, ret = %d\n",
+		prev_bl, curr_bl, thresh, delta, crossed);
+
+	return crossed;
+}
+
+/*
+ * pp_ad_calc_bl() - mfd->mdp.ad_calc_bl hook.
+ * @mfd:           framebuffer device.
+ * @bl_in:         requested backlight level.
+ * @bl_out:        level to actually apply to the panel.
+ * @bl_out_notify: set true when the AD input changed enough to
+ *                 re-trigger AD calculation.
+ *
+ * Linearizes the incoming level, attenuates it through the AD
+ * backlight LUTs when alpha is configured, and feeds the result to the
+ * AD block when it crosses the configured threshold. Error paths are
+ * consolidated into a single unlock exit (kernel goto-cleanup idiom)
+ * instead of five duplicated unlock/return sequences.
+ */
+static int pp_ad_calc_bl(struct msm_fb_data_type *mfd, int bl_in, int *bl_out,
+	bool *bl_out_notify)
+{
+	int ret = -1;
+	int temp = bl_in;
+	u32 ad_bl_out = 0;
+	struct mdss_ad_info *ad;
+
+	ret = mdss_mdp_get_ad(mfd, &ad);
+	if (ret == -ENODEV || ret == -EPERM) {
+		pr_debug("AD not supported on device, disp num %d\n",
+			mfd->index);
+		return 0;
+	} else if (ret || !ad) {
+		pr_err("Failed to get ad info: ret = %d, ad = 0x%pK.\n",
+			ret, ad);
+		return ret;
+	}
+
+	/* Don't update BL = 0 to AD */
+	if (bl_in == 0)
+		return 0;
+	mutex_lock(&ad->lock);
+	if (!mfd->ad_bl_level)
+		mfd->ad_bl_level = bl_in;
+	if (!(ad->sts & PP_STS_ENABLE)) {
+		pr_debug("AD is not enabled.\n");
+		ret = -EPERM;
+		goto out_unlock;
+	}
+
+	if (!ad->bl_mfd || !ad->bl_mfd->panel_info ||
+		!ad->bl_att_lut) {
+		pr_err("Invalid ad info: bl_mfd = 0x%pK, ad->bl_mfd->panel_info = 0x%pK, bl_att_lut = 0x%pK\n",
+			ad->bl_mfd,
+			(!ad->bl_mfd) ? NULL : ad->bl_mfd->panel_info,
+			ad->bl_att_lut);
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	ret = pp_ad_linearize_bl(ad, bl_in, &temp,
+		MDP_PP_AD_BL_LINEAR);
+	if (ret) {
+		pr_err("Failed to linearize BL: %d\n", ret);
+		goto out_unlock;
+	}
+
+	if (ad->init.alpha > 0) {
+		ret = pp_ad_attenuate_bl(ad, temp, &temp);
+		if (ret) {
+			pr_err("Failed to attenuate BL: %d\n", ret);
+			goto out_unlock;
+		}
+		ad_bl_out = temp;
+
+		/* map the attenuated level back to panel space */
+		ret = pp_ad_linearize_bl(ad, temp, &temp,
+						MDP_PP_AD_BL_LINEAR_INV);
+		if (ret) {
+			pr_err("Failed to inverse linearize BL: %d\n", ret);
+			goto out_unlock;
+		}
+		*bl_out = temp;
+	} else {
+		ad_bl_out = temp;
+	}
+
+	if (pp_ad_bl_threshold_check(ad->init.al_thresh, ad->init.alpha_base,
+					ad->last_bl, ad_bl_out)) {
+		mfd->ad_bl_level = ad_bl_out;
+		pr_debug("backlight send to AD block: %d\n", mfd->ad_bl_level);
+		*bl_out_notify = true;
+		pp_ad_invalidate_input(mfd);
+	}
+	ret = 0;
+
+out_unlock:
+	mutex_unlock(&ad->lock);
+	return ret;
+}
+
+/*
+ * Map a logical display to the DSPP driving it: the first interface
+ * mixer attached to the display's ctl. Returns -EPERM when the display
+ * has no mixers or the chosen mixer has no DSPP behind it.
+ */
+static int pp_get_dspp_num(u32 disp_num, u32 *dspp_num)
+{
+	int i;
+	u32 mixer_cnt;
+	u32 mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	mixer_cnt = mdss_mdp_get_ctl_mixers(disp_num, mixer_id);
+	if (!mdata || !mixer_cnt)
+		return -EPERM;
+
+	/* only read the first mixer */
+	for (i = 0; i < mixer_cnt; i++) {
+		if (mixer_id[i] < mdata->nmixers_intf) {
+			if (mixer_id[i] >= mdata->ndspp)
+				return -EPERM;
+			*dspp_num = mixer_id[i];
+			return 0;
+		}
+	}
+	return -EPERM;
+}
+
+/*
+ * mdss_mdp_pa_config() - legacy (pre rev 1.3) picture-adjustment ioctl
+ * backend: either read back the current HSVC registers or cache the
+ * new config and mark the display dirty for the next kickoff.
+ * Returns 0 on success or a negative errno.
+ */
+int mdss_mdp_pa_config(struct msm_fb_data_type *mfd,
+			struct mdp_pa_cfg_data *config,
+			u32 *copyback)
+{
+	int ret = 0;
+	u32 disp_num, dspp_num = 0;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	char __iomem *pa_addr;
+
+	/* newer revisions use the PA v2 interface instead */
+	if (mdata->mdp_rev >= MDSS_MDP_HW_REV_103)
+		return -EINVAL;
+
+	ret = pp_validate_dspp_mfd_block(mfd, config->block);
+	if (ret) {
+		pr_err("Invalid block %d mfd index %d, ret %d\n",
+				config->block,
+				(mfd ? mfd->index : -1), ret);
+		return ret;
+	}
+
+	mutex_lock(&mdss_pp_mutex);
+	disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;
+
+	if (config->pa_data.flags & MDP_PP_OPS_READ) {
+		ret = pp_get_dspp_num(disp_num, &dspp_num);
+		if (ret) {
+			pr_err("no dspp connects to disp %d\n",
+					disp_num);
+			goto pa_config_exit;
+		}
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+		pa_addr = mdss_mdp_get_dspp_addr_off(dspp_num);
+		/* Fix: check the lookup like mdss_mdp_pa_v2_config() does */
+		if (IS_ERR(pa_addr)) {
+			ret = PTR_ERR(pa_addr);
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+			goto pa_config_exit;
+		}
+		pa_addr += MDSS_MDP_REG_DSPP_PA_BASE;
+		config->pa_data.hue_adj = readl_relaxed(pa_addr);
+		pa_addr += 4;
+		config->pa_data.sat_adj = readl_relaxed(pa_addr);
+		pa_addr += 4;
+		config->pa_data.val_adj = readl_relaxed(pa_addr);
+		pa_addr += 4;
+		config->pa_data.cont_adj = readl_relaxed(pa_addr);
+		*copyback = 1;
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	} else {
+		mdss_pp_res->pa_disp_cfg[disp_num] = config->pa_data;
+		mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_PA;
+	}
+
+pa_config_exit:
+	mutex_unlock(&mdss_pp_mutex);
+	return ret;
+}
+
/*
 * mdss_mdp_pa_v2_config() - PA v2 (rev 1.03+) read/write entry point.
 * @mfd:      framebuffer device of the targeted display.
 * @config:   PA v2 data; flags select read-back vs. cached write.
 * @copyback: set to 1 when register contents were read back for the caller.
 *
 * When pp_ops[PA] callbacks are populated (newer PP revisions) the
 * version-dispatched get/set helpers are used; otherwise the registers
 * are accessed directly / cached in mdss_pp_res.
 * Returns 0 on success or a negative errno.
 */
int mdss_mdp_pa_v2_config(struct msm_fb_data_type *mfd,
			struct mdp_pa_v2_cfg_data *config,
			u32 *copyback)
{
	int ret = 0;
	u32 disp_num, dspp_num = 0;
	char __iomem *pa_addr;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	struct mdp_pa_v2_cfg_data *pa_v2_cache = NULL;
	struct mdp_pp_cache_res res_cache;
	uint32_t flags = 0;

	/* v2 path requires rev 1.03 or newer */
	if (mdata->mdp_rev < MDSS_MDP_HW_REV_103)
		return -EINVAL;

	ret = pp_validate_dspp_mfd_block(mfd, config->block);
	if (ret) {
		pr_err("Invalid block %d mfd index %d, ret %d\n",
				config->block,
				(mfd ? mfd->index : -1), ret);
		return ret;
	}

	/* flags live in different fields depending on payload version */
	if (pp_ops[PA].pp_set_config)
		flags = config->flags;
	else
		flags = config->pa_v2_data.flags;

	if ((flags & MDSS_PP_SPLIT_MASK) == MDSS_PP_SPLIT_MASK) {
		pr_warn("Can't set both split bits\n");
		return -EINVAL;
	}

	mutex_lock(&mdss_pp_mutex);
	disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;

	if (flags & MDP_PP_OPS_READ) {
		ret = pp_get_dspp_num(disp_num, &dspp_num);
		if (ret) {
			pr_err("no dspp connects to disp %d\n",
				disp_num);
			goto pa_config_exit;
		}
		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
		pa_addr = mdss_mdp_get_dspp_addr_off(dspp_num);
		if (IS_ERR(pa_addr)) {
			ret = PTR_ERR(pa_addr);
			goto pa_clk_off;
		}
		if (pp_ops[PA].pp_get_config) {
			/*
			 * NOTE(review): this path does not set *copyback;
			 * confirm the versioned helper arranges the copy
			 * back to userspace itself.
			 */
			ret = pp_ops[PA].pp_get_config(pa_addr, config,
					DSPP, disp_num);
			if (ret)
				pr_err("PA get config failed %d\n", ret);
		} else {
			pa_addr += MDSS_MDP_REG_DSPP_PA_BASE;
			ret = pp_read_pa_v2_regs(pa_addr,
					&config->pa_v2_data,
					disp_num);
			if (ret)
				goto pa_config_exit;
			*copyback = 1;
		}
pa_clk_off:
		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
	} else {
		if (pp_ops[PA].pp_set_config) {
			pr_debug("version of PA is %d\n", config->version);
			res_cache.block = DSPP;
			res_cache.mdss_pp_res = mdss_pp_res;
			res_cache.pipe_res = NULL;
			ret = pp_pa_cache_params(config, &res_cache);
			if (ret) {
				pr_err("PA config failed version %d ret %d\n",
					config->version, ret);
				ret = -EFAULT;
				goto pa_config_exit;
			}
		} else {
			/* six-zone LUT is copied from user memory first */
			if (flags & MDP_PP_PA_SIX_ZONE_ENABLE) {
				ret = pp_copy_pa_six_zone_lut(config, disp_num);
				if (ret) {
					pr_err("PA copy six zone lut failed ret %d\n",
						ret);
					goto pa_config_exit;
				}
			}
			/* cache config; re-point LUTs at kernel copies */
			pa_v2_cache = &mdss_pp_res->pa_v2_disp_cfg[disp_num];
			*pa_v2_cache = *config;
			pa_v2_cache->pa_v2_data.six_zone_curve_p0 =
				mdss_pp_res->six_zone_lut_curve_p0[disp_num];
			pa_v2_cache->pa_v2_data.six_zone_curve_p1 =
				mdss_pp_res->six_zone_lut_curve_p1[disp_num];
		}
		mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_PA;
	}

pa_config_exit:
	mutex_unlock(&mdss_pp_mutex);
	return ret;
}
+
+
/*
 * pp_read_pa_v2_regs() - read back the PA v2 register block.
 * @addr:         DSPP PA base (iomem).
 * @pa_v2_config: destination; only fields whose enable flag is set are read.
 * @disp_num:     display index, used to select the kernel six-zone cache.
 *
 * The six-zone read requires a write to the index/control register first;
 * LUT entries are then read from two data registers (p1 at +4, p0 at +0).
 * Six-zone curves are copied to the user pointers held in @pa_v2_config.
 * Returns 0 on success, -EINVAL/-EFAULT on bad length or copy failure.
 */
static int pp_read_pa_v2_regs(char __iomem *addr,
				struct mdp_pa_v2_data *pa_v2_config,
				u32 disp_num)
{
	int i;
	u32 data;

	/* globals at +0/+4/+8/+0xC; address always advances so later
	 * offsets stay aligned even when a feature is disabled */
	if (pa_v2_config->flags & MDP_PP_PA_HUE_ENABLE)
		pa_v2_config->global_hue_adj = readl_relaxed(addr);
	addr += 4;
	if (pa_v2_config->flags & MDP_PP_PA_SAT_ENABLE)
		pa_v2_config->global_sat_adj = readl_relaxed(addr);
	addr += 4;
	if (pa_v2_config->flags & MDP_PP_PA_VAL_ENABLE)
		pa_v2_config->global_val_adj = readl_relaxed(addr);
	addr += 4;
	if (pa_v2_config->flags & MDP_PP_PA_CONT_ENABLE)
		pa_v2_config->global_cont_adj = readl_relaxed(addr);
	addr += 4;

	/* Six zone LUT and thresh data */
	if (pa_v2_config->flags & MDP_PP_PA_SIX_ZONE_ENABLE) {
		if (pa_v2_config->six_zone_len != MDP_SIX_ZONE_LUT_SIZE)
			return -EINVAL;

		/* arm the LUT index/read control before streaming entries
		 * (presumably index-update + read-enable bits — see HW spec) */
		data = (3 << 25);
		writel_relaxed(data, addr);

		/* p1 is read at +4 before p0 at +0 for each entry; the
		 * access order matters to the hardware indexer */
		for (i = 0; i < MDP_SIX_ZONE_LUT_SIZE; i++) {
			addr += 4;
			mdss_pp_res->six_zone_lut_curve_p1[disp_num][i] =
				readl_relaxed(addr);
			addr -= 4;
			mdss_pp_res->six_zone_lut_curve_p0[disp_num][i] =
				readl_relaxed(addr) & 0xFFF;
		}

		if (copy_to_user(pa_v2_config->six_zone_curve_p0,
			&mdss_pp_res->six_zone_lut_curve_p0[disp_num][0],
			pa_v2_config->six_zone_len * sizeof(u32))) {
			return -EFAULT;
		}

		if (copy_to_user(pa_v2_config->six_zone_curve_p1,
			&mdss_pp_res->six_zone_lut_curve_p1[disp_num][0],
			pa_v2_config->six_zone_len * sizeof(u32))) {
			return -EFAULT;
		}

		addr += 8;
		pa_v2_config->six_zone_thresh = readl_relaxed(addr);
		addr += 4;
	} else {
		/* skip the six-zone register window */
		addr += 12;
	}

	/* Skin memory color config registers */
	if (pa_v2_config->flags & MDP_PP_PA_SKIN_ENABLE)
		pp_read_pa_mem_col_regs(addr, &pa_v2_config->skin_cfg);

	addr += 0x14;
	/* Sky memory color config registers */
	if (pa_v2_config->flags & MDP_PP_PA_SKY_ENABLE)
		pp_read_pa_mem_col_regs(addr, &pa_v2_config->sky_cfg);

	addr += 0x14;
	/* Foliage memory color config registers */
	if (pa_v2_config->flags & MDP_PP_PA_FOL_ENABLE)
		pp_read_pa_mem_col_regs(addr, &pa_v2_config->fol_cfg);

	return 0;
}
+
+static void pp_read_pa_mem_col_regs(char __iomem *addr,
+				struct mdp_pa_mem_col_cfg *mem_col_cfg)
+{
+	mem_col_cfg->color_adjust_p0 = readl_relaxed(addr);
+	addr += 4;
+	mem_col_cfg->color_adjust_p1 = readl_relaxed(addr);
+	addr += 4;
+	mem_col_cfg->hue_region = readl_relaxed(addr);
+	addr += 4;
+	mem_col_cfg->sat_region = readl_relaxed(addr);
+	addr += 4;
+	mem_col_cfg->val_region = readl_relaxed(addr);
+}
+
+static int pp_copy_pa_six_zone_lut(struct mdp_pa_v2_cfg_data *pa_v2_config,
+				u32 disp_num)
+{
+	if (pa_v2_config->pa_v2_data.six_zone_len != MDP_SIX_ZONE_LUT_SIZE)
+		return -EINVAL;
+
+	if (copy_from_user(&mdss_pp_res->six_zone_lut_curve_p0[disp_num][0],
+			pa_v2_config->pa_v2_data.six_zone_curve_p0,
+			pa_v2_config->pa_v2_data.six_zone_len * sizeof(u32))) {
+		return -EFAULT;
+	}
+	if (copy_from_user(&mdss_pp_res->six_zone_lut_curve_p1[disp_num][0],
+			pa_v2_config->pa_v2_data.six_zone_curve_p1,
+			pa_v2_config->pa_v2_data.six_zone_len * sizeof(u32))) {
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static void pp_read_pcc_regs(char __iomem *addr,
+				struct mdp_pcc_cfg_data *cfg_ptr)
+{
+	cfg_ptr->r.c = readl_relaxed(addr);
+	cfg_ptr->g.c = readl_relaxed(addr + 4);
+	cfg_ptr->b.c = readl_relaxed(addr + 8);
+	addr += 0x10;
+
+	cfg_ptr->r.r = readl_relaxed(addr);
+	cfg_ptr->g.r = readl_relaxed(addr + 4);
+	cfg_ptr->b.r = readl_relaxed(addr + 8);
+	addr += 0x10;
+
+	cfg_ptr->r.g = readl_relaxed(addr);
+	cfg_ptr->g.g = readl_relaxed(addr + 4);
+	cfg_ptr->b.g = readl_relaxed(addr + 8);
+	addr += 0x10;
+
+	cfg_ptr->r.b = readl_relaxed(addr);
+	cfg_ptr->g.b = readl_relaxed(addr + 4);
+	cfg_ptr->b.b = readl_relaxed(addr + 8);
+	addr += 0x10;
+
+	cfg_ptr->r.rr = readl_relaxed(addr);
+	cfg_ptr->g.rr = readl_relaxed(addr + 4);
+	cfg_ptr->b.rr = readl_relaxed(addr + 8);
+	addr += 0x10;
+
+	cfg_ptr->r.rg = readl_relaxed(addr);
+	cfg_ptr->g.rg = readl_relaxed(addr + 4);
+	cfg_ptr->b.rg = readl_relaxed(addr + 8);
+	addr += 0x10;
+
+	cfg_ptr->r.rb = readl_relaxed(addr);
+	cfg_ptr->g.rb = readl_relaxed(addr + 4);
+	cfg_ptr->b.rb = readl_relaxed(addr + 8);
+	addr += 0x10;
+
+	cfg_ptr->r.gg = readl_relaxed(addr);
+	cfg_ptr->g.gg = readl_relaxed(addr + 4);
+	cfg_ptr->b.gg = readl_relaxed(addr + 8);
+	addr += 0x10;
+
+	cfg_ptr->r.gb = readl_relaxed(addr);
+	cfg_ptr->g.gb = readl_relaxed(addr + 4);
+	cfg_ptr->b.gb = readl_relaxed(addr + 8);
+	addr += 0x10;
+
+	cfg_ptr->r.bb = readl_relaxed(addr);
+	cfg_ptr->g.bb = readl_relaxed(addr + 4);
+	cfg_ptr->b.bb = readl_relaxed(addr + 8);
+	addr += 0x10;
+
+	cfg_ptr->r.rgb_0 = readl_relaxed(addr);
+	cfg_ptr->g.rgb_0 = readl_relaxed(addr + 4);
+	cfg_ptr->b.rgb_0 = readl_relaxed(addr + 8);
+	addr += 0x10;
+
+	cfg_ptr->r.rgb_1 = readl_relaxed(addr);
+	cfg_ptr->g.rgb_1 = readl_relaxed(addr + 4);
+	cfg_ptr->b.rgb_1 = readl_relaxed(addr + 8);
+}
+
+static void pp_update_pcc_regs(char __iomem *addr,
+				struct mdp_pcc_cfg_data *cfg_ptr)
+{
+	writel_relaxed(cfg_ptr->r.c, addr);
+	writel_relaxed(cfg_ptr->g.c, addr + 4);
+	writel_relaxed(cfg_ptr->b.c, addr + 8);
+	addr += 0x10;
+
+	writel_relaxed(cfg_ptr->r.r, addr);
+	writel_relaxed(cfg_ptr->g.r, addr + 4);
+	writel_relaxed(cfg_ptr->b.r, addr + 8);
+	addr += 0x10;
+
+	writel_relaxed(cfg_ptr->r.g, addr);
+	writel_relaxed(cfg_ptr->g.g, addr + 4);
+	writel_relaxed(cfg_ptr->b.g, addr + 8);
+	addr += 0x10;
+
+	writel_relaxed(cfg_ptr->r.b, addr);
+	writel_relaxed(cfg_ptr->g.b, addr + 4);
+	writel_relaxed(cfg_ptr->b.b, addr + 8);
+	addr += 0x10;
+
+	writel_relaxed(cfg_ptr->r.rr, addr);
+	writel_relaxed(cfg_ptr->g.rr, addr + 4);
+	writel_relaxed(cfg_ptr->b.rr, addr + 8);
+	addr += 0x10;
+
+	writel_relaxed(cfg_ptr->r.rg, addr);
+	writel_relaxed(cfg_ptr->g.rg, addr + 4);
+	writel_relaxed(cfg_ptr->b.rg, addr + 8);
+	addr += 0x10;
+
+	writel_relaxed(cfg_ptr->r.rb, addr);
+	writel_relaxed(cfg_ptr->g.rb, addr + 4);
+	writel_relaxed(cfg_ptr->b.rb, addr + 8);
+	addr += 0x10;
+
+	writel_relaxed(cfg_ptr->r.gg, addr);
+	writel_relaxed(cfg_ptr->g.gg, addr + 4);
+	writel_relaxed(cfg_ptr->b.gg, addr + 8);
+	addr += 0x10;
+
+	writel_relaxed(cfg_ptr->r.gb, addr);
+	writel_relaxed(cfg_ptr->g.gb, addr + 4);
+	writel_relaxed(cfg_ptr->b.gb, addr + 8);
+	addr += 0x10;
+
+	writel_relaxed(cfg_ptr->r.bb, addr);
+	writel_relaxed(cfg_ptr->g.bb, addr + 4);
+	writel_relaxed(cfg_ptr->b.bb, addr + 8);
+	addr += 0x10;
+
+	writel_relaxed(cfg_ptr->r.rgb_0, addr);
+	writel_relaxed(cfg_ptr->g.rgb_0, addr + 4);
+	writel_relaxed(cfg_ptr->b.rgb_0, addr + 8);
+	addr += 0x10;
+
+	writel_relaxed(cfg_ptr->r.rgb_1, addr);
+	writel_relaxed(cfg_ptr->g.rgb_1, addr + 4);
+	writel_relaxed(cfg_ptr->b.rgb_1, addr + 8);
+}
+
/*
 * mdss_mdp_pcc_config() - polynomial color correction (PCC) read/write.
 * @mfd:      framebuffer device of the targeted display.
 * @config:   PCC data; ops select HW read-back vs. cached write.
 * @copyback: set to 1 when coefficients were read back for the caller.
 *
 * Newer PP revisions dispatch through pp_ops[PCC]; the legacy fallback
 * accesses the fixed PCC register window directly. Writes are cached and
 * applied on the next commit via the DIRTY_PCC flag.
 * Returns 0 on success or a negative errno.
 */
int mdss_mdp_pcc_config(struct msm_fb_data_type *mfd,
				struct mdp_pcc_cfg_data *config,
				u32 *copyback)
{
	int ret = 0;
	u32 disp_num, dspp_num = 0;
	char __iomem *addr;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	struct mdp_pp_cache_res res_cache;

	ret = pp_validate_dspp_mfd_block(mfd, config->block);
	if (ret) {
		pr_err("Invalid block %d mfd index %d, ret %d\n",
				config->block,
				(mfd ? mfd->index : -1), ret);
		return ret;
	}

	if ((config->ops & MDSS_PP_SPLIT_MASK) == MDSS_PP_SPLIT_MASK) {
		pr_warn("Can't set both split bits\n");
		return -EINVAL;
	}

	mutex_lock(&mdss_pp_mutex);
	disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;

	if (config->ops & MDP_PP_OPS_READ) {
		ret = pp_get_dspp_num(disp_num, &dspp_num);
		if (ret) {
			pr_err("%s, no dspp connects to disp %d\n",
				__func__, disp_num);
			goto pcc_config_exit;
		}

		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
		if (pp_ops[PCC].pp_get_config) {
			/*
			 * NOTE(review): this path indexes the DSPP by
			 * disp_num while the legacy path below uses
			 * dspp_num — confirm this is intentional.
			 */
			addr = mdss_mdp_get_dspp_addr_off(disp_num);
			if (IS_ERR_OR_NULL(addr)) {
				pr_err("invalid dspp base_addr %pK\n",
					addr);
				ret = -EINVAL;
				goto pcc_clk_off;
			}
			if (mdata->pp_block_off.dspp_pcc_off == U32_MAX) {
				pr_err("invalid pcc params off %d\n",
					mdata->pp_block_off.dspp_pcc_off);
				ret = -EINVAL;
				goto pcc_clk_off;
			}
			addr += mdata->pp_block_off.dspp_pcc_off;
			ret = pp_ops[PCC].pp_get_config(addr, config,
					DSPP, disp_num);
			if (ret)
				pr_err("pcc get config failed %d\n", ret);
			goto pcc_clk_off;
		}

		/* legacy fixed-offset register window */
		addr = mdss_mdp_get_dspp_addr_off(dspp_num) +
			  MDSS_MDP_REG_DSPP_PCC_BASE;
		pp_read_pcc_regs(addr, config);
		*copyback = 1;
pcc_clk_off:
		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
	} else {
		if (pp_ops[PCC].pp_set_config) {
			pr_debug("version of pcc is %d\n", config->version);
			res_cache.block = DSPP;
			res_cache.mdss_pp_res = mdss_pp_res;
			res_cache.pipe_res = NULL;
			ret = pp_pcc_cache_params(config, &res_cache);
			if (ret) {
				pr_err("pcc config failed version %d ret %d\n",
					config->version, ret);
				ret = -EFAULT;
				goto pcc_config_exit;
			} else
				goto pcc_set_dirty;
		}
		mdss_pp_res->pcc_disp_cfg[disp_num] = *config;
pcc_set_dirty:
		mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_PCC;
	}

pcc_config_exit:
	mutex_unlock(&mdss_pp_mutex);
	return ret;
}
+
+static void pp_read_igc_lut_cached(struct mdp_igc_lut_data *cfg)
+{
+	int i;
+	u32 disp_num;
+
+	disp_num = cfg->block - MDP_LOGICAL_BLOCK_DISP_0;
+	for (i = 0; i < IGC_LUT_ENTRIES; i++) {
+		cfg->c0_c1_data[i] =
+			mdss_pp_res->igc_disp_cfg[disp_num].c0_c1_data[i];
+		cfg->c2_data[i] =
+			mdss_pp_res->igc_disp_cfg[disp_num].c2_data[i];
+	}
+}
+
/*
 * pp_read_igc_lut() - read one block's IGC table from hardware.
 * @cfg:       destination; cfg->len entries are read per color plane.
 * @addr:      base of the IGC c0, c1, c2 data registers (4 bytes apart).
 * @blk_idx:   index of the block whose table should be streamed out.
 * @total_idx: total number of blocks; used to build the mask that
 *             excludes all other blocks from the access.
 */
static void pp_read_igc_lut(struct mdp_igc_lut_data *cfg,
			    char __iomem *addr, u32 blk_idx, int32_t total_idx)
{
	int i;
	u32 data;
	int32_t mask = 0, idx = total_idx;

	/* build a mask with total_idx low bits set */
	while (idx > 0) {
		mask = (mask << 1) + 1;
		idx--;
	}
	/* INDEX_UPDATE & VALUE_UPDATEN */
	data = (3 << 24) | (((~(1 << blk_idx)) & mask) << 28);
	writel_relaxed(data, addr);

	/* repeated reads of the same register return successive table
	 * entries (hardware-indexed access — presumably auto-increment) */
	for (i = 0; i < cfg->len; i++)
		cfg->c0_c1_data[i] = readl_relaxed(addr) & 0xFFF;

	/* c1 plane: packed into the high half-word of c0_c1_data */
	addr += 0x4;
	writel_relaxed(data, addr);
	for (i = 0; i < cfg->len; i++)
		cfg->c0_c1_data[i] |= (readl_relaxed(addr) & 0xFFF) << 16;

	/* c2 plane */
	addr += 0x4;
	writel_relaxed(data, addr);
	for (i = 0; i < cfg->len; i++)
		cfg->c2_data[i] = readl_relaxed(addr) & 0xFFF;
}
+
/*
 * pp_update_igc_lut() - program one block's IGC table into hardware.
 * @cfg:       source table; cfg->len entries per color plane.
 * @addr:      base of the IGC c0, c1, c2 data registers (4 bytes apart).
 * @blk_idx:   index of the block being programmed.
 * @total_idx: total number of blocks, for the exclusion mask.
 *
 * The first write of each plane carries the INDEX_UPDATE bit to reset the
 * hardware table index; subsequent writes stream the remaining entries.
 */
static void pp_update_igc_lut(struct mdp_igc_lut_data *cfg,
				char __iomem *addr, u32 blk_idx,
				u32 total_idx)
{
	int i;
	u32 data;
	int32_t mask = 0, idx = total_idx;

	/* build a mask with total_idx low bits set */
	while (idx > 0) {
		mask = (mask << 1) + 1;
		idx--;
	}

	/* INDEX_UPDATE */
	data = (1 << 25) | (((~(1 << blk_idx)) & mask) << 28);
	writel_relaxed((cfg->c0_c1_data[0] & 0xFFF) | data, addr);

	/* disable index update */
	data &= ~(1 << 25);
	for (i = 1; i < cfg->len; i++)
		writel_relaxed((cfg->c0_c1_data[i] & 0xFFF) | data, addr);

	/* c1 plane: taken from the high half-word of c0_c1_data */
	addr += 0x4;
	data |= (1 << 25);
	writel_relaxed(((cfg->c0_c1_data[0] >> 16) & 0xFFF) | data, addr);
	data &= ~(1 << 25);
	for (i = 1; i < cfg->len; i++)
		writel_relaxed(((cfg->c0_c1_data[i] >> 16) & 0xFFF) | data,
				addr);

	/* c2 plane */
	addr += 0x4;
	data |= (1 << 25);
	writel_relaxed((cfg->c2_data[0] & 0xFFF) | data, addr);
	data &= ~(1 << 25);
	for (i = 1; i < cfg->len; i++)
		writel_relaxed((cfg->c2_data[i] & 0xFFF) | data, addr);
}
+
+static int mdss_mdp_limited_lut_igc_config(struct msm_fb_data_type *mfd,
+					bool enable)
+{
+	int ret = 0;
+	u32 copyback = 0;
+	u32 copy_from_kernel = 1;
+	struct mdp_igc_lut_data config;
+	struct mdp_pp_feature_version igc_version = {
+		.pp_feature = IGC,
+	};
+	struct mdp_igc_lut_data_v1_7 igc_data;
+
+	if (!mfd)
+		return -EINVAL;
+
+	if (!mdss_mdp_mfd_valid_dspp(mfd)) {
+		pr_debug("IGC not supported on display num %d hw configuration\n",
+			mfd->index);
+		return 0;
+	}
+
+	ret = mdss_mdp_pp_get_version(&igc_version);
+	if (ret)
+		pr_err("failed to get default IGC version, ret %d\n", ret);
+
+	config.version = igc_version.version_info;
+	if (enable)
+		config.ops = MDP_PP_OPS_WRITE | MDP_PP_OPS_ENABLE;
+	else
+		config.ops = MDP_PP_OPS_DISABLE;
+	config.block = (mfd->index) + MDP_LOGICAL_BLOCK_DISP_0;
+	switch (config.version) {
+	case mdp_igc_v1_7:
+		config.cfg_payload = &igc_data;
+		igc_data.table_fmt = mdp_igc_custom;
+		igc_data.len = IGC_LUT_ENTRIES;
+		igc_data.c0_c1_data = igc_limited;
+		igc_data.c2_data = igc_limited;
+		break;
+	case mdp_pp_legacy:
+	default:
+		config.cfg_payload = NULL;
+		config.len = IGC_LUT_ENTRIES;
+		config.c0_c1_data = igc_limited;
+		config.c2_data = igc_limited;
+		break;
+	}
+
+	ret = mdss_mdp_igc_lut_config(mfd, &config, &copyback,
+					copy_from_kernel);
+	return ret;
+}
+
+int mdss_mdp_igc_lut_config(struct msm_fb_data_type *mfd,
+					struct mdp_igc_lut_data *config,
+					u32 *copyback, u32 copy_from_kernel)
+{
+	int ret = 0;
+	u32 tbl_idx, disp_num, dspp_num = 0;
+	struct mdp_igc_lut_data local_cfg;
+	char __iomem *igc_addr;
+	struct mdp_pp_cache_res res_cache;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	ret = pp_validate_dspp_mfd_block(mfd, config->block);
+	if (ret) {
+		pr_err("Invalid block %d mfd index %d, ret %d\n",
+				config->block,
+				(mfd ? mfd->index : -1), ret);
+		return ret;
+	}
+
+	if ((config->ops & MDSS_PP_SPLIT_MASK) == MDSS_PP_SPLIT_MASK) {
+		pr_warn("Can't set both split bits\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&mdss_pp_mutex);
+	disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;
+
+	if (config->ops & MDP_PP_OPS_READ) {
+		if (config->len != IGC_LUT_ENTRIES) {
+			pr_err("invalid len for IGC table for read %d\n",
+			       config->len);
+			return -EINVAL;
+		}
+		ret = pp_get_dspp_num(disp_num, &dspp_num);
+		if (ret) {
+			pr_err("%s, no dspp connects to disp %d\n",
+				__func__, disp_num);
+			goto igc_config_exit;
+		}
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+		if (config->ops & MDP_PP_IGC_FLAG_ROM0)
+			tbl_idx = 1;
+		else if (config->ops & MDP_PP_IGC_FLAG_ROM1)
+			tbl_idx = 2;
+		else
+			tbl_idx = 0;
+		igc_addr = mdata->mdp_base + MDSS_MDP_REG_IGC_DSPP_BASE +
+			(0x10 * tbl_idx);
+		local_cfg = *config;
+		local_cfg.c0_c1_data =
+			&mdss_pp_res->igc_lut_c0c1[disp_num][0];
+		local_cfg.c2_data =
+			&mdss_pp_res->igc_lut_c2[disp_num][0];
+		if (mdata->has_no_lut_read)
+			pp_read_igc_lut_cached(&local_cfg);
+		else {
+			if (pp_ops[IGC].pp_get_config) {
+				config->block = dspp_num;
+				pp_ops[IGC].pp_get_config(igc_addr, config,
+							  DSPP, disp_num);
+				goto clock_off;
+			} else {
+				pp_read_igc_lut(&local_cfg, igc_addr,
+						dspp_num, mdata->ndspp);
+			}
+		}
+		if (copy_to_user(config->c0_c1_data, local_cfg.c0_c1_data,
+			config->len * sizeof(u32))) {
+			ret = -EFAULT;
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+			goto igc_config_exit;
+		}
+		if (copy_to_user(config->c2_data, local_cfg.c2_data,
+			config->len * sizeof(u32))) {
+			ret = -EFAULT;
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+			goto igc_config_exit;
+		}
+		*copyback = 1;
+clock_off:
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	} else {
+		if (pp_ops[IGC].pp_set_config) {
+			res_cache.block = DSPP;
+			res_cache.mdss_pp_res = mdss_pp_res;
+			res_cache.pipe_res = NULL;
+			ret = pp_igc_lut_cache_params(config,
+						&res_cache, copy_from_kernel);
+			if (ret) {
+				pr_err("igc caching failed ret %d", ret);
+				goto igc_config_exit;
+			} else
+				goto igc_set_dirty;
+		}
+		if (config->len != IGC_LUT_ENTRIES) {
+			pr_err("invalid len for IGC table for write %d\n",
+			       config->len);
+			return -EINVAL;
+		}
+		if (copy_from_kernel) {
+			memcpy(&mdss_pp_res->igc_lut_c0c1[disp_num][0],
+			config->c0_c1_data, config->len * sizeof(u32));
+			memcpy(&mdss_pp_res->igc_lut_c2[disp_num][0],
+			config->c2_data, config->len * sizeof(u32));
+		} else {
+			if (copy_from_user(
+				&mdss_pp_res->igc_lut_c0c1[disp_num][0],
+				config->c0_c1_data,
+				config->len * sizeof(u32))) {
+				ret = -EFAULT;
+				goto igc_config_exit;
+			}
+			if (copy_from_user(
+				&mdss_pp_res->igc_lut_c2[disp_num][0],
+				config->c2_data, config->len * sizeof(u32))) {
+				ret = -EFAULT;
+				goto igc_config_exit;
+			}
+		}
+		mdss_pp_res->igc_disp_cfg[disp_num] = *config;
+		mdss_pp_res->igc_disp_cfg[disp_num].c0_c1_data =
+			&mdss_pp_res->igc_lut_c0c1[disp_num][0];
+		mdss_pp_res->igc_disp_cfg[disp_num].c2_data =
+			&mdss_pp_res->igc_lut_c2[disp_num][0];
+igc_set_dirty:
+		mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_IGC;
+	}
+
+igc_config_exit:
+	mutex_unlock(&mdss_pp_mutex);
+	return ret;
+}
/*
 * pp_update_gc_one_lut() - program one color channel's GC (gamma) LUT.
 * @addr:       channel's x_start register; slope at +4, offset at +8.
 * @lut_data:   stage table for this channel.
 * @num_stages: valid stages in @lut_data; the last stage is replicated
 *              to pad the table to GC_LUT_SEGMENTS entries.
 *
 * The hardware keeps an internal write index (its current value is read
 * from bits 19:16). Each plane is written starting at the index *after*
 * the current one, wrapping around, so that all GC_LUT_SEGMENTS slots
 * receive a value; repeated writes to the same address land in
 * successive slots.
 */
static void pp_update_gc_one_lut(char __iomem *addr,
		struct mdp_ar_gc_lut_data *lut_data,
		uint8_t num_stages)
{
	int i, start_idx, idx;

	/* x_start plane: resume after the hardware's current index */
	start_idx = ((readl_relaxed(addr) >> 16) & 0xF) + 1;
	for (i = start_idx; i < GC_LUT_SEGMENTS; i++) {
		idx = min((uint8_t)i, (uint8_t)(num_stages-1));
		writel_relaxed(lut_data[idx].x_start, addr);
	}
	for (i = 0; i < start_idx; i++) {
		idx = min((uint8_t)i, (uint8_t)(num_stages-1));
		writel_relaxed(lut_data[idx].x_start, addr);
	}
	/* slope plane */
	addr += 4;
	start_idx = ((readl_relaxed(addr) >> 16) & 0xF) + 1;
	for (i = start_idx; i < GC_LUT_SEGMENTS; i++) {
		idx = min((uint8_t)i, (uint8_t)(num_stages-1));
		writel_relaxed(lut_data[idx].slope, addr);
	}
	for (i = 0; i < start_idx; i++) {
		idx = min((uint8_t)i, (uint8_t)(num_stages-1));
		writel_relaxed(lut_data[idx].slope, addr);
	}
	/* offset plane */
	addr += 4;
	start_idx = ((readl_relaxed(addr) >> 16) & 0xF) + 1;
	for (i = start_idx; i < GC_LUT_SEGMENTS; i++) {
		idx = min((uint8_t)i, (uint8_t)(num_stages-1));
		writel_relaxed(lut_data[idx].offset, addr);
	}
	for (i = 0; i < start_idx; i++) {
		idx = min((uint8_t)i, (uint8_t)(num_stages-1));
		writel_relaxed(lut_data[idx].offset, addr);
	}
}
+static void pp_update_argc_lut(char __iomem *addr,
+				struct mdp_pgc_lut_data *config)
+{
+	pp_update_gc_one_lut(addr, config->r_data, config->num_r_stages);
+	addr += 0x10;
+	pp_update_gc_one_lut(addr, config->g_data, config->num_g_stages);
+	addr += 0x10;
+	pp_update_gc_one_lut(addr, config->b_data, config->num_b_stages);
+}
/*
 * pp_read_gc_one_lut() - read one color channel's GC (gamma) LUT.
 * @addr:    channel's x_start register; slope at +4, offset at +8.
 * @gc_data: destination stage table, GC_LUT_SEGMENTS entries.
 *
 * The hardware maintains an internal read index; each read of a plane's
 * register returns the entry at that index (bits 19:16 of the value) and
 * advances it. The first read anchors the position, then the remaining
 * entries are collected wrapping around the table.
 */
static void pp_read_gc_one_lut(char __iomem *addr,
		struct mdp_ar_gc_lut_data *gc_data)
{
	int i, start_idx, data;

	/* x_start plane: 12-bit values */
	data = readl_relaxed(addr);
	start_idx = (data >> 16) & 0xF;
	gc_data[start_idx].x_start = data & 0xFFF;

	for (i = start_idx + 1; i < GC_LUT_SEGMENTS; i++) {
		data = readl_relaxed(addr);
		gc_data[i].x_start = data & 0xFFF;
	}
	for (i = 0; i < start_idx; i++) {
		data = readl_relaxed(addr);
		gc_data[i].x_start = data & 0xFFF;
	}

	/* slope plane: 15-bit values */
	addr += 4;
	data = readl_relaxed(addr);
	start_idx = (data >> 16) & 0xF;
	gc_data[start_idx].slope = data & 0x7FFF;
	for (i = start_idx + 1; i < GC_LUT_SEGMENTS; i++) {
		data = readl_relaxed(addr);
		gc_data[i].slope = data & 0x7FFF;
	}
	for (i = 0; i < start_idx; i++) {
		data = readl_relaxed(addr);
		gc_data[i].slope = data & 0x7FFF;
	}
	/* offset plane: 15-bit values */
	addr += 4;
	data = readl_relaxed(addr);
	start_idx = (data >> 16) & 0xF;
	gc_data[start_idx].offset = data & 0x7FFF;
	for (i = start_idx + 1; i < GC_LUT_SEGMENTS; i++) {
		data = readl_relaxed(addr);
		gc_data[i].offset = data & 0x7FFF;
	}
	for (i = 0; i < start_idx; i++) {
		data = readl_relaxed(addr);
		gc_data[i].offset = data & 0x7FFF;
	}
}
+
+static int pp_read_argc_lut(struct mdp_pgc_lut_data *config, char __iomem *addr)
+{
+	int ret = 0;
+
+	pp_read_gc_one_lut(addr, config->r_data);
+	addr += 0x10;
+	pp_read_gc_one_lut(addr, config->g_data);
+	addr += 0x10;
+	pp_read_gc_one_lut(addr, config->b_data);
+	return ret;
+}
+
+static int pp_read_argc_lut_cached(struct mdp_pgc_lut_data *config)
+{
+	int i;
+	u32 disp_num;
+	struct mdp_pgc_lut_data *pgc_ptr;
+
+	disp_num = PP_BLOCK(config->block) - MDP_LOGICAL_BLOCK_DISP_0;
+	switch (PP_LOCAT(config->block)) {
+	case MDSS_PP_LM_CFG:
+		pgc_ptr = &mdss_pp_res->argc_disp_cfg[disp_num];
+		break;
+	case MDSS_PP_DSPP_CFG:
+		pgc_ptr = &mdss_pp_res->pgc_disp_cfg[disp_num];
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	for (i = 0; i < GC_LUT_SEGMENTS; i++) {
+		config->r_data[i].x_start = pgc_ptr->r_data[i].x_start;
+		config->r_data[i].slope   = pgc_ptr->r_data[i].slope;
+		config->r_data[i].offset  = pgc_ptr->r_data[i].offset;
+
+		config->g_data[i].x_start = pgc_ptr->g_data[i].x_start;
+		config->g_data[i].slope   = pgc_ptr->g_data[i].slope;
+		config->g_data[i].offset  = pgc_ptr->g_data[i].offset;
+
+		config->b_data[i].x_start = pgc_ptr->b_data[i].x_start;
+		config->b_data[i].slope   = pgc_ptr->b_data[i].slope;
+		config->b_data[i].offset  = pgc_ptr->b_data[i].offset;
+	}
+
+	return 0;
+}
+
+/* Note: Assumes that its inputs have been checked by calling function */
+static void pp_update_hist_lut(char __iomem *addr,
+				struct mdp_hist_lut_data *cfg)
+{
+	int i;
+
+	for (i = 0; i < ENHIST_LUT_ENTRIES; i++)
+		writel_relaxed(cfg->data[i], addr);
+	/* swap */
+	if (PP_LOCAT(cfg->block) == MDSS_PP_DSPP_CFG)
+		writel_relaxed(1, addr + 4);
+	else
+		writel_relaxed(1, addr + 16);
+}
+
+int mdss_mdp_argc_config(struct msm_fb_data_type *mfd,
+				struct mdp_pgc_lut_data *config,
+				u32 *copyback)
+{
+	int ret = 0;
+	u32 disp_num, num = 0, is_lm = 0;
+	struct mdp_pgc_lut_data local_cfg;
+	struct mdp_pgc_lut_data *pgc_ptr;
+	u32 tbl_size, r_size, g_size, b_size;
+	char __iomem *argc_addr = 0;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_ctl *ctl = NULL;
+	u32 dirty_flag = 0;
+
+	if ((PP_BLOCK(config->block) < MDP_LOGICAL_BLOCK_DISP_0) ||
+		(PP_BLOCK(config->block) >= MDP_BLOCK_MAX)) {
+		pr_err("invalid block value %d\n", PP_BLOCK(config->block));
+		return -EINVAL;
+	}
+
+	if ((config->flags & MDSS_PP_SPLIT_MASK) == MDSS_PP_SPLIT_MASK) {
+		pr_warn("Can't set both split bits\n");
+		return -EINVAL;
+	}
+
+	if ((PP_BLOCK(config->block) - MDP_LOGICAL_BLOCK_DISP_0) !=
+			mfd->index) {
+		pr_err("PP block %d does not match corresponding mfd index %d\n",
+				config->block, mfd->index);
+		return -EINVAL;
+	}
+
+	disp_num = PP_BLOCK(config->block) - MDP_LOGICAL_BLOCK_DISP_0;
+	ctl = mfd_to_ctl(mfd);
+	num = (ctl && ctl->mixer_left) ? ctl->mixer_left->num : -1;
+	if (num < 0) {
+		pr_err("invalid mfd index %d config\n",
+				mfd->index);
+		return -EPERM;
+	}
+	switch (PP_LOCAT(config->block)) {
+	case MDSS_PP_LM_CFG:
+		/*
+		 * LM GC LUT should be disabled before being rewritten. Skip
+		 * GC LUT config if it is already enabled.
+		 */
+		if ((mdss_pp_res->pp_disp_sts[disp_num].argc_sts &
+				PP_STS_ENABLE) &&
+				!(config->flags & MDP_PP_OPS_DISABLE)) {
+			pr_err("LM GC already enabled disp %d, skipping config\n",
+					mfd->index);
+			return -EPERM;
+		}
+		argc_addr = mdss_mdp_get_mixer_addr_off(num) +
+			MDSS_MDP_REG_LM_GC_LUT_BASE;
+		pgc_ptr = &mdss_pp_res->argc_disp_cfg[disp_num];
+		dirty_flag = PP_FLAGS_DIRTY_ARGC;
+		break;
+	case MDSS_PP_DSPP_CFG:
+		if (!mdss_mdp_mfd_valid_dspp(mfd)) {
+			pr_err("invalid mfd index %d for dspp config\n",
+				mfd->index);
+			return -EPERM;
+		}
+		argc_addr = mdss_mdp_get_dspp_addr_off(num) +
+					MDSS_MDP_REG_DSPP_GC_BASE;
+		pgc_ptr = &mdss_pp_res->pgc_disp_cfg[disp_num];
+		dirty_flag = PP_FLAGS_DIRTY_PGC;
+		break;
+	default:
+		goto argc_config_exit;
+	}
+
+	mutex_lock(&mdss_pp_mutex);
+
+	tbl_size = GC_LUT_SEGMENTS * sizeof(struct mdp_ar_gc_lut_data);
+	if (config->flags & MDP_PP_OPS_READ) {
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+		if (pp_ops[GC].pp_get_config) {
+			char __iomem *temp_addr = NULL;
+			u32 off = 0;
+
+			is_lm = (PP_LOCAT(config->block) == MDSS_PP_LM_CFG);
+			off = (is_lm) ? mdata->pp_block_off.lm_pgc_off :
+				mdata->pp_block_off.dspp_pgc_off;
+			if (off == U32_MAX) {
+				pr_err("invalid offset for loc %d off %d\n",
+					PP_LOCAT(config->block), U32_MAX);
+				ret = -EINVAL;
+				goto clock_off;
+			}
+			temp_addr = (is_lm) ?
+				     mdss_mdp_get_mixer_addr_off(num) :
+				     mdss_mdp_get_dspp_addr_off(num);
+			if (IS_ERR_OR_NULL(temp_addr)) {
+				pr_err("invalid addr is_lm %d\n", is_lm);
+				ret = -EINVAL;
+				goto clock_off;
+			}
+			temp_addr += off;
+			ret = pp_ops[GC].pp_get_config(temp_addr, config,
+				((is_lm) ? LM : DSPP), disp_num);
+			if (ret)
+				pr_err("gc get config failed %d\n", ret);
+			goto clock_off;
+		}
+		local_cfg = *config;
+		local_cfg.r_data =
+			&mdss_pp_res->gc_lut_r[disp_num][0];
+		local_cfg.g_data =
+			&mdss_pp_res->gc_lut_g[disp_num][0];
+		local_cfg.b_data =
+			&mdss_pp_res->gc_lut_b[disp_num][0];
+		if (mdata->has_no_lut_read)
+			pp_read_argc_lut_cached(&local_cfg);
+		else
+			pp_read_argc_lut(&local_cfg, argc_addr);
+		if (copy_to_user(config->r_data,
+			&mdss_pp_res->gc_lut_r[disp_num][0], tbl_size)) {
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+			ret = -EFAULT;
+			goto argc_config_exit;
+		}
+		if (copy_to_user(config->g_data,
+			&mdss_pp_res->gc_lut_g[disp_num][0], tbl_size)) {
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+			ret = -EFAULT;
+			goto argc_config_exit;
+		}
+		if (copy_to_user(config->b_data,
+			&mdss_pp_res->gc_lut_b[disp_num][0], tbl_size)) {
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+			ret = -EFAULT;
+			goto argc_config_exit;
+		}
+		*copyback = 1;
+clock_off:
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	} else {
+		if (pp_ops[GC].pp_set_config) {
+			pr_debug("version of gc is %d\n", config->version);
+			is_lm = (PP_LOCAT(config->block) == MDSS_PP_LM_CFG);
+			ret = pp_pgc_lut_cache_params(config, mdss_pp_res,
+				((is_lm) ? LM : DSPP));
+			if (ret) {
+				pr_err("pgc cache params failed, ret %d\n",
+					ret);
+				goto argc_config_exit;
+			}
+		} else {
+			r_size = config->num_r_stages *
+				sizeof(struct mdp_ar_gc_lut_data);
+			g_size = config->num_g_stages *
+				sizeof(struct mdp_ar_gc_lut_data);
+			b_size = config->num_b_stages *
+				sizeof(struct mdp_ar_gc_lut_data);
+			if (r_size > tbl_size ||
+			    g_size > tbl_size ||
+			    b_size > tbl_size ||
+			    r_size == 0 ||
+			    g_size == 0 ||
+			    b_size == 0) {
+				ret = -EINVAL;
+				pr_warn("%s, number of rgb stages invalid\n",
+						__func__);
+				goto argc_config_exit;
+			}
+			if (copy_from_user(&mdss_pp_res->gc_lut_r[disp_num][0],
+						config->r_data, r_size)) {
+				ret = -EFAULT;
+				goto argc_config_exit;
+			}
+			if (copy_from_user(&mdss_pp_res->gc_lut_g[disp_num][0],
+						config->g_data, g_size)) {
+				ret = -EFAULT;
+				goto argc_config_exit;
+			}
+			if (copy_from_user(&mdss_pp_res->gc_lut_b[disp_num][0],
+						config->b_data, b_size)) {
+				ret = -EFAULT;
+				goto argc_config_exit;
+			}
+
+			*pgc_ptr = *config;
+			pgc_ptr->r_data =
+				&mdss_pp_res->gc_lut_r[disp_num][0];
+			pgc_ptr->g_data =
+				&mdss_pp_res->gc_lut_g[disp_num][0];
+			pgc_ptr->b_data =
+				&mdss_pp_res->gc_lut_b[disp_num][0];
+		}
+		mdss_pp_res->pp_disp_flags[disp_num] |= dirty_flag;
+	}
+argc_config_exit:
+	mutex_unlock(&mdss_pp_mutex);
+	return ret;
+}
/*
 * mdss_mdp_hist_lut_config() - histogram enhancement LUT read/write.
 * @mfd:      framebuffer device of the targeted display.
 * @config:   LUT data; ops select HW read-back vs. cached write.
 * @copyback: set to 1 when the LUT was read back for the caller.
 *
 * Newer PP revisions dispatch through pp_ops[HIST_LUT]; the legacy path
 * reads the fixed LUT register window directly. Writes are cached and
 * applied on the next commit via DIRTY_ENHIST.
 * Returns 0 on success or a negative errno.
 */
int mdss_mdp_hist_lut_config(struct msm_fb_data_type *mfd,
					struct mdp_hist_lut_data *config,
					u32 *copyback)
{
	int i, ret = 0;
	u32 disp_num, dspp_num = 0;
	char __iomem *hist_addr = NULL, *base_addr = NULL;
	struct mdp_pp_cache_res res_cache;

	ret = pp_validate_dspp_mfd_block(mfd, PP_BLOCK(config->block));
	if (ret) {
		pr_err("Invalid block %d mfd index %d, ret %d\n",
				PP_BLOCK(config->block),
				(mfd ? mfd->index : -1), ret);
		return ret;
	}

	mutex_lock(&mdss_pp_mutex);
	disp_num = PP_BLOCK(config->block) - MDP_LOGICAL_BLOCK_DISP_0;

	if (config->ops & MDP_PP_OPS_READ) {
		ret = pp_get_dspp_num(disp_num, &dspp_num);
		if (ret) {
			pr_err("%s, no dspp connects to disp %d\n",
				__func__, disp_num);
			goto enhist_config_exit;
		}
		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
		base_addr = mdss_mdp_get_dspp_addr_off(dspp_num);
		if (IS_ERR_OR_NULL(base_addr)) {
			pr_err("invalid base addr %pK\n",
				base_addr);
			ret = -EINVAL;
			goto hist_lut_clk_off;
		}
		hist_addr = base_addr + MDSS_MDP_REG_DSPP_HIST_LUT_BASE;
		if (pp_ops[HIST_LUT].pp_get_config) {
			ret = pp_ops[HIST_LUT].pp_get_config(base_addr, config,
				DSPP, disp_num);
			if (ret)
				pr_err("hist_lut get config failed %d\n", ret);
			goto hist_lut_clk_off;
		}

		/* legacy read: repeated reads of the LUT data register
		 * return successive entries */
		for (i = 0; i < ENHIST_LUT_ENTRIES; i++)
			mdss_pp_res->enhist_lut[disp_num][i] =
				readl_relaxed(hist_addr);
		if (copy_to_user(config->data,
			&mdss_pp_res->enhist_lut[disp_num][0],
			ENHIST_LUT_ENTRIES * sizeof(u32))) {
			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
			ret = -EFAULT;
			goto enhist_config_exit;
		}
		*copyback = 1;
hist_lut_clk_off:
		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
	} else {
		if (pp_ops[HIST_LUT].pp_set_config) {
			res_cache.block = DSPP;
			res_cache.mdss_pp_res = mdss_pp_res;
			res_cache.pipe_res = NULL;
			ret = pp_hist_lut_cache_params(config, &res_cache);
			if (ret) {
				pr_err("hist_lut config failed version %d ret %d\n",
					config->version, ret);
				ret = -EFAULT;
				goto enhist_config_exit;
			} else {
				goto enhist_set_dirty;
			}
		}
		if (copy_from_user(&mdss_pp_res->enhist_lut[disp_num][0],
			config->data, ENHIST_LUT_ENTRIES * sizeof(u32))) {
			ret = -EFAULT;
			goto enhist_config_exit;
		}
		/* cache config; re-point data at the kernel copy */
		mdss_pp_res->enhist_disp_cfg[disp_num] = *config;
		mdss_pp_res->enhist_disp_cfg[disp_num].data =
			&mdss_pp_res->enhist_lut[disp_num][0];
enhist_set_dirty:
		mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_ENHIST;
	}
enhist_config_exit:
	mutex_unlock(&mdss_pp_mutex);
	return ret;
}
+
+static int mdss_mdp_panel_default_dither_config(struct msm_fb_data_type *mfd,
+					u32 panel_bpp, bool enable)
+{
+	int ret = 0;
+	struct mdp_dither_cfg_data dither;
+	struct mdp_pp_feature_version dither_version = {
+		.pp_feature = DITHER,
+	};
+	struct mdp_dither_data_v1_7 dither_data;
+
+	if (!mdss_mdp_mfd_valid_dspp(mfd)) {
+		pr_debug("dither config not supported on display num %d\n",
+			mfd->index);
+		return 0;
+	}
+
+	dither.block = mfd->index + MDP_LOGICAL_BLOCK_DISP_0;
+	dither.flags = MDP_PP_OPS_DISABLE;
+
+	ret = mdss_mdp_pp_get_version(&dither_version);
+	if (ret) {
+		pr_err("failed to get default dither version, ret %d\n",
+				ret);
+		return ret;
+	}
+	dither.version = dither_version.version_info;
+	dither.cfg_payload = NULL;
+
+	if (enable) {
+		switch (panel_bpp) {
+		case 24:
+			dither.flags = MDP_PP_OPS_ENABLE | MDP_PP_OPS_WRITE;
+			switch (dither.version) {
+			case mdp_dither_v1_7:
+				dither_data.g_y_depth = 8;
+				dither_data.r_cr_depth = 8;
+				dither_data.b_cb_depth = 8;
+				/*
+				 * Use default dither table by setting len to 0
+				 */
+				dither_data.len = 0;
+				dither.cfg_payload = &dither_data;
+				break;
+			case mdp_pp_legacy:
+			default:
+				dither.g_y_depth = 8;
+				dither.r_cr_depth = 8;
+				dither.b_cb_depth = 8;
+				dither.cfg_payload = NULL;
+				break;
+			}
+			break;
+		case 18:
+			dither.flags = MDP_PP_OPS_ENABLE | MDP_PP_OPS_WRITE;
+			switch (dither.version) {
+			case mdp_dither_v1_7:
+				dither_data.g_y_depth = 6;
+				dither_data.r_cr_depth = 6;
+				dither_data.b_cb_depth = 6;
+				/*
+				 * Use default dither table by setting len to 0
+				 */
+				dither_data.len = 0;
+				dither.cfg_payload = &dither_data;
+				break;
+			case mdp_pp_legacy:
+			default:
+				dither.g_y_depth = 6;
+				dither.r_cr_depth = 6;
+				dither.b_cb_depth = 6;
+				dither.cfg_payload = NULL;
+				break;
+			}
+			break;
+		default:
+			dither.cfg_payload = NULL;
+			break;
+		}
+	}
+	ret = mdss_mdp_dither_config(mfd, &dither, NULL, true);
+	if (ret)
+		pr_err("dither config failed, ret %d\n", ret);
+
+	return ret;
+}
+
+int mdss_mdp_dither_config(struct msm_fb_data_type *mfd,
+					struct mdp_dither_cfg_data *config,
+					u32 *copyback,
+					int copy_from_kernel)
+{
+	u32 disp_num;
+	int ret = 0;
+
+	ret = pp_validate_dspp_mfd_block(mfd, config->block);
+	if (ret) {
+		pr_err("Invalid block %d mfd index %d, ret %d\n",
+				config->block,
+				(mfd ? mfd->index : -1), ret);
+		return ret;
+	}
+
+	if (config->flags & MDP_PP_OPS_READ) {
+		pr_err("Dither read is not supported\n");
+		return -EOPNOTSUPP;
+	}
+
+	if ((config->flags & MDSS_PP_SPLIT_MASK) == MDSS_PP_SPLIT_MASK) {
+		pr_warn("Can't set both split bits\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&mdss_pp_mutex);
+	disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;
+	if (pp_ops[DITHER].pp_set_config) {
+		pr_debug("version of dither is %d\n", config->version);
+		ret = pp_dither_cache_params(config, mdss_pp_res,
+				copy_from_kernel);
+		if (ret) {
+			pr_err("dither config failed version %d ret %d\n",
+				config->version, ret);
+			goto dither_config_exit;
+		} else {
+			goto dither_set_dirty;
+		}
+	}
+
+	mdss_pp_res->dither_disp_cfg[disp_num] = *config;
+dither_set_dirty:
+	mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_DITHER;
+dither_config_exit:
+	mutex_unlock(&mdss_pp_mutex);
+	return ret;
+}
+
+static int pp_gm_has_invalid_lut_size(struct mdp_gamut_cfg_data *config)
+{
+	if (config->tbl_size[0] != GAMUT_T0_SIZE)
+		return -EINVAL;
+	if (config->tbl_size[1] != GAMUT_T1_SIZE)
+		return -EINVAL;
+	if (config->tbl_size[2] != GAMUT_T2_SIZE)
+		return -EINVAL;
+	if (config->tbl_size[3] != GAMUT_T3_SIZE)
+		return -EINVAL;
+	if (config->tbl_size[4] != GAMUT_T4_SIZE)
+		return -EINVAL;
+	if (config->tbl_size[5] != GAMUT_T5_SIZE)
+		return -EINVAL;
+	if (config->tbl_size[6] != GAMUT_T6_SIZE)
+		return -EINVAL;
+	if (config->tbl_size[7] != GAMUT_T7_SIZE)
+		return -EINVAL;
+	return 0;
+}
+
+
/*
 * mdss_mdp_gamut_config() - read or write the DSPP gamut mapping tables
 * @mfd:      framebuffer device owning the display block
 * @config:   gamut request; config->flags selects read vs. write and the
 *            r/g/b_tbl[] pointers reference user-space tables
 * @copyback: set to 1 when table data was copied back to user space
 *
 * Serialized under mdss_pp_mutex. On the write path the tables are only
 * cached (PP_FLAGS_DIRTY_GAMUT); hardware programming happens on commit.
 * Returns 0 on success or a negative errno.
 */
int mdss_mdp_gamut_config(struct msm_fb_data_type *mfd,
					struct mdp_gamut_cfg_data *config,
					u32 *copyback)
{
	int i, j, ret = 0;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	u32 disp_num, dspp_num = 0;
	uint16_t *tbl_off;
	struct mdp_gamut_cfg_data local_cfg;
	uint16_t *r_tbl[MDP_GAMUT_TABLE_NUM];
	uint16_t *g_tbl[MDP_GAMUT_TABLE_NUM];
	uint16_t *b_tbl[MDP_GAMUT_TABLE_NUM];
	char __iomem *addr;
	/* Written before each table read; presumably resets the HW LUT
	 * read index (see the "Reset gamut LUT index" comments below) —
	 * confirm exact bit meaning against the MDP register spec.
	 */
	u32 data = (3 << 20);

	ret = pp_validate_dspp_mfd_block(mfd, config->block);
	if (ret) {
		pr_err("Invalid block %d mfd index %d, ret %d\n",
				config->block,
				(mfd ? mfd->index : -1), ret);
		return ret;
	}

	if ((config->flags & MDSS_PP_SPLIT_MASK) == MDSS_PP_SPLIT_MASK) {
		pr_warn("Can't set both split bits\n");
		return -EINVAL;
	}

	mutex_lock(&mdss_pp_mutex);
	disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;

	if (config->flags & MDP_PP_OPS_READ) {
		ret = pp_get_dspp_num(disp_num, &dspp_num);
		if (ret) {
			pr_err("%s, no dspp connects to disp %d\n",
				__func__, disp_num);
			goto gamut_config_exit;
		}
		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
		if (pp_ops[GAMUT].pp_get_config) {
			/* NOTE(review): passes disp_num here while the legacy
			 * path below uses dspp_num — verify this is intended
			 * for the versioned get_config callback.
			 */
			addr = mdss_mdp_get_dspp_addr_off(disp_num);
			if (IS_ERR_OR_NULL(addr)) {
				pr_err("invalid dspp base addr %pK\n",
				       addr);
				ret = -EINVAL;
				goto gamut_clk_off;
			}
			if (mdata->pp_block_off.dspp_gamut_off == U32_MAX) {
				pr_err("invalid gamut parmas off %d\n",
				       mdata->pp_block_off.dspp_gamut_off);
				ret = -EINVAL;
				goto gamut_clk_off;
			}
			addr += mdata->pp_block_off.dspp_gamut_off;
			ret = pp_ops[GAMUT].pp_get_config(addr, config, DSPP,
						  disp_num);
			if (ret)
				pr_err("gamut get config failed %d\n", ret);
			goto gamut_clk_off;
		}
		if (pp_gm_has_invalid_lut_size(config)) {
			pr_err("invalid lut size for gamut\n");
			ret = -EINVAL;
			goto gamut_clk_off;
		}
		addr = mdss_mdp_get_dspp_addr_off(dspp_num) +
			  MDSS_MDP_REG_DSPP_GAMUT_BASE;
		/* Legacy read: drain R, then G, then B tables; the data
		 * register for each table is 4 bytes after the previous one.
		 */
		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
			r_tbl[i] = kzalloc(
				sizeof(uint16_t) * config->tbl_size[i],
				GFP_KERNEL);
			if (!r_tbl[i]) {
				pr_err("%s: alloc failed\n", __func__);
				ret = -ENOMEM;
				goto gamut_clk_off;
			}
			/* Reset gamut LUT index to 0 */
			writel_relaxed(data, addr);
			for (j = 0; j < config->tbl_size[i]; j++)
				r_tbl[i][j] = readl_relaxed(addr) & 0x1FFF;
			addr += 4;
			ret = copy_to_user(config->r_tbl[i], r_tbl[i],
				     sizeof(uint16_t) * config->tbl_size[i]);
			kfree(r_tbl[i]);
			if (ret) {
				pr_err("%s: copy tbl to usr failed\n",
					__func__);
				ret = -EFAULT;
				goto gamut_clk_off;
			}
		}
		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
			g_tbl[i] = kzalloc(
				sizeof(uint16_t) * config->tbl_size[i],
				GFP_KERNEL);
			if (!g_tbl[i]) {
				pr_err("%s: alloc failed\n", __func__);
				ret = -ENOMEM;
				goto gamut_clk_off;
			}
			/* Reset gamut LUT index to 0 */
			writel_relaxed(data, addr);
			for (j = 0; j < config->tbl_size[i]; j++)
				g_tbl[i][j] = readl_relaxed(addr) & 0x1FFF;
			addr += 4;
			ret = copy_to_user(config->g_tbl[i], g_tbl[i],
				     sizeof(uint16_t) * config->tbl_size[i]);
			kfree(g_tbl[i]);
			if (ret) {
				pr_err("%s: copy tbl to usr failed\n",
					__func__);
				ret = -EFAULT;
				goto gamut_clk_off;
			}
		}
		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
			b_tbl[i] = kzalloc(
				sizeof(uint16_t) * config->tbl_size[i],
				GFP_KERNEL);
			if (!b_tbl[i]) {
				pr_err("%s: alloc failed\n", __func__);
				ret = -ENOMEM;
				goto gamut_clk_off;
			}
			/* Reset gamut LUT index to 0 */
			writel_relaxed(data, addr);
			for (j = 0; j < config->tbl_size[i]; j++)
				b_tbl[i][j] = readl_relaxed(addr) & 0x1FFF;
			addr += 4;
			ret = copy_to_user(config->b_tbl[i], b_tbl[i],
				     sizeof(uint16_t) * config->tbl_size[i]);
			kfree(b_tbl[i]);
			if (ret) {
				pr_err("%s: copy tbl to usr failed\n",
					__func__);
				ret = -EFAULT;
				goto gamut_clk_off;
			}
		}
		*copyback = 1;
gamut_clk_off:
		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
	} else {
		if (pp_ops[GAMUT].pp_set_config) {
			pr_debug("version of gamut is %d\n", config->version);
			ret = pp_gamut_cache_params(config, mdss_pp_res);
			if (ret) {
				pr_err("gamut config failed version %d ret %d\n",
					config->version, ret);
				ret = -EFAULT;
				goto gamut_config_exit;
			} else {
				goto gamut_set_dirty;
			}
		}
		if (pp_gm_has_invalid_lut_size(config)) {
			pr_err("invalid lut size for gamut\n");
			ret = -EINVAL;
			goto gamut_config_exit;
		}
		/* Legacy write: copy all user tables into the per-display
		 * backing store and point local_cfg's table pointers at it.
		 */
		local_cfg = *config;
		tbl_off = mdss_pp_res->gamut_tbl[disp_num];
		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
			local_cfg.r_tbl[i] = tbl_off;
			if (copy_from_user(tbl_off, config->r_tbl[i],
				config->tbl_size[i] * sizeof(uint16_t))) {
				ret = -EFAULT;
				goto gamut_config_exit;
			}
			tbl_off += local_cfg.tbl_size[i];
		}
		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
			local_cfg.g_tbl[i] = tbl_off;
			if (copy_from_user(tbl_off, config->g_tbl[i],
				config->tbl_size[i] * sizeof(uint16_t))) {
				ret = -EFAULT;
				goto gamut_config_exit;
			}
			tbl_off += local_cfg.tbl_size[i];
		}
		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
			local_cfg.b_tbl[i] = tbl_off;
			if (copy_from_user(tbl_off, config->b_tbl[i],
				config->tbl_size[i] * sizeof(uint16_t))) {
				ret = -EFAULT;
				goto gamut_config_exit;
			}
			tbl_off += local_cfg.tbl_size[i];
		}
		mdss_pp_res->gamut_disp_cfg[disp_num] = local_cfg;
gamut_set_dirty:
		mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_GAMUT;
	}
gamut_config_exit:
	mutex_unlock(&mdss_pp_mutex);
	return ret;
}
+
/*
 * pp_hist_read() - drain all histogram bins from the data register
 * @v_addr:    iomem address of the histogram data register
 * @hist_info: collection state; hist_info->data[] receives the bins
 *
 * The first read carries the hardware's current bin index in bits 31:24
 * and that bin's count in bits 23:0; the two loops then read the
 * remaining bins and rotate them back into logical order (this relies
 * on the register advancing through bins on successive reads — implied
 * by the sequential reads here, confirm against the HW spec).
 * Also bumps hist_cnt_read. Returns the sum of all bins.
 */
static u32 pp_hist_read(char __iomem *v_addr,
				struct pp_hist_col_info *hist_info)
{
	int i, i_start;
	u32 sum = 0;
	u32 data;

	data = readl_relaxed(v_addr);
	i_start = data >> 24;
	hist_info->data[i_start] = data & 0xFFFFFF;
	sum += hist_info->data[i_start];
	/* bins after the start index ... */
	for (i = i_start + 1; i < HIST_V_SIZE; i++) {
		hist_info->data[i] = readl_relaxed(v_addr) & 0xFFFFFF;
		sum += hist_info->data[i];
	}
	/* ... then wrap around to the bins before it */
	for (i = 0; i < i_start; i++) {
		hist_info->data[i] = readl_relaxed(v_addr) & 0xFFFFFF;
		sum += hist_info->data[i];
	}
	hist_info->hist_cnt_read++;
	return sum;
}
+
/*
 * pp_hist_enable() - arm histogram collection on one block
 * @hist_info: per-block collection state to arm
 * @req:       user start request (frame_cnt is taken from it)
 * @ctl:       controller whose mfd identifies the logical display, or
 *             NULL for SSPP histograms (disp_num is then left as-is)
 *
 * Assumes that relevant clocks are enabled.
 * Fails with -EBUSY if collection is already enabled. State is reset
 * under hist_lock; the trailing register write unlocks v2 hardware.
 */
static int pp_hist_enable(struct pp_hist_col_info *hist_info,
				struct mdp_histogram_start_req *req,
				struct mdss_mdp_ctl *ctl)
{
	unsigned long flag;
	int ret = 0;

	mutex_lock(&hist_info->hist_mutex);
	/* check if it is idle */
	spin_lock_irqsave(&hist_info->hist_lock, flag);
	if (hist_info->col_en) {
		spin_unlock_irqrestore(&hist_info->hist_lock, flag);
		pr_err("%s Hist collection has already been enabled %pK\n",
			__func__, hist_info->base);
		ret = -EBUSY;
		goto exit;
	}
	hist_info->col_state = HIST_IDLE;
	hist_info->col_en = true;
	hist_info->frame_cnt = req->frame_cnt;
	hist_info->hist_cnt_read = 0;
	hist_info->hist_cnt_sent = 0;
	hist_info->hist_cnt_time = 0;
	if (ctl && ctl->mfd) {
		hist_info->ctl = ctl;
		hist_info->disp_num =
			ctl->mfd->index + MDP_LOGICAL_BLOCK_DISP_0;
	}
	/* if hist v2, make sure HW is unlocked */
	writel_relaxed(0, hist_info->base);
	spin_unlock_irqrestore(&hist_info->hist_lock, flag);
exit:
	mutex_unlock(&hist_info->hist_mutex);
	return ret;
}
+
/* Maximum value a 24-bit histogram bin can hold (2^24 - 1). */
#define MDSS_MAX_HIST_BIN_SIZE 16777215
/*
 * mdss_mdp_hist_start() - start histogram collection per user request
 * @req: start request; req->block encodes location (SSPP vs. DSPP),
 *       the target display/pipes, and the frame count
 *
 * For SSPP requests each selected pipe's histogram is armed; for DSPP
 * requests every mixer attached to the logical display is armed and
 * the display is marked PP_FLAGS_DIRTY_HIST_COL. Interrupts are
 * requested via mdss_mdp_hist_intr_req(). Returns 0 on success or a
 * negative errno (-EPROBE_DEFER if MDSS is not up yet).
 */
int mdss_mdp_hist_start(struct mdp_histogram_start_req *req)
{
	struct pp_hist_col_info *hist_info;
	int i, ret = 0;
	u32 disp_num, dspp_num = 0;
	u32 mixer_cnt, mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
	u32 frame_size, intr_mask = 0;
	struct mdss_mdp_pipe *pipe;
	struct mdss_mdp_ctl *ctl;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	bool sspp_hist_supp = false;

	if (!mdss_is_ready())
		return -EPROBE_DEFER;

	if (mdata->mdp_rev < MDSS_MDP_HW_REV_103) {
		pr_err("Unsupported mdp rev %d\n", mdata->mdp_rev);
		return -EOPNOTSUPP;
	}

	if (pp_driver_ops.is_sspp_hist_supp)
		sspp_hist_supp =  pp_driver_ops.is_sspp_hist_supp();

	if (!sspp_hist_supp &&
		(PP_LOCAT(req->block) == MDSS_PP_SSPP_CFG)) {
		pr_warn("No histogram on SSPP\n");
		ret = -EINVAL;
		goto hist_exit;
	}

	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);

	if (PP_LOCAT(req->block) == MDSS_PP_SSPP_CFG) {
		/* SSPP: arm each pipe named in the request's argument mask */
		i = MDSS_PP_ARG_MASK & req->block;
		if (!i) {
			ret = -EINVAL;
			pr_warn("Must pass pipe arguments, %d\n", i);
			goto hist_stop_clk;
		}

		for (i = 0; i < MDSS_PP_ARG_NUM; i++) {
			if (!PP_ARG(i, req->block))
				continue;
			pipe = __get_hist_pipe(i);
			if (IS_ERR_OR_NULL(pipe))
				continue;
			hist_info = &pipe->pp_res.hist;
			ret = pp_hist_enable(hist_info, req, NULL);
			intr_mask = 1 << hist_info->intr_shift;
			mdss_mdp_hist_intr_req(&mdata->hist_intr, intr_mask,
					       true);
			mdss_mdp_pipe_unmap(pipe);
		}
	} else if (PP_LOCAT(req->block) == MDSS_PP_DSPP_CFG) {
		if ((PP_BLOCK(req->block) < MDP_LOGICAL_BLOCK_DISP_0) ||
				(PP_BLOCK(req->block) >= MDP_BLOCK_MAX))
			goto hist_stop_clk;

		disp_num = PP_BLOCK(req->block) - MDP_LOGICAL_BLOCK_DISP_0;
		mixer_cnt = mdss_mdp_get_ctl_mixers(disp_num, mixer_id);

		if (!mixer_cnt) {
			pr_err("%s, no dspp connects to disp %d\n",
					__func__, disp_num);
			ret = -EPERM;
			goto hist_stop_clk;
		}
		if (mixer_cnt > mdata->nmixers_intf) {
			pr_err("%s, Too many dspp connects to disp %d\n",
					__func__, mixer_cnt);
			ret = -EPERM;
			goto hist_stop_clk;
		}

		ctl = mdata->mixer_intf[mixer_id[0]].ctl;
		frame_size = (ctl->width * ctl->height);

		/* Reject frame counts whose accumulated sum could overflow
		 * a 24-bit bin (frame_size * frame_cnt > max bin value).
		 */
		if (!frame_size ||
			((MDSS_MAX_HIST_BIN_SIZE / frame_size) <
			req->frame_cnt)) {
			pr_err("%s, too many frames for given display size, %d\n",
					__func__, req->frame_cnt);
			ret = -EINVAL;
			goto hist_stop_clk;
		}

		for (i = 0; i < mixer_cnt; i++) {
			dspp_num = mixer_id[i];
			if (dspp_num >= mdata->ndspp) {
				ret = -EINVAL;
				pr_warn("Invalid dspp num %d\n", dspp_num);
				goto hist_stop_clk;
			}
			hist_info = &mdss_pp_res->dspp_hist[dspp_num];
			ret = pp_hist_enable(hist_info, req, ctl);
			if (ret) {
				pr_err("failed to enable histogram dspp_num %d ret %d\n",
				       dspp_num, ret);
				goto hist_stop_clk;
			}
			intr_mask |= 1 << hist_info->intr_shift;
			mdss_pp_res->pp_disp_flags[disp_num] |=
							PP_FLAGS_DIRTY_HIST_COL;
		}
		/* Request all collected DSPP interrupt bits in one call */
		mdss_mdp_hist_intr_req(&mdata->hist_intr, intr_mask,
					   true);
	}
hist_stop_clk:
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
hist_exit:
	return ret;
}
+
/*
 * pp_hist_disable() - disarm histogram collection on one block
 * @hist_info: per-block collection state to disarm
 *
 * Returns -EINVAL if collection was not enabled. Clears the collection
 * state under hist_lock, unlocks the HW, then drops the block's bit
 * from the histogram interrupt request mask.
 */
static int pp_hist_disable(struct pp_hist_col_info *hist_info)
{
	int ret = 0;
	unsigned long flag;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	u32 intr_mask = 1;

	mutex_lock(&hist_info->hist_mutex);
	spin_lock_irqsave(&hist_info->hist_lock, flag);
	if (hist_info->col_en == false) {
		spin_unlock_irqrestore(&hist_info->hist_lock, flag);
		pr_debug("Histogram already disabled (%pK)\n", hist_info->base);
		ret = -EINVAL;
		goto exit;
	}
	hist_info->col_en = false;
	hist_info->col_state = HIST_UNKNOWN;
	hist_info->disp_num = 0;
	hist_info->ctl = NULL;
	/* make sure HW is unlocked */
	writel_relaxed(0, hist_info->base);
	spin_unlock_irqrestore(&hist_info->hist_lock, flag);
	/* interrupt release must happen outside the spinlock */
	mdss_mdp_hist_intr_req(&mdata->hist_intr,
				intr_mask << hist_info->intr_shift, false);
	ret = 0;
exit:
	mutex_unlock(&hist_info->hist_mutex);
	return ret;
}
+
/*
 * mdss_mdp_hist_stop() - stop histogram collection per user request
 * @block: encoded location (SSPP pipes via the argument mask, or a
 *         DSPP logical display block)
 *
 * Mirrors mdss_mdp_hist_start(): disarms each matching pipe or every
 * DSPP whose disp_num matches the requested display, marking the
 * latter PP_FLAGS_DIRTY_HIST_COL. Returns 0 on success or a negative
 * errno.
 */
int mdss_mdp_hist_stop(u32 block)
{
	int i, ret = 0;
	u32 disp_num;
	struct pp_hist_col_info *hist_info;
	struct mdss_mdp_pipe *pipe;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	if (!mdata)
		return -EPERM;

	if (mdata->mdp_rev < MDSS_MDP_HW_REV_103) {
		pr_err("Unsupported mdp rev %d\n", mdata->mdp_rev);
		return -EOPNOTSUPP;
	}

	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
	if (PP_LOCAT(block) == MDSS_PP_SSPP_CFG) {
		i = MDSS_PP_ARG_MASK & block;
		if (!i) {
			pr_warn("Must pass pipe arguments, %d\n", i);
			goto hist_stop_clk;
		}

		for (i = 0; i < MDSS_PP_ARG_NUM; i++) {
			if (!PP_ARG(i, block))
				continue;
			pipe = __get_hist_pipe(i);
			if (IS_ERR_OR_NULL(pipe)) {
				pr_warn("Invalid Hist pipe (%d)\n", i);
				continue;
			}
			hist_info = &pipe->pp_res.hist;
			ret = pp_hist_disable(hist_info);
			mdss_mdp_pipe_unmap(pipe);
			if (ret)
				goto hist_stop_clk;
		}
	} else if (PP_LOCAT(block) == MDSS_PP_DSPP_CFG) {
		if ((PP_BLOCK(block) < MDP_LOGICAL_BLOCK_DISP_0) ||
				(PP_BLOCK(block) >= MDP_BLOCK_MAX))
			goto hist_stop_clk;

		/* disp_num keeps the logical-block offset here because it is
		 * compared against hist_info->disp_num, which pp_hist_enable
		 * stored as mfd->index + MDP_LOGICAL_BLOCK_DISP_0.
		 */
		disp_num = PP_BLOCK(block);
		for (i = 0; i < mdata->ndspp; i++) {
			hist_info = &mdss_pp_res->dspp_hist[i];
			if (disp_num != hist_info->disp_num)
				continue;
			ret = pp_hist_disable(hist_info);
			if (ret)
				goto hist_stop_clk;
			mdss_pp_res->pp_disp_flags[i] |=
							PP_FLAGS_DIRTY_HIST_COL;
		}
	}
hist_stop_clk:
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
	return ret;
}
+
+/**
+ * mdss_mdp_hist_intr_req() - Request changes the histogram interrupts
+ * @intr: structure containting state of interrupt register
+ * @bits: the bits on interrupt register that should be changed
+ * @en: true if bits should be set, false if bits should be cleared
+ *
+ * Adds or removes the bits from the interrupt request.
+ *
+ * Does not store reference count for each bit. I.e. a bit with multiple
+ * enable requests can be disabled with a single disable request.
+ *
+ * Return: 0 if uneventful, errno on invalid input
+ */
+int mdss_mdp_hist_intr_req(struct mdss_intr *intr, u32 bits, bool en)
+{
+	unsigned long flag;
+	int ret = 0;
+
+	if (!intr) {
+		pr_err("NULL addr passed, %pK\n", intr);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&intr->lock, flag);
+	if (en)
+		intr->req |= bits;
+	else
+		intr->req &= ~bits;
+	spin_unlock_irqrestore(&intr->lock, flag);
+
+	mdss_mdp_hist_intr_setup(intr, MDSS_IRQ_REQ);
+
+	return ret;
+}
+
+
+#define MDSS_INTR_STATE_ACTIVE	1
+#define MDSS_INTR_STATE_NULL	0
+#define MDSS_INTR_STATE_SUSPEND	-1
+
+/**
+ * mdss_mdp_hist_intr_setup() - Manage intr and clk depending on requests.
+ * @intr: structure containting state of intr reg
+ * @state: MDSS_IRQ_SUSPEND if suspend is needed,
+ *         MDSS_IRQ_RESUME if resume is needed,
+ *         MDSS_IRQ_REQ if neither (i.e. requesting an interrupt)
+ *
+ * This function acts as a gatekeeper for the interrupt, making sure that the
+ * MDP clocks are enabled while the interrupts are enabled to prevent
+ * unclocked accesses.
+ *
+ * To reduce code repetition, 4 state transitions have been encoded here. Each
+ * transition updates the interrupt's state structure (mdss_intr) to reflect
+ * the which bits have been requested (intr->req), are currently enabled
+ * (intr->curr), as well as defines which interrupt bits need to be enabled or
+ * disabled ('en' and 'dis' respectively). The 4th state is not explicity
+ * coded in the if/else chain, but is for MDSS_IRQ_REQ's when the interrupt
+ * is in suspend, in which case, the only change required (intr->req being
+ * updated) has already occurred in the calling function.
+ *
+ * To control the clock, which can't be requested while holding the spinlock,
+ * the initial state is compared with the exit state to detect when the
+ * interrupt needs a clock.
+ *
+ * The clock requests surrounding the majority of this function serve to
+ * enable the register writes to change the interrupt register, as well as to
+ * prevent a race condition that could keep the clocks on (due to mdp_clk_cnt
+ * never being decremented below 0) when a enable/disable occurs but the
+ * disable requests the clocks disabled before the enable is able to request
+ * the clocks enabled.
+ *
+ * Return: 0 if uneventful, errno on repeated action or invalid input
+ */
+int mdss_mdp_hist_intr_setup(struct mdss_intr *intr, int type)
+{
+	unsigned long flag;
+	int ret = 0, req_clk = 0;
+	u32 en = 0, dis = 0;
+	u32 diff, init_curr;
+	int init_state;
+
+	if (!intr) {
+		WARN(1, "NULL intr pointer\n");
+		return -EINVAL;
+	}
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+	spin_lock_irqsave(&intr->lock, flag);
+
+	init_state = intr->state;
+	init_curr = intr->curr;
+
+	if (type == MDSS_IRQ_RESUME) {
+		/* resume intrs */
+		if (intr->state == MDSS_INTR_STATE_ACTIVE) {
+			ret = -EPERM;
+			goto exit;
+		}
+		en = intr->req;
+		dis = 0;
+		intr->curr = intr->req;
+		intr->state = intr->curr ?
+				MDSS_INTR_STATE_ACTIVE : MDSS_INTR_STATE_NULL;
+	} else if (type == MDSS_IRQ_SUSPEND) {
+		/* suspend intrs */
+		if (intr->state == MDSS_INTR_STATE_SUSPEND) {
+			ret = -EPERM;
+			goto exit;
+		}
+		en = 0;
+		dis = intr->curr;
+		intr->curr = 0;
+		intr->state = MDSS_INTR_STATE_SUSPEND;
+	} else if (intr->state != MDSS_IRQ_SUSPEND &&
+			type == MDSS_IRQ_REQ) {
+		/* Not resuming/suspending or in suspend state */
+		diff = intr->req ^ intr->curr;
+		en = diff & ~intr->curr;
+		dis = diff & ~intr->req;
+		intr->curr = intr->req;
+		intr->state = intr->curr ?
+				MDSS_INTR_STATE_ACTIVE : MDSS_INTR_STATE_NULL;
+	}
+
+	if (en)
+		mdss_mdp_hist_irq_enable(en);
+	if (dis)
+		mdss_mdp_hist_irq_disable(dis);
+
+	if ((init_state != MDSS_INTR_STATE_ACTIVE) &&
+				(intr->state == MDSS_INTR_STATE_ACTIVE))
+		req_clk = 1;
+	else if ((init_state == MDSS_INTR_STATE_ACTIVE) &&
+				(intr->state != MDSS_INTR_STATE_ACTIVE))
+		req_clk = -1;
+
+exit:
+	spin_unlock_irqrestore(&intr->lock, flag);
+	if (req_clk < 0)
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	else if (req_clk > 0)
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	return ret;
+}
+
+static int pp_hist_collect(struct mdp_histogram_data *hist,
+				struct pp_hist_col_info *hist_info,
+				char __iomem *ctl_base, u32 expect_sum,
+				u32 block)
+{
+	int ret = 0;
+	u32 sum;
+	char __iomem *v_base = NULL;
+	unsigned long flag;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!mdata)
+		return -EPERM;
+
+	mutex_lock(&hist_info->hist_mutex);
+	spin_lock_irqsave(&hist_info->hist_lock, flag);
+	if ((hist_info->col_en == 0) ||
+		(hist_info->col_state != HIST_READY)) {
+		pr_err("invalid params for histogram hist_info->col_en %d hist_info->col_state %d",
+			hist_info->col_en, hist_info->col_state);
+		ret = -ENODATA;
+		spin_unlock_irqrestore(&hist_info->hist_lock, flag);
+		goto hist_collect_exit;
+	}
+	spin_unlock_irqrestore(&hist_info->hist_lock, flag);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+	if (pp_ops[HIST].pp_get_config) {
+		sum = pp_ops[HIST].pp_get_config(ctl_base, hist_info,
+				block, 0);
+	} else {
+		if (block == DSPP)
+			v_base = ctl_base +
+				MDSS_MDP_REG_DSPP_HIST_DATA_BASE;
+		else if (block == SSPP_VIG)
+			v_base = ctl_base +
+				MDSS_MDP_REG_VIG_HIST_CTL_BASE;
+		sum = pp_hist_read(v_base, hist_info);
+	}
+	writel_relaxed(0, hist_info->base);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	if (sum < 0) {
+		pr_err("failed to get the hist data, sum = %d\n", sum);
+		ret = sum;
+	} else if (expect_sum && sum != expect_sum) {
+		pr_err("hist error: bin sum incorrect! (%d/%d)\n",
+			sum, expect_sum);
+		ret = -EINVAL;
+	}
+hist_collect_exit:
+	mutex_unlock(&hist_info->hist_mutex);
+	return ret;
+}
+
/*
 * mdss_mdp_hist_collect() - gather histogram data and copy it to user
 * @hist: user request; hist->block encodes location (DSPP display or
 *        SSPP pipe mask), hist->c0 receives bin_cnt u32 bins
 *
 * DSPP path: collects from every mixer of the logical display, sums
 * multiple DSPPs into hists[0], and atomically returns all blocks to
 * HIST_IDLE so the hist interrupt sees consistent state.
 * SSPP path: collects from each requested VIG pipe, concatenating the
 * results when more than one pipe is involved.
 * Returns 0 on success or a negative errno.
 */
int mdss_mdp_hist_collect(struct mdp_histogram_data *hist)
{
	int i, j, off, ret = 0, temp_ret = 0;
	struct pp_hist_col_info *hist_info;
	struct pp_hist_col_info *hists[MDSS_MDP_INTF_MAX_LAYERMIXER];
	u32 dspp_num, disp_num;
	char __iomem *ctl_base;
	u32 hist_cnt, mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
	u32 *hist_concat = NULL;
	u32 *hist_data_addr;
	u32 pipe_cnt = 0;
	u32 pipe_num = MDSS_MDP_SSPP_VIG0;
	u32 exp_sum = 0;
	struct mdss_mdp_pipe *pipe;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	unsigned long flag;

	if (mdata->mdp_rev < MDSS_MDP_HW_REV_103) {
		pr_err("Unsupported mdp rev %d\n", mdata->mdp_rev);
		return -EOPNOTSUPP;
	}

	if (PP_LOCAT(hist->block) == MDSS_PP_DSPP_CFG) {
		if ((PP_BLOCK(hist->block) < MDP_LOGICAL_BLOCK_DISP_0) ||
				(PP_BLOCK(hist->block) >= MDP_BLOCK_MAX))
			return -EINVAL;

		disp_num = PP_BLOCK(hist->block) - MDP_LOGICAL_BLOCK_DISP_0;
		hist_cnt = mdss_mdp_get_ctl_mixers(disp_num, mixer_id);

		if (!hist_cnt) {
			pr_err("%s, no dspp connects to disp %d\n",
					__func__, disp_num);
			ret = -EPERM;
			goto hist_collect_exit;
		}
		if (hist_cnt > mdata->nmixers_intf) {
			pr_err("%s, Too many dspp connects to disp %d\n",
					__func__, hist_cnt);
			ret = -EPERM;
			goto hist_collect_exit;
		}

		/* Resolve all DSPP states up front, then collect each */
		for (i = 0; i < hist_cnt; i++) {
			dspp_num = mixer_id[i];
			if (dspp_num >= mdata->ndspp) {
				ret = -EINVAL;
				pr_warn("Invalid dspp num %d\n", dspp_num);
				goto hist_collect_exit;
			}
			hists[i] = &mdss_pp_res->dspp_hist[dspp_num];
		}
		for (i = 0; i < hist_cnt; i++) {
			dspp_num = mixer_id[i];
			ctl_base = mdss_mdp_get_dspp_addr_off(dspp_num);
			exp_sum = (mdata->mixer_intf[dspp_num].width *
					mdata->mixer_intf[dspp_num].height);
			/* remember an earlier failure before overwriting ret */
			if (ret)
				temp_ret = ret;
			ret = pp_hist_collect(hist, hists[i], ctl_base,
				exp_sum, DSPP);
			if (ret)
				pr_err("hist error: dspp[%d] collect %d\n",
					dspp_num, ret);
		}
		/* state of dspp histogram blocks attached to logical display
		 * should be changed atomically to idle. This will ensure that
		 * histogram interrupt will see consistent states for all dspp's
		 * attached to logical display.
		 */
		for (i = 0; i < hist_cnt; i++) {
			if (!i)
				spin_lock_irqsave(&hists[i]->hist_lock, flag);
			else
				spin_lock(&hists[i]->hist_lock);
		}
		for (i = 0; i < hist_cnt; i++)
			hists[i]->col_state = HIST_IDLE;
		/* release in reverse acquisition order */
		for (i = hist_cnt - 1; i >= 0; i--) {
			if (!i)
				spin_unlock_irqrestore(&hists[i]->hist_lock,
						       flag);
			else
				spin_unlock(&hists[i]->hist_lock);
		}
		if (ret || temp_ret) {
			ret = ret ? ret : temp_ret;
			goto hist_collect_exit;
		}

		if (hist->bin_cnt != HIST_V_SIZE) {
			pr_err("User not expecting size %d output\n",
							HIST_V_SIZE);
			ret = -EINVAL;
			goto hist_collect_exit;
		}
		/* Multiple DSPPs on one display: sum bins into hists[0] */
		if (hist_cnt > 1) {
			for (i = 1; i < hist_cnt; i++) {
				mutex_lock(&hists[i]->hist_mutex);
				for (j = 0; j < HIST_V_SIZE; j++)
					hists[0]->data[j] += hists[i]->data[j];
				mutex_unlock(&hists[i]->hist_mutex);
			}
		}
		hist_data_addr = hists[0]->data;

		for (i = 0; i < hist_cnt; i++)
			hists[i]->hist_cnt_sent++;

	} else if (PP_LOCAT(hist->block) == MDSS_PP_SSPP_CFG) {

		hist_cnt = MDSS_PP_ARG_MASK & hist->block;
		if (!hist_cnt) {
			pr_warn("Must pass pipe arguments, %d\n", hist_cnt);
			goto hist_collect_exit;
		}

		/* Find the first pipe requested */
		for (i = 0; i < MDSS_PP_ARG_NUM; i++) {
			if (PP_ARG(i, hist_cnt)) {
				pipe_num = i;
				break;
			}
		}

		pipe = __get_hist_pipe(pipe_num);
		if (IS_ERR_OR_NULL(pipe)) {
			pr_warn("Invalid starting hist pipe, %d\n", pipe_num);
			ret = -ENODEV;
			goto hist_collect_exit;
		}
		hist_info  = &pipe->pp_res.hist;
		mdss_mdp_pipe_unmap(pipe);
		/* NOTE(review): the three loops below each increment
		 * pipe_cnt for every requested pipe (including ones whose
		 * lookup fails), so pipe_cnt ends up a multiple of the
		 * pipe count and feeds the bin_cnt check further down —
		 * verify this against the expected user-space contract.
		 */
		for (i = pipe_num; i < MDSS_PP_ARG_NUM; i++) {
			if (!PP_ARG(i, hist->block))
				continue;
			pipe_cnt++;
			pipe = __get_hist_pipe(i);
			if (IS_ERR_OR_NULL(pipe)) {
				pr_warn("Invalid Hist pipe (%d)\n", i);
				continue;
			}
			hist_info = &pipe->pp_res.hist;
			mdss_mdp_pipe_unmap(pipe);
		}
		for (i = pipe_num; i < MDSS_PP_ARG_NUM; i++) {
			if (!PP_ARG(i, hist->block))
				continue;
			pipe_cnt++;
			pipe = __get_hist_pipe(i);
			if (IS_ERR_OR_NULL(pipe)) {
				pr_warn("Invalid Hist pipe (%d)\n", i);
				continue;
			}
			hist_info = &pipe->pp_res.hist;
			ctl_base = pipe->base;
			if (ret)
				temp_ret = ret;
			ret = pp_hist_collect(hist, hist_info, ctl_base,
				exp_sum, SSPP_VIG);
			if (ret)
				pr_debug("hist error: pipe[%d] collect: %d\n",
					pipe->num, ret);

			mdss_mdp_pipe_unmap(pipe);
		}
		for (i = pipe_num; i < MDSS_PP_ARG_NUM; i++) {
			if (!PP_ARG(i, hist->block))
				continue;
			pipe_cnt++;
			pipe = __get_hist_pipe(i);
			if (IS_ERR_OR_NULL(pipe)) {
				pr_warn("Invalid Hist pipe (%d)\n", i);
				continue;
			}
			hist_info = &pipe->pp_res.hist;
			mdss_mdp_pipe_unmap(pipe);
		}
		if (ret || temp_ret) {
			ret = ret ? ret : temp_ret;
			goto hist_collect_exit;
		}

		if (pipe_cnt != 0 &&
			(hist->bin_cnt != (HIST_V_SIZE * pipe_cnt))) {
			pr_err("User not expecting size %d output\n",
						pipe_cnt * HIST_V_SIZE);
			ret = -EINVAL;
			goto hist_collect_exit;
		}
		/* Multiple pipes: concatenate per-pipe bins into one buffer */
		if (pipe_cnt > 1) {
			hist_concat = kzalloc(HIST_V_SIZE * pipe_cnt *
						sizeof(u32), GFP_KERNEL);
			if (!hist_concat) {
				ret = -ENOMEM;
				goto hist_collect_exit;
			}

			for (i = pipe_num; i < MDSS_PP_ARG_NUM; i++) {
				if (!PP_ARG(i, hist->block))
					continue;
				pipe = __get_hist_pipe(i);
				if (IS_ERR_OR_NULL(pipe)) {
					pr_warn("Invalid Hist pipe (%d)\n", i);
					continue;
				}
				hist_info  = &pipe->pp_res.hist;
				off = HIST_V_SIZE * i;
				mutex_lock(&hist_info->hist_mutex);
				for (j = off; j < off + HIST_V_SIZE; j++)
					hist_concat[j] =
						hist_info->data[j - off];
				hist_info->hist_cnt_sent++;
				mutex_unlock(&hist_info->hist_mutex);
				mdss_mdp_pipe_unmap(pipe);
			}

			hist_data_addr = hist_concat;
		} else {
			hist_data_addr = hist_info->data;
		}
	} else {
		pr_info("No Histogram at location %d\n", PP_LOCAT(hist->block));
		goto hist_collect_exit;
	}
	ret = copy_to_user(hist->c0, hist_data_addr, sizeof(u32) *
								hist->bin_cnt);
hist_collect_exit:
	kfree(hist_concat);

	return ret;
}
+
/*
 * get_hist_info_from_isr() - map one pending hist interrupt to its block
 * @isr: in/out pending interrupt bits; the bits belonging to the block
 *       this call resolves are cleared so the caller's loop terminates
 *
 * DSPP interrupts resolve to mdss_pp_res->dspp_hist[]; VIG interrupts
 * resolve through a pipe search (RECT0). Note that each branch's final
 * else clears the last block's bits without testing them, so any stray
 * bit inside the group mask still drains from @isr.
 * Returns the block's collection state, or NULL if the VIG pipe lookup
 * fails (its bits are cleared first, so the caller still makes progress).
 */
static inline struct pp_hist_col_info *get_hist_info_from_isr(u32 *isr)
{
	u32 blk_idx;
	struct pp_hist_col_info *hist_info = NULL;
	struct mdss_mdp_pipe *pipe;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	if (*isr & HIST_INTR_DSPP_MASK) {
		if (*isr & (MDSS_MDP_HIST_INTR_DSPP_0_DONE |
				MDSS_MDP_HIST_INTR_DSPP_0_RESET_DONE)) {
			blk_idx = 0;
			*isr &= ~(MDSS_MDP_HIST_INTR_DSPP_0_DONE |
				MDSS_MDP_HIST_INTR_DSPP_0_RESET_DONE);
		} else if (*isr & (MDSS_MDP_HIST_INTR_DSPP_1_DONE |
				MDSS_MDP_HIST_INTR_DSPP_1_RESET_DONE)) {
			blk_idx = 1;
			*isr &= ~(MDSS_MDP_HIST_INTR_DSPP_1_DONE |
				MDSS_MDP_HIST_INTR_DSPP_1_RESET_DONE);
		} else if (*isr & (MDSS_MDP_HIST_INTR_DSPP_2_DONE |
				MDSS_MDP_HIST_INTR_DSPP_2_RESET_DONE)) {
			blk_idx = 2;
			*isr &= ~(MDSS_MDP_HIST_INTR_DSPP_2_DONE |
				MDSS_MDP_HIST_INTR_DSPP_2_RESET_DONE);
		} else {
			blk_idx = 3;
			*isr &= ~(MDSS_MDP_HIST_INTR_DSPP_3_DONE |
				MDSS_MDP_HIST_INTR_DSPP_3_RESET_DONE);
		}
		hist_info = &mdss_pp_res->dspp_hist[blk_idx];
	} else {
		if (*isr & (MDSS_MDP_HIST_INTR_VIG_0_DONE |
				MDSS_MDP_HIST_INTR_VIG_0_RESET_DONE)) {
			blk_idx = MDSS_MDP_SSPP_VIG0;
			*isr &= ~(MDSS_MDP_HIST_INTR_VIG_0_DONE |
				MDSS_MDP_HIST_INTR_VIG_0_RESET_DONE);
		} else if (*isr & (MDSS_MDP_HIST_INTR_VIG_1_DONE |
				MDSS_MDP_HIST_INTR_VIG_1_RESET_DONE)) {
			blk_idx = MDSS_MDP_SSPP_VIG1;
			*isr &= ~(MDSS_MDP_HIST_INTR_VIG_1_DONE |
				MDSS_MDP_HIST_INTR_VIG_1_RESET_DONE);
		} else if (*isr & (MDSS_MDP_HIST_INTR_VIG_2_DONE |
				MDSS_MDP_HIST_INTR_VIG_2_RESET_DONE)) {
			blk_idx = MDSS_MDP_SSPP_VIG2;
			*isr &= ~(MDSS_MDP_HIST_INTR_VIG_2_DONE |
				MDSS_MDP_HIST_INTR_VIG_2_RESET_DONE);
		} else {
			blk_idx = MDSS_MDP_SSPP_VIG3;
			*isr &= ~(MDSS_MDP_HIST_INTR_VIG_3_DONE |
				MDSS_MDP_HIST_INTR_VIG_3_RESET_DONE);
		}
		pipe = mdss_mdp_pipe_search(mdata, BIT(blk_idx),
				MDSS_MDP_PIPE_RECT0);
		if (IS_ERR_OR_NULL(pipe)) {
			pr_debug("pipe DNE, %d\n", blk_idx);
			return NULL;
		}
		hist_info = &pipe->pp_res.hist;
	}

	return hist_info;
}
+
+/**
+ * mdss_mdp_hist_intr_done - Handle histogram interrupts.
+ * @isr: incoming histogram interrupts as bit mask
+ *
+ * This function takes the histogram interrupts received by the
+ * MDP interrupt handler, and handles each of the interrupts by
+ * progressing the histogram state if necessary and then clearing
+ * the interrupt.
+ */
+void mdss_mdp_hist_intr_done(u32 isr)
+{
+	u32 isr_blk, is_hist_done, isr_tmp;
+	struct pp_hist_col_info *hist_info = NULL;
+	u32 isr_mask = HIST_V2_INTR_BIT_MASK;
+	u32 intr_mask = 1, disp_num = 0;
+
+	if (pp_driver_ops.get_hist_isr_info)
+		pp_driver_ops.get_hist_isr_info(&isr_mask);
+
+	isr &= isr_mask;
+	while (isr != 0) {
+		isr_tmp = isr;
+		/*
+		 * get_hist_info_from_isr() clears the bits it consumed from
+		 * isr even on failure, so the loop always terminates.
+		 */
+		hist_info = get_hist_info_from_isr(&isr);
+		if (hist_info == NULL) {
+			pr_err("hist interrupt gave incorrect blk_idx\n");
+			continue;
+		}
+		isr_blk = (isr_tmp >> hist_info->intr_shift) & 0x3;
+		is_hist_done = isr_blk & 0x1;
+		spin_lock(&hist_info->hist_lock);
+		/* hist_info already NULL-checked above */
+		if (is_hist_done && hist_info->col_en &&
+			hist_info->col_state == HIST_IDLE) {
+			hist_info->col_state = HIST_READY;
+			disp_num = hist_info->disp_num;
+			/* Clear the interrupt until next commit */
+			mdss_mdp_hist_irq_clear_mask(intr_mask <<
+						hist_info->intr_shift);
+			writel_relaxed(1, hist_info->base);
+			spin_unlock(&hist_info->hist_lock);
+			mdss_mdp_hist_intr_notify(disp_num);
+		} else {
+			spin_unlock(&hist_info->hist_lock);
+		}
+	}
+}
+
+/* Find the framebuffer data for the powered-on ctl with the given fb index */
+static struct msm_fb_data_type *mdss_get_mfd_from_index(int index)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct msm_fb_data_type *found = NULL;
+	int i;
+
+	for (i = 0; i < mdata->nctl; i++) {
+		struct mdss_mdp_ctl *ctl = mdata->ctl_off + i;
+
+		if (!mdss_mdp_ctl_is_power_on(ctl) || !ctl->mfd)
+			continue;
+		if (ctl->mfd->index == index)
+			found = ctl->mfd;
+	}
+	return found;
+}
+
+/* Map a mixer number to its side (left/right/none) on the given display */
+static int pp_num_to_side(struct mdss_mdp_ctl *ctl, u32 num)
+{
+	u32 mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
+	u32 cnt;
+
+	if (!ctl || !ctl->mfd)
+		return -EINVAL;
+
+	cnt = mdss_mdp_get_ctl_mixers(ctl->mfd->index, mixer_id);
+	/* A single-mixer display has no notion of sides */
+	if (cnt < 2)
+		return MDSS_SIDE_NONE;
+	if (num == mixer_id[1])
+		return MDSS_SIDE_RIGHT;
+	if (num == mixer_id[0])
+		return MDSS_SIDE_LEFT;
+
+	pr_err("invalid, not on any side\n");
+	return -EINVAL;
+}
+
+/*
+ * Look up the Assertive Display context for a framebuffer device.
+ * On success *ret_ad points at the AD config for the display's left mixer
+ * and 0 is returned; otherwise *ret_ad stays NULL and a negative errno
+ * describes why AD is unavailable here.
+ */
+static int mdss_mdp_get_ad(struct msm_fb_data_type *mfd,
+					struct mdss_ad_info **ret_ad)
+{
+	struct mdss_mdp_ctl *ctl;
+	struct mdss_data_type *mdata;
+
+	*ret_ad = NULL;
+
+	if (!mfd) {
+		pr_err("invalid parameter mfd %pK\n", mfd);
+		return -EINVAL;
+	}
+
+	mdata = mfd_to_mdata(mfd);
+	if (!mdata->nad_cfgs) {
+		pr_debug("Assertive Display not supported by device\n");
+		return -ENODEV;
+	}
+
+	if (!mdss_mdp_mfd_valid_ad(mfd)) {
+		pr_debug("AD not supported on display num %d hw config\n",
+			mfd->index);
+		return -EPERM;
+	}
+
+	/* External (DTV) displays never get AD */
+	if (mfd->panel_info->type == DTV_PANEL) {
+		pr_debug("AD not supported on external display\n");
+		return -EPERM;
+	}
+
+	ctl = mfd_to_ctl(mfd);
+	if (!ctl || !ctl->mixer_left)
+		return -EPERM;
+
+	*ret_ad = &mdata->ad_cfgs[ctl->mixer_left->num];
+	return 0;
+}
+
+/* must call this function from within ad->lock */
+static int pp_ad_invalidate_input(struct msm_fb_data_type *mfd)
+{
+	struct mdss_ad_info *ad;
+	int rc;
+
+	if (!mfd) {
+		pr_err("Invalid mfd\n");
+		return -EINVAL;
+	}
+
+	rc = mdss_mdp_get_ad(mfd, &ad);
+	if (rc == -ENODEV || rc == -EPERM) {
+		/* No AD on this display: nothing to invalidate */
+		pr_debug("AD not supported on device, disp num %d\n",
+			mfd->index);
+		return 0;
+	}
+	if (rc || !ad) {
+		pr_err("Failed to get ad info: ret = %d, ad = 0x%pK\n",
+			rc, ad);
+		return rc;
+	}
+
+	pr_debug("AD backlight level changed (%d), trigger update to AD\n",
+						mfd->ad_bl_level);
+	if (ad->cfg.mode == MDSS_AD_MODE_AUTO_BL) {
+		pr_err("AD auto backlight no longer supported.\n");
+		return -EINVAL;
+	}
+
+	/* Re-arm the calculation while AD is running */
+	if (ad->state & PP_AD_STATE_RUN) {
+		ad->calc_itr = ad->cfg.stab_itr;
+		ad->sts |= PP_AD_STS_DIRTY_VSYNC | PP_AD_STS_DIRTY_DATA;
+	}
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_ad_config() - apply an Assertive Display init or cfg request.
+ * @mfd:      framebuffer device the request targets
+ * @init_cfg: user-supplied init/cfg payload plus op flags
+ *
+ * MDP_PP_AD_INIT copies init parameters and the backlight linearization /
+ * attenuation LUTs from user space; MDP_PP_AD_CFG copies the runtime cfg.
+ * The split-mode ops bits are latched and, if they changed after init was
+ * written, init is marked dirty for rewrite. OPS_ENABLE / OPS_DISABLE
+ * toggle the AD enable state. Returns 0 on success or a negative errno.
+ */
+int mdss_mdp_ad_config(struct msm_fb_data_type *mfd,
+			struct mdss_ad_init_cfg *init_cfg)
+{
+	struct mdss_ad_info *ad;
+	struct msm_fb_data_type *bl_mfd;
+	int lin_ret = -1, inv_ret = -1, att_ret = -1, ret = 0;
+	u32 last_ops;
+	struct mdss_overlay_private *mdp5_data;
+
+	ret = mdss_mdp_get_ad(mfd, &ad);
+	if (ret == -ENODEV || ret == -EPERM) {
+		pr_err("AD not supported on device, disp num %d\n",
+			mfd->index);
+		return ret;
+	} else if (ret || !ad) {
+		pr_err("Failed to get ad info: ret = %d, ad = 0x%pK\n",
+			ret, ad);
+		return ret;
+	}
+	/* Writeback targets borrow the primary display's backlight handle */
+	if (mfd->panel_info->type == WRITEBACK_PANEL) {
+		bl_mfd = mdss_get_mfd_from_index(0);
+		if (!bl_mfd)
+			return ret;
+	} else {
+		bl_mfd = mfd;
+	}
+
+	if ((init_cfg->ops & MDSS_PP_SPLIT_MASK) == MDSS_PP_SPLIT_MASK) {
+		pr_warn("Can't set both split bits\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ad->lock);
+	if (init_cfg->ops & MDP_PP_AD_INIT) {
+		memcpy(&ad->init, &init_cfg->params.init,
+				sizeof(struct mdss_ad_init));
+		/* Both forward and inverse linearization LUTs must copy OK */
+		if (init_cfg->params.init.bl_lin_len == AD_BL_LIN_LEN) {
+			lin_ret = copy_from_user(&ad->bl_lin,
+				init_cfg->params.init.bl_lin,
+				init_cfg->params.init.bl_lin_len *
+				sizeof(uint32_t));
+			inv_ret = copy_from_user(&ad->bl_lin_inv,
+				init_cfg->params.init.bl_lin_inv,
+				init_cfg->params.init.bl_lin_len *
+				sizeof(uint32_t));
+			if (lin_ret || inv_ret)
+				ret = -ENOMEM;
+		} else {
+			ret = -EINVAL;
+		}
+		if (ret) {
+			ad->state &= ~PP_AD_STATE_BL_LIN;
+			goto ad_config_exit;
+		} else
+			ad->state |= PP_AD_STATE_BL_LIN;
+
+		if ((init_cfg->params.init.bl_att_len == AD_BL_ATT_LUT_LEN) &&
+			(init_cfg->params.init.bl_att_lut)) {
+			att_ret = copy_from_user(&ad->bl_att_lut,
+				init_cfg->params.init.bl_att_lut,
+				init_cfg->params.init.bl_att_len *
+				sizeof(uint32_t));
+			if (att_ret)
+				ret = -ENOMEM;
+		} else {
+			ret = -EINVAL;
+		}
+		/*
+		 * NOTE(review): the attenuation-LUT path reuses the
+		 * PP_AD_STATE_BL_LIN flag rather than a dedicated bit --
+		 * presumably intentional (both LUT sets gate the same
+		 * feature), but worth confirming.
+		 */
+		if (ret) {
+			ad->state &= ~PP_AD_STATE_BL_LIN;
+			goto ad_config_exit;
+		} else
+			ad->state |= PP_AD_STATE_BL_LIN;
+
+		ad->sts |= PP_AD_STS_DIRTY_INIT;
+	} else if (init_cfg->ops & MDP_PP_AD_CFG) {
+		memcpy(&ad->cfg, &init_cfg->params.cfg,
+				sizeof(struct mdss_ad_cfg));
+		if (ad->state & PP_AD_STATE_IPC_RESUME)
+			ad->cfg.mode |= MDSS_AD_MODE_IPC_BIT;
+		ad->cfg.backlight_scale = MDSS_MDP_AD_BL_SCALE;
+		ad->sts |= PP_AD_STS_DIRTY_CFG;
+		mdp5_data = mfd_to_mdp5_data(mfd);
+		if (mdp5_data)
+			mdp5_data->ad_events = 0;
+	}
+
+	last_ops = ad->ops & MDSS_PP_SPLIT_MASK;
+	ad->ops = init_cfg->ops & MDSS_PP_SPLIT_MASK;
+	/*
+	 *  if there is a change in the split mode config, the init values
+	 *  need to be re-written to hardware (if they have already been
+	 *  written or if there is data pending to be written). Check for
+	 *  pending data (DIRTY_INIT) is not checked here since it will not
+	 *  affect the outcome of this conditional (i.e. if init hasn't
+	 *  already been written (*_STATE_INIT is set), this conditional will
+	 *  only evaluate to true (and set the DIRTY bit) if the DIRTY bit has
+	 *  already been set).
+	 */
+	if ((last_ops ^ ad->ops) && (ad->state & PP_AD_STATE_INIT))
+		ad->sts |= PP_AD_STS_DIRTY_INIT;
+
+
+	if (!ret && (init_cfg->ops & MDP_PP_OPS_DISABLE)) {
+		ad->sts &= ~PP_STS_ENABLE;
+		/* Drop the lock so a queued calc worker can finish/cancel */
+		mutex_unlock(&ad->lock);
+		cancel_work_sync(&ad->calc_work);
+		mutex_lock(&ad->lock);
+		ad->mfd = NULL;
+		ad->bl_mfd = NULL;
+	} else if (!ret && (init_cfg->ops & MDP_PP_OPS_ENABLE)) {
+		ad->sts |= PP_STS_ENABLE;
+		ad->mfd = mfd;
+		ad->bl_mfd = bl_mfd;
+	}
+ad_config_exit:
+	mutex_unlock(&ad->lock);
+	return ret;
+}
+
+/*
+ * mdss_mdp_ad_input() - feed a runtime input (ambient light, strength, or
+ * calibration backlight) into the Assertive Display engine.
+ * @mfd:   framebuffer device the input targets
+ * @input: the input value plus its mode
+ * @wait:  unused in the visible paths except being forced to 0 for CALIB
+ *
+ * Validates the input against the configured AD mode, stores it and marks
+ * the data/vsync state dirty so the next commit reprograms the hardware.
+ * Returns 0 on success or a negative errno.
+ */
+int mdss_mdp_ad_input(struct msm_fb_data_type *mfd,
+			struct mdss_ad_input *input, int wait) {
+	int ret = 0;
+	struct mdss_ad_info *ad;
+	u32 bl;
+	struct mdss_overlay_private *mdp5_data;
+
+	ret = mdss_mdp_get_ad(mfd, &ad);
+	if (ret == -ENODEV || ret == -EPERM) {
+		pr_err("AD not supported on device, disp num %d\n",
+			mfd->index);
+		return ret;
+	} else if (ret || !ad) {
+		pr_err("Failed to get ad info: ret = %d, ad = 0x%pK\n",
+			ret, ad);
+		return ret;
+	}
+
+	mutex_lock(&ad->lock);
+	/* Calibration inputs are allowed even before init/cfg is complete */
+	if ((!PP_AD_STATE_IS_INITCFG(ad->state) &&
+			!PP_AD_STS_IS_DIRTY(ad->sts)) &&
+			(input->mode != MDSS_AD_MODE_CALIB)) {
+		pr_warn("AD not initialized or configured.\n");
+		ret = -EPERM;
+		goto error;
+	}
+	switch (input->mode) {
+	case MDSS_AD_MODE_AUTO_BL:
+	case MDSS_AD_MODE_AUTO_STR:
+		/* These modes consume an ambient light reading */
+		if (!MDSS_AD_MODE_DATA_MATCH(ad->cfg.mode,
+				MDSS_AD_INPUT_AMBIENT)) {
+			pr_err("Invalid mode %x\n", ad->cfg.mode);
+			ret = -EINVAL;
+			goto error;
+		}
+		if (input->in.amb_light > MDSS_MDP_MAX_AD_AL) {
+			pr_warn("invalid input ambient light\n");
+			ret = -EINVAL;
+			goto error;
+		}
+		ad->ad_data_mode = MDSS_AD_INPUT_AMBIENT;
+		pr_debug("ambient = %d\n", input->in.amb_light);
+		ad->ad_data = input->in.amb_light;
+		ad->calc_itr = ad->cfg.stab_itr;
+		ad->sts |= PP_AD_STS_DIRTY_VSYNC;
+		ad->sts |= PP_AD_STS_DIRTY_DATA;
+		mdp5_data = mfd_to_mdp5_data(mfd);
+		if (mdp5_data)
+			mdp5_data->ad_events = 0;
+		break;
+	case MDSS_AD_MODE_TARG_STR:
+	case MDSS_AD_MODE_MAN_STR:
+		/* These modes consume a strength value */
+		if (!MDSS_AD_MODE_DATA_MATCH(ad->cfg.mode,
+				MDSS_AD_INPUT_STRENGTH)) {
+			pr_err("Invalid mode %x\n", ad->cfg.mode);
+			ret = -EINVAL;
+			goto error;
+		}
+		if (input->in.strength > MDSS_MDP_MAX_AD_STR) {
+			pr_warn("invalid input strength\n");
+			ret = -EINVAL;
+			goto error;
+		}
+		ad->ad_data_mode = MDSS_AD_INPUT_STRENGTH;
+		pr_debug("strength = %d\n", input->in.strength);
+		ad->ad_data = input->in.strength;
+		ad->calc_itr = ad->cfg.stab_itr;
+		ad->sts |= PP_AD_STS_DIRTY_VSYNC;
+		ad->sts |= PP_AD_STS_DIRTY_DATA;
+		break;
+	case MDSS_AD_MODE_CALIB:
+		wait = 0;
+		if (mfd->calib_mode) {
+			bl = input->in.calib_bl;
+			if (bl >= AD_BL_LIN_LEN) {
+				pr_warn("calib_bl 255 max!\n");
+				break;
+			}
+			/*
+			 * Lock-order: ad->lock must be dropped before taking
+			 * bl_lock for the backlight update, then reacquired.
+			 */
+			mutex_unlock(&ad->lock);
+			mutex_lock(&mfd->bl_lock);
+			MDSS_BRIGHT_TO_BL(bl, bl, mfd->panel_info->bl_max,
+					mfd->panel_info->brightness_max);
+			mdss_fb_set_backlight(mfd, bl);
+			mutex_unlock(&mfd->bl_lock);
+			mutex_lock(&ad->lock);
+			mfd->calib_mode_bl = bl;
+		} else {
+			pr_warn("should be in calib mode\n");
+		}
+		break;
+	default:
+		pr_warn("invalid default %d\n", input->mode);
+		ret = -EINVAL;
+		goto error;
+	}
+error:
+	mutex_unlock(&ad->lock);
+	return ret;
+}
+
+/*
+ * pp_ad_input_write() - program the current AD input values into the
+ * hardware registers appropriate for the configured mode.
+ *
+ * AUTO_STR additionally latches ad_data into last_ad_data so MAN_IPC can
+ * replay it later; MAN_IPC writes the latched ambient/strength values
+ * rather than the live ones.
+ */
+static void pp_ad_input_write(struct mdss_mdp_ad *ad_hw,
+						struct mdss_ad_info *ad)
+{
+	char __iomem *base;
+
+	base = ad_hw->base;
+	switch (ad->cfg.mode) {
+	case MDSS_AD_MODE_AUTO_BL:
+		writel_relaxed(ad->ad_data, base + MDSS_MDP_REG_AD_AL);
+		break;
+	case MDSS_AD_MODE_AUTO_STR:
+		pr_debug("bl_data = %d, ad_data = %d\n", ad->bl_data,
+				ad->ad_data);
+		/* Remember the live ambient value for a later IPC replay */
+		ad->last_ad_data = ad->ad_data;
+		ad->last_ad_data_valid = true;
+		writel_relaxed(ad->bl_data, base + MDSS_MDP_REG_AD_BL);
+		writel_relaxed(ad->ad_data, base + MDSS_MDP_REG_AD_AL);
+		break;
+	case MDSS_AD_MODE_TARG_STR:
+		writel_relaxed(ad->bl_data, base + MDSS_MDP_REG_AD_BL);
+		writel_relaxed(ad->ad_data, base + MDSS_MDP_REG_AD_TARG_STR);
+		break;
+	case MDSS_AD_MODE_MAN_STR:
+		writel_relaxed(ad->bl_data, base + MDSS_MDP_REG_AD_BL);
+		writel_relaxed(ad->ad_data, base + MDSS_MDP_REG_AD_STR_MAN);
+		break;
+	case MDSS_AD_MODE_MAN_IPC:
+		/* Fall back to the live value if nothing was latched yet */
+		if (!ad->last_ad_data_valid) {
+			ad->last_ad_data = ad->ad_data;
+			ad->last_ad_data_valid = true;
+		}
+		pr_debug("bl_data = %d, last_ad_data = %d, last_str = %d\n",
+				ad->bl_data, ad->last_ad_data, ad->last_str);
+		writel_relaxed(ad->bl_data, base + MDSS_MDP_REG_AD_BL);
+		writel_relaxed(ad->last_ad_data, base + MDSS_MDP_REG_AD_AL);
+		writel_relaxed(ad->last_str, base + MDSS_MDP_REG_AD_STR_MAN);
+		break;
+	default:
+		pr_warn("Invalid mode! %d\n", ad->cfg.mode);
+		break;
+	}
+}
+
+#define MDSS_AD_MERGED_WIDTH 4
+/*
+ * pp_ad_init_write() - program the (mostly static) AD init parameters into
+ * one AD hardware instance.
+ *
+ * For dual-pipe displays without split-mode ops (HW rev >= 103), each AD
+ * block is additionally programmed with frame/processing start-end columns
+ * and tile control so the two halves merge with MDSS_AD_MERGED_WIDTH
+ * pixels of overlap.
+ */
+static void pp_ad_init_write(struct mdss_mdp_ad *ad_hw, struct mdss_ad_info *ad,
+						struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_data_type *mdata = ctl->mdata;
+	u32 temp, cfg_buf_mode;
+	u32 frame_start, frame_end, procs_start, procs_end, tile_ctrl;
+	u32 num;
+	int side;
+	char __iomem *base;
+	bool is_calc, is_dual_pipe, split_mode;
+	u32 mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
+	u32 mixer_num;
+
+	mixer_num = mdss_mdp_get_ctl_mixers(ctl->mfd->index, mixer_id);
+	if (mixer_num > 1)
+		is_dual_pipe = true;
+	else
+		is_dual_pipe = false;
+
+	base = ad_hw->base;
+	is_calc = ad->calc_hw_num == ad_hw->num;
+	split_mode = !!(ad->ops & MDSS_PP_SPLIT_MASK);
+
+	writel_relaxed(ad->init.i_control[0] & 0x1F,
+				base + MDSS_MDP_REG_AD_CON_CTRL_0);
+	writel_relaxed(ad->init.i_control[1] << 8,
+				base + MDSS_MDP_REG_AD_CON_CTRL_1);
+
+	/* Registers packing two 16-bit fields: high | low */
+	temp = ad->init.white_lvl << 16;
+	temp |= ad->init.black_lvl & 0xFFFF;
+	writel_relaxed(temp, base + MDSS_MDP_REG_AD_BW_LVL);
+
+	writel_relaxed(ad->init.var, base + MDSS_MDP_REG_AD_VAR);
+
+	writel_relaxed(ad->init.limit_ampl, base + MDSS_MDP_REG_AD_AMP_LIM);
+
+	writel_relaxed(ad->init.i_dither, base + MDSS_MDP_REG_AD_DITH);
+
+	temp = ad->init.slope_max << 8;
+	temp |= ad->init.slope_min & 0xFF;
+	writel_relaxed(temp, base + MDSS_MDP_REG_AD_SLOPE);
+
+	writel_relaxed(ad->init.dither_ctl, base + MDSS_MDP_REG_AD_DITH_CTRL);
+
+	writel_relaxed(ad->init.format, base + MDSS_MDP_REG_AD_CTRL_0);
+	writel_relaxed(ad->init.auto_size, base + MDSS_MDP_REG_AD_CTRL_1);
+
+	/* In split mode each AD block only sees its own mixer's width */
+	if (split_mode)
+		temp = mdata->mixer_intf[ad_hw->num].width << 16;
+	else
+		temp = ad->init.frame_w << 16;
+	temp |= ad->init.frame_h & 0xFFFF;
+	writel_relaxed(temp, base + MDSS_MDP_REG_AD_FRAME_SIZE);
+
+	temp = ad->init.logo_v << 8;
+	temp |= ad->init.logo_h & 0xFF;
+	writel_relaxed(temp, base + MDSS_MDP_REG_AD_LOGO_POS);
+
+	pp_ad_cfg_lut(base + MDSS_MDP_REG_AD_LUT_FI, ad->init.asym_lut);
+	pp_ad_cfg_lut(base + MDSS_MDP_REG_AD_LUT_CC, ad->init.color_corr_lut);
+
+	if (mdata->mdp_rev >= MDSS_MDP_HW_REV_103) {
+		if (is_dual_pipe && !split_mode) {
+			num = ad_hw->num;
+			side = pp_num_to_side(ctl, num);
+			tile_ctrl = 0x5;
+			/* Mark the non-calc block as the merge follower */
+			if ((ad->calc_hw_num + 1) == num)
+				tile_ctrl |= 0x10;
+
+			if (side <= MDSS_SIDE_NONE) {
+				WARN(1, "error finding sides, %d\n", side);
+				frame_start = 0;
+				procs_start = frame_start;
+				frame_end = 0;
+				procs_end = frame_end;
+			} else if (side == MDSS_SIDE_LEFT) {
+				frame_start = 0;
+				procs_start = 0;
+				frame_end = mdata->mixer_intf[num].width +
+							MDSS_AD_MERGED_WIDTH;
+				procs_end = mdata->mixer_intf[num].width;
+			} else {
+				procs_start = ad->init.frame_w -
+					(mdata->mixer_intf[num].width);
+				procs_end = ad->init.frame_w;
+				frame_start = procs_start -
+							MDSS_AD_MERGED_WIDTH;
+				frame_end = procs_end;
+			}
+			/* Registers take inclusive end columns */
+			procs_end -= 1;
+			frame_end -= 1;
+			cfg_buf_mode = 0x3;
+		} else {
+			frame_start = 0x0;
+			frame_end = 0xFFFF;
+			procs_start = 0x0;
+			procs_end = 0xFFFF;
+			tile_ctrl = 0x0;
+			cfg_buf_mode = 0x2;
+		}
+
+		writel_relaxed(frame_start, base + MDSS_MDP_REG_AD_FRAME_START);
+		writel_relaxed(frame_end, base + MDSS_MDP_REG_AD_FRAME_END);
+		writel_relaxed(procs_start, base + MDSS_MDP_REG_AD_PROCS_START);
+		writel_relaxed(procs_end, base + MDSS_MDP_REG_AD_PROCS_END);
+		writel_relaxed(tile_ctrl, base + MDSS_MDP_REG_AD_TILE_CTRL);
+		writel_relaxed(cfg_buf_mode, base + MDSS_MDP_REG_AD_CFG_BUF);
+	}
+}
+
+#define MDSS_PP_AD_DEF_CALIB 0x6E
+/*
+ * pp_ad_cfg_write() - program the mode-dependent AD configuration
+ * registers.
+ *
+ * The switch deliberately cascades: AUTO_BL falls through AUTO_STR,
+ * TARG_STR and MAN_STR, so each mode writes its own registers plus
+ * everything the simpler modes need. MAN_IPC is self-contained and
+ * replays the latched calibration values instead of the live ones.
+ */
+static void pp_ad_cfg_write(struct mdss_mdp_ad *ad_hw, struct mdss_ad_info *ad)
+{
+	char __iomem *base;
+	u32 temp, temp_calib = MDSS_PP_AD_DEF_CALIB;
+
+	base = ad_hw->base;
+	switch (ad->cfg.mode) {
+	case MDSS_AD_MODE_AUTO_BL:
+		temp = ad->cfg.backlight_max << 16;
+		temp |= ad->cfg.backlight_min & 0xFFFF;
+		writel_relaxed(temp, base + MDSS_MDP_REG_AD_BL_MINMAX);
+		writel_relaxed(ad->cfg.amb_light_min,
+				base + MDSS_MDP_REG_AD_AL_MIN);
+		temp = ad->cfg.filter[1] << 16;
+		temp |= ad->cfg.filter[0] & 0xFFFF;
+		writel_relaxed(temp, base + MDSS_MDP_REG_AD_AL_FILT);
+		/* fall-through */
+	case MDSS_AD_MODE_AUTO_STR:
+		/* Latch calibration for a possible later IPC replay */
+		memcpy(ad->last_calib, ad->cfg.calib, sizeof(ad->last_calib));
+		ad->last_calib_valid = true;
+		pp_ad_cfg_lut(base + MDSS_MDP_REG_AD_LUT_AL,
+				ad->cfg.al_calib_lut);
+		writel_relaxed(ad->cfg.strength_limit,
+				base + MDSS_MDP_REG_AD_STR_LIM);
+		temp = ad->cfg.calib[3] << 16;
+		temp |= ad->cfg.calib[2] & 0xFFFF;
+		writel_relaxed(temp, base + MDSS_MDP_REG_AD_CALIB_CD);
+		writel_relaxed(ad->cfg.t_filter_recursion,
+				base + MDSS_MDP_REG_AD_TFILT_CTRL);
+		temp_calib = ad->cfg.calib[0] & 0xFFFF;
+		/* fall-through */
+	case MDSS_AD_MODE_TARG_STR:
+		/* temp_calib is calib[0] when reached via fallthrough,
+		 * otherwise the MDSS_PP_AD_DEF_CALIB default
+		 */
+		temp = ad->cfg.calib[1] << 16;
+		temp |= temp_calib;
+		writel_relaxed(temp, base + MDSS_MDP_REG_AD_CALIB_AB);
+		/* fall-through */
+	case MDSS_AD_MODE_MAN_STR:
+		writel_relaxed(ad->cfg.backlight_scale,
+				base + MDSS_MDP_REG_AD_BL_MAX);
+		writel_relaxed(ad->cfg.mode | MDSS_AD_AUTO_TRIGGER,
+				base + MDSS_MDP_REG_AD_MODE_SEL);
+		pr_debug("stab_itr = %d\n", ad->cfg.stab_itr);
+		break;
+	case MDSS_AD_MODE_MAN_IPC:
+		if (!ad->last_calib_valid) {
+			memcpy(ad->last_calib, ad->cfg.calib,
+				sizeof(ad->last_calib));
+			ad->last_calib_valid = true;
+		}
+		writel_relaxed(MDSS_AD_T_FILTER_CTRL_0,
+				base + MDSS_MDP_REG_AD_TFILT_CTRL);
+		pp_ad_cfg_lut(base + MDSS_MDP_REG_AD_LUT_AL,
+				ad->cfg.al_calib_lut);
+		writel_relaxed(ad->cfg.strength_limit,
+				base + MDSS_MDP_REG_AD_STR_LIM);
+		temp = ad->last_calib[3] << 16;
+		temp |= ad->last_calib[2] & 0xFFFF;
+		writel_relaxed(temp, base + MDSS_MDP_REG_AD_CALIB_CD);
+		temp_calib = ad->last_calib[0] & 0xFFFF;
+		temp = ad->last_calib[1] << 16;
+		temp |= temp_calib;
+		writel_relaxed(temp, base + MDSS_MDP_REG_AD_CALIB_AB);
+		writel_relaxed(ad->cfg.backlight_scale,
+				base + MDSS_MDP_REG_AD_BL_MAX);
+		writel_relaxed(ad->cfg.mode | MDSS_AD_AUTO_TRIGGER,
+				base + MDSS_MDP_REG_AD_MODE_SEL);
+		pr_debug("stab_itr = %d\n", ad->cfg.stab_itr);
+		break;
+	default:
+		break;
+	}
+}
+
+/* Per-vsync hook: queue one AD calculation for this display's left mixer */
+static void pp_ad_vsync_handler(struct mdss_mdp_ctl *ctl, ktime_t t)
+{
+	struct mdss_data_type *mdata = ctl->mdata;
+	struct mdss_mdp_mixer *mixer = ctl->mixer_left;
+
+	if (mixer && mixer->num < mdata->nad_cfgs)
+		queue_work(mdata->ad_calc_wq,
+			   &mdata->ad_cfgs[mixer->num].calc_work);
+}
+
+#define MDSS_PP_AD_BYPASS_DEF 0x101
+/* Choose the AD opmode: 0 when enabled for this side, bypass otherwise */
+static void pp_ad_bypass_config(struct mdss_ad_info *ad,
+				struct mdss_mdp_ctl *ctl, u32 num, u32 *opmode)
+{
+	int side = pp_num_to_side(ctl, num);
+	u32 sts = ad->reg_sts | (ad->ops & MDSS_PP_SPLIT_MASK);
+
+	*opmode = pp_sts_is_enabled(sts, side) ? 0 : MDSS_PP_AD_BYPASS_DEF;
+}
+
+/* Select which AD hardware instance performs the calculation */
+static int pp_ad_setup_hw_nums(struct msm_fb_data_type *mfd,
+						struct mdss_ad_info *ad)
+{
+	u32 mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
+	u32 cnt;
+
+	cnt = mdss_mdp_get_ctl_mixers(mfd->index, mixer_id);
+	if (cnt == 0)
+		return -EINVAL;
+
+	/* Default to the left mixer; use the right in split-right mode */
+	if (cnt > 1 && (ad->ops & MDSS_PP_SPLIT_RIGHT_ONLY))
+		ad->calc_hw_num = mixer_id[1];
+	else
+		ad->calc_hw_num = mixer_id[0];
+
+	return 0;
+}
+
+/* Clear a pending IPC-reset flag once AD is up and running */
+static int mdss_mdp_ad_ipc_reset(struct msm_fb_data_type *mfd)
+{
+	struct mdss_ad_info *ad;
+	int rc;
+
+	if (!mfd) {
+		pr_err("mfd = 0x%pK\n", mfd);
+		return -EINVAL;
+	}
+
+	rc = mdss_mdp_get_ad(mfd, &ad);
+	if (rc == -ENODEV || rc == -EPERM) {
+		pr_debug("AD not supported on device, disp num %d\n",
+			mfd->index);
+		return 0;
+	}
+	if (rc || !ad) {
+		pr_err("Failed to get ad info: ret = %d, ad = 0x%pK\n",
+			rc, ad);
+		return rc;
+	}
+
+	mutex_lock(&ad->lock);
+	if ((ad->state & PP_AD_STATE_RUN) &&
+	    (ad->state & PP_AD_STATE_IPC_RESET))
+		ad->state &= ~PP_AD_STATE_IPC_RESET;
+	mutex_unlock(&ad->lock);
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_ad_setup() - per-commit AD state machine.
+ *
+ * Consumes the DIRTY_* status bits set by the ioctl paths, promotes them
+ * into STATE_* bits plus reg_sts flags for the register-write helpers,
+ * handles IPC resume/reset sequencing, tracks frame geometry changes,
+ * decides bypass vs. run, and attaches/detaches the vsync handler that
+ * drives pp_ad_calc_worker(). Returns 0 or a negative errno.
+ *
+ * NOTE(review): last_sts/last_state used below are file-scope variables
+ * defined outside this excerpt; they only gate the debug prints.
+ */
+static int mdss_mdp_ad_setup(struct msm_fb_data_type *mfd)
+{
+	int ret = 0;
+	struct mdss_ad_info *ad;
+	struct mdss_mdp_ctl *ctl, *sctl;
+	struct msm_fb_data_type *bl_mfd;
+	struct mdss_data_type *mdata;
+	u32 bypass = MDSS_PP_AD_BYPASS_DEF, bl;
+	u32 width;
+
+	if (!mfd) {
+		pr_err("mfd = 0x%pK\n", mfd);
+		return -EINVAL;
+	}
+
+	ctl = mfd_to_ctl(mfd);
+	if (!ctl) {
+		pr_err("ctl = 0x%pK\n", ctl);
+		return -EINVAL;
+	}
+	sctl = mdss_mdp_get_split_ctl(ctl);
+
+	ret = mdss_mdp_get_ad(mfd, &ad);
+	if (ret == -ENODEV || ret == -EPERM) {
+		pr_debug("AD not supported on device, disp num %d\n",
+			mfd->index);
+		return 0;
+	} else if (ret || !ad) {
+		pr_err("Failed to get ad info: ret = %d, ad = 0x%pK\n",
+			ret, ad);
+		return ret;
+	}
+	/* Writeback targets borrow the primary display's backlight handle */
+	if (mfd->panel_info->type == WRITEBACK_PANEL) {
+		bl_mfd = mdss_get_mfd_from_index(0);
+		if (!bl_mfd) {
+			ret = -EINVAL;
+			pr_warn("failed to get primary FB bl handle, err = %d\n",
+									ret);
+			goto exit;
+		}
+	} else {
+		bl_mfd = mfd;
+	}
+
+	mdata = mdss_mdp_get_mdata();
+
+	mutex_lock(&ad->lock);
+	/* After IPC resume, wait a fixed frame count before switching back */
+	if (ad->state & PP_AD_STATE_RUN && ad->state & PP_AD_STATE_IPC_RESUME) {
+		if (ad->ipc_frame_count == MDSS_AD_IPC_FRAME_COUNT) {
+			ad->state &= ~PP_AD_STATE_IPC_RESUME;
+			ad->state |= PP_AD_STATE_IPC_RESET;
+			ad->cfg.mode &= ~MDSS_AD_MODE_IPC_BIT;
+			if (ad->last_ad_data != ad->ad_data)
+				ad->sts |= PP_AD_STS_DIRTY_DATA;
+			if (memcmp(ad->last_calib, ad->cfg.calib,
+				sizeof(ad->last_calib)))
+				ad->sts |= PP_AD_STS_DIRTY_CFG;
+			pr_debug("switch mode to %d, last_ad_data = %d\n",
+				 ad->cfg.mode, ad->last_ad_data);
+		} else {
+			ad->ipc_frame_count++;
+		}
+	}
+
+	if (ad->sts != last_sts || ad->state != last_state) {
+		last_sts = ad->sts;
+		last_state = ad->state;
+		pr_debug("beginning: ad->sts = 0x%08x, state = 0x%08x\n",
+							ad->sts, ad->state);
+	}
+
+	/* Promote dirty input data into state + register-write flags */
+	if (ad->sts & PP_AD_STS_DIRTY_DATA) {
+		ad->sts &= ~PP_AD_STS_DIRTY_DATA;
+		ad->state |= PP_AD_STATE_DATA;
+		pr_debug("dirty data, last_bl = %d\n", ad->last_bl);
+		if (!bl_mfd->ad_bl_level)
+			bl_mfd->ad_bl_level = bl_mfd->bl_level;
+		bl = bl_mfd->ad_bl_level;
+
+		if (ad->last_bl != bl) {
+			ad->last_bl = bl;
+			linear_map(bl, &ad->bl_data,
+				bl_mfd->panel_info->bl_max,
+				MDSS_MDP_AD_BL_SCALE);
+		}
+		if (!(ad->state & PP_AD_STATE_IPC_RESUME)) {
+			ad->calc_itr = ad->cfg.stab_itr;
+			ad->sts |= PP_AD_STS_DIRTY_VSYNC;
+		}
+		ad->reg_sts |= PP_AD_STS_DIRTY_DATA;
+	}
+
+	if (ad->sts & PP_AD_STS_DIRTY_CFG) {
+		ad->sts &= ~PP_AD_STS_DIRTY_CFG;
+		ad->state |= PP_AD_STATE_CFG;
+
+		ad->reg_sts |= PP_AD_STS_DIRTY_CFG;
+	}
+	if (ad->sts & PP_AD_STS_DIRTY_INIT) {
+		ad->sts &= ~PP_AD_STS_DIRTY_INIT;
+		if (pp_ad_setup_hw_nums(mfd, ad)) {
+			pr_warn("failed to setup ad master\n");
+			ad->calc_hw_num = PP_AD_BAD_HW_NUM;
+		} else {
+			ad->state |= PP_AD_STATE_INIT;
+			ad->reg_sts |= PP_AD_STS_DIRTY_INIT;
+		}
+	}
+
+	width = ctl->width;
+	if (sctl)
+		width += sctl->width;
+
+	/* update ad screen size if it has changed since last configuration */
+	if ((ad->init.frame_w != width) ||
+			(ad->init.frame_h != ctl->height)) {
+		pr_debug("changing from %dx%d to %dx%d\n", ad->init.frame_w,
+							ad->init.frame_h,
+							width,
+							ctl->height);
+		ad->init.frame_w = width;
+		ad->init.frame_h = ctl->height;
+		ad->reg_sts |= PP_AD_STS_DIRTY_INIT;
+	}
+
+	if ((ad->sts & PP_STS_ENABLE) && PP_AD_STATE_IS_READY(ad->state)) {
+		bypass = 0;
+		ad->reg_sts |= PP_AD_STS_DIRTY_ENABLE;
+		ad->state |= PP_AD_STATE_RUN;
+		if (bl_mfd != mfd)
+			bl_mfd->ext_ad_ctrl = mfd->index;
+		bl_mfd->ext_bl_ctrl = ad->cfg.bl_ctrl_mode;
+	} else {
+		if (ad->state & PP_AD_STATE_RUN) {
+			ad->reg_sts = PP_AD_STS_DIRTY_ENABLE;
+			/* Clear state and regs when going to off state*/
+			ad->sts = 0;
+			ad->sts |= PP_AD_STS_DIRTY_VSYNC;
+			ad->state &= ~PP_AD_STATE_INIT;
+			ad->state &= ~PP_AD_STATE_CFG;
+			ad->state &= ~PP_AD_STATE_DATA;
+			ad->state &= ~PP_AD_STATE_BL_LIN;
+			ad->state &= ~PP_AD_STATE_IPC_RESUME;
+			ad->state &= ~PP_AD_STATE_IPC_RESET;
+			ad->ad_data = 0;
+			ad->ad_data_mode = 0;
+			ad->last_bl = 0;
+			ad->last_ad_data = 0;
+			ad->last_calib_valid = false;
+			ad->last_ad_data_valid = false;
+			ad->ipc_frame_count = 0;
+			ad->calc_itr = 0;
+			ad->calc_hw_num = PP_AD_BAD_HW_NUM;
+			memset(&ad->last_calib, 0, sizeof(ad->last_calib));
+			memset(&ad->bl_lin, 0, sizeof(uint32_t) *
+								AD_BL_LIN_LEN);
+			memset(&ad->bl_lin_inv, 0, sizeof(uint32_t) *
+								AD_BL_LIN_LEN);
+			memset(&ad->bl_att_lut, 0, sizeof(uint32_t) *
+				AD_BL_ATT_LUT_LEN);
+			memset(&ad->init, 0, sizeof(struct mdss_ad_init));
+			memset(&ad->cfg, 0, sizeof(struct mdss_ad_cfg));
+			bl_mfd->ext_bl_ctrl = 0;
+			bl_mfd->ext_ad_ctrl = -1;
+			bl_mfd->ad_bl_level = 0;
+		}
+		ad->state &= ~PP_AD_STATE_RUN;
+	}
+	if (!bypass)
+		ad->reg_sts |= PP_STS_ENABLE;
+	else
+		ad->reg_sts &= ~PP_STS_ENABLE;
+
+	/* Attach/detach the vsync-driven calc worker as needed */
+	if (PP_AD_STS_DIRTY_VSYNC & ad->sts) {
+		pr_debug("dirty vsync, calc_itr = %d\n", ad->calc_itr);
+		ad->sts &= ~PP_AD_STS_DIRTY_VSYNC;
+		if (!(PP_AD_STATE_VSYNC & ad->state) && ad->calc_itr &&
+					(ad->state & PP_AD_STATE_RUN)) {
+			ctl->ops.add_vsync_handler(ctl, &ad->handle);
+			ad->state |= PP_AD_STATE_VSYNC;
+		} else if ((PP_AD_STATE_VSYNC & ad->state) &&
+			(!ad->calc_itr || !(PP_AD_STATE_RUN & ad->state))) {
+			ctl->ops.remove_vsync_handler(ctl, &ad->handle);
+			ad->state &= ~PP_AD_STATE_VSYNC;
+		}
+	}
+
+	if (ad->sts != last_sts || ad->state != last_state) {
+		last_sts = ad->sts;
+		last_state = ad->state;
+		pr_debug("end: ad->sts = 0x%08x, state = 0x%08x\n", ad->sts,
+								ad->state);
+	}
+	mutex_unlock(&ad->lock);
+exit:
+	return ret;
+}
+
+#define MDSS_PP_AD_SLEEP 10
+/*
+ * pp_ad_calc_worker() - workqueue body queued once per vsync while AD is
+ * iterating.
+ *
+ * Counts down calc_itr, notifies userspace via the ad_event sysfs node,
+ * detaches the vsync handler when iterations are exhausted, and reads the
+ * resulting strength from the AD hardware.
+ *
+ * NOTE(review): base is captured under ad->lock but dereferenced after
+ * mutex_unlock(); presumably safe because mdata->ad_off is static after
+ * probe -- confirm before relying on it.
+ * (MDSS_PP_AD_SLEEP is not referenced in this excerpt; presumably used
+ * elsewhere in the file.)
+ */
+static void pp_ad_calc_worker(struct work_struct *work)
+{
+	struct mdss_ad_info *ad;
+	struct mdss_mdp_ctl *ctl;
+	struct mdss_overlay_private *mdp5_data;
+	struct mdss_data_type *mdata;
+	char __iomem *base;
+
+	ad = container_of(work, struct mdss_ad_info, calc_work);
+
+	mutex_lock(&ad->lock);
+	if (!ad->mfd || !(ad->sts & PP_STS_ENABLE)) {
+		mutex_unlock(&ad->lock);
+		return;
+	}
+	mdp5_data = mfd_to_mdp5_data(ad->mfd);
+	if (!mdp5_data) {
+		pr_err("mdp5_data = 0x%pK\n", mdp5_data);
+		mutex_unlock(&ad->lock);
+		return;
+	}
+
+	ctl = mfd_to_ctl(ad->mfd);
+	mdata = mfd_to_mdata(ad->mfd);
+	if (!ctl || !mdata || ad->calc_hw_num >= mdata->nad_cfgs) {
+		pr_err("ctl = 0x%pK, mdata = 0x%pK, ad->calc_hw_num = %d, mdata->nad_cfg = %d\n",
+			ctl, mdata, ad->calc_hw_num,
+			(!mdata ? 0 : mdata->nad_cfgs));
+		mutex_unlock(&ad->lock);
+		return;
+	}
+
+	base = mdata->ad_off[ad->calc_hw_num].base;
+
+	/* AUTO_STR needs a valid backlight before any strength readback */
+	if ((ad->cfg.mode == MDSS_AD_MODE_AUTO_STR) && (ad->last_bl == 0)) {
+		mutex_unlock(&ad->lock);
+		return;
+	}
+	if ((PP_AD_STATE_RUN & ad->state) && ad->calc_itr > 0)
+		ad->calc_itr--;
+
+	/* Wake userspace listeners on the ad_event sysfs node */
+	mdp5_data->ad_events++;
+	sysfs_notify_dirent(mdp5_data->ad_event_sd);
+	if (!ad->calc_itr) {
+		ad->state &= ~PP_AD_STATE_VSYNC;
+		ctl->ops.remove_vsync_handler(ctl, &ad->handle);
+	}
+	mutex_unlock(&ad->lock);
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+	ad->last_str = 0xFF & readl_relaxed(base + MDSS_MDP_REG_AD_STR_OUT);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	pr_debug("itr number %d str %d\n", ad->calc_itr, ad->last_str);
+}
+
+#define PP_AD_LUT_LEN 33
+/* Program a 33-entry LUT, packing two 16-bit values per register write */
+static void pp_ad_cfg_lut(char __iomem *addr, u32 *data)
+{
+	int i;
+
+	for (i = 0; i < PP_AD_LUT_LEN - 1; i += 2) {
+		u32 packed = (data[i + 1] << 16) | (data[i] & 0xFFFF);
+
+		writel_relaxed(packed, addr + (i * 2));
+	}
+	/* The odd final entry occupies the high half-word alone */
+	writel_relaxed(data[PP_AD_LUT_LEN - 1] << 16,
+			addr + ((PP_AD_LUT_LEN - 1) * 2));
+}
+
+/* must call this function from within ad->lock */
+/*
+ * pp_ad_attenuate_bl() - apply the backlight attenuation LUT.
+ * @ad:     AD context providing alpha and bl_att_lut
+ * @bl:     input backlight in panel range
+ * @bl_out: attenuated backlight, mapped back to panel range
+ *
+ * Interpolates linearly between bl_att_lut entries, then blends with the
+ * unattenuated value by init.alpha / init.alpha_base.
+ * Returns 0 on success, -EINVAL on a bad LUT index.
+ */
+static int pp_ad_attenuate_bl(struct mdss_ad_info *ad, u32 bl, u32 *bl_out)
+{
+	u32 shift = 0, ratio_temp = 0;
+	u32 n, lut_interval, bl_att;
+
+	/*
+	 * The former "bl < 0 || ad->init.alpha < 0" guard was removed: both
+	 * operands are unsigned, so the comparison was always false
+	 * (-Wtype-limits) and the check never triggered.
+	 */
+	if (ad->init.alpha == 0) {
+		pr_debug("alpha = %d, hence no attenuation needed\n",
+			ad->init.alpha);
+		return 0;
+	}
+	pr_debug("bl_in = %d\n", bl);
+	/* map panel backlight range to AD backlight range */
+	linear_map(bl, &bl, ad->bl_mfd->panel_info->bl_max,
+		MDSS_MDP_AD_BL_SCALE);
+
+	pr_debug("Before attenuation = %d\n", bl);
+	ratio_temp = MDSS_MDP_AD_BL_SCALE / (AD_BL_ATT_LUT_LEN - 1);
+	while (ratio_temp > 0) {
+		ratio_temp = ratio_temp >> 1;
+		shift++;
+	}
+	n = bl >> shift;
+	if (n >= (AD_BL_ATT_LUT_LEN - 1)) {
+		pr_err("Invalid index for BL attenuation: %d.\n", n);
+		return -EINVAL;
+	}
+	lut_interval = (MDSS_MDP_AD_BL_SCALE + 1) / (AD_BL_ATT_LUT_LEN - 1);
+	bl_att = ((ad->bl_att_lut[n + 1] - ad->bl_att_lut[n]) *
+		(bl - lut_interval * n) + (ad->bl_att_lut[n] * lut_interval)) /
+		lut_interval;
+	pr_debug("n = %u, bl_att_lut[%u] = %u, bl_att_lut[%u] = %u, bl_att = %u\n",
+		n, n, ad->bl_att_lut[n], n + 1, ad->bl_att_lut[n + 1], bl_att);
+	*bl_out = (ad->init.alpha * bl_att +
+		(ad->init.alpha_base - ad->init.alpha) * bl) /
+		ad->init.alpha_base;
+
+	pr_debug("After attenuation = %d\n", *bl_out);
+	/* map AD backlight range back to panel backlight range */
+	linear_map(*bl_out, bl_out, MDSS_MDP_AD_BL_SCALE,
+		ad->bl_mfd->panel_info->bl_max);
+
+	pr_debug("bl_out = %d\n", *bl_out);
+	return 0;
+}
+
+/* must call this function from within ad->lock */
+/*
+ * pp_ad_linearize_bl() - map a backlight value through the (inverse)
+ * linearization LUT.
+ * @ad:     AD context providing bl_lin / bl_lin_inv
+ * @bl:     input backlight in panel range
+ * @bl_out: linearized backlight, mapped back to panel range
+ * @inv:    MDP_PP_AD_BL_LINEAR or MDP_PP_AD_BL_LINEAR_INV
+ *
+ * Uses exact LUT entries when bl lands on a grid point, otherwise linear
+ * piece-wise interpolation. Returns 0 on success, -EINVAL on bad input.
+ */
+static int pp_ad_linearize_bl(struct mdss_ad_info *ad, u32 bl, u32 *bl_out,
+	int inv)
+{
+
+	u32 n, bl_lut_max_index = AD_BL_LIN_LEN - 1;
+	uint32_t *bl_lut = NULL;
+	int ret = -EINVAL;
+
+	/*
+	 * bl is unsigned, so the former "bl < 0" half of this guard was
+	 * always false (-Wtype-limits) and has been removed; only the
+	 * upper-bound check is meaningful.
+	 */
+	if (bl > ad->bl_mfd->panel_info->bl_max) {
+		pr_err("Invalid backlight input: bl = %d, bl_max = %d\n", bl,
+			ad->bl_mfd->panel_info->bl_max);
+		return -EINVAL;
+	}
+
+	pr_debug("bl_in = %d, inv = %d\n", bl, inv);
+	if (inv == MDP_PP_AD_BL_LINEAR) {
+		bl_lut = ad->bl_lin;
+	} else if (inv == MDP_PP_AD_BL_LINEAR_INV) {
+		bl_lut = ad->bl_lin_inv;
+	} else {
+		pr_err("invalid inv param: inv = %d\n", inv);
+		return -EINVAL;
+	}
+
+	/* map panel backlight range to AD backlight range */
+	linear_map(bl, &bl, ad->bl_mfd->panel_info->bl_max,
+		MDSS_MDP_AD_BL_SCALE);
+
+	pr_debug("Before linearization = %d\n", bl);
+	n = bl * bl_lut_max_index / MDSS_MDP_AD_BL_SCALE;
+	pr_debug("n = %u\n", n);
+	if (n > bl_lut_max_index) {
+		pr_err("Invalid index for BL linearization: %d.\n", n);
+		return ret;
+	} else if (n == bl_lut_max_index) {
+		*bl_out = bl_lut[n];
+	} else if (bl == n * MDSS_MDP_AD_BL_SCALE / bl_lut_max_index) {
+		*bl_out = bl_lut[n];
+	} else if (bl == (n + 1) * MDSS_MDP_AD_BL_SCALE / bl_lut_max_index) {
+		*bl_out = bl_lut[n + 1];
+	} else {
+		/* linear piece-wise interpolation */
+		*bl_out = ((bl_lut[n + 1] - bl_lut[n]) *
+			(bl - n * MDSS_MDP_AD_BL_SCALE /
+			bl_lut_max_index) + bl_lut[n] *
+			MDSS_MDP_AD_BL_SCALE / bl_lut_max_index) *
+			bl_lut_max_index / MDSS_MDP_AD_BL_SCALE;
+	}
+	pr_debug("After linearization = %d\n", *bl_out);
+
+	/* map AD backlight range back to panel backlight range */
+	linear_map(*bl_out, bl_out, MDSS_MDP_AD_BL_SCALE,
+		ad->bl_mfd->panel_info->bl_max);
+
+	pr_debug("bl_out = %d\n", *bl_out);
+	return 0;
+}
+
+/*
+ * mdss_mdp_ad_addr_setup() - allocate and initialize the per-instance AD
+ * hardware and config arrays.
+ * @mdata:      top-level MDSS data (provides nad_cfgs and the io base)
+ * @ad_offsets: register offsets of each AD block relative to mdss_io.base
+ *
+ * Also creates the single-threaded workqueue that runs pp_ad_calc_worker.
+ * Returns 0 on success or -ENOMEM, releasing partial allocations.
+ */
+int mdss_mdp_ad_addr_setup(struct mdss_data_type *mdata, u32 *ad_offsets)
+{
+	u32 i;
+	int rc = 0;
+
+	mdata->ad_off = devm_kzalloc(&mdata->pdev->dev,
+				sizeof(struct mdss_mdp_ad) * mdata->nad_cfgs,
+				GFP_KERNEL);
+
+	if (!mdata->ad_off) {
+		pr_err("unable to setup assertive display hw:devm_kzalloc fail\n");
+		return -ENOMEM;
+	}
+
+	mdata->ad_cfgs = devm_kzalloc(&mdata->pdev->dev,
+			sizeof(struct mdss_ad_info) * mdata->nad_cfgs,
+			GFP_KERNEL);
+
+	if (!mdata->ad_cfgs) {
+		pr_err("unable to setup assertive display:devm_kzalloc fail\n");
+		devm_kfree(&mdata->pdev->dev, mdata->ad_off);
+		return -ENOMEM;
+	}
+
+	mdata->ad_calc_wq = create_singlethread_workqueue("ad_calc_wq");
+	/* Workqueue creation can fail; bail out instead of queueing to NULL */
+	if (!mdata->ad_calc_wq) {
+		pr_err("unable to create ad_calc_wq workqueue\n");
+		devm_kfree(&mdata->pdev->dev, mdata->ad_cfgs);
+		devm_kfree(&mdata->pdev->dev, mdata->ad_off);
+		return -ENOMEM;
+	}
+	for (i = 0; i < mdata->nad_cfgs; i++) {
+		mdata->ad_off[i].base = mdata->mdss_io.base + ad_offsets[i];
+		mdata->ad_off[i].num = i;
+		mdata->ad_cfgs[i].num = i;
+		mdata->ad_cfgs[i].ops = 0;
+		mdata->ad_cfgs[i].reg_sts = 0;
+		mdata->ad_cfgs[i].calc_itr = 0;
+		mdata->ad_cfgs[i].last_str = 0xFFFFFFFF;
+		mdata->ad_cfgs[i].last_bl = 0;
+		mdata->ad_cfgs[i].last_ad_data = 0;
+		memset(mdata->ad_cfgs[i].last_calib, 0,
+			sizeof(mdata->ad_cfgs[i].last_calib));
+		mdata->ad_cfgs[i].last_calib_valid = false;
+		mdata->ad_cfgs[i].last_ad_data_valid = false;
+		mutex_init(&mdata->ad_cfgs[i].lock);
+		mdata->ad_cfgs[i].handle.vsync_handler = pp_ad_vsync_handler;
+		mdata->ad_cfgs[i].handle.cmd_post_flush = true;
+		INIT_WORK(&mdata->ad_cfgs[i].calc_work, pp_ad_calc_worker);
+	}
+	return rc;
+}
+
+/*
+ * is_valid_calib_ctrl_addr() - check whether @ptr is a calibratable CTL
+ * register address.
+ *
+ * Scans every controller's register space for an exact match against the
+ * whitelisted CTL registers.
+ *
+ * Return: the permitted MDP_PP_OPS_* access bits for the address, or 0 if
+ * the address is not a recognised CTL register.
+ */
+static int is_valid_calib_ctrl_addr(char __iomem *ptr)
+{
+	char __iomem *base;
+	int ret = 0, counter = 0;
+	int stage = 0;
+	struct mdss_mdp_ctl *ctl;
+
+	/* Controller */
+	for (counter = 0; counter < mdss_res->nctl; counter++) {
+		ctl = mdss_res->ctl_off + counter;
+		base = ctl->base;
+
+		/* CTL_TOP is read-only for calibration */
+		if (ptr == base + MDSS_MDP_REG_CTL_TOP) {
+			ret = MDP_PP_OPS_READ;
+			break;
+		} else if (ptr == base + MDSS_MDP_REG_CTL_FLUSH) {
+			ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+			break;
+		}
+
+		/* one LAYER register per blend stage (intf + writeback) */
+		for (stage = 0; stage < (mdss_res->nmixers_intf +
+					 mdss_res->nmixers_wb); stage++)
+			if (ptr == base + MDSS_MDP_REG_CTL_LAYER(stage)) {
+				ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+				goto End;
+			}
+	}
+
+End:
+	return ret;
+}
+
+/*
+ * is_valid_calib_dspp_addr() - check whether @ptr falls inside a
+ * calibratable DSPP register region.
+ *
+ * Matches the DSPP base register exactly, and the PA/PCC/Gamut/GC/dither
+ * regions by inclusive range.  The six-zone/mem-colour range is only valid
+ * on MDP rev >= 103.
+ *
+ * Return: permitted MDP_PP_OPS_* bits, or 0 if @ptr is outside every
+ * whitelisted DSPP region.
+ */
+static int is_valid_calib_dspp_addr(char __iomem *ptr)
+{
+	char __iomem *base;
+	int ret = 0, counter = 0;
+	struct mdss_mdp_mixer *mixer;
+
+	for (counter = 0; counter < mdss_res->nmixers_intf; counter++) {
+		mixer = mdss_res->mixer_intf + counter;
+		base = mixer->dspp_base;
+
+		if (ptr == base) {
+			ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+			break;
+		/* PA range */
+		} else if ((ptr >= base + MDSS_MDP_REG_DSPP_PA_BASE) &&
+				(ptr <= base + MDSS_MDP_REG_DSPP_PA_BASE +
+						MDSS_MDP_PA_SIZE)) {
+			ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+			break;
+		/* PCC range */
+		} else if ((ptr >= base + MDSS_MDP_REG_DSPP_PCC_BASE) &&
+				(ptr <= base + MDSS_MDP_REG_DSPP_PCC_BASE +
+						MDSS_MDP_PCC_SIZE)) {
+			ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+			break;
+		/* Gamut range */
+		} else if ((ptr >= base + MDSS_MDP_REG_DSPP_GAMUT_BASE) &&
+				(ptr <= base + MDSS_MDP_REG_DSPP_GAMUT_BASE +
+						MDSS_MDP_GAMUT_SIZE)) {
+			ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+			break;
+		/* GC range */
+		} else if ((ptr >= base + MDSS_MDP_REG_DSPP_GC_BASE) &&
+				(ptr <= base + MDSS_MDP_REG_DSPP_GC_BASE +
+						MDSS_MDP_GC_SIZE)) {
+			ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+			break;
+		/* Dither enable/disable */
+		} else if ((ptr == base + MDSS_MDP_REG_DSPP_DITHER_DEPTH)) {
+			ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+			break;
+		/* Six zone and mem color */
+		} else if (mdss_res->mdp_rev >= MDSS_MDP_HW_REV_103 &&
+			(ptr >= base + MDSS_MDP_REG_DSPP_SIX_ZONE_BASE) &&
+			(ptr <= base + MDSS_MDP_REG_DSPP_SIX_ZONE_BASE +
+					MDSS_MDP_SIX_ZONE_SIZE +
+					MDSS_MDP_MEM_COL_SIZE)) {
+			ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * is_valid_calib_vig_addr() - check whether @ptr is a calibratable VIG
+ * pipe register.
+ *
+ * Matches individual SSPP/VIG registers exactly and the QSEED2-sharpen,
+ * PA and (rev >= 103) mem-colour regions by inclusive range.
+ *
+ * Return: permitted MDP_PP_OPS_* bits, or 0 for an unrecognised address.
+ */
+static int is_valid_calib_vig_addr(char __iomem *ptr)
+{
+	char __iomem *base;
+	int ret = 0, counter = 0;
+	struct mdss_mdp_pipe *pipe;
+
+	for (counter = 0; counter < mdss_res->nvig_pipes; counter++) {
+		pipe = mdss_res->vig_pipes + counter;
+		base = pipe->base;
+
+		if (ptr == base + MDSS_MDP_REG_VIG_OP_MODE) {
+			ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+			break;
+		} else if (ptr == base + MDSS_MDP_REG_SSPP_SRC_FORMAT) {
+			ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+			break;
+		} else if (ptr == base + MDSS_MDP_REG_SSPP_SRC_CONSTANT_COLOR) {
+			ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+			break;
+		} else if (ptr == base + MDSS_MDP_REG_SSPP_SRC_UNPACK_PATTERN) {
+			ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+			break;
+		} else if (ptr == base + MDSS_MDP_REG_SSPP_SRC_OP_MODE) {
+			ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+			break;
+		/* QSEED2 range */
+		} else if ((ptr >= base + MDSS_MDP_REG_VIG_QSEED2_SHARP) &&
+				(ptr <= base + MDSS_MDP_REG_VIG_QSEED2_SHARP +
+					MDSS_MDP_VIG_QSEED2_SHARP_SIZE)) {
+			ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+			break;
+		/* PA range */
+		} else if ((ptr >= base + MDSS_MDP_REG_VIG_PA_BASE) &&
+				(ptr <= base + MDSS_MDP_REG_VIG_PA_BASE +
+						MDSS_MDP_PA_SIZE)) {
+			ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+			break;
+		/* Mem color range */
+		} else if (mdss_res->mdp_rev >= MDSS_MDP_HW_REV_103 &&
+			(ptr >= base + MDSS_MDP_REG_VIG_MEM_COL_BASE) &&
+				(ptr <= base + MDSS_MDP_REG_VIG_MEM_COL_BASE +
+						MDSS_MDP_MEM_COL_SIZE)) {
+			ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * is_valid_calib_rgb_addr() - check whether @ptr is a calibratable RGB
+ * pipe register.
+ *
+ * Return: MDP_PP_OPS_READ | MDP_PP_OPS_WRITE when @ptr matches one of the
+ * whitelisted SSPP registers of any RGB pipe, 0 otherwise.
+ */
+static int is_valid_calib_rgb_addr(char __iomem *ptr)
+{
+	/* SSPP registers calibration is allowed to touch on RGB pipes */
+	static const u32 rgb_calib_offs[] = {
+		MDSS_MDP_REG_SSPP_SRC_FORMAT,
+		MDSS_MDP_REG_SSPP_SRC_CONSTANT_COLOR,
+		MDSS_MDP_REG_SSPP_SRC_UNPACK_PATTERN,
+		MDSS_MDP_REG_SSPP_SRC_OP_MODE,
+	};
+	struct mdss_mdp_pipe *pipe;
+	char __iomem *pipe_base;
+	int i, j;
+
+	for (i = 0; i < mdss_res->nrgb_pipes; i++) {
+		pipe = mdss_res->rgb_pipes + i;
+		pipe_base = pipe->base;
+
+		for (j = 0; j < ARRAY_SIZE(rgb_calib_offs); j++)
+			if (ptr == pipe_base + rgb_calib_offs[j])
+				return MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+	}
+
+	return 0;
+}
+
+/*
+ * is_valid_calib_dma_addr() - check whether @ptr is a calibratable DMA
+ * pipe register.
+ *
+ * Return: MDP_PP_OPS_READ | MDP_PP_OPS_WRITE when @ptr matches one of the
+ * whitelisted SSPP registers of any DMA pipe, 0 otherwise.
+ */
+static int is_valid_calib_dma_addr(char __iomem *ptr)
+{
+	/* SSPP registers calibration is allowed to touch on DMA pipes */
+	static const u32 dma_calib_offs[] = {
+		MDSS_MDP_REG_SSPP_SRC_FORMAT,
+		MDSS_MDP_REG_SSPP_SRC_CONSTANT_COLOR,
+		MDSS_MDP_REG_SSPP_SRC_UNPACK_PATTERN,
+		MDSS_MDP_REG_SSPP_SRC_OP_MODE,
+	};
+	struct mdss_mdp_pipe *pipe;
+	char __iomem *pipe_base;
+	int i, j;
+
+	for (i = 0; i < mdss_res->ndma_pipes; i++) {
+		pipe = mdss_res->dma_pipes + i;
+		pipe_base = pipe->base;
+
+		for (j = 0; j < ARRAY_SIZE(dma_calib_offs); j++)
+			if (ptr == pipe_base + dma_calib_offs[j])
+				return MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+	}
+
+	return 0;
+}
+
+/*
+ * is_valid_calib_mixer_addr() - check whether @ptr is a calibratable
+ * layer-mixer register.
+ *
+ * Matches the LM op-mode register, the GC LUT region and the per-stage
+ * blend registers of every mixer (interface and writeback).
+ *
+ * Return: permitted MDP_PP_OPS_* bits, or 0 for an unrecognised address.
+ */
+static int is_valid_calib_mixer_addr(char __iomem *ptr)
+{
+	char __iomem *base;
+	int ret = 0, counter = 0;
+	int stage = 0;
+	struct mdss_mdp_mixer *mixer;
+
+	for (counter = 0; counter < (mdss_res->nmixers_intf +
+					mdss_res->nmixers_wb); counter++) {
+		/*
+		 * Fix: the original indexed mixer_intf for every counter
+		 * value, reading past the interface-mixer array once
+		 * counter reached nmixers_intf.  Writeback mixers live in
+		 * the separate mixer_wb array.
+		 */
+		if (counter < mdss_res->nmixers_intf)
+			mixer = mdss_res->mixer_intf + counter;
+		else
+			mixer = mdss_res->mixer_wb +
+				(counter - mdss_res->nmixers_intf);
+		base = mixer->base;
+
+		if (ptr == base + MDSS_MDP_REG_LM_OP_MODE) {
+			ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+			break;
+		/* GC range */
+		} else if ((ptr >= base + MDSS_MDP_REG_LM_GC_LUT_BASE) &&
+			(ptr <= base + MDSS_MDP_REG_LM_GC_LUT_BASE +
+						MDSS_MDP_GC_SIZE)) {
+			ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+			break;
+		}
+
+		/* per-stage blend op / FG alpha / BG alpha registers */
+		for (stage = 0; stage < TOTAL_BLEND_STAGES; stage++)
+			if (ptr == base + MDSS_MDP_REG_LM_BLEND_OFFSET(stage) +
+						 MDSS_MDP_REG_LM_BLEND_OP) {
+				ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+				goto End;
+			} else if (ptr == base +
+					MDSS_MDP_REG_LM_BLEND_OFFSET(stage) +
+					MDSS_MDP_REG_LM_BLEND_FG_ALPHA) {
+				ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+				goto End;
+			} else if (ptr == base +
+					 MDSS_MDP_REG_LM_BLEND_OFFSET(stage) +
+					 MDSS_MDP_REG_LM_BLEND_BG_ALPHA) {
+				ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+				goto End;
+			}
+	}
+
+End:
+	return ret;
+}
+
+/*
+ * is_valid_calib_addr() - top-level calibration address validator.
+ * @addr:      MDSS-relative register pointer to validate.
+ * @operation: MDP_PP_OPS_* bits the caller wants to perform.
+ *
+ * Rejects unaligned addresses, accepts a few global MDP registers
+ * directly, and otherwise dispatches to the per-block validators.
+ *
+ * NOTE(review): the "ptr >= block_base" pre-checks assume each block's
+ * register region starts at the first instance's base — confirm against
+ * the MDP address map before relying on them to skip validators.
+ *
+ * Return: the subset of @operation that is permitted for @addr (0 if the
+ * address is invalid or the operation is not allowed).
+ */
+static int is_valid_calib_addr(void *addr, u32 operation)
+{
+	int ret = 0;
+	char __iomem *ptr = addr;
+	char __iomem *mixer_base = mdss_res->mixer_intf->base;
+	char __iomem *ctl_base   = mdss_res->ctl_off->base;
+	char __iomem *dspp_base  = mdss_res->mixer_intf->dspp_base;
+
+	/* registers are word-aligned; reject everything else */
+	if ((uintptr_t) addr % 4) {
+		ret = 0;
+	} else if (ptr == mdss_res->mdss_io.base + MDSS_REG_HW_VERSION) {
+		ret = MDP_PP_OPS_READ;
+	} else if (ptr == (mdss_res->mdp_base + MDSS_MDP_REG_HW_VERSION) ||
+	    ptr == (mdss_res->mdp_base + MDSS_MDP_REG_DISP_INTF_SEL)) {
+		ret = MDP_PP_OPS_READ;
+	/* IGC DSPP range */
+	} else if (ptr >= (mdss_res->mdp_base + MDSS_MDP_REG_IGC_DSPP_BASE) &&
+		    ptr <= (mdss_res->mdp_base + MDSS_MDP_REG_IGC_DSPP_BASE +
+						MDSS_MDP_IGC_DSPP_SIZE)) {
+		ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+	/* IGC SSPP range */
+	} else if (ptr >= (mdss_res->mdp_base + MDSS_MDP_REG_IGC_VIG_BASE) &&
+		    ptr <= (mdss_res->mdp_base + MDSS_MDP_REG_IGC_VIG_BASE +
+						MDSS_MDP_IGC_SSPP_SIZE)) {
+		ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
+	} else {
+		/* first validator that recognises the address wins */
+		if (ptr >= dspp_base) {
+			ret = is_valid_calib_dspp_addr(ptr);
+			if (ret)
+				goto valid_addr;
+		}
+		if (ptr >= ctl_base) {
+			ret = is_valid_calib_ctrl_addr(ptr);
+			if (ret)
+				goto valid_addr;
+		}
+		if (mdss_res->vig_pipes &&
+		  ptr >= mdss_res->vig_pipes->base) {
+			ret = is_valid_calib_vig_addr(ptr);
+			if (ret)
+				goto valid_addr;
+		}
+		if (mdss_res->rgb_pipes &&
+		  ptr >= mdss_res->rgb_pipes->base) {
+			ret = is_valid_calib_rgb_addr(ptr);
+			if (ret)
+				goto valid_addr;
+		}
+		if (mdss_res->dma_pipes &&
+		  ptr >= mdss_res->dma_pipes->base) {
+			ret = is_valid_calib_dma_addr(ptr);
+			if (ret)
+				goto valid_addr;
+		}
+		if (ptr >= mixer_base)
+			ret = is_valid_calib_mixer_addr(ptr);
+	}
+
+valid_addr:
+	return ret & operation;
+}
+
+/*
+ * mdss_mdp_calib_config() - perform one validated calibration register
+ * read or write on behalf of userspace.
+ * @cfg:      user-supplied op (MDP_PP_OPS_READ/WRITE), MDSS-relative
+ *            address and data word.
+ * @copyback: set to 1 when @cfg->data was filled by a read and must be
+ *            copied back to userspace by the caller.
+ *
+ * The address is an offset from the MDSS base and is only touched after
+ * is_valid_calib_addr() confirms the requested access is whitelisted.
+ *
+ * Return: 0 on success, -1 if the address/operation is not permitted.
+ */
+int mdss_mdp_calib_config(struct mdp_calib_config_data *cfg, u32 *copyback)
+{
+	int ret = -1;
+	void *ptr;
+
+	/* Calib addrs are always offsets from the MDSS base */
+	ptr = (void *)((unsigned long) cfg->addr) +
+		((uintptr_t) mdss_res->mdss_io.base);
+	if (is_valid_calib_addr(ptr, cfg->ops))
+		ret = 0;
+	else
+		return ret;
+
+	/* clocks must be on around the register access */
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+	if (cfg->ops & MDP_PP_OPS_READ) {
+		cfg->data = readl_relaxed(ptr);
+		*copyback = 1;
+		ret = 0;
+	} else if (cfg->ops & MDP_PP_OPS_WRITE) {
+		writel_relaxed(cfg->data, ptr);
+		ret = 0;
+	}
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	return ret;
+}
+
+/*
+ * mdss_mdp_calib_mode() - set the calibration-mode mask for a display and
+ * snapshot its current backlight level.
+ * @mfd: framebuffer device being calibrated.
+ * @cfg: supplies the calib_mask to apply.
+ *
+ * Fix: @cfg was dereferenced without a NULL check even though the other
+ * two parameters were validated; reject a NULL @cfg as well.
+ *
+ * Return: 0 on success, -EINVAL on invalid parameters.
+ */
+int mdss_mdp_calib_mode(struct msm_fb_data_type *mfd,
+				struct mdss_calib_cfg *cfg)
+{
+	if (!mdss_pp_res || !mfd || !cfg)
+		return -EINVAL;
+	mutex_lock(&mdss_pp_mutex);
+	mfd->calib_mode = cfg->calib_mask;
+	/* record the backlight level in effect when calibration started */
+	mutex_lock(&mfd->bl_lock);
+	mfd->calib_mode_bl = mfd->bl_level;
+	mutex_unlock(&mfd->bl_lock);
+	mutex_unlock(&mdss_pp_mutex);
+	return 0;
+}
+
+/*
+ * mdss_mdp_calib_config_buffer() - batched calibration register access.
+ * @cfg:      user buffer of (address, data) u32 pairs plus op flags and
+ *            total byte size (bounded by PAGE_SIZE).
+ * @copyback: set to 1 when read results must be copied back to userspace.
+ *
+ * Each pair's address is an MDSS-relative offset, validated against the
+ * calibration whitelist before any register access.
+ *
+ * Fixes: (1) ret was never set to 0 by a fully successful loop, so the
+ * function relied on copy_to_user()'s return value to report success;
+ * (2) the copy-back condition tested "ret & MDP_PP_OPS_READ" — with
+ * ret == -1 every bit is set, so the buffer was copied back to userspace
+ * and *copyback raised even for write-only operations.  Test cfg->ops on
+ * success instead.
+ *
+ * Return: 0 on success, -1 on invalid input or failed validation, or the
+ * nonzero copy_to_user() remainder if the final copy back fails.
+ */
+int mdss_mdp_calib_config_buffer(struct mdp_calib_config_buffer *cfg,
+						u32 *copyback)
+{
+	int ret = -1, counter;
+	uint32_t *buff = NULL, *buff_org = NULL;
+	void *ptr;
+	int i = 0;
+
+	if (!cfg) {
+		pr_err("Invalid buffer pointer\n");
+		return ret;
+	}
+
+	if (cfg->size == 0 || cfg->size > PAGE_SIZE) {
+		pr_err("Invalid buffer size %d\n", cfg->size);
+		return ret;
+	}
+
+	counter = cfg->size / (sizeof(uint32_t) * 2);
+	buff_org = buff = kzalloc(cfg->size, GFP_KERNEL);
+	if (buff == NULL) {
+		pr_err("Config buffer allocation failed\n");
+		return ret;
+	}
+
+	if (copy_from_user(buff, cfg->buffer, cfg->size)) {
+		kfree(buff);
+		pr_err("config buffer copy failed\n");
+		return ret;
+	}
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+
+	ret = 0;
+	for (i = 0; i < counter; i++) {
+		ptr = (void *) (((unsigned int) *buff) +
+				mdss_res->mdss_io.base);
+
+		if (!is_valid_calib_addr(ptr, cfg->ops)) {
+			ret = -1;
+			pr_err("Address validation failed or access not permitted\n");
+			break;
+		}
+
+		buff++;
+		if (cfg->ops & MDP_PP_OPS_READ)
+			*buff = readl_relaxed(ptr);
+		else if (cfg->ops & MDP_PP_OPS_WRITE)
+			writel_relaxed(*buff, ptr);
+		buff++;
+	}
+
+	/* only copy results back for a fully successful read batch */
+	if (!ret && (cfg->ops & MDP_PP_OPS_READ)) {
+		ret = copy_to_user(cfg->buffer, buff_org, cfg->size);
+		*copyback = 1;
+	}
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+
+	kfree(buff_org);
+	return ret;
+}
+
+/*
+ * sspp_cache_location() - map a pipe type to its PP cache config block.
+ * @pipe_type: one of the MDSS_MDP_PIPE_TYPE_* values.
+ * @block:     out parameter receiving the matching pp_config_block.
+ *
+ * Return: 0 on success, -EINVAL for a NULL @block or unknown pipe type.
+ */
+static int sspp_cache_location(u32 pipe_type, enum pp_config_block *block)
+{
+	if (!block) {
+		pr_err("invalid params %pK\n", block);
+		return -EINVAL;
+	}
+
+	switch (pipe_type) {
+	case MDSS_MDP_PIPE_TYPE_VIG:
+		*block = SSPP_VIG;
+		return 0;
+	case MDSS_MDP_PIPE_TYPE_RGB:
+		*block = SSPP_RGB;
+		return 0;
+	case MDSS_MDP_PIPE_TYPE_DMA:
+		*block = SSPP_DMA;
+		return 0;
+	default:
+		pr_err("invalid pipe type %d\n", pipe_type);
+		return -EINVAL;
+	}
+}
+
+/*
+ * mdss_mdp_pp_sspp_config() - stage per-pipe post-processing config.
+ * @pipe: source pipe whose pp_cfg.config_ops select the features to apply
+ *        (IGC, histogram start/stop, hist LUT, PA v2, PCC).
+ *
+ * For each selected feature, either caches the user payload through the
+ * versioned pp_ops handlers (newer HW) or copies the legacy tables into
+ * pipe->pp_res directly.  On any failure config_ops is cleared so the
+ * partial configuration is not applied.
+ *
+ * Return: 0 on success, negative errno on validation/copy failure.
+ */
+int mdss_mdp_pp_sspp_config(struct mdss_mdp_pipe *pipe)
+{
+	struct mdp_histogram_start_req hist;
+	struct mdp_pp_cache_res cache_res;
+	u32 len = 0;
+	int ret = 0;
+
+	if (!pipe) {
+		pr_err("invalid params, pipe %pK\n", pipe);
+		return -EINVAL;
+	}
+
+	cache_res.mdss_pp_res = NULL;
+	cache_res.pipe_res = pipe;
+	ret = sspp_cache_location(pipe->type, &cache_res.block);
+	if (ret) {
+		pr_err("invalid cache res block for igc ret %d\n",
+			ret);
+		goto exit_fail;
+	}
+	if ((pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_IGC_CFG)) {
+		len = pipe->pp_cfg.igc_cfg.len;
+		/* versioned handler present => cache; else legacy copy path */
+		if (pp_ops[IGC].pp_set_config) {
+			ret = pp_igc_lut_cache_params(&pipe->pp_cfg.igc_cfg,
+						      &cache_res, false);
+			if (ret) {
+				pr_err("failed to cache igc params ret %d\n",
+					ret);
+				goto exit_fail;
+			}
+		}  else if (len == IGC_LUT_ENTRIES) {
+			/* legacy: copy user LUTs then repoint cfg at the
+			 * kernel-owned copies */
+			ret = copy_from_user(pipe->pp_res.igc_c0_c1,
+					pipe->pp_cfg.igc_cfg.c0_c1_data,
+					sizeof(uint32_t) * len);
+			if (ret) {
+				pr_err("failed to copy the igc c0_c1 data\n");
+				ret = -EFAULT;
+				goto exit_fail;
+			}
+			ret = copy_from_user(pipe->pp_res.igc_c2,
+					pipe->pp_cfg.igc_cfg.c2_data,
+					sizeof(uint32_t) * len);
+			if (ret) {
+				ret = -EFAULT;
+				pr_err("failed to copy the igc c2 data\n");
+				goto exit_fail;
+			}
+			pipe->pp_cfg.igc_cfg.c0_c1_data =
+							pipe->pp_res.igc_c0_c1;
+			pipe->pp_cfg.igc_cfg.c2_data = pipe->pp_res.igc_c2;
+		} else
+			pr_warn("invalid length of IGC len %d\n", len);
+	}
+	if (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_HIST_CFG) {
+		if (pipe->pp_cfg.hist_cfg.ops & MDP_PP_OPS_ENABLE) {
+			hist.block = pipe->pp_cfg.hist_cfg.block;
+			hist.frame_cnt =
+				pipe->pp_cfg.hist_cfg.frame_cnt;
+			hist.bit_mask = pipe->pp_cfg.hist_cfg.bit_mask;
+			hist.num_bins = pipe->pp_cfg.hist_cfg.num_bins;
+			/* NOTE(review): return value ignored — histogram
+			 * start/stop is treated as best-effort here */
+			mdss_mdp_hist_start(&hist);
+		} else if (pipe->pp_cfg.hist_cfg.ops &
+						MDP_PP_OPS_DISABLE) {
+			mdss_mdp_hist_stop(pipe->pp_cfg.hist_cfg.block);
+		}
+	}
+	if (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_HIST_LUT_CFG) {
+		if (!pp_ops[HIST_LUT].pp_set_config) {
+			len = pipe->pp_cfg.hist_lut_cfg.len;
+			if (len != ENHIST_LUT_ENTRIES) {
+				ret = -EINVAL;
+				pr_err("Invalid hist lut len: %d\n", len);
+				goto exit_fail;
+			}
+			ret = copy_from_user(pipe->pp_res.hist_lut,
+					pipe->pp_cfg.hist_lut_cfg.data,
+					sizeof(uint32_t) * len);
+			if (ret) {
+				ret = -EFAULT;
+				pr_err("failed to copy the hist lut\n");
+				goto exit_fail;
+			}
+			pipe->pp_cfg.hist_lut_cfg.data = pipe->pp_res.hist_lut;
+		} else {
+			ret = pp_hist_lut_cache_params(
+					&pipe->pp_cfg.hist_lut_cfg,
+					&cache_res);
+			if (ret) {
+				pr_err("Failed to cache Hist LUT params on pipe %d, ret %d\n",
+						pipe->num, ret);
+				goto exit_fail;
+			}
+		}
+	}
+	if ((pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_PA_V2_CFG) &&
+	    (pp_ops[PA].pp_set_config)) {
+		ret = pp_pa_cache_params(&pipe->pp_cfg.pa_v2_cfg_data,
+					 &cache_res);
+		if (ret) {
+			pr_err("Failed to cache PA params on pipe %d, ret %d\n",
+				pipe->num, ret);
+			goto exit_fail;
+		}
+	}
+	if (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_PCC_CFG
+	    && pp_ops[PCC].pp_set_config) {
+		ret = pp_pcc_cache_params(&pipe->pp_cfg.pcc_cfg_data,
+					  &cache_res);
+		if (ret) {
+			pr_err("failed to cache the pcc params ret %d\n", ret);
+			goto exit_fail;
+		}
+	}
+exit_fail:
+	/* on any error, drop the whole staged config for this pipe */
+	if (ret) {
+		pr_err("VIG PP setup failed on pipe %d type %d ret %d\n",
+				pipe->num, pipe->type, ret);
+		pipe->pp_cfg.config_ops = 0;
+	}
+
+	return ret;
+}
+
+/*
+ * pp_update_pcc_pipe_setup() - program cached PCC config into a pipe.
+ * @pipe:     pipe whose pp_cfg.pcc_cfg_data should be written out.
+ * @location: pp_config_block identifying the pipe class (VIG/RGB/DMA),
+ *            used to pick the per-class PCC register offset.
+ *
+ * A U32_MAX offset means the target has no PCC block.
+ * NOTE(review): the error prints use %d on U32_MAX, which renders as -1;
+ * harmless but slightly misleading in logs.
+ *
+ * Return: 0 on success, -EINVAL for a missing PCC block or bad location.
+ */
+static int pp_update_pcc_pipe_setup(struct mdss_mdp_pipe *pipe, u32 location)
+{
+	int ret = 0;
+	struct mdss_data_type *mdata = NULL;
+	char __iomem *pipe_base = NULL;
+
+	if (!pipe) {
+		pr_err("invalid param pipe %pK\n", pipe);
+		return -EINVAL;
+	}
+
+	mdata = mdss_mdp_get_mdata();
+	pipe_base = pipe->base;
+	switch (location) {
+	case SSPP_VIG:
+		if (mdata->pp_block_off.vig_pcc_off == U32_MAX) {
+			pr_err("invalid offset for vig pcc %d\n",
+				U32_MAX);
+			ret = -EINVAL;
+			goto exit_sspp_setup;
+		}
+		pipe_base += mdata->pp_block_off.vig_pcc_off;
+		break;
+	case SSPP_RGB:
+		if (mdata->pp_block_off.rgb_pcc_off == U32_MAX) {
+			pr_err("invalid offset for rgb pcc %d\n",
+				U32_MAX);
+			ret = -EINVAL;
+			goto exit_sspp_setup;
+		}
+		pipe_base += mdata->pp_block_off.rgb_pcc_off;
+		break;
+	case SSPP_DMA:
+		if (mdata->pp_block_off.dma_pcc_off == U32_MAX) {
+			pr_err("invalid offset for dma pcc %d\n",
+				U32_MAX);
+			ret = -EINVAL;
+			goto exit_sspp_setup;
+		}
+		pipe_base += mdata->pp_block_off.dma_pcc_off;
+		break;
+	default:
+		pr_err("invalid location for PCC %d\n",
+			location);
+		ret = -EINVAL;
+		goto exit_sspp_setup;
+	}
+	/* callers only reach here when pp_ops[PCC].pp_set_config is set */
+	pp_ops[PCC].pp_set_config(pipe_base, &pipe->pp_res.pp_sts,
+		&pipe->pp_cfg.pcc_cfg_data, location);
+exit_sspp_setup:
+	return ret;
+}
+
+/*
+ * mdss_mdp_pp_get_version() - report the config version of a PP feature.
+ * @version: in/out; pp_feature selects the feature, version_info receives
+ *           the result (mdp_pp_legacy when no versioned handler exists).
+ *
+ * Return: 0 on success, -EINVAL for bad input, or the handler's error.
+ */
+int mdss_mdp_pp_get_version(struct mdp_pp_feature_version *version)
+{
+	u32 feature_version = mdp_pp_legacy;
+	int ret = 0;
+
+	if (!version) {
+		pr_err("invalid param version %pK\n", version);
+		return -EINVAL;
+	}
+	if (version->pp_feature >= PP_FEATURE_MAX) {
+		pr_err("invalid feature passed %d\n", version->pp_feature);
+		return -EINVAL;
+	}
+
+	/* features without a versioned handler report mdp_pp_legacy */
+	if (pp_ops[version->pp_feature].pp_get_version)
+		ret = pp_ops[version->pp_feature].pp_get_version(
+				&feature_version);
+	if (ret) {
+		pr_err("failed to query version for feature %d ret %d\n",
+			version->pp_feature, ret);
+		return ret;
+	}
+
+	version->version_info = feature_version;
+	return 0;
+}
+
+/*
+ * mdss_mdp_hist_irq_set_mask() - enable histogram interrupt bits.
+ * @irq: bits to OR into the HIST_INTR_EN register.
+ *
+ * Read-modify-write under hist_intr.lock so concurrent mask updates do
+ * not lose bits.  NOTE(review): uses plain spin_lock, so this appears to
+ * assume it is never called from the histogram IRQ path — confirm.
+ */
+static void mdss_mdp_hist_irq_set_mask(u32 irq)
+{
+	u32 mask;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	spin_lock(&mdata->hist_intr.lock);
+	mask = readl_relaxed(mdata->mdp_base + MDSS_MDP_REG_HIST_INTR_EN);
+	mask |= irq;
+	pr_debug("interrupt mask being set %x irq updated %x\n", mask, irq);
+	writel_relaxed(mask, mdata->mdp_base + MDSS_MDP_REG_HIST_INTR_EN);
+	spin_unlock(&mdata->hist_intr.lock);
+}
+
+/*
+ * mdss_mdp_hist_irq_clear_mask() - disable histogram interrupt bits.
+ * @irq: bits to clear from the HIST_INTR_EN register.
+ *
+ * Counterpart of mdss_mdp_hist_irq_set_mask(); same locking contract.
+ */
+static void mdss_mdp_hist_irq_clear_mask(u32 irq)
+{
+	u32 mask;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	spin_lock(&mdata->hist_intr.lock);
+	mask = readl_relaxed(mdata->mdp_base + MDSS_MDP_REG_HIST_INTR_EN);
+	mask = mask & ~irq;
+	pr_debug("interrupt mask being cleared %x irq cleared %x\n", mask, irq);
+	writel_relaxed(mask, mdata->mdp_base + MDSS_MDP_REG_HIST_INTR_EN);
+	spin_unlock(&mdata->hist_intr.lock);
+}
+
+/*
+ * mdss_mdp_hist_intr_notify() - notify userspace when a display's
+ * histogram data is ready.
+ * @disp: logical display number to check.
+ *
+ * Counts the DSPP histogram blocks bound to @disp and how many of them
+ * have reached HIST_READY; only when every block is ready does it bump
+ * the event counter and fire the sysfs notification.
+ * NOTE(review): @ctl keeps the value from the last matching block — this
+ * assumes all blocks of one display share the same ctl/mfd; confirm.
+ */
+static void mdss_mdp_hist_intr_notify(u32 disp)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct pp_hist_col_info *hist_info = NULL;
+	int i = 0, disp_count = 0, hist_count = 0;
+	struct mdss_mdp_ctl *ctl = NULL;
+	struct mdss_overlay_private *mdp5_data = NULL;
+
+	for (i = 0; i < mdata->ndspp; i++) {
+		hist_info = &mdss_pp_res->dspp_hist[i];
+		spin_lock(&hist_info->hist_lock);
+		if (hist_info->disp_num == disp) {
+			disp_count++;
+			ctl = hist_info->ctl;
+			if (hist_info->col_state == HIST_READY)
+				hist_count++;
+		}
+		spin_unlock(&hist_info->hist_lock);
+	}
+	/* notify only when every histogram block of this display is ready */
+	if (disp_count != hist_count || !ctl)
+		return;
+	mdp5_data = mfd_to_mdp5_data(ctl->mfd);
+	if (!mdp5_data) {
+		pr_err("mdp5_data is NULL\n");
+		return;
+	}
+	mdp5_data->hist_events++;
+	sysfs_notify_dirent(mdp5_data->hist_event_sd);
+}
+
+/*
+ * mdss_mdp_copy_layer_pp_info() - deep-copy a layer's userspace PP config
+ * into kernel memory.
+ * @layer: input layer whose pp_info points at a userspace
+ *         mdp_overlay_pp_params; on success it is repointed at the
+ *         kernel copy (ownership transfers to the caller, released via
+ *         mdss_mdp_free_layer_pp_info()).
+ *
+ * Copies the top-level struct, then each feature payload selected in
+ * config_ops (IGC, hist LUT, PA v2, PCC); unselected payload pointers
+ * are NULLed so the free path can kfree() them unconditionally.  The
+ * goto chain unwinds the payloads in reverse order of allocation.
+ *
+ * Return: 0 on success, -EFAULT/-ENOMEM or a payload-copy error code.
+ */
+int mdss_mdp_copy_layer_pp_info(struct mdp_input_layer *layer)
+{
+	struct mdp_overlay_pp_params *pp_info = NULL;
+	int ret = 0;
+	uint32_t ops;
+
+	if (!layer) {
+		pr_err("invalid layer pointer passed %pK\n", layer);
+		return -EFAULT;
+	}
+
+	pp_info = kmalloc(sizeof(struct mdp_overlay_pp_params),
+			GFP_KERNEL);
+	if (!pp_info)
+		return -ENOMEM;
+
+	ret = copy_from_user(pp_info, layer->pp_info,
+			sizeof(struct mdp_overlay_pp_params));
+	if (ret) {
+		pr_err("layer list copy from user failed, pp_info = %pK\n",
+			layer->pp_info);
+		ret = -EFAULT;
+		goto exit_pp_info;
+	}
+
+	ops = pp_info->config_ops;
+	if (ops & MDP_OVERLAY_PP_IGC_CFG) {
+		ret = pp_copy_layer_igc_payload(pp_info);
+		if (ret) {
+			pr_err("Failed to copy IGC payload, ret = %d\n", ret);
+			goto exit_pp_info;
+		}
+	} else {
+		pp_info->igc_cfg.cfg_payload = NULL;
+	}
+	if (ops & MDP_OVERLAY_PP_HIST_LUT_CFG) {
+		ret = pp_copy_layer_hist_lut_payload(pp_info);
+		if (ret) {
+			pr_err("Failed to copy Hist LUT payload, ret = %d\n",
+				ret);
+			goto exit_igc;
+		}
+	} else {
+		pp_info->hist_lut_cfg.cfg_payload = NULL;
+	}
+	if (ops & MDP_OVERLAY_PP_PA_V2_CFG) {
+		ret = pp_copy_layer_pa_payload(pp_info);
+		if (ret) {
+			pr_err("Failed to copy PA payload, ret = %d\n", ret);
+			goto exit_hist_lut;
+		}
+	} else {
+		pp_info->pa_v2_cfg_data.cfg_payload = NULL;
+	}
+	if (ops & MDP_OVERLAY_PP_PCC_CFG) {
+		ret = pp_copy_layer_pcc_payload(pp_info);
+		if (ret) {
+			pr_err("Failed to copy PCC payload, ret = %d\n", ret);
+			goto exit_pa;
+		}
+	} else {
+		pp_info->pcc_cfg_data.cfg_payload = NULL;
+	}
+
+	/* success: hand the kernel copy to the caller */
+	layer->pp_info = pp_info;
+
+	return ret;
+
+exit_pa:
+	kfree(pp_info->pa_v2_cfg_data.cfg_payload);
+exit_hist_lut:
+	kfree(pp_info->hist_lut_cfg.cfg_payload);
+exit_igc:
+	kfree(pp_info->igc_cfg.cfg_payload);
+exit_pp_info:
+	kfree(pp_info);
+	return ret;
+}
+
+/*
+ * mdss_mdp_free_layer_pp_info() - release a kernel PP config created by
+ * mdss_mdp_copy_layer_pp_info().
+ * @layer: layer whose pp_info (and all feature payloads) is freed; the
+ *         pointer is cleared afterwards.  Safe on NULL layer/pp_info.
+ */
+void mdss_mdp_free_layer_pp_info(struct mdp_input_layer *layer)
+{
+	struct mdp_overlay_pp_params *pp_info;
+
+	if (!layer)
+		return;
+
+	pp_info = (struct mdp_overlay_pp_params *) layer->pp_info;
+	if (!pp_info)
+		return;
+
+	/* unselected payloads were NULLed at copy time; kfree(NULL) is ok */
+	kfree(pp_info->igc_cfg.cfg_payload);
+	kfree(pp_info->hist_lut_cfg.cfg_payload);
+	kfree(pp_info->pa_v2_cfg_data.cfg_payload);
+	kfree(pp_info->pcc_cfg_data.cfg_payload);
+	kfree(pp_info);
+	layer->pp_info = NULL;
+}
+
+/*
+ * mdss_mdp_mfd_valid_dspp() - check whether every mixer driving @mfd has
+ * a DSPP attached.
+ *
+ * Return: nonzero (true) when the left mixer exists and is within the
+ * DSPP count, and the right mixer (if present) is as well.
+ */
+int mdss_mdp_mfd_valid_dspp(struct msm_fb_data_type *mfd)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+	int valid = (ctl) && (ctl->mixer_left) &&
+			(ctl->mixer_left->num < mdata->ndspp);
+
+	if ((ctl) && (ctl->mixer_right))
+		valid &= (ctl->mixer_right->num < mdata->ndspp);
+	return valid;
+}
+
+/*
+ * mdss_mdp_mfd_valid_ad() - check whether every mixer driving @mfd has an
+ * assertive-display block attached (mirror of mdss_mdp_mfd_valid_dspp()).
+ *
+ * Return: nonzero (true) when all mixers fall within nad_cfgs.
+ */
+static int mdss_mdp_mfd_valid_ad(struct msm_fb_data_type *mfd)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+	int valid = (ctl) && (ctl->mixer_left) &&
+			(ctl->mixer_left->num < mdata->nad_cfgs);
+
+	if ((ctl) && (ctl->mixer_right))
+		valid &= (ctl->mixer_right->num < mdata->nad_cfgs);
+	return valid;
+}
+
+/*
+ * pp_mfd_release_all() - tear down all post-processing state for a
+ * framebuffer device (called on release).
+ * @mfd: framebuffer device being released.
+ *
+ * Releases AD state, stops any running histogram, clears the display's
+ * PP status block and resets backlight scale to unity (1024).
+ * Displays beyond the logical-block range are silently ignored.
+ *
+ * Return: 0 on success or the AD release error (teardown continues
+ * regardless).
+ */
+static int pp_mfd_release_all(struct msm_fb_data_type *mfd)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	int ret = 0;
+
+	if (!mfd || !mdata) {
+		pr_err("Invalid mfd %pK mdata %pK\n", mfd, mdata);
+		return -EPERM;
+	}
+
+	if (mfd->index >= (MDP_BLOCK_MAX - MDP_LOGICAL_BLOCK_DISP_0))
+		return ret;
+
+	if (mdata->nad_cfgs) {
+		ret = pp_mfd_ad_release_all(mfd);
+		if (ret)
+			pr_err("ad release all failed on disp %d, ret %d\n",
+				mfd->index, ret);
+	}
+
+	if (mdss_mdp_mfd_valid_dspp(mfd))
+		mdss_mdp_hist_stop(mfd->index + MDP_LOGICAL_BLOCK_DISP_0);
+	memset(&mdss_pp_res->pp_disp_sts[mfd->index], 0,
+			sizeof(mdss_pp_res->pp_disp_sts[mfd->index]));
+	/* 1024 == unity backlight scale */
+	mfd->bl_scale = 1024;
+
+	return ret;
+}
+
+/*
+ * pp_mfd_ad_release_all() - detach and quiesce the assertive-display
+ * block bound to @mfd.
+ *
+ * Disables AD under its lock, cancels the pending calc work, and removes
+ * the AD vsync handler from the controller.  A missing workqueue or a
+ * device without AD support is treated as success.
+ *
+ * Return: 0 on success or if AD does not apply, negative errno otherwise.
+ */
+static int pp_mfd_ad_release_all(struct msm_fb_data_type *mfd)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_ctl *ctl = NULL;
+	struct mdss_ad_info *ad = NULL;
+	int ret = 0;
+
+	if (!mdata || !mfd) {
+		pr_err("invalid params mdata %pK mfd %pK\n", mdata, mfd);
+		return -EINVAL;
+	}
+	if (!mdata->ad_calc_wq)
+		return 0;
+
+	ret = mdss_mdp_get_ad(mfd, &ad);
+	if (ret == -ENODEV || ret == -EPERM) {
+		pr_debug("AD not supported on device, disp num %d\n",
+			mfd->index);
+		return 0;
+	} else if (ret) {
+		pr_err("failed to get ad_info ret %d\n", ret);
+		return ret;
+	}
+	if (!ad->mfd)
+		return 0;
+
+	/* detach under the lock, then cancel work outside it */
+	mutex_lock(&ad->lock);
+	ad->sts &= ~PP_STS_ENABLE;
+	ad->mfd = NULL;
+	ad->bl_mfd = NULL;
+	ad->state = 0;
+	mutex_unlock(&ad->lock);
+	cancel_work_sync(&ad->calc_work);
+
+	ctl = mfd_to_ctl(mfd);
+	if (ctl && ctl->ops.remove_vsync_handler)
+		ctl->ops.remove_vsync_handler(ctl, &ad->handle);
+
+	return ret;
+}
+
+/*
+ * pp_validate_dspp_mfd_block() - sanity-check an mfd/logical-block pair
+ * before a DSPP PP config call.
+ *
+ * Return: 0 when @block is in range and corresponds to @mfd's index;
+ * -EINVAL/-EPERM otherwise.
+ */
+static inline int pp_validate_dspp_mfd_block(struct msm_fb_data_type *mfd,
+					int block)
+{
+	int disp_num;
+
+	if (!mfd)
+		return -EINVAL;
+
+	if (!mdss_mdp_mfd_valid_dspp(mfd)) {
+		pr_err("invalid display num %d for PP config\n", mfd->index);
+		return -EPERM;
+	}
+
+	if (block < MDP_LOGICAL_BLOCK_DISP_0 || block >= MDP_BLOCK_MAX) {
+		pr_err("invalid block %d\n", block);
+		return -EINVAL;
+	}
+
+	disp_num = block - MDP_LOGICAL_BLOCK_DISP_0;
+	if (disp_num != mfd->index) {
+		pr_err("PP block %d does not match corresponding mfd index %d\n",
+				block, mfd->index);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * pp_get_driver_ops() - bind the HW-revision-specific PP driver ops.
+ * @ops: table filled in by the selected pp_get_driver_ops_* backend;
+ *       zeroed for unknown revisions (legacy path).
+ *
+ * Also stashes the backend's private config in mdss_pp_res so later
+ * cache/setup calls can reach it.
+ *
+ * Return: 0 on success, -EINVAL if the backend failed to initialise.
+ */
+static int pp_get_driver_ops(struct mdp_pp_driver_ops *ops)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	int ret = 0;
+	void *pp_cfg = NULL;
+
+	switch (mdata->mdp_rev) {
+	case MDSS_MDP_HW_REV_107:
+	case MDSS_MDP_HW_REV_107_1:
+	case MDSS_MDP_HW_REV_107_2:
+	case MDSS_MDP_HW_REV_114:
+	case MDSS_MDP_HW_REV_115:
+	case MDSS_MDP_HW_REV_116:
+		pp_cfg = pp_get_driver_ops_v1_7(ops);
+		if (IS_ERR_OR_NULL(pp_cfg))
+			ret = -EINVAL;
+		else
+			mdss_pp_res->pp_data_v1_7 = pp_cfg;
+		break;
+	case MDSS_MDP_HW_REV_300:
+	case MDSS_MDP_HW_REV_301:
+		pp_cfg = pp_get_driver_ops_v3(ops);
+		if (IS_ERR_OR_NULL(pp_cfg)) {
+			ret = -EINVAL;
+		} else {
+			mdss_pp_res->pp_data_v1_7 = pp_cfg;
+			/* Currently all caching data is used from v17 for V3
+			 * hence setting the pointer to NULL. Will be used if we
+			 * have to add any caching specific to V3.
+			 */
+			mdss_pp_res->pp_data_v3 = NULL;
+		}
+		break;
+	default:
+		/* unknown revision: no versioned ops, legacy paths apply */
+		memset(ops, 0, sizeof(struct mdp_pp_driver_ops));
+		break;
+	}
+	return ret;
+}
+
+/*
+ * pp_ppb_setup() - program ping-pong-block dither for a mixer when the
+ * display's dither config is dirty.
+ * @mixer: mixer whose pingpong block is configured.
+ *
+ * On successful programming, sets the CTL flush bit that corresponds to
+ * the mixer's dither block so the config takes effect on the next flush.
+ *
+ * Return: always 0 (a pp_set_config failure only means the PPB has no
+ * dither block; it is deliberately not propagated).
+ */
+static int pp_ppb_setup(struct mdss_mdp_mixer *mixer)
+{
+	struct pp_sts_type *pp_sts;
+	struct mdss_mdp_ctl *ctl;
+	char __iomem *addr;
+	u32 flags, disp_num;
+	int ret = 0;
+
+	if (!mixer || !mixer->ctl || !mixer->ctl->mfd) {
+		pr_err("invalid parameters, mixer %pK ctl %pK mfd %pK\n",
+			mixer, (mixer ? mixer->ctl : NULL),
+		       (mixer ? (mixer->ctl ? mixer->ctl->mfd : NULL) : NULL));
+		return -EINVAL;
+	}
+	ctl = mixer->ctl;
+	disp_num = ctl->mfd->index;
+
+	if (disp_num < MDSS_BLOCK_DISP_NUM)
+		flags = mdss_pp_res->pp_disp_flags[disp_num];
+	else
+		flags = 0;
+	if ((flags & PP_FLAGS_DIRTY_DITHER)) {
+		if (pp_ops[DITHER].pp_set_config) {
+			pp_sts = &mdss_pp_res->pp_disp_sts[disp_num];
+			addr = mixer->pingpong_base;
+			/* if dither is supported in PPB function will
+			 * return 0. Failure will indicate that there
+			 * is no DITHER in PPB. In case of error skip the
+			 * programming of CTL flush bits for dither flush.
+			 */
+			ret = pp_ops[DITHER].pp_set_config(addr, pp_sts,
+				&mdss_pp_res->dither_disp_cfg[disp_num], PPB);
+			if (!ret) {
+				/* flush-bit layout: LM0-2 use BIT(13..15),
+				 * LM3 uses BIT(21) */
+				switch (mixer->num) {
+				case MDSS_MDP_INTF_LAYERMIXER0:
+				case MDSS_MDP_INTF_LAYERMIXER1:
+				case MDSS_MDP_INTF_LAYERMIXER2:
+					ctl->flush_bits |= BIT(13) <<
+						mixer->num;
+					break;
+				case MDSS_MDP_INTF_LAYERMIXER3:
+					ctl->flush_bits |= BIT(21);
+					break;
+				}
+			}
+			ret = 0;
+		}
+	}
+	return ret;
+}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp.h b/drivers/video/fbdev/msm/mdss_mdp_pp.h
new file mode 100644
index 0000000..46ec80f
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp.h
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2014-2015, 2017, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_MDP_PP_DEBUG_H
+#define MDSS_MDP_PP_DEBUG_H
+
+#include <linux/msm_mdp.h>
+
+#define MDSS_BLOCK_DISP_NUM (MDP_BLOCK_MAX - MDP_LOGICAL_BLOCK_DISP_0)
+
+/* PP STS related flags */
+#define PP_STS_ENABLE	0x1
+#define PP_STS_GAMUT_FIRST	0x2
+#define PP_STS_PA_LUT_FIRST	0x4
+
+#define PP_STS_PA_HUE_MASK		0x2
+#define PP_STS_PA_SAT_MASK		0x4
+#define PP_STS_PA_VAL_MASK		0x8
+#define PP_STS_PA_CONT_MASK		0x10
+#define PP_STS_PA_MEM_PROTECT_EN	0x20
+#define PP_STS_PA_MEM_COL_SKIN_MASK	0x40
+#define PP_STS_PA_MEM_COL_FOL_MASK	0x80
+#define PP_STS_PA_MEM_COL_SKY_MASK	0x100
+#define PP_STS_PA_SIX_ZONE_HUE_MASK	0x200
+#define PP_STS_PA_SIX_ZONE_SAT_MASK	0x400
+#define PP_STS_PA_SIX_ZONE_VAL_MASK	0x800
+#define PP_STS_PA_SAT_ZERO_EXP_EN	0x1000
+#define PP_STS_PA_MEM_PROT_HUE_EN	0x2000
+#define PP_STS_PA_MEM_PROT_SAT_EN	0x4000
+#define PP_STS_PA_MEM_PROT_VAL_EN	0x8000
+#define PP_STS_PA_MEM_PROT_CONT_EN	0x10000
+#define PP_STS_PA_MEM_PROT_BLEND_EN	0x20000
+#define PP_STS_PA_MEM_PROT_SIX_EN	0x40000
+
+/* Demo mode macros */
+#define MDSS_SIDE_NONE	0
+#define MDSS_SIDE_LEFT	1
+#define MDSS_SIDE_RIGHT	2
+/* size calculated for c0,c1_c2 for 4 tables */
+#define GAMUT_COLOR_COEFF_SIZE_V1_7 (2 * MDP_GAMUT_TABLE_V1_7_SZ * 4)
+/* 16 entries for c0,c1,c2 */
+#define GAMUT_SCALE_OFFSET_SIZE_V1_7 (3 * MDP_GAMUT_SCALE_OFF_SZ)
+#define GAMUT_TOTAL_TABLE_SIZE_V1_7 (GAMUT_COLOR_COEFF_SIZE_V1_7 + \
+				  GAMUT_SCALE_OFFSET_SIZE_V1_7)
+
+#define GAMUT_T0_SIZE	125
+#define GAMUT_T1_SIZE	100
+#define GAMUT_T2_SIZE	80
+#define GAMUT_T3_SIZE	100
+#define GAMUT_T4_SIZE	100
+#define GAMUT_T5_SIZE	80
+#define GAMUT_T6_SIZE	64
+#define GAMUT_T7_SIZE	80
+#define GAMUT_TOTAL_TABLE_SIZE (GAMUT_T0_SIZE + GAMUT_T1_SIZE + \
+	GAMUT_T2_SIZE + GAMUT_T3_SIZE + GAMUT_T4_SIZE + \
+	GAMUT_T5_SIZE + GAMUT_T6_SIZE + GAMUT_T7_SIZE)
+
+/* Total 5 QSEED3 filters: Direction filter + Y plane cir and sep + UV plane
+ * cir and  sep filters
+ */
+#define QSEED3_FILTERS		5
+
+#define QSEED3_LUT_REGIONS	4
+
+/* Op-mode register owners a PP feature may program (VIG pipe or DSPP). */
+enum pp_block_opmodes {
+	PP_OPMODE_VIG = 1,
+	PP_OPMODE_DSPP,
+	PP_OPMODE_MAX
+};
+
+/* Hardware block a PP configuration targets (source pipes, DSPP,
+ * layer mixer, or ping-pong block). */
+enum pp_config_block {
+	SSPP_RGB = 1,
+	SSPP_DMA,
+	SSPP_VIG,
+	DSPP,
+	LM,
+	PPB
+};
+
+/*
+ * struct mdp_pp_feature_ops - per-feature handlers supplied by the
+ * HW-revision backend; a NULL handler selects the legacy code path.
+ */
+struct mdp_pp_feature_ops {
+	u32 feature;
+	int (*pp_get_config)(char __iomem *base_addr, void *cfg_data,
+			u32 block_type, u32 disp_num);
+	int (*pp_set_config)(char __iomem *base_addr,
+		struct pp_sts_type *pp_sts, void *cfg_data,
+		u32 block_type);
+	int (*pp_get_version)(u32 *version);
+};
+
+/*
+ * struct mdp_pp_driver_ops - full PP driver binding for one HW revision:
+ * per-feature ops plus revision-specific helpers (opmode programming,
+ * histogram offsets/ISR info, SSPP-hist capability, gamut clock gating).
+ */
+struct mdp_pp_driver_ops {
+	struct mdp_pp_feature_ops pp_ops[PP_FEATURE_MAX];
+	void (*pp_opmode_config)(int location, struct pp_sts_type *pp_sts,
+			u32 *opmode, int side);
+	int (*get_hist_offset)(u32 block, u32 *ctl_off);
+	int (*get_hist_isr_info)(u32 *isr_mask);
+	bool (*is_sspp_hist_supp)(void);
+	void (*gamut_clk_gate_en)(char __iomem *base_addr);
+};
+
+/*
+ * struct mdss_pp_res_type_v1_7 - per-display payload storage for the
+ * v1.7 PP backend: raw LUT tables plus the v1.7-format config structs
+ * that reference them, one slot per logical display.
+ */
+struct mdss_pp_res_type_v1_7 {
+	u32 pgc_lm_table_c0[MDSS_BLOCK_DISP_NUM][PGC_LUT_ENTRIES];
+	u32 pgc_lm_table_c1[MDSS_BLOCK_DISP_NUM][PGC_LUT_ENTRIES];
+	u32 pgc_lm_table_c2[MDSS_BLOCK_DISP_NUM][PGC_LUT_ENTRIES];
+	u32 pgc_table_c0[MDSS_BLOCK_DISP_NUM][PGC_LUT_ENTRIES];
+	u32 pgc_table_c1[MDSS_BLOCK_DISP_NUM][PGC_LUT_ENTRIES];
+	u32 pgc_table_c2[MDSS_BLOCK_DISP_NUM][PGC_LUT_ENTRIES];
+	u32 igc_table_c0_c1[MDSS_BLOCK_DISP_NUM][IGC_LUT_ENTRIES];
+	u32 igc_table_c2[MDSS_BLOCK_DISP_NUM][IGC_LUT_ENTRIES];
+	u32 hist_lut[MDSS_BLOCK_DISP_NUM][ENHIST_LUT_ENTRIES];
+	u32 six_zone_lut_p0[MDSS_BLOCK_DISP_NUM][MDP_SIX_ZONE_LUT_SIZE];
+	u32 six_zone_lut_p1[MDSS_BLOCK_DISP_NUM][MDP_SIX_ZONE_LUT_SIZE];
+	struct mdp_pgc_lut_data_v1_7 pgc_dspp_v17_data[MDSS_BLOCK_DISP_NUM];
+	struct mdp_pgc_lut_data_v1_7 pgc_lm_v17_data[MDSS_BLOCK_DISP_NUM];
+	struct mdp_igc_lut_data_v1_7 igc_v17_data[MDSS_BLOCK_DISP_NUM];
+	struct mdp_hist_lut_data_v1_7 hist_lut_v17_data[MDSS_BLOCK_DISP_NUM];
+	struct mdp_dither_data_v1_7 dither_v17_data[MDSS_BLOCK_DISP_NUM];
+	struct mdp_gamut_data_v1_7 gamut_v17_data[MDSS_BLOCK_DISP_NUM];
+	struct mdp_pcc_data_v1_7 pcc_v17_data[MDSS_BLOCK_DISP_NUM];
+	struct mdp_pa_data_v1_7 pa_v17_data[MDSS_BLOCK_DISP_NUM];
+};
+
+struct mdss_pp_res_type {
+	/* logical info */
+	u32 pp_disp_flags[MDSS_BLOCK_DISP_NUM];
+	u32 igc_lut_c0c1[MDSS_BLOCK_DISP_NUM][IGC_LUT_ENTRIES];
+	u32 igc_lut_c2[MDSS_BLOCK_DISP_NUM][IGC_LUT_ENTRIES];
+	struct mdp_ar_gc_lut_data
+		gc_lut_r[MDSS_BLOCK_DISP_NUM][GC_LUT_SEGMENTS];
+	struct mdp_ar_gc_lut_data
+		gc_lut_g[MDSS_BLOCK_DISP_NUM][GC_LUT_SEGMENTS];
+	struct mdp_ar_gc_lut_data
+		gc_lut_b[MDSS_BLOCK_DISP_NUM][GC_LUT_SEGMENTS];
+	u32 enhist_lut[MDSS_BLOCK_DISP_NUM][ENHIST_LUT_ENTRIES];
+	struct mdp_pa_cfg pa_disp_cfg[MDSS_BLOCK_DISP_NUM];
+	struct mdp_pa_v2_cfg_data pa_v2_disp_cfg[MDSS_BLOCK_DISP_NUM];
+	u32 six_zone_lut_curve_p0[MDSS_BLOCK_DISP_NUM][MDP_SIX_ZONE_LUT_SIZE];
+	u32 six_zone_lut_curve_p1[MDSS_BLOCK_DISP_NUM][MDP_SIX_ZONE_LUT_SIZE];
+	struct mdp_pcc_cfg_data pcc_disp_cfg[MDSS_BLOCK_DISP_NUM];
+	struct mdp_igc_lut_data igc_disp_cfg[MDSS_BLOCK_DISP_NUM];
+	struct mdp_pgc_lut_data argc_disp_cfg[MDSS_BLOCK_DISP_NUM];
+	struct mdp_pgc_lut_data pgc_disp_cfg[MDSS_BLOCK_DISP_NUM];
+	struct mdp_hist_lut_data enhist_disp_cfg[MDSS_BLOCK_DISP_NUM];
+	struct mdp_dither_cfg_data dither_disp_cfg[MDSS_BLOCK_DISP_NUM];
+	struct mdp_gamut_cfg_data gamut_disp_cfg[MDSS_BLOCK_DISP_NUM];
+	uint16_t gamut_tbl[MDSS_BLOCK_DISP_NUM][GAMUT_TOTAL_TABLE_SIZE * 3];
+	u32 hist_data[MDSS_BLOCK_DISP_NUM][HIST_V_SIZE];
+	struct pp_sts_type pp_disp_sts[MDSS_BLOCK_DISP_NUM];
+	/* physical info */
+	struct pp_hist_col_info *dspp_hist;
+	/*
+	 * The pp_data_v1_7 will be a pointer to newer MDP revisions of the
+	 * pp_res, which will hold the cfg_payloads of each feature in a single
+	 * struct.
+	 */
+	void *pp_data_v1_7;
+	void *pp_data_v3;
+};
+
+void *pp_get_driver_ops_v1_7(struct mdp_pp_driver_ops *ops);
+void *pp_get_driver_ops_v3(struct mdp_pp_driver_ops *ops);
+
+
+static inline void pp_sts_set_split_bits(u32 *sts, u32 bits)
+{
+	u32 tmp = *sts;
+
+	tmp &= ~MDSS_PP_SPLIT_MASK;
+	tmp |= bits & MDSS_PP_SPLIT_MASK;
+	*sts = tmp;
+}
+
+static inline bool pp_sts_is_enabled(u32 sts, int side)
+{
+	bool ret = false;
+	/*
+	 * If there are no sides, or if there are no split mode bits set, the
+	 * side can't be disabled via split mode.
+	 *
+	 * Otherwise, if the side being checked opposes the split mode
+	 * configuration, the side is disabled.
+	 */
+	if ((side == MDSS_SIDE_NONE) || !(sts & MDSS_PP_SPLIT_MASK))
+		ret = true;
+	else if ((sts & MDSS_PP_SPLIT_RIGHT_ONLY) && (side == MDSS_SIDE_RIGHT))
+		ret = true;
+	else if ((sts & MDSS_PP_SPLIT_LEFT_ONLY) && (side == MDSS_SIDE_LEFT))
+		ret = true;
+
+	return ret && (sts & PP_STS_ENABLE);
+}
+
+/* Debug related functions */
+void pp_print_lut(void *data, int size, char *tab, uint32_t type);
+void pp_print_uint16_lut(uint16_t *data, int size, char *tab);
+void pp_print_pcc_coeff(struct mdp_pcc_coeff *pcc_coeff, int tab_depth);
+void pp_print_pcc_cfg_data(struct mdp_pcc_cfg_data *pcc_data, int tab_depth);
+void pp_print_csc_cfg(struct mdp_csc_cfg *data, int tab_depth);
+void pp_print_csc_cfg_data(struct mdp_csc_cfg_data *data, int tab_depth);
+void pp_print_igc_lut_data(struct mdp_igc_lut_data *data, int tab_depth);
+void pp_print_ar_gc_lut_data(struct mdp_ar_gc_lut_data *data, int tab_depth);
+void pp_print_pgc_lut_data(struct mdp_pgc_lut_data *data, int tab_depth);
+void pp_print_hist_lut_data(struct mdp_hist_lut_data *data, int tab_depth);
+void pp_print_lut_cfg_data(struct mdp_lut_cfg_data *data, int tab_depth);
+void pp_print_qseed_cfg(struct mdp_qseed_cfg *data, int tab_depth);
+void pp_print_qseed_cfg_data(struct mdp_qseed_cfg_data *data, int tab_depth);
+void pp_print_pa_cfg(struct mdp_pa_cfg *data, int tab_depth);
+void pp_print_pa_cfg_data(struct mdp_pa_cfg_data *data, int tab_depth);
+void pp_print_mem_col_cfg(struct mdp_pa_mem_col_cfg *data, int tab_depth);
+void pp_print_pa_v2_data(struct mdp_pa_v2_data *data, int tab_depth);
+void pp_print_pa_v2_cfg_data(struct mdp_pa_v2_cfg_data *data, int tab_depth);
+void pp_print_dither_cfg_data(struct mdp_dither_cfg_data *data, int tab_depth);
+void pp_print_gamut_cfg_data(struct mdp_gamut_cfg_data *data, int tab_depth);
+void pp_print_ad_init(struct mdss_ad_init *data, int tab_depth);
+void pp_print_ad_cfg(struct mdss_ad_cfg *data, int tab_depth);
+void pp_print_ad_init_cfg(struct mdss_ad_init_cfg *data, int tab_depth);
+void pp_print_ad_input(struct mdss_ad_input *data, int tab_depth);
+void pp_print_histogram_cfg(struct mdp_histogram_cfg *data, int tab_depth);
+void pp_print_sharp_cfg(struct mdp_sharp_cfg *data, int tab_depth);
+void pp_print_calib_config_data(struct mdp_calib_config_data *data,
+				int tab_depth);
+void pp_print_calib_config_buffer(struct mdp_calib_config_buffer *data,
+				int tab_depth);
+void pp_print_calib_dcm_state(struct mdp_calib_dcm_state *data, int tab_depth);
+void pp_print_mdss_calib_cfg(struct mdss_calib_cfg *data, int tab_depth);
+
+#endif
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.c b/drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.c
new file mode 100644
index 0000000..ade3add
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.c
@@ -0,0 +1,1503 @@
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include "mdss_fb.h"
+#include "mdss_mdp.h"
+#include "mdss_mdp_pp.h"
+#include "mdss_mdp_pp_cache_config.h"
+
+#define IGC_C1_SHIFT 16
+static u32 pp_igc_601[IGC_LUT_ENTRIES] = {
+	0, 1, 2, 4, 5, 6, 7, 9, 10, 11, 12, 14, 15, 16, 18, 20, 21, 23,
+	25, 27, 29, 31, 33, 35, 37, 40, 42, 45, 48, 50, 53, 56, 59, 62,
+	66, 69, 72, 76, 79, 83, 87, 91, 95, 99, 103, 107, 112, 116, 121,
+	126, 131, 136, 141, 146, 151, 156, 162, 168, 173, 179, 185, 191,
+	197, 204, 210, 216, 223, 230, 237, 244, 251, 258, 265, 273, 280,
+	288, 296, 304, 312, 320, 329, 337, 346, 354, 363, 372, 381, 390,
+	400, 409, 419, 428, 438, 448, 458, 469, 479, 490, 500, 511, 522,
+	533, 544, 555, 567, 578, 590, 602, 614, 626, 639, 651, 664, 676,
+	689, 702, 715, 728, 742, 755, 769, 783, 797, 811, 825, 840, 854,
+	869, 884, 899, 914, 929, 945, 960, 976, 992, 1008, 1024, 1041,
+	1057, 1074, 1091, 1108, 1125, 1142, 1159, 1177, 1195, 1213, 1231,
+	1249, 1267, 1286, 1304, 1323, 1342, 1361, 1381, 1400, 1420, 1440,
+	1459, 1480, 1500, 1520, 1541, 1562, 1582, 1603, 1625, 1646, 1668,
+	1689, 1711, 1733, 1755, 1778, 1800, 1823, 1846, 1869, 1892, 1916,
+	1939, 1963, 1987, 2011, 2035, 2059, 2084, 2109, 2133, 2159, 2184,
+	2209, 2235, 2260, 2286, 2312, 2339, 2365, 2392, 2419, 2446, 2473,
+	2500, 2527, 2555, 2583, 2611, 2639, 2668, 2696, 2725, 2754, 2783,
+	2812, 2841, 2871, 2901, 2931, 2961, 2991, 3022, 3052, 3083, 3114,
+	3146, 3177, 3209, 3240, 3272, 3304, 3337, 3369, 3402, 3435, 3468,
+	3501, 3535, 3568, 3602, 3636, 3670, 3705, 3739, 3774, 3809, 3844,
+	3879, 3915, 3950, 3986, 4022, 4059, 4095,
+};
+
+static u32 pp_igc_709[IGC_LUT_ENTRIES] = {
+	0, 4, 7, 11, 14, 18, 21, 25, 29, 32, 36, 39, 43, 46, 50, 54, 57,
+	61, 64, 68, 71, 75, 78, 82, 86, 90, 94, 98, 102, 107, 111, 115,
+	120, 125, 130, 134, 139, 145, 150, 155, 161, 166, 172, 177, 183,
+	189, 195, 201, 208, 214, 220, 227, 234, 240, 247, 254, 261, 269,
+	276, 283, 291, 298, 306, 314, 322, 330, 338, 347, 355, 364, 372,
+	381, 390, 399, 408, 417, 426, 436, 445, 455, 465, 474, 484, 495,
+	505, 515, 525, 536, 547, 558, 568, 579, 591, 602, 613, 625, 636,
+	648, 660, 672, 684, 696, 708, 721, 733, 746, 759, 772, 785, 798,
+	811, 825, 838, 852, 865, 879, 893, 907, 922, 936, 950, 965, 980,
+	995, 1010, 1025, 1040, 1055, 1071, 1086, 1102, 1118, 1134, 1150,
+	1166, 1183, 1199, 1216, 1232, 1249, 1266, 1283, 1300, 1318, 1335,
+	1353, 1370, 1388, 1406, 1424, 1443, 1461, 1479, 1498, 1517, 1536,
+	1555, 1574, 1593, 1612, 1632, 1652, 1671, 1691, 1711, 1731, 1752,
+	1772, 1793, 1813, 1834, 1855, 1876, 1897, 1919, 1940, 1962, 1984,
+	2005, 2027, 2050, 2072, 2094, 2117, 2139, 2162, 2185, 2208, 2231,
+	2255, 2278, 2302, 2325, 2349, 2373, 2397, 2422, 2446, 2471, 2495,
+	2520, 2545, 2570, 2595, 2621, 2646, 2672, 2697, 2723, 2749, 2775,
+	2802, 2828, 2855, 2881, 2908, 2935, 2962, 2990, 3017, 3044, 3072,
+	3100, 3128, 3156, 3184, 3212, 3241, 3270, 3298, 3327, 3356, 3385,
+	3415, 3444, 3474, 3503, 3533, 3563, 3594, 3624, 3654, 3685, 3716,
+	3746, 3777, 3808, 3840, 3871, 3903, 3934, 3966, 3998, 4030, 4063,
+	4095,
+};
+
+static u32 pp_igc_srgb[IGC_LUT_ENTRIES] = {
+	0, 1, 2, 4, 5, 6, 7, 9, 10, 11, 12, 14, 15, 16, 18, 20, 21, 23,
+	25, 27, 29, 31, 33, 35, 37, 40, 42, 45, 48, 50, 53, 56, 59, 62,
+	66, 69, 72, 76, 79, 83, 87, 91, 95, 99, 103, 107, 112, 116, 121,
+	126, 131, 136, 141, 146, 151, 156, 162, 168, 173, 179, 185, 191,
+	197, 204, 210, 216, 223, 230, 237, 244, 251, 258, 265, 273, 280,
+	288, 296, 304, 312, 320, 329, 337, 346, 354, 363, 372, 381, 390,
+	400, 409, 419, 428, 438, 448, 458, 469, 479, 490, 500, 511, 522,
+	533, 544, 555, 567, 578, 590, 602, 614, 626, 639, 651, 664, 676,
+	689, 702, 715, 728, 742, 755, 769, 783, 797, 811, 825, 840, 854,
+	869, 884, 899, 914, 929, 945, 960, 976, 992, 1008, 1024, 1041,
+	1057, 1074, 1091, 1108, 1125, 1142, 1159, 1177, 1195, 1213, 1231, 1249,
+	1267, 1286, 1304, 1323, 1342, 1361, 1381, 1400, 1420, 1440, 1459, 1480,
+	1500, 1520, 1541, 1562, 1582, 1603, 1625, 1646, 1668, 1689, 1711, 1733,
+	1755, 1778, 1800, 1823, 1846, 1869, 1892, 1916, 1939, 1963, 1987, 2011,
+	2035, 2059, 2084, 2109, 2133, 2159, 2184, 2209, 2235, 2260, 2286, 2312,
+	2339, 2365, 2392, 2419, 2446, 2473, 2500, 2527, 2555, 2583, 2611, 2639,
+	2668, 2696, 2725, 2754, 2783, 2812, 2841, 2871, 2901, 2931, 2961, 2991,
+	3022, 3052, 3083, 3114, 3146, 3177, 3209, 3240, 3272, 3304, 3337, 3369,
+	3402, 3435, 3468, 3501, 3535, 3568, 3602, 3636, 3670, 3705, 3739, 3774,
+	3809, 3844, 3879, 3915, 3950, 3986, 4022, 4059, 4095
+};
+
+static int pp_hist_lut_cache_params_v1_7(struct mdp_hist_lut_data *config,
+				      struct mdss_pp_res_type *mdss_pp_res)
+{
+	u32 disp_num;
+	struct mdss_pp_res_type_v1_7 *res_cache = NULL;
+	struct mdp_hist_lut_data_v1_7 *v17_cache_data = NULL, v17_usr_config;
+	int ret = 0;
+
+	if (!config || !mdss_pp_res) {
+		pr_err("invalid param config %pK pp_res %pK\n",
+			config, mdss_pp_res);
+		return -EINVAL;
+	}
+	if ((config->block < MDP_LOGICAL_BLOCK_DISP_0) ||
+		(config->block >= MDP_BLOCK_MAX)) {
+		pr_err("invalid config block %d\n", config->block);
+		return -EINVAL;
+	}
+	if (!mdss_pp_res->pp_data_v1_7) {
+		pr_err("invalid pp_data_v1_7 %pK\n", mdss_pp_res->pp_data_v1_7);
+		return -EINVAL;
+	}
+
+	res_cache = mdss_pp_res->pp_data_v1_7;
+	if (config->ops & MDP_PP_OPS_READ) {
+		pr_err("read op is not supported\n");
+		return -EINVAL;
+	}
+	{
+		disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;
+		mdss_pp_res->enhist_disp_cfg[disp_num] = *config;
+		v17_cache_data = &res_cache->hist_lut_v17_data[disp_num];
+		mdss_pp_res->enhist_disp_cfg[disp_num].cfg_payload =
+		(void *) v17_cache_data;
+
+		if (copy_from_user(&v17_usr_config, config->cfg_payload,
+				   sizeof(v17_usr_config))) {
+			pr_err("failed to copy v17 hist_lut\n");
+			ret = -EFAULT;
+			return ret;
+		}
+		if ((config->ops & MDP_PP_OPS_DISABLE)) {
+			pr_debug("disable hist_lut\n");
+			ret = 0;
+			return ret;
+		}
+		memcpy(v17_cache_data, &v17_usr_config, sizeof(v17_usr_config));
+		if (v17_usr_config.len != ENHIST_LUT_ENTRIES) {
+			pr_err("Invalid table size %d exp %d\n",
+				v17_usr_config.len, ENHIST_LUT_ENTRIES);
+			ret = -EINVAL;
+			return ret;
+		}
+		v17_cache_data->data = &res_cache->hist_lut[disp_num][0];
+		if (copy_from_user(v17_cache_data->data, v17_usr_config.data,
+				   v17_usr_config.len * sizeof(u32))) {
+			pr_err("failed to copy v17 hist_lut->data\n");
+			ret = -EFAULT;
+			return ret;
+		}
+	}
+	return ret;
+}
+
+static int pp_hist_lut_cache_params_pipe_v1_7(struct mdp_hist_lut_data *config,
+			struct mdss_mdp_pipe *pipe)
+{
+	struct mdp_hist_lut_data_v1_7 *hist_lut_cache_data;
+	struct mdp_hist_lut_data_v1_7 hist_lut_usr_config;
+	int ret = 0;
+
+	if (!config || !pipe) {
+		pr_err("Invalid param config %pK pipe %pK\n",
+			config, pipe);
+		return -EINVAL;
+	}
+
+	if (config->ops & MDP_PP_OPS_DISABLE) {
+		pr_debug("Disable Hist LUT on pipe %d\n", pipe->num);
+		goto hist_lut_cache_pipe_exit;
+	}
+
+	if (config->ops & MDP_PP_OPS_READ) {
+		pr_err("Read op is not supported\n");
+		return -EINVAL;
+	}
+
+	if (!config->cfg_payload) {
+		pr_err("Hist LUT config payload invalid\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&hist_lut_usr_config,
+				(void __user *) config->cfg_payload,
+				sizeof(hist_lut_usr_config))) {
+		pr_err("failed to copy hist lut config\n");
+		return -EFAULT;
+	}
+
+	hist_lut_cache_data = pipe->pp_res.hist_lut_cfg_payload;
+	if (!hist_lut_cache_data) {
+		hist_lut_cache_data = kzalloc(
+				sizeof(struct mdp_hist_lut_data_v1_7),
+				GFP_KERNEL);
+		if (!hist_lut_cache_data) {
+			pr_err("failed to allocate cache_data\n");
+			ret = -ENOMEM;
+			goto hist_lut_cache_pipe_exit;
+		} else
+			pipe->pp_res.hist_lut_cfg_payload = hist_lut_cache_data;
+	}
+
+	*hist_lut_cache_data = hist_lut_usr_config;
+
+	if (hist_lut_cache_data->len != ENHIST_LUT_ENTRIES) {
+		pr_err("Invalid Hist LUT length %d\n",
+			hist_lut_cache_data->len);
+		ret = -EINVAL;
+		goto hist_lut_cache_pipe_exit;
+	}
+
+	if (copy_from_user(pipe->pp_res.hist_lut,
+			   hist_lut_usr_config.data,
+			   sizeof(uint32_t) * hist_lut_cache_data->len)) {
+		pr_err("Failed to copy usr Hist LUT data\n");
+		ret = -EFAULT;
+		goto hist_lut_cache_pipe_exit;
+	}
+
+	hist_lut_cache_data->data = pipe->pp_res.hist_lut;
+
+hist_lut_cache_pipe_exit:
+	if (ret || (config->ops & MDP_PP_OPS_DISABLE)) {
+		kfree(pipe->pp_res.hist_lut_cfg_payload);
+		pipe->pp_res.hist_lut_cfg_payload = NULL;
+	}
+	pipe->pp_cfg.hist_lut_cfg.cfg_payload =
+			pipe->pp_res.hist_lut_cfg_payload;
+	return ret;
+}
+
+int pp_hist_lut_cache_params(struct mdp_hist_lut_data *config,
+			struct mdp_pp_cache_res *res_cache)
+{
+	int ret = 0;
+
+	if (!config || !res_cache) {
+		pr_err("invalid param config %pK res_cache %pK\n",
+			config, res_cache);
+		return -EINVAL;
+	}
+	if (res_cache->block != SSPP_VIG && res_cache->block != DSPP) {
+		pr_err("invalid block for Hist LUT %d\n", res_cache->block);
+		return -EINVAL;
+	}
+	if (!res_cache->mdss_pp_res && !res_cache->pipe_res) {
+		pr_err("NULL payload for block %d mdss_pp_res %pK pipe_res %pK\n",
+			res_cache->block, res_cache->mdss_pp_res,
+			res_cache->pipe_res);
+		return -EINVAL;
+	}
+
+	switch (config->version) {
+	case mdp_hist_lut_v1_7:
+		if (res_cache->block == DSPP) {
+			ret = pp_hist_lut_cache_params_v1_7(config,
+					res_cache->mdss_pp_res);
+			if (ret)
+				pr_err("failed to cache Hist LUT params for DSPP ret %d\n",
+					ret);
+		} else {
+			ret = pp_hist_lut_cache_params_pipe_v1_7(config,
+					res_cache->pipe_res);
+			if (ret)
+				pr_err("failed to cache Hist LUT params for SSPP ret %d\n",
+					ret);
+		}
+		break;
+	default:
+		pr_err("unsupported hist_lut version %d\n",
+			config->version);
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+int pp_dither_cache_params_v1_7(struct mdp_dither_cfg_data *config,
+			  struct mdss_pp_res_type *mdss_pp_res,
+			  int copy_from_kernel)
+{
+	u32 disp_num;
+	int ret = 0;
+	struct mdss_pp_res_type_v1_7 *res_cache = NULL;
+	struct mdp_dither_data_v1_7 *v17_cache_data = NULL, v17_usr_config;
+
+	if (!config || !mdss_pp_res) {
+		pr_err("invalid param config %pK pp_res %pK\n",
+			config, mdss_pp_res);
+		return -EINVAL;
+	}
+	if ((config->block < MDP_LOGICAL_BLOCK_DISP_0) ||
+		(config->block >= MDP_BLOCK_MAX)) {
+		pr_err("invalid config block %d\n", config->block);
+		return -EINVAL;
+	}
+	if (!mdss_pp_res->pp_data_v1_7) {
+		pr_err("invalid pp_data_v1_7 %pK\n", mdss_pp_res->pp_data_v1_7);
+		return -EINVAL;
+	}
+
+	res_cache = mdss_pp_res->pp_data_v1_7;
+
+	if ((config->flags & MDSS_PP_SPLIT_MASK) == MDSS_PP_SPLIT_MASK) {
+		pr_warn("Can't set both split bits\n");
+		return -EINVAL;
+	}
+
+	if (config->flags & MDP_PP_OPS_READ) {
+		pr_err("read op is not supported\n");
+		return -ENOTSUPP;
+	}
+
+	disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;
+	mdss_pp_res->dither_disp_cfg[disp_num] = *config;
+
+	if (config->flags & MDP_PP_OPS_DISABLE) {
+		pr_debug("disable dither\n");
+		ret = 0;
+		goto dither_config_exit;
+	}
+
+	if (!(config->flags & MDP_PP_OPS_WRITE)) {
+		pr_debug("op for dither %d\n", config->flags);
+		goto dither_config_exit;
+	}
+
+	v17_cache_data = &res_cache->dither_v17_data[disp_num];
+	mdss_pp_res->dither_disp_cfg[disp_num].cfg_payload =
+		(void *)v17_cache_data;
+	if (copy_from_kernel) {
+		memcpy(v17_cache_data, config->cfg_payload,
+				sizeof(struct mdp_dither_data_v1_7));
+	} else {
+		if (copy_from_user(&v17_usr_config, config->cfg_payload,
+				sizeof(v17_usr_config))) {
+			pr_err("failed to copy v17 dither\n");
+			ret = -EFAULT;
+			goto dither_config_exit;
+		}
+		memcpy(v17_cache_data, &v17_usr_config, sizeof(v17_usr_config));
+	}
+	if (v17_cache_data->len &&
+		v17_cache_data->len != MDP_DITHER_DATA_V1_7_SZ) {
+		pr_err("invalid dither len %d expected %d\n",
+			   v17_cache_data->len, MDP_DITHER_DATA_V1_7_SZ);
+		ret = -EINVAL;
+	}
+
+dither_config_exit:
+	return ret;
+}
+
+int pp_dither_cache_params(struct mdp_dither_cfg_data *config,
+	struct mdss_pp_res_type *mdss_pp_res,
+	int copy_from_kernel)
+{
+	int ret = 0;
+
+	if (!config || !mdss_pp_res) {
+		pr_err("invalid param config %pK pp_res %pK\n",
+			config, mdss_pp_res);
+		return -EINVAL;
+	}
+	switch (config->version) {
+	case mdp_dither_v1_7:
+		ret = pp_dither_cache_params_v1_7(config, mdss_pp_res,
+				copy_from_kernel);
+		break;
+	default:
+		/* fail like the other cache dispatchers do */
+		pr_err("unsupported dither version %d\n", config->version);
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+
+static int pp_gamut_cache_params_v1_7(struct mdp_gamut_cfg_data *config,
+				      struct mdss_pp_res_type *mdss_pp_res)
+{
+	u32 disp_num, tbl_sz;
+	struct mdss_pp_res_type_v1_7 *res_cache;
+	struct mdp_gamut_data_v1_7 *v17_cache_data, v17_usr_config;
+	u32 gamut_size = 0, scal_coff_size = 0, sz = 0, index = 0;
+	u32 *tbl_gamut = NULL;
+	int ret = 0, i = 0;
+
+	if (!config || !mdss_pp_res) {
+		pr_err("invalid param config %pK pp_res %pK\n",
+			config, mdss_pp_res);
+		return -EINVAL;
+	}
+
+	if ((config->block < MDP_LOGICAL_BLOCK_DISP_0) ||
+		(config->block >= MDP_BLOCK_MAX)) {
+		pr_err("invalid config block %d\n", config->block);
+		return -EINVAL;
+	}
+	if (!mdss_pp_res->pp_data_v1_7) {
+		pr_err("invalid pp_data_v1_7 %pK\n", mdss_pp_res->pp_data_v1_7);
+		return -EINVAL;
+	}
+	res_cache = mdss_pp_res->pp_data_v1_7;
+	if (config->flags & MDP_PP_OPS_READ) {
+		pr_err("read op is not supported\n");
+		return -EINVAL;
+	}
+
+	disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;
+
+	/* Copy top level gamut cfg struct into PP res cache */
+	memcpy(&mdss_pp_res->gamut_disp_cfg[disp_num], config,
+			sizeof(struct mdp_gamut_cfg_data));
+
+	v17_cache_data = &res_cache->gamut_v17_data[disp_num];
+	mdss_pp_res->gamut_disp_cfg[disp_num].cfg_payload =
+		(void *) v17_cache_data;
+	tbl_gamut = v17_cache_data->c0_data[0];
+
+	if ((config->flags & MDP_PP_OPS_DISABLE)) {
+		pr_debug("disable gamut\n");
+		ret = 0;
+		goto gamut_config_exit;
+	}
+
+	if (copy_from_user(&v17_usr_config, config->cfg_payload,
+			   sizeof(v17_usr_config))) {
+		pr_err("failed to copy v17 gamut\n");
+		ret = -EFAULT;
+		goto gamut_config_exit;
+	}
+	if (v17_usr_config.mode != mdp_gamut_coarse_mode &&
+	   v17_usr_config.mode != mdp_gamut_fine_mode) {
+		pr_err("invalid gamut mode %d\n", v17_usr_config.mode);
+		return -EINVAL;
+	}
+	if (!(config->flags & MDP_PP_OPS_WRITE)) {
+		pr_debug("op for gamut %d\n", config->flags);
+		goto gamut_config_exit;
+	}
+	tbl_sz = (v17_usr_config.mode == mdp_gamut_fine_mode) ?
+		MDP_GAMUT_TABLE_V1_7_SZ :
+		 MDP_GAMUT_TABLE_V1_7_COARSE_SZ;
+	v17_cache_data->mode = v17_usr_config.mode;
+	v17_cache_data->map_en = v17_usr_config.map_en;
+	/* sanity check for sizes */
+	for (i = 0; i < MDP_GAMUT_TABLE_NUM_V1_7; i++) {
+		if (v17_usr_config.tbl_size[i] != tbl_sz) {
+			pr_err("invalid tbl size %d exp %d tbl index %d mode %d\n",
+			       v17_usr_config.tbl_size[i], tbl_sz, i,
+			       v17_usr_config.mode);
+			ret = -EINVAL;
+			goto gamut_config_exit;
+		}
+		gamut_size += v17_usr_config.tbl_size[i];
+		if (i >= MDP_GAMUT_SCALE_OFF_TABLE_NUM)
+			continue;
+		if (v17_usr_config.tbl_scale_off_sz[i] !=
+		    MDP_GAMUT_SCALE_OFF_SZ) {
+			pr_err("invalid scale size %d exp %d scale index %d mode %d\n",
+			       v17_usr_config.tbl_scale_off_sz[i],
+			       MDP_GAMUT_SCALE_OFF_SZ, i,
+			       v17_usr_config.mode);
+			ret = -EINVAL;
+			goto gamut_config_exit;
+		}
+		scal_coff_size += v17_usr_config.tbl_scale_off_sz[i];
+
+	}
+	/* gamut size should be accounted for c0, c1c2 table */
+	sz = gamut_size * 2 + scal_coff_size;
+	if (sz > GAMUT_TOTAL_TABLE_SIZE_V1_7) {
+		pr_err("Invalid table size act %d max %d\n",
+		      sz, GAMUT_TOTAL_TABLE_SIZE_V1_7);
+		ret = -EINVAL;
+		goto gamut_config_exit;
+	}
+	/* Allocate for fine mode other modes will fit */
+	if (!tbl_gamut)
+		tbl_gamut = vmalloc(GAMUT_TOTAL_TABLE_SIZE_V1_7 *
+				    sizeof(u32));
+	if (!tbl_gamut) {
+		ret = -ENOMEM;
+		goto gamut_config_exit;
+	}
+	index = 0;
+	for (i = 0; i < MDP_GAMUT_TABLE_NUM_V1_7; i++) {
+		ret = copy_from_user(&tbl_gamut[index],
+			v17_usr_config.c0_data[i],
+			(sizeof(u32) * v17_usr_config.tbl_size[i]));
+		if (ret) {
+			pr_err("copying c0 table %d from userspace failed size %zd ret %d\n",
+				i, (sizeof(u32) *
+				v17_usr_config.tbl_size[i]), ret);
+			ret = -EFAULT;
+			goto gamut_memory_free_exit;
+		}
+		v17_cache_data->c0_data[i] = &tbl_gamut[index];
+		v17_cache_data->tbl_size[i] =
+			v17_usr_config.tbl_size[i];
+		index += v17_usr_config.tbl_size[i];
+		ret = copy_from_user(&tbl_gamut[index],
+			v17_usr_config.c1_c2_data[i],
+			(sizeof(u32) * v17_usr_config.tbl_size[i]));
+		if (ret) {
+			pr_err("copying c1_c2 table %d from userspace failed size %zd ret %d\n",
+				i, (sizeof(u32) *
+				v17_usr_config.tbl_size[i]), ret);
+			ret = -EFAULT;
+			goto gamut_memory_free_exit;
+		}
+		v17_cache_data->c1_c2_data[i] = &tbl_gamut[index];
+		index += v17_usr_config.tbl_size[i];
+	}
+	for (i = 0; i < MDP_GAMUT_SCALE_OFF_TABLE_NUM; i++) {
+		ret = copy_from_user(&tbl_gamut[index],
+			v17_usr_config.scale_off_data[i],
+			(sizeof(u32) *
+			v17_usr_config.tbl_scale_off_sz[i]));
+		if (ret) {
+			pr_err("copying scale offset table %d from userspace failed size %zd ret %d\n",
+				i, (sizeof(u32) *
+				v17_usr_config.tbl_scale_off_sz[i]),
+				ret);
+			ret = -EFAULT;
+			goto gamut_memory_free_exit;
+		}
+		v17_cache_data->tbl_scale_off_sz[i] =
+			v17_usr_config.tbl_scale_off_sz[i];
+		v17_cache_data->scale_off_data[i] = &tbl_gamut[index];
+		index += v17_usr_config.tbl_scale_off_sz[i];
+	}
+
+gamut_config_exit:
+	return ret;
+gamut_memory_free_exit:
+	vfree(tbl_gamut);
+	for (i = 0; i < MDP_GAMUT_TABLE_NUM_V1_7; i++) {
+		v17_cache_data->c0_data[i] = NULL;
+		v17_cache_data->c1_c2_data[i] = NULL;
+		v17_cache_data->tbl_size[i] = 0;
+		if (i < MDP_GAMUT_SCALE_OFF_TABLE_NUM) {
+			v17_cache_data->scale_off_data[i] = NULL;
+			v17_cache_data->tbl_scale_off_sz[i] = 0;
+		}
+	}
+	return ret;
+}
+
+int pp_gamut_cache_params(struct mdp_gamut_cfg_data *config,
+			  struct mdss_pp_res_type *mdss_pp_res)
+{
+	int ret = 0;
+
+	if (!config || !mdss_pp_res) {
+		pr_err("invalid param config %pK pp_res %pK\n",
+			config, mdss_pp_res);
+		return -EINVAL;
+	}
+	switch (config->version) {
+	case mdp_gamut_v1_7:
+		ret = pp_gamut_cache_params_v1_7(config, mdss_pp_res);
+		break;
+	default:
+		pr_err("unsupported gamut version %d\n",
+			config->version);
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+static int pp_pcc_cache_params_pipe_v1_7(struct mdp_pcc_cfg_data *config,
+				      struct mdss_mdp_pipe *pipe)
+{
+	struct mdp_pcc_data_v1_7 *v17_cache_data = NULL, v17_usr_config;
+
+	if (!pipe || !config) {
+		pr_err("invalid params pipe %pK config %pK\n", pipe, config);
+		return -EINVAL;
+	}
+
+	if (config->ops & MDP_PP_OPS_DISABLE) {
+		pr_debug("disable ops set cleanup payload\n");
+		goto cleanup;
+	}
+
+	if (config->ops & MDP_PP_OPS_READ) {
+		pr_err("read ops not supported\n");
+		return -EINVAL;
+	}
+
+	if (!config->cfg_payload) {
+		pr_err("PCC config payload invalid\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&v17_usr_config,
+				(void __user *) config->cfg_payload,
+				sizeof(v17_usr_config))) {
+		pr_err("failed to copy pcc config\n");
+		return -EFAULT;
+	}
+
+	if (!(config->ops & MDP_PP_OPS_WRITE)) {
+		pr_debug("write ops not set value of flag is %d\n",
+			config->ops);
+		goto cleanup;
+	}
+
+	v17_cache_data = pipe->pp_res.pcc_cfg_payload;
+	if (!v17_cache_data) {
+		v17_cache_data = kzalloc(sizeof(struct mdp_pcc_data_v1_7),
+						GFP_KERNEL);
+		pipe->pp_res.pcc_cfg_payload = v17_cache_data;
+	}
+	if (!v17_cache_data) {
+		pr_err("failed to allocate the pcc cache data\n");
+		return -ENOMEM;
+	}
+	memcpy(v17_cache_data, &v17_usr_config, sizeof(v17_usr_config));
+	pipe->pp_cfg.pcc_cfg_data.cfg_payload = v17_cache_data;
+cleanup:
+	if (config->ops & MDP_PP_OPS_DISABLE) {
+		kfree(pipe->pp_res.pcc_cfg_payload);
+		pipe->pp_res.pcc_cfg_payload = NULL;
+		pipe->pp_cfg.pcc_cfg_data.cfg_payload = NULL;
+	}
+	return 0;
+}
+
+static int pp_pcc_cache_params_v1_7(struct mdp_pcc_cfg_data *config,
+				      struct mdss_pp_res_type *mdss_pp_res)
+{
+	u32 disp_num;
+	int ret = 0;
+	struct mdss_pp_res_type_v1_7 *res_cache;
+	struct mdp_pcc_data_v1_7 *v17_cache_data, v17_usr_config;
+
+	if (!config || !mdss_pp_res) {
+		pr_err("invalid param config %pK pp_res %pK\n",
+			config, mdss_pp_res);
+		return -EINVAL;
+	}
+
+	if ((config->block < MDP_LOGICAL_BLOCK_DISP_0) ||
+		(config->block >= MDP_BLOCK_MAX)) {
+		pr_err("invalid config block %d\n", config->block);
+		return -EINVAL;
+	}
+	if (!mdss_pp_res->pp_data_v1_7) {
+		pr_err("invalid pp_data_v1_7 %pK\n", mdss_pp_res->pp_data_v1_7);
+		return -EINVAL;
+	}
+
+	res_cache = mdss_pp_res->pp_data_v1_7;
+	if (config->ops & MDP_PP_OPS_READ) {
+		pr_err("read op is not supported\n");
+		return -EINVAL;
+	}
+	{
+		disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;
+		mdss_pp_res->pcc_disp_cfg[disp_num] = *config;
+		v17_cache_data = &res_cache->pcc_v17_data[disp_num];
+		mdss_pp_res->pcc_disp_cfg[disp_num].cfg_payload =
+			(void *) v17_cache_data;
+		if (copy_from_user(&v17_usr_config, config->cfg_payload,
+				   sizeof(v17_usr_config))) {
+			pr_err("failed to copy v17 pcc\n");
+			ret = -EFAULT;
+			goto pcc_config_exit;
+		}
+		if ((config->ops & MDP_PP_OPS_DISABLE)) {
+			pr_debug("disable pcc\n");
+			ret = 0;
+			goto pcc_config_exit;
+		}
+		if (!(config->ops & MDP_PP_OPS_WRITE)) {
+			pr_debug("op for pcc %d\n", config->ops);
+			goto pcc_config_exit;
+		}
+		memcpy(v17_cache_data, &v17_usr_config, sizeof(v17_usr_config));
+	}
+pcc_config_exit:
+	return ret;
+}
+
+int pp_pcc_cache_params(struct mdp_pcc_cfg_data *config,
+			struct mdp_pp_cache_res *res_cache)
+{
+	int ret = 0;
+
+	if (!config || !res_cache) {
+		pr_err("invalid param config %pK pp_res %pK\n",
+			config, res_cache);
+		return -EINVAL;
+	}
+	if (res_cache->block < SSPP_RGB || res_cache->block > DSPP) {
+		pr_err("invalid block for PCC %d\n", res_cache->block);
+		return -EINVAL;
+	}
+	if (!res_cache->mdss_pp_res && !res_cache->pipe_res) {
+		pr_err("NULL payload for block %d mdss_pp_res %pK pipe_res %pK\n",
+			res_cache->block, res_cache->mdss_pp_res,
+			res_cache->pipe_res);
+		return -EINVAL;
+	}
+	switch (config->version) {
+	case mdp_pcc_v1_7:
+		if (res_cache->block == DSPP) {
+			ret = pp_pcc_cache_params_v1_7(config,
+					res_cache->mdss_pp_res);
+			if (ret)
+				pr_err("caching for DSPP failed for PCC ret %d\n",
+					ret);
+		} else {
+			ret = pp_pcc_cache_params_pipe_v1_7(config,
+						res_cache->pipe_res);
+			if (ret)
+				pr_err("caching for SSPP failed for PCC ret %d block %d\n",
+					ret, res_cache->block);
+		}
+		break;
+	default:
+		pr_err("unsupported pcc version %d\n",
+			config->version);
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+static int pp_igc_lut_cache_params_v1_7(struct mdp_igc_lut_data *config,
+			    struct mdss_pp_res_type *mdss_pp_res,
+			    u32 copy_from_kernel)
+{
+	int ret = 0;
+	struct mdss_pp_res_type_v1_7 *res_cache;
+	struct mdp_igc_lut_data_v1_7 *v17_cache_data, v17_usr_config;
+	u32 disp_num;
+
+	if (!config || !mdss_pp_res) {
+		pr_err("invalid param config %pK pp_res %pK\n",
+			config, mdss_pp_res);
+		return -EINVAL;
+	}
+	if ((config->block < MDP_LOGICAL_BLOCK_DISP_0) ||
+		(config->block >= MDP_BLOCK_MAX)) {
+		pr_err("invalid config block %d\n", config->block);
+		return -EINVAL;
+	}
+	if (!mdss_pp_res->pp_data_v1_7) {
+		pr_err("invalid pp_data_v1_7 %pK\n", mdss_pp_res->pp_data_v1_7);
+		return -EINVAL;
+	}
+	res_cache = mdss_pp_res->pp_data_v1_7;
+	if (config->ops & MDP_PP_OPS_READ) {
+		pr_err("read op is not supported\n");
+		return -EINVAL;
+	}
+	{
+		disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;
+		mdss_pp_res->igc_disp_cfg[disp_num] = *config;
+		v17_cache_data = &res_cache->igc_v17_data[disp_num];
+		mdss_pp_res->igc_disp_cfg[disp_num].cfg_payload =
+		(void *) v17_cache_data;
+		if (!copy_from_kernel) {
+			if (copy_from_user(&v17_usr_config,
+					   config->cfg_payload,
+					   sizeof(v17_usr_config))) {
+				pr_err("failed to copy igc config\n");
+				ret = -EFAULT;
+				goto igc_config_exit;
+			}
+		} else {
+			if (!config->cfg_payload) {
+				pr_err("can't copy config info NULL payload\n");
+				ret = -EINVAL;
+				goto igc_config_exit;
+			}
+			memcpy(&v17_usr_config, config->cfg_payload,
+			       sizeof(v17_usr_config));
+		}
+		if (!(config->ops & MDP_PP_OPS_WRITE)) {
+			pr_debug("op for gamut %d\n", config->ops);
+			goto igc_config_exit;
+		}
+		if (copy_from_kernel && (!v17_usr_config.c0_c1_data ||
+		    !v17_usr_config.c2_data)) {
+			pr_err("copy from kernel invalid params c0_c1_data %pK c2_data %pK\n",
+				v17_usr_config.c0_c1_data,
+				v17_usr_config.c2_data);
+			ret = -EINVAL;
+			goto igc_config_exit;
+		}
+		if (v17_usr_config.len != IGC_LUT_ENTRIES) {
+			pr_err("Invalid table size %d exp %d\n",
+				v17_usr_config.len, IGC_LUT_ENTRIES);
+			ret = -EINVAL;
+			goto igc_config_exit;
+		}
+		memcpy(v17_cache_data, &v17_usr_config,
+		       sizeof(v17_usr_config));
+		v17_cache_data->c0_c1_data =
+		&res_cache->igc_table_c0_c1[disp_num][0];
+		v17_cache_data->c2_data =
+		&res_cache->igc_table_c2[disp_num][0];
+		if (copy_from_kernel) {
+			memcpy(v17_cache_data->c0_c1_data,
+			       v17_usr_config.c0_c1_data,
+			       v17_usr_config.len * sizeof(u32));
+			memcpy(v17_cache_data->c2_data, v17_usr_config.c2_data,
+			       v17_usr_config.len * sizeof(u32));
+		} else {
+			ret = copy_from_user(v17_cache_data->c0_c1_data,
+					     v17_usr_config.c0_c1_data,
+					     v17_usr_config.len * sizeof(u32));
+			if (ret) {
+				pr_err("copy from user failed for c0_c1_data size %zd ret %d\n",
+				       v17_usr_config.len * sizeof(u32), ret);
+				ret = -EFAULT;
+				goto igc_config_exit;
+			}
+			ret = copy_from_user(v17_cache_data->c2_data,
+					     v17_usr_config.c2_data,
+					     v17_usr_config.len * sizeof(u32));
+			if (ret) {
+				pr_err("copy from user failed for c2_data size %zd ret %d\n",
+				       v17_usr_config.len * sizeof(u32), ret);
+				ret = -EFAULT;
+				goto igc_config_exit;
+			}
+		}
+	}
+igc_config_exit:
+	return ret;
+}
+
+static int pp_igc_lut_cache_params_pipe_v1_7(struct mdp_igc_lut_data *config,
+			    struct mdss_mdp_pipe *pipe,
+			    u32 copy_from_kernel)
+{ /* Validates and caches a v1.7 IGC LUT into a pipe's (SSPP) private payload. */
+	struct mdp_igc_lut_data_v1_7 *v17_cache_data = NULL, v17_usr_config;
+	int ret = 0, fix_up = 0, i = 0;
+
+	if (!config || !pipe) {
+		pr_err("invalid param config %pK pipe %pK\n",
+			config, pipe);
+		return -EINVAL;
+	}
+	if (config->ops & MDP_PP_OPS_READ) {
+		pr_err("read op is not supported\n");
+		return -EINVAL;
+	}
+
+	if (!config->cfg_payload) {
+		pr_err("can't copy config info NULL payload\n");
+		ret = -EINVAL;
+		goto igc_config_exit;
+	}
+
+	if (copy_from_user(&v17_usr_config,
+				(void __user *) config->cfg_payload,
+				sizeof(v17_usr_config))) {
+		pr_err("failed to copy igc config\n");
+		return -EFAULT;
+	}
+
+	if (!(config->ops & MDP_PP_OPS_WRITE)) {
+		pr_debug("op for igc %d\n", config->ops);
+		goto igc_config_exit;
+	}
+
+	switch (v17_usr_config.table_fmt) {
+	case mdp_igc_custom:
+		if (!v17_usr_config.c0_c1_data ||
+		    !v17_usr_config.c2_data ||
+		    v17_usr_config.len != IGC_LUT_ENTRIES) {
+			pr_err("invalid c0_c1data %pK c2_data %pK tbl len %d\n",
+					v17_usr_config.c0_c1_data,
+					v17_usr_config.c2_data,
+					v17_usr_config.len);
+			ret = -EINVAL;
+			goto igc_config_exit;
+		}
+		break;
+	case mdp_igc_rec709:
+		v17_usr_config.c0_c1_data = pp_igc_709;
+		v17_usr_config.c2_data = pp_igc_709;
+		v17_usr_config.len = IGC_LUT_ENTRIES;
+		copy_from_kernel = 1; /* preset tables live in kernel memory */
+		fix_up = 1;
+		break;
+	case mdp_igc_srgb:
+		v17_usr_config.c0_c1_data = pp_igc_srgb;
+		v17_usr_config.c2_data = pp_igc_srgb;
+		v17_usr_config.len = IGC_LUT_ENTRIES;
+		copy_from_kernel = 1;
+		fix_up = 1;
+		break;
+	case mdp_igc_rec601:
+		v17_usr_config.c0_c1_data = pp_igc_601;
+		v17_usr_config.c2_data = pp_igc_601;
+		v17_usr_config.len = IGC_LUT_ENTRIES;
+		copy_from_kernel = 1;
+		fix_up = 1;
+		break;
+	default:
+		pr_err("invalid format %d\n",
+				v17_usr_config.table_fmt);
+		ret = -EINVAL;
+		goto igc_config_exit;
+	}
+	v17_cache_data = pipe->pp_res.igc_cfg_payload;
+	if (!v17_cache_data)
+		v17_cache_data = kzalloc(sizeof(struct mdp_igc_lut_data_v1_7),
+					GFP_KERNEL);
+	if (!v17_cache_data) {
+		ret = -ENOMEM;
+		goto igc_config_exit;
+	} else {
+		pipe->pp_res.igc_cfg_payload = v17_cache_data;
+		pipe->pp_cfg.igc_cfg.cfg_payload = v17_cache_data;
+	}
+	v17_cache_data->c0_c1_data = pipe->pp_res.igc_c0_c1;
+	v17_cache_data->c2_data = pipe->pp_res.igc_c2;
+	v17_cache_data->len = IGC_LUT_ENTRIES;
+	if (copy_from_kernel) {
+		memcpy(v17_cache_data->c0_c1_data,
+				v17_usr_config.c0_c1_data,
+				IGC_LUT_ENTRIES * sizeof(u32));
+		memcpy(v17_cache_data->c2_data,
+				v17_usr_config.c2_data,
+				IGC_LUT_ENTRIES * sizeof(u32));
+		if (fix_up) { /* OR each value into the c1 bit-position (IGC_C1_SHIFT) */
+			for (i = 0; i < IGC_LUT_ENTRIES; i++)
+				v17_cache_data->c0_c1_data[i]
+					|= (v17_cache_data->c0_c1_data[i]
+							<< IGC_C1_SHIFT);
+		}
+	} else {
+		if (copy_from_user(v17_cache_data->c0_c1_data,
+				v17_usr_config.c0_c1_data,
+				IGC_LUT_ENTRIES * sizeof(u32))) {
+			pr_err("error in copying the c0_c1_data of size %zd\n",
+					IGC_LUT_ENTRIES * sizeof(u32));
+			ret = -EFAULT;
+			goto igc_config_exit;
+		}
+		if (copy_from_user(v17_cache_data->c2_data,
+				v17_usr_config.c2_data,
+				IGC_LUT_ENTRIES * sizeof(u32))) {
+			pr_err("error in copying the c2_data of size %zd\n",
+					IGC_LUT_ENTRIES * sizeof(u32));
+			ret = -EFAULT;
+		}
+	}
+igc_config_exit:
+	if (ret || (config->ops & MDP_PP_OPS_DISABLE)) {
+		kfree(v17_cache_data); /* drop cached payload on error or explicit disable */
+		pipe->pp_cfg.igc_cfg.cfg_payload = NULL;
+		pipe->pp_res.igc_cfg_payload = NULL;
+	}
+	return ret;
+}
+
+int pp_igc_lut_cache_params(struct mdp_igc_lut_data *config,
+			    struct mdp_pp_cache_res *res_cache,
+			    u32 copy_from_kernel)
+{ /* Dispatches IGC LUT caching to the DSPP or per-pipe (SSPP) handler by block/version. */
+	int ret = 0;
+
+	if (!config || !res_cache) {
+		pr_err("invalid param config %pK pp_res %pK\n",
+			config, res_cache);
+		return -EINVAL;
+	}
+	if (res_cache->block < SSPP_RGB || res_cache->block > DSPP) {
+		pr_err("invalid block for IGC %d\n", res_cache->block);
+		return -EINVAL;
+	}
+	if (!res_cache->mdss_pp_res && !res_cache->pipe_res) {
+		pr_err("NULL payload for block %d mdss_pp_res %pK pipe_res %pK\n",
+			res_cache->block, res_cache->mdss_pp_res,
+			res_cache->pipe_res);
+		ret = -EINVAL;
+		goto igc_exit;
+	}
+	switch (config->version) {
+	case mdp_igc_v1_7:
+		if (res_cache->block == DSPP) {
+			ret = pp_igc_lut_cache_params_v1_7(config,
+				     res_cache->mdss_pp_res, copy_from_kernel);
+			if (ret)
+				pr_err("failed to cache IGC params for DSPP ret %d\n",
+					ret);
+
+		} else { /* non-DSPP blocks take the per-pipe (SSPP) path */
+			ret = pp_igc_lut_cache_params_pipe_v1_7(config,
+				      res_cache->pipe_res, copy_from_kernel);
+			if (ret)
+				pr_err("failed to cache IGC params for SSPP ret %d\n",
+					ret);
+		}
+		break;
+	default:
+		pr_err("unsupported igc version %d\n",
+			config->version);
+		ret = -EINVAL;
+		break;
+	}
+igc_exit:
+	return ret;
+}
+
+static int pp_pgc_lut_cache_params_v1_7(struct mdp_pgc_lut_data *config,
+			    struct mdss_pp_res_type *mdss_pp_res,
+			    int location)
+{ /* Validates and caches a v1.7 PGC LUT into DSPP or LM display resources. */
+	int ret = 0;
+	u32 sz = 0;
+	u32 disp_num;
+	struct mdp_pgc_lut_data_v1_7 *v17_cache_data = NULL, v17_usr_config;
+	struct mdss_pp_res_type_v1_7 *res_cache = NULL;
+
+	if (location != DSPP && location != LM) {
+		pr_err("Invalid location for pgc %d\n", location);
+		return -EINVAL;
+	}
+	disp_num = PP_BLOCK(config->block) - MDP_LOGICAL_BLOCK_DISP_0;
+	if (disp_num >= MDSS_BLOCK_DISP_NUM) {
+		pr_err("invalid disp_num %d\n", disp_num);
+		return -EINVAL;
+	}
+	res_cache = mdss_pp_res->pp_data_v1_7;
+	if (!res_cache) {
+		pr_err("invalid resource payload\n");
+		return -EINVAL;
+	}
+	if (copy_from_user(&v17_usr_config, config->cfg_payload,
+			   sizeof(v17_usr_config))) {
+		pr_err("failed to copy from user config info\n");
+		return -EFAULT;
+	}
+	if (v17_usr_config.len != PGC_LUT_ENTRIES) {
+		pr_err("invalid entries for pgc act %d exp %d\n",
+			v17_usr_config.len, PGC_LUT_ENTRIES);
+		return -EINVAL; /* length mismatch is invalid input, not a copy fault */
+	}
+	if (config->flags & MDP_PP_OPS_READ) {
+		pr_err("ops read not supported\n");
+		return -EINVAL;
+	}
+	if (!(config->flags & MDP_PP_OPS_WRITE)) {
+		pr_debug("ops write not set flags %d\n", config->flags);
+		if (location == DSPP)
+			mdss_pp_res->pgc_disp_cfg[disp_num].flags =
+				config->flags;
+		else
+			mdss_pp_res->argc_disp_cfg[disp_num].flags =
+				config->flags;
+		return 0;
+	}
+	if (location == DSPP) {
+		mdss_pp_res->pgc_disp_cfg[disp_num] = *config;
+		v17_cache_data = &res_cache->pgc_dspp_v17_data[disp_num];
+		v17_cache_data->c0_data = &res_cache->pgc_table_c0[disp_num][0];
+		v17_cache_data->c1_data = &res_cache->pgc_table_c1[disp_num][0];
+		v17_cache_data->c2_data = &res_cache->pgc_table_c2[disp_num][0];
+		mdss_pp_res->pgc_disp_cfg[disp_num].cfg_payload =
+			v17_cache_data;
+	} else {
+		mdss_pp_res->argc_disp_cfg[disp_num] = *config;
+		v17_cache_data = &res_cache->pgc_lm_v17_data[disp_num];
+		v17_cache_data->c0_data =
+			&res_cache->pgc_lm_table_c0[disp_num][0];
+		v17_cache_data->c1_data =
+			&res_cache->pgc_lm_table_c1[disp_num][0];
+		v17_cache_data->c2_data =
+			&res_cache->pgc_lm_table_c2[disp_num][0];
+		mdss_pp_res->argc_disp_cfg[disp_num].cfg_payload =
+			v17_cache_data;
+	}
+	v17_cache_data->len = 0; /* marked valid (PGC_LUT_ENTRIES) only after all copies succeed */
+	sz = PGC_LUT_ENTRIES * sizeof(u32);
+	if (copy_from_user(v17_cache_data->c0_data, v17_usr_config.c0_data,
+			   sz)) {
+		pr_err("failed to copy c0_data from user sz %d\n", sz);
+		ret = -EFAULT;
+		goto bail_out;
+	}
+	if (copy_from_user(v17_cache_data->c1_data, v17_usr_config.c1_data,
+			   sz)) {
+		pr_err("failed to copy c1_data from user sz %d\n", sz);
+		ret = -EFAULT;
+		goto bail_out;
+	}
+	if (copy_from_user(v17_cache_data->c2_data, v17_usr_config.c2_data,
+			   sz)) {
+		pr_err("failed to copy c2_data from user sz %d\n", sz);
+		ret = -EFAULT;
+		goto bail_out;
+	}
+	v17_cache_data->len = PGC_LUT_ENTRIES;
+	return 0;
+bail_out:
+	if (location == DSPP)
+		mdss_pp_res->pgc_disp_cfg[disp_num].flags = 0;
+	else
+		mdss_pp_res->argc_disp_cfg[disp_num].flags = 0;
+	return ret;
+}
+
+int pp_pgc_lut_cache_params(struct mdp_pgc_lut_data *config,
+			    struct mdss_pp_res_type *mdss_pp_res, int loc)
+{ /* Dispatches PGC LUT caching by payload version. */
+	int ret = 0;
+
+	if (!config || !mdss_pp_res) {
+		pr_err("invalid param config %pK pp_res %pK\n",
+			config, mdss_pp_res);
+		return -EINVAL;
+	}
+	switch (config->version) {
+	case mdp_pgc_v1_7:
+		ret = pp_pgc_lut_cache_params_v1_7(config, mdss_pp_res, loc);
+		break;
+	default:
+		pr_err("unsupported pgc version %d\n",
+			config->version);
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+static int pp_pa_cache_params_v1_7(struct mdp_pa_v2_cfg_data *config,
+				   struct mdss_pp_res_type *mdss_pp_res)
+{ /* Validates and caches a v1.7 PA (picture adjust) config for a display (DSPP path). */
+	struct mdss_pp_res_type_v1_7 *res_cache;
+	struct mdp_pa_data_v1_7 *pa_cache_data, pa_usr_config;
+	int disp_num, ret = 0;
+
+	if (!config || !mdss_pp_res) {
+		pr_err("Invalid param config %pK pp_res %pK\n",
+			config, mdss_pp_res);
+		return -EINVAL;
+	}
+
+	if ((config->block < MDP_LOGICAL_BLOCK_DISP_0) ||
+			(config->block >= MDP_BLOCK_MAX)) {
+		pr_err("Invalid config block %d\n", config->block);
+		return -EINVAL;
+	}
+
+	if (!mdss_pp_res->pp_data_v1_7) {
+		pr_err("Invalid pp_data_v1_7 %pK\n", mdss_pp_res->pp_data_v1_7);
+		return -EINVAL;
+	}
+
+	res_cache = mdss_pp_res->pp_data_v1_7;
+	if (config->flags & MDP_PP_OPS_READ) {
+		pr_err("Read op is not supported\n");
+		return -EINVAL;
+	}
+
+	disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;
+	mdss_pp_res->pa_v2_disp_cfg[disp_num] = *config; /* shallow copy; payload pointer redirected below */
+	pa_cache_data = &res_cache->pa_v17_data[disp_num];
+	mdss_pp_res->pa_v2_disp_cfg[disp_num].cfg_payload =
+		(void *) pa_cache_data;
+
+	if (copy_from_user(&pa_usr_config, config->cfg_payload,
+			   sizeof(pa_usr_config))) {
+		pr_err("Failed to copy v1_7 PA\n");
+		ret = -EFAULT;
+		goto pa_config_exit;
+	}
+
+	if ((config->flags & MDP_PP_OPS_DISABLE)) {
+		pr_debug("Disable PA\n");
+		ret = 0;
+		goto pa_config_exit; /* exit path clears six-zone state */
+	}
+
+	if (!(config->flags & MDP_PP_OPS_WRITE)) {
+		pr_debug("op for PA %d\n", config->flags);
+		ret = 0;
+		goto pa_config_exit;
+	}
+
+	memcpy(pa_cache_data, &pa_usr_config, sizeof(pa_usr_config));
+	/* Copy six zone LUT if six zone is enabled to be written */
+	if (config->flags & MDP_PP_PA_SIX_ZONE_ENABLE) {
+		if (pa_usr_config.six_zone_len != MDP_SIX_ZONE_LUT_SIZE) {
+			pr_err("Invalid six zone size, actual %d max %d\n",
+					pa_usr_config.six_zone_len,
+					MDP_SIX_ZONE_LUT_SIZE);
+			ret = -EINVAL;
+			goto pa_config_exit;
+		}
+
+		ret = copy_from_user(&res_cache->six_zone_lut_p0[disp_num][0],
+				     pa_usr_config.six_zone_curve_p0,
+				     pa_usr_config.six_zone_len * sizeof(u32));
+		if (ret) {
+			pr_err("copying six_zone_curve_p0 lut from userspace failed size %zd ret %d\n",
+				(sizeof(u32) * pa_usr_config.six_zone_len),
+				ret);
+			ret = -EFAULT;
+			goto pa_config_exit;
+		}
+		pa_cache_data->six_zone_curve_p0 =
+			&res_cache->six_zone_lut_p0[disp_num][0];
+		ret = copy_from_user(&res_cache->six_zone_lut_p1[disp_num][0],
+				     pa_usr_config.six_zone_curve_p1,
+				     pa_usr_config.six_zone_len * sizeof(u32));
+		if (ret) {
+			pr_err("copying six_zone_curve_p1 lut from userspace failed size %zd ret %d\n",
+				(sizeof(u32) * pa_usr_config.six_zone_len),
+				ret);
+			ret = -EFAULT;
+			goto pa_config_exit;
+		}
+		pa_cache_data->six_zone_curve_p1 =
+			&res_cache->six_zone_lut_p1[disp_num][0];
+	}
+
+pa_config_exit:
+	if (ret || config->flags & MDP_PP_OPS_DISABLE) { /* invalidate six-zone pointers on error/disable */
+		pa_cache_data->six_zone_len = 0;
+		pa_cache_data->six_zone_curve_p0 = NULL;
+		pa_cache_data->six_zone_curve_p1 = NULL;
+	}
+	return ret;
+}
+
+static int pp_pa_cache_params_pipe_v1_7(struct mdp_pa_v2_cfg_data *config,
+			struct mdss_mdp_pipe *pipe)
+{ /* Caches a v1.7 PA config into a pipe's (SSPP) private payload; six-zone is not supported here. */
+	struct mdp_pa_data_v1_7 *pa_cache_data, pa_usr_config;
+	int ret = 0;
+
+	if (!config || !pipe) {
+		pr_err("Invalid param config %pK pipe %pK\n",
+			config, pipe);
+		return -EINVAL;
+	}
+
+	if (config->flags & MDP_PP_OPS_DISABLE) {
+		pr_debug("Disable PA on pipe %d\n", pipe->num);
+		goto pa_cache_pipe_exit; /* ret is 0; exit path frees any cached payload */
+	}
+
+	if (config->flags & MDP_PP_OPS_READ) {
+		pr_err("Read op is not supported\n");
+		return -EINVAL;
+	}
+
+	if (!config->cfg_payload) {
+		pr_err("invalid PA config payload\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&pa_usr_config,
+				(void __user *) config->cfg_payload,
+				sizeof(pa_usr_config))) {
+		pr_err("failed to copy pa usr config\n");
+		return -EFAULT;
+	}
+
+	pa_cache_data = pipe->pp_res.pa_cfg_payload;
+	if (!pa_cache_data) { /* allocate on first use; reused on later calls */
+		pa_cache_data = kzalloc(sizeof(struct mdp_pa_data_v1_7),
+					GFP_KERNEL);
+		if (!pa_cache_data) {
+			ret = -ENOMEM;
+			goto pa_cache_pipe_exit;
+		} else
+			pipe->pp_res.pa_cfg_payload = pa_cache_data;
+	}
+
+	*pa_cache_data = pa_usr_config;
+
+	/* No six zone in SSPP */
+	pa_cache_data->six_zone_len = 0;
+	pa_cache_data->six_zone_curve_p0 = NULL;
+	pa_cache_data->six_zone_curve_p1 = NULL;
+
+pa_cache_pipe_exit:
+	if (ret || (config->flags & MDP_PP_OPS_DISABLE)) {
+		kfree(pipe->pp_res.pa_cfg_payload);
+		pipe->pp_res.pa_cfg_payload = NULL;
+	}
+	pipe->pp_cfg.pa_v2_cfg_data.cfg_payload = pipe->pp_res.pa_cfg_payload; /* keep in sync (may be NULL) */
+	return ret;
+}
+
+int pp_pa_cache_params(struct mdp_pa_v2_cfg_data *config,
+			struct mdp_pp_cache_res *res_cache)
+{ /* Dispatches PA caching to the DSPP or per-pipe (SSPP) handler by block and version. */
+	int ret = 0;
+
+	if (!config || !res_cache) {
+		pr_err("invalid param config %pK pp_res %pK\n",
+			config, res_cache);
+		return -EINVAL;
+	}
+	if (res_cache->block != SSPP_VIG && res_cache->block != DSPP) {
+		pr_err("invalid block for PA %d\n", res_cache->block);
+		return -EINVAL;
+	}
+	if (!res_cache->mdss_pp_res && !res_cache->pipe_res) {
+		pr_err("NULL payload for block %d mdss_pp_res %pK pipe_res %pK\n",
+			res_cache->block, res_cache->mdss_pp_res,
+			res_cache->pipe_res);
+		return -EINVAL;
+	}
+
+	switch (config->version) {
+	case mdp_pa_v1_7:
+		if (res_cache->block == DSPP) {
+			ret = pp_pa_cache_params_v1_7(config,
+					res_cache->mdss_pp_res);
+			if (ret)
+				pr_err("failed to cache PA params for DSPP ret %d\n",
+					ret);
+		} else { /* SSPP_VIG: per-pipe cache */
+			ret = pp_pa_cache_params_pipe_v1_7(config,
+					res_cache->pipe_res);
+			if (ret)
+				pr_err("failed to cache PA params for SSPP ret %d\n",
+					ret);
+
+		}
+		break;
+	default:
+		pr_err("unsupported pa version %d\n",
+			config->version);
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+int pp_copy_layer_igc_payload(struct mdp_overlay_pp_params *pp_info)
+{ /* Replaces the user-space IGC payload pointer with a kmalloc'd kernel copy (NULL for legacy/unknown versions). */
+	void *cfg_payload = NULL;
+	int ret = 0;
+
+	switch (pp_info->igc_cfg.version) {
+	case mdp_igc_v1_7:
+		cfg_payload = kmalloc(
+				sizeof(struct mdp_igc_lut_data_v1_7),
+				GFP_KERNEL);
+		if (!cfg_payload) {
+			ret = -ENOMEM;
+			goto exit;
+		}
+
+		ret = copy_from_user(cfg_payload,
+				pp_info->igc_cfg.cfg_payload,
+				sizeof(struct mdp_igc_lut_data_v1_7));
+		if (ret) {
+			pr_err("layer list copy from user failed, IGC cfg payload = %pK\n",
+				pp_info->igc_cfg.cfg_payload);
+			ret = -EFAULT;
+			kfree(cfg_payload);
+			cfg_payload = NULL;
+			goto exit;
+		}
+		break;
+	default:
+		pr_debug("No version set, fallback to legacy IGC version\n");
+		cfg_payload = NULL;
+		break;
+	}
+
+exit:
+	pp_info->igc_cfg.cfg_payload = cfg_payload; /* NULL on failure or legacy path */
+	return ret;
+}
+
+int pp_copy_layer_hist_lut_payload(struct mdp_overlay_pp_params *pp_info)
+{ /* Replaces the user-space Hist LUT payload pointer with a kmalloc'd kernel copy (NULL for legacy/unknown versions). */
+	void *cfg_payload = NULL;
+	int ret = 0;
+
+	switch (pp_info->hist_lut_cfg.version) {
+	case mdp_hist_lut_v1_7:
+		cfg_payload = kmalloc(
+				sizeof(struct mdp_hist_lut_data_v1_7),
+				GFP_KERNEL);
+		if (!cfg_payload) {
+			ret = -ENOMEM;
+			goto exit;
+		}
+
+		ret = copy_from_user(cfg_payload,
+				pp_info->hist_lut_cfg.cfg_payload,
+				sizeof(struct mdp_hist_lut_data_v1_7));
+		if (ret) {
+			pr_err("layer list copy from user failed, Hist LUT cfg payload = %pK\n",
+				pp_info->hist_lut_cfg.cfg_payload);
+			ret = -EFAULT;
+			kfree(cfg_payload);
+			cfg_payload = NULL;
+			goto exit;
+		}
+		break;
+	default:
+		pr_debug("No version set, fallback to legacy Hist LUT version\n");
+		cfg_payload = NULL;
+		break;
+	}
+
+exit:
+	pp_info->hist_lut_cfg.cfg_payload = cfg_payload; /* NULL on failure or legacy path */
+	return ret;
+}
+
+int pp_copy_layer_pa_payload(struct mdp_overlay_pp_params *pp_info)
+{ /* Replaces the user-space PA payload pointer with a kmalloc'd kernel copy (NULL for legacy/unknown versions). */
+	void *cfg_payload = NULL;
+	int ret = 0;
+
+	switch (pp_info->pa_v2_cfg_data.version) {
+	case mdp_pa_v1_7:
+		cfg_payload = kmalloc(
+				sizeof(struct mdp_pa_data_v1_7),
+				GFP_KERNEL);
+		if (!cfg_payload) {
+			ret = -ENOMEM;
+			goto exit;
+		}
+
+		ret = copy_from_user(cfg_payload,
+				pp_info->pa_v2_cfg_data.cfg_payload,
+				sizeof(struct mdp_pa_data_v1_7));
+		if (ret) {
+			pr_err("layer list copy from user failed, PA cfg payload = %pK\n",
+				pp_info->pa_v2_cfg_data.cfg_payload);
+			ret = -EFAULT;
+			kfree(cfg_payload);
+			cfg_payload = NULL;
+			goto exit;
+		}
+		break;
+	default:
+		pr_debug("No version set, fallback to legacy PA version\n");
+		cfg_payload = NULL;
+		break;
+	}
+
+exit:
+	pp_info->pa_v2_cfg_data.cfg_payload = cfg_payload; /* NULL on failure or legacy path */
+	return ret;
+}
+
+int pp_copy_layer_pcc_payload(struct mdp_overlay_pp_params *pp_info)
+{ /* Replaces the user-space PCC payload pointer with a kmalloc'd kernel copy (NULL for legacy/unknown versions). */
+	void *cfg_payload = NULL;
+	int ret = 0;
+
+	switch (pp_info->pcc_cfg_data.version) {
+	case mdp_pcc_v1_7:
+		cfg_payload = kmalloc(
+				sizeof(struct mdp_pcc_data_v1_7),
+				GFP_KERNEL);
+		if (!cfg_payload) {
+			ret = -ENOMEM;
+			goto exit;
+		}
+
+		ret = copy_from_user(cfg_payload,
+				pp_info->pcc_cfg_data.cfg_payload,
+				sizeof(struct mdp_pcc_data_v1_7));
+		if (ret) {
+			pr_err("layer list copy from user failed, PCC cfg payload = %pK\n",
+				pp_info->pcc_cfg_data.cfg_payload);
+			ret = -EFAULT;
+			kfree(cfg_payload);
+			cfg_payload = NULL;
+			goto exit;
+		}
+		break;
+	default:
+		pr_debug("No version set, fallback to legacy PCC version\n");
+		cfg_payload = NULL;
+		break;
+	}
+
+exit:
+	pp_info->pcc_cfg_data.cfg_payload = cfg_payload; /* NULL on failure or legacy path */
+	return ret;
+}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.h b/drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.h
new file mode 100644
index 0000000..ab9a3dd
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2014-2015, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_MDP_CACHE_CONFIG_H
+#define MDSS_MDP_CACHE_CONFIG_H
+#include "mdss_mdp_pp.h"
+
+struct mdp_pp_cache_res { /* resolves which resource set a cache operation targets */
+	enum pp_config_block block; /* target block (SSPP_* or DSPP) */
+	struct mdss_pp_res_type *mdss_pp_res; /* display-level resources (DSPP path) */
+	struct mdss_mdp_pipe *pipe_res; /* per-pipe resources (SSPP path) */
+};
+
+int pp_hist_lut_cache_params(struct mdp_hist_lut_data *config,
+			  struct mdp_pp_cache_res *res_cache);
+
+int pp_dither_cache_params(struct mdp_dither_cfg_data *config,
+			  struct mdss_pp_res_type *mdss_pp_res,
+			  int copy_from_kernel);
+
+int pp_gamut_cache_params(struct mdp_gamut_cfg_data *config,
+			  struct mdss_pp_res_type *mdss_pp_res);
+int pp_pcc_cache_params(struct mdp_pcc_cfg_data *config,
+			  struct mdp_pp_cache_res *res_cache);
+int pp_pa_cache_params(struct mdp_pa_v2_cfg_data *config,
+			  struct mdp_pp_cache_res *res_cache);
+
+int pp_igc_lut_cache_params(struct mdp_igc_lut_data *config,
+			    struct mdp_pp_cache_res *res_cache,
+			    u32 copy_from_kernel);
+
+int pp_pgc_lut_cache_params(struct mdp_pgc_lut_data *config,
+			    struct mdss_pp_res_type *mdss_pp_res,
+			    int location);
+
+int pp_copy_layer_igc_payload(struct mdp_overlay_pp_params *pp_info);
+int pp_copy_layer_hist_lut_payload(struct mdp_overlay_pp_params *pp_info);
+int pp_copy_layer_pa_payload(struct mdp_overlay_pp_params *pp_info);
+int pp_copy_layer_pcc_payload(struct mdp_overlay_pp_params *pp_info);
+#endif
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp_common.c b/drivers/video/fbdev/msm/mdss_mdp_pp_common.c
new file mode 100644
index 0000000..c4e7462
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp_common.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+#include "mdss_mdp_pp_common.h"
+
+void pp_pa_set_sts(struct pp_sts_type *pp_sts,
+		   struct mdp_pa_data_v1_7 *pa_data,
+		   int enable_flag, int block_type)
+{ /* Translates PA enable flags and mode bits into the pa_sts status word. */
+	if (!pp_sts) {
+		pr_err("invalid input pp_sts %pK\n", pp_sts);
+		return;
+	}
+
+	pp_sts->pa_sts = 0; /* rebuilt from scratch on every call */
+
+	if (enable_flag & MDP_PP_OPS_DISABLE) {
+		pp_sts->pa_sts &= ~PP_STS_ENABLE; /* already 0; explicit for clarity */
+		return;
+	} else if (enable_flag & MDP_PP_OPS_ENABLE) {
+		pp_sts->pa_sts |= PP_STS_ENABLE;
+	}
+
+	if (!pa_data) {
+		pr_err("invalid input pa_data %pK\n", pa_data);
+		return;
+	}
+
+	/* Global HSV STS update */
+	if (pa_data->mode & MDP_PP_PA_HUE_MASK)
+		pp_sts->pa_sts |= PP_STS_PA_HUE_MASK;
+	if (pa_data->mode & MDP_PP_PA_SAT_MASK)
+		pp_sts->pa_sts |= PP_STS_PA_SAT_MASK;
+	if (pa_data->mode & MDP_PP_PA_VAL_MASK)
+		pp_sts->pa_sts |= PP_STS_PA_VAL_MASK;
+	if (pa_data->mode & MDP_PP_PA_CONT_MASK)
+		pp_sts->pa_sts |= PP_STS_PA_CONT_MASK;
+	if (pa_data->mode & MDP_PP_PA_SAT_ZERO_EXP_EN)
+		pp_sts->pa_sts |= PP_STS_PA_SAT_ZERO_EXP_EN;
+
+	/* Memory Protect STS update */
+	if (pa_data->mode & MDP_PP_PA_MEM_PROT_HUE_EN)
+		pp_sts->pa_sts |= PP_STS_PA_MEM_PROT_HUE_EN;
+	if (pa_data->mode & MDP_PP_PA_MEM_PROT_SAT_EN)
+		pp_sts->pa_sts |= PP_STS_PA_MEM_PROT_SAT_EN;
+	if (pa_data->mode & MDP_PP_PA_MEM_PROT_VAL_EN)
+		pp_sts->pa_sts |= PP_STS_PA_MEM_PROT_VAL_EN;
+	if (pa_data->mode & MDP_PP_PA_MEM_PROT_CONT_EN)
+		pp_sts->pa_sts |= PP_STS_PA_MEM_PROT_CONT_EN;
+	if (pa_data->mode & MDP_PP_PA_MEM_PROT_BLEND_EN)
+		pp_sts->pa_sts |= PP_STS_PA_MEM_PROT_BLEND_EN;
+	if ((block_type == DSPP) &&
+			(pa_data->mode & MDP_PP_PA_MEM_PROT_SIX_EN))
+		pp_sts->pa_sts |= PP_STS_PA_MEM_PROT_SIX_EN; /* six-zone protect is DSPP-only */
+
+	/* Memory Color STS update */
+	if (pa_data->mode & MDP_PP_PA_MEM_COL_SKIN_MASK)
+		pp_sts->pa_sts |= PP_STS_PA_MEM_COL_SKIN_MASK;
+	if (pa_data->mode & MDP_PP_PA_MEM_COL_SKY_MASK)
+		pp_sts->pa_sts |= PP_STS_PA_MEM_COL_SKY_MASK;
+	if (pa_data->mode & MDP_PP_PA_MEM_COL_FOL_MASK)
+		pp_sts->pa_sts |= PP_STS_PA_MEM_COL_FOL_MASK;
+
+	/* Six Zone STS update */
+	if (block_type == DSPP) { /* six-zone and split bits apply only on the DSPP path */
+		if (pa_data->mode & MDP_PP_PA_SIX_ZONE_HUE_MASK)
+			pp_sts->pa_sts |= PP_STS_PA_SIX_ZONE_HUE_MASK;
+		if (pa_data->mode & MDP_PP_PA_SIX_ZONE_SAT_MASK)
+			pp_sts->pa_sts |= PP_STS_PA_SIX_ZONE_SAT_MASK;
+		if (pa_data->mode & MDP_PP_PA_SIX_ZONE_VAL_MASK)
+			pp_sts->pa_sts |= PP_STS_PA_SIX_ZONE_VAL_MASK;
+
+		pp_sts_set_split_bits(&pp_sts->pa_sts, enable_flag);
+	}
+}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp_common.h b/drivers/video/fbdev/msm/mdss_mdp_pp_common.h
new file mode 100644
index 0000000..53a191d
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp_common.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2016, 2018,The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef MDSS_MDP_PP_COMMON_H
+#define MDSS_MDP_PP_COMMON_H
+
+#include "mdss_mdp.h"
+#include "mdss_mdp_pp.h"
+
+#define JUMP_REGISTERS_OFF(n) ((n) * (sizeof(uint32_t)))
+#define REG_MASK(n) ((BIT(n)) - 1)
+#define REG_MASK_SHIFT(n, shift) ((REG_MASK(n)) << (shift))
+
+void pp_pa_set_sts(struct pp_sts_type *pp_sts,
+		   struct mdp_pa_data_v1_7 *pa_data,
+		   int enable_flag, int block_type);
+#endif
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp_debug.c b/drivers/video/fbdev/msm/mdss_mdp_pp_debug.c
new file mode 100644
index 0000000..d5c3abb
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp_debug.c
@@ -0,0 +1,857 @@
+/*
+ * Copyright (c) 2014, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "mdss_fb.h"
+#include "mdss_mdp.h"
+#include "mdss_mdp_pp.h"
+
+#define MAX_TAB_BUFFER_SIZE	12
+#define MAX_LINE_BUFFER_SIZE 256
+
+static inline void tab_prefix(char *tab_str, int n)
+{ /* Appends n tab characters; tab_str must already be NUL-terminated (strlcat requirement). */
+	while ((n)--)
+		strlcat((tab_str), "\t", MAX_TAB_BUFFER_SIZE);
+}
+
+enum { /* element-width selectors for pp_print_lut() */
+	UINT32,
+	UINT16,
+};
+
+void pp_print_lut(void *data, int size, char *tab, uint32_t type)
+{ /* Dumps a LUT to the debug log as rows of 16 hex values, each line prefixed with tab. */
+	char buf[MAX_LINE_BUFFER_SIZE];
+	int lines = size / 16;
+	int last_start = lines * 16;
+	int i, j;
+	uint32_t read = 0;
+
+	if (!data || !tab)
+		return;
+
+	buf[0] = '\0';
+	for (i = 0; i < lines; i++) {
+		buf[0] = '\0';
+		read += snprintf(buf, MAX_LINE_BUFFER_SIZE - read,
+			"%s", tab); /* NOTE(review): accumulates snprintf's would-be length; assumes no truncation for current tab/row sizes — confirm */
+		for (j = 0; j < 16; j++) {
+			if (type == UINT32)
+				read += snprintf(buf + read,
+					MAX_LINE_BUFFER_SIZE - read, "%04x ",
+					((uint32_t *)data)[i*16+j]);
+			else if (type == UINT16)
+				read += snprintf(buf + read,
+					MAX_LINE_BUFFER_SIZE - read, "%02x ",
+					((uint16_t *)data)[i*16+j]);
+		}
+		snprintf(buf + read, MAX_LINE_BUFFER_SIZE - read, "\n");
+
+		pr_debug("%s", buf);
+		memset(buf, 0, sizeof(char) * MAX_LINE_BUFFER_SIZE);
+		read = 0;
+	}
+
+	lines = size % 16; /* remaining entries (partial last row) */
+	read += snprintf(buf, MAX_LINE_BUFFER_SIZE - read, "%s", tab);
+	for (i = 0; i < lines; i++) {
+		if (type == UINT32)
+			read += snprintf(buf + read,
+					MAX_LINE_BUFFER_SIZE - read, "%04x ",
+					((uint32_t *)data)[last_start+i]);
+		else if (type == UINT16)
+			read += snprintf(buf + read,
+					MAX_LINE_BUFFER_SIZE - read, "%02x ",
+					((uint16_t *)data)[last_start+i]);
+	}
+	snprintf(buf + read, MAX_LINE_BUFFER_SIZE - read, "\n");
+	pr_debug("%s", buf);
+}
+
+void pp_print_pcc_coeff(struct mdp_pcc_coeff *pcc_coeff, int tab_depth)
+{ /* Logs one channel's PCC coefficient set at the given indentation depth. */
+	char tab[MAX_TAB_BUFFER_SIZE];
+	int tmp = 1;
+
+	if (!pcc_coeff || tab_depth < 0)
+		return;
+
+	tab[0] = '\0'; /* start empty so tab_prefix/strlcat appends from index 0 */
+	tab_prefix(tab, tab_depth);
+	pr_debug("%smdp_pcc_coeff:\n", tab);
+	tab_prefix(tab, tmp);
+
+	pr_debug("%sc: %x\n"
+		"%sr: %x\n%sg: %x\n%sb: %x\n"
+		"%srr: %x\n%sgg: %x\n%sbb: %x\n"
+		"%srg: %x\n%sgb: %x\n%srb: %x\n"
+		"%srgb_0: %x\n%srgb_1: %x\n",
+		tab, pcc_coeff->c,
+		tab, pcc_coeff->r,
+		tab, pcc_coeff->g,
+		tab, pcc_coeff->b,
+		tab, pcc_coeff->rr,
+		tab, pcc_coeff->gg,
+		tab, pcc_coeff->bb,
+		tab, pcc_coeff->rg,
+		tab, pcc_coeff->gb,
+		tab, pcc_coeff->rb,
+		tab, pcc_coeff->rgb_0,
+		tab, pcc_coeff->rgb_1);
+}
+
+void pp_print_pcc_cfg_data(struct mdp_pcc_cfg_data *pcc_data, int tab_depth)
+{ /* Logs an mdp_pcc_cfg_data struct and its per-channel coefficients. */
+	char tab[MAX_TAB_BUFFER_SIZE];
+	int tmp = 1;
+
+	if (!pcc_data || tab_depth < 0)
+		return;
+
+	tab[0] = '\0'; /* start empty so tab_prefix/strlcat appends from index 0 */
+	tab_prefix(tab, tab_depth);
+	pr_debug("%smdp_pcc_cfg_data:\n", tab);
+	tab_prefix(tab, tmp);
+
+	pr_debug("%sblock: %x\n%sops: %x\n",
+		tab, pcc_data->block,
+		tab, pcc_data->ops);
+
+	pp_print_pcc_coeff(&pcc_data->r, tab_depth + 1);
+	pp_print_pcc_coeff(&pcc_data->g, tab_depth + 1);
+	pp_print_pcc_coeff(&pcc_data->b, tab_depth + 1);
+}
+
+void pp_print_csc_cfg(struct mdp_csc_cfg *data, int tab_depth)
+{ /* Logs a CSC matrix/bias/limit configuration at the given indentation depth. */
+	char tab[MAX_TAB_BUFFER_SIZE];
+	int tmp = 1;
+
+	if (!data || tab_depth < 0)
+		return;
+
+	tab[0] = '\0'; /* start empty so tab_prefix/strlcat appends from index 0 */
+	tab_prefix(tab, tab_depth);
+	pr_debug("%smdp_csc_cfg:\n", tab);
+	tab_prefix(tab, tmp);
+
+	pr_debug("%sflags: %x\n",
+		tab, data->flags);
+
+	pr_debug("%scsc_mv[]:\n", tab);
+	pp_print_lut(&data->csc_mv[0], 9, tab, UINT32);
+	pr_debug("%scsc_pre_bv[]:\n", tab);
+	pp_print_lut(&data->csc_pre_bv[0], 3, tab, UINT32);
+	pr_debug("%scsc_post_bv[]:\n", tab);
+	pp_print_lut(&data->csc_post_bv[0], 3, tab, UINT32);
+	pr_debug("%scsc_pre_lv[]:\n", tab);
+	pp_print_lut(&data->csc_pre_lv[0], 6, tab, UINT32);
+	pr_debug("%scsc_post_lv[]:\n", tab);
+	pp_print_lut(&data->csc_post_lv[0], 6, tab, UINT32);
+}
+
+void pp_print_csc_cfg_data(struct mdp_csc_cfg_data *data, int tab_depth)
+{ /* Logs an mdp_csc_cfg_data struct at the given indentation depth. */
+	char tab[MAX_TAB_BUFFER_SIZE];
+	int tmp = 1;
+
+	if (!data || tab_depth < 0)
+		return;
+
+	tab[0] = '\0'; /* was tab[tab_depth]: OOB write for depth >= buffer size, garbage before NUL */
+	tab_prefix(tab, tab_depth);
+	pr_debug("%smdp_csc_cfg_data:\n", tab);
+	tab_prefix(tab, tmp);
+
+	pr_debug("%sblock: %x\n",
+		tab, data->block);
+
+	pp_print_csc_cfg(&data->csc_data, tab_depth + 1);
+}
+
+void pp_print_igc_lut_data(struct mdp_igc_lut_data *data, int tab_depth)
+{ /* Logs an mdp_igc_lut_data struct and its LUT contents. */
+	char tab[MAX_TAB_BUFFER_SIZE];
+	int tmp = 1;
+
+	if (!data || tab_depth < 0)
+		return;
+
+	tab[0] = '\0'; /* was tab[tab_depth]: OOB write for depth >= buffer size, garbage before NUL */
+	tab_prefix(tab, tab_depth);
+	pr_debug("%smdp_igc_lut_data:\n", tab);
+	tab_prefix(tab, tmp);
+
+	pr_debug("%sblock: %x\n"
+		"%slen: %x\n"
+		"%sops: %x\n",
+		tab, data->block,
+		tab, data->len,
+		tab, data->ops);
+
+	pr_debug("%sc0_c1_data[]:\n", tab);
+	pp_print_lut(&data->c0_c1_data[0], data->len, tab, UINT32);
+	pr_debug("%sc2_data[]:\n", tab);
+	pp_print_lut(&data->c2_data[0], data->len, tab, UINT32);
+}
+
+void pp_print_ar_gc_lut_data(struct mdp_ar_gc_lut_data *data, int tab_depth)
+{ /* Logs a single gamma-correction segment (x_start/slope/offset). */
+	char tab[MAX_TAB_BUFFER_SIZE];
+	int tmp = 1;
+
+	if (!data || tab_depth < 0)
+		return;
+
+	tab[0] = '\0'; /* was tab[tab_depth]: OOB write for depth >= buffer size, garbage before NUL */
+	tab_prefix(tab, tab_depth);
+	pr_debug("%smdp_ar_gc_lut_data:\n", tab);
+	tab_prefix(tab, tmp);
+
+	pr_debug("%sx_start: %x\n"
+		"%sslope: %x\n"
+		"%soffset: %x\n",
+		tab, data->x_start,
+		tab, data->slope,
+		tab, data->offset);
+}
+
+void pp_print_pgc_lut_data(struct mdp_pgc_lut_data *data, int tab_depth)
+{ /* Logs an mdp_pgc_lut_data struct and each R/G/B stage. */
+	char tab[MAX_TAB_BUFFER_SIZE];
+	int tmp = 1;
+	int i;
+
+	if (!data || tab_depth < 0)
+		return;
+
+	tab[0] = '\0'; /* was tab[tab_depth]: OOB write for depth >= buffer size, garbage before NUL */
+	tab_prefix(tab, tab_depth);
+	pr_debug("%smdp_pgc_lut_data:\n", tab);
+	tab_prefix(tab, tmp);
+
+	pr_debug("%sblock: %x\n"
+		"%sflags: %x\n"
+		"%snum_r_stages: %x\n"
+		"%snum_g_stages: %x\n"
+		"%snum_b_stages: %x\n",
+		tab, data->block,
+		tab, data->flags,
+		tab, data->num_r_stages,
+		tab, data->num_g_stages,
+		tab, data->num_b_stages);
+
+	for (i = 0; i < data->num_r_stages; i++) {
+		pr_debug("%sr_data[%d]\n", tab, i);
+		pp_print_ar_gc_lut_data(&data->r_data[i], tab_depth + 1);
+	}
+	for (i = 0; i < data->num_g_stages; i++) {
+		pr_debug("%sg_data[%d]\n", tab, i);
+		pp_print_ar_gc_lut_data(&data->g_data[i], tab_depth + 1);
+	}
+	for (i = 0; i < data->num_b_stages; i++) {
+		pr_debug("%sb_data[%d]\n", tab, i);
+		pp_print_ar_gc_lut_data(&data->b_data[i], tab_depth + 1);
+	}
+}
+
+void pp_print_hist_lut_data(struct mdp_hist_lut_data *data, int tab_depth)
+{ /* Logs an mdp_hist_lut_data struct and its LUT contents. */
+	char tab[MAX_TAB_BUFFER_SIZE];
+	int tmp = 1;
+
+	if (!data || tab_depth < 0)
+		return;
+
+	tab[0] = '\0'; /* was tab[tab_depth]: OOB write for depth >= buffer size, garbage before NUL */
+	tab_prefix(tab, tab_depth);
+	pr_debug("%smdp_hist_lut_data:\n", tab);
+	tab_prefix(tab, tmp);
+
+	pr_debug("%sblock: %x\n"
+		"%sops: %x\n"
+		"%slen: %x\n",
+		tab, data->block,
+		tab, data->ops,
+		tab, data->len);
+
+	pr_debug("%sdata[]:\n", tab);
+	pp_print_lut(&data->data[0], data->len, tab, UINT32);
+}
+
+void pp_print_lut_cfg_data(struct mdp_lut_cfg_data *data, int tab_depth)
+{ /* Logs an mdp_lut_cfg_data union, dispatching on lut_type. */
+	char tab[MAX_TAB_BUFFER_SIZE];
+	int tmp = 1;
+
+	if (!data || tab_depth < 0)
+		return;
+
+	tab[0] = '\0'; /* was tab[tab_depth]: OOB write for depth >= buffer size, garbage before NUL */
+	tab_prefix(tab, tab_depth);
+	pr_debug("%smdp_lut_cfg_data:\n", tab);
+	tab_prefix(tab, tmp);
+
+	pr_debug("%slut_type: %x\n",
+		tab, data->lut_type);
+
+	switch (data->lut_type) {
+	case mdp_lut_igc:
+		pp_print_igc_lut_data(&data->data.igc_lut_data, tab_depth + 1);
+		break;
+	case mdp_lut_pgc:
+		pp_print_pgc_lut_data(&data->data.pgc_lut_data, tab_depth + 1);
+		break;
+	case mdp_lut_hist:
+		pp_print_hist_lut_data(&data->data.hist_lut_data,
+				tab_depth + 1);
+		break;
+	default:
+		break;
+	}
+}
+
+/* Debug-dump a struct mdp_qseed_cfg and its len-entry u32 coefficient table. */
+void pp_print_qseed_cfg(struct mdp_qseed_cfg *data, int tab_depth)
+{
+	char tab[MAX_TAB_BUFFER_SIZE];
+	int tmp = 1;	/* field lines are indented one extra level */
+
+	if (!data || tab_depth < 0)
+		return;
+
+	/* NOTE(review): unbounded index; siblings use tab[0] -- confirm tab_prefix() terminates */
+	tab[tab_depth] = '\0';
+	tab_prefix(tab, tab_depth);
+	pr_debug("%smdp_qseed_cfg:\n", tab);
+	tab_prefix(tab, tmp);
+
+	pr_debug("%stable_num: %x\n"
+		"%sops: %x\n"
+		"%slen: %x\n",
+		tab, data->table_num,
+		tab, data->ops,
+		tab, data->len);
+
+	pr_debug("%sdata[]:\n", tab);
+	pp_print_lut(&data->data[0], data->len, tab, UINT32);
+}
+
+/* Debug-dump a struct mdp_qseed_cfg_data wrapper and its embedded qseed cfg. */
+void pp_print_qseed_cfg_data(struct mdp_qseed_cfg_data *data, int tab_depth)
+{
+	char tab[MAX_TAB_BUFFER_SIZE];
+	int tmp = 1;	/* field lines are indented one extra level */
+
+	if (!data || tab_depth < 0)
+		return;
+
+	/* NOTE(review): unbounded index; siblings use tab[0] -- confirm tab_prefix() terminates */
+	tab[tab_depth] = '\0';
+	tab_prefix(tab, tab_depth);
+	pr_debug("%smdp_qseed_cfg_data:\n", tab);
+	tab_prefix(tab, tmp);
+
+	pr_debug("%sblock: %x\n",
+		tab, data->block);
+
+	pp_print_qseed_cfg(&data->qseed_data, tab_depth + 1);
+}
+
+/* Debug-dump a struct mdp_pa_cfg (picture-adjust HSV/contrast values). */
+void pp_print_pa_cfg(struct mdp_pa_cfg *data, int tab_depth)
+{
+	char tab[MAX_TAB_BUFFER_SIZE];
+	int tmp = 1;	/* field lines are indented one extra level */
+
+	if (!data || tab_depth < 0)
+		return;
+
+	tab[0] = '\0';
+	tab_prefix(tab, tab_depth);
+	pr_debug("%smdp_pa_cfg:\n", tab);
+	tab_prefix(tab, tmp);
+
+	pr_debug("%sflags: %x\n"
+		"%shue_adj: %x\n"
+		"%ssat_adj: %x\n"
+		"%sval_adj: %x\n"
+		"%scont_adj: %x\n",
+		tab, data->flags,
+		tab, data->hue_adj,
+		tab, data->sat_adj,
+		tab, data->val_adj,
+		tab, data->cont_adj);
+}
+
+/* Debug-dump a struct mdp_pa_cfg_data wrapper and its embedded PA cfg. */
+void pp_print_pa_cfg_data(struct mdp_pa_cfg_data *data, int tab_depth)
+{
+	char tab[MAX_TAB_BUFFER_SIZE];
+	int tmp = 1;	/* field lines are indented one extra level */
+
+	if (!data || tab_depth < 0)
+		return;
+
+	tab[0] = '\0';
+	tab_prefix(tab, tab_depth);
+	pr_debug("%smdp_pa_cfg_data:\n", tab);
+	tab_prefix(tab, tmp);
+
+	pr_debug("%sblock: %x\n",
+		tab, data->block);
+
+	pp_print_pa_cfg(&data->pa_data, tab_depth + 1);
+}
+
+/* Debug-dump one struct mdp_pa_mem_col_cfg (memory-color region config). */
+void pp_print_mem_col_cfg(struct mdp_pa_mem_col_cfg *data, int tab_depth)
+{
+	char tab[MAX_TAB_BUFFER_SIZE];
+	int tmp = 1;	/* field lines are indented one extra level */
+
+	if (!data || tab_depth < 0)
+		return;
+
+	tab[0] = '\0';
+	tab_prefix(tab, tab_depth);
+	pr_debug("%smdp_pa_mem_col_cfg:\n", tab);
+	tab_prefix(tab, tmp);
+
+	pr_debug("%scolor_adjust_p0: %x\n"
+		"%scolor_adjust_p1: %x\n"
+		"%shue_region: %x\n"
+		"%ssat_region: %x\n"
+		"%sval_region: %x\n",
+		tab, data->color_adjust_p0,
+		tab, data->color_adjust_p1,
+		tab, data->hue_region,
+		tab, data->sat_region,
+		tab, data->val_region);
+}
+
+/*
+ * Debug-dump a struct mdp_pa_v2_data: global adjustments, the three
+ * memory-color configs (skin/sky/foliage) one level deeper, then the
+ * six-zone curves.
+ */
+void pp_print_pa_v2_data(struct mdp_pa_v2_data *data, int tab_depth)
+{
+	char tab[MAX_TAB_BUFFER_SIZE];
+	int tmp = 1;	/* field lines are indented one extra level */
+
+	if (!data || tab_depth < 0)
+		return;
+
+	tab[0] = '\0';
+	tab_prefix(tab, tab_depth);
+	pr_debug("%smdp_pa_v2_data:\n", tab);
+	tab_prefix(tab, tmp);
+
+	pr_debug("%sflags: %x\n"
+		"%sglobal_hue_adj: %x\n"
+		"%sglobal_sat_adj: %x\n"
+		"%sglobal_val_adj: %x\n"
+		"%sglobal_cont_adj: %x\n",
+		tab, data->flags,
+		tab, data->global_hue_adj,
+		tab, data->global_sat_adj,
+		tab, data->global_val_adj,
+		tab, data->global_cont_adj);
+
+	pp_print_mem_col_cfg(&data->skin_cfg, tab_depth + 1);
+	pp_print_mem_col_cfg(&data->sky_cfg, tab_depth + 1);
+	pp_print_mem_col_cfg(&data->fol_cfg, tab_depth + 1);
+
+	pr_debug("%ssix_zone_len: %x\n"
+		"%ssix_zone_thresh: %x\n",
+		tab, data->six_zone_len,
+		tab, data->six_zone_thresh);
+
+	pr_debug("%ssix_zone_curve_p0[]:\n", tab);
+	pp_print_lut(&data->six_zone_curve_p0[0], data->six_zone_len, tab,
+			UINT32);
+	pr_debug("%ssix_zone_curve_p1[]:\n", tab);
+	pp_print_lut(&data->six_zone_curve_p1[0], data->six_zone_len, tab,
+			UINT32);
+}
+
+/* Debug-dump a struct mdp_pa_v2_cfg_data wrapper and its PA v2 payload. */
+void pp_print_pa_v2_cfg_data(struct mdp_pa_v2_cfg_data *data, int tab_depth)
+{
+	char tab[MAX_TAB_BUFFER_SIZE];
+	int tmp = 1;	/* field lines are indented one extra level */
+
+	if (!data || tab_depth < 0)
+		return;
+
+	tab[0] = '\0';
+	tab_prefix(tab, tab_depth);
+	pr_debug("%smdp_pa_v2_cfg_data:\n", tab);
+	tab_prefix(tab, tmp);
+
+	pr_debug("%sblock: %x\n",
+		tab, data->block);
+
+	pp_print_pa_v2_data(&data->pa_v2_data, tab_depth + 1);
+}
+
+/* Debug-dump a struct mdp_dither_cfg_data (per-channel dither depths). */
+void pp_print_dither_cfg_data(struct mdp_dither_cfg_data *data, int tab_depth)
+{
+	char tab[MAX_TAB_BUFFER_SIZE];
+	int tmp = 1;	/* field lines are indented one extra level */
+
+	if (!data || tab_depth < 0)
+		return;
+
+	tab[0] = '\0';
+	tab_prefix(tab, tab_depth);
+	pr_debug("%smdp_dither_cfg_data:\n", tab);
+	tab_prefix(tab, tmp);
+
+	pr_debug("%sblock: %x\n"
+		"%sflags: %x\n"
+		"%sg_y_depth: %x\n"
+		"%sr_cr_depth: %x\n"
+		"%sb_cb_depth: %x\n",
+		tab, data->block,
+		tab, data->flags,
+		tab, data->g_y_depth,
+		tab, data->r_cr_depth,
+		tab, data->b_cb_depth);
+}
+
+/*
+ * Debug-dump a struct mdp_gamut_cfg_data: header fields, the per-table
+ * sizes, then every r/g/b table (u16 entries, tbl_size[i] each).
+ */
+void pp_print_gamut_cfg_data(struct mdp_gamut_cfg_data *data, int tab_depth)
+{
+	char tab[MAX_TAB_BUFFER_SIZE];
+	int tmp = 1;	/* field lines are indented one extra level */
+	int i;
+
+	if (!data || tab_depth < 0)
+		return;
+
+	tab[0] = '\0';
+	tab_prefix(tab, tab_depth);
+	pr_debug("%smdp_gamut_cfg_data:\n", tab);
+	tab_prefix(tab, tmp);
+
+	pr_debug("%sblock: %x\n"
+		"%sflags: %x\n"
+		"%sgamut_first: %x\n",
+		tab, data->block,
+		tab, data->flags,
+		tab, data->gamut_first);
+
+	pr_debug("%stbl_size[]:\n", tab);
+	pp_print_lut(&data->tbl_size[0], MDP_GAMUT_TABLE_NUM, tab, UINT32);
+
+	for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+		pr_debug("%sr_tbl[%d]:\n", tab, i);
+		pp_print_lut(&data->r_tbl[i][0], data->tbl_size[i], tab,
+				UINT16);
+	}
+
+	for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+		pr_debug("%sg_tbl[%d]:\n", tab, i);
+		pp_print_lut(&data->g_tbl[i][0], data->tbl_size[i], tab,
+				UINT16);
+	}
+
+	for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+		pr_debug("%sb_tbl[%d]:\n", tab, i);
+		pp_print_lut(&data->b_tbl[i][0], data->tbl_size[i], tab,
+				UINT16);
+	}
+}
+
+/*
+ * Debug-dump a struct mdss_ad_init (assertive-display init parameters),
+ * including its fixed-size LUTs and the bl_lin/bl_lin_inv tables of
+ * bl_lin_len entries each.
+ */
+void pp_print_ad_init(struct mdss_ad_init *data, int tab_depth)
+{
+	char tab[MAX_TAB_BUFFER_SIZE];
+	int tmp = 1;	/* field lines are indented one extra level */
+
+	if (!data || tab_depth < 0)
+		return;
+
+	tab[0] = '\0';
+	tab_prefix(tab, tab_depth);
+	pr_debug("%smdss_ad_init:\n", tab);
+	tab_prefix(tab, tmp);
+
+	/* 33-entry LUT lengths are hard-coded; presumably fixed by AD HW -- confirm */
+	pr_debug("%sasym_lut[]:\n", tab);
+	pp_print_lut(&data->asym_lut[0], 33, tab, UINT32);
+
+	pr_debug("%scolor_corr_lut[]:\n", tab);
+	pp_print_lut(&data->color_corr_lut[0], 33, tab, UINT32);
+
+	pr_debug("%si_control[]:\n%s%x %x\n"
+		"%sblack_lvl: %x\n"
+		"%swhite_lvl: %x\n"
+		"%svar: %x\n"
+		"%slimit_ampl: %x\n"
+		"%si_dither: %x\n"
+		"%sslope_max: %x\n"
+		"%sslope_min: %x\n"
+		"%sdither_ctl: %x\n"
+		"%sformat: %x\n"
+		"%sauto_size: %x\n"
+		"%sframe_w: %x\n"
+		"%sframe_h: %x\n"
+		"%slogo_v: %x\n"
+		"%slogo_h: %x\n"
+		"%sbl_lin_len: %x\n",
+		tab, tab, data->i_control[0], data->i_control[1],
+		tab, data->black_lvl,
+		tab, data->white_lvl,
+		tab, data->var,
+		tab, data->limit_ampl,
+		tab, data->i_dither,
+		tab, data->slope_max,
+		tab, data->slope_min,
+		tab, data->dither_ctl,
+		tab, data->format,
+		tab, data->auto_size,
+		tab, data->frame_w,
+		tab, data->frame_h,
+		tab, data->logo_v,
+		tab, data->logo_h,
+		tab, data->bl_lin_len);
+
+	pr_debug("%sbl_lin[]:\n", tab);
+	pp_print_lut(&data->bl_lin[0], data->bl_lin_len, tab, UINT32);
+
+	pr_debug("%sbl_lin_inv[]:\n", tab);
+	pp_print_lut(&data->bl_lin_inv[0], data->bl_lin_len, tab, UINT32);
+}
+
+/* Debug-dump a struct mdss_ad_cfg (assertive-display runtime config). */
+void pp_print_ad_cfg(struct mdss_ad_cfg *data, int tab_depth)
+{
+	char tab[MAX_TAB_BUFFER_SIZE];
+	int tmp = 1;	/* field lines are indented one extra level */
+
+	if (!data || tab_depth < 0)
+		return;
+
+	tab[0] = '\0';
+	tab_prefix(tab, tab_depth);
+	pr_debug("%smdss_ad_cfg:\n", tab);
+	tab_prefix(tab, tmp);
+
+	pr_debug("%smode: %x\n",
+		tab, data->mode);
+
+	pr_debug("%sal_calib_lut[]:\n", tab);
+	pp_print_lut(&data->al_calib_lut[0], 33, tab, UINT32);
+
+	pr_debug("%sbacklight_min: %x\n"
+		"%sbacklight_max: %x\n"
+		"%sbacklight_scale: %x\n"
+		"%samb_light_min: %x\n",
+		tab, data->backlight_min,
+		tab, data->backlight_max,
+		tab, data->backlight_scale,
+		tab, data->amb_light_min);
+
+	/* filter[2] and calib[4] are dumped without a header line */
+	pp_print_lut(&data->filter[0], 2, tab, UINT16);
+	pp_print_lut(&data->calib[0], 4, tab, UINT16);
+
+	pr_debug("%sstrength_limit: %x\n"
+		"%st_filter_recursion: %x\n"
+		"%sstab_itr: %x\n"
+		"%sbl_ctrl_mode: %x\n",
+		tab, data->strength_limit,
+		tab, data->t_filter_recursion,
+		tab, data->stab_itr,
+		tab, data->bl_ctrl_mode);
+}
+
+/*
+ * Debug-dump a struct mdss_ad_init_cfg, following ops to print either the
+ * init or the cfg member of the params union (init takes precedence when
+ * both bits are set).
+ */
+void pp_print_ad_init_cfg(struct mdss_ad_init_cfg *data, int tab_depth)
+{
+	char tab[MAX_TAB_BUFFER_SIZE];
+	int tmp = 1;	/* field lines are indented one extra level */
+
+	if (!data || tab_depth < 0)
+		return;
+
+	tab[0] = '\0';
+	tab_prefix(tab, tab_depth);
+	pr_debug("%smdss_ad_init_cfg:\n", tab);
+	tab_prefix(tab, tmp);
+
+	pr_debug("%sops: %x\n",
+		tab, data->ops);
+
+	if (data->ops & MDP_PP_AD_INIT)
+		pp_print_ad_init(&data->params.init, tab_depth + 1);
+	else if (data->ops & MDP_PP_AD_CFG)
+		pp_print_ad_cfg(&data->params.cfg, tab_depth + 1);
+}
+
+/*
+ * Debug-dump a struct mdss_ad_input, printing the union member that
+ * corresponds to the mode (ambient light, strength, or calib backlight).
+ */
+void pp_print_ad_input(struct mdss_ad_input *data, int tab_depth)
+{
+	char tab[MAX_TAB_BUFFER_SIZE];
+	int tmp = 1;	/* field lines are indented one extra level */
+
+	if (!data || tab_depth < 0)
+		return;
+
+	tab[0] = '\0';
+	tab_prefix(tab, tab_depth);
+	pr_debug("%smdss_ad_input:\n", tab);
+	tab_prefix(tab, tmp);
+
+	pr_debug("%smode: %x\n",
+		tab, data->mode);
+
+	switch (data->mode) {
+	case MDSS_AD_MODE_AUTO_BL:
+	case MDSS_AD_MODE_AUTO_STR:
+		pr_debug("%samb_light: %x\n",
+			tab, data->in.amb_light);
+		break;
+	case MDSS_AD_MODE_TARG_STR:
+	case MDSS_AD_MODE_MAN_STR:
+		pr_debug("%sstrength: %x\n",
+			tab, data->in.strength);
+		break;
+	case MDSS_AD_MODE_CALIB:
+		pr_debug("%scalib_bl: %x\n",
+			tab, data->in.calib_bl);
+		break;
+	default:
+		break;
+	}
+
+	pr_debug("%soutput: %x\n",
+		tab, data->output);
+}
+
+/* Debug-dump a struct mdp_histogram_cfg (histogram collection setup). */
+void pp_print_histogram_cfg(struct mdp_histogram_cfg *data, int tab_depth)
+{
+	char tab[MAX_TAB_BUFFER_SIZE];
+	int tmp = 1;	/* field lines are indented one extra level */
+
+	if (!data || tab_depth < 0)
+		return;
+
+	tab[0] = '\0';
+	tab_prefix(tab, tab_depth);
+	pr_debug("%smdp_histogram_cfg:\n", tab);
+	tab_prefix(tab, tmp);
+
+	pr_debug("%sops: %x\n"
+		"%sblock: %x\n"
+		"%sframe_cnt: %x\n"
+		"%sbit_mask: %x\n"
+		"%snum_bins: %x\n",
+		tab, data->ops,
+		tab, data->block,
+		tab, data->frame_cnt,
+		tab, data->bit_mask,
+		tab, data->num_bins);
+}
+
+/* Debug-dump a struct mdp_sharp_cfg (sharpening thresholds/strength). */
+void pp_print_sharp_cfg(struct mdp_sharp_cfg *data, int tab_depth)
+{
+	char tab[MAX_TAB_BUFFER_SIZE];
+	int tmp = 1;	/* field lines are indented one extra level */
+
+	if (!data || tab_depth < 0)
+		return;
+
+	tab[0] = '\0';
+	tab_prefix(tab, tab_depth);
+	pr_debug("%smdp_sharp_cfg:\n", tab);
+	tab_prefix(tab, tmp);
+
+	pr_debug("%sflags: %x\n"
+		"%sstrength: %x\n"
+		"%sedge_thr: %x\n"
+		"%ssmooth_thr: %x\n"
+		"%snoise_thr: %x\n",
+		tab, data->flags,
+		tab, data->strength,
+		tab, data->edge_thr,
+		tab, data->smooth_thr,
+		tab, data->noise_thr);
+}
+
+/* Debug-dump a struct mdp_calib_config_data (single register calib op). */
+void pp_print_calib_config_data(struct mdp_calib_config_data *data,
+				int tab_depth)
+{
+	char tab[MAX_TAB_BUFFER_SIZE];
+	int tmp = 1;	/* field lines are indented one extra level */
+
+	if (!data || tab_depth < 0)
+		return;
+
+	tab[0] = '\0';
+	tab_prefix(tab, tab_depth);
+	pr_debug("%smdp_calib_config_data:\n", tab);
+	tab_prefix(tab, tmp);
+
+	pr_debug("%sops: %x\n"
+		"%saddr: %x\n"
+		"%sdata: %x\n",
+		tab, data->ops,
+		tab, data->addr,
+		tab, data->data);
+}
+
+/* Debug-dump a struct mdp_calib_config_buffer and its size-entry buffer. */
+void pp_print_calib_config_buffer(struct mdp_calib_config_buffer *data,
+				int tab_depth)
+{
+	char tab[MAX_TAB_BUFFER_SIZE];
+	int tmp = 1;	/* field lines are indented one extra level */
+
+	if (!data || tab_depth < 0)
+		return;
+
+	tab[0] = '\0';
+	tab_prefix(tab, tab_depth);
+	pr_debug("%smdp_calib_config_buffer:\n", tab);
+	tab_prefix(tab, tmp);
+
+	pr_debug("%sops: %x\n"
+		"%ssize: %x\n",
+		tab, data->ops,
+		tab, data->size);
+
+	pr_debug("%sbuffer[]:\n", tab);
+	pp_print_lut(&data->buffer[0], data->size, tab, UINT32);
+}
+
+/* Debug-dump a struct mdp_calib_dcm_state. */
+void pp_print_calib_dcm_state(struct mdp_calib_dcm_state *data, int tab_depth)
+{
+	char tab[MAX_TAB_BUFFER_SIZE];
+	int tmp = 1;	/* field lines are indented one extra level */
+
+	if (!data || tab_depth < 0)
+		return;
+
+	tab[0] = '\0';
+	tab_prefix(tab, tab_depth);
+	pr_debug("%smdp_calib_dcm_state:\n", tab);
+	tab_prefix(tab, tmp);
+
+	pr_debug("%sops: %x\n"
+		"%sdcm_state: %x\n",
+		tab, data->ops,
+		tab, data->dcm_state);
+}
+
+/* Debug-dump a struct mdss_calib_cfg. */
+void pp_print_mdss_calib_cfg(struct mdss_calib_cfg *data, int tab_depth)
+{
+	char tab[MAX_TAB_BUFFER_SIZE];
+	int tmp = 1;	/* field lines are indented one extra level */
+
+	if (!data || tab_depth < 0)
+		return;
+
+	tab[0] = '\0';
+	tab_prefix(tab, tab_depth);
+	pr_debug("%smdss_calib_cfg:\n", tab);
+	tab_prefix(tab, tmp);
+
+	pr_debug("%sops: %x\n"
+		"%scalib_mask: %x\n",
+		tab, data->ops,
+		tab, data->calib_mask);
+}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp_v1_7.c b/drivers/video/fbdev/msm/mdss_mdp_pp_v1_7.c
new file mode 100644
index 0000000..9438aca
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp_v1_7.c
@@ -0,0 +1,2117 @@
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/uaccess.h>
+#include "mdss_fb.h"
+#include "mdss_mdp.h"
+#include "mdss_mdp_pp.h"
+#include "mdss_mdp_pp_common.h"
+
+
+/* MDP v1.7 specific macros */
+
+/* PCC_EN for PCC opmode*/
+#define PCC_ENABLE	BIT(0)
+#define PCC_OP_MODE_OFF 0
+#define PCC_CONST_COEFF_OFF 4
+#define PCC_R_COEFF_OFF 0x10
+#define PCC_G_COEFF_OFF 0x1C
+#define PCC_B_COEFF_OFF 0x28
+#define PCC_RG_COEFF_OFF 0x34
+#define PCC_RB_COEFF_OFF 0x40
+#define PCC_GB_COEFF_OFF 0x4C
+#define PCC_RGB_COEFF_OFF 0x58
+#define PCC_CONST_COEFF_MASK 0xFFFF
+#define PCC_COEFF_MASK 0x3FFFF
+
+
+#define GAMUT_OP_MODE_OFF 0
+#define GAMUT_TABLE_INDEX 4
+#define GAMUT_TABLE_UPPER_R 8
+#define GAMUT_TABLE_LOWER_GB 0xC
+#define GAMUT_C0_SCALE_OFF 0x10
+#define GAMUT_CLK_CTRL 0xD0
+#define GAMUT_CLK_STATUS 0xD4
+#define GAMUT_READ_TABLE_EN  BIT(16)
+#define GAMUT_TABLE_SELECT(x) ((BIT(x)) << 12)
+#define GAMUT_COARSE_EN (BIT(2))
+#define GAMUT_COARSE_INDEX 1248
+#define GAMUT_FINE_INDEX 0
+#define GAMUT_MAP_EN BIT(1)
+#define GAMUT_ENABLE BIT(0)
+#define GAMUT_CLK_GATING_ACTIVE 0x0
+#define GAMUT_CLK_GATING_PARTIAL_ACTIVE 0x11
+#define GAMUT_CLK_GATING_INACTIVE 0x33
+
+#define IGC_MASK_MAX 3
+#define IGC_C0_LUT 0
+#define IGC_RGB_C0_LUT 0xC
+#define IGC_DMA_C0_LUT 0x18
+#define IGC_CONFIG_MASK(n) \
+	((((1 << (IGC_MASK_MAX + 1)) - 1) & ~(1 << n)) << 28)
+#define IGC_INDEX_UPDATE BIT(25)
+#define IGC_INDEX_VALUE_UPDATE (BIT(24) | IGC_INDEX_UPDATE)
+#define IGC_DATA_MASK (BIT(12) - 1)
+#define IGC_DSPP_OP_MODE_EN BIT(0)
+
+#define HIST_LUT_VIG_OP_FIRST_EN	BIT(21)
+#define HIST_LUT_DSPP_OP_FIRST_EN	BIT(21)
+#define HIST_LUT_VIG_OP_ENABLE		BIT(10)
+#define HIST_LUT_DSPP_OP_ENABLE		BIT(19)
+#define REG_SSPP_VIG_HIST_LUT_BASE	0x1400
+#define REG_DSPP_HIST_LUT_BASE		0x1400
+#define REG_SSPP_VIG_HIST_SWAP_BASE	0x300
+#define REG_DSPP_HIST_SWAP_BASE		0x234
+#define ENHIST_LOWER_VALUE_MASK		0x3FF
+#define ENHIST_UPPER_VALUE_MASK		0x3FF0000
+#define ENHIST_BIT_SHIFT		16
+
+#define PGC_OPMODE_OFF 0
+#define PGC_C0_LUT_INDEX 4
+#define PGC_INDEX_OFF 4
+#define PGC_C1C2_LUT_OFF 8
+#define PGC_LUT_SWAP 0x1C
+#define PGC_LUT_SEL 0x20
+#define PGC_DATA_MASK (BIT(10) - 1)
+#define PGC_ODD_SHIFT 16
+#define PGC_SWAP 1
+#define PGC_8B_ROUND BIT(1)
+#define PGC_ENABLE BIT(0)
+
+#define HIST_V3_INTR_BIT_MASK 0xF33333
+#define HIST_CTL_OFF_DSPP_V1_7 0x210
+#define HIST_CTL_OFF_SSPP_V1_7 0x2C4
+#define HIST_DATA_OFF_DSPP_V1_7 0x1000
+#define HIST_DATA_OFF_SSPP_V1_7 0xA00
+#define HIST_DATA_MASK 0xFFFFFF
+#define DITHER_MATRIX_OFF 0x14
+#define DITHER_MATRIX_INDEX 16
+#define DITHER_DEPTH_MAP_INDEX 9
+static u32 dither_matrix[DITHER_MATRIX_INDEX] = {
+	15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10};
+static u32 dither_depth_map[DITHER_DEPTH_MAP_INDEX] = {
+	0, 0, 0, 0, 0, 1, 2, 3, 3};
+
+#define PA_DSPP_GLOBAL_OFF 0x238
+#define PA_DSPP_MEM_COL_SKIN_P0_OFF 0x254
+#define PA_DSPP_MEM_COL_SKIN_P2_OFF 0x318
+#define PA_DSPP_MEM_COL_SKY_P0_OFF 0x268
+#define PA_DSPP_MEM_COL_SKY_P2_OFF 0x320
+#define PA_DSPP_MEM_COL_FOL_P0_OFF 0x27C
+#define PA_DSPP_MEM_COL_FOL_P2_OFF 0x328
+#define PA_SIX_ZONE_LUT_OFF 0x248
+#define PA_SIX_ZONE_REGION_OFF 0x250
+#define PA_SIX_ZONE_ADJ_OFF 0x330
+#define PA_VIG_GLOBAL_OFF 0x310
+#define PA_VIG_MEM_COL_SKIN_P0_OFF 0x288
+#define PA_VIG_MEM_COL_SKIN_P2_OFF 0x418
+#define PA_VIG_MEM_COL_SKY_P0_OFF 0x29C
+#define PA_VIG_MEM_COL_SKY_P2_OFF 0x420
+#define PA_VIG_MEM_COL_FOL_P0_OFF 0x2B0
+#define PA_VIG_MEM_COL_FOL_P2_OFF 0x428
+#define PA_DSPP_HOLD_OFF 0x314
+#define PA_VIG_HOLD_OFF 0x414
+#define PA_GLOBAL_HUE_MASK 0xFFF
+#define PA_GLOBAL_SAT_MASK 0xFFFF
+#define PA_GLOBAL_VAL_MASK 0xFF
+#define PA_GLOBAL_CONT_MASK 0xFF
+#define PA_MEM_COL_ADJ_P0_MASK 0xFFFF07FF
+#define PA_MEM_COL_HUE_REGION_MASK 0x7FF07FF
+#define PA_MEM_COL_SAT_REGION_MASK 0xFFFFFF
+#define PA_MEM_COL_VAL_REGION_MASK 0xFFFFFF
+#define PA_SIX_ZONE_INDEX_UPDATE BIT(26)
+#define PA_SIX_ZONE_VALUE_UPDATE BIT(25)
+#define PA_SIX_ZONE_CURVE_P0_MASK 0xFFF
+#define PA_SIX_ZONE_CURVE_P1_MASK 0xFFF0FFF
+#define PA_SIX_ZONE_ADJ_P0_MASK 0xFFFF
+#define PA_HOLD_MASK 0x3
+#define PA_HOLD_SAT_SHIFT 0
+#define PA_HOLD_VAL_SHIFT 2
+#define PA_HOLD_SKIN_SHIFT 0
+#define PA_HOLD_SKY_SHIFT 4
+#define PA_HOLD_FOL_SHIFT 8
+#define PA_HOLD_SIX_ZONE_SHIFT 12
+#define PA_HOLD_SKIN_MASK 0xF
+#define PA_HOLD_SKY_MASK 0xF0
+#define PA_HOLD_FOL_MASK 0xF00
+#define PA_HOLD_SIX_ZONE_MASK 0xF000
+#define PA_DSPP_OP_ENABLE BIT(20)
+#define PA_DSPP_OP_HUE_MASK BIT(25)
+#define PA_DSPP_OP_SAT_MASK BIT(26)
+#define PA_DSPP_OP_VAL_MASK BIT(27)
+#define PA_DSPP_OP_CONT_MASK BIT(28)
+#define PA_DSPP_OP_SAT_ZERO_EXP_EN BIT(1)
+#define PA_DSPP_OP_SIX_ZONE_HUE_MASK BIT(29)
+#define PA_DSPP_OP_SIX_ZONE_SAT_MASK BIT(30)
+#define PA_DSPP_OP_SIX_ZONE_VAL_MASK BIT(31)
+#define PA_DSPP_OP_MEM_COL_SKIN_MASK BIT(5)
+#define PA_DSPP_OP_MEM_COL_FOL_MASK BIT(6)
+#define PA_DSPP_OP_MEM_COL_SKY_MASK BIT(7)
+#define PA_DSPP_OP_MEM_PROT_HUE_EN BIT(22)
+#define PA_DSPP_OP_MEM_PROT_SAT_EN BIT(23)
+#define PA_DSPP_OP_MEM_PROT_VAL_EN BIT(24)
+#define PA_DSPP_OP_MEM_PROT_CONT_EN BIT(18)
+#define PA_DSPP_OP_MEM_PROT_BLEND_EN BIT(3)
+#define PA_DSPP_OP_MEM_PROT_SIX_EN BIT(17)
+#define PA_VIG_OP_HUE_MASK BIT(25)
+#define PA_VIG_OP_SAT_MASK BIT(26)
+#define PA_VIG_OP_VAL_MASK BIT(27)
+#define PA_VIG_OP_CONT_MASK BIT(28)
+#define PA_VIG_OP_MEM_PROT_HUE_EN BIT(12)
+#define PA_VIG_OP_MEM_PROT_SAT_EN BIT(13)
+#define PA_VIG_OP_MEM_PROT_VAL_EN BIT(14)
+#define PA_VIG_OP_MEM_PROT_CONT_EN BIT(15)
+#define PA_VIG_OP_MEM_COL_SKIN_MASK BIT(5)
+#define PA_VIG_OP_MEM_COL_FOL_MASK BIT(6)
+#define PA_VIG_OP_MEM_COL_SKY_MASK BIT(7)
+#define PA_VIG_OP_MEM_PROT_BLEND_EN BIT(1)
+#define PA_VIG_OP_ENABLE BIT(4)
+#define PA_VIG_OP_SAT_ZERO_EXP_EN BIT(2)
+
+static struct mdss_pp_res_type_v1_7 config_data;
+
+static int pp_hist_lut_get_config(char __iomem *base_addr, void *cfg_data,
+			   u32 block_type, u32 disp_num);
+static int pp_hist_lut_set_config(char __iomem *base_addr,
+		struct pp_sts_type *pp_sts, void *cfg_data,
+		u32 block_type);
+static int pp_dither_get_config(char __iomem *base_addr, void *cfg_data,
+			   u32 block_type, u32 disp_num);
+static int pp_dither_set_config(char __iomem *base_addr,
+		struct pp_sts_type *pp_sts, void *cfg_data,
+		u32 block_type);
+/* histogram prototypes */
+static int pp_hist_get_config(char __iomem *base_addr, void *cfg_data,
+			   u32 block_type, u32 disp_num);
+static int pp_get_hist_offset(u32 block, u32 *ctl_off);
+static int pp_get_hist_isr(u32 *isr_mask);
+static bool pp_is_sspp_hist_supp(void);
+
+static void pp_opmode_config(int location, struct pp_sts_type *pp_sts,
+		u32 *opmode, int side);
+
+/* Gamut prototypes */
+static int pp_gamut_get_config(char __iomem *base_addr, void *cfg_data,
+			   u32 block_type, u32 disp_num);
+static int pp_gamut_set_config(char __iomem *base_addr,
+		struct pp_sts_type *pp_sts, void *cfg_data,
+		u32 block_type);
+/* PCC prototypes */
+static int pp_pcc_set_config(char __iomem *base_addr,
+			struct pp_sts_type *pp_sts, void *cfg_data,
+			u32 block_type);
+static int pp_pcc_get_config(char __iomem *base_addr, void *cfg_data,
+				u32 block_type, u32 disp_num);
+/* PA prototypes */
+static int pp_pa_set_config(char __iomem *base_addr,
+			struct pp_sts_type *pp_sts, void *cfg_data,
+			u32 block_type);
+static int pp_pa_get_config(char __iomem *base_addr, void *cfg_data,
+				u32 block_type, u32 disp_num);
+static void pp_pa_update_dspp_opmode(int pa_sts, u32 *opmode);
+static void pp_pa_update_vig_opmode(int pa_sts, u32 *opmode);
+
+static int pp_igc_set_config(char __iomem *base_addr,
+		struct pp_sts_type *pp_sts, void *cfg_data,
+		u32 block_type);
+static int pp_igc_get_config(char __iomem *base_addr, void *cfg_data,
+			   u32 block_type, u32 disp_num);
+static int pp_pgc_set_config(char __iomem *base_addr,
+		struct pp_sts_type *pp_sts, void *cfg_data,
+		u32 block_type);
+static int pp_pgc_get_config(char __iomem *base_addr, void *cfg_data,
+			   u32 block_type, u32 disp_num);
+static int pp_pcc_get_version(u32 *version);
+static int pp_igc_get_version(u32 *version);
+static int pp_pgc_get_version(u32 *version);
+static int pp_pa_get_version(u32 *version);
+static int pp_gamut_get_version(u32 *version);
+static int pp_dither_get_version(u32 *version);
+static int pp_hist_lut_get_version(u32 *version);
+static void pp_gamut_clock_gating_en(char __iomem *base_addr);
+
+/*
+ * Populate the MDP v1.7 post-processing driver ops table and return the
+ * private config data for this version.
+ *
+ * Returns a pointer to the static config_data on success, or an ERR_PTR
+ * (-EINVAL) when ops is NULL.  QSEED ops and HIST set/version are left
+ * NULL: not implemented for v1.7.
+ */
+void *pp_get_driver_ops_v1_7(struct mdp_pp_driver_ops *ops)
+{
+	if (!ops) {
+		pr_err("PP driver ops invalid %pK\n", ops);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* IGC ops */
+	ops->pp_ops[IGC].pp_set_config = pp_igc_set_config;
+	ops->pp_ops[IGC].pp_get_config = pp_igc_get_config;
+	ops->pp_ops[IGC].pp_get_version = pp_igc_get_version;
+
+	/* PCC ops */
+	ops->pp_ops[PCC].pp_set_config = pp_pcc_set_config;
+	ops->pp_ops[PCC].pp_get_config = pp_pcc_get_config;
+	ops->pp_ops[PCC].pp_get_version = pp_pcc_get_version;
+	/* GC ops */
+	ops->pp_ops[GC].pp_set_config = pp_pgc_set_config;
+	ops->pp_ops[GC].pp_get_config = pp_pgc_get_config;
+	ops->pp_ops[GC].pp_get_version = pp_pgc_get_version;
+
+	/* PA ops */
+	ops->pp_ops[PA].pp_set_config = pp_pa_set_config;
+	ops->pp_ops[PA].pp_get_config = pp_pa_get_config;
+	ops->pp_ops[PA].pp_get_version = pp_pa_get_version;
+
+	/* Gamut ops */
+	ops->pp_ops[GAMUT].pp_set_config = pp_gamut_set_config;
+	ops->pp_ops[GAMUT].pp_get_config = pp_gamut_get_config;
+	ops->pp_ops[GAMUT].pp_get_version = pp_gamut_get_version;
+
+	/* Dither ops */
+	ops->pp_ops[DITHER].pp_set_config = pp_dither_set_config;
+	ops->pp_ops[DITHER].pp_get_config = pp_dither_get_config;
+	ops->pp_ops[DITHER].pp_get_version = pp_dither_get_version;
+
+	/* QSEED ops: not supported on v1.7 */
+	ops->pp_ops[QSEED].pp_set_config = NULL;
+	ops->pp_ops[QSEED].pp_get_config = NULL;
+	ops->pp_ops[QSEED].pp_get_version = NULL;
+
+	/* HIST_LUT ops */
+	ops->pp_ops[HIST_LUT].pp_set_config = pp_hist_lut_set_config;
+	ops->pp_ops[HIST_LUT].pp_get_config = pp_hist_lut_get_config;
+	ops->pp_ops[HIST_LUT].pp_get_version = pp_hist_lut_get_version;
+
+	/* HIST ops: read-only (collection is driven elsewhere) */
+	ops->pp_ops[HIST].pp_set_config = NULL;
+	ops->pp_ops[HIST].pp_get_config = pp_hist_get_config;
+	ops->pp_ops[HIST].pp_get_version = NULL;
+
+	/* Set opmode pointers */
+	ops->pp_opmode_config = pp_opmode_config;
+
+	ops->get_hist_offset = pp_get_hist_offset;
+	ops->get_hist_isr_info = pp_get_hist_isr;
+	ops->is_sspp_hist_supp = pp_is_sspp_hist_supp;
+	ops->gamut_clk_gate_en = pp_gamut_clock_gating_en;
+	return &config_data;
+}
+
+/*
+ * Translate the per-feature status bits in pp_sts into the hardware
+ * opmode bits for the given pipeline location (SSPP_VIG/DSPP/LM; RGB and
+ * DMA pipes have no PP opmode bits).  "side" selects the left/right
+ * split-display half for the split-aware status checks.
+ */
+static void pp_opmode_config(int location, struct pp_sts_type *pp_sts,
+		u32 *opmode, int side)
+{
+	if (!pp_sts || !opmode) {
+		pr_err("Invalid pp_sts %pK or opmode %pK\n", pp_sts, opmode);
+		return;
+	}
+	switch (location) {
+	case SSPP_RGB:
+		break;
+	case SSPP_DMA:
+		break;
+	case SSPP_VIG:
+		if (pp_sts->pa_sts & PP_STS_ENABLE)
+			pp_pa_update_vig_opmode(pp_sts->pa_sts, opmode);
+		if (pp_sts->enhist_sts & PP_STS_ENABLE) {
+			/* hist LUT requires the PA block to be enabled too */
+			*opmode |= HIST_LUT_VIG_OP_ENABLE |
+				  PA_VIG_OP_ENABLE;
+			if (pp_sts->enhist_sts & PP_STS_PA_LUT_FIRST)
+				*opmode |= HIST_LUT_VIG_OP_FIRST_EN;
+		}
+		break;
+	case DSPP:
+		if (pp_sts_is_enabled(pp_sts->pa_sts, side))
+			pp_pa_update_dspp_opmode(pp_sts->pa_sts, opmode);
+		if (pp_sts_is_enabled(pp_sts->igc_sts, side))
+			*opmode |= IGC_DSPP_OP_MODE_EN;
+		if (pp_sts->enhist_sts & PP_STS_ENABLE) {
+			/* hist LUT requires the PA block to be enabled too */
+			*opmode |= HIST_LUT_DSPP_OP_ENABLE |
+				  PA_DSPP_OP_ENABLE;
+			if (pp_sts->enhist_sts & PP_STS_PA_LUT_FIRST)
+				*opmode |= HIST_LUT_DSPP_OP_FIRST_EN;
+		}
+		if (pp_sts_is_enabled(pp_sts->dither_sts, side))
+			*opmode |= MDSS_MDP_DSPP_OP_DST_DITHER_EN;
+		break;
+	case LM:
+		/* LM argc (pgc) enable needs no opmode bit here */
+		if (pp_sts->argc_sts & PP_STS_ENABLE)
+			pr_debug("pgc in LM enabled\n");
+		break;
+	default:
+		pr_err("Invalid block type %d\n", location);
+		break;
+	}
+}
+
+/*
+ * Read back the enhancement-histogram LUT from hardware into the
+ * user-space buffer described by cfg_data (struct mdp_hist_lut_data with
+ * a v1.7 payload).  Each register packs two 10-bit LUT entries (low word
+ * and high word), so ENHIST_LUT_ENTRIES/2 registers are read.
+ *
+ * Returns 0 on success (or when MDP_PP_OPS_READ is not set), -EINVAL on
+ * bad arguments/version/length/block, -EFAULT on user-copy failures,
+ * -ENOMEM on allocation failure.
+ */
+static int pp_hist_lut_get_config(char __iomem *base_addr, void *cfg_data,
+			   u32 block_type, u32 disp_num)
+{
+
+	int ret = 0, i = 0;
+	char __iomem *hist_addr = NULL;	/* NULL until block_type validated */
+	u32 sz = 0, temp = 0, *data = NULL;
+	struct mdp_hist_lut_data_v1_7 lut_data_v1_7;
+	struct mdp_hist_lut_data_v1_7 *lut_data = &lut_data_v1_7;
+	struct mdp_hist_lut_data *lut_cfg_data = NULL;
+
+	if (!base_addr || !cfg_data) {
+		pr_err("invalid params base_addr %pK cfg_data %pK\n",
+		       base_addr, cfg_data);
+		return -EINVAL;
+	}
+
+	lut_cfg_data = (struct mdp_hist_lut_data *) cfg_data;
+	if (!(lut_cfg_data->ops & MDP_PP_OPS_READ)) {
+		pr_err("read ops not set for hist_lut %d\n", lut_cfg_data->ops);
+		return 0;
+	}
+	if (lut_cfg_data->version != mdp_hist_lut_v1_7 ||
+		!lut_cfg_data->cfg_payload) {
+		pr_err("invalid hist_lut version %d payload %pK\n",
+		       lut_cfg_data->version, lut_cfg_data->cfg_payload);
+		return -EINVAL;
+	}
+	/* the payload itself lives in user space; copy the descriptor in */
+	if (copy_from_user(lut_data, (void __user *) lut_cfg_data->cfg_payload,
+			sizeof(*lut_data))) {
+		pr_err("copy from user failed for lut_data\n");
+		return -EFAULT;
+	}
+	if (lut_data->len != ENHIST_LUT_ENTRIES) {
+		pr_err("invalid hist_lut len %d\n", lut_data->len);
+		return -EINVAL;
+	}
+	sz = ENHIST_LUT_ENTRIES * sizeof(u32);
+	if (!access_ok(VERIFY_WRITE, lut_data->data, sz)) {
+		pr_err("invalid lut address for hist_lut sz %d\n", sz);
+		return -EFAULT;
+	}
+
+	switch (block_type) {
+	case SSPP_VIG:
+		hist_addr = base_addr + REG_SSPP_VIG_HIST_LUT_BASE;
+		break;
+	case DSPP:
+		hist_addr = base_addr + REG_DSPP_HIST_LUT_BASE;
+		break;
+	default:
+		pr_err("Invalid block type %d\n", block_type);
+		ret = -EINVAL;
+		break;
+	}
+
+	if (ret) {
+		pr_err("Failed to read hist_lut table ret %d\n", ret);
+		return ret;
+	}
+
+	data = kzalloc(sz, GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	/* unpack two 10-bit entries per 32-bit register */
+	for (i = 0; i < ENHIST_LUT_ENTRIES; i += 2) {
+		temp = readl_relaxed(hist_addr);
+		data[i] = temp & ENHIST_LOWER_VALUE_MASK;
+		data[i + 1] =
+			(temp & ENHIST_UPPER_VALUE_MASK) >> ENHIST_BIT_SHIFT;
+		hist_addr += 4;
+	}
+	if (copy_to_user(lut_data->data, data, sz)) {
+		pr_err("failed to copy the hist_lut back to user\n");
+		ret = -EFAULT;
+	}
+	kfree(data);
+	return ret;
+}
+
+/*
+ * Program the enhancement-histogram LUT for a VIG pipe or DSPP from an
+ * in-kernel v1.7 payload, packing two 10-bit entries per register, then
+ * trigger the HW double-buffer swap.  Also updates pp_sts->enhist_sts
+ * (ENABLE/DISABLE and PA_LUT_FIRST ordering) from the requested ops.
+ *
+ * Returns 0 on success, -EINVAL on bad arguments/version/payload/block.
+ */
+static int pp_hist_lut_set_config(char __iomem *base_addr,
+		struct pp_sts_type *pp_sts, void *cfg_data,
+		u32 block_type)
+{
+	int ret = 0, i = 0;
+	u32 temp = 0;
+	struct mdp_hist_lut_data *lut_cfg_data = NULL;
+	struct mdp_hist_lut_data_v1_7 *lut_data = NULL;
+	char __iomem *hist_addr = NULL, *swap_addr = NULL;
+
+	if (!base_addr || !cfg_data || !pp_sts) {
+		pr_err("invalid params base_addr %pK cfg_data %pK pp_sts_type %pK\n",
+		      base_addr, cfg_data, pp_sts);
+		return -EINVAL;
+	}
+
+	lut_cfg_data = (struct mdp_hist_lut_data *) cfg_data;
+	if (lut_cfg_data->version != mdp_hist_lut_v1_7) {
+		pr_err("invalid hist_lut version %d\n", lut_cfg_data->version);
+		return -EINVAL;
+	}
+
+	if (lut_cfg_data->ops & MDP_PP_OPS_DISABLE) {
+		/* skip programming, just update the status bits */
+		pr_debug("Disable Hist LUT\n");
+		goto bail_out;
+	}
+
+	if (!(lut_cfg_data->ops & ~(MDP_PP_OPS_READ))) {
+		pr_err("only read ops set for lut\n");
+		return ret;
+	}
+	if (!(lut_cfg_data->ops & MDP_PP_OPS_WRITE)) {
+		pr_debug("non write ops set %d\n", lut_cfg_data->ops);
+		goto bail_out;
+	}
+	lut_data = lut_cfg_data->cfg_payload;
+	if (!lut_data) {
+		pr_err("invalid hist_lut cfg_payload %pK\n", lut_data);
+		return -EINVAL;
+	}
+
+	if (lut_data->len != ENHIST_LUT_ENTRIES || !lut_data->data) {
+		pr_err("invalid hist_lut len %d data %pK\n",
+		       lut_data->len, lut_data->data);
+		return -EINVAL;
+	}
+	switch (block_type) {
+	case SSPP_VIG:
+		hist_addr = base_addr + REG_SSPP_VIG_HIST_LUT_BASE;
+		swap_addr = base_addr +
+			REG_SSPP_VIG_HIST_SWAP_BASE;
+		break;
+	case DSPP:
+		hist_addr = base_addr + REG_DSPP_HIST_LUT_BASE;
+		swap_addr = base_addr + REG_DSPP_HIST_SWAP_BASE;
+		break;
+	default:
+		pr_err("Invalid block type %d\n", block_type);
+		ret = -EINVAL;
+		break;
+	}
+	if (ret) {
+		pr_err("hist_lut table not updated ret %d\n", ret);
+		return ret;
+	}
+	/* pack two consecutive 10-bit entries into each 32-bit register */
+	for (i = 0; i < ENHIST_LUT_ENTRIES; i += 2) {
+		temp = (lut_data->data[i] & ENHIST_LOWER_VALUE_MASK) |
+			((lut_data->data[i + 1] & ENHIST_LOWER_VALUE_MASK)
+			 << ENHIST_BIT_SHIFT);
+
+		writel_relaxed(temp, hist_addr);
+		hist_addr += 4;
+	}
+	if (lut_cfg_data->hist_lut_first)
+		pp_sts->enhist_sts |= PP_STS_PA_LUT_FIRST;
+	else
+		pp_sts->enhist_sts &= ~PP_STS_PA_LUT_FIRST;
+
+	/* commit the newly written table via the double-buffer swap */
+	writel_relaxed(1, swap_addr);
+
+bail_out:
+	if (lut_cfg_data->ops & MDP_PP_OPS_DISABLE)
+		pp_sts->enhist_sts &= ~PP_STS_ENABLE;
+	else if (lut_cfg_data->ops & MDP_PP_OPS_ENABLE)
+		pp_sts->enhist_sts |= PP_STS_ENABLE;
+
+	return ret;
+}
+
+/* Dither readback is not implemented on v1.7; always -ENOTSUPP. */
+static int pp_dither_get_config(char __iomem *base_addr, void *cfg_data,
+			   u32 block_type, u32 disp_num)
+{
+	pr_err("Operation not supported\n");
+	return -ENOTSUPP;
+}
+
+/*
+ * Program DSPP destination dither: per-channel depth (via
+ * dither_depth_map), optional temporal dither, and the 4x4 dither matrix
+ * (caller-supplied or the default dither_matrix when len == 0).  Updates
+ * pp_sts->dither_sts ENABLE/DISABLE and split bits from flags.
+ *
+ * Returns 0 on success, -ENOTSUPP for non-DSPP blocks, -EINVAL on bad
+ * arguments/version/depths/len or a READ request.
+ */
+static int pp_dither_set_config(char __iomem *base_addr,
+		struct pp_sts_type *pp_sts, void *cfg_data,
+		u32 block_type)
+{
+	int i = 0;
+	u32 data;
+	struct mdp_dither_cfg_data *dither_cfg_data = NULL;
+	struct mdp_dither_data_v1_7 *dither_data = NULL;
+	uint32_t *pdata = NULL;
+
+	if (!base_addr || !cfg_data || !pp_sts) {
+		pr_err("invalid params base_addr %pK cfg_data %pK pp_sts_type %pK\n",
+		      base_addr, cfg_data, pp_sts);
+		return -EINVAL;
+	}
+	if (block_type != DSPP)
+		return -ENOTSUPP;
+	dither_cfg_data = (struct mdp_dither_cfg_data *) cfg_data;
+
+	if (dither_cfg_data->version != mdp_dither_v1_7) {
+		pr_err("invalid dither version %d\n", dither_cfg_data->version);
+		return -EINVAL;
+	}
+	if (dither_cfg_data->flags & MDP_PP_OPS_READ) {
+		pr_err("Invalid context for read operation\n");
+		return -EINVAL;
+	}
+	if (dither_cfg_data->flags & MDP_PP_OPS_DISABLE) {
+		/* skip programming, just update the status bits */
+		pr_debug("set disable dither\n");
+		goto bail_out;
+	}
+
+	if (!(dither_cfg_data->flags & MDP_PP_OPS_WRITE)) {
+		pr_debug("non write ops set %d\n", dither_cfg_data->flags);
+		goto bail_out;
+	}
+
+	dither_data = dither_cfg_data->cfg_payload;
+	if (!dither_data) {
+		pr_err("invalid payload for dither %pK\n", dither_data);
+		return -EINVAL;
+	}
+
+	if ((dither_data->g_y_depth >= DITHER_DEPTH_MAP_INDEX) ||
+		(dither_data->b_cb_depth >= DITHER_DEPTH_MAP_INDEX) ||
+		(dither_data->r_cr_depth >= DITHER_DEPTH_MAP_INDEX) ||
+		(dither_data->len > DITHER_MATRIX_INDEX)) {
+		pr_err("invalid data for dither, g_y_depth %d b_cb_depth %d r_cr_depth %d len %d\n",
+			dither_data->g_y_depth, dither_data->b_cb_depth,
+			dither_data->r_cr_depth, dither_data->len);
+		return -EINVAL;
+	}
+	/* len == 0 selects the built-in default matrix */
+	if (!dither_data->len)
+		pdata = dither_matrix;
+	else
+		pdata = dither_data->data;
+
+	/* depth control register: 2 bits per channel + temporal enable */
+	data = dither_depth_map[dither_data->g_y_depth];
+	data |= dither_depth_map[dither_data->b_cb_depth] << 2;
+	data |= dither_depth_map[dither_data->r_cr_depth] << 4;
+	data |= (dither_data->temporal_en) ? (1  << 8) : 0;
+	writel_relaxed(data, base_addr);
+	base_addr += DITHER_MATRIX_OFF;
+	/* pack four 4-bit matrix entries per register, 16 entries total */
+	for (i = 0; i < DITHER_MATRIX_INDEX; i += 4) {
+		data = pdata[i] | (pdata[i + 1] << 4) |
+		       (pdata[i + 2] << 8) | (pdata[i + 3] << 12);
+		writel_relaxed(data, base_addr);
+		base_addr += 4;
+	}
+bail_out:
+	if (dither_cfg_data->flags & MDP_PP_OPS_DISABLE)
+		pp_sts->dither_sts &= ~PP_STS_ENABLE;
+	else if (dither_cfg_data->flags & MDP_PP_OPS_ENABLE)
+		pp_sts->dither_sts |= PP_STS_ENABLE;
+	pp_sts_set_split_bits(&pp_sts->dither_sts, dither_cfg_data->flags);
+
+	return 0;
+}
+
+/*
+ * pp_hist_get_config() - read back the histogram bins for one block.
+ * @base_addr: MMIO base of the VIG or DSPP block.
+ * @cfg_data: struct pp_hist_col_info; bins are stored into ->data[].
+ * @block_type: SSPP_VIG or DSPP, selects the data register offset.
+ * @disp_num: unused here.
+ *
+ * Returns the sum of all HIST_V_SIZE bins on success, or a negative
+ * errno on bad arguments/block type.
+ * NOTE(review): the return type is int while the sum accumulates in a
+ * u32 — a large enough total could wrap negative and be mistaken for an
+ * error by callers; confirm the maximum possible bin total.
+ */
+static int pp_hist_get_config(char __iomem *base_addr, void *cfg_data,
+			   u32 block_type, u32 disp_num)
+{
+	int ret = 0, i = 0;
+	u32 sum = 0;
+	struct pp_hist_col_info *hist_info = NULL;
+
+	if (!base_addr || !cfg_data) {
+		pr_err("invalid params base_addr %pK cfg_data %pK\n",
+		       base_addr, cfg_data);
+		return -EINVAL;
+	}
+
+	hist_info = (struct pp_hist_col_info *) cfg_data;
+
+	/* Point at the first histogram data register for this block type. */
+	switch (block_type) {
+	case SSPP_VIG:
+		base_addr += HIST_DATA_OFF_SSPP_V1_7;
+		break;
+	case DSPP:
+		base_addr += HIST_DATA_OFF_DSPP_V1_7;
+		break;
+	default:
+		pr_err("Invalid block type %d\n", block_type);
+		ret = -EINVAL;
+		break;
+	}
+	if (ret) {
+		pr_err("Failed to read hist data ret %d\n", ret);
+		return ret;
+	}
+
+	/* Bins are laid out as consecutive 32-bit registers. */
+	for (i = 0; i < HIST_V_SIZE; i++) {
+		hist_info->data[i] = readl_relaxed(base_addr) & HIST_DATA_MASK;
+		base_addr += 0x4;
+		sum += hist_info->data[i];
+	}
+	/*
+	 * NOTE(review): no lock is taken here; presumably the caller holds
+	 * the histogram lock while hist_cnt_read is bumped — confirm.
+	 */
+	hist_info->hist_cnt_read++;
+	return sum;
+}
+
+/*
+ * pp_get_hist_offset() - report the histogram control register offset
+ * for the given block type.
+ * @block: SSPP_VIG or DSPP.
+ * @ctl_off: out-parameter receiving the control offset.
+ *
+ * Returns 0 on success, -EINVAL for a NULL pointer or unknown block.
+ */
+static int pp_get_hist_offset(u32 block, u32 *ctl_off)
+{
+	if (!ctl_off) {
+		pr_err("invalid params ctl_off %pK\n", ctl_off);
+		return -EINVAL;
+	}
+
+	if (block == SSPP_VIG) {
+		*ctl_off = HIST_CTL_OFF_SSPP_V1_7;
+		return 0;
+	}
+	if (block == DSPP) {
+		*ctl_off = HIST_CTL_OFF_DSPP_V1_7;
+		return 0;
+	}
+
+	pr_err("Invalid block type %d\n", block);
+	return -EINVAL;
+}
+
+/*
+ * pp_get_hist_isr() - report the histogram interrupt mask for this HW
+ * revision.
+ * @isr_mask: out-parameter receiving the interrupt bit mask.
+ *
+ * Returns 0 on success, -EINVAL if @isr_mask is NULL.
+ */
+static int pp_get_hist_isr(u32 *isr_mask)
+{
+	if (isr_mask) {
+		*isr_mask = HIST_V3_INTR_BIT_MASK;
+		return 0;
+	}
+
+	pr_err("invalid params isr_mask %pK\n", isr_mask);
+	return -EINVAL;
+}
+
+/* SSPP-side histogram collection is not available on this HW revision. */
+static bool pp_is_sspp_hist_supp(void)
+{
+	return false;
+}
+
+/*
+ * pp_gamut_get_config() - read back the 3D gamut LUTs and scale-off
+ * tables into the user-space payload of @cfg_data.
+ * @base_addr: MMIO base of the gamut block.
+ * @cfg_data: struct mdp_gamut_cfg_data with a __user v1.7 payload.
+ * @block_type: unused here.
+ * @disp_num: unused here.
+ *
+ * Fix: the clock-gating restore at bail_out previously used base_addr
+ * after it had been advanced by GAMUT_C0_SCALE_OFF plus the scale-table
+ * reads, so the restore write landed on the wrong register.  The scale
+ * table is now read through a separate cursor and base_addr is left
+ * untouched.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int pp_gamut_get_config(char __iomem *base_addr, void *cfg_data,
+			   u32 block_type, u32 disp_num)
+{
+	u32 val = 0, sz = 0, sz_scale = 0, mode = 0, tbl_sz = 0;
+	u32 index_start = 0;
+	int i = 0, j = 0, ret = 0;
+	u32 *gamut_tbl = NULL, *gamut_c0 = NULL, *gamut_c1c2 = NULL;
+	char __iomem *addr = NULL;
+	struct mdp_gamut_cfg_data *gamut_cfg = (struct mdp_gamut_cfg_data *)
+						cfg_data;
+	struct mdp_gamut_data_v1_7 gamut_data;
+	u32 clk_gate_disable = 0;
+
+	if (!base_addr || !cfg_data) {
+		pr_err("invalid params base_addr %pK cfg_data %pK\n",
+		       base_addr, cfg_data);
+		return -EINVAL;
+	}
+	if (gamut_cfg->version != mdp_gamut_v1_7) {
+		pr_err("unsupported version of gamut %d\n",
+		       gamut_cfg->version);
+		return -EINVAL;
+	}
+	/* Pull the payload in to learn the user buffer pointers. */
+	if (copy_from_user(&gamut_data, gamut_cfg->cfg_payload,
+			   sizeof(gamut_data))) {
+		pr_err("copy from user failed for gamut_data\n");
+		return -EFAULT;
+	}
+	/* Table geometry depends on the mode currently programmed in HW. */
+	mode = readl_relaxed(base_addr + GAMUT_OP_MODE_OFF);
+	if (mode & GAMUT_COARSE_EN) {
+		tbl_sz = MDP_GAMUT_TABLE_V1_7_COARSE_SZ;
+		sz = tbl_sz * sizeof(u32);
+		index_start = GAMUT_COARSE_INDEX;
+		gamut_data.mode = mdp_gamut_coarse_mode;
+	} else {
+		tbl_sz = MDP_GAMUT_TABLE_V1_7_SZ;
+		sz = tbl_sz * sizeof(u32);
+		index_start = GAMUT_FINE_INDEX;
+		gamut_data.mode = mdp_gamut_fine_mode;
+	}
+	gamut_data.map_en = mode & GAMUT_MAP_EN;
+	sz_scale = MDP_GAMUT_SCALE_OFF_SZ * sizeof(u32);
+	/* Validate every user destination before touching the hardware. */
+	for (i = 0; i < MDP_GAMUT_TABLE_NUM_V1_7; i++) {
+		if (!access_ok(VERIFY_WRITE, gamut_data.c0_data[i], sz)) {
+			pr_err("invalid c0 address for sz %d table index %d\n",
+				sz, (i+1));
+			return -EFAULT;
+		}
+		if (!access_ok(VERIFY_WRITE, gamut_data.c1_c2_data[i], sz)) {
+			pr_err("invalid c1c2 address for sz %d table index %d\n",
+				sz, (i+1));
+			return -EFAULT;
+		}
+		gamut_data.tbl_size[i] = tbl_sz;
+		if (i < MDP_GAMUT_SCALE_OFF_TABLE_NUM) {
+			if (!access_ok(VERIFY_WRITE,
+			     gamut_data.scale_off_data[i], sz_scale)) {
+				pr_err("invalid scale address for sz %d color c%d\n",
+					sz_scale, i);
+				return -EFAULT;
+			}
+			gamut_data.tbl_scale_off_sz[i] =
+			MDP_GAMUT_SCALE_OFF_SZ;
+		}
+	}
+	/* allocate for c0 and c1c2 tables */
+	gamut_tbl = kzalloc((sz * 2), GFP_KERNEL);
+	if (!gamut_tbl)
+		return -ENOMEM;
+
+	gamut_c0 = gamut_tbl;
+	gamut_c1c2 = gamut_c0 + tbl_sz;
+	/* Disable clock gating while streaming the LUTs out. */
+	writel_relaxed(GAMUT_CLK_GATING_INACTIVE, base_addr + GAMUT_CLK_CTRL);
+	clk_gate_disable = 1;
+	for (i = 0; i < MDP_GAMUT_TABLE_NUM_V1_7; i++) {
+		val = index_start;
+		val |= GAMUT_READ_TABLE_EN;
+		val |= GAMUT_TABLE_SELECT(i);
+		writel_relaxed(val, (base_addr + GAMUT_TABLE_INDEX));
+		/*
+		 * Reading LOWER_GB then UPPER_R fetches one entry;
+		 * presumably the hardware auto-increments the table
+		 * index between pairs — confirm against the HW spec.
+		 */
+		for (j = 0; j < tbl_sz; j++) {
+			gamut_c1c2[j] = readl_relaxed(base_addr +
+					GAMUT_TABLE_LOWER_GB);
+			gamut_c0[j] = readl_relaxed(base_addr +
+				      GAMUT_TABLE_UPPER_R);
+		}
+		if (copy_to_user(gamut_data.c0_data[i], gamut_c0, sz)) {
+			pr_err("copy to user failed for table %d c0 sz %d\n",
+			       i, sz);
+			ret = -EFAULT;
+			goto bail_out;
+		}
+		if (copy_to_user(gamut_data.c1_c2_data[i], gamut_c1c2, sz)) {
+			pr_err("copy to user failed for table %d c1c2 sz %d\n",
+			       i, sz);
+			ret = -EFAULT;
+			goto bail_out;
+		}
+	}
+	/* Reuse (or grow) the scratch buffer for the scale-off tables. */
+	sz_scale = MDP_GAMUT_SCALE_OFF_TABLE_NUM * MDP_GAMUT_SCALE_OFF_SZ
+		   * sizeof(u32);
+	if (sz < sz_scale) {
+		kfree(gamut_tbl);
+		gamut_tbl = kzalloc(sz_scale, GFP_KERNEL);
+		if (!gamut_tbl) {
+			pr_err("failed to alloc scale tbl size %d\n",
+			       sz_scale);
+			ret = -ENOMEM;
+			goto bail_out;
+		}
+	}
+	gamut_c0 = gamut_tbl;
+	/* Read the scale tables through a cursor; keep base_addr intact. */
+	addr = base_addr + GAMUT_C0_SCALE_OFF;
+	for (i = 0;
+	     i < (MDP_GAMUT_SCALE_OFF_TABLE_NUM * MDP_GAMUT_SCALE_OFF_SZ);
+	     i++) {
+		gamut_c0[i] = readl_relaxed(addr);
+		addr += 4;
+	}
+	for (i = 0; i < MDP_GAMUT_SCALE_OFF_TABLE_NUM; i++) {
+		if (copy_to_user(gamut_data.scale_off_data[i],
+				 &gamut_c0[i * MDP_GAMUT_SCALE_OFF_SZ],
+				 (MDP_GAMUT_SCALE_OFF_SZ * sizeof(u32)))) {
+			pr_err("copy to user failed for scale color c%d\n",
+			       i);
+			ret = -EFAULT;
+			goto bail_out;
+		}
+	}
+	if (copy_to_user(gamut_cfg->cfg_payload, &gamut_data,
+			 sizeof(gamut_data))) {
+		pr_err("failed to copy the gamut info into payload\n");
+		ret = -EFAULT;
+	}
+bail_out:
+	/* Restore partial clock gating at the real, unmodified base. */
+	if (clk_gate_disable)
+		writel_relaxed(GAMUT_CLK_GATING_PARTIAL_ACTIVE,
+					   base_addr + GAMUT_CLK_CTRL);
+	kfree(gamut_tbl);
+	return ret;
+}
+
+/*
+ * pp_gamut_set_config() - program the 3D gamut LUTs from a user config.
+ * @base_addr: MMIO base of the gamut block.
+ * @pp_sts: per-pipe status; gamut_sts enable/split bits are updated.
+ * @cfg_data: struct mdp_gamut_cfg_data; payload must be mdp_gamut_v1_7.
+ * @block_type: unused here.
+ *
+ * Tables are written only when MDP_PP_OPS_WRITE is set; the enable or
+ * disable state requested in flags is always applied at the end.
+ * Returns 0 on success or a negative errno.
+ */
+static int pp_gamut_set_config(char __iomem *base_addr,
+		struct pp_sts_type *pp_sts, void *cfg_data,
+		u32 block_type)
+{
+	int val = 0, ret = 0, i = 0, j = 0;
+	u32 index_start = 0, tbl_sz = 0;
+	struct mdp_gamut_cfg_data *gamut_cfg_data = NULL;
+	struct mdp_gamut_data_v1_7 *gamut_data = NULL;
+	char __iomem *base_addr_scale = base_addr;
+	uint64_t gamut_val;
+
+	if (!base_addr || !cfg_data || !pp_sts) {
+		pr_err("invalid params base_addr %pK cfg_data %pK pp_sts_type %pK\n",
+		      base_addr, cfg_data, pp_sts);
+		return -EINVAL;
+	}
+	gamut_cfg_data = (struct mdp_gamut_cfg_data *) cfg_data;
+	if (gamut_cfg_data->version != mdp_gamut_v1_7) {
+		pr_err("invalid gamut version %d\n", gamut_cfg_data->version);
+		return -EINVAL;
+	}
+	/* Nothing to do when the only op requested is a read. */
+	if (!(gamut_cfg_data->flags & ~(MDP_PP_OPS_READ))) {
+		pr_debug("only read ops is set %d", gamut_cfg_data->flags);
+		return 0;
+	}
+
+	/* Disable skips programming; gamut_data stays NULL on this path. */
+	if (gamut_cfg_data->flags & MDP_PP_OPS_DISABLE) {
+		pr_debug("disabling gamut\n");
+		goto bail_out;
+	}
+
+	gamut_data = (struct mdp_gamut_data_v1_7 *)
+		      gamut_cfg_data->cfg_payload;
+	if (!gamut_data) {
+		pr_err("invalid payload for gamut %pK\n", gamut_data);
+		return -EINVAL;
+	}
+
+	if (gamut_data->mode != mdp_gamut_fine_mode &&
+	    gamut_data->mode != mdp_gamut_coarse_mode) {
+		pr_err("invalid gamut mode %d", gamut_data->mode);
+		return -EINVAL;
+	}
+	index_start = (gamut_data->mode == mdp_gamut_fine_mode) ?
+		       GAMUT_FINE_INDEX : GAMUT_COARSE_INDEX;
+	tbl_sz = (gamut_data->mode == mdp_gamut_fine_mode) ?
+		  MDP_GAMUT_TABLE_V1_7_SZ : MDP_GAMUT_TABLE_V1_7_COARSE_SZ;
+	if (!(gamut_cfg_data->flags & MDP_PP_OPS_WRITE))
+		goto bail_out;
+	/* Sanity check for all tables */
+	for (i = 0; i < MDP_GAMUT_TABLE_NUM_V1_7; i++) {
+		if (!gamut_data->c0_data[i] || !gamut_data->c1_c2_data[i]
+		    || (gamut_data->tbl_size[i] != tbl_sz)) {
+			pr_err("invalid param for c0 %pK c1c2 %pK table %d size %d expected sz %d\n",
+			       gamut_data->c0_data[i],
+			       gamut_data->c1_c2_data[i], i,
+			       gamut_data->tbl_size[i], tbl_sz);
+			ret = -EINVAL;
+			goto bail_out;
+		}
+		if (i < MDP_GAMUT_SCALE_OFF_TABLE_NUM &&
+		    (!gamut_data->scale_off_data[i] ||
+		    (gamut_data->tbl_scale_off_sz[i] !=
+		    MDP_GAMUT_SCALE_OFF_SZ))) {
+			pr_err("invalid param for scale table %pK for c%d size %d expected size%d\n",
+				gamut_data->scale_off_data[i], i,
+				gamut_data->tbl_scale_off_sz[i],
+				MDP_GAMUT_SCALE_OFF_SZ);
+			ret = -EINVAL;
+			goto bail_out;
+		}
+	}
+	base_addr_scale += GAMUT_C0_SCALE_OFF;
+	/* Disable clock gating for the duration of the table load. */
+	writel_relaxed(GAMUT_CLK_GATING_INACTIVE, base_addr + GAMUT_CLK_CTRL);
+	for (i = 0; i < MDP_GAMUT_TABLE_NUM_V1_7; i++) {
+		val = index_start;
+		val |= GAMUT_TABLE_SELECT(i);
+		writel_relaxed(val, (base_addr + GAMUT_TABLE_INDEX));
+
+		/*
+		 * Seed the first c1c2 word, then stream the remaining
+		 * entries as packed 64-bit (c1c2 << 32 | c0) writes.  The
+		 * write order matters; presumably the hardware latches an
+		 * entry on each UPPER_R write and auto-increments — the
+		 * sequence must not be reordered.
+		 */
+		writel_relaxed(gamut_data->c1_c2_data[i][0],
+				base_addr + GAMUT_TABLE_LOWER_GB);
+		for (j = 0; j < gamut_data->tbl_size[i] - 1; j++) {
+			gamut_val = gamut_data->c1_c2_data[i][j + 1];
+			gamut_val = (gamut_val << 32) |
+					gamut_data->c0_data[i][j];
+			writeq_relaxed(gamut_val,
+					base_addr + GAMUT_TABLE_UPPER_R);
+		}
+		/* The last c0 entry has no paired c1c2 word. */
+		writel_relaxed(gamut_data->c0_data[i][j],
+				base_addr + GAMUT_TABLE_UPPER_R);
+		if ((i >= MDP_GAMUT_SCALE_OFF_TABLE_NUM) ||
+				(!gamut_data->map_en))
+			continue;
+		for (j = 0; j < MDP_GAMUT_SCALE_OFF_SZ; j++) {
+			writel_relaxed((gamut_data->scale_off_data[i][j]),
+				       base_addr_scale);
+			base_addr_scale += 4;
+		}
+	}
+	writel_relaxed(GAMUT_CLK_GATING_PARTIAL_ACTIVE,
+				   base_addr + GAMUT_CLK_CTRL);
+bail_out:
+	/* On success, mirror the requested state into sts and op mode. */
+	if (!ret) {
+		val = 0;
+		pp_sts_set_split_bits(&pp_sts->gamut_sts,
+				gamut_cfg_data->flags);
+		if (gamut_cfg_data->flags & MDP_PP_OPS_DISABLE) {
+			pp_sts->gamut_sts &= ~PP_STS_ENABLE;
+			writel_relaxed(val, base_addr + GAMUT_OP_MODE_OFF);
+		} else if (gamut_cfg_data->flags & MDP_PP_OPS_ENABLE) {
+			pp_sts->gamut_sts |= PP_STS_ENABLE;
+			if (pp_sts_is_enabled(pp_sts->gamut_sts,
+					      pp_sts->side_sts)) {
+				if (gamut_data->mode == mdp_gamut_coarse_mode)
+					val |= GAMUT_COARSE_EN;
+				if (gamut_data->map_en)
+					val |= GAMUT_MAP_EN;
+				val |= GAMUT_ENABLE;
+			}
+			writel_relaxed(val, base_addr + GAMUT_OP_MODE_OFF);
+		}
+	}
+	return ret;
+}
+
+/*
+ * pp_pcc_set_config() - program the polynomial color correction (PCC)
+ * coefficient registers from a user config.
+ * @base_addr: MMIO base of the PCC block.
+ * @pp_sts: per-pipe status; pcc_sts enable/split bits are updated.
+ * @cfg_data: struct mdp_pcc_cfg_data; payload must be mdp_pcc_v1_7.
+ * @block_type: unused here.
+ *
+ * Each coefficient group is three consecutive registers (r, g, b).
+ * Returns 0 on success or a negative errno.
+ *
+ * Fix: the read-only-ops pr_info was missing its trailing newline.
+ */
+static int pp_pcc_set_config(char __iomem *base_addr,
+			struct pp_sts_type *pp_sts, void *cfg_data,
+			u32 block_type)
+{
+	struct mdp_pcc_cfg_data *pcc_cfg_data = NULL;
+	struct mdp_pcc_data_v1_7 *pcc_data = NULL;
+	char __iomem *addr = NULL;
+	u32 opmode = 0;
+
+	if (!base_addr || !cfg_data || !pp_sts) {
+		pr_err("invalid params base_addr %pK cfg_data %pK pp_sts %pK\n",
+			base_addr, cfg_data, pp_sts);
+		return -EINVAL;
+	}
+	pcc_cfg_data = (struct mdp_pcc_cfg_data *) cfg_data;
+	if (pcc_cfg_data->version != mdp_pcc_v1_7) {
+		pr_err("invalid pcc version %d\n", pcc_cfg_data->version);
+		return -EINVAL;
+	}
+	/* Nothing to do when the only op requested is a read. */
+	if (!(pcc_cfg_data->ops & ~(MDP_PP_OPS_READ))) {
+		pr_info("only read ops is set %d\n", pcc_cfg_data->ops);
+		return 0;
+	}
+	pcc_data = pcc_cfg_data->cfg_payload;
+	if (!pcc_data) {
+		pr_err("invalid payload for pcc %pK\n", pcc_data);
+		return -EINVAL;
+	}
+
+	/* Coefficients are only reloaded on an explicit write request. */
+	if (!(pcc_cfg_data->ops & MDP_PP_OPS_WRITE))
+		goto bail_out;
+
+	addr = base_addr + PCC_CONST_COEFF_OFF;
+	writel_relaxed(pcc_data->r.c & PCC_CONST_COEFF_MASK, addr);
+	writel_relaxed(pcc_data->g.c & PCC_CONST_COEFF_MASK, addr + 4);
+	writel_relaxed(pcc_data->b.c & PCC_CONST_COEFF_MASK, addr + 8);
+
+	addr = base_addr + PCC_R_COEFF_OFF;
+	writel_relaxed(pcc_data->r.r & PCC_COEFF_MASK, addr);
+	writel_relaxed(pcc_data->g.r & PCC_COEFF_MASK, addr + 4);
+	writel_relaxed(pcc_data->b.r & PCC_COEFF_MASK, addr + 8);
+
+	addr = base_addr + PCC_G_COEFF_OFF;
+	writel_relaxed(pcc_data->r.g & PCC_COEFF_MASK, addr);
+	writel_relaxed(pcc_data->g.g & PCC_COEFF_MASK, addr + 4);
+	writel_relaxed(pcc_data->b.g & PCC_COEFF_MASK, addr + 8);
+
+	addr = base_addr + PCC_B_COEFF_OFF;
+	writel_relaxed(pcc_data->r.b & PCC_COEFF_MASK, addr);
+	writel_relaxed(pcc_data->g.b & PCC_COEFF_MASK, addr + 4);
+	writel_relaxed(pcc_data->b.b & PCC_COEFF_MASK, addr + 8);
+
+	addr = base_addr + PCC_RG_COEFF_OFF;
+	writel_relaxed(pcc_data->r.rg & PCC_COEFF_MASK, addr);
+	writel_relaxed(pcc_data->g.rg & PCC_COEFF_MASK, addr + 4);
+	writel_relaxed(pcc_data->b.rg & PCC_COEFF_MASK, addr + 8);
+
+	addr = base_addr + PCC_RB_COEFF_OFF;
+	writel_relaxed(pcc_data->r.rb & PCC_COEFF_MASK, addr);
+	writel_relaxed(pcc_data->g.rb & PCC_COEFF_MASK, addr + 4);
+	writel_relaxed(pcc_data->b.rb & PCC_COEFF_MASK, addr + 8);
+
+	addr = base_addr + PCC_GB_COEFF_OFF;
+	writel_relaxed(pcc_data->r.gb & PCC_COEFF_MASK, addr);
+	writel_relaxed(pcc_data->g.gb & PCC_COEFF_MASK, addr + 4);
+	writel_relaxed(pcc_data->b.gb & PCC_COEFF_MASK, addr + 8);
+
+	addr = base_addr + PCC_RGB_COEFF_OFF;
+	writel_relaxed(pcc_data->r.rgb & PCC_COEFF_MASK, addr);
+	writel_relaxed(pcc_data->g.rgb & PCC_COEFF_MASK, addr + 4);
+	writel_relaxed(pcc_data->b.rgb & PCC_COEFF_MASK, addr + 8);
+
+bail_out:
+	/* Reflect the requested enable state in sts and the op mode. */
+	pp_sts_set_split_bits(&pp_sts->pcc_sts, pcc_cfg_data->ops);
+	if (pcc_cfg_data->ops & MDP_PP_OPS_DISABLE) {
+		writel_relaxed(opmode, base_addr + PCC_OP_MODE_OFF);
+		pp_sts->pcc_sts &= ~PP_STS_ENABLE;
+	} else if (pcc_cfg_data->ops & MDP_PP_OPS_ENABLE) {
+		pp_sts->pcc_sts |= PP_STS_ENABLE;
+		if (pp_sts_is_enabled(pp_sts->pcc_sts, pp_sts->side_sts))
+			opmode |= PCC_ENABLE;
+		writel_relaxed(opmode, base_addr + PCC_OP_MODE_OFF);
+	}
+
+	return 0;
+}
+
+/*
+ * pp_pcc_get_config() - read the PCC coefficient registers back into the
+ * user-space payload of @cfg_data.
+ * @base_addr: MMIO base of the PCC block.
+ * @cfg_data: struct mdp_pcc_cfg_data with a __user v1.7 payload.
+ * @block_type: unused here.
+ * @disp_num: unused here.
+ *
+ * Returns 0 on success, -EINVAL for bad arguments/version, -EFAULT if
+ * the copy to user space fails.
+ * NOTE(review): pcc_data lives on the kernel stack and every named field
+ * is assigned below before the copy_to_user; confirm the struct has no
+ * padding or unassigned fields that would leak stack contents.
+ */
+static int pp_pcc_get_config(char __iomem *base_addr, void *cfg_data,
+				u32 block_type, u32 disp_num)
+{
+	char __iomem *addr;
+	struct mdp_pcc_cfg_data *pcc_cfg = NULL;
+	struct mdp_pcc_data_v1_7 pcc_data;
+
+	if (!base_addr || !cfg_data) {
+		pr_err("invalid params base_addr %pK cfg_data %pK\n",
+		       base_addr, cfg_data);
+		return -EINVAL;
+	}
+
+	pcc_cfg = (struct mdp_pcc_cfg_data *) cfg_data;
+	if (pcc_cfg->version != mdp_pcc_v1_7) {
+		pr_err("unsupported version of pcc %d\n",
+		       pcc_cfg->version);
+		return -EINVAL;
+	}
+
+	/* Each coefficient group is three consecutive registers (r,g,b). */
+	addr = base_addr + PCC_CONST_COEFF_OFF;
+	pcc_data.r.c = readl_relaxed(addr) & PCC_CONST_COEFF_MASK;
+	pcc_data.g.c = readl_relaxed(addr + 4) & PCC_CONST_COEFF_MASK;
+	pcc_data.b.c = readl_relaxed(addr + 8) & PCC_CONST_COEFF_MASK;
+
+	addr = base_addr + PCC_R_COEFF_OFF;
+	pcc_data.r.r = readl_relaxed(addr) & PCC_COEFF_MASK;
+	pcc_data.g.r = readl_relaxed(addr + 4) & PCC_COEFF_MASK;
+	pcc_data.b.r = readl_relaxed(addr + 8) & PCC_COEFF_MASK;
+
+	addr = base_addr + PCC_G_COEFF_OFF;
+	pcc_data.r.g = readl_relaxed(addr) & PCC_COEFF_MASK;
+	pcc_data.g.g = readl_relaxed(addr + 4) & PCC_COEFF_MASK;
+	pcc_data.b.g = readl_relaxed(addr + 8) & PCC_COEFF_MASK;
+
+	addr = base_addr + PCC_B_COEFF_OFF;
+	pcc_data.r.b = readl_relaxed(addr) & PCC_COEFF_MASK;
+	pcc_data.g.b = readl_relaxed(addr + 4) & PCC_COEFF_MASK;
+	pcc_data.b.b = readl_relaxed(addr + 8) & PCC_COEFF_MASK;
+
+	addr = base_addr + PCC_RG_COEFF_OFF;
+	pcc_data.r.rg = readl_relaxed(addr) & PCC_COEFF_MASK;
+	pcc_data.g.rg = readl_relaxed(addr + 4) & PCC_COEFF_MASK;
+	pcc_data.b.rg = readl_relaxed(addr + 8) & PCC_COEFF_MASK;
+
+	addr = base_addr + PCC_RB_COEFF_OFF;
+	pcc_data.r.rb = readl_relaxed(addr) & PCC_COEFF_MASK;
+	pcc_data.g.rb = readl_relaxed(addr + 4) & PCC_COEFF_MASK;
+	pcc_data.b.rb = readl_relaxed(addr + 8) & PCC_COEFF_MASK;
+
+	addr = base_addr + PCC_GB_COEFF_OFF;
+	pcc_data.r.gb = readl_relaxed(addr) & PCC_COEFF_MASK;
+	pcc_data.g.gb = readl_relaxed(addr + 4) & PCC_COEFF_MASK;
+	pcc_data.b.gb = readl_relaxed(addr + 8) & PCC_COEFF_MASK;
+
+	addr = base_addr + PCC_RGB_COEFF_OFF;
+	pcc_data.r.rgb = readl_relaxed(addr) & PCC_COEFF_MASK;
+	pcc_data.g.rgb = readl_relaxed(addr + 4) & PCC_COEFF_MASK;
+	pcc_data.b.rgb = readl_relaxed(addr + 8) & PCC_COEFF_MASK;
+
+	if (copy_to_user(pcc_cfg->cfg_payload, &pcc_data,
+			 sizeof(pcc_data))) {
+		pr_err("failed to copy the pcc info into payload\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/*
+ * pp_pa_set_global_adj_regs() - write the four global picture-adjust
+ * registers (hue, saturation, value, contrast) selected by @flags.
+ * The registers are consecutive 32-bit words starting at the DSPP or
+ * VIG global offset depending on @block_type.
+ */
+static void pp_pa_set_global_adj_regs(char __iomem *base_addr,
+				struct mdp_pa_data_v1_7 *pa_data, u32 flags,
+				int block_type)
+{
+	char __iomem *global_base;
+
+	global_base = base_addr + ((block_type == DSPP) ?
+			PA_DSPP_GLOBAL_OFF : PA_VIG_GLOBAL_OFF);
+
+	if (flags & MDP_PP_PA_HUE_ENABLE)
+		writel_relaxed(pa_data->global_hue_adj &
+				PA_GLOBAL_HUE_MASK, global_base);
+	if (flags & MDP_PP_PA_SAT_ENABLE)
+		writel_relaxed(pa_data->global_sat_adj &
+				PA_GLOBAL_SAT_MASK, global_base + 4);
+	if (flags & MDP_PP_PA_VAL_ENABLE)
+		writel_relaxed(pa_data->global_val_adj &
+				PA_GLOBAL_VAL_MASK, global_base + 8);
+	if (flags & MDP_PP_PA_CONT_ENABLE)
+		writel_relaxed(pa_data->global_cont_adj &
+				PA_GLOBAL_CONT_MASK, global_base + 12);
+}
+
+/*
+ * pp_pa_set_mem_col_regs() - program one memory-color zone.
+ * The P0 bank holds five consecutive words (adjust p0/p1, hue/sat/val
+ * regions); the P2 bank holds two (adjust p2, blend gain).
+ */
+static void pp_pa_set_mem_col_regs(char __iomem *mem_col_p0_addr,
+				char __iomem *mem_col_p2_addr,
+				struct mdp_pa_mem_col_data_v1_7 *mem_col_data)
+{
+	writel_relaxed(mem_col_data->color_adjust_p0 &
+			PA_MEM_COL_ADJ_P0_MASK, mem_col_p0_addr);
+	writel_relaxed(mem_col_data->color_adjust_p1,
+			mem_col_p0_addr + 4);
+	writel_relaxed(mem_col_data->hue_region &
+			PA_MEM_COL_HUE_REGION_MASK, mem_col_p0_addr + 8);
+	writel_relaxed(mem_col_data->sat_region &
+			PA_MEM_COL_SAT_REGION_MASK, mem_col_p0_addr + 12);
+	writel_relaxed(mem_col_data->val_region &
+			PA_MEM_COL_VAL_REGION_MASK, mem_col_p0_addr + 16);
+
+	writel_relaxed(mem_col_data->color_adjust_p2, mem_col_p2_addr);
+	writel_relaxed(mem_col_data->blend_gain, mem_col_p2_addr + 4);
+}
+
+/*
+ * pp_pa_set_mem_col() - program the skin/sky/foliage memory-color zones
+ * selected by @flags and accumulate their sat/val hold bits into
+ * *@pa_hold, recording the touched bit ranges in *@pa_hold_mask so the
+ * caller can merge with the untouched hold bits already in hardware.
+ */
+static void pp_pa_set_mem_col(char __iomem *base_addr,
+				struct mdp_pa_data_v1_7 *pa_data, u32 flags,
+				int block_type, uint32_t *pa_hold,
+				uint32_t *pa_hold_mask)
+{
+	const int is_dspp = (block_type == DSPP);
+	uint32_t hold_bits;
+
+	/* Skin zone */
+	if (flags & MDP_PP_PA_SKIN_ENABLE) {
+		pp_pa_set_mem_col_regs(base_addr + (is_dspp ?
+					PA_DSPP_MEM_COL_SKIN_P0_OFF :
+					PA_VIG_MEM_COL_SKIN_P0_OFF),
+				       base_addr + (is_dspp ?
+					PA_DSPP_MEM_COL_SKIN_P2_OFF :
+					PA_VIG_MEM_COL_SKIN_P2_OFF),
+				       &pa_data->skin_cfg);
+		hold_bits = ((pa_data->skin_cfg.sat_hold & PA_HOLD_MASK) <<
+				PA_HOLD_SAT_SHIFT) |
+			    ((pa_data->skin_cfg.val_hold & PA_HOLD_MASK) <<
+				PA_HOLD_VAL_SHIFT);
+		*pa_hold |= hold_bits << PA_HOLD_SKIN_SHIFT;
+		*pa_hold_mask |= PA_HOLD_SKIN_MASK;
+	}
+	/* Sky zone */
+	if (flags & MDP_PP_PA_SKY_ENABLE) {
+		pp_pa_set_mem_col_regs(base_addr + (is_dspp ?
+					PA_DSPP_MEM_COL_SKY_P0_OFF :
+					PA_VIG_MEM_COL_SKY_P0_OFF),
+				       base_addr + (is_dspp ?
+					PA_DSPP_MEM_COL_SKY_P2_OFF :
+					PA_VIG_MEM_COL_SKY_P2_OFF),
+				       &pa_data->sky_cfg);
+		hold_bits = ((pa_data->sky_cfg.sat_hold & PA_HOLD_MASK) <<
+				PA_HOLD_SAT_SHIFT) |
+			    ((pa_data->sky_cfg.val_hold & PA_HOLD_MASK) <<
+				PA_HOLD_VAL_SHIFT);
+		*pa_hold |= hold_bits << PA_HOLD_SKY_SHIFT;
+		*pa_hold_mask |= PA_HOLD_SKY_MASK;
+	}
+	/* Foliage zone */
+	if (flags & MDP_PP_PA_FOL_ENABLE) {
+		pp_pa_set_mem_col_regs(base_addr + (is_dspp ?
+					PA_DSPP_MEM_COL_FOL_P0_OFF :
+					PA_VIG_MEM_COL_FOL_P0_OFF),
+				       base_addr + (is_dspp ?
+					PA_DSPP_MEM_COL_FOL_P2_OFF :
+					PA_VIG_MEM_COL_FOL_P2_OFF),
+				       &pa_data->fol_cfg);
+		hold_bits = ((pa_data->fol_cfg.sat_hold & PA_HOLD_MASK) <<
+				PA_HOLD_SAT_SHIFT) |
+			    ((pa_data->fol_cfg.val_hold & PA_HOLD_MASK) <<
+				PA_HOLD_VAL_SHIFT);
+		*pa_hold |= hold_bits << PA_HOLD_FOL_SHIFT;
+		*pa_hold_mask |= PA_HOLD_FOL_MASK;
+	}
+}
+
+/*
+ * pp_pa_set_six_zone() - program the six-zone LUT, region threshold and
+ * adjustment registers, and fold the six-zone sat/val hold bits into
+ * *@pa_hold / *@pa_hold_mask.  No-op unless MDP_PP_PA_SIX_ZONE_ENABLE
+ * is set and the payload carries a LUT.
+ *
+ * The LUT write protocol is order-critical: the first p0 write carries
+ * PA_SIX_ZONE_INDEX_UPDATE to arm auto-incrementing accesses, and each
+ * subsequent entry is written p1 first, then p0 — do not reorder.
+ * NOTE(review): six_zone_len is not validated against
+ * MDP_SIX_ZONE_LUT_SIZE here, but the loop always reads
+ * MDP_SIX_ZONE_LUT_SIZE entries — presumably callers guarantee the
+ * buffers are at least that large; confirm.
+ */
+static void pp_pa_set_six_zone(char __iomem *base_addr,
+				struct mdp_pa_data_v1_7 *pa_data,
+				u32 flags,
+				uint32_t *pa_hold,
+				uint32_t *pa_hold_mask)
+{
+	u32 data, i;
+	char __iomem *addr = base_addr + PA_SIX_ZONE_LUT_OFF;
+	uint32_t sat_hold = 0, val_hold = 0, mem_col_hold = 0;
+	/* Update six zone memory color registers */
+	if (!(flags & MDP_PP_PA_SIX_ZONE_ENABLE))
+		return;
+
+	if (!pa_data->six_zone_len || !pa_data->six_zone_curve_p0 ||
+	    !pa_data->six_zone_curve_p1) {
+		pr_err("Invalid six zone data: len %d curve_p0 %pK curve_p1 %pK\n",
+		       pa_data->six_zone_len,
+		       pa_data->six_zone_curve_p0,
+		       pa_data->six_zone_curve_p1);
+		return;
+	}
+
+	writel_relaxed((pa_data->six_zone_curve_p1[0] &
+			PA_SIX_ZONE_CURVE_P1_MASK), addr + 4);
+	/* Index Update to trigger auto-incrementing LUT accesses */
+	data = PA_SIX_ZONE_INDEX_UPDATE;
+	writel_relaxed((pa_data->six_zone_curve_p0[0] &
+			PA_SIX_ZONE_CURVE_P0_MASK) | data, addr);
+
+	/* Remove Index Update */
+	for (i = 1; i < MDP_SIX_ZONE_LUT_SIZE; i++) {
+		writel_relaxed((pa_data->six_zone_curve_p1[i] &
+				PA_SIX_ZONE_CURVE_P1_MASK), addr + 4);
+		writel_relaxed((pa_data->six_zone_curve_p0[i] &
+				PA_SIX_ZONE_CURVE_P0_MASK), addr);
+	}
+	addr = base_addr + PA_SIX_ZONE_REGION_OFF;
+	writel_relaxed(pa_data->six_zone_thresh, addr);
+
+	addr = base_addr + PA_SIX_ZONE_ADJ_OFF;
+	writel_relaxed((pa_data->six_zone_adj_p0 &
+			PA_SIX_ZONE_ADJ_P0_MASK), addr);
+	addr += 4;
+	writel_relaxed(pa_data->six_zone_adj_p1, addr);
+
+	/* Fold the six-zone sat/val holds into the caller's accumulator. */
+	sat_hold = (pa_data->six_zone_sat_hold & PA_HOLD_MASK) <<
+		    PA_HOLD_SAT_SHIFT;
+	val_hold = (pa_data->six_zone_val_hold & PA_HOLD_MASK) <<
+		    PA_HOLD_VAL_SHIFT;
+	mem_col_hold = (sat_hold | val_hold) << PA_HOLD_SIX_ZONE_SHIFT;
+	*pa_hold |= mem_col_hold;
+	*pa_hold_mask |= PA_HOLD_SIX_ZONE_MASK;
+}
+
+/*
+ * pp_pa_set_config() - program the picture-adjust (PA) block from a
+ * user config: global adjustments, memory-color zones and (DSPP only)
+ * the six-zone LUT, then merge the accumulated hold bits into the
+ * hardware hold register.
+ * @base_addr: MMIO base of the PA block.
+ * @pp_sts: per-pipe status, updated via pp_pa_set_sts().
+ * @cfg_data: struct mdp_pa_v2_cfg_data; payload must be mdp_pa_v1_7.
+ * @block_type: DSPP or SSPP_VIG.
+ *
+ * Returns 0 on success or a negative errno.
+ *
+ * Fix: two log messages were missing their trailing newlines.
+ */
+static int pp_pa_set_config(char __iomem *base_addr,
+			struct pp_sts_type *pp_sts, void *cfg_data,
+			u32 block_type)
+{
+	struct mdp_pa_v2_cfg_data *pa_cfg_data = NULL;
+	struct mdp_pa_data_v1_7 *pa_data = NULL;
+	uint32_t pa_hold = 0, pa_hold_mask = 0, pa_hold_tmp;
+	char __iomem *pa_hold_addr = NULL;
+	int ret = 0;
+
+	if (!base_addr || !cfg_data || !pp_sts) {
+		pr_err("invalid params base_addr %pK cfg_data %pK pp_sts_type %pK\n",
+		      base_addr, cfg_data, pp_sts);
+		return -EINVAL;
+	}
+	if ((block_type != DSPP) && (block_type != SSPP_VIG)) {
+		pr_err("Invalid block type %d\n", block_type);
+		return -EINVAL;
+	}
+
+	pa_cfg_data = (struct mdp_pa_v2_cfg_data *) cfg_data;
+	if (pa_cfg_data->version != mdp_pa_v1_7) {
+		pr_err("invalid pa version %d\n", pa_cfg_data->version);
+		return -EINVAL;
+	}
+	/* Nothing to do when the only op requested is a read. */
+	if (!(pa_cfg_data->flags & ~(MDP_PP_OPS_READ))) {
+		pr_info("only read ops is set %d\n", pa_cfg_data->flags);
+		return 0;
+	}
+	/*
+	 * NOTE(review): on this path pa_data is still NULL when
+	 * pp_pa_set_sts() runs below — presumably pp_pa_set_sts handles
+	 * the DISABLE flag before touching the payload; confirm.
+	 */
+	if (pa_cfg_data->flags & MDP_PP_OPS_DISABLE) {
+		pr_debug("Disable PA\n");
+		goto pa_set_sts;
+	}
+
+	pa_data = pa_cfg_data->cfg_payload;
+	if (!pa_data) {
+		pr_err("invalid payload for pa %pK\n", pa_data);
+		return -EINVAL;
+	}
+
+	if (!(pa_cfg_data->flags & MDP_PP_OPS_WRITE)) {
+		pr_warn("No write flag enabled for PA flags %d\n",
+			pa_cfg_data->flags);
+		return 0;
+	}
+
+	pp_pa_set_global_adj_regs(base_addr, pa_data, pa_cfg_data->flags,
+			block_type);
+	pp_pa_set_mem_col(base_addr, pa_data, pa_cfg_data->flags,
+			block_type, &pa_hold, &pa_hold_mask);
+	/* Six-zone hardware exists only on the DSPP path. */
+	if (block_type == DSPP)
+		pp_pa_set_six_zone(base_addr, pa_data, pa_cfg_data->flags,
+				   &pa_hold, &pa_hold_mask);
+
+	/*
+	 * Only modify the PA hold bits for PA features that have
+	 * been updated.
+	 */
+	if (block_type == DSPP)
+		pa_hold_addr = base_addr + PA_DSPP_HOLD_OFF;
+	else
+		pa_hold_addr = base_addr + PA_VIG_HOLD_OFF;
+	pa_hold_tmp = readl_relaxed(pa_hold_addr);
+	pa_hold_tmp &= ~pa_hold_mask;
+	pa_hold |= pa_hold_tmp;
+	writel_relaxed(pa_hold, pa_hold_addr);
+
+pa_set_sts:
+	pp_pa_set_sts(pp_sts, pa_data, pa_cfg_data->flags, block_type);
+
+	return ret;
+}
+
+/*
+ * pp_pa_get_global_adj_regs() - read back the four global picture-adjust
+ * registers (hue, saturation, value, contrast) selected by @flags.
+ * Mirrors pp_pa_set_global_adj_regs(); the registers are consecutive
+ * 32-bit words starting at the DSPP or VIG global offset.
+ */
+static void pp_pa_get_global_adj_regs(char __iomem *base_addr,
+				struct mdp_pa_data_v1_7 *pa_data, u32 flags,
+				int block_type)
+{
+	char __iomem *global_base;
+
+	global_base = base_addr + ((block_type == DSPP) ?
+			PA_DSPP_GLOBAL_OFF : PA_VIG_GLOBAL_OFF);
+
+	if (flags & MDP_PP_PA_HUE_ENABLE)
+		pa_data->global_hue_adj = readl_relaxed(global_base) &
+					  PA_GLOBAL_HUE_MASK;
+	if (flags & MDP_PP_PA_SAT_ENABLE)
+		pa_data->global_sat_adj = readl_relaxed(global_base + 4) &
+					  PA_GLOBAL_SAT_MASK;
+	if (flags & MDP_PP_PA_VAL_ENABLE)
+		pa_data->global_val_adj = readl_relaxed(global_base + 8) &
+					  PA_GLOBAL_VAL_MASK;
+	if (flags & MDP_PP_PA_CONT_ENABLE)
+		pa_data->global_cont_adj = readl_relaxed(global_base + 12) &
+					  PA_GLOBAL_CONT_MASK;
+}
+
+/*
+ * pp_pa_get_mem_col_regs() - read back one memory-color zone.
+ * Mirrors pp_pa_set_mem_col_regs(): five consecutive words in the P0
+ * bank, two in the P2 bank.
+ */
+static void pp_pa_get_mem_col_regs(char __iomem *mem_col_p0_addr,
+				char __iomem *mem_col_p2_addr,
+				struct mdp_pa_mem_col_data_v1_7 *mem_col_data)
+{
+	mem_col_data->color_adjust_p0 = readl_relaxed(mem_col_p0_addr) &
+					PA_MEM_COL_ADJ_P0_MASK;
+	mem_col_data->color_adjust_p1 =
+			readl_relaxed(mem_col_p0_addr + 4);
+	mem_col_data->hue_region = readl_relaxed(mem_col_p0_addr + 8) &
+				   PA_MEM_COL_HUE_REGION_MASK;
+	mem_col_data->sat_region = readl_relaxed(mem_col_p0_addr + 12) &
+				   PA_MEM_COL_SAT_REGION_MASK;
+	mem_col_data->val_region = readl_relaxed(mem_col_p0_addr + 16) &
+				   PA_MEM_COL_VAL_REGION_MASK;
+
+	mem_col_data->color_adjust_p2 = readl_relaxed(mem_col_p2_addr);
+	mem_col_data->blend_gain = readl_relaxed(mem_col_p2_addr + 4);
+}
+
+/*
+ * pp_pa_get_mem_col() - read back the skin/sky/foliage memory-color
+ * zones selected by @flags, and unpack each zone's sat/val hold bits
+ * from the @pa_hold register snapshot supplied by the caller.
+ */
+static void pp_pa_get_mem_col(char __iomem *base_addr,
+				struct mdp_pa_data_v1_7 *pa_data, u32 flags,
+				int block_type,
+				uint32_t pa_hold)
+{
+	const int is_dspp = (block_type == DSPP);
+	uint32_t zone_hold;
+
+	/* Skin zone */
+	if (flags & MDP_PP_PA_SKIN_ENABLE) {
+		pp_pa_get_mem_col_regs(base_addr + (is_dspp ?
+					PA_DSPP_MEM_COL_SKIN_P0_OFF :
+					PA_VIG_MEM_COL_SKIN_P0_OFF),
+				       base_addr + (is_dspp ?
+					PA_DSPP_MEM_COL_SKIN_P2_OFF :
+					PA_VIG_MEM_COL_SKIN_P2_OFF),
+				       &pa_data->skin_cfg);
+		zone_hold = pa_hold >> PA_HOLD_SKIN_SHIFT;
+		pa_data->skin_cfg.sat_hold = (zone_hold >>
+				PA_HOLD_SAT_SHIFT) & PA_HOLD_MASK;
+		pa_data->skin_cfg.val_hold = (zone_hold >>
+				PA_HOLD_VAL_SHIFT) & PA_HOLD_MASK;
+	}
+	/* Sky zone */
+	if (flags & MDP_PP_PA_SKY_ENABLE) {
+		pp_pa_get_mem_col_regs(base_addr + (is_dspp ?
+					PA_DSPP_MEM_COL_SKY_P0_OFF :
+					PA_VIG_MEM_COL_SKY_P0_OFF),
+				       base_addr + (is_dspp ?
+					PA_DSPP_MEM_COL_SKY_P2_OFF :
+					PA_VIG_MEM_COL_SKY_P2_OFF),
+				       &pa_data->sky_cfg);
+		zone_hold = pa_hold >> PA_HOLD_SKY_SHIFT;
+		pa_data->sky_cfg.sat_hold = (zone_hold >>
+				PA_HOLD_SAT_SHIFT) & PA_HOLD_MASK;
+		pa_data->sky_cfg.val_hold = (zone_hold >>
+				PA_HOLD_VAL_SHIFT) & PA_HOLD_MASK;
+	}
+	/* Foliage zone */
+	if (flags & MDP_PP_PA_FOL_ENABLE) {
+		pp_pa_get_mem_col_regs(base_addr + (is_dspp ?
+					PA_DSPP_MEM_COL_FOL_P0_OFF :
+					PA_VIG_MEM_COL_FOL_P0_OFF),
+				       base_addr + (is_dspp ?
+					PA_DSPP_MEM_COL_FOL_P2_OFF :
+					PA_VIG_MEM_COL_FOL_P2_OFF),
+				       &pa_data->fol_cfg);
+		zone_hold = pa_hold >> PA_HOLD_FOL_SHIFT;
+		pa_data->fol_cfg.sat_hold = (zone_hold >>
+				PA_HOLD_SAT_SHIFT) & PA_HOLD_MASK;
+		pa_data->fol_cfg.val_hold = (zone_hold >>
+				PA_HOLD_VAL_SHIFT) & PA_HOLD_MASK;
+	}
+}
+
+/*
+ * pp_pa_get_six_zone() - read the six-zone LUT and related registers
+ * back into the user buffers referenced by @pa_data, and unpack the
+ * six-zone sat/val hold bits from the @pa_hold snapshot.
+ * @base_addr: MMIO base of the PA block.
+ * @pa_data: kernel copy of the payload; curve pointers are __user.
+ * @flags: user flags (unused here; caller already gated on six-zone).
+ * @pa_hold: snapshot of the PA hold register.
+ *
+ * Returns 0 on success, -EINVAL/-EFAULT/-ENOMEM on failure.
+ */
+static int pp_pa_get_six_zone(char __iomem *base_addr,
+				struct mdp_pa_data_v1_7 *pa_data, u32 flags,
+				u32 pa_hold)
+{
+	uint32_t six_zone_sz = 0, six_zone_buf_sz = 0;
+	u32 data = 0;
+	char __iomem *addr = base_addr + PA_SIX_ZONE_LUT_OFF;
+	uint32_t *six_zone_read_buf = NULL;
+	uint32_t *six_zone_p0 = NULL, *six_zone_p1 = NULL;
+	uint32_t six_zone_hold = 0;
+	int ret = 0, i;
+
+	if (pa_data->six_zone_len != MDP_SIX_ZONE_LUT_SIZE) {
+		pr_err("Invalid six zone length %d\n",
+			pa_data->six_zone_len);
+		return -EINVAL;
+	}
+	six_zone_sz = pa_data->six_zone_len * sizeof(uint32_t);
+
+	/* Validate both user destinations before touching the hardware. */
+	if (!access_ok(VERIFY_WRITE, pa_data->six_zone_curve_p0,
+			six_zone_sz)) {
+		pr_err("invalid six_zone_curve_p0 addr for sz %d\n",
+			six_zone_sz);
+		return -EFAULT;
+	}
+	if (!access_ok(VERIFY_WRITE, pa_data->six_zone_curve_p1,
+			six_zone_sz)) {
+		pr_err("invalid six_zone_curve_p1 addr for sz %d\n",
+			six_zone_sz);
+		return -EFAULT;
+	}
+
+	/* One scratch buffer holds both curves: p0 first, then p1. */
+	six_zone_buf_sz = 2 * six_zone_sz;
+	six_zone_read_buf = kzalloc(six_zone_buf_sz, GFP_KERNEL);
+	if (!six_zone_read_buf) {
+		pr_err("allocation failed for six zone lut size %d\n",
+			six_zone_buf_sz);
+		ret = -ENOMEM;
+		goto six_zone_exit;
+	}
+	six_zone_p0 = six_zone_read_buf;
+	six_zone_p1 = &six_zone_read_buf[MDP_SIX_ZONE_LUT_SIZE];
+
+	/*
+	 * Arm auto-incrementing LUT reads; each p1-then-p0 read pair
+	 * below fetches one entry.  The read order must not change.
+	 */
+	data = PA_SIX_ZONE_VALUE_UPDATE | PA_SIX_ZONE_INDEX_UPDATE;
+	writel_relaxed(data, addr);
+
+	for (i = 0; i < MDP_SIX_ZONE_LUT_SIZE; i++) {
+		six_zone_p1[i] = readl_relaxed(addr + 4) &
+				 PA_SIX_ZONE_CURVE_P1_MASK;
+		six_zone_p0[i] = readl_relaxed(addr) &
+				 PA_SIX_ZONE_CURVE_P0_MASK;
+	}
+
+	addr = base_addr + PA_SIX_ZONE_REGION_OFF;
+	pa_data->six_zone_thresh = readl_relaxed(addr);
+
+	addr = base_addr + PA_SIX_ZONE_ADJ_OFF;
+	pa_data->six_zone_adj_p0 = readl_relaxed(addr) &
+				   PA_SIX_ZONE_ADJ_P0_MASK;
+	addr += 4;
+	pa_data->six_zone_adj_p1 = readl_relaxed(addr);
+
+	if (copy_to_user(pa_data->six_zone_curve_p0, six_zone_p0,
+			 six_zone_sz)) {
+		pr_err("Failed to copy six zone p0 data\n");
+		ret = -EFAULT;
+		goto six_zone_memory_exit;
+	}
+	if (copy_to_user(pa_data->six_zone_curve_p1, six_zone_p1,
+			 six_zone_sz)) {
+		pr_err("Failed to copy six zone p1 data\n");
+		ret = -EFAULT;
+		goto six_zone_memory_exit;
+	}
+
+	/* Unpack the six-zone hold bits from the register snapshot. */
+	six_zone_hold = pa_hold >> PA_HOLD_SIX_ZONE_SHIFT;
+	pa_data->six_zone_sat_hold = (six_zone_hold >> PA_HOLD_SAT_SHIFT) &
+				     PA_HOLD_MASK;
+	pa_data->six_zone_val_hold = (six_zone_hold >> PA_HOLD_VAL_SHIFT) &
+				     PA_HOLD_MASK;
+
+six_zone_memory_exit:
+	kfree(six_zone_read_buf);
+six_zone_exit:
+	return ret;
+}
+
+/*
+ * pp_pa_get_config() - read back the PA configuration selected by the
+ * user's flags into the user-space payload of @cfg_data.
+ * @base_addr: MMIO base of the PA block.
+ * @cfg_data: struct mdp_pa_v2_cfg_data with a __user v1.7 payload.
+ * @block_type: DSPP or SSPP_VIG.
+ * @disp_num: unused here.
+ *
+ * The payload is first copied in so the six-zone curve pointers and
+ * length supplied by user space can be used; only the fields selected
+ * by flags are refreshed from hardware, the rest round-trip unchanged.
+ * Returns 0 on success or a negative errno.
+ */
+static int pp_pa_get_config(char __iomem *base_addr, void *cfg_data,
+				u32 block_type, u32 disp_num)
+{
+	struct mdp_pa_v2_cfg_data *pa_cfg_data = NULL;
+	struct mdp_pa_data_v1_7 pa_data;
+	int ret = 0;
+	uint32_t pa_hold = 0;
+	char __iomem *pa_hold_addr = NULL;
+
+	if (!base_addr || !cfg_data) {
+		pr_err("invalid params base_addr %pK cfg_data %pK\n",
+		      base_addr, cfg_data);
+		return -EINVAL;
+	}
+	if ((block_type != DSPP) && (block_type != SSPP_VIG)) {
+		pr_err("Invalid block type %d\n", block_type);
+		return -EINVAL;
+	}
+
+	pa_cfg_data = (struct mdp_pa_v2_cfg_data *) cfg_data;
+	if (pa_cfg_data->version != mdp_pa_v1_7) {
+		pr_err("invalid pa version %d\n", pa_cfg_data->version);
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&pa_data, pa_cfg_data->cfg_payload,
+			sizeof(pa_data))) {
+		pr_err("copy from user failed for pa data\n");
+		return -EFAULT;
+	}
+
+	/* Snapshot the hold register so zones can unpack their bits. */
+	if (block_type == DSPP)
+		pa_hold_addr = base_addr + PA_DSPP_HOLD_OFF;
+	else
+		pa_hold_addr = base_addr + PA_VIG_HOLD_OFF;
+	pa_hold = readl_relaxed(pa_hold_addr);
+
+	/* Six-zone hardware exists only on the DSPP path. */
+	if ((block_type == DSPP) &&
+	    (pa_cfg_data->flags & MDP_PP_PA_SIX_ZONE_ENABLE)) {
+		ret = pp_pa_get_six_zone(base_addr,
+				   &pa_data,
+				   pa_cfg_data->flags,
+				   pa_hold);
+		if (ret) {
+			pr_err("six zone read failed ret %d\n", ret);
+			return ret;
+		}
+	}
+	pp_pa_get_global_adj_regs(base_addr, &pa_data, pa_cfg_data->flags,
+			block_type);
+	pp_pa_get_mem_col(base_addr, &pa_data, pa_cfg_data->flags,
+			block_type, pa_hold);
+
+	ret = copy_to_user(pa_cfg_data->cfg_payload, &pa_data, sizeof(pa_data));
+	if (ret) {
+		pr_err("Failed to copy PA data to user\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static void pp_pa_update_vig_opmode(int pa_sts, u32 *opmode)
+{
+	*opmode |= PA_VIG_OP_ENABLE;
+	if (pa_sts & PP_STS_PA_HUE_MASK)
+		*opmode |= PA_VIG_OP_HUE_MASK;
+	if (pa_sts & PP_STS_PA_SAT_MASK)
+		*opmode |= PA_VIG_OP_SAT_MASK;
+	if (pa_sts & PP_STS_PA_VAL_MASK)
+		*opmode |= PA_VIG_OP_VAL_MASK;
+	if (pa_sts & PP_STS_PA_CONT_MASK)
+		*opmode |= PA_VIG_OP_CONT_MASK;
+	if (pa_sts & PP_STS_PA_SAT_ZERO_EXP_EN)
+		*opmode |= PA_VIG_OP_SAT_ZERO_EXP_EN;
+	if (pa_sts & PP_STS_PA_MEM_COL_SKIN_MASK)
+		*opmode |= PA_VIG_OP_MEM_COL_SKIN_MASK;
+	if (pa_sts & PP_STS_PA_MEM_COL_FOL_MASK)
+		*opmode |= PA_VIG_OP_MEM_COL_FOL_MASK;
+	if (pa_sts & PP_STS_PA_MEM_COL_SKY_MASK)
+		*opmode |= PA_VIG_OP_MEM_COL_SKY_MASK;
+	if (pa_sts & PP_STS_PA_MEM_PROT_HUE_EN)
+		*opmode |= PA_VIG_OP_MEM_PROT_HUE_EN;
+	if (pa_sts & PP_STS_PA_MEM_PROT_SAT_EN)
+		*opmode |= PA_VIG_OP_MEM_PROT_SAT_EN;
+	if (pa_sts & PP_STS_PA_MEM_PROT_VAL_EN)
+		*opmode |= PA_VIG_OP_MEM_PROT_VAL_EN;
+	if (pa_sts & PP_STS_PA_MEM_PROT_CONT_EN)
+		*opmode |= PA_VIG_OP_MEM_PROT_CONT_EN;
+	if (pa_sts & PP_STS_PA_MEM_PROT_BLEND_EN)
+		*opmode |= PA_VIG_OP_MEM_PROT_BLEND_EN;
+}
+
+static void pp_pa_update_dspp_opmode(int pa_sts, u32 *opmode)
+{
+	*opmode |= PA_DSPP_OP_ENABLE;
+	if (pa_sts & PP_STS_PA_HUE_MASK)
+		*opmode |= PA_DSPP_OP_HUE_MASK;
+	if (pa_sts & PP_STS_PA_SAT_MASK)
+		*opmode |= PA_DSPP_OP_SAT_MASK;
+	if (pa_sts & PP_STS_PA_VAL_MASK)
+		*opmode |= PA_DSPP_OP_VAL_MASK;
+	if (pa_sts & PP_STS_PA_CONT_MASK)
+		*opmode |= PA_DSPP_OP_CONT_MASK;
+	if (pa_sts & PP_STS_PA_SAT_ZERO_EXP_EN)
+		*opmode |= PA_DSPP_OP_SAT_ZERO_EXP_EN;
+	if (pa_sts & PP_STS_PA_MEM_COL_SKIN_MASK)
+		*opmode |= PA_DSPP_OP_MEM_COL_SKIN_MASK;
+	if (pa_sts & PP_STS_PA_MEM_COL_FOL_MASK)
+		*opmode |= PA_DSPP_OP_MEM_COL_FOL_MASK;
+	if (pa_sts & PP_STS_PA_MEM_COL_SKY_MASK)
+		*opmode |= PA_DSPP_OP_MEM_COL_SKY_MASK;
+	if (pa_sts & PP_STS_PA_SIX_ZONE_HUE_MASK)
+		*opmode |= PA_DSPP_OP_SIX_ZONE_HUE_MASK;
+	if (pa_sts & PP_STS_PA_SIX_ZONE_SAT_MASK)
+		*opmode |= PA_DSPP_OP_SIX_ZONE_SAT_MASK;
+	if (pa_sts & PP_STS_PA_SIX_ZONE_VAL_MASK)
+		*opmode |= PA_DSPP_OP_SIX_ZONE_VAL_MASK;
+	if (pa_sts & PP_STS_PA_MEM_PROT_HUE_EN)
+		*opmode |= PA_DSPP_OP_MEM_PROT_HUE_EN;
+	if (pa_sts & PP_STS_PA_MEM_PROT_SAT_EN)
+		*opmode |= PA_DSPP_OP_MEM_PROT_SAT_EN;
+	if (pa_sts & PP_STS_PA_MEM_PROT_VAL_EN)
+		*opmode |= PA_DSPP_OP_MEM_PROT_VAL_EN;
+	if (pa_sts & PP_STS_PA_MEM_PROT_CONT_EN)
+		*opmode |= PA_DSPP_OP_MEM_PROT_CONT_EN;
+	if (pa_sts & PP_STS_PA_MEM_PROT_BLEND_EN)
+		*opmode |= PA_DSPP_OP_MEM_PROT_BLEND_EN;
+	if (pa_sts & PP_STS_PA_MEM_PROT_SIX_EN)
+		*opmode |= PA_DSPP_OP_MEM_PROT_SIX_EN;
+}
+
+static int pp_igc_set_config(char __iomem *base_addr,
+		struct pp_sts_type *pp_sts, void *cfg_data,
+		u32 block_type)
+{
+	int ret = 0, i = 0;
+	struct mdp_igc_lut_data *lut_cfg_data = NULL;
+	struct mdp_igc_lut_data_v1_7 *lut_data = NULL;
+	char __iomem *c0 = NULL, *c1 = NULL, *c2 = NULL;
+	u32 data;
+
+	if (!base_addr || !cfg_data || !pp_sts) {
+		pr_err("invalid params base_addr %pK cfg_data %pK pp_sts_type %pK\n",
+		      base_addr, cfg_data, pp_sts);
+		return -EINVAL;
+	}
+
+	lut_cfg_data = (struct mdp_igc_lut_data *) cfg_data;
+	if (lut_cfg_data->version != mdp_igc_v1_7 ||
+	    !lut_cfg_data->cfg_payload) {
+		pr_err_once("invalid igc version %d payload %pK\n",
+		       lut_cfg_data->version, lut_cfg_data->cfg_payload);
+		return -EINVAL;
+	}
+	if (!(lut_cfg_data->ops & ~(MDP_PP_OPS_READ))) {
+		pr_err("only read ops set for lut\n");
+		return ret;
+	}
+	if (lut_cfg_data->block > IGC_MASK_MAX) {
+		pr_err("invalid mask value for IGC %d", lut_cfg_data->block);
+		return -EINVAL;
+	}
+	if (!(lut_cfg_data->ops & MDP_PP_OPS_WRITE)) {
+		pr_debug("non write ops set %d\n", lut_cfg_data->ops);
+		goto bail_out;
+	}
+	lut_data = lut_cfg_data->cfg_payload;
+	if (lut_data->len != IGC_LUT_ENTRIES || !lut_data->c0_c1_data ||
+	    !lut_data->c2_data) {
+		pr_err("invalid lut len %d c0_c1_data %pK  c2_data %pK\n",
+		       lut_data->len, lut_data->c0_c1_data, lut_data->c2_data);
+		return -EINVAL;
+	}
+	switch (block_type) {
+	case SSPP_RGB:
+		c0 = base_addr + IGC_RGB_C0_LUT;
+		break;
+	case SSPP_DMA:
+		c0 = base_addr + IGC_DMA_C0_LUT;
+		break;
+	case SSPP_VIG:
+	case DSPP:
+		c0 = base_addr + IGC_C0_LUT;
+		break;
+	default:
+		pr_err("Invalid block type %d\n", block_type);
+		ret = -EINVAL;
+		break;
+	}
+	if (ret) {
+		pr_err("igc table not updated ret %d\n", ret);
+		return ret;
+	}
+	c1 = c0 + 4;
+	c2 = c1 + 4;
+	data = IGC_INDEX_UPDATE | IGC_CONFIG_MASK(lut_cfg_data->block);
+	pr_debug("data %x block type %d mask %x\n",
+		  data, lut_cfg_data->block,
+		  IGC_CONFIG_MASK(lut_cfg_data->block));
+	writel_relaxed((lut_data->c0_c1_data[0] & IGC_DATA_MASK) | data, c0);
+	writel_relaxed(((lut_data->c0_c1_data[0] >> 16)
+			& IGC_DATA_MASK) | data, c1);
+	writel_relaxed((lut_data->c2_data[0] & IGC_DATA_MASK) | data, c2);
+	data &= ~IGC_INDEX_UPDATE;
+	/* update the index for c0, c1 , c2 */
+	for (i = 1; i < IGC_LUT_ENTRIES; i++) {
+		writel_relaxed((lut_data->c0_c1_data[i] & IGC_DATA_MASK)
+			       | data, c0);
+		writel_relaxed(((lut_data->c0_c1_data[i] >> 16)
+				& IGC_DATA_MASK) | data, c1);
+		writel_relaxed((lut_data->c2_data[i] & IGC_DATA_MASK)
+				| data, c2);
+	}
+bail_out:
+	if (!ret) {
+		if (lut_cfg_data->ops & MDP_PP_OPS_DISABLE)
+			pp_sts->igc_sts &= ~PP_STS_ENABLE;
+		else if (lut_cfg_data->ops & MDP_PP_OPS_ENABLE)
+			pp_sts->igc_sts |= PP_STS_ENABLE;
+		pp_sts_set_split_bits(&pp_sts->igc_sts,
+				      lut_cfg_data->ops);
+	}
+	return ret;
+}
+
+static int pp_igc_get_config(char __iomem *base_addr, void *cfg_data,
+			   u32 block_type, u32 disp_num)
+{
+	int ret = 0, i = 0;
+	struct mdp_igc_lut_data *lut_cfg_data = NULL;
+	struct mdp_igc_lut_data_v1_7 lut_data_v1_7;
+	struct mdp_igc_lut_data_v1_7 *lut_data = &lut_data_v1_7;
+	char __iomem *c1 = NULL, *c2 = NULL;
+	u32 *c0c1_data = NULL, *c2_data = NULL;
+	u32 data = 0, sz = 0;
+
+	if (!base_addr || !cfg_data || block_type != DSPP) {
+		pr_err("invalid params base_addr %pK cfg_data %pK block_type %d\n",
+		      base_addr, cfg_data, block_type);
+		return -EINVAL;
+	}
+	lut_cfg_data = (struct mdp_igc_lut_data *) cfg_data;
+	if (!(lut_cfg_data->ops & MDP_PP_OPS_READ)) {
+		pr_err("read ops not set for lut ops %d\n", lut_cfg_data->ops);
+		return ret;
+	}
+	if (lut_cfg_data->version != mdp_igc_v1_7 ||
+	    !lut_cfg_data->cfg_payload ||
+	    lut_cfg_data->block > IGC_MASK_MAX) {
+		pr_err("invalid igc version %d payload %pK block %d\n",
+		       lut_cfg_data->version, lut_cfg_data->cfg_payload,
+		       lut_cfg_data->block);
+		ret = -EINVAL;
+		goto exit;
+	}
+	if (copy_from_user(lut_data, (void __user *) lut_cfg_data->cfg_payload,
+			sizeof(*lut_data))) {
+		pr_err("copy from user failed for lut_data\n");
+		return -EFAULT;
+	}
+	if (lut_data->len != IGC_LUT_ENTRIES) {
+		pr_err("invalid lut len %d\n", lut_data->len);
+		ret = -EINVAL;
+		goto exit;
+	}
+	sz = IGC_LUT_ENTRIES * sizeof(u32);
+	if (!access_ok(VERIFY_WRITE, lut_data->c0_c1_data, sz) ||
+	    (!access_ok(VERIFY_WRITE, lut_data->c2_data, sz))) {
+		pr_err("invalid lut address for sz %d\n", sz);
+		ret = -EFAULT;
+		goto exit;
+	}
+	/* Allocate for c0c1 and c2 tables */
+	c0c1_data = kzalloc(sz * 2, GFP_KERNEL);
+	if (!c0c1_data) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+	c2_data = &c0c1_data[IGC_LUT_ENTRIES];
+	data = IGC_INDEX_VALUE_UPDATE | IGC_CONFIG_MASK(lut_cfg_data->block);
+	pr_debug("data %x block type %d mask %x\n",
+		  data, lut_cfg_data->block,
+		  IGC_CONFIG_MASK(lut_cfg_data->block));
+	c1 = base_addr + 4;
+	c2 = c1 + 4;
+	writel_relaxed(data, base_addr);
+	writel_relaxed(data, c1);
+	writel_relaxed(data, c2);
+	for (i = 0; i < IGC_LUT_ENTRIES; i++) {
+		c0c1_data[i] = readl_relaxed(base_addr) & IGC_DATA_MASK;
+		c0c1_data[i] |= (readl_relaxed(c1) & IGC_DATA_MASK) << 16;
+		c2_data[i] = readl_relaxed(c2) & IGC_DATA_MASK;
+	}
+	if (copy_to_user(lut_data->c0_c1_data, c0c1_data, sz)) {
+		pr_err("failed to copy the c0c1 data");
+		ret = -EFAULT;
+	}
+	if (!ret && copy_to_user(lut_data->c2_data, c2_data, sz)) {
+		pr_err("failed to copy the c2 data");
+		ret = -EFAULT;
+	}
+	kfree(c0c1_data);
+exit:
+	return ret;
+}
+
+
+static int pp_pgc_set_config(char __iomem *base_addr,
+		struct pp_sts_type *pp_sts, void *cfg_data,
+		u32 block_type)
+{
+	char __iomem *c0 = NULL, *c1 = NULL, *c2 = NULL;
+	u32 val = 0, i = 0, *sts = NULL;
+	struct mdp_pgc_lut_data *pgc_data = NULL;
+	struct mdp_pgc_lut_data_v1_7  *pgc_data_v17 = NULL;
+
+	if (!base_addr || !cfg_data || !pp_sts) {
+		pr_err("invalid params base_addr %pK cfg_data %pK pp_sts_type %pK\n",
+		      base_addr, cfg_data, pp_sts);
+		return -EINVAL;
+	}
+	if (block_type != DSPP && block_type != LM) {
+		pr_err("invalid block type %d\n", block_type);
+		return -EINVAL;
+	}
+	sts = (block_type == DSPP) ? &pp_sts->pgc_sts : &pp_sts->argc_sts;
+	pgc_data = (struct mdp_pgc_lut_data *) cfg_data;
+	if (pgc_data->version != mdp_pgc_v1_7) {
+		pr_err("invalid pgc version %d\n",
+			pgc_data->version);
+		return -EINVAL;
+	}
+	if (!(pgc_data->flags & ~(MDP_PP_OPS_READ))) {
+		pr_debug("only read ops is set %d", pgc_data->flags);
+		return 0;
+	}
+	if (pgc_data->flags & MDP_PP_OPS_DISABLE) {
+		pr_debug("disable GC\n");
+		goto set_ops;
+	}
+
+	pgc_data_v17 = (struct mdp_pgc_lut_data_v1_7 *) pgc_data->cfg_payload;
+	if (!pgc_data_v17) {
+		pr_err("invalid payload for GC %pK\n", pgc_data_v17);
+		return -EINVAL;
+	}
+
+	if (pgc_data_v17->len != PGC_LUT_ENTRIES || !pgc_data_v17->c0_data ||
+	    !pgc_data_v17->c1_data || !pgc_data_v17->c2_data) {
+		pr_err("Invalid params entries %d c0_data %pK c1_data %pK c2_data %pK\n",
+			pgc_data_v17->len, pgc_data_v17->c0_data,
+			pgc_data_v17->c1_data, pgc_data_v17->c2_data);
+		return -EINVAL;
+	}
+	c0 = base_addr + PGC_C0_LUT_INDEX;
+	c1 = c0 + PGC_C1C2_LUT_OFF;
+	c2 = c1 + PGC_C1C2_LUT_OFF;
+	/*  set the indexes to zero */
+	writel_relaxed(0, c0 + PGC_INDEX_OFF);
+	writel_relaxed(0, c1 + PGC_INDEX_OFF);
+	writel_relaxed(0, c2 + PGC_INDEX_OFF);
+	for (i = 0; i < PGC_LUT_ENTRIES; i += 2) {
+		val = pgc_data_v17->c0_data[i] & PGC_DATA_MASK;
+		val |= (pgc_data_v17->c0_data[i + 1] & PGC_DATA_MASK) <<
+			PGC_ODD_SHIFT;
+		writel_relaxed(val, c0);
+		val = pgc_data_v17->c1_data[i] & PGC_DATA_MASK;
+		val |= (pgc_data_v17->c1_data[i + 1] & PGC_DATA_MASK) <<
+			PGC_ODD_SHIFT;
+		writel_relaxed(val, c1);
+		val = pgc_data_v17->c2_data[i] & PGC_DATA_MASK;
+		val |= (pgc_data_v17->c2_data[i + 1] & PGC_DATA_MASK) <<
+			PGC_ODD_SHIFT;
+		writel_relaxed(val, c2);
+	}
+	if (block_type == DSPP) {
+		val = PGC_SWAP;
+		writel_relaxed(val, base_addr + PGC_LUT_SWAP);
+	}
+
+set_ops:
+	if (pgc_data->flags & MDP_PP_OPS_DISABLE) {
+		*sts &= ~PP_STS_ENABLE;
+		writel_relaxed(0, base_addr + PGC_OPMODE_OFF);
+	} else if (pgc_data->flags & MDP_PP_OPS_ENABLE) {
+		val = PGC_ENABLE;
+		val |= (pgc_data->flags & MDP_PP_PGC_ROUNDING_ENABLE)
+			? BIT(1) : 0;
+		writel_relaxed(val, base_addr + PGC_OPMODE_OFF);
+		*sts |= PP_STS_ENABLE;
+	}
+	return 0;
+}
+
+static int pp_pgc_get_config(char __iomem *base_addr, void *cfg_data,
+			   u32 block_type, u32 disp_num)
+{
+	int ret = 0;
+	char __iomem *c0 = NULL, *c1 = NULL, *c2 = NULL;
+	u32 *c0_data = NULL, *c1_data = NULL, *c2_data = NULL;
+	u32 val = 0, i = 0, sz = 0;
+	struct mdp_pgc_lut_data *pgc_data = NULL;
+	struct mdp_pgc_lut_data_v1_7  pgc_lut_data_v17;
+	struct mdp_pgc_lut_data_v1_7  *pgc_data_v17 = &pgc_lut_data_v17;
+
+	if (!base_addr || !cfg_data) {
+		pr_err("invalid params base_addr %pK cfg_data %pK block_type %d\n",
+		      base_addr, cfg_data, block_type);
+		return -EINVAL;
+	}
+	pgc_data = (struct mdp_pgc_lut_data *) cfg_data;
+	if (pgc_data->version != mdp_pgc_v1_7 || !pgc_data->cfg_payload) {
+		pr_err("invalid pgc version %d payload %pK\n",
+			pgc_data->version, pgc_data->cfg_payload);
+		return -EINVAL;
+	}
+	if (copy_from_user(pgc_data_v17, (void __user *) pgc_data->cfg_payload,
+			sizeof(*pgc_data_v17))) {
+		pr_err("copy from user failed for pgc lut data\n");
+		return -EFAULT;
+	}
+	if (!(pgc_data->flags & MDP_PP_OPS_READ)) {
+		pr_info("read ops is not set %d", pgc_data->flags);
+		return -EINVAL;
+	}
+	sz = PGC_LUT_ENTRIES * sizeof(u32);
+	if (!access_ok(VERIFY_WRITE, pgc_data_v17->c0_data, sz) ||
+	    !access_ok(VERIFY_WRITE, pgc_data_v17->c1_data, sz) ||
+	    !access_ok(VERIFY_WRITE, pgc_data_v17->c2_data, sz)) {
+		pr_err("incorrect payload for PGC read size %d\n",
+			PGC_LUT_ENTRIES);
+		return -EFAULT;
+	}
+	c0_data = kzalloc(sz * 3, GFP_KERNEL);
+	if (!c0_data)
+		return -ENOMEM;
+
+	c1_data = c0_data + PGC_LUT_ENTRIES;
+	c2_data = c1_data + PGC_LUT_ENTRIES;
+	c0 = base_addr + PGC_C0_LUT_INDEX;
+	c1 = c0 + PGC_C1C2_LUT_OFF;
+	c2 = c1 + PGC_C1C2_LUT_OFF;
+	/*  set the indexes to zero */
+	writel_relaxed(0, c0 + 4);
+	writel_relaxed(0, c1 + 4);
+	writel_relaxed(0, c2 + 4);
+	for (i = 0; i < PGC_LUT_ENTRIES; i += 2) {
+		val = readl_relaxed(c0);
+		c0_data[i] = val & PGC_DATA_MASK;
+		c0_data[i + 1] = (val >> PGC_ODD_SHIFT) & PGC_DATA_MASK;
+		val = readl_relaxed(c1);
+		c1_data[i] = val & PGC_DATA_MASK;
+		c1_data[i + 1] = (val >> PGC_ODD_SHIFT) & PGC_DATA_MASK;
+		val = readl_relaxed(c2);
+		c2_data[i] = val & PGC_DATA_MASK;
+		c2_data[i + 1] = (val >> PGC_ODD_SHIFT) & PGC_DATA_MASK;
+	}
+	if (copy_to_user(pgc_data_v17->c0_data, c0_data, sz)) {
+		pr_err("failed to copyuser c0 data of sz %d\n", sz);
+		ret = -EFAULT;
+	}
+	if (!ret && copy_to_user(pgc_data_v17->c1_data, c1_data, sz)) {
+		pr_err("failed to copyuser c1 data of sz %d\n", sz);
+		ret = -EFAULT;
+	}
+	if (!ret && copy_to_user(pgc_data_v17->c2_data, c2_data, sz)) {
+		pr_err("failed to copyuser c2 data of sz %d\n", sz);
+		ret = -EFAULT;
+	}
+	if (!ret)
+		pgc_data_v17->len = PGC_LUT_ENTRIES;
+	kfree(c0_data);
+	return ret;
+}
+
+static int pp_pcc_get_version(u32 *version)
+{
+	if (!version) {
+		pr_err("invalid param version %pK\n", version);
+		return -EINVAL;
+	}
+	*version = mdp_pcc_v1_7;
+	return 0;
+}
+
+static int pp_igc_get_version(u32 *version)
+{
+	if (!version) {
+		pr_err("invalid param version %pK\n", version);
+		return -EINVAL;
+	}
+	*version = mdp_igc_v1_7;
+	return 0;
+}
+
+static int pp_pgc_get_version(u32 *version)
+{
+	if (!version) {
+		pr_err("invalid param version %pK\n", version);
+		return -EINVAL;
+	}
+	*version = mdp_pgc_v1_7;
+	return 0;
+}
+
+static int pp_pa_get_version(u32 *version)
+{
+	if (!version) {
+		pr_err("invalid param version %pK\n", version);
+		return -EINVAL;
+	}
+	*version = mdp_pa_v1_7;
+	return 0;
+}
+
+static int pp_gamut_get_version(u32 *version)
+{
+	if (!version) {
+		pr_err("invalid param version %pK\n", version);
+		return -EINVAL;
+	}
+	*version = mdp_gamut_v1_7;
+	return 0;
+}
+
+static int pp_dither_get_version(u32 *version)
+{
+	if (!version) {
+		pr_err("invalid param version %pK\n", version);
+		return -EINVAL;
+	}
+	*version = mdp_dither_v1_7;
+	return 0;
+}
+
+static int pp_hist_lut_get_version(u32 *version)
+{
+	if (!version) {
+		pr_err("invalid param version %pK\n", version);
+		return -EINVAL;
+	}
+	*version = mdp_hist_lut_v1_7;
+	return 0;
+}
+
+static void pp_gamut_clock_gating_en(char __iomem *base_addr)
+{
+	u32 val;
+
+	if (base_addr) {
+		val = readl_relaxed(base_addr + GAMUT_CLK_CTRL);
+		if (val == GAMUT_CLK_GATING_PARTIAL_ACTIVE)
+			writel_relaxed(GAMUT_CLK_GATING_ACTIVE,
+						   base_addr + GAMUT_CLK_CTRL);
+	}
+}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp_v3.c b/drivers/video/fbdev/msm/mdss_mdp_pp_v3.c
new file mode 100644
index 0000000..7611ea4
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp_v3.c
@@ -0,0 +1,824 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/uaccess.h>
+#include "mdss_fb.h"
+#include "mdss_mdp.h"
+#include "mdss_mdp_pp.h"
+#include "mdss_mdp_pp_common.h"
+
+#define IGC_DSPP_OP_MODE_EN BIT(0)
+#define ENHIST_BIT_SHIFT 16
+/* PA related define */
+
+/* Offsets from DSPP/VIG base to PA block */
+#define PA_DSPP_BLOCK_REG_OFF 0x800
+#define PA_VIG_BLOCK_REG_OFF 0x1200
+
+/* Offsets to various subblocks from PA block
+ * in VIG/DSPP.
+ */
+#define PA_OP_MODE_REG_OFF 0x0
+#define PA_HIST_REG_OFF 0x4
+#define PA_LUTV_SWAP_REG_OFF 0x18
+#define PA_HSIC_REG_OFF 0x1C
+#define PA_DITHER_CTL_REG_OFF 0x2C
+#define PA_PWL_HOLD_REG_OFF 0x40
+
+/* Memory Color offsets */
+#define PA_MEM_COL_REG_OFF 0x80
+#define PA_MEM_SKIN_REG_OFF (PA_MEM_COL_REG_OFF)
+#define PA_MEM_SKY_REG_OFF  (PA_MEM_SKIN_REG_OFF + \
+				JUMP_REGISTERS_OFF(5))
+#define PA_MEM_FOL_REG_OFF  (PA_MEM_SKY_REG_OFF + \
+				JUMP_REGISTERS_OFF(5))
+#define PA_MEM_SKIN_ADJUST_P2_REG_OFF (PA_MEM_FOL_REG_OFF + \
+					JUMP_REGISTERS_OFF(5))
+#define PA_MEM_SKY_ADJUST_P2_REG_OFF (PA_MEM_SKIN_ADJUST_P2_REG_OFF + \
+					JUMP_REGISTERS_OFF(2))
+#define PA_MEM_FOL_ADJUST_P2_REG_OFF (PA_MEM_SKY_ADJUST_P2_REG_OFF + \
+					JUMP_REGISTERS_OFF(2))
+
+#define PA_SZONE_REG_OFF 0x100
+#define PA_LUTV_REG_OFF 0x200
+#define PA_HIST_RAM_REG_OFF 0x400
+
+#define PPB_GLOBAL_DITHER_REG_OFF 0x30E0
+#define DITHER_MATRIX_LEN 16
+#define DITHER_DEPTH_MAP_INDEX 9
+static u32 dither_matrix[DITHER_MATRIX_LEN] = {
+	15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10};
+static u32 dither_depth_map[DITHER_DEPTH_MAP_INDEX] = {
+	0, 0, 0, 0, 0, 1, 2, 3, 3};
+
+/* histogram prototypes */
+static int pp_get_hist_offset(u32 block, u32 *ctl_off);
+static int pp_hist_set_config(char __iomem *base_addr,
+		struct pp_sts_type *pp_sts, void *cfg_data,
+		u32 block_type);
+static int pp_hist_get_config(char __iomem *base_addr, void *cfg_data,
+		u32 block_type, u32 disp_num);
+
+/* PA LUT prototypes */
+static int pp_hist_lut_get_config(char __iomem *base_addr, void *cfg_data,
+			   u32 block_type, u32 disp_num);
+static int pp_hist_lut_set_config(char __iomem *base_addr,
+		struct pp_sts_type *pp_sts, void *cfg_data,
+		u32 block_type);
+static int pp_hist_lut_get_version(u32 *version);
+static void pp_hist_lut_opmode_config(char __iomem *base_addr,
+		struct pp_sts_type *pp_sts);
+
+static int pp_pa_set_config(char __iomem *base_addr,
+		struct pp_sts_type *pp_sts, void *cfg_data,
+		u32 block_type);
+static int pp_pa_get_config(char __iomem *base_addr, void *cfg_data,
+		u32 block_type, u32 disp_num);
+static int pp_pa_get_version(u32 *version);
+
+static int pp_dither_get_config(char __iomem *base_addr, void *cfg_data,
+		u32 block_type, u32 disp_num);
+static int pp_dither_set_config(char __iomem *base_addr,
+		struct pp_sts_type *pp_sts, void *cfg_data,
+		u32 block_type);
+static int pp_dither_get_version(u32 *version);
+
+static void pp_opmode_config(int location, struct pp_sts_type *pp_sts,
+		u32 *opmode, int side);
+
+static void pp_pa_set_global_adj_regs(char __iomem *base_addr,
+		struct mdp_pa_data_v1_7 *pa_data, u32 flag);
+
+static void pp_pa_set_mem_col(char __iomem *base_addr,
+		struct mdp_pa_data_v1_7 *pa_data, u32 flags);
+
+static void pp_pa_set_six_zone(char __iomem *base_addr,
+		struct mdp_pa_data_v1_7 *pa_data,
+		u32 flags);
+
+static void pp_pa_opmode_config(char __iomem *base_addr,
+		struct pp_sts_type *pp_sts);
+
+void *pp_get_driver_ops_v3(struct mdp_pp_driver_ops *ops)
+{
+	void *pp_cfg = NULL;
+
+	if (!ops) {
+		pr_err("PP driver ops invalid %pK\n", ops);
+		return ERR_PTR(-EINVAL);
+	}
+
+	pp_cfg = pp_get_driver_ops_v1_7(ops);
+	if (IS_ERR_OR_NULL(pp_cfg))
+		return NULL;
+	/* PA ops */
+	ops->pp_ops[PA].pp_set_config = pp_pa_set_config;
+	ops->pp_ops[PA].pp_get_config = pp_pa_get_config;
+	ops->pp_ops[PA].pp_get_version = pp_pa_get_version;
+
+	/* HIST_LUT ops */
+	ops->pp_ops[HIST_LUT].pp_set_config = pp_hist_lut_set_config;
+	ops->pp_ops[HIST_LUT].pp_get_config = pp_hist_lut_get_config;
+	ops->pp_ops[HIST_LUT].pp_get_version = pp_hist_lut_get_version;
+
+	/* HIST ops */
+	ops->pp_ops[HIST].pp_set_config = pp_hist_set_config;
+	ops->pp_ops[HIST].pp_get_config = pp_hist_get_config;
+	ops->pp_ops[HIST].pp_get_version = NULL;
+
+	/* Dither ops */
+	ops->pp_ops[DITHER].pp_set_config = pp_dither_set_config;
+	ops->pp_ops[DITHER].pp_get_config = pp_dither_get_config;
+	ops->pp_ops[DITHER].pp_get_version = pp_dither_get_version;
+
+	/* Set opmode pointers */
+	ops->pp_opmode_config = pp_opmode_config;
+
+	ops->get_hist_offset = pp_get_hist_offset;
+	ops->gamut_clk_gate_en = NULL;
+
+	return pp_cfg;
+}
+
+static int pp_get_hist_offset(u32 block, u32 *ctl_off)
+{
+	int ret = 0;
+
+	if (!ctl_off) {
+		pr_err("invalid params ctl_off %pK\n", ctl_off);
+		return -EINVAL;
+	}
+
+	switch (block) {
+	case SSPP_VIG:
+		*ctl_off = PA_VIG_BLOCK_REG_OFF + PA_HIST_REG_OFF;
+		break;
+	case DSPP:
+		*ctl_off = PA_DSPP_BLOCK_REG_OFF + PA_HIST_REG_OFF;
+		break;
+	default:
+		pr_err("Invalid block type %d\n", block);
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+static int pp_hist_set_config(char __iomem *base_addr,
+		struct pp_sts_type *pp_sts, void *cfg_data, u32 block_type)
+{
+	u32 opmode = 0;
+	struct pp_hist_col_info *hist_info = NULL;
+
+	if (!base_addr || !cfg_data || !pp_sts) {
+		pr_err("invalid params base_addr %pK cfg_data %pK pp_sts_type %pK\n",
+		      base_addr, cfg_data, pp_sts);
+		return -EINVAL;
+	}
+
+	if (block_type != DSPP) {
+		pr_err("Invalid block type %d\n", block_type);
+		return -EINVAL;
+	}
+
+	hist_info = (struct pp_hist_col_info *)cfg_data;
+	opmode = readl_relaxed(base_addr + PA_DSPP_BLOCK_REG_OFF +
+				PA_OP_MODE_REG_OFF);
+	/* set the hist_en bit */
+	if (hist_info->col_en) {
+		pp_sts->hist_sts |= PP_STS_ENABLE;
+		opmode |= BIT(16);
+	} else {
+		pp_sts->hist_sts &= ~PP_STS_ENABLE;
+		opmode &= ~BIT(16);
+	}
+
+	writel_relaxed(opmode, base_addr + PA_DSPP_BLOCK_REG_OFF +
+			PA_OP_MODE_REG_OFF);
+	return 0;
+}
+
+static int pp_hist_get_config(char __iomem *base_addr, void *cfg_data,
+			   u32 block_type, u32 disp_num)
+{
+	int i = 0;
+	u32 sum = 0;
+	struct pp_hist_col_info *hist_info = NULL;
+	char __iomem *hist_addr;
+
+	if (!base_addr || !cfg_data) {
+		pr_err("invalid params base_addr %pK cfg_data %pK\n",
+		       base_addr, cfg_data);
+		return -EINVAL;
+	}
+
+	if (block_type != DSPP) {
+		pr_err("Invalid block type %d\n", block_type);
+		return -EINVAL;
+	}
+
+	hist_info = (struct pp_hist_col_info *) cfg_data;
+	hist_addr = base_addr + PA_DSPP_BLOCK_REG_OFF + PA_HIST_RAM_REG_OFF;
+
+	for (i = 0; i < HIST_V_SIZE; i++) {
+		hist_info->data[i] = readl_relaxed(hist_addr) & REG_MASK(24);
+		hist_addr += 0x4;
+		sum += hist_info->data[i];
+	}
+	hist_info->hist_cnt_read++;
+	return sum;
+}
+
+static int pp_hist_lut_get_config(char __iomem *base_addr, void *cfg_data,
+			   u32 block_type, u32 disp_num)
+{
+
+	int ret = 0, i = 0;
+	char __iomem *hist_lut_addr;
+	u32 sz = 0, temp = 0, *data = NULL;
+	struct mdp_hist_lut_data_v1_7 lut_data_v1_7;
+	struct mdp_hist_lut_data_v1_7 *lut_data = &lut_data_v1_7;
+	struct mdp_hist_lut_data *lut_cfg_data = NULL;
+
+	if (!base_addr || !cfg_data) {
+		pr_err("invalid params base_addr %pK cfg_data %pK\n",
+		       base_addr, cfg_data);
+		return -EINVAL;
+	}
+
+	if (block_type != DSPP) {
+		pr_err("Invalid block type %d\n", block_type);
+		return -EINVAL;
+	}
+
+	lut_cfg_data = (struct mdp_hist_lut_data *) cfg_data;
+	if (!(lut_cfg_data->ops & MDP_PP_OPS_READ)) {
+		pr_err("read ops not set for hist_lut %d\n", lut_cfg_data->ops);
+		return 0;
+	}
+	if (lut_cfg_data->version != mdp_hist_lut_v1_7 ||
+		!lut_cfg_data->cfg_payload) {
+		pr_err("invalid hist_lut version %d payload %pK\n",
+		       lut_cfg_data->version, lut_cfg_data->cfg_payload);
+		return -EINVAL;
+	}
+	if (copy_from_user(lut_data, (void __user *) lut_cfg_data->cfg_payload,
+		sizeof(*lut_data))) {
+		pr_err("copy from user failed for lut_data\n");
+		return -EFAULT;
+	}
+	if (lut_data->len != ENHIST_LUT_ENTRIES) {
+		pr_err("invalid hist_lut len %d", lut_data->len);
+		return -EINVAL;
+	}
+	sz = ENHIST_LUT_ENTRIES * sizeof(u32);
+	if (!access_ok(VERIFY_WRITE, lut_data->data, sz)) {
+		pr_err("invalid lut address for hist_lut sz %d\n", sz);
+		return -EFAULT;
+	}
+
+	hist_lut_addr = base_addr + PA_DSPP_BLOCK_REG_OFF + PA_LUTV_REG_OFF;
+
+	data = kzalloc(sz, GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	for (i = 0; i < ENHIST_LUT_ENTRIES; i += 2) {
+		temp = readl_relaxed(hist_lut_addr);
+		data[i] = temp & REG_MASK(10);
+		data[i + 1] =
+			(temp & REG_MASK_SHIFT(10, 16)) >> ENHIST_BIT_SHIFT;
+		hist_lut_addr += 4;
+	}
+	if (copy_to_user(lut_data->data, data, sz)) {
+		pr_err("failed to copy the hist_lut back to user\n");
+		ret = -EFAULT;
+	}
+	kfree(data);
+	return ret;
+}
+
+static int pp_hist_lut_set_config(char __iomem *base_addr,
+		struct pp_sts_type *pp_sts, void *cfg_data,
+		u32 block_type)
+{
+	int ret = 0, i = 0;
+	u32 temp = 0;
+	struct mdp_hist_lut_data *lut_cfg_data = NULL;
+	struct mdp_hist_lut_data_v1_7 *lut_data = NULL;
+	char __iomem *hist_lut_addr = NULL, *swap_addr = NULL;
+
+	if (!base_addr || !cfg_data || !pp_sts) {
+		pr_err("invalid params base_addr %pK cfg_data %pK pp_sts_type %pK\n",
+		      base_addr, cfg_data, pp_sts);
+		return -EINVAL;
+	}
+
+	if (block_type != DSPP) {
+		pr_err("Invalid block type %d\n", block_type);
+		return -EINVAL;
+	}
+
+	lut_cfg_data = (struct mdp_hist_lut_data *) cfg_data;
+	if (lut_cfg_data->version != mdp_hist_lut_v1_7) {
+		pr_err("invalid hist_lut version %d\n", lut_cfg_data->version);
+		return -EINVAL;
+	}
+
+	if (!(lut_cfg_data->ops & ~(MDP_PP_OPS_READ))) {
+		pr_err("only read ops set for lut\n");
+		return ret;
+	}
+	if (lut_cfg_data->ops & MDP_PP_OPS_DISABLE ||
+		!(lut_cfg_data->ops & MDP_PP_OPS_WRITE)) {
+		pr_debug("non write ops set %d\n", lut_cfg_data->ops);
+		goto hist_lut_set_sts;
+	}
+	lut_data = lut_cfg_data->cfg_payload;
+	if (!lut_data) {
+		pr_err("invalid hist_lut cfg_payload %pK\n", lut_data);
+		return -EINVAL;
+	}
+
+	if (lut_data->len != ENHIST_LUT_ENTRIES || !lut_data->data) {
+		pr_err("invalid hist_lut len %d data %pK\n",
+		       lut_data->len, lut_data->data);
+		return -EINVAL;
+	}
+
+	hist_lut_addr = base_addr + PA_DSPP_BLOCK_REG_OFF + PA_LUTV_REG_OFF;
+	swap_addr = base_addr + PA_DSPP_BLOCK_REG_OFF + PA_LUTV_SWAP_REG_OFF;
+
+	for (i = 0; i < ENHIST_LUT_ENTRIES; i += 2) {
+		temp = (lut_data->data[i] & REG_MASK(10)) |
+			((lut_data->data[i + 1] & REG_MASK(10))
+			 << ENHIST_BIT_SHIFT);
+
+		writel_relaxed(temp, hist_lut_addr);
+		hist_lut_addr += 4;
+	}
+
+	writel_relaxed(1, swap_addr);
+
+hist_lut_set_sts:
+	if (lut_cfg_data->ops & MDP_PP_OPS_DISABLE) {
+		pp_sts->enhist_sts &= ~(PP_STS_ENABLE | PP_STS_PA_LUT_FIRST);
+	} else if (lut_cfg_data->ops & MDP_PP_OPS_ENABLE) {
+		pp_sts->enhist_sts |= PP_STS_ENABLE;
+		if (lut_cfg_data->hist_lut_first)
+			pp_sts->enhist_sts |= PP_STS_PA_LUT_FIRST;
+		else
+			pp_sts->enhist_sts &= ~PP_STS_PA_LUT_FIRST;
+	}
+
+	pp_hist_lut_opmode_config(base_addr + PA_DSPP_BLOCK_REG_OFF, pp_sts);
+	return ret;
+}
+
+static int pp_hist_lut_get_version(u32 *version)
+{
+	if (!version) {
+		pr_err("invalid param version %pK\n", version);
+		return -EINVAL;
+	}
+	*version = mdp_hist_lut_v1_7;
+	return 0;
+}
+
+static void pp_hist_lut_opmode_config(char __iomem *base_addr,
+		struct pp_sts_type *pp_sts)
+{
+	u32 opmode = 0;
+
+	if (!base_addr || !pp_sts) {
+		pr_err("invalid params base_addr %pK pp_sts_type %pK\n",
+			base_addr, pp_sts);
+		return;
+	}
+	opmode = readl_relaxed(base_addr + PA_OP_MODE_REG_OFF);
+
+	/* set the hist_lutv_en and hist_lutv_first_en bits */
+	if (pp_sts->enhist_sts & PP_STS_ENABLE) {
+		opmode |= BIT(19) | BIT(20);
+		opmode |= (pp_sts->enhist_sts & PP_STS_PA_LUT_FIRST) ?
+				BIT(21) : 0;
+	} else {
+		opmode &= ~(BIT(19) | BIT(21));
+		if (!(pp_sts->pa_sts & PP_STS_ENABLE))
+			opmode &= ~BIT(20);
+	}
+
+	writel_relaxed(opmode, base_addr + PA_OP_MODE_REG_OFF);
+}
+
+static int pp_pa_set_config(char __iomem *base_addr,
+		struct pp_sts_type *pp_sts, void *cfg_data,
+		u32 block_type)
+{
+	struct mdp_pa_v2_cfg_data *pa_cfg_data = NULL;
+	struct mdp_pa_data_v1_7 *pa_data = NULL;
+	char __iomem *block_addr = NULL;
+
+	if (!base_addr || !cfg_data || !pp_sts) {
+		pr_err("invalid params base_addr %pK cfg_data %pK pp_sts_type %pK\n",
+				base_addr, cfg_data, pp_sts);
+		return -EINVAL;
+	}
+	if ((block_type != DSPP) && (block_type != SSPP_VIG)) {
+		pr_err("Invalid block type %d\n", block_type);
+		return -EINVAL;
+	}
+
+	pa_cfg_data = (struct mdp_pa_v2_cfg_data *) cfg_data;
+	if (pa_cfg_data->version != mdp_pa_v1_7) {
+		pr_err("invalid pa version %d\n", pa_cfg_data->version);
+		return -EINVAL;
+	}
+	if (!(pa_cfg_data->flags & ~(MDP_PP_OPS_READ))) {
+		pr_info("only read ops is set %d", pa_cfg_data->flags);
+		return 0;
+	}
+
+	block_addr = base_addr +
+		((block_type == DSPP) ? PA_DSPP_BLOCK_REG_OFF :
+		 PA_VIG_BLOCK_REG_OFF);
+
+	if (pa_cfg_data->flags & MDP_PP_OPS_DISABLE ||
+		!(pa_cfg_data->flags & MDP_PP_OPS_WRITE)) {
+		pr_debug("pa_cfg_data->flags = %d\n", pa_cfg_data->flags);
+		goto pa_set_sts;
+	}
+
+	pa_data = pa_cfg_data->cfg_payload;
+	if (!pa_data) {
+		pr_err("invalid payload for pa %pK\n", pa_data);
+		return -EINVAL;
+	}
+
+	pp_pa_set_global_adj_regs(block_addr, pa_data, pa_cfg_data->flags);
+	pp_pa_set_mem_col(block_addr, pa_data, pa_cfg_data->flags);
+	if (block_type == DSPP)
+		pp_pa_set_six_zone(block_addr, pa_data, pa_cfg_data->flags);
+
+pa_set_sts:
+	pp_pa_set_sts(pp_sts, pa_data, pa_cfg_data->flags, block_type);
+	pp_pa_opmode_config(block_addr, pp_sts);
+
+	return 0;
+}
+
+static int pp_pa_get_config(char __iomem *base_addr, void *cfg_data,
+		u32 block_type, u32 disp_num)
+{
+	return -ENOTSUPP;
+}
+
+static int pp_pa_get_version(u32 *version)
+{
+	if (!version) {
+		pr_err("invalid param version");
+		return -EINVAL;
+	}
+	*version = mdp_pa_v1_7;
+	return 0;
+}
+
+static int pp_dither_get_config(char __iomem *base_addr, void *cfg_data,
+		u32 block_type, u32 disp_num)
+{
+	return -ENOTSUPP;
+}
+
+static int pp_dither_set_config(char __iomem *base_addr,
+		struct pp_sts_type *pp_sts, void *cfg_data,
+		u32 block_type)
+{
+	int i = 0;
+	u32 data;
+	struct mdp_dither_cfg_data *dither_cfg_data = NULL;
+	struct mdp_dither_data_v1_7 *dither_data = NULL;
+	char __iomem *dither_opmode = NULL;
+
+	if (!base_addr || !cfg_data || !pp_sts) {
+		pr_err("invalid params base_addr %pK cfg_data %pK pp_sts_type %pK\n",
+		      base_addr, cfg_data, pp_sts);
+		return -EINVAL;
+	}
+	if (block_type != PPB)
+		return -ENOTSUPP;
+	dither_opmode = base_addr + PPB_GLOBAL_DITHER_REG_OFF;
+	base_addr = dither_opmode + 4;
+
+	dither_cfg_data = (struct mdp_dither_cfg_data *) cfg_data;
+
+	if (dither_cfg_data->version != mdp_dither_v1_7) {
+		pr_err("invalid dither version %d\n", dither_cfg_data->version);
+		return -EINVAL;
+	}
+
+	if (dither_cfg_data->flags & MDP_PP_OPS_READ) {
+		pr_err("Invalid context for read operation\n");
+		return -EINVAL;
+	}
+
+	if (dither_cfg_data->flags & MDP_PP_OPS_DISABLE ||
+		!(dither_cfg_data->flags & MDP_PP_OPS_WRITE)) {
+		pr_debug("non write ops set %d\n", dither_cfg_data->flags);
+		goto dither_set_sts;
+	}
+
+	dither_data = dither_cfg_data->cfg_payload;
+	if (!dither_data) {
+		pr_err("invalid payload for dither %pK\n", dither_data);
+		return -EINVAL;
+	}
+
+	if ((dither_data->g_y_depth >= DITHER_DEPTH_MAP_INDEX) ||
+		(dither_data->b_cb_depth >= DITHER_DEPTH_MAP_INDEX) ||
+		(dither_data->r_cr_depth >= DITHER_DEPTH_MAP_INDEX)) {
+		pr_err("invalid data for dither, g_y_depth %d y_cb_depth %d r_cr_depth %d\n",
+			dither_data->g_y_depth, dither_data->b_cb_depth,
+			dither_data->r_cr_depth);
+		return -EINVAL;
+	}
+	data = dither_depth_map[dither_data->g_y_depth];
+	data |= dither_depth_map[dither_data->b_cb_depth] << 2;
+	data |= dither_depth_map[dither_data->r_cr_depth] << 4;
+	data |= (dither_data->temporal_en) ? (1  << 8) : 0;
+	writel_relaxed(data, base_addr);
+	base_addr += 4;
+	for (i = 0; i < DITHER_MATRIX_LEN; i += 4) {
+		data = (dither_matrix[i] & REG_MASK(4)) |
+			((dither_matrix[i + 1] & REG_MASK(4)) << 4) |
+			((dither_matrix[i + 2] & REG_MASK(4)) << 8) |
+			((dither_matrix[i + 3] & REG_MASK(4)) << 12);
+		writel_relaxed(data, base_addr);
+		base_addr += 4;
+	}
+
+dither_set_sts:
+	pp_sts_set_split_bits(&pp_sts->dither_sts,
+			dither_cfg_data->flags);
+	if (dither_cfg_data->flags & MDP_PP_OPS_DISABLE) {
+		pp_sts->dither_sts &= ~PP_STS_ENABLE;
+		writel_relaxed(0, dither_opmode);
+	} else if (dither_cfg_data->flags & MDP_PP_OPS_ENABLE) {
+		pp_sts->dither_sts |= PP_STS_ENABLE;
+		if (pp_sts_is_enabled(pp_sts->dither_sts, pp_sts->side_sts))
+			writel_relaxed(BIT(0), dither_opmode);
+	}
+	return 0;
+}
+
+static int pp_dither_get_version(u32 *version)
+{
+	if (!version) {
+		pr_err("invalid param version");
+		return -EINVAL;
+	}
+	*version = mdp_dither_v1_7;
+	return 0;
+}
+
+static void pp_opmode_config(int location, struct pp_sts_type *pp_sts,
+		u32 *opmode, int side)
+{
+	if (!pp_sts || !opmode) {
+		pr_err("Invalid pp_sts %pK or opmode %pK\n", pp_sts, opmode);
+		return;
+	}
+	switch (location) {
+	case SSPP_DMA:
+		break;
+	case SSPP_VIG:
+		break;
+	case DSPP:
+		if (pp_sts_is_enabled(pp_sts->igc_sts, side))
+			*opmode |= IGC_DSPP_OP_MODE_EN;
+		break;
+	case LM:
+		if (pp_sts->argc_sts & PP_STS_ENABLE)
+			pr_debug("pgc in LM enabled\n");
+		break;
+	default:
+		pr_err("Invalid block type %d\n", location);
+		break;
+	}
+}
+
+static void pp_pa_set_global_adj_regs(char __iomem *base_addr,
+		struct mdp_pa_data_v1_7 *pa_data, u32 flags)
+{
+	char __iomem *addr = NULL;
+
+	addr = base_addr + PA_HSIC_REG_OFF;
+	if (flags & MDP_PP_PA_HUE_ENABLE)
+		writel_relaxed((pa_data->global_hue_adj &
+					REG_MASK(12)), addr);
+	addr += 4;
+	if (flags & MDP_PP_PA_SAT_ENABLE)
+		writel_relaxed((pa_data->global_sat_adj &
+					REG_MASK(16)), addr);
+	addr += 4;
+	if (flags & MDP_PP_PA_VAL_ENABLE)
+		writel_relaxed((pa_data->global_val_adj &
+					REG_MASK(8)), addr);
+	addr += 4;
+	if (flags & MDP_PP_PA_CONT_ENABLE)
+		writel_relaxed((pa_data->global_cont_adj &
+					REG_MASK(8)), addr);
+}
+
+static void pp_pa_set_mem_col(char __iomem *base_addr,
+		struct mdp_pa_data_v1_7 *pa_data, u32 flags)
+{
+	char __iomem *mem_col_base = NULL, *mem_col_p2 = NULL;
+	struct mdp_pa_mem_col_data_v1_7 *mem_col_data = NULL;
+	uint32_t mask = 0, hold = 0, hold_mask = 0;
+	uint32_t hold_curr = 0;
+
+	flags &= (MDP_PP_PA_SKIN_ENABLE | MDP_PP_PA_SKY_ENABLE |
+			MDP_PP_PA_FOL_ENABLE);
+	if (!flags)
+		return;
+	while (flags) {
+		if (flags & MDP_PP_PA_SKIN_ENABLE) {
+			flags &= ~MDP_PP_PA_SKIN_ENABLE;
+			mem_col_base = base_addr + PA_MEM_SKIN_REG_OFF;
+			mem_col_p2 = base_addr + PA_MEM_SKIN_ADJUST_P2_REG_OFF;
+			mem_col_data = &pa_data->skin_cfg;
+			hold |= pa_data->skin_cfg.sat_hold & REG_MASK(2);
+			hold |= (pa_data->skin_cfg.val_hold & REG_MASK(2))
+				<< 2;
+			hold_mask |= REG_MASK(4);
+		} else if (flags & MDP_PP_PA_SKY_ENABLE) {
+			flags &= ~MDP_PP_PA_SKY_ENABLE;
+			mem_col_base = base_addr + PA_MEM_SKY_REG_OFF;
+			mem_col_p2 = base_addr + PA_MEM_SKY_ADJUST_P2_REG_OFF;
+			mem_col_data = &pa_data->sky_cfg;
+			hold |= (pa_data->sky_cfg.sat_hold & REG_MASK(2)) << 4;
+			hold |= (pa_data->sky_cfg.val_hold & REG_MASK(2)) << 6;
+			hold_mask |= REG_MASK_SHIFT(4, 4);
+		} else if (flags & MDP_PP_PA_FOL_ENABLE) {
+			flags &= ~MDP_PP_PA_FOL_ENABLE;
+			mem_col_base = base_addr + PA_MEM_FOL_REG_OFF;
+			mem_col_p2 = base_addr + PA_MEM_FOL_ADJUST_P2_REG_OFF;
+			mem_col_data = &pa_data->fol_cfg;
+			hold |= (pa_data->fol_cfg.sat_hold & REG_MASK(2)) << 8;
+			hold |= (pa_data->fol_cfg.val_hold & REG_MASK(2)) << 10;
+			hold_mask |= REG_MASK_SHIFT(4, 8);
+		} else {
+			break;
+		}
+		mask = REG_MASK_SHIFT(16, 16) | REG_MASK(11);
+		writel_relaxed((mem_col_data->color_adjust_p0 & mask),
+				mem_col_base);
+		mem_col_base += 4;
+		mask = U32_MAX;
+		writel_relaxed((mem_col_data->color_adjust_p1 & mask),
+				mem_col_base);
+		mem_col_base += 4;
+		mask = REG_MASK_SHIFT(11, 16) | REG_MASK(11);
+		writel_relaxed((mem_col_data->hue_region & mask),
+				mem_col_base);
+		mem_col_base += 4;
+		mask = REG_MASK(24);
+		writel_relaxed((mem_col_data->sat_region & mask),
+				mem_col_base);
+		mem_col_base += 4;
+		/* mask is same for val and sat */
+		writel_relaxed((mem_col_data->val_region & mask),
+				mem_col_base);
+		mask = U32_MAX;
+		writel_relaxed((mem_col_data->color_adjust_p2 & mask),
+				mem_col_p2);
+		mem_col_p2 += 4;
+		writel_relaxed((mem_col_data->blend_gain & mask),
+				mem_col_p2);
+	}
+	hold_curr = readl_relaxed(base_addr + PA_PWL_HOLD_REG_OFF) &
+			REG_MASK(16);
+	hold_curr &= ~hold_mask;
+	hold = hold_curr | (hold & hold_mask);
+	writel_relaxed(hold, (base_addr + PA_PWL_HOLD_REG_OFF));
+}
+
+static void pp_pa_set_six_zone(char __iomem *base_addr,
+		struct mdp_pa_data_v1_7 *pa_data,
+		u32 flags)
+{
+	char __iomem *addr = base_addr + PA_SZONE_REG_OFF;
+	uint32_t mask_p0 = 0, mask_p1 = 0, hold = 0, hold_mask = 0;
+	uint32_t hold_curr = 0;
+	int i = 0;
+
+	if (!(flags & MDP_PP_PA_SIX_ZONE_ENABLE))
+		return;
+
+	if (pa_data->six_zone_len != MDP_SIX_ZONE_LUT_SIZE ||
+			!pa_data->six_zone_curve_p0 ||
+			!pa_data->six_zone_curve_p1) {
+		pr_err("Invalid six zone data: len %d curve_p0 %pK curve_p1 %pK\n",
+				pa_data->six_zone_len,
+				pa_data->six_zone_curve_p0,
+				pa_data->six_zone_curve_p1);
+		return;
+	}
+	mask_p0 = REG_MASK(12);
+	mask_p1 = REG_MASK(12) | REG_MASK_SHIFT(12, 16);
+	writel_relaxed((pa_data->six_zone_curve_p1[0] & mask_p1), addr + 4);
+	/* Update the index to 0 and write value */
+	writel_relaxed((pa_data->six_zone_curve_p0[0] & mask_p0) | BIT(26),
+			addr);
+	for (i = 1; i < MDP_SIX_ZONE_LUT_SIZE; i++) {
+		writel_relaxed((pa_data->six_zone_curve_p1[i] & mask_p1),
+				addr + 4);
+		writel_relaxed((pa_data->six_zone_curve_p0[i] & mask_p0), addr);
+	}
+	addr += 8;
+	writel_relaxed(pa_data->six_zone_thresh, addr);
+	addr += 4;
+	writel_relaxed(pa_data->six_zone_adj_p0 & REG_MASK(16), addr);
+	addr += 4;
+	writel_relaxed(pa_data->six_zone_adj_p1, addr);
+
+	hold = (pa_data->six_zone_sat_hold & REG_MASK(2)) << 12;
+	hold |= (pa_data->six_zone_val_hold & REG_MASK(2)) << 14;
+	hold_mask = REG_MASK_SHIFT(4, 12);
+	hold_curr = readl_relaxed(base_addr + PA_PWL_HOLD_REG_OFF) &
+					REG_MASK(16);
+	hold_curr &= ~hold_mask;
+	hold = hold_curr | (hold & hold_mask);
+	writel_relaxed(hold, (base_addr + PA_PWL_HOLD_REG_OFF));
+}
+
+static void pp_pa_opmode_config(char __iomem *base_addr,
+		struct pp_sts_type *pp_sts)
+{
+	uint32_t opmode = 0;
+
+	/* set the PA bits */
+	if (pp_sts->pa_sts & PP_STS_ENABLE) {
+		opmode |= BIT(20);
+
+		if (pp_sts->pa_sts & PP_STS_PA_HUE_MASK)
+			opmode |= BIT(25);
+		if (pp_sts->pa_sts & PP_STS_PA_SAT_MASK)
+			opmode |= BIT(26);
+		if (pp_sts->pa_sts & PP_STS_PA_VAL_MASK)
+			opmode |= BIT(27);
+		if (pp_sts->pa_sts & PP_STS_PA_CONT_MASK)
+			opmode |= BIT(28);
+		if (pp_sts->pa_sts & PP_STS_PA_SAT_ZERO_EXP_EN)
+			opmode |= BIT(1);
+		if (pp_sts->pa_sts & PP_STS_PA_MEM_COL_SKIN_MASK)
+			opmode |= BIT(5);
+		if (pp_sts->pa_sts & PP_STS_PA_MEM_COL_FOL_MASK)
+			opmode |= BIT(6);
+		if (pp_sts->pa_sts & PP_STS_PA_MEM_COL_SKY_MASK)
+			opmode |= BIT(7);
+		if (pp_sts->pa_sts & PP_STS_PA_SIX_ZONE_HUE_MASK)
+			opmode |= BIT(29);
+		if (pp_sts->pa_sts & PP_STS_PA_SIX_ZONE_SAT_MASK)
+			opmode |= BIT(30);
+		if (pp_sts->pa_sts & PP_STS_PA_SIX_ZONE_VAL_MASK)
+			opmode |= BIT(31);
+		if (pp_sts->pa_sts & PP_STS_PA_MEM_PROT_HUE_EN)
+			opmode |= BIT(22);
+		if (pp_sts->pa_sts & PP_STS_PA_MEM_PROT_SAT_EN)
+			opmode |= BIT(23);
+		if (pp_sts->pa_sts & PP_STS_PA_MEM_PROT_VAL_EN)
+			opmode |= BIT(24);
+		if (pp_sts->pa_sts & PP_STS_PA_MEM_PROT_CONT_EN)
+			opmode |= BIT(18);
+		if (pp_sts->pa_sts & PP_STS_PA_MEM_PROT_BLEND_EN)
+			opmode |= BIT(3);
+		if (pp_sts->pa_sts & PP_STS_PA_MEM_PROT_SIX_EN)
+			opmode |= BIT(17);
+	}
+
+	/* reset hist_en, hist_lutv_en and hist_lutv_first_en
+	 *  bits based on the pp_sts
+	 */
+	if (pp_sts->hist_sts & PP_STS_ENABLE)
+		opmode |= BIT(16);
+	if (pp_sts->enhist_sts & PP_STS_ENABLE)
+		opmode |= BIT(19) | BIT(20);
+	if (pp_sts->enhist_sts & PP_STS_PA_LUT_FIRST)
+		opmode |= BIT(21);
+
+	writel_relaxed(opmode, base_addr + PA_OP_MODE_REG_OFF);
+}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_splash_logo.c b/drivers/video/fbdev/msm/mdss_mdp_splash_logo.c
new file mode 100644
index 0000000..f17cf6f
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_splash_logo.c
@@ -0,0 +1,774 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2015, 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/memblock.h>
+#include <linux/bootmem.h>
+#include <linux/iommu.h>
+#include <linux/of_address.h>
+#include <linux/fb.h>
+#include <linux/dma-buf.h>
+
+#include "mdss_fb.h"
+#include "mdss_mdp.h"
+#include "splash.h"
+#include "mdss_mdp_splash_logo.h"
+#include "mdss_smmu.h"
+
+#define INVALID_PIPE_INDEX 0xFFFF
+#define MAX_FRAME_DONE_COUNT_WAIT 2
+
+static int mdss_mdp_splash_alloc_memory(struct msm_fb_data_type *mfd,
+							uint32_t size)
+{
+	int rc;
+	struct msm_fb_splash_info *sinfo;
+	unsigned long buf_size = size;
+	struct mdss_data_type *mdata;
+	struct ion_handle *handle;
+
+	if (!mfd || !size)
+		return -EINVAL;
+
+	mdata = mfd_to_mdata(mfd);
+	sinfo = &mfd->splash_info;
+
+	if (!mdata || !mdata->iclient || sinfo->splash_buffer)
+		return -EINVAL;
+
+	handle = ion_alloc(mdata->iclient, size, SZ_4K,
+				ION_HEAP(ION_SYSTEM_HEAP_ID), 0);
+	if (IS_ERR_OR_NULL(handle)) {
+		pr_err("ion memory allocation failed\n");
+		rc = PTR_RET(handle);
+		goto end;
+	}
+
+	sinfo->size = size;
+	sinfo->dma_buf = ion_share_dma_buf(mdata->iclient, handle);
+	if (IS_ERR(sinfo->dma_buf)) {
+		rc = PTR_ERR(sinfo->dma_buf);
+		goto imap_err;
+	}
+
+	sinfo->attachment = mdss_smmu_dma_buf_attach(sinfo->dma_buf,
+			&mfd->pdev->dev, MDSS_IOMMU_DOMAIN_UNSECURE);
+	if (IS_ERR(sinfo->attachment)) {
+		rc = PTR_ERR(sinfo->attachment);
+		goto err_put;
+	}
+
+	sinfo->table = dma_buf_map_attachment(sinfo->attachment,
+			DMA_BIDIRECTIONAL);
+	if (IS_ERR(sinfo->table)) {
+		rc = PTR_ERR(sinfo->table);
+		goto err_detach;
+	}
+
+	rc = mdss_smmu_map_dma_buf(sinfo->dma_buf, sinfo->table,
+			MDSS_IOMMU_DOMAIN_UNSECURE, &sinfo->iova,
+			&buf_size, DMA_BIDIRECTIONAL);
+	if (rc) {
+		pr_err("mdss smmu map dma buf failed!\n");
+		goto err_unmap;
+	}
+	sinfo->size = buf_size;
+
+	dma_buf_begin_cpu_access(sinfo->dma_buf, 0, size, DMA_BIDIRECTIONAL);
+	sinfo->splash_buffer = dma_buf_kmap(sinfo->dma_buf, 0);
+	if (IS_ERR(sinfo->splash_buffer)) {
+		pr_err("ion kernel memory mapping failed\n");
+		rc = IS_ERR(sinfo->splash_buffer);
+		goto kmap_err;
+	}
+
+	/**
+	 * dma_buf has the reference
+	 */
+	ion_free(mdata->iclient, handle);
+
+	return rc;
+kmap_err:
+	mdss_smmu_unmap_dma_buf(sinfo->table, MDSS_IOMMU_DOMAIN_UNSECURE,
+			DMA_BIDIRECTIONAL, sinfo->dma_buf);
+err_unmap:
+	dma_buf_unmap_attachment(sinfo->attachment, sinfo->table,
+			DMA_BIDIRECTIONAL);
+err_detach:
+	dma_buf_detach(sinfo->dma_buf, sinfo->attachment);
+err_put:
+	dma_buf_put(sinfo->dma_buf);
+imap_err:
+	ion_free(mdata->iclient, handle);
+end:
+	return rc;
+}
+
+static void mdss_mdp_splash_free_memory(struct msm_fb_data_type *mfd)
+{
+	struct msm_fb_splash_info *sinfo;
+	struct mdss_data_type *mdata;
+
+	if (!mfd)
+		return;
+
+	sinfo = &mfd->splash_info;
+	mdata = mfd_to_mdata(mfd);
+
+	if (!mdata || !mdata->iclient || !sinfo->dma_buf)
+		return;
+
+	dma_buf_end_cpu_access(sinfo->dma_buf, 0, sinfo->size,
+			       DMA_BIDIRECTIONAL);
+	dma_buf_kunmap(sinfo->dma_buf, 0, sinfo->splash_buffer);
+
+	mdss_smmu_unmap_dma_buf(sinfo->table, MDSS_IOMMU_DOMAIN_UNSECURE, 0,
+				sinfo->dma_buf);
+	dma_buf_unmap_attachment(sinfo->attachment, sinfo->table,
+			DMA_BIDIRECTIONAL);
+	dma_buf_detach(sinfo->dma_buf, sinfo->attachment);
+	dma_buf_put(sinfo->dma_buf);
+
+	sinfo->splash_buffer = NULL;
+}
+
+static int mdss_mdp_splash_iommu_attach(struct msm_fb_data_type *mfd)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	int rc, ret;
+
+	/*
+	 * iommu dynamic attach for following conditions.
+	 * 1. it is still not attached
+	 * 2. MDP hardware version supports the feature
+	 * 3. configuration is with valid splash buffer
+	 */
+	if (mdata->mdss_util->iommu_attached() ||
+		!mfd->panel_info->cont_splash_enabled ||
+		!mdss_mdp_iommu_dyn_attach_supported(mdp5_data->mdata) ||
+		!mdp5_data->splash_mem_addr ||
+		!mdp5_data->splash_mem_size) {
+		pr_debug("dynamic attach is not supported\n");
+		return -EPERM;
+	}
+
+	rc = mdss_smmu_map(MDSS_IOMMU_DOMAIN_UNSECURE,
+				mdp5_data->splash_mem_addr,
+				mdp5_data->splash_mem_addr,
+				mdp5_data->splash_mem_size,
+				IOMMU_READ | IOMMU_NOEXEC);
+	if (rc) {
+		pr_debug("iommu memory mapping failed rc=%d\n", rc);
+	} else {
+		ret = mdss_iommu_ctrl(1);
+		if (IS_ERR_VALUE(ret)) {
+			pr_err("mdss iommu attach failed\n");
+			mdss_smmu_unmap(MDSS_IOMMU_DOMAIN_UNSECURE,
+					mdp5_data->splash_mem_addr,
+					mdp5_data->splash_mem_size);
+		} else {
+			mfd->splash_info.iommu_dynamic_attached = true;
+		}
+	}
+
+	return rc;
+}
+
+static void mdss_mdp_splash_unmap_splash_mem(struct msm_fb_data_type *mfd)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+
+	if (mfd->splash_info.iommu_dynamic_attached) {
+
+		mdss_smmu_unmap(MDSS_IOMMU_DOMAIN_UNSECURE,
+				mdp5_data->splash_mem_addr,
+				mdp5_data->splash_mem_size);
+		mdss_iommu_ctrl(0);
+
+		mfd->splash_info.iommu_dynamic_attached = false;
+	}
+}
+
+void mdss_mdp_release_splash_pipe(struct msm_fb_data_type *mfd)
+{
+	struct msm_fb_splash_info *sinfo;
+
+	if (!mfd || !mfd->splash_info.splash_pipe_allocated)
+		return;
+
+	sinfo = &mfd->splash_info;
+
+	if (sinfo->pipe_ndx[0] != INVALID_PIPE_INDEX)
+		mdss_mdp_overlay_release(mfd, sinfo->pipe_ndx[0]);
+	if (sinfo->pipe_ndx[1] != INVALID_PIPE_INDEX)
+		mdss_mdp_overlay_release(mfd, sinfo->pipe_ndx[1]);
+	sinfo->splash_pipe_allocated = false;
+}
+
+/*
+ * In order to free reserved memory from bootup we are not
+ * able to call the __init free functions, as we could be
+ * past the init boot sequence. As a result we need to
+ * free this memory ourselves using the
+ * free_reserved_page() function.
+ */
+void mdss_free_bootmem(u32 mem_addr, u32 size)
+{
+	unsigned long pfn_start, pfn_end, pfn_idx;
+
+	pfn_start = mem_addr >> PAGE_SHIFT;
+	pfn_end = (mem_addr + size) >> PAGE_SHIFT;
+	for (pfn_idx = pfn_start; pfn_idx < pfn_end; pfn_idx++)
+		free_reserved_page(pfn_to_page(pfn_idx));
+}
+
+int mdss_mdp_splash_cleanup(struct msm_fb_data_type *mfd,
+					bool use_borderfill)
+{
+	struct mdss_overlay_private *mdp5_data;
+	struct mdss_mdp_ctl *ctl;
+	static u32 splash_mem_addr;
+	static u32 splash_mem_size;
+	int rc = 0;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!mfd)
+		return -EINVAL;
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+	if (!mdp5_data)
+		return -EINVAL;
+
+	ctl = mdp5_data->ctl;
+	if (!ctl)
+		return -EINVAL;
+
+	if (!mfd->panel_info->cont_splash_enabled ||
+		(mfd->splash_info.iommu_dynamic_attached && !use_borderfill)) {
+		if (mfd->splash_info.iommu_dynamic_attached &&
+			use_borderfill) {
+			mdss_mdp_splash_unmap_splash_mem(mfd);
+			memblock_free(mdp5_data->splash_mem_addr,
+					mdp5_data->splash_mem_size);
+			mdss_free_bootmem(mdp5_data->splash_mem_addr,
+					mdp5_data->splash_mem_size);
+		}
+		goto end;
+	}
+
+	/* 1-to-1 mapping */
+	mdss_mdp_splash_iommu_attach(mfd);
+
+	if (use_borderfill && mdp5_data->handoff &&
+		!mfd->splash_info.iommu_dynamic_attached) {
+		/*
+		 * Set up border-fill on the handed off pipes.
+		 * This is needed to ensure that there are no memory
+		 * accesses prior to attaching iommu during continuous
+		 * splash screen case. However, for command mode
+		 * displays, this is not necessary since the panels can
+		 * refresh from their internal memory if no data is sent
+		 * out on the dsi lanes.
+		 */
+		if (mdp5_data->handoff && ctl && ctl->is_video_mode) {
+			rc = mdss_mdp_display_commit(ctl, NULL, NULL);
+			if (!IS_ERR_VALUE(rc)) {
+				mdss_mdp_display_wait4comp(ctl);
+			} else {
+				/*
+				 * Since border-fill setup failed, we
+				 * need to ensure that we turn off the
+				 * MDP timing generator before attaching
+				 * iommu
+				 */
+				pr_err("failed to set BF at handoff\n");
+				mdp5_data->handoff = false;
+			}
+		}
+	}
+
+	if (rc || mdp5_data->handoff) {
+		/* Add all the handed off pipes to the cleanup list */
+		mdss_mdp_handoff_cleanup_pipes(mfd, MDSS_MDP_PIPE_TYPE_RGB);
+		mdss_mdp_handoff_cleanup_pipes(mfd, MDSS_MDP_PIPE_TYPE_VIG);
+		mdss_mdp_handoff_cleanup_pipes(mfd, MDSS_MDP_PIPE_TYPE_DMA);
+	}
+
+	mdss_mdp_ctl_splash_finish(ctl, mdp5_data->handoff);
+
+	/* If DSI-1 interface is enabled by LK & split dsi is not enabled,
+	 * free cont_splash_mem for dsi during the cleanup for DSI-1.
+	 */
+	if (!mdata->splash_split_disp &&
+		(mdata->splash_intf_sel & MDSS_MDP_INTF_DSI1_SEL) &&
+		mfd->panel_info->pdest == DISPLAY_1) {
+		pr_debug("delay cleanup for display %d\n",
+						mfd->panel_info->pdest);
+		splash_mem_addr = mdp5_data->splash_mem_addr;
+		splash_mem_size = mdp5_data->splash_mem_size;
+
+		mdss_mdp_footswitch_ctrl_splash(0);
+		goto end;
+	}
+
+	if (!mdata->splash_split_disp &&
+		(mdata->splash_intf_sel & MDSS_MDP_INTF_DSI1_SEL) &&
+		mfd->panel_info->pdest == DISPLAY_2 &&
+		!mfd->splash_info.iommu_dynamic_attached) {
+		pr_debug("free splash mem for display %d\n",
+						mfd->panel_info->pdest);
+		/* Give back the reserved memory to the system */
+		memblock_free(splash_mem_addr, splash_mem_size);
+		mdss_free_bootmem(splash_mem_addr, splash_mem_size);
+
+		mdss_mdp_footswitch_ctrl_splash(0);
+		goto end;
+	}
+
+	if (mdp5_data->splash_mem_addr &&
+		!mfd->splash_info.iommu_dynamic_attached) {
+		pr_debug("free splash mem for display %d\n",
+						mfd->panel_info->pdest);
+		/* Give back the reserved memory to the system */
+		memblock_free(mdp5_data->splash_mem_addr,
+					mdp5_data->splash_mem_size);
+		mdss_free_bootmem(mdp5_data->splash_mem_addr,
+					mdp5_data->splash_mem_size);
+	}
+
+	mdss_mdp_footswitch_ctrl_splash(0);
+end:
+	return rc;
+}
+
+static struct mdss_mdp_pipe *mdss_mdp_splash_get_pipe(
+					struct msm_fb_data_type *mfd,
+					struct mdp_overlay *req)
+{
+	struct mdss_mdp_pipe *pipe;
+	int ret;
+	struct mdss_mdp_data *buf;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	uint32_t image_size = SPLASH_IMAGE_WIDTH * SPLASH_IMAGE_HEIGHT
+						* SPLASH_IMAGE_BPP;
+
+	ret = mdss_mdp_overlay_pipe_setup(mfd, req, &pipe, NULL, true);
+	if (ret)
+		return NULL;
+
+	if (mdss_mdp_pipe_map(pipe)) {
+		pr_err("unable to map base pipe\n");
+		return NULL;
+	}
+
+	mutex_lock(&mdp5_data->list_lock);
+	buf = mdss_mdp_overlay_buf_alloc(mfd, pipe);
+	if (!buf) {
+		pr_err("unable to allocate memory for splash buffer\n");
+		mdss_mdp_pipe_unmap(pipe);
+		mutex_unlock(&mdp5_data->list_lock);
+		return NULL;
+	}
+	mutex_unlock(&mdp5_data->list_lock);
+
+	buf->p[0].addr = mfd->splash_info.iova;
+	buf->p[0].len = image_size;
+	buf->num_planes = 1;
+	mdss_mdp_pipe_unmap(pipe);
+
+	return pipe;
+}
+
+static int mdss_mdp_splash_kickoff(struct msm_fb_data_type *mfd,
+				struct mdss_rect *src_rect,
+				struct mdss_rect *dest_rect)
+{
+	struct mdss_mdp_pipe *pipe;
+	struct fb_info *fbi;
+	struct mdp_overlay *req = NULL;
+	struct mdss_overlay_private *mdp5_data;
+	struct mdss_data_type *mdata;
+	struct mdss_mdp_mixer *mixer;
+	int ret;
+	bool use_single_pipe = false;
+	struct msm_fb_splash_info *sinfo;
+
+	if (!mfd)
+		return -EINVAL;
+
+	fbi = mfd->fbi;
+	mdp5_data = mfd_to_mdp5_data(mfd);
+	mdata = mfd_to_mdata(mfd);
+	sinfo = &mfd->splash_info;
+
+	if (!mdp5_data || !mdp5_data->ctl)
+		return -EINVAL;
+
+	if (mutex_lock_interruptible(&mdp5_data->ov_lock))
+		return -EINVAL;
+
+	ret = mdss_mdp_overlay_start(mfd);
+	if (ret) {
+		pr_err("unable to start overlay %d (%d)\n", mfd->index, ret);
+		goto end;
+	}
+
+	mixer = mdss_mdp_mixer_get(mdp5_data->ctl, MDSS_MDP_MIXER_MUX_LEFT);
+	if (!mixer) {
+		pr_err("unable to retrieve mixer\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	req = kzalloc(sizeof(struct mdp_overlay), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	/*
+	 * use single pipe for
+	 * 1. split display disabled
+	 * 2. splash image is only on one side of panel
+	 * 3. source split is enabled and splash image is within line
+	 *    buffer boundary
+	 */
+	use_single_pipe =
+		!is_split_lm(mfd) ||
+		(is_split_lm(mfd) &&
+		((dest_rect->x + dest_rect->w) < mfd->split_fb_left ||
+		dest_rect->x > mfd->split_fb_left)) ||
+		(mdata->has_src_split &&
+		src_rect->w < min_t(u16, mixer->width,
+					mdss_mdp_line_buffer_width()) &&
+		dest_rect->w < min_t(u16, mixer->width,
+					mdss_mdp_line_buffer_width()));
+
+	req->src.width = src_rect->w;
+	if (use_single_pipe)
+		req->src_rect.w = src_rect->w;
+	else
+		req->src_rect.w = min_t(u16, mixer->width, src_rect->w >> 1);
+	req->dst_rect.w = req->src_rect.w;
+	req->src.height = req->dst_rect.h = req->src_rect.h =
+			src_rect->h;
+	req->src.format = SPLASH_IMAGE_FORMAT;
+	req->id = MSMFB_NEW_REQUEST;
+	req->z_order = MDSS_MDP_STAGE_0;
+	req->alpha = 0xff;
+	req->transp_mask = MDP_TRANSP_NOP;
+	req->dst_rect.x = dest_rect->x;
+	req->dst_rect.y = dest_rect->y;
+
+	pipe = mdss_mdp_splash_get_pipe(mfd, req);
+	if (!pipe) {
+		pr_err("unable to allocate base pipe\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	sinfo->pipe_ndx[0] = pipe->ndx;
+
+	if (!use_single_pipe) {
+		req->id = MSMFB_NEW_REQUEST;
+		req->src_rect.x = src_rect->x + min_t(u16, mixer->width,
+					src_rect->w - req->src_rect.w);
+		req->dst_rect.x = mixer->width;
+		pipe = mdss_mdp_splash_get_pipe(mfd, req);
+		if (!pipe) {
+			pr_err("unable to allocate right base pipe\n");
+			mdss_mdp_overlay_release(mfd, sinfo->pipe_ndx[0]);
+			ret = -EINVAL;
+			goto end;
+		}
+		sinfo->pipe_ndx[1] = pipe->ndx;
+	}
+	mutex_unlock(&mdp5_data->ov_lock);
+
+	ret = mfd->mdp.kickoff_fnc(mfd, NULL);
+	if (ret) {
+		pr_err("error in displaying image\n");
+		mdss_mdp_overlay_release(mfd, sinfo->pipe_ndx[0] |
+					sinfo->pipe_ndx[1]);
+	}
+
+	kfree(req);
+	return ret;
+end:
+	kfree(req);
+	sinfo->pipe_ndx[0] = INVALID_PIPE_INDEX;
+	sinfo->pipe_ndx[1] = INVALID_PIPE_INDEX;
+	mutex_unlock(&mdp5_data->ov_lock);
+	return ret;
+}
+
+static int mdss_mdp_display_splash_image(struct msm_fb_data_type *mfd)
+{
+	int rc = 0;
+	struct fb_info *fbi;
+	uint32_t image_len = SPLASH_IMAGE_WIDTH * SPLASH_IMAGE_HEIGHT
+						* SPLASH_IMAGE_BPP;
+	struct mdss_rect src_rect, dest_rect;
+	struct msm_fb_splash_info *sinfo;
+
+	if (!mfd || !mfd->fbi) {
+		pr_err("invalid input parameter\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	fbi = mfd->fbi;
+	sinfo = &mfd->splash_info;
+
+	if (fbi->var.xres < SPLASH_IMAGE_WIDTH ||
+		  fbi->var.yres < SPLASH_IMAGE_HEIGHT ||
+		  (fbi->var.bits_per_pixel >> 3) < SPLASH_IMAGE_BPP) {
+		pr_err("invalid splash parameter configuration\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	sinfo->pipe_ndx[0] = INVALID_PIPE_INDEX;
+	sinfo->pipe_ndx[1] = INVALID_PIPE_INDEX;
+
+	src_rect.x = 0;
+	src_rect.y = 0;
+	dest_rect.w = src_rect.w = SPLASH_IMAGE_WIDTH;
+	dest_rect.h = src_rect.h = SPLASH_IMAGE_HEIGHT;
+	dest_rect.x = (fbi->var.xres >> 1) - (SPLASH_IMAGE_WIDTH >> 1);
+	dest_rect.y = (fbi->var.yres >> 1) - (SPLASH_IMAGE_HEIGHT >> 1);
+
+	rc = mdss_mdp_splash_alloc_memory(mfd, image_len);
+	if (rc) {
+		pr_err("splash buffer allocation failed\n");
+		goto end;
+	}
+
+	memcpy(sinfo->splash_buffer, splash_bgr888_image, image_len);
+
+	rc = mdss_mdp_splash_iommu_attach(mfd);
+	if (rc)
+		pr_debug("iommu dynamic attach failed\n");
+
+	rc = mdss_mdp_splash_kickoff(mfd, &src_rect, &dest_rect);
+	if (rc)
+		pr_err("splash image display failed\n");
+	else
+		sinfo->splash_pipe_allocated = true;
+end:
+	return rc;
+}
+
+static int mdss_mdp_splash_ctl_cb(struct notifier_block *self,
+					unsigned long event, void *data)
+{
+	struct msm_fb_splash_info *sinfo = container_of(self,
+					struct msm_fb_splash_info, notifier);
+	struct msm_fb_data_type *mfd;
+
+	if (!sinfo)
+		goto done;
+
+	mfd = container_of(sinfo, struct msm_fb_data_type, splash_info);
+
+	if (!mfd)
+		goto done;
+
+	if (event != MDP_NOTIFY_FRAME_DONE)
+		goto done;
+
+	if (!sinfo->frame_done_count) {
+		mdss_mdp_splash_unmap_splash_mem(mfd);
+		mdss_mdp_splash_cleanup(mfd, false);
+	/* wait for 2 frame done events before releasing memory */
+	} else if (sinfo->frame_done_count > MAX_FRAME_DONE_COUNT_WAIT &&
+			sinfo->splash_thread) {
+		complete(&sinfo->frame_done);
+		sinfo->splash_thread = NULL;
+	}
+
+	/* increase frame done count after pipes are staged from other client */
+	if (!sinfo->splash_pipe_allocated)
+		sinfo->frame_done_count++;
+done:
+	return NOTIFY_OK;
+}
+
+static int mdss_mdp_splash_thread(void *data)
+{
+	struct msm_fb_data_type *mfd = data;
+	struct mdss_overlay_private *mdp5_data;
+	int ret = -EINVAL;
+
+	if (!mfd) {
+		pr_err("invalid input parameter\n");
+		goto end;
+	}
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+	lock_fb_info(mfd->fbi);
+	ret = fb_blank(mfd->fbi, FB_BLANK_UNBLANK);
+	if (ret) {
+		pr_err("can't turn on fb!\n");
+		goto end;
+	}
+	unlock_fb_info(mfd->fbi);
+
+	mutex_lock(&mfd->bl_lock);
+	mfd->allow_bl_update = true;
+	mdss_fb_set_backlight(mfd, mfd->panel_info->bl_max >> 1);
+	mutex_unlock(&mfd->bl_lock);
+
+	init_completion(&mfd->splash_info.frame_done);
+
+	mfd->splash_info.notifier.notifier_call = mdss_mdp_splash_ctl_cb;
+	mdss_mdp_ctl_notifier_register(mdp5_data->ctl,
+				&mfd->splash_info.notifier);
+
+	ret = mdss_mdp_display_splash_image(mfd);
+	if (ret) {
+		/*
+		 * keep thread alive to release dynamically allocated
+		 * resources
+		 */
+		pr_err("splash image display failed\n");
+	}
+
+	/* wait for second display complete to release splash resources */
+	ret = wait_for_completion_killable(&mfd->splash_info.frame_done);
+
+	mdss_mdp_splash_free_memory(mfd);
+
+	mdss_mdp_ctl_notifier_unregister(mdp5_data->ctl,
+				&mfd->splash_info.notifier);
+end:
+	return ret;
+}
+
+static __ref int mdss_mdp_splash_parse_dt(struct msm_fb_data_type *mfd)
+{
+	struct platform_device *pdev = mfd->pdev;
+	struct mdss_overlay_private *mdp5_mdata = mfd_to_mdp5_data(mfd);
+	int len = 0, rc = 0;
+	u32 offsets[2];
+	struct device_node *pnode, *child_node;
+
+	mfd->splash_info.splash_logo_enabled =
+				of_property_read_bool(pdev->dev.of_node,
+				"qcom,mdss-fb-splash-logo-enabled");
+
+	of_find_property(pdev->dev.of_node, "qcom,memblock-reserve", &len);
+	if (len) {
+		len = len / sizeof(u32);
+
+		rc = of_property_read_u32_array(pdev->dev.of_node,
+			"qcom,memblock-reserve", offsets, len);
+		if (rc) {
+			pr_err("error reading mem reserve settings for fb\n");
+			goto error;
+		}
+	} else {
+		child_node = of_get_child_by_name(pdev->dev.of_node,
+					"qcom,cont-splash-memory");
+		if (!child_node) {
+			pr_err("splash mem child node is not present\n");
+			rc = -EINVAL;
+			goto error;
+		}
+
+		pnode = of_parse_phandle(child_node, "linux,contiguous-region",
+					0);
+		if (pnode != NULL) {
+			const u32 *addr;
+			u64 size;
+
+			addr = of_get_address(pnode, 0, &size, NULL);
+			if (!addr) {
+				pr_err("failed to parse the splash memory address\n");
+				of_node_put(pnode);
+				rc = -EINVAL;
+				goto error;
+			}
+			offsets[0] = (u32) of_read_ulong(addr, 2);
+			offsets[1] = (u32) size;
+			of_node_put(pnode);
+		} else {
+			pr_err("mem reservation for splash screen fb not present\n");
+			rc = -EINVAL;
+			goto error;
+		}
+	}
+
+	if (!memblock_is_reserved(offsets[0])) {
+		pr_debug("failed to reserve memory for fb splash\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	mdp5_mdata->splash_mem_addr = offsets[0];
+	mdp5_mdata->splash_mem_size = offsets[1];
+	pr_debug("memaddr=%x size=%x\n", mdp5_mdata->splash_mem_addr,
+		mdp5_mdata->splash_mem_size);
+
+error:
+	if (!rc && !mfd->panel_info->cont_splash_enabled &&
+		mdp5_mdata->splash_mem_addr) {
+		pr_debug("mem reservation not reqd if cont splash disabled\n");
+		memblock_free(mdp5_mdata->splash_mem_addr,
+					mdp5_mdata->splash_mem_size);
+		mdss_free_bootmem(mdp5_mdata->splash_mem_addr,
+					mdp5_mdata->splash_mem_size);
+	} else if (rc && mfd->panel_info->cont_splash_enabled) {
+		pr_err("no rsvd mem found in DT for splash screen\n");
+	} else {
+		rc = 0;
+	}
+
+	return rc;
+}
+
+int mdss_mdp_splash_init(struct msm_fb_data_type *mfd)
+{
+	int rc;
+
+	if (!mfd) {
+		rc = -EINVAL;
+		goto end;
+	}
+
+	rc = mdss_mdp_splash_parse_dt(mfd);
+	if (rc) {
+		pr_err("splash memory reserve failed\n");
+		goto end;
+	}
+
+	if (!mfd->splash_info.splash_logo_enabled) {
+		rc = -EINVAL;
+		goto end;
+	}
+
+	mfd->splash_info.splash_thread = kthread_run(mdss_mdp_splash_thread,
+							mfd, "mdss_fb_splash");
+
+	if (IS_ERR(mfd->splash_info.splash_thread)) {
+		pr_err("unable to start splash thread %d\n", mfd->index);
+		mfd->splash_info.splash_thread = NULL;
+	}
+
+end:
+	return rc;
+}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_splash_logo.h b/drivers/video/fbdev/msm/mdss_mdp_splash_logo.h
new file mode 100644
index 0000000..205bb65
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_splash_logo.h
@@ -0,0 +1,46 @@
+/* Copyright (c) 2014, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef MDSS_MDP_SPLASH_LOGO
+#define MDSS_MDP_SPLASH_LOGO
+
+#include <linux/types.h>
+#include <linux/notifier.h>
+#include <linux/kthread.h>
+#include <linux/completion.h>
+
+/* Per-framebuffer state for the boot splash logo and the splash buffer
+ * handed over from the bootloader.
+ */
+struct msm_fb_splash_info {
+	struct task_struct	*splash_thread;	/* logo display kthread */
+	bool			splash_logo_enabled;
+	bool			iommu_dynamic_attached;
+	struct notifier_block	notifier;
+	uint32_t		frame_done_count;
+	struct completion	frame_done;
+
+	/* dma-buf backing for the splash image buffer */
+	struct dma_buf		*dma_buf;
+	struct dma_buf_attachment *attachment;
+	struct sg_table		*table;
+	dma_addr_t		iova;
+	void			*splash_buffer;
+	int			pipe_ndx[2];	/* pipes used for the splash */
+	bool			splash_pipe_allocated;
+	uint32_t		size;
+};
+
+struct msm_fb_data_type;
+
+void mdss_mdp_release_splash_pipe(struct msm_fb_data_type *mfd);
+int mdss_mdp_splash_cleanup(struct msm_fb_data_type *mfd,
+				 bool use_borderfill);
+int mdss_mdp_splash_init(struct msm_fb_data_type *mfd);
+
+#endif
diff --git a/drivers/video/fbdev/msm/mdss_mdp_trace.h b/drivers/video/fbdev/msm/mdss_mdp_trace.h
new file mode 100644
index 0000000..c100e9c
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_trace.h
@@ -0,0 +1,430 @@
+/*
+ * Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#if !defined(TRACE_MDSS_MDP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define TRACE_MDSS_MDP_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mdss
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE mdss_mdp_trace
+
+#include <linux/tracepoint.h>
+#include "mdss_mdp.h"
+
+DECLARE_EVENT_CLASS(mdp_sspp_template,
+	TP_PROTO(struct mdss_mdp_pipe *pipe),
+	TP_ARGS(pipe),
+	TP_STRUCT__entry(
+			__field(u32, num)
+			__field(u32, play_cnt)
+			__field(u32, mixer)
+			__field(u32, stage)
+			__field(u32, flags)
+			__field(u32, format)
+			__field(u16, img_w)
+			__field(u16, img_h)
+			__field(u16, src_x)
+			__field(u16, src_y)
+			__field(u16, src_w)
+			__field(u16, src_h)
+			__field(u16, dst_x)
+			__field(u16, dst_y)
+			__field(u16, dst_w)
+			__field(u16, dst_h)
+	),
+	TP_fast_assign(
+			__entry->num = pipe->num;
+			__entry->play_cnt = pipe->play_cnt;
+			__entry->mixer = pipe->mixer_left->num;
+			__entry->stage = pipe->mixer_stage;
+			__entry->flags = pipe->flags;
+			__entry->format = pipe->src_fmt ?
+					pipe->src_fmt->format : -1;
+			__entry->img_w = pipe->img_width;
+			__entry->img_h = pipe->img_height;
+			__entry->src_x = pipe->src.x;
+			__entry->src_y = pipe->src.y;
+			__entry->src_w = pipe->src.w;
+			__entry->src_h = pipe->src.h;
+			__entry->dst_x = pipe->dst.x;
+			__entry->dst_y = pipe->dst.y;
+			__entry->dst_w = pipe->dst.w;
+			__entry->dst_h = pipe->dst.h;
+	),
+
+	TP_printk("pnum=%d mixer=%d play_cnt=%d flags=0x%x stage=%d format=%d img=%dx%d src=[%d,%d,%d,%d] dst=[%d,%d,%d,%d]",
+			__entry->num, __entry->mixer, __entry->play_cnt,
+			__entry->flags, __entry->stage,
+			__entry->format, __entry->img_w, __entry->img_h,
+			__entry->src_x, __entry->src_y,
+			__entry->src_w, __entry->src_h,
+			__entry->dst_x, __entry->dst_y,
+			__entry->dst_w, __entry->dst_h)
+);
+
+DEFINE_EVENT(mdp_sspp_template, mdp_sspp_set,
+	TP_PROTO(struct mdss_mdp_pipe *pipe),
+	TP_ARGS(pipe)
+);
+
+DEFINE_EVENT(mdp_sspp_template, mdp_sspp_change,
+	TP_PROTO(struct mdss_mdp_pipe *pipe),
+	TP_ARGS(pipe)
+);
+
+TRACE_EVENT(mdp_perf_set_qos_luts,
+	TP_PROTO(u32 pnum, u32 fmt, u32 intf, u32 rot, u32 fl,
+		u32 lut, bool linear),
+	TP_ARGS(pnum, fmt, intf, rot, fl, lut, linear),
+	TP_STRUCT__entry(
+			__field(u32, pnum)
+			__field(u32, fmt)
+			__field(u32, intf)
+			__field(u32, rot)
+			__field(u32, fl)
+			__field(u32, lut)
+			__field(bool, linear)
+	),
+	TP_fast_assign(
+			__entry->pnum = pnum;
+			__entry->fmt = fmt;
+			__entry->intf = intf;
+			__entry->rot = rot;
+			__entry->fl = fl;
+			__entry->lut = lut;
+			__entry->linear = linear;
+	),
+	TP_printk("pnum=%d fmt=%d intf=%d rot=%d fl:%d lut=0x%x lin:%d",
+			__entry->pnum, __entry->fmt,
+			__entry->intf, __entry->rot, __entry->fl,
+			__entry->lut, __entry->linear)
+);
+
+TRACE_EVENT(mdp_perf_set_panic_luts,
+	TP_PROTO(u32 pnum, u32 fmt, u32 mode, u32 panic_lut,
+		u32 robust_lut),
+	TP_ARGS(pnum, fmt, mode, panic_lut, robust_lut),
+	TP_STRUCT__entry(
+			__field(u32, pnum)
+			__field(u32, fmt)
+			__field(u32, mode)
+			__field(u32, panic_lut)
+			__field(u32, robust_lut)
+	),
+	TP_fast_assign(
+			__entry->pnum = pnum;
+			__entry->fmt = fmt;
+			__entry->mode = mode;
+			__entry->panic_lut = panic_lut;
+			__entry->robust_lut = robust_lut;
+	),
+	TP_printk("pnum=%d fmt=%d mode=%d luts[0x%x, 0x%x]",
+			__entry->pnum, __entry->fmt,
+			__entry->mode, __entry->panic_lut,
+			__entry->robust_lut)
+);
+
+TRACE_EVENT(mdp_perf_set_wm_levels,
+	TP_PROTO(u32 pnum, u32 use_space, u32 priority_bytes, u32 wm0, u32 wm1,
+		u32 wm2, u32 mb_cnt, u32 mb_size),
+	TP_ARGS(pnum, use_space, priority_bytes, wm0, wm1, wm2, mb_cnt,
+		mb_size),
+	TP_STRUCT__entry(
+			__field(u32, pnum)
+			__field(u32, use_space)
+			__field(u32, priority_bytes)
+			__field(u32, wm0)
+			__field(u32, wm1)
+			__field(u32, wm2)
+			__field(u32, mb_cnt)
+			__field(u32, mb_size)
+	),
+	TP_fast_assign(
+			__entry->pnum = pnum;
+			__entry->use_space = use_space;
+			__entry->priority_bytes = priority_bytes;
+			__entry->wm0 = wm0;
+			__entry->wm1 = wm1;
+			__entry->wm2 = wm2;
+			__entry->mb_cnt = mb_cnt;
+			__entry->mb_size = mb_size;
+	),
+	TP_printk("pnum:%d useable_space:%d priority_bytes:%d watermark:[%d | %d | %d] nmb=%d mb_size=%d",
+			__entry->pnum, __entry->use_space,
+			__entry->priority_bytes, __entry->wm0, __entry->wm1,
+			__entry->wm2, __entry->mb_cnt, __entry->mb_size)
+);
+
+TRACE_EVENT(mdp_perf_set_ot,
+	TP_PROTO(u32 pnum, u32 xin_id, u32 rd_lim, u32 is_vbif_rt),
+	TP_ARGS(pnum, xin_id, rd_lim, is_vbif_rt),
+	TP_STRUCT__entry(
+			__field(u32, pnum)
+			__field(u32, xin_id)
+			__field(u32, rd_lim)
+			__field(u32, is_vbif_rt)
+	),
+	TP_fast_assign(
+			__entry->pnum = pnum;
+			__entry->xin_id = xin_id;
+			__entry->rd_lim = rd_lim;
+			__entry->is_vbif_rt = is_vbif_rt;
+	),
+	TP_printk("pnum:%d xin_id:%d ot:%d rt:%d",
+			__entry->pnum, __entry->xin_id, __entry->rd_lim,
+			__entry->is_vbif_rt)
+);
+
+TRACE_EVENT(mdp_perf_prefill_calc,
+	TP_PROTO(u32 pnum, u32 latency_buf, u32 ot, u32 y_buf, u32 y_scaler,
+		u32 pp_lines, u32 pp_bytes, u32 post_sc, u32 fbc_bytes,
+		u32 prefill_bytes),
+	TP_ARGS(pnum, latency_buf, ot, y_buf, y_scaler, pp_lines, pp_bytes,
+		post_sc, fbc_bytes, prefill_bytes),
+	TP_STRUCT__entry(
+			__field(u32, pnum)
+			__field(u32, latency_buf)
+			__field(u32, ot)
+			__field(u32, y_buf)
+			__field(u32, y_scaler)
+			__field(u32, pp_lines)
+			__field(u32, pp_bytes)
+			__field(u32, post_sc)
+			__field(u32, fbc_bytes)
+			__field(u32, prefill_bytes)
+	),
+	TP_fast_assign(
+			__entry->pnum = pnum;
+			__entry->latency_buf = latency_buf;
+			__entry->ot = ot;
+			__entry->y_buf = y_buf;
+			__entry->y_scaler = y_scaler;
+			__entry->pp_lines = pp_lines;
+			__entry->pp_bytes = pp_bytes;
+			__entry->post_sc = post_sc;
+			__entry->fbc_bytes = fbc_bytes;
+			__entry->prefill_bytes = prefill_bytes;
+	),
+	TP_printk("pnum:%d latency_buf:%d ot:%d y_buf:%d y_scaler:%d pp_lines:%d, pp_bytes=%d post_sc:%d fbc_bytes:%d prefill:%d",
+			__entry->pnum, __entry->latency_buf, __entry->ot,
+			__entry->y_buf, __entry->y_scaler, __entry->pp_lines,
+			__entry->pp_bytes, __entry->post_sc,
+			__entry->fbc_bytes, __entry->prefill_bytes)
+);
+
+TRACE_EVENT(mdp_mixer_update,
+	TP_PROTO(u32 mixer_num),
+	TP_ARGS(mixer_num),
+	TP_STRUCT__entry(
+			__field(u32, mixer_num)
+	),
+	TP_fast_assign(
+			__entry->mixer_num = mixer_num;
+	),
+	TP_printk("mixer=%d",
+			__entry->mixer_num)
+);
+
+TRACE_EVENT(mdp_commit,
+	TP_PROTO(struct mdss_mdp_ctl *ctl),
+	TP_ARGS(ctl),
+	TP_STRUCT__entry(
+			__field(u32, num)
+			__field(u32, play_cnt)
+			__field(u32, clk_rate)
+			__field(u64, bandwidth)
+	),
+	TP_fast_assign(
+			__entry->num = ctl->num;
+			__entry->play_cnt = ctl->play_cnt;
+			__entry->clk_rate = ctl->new_perf.mdp_clk_rate;
+			__entry->bandwidth = ctl->new_perf.bw_ctl;
+	),
+	TP_printk("num=%d play_cnt=%d bandwidth=%llu clk_rate=%u",
+			__entry->num,
+			__entry->play_cnt,
+			__entry->bandwidth,
+			__entry->clk_rate)
+);
+
+TRACE_EVENT(mdp_video_underrun_done,
+	TP_PROTO(u32 ctl_num, u32 underrun_cnt),
+	TP_ARGS(ctl_num, underrun_cnt),
+	TP_STRUCT__entry(
+			__field(u32, ctl_num)
+			__field(u32, underrun_cnt)
+	),
+	TP_fast_assign(
+			__entry->ctl_num = ctl_num;
+			__entry->underrun_cnt = underrun_cnt;
+	),
+	TP_printk("ctl=%d count=%d",
+			__entry->ctl_num, __entry->underrun_cnt)
+);
+
+TRACE_EVENT(mdp_perf_update_bus,
+	TP_PROTO(int client, unsigned long long ab_quota,
+	unsigned long long ib_quota),
+	TP_ARGS(client, ab_quota, ib_quota),
+	TP_STRUCT__entry(
+			__field(int, client)
+			__field(u64, ab_quota)
+			__field(u64, ib_quota)
+	),
+	TP_fast_assign(
+			__entry->client = client;
+			__entry->ab_quota = ab_quota;
+			__entry->ib_quota = ib_quota;
+	),
+	TP_printk("Request client:%d ab=%llu ib=%llu",
+			__entry->client,
+			__entry->ab_quota,
+			__entry->ib_quota)
+);
+
+TRACE_EVENT(mdp_misr_crc,
+	TP_PROTO(u32 block_id, u32 vsync_cnt, u32 crc),
+	TP_ARGS(block_id, vsync_cnt, crc),
+	TP_STRUCT__entry(
+			__field(u32, block_id)
+			__field(u32, vsync_cnt)
+			__field(u32, crc)
+	),
+	TP_fast_assign(
+			__entry->block_id = block_id;
+			__entry->vsync_cnt = vsync_cnt;
+			__entry->crc = crc;
+	),
+	TP_printk("block_id:%d vsync_cnt:%d crc:0x%08x",
+			__entry->block_id, __entry->vsync_cnt, __entry->crc)
+);
+
+TRACE_EVENT(mdp_cmd_pingpong_done,
+	TP_PROTO(struct mdss_mdp_ctl *ctl, u32 pp_num, int koff_cnt),
+	TP_ARGS(ctl, pp_num, koff_cnt),
+	TP_STRUCT__entry(
+			__field(u32, ctl_num)
+			__field(u32, intf_num)
+			__field(u32, pp_num)
+			__field(int, koff_cnt)
+	),
+	TP_fast_assign(
+			__entry->ctl_num = ctl->num;
+			__entry->intf_num = ctl->intf_num;
+			__entry->pp_num = pp_num;
+			__entry->koff_cnt = koff_cnt;
+	),
+	TP_printk("ctl num:%d intf_num:%d ctx:%d kickoff:%d",
+			__entry->ctl_num, __entry->intf_num, __entry->pp_num,
+			__entry->koff_cnt)
+);
+
+TRACE_EVENT(mdp_cmd_release_bw,
+	TP_PROTO(u32 ctl_num),
+	TP_ARGS(ctl_num),
+	TP_STRUCT__entry(
+			__field(u32, ctl_num)
+	),
+	TP_fast_assign(
+			__entry->ctl_num = ctl_num;
+	),
+	TP_printk("ctl num:%d", __entry->ctl_num)
+);
+
+TRACE_EVENT(mdp_cmd_kickoff,
+	TP_PROTO(u32 ctl_num, int kickoff_cnt),
+	TP_ARGS(ctl_num, kickoff_cnt),
+	TP_STRUCT__entry(
+			__field(u32, ctl_num)
+			__field(int, kickoff_cnt)
+	),
+	TP_fast_assign(
+			__entry->ctl_num = ctl_num;
+			__entry->kickoff_cnt = kickoff_cnt;
+	),
+	TP_printk("kickoff ctl=%d cnt=%d",
+			__entry->ctl_num,
+			__entry->kickoff_cnt)
+);
+
+TRACE_EVENT(mdp_cmd_wait_pingpong,
+	TP_PROTO(u32 ctl_num, int kickoff_cnt),
+	TP_ARGS(ctl_num, kickoff_cnt),
+	TP_STRUCT__entry(
+			__field(u32, ctl_num)
+			__field(int, kickoff_cnt)
+	),
+	TP_fast_assign(
+			__entry->ctl_num = ctl_num;
+			__entry->kickoff_cnt = kickoff_cnt;
+	),
+	TP_printk("pingpong ctl=%d cnt=%d",
+			__entry->ctl_num,
+			__entry->kickoff_cnt)
+);
+
+TRACE_EVENT(tracing_mark_write,
+	TP_PROTO(int pid, const char *name, bool trace_begin),
+	TP_ARGS(pid, name, trace_begin),
+	TP_STRUCT__entry(
+			__field(int, pid)
+			__string(trace_name, name)
+			__field(bool, trace_begin)
+	),
+	TP_fast_assign(
+			__entry->pid = pid;
+			__assign_str(trace_name, name);
+			__entry->trace_begin = trace_begin;
+	),
+	TP_printk("%s|%d|%s", __entry->trace_begin ? "B" : "E",
+		__entry->pid, __get_str(trace_name))
+);
+
+/* Emits a systrace-style counter line: "pid|name|value".
+ * NOTE(review): the @pid argument is ignored -- TP_fast_assign records
+ * current->tgid instead.  Confirm callers expect the calling task's tgid.
+ */
+TRACE_EVENT(mdp_trace_counter,
+	TP_PROTO(int pid, char *name, int value),
+	TP_ARGS(pid, name, value),
+	TP_STRUCT__entry(
+			__field(int, pid)
+			__string(counter_name, name)
+			__field(int, value)
+	),
+	TP_fast_assign(
+			__entry->pid = current->tgid;
+			__assign_str(counter_name, name);
+			__entry->value = value;
+	),
+	TP_printk("%d|%s|%d", __entry->pid,
+			__get_str(counter_name), __entry->value)
+);
+
+TRACE_EVENT(rotator_bw_ao_as_context,
+	TP_PROTO(u32 state),
+	TP_ARGS(state),
+	TP_STRUCT__entry(
+			__field(u32, state)
+	),
+	TP_fast_assign(
+			__entry->state = state;
+	),
+	TP_printk("Rotator bw context %s",
+			__entry->state ? "Active Only" : "Active+Sleep")
+
+);
+
+#endif /* if !defined(TRACE_MDSS_MDP_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/video/fbdev/msm/mdss_mdp_util.c b/drivers/video/fbdev/msm/mdss_mdp_util.c
new file mode 100644
index 0000000..6c28fe9
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_util.c
@@ -0,0 +1,1322 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/file.h>
+#include <linux/msm_ion.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/major.h>
+#include <media/msm_media_info.h>
+
+#include <linux/dma-buf.h>
+
+#include "mdss_fb.h"
+#include "mdss_mdp.h"
+#include "mdss_mdp_formats.h"
+#include "mdss_debug.h"
+#include "mdss_smmu.h"
+#include "mdss_panel.h"
+
+#define PHY_ADDR_4G (1ULL<<32)
+
+/*
+ * mdss_mdp_format_flag_removal() - clear capability bits for given formats
+ * @table: array of MDP format codes to act on
+ * @num: number of entries in @table
+ * @remove_bits: flag bits to clear from each matching format entry
+ *
+ * For every format in @table, find its entry in the global format map and
+ * clear @remove_bits from that entry's capability flags.
+ */
+void mdss_mdp_format_flag_removal(u32 *table, u32 num, u32 remove_bits)
+{
+	struct mdss_mdp_format_params *fmt = NULL;
+	int i, j;
+
+	if (table == NULL) {
+		pr_err("Null table provided\n");
+		return;
+	}
+
+	for (i = 0; i < num; i++) {
+		for (j = 0; j < ARRAY_SIZE(mdss_mdp_format_map); j++) {
+			/* index by the inner scan variable, not the caller
+			 * table index: using [i] here only ever examined one
+			 * map entry per table element
+			 */
+			fmt = &mdss_mdp_format_map[j];
+			if (table[i] == fmt->format) {
+				fmt->flag &= ~remove_bits;
+				break;
+			}
+		}
+	}
+}
+
+/* Set bit @bit_num in a byte-array bitmap (LSB-first within each byte). */
+#define SET_BIT(value, bit_num) \
+	{ \
+		value[bit_num >> 3] |= (1 << (bit_num & 7)); \
+	}
+/*
+ * Mark @fmt as supported on every rect of every pipe in @pipe_list.
+ * NOTE(review): "pipe += j" assumes each pipe's rects are laid out
+ * contiguously after it in the array, so the pointer skips the rects just
+ * processed -- confirm against the pipe array layout.
+ */
+static inline void __set_pipes_supported_fmt(struct mdss_mdp_pipe *pipe_list,
+		int count, struct mdss_mdp_format_params *fmt)
+{
+	struct mdss_mdp_pipe *pipe = pipe_list;
+	int i, j;
+
+	for (i = 0; i < count; i++, pipe += j)
+		for (j = 0; j < pipe->multirect.max_rects; j++)
+			SET_BIT(pipe[j].supported_formats, fmt->format);
+}
+
+/*
+ * mdss_mdp_set_supported_formats() - populate per-pipe/per-wb format bitmaps
+ * @mdata: MDSS hardware description
+ *
+ * Walks the linear/tile format map and (when UBWC is available) the UBWC
+ * format map, marking each usable format in the supported-format bitmaps of
+ * VIG/RGB/DMA/cursor pipes and writeback blocks.
+ *
+ * NOTE(review): the UBWC loop below duplicates the flag/bit-setting logic
+ * of the first loop; keep the two in sync when changing either.
+ */
+void mdss_mdp_set_supported_formats(struct mdss_data_type *mdata)
+{
+	struct mdss_mdp_writeback *wb = mdata->wb;
+	bool has_tile = mdata->highest_bank_bit && !mdata->has_ubwc;
+	bool has_ubwc = mdata->has_ubwc;
+	int i;
+	int j;
+
+	for (i = 0; i < ARRAY_SIZE(mdss_mdp_format_map); i++) {
+		struct mdss_mdp_format_params *fmt = &mdss_mdp_format_map[i];
+
+		if ((fmt->fetch_mode == MDSS_MDP_FETCH_TILE && has_tile) ||
+			(fmt->fetch_mode == MDSS_MDP_FETCH_LINEAR)) {
+			/* 10-bit (DX) formats need explicit hw support */
+			if (fmt->unpack_dx_format &&
+				!test_bit(MDSS_CAPS_10_BIT_SUPPORTED,
+				mdata->mdss_caps_map))
+				continue;
+
+			__set_pipes_supported_fmt(mdata->vig_pipes,
+					mdata->nvig_pipes, fmt);
+
+			if (fmt->flag & VALID_ROT_WB_FORMAT) {
+				for (j = 0; j < mdata->nwb; j++)
+					SET_BIT(wb[j].supported_input_formats,
+							fmt->format);
+			}
+			if (fmt->flag & VALID_MDP_WB_INTF_FORMAT) {
+				for (j = 0; j < mdata->nwb; j++)
+					SET_BIT(wb[j].supported_output_formats,
+							fmt->format);
+			}
+			if (fmt->flag & VALID_MDP_CURSOR_FORMAT &&
+					mdata->ncursor_pipes) {
+				__set_pipes_supported_fmt(mdata->cursor_pipes,
+						mdata->ncursor_pipes, fmt);
+			}
+
+			/* RGB/DMA pipes cannot fetch YUV formats */
+			if (!fmt->is_yuv) {
+				__set_pipes_supported_fmt(mdata->rgb_pipes,
+						mdata->nrgb_pipes, fmt);
+				__set_pipes_supported_fmt(mdata->dma_pipes,
+						mdata->ndma_pipes, fmt);
+			}
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(mdss_mdp_format_ubwc_map) && has_ubwc; i++) {
+		struct mdss_mdp_format_params *fmt =
+			&mdss_mdp_format_ubwc_map[i].mdp_format;
+
+		if (fmt->unpack_dx_format &&
+			!test_bit(MDSS_CAPS_10_BIT_SUPPORTED,
+			mdata->mdss_caps_map))
+			continue;
+
+		__set_pipes_supported_fmt(mdata->vig_pipes,
+				mdata->nvig_pipes, fmt);
+
+		if (fmt->flag & VALID_ROT_WB_FORMAT) {
+			for (j = 0; j < mdata->nwb; j++)
+				SET_BIT(wb[j].supported_input_formats,
+						fmt->format);
+		}
+		if (fmt->flag & VALID_MDP_WB_INTF_FORMAT) {
+			for (j = 0; j < mdata->nwb; j++)
+				SET_BIT(wb[j].supported_output_formats,
+						fmt->format);
+		}
+		if (fmt->flag & VALID_MDP_CURSOR_FORMAT &&
+				mdata->ncursor_pipes) {
+			__set_pipes_supported_fmt(mdata->cursor_pipes,
+					mdata->ncursor_pipes, fmt);
+		}
+
+		if (!fmt->is_yuv) {
+			__set_pipes_supported_fmt(mdata->rgb_pipes,
+					mdata->nrgb_pipes, fmt);
+			__set_pipes_supported_fmt(mdata->dma_pipes,
+					mdata->ndma_pipes, fmt);
+		}
+	}
+}
+
+/*
+ * mdss_mdp_get_format_params() - look up format descriptor for an MDP format
+ * @format: MDP format code
+ *
+ * Searches the linear/tile map first, then the UBWC map.
+ *
+ * Return: matching descriptor, or NULL when the format is unknown or is a
+ * UBWC format on hardware without UBWC support.
+ */
+struct mdss_mdp_format_params *mdss_mdp_get_format_params(u32 format)
+{
+	struct mdss_mdp_format_params *fmt = NULL;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	int i;
+	bool fmt_found = false;
+
+	for (i = 0; i < ARRAY_SIZE(mdss_mdp_format_map); i++) {
+		fmt = &mdss_mdp_format_map[i];
+		if (format == fmt->format) {
+			fmt_found = true;
+			break;
+		}
+	}
+
+	if (!fmt_found) {
+		for (i = 0; i < ARRAY_SIZE(mdss_mdp_format_ubwc_map); i++) {
+			fmt = &mdss_mdp_format_ubwc_map[i].mdp_format;
+			if (format == fmt->format) {
+				fmt_found = true;
+				break;
+			}
+		}
+	}
+
+	/* Unknown format: previously this fell through holding a pointer to
+	 * the last UBWC map entry and could return it as a bogus match.
+	 */
+	if (!fmt_found)
+		return NULL;
+
+	return (mdss_mdp_is_ubwc_format(fmt) &&
+		!mdss_mdp_is_ubwc_supported(mdata)) ? NULL : fmt;
+}
+
+/*
+ * mdss_mdp_get_ubwc_micro_dim() - report the UBWC micro-tile dimensions
+ * @format: MDP format code
+ * @w: out, micro-tile width
+ * @h: out, micro-tile height
+ *
+ * Return: 0 when @format is a known UBWC format, -EINVAL otherwise.
+ * @w/@h are written only on success.
+ */
+int mdss_mdp_get_ubwc_micro_dim(u32 format, u16 *w, u16 *h)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(mdss_mdp_format_ubwc_map); i++) {
+		struct mdss_mdp_format_params_ubwc *entry =
+			&mdss_mdp_format_ubwc_map[i];
+
+		if (entry->mdp_format.format == format) {
+			*w = entry->micro.tile_width;
+			*h = entry->micro.tile_height;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * mdss_mdp_get_v_h_subsample_rate() - chroma subsampling factors
+ * @chroma_sample: MDSS chroma sampling mode
+ * @v_sample: out, vertical subsampling divisor
+ * @h_sample: out, horizontal subsampling divisor
+ */
+void mdss_mdp_get_v_h_subsample_rate(u8 chroma_sample,
+		u8 *v_sample, u8 *h_sample)
+{
+	/* Default: no subsampling (covers H1V1 and unknown modes). */
+	u8 v = 1, h = 1;
+
+	if (chroma_sample == MDSS_MDP_CHROMA_H2V1) {
+		h = 2;
+	} else if (chroma_sample == MDSS_MDP_CHROMA_H1V2) {
+		v = 2;
+	} else if (chroma_sample == MDSS_MDP_CHROMA_420) {
+		v = 2;
+		h = 2;
+	}
+
+	*v_sample = v;
+	*h_sample = h;
+}
+
+/*
+ * mdss_mdp_intersect_rect() - intersection of @dst_rect and @sci_rect
+ * @res_rect: out, intersection; all-zero when the rects do not meet
+ * @dst_rect: first rect
+ * @sci_rect: second (scissor) rect
+ */
+void mdss_mdp_intersect_rect(struct mdss_rect *res_rect,
+	const struct mdss_rect *dst_rect,
+	const struct mdss_rect *sci_rect)
+{
+	/* Intersection edges: max of left/top, min of right/bottom. */
+	int left = max(dst_rect->x, sci_rect->x);
+	int top = max(dst_rect->y, sci_rect->y);
+	int right = min((dst_rect->x + dst_rect->w),
+			(sci_rect->x + sci_rect->w));
+	int bottom = min((dst_rect->y + dst_rect->h),
+			(sci_rect->y + sci_rect->h));
+	struct mdss_rect out = {0, 0, 0, 0};
+
+	if (right >= left && bottom >= top) {
+		out.x = left;
+		out.y = top;
+		out.w = right - left;
+		out.h = bottom - top;
+	}
+
+	*res_rect = out;
+}
+
+/*
+ * mdss_mdp_crop_rect() - crop a src/dst rect pair against a scissor rect
+ * @src_rect: source rect, shifted/shrunk in place by the amount cropped
+ *            from the destination
+ * @dst_rect: destination rect, clipped to @sci_rect and rebased to the
+ *            scissor origin
+ * @sci_rect: scissor (clip) region
+ *
+ * When the intersection is empty both rects are left untouched.
+ */
+void mdss_mdp_crop_rect(struct mdss_rect *src_rect,
+	struct mdss_rect *dst_rect,
+	const struct mdss_rect *sci_rect)
+{
+	struct mdss_rect res;
+
+	mdss_mdp_intersect_rect(&res, dst_rect, sci_rect);
+
+	if (res.w && res.h) {
+		if ((res.w != dst_rect->w) || (res.h != dst_rect->h)) {
+			/* mirror the dst crop onto the source rect */
+			src_rect->x = src_rect->x + (res.x - dst_rect->x);
+			src_rect->y = src_rect->y + (res.y - dst_rect->y);
+			src_rect->w = res.w;
+			src_rect->h = res.h;
+		}
+		*dst_rect = (struct mdss_rect)
+			{(res.x - sci_rect->x), (res.y - sci_rect->y),
+			res.w, res.h};
+	}
+}
+
+/*
+ * rect_copy_mdss_to_mdp() - copy mdss_rect struct to mdp_rect
+ * @mdp  - pointer to mdp_rect, destination of the copy
+ * @mdss - pointer to mdss_rect, source of the copy
+ */
+void rect_copy_mdss_to_mdp(struct mdp_rect *mdp, struct mdss_rect *mdss)
+{
+	mdp->x = mdss->x;
+	mdp->y = mdss->y;
+	mdp->w = mdss->w;
+	mdp->h = mdss->h;
+}
+
+/*
+ * rect_copy_mdp_to_mdss() - copy an mdp_rect into an mdss_rect
+ * @mdp  - pointer to mdp_rect, source of the copy
+ * @mdss - pointer to mdss_rect, destination of the copy
+ */
+void rect_copy_mdp_to_mdss(struct mdp_rect *mdp, struct mdss_rect *mdss)
+{
+	/* straight field-for-field copy between the two rect layouts */
+	mdss->h = mdp->h;
+	mdss->w = mdp->w;
+	mdss->y = mdp->y;
+	mdss->x = mdp->x;
+}
+
+/*
+ * mdss_rect_cmp() - compares two rects
+ * @rect1 - rect value to compare
+ * @rect2 - rect value to compare
+ *
+ * Returns 1 if the rects are same, 0 otherwise.
+ */
+int mdss_rect_cmp(struct mdss_rect *rect1, struct mdss_rect *rect2)
+{
+	if (rect1->x != rect2->x || rect1->y != rect2->y)
+		return 0;
+	if (rect1->w != rect2->w || rect1->h != rect2->h)
+		return 0;
+	return 1;
+}
+
+/*
+ * mdss_rect_overlap_check() - compare two rects and check if they overlap
+ * @rect1 - rect value to compare
+ * @rect2 - rect value to compare
+ *
+ * Returns true if rects overlap, false otherwise.  Rects that merely touch
+ * at an edge do not count as overlapping.
+ */
+bool mdss_rect_overlap_check(struct mdss_rect *rect1, struct mdss_rect *rect2)
+{
+	/* Disjoint iff one rect lies entirely left/right/above/below the
+	 * other; overlap is the negation of that.
+	 */
+	bool separated_h = (rect1->x + rect1->w <= rect2->x) ||
+			   (rect2->x + rect2->w <= rect1->x);
+	bool separated_v = (rect1->y + rect1->h <= rect2->y) ||
+			   (rect2->y + rect2->h <= rect1->y);
+
+	return !(separated_h || separated_v);
+}
+
+/*
+ * mdss_rect_split() - split roi into two with regards to split-point.
+ * @in_roi - input roi, non-split
+ * @l_roi  - left roi after split
+ * @r_roi  - right roi after split
+ * @splitpoint - x coordinate separating the two panels
+ *
+ * Split input ROI into left and right ROIs with respect to split-point. This
+ * is useful during partial update with ping-pong split enabled, where user-land
+ * program is aware of only one frame-buffer but physically there are two
+ * distinct panels which requires their own ROIs.
+ */
+void mdss_rect_split(struct mdss_rect *in_roi, struct mdss_rect *l_roi,
+	struct mdss_rect *r_roi, u32 splitpoint)
+{
+	memset(l_roi, 0x0, sizeof(*l_roi));
+	memset(r_roi, 0x0, sizeof(*r_roi));
+
+	/* left update needed */
+	if (in_roi->x < splitpoint) {
+		*l_roi = *in_roi;
+
+		/* clamp the left ROI at the split boundary */
+		if ((l_roi->x + l_roi->w) >= splitpoint)
+			l_roi->w = splitpoint - in_roi->x;
+	}
+
+	/* right update needed; right ROI is rebased to the right panel's
+	 * own coordinate space (x counted from the splitpoint)
+	 */
+	if ((in_roi->x + in_roi->w) > splitpoint) {
+		*r_roi = *in_roi;
+
+		if (in_roi->x < splitpoint) {
+			r_roi->x = 0;
+			r_roi->w = in_roi->x + in_roi->w - splitpoint;
+		} else {
+			r_roi->x = in_roi->x - splitpoint;
+		}
+	}
+
+	pr_debug("left: %d,%d,%d,%d right: %d,%d,%d,%d\n",
+		l_roi->x, l_roi->y, l_roi->w, l_roi->h,
+		r_roi->x, r_roi->y, r_roi->w, r_roi->h);
+}
+
+/*
+ * mdss_mdp_get_rau_strides() - compute RAU strides for BWC buffers
+ * @w: image width in pixels
+ * @h: image height (not used by this calculation)
+ * @fmt: pixel format descriptor
+ * @ps: out, rau_cnt/ystride/rau_h/num_planes are filled in
+ *
+ * Return: 0 on success, -EINVAL for formats that BWC cannot handle
+ * (non-YUV, non-interleaved).
+ */
+int mdss_mdp_get_rau_strides(u32 w, u32 h,
+			       struct mdss_mdp_format_params *fmt,
+			       struct mdss_mdp_plane_sizes *ps)
+{
+	if (fmt->is_yuv) {
+		/* YUV: 64-pixel wide RAUs, 4 luma lines per RAU */
+		ps->rau_cnt = DIV_ROUND_UP(w, 64);
+		ps->ystride[0] = 64 * 4;
+		ps->rau_h[0] = 4;
+		ps->rau_h[1] = 2;
+		if (fmt->chroma_sample == MDSS_MDP_CHROMA_H1V2)
+			ps->ystride[1] = 64 * 2;
+		else if (fmt->chroma_sample == MDSS_MDP_CHROMA_H2V1) {
+			ps->ystride[1] = 32 * 4;
+			ps->rau_h[1] = 4;
+		} else
+			ps->ystride[1] = 32 * 2;
+
+		/* account for both chroma components */
+		ps->ystride[1] <<= 1;
+	} else if (fmt->fetch_planes == MDSS_MDP_PLANE_INTERLEAVED) {
+		/* RGB: 32-pixel wide RAUs, single plane */
+		ps->rau_cnt = DIV_ROUND_UP(w, 32);
+		ps->ystride[0] = 32 * 4 * fmt->bpp;
+		ps->ystride[1] = 0;
+		ps->rau_h[0] = 4;
+		ps->rau_h[1] = 0;
+	} else  {
+		pr_err("Invalid format=%d\n", fmt->format);
+		return -EINVAL;
+	}
+
+	ps->ystride[0] *= ps->rau_cnt;
+	ps->ystride[1] *= ps->rau_cnt;
+	ps->num_planes = 2;
+
+	pr_debug("BWC rau_cnt=%d strides={%d,%d} heights={%d,%d}\n",
+		ps->rau_cnt, ps->ystride[0], ps->ystride[1],
+		ps->rau_h[0], ps->rau_h[1]);
+
+	return 0;
+}
+
+/*
+ * mdss_mdp_get_ubwc_plane_size() - compute plane strides/sizes for UBWC
+ * @fmt: UBWC format descriptor (must actually be an
+ *       mdss_mdp_format_params_ubwc)
+ * @width: image width in pixels
+ * @height: image height in pixels
+ * @ps: out, ystride[]/plane_size[]/num_planes are filled in
+ *
+ * Return: 0 on success, -EINVAL when UBWC is unsupported by the hw or the
+ * format is not a recognised UBWC format.
+ */
+static int mdss_mdp_get_ubwc_plane_size(struct mdss_mdp_format_params *fmt,
+	u32 width, u32 height, struct mdss_mdp_plane_sizes *ps)
+{
+	int rc = 0;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_format_params_ubwc *fmt_ubwc =
+		(struct mdss_mdp_format_params_ubwc *)fmt;
+
+	if (!mdss_mdp_is_ubwc_supported(mdata)) {
+		pr_err("ubwc format is not supported for format: %d\n",
+			fmt->format);
+		return -EINVAL;
+	}
+
+	if (fmt->format == MDP_Y_CBCR_H2V2_UBWC ||
+		fmt->format == MDP_Y_CBCR_H2V2_TP10_UBWC) {
+		uint32_t y_stride_alignment, uv_stride_alignment;
+		uint32_t y_height_alignment, uv_height_alignment;
+		uint32_t y_tile_width = fmt_ubwc->micro.tile_width;
+		uint32_t y_tile_height = fmt_ubwc->micro.tile_height;
+		uint32_t uv_tile_width = y_tile_width / 2;
+		uint32_t uv_tile_height = y_tile_height;
+		uint32_t y_bpp_numer = 1, y_bpp_denom = 1;
+		uint32_t uv_bpp_numer = 1, uv_bpp_denom = 1;
+
+		/* YUV UBWC: Y bitstream, CbCr bitstream, Y meta, CbCr meta */
+		ps->num_planes = 4;
+		if (fmt->format == MDP_Y_CBCR_H2V2_UBWC) {
+			y_stride_alignment = 128;
+			uv_stride_alignment = 64;
+			y_height_alignment = 32;
+			uv_height_alignment = 32;
+			y_bpp_numer = 1;
+			uv_bpp_numer = 2;
+			y_bpp_denom = 1;
+			uv_bpp_denom = 1;
+		} else if (fmt->format == MDP_Y_CBCR_H2V2_TP10_UBWC) {
+			/* TP10 packs 3 10-bit pixels in 4 bytes (4/3 bpp) */
+			y_stride_alignment = 192;
+			uv_stride_alignment = 96;
+			y_height_alignment = 16;
+			uv_height_alignment = 16;
+			y_bpp_numer = 4;
+			uv_bpp_numer = 8;
+			y_bpp_denom = 3;
+			uv_bpp_denom = 3;
+		}
+
+		/* Y bitstream stride and plane size */
+		ps->ystride[0] = ALIGN(width, y_stride_alignment);
+		ps->ystride[0] = (ps->ystride[0] * y_bpp_numer) / y_bpp_denom;
+		ps->plane_size[0] = ALIGN(ps->ystride[0] *
+			ALIGN(height, y_height_alignment), 4096);
+
+		/* CbCr bitstream stride and plane size */
+		ps->ystride[1] = ALIGN(width / 2, uv_stride_alignment);
+		ps->ystride[1] = (ps->ystride[1] * uv_bpp_numer) / uv_bpp_denom;
+		ps->plane_size[1] = ALIGN(ps->ystride[1] *
+			ALIGN(height / 2, uv_height_alignment), 4096);
+
+		/* Y meta data stride and plane size */
+		ps->ystride[2] = ALIGN(DIV_ROUND_UP(width, y_tile_width), 64);
+		ps->plane_size[2] = ALIGN(ps->ystride[2] *
+			ALIGN(DIV_ROUND_UP(height, y_tile_height), 16), 4096);
+
+		/* CbCr meta data stride and plane size */
+		ps->ystride[3] =
+			ALIGN(DIV_ROUND_UP(width / 2, uv_tile_width), 64);
+		ps->plane_size[3] = ALIGN(ps->ystride[3] * ALIGN(
+			DIV_ROUND_UP(height / 2, uv_tile_height), 16), 4096);
+	} else if (fmt->format == MDP_RGBA_8888_UBWC ||
+		fmt->format == MDP_RGBX_8888_UBWC ||
+		fmt->format == MDP_RGB_565_UBWC ||
+		fmt->format == MDP_RGBA_1010102_UBWC ||
+		fmt->format == MDP_RGBX_1010102_UBWC) {
+		uint32_t stride_alignment, bpp, aligned_bitstream_width;
+
+		if (fmt->format == MDP_RGB_565_UBWC) {
+			stride_alignment = 128;
+			bpp = 2;
+		} else {
+			stride_alignment = 64;
+			bpp = 4;
+		}
+		/* RGB UBWC: bitstream plane + meta plane */
+		ps->num_planes = 2;
+
+		/* RGB bitstream stride and plane size */
+		aligned_bitstream_width = ALIGN(width, stride_alignment);
+		ps->ystride[0] = aligned_bitstream_width * bpp;
+		ps->plane_size[0] = ALIGN(bpp * aligned_bitstream_width *
+			ALIGN(height, 16), 4096);
+
+		/* RGB meta data stride and plane size */
+		ps->ystride[2] =
+			ALIGN(DIV_ROUND_UP(aligned_bitstream_width, 16), 64);
+		ps->plane_size[2] = ALIGN(ps->ystride[2] *
+			ALIGN(DIV_ROUND_UP(height, 4), 16), 4096);
+	} else {
+		/* pr_fmt already prepends the function name; the old message
+		 * passed __func__ again and printed it twice
+		 */
+		pr_err("UBWC format not supported for fmt:%d\n",
+			fmt->format);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+/*
+ * mdss_mdp_get_plane_sizes() - compute per-plane strides and sizes
+ * @fmt: pixel format descriptor
+ * @w: image width in pixels
+ * @h: image height in pixels
+ * @ps: out, fully overwritten with plane strides/sizes/total_size
+ * @bwc_mode: non-zero to lay out planes for bandwidth compression
+ * @rotation: not used by this implementation
+ *
+ * Return: 0 on success, -EINVAL for a NULL @ps, -ERANGE for oversized
+ * dimensions, or the error from the UBWC/BWC helpers.
+ */
+int mdss_mdp_get_plane_sizes(struct mdss_mdp_format_params *fmt, u32 w, u32 h,
+	struct mdss_mdp_plane_sizes *ps, u32 bwc_mode, bool rotation)
+{
+	int i, rc = 0;
+	u32 bpp;
+
+	if (ps == NULL)
+		return -EINVAL;
+
+	memset(ps, 0, sizeof(struct mdss_mdp_plane_sizes));
+
+	if ((w > MAX_IMG_WIDTH) || (h > MAX_IMG_HEIGHT))
+		return -ERANGE;
+
+	bpp = fmt->bpp;
+
+	if (mdss_mdp_is_ubwc_format(fmt)) {
+		rc = mdss_mdp_get_ubwc_plane_size(fmt, w, h, ps);
+	} else if (bwc_mode) {
+		u32 height, meta_size;
+
+		rc = mdss_mdp_get_rau_strides(w, h, fmt, ps);
+		if (rc)
+			return rc;
+
+		/* fold RAU metadata into the data plane stride/size */
+		height = DIV_ROUND_UP(h, ps->rau_h[0]);
+		meta_size = DIV_ROUND_UP(ps->rau_cnt, 8);
+		ps->ystride[1] += meta_size;
+		ps->ystride[0] += ps->ystride[1] + meta_size;
+		ps->plane_size[0] = ps->ystride[0] * height;
+
+		ps->ystride[1] = 2;
+		ps->plane_size[1] = 2 * ps->rau_cnt * height;
+
+		pr_debug("BWC data stride=%d size=%d meta size=%d\n",
+			ps->ystride[0], ps->plane_size[0], ps->plane_size[1]);
+	} else {
+		if (fmt->fetch_planes == MDSS_MDP_PLANE_INTERLEAVED) {
+			ps->num_planes = 1;
+			ps->plane_size[0] = w * h * bpp;
+			ps->ystride[0] = w * bpp;
+		} else if (fmt->format == MDP_Y_CBCR_H2V2_VENUS ||
+				fmt->format == MDP_Y_CRCB_H2V2_VENUS) {
+
+			/* Venus NV12/NV21: use the codec's stride/scanline
+			 * alignment macros
+			 */
+			int cf = (fmt->format == MDP_Y_CBCR_H2V2_VENUS) ?
+					COLOR_FMT_NV12 : COLOR_FMT_NV21;
+			ps->num_planes = 2;
+			ps->ystride[0] = VENUS_Y_STRIDE(cf, w);
+			ps->ystride[1] = VENUS_UV_STRIDE(cf, w);
+			ps->plane_size[0] = VENUS_Y_SCANLINES(cf, h) *
+				ps->ystride[0];
+			ps->plane_size[1] = VENUS_UV_SCANLINES(cf, h) *
+				ps->ystride[1];
+		} else {
+			u8 v_subsample, h_subsample, stride_align, height_align;
+			u32 chroma_samp;
+
+			chroma_samp = fmt->chroma_sample;
+
+			mdss_mdp_get_v_h_subsample_rate(chroma_samp,
+				&v_subsample, &h_subsample);
+
+			switch (fmt->format) {
+			case MDP_Y_CR_CB_GH2V2:
+				stride_align = 16;
+				height_align = 1;
+				break;
+			default:
+				stride_align = 1;
+				height_align = 1;
+				break;
+			}
+
+			/* 10-bit (DX) formats use two bytes per component */
+			w = w << fmt->unpack_dx_format;
+
+			ps->ystride[0] = ALIGN(w, stride_align);
+			ps->ystride[1] = ALIGN(w / h_subsample, stride_align);
+			ps->plane_size[0] = ps->ystride[0] *
+				ALIGN(h, height_align);
+			ps->plane_size[1] = ps->ystride[1] * (h / v_subsample);
+
+			if (fmt->fetch_planes == MDSS_MDP_PLANE_PSEUDO_PLANAR) {
+				ps->num_planes = 2;
+				ps->plane_size[1] *= 2;
+				ps->ystride[1] *= 2;
+			} else { /* planar */
+				ps->num_planes = 3;
+				ps->plane_size[2] = ps->plane_size[1];
+				ps->ystride[2] = ps->ystride[1];
+			}
+		}
+	}
+
+	/* Safe to use MAX_PLANES as ps is memset at start of function */
+	for (i = 0; i < MAX_PLANES; i++)
+		ps->total_size += ps->plane_size[i];
+
+	return rc;
+}
+
/*
 * mdss_mdp_ubwc_data_check() - validate a UBWC buffer and carve it into the
 * MDP plane order.
 * @data: imported buffer; plane addresses/lengths may be rewritten here
 * @ps:   expected plane sizes computed for the UBWC format
 * @fmt:  format description (caller has established it is a UBWC format)
 *
 * A UBWC buffer arrives either pre-split per plane, or as one contiguous
 * allocation entirely in plane 0.  In the contiguous case the single
 * allocation is carved into the MDP plane order shown in the diagrams
 * below.  In both cases the resulting per-plane lengths are validated
 * against @ps.
 *
 * Return: 0 on success, -ENOTSUPP when the target lacks UBWC support,
 * -ENOMEM when the allocation is smaller than the required total size,
 * -EINVAL on a plane-count or plane-size mismatch.
 */
static int mdss_mdp_ubwc_data_check(struct mdss_mdp_data *data,
			struct mdss_mdp_plane_sizes *ps,
			struct mdss_mdp_format_params *fmt)
{
	int i, inc;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	unsigned long data_size = 0;
	dma_addr_t base_addr;

	if (!mdss_mdp_is_ubwc_supported(mdata)) {
		pr_err("ubwc format is not supported for format: %d\n",
			fmt->format);
		return -ENOTSUPP;
	}

	/* plane 0 already holds exactly the bitstream plane: buffer was
	 * imported pre-split, skip the carving step and just validate */
	if (data->p[0].len == ps->plane_size[0])
		goto end;

	/* From this point, assumption is plane 0 is to be divided */
	data_size = data->p[0].len;
	if (data_size < ps->total_size) {
		pr_err("insufficient current mem len=%lu required mem len=%u\n",
		       data_size, ps->total_size);
		return -ENOMEM;
	}

	base_addr = data->p[0].addr;

	if (fmt->format == MDP_Y_CBCR_H2V2_UBWC ||
		fmt->format == MDP_Y_CBCR_H2V2_TP10_UBWC) {
		/************************************************/
		/*      UBWC            **                      */
		/*      buffer          **      MDP PLANE       */
		/*      format          **                      */
		/************************************************/
		/* -------------------  ** -------------------- */
		/* |      Y meta     |  ** |    Y bitstream   | */
		/* |       data      |  ** |       plane      | */
		/* -------------------  ** -------------------- */
		/* |    Y bitstream  |  ** |  CbCr bitstream  | */
		/* |       data      |  ** |       plane      | */
		/* -------------------  ** -------------------- */
		/* |   Cbcr metadata |  ** |       Y meta     | */
		/* |       data      |  ** |       plane      | */
		/* -------------------  ** -------------------- */
		/* |  CbCr bitstream |  ** |     CbCr meta    | */
		/* |       data      |  ** |       plane      | */
		/* -------------------  ** -------------------- */
		/************************************************/

		/* configure Y bitstream plane */
		data->p[0].addr = base_addr + ps->plane_size[2];
		data->p[0].len = ps->plane_size[0];

		/* configure CbCr bitstream plane */
		data->p[1].addr = base_addr + ps->plane_size[0]
			+ ps->plane_size[2] + ps->plane_size[3];
		data->p[1].len = ps->plane_size[1];

		/* configure Y metadata plane */
		data->p[2].addr = base_addr;
		data->p[2].len = ps->plane_size[2];

		/* configure CbCr metadata plane */
		data->p[3].addr = base_addr + ps->plane_size[0]
			+ ps->plane_size[2];
		data->p[3].len = ps->plane_size[3];
	} else {
		/************************************************/
		/*      UBWC            **                      */
		/*      buffer          **      MDP PLANE       */
		/*      format          **                      */
		/************************************************/
		/* -------------------  ** -------------------- */
		/* |      RGB meta   |  ** |   RGB bitstream  | */
		/* |       data      |  ** |       plane      | */
		/* -------------------  ** -------------------- */
		/* |  RGB bitstream  |  ** |       NONE       | */
		/* |       data      |  ** |                  | */
		/* -------------------  ** -------------------- */
		/*                      ** |     RGB meta     | */
		/*                      ** |       plane      | */
		/*                      ** -------------------- */
		/************************************************/

		/* configure RGB bitstream plane */
		data->p[0].addr = base_addr + ps->plane_size[2];
		data->p[0].len = ps->plane_size[0];

		/* configure RGB metadata plane */
		data->p[2].addr = base_addr;
		data->p[2].len = ps->plane_size[2];
	}
	data->num_planes = ps->num_planes;

end:
	if (data->num_planes != ps->num_planes) {
		pr_err("num_planes don't match: fmt:%d, data:%d, ps:%d\n",
				fmt->format, data->num_planes, ps->num_planes);
		return -EINVAL;
	}

	/* YUV UBWC uses all four planes; RGB UBWC only planes 0 and 2,
	 * so step by 2 to skip the unused odd-numbered slots */
	inc = ((fmt->format == MDP_Y_CBCR_H2V2_UBWC ||
		fmt->format == MDP_Y_CBCR_H2V2_TP10_UBWC) ? 1 : 2);
	for (i = 0; i < MAX_PLANES; i += inc) {
		if (data->p[i].len != ps->plane_size[i]) {
			pr_err("plane:%d fmt:%d, len does not match: data:%lu, ps:%d\n",
					i, fmt->format, data->p[i].len,
					ps->plane_size[i]);
			return -EINVAL;
		}
	}

	return 0;
}
+
/*
 * mdss_mdp_data_check() - verify an imported buffer satisfies the computed
 * plane sizes, splitting contiguous allocations across planes if needed.
 * @data: imported buffer planes
 * @ps:   required plane sizes (may be NULL, in which case nothing to check)
 * @fmt:  buffer format; UBWC formats are delegated to the UBWC checker
 *
 * When fewer planes were imported than the format requires, the tail of the
 * preceding plane is carved off to populate the missing ones.
 *
 * Return: 0 on success, -ENOMEM when a plane is too small or @data is empty,
 * or an error from mdss_mdp_ubwc_data_check().
 */
int mdss_mdp_data_check(struct mdss_mdp_data *data,
			struct mdss_mdp_plane_sizes *ps,
			struct mdss_mdp_format_params *fmt)
{
	struct mdss_mdp_img_data *prev, *curr;
	int i;

	if (!ps)
		return 0;

	if (!data || data->num_planes == 0)
		return -ENOMEM;

	if (mdss_mdp_is_ubwc_format(fmt))
		return mdss_mdp_ubwc_data_check(data, ps, fmt);

	pr_debug("srcp0=%pa len=%lu frame_size=%u\n", &data->p[0].addr,
		data->p[0].len, ps->total_size);

	for (i = 0; i < ps->num_planes; i++) {
		curr = &data->p[i];
		if (i >= data->num_planes) {
			/* plane not supplied: carve it out of the excess
			 * length of the previous plane */
			u32 psize = ps->plane_size[i-1];

			prev = &data->p[i-1];
			if (prev->len > psize) {
				curr->len = prev->len - psize;
				prev->len = psize;
			}
			curr->addr = prev->addr + psize;
		}
		if (curr->len < ps->plane_size[i]) {
			pr_err("insufficient mem=%lu p=%d len=%u\n",
			       curr->len, i, ps->plane_size[i]);
			return -ENOMEM;
		}
		pr_debug("plane[%d] addr=%pa len=%lu\n", i,
				&curr->addr, curr->len);
	}
	data->num_planes = ps->num_planes;

	return 0;
}
+
+int mdss_mdp_validate_offset_for_ubwc_format(
+	struct mdss_mdp_format_params *fmt, u16 x, u16 y)
+{
+	int ret;
+	u16 micro_w, micro_h;
+
+	ret = mdss_mdp_get_ubwc_micro_dim(fmt->format, &micro_w, &micro_h);
+	if (ret || !micro_w || !micro_h) {
+		pr_err("Could not get valid micro tile dimensions\n");
+		return -EINVAL;
+	}
+
+	if (x % (micro_w * UBWC_META_MACRO_W_H)) {
+		pr_err("x=%d does not align with meta width=%d\n", x,
+			micro_w * UBWC_META_MACRO_W_H);
+		return -EINVAL;
+	}
+
+	if (y % (micro_h * UBWC_META_MACRO_W_H)) {
+		pr_err("y=%d does not align with meta height=%d\n", y,
+			UBWC_META_MACRO_W_H);
+		return -EINVAL;
+	}
+	return ret;
+}
+
/* x and y are assumed to be valid, expected to line up with start of tiles */
/*
 * Advance the four UBWC plane addresses in @data so scanout starts at pixel
 * (@x, @y).  Bitstream planes advance by whole macro tiles (4096 bytes per
 * macro tile column), metadata planes by meta blocks.  On any out-of-range
 * offset a WARN is emitted and the remaining planes are left untouched.
 */
void mdss_mdp_ubwc_data_calc_offset(struct mdss_mdp_data *data, u16 x, u16 y,
	struct mdss_mdp_plane_sizes *ps, struct mdss_mdp_format_params *fmt)
{
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	u16 macro_w, micro_w, micro_h;
	u32 offset;
	int ret;

	if (!mdss_mdp_is_ubwc_supported(mdata)) {
		pr_err("ubwc format is not supported for format: %d\n",
			fmt->format);
		return;
	}

	/* ret is 0 from here on unless a plane offset check below fails */
	ret = mdss_mdp_get_ubwc_micro_dim(fmt->format, &micro_w, &micro_h);
	if (ret || !micro_w || !micro_h) {
		pr_err("Could not get valid micro tile dimensions\n");
		return;
	}
	macro_w = 4 * micro_w;

	if (fmt->format == MDP_Y_CBCR_H2V2_UBWC ||
		fmt->format == MDP_Y_CBCR_H2V2_TP10_UBWC) {
		u16 chroma_macro_w = macro_w / 2;
		u16 chroma_micro_w = micro_w / 2;

		/* plane 1 and 3 are chroma, with sub sample of 2 */
		offset = y * ps->ystride[0] +
			(x / macro_w) * 4096;
		if (offset < data->p[0].len) {
			data->p[0].addr += offset;
		} else {
			ret = 1;
			goto done;
		}

		offset = y / 2 * ps->ystride[1] +
			((x / 2) / chroma_macro_w) * 4096;
		if (offset < data->p[1].len) {
			data->p[1].addr += offset;
		} else {
			ret = 2;
			goto done;
		}

		offset = (y / micro_h) * ps->ystride[2] +
			((x / micro_w) / UBWC_META_MACRO_W_H) *
			UBWC_META_BLOCK_SIZE;
		if (offset < data->p[2].len) {
			data->p[2].addr += offset;
		} else {
			ret = 3;
			goto done;
		}

		offset = ((y / 2) / micro_h) * ps->ystride[3] +
			(((x / 2) / chroma_micro_w) / UBWC_META_MACRO_W_H) *
			UBWC_META_BLOCK_SIZE;
		if (offset < data->p[3].len) {
			data->p[3].addr += offset;
		} else {
			ret = 4;
			goto done;
		}

	} else {
		/* RGB UBWC: only bitstream (0) and metadata (2) planes */
		offset = y * ps->ystride[0] +
			(x / macro_w) * 4096;
		if (offset < data->p[0].len) {
			data->p[0].addr += offset;
		} else {
			ret = 1;
			goto done;
		}

		offset = DIV_ROUND_UP(y, micro_h) * ps->ystride[2] +
			((x / micro_w) / UBWC_META_MACRO_W_H) *
			UBWC_META_BLOCK_SIZE;
		if (offset < data->p[2].len) {
			data->p[2].addr += offset;
		} else {
			ret = 3;
			goto done;
		}
	}

done:
	/* ret encodes (failing plane index + 1); 0 means all planes fit */
	if (ret) {
		WARN(1, "idx %d, offsets:%u too large for buflen%lu\n",
			(ret - 1), offset, data->p[(ret - 1)].len);
	}
}
+
+void mdss_mdp_data_calc_offset(struct mdss_mdp_data *data, u16 x, u16 y,
+	struct mdss_mdp_plane_sizes *ps, struct mdss_mdp_format_params *fmt)
+{
+	if ((x == 0) && (y == 0))
+		return;
+
+	if (mdss_mdp_is_ubwc_format(fmt)) {
+		mdss_mdp_ubwc_data_calc_offset(data, x, y, ps, fmt);
+		return;
+	}
+
+	data->p[0].addr += y * ps->ystride[0];
+
+	if (data->num_planes == 1) {
+		data->p[0].addr += x * fmt->bpp;
+	} else {
+		u16 xoff, yoff;
+		u8 v_subsample, h_subsample;
+
+		mdss_mdp_get_v_h_subsample_rate(fmt->chroma_sample,
+			&v_subsample, &h_subsample);
+
+		xoff = x / h_subsample;
+		yoff = y / v_subsample;
+
+		data->p[0].addr += x;
+		data->p[1].addr += xoff + (yoff * ps->ystride[1]);
+		if (data->num_planes == 2) /* pseudo planar */
+			data->p[1].addr += xoff;
+		else /* planar */
+			data->p[2].addr += xoff + (yoff * ps->ystride[2]);
+	}
+}
+
/*
 * mdss_mdp_put_img() - release one imported image plane.
 * @data:    plane to release; the backing reference fields are cleared
 * @rotator: true when the buffer was mapped in the rotator SMMU domain
 * @dir:     DMA data direction used at map time
 *
 * Handles each import flavour in turn: framebuffer-backed (fd reference),
 * plain file reference, dma-buf/ion (unmap, detach, put), and secure
 * display (nothing to unmap).
 *
 * Return: 0 on success, -ENOMEM when there is no ion client or the plane
 * does not match any known import flavour.
 */
static int mdss_mdp_put_img(struct mdss_mdp_img_data *data, bool rotator,
		int dir)
{
	struct ion_client *iclient = mdss_get_ionclient();
	u32 domain;

	if (data->flags & MDP_MEMORY_ID_TYPE_FB) {
		pr_debug("fb mem buf=0x%pa\n", &data->addr);
		fdput(data->srcp_f);
		memset(&data->srcp_f, 0, sizeof(struct fd));
	} else if (data->srcp_f.file) {
		pr_debug("pmem buf=0x%pa\n", &data->addr);
		memset(&data->srcp_f, 0, sizeof(struct fd));
	} else if (!IS_ERR_OR_NULL(data->srcp_dma_buf)) {
		pr_debug("ion hdl=%pK buf=0x%pa\n", data->srcp_dma_buf,
							&data->addr);
		if (!iclient) {
			pr_err("invalid ion client\n");
			return -ENOMEM;
		}
		/* undo the SMMU mapping only if it was actually created */
		if (data->mapped) {
			domain = mdss_smmu_get_domain_type(data->flags,
				rotator);
			mdss_smmu_unmap_dma_buf(data->srcp_table,
						domain, dir,
						data->srcp_dma_buf);
			data->mapped = false;
		}
		if (!data->skip_detach) {
			dma_buf_unmap_attachment(data->srcp_attachment,
				data->srcp_table,
				mdss_smmu_dma_data_direction(dir));
			dma_buf_detach(data->srcp_dma_buf,
					data->srcp_attachment);
			dma_buf_put(data->srcp_dma_buf);
				data->srcp_dma_buf = NULL;
		}
	} else if (data->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION) {
		/*
		 * skip memory unmapping - secure display uses physical
		 * address which does not require buffer unmapping
		 *
		 * For LT targets in secure display usecase, srcp_dma_buf will
		 * be filled due to map call which will be unmapped above.
		 *
		 */
		pr_debug("skip memory unmapping for secure display content\n");
	} else {
		return -ENOMEM;
	}

	return 0;
}
+
/*
 * mdss_mdp_get_img() - import one image plane described by userspace.
 * @img:     userspace buffer description (memory_id fd, flags, offset)
 * @data:    plane to populate (address, length, backing references)
 * @dev:     device used for dma-buf attachment
 * @rotator: true when mapping into the rotator SMMU domain
 * @dir:     DMA data direction for the eventual mapping
 *
 * Three import flavours:
 *  - framebuffer memory (MDP_MEMORY_ID_TYPE_FB): resolve the fd to an fb
 *    device and look up its physical address;
 *  - SMMU-mapped dma-buf: get/attach/map_attachment now, defer the actual
 *    address mapping to mdss_mdp_map_buffer();
 *  - unmapped (e.g. secure) ion buffer: import once to read the physical
 *    address, then free the handle again.
 *
 * Return: 0 on success, negative errno on failure (buffer references are
 * released on the error paths).
 */
static int mdss_mdp_get_img(struct msmfb_data *img,
		struct mdss_mdp_img_data *data, struct device *dev,
		bool rotator, int dir)
{
	struct fd f;
	int ret = -EINVAL;
	int fb_num;
	unsigned long *len;
	u32 domain;
	dma_addr_t *start;
	struct ion_client *iclient = mdss_get_ionclient();
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	start = &data->addr;
	len = &data->len;
	data->flags |= img->flags;
	data->offset = img->offset;
	if (img->flags & MDP_MEMORY_ID_TYPE_FB) {
		f = fdget(img->memory_id);
		if (f.file == NULL) {
			pr_err("invalid framebuffer file (%d)\n",
					img->memory_id);
			return -EINVAL;
		}
		data->srcp_f = f;

		if (MAJOR(f.file->f_dentry->d_inode->i_rdev) == FB_MAJOR) {
			fb_num = MINOR(f.file->f_dentry->d_inode->i_rdev);
			ret = mdss_fb_get_phys_info(start, len, fb_num);
			if (ret)
				pr_err("mdss_fb_get_phys_info() failed\n");
		} else {
			pr_err("invalid FB_MAJOR\n");
			ret = -1;
		}
	} else if (iclient) {
		if (mdss_mdp_is_map_needed(mdata, data)) {
			data->srcp_dma_buf = dma_buf_get(img->memory_id);
			if (IS_ERR_OR_NULL(data->srcp_dma_buf)) {
				pr_err("error on ion_import_fd\n");
				ret = PTR_ERR(data->srcp_dma_buf);
				data->srcp_dma_buf = NULL;
				return ret;
			}
			domain = mdss_smmu_get_domain_type(data->flags,
							   rotator);

			data->srcp_attachment =
				mdss_smmu_dma_buf_attach(data->srcp_dma_buf,
							 dev, domain);
			if (IS_ERR(data->srcp_attachment)) {
				ret = PTR_ERR(data->srcp_attachment);
				goto err_put;
			}

			data->srcp_table =
				dma_buf_map_attachment(data->srcp_attachment,
				mdss_smmu_dma_data_direction(dir));
			if (IS_ERR(data->srcp_table)) {
				ret = PTR_ERR(data->srcp_table);
				goto err_detach;
			}

			data->addr = 0;
			data->len = 0;
			data->mapped = false;
			data->skip_detach = false;
			/* return early, mapping will be done later */
			ret = 0;
			goto done;
		} else {
			struct ion_handle *ihandle = NULL;
			struct sg_table *sg_ptr = NULL;

			/* do/while(0) used as a single-exit error ladder */
			do {
				ihandle = ion_import_dma_buf(iclient,
							     img->memory_id);
				if (IS_ERR_OR_NULL(ihandle)) {
					ret = -EINVAL;
					pr_err("ion import buffer failed\n");
					break;
				}

				sg_ptr = ion_sg_table(iclient, ihandle);
				if (sg_ptr == NULL) {
					pr_err("ion sg table get failed\n");
					ret = -EINVAL;
					break;
				}

				if (sg_ptr->nents != 1) {
					pr_err("ion buffer mapping failed\n");
					ret = -EINVAL;
					break;
				}

				/* buffer must lie fully below 4GB for this
				 * (unmapped) access path */
				if (((uint64_t)sg_dma_address(sg_ptr->sgl) >=
					PHY_ADDR_4G - sg_ptr->sgl->length)) {
					pr_err("ion buffer mapped size is invalid\n");
					ret = -EINVAL;
					break;
				}

				data->addr = sg_dma_address(sg_ptr->sgl);
				data->len = sg_ptr->sgl->length;
				data->mapped = true;
				ret = 0;
			} while (0);

			if (!IS_ERR_OR_NULL(ihandle))
				ion_free(iclient, ihandle);
			return ret;
		}
	}
	if (start && !*start) {
		pr_err("start address is zero!\n");
		mdss_mdp_put_img(data, rotator, dir);
		return -ENOMEM;
	}

	if (!ret && (data->offset < data->len)) {
		/* fold the requested byte offset into address/length */
		data->addr += data->offset;
		data->len -= data->offset;

		pr_debug("mem=%d ihdl=%pK buf=0x%pa len=0x%lx\n",
			 img->memory_id, data->srcp_dma_buf, &data->addr,
			 data->len);
	} else {
		mdss_mdp_put_img(data, rotator, dir);
		return ret ? : -EOVERFLOW;
	}

	return ret;
err_detach:
	dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
err_put:
	/* NOTE(review): srcp_dma_buf is put but not NULLed here; a later
	 * mdss_mdp_put_img() on this plane would double-put — confirm
	 * callers never reuse the plane after this failure */
	dma_buf_put(data->srcp_dma_buf);
done:
	return ret;
}
+
/*
 * mdss_mdp_map_buffer() - perform the deferred mapping of a plane imported
 * by mdss_mdp_get_img().
 * @data:    plane holding a dma-buf attachment/table from the import step
 * @rotator: true when mapping into the rotator SMMU domain
 * @dir:     DMA data direction
 *
 * Maps through the SMMU when the IOMMU is attached and mapping is needed;
 * otherwise takes the physical address of the scatterlist and sums segment
 * lengths.  Already-mapped planes (addr and len set) return immediately.
 *
 * Return: 0 on success, negative errno on failure (plane is released).
 */
static int mdss_mdp_map_buffer(struct mdss_mdp_img_data *data, bool rotator,
		int dir)
{
	int ret = -EINVAL;
	int domain;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	struct scatterlist *sg;
	unsigned int i;
	struct sg_table *table;

	/* nothing to do if the plane was mapped during import */
	if (data->addr && data->len)
		return 0;

	if (!IS_ERR_OR_NULL(data->srcp_dma_buf)) {
		if (mdss_res->mdss_util->iommu_attached() &&
			(mdss_mdp_is_map_needed(mdata, data))) {
			domain = mdss_smmu_get_domain_type(data->flags,
					rotator);
			data->dir = dir;
			data->domain = domain;
			ret = mdss_smmu_map_dma_buf(data->srcp_dma_buf,
					data->srcp_table, domain,
					&data->addr, &data->len, dir);
			if (IS_ERR_VALUE(ret)) {
				pr_err("smmu map dma buf failed: (%d)\n", ret);
				goto err_unmap;
			}
			data->mapped = true;
		} else {
			/* no SMMU: use the physical address directly and
			 * total up the scatterlist segment lengths */
			data->addr = sg_phys(data->srcp_table->sgl);
			data->len = 0;
			table = data->srcp_table;
			for_each_sg(table->sgl, sg, table->nents, i) {
				data->len += sg->length;
			}
			ret = 0;
		}
	}

	if (!data->addr) {
		pr_err("start address is zero!\n");
		mdss_mdp_put_img(data, rotator, dir);
		return -ENOMEM;
	}

	if (!ret && (data->offset < data->len)) {
		/* fold the requested byte offset into address/length */
		data->addr += data->offset;
		data->len -= data->offset;

		pr_debug("ihdl=%pK buf=0x%pa len=0x%lx\n",
			 data->srcp_dma_buf, &data->addr, data->len);
	} else {
		mdss_mdp_put_img(data, rotator, dir);
		return ret ? : -EOVERFLOW;
	}

	return ret;

err_unmap:
	dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
		mdss_smmu_dma_data_direction(dir));
	dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
	dma_buf_put(data->srcp_dma_buf);
	return ret;
}
+
+static int mdss_mdp_data_get(struct mdss_mdp_data *data,
+		struct msmfb_data *planes, int num_planes, u32 flags,
+		struct device *dev, bool rotator, int dir)
+{
+	int i, rc = 0;
+
+	if ((num_planes <= 0) || (num_planes > MAX_PLANES))
+		return -EINVAL;
+
+	for (i = 0; i < num_planes; i++) {
+		data->p[i].flags = flags;
+		rc = mdss_mdp_get_img(&planes[i], &data->p[i], dev, rotator,
+				dir);
+		if (rc) {
+			pr_err("failed to get buf p=%d flags=%x\n", i, flags);
+			while (i > 0) {
+				i--;
+				mdss_mdp_put_img(&data->p[i], rotator, dir);
+			}
+			break;
+		}
+	}
+
+	data->num_planes = i;
+
+	return rc;
+}
+
+int mdss_mdp_data_map(struct mdss_mdp_data *data, bool rotator, int dir)
+{
+	int i, rc = 0;
+
+	if (!data || !data->num_planes || data->num_planes > MAX_PLANES)
+		return -EINVAL;
+
+	for (i = 0; i < data->num_planes; i++) {
+		rc = mdss_mdp_map_buffer(&data->p[i], rotator, dir);
+		if (rc) {
+			pr_err("failed to map buf p=%d\n", i);
+			while (i > 0) {
+				i--;
+				mdss_mdp_put_img(&data->p[i], rotator, dir);
+			}
+			break;
+		}
+	}
+
+	return rc;
+}
+
+void mdss_mdp_data_free(struct mdss_mdp_data *data, bool rotator, int dir)
+{
+	int i;
+
+	mdss_iommu_ctrl(1);
+	for (i = 0; i < data->num_planes && data->p[i].len; i++)
+		mdss_mdp_put_img(&data->p[i], rotator, dir);
+	memset(&data->p, 0, sizeof(struct mdss_mdp_img_data) * MAX_PLANES);
+	mdss_iommu_ctrl(0);
+
+	data->num_planes = 0;
+}
+
/*
 * mdss_mdp_data_get_and_validate_size() - import a multi-plane buffer and
 * verify the usable memory covers the frame described by @buffer.
 * @data:       destination buffer descriptor
 * @planes:     per-plane fd/offset descriptions from userspace
 * @num_planes: number of entries in @planes
 * @flags:      import flags applied to every plane
 * @dev:        device used for dma-buf attachment
 * @rotator:    true for rotator SMMU domain
 * @dir:        DMA data direction
 * @buffer:     layer buffer geometry (format, width, height, offsets)
 *
 * Return: 0 on success; -EINVAL on unknown format, an offset beyond a
 * plane, or insufficient total memory (imported planes are freed); or an
 * error from mdss_mdp_data_get().
 */
int mdss_mdp_data_get_and_validate_size(struct mdss_mdp_data *data,
	struct msmfb_data *planes, int num_planes, u32 flags,
	struct device *dev, bool rotator, int dir,
	struct mdp_layer_buffer *buffer)
{
	struct mdss_mdp_format_params *fmt;
	struct mdss_mdp_plane_sizes ps;
	int ret, i;
	unsigned long total_buf_len = 0;

	fmt = mdss_mdp_get_format_params(buffer->format);
	if (!fmt) {
		pr_err("Format %d not supported\n", buffer->format);
		return -EINVAL;
	}

	ret = mdss_mdp_data_get(data, planes, num_planes,
		flags, dev, rotator, dir);
	if (ret)
		return ret;

	mdss_mdp_get_plane_sizes(fmt, buffer->width, buffer->height, &ps, 0, 0);

	for (i = 0; i < num_planes ; i++) {
		/* prefer the dma-buf's real size when available; unmapped
		 * planes only carry the imported length */
		unsigned long plane_len = (data->p[i].srcp_dma_buf) ?
				data->p[i].srcp_dma_buf->size : data->p[i].len;

		if (plane_len < planes[i].offset) {
			pr_err("Offset=%d larger than buffer size=%lu\n",
				planes[i].offset, plane_len);
			ret = -EINVAL;
			goto buf_too_small;
		}
		total_buf_len += plane_len - planes[i].offset;
	}

	if (total_buf_len < ps.total_size) {
		pr_err("Buffer size=%lu, expected size=%d\n", total_buf_len,
			ps.total_size);
		ret = -EINVAL;
		goto buf_too_small;
	}
	return 0;

buf_too_small:
	mdss_mdp_data_free(data, rotator, dir);
	return ret;
}
+
/*
 * mdss_mdp_calc_phase_step() - compute the scaler phase step for a
 * src -> dst dimension scaling ratio in PHASE_STEP_SHIFT fixed point.
 * @src:       source dimension in pixels
 * @dst:       destination dimension in pixels
 * @out_phase: resulting phase step
 *
 * Return: 0 on success, -EINVAL for a zero dimension, -EOVERFLOW when the
 * MDSS_QUIRK_DOWNSCALE_HANG check predicts the hardware would overflow for
 * this downscale ratio.
 */
int mdss_mdp_calc_phase_step(u32 src, u32 dst, u32 *out_phase)
{
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	u32 unit, residue, result;

	if (src == 0 || dst == 0)
		return -EINVAL;

	unit = 1 << PHASE_STEP_SHIFT;
	/* phase = unit * src / dst without intermediate overflow */
	*out_phase = mult_frac(unit, src, dst);

	/* check if overflow is possible */
	if (mdss_has_quirk(mdata, MDSS_QUIRK_DOWNSCALE_HANG) && src > dst) {
		residue = *out_phase - unit;
		result = (residue * dst) + residue;

		while (result > (unit + (unit >> 1)))
			result -= unit;

		if ((result > residue) && (result < unit))
			return -EOVERFLOW;
	}

	return 0;
}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_wfd.c b/drivers/video/fbdev/msm/mdss_mdp_wfd.c
new file mode 100644
index 0000000..f11459b
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_wfd.c
@@ -0,0 +1,483 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/file.h>
+#include <linux/uaccess.h>
+
+#include "mdss_mdp_wfd.h"
+
+/*
+ * time out value for wfd to wait for any pending frames to finish
+ * assuming 30fps, and max 5 frames in the queue
+ */
+#define WFD_TIMEOUT_IN_MS 150
+
+struct mdss_mdp_wfd *mdss_mdp_wfd_init(struct device *device,
+	struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_wfd *wfd;
+
+	wfd = kzalloc(sizeof(struct mdss_mdp_wfd), GFP_KERNEL);
+	if (!wfd)
+		return ERR_PTR(-ENOMEM);
+
+	mutex_init(&wfd->lock);
+	INIT_LIST_HEAD(&wfd->data_queue);
+	init_completion(&wfd->comp);
+	wfd->ctl = ctl;
+	wfd->device = device;
+
+	return wfd;
+}
+
+void mdss_mdp_wfd_deinit(struct mdss_mdp_wfd *wfd)
+{
+	struct mdss_mdp_wfd_data *node, *temp;
+
+	list_for_each_entry_safe(node, temp, &wfd->data_queue, next)
+	mdss_mdp_wfd_remove_data(wfd, node);
+
+	kfree(wfd);
+}
+
/*
 * mdss_mdp_wfd_wait_for_finish() - block until all queued writeback
 * buffers have been consumed.
 *
 * Return: 0 when the queue is (or becomes) empty, -ETIME when pending
 * frames did not drain within WFD_TIMEOUT_IN_MS, or a negative error from
 * the wait itself.
 */
int mdss_mdp_wfd_wait_for_finish(struct mdss_mdp_wfd *wfd)
{
	int ret;

	mutex_lock(&wfd->lock);
	if (list_empty(&wfd->data_queue)) {
		mutex_unlock(&wfd->lock);
		return 0;
	}
	/* re-arm under the lock; mdss_mdp_wfd_remove_data() completes it
	 * (also under the lock) when the last queued entry is removed */
	init_completion(&wfd->comp);
	mutex_unlock(&wfd->lock);

	ret = wait_for_completion_timeout(&wfd->comp,
				msecs_to_jiffies(WFD_TIMEOUT_IN_MS));

	/* wait_for_completion_timeout(): 0 on timeout, >0 jiffies left on
	 * completion — normalize to 0 / -ETIME */
	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)
		ret = 0;

	return ret;
}
+
+void mdss_mdp_wfd_destroy(struct mdss_mdp_wfd *wfd)
+{
+	struct mdss_mdp_ctl *ctl = wfd->ctl;
+
+	if (!ctl)
+		return;
+
+	if (ctl->ops.stop_fnc)
+		ctl->ops.stop_fnc(ctl, 0);
+
+	mdss_mdp_reset_mixercfg(ctl);
+
+	if (ctl->wb)
+		mdss_mdp_wb_free(ctl->wb);
+
+	if (ctl->mixer_left)
+		mdss_mdp_mixer_free(ctl->mixer_left);
+
+	if (ctl->mixer_right)
+		mdss_mdp_mixer_free(ctl->mixer_right);
+
+	ctl->mixer_left = NULL;
+	ctl->mixer_right = NULL;
+	ctl->wb = NULL;
+}
+
+bool mdss_mdp_wfd_is_config_same(struct msm_fb_data_type *mfd,
+	struct mdp_output_layer *layer)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_ctl *ctl = mdp5_data->wfd->ctl;
+	struct mdss_mdp_writeback *wb = NULL;
+
+	wb = ctl->wb;
+	if (!wb || !ctl->mixer_left)
+		return false;
+
+	if ((wb->num != layer->writeback_ndx)
+		|| (ctl->width != layer->buffer.width)
+		|| (ctl->height != layer->buffer.height)
+		|| (ctl->dst_format != layer->buffer.format))
+		return false;
+
+	return true;
+}
+
+int mdss_mdp_wfd_setup(struct mdss_mdp_wfd *wfd,
+	struct mdp_output_layer *layer)
+{
+	u32 wb_idx = layer->writeback_ndx;
+	struct mdss_mdp_ctl *ctl = wfd->ctl;
+	struct mdss_mdp_writeback *wb = NULL;
+	struct mdss_mdp_format_params *fmt = NULL;
+	int ret = 0;
+	u32 width, height, max_mixer_width;
+
+	if (!ctl)
+		return -EINVAL;
+
+	if (mdss_mdp_wfd_is_config_same(ctl->mfd, layer)) {
+		pr_debug("wfd prepared already\n");
+		return 0;
+	}
+
+	if (ctl->wb) {
+		pr_debug("config change, wait for pending buffer done\n");
+		ret = mdss_mdp_wfd_wait_for_finish(wfd);
+		if (ret) {
+			pr_err("fail to wait for outstanding request\n");
+			return ret;
+		}
+		mdss_mdp_wfd_destroy(wfd);
+	}
+	width = layer->buffer.width;
+	height = layer->buffer.height;
+	max_mixer_width = ctl->mdata->max_mixer_width;
+	pr_debug("widthxheight:%dx%d,wb_idx:%d, ctl:%d\n", width, height,
+			wb_idx, ctl->num);
+
+	wb = mdss_mdp_wb_assign(wb_idx, ctl->num);
+	if (!wb) {
+		pr_err("could not allocate wb\n");
+		ret = -EINVAL;
+		goto wfd_setup_error;
+	}
+	ctl->wb = wb;
+	ctl->dst_format = layer->buffer.format;
+	ctl->dst_comp_ratio = layer->buffer.comp_ratio;
+	ctl->width = width;
+	ctl->height = height;
+	ctl->roi =  (struct mdss_rect) {0, 0, width, height};
+	ctl->is_secure = (layer->flags & MDP_LAYER_SECURE_SESSION);
+
+	fmt = mdss_mdp_get_format_params(layer->buffer.format);
+
+	if (fmt == NULL) {
+		pr_err("invalid buffer format\n");
+		ret = -EINVAL;
+		goto wfd_setup_error;
+	}
+
+	/* only 3 csc type supported */
+	if (fmt->is_yuv) {
+		switch (layer->color_space) {
+		case MDP_CSC_ITU_R_601:
+			ctl->csc_type = MDSS_MDP_CSC_RGB2YUV_601L;
+			break;
+		case MDP_CSC_ITU_R_709:
+			ctl->csc_type = MDSS_MDP_CSC_RGB2YUV_709L;
+			break;
+		case MDP_CSC_ITU_R_601_FR:
+		default:
+			ctl->csc_type = MDSS_MDP_CSC_RGB2YUV_601FR;
+			break;
+		}
+	} else {
+		ctl->csc_type = MDSS_MDP_CSC_RGB2RGB;
+	}
+
+	if (ctl->mdata->wfd_mode == MDSS_MDP_WFD_INTERFACE) {
+		ctl->mixer_left = mdss_mdp_mixer_alloc(ctl,
+			MDSS_MDP_MIXER_TYPE_INTF, (width > max_mixer_width), 0);
+		if (width > max_mixer_width) {
+			ctl->mixer_right = mdss_mdp_mixer_alloc(ctl,
+				MDSS_MDP_MIXER_TYPE_INTF, true, 0);
+			ctl->mfd->split_mode = MDP_DUAL_LM_SINGLE_DISPLAY;
+			width = width / 2;
+		} else {
+			ctl->mfd->split_mode = MDP_SPLIT_MODE_NONE;
+		}
+	} else if (width > max_mixer_width) {
+		pr_err("width > max_mixer_width supported only in MDSS_MDP_WB_INTF\n");
+		goto wfd_setup_error;
+	} else if (ctl->mdata->wfd_mode == MDSS_MDP_WFD_DEDICATED) {
+		ctl->mixer_left = mdss_mdp_mixer_alloc(ctl,
+				MDSS_MDP_MIXER_TYPE_WRITEBACK, false, 0);
+	} else {
+		ctl->mixer_left = mdss_mdp_mixer_assign(wb->num, true, false);
+	}
+
+	if (!ctl->mixer_left ||
+		((ctl->mfd->split_mode ==
+			MDP_DUAL_LM_SINGLE_DISPLAY) && (!ctl->mixer_right))) {
+		if (ctl->mixer_left)
+			mdss_mdp_mixer_free(ctl->mixer_left);
+		if (ctl->mixer_right)
+			mdss_mdp_mixer_free(ctl->mixer_right);
+		pr_err("could not allocate mixer(s) for ctl:%d\n", ctl->num);
+		ret = -ENODEV;
+		goto wfd_setup_error;
+	}
+
+	if (ctl->mixer_left->type == MDSS_MDP_MIXER_TYPE_INTF ||
+			ctl->mdata->wfd_mode == MDSS_MDP_WFD_DEDICATED) {
+		ctl->opmode = MDSS_MDP_CTL_OP_WFD_MODE;
+	} else {
+		switch (ctl->mixer_left->num) {
+		case MDSS_MDP_WB_LAYERMIXER0:
+			ctl->opmode = MDSS_MDP_CTL_OP_WB0_MODE;
+			break;
+		case MDSS_MDP_WB_LAYERMIXER1:
+			ctl->opmode = MDSS_MDP_CTL_OP_WB1_MODE;
+			break;
+		default:
+			pr_err("Incorrect writeback config num=%d\n",
+					ctl->mixer_left->num);
+			ret = -EINVAL;
+			goto wfd_setup_error;
+		}
+		ctl->wb_type = MDSS_MDP_WB_CTL_TYPE_LINE;
+	}
+
+	ctl->mixer_left->width = width;
+	ctl->mixer_left->height = height;
+	ctl->mixer_left->roi = (struct mdss_rect) {0, 0, width, height};
+	ctl->mixer_left->ctl = ctl;
+	ctl->mixer_left->valid_roi = true;
+	ctl->mixer_left->roi_changed = true;
+
+	if (ctl->mfd->split_mode == MDP_DUAL_LM_SINGLE_DISPLAY) {
+		ctl->mixer_right->width = width;
+		ctl->mixer_right->height = height;
+		ctl->mixer_right->roi = (struct mdss_rect) {0, 0,
+			width, height};
+		ctl->mixer_right->valid_roi = true;
+		ctl->mixer_right->roi_changed = true;
+		ctl->mixer_right->ctl = ctl;
+		ctl->opmode |= MDSS_MDP_CTL_OP_PACK_3D_ENABLE |
+				       MDSS_MDP_CTL_OP_PACK_3D_H_ROW_INT;
+	} else {
+		ctl->opmode &= ~(MDSS_MDP_CTL_OP_PACK_3D_ENABLE |
+				       MDSS_MDP_CTL_OP_PACK_3D_H_ROW_INT);
+	}
+
+	if (ctl->ops.start_fnc) {
+		ret = ctl->ops.start_fnc(ctl);
+		if (ret) {
+			pr_err("wfd start failed %d\n", ret);
+			goto wfd_setup_error;
+		}
+	}
+
+	return ret;
+
+wfd_setup_error:
+	mdss_mdp_wfd_destroy(wfd);
+	return ret;
+}
+
+static int mdss_mdp_wfd_import_data(struct device *device,
+	struct mdss_mdp_wfd_data *wfd_data)
+{
+	int i, ret = 0;
+	u32 flags = 0;
+	struct mdp_layer_buffer *buffer = &wfd_data->layer.buffer;
+	struct mdss_mdp_data *data = &wfd_data->data;
+	struct msmfb_data planes[MAX_PLANES];
+
+	if (wfd_data->layer.flags & MDP_LAYER_SECURE_SESSION)
+		flags = MDP_SECURE_OVERLAY_SESSION;
+
+	if (buffer->plane_count > MAX_PLANES) {
+		pr_err("buffer plane_count exceeds MAX_PLANES limit:%d",
+				buffer->plane_count);
+		return -EINVAL;
+	}
+
+	memset(planes, 0, sizeof(planes));
+
+	for (i = 0; i < buffer->plane_count; i++) {
+		planes[i].memory_id = buffer->planes[i].fd;
+		planes[i].offset = buffer->planes[i].offset;
+	}
+
+	ret =  mdss_mdp_data_get_and_validate_size(data, planes,
+			buffer->plane_count, flags, device,
+			false, DMA_FROM_DEVICE, buffer);
+
+	return ret;
+}
+
+struct mdss_mdp_wfd_data *mdss_mdp_wfd_add_data(
+	struct mdss_mdp_wfd *wfd,
+	struct mdp_output_layer *layer)
+{
+	int ret;
+	struct mdss_mdp_wfd_data *wfd_data;
+
+	if (!wfd->ctl || !wfd->ctl->wb) {
+		pr_err("wfd not setup\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	wfd_data = kzalloc(sizeof(struct mdss_mdp_wfd_data), GFP_KERNEL);
+	if (!wfd_data)
+		return ERR_PTR(-ENOMEM);
+
+	wfd_data->layer = *layer;
+	ret = mdss_mdp_wfd_import_data(wfd->device, wfd_data);
+	if (ret) {
+		pr_err("fail to import data\n");
+		mdss_mdp_data_free(&wfd_data->data, true, DMA_FROM_DEVICE);
+		kfree(wfd_data);
+		return ERR_PTR(ret);
+	}
+
+	mutex_lock(&wfd->lock);
+	list_add_tail(&wfd_data->next, &wfd->data_queue);
+	mutex_unlock(&wfd->lock);
+
+	return wfd_data;
+}
+
+void mdss_mdp_wfd_remove_data(struct mdss_mdp_wfd *wfd,
+	struct mdss_mdp_wfd_data *wfd_data)
+{
+	mutex_lock(&wfd->lock);
+	list_del_init(&wfd_data->next);
+	if (list_empty(&wfd->data_queue))
+		complete(&wfd->comp);
+	mutex_unlock(&wfd->lock);
+	mdss_mdp_data_free(&wfd_data->data, true, DMA_FROM_DEVICE);
+	kfree(wfd_data);
+}
+
+static int mdss_mdp_wfd_validate_out_configuration(struct mdss_mdp_wfd *wfd,
+	struct mdp_output_layer *layer)
+{
+	struct mdss_mdp_format_params *fmt = NULL;
+	struct mdss_mdp_ctl *ctl = wfd->ctl;
+	u32 wb_idx = layer->writeback_ndx;
+
+	if (mdss_mdp_is_wb_mdp_intf(wb_idx, ctl->num)) {
+		fmt = mdss_mdp_get_format_params(layer->buffer.format);
+		if (fmt && !(fmt->flag & VALID_MDP_WB_INTF_FORMAT)) {
+			pr_err("wb=%d does not support dst fmt:%d\n", wb_idx,
+				layer->buffer.format);
+			return -EINVAL;
+		}
+
+		if (!ctl->mdata->has_wb_ubwc && mdss_mdp_is_ubwc_format(fmt)) {
+			pr_err("wb=%d does not support UBWC fmt:%d\n", wb_idx,
+				layer->buffer.format);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+int mdss_mdp_wfd_validate(struct mdss_mdp_wfd *wfd,
+	struct mdp_output_layer *layer)
+{
+	u32 wb_idx = layer->writeback_ndx;
+
+	if (mdss_mdp_wfd_validate_out_configuration(wfd, layer)) {
+		pr_err("failed to validate output config\n");
+		return -EINVAL;
+	}
+
+	if (wb_idx > wfd->ctl->mdata->nwb)
+		return -EINVAL;
+
+	return 0;
+}
+
/*
 * mdss_mdp_wfd_kickoff() - commit the head of the output-buffer queue to
 * the writeback hardware and wait for the frame to complete.
 * @wfd:       prepared WFD context
 * @commit_cb: optional commit-stage callback, invoked at SETUP_DONE and
 *             READY_FOR_KICKOFF
 *
 * An empty queue is not an error: the frame-done notification is sent and
 * 0 is returned.  In all cases the head entry (if any) is removed from the
 * queue via mdss_mdp_wfd_commit_done() before returning.
 *
 * Return: 0 on success, negative errno on map/commit/wait failure.
 */
int mdss_mdp_wfd_kickoff(struct mdss_mdp_wfd *wfd,
	struct mdss_mdp_commit_cb *commit_cb)
{
	struct mdss_mdp_ctl *ctl = wfd->ctl;
	struct mdss_mdp_writeback_arg wb_args;
	struct mdss_mdp_wfd_data *wfd_data;
	int ret = 0;

	if (!ctl) {
		pr_err("no ctl\n");
		return -EINVAL;
	}

	if (!ctl->wb) {
		pr_err("wfd not prepared\n");
		return -EINVAL;
	}

	mutex_lock(&wfd->lock);
	if (list_empty(&wfd->data_queue)) {
		pr_debug("no output buffer\n");
		mutex_unlock(&wfd->lock);
		/* nothing to write back, but notify so fences release */
		mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_DONE);
		return 0;
	}
	wfd_data = list_first_entry(&wfd->data_queue,
				struct mdss_mdp_wfd_data, next);
	mutex_unlock(&wfd->lock);

	ret = mdss_mdp_data_map(&wfd_data->data, true, DMA_FROM_DEVICE);
	if (ret) {
		pr_err("fail to acquire output buffer\n");
		goto kickoff_error;
	}

	memset(&wb_args, 0, sizeof(wb_args));
	wb_args.data = &wfd_data->data;

	ret = mdss_mdp_writeback_display_commit(ctl, &wb_args);
	if (ret) {
		pr_err("wfd commit error = %d, ctl=%d\n", ret, ctl->num);
		goto kickoff_error;
	}

	if (commit_cb)
		commit_cb->commit_cb_fnc(
			MDP_COMMIT_STAGE_SETUP_DONE,
			commit_cb->data);

	ret = mdss_mdp_display_wait4comp(ctl);

	if (commit_cb)
		commit_cb->commit_cb_fnc(MDP_COMMIT_STAGE_READY_FOR_KICKOFF,
			commit_cb->data);

kickoff_error:
	/* always retire the head buffer, even on error */
	mdss_mdp_wfd_commit_done(wfd);
	return ret;
}
+
+int mdss_mdp_wfd_commit_done(struct mdss_mdp_wfd *wfd)
+{
+	struct mdss_mdp_wfd_data *wfd_data;
+
+	mutex_lock(&wfd->lock);
+	if (list_empty(&wfd->data_queue)) {
+		pr_err("no output buffer\n");
+		mutex_unlock(&wfd->lock);
+		return -EINVAL;
+	}
+	wfd_data = list_first_entry(&wfd->data_queue,
+				struct mdss_mdp_wfd_data, next);
+	mutex_unlock(&wfd->lock);
+
+	mdss_mdp_wfd_remove_data(wfd, wfd_data);
+
+	return 0;
+}
+
diff --git a/drivers/video/fbdev/msm/mdss_mdp_wfd.h b/drivers/video/fbdev/msm/mdss_mdp_wfd.h
new file mode 100644
index 0000000..e603c48
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_wfd.h
@@ -0,0 +1,65 @@
+/* Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MDSS_MDP_WFD_H__
+#define __MDSS_MDP_WFD_H__
+
+#include <linux/sync.h>
+#include <linux/sw_sync.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/msm_mdp_ext.h>
+
+#include "mdss_mdp.h"
+
+struct mdss_mdp_wfd_data {
+	struct mdp_output_layer layer;
+	struct mdss_mdp_data data;
+	bool signal_required;
+	struct list_head next;
+};
+
+struct mdss_mdp_wfd {
+	struct mutex lock;
+	struct list_head data_queue;
+	struct mdss_mdp_ctl *ctl;
+	struct device *device;
+	struct completion comp;
+};
+
+struct mdss_mdp_wfd *mdss_mdp_wfd_init(struct device *device,
+	struct mdss_mdp_ctl *ctl);
+
+void mdss_mdp_wfd_deinit(struct mdss_mdp_wfd *wfd);
+
+int mdss_mdp_wfd_setup(struct mdss_mdp_wfd *wfd,
+	struct mdp_output_layer *layer);
+
+void mdss_mdp_wfd_destroy(struct mdss_mdp_wfd *wfd);
+
+struct mdss_mdp_wfd_data *mdss_mdp_wfd_add_data(
+	struct mdss_mdp_wfd *wfd,
+	struct mdp_output_layer *layer);
+
+void mdss_mdp_wfd_remove_data(struct mdss_mdp_wfd *wfd,
+	struct mdss_mdp_wfd_data *data);
+
+int mdss_mdp_wfd_validate(struct mdss_mdp_wfd *wfd,
+	struct mdp_output_layer *layer);
+
+int mdss_mdp_wfd_kickoff(struct mdss_mdp_wfd *wfd,
+	struct mdss_mdp_commit_cb *commit_cb);
+
+int mdss_mdp_wfd_commit_done(struct mdss_mdp_wfd *wfd);
+
+#endif
diff --git a/drivers/video/fbdev/msm/mdss_panel.c b/drivers/video/fbdev/msm/mdss_panel.c
new file mode 100644
index 0000000..de69d63
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_panel.c
@@ -0,0 +1,986 @@
+/* Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/fb.h>
+#include <linux/uaccess.h>
+
+#include "mdss_panel.h"
+
+#define NUM_INTF 2
+
+/*
+ * rc_buf_thresh = {896, 1792, 2688, 3548, 4480, 5376, 6272, 6720,
+ *		7168, 7616, 7744, 7872, 8000, 8064, 8192};
+ *	(x >> 6) & 0x0ff)
+ */
+static u32 dsc_rc_buf_thresh[] = {0x0e, 0x1c, 0x2a, 0x38, 0x46, 0x54,
+		0x62, 0x69, 0x70, 0x77, 0x79, 0x7b, 0x7d, 0x7e};
+static char dsc_rc_range_min_qp_1_1[] = {0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5,
+				5, 5, 7, 13};
+static char dsc_rc_range_min_qp_1_1_scr1[] = {0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5,
+				5, 5, 9, 12};
+static char dsc_rc_range_max_qp_1_1[] = {4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 11,
+			 12, 13, 13, 15};
+static char dsc_rc_range_max_qp_1_1_scr1[] = {4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 10,
+			 11, 11, 12, 13};
+static char dsc_rc_range_bpg_offset[] = {2, 0, 0, -2, -4, -6, -8, -8,
+			-8, -10, -10, -12, -12, -12, -12};
+
+int mdss_panel_debugfs_fbc_setup(struct mdss_panel_debugfs_info *debugfs_info,
+	struct mdss_panel_info *panel_info, struct dentry *parent)
+{
+	struct dentry *fbc_root;
+	struct fbc_panel_info *fbc = &debugfs_info->panel_info.fbc;
+
+	fbc_root = debugfs_create_dir("fbc", parent);
+	if (IS_ERR_OR_NULL(fbc_root)) {
+		pr_err("Debugfs create fbc dir failed with error: %ld\n",
+					PTR_ERR(fbc_root));
+		return -ENODEV;
+	}
+
+	debugfs_create_bool("enable", 0644, fbc_root,
+			(bool *)&fbc->enabled);
+	debugfs_create_u32("bpp", 0644, fbc_root,
+			(u32 *)&fbc->target_bpp);
+	debugfs_create_u32("packing", 0644, fbc_root,
+			(u32 *)&fbc->comp_mode);
+	debugfs_create_bool("quant_err", 0644, fbc_root,
+			(bool *)&fbc->qerr_enable);
+	debugfs_create_u32("bias", 0644, fbc_root,
+			(u32 *)&fbc->cd_bias);
+	debugfs_create_bool("pat_mode", 0644, fbc_root,
+			(bool *)&fbc->pat_enable);
+	debugfs_create_bool("vlc_mode", 0644, fbc_root,
+			(bool *)&fbc->vlc_enable);
+	debugfs_create_bool("bflc_mode", 0644, fbc_root,
+			(bool *)&fbc->bflc_enable);
+	debugfs_create_u32("hline_budget", 0644, fbc_root,
+			(u32 *)&fbc->line_x_budget);
+	debugfs_create_u32("budget_ctrl", 0644, fbc_root,
+			(u32 *)&fbc->block_x_budget);
+	debugfs_create_u32("block_budget", 0644, fbc_root,
+			(u32 *)&fbc->block_budget);
+	debugfs_create_u32("lossless_thd", 0644, fbc_root,
+			(u32 *)&fbc->lossless_mode_thd);
+	debugfs_create_u32("lossy_thd", 0644, fbc_root,
+			(u32 *)&fbc->lossy_mode_thd);
+	debugfs_create_u32("rgb_thd", 0644, fbc_root,
+			(u32 *)&fbc->lossy_rgb_thd);
+	debugfs_create_u32("lossy_mode_idx", 0644, fbc_root,
+			(u32 *)&fbc->lossy_mode_idx);
+	debugfs_create_u32("slice_height", 0644, fbc_root,
+			(u32 *)&fbc->slice_height);
+	debugfs_create_u32("pred_mode", 0644, fbc_root,
+			(u32 *)&fbc->pred_mode);
+	debugfs_create_u32("enc_mode", 0644, fbc_root,
+			(u32 *)&fbc->enc_mode);
+	debugfs_create_u32("max_pred_err", 0644, fbc_root,
+			(u32 *)&fbc->max_pred_err);
+
+	debugfs_info->panel_info.fbc = panel_info->fbc;
+
+	return 0;
+}
+
+struct array_data {
+	void *array;
+	u32 elements;
+	size_t size; /* size of each data in array */
+};
+
+static int panel_debugfs_array_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return nonseekable_open(inode, file);
+}
+
+static ssize_t panel_debugfs_array_read(struct file *file, char __user *buf,
+				size_t len, loff_t *ppos)
+{
+	char *buffer, *bufp;
+	int buf_size;
+	struct array_data *data = file->private_data;
+	int i = 0, elements = data->elements;
+	ssize_t ret = 0;
+
+	/*
+	 * Max size:
+	 *  - 10 digits ("0x" + 8 digits value) + ' '/'\n' = 11 bytes per number
+	 *  - terminating NUL character
+	 */
+	buf_size = elements*11 + 1;
+	buffer = kmalloc(buf_size, GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+
+	bufp = buffer;
+	while (i < elements) {
+		char term = (i < elements-1) ? ' ' : '\n';
+
+		if (data->size == sizeof(u8)) {
+			u8 *array = (u8 *)data->array;
+
+			bufp += snprintf(bufp, buf_size-(bufp-buffer),
+						"0x%02x%c", array[i], term);
+		} else if (data->size == sizeof(u16)) {
+			u16 *array = (u16 *)data->array;
+
+			bufp += snprintf(bufp, buf_size-(bufp-buffer),
+						"0x%02x%c", array[i], term);
+		} else {
+			u32 *array = (u32 *)data->array;
+
+			bufp += snprintf(bufp, buf_size-(bufp-buffer),
+						"0x%02x%c", array[i], term);
+		}
+		i++;
+	}
+	*bufp = '\0';
+	ret = simple_read_from_buffer(buf, len, ppos,
+					buffer, bufp-buffer);
+
+	kfree(buffer);
+	return ret;
+}
+
+static ssize_t panel_debugfs_array_write(struct file *file,
+	const char __user *p, size_t count, loff_t *ppos)
+{
+	struct array_data *data = file->private_data;
+	char *buffer, *bufp;
+	int buf_size;
+	ssize_t res;
+	int i = 0, elements = data->elements;
+
+	/*
+	 * Max size:
+	 *  - 10 digits ("0x" + 8 digits value) + ' '/'\n' = 11 bytes per number
+	 *  - terminating NUL character
+	 */
+	buf_size = elements*11 + 1;
+	buffer = kzalloc(buf_size, GFP_KERNEL);
+	if (!buffer) {
+		pr_err("Failed to allocate memory\n");
+		return -ENOMEM;
+	}
+	res = simple_write_to_buffer(buffer, buf_size, ppos, p, count);
+	if (res > 0)
+		*ppos += res;
+
+	buffer[buf_size-1] = '\0';
+	bufp = buffer;
+
+	while (i < elements) {
+		uint32_t value = 0;
+		int step = 0;
+
+		if (sscanf(bufp, "%x%n", &value, &step) > 0) {
+			if (data->size == sizeof(u8)) {
+				u8 *array = (u8 *)data->array;
+				*(array+i) = (u8)value;
+			} else if (data->size == sizeof(u16)) {
+				u16 *array = (u16 *)data->array;
+				*(array+i) = (u16)value;
+			} else {
+				u32 *array = (u32 *)data->array;
+				*(array+i) = (u32)value;
+			}
+			bufp += step;
+		}
+		i++;
+	}
+	kfree(buffer);
+	return res;
+}
+
+static const struct file_operations panel_debugfs_array_fops = {
+	.owner = THIS_MODULE,
+	.open = panel_debugfs_array_open,
+	.read = panel_debugfs_array_read,
+	.write = panel_debugfs_array_write,
+};
+
+struct dentry *panel_debugfs_create_array(const char *name, umode_t mode,
+				struct dentry *parent,
+				void *array, size_t size, u32 elements)
+{
+	struct array_data *data = kmalloc(sizeof(*data), GFP_KERNEL);
+
+	if (data == NULL)
+		return NULL;
+
+	/* only support integer of 3 kinds of length format */
+	if ((size != sizeof(u8)) &&
+	    (size != sizeof(u16)) &&
+	    (size != sizeof(u32))) {
+		pr_warn("Value size %zu bytes is not supported\n", size);
+		kfree(data);
+		return NULL;
+	}
+
+	data->array = array;
+	data->size = size;
+	data->elements = elements;
+
+	return debugfs_create_file(name, mode, parent, data,
+				&panel_debugfs_array_fops);
+}
+
+#define DEBUGFS_CREATE_ARRAY(name, node, array) \
+	panel_debugfs_create_array(name, 0644, node, array, \
+				   sizeof(array[0]), ARRAY_SIZE(array))
+
+static int _create_phy_ctrl_nodes(struct mdss_panel_debugfs_info *debugfs_info,
+	struct dentry *node) {
+
+	struct mdss_panel_info *pinfo = &debugfs_info->panel_info;
+	struct dentry *phy_node;
+
+	phy_node = debugfs_create_dir("dsi_phy_ctrl", node);
+	if (IS_ERR_OR_NULL(phy_node)) {
+		pr_err("Debugfs create phy ctrl node failed with error: %ld\n",
+					PTR_ERR(phy_node));
+		return -ENODEV;
+	}
+
+	DEBUGFS_CREATE_ARRAY("regulator", phy_node,
+			     pinfo->mipi.dsi_phy_db.regulator);
+	DEBUGFS_CREATE_ARRAY("strength", phy_node,
+			     pinfo->mipi.dsi_phy_db.strength);
+	DEBUGFS_CREATE_ARRAY("bistctrl", phy_node,
+			     pinfo->mipi.dsi_phy_db.bistctrl);
+	DEBUGFS_CREATE_ARRAY("lanecfg", phy_node,
+			     pinfo->mipi.dsi_phy_db.lanecfg);
+	DEBUGFS_CREATE_ARRAY("timing", phy_node,
+			     pinfo->mipi.dsi_phy_db.timing);
+
+	return 0;
+}
+
+static int _create_dsi_panel_nodes(struct mdss_panel_debugfs_info *dfs,
+	struct dentry *parent)
+{
+	struct dentry *lcdc_root, *mipi_root, *te_root;
+	struct mdss_panel_info *pinfo = &dfs->panel_info;
+
+	lcdc_root = debugfs_create_dir("lcdc", parent);
+	if (IS_ERR_OR_NULL(lcdc_root)) {
+		pr_err("Debugfs create lcdc dir failed with error: %ld\n",
+					PTR_ERR(lcdc_root));
+		return -ENODEV;
+	}
+	mipi_root = debugfs_create_dir("mipi", parent);
+	if (IS_ERR_OR_NULL(mipi_root)) {
+		pr_err("Debugfs create mipi dir failed with error: %ld\n",
+					PTR_ERR(mipi_root));
+		return -ENODEV;
+	}
+	te_root = debugfs_create_dir("te", parent);
+	if (IS_ERR_OR_NULL(te_root)) {
+		pr_err("Debugfs create te check dir failed with error: %ld\n",
+					PTR_ERR(te_root));
+		return -ENODEV;
+	}
+
+	debugfs_create_u32("partial_update_enabled", 0644, dfs->root,
+			(u32 *)&pinfo->partial_update_enabled);
+	debugfs_create_u32("partial_update_roi_merge", 0644, dfs->root,
+			(u32 *)&pinfo->partial_update_roi_merge);
+	debugfs_create_u32("dcs_cmd_by_left", 0644, dfs->root,
+			(u32 *)&pinfo->dcs_cmd_by_left);
+	debugfs_create_bool("ulps_feature_enabled", 0644, dfs->root,
+			&pinfo->ulps_feature_enabled);
+	debugfs_create_bool("ulps_suspend_enabled", 0644, dfs->root,
+			&pinfo->ulps_suspend_enabled);
+	debugfs_create_bool("esd_check_enabled", 0644, dfs->root,
+			&pinfo->esd_check_enabled);
+	debugfs_create_bool("panel_ack_disabled", 0644, dfs->root,
+			&pinfo->panel_ack_disabled);
+
+	debugfs_create_u32("hsync_skew", 0644, lcdc_root,
+			(u32 *)&pinfo->lcdc.hsync_skew);
+	debugfs_create_u32("underflow_clr", 0644, lcdc_root,
+			(u32 *)&pinfo->lcdc.underflow_clr);
+	debugfs_create_u32("border_clr", 0644, lcdc_root,
+			(u32 *)&pinfo->lcdc.border_clr);
+	debugfs_create_u32("h_back_porch", 0644, lcdc_root,
+			(u32 *)&pinfo->lcdc.h_back_porch);
+	debugfs_create_u32("h_front_porch", 0644, lcdc_root,
+			(u32 *)&pinfo->lcdc.h_front_porch);
+	debugfs_create_u32("h_pulse_width", 0644, lcdc_root,
+			(u32 *)&pinfo->lcdc.h_pulse_width);
+	debugfs_create_u32("v_back_porch", 0644, lcdc_root,
+			(u32 *)&pinfo->lcdc.v_back_porch);
+	debugfs_create_u32("v_front_porch", 0644, lcdc_root,
+			(u32 *)&pinfo->lcdc.v_front_porch);
+	debugfs_create_u32("v_pulse_width", 0644, lcdc_root,
+			(u32 *)&pinfo->lcdc.v_pulse_width);
+
+	/* Create mipi related nodes */
+	debugfs_create_u8("frame_rate", 0644, mipi_root,
+			(char *)&pinfo->mipi.frame_rate);
+	debugfs_create_u8("hfp_power_stop", 0644, mipi_root,
+			(char *)&pinfo->mipi.hfp_power_stop);
+	debugfs_create_u8("hsa_power_stop", 0644, mipi_root,
+			(char *)&pinfo->mipi.hsa_power_stop);
+	debugfs_create_u8("hbp_power_stop", 0644, mipi_root,
+			(char *)&pinfo->mipi.hbp_power_stop);
+	debugfs_create_u8("last_line_interleave_en", 0644, mipi_root,
+			(char *)&pinfo->mipi.last_line_interleave_en);
+	debugfs_create_u8("bllp_power_stop", 0644, mipi_root,
+			(char *)&pinfo->mipi.bllp_power_stop);
+	debugfs_create_u8("eof_bllp_power_stop", 0644, mipi_root,
+			(char *)&pinfo->mipi.eof_bllp_power_stop);
+	debugfs_create_u8("data_lane0", 0644, mipi_root,
+			(char *)&pinfo->mipi.data_lane0);
+	debugfs_create_u8("data_lane1", 0644, mipi_root,
+			(char *)&pinfo->mipi.data_lane1);
+	debugfs_create_u8("data_lane2", 0644, mipi_root,
+			(char *)&pinfo->mipi.data_lane2);
+	debugfs_create_u8("data_lane3", 0644, mipi_root,
+			(char *)&pinfo->mipi.data_lane3);
+	debugfs_create_u8("t_clk_pre", 0644, mipi_root,
+			(char *)&pinfo->mipi.t_clk_pre);
+	debugfs_create_u8("t_clk_post", 0644, mipi_root,
+			(char *)&pinfo->mipi.t_clk_post);
+	debugfs_create_u8("stream", 0644, mipi_root,
+			(char *)&pinfo->mipi.stream);
+	debugfs_create_u8("interleave_mode", 0644, mipi_root,
+			(char *)&pinfo->mipi.interleave_mode);
+	debugfs_create_u8("vsync_enable", 0644, mipi_root,
+			(char *)&pinfo->mipi.vsync_enable);
+	debugfs_create_u8("hw_vsync_mode", 0644, mipi_root,
+			(char *)&pinfo->mipi.hw_vsync_mode);
+	debugfs_create_u8("te_sel", 0644, mipi_root,
+			(char *)&pinfo->mipi.te_sel);
+	debugfs_create_u8("insert_dcs_cmd", 0644, mipi_root,
+			(char *)&pinfo->mipi.insert_dcs_cmd);
+	debugfs_create_u8("wr_mem_start", 0644, mipi_root,
+			(char *)&pinfo->mipi.wr_mem_start);
+	debugfs_create_u8("wr_mem_continue", 0644, mipi_root,
+			(char *)&pinfo->mipi.wr_mem_continue);
+	debugfs_create_u8("pulse_mode_hsa_he", 0644, mipi_root,
+			(char *)&pinfo->mipi.pulse_mode_hsa_he);
+	debugfs_create_u8("vc", 0644, mipi_root, (char *)&pinfo->mipi.vc);
+	debugfs_create_u8("lp11_init", 0644, mipi_root,
+			(char *)&pinfo->mipi.lp11_init);
+	debugfs_create_u32("init_delay", 0644, mipi_root,
+			(u32 *)&pinfo->mipi.init_delay);
+	debugfs_create_u8("rx_eot_ignore", 0644, mipi_root,
+			(char *)&pinfo->mipi.rx_eot_ignore);
+	debugfs_create_u8("tx_eot_append", 0644, mipi_root,
+			(char *)&pinfo->mipi.tx_eot_append);
+	debugfs_create_u32("adjust_timer_ms", 0644, mipi_root,
+			(u32 *)&pinfo->adjust_timer_delay_ms);
+
+	/* TE related nodes */
+	debugfs_create_u32("te_tear_check_en", 0644, te_root,
+			(u32 *)&pinfo->te.tear_check_en);
+	debugfs_create_u32("te_sync_cfg_height", 0644, te_root,
+			(u32 *)&pinfo->te.sync_cfg_height);
+	debugfs_create_u32("te_vsync_init_val", 0644, te_root,
+			(u32 *)&pinfo->te.vsync_init_val);
+	debugfs_create_u32("te_sync_threshold_start", 0644, te_root,
+			(u32 *)&pinfo->te.sync_threshold_start);
+	debugfs_create_u32("te_sync_threshold_continue", 0644, te_root,
+			(u32 *)&pinfo->te.sync_threshold_continue);
+	debugfs_create_u32("te_start_pos", 0644, te_root,
+			(u32 *)&pinfo->te.start_pos);
+	debugfs_create_u32("te_rd_ptr_irq", 0644, te_root,
+			(u32 *)&pinfo->te.rd_ptr_irq);
+	debugfs_create_u32("te_refx100", 0644, te_root,
+			(u32 *)&pinfo->te.refx100);
+
+	return 0;
+}
+
+int mdss_panel_debugfs_panel_setup(struct mdss_panel_debugfs_info *debugfs_info,
+	struct mdss_panel_info *panel_info, struct dentry *parent)
+{
+	/* create panel info nodes */
+	debugfs_create_u32("xres", 0644, debugfs_info->root,
+		(u32 *)&debugfs_info->panel_info.xres);
+	debugfs_create_u32("yres", 0644, debugfs_info->root,
+		(u32 *)&debugfs_info->panel_info.yres);
+	debugfs_create_u32("dynamic_fps", 0644, debugfs_info->root,
+		(u32 *)&debugfs_info->panel_info.dynamic_fps);
+	debugfs_create_u32("physical_width", 0644, debugfs_info->root,
+		(u32 *)&debugfs_info->panel_info.physical_width);
+	debugfs_create_u32("physical_height", 0644, debugfs_info->root,
+		(u32 *)&debugfs_info->panel_info.physical_height);
+	debugfs_create_u32("min_refresh_rate", 0644, debugfs_info->root,
+		(u32 *)&debugfs_info->panel_info.min_fps);
+	debugfs_create_u32("max_refresh_rate", 0644, debugfs_info->root,
+		(u32 *)&debugfs_info->panel_info.max_fps);
+	debugfs_create_u64("clk_rate", 0644, debugfs_info->root,
+		(u64 *)&debugfs_info->panel_info.clk_rate);
+	debugfs_create_u32("bl_min", 0644, debugfs_info->root,
+		(u32 *)&debugfs_info->panel_info.bl_min);
+	debugfs_create_u32("bl_max", 0644, debugfs_info->root,
+		(u32 *)&debugfs_info->panel_info.bl_max);
+	debugfs_create_u32("brightness_max", 0644, debugfs_info->root,
+		(u32 *)&debugfs_info->panel_info.brightness_max);
+
+	if ((panel_info->type == MIPI_CMD_PANEL) ||
+	    (panel_info->type == MIPI_VIDEO_PANEL)) {
+		_create_dsi_panel_nodes(debugfs_info, debugfs_info->root);
+		_create_phy_ctrl_nodes(debugfs_info, debugfs_info->root);
+	}
+
+	debugfs_info->panel_info = *panel_info;
+	return 0;
+}
+
+int mdss_panel_debugfs_setup(struct mdss_panel_info *panel_info, struct dentry
+		*parent, char *intf_str)
+{
+	struct mdss_panel_debugfs_info *debugfs_info;
+
+	debugfs_info = kzalloc(sizeof(*debugfs_info), GFP_KERNEL);
+	if (!debugfs_info)
+		return -ENOMEM;
+
+	debugfs_info->parent = parent;
+	debugfs_info->root = debugfs_create_dir(intf_str, parent);
+	if (IS_ERR_OR_NULL(debugfs_info->root)) {
+		pr_err("Debugfs create dir failed with error: %ld\n",
+					PTR_ERR(debugfs_info->root));
+		kfree(debugfs_info);
+		return -ENODEV;
+	}
+
+	debugfs_create_u32("override_flag", 0644, parent,
+			(u32 *)&debugfs_info->override_flag);
+
+	mdss_panel_debugfs_fbc_setup(debugfs_info, panel_info,
+				debugfs_info->root);
+	mdss_panel_debugfs_panel_setup(debugfs_info, panel_info,
+				debugfs_info->root);
+
+	debugfs_info->override_flag = 0;
+
+	panel_info->debugfs_info = debugfs_info;
+	return 0;
+}
+
+int mdss_panel_debugfs_init(struct mdss_panel_info *panel_info,
+		char const *panel_name)
+{
+	struct mdss_panel_data *pdata;
+	struct dentry *parent;
+	char intf_str[10];
+	int intf_index = 0;
+	int rc = 0;
+
+	if (panel_info->type != MIPI_VIDEO_PANEL
+		&& panel_info->type != MIPI_CMD_PANEL)
+		return -ENOTSUPP;
+
+	pdata = container_of(panel_info, struct mdss_panel_data, panel_info);
+	parent = debugfs_create_dir(panel_name, NULL);
+	if (IS_ERR_OR_NULL(parent)) {
+		pr_err("Debugfs create dir failed with error: %ld\n",
+			PTR_ERR(parent));
+		return -ENODEV;
+	}
+
+	do {
+		snprintf(intf_str, sizeof(intf_str), "intf%d", intf_index++);
+		rc = mdss_panel_debugfs_setup(&pdata->panel_info, parent,
+				intf_str);
+		if (rc) {
+			pr_err("error in initializing panel debugfs\n");
+			mdss_panel_debugfs_cleanup(&pdata->panel_info);
+			return rc;
+		}
+		pdata = pdata->next;
+	} while (pdata && intf_index < NUM_INTF);
+
+	pr_debug("Initialized mdss_panel_debugfs_info\n");
+	return 0;
+}
+
+void mdss_panel_debugfs_cleanup(struct mdss_panel_info *panel_info)
+{
+	struct mdss_panel_data *pdata;
+	struct mdss_panel_debugfs_info *debugfs_info;
+	struct dentry *parent = NULL;
+
+	pdata = container_of(panel_info, struct mdss_panel_data, panel_info);
+	do {
+		debugfs_info = pdata->panel_info.debugfs_info;
+		if (debugfs_info && !parent)
+			parent = debugfs_info->parent;
+		kfree(debugfs_info);
+		pdata = pdata->next;
+	} while (pdata);
+	debugfs_remove_recursive(parent);
+	pr_debug("Cleaned up mdss_panel_debugfs_info\n");
+}
+
+void mdss_panel_override_te_params(struct mdss_panel_info *pinfo)
+{
+	pinfo->te.sync_cfg_height = mdss_panel_get_vtotal(pinfo);
+	pinfo->te.vsync_init_val = pinfo->yres;
+	pinfo->te.start_pos = pinfo->yres;
+	pinfo->te.rd_ptr_irq = pinfo->yres + 1;
+	pr_debug("SW TE override: read_ptr:%d,start_pos:%d,height:%d,init_val:%d\n",
+		pinfo->te.rd_ptr_irq, pinfo->te.start_pos,
+		pinfo->te.sync_cfg_height,
+		pinfo->te.vsync_init_val);
+}
+
+void mdss_panel_debugfsinfo_to_panelinfo(struct mdss_panel_info *panel_info)
+{
+	struct mdss_panel_data *pdata;
+	struct mdss_panel_info *pinfo;
+	struct mdss_panel_debugfs_info *dfs_info;
+
+	pdata = container_of(panel_info, struct mdss_panel_data, panel_info);
+
+	do {
+		pinfo = &pdata->panel_info;
+		dfs_info = pinfo->debugfs_info;
+
+		pinfo->xres = dfs_info->panel_info.xres;
+		pinfo->yres = dfs_info->panel_info.yres;
+		pinfo->dynamic_fps = dfs_info->panel_info.dynamic_fps;
+		pinfo->physical_width = dfs_info->panel_info.physical_width;
+		pinfo->physical_height = dfs_info->panel_info.physical_height;
+		pinfo->min_fps = dfs_info->panel_info.min_fps;
+		pinfo->max_fps = dfs_info->panel_info.max_fps;
+		pinfo->clk_rate = dfs_info->panel_info.clk_rate;
+		pinfo->bl_min = dfs_info->panel_info.bl_min;
+		pinfo->bl_max = dfs_info->panel_info.bl_max;
+		pinfo->brightness_max = dfs_info->panel_info.brightness_max;
+		pinfo->adjust_timer_delay_ms =
+			dfs_info->panel_info.adjust_timer_delay_ms;
+
+		if ((pinfo->type == MIPI_CMD_PANEL) ||
+		    (pinfo->type == MIPI_VIDEO_PANEL)) {
+			pinfo->fbc = dfs_info->panel_info.fbc;
+			pinfo->lcdc = dfs_info->panel_info.lcdc;
+			pinfo->mipi = dfs_info->panel_info.mipi;
+			pinfo->te = dfs_info->panel_info.te;
+			pinfo->partial_update_enabled =
+				dfs_info->panel_info.partial_update_enabled;
+			pinfo->partial_update_roi_merge =
+				dfs_info->panel_info.partial_update_roi_merge;
+			pinfo->dcs_cmd_by_left =
+				dfs_info->panel_info.dcs_cmd_by_left;
+			pinfo->ulps_feature_enabled =
+				dfs_info->panel_info.ulps_feature_enabled;
+			pinfo->ulps_suspend_enabled =
+				dfs_info->panel_info.ulps_suspend_enabled;
+			pinfo->esd_check_enabled =
+				dfs_info->panel_info.esd_check_enabled;
+			pinfo->panel_ack_disabled =
+				dfs_info->panel_info.panel_ack_disabled;
+		}
+
+		pinfo->panel_max_vtotal = mdss_panel_get_vtotal(pinfo);
+
+		/* override te parameters if panel is in sw te mode */
+		if (pinfo->sim_panel_mode == SIM_SW_TE_MODE)
+			mdss_panel_override_te_params(pinfo);
+
+		pdata = pdata->next;
+	} while (pdata);
+}
+
+struct mdss_panel_timing *mdss_panel_get_timing_by_name(
+		struct mdss_panel_data *pdata,
+		const char *name)
+{
+	struct mdss_panel_timing *pt;
+
+	if (pdata && name) {
+		list_for_each_entry(pt, &pdata->timings_list, list)
+			if (pt->name && !strcmp(pt->name, name))
+				return pt;
+	}
+
+	return NULL;
+}
+
+void mdss_panel_info_from_timing(struct mdss_panel_timing *pt,
+		struct mdss_panel_info *pinfo)
+{
+	if (!pt || !pinfo)
+		return;
+
+	pinfo->clk_rate = pt->clk_rate;
+	pinfo->xres = pt->xres;
+	pinfo->lcdc.h_front_porch = pt->h_front_porch;
+	pinfo->lcdc.h_back_porch = pt->h_back_porch;
+	pinfo->lcdc.h_pulse_width = pt->h_pulse_width;
+
+	pinfo->yres = pt->yres;
+	pinfo->lcdc.v_front_porch = pt->v_front_porch;
+	pinfo->lcdc.v_back_porch = pt->v_back_porch;
+	pinfo->lcdc.v_pulse_width = pt->v_pulse_width;
+
+	pinfo->lcdc.border_bottom = pt->border_bottom;
+	pinfo->lcdc.border_top = pt->border_top;
+	pinfo->lcdc.border_left = pt->border_left;
+	pinfo->lcdc.border_right = pt->border_right;
+	pinfo->lcdc.xres_pad = pt->border_left + pt->border_right;
+	pinfo->lcdc.yres_pad = pt->border_top + pt->border_bottom;
+
+	pinfo->lm_widths[0] = pt->lm_widths[0];
+	pinfo->lm_widths[1] = pt->lm_widths[1];
+
+	pinfo->mipi.frame_rate = pt->frame_rate;
+	pinfo->edp.frame_rate = pinfo->mipi.frame_rate;
+
+	pinfo->dsc = pt->dsc;
+	pinfo->dsc_enc_total = pt->dsc_enc_total;
+	pinfo->fbc = pt->fbc;
+	pinfo->compression_mode = pt->compression_mode;
+
+	pinfo->roi_alignment = pt->roi_alignment;
+	pinfo->te = pt->te;
+
+	/* override te parameters if panel is in sw te mode */
+	if (pinfo->sim_panel_mode == SIM_SW_TE_MODE)
+		mdss_panel_override_te_params(pinfo);
+}
+
+/*
+ * All the calculations done by this routine only depend on slice_width
+ * and slice_height. They are independent of picture dimension and dsc_merge.
+ * Thus this function should be called only when slice dimension changes.
+ * Since currently we don't support dynamic slice dimension changes, this
+ * routine shall be called only during probe.
+ */
+void mdss_panel_dsc_parameters_calc(struct dsc_desc *dsc)
+{
+	int bpp, bpc;
+	int mux_words_size;
+	int groups_per_line, groups_total;
+	int min_rate_buffer_size;
+	int hrd_delay;
+	int pre_num_extra_mux_bits, num_extra_mux_bits;
+	int slice_bits;
+	int target_bpp_x16;
+	int data;
+	int final_value, final_scale;
+
+	dsc->rc_model_size = 8192;	/* rate_buffer_size */
+	if (dsc->version == 0x11 && dsc->scr_rev == 0x1)
+		dsc->first_line_bpg_offset = 15;
+	else
+		dsc->first_line_bpg_offset = 12;
+	dsc->min_qp_flatness = 3;
+	dsc->max_qp_flatness = 12;
+	dsc->line_buf_depth = 9;
+
+	dsc->edge_factor = 6;
+	dsc->quant_incr_limit0 = 11;
+	dsc->quant_incr_limit1 = 11;
+	dsc->tgt_offset_hi = 3;
+	dsc->tgt_offset_lo = 3;
+
+	dsc->buf_thresh = dsc_rc_buf_thresh;
+	if (dsc->version == 0x11 && dsc->scr_rev == 0x1) {
+		dsc->range_min_qp = dsc_rc_range_min_qp_1_1_scr1;
+		dsc->range_max_qp = dsc_rc_range_max_qp_1_1_scr1;
+	} else {
+		dsc->range_min_qp = dsc_rc_range_min_qp_1_1;
+		dsc->range_max_qp = dsc_rc_range_max_qp_1_1;
+	}
+	dsc->range_bpg_offset = dsc_rc_range_bpg_offset;
+
+	bpp = dsc->bpp;
+	bpc = dsc->bpc;
+
+	if (bpp == 8)
+		dsc->initial_offset = 6144;
+	else
+		dsc->initial_offset = 2048;	/* bpp = 12 */
+
+	if (bpc == 8)
+		mux_words_size = 48;
+	else
+		mux_words_size = 64;	/* bpc == 12 */
+
+	dsc->slice_last_group_size = 3 - (dsc->slice_width % 3);
+
+	dsc->det_thresh_flatness = 7 + 2*(bpc - 8);
+
+	dsc->initial_xmit_delay = dsc->rc_model_size / (2 * bpp);
+
+	groups_per_line = DIV_ROUND_UP(dsc->slice_width, 3);
+
+	dsc->chunk_size = dsc->slice_width * bpp / 8;
+	if ((dsc->slice_width * bpp) % 8)
+		dsc->chunk_size++;
+
+	/* rbs-min */
+	min_rate_buffer_size =  dsc->rc_model_size - dsc->initial_offset +
+			dsc->initial_xmit_delay * bpp +
+			groups_per_line * dsc->first_line_bpg_offset;
+
+	hrd_delay = DIV_ROUND_UP(min_rate_buffer_size, bpp);
+
+	dsc->initial_dec_delay = hrd_delay - dsc->initial_xmit_delay;
+
+	dsc->initial_scale_value = 8 * dsc->rc_model_size /
+			(dsc->rc_model_size - dsc->initial_offset);
+
+	slice_bits = 8 * dsc->chunk_size * dsc->slice_height;
+
+	groups_total = groups_per_line * dsc->slice_height;
+
+	data = dsc->first_line_bpg_offset * 2048;
+
+	dsc->nfl_bpg_offset = DIV_ROUND_UP(data, (dsc->slice_height - 1));
+
+	pre_num_extra_mux_bits = 3 * (mux_words_size + (4 * bpc + 4) - 2);
+
+	num_extra_mux_bits = pre_num_extra_mux_bits - (mux_words_size -
+		((slice_bits - pre_num_extra_mux_bits) % mux_words_size));
+
+	data = 2048 * (dsc->rc_model_size - dsc->initial_offset
+		+ num_extra_mux_bits);
+	dsc->slice_bpg_offset = DIV_ROUND_UP(data, groups_total);
+
+	/* bpp * 16 + 0.5 */
+	data = bpp * 16;
+	data *= 2;
+	data++;
+	data /= 2;
+	target_bpp_x16 = data;
+
+	data = (dsc->initial_xmit_delay * target_bpp_x16) / 16;
+	final_value =  dsc->rc_model_size - data + num_extra_mux_bits;
+
+	final_scale = 8 * dsc->rc_model_size /
+		(dsc->rc_model_size - final_value);
+
+	dsc->final_offset = final_value;
+
+	data = (final_scale - 9) * (dsc->nfl_bpg_offset +
+		dsc->slice_bpg_offset);
+	dsc->scale_increment_interval = (2048 * dsc->final_offset) / data;
+
+	dsc->scale_decrement_interval = groups_per_line /
+		(dsc->initial_scale_value - 8);
+
+	pr_debug("initial_xmit_delay=%d\n", dsc->initial_xmit_delay);
+	pr_debug("bpg_offset, nfl=%d slice=%d\n",
+		dsc->nfl_bpg_offset, dsc->slice_bpg_offset);
+	pr_debug("groups_per_line=%d chunk_size=%d\n",
+		groups_per_line, dsc->chunk_size);
+	pr_debug("min_rate_buffer_size=%d hrd_delay=%d\n",
+		min_rate_buffer_size, hrd_delay);
+	pr_debug("initial_dec_delay=%d initial_scale_value=%d\n",
+		dsc->initial_dec_delay, dsc->initial_scale_value);
+	pr_debug("slice_bits=%d, groups_total=%d\n", slice_bits, groups_total);
+	pr_debug("first_line_bgp_offset=%d slice_height=%d\n",
+		dsc->first_line_bpg_offset, dsc->slice_height);
+	pr_debug("final_value=%d final_scale=%d\n", final_value, final_scale);
+	pr_debug("sacle_increment_interval=%d scale_decrement_interval=%d\n",
+		dsc->scale_increment_interval, dsc->scale_decrement_interval);
+}
+
+void mdss_panel_dsc_update_pic_dim(struct dsc_desc *dsc,
+	int pic_width, int pic_height)
+{
+	if (!dsc || !pic_width || !pic_height) {
+		pr_err("Error: invalid input. pic_width=%d pic_height=%d\n",
+			pic_width, pic_height);
+		return;
+	}
+
+	if ((pic_width % dsc->slice_width) ||
+	    (pic_height % dsc->slice_height)) {
+		pr_err("Error: pic_dim=%dx%d has to be multiple of slice_dim=%dx%d\n",
+			pic_width, pic_height,
+			dsc->slice_width, dsc->slice_height);
+		return;
+	}
+
+	dsc->pic_width = pic_width;
+	dsc->pic_height = pic_height;
+}
+
+void mdss_panel_dsc_initial_line_calc(struct dsc_desc *dsc, int enc_ip_width)
+{
+	int ssm_delay, total_pixels, soft_slice_per_enc;
+
+#define MAX_XMIT_DELAY 512
+	if (!dsc || !enc_ip_width || !dsc->slice_width ||
+	    (enc_ip_width < dsc->slice_width) ||
+	    (dsc->initial_xmit_delay > MAX_XMIT_DELAY)) {
+		pr_err("Error: invalid input\n");
+		return;
+	}
+#undef MAX_XMIT_DELAY
+
+	soft_slice_per_enc = enc_ip_width / dsc->slice_width;
+	ssm_delay = ((dsc->bpc < 10) ? 84 : 92);
+	total_pixels = ssm_delay * 3 + dsc->initial_xmit_delay + 47;
+	if (soft_slice_per_enc > 1)
+		total_pixels += (ssm_delay * 3);
+
+	dsc->initial_lines = DIV_ROUND_UP(total_pixels, dsc->slice_width);
+}
+
+void mdss_panel_dsc_pclk_param_calc(struct dsc_desc *dsc, int intf_width)
+{
+	int slice_per_pkt, slice_per_intf;
+	int bytes_in_slice, total_bytes_per_intf;
+
+	if (!dsc || !dsc->slice_width || !dsc->slice_per_pkt ||
+	    (intf_width < dsc->slice_width)) {
+		pr_err("Error: invalid input. intf_width=%d slice_width=%d\n",
+			intf_width,
+			dsc ? dsc->slice_width : -1);
+		return;
+	}
+
+	slice_per_pkt = dsc->slice_per_pkt;
+	slice_per_intf = DIV_ROUND_UP(intf_width, dsc->slice_width);
+
+	/*
+	 * If slice_per_pkt is greater than slice_per_intf then default to 1.
+	 * This can happen during partial update.
+	 */
+	if (slice_per_pkt > slice_per_intf)
+		slice_per_pkt = 1;
+
+	bytes_in_slice = DIV_ROUND_UP(dsc->slice_width * dsc->bpp, 8);
+	total_bytes_per_intf = bytes_in_slice * slice_per_intf;
+
+	dsc->eol_byte_num = total_bytes_per_intf % 3;
+	dsc->pclk_per_line =  DIV_ROUND_UP(total_bytes_per_intf, 3);
+	dsc->bytes_in_slice = bytes_in_slice;
+	dsc->bytes_per_pkt = bytes_in_slice * slice_per_pkt;
+	dsc->pkt_per_line = slice_per_intf / slice_per_pkt;
+
+	pr_debug("slice_per_pkt=%d slice_per_intf=%d bytes_in_slice=%d total_bytes_per_intf=%d\n",
+		slice_per_pkt, slice_per_intf,
+		bytes_in_slice, total_bytes_per_intf);
+}
+
+int mdss_panel_dsc_prepare_pps_buf(struct dsc_desc *dsc, char *buf,
+	int pps_id)
+{
+	char *bp;
+	char data;
+	int i, bpp;
+
+	bp = buf;
+	*bp++ = (dsc->version & 0xff);	/* pps0 */
+	*bp++ = (pps_id & 0xff);		/* pps1 */
+	bp++;					/* pps2, reserved */
+
+	data = dsc->line_buf_depth & 0x0f;
+	data |= ((dsc->bpc & 0xf) << 4);
+	*bp++ = data;				 /* pps3 */
+
+	bpp = dsc->bpp;
+	bpp <<= 4;	/* 4 fraction bits */
+	data = (bpp >> 8);
+	data &= 0x03;		/* upper two bits */
+	data |= ((dsc->block_pred_enable & 0x1) << 5);
+	data |= ((dsc->convert_rgb & 0x1) << 4);
+	data |= ((dsc->enable_422 & 0x1) << 3);
+	data |= ((dsc->vbr_enable & 0x1) << 2);
+	*bp++ = data;				/* pps4 */
+	*bp++ = (bpp & 0xff);			/* pps5 */
+
+	*bp++ = ((dsc->pic_height >> 8) & 0xff); /* pps6 */
+	*bp++ = (dsc->pic_height & 0x0ff);	/* pps7 */
+	*bp++ = ((dsc->pic_width >> 8) & 0xff);	/* pps8 */
+	*bp++ = (dsc->pic_width & 0x0ff);	/* pps9 */
+
+	*bp++ = ((dsc->slice_height >> 8) & 0xff);/* pps10 */
+	*bp++ = (dsc->slice_height & 0x0ff);	/* pps11 */
+	*bp++ = ((dsc->slice_width >> 8) & 0xff); /* pps12 */
+	*bp++ = (dsc->slice_width & 0x0ff);	/* pps13 */
+
+	*bp++ = ((dsc->chunk_size >> 8) & 0xff);/* pps14 */
+	*bp++ = (dsc->chunk_size & 0x0ff);	/* pps15 */
+
+	*bp++ = (dsc->initial_xmit_delay >> 8) & 0x3; /* pps16, bit 0, 1 */
+	*bp++ = (dsc->initial_xmit_delay & 0xff);/* pps17 */
+
+	*bp++ = ((dsc->initial_dec_delay >> 8) & 0xff);	/* pps18 */
+	*bp++ = (dsc->initial_dec_delay & 0xff);/* pps19 */
+
+	bp++;					/* pps20, reserved */
+
+	*bp++ = (dsc->initial_scale_value & 0x3f); /* pps21 */
+
+	*bp++ = ((dsc->scale_increment_interval >> 8) & 0xff); /* pps22 */
+	*bp++ = (dsc->scale_increment_interval & 0xff);	/* pps23 */
+
+	*bp++ = ((dsc->scale_decrement_interval >> 8) & 0xf); /* pps24 */
+	*bp++ = (dsc->scale_decrement_interval & 0x0ff);/* pps25 */
+
+	bp++;			/* pps26, reserved */
+
+	*bp++ = (dsc->first_line_bpg_offset & 0x1f);/* pps27 */
+
+	*bp++ = ((dsc->nfl_bpg_offset >> 8) & 0xff);/* pps28 */
+	*bp++ = (dsc->nfl_bpg_offset & 0x0ff);	/* pps29 */
+	*bp++ = ((dsc->slice_bpg_offset >> 8) & 0xff);/* pps30 */
+	*bp++ = (dsc->slice_bpg_offset & 0x0ff);/* pps31 */
+
+	*bp++ = ((dsc->initial_offset >> 8) & 0xff);/* pps32 */
+	*bp++ = (dsc->initial_offset & 0x0ff);	/* pps33 */
+
+	*bp++ = ((dsc->final_offset >> 8) & 0xff);/* pps34 */
+	*bp++ = (dsc->final_offset & 0x0ff);	/* pps35 */
+
+	*bp++ = (dsc->min_qp_flatness & 0x1f);	/* pps36 */
+	*bp++ = (dsc->max_qp_flatness & 0x1f);	/* pps37 */
+
+	*bp++ = ((dsc->rc_model_size >> 8) & 0xff);/* pps38 */
+	*bp++ = (dsc->rc_model_size & 0x0ff);	/* pps39 */
+
+	*bp++ = (dsc->edge_factor & 0x0f);	/* pps40 */
+
+	*bp++ = (dsc->quant_incr_limit0 & 0x1f);	/* pps41 */
+	*bp++ = (dsc->quant_incr_limit1 & 0x1f);	/* pps42 */
+
+	data = ((dsc->tgt_offset_hi & 0xf) << 4);
+	data |= (dsc->tgt_offset_lo & 0x0f);
+	*bp++ = data;				/* pps43 */
+
+	for (i = 0; i < 14; i++)
+		*bp++ = (dsc->buf_thresh[i] & 0xff);/* pps44 - pps57 */
+
+	for (i = 0; i < 15; i++) {		/* pps58 - pps87 */
+		data = (dsc->range_min_qp[i] & 0x1f); /* 5 bits */
+		data <<= 3;
+		data |= ((dsc->range_max_qp[i] >> 2) & 0x07); /* 3 bits */
+		*bp++ = data;
+		data = (dsc->range_max_qp[i] & 0x03); /* 2 bits */
+		data <<= 6;
+		data |= (dsc->range_bpg_offset[i] & 0x3f); /* 6 bits */
+		*bp++ = data;
+	}
+
+	/* pps88 to pps127 are reserved */
+
+	return DSC_PPS_LEN;	/* 128 */
+}
diff --git a/drivers/video/fbdev/msm/mdss_panel.h b/drivers/video/fbdev/msm/mdss_panel.h
new file mode 100644
index 0000000..53db752
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_panel.h
@@ -0,0 +1,1207 @@
+/* Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_PANEL_H
+#define MDSS_PANEL_H
+
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/debugfs.h>
+
+#define KHZ_TO_HZ 1000
+
+/* panel id type */
+struct panel_id {
+	u16 id;
+	u16 type;
+};
+
+enum fps_resolution {
+	FPS_RESOLUTION_DEFAULT,
+	FPS_RESOLUTION_HZ,
+	FPS_RESOLUTION_KHZ,
+};
+
+#define DEFAULT_FRAME_RATE	60
+#define DEFAULT_ROTATOR_FRAME_RATE 120
+#define ROTATOR_LOW_FRAME_RATE 30
+#define MDSS_DSI_RST_SEQ_LEN	10
+/* worst case prefill lines for all chipsets including all vertical blank */
+#define MDSS_MDP_MAX_PREFILL_FETCH 25
+
+#define OVERRIDE_CFG	"override"
+#define SIM_PANEL	"sim"
+#define SIM_SW_TE_PANEL	"sim-swte"
+#define SIM_HW_TE_PANEL	"sim-hwte"
+
+/* panel type list */
+#define NO_PANEL		0xffff	/* No Panel */
+#define MDDI_PANEL		1	/* MDDI */
+#define EBI2_PANEL		2	/* EBI2 */
+#define LCDC_PANEL		3	/* internal LCDC type */
+#define EXT_MDDI_PANEL		4	/* Ext.MDDI */
+#define TV_PANEL		5	/* TV */
+#define HDMI_PANEL		6	/* HDMI TV */
+#define DTV_PANEL		7	/* DTV */
+#define MIPI_VIDEO_PANEL	8	/* MIPI */
+#define MIPI_CMD_PANEL		9	/* MIPI */
+#define WRITEBACK_PANEL		10	/* Wifi display */
+#define LVDS_PANEL		11	/* LVDS */
+#define EDP_PANEL		12	/* LVDS */
+
+#define DSC_PPS_LEN		128
+
+/* HDR properties count */
+#define DISPLAY_PRIMARIES_COUNT	8	/* WRGB x and y values*/
+
+/*
+ * mdss_panel2str() - map a panel type id to a printable name
+ * @panel: panel type, one of the *_PANEL defines above
+ *
+ * Uses a designated-initializer lookup table so only the listed panel
+ * types get names; any other id (or a gap in the table) yields "UNKNOWN".
+ */
+static inline const char *mdss_panel2str(u32 panel)
+{
+	static const char * const names[] = {
+#define PANEL_NAME(n)[n ## _PANEL] = __stringify(n)
+		PANEL_NAME(MIPI_VIDEO),
+		PANEL_NAME(MIPI_CMD),
+		PANEL_NAME(EDP),
+		PANEL_NAME(HDMI),
+		PANEL_NAME(DTV),
+		PANEL_NAME(WRITEBACK),
+#undef PANEL_NAME
+	};
+
+	/* gaps in the sparse table are NULL - treat them as unknown too */
+	if (panel >= ARRAY_SIZE(names) || !names[panel])
+		return "UNKNOWN";
+
+	return names[panel];
+}
+
+/* panel class */
+enum {
+	DISPLAY_LCD = 0,	/* lcd = ebi2/mddi */
+	DISPLAY_LCDC,		/* lcdc */
+	DISPLAY_TV,		/* TV Out */
+	DISPLAY_EXT_MDDI,	/* External MDDI */
+	DISPLAY_WRITEBACK,
+};
+
+/* panel device location */
+enum {
+	DISPLAY_1 = 0,		/* attached as first device */
+	DISPLAY_2,		/* attached on second device */
+	DISPLAY_3,		/* attached on third device */
+	DISPLAY_4,		/* attached on fourth device */
+	MAX_PHYS_TARGET_NUM,
+};
+
+enum {
+	MDSS_PANEL_INTF_INVALID = -1,
+	MDSS_PANEL_INTF_DSI,
+	MDSS_PANEL_INTF_EDP,
+	MDSS_PANEL_INTF_HDMI,
+};
+
+enum {
+	MDSS_PANEL_POWER_OFF = 0,
+	MDSS_PANEL_POWER_ON,
+	MDSS_PANEL_POWER_LP1,
+	MDSS_PANEL_POWER_LP2,
+};
+
+enum {
+	MDSS_PANEL_LOW_PERSIST_MODE_OFF = 0,
+	MDSS_PANEL_LOW_PERSIST_MODE_ON,
+};
+
+enum {
+	MODE_GPIO_NOT_VALID = 0,
+	MODE_GPIO_HIGH,
+	MODE_GPIO_LOW,
+};
+
+/*
+ * enum sim_panel_modes - Different panel modes for simulator panels
+ *
+ * @SIM_MODE:		Disables all host reads for video mode simulator panels.
+ * @SIM_SW_TE_MODE:	Disables all host reads and generates the SW TE. Used
+ *                      for cmd mode simulator panels.
+ * @SIM_HW_TE_MODE:	Disables all host reads and expects TE from hardware
+ *                      (terminator card). Used for cmd mode simulator panels.
+ */
+enum {
+	SIM_MODE = 1,
+	SIM_SW_TE_MODE,
+	SIM_HW_TE_MODE,
+};
+
+struct mdss_rect {
+	u16 x;
+	u16 y;
+	u16 w;
+	u16 h;
+};
+
+#define MDSS_MAX_PANEL_LEN      256
+#define MDSS_INTF_MAX_NAME_LEN 5
+#define MDSS_DISPLAY_ID_MAX_LEN 16
+struct mdss_panel_intf {
+	char name[MDSS_INTF_MAX_NAME_LEN];
+	int  type;
+};
+
+struct mdss_panel_cfg {
+	char arg_cfg[MDSS_MAX_PANEL_LEN + 1];
+	int  pan_intf;
+	bool lk_cfg;
+	bool init_done;
+};
+
+#define MDP_INTF_DSI_CMD_FIFO_UNDERFLOW		0x0001
+#define MDP_INTF_DSI_VIDEO_FIFO_OVERFLOW	0x0002
+
+
+enum {
+	MDP_INTF_CALLBACK_DSI_WAIT,
+};
+
+struct mdss_intf_recovery {
+	int (*fxn)(void *ctx, int event);
+	void *data;
+};
+
+/**
+ * enum mdss_intf_events - Different events generated by MDP core
+ *
+ * @MDSS_EVENT_RESET:		MDP control path is being (re)initialized.
+ * @MDSS_EVENT_LINK_READY	Interface data path inited to ready state.
+ * @MDSS_EVENT_UNBLANK:		Sent before first frame update from MDP is
+ *				sent to panel.
+ * @MDSS_EVENT_PANEL_ON:	After first frame update from MDP.
+ * @MDSS_EVENT_POST_PANEL_ON	send 2nd phase panel on commands to panel
+ * @MDSS_EVENT_BLANK:		MDP has no contents to display only blank screen
+ *				is shown in panel. Sent before panel off.
+ * @MDSS_EVENT_PANEL_OFF:	MDP has suspended frame updates, panel should be
+ *				completely shutdown after this call.
+ * @MDSS_EVENT_CLOSE:		MDP has tore down entire session.
+ * @MDSS_EVENT_SUSPEND:		Propagation of power suspend event.
+ * @MDSS_EVENT_RESUME:		Propagation of power resume event.
+ * @MDSS_EVENT_CHECK_PARAMS:	Event generated when a panel reconfiguration is
+ *				requested including when resolution changes.
+ *				The event handler receives pointer to
+ *				struct mdss_panel_info and should return one of:
+ *				 - negative if the configuration is invalid
+ *				 - 0 if there is no panel reconfig needed
+ *				 - 1 if reconfig is needed to take effect
+ * @MDSS_EVENT_CONT_SPLASH_BEGIN: Special event used to handle transition of
+ *				display state from boot loader to panel driver.
+ *				The event handler will disable the panel.
+ * @MDSS_EVENT_CONT_SPLASH_FINISH: Special event used to handle transition of
+ *				display state from boot loader to panel driver.
+ *				The event handler will enable the panel and
+ *				vote for the display clocks.
+ * @MDSS_EVENT_PANEL_UPDATE_FPS: Event to update the frame rate of the panel.
+ * @MDSS_EVENT_FB_REGISTERED:	Called after fb dev driver has been registered,
+ *				panel driver gets ptr to struct fb_info which
+ *				holds fb dev information.
+ * @MDSS_EVENT_PANEL_CLK_CTRL:	panel clock control
+ *				- 0 clock disable
+ *				- 1 clock enable
+ * @MDSS_EVENT_DSI_CMDLIST_KOFF: acquire dsi_mdp_busy lock before kickoff.
+ * @MDSS_EVENT_ENABLE_PARTIAL_ROI: Event to update ROI of the panel.
+ * @MDSS_EVENT_DSC_PPS_SEND: Event to send DSC PPS command to panel.
+ * @MDSS_EVENT_DSI_STREAM_SIZE: Event to update DSI controller's stream size
+ * @MDSS_EVENT_DSI_UPDATE_PANEL_DATA: Event to update the dsi driver structures
+ *				based on the dsi mode passed as argument.
+ *				- 0: update to video mode
+ *				- 1: update to command mode
+ * @MDSS_EVENT_REGISTER_RECOVERY_HANDLER: Event to recover the interface in
+ *					case there was any errors detected.
+ * @MDSS_EVENT_REGISTER_MDP_CALLBACK: Event to register callback to MDP driver.
+ * @MDSS_EVENT_DSI_PANEL_STATUS: Event to check the panel status
+ *				<= 0: panel check fail
+ *				>  0: panel check success
+ * @MDSS_EVENT_DSI_DYNAMIC_SWITCH: Send DCS command to panel to initiate
+ *				switching panel to new mode
+ *				- MIPI_VIDEO_PANEL: switch to video mode
+ *				- MIPI_CMD_PANEL: switch to command mode
+ * @MDSS_EVENT_DSI_RECONFIG_CMD: Setup DSI controller in new mode
+ *				- MIPI_VIDEO_PANEL: switch to video mode
+ *				- MIPI_CMD_PANEL: switch to command mode
+ * @MDSS_EVENT_DSI_RESET_WRITE_PTR: Reset the write pointer coordinates on
+ *				the panel.
+ * @MDSS_EVENT_PANEL_TIMING_SWITCH: Panel timing switch is requested.
+ *				Argument provided is new panel timing.
+ */
+enum mdss_intf_events {
+	MDSS_EVENT_RESET = 1,
+	MDSS_EVENT_LINK_READY,
+	MDSS_EVENT_UNBLANK,
+	MDSS_EVENT_PANEL_ON,
+	MDSS_EVENT_POST_PANEL_ON,
+	MDSS_EVENT_BLANK,
+	MDSS_EVENT_PANEL_OFF,
+	MDSS_EVENT_CLOSE,
+	MDSS_EVENT_SUSPEND,
+	MDSS_EVENT_RESUME,
+	MDSS_EVENT_CHECK_PARAMS,
+	MDSS_EVENT_CONT_SPLASH_BEGIN,
+	MDSS_EVENT_CONT_SPLASH_FINISH,
+	MDSS_EVENT_PANEL_UPDATE_FPS,
+	MDSS_EVENT_FB_REGISTERED,
+	MDSS_EVENT_PANEL_CLK_CTRL,
+	MDSS_EVENT_DSI_CMDLIST_KOFF,
+	MDSS_EVENT_ENABLE_PARTIAL_ROI,
+	MDSS_EVENT_DSC_PPS_SEND,
+	MDSS_EVENT_DSI_STREAM_SIZE,
+	MDSS_EVENT_DSI_UPDATE_PANEL_DATA,
+	MDSS_EVENT_REGISTER_RECOVERY_HANDLER,
+	MDSS_EVENT_REGISTER_MDP_CALLBACK,
+	MDSS_EVENT_DSI_PANEL_STATUS,
+	MDSS_EVENT_DSI_DYNAMIC_SWITCH,
+	MDSS_EVENT_DSI_RECONFIG_CMD,
+	MDSS_EVENT_DSI_RESET_WRITE_PTR,
+	MDSS_EVENT_PANEL_TIMING_SWITCH,
+	MDSS_EVENT_UPDATE_PARAMS,
+	MDSS_EVENT_MAX,
+};
+
+struct lcd_panel_info {
+	u32 h_back_porch;
+	u32 h_front_porch;
+	u32 h_pulse_width;
+	u32 v_back_porch;
+	u32 v_front_porch;
+	u32 v_pulse_width;
+	u32 border_clr;
+	u32 underflow_clr;
+	u32 hsync_skew;
+	u32 border_top;
+	u32 border_bottom;
+	u32 border_left;
+	u32 border_right;
+	/* Pad width */
+	u32 xres_pad;
+	/* Pad height */
+	u32 yres_pad;
+	u32 frame_rate;
+};
+
+
+/* DSI PHY configuration */
+struct mdss_dsi_phy_ctrl {
+	char regulator[7];	/* 8996, 1 * 5 */
+	char timing[12];
+	char ctrl[4];
+	char strength[10];	/* 8996, 2 * 5 */
+	char bistctrl[6];
+	uint32_t pll[21];
+	char lanecfg[45];	/* 8996, 4 * 5 */
+	bool reg_ldo_mode;
+
+	char timing_8996[40];/* 8996, 8 * 5 */
+	char regulator_len;
+	char strength_len;
+	char lanecfg_len;
+};
+
+/**
+ * enum dynamic_mode_switch - Dynamic mode switch methods
+ * @DYNAMIC_MODE_SWITCH_DISABLED: Dynamic mode switch is not supported
+ * @DYNAMIC_MODE_SWITCH_SUSPEND_RESUME: Switch requires panel suspend/resume
+ * @DYNAMIC_MODE_SWITCH_IMMEDIATE: Supports video/cmd mode switch immediately
+ * @DYNAMIC_MODE_RESOLUTION_SWITCH_IMMEDIATE: Panel supports display resolution
+ * switch immediately.
+ **/
+enum dynamic_mode_switch {
+	DYNAMIC_MODE_SWITCH_DISABLED = 0,
+	DYNAMIC_MODE_SWITCH_SUSPEND_RESUME,
+	DYNAMIC_MODE_SWITCH_IMMEDIATE,
+	DYNAMIC_MODE_RESOLUTION_SWITCH_IMMEDIATE,
+};
+
+/**
+ * enum dynamic_switch_modes - Type of dynamic mode switch to be given as
+ * argument to MDSS_EVENT_DSI_DYNAMIC_SWITCH event
+ * @SWITCH_TO_CMD_MODE: Switch from DSI video mode to command mode
+ * @SWITCH_TO_VIDEO_MODE: Switch from DSI command mode to video mode
+ * @SWITCH_RESOLUTION: Switch only display resolution
+ **/
+enum dynamic_switch_modes {
+	SWITCH_MODE_UNKNOWN = 0,
+	SWITCH_TO_CMD_MODE,
+	SWITCH_TO_VIDEO_MODE,
+	SWITCH_RESOLUTION,
+};
+
+/**
+ * struct mdss_panel_timing - structure for panel timing information
+ * @list: List head ptr to track within panel data timings list
+ * @name: A unique name of this timing that can be used to identify it
+ * @xres: Panel width
+ * @yres: Panel height
+ * @h_back_porch: Horizontal back porch
+ * @h_front_porch: Horizontal front porch
+ * @h_pulse_width: Horizontal pulse width
+ * @hsync_skew: Horizontal sync skew
+ * @v_back_porch: Vertical back porch
+ * @v_front_porch: Vertical front porch
+ * @v_pulse_width: Vertical pulse width
+ * @border_top: Border color on top
+ * @border_bottom: Border color on bottom
+ * @border_left: Border color on left
+ * @border_right: Border color on right
+ * @clk_rate: Pixel clock rate of this panel timing
+ * @frame_rate: Display refresh rate
+ * @fbc: Framebuffer compression parameters for this display timing
+ * @te: Tearcheck parameters for this display timing
+ **/
+struct mipi_panel_info {
+	char boot_mode;	/* identify if mode switched from starting mode */
+	char mode;		/* video/cmd */
+	char interleave_mode;
+	char crc_check;
+	char ecc_check;
+	char dst_format;	/* shared by video and command */
+	char data_lane0;
+	char data_lane1;
+	char data_lane2;
+	char data_lane3;
+	char rgb_swap;
+	char b_sel;
+	char g_sel;
+	char r_sel;
+	char rx_eot_ignore;
+	char tx_eot_append;
+	char t_clk_post; /* 0xc0, DSI_CLKOUT_TIMING_CTRL */
+	char t_clk_pre;  /* 0xc0, DSI_CLKOUT_TIMING_CTRL */
+	char vc;	/* virtual channel */
+	struct mdss_dsi_phy_ctrl dsi_phy_db;
+	/* video mode */
+	char pulse_mode_hsa_he;
+	char hfp_power_stop;
+	char hbp_power_stop;
+	char hsa_power_stop;
+	char eof_bllp_power_stop;
+	char last_line_interleave_en;
+	char bllp_power_stop;
+	char traffic_mode;
+	char frame_rate;
+	/* command mode */
+	char frame_rate_idle;
+	char interleave_max;
+	char insert_dcs_cmd;
+	char wr_mem_continue;
+	char wr_mem_start;
+	char te_sel;
+	char stream;	/* 0 or 1 */
+	char mdp_trigger;
+	char dma_trigger;
+	/* Dynamic Switch Support */
+	enum dynamic_mode_switch dms_mode;
+
+	u32 pixel_packing;
+	u32 dsi_pclk_rate;
+	/* The packet-size should not be changed */
+	char no_max_pkt_size;
+	/* Clock required during LP commands */
+	bool force_clk_lane_hs;
+
+	char vsync_enable;
+	char hw_vsync_mode;
+
+	char lp11_init;
+	u32  init_delay;
+	u32  post_init_delay;
+	u8 default_lanes;
+};
+
+struct edp_panel_info {
+	char frame_rate;	/* fps */
+};
+
+/**
+ * struct dynamic_fps_data - defines dynamic fps related data
+ * @hfp: horizontal front porch
+ * @hbp: horizontal back porch
+ * @hpw: horizontal pulse width
+ * @clk_rate: panel clock rate in HZ
+ * @fps: frames per second
+ */
+struct dynamic_fps_data {
+	u32 hfp;
+	u32 hbp;
+	u32 hpw;
+	u32 clk_rate;
+	u32 fps;
+};
+
+/**
+ * enum dynamic_fps_update - defines fps update modes
+ * @DFPS_SUSPEND_RESUME_MODE: suspend/resume mode
+ * @DFPS_IMMEDIATE_CLK_UPDATE_MODE: update fps using clock
+ * @DFPS_IMMEDIATE_PORCH_UPDATE_MODE_VFP: update fps using vertical timings
+ * @DFPS_IMMEDIATE_PORCH_UPDATE_MODE_HFP: update fps using horizontal timings
+ * @DFPS_IMMEDIATE_MULTI_UPDATE_MODE_CLK_HFP: update fps using both horizontal
+ *  timings and clock.
+ * @DFPS_IMMEDIATE_MULTI_MODE_HFP_CALC_CLK: update fps using both
+ *  horizontal timings; the clock needs to be calculated based on the new
+ *  clock and porches.
+ * @DFPS_MODE_MAX: defines maximum limit of supported modes.
+ */
+enum dynamic_fps_update {
+	DFPS_SUSPEND_RESUME_MODE,
+	DFPS_IMMEDIATE_CLK_UPDATE_MODE,
+	DFPS_IMMEDIATE_PORCH_UPDATE_MODE_VFP,
+	DFPS_IMMEDIATE_PORCH_UPDATE_MODE_HFP,
+	DFPS_IMMEDIATE_MULTI_UPDATE_MODE_CLK_HFP,
+	DFPS_IMMEDIATE_MULTI_MODE_HFP_CALC_CLK,
+	DFPS_MODE_MAX
+};
+
+enum lvds_mode {
+	LVDS_SINGLE_CHANNEL_MODE,
+	LVDS_DUAL_CHANNEL_MODE,
+};
+
+struct lvds_panel_info {
+	enum lvds_mode channel_mode;
+	/* Channel swap in dual mode */
+	char channel_swap;
+};
+
+enum {
+	COMPRESSION_NONE,
+	COMPRESSION_DSC,
+	COMPRESSION_FBC
+};
+
+struct dsc_desc {
+	u8 version; /* top 4 bits major and lower 4 bits minor version */
+	u8 scr_rev; /* 8 bit value for dsc scr revision */
+
+	/*
+	 * Following parameters can change per frame if partial update is on
+	 */
+	int pic_height;
+	int pic_width;
+	int initial_lines;
+
+	/*
+	 * Following parameters are used for DSI and not for MDP. They can
+	 * change per frame if partial update is enabled.
+	 */
+	int pkt_per_line;
+	int bytes_in_slice;
+	int bytes_per_pkt;
+	int eol_byte_num;
+	int pclk_per_line;	/* width */
+
+	/*
+	 * Following parameters only changes when slice dimensions are changed.
+	 */
+	int full_frame_slices; /* denotes number of slice per intf */
+	int slice_height;
+	int slice_width;
+	int chunk_size;
+
+	int slice_last_group_size;
+	int bpp;	/* target bits per pixel */
+	int bpc;	/* uncompressed bits per component */
+	int line_buf_depth;
+	bool config_by_manufacture_cmd;
+	bool block_pred_enable;
+	int vbr_enable;
+	int enable_422;
+	int convert_rgb;
+	int input_10_bits;
+	int slice_per_pkt;
+
+	int initial_dec_delay;
+	int initial_xmit_delay;
+
+	int initial_scale_value;
+	int scale_decrement_interval;
+	int scale_increment_interval;
+
+	int first_line_bpg_offset;
+	int nfl_bpg_offset;
+	int slice_bpg_offset;
+
+	int initial_offset;
+	int final_offset;
+
+	int rc_model_size;	/* rate_buffer_size */
+
+	int det_thresh_flatness;
+	int max_qp_flatness;
+	int min_qp_flatness;
+	int edge_factor;
+	int quant_incr_limit0;
+	int quant_incr_limit1;
+	int tgt_offset_hi;
+	int tgt_offset_lo;
+	u32 *buf_thresh;
+	char *range_min_qp;
+	char *range_max_qp;
+	char *range_bpg_offset;
+};
+
+struct fbc_panel_info {
+	u32 enabled;
+	u32 target_bpp;
+	u32 comp_mode;
+	u32 qerr_enable;
+	u32 cd_bias;
+	u32 pat_enable;
+	u32 vlc_enable;
+	u32 bflc_enable;
+
+	u32 line_x_budget;
+	u32 block_x_budget;
+	u32 block_budget;
+
+	u32 lossless_mode_thd;
+	u32 lossy_mode_thd;
+	u32 lossy_rgb_thd;
+	u32 lossy_mode_idx;
+
+	u32 slice_height;
+	bool pred_mode;
+	bool enc_mode;
+	u32 max_pred_err;
+};
+
+struct mdss_mdp_pp_tear_check {
+	u32 tear_check_en;
+	u32 sync_cfg_height;
+	u32 vsync_init_val;
+	u32 sync_threshold_start;
+	u32 sync_threshold_continue;
+	u32 start_pos;
+	u32 rd_ptr_irq;
+	u32 wr_ptr_irq;
+	u32 refx100;
+};
+
+struct mdss_panel_roi_alignment {
+	u32 xstart_pix_align;
+	u32 width_pix_align;
+	u32 ystart_pix_align;
+	u32 height_pix_align;
+	u32 min_width;
+	u32 min_height;
+};
+
+struct mdss_panel_hdr_properties {
+	bool hdr_enabled;
+
+	/* WRGB X and y values arrayed in format */
+	/* [WX, WY, RX, RY, GX, GY, BX, BY] */
+	u32 display_primaries[DISPLAY_PRIMARIES_COUNT];
+
+	/* peak brightness supported by panel */
+	u32 peak_brightness;
+	/* Blackness level supported by panel */
+	u32 blackness_level;
+};
+
+struct mdss_panel_info {
+	u32 xres;
+	u32 yres;
+	u32 physical_width;
+	u32 physical_height;
+	u32 bpp;
+	u32 type;
+	u32 wait_cycle;
+	u32 pdest;
+	u32 brightness_max;
+	u32 bl_max;
+	u32 bl_min;
+	u32 fb_num;
+	u64 clk_rate;
+	u32 clk_min;
+	u64 clk_max;
+	u32 mdp_transfer_time_us;
+	u32 frame_count;
+	u32 is_3d_panel;
+	u32 out_format;
+	u32 rst_seq[MDSS_DSI_RST_SEQ_LEN];
+	u32 rst_seq_len;
+	u32 vic; /* video identification code */
+	struct mdss_rect roi;
+	int pwm_pmic_gpio;
+	int pwm_lpg_chan;
+	int pwm_period;
+	bool dynamic_fps;
+	bool ulps_feature_enabled;
+	bool ulps_suspend_enabled;
+	bool panel_ack_disabled;
+	bool esd_check_enabled;
+	bool allow_phy_power_off;
+	char dfps_update;
+	/* new requested fps before it is updated in hw */
+	int new_fps;
+	/* stores initial fps after boot */
+	u32 default_fps;
+	/* stores initial vtotal (vfp-method) or htotal (hfp-method) */
+	u32 saved_total;
+	/* stores initial vfp (vfp-method) or hfp (hfp-method) */
+	u32 saved_fporch;
+	/* current fps, once is programmed in hw */
+	int current_fps;
+
+	int panel_max_fps;
+	int panel_max_vtotal;
+	u32 mode_gpio_state;
+	u32 min_fps;
+	u32 max_fps;
+	u32 prg_fet;
+	struct mdss_panel_roi_alignment roi_alignment;
+
+	u32 cont_splash_enabled;
+	bool esd_rdy;
+	bool partial_update_supported; /* value from dts if pu is supported */
+	bool partial_update_enabled; /* is pu currently allowed */
+	u32 dcs_cmd_by_left;
+	u32 partial_update_roi_merge;
+	struct ion_handle *splash_ihdl;
+	int panel_power_state;
+	int compression_mode;
+
+	uint32_t panel_dead;
+	u32 panel_force_dead;
+	u32 panel_orientation;
+	bool dynamic_switch_pending;
+	bool is_lpm_mode;
+	bool is_split_display; /* two DSIs in one display, pp split or not */
+	bool use_pingpong_split;
+
+	/*
+	 * index[0] = left layer mixer, value of 0 not valid
+	 * index[1] = right layer mixer, 0 is possible
+	 *
+	 * Ex(1): 1080x1920 display using single DSI and single lm, [1080 0]
+	 * Ex(2): 1440x2560 display using two DSIs and two lms,
+	 *        each with 720x2560, [720 0]
+	 * Ex(3): 1440x2560 display using single DSI w/ compression and
+	 *        single lm, [1440 0]
+	 * Ex(4): 1440x2560 display using single DSI w/ compression and
+	 *        two lms, [720 720]
+	 * Ex(5): 1080x1920 display using single DSI and two lm, [540 540]
+	 * Ex(6): 1080x1920 display using single DSI and two lm,
+	 *        [880 400] - not practical but possible
+	 */
+	u32 lm_widths[2];
+
+	bool is_prim_panel;
+	bool is_pluggable;
+	char display_id[MDSS_DISPLAY_ID_MAX_LEN];
+	bool is_cec_supported;
+
+	/* refer sim_panel_modes enum for different modes */
+	u8 sim_panel_mode;
+
+	void *edid_data;
+	void *dba_data;
+	void *cec_data;
+
+	char panel_name[MDSS_MAX_PANEL_LEN];
+	struct mdss_mdp_pp_tear_check te;
+
+	/*
+	 * Value of 2 only when single DSI is configured with 2 DSC
+	 * encoders. When 2 encoders are used, currently both use
+	 * same configuration.
+	 */
+	u8 dsc_enc_total; /* max 2 */
+	struct dsc_desc dsc;
+
+	/*
+	 * To determine, if DSC panel requires the pps to be sent
+	 * before or after the switch, during dynamic resolution switching
+	 */
+	bool send_pps_before_switch;
+
+	struct lcd_panel_info lcdc;
+	struct fbc_panel_info fbc;
+	struct mipi_panel_info mipi;
+	struct lvds_panel_info lvds;
+	struct edp_panel_info edp;
+
+	bool is_dba_panel;
+
+	/*
+	 * Delay(in ms) to accommodate s/w delay while
+	 * configuring the event timer wakeup logic.
+	 */
+	u32 adjust_timer_delay_ms;
+
+	/* debugfs structure for the panel */
+	struct mdss_panel_debugfs_info *debugfs_info;
+
+	/* persistence mode on/off */
+	bool persist_mode;
+
+	/* HDR properties of display panel*/
+	struct mdss_panel_hdr_properties hdr_properties;
+};
+
+struct mdss_panel_timing {
+	struct list_head list;
+	const char *name;
+
+	u32 xres;
+	u32 yres;
+
+	u32 h_back_porch;
+	u32 h_front_porch;
+	u32 h_pulse_width;
+	u32 hsync_skew;
+	u32 v_back_porch;
+	u32 v_front_porch;
+	u32 v_pulse_width;
+
+	u32 border_top;
+	u32 border_bottom;
+	u32 border_left;
+	u32 border_right;
+
+	u32 lm_widths[2];
+
+	u64 clk_rate;
+	char frame_rate;
+
+	u8 dsc_enc_total;
+	struct dsc_desc dsc;
+	struct fbc_panel_info fbc;
+	u32 compression_mode;
+
+	struct mdss_mdp_pp_tear_check te;
+	struct mdss_panel_roi_alignment roi_alignment;
+};
+
+struct mdss_panel_data {
+	struct mdss_panel_info panel_info;
+	void (*set_backlight)(struct mdss_panel_data *pdata, u32 bl_level);
+	int (*apply_display_setting)(struct mdss_panel_data *pdata, u32 mode);
+	unsigned char *mmss_cc_base;
+
+	/**
+	 * event_handler() - callback handler for MDP core events
+	 * @pdata:	Pointer referring to the panel struct associated to this
+	 *		event. Can be used to retrieve panel info.
+	 * @e:		Event being generated, see enum mdss_intf_events
+	 * @arg:	Optional argument to pass some info from some events.
+	 *
+	 * Used to register handler to be used to propagate different events
+	 * happening in MDP core driver. Panel driver can listen for any of
+	 * these events to perform appropriate actions for panel initialization
+	 * and teardown.
+	 */
+	int (*event_handler)(struct mdss_panel_data *pdata, int e, void *arg);
+	struct device_node *(*get_fb_node)(struct platform_device *pdev);
+	bool (*get_idle)(struct mdss_panel_data *pdata);
+
+	struct list_head timings_list;
+	struct mdss_panel_timing *current_timing;
+	bool active;
+
+	/* To store dsc cfg name passed by bootloader */
+	char dsc_cfg_np_name[MDSS_MAX_PANEL_LEN];
+	struct mdss_panel_data *next;
+
+	int panel_te_gpio;
+	struct completion te_done;
+};
+
+struct mdss_panel_debugfs_info {
+	struct dentry *root;
+	struct dentry *parent;
+	struct mdss_panel_info panel_info;
+	u32 override_flag;
+	struct mdss_panel_debugfs_info *next;
+};
+
+/**
+ * mdss_panel_get_framerate() - get panel frame rate based on panel information
+ * @panel_info:	Pointer to panel info containing all panel information
+ * @flags:	one of enum fps_resolution, selects Hz vs kHz for the result
+ */
+static inline u32 mdss_panel_get_framerate(struct mdss_panel_info *panel_info,
+					   u32 flags)
+{
+	u32 frame_rate, pixel_total;
+	u64 rate;
+	struct mdss_panel_data *panel_data;
+	bool idle = false;
+
+	if (panel_info == NULL) {
+		frame_rate = DEFAULT_FRAME_RATE;
+		goto end;
+	}
+
+	/*
+	 * panel_info is always embedded in struct mdss_panel_data; resolve
+	 * the container only after the NULL check above so we never do
+	 * pointer arithmetic on a NULL pointer.
+	 */
+	panel_data = container_of(panel_info, typeof(*panel_data), panel_info);
+
+	switch (panel_info->type) {
+	case MIPI_VIDEO_PANEL:
+	case MIPI_CMD_PANEL:
+		if (panel_data->get_idle)
+			idle = panel_data->get_idle(panel_data);
+		/* idle panels report their (lower) idle refresh rate */
+		frame_rate = idle ? panel_info->mipi.frame_rate_idle :
+				panel_info->mipi.frame_rate;
+		break;
+	case EDP_PANEL:
+		frame_rate = panel_info->edp.frame_rate;
+		break;
+	case WRITEBACK_PANEL:
+		frame_rate = DEFAULT_FRAME_RATE;
+		break;
+	case DTV_PANEL:
+		if (panel_info->dynamic_fps) {
+			frame_rate = panel_info->lcdc.frame_rate;
+			break;
+		}
+		/* fallthrough - derive the rate from timings like others */
+	default:
+		/* fps = pixel clock / total pixels per frame */
+		pixel_total = (panel_info->lcdc.h_back_porch +
+			  panel_info->lcdc.h_front_porch +
+			  panel_info->lcdc.h_pulse_width +
+			  panel_info->xres) *
+			 (panel_info->lcdc.v_back_porch +
+			  panel_info->lcdc.v_front_porch +
+			  panel_info->lcdc.v_pulse_width +
+			  panel_info->yres);
+		if (pixel_total) {
+			rate = panel_info->clk_rate * KHZ_TO_HZ;
+			do_div(rate, pixel_total);
+			frame_rate = (u32)rate;
+		} else {
+			frame_rate = DEFAULT_FRAME_RATE;
+		}
+		break;
+	}
+end:
+	/* scale the result to the resolution the caller asked for */
+	if (flags == FPS_RESOLUTION_KHZ) {
+		if (!(frame_rate / KHZ_TO_HZ))
+			frame_rate *= KHZ_TO_HZ;
+	} else if (flags == FPS_RESOLUTION_HZ) {
+		if (frame_rate / KHZ_TO_HZ)
+			frame_rate /= KHZ_TO_HZ;
+	}
+
+	return frame_rate;
+}
+
+/*
+ * mdss_panel_get_vtotal() - return panel vertical height
+ * @pinfo:	Pointer to panel info containing all panel information
+ *
+ * Returns the total height of the panel including any blanking regions
+ * which are not visible to user but used to calculate panel pixel clock.
+ */
+static inline int mdss_panel_get_vtotal(struct mdss_panel_info *pinfo)
+{
+	/* active height plus blanking and the top/bottom border pixels */
+	return pinfo->yres + pinfo->lcdc.v_back_porch +
+			pinfo->lcdc.v_front_porch +
+			pinfo->lcdc.v_pulse_width +
+			pinfo->lcdc.border_top +
+			pinfo->lcdc.border_bottom;
+}
+
+/*
+ * mdss_panel_get_htotal() - return panel horizontal width
+ * @pinfo:	Pointer to panel info containing all panel information
+ * @compression: true to factor fbc settings, false to ignore.
+ *
+ * Returns the total width of the panel including any blanking regions
+ * which are not visible to user but used for calculations. For certain
+ * usescases where the fbc parameters need to be ignored like bandwidth
+ * calculation, the appropriate flag can be passed.
+ */
+static inline int mdss_panel_get_htotal(struct mdss_panel_info *pinfo, bool
+		compression)
+{
+	struct dsc_desc *dsc = NULL;
+
+	/* visible width plus left/right border pixels */
+	int adj_xres = pinfo->xres + pinfo->lcdc.border_left +
+				pinfo->lcdc.border_right;
+
+	if (compression) {
+		if (pinfo->compression_mode == COMPRESSION_DSC) {
+			/* DSC: use the precomputed compressed line width */
+			dsc = &pinfo->dsc;
+			adj_xres = dsc->pclk_per_line;
+		} else if (pinfo->fbc.enabled) {
+			/* FBC: scale width by target_bpp/bpp ratio */
+			adj_xres = mult_frac(adj_xres,
+				pinfo->fbc.target_bpp, pinfo->bpp);
+		}
+	}
+
+	return adj_xres + pinfo->lcdc.h_back_porch +
+		pinfo->lcdc.h_front_porch +
+		pinfo->lcdc.h_pulse_width;
+}
+
+static inline bool is_dsc_compression(struct mdss_panel_info *pinfo)
+{
+	/* NULL-safe check whether DSC is the active compression mode */
+	return pinfo && (pinfo->compression_mode == COMPRESSION_DSC);
+}
+
+int mdss_register_panel(struct platform_device *pdev,
+	struct mdss_panel_data *pdata);
+
+/*
+ * mdss_panel_is_power_off: - checks if a panel is off
+ * @panel_power_state: enum identifying the power state to be checked
+ */
+static inline bool mdss_panel_is_power_off(int panel_power_state)
+{
+	/* only the fully-off state counts; LP1/LP2 are still "on" */
+	return panel_power_state == MDSS_PANEL_POWER_OFF;
+}
+
+/**
+ * mdss_panel_is_power_on_interactive: - checks if a panel is on and interactive
+ * @panel_power_state: enum identifying the power state to be checked
+ *
+ * This function returns true only if the panel is fully interactive and
+ * operating in normal mode.
+ */
+static inline bool mdss_panel_is_power_on_interactive(int panel_power_state)
+{
+	/* full interactive operation, i.e. not off and not a low-power mode */
+	return panel_power_state == MDSS_PANEL_POWER_ON;
+}
+
+/**
+ * mdss_panel_is_panel_power_on: - checks if a panel is on
+ * @panel_power_state: enum identifying the power state to be checked
+ *
+ * A panel is considered to be on as long as it can accept any commands
+ * or data. Sometimes it is possible to program the panel to be in a low
+ * power non-interactive state. This function returns false only if panel
+ * has explicitly been turned off.
+ */
+static inline bool mdss_panel_is_power_on(int panel_power_state)
+{
+	/* any state other than fully-off (ON, LP1, LP2) counts as "on" */
+	return !mdss_panel_is_power_off(panel_power_state);
+}
+
+/**
+ * mdss_panel_is_panel_power_on_lp: - checks if a panel is in a low power mode
+ * @pdata: pointer to the panel struct associated to the panel
+ * @panel_power_state: enum identifying the power state to be checked
+ *
+ * This function returns true if the panel is in an intermediate low power
+ * state where it is still on but not fully interactive. It may or may not
+ * accept any commands and display updates.
+ */
+static inline bool mdss_panel_is_power_on_lp(int panel_power_state)
+{
+	/* low power means neither fully off nor fully interactive */
+	return !(mdss_panel_is_power_off(panel_power_state) ||
+		 mdss_panel_is_power_on_interactive(panel_power_state));
+}
+
+/**
+ * mdss_panel_is_panel_power_on_ulp: - checks if panel is in
+ *                                   ultra low power mode
+ * @pdata: pointer to the panel struct associated to the panel
+ * @panel_power_state: enum identifying the power state to be checked
+ *
+ * This function returns true if the panel is in a ultra low power
+ * state where it is still on but cannot receive any display updates.
+ */
+static inline bool mdss_panel_is_power_on_ulp(int panel_power_state)
+{
+	/* LP2 is the deepest low-power state: on, but no display updates */
+	return panel_power_state == MDSS_PANEL_POWER_LP2;
+}
+
+/**
+ * mdss_panel_update_clk_rate() - update the clock rate based on panel timing
+ *				information.
+ * @panel_info:	Pointer to panel info containing all panel information
+ * @fps: frame rate of the panel
+ */
+static inline void mdss_panel_update_clk_rate(struct mdss_panel_info *pinfo,
+	u32 fps)
+{
+	struct lcd_panel_info *lcdc = &pinfo->lcdc;
+	u32 htotal, vtotal;
+
+	/* only DTV panels derive their pixel clock from timings * fps here */
+	if (pinfo->type == DTV_PANEL) {
+		htotal = pinfo->xres + lcdc->h_front_porch +
+				lcdc->h_back_porch + lcdc->h_pulse_width;
+		vtotal = pinfo->yres + lcdc->v_front_porch +
+				lcdc->v_back_porch + lcdc->v_pulse_width;
+
+		/*
+		 * NOTE(review): the /1000 suggests clk_rate is kept in kHz
+		 * for fps given in Hz - confirm against callers.
+		 */
+		pinfo->clk_rate = mult_frac(htotal * vtotal, fps, 1000);
+
+		pr_debug("vtotal %d, htotal %d, rate %llu\n",
+			vtotal, htotal, pinfo->clk_rate);
+	}
+}
+
+/**
+ * mdss_panel_calc_frame_rate() - calculate panel frame rate based
+ *                                on panel timing information.
+ * @panel_info:	Pointer to panel info containing all panel information
+ */
+static inline u8 mdss_panel_calc_frame_rate(struct mdss_panel_info *pinfo)
+{
+	u32 total_pixels;
+	u8 fps;
+	unsigned long pclk = pinfo->mipi.dsi_pclk_rate;
+	u32 active_width = pinfo->xres;
+
+	/* DSC case assumes a 3:1 compressed line width */
+	if (pinfo->compression_mode == COMPRESSION_DSC)
+		active_width /= 3;
+
+	/* total pixels clocked out per frame, including blanking */
+	total_pixels = (pinfo->lcdc.h_back_porch +
+			pinfo->lcdc.h_front_porch +
+			pinfo->lcdc.h_pulse_width +
+			active_width) *
+		       (pinfo->lcdc.v_back_porch +
+			pinfo->lcdc.v_front_porch +
+			pinfo->lcdc.v_pulse_width +
+			pinfo->yres);
+
+	if (pclk && total_pixels)
+		fps = DIV_ROUND_CLOSEST(pclk, total_pixels);
+	else
+		fps = pinfo->panel_max_fps;
+
+	return fps;
+}
+
+/**
+ * mdss_panel_intf_type: - checks if a given intf type is primary
+ * @intf_val: panel interface type of the individual controller
+ *
+ * Individual controller queries with MDP to check if it is
+ * configured as the primary interface.
+ *
+ * returns a pointer to the configured structure mdss_panel_cfg
+ * to the controller that's configured as the primary panel interface.
+ * returns NULL on error or if @intf_val is not the configured
+ * controller.
+ */
+struct mdss_panel_cfg *mdss_panel_intf_type(int intf_val);
+
+/**
+ * mdss_is_ready() - checks if mdss is probed and ready
+ *
+ * Checks if mdss resources have been initialized
+ *
+ * returns true if mdss is ready, else returns false.
+ */
+bool mdss_is_ready(void);
+int mdss_rect_cmp(struct mdss_rect *rect1, struct mdss_rect *rect2);
+
+/**
+ * mdss_panel_override_te_params() - overrides TE params to enable SW TE
+ * @pinfo: panel info
+ */
+void mdss_panel_override_te_params(struct mdss_panel_info *pinfo);
+
+/**
+ * mdss_panel_dsc_parameters_calc: calculate DSC parameters
+ * @dsc: pointer to DSC structure associated with panel_info
+ */
+void mdss_panel_dsc_parameters_calc(struct dsc_desc *dsc);
+
+/**
+ * mdss_panel_dsc_update_pic_dim: update DSC structure with picture dimension
+ * @dsc: pointer to DSC structure associated with panel_info
+ * @pic_width: Picture width
+ * @pic_height: Picture height
+ */
+void mdss_panel_dsc_update_pic_dim(struct dsc_desc *dsc,
+	int pic_width, int pic_height);
+
+/**
+ * mdss_panel_dsc_initial_line_calc: update DSC initial line buffering
+ * @dsc: pointer to DSC structure associated with panel_info
+ * @enc_ip_width: uncompressed input width for DSC enc represented by @dsc
+ *              i.e.
+ *                 * 720 for full frame single_display_dual_lm: 1440x2560
+ *                 * 1080 for full frame dual_display_dual_lm: 2160x3840
+ *                 * 360 for partial frame single_display_dual_lm: 360x2560
+ */
+void mdss_panel_dsc_initial_line_calc(struct dsc_desc *dsc, int enc_ip_width);
+
+/**
+ * mdss_panel_dsc_pclk_param_calc: calculate DSC params related to DSI
+ * @dsc: pointer to DSC structure associated with panel_info
+ * @intf_width: Uncompressed width per interface
+ *              i.e.
+ *                 * 1440 for full frame single_display_dual_lm: 1440x2560
+ *                 * 1080 for full frame dual_display_dual_lm: 2160x3840
+ *                 * 720 for partial frame single_display_dual_lm: 720x2560
+ */
+void mdss_panel_dsc_pclk_param_calc(struct dsc_desc *dsc, int intf_width);
+
+/**
+ * mdss_panel_dsc_prepare_pps_buf - prepares Picture Parameter Set to be
+ *                                sent to panel
+ * @dsc: pointer to DSC structure associated with panel_info
+ * @buf: buffer that holds PPS
+ * @pps_id: pps_identifier
+ *
+ * returns length of the PPS buffer.
+ */
+int mdss_panel_dsc_prepare_pps_buf(struct dsc_desc *dsc, char *buf,
+	int pps_id);
+#ifdef CONFIG_FB_MSM_MDSS
+int mdss_panel_debugfs_init(struct mdss_panel_info *panel_info,
+		char const *panel_name);
+void mdss_panel_debugfs_cleanup(struct mdss_panel_info *panel_info);
+void mdss_panel_debugfsinfo_to_panelinfo(struct mdss_panel_info *panel_info);
+
+/*
+ * mdss_panel_info_from_timing() - populate panel info from panel timing
+ * @pt:		pointer to source panel timing
+ * @pinfo:	pointer to destination panel info
+ *
+ * Populates relevant data from panel timing into panel info
+ */
+void mdss_panel_info_from_timing(struct mdss_panel_timing *pt,
+		struct mdss_panel_info *pinfo);
+
+/**
+ * mdss_panel_get_timing_by_name() - return panel timing with matching name
+ * @pdata:	pointer to panel data struct containing list of panel timings
+ * @name:	name of the panel timing to be returned
+ *
+ * Looks through list of timings provided in panel data and returns pointer
+ * to panel timing matching it. If none is found, NULL is returned.
+ */
+struct mdss_panel_timing *mdss_panel_get_timing_by_name(
+		struct mdss_panel_data *pdata,
+		const char *name);
+#else
+static inline int mdss_panel_debugfs_init(
+		struct mdss_panel_info *panel_info,
+		char const *panel_name) { return 0; };
+static inline void mdss_panel_debugfs_cleanup(
+		struct mdss_panel_info *panel_info) { };
+static inline void mdss_panel_debugfsinfo_to_panelinfo(
+		struct mdss_panel_info *panel_info) { };
+static inline void mdss_panel_info_from_timing(struct mdss_panel_timing *pt,
+		struct mdss_panel_info *pinfo) { };
+static inline struct mdss_panel_timing *mdss_panel_get_timing_by_name(
+		struct mdss_panel_data *pdata,
+		const char *name) { return NULL; };
+#endif
+#endif /* MDSS_PANEL_H */
diff --git a/drivers/video/fbdev/msm/mdss_qpic.c b/drivers/video/fbdev/msm/mdss_qpic.c
new file mode 100644
index 0000000..3e0ca75
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_qpic.c
@@ -0,0 +1,820 @@
+/* Copyright (c) 2013-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/hrtimer.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
+#include <linux/semaphore.h>
+#include <linux/uaccess.h>
+#include <linux/bootmem.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk/msm-clk.h>
+
+#include <linux/msm-sps.h>
+#include <linux/msm-bus.h>
+
+#include "mdss_fb.h"
+#include "mdss_qpic.h"
+#include "mdss_qpic_panel.h"
+
+static int mdss_qpic_probe(struct platform_device *pdev);
+static int mdss_qpic_remove(struct platform_device *pdev);
+static void qpic_interrupt_en(u32 en);
+
+struct qpic_data_type *qpic_res;
+
+/* for debugging */
+static u32 use_bam = true;
+static u32 use_irq = true;
+static u32 use_vsync;
+
+static const struct of_device_id mdss_qpic_dt_match[] = {
+	{ .compatible = "qcom,mdss_qpic",},
+	{}
+};
+MODULE_DEVICE_TABLE(of, mdss_qpic_dt_match);
+
+static struct platform_driver mdss_qpic_driver = {
+	.probe = mdss_qpic_probe,
+	.remove = mdss_qpic_remove,
+	.shutdown = NULL,
+	.driver = {
+		/*
+		 * Simulate mdp hw
+		 */
+		.name = "mdp",
+		.of_match_table = mdss_qpic_dt_match,
+	},
+};
+
+/* Gate both QPIC clocks; disable in the reverse of the enable order. */
+static void mdss_qpic_clk_ctrl(bool enable)
+{
+	if (!enable) {
+		if (qpic_res->qpic_a_clk)
+			clk_disable_unprepare(qpic_res->qpic_a_clk);
+		if (qpic_res->qpic_clk)
+			clk_disable_unprepare(qpic_res->qpic_clk);
+		return;
+	}
+
+	if (qpic_res->qpic_clk)
+		clk_prepare_enable(qpic_res->qpic_clk);
+	if (qpic_res->qpic_a_clk)
+		clk_prepare_enable(qpic_res->qpic_a_clk);
+}
+
+/* Power-on path: clocks come up first, then the panel itself. */
+int qpic_on(struct msm_fb_data_type *mfd)
+{
+	int rc;
+
+	mdss_qpic_clk_ctrl(true);
+	rc = mdss_qpic_panel_on(qpic_res->panel_data, &qpic_res->panel_io);
+	qpic_res->qpic_is_on = true;
+	return rc;
+}
+
+/* Power-off path: panel first, then interrupts, then clocks. */
+int qpic_off(struct msm_fb_data_type *mfd)
+{
+	int rc;
+
+	rc = mdss_qpic_panel_off(qpic_res->panel_data, &qpic_res->panel_io);
+	if (use_irq)
+		qpic_interrupt_en(false);
+
+	mdss_qpic_clk_ctrl(false);
+	qpic_res->qpic_is_on = false;
+	return rc;
+}
+
+/* Apply a bus-bandwidth vote; a zero handle means voting is disabled. */
+static int msm_qpic_bus_set_vote(u32 vote)
+{
+	int rc;
+
+	if (!qpic_res->bus_handle)
+		return 0;
+
+	rc = msm_bus_scale_client_update_request(qpic_res->bus_handle, vote);
+	if (rc)
+		pr_err("msm_bus_scale_client_update_request() failed, bus_handle=0x%x, vote=%d, err=%d\n",
+			qpic_res->bus_handle, vote, rc);
+	return rc;
+}
+
+/*
+ * mdss_qpic_pan_display() - push the current framebuffer to the panel
+ * @mfd: framebuffer device data (NULL-checked)
+ *
+ * Computes the pan offset from xoffset/yoffset, votes for bus
+ * bandwidth, makes sure the panel is on, and sends one full frame.
+ */
+static void mdss_qpic_pan_display(struct msm_fb_data_type *mfd)
+{
+
+	struct fb_info *fbi;
+	u32 offset, fb_offset, size;
+	int bpp;
+
+	if (!mfd) {
+		pr_err("%s: mfd is NULL!", __func__);
+		return;
+	}
+
+	if (!qpic_res->qpic_is_on) {
+		pr_err("%s: Failed since panel is not ON\n", __func__);
+		return;
+	}
+
+	fbi = mfd->fbi;
+
+	bpp = fbi->var.bits_per_pixel / 8;
+	/* byte offset of the visible pane inside the virtual framebuffer */
+	offset = fbi->var.xoffset * bpp +
+		 fbi->var.yoffset * fbi->fix.line_length;
+
+	if (offset > fbi->fix.smem_len) {
+		pr_err("invalid fb offset=%u total length=%u\n",
+		       offset, fbi->fix.smem_len);
+		return;
+	}
+	/*
+	 * The BAM path consumes a physical address, the SW path a CPU
+	 * pointer.  NOTE(review): both are truncated to u32, so this only
+	 * works on 32-bit kernels - confirm before reuse on 64-bit.
+	 */
+	if (use_bam)
+		fb_offset = (u32)fbi->fix.smem_start + offset;
+	else
+		fb_offset = (u32)mfd->fbi->screen_base + offset;
+
+	msm_qpic_bus_set_vote(1);
+	mdss_qpic_panel_on(qpic_res->panel_data, &qpic_res->panel_io);
+	size = fbi->var.xres * fbi->var.yres * bpp;
+
+	qpic_send_frame(0, 0, fbi->var.xres - 1, fbi->var.yres - 1,
+		(u32 *)fb_offset, size);
+	msm_qpic_bus_set_vote(0);
+}
+
+/*
+ * mdss_qpic_alloc_fb_mem() - allocate framebuffer and command buffers
+ * @mfd: framebuffer device data
+ *
+ * Allocates (once) a coherent framebuffer sized to the virtual
+ * resolution and a small write-combined buffer for command packets,
+ * then publishes the addresses through @mfd.  Only fb index 0 gets
+ * backing memory.
+ *
+ * Return: 0 on success, -EINVAL if the driver is not initialized,
+ * -ENOMEM on allocation failure.
+ */
+int mdss_qpic_alloc_fb_mem(struct msm_fb_data_type *mfd)
+{
+	size_t size;
+	u32 yres = mfd->fbi->var.yres_virtual;
+
+	size = PAGE_ALIGN(mfd->fbi->fix.line_length * yres);
+
+	if (!qpic_res->res_init)
+		return -EINVAL;
+
+	/* secondary framebuffers get no backing memory */
+	if (mfd->index != 0) {
+		mfd->fbi->fix.smem_start = 0;
+		mfd->fbi->screen_base = NULL;
+		mfd->fbi->fix.smem_len = 0;
+		mfd->iova = 0;
+		return 0;
+	}
+
+	if (!qpic_res->fb_virt) {
+		qpic_res->fb_virt = (void *)dmam_alloc_coherent(
+						&qpic_res->pdev->dev,
+						size,
+						&qpic_res->fb_phys,
+						GFP_KERNEL);
+		/* %zu matches size_t; the previous %d was a format mismatch */
+		pr_debug("%s size=%zu vir_addr=%x phys_addr=%x",
+			__func__, size, (int)qpic_res->fb_virt,
+			(int)qpic_res->fb_phys);
+		if (!qpic_res->fb_virt) {
+			pr_err("%s fb allocation failed", __func__);
+			return -ENOMEM;
+		}
+	}
+
+	if (!qpic_res->cmd_buf_virt) {
+		qpic_res->cmd_buf_virt = dma_alloc_writecombine(
+			NULL, QPIC_MAX_CMD_BUF_SIZE,
+			&qpic_res->cmd_buf_phys, GFP_KERNEL);
+		pr_debug("%s cmd_buf virt=%x phys=%x", __func__,
+			(int)qpic_res->cmd_buf_virt,
+			qpic_res->cmd_buf_phys);
+		if (!qpic_res->cmd_buf_virt) {
+			pr_err("%s cmd buf allocation failed", __func__);
+			return -ENOMEM;
+		}
+	}
+	mfd->fbi->fix.smem_start = qpic_res->fb_phys;
+	mfd->fbi->screen_base = qpic_res->fb_virt;
+	mfd->fbi->fix.smem_len = size;
+	mfd->iova = 0;
+	return 0;
+}
+
+/* Line stride in bytes: width times bytes-per-pixel, no padding. */
+u32 mdss_qpic_fb_stride(u32 fb_index, u32 xres, int bpp)
+{
+	return bpp * xres;
+}
+
+/* Hook the QPIC display path into the generic MDSS fb interface. */
+int mdss_qpic_overlay_init(struct msm_fb_data_type *mfd)
+{
+	struct msm_mdp_interface *mdp = &mfd->mdp;
+
+	/* QPIC pushes frames itself; no histogram/cursor/ioctl/kickoff */
+	mdp->dma_fnc = mdss_qpic_pan_display;
+	mdp->on_fnc = qpic_on;
+	mdp->off_fnc = qpic_off;
+	mdp->do_histogram = NULL;
+	mdp->cursor_update = NULL;
+	mdp->ioctl_handler = NULL;
+	mdp->kickoff_fnc = NULL;
+	return 0;
+}
+
+/*
+ * Create and register the "mdss_fb" platform device that binds the
+ * framebuffer core to @pdata.  Called by panel drivers at probe time.
+ */
+int qpic_register_panel(struct mdss_panel_data *pdata)
+{
+	struct platform_device *fb_pdev;
+	int ret;
+
+	if (!qpic_res)
+		return -ENODEV;
+
+	fb_pdev = platform_device_alloc("mdss_fb", pdata->panel_info.pdest);
+	if (!fb_pdev) {
+		pr_err("unable to allocate mdss_fb device\n");
+		return -ENOMEM;
+	}
+
+	fb_pdev->dev.platform_data = pdata;
+	ret = platform_device_add(fb_pdev);
+	if (ret) {
+		platform_device_put(fb_pdev);
+		pr_err("unable to probe mdss_fb device (%d)\n", ret);
+		return ret;
+	}
+
+	qpic_res->panel_data = pdata;
+	return 0;
+}
+
+/*
+ * qpic_init_sps() - set up the SPS/BAM pipe used for pixel transfers
+ * @pdev:      QPIC platform device (owns the DMA allocations)
+ * @end_point: endpoint state to populate
+ *
+ * One-time setup: locates (or registers) the BAM whose registers sit
+ * at qpic_phys + 0x4000, opens a system-memory-to-BAM pipe on dest
+ * pipe index 6, allocates the descriptor FIFO and registers an EOT
+ * event that completes end_point->completion.
+ *
+ * Return: 0 on success or if already initialized, negative errno
+ * otherwise.
+ */
+int qpic_init_sps(struct platform_device *pdev,
+				struct qpic_sps_endpt *end_point)
+{
+	int rc = 0;
+	struct sps_pipe *pipe_handle;
+	struct sps_connect *sps_config = &end_point->config;
+	struct sps_register_event *sps_event = &end_point->bam_event;
+	struct sps_bam_props bam = {0};
+	unsigned long bam_handle = 0;
+
+	if (qpic_res->sps_init)
+		return 0;
+	bam.phys_addr = qpic_res->qpic_phys + 0x4000;
+	bam.virt_addr = qpic_res->qpic_base + 0x4000;
+	/* NOTE(review): BAM irq assumed to be 4 below the LCDC irq - confirm */
+	bam.irq = qpic_res->irq - 4;
+	bam.manage = SPS_BAM_MGR_DEVICE_REMOTE | SPS_BAM_MGR_MULTI_EE;
+
+	/* reuse the BAM if it is already registered, else register it */
+	rc = sps_phy2h(bam.phys_addr, &bam_handle);
+	if (rc)
+		rc = sps_register_bam_device(&bam, &bam_handle);
+	if (rc) {
+		pr_err("%s bam_handle is NULL", __func__);
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	pipe_handle = sps_alloc_endpoint();
+	if (!pipe_handle) {
+		pr_err("sps_alloc_endpoint() failed\n");
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	rc = sps_get_config(pipe_handle, sps_config);
+	if (rc) {
+		pr_err("sps_get_config() failed %d\n", rc);
+		goto free_endpoint;
+	}
+
+	/* WRITE CASE: source - system memory; destination - BAM */
+	sps_config->source = SPS_DEV_HANDLE_MEM;
+	sps_config->destination = bam_handle;
+	sps_config->mode = SPS_MODE_DEST;
+	sps_config->dest_pipe_index = 6;
+
+	sps_config->options = SPS_O_AUTO_ENABLE | SPS_O_EOT;
+	sps_config->lock_group = 0;
+	/*
+	 * Descriptor FIFO is a cyclic FIFO. If 64 descriptors
+	 * are allowed to be submitted before we get any ack for any of them,
+	 * the descriptor FIFO size should be: (SPS_MAX_DESC_NUM + 1) *
+	 * sizeof(struct sps_iovec).
+	 */
+	sps_config->desc.size = (64) *
+					sizeof(struct sps_iovec);
+	sps_config->desc.base = dmam_alloc_coherent(&pdev->dev,
+					sps_config->desc.size,
+					&sps_config->desc.phys_base,
+					GFP_KERNEL);
+	if (!sps_config->desc.base) {
+		pr_err("dmam_alloc_coherent() failed for size %x\n",
+				sps_config->desc.size);
+		rc = -ENOMEM;
+		goto free_endpoint;
+	}
+	memset(sps_config->desc.base, 0x00, sps_config->desc.size);
+
+	rc = sps_connect(pipe_handle, sps_config);
+	if (rc) {
+		pr_err("sps_connect() failed %d\n", rc);
+		goto free_endpoint;
+	}
+
+	/* wake qpic_send_pkt_bam() when a transfer finishes (EOT) */
+	init_completion(&end_point->completion);
+	sps_event->mode = SPS_TRIGGER_WAIT;
+	sps_event->options = SPS_O_EOT;
+	sps_event->xfer_done = &end_point->completion;
+	sps_event->user = (void *)qpic_res;
+
+	rc = sps_register_event(pipe_handle, sps_event);
+	if (rc) {
+		pr_err("sps_register_event() failed %d\n", rc);
+		goto sps_disconnect;
+	}
+
+	end_point->handle = pipe_handle;
+	qpic_res->sps_init = true;
+	goto out;
+sps_disconnect:
+	sps_disconnect(pipe_handle);
+free_endpoint:
+	sps_free_endpoint(pipe_handle);
+out:
+	return rc;
+}
+
+/*
+ * mdss_qpic_reset() - soft-reset the LCDC block and wait for it to settle
+ *
+ * Writes the reset bit, then polls STTS bit 8 (reset done) for up to
+ * QPIC_MAX_VSYNC_WAIT_TIME ms, logging an error on timeout.
+ */
+void mdss_qpic_reset(void)
+{
+	u32 time_end;
+
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_RESET, 1 << 0);
+	/* wait 100 us after reset as suggested by hw */
+	usleep_range(100, 110);
+	time_end = (u32)ktime_to_ms(ktime_get()) +
+		QPIC_MAX_VSYNC_WAIT_TIME;
+	while (((QPIC_INP(QPIC_REG_QPIC_LCDC_STTS) & (1 << 8)) == 0)) {
+		if ((u32)ktime_to_ms(ktime_get()) > time_end) {
+			pr_err("%s reset not finished", __func__);
+			break;
+		}
+		/* yield 100 us for next polling by experiment*/
+		usleep_range(100, 110);
+	}
+}
+
+/*
+ * qpic_interrupt_en() - enable or disable the QPIC LCDC interrupt
+ * @en: nonzero to enable
+ *
+ * Pending status is cleared in both directions.  On enable, the
+ * completion used by the wait helpers is (re)initialized before the
+ * irq line is unmasked; on disable, the block-level enables are
+ * cleared first so no new interrupt fires while the line is masked.
+ */
+static void qpic_interrupt_en(u32 en)
+{
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_IRQ_CLR, 0xff);
+	if (en) {
+		if (!qpic_res->irq_ena) {
+			init_completion(&qpic_res->fifo_eof_comp);
+			qpic_res->irq_ena = true;
+			enable_irq(qpic_res->irq);
+		}
+	} else {
+		QPIC_OUTP(QPIC_REG_QPIC_LCDC_IRQ_EN, 0);
+		disable_irq(qpic_res->irq);
+		qpic_res->irq_ena = false;
+	}
+}
+
+/*
+ * IRQ handler: ack everything, mask further block interrupts, and
+ * wake any waiter if the EOF (bit 2) or FIFO-threshold (bit 4)
+ * condition was signalled.
+ */
+static irqreturn_t qpic_irq_handler(int irq, void *ptr)
+{
+	u32 data;
+
+	data = QPIC_INP(QPIC_REG_QPIC_LCDC_IRQ_STTS);
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_IRQ_CLR, 0xff);
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_IRQ_EN, 0);
+
+	if (data & ((1 << 2) | (1 << 4)))
+		complete(&qpic_res->fifo_eof_comp);
+	return IRQ_HANDLED;
+}
+
+/*
+ * qpic_send_pkt_bam() - send a command/pixel packet through the BAM pipe
+ * @cmd:   MIPI opcode, programmed into LCDC_CFG2
+ * @len:   payload length in bytes
+ * @param: payload; non-pixel commands are staged in cmd_buf, pixel
+ *         writes already carry a physical address
+ *
+ * Splits the payload into <=0x7FF0-byte descriptors, flags EOT on the
+ * last one and waits for the transfer-done completion.
+ *
+ * Return: 0 on success, negative errno on submit failure or timeout.
+ */
+static int qpic_send_pkt_bam(u32 cmd, u32 len, u8 *param)
+{
+	int  ret = 0;
+	u32 phys_addr, cfg2, block_len, flags;
+
+	if ((cmd != OP_WRITE_MEMORY_START) &&
+		(cmd != OP_WRITE_MEMORY_CONTINUE)) {
+		memcpy((u8 *)qpic_res->cmd_buf_virt, param, len);
+		phys_addr = qpic_res->cmd_buf_phys;
+	} else {
+		/* NOTE(review): truncates the pointer to u32 - confirm
+		 * callers always pass a 32-bit physical address here.
+		 */
+		phys_addr = (u32)param;
+	}
+	cfg2 = QPIC_INP(QPIC_REG_QPIC_LCDC_CFG2);
+	cfg2 &= ~0xFF;
+	cfg2 |= cmd;	/* select the command opcode */
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_CFG2, cfg2);
+	block_len = 0x7FF0;
+	while (len > 0)  {
+		if (len <= 0x7FF0) {
+			flags = SPS_IOVEC_FLAG_EOT;
+			block_len = len;
+		} else {
+			flags = 0;
+		}
+		ret = sps_transfer_one(qpic_res->qpic_endpt.handle,
+				phys_addr, block_len, NULL, flags);
+		if (ret) {
+			/*
+			 * Previously this error was only logged and then
+			 * overwritten by the wait below; bail out instead,
+			 * since the completion would never fire.
+			 */
+			pr_err("failed to submit command %x ret %d\n",
+				cmd, ret);
+			return ret;
+		}
+		phys_addr += block_len;
+		len -= block_len;
+	}
+	ret = wait_for_completion_timeout(
+		&qpic_res->qpic_endpt.completion,
+		msecs_to_jiffies(100 * 4));
+	if (ret <= 0) {
+		pr_err("%s timeout %x", __func__, ret);
+		/* previously fell through and returned 0 (success) here */
+		return -ETIMEDOUT;
+	}
+	return 0;
+}
+
+/* Log the interesting QPIC LCDC registers for debugging. */
+void qpic_dump_reg(void)
+{
+	static const struct {
+		const char *name;
+		u32 off;
+	} regs[] = {
+		{ "QPIC_REG_QPIC_LCDC_CTRL", QPIC_REG_QPIC_LCDC_CTRL },
+		{ "QPIC_REG_QPIC_LCDC_CMD_DATA_CYCLE_CNT",
+			QPIC_REG_QPIC_LCDC_CMD_DATA_CYCLE_CNT },
+		{ "QPIC_REG_QPIC_LCDC_CFG0", QPIC_REG_QPIC_LCDC_CFG0 },
+		{ "QPIC_REG_QPIC_LCDC_CFG1", QPIC_REG_QPIC_LCDC_CFG1 },
+		{ "QPIC_REG_QPIC_LCDC_CFG2", QPIC_REG_QPIC_LCDC_CFG2 },
+		{ "QPIC_REG_QPIC_LCDC_IRQ_EN", QPIC_REG_QPIC_LCDC_IRQ_EN },
+		{ "QPIC_REG_QPIC_LCDC_IRQ_STTS",
+			QPIC_REG_QPIC_LCDC_IRQ_STTS },
+		{ "QPIC_REG_QPIC_LCDC_STTS", QPIC_REG_QPIC_LCDC_STTS },
+		{ "QPIC_REG_QPIC_LCDC_FIFO_SOF",
+			QPIC_REG_QPIC_LCDC_FIFO_SOF },
+	};
+	int i;
+
+	pr_info("%s\n", __func__);
+	for (i = 0; i < ARRAY_SIZE(regs); i++)
+		pr_info("%s = %x\n", regs[i].name, QPIC_INP(regs[i].off));
+}
+
+/*
+ * qpic_wait_for_fifo() - wait until the output FIFO has drained
+ *
+ * STTS[5:0] holds the FIFO fullness; zero means ready.  With
+ * interrupts enabled this arms the FIFO irq (bit 4) and sleeps on
+ * fifo_eof_comp; otherwise it polls every ~10us for up to
+ * QPIC_MAX_VSYNC_WAIT_TIME ms.
+ *
+ * Return: 0 when ready, -ETIMEDOUT (irq mode) or -EBUSY (poll mode)
+ * on timeout.
+ */
+static int qpic_wait_for_fifo(void)
+{
+	u32 data, time_end;
+	int ret = 0;
+
+	if (use_irq) {
+		data = QPIC_INP(QPIC_REG_QPIC_LCDC_STTS);
+		data &= 0x3F;
+		if (data == 0)
+			return ret;
+		reinit_completion(&qpic_res->fifo_eof_comp);
+		QPIC_OUTP(QPIC_REG_QPIC_LCDC_IRQ_EN, (1 << 4));
+		ret = wait_for_completion_timeout(&qpic_res->fifo_eof_comp,
+				msecs_to_jiffies(QPIC_MAX_VSYNC_WAIT_TIME));
+		if (ret > 0) {
+			ret = 0;
+		} else {
+			pr_err("%s timeout %x\n", __func__, ret);
+			ret = -ETIMEDOUT;
+		}
+		QPIC_OUTP(QPIC_REG_QPIC_LCDC_IRQ_EN, 0);
+	} else {
+		time_end = (u32)ktime_to_ms(ktime_get()) +
+			QPIC_MAX_VSYNC_WAIT_TIME;
+		while (1) {
+			data = QPIC_INP(QPIC_REG_QPIC_LCDC_STTS);
+			data &= 0x3F;
+			if (data == 0)
+				break;
+			/* yield 10 us for next polling by experiment*/
+			usleep_range(10, 11);
+			if (ktime_to_ms(ktime_get()) > time_end) {
+				pr_err("%s time out", __func__);
+				ret = -EBUSY;
+				break;
+			}
+		}
+	}
+	return ret;
+}
+
+/*
+ * qpic_wait_for_eof() - wait for the end-of-frame indication
+ *
+ * Same structure as qpic_wait_for_fifo() but keyed off IRQ_STTS
+ * bit 2; dumps the register state on a polling timeout to aid
+ * debugging.
+ *
+ * Return: 0 on EOF, -ETIMEDOUT (irq mode) or -EBUSY (poll mode)
+ * on timeout.
+ */
+static int qpic_wait_for_eof(void)
+{
+	u32 data, time_end;
+	int ret = 0;
+
+	if (use_irq) {
+		data = QPIC_INP(QPIC_REG_QPIC_LCDC_IRQ_STTS);
+		if (data & (1 << 2))
+			return ret;
+		reinit_completion(&qpic_res->fifo_eof_comp);
+		QPIC_OUTP(QPIC_REG_QPIC_LCDC_IRQ_EN, (1 << 2));
+		ret = wait_for_completion_timeout(&qpic_res->fifo_eof_comp,
+				msecs_to_jiffies(QPIC_MAX_VSYNC_WAIT_TIME));
+		if (ret > 0) {
+			ret = 0;
+		} else {
+			pr_err("%s timeout %x\n", __func__, ret);
+			ret = -ETIMEDOUT;
+		}
+		QPIC_OUTP(QPIC_REG_QPIC_LCDC_IRQ_EN, 0);
+	} else {
+		time_end = (u32)ktime_to_ms(ktime_get()) +
+			QPIC_MAX_VSYNC_WAIT_TIME;
+		while (1) {
+			data = QPIC_INP(QPIC_REG_QPIC_LCDC_IRQ_STTS);
+			if (data & (1 << 2))
+				break;
+			/* yield 10 us for next polling by experiment*/
+			usleep_range(10, 11);
+			if (ktime_to_ms(ktime_get()) > time_end) {
+				pr_err("%s wait for eof time out\n", __func__);
+				qpic_dump_reg();
+				ret = -EBUSY;
+				break;
+			}
+		}
+	}
+	return ret;
+}
+
+/*
+ * qpic_send_pkt_sw() - send a packet by programming the FIFO directly
+ * @cmd:   MIPI opcode
+ * @len:   payload length in bytes
+ * @param: payload bytes (may be NULL for short commands)
+ *
+ * Short (<=4 byte) payloads go through the command/data registers;
+ * anything longer is streamed through the 16-entry write FIFO, with
+ * transparent mode enabled for non-pixel commands.
+ *
+ * Return: 0 on success, negative errno on FIFO/EOF wait timeout.
+ */
+static int qpic_send_pkt_sw(u32 cmd, u32 len, u8 *param)
+{
+	u32 bytes_left, space, data, cfg2;
+	int i, ret = 0;
+
+	if (len <= 4) {
+		len = (len + 3) / 4; /* len in dwords */
+		data = 0;
+		if (param) {
+			/*
+			 * NOTE(review): len was just converted to dwords,
+			 * so a 2-4 byte payload packs only param[0] here -
+			 * confirm this matches the hardware expectation.
+			 */
+			for (i = 0; i < len; i++)
+				data |= (u32)param[i] << (8 * i);
+		}
+		QPIC_OUTP(QPIC_REG_QPIC_LCDC_CMD_DATA_CYCLE_CNT, len);
+		QPIC_OUTP(QPIC_REG_LCD_DEVICE_CMD0 + (4 * cmd), data);
+		return 0;
+	}
+
+	/* the FIFO is fed 16 bits at a time, so round len up to even */
+	if ((len & 0x1) != 0) {
+		pr_debug("%s: number of bytes needs be even", __func__);
+		len = (len + 1) & (~0x1);
+	}
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_IRQ_CLR, 0xff);
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_CMD_DATA_CYCLE_CNT, 0);
+	cfg2 = QPIC_INP(QPIC_REG_QPIC_LCDC_CFG2);
+	if ((cmd != OP_WRITE_MEMORY_START) &&
+		(cmd != OP_WRITE_MEMORY_CONTINUE))
+		cfg2 |= (1 << 24); /* transparent mode */
+	else
+		cfg2 &= ~(1 << 24);
+
+	cfg2 &= ~0xFF;
+	cfg2 |= cmd;
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_CFG2, cfg2);
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_FIFO_SOF, 0x0);
+	bytes_left = len;
+
+	while (bytes_left > 0) {
+		/* block until the 16-entry FIFO can take another burst */
+		ret = qpic_wait_for_fifo();
+		if (ret)
+			goto exit_send_cmd_sw;
+
+		space = 16;
+
+		while ((space > 0) && (bytes_left > 0)) {
+			/* write to fifo */
+			if (bytes_left >= 4) {
+				QPIC_OUTP(QPIC_REG_QPIC_LCDC_FIFO_DATA_PORT0,
+					*(u32 *)param);
+				param += 4;
+				bytes_left -= 4;
+				space--;
+			} else if (bytes_left == 2) {
+				/* 2-byte tail; space is not decremented,
+				 * harmless since bytes_left reaches 0 here
+				 */
+				QPIC_OUTPW(QPIC_REG_QPIC_LCDC_FIFO_DATA_PORT0,
+					*(u16 *)param);
+				bytes_left -= 2;
+			}
+		}
+	}
+	/* finished */
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_FIFO_EOF, 0x0);
+	ret = qpic_wait_for_eof();
+exit_send_cmd_sw:
+	cfg2 &= ~(1 << 24);
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_CFG2, cfg2);
+	return ret;
+}
+
+/*
+ * Dispatch a packet: bulk pixel writes go through the BAM when it is
+ * enabled, everything else uses the slower register/FIFO path.
+ */
+int qpic_send_pkt(u32 cmd, u8 *param, u32 len)
+{
+	bool pixel_data = (cmd == OP_WRITE_MEMORY_START) ||
+			  (cmd == OP_WRITE_MEMORY_CONTINUE);
+
+	if (use_bam && pixel_data)
+		return qpic_send_pkt_bam(cmd, len, param);
+
+	return qpic_send_pkt_sw(cmd, len, param);
+}
+
+/*
+ * mdss_qpic_init() - reset and (re)program the QPIC LCDC block
+ *
+ * Resets the controller, programs thresholds and enables the LCD,
+ * requests the irq on first use, sets the pixel format, and
+ * optionally enables BAM mode and TE/vsync.
+ *
+ * Return: 0, or the devm_request_irq() error; on irq failure the
+ * driver falls back to polling mode, so callers treat this as
+ * best-effort.
+ */
+int mdss_qpic_init(void)
+{
+	int ret = 0;
+	u32 data;
+
+	mdss_qpic_reset();
+
+	pr_info("%s version=%x", __func__, QPIC_INP(QPIC_REG_LCDC_VERSION));
+	data = QPIC_INP(QPIC_REG_QPIC_LCDC_CTRL);
+	/* clear vsync wait , bam mode = 0*/
+	data &= ~(3 << 0);
+	data &= ~(0x1f << 3);
+	data |= (1 << 3); /* threshold */
+	data |= (1 << 8); /* lcd_en */
+	data &= ~(0x1f << 9);
+	data |= (1 << 9); /* threshold */
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_CTRL, data);
+
+	if (use_irq && (!qpic_res->irq_requested)) {
+		/* NOTE(review): IRQF_DISABLED is deprecated (a no-op on
+		 * modern kernels) - consider dropping the flag.
+		 */
+		ret = devm_request_irq(&qpic_res->pdev->dev,
+			qpic_res->irq, qpic_irq_handler,
+			IRQF_DISABLED,	"QPIC", qpic_res);
+		if (ret) {
+			pr_err("qpic request_irq() failed!\n");
+			use_irq = false;
+		} else {
+			disable_irq(qpic_res->irq);
+		}
+		qpic_res->irq_requested = true;
+	}
+
+	qpic_interrupt_en(use_irq);
+
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_CFG0, 0x02108501);
+	data = QPIC_INP(QPIC_REG_QPIC_LCDC_CFG2);
+	data &= ~(0xFFF);
+	data |= 0x200; /* XRGB */
+	data |= 0x2C;
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_CFG2, data);
+
+	if (use_bam) {
+		qpic_init_sps(qpic_res->pdev, &qpic_res->qpic_endpt);
+		data = QPIC_INP(QPIC_REG_QPIC_LCDC_CTRL);
+		data |= (1 << 1);
+		QPIC_OUTP(QPIC_REG_QPIC_LCDC_CTRL, data);
+	}
+	/* TE enable */
+	if (use_vsync) {
+		data = QPIC_INP(QPIC_REG_QPIC_LCDC_CTRL);
+		data |= (1 << 0);
+		QPIC_OUTP(QPIC_REG_QPIC_LCDC_CTRL, data);
+	}
+
+	return ret;
+}
+
+/*
+ * Read back up to four bytes of command response data from the panel.
+ * Requests larger than a single register return 0.
+ */
+u32 qpic_read_data(u32 cmd_index, u32 size)
+{
+	if (size > 4)
+		return 0;
+
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_CMD_DATA_CYCLE_CNT, size);
+	return QPIC_INP(QPIC_REG_LCD_DEVICE_CMD0 + (cmd_index * 4));
+}
+
+/* Register a bus-scaling client described by the device tree. */
+static int msm_qpic_bus_register(struct platform_device *pdev)
+{
+	struct msm_bus_scale_pdata *pdata;
+
+	pdata = msm_bus_cl_get_pdata(pdev);
+	if (!pdata) {
+		pr_err("msm_bus_cl_get_pdata failed\n");
+		return -EINVAL;
+	}
+
+	qpic_res->bus_handle = msm_bus_scale_register_client(pdata);
+	if (!qpic_res->bus_handle) {
+		pr_err("msm_bus_scale_register_client failed\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * mdss_qpic_probe() - bind the QPIC controller device
+ * @pdev: platform device (device tree only)
+ *
+ * Maps the register space, fetches irq and clocks, initializes panel
+ * I/O resources and registers the QPIC mdp interface with the fb core.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int mdss_qpic_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	int rc = 0;
+	static struct msm_mdp_interface qpic_interface = {
+		.init_fnc = mdss_qpic_overlay_init,
+		.fb_mem_alloc_fnc = mdss_qpic_alloc_fb_mem,
+		.fb_stride = mdss_qpic_fb_stride,
+	};
+
+	if (!pdev->dev.of_node) {
+		pr_err("qpic driver only supports device tree probe\n");
+		return -ENOTSUPP;
+	}
+
+	if (!qpic_res)
+		qpic_res = devm_kzalloc(&pdev->dev,
+			sizeof(*qpic_res), GFP_KERNEL);
+
+	if (!qpic_res)
+		return -ENOMEM;
+
+	if (qpic_res->res_init) {
+		pr_err("qpic already initialized\n");
+		return -EINVAL;
+	}
+
+	pdev->id = 0;
+
+	qpic_res->pdev = pdev;
+	platform_set_drvdata(pdev, qpic_res);
+
+	res = platform_get_resource_byname(pdev,
+		IORESOURCE_MEM, "qpic_base");
+	if (!res) {
+		pr_err("unable to get QPIC reg base address\n");
+		rc = -ENOMEM;
+		goto probe_done;
+	}
+
+	qpic_res->qpic_reg_size = resource_size(res);
+	qpic_res->qpic_base = devm_ioremap(&pdev->dev, res->start,
+					qpic_res->qpic_reg_size);
+	if (unlikely(!qpic_res->qpic_base)) {
+		pr_err("unable to map MDSS QPIC base\n");
+		rc = -ENOMEM;
+		goto probe_done;
+	}
+	qpic_res->qpic_phys = res->start;
+	pr_info("MDSS QPIC HW Base phy_Address=0x%x virt=0x%x\n",
+		(int) res->start,
+		(int) qpic_res->qpic_base);
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res) {
+		pr_err("unable to get QPIC irq\n");
+		rc = -ENOMEM;
+		goto probe_done;
+	}
+
+	/*
+	 * The clocks are optional, but mdss_qpic_clk_ctrl() tests for
+	 * presence with a NULL check.  clk_get() returns a non-NULL
+	 * ERR_PTR on failure, so normalize errors to NULL here to avoid
+	 * ever handing an ERR_PTR to clk_prepare_enable().
+	 */
+	qpic_res->qpic_a_clk = clk_get(&pdev->dev, "core_a_clk");
+	if (IS_ERR(qpic_res->qpic_a_clk)) {
+		pr_err("%s: Can't find core_a_clk", __func__);
+		qpic_res->qpic_a_clk = NULL;
+	}
+
+	qpic_res->qpic_clk = clk_get(&pdev->dev, "core_clk");
+	if (IS_ERR(qpic_res->qpic_clk)) {
+		pr_err("%s: Can't find core_clk", __func__);
+		qpic_res->qpic_clk = NULL;
+	}
+
+	qpic_res->irq = res->start;
+	qpic_res->res_init = true;
+
+	mdss_qpic_panel_io_init(pdev, &qpic_res->panel_io);
+
+	rc = mdss_fb_register_mdp_instance(&qpic_interface);
+	if (rc)
+		pr_err("unable to register QPIC instance\n");
+
+	msm_qpic_bus_register(pdev);
+probe_done:
+	return rc;
+}
+
+/* Undo probe-time bus registration; devm resources clean up the rest. */
+static int mdss_qpic_remove(struct platform_device *pdev)
+{
+	if (qpic_res->bus_handle) {
+		msm_bus_scale_unregister_client(qpic_res->bus_handle);
+		qpic_res->bus_handle = 0;
+	}
+	return 0;
+}
+
+/* Module entry: register the QPIC platform driver. */
+static int __init mdss_qpic_driver_init(void)
+{
+	int rc = platform_driver_register(&mdss_qpic_driver);
+
+	if (rc)
+		pr_err("mdss_qpic_register_driver() failed!\n");
+	return rc;
+}
+
+module_init(mdss_qpic_driver_init);
+
+
diff --git a/drivers/video/fbdev/msm/mdss_qpic.h b/drivers/video/fbdev/msm/mdss_qpic.h
new file mode 100644
index 0000000..5fc7f9d
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_qpic.h
@@ -0,0 +1,103 @@
+/* Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_QPIC_H
+#define MDSS_QPIC_H
+
+#include <linux/list.h>
+#include <linux/msm-sps.h>
+
+#include <linux/pinctrl/consumer.h>
+#include "mdss_panel.h"
+#include "mdss_qpic_panel.h"
+
+#define QPIC_REG_QPIC_LCDC_CTRL				0x22000
+#define QPIC_REG_LCDC_VERSION				0x22004
+#define QPIC_REG_QPIC_LCDC_IRQ_EN			0x22008
+#define QPIC_REG_QPIC_LCDC_IRQ_STTS			0x2200C
+#define QPIC_REG_QPIC_LCDC_IRQ_CLR			0x22010
+#define QPIC_REG_QPIC_LCDC_STTS				0x22014
+#define QPIC_REG_QPIC_LCDC_CMD_DATA_CYCLE_CNT	0x22018
+#define QPIC_REG_QPIC_LCDC_CFG0				0x22020
+#define QPIC_REG_QPIC_LCDC_CFG1				0x22024
+#define QPIC_REG_QPIC_LCDC_CFG2				0x22028
+#define QPIC_REG_QPIC_LCDC_RESET			0x2202C
+#define QPIC_REG_QPIC_LCDC_FIFO_SOF			0x22100
+#define QPIC_REG_LCD_DEVICE_CMD0			0x23000
+#define QPIC_REG_QPIC_LCDC_FIFO_DATA_PORT0	0x22140
+#define QPIC_REG_QPIC_LCDC_FIFO_EOF			0x22180
+
+#define QPIC_OUTP(off, data) \
+	writel_relaxed((data), qpic_res->qpic_base + (off))
+#define QPIC_OUTPW(off, data) \
+	writew_relaxed((data), qpic_res->qpic_base + (off))
+#define QPIC_INP(off) \
+	readl_relaxed(qpic_res->qpic_base + (off))
+
+#define QPIC_MAX_VSYNC_WAIT_TIME			500
+#define QPIC_MAX_CMD_BUF_SIZE				512
+
+int mdss_qpic_init(void);
+int qpic_send_pkt(u32 cmd, u8 *param, u32 len);
+u32 qpic_read_data(u32 cmd_index, u32 size);
+u32 msm_qpic_get_bam_hdl(struct sps_bam_props *bam);
+int mdss_qpic_panel_on(struct mdss_panel_data *pdata,
+	struct qpic_panel_io_desc *panel_io);
+int mdss_qpic_panel_off(struct mdss_panel_data *pdata,
+	struct qpic_panel_io_desc *panel_io);
+int qpic_register_panel(struct mdss_panel_data *pdata);
+
+/* Structure that defines an SPS end point for a BAM pipe. */
+struct qpic_sps_endpt {
+	struct sps_pipe *handle;
+	struct sps_connect config;
+	struct sps_register_event bam_event;
+	struct completion completion;
+};
+
+/* Driver-wide state for the QPIC display controller. */
+struct qpic_data_type {
+	u32 rev;
+	struct platform_device *pdev;
+	/* register window */
+	size_t qpic_reg_size;
+	u32 qpic_phys;
+	char __iomem *qpic_base;
+	/* interrupt state */
+	u32 irq;
+	u32 irq_ena;
+	u32 res_init;
+	/* framebuffer and command DMA buffers */
+	void *fb_virt;
+	u32 fb_phys;
+	void *cmd_buf_virt;
+	u32 cmd_buf_phys;
+	/* BAM pipe state */
+	struct qpic_sps_endpt qpic_endpt;
+	u32 sps_init;
+	u32 irq_requested;
+	/* panel binding, bus vote and power state */
+	struct mdss_panel_data *panel_data;
+	struct qpic_panel_io_desc panel_io;
+	u32 bus_handle;
+	struct completion fifo_eof_comp;
+	u32 qpic_is_on;
+	struct clk *qpic_clk;
+	struct clk *qpic_a_clk;
+};
+
+u32 qpic_send_frame(
+		u32 x_start,
+		u32 y_start,
+		u32 x_end,
+		u32 y_end,
+		u32 *data,
+		u32 total_bytes);
+
+u32 qpic_panel_get_framerate(void);
+
+#endif /* MDSS_QPIC_H */
diff --git a/drivers/video/fbdev/msm/mdss_qpic_panel.c b/drivers/video/fbdev/msm/mdss_qpic_panel.c
new file mode 100644
index 0000000..3637716
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_qpic_panel.c
@@ -0,0 +1,305 @@
+/* Copyright (c) 2014-2015, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/qpnp/pin.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/leds.h>
+#include <linux/regulator/consumer.h>
+#include <linux/dma-mapping.h>
+#include <linux/uaccess.h>
+
+#include <linux/msm-sps.h>
+
+#include "mdss.h"
+#include "mdss_panel.h"
+#include "mdss_qpic.h"
+#include "mdss_qpic_panel.h"
+
+static u32 panel_is_on;
+static u32 panel_refresh_rate;
+
+static int (*qpic_panel_on)(struct qpic_panel_io_desc *qpic_panel_io);
+static void (*qpic_panel_off)(struct qpic_panel_io_desc *qpic_panel_io);
+
+static int mdss_qpic_pinctrl_init(struct platform_device *pdev,
+		struct qpic_panel_io_desc *qpic_panel_io);
+
+/* Return the refresh rate read from DT ("qcom,refresh_rate"), 0 if unset. */
+u32 qpic_panel_get_framerate(void)
+{
+	return panel_refresh_rate;
+}
+
+/* Program one dimension of the panel address window (column or page). */
+static u32 qpic_set_window_range(u32 op, u32 start, u32 end)
+{
+	u8 param[4];
+
+	param[0] = (start >> 8) & 0xff;	/* start MSB */
+	param[1] = start & 0xff;	/* start LSB */
+	param[2] = (end >> 8) & 0xff;	/* end MSB */
+	param[3] = end & 0xff;		/* end LSB */
+	return qpic_send_pkt(op, param, 4);
+}
+
+/*
+ * qpic_send_frame() - write a frame of pixels to a MIPI screen
+ * @x_start: first column (inclusive)
+ * @y_start: first page/row (inclusive)
+ * @x_end:   last column (inclusive)
+ * @y_end:   last page/row (inclusive)
+ * @data:    pixel data (physical address when the BAM path is in use)
+ * @total_bytes: payload size in bytes
+ *
+ * Sets the column and page address window, then streams the pixel
+ * data with a write-memory-start command.
+ *
+ * Return: 0 on success, the failing command status otherwise.
+ */
+u32 qpic_send_frame(u32 x_start,
+				u32 y_start,
+				u32 x_end,
+				u32 y_end,
+				u32 *data,
+				u32 total_bytes)
+{
+	u32 status;
+
+	/* coordinates are 16-bit quantities */
+	x_start = x_start & 0xffff;
+	y_start = y_start & 0xffff;
+	x_end = x_end & 0xffff;
+	y_end = y_end & 0xffff;
+
+	status = qpic_set_window_range(OP_SET_COLUMN_ADDRESS,
+			x_start, x_end);
+	if (status) {
+		pr_err("Failed to set column address");
+		return status;
+	}
+
+	status = qpic_set_window_range(OP_SET_PAGE_ADDRESS, y_start, y_end);
+	if (status) {
+		pr_err("Failed to set page address");
+		return status;
+	}
+
+	status = qpic_send_pkt(OP_WRITE_MEMORY_START, (u8 *)data, total_bytes);
+	if (status) {
+		pr_err("Failed to start memory write");
+		return status;
+	}
+	return 0;
+}
+
+/*
+ * Acquire the pinctrl handle and look up the default/sleep states.
+ * Missing states are only warned about; the handle itself is required.
+ */
+static int mdss_qpic_pinctrl_init(struct platform_device *pdev,
+		struct qpic_panel_io_desc *qpic_panel_io)
+{
+	struct pinctrl *pc;
+
+	pc = devm_pinctrl_get(&pdev->dev);
+	if (IS_ERR_OR_NULL(pc)) {
+		pr_err("%s: failed to get pinctrl\n", __func__);
+		return PTR_ERR(pc);
+	}
+	qpic_panel_io->pin_res.pinctrl = pc;
+
+	qpic_panel_io->pin_res.gpio_state_active =
+		pinctrl_lookup_state(pc, MDSS_PINCTRL_STATE_DEFAULT);
+	if (IS_ERR_OR_NULL(qpic_panel_io->pin_res.gpio_state_active))
+		pr_warn("%s: cannot get default pinstate\n", __func__);
+
+	qpic_panel_io->pin_res.gpio_state_suspend =
+		pinctrl_lookup_state(pc, MDSS_PINCTRL_STATE_SLEEP);
+	if (IS_ERR_OR_NULL(qpic_panel_io->pin_res.gpio_state_suspend))
+		pr_warn("%s: cannot get sleep pinstate\n", __func__);
+
+	return 0;
+}
+
+/*
+ * Bring up the QPIC controller and the attached panel.  Idempotent:
+ * returns immediately if the panel is already on.
+ */
+int mdss_qpic_panel_on(struct mdss_panel_data *pdata,
+	struct qpic_panel_io_desc *panel_io)
+{
+	int ret = 0;
+
+	if (panel_is_on)
+		return 0;
+
+	mdss_qpic_init();
+	if (qpic_panel_on)
+		ret = qpic_panel_on(panel_io);
+	if (ret)
+		return ret;
+
+	panel_is_on = true;
+	return 0;
+}
+
+/* Turn the panel off via the panel-specific hook, if one is installed. */
+int mdss_qpic_panel_off(struct mdss_panel_data *pdata,
+	struct qpic_panel_io_desc *panel_io)
+{
+	if (qpic_panel_off)
+		qpic_panel_off(panel_io);
+
+	panel_is_on = false;
+	return 0;
+}
+
+/*
+ * mdss_qpic_panel_io_init() - parse panel GPIOs and regulators from DT
+ * @pdev:          QPIC platform device
+ * @qpic_panel_io: descriptor to fill in
+ *
+ * All resources are optional; missing entries are logged and skipped
+ * so the panel driver can cope with partially-populated boards.
+ *
+ * Return: always 0.
+ */
+int mdss_qpic_panel_io_init(struct platform_device *pdev,
+	struct qpic_panel_io_desc *qpic_panel_io)
+{
+	int rc = 0;
+	struct device_node *np = pdev->dev.of_node;
+	int rst_gpio, cs_gpio, te_gpio, ad8_gpio, bl_gpio;
+	struct regulator *vdd_vreg;
+	struct regulator *avdd_vreg;
+
+	rc = mdss_qpic_pinctrl_init(pdev, qpic_panel_io);
+	if (rc)
+		pr_warn("%s: failed to get pin resources\n", __func__);
+
+	rst_gpio = of_get_named_gpio(np, "qcom,rst-gpio", 0);
+	cs_gpio = of_get_named_gpio(np, "qcom,cs-gpio", 0);
+	ad8_gpio = of_get_named_gpio(np, "qcom,ad8-gpio", 0);
+	te_gpio = of_get_named_gpio(np, "qcom,te-gpio", 0);
+	bl_gpio = of_get_named_gpio(np, "qcom,bl-gpio", 0);
+
+	if (!gpio_is_valid(rst_gpio))
+		pr_warn("%s: reset gpio not specified\n", __func__);
+	else
+		qpic_panel_io->rst_gpio = rst_gpio;
+
+	if (!gpio_is_valid(cs_gpio))
+		pr_warn("%s: cs gpio not specified\n", __func__);
+	else
+		qpic_panel_io->cs_gpio = cs_gpio;
+
+	if (!gpio_is_valid(ad8_gpio))
+		pr_warn("%s: ad8 gpio not specified\n", __func__);
+	else
+		qpic_panel_io->ad8_gpio = ad8_gpio;
+
+	if (!gpio_is_valid(te_gpio))
+		pr_warn("%s: te gpio not specified\n", __func__);
+	else
+		qpic_panel_io->te_gpio = te_gpio;
+
+	/* message fixed: it previously said "te gpio" (copy-paste) */
+	if (!gpio_is_valid(bl_gpio))
+		pr_warn("%s: bl gpio not specified\n", __func__);
+	else
+		qpic_panel_io->bl_gpio = bl_gpio;
+
+	vdd_vreg = devm_regulator_get(&pdev->dev, "vdd");
+	if (IS_ERR(vdd_vreg))
+		pr_err("%s could not get vdd,", __func__);
+	else
+		qpic_panel_io->vdd_vreg = vdd_vreg;
+
+	avdd_vreg = devm_regulator_get(&pdev->dev, "avdd");
+	if (IS_ERR(avdd_vreg))
+		pr_err("%s could not get avdd,", __func__);
+	else
+		qpic_panel_io->avdd_vreg = avdd_vreg;
+
+	return 0;
+}
+
+/*
+ * mdss_panel_parse_dt() - read resolution/bpp/refresh-rate from DT
+ * @pdev:       panel platform device
+ * @panel_data: panel info structure to populate
+ *
+ * Return: 0 on success, -EINVAL when a mandatory property is missing.
+ */
+static int mdss_panel_parse_dt(struct platform_device *pdev,
+			       struct mdss_panel_data *panel_data)
+{
+	struct device_node *np = pdev->dev.of_node;
+	u32 res[2], tmp;
+	int rc;
+
+	rc = of_property_read_u32_array(np, "qcom,mdss-pan-res", res, 2);
+	if (rc) {
+		pr_err("%s:%d, panel resolution not specified\n",
+						__func__, __LINE__);
+		return -EINVAL;
+	}
+
+	pr_debug("panel res %d %d\n", res[0], res[1]);
+	/* rc is known to be 0 here; the old '!rc ?:' fallbacks were dead */
+	panel_data->panel_info.xres = res[0];
+	panel_data->panel_info.yres = res[1];
+	rc = of_property_read_u32(np, "qcom,mdss-pan-bpp", &tmp);
+	if (rc) {
+		pr_err("%s:%d, panel bpp not specified\n",
+						__func__, __LINE__);
+		return -EINVAL;
+	}
+	pr_debug("panel bpp %d\n", tmp);
+	panel_data->panel_info.bpp = tmp;
+	/* optional; panel_refresh_rate keeps its prior value if absent */
+	of_property_read_u32(np, "qcom,refresh_rate", &panel_refresh_rate);
+
+	panel_data->panel_info.type = EBI2_PANEL;
+	panel_data->panel_info.pdest = DISPLAY_1;
+
+	return 0;
+}
+
+/*
+ * Probe the QPIC panel device: parse DT, pick the panel-specific
+ * on/off hooks from the "label" property and register the panel.
+ */
+static int mdss_qpic_panel_probe(struct platform_device *pdev)
+{
+	static struct mdss_panel_data vendor_pdata;
+	static const char *panel_name;
+	int ret;
+
+	pr_debug("%s:%d, debug info id=%d", __func__, __LINE__, pdev->id);
+	if (!pdev->dev.of_node)
+		return -ENODEV;
+
+	panel_name = of_get_property(pdev->dev.of_node, "label", NULL);
+	if (!panel_name)
+		pr_info("%s:%d, panel name not specified\n",
+						__func__, __LINE__);
+	else
+		pr_info("%s: Panel Name = %s\n", __func__, panel_name);
+
+	ret = mdss_panel_parse_dt(pdev, &vendor_pdata);
+	if (ret)
+		return ret;
+
+	/* select panel according to label */
+	if (panel_name && !strcmp(panel_name, "ili qvga lcdc panel")) {
+		qpic_panel_on = ili9341_on;
+		qpic_panel_off = ili9341_off;
+	} else {
+		/* select default panel driver */
+		pr_info("%s: select default panel driver\n", __func__);
+		qpic_panel_on = ili9341_on;
+		qpic_panel_off = ili9341_off;
+	}
+
+	return qpic_register_panel(&vendor_pdata);
+}
+
+static const struct of_device_id mdss_qpic_panel_match[] = {
+	{.compatible = "qcom,mdss-qpic-panel"},
+	{}
+};
+
+static struct platform_driver this_driver = {
+	.probe  = mdss_qpic_panel_probe,
+	.driver = {
+		.name = "qpic_panel",
+		.of_match_table = mdss_qpic_panel_match,
+	},
+};
+
+/* Module entry: register the QPIC panel platform driver. */
+static int __init mdss_qpic_panel_init(void)
+{
+	return platform_driver_register(&this_driver);
+}
+MODULE_DEVICE_TABLE(of, mdss_qpic_panel_match);
+module_init(mdss_qpic_panel_init);
diff --git a/drivers/video/fbdev/msm/mdss_qpic_panel.h b/drivers/video/fbdev/msm/mdss_qpic_panel.h
new file mode 100644
index 0000000..e570d76
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_qpic_panel.h
@@ -0,0 +1,136 @@
+/* Copyright (c) 2014, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_QPIC_PANEL_H
+#define MDSS_QPIC_PANEL_H
+
+#include <linux/list.h>
+#include <linux/msm-sps.h>
+
+#include "mdss_panel.h"
+
+#define LCDC_INTERNAL_BUFFER_SIZE   30
+
+/**
+ * Macros for coding MIPI commands
+ */
+/* Size of argument to MIPI command is variable */
+#define INV_SIZE             0xFFFF
+/* MIPI {command, argument size} tuple */
+#define OP_SIZE_PAIR(op, size)    (((op) << 16) | (size))
+/* extract size from command identifier */
+#define LCDC_EXTRACT_OP_SIZE(op_identifier)    ((op_identifier) & 0xFFFF)
+/* extract command id from command identifier */
+#define LCDC_EXTRACT_OP_CMD(op_identifier)    (((op_identifier) >> 16) & 0xFFFF)
+
+
+/* MIPI standard definitions */
+#define LCDC_ADDRESS_MODE_ORDER_BOTTOM_TO_TOP                0x80
+#define LCDC_ADDRESS_MODE_ORDER_RIGHT_TO_LEFT                0x40
+#define LCDC_ADDRESS_MODE_ORDER_REVERSE                      0x20
+#define LCDC_ADDRESS_MODE_ORDER_REFRESH_BOTTOM_TO_TOP        0x10
+#define LCDC_ADDRESS_MODE_ORDER_BGER_RGB                     0x08
+#define LCDC_ADDRESS_MODE_ORDER_REFERESH_RIGHT_TO_LEFT       0x04
+#define LCDC_ADDRESS_MODE_FLIP_HORIZONTAL                    0x02
+#define LCDC_ADDRESS_MODE_FLIP_VERTICAL                      0x01
+
+/* DCS set_pixel_format bits-per-pixel field codes */
+#define LCDC_PIXEL_FORMAT_3_BITS_PER_PIXEL    0x1
+#define LCDC_PIXEL_FORMAT_8_BITS_PER_PIXEL    0x2
+#define LCDC_PIXEL_FORMAT_12_BITS_PER_PIXEL   0x3
+#define LCDC_PIXEL_FORMAT_16_BITS_PER_PIXEL   0x5
+#define LCDC_PIXEL_FORMAT_18_BITS_PER_PIXEL   0x6
+#define LCDC_PIXEL_FORMAT_24_BITS_PER_PIXEL   0x7
+
+/*
+ * Build the DCS set_pixel_format (3Ah) parameter: DPI format in bits
+ * [6:4], DBI format in bits [2:0].
+ * Fix: the previous expansion used dpi_format for both fields and
+ * silently ignored the dbi_format argument.
+ */
+#define LCDC_CREATE_PIXEL_FORMAT(dpi_format, dbi_format) \
+	((dbi_format) | ((dpi_format) << 4))
+
+/* DCS get_power_mode response bits */
+#define POWER_MODE_IDLE_ON       0x40
+#define POWER_MODE_PARTIAL_ON    0x20
+#define POWER_MODE_SLEEP_ON      0x10
+#define POWER_MODE_NORMAL_ON     0x08
+#define POWER_MODE_DISPLAY_ON    0x04
+
+/* DCS get_display_mode response bits */
+#define LCDC_DISPLAY_MODE_SCROLLING_ON       0x80
+#define LCDC_DISPLAY_MODE_INVERSION_ON       0x20
+#define LCDC_DISPLAY_MODE_GAMMA_MASK         0x07
+
+/**
+ * LCDC MIPI Type B supported commands
+ */
+#define	OP_ENTER_IDLE_MODE      0x39
+#define	OP_ENTER_INVERT_MODE    0x21
+#define	OP_ENTER_NORMAL_MODE    0x13
+#define	OP_ENTER_PARTIAL_MODE   0x12
+#define	OP_ENTER_SLEEP_MODE     0x10
+#define	OP_EXIT_INVERT_MODE     0x20
+#define	OP_EXIT_SLEEP_MODE      0x11
+#define	OP_EXIT_IDLE_MODE       0x38
+#define	OP_GET_ADDRESS_MODE     0x0B /* size 1 */
+#define	OP_GET_BLUE_CHANNEL     0x08 /* size 1 */
+#define	OP_GET_DIAGNOSTIC       0x0F /* size 2 */
+#define	OP_GET_DISPLAY_MODE     0x0D /* size 1 */
+#define	OP_GET_GREEN_CHANNEL    0x07 /* size 1 */
+#define	OP_GET_PIXEL_FORMAT     0x0C /* size 1 */
+#define	OP_GET_POWER_MODE       0x0A /* size 1 */
+#define	OP_GET_RED_CHANNEL      0x06 /* size 1 */
+#define	OP_GET_SCANLINE         0x45 /* size 1 */
+#define	OP_GET_SIGNAL_MODE      0x0E /* size 1 */
+#define	OP_NOP                  0x00
+#define	OP_READ_DDB_CONTINUE    0xA8 /* size not fixed */
+#define	OP_READ_DDB_START       0xA1 /* size not fixed */
+#define	OP_READ_MEMORY_CONTINUE 0x3E /* size not fixed */
+#define	OP_READ_MEMORY_START    0x2E /* size not fixed */
+#define	OP_SET_ADDRESS_MODE     0x36 /* size 1 */
+#define	OP_SET_COLUMN_ADDRESS   0x2A /* size 4 */
+#define	OP_SET_DISPLAY_OFF      0x28
+#define	OP_SET_DISPLAY_ON       0x29
+#define	OP_SET_GAMMA_CURVE      0x26 /* size 1 */
+#define	OP_SET_PAGE_ADDRESS     0x2B /* size 4 */
+#define	OP_SET_PARTIAL_COLUMNS  0x31 /* size 4 */
+#define	OP_SET_PARTIAL_ROWS     0x30 /* size 4 */
+#define	OP_SET_PIXEL_FORMAT     0x3A /* size 1 */
+#define	OP_SOFT_RESET           0x01
+#define	OP_WRITE_MEMORY_CONTINUE  0x3C /* size not fixed */
+#define	OP_WRITE_MEMORY_START   0x2C /* size not fixed */
+
+/**
+ * ILI9341 commands
+ */
+#define OP_ILI9341_INTERFACE_CONTROL	0xf6
+#define OP_ILI9341_TEARING_EFFECT_LINE_ON	0x35
+
+/* Pinctrl handles for the panel's active/suspend pin states. */
+struct qpic_pinctrl_res {
+	struct pinctrl *pinctrl;
+	struct pinctrl_state *gpio_state_active;
+	struct pinctrl_state *gpio_state_suspend;
+};
+
+/* GPIOs, regulators and init flag describing the panel IO interface. */
+struct qpic_panel_io_desc {
+	int rst_gpio;
+	int cs_gpio;
+	int ad8_gpio;
+	int te_gpio;
+	int bl_gpio;
+	struct regulator *vdd_vreg;
+	struct regulator *avdd_vreg;
+	u32 init;
+	struct qpic_pinctrl_res pin_res;
+};
+
+int mdss_qpic_panel_io_init(struct platform_device *pdev,
+	struct qpic_panel_io_desc *qpic_panel_io);
+u32 qpic_panel_get_cmd(u32 command, u32 size);
+int ili9341_on(struct qpic_panel_io_desc *qpic_panel_io);
+void ili9341_off(struct qpic_panel_io_desc *qpic_panel_io);
+
+#endif /* MDSS_QPIC_PANEL_H */
diff --git a/drivers/video/fbdev/msm/mdss_rotator.c b/drivers/video/fbdev/msm/mdss_rotator.c
new file mode 100644
index 0000000..73676d0
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_rotator.c
@@ -0,0 +1,3045 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/sync.h>
+#include <linux/uaccess.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/regulator/consumer.h>
+
+#include "mdss_rotator_internal.h"
+#include "mdss_mdp.h"
+#include "mdss_debug.h"
+
+/* waiting for hw time out, 3 vsync for 30fps*/
+#define ROT_HW_ACQUIRE_TIMEOUT_IN_MS 100
+
+/* acquire fence time out, following other driver fence time out practice */
+#define ROT_FENCE_WAIT_TIMEOUT MSEC_PER_SEC
+/*
+ * Max rotator hw blocks possible. Used for upper array limits instead of
+ * alloc and freeing small array
+ */
+#define ROT_MAX_HW_BLOCKS 2
+
+/* true when [offset, offset + size) does not fit inside max_size */
+#define ROT_CHECK_BOUNDS(offset, size, max_size) \
+	(((size) > (max_size)) || ((offset) > ((max_size) - (size))))
+
+#define CLASS_NAME "rotator"
+#define DRIVER_NAME "mdss_rotator"
+
+/* One register-bus vote entry: CPU master -> display config slave. */
+#define MDP_REG_BUS_VECTOR_ENTRY(ab_val, ib_val)	\
+	{						\
+		.src = MSM_BUS_MASTER_AMPSS_M0,		\
+		.dst = MSM_BUS_SLAVE_DISPLAY_CFG,	\
+		.ab = (ab_val),				\
+		.ib = (ib_val),				\
+	}
+
+#define BUS_VOTE_19_MHZ 153600000
+
+/* Two usecases: index 0 = no vote, index 1 = 19 MHz register-bus vote. */
+static struct msm_bus_vectors rot_reg_bus_vectors[] = {
+	MDP_REG_BUS_VECTOR_ENTRY(0, 0),
+	MDP_REG_BUS_VECTOR_ENTRY(0, BUS_VOTE_19_MHZ),
+};
+static struct msm_bus_paths rot_reg_bus_usecases[ARRAY_SIZE(
+		rot_reg_bus_vectors)];
+static struct msm_bus_scale_pdata rot_reg_bus_scale_table = {
+	.usecase = rot_reg_bus_usecases,
+	.num_usecases = ARRAY_SIZE(rot_reg_bus_usecases),
+	.name = "mdss_rot_reg",
+	.active_only = 1,
+};
+
+/* Global rotator manager state shared by the whole driver. */
+static struct mdss_rot_mgr *rot_mgr;
+static void mdss_rotator_wq_handler(struct work_struct *work);
+
+/*
+ * mdss_rotator_bus_scale_set_quota() - vote data-bus bandwidth for rotation.
+ * @bus:   data-bus bookkeeping (handle, scale table, last voted usecase).
+ * @quota: requested total bandwidth; 0 selects the zero-vote usecase.
+ *
+ * Splits @quota evenly across the AXI ports of the chosen usecase and
+ * issues the bus-scale request.  Skips the request if @quota equals the
+ * last voted value.  Returns 0 on success or a negative errno.
+ */
+static int mdss_rotator_bus_scale_set_quota(struct mdss_rot_bus_data_type *bus,
+		u64 quota)
+{
+	int new_uc_idx;
+	int ret;
+
+	if (bus->bus_hdl < 1) {
+		pr_err("invalid bus handle %d\n", bus->bus_hdl);
+		return -EINVAL;
+	}
+
+	if (bus->curr_quota_val == quota) {
+		pr_debug("bw request already requested\n");
+		return 0;
+	}
+
+	if (!quota) {
+		new_uc_idx = 0;
+	} else {
+		struct msm_bus_vectors *vect = NULL;
+		struct msm_bus_scale_pdata *bw_table =
+			bus->bus_scale_pdata;
+		u64 port_quota = quota;
+		u32 total_axi_port_cnt;
+		int i;
+
+		/* cycle through the non-zero usecases so the new vote is
+		 * always re-evaluated by the bus driver
+		 */
+		new_uc_idx = (bus->curr_bw_uc_idx %
+			(bw_table->num_usecases - 1)) + 1;
+
+		total_axi_port_cnt = bw_table->usecase[new_uc_idx].num_paths;
+		if (total_axi_port_cnt == 0) {
+			pr_err("Number of bw paths is 0\n");
+			return -ENODEV;
+		}
+		do_div(port_quota, total_axi_port_cnt);
+
+		for (i = 0; i < total_axi_port_cnt; i++) {
+			vect = &bw_table->usecase[new_uc_idx].vectors[i];
+			vect->ab = port_quota;
+			vect->ib = 0;
+		}
+	}
+	bus->curr_bw_uc_idx = new_uc_idx;
+	bus->curr_quota_val = quota;
+
+	pr_debug("uc_idx=%d quota=%llu\n", new_uc_idx, quota);
+	MDSS_XLOG(new_uc_idx, ((quota >> 32) & 0xFFFFFFFF),
+		(quota & 0xFFFFFFFF));
+	ATRACE_BEGIN("msm_bus_scale_req_rot");
+	ret = msm_bus_scale_client_update_request(bus->bus_hdl,
+		new_uc_idx);
+	ATRACE_END("msm_bus_scale_req_rot");
+	return ret;
+}
+
+/*
+ * mdss_rotator_enable_reg_bus() - toggle the register-bus vote.
+ * @mgr:   rotator manager (may be NULL or have no bus handle: no-op).
+ * @quota: any non-zero value enables the vote, zero disables it.
+ *
+ * Only issues a bus-scale request when the usecase actually changes.
+ */
+static int mdss_rotator_enable_reg_bus(struct mdss_rot_mgr *mgr, u64 quota)
+{
+	u32 idx;
+	int dirty = 0;
+	int rc = 0;
+
+	if (!mgr || !mgr->reg_bus.bus_hdl)
+		return 0;
+
+	idx = quota ? 1 : 0;
+
+	if (mgr->reg_bus.curr_bw_uc_idx != idx) {
+		mgr->reg_bus.curr_bw_uc_idx = idx;
+		dirty++;
+	}
+
+	pr_debug("%s, changed=%d register bus %s\n", __func__, dirty,
+		quota ? "Enable":"Disable");
+
+	if (dirty) {
+		ATRACE_BEGIN("msm_bus_scale_req_rot_reg");
+		rc = msm_bus_scale_client_update_request(mgr->reg_bus.bus_hdl,
+			idx);
+		ATRACE_END("msm_bus_scale_req_rot_reg");
+	}
+
+	return rc;
+}
+
+/*
+ * Clock rates of all open sessions working a particular hw block are
+ * added together to get the required rate for that hw block.  The max
+ * across all hw blocks becomes the final clock rate voted for.
+ */
+static unsigned long mdss_rotator_clk_rate_calc(
+	struct mdss_rot_mgr *mgr,
+	struct mdss_rot_file_private *private)
+{
+	struct mdss_rot_perf *perf;
+	unsigned long clk_rate[ROT_MAX_HW_BLOCKS] = {0};
+	unsigned long total_clk_rate = 0;
+	int i, wb_idx;
+
+	mutex_lock(&private->perf_lock);
+	list_for_each_entry(perf, &private->perf_list, list) {
+		bool rate_accounted_for = false;
+
+		mutex_lock(&perf->work_dis_lock);
+		/*
+		 * If there is one session that has two work items across
+		 * different hw blocks rate is accounted for in both blocks.
+		 */
+		for (i = 0; i < mgr->queue_count; i++) {
+			if (perf->work_distribution[i]) {
+				clk_rate[i] += perf->clk_rate;
+				rate_accounted_for = true;
+			}
+		}
+
+		/*
+		 * Sessions that are open but not distributed on any hw block
+		 * Still need to be accounted for. Rate is added to last known
+		 * wb idx.
+		 */
+		wb_idx = perf->last_wb_idx;
+		if ((!rate_accounted_for) && (wb_idx >= 0) &&
+				(wb_idx < mgr->queue_count))
+			clk_rate[wb_idx] += perf->clk_rate;
+		mutex_unlock(&perf->work_dis_lock);
+	}
+	mutex_unlock(&private->perf_lock);
+
+	/* vote for the busiest hw block */
+	for (i = 0; i < mgr->queue_count; i++)
+		total_clk_rate = max(clk_rate[i], total_clk_rate);
+
+	pr_debug("Total clk rate calc=%lu\n", total_clk_rate);
+	return total_clk_rate;
+}
+
+/*
+ * mdss_rotator_get_clk() - look up a rotator clock by index.
+ * @mgr:     rotator manager holding the clock array.
+ * @clk_idx: index into mgr->rot_clk; must be < MDSS_CLK_ROTATOR_END_IDX.
+ *
+ * Returns the clk pointer, or NULL when the index is out of range.
+ * Fix: added the missing trailing newline to the error message, matching
+ * every other printk in this file.
+ */
+static struct clk *mdss_rotator_get_clk(struct mdss_rot_mgr *mgr, u32 clk_idx)
+{
+	if (clk_idx >= MDSS_CLK_ROTATOR_END_IDX) {
+		pr_err("Invalid clk index:%u\n", clk_idx);
+		return NULL;
+	}
+
+	return mgr->rot_clk[clk_idx];
+}
+
+/*
+ * mdss_rotator_set_clk_rate() - round and apply a rotator clock rate.
+ * @mgr:     rotator manager.
+ * @rate:    requested rate in Hz.
+ * @clk_idx: which rotator clock to program.
+ *
+ * Rounds @rate via the clock framework and only calls clk_set_rate()
+ * when the rounded value differs from the current rate.  Errors are
+ * logged but not propagated.
+ */
+static void mdss_rotator_set_clk_rate(struct mdss_rot_mgr *mgr,
+		unsigned long rate, u32 clk_idx)
+{
+	unsigned long clk_rate;
+	struct clk *clk = mdss_rotator_get_clk(mgr, clk_idx);
+	int ret;
+
+	if (clk) {
+		mutex_lock(&mgr->clk_lock);
+		clk_rate = clk_round_rate(clk, rate);
+		/* NOTE(review): IS_ERR_VALUE on unsigned long relies on the
+		 * kernel's errno-in-pointer-range convention
+		 */
+		if (IS_ERR_VALUE(clk_rate)) {
+			pr_err("unable to round rate err=%ld\n", clk_rate);
+		} else if (clk_rate != clk_get_rate(clk)) {
+			ret = clk_set_rate(clk, clk_rate);
+			if (IS_ERR_VALUE(ret)) {
+				pr_err("clk_set_rate failed, err:%d\n", ret);
+			} else {
+				pr_debug("rotator clk rate=%lu\n", clk_rate);
+				MDSS_XLOG(clk_rate);
+			}
+		}
+		mutex_unlock(&mgr->clk_lock);
+	} else {
+		pr_err("rotator clk not setup properly\n");
+	}
+}
+
+/*
+ * mdss_rotator_footswitch_ctrl() - switch the rotator power rails.
+ * @mgr: rotator manager holding the vreg configuration.
+ * @on:  true to enable the regulators, false to disable.
+ *
+ * Tracks the current state in mgr->regulator_enable and refuses
+ * redundant transitions.  Failures are logged; state is only updated
+ * on success.
+ */
+static void mdss_rotator_footswitch_ctrl(struct mdss_rot_mgr *mgr, bool on)
+{
+	int ret;
+
+	if (mgr->regulator_enable == on) {
+		pr_err("Regulators already in selected mode on=%d\n", on);
+		return;
+	}
+
+	pr_debug("%s: rotator regulators", on ? "Enable" : "Disable");
+	ret = msm_dss_enable_vreg(mgr->module_power.vreg_config,
+		mgr->module_power.num_vreg, on);
+	if (ret) {
+		pr_warn("Rotator regulator failed to %s\n",
+			on ? "enable" : "disable");
+		return;
+	}
+
+	mgr->regulator_enable = on;
+}
+
+/*
+ * mdss_rotator_clk_ctrl() - reference-counted rotator clock control.
+ * @mgr:    rotator manager.
+ * @enable: non-zero to take a clock reference, zero to drop one.
+ *
+ * Clocks are physically enabled on the 0->1 transition and disabled on
+ * the 1->0 transition.  On those transitions the data-bus vote context
+ * is also switched: Active+Sleep while clocks run, Active-Only when
+ * idle.  Returns 0 on success or the clk_prepare_enable() error.
+ */
+static int mdss_rotator_clk_ctrl(struct mdss_rot_mgr *mgr, int enable)
+{
+	struct clk *clk;
+	int ret = 0;
+	int i, changed = 0;
+
+	mutex_lock(&mgr->clk_lock);
+	if (enable) {
+		if (mgr->rot_enable_clk_cnt == 0)
+			changed++;
+		mgr->rot_enable_clk_cnt++;
+	} else {
+		if (mgr->rot_enable_clk_cnt) {
+			mgr->rot_enable_clk_cnt--;
+			if (mgr->rot_enable_clk_cnt == 0)
+				changed++;
+		} else {
+			pr_err("Can not be turned off\n");
+		}
+	}
+
+	if (changed) {
+		pr_debug("Rotator clk %s\n", enable ? "enable" : "disable");
+		for (i = 0; i < MDSS_CLK_ROTATOR_END_IDX; i++) {
+			clk = mgr->rot_clk[i];
+			if (enable) {
+				ret = clk_prepare_enable(clk);
+				if (ret) {
+					pr_err("enable failed clk_idx %d\n", i);
+					goto error;
+				}
+			} else {
+				clk_disable_unprepare(clk);
+			}
+		}
+		mutex_lock(&mgr->bus_lock);
+		if (enable) {
+			/* Active+Sleep */
+			msm_bus_scale_client_update_context(
+				mgr->data_bus.bus_hdl, false,
+				mgr->data_bus.curr_bw_uc_idx);
+			trace_rotator_bw_ao_as_context(0);
+		} else {
+			/* Active Only */
+			msm_bus_scale_client_update_context(
+				mgr->data_bus.bus_hdl, true,
+				mgr->data_bus.curr_bw_uc_idx);
+			trace_rotator_bw_ao_as_context(1);
+		}
+		mutex_unlock(&mgr->bus_lock);
+	}
+	mutex_unlock(&mgr->clk_lock);
+
+	return ret;
+error:
+	/* unwind the clocks enabled before the failing index */
+	for (i--; i >= 0; i--)
+		clk_disable_unprepare(mgr->rot_clk[i]);
+	mutex_unlock(&mgr->clk_lock);
+	return ret;
+}
+
+/*
+ * mdss_rotator_resource_ctrl() - reference-counted footswitch control.
+ * @mgr:    rotator manager.
+ * @enable: non-zero to take a resource reference, zero to drop one.
+ *
+ * The power rails are switched on the 0->1 and 1->0 transitions of
+ * mgr->res_ref_cnt, under mgr->clk_lock.  Always returns 0.
+ */
+int mdss_rotator_resource_ctrl(struct mdss_rot_mgr *mgr, int enable)
+{
+	int changed = 0;
+	int ret = 0;
+
+	mutex_lock(&mgr->clk_lock);
+	if (enable) {
+		if (mgr->res_ref_cnt == 0)
+			changed++;
+		mgr->res_ref_cnt++;
+	} else {
+		if (mgr->res_ref_cnt) {
+			mgr->res_ref_cnt--;
+			if (mgr->res_ref_cnt == 0)
+				changed++;
+		} else {
+			pr_err("Rot resource already off\n");
+		}
+	}
+
+	pr_debug("%s: res_cnt=%d changed=%d enable=%d\n",
+		__func__, mgr->res_ref_cnt, changed, enable);
+	MDSS_XLOG(mgr->res_ref_cnt, changed, enable);
+
+	if (changed) {
+		if (enable)
+			mdss_rotator_footswitch_ctrl(mgr, true);
+		else
+			mdss_rotator_footswitch_ctrl(mgr, false);
+	}
+	mutex_unlock(&mgr->clk_lock);
+	return ret;
+}
+
+/* caller is expected to hold perf->work_dis_lock lock */
+static bool mdss_rotator_is_work_pending(struct mdss_rot_mgr *mgr,
+	struct mdss_rot_perf *perf)
+{
+	int idx;
+
+	/* any non-zero distribution slot means outstanding work */
+	for (idx = 0; idx < mgr->queue_count; idx++) {
+		if (!perf->work_distribution[idx])
+			continue;
+		pr_debug("Work is still scheduled to complete\n");
+		return true;
+	}
+	return false;
+}
+
+/* Attach each entry's output fence to its previously reserved fd. */
+static void mdss_rotator_install_fence_fd(struct mdss_rot_entry_container *req)
+{
+	struct mdss_rot_entry *entry = req->entries;
+	struct mdss_rot_entry *end = req->entries + req->count;
+
+	for (; entry < end; entry++)
+		sync_fence_install(entry->output_fence,
+				entry->output_fence_fd);
+}
+
+/*
+ * mdss_rotator_create_fence() - create the output fence for a work item.
+ * @entry: rotation entry; must already be assigned to a queue.
+ *
+ * Creates a sync point at (next_value + 1) on the queue's sw_sync
+ * timeline, wraps it in a fence and reserves an fd for it.  On success
+ * the fence and fd are stored in @entry and next_value is advanced.
+ *
+ * Fix: the original returned 0 (success) when sw_sync_pt_create()
+ * failed, leaving entry->output_fence unset; it now returns -ENOMEM.
+ */
+static int mdss_rotator_create_fence(struct mdss_rot_entry *entry)
+{
+	int ret = 0, fd;
+	u32 val;
+	struct sync_pt *sync_pt;
+	struct sync_fence *fence;
+	struct mdss_rot_timeline *rot_timeline;
+
+	if (!entry->queue)
+		return -EINVAL;
+
+	rot_timeline = &entry->queue->timeline;
+
+	mutex_lock(&rot_timeline->lock);
+	val = rot_timeline->next_value + 1;
+
+	sync_pt = sw_sync_pt_create(rot_timeline->timeline, val);
+	if (sync_pt == NULL) {
+		pr_err("cannot create sync point\n");
+		ret = -ENOMEM;
+		goto sync_pt_create_err;
+	}
+
+	/* create fence */
+	fence = sync_fence_create(rot_timeline->fence_name, sync_pt);
+	if (fence == NULL) {
+		pr_err("%s: cannot create fence\n", rot_timeline->fence_name);
+		sync_pt_free(sync_pt);
+		ret = -ENOMEM;
+		goto sync_pt_create_err;
+	}
+
+	fd = get_unused_fd_flags(0);
+	if (fd < 0) {
+		pr_err("get_unused_fd_flags failed error:0x%x\n", fd);
+		ret = fd;
+		goto get_fd_err;
+	}
+
+	/* commit only once every failure point has been passed */
+	rot_timeline->next_value++;
+	mutex_unlock(&rot_timeline->lock);
+
+	entry->output_fence_fd = fd;
+	entry->output_fence = fence;
+	pr_debug("output sync point created at val=%u\n", val);
+
+	return 0;
+
+get_fd_err:
+	sync_fence_put(fence);
+sync_pt_create_err:
+	mutex_unlock(&rot_timeline->lock);
+	return ret;
+}
+
+/*
+ * mdss_rotator_clear_fence() - drop an entry's input/output fences.
+ * @entry: rotation entry being torn down.
+ *
+ * Used when the output fence fd could not be handed to user space:
+ * puts the fence, returns the reserved fd and rolls back the
+ * timeline's next_value.
+ */
+static void mdss_rotator_clear_fence(struct mdss_rot_entry *entry)
+{
+	struct mdss_rot_timeline *rot_timeline;
+
+	if (entry->input_fence) {
+		sync_fence_put(entry->input_fence);
+		entry->input_fence = NULL;
+	}
+
+	/* NOTE(review): assumes entry->queue is non-NULL whenever an
+	 * output fence exists (fence creation enforces this) — confirm.
+	 */
+	rot_timeline = &entry->queue->timeline;
+
+	/* fence failed to copy to user space */
+	if (entry->output_fence) {
+		sync_fence_put(entry->output_fence);
+		entry->output_fence = NULL;
+		put_unused_fd(entry->output_fence_fd);
+
+		mutex_lock(&rot_timeline->lock);
+		rot_timeline->next_value--;
+		mutex_unlock(&rot_timeline->lock);
+	}
+}
+
+/*
+ * Signal the entry's output fence by advancing the queue timeline by
+ * one step.  Idempotent: a second call on the same entry is a no-op.
+ */
+static int mdss_rotator_signal_output(struct mdss_rot_entry *entry)
+{
+	struct mdss_rot_timeline *tl;
+
+	if (!entry->queue)
+		return -EINVAL;
+
+	if (entry->output_signaled) {
+		pr_debug("output already signaled\n");
+		return 0;
+	}
+
+	tl = &entry->queue->timeline;
+
+	mutex_lock(&tl->lock);
+	sw_sync_timeline_inc(tl->timeline, 1);
+	mutex_unlock(&tl->lock);
+
+	entry->output_signaled = true;
+
+	return 0;
+}
+
+/*
+ * Block until the entry's input fence signals (or times out), then
+ * release the fence reference.  No fence means nothing to wait on.
+ */
+static int mdss_rotator_wait_for_input(struct mdss_rot_entry *entry)
+{
+	struct sync_fence *fence = entry->input_fence;
+	int rc;
+
+	if (!fence) {
+		pr_debug("invalid input fence, no wait\n");
+		return 0;
+	}
+
+	rc = sync_fence_wait(fence, ROT_FENCE_WAIT_TIMEOUT);
+	sync_fence_put(fence);
+	entry->input_fence = NULL;
+	return rc;
+}
+
+/*
+ * mdss_rotator_import_buffer() - import a user layer buffer for DMA.
+ * @buffer: user-supplied plane descriptors (fd + offset per plane).
+ * @data:   driver-side buffer state to fill.
+ * @flags:  import flags (e.g. secure session).
+ * @dev:    device to map against.
+ * @input:  true for the read (source) buffer, false for write (dest).
+ *
+ * Validates the plane count, converts planes to msmfb_data and imports
+ * them.  Marks the buffer READY and timestamps the allocation.
+ */
+static int mdss_rotator_import_buffer(struct mdp_layer_buffer *buffer,
+	struct mdss_mdp_data *data, u32 flags, struct device *dev, bool input)
+{
+	int i, ret = 0;
+	struct msmfb_data planes[MAX_PLANES];
+	int dir = DMA_TO_DEVICE;
+
+	if (!input)
+		dir = DMA_FROM_DEVICE;
+
+	memset(planes, 0, sizeof(planes));
+
+	/* reject untrusted plane counts before indexing planes[] */
+	if (buffer->plane_count > MAX_PLANES) {
+		pr_err("buffer plane_count exceeds MAX_PLANES limit:%d\n",
+				buffer->plane_count);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < buffer->plane_count; i++) {
+		planes[i].memory_id = buffer->planes[i].fd;
+		planes[i].offset = buffer->planes[i].offset;
+	}
+
+	ret =  mdss_mdp_data_get_and_validate_size(data, planes,
+			buffer->plane_count, flags, dev, true, dir, buffer);
+	data->state = MDP_BUF_STATE_READY;
+	data->last_alloc = local_clock();
+
+	return ret;
+}
+
+/*
+ * mdss_rotator_map_and_check_data() - map src/dst buffers and validate
+ * their sizes against the requested formats.
+ * @entry: rotation entry with imported src/dst buffers.
+ *
+ * Takes an IOMMU reference for the duration of the mapping, maps both
+ * buffers, then checks that each buffer is large enough for its
+ * format/dimensions (plane sizes account for 90-degree rotation).
+ * On error the caller is responsible for releasing the data.
+ */
+static int mdss_rotator_map_and_check_data(struct mdss_rot_entry *entry)
+{
+	int ret;
+	struct mdp_layer_buffer *input;
+	struct mdp_layer_buffer *output;
+	struct mdss_mdp_format_params *fmt;
+	struct mdss_mdp_plane_sizes ps;
+	bool rotation;
+
+	input = &entry->item.input;
+	output = &entry->item.output;
+
+	rotation = (entry->item.flags &  MDP_ROTATION_90) ? true : false;
+
+	ATRACE_BEGIN(__func__);
+	ret = mdss_iommu_ctrl(1);
+	if (IS_ERR_VALUE(ret)) {
+		ATRACE_END(__func__);
+		return ret;
+	}
+
+	/* if error during map, the caller will release the data */
+	entry->src_buf.state = MDP_BUF_STATE_ACTIVE;
+	ret = mdss_mdp_data_map(&entry->src_buf, true, DMA_TO_DEVICE);
+	if (ret) {
+		pr_err("source buffer mapping failed ret:%d\n", ret);
+		goto end;
+	}
+
+	entry->dst_buf.state = MDP_BUF_STATE_ACTIVE;
+	ret = mdss_mdp_data_map(&entry->dst_buf, true, DMA_FROM_DEVICE);
+	if (ret) {
+		pr_err("destination buffer mapping failed ret:%d\n", ret);
+		goto end;
+	}
+
+	fmt = mdss_mdp_get_format_params(input->format);
+	if (!fmt) {
+		pr_err("invalid input format:%d\n", input->format);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	ret = mdss_mdp_get_plane_sizes(
+			fmt, input->width, input->height, &ps, 0, rotation);
+	if (ret) {
+		pr_err("fail to get input plane size ret=%d\n", ret);
+		goto end;
+	}
+
+	ret = mdss_mdp_data_check(&entry->src_buf, &ps, fmt);
+	if (ret) {
+		pr_err("fail to check input data ret=%d\n", ret);
+		goto end;
+	}
+
+	fmt = mdss_mdp_get_format_params(output->format);
+	if (!fmt) {
+		pr_err("invalid output format:%d\n", output->format);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	ret = mdss_mdp_get_plane_sizes(
+			fmt, output->width, output->height, &ps, 0, rotation);
+	if (ret) {
+		pr_err("fail to get output plane size ret=%d\n", ret);
+		goto end;
+	}
+
+	ret = mdss_mdp_data_check(&entry->dst_buf, &ps, fmt);
+	if (ret) {
+		pr_err("fail to check output data ret=%d\n", ret);
+		goto end;
+	}
+
+end:
+	/* always drop the IOMMU reference taken above */
+	mdss_iommu_ctrl(0);
+	ATRACE_END(__func__);
+
+	return ret;
+}
+
+/*
+ * Locate the perf struct for @session_id in the per-file list.
+ * Caller must hold private->perf_lock.  Returns NULL when absent.
+ */
+static struct mdss_rot_perf *__mdss_rotator_find_session(
+	struct mdss_rot_file_private *private,
+	u32 session_id)
+{
+	struct mdss_rot_perf *cur, *tmp;
+
+	list_for_each_entry_safe(cur, tmp, &private->perf_list, list) {
+		if (cur->config.session_id == session_id)
+			return cur;
+	}
+
+	return NULL;
+}
+
+/* Locked wrapper around __mdss_rotator_find_session(). */
+static struct mdss_rot_perf *mdss_rotator_find_session(
+	struct mdss_rot_file_private *private,
+	u32 session_id)
+{
+	struct mdss_rot_perf *found;
+
+	mutex_lock(&private->perf_lock);
+	found = __mdss_rotator_find_session(private, session_id);
+	mutex_unlock(&private->perf_lock);
+
+	return found;
+}
+
+/* Free both DMA buffers of an entry and mark them unused. */
+static void mdss_rotator_release_data(struct mdss_rot_entry *entry)
+{
+	/* source buffer was mapped in the read direction */
+	mdss_mdp_data_free(&entry->src_buf, true, DMA_TO_DEVICE);
+	entry->src_buf.last_freed = local_clock();
+	entry->src_buf.state = MDP_BUF_STATE_UNUSED;
+
+	/* destination buffer was mapped in the write direction */
+	mdss_mdp_data_free(&entry->dst_buf, true, DMA_FROM_DEVICE);
+	entry->dst_buf.last_freed = local_clock();
+	entry->dst_buf.state = MDP_BUF_STATE_UNUSED;
+}
+
+/*
+ * mdss_rotator_import_data() - import both buffers of a rotation entry.
+ * @mgr:   rotator manager (provides the importing device).
+ * @entry: entry whose item describes the input/output layer buffers.
+ *
+ * Propagates the secure-session flag to both imports.  On failure of
+ * the second import the first buffer is not unwound here; the caller's
+ * cleanup path releases the entry's data.
+ */
+static int mdss_rotator_import_data(struct mdss_rot_mgr *mgr,
+	struct mdss_rot_entry *entry)
+{
+	int ret;
+	struct mdp_layer_buffer *input;
+	struct mdp_layer_buffer *output;
+	u32 flag = 0;
+
+	input = &entry->item.input;
+	output = &entry->item.output;
+
+	if (entry->item.flags & MDP_ROTATION_SECURE)
+		flag = MDP_SECURE_OVERLAY_SESSION;
+
+	ret = mdss_rotator_import_buffer(input, &entry->src_buf, flag,
+				&mgr->pdev->dev, true);
+	if (ret) {
+		pr_err("fail to import input buffer\n");
+		return ret;
+	}
+
+	/*
+	 * driver assumes output buffer is ready to be written
+	 * immediately
+	 */
+	ret = mdss_rotator_import_buffer(output, &entry->dst_buf, flag,
+				&mgr->pdev->dev, false);
+	if (ret) {
+		pr_err("fail to import output buffer\n");
+		return ret;
+	}
+
+	return ret;
+}
+
+/*
+ * mdss_rotator_hw_alloc() - assemble a writeback pipeline for rotation.
+ * @mgr:     rotator manager.
+ * @pipe_id: DMA pipe index to assign (must be < mdata->ndma_pipes).
+ * @wb_id:   writeback block to use, or MDSS_ROTATION_HW_ANY.
+ *
+ * Allocates ctl, wb, mixer and DMA pipe, wires them together and
+ * starts the writeback ctl.  Returns the resource or an ERR_PTR.
+ *
+ * Fix: @ret is now initialized and the pipe_id bounds failure sets
+ * -EINVAL before jumping to the error path.  Previously that path
+ * could return ERR_PTR(0) (i.e. NULL / "no error"), which callers
+ * using PTR_ERR() would misread as success.
+ */
+static struct mdss_rot_hw_resource *mdss_rotator_hw_alloc(
+	struct mdss_rot_mgr *mgr, u32 pipe_id, u32 wb_id)
+{
+	struct mdss_rot_hw_resource *hw;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 pipe_ndx, offset = mdss_mdp_get_wb_ctl_support(mdata, true);
+	int ret = 0;
+
+	hw = devm_kzalloc(&mgr->pdev->dev, sizeof(struct mdss_rot_hw_resource),
+		GFP_KERNEL);
+	if (!hw)
+		return ERR_PTR(-ENOMEM);
+
+	hw->ctl = mdss_mdp_ctl_alloc(mdata, offset);
+	if (IS_ERR_OR_NULL(hw->ctl)) {
+		pr_err("unable to allocate ctl\n");
+		ret = -ENODEV;
+		goto error;
+	}
+
+	if (wb_id == MDSS_ROTATION_HW_ANY)
+		hw->wb = mdss_mdp_wb_alloc(MDSS_MDP_WB_ROTATOR, hw->ctl->num);
+	else
+		hw->wb = mdss_mdp_wb_assign(wb_id, hw->ctl->num);
+
+	if (IS_ERR_OR_NULL(hw->wb)) {
+		pr_err("unable to allocate wb\n");
+		ret = -ENODEV;
+		goto error;
+	}
+	hw->ctl->wb = hw->wb;
+	hw->mixer = mdss_mdp_mixer_assign(hw->wb->num, true, true);
+
+	if (IS_ERR_OR_NULL(hw->mixer)) {
+		pr_err("unable to allocate wb mixer\n");
+		ret = -ENODEV;
+		goto error;
+	}
+	hw->ctl->mixer_left = hw->mixer;
+	hw->mixer->ctl = hw->ctl;
+
+	hw->mixer->rotator_mode = true;
+
+	switch (hw->mixer->num) {
+	case MDSS_MDP_WB_LAYERMIXER0:
+		hw->ctl->opmode = MDSS_MDP_CTL_OP_ROT0_MODE;
+		break;
+	case MDSS_MDP_WB_LAYERMIXER1:
+		hw->ctl->opmode =  MDSS_MDP_CTL_OP_ROT1_MODE;
+		break;
+	default:
+		pr_err("invalid layer mixer=%d\n", hw->mixer->num);
+		ret = -EINVAL;
+		goto error;
+	}
+
+	hw->ctl->ops.start_fnc = mdss_mdp_writeback_start;
+	hw->ctl->power_state = MDSS_PANEL_POWER_ON;
+	hw->ctl->wb_type = MDSS_MDP_WB_CTL_TYPE_BLOCK;
+
+
+	if (hw->ctl->ops.start_fnc)
+		ret = hw->ctl->ops.start_fnc(hw->ctl);
+
+	if (ret)
+		goto error;
+
+	if (pipe_id >= mdata->ndma_pipes) {
+		pr_err("invalid DMA pipe id=%d\n", pipe_id);
+		ret = -EINVAL;
+		goto error;
+	}
+
+	pipe_ndx = mdata->dma_pipes[pipe_id].ndx;
+	hw->pipe = mdss_mdp_pipe_assign(mdata, hw->mixer,
+			pipe_ndx, MDSS_MDP_PIPE_RECT0);
+	if (IS_ERR_OR_NULL(hw->pipe)) {
+		pr_err("dma pipe allocation failed\n");
+		ret = -ENODEV;
+		goto error;
+	}
+
+	hw->pipe->mixer_left = hw->mixer;
+	/* NOTE(review): pipe_id mirrors the wb number here, not the DMA
+	 * pipe index passed in — confirm this is intentional.
+	 */
+	hw->pipe_id = hw->wb->num;
+	hw->wb_id = hw->wb->num;
+
+	return hw;
+error:
+	if (!IS_ERR_OR_NULL(hw->pipe))
+		mdss_mdp_pipe_destroy(hw->pipe);
+	if (!IS_ERR_OR_NULL(hw->ctl)) {
+		if (hw->ctl->ops.stop_fnc)
+			hw->ctl->ops.stop_fnc(hw->ctl, MDSS_PANEL_POWER_OFF);
+		mdss_mdp_ctl_free(hw->ctl);
+	}
+	devm_kfree(&mgr->pdev->dev, hw);
+
+	return ERR_PTR(ret);
+}
+
+/*
+ * mdss_rotator_free_hw() - tear down a rotator hw pipeline.
+ * @mgr: rotator manager (owns the devm allocation).
+ * @hw:  resource previously built by mdss_rotator_hw_alloc().
+ *
+ * Destroys the DMA pipe, stops and frees the (possibly switched) ctl,
+ * then releases the resource struct itself.
+ */
+static void mdss_rotator_free_hw(struct mdss_rot_mgr *mgr,
+	struct mdss_rot_hw_resource *hw)
+{
+	struct mdss_mdp_mixer *mixer;
+	struct mdss_mdp_ctl *ctl;
+
+	/* grab the mixer before the pipe (and its backlink) is destroyed */
+	mixer = hw->pipe->mixer_left;
+
+	mdss_mdp_pipe_destroy(hw->pipe);
+
+	ctl = mdss_mdp_ctl_mixer_switch(mixer->ctl,
+		MDSS_MDP_WB_CTL_TYPE_BLOCK);
+	if (ctl) {
+		if (ctl->ops.stop_fnc)
+			ctl->ops.stop_fnc(ctl, MDSS_PANEL_POWER_OFF);
+		mdss_mdp_ctl_free(ctl);
+	}
+
+	devm_kfree(&mgr->pdev->dev, hw);
+}
+
+/*
+ * mdss_rotator_get_hw_resource() - claim the queue's hw for an entry.
+ * @queue: rotator queue whose hw block is requested.
+ * @entry: work item to install as the hw's current workload.
+ *
+ * Returns the hw resource, ERR_PTR(-EBUSY) if it already has a
+ * workload, or NULL if the queue has no hw at all.
+ */
+struct mdss_rot_hw_resource *mdss_rotator_get_hw_resource(
+	struct mdss_rot_queue *queue, struct mdss_rot_entry *entry)
+{
+	/* NOTE(review): queue->hw is read before taking hw_lock —
+	 * confirm assignment/teardown ordering makes this safe.
+	 */
+	struct mdss_rot_hw_resource *hw = queue->hw;
+
+	if (!hw) {
+		pr_err("no hw in the queue\n");
+		return NULL;
+	}
+
+	mutex_lock(&queue->hw_lock);
+
+	if (hw->workload) {
+		hw = ERR_PTR(-EBUSY);
+		goto get_hw_resource_err;
+	}
+	hw->workload = entry;
+
+get_hw_resource_err:
+	mutex_unlock(&queue->hw_lock);
+	return hw;
+}
+
+/* Release the hw block by clearing its current workload under lock. */
+static void mdss_rotator_put_hw_resource(struct mdss_rot_queue *queue,
+	struct mdss_rot_hw_resource *hw)
+{
+	mutex_lock(&queue->hw_lock);
+	hw->workload = NULL;
+	mutex_unlock(&queue->hw_lock);
+}
+
+/*
+ * caller will need to call mdss_rotator_deinit_queue when
+ * the function returns error
+ */
+static int mdss_rotator_init_queue(struct mdss_rot_mgr *mgr)
+{
+	int i, size, ret = 0;
+	char name[32];
+
+	size = sizeof(struct mdss_rot_queue) * mgr->queue_count;
+	mgr->queues = devm_kzalloc(&mgr->pdev->dev, size, GFP_KERNEL);
+	if (!mgr->queues)
+		return -ENOMEM;
+
+	/* per queue: ordered workqueue + sw_sync timeline + locks */
+	for (i = 0; i < mgr->queue_count; i++) {
+		snprintf(name, sizeof(name), "rot_workq_%d", i);
+		pr_debug("work queue name=%s\n", name);
+		mgr->queues[i].rot_work_queue = alloc_ordered_workqueue("%s",
+				WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, name);
+		if (!mgr->queues[i].rot_work_queue) {
+			ret = -EPERM;
+			break;
+		}
+
+		snprintf(name, sizeof(name), "rot_timeline_%d", i);
+		pr_debug("timeline name=%s\n", name);
+		mgr->queues[i].timeline.timeline =
+			sw_sync_timeline_create(name);
+		if (!mgr->queues[i].timeline.timeline) {
+			ret = -EPERM;
+			break;
+		}
+
+		size = sizeof(mgr->queues[i].timeline.fence_name);
+		snprintf(mgr->queues[i].timeline.fence_name, size,
+				"rot_fence_%d", i);
+		mutex_init(&mgr->queues[i].timeline.lock);
+
+		mutex_init(&mgr->queues[i].hw_lock);
+	}
+
+	return ret;
+}
+
+/*
+ * mdss_rotator_deinit_queue() - undo mdss_rotator_init_queue().
+ * @mgr: rotator manager.
+ *
+ * Safe to call after a partial init: skips queues whose workqueue or
+ * timeline was never created.  Resets queue_count to 0.
+ */
+static void mdss_rotator_deinit_queue(struct mdss_rot_mgr *mgr)
+{
+	int i;
+
+	if (!mgr->queues)
+		return;
+
+	for (i = 0; i < mgr->queue_count; i++) {
+		if (mgr->queues[i].rot_work_queue)
+			destroy_workqueue(mgr->queues[i].rot_work_queue);
+
+		if (mgr->queues[i].timeline.timeline) {
+			struct sync_timeline *obj;
+
+			obj = (struct sync_timeline *)
+				mgr->queues[i].timeline.timeline;
+			sync_timeline_destroy(obj);
+		}
+	}
+	devm_kfree(&mgr->pdev->dev, mgr->queues);
+	mgr->queue_count = 0;
+}
+
+/*
+ * mdss_rotator_assign_queue() - Function assign rotation work onto hw
+ * @mgr:	Rotator manager.
+ * @entry:	Contains details on rotator work item being requested
+ * @private:	Private struct used for access rot session performance struct
+ *
+ * This Function allocates hw required to complete rotation work item
+ * requested.
+ *
+ * Caller is responsible for calling cleanup function if error is returned
+ */
+static int mdss_rotator_assign_queue(struct mdss_rot_mgr *mgr,
+	struct mdss_rot_entry *entry,
+	struct mdss_rot_file_private *private)
+{
+	struct mdss_rot_perf *perf;
+	struct mdss_rot_queue *queue;
+	struct mdss_rot_hw_resource *hw;
+	struct mdp_rotation_item *item = &entry->item;
+	u32 wb_idx = item->wb_idx;
+	u32 pipe_idx = item->pipe_idx;
+	int ret = 0;
+
+	/*
+	 * todo: instead of always assign writeback block 0, we can
+	 * apply some load balancing logic in the future
+	 */
+	if (wb_idx == MDSS_ROTATION_HW_ANY) {
+		wb_idx = 0;
+		pipe_idx = 0;
+	}
+
+	if (wb_idx >= mgr->queue_count) {
+		pr_err("Invalid wb idx = %d\n", wb_idx);
+		return -EINVAL;
+	}
+
+	queue = mgr->queues + wb_idx;
+
+	mutex_lock(&queue->hw_lock);
+
+	/* lazily create the hw pipeline on first use of this queue */
+	if (!queue->hw) {
+		hw = mdss_rotator_hw_alloc(mgr, pipe_idx, wb_idx);
+		if (IS_ERR_OR_NULL(hw)) {
+			pr_err("fail to allocate hw\n");
+			ret = PTR_ERR(hw);
+		} else {
+			queue->hw = hw;
+		}
+	}
+
+	if (queue->hw) {
+		entry->queue = queue;
+		queue->hw->pending_count++;
+	}
+
+	mutex_unlock(&queue->hw_lock);
+
+	/* per kernel-doc above: on error the caller unwinds pending_count */
+	perf = mdss_rotator_find_session(private, item->session_id);
+	if (!perf) {
+		pr_err("Could not find session based on rotation work item\n");
+		return -EINVAL;
+	}
+
+	entry->perf = perf;
+	perf->last_wb_idx = wb_idx;
+
+	return ret;
+}
+
+/*
+ * Detach an entry from its queue and drop the hw pending count;
+ * the hw pipeline is torn down once no work remains on it.
+ */
+static void mdss_rotator_unassign_queue(struct mdss_rot_mgr *mgr,
+	struct mdss_rot_entry *entry)
+{
+	struct mdss_rot_queue *q = entry->queue;
+
+	if (!q)
+		return;
+
+	entry->queue = NULL;
+
+	mutex_lock(&q->hw_lock);
+
+	if (!q->hw) {
+		pr_err("entry assigned a queue with no hw\n");
+		mutex_unlock(&q->hw_lock);
+		return;
+	}
+
+	q->hw->pending_count--;
+	if (q->hw->pending_count == 0) {
+		mdss_rotator_free_hw(mgr, q->hw);
+		q->hw = NULL;
+	}
+
+	mutex_unlock(&q->hw_lock);
+}
+
+/*
+ * mdss_rotator_queue_request() - dispatch a request's entries to hw.
+ * @mgr:     rotator manager.
+ * @private: session owner, used for clock-rate aggregation.
+ * @req:     container of entries already assigned to queues.
+ *
+ * Records each entry in its session's work distribution, re-votes the
+ * rotator core clock, then queues every entry's commit work.
+ */
+static void mdss_rotator_queue_request(struct mdss_rot_mgr *mgr,
+	struct mdss_rot_file_private *private,
+	struct mdss_rot_entry_container *req)
+{
+	struct mdss_rot_entry *entry;
+	struct mdss_rot_queue *queue;
+	unsigned long clk_rate;
+	u32 wb_idx;
+	int i;
+
+	for (i = 0; i < req->count; i++) {
+		entry = req->entries + i;
+		queue = entry->queue;
+		wb_idx = queue->hw->wb_id;
+		mutex_lock(&entry->perf->work_dis_lock);
+		entry->perf->work_distribution[wb_idx]++;
+		mutex_unlock(&entry->perf->work_dis_lock);
+		entry->work_assigned = true;
+	}
+
+	clk_rate = mdss_rotator_clk_rate_calc(mgr, private);
+	mdss_rotator_set_clk_rate(mgr, clk_rate, MDSS_CLK_ROTATOR_CORE);
+
+	for (i = 0; i < req->count; i++) {
+		entry = req->entries + i;
+		queue = entry->queue;
+		/* NOTE(review): output_fence is dropped here without a put —
+		 * confirm ownership was already transferred (e.g. installed
+		 * as an fd) before this point.
+		 */
+		entry->output_fence = NULL;
+		queue_work(queue->rot_work_queue, &entry->commit_work);
+	}
+}
+
+/*
+ * mdss_rotator_calc_perf() - derive clock rate and bandwidth for a session.
+ * @perf: session perf struct; reads perf->config, writes clk_rate and bw.
+ *
+ * clk_rate = width * height * fps / 4 (rotator does 4 pixels/clock).
+ * bw = read + write bandwidth, scaled for 4:2:0 chroma or bpp, then
+ * adjusted by the compression ratios.  Rejects configs whose products
+ * would overflow 32 bits.  Returns 0 or -EINVAL.
+ */
+static int mdss_rotator_calc_perf(struct mdss_rot_perf *perf)
+{
+	struct mdp_rotation_config *config = &perf->config;
+	u32 read_bw, write_bw;
+	struct mdss_mdp_format_params *in_fmt, *out_fmt;
+
+	in_fmt = mdss_mdp_get_format_params(config->input.format);
+	if (!in_fmt) {
+		pr_err("invalid input format\n");
+		return -EINVAL;
+	}
+	out_fmt = mdss_mdp_get_format_params(config->output.format);
+	if (!out_fmt) {
+		pr_err("invalid output format\n");
+		return -EINVAL;
+	}
+	/* overflow guard: width * height must fit in 32 bits */
+	if (!config->input.width ||
+		(0xffffffff/config->input.width < config->input.height))
+		return -EINVAL;
+
+	perf->clk_rate = config->input.width * config->input.height;
+
+	/* overflow guard: pixels * frame_rate must fit in 32 bits */
+	if (!perf->clk_rate ||
+		(0xffffffff/perf->clk_rate < config->frame_rate))
+		return -EINVAL;
+
+	perf->clk_rate *= config->frame_rate;
+	/* rotator processes 4 pixels per clock */
+	perf->clk_rate /= 4;
+
+	read_bw = config->input.width * config->input.height *
+		config->frame_rate;
+	if (in_fmt->chroma_sample == MDSS_MDP_CHROMA_420)
+		read_bw = (read_bw * 3) / 2;
+	else
+		read_bw *= in_fmt->bpp;
+
+	write_bw = config->output.width * config->output.height *
+		config->frame_rate;
+	if (out_fmt->chroma_sample == MDSS_MDP_CHROMA_420)
+		write_bw = (write_bw * 3) / 2;
+	else
+		write_bw *= out_fmt->bpp;
+
+	read_bw = apply_comp_ratio_factor(read_bw, in_fmt,
+			&config->input.comp_ratio);
+	write_bw = apply_comp_ratio_factor(write_bw, out_fmt,
+			&config->output.comp_ratio);
+
+	perf->bw = read_bw + write_bw;
+	return 0;
+}
+
+/*
+ * Recompute and apply the aggregate bus bandwidth vote from every open
+ * session, plus any bandwidth still pledged by sessions that closed
+ * with work pending.  While suspended, only the pending-close vote is
+ * counted.  Always returns 0.
+ */
+static int mdss_rotator_update_perf(struct mdss_rot_mgr *mgr)
+{
+	struct mdss_rot_file_private *file_priv;
+	struct mdss_rot_perf *session;
+	u64 bw_sum = 0;
+
+	ATRACE_BEGIN(__func__);
+
+	if (!atomic_read(&mgr->device_suspended)) {
+		mutex_lock(&mgr->file_lock);
+		list_for_each_entry(file_priv, &mgr->file_list, list) {
+			mutex_lock(&file_priv->perf_lock);
+			list_for_each_entry(session, &file_priv->perf_list,
+					list)
+				bw_sum += session->bw;
+			mutex_unlock(&file_priv->perf_lock);
+		}
+		mutex_unlock(&mgr->file_lock);
+	}
+
+	mutex_lock(&mgr->bus_lock);
+	bw_sum += mgr->pending_close_bw_vote;
+	mdss_rotator_enable_reg_bus(mgr, bw_sum);
+	mdss_rotator_bus_scale_set_quota(&mgr->data_bus, bw_sum);
+	mutex_unlock(&mgr->bus_lock);
+
+	ATRACE_END(__func__);
+	return 0;
+}
+
+/*
+ * Undo one entry's contribution to its session's per-writeback
+ * work_distribution count.  If this was the last pending item of a
+ * session that was already closed (its perf is unlinked from the
+ * file's perf_list), also perform the deferred teardown: drop the
+ * pending-close bw vote, release resources/clk and free the perf.
+ */
+static void mdss_rotator_release_from_work_distribution(
+		struct mdss_rot_mgr *mgr,
+		struct mdss_rot_entry *entry)
+{
+	if (entry->work_assigned) {
+		bool free_perf = false;
+		u32 wb_idx = entry->queue->hw->wb_id;
+
+		mutex_lock(&mgr->lock);
+		mutex_lock(&entry->perf->work_dis_lock);
+		if (entry->perf->work_distribution[wb_idx])
+			entry->perf->work_distribution[wb_idx]--;
+
+		if (!entry->perf->work_distribution[wb_idx]
+				&& list_empty(&entry->perf->list)) {
+			/* close session has offloaded perf free to us */
+			free_perf = true;
+		}
+		mutex_unlock(&entry->perf->work_dis_lock);
+		entry->work_assigned = false;
+		if (free_perf) {
+			/* Return the bw the closing session left behind */
+			mutex_lock(&mgr->bus_lock);
+			mgr->pending_close_bw_vote -= entry->perf->bw;
+			mutex_unlock(&mgr->bus_lock);
+			mdss_rotator_resource_ctrl(mgr, false);
+			devm_kfree(&mgr->pdev->dev,
+				entry->perf->work_distribution);
+			devm_kfree(&mgr->pdev->dev, entry->perf);
+			mdss_rotator_update_perf(mgr);
+			mdss_rotator_clk_ctrl(mgr, false);
+			entry->perf = NULL;
+		}
+		mutex_unlock(&mgr->lock);
+	}
+}
+
+/*
+ * Fully release one rotation entry: settle its work-distribution
+ * charge (which reads entry->queue->hw, so it must run before the
+ * queue is unassigned), clear fences, release buffers, then give the
+ * hw queue back.
+ */
+static void mdss_rotator_release_entry(struct mdss_rot_mgr *mgr,
+	struct mdss_rot_entry *entry)
+{
+	mdss_rotator_release_from_work_distribution(mgr, entry);
+	mdss_rotator_clear_fence(entry);
+	mdss_rotator_release_data(entry);
+	mdss_rotator_unassign_queue(mgr, entry);
+}
+
+/*
+ * Derive integer downscale factors from an entry's src/dst rectangles
+ * and validate them against hw limits: exact power-of-two ratios up to
+ * 1/64, symmetric in w/h, and no downscale with UBWC output.
+ *
+ * Returns 0 on success; on failure the factors are reset to 0 and
+ * -EINVAL is returned.
+ */
+static int mdss_rotator_config_dnsc_factor(struct mdss_rot_mgr *mgr,
+	struct mdss_rot_entry *entry)
+{
+	int ret = 0;
+	u16 src_w, src_h, dst_w, dst_h, bit;
+	struct mdp_rotation_item *item = &entry->item;
+	struct mdss_mdp_format_params *fmt;
+
+	src_w = item->src_rect.w;
+	src_h = item->src_rect.h;
+
+	/* A 90-degree rotation swaps the destination dimensions */
+	if (item->flags & MDP_ROTATION_90) {
+		dst_w = item->dst_rect.h;
+		dst_h = item->dst_rect.w;
+	} else {
+		dst_w = item->dst_rect.w;
+		dst_h = item->dst_rect.h;
+	}
+
+	/* Init before any goto so dnsc_err never reads stale factors */
+	entry->dnsc_factor_w = 0;
+	entry->dnsc_factor_h = 0;
+
+	if (!mgr->has_downscale &&
+		(src_w != dst_w || src_h != dst_h)) {
+		pr_err("rotator downscale not supported\n");
+		ret = -EINVAL;
+		goto dnsc_err;
+	}
+
+	if ((src_w != dst_w) || (src_h != dst_h)) {
+		/* Robustness fix: avoid div-by-zero on empty dst rect */
+		if (!dst_w || !dst_h) {
+			ret = -EINVAL;
+			goto dnsc_err;
+		}
+		/* Only exact integer ratios are supported */
+		if ((src_w % dst_w) || (src_h % dst_h)) {
+			ret = -EINVAL;
+			goto dnsc_err;
+		}
+		entry->dnsc_factor_w = src_w / dst_w;
+		bit = fls(entry->dnsc_factor_w);
+		/*
+		 * New Chipsets supports downscale upto 1/64
+		 * change the Bit check from 5 to 7 to support 1/64 down scale
+		 */
+		if ((entry->dnsc_factor_w & ~BIT(bit - 1)) || (bit > 7)) {
+			ret = -EINVAL;
+			goto dnsc_err;
+		}
+		entry->dnsc_factor_h = src_h / dst_h;
+		bit = fls(entry->dnsc_factor_h);
+		if ((entry->dnsc_factor_h & ~BIT(bit - 1)) || (bit > 7)) {
+			ret = -EINVAL;
+			goto dnsc_err;
+		}
+	}
+
+	fmt = mdss_mdp_get_format_params(item->output.format);
+	/* Bug fix: fmt may be NULL for an unknown format; check before use */
+	if (!fmt) {
+		pr_err("invalid output format %d\n", item->output.format);
+		ret = -EINVAL;
+		goto dnsc_err;
+	}
+	if (mdss_mdp_is_ubwc_format(fmt) &&
+		(entry->dnsc_factor_h || entry->dnsc_factor_w)) {
+		pr_err("ubwc not supported with downscale %d\n",
+			item->output.format);
+		ret = -EINVAL;
+	}
+
+dnsc_err:
+
+	/* Downscaler does not support asymmetrical dnsc */
+	if (entry->dnsc_factor_w != entry->dnsc_factor_h)
+		ret = -EINVAL;
+
+	if (ret) {
+		pr_err("Invalid rotator downscale ratio %dx%d->%dx%d\n",
+			src_w, src_h, dst_w, dst_h);
+		entry->dnsc_factor_w = 0;
+		entry->dnsc_factor_h = 0;
+	}
+	return ret;
+}
+
+/*
+ * Check that an input/output format pairing is legal for the rotator.
+ * Returns true when supported, false otherwise.  Checks hw UBWC
+ * support, writeback validity of the output format, no YUV<->RGB
+ * conversion, matching per-component bit depths (RGB may drop alpha or
+ * add X), and that chroma subsampling survives a 90-degree rotation.
+ */
+static bool mdss_rotator_verify_format(struct mdss_rot_mgr *mgr,
+	struct mdss_mdp_format_params *in_fmt,
+	struct mdss_mdp_format_params *out_fmt, bool rotation)
+{
+	u8 in_v_subsample, in_h_subsample;
+	u8 out_v_subsample, out_h_subsample;
+
+	if (!mgr->has_ubwc && (mdss_mdp_is_ubwc_format(in_fmt) ||
+			mdss_mdp_is_ubwc_format(out_fmt))) {
+		pr_err("Rotator doesn't allow ubwc\n");
+		/*
+		 * Bug fix: this function returns bool; the previous
+		 * "return -EINVAL" converted to true and let an
+		 * unsupported UBWC pairing pass validation.
+		 */
+		return false;
+	}
+
+	if (!(out_fmt->flag & VALID_ROT_WB_FORMAT)) {
+		pr_err("Invalid output format\n");
+		return false;
+	}
+
+	if (in_fmt->is_yuv != out_fmt->is_yuv) {
+		pr_err("Rotator does not support CSC\n");
+		return false;
+	}
+
+	/* Forcing same pixel depth */
+	if (memcmp(in_fmt->bits, out_fmt->bits, sizeof(in_fmt->bits))) {
+		/* Exception is that RGB can drop alpha or add X */
+		if (in_fmt->is_yuv || out_fmt->alpha_enable ||
+			(in_fmt->bits[C2_R_Cr] != out_fmt->bits[C2_R_Cr]) ||
+			(in_fmt->bits[C0_G_Y] != out_fmt->bits[C0_G_Y]) ||
+			(in_fmt->bits[C1_B_Cb] != out_fmt->bits[C1_B_Cb])) {
+			pr_err("Bit format does not match\n");
+			return false;
+		}
+	}
+
+	/* Need to make sure that sub-sampling persists through rotation */
+	if (rotation) {
+		mdss_mdp_get_v_h_subsample_rate(in_fmt->chroma_sample,
+			&in_v_subsample, &in_h_subsample);
+		mdss_mdp_get_v_h_subsample_rate(out_fmt->chroma_sample,
+			&out_v_subsample, &out_h_subsample);
+
+		if ((in_v_subsample != out_h_subsample) ||
+				(in_h_subsample != out_v_subsample)) {
+			pr_err("Rotation has invalid subsampling\n");
+			return false;
+		}
+	} else {
+		if (in_fmt->chroma_sample != out_fmt->chroma_sample) {
+			pr_err("Format subsampling mismatch\n");
+			return false;
+		}
+	}
+
+	pr_debug("in_fmt=%0d, out_fmt=%d, has_ubwc=%d\n",
+		in_fmt->format, out_fmt->format, mgr->has_ubwc);
+	return true;
+}
+
+/*
+ * Validate a session configuration: both formats must be known, both
+ * image dimensions must be divisible by each format's chroma subsample
+ * rates, and the format pairing must be legal for the rotator hw.
+ */
+static int mdss_rotator_verify_config(struct mdss_rot_mgr *mgr,
+	struct mdp_rotation_config *config)
+{
+	struct mdss_mdp_format_params *in_fmt, *out_fmt;
+	u8 in_v_subsample, in_h_subsample;
+	u8 out_v_subsample, out_h_subsample;
+	u32 input, output;
+	bool rotation;
+
+	input = config->input.format;
+	output = config->output.format;
+	rotation = !!(config->flags & MDP_ROTATION_90);
+
+	in_fmt = mdss_mdp_get_format_params(input);
+	if (!in_fmt) {
+		pr_err("Unrecognized input format:%u\n", input);
+		return -EINVAL;
+	}
+	out_fmt = mdss_mdp_get_format_params(output);
+	if (!out_fmt) {
+		pr_err("Unrecognized output format:%u\n", output);
+		return -EINVAL;
+	}
+
+	mdss_mdp_get_v_h_subsample_rate(in_fmt->chroma_sample,
+		&in_v_subsample, &in_h_subsample);
+	mdss_mdp_get_v_h_subsample_rate(out_fmt->chroma_sample,
+		&out_v_subsample, &out_h_subsample);
+
+	/* Dimension of image needs to be divisible by subsample rate  */
+	if ((config->input.height % in_v_subsample) ||
+			(config->input.width % in_h_subsample)) {
+		pr_err("In ROI, subsample mismatch, w=%d, h=%d, vss%d, hss%d\n",
+			config->input.width, config->input.height,
+			in_v_subsample, in_h_subsample);
+		return -EINVAL;
+	}
+
+	if ((config->output.height % out_v_subsample) ||
+			(config->output.width % out_h_subsample)) {
+		pr_err("Out ROI, subsample mismatch, w=%d, h=%d, vss%d, hss%d\n",
+			config->output.width, config->output.height,
+			out_v_subsample, out_h_subsample);
+		return -EINVAL;
+	}
+
+	if (!mdss_rotator_verify_format(mgr, in_fmt, out_fmt, rotation)) {
+		pr_err("Rot format pairing invalid, in_fmt:%d, out_fmt:%d\n",
+			input, output);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Verify that one work item is compatible with its session config:
+ * src/dst rects against the session geometry and matching rotation
+ * flags.  Returns 0 on match, the first failing comparator's error
+ * otherwise.
+ */
+static int mdss_rotator_validate_item_matches_session(
+	struct mdp_rotation_config *config, struct mdp_rotation_item *item)
+{
+	int status;
+
+	status = __compare_session_item_rect(&config->input,
+		&item->src_rect, item->input.format, true);
+	if (!status)
+		status = __compare_session_item_rect(&config->output,
+			&item->dst_rect, item->output.format, false);
+	if (!status)
+		status = __compare_session_rotations(config->flags,
+			item->flags);
+
+	return status;
+}
+
+/*
+ * Bounds-check an item's src and dst rects against the declared image
+ * dimensions, and enforce UBWC offset alignment on the destination.
+ */
+static int mdss_rotator_validate_img_roi(struct mdp_rotation_item *item)
+{
+	struct mdss_mdp_format_params *fmt;
+	uint32_t src_w, src_h;
+	int rc = 0;
+
+	src_w = item->input.width;
+	src_h = item->input.height;
+	/* Deinterlacing treats the source as two half-height fields */
+	if (item->flags & MDP_ROTATION_DEINTERLACE) {
+		src_w *= 2;
+		src_h /= 2;
+	}
+
+	/* Check roi bounds */
+	if (ROT_CHECK_BOUNDS(item->src_rect.x, item->src_rect.w, src_w) ||
+			ROT_CHECK_BOUNDS(item->src_rect.y, item->src_rect.h,
+			src_h)) {
+		pr_err("invalid src flag=%08x img wh=%dx%d rect=%d,%d,%d,%d\n",
+			item->flags, src_w, src_h, item->src_rect.x,
+			item->src_rect.y, item->src_rect.w, item->src_rect.h);
+		return -EINVAL;
+	}
+
+	if (ROT_CHECK_BOUNDS(item->dst_rect.x, item->dst_rect.w,
+			item->output.width) ||
+			ROT_CHECK_BOUNDS(item->dst_rect.y, item->dst_rect.h,
+			item->output.height)) {
+		pr_err("invalid dst img wh=%dx%d rect=%d,%d,%d,%d\n",
+			item->output.width, item->output.height,
+			item->dst_rect.x, item->dst_rect.y, item->dst_rect.w,
+			item->dst_rect.h);
+		return -EINVAL;
+	}
+
+	fmt = mdss_mdp_get_format_params(item->output.format);
+	if (!fmt) {
+		pr_err("invalid output format:%d\n", item->output.format);
+		return -EINVAL;
+	}
+
+	/* UBWC output imposes extra alignment on the dst offset */
+	if (mdss_mdp_is_ubwc_format(fmt))
+		rc = mdss_mdp_validate_offset_for_ubwc_format(fmt,
+			item->dst_rect.x, item->dst_rect.y);
+
+	return rc;
+}
+
+/*
+ * Reject flag/format combinations the hw cannot handle: MDP
+ * deinterlacing cannot be applied to UBWC (tiled) input.
+ */
+static int mdss_rotator_validate_fmt_and_item_flags(
+	struct mdp_rotation_config *config, struct mdp_rotation_item *item)
+{
+	struct mdss_mdp_format_params *fmt;
+
+	fmt = mdss_mdp_get_format_params(item->input.format);
+	/* Bug fix: guard against an unknown format before dereferencing */
+	if (!fmt) {
+		pr_err("invalid input format:%d\n", item->input.format);
+		return -EINVAL;
+	}
+	if ((item->flags & MDP_ROTATION_DEINTERLACE) &&
+			mdss_mdp_is_ubwc_format(fmt)) {
+		pr_err("cannot perform mdp deinterlace on tiled formats\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * Run all per-entry validation: writeback index sanity, session
+ * lookup, item-vs-session compatibility, ROI bounds, flag/format
+ * compatibility and downscale factor configuration.
+ */
+static int mdss_rotator_validate_entry(struct mdss_rot_mgr *mgr,
+	struct mdss_rot_file_private *private,
+	struct mdss_rot_entry *entry)
+{
+	int ret;
+	struct mdp_rotation_item *item;
+	struct mdss_rot_perf *perf;
+
+	item = &entry->item;
+
+	if (item->wb_idx != item->pipe_idx) {
+		pr_err("invalid writeback and pipe idx\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * NOTE(review): this accepts wb_idx == mgr->queue_count; if
+	 * valid indices are 0..queue_count-1 the check should be ">=" --
+	 * confirm against how the queue is selected from wb_idx.
+	 */
+	if (item->wb_idx != MDSS_ROTATION_HW_ANY &&
+		item->wb_idx > mgr->queue_count) {
+		pr_err("invalid writeback idx\n");
+		return -EINVAL;
+	}
+
+	perf = mdss_rotator_find_session(private, item->session_id);
+	if (!perf) {
+		pr_err("Could not find session:%u\n", item->session_id);
+		return -EINVAL;
+	}
+
+	ret = mdss_rotator_validate_item_matches_session(&perf->config, item);
+	if (ret) {
+		pr_err("Work item does not match session:%u\n",
+			item->session_id);
+		return ret;
+	}
+
+	ret = mdss_rotator_validate_img_roi(item);
+	if (ret) {
+		pr_err("Image roi is invalid\n");
+		return ret;
+	}
+
+	ret = mdss_rotator_validate_fmt_and_item_flags(&perf->config, item);
+	if (ret)
+		return ret;
+
+	ret = mdss_rotator_config_dnsc_factor(mgr, entry);
+	if (ret) {
+		pr_err("fail to configure downscale factor\n");
+		return ret;
+	}
+	return ret;
+}
+
+/*
+ * Validate and register every entry of a request: import buffers, take
+ * the optional input fence, assign a hw queue, init the commit work
+ * and create the output fence, then link the request on the file's
+ * req_list.
+ *
+ * Upon failure from the function, caller needs to make sure
+ * to call mdss_rotator_remove_request to clean up resources.
+ *
+ * (Cleanup: the previously computed-but-unused "flag" local was
+ * removed.)
+ */
+static int mdss_rotator_add_request(struct mdss_rot_mgr *mgr,
+	struct mdss_rot_file_private *private,
+	struct mdss_rot_entry_container *req)
+{
+	struct mdss_rot_entry *entry;
+	struct mdp_rotation_item *item;
+	int i, ret;
+
+	for (i = 0; i < req->count; i++) {
+		entry = req->entries + i;
+		item = &entry->item;
+
+		ret = mdss_rotator_validate_entry(mgr, private, entry);
+		if (ret) {
+			pr_err("fail to validate the entry\n");
+			return ret;
+		}
+
+		ret = mdss_rotator_import_data(mgr, entry);
+		if (ret) {
+			pr_err("fail to import the data\n");
+			return ret;
+		}
+
+		/* A negative fd means no input fence was supplied */
+		if (item->input.fence >= 0) {
+			entry->input_fence =
+				sync_fence_fdget(item->input.fence);
+			if (!entry->input_fence) {
+				pr_err("invalid input fence fd\n");
+				return -EINVAL;
+			}
+		}
+
+		ret = mdss_rotator_assign_queue(mgr, entry, private);
+		if (ret) {
+			pr_err("fail to assign queue to entry\n");
+			return ret;
+		}
+
+		entry->request = req;
+
+		INIT_WORK(&entry->commit_work, mdss_rotator_wq_handler);
+
+		ret = mdss_rotator_create_fence(entry);
+		if (ret) {
+			pr_err("fail to create fence\n");
+			return ret;
+		}
+		/* Report the new output fence fd back through the item */
+		item->output.fence = entry->output_fence_fd;
+
+		pr_debug("Entry added. wbidx=%u, src{%u,%u,%u,%u}f=%u\n"
+			"dst{%u,%u,%u,%u}f=%u session_id=%u\n", item->wb_idx,
+			item->src_rect.x, item->src_rect.y,
+			item->src_rect.w, item->src_rect.h, item->input.format,
+			item->dst_rect.x, item->dst_rect.y,
+			item->dst_rect.w, item->dst_rect.h, item->output.format,
+			item->session_id);
+	}
+
+	mutex_lock(&private->req_lock);
+	list_add(&req->list, &private->req_list);
+	mutex_unlock(&private->req_lock);
+
+	return 0;
+}
+
+/*
+ * Release every entry of a request and unlink the request from the
+ * file's req_list.  The container itself is not freed here.
+ */
+static void mdss_rotator_remove_request(struct mdss_rot_mgr *mgr,
+	struct mdss_rot_file_private *private,
+	struct mdss_rot_entry_container *req)
+{
+	int idx;
+
+	mutex_lock(&private->req_lock);
+	for (idx = 0; idx < req->count; idx++) {
+		struct mdss_rot_entry *entry = req->entries + idx;
+
+		mdss_rotator_release_entry(mgr, entry);
+	}
+	list_del_init(&req->list);
+	mutex_unlock(&private->req_lock);
+}
+
+/* This function should be called with req_lock */
+static void mdss_rotator_cancel_request(struct mdss_rot_mgr *mgr,
+	struct mdss_rot_entry_container *req)
+{
+	struct mdss_rot_entry *entry;
+	int i;
+
+	/*
+	 * To avoid signal the rotation entry output fence in the wrong
+	 * order, all the entries in the same request needs to be cancelled
+	 * first, before signaling the output fence.
+	 */
+	for (i = req->count - 1; i >= 0; i--) {
+		entry = req->entries + i;
+		/*
+		 * May block until a running handler finishes; safe under
+		 * req_lock since mdss_rotator_wq_handler() does not take
+		 * the file's req_lock.
+		 */
+		cancel_work_sync(&entry->commit_work);
+	}
+
+	/* Now signal and release in reverse submission order */
+	for (i = req->count - 1; i >= 0; i--) {
+		entry = req->entries + i;
+		mdss_rotator_signal_output(entry);
+		mdss_rotator_release_entry(mgr, entry);
+	}
+
+	list_del_init(&req->list);
+	devm_kfree(&mgr->pdev->dev, req);
+}
+
+/*
+ * Cancel and free every outstanding request belonging to one open
+ * file.
+ */
+static void mdss_rotator_cancel_all_requests(struct mdss_rot_mgr *mgr,
+	struct mdss_rot_file_private *private)
+{
+	struct mdss_rot_entry_container *cur, *tmp;
+
+	pr_debug("Canceling all rotator requests\n");
+
+	/* Safe iterator: cancel_request unlinks and frees each request */
+	mutex_lock(&private->req_lock);
+	list_for_each_entry_safe(cur, tmp, &private->req_list, list)
+		mdss_rotator_cancel_request(mgr, cur);
+	mutex_unlock(&private->req_lock);
+}
+
+/*
+ * Reap requests whose entries have all finished (pending_count == 0).
+ * The name keeps the historical "competed" spelling as it is part of
+ * this file's call graph.
+ */
+static void mdss_rotator_free_competed_request(struct mdss_rot_mgr *mgr,
+	struct mdss_rot_file_private *private)
+{
+	struct mdss_rot_entry_container *cur, *tmp;
+
+	mutex_lock(&private->req_lock);
+	list_for_each_entry_safe(cur, tmp, &private->req_list, list) {
+		if (!atomic_read(&cur->pending_count)) {
+			list_del_init(&cur->list);
+			devm_kfree(&mgr->pdev->dev, cur);
+		}
+	}
+	mutex_unlock(&private->req_lock);
+}
+
+/*
+ * Tear down everything a file owns: cancel all in-flight requests,
+ * then free every session's perf bookkeeping.
+ */
+static void mdss_rotator_release_rotator_perf_session(
+	struct mdss_rot_mgr *mgr,
+	struct mdss_rot_file_private *private)
+{
+	struct mdss_rot_perf *session, *tmp;
+
+	pr_debug("Releasing all rotator request\n");
+
+	/* First cancel any in-flight work owned by this file */
+	mdss_rotator_cancel_all_requests(mgr, private);
+
+	mutex_lock(&private->perf_lock);
+	list_for_each_entry_safe(session, tmp, &private->perf_list, list) {
+		list_del_init(&session->list);
+		devm_kfree(&mgr->pdev->dev, session->work_distribution);
+		devm_kfree(&mgr->pdev->dev, session);
+	}
+	mutex_unlock(&private->perf_lock);
+}
+
+/*
+ * Tear down every open file session: cancel requests, drop resource
+ * votes, unlink and free each per-file private data, then recompute
+ * the aggregate perf vote (which drops with no sessions left).
+ */
+static void mdss_rotator_release_all(struct mdss_rot_mgr *mgr)
+{
+	struct mdss_rot_file_private *priv, *priv_next;
+
+	mutex_lock(&mgr->file_lock);
+	list_for_each_entry_safe(priv, priv_next, &mgr->file_list, list) {
+		mdss_rotator_release_rotator_perf_session(mgr, priv);
+		mdss_rotator_resource_ctrl(mgr, false);
+		list_del_init(&priv->list);
+		priv->file->private_data = NULL;
+		devm_kfree(&mgr->pdev->dev, priv);
+	}
+	/*
+	 * Bug fix: unlock the same lock instance taken above; the old
+	 * code unlocked the global rot_mgr->file_lock, which is the
+	 * wrong lock whenever mgr is not the global instance.
+	 */
+	mutex_unlock(&mgr->file_lock);
+
+	mdss_rotator_update_perf(mgr);
+}
+
+/*
+ * Switch the pipe's ctl to a writeback-block ctl for rotation.  Takes
+ * the ctl's shared_lock on entry; the lock is released on failure
+ * here, or later by mdss_rotator_wait_for_entry() on success.
+ */
+static int mdss_rotator_prepare_hw(struct mdss_rot_hw_resource *hw,
+	struct mdss_rot_entry *entry)
+{
+	struct mdss_mdp_pipe *pipe = hw->pipe;
+	struct mdss_mdp_ctl *orig_ctl = pipe->mixer_left->ctl;
+	struct mdss_mdp_ctl *rot_ctl;
+
+	if (orig_ctl->shared_lock)
+		mutex_lock(orig_ctl->shared_lock);
+
+	rot_ctl = mdss_mdp_ctl_mixer_switch(orig_ctl,
+						MDSS_MDP_WB_CTL_TYPE_BLOCK);
+	if (!rot_ctl) {
+		if (orig_ctl->shared_lock)
+			mutex_unlock(orig_ctl->shared_lock);
+		return -EINVAL;
+	}
+
+	hw->ctl = rot_ctl;
+	pipe->mixer_left = rot_ctl->mixer_left;
+
+	return 0;
+}
+
+/* Field-wise copy from the UAPI mdp_rect into the driver's mdss_rect */
+static void mdss_rotator_translate_rect(struct mdss_rect *dst,
+	struct mdp_rect *src)
+{
+	dst->x = src->x;
+	dst->y = src->y;
+	dst->w = src->w;
+	dst->h = src->h;
+}
+
+/*
+ * Translate UAPI MDP_ROTATION_* request flags into the driver's
+ * internal MDP_* pipe flags via a table lookup.
+ */
+static u32 mdss_rotator_translate_flags(u32 input)
+{
+	static const struct {
+		u32 rot_flag;
+		u32 mdp_flag;
+	} flag_map[] = {
+		{ MDP_ROTATION_NOP, MDP_ROT_NOP },
+		{ MDP_ROTATION_FLIP_LR, MDP_FLIP_LR },
+		{ MDP_ROTATION_FLIP_UD, MDP_FLIP_UD },
+		{ MDP_ROTATION_90, MDP_ROT_90 },
+		{ MDP_ROTATION_DEINTERLACE, MDP_DEINTERLACE },
+		{ MDP_ROTATION_SECURE, MDP_SECURE_OVERLAY_SESSION },
+		{ MDP_ROTATION_BWC_EN, MDP_BWC_EN },
+	};
+	u32 output = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(flag_map); i++)
+		if (input & flag_map[i].rot_flag)
+			output |= flag_map[i].mdp_flag;
+
+	return output;
+}
+
+/*
+ * Program the source pipe for one rotation entry and queue its source
+ * buffer.  Called with the ctl prepared by mdss_rotator_prepare_hw().
+ */
+static int mdss_rotator_config_hw(struct mdss_rot_hw_resource *hw,
+	struct mdss_rot_entry *entry)
+{
+	struct mdss_mdp_pipe *pipe;
+	struct mdp_rotation_item *item;
+	struct mdss_rot_perf *perf;
+	int ret;
+
+	ATRACE_BEGIN(__func__);
+	pipe = hw->pipe;
+	item = &entry->item;
+	perf = entry->perf;
+
+	pipe->flags = mdss_rotator_translate_flags(item->flags);
+	pipe->src_fmt = mdss_mdp_get_format_params(item->input.format);
+	pipe->img_width = item->input.width;
+	pipe->img_height = item->input.height;
+	mdss_rotator_translate_rect(&pipe->src, &item->src_rect);
+	/*
+	 * NOTE(review): dst is set from src_rect as well; presumably the
+	 * writeback stage applies the dst geometry/downscale -- confirm
+	 * this is intentional and not a src/dst mixup.
+	 */
+	mdss_rotator_translate_rect(&pipe->dst, &item->src_rect);
+	pipe->scaler.enable = 0;
+	pipe->frame_rate = perf->config.frame_rate;
+
+	pipe->params_changed++;
+
+	/* Re-reserve SMP memory for the new pipe geometry */
+	mdss_mdp_smp_release(pipe);
+
+	ret = mdss_mdp_smp_reserve(pipe);
+	if (ret) {
+		pr_err("unable to mdss_mdp_smp_reserve rot data\n");
+		goto done;
+	}
+
+	ret = mdss_mdp_overlay_setup_scaling(pipe);
+	if (ret) {
+		pr_err("scaling setup failed %d\n", ret);
+		goto done;
+	}
+
+	ret = mdss_mdp_pipe_queue_data(pipe, &entry->src_buf);
+	pr_debug("Config pipe. src{%u,%u,%u,%u}f=%u\n"
+		"dst{%u,%u,%u,%u}f=%u session_id=%u\n",
+		item->src_rect.x, item->src_rect.y,
+		item->src_rect.w, item->src_rect.h, item->input.format,
+		item->dst_rect.x, item->dst_rect.y,
+		item->dst_rect.w, item->dst_rect.h, item->output.format,
+		item->session_id);
+	MDSS_XLOG(item->input.format, pipe->img_width, pipe->img_height,
+		pipe->flags);
+done:
+	ATRACE_END(__func__);
+	return ret;
+}
+
+/*
+ * Hand the entry's destination buffer to the writeback ctl and commit
+ * the rotation.
+ */
+static int mdss_rotator_kickoff_entry(struct mdss_rot_hw_resource *hw,
+	struct mdss_rot_entry *entry)
+{
+	struct mdss_mdp_writeback_arg wb_args = {
+		.data = &entry->dst_buf,
+		.priv_data = entry,
+	};
+
+	return mdss_mdp_writeback_display_commit(hw->ctl, &wb_args);
+}
+
+/*
+ * Block until the committed rotation finishes, then drop the ctl
+ * shared_lock taken in mdss_rotator_prepare_hw().
+ */
+static int mdss_rotator_wait_for_entry(struct mdss_rot_hw_resource *hw,
+	struct mdss_rot_entry *entry)
+{
+	struct mdss_mdp_ctl *ctl = hw->ctl;
+	int rc;
+
+	rc = mdss_mdp_display_wait4comp(ctl);
+	if (ctl->shared_lock)
+		mutex_unlock(ctl->shared_lock);
+	return rc;
+}
+
+/*
+ * Run one entry through the hw: switch the ctl to writeback-block
+ * mode, program the pipe, kick off and wait for completion.
+ *
+ * NOTE(review): mdss_rotator_prepare_hw() takes ctl->shared_lock and
+ * mdss_rotator_wait_for_entry() releases it; if config or kickoff
+ * fails in between, the lock appears to stay held -- verify whether
+ * those error paths need an unlock.
+ */
+static int mdss_rotator_commit_entry(struct mdss_rot_hw_resource *hw,
+	struct mdss_rot_entry *entry)
+{
+	int ret;
+
+	ret = mdss_rotator_prepare_hw(hw, entry);
+	if (ret) {
+		pr_err("fail to prepare hw resource %d\n", ret);
+		return ret;
+	}
+
+	ret = mdss_rotator_config_hw(hw, entry);
+	if (ret) {
+		pr_err("fail to configure hw resource %d\n", ret);
+		return ret;
+	}
+
+	ret = mdss_rotator_kickoff_entry(hw, entry);
+	if (ret) {
+		pr_err("fail to do kickoff %d\n", ret);
+		return ret;
+	}
+
+	ret = mdss_rotator_wait_for_entry(hw, entry);
+	if (ret) {
+		pr_err("fail to wait for completion %d\n", ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+/*
+ * Process one entry end to end: wait for its input fence, map and
+ * validate the buffers, then commit it to the hw.
+ */
+static int mdss_rotator_handle_entry(struct mdss_rot_hw_resource *hw,
+	struct mdss_rot_entry *entry)
+{
+	int rc;
+
+	/* Block until the producer's input fence signals */
+	rc = mdss_rotator_wait_for_input(entry);
+	if (rc) {
+		pr_err("wait for input buffer failed %d\n", rc);
+		return rc;
+	}
+
+	/* Map src/dst buffers and sanity-check their layout */
+	rc = mdss_rotator_map_and_check_data(entry);
+	if (rc) {
+		pr_err("fail to prepare input/output data %d\n", rc);
+		return rc;
+	}
+
+	rc = mdss_rotator_commit_entry(hw, entry);
+	if (rc)
+		pr_err("rotator commit failed %d\n", rc);
+
+	return rc;
+}
+
+/*
+ * Workqueue handler: runs one queued rotation entry to completion.
+ * Regardless of success, the entry's output fence is signalled, the
+ * entry released and the request's pending_count decremented so that
+ * reapers always make progress.  Uses the global rot_mgr since the
+ * work item carries no manager pointer.
+ */
+static void mdss_rotator_wq_handler(struct work_struct *work)
+{
+	struct mdss_rot_entry *entry;
+	struct mdss_rot_entry_container *request;
+	struct mdss_rot_hw_resource *hw;
+	int ret;
+
+	entry = container_of(work, struct mdss_rot_entry, commit_work);
+	request = entry->request;
+
+	if (!request) {
+		pr_err("fatal error, no request with entry\n");
+		return;
+	}
+
+	hw = mdss_rotator_get_hw_resource(entry->queue, entry);
+	if (!hw) {
+		pr_err("no hw for the queue\n");
+		/* Still signal/release below so waiters are not stuck */
+		goto get_hw_res_err;
+	}
+
+	ret = mdss_rotator_handle_entry(hw, entry);
+	if (ret) {
+		struct mdp_rotation_item *item = &entry->item;
+
+		pr_err("Rot req fail. src{%u,%u,%u,%u}f=%u\n"
+		"dst{%u,%u,%u,%u}f=%u session_id=%u, wbidx%d, pipe_id=%d\n",
+		item->src_rect.x, item->src_rect.y,
+		item->src_rect.w, item->src_rect.h, item->input.format,
+		item->dst_rect.x, item->dst_rect.y,
+		item->dst_rect.w, item->dst_rect.h, item->output.format,
+		item->session_id, item->wb_idx, item->pipe_idx);
+	}
+
+	mdss_rotator_put_hw_resource(entry->queue, hw);
+
+get_hw_res_err:
+	mdss_rotator_signal_output(entry);
+	mdss_rotator_release_entry(rot_mgr, entry);
+	atomic_dec(&request->pending_count);
+}
+
+/*
+ * Validate every entry of a request without committing anything.
+ * Returns 0 when all entries pass, the first failure code otherwise.
+ */
+static int mdss_rotator_validate_request(struct mdss_rot_mgr *mgr,
+	struct mdss_rot_file_private *private,
+	struct mdss_rot_entry_container *req)
+{
+	int idx;
+
+	for (idx = 0; idx < req->count; idx++) {
+		int rc = mdss_rotator_validate_entry(mgr, private,
+			req->entries + idx);
+
+		if (rc) {
+			pr_err("fail to validate the entry\n");
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+/* Hand out the next session id; mgr->lock serializes the counter. */
+static u32 mdss_rotator_generator_session_id(struct mdss_rot_mgr *mgr)
+{
+	u32 session_id;
+
+	mutex_lock(&mgr->lock);
+	session_id = mgr->session_id_generator++;
+	mutex_unlock(&mgr->lock);
+
+	return session_id;
+}
+
+/*
+ * MDP_ROTATION_OPEN ioctl: create a rotation session.  Copies and
+ * validates the user's config, allocates per-session perf
+ * bookkeeping, assigns a session id (returned via copy_to_user),
+ * links the session on the file's perf list and takes rotator
+ * resource/clk/bus votes for it.  On failure each step is unwound in
+ * reverse order via the labels below.
+ */
+static int mdss_rotator_open_session(struct mdss_rot_mgr *mgr,
+	struct mdss_rot_file_private *private, unsigned long arg)
+{
+	struct mdp_rotation_config config;
+	struct mdss_rot_perf *perf;
+	int ret;
+
+	ret = copy_from_user(&config, (void __user *)arg, sizeof(config));
+	if (ret) {
+		pr_err("fail to copy session data\n");
+		return ret;
+	}
+
+	ret = mdss_rotator_verify_config(mgr, &config);
+	if (ret) {
+		pr_err("Rotator verify format failed\n");
+		return ret;
+	}
+
+	perf = devm_kzalloc(&mgr->pdev->dev, sizeof(*perf), GFP_KERNEL);
+	if (!perf)
+		return -ENOMEM;
+
+	ATRACE_BEGIN(__func__); /* Open session votes for bw */
+	perf->work_distribution = devm_kzalloc(&mgr->pdev->dev,
+		sizeof(u32) * mgr->queue_count, GFP_KERNEL);
+	if (!perf->work_distribution) {
+		ret = -ENOMEM;
+		goto alloc_err;
+	}
+
+	config.session_id = mdss_rotator_generator_session_id(mgr);
+	perf->config = config;
+	perf->last_wb_idx = -1;
+	mutex_init(&perf->work_dis_lock);
+
+	INIT_LIST_HEAD(&perf->list);
+
+	ret = mdss_rotator_calc_perf(perf);
+	if (ret) {
+		pr_err("error setting the session%d\n", ret);
+		goto copy_user_err;
+	}
+
+	/* Return the generated session_id to the caller.
+	 * Fix: annotate the user pointer with __user for sparse. */
+	ret = copy_to_user((void __user *)arg, &config, sizeof(config));
+	if (ret) {
+		pr_err("fail to copy to user\n");
+		goto copy_user_err;
+	}
+
+	mutex_lock(&private->perf_lock);
+	list_add(&perf->list, &private->perf_list);
+	mutex_unlock(&private->perf_lock);
+
+	ret = mdss_rotator_resource_ctrl(mgr, true);
+	if (ret) {
+		pr_err("Failed to aqcuire rotator resources\n");
+		goto resource_err;
+	}
+
+	/* Consistency fix: operate on mgr, not the global rot_mgr */
+	mdss_rotator_clk_ctrl(mgr, true);
+	ret = mdss_rotator_update_perf(mgr);
+	if (ret) {
+		pr_err("fail to open session, not enough clk/bw\n");
+		goto perf_err;
+	}
+	pr_debug("open session id=%u in{%u,%u}f:%u out{%u,%u}f:%u\n",
+		config.session_id, config.input.width, config.input.height,
+		config.input.format, config.output.width, config.output.height,
+		config.output.format);
+
+	goto done;
+perf_err:
+	mdss_rotator_clk_ctrl(mgr, false);
+	mdss_rotator_resource_ctrl(mgr, false);
+resource_err:
+	mutex_lock(&private->perf_lock);
+	list_del_init(&perf->list);
+	mutex_unlock(&private->perf_lock);
+copy_user_err:
+	devm_kfree(&mgr->pdev->dev, perf->work_distribution);
+alloc_err:
+	devm_kfree(&mgr->pdev->dev, perf);
+done:
+	ATRACE_END(__func__);
+	return ret;
+}
+
+/*
+ * MDP_ROTATION_CLOSE ioctl: tear down session @arg.  If work items
+ * are still in flight, the free (and the bw vote) is deferred to
+ * mdss_rotator_release_from_work_distribution(), which runs when the
+ * last pending item completes; the perf is unlinked here so no new
+ * work can reference it.
+ */
+static int mdss_rotator_close_session(struct mdss_rot_mgr *mgr,
+	struct mdss_rot_file_private *private, unsigned long arg)
+{
+	struct mdss_rot_perf *perf;
+	bool offload_release_work = false;
+	u32 id;
+
+	id = (u32)arg;
+	mutex_lock(&mgr->lock);
+	mutex_lock(&private->perf_lock);
+	perf = __mdss_rotator_find_session(private, id);
+	if (!perf) {
+		mutex_unlock(&private->perf_lock);
+		mutex_unlock(&mgr->lock);
+		pr_err("Trying to close session that does not exist\n");
+		return -EINVAL;
+	}
+
+	ATRACE_BEGIN(__func__);
+	mutex_lock(&perf->work_dis_lock);
+	if (mdss_rotator_is_work_pending(mgr, perf)) {
+		pr_debug("Work is still pending, offload free to wq\n");
+		mutex_lock(&mgr->bus_lock);
+		mgr->pending_close_bw_vote += perf->bw;
+		mutex_unlock(&mgr->bus_lock);
+		offload_release_work = true;
+	}
+	list_del_init(&perf->list);
+	mutex_unlock(&perf->work_dis_lock);
+	mutex_unlock(&private->perf_lock);
+
+	if (offload_release_work)
+		goto done;
+
+	mdss_rotator_resource_ctrl(mgr, false);
+	devm_kfree(&mgr->pdev->dev, perf->work_distribution);
+	devm_kfree(&mgr->pdev->dev, perf);
+	mdss_rotator_update_perf(mgr);
+	/* Consistency fix: use mgr (was the global rot_mgr) */
+	mdss_rotator_clk_ctrl(mgr, false);
+done:
+	/* Fix: newline was missing from this debug print */
+	pr_debug("Closed session id:%u\n", id);
+	ATRACE_END(__func__);
+	mutex_unlock(&mgr->lock);
+	return 0;
+}
+
+/*
+ * MDP_ROTATION_CONFIG ioctl: re-validate and apply a new
+ * configuration to an existing session, then recompute its perf
+ * numbers and the aggregate bus vote.
+ */
+static int mdss_rotator_config_session(struct mdss_rot_mgr *mgr,
+	struct mdss_rot_file_private *private, unsigned long arg)
+{
+	int ret = 0;
+	struct mdss_rot_perf *perf;
+	struct mdp_rotation_config config;
+
+	ret = copy_from_user(&config, (void __user *)arg,
+				sizeof(config));
+	if (ret) {
+		pr_err("fail to copy session data\n");
+		return ret;
+	}
+
+	ret = mdss_rotator_verify_config(mgr, &config);
+	if (ret) {
+		pr_err("Rotator verify format failed\n");
+		return ret;
+	}
+
+	mutex_lock(&mgr->lock);
+	perf = mdss_rotator_find_session(private, config.session_id);
+	if (!perf) {
+		pr_err("No session with id=%u could be found\n",
+			config.session_id);
+		mutex_unlock(&mgr->lock);
+		return -EINVAL;
+	}
+
+	ATRACE_BEGIN(__func__);
+	/* perf_lock guards both the stored config and derived numbers */
+	mutex_lock(&private->perf_lock);
+	perf->config = config;
+	ret = mdss_rotator_calc_perf(perf);
+	mutex_unlock(&private->perf_lock);
+
+	if (ret) {
+		pr_err("error in configuring the session %d\n", ret);
+		goto done;
+	}
+
+	ret = mdss_rotator_update_perf(mgr);
+
+	pr_debug("reconfig session id=%u in{%u,%u}f:%u out{%u,%u}f:%u\n",
+		config.session_id, config.input.width, config.input.height,
+		config.input.format, config.output.width, config.output.height,
+		config.output.format);
+done:
+	ATRACE_END(__func__);
+	mutex_unlock(&mgr->lock);
+	return ret;
+}
+
+/*
+ * Allocate and initialize a request container holding @count entries.
+ * The entry array is co-allocated directly behind the container.
+ * Returns an ERR_PTR on invalid plane counts or allocation failure.
+ */
+struct mdss_rot_entry_container *mdss_rotator_req_init(
+	struct mdss_rot_mgr *mgr, struct mdp_rotation_item *items,
+	u32 count, u32 flags)
+{
+	struct mdss_rot_entry_container *req;
+	int total_size, idx;
+
+	/*
+	 * Check input and output plane_count from each given item
+	 * are within the MAX_PLANES limit
+	 */
+	for (idx = 0; idx < count; idx++) {
+		if ((items[idx].input.plane_count > MAX_PLANES) ||
+				(items[idx].output.plane_count > MAX_PLANES)) {
+			pr_err("Input/Output plane_count exceeds MAX_PLANES limit, input:%d, output:%d\n",
+					items[idx].input.plane_count,
+					items[idx].output.plane_count);
+			return ERR_PTR(-EINVAL);
+		}
+	}
+
+	total_size = sizeof(struct mdss_rot_entry_container) +
+		sizeof(struct mdss_rot_entry) * count;
+	req = devm_kzalloc(&mgr->pdev->dev, total_size, GFP_KERNEL);
+	if (!req)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&req->list);
+	req->count = count;
+	/* Entry array sits immediately after the container */
+	req->entries = (struct mdss_rot_entry *)(req + 1);
+	req->flags = flags;
+	atomic_set(&req->pending_count, count);
+
+	for (idx = 0; idx < count; idx++)
+		req->entries[idx].item = items[idx];
+
+	return req;
+}
+
+/*
+ * Reap finished requests, register the new one, and mirror the
+ * kernel-assigned output fences back into the caller's item array so
+ * they can be copied to userspace in one call.  On add failure the
+ * request's entries are released; the caller still owns the container.
+ */
+static int mdss_rotator_handle_request_common(struct mdss_rot_mgr *mgr,
+	struct mdss_rot_file_private *private,
+	struct mdss_rot_entry_container *req,
+	struct mdp_rotation_item *items)
+{
+	int rc, idx;
+
+	mdss_rotator_free_competed_request(mgr, private);
+
+	rc = mdss_rotator_add_request(mgr, private, req);
+	if (rc) {
+		pr_err("fail to add rotation request\n");
+		mdss_rotator_remove_request(mgr, private, req);
+		return rc;
+	}
+
+	for (idx = 0; idx < req->count; idx++)
+		items[idx].output.fence =
+			req->entries[idx].item.output.fence;
+
+	return 0;
+}
+
+/*
+ * MDP_ROTATION_REQUEST ioctl: validate, register and queue a batch of
+ * rotation items, returning the per-item output fences to userspace.
+ */
+static int mdss_rotator_handle_request(struct mdss_rot_mgr *mgr,
+	struct mdss_rot_file_private *private, unsigned long arg)
+{
+	struct mdp_rotation_request user_req;
+	struct mdp_rotation_item *items = NULL;
+	struct mdss_rot_entry_container *req = NULL;
+	int size, ret;
+	uint32_t req_count;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (mdata->handoff_pending) {
+		pr_err("Rotator request failed. Handoff pending\n");
+		return -EPERM;
+	}
+
+	if (mdss_get_sd_client_cnt()) {
+		pr_err("rot request not permitted during secure display session\n");
+		return -EPERM;
+	}
+
+	ret = copy_from_user(&user_req, (void __user *)arg,
+					sizeof(user_req));
+	if (ret) {
+		pr_err("fail to copy rotation request\n");
+		return ret;
+	}
+
+	req_count = user_req.count;
+	if ((!req_count) || (req_count > MAX_LAYER_COUNT)) {
+		pr_err("invalid rotator req count :%d\n", req_count);
+		return -EINVAL;
+	}
+
+	/*
+	 * here, we make a copy of the items so that we can copy
+	 * all the output fences to the client in one call.   Otherwise,
+	 * we will have to call multiple copy_to_user
+	 */
+	size = sizeof(struct mdp_rotation_item) * req_count;
+	items = devm_kzalloc(&mgr->pdev->dev, size, GFP_KERNEL);
+	if (!items) {
+		pr_err("fail to allocate rotation items\n");
+		return -ENOMEM;
+	}
+	ret = copy_from_user(items, user_req.list, size);
+	if (ret) {
+		pr_err("fail to copy rotation items\n");
+		goto handle_request_err;
+	}
+
+	req = mdss_rotator_req_init(mgr, items, user_req.count, user_req.flags);
+	if (IS_ERR_OR_NULL(req)) {
+		pr_err("fail to allocate rotation request\n");
+		ret = PTR_ERR(req);
+		goto handle_request_err;
+	}
+
+	mutex_lock(&mgr->lock);
+
+	/* Validate-only requests are checked and then discarded */
+	if (req->flags & MDSS_ROTATION_REQUEST_VALIDATE) {
+		ret = mdss_rotator_validate_request(mgr, private, req);
+		goto handle_request_err1;
+	}
+
+	ret = mdss_rotator_handle_request_common(mgr, private, req, items);
+	if (ret) {
+		pr_err("fail to handle request\n");
+		goto handle_request_err1;
+	}
+
+	/* Publish the output fences (written into items) to the user */
+	ret = copy_to_user(user_req.list, items, size);
+	if (ret) {
+		pr_err("fail to copy output fence to user\n");
+		mdss_rotator_remove_request(mgr, private, req);
+		goto handle_request_err1;
+	}
+
+	mdss_rotator_install_fence_fd(req);
+	mdss_rotator_queue_request(mgr, private, req);
+
+	mutex_unlock(&mgr->lock);
+
+	devm_kfree(&mgr->pdev->dev, items);
+	return ret;
+
+handle_request_err1:
+	mutex_unlock(&mgr->lock);
+handle_request_err:
+	devm_kfree(&mgr->pdev->dev, items);
+	/*
+	 * Bug fix: req may be NULL (items copy failed) or an ERR_PTR
+	 * (mdss_rotator_req_init failed); freeing it unconditionally
+	 * passed an invalid pointer to devm_kfree on those paths.
+	 */
+	if (!IS_ERR_OR_NULL(req))
+		devm_kfree(&mgr->pdev->dev, req);
+	return ret;
+}
+
+/*
+ * mdss_rotator_open() - open() handler for the rotator device node
+ *
+ * Allocates a per-file private context, initializes its locks and lists,
+ * and registers it on the global manager's file list so that later
+ * ioctl/close calls can validate the handle against known sessions.
+ */
+static int mdss_rotator_open(struct inode *inode, struct file *file)
+{
+	struct mdss_rot_file_private *private;
+
+	/* driver not probed yet (rot_mgr is assigned in probe) */
+	if (!rot_mgr)
+		return -ENODEV;
+
+	/* no new sessions while the device is suspended */
+	if (atomic_read(&rot_mgr->device_suspended))
+		return -EPERM;
+
+	private = devm_kzalloc(&rot_mgr->pdev->dev, sizeof(*private),
+		GFP_KERNEL);
+	if (!private)
+		return -ENOMEM;
+
+	mutex_init(&private->req_lock);
+	mutex_init(&private->perf_lock);
+	INIT_LIST_HEAD(&private->req_list);
+	INIT_LIST_HEAD(&private->perf_list);
+	INIT_LIST_HEAD(&private->list);
+
+	/* publish under file_lock so lookups never see a half-built entry */
+	mutex_lock(&rot_mgr->file_lock);
+	list_add(&private->list, &rot_mgr->file_list);
+	file->private_data = private;
+	private->file = file;
+	mutex_unlock(&rot_mgr->file_lock);
+
+	return 0;
+}
+
+/*
+ * mdss_rotator_file_priv_allowed() - check that @priv is a live session
+ * @mgr:  global rotator manager
+ * @priv: candidate context taken from file->private_data
+ *
+ * Walks the manager's open-file list under file_lock and returns true
+ * only if @priv is still registered there; guards ioctl/close against
+ * stale or forged private_data pointers.
+ *
+ * The list is not modified during the walk, so the plain
+ * list_for_each_entry() iterator is sufficient (the original used the
+ * _safe variant unnecessarily).
+ */
+static bool mdss_rotator_file_priv_allowed(struct mdss_rot_mgr *mgr,
+		struct mdss_rot_file_private *priv)
+{
+	struct mdss_rot_file_private *_priv;
+	bool ret = false;
+
+	mutex_lock(&mgr->file_lock);
+	list_for_each_entry(_priv, &mgr->file_list, list) {
+		if (_priv == priv) {
+			ret = true;
+			break;
+		}
+	}
+	mutex_unlock(&mgr->file_lock);
+	return ret;
+}
+
+/*
+ * mdss_rotator_close() - release() handler for the rotator device node
+ *
+ * Validates that the private context is still registered on the global
+ * file list, releases its perf sessions, then unlinks and frees it and
+ * recomputes the aggregate performance votes.
+ */
+static int mdss_rotator_close(struct inode *inode, struct file *file)
+{
+	struct mdss_rot_file_private *private;
+
+	if (!rot_mgr)
+		return -ENODEV;
+
+	if (!file->private_data)
+		return -EINVAL;
+
+	private = (struct mdss_rot_file_private *)file->private_data;
+
+	/* reject handles that were never opened through this driver */
+	if (!(mdss_rotator_file_priv_allowed(rot_mgr, private))) {
+		pr_err("Calling close with unrecognized rot_file_private\n");
+		return -EINVAL;
+	}
+
+	mdss_rotator_release_rotator_perf_session(rot_mgr, private);
+
+	mutex_lock(&rot_mgr->file_lock);
+	list_del_init(&private->list);
+	devm_kfree(&rot_mgr->pdev->dev, private);
+	file->private_data = NULL;
+	mutex_unlock(&rot_mgr->file_lock);
+
+	/* recompute bandwidth/clock votes now that this session is gone */
+	mdss_rotator_update_perf(rot_mgr);
+	return 0;
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * mdss_rotator_handle_request32() - 32-bit compat rotation request ioctl
+ * @mgr:     global rotator manager
+ * @private: per-file rotator session
+ * @arg:     userspace pointer to a struct mdp_rotation_request32
+ *
+ * Copies the compat request and its item list in, validates (or queues)
+ * the request, then copies the per-item output fences back to userspace
+ * in one call.  Returns 0 on success or a negative errno.
+ *
+ * Fixes over the original:
+ *  - copy_from_user()/copy_to_user() return the number of bytes NOT
+ *    copied, not an errno; failures are now reported as -EFAULT.
+ *  - mdss_rotator_req_init() returning NULL produced PTR_ERR(NULL) == 0,
+ *    i.e. a bogus success return; now mapped to -ENOMEM.
+ *  - the early error label no longer devm_kfree()s a NULL/ERR_PTR req.
+ */
+static int mdss_rotator_handle_request32(struct mdss_rot_mgr *mgr,
+	struct mdss_rot_file_private *private, unsigned long arg)
+{
+	struct mdp_rotation_request32 user_req32;
+	struct mdp_rotation_item *items = NULL;
+	struct mdss_rot_entry_container *req = NULL;
+	int size, ret;
+	uint32_t req_count;
+
+	if (mdss_get_sd_client_cnt()) {
+		pr_err("rot request not permitted during secure display session\n");
+		return -EPERM;
+	}
+
+	ret = copy_from_user(&user_req32, (void __user *)arg,
+					sizeof(user_req32));
+	if (ret) {
+		pr_err("fail to copy rotation request\n");
+		return -EFAULT;
+	}
+
+	req_count = user_req32.count;
+	if ((!req_count) || (req_count > MAX_LAYER_COUNT)) {
+		pr_err("invalid rotator req count :%d\n", req_count);
+		return -EINVAL;
+	}
+
+	/* local copy so all output fences go back in one copy_to_user() */
+	size = sizeof(struct mdp_rotation_item) * req_count;
+	items = devm_kzalloc(&mgr->pdev->dev, size, GFP_KERNEL);
+	if (!items) {
+		pr_err("fail to allocate rotation items\n");
+		return -ENOMEM;
+	}
+	ret = copy_from_user(items, compat_ptr(user_req32.list), size);
+	if (ret) {
+		pr_err("fail to copy rotation items\n");
+		ret = -EFAULT;
+		goto handle_request32_err;
+	}
+
+	req = mdss_rotator_req_init(mgr, items, user_req32.count,
+		user_req32.flags);
+	if (IS_ERR_OR_NULL(req)) {
+		pr_err("fail to allocate rotation request\n");
+		ret = req ? PTR_ERR(req) : -ENOMEM;
+		req = NULL;
+		goto handle_request32_err;
+	}
+
+	mutex_lock(&mgr->lock);
+
+	/* validate-only requests are checked and then discarded */
+	if (req->flags & MDSS_ROTATION_REQUEST_VALIDATE) {
+		ret = mdss_rotator_validate_request(mgr, private, req);
+		goto handle_request32_err1;
+	}
+
+	ret = mdss_rotator_handle_request_common(mgr, private, req, items);
+	if (ret) {
+		pr_err("fail to handle request\n");
+		goto handle_request32_err1;
+	}
+
+	ret = copy_to_user(compat_ptr(user_req32.list), items, size);
+	if (ret) {
+		pr_err("fail to copy output fence to user\n");
+		ret = -EFAULT;
+		mdss_rotator_remove_request(mgr, private, req);
+		goto handle_request32_err1;
+	}
+
+	mdss_rotator_install_fence_fd(req);
+	mdss_rotator_queue_request(mgr, private, req);
+
+	mutex_unlock(&mgr->lock);
+
+	devm_kfree(&mgr->pdev->dev, items);
+	return ret;
+
+handle_request32_err1:
+	mutex_unlock(&mgr->lock);
+	devm_kfree(&mgr->pdev->dev, req);
+handle_request32_err:
+	devm_kfree(&mgr->pdev->dev, items);
+	return ret;
+}
+
+/* Translate 32-bit compat rotator ioctl numbers into their native forms;
+ * unknown commands pass through unchanged.
+ */
+static unsigned int __do_compat_ioctl_rot(unsigned int cmd32)
+{
+	switch (cmd32) {
+	case MDSS_ROTATION_REQUEST32:
+		return MDSS_ROTATION_REQUEST;
+	case MDSS_ROTATION_OPEN32:
+		return MDSS_ROTATION_OPEN;
+	case MDSS_ROTATION_CLOSE32:
+		return MDSS_ROTATION_CLOSE;
+	case MDSS_ROTATION_CONFIG32:
+		return MDSS_ROTATION_CONFIG;
+	default:
+		return cmd32;
+	}
+}
+
+/*
+ * mdss_rotator_compat_ioctl() - 32-bit compat ioctl entry point
+ *
+ * Mirrors mdss_rotator_ioctl() but first translates compat command
+ * numbers and dispatches the request ioctl to the 32-bit unpacking path.
+ */
+static long mdss_rotator_compat_ioctl(struct file *file, unsigned int cmd,
+	unsigned long arg)
+{
+	struct mdss_rot_file_private *private;
+	int ret = -EINVAL;
+
+	if (!rot_mgr)
+		return -ENODEV;
+
+	if (atomic_read(&rot_mgr->device_suspended))
+		return -EPERM;
+
+	if (!file->private_data)
+		return -EINVAL;
+
+	private = (struct mdss_rot_file_private *)file->private_data;
+
+	/* reject handles that are not on the manager's open-file list */
+	if (!(mdss_rotator_file_priv_allowed(rot_mgr, private))) {
+		pr_err("Calling ioctl with unrecognized rot_file_private\n");
+		return -EINVAL;
+	}
+
+	/* map 32-bit command codes onto the native ones */
+	cmd = __do_compat_ioctl_rot(cmd);
+
+	switch (cmd) {
+	case MDSS_ROTATION_REQUEST:
+		ATRACE_BEGIN("rotator_request32");
+		ret = mdss_rotator_handle_request32(rot_mgr, private, arg);
+		ATRACE_END("rotator_request32");
+		break;
+	case MDSS_ROTATION_OPEN:
+		ret = mdss_rotator_open_session(rot_mgr, private, arg);
+		break;
+	case MDSS_ROTATION_CLOSE:
+		ret = mdss_rotator_close_session(rot_mgr, private, arg);
+		break;
+	case MDSS_ROTATION_CONFIG:
+		ret = mdss_rotator_config_session(rot_mgr, private, arg);
+		break;
+	default:
+		pr_err("unexpected IOCTL %d\n", cmd);
+	}
+
+	if (ret)
+		pr_err("rotator ioctl=%d failed, err=%d\n", cmd, ret);
+	return ret;
+
+}
+#endif
+
+/*
+ * mdss_rotator_ioctl() - native ioctl entry point for the rotator node
+ *
+ * Validates the driver/session state, then dispatches to the request,
+ * open-session, close-session or config-session handler.
+ */
+static long mdss_rotator_ioctl(struct file *file, unsigned int cmd,
+						 unsigned long arg)
+{
+	struct mdss_rot_file_private *private;
+	int ret = -EINVAL;
+
+	if (!rot_mgr)
+		return -ENODEV;
+
+	if (atomic_read(&rot_mgr->device_suspended))
+		return -EPERM;
+
+	if (!file->private_data)
+		return -EINVAL;
+
+	private = (struct mdss_rot_file_private *)file->private_data;
+
+	/* reject handles that are not on the manager's open-file list */
+	if (!(mdss_rotator_file_priv_allowed(rot_mgr, private))) {
+		pr_err("Calling ioctl with unrecognized rot_file_private\n");
+		return -EINVAL;
+	}
+
+	switch (cmd) {
+	case MDSS_ROTATION_REQUEST:
+		ATRACE_BEGIN("rotator_request");
+		ret = mdss_rotator_handle_request(rot_mgr, private, arg);
+		ATRACE_END("rotator_request");
+		break;
+	case MDSS_ROTATION_OPEN:
+		ret = mdss_rotator_open_session(rot_mgr, private, arg);
+		break;
+	case MDSS_ROTATION_CLOSE:
+		ret = mdss_rotator_close_session(rot_mgr, private, arg);
+		break;
+	case MDSS_ROTATION_CONFIG:
+		ret = mdss_rotator_config_session(rot_mgr, private, arg);
+		break;
+	default:
+		pr_err("unexpected IOCTL %d\n", cmd);
+	}
+
+	if (ret)
+		pr_err("rotator ioctl=%d failed, err=%d\n", cmd, ret);
+	return ret;
+}
+
+/*
+ * mdss_rotator_show_capabilities() - sysfs 'caps' read handler
+ *
+ * Reports the writeback queue count and downscale support parsed from DT.
+ * Returns 0 (empty read) if the driver has not been probed yet.
+ *
+ * Fix: the SPRINT() helper macro is now #undef'd after use so it cannot
+ * leak into (or collide with) the rest of the translation unit.
+ */
+static ssize_t mdss_rotator_show_capabilities(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	size_t len = PAGE_SIZE;
+	int cnt = 0;
+
+	if (!rot_mgr)
+		return cnt;
+
+#define SPRINT(fmt, ...) \
+		(cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
+
+	SPRINT("wb_count=%d\n", rot_mgr->queue_count);
+	SPRINT("downscale=%d\n", rot_mgr->has_downscale);
+
+#undef SPRINT
+
+	return cnt;
+}
+
+/* read-only sysfs node exposing rotator capabilities (wb count, downscale) */
+static DEVICE_ATTR(caps, 0444, mdss_rotator_show_capabilities, NULL);
+
+static struct attribute *mdss_rotator_fs_attrs[] = {
+	&dev_attr_caps.attr,
+	NULL
+};
+
+static struct attribute_group mdss_rotator_fs_attr_group = {
+	.attrs = mdss_rotator_fs_attrs
+};
+
+/* character-device entry points for the rotator /dev node */
+static const struct file_operations mdss_rotator_fops = {
+	.owner = THIS_MODULE,
+	.open = mdss_rotator_open,
+	.release = mdss_rotator_close,
+	.unlocked_ioctl = mdss_rotator_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = mdss_rotator_compat_ioctl,
+#endif
+};
+
+/*
+ * mdss_rotator_parse_dt_bus() - parse data/register bus scaling tables
+ *
+ * The data bus table always comes from msm_bus_cl_get_pdata().  The
+ * register bus table is optional ("qcom,mdss-has-reg-bus"): it is read
+ * from the "qcom,mdss-rot-reg-bus" child node when present, otherwise a
+ * built-in default table (rot_reg_bus_scale_table) is wired up.
+ */
+static int mdss_rotator_parse_dt_bus(struct mdss_rot_mgr *mgr,
+	struct platform_device *dev)
+{
+	struct device_node *node;
+	int ret = 0, i;
+	bool register_bus_needed;
+	int usecases;
+
+	mgr->data_bus.bus_scale_pdata = msm_bus_cl_get_pdata(dev);
+	if (IS_ERR_OR_NULL(mgr->data_bus.bus_scale_pdata)) {
+		ret = PTR_ERR(mgr->data_bus.bus_scale_pdata);
+		/* NULL pdata yields PTR_ERR == 0; force an error code */
+		if (!ret) {
+			ret = -EINVAL;
+			pr_err("msm_bus_cl_get_pdata failed. ret=%d\n", ret);
+			mgr->data_bus.bus_scale_pdata = NULL;
+		}
+	}
+
+	register_bus_needed = of_property_read_bool(dev->dev.of_node,
+		"qcom,mdss-has-reg-bus");
+	if (register_bus_needed) {
+		node = of_get_child_by_name(
+			    dev->dev.of_node, "qcom,mdss-rot-reg-bus");
+		if (!node) {
+			/* no DT node: fall back to the static default table */
+			mgr->reg_bus.bus_scale_pdata = &rot_reg_bus_scale_table;
+			usecases = mgr->reg_bus.bus_scale_pdata->num_usecases;
+			for (i = 0; i < usecases; i++) {
+				rot_reg_bus_usecases[i].num_paths = 1;
+				rot_reg_bus_usecases[i].vectors =
+					&rot_reg_bus_vectors[i];
+			}
+		} else {
+			mgr->reg_bus.bus_scale_pdata =
+				msm_bus_pdata_from_node(dev, node);
+			if (IS_ERR_OR_NULL(mgr->reg_bus.bus_scale_pdata)) {
+				ret = PTR_ERR(mgr->reg_bus.bus_scale_pdata);
+				if (!ret)
+					ret = -EINVAL;
+				pr_err("reg_rot_bus failed rc=%d\n", ret);
+				mgr->reg_bus.bus_scale_pdata = NULL;
+			}
+		}
+	}
+	return ret;
+}
+
+/*
+ * mdss_rotator_parse_dt() - parse rotator DT properties into @mgr
+ *
+ * Reads the writeback-block count and feature flags, then delegates the
+ * bus scaling tables to mdss_rotator_parse_dt_bus().
+ *
+ * Fix: the original wrote through the rot_mgr global instead of the @mgr
+ * parameter for queue_count/has_downscale/has_ubwc; it now uses @mgr
+ * consistently (identical behavior, since the only caller passes rot_mgr).
+ */
+static int mdss_rotator_parse_dt(struct mdss_rot_mgr *mgr,
+	struct platform_device *dev)
+{
+	int ret = 0;
+	u32 data;
+
+	ret = of_property_read_u32(dev->dev.of_node,
+		"qcom,mdss-wb-count", &data);
+	if (ret) {
+		pr_err("Error in device tree\n");
+		return ret;
+	}
+	if (data > ROT_MAX_HW_BLOCKS) {
+		pr_err("Err, num of wb block (%d) larger than sw max %d\n",
+			data, ROT_MAX_HW_BLOCKS);
+		return -EINVAL;
+	}
+
+	mgr->queue_count = data;
+	mgr->has_downscale = of_property_read_bool(dev->dev.of_node,
+					   "qcom,mdss-has-downscale");
+	mgr->has_ubwc = of_property_read_bool(dev->dev.of_node,
+					   "qcom,mdss-has-ubwc");
+
+	ret = mdss_rotator_parse_dt_bus(mgr, dev);
+	if (ret)
+		pr_err("Failed to parse bus data\n");
+
+	return ret;
+}
+
+/*
+ * mdss_rotator_put_dt_vreg_data() - undo mdss_rotator_get_dt_vreg_data()
+ *
+ * Deconfigures the regulators and frees the vreg table.
+ */
+static void mdss_rotator_put_dt_vreg_data(struct device *dev,
+	struct dss_module_power *mp)
+{
+	if (!mp) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	/* third arg 0 => deconfigure/release the regulators */
+	msm_dss_config_vreg(dev, mp->vreg_config, mp->num_vreg, 0);
+	if (mp->vreg_config) {
+		devm_kfree(dev, mp->vreg_config);
+		mp->vreg_config = NULL;
+	}
+	mp->num_vreg = 0;
+}
+
+/*
+ * mdss_rotator_get_dt_vreg_data() - parse and configure rotator regulators
+ * @dev: rotator device
+ * @mp:  module power struct to populate with the vreg table
+ *
+ * Reads "qcom,supply-names" from DT, allocates the vreg table and runs
+ * msm_dss_config_vreg() on it.  A missing/empty property is treated as
+ * "no regulators" and returns success (preserving the original
+ * best-effort behavior).  Returns 0 on success or a negative errno.
+ *
+ * Fixes over the original:
+ *  - rc was uninitialized and returned as-is when no name loop iteration
+ *    executed (undefined behavior); it now starts at 0.
+ *  - a zero string count no longer reaches the zero-size allocation.
+ *  - num_vreg is reset if the table allocation fails.
+ */
+static int mdss_rotator_get_dt_vreg_data(struct device *dev,
+	struct dss_module_power *mp)
+{
+	const char *st = NULL;
+	struct device_node *of_node = NULL;
+	int dt_vreg_total = 0;
+	int i;
+	int rc = 0;
+
+	if (!dev || !mp) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	of_node = dev->of_node;
+
+	dt_vreg_total = of_property_count_strings(of_node, "qcom,supply-names");
+	if (dt_vreg_total <= 0) {
+		DEV_ERR("%s: vreg not found. rc=%d\n", __func__,
+			dt_vreg_total);
+		return 0;
+	}
+	mp->num_vreg = dt_vreg_total;
+	mp->vreg_config = devm_kzalloc(dev, sizeof(struct dss_vreg) *
+		dt_vreg_total, GFP_KERNEL);
+	if (!mp->vreg_config) {
+		DEV_ERR("%s: can't alloc vreg mem\n", __func__);
+		mp->num_vreg = 0;
+		return -ENOMEM;
+	}
+
+	/* vreg-name */
+	for (i = 0; i < dt_vreg_total; i++) {
+		rc = of_property_read_string_index(of_node,
+			"qcom,supply-names", i, &st);
+		if (rc) {
+			DEV_ERR("%s: error reading name. i=%d, rc=%d\n",
+				__func__, i, rc);
+			goto error;
+		}
+		snprintf(mp->vreg_config[i].vreg_name, 32, "%s", st);
+	}
+	msm_dss_config_vreg(dev, mp->vreg_config, mp->num_vreg, 1);
+
+	for (i = 0; i < dt_vreg_total; i++) {
+		DEV_DBG("%s: %s min=%d, max=%d, enable=%d disable=%d\n",
+			__func__,
+			mp->vreg_config[i].vreg_name,
+			mp->vreg_config[i].min_voltage,
+			mp->vreg_config[i].max_voltage,
+			mp->vreg_config[i].load[DSS_REG_MODE_ENABLE],
+			mp->vreg_config[i].load[DSS_REG_MODE_DISABLE]);
+	}
+	return rc;
+
+error:
+	if (mp->vreg_config) {
+		devm_kfree(dev, mp->vreg_config);
+		mp->vreg_config = NULL;
+	}
+	mp->num_vreg = 0;
+	return rc;
+}
+
+/* Release any registered data/register bus-scale client handles. */
+static void mdss_rotator_bus_scale_unregister(struct mdss_rot_mgr *mgr)
+{
+	struct mdss_rot_bus_data_type *data = &mgr->data_bus;
+	struct mdss_rot_bus_data_type *reg = &mgr->reg_bus;
+
+	pr_debug("unregister bus_hdl=%x, reg_bus_hdl=%x\n",
+		data->bus_hdl, reg->bus_hdl);
+
+	if (data->bus_hdl)
+		msm_bus_scale_unregister_client(data->bus_hdl);
+
+	if (reg->bus_hdl)
+		msm_bus_scale_unregister_client(reg->bus_hdl);
+}
+
+/*
+ * mdss_rotator_bus_scale_register() - register data/reg bus-scale clients
+ *
+ * The data bus table is mandatory; the register bus table is optional.
+ * If registering the register bus client fails, the already-registered
+ * data bus client is unregistered before returning.
+ */
+static int mdss_rotator_bus_scale_register(struct mdss_rot_mgr *mgr)
+{
+	if (!mgr->data_bus.bus_scale_pdata) {
+		pr_err("Scale table is NULL\n");
+		return -EINVAL;
+	}
+
+	mgr->data_bus.bus_hdl =
+		msm_bus_scale_register_client(
+		mgr->data_bus.bus_scale_pdata);
+	if (!mgr->data_bus.bus_hdl) {
+		pr_err("bus_client register failed\n");
+		return -EINVAL;
+	}
+	pr_debug("registered bus_hdl=%x\n", mgr->data_bus.bus_hdl);
+
+	if (mgr->reg_bus.bus_scale_pdata) {
+		mgr->reg_bus.bus_hdl =
+			msm_bus_scale_register_client(
+			mgr->reg_bus.bus_scale_pdata);
+		if (!mgr->reg_bus.bus_hdl) {
+			pr_err("register bus_client register failed\n");
+			/* roll back the data bus registration */
+			mdss_rotator_bus_scale_unregister(mgr);
+			return -EINVAL;
+		}
+		pr_debug("registered register bus_hdl=%x\n",
+			mgr->reg_bus.bus_hdl);
+	}
+
+	return 0;
+}
+
+/*
+ * mdss_rotator_clk_register() - look up @clk_name and store it at @clk_idx
+ *
+ * Returns -EINVAL for an out-of-range index or an already-populated slot,
+ * or the devm_clk_get() error code on lookup failure.
+ */
+static int mdss_rotator_clk_register(struct platform_device *pdev,
+	struct mdss_rot_mgr *mgr, char *clk_name, u32 clk_idx)
+{
+	struct clk *tmp;
+
+	pr_debug("registered clk_reg\n");
+
+	if (clk_idx >= MDSS_CLK_ROTATOR_END_IDX) {
+		pr_err("invalid clk index %d\n", clk_idx);
+		return -EINVAL;
+	}
+
+	/* refuse to silently overwrite a previously registered clock */
+	if (mgr->rot_clk[clk_idx]) {
+		pr_err("Stomping on clk prev registered:%d\n", clk_idx);
+		return -EINVAL;
+	}
+
+	tmp = devm_clk_get(&pdev->dev, clk_name);
+	if (IS_ERR(tmp)) {
+		pr_err("unable to get clk: %s\n", clk_name);
+		return PTR_ERR(tmp);
+	}
+	mgr->rot_clk[clk_idx] = tmp;
+	return 0;
+}
+
+/*
+ * mdss_rotator_res_init() - acquire regulators, clocks and bus handles
+ *
+ * On any failure after the vreg parse, the vreg data is released again;
+ * clock handles are devm-managed and need no explicit unwind here.
+ */
+static int mdss_rotator_res_init(struct platform_device *pdev,
+	struct mdss_rot_mgr *mgr)
+{
+	int ret;
+
+	ret = mdss_rotator_get_dt_vreg_data(&pdev->dev, &mgr->module_power);
+	if (ret)
+		return ret;
+
+	ret = mdss_rotator_clk_register(pdev, mgr,
+		"iface_clk", MDSS_CLK_ROTATOR_AHB);
+	if (ret)
+		goto error;
+
+	ret = mdss_rotator_clk_register(pdev, mgr,
+		"rot_core_clk", MDSS_CLK_ROTATOR_CORE);
+	if (ret)
+		goto error;
+
+	ret = mdss_rotator_bus_scale_register(mgr);
+	if (ret)
+		goto error;
+
+	return 0;
+error:
+	mdss_rotator_put_dt_vreg_data(&pdev->dev, &mgr->module_power);
+	return ret;
+}
+
+/*
+ * mdss_rotator_probe() - platform probe for the MDSS rotator
+ *
+ * Allocates the global manager, parses DT, sets up the request queues,
+ * creates the character device and sysfs nodes, then registers clocks
+ * and bus scaling.  On failure everything is unwound in reverse order.
+ *
+ * Fix: the sysfs attribute group is now removed on the res_init failure
+ * path (the original leaked it); its creation result is tracked so a
+ * failed creation is not "removed".
+ */
+static int mdss_rotator_probe(struct platform_device *pdev)
+{
+	int ret;
+	int sysfs_ret;
+
+	rot_mgr = devm_kzalloc(&pdev->dev, sizeof(struct mdss_rot_mgr),
+		GFP_KERNEL);
+	if (!rot_mgr)
+		return -ENOMEM;
+
+	rot_mgr->pdev = pdev;
+	ret = mdss_rotator_parse_dt(rot_mgr, pdev);
+	if (ret) {
+		pr_err("fail to parse the dt\n");
+		goto error_parse_dt;
+	}
+
+	mutex_init(&rot_mgr->lock);
+	mutex_init(&rot_mgr->clk_lock);
+	mutex_init(&rot_mgr->bus_lock);
+	atomic_set(&rot_mgr->device_suspended, 0);
+	ret = mdss_rotator_init_queue(rot_mgr);
+	if (ret) {
+		pr_err("fail to init queue\n");
+		goto error_get_dev_num;
+	}
+
+	mutex_init(&rot_mgr->file_lock);
+	INIT_LIST_HEAD(&rot_mgr->file_list);
+
+	platform_set_drvdata(pdev, rot_mgr);
+
+	ret = alloc_chrdev_region(&rot_mgr->dev_num, 0, 1, DRIVER_NAME);
+	if (ret < 0) {
+		pr_err("alloc_chrdev_region failed ret = %d\n", ret);
+		goto error_get_dev_num;
+	}
+
+	rot_mgr->class = class_create(THIS_MODULE, CLASS_NAME);
+	if (IS_ERR(rot_mgr->class)) {
+		ret = PTR_ERR(rot_mgr->class);
+		pr_err("couldn't create class rc = %d\n", ret);
+		goto error_class_create;
+	}
+
+	rot_mgr->device = device_create(rot_mgr->class, NULL,
+		rot_mgr->dev_num, NULL, DRIVER_NAME);
+	if (IS_ERR(rot_mgr->device)) {
+		ret = PTR_ERR(rot_mgr->device);
+		pr_err("device_create failed %d\n", ret);
+		goto error_class_device_create;
+	}
+
+	cdev_init(&rot_mgr->cdev, &mdss_rotator_fops);
+	ret = cdev_add(&rot_mgr->cdev,
+			MKDEV(MAJOR(rot_mgr->dev_num), 0), 1);
+	if (ret < 0) {
+		pr_err("cdev_add failed %d\n", ret);
+		goto error_cdev_add;
+	}
+
+	/* sysfs failure is non-fatal, but remember it for cleanup */
+	sysfs_ret = sysfs_create_group(&rot_mgr->device->kobj,
+			&mdss_rotator_fs_attr_group);
+	if (sysfs_ret)
+		pr_err("unable to register rotator sysfs nodes\n");
+
+	ret = mdss_rotator_res_init(pdev, rot_mgr);
+	if (ret < 0) {
+		pr_err("res_init failed %d\n", ret);
+		goto error_res_init;
+	}
+	return 0;
+
+error_res_init:
+	if (!sysfs_ret)
+		sysfs_remove_group(&rot_mgr->device->kobj,
+				&mdss_rotator_fs_attr_group);
+	cdev_del(&rot_mgr->cdev);
+error_cdev_add:
+	device_destroy(rot_mgr->class, rot_mgr->dev_num);
+error_class_device_create:
+	class_destroy(rot_mgr->class);
+error_class_create:
+	unregister_chrdev_region(rot_mgr->dev_num, 1);
+error_get_dev_num:
+	mdss_rotator_deinit_queue(rot_mgr);
+error_parse_dt:
+	devm_kfree(&pdev->dev, rot_mgr);
+	rot_mgr = NULL;
+	return ret;
+}
+
+/*
+ * mdss_rotator_remove() - platform remove; tears down probe() state
+ *
+ * Fix: the original mixed the local @mgr with the rot_mgr global (the
+ * same object in practice); it now uses the drvdata-derived @mgr
+ * consistently and only touches the global to clear it at the end.
+ */
+static int mdss_rotator_remove(struct platform_device *dev)
+{
+	struct mdss_rot_mgr *mgr;
+
+	mgr = (struct mdss_rot_mgr *)platform_get_drvdata(dev);
+	if (!mgr)
+		return -ENODEV;
+
+	sysfs_remove_group(&mgr->device->kobj, &mdss_rotator_fs_attr_group);
+
+	mdss_rotator_release_all(mgr);
+
+	mdss_rotator_put_dt_vreg_data(&dev->dev, &mgr->module_power);
+	mdss_rotator_bus_scale_unregister(mgr);
+	cdev_del(&mgr->cdev);
+	device_destroy(mgr->class, mgr->dev_num);
+	class_destroy(mgr->class);
+	unregister_chrdev_region(mgr->dev_num, 1);
+
+	mdss_rotator_deinit_queue(mgr);
+	devm_kfree(&dev->dev, mgr);
+	rot_mgr = NULL;
+	return 0;
+}
+
+/*
+ * mdss_rotator_suspend_cancel_rot_work() - cancel all pending requests
+ *
+ * Walks every open file session and cancels its queued rotation work.
+ * Fix: the original locked mgr->file_lock but unlocked
+ * rot_mgr->file_lock; lock and unlock now use the same @mgr instance.
+ */
+static void mdss_rotator_suspend_cancel_rot_work(struct mdss_rot_mgr *mgr)
+{
+	struct mdss_rot_file_private *priv, *priv_next;
+
+	mutex_lock(&mgr->file_lock);
+	list_for_each_entry_safe(priv, priv_next, &mgr->file_list, list) {
+		mdss_rotator_cancel_all_requests(mgr, priv);
+	}
+	mutex_unlock(&mgr->file_lock);
+}
+
+#if defined(CONFIG_PM)
+/* PM suspend: block new work, cancel queued requests, drop perf votes. */
+static int mdss_rotator_suspend(struct platform_device *dev, pm_message_t state)
+{
+	struct mdss_rot_mgr *mgr;
+
+	mgr = (struct mdss_rot_mgr *)platform_get_drvdata(dev);
+	if (!mgr)
+		return -ENODEV;
+
+	/* makes open/ioctl return -EPERM until resume decrements this */
+	atomic_inc(&mgr->device_suspended);
+	mdss_rotator_suspend_cancel_rot_work(mgr);
+	mdss_rotator_update_perf(mgr);
+	return 0;
+}
+
+/* PM resume: re-enable the device and recompute performance votes. */
+static int mdss_rotator_resume(struct platform_device *dev)
+{
+	struct mdss_rot_mgr *mgr;
+
+	mgr = (struct mdss_rot_mgr *)platform_get_drvdata(dev);
+	if (!mgr)
+		return -ENODEV;
+
+	atomic_dec(&mgr->device_suspended);
+	mdss_rotator_update_perf(mgr);
+	return 0;
+}
+#endif
+
+/* DT match table: binds to "qcom,mdss_rotator" nodes. */
+static const struct of_device_id mdss_rotator_dt_match[] = {
+	{ .compatible = "qcom,mdss_rotator",},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, mdss_rotator_dt_match);
+
+/* legacy suspend/resume callbacks are used; dev_pm_ops is left NULL */
+static struct platform_driver mdss_rotator_driver = {
+	.probe = mdss_rotator_probe,
+	.remove = mdss_rotator_remove,
+#if defined(CONFIG_PM)
+	.suspend = mdss_rotator_suspend,
+	.resume = mdss_rotator_resume,
+#endif
+	.driver = {
+		.name = "mdss_rotator",
+		.of_match_table = mdss_rotator_dt_match,
+		.pm = NULL,
+	}
+};
+
+/* Register the rotator platform driver at module load. */
+static int __init mdss_rotator_init(void)
+{
+	return platform_driver_register(&mdss_rotator_driver);
+}
+
+/* Unregister on unload.  platform_driver_unregister() returns void, so
+ * the original "return platform_driver_unregister(...)" (a void
+ * expression returned from a void function — a compiler extension) is
+ * dropped.
+ */
+static void __exit mdss_rotator_exit(void)
+{
+	platform_driver_unregister(&mdss_rotator_driver);
+}
+
+module_init(mdss_rotator_init);
+module_exit(mdss_rotator_exit);
+
+MODULE_DESCRIPTION("MSM Rotator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/fbdev/msm/mdss_rotator_internal.h b/drivers/video/fbdev/msm/mdss_rotator_internal.h
new file mode 100644
index 0000000..8dd400e
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_rotator_internal.h
@@ -0,0 +1,249 @@
+/* Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_MDP_ROTATOR_INTERNAL_H
+#define MDSS_MDP_ROTATOR_INTERNAL_H
+
+#include <linux/list.h>
+#include <linux/sync.h>
+#include <linux/file.h>
+#include <linux/mdss_rotator.h>
+#include <linux/sw_sync.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/cdev.h>
+
+#include  "mdss_mdp.h"
+
+/*
+ * Defining characteristics about rotation work, that has corresponding
+ * fmt and roi checks in open session
+ */
+#define MDSS_MDP_DEFINING_FLAG_BITS MDP_ROTATION_90
+
+struct mdss_rot_entry;
+struct mdss_rot_perf;
+
+/* Clock indices for the rotator's AHB (bus) and core clocks. */
+enum mdss_rotator_clk_type {
+	MDSS_CLK_ROTATOR_AHB,
+	MDSS_CLK_ROTATOR_CORE,
+	MDSS_CLK_ROTATOR_END_IDX,
+};
+
+/*
+ * placeholder for performance profiling
+ * or debug support, not used currently
+ */
+struct mdss_rot_entry_cb_intf {
+	void (*pre_commit)(struct mdss_rot_entry *entry, void *data);
+	void (*post_commit)(struct mdss_rot_entry *entry,
+		void *data, int status);
+};
+
+/* sw_sync timeline used to signal per-entry output fences in order */
+struct mdss_rot_timeline {
+	struct mutex lock;
+	struct sw_sync_timeline *timeline;
+	u32 next_value;
+	char fence_name[32];
+};
+
+/* MDP resources (ctl/mixer/pipe/writeback) claimed for one rotation HW */
+struct mdss_rot_hw_resource {
+	struct mdss_mdp_ctl *ctl;
+	struct mdss_mdp_mixer *mixer;
+	struct mdss_mdp_pipe *pipe;
+	struct mdss_mdp_writeback *wb;
+	u32 pipe_id;
+	u32 wb_id;
+
+	u32 pending_count;
+	struct mdss_rot_entry *workload;
+};
+
+/* one workqueue plus fence timeline per writeback block */
+struct mdss_rot_queue {
+	struct workqueue_struct *rot_work_queue;
+	struct mdss_rot_timeline timeline;
+
+	struct mutex hw_lock;
+	struct mdss_rot_hw_resource *hw;
+};
+
+/* a user request: an array of entries submitted/retired as one unit */
+struct mdss_rot_entry_container {
+	struct list_head list;
+	u32 flags;
+	u32 count;
+	atomic_t pending_count;
+	struct mdss_rot_entry *entries;
+};
+
+/* state for a single rotation item while it flows through a queue */
+struct mdss_rot_entry {
+	struct mdp_rotation_item item;
+	struct work_struct commit_work;
+
+	struct mdss_rot_queue *queue;
+	struct mdss_rot_entry_container *request;
+
+	struct mdss_mdp_data src_buf;
+	struct mdss_mdp_data dst_buf;
+
+	/* waited on before the entry is committed */
+	struct sync_fence *input_fence;
+
+	/* signalled when the rotation completes */
+	int output_fence_fd;
+	struct sync_fence *output_fence;
+	bool output_signaled;
+
+	u32 dnsc_factor_w;
+	u32 dnsc_factor_h;
+
+	struct mdss_rot_entry_cb_intf intf;
+	void *intf_data;
+
+	struct mdss_rot_perf *perf;
+	bool work_assigned; /* Used when cleaning up work_distribution */
+};
+
+/* per-session performance state (clock and bandwidth votes) */
+struct mdss_rot_perf {
+	struct list_head list;
+	struct mdp_rotation_config config;
+	unsigned long clk_rate;
+	u64 bw;
+	struct mutex work_dis_lock;
+	u32 *work_distribution;
+	int last_wb_idx; /* last known wb index, used when above count is 0 */
+};
+
+/* per-open-fd context: its requests, perf sessions and list linkage */
+struct mdss_rot_file_private {
+	struct list_head list;
+
+	struct mutex req_lock;
+	struct list_head req_list;
+
+	struct mutex perf_lock;
+	struct list_head perf_list;
+
+	struct file *file;
+};
+
+/* bus-scale client handle plus the currently voted usecase/quota */
+struct mdss_rot_bus_data_type {
+	struct msm_bus_scale_pdata *bus_scale_pdata;
+	u32 bus_hdl;
+	u32 curr_bw_uc_idx;
+	u64 curr_quota_val;
+};
+
+/* global driver state; a singleton allocated in probe (rot_mgr) */
+struct mdss_rot_mgr {
+	struct mutex lock;
+
+	atomic_t device_suspended;
+
+	u32 session_id_generator;
+
+	struct platform_device *pdev;
+
+	dev_t dev_num;
+	struct cdev cdev;
+	struct class *class;
+	struct device *device;
+
+	/*
+	 * managing rotation queues, depends on
+	 * how many hw pipes available on the system
+	 */
+	int queue_count;
+	struct mdss_rot_queue *queues;
+
+	struct mutex file_lock;
+	/*
+	 * managing all the open file sessions to bw calculations,
+	 * and resource clean up during suspend
+	 */
+	struct list_head file_list;
+
+	struct mutex bus_lock;
+	u64 pending_close_bw_vote;
+	struct mdss_rot_bus_data_type data_bus;
+	struct mdss_rot_bus_data_type reg_bus;
+
+	/* Module power is only used for regulator management */
+	struct dss_module_power module_power;
+	bool regulator_enable;
+
+	struct mutex clk_lock;
+	int res_ref_cnt;
+	struct clk *rot_clk[MDSS_CLK_ROTATOR_END_IDX];
+	int rot_enable_clk_cnt;
+
+	bool has_downscale;
+	bool has_ubwc;
+};
+
+#ifdef CONFIG_COMPAT
+
+/* open a rotation session */
+#define MDSS_ROTATION_OPEN32 \
+	_IOWR(MDSS_ROTATOR_IOCTL_MAGIC, 1, compat_caddr_t)
+
+/* change the rotation session configuration */
+#define MDSS_ROTATION_CONFIG32 \
+	_IOWR(MDSS_ROTATOR_IOCTL_MAGIC, 2, compat_caddr_t)
+
+/* queue the rotation request */
+#define MDSS_ROTATION_REQUEST32 \
+	_IOWR(MDSS_ROTATOR_IOCTL_MAGIC, 3, compat_caddr_t)
+
+/* close a rotation session with the specified rotation session ID */
+#define MDSS_ROTATION_CLOSE32 \
+	_IOW(MDSS_ROTATOR_IOCTL_MAGIC, 4, unsigned int)
+
+/* 32-bit layout of struct mdp_rotation_request (list is a compat ptr) */
+struct mdp_rotation_request32 {
+	uint32_t version;
+	uint32_t flags;
+	uint32_t count;
+	compat_caddr_t list;
+	uint32_t reserved[6];
+};
+#endif
+
+/*
+ * Verify that a rotation item's rect matches the session buffer info
+ * (width, height and pixel format).  Returns 0 on match, otherwise logs
+ * the mismatch and returns -EINVAL.
+ */
+static inline int __compare_session_item_rect(
+	struct mdp_rotation_buf_info *s_rect,
+	struct mdp_rect *i_rect, uint32_t i_fmt, bool src)
+{
+	bool match = (s_rect->width == i_rect->w) &&
+			(s_rect->height == i_rect->h) &&
+			(s_rect->format == i_fmt);
+
+	if (match)
+		return 0;
+
+	pr_err("%s: session{%u,%u}f:%u mismatch from item{%u,%u}f:%u\n",
+		(src ? "src":"dst"), s_rect->width, s_rect->height,
+		s_rect->format, i_rect->w, i_rect->h, i_fmt);
+	return -EINVAL;
+}
+
+/*
+ * Compare the defining rotation flag bits between the session config and
+ * an item request.  Format and ROI validation is performed at open-session
+ * time based on these defining bits, so a request whose defining bits
+ * differ may not be a valid configuration for the session.
+ */
+static inline int __compare_session_rotations(uint32_t cfg_flag,
+	uint32_t item_flag)
+{
+	uint32_t cfg = cfg_flag & MDSS_MDP_DEFINING_FLAG_BITS;
+	uint32_t item = item_flag & MDSS_MDP_DEFINING_FLAG_BITS;
+
+	if (cfg == item)
+		return 0;
+
+	pr_err("Rotation degree request different from open session\n");
+	return -EINVAL;
+}
+
+#endif
diff --git a/drivers/video/fbdev/msm/mdss_smmu.c b/drivers/video/fbdev/msm/mdss_smmu.c
new file mode 100644
index 0000000..05353a0
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_smmu.c
@@ -0,0 +1,904 @@
+/* Copyright (c) 2007-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/iommu.h>
+#include <linux/qcom_iommu.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/msm-clk.h>
+
+#include <linux/dma-mapping.h>
+#include <linux/dma-buf.h>
+#include <linux/of_platform.h>
+#include <linux/msm_dma_iommu_mapping.h>
+
+#include <linux/qcom_iommu.h>
+#include <asm/dma-iommu.h>
+#include "soc/qcom/secure_buffer.h"
+
+#include "mdss.h"
+#include "mdss_mdp.h"
+#include "mdss_smmu.h"
+#include "mdss_debug.h"
+
+/*
+ * NOTE(review): despite the name, SZ_4G here is 0xF0000000 (3.75GiB),
+ * presumably the usable top of the 32-bit VA range for SMMU mappings —
+ * confirm before reusing this constant as a literal 4GiB size.
+ */
+#define SZ_4G 0xF0000000
+
+/* serializes MDP IOMMU attach/detach and mapping state transitions */
+static DEFINE_MUTEX(mdp_iommu_lock);
+
+void mdss_iommu_lock(void)
+{
+	mutex_lock(&mdp_iommu_lock);
+}
+
+void mdss_iommu_unlock(void)
+{
+	mutex_unlock(&mdp_iommu_lock);
+}
+
+/*
+ * mdss_smmu_util_parse_dt_clock() - parse SMMU clock names/rates from DT
+ *
+ * Fills @mp with one dss_clk entry per "clock-names" string; a zero (or
+ * absent) "clock-rate" entry marks the clock as AHB, otherwise PCLK.
+ * Returns 0 on success or a negative errno.  Absence of "clock-names"
+ * still returns 0, preserving the original best-effort behavior.
+ *
+ * Fixes over the original:
+ *  - the return value of of_property_read_string_index() is now checked,
+ *    so an uninitialized clock_name can no longer reach strlcpy().
+ *  - clock_rate is reset to 0 each iteration, so a missing "clock-rate"
+ *    entry no longer consumes an uninitialized (or stale) value.
+ */
+static int mdss_smmu_util_parse_dt_clock(struct platform_device *pdev,
+		struct dss_module_power *mp)
+{
+	u32 i;
+	int rc = 0;
+	const char *clock_name;
+	u32 clock_rate;
+	int num_clk;
+
+	num_clk = of_property_count_strings(pdev->dev.of_node,
+			"clock-names");
+	if (num_clk <= 0) {
+		pr_err("clocks are not defined\n");
+		goto clk_err;
+	}
+
+	mp->num_clk = num_clk;
+	mp->clk_config = devm_kzalloc(&pdev->dev,
+			sizeof(struct dss_clk) * mp->num_clk, GFP_KERNEL);
+	if (!mp->clk_config) {
+		rc = -ENOMEM;
+		mp->num_clk = 0;
+		goto clk_err;
+	}
+
+	for (i = 0; i < mp->num_clk; i++) {
+		rc = of_property_read_string_index(pdev->dev.of_node,
+				"clock-names", i, &clock_name);
+		if (rc) {
+			pr_err("failed to read clock-names at index %u\n", i);
+			mp->num_clk = 0;
+			goto clk_err;
+		}
+		strlcpy(mp->clk_config[i].clk_name, clock_name,
+				sizeof(mp->clk_config[i].clk_name));
+
+		/* default to 0 (AHB) when no rate entry is present */
+		clock_rate = 0;
+		of_property_read_u32_index(pdev->dev.of_node, "clock-rate",
+							i, &clock_rate);
+		mp->clk_config[i].rate = clock_rate;
+
+		if (!clock_rate)
+			mp->clk_config[i].type = DSS_CLK_AHB;
+		else
+			mp->clk_config[i].type = DSS_CLK_PCLK;
+	}
+
+clk_err:
+	return rc;
+}
+
+/*
+ * mdss_smmu_clk_register() - resolve all parsed SMMU clock handles
+ *
+ * Parses the clock list from DT and looks up each clock via
+ * devm_clk_get().  Returns 0 on success or a negative errno (partially
+ * acquired clocks are device-managed and freed automatically).
+ */
+static int mdss_smmu_clk_register(struct platform_device *pdev,
+		struct dss_module_power *mp)
+{
+	int i, ret;
+	struct clk *clk;
+
+	ret = mdss_smmu_util_parse_dt_clock(pdev, mp);
+	if (ret) {
+		pr_err("unable to parse clocks\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < mp->num_clk; i++) {
+		clk = devm_clk_get(&pdev->dev,
+				mp->clk_config[i].clk_name);
+		if (IS_ERR(clk)) {
+			pr_err("unable to get clk: %s\n",
+					mp->clk_config[i].clk_name);
+			return PTR_ERR(clk);
+		}
+		mp->clk_config[i].clk = clk;
+	}
+	return 0;
+}
+
+/*
+ * mdss_smmu_enable_power() - enable/disable an SMMU client's power rails
+ *
+ * Enable order: regulators -> register bus vote -> clocks, unwound in
+ * reverse on clock failure.  Disable reverses the enable order.  A client
+ * with neither vregs nor clocks is a no-op success.
+ */
+static int mdss_smmu_enable_power(struct mdss_smmu_client *mdss_smmu,
+	bool enable)
+{
+	int rc = 0;
+	struct dss_module_power *mp;
+
+	if (!mdss_smmu)
+		return -EINVAL;
+
+	mp = &mdss_smmu->mp;
+
+	if (!mp->num_vreg && !mp->num_clk)
+		return 0;
+
+	if (enable) {
+		rc = msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, true);
+		if (rc) {
+			pr_err("vreg enable failed - rc:%d\n", rc);
+			goto end;
+		}
+		mdss_update_reg_bus_vote(mdss_smmu->reg_bus_clt,
+			VOTE_INDEX_LOW);
+		rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
+		if (rc) {
+			pr_err("clock enable failed - rc:%d\n", rc);
+			/* roll back the bus vote and regulators */
+			mdss_update_reg_bus_vote(mdss_smmu->reg_bus_clt,
+				VOTE_INDEX_DISABLE);
+			msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg,
+				false);
+			goto end;
+		}
+	} else {
+		msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
+		mdss_update_reg_bus_vote(mdss_smmu->reg_bus_clt,
+			VOTE_INDEX_DISABLE);
+		msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, false);
+	}
+end:
+	return rc;
+}
+
+/*
+ * mdss_smmu_v2_attach()
+ *
+ * Associates each configured VA range with the corresponding smmu context
+ * bank device. Enables the clks as smmu_v2 requires voting it before the usage.
+ * And iommu attach is done only once during the initial attach and it is never
+ * detached as smmu v2 uses a feature called 'retention'.
+ */
+static int mdss_smmu_attach_v2(struct mdss_data_type *mdata)
+{
+	struct mdss_smmu_client *mdss_smmu;
+	int i, rc = 0;
+
+	for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
+		if (!mdss_smmu_is_valid_domain_type(mdata, i))
+			continue;
+
+		mdss_smmu = mdss_smmu_get_cb(i);
+		if (mdss_smmu && mdss_smmu->dev) {
+			/*
+			 * during handoff the bootloader already left power
+			 * on; skip the enable to keep the refcount balanced
+			 */
+			if (!mdss_smmu->handoff_pending) {
+				rc = mdss_smmu_enable_power(mdss_smmu, true);
+				if (rc) {
+					pr_err("power enable failed - domain:[%d] rc:%d\n",
+						i, rc);
+					goto err;
+				}
+			}
+			mdss_smmu->handoff_pending = false;
+
+			/* attach once; retention keeps the mapping alive */
+			if (!mdss_smmu->domain_attached) {
+				rc = arm_iommu_attach_device(mdss_smmu->dev,
+						mdss_smmu->mmu_mapping);
+				if (rc) {
+					pr_err("iommu attach device failed for domain[%d] with err:%d\n",
+						i, rc);
+					mdss_smmu_enable_power(mdss_smmu,
+						false);
+					goto err;
+				}
+				mdss_smmu->domain_attached = true;
+				pr_debug("iommu v2 domain[%i] attached\n", i);
+			}
+		} else {
+			pr_err("iommu device not attached for domain[%d]\n", i);
+			return -ENODEV;
+		}
+	}
+
+	return 0;
+
+err:
+	/* unwind only the domains that were attached in this call */
+	for (i--; i >= 0; i--) {
+		mdss_smmu = mdss_smmu_get_cb(i);
+		if (mdss_smmu && mdss_smmu->dev) {
+			arm_iommu_detach_device(mdss_smmu->dev);
+			mdss_smmu_enable_power(mdss_smmu, false);
+			mdss_smmu->domain_attached = false;
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * mdss_smmu_detach_v2()
+ *
+ * Only disables the clks as it is not required to detach the iommu mapped
+ * VA range from the device in smmu_v2 as explained in the mdss_smmu_v2_attach
+ */
+static int mdss_smmu_detach_v2(struct mdss_data_type *mdata)
+{
+	int i;
+
+	for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
+		struct mdss_smmu_client *smmu_client;
+
+		if (!mdss_smmu_is_valid_domain_type(mdata, i))
+			continue;
+
+		smmu_client = mdss_smmu_get_cb(i);
+		if (!smmu_client || !smmu_client->dev)
+			continue;
+		if (smmu_client->handoff_pending)
+			continue;
+
+		mdss_smmu_enable_power(smmu_client, false);
+	}
+
+	return 0;
+}
+
+static int mdss_smmu_get_domain_id_v2(u32 type)
+{
+	return type;
+}
+
+/*
+ * mdss_smmu_dma_buf_attach_v2()
+ *
+ * Same as mdss_smmu_dma_buf_attach except that the device is got from
+ * the configured smmu v2 context banks.
+ */
+static struct dma_buf_attachment *mdss_smmu_dma_buf_attach_v2(
+		struct dma_buf *dma_buf, struct device *dev, int domain)
+{
+	struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);
+
+	if (!mdss_smmu) {
+		pr_err("not able to get smmu context\n");
+		return NULL;
+	}
+
+	return dma_buf_attach(dma_buf, mdss_smmu->dev);
+}
+
+/*
+ * mdss_smmu_map_dma_buf_v2()
+ *
+ * Maps existing buffer (by struct scatterlist) into SMMU context bank device.
+ * From which we can take the virtual address and size allocated.
+ * msm_map_dma_buf is deprecated with smmu v2 and it uses dma_map_sg instead.
+ *
+ * Returns 0 on success, -EINVAL for an invalid domain and -ENOMEM when the
+ * scatterlist could not be fully mapped.
+ */
+static int mdss_smmu_map_dma_buf_v2(struct dma_buf *dma_buf,
+		struct sg_table *table, int domain, dma_addr_t *iova,
+		unsigned long *size, int dir)
+{
+	int rc;
+	struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);
+
+	if (!mdss_smmu) {
+		pr_err("not able to get smmu context\n");
+		return -EINVAL;
+	}
+	ATRACE_BEGIN("map_buffer");
+	rc = msm_dma_map_sg_lazy(mdss_smmu->dev, table->sgl, table->nents, dir,
+		dma_buf);
+	if (rc != table->nents) {
+		/* Close the trace event on the failure path as well. */
+		ATRACE_END("map_buffer");
+		pr_err("dma map sg failed\n");
+		return -ENOMEM;
+	}
+	ATRACE_END("map_buffer");
+	*iova = table->sgl->dma_address;
+	*size = table->sgl->dma_length;
+	return 0;
+}
+
+/* Unmaps a scatterlist previously mapped by mdss_smmu_map_dma_buf_v2(). */
+static void mdss_smmu_unmap_dma_buf_v2(struct sg_table *table, int domain,
+		int dir, struct dma_buf *dma_buf)
+{
+	struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);
+
+	if (!mdss_smmu) {
+		pr_err("not able to get smmu context\n");
+		return;
+	}
+
+	ATRACE_BEGIN("unmap_buffer");
+	msm_dma_unmap_sg(mdss_smmu->dev, table->sgl, table->nents, dir,
+		 dma_buf);
+	ATRACE_END("unmap_buffer");
+}
+
+/*
+ * mdss_smmu_dma_alloc_coherent_v2()
+ *
+ * Allocates buffer same as mdss_smmu_dma_alloc_coherent_v1, but in addition it
+ * also maps to the SMMU domain with the help of the respective SMMU context
+ * bank device
+ */
+static int mdss_smmu_dma_alloc_coherent_v2(struct device *dev, size_t size,
+		dma_addr_t *phys, dma_addr_t *iova, void **cpu_addr,
+		gfp_t gfp, int domain)
+{
+	struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);
+
+	if (!mdss_smmu) {
+		pr_err("not able to get smmu context\n");
+		return -EINVAL;
+	}
+
+	*cpu_addr = dma_alloc_coherent(mdss_smmu->dev, size, iova, gfp);
+	if (!*cpu_addr) {
+		pr_err("dma alloc coherent failed!\n");
+		return -ENOMEM;
+	}
+	/* Resolve the physical address behind the freshly mapped iova. */
+	*phys = iommu_iova_to_phys(mdss_smmu->mmu_mapping->domain,
+			*iova);
+	return 0;
+}
+
+/* Frees a buffer allocated by mdss_smmu_dma_alloc_coherent_v2(). */
+static void mdss_smmu_dma_free_coherent_v2(struct device *dev, size_t size,
+		void *cpu_addr, dma_addr_t phys, dma_addr_t iova, int domain)
+{
+	struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);
+
+	if (!mdss_smmu) {
+		pr_err("not able to get smmu context\n");
+		return;
+	}
+
+	dma_free_coherent(mdss_smmu->dev, size, cpu_addr, iova);
+}
+
+/*
+ * mdss_smmu_map_v2()
+ *
+ * Same as mdss_smmu_map_v1, just that it maps to the appropriate domain
+ * referred by the smmu context bank handles.
+ */
+static int mdss_smmu_map_v2(int domain, phys_addr_t iova, phys_addr_t phys,
+		int gfp_order, int prot)
+{
+	struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);
+
+	if (!mdss_smmu) {
+		pr_err("not able to get smmu context\n");
+		return -EINVAL;
+	}
+
+	return iommu_map(mdss_smmu->mmu_mapping->domain,
+			iova, phys, gfp_order, prot);
+}
+
+/* Reverse of mdss_smmu_map_v2(): unmaps the range from the domain. */
+static void mdss_smmu_unmap_v2(int domain, unsigned long iova, int gfp_order)
+{
+	struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);
+
+	if (!mdss_smmu) {
+		pr_err("not able to get smmu context\n");
+		return;
+	}
+
+	iommu_unmap(mdss_smmu->mmu_mapping->domain, iova, gfp_order);
+}
+
+/*
+ * mdss_smmu_dsi_alloc_buf_v2()
+ *
+ * Allocates the buffer and mapping is done later.
+ * NOTE(review): the 'dev' and 'gfp' parameters are unused here; the physical
+ * address of the kzalloc'ed buffer is returned in *dmap - confirm callers
+ * expect a physical (not iommu) address until the map step runs.
+ */
+static char *mdss_smmu_dsi_alloc_buf_v2(struct device *dev, int size,
+		dma_addr_t *dmap, gfp_t gfp)
+{
+	char *data;
+
+	data = kzalloc(size, GFP_KERNEL | GFP_DMA);
+	if (data)
+		*dmap = (dma_addr_t) virt_to_phys(data);
+
+	return data;
+}
+
+/*
+ * mdss_smmu_dsi_map_buffer_v2()
+ *
+ * Maps the buffer allocated in mdss_smmu_dsi_alloc_buf_v2 with the SMMU
+ * domain and uses dma_map_single as msm_iommu_map_contig_buffer is deprecated
+ * in smmu v2.
+ */
+static int mdss_smmu_dsi_map_buffer_v2(phys_addr_t phys, unsigned int domain,
+		unsigned long size, dma_addr_t *dma_addr, void *cpu_addr,
+		int dir)
+{
+	struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);
+
+	if (!mdss_smmu) {
+		pr_err("not able to get smmu context\n");
+		return -EINVAL;
+	}
+
+	*dma_addr = dma_map_single(mdss_smmu->dev, cpu_addr, size, dir);
+	if (IS_ERR_VALUE(*dma_addr)) {
+		pr_err("dma map single failed\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+/* Unmaps a DSI buffer; skipped when the iommu is no longer attached. */
+static void mdss_smmu_dsi_unmap_buffer_v2(dma_addr_t dma_addr, int domain,
+		unsigned long size, int dir)
+{
+	struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);
+
+	if (!mdss_smmu) {
+		pr_err("not able to get smmu context\n");
+		return;
+	}
+
+	if (is_mdss_iommu_attached())
+		dma_unmap_single(mdss_smmu->dev, dma_addr, size, dir);
+}
+
+/*
+ * SMMU fault handler: logs the faulting iova, the fault flags and the
+ * master id extracted from FSYNR1 so the offending MDP client can be
+ * identified.  Always returns -ENODEV, i.e. it never claims to have
+ * resolved the fault.
+ */
+int mdss_smmu_fault_handler(struct iommu_domain *domain, struct device *dev,
+	unsigned long iova, int flags, void *user_data)
+{
+	struct mdss_smmu_client *mdss_smmu =
+		(struct mdss_smmu_client *)user_data;
+	u32 fsynr1, mid, i;
+
+	if (!mdss_smmu || !mdss_smmu->mmu_base)
+		goto end;
+
+	fsynr1 = readl_relaxed(mdss_smmu->mmu_base + SMMU_CBN_FSYNR1);
+	mid = fsynr1 & 0xff;
+	pr_err("mdss_smmu: iova:0x%lx flags:0x%x fsynr1: 0x%x mid: 0x%x\n",
+		iova, flags, fsynr1, mid);
+
+	/* get domain id information */
+	for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
+		if (mdss_smmu == mdss_smmu_get_cb(i))
+			break;
+	}
+
+	/* user_data did not match any known context bank - nothing to dump */
+	if (i == MDSS_IOMMU_MAX_DOMAIN)
+		goto end;
+
+	mdss_mdp_debug_mid(mid);
+end:
+	return -ENODEV;
+}
+
+/* Releases the iommu mapping of every configured context bank. */
+static void mdss_smmu_deinit_v2(struct mdss_data_type *mdata)
+{
+	int i;
+	struct mdss_smmu_client *mdss_smmu;
+
+	for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
+		mdss_smmu = mdss_smmu_get_cb(i);
+		if (mdss_smmu && mdss_smmu->dev)
+			arm_iommu_release_mapping(mdss_smmu->mmu_mapping);
+	}
+}
+
+/*
+ * sg_clone -	Duplicate an existing chained sgl
+ * @orig_sgl:	Original sg list to be duplicated
+ * @len:	Total length of sg while taking chaining into account
+ * @gfp_mask:	GFP allocation mask
+ * @padding:	specifies if padding is required
+ *
+ * Description:
+ *   Clone a chained sgl. This cloned copy may be modified in some ways while
+ *   keeping the original sgl intact. Also allow the cloned copy to have
+ *   a smaller length than the original which may reduce the sgl total
+ *   sg entries and also allows cloned copy to have one extra sg entry on
+ *   either sides of sgl.
+ *
+ * Returns:
+ *   Pointer to new kmalloced sg list, ERR_PTR() on error
+ *
+ */
+static struct scatterlist *sg_clone(struct scatterlist *orig_sgl, u64 len,
+				gfp_t gfp_mask, bool padding)
+{
+	int nents;
+	bool last_entry;
+	struct scatterlist *sgl, *head;
+
+	nents = sg_nents(orig_sgl);
+	if (nents < 0)
+		return ERR_PTR(-EINVAL);
+	/* Reserve one extra entry at each end of the clone. */
+	if (padding)
+		nents += 2;
+
+	head = kmalloc_array(nents, sizeof(struct scatterlist), gfp_mask);
+	if (!head)
+		return ERR_PTR(-ENOMEM);
+
+	sgl = head;
+
+	sg_init_table(sgl, nents);
+
+	if (padding) {
+		/* Leading pad: duplicate the first real entry up front. */
+		*sgl = *orig_sgl;
+		if (sg_is_chain(orig_sgl)) {
+			orig_sgl = sg_next(orig_sgl);
+			*sgl = *orig_sgl;
+		}
+		/* Clear chain/last markers copied from the original. */
+		sgl->page_link &= (unsigned long)(~0x03);
+		sgl = sg_next(sgl);
+	}
+
+	for (; sgl; orig_sgl = sg_next(orig_sgl), sgl = sg_next(sgl)) {
+
+		last_entry = sg_is_last(sgl);
+
+		/*
+		 * If page_link is pointing to a chained sgl then set
+		 * the sg entry in the cloned list to the next sg entry
+		 * in the original sg list as chaining is already taken
+		 * care.
+		 */
+
+		if (sg_is_chain(orig_sgl))
+			orig_sgl = sg_next(orig_sgl);
+
+		if (padding)
+			last_entry = sg_is_last(orig_sgl);
+
+		*sgl = *orig_sgl;
+		sgl->page_link &= (unsigned long)(~0x03);
+
+		if (last_entry) {
+			if (padding) {
+				/* Trailing pad: repeat the final entry. */
+				len -= sg_dma_len(sgl);
+				sgl = sg_next(sgl);
+				*sgl = *orig_sgl;
+			}
+			sg_dma_len(sgl) = len ? len : SZ_4K;
+			/* Set bit 1 to indicate end of sgl */
+			sgl->page_link |= 0x02;
+		} else {
+			len -= sg_dma_len(sgl);
+		}
+	}
+
+	return head;
+}
+
+/*
+ * sg_table_clone - Duplicate an existing sg_table including chained sgl
+ * @orig_table:     Original sg_table to be duplicated
+ * @gfp_mask:       GFP allocation mask
+ * @padding:	    specifies if padding is required
+ *
+ * Description:
+ *   Clone a sg_table along with chained sgl. This cloned copy may be
+ *   modified in some ways while keeping the original table and sgl intact.
+ *   Also allow the cloned sgl copy to have a smaller length than the original
+ *   which may reduce the sgl total sg entries.
+ *
+ * Returns:
+ *   Pointer to new kmalloced sg_table, ERR_PTR() on error
+ *
+ */
+static struct sg_table *sg_table_clone(struct sg_table *orig_table,
+				gfp_t gfp_mask, bool padding)
+{
+	struct sg_table *table;
+	struct scatterlist *sg = orig_table->sgl;
+	u64 len = 0;
+
+	/* Total byte length across the (possibly chained) list. */
+	for (len = 0; sg; sg = sg_next(sg))
+		len += sg->length;
+
+	table = kmalloc(sizeof(*table), gfp_mask);
+	if (!table)
+		return ERR_PTR(-ENOMEM);
+
+	table->sgl = sg_clone(orig_table->sgl, len, gfp_mask, padding);
+	if (IS_ERR(table->sgl)) {
+		struct scatterlist *sgl = table->sgl;
+
+		kfree(table);
+		/* Propagate the real sg_clone() error, not a blanket -ENOMEM */
+		return ERR_CAST(sgl);
+	}
+
+	table->nents = table->orig_nents = sg_nents(table->sgl);
+
+	return table;
+}
+
+/* Populates the smmu function table with the v2 implementations above. */
+static void mdss_smmu_ops_init(struct mdss_data_type *mdata)
+{
+	mdata->smmu_ops.smmu_attach = mdss_smmu_attach_v2;
+	mdata->smmu_ops.smmu_detach = mdss_smmu_detach_v2;
+	mdata->smmu_ops.smmu_get_domain_id = mdss_smmu_get_domain_id_v2;
+	mdata->smmu_ops.smmu_dma_buf_attach =
+				mdss_smmu_dma_buf_attach_v2;
+	mdata->smmu_ops.smmu_map_dma_buf = mdss_smmu_map_dma_buf_v2;
+	mdata->smmu_ops.smmu_unmap_dma_buf = mdss_smmu_unmap_dma_buf_v2;
+	mdata->smmu_ops.smmu_dma_alloc_coherent =
+				mdss_smmu_dma_alloc_coherent_v2;
+	mdata->smmu_ops.smmu_dma_free_coherent =
+				mdss_smmu_dma_free_coherent_v2;
+	mdata->smmu_ops.smmu_map = mdss_smmu_map_v2;
+	mdata->smmu_ops.smmu_unmap = mdss_smmu_unmap_v2;
+	mdata->smmu_ops.smmu_dsi_alloc_buf = mdss_smmu_dsi_alloc_buf_v2;
+	mdata->smmu_ops.smmu_dsi_map_buffer =
+				mdss_smmu_dsi_map_buffer_v2;
+	mdata->smmu_ops.smmu_dsi_unmap_buffer =
+				mdss_smmu_dsi_unmap_buffer_v2;
+	mdata->smmu_ops.smmu_deinit = mdss_smmu_deinit_v2;
+	mdata->smmu_ops.smmu_sg_table_clone = sg_table_clone;
+}
+
+/*
+ * mdss_smmu_device_create()
+ * @dev: mdss_mdp device
+ *
+ * For smmu_v2, each context bank is a separate child device of mdss_mdp.
+ * Platform devices are created for those smmu related child devices of
+ * mdss_mdp here. This would facilitate probes to happen for these devices in
+ * which the smmu mapping and initialization is handled.
+ */
+void mdss_smmu_device_create(struct device *dev)
+{
+	struct device_node *parent, *child;
+
+	parent = dev->of_node;
+	for_each_child_of_node(parent, child) {
+		if (is_mdss_smmu_compatible_device(child->name))
+			of_platform_device_create(child, NULL, dev);
+	}
+}
+
+/* Creates the smmu child devices and installs the v2 ops and lock hooks. */
+int mdss_smmu_init(struct mdss_data_type *mdata, struct device *dev)
+{
+	mdss_smmu_device_create(dev);
+	mdss_smmu_ops_init(mdata);
+	mdata->mdss_util->iommu_lock = mdss_iommu_lock;
+	mdata->mdss_util->iommu_unlock = mdss_iommu_unlock;
+	return 0;
+}
+
+/* Per-domain VA layouts: all domains span [SZ_1M, SZ_4G). */
+static struct mdss_smmu_domain mdss_mdp_unsec = {
+	"mdp_0", MDSS_IOMMU_DOMAIN_UNSECURE, SZ_1M, (SZ_4G - SZ_1M)};
+static struct mdss_smmu_domain mdss_rot_unsec = {
+	NULL, MDSS_IOMMU_DOMAIN_ROT_UNSECURE, SZ_1M, (SZ_4G - SZ_1M)};
+static struct mdss_smmu_domain mdss_mdp_sec = {
+	"mdp_1", MDSS_IOMMU_DOMAIN_SECURE, SZ_1M, (SZ_4G - SZ_1M)};
+static struct mdss_smmu_domain mdss_rot_sec = {
+	NULL, MDSS_IOMMU_DOMAIN_ROT_SECURE, SZ_1M, (SZ_4G - SZ_1M)};
+
+static const struct of_device_id mdss_smmu_dt_match[] = {
+	{ .compatible = "qcom,smmu_mdp_unsec", .data = &mdss_mdp_unsec},
+	{ .compatible = "qcom,smmu_rot_unsec", .data = &mdss_rot_unsec},
+	{ .compatible = "qcom,smmu_mdp_sec", .data = &mdss_mdp_sec},
+	{ .compatible = "qcom,smmu_rot_sec", .data = &mdss_rot_sec},
+	{}
+};
+MODULE_DEVICE_TABLE(of, mdss_smmu_dt_match);
+
+/*
+ * mdss_smmu_probe()
+ * @pdev: platform device
+ *
+ * Each smmu context acts as a separate device and the context banks are
+ * configured with a VA range.
+ * Registers the clks as each context bank has its own clks, for which voting
+ * has to be done every time before using that context bank.
+ */
+int mdss_smmu_probe(struct platform_device *pdev)
+{
+	struct device *dev;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_smmu_client *mdss_smmu;
+	int rc = 0;
+	struct mdss_smmu_domain smmu_domain;
+	const struct of_device_id *match;
+	struct dss_module_power *mp;
+	int disable_htw = 1;
+	char name[MAX_CLIENT_NAME_LEN];
+	const __be32 *address = NULL, *size = NULL;
+
+	/* Defer until the core mdss_mdp probe has populated mdata. */
+	if (!mdata) {
+		pr_err("probe failed as mdata is not initialized\n");
+		return -EPROBE_DEFER;
+	}
+
+	match = of_match_device(mdss_smmu_dt_match, &pdev->dev);
+	if (!match || !match->data) {
+		pr_err("probe failed as match data is invalid\n");
+		return -EINVAL;
+	}
+
+	smmu_domain = *(struct mdss_smmu_domain *) (match->data);
+	if (smmu_domain.domain >= MDSS_IOMMU_MAX_DOMAIN) {
+		pr_err("no matching device found\n");
+		return -EINVAL;
+	}
+
+	if (of_find_property(pdev->dev.of_node, "iommus", NULL)) {
+		dev = &pdev->dev;
+	} else {
+		/*
+		 * For old iommu driver we query the context bank device
+		 * rather than getting it from dt.
+		 */
+		dev = msm_iommu_get_ctx(smmu_domain.ctx_name);
+		if (!dev) {
+			pr_err("Invalid SMMU ctx for domain:%d\n",
+				smmu_domain.domain);
+			return -EINVAL;
+		}
+	}
+
+	mdss_smmu = &mdata->mdss_smmu[smmu_domain.domain];
+	mp = &mdss_smmu->mp;
+	memset(mp, 0, sizeof(struct dss_module_power));
+
+	/* Configure the optional mmagic gdsc regulator when present. */
+	if (of_find_property(pdev->dev.of_node,
+		"gdsc-mmagic-mdss-supply", NULL)) {
+
+		mp->vreg_config = devm_kzalloc(&pdev->dev,
+			sizeof(struct dss_vreg), GFP_KERNEL);
+		if (!mp->vreg_config)
+			return -ENOMEM;
+
+		strlcpy(mp->vreg_config->vreg_name, "gdsc-mmagic-mdss",
+				sizeof(mp->vreg_config->vreg_name));
+		mp->num_vreg = 1;
+	}
+
+	rc = msm_dss_config_vreg(&pdev->dev, mp->vreg_config,
+		mp->num_vreg, true);
+	if (rc) {
+		pr_err("vreg config failed rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = mdss_smmu_clk_register(pdev, mp);
+	if (rc) {
+		pr_err("smmu clk register failed for domain[%d] with err:%d\n",
+			smmu_domain.domain, rc);
+		msm_dss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg,
+			false);
+		return rc;
+	}
+
+	snprintf(name, MAX_CLIENT_NAME_LEN, "smmu:%u", smmu_domain.domain);
+	mdss_smmu->reg_bus_clt = mdss_reg_bus_vote_client_create(name);
+	if (IS_ERR(mdss_smmu->reg_bus_clt)) {
+		pr_err("mdss bus client register failed\n");
+		msm_dss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg,
+			false);
+		return PTR_ERR(mdss_smmu->reg_bus_clt);
+	}
+
+	rc = mdss_smmu_enable_power(mdss_smmu, true);
+	if (rc) {
+		pr_err("power enable failed - domain:[%d] rc:%d\n",
+			smmu_domain.domain, rc);
+		goto bus_client_destroy;
+	}
+
+	/* Create the VA mapping for this context bank. */
+	mdss_smmu->mmu_mapping = arm_iommu_create_mapping(
+		msm_iommu_get_bus(dev), smmu_domain.start, smmu_domain.size);
+	if (IS_ERR(mdss_smmu->mmu_mapping)) {
+		pr_err("iommu create mapping failed for domain[%d]\n",
+			smmu_domain.domain);
+		rc = PTR_ERR(mdss_smmu->mmu_mapping);
+		goto disable_power;
+	}
+
+	rc = iommu_domain_set_attr(mdss_smmu->mmu_mapping->domain,
+		DOMAIN_ATTR_COHERENT_HTW_DISABLE, &disable_htw);
+	if (rc) {
+		pr_err("couldn't disable coherent HTW\n");
+		goto release_mapping;
+	}
+
+	/* Secure domains additionally need the CP pixel VMID attribute. */
+	if (smmu_domain.domain == MDSS_IOMMU_DOMAIN_SECURE ||
+		smmu_domain.domain == MDSS_IOMMU_DOMAIN_ROT_SECURE) {
+		int secure_vmid = VMID_CP_PIXEL;
+
+		rc = iommu_domain_set_attr(mdss_smmu->mmu_mapping->domain,
+			DOMAIN_ATTR_SECURE_VMID, &secure_vmid);
+		if (rc) {
+			pr_err("couldn't set secure pixel vmid\n");
+			goto release_mapping;
+		}
+	}
+
+	/* Keep clocks voted while a bootloader display handoff is pending. */
+	if (!mdata->handoff_pending)
+		mdss_smmu_enable_power(mdss_smmu, false);
+	else
+		mdss_smmu->handoff_pending = true;
+
+	mdss_smmu->dev = dev;
+
+	/* Optional register space used by the fault handler (FSYNR1). */
+	address = of_get_address_by_name(pdev->dev.of_node, "mmu_cb", 0, 0);
+	if (address) {
+		size = address + 1;
+		mdss_smmu->mmu_base = ioremap(be32_to_cpu(*address),
+			be32_to_cpu(*size));
+		if (mdss_smmu->mmu_base)
+			iommu_set_fault_handler(mdss_smmu->mmu_mapping->domain,
+				mdss_smmu_fault_handler, mdss_smmu);
+	} else {
+		pr_debug("unable to map context bank base\n");
+	}
+
+	pr_info("iommu v2 domain[%d] mapping and clk register successful!\n",
+			smmu_domain.domain);
+	return 0;
+
+release_mapping:
+	arm_iommu_release_mapping(mdss_smmu->mmu_mapping);
+disable_power:
+	mdss_smmu_enable_power(mdss_smmu, false);
+bus_client_destroy:
+	mdss_reg_bus_vote_client_destroy(mdss_smmu->reg_bus_clt);
+	mdss_smmu->reg_bus_clt = NULL;
+	msm_dss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg,
+			false);
+	return rc;
+}
+
+/* Releases the mapping of the context bank owned by this platform device. */
+int mdss_smmu_remove(struct platform_device *pdev)
+{
+	int i;
+	struct mdss_smmu_client *mdss_smmu;
+
+	for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
+		mdss_smmu = mdss_smmu_get_cb(i);
+		if (mdss_smmu && mdss_smmu->dev &&
+			(mdss_smmu->dev == &pdev->dev))
+			arm_iommu_release_mapping(mdss_smmu->mmu_mapping);
+	}
+	return 0;
+}
+
+static struct platform_driver mdss_smmu_driver = {
+	.probe = mdss_smmu_probe,
+	.remove = mdss_smmu_remove,
+	.shutdown = NULL,
+	.driver = {
+		.name = "mdss_smmu",
+		.of_match_table = mdss_smmu_dt_match,
+	},
+};
+
+static int mdss_smmu_register_driver(void)
+{
+	return platform_driver_register(&mdss_smmu_driver);
+}
+
+/* Module entry point: register the smmu platform driver. */
+static int __init mdss_smmu_driver_init(void)
+{
+	int ret;
+
+	ret = mdss_smmu_register_driver();
+	if (ret)
+		pr_err("mdss_smmu_register_driver() failed!\n");
+
+	return ret;
+}
+module_init(mdss_smmu_driver_init);
+
+static void __exit mdss_smmu_driver_cleanup(void)
+{
+	platform_driver_unregister(&mdss_smmu_driver);
+}
+module_exit(mdss_smmu_driver_cleanup);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MDSS SMMU driver");
diff --git a/drivers/video/fbdev/msm/mdss_smmu.h b/drivers/video/fbdev/msm/mdss_smmu.h
new file mode 100644
index 0000000..be2a55f
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_smmu.h
@@ -0,0 +1,318 @@
+/* Copyright (c) 2007-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_SMMU_H
+#define MDSS_SMMU_H
+
+#include <linux/msm_ion.h>
+#include <linux/msm_mdp.h>
+#include <linux/mdss_io_util.h>
+
+#include "mdss.h"
+#include "mdss_mdp.h"
+#include "mdss_debug.h"
+
+#define MDSS_SMMU_COMPATIBLE "qcom,smmu"
+#define SMMU_CBN_FSYNR1		0x6c
+
+/* Describes an iommu client mapping: names plus the VA range it covers. */
+struct mdss_iommu_map_type {
+	char *client_name;
+	char *ctx_name;
+	unsigned long start;
+	unsigned long size;
+};
+
+/* Matches a DT compatible to a domain id and the VA range to map for it. */
+struct mdss_smmu_domain {
+	char *ctx_name;		/* context bank name (NULL for rot domains) */
+	int domain;		/* MDSS_IOMMU_DOMAIN_* identifier */
+	unsigned long start;	/* VA range start */
+	unsigned long size;	/* VA range length */
+};
+
+void mdss_smmu_register(struct device *dev);
+int mdss_smmu_init(struct mdss_data_type *mdata, struct device *dev);
+
+/* Forces DMA_BIDIRECTIONAL when the target has the BI_DIR quirk. */
+static inline int mdss_smmu_dma_data_direction(int dir)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	return (mdss_has_quirk(mdata, MDSS_QUIRK_DMA_BI_DIR)) ?
+		DMA_BIDIRECTIONAL : dir;
+}
+
+/* True when the DT node name matches the smmu compatible string. */
+static inline bool is_mdss_smmu_compatible_device(const char *str)
+{
+	/* check the prefix */
+	return (!strcmp(str, MDSS_SMMU_COMPATIBLE)) ? true : false;
+}
+
+/*
+ * mdss_smmu_is_valid_domain_type()
+ *
+ * Used to check if rotator smmu domain is defined or not by checking if
+ * vbif base is defined and wb rotator exists. As those are associated.
+ */
+static inline bool mdss_smmu_is_valid_domain_type(struct mdss_data_type *mdata,
+		int domain_type)
+{
+	if ((domain_type == MDSS_IOMMU_DOMAIN_ROT_UNSECURE ||
+			domain_type == MDSS_IOMMU_DOMAIN_ROT_SECURE) &&
+			(!mdss_mdp_is_wb_rotator_supported(mdata) ||
+			!mdss_mdp_is_nrt_vbif_base_defined(mdata)))
+		return false;
+	return true;
+}
+
+/* Returns the smmu client for 'domain', or NULL if out of range/invalid. */
+static inline struct mdss_smmu_client *mdss_smmu_get_cb(u32 domain)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!mdss_smmu_is_valid_domain_type(mdata, domain))
+		return NULL;
+
+	return (domain >= MDSS_IOMMU_MAX_DOMAIN) ? NULL :
+			&mdata->mdss_smmu[domain];
+}
+
+/* Returns the ion client held by mdata, or NULL when unavailable. */
+static inline struct ion_client *mdss_get_ionclient(void)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	return mdata ? mdata->iclient : NULL;
+}
+
+static inline int is_mdss_iommu_attached(void)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	return mdata ? mdata->iommu_attached : false;
+}
+
+/*
+ * Picks the secure vs non-secure and rotator vs mdp domain for the session
+ * flags.  The rotator domains are used only when their context bank device
+ * was actually probed.
+ * NOTE(review): mdata is dereferenced without a NULL check - assumed to run
+ * only after mdss init; confirm against callers.
+ */
+static inline int mdss_smmu_get_domain_type(u32 flags, bool rotator)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	int type;
+
+	if (flags & MDP_SECURE_OVERLAY_SESSION) {
+		type = (rotator &&
+			mdata->mdss_smmu[MDSS_IOMMU_DOMAIN_ROT_SECURE].dev) ?
+			MDSS_IOMMU_DOMAIN_ROT_SECURE : MDSS_IOMMU_DOMAIN_SECURE;
+	} else {
+		type = (rotator &&
+			mdata->mdss_smmu[MDSS_IOMMU_DOMAIN_ROT_UNSECURE].dev) ?
+			MDSS_IOMMU_DOMAIN_ROT_UNSECURE :
+			MDSS_IOMMU_DOMAIN_UNSECURE;
+	}
+	return type;
+}
+
+/* Attaches all smmu domains; idempotent and serialized by iommu_lock. */
+static inline int mdss_smmu_attach(struct mdss_data_type *mdata)
+{
+	int rc;
+
+	mdata->mdss_util->iommu_lock();
+	MDSS_XLOG(mdata->iommu_attached);
+
+	if (mdata->iommu_attached) {
+		pr_debug("mdp iommu already attached\n");
+		rc = 0;
+		goto end;
+	}
+
+	if (!mdata->smmu_ops.smmu_attach) {
+		rc = -ENODEV;
+		goto end;
+	}
+
+	rc =  mdata->smmu_ops.smmu_attach(mdata);
+	if (!rc)
+		mdata->iommu_attached = true;
+
+end:
+	mdata->mdss_util->iommu_unlock();
+	return rc;
+}
+
+/* Counterpart of mdss_smmu_attach(); no-op when already detached. */
+static inline int mdss_smmu_detach(struct mdss_data_type *mdata)
+{
+	int rc;
+
+	mdata->mdss_util->iommu_lock();
+	MDSS_XLOG(mdata->iommu_attached);
+
+	if (!mdata->iommu_attached) {
+		pr_debug("mdp iommu already detached\n");
+		rc = 0;
+		goto end;
+	}
+
+	if (!mdata->smmu_ops.smmu_detach) {
+		rc = -ENODEV;
+		goto end;
+	}
+
+	rc = mdata->smmu_ops.smmu_detach(mdata);
+	if (!rc)
+		mdata->iommu_attached = false;
+
+end:
+	mdata->mdss_util->iommu_unlock();
+	return rc;
+}
+
+/* Resolves the domain id for 'type', or -ENODEV when unavailable. */
+static inline int mdss_smmu_get_domain_id(u32 type)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	/*
+	 * Validate mdata before mdss_smmu_is_valid_domain_type(), which
+	 * dereferences it for the rotator domains.
+	 */
+	if (!mdata || !mdata->smmu_ops.smmu_get_domain_id
+			|| type >= MDSS_IOMMU_MAX_DOMAIN)
+		return -ENODEV;
+
+	if (!mdss_smmu_is_valid_domain_type(mdata, type))
+		return -ENODEV;
+
+	return mdata->smmu_ops.smmu_get_domain_id(type);
+}
+
+/* NULL-safe delegator: returns NULL when the smmu ops are not installed. */
+static inline struct dma_buf_attachment *mdss_smmu_dma_buf_attach(
+		struct dma_buf *dma_buf, struct device *dev, int domain)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!mdata || !mdata->smmu_ops.smmu_dma_buf_attach)
+		return NULL;
+
+	return mdata->smmu_ops.smmu_dma_buf_attach(dma_buf, dev, domain);
+}
+
+/*
+ * NOTE(review): unlike the wrapper above, this and several wrappers below
+ * dereference mdata without a NULL check - assumed to run only after
+ * mdss_smmu_init(); confirm against callers.
+ */
+static inline int mdss_smmu_map_dma_buf(struct dma_buf *dma_buf,
+		struct sg_table *table, int domain, dma_addr_t *iova,
+		unsigned long *size, int dir)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!mdata->smmu_ops.smmu_map_dma_buf)
+		return -ENODEV;
+
+	return mdata->smmu_ops.smmu_map_dma_buf(dma_buf, table,
+			domain, iova, size,
+			mdss_smmu_dma_data_direction(dir));
+}
+
+static inline void mdss_smmu_unmap_dma_buf(struct sg_table *table, int domain,
+		int dir, struct dma_buf *dma_buf)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (mdata->smmu_ops.smmu_unmap_dma_buf)
+		mdata->smmu_ops.smmu_unmap_dma_buf(table, domain,
+		mdss_smmu_dma_data_direction(dir), dma_buf);
+}
+
+static inline int mdss_smmu_dma_alloc_coherent(struct device *dev, size_t size,
+		dma_addr_t *phys, dma_addr_t *iova, void **cpu_addr,
+		gfp_t gfp, int domain)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!mdata || !mdata->smmu_ops.smmu_dma_alloc_coherent)
+		return -ENODEV;
+
+	return mdata->smmu_ops.smmu_dma_alloc_coherent(dev, size,
+			phys, iova, cpu_addr, gfp, domain);
+}
+
+static inline void mdss_smmu_dma_free_coherent(struct device *dev, size_t size,
+		void *cpu_addr, dma_addr_t phys, dma_addr_t iova, int domain)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (mdata && mdata->smmu_ops.smmu_dma_free_coherent)
+		mdata->smmu_ops.smmu_dma_free_coherent(dev, size, cpu_addr,
+			phys, iova, domain);
+}
+
+static inline int mdss_smmu_map(int domain, phys_addr_t iova, phys_addr_t phys,
+		int gfp_order, int prot)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!mdata->smmu_ops.smmu_map)
+		return -ENODEV;
+
+	return mdata->smmu_ops.smmu_map(domain, iova, phys, gfp_order, prot);
+}
+
+static inline void mdss_smmu_unmap(int domain, unsigned long iova,
+		int gfp_order)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (mdata->smmu_ops.smmu_unmap)
+		mdata->smmu_ops.smmu_unmap(domain, iova, gfp_order);
+}
+
+static inline char *mdss_smmu_dsi_alloc_buf(struct device *dev, int size,
+		dma_addr_t *dmap, gfp_t gfp)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!mdata->smmu_ops.smmu_dsi_alloc_buf)
+		return NULL;
+
+	return mdata->smmu_ops.smmu_dsi_alloc_buf(dev, size, dmap, gfp);
+}
+
+static inline int mdss_smmu_dsi_map_buffer(phys_addr_t phys,
+		unsigned int domain, unsigned long size, dma_addr_t *dma_addr,
+		void *cpu_addr, int dir)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!mdata->smmu_ops.smmu_dsi_map_buffer)
+		return -ENODEV;
+
+	return mdata->smmu_ops.smmu_dsi_map_buffer(phys, domain, size,
+			dma_addr, cpu_addr,
+			mdss_smmu_dma_data_direction(dir));
+}
+
+static inline void mdss_smmu_dsi_unmap_buffer(dma_addr_t dma_addr, int domain,
+		unsigned long size, int dir)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (mdata->smmu_ops.smmu_dsi_unmap_buffer)
+		mdata->smmu_ops.smmu_dsi_unmap_buffer(dma_addr, domain,
+			size, mdss_smmu_dma_data_direction(dir));
+}
+
+static inline void mdss_smmu_deinit(struct mdss_data_type *mdata)
+{
+	if (mdata->smmu_ops.smmu_deinit)
+		mdata->smmu_ops.smmu_deinit(mdata);
+}
+
+static inline struct sg_table *mdss_smmu_sg_table_clone(struct sg_table
+			*orig_table, gfp_t gfp_mask, bool padding)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!mdata || !mdata->smmu_ops.smmu_sg_table_clone)
+		return NULL;
+
+	return mdata->smmu_ops.smmu_sg_table_clone(orig_table,
+				gfp_mask, padding);
+}
+
+#endif /* MDSS_SMMU_H */
diff --git a/drivers/video/fbdev/msm/mdss_util.c b/drivers/video/fbdev/msm/mdss_util.c
new file mode 100644
index 0000000..30fcf28
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_util.c
@@ -0,0 +1,256 @@
+
+/* Copyright (c) 2007-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/interrupt.h>
+#include "mdss_mdp.h"
+
+struct mdss_hw *mdss_irq_handlers[MDSS_MAX_HW_BLK];
+static DEFINE_SPINLOCK(mdss_lock);
+
+/*
+ * Records 'hw' as the irq handler for its block index.
+ * NOTE(review): returns 0 even when the slot is already taken (only logs
+ * an error) - confirm callers do not rely on a failure return here.
+ */
+int mdss_register_irq(struct mdss_hw *hw)
+{
+	unsigned long irq_flags;
+	u32 ndx_bit;
+
+	if (!hw || hw->hw_ndx >= MDSS_MAX_HW_BLK)
+		return -EINVAL;
+
+	ndx_bit = BIT(hw->hw_ndx);
+
+	spin_lock_irqsave(&mdss_lock, irq_flags);
+	if (!mdss_irq_handlers[hw->hw_ndx])
+		mdss_irq_handlers[hw->hw_ndx] = hw;
+	else
+		pr_err("panel %d's irq at %pK is already registered\n",
+			hw->hw_ndx, hw->irq_handler);
+	spin_unlock_irqrestore(&mdss_lock, irq_flags);
+
+	return 0;
+}
+
+/*
+ * Sets this block's bit in the shared irq mask; the underlying interrupt
+ * line is enabled only on the 0 -> non-zero mask transition.
+ */
+void mdss_enable_irq(struct mdss_hw *hw)
+{
+	unsigned long irq_flags;
+	u32 ndx_bit;
+
+	if (hw->hw_ndx >= MDSS_MAX_HW_BLK)
+		return;
+
+	if (!mdss_irq_handlers[hw->hw_ndx]) {
+		pr_err("failed. First register the irq then enable it.\n");
+		return;
+	}
+
+	ndx_bit = BIT(hw->hw_ndx);
+
+	pr_debug("Enable HW=%d irq ena=%d mask=%x\n", hw->hw_ndx,
+			hw->irq_info->irq_ena, hw->irq_info->irq_mask);
+
+	spin_lock_irqsave(&mdss_lock, irq_flags);
+	if (hw->irq_info->irq_mask & ndx_bit) {
+		pr_debug("MDSS HW ndx=%d is already set, mask=%x\n",
+				hw->hw_ndx, hw->irq_info->irq_mask);
+	} else {
+		hw->irq_info->irq_mask |= ndx_bit;
+		if (!hw->irq_info->irq_ena) {
+			hw->irq_info->irq_ena = true;
+			enable_irq(hw->irq_info->irq);
+		}
+	}
+	spin_unlock_irqrestore(&mdss_lock, irq_flags);
+}
+
+/*
+ * Clears this block's bit; the interrupt line is disabled (nosync) only
+ * when no block's bit remains set.
+ */
+void mdss_disable_irq(struct mdss_hw *hw)
+{
+	unsigned long irq_flags;
+	u32 ndx_bit;
+
+	if (hw->hw_ndx >= MDSS_MAX_HW_BLK)
+		return;
+
+	ndx_bit = BIT(hw->hw_ndx);
+
+	pr_debug("Disable HW=%d irq ena=%d mask=%x\n", hw->hw_ndx,
+			hw->irq_info->irq_ena, hw->irq_info->irq_mask);
+
+	spin_lock_irqsave(&mdss_lock, irq_flags);
+	if (!(hw->irq_info->irq_mask & ndx_bit)) {
+		pr_warn("MDSS HW ndx=%d is NOT set\n", hw->hw_ndx);
+	} else {
+		hw->irq_info->irq_mask &= ~ndx_bit;
+		if (hw->irq_info->irq_mask == 0) {
+			hw->irq_info->irq_ena = false;
+			disable_irq_nosync(hw->irq_info->irq);
+		}
+	}
+	spin_unlock_irqrestore(&mdss_lock, irq_flags);
+}
+
+/* called from interrupt context */
+void mdss_disable_irq_nosync(struct mdss_hw *hw)
+{
+	u32 ndx_bit;
+
+	if (hw->hw_ndx >= MDSS_MAX_HW_BLK)
+		return;
+
+	ndx_bit = BIT(hw->hw_ndx);
+
+	pr_debug("Disable HW=%d irq ena=%d mask=%x\n", hw->hw_ndx,
+			hw->irq_info->irq_ena, hw->irq_info->irq_mask);
+
+	/* Plain spin_lock is sufficient: callers are in interrupt context. */
+	spin_lock(&mdss_lock);
+	if (!(hw->irq_info->irq_mask & ndx_bit)) {
+		pr_warn("MDSS HW ndx=%d is NOT set\n", hw->hw_ndx);
+	} else {
+		hw->irq_info->irq_mask &= ~ndx_bit;
+		if (hw->irq_info->irq_mask == 0) {
+			hw->irq_info->irq_ena = false;
+			disable_irq_nosync(hw->irq_info->irq);
+		}
+	}
+	spin_unlock(&mdss_lock);
+}
+
+/* Routes an interrupt to the handler registered for hw_ndx, if any. */
+int mdss_irq_dispatch(u32 hw_ndx, int irq, void *ptr)
+{
+	struct mdss_hw *hw;
+	int rc = -ENODEV;
+
+	spin_lock(&mdss_lock);
+	hw = mdss_irq_handlers[hw_ndx];
+	spin_unlock(&mdss_lock);
+
+	if (hw)
+		rc = hw->irq_handler(irq, hw->ptr);
+
+	return rc;
+}
+
+/*
+ * Wake-irq variant of mdss_enable_irq(): tracks a separate wake mask and
+ * calls enable_irq_wake() on the first enabled block.
+ */
+void mdss_enable_irq_wake(struct mdss_hw *hw)
+{
+	unsigned long irq_flags;
+	u32 ndx_bit;
+
+	if (hw->hw_ndx >= MDSS_MAX_HW_BLK)
+		return;
+
+	if (!mdss_irq_handlers[hw->hw_ndx]) {
+		pr_err("failed. First register the irq then enable it.\n");
+		return;
+	}
+
+	ndx_bit = BIT(hw->hw_ndx);
+
+	pr_debug("Enable HW=%d irq ena=%d mask=%x\n", hw->hw_ndx,
+			hw->irq_info->irq_wake_ena,
+			hw->irq_info->irq_wake_mask);
+
+	spin_lock_irqsave(&mdss_lock, irq_flags);
+	if (hw->irq_info->irq_wake_mask & ndx_bit) {
+		pr_debug("MDSS HW ndx=%d is already set, mask=%x\n",
+				hw->hw_ndx, hw->irq_info->irq_wake_mask);
+	} else {
+		hw->irq_info->irq_wake_mask |= ndx_bit;
+		if (!hw->irq_info->irq_wake_ena) {
+			hw->irq_info->irq_wake_ena = true;
+			enable_irq_wake(hw->irq_info->irq);
+		}
+	}
+	spin_unlock_irqrestore(&mdss_lock, irq_flags);
+}
+
+/* Counterpart of mdss_enable_irq_wake(). */
+void mdss_disable_irq_wake(struct mdss_hw *hw)
+{
+	unsigned long irq_flags;
+	u32 ndx_bit;
+
+	if (hw->hw_ndx >= MDSS_MAX_HW_BLK)
+		return;
+
+	ndx_bit = BIT(hw->hw_ndx);
+
+	pr_debug("Disable HW=%d irq ena=%d mask=%x\n", hw->hw_ndx,
+			hw->irq_info->irq_wake_ena,
+			hw->irq_info->irq_wake_mask);
+
+	spin_lock_irqsave(&mdss_lock, irq_flags);
+	if (!(hw->irq_info->irq_wake_mask & ndx_bit)) {
+		pr_warn("MDSS HW ndx=%d is NOT set\n", hw->hw_ndx);
+	} else {
+		hw->irq_info->irq_wake_mask &= ~ndx_bit;
+		if (hw->irq_info->irq_wake_ena) {
+			hw->irq_info->irq_wake_ena = false;
+			disable_irq_wake(hw->irq_info->irq);
+		}
+	}
+	spin_unlock_irqrestore(&mdss_lock, irq_flags);
+}
+
+/*
+ * check_display() - parse a boot parameter string for a disable flag.
+ *
+ * Looks for the first ':' that follows the first ';' in param_string and
+ * returns true when the character right after that ':' is '1'.
+ */
+static bool check_display(char *param_string)
+{
+	char *str = NULL;
+	bool display_disable = false;
+
+	str = strnstr(param_string, ";", MDSS_MAX_PANEL_LEN);
+	if (!str)
+		return display_disable;
+
+	str = strnstr(str, ":", MDSS_MAX_PANEL_LEN);
+	if (!str)
+		return display_disable;
+	else if (str[1] == '1')
+		display_disable = 1;
+
+	return display_disable;
+}
+
+/* Shared MDSS utility interface instance; some hooks start out NULL. */
+struct mdss_util_intf mdss_util = {
+	.register_irq = mdss_register_irq,
+	.enable_irq = mdss_enable_irq,
+	.disable_irq = mdss_disable_irq,
+	.enable_wake_irq = mdss_enable_irq_wake,
+	.disable_wake_irq = mdss_disable_irq_wake,
+	.disable_irq_nosync = mdss_disable_irq_nosync,
+	.irq_dispatch = mdss_irq_dispatch,
+	.get_iommu_domain = NULL,
+	.iommu_attached = NULL,
+	.iommu_ctrl = NULL,
+	.bus_bandwidth_ctrl = NULL,
+	.bus_scale_set_quota = NULL,
+	.panel_intf_type = NULL,
+	.panel_intf_status = NULL,
+	.mdp_probe_done = false,
+	.param_check = check_display,
+	.display_disabled = false
+};
+
+/* Accessor other modules use to reach the shared utility interface. */
+struct mdss_util_intf *mdss_get_util_intf(void)
+{
+	return &mdss_util;
+}
+EXPORT_SYMBOL(mdss_get_util_intf);
+
+/* This routine should only be called from interrupt context */
+bool mdss_get_irq_enable_state(struct mdss_hw *hw)
+{
+	bool is_irq_enabled;
+
+	/* Plain spin_lock: safe per the interrupt-context note above. */
+	spin_lock(&mdss_lock);
+	is_irq_enabled = hw->irq_info->irq_ena;
+	spin_unlock(&mdss_lock);
+
+	return is_irq_enabled;
+}
diff --git a/drivers/video/fbdev/msm/mdss_wb.c b/drivers/video/fbdev/msm/mdss_wb.c
new file mode 100644
index 0000000..4b509ec
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_wb.c
@@ -0,0 +1,213 @@
+/* Copyright (c) 2011-2015, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/msm_mdp.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/switch.h>
+
+#include "mdss_panel.h"
+#include "mdss_wb.h"
+
+/**
+ * mdss_wb_check_params - check new panel info params
+ * @pdata: current panel information
+ * @new: updates to panel info
+ *
+ * Checks if there are any changes that require panel reconfiguration
+ * in order to be reflected on writeback buffer.
+ *
+ * Return negative errno if invalid input, zero if there is no panel reconfig
+ * needed and non-zero if reconfiguration is needed.
+ */
+static int mdss_wb_check_params(struct mdss_panel_data *pdata,
+	struct mdss_panel_info *new)
+{
+	struct mdss_panel_info *old;
+
+	if (!pdata || !new) {
+		pr_err("%s: Invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	if (new->xres >= 4096 || new->yres >= 4096) {
+		pr_err("%s: Invalid resolutions\n", __func__);
+		return -EINVAL;
+	}
+
+	old = &pdata->panel_info;
+
+	if ((old->xres != new->xres) || (old->yres != new->yres))
+		return 1;
+
+	return 0;
+}
+
+static int mdss_wb_event_handler(struct mdss_panel_data *pdata,
+				 int event, void *arg)
+{
+	int rc = 0;
+
+	switch (event) {
+	case MDSS_EVENT_CHECK_PARAMS:
+		rc = mdss_wb_check_params(pdata, (struct mdss_panel_info *)arg);
+		break;
+	default:
+		pr_debug("%s: panel event (%d) not handled\n", __func__, event);
+		break;
+	}
+	return rc;
+}
+
/*
 * mdss_wb_parse_dt() - read writeback panel geometry from device tree
 * @pdev: platform device whose of_node is consulted
 * @pdata: panel data to populate
 *
 * Missing properties are not an error: defaults apply instead.
 * Always returns 0.
 */
static int mdss_wb_parse_dt(struct platform_device *pdev,
			    struct mdss_panel_data *pdata)
{
	struct device_node *np = pdev->dev.of_node;
	u32 res[2], tmp;
	int rc;

	/* default to 1280x720 when "qcom,mdss_pan_res" is absent */
	rc = of_property_read_u32_array(np, "qcom,mdss_pan_res", res, 2);
	pdata->panel_info.xres = (!rc ? res[0] : 1280);
	pdata->panel_info.yres = (!rc ? res[1] : 720);

	/* default to 24 bpp when "qcom,mdss_pan_bpp" is absent */
	rc = of_property_read_u32(np, "qcom,mdss_pan_bpp", &tmp);
	pdata->panel_info.bpp = (!rc ? tmp : 24);

	return 0;
}
+
+static int mdss_wb_dev_init(struct mdss_wb_ctrl *wb_ctrl)
+{
+	int rc = 0;
+
+	if (!wb_ctrl) {
+		pr_err("%s: no driver data\n", __func__);
+		return -ENODEV;
+	}
+
+	wb_ctrl->sdev.name = "wfd";
+	rc = switch_dev_register(&wb_ctrl->sdev);
+	if (rc) {
+		pr_err("Failed to setup switch dev for writeback panel");
+		return rc;
+	}
+
+	return 0;
+}
+
/* Tear down the "wfd" switch device created by mdss_wb_dev_init() */
static int mdss_wb_dev_uninit(struct mdss_wb_ctrl *wb_ctrl)
{
	if (!wb_ctrl) {
		pr_err("%s: no driver data\n", __func__);
		return -ENODEV;
	}

	switch_dev_unregister(&wb_ctrl->sdev);
	return 0;
}
+
+static int mdss_wb_probe(struct platform_device *pdev)
+{
+	struct mdss_panel_data *pdata = NULL;
+	struct mdss_wb_ctrl *wb_ctrl = NULL;
+	int rc = 0;
+
+	if (!pdev->dev.of_node)
+		return -ENODEV;
+
+	wb_ctrl = devm_kzalloc(&pdev->dev, sizeof(*wb_ctrl), GFP_KERNEL);
+	if (!wb_ctrl)
+		return -ENOMEM;
+
+	pdata = &wb_ctrl->pdata;
+	wb_ctrl->pdev = pdev;
+	platform_set_drvdata(pdev, wb_ctrl);
+
+	rc = !mdss_wb_parse_dt(pdev, pdata);
+	if (!rc)
+		goto error_no_mem;
+
+	rc = mdss_wb_dev_init(wb_ctrl);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to set up device nodes for writeback panel\n");
+		goto error_no_mem;
+	}
+
+	pdata->panel_info.type = WRITEBACK_PANEL;
+	pdata->panel_info.clk_rate = 74250000;
+	pdata->panel_info.pdest = DISPLAY_4;
+	pdata->panel_info.out_format = MDP_Y_CBCR_H2V2_VENUS;
+
+	pdata->event_handler = mdss_wb_event_handler;
+	pdev->dev.platform_data = pdata;
+
+	rc = mdss_register_panel(pdev, pdata);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to register writeback panel\n");
+		goto error_init;
+	}
+
+	return rc;
+
+error_init:
+	mdss_wb_dev_uninit(wb_ctrl);
+error_no_mem:
+	devm_kfree(&pdev->dev, wb_ctrl);
+	return rc;
+}
+
/* Platform driver remove: unregister the switch dev and free state */
static int mdss_wb_remove(struct platform_device *pdev)
{
	struct mdss_wb_ctrl *wb_ctrl = platform_get_drvdata(pdev);

	if (!wb_ctrl) {
		pr_err("%s: no driver data\n", __func__);
		return -ENODEV;
	}

	mdss_wb_dev_uninit(wb_ctrl);
	devm_kfree(&wb_ctrl->pdev->dev, wb_ctrl);
	return 0;
}
+
/* Device tree match table for the virtual writeback panel */
static const struct of_device_id mdss_wb_match[] = {
	{ .compatible = "qcom,mdss_wb", },
	{ { 0 } }	/* sentinel */
};
+
/* Platform driver binding for the writeback panel device */
static struct platform_driver mdss_wb_driver = {
	.probe = mdss_wb_probe,
	.remove = mdss_wb_remove,
	.driver = {
		.name = "mdss_wb",
		.of_match_table = mdss_wb_match,
	},
};
+
+static int __init mdss_wb_driver_init(void)
+{
+	int rc = 0;
+
+	rc = platform_driver_register(&mdss_wb_driver);
+	return rc;
+}
+
+module_init(mdss_wb_driver_init);
diff --git a/drivers/video/fbdev/msm/mdss_wb.h b/drivers/video/fbdev/msm/mdss_wb.h
new file mode 100644
index 0000000..9cc88b6
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_wb.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_WB_H
+#define MDSS_WB_H
+
+#include <linux/switch.h>
+
/* Per-instance state for the virtual writeback (WFD) panel driver */
struct mdss_wb_ctrl {
	struct platform_device *pdev;	/* owning platform device */
	struct mdss_panel_data pdata;	/* panel data registered with MDSS */
	struct switch_dev sdev;		/* "wfd" switch device node */
};
+
+#endif
diff --git a/drivers/video/fbdev/msm/mhl_msc.c b/drivers/video/fbdev/msm/mhl_msc.c
new file mode 100644
index 0000000..ca9e74a
--- /dev/null
+++ b/drivers/video/fbdev/msm/mhl_msc.c
@@ -0,0 +1,729 @@
+/* Copyright (c) 2013-2014, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/mhl_8334.h>
+#include <linux/vmalloc.h>
+#include <linux/input.h>
+#include "mhl_msc.h"
+#include "mdss_hdmi_mhl.h"
+
+static struct mhl_tx_ctrl *mhl_ctrl;
+static DEFINE_MUTEX(msc_send_workqueue_mutex);
+
/* Names of the 16 MHL DEVCAP registers, indexed by register offset;
 * padded to equal width for aligned debug output.
 */
const char *devcap_reg_name[] = {
	"DEV_STATE       ",
	"MHL_VERSION     ",
	"DEV_CAT         ",
	"ADOPTER_ID_H    ",
	"ADOPTER_ID_L    ",
	"VID_LINK_MODE   ",
	"AUD_LINK_MODE   ",
	"VIDEO_TYPE      ",
	"LOG_DEV_MAP     ",
	"BANDWIDTH       ",
	"FEATURE_FLAG    ",
	"DEVICE_ID_H     ",
	"DEVICE_ID_L     ",
	"SCRATCHPAD_SIZE ",
	"INT_STAT_SIZE   ",
	"Reserved        ",
};
+
+static bool mhl_check_tmds_enabled(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	if (mhl_ctrl && mhl_ctrl->hdmi_mhl_ops) {
+		struct msm_hdmi_mhl_ops *ops = mhl_ctrl->hdmi_mhl_ops;
+		struct platform_device *pdev = mhl_ctrl->pdata->hdmi_pdev;
+
+		return (ops->tmds_enabled(pdev) == true);
+	}
+	pr_err("%s: invalid input\n", __func__);
+	return false;
+}
+
/*
 * mhl_print_devcap() - pretty-print one DEVCAP register read
 *
 * DEV_CAT and FEATURE_FLAG get their bit-fields decoded; every other
 * offset is dumped as a raw value.
 */
static void mhl_print_devcap(u8 offset, u8 devcap)
{
	switch (offset) {
	case DEVCAP_OFFSET_DEV_CAT:
		pr_debug("DCAP: %02X %s: %02X DEV_TYPE=%X POW=%s\n",
			offset, devcap_reg_name[offset], devcap,
			devcap & 0x0F, (devcap & 0x10) ? "y" : "n");
		break;
	case DEVCAP_OFFSET_FEATURE_FLAG:
		pr_debug("DCAP: %02X %s: %02X RCP=%s RAP=%s SP=%s\n",
			offset, devcap_reg_name[offset], devcap,
			(devcap & 0x01) ? "y" : "n",
			(devcap & 0x02) ? "y" : "n",
			(devcap & 0x04) ? "y" : "n");
		break;
	default:
		pr_debug("DCAP: %02X %s: %02X\n",
			offset, devcap_reg_name[offset], devcap);
		break;
	}
}
+
+static bool mhl_qualify_path_enable(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	int rc = false;
+
+	if (!mhl_ctrl)
+		return rc;
+
+	if (mhl_ctrl->tmds_en_state ||
+	    /* Identify sink with non-standard INT STAT SIZE */
+	    (mhl_ctrl->devcap[DEVCAP_OFFSET_MHL_VERSION] == 0x10 &&
+	     mhl_ctrl->devcap[DEVCAP_OFFSET_INT_STAT_SIZE] == 0x44))
+		rc = true;
+
+	return rc;
+}
+
/* Record @ctrl in the file-scope mhl_ctrl used by the MSC layer */
void mhl_register_msc(struct mhl_tx_ctrl *ctrl)
{
	if (ctrl)
		mhl_ctrl = ctrl;
}
+
/*
 * mhl_flag_scrpd_burst_req() - track scratchpad ownership for SET_INT
 *
 * For a SET_INT on MHL_RCHANGE_INT: if the scratchpad is busy, a
 * REQ_WRT request is postponed (returns 1); otherwise the busy state
 * (and, for REQ_WRT, the pending write-burst flag) is recorded.
 * Returns nonzero when the caller should re-queue the command.
 */
static int mhl_flag_scrpd_burst_req(struct mhl_tx_ctrl *mhl_ctrl,
		struct msc_command_struct *req)
{
	int postpone_send = 0;

	if ((req->command == MHL_SET_INT) &&
	    (req->offset == MHL_RCHANGE_INT)) {
		if (mhl_ctrl->scrpd_busy) {
			/* reduce priority */
			if (req->payload.data[0] == MHL_INT_REQ_WRT)
				postpone_send = 1;
		} else {
			if (req->payload.data[0] == MHL_INT_REQ_WRT) {
				mhl_ctrl->scrpd_busy = true;
				mhl_ctrl->wr_burst_pending = true;
			} else if (req->payload.data[0] == MHL_INT_GRT_WRT) {
				mhl_ctrl->scrpd_busy = true;
			}
		}
	}
	return postpone_send;
}
+
+void mhl_msc_send_work(struct work_struct *work)
+{
+	struct mhl_tx_ctrl *mhl_ctrl =
+		container_of(work, struct mhl_tx_ctrl, mhl_msc_send_work);
+	struct msc_cmd_envelope *cmd_env;
+	int ret, postpone_send;
+	/*
+	 * Remove item from the queue
+	 * and schedule it
+	 */
+	mutex_lock(&msc_send_workqueue_mutex);
+	while (!list_empty(&mhl_ctrl->list_cmd)) {
+		cmd_env = list_first_entry(&mhl_ctrl->list_cmd,
+					   struct msc_cmd_envelope,
+					   msc_queue_envelope);
+		list_del(&cmd_env->msc_queue_envelope);
+		mutex_unlock(&msc_send_workqueue_mutex);
+
+		postpone_send = mhl_flag_scrpd_burst_req(
+			mhl_ctrl,
+			&cmd_env->msc_cmd_msg);
+		if (postpone_send) {
+			if (cmd_env->msc_cmd_msg.retry-- > 0) {
+				mutex_lock(&msc_send_workqueue_mutex);
+				list_add_tail(
+					&cmd_env->msc_queue_envelope,
+					&mhl_ctrl->list_cmd);
+				mutex_unlock(&msc_send_workqueue_mutex);
+			} else {
+				pr_err("%s: max scrpd retry out\n",
+				       __func__);
+			}
+		} else {
+			ret = mhl_send_msc_command(mhl_ctrl,
+						   &cmd_env->msc_cmd_msg);
+			if (ret == -EAGAIN) {
+				int retry = 2;
+
+				while (retry--) {
+					ret = mhl_send_msc_command(
+						mhl_ctrl,
+						&cmd_env->msc_cmd_msg);
+					if (ret != -EAGAIN)
+						break;
+				}
+			}
+			if (ret == -EAGAIN)
+				pr_err("%s: send_msc_command retry out!\n",
+				       __func__);
+			vfree(cmd_env);
+		}
+
+		mutex_lock(&msc_send_workqueue_mutex);
+	}
+	mutex_unlock(&msc_send_workqueue_mutex);
+}
+
+int mhl_queue_msc_command(struct mhl_tx_ctrl *mhl_ctrl,
+			  struct msc_command_struct *req,
+			  int priority_send)
+{
+	struct msc_cmd_envelope *cmd_env;
+
+	mutex_lock(&msc_send_workqueue_mutex);
+	cmd_env = vmalloc(sizeof(struct msc_cmd_envelope));
+	if (!cmd_env) {
+		pr_err("%s: out of memory!\n", __func__);
+		mutex_unlock(&msc_send_workqueue_mutex);
+		return -ENOMEM;
+	}
+
+	memcpy(&cmd_env->msc_cmd_msg, req,
+	       sizeof(struct msc_command_struct));
+
+	if (priority_send)
+		list_add(&cmd_env->msc_queue_envelope,
+			 &mhl_ctrl->list_cmd);
+	else
+		list_add_tail(&cmd_env->msc_queue_envelope,
+			      &mhl_ctrl->list_cmd);
+	mutex_unlock(&msc_send_workqueue_mutex);
+	queue_work(mhl_ctrl->msc_send_workqueue, &mhl_ctrl->mhl_msc_send_work);
+
+	return 0;
+}
+
/* Cache one DEVCAP register value (offset 0..15) and log it */
static int mhl_update_devcap(struct mhl_tx_ctrl *mhl_ctrl,
	int offset, u8 devcap)
{
	if (!mhl_ctrl)
		return -EFAULT;
	if (offset < 0 || offset > 15)
		return -EFAULT;
	mhl_ctrl->devcap[offset] = devcap;
	mhl_print_devcap(offset, mhl_ctrl->devcap[offset]);

	return 0;
}
+
/*
 * mhl_msc_clear() - reset cached MSC/DEVCAP state (e.g. on disconnect)
 *
 * Returns -EFAULT on NULL input, 0 otherwise.
 */
int mhl_msc_clear(struct mhl_tx_ctrl *mhl_ctrl)
{
	if (!mhl_ctrl)
		return -EFAULT;

	/* 16 == DEVCAP register count (matches mhl_update_devcap's bound) */
	memset(mhl_ctrl->devcap, 0, 16);
	mhl_ctrl->devcap_state = 0;
	mhl_ctrl->path_en_state = 0;
	mhl_ctrl->status[0] = 0;
	mhl_ctrl->status[1] = 0;
	mhl_ctrl->scrpd_busy = 0;
	mhl_ctrl->wr_burst_pending = 0;

	return 0;
}
+
/*
 * mhl_msc_command_done() - post-process a completed outbound MSC command
 * @mhl_ctrl: MHL state
 * @req: the finished command; req->retval carries any read result
 *
 * WRITE_STAT(LINK_MODE): mirrors PATH_EN into TMDS/HPD state.
 * READ_DEVCAP: caches the register and reacts to selected offsets.
 * WRITE_BURST: notifies the peer via DSCR_CHG.  Always returns 0.
 */
int mhl_msc_command_done(struct mhl_tx_ctrl *mhl_ctrl,
			 struct msc_command_struct *req)
{
	bool dongle_pwr_en = false;

	switch (req->command) {
	case MHL_WRITE_STAT:
		if (req->offset == MHL_STATUS_REG_LINK_MODE) {
			if (req->payload.data[0]
			    & MHL_STATUS_PATH_ENABLED) {
				/* Enable TMDS output */
				mhl_tmds_ctrl(mhl_ctrl, TMDS_ENABLE);
				/* raise HPD only once the full DEVCAP set is cached */
				if (mhl_ctrl->devcap_state == MHL_DEVCAP_ALL) {
					dongle_pwr_en = mhl_ctrl->devcap[
						   MHL_DEV_CATEGORY_OFFSET] &
						MHL_DEV_CATEGORY_POW_BIT;
					if (dongle_pwr_en)
						mhl_drive_hpd(mhl_ctrl, HPD_UP);
				}
			} else {
				/* Disable TMDS output */
				mhl_tmds_ctrl(mhl_ctrl, TMDS_DISABLE);
				mhl_drive_hpd(mhl_ctrl, HPD_DOWN);
			}
		}
		break;
	case MHL_READ_DEVCAP:
		mhl_update_devcap(mhl_ctrl,
			req->offset, req->retval);
		/* devcap_state is a bitmap of offsets read so far */
		mhl_ctrl->devcap_state |= BIT(req->offset);
		switch (req->offset) {
		case MHL_DEV_CATEGORY_OFFSET:
			if (req->retval & MHL_DEV_CATEGORY_POW_BIT)
				pr_debug("%s: devcap pow bit set\n",
					 __func__);
			else
				pr_debug("%s: devcap pow bit unset\n",
					 __func__);
			break;
		case DEVCAP_OFFSET_RESERVED:
			mhl_tmds_ctrl(mhl_ctrl, TMDS_ENABLE);
			mhl_drive_hpd(mhl_ctrl, HPD_UP);
			break;
		case DEVCAP_OFFSET_MHL_VERSION:
		case DEVCAP_OFFSET_INT_STAT_SIZE:
			/* some sinks need TMDS on before PATH_EN (see helper) */
			if (mhl_qualify_path_enable(mhl_ctrl))
				mhl_tmds_ctrl(mhl_ctrl, TMDS_ENABLE);
			break;
		}
		break;
	case MHL_WRITE_BURST:
		/* tell the peer the scratchpad contents changed */
		mhl_msc_send_set_int(
			mhl_ctrl,
			MHL_RCHANGE_INT,
			MHL_INT_DSCR_CHG,
			MSC_PRIORITY_SEND);
		break;
	}
	return 0;
}
+
+int mhl_msc_send_set_int(struct mhl_tx_ctrl *mhl_ctrl,
+			 u8 offset, u8 mask, u8 prior)
+{
+	struct msc_command_struct req;
+
+	req.command = MHL_SET_INT;
+	req.offset = offset;
+	req.payload.data[0] = mask;
+	return mhl_queue_msc_command(mhl_ctrl, &req, prior);
+}
+
+int mhl_msc_send_write_stat(struct mhl_tx_ctrl *mhl_ctrl,
+			    u8 offset, u8 value)
+{
+	struct msc_command_struct req;
+
+	req.command = MHL_WRITE_STAT;
+	req.offset = offset;
+	req.payload.data[0] = value;
+	return mhl_queue_msc_command(mhl_ctrl, &req, MSC_NORMAL_SEND);
+}
+
/*
 * mhl_msc_write_burst() - queue a WRITE_BURST of scratchpad data
 *
 * Only valid while a write burst is pending (peer granted GRT_WRT);
 * queues at high priority and clears the pending flag.
 * Returns -EFAULT on NULL input or when no burst is pending.
 */
static int mhl_msc_write_burst(struct mhl_tx_ctrl *mhl_ctrl,
	u8 offset, u8 *data, u8 length)
{
	struct msc_command_struct req;

	if (!mhl_ctrl)
		return -EFAULT;

	if (!mhl_ctrl->wr_burst_pending)
		return -EFAULT;

	req.command = MHL_WRITE_BURST;
	req.offset = offset;
	req.length = length;
	req.payload.burst_data = data;
	mhl_queue_msc_command(mhl_ctrl, &req, MSC_PRIORITY_SEND);
	mhl_ctrl->wr_burst_pending = false;
	return 0;
}
+
+int mhl_msc_send_msc_msg(struct mhl_tx_ctrl *mhl_ctrl,
+			 u8 sub_cmd, u8 cmd_data)
+{
+	struct msc_command_struct req;
+
+	req.command = MHL_MSC_MSG;
+	req.payload.data[0] = sub_cmd;
+	req.payload.data[1] = cmd_data;
+	return mhl_queue_msc_command(mhl_ctrl, &req, MSC_NORMAL_SEND);
+}
+
+/*
+ * Certain MSC msgs such as RCPK, RCPE and RAPK
+ * should be transmitted as a high priority
+ * because these msgs should be sent within
+ * 1000ms of a receipt of RCP/RAP. So such msgs can
+ * be added to the head of msc cmd queue.
+ */
+static int mhl_msc_send_prior_msc_msg(struct mhl_tx_ctrl *mhl_ctrl,
+				      u8 sub_cmd, u8 cmd_data)
+{
+	struct msc_command_struct req;
+
+	req.command = MHL_MSC_MSG;
+	req.payload.data[0] = sub_cmd;
+	req.payload.data[1] = cmd_data;
+	return mhl_queue_msc_command(mhl_ctrl, &req, MSC_PRIORITY_SEND);
+}
+
+int mhl_msc_read_devcap(struct mhl_tx_ctrl *mhl_ctrl, u8 offset)
+{
+	struct msc_command_struct req;
+
+	if (offset < 0 || offset > 15)
+		return -EFAULT;
+	req.command = MHL_READ_DEVCAP;
+	req.offset = offset;
+	req.payload.data[0] = 0;
+	return mhl_queue_msc_command(mhl_ctrl, &req, MSC_NORMAL_SEND);
+}
+
+int mhl_msc_read_devcap_all(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	int offset;
+	int ret;
+
+	for (offset = 0; offset < DEVCAP_SIZE; offset++) {
+		ret = mhl_msc_read_devcap(mhl_ctrl, offset);
+		if (ret == -EBUSY)
+			pr_err("%s: queue busy!\n", __func__);
+	}
+	return ret;
+}
+
+static void mhl_handle_input(struct mhl_tx_ctrl *mhl_ctrl,
+			     u8 key_code, u16 input_key_code)
+{
+	int key_press = (key_code & 0x80) == 0;
+
+	pr_debug("%s: send key events[%x][%x][%d]\n",
+		 __func__, key_code, input_key_code, key_press);
+	input_report_key(mhl_ctrl->input, input_key_code, key_press);
+	input_sync(mhl_ctrl->input);
+}
+
+int mhl_rcp_recv(struct mhl_tx_ctrl *mhl_ctrl, u8 key_code)
+{
+	u8 index = key_code & 0x7f;
+	u16 input_key_code;
+
+	if (!mhl_ctrl->rcp_key_code_tbl) {
+		pr_err("%s: RCP Key Code Table not initialized\n", __func__);
+		return -EINVAL;
+	}
+
+	input_key_code = mhl_ctrl->rcp_key_code_tbl[index];
+
+	if ((index < mhl_ctrl->rcp_key_code_tbl_len) &&
+	    (input_key_code > 0)) {
+		/* prior send rcpk */
+		mhl_msc_send_prior_msc_msg(
+			mhl_ctrl,
+			MHL_MSC_MSG_RCPK,
+			key_code);
+
+		if (mhl_ctrl->input)
+			mhl_handle_input(mhl_ctrl, key_code, input_key_code);
+	} else {
+		/* prior send rcpe */
+		mhl_msc_send_prior_msc_msg(
+			mhl_ctrl,
+			MHL_MSC_MSG_RCPE,
+			MHL_RCPE_INEFFECTIVE_KEY_CODE);
+
+		/* send rcpk after rcpe send */
+		mhl_msc_send_prior_msc_msg(
+			mhl_ctrl,
+			MHL_MSC_MSG_RCPK,
+			key_code);
+	}
+	return 0;
+}
+
/*
 * mhl_rap_action() - act on a received RAP action code
 *
 * CONTENT_ON re-enables TMDS; CONTENT_OFF reports a KEY_VENDOR
 * press/release to userspace instead of merely disabling TMDS
 * (see inline comment).  Unknown codes are ignored.  Returns 0.
 */
static int mhl_rap_action(struct mhl_tx_ctrl *mhl_ctrl, u8 action_code)
{
	switch (action_code) {
	case MHL_RAP_CONTENT_ON:
		mhl_tmds_ctrl(mhl_ctrl, TMDS_ENABLE);
		break;
	case MHL_RAP_CONTENT_OFF:
		/*
		 * instead of only disabling tmds
		 * send power button press - CONTENT_OFF
		 */
		input_report_key(mhl_ctrl->input, KEY_VENDOR, 1);
		input_sync(mhl_ctrl->input);
		input_report_key(mhl_ctrl->input, KEY_VENDOR, 0);
		input_sync(mhl_ctrl->input);
		break;
	default:
		break;
	}
	return 0;
}
+
/*
 * mhl_rap_recv() - handle a received RAP action and reply with RAPK
 *
 * POLL and CONTENT_ON/OFF are only honoured while TMDS is enabled;
 * otherwise UNSUPPORTED_ACTION_CODE is reported.  Unknown codes get
 * UNRECOGNIZED_ACTION_CODE.  The RAPK reply is queued at high priority.
 */
static int mhl_rap_recv(struct mhl_tx_ctrl *mhl_ctrl, u8 action_code)
{
	u8 error_code;
	bool tmds_en;

	tmds_en = mhl_check_tmds_enabled(mhl_ctrl);
	switch (action_code) {
	case MHL_RAP_POLL:
		if (tmds_en)
			error_code = MHL_RAPK_NO_ERROR;
		else
			error_code = MHL_RAPK_UNSUPPORTED_ACTION_CODE;
		break;
	case MHL_RAP_CONTENT_ON:
	case MHL_RAP_CONTENT_OFF:
		if (tmds_en) {
			mhl_rap_action(mhl_ctrl, action_code);
			error_code = MHL_RAPK_NO_ERROR;
		} else {
			error_code = MHL_RAPK_UNSUPPORTED_ACTION_CODE;
		}
		break;
	default:
		error_code = MHL_RAPK_UNRECOGNIZED_ACTION_CODE;
		break;
	}
	/* prior send rapk */
	return mhl_msc_send_prior_msc_msg(
		mhl_ctrl,
		MHL_MSC_MSG_RAPK,
		error_code);
}
+
/*
 * mhl_msc_recv_msc_msg() - dispatch a received MSC_MSG sub-command
 *
 * RCP and RAP are processed; RCPK/RCPE/RAPK acknowledgements and
 * unknown sub-commands are only logged.  Returns the handler's status.
 */
int mhl_msc_recv_msc_msg(struct mhl_tx_ctrl *mhl_ctrl,
			 u8 sub_cmd, u8 cmd_data)
{
	int rc = 0;

	switch (sub_cmd) {
	case MHL_MSC_MSG_RCP:
		pr_debug("MHL: receive RCP(0x%02x)\n", cmd_data);
		rc = mhl_rcp_recv(mhl_ctrl, cmd_data);
		break;
	case MHL_MSC_MSG_RCPK:
		pr_debug("MHL: receive RCPK(0x%02x)\n", cmd_data);
		break;
	case MHL_MSC_MSG_RCPE:
		pr_debug("MHL: receive RCPE(0x%02x)\n", cmd_data);
		break;
	case MHL_MSC_MSG_RAP:
		pr_debug("MHL: receive RAP(0x%02x)\n", cmd_data);
		rc = mhl_rap_recv(mhl_ctrl, cmd_data);
		break;
	case MHL_MSC_MSG_RAPK:
		pr_debug("MHL: receive RAPK(0x%02x)\n", cmd_data);
		break;
	default:
		break;
	}
	return rc;
}
+
/*
 * mhl_msc_recv_set_int() - handle a received SET_INT from the peer
 * @offset: interrupt register offset (only 0 and 1 are valid)
 * @set_int: interrupt bits that were set
 *
 * Offset 0: DCAP_CHG re-reads the peer DEVCAP; DSCR_CHG fetches the
 * peer scratchpad; REQ_WRT answers with GRT_WRT; GRT_WRT completes our
 * pending write request and transmits the staged burst.
 * Offset 1: EDID_CHG toggles HPD so the EDID is re-read.
 */
int mhl_msc_recv_set_int(struct mhl_tx_ctrl *mhl_ctrl,
			 u8 offset, u8 set_int)
{
	int prior;

	if (offset >= 2)
		return -EFAULT;

	switch (offset) {
	case 0:
		if (set_int & MHL_INT_DCAP_CHG) {
			/* peer dcap has changed */
			mhl_ctrl->devcap_state = 0;
			mhl_msc_read_devcap_all(mhl_ctrl);
		}
		if (set_int & MHL_INT_DSCR_CHG) {
			/* peer's scratchpad reg changed */
			pr_debug("%s: dscr chg\n", __func__);
			mhl_read_scratchpad(mhl_ctrl);
			mhl_ctrl->scrpd_busy = false;
		}
		if (set_int & MHL_INT_REQ_WRT) {
			/* SET_INT: REQ_WRT */
			/* grant at low priority while a transfer is in flight */
			if (mhl_ctrl->scrpd_busy) {
				prior = MSC_NORMAL_SEND;
			} else {
				prior = MSC_PRIORITY_SEND;
				mhl_ctrl->scrpd_busy = true;
			}
			mhl_msc_send_set_int(
				mhl_ctrl,
				MHL_RCHANGE_INT,
				MHL_INT_GRT_WRT,
				prior);
		}
		if (set_int & MHL_INT_GRT_WRT) {
			/* SET_INT: GRT_WRT */
			pr_debug("%s: recvd req to permit/grant write",
				 __func__);
			/* unblock mhl_request_write_burst() and send the data */
			complete_all(&mhl_ctrl->req_write_done);
			mhl_msc_write_burst(
				mhl_ctrl,
				MHL_SCRATCHPAD_OFFSET,
				mhl_ctrl->scrpd.data,
				mhl_ctrl->scrpd.length);
		}
		break;
	case 1:
		if (set_int & MHL_INT_EDID_CHG) {
			/* peer EDID has changed
			 * toggle HPD to read EDID
			 */
			pr_debug("%s: EDID CHG\n", __func__);
			mhl_drive_hpd(mhl_ctrl, HPD_DOWN);
			msleep(110);
			mhl_drive_hpd(mhl_ctrl, HPD_UP);
		}
		/* last case -- no break needed */
	}
	return 0;
}
+
/*
 * mhl_msc_recv_write_stat() - handle a received WRITE_STAT from the peer
 * @offset: status register offset (only 0 and 1 are valid)
 * @value: new status value; cached in mhl_ctrl->status[offset]
 *
 * Offset 0: a DCAP_RDY transition to ready triggers a full DEVCAP read.
 * Offset 1: a PATH_ENABLED transition mirrors the peer's path state
 * into our LINK_MODE status (and may request CONTENT_ON via RAP).
 */
int mhl_msc_recv_write_stat(struct mhl_tx_ctrl *mhl_ctrl,
			    u8 offset, u8 value)
{
	bool tmds_en;

	if (offset >= 2)
		return -EFAULT;

	switch (offset) {
	case 0:
		/*
		 * connected device bits
		 * changed and DEVCAP READY
		 */
		if (((value ^ mhl_ctrl->status[offset]) &
		     MHL_STATUS_DCAP_RDY)) {
			if (value & MHL_STATUS_DCAP_RDY) {
				mhl_ctrl->devcap_state = 0;
				mhl_msc_read_devcap_all(mhl_ctrl);
			} else {
				/*
				 * peer dcap turned not ready
				 * use old devap state
				 */
				pr_debug("%s: DCAP RDY bit cleared\n",
					 __func__);
			}
		}
		break;
	case 1:
		/*
		 * connected device bits
		 * changed and PATH ENABLED
		 * bit set
		 */
		tmds_en = mhl_check_tmds_enabled(mhl_ctrl);
		if ((value ^ mhl_ctrl->status[offset])
		    & MHL_STATUS_PATH_ENABLED) {
			if (value & MHL_STATUS_PATH_ENABLED) {
				/*
				 * NOTE(review): RAP support is tested against
				 * devcap[offset] (offset == 1, i.e. MHL_VERSION);
				 * verify this shouldn't be
				 * devcap[DEVCAP_OFFSET_FEATURE_FLAG].
				 */
				if (tmds_en &&
				    (mhl_ctrl->devcap[offset] &
				     MHL_FEATURE_RAP_SUPPORT)) {
					mhl_msc_send_msc_msg(
						mhl_ctrl,
						MHL_MSC_MSG_RAP,
						MHL_RAP_CONTENT_ON);
				}
				mhl_ctrl->path_en_state
					|= (MHL_STATUS_PATH_ENABLED |
					    MHL_STATUS_CLK_MODE_NORMAL);
				mhl_msc_send_write_stat(
					mhl_ctrl,
					MHL_STATUS_REG_LINK_MODE,
					mhl_ctrl->path_en_state);
			} else {
				mhl_ctrl->path_en_state
					&= ~(MHL_STATUS_PATH_ENABLED |
					     MHL_STATUS_CLK_MODE_NORMAL);
				mhl_msc_send_write_stat(
					mhl_ctrl,
					MHL_STATUS_REG_LINK_MODE,
					mhl_ctrl->path_en_state);
			}
		}
		break;
	}
	mhl_ctrl->status[offset] = value;
	return 0;
}
+
+static int mhl_request_write_burst(struct mhl_tx_ctrl *mhl_ctrl,
+				   u8 start_reg,
+				   u8 length, u8 *data)
+{
+	int i, reg;
+	int timeout, retry = 20;
+
+	if (!(mhl_ctrl->devcap[DEVCAP_OFFSET_FEATURE_FLAG] &
+	      MHL_FEATURE_SP_SUPPORT)) {
+		pr_debug("MHL: SCRATCHPAD_NOT_SUPPORTED\n");
+		return -EFAULT;
+	}
+
+	/*
+	 * scratchpad remains busy as long as a peer's permission or
+	 * write bursts are pending; experimentally it was found that
+	 * 50ms is optimal
+	 */
+	while (mhl_ctrl->scrpd_busy && retry--)
+		msleep(50);
+	if (!retry) {
+		pr_debug("MHL: scratchpad_busy\n");
+		return -EBUSY;
+	}
+
+	for (i = 0, reg = start_reg; (i < length) &&
+		     (reg < MHL_SCRATCHPAD_SIZE); i++, reg++)
+		mhl_ctrl->scrpd.data[reg] = data[i];
+	mhl_ctrl->scrpd.length = length;
+	mhl_ctrl->scrpd.offset = start_reg;
+
+	retry = 5;
+	do {
+		init_completion(&mhl_ctrl->req_write_done);
+		mhl_msc_send_set_int(
+			mhl_ctrl,
+			MHL_RCHANGE_INT,
+			MHL_INT_REQ_WRT,
+			MSC_PRIORITY_SEND);
+		timeout = wait_for_completion_interruptible_timeout(
+			&mhl_ctrl->req_write_done,
+			msecs_to_jiffies(MHL_BURST_WAIT));
+		if (!timeout)
+			mhl_ctrl->scrpd_busy = false;
+	} while (retry-- && timeout == 0);
+	if (!timeout) {
+		pr_err("%s: timed out!\n", __func__);
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+/* write scratchpad entry */
+int mhl_write_scratchpad(struct mhl_tx_ctrl *mhl_ctrl,
+			  u8 offset, u8 length, u8 *data)
+{
+	int rc;
+
+	if ((length < ADOPTER_ID_SIZE) ||
+	    (length > MAX_SCRATCHPAD_TRANSFER_SIZE) ||
+	    (offset > (MAX_SCRATCHPAD_TRANSFER_SIZE - ADOPTER_ID_SIZE)) ||
+	    ((offset + length) > MAX_SCRATCHPAD_TRANSFER_SIZE)) {
+		pr_debug("MHL: write_burst (0x%02x)\n", -EINVAL);
+		return  -EINVAL;
+	}
+
+	rc = mhl_request_write_burst(mhl_ctrl, offset, length, data);
+
+	return rc;
+}
diff --git a/drivers/video/fbdev/msm/mhl_msc.h b/drivers/video/fbdev/msm/mhl_msc.h
new file mode 100644
index 0000000..59fbd25
--- /dev/null
+++ b/drivers/video/fbdev/msm/mhl_msc.h
@@ -0,0 +1,61 @@
+/* Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MHL_MSC_H__
+#define __MHL_MSC_H__
+#include <linux/mhl_8334.h>
+
+#define MAX_RCP_KEYS_SUPPORTED 256
+
+#define MSC_NORMAL_SEND 0
+#define MSC_PRIORITY_SEND 1
+
+#define TMDS_ENABLE 1
+#define TMDS_DISABLE 0
+
+/******************************************************************/
+/* the below APIs are implemented by the MSC functionality */
+int mhl_msc_clear(struct mhl_tx_ctrl *mhl_ctrl);
+
+int mhl_msc_command_done(struct mhl_tx_ctrl *mhl_ctrl,
+			 struct msc_command_struct *req);
+
+int mhl_msc_send_set_int(struct mhl_tx_ctrl *mhl_ctrl,
+			 u8 offset, u8 mask, u8 priority);
+
+int mhl_msc_send_write_stat(struct mhl_tx_ctrl *mhl_ctrl,
+			    u8 offset, u8 value);
+int mhl_msc_send_msc_msg(struct mhl_tx_ctrl *mhl_ctrl,
+			 u8 sub_cmd, u8 cmd_data);
+
+int mhl_msc_recv_set_int(struct mhl_tx_ctrl *mhl_ctrl,
+			 u8 offset, u8 set_int);
+
+int mhl_msc_recv_write_stat(struct mhl_tx_ctrl *mhl_ctrl,
+			    u8 offset, u8 value);
+int mhl_msc_recv_msc_msg(struct mhl_tx_ctrl *mhl_ctrl,
+			 u8 sub_cmd, u8 cmd_data);
+void mhl_msc_send_work(struct work_struct *work);
+
+/******************************************************************/
+/* Tx should implement these APIs */
+int mhl_send_msc_command(struct mhl_tx_ctrl *mhl_ctrl,
+			 struct msc_command_struct *req);
+void mhl_read_scratchpad(struct mhl_tx_ctrl *mhl_ctrl);
+void mhl_drive_hpd(struct mhl_tx_ctrl *mhl_ctrl, uint8_t to_state);
+void mhl_tmds_ctrl(struct mhl_tx_ctrl *ctrl, uint8_t on);
+/******************************************************************/
+/* MHL driver registers ctrl with MSC */
+void mhl_register_msc(struct mhl_tx_ctrl *ctrl);
+
+#endif /* __MHL_MSC_H__ */
diff --git a/drivers/video/fbdev/msm/mhl_sii8334.c b/drivers/video/fbdev/msm/mhl_sii8334.c
new file mode 100644
index 0000000..5d0ac99
--- /dev/null
+++ b/drivers/video/fbdev/msm/mhl_sii8334.c
@@ -0,0 +1,2097 @@
+/* Copyright (c) 2012-2014, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/input.h>
+#include <linux/usb/msm_hsusb.h>
+#include <linux/mhl_8334.h>
+#include <linux/mdss_io_util.h>
+
+#include "mdss_fb.h"
+#include "mdss_hdmi_tx.h"
+#include "mdss_hdmi_edid.h"
+#include "mdss.h"
+#include "mdss_panel.h"
+#include "mhl_msc.h"
+#include "mdss_hdmi_mhl.h"
+
+#define MHL_DRIVER_NAME "sii8334"
+#define COMPATIBLE_NAME "qcom,mhl-sii8334"
+#define MAX_CURRENT 700000
+
+#define pr_debug_intr(...)
+
+#define MSC_START_BIT_MSC_CMD        (0x01 << 0)
+#define MSC_START_BIT_VS_CMD        (0x01 << 1)
+#define MSC_START_BIT_READ_REG        (0x01 << 2)
+#define MSC_START_BIT_WRITE_REG        (0x01 << 3)
+#define MSC_START_BIT_WRITE_BURST        (0x01 << 4)
+
+/* supported RCP key code */
+u16 support_rcp_key_code_tbl[] = {
+	KEY_ENTER,		/* 0x00 Select */
+	KEY_UP,			/* 0x01 Up */
+	KEY_DOWN,		/* 0x02 Down */
+	KEY_LEFT,		/* 0x03 Left */
+	KEY_RIGHT,		/* 0x04 Right */
+	KEY_UNKNOWN,		/* 0x05 Right-up */
+	KEY_UNKNOWN,		/* 0x06 Right-down */
+	KEY_UNKNOWN,		/* 0x07 Left-up */
+	KEY_UNKNOWN,		/* 0x08 Left-down */
+	KEY_MENU,		/* 0x09 Root Menu */
+	KEY_OPTION,		/* 0x0A Setup Menu */
+	KEY_UNKNOWN,		/* 0x0B Contents Menu */
+	KEY_UNKNOWN,		/* 0x0C Favorite Menu */
+	KEY_EXIT,		/* 0x0D Exit */
+	KEY_RESERVED,		/* 0x0E */
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,		/* 0x1F */
+	KEY_NUMERIC_0,		/* 0x20 NUMERIC_0 */
+	KEY_NUMERIC_1,		/* 0x21 NUMERIC_1 */
+	KEY_NUMERIC_2,		/* 0x22 NUMERIC_2 */
+	KEY_NUMERIC_3,		/* 0x23 NUMERIC_3 */
+	KEY_NUMERIC_4,		/* 0x24 NUMERIC_4 */
+	KEY_NUMERIC_5,		/* 0x25 NUMERIC_5 */
+	KEY_NUMERIC_6,		/* 0x26 NUMERIC_6 */
+	KEY_NUMERIC_7,		/* 0x27 NUMERIC_7 */
+	KEY_NUMERIC_8,		/* 0x28 NUMERIC_8 */
+	KEY_NUMERIC_9,		/* 0x29 NUMERIC_9 */
+	KEY_DOT,		/* 0x2A Dot */
+	KEY_ENTER,		/* 0x2B Enter */
+	KEY_ESC,		/* 0x2C Clear */
+	KEY_RESERVED,		/* 0x2D */
+	KEY_RESERVED,		/* 0x2E */
+	KEY_RESERVED,		/* 0x2F */
+	KEY_UNKNOWN,		/* 0x30 Channel Up */
+	KEY_UNKNOWN,		/* 0x31 Channel Down */
+	KEY_UNKNOWN,		/* 0x32 Previous Channel */
+	KEY_UNKNOWN,		/* 0x33 Sound Select */
+	KEY_UNKNOWN,		/* 0x34 Input Select */
+	KEY_UNKNOWN,		/* 0x35 Show Information */
+	KEY_UNKNOWN,		/* 0x36 Help */
+	KEY_UNKNOWN,		/* 0x37 Page Up */
+	KEY_UNKNOWN,		/* 0x38 Page Down */
+	KEY_RESERVED,		/* 0x39 */
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,		/* 0x3F */
+	KEY_RESERVED,		/* 0x40 */
+	KEY_VOLUMEUP,		/* 0x41 Volume Up */
+	KEY_VOLUMEDOWN,		/* 0x42 Volume Down */
+	KEY_MUTE,		/* 0x43 Mute */
+	KEY_PLAY,		/* 0x44 Play */
+	KEY_STOP,		/* 0x45 Stop */
+	KEY_PAUSE,		/* 0x46 Pause */
+	KEY_UNKNOWN,		/* 0x47 Record */
+	KEY_REWIND,		/* 0x48 Rewind */
+	KEY_FASTFORWARD,	/* 0x49 Fast Forward */
+	KEY_UNKNOWN,		/* 0x4A Eject */
+	KEY_FORWARD,		/* 0x4B Forward */
+	KEY_BACK,		/* 0x4C Backward */
+	KEY_RESERVED,		/* 0x4D */
+	KEY_RESERVED,
+	KEY_RESERVED,		/* 0x4F */
+	KEY_UNKNOWN,		/* 0x50 Angle */
+	KEY_UNKNOWN,		/* 0x51 Subtitle */
+	KEY_RESERVED,		/* 0x52 */
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,		/* 0x5F */
+	KEY_PLAYPAUSE,		/* 0x60 Play Function */
+	KEY_PLAYPAUSE,		/* 0x61 Pause_Play Function */
+	KEY_UNKNOWN,		/* 0x62 Record Function */
+	KEY_PAUSE,		/* 0x63 Pause Record Function */
+	KEY_STOP,		/* 0x64 Stop Function  */
+	KEY_MUTE,		/* 0x65 Mute Function */
+	KEY_UNKNOWN,		/* 0x66 Restore Volume Function */
+	KEY_UNKNOWN,		/* 0x67 Tune Function */
+	KEY_UNKNOWN,		/* 0x68 Select Media Function */
+	KEY_RESERVED,		/* 0x69 */
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,		/* 0x70 */
+	KEY_BLUE,			/* 0x71 F1 */
+	KEY_RED,			/* 0x72 F2 */
+	KEY_GREEN,			/* 0x73 F3 */
+	KEY_YELLOW,			/* 0x74 F4 */
+	KEY_UNKNOWN,		/* 0x75 F5 */
+	KEY_RESERVED,		/* 0x76 */
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,		/* 0x7D */
+	KEY_VENDOR,		/* Vendor Specific */
+	KEY_RESERVED,		/* 0x7F */
+};
+
+
+uint8_t slave_addrs[MAX_PAGES] = {	/* I2C slave address per register page; indexed by TX_PAGE_* */
+	DEV_PAGE_TPI_0,
+	DEV_PAGE_TX_L0_0,
+	DEV_PAGE_TX_L1_0,
+	DEV_PAGE_TX_2_0,
+	DEV_PAGE_TX_3_0,
+	DEV_PAGE_CBUS,
+	DEV_PAGE_DDC_EDID,
+	DEV_PAGE_DDC_SEGM,
+};
+
+static irqreturn_t mhl_tx_isr(int irq, void *dev_id);
+static void switch_mode(struct mhl_tx_ctrl *mhl_ctrl,
+			enum mhl_st_type to_mode, bool hpd_off);
+static void mhl_init_reg_settings(struct mhl_tx_ctrl *mhl_ctrl,
+				  bool mhl_disc_en);
+static int mhl_gpio_config(struct mhl_tx_ctrl *mhl_ctrl, int on);
+static int mhl_vreg_config(struct mhl_tx_ctrl *mhl_ctrl, uint8_t on);
+
+int mhl_i2c_reg_read(struct i2c_client *client,
+			    uint8_t slave_addr_index, uint8_t reg_offset)
+{	/* Read one register byte from the given page/offset over I2C. */
+	int rc = -1;
+	uint8_t buffer = 0;
+
+	rc = mdss_i2c_byte_read(client, slave_addrs[slave_addr_index],
+				reg_offset, &buffer);
+	if (rc) {
+		pr_err("%s: slave=%x, off=%x\n",
+		       __func__, slave_addrs[slave_addr_index], reg_offset);
+		return rc;	/* negative error from mdss_i2c_byte_read */
+	}
+	return buffer;	/* success: the byte value (0..255) */
+}
+
+
+int mhl_i2c_reg_write(struct i2c_client *client,
+			     uint8_t slave_addr_index, uint8_t reg_offset,
+			     uint8_t value)
+{	/* Write one register byte; returns mdss_i2c_byte_write status (0 on success). */
+	return mdss_i2c_byte_write(client, slave_addrs[slave_addr_index],
+				 reg_offset, &value);
+}
+
+void mhl_i2c_reg_modify(struct i2c_client *client,
+			       uint8_t slave_addr_index, uint8_t reg_offset,
+			       uint8_t mask, uint8_t val)
+{	/* Read-modify-write: replace only the bits in 'mask' with those from 'val'. */
+	uint8_t temp;
+
+	temp = mhl_i2c_reg_read(client, slave_addr_index, reg_offset);	/* NOTE(review): a failed read returns a negative int silently truncated into temp — error is not checked */
+	temp &= (~mask);	/* clear the masked bits */
+	temp |= (mask & val);	/* set the masked bits from val */
+	mhl_i2c_reg_write(client, slave_addr_index, reg_offset, temp);
+}
+
+
+static int mhl_tx_get_dt_data(struct device *dev,
+	struct mhl_tx_platform_data *pdata)
+{	/* Parse reset/pwr/intr GPIOs and the hdmi-tx phandle from DT into pdata. */
+	int i, rc = -EINVAL;	/* error paths below return rc; 0 here would falsely signal success */
+	struct device_node *of_node = NULL;
+	struct dss_gpio *temp_gpio = NULL;
+	struct platform_device *hdmi_pdev = NULL;
+	struct device_node *hdmi_tx_node = NULL;
+	int dt_gpio;
+
+	i = 0;
+
+	if (!dev || !pdata) {
+		pr_err("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	of_node = dev->of_node;
+	if (!of_node) {
+		pr_err("%s: invalid of_node\n", __func__);
+		goto error;
+	}
+
+	pr_debug("%s: id=%d\n", __func__, dev->id);
+
+	/* GPIOs */
+	temp_gpio = NULL;
+	temp_gpio = devm_kzalloc(dev, sizeof(struct dss_gpio), GFP_KERNEL);
+	pr_debug("%s: gpios allocd\n", __func__);
+	if (!(temp_gpio)) {
+		pr_err("%s: can't alloc %d gpio mem\n", __func__, i);
+		goto error;
+	}
+	/* RESET */
+	dt_gpio = of_get_named_gpio(of_node, "mhl-rst-gpio", 0);
+	if (dt_gpio < 0) {
+		pr_err("%s: Can't get mhl-rst-gpio\n", __func__);
+		goto error;
+	}
+
+	temp_gpio->gpio = dt_gpio;
+	snprintf(temp_gpio->gpio_name, 32, "%s", "mhl-rst-gpio");
+	pr_debug("%s: rst gpio=[%d]\n", __func__,
+		 temp_gpio->gpio);
+	pdata->gpios[MHL_TX_RESET_GPIO] = temp_gpio;
+
+	/* PWR */
+	temp_gpio = NULL;
+	temp_gpio = devm_kzalloc(dev, sizeof(struct dss_gpio), GFP_KERNEL);
+	pr_debug("%s: gpios allocd\n", __func__);
+	if (!(temp_gpio)) {
+		pr_err("%s: can't alloc %d gpio mem\n", __func__, i);
+		goto error;
+	}
+	dt_gpio = of_get_named_gpio(of_node, "mhl-pwr-gpio", 0);
+	if (dt_gpio < 0) {
+		pr_err("%s: Can't get mhl-pwr-gpio\n", __func__);
+		goto error;
+	}
+
+	temp_gpio->gpio = dt_gpio;
+	snprintf(temp_gpio->gpio_name, 32, "%s", "mhl-pwr-gpio");
+	pr_debug("%s: pmic gpio=[%d]\n", __func__,
+		 temp_gpio->gpio);
+	pdata->gpios[MHL_TX_PMIC_PWR_GPIO] = temp_gpio;
+
+	/* INTR */
+	temp_gpio = NULL;
+	temp_gpio = devm_kzalloc(dev, sizeof(struct dss_gpio), GFP_KERNEL);
+	pr_debug("%s: gpios allocd\n", __func__);
+	if (!(temp_gpio)) {
+		pr_err("%s: can't alloc %d gpio mem\n", __func__, i);
+		goto error;
+	}
+	dt_gpio = of_get_named_gpio(of_node, "mhl-intr-gpio", 0);
+	if (dt_gpio < 0) {
+		pr_err("%s: Can't get mhl-intr-gpio\n", __func__);
+		goto error;
+	}
+
+	temp_gpio->gpio = dt_gpio;
+	snprintf(temp_gpio->gpio_name, 32, "%s", "mhl-intr-gpio");
+	pr_debug("%s: intr gpio=[%d]\n", __func__,
+		 temp_gpio->gpio);
+	pdata->gpios[MHL_TX_INTR_GPIO] = temp_gpio;
+
+	/* parse phandle for hdmi tx */
+	hdmi_tx_node = of_parse_phandle(of_node, "qcom,hdmi-tx-map", 0);
+	if (!hdmi_tx_node) {
+		pr_err("%s: can't find hdmi phandle\n", __func__);
+		goto error;
+	}
+
+	hdmi_pdev = of_find_device_by_node(hdmi_tx_node);
+	if (!hdmi_pdev) {
+		pr_err("%s: can't find the device by node\n", __func__);
+		goto error;
+	}
+	pr_debug("%s: hdmi_pdev [0x%pK] to pdata->pdev\n",
+	       __func__, hdmi_pdev);	/* %pK: don't truncate/leak the pointer via (unsigned int) cast */
+
+	pdata->hdmi_pdev = hdmi_pdev;
+
+	return 0;
+error:
+	pr_err("%s: ret due to err\n", __func__);
+	for (i = 0; i < MHL_TX_MAX_GPIO; i++)
+		if (pdata->gpios[i])
+			devm_kfree(dev, pdata->gpios[i]);
+	return rc;
+} /* mhl_tx_get_dt_data */
+
+static int mhl_sii_reset_pin(struct mhl_tx_ctrl *mhl_ctrl, int on)
+{	/* Drive the MHL reset GPIO to 'on'; silent no-op if the GPIO was not probed. */
+	if (mhl_ctrl->pdata->gpios[MHL_TX_RESET_GPIO]) {
+		gpio_set_value(
+			mhl_ctrl->pdata->gpios[MHL_TX_RESET_GPIO]->gpio,
+			on);
+	}
+	return 0;	/* always succeeds */
+}
+
+
+static int mhl_sii_wait_for_rgnd(struct mhl_tx_ctrl *mhl_ctrl)
+{	/* Block up to 3s for the rgnd_done completion signalled by the ISR path. */
+	int timeout;
+
+	pr_debug("%s:%u\n", __func__, __LINE__);
+
+	if (mhl_ctrl->mhl_mode) {
+		pr_debug("%s: already in mhl mode\n", __func__);
+		return 0;	/* discovery already concluded: MHL */
+	}
+
+	reinit_completion(&mhl_ctrl->rgnd_done);
+	/*
+	 * after toggling reset line and enabling disc
+	 * tx can take a while to generate intr
+	 */
+	timeout = wait_for_completion_timeout
+		(&mhl_ctrl->rgnd_done, HZ * 3);
+	if (!timeout) {
+		/*
+		 * most likely nothing plugged in USB
+		 * USB HOST connected or already in USB mode
+		 */
+		pr_warn("%s:%u timedout\n", __func__, __LINE__);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
<br>
+static int mhl_sii_config(struct mhl_tx_ctrl *mhl_ctrl, bool on)
+{	/* Bring up (or tear down) regulators, GPIOs and the threaded IRQ; idempotent via irq_req_done. */
+	int rc = 0;
+	struct i2c_client *client = NULL;
+
+	if (!mhl_ctrl) {
+		pr_err("%s: ctrl is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	client = mhl_ctrl->i2c_handle;
+
+	if (on && !mhl_ctrl->irq_req_done) {
+		rc = mhl_vreg_config(mhl_ctrl, 1);
+		if (rc) {
+			pr_err("%s: vreg init failed [%d]\n",
+				__func__, rc);
+			return -ENODEV;
+		}
+
+		rc = mhl_gpio_config(mhl_ctrl, 1);
+		if (rc) {
+			pr_err("%s: gpio init failed [%d]\n",
+				__func__, rc);
+			return -ENODEV;
+		}
+
+		rc = request_threaded_irq(mhl_ctrl->i2c_handle->irq, NULL,
+			&mhl_tx_isr, IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+			client->dev.driver->name, mhl_ctrl);
+		if (rc) {
+			pr_err("%s: request_threaded_irq failed, status: %d\n",
+			       __func__, rc);
+			return -ENODEV;
+		}
+		mhl_ctrl->irq_req_done = true;
+	} else if (!on && mhl_ctrl->irq_req_done) {
+		free_irq(mhl_ctrl->i2c_handle->irq, mhl_ctrl);	/* teardown in reverse order of setup */
+		mhl_gpio_config(mhl_ctrl, 0);
+		mhl_vreg_config(mhl_ctrl, 0);
+		mhl_ctrl->irq_req_done = false;
+	}
+
+	return rc;
+}
+
+static void mhl_sii_disc_intr_work(struct work_struct *work)
+{	/* Worker queued on MHL disconnect: powers down vregs/gpios/irq outside IRQ context. */
+	struct mhl_tx_ctrl *mhl_ctrl = NULL;
+
+	mhl_ctrl = container_of(work, struct mhl_tx_ctrl, mhl_intr_work);
+
+	mhl_sii_config(mhl_ctrl, false);
+}
+
+/*  USB_HANDSHAKING FUNCTIONS */
+/*  USB_HANDSHAKING FUNCTIONS */
+static int mhl_sii_device_discovery(void *data, int id,
+			     void (*usb_notify_cb)(void *, int), void *ctx)
+{	/* USB/MHL arbitration entry: returns 0 for MHL sink, 1 for USB, negative on error. */
+	int rc;
+	struct mhl_tx_ctrl *mhl_ctrl = data;
+	unsigned long flags;
+
+	if (id) {
+		/* When MHL cable is disconnected we get a sii8334
+		 * mhl_disconnect interrupt which is handled separately.
+		 */
+		pr_debug("%s: USB ID pin high\n", __func__);
+		return id;
+	}
+
+	if (!mhl_ctrl || !usb_notify_cb) {
+		pr_warn("%s: cb || ctrl is NULL\n", __func__);
+		/* return "USB" so caller can proceed */
+		return -EINVAL;
+	}
+
+	if (!mhl_ctrl->notify_usb_online) {	/* latch the USB callback once */
+		mhl_ctrl->notify_usb_online = usb_notify_cb;
+		mhl_ctrl->notify_ctx = ctx;
+	}
+
+	flush_work(&mhl_ctrl->mhl_intr_work);	/* let any pending power-down finish first */
+
+	if (!mhl_ctrl->irq_req_done) {
+		rc = mhl_sii_config(mhl_ctrl, true);
+		if (rc) {
+			pr_err("%s: Failed to config vreg/gpio\n", __func__);
+			return rc;
+		}
+
+		/* wait for i2c interrupt line to be activated */
+		msleep(100);
+	}
+
+	if (!mhl_ctrl->disc_enabled) {
+		spin_lock_irqsave(&mhl_ctrl->lock, flags);
+		mhl_ctrl->tx_powered_off = false;
+		spin_unlock_irqrestore(&mhl_ctrl->lock, flags);
+		mhl_sii_reset_pin(mhl_ctrl, 0);	/* pulse reset low then high */
+		msleep(50);
+		mhl_sii_reset_pin(mhl_ctrl, 1);
+		/* chipset PR recommends waiting for at least 100 ms
+		 * the chipset needs longer to come out of D3 state.
+		 */
+		msleep(100);
+		mhl_init_reg_settings(mhl_ctrl, true);
+		/* allow tx to enable dev disc after D3 state */
+		msleep(100);
+		if (mhl_sii_wait_for_rgnd(mhl_ctrl)) {
+			pr_err("%s: discovery timeout\n", __func__);
+
+			mhl_sii_config(mhl_ctrl, false);
+
+			return -EAGAIN;
+		}
+	} else {
+		if (mhl_ctrl->cur_state == POWER_STATE_D3) {
+			mhl_sii_wait_for_rgnd(mhl_ctrl);
+		} else {
+			/* in MHL mode */
+			pr_debug("%s:%u\n", __func__, __LINE__);
+		}
+	}
+
+	rc = mhl_ctrl->mhl_mode ? 0 : 1;	/* 0 => MHL, 1 => USB */
+
+	pr_debug("%s: ret result: %s\n", __func__, rc ? "usb" : " mhl");
+	return rc;
+}
+
+static int mhl_power_get_property(struct power_supply *psy,
+				  enum power_supply_property psp,
+				  union power_supply_propval *val)
+{	/* power_supply get_property: report VBUS presence, online state and max current. */
+	struct mhl_tx_ctrl *mhl_ctrl =
+		container_of(psy, struct mhl_tx_ctrl, mhl_psy);
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		val->intval = mhl_ctrl->current_val;
+		break;
+	case POWER_SUPPLY_PROP_PRESENT:
+		val->intval = mhl_ctrl->vbus_active;
+		break;
+	case POWER_SUPPLY_PROP_ONLINE:
+		val->intval = mhl_ctrl->vbus_active && mhl_ctrl->mhl_mode;	/* online only while an MHL sink is attached */
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int mhl_power_set_property(struct power_supply *psy,
+				  enum power_supply_property psp,
+				  const union power_supply_propval *val)
+{	/* power_supply set_property: track VBUS and derive the charge-current limit. */
+	struct mhl_tx_ctrl *mhl_ctrl =
+		container_of(psy, struct mhl_tx_ctrl, mhl_psy);
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_PRESENT:
+		mhl_ctrl->vbus_active = val->intval;
+		if (mhl_ctrl->vbus_active)
+			mhl_ctrl->current_val = MAX_CURRENT;	/* 700 mA (in uA) while VBUS present */
+		else
+			mhl_ctrl->current_val = 0;
+		power_supply_changed(psy);
+		break;
+	case POWER_SUPPLY_PROP_ONLINE:
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		break;	/* read-only from this side; accept silently */
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static char *mhl_pm_power_supplied_to[] = {
+	"usb",
+};
+
+static enum power_supply_property mhl_pm_power_props[] = {
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_ONLINE,
+	POWER_SUPPLY_PROP_CURRENT_MAX,
+};
+
+static void cbus_reset(struct mhl_tx_ctrl *mhl_ctrl)
+{	/* Soft-reset the CBUS block and unmask the interrupt sources this driver handles. */
+	uint8_t i;
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	/* Read the chip rev ID */
+	mhl_ctrl->chip_rev_id = MHL_SII_PAGE0_RD(0x04);
+	pr_debug("MHL: chip rev ID read=[%x]\n", mhl_ctrl->chip_rev_id);
+
+	/*
+	 * REG_SRST
+	 */
+	MHL_SII_REG_NAME_MOD(REG_SRST, BIT3, BIT3);	/* assert CBUS soft reset */
+	msleep(20);
+	MHL_SII_REG_NAME_MOD(REG_SRST, BIT3, 0x00);	/* release it */
+	/*
+	 * REG_INTR1 and REG_INTR4
+	 */
+	MHL_SII_REG_NAME_WR(REG_INTR1_MASK, BIT6);
+	MHL_SII_REG_NAME_WR(REG_INTR4_MASK,
+		BIT0 | BIT2 | BIT3 | BIT4 | BIT5 | BIT6);
+
+	if (mhl_ctrl->chip_rev_id < 1)	/* early silicon also needs FIFO over/underflow intrs */
+		MHL_SII_REG_NAME_WR(REG_INTR5_MASK, BIT3 | BIT4);
+	else
+		MHL_SII_REG_NAME_WR(REG_INTR5_MASK, 0x00);
+
+	/* Unmask CBUS1 Intrs */
+	MHL_SII_REG_NAME_WR(REG_CBUS_INTR_ENABLE,
+		BIT2 | BIT3 | BIT4 | BIT5 | BIT6);
+
+	/* Unmask CBUS2 Intrs */
+	MHL_SII_REG_NAME_WR(REG_CBUS_MSC_INT2_ENABLE, BIT2 | BIT3);
+
+	for (i = 0; i < 4; i++) {
+		/*
+		 * Enable WRITE_STAT interrupt for writes to
+		 * all 4 MSC Status registers.
+		 */
+		MHL_SII_CBUS_WR((0xE0 + i), 0xFF);
+
+		/*
+		 * Enable SET_INT interrupt for writes to
+		 * all 4 MSC Interrupt registers.
+		 */
+		MHL_SII_CBUS_WR((0xF0 + i), 0xFF);
+	}
+}
+
+static void init_cbus_regs(struct i2c_client *client)
+{	/* Program CBUS timing/strength and populate the local DEVCAP table (0x80..) with defaults. */
+	uint8_t		regval;
+
+	/* Increase DDC translation layer timer*/
+	MHL_SII_CBUS_WR(0x0007, 0xF2);
+	/* Drive High Time */
+	MHL_SII_CBUS_WR(0x0036, 0x0B);
+	/* Use programmed timing */
+	MHL_SII_CBUS_WR(0x0039, 0x30);
+	/* CBUS Drive Strength */
+	MHL_SII_CBUS_WR(0x0040, 0x03);
+	/*
+	 * Write initial default settings
+	 * to devcap regs: default settings
+	 */
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_DEV_STATE, DEVCAP_VAL_DEV_STATE);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_MHL_VERSION, DEVCAP_VAL_MHL_VERSION);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_DEV_CAT, DEVCAP_VAL_DEV_CAT);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_ADOPTER_ID_H, DEVCAP_VAL_ADOPTER_ID_H);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_ADOPTER_ID_L, DEVCAP_VAL_ADOPTER_ID_L);
+	MHL_SII_CBUS_WR(0x0080 | DEVCAP_OFFSET_VID_LINK_MODE,
+			  DEVCAP_VAL_VID_LINK_MODE);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_AUD_LINK_MODE,
+			  DEVCAP_VAL_AUD_LINK_MODE);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_VIDEO_TYPE, DEVCAP_VAL_VIDEO_TYPE);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_LOG_DEV_MAP, DEVCAP_VAL_LOG_DEV_MAP);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_BANDWIDTH, DEVCAP_VAL_BANDWIDTH);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_FEATURE_FLAG, DEVCAP_VAL_FEATURE_FLAG);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_DEVICE_ID_H, DEVCAP_VAL_DEVICE_ID_H);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_DEVICE_ID_L, DEVCAP_VAL_DEVICE_ID_L);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_SCRATCHPAD_SIZE,
+			  DEVCAP_VAL_SCRATCHPAD_SIZE);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_INT_STAT_SIZE,
+			  DEVCAP_VAL_INT_STAT_SIZE);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_RESERVED, DEVCAP_VAL_RESERVED);
+
+	/* Make bits 2,3 (initiator timeout) to 1,1
+	 * for register CBUS_LINK_CONTROL_2
+	 * REG_CBUS_LINK_CONTROL_2
+	 */
+	regval = MHL_SII_CBUS_RD(0x0031);
+	regval = (regval | 0x0C);
+	/* REG_CBUS_LINK_CONTROL_2 */
+	MHL_SII_CBUS_WR(0x0031, regval);
+	 /* REG_MSC_TIMEOUT_LIMIT */
+	MHL_SII_CBUS_WR(0x0022, 0x0F);
+	/* REG_CBUS_LINK_CONTROL_1 */
+	MHL_SII_CBUS_WR(0x0030, 0x01);
+	/* disallow vendor specific commands */
+	MHL_SII_CBUS_MOD(0x002E, BIT4, BIT4);
+}
+
+/*
+ * Configure the initial reg settings
+ */
+static void mhl_init_reg_settings(struct mhl_tx_ctrl *mhl_ctrl,
+	bool mhl_disc_en)
+{	/* Full power-up register sequence (core, PLL, discovery); ends with CBUS reset + CBUS reg init. */
+	uint8_t regval;
+
+	/*
+	 * ============================================
+	 * POWER UP
+	 * ============================================
+	 */
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	/* Power up 1.2V core */
+	MHL_SII_PAGE1_WR(0x003D, 0x3F);
+	/* Enable Tx PLL Clock */
+	MHL_SII_PAGE2_WR(0x0011, 0x01);
+	/* Enable Tx Clock Path and Equalizer */
+	MHL_SII_PAGE2_WR(0x0012, 0x11);
+	/* Tx Source Termination ON */
+	MHL_SII_REG_NAME_WR(REG_MHLTX_CTL1, 0x10);
+	/* Enable 1X MHL Clock output */
+	MHL_SII_REG_NAME_WR(REG_MHLTX_CTL6, 0xBC);
+	/* Tx Differential Driver Config */
+	MHL_SII_REG_NAME_WR(REG_MHLTX_CTL2, 0x3C);
+	MHL_SII_REG_NAME_WR(REG_MHLTX_CTL4, 0xC8);
+	/* PLL Bandwidth Control */
+	MHL_SII_REG_NAME_WR(REG_MHLTX_CTL7, 0x03);
+	MHL_SII_REG_NAME_WR(REG_MHLTX_CTL8, 0x0A);
+	/*
+	 * ============================================
+	 * Analog PLL Control
+	 * ============================================
+	 */
+	/* Enable Rx PLL clock */
+	MHL_SII_REG_NAME_WR(REG_TMDS_CCTRL,  0x08);
+	MHL_SII_PAGE0_WR(0x00F8, 0x8C);
+	MHL_SII_PAGE0_WR(0x0085, 0x02);
+	MHL_SII_PAGE2_WR(0x0000, 0x00);
+	regval = MHL_SII_PAGE2_RD(0x0005);
+	regval &= ~BIT5;
+	MHL_SII_PAGE2_WR(0x0005, regval);
+	MHL_SII_PAGE2_WR(0x0013, 0x60);
+	/* PLL Cal ref sel */
+	MHL_SII_PAGE2_WR(0x0017, 0x03);
+	/* VCO Cal */
+	MHL_SII_PAGE2_WR(0x001A, 0x20);
+	/* Auto EQ */
+	MHL_SII_PAGE2_WR(0x0022, 0xE0);
+	MHL_SII_PAGE2_WR(0x0023, 0xC0);
+	MHL_SII_PAGE2_WR(0x0024, 0xA0);
+	MHL_SII_PAGE2_WR(0x0025, 0x80);
+	MHL_SII_PAGE2_WR(0x0026, 0x60);
+	MHL_SII_PAGE2_WR(0x0027, 0x40);
+	MHL_SII_PAGE2_WR(0x0028, 0x20);
+	MHL_SII_PAGE2_WR(0x0029, 0x00);
+	/* Rx PLL Bandwidth 4MHz */
+	MHL_SII_PAGE2_WR(0x0031, 0x0A);
+	/* Rx PLL Bandwidth value from I2C */
+	MHL_SII_PAGE2_WR(0x0045, 0x06);
+	MHL_SII_PAGE2_WR(0x004B, 0x06);
+	MHL_SII_PAGE2_WR(0x004C, 0x60);	/* NOTE(review): 0x4C written twice back-to-back — presumably intentional per chipset PR; confirm with datasheet */
+	/* Manual zone control */
+	MHL_SII_PAGE2_WR(0x004C, 0xE0);
+	/* PLL Mode value */
+	MHL_SII_PAGE2_WR(0x004D, 0x00);
+	MHL_SII_PAGE0_WR(0x0008, 0x35);
+	/*
+	 * Discovery Control and Status regs
+	 * Setting De-glitch time to 50 ms (default)
+	 * Switch Control Disabled
+	 */
+	MHL_SII_REG_NAME_WR(REG_DISC_CTRL2, 0xAD);
+	/* 1.8V CBUS VTH */
+	MHL_SII_REG_NAME_WR(REG_DISC_CTRL5, 0x57);
+	/* RGND and single Discovery attempt */
+	MHL_SII_REG_NAME_WR(REG_DISC_CTRL6, 0x11);
+	/* Ignore VBUS */
+	MHL_SII_REG_NAME_WR(REG_DISC_CTRL8, 0x82);
+
+	/* Enable CBUS Discovery */
+	if (mhl_disc_en) {
+		MHL_SII_REG_NAME_WR(REG_DISC_CTRL9, 0x24);
+		/* Enable MHL Discovery */
+		MHL_SII_REG_NAME_WR(REG_DISC_CTRL1, 0x27);
+		/* Pull-up resistance off for IDLE state */
+		MHL_SII_REG_NAME_WR(REG_DISC_CTRL4, 0x8C);
+	} else {
+		MHL_SII_REG_NAME_WR(REG_DISC_CTRL9, 0x26);
+		/* Disable MHL Discovery */
+		MHL_SII_REG_NAME_WR(REG_DISC_CTRL1, 0x26);
+		MHL_SII_REG_NAME_WR(REG_DISC_CTRL4, 0x8C);
+	}
+
+	MHL_SII_REG_NAME_WR(REG_DISC_CTRL7, 0x20);
+	/* MHL CBUS Discovery - immediate comm.  */
+	MHL_SII_REG_NAME_WR(REG_DISC_CTRL3, 0x86);
+
+	MHL_SII_PAGE3_WR(0x3C, 0x80);
+
+	MHL_SII_REG_NAME_MOD(REG_INT_CTRL,
+			     (BIT6 | BIT5 | BIT4), (BIT6 | BIT4));
+
+	/* Enable Auto Soft RESET */
+	MHL_SII_REG_NAME_WR(REG_SRST, 0x084);
+	/* HDMI Transcode mode enable */
+	MHL_SII_PAGE0_WR(0x000D, 0x1C);
+
+	cbus_reset(mhl_ctrl);
+	init_cbus_regs(client);
+}
+
+
+static void switch_mode(struct mhl_tx_ctrl *mhl_ctrl, enum mhl_st_type to_mode,
+			bool hpd_off)
+{	/* Transition TX between D0(no-MHL)/D0(MHL)/D3; in D3, hpd_off also drops the upstream HPD. */
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+	unsigned long flags;
+	int rc;
+	struct msm_hdmi_mhl_ops *hdmi_mhl_ops = mhl_ctrl->hdmi_mhl_ops;
+
+	pr_debug("%s: tx pwr on\n", __func__);
+	spin_lock_irqsave(&mhl_ctrl->lock, flags);
+	mhl_ctrl->tx_powered_off = false;	/* any transition re-marks the tx as powered */
+	spin_unlock_irqrestore(&mhl_ctrl->lock, flags);
+
+	switch (to_mode) {
+	case POWER_STATE_D0_NO_MHL:
+		mhl_ctrl->cur_state = to_mode;
+		mhl_init_reg_settings(mhl_ctrl, true);
+		/* REG_DISC_CTRL1 */
+		MHL_SII_REG_NAME_MOD(REG_DISC_CTRL1, BIT1 | BIT0, BIT0);
+
+		/* TPI_DEVICE_POWER_STATE_CTRL_REG */
+		mhl_i2c_reg_modify(client, TX_PAGE_TPI, 0x001E, BIT1 | BIT0,
+			0x00);
+		break;
+	case POWER_STATE_D0_MHL:
+		mhl_ctrl->cur_state = to_mode;	/* register setup for MHL mode happens in mhl_msm_connection() */
+		break;
+	case POWER_STATE_D3:
+		if (mhl_ctrl->cur_state == POWER_STATE_D3) {
+			pr_debug("%s: mhl tx already in low power mode\n",
+				__func__);
+			break;
+		}
+
+		/* Force HPD to 0 when not in MHL mode.  */
+		mhl_drive_hpd(mhl_ctrl, HPD_DOWN);
+		mhl_tmds_ctrl(mhl_ctrl, TMDS_DISABLE);
+		/*
+		 * Change TMDS termination to high impedance
+		 * on disconnection.
+		 */
+		MHL_SII_REG_NAME_WR(REG_MHLTX_CTL1, 0xD0);
+		msleep(50);
+		if (!mhl_ctrl->disc_enabled)
+			MHL_SII_REG_NAME_MOD(REG_DISC_CTRL1, BIT1 | BIT0, 0x00);
+		if (hdmi_mhl_ops && hpd_off) {
+			rc = hdmi_mhl_ops->set_upstream_hpd(
+				mhl_ctrl->pdata->hdmi_pdev, 0);
+			pr_debug("%s: hdmi unset hpd %s\n", __func__,
+				 rc ? "failed" : "passed");
+		}
+		mhl_ctrl->cur_state = POWER_STATE_D3;
+		mhl_ctrl->mhl_mode = 0;
+		break;
+	default:
+		break;
+	}
+}
+
+static bool is_mhl_powered(void *mhl_ctx)
+{	/* Snapshot tx_powered_off under the spinlock; true while the TX is powered. */
+	struct mhl_tx_ctrl *mhl_ctrl = (struct mhl_tx_ctrl *)mhl_ctx;
+	unsigned long flags;
+	bool r = false;
+
+	spin_lock_irqsave(&mhl_ctrl->lock, flags);
+	if (mhl_ctrl->tx_powered_off)
+		r = false;
+	else
+		r = true;
+	spin_unlock_irqrestore(&mhl_ctrl->lock, flags);
+
+	pr_debug("%s: ret pwr state as %x\n", __func__, r);
+	return r;
+}
+
+void mhl_tmds_ctrl(struct mhl_tx_ctrl *mhl_ctrl, uint8_t on)
+{	/* Gate TMDS output via REG_TMDS_CCTRL BIT4 and mirror the state in tmds_en_state. */
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	if (on) {
+		MHL_SII_REG_NAME_MOD(REG_TMDS_CCTRL, BIT4, BIT4);
+		mhl_ctrl->tmds_en_state = true;
+	} else {
+		MHL_SII_REG_NAME_MOD(REG_TMDS_CCTRL, BIT4, 0x00);
+		mhl_ctrl->tmds_en_state = false;
+	}
+}
+
+void mhl_drive_hpd(struct mhl_tx_ctrl *mhl_ctrl, uint8_t to_state)
+{	/* Drive upstream HPD up (float via override clear) or force it low. */
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+	unsigned long flags;
+
+	pr_debug("%s: To state=[0x%x]\n", __func__, to_state);
+	if (to_state == HPD_UP) {
+		/*
+		 * Drive HPD to UP state
+		 * Set HPD_OUT_OVR_EN = HPD State
+		 * EDID read and Un-force HPD (from low)
+		 * propagate to src let HPD float by clearing
+		 * HPD OUT OVRRD EN
+		 */
+		spin_lock_irqsave(&mhl_ctrl->lock, flags);
+		mhl_ctrl->tx_powered_off = false;
+		spin_unlock_irqrestore(&mhl_ctrl->lock, flags);
+		MHL_SII_REG_NAME_MOD(REG_INT_CTRL, BIT4, 0);
+	} else {
+		/* Drive HPD to DOWN state */
+		MHL_SII_REG_NAME_MOD(REG_INT_CTRL, (BIT4 | BIT5), BIT4);
+	}
+}
+
+static void mhl_msm_connection(struct mhl_tx_ctrl *mhl_ctrl)
+{	/* MHL link established: enter D0_MHL and advertise DCAP_RDY/DCAP_CHG to the peer. */
+	uint8_t val;
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	pr_debug("%s: cur st [0x%x]\n", __func__,
+		mhl_ctrl->cur_state);
+
+	if (mhl_ctrl->cur_state == POWER_STATE_D0_MHL) {
+		/* Already in D0 - MHL power state */
+		pr_err("%s: cur st not D0\n", __func__);
+		return;
+	}
+	switch_mode(mhl_ctrl, POWER_STATE_D0_MHL, true);
+
+	MHL_SII_REG_NAME_WR(REG_MHLTX_CTL1, 0x10);	/* Tx source termination back on */
+	MHL_SII_CBUS_WR(0x07, 0xF2);
+
+	/*
+	 * Keep the discovery enabled. Need RGND interrupt
+	 * Possibly chip disables discovery after MHL_EST??
+	 * Need to re-enable here
+	 */
+	val = MHL_SII_PAGE3_RD(0x10);
+	MHL_SII_PAGE3_WR(0x10, val | BIT0);
+
+	/*
+	 * indicate DCAP_RDY and DCAP_CHG
+	 * to the peer only after
+	 * msm conn has been established
+	 */
+	mhl_msc_send_write_stat(mhl_ctrl,
+				MHL_STATUS_REG_CONNECTED_RDY,
+				MHL_STATUS_DCAP_RDY);
+
+	mhl_msc_send_set_int(mhl_ctrl,
+			     MHL_RCHANGE_INT,
+			     MHL_INT_DCAP_CHG,
+			     MSC_PRIORITY_SEND);
+
+}
+
+static void mhl_msm_disconnection(struct mhl_tx_ctrl *mhl_ctrl)
+{	/* Link dropped: high-impedance Tx termination, enter D3, flush MSC queue state. */
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	/* disabling Tx termination */
+	MHL_SII_REG_NAME_WR(REG_MHLTX_CTL1, 0xD0);
+	switch_mode(mhl_ctrl, POWER_STATE_D3, true);
+	mhl_msc_clear(mhl_ctrl);
+}
+
+static int mhl_msm_read_rgnd_int(struct mhl_tx_ctrl *mhl_ctrl)
+{	/* Classify the attached device from the RGND impedance bits of DISC STATUS REG 2. */
+	uint8_t rgnd_imp;
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+	struct msm_hdmi_mhl_ops *hdmi_mhl_ops = mhl_ctrl->hdmi_mhl_ops;
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&mhl_ctrl->lock, flags);
+	mhl_ctrl->tx_powered_off = false;
+	spin_unlock_irqrestore(&mhl_ctrl->lock, flags);
+
+	/* DISC STATUS REG 2 */
+	rgnd_imp = (mhl_i2c_reg_read(client, TX_PAGE_3, 0x001C) &
+		    (BIT1 | BIT0));
+	pr_debug("imp range read=%02X\n", (int)rgnd_imp);
+
+	if (rgnd_imp == 0x02) {	/* fix: was undeclared 'rgnd_impi'; 0x02 => MHL sink */
+		pr_debug("%s: mhl sink\n", __func__);
+		if (hdmi_mhl_ops) {
+			rc = hdmi_mhl_ops->set_upstream_hpd(
+				mhl_ctrl->pdata->hdmi_pdev, 1);
+			pr_debug("%s: hdmi set hpd %s\n", __func__,
+				 rc ? "failed" : "passed");
+		}
+		mhl_ctrl->mhl_mode = 1;
+		power_supply_changed(&mhl_ctrl->mhl_psy);
+		if (mhl_ctrl->notify_usb_online)
+			mhl_ctrl->notify_usb_online(mhl_ctrl->notify_ctx, 1);
+	} else {
+		pr_debug("%s: non-mhl sink\n", __func__);
+		mhl_ctrl->mhl_mode = 0;
+		switch_mode(mhl_ctrl, POWER_STATE_D3, true);
+	}
+	complete(&mhl_ctrl->rgnd_done);	/* unblock mhl_sii_wait_for_rgnd() */
+	return mhl_ctrl->mhl_mode ?
+		MHL_DISCOVERY_RESULT_MHL : MHL_DISCOVERY_RESULT_USB;
+}
+
+static void force_usb_switch_open(struct mhl_tx_ctrl *mhl_ctrl)
+{	/* Break a latched CBUS line: disable discovery and force the USB ID switch open. */
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	/*disable discovery*/
+	MHL_SII_REG_NAME_MOD(REG_DISC_CTRL1, BIT0, 0);
+	/* force USB ID switch to open*/
+	MHL_SII_REG_NAME_MOD(REG_DISC_CTRL6, BIT6, BIT6);
+	MHL_SII_REG_NAME_WR(REG_DISC_CTRL3, 0x86);
+	/* force HPD to 0 when not in mhl mode. */
+	MHL_SII_REG_NAME_MOD(REG_INT_CTRL, BIT5 | BIT4, BIT4);
+}
+
+static void release_usb_switch_open(struct mhl_tx_ctrl *mhl_ctrl)
+{	/* Counterpart of force_usb_switch_open(): release the switch and re-enable discovery. */
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	msleep(50);	/* let the forced-open state settle before releasing */
+	MHL_SII_REG_NAME_MOD(REG_DISC_CTRL6, BIT6, 0x00);
+	MHL_SII_REG_NAME_MOD(REG_DISC_CTRL1, BIT0, BIT0);
+}
+
+static void scdt_st_chg(struct i2c_client *client)
+{	/* SCDT change handler: if FIFO over/underflow latched in INTR5, pulse a FIFO reset. */
+	uint8_t tmds_cstat;
+	uint8_t mhl_fifo_status;
+
+	/* tmds cstat */
+	tmds_cstat = MHL_SII_PAGE3_RD(0x0040);
+	pr_debug("%s: tmds cstat: 0x%02x\n", __func__,
+		 tmds_cstat);
+
+	if (!(tmds_cstat & BIT1))	/* BIT1 presumably = SCDT asserted — confirm with datasheet */
+		return;
+
+	mhl_fifo_status = MHL_SII_REG_NAME_RD(REG_INTR5);
+	pr_debug("%s: mhl fifo st: 0x%02x\n", __func__,
+		 mhl_fifo_status);
+	if (mhl_fifo_status & 0x0C) {
+		MHL_SII_REG_NAME_WR(REG_INTR5,  0x0C);	/* ack the over/underflow bits */
+		pr_debug("%s: mhl fifo rst\n", __func__);
+		MHL_SII_REG_NAME_WR(REG_SRST, 0x94);	/* assert then release FIFO reset */
+		MHL_SII_REG_NAME_WR(REG_SRST, 0x84);
+	}
+}
+
+
+static int dev_detect_isr(struct mhl_tx_ctrl *mhl_ctrl)
+{	/* INTR4 handler: SCDT, MHL_EST, uUSB-A, disconnect, RGND-ready and CBUS lockout events. */
+	uint8_t status, reg;
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	/* INTR_STATUS4 */
+	status = MHL_SII_REG_NAME_RD(REG_INTR4);
+	pr_debug("%s: reg int4 st=%02X\n", __func__, status);
+
+	if ((status == 0x00) &&
+	    (mhl_ctrl->cur_state == POWER_STATE_D3)) {
+		pr_warn("%s: invalid intr\n", __func__);
+		return 0;
+	}
+
+	if (status == 0xFF) {	/* all-ones read: presumably chip off the bus — ack and bail */
+		pr_warn("%s: invalid intr 0xff\n", __func__);
+		MHL_SII_REG_NAME_WR(REG_INTR4, status);
+		return 0;
+	}
+
+	if ((status & BIT0) && (mhl_ctrl->chip_rev_id < 1)) {
+		pr_debug("%s: scdt intr\n", __func__);
+		scdt_st_chg(client);
+	}
+
+	if (status & BIT1)
+		pr_debug("mhl: int4 bit1 set\n");
+
+	/* mhl_est interrupt */
+	if (status & BIT2) {
+		pr_debug("%s: mhl_est st=%02X\n", __func__,
+			 (int) status);
+		mhl_msm_connection(mhl_ctrl);
+	} else if (status & BIT3) {
+		pr_debug("%s: uUSB-a type dev detct\n", __func__);
+		power_supply_changed(&mhl_ctrl->mhl_psy);
+		mhl_drive_hpd(mhl_ctrl, HPD_DOWN);
+		return 0;
+	}
+
+	if (status & BIT5) {
+		/* clr intr - reg int4 */
+		pr_debug("%s: mhl discon: int4 st=%02X\n", __func__,
+			 (int)status);
+		mhl_ctrl->mhl_det_discon = true;
+
+		reg = MHL_SII_REG_NAME_RD(REG_INTR4);
+		MHL_SII_REG_NAME_WR(REG_INTR4, reg);
+		mhl_msm_disconnection(mhl_ctrl);
+		power_supply_changed(&mhl_ctrl->mhl_psy);
+		if (mhl_ctrl->notify_usb_online)
+			mhl_ctrl->notify_usb_online(mhl_ctrl->notify_ctx, 0);
+
+		queue_work(mhl_ctrl->mhl_workq, &mhl_ctrl->mhl_intr_work);	/* defer vreg/gpio/irq teardown */
+
+		return 0;
+	}
+
+	if ((mhl_ctrl->cur_state != POWER_STATE_D0_NO_MHL) &&
+	    (status & BIT6)) {
+		/* rgnd rdy Intr */
+		pr_debug("%s: rgnd ready intr\n", __func__);
+		switch_mode(mhl_ctrl, POWER_STATE_D0_NO_MHL, true);
+		mhl_msm_read_rgnd_int(mhl_ctrl);
+	}
+
+	/* Can't succeed at these in D3 */
+	if ((mhl_ctrl->cur_state != POWER_STATE_D3) &&
+	     (status & BIT4)) {
+		/* cbus lockout interrupt?
+		 * Hardware detection mechanism figures that
+		 * CBUS line is latched and raises this intr
+		 * where we force usb switch open and release
+		 */
+		pr_warn("%s: cbus locked out!\n", __func__);
+		force_usb_switch_open(mhl_ctrl);
+		release_usb_switch_open(mhl_ctrl);
+	}
+	MHL_SII_REG_NAME_WR(REG_INTR4, status);	/* ack everything handled this pass */
+	return 0;
+}
+
+static void mhl_misc_isr(struct mhl_tx_ctrl *mhl_ctrl)
+{	/* INTR5 handler: read-and-ack only; FIFO recovery is automatic on this chip. */
+	uint8_t intr_5_stat;
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	/*
+	 * Clear INT 5
+	 * INTR5 is related to FIFO underflow/overflow reset
+	 * which is handled in 8334 by auto FIFO reset
+	 */
+	intr_5_stat = MHL_SII_REG_NAME_RD(REG_INTR5);
+	MHL_SII_REG_NAME_WR(REG_INTR5,  intr_5_stat);
+}
+
+static void mhl_tx_down(struct mhl_tx_ctrl *mhl_ctrl)
+{	/* Full TX power-down: D3, ack+mask INTR1/INTR4, core off, mark off, stop the IRQ. */
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+	unsigned long flags;
+	uint8_t reg;
+
+	switch_mode(mhl_ctrl, POWER_STATE_D3, true);
+
+	reg = MHL_SII_REG_NAME_RD(REG_INTR1);	/* ack any pending INTR1 bits */
+	MHL_SII_REG_NAME_WR(REG_INTR1, reg);
+
+	reg = MHL_SII_REG_NAME_RD(REG_INTR4);	/* ack any pending INTR4 bits */
+	MHL_SII_REG_NAME_WR(REG_INTR4, reg);
+
+	/* disable INTR1 and INTR4 */
+	MHL_SII_REG_NAME_MOD(REG_INTR1_MASK, BIT6, 0x0);
+	MHL_SII_REG_NAME_MOD(REG_INTR4_MASK,
+		(BIT0 | BIT1 | BIT2 | BIT3 | BIT4 | BIT5 | BIT6), 0x0);
+
+	MHL_SII_PAGE1_MOD(0x003D, BIT0, 0x00);	/* power down the 1.2V core */
+	spin_lock_irqsave(&mhl_ctrl->lock, flags);
+	mhl_ctrl->tx_powered_off = true;
+	spin_unlock_irqrestore(&mhl_ctrl->lock, flags);
+	pr_debug("%s: disabled\n", __func__);
+	disable_irq_nosync(client->irq);
+}
+
+/*
+ * mhl_hpd_stat_isr() - handle downstream HPD status-change interrupts.
+ *
+ * When INTR1 bit 6 is set, reads CBUS status (0x0D) and compares its
+ * bit 6 against the cached mhl_ctrl->dwnstream_hpd.  On a change the
+ * local HPD line is driven to match; if HPD dropped after a detected
+ * disconnect (mhl_det_discon), the TX is powered down via
+ * mhl_tx_down().  The dwnstream_hpd cache is read/updated under @lock.
+ * No-op unless the TX is powered (is_mhl_powered()).
+ */
+static void mhl_hpd_stat_isr(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	uint8_t intr_1_stat, cbus_stat, t;
+	unsigned long flags;
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	if (!is_mhl_powered(mhl_ctrl))
+		return;
+
+	/* INTR STATUS 1 */
+	intr_1_stat = MHL_SII_PAGE0_RD(0x0071);
+
+	if (!intr_1_stat)
+		return;
+
+	/* Clear interrupts */
+	MHL_SII_PAGE0_WR(0x0071, intr_1_stat);
+
+	if (BIT6 & intr_1_stat) {
+		/*
+		 * HPD status change event is pending
+		 * Read CBUS HPD status for this info
+		 * MSC REQ ABRT REASON
+		 */
+		cbus_stat = MHL_SII_CBUS_RD(0x0D);
+		pr_debug("%s: cbus_stat=[0x%02x] cur_pwr=[%u]\n",
+			 __func__, cbus_stat, mhl_ctrl->cur_state);
+
+		spin_lock_irqsave(&mhl_ctrl->lock, flags);
+		t = mhl_ctrl->dwnstream_hpd;
+		pr_debug("%s: %u: dwnstrm_hpd=0x%02x\n",
+			 __func__, __LINE__, mhl_ctrl->dwnstream_hpd);
+		spin_unlock_irqrestore(&mhl_ctrl->lock, flags);
+
+		/* act only when CBUS HPD (bit 6) actually toggled */
+		if (BIT6 & (cbus_stat ^ t)) {
+			u8 status = cbus_stat & BIT6;
+
+			mhl_drive_hpd(mhl_ctrl, status ? HPD_UP : HPD_DOWN);
+			if (!status && mhl_ctrl->mhl_det_discon) {
+				pr_debug("%s:%u: power_down\n",
+					 __func__, __LINE__);
+				mhl_tx_down(mhl_ctrl);
+			}
+			spin_lock_irqsave(&mhl_ctrl->lock, flags);
+			mhl_ctrl->dwnstream_hpd = cbus_stat;
+			pr_debug("%s: %u: dwnstrm_hpd=0x%02x\n",
+				 __func__, __LINE__, mhl_ctrl->dwnstream_hpd);
+			spin_unlock_irqrestore(&mhl_ctrl->lock, flags);
+			mhl_ctrl->mhl_det_discon = false;
+		}
+	}
+}
+
+/*
+ * mhl_sii_cbus_process_errors() - decode CBUS abort interrupts.
+ *
+ * For each abort bit set in @int_status, reads the corresponding
+ * abort-reason register for logging: BIT2 = DDC abort, BIT5 = MSC
+ * requester abort, BIT6 = MSC responder abort.  The MSC reason
+ * registers are cleared by writing 0xFF back; the DDC reason register
+ * is read-only logged here.
+ */
+static void mhl_sii_cbus_process_errors(struct i2c_client *client,
+					u8 int_status)
+{
+	u8 abort_reason = 0;
+
+	if (int_status & BIT2) {
+		abort_reason = MHL_SII_REG_NAME_RD(REG_DDC_ABORT_REASON);
+		pr_debug("%s: CBUS DDC Abort Reason(0x%02x)\n",
+			 __func__, abort_reason);
+	}
+	if (int_status & BIT5) {
+		abort_reason = MHL_SII_REG_NAME_RD(REG_PRI_XFR_ABORT_REASON);
+		pr_debug("%s: CBUS MSC Requestor Abort Reason(0x%02x)\n",
+			 __func__, abort_reason);
+		MHL_SII_REG_NAME_WR(REG_PRI_XFR_ABORT_REASON, 0xFF);
+	}
+	if (int_status & BIT6) {
+		abort_reason = MHL_SII_REG_NAME_RD(
+			REG_CBUS_PRI_FWR_ABORT_REASON);
+		pr_debug("%s: CBUS MSC Responder Abort Reason(0x%02x)\n",
+			 __func__, abort_reason);
+		MHL_SII_REG_NAME_WR(REG_CBUS_PRI_FWR_ABORT_REASON, 0xFF);
+	}
+}
+
+/*
+ * mhl_send_msc_command() - issue one MSC command over CBUS and wait
+ * for its completion.
+ *
+ * Programs the command/offset/payload registers, picks the start bit
+ * for the command class, kicks the transaction and blocks (up to
+ * T_ABORT_NEXT ms) on msc_cmd_done, which the CBUS ISR completes on
+ * MSC_REQ_DONE.  Must only be called in POWER_STATE_D0_MHL.
+ *
+ * Returns 0 on success, -EAGAIN if an MSC_MSG was NACKed by the peer,
+ * -EFAULT on bad state/arguments, unknown command, or timeout.
+ */
+int mhl_send_msc_command(struct mhl_tx_ctrl *mhl_ctrl,
+			 struct msc_command_struct *req)
+{
+	int timeout;
+	u8 start_bit = 0x00;
+	u8 *burst_data;
+	int i;
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	if (mhl_ctrl->cur_state != POWER_STATE_D0_MHL) {
+		pr_debug("%s: power_state:%02x CBUS(0x0A):%02x\n",
+			 __func__,
+			 mhl_ctrl->cur_state,
+			 MHL_SII_REG_NAME_RD(REG_CBUS_BUS_STATUS));
+		return -EFAULT;
+	}
+
+	if (!req)
+		return -EFAULT;
+
+	pr_debug("%s: command=0x%02x offset=0x%02x %02x %02x",
+		 __func__,
+		 req->command,
+		 req->offset,
+		 req->payload.data[0],
+		 req->payload.data[1]);
+
+	/* REG_CBUS_PRI_ADDR_CMD = REQ CBUS CMD or OFFSET */
+	MHL_SII_REG_NAME_WR(REG_CBUS_PRI_ADDR_CMD, req->offset);
+	MHL_SII_REG_NAME_WR(REG_CBUS_PRI_WR_DATA_1ST,
+			    req->payload.data[0]);
+
+	/* select the start bit (and extra setup) per command class */
+	switch (req->command) {
+	case MHL_SET_INT:
+	case MHL_WRITE_STAT:
+		start_bit = MSC_START_BIT_WRITE_REG;
+		break;
+	case MHL_READ_DEVCAP:
+		start_bit = MSC_START_BIT_READ_REG;
+		break;
+	case MHL_GET_STATE:
+	case MHL_GET_VENDOR_ID:
+	case MHL_SET_HPD:
+	case MHL_CLR_HPD:
+	case MHL_GET_SC1_ERRORCODE:
+	case MHL_GET_DDC_ERRORCODE:
+	case MHL_GET_MSC_ERRORCODE:
+	case MHL_GET_SC3_ERRORCODE:
+		start_bit = MSC_START_BIT_MSC_CMD;
+		MHL_SII_REG_NAME_WR(REG_CBUS_PRI_ADDR_CMD, req->command);
+		break;
+	case MHL_MSC_MSG:
+		start_bit = MSC_START_BIT_VS_CMD;
+		MHL_SII_REG_NAME_WR(REG_CBUS_PRI_WR_DATA_2ND,
+				    req->payload.data[1]);
+		MHL_SII_REG_NAME_WR(REG_CBUS_PRI_ADDR_CMD, req->command);
+		break;
+	case MHL_WRITE_BURST:
+		start_bit = MSC_START_BIT_WRITE_BURST;
+		MHL_SII_REG_NAME_WR(REG_MSC_WRITE_BURST_LEN, req->length - 1);
+		if (!(req->payload.burst_data)) {
+			pr_err("%s: burst data is null!\n", __func__);
+			goto cbus_send_fail;
+		}
+		/* stage the burst payload in the CBUS scratchpad */
+		burst_data = req->payload.burst_data;
+		for (i = 0; i < req->length; i++, burst_data++)
+			MHL_SII_REG_NAME_WR(REG_CBUS_SCRATCHPAD_0 + i,
+				*burst_data);
+		break;
+	default:
+		pr_err("%s: unknown command! (%02x)\n",
+		       __func__, req->command);
+		goto cbus_send_fail;
+	}
+
+	reinit_completion(&mhl_ctrl->msc_cmd_done);
+	MHL_SII_REG_NAME_WR(REG_CBUS_PRI_START, start_bit);
+	timeout = wait_for_completion_timeout
+		(&mhl_ctrl->msc_cmd_done, msecs_to_jiffies(T_ABORT_NEXT));
+	if (!timeout) {
+		pr_err("%s: cbus_command_send timed out!\n", __func__);
+		goto cbus_send_fail;
+	}
+
+	switch (req->command) {
+	case MHL_READ_DEVCAP:
+		req->retval = MHL_SII_REG_NAME_RD(REG_CBUS_PRI_RD_DATA_1ST);
+		break;
+	case MHL_MSC_MSG:
+		/* check if MSC_MSG NACKed */
+		if (MHL_SII_REG_NAME_RD(REG_MSC_WRITE_BURST_LEN) & BIT6)
+			return -EAGAIN;
+		/* fallthrough - an ACKed MSC_MSG completes with retval 0 */
+	default:
+		req->retval = 0;
+		break;
+	}
+	mhl_msc_command_done(mhl_ctrl, req);
+	pr_debug("%s: msc cmd done\n", __func__);
+	return 0;
+
+cbus_send_fail:
+	return -EFAULT;
+}
+
+/* read scratchpad */
+/*
+ * Snapshot the CBUS scratchpad registers (MHL_SCRATCHPAD_SIZE bytes,
+ * written by the peer's WRITE_BURST) into mhl_ctrl->scrpd.data[].
+ */
+void mhl_read_scratchpad(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+	int i;
+
+	for (i = 0; i < MHL_SCRATCHPAD_SIZE; i++) {
+		mhl_ctrl->scrpd.data[i] = MHL_SII_REG_NAME_RD(
+			REG_CBUS_SCRATCHPAD_0 + i);
+	}
+}
+
+/*
+ * mhl_cbus_isr() - service the two CBUS interrupt status registers.
+ *
+ * First pass (REG_CBUS_INTR_STATUS): latches MSC_MSG (BIT3),
+ * abort errors (BIT6/BIT5/BIT2) and MSC_REQ_DONE (BIT4).  Second pass
+ * (REG_CBUS_MSC_INT2_STATUS): handles received SET_INT (BIT2) and
+ * WRITE_STAT (BIT3) from the peer.  All handled status is acked by
+ * writing it back.  Finally dispatches any received MSC_MSG and
+ * completes msc_cmd_done so mhl_send_msc_command() can return.
+ */
+static void mhl_cbus_isr(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	uint8_t regval;
+	int req_done = 0;
+	uint8_t sub_cmd = 0x0;
+	uint8_t cmd_data = 0x0;
+	int msc_msg_recved = 0;
+	int rc = -1;
+	unsigned long flags;
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	regval = MHL_SII_REG_NAME_RD(REG_CBUS_INTR_STATUS);
+	/* 0xff is treated as a bogus read - presumably a failed i2c
+	 * transfer or powered-down chip; TODO confirm
+	 */
+	if (regval == 0xff)
+		return;
+
+	if (regval)
+		MHL_SII_REG_NAME_WR(REG_CBUS_INTR_STATUS, regval);
+
+	pr_debug("%s: CBUS_INT = %02x\n", __func__, regval);
+
+	/* MSC_MSG (RCP/RAP) */
+	if (regval & BIT3) {
+		sub_cmd = MHL_SII_REG_NAME_RD(REG_CBUS_PRI_VS_CMD);
+		cmd_data = MHL_SII_REG_NAME_RD(REG_CBUS_PRI_VS_DATA);
+		msc_msg_recved = 1;
+	}
+	/* MSC_MT_ABRT/MSC_MR_ABRT/DDC_ABORT */
+	if (regval & (BIT6 | BIT5 | BIT2))
+		mhl_sii_cbus_process_errors(client, regval);
+
+	/* MSC_REQ_DONE */
+	if (regval & BIT4)
+		req_done = 1;
+
+	/* look for interrupts on CBUS_MSC_INT2 */
+	regval  = MHL_SII_REG_NAME_RD(REG_CBUS_MSC_INT2_STATUS);
+
+	/* clear all interrupts */
+	if (regval)
+		MHL_SII_REG_NAME_WR(REG_CBUS_MSC_INT2_STATUS, regval);
+
+	pr_debug("%s: CBUS_MSC_INT2 = %02x\n", __func__, regval);
+
+	/* received SET_INT */
+	if (regval & BIT2) {
+		uint8_t intr;
+
+		intr = MHL_SII_REG_NAME_RD(REG_CBUS_SET_INT_0);
+		MHL_SII_REG_NAME_WR(REG_CBUS_SET_INT_0, intr);
+		mhl_msc_recv_set_int(mhl_ctrl, 0, intr);
+		if (intr & MHL_INT_DCAP_CHG) {
+			/* No need to go to low power mode */
+			spin_lock_irqsave(&mhl_ctrl->lock, flags);
+			mhl_ctrl->dwnstream_hpd = 0x00;
+			pr_debug("%s: %u: dwnstrm_hpd=0x%02x\n",
+				 __func__, __LINE__, mhl_ctrl->dwnstream_hpd);
+			spin_unlock_irqrestore(&mhl_ctrl->lock, flags);
+		}
+
+		pr_debug("%s: MHL_INT_0 = %02x\n", __func__, intr);
+		intr = MHL_SII_REG_NAME_RD(REG_CBUS_SET_INT_1);
+		MHL_SII_REG_NAME_WR(REG_CBUS_SET_INT_1, intr);
+		mhl_msc_recv_set_int(mhl_ctrl, 1, intr);
+
+		pr_debug("%s: MHL_INT_1 = %02x\n", __func__, intr);
+		MHL_SII_REG_NAME_WR(REG_CBUS_SET_INT_2, 0xFF);
+		MHL_SII_REG_NAME_WR(REG_CBUS_SET_INT_3, 0xFF);
+	}
+
+	/* received WRITE_STAT */
+	if (regval & BIT3) {
+		uint8_t stat;
+
+		stat = MHL_SII_REG_NAME_RD(REG_CBUS_WRITE_STAT_0);
+		mhl_msc_recv_write_stat(mhl_ctrl, 0, stat);
+
+		pr_debug("%s: MHL_STATUS_0 = %02x\n", __func__, stat);
+		stat = MHL_SII_REG_NAME_RD(REG_CBUS_WRITE_STAT_1);
+		mhl_msc_recv_write_stat(mhl_ctrl, 1, stat);
+		pr_debug("%s: MHL_STATUS_1 = %02x\n", __func__, stat);
+
+		MHL_SII_REG_NAME_WR(REG_CBUS_WRITE_STAT_0, 0xFF);
+		MHL_SII_REG_NAME_WR(REG_CBUS_WRITE_STAT_1, 0xFF);
+		MHL_SII_REG_NAME_WR(REG_CBUS_WRITE_STAT_2, 0xFF);
+		MHL_SII_REG_NAME_WR(REG_CBUS_WRITE_STAT_3, 0xFF);
+	}
+
+	/* received MSC_MSG */
+	if (msc_msg_recved) {
+		/*mhl msc recv msc msg*/
+		rc = mhl_msc_recv_msc_msg(mhl_ctrl, sub_cmd, cmd_data);
+		if (rc)
+			pr_err("MHL: mhl msc recv msc msg failed(%d)!\n", rc);
+	}
+	/* complete last command */
+	if (req_done)
+		complete_all(&mhl_ctrl->msc_cmd_done);
+
+}
+
+/*
+ * mhl_tx_isr() - top-level IRQ handler for the SII8334.
+ *
+ * Bails out immediately (under @lock) if the TX was already powered
+ * off by mhl_tx_down().  Otherwise fans out, in order, to:
+ * device-detection (RGND/MHL_EST/CBUS_LOCKOUT), misc (INTR5),
+ * CBUS (MSC traffic) and HPD status handling.  Always returns
+ * IRQ_HANDLED.
+ */
+static irqreturn_t mhl_tx_isr(int irq, void *data)
+{
+	int rc;
+	struct mhl_tx_ctrl *mhl_ctrl = (struct mhl_tx_ctrl *)data;
+	unsigned long flags;
+
+	pr_debug("%s: Getting Interrupts\n", __func__);
+
+	spin_lock_irqsave(&mhl_ctrl->lock, flags);
+	if (mhl_ctrl->tx_powered_off) {
+		pr_warn("%s: powered off\n", __func__);
+		spin_unlock_irqrestore(&mhl_ctrl->lock, flags);
+		return IRQ_HANDLED;
+	}
+	spin_unlock_irqrestore(&mhl_ctrl->lock, flags);
+
+	/*
+	 * Check RGND, MHL_EST, CBUS_LOCKOUT, SCDT
+	 * interrupts. In D3, we get only RGND
+	 */
+	rc = dev_detect_isr(mhl_ctrl);
+	if (rc)
+		pr_debug("%s: dev_detect_isr rc=[%d]\n", __func__, rc);
+
+	pr_debug("%s: cur pwr state is [0x%x]\n",
+		 __func__, mhl_ctrl->cur_state);
+
+	/*
+	 * If dev_detect_isr() didn't move the tx to D3
+	 * on disconnect, continue to check other
+	 * interrupt sources.
+	 */
+	mhl_misc_isr(mhl_ctrl);
+
+	/*
+	 * Check for any peer messages for DCAP_CHG, MSC etc
+	 * Dispatch to have the CBUS module working only
+	 * once connected.
+	 */
+	mhl_cbus_isr(mhl_ctrl);
+	mhl_hpd_stat_isr(mhl_ctrl);
+
+	return IRQ_HANDLED;
+}
+
+
+/*
+ * mhl_sii_reg_config() - acquire/enable or disable/release the MHL
+ * power rails (avcc_18, avcc_12, smps3a, vdda).
+ *
+ * The regulator handles are cached in function-local statics so a
+ * later disable call releases exactly what the enable call acquired.
+ * On any failure while enabling, every rail acquired so far is
+ * disabled and put, and the cached pointers are reset to NULL so a
+ * retry starts from a clean slate.  (The previous version called
+ * regulator_disable() on a rail that never enabled, and left stale
+ * already-put pointers behind, breaking any retry.)
+ *
+ * Returns 0 on success; -ENODEV if the first rail cannot be acquired,
+ * -EINVAL on any other failure.
+ */
+static int mhl_sii_reg_config(struct i2c_client *client, bool enable)
+{
+	static struct regulator *reg_8941_l24;		/* avcc_18 */
+	static struct regulator *reg_8941_l02;		/* avcc_12 */
+	static struct regulator *reg_8941_smps3a;
+	static struct regulator *reg_8941_vdda;
+	int rc;
+
+	pr_debug("%s\n", __func__);
+
+	if (!enable) {
+		/* Release in reverse order of acquisition. */
+		if (reg_8941_vdda) {
+			regulator_disable(reg_8941_vdda);
+			regulator_put(reg_8941_vdda);
+			reg_8941_vdda = NULL;
+		}
+
+		if (reg_8941_smps3a) {
+			regulator_disable(reg_8941_smps3a);
+			regulator_put(reg_8941_smps3a);
+			reg_8941_smps3a = NULL;
+		}
+
+		if (reg_8941_l02) {
+			regulator_disable(reg_8941_l02);
+			regulator_put(reg_8941_l02);
+			reg_8941_l02 = NULL;
+		}
+
+		if (reg_8941_l24) {
+			regulator_disable(reg_8941_l24);
+			regulator_put(reg_8941_l24);
+			reg_8941_l24 = NULL;
+		}
+		return 0;
+	}
+
+	if (!reg_8941_l24) {
+		reg_8941_l24 = regulator_get(&client->dev, "avcc_18");
+		if (IS_ERR(reg_8941_l24)) {
+			pr_err("could not get 8941 l24, rc = %ld\n",
+				PTR_ERR(reg_8941_l24));
+			reg_8941_l24 = NULL;
+			return -ENODEV;
+		}
+		rc = regulator_enable(reg_8941_l24);
+		if (rc) {
+			pr_err("'%s' regulator config[%u] failed, rc=%d\n",
+			       "avcc_1.8V", enable, rc);
+			goto put_l24;
+		}
+		pr_debug("%s: vreg L24 enabled\n", __func__);
+	}
+
+	if (!reg_8941_l02) {
+		reg_8941_l02 = regulator_get(&client->dev, "avcc_12");
+		if (IS_ERR(reg_8941_l02)) {
+			pr_err("could not get reg_8941_l02, rc = %ld\n",
+				PTR_ERR(reg_8941_l02));
+			reg_8941_l02 = NULL;
+			goto err_l24;
+		}
+		rc = regulator_enable(reg_8941_l02);
+		if (rc) {
+			pr_err("'%s' regulator config[%u] failed, rc=%d\n",
+			       "avcc_1.2V", enable, rc);
+			goto put_l02;
+		}
+		pr_debug("%s: vreg L02 enabled\n", __func__);
+	}
+
+	if (!reg_8941_smps3a) {
+		reg_8941_smps3a = regulator_get(&client->dev, "smps3a");
+		if (IS_ERR(reg_8941_smps3a)) {
+			pr_err("could not get vreg smps3a, rc = %ld\n",
+				PTR_ERR(reg_8941_smps3a));
+			reg_8941_smps3a = NULL;
+			goto err_l02;
+		}
+		rc = regulator_enable(reg_8941_smps3a);
+		if (rc) {
+			pr_err("'%s' regulator config[%u] failed, rc=%d\n",
+			       "SMPS3A", enable, rc);
+			goto put_smps3a;
+		}
+		pr_debug("%s: vreg SMPS3A enabled\n", __func__);
+	}
+
+	if (!reg_8941_vdda) {
+		reg_8941_vdda = regulator_get(&client->dev, "vdda");
+		if (IS_ERR(reg_8941_vdda)) {
+			pr_err("could not get vreg vdda, rc = %ld\n",
+				PTR_ERR(reg_8941_vdda));
+			reg_8941_vdda = NULL;
+			goto err_smps3a;
+		}
+		rc = regulator_enable(reg_8941_vdda);
+		if (rc) {
+			pr_err("'%s' regulator config[%u] failed, rc=%d\n",
+			       "VDDA", enable, rc);
+			goto put_vdda;
+		}
+		pr_debug("%s: vreg VDDA enabled\n", __func__);
+	}
+
+	return 0;
+
+	/*
+	 * Unwind ladder: a rail that failed to enable is only put
+	 * (put_*); rails that did enable are disabled first (err_*).
+	 * Each cached pointer is NULLed so a later call can retry.
+	 */
+put_vdda:
+	regulator_put(reg_8941_vdda);
+	reg_8941_vdda = NULL;
+err_smps3a:
+	regulator_disable(reg_8941_smps3a);
+put_smps3a:
+	regulator_put(reg_8941_smps3a);
+	reg_8941_smps3a = NULL;
+err_l02:
+	regulator_disable(reg_8941_l02);
+put_l02:
+	regulator_put(reg_8941_l02);
+	reg_8941_l02 = NULL;
+err_l24:
+	regulator_disable(reg_8941_l24);
+put_l24:
+	regulator_put(reg_8941_l24);
+	reg_8941_l24 = NULL;
+
+	return -EINVAL;
+}
+
+
+/*
+ * mhl_vreg_config() - PMIC power GPIO plus regulator bring-up/teardown.
+ *
+ * @on != 0: request the PMIC power GPIO, drive it high, then enable
+ * the chip rails via mhl_sii_reg_config().  @on == 0: disable the
+ * rails and free the GPIO.  On a bring-up failure the GPIO is freed
+ * before returning.
+ *
+ * Returns 0 on success, negative errno on failure.
+ */
+static int mhl_vreg_config(struct mhl_tx_ctrl *mhl_ctrl, uint8_t on)
+{
+	int ret;
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+	int pwr_gpio = mhl_ctrl->pdata->gpios[MHL_TX_PMIC_PWR_GPIO]->gpio;
+
+	pr_debug("%s\n", __func__);
+	if (on) {
+		ret = gpio_request(pwr_gpio,
+		    mhl_ctrl->pdata->gpios[MHL_TX_PMIC_PWR_GPIO]->gpio_name);
+		if (ret < 0) {
+			pr_err("%s: mhl pwr gpio req failed: %d\n",
+			       __func__, ret);
+			return ret;
+		}
+		ret = gpio_direction_output(pwr_gpio, 1);
+		if (ret < 0) {
+			pr_err("%s: set gpio MHL_PWR_EN dircn failed: %d\n",
+			       __func__, ret);
+			goto vreg_config_failed;
+		}
+
+		ret = mhl_sii_reg_config(client, true);
+		if (ret) {
+			pr_err("%s: regulator enable failed\n", __func__);
+			goto vreg_config_failed;
+		}
+		pr_debug("%s: mhl sii power on successful\n", __func__);
+	} else {
+		pr_warn("%s: turning off pwr controls\n", __func__);
+		mhl_sii_reg_config(client, false);
+		gpio_free(pwr_gpio);
+	}
+	pr_debug("%s: successful\n", __func__);
+	return 0;
+vreg_config_failed:
+	gpio_free(pwr_gpio);
+	return -EINVAL;
+}
+
+/*
+ * Request for GPIO allocations
+ * Set appropriate GPIO directions
+ */
+/*
+ * mhl_gpio_config() - request/release the MHL reset and interrupt GPIOs.
+ *
+ * @on != 0: request both GPIOs, drive reset low, configure the
+ * interrupt pin as an input and map it onto the i2c client's IRQ.
+ * @on == 0: free both GPIOs.
+ *
+ * A failure while setting up the interrupt GPIO now releases the
+ * already-requested reset GPIO instead of leaking it (the previous
+ * version returned without freeing, so a probe retry would always
+ * fail with -EBUSY).
+ *
+ * Returns 0 on success, -EBUSY on any request/direction failure.
+ */
+static int mhl_gpio_config(struct mhl_tx_ctrl *mhl_ctrl, int on)
+{
+	int ret;
+	struct dss_gpio *temp_reset_gpio, *temp_intr_gpio;
+
+	/* caused too many line spills */
+	temp_reset_gpio = mhl_ctrl->pdata->gpios[MHL_TX_RESET_GPIO];
+	temp_intr_gpio = mhl_ctrl->pdata->gpios[MHL_TX_INTR_GPIO];
+
+	if (on) {
+		if (gpio_is_valid(temp_reset_gpio->gpio)) {
+			ret = gpio_request(temp_reset_gpio->gpio,
+					   temp_reset_gpio->gpio_name);
+			if (ret < 0) {
+				pr_err("%s:rst_gpio=[%d] req failed:%d\n",
+				       __func__, temp_reset_gpio->gpio, ret);
+				return -EBUSY;
+			}
+			/* hold the chip in reset (low) for now */
+			ret = gpio_direction_output(temp_reset_gpio->gpio, 0);
+			if (ret < 0) {
+				pr_err("%s: set dirn rst failed: %d\n",
+				       __func__, ret);
+				goto free_reset;
+			}
+		}
+		if (gpio_is_valid(temp_intr_gpio->gpio)) {
+			ret = gpio_request(temp_intr_gpio->gpio,
+					   temp_intr_gpio->gpio_name);
+			if (ret < 0) {
+				pr_err("%s: intr_gpio req failed: %d\n",
+				       __func__, ret);
+				goto free_reset;
+			}
+			ret = gpio_direction_input(temp_intr_gpio->gpio);
+			if (ret < 0) {
+				pr_err("%s: set dirn intr failed: %d\n",
+				       __func__, ret);
+				goto free_intr;
+			}
+			mhl_ctrl->i2c_handle->irq = gpio_to_irq(
+				temp_intr_gpio->gpio);
+			pr_debug("%s: gpio_to_irq=%d\n",
+				 __func__, mhl_ctrl->i2c_handle->irq);
+		}
+	} else {
+		pr_warn("%s: freeing gpios\n", __func__);
+		gpio_free(temp_intr_gpio->gpio);
+		gpio_free(temp_reset_gpio->gpio);
+	}
+	pr_debug("%s: successful\n", __func__);
+	return 0;
+
+free_intr:
+	gpio_free(temp_intr_gpio->gpio);
+free_reset:
+	if (gpio_is_valid(temp_reset_gpio->gpio))
+		gpio_free(temp_reset_gpio->gpio);
+	return -EBUSY;
+}
+
+/*
+ * mhl_i2c_probe() - bind the SII8334 i2c device.
+ *
+ * Allocates the driver context, parses DT platform data, sets up the
+ * MSC work machinery, an optional RCP input device, the "ext-vbus"
+ * power supply, registers with the HDMI driver and with the USB ext
+ * notification mechanism, and finally registers MSC handling.
+ *
+ * Returns 0 on success or a negative errno; -EPROBE_DEFER when the
+ * HDMI or USB side is not ready yet.
+ */
+static int mhl_i2c_probe(struct i2c_client *client,
+			 const struct i2c_device_id *id)
+{
+	int rc = 0;
+	struct mhl_tx_platform_data *pdata = NULL;
+	struct mhl_tx_ctrl *mhl_ctrl;
+	struct usb_ext_notification *mhl_info = NULL;
+	struct msm_hdmi_mhl_ops *hdmi_mhl_ops = NULL;
+
+	mhl_ctrl = devm_kzalloc(&client->dev, sizeof(*mhl_ctrl), GFP_KERNEL);
+	if (!mhl_ctrl) {
+		rc = -ENOMEM;
+		goto failed_no_mem;
+	}
+
+	if (client->dev.of_node) {
+		pdata = devm_kzalloc(&client->dev,
+			     sizeof(struct mhl_tx_platform_data), GFP_KERNEL);
+		if (!pdata) {
+			rc = -ENOMEM;
+			goto failed_no_mem;
+		}
+
+		rc = mhl_tx_get_dt_data(&client->dev, pdata);
+		if (rc) {
+			pr_err("%s: FAILED: parsing device tree data; rc=%d\n",
+				__func__, rc);
+			goto failed_dt_data;
+		}
+		mhl_ctrl->i2c_handle = client;
+		mhl_ctrl->pdata = pdata;
+		i2c_set_clientdata(client, mhl_ctrl);
+	}
+
+	/*
+	 * Other initializations
+	 * such tx specific
+	 */
+	mhl_ctrl->disc_enabled = false;
+	INIT_WORK(&mhl_ctrl->mhl_msc_send_work, mhl_msc_send_work);
+	mhl_ctrl->cur_state = POWER_STATE_D0_MHL;
+	INIT_LIST_HEAD(&mhl_ctrl->list_cmd);
+	init_completion(&mhl_ctrl->msc_cmd_done);
+	spin_lock_init(&mhl_ctrl->lock);
+	/*
+	 * NOTE(review): neither workqueue allocation below is checked
+	 * for NULL; a failure here would only surface later when work
+	 * is queued -- consider adding checks.
+	 */
+	mhl_ctrl->msc_send_workqueue = create_singlethread_workqueue
+		("mhl_msc_cmd_queue");
+	mhl_ctrl->mhl_workq = create_singlethread_workqueue("mhl_workq");
+
+	INIT_WORK(&mhl_ctrl->mhl_intr_work, mhl_sii_disc_intr_work);
+
+	/* Optional RCP (remote control) input device. */
+	mhl_ctrl->input = input_allocate_device();
+	if (mhl_ctrl->input) {
+		int i;
+		struct input_dev *input = mhl_ctrl->input;
+
+		mhl_ctrl->rcp_key_code_tbl = vmalloc(
+			sizeof(support_rcp_key_code_tbl));
+		/*
+		 * NOTE(review): this early return bypasses the error
+		 * labels below and leaks the workqueues created above.
+		 */
+		if (!mhl_ctrl->rcp_key_code_tbl)
+			return -ENOMEM;
+
+		mhl_ctrl->rcp_key_code_tbl_len = sizeof(
+			support_rcp_key_code_tbl);
+		memcpy(mhl_ctrl->rcp_key_code_tbl,
+		       &support_rcp_key_code_tbl[0],
+		       mhl_ctrl->rcp_key_code_tbl_len);
+
+		input->phys = "cbus/input0";
+		input->id.bustype = BUS_VIRTUAL;
+		input->id.vendor  = 0x1095;
+		input->id.product = 0x8334;
+		input->id.version = 0xA;
+
+		input->name = "mhl-rcp";
+
+		input->keycode = support_rcp_key_code_tbl;
+		input->keycodesize = sizeof(u16);
+		input->keycodemax = ARRAY_SIZE(support_rcp_key_code_tbl);
+
+		input->evbit[0] = EV_KEY;
+		for (i = 0; i < ARRAY_SIZE(support_rcp_key_code_tbl); i++) {
+			if (support_rcp_key_code_tbl[i] > 1)
+				input_set_capability(input, EV_KEY,
+					support_rcp_key_code_tbl[i]);
+		}
+
+		/* RCP input is best-effort: probe continues without it */
+		if (input_register_device(input) < 0) {
+			pr_warn("%s: failed to register input device\n",
+				__func__);
+			input_free_device(input);
+			mhl_ctrl->input = NULL;
+		}
+	}
+
+	mhl_ctrl->dwnstream_hpd = 0;
+	mhl_ctrl->tx_powered_off = false;
+
+
+	init_completion(&mhl_ctrl->rgnd_done);
+
+
+	/* Expose the MHL VBUS source as an "ext-vbus" power supply. */
+	mhl_ctrl->mhl_psy.name = "ext-vbus";
+	mhl_ctrl->mhl_psy.type = POWER_SUPPLY_TYPE_USB_DCP;
+	mhl_ctrl->mhl_psy.supplied_to = mhl_pm_power_supplied_to;
+	mhl_ctrl->mhl_psy.num_supplicants = ARRAY_SIZE(
+					mhl_pm_power_supplied_to);
+	mhl_ctrl->mhl_psy.properties = mhl_pm_power_props;
+	mhl_ctrl->mhl_psy.num_properties = ARRAY_SIZE(mhl_pm_power_props);
+	mhl_ctrl->mhl_psy.get_property = mhl_power_get_property;
+	mhl_ctrl->mhl_psy.set_property = mhl_power_set_property;
+
+	rc = power_supply_register(&client->dev, &mhl_ctrl->mhl_psy);
+	if (rc < 0) {
+		dev_err(&client->dev, "%s:power_supply_register ext_vbus_psy failed\n",
+			__func__);
+		goto failed_probe;
+	}
+
+	hdmi_mhl_ops = devm_kzalloc(&client->dev,
+				    sizeof(struct msm_hdmi_mhl_ops),
+				    GFP_KERNEL);
+	if (!hdmi_mhl_ops) {
+		pr_err("%s: alloc hdmi mhl ops failed\n", __func__);
+		rc = -ENOMEM;
+		goto failed_probe_pwr;
+	}
+
+	pr_debug("%s: i2c client addr is [%x]\n", __func__, client->addr);
+	if (mhl_ctrl->pdata->hdmi_pdev) {
+		rc = msm_hdmi_register_mhl(mhl_ctrl->pdata->hdmi_pdev,
+					   hdmi_mhl_ops, mhl_ctrl);
+		if (rc) {
+			/* HDMI driver not ready yet: retry probe later */
+			pr_err("%s: register with hdmi failed\n", __func__);
+			rc = -EPROBE_DEFER;
+			goto failed_probe_pwr;
+		}
+	}
+
+	if (!hdmi_mhl_ops || !hdmi_mhl_ops->tmds_enabled ||
+	    !hdmi_mhl_ops->set_mhl_max_pclk) {
+		pr_err("%s: func ptr is NULL\n", __func__);
+		rc = -EINVAL;
+		goto failed_probe_pwr;
+	}
+	mhl_ctrl->hdmi_mhl_ops = hdmi_mhl_ops;
+
+	rc = hdmi_mhl_ops->set_mhl_max_pclk(
+		mhl_ctrl->pdata->hdmi_pdev, MAX_MHL_PCLK);
+	if (rc) {
+		pr_err("%s: can't set max mhl pclk\n", __func__);
+		goto failed_probe_pwr;
+	}
+
+	mhl_info = devm_kzalloc(&client->dev, sizeof(*mhl_info), GFP_KERNEL);
+	if (!mhl_info) {
+		rc = -ENOMEM;
+		goto failed_probe_pwr;
+	}
+
+	/* Get told about cable attach/detach via the USB ext notifier. */
+	mhl_info->ctxt = mhl_ctrl;
+	mhl_info->notify = mhl_sii_device_discovery;
+	if (msm_register_usb_ext_notification(mhl_info)) {
+		pr_err("%s: register for usb notifcn failed\n", __func__);
+		rc = -EPROBE_DEFER;
+		goto failed_probe_pwr;
+	}
+	mhl_ctrl->mhl_info = mhl_info;
+	mhl_register_msc(mhl_ctrl);
+	return 0;
+
+failed_probe_pwr:
+	power_supply_unregister(&mhl_ctrl->mhl_psy);
+failed_probe:
+	mhl_sii_config(mhl_ctrl, false);
+	/* do not deep-free */
+	if (mhl_info)
+		devm_kfree(&client->dev, mhl_info);
+failed_dt_data:
+	if (pdata)
+		devm_kfree(&client->dev, pdata);
+failed_no_mem:
+	if (mhl_ctrl)
+		devm_kfree(&client->dev, mhl_ctrl);
+	mhl_info = NULL;
+	pdata = NULL;
+	mhl_ctrl = NULL;
+	pr_err("%s: PROBE FAILED, rc=%d\n", __func__, rc);
+	return rc;
+}
+
+
+/*
+ * mhl_i2c_remove() - unbind the SII8334 i2c device.
+ *
+ * Powers the hardware down and tears down both workqueues created in
+ * probe.  (The previous version destroyed only mhl_workq and leaked
+ * msc_send_workqueue.)  The devm allocations are freed explicitly to
+ * mirror probe's error path; devm would also release them on unbind.
+ */
+static int mhl_i2c_remove(struct i2c_client *client)
+{
+	struct mhl_tx_ctrl *mhl_ctrl = i2c_get_clientdata(client);
+
+	if (!mhl_ctrl) {
+		pr_warn("%s: i2c get client data failed\n", __func__);
+		return -EINVAL;
+	}
+
+	mhl_sii_config(mhl_ctrl, false);
+
+	/* guard: probe does not check the workqueue allocations */
+	if (mhl_ctrl->msc_send_workqueue)
+		destroy_workqueue(mhl_ctrl->msc_send_workqueue);
+	if (mhl_ctrl->mhl_workq)
+		destroy_workqueue(mhl_ctrl->mhl_workq);
+
+	if (mhl_ctrl->mhl_info)
+		devm_kfree(&client->dev, mhl_ctrl->mhl_info);
+	if (mhl_ctrl->pdata)
+		devm_kfree(&client->dev, mhl_ctrl->pdata);
+	devm_kfree(&client->dev, mhl_ctrl);
+	return 0;
+}
+
+/* I2C id table; exported via MODULE_DEVICE_TABLE for autoloading. */
+static struct i2c_device_id mhl_sii_i2c_id[] = {
+	{ MHL_DRIVER_NAME, 0 },
+	{ }
+};
+
+
+MODULE_DEVICE_TABLE(i2c, mhl_sii_i2c_id);
+
+#if defined(CONFIG_PM) || defined(CONFIG_PM_SLEEP)
+/*
+ * Shared suspend body for both the legacy and dev_pm_ops paths.
+ * If an MHL session is active, drop mhl_mode, notify the power-supply
+ * core and the USB layer, then power the hardware down.
+ */
+static int mhl_i2c_suspend_sub(struct i2c_client *client)
+{
+	struct mhl_tx_ctrl *mhl_ctrl = i2c_get_clientdata(client);
+
+	pr_debug("%s\n", __func__);
+
+	if (!mhl_ctrl) {
+		pr_err("%s: invalid ctrl data\n", __func__);
+		return 0;
+	}
+
+	if (mhl_ctrl->mhl_mode)	{
+		mhl_ctrl->mhl_mode = 0;
+
+		power_supply_changed(&mhl_ctrl->mhl_psy);
+		if (mhl_ctrl->notify_usb_online)
+			mhl_ctrl->notify_usb_online(mhl_ctrl->notify_ctx, 0);
+
+		mhl_sii_config(mhl_ctrl, false);
+	}
+
+	return 0;
+}
+
+/*
+ * Shared resume body.  Intentionally a no-op: presumably the hardware
+ * is re-initialized on the next cable detection -- TODO confirm.
+ */
+static int mhl_i2c_resume_sub(struct i2c_client *client)
+{
+	pr_debug("%s\n", __func__);
+
+	return 0;
+}
+#endif /* defined(CONFIG_PM) || defined(CONFIG_PM_SLEEP) */
+
+#if defined(CONFIG_PM) && !defined(CONFIG_PM_SLEEP)
+/* Legacy (pre-dev_pm_ops) suspend entry, used only without PM_SLEEP. */
+static int mhl_i2c_suspend(struct i2c_client *client, pm_message_t state)
+{
+	if (!client)
+		return -ENODEV;
+	pr_debug("%s: mhl suspend\n", __func__);
+	return mhl_i2c_suspend_sub(client);
+}
+
+/* Legacy resume entry; forwards to the shared resume helper. */
+static int mhl_i2c_resume(struct i2c_client *client)
+{
+	if (!client)
+		return -ENODEV;
+	pr_debug("%s: mhl resume\n", __func__);
+	return mhl_i2c_resume_sub(client);
+}
+#else
+/* With CONFIG_PM_SLEEP the dev_pm_ops path is used instead. */
+#define mhl_i2c_suspend NULL
+#define mhl_i2c_resume NULL
+#endif /* defined(CONFIG_PM) && !defined(CONFIG_PM_SLEEP) */
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * System-sleep suspend hook: recover the i2c client with the standard
+ * to_i2c_client() helper (previously open-coded as container_of) and
+ * forward to the shared suspend body.
+ */
+static int mhl_i2c_pm_suspend(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+
+	if (!client)
+		return -ENODEV;
+	pr_debug("%s: mhl pm suspend\n", __func__);
+	return mhl_i2c_suspend_sub(client);
+}
+
+/* System-sleep resume hook: forward to the shared resume body. */
+static int mhl_i2c_pm_resume(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+
+	if (!client)
+		return -ENODEV;
+	pr_debug("%s: mhl pm resume\n", __func__);
+	return mhl_i2c_resume_sub(client);
+}
+
+static const struct dev_pm_ops mhl_i2c_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(mhl_i2c_pm_suspend, mhl_i2c_pm_resume)
+};
+#endif /* CONFIG_PM_SLEEP */
+
+/*
+ * Device-tree match table.
+ * NOTE(review): only referenced by mhl_sii_i2c_driver below; it could
+ * be marked static, and exported with MODULE_DEVICE_TABLE(of, ...) for
+ * DT-based autoloading -- confirm there are no external users first.
+ */
+const struct of_device_id mhl_match_table[] = {
+	{.compatible = COMPATIBLE_NAME,},
+	{ },
+};
+
+/*
+ * I2C driver registration: DT-matched; uses dev_pm_ops when
+ * CONFIG_PM_SLEEP is set, legacy suspend/resume hooks otherwise.
+ */
+static struct i2c_driver mhl_sii_i2c_driver = {
+	.driver = {
+		.name = MHL_DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = mhl_match_table,
+#ifdef CONFIG_PM_SLEEP
+		.pm = &mhl_i2c_pm_ops,
+#endif /* CONFIG_PM_SLEEP */
+	},
+	.probe = mhl_i2c_probe,
+	.remove =  mhl_i2c_remove,
+#if defined(CONFIG_PM) && !defined(CONFIG_PM_SLEEP)
+	.suspend = mhl_i2c_suspend,
+	.resume = mhl_i2c_resume,
+#endif /* defined(CONFIG_PM) && !defined(CONFIG_PM_SLEEP) */
+	.id_table = mhl_sii_i2c_id,
+};
+
+module_i2c_driver(mhl_sii_i2c_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MHL SII 8334 TX Driver");
diff --git a/drivers/video/fbdev/msm/msm_dba/Kconfig b/drivers/video/fbdev/msm/msm_dba/Kconfig
new file mode 100644
index 0000000..69894cd
--- /dev/null
+++ b/drivers/video/fbdev/msm/msm_dba/Kconfig
@@ -0,0 +1,24 @@
+#
+# MSM DBA
+#
+
+config MSM_DBA
+	bool "MSM Display Bridge Abstraction support"
+	depends on ARM || ARM64
+	---help---
+	  Support for MSM display bridge abstraction interface. MSM display
+	  drivers can use the same interface to interact with different third
+	  party bridge chips. Drivers implemented for third party bridge chips
+	  should support this interface to allow display driver to control the
+	  bridge chip. The MSM DBA driver maintains a list of devices supported
+	  on the platform and allow clients to register and access these
+	  devices.
+
+config MSM_DBA_ADV7533
+	bool "ADV7533 driver support through MSM DBA interface"
+	depends on MSM_DBA
+	default n
+	---help---
+	  Support for ADV7533 DSI to HDMI display bridge driver. The driver
+	  controls the ADV7533 HW through the I2C interface and configures
+	  the DSI input and HDMI output video format.
diff --git a/drivers/video/fbdev/msm/msm_dba/Makefile b/drivers/video/fbdev/msm/msm_dba/Makefile
new file mode 100644
index 0000000..cf28ad4
--- /dev/null
+++ b/drivers/video/fbdev/msm/msm_dba/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_MSM_DBA) += msm_dba.o msm_dba_init.o msm_dba_helpers.o msm_dba_debug.o
+obj-$(CONFIG_MSM_DBA_ADV7533) += adv7533.o
+# NOTE(review): a manual 'clean' rule is unnecessary in a Kbuild
+# Makefile (the kernel build system handles 'make clean'), and plain
+# 'rm *.o' errors out when no objects exist -- consider dropping it.
+clean:
+	rm *.o
diff --git a/drivers/video/fbdev/msm/msm_dba/adv7533.c b/drivers/video/fbdev/msm/msm_dba/adv7533.c
new file mode 100644
index 0000000..b628e1a
--- /dev/null
+++ b/drivers/video/fbdev/msm/msm_dba/adv7533.c
@@ -0,0 +1,2143 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include "msm_dba_internal.h"
+#include <linux/mdss_io_util.h>
+
+#define ADV7533_REG_CHIP_REVISION (0x00)
+#define ADV7533_DSI_CEC_I2C_ADDR_REG (0xE1)
+#define ADV7533_RESET_DELAY (10)
+
+#define PINCTRL_STATE_ACTIVE    "pmx_adv7533_active"
+#define PINCTRL_STATE_SUSPEND   "pmx_adv7533_suspend"
+
+#define MDSS_MAX_PANEL_LEN      256
+#define EDID_SEG_SIZE 0x100
+/* size of audio and speaker info Block */
+#define AUDIO_DATA_SIZE 32
+
+/* 0x94 interrupts */
+#define HPD_INT_ENABLE           BIT(7)
+#define MONITOR_SENSE_INT_ENABLE BIT(6)
+#define ACTIVE_VSYNC_EDGE        BIT(5)
+#define AUDIO_FIFO_FULL          BIT(4)
+#define EDID_READY_INT_ENABLE    BIT(2)
+#define HDCP_AUTHENTICATED       BIT(1)
+#define HDCP_RI_READY            BIT(0)
+
+#define MAX_WAIT_TIME (100)
+#define MAX_RW_TRIES (3)
+
+/* 0x95 interrupts */
+#define HDCP_ERROR               BIT(7)
+#define HDCP_BKSV_FLAG           BIT(6)
+#define CEC_TX_READY             BIT(5)
+#define CEC_TX_ARB_LOST          BIT(4)
+#define CEC_TX_RETRY_TIMEOUT     BIT(3)
+#define CEC_TX_RX_BUF3_READY     BIT(2)
+#define CEC_TX_RX_BUF2_READY     BIT(1)
+#define CEC_TX_RX_BUF1_READY     BIT(0)
+
+#define HPD_INTERRUPTS           (HPD_INT_ENABLE | \
+					MONITOR_SENSE_INT_ENABLE)
+#define EDID_INTERRUPTS          EDID_READY_INT_ENABLE
+#define HDCP_INTERRUPTS1         HDCP_AUTHENTICATED
+#define HDCP_INTERRUPTS2         (HDCP_BKSV_FLAG | \
+					HDCP_ERROR)
+#define CEC_INTERRUPTS           (CEC_TX_READY | \
+					CEC_TX_ARB_LOST | \
+					CEC_TX_RETRY_TIMEOUT | \
+					CEC_TX_RX_BUF3_READY | \
+					CEC_TX_RX_BUF2_READY | \
+					CEC_TX_RX_BUF1_READY)
+
+#define CFG_HPD_INTERRUPTS       BIT(0)
+#define CFG_EDID_INTERRUPTS      BIT(1)
+#define CFG_HDCP_INTERRUPTS      BIT(2)
+#define CFG_CEC_INTERRUPTS       BIT(3)
+
+#define MAX_OPERAND_SIZE	14
+#define CEC_MSG_SIZE            (MAX_OPERAND_SIZE + 2)
+
+enum adv7533_i2c_addr {
+	I2C_ADDR_MAIN = 0x3D,
+	I2C_ADDR_CEC_DSI = 0x3C,
+};
+
+enum adv7533_cec_buf {
+	ADV7533_CEC_BUF1,
+	ADV7533_CEC_BUF2,
+	ADV7533_CEC_BUF3,
+	ADV7533_CEC_BUF_MAX,
+};
+
+/* One register write: target i2c page, offset, value and optional delay. */
+struct adv7533_reg_cfg {
+	u8 i2c_addr;	/* I2C_ADDR_MAIN or I2C_ADDR_CEC_DSI selector */
+	u8 reg;		/* register offset within that page */
+	u8 val;		/* value to program */
+	int sleep_in_ms;	/* post-write delay; 0 means none */
+};
+
+/* One received CEC message plus the rx timestamp bits for its buffer. */
+struct adv7533_cec_msg {
+	u8 buf[CEC_MSG_SIZE];
+	u8 timestamp;
+	bool pending;	/* true until the client consumes the message */
+};
+
+/* Per-device driver state for one ADV7533 DSI-to-HDMI bridge. */
+struct adv7533 {
+	u8 main_i2c_addr;	/* from DT "adi,main-addr" */
+	u8 cec_dsi_i2c_addr;	/* from DT "adi,cec-dsi-addr" */
+	u8 video_mode;
+	int irq;
+	u32 irq_gpio;
+	u32 irq_flags;
+	u32 hpd_irq_gpio;
+	u32 hpd_irq_flags;
+	u32 switch_gpio;
+	u32 switch_flags;
+	struct pinctrl *ts_pinctrl;
+	struct pinctrl_state *pinctrl_state_active;
+	struct pinctrl_state *pinctrl_state_suspend;
+	bool audio;		/* DT "adi,enable-audio" */
+	bool disable_gpios;	/* DT "adi,disable-gpios" */
+	struct dss_module_power power_data;
+	bool hdcp_enabled;
+	bool cec_enabled;
+	bool is_power_on;
+	void *edid_data;
+	u8 edid_buf[EDID_SEG_SIZE];	/* raw EDID cached on hotplug */
+	u8 audio_spkr_data[AUDIO_DATA_SIZE];
+	struct workqueue_struct *workq;
+	struct delayed_work adv7533_intr_work_id;
+	struct msm_dba_device_info dev_info;
+	struct adv7533_cec_msg cec_msg[ADV7533_CEC_BUF_MAX];
+	struct i2c_client *i2c_client;
+	struct mutex ops_mutex;	/* serializes audio block get/set */
+};
+
+static char mdss_mdp_panel[MDSS_MAX_PANEL_LEN];
+
+static struct adv7533_reg_cfg adv7533_init_setup[] = {
+	/* power down */
+	{I2C_ADDR_MAIN, 0x41, 0x50, 5},
+	/* HPD override */
+	{I2C_ADDR_MAIN, 0xD6, 0x48, 5},
+	/* color space */
+	{I2C_ADDR_MAIN, 0x16, 0x20, 0},
+	/* Fixed */
+	{I2C_ADDR_MAIN, 0x9A, 0xE0, 0},
+	/* HDCP */
+	{I2C_ADDR_MAIN, 0xBA, 0x70, 0},
+	/* Fixed */
+	{I2C_ADDR_MAIN, 0xDE, 0x82, 0},
+	/* V1P2 */
+	{I2C_ADDR_MAIN, 0xE4, 0x40, 0},
+	/* Fixed */
+	{I2C_ADDR_MAIN, 0xE5, 0x80, 0},
+	/* Fixed */
+	{I2C_ADDR_CEC_DSI, 0x15, 0xD0, 0},
+	/* Fixed */
+	{I2C_ADDR_CEC_DSI, 0x17, 0xD0, 0},
+	/* Fixed */
+	{I2C_ADDR_CEC_DSI, 0x24, 0x20, 0},
+	/* Fixed */
+	{I2C_ADDR_CEC_DSI, 0x57, 0x11, 0},
+};
+
+static struct adv7533_reg_cfg adv7533_video_en[] = {
+	 /* Timing Generator Enable */
+	{I2C_ADDR_CEC_DSI, 0x27, 0xCB, 0},
+	{I2C_ADDR_CEC_DSI, 0x27, 0x8B, 0},
+	{I2C_ADDR_CEC_DSI, 0x27, 0xCB, 0},
+	/* power up */
+	{I2C_ADDR_MAIN, 0x41, 0x10, 0},
+	/* hdmi enable */
+	{I2C_ADDR_CEC_DSI, 0x03, 0x89, 0},
+	/* color depth */
+	{I2C_ADDR_MAIN, 0x4C, 0x04, 0},
+	/* down dither */
+	{I2C_ADDR_MAIN, 0x49, 0x02, 0},
+	/* Audio and CEC clock gate */
+	{I2C_ADDR_CEC_DSI, 0x05, 0xC8, 0},
+	/* GC packet enable */
+	{I2C_ADDR_MAIN, 0x40, 0x80, 0},
+};
+
+static struct adv7533_reg_cfg adv7533_cec_en[] = {
+	/* Fixed, clock gate disable */
+	{I2C_ADDR_CEC_DSI, 0x05, 0xC8, 0},
+	/* read divider(7:2) from calc */
+	{I2C_ADDR_CEC_DSI, 0xBE, 0x01, 0},
+};
+
+static struct adv7533_reg_cfg adv7533_cec_tg_init[] = {
+	/* TG programming for 19.2MHz, divider 25 */
+	{I2C_ADDR_CEC_DSI, 0xBE, 0x61, 0},
+	{I2C_ADDR_CEC_DSI, 0xC1, 0x0D, 0},
+	{I2C_ADDR_CEC_DSI, 0xC2, 0x80, 0},
+	{I2C_ADDR_CEC_DSI, 0xC3, 0x0C, 0},
+	{I2C_ADDR_CEC_DSI, 0xC4, 0x9A, 0},
+	{I2C_ADDR_CEC_DSI, 0xC5, 0x0E, 0},
+	{I2C_ADDR_CEC_DSI, 0xC6, 0x66, 0},
+	{I2C_ADDR_CEC_DSI, 0xC7, 0x0B, 0},
+	{I2C_ADDR_CEC_DSI, 0xC8, 0x1A, 0},
+	{I2C_ADDR_CEC_DSI, 0xC9, 0x0A, 0},
+	{I2C_ADDR_CEC_DSI, 0xCA, 0x33, 0},
+	{I2C_ADDR_CEC_DSI, 0xCB, 0x0C, 0},
+	{I2C_ADDR_CEC_DSI, 0xCC, 0x00, 0},
+	{I2C_ADDR_CEC_DSI, 0xCD, 0x07, 0},
+	{I2C_ADDR_CEC_DSI, 0xCE, 0x33, 0},
+	{I2C_ADDR_CEC_DSI, 0xCF, 0x05, 0},
+	{I2C_ADDR_CEC_DSI, 0xD0, 0xDA, 0},
+	{I2C_ADDR_CEC_DSI, 0xD1, 0x08, 0},
+	{I2C_ADDR_CEC_DSI, 0xD2, 0x8D, 0},
+	{I2C_ADDR_CEC_DSI, 0xD3, 0x01, 0},
+	{I2C_ADDR_CEC_DSI, 0xD4, 0xCD, 0},
+	{I2C_ADDR_CEC_DSI, 0xD5, 0x04, 0},
+	{I2C_ADDR_CEC_DSI, 0xD6, 0x80, 0},
+	{I2C_ADDR_CEC_DSI, 0xD7, 0x05, 0},
+	{I2C_ADDR_CEC_DSI, 0xD8, 0x66, 0},
+	{I2C_ADDR_CEC_DSI, 0xD9, 0x03, 0},
+	{I2C_ADDR_CEC_DSI, 0xDA, 0x26, 0},
+	{I2C_ADDR_CEC_DSI, 0xDB, 0x0A, 0},
+	{I2C_ADDR_CEC_DSI, 0xDC, 0xCD, 0},
+	{I2C_ADDR_CEC_DSI, 0xDE, 0x00, 0},
+	{I2C_ADDR_CEC_DSI, 0xDF, 0xC0, 0},
+	{I2C_ADDR_CEC_DSI, 0xE1, 0x00, 0},
+	{I2C_ADDR_CEC_DSI, 0xE2, 0xE6, 0},
+	{I2C_ADDR_CEC_DSI, 0xE3, 0x02, 0},
+	{I2C_ADDR_CEC_DSI, 0xE4, 0xB3, 0},
+	{I2C_ADDR_CEC_DSI, 0xE5, 0x03, 0},
+	{I2C_ADDR_CEC_DSI, 0xE6, 0x9A, 0},
+};
+
+static struct adv7533_reg_cfg adv7533_cec_power[] = {
+	/* cec power up */
+	{I2C_ADDR_MAIN, 0xE2, 0x00, 0},
+	/* hpd override */
+	{I2C_ADDR_MAIN, 0xD6, 0x48, 0},
+	/* edid reread */
+	{I2C_ADDR_MAIN, 0xC9, 0x13, 0},
+	/* read all CEC Rx Buffers */
+	{I2C_ADDR_CEC_DSI, 0xBA, 0x08, 0},
+	/* logical address0 0x04 */
+	{I2C_ADDR_CEC_DSI, 0xBC, 0x04, 0},
+	/* select logical address0 */
+	{I2C_ADDR_CEC_DSI, 0xBB, 0x10, 0},
+};
+
+static struct adv7533_reg_cfg I2S_cfg[] = {
+	{I2C_ADDR_MAIN, 0x0D, 0x18, 0},	/* Bit width = 16Bits*/
+	{I2C_ADDR_MAIN, 0x15, 0x20, 0},	/* Sampling Frequency = 48kHz*/
+	{I2C_ADDR_MAIN, 0x02, 0x18, 0},	/* N value 6144 --> 0x1800*/
+	{I2C_ADDR_MAIN, 0x14, 0x02, 0},	/* Word Length = 16Bits*/
+	{I2C_ADDR_MAIN, 0x73, 0x01, 0},	/* Channel Count = 2 channels */
+};
+
+/*
+ * adv7533_write() - write one byte to an ADV7533 register.
+ * @offset: I2C_ADDR_MAIN / I2C_ADDR_CEC_DSI page selector; any other
+ *          value is used directly as the 7-bit i2c address.
+ * Returns 0 on success or the msm_dba helper's error code.
+ */
+static int adv7533_write(struct adv7533 *pdata, u8 offset, u8 reg, u8 val)
+{
+	u8 addr = 0;
+	int ret = 0;
+
+	if (!pdata) {
+		pr_debug("%s: Invalid argument\n", __func__);
+		return -EINVAL;
+	}
+
+	/* map the symbolic page selector to the address probed from DT */
+	if (offset == I2C_ADDR_MAIN)
+		addr = pdata->main_i2c_addr;
+	else if (offset == I2C_ADDR_CEC_DSI)
+		addr = pdata->cec_dsi_i2c_addr;
+	else
+		addr = offset;
+
+	ret = msm_dba_helper_i2c_write_byte(pdata->i2c_client, addr, reg, val);
+	if (ret)
+		pr_err_ratelimited("%s: wr err: addr 0x%x, reg 0x%x, val 0x%x\n",
+				__func__, addr, reg, val);
+	return ret;
+}
+
+/*
+ * adv7533_read() - read @size bytes starting at @reg into @buf.
+ * @offset: page selector, same convention as adv7533_write().
+ * Returns 0 on success or the msm_dba helper's error code.
+ */
+static int adv7533_read(struct adv7533 *pdata, u8 offset,
+		u8 reg, char *buf, u32 size)
+{
+	u8 addr = 0;
+	int ret = 0;
+
+	if (!pdata) {
+		pr_debug("%s: Invalid argument\n", __func__);
+		return -EINVAL;
+	}
+
+	/* map the symbolic page selector to the address probed from DT */
+	if (offset == I2C_ADDR_MAIN)
+		addr = pdata->main_i2c_addr;
+	else if (offset == I2C_ADDR_CEC_DSI)
+		addr = pdata->cec_dsi_i2c_addr;
+	else
+		addr = offset;
+
+	ret = msm_dba_helper_i2c_read(pdata->i2c_client, addr, reg, buf, size);
+	if (ret)
+		pr_err_ratelimited("%s: read err: addr 0x%x, reg 0x%x, size 0x%x\n",
+				__func__, addr, reg, size);
+	return ret;
+}
+
+/*
+ * adv7533_dump_debug_info() - dump all 256 registers of both i2c pages.
+ *
+ * Debug hook registered with the msm_dba core; pr_err is used on purpose
+ * so the dump reaches the log at default verbosity.
+ * Returns the status of the last register read.
+ */
+static int adv7533_dump_debug_info(struct msm_dba_device_info *dev, u32 flags)
+{
+	int rc = 0;
+	u8 byte_val = 0;
+	u16 addr = 0;	/* u16 so the <= 0xFF loop terminates */
+	struct adv7533 *pdata = NULL;
+
+	if (!dev) {
+		pr_err("%s: dev is NULL\n", __func__);
+		return -EINVAL;
+	}
+	/* NOTE(review): container_of on a non-NULL dev cannot yield NULL;
+	 * the check below is vestigial but harmless. */
+	pdata = container_of(dev, struct adv7533, dev_info);
+	if (!pdata) {
+		pr_err("%s: pdata is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	/* dump main addr*/
+	pr_err("========Main I2C=0x%02x Start==========\n",
+		pdata->main_i2c_addr);
+	for (addr = 0; addr <= 0xFF; addr++) {
+		rc = adv7533_read(pdata, I2C_ADDR_MAIN,
+			(u8)addr, &byte_val, 1);
+		if (rc)
+			pr_err("%s: read reg=0x%02x failed @ addr=0x%02x\n",
+				__func__, addr, pdata->main_i2c_addr);
+		else
+			pr_err("0x%02x -> 0x%02X\n", addr, byte_val);
+	}
+	pr_err("========Main I2C=0x%02x End==========\n",
+		pdata->main_i2c_addr);
+	/* dump CEC addr*/
+	pr_err("=======CEC I2C=0x%02x Start=========\n",
+		pdata->cec_dsi_i2c_addr);
+	for (addr = 0; addr <= 0xFF; addr++) {
+		rc = adv7533_read(pdata, I2C_ADDR_CEC_DSI,
+			(u8)addr, &byte_val, 1);
+		if (rc)
+			pr_err("%s: read reg=0x%02x failed @ addr=0x%02x\n",
+				__func__, addr, pdata->cec_dsi_i2c_addr);
+		else
+			pr_err("0x%02x -> 0x%02X\n", addr, byte_val);
+	}
+	pr_err("========CEC I2C=0x%02x End==========\n",
+		pdata->cec_dsi_i2c_addr);
+
+	return rc;
+}
+
+/*
+ * adv7533_write_array() - program a table of register writes.
+ * @cfg:  table of adv7533_reg_cfg entries.
+ * @size: table size in BYTES (callers pass sizeof(table)); converted to
+ *        an entry count below.
+ *
+ * Stops at the first failed write. Returns 0 on success or the first
+ * error code. Note: on success control falls through the w_regs_fail
+ * label with ret == 0, so no error is logged.
+ */
+static int adv7533_write_array(struct adv7533 *pdata,
+	struct adv7533_reg_cfg *cfg, int size)
+{
+	int ret = 0;
+	int i;
+
+	size = size / sizeof(struct adv7533_reg_cfg);
+	for (i = 0; i < size; i++) {
+		switch (cfg[i].i2c_addr) {
+		case I2C_ADDR_MAIN:
+			ret = adv7533_write(pdata, I2C_ADDR_MAIN,
+				cfg[i].reg, cfg[i].val);
+			if (ret != 0)
+				pr_err("%s: adv7533_write_byte returned %d\n",
+					__func__, ret);
+			break;
+		case I2C_ADDR_CEC_DSI:
+			ret = adv7533_write(pdata, I2C_ADDR_CEC_DSI,
+				cfg[i].reg, cfg[i].val);
+			if (ret != 0)
+				pr_err("%s: adv7533_write_byte returned %d\n",
+					__func__, ret);
+			break;
+		default:
+			/* table entry with an unknown page selector */
+			ret = -EINVAL;
+			pr_err("%s: Default case? BUG!\n", __func__);
+			break;
+		}
+		if (ret != 0) {
+			pr_err("%s: adv7533 reg writes failed. ", __func__);
+			pr_err("Last write %02X to %02X\n",
+				cfg[i].val, cfg[i].reg);
+			goto w_regs_fail;
+		}
+		if (cfg[i].sleep_in_ms)
+			msleep(cfg[i].sleep_in_ms);
+	}
+
+w_regs_fail:
+	if (ret != 0)
+		pr_err("%s: Exiting with ret = %d after %d writes\n",
+			__func__, ret, i);
+	return ret;
+}
+
+/*
+ * adv7533_read_device_rev() - read the chip revision register.
+ *
+ * The revision value itself is discarded; only the read status is
+ * returned — presumably used as an i2c-liveness check at probe time
+ * (TODO confirm against the caller).
+ */
+static int adv7533_read_device_rev(struct adv7533 *pdata)
+{
+	u8 rev = 0;
+	int ret;
+
+	ret = adv7533_read(pdata, I2C_ADDR_MAIN, ADV7533_REG_CHIP_REVISION,
+							&rev, 1);
+
+	return ret;
+}
+
+/*
+ * adv7533_program_i2c_addr() - reprogram the chip's DSI/CEC i2c address.
+ *
+ * When the DT-provided CEC/DSI address differs from the power-on default
+ * (I2C_ADDR_CEC_DSI), write the new 8-bit (shifted) address into the
+ * map register on the main page so the secondary page responds there.
+ * Returns 0 on success or the write error.
+ */
+static int adv7533_program_i2c_addr(struct adv7533 *pdata)
+{
+	u8 i2c_8bits = pdata->cec_dsi_i2c_addr << 1;
+	int ret = 0;
+
+	if (pdata->cec_dsi_i2c_addr != I2C_ADDR_CEC_DSI) {
+		ret = adv7533_write(pdata, I2C_ADDR_MAIN,
+					ADV7533_DSI_CEC_I2C_ADDR_REG,
+					i2c_8bits);
+
+		if (ret)
+			pr_err("%s: write err CEC_ADDR[0x%02x] main_addr=0x%02x\n",
+				__func__, ADV7533_DSI_CEC_I2C_ADDR_REG,
+				pdata->main_i2c_addr);
+	}
+
+	return ret;
+}
+
+/*
+ * adv7533_parse_vreg_dt() - parse the regulator configuration from DT.
+ *
+ * Reads the parallel arrays qcom,supply-names / min-voltage-level /
+ * max-voltage-level / enable-load / disable-load (and the optional
+ * qcom,post-on-sleep) into @mp. Supplies are optional: on any failure
+ * the partially built config is freed and mp->num_vreg reset to 0, so
+ * the caller sees an all-or-nothing result.
+ */
+static void adv7533_parse_vreg_dt(struct device *dev,
+				struct dss_module_power *mp)
+{
+	int i, rc = 0;
+	int dt_vreg_total = 0;
+	struct device_node *of_node = NULL;
+	u32 *val_array = NULL;
+
+	of_node = dev->of_node;
+
+	dt_vreg_total = of_property_count_strings(of_node, "qcom,supply-names");
+	if (dt_vreg_total <= 0) {
+		pr_warn("%s: vreg not found. rc=%d\n", __func__,
+					dt_vreg_total);
+		goto end;
+	}
+	mp->num_vreg = dt_vreg_total;
+	mp->vreg_config = devm_kzalloc(dev, sizeof(struct dss_vreg) *
+			dt_vreg_total, GFP_KERNEL);
+	if (!mp->vreg_config)
+		goto end;
+
+	/* scratch buffer reused for each u32-array property */
+	val_array = devm_kzalloc(dev, sizeof(u32) * dt_vreg_total, GFP_KERNEL);
+	if (!val_array)
+		goto end;
+
+	for (i = 0; i < dt_vreg_total; i++) {
+		const char *st = NULL;
+		/* vreg-name */
+		rc = of_property_read_string_index(of_node,
+				"qcom,supply-names", i, &st);
+		if (rc) {
+			pr_warn("%s: error reading name. i=%d, rc=%d\n",
+				__func__, i, rc);
+			goto end;
+		}
+		snprintf(mp->vreg_config[i].vreg_name, 32, "%s", st);
+
+		/* vreg-min-voltage */
+		memset(val_array, 0, sizeof(u32) * dt_vreg_total);
+		rc = of_property_read_u32_array(of_node,
+			"qcom,min-voltage-level", val_array,
+					dt_vreg_total);
+		if (rc) {
+			pr_warn("%s: error read min volt. rc=%d\n",
+						__func__, rc);
+			goto end;
+		}
+		mp->vreg_config[i].min_voltage = val_array[i];
+
+		/* vreg-max-voltage */
+		memset(val_array, 0, sizeof(u32) * dt_vreg_total);
+		rc = of_property_read_u32_array(of_node,
+				"qcom,max-voltage-level", val_array,
+						dt_vreg_total);
+		if (rc) {
+			pr_warn("%s: error read max volt. rc=%d\n",
+					__func__, rc);
+			goto end;
+		}
+		mp->vreg_config[i].max_voltage = val_array[i];
+
+		/* vreg-op-mode */
+		memset(val_array, 0, sizeof(u32) * dt_vreg_total);
+		rc = of_property_read_u32_array(of_node,
+				"qcom,enable-load", val_array,
+					dt_vreg_total);
+		if (rc) {
+			pr_warn("%s: error read enable load. rc=%d\n",
+				__func__, rc);
+			goto end;
+		}
+		mp->vreg_config[i].enable_load = val_array[i];
+
+		memset(val_array, 0, sizeof(u32) * dt_vreg_total);
+		rc = of_property_read_u32_array(of_node,
+			"qcom,disable-load", val_array,
+			dt_vreg_total);
+		if (rc) {
+			pr_warn("%s: error read disable load. rc=%d\n",
+				__func__, rc);
+			goto end;
+		}
+		mp->vreg_config[i].disable_load = val_array[i];
+
+		/* post-on-sleep: optional, failure only warns */
+		memset(val_array, 0, sizeof(u32) * dt_vreg_total);
+		rc = of_property_read_u32_array(of_node,
+				"qcom,post-on-sleep", val_array,
+						dt_vreg_total);
+		if (rc)
+			pr_warn("%s: error read post on sleep. rc=%d\n",
+					__func__, rc);
+		else
+			mp->vreg_config[i].post_on_sleep = val_array[i];
+
+		pr_debug("%s: %s min=%d, max=%d, enable=%d disable=%d post-on-sleep=%d\n",
+			__func__,
+			mp->vreg_config[i].vreg_name,
+			mp->vreg_config[i].min_voltage,
+			mp->vreg_config[i].max_voltage,
+			mp->vreg_config[i].enable_load,
+			mp->vreg_config[i].disable_load,
+			mp->vreg_config[i].post_on_sleep);
+	}
+
+	devm_kfree(dev, val_array);
+	return;
+
+end:
+	/* failure: drop the partial config so callers see none at all */
+	if (mp->vreg_config) {
+		devm_kfree(dev, mp->vreg_config);
+		mp->vreg_config = NULL;
+	}
+	mp->num_vreg = 0;
+
+	if (val_array)
+		devm_kfree(dev, val_array);
+}
+
+/*
+ * adv7533_parse_dt() - read device tree configuration into @pdata.
+ *
+ * instance_id, adi,main-addr, adi,cec-dsi-addr and adi,video-mode are
+ * mandatory and fail the parse when absent. Audio, supplies, pinctrl
+ * and gpios are optional and only logged on failure.
+ * Returns 0 on success, negative errno if a mandatory property is
+ * missing.
+ */
+static int adv7533_parse_dt(struct device *dev,
+	struct adv7533 *pdata)
+{
+	struct device_node *np = dev->of_node;
+	u32 temp_val = 0;
+	int rc = 0;
+	int ret = 0;
+
+	ret = of_property_read_u32(np, "instance_id", &temp_val);
+	pr_debug("%s: DT property %s is %X\n", __func__, "instance_id",
+		temp_val);
+	if (ret)
+		goto end;
+	pdata->dev_info.instance_id = temp_val;
+
+	ret = of_property_read_u32(np, "adi,main-addr", &temp_val);
+	pr_debug("%s: DT property %s is %X\n", __func__, "adi,main-addr",
+		temp_val);
+	if (ret)
+		goto end;
+	pdata->main_i2c_addr = (u8)temp_val;
+
+	ret = of_property_read_u32(np, "adi,cec-dsi-addr", &temp_val);
+	pr_debug("%s: DT property %s is %X\n", __func__, "adi,cec-dsi-addr",
+		temp_val);
+	if (ret)
+		goto end;
+	pdata->cec_dsi_i2c_addr = (u8)temp_val;
+
+	ret = of_property_read_u32(np, "adi,video-mode", &temp_val);
+	pr_debug("%s: DT property %s is %X\n", __func__, "adi,video-mode",
+		temp_val);
+	if (ret)
+		goto end;
+	pdata->video_mode = (u8)temp_val;
+
+	pdata->audio = of_property_read_bool(np, "adi,enable-audio");
+
+	adv7533_parse_vreg_dt(dev, &pdata->power_data);
+
+	/*
+	 * Pinctrl is optional. Only look up the states when the handle is
+	 * valid: calling pinctrl_lookup_state() on an ERR_PTR handle
+	 * dereferences an error pointer, and leaking a lookup error into
+	 * 'ret' would fail the probe for an optional resource.
+	 */
+	pdata->ts_pinctrl = devm_pinctrl_get(dev);
+	if (IS_ERR_OR_NULL(pdata->ts_pinctrl)) {
+		rc = PTR_ERR(pdata->ts_pinctrl);
+		pr_err("%s: Pincontrol DT property returned %X\n",
+			__func__, rc);
+	} else {
+		pdata->pinctrl_state_active = pinctrl_lookup_state(
+			pdata->ts_pinctrl, PINCTRL_STATE_ACTIVE);
+		if (IS_ERR_OR_NULL(pdata->pinctrl_state_active)) {
+			rc = PTR_ERR(pdata->pinctrl_state_active);
+			pr_err("Can not lookup %s pinstate %d\n",
+				PINCTRL_STATE_ACTIVE, rc);
+		}
+
+		pdata->pinctrl_state_suspend = pinctrl_lookup_state(
+			pdata->ts_pinctrl, PINCTRL_STATE_SUSPEND);
+		if (IS_ERR_OR_NULL(pdata->pinctrl_state_suspend)) {
+			rc = PTR_ERR(pdata->pinctrl_state_suspend);
+			pr_err("Can not lookup %s pinstate %d\n",
+				PINCTRL_STATE_SUSPEND, rc);
+		}
+	}
+
+	pdata->disable_gpios = of_property_read_bool(np,
+			"adi,disable-gpios");
+
+	if (!(pdata->disable_gpios)) {
+		pdata->irq_gpio = of_get_named_gpio_flags(np,
+				"adi,irq-gpio", 0, &pdata->irq_flags);
+
+		pdata->hpd_irq_gpio = of_get_named_gpio_flags(np,
+				"adi,hpd-irq-gpio", 0,
+				&pdata->hpd_irq_flags);
+
+		pdata->switch_gpio = of_get_named_gpio_flags(np,
+				"adi,switch-gpio", 0, &pdata->switch_flags);
+	}
+
+end:
+	return ret;
+}
+
+/*
+ * adv7533_gpio_configure() - request/free the irq, hpd-irq and switch gpios.
+ * @on: true to request and configure, false to release.
+ *
+ * The irq gpio is mandatory; hpd-irq and switch gpios are optional.
+ * Returns 0 on success, negative errno on failure (already-requested
+ * gpios are freed on the error paths).
+ */
+static int adv7533_gpio_configure(struct adv7533 *pdata, bool on)
+{
+	int ret = 0;
+
+	if (pdata->disable_gpios)
+		return 0;
+
+	if (on) {
+		if (gpio_is_valid(pdata->irq_gpio)) {
+			ret = gpio_request(pdata->irq_gpio, "adv7533_irq_gpio");
+			if (ret) {
+				pr_err("%d unable to request gpio [%d] ret=%d\n",
+					__LINE__, pdata->irq_gpio, ret);
+				goto err_none;
+			}
+			ret = gpio_direction_input(pdata->irq_gpio);
+			if (ret) {
+				pr_err("unable to set dir for gpio[%d]\n",
+					pdata->irq_gpio);
+				goto err_irq_gpio;
+			}
+		} else {
+			pr_err("irq gpio not provided\n");
+			/* mandatory gpio missing: report failure (was 0) */
+			ret = -EINVAL;
+			goto err_none;
+		}
+
+		if (gpio_is_valid(pdata->hpd_irq_gpio)) {
+			ret = gpio_request(pdata->hpd_irq_gpio,
+				"adv7533_hpd_irq_gpio");
+			if (ret) {
+				pr_err("unable to request gpio [%d]\n",
+					pdata->hpd_irq_gpio);
+				goto err_irq_gpio;
+			}
+			ret = gpio_direction_input(pdata->hpd_irq_gpio);
+			if (ret) {
+				pr_err("unable to set dir for gpio[%d]\n",
+					pdata->hpd_irq_gpio);
+				goto err_hpd_irq_gpio;
+			}
+		} else {
+			pr_warn("hpd irq gpio not provided\n");
+		}
+
+		if (gpio_is_valid(pdata->switch_gpio)) {
+			ret = gpio_request(pdata->switch_gpio,
+				"adv7533_switch_gpio");
+			if (ret) {
+				/* fix: log the switch gpio, not the irq gpio */
+				pr_err("%d unable to request gpio [%d] ret=%d\n",
+					__LINE__, pdata->switch_gpio, ret);
+				goto err_hpd_irq_gpio;
+			}
+
+			ret = gpio_direction_output(pdata->switch_gpio, 0);
+			if (ret) {
+				pr_err("unable to set dir for gpio [%d]\n",
+					pdata->switch_gpio);
+				goto err_switch_gpio;
+			}
+
+			/* drive the switch to its active level, then wait
+			 * for the bridge to come out of reset */
+			gpio_set_value(pdata->switch_gpio,
+				!pdata->switch_flags);
+			msleep(ADV7533_RESET_DELAY);
+		}
+
+		return 0;
+	}
+
+	/* off path: release whatever gpios were requested */
+	if (gpio_is_valid(pdata->irq_gpio))
+		gpio_free(pdata->irq_gpio);
+	if (gpio_is_valid(pdata->hpd_irq_gpio))
+		gpio_free(pdata->hpd_irq_gpio);
+	if (gpio_is_valid(pdata->switch_gpio))
+		gpio_free(pdata->switch_gpio);
+
+	return 0;
+
+err_switch_gpio:
+	if (gpio_is_valid(pdata->switch_gpio))
+		gpio_free(pdata->switch_gpio);
+err_hpd_irq_gpio:
+	if (gpio_is_valid(pdata->hpd_irq_gpio))
+		gpio_free(pdata->hpd_irq_gpio);
+err_irq_gpio:
+	if (gpio_is_valid(pdata->irq_gpio))
+		gpio_free(pdata->irq_gpio);
+err_none:
+	return ret;
+}
+
+/*
+ * adv7533_notify_clients() - invoke every registered client callback
+ * with @event.
+ *
+ * NOTE(review): 'c' comes from list_entry() and cannot be NULL, and the
+ * pr_debug dereferences it before the check below; the NULL part of the
+ * check is vestigial.
+ */
+static void adv7533_notify_clients(struct msm_dba_device_info *dev,
+		enum msm_dba_callback_event event)
+{
+	struct msm_dba_client_info *c;
+	struct list_head *pos = NULL;
+
+	if (!dev) {
+		pr_err("%s: invalid input\n", __func__);
+		return;
+	}
+
+	list_for_each(pos, &dev->client_list) {
+		c = list_entry(pos, struct msm_dba_client_info, list);
+
+		pr_debug("%s: notifying event %d to client %s\n", __func__,
+			event, c->client_name);
+
+		if (c && c->cb)
+			c->cb(c->cb_data, event);
+	}
+}
+
+/*
+ * adv7533_read_edid() - read @size bytes of EDID into @edid_buf.
+ *
+ * The chip exposes the EDID through a separate i2c address published in
+ * main-page register 0x43 (stored as an 8-bit address, hence the >> 1).
+ * The block is fetched in two halves of size/2 bytes each.
+ *
+ * NOTE(review): ret is never set after initialization, so the function
+ * always returns 0 and i2c failures are not propagated to the caller —
+ * TODO confirm whether that is intentional.
+ */
+u32 adv7533_read_edid(struct adv7533 *pdata, u32 size, char *edid_buf)
+{
+	u32 ret = 0, read_size = size / 2;
+	u8 edid_addr = 0;
+	int ndx;
+
+	if (!pdata || !edid_buf)
+		return 0;
+
+	pr_debug("%s: size %d\n", __func__, size);
+
+	adv7533_read(pdata, I2C_ADDR_MAIN, 0x43, &edid_addr, 1);
+
+	pr_debug("%s: edid address 0x%x\n", __func__, edid_addr);
+
+	adv7533_read(pdata, edid_addr >> 1, 0x00, edid_buf, read_size);
+
+	adv7533_read(pdata, edid_addr >> 1, read_size,
+		edid_buf + read_size, read_size);
+
+	for (ndx = 0; ndx < size; ndx += 4)
+		pr_debug("%s: EDID[%02x-%02x] %02x %02x %02x %02x\n",
+			__func__, ndx, ndx + 3,
+			edid_buf[ndx + 0], edid_buf[ndx + 1],
+			edid_buf[ndx + 2], edid_buf[ndx + 3]);
+
+	return ret;
+}
+
+/*
+ * adv7533_cec_prepare_msg() - load a CEC message into the tx registers.
+ * @msg:  msg[0] = header, msg[1] = opcode, msg[2..] = operands.
+ * @size: total message length including header and opcode.
+ *
+ * Returns 0 on success, -EINVAL on bad arguments, or the i2c error from
+ * the final length write.  (The original always returned -EINVAL: ret
+ * was initialized but never updated on the success path.)
+ */
+static int adv7533_cec_prepare_msg(struct adv7533 *pdata, u8 *msg, u32 size)
+{
+	int i, ret = -EINVAL;
+	int op_sz;
+
+	if (!pdata || !msg) {
+		pr_err("%s: invalid input\n", __func__);
+		goto end;
+	}
+
+	/* size is unsigned: 'size <= 0' in the original was just '== 0' */
+	if (size == 0 || size > CEC_MSG_SIZE) {
+		pr_err("%s: ERROR: invalid msg size\n", __func__);
+		goto end;
+	}
+
+	/* operand size = total size - header size - opcode size */
+	op_sz = size - 2;
+
+	/* write header */
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x70, msg[0]);
+
+	/* write opcode */
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x71, msg[1]);
+
+	/* write operands */
+	for (i = 0; i < op_sz && i < MAX_OPERAND_SIZE; i++) {
+		pr_debug("%s: writing operands\n", __func__);
+		adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x72 + i, msg[i + 2]);
+	}
+
+	/* writing the total length last; report its status */
+	ret = adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x80, size);
+
+end:
+	return ret;
+}
+
+/*
+ * adv7533_rd_cec_msg() - read one received CEC message from hardware.
+ * @cec_buf: destination for CEC_MSG_SIZE bytes.
+ * @msg_num: which rx buffer (ADV7533_CEC_BUF1..BUF3) to read.
+ *
+ * Returns 0 on success, -EINVAL on bad arguments, or the i2c read error.
+ * (The original always returned -EINVAL: ret was never updated after a
+ * successful read.)
+ */
+static int adv7533_rd_cec_msg(struct adv7533 *pdata, u8 *cec_buf, int msg_num)
+{
+	int ret = -EINVAL;
+	u8 reg = 0;
+
+	if (!pdata || !cec_buf) {
+		pr_err("%s: Invalid input\n", __func__);
+		goto end;
+	}
+
+	/* base register of the selected rx buffer */
+	if (msg_num == ADV7533_CEC_BUF1)
+		reg = 0x85;
+	else if (msg_num == ADV7533_CEC_BUF2)
+		reg = 0x97;
+	else if (msg_num == ADV7533_CEC_BUF3)
+		reg = 0xA8;
+	else
+		pr_err("%s: Invalid msg_num %d\n", __func__, msg_num);
+
+	if (!reg)
+		goto end;
+
+	ret = adv7533_read(pdata, I2C_ADDR_CEC_DSI, reg, cec_buf,
+		CEC_MSG_SIZE);
+end:
+	return ret;
+}
+
+/*
+ * adv7533_handle_hdcp_intr() - decode the HDCP interrupt status.
+ *
+ * On an HDCP error, reads register 0xC8 and logs the DDC controller
+ * error code held in its top nibble.
+ */
+static void adv7533_handle_hdcp_intr(struct adv7533 *pdata, u8 hdcp_status)
+{
+	u8 ddc_status = 0;
+
+	if (!pdata) {
+		pr_err("%s: Invalid input\n", __func__);
+		goto end;
+	}
+
+	/* HDCP ready for read */
+	if (hdcp_status & BIT(6))
+		pr_debug("%s: BKSV FLAG\n", __func__);
+
+	/* check for HDCP error */
+	if (hdcp_status & BIT(7)) {
+		pr_err("%s: HDCP ERROR\n", __func__);
+
+		/* get error details */
+		adv7533_read(pdata, I2C_ADDR_MAIN, 0xC8, &ddc_status, 1);
+
+		/*
+		 * Fix: '>>' binds tighter than '&', so the original
+		 * 'ddc_status & 0xF0 >> 4' masked the LOW nibble.
+		 * The error code lives in the high nibble.
+		 */
+		switch ((ddc_status & 0xF0) >> 4) {
+		case 0:
+			pr_debug("%s: DDC: NO ERROR\n", __func__);
+			break;
+		case 1:
+			pr_err("%s: DDC: BAD RX BKSV\n", __func__);
+			break;
+		case 2:
+			pr_err("%s: DDC: Ri MISMATCH\n", __func__);
+			break;
+		case 3:
+			pr_err("%s: DDC: Pj MISMATCH\n", __func__);
+			break;
+		case 4:
+			pr_err("%s: DDC: I2C ERROR\n", __func__);
+			break;
+		case 5:
+			pr_err("%s: DDC: TIMED OUT DS DONE\n", __func__);
+			break;
+		case 6:
+			pr_err("%s: DDC: MAX CAS EXC\n", __func__);
+			break;
+		default:
+			pr_debug("%s: DDC: UNKNOWN ERROR\n", __func__);
+		}
+	}
+end:
+	return;
+}
+
+/*
+ * adv7533_handle_cec_intr() - service CEC tx/rx interrupt status.
+ *
+ * Bits 0-2 of @cec_status flag received messages in rx buffers 1-3;
+ * each pending buffer is copied into pdata->cec_msg[] together with its
+ * timestamp bits from register 0x96, and clients are notified. The rx
+ * interrupt bits are then cleared by toggling them in register 0xBA.
+ */
+static void adv7533_handle_cec_intr(struct adv7533 *pdata, u8 cec_status)
+{
+	u8 cec_int_clear = 0x08;
+	bool cec_rx_intr = false;
+	u8 cec_rx_ready = 0;
+	u8 cec_rx_timestamp = 0;
+
+	if (!pdata) {
+		pr_err("%s: Invalid input\n", __func__);
+		goto end;
+	}
+
+	if (cec_status & 0x07) {
+		cec_rx_intr = true;
+		/* latch current 0xBA so clearing preserves other bits */
+		adv7533_read(pdata, I2C_ADDR_CEC_DSI, 0xBA, &cec_int_clear, 1);
+	}
+
+	if (cec_status & BIT(5))
+		pr_debug("%s: CEC TX READY\n", __func__);
+
+	if (cec_status & BIT(4))
+		pr_debug("%s: CEC TX Arbitration lost\n", __func__);
+
+	if (cec_status & BIT(3))
+		pr_debug("%s: CEC TX retry timout\n", __func__);
+
+	if (!cec_rx_intr)
+		return;
+
+
+	adv7533_read(pdata, I2C_ADDR_CEC_DSI, 0xB9, &cec_rx_ready, 1);
+
+	adv7533_read(pdata, I2C_ADDR_CEC_DSI, 0x96, &cec_rx_timestamp, 1);
+
+	if (cec_rx_ready & BIT(0)) {
+		pr_debug("%s: CEC Rx buffer 1 ready\n", __func__);
+		adv7533_rd_cec_msg(pdata,
+			pdata->cec_msg[ADV7533_CEC_BUF1].buf,
+			ADV7533_CEC_BUF1);
+
+		pdata->cec_msg[ADV7533_CEC_BUF1].pending = true;
+
+		pdata->cec_msg[ADV7533_CEC_BUF1].timestamp =
+			cec_rx_timestamp & (BIT(0) | BIT(1));
+
+		adv7533_notify_clients(&pdata->dev_info,
+			MSM_DBA_CB_CEC_READ_PENDING);
+	}
+
+	if (cec_rx_ready & BIT(1)) {
+		pr_debug("%s: CEC Rx buffer 2 ready\n", __func__);
+		adv7533_rd_cec_msg(pdata,
+			pdata->cec_msg[ADV7533_CEC_BUF2].buf,
+			ADV7533_CEC_BUF2);
+
+		pdata->cec_msg[ADV7533_CEC_BUF2].pending = true;
+
+		pdata->cec_msg[ADV7533_CEC_BUF2].timestamp =
+			cec_rx_timestamp & (BIT(2) | BIT(3));
+
+		adv7533_notify_clients(&pdata->dev_info,
+			MSM_DBA_CB_CEC_READ_PENDING);
+	}
+
+	if (cec_rx_ready & BIT(2)) {
+		pr_debug("%s: CEC Rx buffer 3 ready\n", __func__);
+		adv7533_rd_cec_msg(pdata,
+			pdata->cec_msg[ADV7533_CEC_BUF3].buf,
+			ADV7533_CEC_BUF3);
+
+		pdata->cec_msg[ADV7533_CEC_BUF3].pending = true;
+
+		pdata->cec_msg[ADV7533_CEC_BUF3].timestamp =
+			cec_rx_timestamp & (BIT(4) | BIT(5));
+
+		adv7533_notify_clients(&pdata->dev_info,
+			MSM_DBA_CB_CEC_READ_PENDING);
+	}
+
+	/* set-then-clear the rx interrupt bits in 0xBA */
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0xBA,
+		cec_int_clear | (cec_status & 0x07));
+
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0xBA, cec_int_clear & ~0x07);
+
+end:
+	return;
+}
+
+/*
+ * adv7533_edid_read_init() - kick off an EDID read in the chip.
+ *
+ * Powers up (0x41) and triggers an EDID re-read (0xC9); completion is
+ * signalled later via the EDID-ready interrupt.
+ * Returns 0 on success or the first i2c write error.  (The original
+ * always returned -EINVAL: the write results were discarded.)
+ */
+static int adv7533_edid_read_init(struct adv7533 *pdata)
+{
+	int ret = -EINVAL;
+
+	if (!pdata) {
+		pr_err("%s: invalid pdata\n", __func__);
+		goto end;
+	}
+
+	/* initiate edid read in adv7533 */
+	ret = adv7533_write(pdata, I2C_ADDR_MAIN, 0x41, 0x10);
+	if (!ret)
+		ret = adv7533_write(pdata, I2C_ADDR_MAIN, 0xC9, 0x13);
+
+end:
+	return ret;
+}
+
+/*
+ * adv7533_handle_hpd_intr() - decode the hotplug state from reg 0x42.
+ *
+ * Returns the connected state encoded through ERR_PTR(): (void *)1 when
+ * both HPD and monitor-sense bits are set, (void *)0 otherwise. Small
+ * positive values are not in the IS_ERR() range, so the caller's
+ * IS_ERR/PTR_ERR dance recovers the plain 0/1 — unusual, but the error
+ * path (NULL pdata falling through with ret == 0) never actually
+ * produces an IS_ERR value either.
+ */
+static void *adv7533_handle_hpd_intr(struct adv7533 *pdata)
+{
+	int ret = 0;
+	u8 hpd_state;
+	u8 connected = 0, disconnected = 0;
+
+	if (!pdata) {
+		pr_err("%s: invalid pdata\n", __func__);
+		goto end;
+	}
+
+	adv7533_read(pdata, I2C_ADDR_MAIN, 0x42, &hpd_state, 1);
+
+	/* BIT(5)=monitor sense, BIT(6)=HPD; both set means fully attached */
+	connected = (hpd_state & BIT(5)) && (hpd_state & BIT(6));
+	disconnected = !(hpd_state & (BIT(5) | BIT(6)));
+
+	if (connected) {
+		pr_debug("%s: Rx CONNECTED\n", __func__);
+	} else if (disconnected) {
+		pr_debug("%s: Rx DISCONNECTED\n", __func__);
+
+		adv7533_notify_clients(&pdata->dev_info,
+			MSM_DBA_CB_HPD_DISCONNECT);
+	} else {
+		pr_debug("%s: HPD Intermediate state\n", __func__);
+	}
+
+	ret = connected ? 1 : 0;
+end:
+	return ERR_PTR(ret);
+}
+
+/*
+ * adv7533_enable_interrupts() - set interrupt-enable bits per @interrupts.
+ * @interrupts: bitmask of CFG_*_INTERRUPTS groups to enable.
+ *
+ * Read-modify-writes registers 0x94 and 0x95, skipping the write when
+ * no bit actually changes. Always returns 0.
+ */
+static int adv7533_enable_interrupts(struct adv7533 *pdata, int interrupts)
+{
+	int ret = 0;
+	u8 reg_val, init_reg_val;
+
+	if (!pdata) {
+		pr_err("%s: invalid input\n", __func__);
+		goto end;
+	}
+
+	adv7533_read(pdata, I2C_ADDR_MAIN, 0x94, &reg_val, 1);
+
+	init_reg_val = reg_val;
+
+	if (interrupts & CFG_HPD_INTERRUPTS)
+		reg_val |= HPD_INTERRUPTS;
+
+	if (interrupts & CFG_EDID_INTERRUPTS)
+		reg_val |= EDID_INTERRUPTS;
+
+	if (interrupts & CFG_HDCP_INTERRUPTS)
+		reg_val |= HDCP_INTERRUPTS1;
+
+	if (reg_val != init_reg_val) {
+		pr_debug("%s: enabling 0x94 interrupts\n", __func__);
+		adv7533_write(pdata, I2C_ADDR_MAIN, 0x94, reg_val);
+	}
+
+	adv7533_read(pdata, I2C_ADDR_MAIN, 0x95, &reg_val, 1);
+
+	init_reg_val = reg_val;
+
+	if (interrupts & CFG_HDCP_INTERRUPTS)
+		reg_val |= HDCP_INTERRUPTS2;
+
+	if (interrupts & CFG_CEC_INTERRUPTS)
+		reg_val |= CEC_INTERRUPTS;
+
+	if (reg_val != init_reg_val) {
+		pr_debug("%s: enabling 0x95 interrupts\n", __func__);
+		adv7533_write(pdata, I2C_ADDR_MAIN, 0x95, reg_val);
+	}
+end:
+	return ret;
+}
+
+/*
+ * adv7533_disable_interrupts() - clear interrupt-enable bits per
+ * @interrupts. Mirror of adv7533_enable_interrupts(); same register
+ * pair (0x94/0x95), same write-only-on-change behavior. Always
+ * returns 0.
+ */
+static int adv7533_disable_interrupts(struct adv7533 *pdata, int interrupts)
+{
+	int ret = 0;
+	u8 reg_val, init_reg_val;
+
+	if (!pdata) {
+		pr_err("%s: invalid input\n", __func__);
+		goto end;
+	}
+
+	adv7533_read(pdata, I2C_ADDR_MAIN, 0x94, &reg_val, 1);
+
+	init_reg_val = reg_val;
+
+	if (interrupts & CFG_HPD_INTERRUPTS)
+		reg_val &= ~HPD_INTERRUPTS;
+
+	if (interrupts & CFG_EDID_INTERRUPTS)
+		reg_val &= ~EDID_INTERRUPTS;
+
+	if (interrupts & CFG_HDCP_INTERRUPTS)
+		reg_val &= ~HDCP_INTERRUPTS1;
+
+	if (reg_val != init_reg_val) {
+		pr_debug("%s: disabling 0x94 interrupts\n", __func__);
+		adv7533_write(pdata, I2C_ADDR_MAIN, 0x94, reg_val);
+	}
+
+	adv7533_read(pdata, I2C_ADDR_MAIN, 0x95, &reg_val, 1);
+
+	init_reg_val = reg_val;
+
+	if (interrupts & CFG_HDCP_INTERRUPTS)
+		reg_val &= ~HDCP_INTERRUPTS2;
+
+	if (interrupts & CFG_CEC_INTERRUPTS)
+		reg_val &= ~CEC_INTERRUPTS;
+
+	if (reg_val != init_reg_val) {
+		pr_debug("%s: disabling 0x95 interrupts\n", __func__);
+		adv7533_write(pdata, I2C_ADDR_MAIN, 0x95, reg_val);
+	}
+end:
+	return ret;
+}
+
+/*
+ * adv7533_intr_work() - bottom half for the bridge interrupt.
+ *
+ * Reads the status registers (0x96/0x97), dispatches HPD, EDID, HDCP
+ * and CEC handling, clears the latched status, re-enables the
+ * interrupt groups masked in adv7533_irq(), and finally triggers an
+ * EDID read when a cable connect was detected.
+ */
+static void adv7533_intr_work(struct work_struct *work)
+{
+	int ret;
+	u8 int_status  = 0xFF;
+	u8 hdcp_cec_status = 0xFF;
+	u32 interrupts = 0;
+	int connected = false;
+	struct adv7533 *pdata;
+	struct delayed_work *dw = to_delayed_work(work);
+
+	pdata = container_of(dw, struct adv7533,
+			adv7533_intr_work_id);
+	if (!pdata) {
+		pr_err("%s: invalid input\n", __func__);
+		return;
+	}
+
+	/* READ Interrupt registers */
+	adv7533_read(pdata, I2C_ADDR_MAIN, 0x96, &int_status, 1);
+	adv7533_read(pdata, I2C_ADDR_MAIN, 0x97, &hdcp_cec_status, 1);
+
+	if (int_status & (BIT(6) | BIT(7))) {
+		/* connected state comes back ERR_PTR-encoded: 0 or 1 */
+		void *ptr_val = adv7533_handle_hpd_intr(pdata);
+
+		ret = PTR_ERR(ptr_val);
+		if (IS_ERR(ptr_val)) {
+			pr_err("%s: error in hpd handing: %d\n",
+				__func__, ret);
+			goto reset;
+		}
+		connected = ret;
+	}
+
+	/* EDID ready for read */
+	if ((int_status & BIT(2)) && pdata->is_power_on) {
+		pr_debug("%s: EDID READY\n", __func__);
+
+		/* NOTE(review): adv7533_read_edid() currently always
+		 * returns 0, so this error branch cannot fire. */
+		ret = adv7533_read_edid(pdata, sizeof(pdata->edid_buf),
+			pdata->edid_buf);
+		if (ret)
+			pr_err("%s: edid read failed\n", __func__);
+
+		adv7533_notify_clients(&pdata->dev_info,
+			MSM_DBA_CB_HPD_CONNECT);
+	}
+
+	if (pdata->hdcp_enabled)
+		adv7533_handle_hdcp_intr(pdata, hdcp_cec_status);
+
+	if (pdata->cec_enabled)
+		adv7533_handle_cec_intr(pdata, hdcp_cec_status);
+reset:
+	/* Clear HPD/EDID interrupts */
+	adv7533_write(pdata, I2C_ADDR_MAIN, 0x96, int_status);
+
+	/* Clear HDCP/CEC interrupts */
+	adv7533_write(pdata, I2C_ADDR_MAIN, 0x97, hdcp_cec_status);
+
+	/* Re-enable HPD interrupts */
+	interrupts |= CFG_HPD_INTERRUPTS;
+
+	/* Re-enable EDID interrupts */
+	interrupts |= CFG_EDID_INTERRUPTS;
+
+	/* Re-enable HDCP interrupts */
+	if (pdata->hdcp_enabled)
+		interrupts |= CFG_HDCP_INTERRUPTS;
+
+	/* Re-enable CEC interrupts */
+	if (pdata->cec_enabled)
+		interrupts |= CFG_CEC_INTERRUPTS;
+
+	if (adv7533_enable_interrupts(pdata, interrupts))
+		pr_err("%s: err enabling interrupts\n", __func__);
+
+	/* initialize EDID read after cable connected */
+	if (connected)
+		adv7533_edid_read_init(pdata);
+}
+
+/*
+ * adv7533_irq() - hard irq handler.
+ *
+ * Masks the active interrupt groups so the line quiets down, then
+ * defers the i2c-heavy status handling to adv7533_intr_work(), which
+ * re-enables the same groups when done.
+ */
+static irqreturn_t adv7533_irq(int irq, void *data)
+{
+	struct adv7533 *pdata = data;
+	u32 interrupts = 0;
+
+	if (!pdata) {
+		pr_err("%s: invalid input\n", __func__);
+		return IRQ_HANDLED;
+	}
+
+	/* disable HPD interrupts */
+	interrupts |= CFG_HPD_INTERRUPTS;
+
+	/* disable EDID interrupts */
+	interrupts |= CFG_EDID_INTERRUPTS;
+
+	/* disable HDCP interrupts */
+	if (pdata->hdcp_enabled)
+		interrupts |= CFG_HDCP_INTERRUPTS;
+
+	/* disable CEC interrupts */
+	if (pdata->cec_enabled)
+		interrupts |= CFG_CEC_INTERRUPTS;
+
+	if (adv7533_disable_interrupts(pdata, interrupts))
+		pr_err("%s: err disabling interrupts\n", __func__);
+
+	queue_delayed_work(pdata->workq, &pdata->adv7533_intr_work_id, 0);
+
+	return IRQ_HANDLED;
+}
+
+/* i2c id table matching the "adv7533" compatible device. */
+static struct i2c_device_id adv7533_id[] = {
+	{ "adv7533", 0},
+	{}
+};
+
+/*
+ * adv7533_get_platform_data() - recover the driver state from an opaque
+ * msm_dba client handle.
+ *
+ * Returns the adv7533 instance embedding the client's device info, or
+ * NULL on invalid input. NOTE(review): container_of on a non-NULL dev
+ * cannot return NULL; the final check is vestigial.
+ */
+static struct adv7533 *adv7533_get_platform_data(void *client)
+{
+	struct adv7533 *pdata = NULL;
+	struct msm_dba_device_info *dev;
+	struct msm_dba_client_info *cinfo =
+		(struct msm_dba_client_info *)client;
+
+	if (!cinfo) {
+		pr_err("%s: invalid client data\n", __func__);
+		goto end;
+	}
+
+	dev = cinfo->dev;
+	if (!dev) {
+		pr_err("%s: invalid device data\n", __func__);
+		goto end;
+	}
+
+	pdata = container_of(dev, struct adv7533, dev_info);
+	if (!pdata)
+		pr_err("%s: invalid platform data\n", __func__);
+
+end:
+	return pdata;
+}
+
+static int adv7533_cec_enable(void *client, bool cec_on, u32 flags)
+{
+	int ret = -EINVAL;
+	struct adv7533 *pdata = adv7533_get_platform_data(client);
+
+	if (!pdata) {
+		pr_err("%s: invalid platform data\n", __func__);
+		goto end;
+	}
+
+	if (cec_on) {
+		adv7533_write_array(pdata, adv7533_cec_en,
+					sizeof(adv7533_cec_en));
+		adv7533_write_array(pdata, adv7533_cec_tg_init,
+					sizeof(adv7533_cec_tg_init));
+		adv7533_write_array(pdata, adv7533_cec_power,
+					sizeof(adv7533_cec_power));
+
+		pdata->cec_enabled = true;
+
+		ret = adv7533_enable_interrupts(pdata, CFG_CEC_INTERRUPTS);
+
+	} else {
+		pdata->cec_enabled = false;
+		ret = adv7533_disable_interrupts(pdata, CFG_CEC_INTERRUPTS);
+	}
+end:
+	return ret;
+}
+static void adv7533_set_audio_block(void *client, u32 size, void *buf)
+{
+	struct adv7533 *pdata =
+		adv7533_get_platform_data(client);
+
+	if (!pdata || !buf) {
+		pr_err("%s: invalid data\n", __func__);
+		return;
+	}
+
+	mutex_lock(&pdata->ops_mutex);
+
+	size = min_t(u32, size, AUDIO_DATA_SIZE);
+
+	memset(pdata->audio_spkr_data, 0, AUDIO_DATA_SIZE);
+	memcpy(pdata->audio_spkr_data, buf, size);
+
+	mutex_unlock(&pdata->ops_mutex);
+}
+
+static void adv7533_get_audio_block(void *client, u32 size, void *buf)
+{
+	struct adv7533 *pdata =
+		adv7533_get_platform_data(client);
+
+	if (!pdata || !buf) {
+		pr_err("%s: invalid data\n", __func__);
+		return;
+	}
+
+	mutex_lock(&pdata->ops_mutex);
+
+	size = min_t(u32, size, AUDIO_DATA_SIZE);
+
+	memcpy(buf, pdata->audio_spkr_data, size);
+
+	mutex_unlock(&pdata->ops_mutex);
+}
+
+static int adv7533_check_hpd(void *client, u32 flags)
+{
+	struct adv7533 *pdata = adv7533_get_platform_data(client);
+	u8 reg_val = 0;
+	u8 intr_status;
+	int connected = 0;
+
+	if (!pdata) {
+		pr_err("%s: invalid platform data\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Check if cable is already connected.
+	 * Since adv7533_irq line is edge triggered,
+	 * if cable is already connected by this time
+	 * it won't trigger HPD interrupt.
+	 */
+	mutex_lock(&pdata->ops_mutex);
+	adv7533_read(pdata, I2C_ADDR_MAIN, 0x42, &reg_val, 1);
+
+	connected  = (reg_val & BIT(6));
+	if (connected) {
+		pr_debug("%s: cable is connected\n", __func__);
+		/* Clear the interrupts before initiating EDID read */
+		adv7533_read(pdata, I2C_ADDR_MAIN, 0x96, &intr_status, 1);
+		adv7533_write(pdata, I2C_ADDR_MAIN, 0x96, intr_status);
+		adv7533_enable_interrupts(pdata, (CFG_EDID_INTERRUPTS |
+				CFG_HPD_INTERRUPTS));
+
+		adv7533_edid_read_init(pdata);
+	}
+	mutex_unlock(&pdata->ops_mutex);
+
+	return connected;
+}
+
+/* Device Operations */
+/* Power the bridge on/off on behalf of a client.
+ *
+ * Fix: ret was initialized to -EINVAL and only ever assigned on the
+ * power-up path, so a successful power-off (and a redundant power-on)
+ * always reported failure to the caller.  Initialize ret to 0 and let
+ * only real failures override it.
+ */
+static int adv7533_power_on(void *client, bool on, u32 flags)
+{
+	int ret = 0;
+	struct adv7533 *pdata = adv7533_get_platform_data(client);
+
+	if (!pdata) {
+		pr_err("%s: invalid platform data\n", __func__);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: %d\n", __func__, on);
+	mutex_lock(&pdata->ops_mutex);
+
+	if (on && !pdata->is_power_on) {
+		adv7533_write_array(pdata, adv7533_init_setup,
+					sizeof(adv7533_init_setup));
+
+		ret = adv7533_enable_interrupts(pdata, CFG_HPD_INTERRUPTS);
+		if (ret) {
+			pr_err("%s: Failed: enable HPD intr %d\n",
+				__func__, ret);
+			goto end;
+		}
+		pdata->is_power_on = true;
+	} else if (!on) {
+		/* power down hdmi */
+		adv7533_write(pdata, I2C_ADDR_MAIN, 0x41, 0x50);
+		pdata->is_power_on = false;
+
+		adv7533_notify_clients(&pdata->dev_info,
+			MSM_DBA_CB_HPD_DISCONNECT);
+	}
+end:
+	mutex_unlock(&pdata->ops_mutex);
+	return ret;
+}
+
+/* Program the DSI timing registers (CEC/DSI map regs 0x28-0x37) from
+ * the incoming video configuration.  Each 12-bit timing value is split
+ * across two registers: high 8 bits in the even register, low 4 bits in
+ * the upper nibble of the odd register.
+ */
+static void adv7533_video_setup(struct adv7533 *pdata,
+	struct msm_dba_video_cfg *cfg)
+{
+	u32 h_total, hpw, hfp, hbp;
+	u32 v_total, vpw, vfp, vbp;
+
+	if (!pdata || !cfg) {
+		pr_err("%s: invalid input\n", __func__);
+		return;
+	}
+
+	h_total = cfg->h_active + cfg->h_front_porch +
+	      cfg->h_pulse_width + cfg->h_back_porch;
+	v_total = cfg->v_active + cfg->v_front_porch +
+	      cfg->v_pulse_width + cfg->v_back_porch;
+
+	hpw = cfg->h_pulse_width;
+	hfp = cfg->h_front_porch;
+	hbp = cfg->h_back_porch;
+
+	vpw = cfg->v_pulse_width;
+	vfp = cfg->v_front_porch;
+	vbp = cfg->v_back_porch;
+
+	pr_debug("h_total 0x%x, h_active 0x%x, hfp 0x%x, hpw 0x%x, hbp 0x%x\n",
+		h_total, cfg->h_active, cfg->h_front_porch,
+		cfg->h_pulse_width, cfg->h_back_porch);
+
+	pr_debug("v_total 0x%x, v_active 0x%x, vfp 0x%x, vpw 0x%x, vbp 0x%x\n",
+		v_total, cfg->v_active, cfg->v_front_porch,
+		cfg->v_pulse_width, cfg->v_back_porch);
+
+
+	/* h_width */
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x28, ((h_total & 0xFF0) >> 4));
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x29, ((h_total & 0xF) << 4));
+
+	/* hsync_width */
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x2A, ((hpw & 0xFF0) >> 4));
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x2B, ((hpw & 0xF) << 4));
+
+	/* hfp */
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x2C, ((hfp & 0xFF0) >> 4));
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x2D, ((hfp & 0xF) << 4));
+
+	/* hbp */
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x2E, ((hbp & 0xFF0) >> 4));
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x2F, ((hbp & 0xF) << 4));
+
+	/* v_total */
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x30, ((v_total & 0xFF0) >> 4));
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x31, ((v_total & 0xF) << 4));
+
+	/* vsync_width */
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x32, ((vpw & 0xFF0) >> 4));
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x33, ((vpw & 0xF) << 4));
+
+	/* vfp */
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x34, ((vfp & 0xFF0) >> 4));
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x35, ((vfp & 0xF) << 4));
+
+	/* vbp */
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x36, ((vbp & 0xFF0) >> 4));
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x37, ((vbp & 0xF) << 4));
+}
+
+/* Configure (enable=1) or deconfigure (enable=0) the regulators listed
+ * in pdata->power_data.  Returns 0 on success, -EINVAL on bad input, or
+ * the msm_dss_config_vreg() error.  A device with no regulators is not
+ * an error.
+ */
+static int adv7533_config_vreg(struct adv7533 *pdata, int enable)
+{
+	int rc = 0;
+	struct dss_module_power *power_data = NULL;
+
+	if (!pdata) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	/* NOTE(review): &pdata->power_data can never be NULL; only the
+	 * num_vreg check is meaningful here.
+	 */
+	power_data = &pdata->power_data;
+	if (!power_data || !power_data->num_vreg) {
+		pr_warn("%s: Error: invalid power data\n", __func__);
+		return 0;
+	}
+
+	if (enable) {
+		rc = msm_dss_config_vreg(&pdata->i2c_client->dev,
+					power_data->vreg_config,
+					power_data->num_vreg, 1);
+		if (rc) {
+			pr_err("%s: Failed to config vreg. Err=%d\n",
+				__func__, rc);
+			goto exit;
+		}
+	} else {
+		rc = msm_dss_config_vreg(&pdata->i2c_client->dev,
+					power_data->vreg_config,
+					power_data->num_vreg, 0);
+		if (rc) {
+			pr_err("%s: Failed to deconfig vreg. Err=%d\n",
+				__func__, rc);
+			goto exit;
+		}
+	}
+exit:
+	return rc;
+
+}
+
+/* Enable (enable=1) or disable (enable=0) the configured regulators.
+ * Mirrors adv7533_config_vreg() but drives msm_dss_enable_vreg().
+ */
+static int adv7533_enable_vreg(struct adv7533 *pdata, int enable)
+{
+	int rc = 0;
+	struct dss_module_power *power_data = NULL;
+
+	if (!pdata) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	power_data = &pdata->power_data;
+	if (!power_data || !power_data->num_vreg) {
+		pr_warn("%s: Error: invalid power data\n", __func__);
+		return 0;
+	}
+
+	if (enable) {
+		rc = msm_dss_enable_vreg(power_data->vreg_config,
+					power_data->num_vreg, 1);
+		if (rc) {
+			pr_err("%s: Failed to enable vreg. Err=%d\n",
+				__func__, rc);
+			goto exit;
+		}
+	} else {
+		rc = msm_dss_enable_vreg(power_data->vreg_config,
+					power_data->num_vreg, 0);
+		if (rc) {
+			pr_err("%s: Failed to disable vreg. Err=%d\n",
+				__func__, rc);
+			goto exit;
+		}
+	}
+exit:
+	return rc;
+
+}
+
+/* Configure the video path for a client: DSI lane count, detailed
+ * timings, HDMI vs DVI mode, AVI infoframe scan info, aspect ratio and
+ * finally the video-enable register sequence.  Returns 0 on success,
+ * -EINVAL on bad input.
+ */
+static int adv7533_video_on(void *client, bool on,
+	struct msm_dba_video_cfg *cfg, u32 flags)
+{
+	int ret = 0;
+	u8 lanes;
+	u8 reg_val = 0;
+	struct adv7533 *pdata = adv7533_get_platform_data(client);
+
+	if (!pdata || !cfg) {
+		pr_err("%s: invalid platform data\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&pdata->ops_mutex);
+
+	/* DSI lane configuration */
+	lanes = (cfg->num_of_input_lanes << 4);
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x1C, lanes);
+
+	adv7533_video_setup(pdata, cfg);
+
+	/* hdmi/dvi mode */
+	if (cfg->hdmi_mode)
+		adv7533_write(pdata, I2C_ADDR_MAIN, 0xAF, 0x06);
+	else
+		adv7533_write(pdata, I2C_ADDR_MAIN, 0xAF, 0x04);
+
+	/* set scan info for AVI Infoframe*/
+	if (cfg->scaninfo) {
+		adv7533_read(pdata, I2C_ADDR_MAIN, 0x55, &reg_val, 1);
+		reg_val |= cfg->scaninfo & (BIT(1) | BIT(0));
+		adv7533_write(pdata, I2C_ADDR_MAIN, 0x55, reg_val);
+	}
+
+	/*
+	 * aspect ratio and sync polarity set up.
+	 * Currently adv only supports 16:9 or 4:3 aspect ratio
+	 * configuration.
+	 */
+	/* non-zero difference h*3 != v*4 means the mode is not 4:3 */
+	if (cfg->h_active * 3 - cfg->v_active * 4) {
+		adv7533_write(pdata, I2C_ADDR_MAIN, 0x17, 0x02);
+		adv7533_write(pdata, I2C_ADDR_MAIN, 0x56, 0x28);
+	} else {
+		/* 4:3 aspect ratio */
+		adv7533_write(pdata, I2C_ADDR_MAIN, 0x17, 0x00);
+		adv7533_write(pdata, I2C_ADDR_MAIN, 0x56, 0x18);
+	}
+
+	adv7533_write_array(pdata, adv7533_video_en,
+				sizeof(adv7533_video_en));
+
+	mutex_unlock(&pdata->ops_mutex);
+	return ret;
+}
+
+/* Enable/disable HDCP (main map reg 0xAF bit 7) and HDMI encryption
+ * (bit 4), then (un)mask the HDCP interrupt sources.
+ *
+ * Fix: ret was initialized to -EINVAL and never updated, so this
+ * function always reported failure even when every register write
+ * succeeded.  Set ret to 0 once the configuration has been applied.
+ */
+static int adv7533_hdcp_enable(void *client, bool hdcp_on,
+	bool enc_on, u32 flags)
+{
+	int ret = -EINVAL;
+	u8 reg_val;
+	struct adv7533 *pdata =
+		adv7533_get_platform_data(client);
+
+	if (!pdata) {
+		pr_err("%s: invalid platform data\n", __func__);
+		return ret;
+	}
+
+	mutex_lock(&pdata->ops_mutex);
+
+	adv7533_read(pdata, I2C_ADDR_MAIN, 0xAF, &reg_val, 1);
+
+	if (hdcp_on)
+		reg_val |= BIT(7);
+	else
+		reg_val &= ~BIT(7);
+
+	if (enc_on)
+		reg_val |= BIT(4);
+	else
+		reg_val &= ~BIT(4);
+
+	adv7533_write(pdata, I2C_ADDR_MAIN, 0xAF, reg_val);
+
+	pdata->hdcp_enabled = hdcp_on;
+
+	if (pdata->hdcp_enabled)
+		adv7533_enable_interrupts(pdata, CFG_HDCP_INTERRUPTS);
+	else
+		adv7533_disable_interrupts(pdata, CFG_HDCP_INTERRUPTS);
+
+	ret = 0;
+
+	mutex_unlock(&pdata->ops_mutex);
+	return ret;
+}
+
+/* Translate the generic msm_dba audio configuration into the ADV7533
+ * audio register set (channel status, ACR N/CTS, I2S/SPDIF interface
+ * selection, channel count and allocation) and program it in one batch.
+ *
+ * Fix: ret was initialized to -EINVAL and never updated, so a fully
+ * successful configuration still reported failure; the status of the
+ * final register batch write was also discarded.  Propagate the
+ * adv7533_write_array() result instead.
+ */
+static int adv7533_configure_audio(void *client,
+	struct msm_dba_audio_cfg *cfg, u32 flags)
+{
+	int ret = -EINVAL;
+	int sampling_rate = 0;
+	struct adv7533 *pdata =
+		adv7533_get_platform_data(client);
+	struct adv7533_reg_cfg reg_cfg[] = {
+		{I2C_ADDR_MAIN, 0x12, 0x00, 0},
+		{I2C_ADDR_MAIN, 0x13, 0x00, 0},
+		{I2C_ADDR_MAIN, 0x14, 0x00, 0},
+		{I2C_ADDR_MAIN, 0x15, 0x00, 0},
+		{I2C_ADDR_MAIN, 0x0A, 0x00, 0},
+		{I2C_ADDR_MAIN, 0x0C, 0x00, 0},
+		{I2C_ADDR_MAIN, 0x0D, 0x00, 0},
+		{I2C_ADDR_MAIN, 0x03, 0x00, 0},
+		{I2C_ADDR_MAIN, 0x02, 0x00, 0},
+		{I2C_ADDR_MAIN, 0x01, 0x00, 0},
+		{I2C_ADDR_MAIN, 0x09, 0x00, 0},
+		{I2C_ADDR_MAIN, 0x08, 0x00, 0},
+		{I2C_ADDR_MAIN, 0x07, 0x00, 0},
+		{I2C_ADDR_MAIN, 0x73, 0x00, 0},
+		{I2C_ADDR_MAIN, 0x76, 0x00, 0},
+	};
+
+	if (!pdata || !cfg) {
+		pr_err("%s: invalid data\n", __func__);
+		return ret;
+	}
+
+	mutex_lock(&pdata->ops_mutex);
+
+	/* IEC 60958 channel status byte 0 (reg 0x12) */
+	if (cfg->copyright == MSM_DBA_AUDIO_COPYRIGHT_NOT_PROTECTED)
+		reg_cfg[0].val |= BIT(5);
+
+	if (cfg->pre_emphasis == MSM_DBA_AUDIO_PRE_EMPHASIS_50_15us)
+		reg_cfg[0].val |= BIT(2);
+
+	if (cfg->clock_accuracy == MSM_DBA_AUDIO_CLOCK_ACCURACY_LVL1)
+		reg_cfg[0].val |= BIT(0);
+	else if (cfg->clock_accuracy == MSM_DBA_AUDIO_CLOCK_ACCURACY_LVL3)
+		reg_cfg[0].val |= BIT(1);
+
+	reg_cfg[1].val = cfg->channel_status_category_code;
+
+	reg_cfg[2].val = (cfg->channel_status_word_length & 0xF) << 0 |
+		(cfg->channel_status_source_number & 0xF) << 4;
+
+	/* IEC 60958 sampling frequency code for channel status byte 3 */
+	if (cfg->sampling_rate == MSM_DBA_AUDIO_32KHZ)
+		sampling_rate = 0x3;
+	else if (cfg->sampling_rate == MSM_DBA_AUDIO_44P1KHZ)
+		sampling_rate = 0x0;
+	else if (cfg->sampling_rate == MSM_DBA_AUDIO_48KHZ)
+		sampling_rate = 0x2;
+	else if (cfg->sampling_rate == MSM_DBA_AUDIO_88P2KHZ)
+		sampling_rate = 0x8;
+	else if (cfg->sampling_rate == MSM_DBA_AUDIO_96KHZ)
+		sampling_rate = 0xA;
+	else if (cfg->sampling_rate == MSM_DBA_AUDIO_176P4KHZ)
+		sampling_rate = 0xC;
+	else if (cfg->sampling_rate == MSM_DBA_AUDIO_192KHZ)
+		sampling_rate = 0xE;
+
+	reg_cfg[3].val = (sampling_rate & 0xF) << 4;
+
+	if (cfg->mode == MSM_DBA_AUDIO_MODE_MANUAL)
+		reg_cfg[4].val |= BIT(7);
+
+	if (cfg->interface == MSM_DBA_AUDIO_SPDIF_INTERFACE)
+		reg_cfg[4].val |= BIT(4);
+
+	if (cfg->interface == MSM_DBA_AUDIO_I2S_INTERFACE) {
+		/* i2s enable */
+		reg_cfg[5].val |= BIT(2);
+
+		/* audio samp freq select */
+		reg_cfg[5].val |= BIT(7);
+	}
+
+	/* format */
+	reg_cfg[5].val |= cfg->i2s_fmt & 0x3;
+
+	/* channel status override */
+	reg_cfg[5].val |= (cfg->channel_status_source & 0x1) << 6;
+
+	/* sample word lengths, default 24 */
+	reg_cfg[6].val |= 0x18;
+
+	/* endian order of incoming I2S data */
+	if (cfg->word_endianness == MSM_DBA_AUDIO_WORD_LITTLE_ENDIAN)
+		reg_cfg[6].val |= 0x1 << 7;
+
+	/* compressed audio v - bit */
+	reg_cfg[6].val |= (cfg->channel_status_v_bit & 0x1) << 5;
+
+	/* ACR - N */
+	reg_cfg[7].val |= (cfg->n & 0x000FF) >> 0;
+	reg_cfg[8].val |= (cfg->n & 0x0FF00) >> 8;
+	reg_cfg[9].val |= (cfg->n & 0xF0000) >> 16;
+
+	/* ACR - CTS */
+	reg_cfg[10].val |= (cfg->cts & 0x000FF) >> 0;
+	reg_cfg[11].val |= (cfg->cts & 0x0FF00) >> 8;
+	reg_cfg[12].val |= (cfg->cts & 0xF0000) >> 16;
+
+	/* channel count */
+	reg_cfg[13].val |= (cfg->channels & 0x3);
+
+	/* CA */
+	reg_cfg[14].val = cfg->channel_allocation;
+
+	ret = adv7533_write_array(pdata, reg_cfg, sizeof(reg_cfg));
+
+	mutex_unlock(&pdata->ops_mutex);
+	return ret;
+}
+
+/* Queue a CEC message for transmission: stage the frame via
+ * adv7533_cec_prepare_msg() and then trigger TX (CEC/DSI map reg 0x81)
+ * with up to 3 NACK retries.  Returns 0 on success or a negative errno.
+ */
+static int adv7533_hdmi_cec_write(void *client, u32 size,
+	char *buf, u32 flags)
+{
+	int ret = -EINVAL;
+	struct adv7533 *pdata =
+		adv7533_get_platform_data(client);
+
+	if (!pdata) {
+		pr_err("%s: invalid platform data\n", __func__);
+		return ret;
+	}
+
+	mutex_lock(&pdata->ops_mutex);
+
+	ret = adv7533_cec_prepare_msg(pdata, buf, size);
+	if (ret)
+		goto end;
+
+	/* Enable CEC msg tx with NACK 3 retries */
+	adv7533_write(pdata, I2C_ADDR_CEC_DSI, 0x81, 0x07);
+end:
+	mutex_unlock(&pdata->ops_mutex);
+	return ret;
+}
+
+/* Pop the oldest pending received CEC message from the driver's ring
+ * of ADV7533_CEC_BUF_MAX slots into buf (CEC_MSG_SIZE bytes).  Returns
+ * 0 and sets *size on success; -EINVAL and *size = 0 when no message
+ * is pending.  NOTE(review): buf and *size are assumed to hold at
+ * least CEC_MSG_SIZE bytes — callers must guarantee this.
+ */
+static int adv7533_hdmi_cec_read(void *client, u32 *size, char *buf, u32 flags)
+{
+	int ret = -EINVAL;
+	int i;
+	struct adv7533 *pdata =
+		adv7533_get_platform_data(client);
+
+	if (!pdata) {
+		pr_err("%s: invalid platform data\n", __func__);
+		return ret;
+	}
+
+	mutex_lock(&pdata->ops_mutex);
+
+	for (i = 0; i < ADV7533_CEC_BUF_MAX; i++) {
+		struct adv7533_cec_msg *msg = &pdata->cec_msg[i];
+
+		if (msg->pending && msg->timestamp) {
+			memcpy(buf, msg->buf, CEC_MSG_SIZE);
+			msg->pending = false;
+			break;
+		}
+	}
+
+	if (i < ADV7533_CEC_BUF_MAX) {
+		*size = CEC_MSG_SIZE;
+		ret = 0;
+	} else {
+		pr_err("%s: no pending cec msg\n", __func__);
+		*size = 0;
+	}
+
+	mutex_unlock(&pdata->ops_mutex);
+	return ret;
+}
+
+/* Report the EDID buffer size (always EDID_SEG_SIZE).
+ *
+ * Fix: the NULL-pdata path returned ret == 0 (success) without writing
+ * *size, leaving the caller with an uninitialized size.  Return
+ * -EINVAL for invalid input instead, and validate size before taking
+ * the lock.
+ */
+static int adv7533_get_edid_size(void *client, u32 *size, u32 flags)
+{
+	struct adv7533 *pdata =
+		adv7533_get_platform_data(client);
+
+	if (!pdata) {
+		pr_err("%s: invalid platform data\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!size)
+		return -EINVAL;
+
+	mutex_lock(&pdata->ops_mutex);
+
+	*size = EDID_SEG_SIZE;
+
+	mutex_unlock(&pdata->ops_mutex);
+	return 0;
+}
+
+/* Copy up to sizeof(edid_buf) bytes of the cached raw EDID into the
+ * caller's buffer.
+ *
+ * Fix: the invalid-input path jumped to the 'end' label, which called
+ * mutex_unlock() on a mutex that was never locked — and dereferenced
+ * pdata, which may be NULL on that very path.  Return early on bad
+ * input instead, and report -EINVAL rather than success.
+ */
+static int adv7533_get_raw_edid(void *client,
+	u32 size, char *buf, u32 flags)
+{
+	struct adv7533 *pdata =
+		adv7533_get_platform_data(client);
+
+	if (!pdata || !buf) {
+		pr_err("%s: invalid data\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&pdata->ops_mutex);
+
+	size = min_t(u32, size, sizeof(pdata->edid_buf));
+
+	memcpy(buf, pdata->edid_buf, size);
+
+	mutex_unlock(&pdata->ops_mutex);
+	return 0;
+}
+
+/* Debugfs/sysfs helper: write the low byte of val to register
+ * (reg & 0xFF).  Bit 8 of reg selects the CEC/DSI map, otherwise the
+ * main map is used.
+ *
+ * Fix: ret was initialized to -EINVAL and never set on success, so
+ * every write reported failure.  Set ret to 0 after the write.
+ */
+static int adv7533_write_reg(struct msm_dba_device_info *dev,
+		u32 reg, u32 val)
+{
+	struct adv7533 *pdata;
+	int ret = -EINVAL;
+	u8 i2ca = 0;
+
+	if (!dev)
+		goto end;
+
+	pdata = container_of(dev, struct adv7533, dev_info);
+
+	/* bit 8 of the register offset selects the CEC/DSI I2C map */
+	i2ca = ((reg & 0x100) ? pdata->cec_dsi_i2c_addr : pdata->main_i2c_addr);
+
+	adv7533_write(pdata, i2ca, (u8)(reg & 0xFF), (u8)(val & 0xFF));
+	ret = 0;
+end:
+	return ret;
+}
+
+/* Debugfs/sysfs helper: read register (reg & 0xFF) into *val.  Bit 8
+ * of reg selects the CEC/DSI map, otherwise the main map is used.
+ *
+ * Fix: ret was initialized to 0, so a NULL dev returned success while
+ * leaving *val untouched.  Report -EINVAL for invalid input and also
+ * guard against a NULL val pointer.
+ */
+static int adv7533_read_reg(struct msm_dba_device_info *dev,
+		u32 reg, u32 *val)
+{
+	int ret = -EINVAL;
+	u8 byte_val = 0;
+	u8 i2ca = 0;
+	struct adv7533 *pdata;
+
+	if (!dev || !val)
+		goto end;
+
+	pdata = container_of(dev, struct adv7533, dev_info);
+
+	/* bit 8 of the register offset selects the CEC/DSI I2C map */
+	i2ca = ((reg & 0x100) ? pdata->cec_dsi_i2c_addr : pdata->main_i2c_addr);
+
+	adv7533_read(pdata, i2ca, (u8)(reg & 0xFF), &byte_val, 1);
+
+	*val = (u32)byte_val;
+	ret = 0;
+
+end:
+	return ret;
+}
+
+/* Populate the msm_dba client and device operation tables, initialize
+ * the device bookkeeping (mutex, client list, chip name) and register
+ * this instance with the msm_dba core.  Returns the core's add status.
+ */
+static int adv7533_register_dba(struct adv7533 *pdata)
+{
+	struct msm_dba_ops *client_ops;
+	struct msm_dba_device_ops *dev_ops;
+
+	if (!pdata)
+		return -EINVAL;
+
+	client_ops = &pdata->dev_info.client_ops;
+	dev_ops = &pdata->dev_info.dev_ops;
+
+	client_ops->power_on        = adv7533_power_on;
+	client_ops->video_on        = adv7533_video_on;
+	client_ops->configure_audio = adv7533_configure_audio;
+	client_ops->hdcp_enable     = adv7533_hdcp_enable;
+	client_ops->hdmi_cec_on     = adv7533_cec_enable;
+	client_ops->hdmi_cec_write  = adv7533_hdmi_cec_write;
+	client_ops->hdmi_cec_read   = adv7533_hdmi_cec_read;
+	client_ops->get_edid_size   = adv7533_get_edid_size;
+	client_ops->get_raw_edid    = adv7533_get_raw_edid;
+	client_ops->check_hpd	    = adv7533_check_hpd;
+	client_ops->get_audio_block = adv7533_get_audio_block;
+	client_ops->set_audio_block = adv7533_set_audio_block;
+
+	dev_ops->write_reg = adv7533_write_reg;
+	dev_ops->read_reg = adv7533_read_reg;
+	dev_ops->dump_debug_info = adv7533_dump_debug_info;
+
+	strlcpy(pdata->dev_info.chip_name, "adv7533",
+		sizeof(pdata->dev_info.chip_name));
+
+	mutex_init(&pdata->dev_info.dev_mutex);
+
+	INIT_LIST_HEAD(&pdata->dev_info.client_list);
+
+	return msm_dba_add_probed_device(&pdata->dev_info);
+}
+
+/* Deregister this instance from the msm_dba core. */
+static void adv7533_unregister_dba(struct adv7533 *pdata)
+{
+	if (!pdata)
+		return;
+
+	msm_dba_remove_probed_device(&pdata->dev_info);
+}
+
+
+/* I2C probe: parse DT, bring up regulators, verify the chip, register
+ * with the msm_dba core, configure GPIOs/IRQ, create sysfs nodes and
+ * the interrupt workqueue.
+ *
+ * Fixes:
+ * - pdata was declared 'static', so a second probed device would
+ *   clobber the first instance's driver data; it must be a local.
+ * - a failed default audio-config write jumped to the teardown path
+ *   with ret still 0, so probe undid everything yet returned success;
+ *   the write status is now captured in ret.
+ */
+static int adv7533_probe(struct i2c_client *client,
+	 const struct i2c_device_id *id)
+{
+	struct adv7533 *pdata;
+	int ret = 0;
+
+	if (!client || !client->dev.of_node) {
+		pr_err("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	pdata = devm_kzalloc(&client->dev,
+		sizeof(struct adv7533), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	ret = adv7533_parse_dt(&client->dev, pdata);
+	if (ret) {
+		pr_err("%s: Failed to parse DT\n", __func__);
+		goto err_dt_parse;
+	}
+
+	pdata->i2c_client = client;
+
+	ret = adv7533_config_vreg(pdata, 1);
+	if (ret) {
+		pr_err("%s: Failed to config vreg\n", __func__);
+		return -EPROBE_DEFER;
+	}
+	adv7533_enable_vreg(pdata, 1);
+
+	mutex_init(&pdata->ops_mutex);
+
+	ret = adv7533_read_device_rev(pdata);
+	if (ret) {
+		pr_err("%s: Failed to read chip rev\n", __func__);
+		goto err_i2c_prog;
+	}
+
+	ret = adv7533_program_i2c_addr(pdata);
+	if (ret != 0) {
+		pr_err("%s: Failed to program i2c addr\n", __func__);
+		goto err_i2c_prog;
+	}
+
+	ret = adv7533_register_dba(pdata);
+	if (ret) {
+		pr_err("%s: Error registering with DBA %d\n",
+			__func__, ret);
+		goto err_dba_reg;
+	}
+
+	/* pinctrl failure is logged but not fatal */
+	ret = pinctrl_select_state(pdata->ts_pinctrl,
+		pdata->pinctrl_state_active);
+	if (ret < 0)
+		pr_err("%s: Failed to select %s pinstate %d\n",
+			__func__, PINCTRL_STATE_ACTIVE, ret);
+
+	ret = adv7533_gpio_configure(pdata, true);
+	if (ret) {
+		pr_err("%s: Failed to configure GPIOs\n", __func__);
+		goto err_gpio_cfg;
+	}
+
+	if (gpio_is_valid(pdata->switch_gpio))
+		gpio_set_value(pdata->switch_gpio, pdata->switch_flags);
+
+	pdata->irq = gpio_to_irq(pdata->irq_gpio);
+
+	ret = request_threaded_irq(pdata->irq, NULL, adv7533_irq,
+		IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "adv7533", pdata);
+	if (ret) {
+		pr_err("%s: Failed to enable ADV7533 interrupt\n",
+			__func__);
+		goto err_irq;
+	}
+
+	dev_set_drvdata(&client->dev, &pdata->dev_info);
+	ret = msm_dba_helper_sysfs_init(&client->dev);
+	if (ret) {
+		pr_err("%s: sysfs init failed\n", __func__);
+		goto err_dba_helper;
+	}
+
+	pdata->workq = create_workqueue("adv7533_workq");
+	if (!pdata->workq) {
+		pr_err("%s: workqueue creation failed.\n", __func__);
+		ret = -EPERM;
+		goto err_workqueue;
+	}
+
+	if (pdata->audio) {
+		pr_debug("%s: enabling default audio configs\n", __func__);
+		ret = adv7533_write_array(pdata, I2S_cfg, sizeof(I2S_cfg));
+		if (ret)
+			goto end;
+	}
+
+	INIT_DELAYED_WORK(&pdata->adv7533_intr_work_id, adv7533_intr_work);
+
+	pm_runtime_enable(&client->dev);
+	pm_runtime_set_active(&client->dev);
+
+	return 0;
+end:
+	if (pdata->workq)
+		destroy_workqueue(pdata->workq);
+err_workqueue:
+	msm_dba_helper_sysfs_remove(&client->dev);
+err_dba_helper:
+	disable_irq(pdata->irq);
+	free_irq(pdata->irq, pdata);
+err_irq:
+	adv7533_gpio_configure(pdata, false);
+err_gpio_cfg:
+	adv7533_unregister_dba(pdata);
+err_dba_reg:
+err_i2c_prog:
+	adv7533_enable_vreg(pdata, 0);
+	adv7533_config_vreg(pdata, 0);
+err_dt_parse:
+	devm_kfree(&client->dev, pdata);
+	return ret;
+}
+
+static int adv7533_remove(struct i2c_client *client)
+{
+	int ret = -EINVAL;
+	struct msm_dba_device_info *dev;
+	struct adv7533 *pdata;
+
+	if (!client)
+		goto end;
+
+	dev = dev_get_drvdata(&client->dev);
+	if (!dev)
+		goto end;
+
+	pdata = container_of(dev, struct adv7533, dev_info);
+	if (!pdata)
+		goto end;
+
+	pm_runtime_disable(&client->dev);
+	disable_irq(pdata->irq);
+	free_irq(pdata->irq, pdata);
+
+	adv7533_config_vreg(pdata, 0);
+	ret = adv7533_gpio_configure(pdata, false);
+
+	mutex_destroy(&pdata->ops_mutex);
+
+	devm_kfree(&client->dev, pdata);
+
+end:
+	return ret;
+}
+
+/* I2C driver registration glue */
+static struct i2c_driver adv7533_driver = {
+	.driver = {
+		.name = "adv7533",
+		.owner = THIS_MODULE,
+	},
+	.probe = adv7533_probe,
+	.remove = adv7533_remove,
+	.id_table = adv7533_id,
+};
+
+static int __init adv7533_init(void)
+{
+	return i2c_add_driver(&adv7533_driver);
+}
+
+static void __exit adv7533_exit(void)
+{
+	i2c_del_driver(&adv7533_driver);
+}
+
+/* NOTE(review): mdss_mdp_panel is defined elsewhere in the MDSS stack;
+ * this exposes the boot-time panel selection string as a module param.
+ */
+module_param_string(panel, mdss_mdp_panel, MDSS_MAX_PANEL_LEN, 0000);
+
+module_init(adv7533_init);
+module_exit(adv7533_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("adv7533 driver");
diff --git a/drivers/video/fbdev/msm/msm_dba/msm_dba.c b/drivers/video/fbdev/msm/msm_dba/msm_dba.c
new file mode 100644
index 0000000..cffefae
--- /dev/null
+++ b/drivers/video/fbdev/msm/msm_dba/msm_dba.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+
+#include <video/msm_dba.h>
+#include <msm_dba_internal.h>
+
+static DEFINE_MUTEX(register_mutex);
+
+/*
+ * msm_dba_register_client() - register a client with a probed bridge
+ * @info: registration info (client name, chip name, instance, callback)
+ * @ops:  out parameter; filled with the device's client ops on success
+ *
+ * Looks up the probed bridge device, allocates a client handle, links
+ * it into the device's client list and runs the device's optional
+ * registration hook.  Returns the client handle, or an ERR_PTR on
+ * failure.  The handle is owned by the core and freed by
+ * msm_dba_deregister_client().
+ *
+ * Fix: dropped the memset() that immediately followed kzalloc() —
+ * kzalloc already returns zeroed memory, so the memset was redundant.
+ */
+void *msm_dba_register_client(struct msm_dba_reg_info *info,
+			      struct msm_dba_ops *ops)
+{
+	int rc = 0;
+	struct msm_dba_device_info *device = NULL;
+	struct msm_dba_client_info *client = NULL;
+
+	pr_debug("%s: ENTER\n", __func__);
+
+	if (!info || !ops) {
+		pr_err("%s: Invalid params\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	mutex_lock(&register_mutex);
+
+	pr_debug("%s: Client(%s) Chip(%s) Instance(%d)\n", __func__,
+		 info->client_name, info->chip_name, info->instance_id);
+
+	rc = msm_dba_get_probed_device(info, &device);
+	if (rc) {
+		pr_err("%s: Device not found (%s, %d)\n", __func__,
+							 info->chip_name,
+							 info->instance_id);
+		mutex_unlock(&register_mutex);
+		return ERR_PTR(rc);
+	}
+
+	pr_debug("%s: Client(%s) device found\n", __func__, info->client_name);
+
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (!client) {
+		mutex_unlock(&register_mutex);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	client->dev = device;
+	strlcpy(client->client_name, info->client_name,
+		MSM_DBA_CLIENT_NAME_LEN);
+
+	client->cb = info->cb;
+	client->cb_data = info->cb_data;
+
+	mutex_lock_nested(&device->dev_mutex, SINGLE_DEPTH_NESTING);
+	list_add(&client->list, &device->client_list);
+	*ops = device->client_ops;
+	mutex_unlock(&device->dev_mutex);
+
+	if (device->reg_fxn) {
+		rc = device->reg_fxn(client);
+		if (rc) {
+			pr_err("%s: Client register failed (%s, %d)\n",
+			       __func__, info->chip_name, info->instance_id);
+			/* remove the client from list before freeing */
+			mutex_lock_nested(&device->dev_mutex,
+						SINGLE_DEPTH_NESTING);
+			list_del(&client->list);
+			mutex_unlock(&device->dev_mutex);
+			kfree(client);
+			mutex_unlock(&register_mutex);
+			return ERR_PTR(rc);
+		}
+	}
+
+	mutex_unlock(&register_mutex);
+
+	pr_debug("%s: EXIT\n", __func__);
+	return client;
+}
+EXPORT_SYMBOL(msm_dba_register_client);
+
+/*
+ * msm_dba_deregister_client() - deregister and free a client handle
+ * @client: handle returned by msm_dba_register_client()
+ *
+ * Runs the device's optional deregistration hook, unlinks the handle
+ * from the device's client list and frees it.  Returns the hook's
+ * status (0 when no hook is installed), or -EINVAL for a NULL handle.
+ */
+int msm_dba_deregister_client(void *client)
+{
+	int rc = 0;
+	struct msm_dba_client_info *handle = client;
+	struct msm_dba_client_info *node = NULL;
+	struct list_head *tmp = NULL;
+	struct list_head *position = NULL;
+
+	pr_debug("%s: ENTER\n", __func__);
+
+	if (!handle) {
+		pr_err("%s: Invalid Params\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&register_mutex);
+
+	pr_debug("%s: Client(%s) Chip(%s) Instance(%d)\n", __func__,
+		 handle->client_name, handle->dev->chip_name,
+		 handle->dev->instance_id);
+
+	if (handle->dev->dereg_fxn) {
+		rc = handle->dev->dereg_fxn(handle);
+		if (rc) {
+			pr_err("%s: Client deregister failed (%s)\n",
+			       __func__, handle->client_name);
+		}
+	}
+
+	mutex_lock_nested(&handle->dev->dev_mutex, SINGLE_DEPTH_NESTING);
+
+	/* safe iteration: we delete the matching node mid-walk */
+	list_for_each_safe(position, tmp, &handle->dev->client_list) {
+
+		node = list_entry(position, struct msm_dba_client_info, list);
+
+		if (node == handle) {
+			list_del(&node->list);
+			break;
+		}
+	}
+
+	mutex_unlock(&handle->dev->dev_mutex);
+
+	kfree(handle);
+
+	mutex_unlock(&register_mutex);
+
+	pr_debug("%s: EXIT (%d)\n", __func__, rc);
+	return rc;
+}
+EXPORT_SYMBOL(msm_dba_deregister_client);
diff --git a/drivers/video/fbdev/msm/msm_dba/msm_dba_debug.c b/drivers/video/fbdev/msm/msm_dba/msm_dba_debug.c
new file mode 100644
index 0000000..f59b7f5
--- /dev/null
+++ b/drivers/video/fbdev/msm/msm_dba/msm_dba_debug.c
@@ -0,0 +1,362 @@
+/*
+ * Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/i2c.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/stat.h>
+
+#include <video/msm_dba.h>
+#include "msm_dba_internal.h"
+
+/* Fetch the msm_dba device info stored in the struct device's drvdata
+ * (set by the bridge driver at probe time).  Returns NULL on bad input.
+ */
+static inline struct msm_dba_device_info *to_dba_dev(struct device *dev)
+{
+	if (!dev) {
+		pr_err("%s: dev is NULL\n", __func__);
+		return NULL;
+	}
+	return dev_get_drvdata(dev);
+}
+
+/* sysfs 'device_name' read: "<chip_name>:<instance_id>" */
+static ssize_t device_name_rda_attr(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct msm_dba_device_info *device = to_dba_dev(dev);
+
+	if (!device) {
+		pr_err("%s: device is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%s:%d\n", device->chip_name,
+						   device->instance_id);
+}
+
+/* sysfs 'client_list' read: one registered client name per line.
+ *
+ * Fix (applies to all four accumulating readers below): snprintf()
+ * returns the length that *would* have been written, so once output
+ * was truncated 'bytes' could exceed PAGE_SIZE, making buf + bytes
+ * point past the buffer and (PAGE_SIZE - bytes) wrap to a huge size_t
+ * — an out-of-bounds write.  scnprintf() returns the number of bytes
+ * actually written and caps the accumulation safely.
+ */
+static ssize_t client_list_rda_attr(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct msm_dba_device_info *device = to_dba_dev(dev);
+	struct msm_dba_client_info *c;
+	struct list_head *pos = NULL;
+	ssize_t bytes = 0;
+
+	if (!device) {
+		pr_err("%s: device is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&device->dev_mutex);
+
+	list_for_each(pos, &device->client_list) {
+		c = list_entry(pos, struct msm_dba_client_info, list);
+		bytes += scnprintf(buf + bytes, (PAGE_SIZE - bytes), "%s\n",
+				c->client_name);
+	}
+
+	mutex_unlock(&device->dev_mutex);
+
+	return bytes;
+}
+
+/* sysfs 'power_status' read: device power state plus per-client state */
+static ssize_t power_status_rda_attr(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct msm_dba_device_info *device = to_dba_dev(dev);
+	struct msm_dba_client_info *c;
+	struct list_head *pos = NULL;
+	ssize_t bytes = 0;
+
+	if (!device) {
+		pr_err("%s: device is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&device->dev_mutex);
+	bytes = scnprintf(buf, PAGE_SIZE, "power_status:%d\n",
+			 device->power_status);
+
+	list_for_each(pos, &device->client_list) {
+		c = list_entry(pos, struct msm_dba_client_info, list);
+		bytes += scnprintf(buf + bytes, (PAGE_SIZE - bytes),
+				  "client: %s, status = %d\n",
+				  c->client_name, c->power_on);
+	}
+
+	mutex_unlock(&device->dev_mutex);
+	return bytes;
+}
+
+/* sysfs 'video_status' read: device video state plus per-client state */
+static ssize_t video_status_rda_attr(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct msm_dba_device_info *device = to_dba_dev(dev);
+	struct msm_dba_client_info *c;
+	struct list_head *pos = NULL;
+	ssize_t bytes = 0;
+
+	if (!device) {
+		pr_err("%s: device is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&device->dev_mutex);
+	bytes = scnprintf(buf, PAGE_SIZE, "video_status:%d\n",
+			 device->video_status);
+
+	list_for_each(pos, &device->client_list) {
+		c = list_entry(pos, struct msm_dba_client_info, list);
+		bytes += scnprintf(buf + bytes, (PAGE_SIZE - bytes),
+				  "client: %s, status = %d\n",
+				  c->client_name, c->video_on);
+	}
+
+	mutex_unlock(&device->dev_mutex);
+	return bytes;
+}
+
+/* sysfs 'audio_status' read: device audio state plus per-client state */
+static ssize_t audio_status_rda_attr(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct msm_dba_device_info *device = to_dba_dev(dev);
+	struct msm_dba_client_info *c;
+	struct list_head *pos = NULL;
+	ssize_t bytes = 0;
+
+	if (!device) {
+		pr_err("%s: device is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&device->dev_mutex);
+	bytes = scnprintf(buf, PAGE_SIZE, "audio_status:%d\n",
+			 device->audio_status);
+
+	list_for_each(pos, &device->client_list) {
+		c = list_entry(pos, struct msm_dba_client_info, list);
+		bytes += scnprintf(buf + bytes, (PAGE_SIZE - bytes),
+				  "client: %s, status = %d\n",
+				  c->client_name, c->audio_on);
+	}
+
+	mutex_unlock(&device->dev_mutex);
+	return bytes;
+}
+
+/* sysfs 'write_reg' store: parses "<reg>:<val>" and forwards it to the
+ * device's write_reg op.
+ *
+ * Fixes:
+ * - input without a ':' made strsep() return NULL for valstr, and the
+ *   subsequent kstrtol(NULL, ...) dereferenced a NULL pointer; both
+ *   tokens are now validated before parsing.
+ * - strlcpy() always NUL-terminates its destination, so the manual
+ *   strlen()/str[len] termination dance was redundant and removed.
+ */
+static ssize_t write_reg_wta_attr(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf,
+				  size_t count)
+{
+	struct msm_dba_device_info *device = to_dba_dev(dev);
+	char *regstr, *valstr, *ptr;
+	char str[20];
+	long reg = 0;
+	long val = 0;
+	int rc = 0;
+
+	if (!device) {
+		pr_err("%s: device is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	strlcpy(str, buf, sizeof(str));
+
+	ptr = str;
+	regstr = strsep(&ptr, ":");
+	valstr = strsep(&ptr, ":");
+
+	if (!regstr || !valstr) {
+		pr_err("%s: expected <reg>:<val>\n", __func__);
+		return count;
+	}
+
+	rc = kstrtol(regstr, 0, &reg);
+	if (rc) {
+		pr_err("%s: kstrol error %d\n", __func__, rc);
+	} else {
+		rc = kstrtol(valstr, 0, &val);
+		if (rc)
+			pr_err("%s: kstrol error for val %d\n", __func__, rc);
+	}
+
+	if (!rc) {
+		mutex_lock(&device->dev_mutex);
+
+		if (device->dev_ops.write_reg) {
+			rc = device->dev_ops.write_reg(device,
+						       (u32)reg,
+						       (u32)val);
+
+			if (rc) {
+				pr_err("%s: failed to write reg %d", __func__,
+				       rc);
+			}
+		} else {
+			pr_err("%s: not supported\n", __func__);
+		}
+
+		mutex_unlock(&device->dev_mutex);
+	}
+
+	return count;
+}
+
+/* sysfs 'read_reg' read: reports the value cached by the most recent
+ * store to this attribute (see read_reg_wta_attr below).
+ */
+static ssize_t read_reg_rda_attr(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct msm_dba_device_info *device = to_dba_dev(dev);
+	ssize_t bytes;
+
+	if (!device) {
+		pr_err("%s: device is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&device->dev_mutex);
+
+	bytes = snprintf(buf, PAGE_SIZE, "0x%x\n", device->register_val);
+
+	mutex_unlock(&device->dev_mutex);
+
+	return bytes;
+}
+
+/* sysfs 'read_reg' store: parses a register offset, invokes the
+ * device's read_reg op and caches the value for a later read of this
+ * attribute.
+ */
+static ssize_t read_reg_wta_attr(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf,
+				 size_t count)
+{
+	struct msm_dba_device_info *device = to_dba_dev(dev);
+	long reg = 0;
+	int rc = 0;
+	u32 val = 0;
+
+	if (!device) {
+		pr_err("%s: device is NULL\n", __func__);
+		return count;
+	}
+
+	rc = kstrtol(buf, 0, &reg);
+	if (rc) {
+		pr_err("%s: kstrol error %d\n", __func__, rc);
+	} else {
+		mutex_lock(&device->dev_mutex);
+
+		if (device->dev_ops.read_reg) {
+			rc = device->dev_ops.read_reg(device,
+						      (u32)reg,
+						      &val);
+
+			if (rc) {
+				pr_err("%s: failed to write reg %d", __func__,
+				       rc);
+			} else {
+				device->register_val = val;
+			}
+		} else {
+			pr_err("%s: not supported\n", __func__);
+		}
+
+		mutex_unlock(&device->dev_mutex);
+	}
+
+	return count;
+}
+
+/* sysfs 'dump_info' store: any write triggers the device's debug dump
+ * hook, when one is installed.  The written data is ignored.
+ */
+static ssize_t dump_info_wta_attr(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf,
+				  size_t count)
+{
+	struct msm_dba_device_info *device = to_dba_dev(dev);
+	int rc;
+
+	if (!device) {
+		pr_err("%s: device is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	if (device->dev_ops.dump_debug_info) {
+		rc = device->dev_ops.dump_debug_info(device, 0x00);
+		if (rc)
+			pr_err("%s: failed to dump debug data\n", __func__);
+	} else {
+		pr_err("%s: not supported\n", __func__);
+	}
+
+	return count;
+}
+
+/* sysfs attribute declarations: read-only status nodes, write-only
+ * control nodes (write_reg/dump_info), and read-write read_reg.
+ */
+static DEVICE_ATTR(device_name, 0444, device_name_rda_attr, NULL);
+static DEVICE_ATTR(client_list, 0444, client_list_rda_attr, NULL);
+static DEVICE_ATTR(power_status, 0444, power_status_rda_attr, NULL);
+static DEVICE_ATTR(video_status, 0444, video_status_rda_attr, NULL);
+static DEVICE_ATTR(audio_status, 0444, audio_status_rda_attr, NULL);
+static DEVICE_ATTR(write_reg, 0200, NULL, write_reg_wta_attr);
+static DEVICE_ATTR(read_reg, 0644, read_reg_rda_attr,
+		   read_reg_wta_attr);
+static DEVICE_ATTR(dump_info, 0200, NULL, dump_info_wta_attr);
+
+static struct attribute *msm_dba_sysfs_attrs[] = {
+	&dev_attr_device_name.attr,
+	&dev_attr_client_list.attr,
+	&dev_attr_power_status.attr,
+	&dev_attr_video_status.attr,
+	&dev_attr_audio_status.attr,
+	&dev_attr_write_reg.attr,
+	&dev_attr_read_reg.attr,
+	&dev_attr_dump_info.attr,
+	NULL,
+};
+
+static struct attribute_group msm_dba_sysfs_attr_grp = {
+	.attrs = msm_dba_sysfs_attrs,
+};
+
+/* Create the msm_dba sysfs attribute group under the bridge device.
+ * Called by bridge drivers at probe time; returns 0 or a negative
+ * errno from sysfs_create_group().
+ */
+int msm_dba_helper_sysfs_init(struct device *dev)
+{
+	int rc = 0;
+
+	if (!dev) {
+		pr_err("%s: Invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = sysfs_create_group(&dev->kobj, &msm_dba_sysfs_attr_grp);
+	if (rc)
+		pr_err("%s: sysfs group creation failed %d\n", __func__, rc);
+
+	return rc;
+}
+
+/* Remove the msm_dba sysfs attribute group added at probe time. */
+void msm_dba_helper_sysfs_remove(struct device *dev)
+{
+	if (!dev) {
+		pr_err("%s: Invalid params\n", __func__);
+		return;
+	}
+
+	sysfs_remove_group(&dev->kobj, &msm_dba_sysfs_attr_grp);
+}
diff --git a/drivers/video/fbdev/msm/msm_dba/msm_dba_helpers.c b/drivers/video/fbdev/msm/msm_dba/msm_dba_helpers.c
new file mode 100644
index 0000000..5074624
--- /dev/null
+++ b/drivers/video/fbdev/msm/msm_dba/msm_dba_helpers.c
@@ -0,0 +1,445 @@
+/*
+ * Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/i2c.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+
+#include <video/msm_dba.h>
+#include "msm_dba_internal.h"
+
+/*
+ * msm_dba_helper_hdcp_handler() - deferred HDCP link-recovery work
+ * @work: hdcp_work member embedded in struct msm_dba_device_info
+ *
+ * Queued from the irq helper when an HDCP-unauthenticated event is seen.
+ * Under dev_mutex: if the link has meanwhile authenticated, nothing is
+ * done; otherwise the device-specific hdcp_reset and an asynchronous
+ * hdcp_retry are invoked when those callbacks are provided.
+ */
+static void msm_dba_helper_hdcp_handler(struct work_struct *work)
+{
+	struct msm_dba_device_info *dev;
+	int rc = 0;
+
+	if (!work) {
+		pr_err("%s: Invalid params\n", __func__);
+		return;
+	}
+
+	dev = container_of(work, struct msm_dba_device_info, hdcp_work);
+
+	mutex_lock(&dev->dev_mutex);
+	/* Link may have re-authenticated before this work ran. */
+	if (dev->hdcp_status) {
+		pr_debug("%s[%s:%d] HDCP is authenticated\n", __func__,
+			 dev->chip_name, dev->instance_id);
+		mutex_unlock(&dev->dev_mutex);
+		return;
+	}
+
+	if (dev->dev_ops.hdcp_reset) {
+		rc = dev->dev_ops.hdcp_reset(dev);
+		if (rc)
+			pr_err("%s[%s:%d] HDCP reset failed\n", __func__,
+			       dev->chip_name, dev->instance_id);
+	}
+
+	/* Retry even if reset failed; retry is asynchronous. */
+	if (dev->dev_ops.hdcp_retry) {
+		rc = dev->dev_ops.hdcp_retry(dev, MSM_DBA_ASYNC_FLAG);
+		if (rc)
+			pr_err("%s[%s:%d] HDCP retry failed\n", __func__,
+			       dev->chip_name, dev->instance_id);
+	}
+	mutex_unlock(&dev->dev_mutex);
+}
+
+/*
+ * msm_dba_helper_issue_cb() - fan an event out to registered clients
+ * @dev: device whose client list is walked
+ * @client: originating client to skip, or NULL to notify all clients
+ * @event: bitmask of callback events that occurred
+ *
+ * Invokes each client's callback with the bits of @event that client
+ * subscribed to via its event_mask. NOTE(review): callers in this file
+ * hold dev->dev_mutex around this call; the list walk relies on that.
+ */
+static void msm_dba_helper_issue_cb(struct msm_dba_device_info *dev,
+				    struct msm_dba_client_info *client,
+				    enum msm_dba_callback_event event)
+{
+	struct msm_dba_client_info *c;
+	struct list_head *pos = NULL;
+	u32 user_mask = 0;
+
+	list_for_each(pos, &dev->client_list) {
+		c = list_entry(pos, struct msm_dba_client_info, list);
+		/* Do not notify the client that triggered the event. */
+		if (client && client == c)
+			continue;
+
+		user_mask = c->event_mask & event;
+		if (c->cb && user_mask)
+			c->cb(c->cb_data, user_mask);
+	}
+}
+
+/*
+ * msm_dba_helper_irq_handler() - threaded irq handler for DBA devices
+ * @irq: irq number
+ * @dev: struct msm_dba_device_info * registered via request_threaded_irq
+ *
+ * Asks the bridge driver (handle_interrupts) for the event mask, issues
+ * client callbacks for it, schedules HDCP recovery work when the link
+ * dropped and monitoring is on, then unmasks interrupts.
+ */
+static irqreturn_t msm_dba_helper_irq_handler(int irq, void *dev)
+{
+	struct msm_dba_device_info *device = dev;
+	u32 mask = 0;
+	int rc = 0;
+	bool ret;
+
+	mutex_lock(&device->dev_mutex);
+	if (device->dev_ops.handle_interrupts) {
+		rc = device->dev_ops.handle_interrupts(device, &mask);
+		if (rc)
+			pr_err("%s: interrupt handler failed\n", __func__);
+	}
+
+	pr_debug("%s(%s:%d): Eventmask  = 0x%x\n", __func__, device->chip_name,
+		 device->instance_id, mask);
+	if (mask)
+		msm_dba_helper_issue_cb(device, NULL, mask);
+
+	if ((mask & MSM_DBA_CB_HDCP_LINK_UNAUTHENTICATED) &&
+	     device->hdcp_monitor_on) {
+		ret = queue_work(device->hdcp_wq, &device->hdcp_work);
+		/*
+		 * queue_work() returns false only when the work is already
+		 * pending; the old log printed the unrelated rc value here.
+		 */
+		if (!ret)
+			pr_err("%s: queue_work failed\n", __func__);
+	}
+
+	if (device->dev_ops.unmask_interrupts)
+		rc = device->dev_ops.unmask_interrupts(device, mask);
+
+	mutex_unlock(&device->dev_mutex);
+	return IRQ_HANDLED;
+}
+
+/*
+ * msm_dba_helper_i2c_write_byte() - write one register on an i2c slave
+ * @client: i2c client (its addr field is overwritten with @addr)
+ * @addr: 7-bit slave address
+ * @reg: register offset to write
+ * @val: value to write
+ *
+ * Return: 0 on success, -EINVAL on NULL @client, -EIO on transfer failure.
+ */
+int msm_dba_helper_i2c_write_byte(struct i2c_client *client,
+				  u8 addr,
+				  u8 reg,
+				  u8 val)
+{
+	int rc = 0;
+	struct i2c_msg msg;
+	u8 buf[2] = {reg, val};
+
+	if (!client) {
+		pr_err("%s: Invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: [%s:0x%02x] : W[0x%02x, 0x%02x]\n", __func__,
+		 client->name, addr, reg, val);
+	client->addr = addr;
+
+	msg.addr = addr;
+	msg.flags = 0;
+	msg.len = 2;
+	msg.buf = buf;
+
+	/* One message requested; "!= 1" matches the sibling helpers. */
+	if (i2c_transfer(client->adapter, &msg, 1) != 1) {
+		pr_err("%s: i2c write failed\n", __func__);
+		rc = -EIO;
+	}
+
+	return rc;
+}
+
+/*
+ * msm_dba_helper_i2c_write_buffer() - write a raw buffer to an i2c slave
+ * @client: i2c client (its addr field is overwritten with @addr)
+ * @addr: 7-bit slave address
+ * @buf: data to send (caller owns; first byte is typically the register)
+ * @size: number of bytes to send
+ *
+ * Return: 0 on success, -EINVAL on NULL @client, -EIO on transfer failure.
+ */
+int msm_dba_helper_i2c_write_buffer(struct i2c_client *client,
+				    u8 addr,
+				    u8 *buf,
+				    u32 size)
+{
+	int rc = 0;
+	struct i2c_msg msg;
+
+	if (!client) {
+		pr_err("%s: Invalid params\n", __func__);
+		return -EINVAL;
+	}
+	/* Format was "0x02%x" (printed "0x02" + hex); "0x%02x" intended. */
+	pr_debug("%s: [%s:0x%02x] : W %d bytes\n", __func__,
+		 client->name, addr, size);
+
+	client->addr = addr;
+
+	msg.addr = addr;
+	msg.flags = 0;
+	msg.len = size;
+	msg.buf = buf;
+
+	if (i2c_transfer(client->adapter, &msg, 1) != 1) {
+		pr_err("%s: i2c write failed\n", __func__);
+		rc = -EIO;
+	}
+
+	return rc;
+}
+/*
+ * msm_dba_helper_i2c_read() - read @size bytes from a slave register
+ * @client: i2c client (its addr field is overwritten with @addr)
+ * @addr: 7-bit slave address
+ * @reg: register offset to read from
+ * @buf: destination buffer, at least @size bytes
+ * @size: number of bytes to read
+ *
+ * Performs a combined write(reg)/read transaction.
+ * Return: 0 on success, -EINVAL on NULL args, -EIO on transfer failure.
+ */
+int msm_dba_helper_i2c_read(struct i2c_client *client,
+			    u8 addr,
+			    u8 reg,
+			    char *buf,
+			    u32 size)
+{
+	int rc = 0;
+	struct i2c_msg msg[2];
+
+	if (!client || !buf) {
+		pr_err("%s: Invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	client->addr = addr;
+
+	msg[0].addr = addr;
+	msg[0].flags = 0;
+	msg[0].len = 1;
+	msg[0].buf = &reg;
+
+	msg[1].addr = addr;
+	msg[1].flags = I2C_M_RD;
+	msg[1].len = size;
+	msg[1].buf = buf;
+
+	if (i2c_transfer(client->adapter, msg, 2) != 2) {
+		pr_err("%s: i2c read failed\n", __func__);
+		rc = -EIO;
+	} else {
+		/*
+		 * Log only on success: on failure *buf is indeterminate.
+		 * Also fixes the "0x02%x" -> "0x%02x" format typo.
+		 */
+		pr_debug("%s: [%s:0x%02x] : R[0x%02x, 0x%02x]\n", __func__,
+			 client->name, addr, reg, *buf);
+	}
+
+	return rc;
+}
+
+/*
+ * msm_dba_helper_power_on() - reference-counted power on/off helper
+ * @client: client handle (struct msm_dba_client_info *)
+ * @on: true to power on, false to drop this client's power vote
+ * @flags: unused by this helper
+ *
+ * The device powers on when the first client asks; it powers off only
+ * once no registered client still holds a power vote.
+ * Return: 0 on success or the error from dev_ops.dev_power_on().
+ */
+int msm_dba_helper_power_on(void *client, bool on, u32 flags)
+{
+	int rc = 0;
+	struct msm_dba_client_info *c = client;
+	struct msm_dba_device_info *device;
+	struct msm_dba_client_info *node;
+	struct list_head *pos = NULL;
+	bool power_on = false;
+
+	if (!c) {
+		pr_err("%s: Invalid Params\n", __func__);
+		return -EINVAL;
+	}
+
+	device = c->dev;
+	mutex_lock(&device->dev_mutex);
+
+	/*
+	 * Power on the device if at least one client powers on the device.
+	 * But power off will be done only after all the clients have called
+	 * power off.
+	 */
+	if (on == device->power_status) {
+		c->power_on = on;
+	} else if (on) {
+		rc = device->dev_ops.dev_power_on(device, on);
+		if (rc)
+			pr_err("%s:%s: power on failed\n", device->chip_name,
+			       __func__);
+		else
+			c->power_on = on;
+	} else {
+		c->power_on = false;
+
+		/*
+		 * Check each remaining client's vote, not the caller's:
+		 * the old code tested c->power_on (just cleared above),
+		 * so the device was powered off even while other clients
+		 * still needed it.
+		 */
+		list_for_each(pos, &device->client_list) {
+			node = list_entry(pos, struct msm_dba_client_info,
+					  list);
+			if (node->power_on) {
+				power_on = true;
+				break;
+			}
+		}
+
+		if (!power_on) {
+			rc = device->dev_ops.dev_power_on(device, false);
+			if (rc) {
+				pr_err("%s:%s: power off failed\n",
+				       device->chip_name, __func__);
+				c->power_on = true;
+			}
+		}
+	}
+
+	mutex_unlock(&device->dev_mutex);
+	return rc;
+}
+
+/*
+ * msm_dba_helper_video_on() - reference-counted video on/off helper
+ * @client: client handle (struct msm_dba_client_info *)
+ * @on: true to enable video, false to drop this client's video vote
+ * @cfg: video timing/configuration forwarded to dev_ops.dev_video_on()
+ * @flags: unused by this helper
+ *
+ * Video turns on when the first client asks; it turns off only once no
+ * registered client still holds a video vote.
+ * Return: 0 on success or the error from dev_ops.dev_video_on().
+ */
+int msm_dba_helper_video_on(void *client, bool on,
+			    struct msm_dba_video_cfg *cfg, u32 flags)
+{
+	int rc = 0;
+	struct msm_dba_client_info *c = client;
+	struct msm_dba_device_info *device;
+	struct msm_dba_client_info *node;
+	struct list_head *pos = NULL;
+	bool video_on = false;
+
+	if (!c) {
+		pr_err("%s: Invalid Params\n", __func__);
+		return -EINVAL;
+	}
+
+	device = c->dev;
+	mutex_lock(&device->dev_mutex);
+
+	/*
+	 * Video will be turned on if at least one client turns on video. But
+	 * video off will be done only after all the clients have called video
+	 * off
+	 */
+	if (on == device->video_status) {
+		c->video_on = on;
+	} else if (on) {
+		rc = device->dev_ops.dev_video_on(device, cfg, on);
+		if (rc)
+			pr_err("%s:%s: video on failed\n", device->chip_name,
+			       __func__);
+		else
+			c->video_on = on;
+	} else {
+		c->video_on = false;
+
+		/*
+		 * Check each remaining client's vote, not the caller's:
+		 * the old code tested c->video_on (just cleared above),
+		 * so video was disabled even while other clients still
+		 * had it on.
+		 */
+		list_for_each(pos, &device->client_list) {
+			node = list_entry(pos, struct msm_dba_client_info,
+					  list);
+			if (node->video_on) {
+				video_on = true;
+				break;
+			}
+		}
+
+		if (!video_on) {
+			rc = device->dev_ops.dev_video_on(device, cfg, false);
+			if (rc) {
+				pr_err("%s:%s: video off failed\n",
+				       device->chip_name, __func__);
+				c->video_on = true;
+			}
+		}
+	}
+
+	mutex_unlock(&device->dev_mutex);
+	return rc;
+}
+
+/*
+ * msm_dba_helper_interrupts_enable() - set a client's callback mask
+ * @client: client handle (struct msm_dba_client_info *)
+ * @on: true to subscribe to @event_mask, false to clear all events
+ * @event_mask: events the client wants callbacks for
+ * @flags: unused by this helper
+ *
+ * Only bookkeeping: records the mask consulted by the irq fan-out.
+ * Return: 0, or -EINVAL on a NULL client.
+ */
+int msm_dba_helper_interrupts_enable(void *client, bool on,
+				     u32 event_mask, u32 flags)
+{
+	struct msm_dba_client_info *c = client;
+	struct msm_dba_device_info *device;
+
+	if (!c) {
+		pr_err("%s: Invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	device = c->dev;
+	mutex_lock(&device->dev_mutex);
+
+	if (on)
+		c->event_mask = event_mask;
+	else
+		c->event_mask = 0;
+
+	mutex_unlock(&device->dev_mutex);
+	return 0;
+}
+
+/*
+ * msm_dba_helper_register_irq() - attach the helper threaded irq handler
+ * @dev: device that owns the irq
+ * @irq: irq number to request
+ * @irq_flags: flags passed through to request_threaded_irq()
+ *
+ * The threaded handler (msm_dba_helper_irq_handler) takes dev_mutex
+ * itself, so it will serialize against this registration.
+ * Return: 0 on success or the request_threaded_irq() error.
+ */
+int msm_dba_helper_register_irq(struct msm_dba_device_info *dev,
+				u32 irq, u32 irq_flags)
+{
+	int rc;
+
+	if (!dev) {
+		pr_err("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&dev->dev_mutex);
+
+	rc = request_threaded_irq(irq, NULL, msm_dba_helper_irq_handler,
+				  irq_flags, dev->chip_name, dev);
+
+	if (rc)
+		pr_err("%s:%s: Failed to register irq\n", dev->chip_name,
+		       __func__);
+
+	mutex_unlock(&dev->dev_mutex);
+	return rc;
+}
+
+/*
+ * msm_dba_helper_get_caps() - copy device capabilities to the caller
+ * @client: client handle (struct msm_dba_client_info *)
+ * @caps: destination capabilities structure, filled on success
+ *
+ * Return: 0 on success, -EINVAL on NULL arguments.
+ */
+int msm_dba_helper_get_caps(void *client, struct msm_dba_capabilities *caps)
+{
+	struct msm_dba_client_info *c = client;
+	struct msm_dba_device_info *device;
+
+	if (!c || !caps) {
+		pr_err("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	device = c->dev;
+	mutex_lock(&device->dev_mutex);
+
+	memcpy(caps, &device->caps, sizeof(*caps));
+
+	mutex_unlock(&device->dev_mutex);
+	return 0;
+}
+
+/*
+ * msm_dba_register_hdcp_monitor() - enable/disable HDCP failure monitor
+ * @dev: device to monitor
+ * @enable: true to allocate the monitor workqueue, false to destroy it
+ *
+ * Must only be used when interrupts are handled via the DBA helpers,
+ * since the irq handler is what queues the recovery work.
+ * Return: 0 on success, -EINVAL on NULL @dev, -ENOMEM on wq allocation
+ * failure.
+ */
+int msm_dba_register_hdcp_monitor(struct msm_dba_device_info *dev, bool enable)
+{
+	if (!dev) {
+		pr_err("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	if (enable) {
+		/* Already enabled: don't leak the existing workqueue. */
+		if (dev->hdcp_wq)
+			return 0;
+
+		dev->hdcp_wq = alloc_workqueue("hdcp_monitor(%s:%d)", 0, 0,
+					       dev->chip_name,
+					       dev->instance_id);
+		if (!dev->hdcp_wq) {
+			pr_err("%s: failed to allocate wq\n", __func__);
+			return -ENOMEM;
+		}
+
+		INIT_WORK(&dev->hdcp_work, msm_dba_helper_hdcp_handler);
+		dev->hdcp_monitor_on = true;
+	} else if (dev->hdcp_wq) {
+		/* enable is false here; the old "!enable &&" was redundant. */
+		destroy_workqueue(dev->hdcp_wq);
+		dev->hdcp_wq = NULL;
+		dev->hdcp_monitor_on = false;
+	}
+
+	return 0;
+}
+
+/*
+ * msm_dba_helper_force_reset() - force-reset the bridge chip
+ * @client: client handle requesting the reset (skipped in callbacks)
+ * @flags: forwarded to dev_ops.force_reset()
+ *
+ * Notifies the other clients with PRE_RESET, invokes the device's
+ * force_reset callback if provided, then notifies with POST_RESET
+ * (issued even if the reset itself failed).
+ * Return: 0 on success or the force_reset() error.
+ */
+int msm_dba_helper_force_reset(void *client, u32 flags)
+{
+	struct msm_dba_client_info *c = client;
+	struct msm_dba_device_info *device;
+	int rc = 0;
+
+	if (!c) {
+		pr_err("%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	device = c->dev;
+	mutex_lock(&device->dev_mutex);
+
+	msm_dba_helper_issue_cb(device, c, MSM_DBA_CB_PRE_RESET);
+
+	if (device->dev_ops.force_reset)
+		rc = device->dev_ops.force_reset(device, flags);
+
+	if (rc)
+		pr_err("%s: Force reset failed\n", __func__);
+
+	msm_dba_helper_issue_cb(device, c, MSM_DBA_CB_POST_RESET);
+
+	mutex_unlock(&device->dev_mutex);
+	return rc;
+}
diff --git a/drivers/video/fbdev/msm/msm_dba/msm_dba_init.c b/drivers/video/fbdev/msm/msm_dba/msm_dba_init.c
new file mode 100644
index 0000000..0c40faf
--- /dev/null
+++ b/drivers/video/fbdev/msm/msm_dba/msm_dba_init.c
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+
+#include <video/msm_dba.h>
+#include "msm_dba_internal.h"
+
+/* List node binding one probed bridge device into the global list. */
+struct msm_dba_device_list {
+	struct msm_dba_device_info *dev;
+	struct list_head list;
+};
+
+/* All probed DBA devices; every access is guarded by init_mutex. */
+static LIST_HEAD(device_list);
+static DEFINE_MUTEX(init_mutex);
+
+/*
+ * msm_dba_add_probed_device() - record a newly probed bridge device
+ * @dev: device info allocated and owned by the bridge driver; must stay
+ *	 valid until msm_dba_remove_probed_device() is called.
+ *
+ * Return: 0 on success, -EINVAL on NULL @dev, -ENOMEM on allocation
+ * failure.
+ */
+int msm_dba_add_probed_device(struct msm_dba_device_info *dev)
+{
+	struct msm_dba_device_list *node;
+
+	if (!dev) {
+		pr_err("%s: Invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&init_mutex);
+
+	/* kzalloc() zero-fills; the old explicit memset was redundant. */
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node) {
+		mutex_unlock(&init_mutex);
+		return -ENOMEM;
+	}
+
+	node->dev = dev;
+	list_add(&node->list, &device_list);
+
+	pr_debug("%s: Added new device (%s, %d)\n", __func__, dev->chip_name,
+						dev->instance_id);
+	mutex_unlock(&init_mutex);
+
+	return 0;
+}
+
+/*
+ * msm_dba_get_probed_device() - look up a probed device by name and id
+ * @reg: registration info from the client (chip_name + instance_id)
+ * @dev: out parameter; set to the matching device info, or NULL
+ *
+ * Return: 0 when a match is found, -EINVAL on NULL args, -ENODEV when
+ * no probed device matches.
+ */
+int msm_dba_get_probed_device(struct msm_dba_reg_info *reg,
+			      struct msm_dba_device_info **dev)
+{
+	int rc = 0;
+	struct msm_dba_device_list *node;
+	struct list_head *position = NULL;
+
+	if (!reg || !dev) {
+		pr_err("%s: Invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&init_mutex);
+
+	*dev = NULL;
+	list_for_each(position, &device_list) {
+		node = list_entry(position, struct msm_dba_device_list, list);
+		if (!strcmp(reg->chip_name, node->dev->chip_name) &&
+		    reg->instance_id == node->dev->instance_id) {
+			pr_debug("%s: Found device (%s, %d)\n", __func__,
+							reg->chip_name,
+							reg->instance_id);
+			*dev = node->dev;
+			break;
+		}
+	}
+
+	if (!*dev) {
+		pr_err("%s: Device not found (%s, %d)\n", __func__,
+							reg->chip_name,
+							reg->instance_id);
+		rc = -ENODEV;
+	}
+
+	mutex_unlock(&init_mutex);
+
+	return rc;
+}
+
+/*
+ * msm_dba_remove_probed_device() - drop a device from the probed list
+ * @dev: the pointer previously passed to msm_dba_add_probed_device()
+ *
+ * Frees only the internal list node; @dev itself remains owned by the
+ * bridge driver. Silently succeeds if @dev was never added.
+ * Return: 0, or -EINVAL on NULL @dev.
+ */
+int msm_dba_remove_probed_device(struct msm_dba_device_info *dev)
+{
+	struct msm_dba_device_list *node;
+	struct list_head *position = NULL;
+	struct list_head *temp = NULL;
+
+	if (!dev) {
+		pr_err("%s: Invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&init_mutex);
+
+	/* _safe variant: the node is deleted while iterating. */
+	list_for_each_safe(position, temp, &device_list) {
+		node = list_entry(position, struct msm_dba_device_list, list);
+		if (node->dev == dev) {
+			list_del(&node->list);
+			pr_debug("%s: Removed device (%s, %d)\n", __func__,
+							    dev->chip_name,
+							    dev->instance_id);
+			kfree(node);
+			break;
+		}
+	}
+
+	mutex_unlock(&init_mutex);
+
+	return 0;
+}
diff --git a/drivers/video/fbdev/msm/msm_dba/msm_dba_internal.h b/drivers/video/fbdev/msm/msm_dba/msm_dba_internal.h
new file mode 100644
index 0000000..78c6d2a
--- /dev/null
+++ b/drivers/video/fbdev/msm/msm_dba/msm_dba_internal.h
@@ -0,0 +1,324 @@
+/*
+ * Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_DBA_INTERNAL_H
+#define _MSM_DBA_INTERNAL_H
+
+#include <video/msm_dba.h>
+
+struct msm_dba_client_info;
+struct msm_dba_device_info;
+
+/**
+ * struct msm_dba_device_ops - Function pointers to device specific operations
+ * @dev_power_on: Power on operation called by msm_dba_helper_power_on. Mutex
+ *		   protection is handled by the caller.
+ * @dev_video_on: Video on operation called by msm_dba_helper_video_on. Mutex
+ *		   protection is handled by the caller.
+ * @handle_interrupts: Function pointer called when an interrupt is fired. If
+ *		        the bridge driver uses msm_dba_helper_register_irq
+ *		        for handling interrupts, irq handler will call
+ *		        handle_interrupts to figure out the event mask.
+ * @unmask_interrupts: Function pointer called by irq handler for unmasking
+ *		       interrupts.
+ * @hdcp_reset: Function pointer to reset the HDCP block. This needs to be valid
+ *		if HDCP monitor is used.
+ * @hdcp_retry: Function pointer to retry HDCP authentication. This needs to be
+ *		valid if HDCP monitor is used.
+ * @write_reg: Function pointer to write to device specific register.
+ * @read_reg: Function pointer to read device specific register.
+ * @force_reset: Function pointer to force reset the device.
+ * @dump_debug_info: Function pointer to trigger a dump to dmesg.
+ *
+ * The device operation function pointers are used if bridge driver uses helper
+ * functions in place of some client operations. If used, the helper functions
+ * will call the device function pointers to perform device specific
+ * programming.
+ */
+struct msm_dba_device_ops {
+	int (*dev_power_on)(struct msm_dba_device_info *dev, bool on);
+	int (*dev_video_on)(struct msm_dba_device_info *dev,
+			    struct msm_dba_video_cfg *cfg, bool on);
+	int (*handle_interrupts)(struct msm_dba_device_info *dev, u32 *mask);
+	int (*unmask_interrupts)(struct msm_dba_device_info *dev, u32 mask);
+	int (*hdcp_reset)(struct msm_dba_device_info *dev);
+	int (*hdcp_retry)(struct msm_dba_device_info *dev, u32 flags);
+	int (*write_reg)(struct msm_dba_device_info *dev, u32 reg, u32 val);
+	int (*read_reg)(struct msm_dba_device_info *dev, u32 reg, u32 *val);
+	int (*force_reset)(struct msm_dba_device_info *dev, u32 flags);
+	int (*dump_debug_info)(struct msm_dba_device_info *dev, u32 flags);
+};
+
+/**
+ * struct msm_dba_device_info - Device specific information
+ * @chip_name: chip name
+ * @instance_id: Instance id
+ * @caps: Capabilities of the bridge chip
+ * @dev_ops: function pointers to device specific operations
+ * @client_ops: function pointers to client operations
+ * @dev_mutex: mutex for protecting device access
+ * @hdcp_wq: HDCP workqueue for handling failures.
+ * @client_list: list head for client list
+ * @reg_fxn: Function pointer called when a client registers with dba driver
+ * @dereg_fxn: Function pointer called when a client deregisters.
+ * @power_status: current power status of device
+ * @video_status: current video status of device
+ * @audio_status: current audio status of device
+ * @hdcp_on: hdcp enable status.
+ * @enc_on: encryption enable status.
+ * @hdcp_status: hdcp link status.
+ * @hdcp_monitor_on: hdcp monitor status
+ * @register_val: debug field used to support read register.
+ *
+ * Structure containing device specific information. This structure is allocated
+ * by the bridge driver. This structure should be unique to each device.
+ *
+ */
+struct msm_dba_device_info {
+	char chip_name[MSM_DBA_CHIP_NAME_MAX_LEN];
+	u32 instance_id;
+	struct msm_dba_capabilities caps;
+	struct msm_dba_device_ops dev_ops;
+	struct msm_dba_ops client_ops;
+	struct mutex dev_mutex;
+	struct workqueue_struct *hdcp_wq;
+	struct work_struct hdcp_work;
+	struct list_head client_list;
+	int (*reg_fxn)(struct msm_dba_client_info *client);
+	int (*dereg_fxn)(struct msm_dba_client_info *client);
+
+	bool power_status;
+	bool video_status;
+	bool audio_status;
+	bool hdcp_on;
+	bool enc_on;
+	bool hdcp_status;
+	bool hdcp_monitor_on;
+
+	/* Debug info */
+	u32 register_val;
+};
+
+/**
+ * struct msm_dba_client_info - Client specific information
+ * @dev: pointer to device information
+ * @client_name: client name
+ * @power_on: client power on status
+ * @video_on: client video on status
+ * @audio_on: client audio on status
+ * @event_mask: client event mask for callbacks.
+ * @cb: callback function for the client
+ * @cb_data: callback data pointer.
+ * @list: list pointer
+ *
+ * This structure is used to uniquely identify a client for a bridge chip. The
+ * pointer to this structure is returned as a handle from
+ * msm_dba_register_client.
+ */
+struct msm_dba_client_info {
+	struct msm_dba_device_info *dev;
+	char client_name[MSM_DBA_CLIENT_NAME_LEN];
+	bool power_on;
+	bool video_on;
+	bool audio_on;
+	u32 event_mask;
+	msm_dba_cb cb;
+	void *cb_data;
+	struct list_head list;
+};
+
+/**
+ * msm_dba_add_probed_device() - Add a new device to the probed devices list.
+ * @info: Pointer to structure containing the device information. This should be
+ *	  allocated by the specific bridge driver and kept until
+ *	  msm_dba_remove_probed_device() is called.
+ *
+ * Once a bridge chip is initialized and probed, it should add its device to the
+ * existing list of all probed display bridge chips. This list is maintained by
+ * the MSM DBA driver and is checked whenever there is a client register
+ * request.
+ */
+int msm_dba_add_probed_device(struct msm_dba_device_info *info);
+
+/**
+ * msm_dba_remove_probed_device() - Remove a device from the probed devices list
+ * @info: Pointer to structure containing the device info. This should be the
+ *	  same pointer used for msm_dba_add_probed_device().
+ *
+ * Bridge chip driver should call this to remove device from probed list.
+ */
+int msm_dba_remove_probed_device(struct msm_dba_device_info *info);
+
+/**
+ * msm_dba_get_probed_device() - Check if a device is present in the device list
+ * @reg: Pointer to structure containing the chip info received from the client
+ *	 driver
+ * @info: Pointer to the device info pointer that will be returned if the device
+ *	  has been found in the device list
+ *
+ * When clients of the MSM DBA driver call msm_dba_register_client(), the MSM
+ * DBA driver will use this function to check if the specific device requested
+ * by the client has been probed. If probed, function will return a pointer to
+ * the device information structure.
+ */
+int msm_dba_get_probed_device(struct msm_dba_reg_info *reg,
+			      struct msm_dba_device_info **info);
+
+/**
+ * msm_dba_helper_i2c_read() - perform an i2c read transaction
+ * @client: i2c client pointer
+ * @addr: i2c slave address
+ * @reg: register where the data should be read from
+ * @buf: buffer where the read data is stored.
+ * @size: bytes to read from slave. buffer should be at least size bytes.
+ *
+ * Helper function to perform a read from an i2c slave. Internally this calls
+ * i2c_transfer().
+ */
+int msm_dba_helper_i2c_read(struct i2c_client *client,
+			    u8 addr,
+			    u8 reg,
+			    char *buf,
+			    u32 size);
+
+/**
+ * msm_dba_helper_i2c_write_buffer() - write buffer to i2c slave.
+ * @client: i2c client pointer
+ * @addr: i2c slave address
+ * @buf: buffer where the data will be read from.
+ * @size: bytes to write.
+ *
+ * Helper function to perform a write to an i2c slave. Internally this calls
+ * i2c_transfer().
+ */
+int msm_dba_helper_i2c_write_buffer(struct i2c_client *client,
+				    u8 addr,
+				    u8 *buf,
+				    u32 size);
+
+/**
+ * msm_dba_helper_i2c_write_byte() - write to a register on an i2c slave.
+ * @client: i2c client pointer
+ * @addr: i2c slave address
+ * @reg: slave register to write to
+ * @val: data to write.
+ *
+ * Helper function to perform a write to an i2c slave. Internally this calls
+ * i2c_transfer().
+ */
+int msm_dba_helper_i2c_write_byte(struct i2c_client *client,
+				  u8 addr,
+				  u8 reg,
+				  u8 val);
+
+/**
+ * msm_dba_helper_power_on() - power on bridge chip
+ * @client: client handle
+ * @on: on/off
+ * @flags: flags
+ *
+ * This helper function can be used as power_on() function defined in struct
+ * msm_dba_ops. Internally, this function does some bookkeeping to figure out
+ * when to actually power on/off the device. If used, bridge driver should
+ * provide a dev_power_on to do the device specific power change.
+ */
+int msm_dba_helper_power_on(void *client, bool on, u32 flags);
+
+/**
+ * msm_dba_helper_video_on() - video on bridge chip
+ * @client: client handle
+ * @on: on/off
+ * @flags: flags
+ *
+ * This helper function can be used as video_on() function defined in struct
+ * msm_dba_ops. Internally, this function does some bookkeeping to figure out
+ * when to actually video on/off the device. If used, bridge driver should
+ * provide a dev_video_on to do the device specific video change.
+ */
+int msm_dba_helper_video_on(void *client, bool on,
+			    struct msm_dba_video_cfg *cfg, u32 flags);
+
+/**
+ * msm_dba_helper_interrupts_enable() - manage interrupt callbacks
+ * @client: client handle
+ * @on: on/off
+ * @events_mask: events on which callbacks are required.
+ * @flags: flags
+ *
+ * This helper function provides the functionality needed for interrupts_enable
+ * function pointer in struct msm_dba_ops.
+ */
+int msm_dba_helper_interrupts_enable(void *client, bool on,
+				     u32 events_mask, u32 flags);
+
+/**
+ * msm_dba_helper_get_caps() - return device capabilities
+ * @client: client handle
+ * @caps: pointer to the capabilities structure that will be filled in
+ *
+ * Helper function to replace get_caps function pointer in struct msm_dba_ops
+ * structure.
+ */
+int msm_dba_helper_get_caps(void *client, struct msm_dba_capabilities *caps);
+
+/**
+ * msm_dba_helper_register_irq() - register irq and handle interrupts.
+ * @dev: pointer to device structure
+ * @irq: irq number
+ * @irq_flags: irq_flags.
+ *
+ * Helper function register an irq and handling interrupts. This will attach a
+ * threaded interrupt handler to the irq provided as input. When the irq
+ * handler is triggered, handler will call handle_interrupts in the device
+ * specific functions pointers so that bridge driver can parse the interrupt
+ * status registers and return the event mask. IRQ handler will use this event
+ * mask to provide callbacks to the clients. Once the callbacks are done,
+ * handler will call unmask_interrupts() before returning,
+ */
+int msm_dba_helper_register_irq(struct msm_dba_device_info *dev,
+				u32 irq, u32 irq_flags);
+
+/**
+ * msm_dba_register_hdcp_monitor() - kicks off monitoring for hdcp failures
+ * @dev: pointer to device structure.
+ * @enable: enable/disable
+ *
+ * Helper function to enable HDCP monitoring. This should be called only if irq
+ * is handled through msm dba helper functions.
+ */
+int msm_dba_register_hdcp_monitor(struct msm_dba_device_info *dev, bool enable);
+
+/**
+ * msm_dba_helper_sysfs_init() - create sysfs attributes for debugging
+ * @dev: pointer to struct device structure.
+ *
+ */
+int msm_dba_helper_sysfs_init(struct device *dev);
+
+/**
+ * msm_dba_helper_sysfs_remove() - remove sysfs attributes
+ * @dev: pointer to struct device structure.
+ *
+ */
+void msm_dba_helper_sysfs_remove(struct device *dev);
+
+/**
+ * msm_dba_helper_force_reset() - force reset bridge chip
+ * @client: client handle
+ * @flags: flags
+ *
+ * Helper function to replace force_reset function pointer in struct msm_dba_ops
+ * structure. Driver should set dev_ops.force_reset to a valid function.
+ */
+int msm_dba_helper_force_reset(void *client, u32 flags);
+#endif /* _MSM_DBA_INTERNAL_H */
diff --git a/drivers/video/fbdev/msm/msm_mdss_io_8974.c b/drivers/video/fbdev/msm/msm_mdss_io_8974.c
new file mode 100644
index 0000000..05a7fc0
--- /dev/null
+++ b/drivers/video/fbdev/msm/msm_mdss_io_8974.c
@@ -0,0 +1,2714 @@
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/iopoll.h>
+#include <linux/kthread.h>
+
+#include "mdss_dsi.h"
+#include "mdss_edp.h"
+#include "mdss_dsi_phy.h"
+
+#define MDSS_DSI_DSIPHY_REGULATOR_CTRL_0	0x00
+#define MDSS_DSI_DSIPHY_REGULATOR_CTRL_1	0x04
+#define MDSS_DSI_DSIPHY_REGULATOR_CTRL_2	0x08
+#define MDSS_DSI_DSIPHY_REGULATOR_CTRL_3	0x0c
+#define MDSS_DSI_DSIPHY_REGULATOR_CTRL_4	0x10
+#define MDSS_DSI_DSIPHY_REGULATOR_CAL_PWR_CFG	0x18
+#define MDSS_DSI_DSIPHY_LDO_CNTRL		0x1dc
+#define MDSS_DSI_DSIPHY_REGULATOR_TEST		0x294
+#define MDSS_DSI_DSIPHY_STRENGTH_CTRL_0		0x184
+#define MDSS_DSI_DSIPHY_STRENGTH_CTRL_1		0x188
+#define MDSS_DSI_DSIPHY_STRENGTH_CTRL_2		0x18c
+#define MDSS_DSI_DSIPHY_TIMING_CTRL_0		0x140
+#define MDSS_DSI_DSIPHY_GLBL_TEST_CTRL		0x1d4
+#define MDSS_DSI_DSIPHY_CTRL_0			0x170
+#define MDSS_DSI_DSIPHY_CTRL_1			0x174
+
+#define SW_RESET BIT(2)
+#define SW_RESET_PLL BIT(0)
+#define PWRDN_B BIT(7)
+
+/* 8996 */
+#define DATALANE_OFFSET_FROM_BASE_8996	0x100
+#define DSIPHY_CMN_PLL_CNTRL		0x0048
+#define DATALANE_SIZE_8996			0x80
+
+#define DSIPHY_CMN_GLBL_TEST_CTRL		0x0018
+#define DSIPHY_CMN_CTRL_0			0x001c
+#define DSIPHY_CMN_CTRL_1			0x0020
+#define DSIPHY_CMN_LDO_CNTRL			0x004c
+#define DSIPHY_PLL_CLKBUFLR_EN			0x041c
+#define DSIPHY_PLL_PLL_BANDGAP			0x0508
+
+#define DSIPHY_LANE_STRENGTH_CTRL_1		0x003c
+#define DSIPHY_LANE_VREG_CNTRL			0x0064
+
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL0		0x214
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL1		0x218
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL2		0x21C
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL3		0x220
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL4		0x224
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL5		0x228
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL6		0x22C
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL7		0x230
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL8		0x234
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL9		0x238
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL10		0x23C
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL11		0x240
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL12		0x244
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL13		0x248
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL14		0x24C
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL15		0x250
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL16		0x254
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL17		0x258
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL18		0x25C
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL19		0x260
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL19		0x260
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL20		0x264
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL21		0x268
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL22		0x26C
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL23		0x270
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL24		0x274
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL25		0x278
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL26		0x27C
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL27		0x280
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL28		0x284
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL29		0x288
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL30		0x28C
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL31		0x290
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR	0x294
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2	0x298
+
+#define DSIPHY_DLN0_CFG1		0x0104
+#define DSIPHY_DLN0_TIMING_CTRL_4	0x0118
+#define DSIPHY_DLN0_TIMING_CTRL_5	0x011C
+#define DSIPHY_DLN0_TIMING_CTRL_6	0x0120
+#define DSIPHY_DLN0_TIMING_CTRL_7	0x0124
+#define DSIPHY_DLN0_TIMING_CTRL_8	0x0128
+
+#define DSIPHY_DLN1_CFG1		0x0184
+#define DSIPHY_DLN1_TIMING_CTRL_4	0x0198
+#define DSIPHY_DLN1_TIMING_CTRL_5	0x019C
+#define DSIPHY_DLN1_TIMING_CTRL_6	0x01A0
+#define DSIPHY_DLN1_TIMING_CTRL_7	0x01A4
+#define DSIPHY_DLN1_TIMING_CTRL_8	0x01A8
+
+#define DSIPHY_DLN2_CFG1		0x0204
+#define DSIPHY_DLN2_TIMING_CTRL_4	0x0218
+#define DSIPHY_DLN2_TIMING_CTRL_5	0x021C
+#define DSIPHY_DLN2_TIMING_CTRL_6	0x0220
+#define DSIPHY_DLN2_TIMING_CTRL_7	0x0224
+#define DSIPHY_DLN2_TIMING_CTRL_8	0x0228
+
+#define DSIPHY_DLN3_CFG1		0x0284
+#define DSIPHY_DLN3_TIMING_CTRL_4	0x0298
+#define DSIPHY_DLN3_TIMING_CTRL_5	0x029C
+#define DSIPHY_DLN3_TIMING_CTRL_6	0x02A0
+#define DSIPHY_DLN3_TIMING_CTRL_7	0x02A4
+#define DSIPHY_DLN3_TIMING_CTRL_8	0x02A8
+
+#define DSIPHY_CKLN_CFG1		0x0304
+#define DSIPHY_CKLN_TIMING_CTRL_4	0x0318
+#define DSIPHY_CKLN_TIMING_CTRL_5	0x031C
+#define DSIPHY_CKLN_TIMING_CTRL_6	0x0320
+#define DSIPHY_CKLN_TIMING_CTRL_7	0x0324
+#define DSIPHY_CKLN_TIMING_CTRL_8	0x0328
+
+#define DSIPHY_PLL_RESETSM_CNTRL5	0x043c
+
+#define PLL_CALC_DATA(addr0, addr1, data0, data1)      \
+	(((data1) << 24) | ((((addr1)/4) & 0xFF) << 16) | \
+	 ((data0) << 8) | (((addr0)/4) & 0xFF))
+
+#define MDSS_DYN_REF_REG_W(base, offset, addr0, addr1, data0, data1)   \
+	writel_relaxed(PLL_CALC_DATA(addr0, addr1, data0, data1), \
+			(base) + (offset))
+
+void mdss_dsi_dfps_config_8996(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	struct mdss_panel_data *pdata;
+	struct mdss_panel_info *pinfo;
+	struct mdss_dsi_phy_ctrl *pd;
+	int glbl_tst_cntrl =
+		MIPI_INP(ctrl->phy_io.base + DSIPHY_CMN_GLBL_TEST_CTRL);
+
+	pdata = &ctrl->panel_data;
+	if (!pdata) {
+		pr_err("%s: Invalid panel data\n", __func__);
+		return;
+	}
+	pinfo = &pdata->panel_info;
+	pd = &(((ctrl->panel_data).panel_info.mipi).dsi_phy_db);
+
+	if (mdss_dsi_is_ctrl_clk_slave(ctrl)) {
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL0,
+				DSIPHY_DLN0_CFG1, DSIPHY_DLN1_CFG1,
+				0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL1,
+				DSIPHY_DLN2_CFG1, DSIPHY_DLN3_CFG1,
+				0x0, 0x0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL2,
+				DSIPHY_CKLN_CFG1, DSIPHY_DLN0_TIMING_CTRL_4,
+				0x0, pd->timing_8996[0]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL3,
+				DSIPHY_DLN1_TIMING_CTRL_4,
+				DSIPHY_DLN2_TIMING_CTRL_4,
+				pd->timing_8996[8],
+				pd->timing_8996[16]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL4,
+				DSIPHY_DLN3_TIMING_CTRL_4,
+				DSIPHY_CKLN_TIMING_CTRL_4,
+				pd->timing_8996[24],
+				pd->timing_8996[32]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL5,
+				DSIPHY_DLN0_TIMING_CTRL_5,
+				DSIPHY_DLN1_TIMING_CTRL_5,
+				pd->timing_8996[1],
+				pd->timing_8996[9]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL6,
+				DSIPHY_DLN2_TIMING_CTRL_5,
+				DSIPHY_DLN3_TIMING_CTRL_5,
+				pd->timing_8996[17],
+				pd->timing_8996[25]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL7,
+				DSIPHY_CKLN_TIMING_CTRL_5,
+				DSIPHY_DLN0_TIMING_CTRL_6,
+				pd->timing_8996[33],
+				pd->timing_8996[2]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL8,
+				DSIPHY_DLN1_TIMING_CTRL_6,
+				DSIPHY_DLN2_TIMING_CTRL_6,
+				pd->timing_8996[10],
+				pd->timing_8996[18]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL9,
+				DSIPHY_DLN3_TIMING_CTRL_6,
+				DSIPHY_CKLN_TIMING_CTRL_6,
+				pd->timing_8996[26],
+				pd->timing_8996[34]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL10,
+				DSIPHY_DLN0_TIMING_CTRL_7,
+				DSIPHY_DLN1_TIMING_CTRL_7,
+				pd->timing_8996[3],
+				pd->timing_8996[11]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL11,
+				DSIPHY_DLN2_TIMING_CTRL_7,
+				DSIPHY_DLN3_TIMING_CTRL_7,
+				pd->timing_8996[19],
+				pd->timing_8996[27]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL12,
+				DSIPHY_CKLN_TIMING_CTRL_7,
+				DSIPHY_DLN0_TIMING_CTRL_8,
+				pd->timing_8996[35],
+				pd->timing_8996[4]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL13,
+				DSIPHY_DLN1_TIMING_CTRL_8,
+				DSIPHY_DLN2_TIMING_CTRL_8,
+				pd->timing_8996[12],
+				pd->timing_8996[20]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL14,
+				DSIPHY_DLN3_TIMING_CTRL_8,
+				DSIPHY_CKLN_TIMING_CTRL_8,
+				pd->timing_8996[28],
+				pd->timing_8996[36]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL15,
+				0x0110, 0x0110,	0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL16,
+				0x0110, 0x0110,	0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL17,
+				0x0110, 0x0110, 0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL18,
+				0x0110, 0x0110,	0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL19,
+				0x0110, 0x0110,	0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL20,
+				0x110, 0x110, 0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL21,
+				0x110, 0x110, 0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL22,
+				0x110, 0x110, 0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL23,
+				0x110, 0x110, 0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL24,
+				0x110, 0x110, 0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL25,
+				0x110, 0x110, 0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL26,
+				0x110, 0x110, 0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL27,
+				0x110, 0x110, 0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL28,
+				0x110, 0x110, 0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL29,
+				0x110, 0x110, 0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL30,
+				0x110, 0x110, 0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL31,
+				0x110, 0x110, 0, 0);
+		MIPI_OUTP(ctrl->ctrl_base +
+				DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR, 0x0);
+		MIPI_OUTP(ctrl->ctrl_base +
+				DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2, 0x0);
+	} else {
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL0,
+				DSIPHY_CMN_GLBL_TEST_CTRL,
+				DSIPHY_PLL_PLL_BANDGAP,
+				glbl_tst_cntrl | BIT(1), 0x1);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL1,
+				DSIPHY_PLL_RESETSM_CNTRL5,
+				DSIPHY_PLL_PLL_BANDGAP,
+				0x0D, 0x03);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL2,
+				DSIPHY_PLL_RESETSM_CNTRL5,
+				DSIPHY_CMN_PLL_CNTRL,
+				0x1D, 0x00);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL3,
+				DSIPHY_CMN_CTRL_1, DSIPHY_DLN0_CFG1,
+				0x20, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL4,
+				DSIPHY_DLN1_CFG1, DSIPHY_DLN2_CFG1,
+				0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL5,
+				DSIPHY_DLN3_CFG1, DSIPHY_CKLN_CFG1,
+				0, 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL6,
+				DSIPHY_DLN0_TIMING_CTRL_4,
+				DSIPHY_DLN1_TIMING_CTRL_4,
+				pd->timing_8996[0],
+				pd->timing_8996[8]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL7,
+				DSIPHY_DLN2_TIMING_CTRL_4,
+				DSIPHY_DLN3_TIMING_CTRL_4,
+				pd->timing_8996[16],
+				pd->timing_8996[24]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL8,
+				DSIPHY_CKLN_TIMING_CTRL_4,
+				DSIPHY_DLN0_TIMING_CTRL_5,
+				pd->timing_8996[32],
+				pd->timing_8996[1]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL9,
+				DSIPHY_DLN1_TIMING_CTRL_5,
+				DSIPHY_DLN2_TIMING_CTRL_5,
+				pd->timing_8996[9],
+				pd->timing_8996[17]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL10,
+				DSIPHY_DLN3_TIMING_CTRL_5,
+				DSIPHY_CKLN_TIMING_CTRL_5,
+				pd->timing_8996[25],
+				pd->timing_8996[33]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL11,
+				DSIPHY_DLN0_TIMING_CTRL_6,
+				DSIPHY_DLN1_TIMING_CTRL_6,
+				pd->timing_8996[2],
+				pd->timing_8996[10]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL12,
+				DSIPHY_DLN2_TIMING_CTRL_6,
+				DSIPHY_DLN3_TIMING_CTRL_6,
+				pd->timing_8996[18],
+				pd->timing_8996[26]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL13,
+				DSIPHY_CKLN_TIMING_CTRL_6,
+				DSIPHY_DLN0_TIMING_CTRL_7,
+				pd->timing_8996[34],
+				pd->timing_8996[3]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL14,
+				DSIPHY_DLN1_TIMING_CTRL_7,
+				DSIPHY_DLN2_TIMING_CTRL_7,
+				pd->timing_8996[11],
+				pd->timing_8996[19]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL15,
+				DSIPHY_DLN3_TIMING_CTRL_7,
+				DSIPHY_CKLN_TIMING_CTRL_7,
+				pd->timing_8996[27],
+				pd->timing_8996[35]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL16,
+				DSIPHY_DLN0_TIMING_CTRL_8,
+				DSIPHY_DLN1_TIMING_CTRL_8,
+				pd->timing_8996[4],
+				pd->timing_8996[12]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL17,
+				DSIPHY_DLN2_TIMING_CTRL_8,
+				DSIPHY_DLN3_TIMING_CTRL_8,
+				pd->timing_8996[20],
+				pd->timing_8996[28]);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL18,
+				DSIPHY_CKLN_TIMING_CTRL_8,
+				DSIPHY_CMN_CTRL_1,
+				pd->timing_8996[36], 0);
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL30,
+				DSIPHY_CMN_GLBL_TEST_CTRL,
+				DSIPHY_CMN_GLBL_TEST_CTRL,
+				((glbl_tst_cntrl) & (~BIT(2))),
+				((glbl_tst_cntrl) & (~BIT(2))));
+		MDSS_DYN_REF_REG_W(ctrl->ctrl_base,
+				DSI_DYNAMIC_REFRESH_PLL_CTRL31,
+				DSIPHY_CMN_GLBL_TEST_CTRL,
+				DSIPHY_CMN_GLBL_TEST_CTRL,
+				((glbl_tst_cntrl) & (~BIT(2))),
+				((glbl_tst_cntrl) & (~BIT(2))));
+	}
+
+	wmb(); /* make sure phy timings are updated */
+}
+
+static void mdss_dsi_ctrl_phy_reset(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	/* start phy sw reset */
+	MIPI_OUTP(ctrl->ctrl_base + 0x12c, 0x0001);
+	udelay(1000);
+	wmb();	/* make sure reset */
+	/* end phy sw reset */
+	MIPI_OUTP(ctrl->ctrl_base + 0x12c, 0x0000);
+	udelay(100);
+	wmb();	/* make sure reset cleared */
+}
+
+int mdss_dsi_phy_pll_reset_status(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	int rc;
+	u32 val;
+	u32 const sleep_us = 10, timeout_us = 100;
+
+	pr_debug("%s: polling for RESETSM_READY_STATUS.CORE_READY\n",
+		__func__);
+	rc = readl_poll_timeout(ctrl->phy_io.base + 0x4cc, val,
+		(val & 0x1), sleep_us, timeout_us);
+
+	return rc;
+}
+
+static void mdss_dsi_phy_sw_reset_sub(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	struct mdss_dsi_ctrl_pdata *sctrl = NULL;
+	struct dsi_shared_data *sdata;
+	struct mdss_dsi_ctrl_pdata *octrl;
+	u32 reg_val = 0;
+
+	if (ctrl == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	sdata = ctrl->shared_data;
+	octrl = mdss_dsi_get_other_ctrl(ctrl);
+
+	if (ctrl->shared_data->phy_rev == DSI_PHY_REV_20) {
+		if (mdss_dsi_is_ctrl_clk_master(ctrl))
+			sctrl = mdss_dsi_get_ctrl_clk_slave();
+		else
+			return;
+	}
+
+	/*
+	 * For dual dsi case if we do DSI PHY sw reset,
+	 * this will reset DSI PHY regulators also.
+	 * Since DSI PHY regulator is shared among both
+	 * the DSI controllers, we should not do DSI PHY
+	 * sw reset when the other DSI controller is still
+	 * active.
+	 */
+	mutex_lock(&sdata->phy_reg_lock);
+	if ((mdss_dsi_is_hw_config_dual(sdata) &&
+		(octrl && octrl->is_phyreg_enabled))) {
+		/* start phy lane and HW reset */
+		reg_val = MIPI_INP(ctrl->ctrl_base + 0x12c);
+		reg_val |= (BIT(16) | BIT(8));
+		MIPI_OUTP(ctrl->ctrl_base + 0x12c, reg_val);
+		/* wait for 1ms as per HW design */
+		usleep_range(1000, 2000);
+		/* ensure phy lane and HW reset starts */
+		wmb();
+		/* end phy lane and HW reset */
+		reg_val = MIPI_INP(ctrl->ctrl_base + 0x12c);
+		reg_val &= ~(BIT(16) | BIT(8));
+		MIPI_OUTP(ctrl->ctrl_base + 0x12c, reg_val);
+		/* wait for 100us as per HW design */
+		usleep_range(100, 200);
+		/* ensure phy lane and HW reset ends */
+		wmb();
+	} else {
+		/* start phy sw reset */
+		mdss_dsi_ctrl_phy_reset(ctrl);
+		if (sctrl)
+			mdss_dsi_ctrl_phy_reset(sctrl);
+
+	}
+	mutex_unlock(&sdata->phy_reg_lock);
+}
+
+void mdss_dsi_phy_sw_reset(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	struct mdss_dsi_ctrl_pdata *sctrl = NULL;
+	struct dsi_shared_data *sdata;
+
+	if (ctrl == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	sdata = ctrl->shared_data;
+
+	/*
+	 * When operating in split display mode, make sure that the PHY reset
+	 * is only done from the clock master. This will ensure that the PLL is
+	 * off when PHY reset is called.
+	 */
+	if (mdss_dsi_is_ctrl_clk_slave(ctrl))
+		return;
+
+	mdss_dsi_phy_sw_reset_sub(ctrl);
+
+	if (mdss_dsi_is_ctrl_clk_master(ctrl)) {
+		sctrl = mdss_dsi_get_ctrl_clk_slave();
+		if (sctrl)
+			mdss_dsi_phy_sw_reset_sub(sctrl);
+		else
+			pr_warn("%s: unable to get slave ctrl\n", __func__);
+	}
+
+	/* All other quirks go here */
+	if ((sdata->hw_rev == MDSS_DSI_HW_REV_103) &&
+		!mdss_dsi_is_hw_config_dual(sdata) &&
+		mdss_dsi_is_right_ctrl(ctrl)) {
+
+		/*
+		 * phy sw reset will wipe out the pll settings for PLL.
+		 * Need to explicitly turn off PLL1 if unused to avoid
+		 * current leakage issues.
+		 */
+		if ((mdss_dsi_is_hw_config_split(sdata) ||
+			mdss_dsi_is_pll_src_pll0(sdata)) &&
+			ctrl->vco_dummy_clk) {
+			pr_debug("Turn off unused PLL1 registers\n");
+			clk_set_rate(ctrl->vco_dummy_clk, 1);
+		}
+	}
+}
+
+static void mdss_dsi_phy_regulator_disable(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	if (!ctrl) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	if (ctrl->shared_data->phy_rev == DSI_PHY_REV_20)
+		return;
+
+	MIPI_OUTP(ctrl->phy_regulator_io.base + 0x018, 0x000);
+}
+
+static void mdss_dsi_phy_shutdown(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	if (!ctrl) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	if (ctrl->shared_data->phy_rev == DSI_PHY_REV_20) {
+		MIPI_OUTP(ctrl->phy_io.base + DSIPHY_PLL_CLKBUFLR_EN, 0);
+		MIPI_OUTP(ctrl->phy_io.base + DSIPHY_CMN_GLBL_TEST_CTRL, 0);
+		MIPI_OUTP(ctrl->phy_io.base + DSIPHY_CMN_CTRL_0, 0);
+	} else {
+		MIPI_OUTP(ctrl->phy_io.base + MDSS_DSI_DSIPHY_CTRL_0, 0x000);
+	}
+}
+
+/**
+ * mdss_dsi_lp_cd_rx() -- enable LP and CD at receiving
+ * @ctrl: pointer to DSI controller structure
+ *
+ * LP: low power
+ * CD: contention detection
+ */
+void mdss_dsi_lp_cd_rx(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	struct mdss_dsi_phy_ctrl *pd;
+
+	if (!ctrl) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	if (ctrl->shared_data->phy_rev == DSI_PHY_REV_20)
+		return;
+
+	pd = &(((ctrl->panel_data).panel_info.mipi).dsi_phy_db);
+
+	MIPI_OUTP((ctrl->phy_io.base) + 0x0188, pd->strength[1]);
+	/* Strength ctrl 1, LP Rx + CD Rx contention detection */
+	wmb();
+}
+
+static void mdss_dsi_28nm_phy_regulator_enable(
+		struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	struct mdss_dsi_phy_ctrl *pd;
+
+	pd = &(((ctrl_pdata->panel_data).panel_info.mipi).dsi_phy_db);
+
+	if (pd->reg_ldo_mode) {
+		/* Regulator ctrl 0 */
+		MIPI_OUTP(ctrl_pdata->phy_regulator_io.base, 0x0);
+		/* Regulator ctrl - CAL_PWR_CFG */
+		MIPI_OUTP((ctrl_pdata->phy_regulator_io.base)
+				+ 0x18, pd->regulator[6]);
+		/* Add H/w recommended delay */
+		udelay(1000);
+		/* Regulator ctrl - TEST */
+		MIPI_OUTP((ctrl_pdata->phy_regulator_io.base)
+				+ 0x14, pd->regulator[5]);
+		/* Regulator ctrl 3 */
+		MIPI_OUTP((ctrl_pdata->phy_regulator_io.base)
+				+ 0xc, pd->regulator[3]);
+		/* Regulator ctrl 2 */
+		MIPI_OUTP((ctrl_pdata->phy_regulator_io.base)
+				+ 0x8, pd->regulator[2]);
+		/* Regulator ctrl 1 */
+		MIPI_OUTP((ctrl_pdata->phy_regulator_io.base)
+				+ 0x4, pd->regulator[1]);
+		/* Regulator ctrl 4 */
+		MIPI_OUTP((ctrl_pdata->phy_regulator_io.base)
+				+ 0x10, pd->regulator[4]);
+		/* LDO ctrl */
+		if ((ctrl_pdata->shared_data->hw_rev ==
+			MDSS_DSI_HW_REV_103_1)
+			|| (ctrl_pdata->shared_data->hw_rev ==
+			MDSS_DSI_HW_REV_104_2))
+			MIPI_OUTP((ctrl_pdata->phy_io.base) + 0x1dc, 0x05);
+		else
+			MIPI_OUTP((ctrl_pdata->phy_io.base) + 0x1dc, 0x0d);
+	} else {
+		/* Regulator ctrl 0 */
+		MIPI_OUTP(ctrl_pdata->phy_regulator_io.base,
+					0x0);
+		/* Regulator ctrl - CAL_PWR_CFG */
+		MIPI_OUTP((ctrl_pdata->phy_regulator_io.base)
+				+ 0x18, pd->regulator[6]);
+		/* Add H/w recommended delay */
+		udelay(1000);
+		/* Regulator ctrl 1 */
+		MIPI_OUTP((ctrl_pdata->phy_regulator_io.base)
+				+ 0x4, pd->regulator[1]);
+		/* Regulator ctrl 2 */
+		MIPI_OUTP((ctrl_pdata->phy_regulator_io.base)
+				+ 0x8, pd->regulator[2]);
+		/* Regulator ctrl 3 */
+		MIPI_OUTP((ctrl_pdata->phy_regulator_io.base)
+				+ 0xc, pd->regulator[3]);
+		/* Regulator ctrl 4 */
+		MIPI_OUTP((ctrl_pdata->phy_regulator_io.base)
+				+ 0x10, pd->regulator[4]);
+		/* LDO ctrl */
+		MIPI_OUTP((ctrl_pdata->phy_io.base) + 0x1dc, 0x00);
+		/* Regulator ctrl 0 */
+		MIPI_OUTP(ctrl_pdata->phy_regulator_io.base,
+				pd->regulator[0]);
+	}
+}
+
+static void mdss_dsi_28nm_phy_config(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	struct mdss_dsi_phy_ctrl *pd;
+	int i, off, ln, offset;
+
+	if (!ctrl_pdata) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	pd = &(((ctrl_pdata->panel_data).panel_info.mipi).dsi_phy_db);
+
+	/* Strength ctrl 0 for 28nm PHY*/
+	if ((ctrl_pdata->shared_data->hw_rev <= MDSS_DSI_HW_REV_104_2) &&
+		(ctrl_pdata->shared_data->hw_rev != MDSS_DSI_HW_REV_103)) {
+		MIPI_OUTP((ctrl_pdata->phy_io.base) + 0x0170, 0x5b);
+		MIPI_OUTP((ctrl_pdata->phy_io.base) + 0x0184, pd->strength[0]);
+		/* make sure PHY strength ctrl is set */
+		wmb();
+	}
+
+	off = 0x0140;	/* phy timing ctrl 0 - 11 */
+	for (i = 0; i < 12; i++) {
+		MIPI_OUTP((ctrl_pdata->phy_io.base) + off, pd->timing[i]);
+		/* make sure phy timing register is programmed */
+		wmb();
+		off += 4;
+	}
+
+	/* 4 lanes + clk lane configuration */
+	/* lane config n * (0 - 4) & DataPath setup */
+	for (ln = 0; ln < 5; ln++) {
+		off = (ln * 0x40);
+		for (i = 0; i < 9; i++) {
+			offset = i + (ln * 9);
+			MIPI_OUTP((ctrl_pdata->phy_io.base) + off,
+							pd->lanecfg[offset]);
+			/* make sure lane config register is programmed */
+			wmb();
+			off += 4;
+		}
+	}
+
+	MIPI_OUTP((ctrl_pdata->phy_io.base) + 0x0180, 0x0a);
+	/* MMSS_DSI_0_PHY_DSIPHY_CTRL_4 */
+	wmb();
+
+	/* DSI_0_PHY_DSIPHY_GLBL_TEST_CTRL */
+	if (!mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data)) {
+		MIPI_OUTP((ctrl_pdata->phy_io.base) + 0x01d4, 0x01);
+	} else {
+		if (((ctrl_pdata->panel_data).panel_info.pdest == DISPLAY_1) ||
+		(ctrl_pdata->shared_data->hw_rev == MDSS_DSI_HW_REV_103_1))
+			MIPI_OUTP((ctrl_pdata->phy_io.base) + 0x01d4, 0x01);
+		else
+			MIPI_OUTP((ctrl_pdata->phy_io.base) + 0x01d4, 0x00);
+	}
+	/* ensure DSIPHY_GLBL_TEST_CTRL is set */
+	wmb();
+
+	/* MMSS_DSI_0_PHY_DSIPHY_CTRL_0 */
+	MIPI_OUTP((ctrl_pdata->phy_io.base) + 0x0170, 0x5f);
+	/* make sure PHY lanes are powered on */
+	wmb();
+
+	off = 0x01b4;	/* phy BIST ctrl 0 - 5 */
+	for (i = 0; i < 6; i++) {
+		MIPI_OUTP((ctrl_pdata->phy_io.base) + off, pd->bistctrl[i]);
+		wmb(); /* make sure PHY BIST control is configured */
+		off += 4;
+	}
+
+}
+
+static void mdss_dsi_20nm_phy_regulator_enable(struct mdss_dsi_ctrl_pdata
+	*ctrl_pdata)
+{
+	struct mdss_dsi_phy_ctrl *pd;
+	void __iomem *phy_io_base;
+
+	pd = &(((ctrl_pdata->panel_data).panel_info.mipi).dsi_phy_db);
+	phy_io_base = ctrl_pdata->phy_regulator_io.base;
+
+	if (pd->regulator_len != 7) {
+		pr_err("%s: wrong regulator settings\n", __func__);
+		return;
+	}
+
+	if (pd->reg_ldo_mode) {
+		MIPI_OUTP(ctrl_pdata->phy_io.base + MDSS_DSI_DSIPHY_LDO_CNTRL,
+			0x1d);
+	} else {
+		MIPI_OUTP(phy_io_base + MDSS_DSI_DSIPHY_REGULATOR_CTRL_1,
+			pd->regulator[1]);
+		MIPI_OUTP(phy_io_base + MDSS_DSI_DSIPHY_REGULATOR_CTRL_2,
+			pd->regulator[2]);
+		MIPI_OUTP(phy_io_base + MDSS_DSI_DSIPHY_REGULATOR_CTRL_3,
+			pd->regulator[3]);
+		MIPI_OUTP(phy_io_base + MDSS_DSI_DSIPHY_REGULATOR_CTRL_4,
+			pd->regulator[4]);
+		MIPI_OUTP(phy_io_base + MDSS_DSI_DSIPHY_REGULATOR_CAL_PWR_CFG,
+			pd->regulator[6]);
+		MIPI_OUTP(ctrl_pdata->phy_io.base + MDSS_DSI_DSIPHY_LDO_CNTRL,
+			0x00);
+		MIPI_OUTP(phy_io_base + MDSS_DSI_DSIPHY_REGULATOR_CTRL_0,
+			pd->regulator[0]);
+	}
+}
+
+static void mdss_dsi_20nm_phy_config(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	struct mdss_dsi_phy_ctrl *pd;
+	int i, off, ln, offset;
+
+	pd = &(((ctrl_pdata->panel_data).panel_info.mipi).dsi_phy_db);
+
+	if (pd->strength_len != 2) {
+		pr_err("%s: wrong strength ctrl\n", __func__);
+		return;
+	}
+
+	MIPI_OUTP((ctrl_pdata->phy_io.base) + MDSS_DSI_DSIPHY_STRENGTH_CTRL_0,
+		pd->strength[0]);
+
+
+	if (!mdss_dsi_is_hw_config_dual(ctrl_pdata->shared_data)) {
+		if (mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data) ||
+			mdss_dsi_is_left_ctrl(ctrl_pdata) ||
+			(mdss_dsi_is_right_ctrl(ctrl_pdata) &&
+			mdss_dsi_is_pll_src_pll0(ctrl_pdata->shared_data)))
+			MIPI_OUTP((ctrl_pdata->phy_io.base) +
+				MDSS_DSI_DSIPHY_GLBL_TEST_CTRL, 0x00);
+		else
+			MIPI_OUTP((ctrl_pdata->phy_io.base) +
+				MDSS_DSI_DSIPHY_GLBL_TEST_CTRL, 0x01);
+	} else {
+		if (mdss_dsi_is_left_ctrl(ctrl_pdata))
+			MIPI_OUTP((ctrl_pdata->phy_io.base) +
+				MDSS_DSI_DSIPHY_GLBL_TEST_CTRL, 0x00);
+		else
+			MIPI_OUTP((ctrl_pdata->phy_io.base) +
+				MDSS_DSI_DSIPHY_GLBL_TEST_CTRL, 0x01);
+	}
+
+	if (pd->lanecfg_len != 45) {
+		pr_err("%s: wrong lane cfg\n", __func__);
+		return;
+	}
+
+	/* 4 lanes + clk lane configuration */
+	/* lane config n * (0 - 4) & DataPath setup */
+	for (ln = 0; ln < 5; ln++) {
+		off = (ln * 0x40);
+		for (i = 0; i < 9; i++) {
+			offset = i + (ln * 9);
+			MIPI_OUTP((ctrl_pdata->phy_io.base) + off,
+				pd->lanecfg[offset]);
+			/* make sure lane config register is programmed */
+			wmb();
+			off += 4;
+		}
+	}
+
+	off = 0;	/* phy timing ctrl 0 - 11 */
+	for (i = 0; i < 12; i++) {
+		MIPI_OUTP((ctrl_pdata->phy_io.base) +
+			MDSS_DSI_DSIPHY_TIMING_CTRL_0 + off, pd->timing[i]);
+		wmb(); /* make sure phy timing register is programmed */
+		off += 4;
+	}
+
+	MIPI_OUTP((ctrl_pdata->phy_io.base) + MDSS_DSI_DSIPHY_CTRL_1, 0);
+	/* make sure everything is written before enable */
+	wmb();
+	MIPI_OUTP((ctrl_pdata->phy_io.base) + MDSS_DSI_DSIPHY_CTRL_0, 0x7f);
+}
+
+static void mdss_dsi_8996_pll_source_standalone(
+				struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	u32 data;
+
+	/*
+	 * pll right output enabled
+	 * bit clk select from left
+	 */
+	MIPI_OUTP((ctrl->phy_io.base) + DSIPHY_PLL_CLKBUFLR_EN, 0x01);
+	data = MIPI_INP((ctrl->phy_io.base) + DSIPHY_CMN_GLBL_TEST_CTRL);
+	data &= ~BIT(2);
+	MIPI_OUTP((ctrl->phy_io.base) + DSIPHY_CMN_GLBL_TEST_CTRL, data);
+}
+
+static void mdss_dsi_8996_pll_source_from_right(
+				struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	u32 data;
+
+	/*
+	 * pll left + right output disabled
+	 * bit clk select from right
+	 */
+	MIPI_OUTP((ctrl->phy_io.base) + DSIPHY_PLL_CLKBUFLR_EN, 0x00);
+	data = MIPI_INP((ctrl->phy_io.base) + DSIPHY_CMN_GLBL_TEST_CTRL);
+	data |= BIT(2);
+	MIPI_OUTP((ctrl->phy_io.base) + DSIPHY_CMN_GLBL_TEST_CTRL, data);
+
+	/* enable bias current for pll1 during split display case */
+	MIPI_OUTP((ctrl->phy_io.base) + DSIPHY_PLL_PLL_BANDGAP, 0x3);
+}
+
+static void mdss_dsi_8996_pll_source_from_left(
+				struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	u32 data;
+
+	/*
+	 * pll left + right output enabled
+	 * bit clk select from left
+	 */
+	MIPI_OUTP((ctrl->phy_io.base) + DSIPHY_PLL_CLKBUFLR_EN, 0x03);
+	data = MIPI_INP((ctrl->phy_io.base) + DSIPHY_CMN_GLBL_TEST_CTRL);
+	data &= ~BIT(2);
+	MIPI_OUTP((ctrl->phy_io.base) + DSIPHY_CMN_GLBL_TEST_CTRL, data);
+}
+
+static void mdss_dsi_8996_phy_regulator_enable(
+	struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	struct mdss_dsi_phy_ctrl *pd;
+	int j, off, ln, cnt, ln_off;
+	char *ip;
+	void __iomem *base;
+
+	pd = &(((ctrl->panel_data).panel_info.mipi).dsi_phy_db);
+	/* 4 lanes + clk lane configuration */
+	for (ln = 0; ln < 5; ln++) {
+		/*
+		 * data lane offset from base: 0x100
+		 * data lane size: 0x80
+		 */
+		base = ctrl->phy_io.base +
+				DATALANE_OFFSET_FROM_BASE_8996;
+		base += (ln * DATALANE_SIZE_8996); /* lane base */
+
+		/* vreg ctrl, 1 * 5 */
+		cnt = 1;
+		ln_off = cnt * ln;
+		ip = &pd->regulator[ln_off];
+		off = 0x64;
+		for (j = 0; j < cnt; j++, off += 4)
+			MIPI_OUTP(base + off, *ip++);
+	}
+
+	wmb(); /* make sure registers committed */
+
+}
+
+static void mdss_dsi_8996_phy_power_off(
+	struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	int ln;
+	void __iomem *base;
+	u32 data;
+
+	/* Turn off PLL power */
+	data = MIPI_INP(ctrl->phy_io.base + DSIPHY_CMN_CTRL_0);
+	MIPI_OUTP(ctrl->phy_io.base + DSIPHY_CMN_CTRL_0, data & ~BIT(7));
+
+	/* 4 lanes + clk lane configuration */
+	for (ln = 0; ln < 5; ln++) {
+		base = ctrl->phy_io.base +
+				DATALANE_OFFSET_FROM_BASE_8996;
+		base += (ln * DATALANE_SIZE_8996); /* lane base */
+
+		/* turn off phy ldo */
+		MIPI_OUTP(base + DSIPHY_LANE_VREG_CNTRL, 0x1c);
+	}
+	MIPI_OUTP((ctrl->phy_io.base) + DSIPHY_CMN_LDO_CNTRL, 0x1c);
+
+	/* 4 lanes + clk lane configuration */
+	for (ln = 0; ln < 5; ln++) {
+		base = ctrl->phy_io.base +
+				DATALANE_OFFSET_FROM_BASE_8996;
+		base += (ln * DATALANE_SIZE_8996); /* lane base */
+
+		MIPI_OUTP(base + DSIPHY_LANE_STRENGTH_CTRL_1, 0x0);
+	}
+
+	wmb(); /* make sure registers committed */
+}
+
+static void mdss_dsi_phy_power_off(
+	struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	struct mdss_panel_info *pinfo;
+
+	if (ctrl->phy_power_off)
+		return;
+
+	pinfo = &ctrl->panel_data.panel_info;
+
+	if ((ctrl->shared_data->phy_rev != DSI_PHY_REV_20) ||
+		!pinfo->allow_phy_power_off) {
+		pr_debug("%s: ctrl%d phy rev:%d panel support for phy off:%d\n",
+			__func__, ctrl->ndx, ctrl->shared_data->phy_rev,
+			pinfo->allow_phy_power_off);
+		return;
+	}
+
+	/* supported for phy rev 2.0 and if panel allows it*/
+	mdss_dsi_8996_phy_power_off(ctrl);
+
+	ctrl->phy_power_off = true;
+}
+
+static void mdss_dsi_8996_phy_power_on(
+	struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	int j, off, ln, cnt, ln_off;
+	void __iomem *base;
+	struct mdss_dsi_phy_ctrl *pd;
+	char *ip;
+	u32 data;
+
+	pd = &(((ctrl->panel_data).panel_info.mipi).dsi_phy_db);
+
+	/* 4 lanes + clk lane configuration */
+	for (ln = 0; ln < 5; ln++) {
+		base = ctrl->phy_io.base +
+				DATALANE_OFFSET_FROM_BASE_8996;
+		base += (ln * DATALANE_SIZE_8996); /* lane base */
+
+		/* strength, 2 * 5 */
+		cnt = 2;
+		ln_off = cnt * ln;
+		ip = &pd->strength[ln_off];
+		off = 0x38;
+		for (j = 0; j < cnt; j++, off += 4)
+			MIPI_OUTP(base + off, *ip++);
+	}
+
+	mdss_dsi_8996_phy_regulator_enable(ctrl);
+
+	/* Turn on PLL power */
+	data = MIPI_INP(ctrl->phy_io.base + DSIPHY_CMN_CTRL_0);
+	MIPI_OUTP(ctrl->phy_io.base + DSIPHY_CMN_CTRL_0, data | BIT(7));
+}
+
+static void mdss_dsi_phy_power_on(
+	struct mdss_dsi_ctrl_pdata *ctrl, bool mmss_clamp)
+{
+	if (mmss_clamp && !ctrl->phy_power_off)
+		mdss_dsi_phy_init(ctrl);
+	else if ((ctrl->shared_data->phy_rev == DSI_PHY_REV_20) &&
+	    ctrl->phy_power_off)
+		mdss_dsi_8996_phy_power_on(ctrl);
+
+	ctrl->phy_power_off = false;
+}
+
/*
 * mdss_dsi_8996_phy_config() - full configuration of the rev 2.0 DSI PHY
 * @ctrl: DSI controller data
 *
 * Programs LDO control, per-lane configuration/timing/strength registers
 * from the panel's dsi_phy_db (lengths validated up front: lanecfg 4*5,
 * strength 2*5, regulator 5), resets the PHY digital block, and selects
 * the PLL source (left/right/standalone) according to the split/dual-DSI
 * topology before enabling the PHY via CMN_CTRL_0.
 */
static void mdss_dsi_8996_phy_config(struct mdss_dsi_ctrl_pdata *ctrl)
{
	struct mdss_dsi_phy_ctrl *pd;
	int j, off, ln, cnt, ln_off;
	char *ip;
	void __iomem *base;

	pd = &(((ctrl->panel_data).panel_info.mipi).dsi_phy_db);

	MIPI_OUTP((ctrl->phy_io.base) + DSIPHY_CMN_LDO_CNTRL, 0x1c);

	/* clk_en */
	MIPI_OUTP((ctrl->phy_io.base) + DSIPHY_CMN_GLBL_TEST_CTRL, 0x1);

	/* settings tables must match the 4-data + 1-clk lane layout below */
	if (pd->lanecfg_len != 20) {
		pr_err("%s: wrong lane cfg\n", __func__);
		return;
	}

	if (pd->strength_len != 10) {
		pr_err("%s: wrong strength ctrl\n", __func__);
		return;
	}

	if (pd->regulator_len != 5) {
		pr_err("%s: wrong regulator setting\n", __func__);
		return;
	}

	/* 4 lanes + clk lane configuration */
	for (ln = 0; ln < 5; ln++) {
		/*
		 * data lane offset from base: 0x100
		 * data lane size: 0x80
		 */
		base = ctrl->phy_io.base +
				DATALANE_OFFSET_FROM_BASE_8996;
		base += (ln * DATALANE_SIZE_8996); /* lane base */

		/* lane cfg, 4 * 5 */
		cnt = 4;
		ln_off = cnt * ln;
		ip = &pd->lanecfg[ln_off];
		off = 0x0;
		for (j = 0; j < cnt; j++) {
			MIPI_OUTP(base + off, *ip++);
			off += 4;
		}

		/* test str */
		MIPI_OUTP(base + 0x14, 0x0088);	/* fixed */

		/* phy timing, 8 * 5 */
		cnt = 8;
		ln_off = cnt * ln;
		ip = &pd->timing_8996[ln_off];
		off = 0x18;
		for (j = 0; j < cnt; j++, off += 4)
			MIPI_OUTP(base + off, *ip++);

		/* strength, 2 * 5 */
		cnt = 2;
		ln_off = cnt * ln;
		ip = &pd->strength[ln_off];
		off = 0x38;
		for (j = 0; j < cnt; j++, off += 4)
			MIPI_OUTP(base + off, *ip++);
	}

	wmb(); /* make sure registers committed */

	/* reset digital block */
	MIPI_OUTP((ctrl->phy_io.base) + DSIPHY_CMN_CTRL_1, 0x80);
	udelay(100);
	MIPI_OUTP((ctrl->phy_io.base) + DSIPHY_CMN_CTRL_1, 0x00);

	/* pick the PLL feeding this controller based on topology */
	if (mdss_dsi_is_hw_config_split(ctrl->shared_data)) {
		if (mdss_dsi_is_left_ctrl(ctrl))
			mdss_dsi_8996_pll_source_from_left(ctrl);
		else
			mdss_dsi_8996_pll_source_from_right(ctrl);
	} else {
		if (mdss_dsi_is_right_ctrl(ctrl) &&
			mdss_dsi_is_pll_src_pll0(ctrl->shared_data))
			mdss_dsi_8996_pll_source_from_left(ctrl);
		else
			mdss_dsi_8996_pll_source_standalone(ctrl);
	}

	MIPI_OUTP(ctrl->phy_io.base + DSIPHY_CMN_CTRL_0, 0x7f);
	wmb(); /* make sure registers committed */
}
+
/*
 * mdss_dsi_phy_regulator_ctrl() - enable/disable the DSI PHY regulator
 * @ctrl:	DSI controller data
 * @enable:	true to enable the regulator, false to disable it
 *
 * The PHY regulator is shared between the two DSI controllers in
 * split/dual configurations, so enable/disable is serialized with
 * phy_reg_lock and gated on the other controller's state.
 */
static void mdss_dsi_phy_regulator_ctrl(struct mdss_dsi_ctrl_pdata *ctrl,
	bool enable)
{
	struct mdss_dsi_ctrl_pdata *other_ctrl;
	struct dsi_shared_data *sdata;

	if (!ctrl) {
		pr_err("%s: Invalid input data\n", __func__);
		return;
	}

	sdata = ctrl->shared_data;
	other_ctrl = mdss_dsi_get_other_ctrl(ctrl);

	mutex_lock(&sdata->phy_reg_lock);
	if (enable) {
		if (ctrl->shared_data->phy_rev == DSI_PHY_REV_20) {
			mdss_dsi_8996_phy_regulator_enable(ctrl);
		} else {
			switch (ctrl->shared_data->hw_rev) {
			case MDSS_DSI_HW_REV_103:
				mdss_dsi_20nm_phy_regulator_enable(ctrl);
				break;
			default:
			/*
			 * For dual dsi case, do not reconfigure dsi phy
			 * regulator if the other dsi controller is still
			 * active.
			 */
			if (!mdss_dsi_is_hw_config_dual(sdata) ||
				(other_ctrl && (!other_ctrl->is_phyreg_enabled
						|| other_ctrl->mmss_clamp)))
				mdss_dsi_28nm_phy_regulator_enable(ctrl);
				/*
				 * NOTE(review): despite the indentation, this
				 * break belongs to the switch, not to the if
				 * above — the regulator enable is conditional,
				 * the break is unconditional.
				 */
				break;
			}
		}
		ctrl->is_phyreg_enabled = 1;
	} else {
		/*
		 * In split-dsi/dual-dsi configuration, the dsi phy regulator
		 * should be turned off only when both the DSI devices are
		 * going to be turned off since it is shared.
		 */
		if (mdss_dsi_is_hw_config_split(ctrl->shared_data) ||
			mdss_dsi_is_hw_config_dual(ctrl->shared_data)) {
			if (other_ctrl && !other_ctrl->is_phyreg_enabled)
				mdss_dsi_phy_regulator_disable(ctrl);
		} else {
			mdss_dsi_phy_regulator_disable(ctrl);
		}
		ctrl->is_phyreg_enabled = 0;
	}
	mutex_unlock(&sdata->phy_reg_lock);
}
+
+static void mdss_dsi_phy_ctrl(struct mdss_dsi_ctrl_pdata *ctrl, bool enable)
+{
+	struct mdss_dsi_ctrl_pdata *other_ctrl;
+
+	if (!ctrl) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	if (enable) {
+
+		if (ctrl->shared_data->phy_rev == DSI_PHY_REV_20) {
+			mdss_dsi_8996_phy_config(ctrl);
+		} else {
+			switch (ctrl->shared_data->hw_rev) {
+			case MDSS_DSI_HW_REV_103:
+				mdss_dsi_20nm_phy_config(ctrl);
+				break;
+			default:
+				mdss_dsi_28nm_phy_config(ctrl);
+				break;
+			}
+		}
+	} else {
+		/*
+		 * In split-dsi configuration, the phy should be disabled for
+		 * the first controller only when the second controller is
+		 * disabled. This is true regardless of whether broadcast
+		 * mode is enabled.
+		 */
+		if (mdss_dsi_is_hw_config_split(ctrl->shared_data)) {
+			other_ctrl = mdss_dsi_get_other_ctrl(ctrl);
+			if (mdss_dsi_is_right_ctrl(ctrl) && other_ctrl) {
+				mdss_dsi_phy_shutdown(other_ctrl);
+				mdss_dsi_phy_shutdown(ctrl);
+			}
+		} else {
+			mdss_dsi_phy_shutdown(ctrl);
+		}
+	}
+}
+
+void mdss_dsi_phy_disable(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	if (ctrl == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	mdss_dsi_phy_ctrl(ctrl, false);
+	mdss_dsi_phy_regulator_ctrl(ctrl, false);
+	/*
+	 * Wait for the registers writes to complete in order to
+	 * ensure that the phy is completely disabled
+	 */
+	wmb();
+}
+
/*
 * mdss_dsi_phy_init_sub() - bring up the PHY for one controller: the PHY
 * regulator is enabled first, then the PHY registers are programmed.
 */
static void mdss_dsi_phy_init_sub(struct mdss_dsi_ctrl_pdata *ctrl)
{
	mdss_dsi_phy_regulator_ctrl(ctrl, true);
	mdss_dsi_phy_ctrl(ctrl, true);
}
+
/*
 * mdss_dsi_phy_init() - initialize the DSI PHY(s)
 * @ctrl: DSI controller data
 *
 * In split display mode both PHY blocks must be initialized together,
 * before the PLL is enabled; the clock master therefore also initializes
 * the clock slave's PHY, and a call made on the slave itself is a no-op.
 */
void mdss_dsi_phy_init(struct mdss_dsi_ctrl_pdata *ctrl)
{
	struct mdss_dsi_ctrl_pdata *clk_slave;

	/* the slave's PHY is handled by the master (see below) */
	if (mdss_dsi_is_ctrl_clk_slave(ctrl))
		return;

	mdss_dsi_phy_init_sub(ctrl);

	if (!mdss_dsi_is_ctrl_clk_master(ctrl))
		return;

	clk_slave = mdss_dsi_get_ctrl_clk_slave();
	if (clk_slave)
		mdss_dsi_phy_init_sub(clk_slave);
	else
		pr_warn("%s: unable to get slave ctrl\n", __func__);
}
+
/*
 * mdss_dsi_core_clk_deinit() - release the core clock handles acquired by
 * mdss_dsi_core_clk_init(), in reverse acquisition order. Optional clocks
 * that were never obtained are NULL, hence the guard before each put.
 */
void mdss_dsi_core_clk_deinit(struct device *dev, struct dsi_shared_data *sdata)
{
	if (sdata->mmss_misc_ahb_clk)
		devm_clk_put(dev, sdata->mmss_misc_ahb_clk);
	if (sdata->ext_pixel1_clk)
		devm_clk_put(dev, sdata->ext_pixel1_clk);
	if (sdata->ext_byte1_clk)
		devm_clk_put(dev, sdata->ext_byte1_clk);
	if (sdata->ext_pixel0_clk)
		devm_clk_put(dev, sdata->ext_pixel0_clk);
	if (sdata->ext_byte0_clk)
		devm_clk_put(dev, sdata->ext_byte0_clk);
	if (sdata->axi_clk)
		devm_clk_put(dev, sdata->axi_clk);
	if (sdata->ahb_clk)
		devm_clk_put(dev, sdata->ahb_clk);
	if (sdata->mdp_core_clk)
		devm_clk_put(dev, sdata->mdp_core_clk);
}
+
+int mdss_dsi_clk_refresh(struct mdss_panel_data *pdata, bool update_phy)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	struct mdss_panel_info *pinfo = NULL;
+	int rc = 0;
+
+	if (!pdata) {
+		pr_err("%s: invalid panel data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+							panel_data);
+	pinfo = &pdata->panel_info;
+
+	if (!ctrl_pdata || !pinfo) {
+		pr_err("%s: invalid ctrl data\n", __func__);
+		return -EINVAL;
+	}
+
+	if (update_phy) {
+		pinfo->mipi.frame_rate = mdss_panel_calc_frame_rate(pinfo);
+		pr_debug("%s: new frame rate %d\n",
+				__func__, pinfo->mipi.frame_rate);
+	}
+
+	rc = mdss_dsi_clk_div_config(&pdata->panel_info,
+			pdata->panel_info.mipi.frame_rate);
+	if (rc) {
+		pr_err("%s: unable to initialize the clk dividers\n",
+								__func__);
+		return rc;
+	}
+	ctrl_pdata->refresh_clk_rate = false;
+	ctrl_pdata->pclk_rate = pdata->panel_info.mipi.dsi_pclk_rate;
+	ctrl_pdata->byte_clk_rate = pdata->panel_info.clk_rate / 8;
+	pr_debug("%s ctrl_pdata->byte_clk_rate=%d ctrl_pdata->pclk_rate=%d\n",
+		__func__, ctrl_pdata->byte_clk_rate, ctrl_pdata->pclk_rate);
+
+	rc = mdss_dsi_clk_set_link_rate(ctrl_pdata->dsi_clk_handle,
+			MDSS_DSI_LINK_BYTE_CLK, ctrl_pdata->byte_clk_rate,
+			MDSS_DSI_CLK_UPDATE_CLK_RATE_AT_ON);
+	if (rc) {
+		pr_err("%s: dsi_byte_clk - clk_set_rate failed\n",
+				__func__);
+		return rc;
+	}
+
+	rc = mdss_dsi_clk_set_link_rate(ctrl_pdata->dsi_clk_handle,
+			MDSS_DSI_LINK_PIX_CLK, ctrl_pdata->pclk_rate,
+			MDSS_DSI_CLK_UPDATE_CLK_RATE_AT_ON);
+	if (rc) {
+		pr_err("%s: dsi_pixel_clk - clk_set_rate failed\n",
+				__func__);
+		return rc;
+	}
+
+	if (update_phy) {
+		/* phy panel timing calaculation */
+		rc = mdss_dsi_phy_calc_timing_param(pinfo,
+				ctrl_pdata->shared_data->phy_rev,
+				pinfo->mipi.frame_rate);
+		if (rc) {
+			pr_err("Error in calculating phy timings\n");
+			return rc;
+		}
+		ctrl_pdata->update_phy_timing = false;
+	}
+
+	return rc;
+}
+
+int mdss_dsi_core_clk_init(struct platform_device *pdev,
+	struct dsi_shared_data *sdata)
+{
+	struct device *dev = NULL;
+	int rc = 0;
+
+	if (!pdev) {
+		pr_err("%s: Invalid pdev\n", __func__);
+		goto error;
+	}
+
+	dev = &pdev->dev;
+
+	/* Mandatory Clocks */
+	sdata->mdp_core_clk = devm_clk_get(dev, "mdp_core_clk");
+	if (IS_ERR(sdata->mdp_core_clk)) {
+		rc = PTR_ERR(sdata->mdp_core_clk);
+		pr_err("%s: Unable to get mdp core clk. rc=%d\n",
+			__func__, rc);
+		goto error;
+	}
+
+	sdata->ahb_clk = devm_clk_get(dev, "iface_clk");
+	if (IS_ERR(sdata->ahb_clk)) {
+		rc = PTR_ERR(sdata->ahb_clk);
+		pr_err("%s: Unable to get mdss ahb clk. rc=%d\n",
+			__func__, rc);
+		goto error;
+	}
+
+	sdata->axi_clk = devm_clk_get(dev, "bus_clk");
+	if (IS_ERR(sdata->axi_clk)) {
+		rc = PTR_ERR(sdata->axi_clk);
+		pr_err("%s: Unable to get axi bus clk. rc=%d\n",
+			__func__, rc);
+		goto error;
+	}
+
+	/* Optional Clocks */
+	sdata->ext_byte0_clk = devm_clk_get(dev, "ext_byte0_clk");
+	if (IS_ERR(sdata->ext_byte0_clk)) {
+		pr_debug("%s: unable to get byte0 clk rcg. rc=%d\n",
+			__func__, rc);
+		sdata->ext_byte0_clk = NULL;
+	}
+
+	sdata->ext_pixel0_clk = devm_clk_get(dev, "ext_pixel0_clk");
+	if (IS_ERR(sdata->ext_pixel0_clk)) {
+		pr_debug("%s: unable to get pixel0 clk rcg. rc=%d\n",
+			__func__, rc);
+		sdata->ext_pixel0_clk = NULL;
+	}
+
+	sdata->ext_byte1_clk = devm_clk_get(dev, "ext_byte1_clk");
+	if (IS_ERR(sdata->ext_byte1_clk)) {
+		pr_debug("%s: unable to get byte1 clk rcg. rc=%d\n",
+			__func__, rc);
+		sdata->ext_byte1_clk = NULL;
+	}
+
+	sdata->ext_pixel1_clk = devm_clk_get(dev, "ext_pixel1_clk");
+	if (IS_ERR(sdata->ext_pixel1_clk)) {
+		pr_debug("%s: unable to get pixel1 clk rcg. rc=%d\n",
+			__func__, rc);
+		sdata->ext_pixel1_clk = NULL;
+	}
+
+	sdata->mmss_misc_ahb_clk = devm_clk_get(dev, "core_mmss_clk");
+	if (IS_ERR(sdata->mmss_misc_ahb_clk)) {
+		sdata->mmss_misc_ahb_clk = NULL;
+		pr_debug("%s: Unable to get mmss misc ahb clk\n",
+			__func__);
+	}
+
+error:
+	if (rc)
+		mdss_dsi_core_clk_deinit(dev, sdata);
+	return rc;
+}
+
/*
 * mdss_dsi_link_clk_deinit() - release the link clock handles acquired by
 * mdss_dsi_link_clk_init(), in reverse acquisition order. Optional clocks
 * (rcg/vco dummy) that were never obtained are NULL, hence the guards.
 */
void mdss_dsi_link_clk_deinit(struct device *dev,
	struct mdss_dsi_ctrl_pdata *ctrl)
{
	if (ctrl->vco_dummy_clk)
		devm_clk_put(dev, ctrl->vco_dummy_clk);
	if (ctrl->pixel_clk_rcg)
		devm_clk_put(dev, ctrl->pixel_clk_rcg);
	if (ctrl->byte_clk_rcg)
		devm_clk_put(dev, ctrl->byte_clk_rcg);
	if (ctrl->byte_clk)
		devm_clk_put(dev, ctrl->byte_clk);
	if (ctrl->esc_clk)
		devm_clk_put(dev, ctrl->esc_clk);
	if (ctrl->pixel_clk)
		devm_clk_put(dev, ctrl->pixel_clk);
}
+
+int mdss_dsi_link_clk_init(struct platform_device *pdev,
+	struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	struct device *dev = NULL;
+	int rc = 0;
+
+	if (!pdev) {
+		pr_err("%s: Invalid pdev\n", __func__);
+		goto error;
+	}
+
+	dev = &pdev->dev;
+
+	/* Mandatory Clocks */
+	ctrl->byte_clk = devm_clk_get(dev, "byte_clk");
+	if (IS_ERR(ctrl->byte_clk)) {
+		rc = PTR_ERR(ctrl->byte_clk);
+		pr_err("%s: can't find dsi_byte_clk. rc=%d\n",
+			__func__, rc);
+		ctrl->byte_clk = NULL;
+		goto error;
+	}
+
+	ctrl->pixel_clk = devm_clk_get(dev, "pixel_clk");
+	if (IS_ERR(ctrl->pixel_clk)) {
+		rc = PTR_ERR(ctrl->pixel_clk);
+		pr_err("%s: can't find dsi_pixel_clk. rc=%d\n",
+			__func__, rc);
+		ctrl->pixel_clk = NULL;
+		goto error;
+	}
+
+	ctrl->esc_clk = devm_clk_get(dev, "core_clk");
+	if (IS_ERR(ctrl->esc_clk)) {
+		rc = PTR_ERR(ctrl->esc_clk);
+		pr_err("%s: can't find dsi_esc_clk. rc=%d\n",
+			__func__, rc);
+		ctrl->esc_clk = NULL;
+		goto error;
+	}
+
+	/* Optional Clocks */
+	ctrl->byte_clk_rcg = devm_clk_get(dev, "byte_clk_rcg");
+	if (IS_ERR(ctrl->byte_clk_rcg)) {
+		pr_debug("%s: can't find byte clk rcg. rc=%d\n", __func__, rc);
+		ctrl->byte_clk_rcg = NULL;
+	}
+
+	ctrl->pixel_clk_rcg = devm_clk_get(dev, "pixel_clk_rcg");
+	if (IS_ERR(ctrl->pixel_clk_rcg)) {
+		pr_debug("%s: can't find pixel clk rcg. rc=%d\n", __func__, rc);
+		ctrl->pixel_clk_rcg = NULL;
+	}
+
+	ctrl->vco_dummy_clk = devm_clk_get(dev, "pll_vco_dummy_clk");
+	if (IS_ERR(ctrl->vco_dummy_clk)) {
+		pr_debug("%s: can't find vco dummy clk. rc=%d\n", __func__, rc);
+		ctrl->vco_dummy_clk = NULL;
+	}
+
+error:
+	if (rc)
+		mdss_dsi_link_clk_deinit(dev, ctrl);
+	return rc;
+}
+
/*
 * mdss_dsi_shadow_clk_deinit() - release the shadow/mux clock handles
 * acquired by mdss_dsi_shadow_clk_init(). Handles that were never
 * obtained are NULL, hence the guards before each put.
 */
void mdss_dsi_shadow_clk_deinit(struct device *dev,
	struct mdss_dsi_ctrl_pdata *ctrl)
{
	if (ctrl->mux_byte_clk)
		devm_clk_put(dev, ctrl->mux_byte_clk);
	if (ctrl->mux_pixel_clk)
		devm_clk_put(dev, ctrl->mux_pixel_clk);
	if (ctrl->pll_byte_clk)
		devm_clk_put(dev, ctrl->pll_byte_clk);
	if (ctrl->pll_pixel_clk)
		devm_clk_put(dev, ctrl->pll_pixel_clk);
	if (ctrl->shadow_byte_clk)
		devm_clk_put(dev, ctrl->shadow_byte_clk);
	if (ctrl->shadow_pixel_clk)
		devm_clk_put(dev, ctrl->shadow_pixel_clk);
}
+
/*
 * mdss_dsi_shadow_clk_init() - acquire the mux/pll/shadow clock handles
 * used for dynamic refresh rate switching
 * @pdev:	platform device used for devm clock lookup
 * @ctrl:	DSI controller the clock handles are stored in
 *
 * All six clocks are mandatory here; failure to get any of them releases
 * whatever was acquired so far and fails the init.
 *
 * Return: 0 on success, negative error code on failure.
 */
int mdss_dsi_shadow_clk_init(struct platform_device *pdev,
		struct mdss_dsi_ctrl_pdata *ctrl)
{
	struct device *dev = NULL;
	int rc = 0;

	if (!pdev) {
		pr_err("%s: Invalid pdev\n", __func__);
		return -EINVAL;
	}

	dev = &pdev->dev;
	ctrl->mux_byte_clk = devm_clk_get(dev, "pll_byte_clk_mux");
	if (IS_ERR(ctrl->mux_byte_clk)) {
		rc = PTR_ERR(ctrl->mux_byte_clk);
		pr_err("%s: can't find mux_byte_clk. rc=%d\n",
			__func__, rc);
		ctrl->mux_byte_clk = NULL;
		goto error;
	}

	ctrl->mux_pixel_clk = devm_clk_get(dev, "pll_pixel_clk_mux");
	if (IS_ERR(ctrl->mux_pixel_clk)) {
		rc = PTR_ERR(ctrl->mux_pixel_clk);
		pr_err("%s: can't find mdss_mux_pixel_clk. rc=%d\n",
			__func__, rc);
		ctrl->mux_pixel_clk = NULL;
		goto error;
	}

	ctrl->pll_byte_clk = devm_clk_get(dev, "pll_byte_clk_src");
	if (IS_ERR(ctrl->pll_byte_clk)) {
		rc = PTR_ERR(ctrl->pll_byte_clk);
		pr_err("%s: can't find pll_byte_clk. rc=%d\n",
			__func__, rc);
		ctrl->pll_byte_clk = NULL;
		goto error;
	}

	ctrl->pll_pixel_clk = devm_clk_get(dev, "pll_pixel_clk_src");
	if (IS_ERR(ctrl->pll_pixel_clk)) {
		rc = PTR_ERR(ctrl->pll_pixel_clk);
		pr_err("%s: can't find pll_pixel_clk. rc=%d\n",
			__func__, rc);
		ctrl->pll_pixel_clk = NULL;
		goto error;
	}

	ctrl->shadow_byte_clk = devm_clk_get(dev, "pll_shadow_byte_clk_src");
	if (IS_ERR(ctrl->shadow_byte_clk)) {
		rc = PTR_ERR(ctrl->shadow_byte_clk);
		pr_err("%s: can't find shadow_byte_clk. rc=%d\n",
			__func__, rc);
		ctrl->shadow_byte_clk = NULL;
		goto error;
	}

	ctrl->shadow_pixel_clk = devm_clk_get(dev, "pll_shadow_pixel_clk_src");
	if (IS_ERR(ctrl->shadow_pixel_clk)) {
		rc = PTR_ERR(ctrl->shadow_pixel_clk);
		pr_err("%s: can't find shadow_pixel_clk. rc=%d\n",
			__func__, rc);
		ctrl->shadow_pixel_clk = NULL;
		goto error;
	}

error:
	/* release any partially-acquired clocks on failure */
	if (rc)
		mdss_dsi_shadow_clk_deinit(dev, ctrl);
	return rc;
}
+
+bool is_diff_frame_rate(struct mdss_panel_info *panel_info,
+	u32 frame_rate)
+{
+	if (panel_info->dynamic_fps && panel_info->current_fps)
+		return (frame_rate != panel_info->current_fps);
+	else
+		return (frame_rate != panel_info->mipi.frame_rate);
+}
+
+int mdss_dsi_clk_div_config(struct mdss_panel_info *panel_info,
+			    int frame_rate)
+{
+	struct mdss_panel_data *pdata  = container_of(panel_info,
+			struct mdss_panel_data, panel_info);
+	struct  mdss_dsi_ctrl_pdata *ctrl_pdata = container_of(pdata,
+			struct mdss_dsi_ctrl_pdata, panel_data);
+	u64 h_period, v_period, clk_rate;
+	u32 dsi_pclk_rate;
+	u8 lanes = 0, bpp;
+
+	if (panel_info->mipi.data_lane3)
+		lanes += 1;
+	if (panel_info->mipi.data_lane2)
+		lanes += 1;
+	if (panel_info->mipi.data_lane1)
+		lanes += 1;
+	if (panel_info->mipi.data_lane0)
+		lanes += 1;
+
+	switch (panel_info->mipi.dst_format) {
+	case DSI_CMD_DST_FORMAT_RGB888:
+	case DSI_VIDEO_DST_FORMAT_RGB888:
+	case DSI_VIDEO_DST_FORMAT_RGB666_LOOSE:
+		bpp = 3;
+		break;
+	case DSI_CMD_DST_FORMAT_RGB565:
+	case DSI_VIDEO_DST_FORMAT_RGB565:
+		bpp = 2;
+		break;
+	default:
+		bpp = 3;	/* Default format set to RGB888 */
+		break;
+	}
+
+	h_period = mdss_panel_get_htotal(panel_info, true);
+	v_period = mdss_panel_get_vtotal(panel_info);
+
+	if (ctrl_pdata->refresh_clk_rate || is_diff_frame_rate(panel_info,
+			frame_rate) || (!panel_info->clk_rate)) {
+		if (lanes > 0) {
+			panel_info->clk_rate = h_period * v_period * frame_rate
+				* bpp * 8;
+			do_div(panel_info->clk_rate, lanes);
+		} else {
+			pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
+			panel_info->clk_rate =
+				h_period * v_period * frame_rate * bpp * 8;
+		}
+	}
+
+	if (panel_info->clk_rate == 0)
+		panel_info->clk_rate = 454000000;
+
+	clk_rate = panel_info->clk_rate;
+	do_div(clk_rate, 8 * bpp);
+	dsi_pclk_rate = (u32) clk_rate * lanes;
+
+	if ((dsi_pclk_rate < 3300000) || (dsi_pclk_rate > 250000000))
+		dsi_pclk_rate = 35000000;
+	panel_info->mipi.dsi_pclk_rate = dsi_pclk_rate;
+
+	return 0;
+}
+
+static bool mdss_dsi_is_ulps_req_valid(struct mdss_dsi_ctrl_pdata *ctrl,
+		int enable)
+{
+	struct mdss_dsi_ctrl_pdata *octrl = NULL;
+	struct mdss_panel_data *pdata = &ctrl->panel_data;
+	struct mdss_panel_info *pinfo = &pdata->panel_info;
+
+	pr_debug("%s: checking ulps req validity for ctrl%d\n",
+		__func__, ctrl->ndx);
+
+	if (!mdss_dsi_ulps_feature_enabled(pdata) &&
+			!pinfo->ulps_suspend_enabled) {
+		pr_debug("%s: ULPS feature is not enabled\n", __func__);
+		return false;
+	}
+
+	/*
+	 * No need to enter ULPS when transitioning from splash screen to
+	 * boot animation since it is expected that the clocks would be turned
+	 * right back on.
+	 */
+	if (enable && pinfo->cont_splash_enabled) {
+		pr_debug("%s: skip ULPS config with splash screen enabled\n",
+			__func__);
+		return false;
+	}
+
+	/*
+	 * No need to enable ULPS if panel is not yet initialized.
+	 * However, this should be allowed in following usecases:
+	 *   1. If ULPS during suspend feature is enabled, where we
+	 *      configure the lanes in ULPS after turning off the panel.
+	 *   2. When coming out of idle PC with clamps enabled, where we
+	 *      transition the controller HW state back to ULPS prior to
+	 *      disabling ULPS.
+	 */
+	if (enable && !ctrl->mmss_clamp &&
+		!(ctrl->ctrl_state & CTRL_STATE_PANEL_INIT) &&
+		!pdata->panel_info.ulps_suspend_enabled) {
+		pr_debug("%s: panel not yet initialized\n", __func__);
+		return false;
+	}
+
+	/*
+	 * For split-DSI usecase, wait till both controllers are initialized.
+	 * The same exceptions as above are applicable here too.
+	 */
+	if (mdss_dsi_is_hw_config_split(ctrl->shared_data)) {
+		octrl = mdss_dsi_get_other_ctrl(ctrl);
+		if (enable && !ctrl->mmss_clamp && octrl &&
+			!(octrl->ctrl_state & CTRL_STATE_PANEL_INIT) &&
+			!pdata->panel_info.ulps_suspend_enabled) {
+			pr_debug("%s: split-DSI, other ctrl not ready yet\n",
+				__func__);
+			return false;
+		}
+	}
+
+	return true;
+}
+
+/**
+ * mdss_dsi_ulps_config() - Program DSI lanes to enter/exit ULPS mode
+ * @ctrl: pointer to DSI controller structure
+ * @enable: 1 to enter ULPS, 0 to exit ULPS
+ *
+ * This function executes the necessary programming sequence to enter/exit
+ * DSI Ultra-Low Power State (ULPS). This function assumes that the link and
+ * core clocks are already on.
+ */
+static int mdss_dsi_ulps_config(struct mdss_dsi_ctrl_pdata *ctrl,
+	int enable)
+{
+	int ret = 0;
+	struct mdss_panel_data *pdata = NULL;
+	struct mdss_panel_info *pinfo;
+	struct mipi_panel_info *mipi;
+	u32 lane_status = 0;
+	u32 active_lanes = 0;
+
+	if (!ctrl) {
+		pr_err("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	pdata = &ctrl->panel_data;
+	if (!pdata) {
+		pr_err("%s: Invalid panel data\n", __func__);
+		return -EINVAL;
+	}
+	pinfo = &pdata->panel_info;
+	mipi = &pinfo->mipi;
+
+	if (!mdss_dsi_is_ulps_req_valid(ctrl, enable)) {
+		pr_debug("%s: skiping ULPS config for ctrl%d, enable=%d\n",
+			__func__, ctrl->ndx, enable);
+		return 0;
+	}
+
+	/* clock lane will always be programmed for ulps */
+	active_lanes = BIT(4);
+	/*
+	 * make a note of all active data lanes for which ulps entry/exit
+	 * is needed
+	 */
+	if (mipi->data_lane0)
+		active_lanes |= BIT(0);
+	if (mipi->data_lane1)
+		active_lanes |= BIT(1);
+	if (mipi->data_lane2)
+		active_lanes |= BIT(2);
+	if (mipi->data_lane3)
+		active_lanes |= BIT(3);
+
+	pr_debug("%s: configuring ulps (%s) for ctrl%d, active lanes=0x%08x,clamps=%s\n",
+		__func__, (enable ? "on" : "off"), ctrl->ndx,
+		active_lanes, ctrl->mmss_clamp ? "enabled" : "disabled");
+
+	if (enable && !ctrl->ulps) {
+		/*
+		 * Ensure that the lanes are idle prior to placing a ULPS entry
+		 * request. This is needed to ensure that there is no overlap
+		 * between any HS or LP commands being sent out on the lane and
+		 * a potential ULPS entry request.
+		 *
+		 * This check needs to be avoided when we are resuming from idle
+		 * power collapse and just restoring the controller state to
+		 * ULPS with the clamps still in place.
+		 */
+		if (!ctrl->mmss_clamp) {
+			ret = mdss_dsi_wait_for_lane_idle(ctrl);
+			if (ret) {
+				pr_warn("%s: lanes not idle, skip ulps\n",
+					__func__);
+				ret = 0;
+				goto error;
+			}
+		}
+
+		/*
+		 * ULPS Entry Request.
+		 * Wait for a short duration to ensure that the lanes
+		 * enter ULP state.
+		 */
+		MIPI_OUTP(ctrl->ctrl_base + 0x0AC, active_lanes);
+		usleep_range(100, 110);
+
+		/* Check to make sure that all active data lanes are in ULPS */
+		lane_status = MIPI_INP(ctrl->ctrl_base + 0xA8);
+		if (lane_status & (active_lanes << 8)) {
+			pr_err("%s: ULPS entry req failed for ctrl%d. Lane status=0x%08x\n",
+				__func__, ctrl->ndx, lane_status);
+			ret = -EINVAL;
+			goto error;
+		}
+
+		ctrl->ulps = true;
+	} else if (!enable && ctrl->ulps) {
+		/*
+		 * Clear out any phy errors prior to exiting ULPS
+		 * This fixes certain instances where phy does not exit
+		 * ULPS cleanly. Also, do not print error during such cases.
+		 */
+		mdss_dsi_dln0_phy_err(ctrl, false);
+
+		/*
+		 * ULPS Exit Request
+		 * Hardware requirement is to wait for at least 1ms
+		 */
+		MIPI_OUTP(ctrl->ctrl_base + 0x0AC, active_lanes << 8);
+		usleep_range(1000, 1100);
+
+		/*
+		 * Sometimes when exiting ULPS, it is possible that some DSI
+		 * lanes are not in the stop state which could lead to DSI
+		 * commands not going through. To avoid this, force the lanes
+		 * to be in stop state.
+		 */
+		MIPI_OUTP(ctrl->ctrl_base + 0x0AC, active_lanes << 16);
+		wmb(); /* ensure lanes are put to stop state */
+
+		MIPI_OUTP(ctrl->ctrl_base + 0x0AC, 0x0);
+		wmb(); /* ensure lanes are in proper state */
+
+		/*
+		 * Wait for a short duration before enabling
+		 * data transmission
+		 */
+		usleep_range(100, 110);
+
+		lane_status = MIPI_INP(ctrl->ctrl_base + 0xA8);
+		ctrl->ulps = false;
+	} else {
+		pr_debug("%s: No change requested: %s -> %s\n", __func__,
+			ctrl->ulps ? "enabled" : "disabled",
+			enable ? "enabled" : "disabled");
+	}
+
+	pr_debug("%s: DSI lane status = 0x%08x. Ulps %s\n", __func__,
+		lane_status, enable ? "enabled" : "disabled");
+
+error:
+	return ret;
+}
+
+/**
+ * mdss_dsi_clamp_ctrl() - Program DSI clamps for supporting power collapse
+ * @ctrl: pointer to DSI controller structure
+ * @enable: 1 to enable clamps, 0 to disable clamps
+ *
+ * For idle-screen usecases with command mode panels, MDSS can be power
+ * collapsed. However, DSI phy needs to remain on. To avoid any mismatch
+ * between the DSI controller state, DSI phy needs to be clamped before
+ * power collapsing. This function executes the required programming
+ * sequence to configure these DSI clamps. This function should only be called
+ * when the DSI link clocks are disabled.
+ */
+static int mdss_dsi_clamp_ctrl(struct mdss_dsi_ctrl_pdata *ctrl, int enable)
+{
+	struct mipi_panel_info *mipi = NULL;
+	u32 clamp_reg, regval = 0;
+	u32 clamp_reg_off, phyrst_reg_off;
+
+	if (!ctrl) {
+		pr_err("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!ctrl->mmss_misc_io.base) {
+		pr_err("%s: mmss_misc_io not mapped\n", __func__);
+		return -EINVAL;
+	}
+
+	clamp_reg_off = ctrl->shared_data->ulps_clamp_ctrl_off;
+	phyrst_reg_off = ctrl->shared_data->ulps_phyrst_ctrl_off;
+	mipi = &ctrl->panel_data.panel_info.mipi;
+
+	/* clock lane will always be clamped */
+	clamp_reg = BIT(9);
+	if (ctrl->ulps)
+		clamp_reg |= BIT(8);
+	/* make a note of all active data lanes which need to be clamped */
+	if (mipi->data_lane0) {
+		clamp_reg |= BIT(7);
+		if (ctrl->ulps)
+			clamp_reg |= BIT(6);
+	}
+	if (mipi->data_lane1) {
+		clamp_reg |= BIT(5);
+		if (ctrl->ulps)
+			clamp_reg |= BIT(4);
+	}
+	if (mipi->data_lane2) {
+		clamp_reg |= BIT(3);
+		if (ctrl->ulps)
+			clamp_reg |= BIT(2);
+	}
+	if (mipi->data_lane3) {
+		clamp_reg |= BIT(1);
+		if (ctrl->ulps)
+			clamp_reg |= BIT(0);
+	}
+	pr_debug("%s: called for ctrl%d, enable=%d, clamp_reg=0x%08x\n",
+		__func__, ctrl->ndx, enable, clamp_reg);
+	if (enable && !ctrl->mmss_clamp) {
+		regval = MIPI_INP(ctrl->mmss_misc_io.base + clamp_reg_off);
+		/* Enable MMSS DSI Clamps */
+		if (ctrl->ndx == DSI_CTRL_0) {
+			MIPI_OUTP(ctrl->mmss_misc_io.base + clamp_reg_off,
+				regval | clamp_reg);
+			MIPI_OUTP(ctrl->mmss_misc_io.base + clamp_reg_off,
+				regval | (clamp_reg | BIT(15)));
+		} else if (ctrl->ndx == DSI_CTRL_1) {
+			MIPI_OUTP(ctrl->mmss_misc_io.base + clamp_reg_off,
+				regval | (clamp_reg << 16));
+			MIPI_OUTP(ctrl->mmss_misc_io.base + clamp_reg_off,
+				regval | ((clamp_reg << 16) | BIT(31)));
+		}
+		/* update clamp ctrl before setting phy reset disable */
+		wmb();
+
+		/*
+		 * This register write ensures that DSI PHY will not be
+		 * reset when mdss ahb clock reset is asserted while coming
+		 * out of power collapse
+		 */
+		if (IS_MDSS_MAJOR_MINOR_SAME(ctrl->shared_data->hw_rev,
+			MDSS_DSI_HW_REV_104) &&
+			(MDSS_GET_STEP(ctrl->shared_data->hw_rev) !=
+			MDSS_DSI_HW_REV_STEP_2)) {
+
+			regval = MIPI_INP(ctrl->mmss_misc_io.base +
+				clamp_reg_off);
+			MIPI_OUTP(ctrl->mmss_misc_io.base + clamp_reg_off,
+				regval | BIT(30));
+		} else {
+			MIPI_OUTP(ctrl->mmss_misc_io.base + phyrst_reg_off,
+				0x1);
+		}
+		/* make sure that clamp ctrl is updated before disable call */
+		wmb();
+		ctrl->mmss_clamp = true;
+	} else if (!enable && ctrl->mmss_clamp) {
+		if (IS_MDSS_MAJOR_MINOR_SAME(ctrl->shared_data->hw_rev,
+			MDSS_DSI_HW_REV_104) &&
+			(MDSS_GET_STEP(ctrl->shared_data->hw_rev) !=
+			MDSS_DSI_HW_REV_STEP_2)) {
+
+			regval = MIPI_INP(ctrl->mmss_misc_io.base +
+				clamp_reg_off);
+			MIPI_OUTP(ctrl->mmss_misc_io.base + clamp_reg_off,
+				regval & ~BIT(30));
+		} else {
+			MIPI_OUTP(ctrl->mmss_misc_io.base + phyrst_reg_off,
+				0x0);
+		}
+		/* update clamp ctrl before unsetting phy reset disable */
+		wmb();
+
+		regval = MIPI_INP(ctrl->mmss_misc_io.base + clamp_reg_off);
+		/* Disable MMSS DSI Clamps */
+		if (ctrl->ndx == DSI_CTRL_0)
+			MIPI_OUTP(ctrl->mmss_misc_io.base + clamp_reg_off,
+				regval & ~(clamp_reg | BIT(15)));
+		else if (ctrl->ndx == DSI_CTRL_1)
+			MIPI_OUTP(ctrl->mmss_misc_io.base + clamp_reg_off,
+				regval & ~((clamp_reg << 16) | BIT(31)));
+		/* make sure that clamp ctrl is updated before enable call */
+		wmb();
+		ctrl->mmss_clamp = false;
+	} else {
+		pr_debug("%s: No change requested: %s -> %s\n", __func__,
+			ctrl->mmss_clamp ? "enabled" : "disabled",
+			enable ? "enabled" : "disabled");
+	}
+
+	return 0;
+}
+
/* Serializes all DSI clock state transitions across both controllers */
DEFINE_MUTEX(dsi_clk_mutex);

/*
 * mdss_dsi_clk_ctrl() - request a DSI clock state change
 * @ctrl:	controller the request is made for
 * @clk_handle:	clock manager handle (DSI or MDP client of this ctrl)
 * @clk_type:	which clocks to change (core/link/all)
 * @clk_state:	requested state (ON / EARLY_GATE / OFF)
 *
 * Besides changing the requested controller's clocks, this also manages
 * the companion ("master") controller in sync-wait-broadcast or split
 * clock configurations, including a per-client vote count so that the
 * master's ON votes cast on behalf of a slave are balanced correctly.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int mdss_dsi_clk_ctrl(struct mdss_dsi_ctrl_pdata *ctrl, void *clk_handle,
	enum mdss_dsi_clk_type clk_type, enum mdss_dsi_clk_state clk_state)
{
	int rc = 0;
	struct mdss_dsi_ctrl_pdata *mctrl = NULL;
	int i, *vote_cnt;

	void *m_clk_handle;
	bool is_ecg = false;
	int state = MDSS_DSI_CLK_OFF;

	if (!ctrl) {
		pr_err("%s: Invalid arg\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&dsi_clk_mutex);
	/*
	 * In sync_wait_broadcast mode, we need to enable clocks
	 * for the other controller as well when enabling clocks
	 * for the trigger controller.
	 *
	 * If sync wait_broadcase mode is not enabled, but if split display
	 * mode is enabled where both DSI controller's branch clocks are
	 * sourced out of a single PLL, then we need to ensure that the
	 * controller associated with that PLL also has it's clocks turned
	 * on. This is required to make sure that if that controller's PLL/PHY
	 * are clamped then they can be removed.
	 */
	if (mdss_dsi_sync_wait_trigger(ctrl)) {
		mctrl = mdss_dsi_get_other_ctrl(ctrl);
		if (!mctrl)
			pr_warn("%s: Unable to get other control\n", __func__);
	} else if (mdss_dsi_is_ctrl_clk_slave(ctrl)) {
		mctrl = mdss_dsi_get_ctrl_clk_master();
		if (!mctrl)
			pr_warn("%s: Unable to get clk master control\n",
				__func__);
	}

	/*
	 * it should add and remove extra votes based on voting clients to avoid
	 * removal of legitimate vote from DSI client.
	 */
	if (mctrl && (clk_handle == ctrl->dsi_clk_handle)) {
		m_clk_handle = mctrl->dsi_clk_handle;
		vote_cnt = &mctrl->m_dsi_vote_cnt;
	} else if (mctrl) {
		/* request came through the MDP client handle */
		m_clk_handle = mctrl->mdp_clk_handle;
		vote_cnt = &mctrl->m_mdp_vote_cnt;
	}

	/*
	 * When DSI is used in split mode, the link clock for master controller
	 * has to be turned on first before the link clock for slave can be
	 * turned on. In case the current controller is a slave, an ON vote is
	 * cast for master before changing the state of the slave clock. After
	 * the state change for slave, the ON votes will be removed depending on
	 * the new state.
	 */
	pr_debug("%s: DSI_%d: clk = %d, state = %d, caller = %pS, mctrl=%d\n",
		 __func__, ctrl->ndx, clk_type, clk_state,
		 __builtin_return_address(0), mctrl ? 1 : 0);
	if (mctrl && (clk_type & MDSS_DSI_LINK_CLK)) {
		if (clk_state != MDSS_DSI_CLK_ON) {
			/* preserve clk state; do not turn off forcefully */
			is_ecg = is_dsi_clk_in_ecg_state(m_clk_handle);
			if (is_ecg)
				state = MDSS_DSI_CLK_EARLY_GATE;
		}

		rc = mdss_dsi_clk_req_state(m_clk_handle,
			MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_ON, mctrl->ndx);
		if (rc) {
			pr_err("%s: failed to turn on mctrl clocks, rc=%d\n",
				 __func__, rc);
			goto error;
		}
		(*vote_cnt)++;
	}

	rc = mdss_dsi_clk_req_state(clk_handle, clk_type, clk_state, ctrl->ndx);
	if (rc) {
		pr_err("%s: failed set clk state, rc = %d\n", __func__, rc);
		goto error;
	}

	if (mctrl && (clk_type & MDSS_DSI_LINK_CLK) &&
			clk_state != MDSS_DSI_CLK_ON) {

		/*
		 * In case of split dsi, an ON vote is cast for all state change
		 * requests. If the current state is ON, then the vote would not
		 * be released.
		 *
		 * If the current state is ECG, there is one possible way to
		 * transition in to this state, which is ON -> ECG. In this case
		 * two votes will be removed because one was cast at ON and
		 * other when entering ECG.
		 *
		 * If the current state is OFF, it could have been due to two
		 * possible transitions in to OFF state.
		 *	1. ON -> OFF: In this case two votes were cast by the
		 *	   slave controller, one during ON (which is not
		 *	   removed) and one during OFF. So we need to remove two
		 *	   votes.
		 *	2. ECG -> OFF: In this case there is only one vote
		 *	   for ON, since the previous ECG state must have
		 *	   removed two votes to let clocks turn off.
		 *
		 * To satisfy the above requirement, vote_cnt keeps track of
		 * the number of ON votes for master requested by slave. For
		 * every OFF/ECG state request, Either 2 or vote_cnt number of
		 * votes are removed depending on which is lower.
		 */
		for (i = 0; (i < *vote_cnt && i < 2); i++) {
			rc = mdss_dsi_clk_req_state(m_clk_handle,
				MDSS_DSI_ALL_CLKS, state, mctrl->ndx);
			if (rc) {
				pr_err("%s: failed to set mctrl clk state, rc = %d\n",
				       __func__, rc);
				goto error;
			}
		}
		(*vote_cnt) -= i;
		pr_debug("%s: ctrl=%d, vote_cnt=%d dsi_vote_cnt=%d mdp_vote_cnt:%d\n",
			__func__, ctrl->ndx, *vote_cnt, mctrl->m_dsi_vote_cnt,
			mctrl->m_mdp_vote_cnt);
	}

error:
	mutex_unlock(&dsi_clk_mutex);
	return rc;
}
+
+/*
+ * mdss_dsi_pre_clkoff_cb() - clock-manager callback invoked before DSI
+ * clocks are turned off.
+ * @priv: opaque pointer, actually a struct mdss_dsi_ctrl_pdata.
+ * @clk: bitmask of the clock types being changed.
+ * @new_state: target clock state.
+ *
+ * On link-clock OFF, requests ULPS entry when the applicable ULPS feature
+ * is enabled.  On core-clock OFF, either powers down the PHY and asserts
+ * the DSI clamps (idle power collapse / ULPS-during-suspend) or ensures
+ * the controller has exited ULPS.  Returns 0 or the error of the last
+ * failing sub-step.
+ */
+int mdss_dsi_pre_clkoff_cb(void *priv,
+			   enum mdss_dsi_clk_type clk,
+			   enum mdss_dsi_clk_state new_state)
+{
+	int rc = 0;
+	struct mdss_dsi_ctrl_pdata *ctrl = priv;
+	struct mdss_panel_data *pdata = NULL;
+
+	pdata = &ctrl->panel_data;
+
+	if ((clk & MDSS_DSI_LINK_CLK) && (new_state == MDSS_DSI_CLK_OFF)) {
+		/*
+		 * If ULPS feature is enabled, enter ULPS first.
+		 * However, when blanking the panel, we should enter ULPS
+		 * only if ULPS during suspend feature is enabled.
+		 */
+		if (!(ctrl->ctrl_state & CTRL_STATE_PANEL_INIT)) {
+			if (pdata->panel_info.ulps_suspend_enabled)
+				mdss_dsi_ulps_config(ctrl, 1);
+			/*
+			 * NOTE(review): the return value of the call above is
+			 * discarded, unlike the branch below — confirm this
+			 * best-effort behavior is intentional.
+			 */
+		} else if (mdss_dsi_ulps_feature_enabled(pdata)) {
+			rc = mdss_dsi_ulps_config(ctrl, 1);
+		}
+		if (rc) {
+			pr_err("%s: failed enable ulps, rc = %d\n",
+			       __func__, rc);
+		}
+	}
+
+	if ((clk & MDSS_DSI_CORE_CLK) && (new_state == MDSS_DSI_CLK_OFF)) {
+		/*
+		 * Enable DSI clamps only if entering idle power collapse or
+		 * when ULPS during suspend is enabled.
+		 */
+		if ((ctrl->ctrl_state & CTRL_STATE_DSI_ACTIVE) ||
+			pdata->panel_info.ulps_suspend_enabled) {
+			/* PHY must be powered down before clamps are set */
+			mdss_dsi_phy_power_off(ctrl);
+			rc = mdss_dsi_clamp_ctrl(ctrl, 1);
+			if (rc)
+				pr_err("%s: Failed to enable dsi clamps. rc=%d\n",
+					__func__, rc);
+		} else {
+			/*
+			 * Make sure that controller is not in ULPS state when
+			 * the DSI link is not active.
+			 */
+			rc = mdss_dsi_ulps_config(ctrl, 0);
+			if (rc)
+				pr_err("%s: failed to disable ulps. rc=%d\n",
+					__func__, rc);
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * mdss_dsi_post_clkon_cb() - clock-manager callback invoked after DSI
+ * clocks have been turned on.
+ * @priv: opaque pointer, actually a struct mdss_dsi_ctrl_pdata.
+ * @clk: bitmask of the clock types that changed.
+ * @curr_state: the clock state just entered.
+ *
+ * On core-clock ON, restores controller state after a power collapse:
+ * re-runs controller setup if clamps were engaged, re-enters ULPS so the
+ * controller matches the PHY state, releases the clamps, then powers the
+ * PHY back on.  On link-clock ON, exits ULPS.  Returns 0 on success.
+ */
+int mdss_dsi_post_clkon_cb(void *priv,
+			   enum mdss_dsi_clk_type clk,
+			   enum mdss_dsi_clk_state curr_state)
+{
+	int rc = 0;
+	struct mdss_panel_data *pdata = NULL;
+	struct mdss_dsi_ctrl_pdata *ctrl = priv;
+	bool mmss_clamp;
+
+	pdata = &ctrl->panel_data;
+
+	if (clk & MDSS_DSI_CORE_CLK) {
+		/* snapshot before clamp release below may change it */
+		mmss_clamp = ctrl->mmss_clamp;
+		/*
+		 * controller setup is needed if coming out of idle
+		 * power collapse with clamps enabled.
+		 */
+		if (mmss_clamp)
+			mdss_dsi_ctrl_setup(ctrl);
+
+		if (ctrl->ulps) {
+			/*
+			 * ULPS Entry Request. This is needed if the lanes were
+			 * in ULPS prior to power collapse, since after
+			 * power collapse and reset, the DSI controller resets
+			 * back to idle state and not ULPS. This ulps entry
+			 * request will transition the state of the DSI
+			 * controller to ULPS which will match the state of the
+			 * DSI phy. This needs to be done prior to disabling
+			 * the DSI clamps.
+			 *
+			 * Also, reset the ulps flag so that ulps_config
+			 * function would reconfigure the controller state to
+			 * ULPS.
+			 */
+			ctrl->ulps = false;
+			rc = mdss_dsi_ulps_config(ctrl, 1);
+			if (rc) {
+				pr_err("%s: Failed to enter ULPS. rc=%d\n",
+					__func__, rc);
+				goto error;
+			}
+		}
+
+		rc = mdss_dsi_clamp_ctrl(ctrl, 0);
+		if (rc) {
+			pr_err("%s: Failed to disable dsi clamps. rc=%d\n",
+				__func__, rc);
+			goto error;
+		}
+
+		/*
+		 * Phy setup is needed if coming out of idle
+		 * power collapse with clamps enabled.
+		 */
+		if (ctrl->phy_power_off || mmss_clamp)
+			mdss_dsi_phy_power_on(ctrl, mmss_clamp);
+	}
+	if (clk & MDSS_DSI_LINK_CLK) {
+		if (ctrl->ulps) {
+			/* lanes must leave ULPS before link traffic resumes */
+			rc = mdss_dsi_ulps_config(ctrl, 0);
+			if (rc) {
+				pr_err("%s: failed to disable ulps, rc= %d\n",
+				       __func__, rc);
+				goto error;
+			}
+		}
+	}
+error:
+	return rc;
+}
+
+/*
+ * mdss_dsi_post_clkoff_cb() - clock-manager callback invoked after DSI
+ * clocks have been turned off.
+ * @priv: opaque pointer, actually a struct mdss_dsi_ctrl_pdata.
+ * @clk_type: bitmask of the clock types that changed.
+ * @curr_state: the clock state just entered.
+ *
+ * On core-clock OFF, disables the DSI power supplies in reverse order
+ * (DSI_MAX_PM - 1 down to DSI_CORE_PM); while the controller is active
+ * only the core supply is dropped.  vreg-disable failures are logged as
+ * warnings and deliberately not propagated.  Returns 0 or -EINVAL for a
+ * NULL controller.
+ */
+int mdss_dsi_post_clkoff_cb(void *priv,
+			    enum mdss_dsi_clk_type clk_type,
+			    enum mdss_dsi_clk_state curr_state)
+{
+	int rc = 0;
+	struct mdss_dsi_ctrl_pdata *ctrl = priv;
+	struct mdss_panel_data *pdata = NULL;
+	struct dsi_shared_data *sdata;
+	int i;
+
+	if (!ctrl) {
+		pr_err("%s: Invalid arg\n", __func__);
+		return -EINVAL;
+	}
+
+	pdata = &ctrl->panel_data;
+	if ((clk_type & MDSS_DSI_CORE_CLK) &&
+	    (curr_state == MDSS_DSI_CLK_OFF)) {
+		sdata = ctrl->shared_data;
+
+		for (i = DSI_MAX_PM - 1; i >= DSI_CORE_PM; i--) {
+			/* keep non-core supplies on while DSI is active */
+			if ((ctrl->ctrl_state & CTRL_STATE_DSI_ACTIVE) &&
+				(i != DSI_CORE_PM))
+				continue;
+			rc = msm_dss_enable_vreg(
+				sdata->power_data[i].vreg_config,
+				sdata->power_data[i].num_vreg, 0);
+			if (rc) {
+				pr_warn("%s: failed to disable vregs for %s\n",
+					__func__,
+					__mdss_dsi_pm_name(i));
+				/* best effort: clear rc so caller continues */
+				rc = 0;
+			} else {
+				ctrl->core_power = false;
+			}
+		}
+
+		/*
+		 * temp workaround until framework issues pertaining to LP2
+		 * power state transitions are fixed. For now, we internally
+		 * transition to LP2 state whenever core power is turned off
+		 * in LP1 state
+		 */
+		if (mdss_dsi_is_panel_on_lp(pdata))
+			mdss_dsi_panel_power_ctrl(pdata,
+				MDSS_PANEL_POWER_LP2);
+	}
+	return rc;
+}
+
+/*
+ * mdss_dsi_pre_clkon_cb() - clock-manager callback invoked before DSI
+ * clocks are turned on.
+ * @priv: opaque pointer, actually a struct mdss_dsi_ctrl_pdata.
+ * @clk_type: bitmask of the clock types being changed.
+ * @new_state: target clock state.
+ *
+ * On core-clock ON with core power currently off, re-enables the DSI
+ * power supplies (DSI_CORE_PM upward) and, as a temporary workaround,
+ * transitions the panel from LP2 back to LP1.  Always disables dynamic
+ * clock gating when the hook is available.  Returns 0 or -EINVAL for a
+ * NULL controller.
+ */
+int mdss_dsi_pre_clkon_cb(void *priv,
+			  enum mdss_dsi_clk_type clk_type,
+			  enum mdss_dsi_clk_state new_state)
+{
+	int rc = 0;
+	struct mdss_dsi_ctrl_pdata *ctrl = priv;
+	struct mdss_panel_data *pdata = NULL;
+	struct dsi_shared_data *sdata;
+	int i;
+
+	if (!ctrl) {
+		pr_err("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	pdata = &ctrl->panel_data;
+	if ((clk_type & MDSS_DSI_CORE_CLK) && (new_state == MDSS_DSI_CLK_ON) &&
+	    (ctrl->core_power == false)) {
+		sdata = ctrl->shared_data;
+		/*
+		 * Enable DSI core power
+		 * 1.> PANEL_PM are controlled as part of
+		 *     panel_power_ctrl. Needed not be handled here.
+		 * 2.> CORE_PM are controlled by dsi clk manager.
+		 * 3.> CTRL_PM need to be enabled/disabled
+		 *     only during unblank/blank. Their state should
+		 *     not be changed during static screen.
+		 */
+		pr_debug("%s: Enable DSI core power\n", __func__);
+		for (i = DSI_CORE_PM; i < DSI_MAX_PM; i++) {
+			/* during static screen only the core supply toggles */
+			if ((ctrl->ctrl_state & CTRL_STATE_DSI_ACTIVE) &&
+				(!pdata->panel_info.cont_splash_enabled) &&
+				(i != DSI_CORE_PM))
+				continue;
+			rc = msm_dss_enable_vreg(
+				sdata->power_data[i].vreg_config,
+				sdata->power_data[i].num_vreg, 1);
+			if (rc) {
+				pr_err("%s: failed to enable vregs for %s\n",
+					__func__,
+					__mdss_dsi_pm_name(i));
+			} else {
+				ctrl->core_power = true;
+			}
+
+		}
+		/*
+		 * temp workaround until framework issues pertaining to LP2
+		 * power state transitions are fixed. For now, if we intend to
+		 * send a frame update when in LP1, we have to explicitly exit
+		 * LP2 state here
+		 */
+		if (mdss_dsi_is_panel_on_ulp(pdata))
+			mdss_dsi_panel_power_ctrl(pdata, MDSS_PANEL_POWER_LP1);
+	}
+	/* Disable dynamic clock gating*/
+	if (ctrl->mdss_util->dyn_clk_gating_ctrl)
+		ctrl->mdss_util->dyn_clk_gating_ctrl(0);
+
+	return rc;
+}
+
+/*
+ * mdss_edp_clk_deinit() - release every eDP clock handle previously
+ * obtained in mdss_edp_clk_init().  NULL handles (never acquired or
+ * cleared after a failed clk_get()) are skipped.
+ */
+void mdss_edp_clk_deinit(struct mdss_edp_drv_pdata *edp_drv)
+{
+	if (edp_drv->aux_clk)
+		clk_put(edp_drv->aux_clk);
+	if (edp_drv->pixel_clk)
+		clk_put(edp_drv->pixel_clk);
+	if (edp_drv->ahb_clk)
+		clk_put(edp_drv->ahb_clk);
+	if (edp_drv->link_clk)
+		clk_put(edp_drv->link_clk);
+	if (edp_drv->mdp_core_clk)
+		clk_put(edp_drv->mdp_core_clk);
+}
+
+/*
+ * mdss_edp_clk_init() - acquire all eDP clock handles from the clock
+ * framework.  A failed lookup clears the handle, releases any handles
+ * already acquired via mdss_edp_clk_deinit(), and returns -EPERM.
+ * Returns 0 on success.
+ *
+ * Fix: the pr_err() messages were missing a trailing '\n', causing the
+ * next printk to be appended to the same log line.
+ */
+int mdss_edp_clk_init(struct mdss_edp_drv_pdata *edp_drv)
+{
+	struct device *dev = &(edp_drv->pdev->dev);
+
+	edp_drv->aux_clk = clk_get(dev, "core_clk");
+	if (IS_ERR(edp_drv->aux_clk)) {
+		pr_err("%s: Can't find aux_clk\n", __func__);
+		edp_drv->aux_clk = NULL;
+		goto mdss_edp_clk_err;
+	}
+
+	edp_drv->pixel_clk = clk_get(dev, "pixel_clk");
+	if (IS_ERR(edp_drv->pixel_clk)) {
+		pr_err("%s: Can't find pixel_clk\n", __func__);
+		edp_drv->pixel_clk = NULL;
+		goto mdss_edp_clk_err;
+	}
+
+	edp_drv->ahb_clk = clk_get(dev, "iface_clk");
+	if (IS_ERR(edp_drv->ahb_clk)) {
+		pr_err("%s: Can't find ahb_clk\n", __func__);
+		edp_drv->ahb_clk = NULL;
+		goto mdss_edp_clk_err;
+	}
+
+	edp_drv->link_clk = clk_get(dev, "link_clk");
+	if (IS_ERR(edp_drv->link_clk)) {
+		pr_err("%s: Can't find link_clk\n", __func__);
+		edp_drv->link_clk = NULL;
+		goto mdss_edp_clk_err;
+	}
+
+	/* need mdss clock to receive irq */
+	edp_drv->mdp_core_clk = clk_get(dev, "mdp_core_clk");
+	if (IS_ERR(edp_drv->mdp_core_clk)) {
+		pr_err("%s: Can't find mdp_core_clk\n", __func__);
+		edp_drv->mdp_core_clk = NULL;
+		goto mdss_edp_clk_err;
+	}
+
+	return 0;
+
+mdss_edp_clk_err:
+	mdss_edp_clk_deinit(edp_drv);
+	return -EPERM;
+}
+
+/*
+ * mdss_edp_aux_clk_enable() - turn on the minimal clock set needed for
+ * AUX-channel traffic: aux (fixed at 19.2 MHz), ahb, and mdp_core (the
+ * latter required to receive irqs).  On failure the already-enabled
+ * clocks are unwound in reverse order via the c0/c1/c2 labels.
+ * Returns 0 on success or the clk_enable() error.
+ */
+int mdss_edp_aux_clk_enable(struct mdss_edp_drv_pdata *edp_drv)
+{
+	int ret;
+
+	/* rate failure is logged but not fatal */
+	if (clk_set_rate(edp_drv->aux_clk, 19200000) < 0)
+		pr_err("%s: aux_clk - clk_set_rate failed\n",
+					__func__);
+
+	ret = clk_enable(edp_drv->aux_clk);
+	if (ret) {
+		pr_err("%s: Failed to enable aux clk\n", __func__);
+		goto c2;
+	}
+
+	ret = clk_enable(edp_drv->ahb_clk);
+	if (ret) {
+		pr_err("%s: Failed to enable ahb clk\n", __func__);
+		goto c1;
+	}
+
+	/* need mdss clock to receive irq */
+	ret = clk_enable(edp_drv->mdp_core_clk);
+	if (ret) {
+		pr_err("%s: Failed to enable mdp_core_clk\n", __func__);
+		goto c0;
+	}
+
+	return 0;
+c0:
+	clk_disable(edp_drv->ahb_clk);
+c1:
+	clk_disable(edp_drv->aux_clk);
+c2:
+	return ret;
+
+}
+
+/*
+ * mdss_edp_aux_clk_disable() - counterpart of mdss_edp_aux_clk_enable();
+ * gates the aux, ahb and mdp_core clocks.
+ */
+void mdss_edp_aux_clk_disable(struct mdss_edp_drv_pdata *edp_drv)
+{
+	clk_disable(edp_drv->aux_clk);
+	clk_disable(edp_drv->ahb_clk);
+	clk_disable(edp_drv->mdp_core_clk);
+}
+
+/*
+ * mdss_edp_clk_set_rate() - program the link clock (link_rate in units
+ * of 27 MHz) and the pixel clock.  Rate-set failures are only logged.
+ */
+static void mdss_edp_clk_set_rate(struct mdss_edp_drv_pdata *edp_drv)
+{
+	if (clk_set_rate(edp_drv->link_clk, edp_drv->link_rate * 27000000) < 0)
+		pr_err("%s: link_clk - clk_set_rate failed\n",
+					__func__);
+
+	if (clk_set_rate(edp_drv->pixel_clk, edp_drv->pixel_rate) < 0)
+		pr_err("%s: pixel_clk - clk_set_rate failed\n",
+					__func__);
+}
+
+/*
+ * mdss_edp_clk_enable() - set rates for link/aux/pixel clocks, then
+ * enable aux, pixel, ahb, link and mdp_core in that order, tracking the
+ * result in edp_drv->clk_on.  A repeated call while clk_on is a no-op.
+ * On any enable failure the already-enabled clocks are unwound in
+ * reverse order via the c0..c4 labels.  Returns 0 on success.
+ */
+int mdss_edp_clk_enable(struct mdss_edp_drv_pdata *edp_drv)
+{
+	int ret;
+
+	if (edp_drv->clk_on) {
+		pr_info("%s: edp clks are already ON\n", __func__);
+		return 0;
+	}
+
+	/* rate failures are logged but do not abort the enable sequence */
+	if (clk_set_rate(edp_drv->link_clk, edp_drv->link_rate * 27000000) < 0)
+		pr_err("%s: link_clk - clk_set_rate failed\n",
+					__func__);
+
+	if (clk_set_rate(edp_drv->aux_clk, edp_drv->aux_rate) < 0)
+		pr_err("%s: aux_clk - clk_set_rate failed\n",
+					__func__);
+
+	if (clk_set_rate(edp_drv->pixel_clk, edp_drv->pixel_rate) < 0)
+		pr_err("%s: pixel_clk - clk_set_rate failed\n",
+					__func__);
+
+	ret = clk_enable(edp_drv->aux_clk);
+	if (ret) {
+		pr_err("%s: Failed to enable aux clk\n", __func__);
+		goto c4;
+	}
+	ret = clk_enable(edp_drv->pixel_clk);
+	if (ret) {
+		pr_err("%s: Failed to enable pixel clk\n", __func__);
+		goto c3;
+	}
+	ret = clk_enable(edp_drv->ahb_clk);
+	if (ret) {
+		pr_err("%s: Failed to enable ahb clk\n", __func__);
+		goto c2;
+	}
+	ret = clk_enable(edp_drv->link_clk);
+	if (ret) {
+		pr_err("%s: Failed to enable link clk\n", __func__);
+		goto c1;
+	}
+	ret = clk_enable(edp_drv->mdp_core_clk);
+	if (ret) {
+		pr_err("%s: Failed to enable mdp_core_clk\n", __func__);
+		goto c0;
+	}
+
+	edp_drv->clk_on = 1;
+
+	return 0;
+
+c0:
+	clk_disable(edp_drv->link_clk);
+c1:
+	clk_disable(edp_drv->ahb_clk);
+c2:
+	clk_disable(edp_drv->pixel_clk);
+c3:
+	clk_disable(edp_drv->aux_clk);
+c4:
+	return ret;
+}
+
+/*
+ * mdss_edp_clk_disable() - counterpart of mdss_edp_clk_enable(); gates
+ * all five eDP clocks and clears edp_drv->clk_on.  A repeated call
+ * while clocks are already off is a logged no-op.
+ */
+void mdss_edp_clk_disable(struct mdss_edp_drv_pdata *edp_drv)
+{
+	if (edp_drv->clk_on == 0) {
+		pr_info("%s: edp clks are already OFF\n", __func__);
+		return;
+	}
+
+	clk_disable(edp_drv->aux_clk);
+	clk_disable(edp_drv->pixel_clk);
+	clk_disable(edp_drv->ahb_clk);
+	clk_disable(edp_drv->link_clk);
+	clk_disable(edp_drv->mdp_core_clk);
+
+	edp_drv->clk_on = 0;
+}
+
+/*
+ * mdss_edp_prepare_aux_clocks() - clk_prepare() the clocks needed for
+ * AUX traffic: ahb first (bus access), then aux, then mdp_core (needed
+ * to receive irqs).  On failure previously prepared clocks are unwound
+ * in reverse order via the c1/c2/c3 labels.  Returns 0 on success.
+ */
+int mdss_edp_prepare_aux_clocks(struct mdss_edp_drv_pdata *edp_drv)
+{
+	int ret;
+
+	/* ahb clock should be prepared first */
+	ret = clk_prepare(edp_drv->ahb_clk);
+	if (ret) {
+		pr_err("%s: Failed to prepare ahb clk\n", __func__);
+		goto c3;
+	}
+	ret = clk_prepare(edp_drv->aux_clk);
+	if (ret) {
+		pr_err("%s: Failed to prepare aux clk\n", __func__);
+		goto c2;
+	}
+
+	/* need mdss clock to receive irq */
+	ret = clk_prepare(edp_drv->mdp_core_clk);
+	if (ret) {
+		pr_err("%s: Failed to prepare mdp_core clk\n", __func__);
+		goto c1;
+	}
+
+	return 0;
+c1:
+	clk_unprepare(edp_drv->aux_clk);
+c2:
+	clk_unprepare(edp_drv->ahb_clk);
+c3:
+	return ret;
+
+}
+
+/*
+ * mdss_edp_unprepare_aux_clocks() - counterpart of
+ * mdss_edp_prepare_aux_clocks(); unprepares in reverse order, with the
+ * ahb clock released last.
+ */
+void mdss_edp_unprepare_aux_clocks(struct mdss_edp_drv_pdata *edp_drv)
+{
+	clk_unprepare(edp_drv->mdp_core_clk);
+	clk_unprepare(edp_drv->aux_clk);
+	clk_unprepare(edp_drv->ahb_clk);
+}
+
+/*
+ * mdss_edp_prepare_clocks() - set link/pixel rates, then clk_prepare()
+ * all five eDP clocks, ahb first.  On failure previously prepared
+ * clocks are unwound in reverse order via the c0..c4 labels.
+ * Returns 0 on success or the clk_prepare() error.
+ */
+int mdss_edp_prepare_clocks(struct mdss_edp_drv_pdata *edp_drv)
+{
+	int ret;
+
+	mdss_edp_clk_set_rate(edp_drv);
+
+	/* ahb clock should be prepared first */
+	ret = clk_prepare(edp_drv->ahb_clk);
+	if (ret) {
+		pr_err("%s: Failed to prepare ahb clk\n", __func__);
+		goto c4;
+	}
+	ret = clk_prepare(edp_drv->aux_clk);
+	if (ret) {
+		pr_err("%s: Failed to prepare aux clk\n", __func__);
+		goto c3;
+	}
+	ret = clk_prepare(edp_drv->pixel_clk);
+	if (ret) {
+		pr_err("%s: Failed to prepare pixel clk\n", __func__);
+		goto c2;
+	}
+	ret = clk_prepare(edp_drv->link_clk);
+	if (ret) {
+		pr_err("%s: Failed to prepare link clk\n", __func__);
+		goto c1;
+	}
+	ret = clk_prepare(edp_drv->mdp_core_clk);
+	if (ret) {
+		pr_err("%s: Failed to prepare mdp_core clk\n", __func__);
+		goto c0;
+	}
+
+	return 0;
+c0:
+	clk_unprepare(edp_drv->link_clk);
+c1:
+	clk_unprepare(edp_drv->pixel_clk);
+c2:
+	clk_unprepare(edp_drv->aux_clk);
+c3:
+	clk_unprepare(edp_drv->ahb_clk);
+c4:
+	return ret;
+}
+
+/*
+ * mdss_edp_unprepare_clocks() - counterpart of mdss_edp_prepare_clocks();
+ * unprepares all five clocks, releasing the ahb (bus) clock last.
+ */
+void mdss_edp_unprepare_clocks(struct mdss_edp_drv_pdata *edp_drv)
+{
+	clk_unprepare(edp_drv->mdp_core_clk);
+	clk_unprepare(edp_drv->aux_clk);
+	clk_unprepare(edp_drv->pixel_clk);
+	clk_unprepare(edp_drv->link_clk);
+	/* ahb clock should be last one to disable */
+	clk_unprepare(edp_drv->ahb_clk);
+}
+
+/*
+ * mdss_edp_clk_debug() - dump pixel-clock and main-link-clock related
+ * registers from the MMSS clock-controller region to the kernel log.
+ * The variable names encode the register offsets being read
+ * (e.g. da0 = offset 0x0a0, d32c = offset 0x32c).
+ * @edp_base: eDP register base (unused here; kept for the call signature).
+ * @mmss_cc_base: MMSS clock controller register base.
+ */
+void mdss_edp_clk_debug(unsigned char *edp_base, unsigned char *mmss_cc_base)
+{
+	u32 da4, da0, d32c;
+	u32 dc4, dc0, d330;
+
+	/* pixel clk */
+	da0  = edp_read(mmss_cc_base + 0x0a0);
+	da4  = edp_read(mmss_cc_base + 0x0a4);
+	d32c = edp_read(mmss_cc_base + 0x32c);
+
+	/* main link clk */
+	dc0  = edp_read(mmss_cc_base + 0x0c0);
+	dc4  = edp_read(mmss_cc_base + 0x0c4);
+	d330 = edp_read(mmss_cc_base + 0x330);
+
+	pr_err("%s: da0=%x da4=%x d32c=%x dc0=%x dc4=%x d330=%x\n", __func__,
+	(int)da0, (int)da4, (int)d32c, (int)dc0, (int)dc4, (int)d330);
+
+}
diff --git a/drivers/video/fbdev/msm/qpic_panel_ili_qvga.c b/drivers/video/fbdev/msm/qpic_panel_ili_qvga.c
new file mode 100644
index 0000000..d644f4f
--- /dev/null
+++ b/drivers/video/fbdev/msm/qpic_panel_ili_qvga.c
@@ -0,0 +1,235 @@
+/* Copyright (c) 2013-2015, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/memory.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
+#include <linux/io.h>
+
+#include "mdss.h"
+#include "mdss_qpic.h"
+#include "mdss_qpic_panel.h"
+
+static int mdss_qpic_pinctrl_set_state(struct qpic_panel_io_desc *qpic_panel_io,
+		bool active);
+/*
+ * panel_io_init() - one-time regulator setup for the QPIC panel:
+ * programs vdd to 1.8 V and avdd to 2.704 V when the corresponding
+ * regulator handles exist.  Returns 0 on success, -EINVAL on failure.
+ *
+ * Fix: the avdd failure path logged "vdd_vreg->set_voltage failed"
+ * (copy-paste from the vdd branch), misidentifying the failing supply.
+ */
+static int panel_io_init(struct qpic_panel_io_desc *panel_io)
+{
+	int rc;
+
+	if (panel_io->vdd_vreg) {
+		rc = regulator_set_voltage(panel_io->vdd_vreg,
+			1800000, 1800000);
+		if (rc) {
+			pr_err("vdd_vreg->set_voltage failed, rc=%d\n", rc);
+			return -EINVAL;
+		}
+	}
+	if (panel_io->avdd_vreg) {
+		rc = regulator_set_voltage(panel_io->avdd_vreg,
+			2704000, 2704000);
+		if (rc) {
+			pr_err("avdd_vreg->set_voltage failed, rc=%d\n", rc);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+/*
+ * panel_io_off() - release the panel I/O resources acquired by
+ * panel_io_on(): move pins to the suspend pinctrl state, free every
+ * GPIO that was requested (non-zero handles only), and disable the
+ * vdd/avdd regulators.  Also used as the error-unwind path of
+ * panel_io_on().
+ */
+static void panel_io_off(struct qpic_panel_io_desc *qpic_panel_io)
+{
+	if (mdss_qpic_pinctrl_set_state(qpic_panel_io, false))
+		pr_warn("%s panel on: pinctrl not enabled\n", __func__);
+
+	if (qpic_panel_io->ad8_gpio)
+		gpio_free(qpic_panel_io->ad8_gpio);
+	if (qpic_panel_io->cs_gpio)
+		gpio_free(qpic_panel_io->cs_gpio);
+	if (qpic_panel_io->rst_gpio)
+		gpio_free(qpic_panel_io->rst_gpio);
+	if (qpic_panel_io->te_gpio)
+		gpio_free(qpic_panel_io->te_gpio);
+	if (qpic_panel_io->bl_gpio)
+		gpio_free(qpic_panel_io->bl_gpio);
+	if (qpic_panel_io->vdd_vreg)
+		regulator_disable(qpic_panel_io->vdd_vreg);
+	if (qpic_panel_io->avdd_vreg)
+		regulator_disable(qpic_panel_io->avdd_vreg);
+}
+
+/*
+ * ili9341_off() - power down the ILI9341 panel: send DISPLAY_OFF,
+ * wait 20 ms, then tear down GPIOs, pinctrl and regulators.
+ */
+void ili9341_off(struct qpic_panel_io_desc *qpic_panel_io)
+{
+	qpic_send_pkt(OP_SET_DISPLAY_OFF, NULL, 0);
+	/* wait for 20 ms after display off */
+	msleep(20);
+	panel_io_off(qpic_panel_io);
+}
+
+/*
+ * panel_io_on() - power up the panel I/O: enable vdd then avdd
+ * regulators, move pins to the active pinctrl state, and — only when
+ * pinctrl is unavailable — fall back to requesting the individual
+ * rst/cs/ad8/te/bl GPIOs.  On any failure the partially acquired
+ * resources are released via panel_io_off().  Returns 0 on success,
+ * -ENODEV if vdd cannot be enabled, -EINVAL for later failures.
+ */
+static int panel_io_on(struct qpic_panel_io_desc *qpic_panel_io)
+{
+	int rc;
+
+	if (qpic_panel_io->vdd_vreg) {
+		rc = regulator_enable(qpic_panel_io->vdd_vreg);
+		if (rc) {
+			pr_err("enable vdd failed, rc=%d\n", rc);
+			return -ENODEV;
+		}
+	}
+
+	if (qpic_panel_io->avdd_vreg) {
+		rc = regulator_enable(qpic_panel_io->avdd_vreg);
+		if (rc) {
+			pr_err("enable avdd failed, rc=%d\n", rc);
+			goto power_on_error;
+		}
+	}
+
+	/* GPIO settings using pinctrl */
+	if (mdss_qpic_pinctrl_set_state(qpic_panel_io, true)) {
+		pr_warn("%s panel on: pinctrl not enabled\n", __func__);
+
+		/* legacy path: request each GPIO directly (0 = not wired) */
+		if ((qpic_panel_io->rst_gpio) &&
+			(gpio_request(qpic_panel_io->rst_gpio, "disp_rst_n"))) {
+			pr_err("%s request reset gpio failed\n", __func__);
+			goto power_on_error;
+		}
+
+		if ((qpic_panel_io->cs_gpio) &&
+			(gpio_request(qpic_panel_io->cs_gpio, "disp_cs_n"))) {
+			pr_err("%s request cs gpio failed\n", __func__);
+			goto power_on_error;
+		}
+
+		if ((qpic_panel_io->ad8_gpio) &&
+			(gpio_request(qpic_panel_io->ad8_gpio, "disp_ad8_n"))) {
+			pr_err("%s request ad8 gpio failed\n", __func__);
+			goto power_on_error;
+		}
+
+		if ((qpic_panel_io->te_gpio) &&
+			(gpio_request(qpic_panel_io->te_gpio, "disp_te_n"))) {
+			pr_err("%s request te gpio failed\n", __func__);
+			goto power_on_error;
+		}
+
+		if ((qpic_panel_io->bl_gpio) &&
+			(gpio_request(qpic_panel_io->bl_gpio, "disp_bl_n"))) {
+			pr_err("%s request bl gpio failed\n", __func__);
+			goto power_on_error;
+		}
+	}
+
+	/* wait for 20 ms after enable gpio as suggested by hw */
+	msleep(20);
+	return 0;
+power_on_error:
+	panel_io_off(qpic_panel_io);
+	return -EINVAL;
+}
+
+/*
+ * ili9341_on() - full ILI9341 power-on and initialization sequence:
+ * lazy one-time regulator init, I/O power-up, soft reset, then the
+ * panel configuration commands (address mode 0x48, pixel format 0x66,
+ * interface control, sleep exit, normal mode, display on, tearing
+ * effect line).  The msleep() delays between commands follow the panel
+ * spec.  Returns 0 on success or the panel_io_on() error.
+ */
+int ili9341_on(struct qpic_panel_io_desc *qpic_panel_io)
+{
+	u8 param[4];
+	int ret;
+
+	if (!qpic_panel_io->init) {
+		/* one-time regulator voltage setup, guarded by init flag */
+		panel_io_init(qpic_panel_io);
+		qpic_panel_io->init = true;
+	}
+	ret = panel_io_on(qpic_panel_io);
+	if (ret)
+		return ret;
+	qpic_send_pkt(OP_SOFT_RESET, NULL, 0);
+	/* wait for 120 ms after reset as panel spec suggests */
+	msleep(120);
+	qpic_send_pkt(OP_SET_DISPLAY_OFF, NULL, 0);
+	/* wait for 20 ms after disply off */
+	msleep(20);
+
+	/* set memory access control */
+	param[0] = 0x48;
+	qpic_send_pkt(OP_SET_ADDRESS_MODE, param, 1);
+	/* wait for 20 ms after command sent as panel spec suggests */
+	msleep(20);
+
+	param[0] = 0x66;
+	qpic_send_pkt(OP_SET_PIXEL_FORMAT, param, 1);
+	/* wait for 20 ms after command sent as panel spec suggests */
+	msleep(20);
+
+	/* set interface */
+	param[0] = 1;
+	param[1] = 0;
+	param[2] = 0;
+	qpic_send_pkt(OP_ILI9341_INTERFACE_CONTROL, param, 3);
+	/* wait for 20 ms after command sent */
+	msleep(20);
+
+	/* exit sleep mode */
+	qpic_send_pkt(OP_EXIT_SLEEP_MODE, NULL, 0);
+	/* wait for 20 ms after command sent as panel spec suggests */
+	msleep(20);
+
+	/* normal mode */
+	qpic_send_pkt(OP_ENTER_NORMAL_MODE, NULL, 0);
+	/* wait for 20 ms after command sent as panel spec suggests */
+	msleep(20);
+
+	/* display on */
+	qpic_send_pkt(OP_SET_DISPLAY_ON, NULL, 0);
+	/* wait for 20 ms after command sent as panel spec suggests */
+	msleep(20);
+
+	param[0] = 0;
+	qpic_send_pkt(OP_ILI9341_TEARING_EFFECT_LINE_ON, param, 1);
+
+	/* test: read back pixel format to confirm the panel responds */
+	param[0] = qpic_read_data(OP_GET_PIXEL_FORMAT, 1);
+	pr_debug("Pixel format =%x", param[0]);
+
+	return 0;
+}
+
+/*
+ * mdss_qpic_pinctrl_set_state() - move the panel pins to the active
+ * (default) or suspend (sleep) pinctrl state.
+ * @qpic_panel_io: panel I/O descriptor holding the pinctrl handles.
+ * @active: true selects gpio_state_active, false gpio_state_suspend.
+ *
+ * Returns 0 on success, the pinctrl_select_state() error, the
+ * PTR_ERR of an invalid pinctrl handle, or -EFAULT when the requested
+ * pin state itself is invalid.
+ */
+static int mdss_qpic_pinctrl_set_state(struct qpic_panel_io_desc *qpic_panel_io,
+		bool active)
+{
+	struct pinctrl_state *pin_state;
+	int rc = -EFAULT;
+
+	if (IS_ERR_OR_NULL(qpic_panel_io->pin_res.pinctrl))
+		return PTR_ERR(qpic_panel_io->pin_res.pinctrl);
+
+	pin_state = active ? qpic_panel_io->pin_res.gpio_state_active
+		: qpic_panel_io->pin_res.gpio_state_suspend;
+	if (!IS_ERR_OR_NULL(pin_state)) {
+		rc = pinctrl_select_state(qpic_panel_io->pin_res.pinctrl,
+				pin_state);
+		if (rc)
+			pr_err("%s: can not set %s pins\n", __func__,
+					active ? MDSS_PINCTRL_STATE_DEFAULT
+					: MDSS_PINCTRL_STATE_SLEEP);
+	} else {
+		pr_err("%s: invalid '%s' pinstate\n", __func__,
+				active ? MDSS_PINCTRL_STATE_DEFAULT
+				: MDSS_PINCTRL_STATE_SLEEP);
+	}
+	return rc;
+}
diff --git a/drivers/video/fbdev/msm/splash.h b/drivers/video/fbdev/msm/splash.h
new file mode 100644
index 0000000..1cb7aa1
--- /dev/null
+++ b/drivers/video/fbdev/msm/splash.h
@@ -0,0 +1,5279 @@
+/* Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __SPLASH_H_
+#define __SPLASH_H_
+
+#define SPLASH_IMAGE_WIDTH	113
+#define SPLASH_IMAGE_HEIGHT	124
+#define SPLASH_IMAGE_FORMAT	MDP_BGR_888
+#define SPLASH_IMAGE_BPP	3
+
+char splash_bgr888_image[] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x10,
+	0x29, 0x19, 0x31, 0x31,
+	0x29, 0x31, 0x31, 0x29, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+	0x08, 0x10, 0x31, 0x31,
+	0x29, 0x4a, 0x52, 0x4a, 0x6b, 0x5a, 0x73, 0x4a, 0x52, 0x4a, 0x10, 0x29,
+	0x19, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x08,
+	0x10, 0x31, 0x31, 0x29, 0x6b, 0x5a, 0x73, 0x6b, 0x7b, 0x73, 0x6b, 0x5a,
+	0x4a, 0x31, 0x31, 0x29,
+	0x3a, 0x10, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x08,
+	0x08, 0x10, 0x10, 0x21, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x08, 0x10, 0x31, 0x31, 0x29, 0x6b, 0x5a, 0x4a, 0x6b, 0x5a,
+	0x73, 0x3a, 0x31, 0x4a,
+	0x31, 0x31, 0x29, 0x10, 0x29, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x31, 0x31, 0x29, 0x3a, 0x31,
+	0x4a, 0x31, 0x31, 0x29,
+	0x10, 0x29, 0x19, 0x08, 0x08, 0x10, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x10, 0x21, 0x00,
+	0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x08, 0x08,
+	0x10, 0x08, 0x08, 0x10,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x00, 0x00,
+	0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x08,
+	0x10, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x21,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x08,
+	0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x10, 0x21, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x08, 0x08, 0x10, 0x00,
+	0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x21, 0x00, 0x08, 0x08, 0x10,
+	0x08, 0x08, 0x10, 0x10,
+	0x29, 0x19, 0x10, 0x29, 0x19, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x08,
+	0x10, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x08, 0x08, 0x10,
+	0x4a, 0x52, 0x4a, 0x08,
+	0x08, 0x10, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x08, 0x08, 0x10,
+	0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x08, 0x00, 0x00, 0x08, 0x08, 0x10, 0x31, 0x31, 0x29, 0x08,
+	0x08, 0x10, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x10, 0x29, 0x19, 0x4a, 0x52, 0x4a, 0x3a, 0x31, 0x4a,
+	0x08, 0x00, 0x00, 0x08,
+	0x08, 0x10, 0x4a, 0x52, 0x4a, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+	0x08, 0x10, 0x00, 0x00,
+	0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x4a,
+	0x52, 0x4a, 0x9c, 0xa5, 0x94, 0x9c, 0x7b, 0x94, 0x08, 0x08, 0x10, 0x08,
+	0x08, 0x10, 0x10, 0x29,
+	0x19, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08,
+	0x10, 0x10, 0x21, 0x00,
+	0x08, 0x08, 0x10, 0x6b, 0x7b, 0x73, 0x9c, 0x7b, 0x94, 0x9c, 0xa5, 0x94,
+	0xce, 0xad, 0xad, 0xa5,
+	0xb5, 0xb5, 0x31, 0x31, 0x29, 0x08, 0x08, 0x10, 0x10, 0x29, 0x19, 0x08,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x08, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x4a,
+	0x52, 0x4a, 0xa5, 0x9c, 0xad, 0xce, 0xad, 0xad, 0xce, 0xde, 0xce, 0x9c,
+	0x7b, 0x94, 0x10, 0x29,
+	0x19, 0x3a, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x08, 0x08, 0x10, 0x9c, 0x7b, 0x94, 0x9c, 0xa5, 0x94, 0xa5, 0xb5, 0xb5,
+	0xa5, 0xb5, 0xb5, 0xce,
+	0xde, 0xce, 0xc5, 0xad, 0xd6, 0x9c, 0xa5, 0x94, 0x3a, 0x10, 0x21, 0x00,
+	0x00, 0x00, 0x08, 0x08,
+	0x10, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x08,
+	0x08, 0x10, 0x9c, 0x7b, 0x94, 0xce, 0xad, 0xad, 0xce, 0xe6, 0xef, 0xce,
+	0xe6, 0xef, 0xe6, 0xde,
+	0xde, 0xa5, 0x9c, 0xad, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x6b, 0x5a, 0x73, 0xce, 0xde, 0xce, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xce, 0xde, 0xce, 0x6b,
+	0x7b, 0x73, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x21, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x10, 0x29, 0x19, 0xef, 0xf7, 0xe6, 0xef, 0xf7, 0xff, 0x9c,
+	0xa5, 0x94, 0xce, 0xde,
+	0xce, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x10, 0x21, 0x00, 0xce, 0xde, 0xce, 0xff, 0xf7, 0xff,
+	0xff, 0xf7, 0xff, 0x9c,
+	0xa5, 0x94, 0x00, 0x00, 0x00, 0x31, 0x31, 0x29, 0xef, 0xf7, 0xe6, 0xff,
+	0xf7, 0xff, 0xef, 0xde,
+	0xef, 0x08, 0x08, 0x10, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x08, 0x00, 0x00, 0x4a, 0x52, 0x4a, 0xff, 0xf7, 0xff, 0x10,
+	0x29, 0x19, 0x08, 0x08,
+	0x10, 0x4a, 0x52, 0x4a, 0xce, 0xad, 0xad, 0xff, 0xff, 0xff, 0x4a, 0x52,
+	0x4a, 0x08, 0x08, 0x10,
+	0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x08, 0x08, 0x10, 0xce, 0xad, 0xad,
+	0xef, 0xf7, 0xff, 0xce,
+	0xde, 0xce, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x9c, 0x7b, 0x94, 0x31,
+	0x31, 0x29, 0xef, 0xf7,
+	0xe6, 0xff, 0xff, 0xff, 0x4a, 0x52, 0x4a, 0x00, 0x00, 0x00, 0x08, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6b, 0x5a, 0x73, 0xef,
+	0xf7, 0xe6, 0x00, 0x00,
+	0x00, 0x08, 0x00, 0x00, 0x6b, 0x7b, 0x73, 0x08, 0x08, 0x10, 0xff, 0xff,
+	0xff, 0x6b, 0x7b, 0x73,
+	0x08, 0x08, 0x10, 0x31, 0x31, 0x29, 0x4a, 0x52, 0x4a, 0x08, 0x08, 0x10,
+	0xce, 0xde, 0xce, 0xff,
+	0xff, 0xff, 0x10, 0x29, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x31,
+	0x31, 0x29, 0x4a, 0x52,
+	0x4a, 0xa5, 0xb5, 0xb5, 0xff, 0xff, 0xff, 0x9c, 0x7b, 0x94, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x4a,
+	0x52, 0x4a, 0xe6, 0xde,
+	0xde, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x31, 0x31, 0x29, 0x08, 0x08,
+	0x10, 0xff, 0xff, 0xff,
+	0x4a, 0x52, 0x4a, 0x08, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00,
+	0x08, 0x08, 0x10, 0x9c,
+	0x7b, 0x94, 0xff, 0xff, 0xff, 0x3a, 0x10, 0x21, 0x08, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x08,
+	0x10, 0x08, 0x08, 0x10, 0x6b, 0x5a, 0x73, 0xff, 0xff, 0xff, 0xa5, 0xb5,
+	0xb5, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x4a, 0x52,
+	0x4a, 0xef, 0xf7, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0xe6, 0xde, 0xde, 0x08, 0x31, 0x5a, 0x10, 0x7b, 0x9c, 0x10, 0x7b, 0x9c,
+	0x10, 0x7b, 0x9c, 0x10,
+	0x52, 0x7b, 0x31, 0x31, 0x29, 0xef, 0xf7, 0xff, 0x08, 0x08, 0x10, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9c, 0xa5, 0x94, 0xff, 0xff,
+	0xff, 0xa5, 0x9c, 0xad,
+	0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x10, 0x21, 0x00, 0x08,
+	0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x00,
+	0x00, 0x08, 0x08, 0x10, 0xff, 0xf7, 0xff, 0x4a, 0x52, 0x4a, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x08, 0x31, 0x5a, 0x00, 0x84, 0xbd, 0x08, 0xad, 0xd6, 0x00, 0xbd, 0xef,
+	0x00, 0xbd, 0xef, 0x00,
+	0x9c, 0xd6, 0x08, 0xa5, 0xad, 0x08, 0xad, 0xd6, 0x10, 0xce, 0xce, 0x6b,
+	0x7b, 0x9c, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0xef, 0xf7,
+	0xff, 0xff, 0xff, 0xff,
+	0x6b, 0x5a, 0x4a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x00, 0x00, 0x08, 0x00, 0x00, 0xce, 0xde, 0xce, 0xff, 0xf7,
+	0xff, 0x10, 0x29, 0x19,
+	0x10, 0x5a, 0x9c, 0x00, 0x9c, 0xd6, 0x19, 0x94, 0xce, 0x00, 0xbd, 0xf7,
+	0x10, 0xc5, 0xef, 0x08,
+	0xad, 0xd6, 0x08, 0xad, 0xd6, 0x08, 0xad, 0xd6, 0x08, 0xad, 0xd6, 0x10,
+	0xc5, 0xef, 0x00, 0xbd,
+	0xef, 0x08, 0xa5, 0xad, 0x08, 0x31, 0x5a, 0x10, 0x29, 0x19, 0xff, 0xf7,
+	0xff, 0xff, 0xff, 0xff,
+	0xe6, 0xde, 0xde, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x08,
+	0x08, 0x10, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6b, 0x5a,
+	0x73, 0xef, 0xf7, 0xe6,
+	0x19, 0x7b, 0xbd, 0x19, 0x7b, 0xbd, 0x08, 0xad, 0xd6, 0x00, 0xbd, 0xef,
+	0x10, 0xc5, 0xef, 0x00,
+	0xbd, 0xef, 0x10, 0xc5, 0xef, 0x10, 0xc5, 0xef, 0x3a, 0xde, 0xef, 0x19,
+	0xbd, 0xf7, 0x3a, 0xde,
+	0xef, 0x3a, 0xde, 0xef, 0x10, 0xc5, 0xef, 0x10, 0xc5, 0xef, 0x08, 0xad,
+	0xd6, 0x19, 0x94, 0xce,
+	0xa5, 0xb5, 0xb5, 0x4a, 0x5a, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x08, 0x10,
+	0x10, 0x7b, 0x9c, 0x00, 0x84, 0xbd, 0x08, 0xad, 0xd6, 0x08, 0xad, 0xd6,
+	0x08, 0xad, 0xef, 0x10,
+	0xc5, 0xef, 0x08, 0xad, 0xd6, 0x10, 0xc5, 0xef, 0x10, 0xe6, 0xef, 0x19,
+	0xbd, 0xf7, 0x3a, 0xde,
+	0xef, 0x3a, 0xde, 0xef, 0x10, 0xc5, 0xef, 0x10, 0xe6, 0xef, 0x10, 0xe6,
+	0xef, 0x10, 0xc5, 0xef,
+	0x10, 0xe6, 0xef, 0x19, 0xbd, 0xf7, 0x08, 0xad, 0xd6, 0x08, 0x31, 0x3a,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x08, 0x10,
+	0x10, 0x7b, 0x9c, 0x00, 0x84, 0xbd, 0x19, 0x94, 0xce, 0x00, 0x9c, 0xd6,
+	0x10, 0xc5, 0xef, 0x08,
+	0xad, 0xef, 0x10, 0xc5, 0xef, 0x10, 0xc5, 0xef, 0x10, 0xc5, 0xef, 0x10,
+	0xc5, 0xef, 0x6b, 0xe6,
+	0xef, 0x3a, 0xde, 0xef, 0x10, 0xe6, 0xef, 0x10, 0xe6, 0xef, 0x10, 0xc5,
+	0xef, 0x10, 0xe6, 0xef,
+	0x10, 0xc5, 0xef, 0x10, 0xe6, 0xef, 0x10, 0xc5, 0xef, 0x19, 0xbd, 0xf7,
+	0x19, 0x7b, 0xbd, 0x00,
+	0x00, 0x00, 0x10, 0x21, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x08, 0x31, 0x5a, 0x00, 0x84, 0xbd, 0x00, 0x84, 0xbd, 0x00, 0x9c, 0xd6,
+	0x08, 0xad, 0xef, 0x08,
+	0xad, 0xef, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xef, 0x10, 0xc5, 0xef, 0x10,
+	0xc5, 0xef, 0x10, 0xe6,
+	0xef, 0x3a, 0xde, 0xef, 0x19, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x10, 0xe6,
+	0xef, 0x10, 0xc5, 0xef,
+	0x10, 0xe6, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0x84, 0xbd, 0x10, 0x7b, 0x9c,
+	0x00, 0xbd, 0xef, 0x10,
+	0x73, 0x7b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+	0x08, 0x10, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x10, 0x52, 0x7b, 0x19, 0x7b, 0xbd, 0x00, 0x9c, 0xd6,
+	0x19, 0x94, 0xce, 0x08,
+	0xad, 0xef, 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xd6, 0x10,
+	0xe6, 0xef, 0x19, 0xbd,
+	0xf7, 0x6b, 0xe6, 0xef, 0x19, 0xbd, 0xf7, 0x10, 0xe6, 0xef, 0x10, 0xe6,
+	0xef, 0x10, 0xe6, 0xef,
+	0x10, 0xc5, 0xef, 0x00, 0x9c, 0xd6, 0x00, 0x84, 0xbd, 0x00, 0x9c, 0xd6,
+	0x19, 0x94, 0xce, 0x00,
+	0x9c, 0xd6, 0x10, 0x52, 0x7b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x08, 0x31, 0x5a,
+	0x08, 0xa5, 0xad, 0x08,
+	0xad, 0xef, 0x00, 0xbd, 0xef, 0x10, 0xc5, 0xef, 0x10, 0xc5, 0xef, 0x10,
+	0xc5, 0xef, 0x10, 0xc5,
+	0xef, 0x10, 0xe6, 0xef, 0x19, 0xbd, 0xf7, 0x10, 0xe6, 0xef, 0x10, 0xc5,
+	0xef, 0x10, 0xc5, 0xef,
+	0x19, 0xbd, 0xf7, 0x10, 0x7b, 0x9c, 0x00, 0x84, 0xbd, 0x19, 0x94, 0xce,
+	0x08, 0xad, 0xd6, 0x00,
+	0x9c, 0xd6, 0x19, 0x94, 0xce, 0x08, 0x31, 0x3a, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x08,
+	0x10, 0x31, 0x31, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08,
+	0x10, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x08, 0x31, 0x3a, 0x10,
+	0x52, 0x7b, 0x10, 0x7b, 0x9c, 0x08, 0xad, 0xef, 0x08, 0xad, 0xef, 0x10,
+	0xc5, 0xef, 0x10, 0xc5,
+	0xef, 0x3a, 0xde, 0xef, 0x10, 0xe6, 0xef, 0x10, 0xc5, 0xef, 0x10, 0xe6,
+	0xef, 0x08, 0xad, 0xd6,
+	0x10, 0x5a, 0x9c, 0x10, 0x5a, 0x9c, 0x00, 0x9c, 0xd6, 0x19, 0x94, 0xce,
+	0x00, 0x9c, 0xd6, 0x00,
+	0x9c, 0xd6, 0x19, 0x7b, 0xbd, 0x19, 0x7b, 0xbd, 0x08, 0x08, 0x10, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x08, 0x10, 0x9c, 0x7b, 0x73, 0x6b, 0x5a, 0x73, 0x10, 0x29,
+	0x19, 0x00, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x08,
+	0x08, 0x10, 0x73, 0xa5, 0xad, 0x10, 0x52, 0x7b, 0x10, 0x52, 0x7b, 0x00,
+	0x84, 0xbd, 0x08, 0xa5,
+	0xad, 0x08, 0xad, 0xd6, 0x19, 0x94, 0xce, 0x00, 0x84, 0xbd, 0x10, 0x73,
+	0x7b, 0x10, 0x5a, 0x9c,
+	0x00, 0x84, 0xbd, 0x00, 0x9c, 0xd6, 0x00, 0x9c, 0xd6, 0x00, 0x9c, 0xd6,
+	0x00, 0x9c, 0xd6, 0x00,
+	0x84, 0xbd, 0x3a, 0xa5, 0xce, 0xce, 0xad, 0xad, 0xce, 0xde, 0xce, 0x6b,
+	0x5a, 0x73, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4a, 0x52, 0x4a, 0x6b, 0x7b,
+	0x9c, 0x9c, 0x7b, 0x73,
+	0x31, 0x31, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x31, 0x31, 0x29, 0xc5, 0xad, 0xd6, 0x52, 0xa5, 0xa5, 0x10,
+	0x5a, 0x9c, 0x10, 0x7b,
+	0x9c, 0x19, 0x94, 0xce, 0x08, 0xad, 0xd6, 0x08, 0xad, 0xd6, 0x19, 0x94,
+	0xce, 0x00, 0x9c, 0xd6,
+	0x00, 0x9c, 0xd6, 0x00, 0x84, 0xbd, 0x00, 0x84, 0xbd, 0x19, 0x94, 0xce,
+	0x00, 0x84, 0xbd, 0x19,
+	0x7b, 0xbd, 0xce, 0xad, 0xad, 0xce, 0xde, 0xce, 0xce, 0xde, 0xce, 0xa5,
+	0xb5, 0xb5, 0xa5, 0xb5,
+	0xb5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x21,
+	0x00, 0x6b, 0x7b, 0x73,
+	0x9c, 0x7b, 0x94, 0x6b, 0x7b, 0x73, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x08, 0x10, 0x4a, 0x52, 0x4a, 0xa5, 0xd6, 0xad, 0xc5,
+	0xad, 0xd6, 0x4a, 0x7b,
+	0x9c, 0x10, 0x5a, 0x9c, 0x00, 0x84, 0xbd, 0x00, 0x9c, 0xd6, 0x00, 0x84,
+	0xbd, 0x00, 0x9c, 0xd6,
+	0x19, 0x94, 0xce, 0x00, 0x84, 0xbd, 0x19, 0x94, 0xce, 0x00, 0x84, 0xbd,
+	0x19, 0x7b, 0xbd, 0x73,
+	0xa5, 0xad, 0xa5, 0xb5, 0xb5, 0xc5, 0xad, 0xd6, 0xa5, 0xb5, 0xb5, 0xe6,
+	0xde, 0xde, 0xce, 0xde,
+	0xce, 0xce, 0xad, 0xad, 0x10, 0x29, 0x19, 0x08, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x4a, 0x52, 0x4a, 0x6b, 0x7b, 0x73, 0x31, 0x31, 0x29, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3a, 0x31, 0x4a, 0xce,
+	0xad, 0xad, 0xce, 0xde,
+	0xce, 0xa5, 0xb5, 0xb5, 0x73, 0xa5, 0xad, 0x10, 0x73, 0x7b, 0x10, 0x5a,
+	0x9c, 0x10, 0x7b, 0x9c,
+	0x10, 0x5a, 0xbd, 0x10, 0x7b, 0x9c, 0x10, 0x5a, 0x9c, 0x10, 0x5a, 0x9c,
+	0x19, 0x7b, 0xbd, 0xa5,
+	0xb5, 0xb5, 0xa5, 0xb5, 0xb5, 0xc5, 0xad, 0xd6, 0xa5, 0xd6, 0xad, 0xef,
+	0xde, 0xef, 0xef, 0xf7,
+	0xe6, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xa5, 0x9c, 0xad, 0x08, 0x08,
+	0x10, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x08, 0x00, 0x00, 0x4a,
+	0x52, 0x4a, 0xe6, 0xde,
+	0xde, 0xc5, 0xad, 0xd6, 0xa5, 0xb5, 0xb5, 0xce, 0xad, 0xad, 0x9c, 0xad,
+	0xce, 0x52, 0xa5, 0xa5,
+	0x10, 0x5a, 0x9c, 0x10, 0x7b, 0x9c, 0x10, 0x5a, 0x9c, 0x52, 0xa5, 0xa5,
+	0x73, 0xa5, 0xad, 0xc5,
+	0xad, 0xd6, 0xa5, 0xb5, 0xb5, 0xce, 0xde, 0xce, 0xce, 0xde, 0xce, 0xef,
+	0xde, 0xef, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xef, 0xf7, 0xff, 0xef, 0xf7,
+	0xe6, 0x08, 0x08, 0x10,
+	0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0xef, 0xf7,
+	0xe6, 0xef, 0xf7, 0xe6, 0xce, 0xde, 0xce, 0xce, 0xde, 0xce, 0xa5, 0xb5,
+	0xb5, 0xce, 0xde, 0xce,
+	0xce, 0xad, 0xad, 0xce, 0xde, 0xce, 0xa5, 0xb5, 0xb5, 0xce, 0xde, 0xce,
+	0xce, 0xad, 0xad, 0xc5,
+	0xad, 0xd6, 0xa5, 0xd6, 0xad, 0xce, 0xad, 0xad, 0xce, 0xde, 0xce, 0xe6,
+	0xde, 0xde, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xa5, 0xb5, 0xb5, 0x08, 0x08, 0x10, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x08, 0x08,
+	0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x10, 0x21, 0x00, 0x00,
+	0x00, 0x00, 0x9c, 0xa5,
+	0x94, 0xff, 0xf7, 0xff, 0xff, 0xf7, 0xff, 0xef, 0xf7, 0xe6, 0xc5, 0xad,
+	0xd6, 0xa5, 0xb5, 0xb5,
+	0xce, 0xad, 0xad, 0xc5, 0xad, 0xd6, 0xa5, 0xb5, 0xb5, 0xce, 0xad, 0xad,
+	0xc5, 0xad, 0xd6, 0xa5,
+	0xd6, 0xad, 0xc5, 0xad, 0xd6, 0xce, 0xde, 0xce, 0xef, 0xde, 0xef, 0xef,
+	0xf7, 0xe6, 0xff, 0xf7,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x31, 0x31, 0x29, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x08, 0x08, 0x10, 0x08,
+	0x08, 0x10, 0x10, 0x29,
+	0x19, 0xef, 0xde, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xe6,
+	0xc5, 0xad, 0xd6, 0xa5, 0xd6, 0xad, 0xa5, 0xb5, 0xb5, 0xce, 0xde, 0xce,
+	0xa5, 0xb5, 0xb5, 0xce,
+	0xde, 0xce, 0xce, 0xad, 0xad, 0xce, 0xde, 0xce, 0xef, 0xf7, 0xe6, 0xef,
+	0xf7, 0xff, 0xff, 0xf7,
+	0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0xde, 0xde,
+	0x08, 0x08, 0x10, 0x08,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x10, 0x21,
+	0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x08, 0x00,
+	0x00, 0xe6, 0xde, 0xde, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+	0xff, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xe6, 0xe6, 0xde, 0xde, 0xce, 0xad, 0xad, 0xc5, 0xad, 0xd6,
+	0xa5, 0xb5, 0xb5, 0xce,
+	0xad, 0xad, 0xce, 0xde, 0xce, 0xce, 0xe6, 0xef, 0xff, 0xf7, 0xff, 0xff,
+	0xf7, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xe6,
+	0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff,
+	0xff, 0xff, 0xff, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x08, 0x08,
+	0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x4a, 0x52, 0x4a, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xef, 0xf7, 0xe6,
+	0xe6, 0xde, 0xde, 0xef,
+	0xf7, 0xe6, 0xce, 0xe6, 0xef, 0xff, 0xf7, 0xff, 0xff, 0xf7, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xa5, 0x9c, 0xad, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x08, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xe6, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0xde, 0xde, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x4a, 0x52, 0x4a, 0xef, 0xf7, 0xe6, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x08,
+	0x08, 0x10, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x00, 0x00, 0x08, 0x08, 0x10, 0xce, 0xde, 0xce, 0xef, 0xde,
+	0xef, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xef,
+	0xf7, 0xff, 0x31, 0x31,
+	0x29, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0xce, 0xad,
+	0xad, 0xef, 0xf7, 0xe6,
+	0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff,
+	0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xef,
+	0xf7, 0xe6, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff,
+	0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xef, 0xf7, 0xe6, 0xef,
+	0xde, 0xef, 0xef, 0xf7,
+	0xe6, 0x9c, 0xa5, 0x94, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x21, 0x00,
+	0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x08, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x31, 0x31,
+	0x29, 0xa5, 0xb5, 0xb5,
+	0xce, 0xde, 0xce, 0xef, 0xf7, 0xe6, 0xef, 0xf7, 0xff, 0xff, 0xf7, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xef, 0xf7, 0xe6, 0xef, 0xf7, 0xe6, 0xef, 0xde, 0xef, 0xef,
+	0xf7, 0xe6, 0xe6, 0xde,
+	0xde, 0xce, 0xe6, 0xef, 0xff, 0xf7, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xf7,
+	0xff, 0xef, 0xf7, 0xe6,
+	0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xef, 0xf7, 0xff, 0xef, 0xf7, 0xe6,
+	0xef, 0xf7, 0xe6, 0xef,
+	0xde, 0xef, 0xce, 0xe6, 0xef, 0xe6, 0xde, 0xde, 0xe6, 0xde, 0xde, 0xce,
+	0xde, 0xce, 0xce, 0xde,
+	0xce, 0xe6, 0xde, 0xde, 0xce, 0xad, 0xad, 0x08, 0x08, 0x10, 0x00, 0x00,
+	0x00, 0x10, 0x21, 0x00,
+	0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x08, 0x08, 0x10, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08,
+	0x10, 0x6b, 0x5a, 0x4a,
+	0x73, 0xa5, 0xad, 0xce, 0xad, 0xad, 0xc5, 0xad, 0xd6, 0xe6, 0xde, 0xde,
+	0xe6, 0xde, 0xde, 0xff,
+	0xf7, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xef,
+	0xde, 0xef, 0xce, 0xde,
+	0xce, 0xef, 0xde, 0xef, 0xef, 0xf7, 0xe6, 0xef, 0xf7, 0xe6, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xde, 0xef,
+	0xce, 0xde, 0xce, 0xc5,
+	0xad, 0xd6, 0xce, 0xde, 0xce, 0xce, 0xad, 0xad, 0xce, 0xde, 0xce, 0xa5,
+	0xb5, 0xb5, 0xc5, 0xad,
+	0xd6, 0xa5, 0xb5, 0xb5, 0xc5, 0xad, 0xd6, 0xef, 0xf7, 0xff, 0x4a, 0x52,
+	0x4a, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00, 0x00,
+	0x00, 0x10, 0x29, 0x19,
+	0x6b, 0x5a, 0x73, 0xce, 0xad, 0xad, 0xce, 0xde, 0xce, 0xce, 0xde, 0xce,
+	0xef, 0xf7, 0xe6, 0xef,
+	0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xef, 0xf7,
+	0xff, 0xef, 0xde, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7,
+	0xff, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xf7, 0xff, 0xef, 0xf7, 0xe6, 0xe6, 0xde, 0xde, 0xce, 0xde, 0xce, 0xc5,
+	0xad, 0xd6, 0xce, 0xde,
+	0xce, 0xce, 0xad, 0xad, 0xce, 0xde, 0xce, 0xa5, 0xb5, 0xb5, 0xce, 0xde,
+	0xce, 0xff, 0xf7, 0xff,
+	0x31, 0x31, 0x29, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x4a, 0x52, 0x4a,
+	0x31, 0x31, 0x29, 0x08,
+	0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x00, 0x00,
+	0x08, 0x08, 0x10, 0xa5, 0x9c, 0xad, 0xce, 0xde, 0xce, 0xce, 0xe6, 0xef,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xff, 0xff, 0xf7,
+	0xff, 0xef, 0xf7, 0xe6, 0xef, 0xf7, 0xe6, 0xef, 0xf7, 0xff, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xce, 0xe6, 0xef, 0xe6, 0xde, 0xde, 0xce, 0xde, 0xce, 0xc5, 0xad,
+	0xd6, 0xce, 0xad, 0xad,
+	0xe6, 0xde, 0xde, 0xff, 0xf7, 0xff, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x08,
+	0x08, 0x10, 0x31, 0x31, 0x29, 0x3a, 0x10, 0x21, 0x08, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x31, 0x31,
+	0x29, 0x00, 0x00, 0x00,
+	0x10, 0x21, 0x00, 0x6b, 0x5a, 0x73, 0xce, 0xde, 0xce, 0xff, 0xf7, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+	0xe6, 0xce, 0xde, 0xce,
+	0xce, 0xde, 0xce, 0x9c, 0xad, 0xce, 0xef, 0xf7, 0xe6, 0xa5, 0x9c, 0xad,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x3a, 0x31, 0x4a, 0x10,
+	0x29, 0x19, 0x08, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4a, 0x52,
+	0x4a, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0xe6, 0xde, 0xde, 0xef, 0xf7, 0xe6,
+	0xff, 0xf7, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xef, 0xf7,
+	0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+	0xff, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xe6, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xf7, 0xff, 0xef,
+	0xf7, 0xff, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+	0xe6, 0xff, 0xf7, 0xff,
+	0xef, 0xf7, 0xff, 0xef, 0xde, 0xef, 0xce, 0xe6, 0xad, 0xc5, 0xad, 0xd6,
+	0xff, 0xff, 0xff, 0x10,
+	0x29, 0x19, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x31, 0x31,
+	0x29, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x10, 0x21, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x29,
+	0x19, 0x08, 0x08, 0x10,
+	0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6b, 0x5a, 0x73, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xef, 0xf7,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xde, 0xef,
+	0xce, 0xde, 0xce, 0xce,
+	0xde, 0xce, 0xce, 0xad, 0xad, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,
+	0x21, 0x00, 0x08, 0x08,
+	0x10, 0x08, 0x08, 0x10, 0x4a, 0x52, 0x4a, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x08, 0x00,
+	0x00, 0x4a, 0x52, 0x4a,
+	0x08, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0xef, 0xf7, 0xe6,
+	0xef, 0xf7, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6,
+	0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xe6, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6,
+	0xff, 0xff, 0xff, 0xef,
+	0xde, 0xef, 0xce, 0xde, 0xce, 0xef, 0xf7, 0xff, 0x08, 0x08, 0x10, 0x00,
+	0x00, 0x00, 0x3a, 0x10,
+	0x21, 0x31, 0x31, 0x29, 0x31, 0x31, 0x29, 0x08, 0x08, 0x10, 0x10, 0x29,
+	0x19, 0x00, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x10, 0x29, 0x19,
+	0x3a, 0x10, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9c, 0x7b, 0x94,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xef, 0xf7,
+	0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+	0xff, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xf7, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0x4a,
+	0x52, 0x4a, 0x00, 0x00,
+	0x00, 0x10, 0x29, 0x19, 0x4a, 0x52, 0x4a, 0x31, 0x31, 0x29, 0x08, 0x08,
+	0x10, 0x3a, 0x31, 0x4a,
+	0x08, 0x08, 0x10, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x00, 0x00,
+	0x4a, 0x52, 0x4a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10,
+	0xef, 0xf7, 0xe6, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xff, 0xff,
+	0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff,
+	0xff, 0xff, 0xef, 0xf7,
+	0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xf7, 0xff,
+	0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xce, 0xde,
+	0xce, 0x3a, 0x10, 0x21, 0x10, 0x29, 0x19, 0x3a, 0x31, 0x4a, 0x3a, 0x10,
+	0x21, 0x10, 0x21, 0x00,
+	0x08, 0x08, 0x10, 0x31, 0x31, 0x29, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x08, 0x08, 0x10, 0x3a, 0x10, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x9c, 0x7b, 0x94, 0xef,
+	0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xe6, 0xde, 0xde, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x4a, 0x52, 0x4a, 0x08, 0x08, 0x10,
+	0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x4a, 0x52, 0x4a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x08, 0x08, 0x10, 0xef,
+	0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xe6, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xce, 0xe6, 0xef, 0xef, 0xf7, 0xe6,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xff, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xff, 0xff,
+	0xf7, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0x3a, 0x10, 0x21, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x31, 0x31, 0x29,
+	0x08, 0x08, 0x10, 0x08,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x31, 0x31, 0x29, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x4a,
+	0x52, 0x4a, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+	0xff, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xe6, 0xde, 0xde,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xef, 0xf7, 0xe6, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x4a, 0x52,
+	0x4a, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x08, 0x08, 0x10, 0x31,
+	0x31, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x31, 0x31, 0x29, 0x08, 0x08, 0x10,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0xe6, 0xde, 0xde, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xef, 0xf7,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6,
+	0xe6, 0xde, 0xde, 0xef,
+	0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xf7,
+	0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xe6,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xff, 0xef,
+	0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xf7, 0xff, 0xef, 0xf7,
+	0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+	0xff, 0x9c, 0x7b, 0x94,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x08,
+	0x08, 0x10, 0x10, 0x29, 0x19, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4a, 0x52, 0x4a,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x10, 0x29, 0x19, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7,
+	0xe6, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xe6, 0xce,
+	0xe6, 0xef, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xef, 0xf7, 0xe6, 0xff,
+	0xff, 0xff, 0xef, 0xf7,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xe6, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff,
+	0xff, 0xff, 0xef, 0xf7,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xa5, 0xb5, 0xb5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+	0x08, 0x08, 0x10, 0x00,
+	0x00, 0x00, 0x08, 0x08, 0x10, 0x3a, 0x10, 0x21, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x10, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x31, 0x31, 0x29, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x4a, 0x52, 0x4a, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xf7, 0xff,
+	0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xf7, 0xff, 0xef,
+	0xf7, 0xe6, 0xe6, 0xde, 0xde, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xf7,
+	0xff, 0xef, 0xf7, 0xe6,
+	0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xe6,
+	0xff, 0xff, 0xff, 0xce, 0xde, 0xce, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x00, 0x00, 0x10, 0x29, 0x19, 0x08, 0x08, 0x10, 0x00,
+	0x00, 0x00, 0x08, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x08, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+	0x08, 0x08, 0x10, 0x31,
+	0x31, 0x29, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0xce, 0xad, 0xad, 0xef,
+	0xf7, 0xff, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xff, 0xff,
+	0xff, 0xff, 0xef, 0xf7, 0xe6, 0xef, 0xde, 0xef, 0xef, 0xf7, 0xe6, 0xff,
+	0xff, 0xff, 0xef, 0xf7,
+	0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff,
+	0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x00, 0x00, 0x08, 0x00, 0x00, 0x10, 0x29, 0x19, 0x08,
+	0x00, 0x00, 0x08, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x10,
+	0x21, 0x00, 0x3a, 0x31, 0x4a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xce,
+	0xe6, 0xef, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xe6,
+	0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff,
+	0xff, 0xf7, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xf7, 0xff, 0xce, 0xe6, 0xef, 0xe6, 0xde, 0xde, 0xef,
+	0xf7, 0xff, 0xef, 0xf7,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xe6,
+	0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff,
+	0xff, 0xff, 0xef, 0xf7,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xff,
+	0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x3a,
+	0x31, 0x4a, 0x08, 0x08,
+	0x10, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
+	0x00, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x08, 0x10, 0x4a, 0x52, 0x4a, 0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0xef, 0xf7,
+	0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xe6,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xe6, 0xde, 0xde, 0xe6,
+	0xde, 0xde, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xff, 0xff,
+	0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xf7, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xe6, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x4a, 0x52,
+	0x4a, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x3a, 0x10, 0x21, 0x4a, 0x52, 0x4a, 0x00,
+	0x00, 0x00, 0x08, 0x08,
+	0x10, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xce,
+	0xe6, 0xef, 0xe6, 0xde,
+	0xde, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xff,
+	0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff,
+	0xff, 0xf7, 0xff, 0xff,
+	0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x08, 0x00,
+	0x00, 0x10, 0x29, 0x19, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x3a, 0x31, 0x4a, 0x6b,
+	0x7b, 0x73, 0x08, 0x00,
+	0x00, 0x10, 0x29, 0x19, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff,
+	0xff, 0xff, 0xe6, 0xde,
+	0xde, 0xef, 0xf7, 0xe6, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xe6,
+	0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xef, 0xf7, 0xe6,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xef, 0xf7, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x10, 0x29, 0x19, 0x08, 0x08, 0x10, 0x08, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x00, 0x00,
+	0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x10,
+	0x29, 0x19, 0x6b, 0x5a,
+	0x73, 0x08, 0x00, 0x00, 0x10, 0x29, 0x19, 0xff, 0xf7, 0xff, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xe6,
+	0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xe6, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xe6, 0xde, 0xde, 0xe6, 0xde, 0xde, 0xff, 0xff, 0xff, 0xef, 0xf7,
+	0xe6, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xff, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x08, 0x10, 0x31, 0x31, 0x29, 0x08, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x00,
+	0x00, 0x31, 0x31, 0x29, 0x08, 0x00, 0x00, 0x31, 0x31, 0x29, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xf7,
+	0xff, 0xef, 0xf7, 0xff, 0xe6, 0xde, 0xde, 0xef, 0xf7, 0xe6, 0xef, 0xf7,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff,
+	0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xe6, 0xde, 0xde, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x10, 0x29, 0x19, 0x08, 0x08,
+	0x10, 0x08, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3a, 0x31, 0x4a, 0x31, 0x31,
+	0x29, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xe6, 0xff,
+	0xff, 0xff, 0xff, 0xf7, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0xde, 0xde, 0xce, 0xe6,
+	0xef, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff,
+	0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff,
+	0xff, 0xff, 0xef, 0xf7,
+	0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xef, 0xf7, 0xff, 0xff, 0xf7,
+	0xff, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff,
+	0xff, 0xf7, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa5,
+	0xb5, 0xb5, 0x00, 0x00,
+	0x00, 0x10, 0x21, 0x00, 0x3a, 0x31, 0x4a, 0x10, 0x29, 0x19, 0x3a, 0x08,
+	0x00, 0x08, 0x08, 0x10,
+	0x08, 0x08, 0x10, 0x31, 0x31, 0x29, 0x31, 0x31, 0x29, 0x10, 0x29, 0x19,
+	0x08, 0x08, 0x10, 0x00,
+	0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x10,
+	0x7b, 0x9c, 0x10, 0xc5,
+	0xef, 0x10, 0xc5, 0xef, 0x10, 0x7b, 0x9c, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x4a, 0x52, 0x4a,
+	0xef, 0xf7, 0xe6, 0xff, 0xf7, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xe6, 0xff, 0xf7,
+	0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0xde,
+	0xde, 0xef, 0xf7, 0xe6,
+	0xff, 0xf7, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xf7, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0x6b, 0x5a,
+	0x4a, 0x08, 0x08, 0x10, 0x08, 0x08, 0x10, 0x08, 0x00, 0x00, 0x08, 0x00,
+	0x00, 0x08, 0x08, 0x10,
+	0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00,
+	0x3a, 0x10, 0x21, 0x6b,
+	0x5a, 0x73, 0x10, 0x29, 0x19, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19,
+	0x94, 0xce, 0x08, 0xad,
+	0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x08, 0xad,
+	0xd6, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x4a, 0x5a, 0x73, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+	0xff, 0xe6, 0xde, 0xde,
+	0xef, 0xde, 0xef, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xff, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+	0xff, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xe6, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xce, 0xe6, 0xef, 0xce,
+	0xe6, 0xef, 0xce, 0xe6,
+	0xef, 0x08, 0x10, 0x42, 0x10, 0x29, 0x19, 0x08, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x31, 0x31, 0x29, 0x4a, 0x52, 0x4a, 0x08, 0x08, 0x10, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19,
+	0x7b, 0xbd, 0x00, 0x9c,
+	0xd6, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd,
+	0xf7, 0x19, 0xbd, 0xf7,
+	0x00, 0x9c, 0xd6, 0x08, 0x00, 0x00, 0x08, 0x00, 0x00, 0x4a, 0x52, 0x4a,
+	0xef, 0xde, 0xef, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xef, 0xf7,
+	0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+	0xe6, 0xff, 0xff, 0xff,
+	0xe6, 0xde, 0xde, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xe6, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xef, 0xf7, 0xff, 0xef, 0xf7, 0xff, 0x08, 0xad, 0xd6, 0x19,
+	0xbd, 0xf7, 0x00, 0xbd,
+	0xef, 0x10, 0xc5, 0xef, 0x10, 0xc5, 0xef, 0x08, 0x08, 0x10, 0x08, 0x08,
+	0x10, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x08, 0x10, 0x31, 0x31, 0x29, 0x10, 0x29, 0x19, 0x3a,
+	0x10, 0x21, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x84,
+	0xbd, 0x08, 0xad, 0xd6, 0x08, 0xad, 0xef, 0x08, 0xad, 0xef, 0x10, 0xc5,
+	0xef, 0x00, 0xbd, 0xf7,
+	0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x19, 0x7b, 0xbd, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x31,
+	0x31, 0x29, 0xe6, 0xde, 0xde, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff,
+	0xff, 0xff, 0xef, 0xf7,
+	0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xce, 0xe6, 0xef, 0xef, 0xf7, 0xe6, 0xef, 0xf7, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xe6,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xce, 0xe6, 0xef, 0x08,
+	0xad, 0xef, 0x00, 0xbd,
+	0xf7, 0x10, 0xe6, 0xef, 0x10, 0xc5, 0xef, 0x10, 0xe6, 0xef, 0x08, 0x08,
+	0x10, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x08, 0x00, 0x00, 0x10, 0x29, 0x19, 0x10, 0x29, 0x19, 0x00,
+	0x00, 0x00, 0x00, 0x84,
+	0xbd, 0x10, 0xe6, 0xef, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x19, 0x7b,
+	0xbd, 0x00, 0x9c, 0xd6, 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x19, 0xbd,
+	0xf7, 0x00, 0xbd, 0xf7,
+	0x08, 0xad, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x10, 0xc5, 0xef,
+	0x10, 0x5a, 0x9c, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x29, 0x19, 0xce, 0xde, 0xce, 0xff,
+	0xf7, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+	0xe6, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0xde, 0xde, 0xef, 0xde, 0xef,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xef, 0xf7,
+	0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xce,
+	0xe6, 0xef, 0x00, 0x9c,
+	0xd6, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x08, 0xad,
+	0xd6, 0x08, 0x08, 0x10,
+	0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x08, 0x08, 0x10, 0x00,
+	0x00, 0x00, 0x08, 0x00, 0x00, 0x08, 0x08, 0x10, 0x08, 0x00, 0x00, 0x08,
+	0x08, 0x10, 0x08, 0x31,
+	0x3a, 0x10, 0xc5, 0xef, 0x10, 0xc5, 0xef, 0x10, 0xc5, 0xef, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x84,
+	0xbd, 0x19, 0x94, 0xce, 0x08, 0xad, 0xef, 0x08, 0xad, 0xef, 0x08, 0xad,
+	0xef, 0x00, 0xbd, 0xef,
+	0x19, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7,
+	0x10, 0xc5, 0xef, 0x00,
+	0xbd, 0xef, 0x08, 0x31, 0x3a, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x9c, 0xa5,
+	0x94, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xe6, 0xef,
+	0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xff,
+	0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xce, 0xe6,
+	0xef, 0x08, 0xad, 0xd6, 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd,
+	0xef, 0x19, 0x94, 0xce,
+	0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x19, 0x94, 0xce, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xef, 0x10, 0xc5,
+	0xef, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x84, 0xbd, 0x00,
+	0x84, 0xbd, 0x19, 0x94, 0xce, 0x08, 0xad, 0xef, 0x00, 0x84, 0xbd, 0x19,
+	0x94, 0xce, 0x00, 0x84,
+	0xbd, 0x08, 0xad, 0xd6, 0x00, 0x9c, 0xd6, 0x08, 0xad, 0xef, 0x10, 0xc5,
+	0xef, 0x19, 0xbd, 0xf7,
+	0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7,
+	0x00, 0xbd, 0xf7, 0x00,
+	0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xd6, 0x08, 0x08, 0x10, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x4a, 0x52, 0x4a, 0xff, 0xf7, 0xff, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xe6,
+	0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xef, 0xf7,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xe6,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xff, 0xff,
+	0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xe6, 0xef, 0xde,
+	0xef, 0x9c, 0xde, 0xd6, 0x08, 0xad, 0xef, 0x08, 0xad, 0xef, 0x10, 0xc5,
+	0xef, 0x08, 0xad, 0xef,
+	0x00, 0x84, 0xbd, 0x08, 0x31, 0x3a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x21, 0x00, 0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x08, 0x00,
+	0x00, 0x10, 0x5a, 0x9c, 0x08, 0xad, 0xd6, 0x00, 0xbd, 0xef, 0x10, 0xc5,
+	0xef, 0x10, 0xc5, 0xef,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x19,
+	0x94, 0xce, 0x08, 0xad, 0xef, 0x08, 0xad, 0xd6, 0x08, 0xad, 0xef, 0x08,
+	0xad, 0xef, 0x08, 0xad,
+	0xef, 0x08, 0xad, 0xef, 0x08, 0xad, 0xef, 0x08, 0xad, 0xef, 0x10, 0xc5,
+	0xef, 0x00, 0xbd, 0xf7,
+	0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7,
+	0x10, 0xc5, 0xef, 0x00,
+	0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x19,
+	0x7b, 0xbd, 0x00, 0x00,
+	0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x31, 0x31,
+	0x29, 0xce, 0xe6, 0xef,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff,
+	0xff, 0xff, 0xef, 0xf7,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xff,
+	0xff, 0xff, 0xce, 0xe6,
+	0xef, 0xce, 0xad, 0xad, 0x9c, 0xad, 0xce, 0x19, 0x94, 0xce, 0x08, 0xad,
+	0xef, 0x00, 0xbd, 0xef,
+	0x19, 0x94, 0xce, 0x00, 0x9c, 0xd6, 0x10, 0x5a, 0x9c, 0x08, 0x08, 0x10,
+	0x00, 0x00, 0x00, 0x08,
+	0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00,
+	0x00, 0x00, 0x08, 0x08,
+	0x10, 0x10, 0x73, 0x7b, 0x00, 0x9c, 0xd6, 0x00, 0xbd, 0xef, 0x10, 0xc5,
+	0xef, 0x08, 0xad, 0xef,
+	0x00, 0xbd, 0xf7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x84, 0xbd, 0x00, 0x9c, 0xd6, 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x08,
+	0xad, 0xef, 0x10, 0xc5,
+	0xef, 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x19, 0xbd, 0xf7, 0x19, 0xbd,
+	0xf7, 0x19, 0xbd, 0xf7,
+	0x00, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7,
+	0x19, 0xbd, 0xf7, 0x00,
+	0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x19, 0xbd, 0xf7, 0x00,
+	0xbd, 0xef, 0x10, 0xce,
+	0xce, 0x08, 0x31, 0x3a, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x08, 0x08, 0x10, 0xce, 0xde, 0xce, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xf7, 0xff, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xff,
+	0xff, 0xf7, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff,
+	0xff, 0xff, 0xef, 0xf7,
+	0xe6, 0xc5, 0xad, 0xd6, 0xce, 0xde, 0xce, 0x9c, 0xad, 0xce, 0x08, 0xad,
+	0xd6, 0x08, 0xad, 0xef,
+	0x08, 0xad, 0xef, 0x08, 0xad, 0xef, 0x00, 0x9c, 0xd6, 0x19, 0x7b, 0xbd,
+	0x10, 0x7b, 0x9c, 0x08,
+	0x31, 0x5a, 0x08, 0x31, 0x3a, 0x08, 0x08, 0x10, 0x08, 0x31, 0x3a, 0x08,
+	0x31, 0x3a, 0x10, 0x5a,
+	0x9c, 0x00, 0x84, 0xbd, 0x00, 0x9c, 0xd6, 0x19, 0x94, 0xce, 0x00, 0xbd,
+	0xef, 0x08, 0xad, 0xef,
+	0x10, 0xc5, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x19, 0x7b, 0xbd, 0x19, 0x94, 0xce, 0x08, 0xad, 0xef, 0x08,
+	0xad, 0xef, 0x00, 0xbd,
+	0xef, 0x19, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x10, 0xc5,
+	0xef, 0x00, 0xbd, 0xf7,
+	0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x00, 0xbd, 0xf7,
+	0x19, 0xbd, 0xf7, 0x00,
+	0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00,
+	0xbd, 0xf7, 0x10, 0xc5,
+	0xef, 0x00, 0xbd, 0xf7, 0x19, 0x94, 0xce, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9c, 0x7b, 0x94,
+	0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff,
+	0xff, 0xff, 0xef, 0xf7,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xf7, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xf7, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xa5, 0xb5, 0xb5, 0xce, 0xde, 0xce, 0x9c, 0xad,
+	0xce, 0x19, 0x94, 0xce,
+	0x08, 0xad, 0xd6, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xd6, 0x00, 0x9c, 0xd6,
+	0x00, 0x9c, 0xd6, 0x00,
+	0x84, 0xbd, 0x00, 0x84, 0xbd, 0x00, 0x84, 0xbd, 0x00, 0x84, 0xbd, 0x19,
+	0x7b, 0xbd, 0x00, 0x84,
+	0xbd, 0x00, 0x84, 0xbd, 0x00, 0x84, 0xbd, 0x08, 0xad, 0xef, 0x08, 0xad,
+	0xef, 0x10, 0xc5, 0xef,
+	0x19, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x00, 0xbd, 0xef, 0x08, 0xad, 0xef,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x84, 0xbd, 0x08, 0xad, 0xd6, 0x08,
+	0xad, 0xef, 0x10, 0xc5,
+	0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x19, 0xbd,
+	0xf7, 0x00, 0xbd, 0xf7,
+	0x19, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7,
+	0x00, 0xbd, 0xef, 0x19,
+	0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x00,
+	0xbd, 0xf7, 0x19, 0xbd,
+	0xf7, 0x00, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xef, 0x10, 0x52,
+	0x7b, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x6b,
+	0x7b, 0x73, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xef, 0xf7,
+	0xe6, 0xef, 0xf7, 0xff,
+	0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff,
+	0xff, 0xff, 0xef, 0xf7,
+	0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xce, 0xad, 0xad, 0xce, 0xde,
+	0xce, 0xa5, 0xb5, 0xb5,
+	0x00, 0x9c, 0xd6, 0x08, 0xad, 0xef, 0x08, 0xad, 0xef, 0x08, 0xad, 0xef,
+	0x19, 0x94, 0xce, 0x00,
+	0x9c, 0xd6, 0x19, 0x94, 0xce, 0x00, 0x84, 0xbd, 0x19, 0x94, 0xce, 0x00,
+	0x84, 0xbd, 0x00, 0x84,
+	0xbd, 0x00, 0x84, 0xbd, 0x19, 0x94, 0xce, 0x08, 0xad, 0xef, 0x08, 0xad,
+	0xef, 0x19, 0xbd, 0xf7,
+	0x00, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x10, 0xc5, 0xef,
+	0x10, 0xc5, 0xef, 0x19,
+	0x94, 0xce, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19, 0x7b, 0xbd, 0x19,
+	0x94, 0xce, 0x08, 0xad,
+	0xef, 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x19, 0xbd,
+	0xf7, 0x00, 0xbd, 0xf7,
+	0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x00, 0xbd, 0xf7, 0x10, 0xc5, 0xef,
+	0x00, 0xbd, 0xf7, 0x08,
+	0xad, 0xef, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00,
+	0xbd, 0xf7, 0x19, 0xbd,
+	0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x10, 0xc5,
+	0xef, 0x08, 0xad, 0xd6,
+	0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0xce, 0xde, 0xce, 0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xe6, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xef, 0xf7, 0xff, 0xce, 0xde,
+	0xce, 0xc5, 0xad, 0xd6,
+	0xa5, 0xb5, 0xb5, 0x19, 0x94, 0xce, 0x00, 0x9c, 0xd6, 0x08, 0xad, 0xd6,
+	0x08, 0xad, 0xef, 0x08,
+	0xad, 0xef, 0x08, 0xad, 0xef, 0x00, 0x9c, 0xd6, 0x00, 0x9c, 0xd6, 0x00,
+	0x9c, 0xd6, 0x00, 0x9c,
+	0xd6, 0x19, 0x94, 0xce, 0x08, 0xad, 0xef, 0x08, 0xad, 0xd6, 0x08, 0xad,
+	0xef, 0x19, 0xbd, 0xf7,
+	0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7,
+	0x10, 0xc5, 0xef, 0x08,
+	0xad, 0xef, 0x08, 0xad, 0xef, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x84, 0xbd, 0x00, 0x9c,
+	0xd6, 0x08, 0xad, 0xd6, 0x08, 0xad, 0xef, 0x19, 0xbd, 0xf7, 0x19, 0xbd,
+	0xf7, 0x00, 0xbd, 0xef,
+	0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x19, 0xbd, 0xf7,
+	0x00, 0xbd, 0xf7, 0x19,
+	0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x10,
+	0xc5, 0xef, 0x00, 0xbd,
+	0xf7, 0x10, 0xc5, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x00, 0xbd,
+	0xf7, 0x19, 0xbd, 0xf7,
+	0x00, 0xbd, 0xef, 0x19, 0x7b, 0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6b, 0x5a, 0x73, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xe6, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+	0xe6, 0xce, 0xde, 0xce,
+	0xa5, 0xb5, 0xb5, 0x9c, 0xad, 0xce, 0x00, 0x84, 0xbd, 0x00, 0x9c, 0xd6,
+	0x08, 0xad, 0xef, 0x10,
+	0xc5, 0xef, 0x08, 0xad, 0xef, 0x08, 0xad, 0xd6, 0x08, 0xad, 0xef, 0x19,
+	0x94, 0xce, 0x08, 0xad,
+	0xef, 0x19, 0x94, 0xce, 0x08, 0xad, 0xef, 0x08, 0xad, 0xd6, 0x08, 0xad,
+	0xef, 0x00, 0xbd, 0xef,
+	0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x08, 0xad, 0xef,
+	0x19, 0xbd, 0xf7, 0x00,
+	0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x08, 0xad, 0xd6, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x10, 0x7b, 0xe6, 0x08, 0xad, 0xd6, 0x08, 0xad, 0xef, 0x00, 0xbd,
+	0xef, 0x00, 0xbd, 0xf7,
+	0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7,
+	0x00, 0xbd, 0xf7, 0x08,
+	0xad, 0xef, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x08,
+	0xad, 0xef, 0x00, 0xbd,
+	0xf7, 0x19, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x00, 0xbd,
+	0xf7, 0x19, 0xbd, 0xf7,
+	0x10, 0xc5, 0xef, 0x10, 0xe6, 0xef, 0x00, 0xbd, 0xef, 0x08, 0x31, 0x3a,
+	0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4a,
+	0x52, 0x4a, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xe6, 0xff,
+	0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xf7, 0xff,
+	0xe6, 0xde, 0xde, 0xc5, 0xad, 0xd6, 0x29, 0x5a, 0x4a, 0x00, 0x84, 0xbd,
+	0x19, 0x94, 0xce, 0x08,
+	0xad, 0xd6, 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x00,
+	0xbd, 0xef, 0x08, 0xad,
+	0xef, 0x08, 0xad, 0xef, 0x08, 0xad, 0xd6, 0x08, 0xad, 0xef, 0x08, 0xad,
+	0xef, 0x08, 0xad, 0xef,
+	0x19, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7,
+	0x00, 0xbd, 0xf7, 0x00,
+	0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x19,
+	0xbd, 0xf7, 0x00, 0xbd,
+	0xef, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x19, 0x94, 0xce, 0x08, 0xad, 0xd6, 0x08, 0xad,
+	0xef, 0x19, 0xbd, 0xf7,
+	0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7,
+	0x10, 0xc5, 0xef, 0x19,
+	0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x10,
+	0xc5, 0xef, 0x19, 0xbd,
+	0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd,
+	0xf7, 0x00, 0xbd, 0xf7,
+	0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x00, 0xbd, 0xf7, 0x10, 0xc5, 0xef,
+	0x08, 0xad, 0xd6, 0x08,
+	0x08, 0x10, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x9c, 0xa5,
+	0x94, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xf7,
+	0xff, 0xef, 0xf7, 0xe6,
+	0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xf7, 0xff, 0xef, 0xf7, 0xe6,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xef, 0xf7,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+	0xe6, 0xff, 0xf7, 0xff,
+	0xef, 0xf7, 0xff, 0xce, 0xde, 0xce, 0x4a, 0x52, 0x4a, 0x08, 0x31, 0x5a,
+	0x00, 0x84, 0xbd, 0x00,
+	0x9c, 0xd6, 0x08, 0xad, 0xef, 0x08, 0xad, 0xef, 0x19, 0xbd, 0xf7, 0x10,
+	0xc5, 0xef, 0x08, 0xad,
+	0xef, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x00, 0xbd, 0xef, 0x10, 0xc5,
+	0xef, 0x08, 0xad, 0xef,
+	0x00, 0xbd, 0xef, 0x10, 0xc5, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7,
+	0x19, 0xbd, 0xf7, 0x00,
+	0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x00,
+	0xbd, 0xef, 0x00, 0xbd,
+	0xf7, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x08, 0xad, 0xef, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9c, 0xd6, 0x19, 0x94,
+	0xce, 0x19, 0xbd, 0xf7,
+	0x00, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7,
+	0x08, 0xad, 0xef, 0x19,
+	0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x10,
+	0xc5, 0xef, 0x00, 0xbd,
+	0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19, 0xbd,
+	0xf7, 0x00, 0xbd, 0xf7,
+	0x08, 0xad, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7,
+	0x10, 0xc5, 0xef, 0x10,
+	0xc5, 0xef, 0x10, 0x5a, 0x9c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x9c, 0x7b,
+	0x94, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xff, 0xff, 0xf7, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xe6, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x4a, 0x52, 0x4a, 0x08, 0x00, 0x00,
+	0x08, 0x31, 0x5a, 0x00,
+	0x84, 0xbd, 0x19, 0x94, 0xce, 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x08,
+	0xad, 0xef, 0x19, 0xbd,
+	0xf7, 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x00, 0xbd,
+	0xf7, 0x19, 0xbd, 0xf7,
+	0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef,
+	0x00, 0xbd, 0xf7, 0x19,
+	0xbd, 0xf7, 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x19,
+	0xbd, 0xf7, 0x00, 0xbd,
+	0xf7, 0x19, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19, 0xbd,
+	0xf7, 0x19, 0xbd, 0xf7,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19, 0x94,
+	0xce, 0x00, 0x9c, 0xd6,
+	0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xef,
+	0x19, 0xbd, 0xf7, 0x00,
+	0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19,
+	0xbd, 0xf7, 0x00, 0xbd,
+	0xf7, 0x19, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x00, 0xbd,
+	0xf7, 0x19, 0xbd, 0xf7,
+	0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x10, 0xc5, 0xef,
+	0x08, 0xad, 0xef, 0x00,
+	0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xd6, 0x10, 0x73, 0x7b, 0xe6,
+	0xde, 0xde, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7,
+	0xff, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0x31, 0x31, 0x29, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x10,
+	0x52, 0x7b, 0x00, 0x84, 0xbd, 0x08, 0xad, 0xd6, 0x08, 0xad, 0xef, 0x08,
+	0xad, 0xef, 0x00, 0xbd,
+	0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x08, 0xad,
+	0xef, 0x19, 0xbd, 0xf7,
+	0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7,
+	0x00, 0xbd, 0xf7, 0x19,
+	0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19,
+	0xbd, 0xf7, 0x00, 0xbd,
+	0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x00, 0xbd, 0xf7, 0x19, 0xbd,
+	0xf7, 0x00, 0xbd, 0xf7,
+	0x00, 0xbd, 0xef, 0x08, 0xad, 0xef, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x84, 0xbd,
+	0x00, 0x9c, 0xd6, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x08, 0xad, 0xef,
+	0x19, 0xbd, 0xf7, 0x00,
+	0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x19,
+	0xbd, 0xf7, 0x00, 0xbd,
+	0xf7, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd,
+	0xf7, 0x19, 0xbd, 0xf7,
+	0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7,
+	0x00, 0xbd, 0xf7, 0x19,
+	0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xd6, 0x10, 0xc5, 0xef, 0x19,
+	0x94, 0xce, 0x9c, 0xad,
+	0xce, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xe6, 0xff,
+	0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xe6, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xef, 0xf7, 0xff, 0xef, 0xde, 0xef, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x10, 0x52, 0x7b, 0x00, 0x84, 0xbd, 0x19, 0x94, 0xce, 0x08,
+	0xad, 0xef, 0x08, 0xad,
+	0xef, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x10, 0xc5,
+	0xef, 0x00, 0xbd, 0xf7,
+	0x00, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x10, 0xc5, 0xef,
+	0x00, 0xbd, 0xf7, 0x10,
+	0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x08,
+	0xad, 0xef, 0x10, 0xc5,
+	0xef, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x10, 0xc5,
+	0xef, 0x00, 0xbd, 0xf7,
+	0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x08, 0xad, 0xef, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x84, 0xbd,
+	0x00, 0x84, 0xbd, 0x08, 0xad, 0xef, 0x08, 0xad, 0xef, 0x19, 0xbd, 0xf7,
+	0x00, 0xbd, 0xf7, 0x00,
+	0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x19,
+	0xbd, 0xf7, 0x00, 0xbd,
+	0xef, 0x19, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd,
+	0xf7, 0x10, 0xc5, 0xef,
+	0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x10, 0xc5, 0xef,
+	0x00, 0xbd, 0xf7, 0x10,
+	0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x10,
+	0xc5, 0xef, 0x08, 0xad,
+	0xd6, 0x10, 0x7b, 0x9c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xff,
+	0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff,
+	0xff, 0xef, 0xf7, 0xff,
+	0xff, 0xff, 0xff, 0x9c, 0xa5, 0x94, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x08, 0x08, 0x10, 0x10, 0x52, 0x7b, 0x00, 0x84, 0xbd, 0x08,
+	0xad, 0xd6, 0x00, 0xbd,
+	0xef, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd,
+	0xf7, 0x19, 0xbd, 0xf7,
+	0x00, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x19, 0xbd, 0xf7,
+	0x08, 0xad, 0xef, 0x19,
+	0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x19,
+	0xbd, 0xf7, 0x00, 0xbd,
+	0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd,
+	0xf7, 0x19, 0xbd, 0xf7,
+	0x08, 0xad, 0xef, 0x08, 0xad, 0xef, 0x19, 0x94, 0xce, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x84, 0xbd, 0x19, 0x94, 0xce, 0x08, 0xad, 0xef, 0x08, 0xad, 0xef,
+	0x00, 0xbd, 0xef, 0x19,
+	0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19,
+	0xbd, 0xf7, 0x00, 0xbd,
+	0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd,
+	0xf7, 0x10, 0xc5, 0xef,
+	0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7,
+	0x08, 0xad, 0xef, 0x00,
+	0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x00,
+	0xbd, 0xef, 0x10, 0xc5,
+	0xef, 0x08, 0xad, 0xd6, 0x00, 0x84, 0xbd, 0x10, 0x52, 0x7b, 0xef, 0xf7,
+	0xe6, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xef, 0xf7,
+	0xff, 0xff, 0xf7, 0xff, 0xef, 0xf7, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xf7, 0xff,
+	0xce, 0xde, 0xce, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x52, 0x7b, 0x00,
+	0x84, 0xbd, 0x19, 0x94,
+	0xce, 0x08, 0xad, 0xef, 0x08, 0xad, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd,
+	0xf7, 0x19, 0xbd, 0xf7,
+	0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7,
+	0x19, 0xbd, 0xf7, 0x00,
+	0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x19,
+	0xbd, 0xf7, 0x00, 0xbd,
+	0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd,
+	0xef, 0x19, 0xbd, 0xf7,
+	0x08, 0xad, 0xef, 0x08, 0xad, 0xd6, 0x00, 0x9c, 0xd6, 0x19, 0x7b, 0xbd,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x84, 0xbd, 0x19, 0x94, 0xce, 0x00, 0xbd, 0xf7,
+	0x19, 0xbd, 0xf7, 0x19,
+	0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19,
+	0xbd, 0xf7, 0x00, 0xbd,
+	0xef, 0x00, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x10, 0xc5,
+	0xef, 0x08, 0xad, 0xef,
+	0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef,
+	0x00, 0xbd, 0xf7, 0x19,
+	0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00,
+	0xbd, 0xf7, 0x19, 0xbd,
+	0xf7, 0x00, 0xbd, 0xef, 0x08, 0xad, 0xd6, 0x08, 0xad, 0xd6, 0x10, 0x52,
+	0x7b, 0x3a, 0x31, 0x4a,
+	0xef, 0xf7, 0xe6, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6,
+	0xff, 0xff, 0xff, 0xef,
+	0xf7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xef, 0xf7, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xce, 0xde, 0xce,
+	0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x08, 0x08, 0x10, 0x10,
+	0x52, 0x7b, 0x00, 0x84,
+	0xbd, 0x00, 0x9c, 0xd6, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x00, 0xbd,
+	0xf7, 0x10, 0xc5, 0xef,
+	0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x00, 0xbd, 0xf7,
+	0x19, 0xbd, 0xf7, 0x00,
+	0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x00,
+	0xbd, 0xf7, 0x00, 0xbd,
+	0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd,
+	0xef, 0x08, 0xad, 0xef,
+	0x08, 0xad, 0xef, 0x19, 0x94, 0xce, 0x19, 0x94, 0xce, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x19, 0x7b, 0xbd, 0x00, 0x84, 0xbd, 0x08, 0xad, 0xef,
+	0x10, 0xc5, 0xef, 0x00,
+	0xbd, 0xef, 0x08, 0xad, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19,
+	0xbd, 0xf7, 0x00, 0xbd,
+	0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x08, 0xad,
+	0xef, 0x19, 0xbd, 0xf7,
+	0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7,
+	0x00, 0xbd, 0xf7, 0x19,
+	0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x10,
+	0xc5, 0xef, 0x00, 0xbd,
+	0xf7, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x08, 0xad, 0xd6, 0x00, 0x9c,
+	0xd6, 0x10, 0x7b, 0x9c,
+	0x08, 0x31, 0x5a, 0x00, 0x00, 0x00, 0x4a, 0x5a, 0x73, 0xce, 0xde, 0xce,
+	0xef, 0xf7, 0xe6, 0xff,
+	0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xef, 0xf7,
+	0xe6, 0xef, 0xf7, 0xff, 0xef, 0xf7, 0xe6, 0xa5, 0xb5, 0xb5, 0x6b, 0x5a,
+	0x73, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+	0x31, 0x3a, 0x10, 0x52,
+	0x7b, 0x19, 0x7b, 0xbd, 0x00, 0x9c, 0xd6, 0x08, 0xad, 0xef, 0x10, 0xc5,
+	0xef, 0x19, 0xbd, 0xf7,
+	0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7,
+	0x08, 0xad, 0xef, 0x10,
+	0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19,
+	0xbd, 0xf7, 0x00, 0xbd,
+	0xef, 0x19, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x08, 0xad,
+	0xef, 0x08, 0xad, 0xef,
+	0x19, 0x94, 0xce, 0x00, 0x9c, 0xd6, 0x19, 0x7b, 0xbd, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x84, 0xbd, 0x00, 0x9c, 0xd6,
+	0x19, 0x94, 0xce, 0x08,
+	0xad, 0xef, 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x00,
+	0xbd, 0xf7, 0x10, 0xc5,
+	0xef, 0x08, 0xad, 0xef, 0x00, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x00, 0xbd,
+	0xf7, 0x19, 0xbd, 0xf7,
+	0x00, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef,
+	0x00, 0xbd, 0xf7, 0x19,
+	0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x19,
+	0xbd, 0xf7, 0x08, 0xad,
+	0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x08, 0xad,
+	0xd6, 0x19, 0x94, 0xce,
+	0x00, 0x84, 0xbd, 0x10, 0x52, 0x7b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x08, 0x10, 0x10, 0x29, 0x19, 0x31, 0x31, 0x29, 0x31,
+	0x31, 0x29, 0x08, 0x08,
+	0x10, 0x08, 0x08, 0x10, 0x08, 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x31,
+	0x3a, 0x10, 0x52, 0x7b, 0x00, 0x84, 0xbd, 0x00, 0x9c, 0xd6, 0x08, 0xad,
+	0xef, 0x10, 0xc5, 0xef,
+	0x08, 0xad, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7,
+	0x00, 0xbd, 0xf7, 0x19,
+	0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19,
+	0xbd, 0xf7, 0x08, 0xad,
+	0xef, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x08, 0xad,
+	0xef, 0x00, 0x84, 0xbd,
+	0x19, 0x94, 0xce, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x84, 0xbd,
+	0x19, 0x94, 0xce, 0x00,
+	0x9c, 0xd6, 0x00, 0x9c, 0xd6, 0x08, 0xad, 0xef, 0x08, 0xad, 0xd6, 0x08,
+	0xad, 0xef, 0x08, 0xad,
+	0xef, 0x08, 0xad, 0xef, 0x08, 0xad, 0xef, 0x19, 0xbd, 0xf7, 0x08, 0xad,
+	0xef, 0x08, 0xad, 0xef,
+	0x19, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7,
+	0x00, 0xbd, 0xf7, 0x19,
+	0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19,
+	0xbd, 0xf7, 0x00, 0xbd,
+	0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x10, 0xc5, 0xef, 0x08, 0xad,
+	0xef, 0x08, 0xad, 0xef,
+	0x00, 0x9c, 0xd6, 0x10, 0x5a, 0x9c, 0x10, 0x52, 0x7b, 0x08, 0x31, 0x3a,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x00, 0x00, 0x08, 0x08, 0x10, 0x08, 0x00, 0x00, 0x08,
+	0x08, 0x10, 0x00, 0x00,
+	0x00, 0x08, 0x00, 0x00, 0x08, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x31, 0x5a, 0x10, 0x5a, 0x9c, 0x00, 0x84, 0xbd, 0x19, 0x94,
+	0xce, 0x08, 0xad, 0xef,
+	0x08, 0xad, 0xef, 0x00, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7,
+	0x00, 0xbd, 0xf7, 0x10,
+	0xc5, 0xef, 0x00, 0xbd, 0xf7, 0x19, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x10,
+	0xc5, 0xef, 0x00, 0xbd,
+	0xef, 0x08, 0xad, 0xef, 0x08, 0xad, 0xef, 0x19, 0x94, 0xce, 0x00, 0x9c,
+	0xd6, 0x00, 0x84, 0xbd,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x10, 0x7b, 0x9c, 0x00,
+	0x84, 0xbd, 0x00, 0x84, 0xbd, 0x19, 0x94, 0xce, 0x00, 0x84, 0xbd, 0x19,
+	0x94, 0xce, 0x00, 0x9c,
+	0xd6, 0x19, 0x94, 0xce, 0x00, 0x9c, 0xd6, 0x08, 0xad, 0xd6, 0x08, 0xad,
+	0xef, 0x08, 0xad, 0xd6,
+	0x08, 0xad, 0xef, 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef,
+	0x10, 0xc5, 0xef, 0x08,
+	0xad, 0xef, 0x19, 0xbd, 0xf7, 0x00, 0xbd, 0xf7, 0x00, 0xbd, 0xef, 0x19,
+	0xbd, 0xf7, 0x00, 0xbd,
+	0xef, 0x10, 0xc5, 0xef, 0x19, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x00, 0xbd,
+	0xf7, 0x08, 0xad, 0xd6,
+	0x19, 0x94, 0xce, 0x00, 0x84, 0xbd, 0x10, 0x7b, 0x9c, 0x10, 0x52, 0x7b,
+	0x08, 0x31, 0x5a, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00,
+	0x00, 0x00, 0x08, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x08, 0x31, 0x3a, 0x10, 0x52, 0x7b, 0x00, 0x84,
+	0xbd, 0x00, 0x84, 0xbd,
+	0x08, 0xad, 0xd6, 0x08, 0xad, 0xef, 0x19, 0xbd, 0xf7, 0x10, 0xc5, 0xef,
+	0x00, 0xbd, 0xf7, 0x10,
+	0xc5, 0xef, 0x08, 0xad, 0xef, 0x19, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x00,
+	0xbd, 0xf7, 0x08, 0xad,
+	0xef, 0x19, 0x94, 0xce, 0x08, 0xad, 0xd6, 0x00, 0x9c, 0xd6, 0x00, 0x9c,
+	0xd6, 0x10, 0x5a, 0x9c,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x5a, 0x9c, 0x10, 0x5a, 0x9c, 0x10,
+	0x5a, 0x9c, 0x00, 0x84,
+	0xbd, 0x19, 0x7b, 0xbd, 0x00, 0x84, 0xbd, 0x00, 0x84, 0xbd, 0x19, 0x94,
+	0xce, 0x00, 0x84, 0xbd,
+	0x19, 0x94, 0xce, 0x00, 0x9c, 0xd6, 0x19, 0x94, 0xce, 0x08, 0xad, 0xef,
+	0x19, 0x94, 0xce, 0x08,
+	0xad, 0xef, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x00, 0xbd, 0xef, 0x19,
+	0xbd, 0xf7, 0x00, 0xbd,
+	0xef, 0x19, 0xbd, 0xf7, 0x08, 0xad, 0xef, 0x00, 0xbd, 0xf7, 0x10, 0xc5,
+	0xef, 0x08, 0xad, 0xef,
+	0x08, 0xad, 0xef, 0x00, 0x84, 0xbd, 0x00, 0x84, 0xbd, 0x10, 0x5a, 0x9c,
+	0x10, 0x52, 0x7b, 0x08,
+	0x31, 0x3a, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x08, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x31, 0x5a, 0x10, 0x52,
+	0x7b, 0x00, 0x84, 0xbd,
+	0x00, 0x84, 0xbd, 0x19, 0x94, 0xce, 0x08, 0xad, 0xd6, 0x08, 0xad, 0xef,
+	0x19, 0xbd, 0xf7, 0x08,
+	0xad, 0xef, 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x00,
+	0xbd, 0xef, 0x08, 0xad,
+	0xef, 0x19, 0x94, 0xce, 0x08, 0xad, 0xef, 0x00, 0x84, 0xbd, 0x00, 0x84,
+	0xbd, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x10, 0x73, 0x7b, 0x10, 0x5a, 0x9c, 0x10, 0x7b,
+	0x9c, 0x10, 0x5a, 0xbd,
+	0x00, 0x84, 0xbd, 0x10, 0x7b, 0x9c, 0x00, 0x84, 0xbd, 0x00, 0x84, 0xbd,
+	0x00, 0x84, 0xbd, 0x00,
+	0x84, 0xbd, 0x00, 0x84, 0xbd, 0x19, 0x94, 0xce, 0x00, 0x9c, 0xd6, 0x08,
+	0xad, 0xef, 0x08, 0xad,
+	0xef, 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x08, 0xad,
+	0xef, 0x08, 0xad, 0xef,
+	0x08, 0xad, 0xd6, 0x19, 0x94, 0xce, 0x00, 0x84, 0xbd, 0x10, 0x5a, 0x9c,
+	0x10, 0x52, 0x7b, 0x10,
+	0x52, 0x7b, 0x08, 0x31, 0x3a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x08, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x10, 0x21, 0x00, 0x08,
+	0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x31,
+	0x3a, 0x10, 0x52, 0x7b,
+	0x10, 0x5a, 0x9c, 0x00, 0x84, 0xbd, 0x00, 0x84, 0xbd, 0x08, 0xad, 0xef,
+	0x08, 0xad, 0xd6, 0x08,
+	0xad, 0xef, 0x08, 0xad, 0xef, 0x10, 0xc5, 0xef, 0x08, 0xad, 0xef, 0x08,
+	0xad, 0xef, 0x19, 0x94,
+	0xce, 0x00, 0x9c, 0xd6, 0x00, 0x84, 0xbd, 0x19, 0x7b, 0xbd, 0x10, 0x5a,
+	0x9c, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x10, 0x5a, 0x9c, 0x10, 0x52, 0x7b, 0x10, 0x5a, 0x9c,
+	0x10, 0x5a, 0x9c, 0x10,
+	0x5a, 0x9c, 0x10, 0x5a, 0x9c, 0x10, 0x7b, 0x9c, 0x00, 0x84, 0xbd, 0x00,
+	0x84, 0xbd, 0x19, 0x94,
+	0xce, 0x00, 0x9c, 0xd6, 0x19, 0x94, 0xce, 0x00, 0x9c, 0xd6, 0x08, 0xad,
+	0xef, 0x08, 0xad, 0xd6,
+	0x19, 0x94, 0xce, 0x00, 0x84, 0xbd, 0x00, 0x84, 0xbd, 0x10, 0x5a, 0x9c,
+	0x10, 0x52, 0x7b, 0x10,
+	0x52, 0x7b, 0x08, 0x31, 0x5a, 0x08, 0x08, 0x10, 0x08, 0x08, 0x10, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x31, 0x3a,
+	0x08, 0x31, 0x5a, 0x10, 0x5a, 0x9c, 0x10, 0x5a, 0x9c, 0x19, 0x7b, 0xbd,
+	0x00, 0x84, 0xbd, 0x00,
+	0x9c, 0xd6, 0x08, 0xad, 0xd6, 0x19, 0x94, 0xce, 0x00, 0x9c, 0xd6, 0x19,
+	0x94, 0xce, 0x00, 0x9c,
+	0xd6, 0x00, 0x84, 0xbd, 0x00, 0x84, 0xbd, 0x00, 0x84, 0xbd, 0x10, 0x5a,
+	0x9c, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x10, 0x52, 0x7b, 0x10, 0x52, 0x7b, 0x10, 0x5a, 0x9c, 0x10,
+	0x5a, 0x9c, 0x10, 0x5a,
+	0x9c, 0x10, 0x5a, 0x9c, 0x10, 0x7b, 0x9c, 0x00, 0x84, 0xbd, 0x19, 0x7b,
+	0xbd, 0x00, 0x84, 0xbd,
+	0x19, 0x7b, 0xbd, 0x00, 0x84, 0xbd, 0x00, 0x84, 0xbd, 0x10, 0x5a, 0x9c,
+	0x10, 0x52, 0x7b, 0x10,
+	0x52, 0x7b, 0x08, 0x31, 0x5a, 0x08, 0x31, 0x5a, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x08, 0x31, 0x5a, 0x10, 0x5a, 0x9c, 0x10, 0x7b, 0x9c,
+	0x00, 0x84, 0xbd, 0x00,
+	0x84, 0xbd, 0x19, 0x7b, 0xbd, 0x00, 0x84, 0xbd, 0x00, 0x84, 0xbd, 0x00,
+	0x84, 0xbd, 0x00, 0x84,
+	0xbd, 0x19, 0x7b, 0xbd, 0x10, 0x7b, 0x9c, 0x10, 0x5a, 0x9c, 0x10, 0x52,
+	0x7b, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x08, 0x31, 0x5a, 0x10, 0x52, 0x7b, 0x10, 0x52, 0x7b, 0x10, 0x5a,
+	0x9c, 0x10, 0x73, 0x7b,
+	0x10, 0x5a, 0x9c, 0x10, 0x5a, 0x9c, 0x10, 0x5a, 0x9c, 0x10, 0x52, 0x7b,
+	0x10, 0x52, 0x7b, 0x10,
+	0x52, 0x7b, 0x08, 0x31, 0x5a, 0x08, 0x31, 0x3a, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x31, 0x5a, 0x08, 0x31, 0x5a,
+	0x10, 0x52, 0x7b, 0x10,
+	0x5a, 0x9c, 0x10, 0x5a, 0x9c, 0x10, 0x7b, 0x9c, 0x10, 0x5a, 0x9c, 0x00,
+	0x84, 0xbd, 0x00, 0x84,
+	0xbd, 0x10, 0x7b, 0x9c, 0x10, 0x5a, 0x9c, 0x10, 0x5a, 0x9c, 0x08, 0x31,
+	0x5a, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x31,
+	0x5a, 0x10, 0x52, 0x7b,
+	0x08, 0x31, 0x5a, 0x10, 0x52, 0x7b, 0x10, 0x52, 0x7b, 0x08, 0x31, 0x5a,
+	0x10, 0x52, 0x7b, 0x08,
+	0x31, 0x5a, 0x08, 0x31, 0x5a, 0x08, 0x31, 0x3a, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x08, 0x31, 0x5a, 0x08,
+	0x31, 0x5a, 0x10, 0x52, 0x7b, 0x10, 0x5a, 0x9c, 0x10, 0x5a, 0x9c, 0x10,
+	0x5a, 0x9c, 0x10, 0x5a,
+	0x9c, 0x10, 0x5a, 0x9c, 0x10, 0x52, 0x7b, 0x10, 0x52, 0x7b, 0x08, 0x31,
+	0x5a, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x31, 0x5a, 0x08, 0x31, 0x5a,
+	0x08, 0x31, 0x5a, 0x08,
+	0x31, 0x5a, 0x08, 0x31, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x31, 0x3a, 0x08, 0x31, 0x5a, 0x08,
+	0x31, 0x5a, 0x10, 0x52,
+	0x7b, 0x08, 0x31, 0x5a, 0x08, 0x31, 0x5a, 0x08, 0x31, 0x3a, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+};
+
+#endif
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index e9c2f7b..53326ba 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -769,11 +769,11 @@
 
 	for (i = 0; i < len; i++) {
 		ret = usb_control_msg(dev->udev,
-				    usb_rcvctrlpipe(dev->udev, 0), (0x02),
-				    (0x80 | (0x02 << 5)), i << 8, 0xA1, rbuf, 2,
-				    HZ);
-		if (ret < 1) {
-			pr_err("Read EDID byte %d failed err %x\n", i, ret);
+				      usb_rcvctrlpipe(dev->udev, 0), 0x02,
+				      (0x80 | (0x02 << 5)), i << 8, 0xA1,
+				      rbuf, 2, USB_CTRL_GET_TIMEOUT);
+		if (ret < 2) {
+			pr_err("Read EDID byte %d failed: %d\n", i, ret);
 			i--;
 			break;
 		}
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 7062bb0..462e183 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -323,6 +323,8 @@
 	/* device_register() causes the bus infrastructure to look for a
 	 * matching driver. */
 	err = device_register(&dev->dev);
+	if (err)
+		ida_simple_remove(&virtio_index_ida, dev->index);
 out:
 	if (err)
 		add_status(dev, VIRTIO_CONFIG_S_FAILED);
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 2c2e679..a7c08cc 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -241,11 +241,11 @@
 
 #define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT)
 
-static void update_balloon_stats(struct virtio_balloon *vb)
+static unsigned int update_balloon_stats(struct virtio_balloon *vb)
 {
 	unsigned long events[NR_VM_EVENT_ITEMS];
 	struct sysinfo i;
-	int idx = 0;
+	unsigned int idx = 0;
 	long available;
 
 	all_vm_events(events);
@@ -253,18 +253,22 @@
 
 	available = si_mem_available();
 
+#ifdef CONFIG_VM_EVENT_COUNTERS
 	update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
 				pages_to_bytes(events[PSWPIN]));
 	update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT,
 				pages_to_bytes(events[PSWPOUT]));
 	update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
 	update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
+#endif
 	update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE,
 				pages_to_bytes(i.freeram));
 	update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT,
 				pages_to_bytes(i.totalram));
 	update_stat(vb, idx++, VIRTIO_BALLOON_S_AVAIL,
 				pages_to_bytes(available));
+
+	return idx;
 }
 
 /*
@@ -290,14 +294,14 @@
 {
 	struct virtqueue *vq;
 	struct scatterlist sg;
-	unsigned int len;
+	unsigned int len, num_stats;
 
-	update_balloon_stats(vb);
+	num_stats = update_balloon_stats(vb);
 
 	vq = vb->stats_vq;
 	if (!virtqueue_get_buf(vq, &len))
 		return;
-	sg_init_one(&sg, vb->stats, sizeof(vb->stats));
+	sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
 	virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
 	virtqueue_kick(vq);
 }
@@ -421,15 +425,16 @@
 	vb->deflate_vq = vqs[1];
 	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
 		struct scatterlist sg;
+		unsigned int num_stats;
 		vb->stats_vq = vqs[2];
 
 		/*
 		 * Prime this virtqueue with one buffer so the hypervisor can
 		 * use it to signal us later (it can't be broken yet!).
 		 */
-		update_balloon_stats(vb);
+		num_stats = update_balloon_stats(vb);
 
-		sg_init_one(&sg, vb->stats, sizeof vb->stats);
+		sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
 		if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL)
 		    < 0)
 			BUG();
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 1e8be12..0a3c676 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -316,7 +316,7 @@
 			rc = -ENOMEM;
 			goto out;
 		}
-	} else if (msg_type == XS_TRANSACTION_END) {
+	} else if (u->u.msg.tx_id != 0) {
 		list_for_each_entry(trans, &u->transactions, list)
 			if (trans->handle.id == u->u.msg.tx_id)
 				break;
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 30ca770..f8ab4a6 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -483,6 +483,9 @@
 
 	if (v9inode->qid.type != st->qid.type)
 		return 0;
+
+	if (v9inode->qid.path != st->qid.path)
+		return 0;
 	return 1;
 }
 
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index afaa4b6..c3dd0d4 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -87,6 +87,9 @@
 
 	if (v9inode->qid.type != st->qid.type)
 		return 0;
+
+	if (v9inode->qid.path != st->qid.path)
+		return 0;
 	return 1;
 }
 
diff --git a/fs/afs/callback.c b/fs/afs/callback.c
index 1e9d2f8..1592dc6 100644
--- a/fs/afs/callback.c
+++ b/fs/afs/callback.c
@@ -362,7 +362,7 @@
 {
 	struct afs_server *server;
 	struct afs_vnode *vnode, *xvnode;
-	time_t now;
+	time64_t now;
 	long timeout;
 	int ret;
 
@@ -370,7 +370,7 @@
 
 	_enter("");
 
-	now = get_seconds();
+	now = ktime_get_real_seconds();
 
 	/* find the first vnode to update */
 	spin_lock(&server->cb_lock);
@@ -424,7 +424,8 @@
 
 	/* and then reschedule */
 	_debug("reschedule");
-	vnode->update_at = get_seconds() + afs_vnode_update_timeout;
+	vnode->update_at = ktime_get_real_seconds() +
+			afs_vnode_update_timeout;
 
 	spin_lock(&server->cb_lock);
 
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index d764236..168f2a4 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -106,6 +106,9 @@
 	case CBProbe:
 		call->type = &afs_SRXCBProbe;
 		return true;
+	case CBProbeUuid:
+		call->type = &afs_SRXCBProbeUuid;
+		return true;
 	case CBTellMeAboutYourself:
 		call->type = &afs_SRXCBTellMeAboutYourself;
 		return true;
@@ -165,7 +168,6 @@
 	struct afs_callback *cb;
 	struct afs_server *server;
 	__be32 *bp;
-	u32 tmp;
 	int ret, loop;
 
 	_enter("{%u}", call->unmarshall);
@@ -227,9 +229,9 @@
 		if (ret < 0)
 			return ret;
 
-		tmp = ntohl(call->tmp);
-		_debug("CB count: %u", tmp);
-		if (tmp != call->count && tmp != 0)
+		call->count2 = ntohl(call->tmp);
+		_debug("CB count: %u", call->count2);
+		if (call->count2 != call->count && call->count2 != 0)
 			return -EBADMSG;
 		call->offset = 0;
 		call->unmarshall++;
@@ -237,14 +239,14 @@
 	case 4:
 		_debug("extract CB array");
 		ret = afs_extract_data(call, call->buffer,
-				       call->count * 3 * 4, false);
+				       call->count2 * 3 * 4, false);
 		if (ret < 0)
 			return ret;
 
 		_debug("unmarshall CB array");
 		cb = call->request;
 		bp = call->buffer;
-		for (loop = call->count; loop > 0; loop--, cb++) {
+		for (loop = call->count2; loop > 0; loop--, cb++) {
 			cb->version	= ntohl(*bp++);
 			cb->expiry	= ntohl(*bp++);
 			cb->type	= ntohl(*bp++);
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 6344aee..7237297 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -29,6 +29,7 @@
 
 const struct file_operations afs_file_operations = {
 	.open		= afs_open,
+	.flush		= afs_flush,
 	.release	= afs_release,
 	.llseek		= generic_file_llseek,
 	.read_iter	= generic_file_read_iter,
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index 31c616a..88e44060 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -105,7 +105,7 @@
 			vnode->vfs_inode.i_mode = mode;
 		}
 
-		vnode->vfs_inode.i_ctime.tv_sec	= status->mtime_server;
+		vnode->vfs_inode.i_ctime.tv_sec	= status->mtime_client;
 		vnode->vfs_inode.i_mtime	= vnode->vfs_inode.i_ctime;
 		vnode->vfs_inode.i_atime	= vnode->vfs_inode.i_ctime;
 		vnode->vfs_inode.i_version	= data_version;
@@ -139,7 +139,7 @@
 	vnode->cb_version	= ntohl(*bp++);
 	vnode->cb_expiry	= ntohl(*bp++);
 	vnode->cb_type		= ntohl(*bp++);
-	vnode->cb_expires	= vnode->cb_expiry + get_seconds();
+	vnode->cb_expires	= vnode->cb_expiry + ktime_get_real_seconds();
 	*_bp = bp;
 }
 
@@ -676,8 +676,8 @@
 		memset(bp, 0, padsz);
 		bp = (void *) bp + padsz;
 	}
-	*bp++ = htonl(AFS_SET_MODE);
-	*bp++ = 0; /* mtime */
+	*bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME);
+	*bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
 	*bp++ = 0; /* owner */
 	*bp++ = 0; /* group */
 	*bp++ = htonl(mode & S_IALLUGO); /* unix mode */
@@ -945,8 +945,8 @@
 		memset(bp, 0, c_padsz);
 		bp = (void *) bp + c_padsz;
 	}
-	*bp++ = htonl(AFS_SET_MODE);
-	*bp++ = 0; /* mtime */
+	*bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME);
+	*bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
 	*bp++ = 0; /* owner */
 	*bp++ = 0; /* group */
 	*bp++ = htonl(S_IRWXUGO); /* unix mode */
@@ -1145,8 +1145,8 @@
 	*bp++ = htonl(vnode->fid.vnode);
 	*bp++ = htonl(vnode->fid.unique);
 
-	*bp++ = 0; /* mask */
-	*bp++ = 0; /* mtime */
+	*bp++ = htonl(AFS_SET_MTIME); /* mask */
+	*bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
 	*bp++ = 0; /* owner */
 	*bp++ = 0; /* group */
 	*bp++ = 0; /* unix mode */
@@ -1178,7 +1178,7 @@
 	_enter(",%x,{%x:%u},,",
 	       key_serial(wb->key), vnode->fid.vid, vnode->fid.vnode);
 
-	size = to - offset;
+	size = (loff_t)to - (loff_t)offset;
 	if (first != last)
 		size += (loff_t)(last - first) << PAGE_SHIFT;
 	pos = (loff_t)first << PAGE_SHIFT;
@@ -1222,8 +1222,8 @@
 	*bp++ = htonl(vnode->fid.vnode);
 	*bp++ = htonl(vnode->fid.unique);
 
-	*bp++ = 0; /* mask */
-	*bp++ = 0; /* mtime */
+	*bp++ = htonl(AFS_SET_MTIME); /* mask */
+	*bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
 	*bp++ = 0; /* owner */
 	*bp++ = 0; /* group */
 	*bp++ = 0; /* unix mode */
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 86cc726..42582e4 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -70,9 +70,9 @@
 
 	set_nlink(inode, vnode->status.nlink);
 	inode->i_uid		= vnode->status.owner;
-	inode->i_gid		= GLOBAL_ROOT_GID;
+	inode->i_gid            = vnode->status.group;
 	inode->i_size		= vnode->status.size;
-	inode->i_ctime.tv_sec	= vnode->status.mtime_server;
+	inode->i_ctime.tv_sec	= vnode->status.mtime_client;
 	inode->i_ctime.tv_nsec	= 0;
 	inode->i_atime		= inode->i_mtime = inode->i_ctime;
 	inode->i_blocks		= 0;
@@ -245,12 +245,13 @@
 			vnode->cb_version = 0;
 			vnode->cb_expiry = 0;
 			vnode->cb_type = 0;
-			vnode->cb_expires = get_seconds();
+			vnode->cb_expires = ktime_get_real_seconds();
 		} else {
 			vnode->cb_version = cb->version;
 			vnode->cb_expiry = cb->expiry;
 			vnode->cb_type = cb->type;
-			vnode->cb_expires = vnode->cb_expiry + get_seconds();
+			vnode->cb_expires = vnode->cb_expiry +
+				ktime_get_real_seconds();
 		}
 	}
 
@@ -323,7 +324,7 @@
 	    !test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags) &&
 	    !test_bit(AFS_VNODE_MODIFIED, &vnode->flags) &&
 	    !test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) {
-		if (vnode->cb_expires < get_seconds() + 10) {
+		if (vnode->cb_expires < ktime_get_real_seconds() + 10) {
 			_debug("callback expired");
 			set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);
 		} else {
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 535a38d..dd98dcd 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -11,6 +11,7 @@
 
 #include <linux/compiler.h>
 #include <linux/kernel.h>
+#include <linux/ktime.h>
 #include <linux/fs.h>
 #include <linux/pagemap.h>
 #include <linux/rxrpc.h>
@@ -105,7 +106,10 @@
 	unsigned		request_size;	/* size of request data */
 	unsigned		reply_max;	/* maximum size of reply */
 	unsigned		first_offset;	/* offset into mapping[first] */
-	unsigned		last_to;	/* amount of mapping[last] */
+	union {
+		unsigned	last_to;	/* amount of mapping[last] */
+		unsigned	count2;		/* count used in unmarshalling */
+	};
 	unsigned char		unmarshall;	/* unmarshalling phase */
 	bool			incoming;	/* T if incoming call */
 	bool			send_pages;	/* T if data from mapping should be sent */
@@ -242,7 +246,7 @@
  */
 struct afs_vlocation {
 	atomic_t		usage;
-	time_t			time_of_death;	/* time at which put reduced usage to 0 */
+	time64_t		time_of_death;	/* time at which put reduced usage to 0 */
 	struct list_head	link;		/* link in cell volume location list */
 	struct list_head	grave;		/* link in master graveyard list */
 	struct list_head	update;		/* link in master update list */
@@ -253,7 +257,7 @@
 	struct afs_cache_vlocation vldb;	/* volume information DB record */
 	struct afs_volume	*vols[3];	/* volume access record pointer (index by type) */
 	wait_queue_head_t	waitq;		/* status change waitqueue */
-	time_t			update_at;	/* time at which record should be updated */
+	time64_t		update_at;	/* time at which record should be updated */
 	spinlock_t		lock;		/* access lock */
 	afs_vlocation_state_t	state;		/* volume location state */
 	unsigned short		upd_rej_cnt;	/* ENOMEDIUM count during update */
@@ -266,7 +270,7 @@
  */
 struct afs_server {
 	atomic_t		usage;
-	time_t			time_of_death;	/* time at which put reduced usage to 0 */
+	time64_t		time_of_death;	/* time at which put reduced usage to 0 */
 	struct in_addr		addr;		/* server address */
 	struct afs_cell		*cell;		/* cell in which server resides */
 	struct list_head	link;		/* link in cell's server list */
@@ -369,8 +373,8 @@
 	struct rb_node		server_rb;	/* link in server->fs_vnodes */
 	struct rb_node		cb_promise;	/* link in server->cb_promises */
 	struct work_struct	cb_broken_work;	/* work to be done on callback break */
-	time_t			cb_expires;	/* time at which callback expires */
-	time_t			cb_expires_at;	/* time used to order cb_promise */
+	time64_t		cb_expires;	/* time at which callback expires */
+	time64_t		cb_expires_at;	/* time used to order cb_promise */
 	unsigned		cb_version;	/* callback version */
 	unsigned		cb_expiry;	/* callback expiry time */
 	afs_callback_type_t	cb_type;	/* type of callback */
@@ -749,6 +753,7 @@
 extern void afs_pages_written_back(struct afs_vnode *, struct afs_call *);
 extern ssize_t afs_file_write(struct kiocb *, struct iov_iter *);
 extern int afs_writeback_all(struct afs_vnode *);
+extern int afs_flush(struct file *, fl_owner_t);
 extern int afs_fsync(struct file *, loff_t, loff_t, int);
 
 
diff --git a/fs/afs/misc.c b/fs/afs/misc.c
index 91ea1aa..100b207 100644
--- a/fs/afs/misc.c
+++ b/fs/afs/misc.c
@@ -84,6 +84,8 @@
 	case RXKADDATALEN:	return -EKEYREJECTED;
 	case RXKADILLEGALLEVEL:	return -EKEYREJECTED;
 
+	case RXGEN_OPCODE:	return -ENOTSUPP;
+
 	default:		return -EREMOTEIO;
 	}
 }
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 25f05a8..523b1d3 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -321,6 +321,8 @@
 	struct rxrpc_call *rxcall;
 	struct msghdr msg;
 	struct kvec iov[1];
+	size_t offset;
+	u32 abort_code;
 	int ret;
 
 	_enter("%x,{%d},", addr->s_addr, ntohs(call->port));
@@ -368,9 +370,11 @@
 	msg.msg_controllen	= 0;
 	msg.msg_flags		= (call->send_pages ? MSG_MORE : 0);
 
-	/* have to change the state *before* sending the last packet as RxRPC
-	 * might give us the reply before it returns from sending the
-	 * request */
+	/* We have to change the state *before* sending the last packet as
+	 * rxrpc might give us the reply before it returns from sending the
+	 * request.  Further, if the send fails, we may already have been given
+	 * a notification and may have collected it.
+	 */
 	if (!call->send_pages)
 		call->state = AFS_CALL_AWAIT_REPLY;
 	ret = rxrpc_kernel_send_data(afs_socket, rxcall,
@@ -389,7 +393,17 @@
 	return wait_mode->wait(call);
 
 error_do_abort:
-	rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT, -ret, "KSD");
+	call->state = AFS_CALL_COMPLETE;
+	if (ret != -ECONNABORTED) {
+		rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT,
+					-ret, "KSD");
+	} else {
+		abort_code = 0;
+		offset = 0;
+		rxrpc_kernel_recv_data(afs_socket, rxcall, NULL, 0, &offset,
+				       false, &abort_code);
+		ret = call->type->abort_to_error(abort_code);
+	}
 error_kill_call:
 	afs_end_call(call);
 	_leave(" = %d", ret);
@@ -434,16 +448,18 @@
 		case -EINPROGRESS:
 		case -EAGAIN:
 			goto out;
+		case -ECONNABORTED:
+			goto call_complete;
 		case -ENOTCONN:
 			abort_code = RX_CALL_DEAD;
 			rxrpc_kernel_abort_call(afs_socket, call->rxcall,
 						abort_code, -ret, "KNC");
-			goto do_abort;
+			goto save_error;
 		case -ENOTSUPP:
-			abort_code = RX_INVALID_OPERATION;
+			abort_code = RXGEN_OPCODE;
 			rxrpc_kernel_abort_call(afs_socket, call->rxcall,
 						abort_code, -ret, "KIV");
-			goto do_abort;
+			goto save_error;
 		case -ENODATA:
 		case -EBADMSG:
 		case -EMSGSIZE:
@@ -453,7 +469,7 @@
 				abort_code = RXGEN_SS_UNMARSHAL;
 			rxrpc_kernel_abort_call(afs_socket, call->rxcall,
 						abort_code, EBADMSG, "KUM");
-			goto do_abort;
+			goto save_error;
 		}
 	}
 
@@ -464,8 +480,9 @@
 	_leave("");
 	return;
 
-do_abort:
+save_error:
 	call->error = ret;
+call_complete:
 	call->state = AFS_CALL_COMPLETE;
 	goto done;
 }
@@ -475,7 +492,6 @@
  */
 static int afs_wait_for_call_to_complete(struct afs_call *call)
 {
-	const char *abort_why;
 	int ret;
 
 	DECLARE_WAITQUEUE(myself, current);
@@ -494,13 +510,8 @@
 			continue;
 		}
 
-		abort_why = "KWC";
-		ret = call->error;
-		if (call->state == AFS_CALL_COMPLETE)
-			break;
-		abort_why = "KWI";
-		ret = -EINTR;
-		if (signal_pending(current))
+		if (call->state == AFS_CALL_COMPLETE ||
+		    signal_pending(current))
 			break;
 		schedule();
 	}
@@ -508,13 +519,14 @@
 	remove_wait_queue(&call->waitq, &myself);
 	__set_current_state(TASK_RUNNING);
 
-	/* kill the call */
+	/* Kill off the call if it's still live. */
 	if (call->state < AFS_CALL_COMPLETE) {
-		_debug("call incomplete");
+		_debug("call interrupted");
 		rxrpc_kernel_abort_call(afs_socket, call->rxcall,
-					RX_CALL_DEAD, -ret, abort_why);
+					RX_USER_ABORT, -EINTR, "KWI");
 	}
 
+	ret = call->error;
 	_debug("call complete");
 	afs_end_call(call);
 	_leave(" = %d", ret);
diff --git a/fs/afs/security.c b/fs/afs/security.c
index 8d01042..bfa9d34 100644
--- a/fs/afs/security.c
+++ b/fs/afs/security.c
@@ -340,17 +340,22 @@
 	} else {
 		if (!(access & AFS_ACE_LOOKUP))
 			goto permission_denied;
+		if ((mask & MAY_EXEC) && !(inode->i_mode & S_IXUSR))
+			goto permission_denied;
 		if (mask & (MAY_EXEC | MAY_READ)) {
 			if (!(access & AFS_ACE_READ))
 				goto permission_denied;
+			if (!(inode->i_mode & S_IRUSR))
+				goto permission_denied;
 		} else if (mask & MAY_WRITE) {
 			if (!(access & AFS_ACE_WRITE))
 				goto permission_denied;
+			if (!(inode->i_mode & S_IWUSR))
+				goto permission_denied;
 		}
 	}
 
 	key_put(key);
-	ret = generic_permission(inode, mask);
 	_leave(" = %d", ret);
 	return ret;
 
diff --git a/fs/afs/server.c b/fs/afs/server.c
index d4066ab..c001b1f 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -242,7 +242,7 @@
 	spin_lock(&afs_server_graveyard_lock);
 	if (atomic_read(&server->usage) == 0) {
 		list_move_tail(&server->grave, &afs_server_graveyard);
-		server->time_of_death = get_seconds();
+		server->time_of_death = ktime_get_real_seconds();
 		queue_delayed_work(afs_wq, &afs_server_reaper,
 				   afs_server_timeout * HZ);
 	}
@@ -277,9 +277,9 @@
 	LIST_HEAD(corpses);
 	struct afs_server *server;
 	unsigned long delay, expiry;
-	time_t now;
+	time64_t now;
 
-	now = get_seconds();
+	now = ktime_get_real_seconds();
 	spin_lock(&afs_server_graveyard_lock);
 
 	while (!list_empty(&afs_server_graveyard)) {
diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c
index 45a8639..92bd555 100644
--- a/fs/afs/vlocation.c
+++ b/fs/afs/vlocation.c
@@ -340,7 +340,8 @@
 	struct afs_vlocation *xvl;
 
 	/* wait at least 10 minutes before updating... */
-	vl->update_at = get_seconds() + afs_vlocation_update_timeout;
+	vl->update_at = ktime_get_real_seconds() +
+			afs_vlocation_update_timeout;
 
 	spin_lock(&afs_vlocation_updates_lock);
 
@@ -506,7 +507,7 @@
 	if (atomic_read(&vl->usage) == 0) {
 		_debug("buried");
 		list_move_tail(&vl->grave, &afs_vlocation_graveyard);
-		vl->time_of_death = get_seconds();
+		vl->time_of_death = ktime_get_real_seconds();
 		queue_delayed_work(afs_wq, &afs_vlocation_reap,
 				   afs_vlocation_timeout * HZ);
 
@@ -543,11 +544,11 @@
 	LIST_HEAD(corpses);
 	struct afs_vlocation *vl;
 	unsigned long delay, expiry;
-	time_t now;
+	time64_t now;
 
 	_enter("");
 
-	now = get_seconds();
+	now = ktime_get_real_seconds();
 	spin_lock(&afs_vlocation_graveyard_lock);
 
 	while (!list_empty(&afs_vlocation_graveyard)) {
@@ -622,13 +623,13 @@
 {
 	struct afs_cache_vlocation vldb;
 	struct afs_vlocation *vl, *xvl;
-	time_t now;
+	time64_t now;
 	long timeout;
 	int ret;
 
 	_enter("");
 
-	now = get_seconds();
+	now = ktime_get_real_seconds();
 
 	/* find a record to update */
 	spin_lock(&afs_vlocation_updates_lock);
@@ -684,7 +685,8 @@
 
 	/* and then reschedule */
 	_debug("reschedule");
-	vl->update_at = get_seconds() + afs_vlocation_update_timeout;
+	vl->update_at = ktime_get_real_seconds() +
+			afs_vlocation_update_timeout;
 
 	spin_lock(&afs_vlocation_updates_lock);
 
diff --git a/fs/afs/write.c b/fs/afs/write.c
index f865c3f..3fba2b5 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -148,12 +148,12 @@
 		kfree(candidate);
 		return -ENOMEM;
 	}
-	*pagep = page;
-	/* page won't leak in error case: it eventually gets cleaned off LRU */
 
 	if (!PageUptodate(page) && len != PAGE_SIZE) {
 		ret = afs_fill_page(vnode, key, index << PAGE_SHIFT, page);
 		if (ret < 0) {
+			unlock_page(page);
+			put_page(page);
 			kfree(candidate);
 			_leave(" = %d [prep]", ret);
 			return ret;
@@ -161,6 +161,9 @@
 		SetPageUptodate(page);
 	}
 
+	/* page won't leak in error case: it eventually gets cleaned off LRU */
+	*pagep = page;
+
 try_again:
 	spin_lock(&vnode->writeback_lock);
 
@@ -296,10 +299,14 @@
 		ASSERTCMP(pv.nr, ==, count);
 
 		for (loop = 0; loop < count; loop++) {
-			ClearPageUptodate(pv.pages[loop]);
+			struct page *page = pv.pages[loop];
+			ClearPageUptodate(page);
 			if (error)
-				SetPageError(pv.pages[loop]);
-			end_page_writeback(pv.pages[loop]);
+				SetPageError(page);
+			if (PageWriteback(page))
+				end_page_writeback(page);
+			if (page->index >= first)
+				first = page->index + 1;
 		}
 
 		__pagevec_release(&pv);
@@ -502,6 +509,7 @@
 
 		if (PageWriteback(page) || !PageDirty(page)) {
 			unlock_page(page);
+			put_page(page);
 			continue;
 		}
 
@@ -735,6 +743,20 @@
 }
 
 /*
+ * Flush out all outstanding writes on a file opened for writing when it is
+ * closed.
+ */
+int afs_flush(struct file *file, fl_owner_t id)
+{
+	_enter("");
+
+	if ((file->f_mode & FMODE_WRITE) == 0)
+		return 0;
+
+	return vfs_fsync(file, 0);
+}
+
+/*
  * notification that a previously read-only page is about to become writable
  * - if it returns an error, the caller will deliver a bus error signal
  */
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index 5db6c8d..0ea31a5 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -87,7 +87,8 @@
 		spin_unlock_irqrestore(&current->sighand->siglock, flags);
 	}
 
-	return (bytes > 0);
+	/* if 'wr' returned 0 (impossible) we assume -EIO (safe) */
+	return bytes == 0 ? 0 : wr < 0 ? wr : -EIO;
 }
 
 static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
@@ -101,6 +102,7 @@
 	} pkt;
 	struct file *pipe = NULL;
 	size_t pktsz;
+	int ret;
 
 	pr_debug("wait id = 0x%08lx, name = %.*s, type=%d\n",
 		 (unsigned long) wq->wait_queue_token,
@@ -174,8 +176,18 @@
 
 	mutex_unlock(&sbi->wq_mutex);
 
-	if (autofs4_write(sbi, pipe, &pkt, pktsz))
+	switch (ret = autofs4_write(sbi, pipe, &pkt, pktsz)) {
+	case 0:
+		break;
+	case -ENOMEM:
+	case -ERESTARTSYS:
+		/* Just fail this one */
+		autofs4_wait_release(sbi, wq->wait_queue_token, ret);
+		break;
+	default:
 		autofs4_catatonic_mode(sbi);
+		break;
+	}
 	fput(pipe);
 }
 
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 705bb5f..a29730c 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3397,13 +3397,6 @@
 		goto again;
 	}
 
-	/* We've already setup this transaction, go ahead and exit */
-	if (block_group->cache_generation == trans->transid &&
-	    i_size_read(inode)) {
-		dcs = BTRFS_DC_SETUP;
-		goto out_put;
-	}
-
 	/*
 	 * We want to set the generation to 0, that way if anything goes wrong
 	 * from here on out we know not to trust this cache when we load up next
@@ -3427,6 +3420,13 @@
 	}
 	WARN_ON(ret);
 
+	/* We've already setup this transaction, go ahead and exit */
+	if (block_group->cache_generation == trans->transid &&
+	    i_size_read(inode)) {
+		dcs = BTRFS_DC_SETUP;
+		goto out_put;
+	}
+
 	if (i_size_read(inode) > 0) {
 		ret = btrfs_check_trunc_cache_free_space(root,
 					&root->fs_info->global_block_rsv);
@@ -9362,6 +9362,7 @@
 	ret = btrfs_del_root(trans, tree_root, &root->root_key);
 	if (ret) {
 		btrfs_abort_transaction(trans, ret);
+		err = ret;
 		goto out_end_trans;
 	}
 
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index f089d7d..894d563 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -6812,6 +6812,20 @@
 	max_size = min_t(unsigned long, PAGE_SIZE, max_size);
 	ret = btrfs_decompress(compress_type, tmp, page,
 			       extent_offset, inline_size, max_size);
+
+	/*
+	 * decompression code contains a memset to fill in any space between the end
+	 * of the uncompressed data and the end of max_size in case the decompressed
+	 * data ends up shorter than ram_bytes.  That doesn't cover the hole between
+	 * the end of an inline extent and the beginning of the next block, so we
+	 * cover that region here.
+	 */
+
+	if (max_size + pg_offset < PAGE_SIZE) {
+		char *map = kmap(page);
+		memset(map + pg_offset + max_size, 0, PAGE_SIZE - max_size - pg_offset);
+		kunmap(page);
+	}
 	kfree(tmp);
 	return ret;
 }
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 77f9efc..9a47b55 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -6196,8 +6196,13 @@
 		goto out;
 	}
 
+	/*
+	 * Check that we don't overflow at later allocations, we request
+	 * clone_sources_count + 1 items, and compare to unsigned long inside
+	 * access_ok.
+	 */
 	if (arg->clone_sources_count >
-	    ULLONG_MAX / sizeof(*arg->clone_sources)) {
+	    ULONG_MAX / sizeof(struct clone_root) - 1) {
 		ret = -EINVAL;
 		goto out;
 	}
diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c
index 6e14404..a724d9a 100644
--- a/fs/btrfs/tests/free-space-tree-tests.c
+++ b/fs/btrfs/tests/free-space-tree-tests.c
@@ -501,7 +501,8 @@
 	path = btrfs_alloc_path();
 	if (!path) {
 		test_msg("Couldn't allocate path\n");
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out;
 	}
 
 	ret = add_block_group_free_space(&trans, root->fs_info, cache);
diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c
index 7fc89e4..83bb2f2 100644
--- a/fs/btrfs/uuid-tree.c
+++ b/fs/btrfs/uuid-tree.c
@@ -351,7 +351,5 @@
 
 out:
 	btrfs_free_path(path);
-	if (ret)
-		btrfs_warn(fs_info, "btrfs_uuid_tree_iterate failed %d", ret);
-	return 0;
+	return ret;
 }
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index c0f52c4..3d2639c 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1396,6 +1396,29 @@
 	return request_close_session(mdsc, session);
 }
 
+static bool drop_negative_children(struct dentry *dentry)
+{
+	struct dentry *child;
+	bool all_negative = true;
+
+	if (!d_is_dir(dentry))
+		goto out;
+
+	spin_lock(&dentry->d_lock);
+	list_for_each_entry(child, &dentry->d_subdirs, d_child) {
+		if (d_really_is_positive(child)) {
+			all_negative = false;
+			break;
+		}
+	}
+	spin_unlock(&dentry->d_lock);
+
+	if (all_negative)
+		shrink_dcache_parent(dentry);
+out:
+	return all_negative;
+}
+
 /*
  * Trim old(er) caps.
  *
@@ -1441,16 +1464,27 @@
 	if ((used | wanted) & ~oissued & mine)
 		goto out;   /* we need these caps */
 
-	session->s_trim_caps--;
 	if (oissued) {
 		/* we aren't the only cap.. just remove us */
 		__ceph_remove_cap(cap, true);
+		session->s_trim_caps--;
 	} else {
+		struct dentry *dentry;
 		/* try dropping referring dentries */
 		spin_unlock(&ci->i_ceph_lock);
-		d_prune_aliases(inode);
-		dout("trim_caps_cb %p cap %p  pruned, count now %d\n",
-		     inode, cap, atomic_read(&inode->i_count));
+		dentry = d_find_any_alias(inode);
+		if (dentry && drop_negative_children(dentry)) {
+			int count;
+			dput(dentry);
+			d_prune_aliases(inode);
+			count = atomic_read(&inode->i_count);
+			if (count == 1)
+				session->s_trim_caps--;
+			dout("trim_caps_cb %p cap %p pruned, count now %d\n",
+			     inode, cap, count);
+		} else {
+			dput(dentry);
+		}
 		return 0;
 	}
 
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 5c24071..ab6e7dc 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -489,9 +489,6 @@
 {
 	int i, res = -ENOMEM;
 
-	if (fscrypt_bounce_page_pool)
-		return 0;
-
 	mutex_lock(&fscrypt_init_mutex);
 	if (fscrypt_bounce_page_pool)
 		goto already_initialized;
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
index d1bbdc9..e14bb7b 100644
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -332,7 +332,7 @@
 	 * in a directory. Consequently, a user space name cannot be mapped to
 	 * a disk-space name
 	 */
-	return -EACCES;
+	return -ENOKEY;
 }
 EXPORT_SYMBOL(fscrypt_fname_usr_to_disk);
 
@@ -367,7 +367,7 @@
 		return 0;
 	}
 	if (!lookup)
-		return -EACCES;
+		return -ENOKEY;
 
 	/*
 	 * We don't have the key and we are doing a lookup; decode the
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index bb4e209..c160d2d 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -113,7 +113,7 @@
 
 	if (!inode_has_encryption_context(inode)) {
 		if (!S_ISDIR(inode->i_mode))
-			ret = -EINVAL;
+			ret = -ENOTDIR;
 		else if (!inode->i_sb->s_cop->empty_dir)
 			ret = -EOPNOTSUPP;
 		else if (!inode->i_sb->s_cop->empty_dir(inode))
diff --git a/fs/dax.c b/fs/dax.c
index bf6218d..800748f 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1265,6 +1265,17 @@
 	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
 		return -EIO;
 
+	/*
+	 * Write can allocate block for an area which has a hole page mapped
+	 * into page tables. We have to tear down these mappings so that data
+	 * written by write(2) is visible in mmap.
+	 */
+	if ((iomap->flags & IOMAP_F_NEW) && inode->i_mapping->nrpages) {
+		invalidate_inode_pages2_range(inode->i_mapping,
+					      pos >> PAGE_SHIFT,
+					      (end - 1) >> PAGE_SHIFT);
+	}
+
 	while (pos < end) {
 		unsigned offset = pos & (PAGE_SIZE - 1);
 		struct blk_dax_ctl dax = { 0 };
@@ -1329,23 +1340,6 @@
 	if (iov_iter_rw(iter) == WRITE)
 		flags |= IOMAP_WRITE;
 
-	/*
-	 * Yes, even DAX files can have page cache attached to them:  A zeroed
-	 * page is inserted into the pagecache when we have to serve a write
-	 * fault on a hole.  It should never be dirtied and can simply be
-	 * dropped from the pagecache once we get real data for the page.
-	 *
-	 * XXX: This is racy against mmap, and there's nothing we can do about
-	 * it. We'll eventually need to shift this down even further so that
-	 * we can check if we allocated blocks over a hole first.
-	 */
-	if (mapping->nrpages) {
-		ret = invalidate_inode_pages2_range(mapping,
-				pos >> PAGE_SHIFT,
-				(pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT);
-		WARN_ON_ONCE(ret);
-	}
-
 	while (iov_iter_count(iter)) {
 		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
 				iter, iomap_dax_actor);
diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c
index 286f10b..4f457d5 100644
--- a/fs/ecryptfs/messaging.c
+++ b/fs/ecryptfs/messaging.c
@@ -442,15 +442,16 @@
 	}
 	if (ecryptfs_daemon_hash) {
 		struct ecryptfs_daemon *daemon;
+		struct hlist_node *n;
 		int i;
 
 		mutex_lock(&ecryptfs_daemon_hash_mux);
 		for (i = 0; i < (1 << ecryptfs_hash_bits); i++) {
 			int rc;
 
-			hlist_for_each_entry(daemon,
-					     &ecryptfs_daemon_hash[i],
-					     euid_chain) {
+			hlist_for_each_entry_safe(daemon, n,
+						  &ecryptfs_daemon_hash[i],
+						  euid_chain) {
 				rc = ecryptfs_exorcise_daemon(daemon);
 				if (rc)
 					printk(KERN_ERR "%s: Error whilst "
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index e57d463..a8573fa 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -4733,6 +4733,7 @@
 						    EXT4_INODE_EOFBLOCKS);
 		}
 		ext4_mark_inode_dirty(handle, inode);
+		ext4_update_inode_fsync_trans(handle, inode, 1);
 		ret2 = ext4_journal_stop(handle);
 		if (ret2)
 			break;
@@ -4805,7 +4806,8 @@
 	}
 
 	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
-	     offset + len > i_size_read(inode)) {
+	    (offset + len > i_size_read(inode) ||
+	     offset + len > EXT4_I(inode)->i_disksize)) {
 		new_size = offset + len;
 		ret = inode_newsize_ok(inode, new_size);
 		if (ret)
@@ -4976,7 +4978,8 @@
 	}
 
 	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
-	     offset + len > i_size_read(inode)) {
+	    (offset + len > i_size_read(inode) ||
+	     offset + len > EXT4_I(inode)->i_disksize)) {
 		new_size = offset + len;
 		ret = inode_newsize_ok(inode, new_size);
 		if (ret)
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 170421e..2d94e85 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -771,7 +771,7 @@
 		if (err)
 			return ERR_PTR(err);
 		if (!fscrypt_has_encryption_key(dir))
-			return ERR_PTR(-EPERM);
+			return ERR_PTR(-ENOKEY);
 		if (!handle)
 			nblocks += EXT4_DATA_TRANS_BLOCKS(dir->i_sb);
 		encrypt = 1;
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 00b8a5a..b1766a6 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1378,6 +1378,8 @@
 		return NULL;
 
 	retval = ext4_fname_setup_filename(dir, d_name, 1, &fname);
+	if (retval == -ENOENT)
+		return NULL;
 	if (retval)
 		return ERR_PTR(retval);
 
@@ -1415,6 +1417,10 @@
 			       "falling back\n"));
 	}
 	nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
+	if (!nblocks) {
+		ret = NULL;
+		goto cleanup_and_exit;
+	}
 	start = EXT4_I(dir)->i_dir_start_lookup;
 	if (start >= nblocks)
 		start = 0;
@@ -3090,7 +3096,7 @@
 		if (err)
 			return err;
 		if (!fscrypt_has_encryption_key(dir))
-			return -EPERM;
+			return -ENOKEY;
 		disk_link.len = (fscrypt_fname_encrypted_size(dir, len) +
 				 sizeof(struct fscrypt_symlink_data));
 		sd = kzalloc(disk_link.len, GFP_KERNEL);
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 11f3717..8add4e8 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -277,7 +277,10 @@
 
 	err = fscrypt_setup_filename(dir, child, 1, &fname);
 	if (err) {
-		*res_page = ERR_PTR(err);
+		if (err == -ENOENT)
+			*res_page = NULL;
+		else
+			*res_page = ERR_PTR(err);
 		return NULL;
 	}
 
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 08d7dc9..8556fe1 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -403,7 +403,7 @@
 			return err;
 
 		if (!fscrypt_has_encryption_key(dir))
-			return -EPERM;
+			return -ENOKEY;
 
 		disk_link.len = (fscrypt_fname_encrypted_size(dir, len) +
 				sizeof(struct fscrypt_symlink_data));
@@ -447,7 +447,7 @@
 			goto err_out;
 
 		if (!fscrypt_has_encryption_key(inode)) {
-			err = -EPERM;
+			err = -ENOKEY;
 			goto err_out;
 		}
 
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index ffec69d..ad2e55d 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -173,19 +173,33 @@
 	spin_unlock_bh(&wb->work_lock);
 }
 
+static void finish_writeback_work(struct bdi_writeback *wb,
+				  struct wb_writeback_work *work)
+{
+	struct wb_completion *done = work->done;
+
+	if (work->auto_free)
+		kfree(work);
+	if (done && atomic_dec_and_test(&done->cnt))
+		wake_up_all(&wb->bdi->wb_waitq);
+}
+
 static void wb_queue_work(struct bdi_writeback *wb,
 			  struct wb_writeback_work *work)
 {
 	trace_writeback_queue(wb, work);
 
-	spin_lock_bh(&wb->work_lock);
-	if (!test_bit(WB_registered, &wb->state))
-		goto out_unlock;
 	if (work->done)
 		atomic_inc(&work->done->cnt);
-	list_add_tail(&work->list, &wb->work_list);
-	mod_delayed_work(bdi_wq, &wb->dwork, 0);
-out_unlock:
+
+	spin_lock_bh(&wb->work_lock);
+
+	if (test_bit(WB_registered, &wb->state)) {
+		list_add_tail(&work->list, &wb->work_list);
+		mod_delayed_work(bdi_wq, &wb->dwork, 0);
+	} else
+		finish_writeback_work(wb, work);
+
 	spin_unlock_bh(&wb->work_lock);
 }
 
@@ -1875,16 +1889,9 @@
 
 	set_bit(WB_writeback_running, &wb->state);
 	while ((work = get_next_work_item(wb)) != NULL) {
-		struct wb_completion *done = work->done;
-
 		trace_writeback_exec(wb, work);
-
 		wrote += wb_writeback(wb, work);
-
-		if (work->auto_free)
-			kfree(work);
-		if (done && atomic_dec_and_test(&done->cnt))
-			wake_up_all(&wb->bdi->wb_waitq);
+		finish_writeback_work(wb, work);
 	}
 
 	/*
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index e23ff70..39c382f 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -256,7 +256,7 @@
 			goto out;
 	}
 	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
-		if (flags & GFS2_DIF_JDATA)
+		if (new_flags & GFS2_DIF_JDATA)
 			gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH);
 		error = filemap_fdatawrite(inode->i_mapping);
 		if (error)
@@ -264,6 +264,8 @@
 		error = filemap_fdatawait(inode->i_mapping);
 		if (error)
 			goto out;
+		if (new_flags & GFS2_DIF_JDATA)
+			gfs2_ordered_del_inode(ip);
 	}
 	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
 	if (error)
diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h
index 0ac4c1f..25177e6 100644
--- a/fs/isofs/isofs.h
+++ b/fs/isofs/isofs.h
@@ -103,7 +103,7 @@
 	/* Ignore bigendian datum due to broken mastering programs */
 	return get_unaligned_le32(p);
 }
-extern int iso_date(char *, int);
+extern int iso_date(u8 *, int);
 
 struct inode;		/* To make gcc happy */
 
diff --git a/fs/isofs/rock.h b/fs/isofs/rock.h
index ed09e2b..f835976 100644
--- a/fs/isofs/rock.h
+++ b/fs/isofs/rock.h
@@ -65,7 +65,7 @@
 };
 
 struct stamp {
-	char time[7];
+	__u8 time[7];		/* actually 6 unsigned, 1 signed */
 } __attribute__ ((packed));
 
 struct RR_TF_s {
diff --git a/fs/isofs/util.c b/fs/isofs/util.c
index 005a15c..37860fe 100644
--- a/fs/isofs/util.c
+++ b/fs/isofs/util.c
@@ -15,7 +15,7 @@
  * to GMT.  Thus  we should always be correct.
  */
 
-int iso_date(char * p, int flag)
+int iso_date(u8 *p, int flag)
 {
 	int year, month, day, hour, minute, second, tz;
 	int crtime;
diff --git a/fs/libfs.c b/fs/libfs.c
index 48826d4..9588780a 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -245,7 +245,8 @@
 	struct inode *root;
 	struct qstr d_name = QSTR_INIT(name, strlen(name));
 
-	s = sget(fs_type, NULL, set_anon_super, MS_NOUSER, NULL);
+	s = sget_userns(fs_type, NULL, set_anon_super, MS_KERNMOUNT|MS_NOUSER,
+			&init_user_ns, NULL);
 	if (IS_ERR(s))
 		return ERR_CAST(s);
 
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index fc4084e..9d37324 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -365,6 +365,7 @@
 		printk(KERN_WARNING
 			"lockd_up: svc_rqst allocation failed, error=%d\n",
 			error);
+		lockd_unregister_notifiers();
 		goto out_rqst;
 	}
 
@@ -455,13 +456,16 @@
 	}
 
 	error = lockd_up_net(serv, net);
-	if (error < 0)
-		goto err_net;
+	if (error < 0) {
+		lockd_unregister_notifiers();
+		goto err_put;
+	}
 
 	error = lockd_start_svc(serv);
-	if (error < 0)
-		goto err_start;
-
+	if (error < 0) {
+		lockd_down_net(serv, net);
+		goto err_put;
+	}
 	nlmsvc_users++;
 	/*
 	 * Note: svc_serv structures have an initial use count of 1,
@@ -472,12 +476,6 @@
 err_create:
 	mutex_unlock(&nlmsvc_mutex);
 	return error;
-
-err_start:
-	lockd_down_net(serv, net);
-err_net:
-	lockd_unregister_notifiers();
-	goto err_put;
 }
 EXPORT_SYMBOL_GPL(lockd_up);
 
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index d04ec381..1e5321d 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1292,7 +1292,7 @@
 		return 0;
 	}
 
-	error = nfs_revalidate_inode(NFS_SERVER(inode), inode);
+	error = nfs_lookup_verify_inode(inode, flags);
 	dfprintk(LOOKUPCACHE, "NFS: %s: inode %lu is %s\n",
 			__func__, inode->i_ino, error ? "invalid" : "valid");
 	return !error;
@@ -1443,6 +1443,7 @@
 
 const struct dentry_operations nfs4_dentry_operations = {
 	.d_revalidate	= nfs4_lookup_revalidate,
+	.d_weak_revalidate	= nfs_weak_revalidate,
 	.d_delete	= nfs_dentry_delete,
 	.d_iput		= nfs_dentry_iput,
 	.d_automount	= nfs_d_automount,
@@ -2097,7 +2098,7 @@
 		if (new_inode != NULL)
 			nfs_drop_nlink(new_inode);
 		d_move(old_dentry, new_dentry);
-		nfs_set_verifier(new_dentry,
+		nfs_set_verifier(old_dentry,
 					nfs_save_change_attribute(new_dir));
 	} else if (error == -ENOENT)
 		nfs_dentry_handle_enoent(old_dentry);
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 074ac71..f6b0848 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -1004,9 +1004,9 @@
 	server_resp_sz = sess->fc_attrs.max_resp_sz - nfs41_maxread_overhead;
 	server_rqst_sz = sess->fc_attrs.max_rqst_sz - nfs41_maxwrite_overhead;
 
-	if (server->rsize > server_resp_sz)
+	if (!server->rsize || server->rsize > server_resp_sz)
 		server->rsize = server_resp_sz;
-	if (server->wsize > server_rqst_sz)
+	if (!server->wsize || server->wsize > server_rqst_sz)
 		server->wsize = server_rqst_sz;
 #endif /* CONFIG_NFS_V4_1 */
 }
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index a53b8e0..4638654 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -38,7 +38,6 @@
 #include <linux/mm.h>
 #include <linux/delay.h>
 #include <linux/errno.h>
-#include <linux/file.h>
 #include <linux/string.h>
 #include <linux/ratelimit.h>
 #include <linux/printk.h>
@@ -256,15 +255,12 @@
 };
 
 const u32 nfs4_fs_locations_bitmap[3] = {
-	FATTR4_WORD0_TYPE
-	| FATTR4_WORD0_CHANGE
+	FATTR4_WORD0_CHANGE
 	| FATTR4_WORD0_SIZE
 	| FATTR4_WORD0_FSID
 	| FATTR4_WORD0_FILEID
 	| FATTR4_WORD0_FS_LOCATIONS,
-	FATTR4_WORD1_MODE
-	| FATTR4_WORD1_NUMLINKS
-	| FATTR4_WORD1_OWNER
+	FATTR4_WORD1_OWNER
 	| FATTR4_WORD1_OWNER_GROUP
 	| FATTR4_WORD1_RAWDEV
 	| FATTR4_WORD1_SPACE_USED
@@ -6009,7 +6005,6 @@
 	p->server = server;
 	atomic_inc(&lsp->ls_count);
 	p->ctx = get_nfs_open_context(ctx);
-	get_file(fl->fl_file);
 	memcpy(&p->fl, fl, sizeof(p->fl));
 	return p;
 out_free_seqid:
@@ -6122,7 +6117,6 @@
 		nfs_free_seqid(data->arg.lock_seqid);
 	nfs4_put_lock_state(data->lsp);
 	put_nfs_open_context(data->ctx);
-	fput(data->fl.fl_file);
 	kfree(data);
 	dprintk("%s: done!\n", __func__);
 }
@@ -6678,9 +6672,7 @@
 				   struct page *page)
 {
 	struct nfs_server *server = NFS_SERVER(dir);
-	u32 bitmask[3] = {
-		[0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
-	};
+	u32 bitmask[3];
 	struct nfs4_fs_locations_arg args = {
 		.dir_fh = NFS_FH(dir),
 		.name = name,
@@ -6699,12 +6691,15 @@
 
 	dprintk("%s: start\n", __func__);
 
+	bitmask[0] = nfs4_fattr_bitmap[0] | FATTR4_WORD0_FS_LOCATIONS;
+	bitmask[1] = nfs4_fattr_bitmap[1];
+
 	/* Ask for the fileid of the absent filesystem if mounted_on_fileid
 	 * is not supported */
 	if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
-		bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
+		bitmask[0] &= ~FATTR4_WORD0_FILEID;
 	else
-		bitmask[0] |= FATTR4_WORD0_FILEID;
+		bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
 
 	nfs_fattr_init(&fs_locations->fattr);
 	fs_locations->server = server;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 9267191..71deeae 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1718,7 +1718,6 @@
 			break;
 		case -NFS4ERR_STALE_CLIENTID:
 			set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
-			nfs4_state_clear_reclaim_reboot(clp);
 			nfs4_state_start_reclaim_reboot(clp);
 			break;
 		case -NFS4ERR_EXPIRED:
diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
index cfb8f7c..20cd850 100644
--- a/fs/nfs/nfs4trace.h
+++ b/fs/nfs/nfs4trace.h
@@ -201,17 +201,13 @@
 		TP_ARGS(clp, error),
 
 		TP_STRUCT__entry(
-			__string(dstaddr,
-				rpc_peeraddr2str(clp->cl_rpcclient,
-					RPC_DISPLAY_ADDR))
+			__string(dstaddr, clp->cl_hostname)
 			__field(int, error)
 		),
 
 		TP_fast_assign(
 			__entry->error = error;
-			__assign_str(dstaddr,
-				rpc_peeraddr2str(clp->cl_rpcclient,
-						RPC_DISPLAY_ADDR));
+			__assign_str(dstaddr, clp->cl_hostname);
 		),
 
 		TP_printk(
@@ -1103,9 +1099,7 @@
 			__field(dev_t, dev)
 			__field(u32, fhandle)
 			__field(u64, fileid)
-			__string(dstaddr, clp ?
-				rpc_peeraddr2str(clp->cl_rpcclient,
-					RPC_DISPLAY_ADDR) : "unknown")
+			__string(dstaddr, clp ? clp->cl_hostname : "unknown")
 		),
 
 		TP_fast_assign(
@@ -1118,9 +1112,7 @@
 				__entry->fileid = 0;
 				__entry->dev = 0;
 			}
-			__assign_str(dstaddr, clp ?
-				rpc_peeraddr2str(clp->cl_rpcclient,
-					RPC_DISPLAY_ADDR) : "unknown")
+			__assign_str(dstaddr, clp ? clp->cl_hostname : "unknown")
 		),
 
 		TP_printk(
@@ -1162,9 +1154,7 @@
 			__field(dev_t, dev)
 			__field(u32, fhandle)
 			__field(u64, fileid)
-			__string(dstaddr, clp ?
-				rpc_peeraddr2str(clp->cl_rpcclient,
-					RPC_DISPLAY_ADDR) : "unknown")
+			__string(dstaddr, clp ? clp->cl_hostname : "unknown")
 			__field(int, stateid_seq)
 			__field(u32, stateid_hash)
 		),
@@ -1179,9 +1169,7 @@
 				__entry->fileid = 0;
 				__entry->dev = 0;
 			}
-			__assign_str(dstaddr, clp ?
-				rpc_peeraddr2str(clp->cl_rpcclient,
-					RPC_DISPLAY_ADDR) : "unknown")
+			__assign_str(dstaddr, clp ? clp->cl_hostname : "unknown")
 			__entry->stateid_seq =
 				be32_to_cpu(stateid->seqid);
 			__entry->stateid_hash =
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index ddce94ce..51bf1f9 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1339,7 +1339,7 @@
 			mnt->options |= NFS_OPTION_MIGRATION;
 			break;
 		case Opt_nomigration:
-			mnt->options &= NFS_OPTION_MIGRATION;
+			mnt->options &= ~NFS_OPTION_MIGRATION;
 			break;
 
 		/*
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index e4772a8..9905735 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1859,6 +1859,8 @@
 	if (res)
 		error = nfs_generic_commit_list(inode, &head, how, &cinfo);
 	nfs_commit_end(cinfo.mds);
+	if (res == 0)
+		return res;
 	if (error < 0)
 		goto out_error;
 	if (!may_wait)
diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
index 62469c6..75f942a 100644
--- a/fs/nfsd/auth.c
+++ b/fs/nfsd/auth.c
@@ -59,6 +59,9 @@
 				gi->gid[i] = exp->ex_anon_gid;
 			else
 				gi->gid[i] = rqgi->gid[i];
+
+			/* Each thread allocates its own gi, no race */
+			groups_sort(gi);
 		}
 	} else {
 		gi = get_group_info(rqgi);
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index d35eb07..9ebb2d7 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -3513,7 +3513,9 @@
 		/* ignore lock owners */
 		if (local->st_stateowner->so_is_open_owner == 0)
 			continue;
-		if (local->st_stateowner == &oo->oo_owner) {
+		if (local->st_stateowner != &oo->oo_owner)
+			continue;
+		if (local->st_stid.sc_type == NFS4_OPEN_STID) {
 			ret = local;
 			atomic_inc(&ret->st_stid.sc_count);
 			break;
@@ -3522,6 +3524,52 @@
 	return ret;
 }
 
+static __be32
+nfsd4_verify_open_stid(struct nfs4_stid *s)
+{
+	__be32 ret = nfs_ok;
+
+	switch (s->sc_type) {
+	default:
+		break;
+	case NFS4_CLOSED_STID:
+	case NFS4_CLOSED_DELEG_STID:
+		ret = nfserr_bad_stateid;
+		break;
+	case NFS4_REVOKED_DELEG_STID:
+		ret = nfserr_deleg_revoked;
+	}
+	return ret;
+}
+
+/* Lock the stateid st_mutex, and deal with races with CLOSE */
+static __be32
+nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
+{
+	__be32 ret;
+
+	mutex_lock(&stp->st_mutex);
+	ret = nfsd4_verify_open_stid(&stp->st_stid);
+	if (ret != nfs_ok)
+		mutex_unlock(&stp->st_mutex);
+	return ret;
+}
+
+static struct nfs4_ol_stateid *
+nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
+{
+	struct nfs4_ol_stateid *stp;
+	for (;;) {
+		spin_lock(&fp->fi_lock);
+		stp = nfsd4_find_existing_open(fp, open);
+		spin_unlock(&fp->fi_lock);
+		if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
+			break;
+		nfs4_put_stid(&stp->st_stid);
+	}
+	return stp;
+}
+
 static struct nfs4_openowner *
 alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
 			   struct nfsd4_compound_state *cstate)
@@ -3566,6 +3614,7 @@
 	mutex_init(&stp->st_mutex);
 	mutex_lock(&stp->st_mutex);
 
+retry:
 	spin_lock(&oo->oo_owner.so_client->cl_lock);
 	spin_lock(&fp->fi_lock);
 
@@ -3590,7 +3639,11 @@
 	spin_unlock(&fp->fi_lock);
 	spin_unlock(&oo->oo_owner.so_client->cl_lock);
 	if (retstp) {
-		mutex_lock(&retstp->st_mutex);
+		/* Handle races with CLOSE */
+		if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
+			nfs4_put_stid(&retstp->st_stid);
+			goto retry;
+		}
 		/* To keep mutex tracking happy */
 		mutex_unlock(&stp->st_mutex);
 		stp = retstp;
@@ -3967,7 +4020,8 @@
 {
 	struct nfs4_stid *ret;
 
-	ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID);
+	ret = find_stateid_by_type(cl, s,
+				NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
 	if (!ret)
 		return NULL;
 	return delegstateid(ret);
@@ -3990,6 +4044,12 @@
 	deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
 	if (deleg == NULL)
 		goto out;
+	if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
+		nfs4_put_stid(&deleg->dl_stid);
+		if (cl->cl_minorversion)
+			status = nfserr_deleg_revoked;
+		goto out;
+	}
 	flags = share_access_to_flags(open->op_share_access);
 	status = nfs4_check_delegmode(deleg, flags);
 	if (status) {
@@ -4393,6 +4453,7 @@
 	struct nfs4_ol_stateid *stp = NULL;
 	struct nfs4_delegation *dp = NULL;
 	__be32 status;
+	bool new_stp = false;
 
 	/*
 	 * Lookup file; if found, lookup stateid and check open request,
@@ -4404,9 +4465,7 @@
 		status = nfs4_check_deleg(cl, open, &dp);
 		if (status)
 			goto out;
-		spin_lock(&fp->fi_lock);
-		stp = nfsd4_find_existing_open(fp, open);
-		spin_unlock(&fp->fi_lock);
+		stp = nfsd4_find_and_lock_existing_open(fp, open);
 	} else {
 		open->op_file = NULL;
 		status = nfserr_bad_stateid;
@@ -4414,35 +4473,31 @@
 			goto out;
 	}
 
+	if (!stp) {
+		stp = init_open_stateid(fp, open);
+		if (!open->op_stp)
+			new_stp = true;
+	}
+
 	/*
 	 * OPEN the file, or upgrade an existing OPEN.
 	 * If truncate fails, the OPEN fails.
+	 *
+	 * stp is already locked.
 	 */
-	if (stp) {
+	if (!new_stp) {
 		/* Stateid was found, this is an OPEN upgrade */
-		mutex_lock(&stp->st_mutex);
 		status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
 		if (status) {
 			mutex_unlock(&stp->st_mutex);
 			goto out;
 		}
 	} else {
-		/* stp is returned locked. */
-		stp = init_open_stateid(fp, open);
-		/* See if we lost the race to some other thread */
-		if (stp->st_access_bmap != 0) {
-			status = nfs4_upgrade_open(rqstp, fp, current_fh,
-						stp, open);
-			if (status) {
-				mutex_unlock(&stp->st_mutex);
-				goto out;
-			}
-			goto upgrade_out;
-		}
 		status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
 		if (status) {
-			mutex_unlock(&stp->st_mutex);
+			stp->st_stid.sc_type = NFS4_CLOSED_STID;
 			release_open_stateid(stp);
+			mutex_unlock(&stp->st_mutex);
 			goto out;
 		}
 
@@ -4451,7 +4506,7 @@
 		if (stp->st_clnt_odstate == open->op_odstate)
 			open->op_odstate = NULL;
 	}
-upgrade_out:
+
 	nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
 	mutex_unlock(&stp->st_mutex);
 
@@ -4677,7 +4732,7 @@
 	spin_unlock(&nn->blocked_locks_lock);
 
 	while (!list_empty(&reaplist)) {
-		nbl = list_first_entry(&nn->blocked_locks_lru,
+		nbl = list_first_entry(&reaplist,
 					struct nfsd4_blocked_lock, nbl_lru);
 		list_del_init(&nbl->nbl_lru);
 		posix_unblock_lock(&nbl->nbl_lock);
@@ -4858,6 +4913,16 @@
 		     struct nfs4_stid **s, struct nfsd_net *nn)
 {
 	__be32 status;
+	bool return_revoked = false;
+
+	/*
+	 *  only return revoked delegations if explicitly asked.
+	 *  otherwise we report revoked or bad_stateid status.
+	 */
+	if (typemask & NFS4_REVOKED_DELEG_STID)
+		return_revoked = true;
+	else if (typemask & NFS4_DELEG_STID)
+		typemask |= NFS4_REVOKED_DELEG_STID;
 
 	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
 		return nfserr_bad_stateid;
@@ -4872,6 +4937,12 @@
 	*s = find_stateid_by_type(cstate->clp, stateid, typemask);
 	if (!*s)
 		return nfserr_bad_stateid;
+	if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
+		nfs4_put_stid(*s);
+		if (cstate->minorversion)
+			return nfserr_deleg_revoked;
+		return nfserr_bad_stateid;
+	}
 	return nfs_ok;
 }
 
@@ -5291,7 +5362,6 @@
 	bool unhashed;
 	LIST_HEAD(reaplist);
 
-	s->st_stid.sc_type = NFS4_CLOSED_STID;
 	spin_lock(&clp->cl_lock);
 	unhashed = unhash_open_stateid(s, &reaplist);
 
@@ -5330,10 +5400,12 @@
 	nfsd4_bump_seqid(cstate, status);
 	if (status)
 		goto out; 
+
+	stp->st_stid.sc_type = NFS4_CLOSED_STID;
 	nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
-	mutex_unlock(&stp->st_mutex);
 
 	nfsd4_close_open_stateid(stp);
+	mutex_unlock(&stp->st_mutex);
 
 	/* put reference from nfs4_preprocess_seqid_op */
 	nfs4_put_stid(&stp->st_stid);
@@ -7071,7 +7143,7 @@
 	spin_unlock(&nn->blocked_locks_lock);
 
 	while (!list_empty(&reaplist)) {
-		nbl = list_first_entry(&nn->blocked_locks_lru,
+		nbl = list_first_entry(&reaplist,
 					struct nfsd4_blocked_lock, nbl_lru);
 		list_del_init(&nbl->nbl_lru);
 		posix_unblock_lock(&nbl->nbl_lock);
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 1645b97..5c48006 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -155,7 +155,8 @@
 
 int nfsd_minorversion(u32 minorversion, enum vers_op change)
 {
-	if (minorversion > NFSD_SUPPORTED_MINOR_VERSION)
+	if (minorversion > NFSD_SUPPORTED_MINOR_VERSION &&
+	    change != NFSD_AVAIL)
 		return -1;
 	switch(change) {
 	case NFSD_SET:
@@ -399,23 +400,20 @@
 
 void nfsd_reset_versions(void)
 {
-	int found_one = 0;
 	int i;
 
-	for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) {
-		if (nfsd_program.pg_vers[i])
-			found_one = 1;
-	}
+	for (i = 0; i < NFSD_NRVERS; i++)
+		if (nfsd_vers(i, NFSD_TEST))
+			return;
 
-	if (!found_one) {
-		for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++)
-			nfsd_program.pg_vers[i] = nfsd_version[i];
-#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
-		for (i = NFSD_ACL_MINVERS; i < NFSD_ACL_NRVERS; i++)
-			nfsd_acl_program.pg_vers[i] =
-				nfsd_acl_version[i];
-#endif
-	}
+	for (i = 0; i < NFSD_NRVERS; i++)
+		if (i != 4)
+			nfsd_vers(i, NFSD_SET);
+		else {
+			int minor = 0;
+			while (nfsd_minorversion(minor, NFSD_SET) >= 0)
+				minor++;
+		}
 }
 
 /*
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 7d18d62..36362d4 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1956,8 +1956,6 @@
 					  err, ii->vfs_inode.i_ino);
 				return err;
 			}
-			mark_buffer_dirty(ibh);
-			nilfs_mdt_mark_dirty(ifile);
 			spin_lock(&nilfs->ns_inode_lock);
 			if (likely(!ii->i_bh))
 				ii->i_bh = ibh;
@@ -1966,6 +1964,10 @@
 			goto retry;
 		}
 
+		/* Always redirty the buffer to avoid race condition */
+		mark_buffer_dirty(ii->i_bh);
+		nilfs_mdt_mark_dirty(ifile);
+
 		clear_bit(NILFS_I_QUEUED, &ii->i_state);
 		set_bit(NILFS_I_BUSY, &ii->i_state);
 		list_move_tail(&ii->i_dirty, &sci->sc_dirty_files);
diff --git a/fs/nsfs.c b/fs/nsfs.c
index 8718af8..80fdfad 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -90,6 +90,7 @@
 		return ERR_PTR(-ENOMEM);
 	}
 	d_instantiate(dentry, inode);
+	dentry->d_flags |= DCACHE_RCUACCESS;
 	dentry->d_fsdata = (void *)ns->ops;
 	d = atomic_long_cmpxchg(&ns->stashed, 0, (unsigned long)dentry);
 	if (d) {
diff --git a/fs/proc/array.c b/fs/proc/array.c
index c932ec4..794b52a 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -423,8 +423,11 @@
 		 * safe because the task has stopped executing permanently.
 		 */
 		if (permitted && (task->flags & PF_DUMPCORE)) {
-			eip = KSTK_EIP(task);
-			esp = KSTK_ESP(task);
+			if (try_get_task_stack(task)) {
+				eip = KSTK_EIP(task);
+				esp = KSTK_ESP(task);
+				put_task_stack(task);
+			}
 		}
 	}
 
diff --git a/fs/proc/proc_tty.c b/fs/proc/proc_tty.c
index 15f327b..7340c36 100644
--- a/fs/proc/proc_tty.c
+++ b/fs/proc/proc_tty.c
@@ -14,6 +14,7 @@
 #include <linux/tty.h>
 #include <linux/seq_file.h>
 #include <linux/bitops.h>
+#include "internal.h"
 
 /*
  * The /proc/tty directory inodes...
@@ -164,7 +165,7 @@
 	if (!ent)
 		return;
 		
-	remove_proc_entry(driver->driver_name, proc_tty_driver);
+	remove_proc_entry(ent->name, proc_tty_driver);
 	
 	driver->proc_entry = NULL;
 }
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 4942549..4b1f6d5 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -710,7 +710,7 @@
 	else
 		sectorsize = sb->s_blocksize;
 
-	sector += (sbi->s_session << sb->s_blocksize_bits);
+	sector += (((loff_t)sbi->s_session) << sb->s_blocksize_bits);
 
 	udf_debug("Starting at sector %u (%ld byte sectors)\n",
 		  (unsigned int)(sector >> sb->s_blocksize_bits),
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 2cde073..9d9c032 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -419,7 +419,7 @@
 			 * in such case.
 			 */
 			down_read(&mm->mmap_sem);
-			ret = 0;
+			ret = VM_FAULT_NOPAGE;
 		}
 	}
 
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 7eb9970..8ad65d4 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -2713,7 +2713,7 @@
 					&i)))
 				goto done;
 			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
-			cur->bc_rec.b.br_state = XFS_EXT_NORM;
+			cur->bc_rec.b.br_state = new->br_state;
 			if ((error = xfs_btree_insert(cur, &i)))
 				goto done;
 			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index fe9a9a1..98ca9f1 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -2386,6 +2386,7 @@
 				 */
 				if (ip->i_ino != inum + i) {
 					xfs_iunlock(ip, XFS_ILOCK_EXCL);
+					rcu_read_unlock();
 					continue;
 				}
 			}
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 5b81f7f..33c3899 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -871,22 +871,6 @@
 		return error;
 
 	/*
-	 * We are going to log the inode size change in this transaction so
-	 * any previous writes that are beyond the on disk EOF and the new
-	 * EOF that have not been written out need to be written here.  If we
-	 * do not write the data out, we expose ourselves to the null files
-	 * problem. Note that this includes any block zeroing we did above;
-	 * otherwise those blocks may not be zeroed after a crash.
-	 */
-	if (did_zeroing ||
-	    (newsize > ip->i_d.di_size && oldsize != ip->i_d.di_size)) {
-		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
-						      ip->i_d.di_size, newsize);
-		if (error)
-			return error;
-	}
-
-	/*
 	 * We've already locked out new page faults, so now we can safely remove
 	 * pages from the page cache knowing they won't get refaulted until we
 	 * drop the XFS_MMAP_EXCL lock after the extent manipulations are
@@ -902,9 +886,29 @@
 	 * user visible changes). There's not much we can do about this, except
 	 * to hope that the caller sees ENOMEM and retries the truncate
 	 * operation.
+	 *
+	 * And we update in-core i_size and truncate page cache beyond newsize
+	 * before writeback the [di_size, newsize] range, so we're guaranteed
+	 * not to write stale data past the new EOF on truncate down.
 	 */
 	truncate_setsize(inode, newsize);
 
+	/*
+	 * We are going to log the inode size change in this transaction so
+	 * any previous writes that are beyond the on disk EOF and the new
+	 * EOF that have not been written out need to be written here.  If we
+	 * do not write the data out, we expose ourselves to the null files
+	 * problem. Note that this includes any block zeroing we did above;
+	 * otherwise those blocks may not be zeroed after a crash.
+	 */
+	if (did_zeroing ||
+	    (newsize > ip->i_d.di_size && oldsize != ip->i_d.di_size)) {
+		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
+						ip->i_d.di_size, newsize - 1);
+		if (error)
+			return error;
+	}
+
 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
 	if (error)
 		return error;
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 0590926..1e26f45 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -753,7 +753,7 @@
 	 * in the in-core log.  The following number can be made tighter if
 	 * we actually look at the block size of the filesystem.
 	 */
-	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
+	num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log));
 	if (head_blk >= num_scan_bblks) {
 		/*
 		 * We are guaranteed that the entire check can be performed
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index dc81e52..2e6000a 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -778,7 +778,14 @@
  */
 #define PERCPU_INPUT(cacheline)						\
 	VMLINUX_SYMBOL(__per_cpu_start) = .;				\
+	VMLINUX_SYMBOL(__per_cpu_user_mapped_start) = .;		\
 	*(.data..percpu..first)						\
+	. = ALIGN(cacheline);						\
+	*(.data..percpu..user_mapped)					\
+	*(.data..percpu..user_mapped..shared_aligned)			\
+	. = ALIGN(PAGE_SIZE);						\
+	*(.data..percpu..user_mapped..page_aligned)			\
+	VMLINUX_SYMBOL(__per_cpu_user_mapped_end) = .;			\
 	. = ALIGN(PAGE_SIZE);						\
 	*(.data..percpu..page_aligned)					\
 	. = ALIGN(cacheline);						\
diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h
index caedb74..43783ef 100644
--- a/include/clocksource/arm_arch_timer.h
+++ b/include/clocksource/arm_arch_timer.h
@@ -59,7 +59,7 @@
 extern u32 arch_timer_get_rate(void);
 extern u64 (*arch_timer_read_counter)(void);
 extern struct arch_timer_kvm_info *arch_timer_get_kvm_info(void);
-
+extern void arch_timer_mem_get_cval(u32 *lo, u32 *hi);
 #else
 
 static inline u32 arch_timer_get_rate(void)
@@ -72,6 +72,10 @@
 	return 0;
 }
 
+static inline void arch_timer_mem_get_cval(u32 *lo, u32 *hi)
+{
+	*lo = *hi = ~0U;
+}
 #endif
 
 #endif
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index f6d9af3e..cac5735 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -80,6 +80,14 @@
 			    struct ahash_instance *inst);
 void ahash_free_instance(struct crypto_instance *inst);
 
+int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
+		    unsigned int keylen);
+
+static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
+{
+	return alg->setkey != shash_no_setkey;
+}
+
 int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
 			    struct hash_alg_common *alg,
 			    struct crypto_instance *inst);
diff --git a/include/crypto/mcryptd.h b/include/crypto/mcryptd.h
index 4a53c0d..e045722 100644
--- a/include/crypto/mcryptd.h
+++ b/include/crypto/mcryptd.h
@@ -26,6 +26,7 @@
 
 struct mcryptd_cpu_queue {
 	struct crypto_queue queue;
+	spinlock_t q_lock;
 	struct work_struct work;
 };
 
diff --git a/include/dt-bindings/arm/arm-smmu.h b/include/dt-bindings/arm/arm-smmu.h
new file mode 100644
index 0000000..3a1dbd3
--- /dev/null
+++ b/include/dt-bindings/arm/arm-smmu.h
@@ -0,0 +1,27 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DT_ARM_SMMU_H__
+#define __DT_ARM_SMMU_H__
+
+#define ARM_SMMU_OPT_SECURE_CFG_ACCESS  (1 << 0)
+#define ARM_SMMU_OPT_FATAL_ASF		(1 << 1)
+#define ARM_SMMU_OPT_SKIP_INIT		(1 << 2)
+#define ARM_SMMU_OPT_DYNAMIC		(1 << 3)
+#define ARM_SMMU_OPT_3LVL_TABLES	(1 << 4)
+#define ARM_SMMU_OPT_NO_ASID_RETENTION	(1 << 5)
+#define ARM_SMMU_OPT_DISABLE_ATOS	(1 << 6)
+#define ARM_SMMU_OPT_MMU500_ERRATA1	(1 << 7)
+#define ARM_SMMU_OPT_STATIC_CB          (1 << 8)
+#define ARM_SMMU_OPT_HALT               (1 << 9)
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h b/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h
index 7e1394c..36d34b1 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -62,47 +62,44 @@
 #define GCC_GP2_CLK_SRC						44
 #define GCC_GP3_CLK						45
 #define GCC_GP3_CLK_SRC						46
-#define GCC_MSS_CFG_AHB_CLK					47
-#define GCC_MSS_GPLL0_DIV_CLK_SRC				48
-#define GCC_MSS_SNOC_AXI_CLK					49
-#define GCC_PCIE_0_CLKREF_CLK					50
-#define GCC_PCIE_AUX_CLK					51
-#define GCC_PCIE_AUX_PHY_CLK_SRC				52
-#define GCC_PCIE_CFG_AHB_CLK					53
-#define GCC_PCIE_MSTR_AXI_CLK					54
-#define GCC_PCIE_PHY_REFGEN_CLK					55
-#define GCC_PCIE_PHY_REFGEN_CLK_SRC				56
-#define GCC_PCIE_PIPE_CLK					57
-#define GCC_PCIE_SLEEP_CLK					58
-#define GCC_PCIE_SLV_AXI_CLK					59
-#define GCC_PCIE_SLV_Q2A_AXI_CLK				60
-#define GCC_PDM2_CLK						61
-#define GCC_PDM2_CLK_SRC					62
-#define GCC_PDM_AHB_CLK						63
-#define GCC_PDM_XO4_CLK						64
-#define GCC_PRNG_AHB_CLK					65
-#define GCC_SDCC1_AHB_CLK					66
-#define GCC_SDCC1_APPS_CLK					67
-#define GCC_SDCC1_APPS_CLK_SRC					68
-#define GCC_SPMI_FETCHER_AHB_CLK				69
-#define GCC_SPMI_FETCHER_CLK					70
-#define GCC_SPMI_FETCHER_CLK_SRC				71
-#define GCC_SYS_NOC_CPUSS_AHB_CLK				72
-#define GCC_SYS_NOC_USB3_CLK					73
-#define GCC_USB30_MASTER_CLK					74
-#define GCC_USB30_MASTER_CLK_SRC				75
-#define GCC_USB30_MOCK_UTMI_CLK					76
-#define GCC_USB30_MOCK_UTMI_CLK_SRC				77
-#define GCC_USB30_SLEEP_CLK					78
-#define GCC_USB3_PHY_AUX_CLK					79
-#define GCC_USB3_PHY_AUX_CLK_SRC				80
-#define GCC_USB3_PHY_PIPE_CLK					81
-#define GCC_USB3_PRIM_CLKREF_CLK				82
-#define GCC_USB_PHY_CFG_AHB2PHY_CLK				83
-#define GPLL0							84
-#define GPLL0_OUT_EVEN						85
-#define GPLL4							86
-#define GPLL4_OUT_EVEN						87
+#define GCC_PCIE_0_CLKREF_CLK					47
+#define GCC_PCIE_AUX_CLK					48
+#define GCC_PCIE_AUX_PHY_CLK_SRC				49
+#define GCC_PCIE_CFG_AHB_CLK					50
+#define GCC_PCIE_MSTR_AXI_CLK					51
+#define GCC_PCIE_PHY_REFGEN_CLK					52
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC				53
+#define GCC_PCIE_PIPE_CLK					54
+#define GCC_PCIE_SLEEP_CLK					55
+#define GCC_PCIE_SLV_AXI_CLK					56
+#define GCC_PCIE_SLV_Q2A_AXI_CLK				57
+#define GCC_PDM2_CLK						58
+#define GCC_PDM2_CLK_SRC					59
+#define GCC_PDM_AHB_CLK						60
+#define GCC_PDM_XO4_CLK						61
+#define GCC_PRNG_AHB_CLK					62
+#define GCC_SDCC1_AHB_CLK					63
+#define GCC_SDCC1_APPS_CLK					64
+#define GCC_SDCC1_APPS_CLK_SRC					65
+#define GCC_SPMI_FETCHER_AHB_CLK				66
+#define GCC_SPMI_FETCHER_CLK					67
+#define GCC_SPMI_FETCHER_CLK_SRC				68
+#define GCC_SYS_NOC_CPUSS_AHB_CLK				69
+#define GCC_SYS_NOC_USB3_CLK					70
+#define GCC_USB30_MASTER_CLK					71
+#define GCC_USB30_MASTER_CLK_SRC				72
+#define GCC_USB30_MOCK_UTMI_CLK					73
+#define GCC_USB30_MOCK_UTMI_CLK_SRC				74
+#define GCC_USB30_SLEEP_CLK					75
+#define GCC_USB3_PHY_AUX_CLK					76
+#define GCC_USB3_PHY_AUX_CLK_SRC				77
+#define GCC_USB3_PHY_PIPE_CLK					78
+#define GCC_USB3_PRIM_CLKREF_CLK				79
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK				80
+#define GPLL0							81
+#define GPLL0_OUT_EVEN						82
+#define GPLL4							83
+#define GPLL4_OUT_EVEN						84
 
 /* CPU clocks */
 #define CLOCK_A7SS						0
diff --git a/include/dt-bindings/regulator/qcom,rpmh-regulator.h b/include/dt-bindings/regulator/qcom,rpmh-regulator.h
index 3dad124..5d152f3 100644
--- a/include/dt-bindings/regulator/qcom,rpmh-regulator.h
+++ b/include/dt-bindings/regulator/qcom,rpmh-regulator.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -43,19 +43,35 @@
 
 /*
  * These mode constants may be used for qcom,supported-modes and qcom,init-mode
- * properties of an RPMh resource.  Modes should be matched to the physical
- * PMIC regulator type (i.e. LDO, SMPS, or BOB).
+ * properties of an RPMh resource.  Each type of regulator supports a subset of
+ * the possible modes.
+ *
+ * %RPMH_REGULATOR_MODE_PASS:	Pass-through mode in which output is directly
+ *				tied to input.  This mode is only supported by
+ *				BOB type regulators.
+ * %RPMH_REGULATOR_MODE_RET:	Retention mode in which only an extremely small
+ *				load current is allowed.  This mode is supported
+ *				by LDO and SMPS type regulators.
+ * %RPMH_REGULATOR_MODE_LPM:	Low power mode in which a small load current is
+ *				allowed.  This mode corresponds to PFM for SMPS
+ *				and BOB type regulators.  This mode is supported
+ *				by LDO, HFSMPS, BOB, and PMIC4 FTSMPS type
+ *				regulators.
+ * %RPMH_REGULATOR_MODE_AUTO:	Auto mode in which the regulator hardware
+ *				automatically switches between LPM and HPM based
+ *				upon the real-time load current.  This mode is
+ *				supported by HFSMPS, BOB, and PMIC4 FTSMPS type
+ *				regulators.
+ * %RPMH_REGULATOR_MODE_HPM:	High power mode in which the full rated current
+ *				of the regulator is allowed.  This mode
+ *				corresponds to PWM for SMPS and BOB type
+ *				regulators.  This mode is supported by all types
+ *				of regulators.
  */
-#define RPMH_REGULATOR_MODE_LDO_LPM	5
-#define RPMH_REGULATOR_MODE_LDO_HPM	7
-
-#define RPMH_REGULATOR_MODE_SMPS_PFM	5
-#define RPMH_REGULATOR_MODE_SMPS_AUTO	6
-#define RPMH_REGULATOR_MODE_SMPS_PWM	7
-
-#define RPMH_REGULATOR_MODE_BOB_PASS	0
-#define RPMH_REGULATOR_MODE_BOB_PFM	1
-#define RPMH_REGULATOR_MODE_BOB_AUTO	2
-#define RPMH_REGULATOR_MODE_BOB_PWM	3
+#define RPMH_REGULATOR_MODE_PASS	0
+#define RPMH_REGULATOR_MODE_RET		1
+#define RPMH_REGULATOR_MODE_LPM		2
+#define RPMH_REGULATOR_MODE_AUTO	3
+#define RPMH_REGULATOR_MODE_HPM		4
 
 #endif
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 61a3d90..ca2b4c4 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -276,11 +276,8 @@
 /* Arch dependent functions for cpu hotplug support */
 int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu);
 int acpi_unmap_cpu(int cpu);
-int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid);
 #endif /* CONFIG_ACPI_HOTPLUG_CPU */
 
-void acpi_set_processor_mapping(void);
-
 #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
 int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr);
 #endif
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 97498be..75ffd3b 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -43,6 +43,7 @@
 	u32 max_entries;
 	u32 map_flags;
 	u32 pages;
+	bool unpriv_array;
 	struct user_struct *user;
 	const struct bpf_map_ops *ops;
 	struct work_struct work;
@@ -189,6 +190,7 @@
 struct bpf_array {
 	struct bpf_map map;
 	u32 elem_size;
+	u32 index_mask;
 	/* 'ownership' of prog_array is claimed by the first program that
 	 * is going to use this map or by the first program which FD is stored
 	 * in the map to make sure that all callers and callees have the same
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 3101141..070fc49 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -67,7 +67,11 @@
 };
 
 struct bpf_insn_aux_data {
-	enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
+	union {
+		enum bpf_reg_type ptr_type;     /* pointer type for load/store insns */
+		struct bpf_map *map_ptr;        /* pointer for call insn into lookup_elem */
+	};
+	bool seen; /* this insn was processed by the verifier */
 };
 
 #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 447a915..4431ea2 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -239,12 +239,10 @@
 {
 	if (err == 0)
 		return VM_FAULT_LOCKED;
-	if (err == -EFAULT)
+	if (err == -EFAULT || err == -EAGAIN)
 		return VM_FAULT_NOPAGE;
 	if (err == -ENOMEM)
 		return VM_FAULT_OOM;
-	if (err == -EAGAIN)
-		return VM_FAULT_RETRY;
 	/* -ENOSPC, -EDQUOT, -EIO ... */
 	return VM_FAULT_SIGBUS;
 }
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 0d442e3..47f5ba6 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -199,7 +199,7 @@
 extern void clockevents_resume(void);
 
 # ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
-#  ifdef CONFIG_ARCH_HAS_TICK_BROADCAST
+#  if defined(CONFIG_ARCH_HAS_TICK_BROADCAST) && defined(CONFIG_SMP)
 extern void tick_broadcast(const struct cpumask *mask);
 #  else
 #   define tick_broadcast	NULL
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 3484287..1f7e4ec 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -44,6 +44,13 @@
 extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
 extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);
 
+extern ssize_t cpu_show_meltdown(struct device *dev,
+				 struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_spectre_v1(struct device *dev,
+				   struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_spectre_v2(struct device *dev,
+				   struct device_attribute *attr, char *buf);
+
 extern __printf(4, 5)
 struct device *cpu_device_create(struct device *parent, void *drvdata,
 				 const struct attribute_group **groups,
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index d921206..6500554 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -51,7 +51,7 @@
 	CPUHP_SH_SH3X_PREPARE,
 	CPUHP_BLK_MQ_PREPARE,
 	CPUHP_TOPOLOGY_PREPARE,
-	CPUHP_TIMERS_DEAD,
+	CPUHP_TIMERS_PREPARE,
 	CPUHP_NOTF_ERR_INJ_PREPARE,
 	CPUHP_MIPS_SOC_PREPARE,
 	CPUHP_BRINGUP_CPU,
diff --git a/include/linux/cred.h b/include/linux/cred.h
index f0e70a1..cf1a5d0 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -82,6 +82,7 @@
 extern void set_groups(struct cred *, struct group_info *);
 extern int groups_search(const struct group_info *, kgid_t);
 extern bool may_setgroups(void);
+extern void groups_sort(struct group_info *);
 
 /*
  * The security context of a task
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index ec9c128..69935e66 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -730,7 +730,6 @@
 	return ret;
 }
 
-#ifdef CONFIG_HAS_DMA
 static inline int dma_get_cache_alignment(void)
 {
 #ifdef ARCH_DMA_MINALIGN
@@ -738,7 +737,6 @@
 #endif
 	return 1;
 }
-#endif
 
 /* flags for the coherent memory api */
 #define	DMA_MEMORY_MAP			0x01
diff --git a/include/linux/fence.h b/include/linux/fence.h
index fd9b89f..7c9b78c 100644
--- a/include/linux/fence.h
+++ b/include/linux/fence.h
@@ -47,7 +47,7 @@
  * can be compared to decide which fence would be signaled later.
  * @flags: A mask of FENCE_FLAG_* defined below
  * @timestamp: Timestamp when the fence was signaled.
- * @status: Optional, only valid if < 0, must be set before calling
+ * @error: Optional, only valid if < 0, must be set before calling
  * fence_signal, indicates that the fence has completed with an error.
  *
  * the flags member must be manipulated and read using the appropriate
@@ -79,7 +79,7 @@
 	unsigned seqno;
 	unsigned long flags;
 	ktime_t timestamp;
-	int status;
+	int error;
 };
 
 enum fence_flag_bits {
@@ -133,7 +133,7 @@
  * or some failure occurred that made it impossible to enable
  * signaling. True indicates successful enabling.
  *
- * fence->status may be set in enable_signaling, but only when false is
+ * fence->error may be set in enable_signaling, but only when false is
  * returned.
  *
  * Calling fence_signal before enable_signaling is called allows
@@ -145,7 +145,7 @@
  * the second time will be a noop since it was already signaled.
  *
  * Notes on signaled:
- * May set fence->status if returning true.
+ * May set fence->error if returning true.
  *
  * Notes on wait:
  * Must not be NULL, set to fence_default_wait for default implementation.
@@ -329,6 +329,19 @@
 }
 
 /**
+ * __fence_is_later - return if f1 is chronologically later than f2
+ * @f1:	[in]	the first fence's seqno
+ * @f2:	[in]	the second fence's seqno from the same context
+ *
+ * Returns true if f1 is chronologically later than f2. Both fences must be
+ * from the same context, since a seqno is not common across contexts.
+ */
+static inline bool __fence_is_later(u32 f1, u32 f2)
+{
+	return (int)(f1 - f2) > 0;
+}
+
+/**
  * fence_is_later - return if f1 is chronologically later than f2
  * @f1:	[in]	the first fence from the same context
  * @f2:	[in]	the second fence from the same context
@@ -341,7 +354,7 @@
 	if (WARN_ON(f1->context != f2->context))
 		return false;
 
-	return (int)(f1->seqno - f2->seqno) > 0;
+	return __fence_is_later(f1->seqno, f2->seqno);
 }
 
 /**
@@ -369,6 +382,50 @@
 		return fence_is_signaled(f2) ? NULL : f2;
 }
 
+/**
+ * fence_get_status_locked - returns the status upon completion
+ * @fence: [in]	the fence to query
+ *
+ * Drivers can supply an optional error status condition before they signal
+ * the fence (to indicate whether the fence was completed due to an error
+ * rather than success). The value of the status condition is only valid
+ * if the fence has been signaled, fence_get_status_locked() first checks
+ * the signal state before reporting the error status.
+ *
+ * Returns 0 if the fence has not yet been signaled, 1 if the fence has
+ * been signaled without an error condition, or a negative error code
+ * if the fence has been completed in err.
+ */
+static inline int fence_get_status_locked(struct fence *fence)
+{
+	if (fence_is_signaled_locked(fence))
+		return fence->error ?: 1;
+	else
+		return 0;
+}
+
+int fence_get_status(struct fence *fence);
+
+/**
+ * fence_set_error - flag an error condition on the fence
+ * @fence: [in]	the fence
+ * @error: [in]	the error to store
+ *
+ * Drivers can supply an optional error status condition before they signal
+ * the fence, to indicate that the fence was completed due to an error
+ * rather than success. This must be set before signaling (so that the value
+ * is visible before any waiters on the signal callback are woken). This
+ * helper exists to help catching erroneous setting of #fence.error.
+ */
+static inline void fence_set_error(struct fence *fence,
+				       int error)
+{
+	BUG_ON(test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags));
+	BUG_ON(error >= 0 || error < -MAX_ERRNO);
+
+	fence->error = error;
+}
+
 signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout);
 signed long fence_wait_any_timeout(struct fence **fences, uint32_t count,
 				   bool intr, signed long timeout);
diff --git a/include/linux/frame.h b/include/linux/frame.h
index e6baaba..d772c61 100644
--- a/include/linux/frame.h
+++ b/include/linux/frame.h
@@ -11,7 +11,7 @@
  * For more information, see tools/objtool/Documentation/stack-validation.txt.
  */
 #define STACK_FRAME_NON_STANDARD(func) \
-	static void __used __section(__func_stack_frame_non_standard) \
+	static void __used __section(.discard.func_stack_frame_non_standard) \
 		*__func_stack_frame_non_standard_##func = func
 
 #else /* !CONFIG_STACK_VALIDATION */
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index 115bb81..94a8aae 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -764,7 +764,7 @@
 {
 	if (fscache_cookie_valid(cookie) && PageFsCache(page))
 		return __fscache_maybe_release_page(cookie, page, gfp);
-	return false;
+	return true;
 }
 
 /**
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 29d4385..206fe3b 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -32,6 +32,7 @@
 
 #include <linux/types.h>
 #include <linux/spinlock_types.h>
+#include <linux/atomic.h>
 
 struct device;
 struct device_node;
@@ -70,7 +71,7 @@
  */
 struct gen_pool_chunk {
 	struct list_head next_chunk;	/* next chunk in pool */
-	atomic_t avail;
+	atomic_long_t avail;
 	phys_addr_t phys_addr;		/* physical starting address of memory chunk */
 	unsigned long start_addr;	/* start address of memory chunk */
 	unsigned long end_addr;		/* end address of memory chunk (inclusive) */
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index ee971f3..7118876 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -128,6 +128,8 @@
 #define IN_DEV_ARP_ANNOUNCE(in_dev)	IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE)
 #define IN_DEV_ARP_IGNORE(in_dev)	IN_DEV_MAXCONF((in_dev), ARP_IGNORE)
 #define IN_DEV_ARP_NOTIFY(in_dev)	IN_DEV_MAXCONF((in_dev), ARP_NOTIFY)
+#define IN_DEV_NF_IPV4_DEFRAG_SKIP(in_dev) \
+	IN_DEV_ORCONF((in_dev), NF_IPV4_DEFRAG_SKIP)
 
 struct in_ifaddr {
 	struct hlist_node	hash;
diff --git a/include/linux/input/synaptics_dsx.h b/include/linux/input/synaptics_dsx.h
new file mode 100644
index 0000000..56fe12e
--- /dev/null
+++ b/include/linux/input/synaptics_dsx.h
@@ -0,0 +1,113 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#ifndef _SYNAPTICS_DSX_H_
+#define _SYNAPTICS_DSX_H_
+
+#define PLATFORM_DRIVER_NAME "synaptics_dsx"
+#define STYLUS_DRIVER_NAME "synaptics_dsx_stylus"
+#define ACTIVE_PEN_DRIVER_NAME "synaptics_dsx_active_pen"
+#define PROXIMITY_DRIVER_NAME "synaptics_dsx_proximity"
+#define GESTURE_DRIVER_NAME "synaptics_dsx_gesture"
+#define I2C_DRIVER_NAME "synaptics_dsx_i2c"
+#define SPI_DRIVER_NAME "synaptics_dsx_spi"
+
+/*
+ * struct synaptics_dsx_button_map - button map
+ * @nbuttons: number of buttons
+ * @map: pointer to array of button codes
+ */
+struct synaptics_dsx_button_map {
+	unsigned char nbuttons;
+	unsigned int *map;
+};
+
+/*
+ * struct synaptics_dsx_board_data - DSX board data
+ * @x_flip: x flip flag
+ * @y_flip: y flip flag
+ * @swap_axes: swap axes flag
+ * @irq_gpio: attention interrupt GPIO
+ * @irq_on_state: attention interrupt active state
+ * @power_gpio: power switch GPIO
+ * @power_on_state: power switch active state
+ * @reset_gpio: reset GPIO
+ * @reset_on_state: reset active state
+ * @max_y_for_2d: maximum y value for 2D area when virtual buttons are present
+ * @irq_flags: IRQ flags
+ * @i2c_addr: I2C slave address
+ * @ub_i2c_addr: microbootloader mode I2C slave address
+ * @device_descriptor_addr: HID device descriptor address
+ * @panel_x: x-axis resolution of display panel
+ * @panel_y: y-axis resolution of display panel
+ * @power_delay_ms: delay time to wait after powering up device
+ * @reset_delay_ms: delay time to wait after resetting device
+ * @reset_active_ms: reset active time
+ * @byte_delay_us: delay time between two bytes of SPI data
+ * @block_delay_us: delay time between two SPI transfers
+ * @addr_delay_us: delay time after sending address word
+ * @pwr_reg_name: pointer to name of regulator for power control
+ * @bus_reg_name: pointer to name of regulator for bus pullup control
+ * @cap_button_map: pointer to 0D button map
+ * @vir_button_map: pointer to virtual button map
+ */
+struct synaptics_dsx_board_data {
+	bool x_flip;
+	bool y_flip;
+	bool swap_axes;
+	int irq_gpio;
+	int irq_on_state;
+	int power_gpio;
+	int power_on_state;
+	int reset_gpio;
+	int reset_on_state;
+	int max_y_for_2d;
+	unsigned long irq_flags;
+	unsigned short i2c_addr;
+	unsigned short ub_i2c_addr;
+	unsigned short device_descriptor_addr;
+	unsigned int panel_x;
+	unsigned int panel_y;
+	unsigned int power_delay_ms;
+	unsigned int reset_delay_ms;
+	unsigned int reset_active_ms;
+	unsigned int byte_delay_us;
+	unsigned int block_delay_us;
+	unsigned int addr_delay_us;
+	const char *pwr_reg_name;
+	const char *bus_reg_name;
+	struct synaptics_dsx_button_map *cap_button_map;
+	struct synaptics_dsx_button_map *vir_button_map;
+};
+
+#endif
diff --git a/include/linux/input/synaptics_dsx_v2_6.h b/include/linux/input/synaptics_dsx_v2_6.h
new file mode 100644
index 0000000..2b91bc0
--- /dev/null
+++ b/include/linux/input/synaptics_dsx_v2_6.h
@@ -0,0 +1,111 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2015 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#ifndef _SYNAPTICS_DSX_H_
+#define _SYNAPTICS_DSX_H_
+
+#define PLATFORM_DRIVER_NAME "synaptics_dsxv26"
+#define STYLUS_DRIVER_NAME "synaptics_dsxv26_stylus"
+#define ACTIVE_PEN_DRIVER_NAME "synaptics_dsxv26_active_pen"
+#define PROXIMITY_DRIVER_NAME "synaptics_dsxv26_proximity"
+#define GESTURE_DRIVER_NAME "synaptics_dsxv26_gesture"
+#define I2C_DRIVER_NAME "synaptics_dsxv26"
+#define SPI_DRIVER_NAME "synaptics_dsxv26"
+
+/*
+ * struct synaptics_dsx_button_map - button map
+ * @nbuttons: number of buttons
+ * @map: pointer to array of button codes
+ */
+struct synaptics_dsx_button_map {
+	unsigned char nbuttons;
+	unsigned int *map;
+};
+
+/*
+ * struct synaptics_dsx_board_data - DSX board data
+ * @x_flip: x flip flag
+ * @y_flip: y flip flag
+ * @swap_axes: swap axes flag
+ * @irq_gpio: attention interrupt GPIO
+ * @irq_on_state: attention interrupt active state
+ * @power_gpio: power switch GPIO
+ * @power_on_state: power switch active state
+ * @reset_gpio: reset GPIO
+ * @reset_on_state: reset active state
+ * @max_y_for_2d: maximum y value for 2D area when virtual buttons are present
+ * @irq_flags: IRQ flags
+ * @i2c_addr: I2C slave address
+ * @ub_i2c_addr: microbootloader mode I2C slave address
+ * @device_descriptor_addr: HID device descriptor address
+ * @panel_x: x-axis resolution of display panel
+ * @panel_y: y-axis resolution of display panel
+ * @power_delay_ms: delay time to wait after powering up device
+ * @reset_delay_ms: delay time to wait after resetting device
+ * @reset_active_ms: reset active time
+ * @byte_delay_us: delay time between two bytes of SPI data
+ * @block_delay_us: delay time between two SPI transfers
+ * @pwr_reg_name: pointer to name of regulator for power control
+ * @bus_reg_name: pointer to name of regulator for bus pullup control
+ * @cap_button_map: pointer to 0D button map
+ * @vir_button_map: pointer to virtual button map
+ */
+struct synaptics_dsx_board_data {
+	bool x_flip;
+	bool y_flip;
+	bool swap_axes;
+	int irq_gpio;
+	int irq_on_state;
+	int power_gpio;
+	int power_on_state;
+	int reset_gpio;
+	int reset_on_state;
+	int max_y_for_2d;
+	unsigned long irq_flags;
+	unsigned short i2c_addr;
+	unsigned short ub_i2c_addr;
+	unsigned short device_descriptor_addr;
+	unsigned int panel_x;
+	unsigned int panel_y;
+	unsigned int power_delay_ms;
+	unsigned int reset_delay_ms;
+	unsigned int reset_active_ms;
+	unsigned int byte_delay_us;
+	unsigned int block_delay_us;
+	const char *pwr_reg_name;
+	const char *bus_reg_name;
+	struct synaptics_dsx_button_map *cap_button_map;
+	struct synaptics_dsx_button_map *vir_button_map;
+};
+
+#endif
diff --git a/include/linux/ipa_usb.h b/include/linux/ipa_usb.h
index de11633..cceae83 100644
--- a/include/linux/ipa_usb.h
+++ b/include/linux/ipa_usb.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -121,14 +121,16 @@
  * @dir:                 channel direction
  * @xfer_ring_len:       length of transfer ring in bytes (must be integral
  *                       multiple of transfer element size - 16B for xDCI)
- * @xfer_ring_base_addr: physical base address of transfer ring. Address must be
- *                       aligned to xfer_ring_len rounded to power of two
  * @xfer_scratch:        parameters for xDCI channel scratch
- * @xfer_ring_base_addr_iova: IO virtual address mapped to xfer_ring_base_addr
+ * @xfer_ring_base_addr_iova: IO virtual address mapped to physical base address
  * @data_buff_base_len:  length of data buffer allocated by USB driver
- * @data_buff_base_addr: physical base address for the data buffer (where TRBs
- *                       points)
- * @data_buff_base_addr_iova:  IO virtual address mapped to data_buff_base_addr
+ * @data_buff_base_addr_iova:  IO virtual address mapped to physical base address
+ * @sgt_xfer_rings:      Scatter table for Xfer rings, contains valid non-NULL
+ *			 value
+ *                       when USB S1-SMMU enabled, else NULL.
+ * @sgt_data_buff:       Scatter table for data buffs, contains valid non-NULL
+ *			 value
+ *                       when USB S1-SMMU enabled, else NULL.
  *
  */
 struct ipa_usb_xdci_chan_params {
@@ -143,12 +145,12 @@
 	/* transfer ring params */
 	enum gsi_chan_dir dir;
 	u16 xfer_ring_len;
-	u64 xfer_ring_base_addr;
 	struct ipa_usb_xdci_chan_scratch xfer_scratch;
 	u64 xfer_ring_base_addr_iova;
 	u32 data_buff_base_len;
-	u64 data_buff_base_addr;
 	u64 data_buff_base_addr_iova;
+	struct sg_table *sgt_xfer_rings;
+	struct sg_table *sgt_data_buff;
 };
 
 /**
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 3b94400..11ff751 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -248,7 +248,8 @@
 						 * 100: prefer care-of address
 						 */
 				dontfrag:1,
-				autoflowlabel:1;
+				autoflowlabel:1,
+				autoflowlabel_set:1;
 	__u8			min_hopcount;
 	__u8			tclass;
 	__be32			rcv_flowinfo;
diff --git a/include/linux/kaiser.h b/include/linux/kaiser.h
new file mode 100644
index 0000000..58c55b1
--- /dev/null
+++ b/include/linux/kaiser.h
@@ -0,0 +1,52 @@
+#ifndef _LINUX_KAISER_H
+#define _LINUX_KAISER_H
+
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+#include <asm/kaiser.h>
+
+static inline int kaiser_map_thread_stack(void *stack)
+{
+	/*
+	 * Map that page of kernel stack on which we enter from user context.
+	 */
+	return kaiser_add_mapping((unsigned long)stack +
+			THREAD_SIZE - PAGE_SIZE, PAGE_SIZE, __PAGE_KERNEL);
+}
+
+static inline void kaiser_unmap_thread_stack(void *stack)
+{
+	/*
+	 * Note: may be called even when kaiser_map_thread_stack() failed.
+	 */
+	kaiser_remove_mapping((unsigned long)stack +
+			THREAD_SIZE - PAGE_SIZE, PAGE_SIZE);
+}
+#else
+
+/*
+ * These stubs are used whenever CONFIG_PAGE_TABLE_ISOLATION is off, which
+ * includes architectures that support KAISER, but have it disabled.
+ */
+
+static inline void kaiser_init(void)
+{
+}
+static inline int kaiser_add_mapping(unsigned long addr,
+				     unsigned long size, unsigned long flags)
+{
+	return 0;
+}
+static inline void kaiser_remove_mapping(unsigned long start,
+					 unsigned long size)
+{
+}
+static inline int kaiser_map_thread_stack(void *stack)
+{
+	return 0;
+}
+static inline void kaiser_unmap_thread_stack(void *stack)
+{
+}
+
+#endif /* !CONFIG_PAGE_TABLE_ISOLATION */
+#endif /* _LINUX_KAISER_H */
diff --git a/include/linux/leds-qpnp-flash.h b/include/linux/leds-qpnp-flash.h
index 1fe6e17..e3b9cf1 100644
--- a/include/linux/leds-qpnp-flash.h
+++ b/include/linux/leds-qpnp-flash.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -21,7 +21,14 @@
 
 #define FLASH_LED_PREPARE_OPTIONS_MASK	GENMASK(3, 0)
 
-int qpnp_flash_led_prepare(struct led_trigger *trig, int options,
+#if (defined CONFIG_LEDS_QPNP_FLASH || defined CONFIG_LEDS_QPNP_FLASH_V2)
+extern int (*qpnp_flash_led_prepare)(struct led_trigger *trig, int options,
 					int *max_current);
-
+#else
+static inline int qpnp_flash_led_prepare(struct led_trigger *trig, int options,
+					int *max_current)
+{
+	return -ENODEV;
+}
+#endif
 #endif
diff --git a/include/linux/mdss_io_util.h b/include/linux/mdss_io_util.h
new file mode 100644
index 0000000..028f3e3
--- /dev/null
+++ b/include/linux/mdss_io_util.h
@@ -0,0 +1,121 @@
+/* Copyright (c) 2012, 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_IO_UTIL_H__
+#define __MDSS_IO_UTIL_H__
+
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/i2c.h>
+#include <linux/types.h>
+
+#ifdef DEBUG
+#define DEV_DBG(fmt, args...)   pr_err(fmt, ##args)
+#else
+#define DEV_DBG(fmt, args...)   pr_debug(fmt, ##args)
+#endif
+#define DEV_INFO(fmt, args...)  pr_info(fmt, ##args)
+#define DEV_WARN(fmt, args...)  pr_warn(fmt, ##args)
+#define DEV_ERR(fmt, args...)   pr_err(fmt, ##args)
+
+struct dss_io_data {
+	u32 len;
+	void __iomem *base;
+};
+
+void dss_reg_w(struct dss_io_data *io, u32 offset, u32 value, u32 debug);
+u32 dss_reg_r(struct dss_io_data *io, u32 offset, u32 debug);
+void dss_reg_dump(void __iomem *base, u32 len, const char *prefix, u32 debug);
+
+#define DSS_REG_W_ND(io, offset, val)  dss_reg_w(io, offset, val, false)
+#define DSS_REG_W(io, offset, val)     dss_reg_w(io, offset, val, true)
+#define DSS_REG_R_ND(io, offset)       dss_reg_r(io, offset, false)
+#define DSS_REG_R(io, offset)          dss_reg_r(io, offset, true)
+
+enum dss_vreg_type {
+	DSS_REG_LDO,
+	DSS_REG_VS,
+};
+
+enum dss_vreg_mode {
+	DSS_REG_MODE_ENABLE,
+	DSS_REG_MODE_DISABLE,
+	DSS_REG_MODE_LP,
+	DSS_REG_MODE_ULP,
+	DSS_REG_MODE_MAX,
+};
+
+struct dss_vreg {
+	struct regulator *vreg; /* vreg handle */
+	char vreg_name[32];
+	int min_voltage;
+	int max_voltage;
+	u32 load[DSS_REG_MODE_MAX];
+	int pre_on_sleep;
+	int post_on_sleep;
+	int pre_off_sleep;
+	int post_off_sleep;
+};
+
+struct dss_gpio {
+	unsigned int gpio;
+	unsigned int value;
+	char gpio_name[32];
+};
+
+enum dss_clk_type {
+	DSS_CLK_AHB, /* no set rate. rate controlled through rpm */
+	DSS_CLK_PCLK,
+	DSS_CLK_OTHER,
+};
+
+struct dss_clk {
+	struct clk *clk; /* clk handle */
+	char clk_name[32];
+	enum dss_clk_type type;
+	unsigned long rate;
+};
+
+struct dss_module_power {
+	unsigned int num_vreg;
+	struct dss_vreg *vreg_config;
+	unsigned int num_gpio;
+	struct dss_gpio *gpio_config;
+	unsigned int num_clk;
+	struct dss_clk *clk_config;
+};
+
+int msm_dss_ioremap_byname(struct platform_device *pdev,
+	struct dss_io_data *io_data, const char *name);
+void msm_dss_iounmap(struct dss_io_data *io_data);
+
+int msm_dss_enable_gpio(struct dss_gpio *in_gpio, int num_gpio, int enable);
+int msm_dss_gpio_enable(struct dss_gpio *in_gpio, int num_gpio, int enable);
+
+int msm_dss_config_vreg(struct device *dev, struct dss_vreg *in_vreg,
+	int num_vreg, int config);
+int msm_dss_enable_vreg(struct dss_vreg *in_vreg, int num_vreg,	int enable);
+int msm_dss_config_vreg_opt_mode(struct dss_vreg *in_vreg, int num_vreg,
+	 enum dss_vreg_mode mode);
+
+int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk);
+void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk);
+int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk);
+int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable);
+
+int mdss_i2c_byte_read(struct i2c_client *client, uint8_t slave_addr,
+		       uint8_t reg_offset, uint8_t *read_buf);
+int mdss_i2c_byte_write(struct i2c_client *client, uint8_t slave_addr,
+			uint8_t reg_offset, uint8_t *value);
+
+#endif /* __MDSS_IO_UTIL_H__ */
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 8b35bdb..fd77f83 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -490,9 +490,21 @@
 extern int do_swap_account;
 #endif
 
-void lock_page_memcg(struct page *page);
+struct mem_cgroup *lock_page_memcg(struct page *page);
+void __unlock_page_memcg(struct mem_cgroup *memcg);
 void unlock_page_memcg(struct page *page);
 
+static inline void __mem_cgroup_update_page_stat(struct page *page,
+						 struct mem_cgroup *memcg,
+						 enum mem_cgroup_stat_index idx,
+						 int val)
+{
+	VM_BUG_ON(!(rcu_read_lock_held() || PageLocked(page)));
+
+	if (memcg && memcg->stat)
+		this_cpu_add(memcg->stat->count[idx], val);
+}
+
 /**
  * mem_cgroup_update_page_stat - update page state statistics
  * @page: the page
@@ -508,13 +520,12 @@
  *     mem_cgroup_update_page_stat(page, state, -1);
  *   unlock_page(page) or unlock_page_memcg(page)
  */
+
 static inline void mem_cgroup_update_page_stat(struct page *page,
 				 enum mem_cgroup_stat_index idx, int val)
 {
-	VM_BUG_ON(!(rcu_read_lock_held() || PageLocked(page)));
 
-	if (page->mem_cgroup)
-		this_cpu_add(page->mem_cgroup->stat->count[idx], val);
+	__mem_cgroup_update_page_stat(page, page->mem_cgroup, idx, val);
 }
 
 static inline void mem_cgroup_inc_page_stat(struct page *page,
@@ -709,7 +720,12 @@
 {
 }
 
-static inline void lock_page_memcg(struct page *page)
+static inline struct mem_cgroup *lock_page_memcg(struct page *page)
+{
+	return NULL;
+}
+
+static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
 {
 }
 
@@ -745,6 +761,13 @@
 {
 }
 
+static inline void __mem_cgroup_update_page_stat(struct page *page,
+						 struct mem_cgroup *memcg,
+						 enum mem_cgroup_stat_index idx,
+						 int nr)
+{
+}
+
 static inline void mem_cgroup_inc_page_stat(struct page *page,
 					    enum mem_cgroup_stat_index idx)
 {
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 80faf44..dd1b009 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -476,6 +476,7 @@
 enum {
 	MLX4_INTERFACE_STATE_UP		= 1 << 0,
 	MLX4_INTERFACE_STATE_DELETION	= 1 << 1,
+	MLX4_INTERFACE_STATE_NOWAIT	= 1 << 2,
 };
 
 #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 6045d4d..25ed105 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -143,7 +143,7 @@
 	MLX5_CMD_OP_ALLOC_Q_COUNTER               = 0x771,
 	MLX5_CMD_OP_DEALLOC_Q_COUNTER             = 0x772,
 	MLX5_CMD_OP_QUERY_Q_COUNTER               = 0x773,
-	MLX5_CMD_OP_SET_RATE_LIMIT                = 0x780,
+	MLX5_CMD_OP_SET_PP_RATE_LIMIT             = 0x780,
 	MLX5_CMD_OP_QUERY_RATE_LIMIT              = 0x781,
 	MLX5_CMD_OP_ALLOC_PD                      = 0x800,
 	MLX5_CMD_OP_DEALLOC_PD                    = 0x801,
@@ -6689,7 +6689,7 @@
 	u8         vxlan_udp_port[0x10];
 };
 
-struct mlx5_ifc_set_rate_limit_out_bits {
+struct mlx5_ifc_set_pp_rate_limit_out_bits {
 	u8         status[0x8];
 	u8         reserved_at_8[0x18];
 
@@ -6698,7 +6698,7 @@
 	u8         reserved_at_40[0x40];
 };
 
-struct mlx5_ifc_set_rate_limit_in_bits {
+struct mlx5_ifc_set_pp_rate_limit_in_bits {
 	u8         opcode[0x10];
 	u8         reserved_at_10[0x10];
 
@@ -6711,6 +6711,8 @@
 	u8         reserved_at_60[0x20];
 
 	u8         rate_limit[0x20];
+
+	u8         reserved_at_a0[0x160];
 };
 
 struct mlx5_ifc_access_register_out_bits {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 907e029..16155d0 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -351,6 +351,7 @@
 struct vm_operations_struct {
 	void (*open)(struct vm_area_struct * area);
 	void (*close)(struct vm_area_struct * area);
+	int (*split)(struct vm_area_struct * area, unsigned long addr);
 	int (*mremap)(struct vm_area_struct * area);
 	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
 	int (*pmd_fault)(struct vm_area_struct *, unsigned long address,
diff --git a/include/linux/mman.h b/include/linux/mman.h
index 634c4c5..c540001 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -63,8 +63,9 @@
  * ("bit1" and "bit2" must be single bits)
  */
 #define _calc_vm_trans(x, bit1, bit2) \
+  ((!(bit1) || !(bit2)) ? 0 : \
   ((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
-   : ((x) & (bit1)) / ((bit1) / (bit2)))
+   : ((x) & (bit1)) / ((bit1) / (bit2))))
 
 /*
  * Combine the mmap "prot" argument into "vm_flags" used internally.
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 25c0dc3..854dfa6 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -381,18 +381,6 @@
 	___pmd;								\
 })
 
-#define pmdp_huge_get_and_clear_notify(__mm, __haddr, __pmd)		\
-({									\
-	unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;		\
-	pmd_t ___pmd;							\
-									\
-	___pmd = pmdp_huge_get_and_clear(__mm, __haddr, __pmd);		\
-	mmu_notifier_invalidate_range(__mm, ___haddr,			\
-				      ___haddr + HPAGE_PMD_SIZE);	\
-									\
-	___pmd;								\
-})
-
 /*
  * set_pte_at_notify() sets the pte _after_ running the notifier.
  * This is safe to start by updating the secondary MMUs, because the primary MMU
@@ -480,7 +468,6 @@
 #define pmdp_clear_young_notify pmdp_test_and_clear_young
 #define	ptep_clear_flush_notify ptep_clear_flush
 #define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
-#define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear
 #define set_pte_at_notify set_pte_at
 
 #endif /* CONFIG_MMU_NOTIFIER */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 90900c2..0c28c28 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -133,8 +133,9 @@
 	NR_SLAB_UNRECLAIMABLE,
 	NR_PAGETABLE,		/* used for pagetables */
 	NR_KERNEL_STACK_KB,	/* measured in KiB */
-	/* Second 128 byte cacheline */
+	NR_KAISERTABLE,
 	NR_BOUNCE,
+	/* Second 128 byte cacheline */
 #if IS_ENABLED(CONFIG_ZSMALLOC)
 	NR_ZSPAGES,		/* allocated in zsmalloc */
 #endif
diff --git a/include/linux/msm_gpi.h b/include/linux/msm_gpi.h
index 31eaf13..6fe4a4e 100644
--- a/include/linux/msm_gpi.h
+++ b/include/linux/msm_gpi.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -35,8 +35,14 @@
 #define MSM_GPI_TRE_TYPE(tre) ((tre->dword[3] >> 16) & 0xFF)
 
 /* DMA w. Buffer TRE */
+#ifdef CONFIG_ARM64
 #define MSM_GPI_DMA_W_BUFFER_TRE_DWORD0(ptr) ((u32)ptr)
 #define MSM_GPI_DMA_W_BUFFER_TRE_DWORD1(ptr) ((u32)(ptr >> 32))
+#else
+#define MSM_GPI_DMA_W_BUFFER_TRE_DWORD0(ptr) (ptr)
+#define MSM_GPI_DMA_W_BUFFER_TRE_DWORD1(ptr) 0
+#endif
+
 #define MSM_GPI_DMA_W_BUFFER_TRE_DWORD2(length) (length & 0xFFFFFF)
 #define MSM_GPI_DMA_W_BUFFER_TRE_DWORD3(bei, ieot, ieob, ch) ((0x1 << 20) | \
 	(0x0 << 16) | (bei << 10) | (ieot << 9) | (ieob << 8) | ch)
@@ -55,15 +61,25 @@
 #define MSM_GPI_DMA_IMMEDIATE_TRE_GET_LEN(tre) (tre->dword[2] & 0xF)
 
 /* DMA w. Scatter/Gather List TRE */
+#ifdef CONFIG_ARM64
 #define MSM_GPI_SG_LIST_TRE_DWORD0(ptr) ((u32)ptr)
 #define MSM_GPI_SG_LIST_TRE_DWORD1(ptr) ((u32)(ptr >> 32))
+#else
+#define MSM_GPI_SG_LIST_TRE_DWORD0(ptr) (ptr)
+#define MSM_GPI_SG_LIST_TRE_DWORD1(ptr) 0
+#endif
 #define MSM_GPI_SG_LIST_TRE_DWORD2(length) (length & 0xFFFF)
 #define MSM_GPI_SG_LIST_TRE_DWORD3(bei, ieot, ieob, ch) ((0x1 << 20) | \
 	(0x2 << 16) | (bei << 10) | (ieot << 9) | (ieob << 8) | ch)
 
 /* SG Element */
+#ifdef CONFIG_ARM64
 #define MSM_GPI_SG_ELEMENT_DWORD0(ptr) ((u32)ptr)
 #define MSM_GPI_SG_ELEMENT_DWORD1(ptr) ((u32)(ptr >> 32))
+#else
+#define MSM_GPI_SG_ELEMENT_DWORD0(ptr) (ptr)
+#define MSM_GPI_SG_ELEMENT_DWORD1(ptr) 0
+#endif
 #define MSM_GSI_SG_ELEMENT_DWORD2(length) (length & 0xFFFFF)
 #define MSM_GSI_SG_ELEMENT_DWORD3 (0)
 
@@ -208,6 +224,7 @@
 	u32 length;
 	enum msm_gpi_tce_code completion_code; /* TCE event code */
 	u32 status;
+	struct __packed msm_gpi_tre imed_tre;
 	void *userdata;
 };
 
diff --git a/include/linux/msm_hdmi.h b/include/linux/msm_hdmi.h
new file mode 100644
index 0000000..afaa08a20
--- /dev/null
+++ b/include/linux/msm_hdmi.h
@@ -0,0 +1,94 @@
+/* include/linux/msm_hdmi.h
+ *
+ * Copyright (c) 2014-2015, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MSM_HDMI_H_
+#define _MSM_HDMI_H_
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+
+/*
+ * HDMI cable notify handler structure.
+ * link A link for the linked list
+ * status Current status of HDMI cable connection
+ * hpd_notify Callback function to provide cable status
+ */
+struct hdmi_cable_notify {
+	struct list_head link;
+	int status;
+	void (*hpd_notify)(struct hdmi_cable_notify *h);
+};
+
+struct msm_hdmi_audio_edid_blk {
+	u8 *audio_data_blk;
+	unsigned int audio_data_blk_size; /* in bytes */
+	u8 *spk_alloc_data_blk;
+	unsigned int spk_alloc_data_blk_size; /* in bytes */
+};
+
+struct msm_hdmi_audio_setup_params {
+	u32 sample_rate_hz;
+	u32 num_of_channels;
+	u32 channel_allocation;
+	u32 level_shift;
+	bool down_mix;
+	u32 sample_present;
+};
+
+struct msm_hdmi_audio_codec_ops {
+	int (*audio_info_setup)(struct platform_device *pdev,
+		struct msm_hdmi_audio_setup_params *params);
+	int (*get_audio_edid_blk)(struct platform_device *pdev,
+		struct msm_hdmi_audio_edid_blk *blk);
+	int (*hdmi_cable_status)(struct platform_device *pdev, u32 vote);
+};
+
+#ifdef CONFIG_FB_MSM_MDSS_HDMI_PANEL
+/*
+ * Register for HDMI cable connect or disconnect notification.
+ * @param handler callback handler for notification
+ * @return negative value as error otherwise current status of cable
+ */
+int register_hdmi_cable_notification(
+		struct hdmi_cable_notify *handler);
+
+/*
+ * Un-register for HDMI cable connect or disconnect notification.
+ * @param handler callback handler for notification
+ * @return negative value as error
+ */
+int unregister_hdmi_cable_notification(
+		struct hdmi_cable_notify *handler);
+
+int msm_hdmi_register_audio_codec(struct platform_device *pdev,
+	struct msm_hdmi_audio_codec_ops *ops);
+
+#else
+static inline int register_hdmi_cable_notification(
+		struct hdmi_cable_notify *handler) {
+	return 0;
+}
+
+static inline int unregister_hdmi_cable_notification(
+		struct hdmi_cable_notify *handler) {
+	return 0;
+}
+
+static inline int msm_hdmi_register_audio_codec(struct platform_device *pdev,
+		struct msm_hdmi_audio_codec_ops *ops) {
+	return 0;
+}
+#endif /* CONFIG_FB_MSM_MDSS_HDMI_PANEL */
+
+#endif /*_MSM_HDMI_H_*/
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h
index 35d0fd7..e821a31 100644
--- a/include/linux/omap-gpmc.h
+++ b/include/linux/omap-gpmc.h
@@ -88,10 +88,11 @@
 #endif
 
 #if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2)
-extern void gpmc_onenand_init(struct omap_onenand_platform_data *d);
+extern int gpmc_onenand_init(struct omap_onenand_platform_data *d);
 #else
 #define board_onenand_data	NULL
-static inline void gpmc_onenand_init(struct omap_onenand_platform_data *d)
+static inline int gpmc_onenand_init(struct omap_onenand_platform_data *d)
 {
+	return 0;
 }
 #endif
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 8f16299..8902f23 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -35,6 +35,12 @@
 
 #endif
 
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+#define USER_MAPPED_SECTION "..user_mapped"
+#else
+#define USER_MAPPED_SECTION ""
+#endif
+
 /*
  * Base implementations of per-CPU variable declarations and definitions, where
  * the section in which the variable is to be placed is provided by the
@@ -115,6 +121,12 @@
 #define DEFINE_PER_CPU(type, name)					\
 	DEFINE_PER_CPU_SECTION(type, name, "")
 
+#define DECLARE_PER_CPU_USER_MAPPED(type, name)				\
+	DECLARE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION)
+
+#define DEFINE_PER_CPU_USER_MAPPED(type, name)				\
+	DEFINE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION)
+
 /*
  * Declaration/definition used for per-CPU variables that must come first in
  * the set of variables.
@@ -144,6 +156,14 @@
 	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
 	____cacheline_aligned_in_smp
 
+#define DECLARE_PER_CPU_SHARED_ALIGNED_USER_MAPPED(type, name)		\
+	DECLARE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION PER_CPU_SHARED_ALIGNED_SECTION) \
+	____cacheline_aligned_in_smp
+
+#define DEFINE_PER_CPU_SHARED_ALIGNED_USER_MAPPED(type, name)		\
+	DEFINE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION PER_CPU_SHARED_ALIGNED_SECTION) \
+	____cacheline_aligned_in_smp
+
 #define DECLARE_PER_CPU_ALIGNED(type, name)				\
 	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)	\
 	____cacheline_aligned
@@ -162,11 +182,21 @@
 #define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)				\
 	DEFINE_PER_CPU_SECTION(type, name, "..page_aligned")		\
 	__aligned(PAGE_SIZE)
+/*
+ * Declaration/definition used for per-CPU variables that must be page aligned and need to be mapped in user mode.
+ */
+#define DECLARE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(type, name)		\
+	DECLARE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION"..page_aligned") \
+	__aligned(PAGE_SIZE)
+
+#define DEFINE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(type, name)		\
+	DEFINE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION"..page_aligned") \
+	__aligned(PAGE_SIZE)
 
 /*
  * Declaration/definition used for per-CPU variables that must be read mostly.
  */
-#define DECLARE_PER_CPU_READ_MOSTLY(type, name)			\
+#define DECLARE_PER_CPU_READ_MOSTLY(type, name)				\
 	DECLARE_PER_CPU_SECTION(type, name, "..read_mostly")
 
 #define DEFINE_PER_CPU_READ_MOSTLY(type, name)				\
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 70936bf..47c5b39 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1277,6 +1277,7 @@
 extern void perf_event_disable_local(struct perf_event *event);
 extern void perf_event_disable_inatomic(struct perf_event *event);
 extern void perf_event_task_tick(void);
+extern int perf_event_account_interrupt(struct perf_event *event);
 #else /* !CONFIG_PERF_EVENTS: */
 static inline void *
 perf_aux_output_begin(struct perf_output_handle *handle,
diff --git a/include/linux/phy.h b/include/linux/phy.h
index a04d69a..867110c 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -684,6 +684,17 @@
 }
 
 /**
+ * phy_interface_mode_is_rgmii - Convenience function for testing if a
+ * PHY interface mode is RGMII (all variants)
+ * @mode: the phy_interface_t enum
+ */
+static inline bool phy_interface_mode_is_rgmii(phy_interface_t mode)
+{
+	return mode >= PHY_INTERFACE_MODE_RGMII &&
+		mode <= PHY_INTERFACE_MODE_RGMII_TXID;
+}
+
+/**
  * phy_interface_is_rgmii - Convenience function for testing if a PHY interface
  * is RGMII (all variants)
  * @phydev: the phy_device struct
diff --git a/include/linux/platform_data/qcom_wcnss_device.h b/include/linux/platform_data/qcom_wcnss_device.h
new file mode 100644
index 0000000..f9156ef
--- /dev/null
+++ b/include/linux/platform_data/qcom_wcnss_device.h
@@ -0,0 +1,20 @@
+/* Copyright (c) 2011, 2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_WCNSS_DEVICE__H
+#define __QCOM_WCNSS_DEVICE__H
+
+struct qcom_wcnss_opts {
+	bool has_48mhz_xo;
+};
+
+#endif /* __QCOM_WCNSS_DEVICE__H */
diff --git a/include/linux/psci.h b/include/linux/psci.h
index bdea1cb..6306ab1 100644
--- a/include/linux/psci.h
+++ b/include/linux/psci.h
@@ -26,6 +26,7 @@
 int psci_cpu_suspend_enter(unsigned long index);
 
 struct psci_operations {
+	u32 (*get_version)(void);
 	int (*cpu_suspend)(u32 state, unsigned long entry_point);
 	int (*cpu_off)(u32 state);
 	int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index b83507c..e38f471 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -99,12 +99,18 @@
 
 /* Note: callers invoking this in a loop must use a compiler barrier,
  * for example cpu_relax(). Callers must hold producer_lock.
+ * Callers are responsible for making sure pointer that is being queued
+ * points to a valid data.
  */
 static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
 {
 	if (unlikely(!r->size) || r->queue[r->producer])
 		return -ENOSPC;
 
+	/* Make sure the pointer we are storing points to a valid data. */
+	/* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */
+	smp_wmb();
+
 	r->queue[r->producer++] = ptr;
 	if (unlikely(r->producer >= r->size))
 		r->producer = 0;
@@ -244,6 +250,9 @@
 	if (ptr)
 		__ptr_ring_discard_one(r);
 
+	/* Make sure anyone accessing data through the pointer is up to date. */
+	/* Pairs with smp_wmb in __ptr_ring_produce. */
+	smp_read_barrier_depends();
 	return ptr;
 }
 
diff --git a/include/linux/qpnp/qpnp-adc.h b/include/linux/qpnp/qpnp-adc.h
index 3e060d9..9a079a6 100644
--- a/include/linux/qpnp/qpnp-adc.h
+++ b/include/linux/qpnp/qpnp-adc.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -338,6 +338,7 @@
  * %CHAN_PATH_SCALING5: ratio of {1, 8}
  * %CHAN_PATH_SCALING6: ratio of {10, 81} The actual ratio is (1/8.1).
  * %CHAN_PATH_SCALING7: ratio of {1, 10}
+ * %CHAN_PATH_SCALING8: ratio of {1, 16}
  * %CHAN_PATH_NONE: Do not use this pre-scaling ratio type.
  *
  * The pre-scaling is applied for signals to be within the voltage range
@@ -352,6 +353,7 @@
 	PATH_SCALING5,
 	PATH_SCALING6,
 	PATH_SCALING7,
+	PATH_SCALING8,
 	PATH_SCALING_NONE,
 };
 
@@ -380,6 +382,12 @@
  * %SCALE_QRD_SKUT1_BATT_THERM: Conversion to temperature(decidegC) based on
  *          btm parameters for SKUT1
  * %SCALE_PMI_CHG_TEMP: Conversion for PMI CHG temp
+ * %SCALE_BATT_THERM_TEMP: Conversion to temperature(decidegC) based on btm
+ *			parameters.
+ * %SCALE_CHRG_TEMP: Conversion for charger temp.
+ * %SCALE_DIE_TEMP: Conversion for die temp.
+ * %SCALE_I_DEFAULT: Default scaling to convert raw adc code to current (uA).
+ * %SCALE_USBIN_I: Conversion for USB input current.
  * %SCALE_NONE: Do not use this scaling type.
  */
 enum qpnp_adc_scale_fn_type {
@@ -397,6 +405,11 @@
 	SCALE_NCP_03WF683_THERM,
 	SCALE_QRD_SKUT1_BATT_THERM,
 	SCALE_PMI_CHG_TEMP = 16,
+	SCALE_BATT_THERM_TEMP,
+	SCALE_CHRG_TEMP,
+	SCALE_DIE_TEMP,
+	SCALE_I_DEFAULT,
+	SCALE_USBIN_I,
 	SCALE_NONE,
 };
 
@@ -1125,7 +1138,8 @@
 	{1, 20},
 	{1, 8},
 	{10, 81},
-	{1, 10}
+	{1, 10},
+	{1, 16}
 };
 
 /**
@@ -1347,6 +1361,23 @@
 			const struct qpnp_vadc_chan_properties *chan_prop,
 			struct qpnp_vadc_result *chan_rslt);
 /**
+ * qpnp_iadc_scale_default() - Scales the pre-calibrated digital output
+ *		of current ADC to the ADC reference and compensates for the
+ *		gain and offset.
+ * @dev:	Structure device for qpnp vadc
+ * @adc_code:	pre-calibrated digital output of the ADC.
+ * @adc_prop:	adc properties of the qpnp adc such as bit resolution,
+ *		reference voltage.
+ * @chan_prop:	Individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @chan_rslt:	Physical result to be stored.
+ */
+int32_t qpnp_iadc_scale_default(struct qpnp_vadc_chip *dev,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt);
+/**
  * qpnp_adc_scale_pmic_therm() - Scales the pre-calibrated digital output
  *		of an ADC to the ADC reference and compensates for the
  *		gain and offset. Performs the AMUX out as 2mV/K and returns
@@ -1384,6 +1415,23 @@
 			const struct qpnp_vadc_chan_properties *chan_prop,
 			struct qpnp_vadc_result *chan_rslt);
 /**
+ * qpnp_adc_batt_therm() - Scales the pre-calibrated digital output
+ *		of an ADC to the ADC reference and compensates for the
+ *		gain and offset. Returns the temperature in decidegC.
+ * @dev:	Structure device for qpnp vadc
+ * @adc_code:	pre-calibrated digital output of the ADC.
+ * @adc_prop:	adc properties of the pm8xxx adc such as bit resolution,
+ *		reference voltage.
+ * @chan_prop:	individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @chan_rslt:	physical result to be stored.
+ */
+int32_t qpnp_adc_batt_therm(struct qpnp_vadc_chip *dev,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt);
+/**
  * qpnp_adc_scale_batt_therm() - Scales the pre-calibrated digital output
  *		of an ADC to the ADC reference and compensates for the
  *		gain and offset. Returns the temperature in decidegC.
@@ -1401,6 +1449,61 @@
 			const struct qpnp_vadc_chan_properties *chan_prop,
 			struct qpnp_vadc_result *chan_rslt);
 /**
+ * qpnp_adc_scale_chrg_temp() - Scales the pre-calibrated digital output
+ *		of an ADC to the ADC reference and compensates for the
+ *		gain and offset. The voltage measured by HKADC is related to
+ *		the junction temperature according to
+ *		Tj = 377.5 degC - (V_adc / 0.004)
+ * @dev:	Structure device for qpnp vadc
+ * @adc_code:	pre-calibrated digital output of the ADC.
+ * @adc_prop:	adc properties of the qpnp adc such as bit resolution,
+ *		reference voltage.
+ * @chan_prop:	Individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @chan_rslt:	Physical result to be stored.
+ */
+int32_t qpnp_adc_scale_chrg_temp(struct qpnp_vadc_chip *dev,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_die_temp() - Scales the pre-calibrated digital output
+ *		of an ADC to the ADC reference and compensates for the
+ *		gain and offset. The voltage measured by HKADC is related to
+ *		the junction temperature according to
+ *		Tj = -273.15 degC + (V_adc / 0.002)
+ * @dev:	Structure device for qpnp vadc
+ * @adc_code:	pre-calibrated digital output of the ADC.
+ * @adc_prop:	adc properties of the qpnp adc such as bit resolution,
+ *		reference voltage.
+ * @chan_prop:	Individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @chan_rslt:	Physical result to be stored.
+ */
+int32_t qpnp_adc_scale_die_temp(struct qpnp_vadc_chip *dev,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_usbin_curr() - Scales the pre-calibrated digital output
+ *		of an ADC to the ADC reference and compensates for the
+ *		gain and offset.
+ * @dev:	Structure device for qpnp vadc
+ * @adc_code:	pre-calibrated digital output of the ADC.
+ * @adc_prop:	adc properties of the qpnp adc such as bit resolution,
+ *		reference voltage.
+ * @chan_prop:	Individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @chan_rslt:	Physical result to be stored.
+ */
+int32_t qpnp_adc_scale_usbin_curr(struct qpnp_vadc_chip *dev,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt);
+/**
  * qpnp_adc_scale_qrd_batt_therm() - Scales the pre-calibrated digital output
  *		of an ADC to the ADC reference and compensates for the
  *		gain and offset. Returns the temperature in decidegC.
@@ -1906,6 +2009,12 @@
 			const struct qpnp_vadc_chan_properties *chan_prop,
 			struct qpnp_vadc_result *chan_rslt)
 { return -ENXIO; }
+static inline int32_t qpnp_iadc_scale_default(struct qpnp_vadc_chip *vadc,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
 static inline int32_t qpnp_adc_scale_pmic_therm(struct qpnp_vadc_chip *vadc,
 			int32_t adc_code,
 			const struct qpnp_adc_properties *adc_prop,
@@ -1918,12 +2027,36 @@
 			const struct qpnp_vadc_chan_properties *chan_prop,
 			struct qpnp_vadc_result *chan_rslt)
 { return -ENXIO; }
+static inline int32_t qpnp_adc_batt_therm(struct qpnp_vadc_chip *vadc,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
 static inline int32_t qpnp_adc_scale_batt_therm(struct qpnp_vadc_chip *vadc,
 			int32_t adc_code,
 			const struct qpnp_adc_properties *adc_prop,
 			const struct qpnp_vadc_chan_properties *chan_prop,
 			struct qpnp_vadc_result *chan_rslt)
 { return -ENXIO; }
+static inline int32_t qpnp_adc_scale_chrg_temp(struct qpnp_vadc_chip *vadc,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_die_temp(struct qpnp_vadc_chip *vadc,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_usbin_curr(struct qpnp_vadc_chip *vadc,
+			int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
 static inline int32_t qpnp_adc_scale_qrd_batt_therm(
 			struct qpnp_vadc_chip *vadc, int32_t adc_code,
 			const struct qpnp_adc_properties *adc_prop,
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index 4ae95f7..6224a0a 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -100,44 +100,6 @@
 }
 
 /**
- * hlist_nulls_add_tail_rcu
- * @n: the element to add to the hash list.
- * @h: the list to add to.
- *
- * Description:
- * Adds the specified element to the end of the specified hlist_nulls,
- * while permitting racing traversals.  NOTE: tail insertion requires
- * list traversal.
- *
- * The caller must take whatever precautions are necessary
- * (such as holding appropriate locks) to avoid racing
- * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
- * or hlist_nulls_del_rcu(), running on this same list.
- * However, it is perfectly legal to run concurrently with
- * the _rcu list-traversal primitives, such as
- * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
- * problems on Alpha CPUs.  Regardless of the type of CPU, the
- * list-traversal primitive must be guarded by rcu_read_lock().
- */
-static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
-					struct hlist_nulls_head *h)
-{
-	struct hlist_nulls_node *i, *last = NULL;
-
-	for (i = hlist_nulls_first_rcu(h); !is_a_nulls(i);
-	     i = hlist_nulls_next_rcu(i))
-		last = i;
-
-	if (last) {
-		n->next = last->next;
-		n->pprev = &last->next;
-		rcu_assign_pointer(hlist_nulls_next_rcu(last), n);
-	} else {
-		hlist_nulls_add_head_rcu(n, h);
-	}
-}
-
-/**
  * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
  * @tpos:	the type * to use as a loop cursor.
  * @pos:	the &struct hlist_nulls_node to use as a loop cursor.
diff --git a/include/linux/regulator/cpr-regulator.h b/include/linux/regulator/cpr-regulator.h
new file mode 100644
index 0000000..7a04e70
--- /dev/null
+++ b/include/linux/regulator/cpr-regulator.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2013-2014, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __REGULATOR_CPR_REGULATOR_H__
+#define __REGULATOR_CPR_REGULATOR_H__
+
+#include <linux/init.h>
+
+#ifdef CONFIG_REGULATOR_CPR
+
+int __init cpr_regulator_init(void);
+
+#else
+
+static inline int __init cpr_regulator_init(void)
+{
+	return -ENODEV;
+}
+
+#endif /* CONFIG_REGULATOR_CPR */
+
+#endif /* __REGULATOR_CPR_REGULATOR_H__ */
diff --git a/include/linux/sh_eth.h b/include/linux/sh_eth.h
index f2e27e0..01b3778 100644
--- a/include/linux/sh_eth.h
+++ b/include/linux/sh_eth.h
@@ -16,7 +16,6 @@
 	unsigned char mac_addr[ETH_ALEN];
 	unsigned no_ether_link:1;
 	unsigned ether_link_active_low:1;
-	unsigned needs_init:1;
 };
 
 #endif
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index c6f0f0d..00a1f33 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -116,6 +116,12 @@
 	.show	= _name##_show,						\
 }
 
+#define __ATTR_RO_MODE(_name, _mode) {					\
+	.attr	= { .name = __stringify(_name),				\
+		    .mode = VERIFY_OCTAL_PERMISSIONS(_mode) },		\
+	.show	= _name##_show,						\
+}
+
 #define __ATTR_WO(_name) {						\
 	.attr	= { .name = __stringify(_name), .mode = S_IWUSR },	\
 	.store	= _name##_store,					\
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 647532b..f50b717c 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -219,7 +219,8 @@
 	} rack;
 	u16	advmss;		/* Advertised MSS			*/
 	u8	rate_app_limited:1,  /* rate_{delivered,interval_us} limited? */
-		unused:7;
+		is_sack_reneg:1,    /* in recovery from loss with SACK reneg? */
+		unused:6;
 	u8	nonagle     : 4,/* Disable Nagle algorithm?             */
 		thin_lto    : 1,/* Use linear timeouts for thin streams */
 		thin_dupack : 1,/* Fast retransmit on first dupack      */
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index a700e5f..52bc890 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -47,7 +47,7 @@
 #define THERMAL_WEIGHT_DEFAULT 0
 
 /* Max sensors that can be used for a single virtual thermalzone */
-#define THERMAL_MAX_VIRT_SENSORS 8
+#define THERMAL_MAX_VIRT_SENSORS 10
 
 /* use value, which < 0K, to indicate an invalid/uninitialized temperature */
 #define THERMAL_TEMP_INVALID	-274000
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 5f5107b..c2476e2 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -237,9 +237,11 @@
 unsigned long round_jiffies_up_relative(unsigned long j);
 
 #ifdef CONFIG_HOTPLUG_CPU
+int timers_prepare_cpu(unsigned int cpu);
 int timers_dead_cpu(unsigned int cpu);
 #else
-#define timers_dead_cpu NULL
+#define timers_prepare_cpu	NULL
+#define timers_dead_cpu		NULL
 #endif
 
 #endif
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 40144f3..a41244f 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -394,6 +394,8 @@
 /* tty_io.c */
 extern int __init tty_init(void);
 extern const char *tty_name(const struct tty_struct *tty);
+extern int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout);
+extern void tty_ldisc_unlock(struct tty_struct *tty);
 #else
 static inline void console_init(void)
 { }
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 200c3ab..3fda92f 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -81,12 +81,22 @@
  * @buf_len: Size of each individual buffer is determined based on aggregation
  *	negotiated as per the protocol. In case of no aggregation supported by
  *	the protocol, we use default values.
+ * @db_reg_phs_addr_lsb: IPA channel doorbell register's physical address LSB
+ * @mapped_db_reg_phs_addr_lsb: doorbell LSB IOVA address mapped with IOMMU
+ * @db_reg_phs_addr_msb: IPA channel doorbell register's physical address MSB
+ * @sgt_trb_xfer_ring: USB TRB ring related sgtable entries
+ * @sgt_data_buff: Data buffer related sgtable entries
  */
 struct usb_gsi_request {
 	void *buf_base_addr;
 	dma_addr_t dma;
 	size_t num_bufs;
 	size_t buf_len;
+	u32 db_reg_phs_addr_lsb;
+	dma_addr_t mapped_db_reg_phs_addr_lsb;
+	u32 db_reg_phs_addr_msb;
+	struct sg_table sgt_trb_xfer_ring;
+	struct sg_table sgt_data_buff;
 };
 
 /*
@@ -468,9 +478,6 @@
  * @deactivated: True if gadget is deactivated - in deactivated state it cannot
  *	be connected.
  * @connected: True if gadget is connected.
- * @bam2bam_func_enabled; Indicates function using bam2bam is enabled or not.
- * @extra_buf_alloc: Extra allocation size for AXI prefetch so that out of
- * boundary access is protected.
  *
  * Gadgets have a mostly-portable "gadget driver" implementing device
  * functions, handling all usb configurations and interfaces.  Gadget
@@ -524,9 +531,6 @@
 	unsigned			deactivated:1;
 	unsigned			connected:1;
 	bool				remote_wakeup;
-	bool				bam2bam_func_enabled;
-	u32				extra_buf_alloc;
-	bool				l1_supported;
 };
 #define work_to_gadget(w)	(container_of((w), struct usb_gadget, work))
 
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
deleted file mode 100644
index 53d8458..0000000
--- a/include/linux/usb/msm_hsusb.h
+++ /dev/null
@@ -1,357 +0,0 @@
-/* include/linux/usb/msm_hsusb.h
- *
- * Copyright (C) 2008 Google, Inc.
- * Author: Brian Swetland <swetland@google.com>
- * Copyright (c) 2009-2017, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __ASM_ARCH_MSM_HSUSB_H
-#define __ASM_ARCH_MSM_HSUSB_H
-
-#include <linux/types.h>
-#include <linux/usb/ch9.h>
-#include <linux/usb/gadget.h>
-#include <linux/usb/otg.h>
-#include <linux/clk.h>
-#include <linux/pm_qos.h>
-#include <linux/hrtimer.h>
-#include <linux/power_supply.h>
-#include <linux/cdev.h>
-#include <linux/usb_bam.h>
-#include <linux/extcon.h>
-#include <linux/regulator/driver.h>
-/**
- * Requested USB votes for NOC frequency
- *
- * USB_NOC_NOM_VOTE    Vote for NOM set of NOC frequencies
- * USB_NOC_SVS_VOTE    Vote for SVS set of NOC frequencies
- *
- */
-enum usb_noc_mode {
-	USB_NOC_NOM_VOTE = 0,
-	USB_NOC_SVS_VOTE,
-	USB_NOC_NUM_VOTE,
-};
-
-/**
- * Different states involved in USB charger detection.
- *
- * USB_CHG_STATE_UNDEFINED	USB charger is not connected or detection
- *                              process is not yet started.
- * USB_CHG_STATE_IN_PROGRESS	Charger detection in progress
- * USB_CHG_STATE_WAIT_FOR_DCD	Waiting for Data pins contact.
- * USB_CHG_STATE_DCD_DONE	Data pin contact is detected.
- * USB_CHG_STATE_PRIMARY_DONE	Primary detection is completed (Detects
- *                              between SDP and DCP/CDP).
- * USB_CHG_STATE_SECONDARY_DONE	Secondary detection is completed (Detects
- *                              between DCP and CDP).
- * USB_CHG_STATE_DETECTED	USB charger type is determined.
- *
- */
-enum usb_chg_state {
-	USB_CHG_STATE_UNDEFINED = 0,
-	USB_CHG_STATE_IN_PROGRESS,
-	USB_CHG_STATE_WAIT_FOR_DCD,
-	USB_CHG_STATE_DCD_DONE,
-	USB_CHG_STATE_PRIMARY_DONE,
-	USB_CHG_STATE_SECONDARY_DONE,
-	USB_CHG_STATE_DETECTED,
-};
-
-/**
- * USB charger types
- *
- * USB_INVALID_CHARGER	Invalid USB charger.
- * USB_SDP_CHARGER	Standard downstream port. Refers to a downstream port
- *                      on USB2.0 compliant host/hub.
- * USB_DCP_CHARGER	Dedicated charger port (AC charger/ Wall charger).
- * USB_CDP_CHARGER	Charging downstream port. Enumeration can happen and
- *                      IDEV_CHG_MAX can be drawn irrespective of USB state.
- * USB_NONCOMPLIANT_CHARGER A non-compliant charger pull DP and DM to specific
- *			    voltages between 2.0-3.3v for identification.
- *
- */
-enum usb_chg_type {
-	USB_INVALID_CHARGER = 0,
-	USB_SDP_CHARGER,
-	USB_DCP_CHARGER,
-	USB_CDP_CHARGER,
-	USB_NONCOMPLIANT_CHARGER,
-	USB_FLOATED_CHARGER,
-};
-
-/**
- * Maintain state for hvdcp external charger status
- * DEFAULT	This is used when DCP is detected
- * ACTIVE	This is used when ioctl is called to block LPM
- * INACTIVE	This is used when ioctl is called to unblock LPM
- */
-
-enum usb_ext_chg_status {
-	DEFAULT = 1,
-	ACTIVE,
-	INACTIVE,
-};
-
-/**
- * USB ID state
- */
-enum usb_id_state {
-	USB_ID_GROUND = 0,
-	USB_ID_FLOAT,
-};
-
-#define USB_NUM_BUS_CLOCKS      3
-
-/**
- * struct msm_otg: OTG driver data. Shared by HCD and DCD.
- * @otg: USB OTG Transceiver structure.
- * @pdata: otg device platform data.
- * @irq: IRQ number assigned for HSUSB controller.
- * @async_irq: IRQ number used by some controllers during low power state
- * @phy_irq: IRQ number assigned for PHY to notify events like id and line
-		state changes.
- * @pclk: clock struct of iface_clk.
- * @core_clk: clock struct of core_bus_clk.
- * @sleep_clk: clock struct of sleep_clk for USB PHY.
- * @phy_reset_clk: clock struct of phy_reset_clk for USB PHY. This clock is
-		a reset only clock and resets the PHY, ULPI bridge and
-		CSR wrapper.
- * @phy_por_clk: clock struct of phy_por_clk for USB PHY. This clock is
-		a reset only clock and resets only the PHY (POR).
- * @phy_csr_clk: clock struct of phy_csr_clk for USB PHY. This clock is
-		required to access PHY CSR registers via AHB2PHY interface.
- * @bus_clks: bimc/snoc/pcnoc clock struct.
- * @core_reset: Reset control for core_clk
- * @phy_reset: Reset control for phy_reset_clk
- * @phy_por_reset: Reset control for phy_por_clk
- * @default_noc_mode: default frequency for NOC clocks - SVS or NOM
- * @core_clk_rate: core clk max frequency
- * @regs: ioremapped register base address.
- * @usb_phy_ctrl_reg: relevant PHY_CTRL_REG register base address.
- * @inputs: OTG state machine inputs(Id, SessValid etc).
- * @sm_work: OTG state machine work.
- * @sm_work_pending: OTG state machine work is pending, queued post pm_resume
- * @resume_pending: USB h/w lpm_exit pending. Done on next sm_work run
- * @pm_suspended: OTG device is system(PM) suspended.
- * @pm_notify: Notifier to receive system wide PM transition events.
-		It is used to defer wakeup events processing until
-		system is RESUMED.
- * @in_lpm: indicates low power mode (LPM) state.
- * @async_int: IRQ line on which ASYNC interrupt arrived in LPM.
- * @cur_power: The amount of mA available from downstream port.
- * @otg_wq: Strict order otg workqueue for OTG works (SM/ID/SUSPEND).
- * @chg_work: Charger detection work.
- * @chg_state: The state of charger detection process.
- * @chg_type: The type of charger attached.
- * @bus_perf_client: Bus performance client handle to request BUS bandwidth
- * @host_bus_suspend: indicates host bus suspend or not.
- * @device_bus_suspend: indicates device bus suspend or not.
- * @bus_clks_enabled: indicates pcnoc/snoc/bimc clocks are on or not.
- * @chg_check_timer: The timer used to implement the workaround to detect
- *               very slow plug in of wall charger.
- * @bc1p2_current_max: Max charging current allowed as per bc1.2 chg detection
- * @typec_current_max: Max charging current allowed as per type-c chg detection
- * @is_ext_chg_dcp: To indicate whether charger detected by external entity
-		SMB hardware is DCP charger or not.
- * @ext_id_irq: IRQ for ID interrupt.
- * @phy_irq_pending: Gets set when PHY IRQ arrives in LPM.
- * @id_state: Indicates USBID line status.
- * @rm_pulldown: Indicates pulldown status on D+ and D- data lines.
- * @extcon_vbus: Used for VBUS notification registration.
- * @extcon_id: Used for ID notification registration.
- * @vbus_nb: Notification callback for VBUS event.
- * @id_nb: Notification callback for ID event.
- * @dpdm_desc: Regulator descriptor for D+ and D- voting.
- * @dpdm_rdev: Regulator class device for dpdm regulator.
- * @dbg_idx: Dynamic debug buffer Index.
- * @dbg_lock: Dynamic debug buffer Lock.
- * @buf: Dynamic Debug Buffer.
- * @max_nominal_system_clk_rate: max freq at which system clock can run in
-		nominal mode.
- */
-struct msm_otg {
-	struct usb_phy phy;
-	struct msm_otg_platform_data *pdata;
-	struct platform_device *pdev;
-	int irq;
-	int async_irq;
-	int phy_irq;
-	struct clk *xo_clk;
-	struct clk *pclk;
-	struct clk *core_clk;
-	struct clk *sleep_clk;
-	struct clk *phy_reset_clk;
-	struct clk *phy_por_clk;
-	struct clk *phy_csr_clk;
-	struct clk *bus_clks[USB_NUM_BUS_CLOCKS];
-	struct clk *phy_ref_clk;
-	struct reset_control *core_reset;
-	struct reset_control *phy_reset;
-	struct reset_control *phy_por_reset;
-	long core_clk_rate;
-	long core_clk_svs_rate;
-	long core_clk_nominal_rate;
-	enum usb_noc_mode default_noc_mode;
-	struct resource *io_res;
-	void __iomem *regs;
-	void __iomem *phy_csr_regs;
-	void __iomem *usb_phy_ctrl_reg;
-#define ID		0
-#define B_SESS_VLD	1
-#define A_BUS_SUSPEND	14
-#define B_FALSE_SDP	18
-	unsigned long inputs;
-	struct work_struct sm_work;
-	bool sm_work_pending;
-	bool resume_pending;
-	atomic_t pm_suspended;
-	struct notifier_block pm_notify;
-	atomic_t in_lpm;
-	bool err_event_seen;
-	int async_int;
-	unsigned int cur_power;
-	struct workqueue_struct *otg_wq;
-	struct delayed_work chg_work;
-	struct delayed_work id_status_work;
-	enum usb_chg_state chg_state;
-	enum usb_chg_type chg_type;
-	unsigned int dcd_time;
-	unsigned long caps;
-	uint32_t bus_perf_client;
-	bool host_bus_suspend;
-	bool device_bus_suspend;
-	bool bus_clks_enabled;
-	struct timer_list chg_check_timer;
-	/*
-	 * Allowing PHY power collpase turns off the HSUSB 3.3v and 1.8v
-	 * analog regulators while going to low power mode.
-	 * Currently only 28nm PHY has the support to allowing PHY
-	 * power collapse since it doesn't have leakage currents while
-	 * turning off the power rails.
-	 */
-#define ALLOW_PHY_POWER_COLLAPSE	BIT(0)
-	/*
-	 * Allow PHY RETENTION mode before turning off the digital
-	 * voltage regulator(VDDCX).
-	 */
-#define ALLOW_PHY_RETENTION		BIT(1)
-	/*
-	 * Allow putting the core in Low Power mode, when
-	 * USB bus is suspended but cable is connected.
-	 */
-#define ALLOW_LPM_ON_DEV_SUSPEND	BIT(2)
-	/*
-	 * Allowing PHY regulators LPM puts the HSUSB 3.3v and 1.8v
-	 * analog regulators into LPM while going to USB low power mode.
-	 */
-#define ALLOW_PHY_REGULATORS_LPM	BIT(3)
-	/*
-	 * Allow PHY RETENTION mode before turning off the digital
-	 * voltage regulator(VDDCX) during host mode.
-	 */
-#define ALLOW_HOST_PHY_RETENTION	BIT(4)
-	/*
-	 * Allow VDD minimization without putting PHY into retention
-	 * for fixing PHY current leakage issue when LDOs ar turned off.
-	 */
-#define ALLOW_VDD_MIN_WITH_RETENTION_DISABLED BIT(5)
-
-	/*
-	 * PHY can keep D+ pull-up during peripheral bus suspend and
-	 * D+/D- pull-down during host bus suspend without any
-	 * re-work. This is possible only when PHY DVDD is supplied
-	 * by a PMIC LDO (unlike VDDCX/VDDMX).
-	 */
-#define ALLOW_BUS_SUSPEND_WITHOUT_REWORK BIT(6)
-	unsigned long lpm_flags;
-#define PHY_PWR_COLLAPSED		BIT(0)
-#define PHY_RETENTIONED			BIT(1)
-#define XO_SHUTDOWN			BIT(2)
-#define CLOCKS_DOWN			BIT(3)
-#define PHY_REGULATORS_LPM	BIT(4)
-	int reset_counter;
-	unsigned int online;
-	unsigned int host_mode;
-	unsigned int bc1p2_current_max;
-	unsigned int typec_current_max;
-
-	dev_t ext_chg_dev;
-	struct cdev ext_chg_cdev;
-	struct class *ext_chg_class;
-	struct device *ext_chg_device;
-	bool ext_chg_opened;
-	enum usb_ext_chg_status ext_chg_active;
-	struct completion ext_chg_wait;
-	struct pinctrl *phy_pinctrl;
-	bool is_ext_chg_dcp;
-	struct qpnp_vadc_chip	*vadc_dev;
-	int ext_id_irq;
-	bool phy_irq_pending;
-	enum usb_id_state id_state;
-	bool rm_pulldown;
-	struct extcon_dev       *extcon_vbus;
-	struct extcon_dev       *extcon_id;
-	struct notifier_block   vbus_nb;
-	struct notifier_block   id_nb;
-	struct regulator_desc	dpdm_rdesc;
-	struct regulator_dev	*dpdm_rdev;
-/* Maximum debug message length */
-#define DEBUG_MSG_LEN   128UL
-/* Maximum number of messages */
-#define DEBUG_MAX_MSG   256UL
-	unsigned int dbg_idx;
-	rwlock_t dbg_lock;
-
-	char (buf[DEBUG_MAX_MSG])[DEBUG_MSG_LEN];   /* buffer */
-	unsigned int vbus_state;
-	unsigned int usb_irq_count;
-	int pm_qos_latency;
-	struct pm_qos_request pm_qos_req_dma;
-	struct delayed_work perf_vote_work;
-};
-
-struct ci13xxx_platform_data {
-	u8 usb_core_id;
-	/*
-	 * value of 2^(log2_itc-1) will be used as the interrupt threshold
-	 * (ITC), when log2_itc is between 1 to 7.
-	 */
-	int log2_itc;
-	bool l1_supported;
-	bool enable_ahb2ahb_bypass;
-	bool enable_streaming;
-	bool enable_axi_prefetch;
-};
-
-#ifdef CONFIG_USB_BAM
-void msm_bam_set_usb_host_dev(struct device *dev);
-bool msm_usb_bam_enable(enum usb_ctrl ctrl, bool bam_enable);
-int msm_do_bam_disable_enable(enum usb_ctrl ctrl);
-#else
-static inline void msm_bam_set_usb_host_dev(struct device *dev) {}
-static inline bool msm_usb_bam_enable(enum usb_ctrl ctrl, bool bam_enable)
-{
-	return true;
-}
-int msm_do_bam_disable_enable(enum usb_ctrl ctrl) { return true; }
-#endif
-#ifdef CONFIG_USB_CI13XXX_MSM
-void msm_hw_soft_reset(void);
-#else
-static inline void msm_hw_soft_reset(void)
-{
-}
-#endif
-
-#endif
diff --git a/include/linux/usb/msm_hsusb_hw.h b/include/linux/usb/msm_hsusb_hw.h
index b86f127..974c379 100644
--- a/include/linux/usb/msm_hsusb_hw.h
+++ b/include/linux/usb/msm_hsusb_hw.h
@@ -21,14 +21,10 @@
 
 #define USB_AHBBURST         (MSM_USB_BASE + 0x0090)
 #define USB_AHBMODE          (MSM_USB_BASE + 0x0098)
-#define USB_GENCONFIG        (MSM_USB_BASE + 0x009C)
 #define USB_GENCONFIG_2      (MSM_USB_BASE + 0x00a0)
 #define ULPI_TX_PKT_EN_CLR_FIX	BIT(19)
 
 #define USB_CAPLENGTH        (MSM_USB_BASE + 0x0100) /* 8 bit */
-#define USB_HS_APF_CTRL      (MSM_USB_BASE + 0x0380)
-
-#define APF_CTRL_EN		BIT(0)
 
 #define USB_USBCMD           (MSM_USB_BASE + 0x0140)
 #define USB_PORTSC           (MSM_USB_BASE + 0x0184)
@@ -38,32 +34,15 @@
 #define USB_PHY_CTRL2        (MSM_USB_BASE + 0x0278)
 
 #define GENCONFIG_2_SESS_VLD_CTRL_EN	BIT(7)
-#define GENCONFIG_2_LINESTATE_DIFF_WAKEUP_EN	BIT(12)
-#define GENCONFIG_2_DPSE_DMSE_HV_INTR_EN	BIT(15)
 #define USBCMD_SESS_VLD_CTRL		BIT(25)
 
 #define USBCMD_RESET   2
 #define USB_USBINTR          (MSM_USB_BASE + 0x0148)
 
-#define USB_L1_EP_CTRL       (MSM_USB_BASE + 0x0250)
-#define USB_L1_CONFIG        (MSM_USB_BASE + 0x0254)
-
-#define L1_CONFIG_LPM_EN        BIT(4)
-#define L1_CONFIG_REMOTE_WAKEUP BIT(5)
-#define L1_CONFIG_GATE_SYS_CLK	BIT(7)
-#define L1_CONFIG_PHY_LPM	BIT(10)
-#define L1_CONFIG_PLL		BIT(11)
-#define AHB2AHB_BYPASS          BIT(31)
-#define AHB2AHB_BYPASS_BIT_MASK        BIT(31)
-#define AHB2AHB_BYPASS_CLEAR   (0 << 31)
-
 #define PORTSC_PHCD            (1 << 23) /* phy suspend mode */
 #define PORTSC_PTS_MASK        (3 << 30)
 #define PORTSC_PTS_ULPI        (2 << 30)
 #define PORTSC_PTS_SERIAL      (3 << 30)
-#define PORTSC_LS	       (3 << 10)
-#define PORTSC_LS_DM	       (1 << 10)
-#define PORTSC_CCS	       (1 << 0)
 
 #define USB_ULPI_VIEWPORT    (MSM_USB_BASE + 0x0170)
 #define ULPI_RUN              (1 << 30)
@@ -73,10 +52,6 @@
 #define ULPI_DATA(n)          ((n) & 255)
 #define ULPI_DATA_READ(n)     (((n) >> 8) & 255)
 
-#define GENCONFIG_BAM_DISABLE (1 << 13)
-#define GENCONFIG_TXFIFO_IDLE_FORCE_DISABLE (1 << 4)
-#define GENCONFIG_ULPI_SERIAL_EN (1 << 5)
-
 /* synopsys 28nm phy registers */
 #define ULPI_PWR_CLK_MNG_REG	0x88
 #define OTG_COMP_DISABLE	BIT(0)
@@ -88,16 +63,10 @@
 #define ASYNC_INTR_CTRL         (1 << 29) /* Enable async interrupt */
 #define ULPI_STP_CTRL           (1 << 30) /* Block communication with PHY */
 #define PHY_RETEN               (1 << 1) /* PHY retention enable/disable */
-#define PHY_IDHV_INTEN          (1 << 8) /* PHY ID HV interrupt */
-#define PHY_OTGSESSVLDHV_INTEN  (1 << 9) /* PHY Session Valid HV int. */
-#define PHY_CLAMP_DPDMSE_EN	(1 << 21) /* PHY mpm DP DM clamp enable */
-#define PHY_POR_BIT_MASK        BIT(0)
 #define PHY_POR_ASSERT		(1 << 0) /* USB2 28nm PHY POR ASSERT */
-#define PHY_POR_DEASSERT        (0 << 0) /* USB2 28nm PHY POR DEASSERT */
 
 /* OTG definitions */
 #define OTGSC_INTSTS_MASK	(0x7f << 16)
-#define OTGSC_IDPU		(1 << 5)
 #define OTGSC_ID		(1 << 8)
 #define OTGSC_BSV		(1 << 11)
 #define OTGSC_IDIS		(1 << 16)
@@ -105,29 +74,4 @@
 #define OTGSC_IDIE		(1 << 24)
 #define OTGSC_BSVIE		(1 << 27)
 
-/* USB PHY CSR registers and bit definitions */
-
-#define USB_PHY_CSR_PHY_CTRL_COMMON0 (MSM_USB_PHY_CSR_BASE + 0x078)
-#define SIDDQ BIT(2)
-
-#define USB_PHY_CSR_PHY_CTRL1 (MSM_USB_PHY_CSR_BASE + 0x08C)
-#define ID_HV_CLAMP_EN_N BIT(1)
-
-#define USB_PHY_CSR_PHY_CTRL3 (MSM_USB_PHY_CSR_BASE + 0x094)
-#define CLAMP_MPM_DPSE_DMSE_EN_N BIT(2)
-
-#define USB2_PHY_USB_PHY_IRQ_CMD (MSM_USB_PHY_CSR_BASE + 0x0D0)
-#define USB2_PHY_USB_PHY_INTERRUPT_SRC_STATUS (MSM_USB_PHY_CSR_BASE + 0x05C)
-
-#define USB2_PHY_USB_PHY_INTERRUPT_CLEAR0 (MSM_USB_PHY_CSR_BASE + 0x0DC)
-#define USB2_PHY_USB_PHY_INTERRUPT_CLEAR1 (MSM_USB_PHY_CSR_BASE + 0x0E0)
-
-#define USB2_PHY_USB_PHY_INTERRUPT_MASK1 (MSM_USB_PHY_CSR_BASE + 0x0D8)
-
-#define USB_PHY_IDDIG_1_0 BIT(7)
-
-#define USB_PHY_IDDIG_RISE_MASK BIT(0)
-#define USB_PHY_IDDIG_FALL_MASK BIT(1)
-#define USB_PHY_ID_MASK (USB_PHY_IDDIG_RISE_MASK | USB_PHY_IDDIG_FALL_MASK)
-
 #endif /* __LINUX_USB_GADGET_MSM72K_UDC_H__ */
diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h
index d999b3c..64aa52e 100644
--- a/include/linux/usb/phy.h
+++ b/include/linux/usb/phy.h
@@ -58,7 +58,6 @@
 	OTG_STATE_B_SRP_INIT,
 	OTG_STATE_B_PERIPHERAL,
 	OTG_STATE_B_SUSPEND,
-	OTG_STATE_B_CHARGER,
 
 	/* extra dual-role default-b states */
 	OTG_STATE_B_WAIT_ACON,
@@ -142,10 +141,6 @@
 
 	/* reset the PHY clocks */
 	int     (*reset)(struct usb_phy *x);
-
-	/* for notification of usb_phy_dbg_events */
-	void    (*dbg_event)(struct usb_phy *x,
-			char *event, int msg1, int msg2);
 	int	(*disable_chirp)(struct usb_phy *x, bool disable);
 };
 
diff --git a/include/linux/usb/usb_qdss.h b/include/linux/usb/usb_qdss.h
index b58d8ee..fe626c18 100644
--- a/include/linux/usb/usb_qdss.h
+++ b/include/linux/usb/usb_qdss.h
@@ -15,6 +15,9 @@
 
 #include <linux/kernel.h>
 
+#define USB_QDSS_CH_MDM	"qdss_mdm"
+#define USB_QDSS_CH_MSM	"qdss"
+
 struct qdss_request {
 	char *buf;
 	int length;
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 6e0ce8c..fde7550 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -79,6 +79,7 @@
 #		define EVENT_RX_KILL	10
 #		define EVENT_LINK_CHANGE	11
 #		define EVENT_SET_RX_MODE	12
+#		define EVENT_NO_IP_ALIGN	13
 };
 
 static inline struct usb_driver *driver_of(struct usb_interface *intf)
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index 9638bfe..584f9a6 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -48,6 +48,8 @@
 	struct virtio_vsock_hdr	hdr;
 	struct work_struct work;
 	struct list_head list;
+	/* socket refcnt not held, only use for cancellation */
+	struct vsock_sock *vsk;
 	void *buf;
 	u32 len;
 	u32 off;
@@ -56,6 +58,7 @@
 
 struct virtio_vsock_pkt_info {
 	u32 remote_cid, remote_port;
+	struct vsock_sock *vsk;
 	struct msghdr *msg;
 	u32 pkt_len;
 	u16 type;
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 9cc195f..a9c2e4c 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -89,10 +89,8 @@
 #endif
 #endif
 #ifdef CONFIG_DEBUG_TLBFLUSH
-#ifdef CONFIG_SMP
 		NR_TLB_REMOTE_FLUSH,	/* cpu tried to flush others' tlbs */
 		NR_TLB_REMOTE_FLUSH_RECEIVED,/* cpu received ipi for flush */
-#endif /* CONFIG_SMP */
 		NR_TLB_LOCAL_FLUSH_ALL,
 		NR_TLB_LOCAL_FLUSH_ONE,
 #endif /* CONFIG_DEBUG_TLBFLUSH */
diff --git a/include/linux/wcnss_wlan.h b/include/linux/wcnss_wlan.h
new file mode 100644
index 0000000..b37f8df
--- /dev/null
+++ b/include/linux/wcnss_wlan.h
@@ -0,0 +1,167 @@
+/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _WCNSS_WLAN_H_
+#define _WCNSS_WLAN_H_
+
+#include <linux/device.h>
+#include <linux/sched.h>
+
+#define IRIS_REGULATORS		4
+#define PRONTO_REGULATORS	3
+
+enum wcnss_opcode {
+	WCNSS_WLAN_SWITCH_OFF = 0,
+	WCNSS_WLAN_SWITCH_ON,
+};
+
+enum wcnss_hw_type {
+	WCNSS_RIVA_HW = 0,
+	WCNSS_PRONTO_HW,
+};
+
+struct vregs_level {
+	int nominal_min;
+	int low_power_min;
+	int max_voltage;
+	int uA_load;
+};
+
+struct wcnss_wlan_config {
+	int	use_48mhz_xo;
+	int	is_pronto_vadc;
+	int	is_pronto_v3;
+	void __iomem	*msm_wcnss_base;
+	int	iris_id;
+	int	vbatt;
+	struct vregs_level pronto_vlevel[PRONTO_REGULATORS];
+	struct vregs_level iris_vlevel[IRIS_REGULATORS];
+};
+
+enum {
+	WCNSS_XO_48MHZ = 1,
+	WCNSS_XO_19MHZ,
+	WCNSS_XO_INVALID,
+};
+
+enum {
+	WCNSS_WLAN_DATA2,
+	WCNSS_WLAN_DATA1,
+	WCNSS_WLAN_DATA0,
+	WCNSS_WLAN_SET,
+	WCNSS_WLAN_CLK,
+	WCNSS_WLAN_MAX_GPIO,
+};
+
+#define WCNSS_VBATT_THRESHOLD           3500000
+#define WCNSS_VBATT_GUARD               20000
+#define WCNSS_VBATT_HIGH                3700000
+#define WCNSS_VBATT_LOW                 3300000
+#define WCNSS_VBATT_INITIAL             3000000
+#define WCNSS_WLAN_IRQ_INVALID -1
+#define HAVE_WCNSS_SUSPEND_RESUME_NOTIFY 1
+#define HAVE_WCNSS_RESET_INTR 1
+#define HAVE_WCNSS_CAL_DOWNLOAD 1
+#define HAVE_CBC_DONE 1
+#define HAVE_WCNSS_RX_BUFF_COUNT 1
+#define HAVE_WCNSS_SNOC_HIGH_FREQ_VOTING 1
+#define HAVE_WCNSS_5G_DISABLE 1
+#define WLAN_MAC_ADDR_SIZE (6)
+#define WLAN_RF_REG_ADDR_START_OFFSET	0x3
+#define WLAN_RF_REG_DATA_START_OFFSET	0xf
+#define WLAN_RF_READ_REG_CMD		0x3
+#define WLAN_RF_WRITE_REG_CMD		0x2
+#define WLAN_RF_READ_CMD_MASK		0x3fff
+#define WLAN_RF_CLK_WAIT_CYCLE		2
+#define WLAN_RF_PREPARE_CMD_DATA	5
+#define WLAN_RF_READ_DATA		6
+#define WLAN_RF_DATA_LEN		3
+#define WLAN_RF_DATA0_SHIFT		0
+#define WLAN_RF_DATA1_SHIFT		1
+#define WLAN_RF_DATA2_SHIFT		2
+#define PRONTO_PMU_OFFSET       0x1004
+#define WCNSS_PMU_CFG_GC_BUS_MUX_SEL_TOP   BIT(5)
+
+struct device *wcnss_wlan_get_device(void);
+void wcnss_get_monotonic_boottime(struct timespec *ts);
+struct resource *wcnss_wlan_get_memory_map(struct device *dev);
+int wcnss_wlan_get_dxe_tx_irq(struct device *dev);
+int wcnss_wlan_get_dxe_rx_irq(struct device *dev);
+void wcnss_wlan_register_pm_ops(struct device *dev,
+				const struct dev_pm_ops *pm_ops);
+void wcnss_wlan_unregister_pm_ops(struct device *dev,
+				  const struct dev_pm_ops *pm_ops);
+void wcnss_register_thermal_mitigation(struct device *dev,
+				       void (*tm_notify)(struct device *dev,
+							 int));
+void wcnss_unregister_thermal_mitigation(void (*tm_notify)(struct device *dev,
+							   int));
+struct platform_device *wcnss_get_platform_device(void);
+struct wcnss_wlan_config *wcnss_get_wlan_config(void);
+void wcnss_set_iris_xo_mode(int iris_xo_mode_set);
+int wcnss_wlan_power(struct device *dev,
+		     struct wcnss_wlan_config *cfg,
+		     enum wcnss_opcode opcode,
+		     int *iris_xo_mode_set);
+int wcnss_req_power_on_lock(char *driver_name);
+int wcnss_free_power_on_lock(char *driver_name);
+unsigned int wcnss_get_serial_number(void);
+int wcnss_get_wlan_mac_address(char mac_addr[WLAN_MAC_ADDR_SIZE]);
+void wcnss_allow_suspend(void);
+void wcnss_prevent_suspend(void);
+int wcnss_hardware_type(void);
+void *wcnss_prealloc_get(unsigned int size);
+int wcnss_prealloc_put(void *ptr);
+void wcnss_reset_fiq(bool clk_chk_en);
+void wcnss_suspend_notify(void);
+void wcnss_resume_notify(void);
+void wcnss_riva_log_debug_regs(void);
+void wcnss_pronto_log_debug_regs(void);
+int wcnss_is_hw_pronto_ver3(void);
+int wcnss_device_ready(void);
+bool wcnss_cbc_complete(void);
+int wcnss_device_is_shutdown(void);
+void wcnss_riva_dump_pmic_regs(void);
+int wcnss_xo_auto_detect_enabled(void);
+u32 wcnss_get_wlan_rx_buff_count(void);
+int wcnss_wlan_iris_xo_mode(void);
+int wcnss_wlan_dual_band_disabled(void);
+void wcnss_flush_work(struct work_struct *work);
+void wcnss_flush_delayed_work(struct delayed_work *dwork);
+void wcnss_init_work(struct work_struct *work, void *callbackptr);
+void wcnss_init_delayed_work(struct delayed_work *dwork, void *callbackptr);
+int wcnss_get_iris_name(char *iris_version);
+void wcnss_dump_stack(struct task_struct *task);
+void wcnss_snoc_vote(bool clk_chk_en);
+int wcnss_parse_voltage_regulator(struct wcnss_wlan_config *wlan_config,
+				  struct device *dev);
+
+#ifdef CONFIG_WCNSS_REGISTER_DUMP_ON_BITE
+void wcnss_log_debug_regs_on_bite(void);
+#else
+static inline void wcnss_log_debug_regs_on_bite(void)
+{
+}
+#endif
+int wcnss_set_wlan_unsafe_channel(
+				u16 *unsafe_ch_list, u16 ch_count);
+int wcnss_get_wlan_unsafe_channel(
+				u16 *unsafe_ch_list, u16 buffer_size,
+				u16 *ch_count);
+#define wcnss_wlan_get_drvdata(dev) dev_get_drvdata(dev)
+#define wcnss_wlan_set_drvdata(dev, data) dev_set_drvdata((dev), (data))
+/* WLAN driver uses these names */
+#define req_riva_power_on_lock(name) wcnss_req_power_on_lock(name)
+#define free_riva_power_on_lock(name) wcnss_free_power_on_lock(name)
+
+#endif /* _WCNSS_WLAN_H_ */
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index f275896..f32ed9a 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -100,6 +100,9 @@
 	void (*destruct)(struct vsock_sock *);
 	void (*release)(struct vsock_sock *);
 
+	/* Cancel all pending packets sent on vsock. */
+	int (*cancel_pkt)(struct vsock_sock *vsk);
+
 	/* Connections. */
 	int (*connect)(struct vsock_sock *);
 
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 63ce902..d5e79f1 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -55,6 +55,12 @@
  */
 #define CFG80211_ROAMED_API_UNIFIED 1
 
+/* Indicate backport support for DBS scan control */
+#define CFG80211_SCAN_DBS_CONTROL_SUPPORT 1
+
+/* Indicate backport support for per chain rssi scan */
+#define CFG80211_SCAN_PER_CHAIN_RSSI_SUPPORT 1
+
 /**
  * DOC: Introduction
  *
@@ -1742,6 +1748,8 @@
  *	by %parent_bssid.
  * @parent_bssid: the BSS according to which %parent_tsf is set. This is set to
  *	the BSS that requested the scan in which the beacon/probe was received.
+ * @chains: bitmask for filled values in @chain_signal.
+ * @chain_signal: per-chain signal strength of last received BSS in dBm.
  */
 struct cfg80211_inform_bss {
 	struct ieee80211_channel *chan;
@@ -1750,6 +1758,8 @@
 	u64 boottime_ns;
 	u64 parent_tsf;
 	u8 parent_bssid[ETH_ALEN] __aligned(2);
+	u8 chains;
+	s8 chain_signal[IEEE80211_MAX_CHAINS];
 };
 
 /**
@@ -1793,6 +1803,8 @@
  *	that holds the beacon data. @beacon_ies is still valid, of course, and
  *	points to the same data as hidden_beacon_bss->beacon_ies in that case.
  * @signal: signal strength value (type depends on the wiphy's signal_type)
+ * @chains: bitmask for filled values in @chain_signal.
+ * @chain_signal: per-chain signal strength of last received BSS in dBm.
  * @priv: private area for driver use, has at least wiphy->bss_priv_size bytes
  */
 struct cfg80211_bss {
@@ -1811,6 +1823,8 @@
 	u16 capability;
 
 	u8 bssid[ETH_ALEN];
+	u8 chains;
+	s8 chain_signal[IEEE80211_MAX_CHAINS];
 
 	u8 priv[0] __aligned(sizeof(void *));
 };
diff --git a/include/net/cnss2.h b/include/net/cnss2.h
new file mode 100644
index 0000000..ca2de60
--- /dev/null
+++ b/include/net/cnss2.h
@@ -0,0 +1,193 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _NET_CNSS2_H
+#define _NET_CNSS2_H
+
+#include <linux/pci.h>
+
+#define CNSS_MAX_FILE_NAME		20
+#define CNSS_MAX_TIMESTAMP_LEN		32
+
+/*
+ * Temporary change for compilation, will be removed
+ * after WLAN host driver switched to use new APIs
+ */
+#define CNSS_API_WITH_DEV
+
+enum cnss_bus_width_type {
+	CNSS_BUS_WIDTH_NONE,
+	CNSS_BUS_WIDTH_LOW,
+	CNSS_BUS_WIDTH_MEDIUM,
+	CNSS_BUS_WIDTH_HIGH
+};
+
+enum cnss_platform_cap_flag {
+	CNSS_HAS_EXTERNAL_SWREG = 0x01,
+	CNSS_HAS_UART_ACCESS = 0x02,
+};
+
+struct cnss_platform_cap {
+	u32 cap_flag;
+};
+
+struct cnss_fw_files {
+	char image_file[CNSS_MAX_FILE_NAME];
+	char board_data[CNSS_MAX_FILE_NAME];
+	char otp_data[CNSS_MAX_FILE_NAME];
+	char utf_file[CNSS_MAX_FILE_NAME];
+	char utf_board_data[CNSS_MAX_FILE_NAME];
+	char epping_file[CNSS_MAX_FILE_NAME];
+	char evicted_data[CNSS_MAX_FILE_NAME];
+};
+
+struct cnss_soc_info {
+	void __iomem *va;
+	phys_addr_t pa;
+	uint32_t chip_id;
+	uint32_t chip_family;
+	uint32_t board_id;
+	uint32_t soc_id;
+	uint32_t fw_version;
+	char fw_build_timestamp[CNSS_MAX_TIMESTAMP_LEN + 1];
+};
+
+struct cnss_wlan_runtime_ops {
+	int (*runtime_suspend)(struct pci_dev *pdev);
+	int (*runtime_resume)(struct pci_dev *pdev);
+};
+
+struct cnss_wlan_driver {
+	char *name;
+	int  (*probe)(struct pci_dev *pdev, const struct pci_device_id *id);
+	void (*remove)(struct pci_dev *pdev);
+	int  (*reinit)(struct pci_dev *pdev, const struct pci_device_id *id);
+	void (*shutdown)(struct pci_dev *pdev);
+	void (*crash_shutdown)(struct pci_dev *pdev);
+	int  (*suspend)(struct pci_dev *pdev, pm_message_t state);
+	int  (*resume)(struct pci_dev *pdev);
+	int  (*suspend_noirq)(struct pci_dev *pdev);
+	int  (*resume_noirq)(struct pci_dev *pdev);
+	void (*modem_status)(struct pci_dev *, int state);
+	void (*update_status)(struct pci_dev *pdev, uint32_t status);
+	struct cnss_wlan_runtime_ops *runtime_ops;
+	const struct pci_device_id *id_table;
+};
+
+enum cnss_driver_status {
+	CNSS_UNINITIALIZED,
+	CNSS_INITIALIZED,
+	CNSS_LOAD_UNLOAD,
+	CNSS_RECOVERY,
+};
+
+struct cnss_ce_tgt_pipe_cfg {
+	u32 pipe_num;
+	u32 pipe_dir;
+	u32 nentries;
+	u32 nbytes_max;
+	u32 flags;
+	u32 reserved;
+};
+
+struct cnss_ce_svc_pipe_cfg {
+	u32 service_id;
+	u32 pipe_dir;
+	u32 pipe_num;
+};
+
+struct cnss_shadow_reg_cfg {
+	u16 ce_id;
+	u16 reg_offset;
+};
+
+struct cnss_shadow_reg_v2_cfg {
+	u32 addr;
+};
+
+struct cnss_wlan_enable_cfg {
+	u32 num_ce_tgt_cfg;
+	struct cnss_ce_tgt_pipe_cfg *ce_tgt_cfg;
+	u32 num_ce_svc_pipe_cfg;
+	struct cnss_ce_svc_pipe_cfg *ce_svc_cfg;
+	u32 num_shadow_reg_cfg;
+	struct cnss_shadow_reg_cfg *shadow_reg_cfg;
+	u32 num_shadow_reg_v2_cfg;
+	struct cnss_shadow_reg_v2_cfg *shadow_reg_v2_cfg;
+};
+
+enum cnss_driver_mode {
+	CNSS_MISSION,
+	CNSS_FTM,
+	CNSS_EPPING,
+	CNSS_WALTEST,
+	CNSS_OFF,
+	CNSS_CCPM,
+	CNSS_QVIT,
+	CNSS_CALIBRATION,
+};
+
+enum cnss_recovery_reason {
+	CNSS_REASON_DEFAULT,
+	CNSS_REASON_LINK_DOWN,
+	CNSS_REASON_RDDM,
+	CNSS_REASON_TIMEOUT,
+};
+
+extern int cnss_wlan_register_driver(struct cnss_wlan_driver *driver);
+extern void cnss_wlan_unregister_driver(struct cnss_wlan_driver *driver);
+extern void cnss_device_crashed(struct device *dev);
+extern int cnss_pci_link_down(struct device *dev);
+extern void cnss_schedule_recovery(struct device *dev,
+				   enum cnss_recovery_reason reason);
+extern int cnss_self_recovery(struct device *dev,
+			      enum cnss_recovery_reason reason);
+extern int cnss_force_fw_assert(struct device *dev);
+extern void *cnss_get_virt_ramdump_mem(struct device *dev, unsigned long *size);
+extern int cnss_get_fw_files_for_target(struct device *dev,
+					struct cnss_fw_files *pfw_files,
+					u32 target_type, u32 target_version);
+extern int cnss_get_platform_cap(struct device *dev,
+				 struct cnss_platform_cap *cap);
+extern int cnss_get_soc_info(struct device *dev, struct cnss_soc_info *info);
+extern int cnss_request_bus_bandwidth(struct device *dev, int bandwidth);
+extern int cnss_power_up(struct device *dev);
+extern int cnss_power_down(struct device *dev);
+extern void cnss_request_pm_qos(struct device *dev, u32 qos_val);
+extern void cnss_remove_pm_qos(struct device *dev);
+extern void cnss_lock_pm_sem(struct device *dev);
+extern void cnss_release_pm_sem(struct device *dev);
+extern int cnss_wlan_pm_control(struct device *dev, bool vote);
+extern int cnss_auto_suspend(struct device *dev);
+extern int cnss_auto_resume(struct device *dev);
+extern int cnss_get_user_msi_assignment(struct device *dev, char *user_name,
+					int *num_vectors,
+					uint32_t *user_base_data,
+					uint32_t *base_vector);
+extern int cnss_get_msi_irq(struct device *dev, unsigned int vector);
+extern void cnss_get_msi_address(struct device *dev, uint32_t *msi_addr_low,
+				 uint32_t *msi_addr_high);
+extern int cnss_wlan_enable(struct device *dev,
+			    struct cnss_wlan_enable_cfg *config,
+			    enum cnss_driver_mode mode,
+			    const char *host_version);
+extern int cnss_wlan_disable(struct device *dev, enum cnss_driver_mode mode);
+extern unsigned int cnss_get_qmi_timeout(void);
+extern int cnss_athdiag_read(struct device *dev, uint32_t offset,
+			     uint32_t mem_type, uint32_t data_len,
+			     uint8_t *output);
+extern int cnss_athdiag_write(struct device *dev, uint32_t offset,
+			      uint32_t mem_type, uint32_t data_len,
+			      uint8_t *input);
+extern int cnss_set_fw_log_mode(struct device *dev, uint8_t fw_log_mode);
+
+#endif /* _NET_CNSS2_H */
diff --git a/include/net/ip.h b/include/net/ip.h
index 4ef6792..ad065a4 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -33,6 +33,8 @@
 #include <net/flow.h>
 #include <net/flow_dissector.h>
 
+#define IPV4_MIN_MTU		68			/* RFC 791 */
+
 struct sock;
 
 struct inet_skb_parm {
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 2c7d876..8fd61bc 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1007,7 +1007,7 @@
  * @RX_FLAG_DECRYPTED: This frame was decrypted in hardware.
  * @RX_FLAG_MMIC_STRIPPED: the Michael MIC is stripped off this frame,
  *	verification has been done by the hardware.
- * @RX_FLAG_IV_STRIPPED: The IV/ICV are stripped from this frame.
+ * @RX_FLAG_IV_STRIPPED: The IV and ICV are stripped from this frame.
  *	If this flag is set, the stack cannot do any replay detection
  *	hence the driver or hardware will have to do that.
  * @RX_FLAG_PN_VALIDATED: Currently only valid for CCMP/GCMP frames, this
@@ -1078,6 +1078,8 @@
  * @RX_FLAG_ALLOW_SAME_PN: Allow the same PN as same packet before.
  *	This is used for AMSDU subframes which can have the same PN as
  *	the first subframe.
+ * @RX_FLAG_ICV_STRIPPED: The ICV is stripped from this frame. CRC checking must
+ *	be done in the hardware.
  */
 enum mac80211_rx_flags {
 	RX_FLAG_MMIC_ERROR		= BIT(0),
@@ -1113,6 +1115,7 @@
 	RX_FLAG_RADIOTAP_VENDOR_DATA	= BIT(31),
 	RX_FLAG_MIC_STRIPPED		= BIT_ULL(32),
 	RX_FLAG_ALLOW_SAME_PN		= BIT_ULL(33),
+	RX_FLAG_ICV_STRIPPED		= BIT_ULL(34),
 };
 
 #define RX_FLAG_STBC_SHIFT		26
diff --git a/include/net/sock.h b/include/net/sock.h
index 97f8ed2..badd144 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -649,11 +649,7 @@
 
 static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
 {
-	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
-	    sk->sk_family == AF_INET6)
-		hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
-	else
-		hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
+	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
 }
 
 static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 775c3bd..448aec0 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1017,7 +1017,7 @@
 void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
 			    struct rate_sample *rs);
 void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
-		  struct skb_mstamp *now, struct rate_sample *rs);
+		  bool is_sack_reneg, struct skb_mstamp *now, struct rate_sample *rs);
 void tcp_rate_check_app_limited(struct sock *sk);
 
 /* These functions determine how the current flow behaves in respect of SACK
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index 1beab55..818a38f 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -243,10 +243,11 @@
 static inline enum ib_mtu iboe_get_mtu(int mtu)
 {
 	/*
-	 * reduce IB headers from effective IBoE MTU. 28 stands for
-	 * atomic header which is the biggest possible header after BTH
+	 * Reduce IB headers from effective IBoE MTU.
 	 */
-	mtu = mtu - IB_GRH_BYTES - IB_BTH_BYTES - 28;
+	mtu = mtu - (IB_GRH_BYTES + IB_UDP_BYTES + IB_BTH_BYTES +
+		     IB_EXT_XRC_BYTES + IB_EXT_ATOMICETH_BYTES +
+		     IB_ICRC_BYTES);
 
 	if (mtu >= ib_mtu_enum_to_int(IB_MTU_4096))
 		return IB_MTU_4096;
diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h
index b13419c..e02b78a 100644
--- a/include/rdma/ib_pack.h
+++ b/include/rdma/ib_pack.h
@@ -37,14 +37,17 @@
 #include <uapi/linux/if_ether.h>
 
 enum {
-	IB_LRH_BYTES  = 8,
-	IB_ETH_BYTES  = 14,
-	IB_VLAN_BYTES = 4,
-	IB_GRH_BYTES  = 40,
-	IB_IP4_BYTES  = 20,
-	IB_UDP_BYTES  = 8,
-	IB_BTH_BYTES  = 12,
-	IB_DETH_BYTES = 8
+	IB_LRH_BYTES		= 8,
+	IB_ETH_BYTES		= 14,
+	IB_VLAN_BYTES		= 4,
+	IB_GRH_BYTES		= 40,
+	IB_IP4_BYTES		= 20,
+	IB_UDP_BYTES		= 8,
+	IB_BTH_BYTES		= 12,
+	IB_DETH_BYTES		= 8,
+	IB_EXT_ATOMICETH_BYTES	= 28,
+	IB_EXT_XRC_BYTES	= 4,
+	IB_ICRC_BYTES		= 4
 };
 
 struct ib_field {
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index dae99d7..706a701 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -165,11 +165,11 @@
 
 struct sata_device {
 	unsigned int class;
-	struct smp_resp        rps_resp; /* report_phy_sata_resp */
 	u8     port_no;        /* port number, if this is a PM (Port) */
 
 	struct ata_port *ap;
 	struct ata_host ata_host;
+	struct smp_resp rps_resp ____cacheline_aligned; /* report_phy_sata_resp */
 	u8     fis[ATA_RESP_FIS_SIZE];
 };
 
diff --git a/include/soc/qcom/icnss.h b/include/soc/qcom/icnss.h
index ad38816..31b4cce 100644
--- a/include/soc/qcom/icnss.h
+++ b/include/soc/qcom/icnss.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -23,7 +23,6 @@
 #endif
 
 enum icnss_uevent {
-	ICNSS_UEVENT_FW_READY,
 	ICNSS_UEVENT_FW_CRASHED,
 	ICNSS_UEVENT_FW_DOWN,
 };
diff --git a/include/soc/qcom/memory_dump.h b/include/soc/qcom/memory_dump.h
index b4733d7..5bc50b5 100644
--- a/include/soc/qcom/memory_dump.h
+++ b/include/soc/qcom/memory_dump.h
@@ -86,6 +86,7 @@
 	MSM_DUMP_DATA_FCM = 0xEE,
 	MSM_DUMP_DATA_POWER_REGS = 0xED,
 	MSM_DUMP_DATA_TMC_ETF = 0xF0,
+	MSM_DUMP_DATA_TPDM_SWAO_MCMB = 0xF2,
 	MSM_DUMP_DATA_TMC_REG = 0x100,
 	MSM_DUMP_DATA_LOG_BUF = 0x110,
 	MSM_DUMP_DATA_LOG_BUF_FIRST_IDX = 0x111,
diff --git a/include/soc/qcom/msm_tz_smmu.h b/include/soc/qcom/msm_tz_smmu.h
index a83c9bd..43a3069 100644
--- a/include/soc/qcom/msm_tz_smmu.h
+++ b/include/soc/qcom/msm_tz_smmu.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -56,6 +56,20 @@
 int msm_tz_set_cb_format(enum tz_smmu_device_id sec_id, int cbndx);
 int msm_iommu_sec_pgtbl_init(void);
 int register_iommu_sec_ptbl(void);
+bool arm_smmu_skip_write(void __iomem *addr);
+
+/* Donot write to smmu global space with CONFIG_MSM_TZ_SMMU */
+#undef writel_relaxed
+#undef writeq_relaxed
+#define writel_relaxed(v, c)	do {					\
+	if (!arm_smmu_skip_write(c))					\
+		((void)__raw_writel((__force u32)cpu_to_le32(v), (c)));	\
+	} while (0)
+
+#define writeq_relaxed(v, c) do {					\
+	if (!arm_smmu_skip_write(c))					\
+		((void)__raw_writeq((__force u64)cpu_to_le64(v), (c)));	\
+	} while (0)
 #else
 
 static inline int msm_tz_smmu_atos_start(struct device *dev, int cb_num)
diff --git a/include/soc/qcom/socinfo.h b/include/soc/qcom/socinfo.h
index 9e91e4b..505e82b 100644
--- a/include/soc/qcom/socinfo.h
+++ b/include/soc/qcom/socinfo.h
@@ -110,6 +110,8 @@
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8953")
 #define early_machine_is_sdm450()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm450")
+#define early_machine_is_sdm632()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm632")
 #else
 #define of_board_is_sim()		0
 #define of_board_is_rumi()		0
@@ -154,6 +156,7 @@
 #define early_machine_is_sda670()	0
 #define early_machine_is_msm8953()	0
 #define early_machine_is_sdm450()	0
+#define early_machine_is_sdm632()	0
 #endif
 
 #define PLATFORM_SUBTYPE_MDM	1
@@ -220,6 +223,8 @@
 	MSM_CPU_SDA670,
 	MSM_CPU_8953,
 	MSM_CPU_SDM450,
+	MSM_CPU_SDM632,
+	MSM_CPU_SDA632,
 };
 
 struct msm_soc_info {
diff --git a/include/soc/qcom/system_pm.h b/include/soc/qcom/system_pm.h
index 6d0993a..028c729 100644
--- a/include/soc/qcom/system_pm.h
+++ b/include/soc/qcom/system_pm.h
@@ -14,13 +14,15 @@
 #define __SOC_QCOM_SYS_PM_H__
 
 #ifdef CONFIG_QTI_SYSTEM_PM
-int system_sleep_enter(uint64_t sleep_val);
+int system_sleep_enter(void);
 
 void system_sleep_exit(void);
 
 bool system_sleep_allowed(void);
+
+int system_sleep_update_wakeup(void);
 #else
-static inline int system_sleep_enter(uint64_t sleep_val)
+static inline int system_sleep_enter(void)
 { return -ENODEV; }
 
 static inline void system_sleep_exit(void)
@@ -29,6 +31,9 @@
 static inline bool system_sleep_allowed(void)
 { return false; }
 
+static inline int system_sleep_update_wakeup(void)
+{ return -ENODEV; }
+
 #endif /* CONFIG_QTI_SYSTEM_PM */
 
 #endif /* __SOC_QCOM_SYS_PM_H__ */
diff --git a/include/sound/wcd-dsp-mgr.h b/include/sound/wcd-dsp-mgr.h
index 7ba1817..52b52c4 100644
--- a/include/sound/wcd-dsp-mgr.h
+++ b/include/sound/wcd-dsp-mgr.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -80,6 +80,7 @@
 
 	/* Software generated signal indicating debug dumps to be collected */
 	WDSP_DEBUG_DUMP,
+	WDSP_DEBUG_DUMP_INTERNAL,
 };
 
 /*
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index a87e894..30f99ce 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -297,7 +297,7 @@
 	struct list_head tg_pt_gp_lun_list;
 	struct se_lun *tg_pt_gp_alua_lun;
 	struct se_node_acl *tg_pt_gp_alua_nacl;
-	struct delayed_work tg_pt_gp_transition_work;
+	struct work_struct tg_pt_gp_transition_work;
 	struct completion *tg_pt_gp_transition_complete;
 };
 
@@ -493,6 +493,7 @@
 #define CMD_T_BUSY		(1 << 9)
 #define CMD_T_TAS		(1 << 10)
 #define CMD_T_FABRIC_STOP	(1 << 11)
+#define CMD_T_PRE_EXECUTE	(1 << 12)
 	spinlock_t		t_state_lock;
 	struct kref		cmd_kref;
 	struct completion	t_transport_stop_comp;
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 8ade3eb..90fce4d 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -208,7 +208,7 @@
 	{ KVM_TRACE_MMIO_WRITE, "write" }
 
 TRACE_EVENT(kvm_mmio,
-	TP_PROTO(int type, int len, u64 gpa, u64 val),
+	TP_PROTO(int type, int len, u64 gpa, void *val),
 	TP_ARGS(type, len, gpa, val),
 
 	TP_STRUCT__entry(
@@ -222,7 +222,10 @@
 		__entry->type		= type;
 		__entry->len		= len;
 		__entry->gpa		= gpa;
-		__entry->val		= val;
+		__entry->val		= 0;
+		if (val)
+			memcpy(&__entry->val, val,
+			       min_t(u32, sizeof(__entry->val), len));
 	),
 
 	TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 8dc7ad5..0125cde 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -273,6 +273,7 @@
 		__field(u64,		exec_time	)
 		__field(u32,		freq		)
 		__field(u32,		legacy_freq	)
+		__field(u32,		max_freq	)
 		__field(pid_t,		pid		)
 		__array(char,	comm,   TASK_COMM_LEN	)
 	),
@@ -284,13 +285,15 @@
 		__entry->exec_time	= exec_time;
 		__entry->freq		= cpu_cycles_to_freq(cycles, exec_time);
 		__entry->legacy_freq	= cpu_cur_freq(cpu);
+		__entry->max_freq	= cpu_max_freq(cpu);
 		__entry->pid            = p->pid;
 		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
 	),
 
-	TP_printk("cpu=%d event=%d cycles=%llu exec_time=%llu freq=%u legacy_freq=%u task=%d (%s)",
+	TP_printk("cpu=%d event=%d cycles=%llu exec_time=%llu freq=%u legacy_freq=%u max_freq=%u task=%d (%s)",
 		  __entry->cpu, __entry->event, __entry->cycles,
-		  __entry->exec_time, __entry->freq, __entry->legacy_freq, __entry->pid, __entry->comm)
+		  __entry->exec_time, __entry->freq, __entry->legacy_freq,
+		  __entry->max_freq, __entry->pid, __entry->comm)
 );
 
 TRACE_EVENT(sched_update_task_ravg,
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 8a707f8..8a13e39 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -455,20 +455,22 @@
 	TP_ARGS(rqst, status),
 
 	TP_STRUCT__entry(
-		__field(struct sockaddr *, addr)
 		__field(__be32, xid)
 		__field(int, status)
 		__field(unsigned long, flags)
+		__dynamic_array(unsigned char, addr, rqst->rq_addrlen)
 	),
 
 	TP_fast_assign(
-		__entry->addr = (struct sockaddr *)&rqst->rq_addr;
 		__entry->xid = status > 0 ? rqst->rq_xid : 0;
 		__entry->status = status;
 		__entry->flags = rqst->rq_flags;
+		memcpy(__get_dynamic_array(addr),
+			&rqst->rq_addr, rqst->rq_addrlen);
 	),
 
-	TP_printk("addr=%pIScp xid=0x%x status=%d flags=%s", __entry->addr,
+	TP_printk("addr=%pIScp xid=0x%x status=%d flags=%s",
+			(struct sockaddr *)__get_dynamic_array(addr),
 			be32_to_cpu(__entry->xid), __entry->status,
 			show_rqstp_flags(__entry->flags))
 );
@@ -513,22 +515,23 @@
 	TP_ARGS(rqst, status),
 
 	TP_STRUCT__entry(
-		__field(struct sockaddr *, addr)
 		__field(__be32, xid)
-		__field(int, dropme)
 		__field(int, status)
 		__field(unsigned long, flags)
+		__dynamic_array(unsigned char, addr, rqst->rq_addrlen)
 	),
 
 	TP_fast_assign(
-		__entry->addr = (struct sockaddr *)&rqst->rq_addr;
 		__entry->xid = rqst->rq_xid;
 		__entry->status = status;
 		__entry->flags = rqst->rq_flags;
+		memcpy(__get_dynamic_array(addr),
+			&rqst->rq_addr, rqst->rq_addrlen);
 	),
 
 	TP_printk("addr=%pIScp rq_xid=0x%x status=%d flags=%s",
-		__entry->addr, be32_to_cpu(__entry->xid),
+		(struct sockaddr *)__get_dynamic_array(addr),
+		be32_to_cpu(__entry->xid),
 		__entry->status, show_rqstp_flags(__entry->flags))
 );
 
diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
index 22b6ad3..8562b1c 100644
--- a/include/uapi/linux/bcache.h
+++ b/include/uapi/linux/bcache.h
@@ -90,7 +90,7 @@
 
 #define PTR_CHECK_DEV			((1 << PTR_DEV_BITS) - 1)
 
-#define PTR(gen, offset, dev)						\
+#define MAKE_PTR(gen, offset, dev)					\
 	((((__u64) dev) << 51) | ((__u64) offset) << 8 | gen)
 
 /* Bkey utility code */
diff --git a/include/uapi/linux/eventpoll.h b/include/uapi/linux/eventpoll.h
index 81c464a..6016e9e 100644
--- a/include/uapi/linux/eventpoll.h
+++ b/include/uapi/linux/eventpoll.h
@@ -26,8 +26,21 @@
 #define EPOLL_CTL_DEL 2
 #define EPOLL_CTL_MOD 3
 
+/* Epoll event masks */
+#define EPOLLIN		0x00000001
+#define EPOLLPRI	0x00000002
+#define EPOLLOUT	0x00000004
+#define EPOLLERR	0x00000008
+#define EPOLLHUP	0x00000010
+#define EPOLLRDNORM	0x00000040
+#define EPOLLRDBAND	0x00000080
+#define EPOLLWRNORM	0x00000100
+#define EPOLLWRBAND	0x00000200
+#define EPOLLMSG	0x00000400
+#define EPOLLRDHUP	0x00002000
+
 /* Set exclusive wakeup mode for the target file descriptor */
-#define EPOLLEXCLUSIVE (1 << 28)
+#define EPOLLEXCLUSIVE (1U << 28)
 
 /*
  * Request the handling of system wakeup events so as to prevent system suspends
@@ -39,13 +52,13 @@
  *
  * Requires CAP_BLOCK_SUSPEND
  */
-#define EPOLLWAKEUP (1 << 29)
+#define EPOLLWAKEUP (1U << 29)
 
 /* Set the One Shot behaviour for the target file descriptor */
-#define EPOLLONESHOT (1 << 30)
+#define EPOLLONESHOT (1U << 30)
 
 /* Set the Edge Triggered behaviour for the target file descriptor */
-#define EPOLLET (1 << 31)
+#define EPOLLET (1U << 31)
 
 /* 
  * On x86-64 make the 64bit structure have the same alignment as the
diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h
index f291569..739a4f3 100644
--- a/include/uapi/linux/ip.h
+++ b/include/uapi/linux/ip.h
@@ -167,6 +167,7 @@
 	IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN,
 	IPV4_DEVCONF_DROP_UNICAST_IN_L2_MULTICAST,
 	IPV4_DEVCONF_DROP_GRATUITOUS_ARP,
+	IPV4_DEVCONF_NF_IPV4_DEFRAG_SKIP,
 	__IPV4_DEVCONF_MAX
 };
 
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index ef07f78..de3f890 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -165,6 +165,8 @@
 #define IPA_FLT_MAC_DST_ADDR_L2TP	(1ul << 22)
 #define IPA_FLT_TCP_SYN			(1ul << 23)
 #define IPA_FLT_TCP_SYN_L2TP		(1ul << 24)
+#define IPA_FLT_L2TP_INNER_IP_TYPE  (1ul << 25)
+#define IPA_FLT_L2TP_INNER_IPV4_DST_ADDR (1ul << 26)
 
 /**
  * maximal number of NAT PDNs in the PDN config table
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 9fbdc11..3092188 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -3782,6 +3782,9 @@
  *	@NL80211_BSS_PARENT_BSSID. (u64).
  * @NL80211_BSS_PARENT_BSSID: the BSS according to which @NL80211_BSS_PARENT_TSF
  *	is set.
+ * @NL80211_BSS_CHAIN_SIGNAL: per-chain signal strength of last BSS update.
+ *	Contains a nested array of signal strength attributes (u8, dBm),
+ *	using the nesting index as the antenna number.
  * @__NL80211_BSS_AFTER_LAST: internal
  * @NL80211_BSS_MAX: highest BSS attribute
  */
@@ -3805,6 +3808,7 @@
 	NL80211_BSS_PAD,
 	NL80211_BSS_PARENT_TSF,
 	NL80211_BSS_PARENT_BSSID,
+	NL80211_BSS_CHAIN_SIGNAL,
 
 	/* keep last */
 	__NL80211_BSS_AFTER_LAST,
@@ -4835,6 +4839,27 @@
  *	RSSI threshold values to monitor rather than exactly one threshold.
  * @NL80211_EXT_FEATURE_FILS_SK_OFFLOAD: Driver SME supports FILS shared key
  *	authentication with %NL80211_CMD_CONNECT.
+ * @NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK: Device wants to do 4-way
+ *	handshake with PSK in station mode (PSK is passed as part of the connect
+ *	and associate commands), doing it in the host might not be supported.
+ * @NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X: Device wants to do doing 4-way
+ *	handshake with 802.1X in station mode (will pass EAP frames to the host
+ *	and accept the set_pmk/del_pmk commands), doing it in the host might not
+ *	be supported.
+ * @NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME: Driver is capable of overriding
+ *	the max channel attribute in the FILS request params IE with the
+ *	actual dwell time.
+ * @NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP: Driver accepts broadcast probe
+ *	response
+ * @NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE: Driver supports sending
+ *	the first probe request in each channel at rate of at least 5.5Mbps.
+ * @NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION: Driver supports
+ *	probe request tx deferral and suppression
+ * @NL80211_EXT_FEATURE_MFP_OPTIONAL: Driver supports the %NL80211_MFP_OPTIONAL
+ *	value in %NL80211_ATTR_USE_MFP.
+ * @NL80211_EXT_FEATURE_LOW_SPAN_SCAN: Driver supports low span scan.
+ * @NL80211_EXT_FEATURE_LOW_POWER_SCAN: Driver supports low power scan.
+ * @NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN: Driver supports high accuracy scan.
  *
  * @NUM_NL80211_EXT_FEATURES: number of extended features.
  * @MAX_NL80211_EXT_FEATURES: highest extended feature index.
@@ -4855,6 +4880,16 @@
 	NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI,
 	NL80211_EXT_FEATURE_CQM_RSSI_LIST,
 	NL80211_EXT_FEATURE_FILS_SK_OFFLOAD,
+	NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK,
+	NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X,
+	NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME,
+	NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP,
+	NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE,
+	NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION,
+	NL80211_EXT_FEATURE_MFP_OPTIONAL,
+	NL80211_EXT_FEATURE_LOW_SPAN_SCAN,
+	NL80211_EXT_FEATURE_LOW_POWER_SCAN,
+	NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN,
 
 	/* add new features before the definition below */
 	NUM_NL80211_EXT_FEATURES,
@@ -4915,6 +4950,10 @@
  * of NL80211_CMD_TRIGGER_SCAN and NL80211_CMD_START_SCHED_SCAN
  * requests.
  *
+ * NL80211_SCAN_FLAG_LOW_SPAN, NL80211_SCAN_FLAG_LOW_POWER, and
+ * NL80211_SCAN_FLAG_HIGH_ACCURACY flags are exclusive of each other, i.e., only
+ * one of them can be used in the request.
+ *
  * @NL80211_SCAN_FLAG_LOW_PRIORITY: scan request has low priority
  * @NL80211_SCAN_FLAG_FLUSH: flush cache before scanning
  * @NL80211_SCAN_FLAG_AP: force a scan even if the interface is configured
@@ -4931,12 +4970,29 @@
  *	locally administered 1, multicast 0) is assumed.
  *	This flag must not be requested when the feature isn't supported, check
  *	the nl80211 feature flags for the device.
+ *	SSID and/or RSSI.
+ * @NL80211_SCAN_FLAG_LOW_SPAN: Span corresponds to the total time taken to
+ *	accomplish the scan. Thus, this flag intends the driver to perform the
+ *	scan request with lesser span/duration. It is specific to the driver
+ *	implementations on how this is accomplished. Scan accuracy may get
+ *	impacted with this flag.
+ * @NL80211_SCAN_FLAG_LOW_POWER: This flag intends the scan attempts to consume
+ *	optimal possible power. Drivers can resort to their specific means to
+ *	optimize the power. Scan accuracy may get impacted with this flag.
+ * @NL80211_SCAN_FLAG_HIGH_ACCURACY: Accuracy here intends to the extent of scan
+ *	results obtained. Thus HIGH_ACCURACY scan flag aims to get maximum
+ *	possible scan results. This flag hints the driver to use the best
+ *	possible scan configuration to improve the accuracy in scanning.
+ *	Latency and power use may get impacted with this flag.
  */
 enum nl80211_scan_flags {
 	NL80211_SCAN_FLAG_LOW_PRIORITY			= 1<<0,
 	NL80211_SCAN_FLAG_FLUSH				= 1<<1,
 	NL80211_SCAN_FLAG_AP				= 1<<2,
 	NL80211_SCAN_FLAG_RANDOM_ADDR			= 1<<3,
+	NL80211_SCAN_FLAG_LOW_SPAN			= 1<<8,
+	NL80211_SCAN_FLAG_LOW_POWER			= 1<<9,
+	NL80211_SCAN_FLAG_HIGH_ACCURACY			= 1<<10,
 };
 
 /**
diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
index 08aa800..d67f476 100644
--- a/include/uapi/linux/sysctl.h
+++ b/include/uapi/linux/sysctl.h
@@ -482,6 +482,7 @@
 	NET_IPV4_CONF_PROMOTE_SECONDARIES=20,
 	NET_IPV4_CONF_ARP_ACCEPT=21,
 	NET_IPV4_CONF_ARP_NOTIFY=22,
+	NET_IPV4_CONF_NF_IPV4_DEFRAG_SKIP = 23,
 };
 
 /* /proc/sys/net/ipv4/netfilter */
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index 0d69769..0303a6f 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -423,6 +423,11 @@
 #define USB_ENDPOINT_XFER_INT		3
 #define USB_ENDPOINT_MAX_ADJUSTABLE	0x80
 
+#define USB_EP_MAXP_MULT_SHIFT	11
+#define USB_EP_MAXP_MULT_MASK	(3 << USB_EP_MAXP_MULT_SHIFT)
+#define USB_EP_MAXP_MULT(m) \
+	(((m) & USB_EP_MAXP_MULT_MASK) >> USB_EP_MAXP_MULT_SHIFT)
+
 /* The USB 3.0 spec redefines bits 5:4 of bmAttributes as interrupt ep type. */
 #define USB_ENDPOINT_INTRTYPE		0x30
 #define USB_ENDPOINT_INTR_PERIODIC	(0 << 4)
@@ -630,6 +635,20 @@
 	return __le16_to_cpu(epd->wMaxPacketSize);
 }
 
+/**
+ * usb_endpoint_maxp_mult - get endpoint's transactional opportunities
+ * @epd: endpoint to be checked
+ *
+ * Return @epd's wMaxPacketSize[12:11] + 1
+ */
+static inline int
+usb_endpoint_maxp_mult(const struct usb_endpoint_descriptor *epd)
+{
+	int maxp = __le16_to_cpu(epd->wMaxPacketSize);
+
+	return USB_EP_MAXP_MULT(maxp) + 1;
+}
+
 static inline int usb_endpoint_interrupt_type(
 		const struct usb_endpoint_descriptor *epd)
 {
@@ -854,6 +873,8 @@
 	__u8  bReserved;
 } __attribute__((packed));
 
+#define USB_DT_USB_WIRELESS_CAP_SIZE	11
+
 /* USB 2.0 Extension descriptor */
 #define	USB_CAP_TYPE_EXT		2
 
@@ -1046,6 +1067,7 @@
 	__u8  bDevCapabilityType;
 } __attribute__((packed));
 
+#define USB_DT_USB_PTM_ID_SIZE		3
 /*
  * The size of the descriptor for the Sublink Speed Attribute Count
  * (SSAC) specified in bmAttributes[4:0].
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 229dd25..71772c3 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -1016,6 +1016,7 @@
 #define V4L2_QCOM_BUF_INPUT_UNSUPPORTED		0x01000000
 #define V4L2_QCOM_BUF_FLAG_EOS			0x02000000
 #define V4L2_QCOM_BUF_FLAG_READONLY		0x04000000
+#define V4L2_QCOM_BUF_FLAG_PERF_MODE		0x20000000
 #define V4L2_MSM_BUF_FLAG_DEFER			0x40000000
 #define V4L2_QCOM_BUF_FLAG_IDRFRAME		0x80000000
 
diff --git a/include/uapi/media/cam_sensor.h b/include/uapi/media/cam_sensor.h
index 87f25b0..f5af604 100644
--- a/include/uapi/media/cam_sensor.h
+++ b/include/uapi/media/cam_sensor.h
@@ -119,20 +119,20 @@
  *
  * @slave_addr            :    OIS i2c slave address
  * @i2c_freq_mode         :    i2c frequency mode
+ * @cmd_type              :    Explains type of command
  * @ois_fw_flag           :    indicates if fw is present or not
  * @is_ois_calib          :    indicates the calibration data is available
  * @ois_name              :    OIS name
  * @opcode                :    opcode
- * @cmd_type              :    Explains type of command
  */
 struct cam_cmd_ois_info {
 	uint16_t              slave_addr;
 	uint8_t               i2c_freq_mode;
+	uint8_t               cmd_type;
 	uint8_t               ois_fw_flag;
 	uint8_t               is_ois_calib;
 	char                  ois_name[MAX_OIS_NAME_SIZE];
 	struct cam_ois_opcode opcode;
-	uint8_t               cmd_type;
 } __attribute__((packed));
 
 /**
diff --git a/include/video/msm_dba.h b/include/video/msm_dba.h
new file mode 100644
index 0000000..f251048
--- /dev/null
+++ b/include/video/msm_dba.h
@@ -0,0 +1,608 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_DBA_H
+#define _MSM_DBA_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#define MSM_DBA_CHIP_NAME_MAX_LEN 20
+#define MSM_DBA_CLIENT_NAME_LEN   20
+
+#define MSM_DBA_DEFER_PROPERTY_FLAG 0x1
+#define MSM_DBA_ASYNC_FLAG          0x2
+
+/**
+ * enum msm_dba_callback_event - event types for callback notification
+ * @MSM_DBA_CB_REMOTE_INT: Event associated with remote devices on an interface
+ *			   that supports a bi-directional control channel.
+ * @MSM_DBA_CB_HDCP_LINK_AUTHENTICATED: Authentication session is successful.
+ *					The link is authenticated and encryption
+ *					can be enabled if not enabled already.
+ * @MSM_DBA_CB_HDCP_LINK_UNAUTHENTICATED: A previously authenticated link has
+ *					  failed. The content on the interface
+ *					  is no longer secure.
+ * @MSM_DBA_CB_HPD_CONNECT: Detected a cable connect event.
+ * @MSM_DBA_CB_HPD_DISCONNECT: Detected a cable disconnect event.
+ * @MSM_DBA_CB_VIDEO_FAILURE: Detected a failure with respect to video data on
+ *			      the interface. This is a generic failure and
+ *			      client should request a debug dump to debug the
+ *			      issue. Client can also attempt a reset to recover
+ *			      the device.
+ * @MSM_DBA_CB_AUDIO_FAILURE: Detected a failure with respect to audio data on
+ *			      the interface. This is a generic failure and
+ *			      client should request a debug dump. Client can
+ *			      also attempt a reset to recover the device.
+ * @MSM_DBA_CB_CEC_WRITE_SUCCESS: The asynchronous CEC write request is
+ *				  successful.
+ * @MSM_DBA_CB_CEC_WRITE_FAIL: The asynchronous CEC write request failed.
+ * @MSM_DBA_CB_CEC_READ_PENDING: There is a pending CEC read message.
+ * @MSM_DBA_CB_PRE_RESET: This callback is called just before the device is
+ *			  being reset.
+ * @MSM_DBA_CB_POST_RESET: This callback is called after device reset is
+ *			   complete and the driver has applied back all the
+ *			   properties.
+ *
+ * Clients for this driver can register for receiving callbacks for specific
+ * events. This enum defines the type of events supported by the driver. An
+ * event mask is typically used to denote multiple events.
+ */
+enum msm_dba_callback_event {
+	MSM_DBA_CB_REMOTE_INT = BIT(0),
+	MSM_DBA_CB_HDCP_LINK_AUTHENTICATED = BIT(1),
+	MSM_DBA_CB_HDCP_LINK_UNAUTHENTICATED = BIT(2),
+	MSM_DBA_CB_HPD_CONNECT = BIT(3),
+	MSM_DBA_CB_HPD_DISCONNECT = BIT(4),
+	MSM_DBA_CB_VIDEO_FAILURE = BIT(5),
+	MSM_DBA_CB_AUDIO_FAILURE = BIT(6),
+	MSM_DBA_CB_CEC_WRITE_SUCCESS = BIT(7),
+	MSM_DBA_CB_CEC_WRITE_FAIL = BIT(8),
+	MSM_DBA_CB_CEC_READ_PENDING = BIT(9),
+	MSM_DBA_CB_PRE_RESET = BIT(10),
+	MSM_DBA_CB_POST_RESET = BIT(11),
+};
+
+/**
+ * enum msm_dba_audio_interface_type - audio interface type
+ * @MSM_DBA_AUDIO_I2S_INTERFACE: I2S interface for audio
+ * @MSM_DBA_AUDIO_SPDIF_INTERFACE: SPDIF interface for audio
+ */
+enum msm_dba_audio_interface_type {
+	MSM_DBA_AUDIO_I2S_INTERFACE = BIT(0),
+	MSM_DBA_AUDIO_SPDIF_INTERFACE = BIT(1),
+};
+
+/**
+ * enum msm_dba_audio_format_type - audio format type
+ * @MSM_DBA_AUDIO_FMT_UNCOMPRESSED_LPCM: uncompressed format
+ * @MSM_DBA_AUDIO_FMT_COMPRESSED: compressed formats
+ */
+enum msm_dba_audio_format_type {
+	MSM_DBA_AUDIO_FMT_UNCOMPRESSED_LPCM = BIT(0),
+	MSM_DBA_AUDIO_FMT_COMPRESSED = BIT(1),
+};
+
+/**
+ * enum msm_dba_audio_copyright_type - audio copyright
+ * @MSM_DBA_AUDIO_COPYRIGHT_PROTECTED: copy right protected
+ * @MSM_DBA_AUDIO_COPYRIGHT_NOT_PROTECTED: not copy right protected
+ */
+enum msm_dba_audio_copyright_type {
+	MSM_DBA_AUDIO_COPYRIGHT_PROTECTED = BIT(0),
+	MSM_DBA_AUDIO_COPYRIGHT_NOT_PROTECTED = BIT(1),
+};
+
+/**
+ * enum msm_dba_audio_pre_emphasis_type - pre-emphasis
+ * @MSM_DBA_AUDIO_NO_PRE_EMPHASIS: 2 audio channels w/o pre-emphasis
+ * @MSM_DBA_AUDIO_PRE_EMPHASIS_50_15us: 2 audio channels with 50/15uS
+ */
+enum msm_dba_audio_pre_emphasis_type {
+	MSM_DBA_AUDIO_NO_PRE_EMPHASIS = BIT(0),
+	MSM_DBA_AUDIO_PRE_EMPHASIS_50_15us = BIT(1),
+};
+
+/**
+ * enum msm_dba_audio_clock_accuracy - Audio Clock Accuracy
+ * @MSM_DBA_AUDIO_CLOCK_ACCURACY_LVL1: normal accuracy +/-1000 x 10^-6
+ * @MSM_DBA_AUDIO_CLOCK_ACCURACY_LVL2: high accuracy +/- 50 x 10^-6
+ * @MSM_DBA_AUDIO_CLOCK_ACCURACY_LVL3: variable pitch shifted clock
+ */
+enum msm_dba_audio_clock_accuracy {
+	MSM_DBA_AUDIO_CLOCK_ACCURACY_LVL1 = BIT(1),
+	MSM_DBA_AUDIO_CLOCK_ACCURACY_LVL2 = BIT(0),
+	MSM_DBA_AUDIO_CLOCK_ACCURACY_LVL3 = BIT(2),
+};
+
+/**
+ * enum msm_dba_channel_status_source - CS override
+ * @MSM_DBA_AUDIO_CS_SOURCE_I2S_STREAM: use channel status bits from I2S stream
+ * @MSM_DBA_AUDIO_CS_SOURCE_REGISTERS: use channel status bits from registers
+ */
+enum msm_dba_channel_status_source {
+	MSM_DBA_AUDIO_CS_SOURCE_I2S_STREAM,
+	MSM_DBA_AUDIO_CS_SOURCE_REGISTERS
+};
+
+/**
+ * enum msm_dba_audio_sampling_rates_type - audio sampling rates
+ * @MSM_DBA_AUDIO_32KHZ: 32KHz sampling rate
+ * @MSM_DBA_AUDIO_44P1KHZ: 44.1KHz sampling rate
+ * @MSM_DBA_AUDIO_48KHZ: 48KHz sampling rate
+ * @MSM_DBA_AUDIO_96KHZ: 96KHz sampling rate
+ * @MSM_DBA_AUDIO_192KHZ: 192KHz sampling rate
+ */
+enum msm_dba_audio_sampling_rates_type {
+	MSM_DBA_AUDIO_32KHZ = BIT(0),
+	MSM_DBA_AUDIO_44P1KHZ = BIT(1),
+	MSM_DBA_AUDIO_48KHZ = BIT(2),
+	MSM_DBA_AUDIO_88P2KHZ = BIT(1),
+	MSM_DBA_AUDIO_96KHZ = BIT(3),
+	MSM_DBA_AUDIO_176P4KHZ = BIT(1),
+	MSM_DBA_AUDIO_192KHZ = BIT(4),
+};
+
+/**
+ * enum msm_dba_audio_word_bit_depth - audio word size
+ * @MSM_DBA_AUDIO_WORD_16BIT: 16 bits per word
+ * @MSM_DBA_AUDIO_WORD_24BIT: 24 bits per word
+ * @MSM_DBA_AUDIO_WORD_32BIT: 32 bits per word
+ */
+enum msm_dba_audio_word_bit_depth {
+	MSM_DBA_AUDIO_WORD_16BIT = BIT(1),
+	MSM_DBA_AUDIO_WORD_24BIT = BIT(2),
+	MSM_DBA_AUDIO_WORD_32BIT = BIT(3),
+};
+
+/**
+ * enum msm_dba_audio_channel_count - audio channel count
+ * @MSM_DBA_AUDIO_CHANNEL_2: 2 channel audio
+ * @MSM_DBA_AUDIO_CHANNEL_4: 4 channel audio
+ * @MSM_DBA_AUDIO_CHANNEL_8: 8 channel audio
+ */
+enum msm_dba_audio_channel_count {
+	MSM_DBA_AUDIO_CHANNEL_2 = BIT(0),
+	MSM_DBA_AUDIO_CHANNEL_4 = BIT(1),
+	MSM_DBA_AUDIO_CHANNEL_8 = BIT(2),
+};
+
+/**
+ * enum msm_dba_audio_i2s_format - i2s audio data format
+ * @MSM_DBA_AUDIO_I2S_FMT_STANDARD: Standard format
+ * @MSM_DBA_AUDIO_I2S_FMT_RIGHT_JUSTIFIED: i2s data is right justified
+ * @MSM_DBA_AUDIO_I2S_FMT_LEFT_JUSTIFIED: i2s data is left justified
+ * @MSM_DBA_AUDIO_I2S_FMT_AES3_DIRECT: AES signal format
+ */
+enum msm_dba_audio_i2s_format {
+	MSM_DBA_AUDIO_I2S_FMT_STANDARD = 0,
+	MSM_DBA_AUDIO_I2S_FMT_RIGHT_JUSTIFIED,
+	MSM_DBA_AUDIO_I2S_FMT_LEFT_JUSTIFIED,
+	MSM_DBA_AUDIO_I2S_FMT_AES3_DIRECT,
+	MSM_DBA_AUDIO_I2S_FMT_MAX,
+};
+
+enum msm_dba_video_aspect_ratio {
+	MSM_DBA_AR_UNKNOWN = 0,
+	MSM_DBA_AR_4_3,
+	MSM_DBA_AR_5_4,
+	MSM_DBA_AR_16_9,
+	MSM_DBA_AR_16_10,
+	MSM_DBA_AR_64_27,
+	MSM_DBA_AR_256_135,
+	MSM_DBA_AR_MAX
+};
+
+enum msm_dba_audio_word_endian_type {
+	MSM_DBA_AUDIO_WORD_LITTLE_ENDIAN = 0,
+	MSM_DBA_AUDIO_WORD_BIG_ENDIAN,
+	MSM_DBA_AUDIO_WORD_ENDIAN_MAX
+};
+
+/**
+ * msm_dba_audio_op_mode - i2s audio operation mode
+ * @MSM_DBA_AUDIO_MODE_MANUAL: Manual mode
+ * @MSM_DBA_AUDIO_MODE_AUTOMATIC: Automatic mode
+ */
+enum msm_dba_audio_op_mode {
+	MSM_DBA_AUDIO_MODE_MANUAL,
+	MSM_DBA_AUDIO_MODE_AUTOMATIC,
+};
+
+/**
+ * typedef *msm_dba_cb() - Prototype for callback function
+ * @data: Pointer to user data provided with register API
+ * @event: Event type associated with callback. This can be a bitmask.
+ */
+typedef void (*msm_dba_cb)(void *data, enum msm_dba_callback_event event);
+
+/**
+ * struct msm_dba_reg_info - Client information used with register API
+ * @client_name: Name of the client for debug purposes
+ * @chip_name: Bridge chip ID
+ * @instance_id: Instance ID of the bridge chip in case of multiple instances
+ * @cb: callback function called in case of events.
+ * @cb_data: pointer to a data structure that will be returned with callback
+ *
+ * msm_dba_reg_info structure will be used to provide information during
+ * registering with driver. This structure will contain the information required
+ * to identify the specific bridge chip the client wants to use.
+ *
+ * Client should also specify the callback function which needs to be called in
+ * case of events. There is an optional data field which is a pointer that will
+ * be returned as one of arguments in the callback function. This data field can
+ * be NULL if client does not wish to use it.
+ */
+struct msm_dba_reg_info {
+	char client_name[MSM_DBA_CLIENT_NAME_LEN];
+	char chip_name[MSM_DBA_CHIP_NAME_MAX_LEN];
+	u32 instance_id;
+	msm_dba_cb cb;
+	void *cb_data;
+};
+
+/**
+ * struct msm_dba_video_caps_info - video capabilities of the bridge chip
+ * @hdcp_support: if hdcp is supported
+ * @edid_support: if reading edid from sink is supported
+ * @data_lanes_lp_support: if low power mode is supported on data lanes
+ * @clock_lanes_lp_support: If low power mode is supported on clock lanes
+ * @max_pclk_khz: maximum pixel clock supported
+ * @num_of_input_lanes: Number of input data lanes supported by the bridge chip
+ */
+struct msm_dba_video_caps_info {
+	bool hdcp_support;
+	bool edid_support;
+	bool data_lanes_lp_support;
+	bool clock_lanes_lp_support;
+	u32 max_pclk_khz;
+	u32 num_of_input_lanes;
+};
+
+/**
+ * struct msm_dba_audio_caps_info - audio capabilities of the bridge chip
+ * @audio_support: if audio is supported
+ * @audio_rates: audio sampling rates supported
+ * @audio_fmts: audio formats supported
+ */
+struct msm_dba_audio_caps_info {
+	u32 audio_support;
+	u32 audio_rates;
+	u32 audio_fmts;
+};
+
+/**
+ * struct msm_dba_capabilities - general capabilities of the bridge chip
+ * @vid_caps: video capabilities
+ * @aud_caps: audio capabilities
+ * @av_mute_support: av mute support in bridge chip
+ * @deferred_commit_support: support for deferred commit
+ */
+struct msm_dba_capabilities {
+	struct msm_dba_video_caps_info vid_caps;
+	struct msm_dba_audio_caps_info aud_caps;
+	bool av_mute_support;
+	bool deferred_commit_support;
+};
+
+/**
+ * struct msm_dba_audio_cfg - Structure for audio configuration
+ * @interface: Specifies audio interface type. Client should check the
+ *	       capabilities for the interfaces supported by the bridge.
+ * @format: Compressed vs Uncompressed formats.
+ * @channels: Number of channels.
+ * @i2s_fmt: I2S data packing format. This is valid only if interface is I2S.
+ * @sampling_rate: sampling rate of audio data
+ * @word_size: word size
+ * @word_endianness: little or big endian words
+ */
+struct msm_dba_audio_cfg {
+	enum msm_dba_audio_interface_type interface;
+	enum msm_dba_audio_format_type format;
+	enum msm_dba_audio_channel_count channels;
+	enum msm_dba_audio_i2s_format i2s_fmt;
+	enum msm_dba_audio_sampling_rates_type sampling_rate;
+	enum msm_dba_audio_word_bit_depth word_size;
+	enum msm_dba_audio_word_endian_type word_endianness;
+	enum msm_dba_audio_copyright_type copyright;
+	enum msm_dba_audio_pre_emphasis_type pre_emphasis;
+	enum msm_dba_audio_clock_accuracy clock_accuracy;
+	enum msm_dba_channel_status_source channel_status_source;
+	enum msm_dba_audio_op_mode mode;
+
+	u32 channel_status_category_code;
+	u32 channel_status_source_number;
+	u32 channel_status_v_bit;
+	u32 channel_allocation;
+	u32 channel_status_word_length;
+
+	u32 n;
+	u32 cts;
+};
+
+/**
+ * struct msm_dba_video_cfg - video configuration data
+ * @h_active: active width of the video signal
+ * @h_front_porch: horizontal front porch in pixels
+ * @h_pulse_width: pulse width of hsync in pixels
+ * @h_back_porch: horizontal back porch in pixels
+ * @h_polarity: polarity of hsync signal
+ * @v_active: active height of the video signal
+ * @v_front_porch: vertical front porch in lines
+ * @v_pulse_width: pulse width of vsync in lines
+ * @v_back_porch: vertical back porch in lines
+ * @v_polarity: polarity of vsync signal
+ * @pclk_khz: pixel clock in KHz
+ * @interlaced: if video is interlaced
+ * @vic: video identification code
+ * @hdmi_mode: hdmi or dvi mode for the sink
+ * @ar: aspect ratio of the signal
+ * @num_of_input_lanes: number of input lanes in case of DSI/LVDS
+ */
+struct msm_dba_video_cfg {
+	u32  h_active;
+	u32  h_front_porch;
+	u32  h_pulse_width;
+	u32  h_back_porch;
+	bool h_polarity;
+	u32  v_active;
+	u32  v_front_porch;
+	u32  v_pulse_width;
+	u32  v_back_porch;
+	bool v_polarity;
+	u32  pclk_khz;
+	bool interlaced;
+	u32  vic;
+	bool hdmi_mode;
+	enum msm_dba_video_aspect_ratio ar;
+	u32  num_of_input_lanes;
+	u8 scaninfo;
+};
+
+struct mdss_dba_timing_info {
+	u16 xres;
+	u16 yres;
+	u8 bpp;
+	u8 fps;
+	u8 lanes;
+};
+
+/**
+ * struct msm_dba_ops- operation supported by bridge chip
+ * @get_caps: returns the bridge chip capabilities
+ *	      DEFER and ASYNC flags are not supported.
+ * @power_on: powers on/off the bridge chip. This usually involves turning on
+ *	      the power regulators and bringing the chip out of reset. Chip
+ *	      should be capable of raising interrupts at this point.
+ *	      DEFER and ASYNC flags are supported.
+ * @video_on: turn on/off video stream. This function also requires the video
+ *	      timing information that might be needed for programming the bridge
+ *	      chip.
+ *	      DEFER flag is supported.
+ *	      ASYNC flag is not supported.
+ * @audio_on: turn on/off audio stream.
+ *	      DEFER flag is supported.
+ *	      ASYNC flag is not supported.
+ * @configure_audio: setup audio configuration
+ *		     DEFER flag is supported.
+ *		     ASYNC flag is not supported.
+ * @av_mute: controls av mute functionalities if supported. AV mute is different
+ *	     from audio_on and video_on where in even though the actual data is
+ *	     sent, mute is specified through control packets.
+ *	     DEFER flag is supported.
+ *	     ASYNC flag is not supported.
+ * @interrupts_enable: enables interrupts to get event callbacks. Clients need
+ *		      to specify an event mask of the events they are
+ *		      interested in. If a client provides an event as part of
+ *		      the mask, it will receive the interrupt regardless of the
+ *		      client modifying the property.
+ *		      DEFER flag is supported.
+ *		      ASYNC flag is not supported.
+ * @hdcp_enable: enable/disable hdcp. If HDCP is enabled, this function will
+ *		 start a new authentication session. There is a separate
+ *		 argument for enabling encryption. Encryption can be enabled any
+ *		 time after HDCP has been fully authenticated. This function
+ *		 will support an asynchronous mode where calling this function
+ *		 will kick off HDCP and return to the caller. Caller has to wait
+ *		 for MSM_DBA_CB_HDCP_LINK_AUTHENTICATED callback to ensure link is
+ *		 authenticated.
+ *		 DEFER flag is not supported.
+ *		 ASYNC flag is supported.
+ * @hdcp_get_ksv_list_size: returns the KSV list size. In case of a simple sink
+ *			    the size will be 1. In case of a repeater, this can
+ *			    be more than one.
+ *			    DEFER and ASYNC flags are not supported.
+ * @hdcp_get_ksv_list: return the KSV list. Client can query the KSV information
+ *		       from the bridge. Client should call
+ *		       hdcp_get_ksv_list_size first and then allocate 40*size
+ *		       bytes to hold all the KSVs.
+ *		       DEFER and ASYNC flags are not supported.
+ * @hdmi_cec_on: enable or disable cec module. Clients need to enable CEC
+ *		 feature before they do read or write CEC messages.
+ * @hdmi_cec_write: perform a CEC write. For bridges with HDMI as output
+ *		    interface, this function allows clients to send a CEC
+ *		    message. Client should pack the data according to the CEC
+ *		    specification and provide the final buffer. Since CEC writes
+ *		    can take a longer time to ascertain whether they succeeded,
+ *		    this function supports the ASYNC flag. Driver will return
+ *		    either MSM_DBA_CB_CEC_WRITE_SUCCESS or
+ *		    MSM_DBA_CB_CEC_WRITE_FAIL callbacks.
+ *		    DEFER is not supported.
+ *		    ASYNC flag is supported.
+ * @hdmi_cec_read: get a pending CEC read message. In case of an incoming CEC
+ *		   message, driver will return MSM_DBA_CB_CEC_READ_PENDING
+ *		   callback. On getting this event callback, client should call
+ *		   hdmi_cec_read to get the message. The buffer should at least
+ *		   be 15 bytes or more. Client should read the CEC message from
+ *		   a thread different from the callback.
+ *		   DEFER and ASYNC flags are not supported.
+ * @get_edid_size: returns size of the edid.
+ *		   DEFER and ASYNC flags are not supported.
+ * @get_raw_edid: returns raw edid data.
+ *		   DEFER and ASYNC flags are not supported.
+ * @enable_remote_comm: enable/disable remote communication. Some interfaces
+ *		        like FPDLINK III support a bi-directional control
+ *		        channel that could be used to send control data using an
+ *		        I2C or SPI protocol. This Function will enable this
+ *		        control channel if supported.
+ *		        DEFER and ASYNC flags are not supported.
+ * @add_remote_device: add slaves on remote side for enabling communication. For
+ *		       interfaces that support bi directional control channel,
+ *		       this function allows clients to specify slave IDs of
+ *		       devices on remote bus. Messages addressed to these IDs
+ *		       will be trapped by the bridge chip and put on the remote
+ *		       bus.
+ *		       DEFER and ASYNC flags are not supported.
+ * @commit_deferred_props: commits deferred properties
+ *			   DEFER and ASYNC flags are not supported.
+ * @force_reset: reset the device forcefully. In case the device goes into a bad
+ *		 state, a client can force reset to try and recover the device.
+ *		 The reset will be applied in spite of different configurations
+ *		 from other clients. Driver will apply all the properties that
+ *		 have been applied so far after the reset is complete. In case
+ *		 of multiple clients, driver will issue a reset callback.
+ * @dump_debug_info: dumps debug information to dmesg.
+ * @check_hpd: Check if cable is connected or not. if cable is connected we
+ *		send notification to display framework.
+ * @set_audio_block: This function will populate the raw audio speaker block
+ *		     data along with size of each block in bridgechip buffer.
+ * @get_audio_block: This function will return the raw audio speaker block
+ *		     along with size of each block.
+ *
+ * The msm_dba_ops structure represents a set of operations that can be
+ * supported by each bridge chip. Depending on the functionality supported by a
+ * specific bridge chip, some of the operations need not be supported. For
+ * example if a bridge chip does not support reading EDID from a sink device,
+ * get_edid_size and get_raw_edid can be NULL.
+ *
+ * Deferring properties: The deferred flag allows us to address any quirks with
+ * respect to specific bridge chips. If there is a need for some properties to
+ * be committed together, turning on video and audio at the same time, the
+ * deferred flag can be used. Properties that are set using a DEFER flag will
+ * not be committed to hardware until commit_deferred_props() function is
+ * called.
+ *
+ */
+struct msm_dba_ops {
+	int (*get_caps)(void *client,
+			struct msm_dba_capabilities *caps);
+
+	int (*power_on)(void *client,
+			bool on,
+			u32 flags);
+
+	int (*video_on)(void *client,
+			bool on,
+			struct msm_dba_video_cfg *cfg,
+			u32 flags);
+
+	int (*audio_on)(void *client,
+			bool on,
+			u32 flags);
+
+	int (*configure_audio)(void *client,
+			       struct msm_dba_audio_cfg *cfg,
+			       u32 flags);
+
+	int (*av_mute)(void *client,
+		       bool video_mute,
+		       bool audio_mute,
+		       u32 flags);
+
+	int (*interrupts_enable)(void *client,
+				bool on,
+				u32 event_mask,
+				u32 flags);
+
+	int (*hdcp_enable)(void *client,
+			   bool hdcp_on,
+			   bool enc_on,
+			   u32 flags);
+
+	int (*hdcp_get_ksv_list_size)(void *client,
+				      u32 *count,
+				      u32 flags);
+
+	int (*hdcp_get_ksv_list)(void *client,
+				 u32 count,
+				 char *buf,
+				 u32 flags);
+
+	int (*hdmi_cec_on)(void *client,
+			      bool enable,
+			      u32 flags);
+
+	int (*hdmi_cec_write)(void *client,
+			      u32 size,
+			      char *buf,
+			      u32 flags);
+
+	int (*hdmi_cec_read)(void *client,
+			     u32 *size,
+			     char *buf,
+			     u32 flags);
+
+	int (*get_edid_size)(void *client,
+			     u32 *size,
+			     u32 flags);
+
+	int (*get_raw_edid)(void *client,
+			    u32 size,
+			    char *buf,
+			    u32 flags);
+
+	int (*enable_remote_comm)(void *client,
+				  bool on,
+				  u32 flags);
+
+	int (*add_remote_device)(void *client,
+				 u32 *slave_ids,
+				 u32 count,
+				 u32 flags);
+
+	int (*commit_deferred_props)(void *client,
+				    u32 flags);
+
+	int (*force_reset)(void *client, u32 flags);
+	int (*dump_debug_info)(void *client, u32 flags);
+	int (*check_hpd)(void *client, u32 flags);
+	void (*set_audio_block)(void *client, u32 size, void *buf);
+	void (*get_audio_block)(void *client, u32 size, void *buf);
+	void* (*get_supp_timing_info)(void);
+};
+
+/**
+ * msm_dba_register_client() - Allows a client to register with the driver.
+ * @info: Client information along with the bridge chip id the client wishes to
+ *	  program.
+ * @ops: Function pointers to bridge chip operations. Some function pointers can
+ *	 be NULL depending on the functionalities supported by bridge chip.
+ *
+ * The register API supports multiple clients to register for the same bridge
+ * chip. If Successful, this will return a pointer that should be used as a
+ * handle for all subsequent function calls.
+ */
+void *msm_dba_register_client(struct msm_dba_reg_info *info,
+			      struct msm_dba_ops *ops);
+
+/**
+ * msm_dba_deregister_client() - Allows client to de-register with the driver.
+ * @client: client handle returned by register API.
+ *
+ * This function will release all the resources used by a particular client. If
+ * it is the only client using the bridge chip, the bridge chip will be powered
+ * down and put into reset.
+ */
+int msm_dba_deregister_client(void *client);
+
+#endif /* _MSM_DBA_H */
diff --git a/init/main.c b/init/main.c
index 674bc77..17e439f 100644
--- a/init/main.c
+++ b/init/main.c
@@ -80,6 +80,7 @@
 #include <linux/integrity.h>
 #include <linux/proc_ns.h>
 #include <linux/io.h>
+#include <linux/kaiser.h>
 
 #include <asm/io.h>
 #include <asm/bugs.h>
@@ -473,6 +474,7 @@
 	pgtable_init();
 	vmalloc_init();
 	ioremap_huge_init();
+	kaiser_init();
 }
 
 asmlinkage __visible void __init start_kernel(void)
diff --git a/kernel/acct.c b/kernel/acct.c
index 74963d1..37f1dc6 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -99,7 +99,7 @@
 {
 	struct kstatfs sbuf;
 
-	if (time_is_before_jiffies(acct->needcheck))
+	if (time_is_after_jiffies(acct->needcheck))
 		goto out;
 
 	/* May block */
diff --git a/kernel/audit.c b/kernel/audit.c
index f1ca116..da4e7c0 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -79,13 +79,13 @@
 #define AUDIT_OFF	0
 #define AUDIT_ON	1
 #define AUDIT_LOCKED	2
-u32		audit_enabled;
-u32		audit_ever_enabled;
+u32		audit_enabled = AUDIT_OFF;
+u32		audit_ever_enabled = !!AUDIT_OFF;
 
 EXPORT_SYMBOL_GPL(audit_enabled);
 
 /* Default state when kernel boots without any parameters. */
-static u32	audit_default;
+static u32	audit_default = AUDIT_OFF;
 
 /* If auditing cannot proceed, audit_failure selects what happens. */
 static u32	audit_failure = AUDIT_FAIL_PRINTK;
@@ -1199,8 +1199,6 @@
 	skb_queue_head_init(&audit_skb_queue);
 	skb_queue_head_init(&audit_skb_hold_queue);
 	audit_initialized = AUDIT_INITIALIZED;
-	audit_enabled = audit_default;
-	audit_ever_enabled |= !!audit_default;
 
 	audit_log(NULL, GFP_KERNEL, AUDIT_KERNEL, "initialized");
 
@@ -1217,6 +1215,8 @@
 	audit_default = !!simple_strtol(str, NULL, 0);
 	if (!audit_default)
 		audit_initialized = AUDIT_DISABLED;
+	audit_enabled = audit_default;
+	audit_ever_enabled = !!audit_enabled;
 
 	pr_info("%s\n", audit_default ?
 		"enabled (after initialization)" : "disabled (until reboot)");
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index f3721e1..9a1e6ed 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -46,9 +46,10 @@
 static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 {
 	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
+	u32 elem_size, index_mask, max_entries;
+	bool unpriv = !capable(CAP_SYS_ADMIN);
 	struct bpf_array *array;
-	u64 array_size;
-	u32 elem_size;
+	u64 array_size, mask64;
 
 	/* check sanity of attributes */
 	if (attr->max_entries == 0 || attr->key_size != 4 ||
@@ -63,11 +64,32 @@
 
 	elem_size = round_up(attr->value_size, 8);
 
+	max_entries = attr->max_entries;
+
+	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
+	 * upper most bit set in u32 space is undefined behavior due to
+	 * resulting 1U << 32, so do it manually here in u64 space.
+	 */
+	mask64 = fls_long(max_entries - 1);
+	mask64 = 1ULL << mask64;
+	mask64 -= 1;
+
+	index_mask = mask64;
+	if (unpriv) {
+		/* round up array size to nearest power of 2,
+		 * since cpu will speculate within index_mask limits
+		 */
+		max_entries = index_mask + 1;
+		/* Check for overflows. */
+		if (max_entries < attr->max_entries)
+			return ERR_PTR(-E2BIG);
+	}
+
 	array_size = sizeof(*array);
 	if (percpu)
-		array_size += (u64) attr->max_entries * sizeof(void *);
+		array_size += (u64) max_entries * sizeof(void *);
 	else
-		array_size += (u64) attr->max_entries * elem_size;
+		array_size += (u64) max_entries * elem_size;
 
 	/* make sure there is no u32 overflow later in round_up() */
 	if (array_size >= U32_MAX - PAGE_SIZE)
@@ -77,6 +99,8 @@
 	array = bpf_map_area_alloc(array_size);
 	if (!array)
 		return ERR_PTR(-ENOMEM);
+	array->index_mask = index_mask;
+	array->map.unpriv_array = unpriv;
 
 	/* copy mandatory map attributes */
 	array->map.map_type = attr->map_type;
@@ -110,7 +134,7 @@
 	if (unlikely(index >= array->map.max_entries))
 		return NULL;
 
-	return array->value + array->elem_size * index;
+	return array->value + array->elem_size * (index & array->index_mask);
 }
 
 /* Called from eBPF program */
@@ -122,7 +146,7 @@
 	if (unlikely(index >= array->map.max_entries))
 		return NULL;
 
-	return this_cpu_ptr(array->pptrs[index]);
+	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
 }
 
 int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
@@ -142,7 +166,7 @@
 	 */
 	size = round_up(map->value_size, 8);
 	rcu_read_lock();
-	pptr = array->pptrs[index];
+	pptr = array->pptrs[index & array->index_mask];
 	for_each_possible_cpu(cpu) {
 		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
 		off += size;
@@ -190,10 +214,11 @@
 		return -EEXIST;
 
 	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
-		memcpy(this_cpu_ptr(array->pptrs[index]),
+		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
 		       value, map->value_size);
 	else
-		memcpy(array->value + array->elem_size * index,
+		memcpy(array->value +
+		       array->elem_size * (index & array->index_mask),
 		       value, map->value_size);
 	return 0;
 }
@@ -227,7 +252,7 @@
 	 */
 	size = round_up(map->value_size, 8);
 	rcu_read_lock();
-	pptr = array->pptrs[index];
+	pptr = array->pptrs[index & array->index_mask];
 	for_each_possible_cpu(cpu) {
 		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
 		off += size;
diff --git a/kernel/bpf/percpu_freelist.c b/kernel/bpf/percpu_freelist.c
index 5c51d19..673fa6f 100644
--- a/kernel/bpf/percpu_freelist.c
+++ b/kernel/bpf/percpu_freelist.c
@@ -78,8 +78,10 @@
 {
 	struct pcpu_freelist_head *head;
 	struct pcpu_freelist_node *node;
+	unsigned long flags;
 	int orig_cpu, cpu;
 
+	local_irq_save(flags);
 	orig_cpu = cpu = raw_smp_processor_id();
 	while (1) {
 		head = per_cpu_ptr(s->freelist, cpu);
@@ -87,14 +89,16 @@
 		node = head->first;
 		if (node) {
 			head->first = node->next;
-			raw_spin_unlock(&head->lock);
+			raw_spin_unlock_irqrestore(&head->lock, flags);
 			return node;
 		}
 		raw_spin_unlock(&head->lock);
 		cpu = cpumask_next(cpu, cpu_possible_mask);
 		if (cpu >= nr_cpu_ids)
 			cpu = 0;
-		if (cpu == orig_cpu)
+		if (cpu == orig_cpu) {
+			local_irq_restore(flags);
 			return NULL;
+		}
 	}
 }
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index cd62aea..5b017fb 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -565,57 +565,6 @@
 	list_add(&tl->list_node, &bpf_prog_types);
 }
 
-/* fixup insn->imm field of bpf_call instructions:
- * if (insn->imm == BPF_FUNC_map_lookup_elem)
- *      insn->imm = bpf_map_lookup_elem - __bpf_call_base;
- * else if (insn->imm == BPF_FUNC_map_update_elem)
- *      insn->imm = bpf_map_update_elem - __bpf_call_base;
- * else ...
- *
- * this function is called after eBPF program passed verification
- */
-static void fixup_bpf_calls(struct bpf_prog *prog)
-{
-	const struct bpf_func_proto *fn;
-	int i;
-
-	for (i = 0; i < prog->len; i++) {
-		struct bpf_insn *insn = &prog->insnsi[i];
-
-		if (insn->code == (BPF_JMP | BPF_CALL)) {
-			/* we reach here when program has bpf_call instructions
-			 * and it passed bpf_check(), means that
-			 * ops->get_func_proto must have been supplied, check it
-			 */
-			BUG_ON(!prog->aux->ops->get_func_proto);
-
-			if (insn->imm == BPF_FUNC_get_route_realm)
-				prog->dst_needed = 1;
-			if (insn->imm == BPF_FUNC_get_prandom_u32)
-				bpf_user_rnd_init_once();
-			if (insn->imm == BPF_FUNC_tail_call) {
-				/* mark bpf_tail_call as different opcode
-				 * to avoid conditional branch in
-				 * interpeter for every normal call
-				 * and to prevent accidental JITing by
-				 * JIT compiler that doesn't support
-				 * bpf_tail_call yet
-				 */
-				insn->imm = 0;
-				insn->code |= BPF_X;
-				continue;
-			}
-
-			fn = prog->aux->ops->get_func_proto(insn->imm);
-			/* all functions that have prototype and verifier allowed
-			 * programs to call them, must be real in-kernel functions
-			 */
-			BUG_ON(!fn->func);
-			insn->imm = fn->func - __bpf_call_base;
-		}
-	}
-}
-
 /* drop refcnt on maps used by eBPF program and free auxilary data */
 static void free_used_maps(struct bpf_prog_aux *aux)
 {
@@ -810,9 +759,6 @@
 	if (err < 0)
 		goto free_used_maps;
 
-	/* fixup BPF_CALL->imm field */
-	fixup_bpf_calls(prog);
-
 	/* eBPF program is ready to be JITed */
 	prog = bpf_prog_select_runtime(prog, &err);
 	if (err < 0)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 372454a..19c44cf 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1187,7 +1187,7 @@
 	}
 }
 
-static int check_call(struct bpf_verifier_env *env, int func_id)
+static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
 {
 	struct bpf_verifier_state *state = &env->cur_state;
 	const struct bpf_func_proto *fn = NULL;
@@ -1238,6 +1238,13 @@
 	err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
 	if (err)
 		return err;
+	if (func_id == BPF_FUNC_tail_call) {
+		if (meta.map_ptr == NULL) {
+			verbose("verifier bug\n");
+			return -EINVAL;
+		}
+		env->insn_aux_data[insn_idx].map_ptr = meta.map_ptr;
+	}
 	err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
 	if (err)
 		return err;
@@ -1790,10 +1797,17 @@
 			/* case: R = imm
 			 * remember the value we stored into this reg
 			 */
+			u64 imm;
+
+			if (BPF_CLASS(insn->code) == BPF_ALU64)
+				imm = insn->imm;
+			else
+				imm = (u32)insn->imm;
+
 			regs[insn->dst_reg].type = CONST_IMM;
-			regs[insn->dst_reg].imm = insn->imm;
-			regs[insn->dst_reg].max_value = insn->imm;
-			regs[insn->dst_reg].min_value = insn->imm;
+			regs[insn->dst_reg].imm = imm;
+			regs[insn->dst_reg].max_value = imm;
+			regs[insn->dst_reg].min_value = imm;
 		}
 
 	} else if (opcode > BPF_END) {
@@ -1861,10 +1875,28 @@
 			   ((BPF_SRC(insn->code) == BPF_X &&
 			     regs[insn->src_reg].type == CONST_IMM) ||
 			    BPF_SRC(insn->code) == BPF_K)) {
-			if (BPF_SRC(insn->code) == BPF_X)
+			if (BPF_SRC(insn->code) == BPF_X) {
+				/* check in case the register contains a big
+				 * 64-bit value
+				 */
+				if (regs[insn->src_reg].imm < -MAX_BPF_STACK ||
+				    regs[insn->src_reg].imm > MAX_BPF_STACK) {
+					verbose("R%d value too big in R%d pointer arithmetic\n",
+						insn->src_reg, insn->dst_reg);
+					return -EACCES;
+				}
 				dst_reg->imm += regs[insn->src_reg].imm;
-			else
+			} else {
+				/* safe against overflow: addition of 32-bit
+				 * numbers in 64-bit representation
+				 */
 				dst_reg->imm += insn->imm;
+			}
+			if (dst_reg->imm > 0 || dst_reg->imm < -MAX_BPF_STACK) {
+				verbose("R%d out-of-bounds pointer arithmetic\n",
+					insn->dst_reg);
+				return -EACCES;
+			}
 			return 0;
 		} else if (opcode == BPF_ADD &&
 			   BPF_CLASS(insn->code) == BPF_ALU64 &&
@@ -2697,11 +2729,12 @@
 
 		/* If we didn't map access then again we don't care about the
 		 * mismatched range values and it's ok if our old type was
-		 * UNKNOWN and we didn't go to a NOT_INIT'ed reg.
+		 * UNKNOWN and we didn't go to a NOT_INIT'ed or pointer reg.
 		 */
 		if (rold->type == NOT_INIT ||
 		    (!varlen_map_access && rold->type == UNKNOWN_VALUE &&
-		     rcur->type != NOT_INIT))
+		     rcur->type != NOT_INIT &&
+		     !__is_pointer_value(env->allow_ptr_leaks, rcur)))
 			continue;
 
 		/* Don't care about the reg->id in this case. */
@@ -2862,6 +2895,7 @@
 		if (err)
 			return err;
 
+		env->insn_aux_data[insn_idx].seen = true;
 		if (class == BPF_ALU || class == BPF_ALU64) {
 			err = check_alu_op(env, insn);
 			if (err)
@@ -2992,7 +3026,7 @@
 					return -EINVAL;
 				}
 
-				err = check_call(env, insn->imm);
+				err = check_call(env, insn->imm, insn_idx);
 				if (err)
 					return err;
 
@@ -3059,6 +3093,7 @@
 					return err;
 
 				insn_idx++;
+				env->insn_aux_data[insn_idx].seen = true;
 			} else {
 				verbose("invalid BPF_LD mode\n");
 				return -EINVAL;
@@ -3210,6 +3245,63 @@
 			insn->src_reg = 0;
 }
 
+/* single env->prog->insnsi[off] instruction was replaced with the range
+ * insnsi[off, off + cnt).  Adjust corresponding insn_aux_data by copying
+ * [0, off) and [off, end) to new locations, so the patched range stays zero
+ */
+static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
+				u32 off, u32 cnt)
+{
+	struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
+	int i;
+
+	if (cnt == 1)
+		return 0;
+	new_data = vzalloc(sizeof(struct bpf_insn_aux_data) * prog_len);
+	if (!new_data)
+		return -ENOMEM;
+	memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
+	memcpy(new_data + off + cnt - 1, old_data + off,
+	       sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
+	for (i = off; i < off + cnt - 1; i++)
+		new_data[i].seen = true;
+	env->insn_aux_data = new_data;
+	vfree(old_data);
+	return 0;
+}
+
+static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
+					    const struct bpf_insn *patch, u32 len)
+{
+	struct bpf_prog *new_prog;
+
+	new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
+	if (!new_prog)
+		return NULL;
+	if (adjust_insn_aux_data(env, new_prog->len, off, len))
+		return NULL;
+	return new_prog;
+}
+
+/* The verifier does more data flow analysis than llvm and will not explore
+ * branches that are dead at run time. Malicious programs can have dead code
+ * too. Therefore replace all dead at-run-time code with nops.
+ */
+static void sanitize_dead_code(struct bpf_verifier_env *env)
+{
+	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
+	struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0);
+	struct bpf_insn *insn = env->prog->insnsi;
+	const int insn_cnt = env->prog->len;
+	int i;
+
+	for (i = 0; i < insn_cnt; i++) {
+		if (aux_data[i].seen)
+			continue;
+		memcpy(insn + i, &nop, sizeof(nop));
+	}
+}
+
 /* convert load instructions that access fields of 'struct __sk_buff'
  * into sequence of instructions that access fields of 'struct sk_buff'
  */
@@ -3229,10 +3321,10 @@
 			verbose("bpf verifier is misconfigured\n");
 			return -EINVAL;
 		} else if (cnt) {
-			new_prog = bpf_patch_insn_single(env->prog, 0,
-							 insn_buf, cnt);
+			new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
 			if (!new_prog)
 				return -ENOMEM;
+
 			env->prog = new_prog;
 			delta += cnt - 1;
 		}
@@ -3253,7 +3345,7 @@
 		else
 			continue;
 
-		if (env->insn_aux_data[i].ptr_type != PTR_TO_CTX)
+		if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
 			continue;
 
 		cnt = ops->convert_ctx_access(type, insn->dst_reg, insn->src_reg,
@@ -3263,8 +3355,7 @@
 			return -EINVAL;
 		}
 
-		new_prog = bpf_patch_insn_single(env->prog, i + delta, insn_buf,
-						 cnt);
+		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
 		if (!new_prog)
 			return -ENOMEM;
 
@@ -3278,6 +3369,81 @@
 	return 0;
 }
 
+/* fixup insn->imm field of bpf_call instructions
+ *
+ * this function is called after eBPF program passed verification
+ */
+static int fixup_bpf_calls(struct bpf_verifier_env *env)
+{
+	struct bpf_prog *prog = env->prog;
+	struct bpf_insn *insn = prog->insnsi;
+	const struct bpf_func_proto *fn;
+	const int insn_cnt = prog->len;
+	struct bpf_insn insn_buf[16];
+	struct bpf_prog *new_prog;
+	struct bpf_map *map_ptr;
+	int i, cnt, delta = 0;
+
+
+	for (i = 0; i < insn_cnt; i++, insn++) {
+		if (insn->code != (BPF_JMP | BPF_CALL))
+			continue;
+
+		if (insn->imm == BPF_FUNC_get_route_realm)
+			prog->dst_needed = 1;
+		if (insn->imm == BPF_FUNC_get_prandom_u32)
+			bpf_user_rnd_init_once();
+		if (insn->imm == BPF_FUNC_tail_call) {
+			/* mark bpf_tail_call as different opcode to avoid
+			 * conditional branch in the interpreter for every normal
+			 * call and to prevent accidental JITing by JIT compiler
+			 * that doesn't support bpf_tail_call yet
+			 */
+			insn->imm = 0;
+			insn->code |= BPF_X;
+
+			/* instead of changing every JIT dealing with tail_call
+			 * emit two extra insns:
+			 * if (index >= max_entries) goto out;
+			 * index &= array->index_mask;
+			 * to avoid out-of-bounds cpu speculation
+			 */
+			map_ptr = env->insn_aux_data[i + delta].map_ptr;
+			if (!map_ptr->unpriv_array)
+				continue;
+			insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
+						  map_ptr->max_entries, 2);
+			insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
+						    container_of(map_ptr,
+								 struct bpf_array,
+								 map)->index_mask);
+			insn_buf[2] = *insn;
+			cnt = 3;
+			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
+			if (!new_prog)
+				return -ENOMEM;
+
+			delta    += cnt - 1;
+			env->prog = prog = new_prog;
+			insn      = new_prog->insnsi + i + delta;
+			continue;
+		}
+
+		fn = prog->aux->ops->get_func_proto(insn->imm);
+		/* all functions that have prototype and verifier allowed
+		 * programs to call them, must be real in-kernel functions
+		 */
+		if (!fn->func) {
+			verbose("kernel subsystem misconfigured func %d\n",
+				insn->imm);
+			return -EFAULT;
+		}
+		insn->imm = fn->func - __bpf_call_base;
+	}
+
+	return 0;
+}
+
 static void free_states(struct bpf_verifier_env *env)
 {
 	struct bpf_verifier_state_list *sl, *sln;
@@ -3373,9 +3539,15 @@
 	free_states(env);
 
 	if (ret == 0)
+		sanitize_dead_code(env);
+
+	if (ret == 0)
 		/* program is valid, convert *(u32*)(ctx + off) accesses */
 		ret = convert_ctx_accesses(env);
 
+	if (ret == 0)
+		ret = fixup_bpf_calls(env);
+
 	if (log_level && log_len >= log_size - 1) {
 		BUG_ON(log_len >= log_size);
 		/* verifier log exceeded user supplied buffer */
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 26c624e..a83771f 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -4407,7 +4407,11 @@
 	 */
 	do {
 		css_task_iter_start(&from->self, &it);
-		task = css_task_iter_next(&it);
+
+		do {
+			task = css_task_iter_next(&it);
+		} while (task && (task->flags & PF_EXITING));
+
 		if (task)
 			get_task_struct(task);
 		css_task_iter_end(&it);
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 915e750..c68d150 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -1376,9 +1376,9 @@
 	 * before blk_mq_queue_reinit_notify() from notify_dead(),
 	 * otherwise a RCU stall occurs.
 	 */
-	[CPUHP_TIMERS_DEAD] = {
+	[CPUHP_TIMERS_PREPARE] = {
 		.name			= "timers:dead",
-		.startup.single		= NULL,
+		.startup.single		= timers_prepare_cpu,
 		.teardown.single	= timers_dead_cpu,
 	},
 	/* Kicks the plugged cpu into life */
@@ -1388,11 +1388,6 @@
 		.teardown.single	= NULL,
 		.cant_stop		= true,
 	},
-	[CPUHP_AP_SMPCFD_DYING] = {
-		.name			= "smpcfd:dying",
-		.startup.single		= NULL,
-		.teardown.single	= smpcfd_dying_cpu,
-	},
 	/*
 	 * Handled on controll processor until the plugged processor manages
 	 * this itself.
@@ -1439,6 +1434,11 @@
 		.startup.single		= NULL,
 		.teardown.single	= kmap_remove_unused_cpu,
 	},
+	[CPUHP_AP_SMPCFD_DYING] = {
+		.name			= "smpcfd:dying",
+		.startup.single		= NULL,
+		.teardown.single	= smpcfd_dying_cpu,
+	},
 	/* Entry state on starting. Interrupts enabled from here on. Transient
 	 * state for synchronsization */
 	[CPUHP_AP_ONLINE] = {
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index 0b89128..3990c1f 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -357,7 +357,7 @@
 			}
 			kdb_printf("\n");
 			for (i = 0; i < count; i++) {
-				if (kallsyms_symbol_next(p_tmp, i) < 0)
+				if (WARN_ON(!kallsyms_symbol_next(p_tmp, i)))
 					break;
 				kdb_printf("%s ", p_tmp);
 				*(p_tmp + len) = '\0';
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 712ba4e..e144ded 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7218,25 +7218,12 @@
 	perf_output_end(&handle);
 }
 
-/*
- * Generic event overflow handling, sampling.
- */
-
-static int __perf_event_overflow(struct perf_event *event,
-				   int throttle, struct perf_sample_data *data,
-				   struct pt_regs *regs)
+static int
+__perf_event_account_interrupt(struct perf_event *event, int throttle)
 {
-	int events = atomic_read(&event->event_limit);
 	struct hw_perf_event *hwc = &event->hw;
-	u64 seq;
 	int ret = 0;
-
-	/*
-	 * Non-sampling counters might still use the PMI to fold short
-	 * hardware counters, ignore those.
-	 */
-	if (unlikely(!is_sampling_event(event)))
-		return 0;
+	u64 seq;
 
 	seq = __this_cpu_read(perf_throttled_seq);
 	if (seq != hwc->interrupts_seq) {
@@ -7264,6 +7251,34 @@
 			perf_adjust_period(event, delta, hwc->last_period, true);
 	}
 
+	return ret;
+}
+
+int perf_event_account_interrupt(struct perf_event *event)
+{
+	return __perf_event_account_interrupt(event, 1);
+}
+
+/*
+ * Generic event overflow handling, sampling.
+ */
+
+static int __perf_event_overflow(struct perf_event *event,
+				   int throttle, struct perf_sample_data *data,
+				   struct pt_regs *regs)
+{
+	int events = atomic_read(&event->event_limit);
+	int ret = 0;
+
+	/*
+	 * Non-sampling counters might still use the PMI to fold short
+	 * hardware counters, ignore those.
+	 */
+	if (unlikely(!is_sampling_event(event)))
+		return 0;
+
+	ret = __perf_event_account_interrupt(event, throttle);
+
 	/*
 	 * XXX event_limit might not quite work as expected on inherited
 	 * events
diff --git a/kernel/fork.c b/kernel/fork.c
index b83adf9..79fdfd8 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -58,6 +58,7 @@
 #include <linux/tsacct_kern.h>
 #include <linux/cn_proc.h>
 #include <linux/freezer.h>
+#include <linux/kaiser.h>
 #include <linux/delayacct.h>
 #include <linux/taskstats_kern.h>
 #include <linux/random.h>
@@ -213,6 +214,7 @@
 
 static inline void free_thread_stack(struct task_struct *tsk)
 {
+	kaiser_unmap_thread_stack(tsk->stack);
 #ifdef CONFIG_VMAP_STACK
 	if (task_stack_vm_area(tsk)) {
 		unsigned long flags;
@@ -495,6 +497,10 @@
 	 * functions again.
 	 */
 	tsk->stack = stack;
+
+	err= kaiser_map_thread_stack(tsk->stack);
+	if (err)
+		goto free_stack;
 #ifdef CONFIG_VMAP_STACK
 	tsk->stack_vm_area = stack_vm_area;
 #endif
diff --git a/kernel/groups.c b/kernel/groups.c
index 2fcadd6..94bde52 100644
--- a/kernel/groups.c
+++ b/kernel/groups.c
@@ -77,7 +77,7 @@
 }
 
 /* a simple Shell sort */
-static void groups_sort(struct group_info *group_info)
+void groups_sort(struct group_info *group_info)
 {
 	int base, max, stride;
 	int gidsetsize = group_info->ngroups;
@@ -103,6 +103,7 @@
 		stride /= 3;
 	}
 }
+EXPORT_SYMBOL(groups_sort);
 
 /* a simple bsearch */
 int groups_search(const struct group_info *group_info, kgid_t grp)
@@ -134,7 +135,6 @@
 void set_groups(struct cred *new, struct group_info *group_info)
 {
 	put_group_info(new->group_info);
-	groups_sort(group_info);
 	get_group_info(group_info);
 	new->group_info = group_info;
 }
@@ -218,6 +218,7 @@
 		return retval;
 	}
 
+	groups_sort(group_info);
 	retval = set_current_groups(group_info);
 	put_group_info(group_info);
 
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index a9b8cf5..def4548 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -612,7 +612,7 @@
 
 	return 0;
 }
-late_initcall(jump_label_test);
+early_initcall(jump_label_test);
 #endif /* STATIC_KEYS_SELFTEST */
 
 #endif /* HAVE_JUMP_LABEL */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index bbe783e..7b02ae6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1051,8 +1051,13 @@
  */
 static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_cpu)
 {
-	if (unlikely(!cpu_active(dest_cpu)))
-		return rq;
+	if (p->flags & PF_KTHREAD) {
+		if (unlikely(!cpu_online(dest_cpu)))
+			return rq;
+	} else {
+		if (unlikely(!cpu_active(dest_cpu)))
+			return rq;
+	}
 
 	/* Affinity changed (again). */
 	if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
@@ -5100,6 +5105,14 @@
 
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
 	cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
+
+	/* The userspace tasks are forbidden to run on
+	 * isolated CPUs. So exclude isolated CPUs from
+	 * the getaffinity.
+	 */
+	if (!(p->flags & PF_KTHREAD))
+		cpumask_andnot(mask, mask, cpu_isolated_mask);
+
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
 out_unlock:
@@ -6446,6 +6459,12 @@
 	if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
 		goto free_dlo_mask;
 
+#ifdef HAVE_RT_PUSH_IPI
+	rd->rto_cpu = -1;
+	raw_spin_lock_init(&rd->rto_lock);
+	init_irq_work(&rd->rto_push_work, rto_push_irq_work_func);
+#endif
+
 	init_dl_bw(&rd->dl_bw);
 	if (cpudl_init(&rd->cpudl) != 0)
 		goto free_dlo_mask;
@@ -9614,11 +9633,11 @@
 	reset_task_stats(p);
 	p->ravg.mark_start = wallclock;
 	p->ravg.sum_history[0] = EXITING_TASK_MARKER;
-	free_task_load_ptrs(p);
 
 	enqueue_task(rq, p, 0);
 	clear_ed_task(p, rq);
 	task_rq_unlock(rq, p, &rf);
+	free_task_load_ptrs(p);
 }
 #endif /* CONFIG_SCHED_WALT */
 
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 08d4511..da8261d 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -446,13 +446,13 @@
  *
  * This function returns true if:
  *
- *   runtime / (deadline - t) > dl_runtime / dl_period ,
+ *   runtime / (deadline - t) > dl_runtime / dl_deadline ,
  *
  * IOW we can't recycle current parameters.
  *
- * Notice that the bandwidth check is done against the period. For
+ * Notice that the bandwidth check is done against the deadline. For
  * task with deadline equal to period this is the same of using
- * dl_deadline instead of dl_period in the equation above.
+ * dl_period instead of dl_deadline in the equation above.
  */
 static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
 			       struct sched_dl_entity *pi_se, u64 t)
@@ -477,7 +477,7 @@
 	 * of anything below microseconds resolution is actually fiction
 	 * (but still we want to give the user that illusion >;).
 	 */
-	left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
+	left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
 	right = ((dl_se->deadline - t) >> DL_SCALE) *
 		(pi_se->dl_runtime >> DL_SCALE);
 
@@ -506,10 +506,15 @@
 	}
 }
 
+static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
+{
+	return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
+}
+
 /*
  * If the entity depleted all its runtime, and if we want it to sleep
  * while waiting for some new execution time to become available, we
- * set the bandwidth enforcement timer to the replenishment instant
+ * set the bandwidth replenishment timer to the replenishment instant
  * and try to activate it.
  *
  * Notice that it is important for the caller to know if the timer
@@ -531,7 +536,7 @@
 	 * that it is actually coming from rq->clock and not from
 	 * hrtimer's time base reading.
 	 */
-	act = ns_to_ktime(dl_se->deadline);
+	act = ns_to_ktime(dl_next_period(dl_se));
 	now = hrtimer_cb_get_time(timer);
 	delta = ktime_to_ns(now) - rq_clock(rq);
 	act = ktime_add_ns(act, delta);
@@ -639,6 +644,7 @@
 		lockdep_unpin_lock(&rq->lock, rf.cookie);
 		rq = dl_task_offline_migration(rq, p);
 		rf.cookie = lockdep_pin_lock(&rq->lock);
+		update_rq_clock(rq);
 
 		/*
 		 * Now that the task has been migrated to the new RQ and we
@@ -690,6 +696,37 @@
 	timer->function = dl_task_timer;
 }
 
+/*
+ * During the activation, CBS checks if it can reuse the current task's
+ * runtime and period. If the deadline of the task is in the past, CBS
+ * cannot use the runtime, and so it replenishes the task. This rule
+ * works fine for implicit deadline tasks (deadline == period), and the
+ * CBS was designed for implicit deadline tasks. However, a task with
+ * constrained deadline (deadline < period) might be awakened after the
+ * deadline, but before the next period. In this case, replenishing the
+ * task would allow it to run for runtime / deadline. As in this case
+ * deadline < period, CBS enables a task to run for more than the
+ * runtime / period. In a very loaded system, this can cause a domino
+ * effect, making other tasks miss their deadlines.
+ *
+ * To avoid this problem, in the activation of a constrained deadline
+ * task after the deadline but before the next period, throttle the
+ * task and set the replenishing timer to the begin of the next period,
+ * unless it is boosted.
+ */
+static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
+{
+	struct task_struct *p = dl_task_of(dl_se);
+	struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));
+
+	if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
+	    dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
+		if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
+			return;
+		dl_se->dl_throttled = 1;
+	}
+}
+
 static
 int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
 {
@@ -925,6 +962,11 @@
 	__dequeue_dl_entity(dl_se);
 }
 
+static inline bool dl_is_constrained(struct sched_dl_entity *dl_se)
+{
+	return dl_se->dl_deadline < dl_se->dl_period;
+}
+
 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 {
 	struct task_struct *pi_task = rt_mutex_get_top_task(p);
@@ -951,6 +993,15 @@
 	}
 
 	/*
+	 * Check if a constrained deadline task was activated
+	 * after the deadline but before the next period.
+	 * If that is the case, the task will be throttled and
+	 * the replenishment timer will be set to the next period.
+	 */
+	if (!p->dl.dl_throttled && dl_is_constrained(&p->dl))
+		dl_check_constrained_dl(&p->dl);
+
+	/*
 	 * If p is throttled, we do nothing. In fact, if it exhausted
 	 * its budget it needs a replenishment and, since it now is on
 	 * its rq, the bandwidth timer callback (which clearly has not
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 130bbb7..1ff2e5e 100755
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6606,7 +6606,7 @@
 	 * Due to large variance we need a large fuzz factor; hackbench in
 	 * particularly is sensitive here.
 	 */
-	if ((avg_idle / 512) < avg_cost)
+	if (sched_feat(SIS_AVG_CPU) && (avg_idle / 512) < avg_cost)
 		return -1;
 
 	time = local_clock();
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index c30c48f..a1afd13 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -51,6 +51,11 @@
  */
 SCHED_FEAT(TTWU_QUEUE, false)
 
+/*
+ * When doing wakeups, attempt to limit superfluous scans of the LLC domain.
+ */
+SCHED_FEAT(SIS_AVG_CPU, false)
+
 #ifdef HAVE_RT_PUSH_IPI
 /*
  * In order to avoid a thundering herd attack of CPUs that are
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 1294950..73f11c4 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -77,10 +77,6 @@
 	raw_spin_unlock(&rt_b->rt_runtime_lock);
 }
 
-#if defined(CONFIG_SMP) && defined(HAVE_RT_PUSH_IPI)
-static void push_irq_work_func(struct irq_work *work);
-#endif
-
 void init_rt_rq(struct rt_rq *rt_rq)
 {
 	struct rt_prio_array *array;
@@ -100,13 +96,6 @@
 	rt_rq->rt_nr_migratory = 0;
 	rt_rq->overloaded = 0;
 	plist_head_init(&rt_rq->pushable_tasks);
-
-#ifdef HAVE_RT_PUSH_IPI
-	rt_rq->push_flags = 0;
-	rt_rq->push_cpu = nr_cpu_ids;
-	raw_spin_lock_init(&rt_rq->push_lock);
-	init_irq_work(&rt_rq->push_work, push_irq_work_func);
-#endif
 #endif /* CONFIG_SMP */
 	/* We start is dequeued state, because no RT tasks are queued */
 	rt_rq->rt_queued = 0;
@@ -2147,160 +2136,167 @@
 }
 
 #ifdef HAVE_RT_PUSH_IPI
-/*
- * The search for the next cpu always starts at rq->cpu and ends
- * when we reach rq->cpu again. It will never return rq->cpu.
- * This returns the next cpu to check, or nr_cpu_ids if the loop
- * is complete.
- *
- * rq->rt.push_cpu holds the last cpu returned by this function,
- * or if this is the first instance, it must hold rq->cpu.
- */
-static int rto_next_cpu(struct rq *rq)
-{
-	int prev_cpu = rq->rt.push_cpu;
-	int cpu;
 
-	cpu = cpumask_next(prev_cpu, rq->rd->rto_mask);
+/*
+ * When a high priority task schedules out from a CPU and a lower priority
+ * task is scheduled in, a check is made to see if there's any RT tasks
+ * on other CPUs that are waiting to run because a higher priority RT task
+ * is currently running on its CPU. In this case, the CPU with multiple RT
+ * tasks queued on it (overloaded) needs to be notified that a CPU has opened
+ * up that may be able to run one of its non-running queued RT tasks.
+ *
+ * All CPUs with overloaded RT tasks need to be notified as there is currently
+ * no way to know which of these CPUs have the highest priority task waiting
+ * to run. Instead of trying to take a spinlock on each of these CPUs,
+ * which has shown to cause large latency when done on machines with many
+ * CPUs, sending an IPI to the CPUs to have them push off the overloaded
+ * RT tasks waiting to run.
+ *
+ * Just sending an IPI to each of the CPUs is also an issue, as on large
+ * count CPU machines, this can cause an IPI storm on a CPU, especially
+ * if its the only CPU with multiple RT tasks queued, and a large number
+ * of CPUs scheduling a lower priority task at the same time.
+ *
+ * Each root domain has its own irq work function that can iterate over
+ * all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
+ * tasks must be checked if there's one or many CPUs that are lowering
+ * their priority, there's a single irq work iterator that will try to
+ * push off RT tasks that are waiting to run.
+ *
+ * When a CPU schedules a lower priority task, it will kick off the
+ * irq work iterator that will jump to each CPU with overloaded RT tasks.
+ * As it only takes the first CPU that schedules a lower priority task
+ * to start the process, the rto_start variable is incremented and if
+ * the atomic result is one, then that CPU will try to take the rto_lock.
+ * This prevents high contention on the lock as the process handles all
+ * CPUs scheduling lower priority tasks.
+ *
+ * All CPUs that are scheduling a lower priority task will increment the
+ * rt_loop_next variable. This will make sure that the irq work iterator
+ * checks all RT overloaded CPUs whenever a CPU schedules a new lower
+ * priority task, even if the iterator is in the middle of a scan. Incrementing
+ * the rt_loop_next will cause the iterator to perform another scan.
+ *
+ */
+static int rto_next_cpu(struct root_domain *rd)
+{
+	int next;
+	int cpu;
 
 	/*
-	 * If the previous cpu is less than the rq's CPU, then it already
-	 * passed the end of the mask, and has started from the beginning.
-	 * We end if the next CPU is greater or equal to rq's CPU.
+	 * When starting the IPI RT pushing, the rto_cpu is set to -1,
+	 * rto_next_cpu() will simply return the first CPU found in
+	 * the rto_mask.
+	 *
+	 * If rto_next_cpu() is called with rto_cpu is a valid cpu, it
+	 * will return the next CPU found in the rto_mask.
+	 *
+	 * If there are no more CPUs left in the rto_mask, then a check is made
+	 * against rto_loop and rto_loop_next. rto_loop is only updated with
+	 * the rto_lock held, but any CPU may increment the rto_loop_next
+	 * without any locking.
 	 */
-	if (prev_cpu < rq->cpu) {
-		if (cpu >= rq->cpu)
-			return nr_cpu_ids;
+	for (;;) {
 
-	} else if (cpu >= nr_cpu_ids) {
+		/* When rto_cpu is -1 this acts like cpumask_first() */
+		cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
+
+		rd->rto_cpu = cpu;
+
+		if (cpu < nr_cpu_ids)
+			return cpu;
+
+		rd->rto_cpu = -1;
+
 		/*
-		 * We passed the end of the mask, start at the beginning.
-		 * If the result is greater or equal to the rq's CPU, then
-		 * the loop is finished.
+		 * ACQUIRE ensures we see the @rto_mask changes
+		 * made prior to the @next value observed.
+		 *
+		 * Matches WMB in rt_set_overload().
 		 */
-		cpu = cpumask_first(rq->rd->rto_mask);
-		if (cpu >= rq->cpu)
-			return nr_cpu_ids;
-	}
-	rq->rt.push_cpu = cpu;
+		next = atomic_read_acquire(&rd->rto_loop_next);
 
-	/* Return cpu to let the caller know if the loop is finished or not */
-	return cpu;
+		if (rd->rto_loop == next)
+			break;
+
+		rd->rto_loop = next;
+	}
+
+	return -1;
 }
 
-static int find_next_push_cpu(struct rq *rq)
+static inline bool rto_start_trylock(atomic_t *v)
 {
-	struct rq *next_rq;
-	int cpu;
-
-	while (1) {
-		cpu = rto_next_cpu(rq);
-		if (cpu >= nr_cpu_ids)
-			break;
-		next_rq = cpu_rq(cpu);
-
-		/* Make sure the next rq can push to this rq */
-		if (next_rq->rt.highest_prio.next < rq->rt.highest_prio.curr)
-			break;
-	}
-
-	return cpu;
+	return !atomic_cmpxchg_acquire(v, 0, 1);
 }
 
-#define RT_PUSH_IPI_EXECUTING		1
-#define RT_PUSH_IPI_RESTART		2
+static inline void rto_start_unlock(atomic_t *v)
+{
+	atomic_set_release(v, 0);
+}
 
 static void tell_cpu_to_push(struct rq *rq)
 {
-	int cpu;
+	int cpu = -1;
 
-	if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
-		raw_spin_lock(&rq->rt.push_lock);
-		/* Make sure it's still executing */
-		if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
-			/*
-			 * Tell the IPI to restart the loop as things have
-			 * changed since it started.
-			 */
-			rq->rt.push_flags |= RT_PUSH_IPI_RESTART;
-			raw_spin_unlock(&rq->rt.push_lock);
-			return;
-		}
-		raw_spin_unlock(&rq->rt.push_lock);
-	}
+	/* Keep the loop going if the IPI is currently active */
+	atomic_inc(&rq->rd->rto_loop_next);
 
-	/* When here, there's no IPI going around */
-
-	rq->rt.push_cpu = rq->cpu;
-	cpu = find_next_push_cpu(rq);
-	if (cpu >= nr_cpu_ids)
+	/* Only one CPU can initiate a loop at a time */
+	if (!rto_start_trylock(&rq->rd->rto_loop_start))
 		return;
 
-	rq->rt.push_flags = RT_PUSH_IPI_EXECUTING;
+	raw_spin_lock(&rq->rd->rto_lock);
 
-	irq_work_queue_on(&rq->rt.push_work, cpu);
+	/*
+	 * The rto_cpu is updated under the lock, if it has a valid cpu
+	 * then the IPI is still running and will continue due to the
+	 * update to loop_next, and nothing needs to be done here.
+	 * Otherwise it is finishing up and an ipi needs to be sent.
+	 */
+	if (rq->rd->rto_cpu < 0)
+		cpu = rto_next_cpu(rq->rd);
+
+	raw_spin_unlock(&rq->rd->rto_lock);
+
+	rto_start_unlock(&rq->rd->rto_loop_start);
+
+	if (cpu >= 0)
+		irq_work_queue_on(&rq->rd->rto_push_work, cpu);
 }
 
 /* Called from hardirq context */
-static void try_to_push_tasks(void *arg)
+void rto_push_irq_work_func(struct irq_work *work)
 {
-	struct rt_rq *rt_rq = arg;
-	struct rq *rq, *src_rq;
-	int this_cpu;
+	struct root_domain *rd =
+		container_of(work, struct root_domain, rto_push_work);
+	struct rq *rq;
 	int cpu;
 
-	this_cpu = rt_rq->push_cpu;
+	rq = this_rq();
 
-	/* Paranoid check */
-	BUG_ON(this_cpu != smp_processor_id());
-
-	rq = cpu_rq(this_cpu);
-	src_rq = rq_of_rt_rq(rt_rq);
-
-again:
+	/*
+	 * We do not need to grab the lock to check for has_pushable_tasks.
+	 * When it gets updated, a check is made if a push is possible.
+	 */
 	if (has_pushable_tasks(rq)) {
 		raw_spin_lock(&rq->lock);
-		push_rt_task(rq);
+		push_rt_tasks(rq);
 		raw_spin_unlock(&rq->lock);
 	}
 
+	raw_spin_lock(&rd->rto_lock);
+
 	/* Pass the IPI to the next rt overloaded queue */
-	raw_spin_lock(&rt_rq->push_lock);
-	/*
-	 * If the source queue changed since the IPI went out,
-	 * we need to restart the search from that CPU again.
-	 */
-	if (rt_rq->push_flags & RT_PUSH_IPI_RESTART) {
-		rt_rq->push_flags &= ~RT_PUSH_IPI_RESTART;
-		rt_rq->push_cpu = src_rq->cpu;
-	}
+	cpu = rto_next_cpu(rd);
 
-	cpu = find_next_push_cpu(src_rq);
+	raw_spin_unlock(&rd->rto_lock);
 
-	if (cpu >= nr_cpu_ids)
-		rt_rq->push_flags &= ~RT_PUSH_IPI_EXECUTING;
-	raw_spin_unlock(&rt_rq->push_lock);
-
-	if (cpu >= nr_cpu_ids)
+	if (cpu < 0)
 		return;
 
-	/*
-	 * It is possible that a restart caused this CPU to be
-	 * chosen again. Don't bother with an IPI, just see if we
-	 * have more to push.
-	 */
-	if (unlikely(cpu == rq->cpu))
-		goto again;
-
 	/* Try the next RT overloaded CPU */
-	irq_work_queue_on(&rt_rq->push_work, cpu);
-}
-
-static void push_irq_work_func(struct irq_work *work)
-{
-	struct rt_rq *rt_rq = container_of(work, struct rt_rq, push_work);
-
-	try_to_push_tasks(rt_rq);
+	irq_work_queue_on(&rd->rto_push_work, cpu);
 }
 #endif /* HAVE_RT_PUSH_IPI */
 
@@ -2310,8 +2306,9 @@
 	bool resched = false;
 	struct task_struct *p;
 	struct rq *src_rq;
+	int rt_overload_count = rt_overloaded(this_rq);
 
-	if (likely(!rt_overloaded(this_rq)))
+	if (likely(!rt_overload_count))
 		return;
 
 	/*
@@ -2320,6 +2317,11 @@
 	 */
 	smp_rmb();
 
+	/* If we are the only overloaded CPU do nothing */
+	if (rt_overload_count == 1 &&
+	    cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
+		return;
+
 #ifdef HAVE_RT_PUSH_IPI
 	if (sched_feat(RT_PUSH_IPI)) {
 		tell_cpu_to_push(this_rq);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c85928b..5508248 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -539,7 +539,7 @@
 }
 
 /* RT IPI pull logic requires IRQ_WORK */
-#ifdef CONFIG_IRQ_WORK
+#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP)
 # define HAVE_RT_PUSH_IPI
 #endif
 
@@ -561,12 +561,6 @@
 	unsigned long rt_nr_total;
 	int overloaded;
 	struct plist_head pushable_tasks;
-#ifdef HAVE_RT_PUSH_IPI
-	int push_flags;
-	int push_cpu;
-	struct irq_work push_work;
-	raw_spinlock_t push_lock;
-#endif
 #endif /* CONFIG_SMP */
 	int rt_queued;
 
@@ -657,6 +651,19 @@
 	struct dl_bw dl_bw;
 	struct cpudl cpudl;
 
+#ifdef HAVE_RT_PUSH_IPI
+	/*
+	 * For IPI pull requests, loop across the rto_mask.
+	 */
+	struct irq_work rto_push_work;
+	raw_spinlock_t rto_lock;
+	/* These are only updated and read within rto_lock */
+	int rto_loop;
+	int rto_cpu;
+	/* These atomics are updated outside of a lock */
+	atomic_t rto_loop_next;
+	atomic_t rto_loop_start;
+#endif
 	/*
 	 * The "RT overload" flag: it gets set if a CPU has more than
 	 * one runnable RT task.
@@ -673,6 +680,9 @@
 
 extern struct root_domain def_root_domain;
 
+#ifdef HAVE_RT_PUSH_IPI
+extern void rto_push_irq_work_func(struct irq_work *work);
+#endif
 #endif /* CONFIG_SMP */
 
 /*
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index f941d92..b7da03f 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1965,6 +1965,10 @@
 	p->misfit = false;
 }
 
+/*
+ * kfree() may wakeup kswapd. So this function should NOT be called
+ * with any CPU's rq->lock acquired.
+ */
 void free_task_load_ptrs(struct task_struct *p)
 {
 	kfree(p->ravg.curr_window_cpu);
@@ -2915,7 +2919,7 @@
 
 unsigned long thermal_cap(int cpu)
 {
-	return thermal_cap_cpu[cpu] ?: cpu_rq(cpu)->cpu_capacity_orig;
+	return thermal_cap_cpu[cpu] ?: SCHED_CAPACITY_SCALE;
 }
 
 unsigned long do_thermal_cap(int cpu, unsigned long thermal_max_freq)
diff --git a/kernel/signal.c b/kernel/signal.c
index e48668c..7ebe236 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -72,7 +72,7 @@
 	handler = sig_handler(t, sig);
 
 	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
-			handler == SIG_DFL && !force)
+	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
 		return 1;
 
 	return sig_handler_ignored(handler, sig);
@@ -88,13 +88,15 @@
 	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
 		return 0;
 
-	if (!sig_task_ignored(t, sig, force))
+	/*
+	 * Tracers may want to know about even an ignored signal unless it
+	 * is SIGKILL which can't be reported anyway but can be ignored
+	 * by SIGNAL_UNKILLABLE task.
+	 */
+	if (t->ptrace && sig != SIGKILL)
 		return 0;
 
-	/*
-	 * Tracers may want to know about even ignored signals.
-	 */
-	return !t->ptrace;
+	return sig_task_ignored(t, sig, force);
 }
 
 /*
@@ -917,9 +919,9 @@
 	 * then start taking the whole group down immediately.
 	 */
 	if (sig_fatal(p, sig) &&
-	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
+	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
 	    !sigismember(&t->real_blocked, sig) &&
-	    (sig == SIGKILL || !t->ptrace)) {
+	    (sig == SIGKILL || !p->ptrace)) {
 		/*
 		 * This signal will be fatal to the whole group.
 		 */
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 29bb99c..dea7e55 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -314,7 +314,30 @@
 		.extra1		= &zero,
 		.extra2		= &sysctl_sched_group_upmigrate_pct,
 	},
+	{
+		.procname	= "sched_boost",
+		.data		= &sysctl_sched_boost,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= sched_boost_handler,
+		.extra1         = &zero,
+		.extra2		= &three,
+	},
 #endif
+	{
+		.procname	= "sched_upmigrate",
+		.data		= &sysctl_sched_capacity_margin,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= sched_updown_migrate_handler,
+	},
+	{
+		.procname	= "sched_downmigrate",
+		.data		= &sysctl_sched_capacity_margin_down,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= sched_updown_migrate_handler,
+	},
 #ifdef CONFIG_SCHED_DEBUG
 	{
 		.procname	= "sched_min_granularity_ns",
@@ -356,15 +379,6 @@
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
-	{
-		.procname	= "sched_boost",
-		.data		= &sysctl_sched_boost,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= sched_boost_handler,
-		.extra1         = &zero,
-		.extra2		= &three,
-	},
 #endif
 	{
 		.procname	= "sched_initial_task_util",
@@ -389,20 +403,6 @@
 		.extra1		= &min_wakeup_granularity_ns,
 		.extra2		= &max_wakeup_granularity_ns,
 	},
-	{
-		.procname	= "sched_upmigrate",
-		.data		= &sysctl_sched_capacity_margin,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= sched_updown_migrate_handler,
-	},
-	{
-		.procname	= "sched_downmigrate",
-		.data		= &sysctl_sched_capacity_margin_down,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= sched_updown_migrate_handler,
-	},
 #ifdef CONFIG_SMP
 	{
 		.procname	= "sched_tunable_scaling",
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index 33fba7f..d542c09 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -256,6 +256,7 @@
 	{ CTL_INT,	NET_IPV4_CONF_NOPOLICY,			"disable_policy" },
 	{ CTL_INT,	NET_IPV4_CONF_FORCE_IGMP_VERSION,	"force_igmp_version" },
 	{ CTL_INT,	NET_IPV4_CONF_PROMOTE_SECONDARIES,	"promote_secondaries" },
+	{ CTL_INT, NET_IPV4_CONF_NF_IPV4_DEFRAG_SKIP, "nf_ipv4_defrag_skip" },
 	{}
 };
 
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index df3191e..4ce4285 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -684,6 +684,11 @@
 		tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
 }
 
+/* TIMER_SOFTIRQ is a softirq *number*, not a mask — must test its bit. */
+static inline bool local_timer_softirq_pending(void)
+{
+	return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
+}
+
 static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 					 ktime_t now, int cpu)
 {
@@ -700,8 +705,18 @@
 	} while (read_seqretry(&jiffies_lock, seq));
 	ts->last_jiffies = basejiff;
 
-	if (rcu_needs_cpu(basemono, &next_rcu) ||
-	    arch_needs_cpu() || irq_work_needs_cpu()) {
+	/*
+	 * Keep the periodic tick, when RCU, architecture or irq_work
+	 * requests it.
+	 * Aside of that check whether the local timer softirq is
+	 * pending. If so it's a bad idea to call get_next_timer_interrupt()
+	 * because there is an already expired timer, so it will request
+	 * immediate expiry, which rearms the hardware timer with a
+	 * minimal delta which brings us back to this place
+	 * immediately. Lather, rinse and repeat...
+	 */
+	if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() ||
+	    irq_work_needs_cpu() || local_timer_softirq_pending()) {
 		next_tick = basemono + TICK_NSEC;
 	} else {
 		/*
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 4c0b001..5b5d016 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -826,11 +826,10 @@
 	struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu);
 
 	/*
-	 * If the timer is deferrable and nohz is active then we need to use
-	 * the deferrable base.
+	 * If the timer is deferrable and NO_HZ_COMMON is set then we need
+	 * to use the deferrable base.
 	 */
-	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active &&
-	    (tflags & TIMER_DEFERRABLE)) {
+	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE)) {
 		base = &timer_base_deferrable;
 		if (tflags & TIMER_PINNED)
 			base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
@@ -843,11 +842,10 @@
 	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
 
 	/*
-	 * If the timer is deferrable and nohz is active then we need to use
-	 * the deferrable base.
+	 * If the timer is deferrable and NO_HZ_COMMON is set then we need
+	 * to use the deferrable base.
 	 */
-	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active &&
-	    (tflags & TIMER_DEFERRABLE)) {
+	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE)) {
 		base = &timer_base_deferrable;
 		if (tflags & TIMER_PINNED)
 			base = this_cpu_ptr(&timer_bases[BASE_DEF]);
@@ -1002,8 +1000,6 @@
 	if (!ret && pending_only)
 		goto out_unlock;
 
-	debug_activate(timer, expires);
-
 	new_base = get_target_base(base, timer->flags);
 
 	if (base != new_base) {
@@ -1027,6 +1023,8 @@
 		}
 	}
 
+	debug_activate(timer, expires);
+
 	timer->expires = expires;
 	/*
 	 * If 'idx' was calculated above and the base time did not advance
@@ -1692,7 +1690,7 @@
 	base->must_forward_clk = false;
 
 	__run_timers(base);
-	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active)
+	if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
 		__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
 
 	if ((atomic_cmpxchg(&deferrable_pending, 1, 0) &&
@@ -1872,6 +1870,21 @@
 	}
 }
 
+int timers_prepare_cpu(unsigned int cpu)
+{
+	struct timer_base *base;
+	int b;
+
+	for (b = 0; b < NR_BASES; b++) {
+		base = per_cpu_ptr(&timer_bases[b], cpu);
+		base->clk = jiffies;
+		base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA;
+		base->is_idle = false;
+		base->must_forward_clk = true;
+	}
+	return 0;
+}
+
 static void __migrate_timers(unsigned int cpu, bool remove_pinned)
 {
 	struct timer_base *old_base;
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index f5c016e..3e1d11f 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -280,6 +280,8 @@
 /* Missed count stored at end */
 #define RB_MISSED_STORED	(1 << 30)
 
+#define RB_MISSED_FLAGS		(RB_MISSED_EVENTS|RB_MISSED_STORED)
+
 struct buffer_data_page {
 	u64		 time_stamp;	/* page time stamp */
 	local_t		 commit;	/* write committed index */
@@ -331,7 +333,9 @@
  */
 size_t ring_buffer_page_len(void *page)
 {
-	return local_read(&((struct buffer_data_page *)page)->commit)
+	struct buffer_data_page *bpage = page;
+
+	return (local_read(&bpage->commit) & ~RB_MISSED_FLAGS)
 		+ BUF_PAGE_HDR_SIZE;
 }
 
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e3aae88..046abb0 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3800,37 +3800,30 @@
 	.llseek		= seq_lseek,
 };
 
-/*
- * The tracer itself will not take this lock, but still we want
- * to provide a consistent cpumask to user-space:
- */
-static DEFINE_MUTEX(tracing_cpumask_update_lock);
-
-/*
- * Temporary storage for the character representation of the
- * CPU bitmask (and one more byte for the newline):
- */
-static char mask_str[NR_CPUS + 1];
-
 static ssize_t
 tracing_cpumask_read(struct file *filp, char __user *ubuf,
 		     size_t count, loff_t *ppos)
 {
 	struct trace_array *tr = file_inode(filp)->i_private;
+	char *mask_str;
 	int len;
 
-	mutex_lock(&tracing_cpumask_update_lock);
+	len = snprintf(NULL, 0, "%*pb\n",
+		       cpumask_pr_args(tr->tracing_cpumask)) + 1;
+	mask_str = kmalloc(len, GFP_KERNEL);
+	if (!mask_str)
+		return -ENOMEM;
 
-	len = snprintf(mask_str, count, "%*pb\n",
+	len = snprintf(mask_str, len, "%*pb\n",
 		       cpumask_pr_args(tr->tracing_cpumask));
 	if (len >= count) {
 		count = -EINVAL;
 		goto out_err;
 	}
-	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
+	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
 
 out_err:
-	mutex_unlock(&tracing_cpumask_update_lock);
+	kfree(mask_str);
 
 	return count;
 }
@@ -3850,8 +3843,6 @@
 	if (err)
 		goto err_unlock;
 
-	mutex_lock(&tracing_cpumask_update_lock);
-
 	local_irq_disable();
 	arch_spin_lock(&tr->max_lock);
 	for_each_tracing_cpu(cpu) {
@@ -3874,8 +3865,6 @@
 	local_irq_enable();
 
 	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
-
-	mutex_unlock(&tracing_cpumask_update_lock);
 	free_cpumask_var(tracing_cpumask_new);
 
 	return count;
@@ -6348,7 +6337,7 @@
 		.spd_release	= buffer_spd_release,
 	};
 	struct buffer_ref *ref;
-	int entries, size, i;
+	int entries, i;
 	ssize_t ret = 0;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
@@ -6399,14 +6388,6 @@
 			break;
 		}
 
-		/*
-		 * zero out any left over data, this is going to
-		 * user land.
-		 */
-		size = ring_buffer_page_len(ref->page);
-		if (size < PAGE_SIZE)
-			memset(ref->page + size, 0, PAGE_SIZE - size);
-
 		page = virt_to_page(ref->page);
 
 		spd.pages[i] = page;
@@ -7130,6 +7111,7 @@
 	buf->data = alloc_percpu(struct trace_array_cpu);
 	if (!buf->data) {
 		ring_buffer_free(buf->buffer);
+		buf->buffer = NULL;
 		return -ENOMEM;
 	}
 
@@ -7153,7 +7135,9 @@
 				    allocate_snapshot ? size : 1);
 	if (WARN_ON(ret)) {
 		ring_buffer_free(tr->trace_buffer.buffer);
+		tr->trace_buffer.buffer = NULL;
 		free_percpu(tr->trace_buffer.data);
+		tr->trace_buffer.data = NULL;
 		return -ENOMEM;
 	}
 	tr->allocated_snapshot = allocate_snapshot;
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index f3a960e..0664044 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -449,7 +449,7 @@
 	}
 
 	field = trace_find_event_field(file->event_call, field_name);
-	if (!field) {
+	if (!field || !field->size) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -547,7 +547,7 @@
 		}
 
 		field = trace_find_event_field(file->event_call, field_name);
-		if (!field) {
+		if (!field || !field->size) {
 			ret = -EINVAL;
 			goto out;
 		}
diff --git a/kernel/uid16.c b/kernel/uid16.c
index cc40793..dcffcce 100644
--- a/kernel/uid16.c
+++ b/kernel/uid16.c
@@ -190,6 +190,7 @@
 		return retval;
 	}
 
+	groups_sort(group_info);
 	retval = set_current_groups(group_info);
 	put_group_info(group_info);
 
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 3630826..9aea3480 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1514,6 +1514,7 @@
 	struct timer_list *timer = &dwork->timer;
 	struct work_struct *work = &dwork->work;
 
+	WARN_ON_ONCE(!wq);
 	WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
 		     timer->data != (unsigned long)dwork);
 	WARN_ON_ONCE(timer_pending(timer));
@@ -1640,7 +1641,7 @@
 		mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
 
 	/*
-	 * Sanity check nr_running.  Because wq_unbind_fn() releases
+	 * Sanity check nr_running.  Because unbind_workers() releases
 	 * pool->lock between setting %WORKER_UNBOUND and zapping
 	 * nr_running, the warning may trigger spuriously.  Check iff
 	 * unbind is not in progress.
@@ -4477,9 +4478,8 @@
  * cpu comes back online.
  */
 
-static void wq_unbind_fn(struct work_struct *work)
+static void unbind_workers(int cpu)
 {
-	int cpu = smp_processor_id();
 	struct worker_pool *pool;
 	struct worker *worker;
 
@@ -4676,12 +4676,13 @@
 
 int workqueue_offline_cpu(unsigned int cpu)
 {
-	struct work_struct unbind_work;
 	struct workqueue_struct *wq;
 
 	/* unbinding per-cpu workers should happen on the local CPU */
-	INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
-	queue_work_on(cpu, system_highpri_wq, &unbind_work);
+	if (WARN_ON(cpu != smp_processor_id()))
+		return -1;
+
+	unbind_workers(cpu);
 
 	/* update NUMA affinity of unbound workqueues */
 	mutex_lock(&wq_pool_mutex);
@@ -4689,9 +4690,6 @@
 		wq_update_unbound_numa(wq, cpu, false);
 	mutex_unlock(&wq_pool_mutex);
 
-	/* wait for per-cpu unbinding to finish */
-	flush_work(&unbind_work);
-	destroy_work_on_stack(&unbind_work);
 	return 0;
 }
 
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
index 1ef0cec..dc14bea 100644
--- a/lib/asn1_decoder.c
+++ b/lib/asn1_decoder.c
@@ -313,42 +313,47 @@
 
 	/* Decide how to handle the operation */
 	switch (op) {
-	case ASN1_OP_MATCH_ANY_ACT:
-	case ASN1_OP_MATCH_ANY_ACT_OR_SKIP:
-	case ASN1_OP_COND_MATCH_ANY_ACT:
-	case ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP:
-		ret = actions[machine[pc + 1]](context, hdr, tag, data + dp, len);
-		if (ret < 0)
-			return ret;
-		goto skip_data;
-
-	case ASN1_OP_MATCH_ACT:
-	case ASN1_OP_MATCH_ACT_OR_SKIP:
-	case ASN1_OP_COND_MATCH_ACT_OR_SKIP:
-		ret = actions[machine[pc + 2]](context, hdr, tag, data + dp, len);
-		if (ret < 0)
-			return ret;
-		goto skip_data;
-
 	case ASN1_OP_MATCH:
 	case ASN1_OP_MATCH_OR_SKIP:
+	case ASN1_OP_MATCH_ACT:
+	case ASN1_OP_MATCH_ACT_OR_SKIP:
 	case ASN1_OP_MATCH_ANY:
 	case ASN1_OP_MATCH_ANY_OR_SKIP:
+	case ASN1_OP_MATCH_ANY_ACT:
+	case ASN1_OP_MATCH_ANY_ACT_OR_SKIP:
 	case ASN1_OP_COND_MATCH_OR_SKIP:
+	case ASN1_OP_COND_MATCH_ACT_OR_SKIP:
 	case ASN1_OP_COND_MATCH_ANY:
 	case ASN1_OP_COND_MATCH_ANY_OR_SKIP:
-	skip_data:
+	case ASN1_OP_COND_MATCH_ANY_ACT:
+	case ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP:
+
 		if (!(flags & FLAG_CONS)) {
 			if (flags & FLAG_INDEFINITE_LENGTH) {
+				size_t tmp = dp;
+
 				ret = asn1_find_indefinite_length(
-					data, datalen, &dp, &len, &errmsg);
+					data, datalen, &tmp, &len, &errmsg);
 				if (ret < 0)
 					goto error;
-			} else {
-				dp += len;
 			}
 			pr_debug("- LEAF: %zu\n", len);
 		}
+
+		if (op & ASN1_OP_MATCH__ACT) {
+			unsigned char act;
+
+			if (op & ASN1_OP_MATCH__ANY)
+				act = machine[pc + 1];
+			else
+				act = machine[pc + 2];
+			ret = actions[act](context, hdr, tag, data + dp, len);
+			if (ret < 0)
+				return ret;
+		}
+
+		if (!(flags & FLAG_CONS))
+			dp += len;
 		pc += asn1_op_lengths[op];
 		goto next_op;
 
@@ -434,6 +439,8 @@
 			else
 				act = machine[pc + 1];
 			ret = actions[act](context, hdr, 0, data + tdp, len);
+			if (ret < 0)
+				return ret;
 		}
 		pc += asn1_op_lengths[op];
 		goto next_op;
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index da796e2..c7c96bc 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -360,6 +360,10 @@
 				if (parse_lineno(last, &query->last_lineno) < 0)
 					return -EINVAL;
 
+				/* special case for last lineno not specified */
+				if (query->last_lineno == 0)
+					query->last_lineno = UINT_MAX;
+
 				if (query->last_lineno < query->first_lineno) {
 					pr_err("last-line:%d < 1st-line:%d\n",
 						query->last_lineno,
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 144fe6b..ca06adc 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -194,7 +194,7 @@
 	chunk->phys_addr = phys;
 	chunk->start_addr = virt;
 	chunk->end_addr = virt + size - 1;
-	atomic_set(&chunk->avail, size);
+	atomic_long_set(&chunk->avail, size);
 
 	spin_lock(&pool->lock);
 	list_add_rcu(&chunk->next_chunk, &pool->chunks);
@@ -304,7 +304,7 @@
 	nbits = (size + (1UL << order) - 1) >> order;
 	rcu_read_lock();
 	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
-		if (size > atomic_read(&chunk->avail))
+		if (size > atomic_long_read(&chunk->avail))
 			continue;
 
 		start_bit = 0;
@@ -324,7 +324,7 @@
 
 		addr = chunk->start_addr + ((unsigned long)start_bit << order);
 		size = nbits << order;
-		atomic_sub(size, &chunk->avail);
+		atomic_long_sub(size, &chunk->avail);
 		break;
 	}
 	rcu_read_unlock();
@@ -390,7 +390,7 @@
 			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
 			BUG_ON(remain);
 			size = nbits << order;
-			atomic_add(size, &chunk->avail);
+			atomic_long_add(size, &chunk->avail);
 			rcu_read_unlock();
 			return;
 		}
@@ -464,7 +464,7 @@
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
-		avail += atomic_read(&chunk->avail);
+		avail += atomic_long_read(&chunk->avail);
 	rcu_read_unlock();
 	return avail;
 }
diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c
index e24388a..468fb7c 100644
--- a/lib/mpi/mpi-pow.c
+++ b/lib/mpi/mpi-pow.c
@@ -26,6 +26,7 @@
  *	 however I decided to publish this code under the plain GPL.
  */
 
+#include <linux/sched.h>
 #include <linux/string.h>
 #include "mpi-internal.h"
 #include "longlong.h"
@@ -256,6 +257,7 @@
 				}
 				e <<= 1;
 				c--;
+				cond_resched();
 			}
 
 			i--;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 8258e9e..c234c07 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -745,20 +745,15 @@
 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
 
 static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
-		pmd_t *pmd)
+		pmd_t *pmd, int flags)
 {
 	pmd_t _pmd;
 
-	/*
-	 * We should set the dirty bit only for FOLL_WRITE but for now
-	 * the dirty bit in the pmd is meaningless.  And if the dirty
-	 * bit will become meaningful and we'll only set it with
-	 * FOLL_WRITE, an atomic set_bit will be required on the pmd to
-	 * set the young bit, instead of the current set_pmd_at.
-	 */
-	_pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
+	_pmd = pmd_mkyoung(*pmd);
+	if (flags & FOLL_WRITE)
+		_pmd = pmd_mkdirty(_pmd);
 	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
-				pmd, _pmd,  1))
+				pmd, _pmd, flags & FOLL_WRITE))
 		update_mmu_cache_pmd(vma, addr, pmd);
 }
 
@@ -787,7 +782,7 @@
 		return NULL;
 
 	if (flags & FOLL_TOUCH)
-		touch_pmd(vma, addr, pmd);
+		touch_pmd(vma, addr, pmd, flags);
 
 	/*
 	 * device mapped pages can only be returned if the
@@ -1158,7 +1153,7 @@
 	page = pmd_page(*pmd);
 	VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
 	if (flags & FOLL_TOUCH)
-		touch_pmd(vma, addr, pmd);
+		touch_pmd(vma, addr, pmd, flags);
 	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
 		/*
 		 * We don't mlock() pte-mapped THPs. This way we can avoid
@@ -1514,37 +1509,69 @@
 {
 	struct mm_struct *mm = vma->vm_mm;
 	spinlock_t *ptl;
-	int ret = 0;
+	pmd_t entry;
+	bool preserve_write;
+	int ret;
 
 	ptl = __pmd_trans_huge_lock(pmd, vma);
-	if (ptl) {
-		pmd_t entry;
-		bool preserve_write = prot_numa && pmd_write(*pmd);
-		ret = 1;
+	if (!ptl)
+		return 0;
 
-		/*
-		 * Avoid trapping faults against the zero page. The read-only
-		 * data is likely to be read-cached on the local CPU and
-		 * local/remote hits to the zero page are not interesting.
-		 */
-		if (prot_numa && is_huge_zero_pmd(*pmd)) {
-			spin_unlock(ptl);
-			return ret;
-		}
+	preserve_write = prot_numa && pmd_write(*pmd);
+	ret = 1;
 
-		if (!prot_numa || !pmd_protnone(*pmd)) {
-			entry = pmdp_huge_get_and_clear_notify(mm, addr, pmd);
-			entry = pmd_modify(entry, newprot);
-			if (preserve_write)
-				entry = pmd_mkwrite(entry);
-			ret = HPAGE_PMD_NR;
-			set_pmd_at(mm, addr, pmd, entry);
-			BUG_ON(vma_is_anonymous(vma) && !preserve_write &&
-					pmd_write(entry));
-		}
-		spin_unlock(ptl);
-	}
+	/*
+	 * Avoid trapping faults against the zero page. The read-only
+	 * data is likely to be read-cached on the local CPU and
+	 * local/remote hits to the zero page are not interesting.
+	 */
+	if (prot_numa && is_huge_zero_pmd(*pmd))
+		goto unlock;
 
+	if (prot_numa && pmd_protnone(*pmd))
+		goto unlock;
+
+	/*
+	 * In case prot_numa, we are under down_read(mmap_sem). It's critical
+	 * to not clear pmd intermittently to avoid race with MADV_DONTNEED
+	 * which is also under down_read(mmap_sem):
+	 *
+	 *	CPU0:				CPU1:
+	 *				change_huge_pmd(prot_numa=1)
+	 *				 pmdp_huge_get_and_clear_notify()
+	 * madvise_dontneed()
+	 *  zap_pmd_range()
+	 *   pmd_trans_huge(*pmd) == 0 (without ptl)
+	 *   // skip the pmd
+	 *				 set_pmd_at();
+	 *				 // pmd is re-established
+	 *
+	 * The race makes MADV_DONTNEED miss the huge pmd and don't clear it
+	 * which may break userspace.
+	 *
+	 * pmdp_invalidate() is required to make sure we don't miss
+	 * dirty/young flags set by hardware.
+	 */
+	entry = *pmd;
+	pmdp_invalidate(vma, addr, pmd);
+
+	/*
+	 * Recover dirty/young flags.  It relies on pmdp_invalidate to not
+	 * corrupt them.
+	 */
+	if (pmd_dirty(*pmd))
+		entry = pmd_mkdirty(entry);
+	if (pmd_young(*pmd))
+		entry = pmd_mkyoung(entry);
+
+	entry = pmd_modify(entry, newprot);
+	if (preserve_write)
+		entry = pmd_mkwrite(entry);
+	ret = HPAGE_PMD_NR;
+	set_pmd_at(mm, addr, pmd, entry);
+	BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry));
+unlock:
+	spin_unlock(ptl);
 	return ret;
 }
 
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 65c36ac..6ff65c4 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3135,6 +3135,13 @@
 	}
 }
 
+static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
+{
+	if (addr & ~(huge_page_mask(hstate_vma(vma))))
+		return -EINVAL;
+	return 0;
+}
+
 /*
  * We cannot handle pagefaults against hugetlb pages at all.  They cause
  * handle_mm_fault() to try to instantiate regular-sized pages in the
@@ -3151,6 +3158,7 @@
 	.fault = hugetlb_vm_op_fault,
 	.open = hugetlb_vm_op_open,
 	.close = hugetlb_vm_op_close,
+	.split = hugetlb_vm_op_split,
 };
 
 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
diff --git a/mm/madvise.c b/mm/madvise.c
index 8b25167..59d1aae 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -230,15 +230,14 @@
 {
 	struct file *file = vma->vm_file;
 
+	*prev = vma;
 #ifdef CONFIG_SWAP
 	if (!file) {
-		*prev = vma;
 		force_swapin_readahead(vma, start, end);
 		return 0;
 	}
 
 	if (shmem_mapping(file->f_mapping)) {
-		*prev = vma;
 		force_shm_swapin_readahead(vma, start, end,
 					file->f_mapping);
 		return 0;
@@ -253,7 +252,6 @@
 		return 0;
 	}
 
-	*prev = vma;
 	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 	if (end > vma->vm_end)
 		end = vma->vm_end;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index fce6c48..37d63b2 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1619,9 +1619,13 @@
  * @page: the page
  *
  * This function protects unlocked LRU pages from being moved to
- * another cgroup and stabilizes their page->mem_cgroup binding.
+ * another cgroup.
+ *
+ * It ensures lifetime of the returned memcg. Caller is responsible
+ * for the lifetime of the page; __unlock_page_memcg() is available
+ * when @page might get freed inside the locked section.
  */
-void lock_page_memcg(struct page *page)
+struct mem_cgroup *lock_page_memcg(struct page *page)
 {
 	struct mem_cgroup *memcg;
 	unsigned long flags;
@@ -1630,18 +1634,24 @@
 	 * The RCU lock is held throughout the transaction.  The fast
 	 * path can get away without acquiring the memcg->move_lock
 	 * because page moving starts with an RCU grace period.
-	 */
+	 *
+	 * The RCU lock also protects the memcg from being freed when
+	 * the page state that is going to change is the only thing
+	 * preventing the page itself from being freed. E.g. writeback
+	 * doesn't hold a page reference and relies on PG_writeback to
+	 * keep off truncation, migration and so forth.
+	 */
 	rcu_read_lock();
 
 	if (mem_cgroup_disabled())
-		return;
+		return NULL;
 again:
 	memcg = page->mem_cgroup;
 	if (unlikely(!memcg))
-		return;
+		return NULL;
 
 	if (atomic_read(&memcg->moving_account) <= 0)
-		return;
+		return memcg;
 
 	spin_lock_irqsave(&memcg->move_lock, flags);
 	if (memcg != page->mem_cgroup) {
@@ -1657,18 +1667,18 @@
 	memcg->move_lock_task = current;
 	memcg->move_lock_flags = flags;
 
-	return;
+	return memcg;
 }
 EXPORT_SYMBOL(lock_page_memcg);
 
 /**
- * unlock_page_memcg - unlock a page->mem_cgroup binding
- * @page: the page
+ * __unlock_page_memcg - unlock and unpin a memcg
+ * @memcg: the memcg
+ *
+ * Unlock and unpin a memcg returned by lock_page_memcg().
  */
-void unlock_page_memcg(struct page *page)
+void __unlock_page_memcg(struct mem_cgroup *memcg)
 {
-	struct mem_cgroup *memcg = page->mem_cgroup;
-
 	if (memcg && memcg->move_lock_task == current) {
 		unsigned long flags = memcg->move_lock_flags;
 
@@ -1680,6 +1690,15 @@
 
 	rcu_read_unlock();
 }
+
+/**
+ * unlock_page_memcg - unlock a page->mem_cgroup binding
+ * @page: the page
+ */
+void unlock_page_memcg(struct page *page)
+{
+	__unlock_page_memcg(page->mem_cgroup);
+}
 EXPORT_SYMBOL(unlock_page_memcg);
 
 /*
diff --git a/mm/mmap.c b/mm/mmap.c
index c30a61e..621db7f 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2548,9 +2548,11 @@
 	struct vm_area_struct *new;
 	int err;
 
-	if (is_vm_hugetlb_page(vma) && (addr &
-					~(huge_page_mask(hstate_vma(vma)))))
-		return -EINVAL;
+	if (vma->vm_ops && vma->vm_ops->split) {
+		err = vma->vm_ops->split(vma, addr);
+		if (err)
+			return err;
+	}
 
 	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (!new)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 439cc63..dd7817cd3 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2713,9 +2713,10 @@
 int test_clear_page_writeback(struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);
+	struct mem_cgroup *memcg;
 	int ret;
 
-	lock_page_memcg(page);
+	memcg = lock_page_memcg(page);
 	if (mapping && mapping_use_writeback_tags(mapping)) {
 		struct inode *inode = mapping->host;
 		struct backing_dev_info *bdi = inode_to_bdi(inode);
@@ -2743,13 +2744,20 @@
 	} else {
 		ret = TestClearPageWriteback(page);
 	}
+	/*
+	 * NOTE: Page might be free now! Writeback doesn't hold a page
+	 * reference on its own, it relies on truncation to wait for
+	 * the clearing of PG_writeback. The below can only access
+	 * page state that is static across allocation cycles.
+	 */
 	if (ret) {
-		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
+		__mem_cgroup_update_page_stat(page, memcg,
+					      MEM_CGROUP_STAT_WRITEBACK, -1);
 		dec_node_page_state(page, NR_WRITEBACK);
 		dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
 		inc_node_page_state(page, NR_WRITTEN);
 	}
-	unlock_page_memcg(page);
+	__unlock_page_memcg(memcg);
 	return ret;
 }
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 09a684a..63b19a3 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2642,30 +2642,23 @@
  * Update NUMA hit/miss statistics
  *
  * Must be called with interrupts disabled.
- *
- * When __GFP_OTHER_NODE is set assume the node of the preferred
- * zone is the local node. This is useful for daemons who allocate
- * memory on behalf of other processes.
  */
 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
 								gfp_t flags)
 {
 #ifdef CONFIG_NUMA
-	int local_nid = numa_node_id();
 	enum zone_stat_item local_stat = NUMA_LOCAL;
 
-	if (unlikely(flags & __GFP_OTHER_NODE)) {
+	if (z->node != numa_node_id())
 		local_stat = NUMA_OTHER;
-		local_nid = preferred_zone->node;
-	}
 
-	if (z->node == local_nid) {
+	if (z->node == preferred_zone->node)
 		__inc_zone_state(z, NUMA_HIT);
-		__inc_zone_state(z, local_stat);
-	} else {
+	else {
 		__inc_zone_state(z, NUMA_MISS);
 		__inc_zone_state(preferred_zone, NUMA_FOREIGN);
 	}
+	__inc_zone_state(z, local_stat);
 #endif
 }
 
@@ -7383,11 +7376,18 @@
 	cc.zone->cma_alloc = 1;
 	/*
 	 * In case of -EBUSY, we'd like to know which page causes problem.
-	 * So, just fall through. We will check it in test_pages_isolated().
+	 * So, just fall through. test_pages_isolated() has a tracepoint
+	 * which will report the busy page.
+	 *
+	 * It is possible that busy pages could become available before
+	 * the call to test_pages_isolated, and the range will actually be
+	 * allocated.  So, if we fall through be sure to clear ret so that
+	 * -EBUSY is not accidentally used or returned to caller.
 	 */
 	ret = __alloc_contig_migrate_range(&cc, start, end);
 	if (ret && ret != -EBUSY)
 		goto done;
+	ret =0;
 
 	/*
 	 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 2ab7973..26f0e49 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -932,6 +932,7 @@
 	"nr_slab_unreclaimable",
 	"nr_page_table_pages",
 	"nr_kernel_stack",
+	"nr_overhead",
 	"nr_bounce",
 #if IS_ENABLED(CONFIG_ZSMALLOC)
 	"nr_zspages",
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 1689bb5..d3548c4 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1407,7 +1407,7 @@
 	 * pools/users, we can't allow mapping in interrupt context
 	 * because it can corrupt another users mappings.
 	 */
-	WARN_ON_ONCE(in_interrupt());
+	BUG_ON(in_interrupt());
 
 	/* From now on, migration cannot move the object */
 	pin_tag(handle);
diff --git a/mm/zswap.c b/mm/zswap.c
index dbef278..ded051e 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -752,18 +752,22 @@
 	pool = zswap_pool_find_get(type, compressor);
 	if (pool) {
 		zswap_pool_debug("using existing", pool);
+		WARN_ON(pool == zswap_pool_current());
 		list_del_rcu(&pool->list);
-	} else {
-		spin_unlock(&zswap_pools_lock);
-		pool = zswap_pool_create(type, compressor);
-		spin_lock(&zswap_pools_lock);
 	}
 
+	spin_unlock(&zswap_pools_lock);
+
+	if (!pool)
+		pool = zswap_pool_create(type, compressor);
+
 	if (pool)
 		ret = param_set_charp(s, kp);
 	else
 		ret = -EINVAL;
 
+	spin_lock(&zswap_pools_lock);
+
 	if (!ret) {
 		put_pool = zswap_pool_current();
 		list_add_rcu(&pool->list, &zswap_pools);
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 4a47074..c8ea3cf 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -111,12 +111,7 @@
 		vlan_gvrp_uninit_applicant(real_dev);
 	}
 
-	/* Take it out of our own structures, but be sure to interlock with
-	 * HW accelerating devices or SW vlan input packet processing if
-	 * VLAN is not 0 (leave it there for 802.1p).
-	 */
-	if (vlan_id)
-		vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
+	vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
 
 	/* Get rid of the vlan's reference to real_dev */
 	dev_put(real_dev);
diff --git a/net/9p/client.c b/net/9p/client.c
index cf129fe..1fd6019 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -749,8 +749,7 @@
 	}
 again:
 	/* Wait for the response */
-	err = wait_event_interruptible(*req->wq,
-				       req->status >= REQ_STATUS_RCVD);
+	err = wait_event_killable(*req->wq, req->status >= REQ_STATUS_RCVD);
 
 	/*
 	 * Make sure our req is coherent with regard to updates in other
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index f24b25c..f3a4efc 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -286,8 +286,8 @@
 		if (err == -ENOSPC) {
 			chan->ring_bufs_avail = 0;
 			spin_unlock_irqrestore(&chan->lock, flags);
-			err = wait_event_interruptible(*chan->vc_wq,
-							chan->ring_bufs_avail);
+			err = wait_event_killable(*chan->vc_wq,
+						  chan->ring_bufs_avail);
 			if (err  == -ERESTARTSYS)
 				return err;
 
@@ -327,7 +327,7 @@
 		 * Other zc request to finish here
 		 */
 		if (atomic_read(&vp_pinned) >= chan->p9_max_pages) {
-			err = wait_event_interruptible(vp_wq,
+			err = wait_event_killable(vp_wq,
 			      (atomic_read(&vp_pinned) < chan->p9_max_pages));
 			if (err == -ERESTARTSYS)
 				return err;
@@ -471,8 +471,8 @@
 		if (err == -ENOSPC) {
 			chan->ring_bufs_avail = 0;
 			spin_unlock_irqrestore(&chan->lock, flags);
-			err = wait_event_interruptible(*chan->vc_wq,
-						       chan->ring_bufs_avail);
+			err = wait_event_killable(*chan->vc_wq,
+						  chan->ring_bufs_avail);
 			if (err  == -ERESTARTSYS)
 				goto err_out;
 
@@ -489,8 +489,7 @@
 	virtqueue_kick(chan->vq);
 	spin_unlock_irqrestore(&chan->lock, flags);
 	p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n");
-	err = wait_event_interruptible(*req->wq,
-				       req->status >= REQ_STATUS_RCVD);
+	err = wait_event_killable(*req->wq, req->status >= REQ_STATUS_RCVD);
 	/*
 	 * Non kernel buffers are pinned, unpin them
 	 */
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index ffd09c1..2bbca23 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -3353,9 +3353,10 @@
 			break;
 
 		case L2CAP_CONF_EFS:
-			remote_efs = 1;
-			if (olen == sizeof(efs))
+			if (olen == sizeof(efs)) {
+				remote_efs = 1;
 				memcpy(&efs, (void *) val, olen);
+			}
 			break;
 
 		case L2CAP_CONF_EWS:
@@ -3574,16 +3575,17 @@
 			break;
 
 		case L2CAP_CONF_EFS:
-			if (olen == sizeof(efs))
+			if (olen == sizeof(efs)) {
 				memcpy(&efs, (void *)val, olen);
 
-			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
-			    efs.stype != L2CAP_SERV_NOTRAFIC &&
-			    efs.stype != chan->local_stype)
-				return -ECONNREFUSED;
+				if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
+				    efs.stype != L2CAP_SERV_NOTRAFIC &&
+				    efs.stype != chan->local_stype)
+					return -ECONNREFUSED;
 
-			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
-					   (unsigned long) &efs, endptr - ptr);
+				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
+						   (unsigned long) &efs, endptr - ptr);
+			}
 			break;
 
 		case L2CAP_CONF_FCS:
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index aa1df1a..82ce571 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -706,18 +706,20 @@
 
 static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-	struct nf_bridge_info *nf_bridge;
-	unsigned int mtu_reserved;
+	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+	unsigned int mtu, mtu_reserved;
 
 	mtu_reserved = nf_bridge_mtu_reduction(skb);
+	mtu = skb->dev->mtu;
 
-	if (skb_is_gso(skb) || skb->len + mtu_reserved <= skb->dev->mtu) {
+	if (nf_bridge->frag_max_size && nf_bridge->frag_max_size < mtu)
+		mtu = nf_bridge->frag_max_size;
+
+	if (skb_is_gso(skb) || skb->len + mtu_reserved <= mtu) {
 		nf_bridge_info_free(skb);
 		return br_dev_queue_push_xmit(net, sk, skb);
 	}
 
-	nf_bridge = nf_bridge_info_get(skb);
-
 	/* This is wrong! We should preserve the original fragment
 	 * boundaries by preserving frag_list rather than refragmenting.
 	 */
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 5d4006e..4f83122 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -1092,19 +1092,20 @@
 	struct net_bridge *br = netdev_priv(dev);
 	int err;
 
+	err = register_netdevice(dev);
+	if (err)
+		return err;
+
 	if (tb[IFLA_ADDRESS]) {
 		spin_lock_bh(&br->lock);
 		br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
 		spin_unlock_bh(&br->lock);
 	}
 
-	err = register_netdevice(dev);
-	if (err)
-		return err;
-
 	err = br_changelink(dev, tb, data);
 	if (err)
-		unregister_netdevice(dev);
+		br_dev_delete(dev, NULL);
+
 	return err;
 }
 
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index 292e33b..5f3a627 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -34,7 +34,9 @@
 		return -ENOTSUPP;
 	}
 
-	WARN_ON(!key->len);
+	if (!key->len)
+		return -EINVAL;
+
 	key->key = kmemdup(buf, key->len, GFP_NOIO);
 	if (!key->key) {
 		ret = -ENOMEM;
diff --git a/net/core/dev.c b/net/core/dev.c
index 49f17ff..e0217f8 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1306,6 +1306,7 @@
 {
 	rtnl_lock();
 	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
+	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
 	rtnl_unlock();
 }
 EXPORT_SYMBOL(netdev_notify_peers);
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index e9989b8..7913771 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -742,15 +742,6 @@
 	return dev->ethtool_ops->set_link_ksettings(dev, &link_ksettings);
 }
 
-static void
-warn_incomplete_ethtool_legacy_settings_conversion(const char *details)
-{
-	char name[sizeof(current->comm)];
-
-	pr_info_once("warning: `%s' uses legacy ethtool link settings API, %s\n",
-		     get_task_comm(name, current), details);
-}
-
 /* Query device for its ethtool_cmd settings.
  *
  * Backward compatibility note: for compatibility with legacy ethtool,
@@ -777,10 +768,8 @@
 							   &link_ksettings);
 		if (err < 0)
 			return err;
-		if (!convert_link_ksettings_to_legacy_settings(&cmd,
-							       &link_ksettings))
-			warn_incomplete_ethtool_legacy_settings_conversion(
-				"link modes are only partially reported");
+		convert_link_ksettings_to_legacy_settings(&cmd,
+							  &link_ksettings);
 
 		/* send a sensible cmd tag back to user */
 		cmd.cmd = ETHTOOL_GSET;
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 7001da9..b7efe2f 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -263,7 +263,7 @@
 	spin_lock_irqsave(&net->nsid_lock, flags);
 	peer = idr_find(&net->netns_ids, id);
 	if (peer)
-		get_net(peer);
+		peer = maybe_get_net(peer);
 	spin_unlock_irqrestore(&net->nsid_lock, flags);
 	rcu_read_unlock();
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d7ecf40..26ef78a 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3829,7 +3829,7 @@
 	struct sock *sk = skb->sk;
 
 	if (!skb_may_tx_timestamp(sk, false))
-		return;
+		goto err;
 
 	/* Take a reference to prevent skb_orphan() from freeing the socket,
 	 * but only if the socket refcount is not zero.
@@ -3838,7 +3838,11 @@
 		*skb_hwtstamps(skb) = *hwtstamps;
 		__skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);
 		sock_put(sk);
+		return;
 	}
+
+err:
+	kfree_skb(skb);
 }
 EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
 
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index acd2a6c..fb467db 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -295,7 +295,7 @@
 	case SKNLGRP_INET6_UDP_DESTROY:
 		if (!sock_diag_handlers[AF_INET6])
 			request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
-				       NETLINK_SOCK_DIAG, AF_INET);
+				       NETLINK_SOCK_DIAG, AF_INET6);
 		break;
 	}
 	return 0;
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 0df2aa6..a7f05f0 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -369,14 +369,16 @@
 		.data		= &sysctl_net_busy_poll,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
 	},
 	{
 		.procname	= "busy_read",
 		.data		= &sysctl_net_busy_read,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
 	},
 #endif
 #ifdef CONFIG_NET_SCHED
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 39e7e2b..62522b8 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -57,10 +57,16 @@
 		if (state == DCCP_TIME_WAIT)
 			timeo = DCCP_TIMEWAIT_LEN;
 
+		/* tw_timer is pinned, so we need to make sure BH are disabled
+		 * in following section, otherwise timer handler could run before
+		 * we complete the initialization.
+		 */
+		local_bh_disable();
 		inet_twsk_schedule(tw, timeo);
 		/* Linkage updates. */
 		__inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
 		inet_twsk_put(tw);
+		local_bh_enable();
 	} else {
 		/* Sorry, if we're out of memory, just CLOSE this
 		 * socket up.  We've got bigger problems than
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 062a67c..4457e5d 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1380,7 +1380,7 @@
 
 static bool inetdev_valid_mtu(unsigned int mtu)
 {
-	return mtu >= 68;
+	return mtu >= IPV4_MIN_MTU;
 }
 
 static void inetdev_send_gratuitous_arp(struct net_device *dev,
@@ -2228,6 +2228,8 @@
 					      "route_localnet"),
 		DEVINET_SYSCTL_FLUSHING_ENTRY(DROP_UNICAST_IN_L2_MULTICAST,
 					      "drop_unicast_in_l2_multicast"),
+		DEVINET_SYSCTL_RW_ENTRY(NF_IPV4_DEFRAG_SKIP,
+					"nf_ipv4_defrag_skip"),
 	},
 };
 
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 2ec005c..08b7260 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1254,14 +1254,19 @@
 
 static void ip_fib_net_exit(struct net *net)
 {
-	unsigned int i;
+	int i;
 
 	rtnl_lock();
 #ifdef CONFIG_IP_MULTIPLE_TABLES
 	RCU_INIT_POINTER(net->ipv4.fib_main, NULL);
 	RCU_INIT_POINTER(net->ipv4.fib_default, NULL);
 #endif
-	for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
+	/* Destroy the tables in reverse order to guarantee that the
+	 * local table, ID 255, is destroyed before the main table, ID
+	 * 254. This is necessary as the local table may contain
+	 * references to data contained in the main table.
+	 */
+	for (i = FIB_TABLE_HASHSZ - 1; i >= 0; i--) {
 		struct hlist_head *head = &net->ipv4.fib_table_hash[i];
 		struct hlist_node *tmp;
 		struct fib_table *tb;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 691146a..42a19fb 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -768,7 +768,7 @@
 }
 
 /*
- *	Handle ICMP_DEST_UNREACH, ICMP_TIME_EXCEED, ICMP_QUENCH, and
+ *	Handle ICMP_DEST_UNREACH, ICMP_TIME_EXCEEDED, ICMP_QUENCH, and
  *	ICMP_PARAMETERPROB.
  */
 
@@ -796,7 +796,8 @@
 	if (iph->ihl < 5) /* Mangled header, drop. */
 		goto out_err;
 
-	if (icmph->type == ICMP_DEST_UNREACH) {
+	switch (icmph->type) {
+	case ICMP_DEST_UNREACH:
 		switch (icmph->code & 15) {
 		case ICMP_NET_UNREACH:
 		case ICMP_HOST_UNREACH:
@@ -832,8 +833,16 @@
 		}
 		if (icmph->code > NR_ICMP_UNREACH)
 			goto out;
-	} else if (icmph->type == ICMP_PARAMETERPROB)
+		break;
+	case ICMP_PARAMETERPROB:
 		info = ntohl(icmph->un.gateway) >> 24;
+		break;
+	case ICMP_TIME_EXCEEDED:
+		__ICMP_INC_STATS(net, ICMP_MIB_INTIMEEXCDS);
+		if (icmph->code == ICMP_EXC_FRAGTIME)
+			goto out;
+		break;
+	}
 
 	/*
 	 *	Throw it at our lower layers
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 08575e3..7bff0c6 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -89,6 +89,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/times.h>
 #include <linux/pkt_sched.h>
+#include <linux/byteorder/generic.h>
 
 #include <net/net_namespace.h>
 #include <net/arp.h>
@@ -321,6 +322,23 @@
 	return scount;
 }
 
+/* source address selection per RFC 3376 section 4.2.13 */
+static __be32 igmpv3_get_srcaddr(struct net_device *dev,
+				 const struct flowi4 *fl4)
+{
+	struct in_device *in_dev = __in_dev_get_rcu(dev);
+
+	if (!in_dev)
+		return htonl(INADDR_ANY);
+
+	for_ifa(in_dev) {
+		if (inet_ifa_match(fl4->saddr, ifa))
+			return fl4->saddr;
+	} endfor_ifa(in_dev);
+
+	return htonl(INADDR_ANY);
+}
+
 static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
 {
 	struct sk_buff *skb;
@@ -368,7 +386,7 @@
 	pip->frag_off = htons(IP_DF);
 	pip->ttl      = 1;
 	pip->daddr    = fl4.daddr;
-	pip->saddr    = fl4.saddr;
+	pip->saddr    = igmpv3_get_srcaddr(dev, &fl4);
 	pip->protocol = IPPROTO_IGMP;
 	pip->tot_len  = 0;	/* filled in later */
 	ip_select_ident(net, skb, NULL);
@@ -404,16 +422,17 @@
 }
 
 static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc,
-	int type, struct igmpv3_grec **ppgr)
+	int type, struct igmpv3_grec **ppgr, unsigned int mtu)
 {
 	struct net_device *dev = pmc->interface->dev;
 	struct igmpv3_report *pih;
 	struct igmpv3_grec *pgr;
 
-	if (!skb)
-		skb = igmpv3_newpack(dev, dev->mtu);
-	if (!skb)
-		return NULL;
+	if (!skb) {
+		skb = igmpv3_newpack(dev, mtu);
+		if (!skb)
+			return NULL;
+	}
 	pgr = (struct igmpv3_grec *)skb_put(skb, sizeof(struct igmpv3_grec));
 	pgr->grec_type = type;
 	pgr->grec_auxwords = 0;
@@ -436,12 +455,17 @@
 	struct igmpv3_grec *pgr = NULL;
 	struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list;
 	int scount, stotal, first, isquery, truncate;
+	unsigned int mtu;
 
 	if (pmc->multiaddr == IGMP_ALL_HOSTS)
 		return skb;
 	if (ipv4_is_local_multicast(pmc->multiaddr) && !net->ipv4.sysctl_igmp_llm_reports)
 		return skb;
 
+	mtu = READ_ONCE(dev->mtu);
+	if (mtu < IPV4_MIN_MTU)
+		return skb;
+
 	isquery = type == IGMPV3_MODE_IS_INCLUDE ||
 		  type == IGMPV3_MODE_IS_EXCLUDE;
 	truncate = type == IGMPV3_MODE_IS_EXCLUDE ||
@@ -462,7 +486,7 @@
 		    AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
 			if (skb)
 				igmpv3_sendpack(skb);
-			skb = igmpv3_newpack(dev, dev->mtu);
+			skb = igmpv3_newpack(dev, mtu);
 		}
 	}
 	first = 1;
@@ -498,12 +522,12 @@
 				pgr->grec_nsrcs = htons(scount);
 			if (skb)
 				igmpv3_sendpack(skb);
-			skb = igmpv3_newpack(dev, dev->mtu);
+			skb = igmpv3_newpack(dev, mtu);
 			first = 1;
 			scount = 0;
 		}
 		if (first) {
-			skb = add_grhead(skb, pmc, type, &pgr);
+			skb = add_grhead(skb, pmc, type, &pgr, mtu);
 			first = 0;
 		}
 		if (!skb)
@@ -538,7 +562,7 @@
 				igmpv3_sendpack(skb);
 				skb = NULL; /* add_grhead will get a new one */
 			}
-			skb = add_grhead(skb, pmc, type, &pgr);
+			skb = add_grhead(skb, pmc, type, &pgr, mtu);
 		}
 	}
 	if (pgr)
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 453db95..4bf3b8a 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -198,6 +198,7 @@
 	qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
 	net = container_of(qp->q.net, struct net, ipv4.frags);
 
+	rcu_read_lock();
 	spin_lock(&qp->q.lock);
 
 	if (qp->q.flags & INET_FRAG_COMPLETE)
@@ -207,7 +208,7 @@
 	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
 
 	if (!inet_frag_evicting(&qp->q)) {
-		struct sk_buff *head = qp->q.fragments;
+		struct sk_buff *clone, *head = qp->q.fragments;
 		const struct iphdr *iph;
 		int err;
 
@@ -216,32 +217,40 @@
 		if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments)
 			goto out;
 
-		rcu_read_lock();
 		head->dev = dev_get_by_index_rcu(net, qp->iif);
 		if (!head->dev)
-			goto out_rcu_unlock;
+			goto out;
+
 
 		/* skb has no dst, perform route lookup again */
 		iph = ip_hdr(head);
 		err = ip_route_input_noref(head, iph->daddr, iph->saddr,
 					   iph->tos, head->dev);
 		if (err)
-			goto out_rcu_unlock;
+			goto out;
 
 		/* Only an end host needs to send an ICMP
 		 * "Fragment Reassembly Timeout" message, per RFC792.
 		 */
 		if (frag_expire_skip_icmp(qp->user) &&
 		    (skb_rtable(head)->rt_type != RTN_LOCAL))
-			goto out_rcu_unlock;
+			goto out;
+
+		clone = skb_clone(head, GFP_ATOMIC);
 
 		/* Send an ICMP "Fragment Reassembly Timeout" message. */
-		icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
-out_rcu_unlock:
-		rcu_read_unlock();
+		if (clone) {
+			spin_unlock(&qp->q.lock);
+			icmp_send(clone, ICMP_TIME_EXCEEDED,
+				  ICMP_EXC_FRAGTIME, 0);
+			consume_skb(clone);
+			goto out_rcu_unlock;
+		}
 	}
 out:
 	spin_unlock(&qp->q.lock);
+out_rcu_unlock:
+	rcu_read_unlock();
 	ipq_put(qp);
 }
 
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 4d37bdc..551dd39 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -819,6 +819,7 @@
 	{
 		struct ip_mreqn mreq;
 		struct net_device *dev = NULL;
+		int midx;
 
 		if (sk->sk_type == SOCK_STREAM)
 			goto e_inval;
@@ -863,11 +864,15 @@
 		err = -EADDRNOTAVAIL;
 		if (!dev)
 			break;
+
+		midx = l3mdev_master_ifindex(dev);
+
 		dev_put(dev);
 
 		err = -EINVAL;
 		if (sk->sk_bound_dev_if &&
-		    mreq.imr_ifindex != sk->sk_bound_dev_if)
+		    mreq.imr_ifindex != sk->sk_bound_dev_if &&
+		    (!midx || midx != sk->sk_bound_dev_if))
 			break;
 
 		inet->mc_index = mreq.imr_ifindex;
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index bd7f183..96536a0 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -346,8 +346,8 @@
 	dev->needed_headroom = t_hlen + hlen;
 	mtu -= (dev->hard_header_len + t_hlen);
 
-	if (mtu < 68)
-		mtu = 68;
+	if (mtu < IPV4_MIN_MTU)
+		mtu = IPV4_MIN_MTU;
 
 	return mtu;
 }
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 071a785..b23464d 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -306,7 +306,7 @@
 	while ((d = next)) {
 		next = d->next;
 		dev = d->dev;
-		if ((!ic_dev || dev != ic_dev->dev) && !netdev_uses_dsa(dev)) {
+		if (d != ic_dev && !netdev_uses_dsa(dev)) {
 			pr_debug("IP-Config: Downing %s\n", dev->name);
 			dev_change_flags(dev, d->flags);
 		}
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 713c09a..0c9ded2 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -158,6 +158,10 @@
 	if (skb->len < sizeof(struct iphdr) ||
 	    ip_hdrlen(skb) < sizeof(struct iphdr))
 		return NF_ACCEPT;
+
+	if (ip_is_fragment(ip_hdr(skb))) /* IP_NODEFRAG setsockopt set */
+		return NF_ACCEPT;
+
 	return nf_conntrack_in(state->net, PF_INET, state->hook, skb);
 }
 
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index d88da36..93224b2 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -11,6 +11,7 @@
 #include <linux/netfilter.h>
 #include <linux/module.h>
 #include <linux/skbuff.h>
+#include <linux/inetdevice.h>
 #include <net/route.h>
 #include <net/ip.h>
 
@@ -78,8 +79,13 @@
 #endif
 	/* Gather fragments. */
 	if (ip_is_fragment(ip_hdr(skb))) {
-		enum ip_defrag_users user =
-			nf_ct_defrag_user(state->hook, skb);
+		enum ip_defrag_users user;
+
+		if (skb->dev &&
+		    IN_DEV_NF_IPV4_DEFRAG_SKIP(__in_dev_get_rcu(skb->dev)))
+			return NF_ACCEPT;
+
+		user = nf_ct_defrag_user(state->hook, skb);
 
 		if (nf_ct_ipv4_gather_frags(state->net, skb, user))
 			return NF_STOLEN;
diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
index f8aad03..6f5e8d0 100644
--- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
@@ -255,11 +255,6 @@
 	/* maniptype == SRC for postrouting. */
 	enum nf_nat_manip_type maniptype = HOOK2MANIP(state->hook);
 
-	/* We never see fragments: conntrack defrags on pre-routing
-	 * and local-out, and nf_nat_out protects post-routing.
-	 */
-	NF_CT_ASSERT(!ip_is_fragment(ip_hdr(skb)));
-
 	ct = nf_ct_get(skb, &ctinfo);
 	/* Can't track?  It's not due to stress, or conntrack would
 	 * have dropped it.  Hence it's the user's responsibilty to
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index 5a8f7c3..53e49f5 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -1260,16 +1260,6 @@
 	.timeout	= 180,
 };
 
-static struct nf_conntrack_helper snmp_helper __read_mostly = {
-	.me			= THIS_MODULE,
-	.help			= help,
-	.expect_policy		= &snmp_exp_policy,
-	.name			= "snmp",
-	.tuple.src.l3num	= AF_INET,
-	.tuple.src.u.udp.port	= cpu_to_be16(SNMP_PORT),
-	.tuple.dst.protonum	= IPPROTO_UDP,
-};
-
 static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
 	.me			= THIS_MODULE,
 	.help			= help,
@@ -1288,17 +1278,10 @@
 
 static int __init nf_nat_snmp_basic_init(void)
 {
-	int ret = 0;
-
 	BUG_ON(nf_nat_snmp_hook != NULL);
 	RCU_INIT_POINTER(nf_nat_snmp_hook, help);
 
-	ret = nf_conntrack_helper_register(&snmp_trap_helper);
-	if (ret < 0) {
-		nf_conntrack_helper_unregister(&snmp_helper);
-		return ret;
-	}
-	return ret;
+	return nf_conntrack_helper_register(&snmp_trap_helper);
 }
 
 static void __exit nf_nat_snmp_basic_fini(void)
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index ab0bbcb..a860df2 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -502,11 +502,16 @@
 	int err;
 	struct ip_options_data opt_copy;
 	struct raw_frag_vec rfv;
+	int hdrincl;
 
 	err = -EMSGSIZE;
 	if (len > 0xFFFF)
 		goto out;
 
+	/* hdrincl should be READ_ONCE(inet->hdrincl)
+	 * but READ_ONCE() doesn't work with bit fields
+	 */
+	hdrincl = inet->hdrincl;
 	/*
 	 *	Check the flags.
 	 */
@@ -582,7 +587,7 @@
 		/* Linux does not mangle headers on raw sockets,
 		 * so that IP options + IP_HDRINCL is non-sense.
 		 */
-		if (inet->hdrincl)
+		if (hdrincl)
 			goto done;
 		if (ipc.opt->opt.srr) {
 			if (!daddr)
@@ -604,12 +609,12 @@
 
 	flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
 			   RT_SCOPE_UNIVERSE,
-			   inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
+			   hdrincl ? IPPROTO_RAW : sk->sk_protocol,
 			   inet_sk_flowi_flags(sk) |
-			    (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
+			    (hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
 			   daddr, saddr, 0, 0, sk->sk_uid);
 
-	if (!inet->hdrincl) {
+	if (!hdrincl) {
 		rfv.msg = msg;
 		rfv.hlen = 0;
 
@@ -634,7 +639,7 @@
 		goto do_confirm;
 back_from_confirm:
 
-	if (inet->hdrincl)
+	if (hdrincl)
 		err = raw_send_hdrinc(sk, &fl4, msg, len,
 				      &rt, msg->msg_flags, &ipc.sockc);
 
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index cd632e6..03728c6 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -633,9 +633,12 @@
 	struct fnhe_hash_bucket *hash;
 	struct fib_nh_exception *fnhe;
 	struct rtable *rt;
+	u32 genid, hval;
 	unsigned int i;
 	int depth;
-	u32 hval = fnhe_hashfun(daddr);
+
+	genid = fnhe_genid(dev_net(nh->nh_dev));
+	hval = fnhe_hashfun(daddr);
 
 	spin_lock_bh(&fnhe_lock);
 
@@ -658,12 +661,13 @@
 	}
 
 	if (fnhe) {
+		if (fnhe->fnhe_genid != genid)
+			fnhe->fnhe_genid = genid;
 		if (gw)
 			fnhe->fnhe_gw = gw;
-		if (pmtu) {
+		if (pmtu)
 			fnhe->fnhe_pmtu = pmtu;
-			fnhe->fnhe_expires = max(1UL, expires);
-		}
+		fnhe->fnhe_expires = max(1UL, expires);
 		/* Update all cached dsts too */
 		rt = rcu_dereference(fnhe->fnhe_rth_input);
 		if (rt)
@@ -682,7 +686,7 @@
 			fnhe->fnhe_next = hash->chain;
 			rcu_assign_pointer(hash->chain, fnhe);
 		}
-		fnhe->fnhe_genid = fnhe_genid(dev_net(nh->nh_dev));
+		fnhe->fnhe_genid = genid;
 		fnhe->fnhe_daddr = daddr;
 		fnhe->fnhe_gw = gw;
 		fnhe->fnhe_pmtu = pmtu;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 0a57417..bf071f3 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2306,6 +2306,7 @@
 	tp->snd_cwnd_cnt = 0;
 	tp->window_clamp = 0;
 	tcp_set_ca_state(sk, TCP_CA_Open);
+	tp->is_sack_reneg = 0;
 	tcp_clear_retrans(tp);
 	inet_csk_delack_init(sk);
 	/* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index cb8db34..e86a34f 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -81,7 +81,8 @@
 	u32	lt_last_lost;	     /* LT intvl start: tp->lost */
 	u32	pacing_gain:10,	/* current gain for setting pacing rate */
 		cwnd_gain:10,	/* current gain for setting cwnd */
-		full_bw_cnt:3,	/* number of rounds without large bw gains */
+		full_bw_reached:1,   /* reached full bw in Startup? */
+		full_bw_cnt:2,	/* number of rounds without large bw gains */
 		cycle_idx:3,	/* current index in pacing_gain cycle array */
 		has_seen_rtt:1, /* have we seen an RTT sample yet? */
 		unused_b:5;
@@ -151,7 +152,7 @@
 {
 	const struct bbr *bbr = inet_csk_ca(sk);
 
-	return bbr->full_bw_cnt >= bbr_full_bw_cnt;
+	return bbr->full_bw_reached;
 }
 
 /* Return the windowed max recent bandwidth sample, in pkts/uS << BW_SCALE. */
@@ -688,6 +689,7 @@
 		return;
 	}
 	++bbr->full_bw_cnt;
+	bbr->full_bw_reached = bbr->full_bw_cnt >= bbr_full_bw_cnt;
 }
 
 /* If pipe is probably full, drain the queue and then enter steady-state. */
@@ -821,6 +823,7 @@
 	bbr->restore_cwnd = 0;
 	bbr->round_start = 0;
 	bbr->idle_restart = 0;
+	bbr->full_bw_reached = 0;
 	bbr->full_bw = 0;
 	bbr->full_bw_cnt = 0;
 	bbr->cycle_mstamp.v64 = 0;
@@ -840,6 +843,11 @@
  */
 static u32 bbr_undo_cwnd(struct sock *sk)
 {
+	struct bbr *bbr = inet_csk_ca(sk);
+
+	bbr->full_bw = 0;   /* spurious slow-down; reset full pipe detection */
+	bbr->full_bw_cnt = 0;
+	bbr_reset_lt_bw_sampling(sk);
 	return tcp_sk(sk)->snd_cwnd;
 }
 
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index ec9e58b..8ce03d8 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1967,6 +1967,8 @@
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
 		tp->sacked_out = 0;
 		tp->fackets_out = 0;
+		/* Mark SACK reneging until we recover from this loss event. */
+		tp->is_sack_reneg = 1;
 	}
 	tcp_clear_all_retrans_hints(tp);
 
@@ -2464,6 +2466,7 @@
 		return true;
 	}
 	tcp_set_ca_state(sk, TCP_CA_Open);
+	tp->is_sack_reneg = 0;
 	return false;
 }
 
@@ -2495,8 +2498,10 @@
 			NET_INC_STATS(sock_net(sk),
 					LINUX_MIB_TCPSPURIOUSRTOS);
 		inet_csk(sk)->icsk_retransmits = 0;
-		if (frto_undo || tcp_is_sack(tp))
+		if (frto_undo || tcp_is_sack(tp)) {
 			tcp_set_ca_state(sk, TCP_CA_Open);
+			tp->is_sack_reneg = 0;
+		}
 		return true;
 	}
 	return false;
@@ -3590,6 +3595,7 @@
 	struct tcp_sacktag_state sack_state;
 	struct rate_sample rs = { .prior_delivered = 0 };
 	u32 prior_snd_una = tp->snd_una;
+	bool is_sack_reneg = tp->is_sack_reneg;
 	u32 ack_seq = TCP_SKB_CB(skb)->seq;
 	u32 ack = TCP_SKB_CB(skb)->ack_seq;
 	bool is_dupack = false;
@@ -3712,7 +3718,7 @@
 		tcp_schedule_loss_probe(sk);
 	delivered = tp->delivered - delivered;	/* freshly ACKed or SACKed */
 	lost = tp->lost - lost;			/* freshly marked lost */
-	tcp_rate_gen(sk, delivered, lost, &now, &rs);
+	tcp_rate_gen(sk, delivered, lost, is_sack_reneg, &now, &rs);
 	tcp_cong_control(sk, ack, delivered, flag, &rs);
 	tcp_xmit_recovery(sk, rexmit);
 	return 1;
@@ -5082,7 +5088,7 @@
 	if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
 		sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
 		/* pairs with tcp_poll() */
-		smp_mb__after_atomic();
+		smp_mb();
 		if (sk->sk_socket &&
 		    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
 			tcp_new_space(sk);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 49d32fbc..05ac17a 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -831,7 +831,7 @@
 			tcp_time_stamp,
 			req->ts_recent,
 			0,
-			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
+			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr,
 					  AF_INET),
 			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
 			ip_hdr(skb)->tos);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 64e1ba4..830a564 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -328,10 +328,16 @@
 				timeo = TCP_TIMEWAIT_LEN;
 		}
 
+		/* tw_timer is pinned, so we need to make sure BH are disabled
+		 * in following section, otherwise timer handler could run before
+		 * we complete the initialization.
+		 */
+		local_bh_disable();
 		inet_twsk_schedule(tw, timeo);
 		/* Linkage updates. */
 		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
 		inet_twsk_put(tw);
+		local_bh_enable();
 	} else {
 		/* Sorry, if we're out of memory, just CLOSE this
 		 * socket up.  We've got bigger problems than
diff --git a/net/ipv4/tcp_rate.c b/net/ipv4/tcp_rate.c
index 9be1581..18309f5 100644
--- a/net/ipv4/tcp_rate.c
+++ b/net/ipv4/tcp_rate.c
@@ -106,7 +106,7 @@
 
 /* Update the connection delivery information and generate a rate sample. */
 void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
-		  struct skb_mstamp *now, struct rate_sample *rs)
+		  bool is_sack_reneg, struct skb_mstamp *now, struct rate_sample *rs)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	u32 snd_us, ack_us;
@@ -124,8 +124,12 @@
 
 	rs->acked_sacked = delivered;	/* freshly ACKed or SACKed */
 	rs->losses = lost;		/* freshly marked lost */
-	/* Return an invalid sample if no timing information is available. */
-	if (!rs->prior_mstamp.v64) {
+	/* Return an invalid sample if no timing information is available or
+	 * in recovery from loss with SACK reneging. Rate samples taken during
+	 * a SACK reneging event may overestimate bw by including packets that
+	 * were SACKed before the reneg.
+	 */
+	if (!rs->prior_mstamp.v64 || is_sack_reneg) {
 		rs->delivered = -1;
 		rs->interval_us = -1;
 		return;
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index 4c4bac1..3ecb61e 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -158,7 +158,7 @@
 
 static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp)
 {
-	return  min(tp->snd_ssthresh, tp->snd_cwnd-1);
+	return  min(tp->snd_ssthresh, tp->snd_cwnd);
 }
 
 static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 2abaa2e..140d05f 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -290,10 +290,10 @@
 	.keep_addr_on_down	= 0,
 };
 
-/* Check if a valid qdisc is available */
-static inline bool addrconf_qdisc_ok(const struct net_device *dev)
+/* Check if link is ready: is it up and is a valid qdisc available */
+static inline bool addrconf_link_ready(const struct net_device *dev)
 {
-	return !qdisc_tx_is_noop(dev);
+	return netif_oper_up(dev) && !qdisc_tx_is_noop(dev);
 }
 
 static void addrconf_del_rs_timer(struct inet6_dev *idev)
@@ -438,7 +438,7 @@
 
 	ndev->token = in6addr_any;
 
-	if (netif_running(dev) && addrconf_qdisc_ok(dev))
+	if (netif_running(dev) && addrconf_link_ready(dev))
 		ndev->if_flags |= IF_READY;
 
 	ipv6_mc_init_dev(ndev);
@@ -3408,7 +3408,7 @@
 			/* restore routes for permanent addresses */
 			addrconf_permanent_addr(dev);
 
-			if (!addrconf_qdisc_ok(dev)) {
+			if (!addrconf_link_ready(dev)) {
 				/* device is not ready yet. */
 				pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
 					dev->name);
@@ -3423,7 +3423,7 @@
 				run_pending = 1;
 			}
 		} else if (event == NETDEV_CHANGE) {
-			if (!addrconf_qdisc_ok(dev)) {
+			if (!addrconf_link_ready(dev)) {
 				/* device is still not ready. */
 				break;
 			}
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 0281645..55a8f68 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -225,7 +225,6 @@
 	np->mcast_hops	= IPV6_DEFAULT_MCASTHOPS;
 	np->mc_loop	= 1;
 	np->pmtudisc	= IPV6_PMTUDISC_WANT;
-	np->autoflowlabel = ip6_default_np_autolabel(sock_net(sk));
 	sk->sk_ipv6only	= net->ipv6.sysctl.bindv6only;
 
 	/* Init the ipv4 part of the socket since we can have sockets
@@ -926,12 +925,12 @@
 	err = register_pernet_subsys(&inet6_net_ops);
 	if (err)
 		goto register_pernet_fail;
-	err = icmpv6_init();
-	if (err)
-		goto icmp_fail;
 	err = ip6_mr_init();
 	if (err)
 		goto ipmr_fail;
+	err = icmpv6_init();
+	if (err)
+		goto icmp_fail;
 	err = ndisc_init();
 	if (err)
 		goto ndisc_fail;
@@ -1061,10 +1060,10 @@
 	ndisc_cleanup();
 ndisc_fail:
 	ip6_mr_cleanup();
-ipmr_fail:
-	icmpv6_cleanup();
 icmp_fail:
 	unregister_pernet_subsys(&inet6_net_ops);
+ipmr_fail:
+	icmpv6_cleanup();
 register_pernet_fail:
 	sock_unregister(PF_INET6);
 	rtnl_unregister_all(PF_INET6);
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 65a58fe..a7d0c01 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -461,7 +461,7 @@
 				      &ipv6h->saddr, &ipv6h->daddr, tpi->key,
 				      tpi->proto);
 	if (tunnel) {
-		ip6_tnl_rcv(tunnel, skb, tpi, NULL, false);
+		ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
 
 		return PACKET_RCVD;
 	}
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 4e6c439..dd93836 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -165,6 +165,14 @@
 			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
 }
 
+static bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
+{
+	if (!np->autoflowlabel_set)
+		return ip6_default_np_autolabel(net);
+	else
+		return np->autoflowlabel;
+}
+
 /*
  * xmit an sk_buff (used by TCP, SCTP and DCCP)
  * Note : socket lock is not held for SYNACK packets, but might be modified
@@ -228,7 +236,7 @@
 		hlimit = ip6_dst_hoplimit(dst);
 
 	ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
-						     np->autoflowlabel, fl6));
+				ip6_autoflowlabel(net, np), fl6));
 
 	hdr->payload_len = htons(seg_len);
 	hdr->nexthdr = proto;
@@ -1699,7 +1707,7 @@
 
 	ip6_flow_hdr(hdr, v6_cork->tclass,
 		     ip6_make_flowlabel(net, skb, fl6->flowlabel,
-					np->autoflowlabel, fl6));
+					ip6_autoflowlabel(net, np), fl6));
 	hdr->hop_limit = v6_cork->hop_limit;
 	hdr->nexthdr = proto;
 	hdr->saddr = fl6->saddr;
@@ -1800,9 +1808,10 @@
 	cork.base.opt = NULL;
 	v6_cork.opt = NULL;
 	err = ip6_setup_cork(sk, &cork, &v6_cork, ipc6, rt, fl6);
-	if (err)
+	if (err) {
+		ip6_cork_release(&cork, &v6_cork);
 		return ERR_PTR(err);
-
+	}
 	if (ipc6->dontfrag < 0)
 		ipc6->dontfrag = inet6_sk(sk)->dontfrag;
 
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 64aefc2..4c52236 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -911,7 +911,7 @@
 		if (t->parms.collect_md) {
 			tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0);
 			if (!tun_dst)
-				return 0;
+				goto drop;
 		}
 		ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
 				    log_ecn_error);
@@ -1080,10 +1080,11 @@
 			memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
 			neigh_release(neigh);
 		}
-	} else if (!(t->parms.flags &
-		     (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
-		/* enable the cache only only if the routing decision does
-		 * not depend on the current inner header value
+	} else if (t->parms.proto != 0 && !(t->parms.flags &
+					    (IP6_TNL_F_USE_ORIG_TCLASS |
+					     IP6_TNL_F_USE_ORIG_FWMARK))) {
+		/* enable the cache only if neither the outer protocol nor the
+		 * routing decision depends on the current inner header value
 		 */
 		use_cache = true;
 	}
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index da64b20..afc30a0 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -189,12 +189,12 @@
 	struct vti6_net *ip6n = net_generic(net, vti6_net_id);
 	int err;
 
+	dev->rtnl_link_ops = &vti6_link_ops;
 	err = register_netdevice(dev);
 	if (err < 0)
 		goto out;
 
 	strcpy(t->parms.name, dev->name);
-	dev->rtnl_link_ops = &vti6_link_ops;
 
 	dev_hold(dev);
 	vti6_tnl_link(ip6n, t);
@@ -485,11 +485,15 @@
 	if (!skb->ignore_df && skb->len > mtu) {
 		skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu);
 
-		if (skb->protocol == htons(ETH_P_IPV6))
+		if (skb->protocol == htons(ETH_P_IPV6)) {
+			if (mtu < IPV6_MIN_MTU)
+				mtu = IPV6_MIN_MTU;
+
 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
-		else
+		} else {
 			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
 				  htonl(mtu));
+		}
 
 		return -EMSGSIZE;
 	}
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 636ec56..6e3871c 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -585,16 +585,24 @@
 
 		if (val) {
 			struct net_device *dev;
+			int midx;
 
-			if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val)
-				goto e_inval;
+			rcu_read_lock();
 
-			dev = dev_get_by_index(net, val);
+			dev = dev_get_by_index_rcu(net, val);
 			if (!dev) {
+				rcu_read_unlock();
 				retv = -ENODEV;
 				break;
 			}
-			dev_put(dev);
+			midx = l3mdev_master_ifindex_rcu(dev);
+
+			rcu_read_unlock();
+
+			if (sk->sk_bound_dev_if &&
+			    sk->sk_bound_dev_if != val &&
+			    (!midx || midx != sk->sk_bound_dev_if))
+				goto e_inval;
 		}
 		np->mcast_oif = val;
 		retv = 0;
@@ -866,6 +874,7 @@
 		break;
 	case IPV6_AUTOFLOWLABEL:
 		np->autoflowlabel = valbool;
+		np->autoflowlabel_set = 1;
 		retv = 0;
 		break;
 	}
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 1bdc703..ca8fac6 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1682,16 +1682,16 @@
 }
 
 static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
-	int type, struct mld2_grec **ppgr)
+	int type, struct mld2_grec **ppgr, unsigned int mtu)
 {
-	struct net_device *dev = pmc->idev->dev;
 	struct mld2_report *pmr;
 	struct mld2_grec *pgr;
 
-	if (!skb)
-		skb = mld_newpack(pmc->idev, dev->mtu);
-	if (!skb)
-		return NULL;
+	if (!skb) {
+		skb = mld_newpack(pmc->idev, mtu);
+		if (!skb)
+			return NULL;
+	}
 	pgr = (struct mld2_grec *)skb_put(skb, sizeof(struct mld2_grec));
 	pgr->grec_type = type;
 	pgr->grec_auxwords = 0;
@@ -1714,10 +1714,15 @@
 	struct mld2_grec *pgr = NULL;
 	struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list;
 	int scount, stotal, first, isquery, truncate;
+	unsigned int mtu;
 
 	if (pmc->mca_flags & MAF_NOREPORT)
 		return skb;
 
+	mtu = READ_ONCE(dev->mtu);
+	if (mtu < IPV6_MIN_MTU)
+		return skb;
+
 	isquery = type == MLD2_MODE_IS_INCLUDE ||
 		  type == MLD2_MODE_IS_EXCLUDE;
 	truncate = type == MLD2_MODE_IS_EXCLUDE ||
@@ -1738,7 +1743,7 @@
 		    AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
 			if (skb)
 				mld_sendpack(skb);
-			skb = mld_newpack(idev, dev->mtu);
+			skb = mld_newpack(idev, mtu);
 		}
 	}
 	first = 1;
@@ -1774,12 +1779,12 @@
 				pgr->grec_nsrcs = htons(scount);
 			if (skb)
 				mld_sendpack(skb);
-			skb = mld_newpack(idev, dev->mtu);
+			skb = mld_newpack(idev, mtu);
 			first = 1;
 			scount = 0;
 		}
 		if (first) {
-			skb = add_grhead(skb, pmc, type, &pgr);
+			skb = add_grhead(skb, pmc, type, &pgr, mtu);
 			first = 0;
 		}
 		if (!skb)
@@ -1814,7 +1819,7 @@
 				mld_sendpack(skb);
 				skb = NULL; /* add_grhead will get a new one */
 			}
-			skb = add_grhead(skb, pmc, type, &pgr);
+			skb = add_grhead(skb, pmc, type, &pgr, mtu);
 		}
 	}
 	if (pgr)
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index b263bf3..64ec233 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -230,7 +230,7 @@
 
 	if ((unsigned int)end > IPV6_MAXPLEN) {
 		pr_debug("offset is too large.\n");
-		return -1;
+		return -EINVAL;
 	}
 
 	ecn = ip6_frag_ecn(ipv6_hdr(skb));
@@ -263,7 +263,8 @@
 			 * this case. -DaveM
 			 */
 			pr_debug("end of fragment not rounded to 8 bytes.\n");
-			return -1;
+			inet_frag_kill(&fq->q, &nf_frags);
+			return -EPROTO;
 		}
 		if (end > fq->q.len) {
 			/* Some bits beyond end -> corruption. */
@@ -357,7 +358,7 @@
 discard_fq:
 	inet_frag_kill(&fq->q, &nf_frags);
 err:
-	return -1;
+	return -EINVAL;
 }
 
 /*
@@ -566,6 +567,7 @@
 
 int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
 {
+	u16 savethdr = skb->transport_header;
 	struct net_device *dev = skb->dev;
 	int fhoff, nhoff, ret;
 	struct frag_hdr *fhdr;
@@ -599,8 +601,12 @@
 
 	spin_lock_bh(&fq->q.lock);
 
-	if (nf_ct_frag6_queue(fq, skb, fhdr, nhoff) < 0) {
-		ret = -EINVAL;
+	ret = nf_ct_frag6_queue(fq, skb, fhdr, nhoff);
+	if (ret < 0) {
+		if (ret == -EPROTO) {
+			skb->transport_header = savethdr;
+			ret = 0;
+		}
 		goto out_unlock;
 	}
 
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 5acd855..f7e685f 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3470,7 +3470,11 @@
 		net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
 		net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
 #endif
-	 } else if (event == NETDEV_UNREGISTER) {
+	 } else if (event == NETDEV_UNREGISTER &&
+		    dev->reg_state != NETREG_UNREGISTERED) {
+		/* NETDEV_UNREGISTER could be fired for multiple times by
+		 * netdev_wait_allrefs(). Make sure we only call this once.
+		 */
 		in6_dev_put(net->ipv6.ip6_null_entry->rt6i_idev);
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
 		in6_dev_put(net->ipv6.ip6_prohibit_entry->rt6i_idev);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 40d7405..db6d437 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1085,6 +1085,7 @@
 	ipip6_tunnel_link(sitn, t);
 	t->parms.iph.ttl = p->iph.ttl;
 	t->parms.iph.tos = p->iph.tos;
+	t->parms.iph.frag_off = p->iph.frag_off;
 	if (t->parms.link != p->link) {
 		t->parms.link = p->link;
 		ipip6_tunnel_bind_dev(t->dev);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 9828dc2..bb84165 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -964,7 +964,7 @@
 			tcp_rsk(req)->rcv_nxt,
 			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
 			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
-			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
+			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr),
 			0, 0);
 }
 
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 7eb0e8f..22785dc 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1624,60 +1624,35 @@
 };
 
 /* Clone a kcm socket. */
-static int kcm_clone(struct socket *osock, struct kcm_clone *info,
-		     struct socket **newsockp)
+static struct file *kcm_clone(struct socket *osock)
 {
 	struct socket *newsock;
 	struct sock *newsk;
-	struct file *newfile;
-	int err, newfd;
+	struct file *file;
 
-	err = -ENFILE;
 	newsock = sock_alloc();
 	if (!newsock)
-		goto out;
+		return ERR_PTR(-ENFILE);
 
 	newsock->type = osock->type;
 	newsock->ops = osock->ops;
 
 	__module_get(newsock->ops->owner);
 
-	newfd = get_unused_fd_flags(0);
-	if (unlikely(newfd < 0)) {
-		err = newfd;
-		goto out_fd_fail;
-	}
-
-	newfile = sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
-	if (unlikely(IS_ERR(newfile))) {
-		err = PTR_ERR(newfile);
-		goto out_sock_alloc_fail;
-	}
-
 	newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
 			 &kcm_proto, true);
 	if (!newsk) {
-		err = -ENOMEM;
-		goto out_sk_alloc_fail;
+		sock_release(newsock);
+		return ERR_PTR(-ENOMEM);
 	}
-
 	sock_init_data(newsock, newsk);
 	init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux);
 
-	fd_install(newfd, newfile);
-	*newsockp = newsock;
-	info->fd = newfd;
+	file = sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
+	if (IS_ERR(file))
+		sock_release(newsock);
 
-	return 0;
-
-out_sk_alloc_fail:
-	fput(newfile);
-out_sock_alloc_fail:
-	put_unused_fd(newfd);
-out_fd_fail:
-	sock_release(newsock);
-out:
-	return err;
+	return file;
 }
 
 static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
@@ -1707,21 +1682,25 @@
 	}
 	case SIOCKCMCLONE: {
 		struct kcm_clone info;
-		struct socket *newsock = NULL;
+		struct file *file;
 
-		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
-			return -EFAULT;
+		info.fd = get_unused_fd_flags(0);
+		if (unlikely(info.fd < 0))
+			return info.fd;
 
-		err = kcm_clone(sock, &info, &newsock);
-
-		if (!err) {
-			if (copy_to_user((void __user *)arg, &info,
-					 sizeof(info))) {
-				err = -EFAULT;
-				sys_close(info.fd);
-			}
+		file = kcm_clone(sock);
+		if (IS_ERR(file)) {
+			put_unused_fd(info.fd);
+			return PTR_ERR(file);
 		}
-
+		if (copy_to_user((void __user *)arg, &info,
+				 sizeof(info))) {
+			put_unused_fd(info.fd);
+			fput(file);
+			return -EFAULT;
+		}
+		fd_install(info.fd, file);
+		err = 0;
 		break;
 	}
 	default:
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index b06acd0..cfc4dd8 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1944,7 +1944,7 @@
 
 	rcu_read_lock_bh();
 	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
-		(void)l2tp_tunnel_delete(tunnel);
+		l2tp_tunnel_delete(tunnel);
 	}
 	rcu_read_unlock_bh();
 
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 3468d56..9d77a54 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -48,7 +48,8 @@
 	return (struct l2tp_ip_sock *)sk;
 }
 
-static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
+static struct sock *__l2tp_ip_bind_lookup(const struct net *net, __be32 laddr,
+					  __be32 raddr, int dif, u32 tunnel_id)
 {
 	struct sock *sk;
 
@@ -62,6 +63,7 @@
 		if ((l2tp->conn_id == tunnel_id) &&
 		    net_eq(sock_net(sk), net) &&
 		    !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) &&
+		    (!inet->inet_daddr || !raddr || inet->inet_daddr == raddr) &&
 		    (!sk->sk_bound_dev_if || !dif ||
 		     sk->sk_bound_dev_if == dif))
 			goto found;
@@ -72,15 +74,6 @@
 	return sk;
 }
 
-static inline struct sock *l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
-{
-	struct sock *sk = __l2tp_ip_bind_lookup(net, laddr, dif, tunnel_id);
-	if (sk)
-		sock_hold(sk);
-
-	return sk;
-}
-
 /* When processing receive frames, there are two cases to
  * consider. Data frames consist of a non-zero session-id and an
  * optional cookie. Control frames consist of a regular L2TP header
@@ -186,8 +179,8 @@
 		struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
 
 		read_lock_bh(&l2tp_ip_lock);
-		sk = __l2tp_ip_bind_lookup(net, iph->daddr, inet_iif(skb),
-					   tunnel_id);
+		sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr,
+					   inet_iif(skb), tunnel_id);
 		if (!sk) {
 			read_unlock_bh(&l2tp_ip_lock);
 			goto discard;
@@ -289,7 +282,7 @@
 		inet->inet_saddr = 0;  /* Use device */
 
 	write_lock_bh(&l2tp_ip_lock);
-	if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr,
+	if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, 0,
 				  sk->sk_bound_dev_if, addr->l2tp_conn_id)) {
 		write_unlock_bh(&l2tp_ip_lock);
 		ret = -EADDRINUSE;
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 96efe47..86ad51a 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -59,12 +59,14 @@
 
 static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
 					   struct in6_addr *laddr,
+					   const struct in6_addr *raddr,
 					   int dif, u32 tunnel_id)
 {
 	struct sock *sk;
 
 	sk_for_each_bound(sk, &l2tp_ip6_bind_table) {
 		const struct in6_addr *sk_laddr = inet6_rcv_saddr(sk);
+		const struct in6_addr *sk_raddr = &sk->sk_v6_daddr;
 		struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk);
 
 		if (l2tp == NULL)
@@ -73,6 +75,7 @@
 		if ((l2tp->conn_id == tunnel_id) &&
 		    net_eq(sock_net(sk), net) &&
 		    (!sk_laddr || ipv6_addr_any(sk_laddr) || ipv6_addr_equal(sk_laddr, laddr)) &&
+		    (!raddr || ipv6_addr_any(sk_raddr) || ipv6_addr_equal(sk_raddr, raddr)) &&
 		    (!sk->sk_bound_dev_if || !dif ||
 		     sk->sk_bound_dev_if == dif))
 			goto found;
@@ -83,17 +86,6 @@
 	return sk;
 }
 
-static inline struct sock *l2tp_ip6_bind_lookup(struct net *net,
-						struct in6_addr *laddr,
-						int dif, u32 tunnel_id)
-{
-	struct sock *sk = __l2tp_ip6_bind_lookup(net, laddr, dif, tunnel_id);
-	if (sk)
-		sock_hold(sk);
-
-	return sk;
-}
-
 /* When processing receive frames, there are two cases to
  * consider. Data frames consist of a non-zero session-id and an
  * optional cookie. Control frames consist of a regular L2TP header
@@ -200,8 +192,8 @@
 		struct ipv6hdr *iph = ipv6_hdr(skb);
 
 		read_lock_bh(&l2tp_ip6_lock);
-		sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, inet6_iif(skb),
-					    tunnel_id);
+		sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr,
+					    inet6_iif(skb), tunnel_id);
 		if (!sk) {
 			read_unlock_bh(&l2tp_ip6_lock);
 			goto discard;
@@ -339,7 +331,7 @@
 	rcu_read_unlock();
 
 	write_lock_bh(&l2tp_ip6_lock);
-	if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, bound_dev_if,
+	if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, NULL, bound_dev_if,
 				   addr->l2tp_conn_id)) {
 		write_unlock_bh(&l2tp_ip6_lock);
 		err = -EADDRINUSE;
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 1ccd310..ee03bc8 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -287,7 +287,7 @@
 	l2tp_tunnel_notify(&l2tp_nl_family, info,
 			   tunnel, L2TP_CMD_TUNNEL_DELETE);
 
-	(void) l2tp_tunnel_delete(tunnel);
+	l2tp_tunnel_delete(tunnel);
 
 out:
 	return ret;
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index e75cbf6..a0d901d 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -231,9 +231,6 @@
 		    !(sta->sdata->bss && sta->sdata->bss == sdata->bss))
 			continue;
 
-		if (!sta->uploaded || !test_sta_flag(sta, WLAN_STA_ASSOC))
-			continue;
-
 		max_bw = max(max_bw, ieee80211_get_sta_bw(&sta->sta));
 	}
 	rcu_read_unlock();
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 34c2add..03dbc6b 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -681,7 +681,6 @@
 	const struct ieee80211_mesh_sync_ops *sync_ops;
 	s64 sync_offset_clockdrift_max;
 	spinlock_t sync_offset_lock;
-	bool adjusting_tbtt;
 	/* mesh power save */
 	enum nl80211_mesh_power_mode nonpeer_pm;
 	int ps_peers_light_sleep;
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 50e1b7f..b4b3fe0 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -279,10 +279,6 @@
 	/* Mesh PS mode. See IEEE802.11-2012 8.4.2.100.8 */
 	*pos |= ifmsh->ps_peers_deep_sleep ?
 			IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL : 0x00;
-	*pos++ |= ifmsh->adjusting_tbtt ?
-			IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING : 0x00;
-	*pos++ = 0x00;
-
 	return 0;
 }
 
@@ -850,7 +846,6 @@
 	ifmsh->mesh_cc_id = 0;	/* Disabled */
 	/* register sync ops from extensible synchronization framework */
 	ifmsh->sync_ops = ieee80211_mesh_sync_ops_get(ifmsh->mesh_sp_id);
-	ifmsh->adjusting_tbtt = false;
 	ifmsh->sync_offset_clockdrift_max = 0;
 	set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags);
 	ieee80211_mesh_root_setup(ifmsh);
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 7fcdcf6..fcba70e5 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -505,12 +505,14 @@
 
 	/* Userspace handles station allocation */
 	if (sdata->u.mesh.user_mpm ||
-	    sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED)
-		cfg80211_notify_new_peer_candidate(sdata->dev, addr,
-						   elems->ie_start,
-						   elems->total_len,
-						   GFP_KERNEL);
-	else
+	    sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) {
+		if (mesh_peer_accepts_plinks(elems) &&
+		    mesh_plink_availables(sdata))
+			cfg80211_notify_new_peer_candidate(sdata->dev, addr,
+							   elems->ie_start,
+							   elems->total_len,
+							   GFP_KERNEL);
+	} else
 		sta = __mesh_sta_info_alloc(sdata, addr);
 
 	return sta;
diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c
index faca22c..75608c0 100644
--- a/net/mac80211/mesh_sync.c
+++ b/net/mac80211/mesh_sync.c
@@ -123,7 +123,6 @@
 	 */
 
 	if (elems->mesh_config && mesh_peer_tbtt_adjusting(elems)) {
-		clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
 		msync_dbg(sdata, "STA %pM : is adjusting TBTT\n",
 			  sta->sta.addr);
 		goto no_sync;
@@ -172,11 +171,9 @@
 					 struct beacon_data *beacon)
 {
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
-	u8 cap;
 
 	WARN_ON(ifmsh->mesh_sp_id != IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET);
 	WARN_ON(!rcu_read_lock_held());
-	cap = beacon->meshconf->meshconf_cap;
 
 	spin_lock_bh(&ifmsh->sync_offset_lock);
 
@@ -190,21 +187,13 @@
 			  "TBTT : kicking off TBTT adjustment with clockdrift_max=%lld\n",
 			  ifmsh->sync_offset_clockdrift_max);
 		set_bit(MESH_WORK_DRIFT_ADJUST, &ifmsh->wrkq_flags);
-
-		ifmsh->adjusting_tbtt = true;
 	} else {
 		msync_dbg(sdata,
 			  "TBTT : max clockdrift=%lld; too small to adjust\n",
 			  (long long)ifmsh->sync_offset_clockdrift_max);
 		ifmsh->sync_offset_clockdrift_max = 0;
-
-		ifmsh->adjusting_tbtt = false;
 	}
 	spin_unlock_bh(&ifmsh->sync_offset_lock);
-
-	beacon->meshconf->meshconf_cap = ifmsh->adjusting_tbtt ?
-			IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING | cap :
-			~IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING & cap;
 }
 
 static const struct sync_method sync_methods[] = {
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 274c564..1ffd1e1 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1244,7 +1244,7 @@
 
 static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
 					  struct ieee80211_vif *vif,
-					  struct ieee80211_sta *pubsta,
+					  struct sta_info *sta,
 					  struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -1258,10 +1258,13 @@
 	if (!ieee80211_is_data(hdr->frame_control))
 		return NULL;
 
-	if (pubsta) {
+	if (sta) {
 		u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
 
-		txq = pubsta->txq[tid];
+		if (!sta->uploaded)
+			return NULL;
+
+		txq = sta->sta.txq[tid];
 	} else if (vif) {
 		txq = vif->txq;
 	}
@@ -1499,23 +1502,17 @@
 	struct fq *fq = &local->fq;
 	struct ieee80211_vif *vif;
 	struct txq_info *txqi;
-	struct ieee80211_sta *pubsta;
 
 	if (!local->ops->wake_tx_queue ||
 	    sdata->vif.type == NL80211_IFTYPE_MONITOR)
 		return false;
 
-	if (sta && sta->uploaded)
-		pubsta = &sta->sta;
-	else
-		pubsta = NULL;
-
 	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
 		sdata = container_of(sdata->bss,
 				     struct ieee80211_sub_if_data, u.ap);
 
 	vif = &sdata->vif;
-	txqi = ieee80211_get_txq(local, vif, pubsta, skb);
+	txqi = ieee80211_get_txq(local, vif, sta, skb);
 
 	if (!txqi)
 		return false;
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index efa3f48..73e8f34 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -293,7 +293,8 @@
 			return RX_DROP_UNUSABLE;
 		ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key);
 		/* remove ICV */
-		if (pskb_trim(rx->skb, rx->skb->len - IEEE80211_WEP_ICV_LEN))
+		if (!(status->flag & RX_FLAG_ICV_STRIPPED) &&
+		    pskb_trim(rx->skb, rx->skb->len - IEEE80211_WEP_ICV_LEN))
 			return RX_DROP_UNUSABLE;
 	}
 
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 5c71d60..caa5986 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -295,7 +295,8 @@
 		return RX_DROP_UNUSABLE;
 
 	/* Trim ICV */
-	skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN);
+	if (!(status->flag & RX_FLAG_ICV_STRIPPED))
+		skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN);
 
 	/* Remove IV */
 	memmove(skb->data + IEEE80211_TKIP_IV_LEN, skb->data, hdrlen);
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 1309e2c..c5a5a69 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -937,6 +937,8 @@
 {
 	struct mpls_route __rcu **platform_label;
 	struct net *net = dev_net(dev);
+	unsigned int nh_flags = RTNH_F_DEAD | RTNH_F_LINKDOWN;
+	unsigned int alive;
 	unsigned index;
 
 	platform_label = rtnl_dereference(net->mpls.platform_label);
@@ -946,9 +948,11 @@
 		if (!rt)
 			continue;
 
+		alive = 0;
 		change_nexthops(rt) {
 			if (rtnl_dereference(nh->nh_dev) != dev)
-				continue;
+				goto next;
+
 			switch (event) {
 			case NETDEV_DOWN:
 			case NETDEV_UNREGISTER:
@@ -956,13 +960,16 @@
 				/* fall through */
 			case NETDEV_CHANGE:
 				nh->nh_flags |= RTNH_F_LINKDOWN;
-				if (event != NETDEV_UNREGISTER)
-					ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1;
 				break;
 			}
 			if (event == NETDEV_UNREGISTER)
 				RCU_INIT_POINTER(nh->nh_dev, NULL);
+next:
+			if (!(nh->nh_flags & nh_flags))
+				alive++;
 		} endfor_nexthops(rt);
+
+		WRITE_ONCE(rt->rt_nhn_alive, alive);
 	}
 }
 
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 004af03..d869ea5 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -364,6 +364,11 @@
 		ret = nf_queue(skb, state, &entry, verdict);
 		if (ret == 1 && entry)
 			goto next_hook;
+	} else {
+		/* Implicit handling for NF_STOLEN, as well as any other
+		 * non conventional verdicts.
+		 */
+		ret = 0;
 	}
 	return ret;
 }
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index a6e44ef..2155c24 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2040,12 +2040,16 @@
 		seq_puts(seq,
 			 "  -> RemoteAddress:Port Forward Weight ActiveConn InActConn\n");
 	} else {
+		struct net *net = seq_file_net(seq);
+		struct netns_ipvs *ipvs = net_ipvs(net);
 		const struct ip_vs_service *svc = v;
 		const struct ip_vs_iter *iter = seq->private;
 		const struct ip_vs_dest *dest;
 		struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);
 		char *sched_name = sched ? sched->name : "none";
 
+		if (svc->ipvs != ipvs)
+			return 0;
 		if (iter->table == ip_vs_svc_table) {
 #ifdef CONFIG_IP_VS_IPV6
 			if (svc->af == AF_INET6)
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 778fcdb..fa3ef25 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2068,7 +2068,7 @@
 	 * is called on error from nf_tables_newrule().
 	 */
 	expr = nft_expr_first(rule);
-	while (expr->ops && expr != nft_expr_last(rule)) {
+	while (expr != nft_expr_last(rule) && expr->ops) {
 		nf_tables_expr_destroy(ctx, expr);
 		expr = nft_expr_next(expr);
 	}
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index b1fcfa0..28d0653 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -32,6 +32,13 @@
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_DESCRIPTION("nfnl_cthelper: User-space connection tracking helpers");
 
+struct nfnl_cthelper {
+	struct list_head		list;
+	struct nf_conntrack_helper	helper;
+};
+
+static LIST_HEAD(nfnl_cthelper_list);
+
 static int
 nfnl_userspace_cthelper(struct sk_buff *skb, unsigned int protoff,
 			struct nf_conn *ct, enum ip_conntrack_info ctinfo)
@@ -205,18 +212,20 @@
 		     struct nf_conntrack_tuple *tuple)
 {
 	struct nf_conntrack_helper *helper;
+	struct nfnl_cthelper *nfcth;
 	int ret;
 
 	if (!tb[NFCTH_TUPLE] || !tb[NFCTH_POLICY] || !tb[NFCTH_PRIV_DATA_LEN])
 		return -EINVAL;
 
-	helper = kzalloc(sizeof(struct nf_conntrack_helper), GFP_KERNEL);
-	if (helper == NULL)
+	nfcth = kzalloc(sizeof(*nfcth), GFP_KERNEL);
+	if (nfcth == NULL)
 		return -ENOMEM;
+	helper = &nfcth->helper;
 
 	ret = nfnl_cthelper_parse_expect_policy(helper, tb[NFCTH_POLICY]);
 	if (ret < 0)
-		goto err;
+		goto err1;
 
 	strncpy(helper->name, nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN);
 	helper->data_len = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
@@ -247,12 +256,98 @@
 
 	ret = nf_conntrack_helper_register(helper);
 	if (ret < 0)
-		goto err;
+		goto err2;
+
+	list_add_tail(&nfcth->list, &nfnl_cthelper_list);
+	return 0;
+err2:
+	kfree(helper->expect_policy);
+err1:
+	kfree(nfcth);
+	return ret;
+}
+
+static int
+nfnl_cthelper_update_policy_one(const struct nf_conntrack_expect_policy *policy,
+				struct nf_conntrack_expect_policy *new_policy,
+				const struct nlattr *attr)
+{
+	struct nlattr *tb[NFCTH_POLICY_MAX + 1];
+	int err;
+
+	err = nla_parse_nested(tb, NFCTH_POLICY_MAX, attr,
+			       nfnl_cthelper_expect_pol);
+	if (err < 0)
+		return err;
+
+	if (!tb[NFCTH_POLICY_NAME] ||
+	    !tb[NFCTH_POLICY_EXPECT_MAX] ||
+	    !tb[NFCTH_POLICY_EXPECT_TIMEOUT])
+		return -EINVAL;
+
+	if (nla_strcmp(tb[NFCTH_POLICY_NAME], policy->name))
+		return -EBUSY;
+
+	new_policy->max_expected =
+		ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX]));
+	new_policy->timeout =
+		ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_TIMEOUT]));
 
 	return 0;
-err:
-	kfree(helper);
-	return ret;
+}
+
+static int nfnl_cthelper_update_policy_all(struct nlattr *tb[],
+					   struct nf_conntrack_helper *helper)
+{
+	struct nf_conntrack_expect_policy new_policy[helper->expect_class_max + 1];
+	struct nf_conntrack_expect_policy *policy;
+	int i, err;
+
+	/* Check first that all policy attributes are well-formed, so we don't
+	 * leave things in inconsistent state on errors.
+	 */
+	for (i = 0; i < helper->expect_class_max + 1; i++) {
+
+		if (!tb[NFCTH_POLICY_SET + i])
+			return -EINVAL;
+
+		err = nfnl_cthelper_update_policy_one(&helper->expect_policy[i],
+						      &new_policy[i],
+						      tb[NFCTH_POLICY_SET + i]);
+		if (err < 0)
+			return err;
+	}
+	/* Now we can safely update them. */
+	for (i = 0; i < helper->expect_class_max + 1; i++) {
+		policy = (struct nf_conntrack_expect_policy *)
+				&helper->expect_policy[i];
+		policy->max_expected = new_policy->max_expected;
+		policy->timeout	= new_policy->timeout;
+	}
+
+	return 0;
+}
+
+static int nfnl_cthelper_update_policy(struct nf_conntrack_helper *helper,
+				       const struct nlattr *attr)
+{
+	struct nlattr *tb[NFCTH_POLICY_SET_MAX + 1];
+	unsigned int class_max;
+	int err;
+
+	err = nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr,
+			       nfnl_cthelper_expect_policy_set);
+	if (err < 0)
+		return err;
+
+	if (!tb[NFCTH_POLICY_SET_NUM])
+		return -EINVAL;
+
+	class_max = ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
+	if (helper->expect_class_max + 1 != class_max)
+		return -EBUSY;
+
+	return nfnl_cthelper_update_policy_all(tb, helper);
 }
 
 static int
@@ -265,8 +360,7 @@
 		return -EBUSY;
 
 	if (tb[NFCTH_POLICY]) {
-		ret = nfnl_cthelper_parse_expect_policy(helper,
-							tb[NFCTH_POLICY]);
+		ret = nfnl_cthelper_update_policy(helper, tb[NFCTH_POLICY]);
 		if (ret < 0)
 			return ret;
 	}
@@ -295,7 +389,8 @@
 	const char *helper_name;
 	struct nf_conntrack_helper *cur, *helper = NULL;
 	struct nf_conntrack_tuple tuple;
-	int ret = 0, i;
+	struct nfnl_cthelper *nlcth;
+	int ret = 0;
 
 	if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE])
 		return -EINVAL;
@@ -306,31 +401,22 @@
 	if (ret < 0)
 		return ret;
 
-	rcu_read_lock();
-	for (i = 0; i < nf_ct_helper_hsize && !helper; i++) {
-		hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) {
+	list_for_each_entry(nlcth, &nfnl_cthelper_list, list) {
+		cur = &nlcth->helper;
 
-			/* skip non-userspace conntrack helpers. */
-			if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
-				continue;
+		if (strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
+			continue;
 
-			if (strncmp(cur->name, helper_name,
-					NF_CT_HELPER_NAME_LEN) != 0)
-				continue;
+		if ((tuple.src.l3num != cur->tuple.src.l3num ||
+		     tuple.dst.protonum != cur->tuple.dst.protonum))
+			continue;
 
-			if ((tuple.src.l3num != cur->tuple.src.l3num ||
-			     tuple.dst.protonum != cur->tuple.dst.protonum))
-				continue;
+		if (nlh->nlmsg_flags & NLM_F_EXCL)
+			return -EEXIST;
 
-			if (nlh->nlmsg_flags & NLM_F_EXCL) {
-				ret = -EEXIST;
-				goto err;
-			}
-			helper = cur;
-			break;
-		}
+		helper = cur;
+		break;
 	}
-	rcu_read_unlock();
 
 	if (helper == NULL)
 		ret = nfnl_cthelper_create(tb, &tuple);
@@ -338,9 +424,6 @@
 		ret = nfnl_cthelper_update(tb, helper);
 
 	return ret;
-err:
-	rcu_read_unlock();
-	return ret;
 }
 
 static int
@@ -504,11 +587,12 @@
 			     struct sk_buff *skb, const struct nlmsghdr *nlh,
 			     const struct nlattr * const tb[])
 {
-	int ret = -ENOENT, i;
+	int ret = -ENOENT;
 	struct nf_conntrack_helper *cur;
 	struct sk_buff *skb2;
 	char *helper_name = NULL;
 	struct nf_conntrack_tuple tuple;
+	struct nfnl_cthelper *nlcth;
 	bool tuple_set = false;
 
 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
@@ -529,45 +613,39 @@
 		tuple_set = true;
 	}
 
-	for (i = 0; i < nf_ct_helper_hsize; i++) {
-		hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) {
+	list_for_each_entry(nlcth, &nfnl_cthelper_list, list) {
+		cur = &nlcth->helper;
+		if (helper_name &&
+		    strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
+			continue;
 
-			/* skip non-userspace conntrack helpers. */
-			if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
-				continue;
+		if (tuple_set &&
+		    (tuple.src.l3num != cur->tuple.src.l3num ||
+		     tuple.dst.protonum != cur->tuple.dst.protonum))
+			continue;
 
-			if (helper_name && strncmp(cur->name, helper_name,
-						NF_CT_HELPER_NAME_LEN) != 0) {
-				continue;
-			}
-			if (tuple_set &&
-			    (tuple.src.l3num != cur->tuple.src.l3num ||
-			     tuple.dst.protonum != cur->tuple.dst.protonum))
-				continue;
-
-			skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-			if (skb2 == NULL) {
-				ret = -ENOMEM;
-				break;
-			}
-
-			ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid,
-						nlh->nlmsg_seq,
-						NFNL_MSG_TYPE(nlh->nlmsg_type),
-						NFNL_MSG_CTHELPER_NEW, cur);
-			if (ret <= 0) {
-				kfree_skb(skb2);
-				break;
-			}
-
-			ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
-						MSG_DONTWAIT);
-			if (ret > 0)
-				ret = 0;
-
-			/* this avoids a loop in nfnetlink. */
-			return ret == -EAGAIN ? -ENOBUFS : ret;
+		skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+		if (skb2 == NULL) {
+			ret = -ENOMEM;
+			break;
 		}
+
+		ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid,
+					      nlh->nlmsg_seq,
+					      NFNL_MSG_TYPE(nlh->nlmsg_type),
+					      NFNL_MSG_CTHELPER_NEW, cur);
+		if (ret <= 0) {
+			kfree_skb(skb2);
+			break;
+		}
+
+		ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
+				      MSG_DONTWAIT);
+		if (ret > 0)
+			ret = 0;
+
+		/* this avoids a loop in nfnetlink. */
+		return ret == -EAGAIN ? -ENOBUFS : ret;
 	}
 	return ret;
 }
@@ -578,10 +656,10 @@
 {
 	char *helper_name = NULL;
 	struct nf_conntrack_helper *cur;
-	struct hlist_node *tmp;
 	struct nf_conntrack_tuple tuple;
 	bool tuple_set = false, found = false;
-	int i, j = 0, ret;
+	struct nfnl_cthelper *nlcth, *n;
+	int j = 0, ret;
 
 	if (tb[NFCTH_NAME])
 		helper_name = nla_data(tb[NFCTH_NAME]);
@@ -594,28 +672,27 @@
 		tuple_set = true;
 	}
 
-	for (i = 0; i < nf_ct_helper_hsize; i++) {
-		hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i],
-								hnode) {
-			/* skip non-userspace conntrack helpers. */
-			if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
-				continue;
+	list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) {
+		cur = &nlcth->helper;
+		j++;
 
-			j++;
+		if (helper_name &&
+		    strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
+			continue;
 
-			if (helper_name && strncmp(cur->name, helper_name,
-						NF_CT_HELPER_NAME_LEN) != 0) {
-				continue;
-			}
-			if (tuple_set &&
-			    (tuple.src.l3num != cur->tuple.src.l3num ||
-			     tuple.dst.protonum != cur->tuple.dst.protonum))
-				continue;
+		if (tuple_set &&
+		    (tuple.src.l3num != cur->tuple.src.l3num ||
+		     tuple.dst.protonum != cur->tuple.dst.protonum))
+			continue;
 
-			found = true;
-			nf_conntrack_helper_unregister(cur);
-		}
+		found = true;
+		nf_conntrack_helper_unregister(cur);
+		kfree(cur->expect_policy);
+
+		list_del(&nlcth->list);
+		kfree(nlcth);
 	}
+
 	/* Make sure we return success if we flush and there is no helpers */
 	return (found || j == 0) ? 0 : -ENOENT;
 }
@@ -664,20 +741,16 @@
 static void __exit nfnl_cthelper_exit(void)
 {
 	struct nf_conntrack_helper *cur;
-	struct hlist_node *tmp;
-	int i;
+	struct nfnl_cthelper *nlcth, *n;
 
 	nfnetlink_subsys_unregister(&nfnl_cthelper_subsys);
 
-	for (i=0; i<nf_ct_helper_hsize; i++) {
-		hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i],
-									hnode) {
-			/* skip non-userspace conntrack helpers. */
-			if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
-				continue;
+	list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) {
+		cur = &nlcth->helper;
 
-			nf_conntrack_helper_unregister(cur);
-		}
+		nf_conntrack_helper_unregister(cur);
+		kfree(cur->expect_policy);
+		kfree(nlcth);
 	}
 }
 
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index af832c5..5efb402 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -443,7 +443,7 @@
 	skb = alloc_skb(size, GFP_ATOMIC);
 	if (!skb) {
 		skb_tx_error(entskb);
-		return NULL;
+		goto nlmsg_failure;
 	}
 
 	nlh = nlmsg_put(skb, 0, 0,
@@ -452,7 +452,7 @@
 	if (!nlh) {
 		skb_tx_error(entskb);
 		kfree_skb(skb);
-		return NULL;
+		goto nlmsg_failure;
 	}
 	nfmsg = nlmsg_data(nlh);
 	nfmsg->nfgen_family = entry->state.pf;
@@ -598,12 +598,17 @@
 	}
 
 	nlh->nlmsg_len = skb->len;
+	if (seclen)
+		security_release_secctx(secdata, seclen);
 	return skb;
 
 nla_put_failure:
 	skb_tx_error(entskb);
 	kfree_skb(skb);
 	net_err_ratelimited("nf_queue: error creating packet message\n");
+nlmsg_failure:
+	if (seclen)
+		security_release_secctx(secdata, seclen);
 	return NULL;
 }
 
diff --git a/net/netfilter/nft_queue.c b/net/netfilter/nft_queue.c
index 393d359..ef4768a 100644
--- a/net/netfilter/nft_queue.c
+++ b/net/netfilter/nft_queue.c
@@ -38,7 +38,7 @@
 
 	if (priv->queues_total > 1) {
 		if (priv->flags & NFT_QUEUE_FLAG_CPU_FANOUT) {
-			int cpu = smp_processor_id();
+			int cpu = raw_smp_processor_id();
 
 			queue = priv->queuenum + cpu % priv->queues_total;
 		} else {
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index c9fac08..e1c123d 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -96,6 +96,44 @@
 
 static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
 
+static struct lock_class_key nlk_cb_mutex_keys[MAX_LINKS];
+
+static const char *const nlk_cb_mutex_key_strings[MAX_LINKS + 1] = {
+	"nlk_cb_mutex-ROUTE",
+	"nlk_cb_mutex-1",
+	"nlk_cb_mutex-USERSOCK",
+	"nlk_cb_mutex-FIREWALL",
+	"nlk_cb_mutex-SOCK_DIAG",
+	"nlk_cb_mutex-NFLOG",
+	"nlk_cb_mutex-XFRM",
+	"nlk_cb_mutex-SELINUX",
+	"nlk_cb_mutex-ISCSI",
+	"nlk_cb_mutex-AUDIT",
+	"nlk_cb_mutex-FIB_LOOKUP",
+	"nlk_cb_mutex-CONNECTOR",
+	"nlk_cb_mutex-NETFILTER",
+	"nlk_cb_mutex-IP6_FW",
+	"nlk_cb_mutex-DNRTMSG",
+	"nlk_cb_mutex-KOBJECT_UEVENT",
+	"nlk_cb_mutex-GENERIC",
+	"nlk_cb_mutex-17",
+	"nlk_cb_mutex-SCSITRANSPORT",
+	"nlk_cb_mutex-ECRYPTFS",
+	"nlk_cb_mutex-RDMA",
+	"nlk_cb_mutex-CRYPTO",
+	"nlk_cb_mutex-SMC",
+	"nlk_cb_mutex-23",
+	"nlk_cb_mutex-24",
+	"nlk_cb_mutex-25",
+	"nlk_cb_mutex-26",
+	"nlk_cb_mutex-27",
+	"nlk_cb_mutex-28",
+	"nlk_cb_mutex-29",
+	"nlk_cb_mutex-30",
+	"nlk_cb_mutex-31",
+	"nlk_cb_mutex-MAX_LINKS"
+};
+
 static int netlink_dump(struct sock *sk);
 static void netlink_skb_destructor(struct sk_buff *skb);
 
@@ -223,6 +261,9 @@
 	struct sock *sk = skb->sk;
 	int ret = -ENOMEM;
 
+	if (!net_eq(dev_net(dev), sock_net(sk)))
+		return 0;
+
 	dev_hold(dev);
 
 	if (is_vmalloc_addr(skb->head))
@@ -585,6 +626,9 @@
 	} else {
 		nlk->cb_mutex = &nlk->cb_def_mutex;
 		mutex_init(nlk->cb_mutex);
+		lockdep_set_class_and_name(nlk->cb_mutex,
+					   nlk_cb_mutex_keys + protocol,
+					   nlk_cb_mutex_key_strings[protocol]);
 	}
 	init_waitqueue_head(&nlk->wait);
 
diff --git a/net/nfc/core.c b/net/nfc/core.c
index 5cf33df..c699d64 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -1106,7 +1106,7 @@
 err_free_dev:
 	kfree(dev);
 
-	return ERR_PTR(rc);
+	return NULL;
 }
 EXPORT_SYMBOL(nfc_allocate_device);
 
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index e7f6657..267db0d 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1661,7 +1661,6 @@
 		atomic_long_set(&rollover->num, 0);
 		atomic_long_set(&rollover->num_huge, 0);
 		atomic_long_set(&rollover->num_failed, 0);
-		po->rollover = rollover;
 	}
 
 	match = NULL;
@@ -1706,6 +1705,8 @@
 		if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
 			__dev_remove_pack(&po->prot_hook);
 			po->fanout = match;
+			po->rollover = rollover;
+			rollover = NULL;
 			atomic_inc(&match->sk_ref);
 			__fanout_link(sk, po);
 			err = 0;
@@ -1719,10 +1720,7 @@
 	}
 
 out:
-	if (err && rollover) {
-		kfree_rcu(rollover, rcu);
-		po->rollover = NULL;
-	}
+	kfree(rollover);
 	mutex_unlock(&fanout_mutex);
 	return err;
 }
@@ -1746,11 +1744,6 @@
 			list_del(&f->list);
 		else
 			f = NULL;
-
-		if (po->rollover) {
-			kfree_rcu(po->rollover, rcu);
-			po->rollover = NULL;
-		}
 	}
 	mutex_unlock(&fanout_mutex);
 
@@ -3039,6 +3032,7 @@
 	synchronize_net();
 
 	if (f) {
+		kfree(po->rollover);
 		fanout_release_data(f);
 		kfree(f);
 	}
@@ -3107,6 +3101,10 @@
 	if (need_rehook) {
 		if (po->running) {
 			rcu_read_unlock();
+			/* prevents packet_notifier() from calling
+			 * register_prot_hook()
+			 */
+			po->num = 0;
 			__unregister_prot_hook(sk, true);
 			rcu_read_lock();
 			dev_curr = po->prot_hook.dev;
@@ -3115,6 +3113,7 @@
 								 dev->ifindex);
 		}
 
+		BUG_ON(po->running);
 		po->num = proto;
 		po->prot_hook.type = proto;
 
@@ -3853,7 +3852,6 @@
 	void *data = &val;
 	union tpacket_stats_u st;
 	struct tpacket_rollover_stats rstats;
-	struct packet_rollover *rollover;
 
 	if (level != SOL_PACKET)
 		return -ENOPROTOOPT;
@@ -3932,18 +3930,13 @@
 		       0);
 		break;
 	case PACKET_ROLLOVER_STATS:
-		rcu_read_lock();
-		rollover = rcu_dereference(po->rollover);
-		if (rollover) {
-			rstats.tp_all = atomic_long_read(&rollover->num);
-			rstats.tp_huge = atomic_long_read(&rollover->num_huge);
-			rstats.tp_failed = atomic_long_read(&rollover->num_failed);
-			data = &rstats;
-			lv = sizeof(rstats);
-		}
-		rcu_read_unlock();
-		if (!rollover)
+		if (!po->rollover)
 			return -EINVAL;
+		rstats.tp_all = atomic_long_read(&po->rollover->num);
+		rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
+		rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
+		data = &rstats;
+		lv = sizeof(rstats);
 		break;
 	case PACKET_TX_HAS_OFF:
 		val = po->tp_tx_has_off;
diff --git a/net/packet/internal.h b/net/packet/internal.h
index 9ee4631..d55bfc3 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -92,7 +92,6 @@
 
 struct packet_rollover {
 	int			sock;
-	struct rcu_head		rcu;
 	atomic_long_t		num;
 	atomic_long_t		num_huge;
 	atomic_long_t		num_failed;
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index c985ecb..ae5ac17 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -252,7 +252,7 @@
 	const int pkt_len = 20;
 	struct qrtr_hdr *hdr;
 	struct sk_buff *skb;
-	u32 *buf;
+	__le32 *buf;
 
 	skb = alloc_skb(QRTR_HDR_SIZE + pkt_len, GFP_KERNEL);
 	if (!skb)
@@ -269,7 +269,7 @@
 	hdr->dst_node_id = cpu_to_le32(dst_node);
 	hdr->dst_port_id = cpu_to_le32(QRTR_PORT_CTRL);
 
-	buf = (u32 *)skb_put(skb, pkt_len);
+	buf = (__le32 *)skb_put(skb, pkt_len);
 	memset(buf, 0, pkt_len);
 	buf[0] = cpu_to_le32(QRTR_TYPE_RESUME_TX);
 	buf[1] = cpu_to_le32(src_node);
diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c
index d921adc..66b3d62 100644
--- a/net/rds/ib_frmr.c
+++ b/net/rds/ib_frmr.c
@@ -104,14 +104,15 @@
 	struct rds_ib_frmr *frmr = &ibmr->u.frmr;
 	struct ib_send_wr *failed_wr;
 	struct ib_reg_wr reg_wr;
-	int ret;
+	int ret, off = 0;
 
 	while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
 		atomic_inc(&ibmr->ic->i_fastreg_wrs);
 		cpu_relax();
 	}
 
-	ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_len, 0, PAGE_SIZE);
+	ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_len,
+				&off, PAGE_SIZE);
 	if (unlikely(ret != ibmr->sg_len))
 		return ret < 0 ? ret : -EINVAL;
 
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 8d3a851..f6027f4 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -40,7 +40,6 @@
 /*
  * XXX
  *  - build with sparse
- *  - should we limit the size of a mr region?  let transport return failure?
  *  - should we detect duplicate keys on a socket?  hmm.
  *  - an rdma is an mlock, apply rlimit?
  */
@@ -184,7 +183,7 @@
 	long i;
 	int ret;
 
-	if (rs->rs_bound_addr == 0) {
+	if (rs->rs_bound_addr == 0 || !rs->rs_transport) {
 		ret = -ENOTCONN; /* XXX not a great errno */
 		goto out;
 	}
@@ -200,6 +199,14 @@
 		goto out;
 	}
 
+	/* Restrict the size of mr irrespective of underlying transport
+	 * To account for unaligned mr regions, subtract one from nr_pages
+	 */
+	if ((nr_pages - 1) > (RDS_MAX_MSG_SIZE >> PAGE_SHIFT)) {
+		ret = -EMSGSIZE;
+		goto out;
+	}
+
 	rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n",
 		args->vec.addr, args->vec.bytes, nr_pages);
 
@@ -517,6 +524,9 @@
 
 	local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;
 
+	if (args->nr_local == 0)
+		return -EINVAL;
+
 	/* figure out the number of pages in the vector */
 	for (i = 0; i < args->nr_local; i++) {
 		if (copy_from_user(&vec, &local_vec[i],
@@ -866,6 +876,7 @@
 err:
 	if (page)
 		put_page(page);
+	rm->atomic.op_active = 0;
 	kfree(rm->atomic.op_notifier);
 
 	return ret;
diff --git a/net/rds/rds.h b/net/rds/rds.h
index f107a96..30a51fe 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -50,6 +50,9 @@
 #define RDS_FRAG_SHIFT	12
 #define RDS_FRAG_SIZE	((unsigned int)(1 << RDS_FRAG_SHIFT))
 
+/* Used to limit both RDMA and non-RDMA RDS message to 1MB */
+#define RDS_MAX_MSG_SIZE	((unsigned int)(1 << 20))
+
 #define RDS_CONG_MAP_BYTES	(65536 / 8)
 #define RDS_CONG_MAP_PAGES	(PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
 #define RDS_CONG_MAP_PAGE_BITS	(PAGE_SIZE * 8)
diff --git a/net/rds/send.c b/net/rds/send.c
index f28651b..ef53d164 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -946,6 +946,11 @@
 			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
 			if (!ret)
 				*allocated_mr = 1;
+			else if (ret == -ENODEV)
+				/* Accommodate the get_mr() case which can fail
+				 * if connection isn't established yet.
+				 */
+				ret = -EAGAIN;
 			break;
 		case RDS_CMSG_ATOMIC_CSWP:
 		case RDS_CMSG_ATOMIC_FADD:
@@ -988,6 +993,29 @@
 	return hash;
 }
 
+static int rds_rdma_bytes(struct msghdr *msg, size_t *rdma_bytes)
+{
+	struct rds_rdma_args *args;
+	struct cmsghdr *cmsg;
+
+	for_each_cmsghdr(cmsg, msg) {
+		if (!CMSG_OK(msg, cmsg))
+			return -EINVAL;
+
+		if (cmsg->cmsg_level != SOL_RDS)
+			continue;
+
+		if (cmsg->cmsg_type == RDS_CMSG_RDMA_ARGS) {
+			if (cmsg->cmsg_len <
+			    CMSG_LEN(sizeof(struct rds_rdma_args)))
+				return -EINVAL;
+			args = CMSG_DATA(cmsg);
+			*rdma_bytes += args->remote_vec.bytes;
+		}
+	}
+	return 0;
+}
+
 int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
 {
 	struct sock *sk = sock->sk;
@@ -1002,6 +1030,7 @@
 	int nonblock = msg->msg_flags & MSG_DONTWAIT;
 	long timeo = sock_sndtimeo(sk, nonblock);
 	struct rds_conn_path *cpath;
+	size_t total_payload_len = payload_len, rdma_payload_len = 0;
 
 	/* Mirror Linux UDP mirror of BSD error message compatibility */
 	/* XXX: Perhaps MSG_MORE someday */
@@ -1034,6 +1063,16 @@
 	}
 	release_sock(sk);
 
+	ret = rds_rdma_bytes(msg, &rdma_payload_len);
+	if (ret)
+		goto out;
+
+	total_payload_len += rdma_payload_len;
+	if (max_t(size_t, payload_len, rdma_payload_len) > RDS_MAX_MSG_SIZE) {
+		ret = -EMSGSIZE;
+		goto out;
+	}
+
 	if (payload_len > rds_sk_sndbuf(rs)) {
 		ret = -EMSGSIZE;
 		goto out;
@@ -1083,8 +1122,12 @@
 
 	/* Parse any control messages the user may have included. */
 	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
-	if (ret)
+	if (ret) {
+		/* Trigger connection so that its ready for the next retry */
+		if (ret ==  -EAGAIN)
+			rds_conn_connect_if_down(conn);
 		goto out;
+	}
 
 	if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
 		printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 20e2923..78f976d3 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -478,9 +478,10 @@
 	 * we do need to clean up the listen socket here.
 	 */
 	if (rtn->rds_tcp_listen_sock) {
-		rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
+		struct socket *lsock = rtn->rds_tcp_listen_sock;
+
 		rtn->rds_tcp_listen_sock = NULL;
-		flush_work(&rtn->rds_tcp_accept_w);
+		rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
 	}
 }
 
@@ -517,10 +518,10 @@
 	struct rds_tcp_connection *tc, *_tc;
 	LIST_HEAD(tmp_list);
 	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
+	struct socket *lsock = rtn->rds_tcp_listen_sock;
 
-	rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
 	rtn->rds_tcp_listen_sock = NULL;
-	flush_work(&rtn->rds_tcp_accept_w);
+	rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
 	spin_lock_irq(&rds_tcp_conn_lock);
 	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
 		struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
@@ -540,8 +541,12 @@
 void *rds_tcp_listen_sock_def_readable(struct net *net)
 {
 	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
+	struct socket *lsock = rtn->rds_tcp_listen_sock;
 
-	return rtn->rds_tcp_listen_sock->sk->sk_user_data;
+	if (!lsock)
+		return NULL;
+
+	return lsock->sk->sk_user_data;
 }
 
 static int rds_tcp_dev_event(struct notifier_block *this,
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index 9a1cc89..56ea662 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -66,7 +66,7 @@
 
 /* tcp_listen.c */
 struct socket *rds_tcp_listen_init(struct net *);
-void rds_tcp_listen_stop(struct socket *);
+void rds_tcp_listen_stop(struct socket *sock, struct work_struct *acceptor);
 void rds_tcp_listen_data_ready(struct sock *sk);
 int rds_tcp_accept_one(struct socket *sock);
 int rds_tcp_keepalive(struct socket *sock);
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 525b624..185a56b 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -227,6 +227,9 @@
 	 * before it has been accepted and the accepter has set up their
 	 * data_ready.. we only want to queue listen work for our listening
 	 * socket
+	 *
+	 * (*ready)() may be null if we are racing with netns delete, and
+	 * the listen socket is being torn down.
 	 */
 	if (sk->sk_state == TCP_LISTEN)
 		rds_tcp_accept_work(sk);
@@ -235,7 +238,8 @@
 
 out:
 	read_unlock_bh(&sk->sk_callback_lock);
-	ready(sk);
+	if (ready)
+		ready(sk);
 }
 
 struct socket *rds_tcp_listen_init(struct net *net)
@@ -275,7 +279,7 @@
 	return NULL;
 }
 
-void rds_tcp_listen_stop(struct socket *sock)
+void rds_tcp_listen_stop(struct socket *sock, struct work_struct *acceptor)
 {
 	struct sock *sk;
 
@@ -296,5 +300,6 @@
 
 	/* wait for accepts to stop and close the socket */
 	flush_workqueue(rds_wq);
+	flush_work(acceptor);
 	sock_release(sock);
 }
diff --git a/net/rmnet_data/rmnet_data_handlers.c b/net/rmnet_data/rmnet_data_handlers.c
index 8faf7a7..50dd516 100644
--- a/net/rmnet_data/rmnet_data_handlers.c
+++ b/net/rmnet_data/rmnet_data_handlers.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -453,6 +453,13 @@
 	}
 
 	ep = &config->muxed_ep[mux_id];
+	if (!ep->refcount) {
+		LOGD("Packet on %s:%d; has no logical endpoint config",
+		     skb->dev->name, mux_id);
+
+		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPINGRESS_MUX_NO_EP);
+		return RX_HANDLER_CONSUMED;
+	}
 
 	skb->dev = ep->egress_dev;
 
diff --git a/net/rmnet_data/rmnet_data_stats.h b/net/rmnet_data/rmnet_data_stats.h
index 366e486..75ed434 100644
--- a/net/rmnet_data/rmnet_data_stats.h
+++ b/net/rmnet_data/rmnet_data_stats.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016, 2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -39,6 +39,7 @@
 	RMNET_STATS_SKBFREE_DEAGG_DATA_LEN_0,
 	RMNET_STATS_SKBFREE_INGRESS_BAD_MAP_CKSUM,
 	RMNET_STATS_SKBFREE_MAPC_UNSUPPORTED,
+	RMNET_STATS_SKBFREE_MAPINGRESS_MUX_NO_EP,
 	RMNET_STATS_SKBFREE_MAX
 };
 
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 3f9d8d7..b099b64 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -275,6 +275,10 @@
 		rxrpc_conn_retransmit_call(conn, skb);
 		return 0;
 
+	case RXRPC_PACKET_TYPE_BUSY:
+		/* Just ignore BUSY packets for now. */
+		return 0;
+
 	case RXRPC_PACKET_TYPE_ABORT:
 		if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
 				  &wtmp, sizeof(wtmp)) < 0)
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 44fb8d8..1060d14 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -649,6 +649,7 @@
 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
 	struct rxrpc_peer *peer;
 	unsigned int mtu;
+	bool wake = false;
 	u32 rwind = ntohl(ackinfo->rwind);
 
 	_proto("Rx ACK %%%u Info { rx=%u max=%u rwin=%u jm=%u }",
@@ -656,9 +657,14 @@
 	       ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU),
 	       rwind, ntohl(ackinfo->jumbo_max));
 
-	if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
-		rwind = RXRPC_RXTX_BUFF_SIZE - 1;
-	call->tx_winsize = rwind;
+	if (call->tx_winsize != rwind) {
+		if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
+			rwind = RXRPC_RXTX_BUFF_SIZE - 1;
+		if (rwind > call->tx_winsize)
+			wake = true;
+		call->tx_winsize = rwind;
+	}
+
 	if (call->cong_ssthresh > rwind)
 		call->cong_ssthresh = rwind;
 
@@ -672,6 +678,9 @@
 		spin_unlock_bh(&peer->lock);
 		_net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata);
 	}
+
+	if (wake)
+		wake_up(&call->waitq);
 }
 
 /*
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index e0aa30f..9617b42 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -161,7 +161,7 @@
 	if (action == TC_ACT_SHOT)
 		this_cpu_ptr(gact->common.cpu_qstats)->drops += packets;
 
-	tm->lastuse = lastuse;
+	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
 }
 
 static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a,
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 6b07fba..fc3650b 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -211,7 +211,7 @@
 	struct tcf_t *tm = &m->tcf_tm;
 
 	_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
-	tm->lastuse = lastuse;
+	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
 }
 
 static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 1308bbf..b56d579 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -200,9 +200,13 @@
 	pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p);
 
 	if (p->set_tc_index) {
+		int wlen = skb_network_offset(skb);
+
 		switch (tc_skb_protocol(skb)) {
 		case htons(ETH_P_IP):
-			if (skb_cow_head(skb, sizeof(struct iphdr)))
+			wlen += sizeof(struct iphdr);
+			if (!pskb_may_pull(skb, wlen) ||
+			    skb_try_make_writable(skb, wlen))
 				goto drop;
 
 			skb->tc_index = ipv4_get_dsfield(ip_hdr(skb))
@@ -210,7 +214,9 @@
 			break;
 
 		case htons(ETH_P_IPV6):
-			if (skb_cow_head(skb, sizeof(struct ipv6hdr)))
+			wlen += sizeof(struct ipv6hdr);
+			if (!pskb_may_pull(skb, wlen) ||
+			    skb_try_make_writable(skb, wlen))
 				goto drop;
 
 			skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb))
diff --git a/net/sctp/debug.c b/net/sctp/debug.c
index 95d7b15..e371a0d 100644
--- a/net/sctp/debug.c
+++ b/net/sctp/debug.c
@@ -166,7 +166,7 @@
 /* Lookup timer debug name. */
 const char *sctp_tname(const sctp_subtype_t id)
 {
-	if (id.timeout <= SCTP_EVENT_TIMEOUT_MAX)
+	if (id.timeout < ARRAY_SIZE(sctp_timer_tbl))
 		return sctp_timer_tbl[id.timeout];
 	return "unknown_timer";
 }
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 5825853..0994ce4 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -382,17 +382,18 @@
 }
 
 static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
-				    struct sctp_sndrcvinfo *sinfo,
-				    struct list_head *queue, int msg_len)
+				    struct sctp_sndrcvinfo *sinfo, int msg_len)
 {
+	struct sctp_outq *q = &asoc->outqueue;
 	struct sctp_chunk *chk, *temp;
 
-	list_for_each_entry_safe(chk, temp, queue, list) {
+	list_for_each_entry_safe(chk, temp, &q->out_chunk_list, list) {
 		if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
 		    chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive)
 			continue;
 
 		list_del_init(&chk->list);
+		q->out_qlen -= chk->skb->len;
 		asoc->sent_cnt_removable--;
 		asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
 
@@ -431,9 +432,7 @@
 			return;
 	}
 
-	sctp_prsctp_prune_unsent(asoc, sinfo,
-				 &asoc->outqueue.out_chunk_list,
-				 msg_len);
+	sctp_prsctp_prune_unsent(asoc, sinfo, msg_len);
 }
 
 /* Mark all the eligible packets on a transport for retransmission.  */
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index c062cea..7181ce6 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -82,8 +82,8 @@
 /* Forward declarations for internal helper functions. */
 static int sctp_writeable(struct sock *sk);
 static void sctp_wfree(struct sk_buff *skb);
-static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p,
-				size_t msg_len);
+static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
+				size_t msg_len, struct sock **orig_sk);
 static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
 static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
 static int sctp_wait_for_accept(struct sock *sk, long timeo);
@@ -1957,9 +1957,16 @@
 
 	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
 	if (!sctp_wspace(asoc)) {
-		err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
-		if (err)
+		/* sk can be changed by peel off when waiting for buf. */
+		err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len, &sk);
+		if (err) {
+			if (err == -ESRCH) {
+				/* asoc is already dead. */
+				new_asoc = NULL;
+				err = -EPIPE;
+			}
 			goto out_free;
+		}
 	}
 
 	/* If an address is passed with the sendto/sendmsg call, it is used
@@ -4239,7 +4246,7 @@
 	SCTP_DBG_OBJCNT_INC(sock);
 
 	local_bh_disable();
-	percpu_counter_inc(&sctp_sockets_allocated);
+	sk_sockets_allocated_inc(sk);
 	sock_prot_inuse_add(net, sk->sk_prot, 1);
 
 	/* Nothing can fail after this block, otherwise
@@ -4283,7 +4290,7 @@
 	}
 	sctp_endpoint_free(sp->ep);
 	local_bh_disable();
-	percpu_counter_dec(&sctp_sockets_allocated);
+	sk_sockets_allocated_dec(sk);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 	local_bh_enable();
 }
@@ -4771,12 +4778,6 @@
 	if (!asoc)
 		return -EINVAL;
 
-	/* If there is a thread waiting on more sndbuf space for
-	 * sending on this asoc, it cannot be peeled.
-	 */
-	if (waitqueue_active(&asoc->wait))
-		return -EBUSY;
-
 	/* An association cannot be branched off from an already peeled-off
 	 * socket, nor is this supported for tcp style sockets.
 	 */
@@ -7440,7 +7441,7 @@
 
 /* Helper function to wait for space in the sndbuf.  */
 static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
-				size_t msg_len)
+				size_t msg_len, struct sock **orig_sk)
 {
 	struct sock *sk = asoc->base.sk;
 	int err = 0;
@@ -7457,10 +7458,11 @@
 	for (;;) {
 		prepare_to_wait_exclusive(&asoc->wait, &wait,
 					  TASK_INTERRUPTIBLE);
+		if (asoc->base.dead)
+			goto do_dead;
 		if (!*timeo_p)
 			goto do_nonblock;
-		if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
-		    asoc->base.dead)
+		if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING)
 			goto do_error;
 		if (signal_pending(current))
 			goto do_interrupted;
@@ -7473,11 +7475,17 @@
 		release_sock(sk);
 		current_timeo = schedule_timeout(current_timeo);
 		lock_sock(sk);
+		if (sk != asoc->base.sk) {
+			release_sock(sk);
+			sk = asoc->base.sk;
+			lock_sock(sk);
+		}
 
 		*timeo_p = current_timeo;
 	}
 
 out:
+	*orig_sk = sk;
 	finish_wait(&asoc->wait, &wait);
 
 	/* Release the association's refcnt.  */
@@ -7485,6 +7493,10 @@
 
 	return err;
 
+do_dead:
+	err = -ESRCH;
+	goto out;
+
 do_error:
 	err = -EPIPE;
 	goto out;
diff --git a/net/socket.c b/net/socket.c
index a4fb472..fc0b609 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1720,6 +1720,7 @@
 	/* We assume all kernel code knows the size of sockaddr_storage */
 	msg.msg_namelen = 0;
 	msg.msg_iocb = NULL;
+	msg.msg_flags = 0;
 	if (sock->file->f_flags & O_NONBLOCK)
 		flags |= MSG_DONTWAIT;
 	err = sock_recvmsg(sock, &msg, flags);
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
index 25d9a9c..624c322 100644
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
@@ -231,6 +231,7 @@
 			goto out_free_groups;
 		creds->cr_group_info->gid[i] = kgid;
 	}
+	groups_sort(creds->cr_group_info);
 
 	return 0;
 out_free_groups:
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 1530825..6a08bc4 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -481,6 +481,7 @@
 				goto out;
 			rsci.cred.cr_group_info->gid[i] = kgid;
 		}
+		groups_sort(rsci.cred.cr_group_info);
 
 		/* mech name */
 		len = qword_get(&mesg, buf, mlen);
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 5db68b3..600eacc 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -274,10 +274,9 @@
 
 static void rpc_set_active(struct rpc_task *task)
 {
-	trace_rpc_task_begin(task->tk_client, task, NULL);
-
 	rpc_task_set_debuginfo(task);
 	set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
+	trace_rpc_task_begin(task->tk_client, task, NULL);
 }
 
 /*
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 64af4f0..738a243 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -520,6 +520,7 @@
 		ug.gi->gid[i] = kgid;
 	}
 
+	groups_sort(ug.gi);
 	ugp = unix_gid_lookup(cd, uid);
 	if (ugp) {
 		struct cache_head *ch;
@@ -819,6 +820,7 @@
 		kgid_t kgid = make_kgid(&init_user_ns, svc_getnl(argv));
 		cred->cr_group_info->gid[i] = kgid;
 	}
+	groups_sort(cred->cr_group_info);
 	if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
 		*authp = rpc_autherr_badverf;
 		return SVC_DENIED;
diff --git a/net/tipc/server.c b/net/tipc/server.c
index f89c0c2..f4c1b18 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -86,7 +86,6 @@
 static void tipc_recv_work(struct work_struct *work);
 static void tipc_send_work(struct work_struct *work);
 static void tipc_clean_outqueues(struct tipc_conn *con);
-static void tipc_sock_release(struct tipc_conn *con);
 
 static void tipc_conn_kref_release(struct kref *kref)
 {
@@ -104,7 +103,6 @@
 		}
 		saddr->scope = -TIPC_NODE_SCOPE;
 		kernel_bind(sock, (struct sockaddr *)saddr, sizeof(*saddr));
-		tipc_sock_release(con);
 		sock_release(sock);
 		con->sock = NULL;
 
@@ -194,19 +192,15 @@
 	write_unlock_bh(&sk->sk_callback_lock);
 }
 
-static void tipc_sock_release(struct tipc_conn *con)
+static void tipc_close_conn(struct tipc_conn *con)
 {
 	struct tipc_server *s = con->server;
 
-	if (con->conid)
-		s->tipc_conn_release(con->conid, con->usr_data);
-
-	tipc_unregister_callbacks(con);
-}
-
-static void tipc_close_conn(struct tipc_conn *con)
-{
 	if (test_and_clear_bit(CF_CONNECTED, &con->flags)) {
+		tipc_unregister_callbacks(con);
+
+		if (con->conid)
+			s->tipc_conn_release(con->conid, con->usr_data);
 
 		/* We shouldn't flush pending works as we may be in the
 		 * thread. In fact the races with pending rx/tx work structs
@@ -319,6 +313,7 @@
 	newcon->usr_data = s->tipc_conn_new(newcon->conid);
 	if (!newcon->usr_data) {
 		sock_release(newsock);
+		conn_put(newcon);
 		return -ENOMEM;
 	}
 
@@ -625,14 +620,12 @@
 void tipc_server_stop(struct tipc_server *s)
 {
 	struct tipc_conn *con;
-	int total = 0;
 	int id;
 
 	spin_lock_bh(&s->idr_lock);
-	for (id = 0; total < s->idr_in_use; id++) {
+	for (id = 0; s->idr_in_use; id++) {
 		con = idr_find(&s->conn_idr, id);
 		if (con) {
-			total++;
 			spin_unlock_bh(&s->idr_lock);
 			tipc_close_conn(con);
 			spin_lock_bh(&s->idr_lock);
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 9d94e65..271cd66 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -141,6 +141,11 @@
 static void tipc_subscrp_timeout(unsigned long data)
 {
 	struct tipc_subscription *sub = (struct tipc_subscription *)data;
+	struct tipc_subscriber *subscriber = sub->subscriber;
+
+	spin_lock_bh(&subscriber->lock);
+	tipc_nametbl_unsubscribe(sub);
+	spin_unlock_bh(&subscriber->lock);
 
 	/* Notify subscriber of timeout */
 	tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
@@ -173,7 +178,6 @@
 	struct tipc_subscriber *subscriber = sub->subscriber;
 
 	spin_lock_bh(&subscriber->lock);
-	tipc_nametbl_unsubscribe(sub);
 	list_del(&sub->subscrp_list);
 	atomic_dec(&tn->subscription_count);
 	spin_unlock_bh(&subscriber->lock);
@@ -205,6 +209,7 @@
 		if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr)))
 			continue;
 
+		tipc_nametbl_unsubscribe(sub);
 		tipc_subscrp_get(sub);
 		spin_unlock_bh(&subscriber->lock);
 		tipc_subscrp_delete(sub);
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index b58dc95..107375d 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -371,10 +371,6 @@
 			goto rcu_out;
 	}
 
-	tipc_rcv(sock_net(sk), skb, b);
-	rcu_read_unlock();
-	return 0;
-
 rcu_out:
 	rcu_read_unlock();
 out:
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 8a398b3..ee12e17 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1101,10 +1101,19 @@
 	.sendpage = sock_no_sendpage,
 };
 
+static int vsock_transport_cancel_pkt(struct vsock_sock *vsk)
+{
+	if (!transport->cancel_pkt)
+		return -EOPNOTSUPP;
+
+	return transport->cancel_pkt(vsk);
+}
+
 static void vsock_connect_timeout(struct work_struct *work)
 {
 	struct sock *sk;
 	struct vsock_sock *vsk;
+	int cancel = 0;
 
 	vsk = container_of(work, struct vsock_sock, dwork.work);
 	sk = sk_vsock(vsk);
@@ -1115,8 +1124,11 @@
 		sk->sk_state = SS_UNCONNECTED;
 		sk->sk_err = ETIMEDOUT;
 		sk->sk_error_report(sk);
+		cancel = 1;
 	}
 	release_sock(sk);
+	if (cancel)
+		vsock_transport_cancel_pkt(vsk);
 
 	sock_put(sk);
 }
@@ -1223,11 +1235,13 @@
 			err = sock_intr_errno(timeout);
 			sk->sk_state = SS_UNCONNECTED;
 			sock->state = SS_UNCONNECTED;
+			vsock_transport_cancel_pkt(vsk);
 			goto out_wait;
 		} else if (timeout == 0) {
 			err = -ETIMEDOUT;
 			sk->sk_state = SS_UNCONNECTED;
 			sock->state = SS_UNCONNECTED;
+			vsock_transport_cancel_pkt(vsk);
 			goto out_wait;
 		}
 
@@ -1524,8 +1538,7 @@
 	long timeout;
 	int err;
 	struct vsock_transport_send_notify_data send_data;
-
-	DEFINE_WAIT(wait);
+	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
 	sk = sock->sk;
 	vsk = vsock_sk(sk);
@@ -1568,11 +1581,10 @@
 	if (err < 0)
 		goto out;
 
-
 	while (total_written < len) {
 		ssize_t written;
 
-		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+		add_wait_queue(sk_sleep(sk), &wait);
 		while (vsock_stream_has_space(vsk) == 0 &&
 		       sk->sk_err == 0 &&
 		       !(sk->sk_shutdown & SEND_SHUTDOWN) &&
@@ -1581,33 +1593,30 @@
 			/* Don't wait for non-blocking sockets. */
 			if (timeout == 0) {
 				err = -EAGAIN;
-				finish_wait(sk_sleep(sk), &wait);
+				remove_wait_queue(sk_sleep(sk), &wait);
 				goto out_err;
 			}
 
 			err = transport->notify_send_pre_block(vsk, &send_data);
 			if (err < 0) {
-				finish_wait(sk_sleep(sk), &wait);
+				remove_wait_queue(sk_sleep(sk), &wait);
 				goto out_err;
 			}
 
 			release_sock(sk);
-			timeout = schedule_timeout(timeout);
+			timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
 			lock_sock(sk);
 			if (signal_pending(current)) {
 				err = sock_intr_errno(timeout);
-				finish_wait(sk_sleep(sk), &wait);
+				remove_wait_queue(sk_sleep(sk), &wait);
 				goto out_err;
 			} else if (timeout == 0) {
 				err = -EAGAIN;
-				finish_wait(sk_sleep(sk), &wait);
+				remove_wait_queue(sk_sleep(sk), &wait);
 				goto out_err;
 			}
-
-			prepare_to_wait(sk_sleep(sk), &wait,
-					TASK_INTERRUPTIBLE);
 		}
-		finish_wait(sk_sleep(sk), &wait);
+		remove_wait_queue(sk_sleep(sk), &wait);
 
 		/* These checks occur both as part of and after the loop
 		 * conditional since we need to check before and after
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 62c056e..9c07c76 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -57,6 +57,7 @@
 	pkt->len		= len;
 	pkt->hdr.len		= cpu_to_le32(len);
 	pkt->reply		= info->reply;
+	pkt->vsk		= info->vsk;
 
 	if (info->msg && len > 0) {
 		pkt->buf = kmalloc(len, GFP_KERNEL);
@@ -180,6 +181,7 @@
 	struct virtio_vsock_pkt_info info = {
 		.op = VIRTIO_VSOCK_OP_CREDIT_UPDATE,
 		.type = type,
+		.vsk = vsk,
 	};
 
 	return virtio_transport_send_pkt_info(vsk, &info);
@@ -519,6 +521,7 @@
 	struct virtio_vsock_pkt_info info = {
 		.op = VIRTIO_VSOCK_OP_REQUEST,
 		.type = VIRTIO_VSOCK_TYPE_STREAM,
+		.vsk = vsk,
 	};
 
 	return virtio_transport_send_pkt_info(vsk, &info);
@@ -534,6 +537,7 @@
 			  VIRTIO_VSOCK_SHUTDOWN_RCV : 0) |
 			 (mode & SEND_SHUTDOWN ?
 			  VIRTIO_VSOCK_SHUTDOWN_SEND : 0),
+		.vsk = vsk,
 	};
 
 	return virtio_transport_send_pkt_info(vsk, &info);
@@ -560,6 +564,7 @@
 		.type = VIRTIO_VSOCK_TYPE_STREAM,
 		.msg = msg,
 		.pkt_len = len,
+		.vsk = vsk,
 	};
 
 	return virtio_transport_send_pkt_info(vsk, &info);
@@ -581,6 +586,7 @@
 		.op = VIRTIO_VSOCK_OP_RST,
 		.type = VIRTIO_VSOCK_TYPE_STREAM,
 		.reply = !!pkt,
+		.vsk = vsk,
 	};
 
 	/* Send RST only if the original pkt is not a RST pkt */
@@ -826,6 +832,7 @@
 		.remote_cid = le64_to_cpu(pkt->hdr.src_cid),
 		.remote_port = le32_to_cpu(pkt->hdr.src_port),
 		.reply = true,
+		.vsk = vsk,
 	};
 
 	return virtio_transport_send_pkt_info(vsk, &info);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index d8387b1..8f9bd38 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -6701,8 +6701,17 @@
 	if (info->attrs[NL80211_ATTR_SCAN_FLAGS]) {
 		request->flags = nla_get_u32(
 			info->attrs[NL80211_ATTR_SCAN_FLAGS]);
-		if ((request->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) &&
-		    !(wiphy->features & NL80211_FEATURE_LOW_PRIORITY_SCAN)) {
+		if (((request->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) &&
+		     !(wiphy->features & NL80211_FEATURE_LOW_PRIORITY_SCAN)) ||
+		    ((request->flags & NL80211_SCAN_FLAG_LOW_SPAN) &&
+		     !wiphy_ext_feature_isset(wiphy,
+				      NL80211_EXT_FEATURE_LOW_SPAN_SCAN)) ||
+		    ((request->flags & NL80211_SCAN_FLAG_LOW_POWER) &&
+		     !wiphy_ext_feature_isset(wiphy,
+				      NL80211_EXT_FEATURE_LOW_POWER_SCAN)) ||
+		    ((request->flags & NL80211_SCAN_FLAG_HIGH_ACCURACY) &&
+		     !wiphy_ext_feature_isset(wiphy,
+		      NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN))) {
 			err = -EOPNOTSUPP;
 			goto out_free;
 		}
@@ -7585,6 +7594,11 @@
 			      intbss->ts_boottime, NL80211_BSS_PAD))
 		goto nla_put_failure;
 
+	if (!nl80211_put_signal(msg, intbss->pub.chains,
+				intbss->pub.chain_signal,
+				NL80211_BSS_CHAIN_SIGNAL))
+		goto nla_put_failure;
+
 	switch (rdev->wiphy.signal_type) {
 	case CFG80211_SIGNAL_TYPE_MBM:
 		if (nla_put_u32(msg, NL80211_BSS_SIGNAL_MBM, res->signal))
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 35ad69f..ad8611b 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -904,6 +904,9 @@
 		found->ts = tmp->ts;
 		found->ts_boottime = tmp->ts_boottime;
 		found->parent_tsf = tmp->parent_tsf;
+		found->pub.chains = tmp->pub.chains;
+		memcpy(found->pub.chain_signal, tmp->pub.chain_signal,
+		       IEEE80211_MAX_CHAINS);
 		ether_addr_copy(found->parent_bssid, tmp->parent_bssid);
 	} else {
 		struct cfg80211_internal_bss *new;
@@ -1156,6 +1159,8 @@
 	tmp.pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
 	tmp.ts_boottime = data->boottime_ns;
 	tmp.parent_tsf = data->parent_tsf;
+	tmp.pub.chains = data->chains;
+	memcpy(tmp.pub.chain_signal, data->chain_signal, IEEE80211_MAX_CHAINS);
 	ether_addr_copy(tmp.parent_bssid, data->parent_bssid);
 
 	signal_valid = abs(data->chan->center_freq - channel->center_freq) <=
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 77fbfbd..178acf9 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1393,6 +1393,7 @@
 		newp->xfrm_nr = old->xfrm_nr;
 		newp->index = old->index;
 		newp->type = old->type;
+		newp->family = old->family;
 		memcpy(newp->xfrm_vec, old->xfrm_vec,
 		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
 		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
diff --git a/scripts/coccicheck b/scripts/coccicheck
index ec487b8..c36b04b 100755
--- a/scripts/coccicheck
+++ b/scripts/coccicheck
@@ -29,12 +29,6 @@
 	VERBOSE=0
 fi
 
-if [ -z "$J" ]; then
-	NPROC=$(getconf _NPROCESSORS_ONLN)
-else
-	NPROC="$J"
-fi
-
 FLAGS="--very-quiet"
 
 # You can use SPFLAGS to append extra arguments to coccicheck or override any
@@ -69,6 +63,9 @@
     # Take only the last argument, which is the C file to test
     shift $(( $# - 1 ))
     OPTIONS="$COCCIINCLUDE $1"
+
+    # No need to parallelize Coccinelle since this mode takes one input file.
+    NPROC=1
 else
     ONLINE=0
     if [ "$KBUILD_EXTMOD" = "" ] ; then
@@ -76,6 +73,12 @@
     else
         OPTIONS="--dir $KBUILD_EXTMOD $COCCIINCLUDE"
     fi
+
+    if [ -z "$J" ]; then
+        NPROC=$(getconf _NPROCESSORS_ONLN)
+    else
+        NPROC="$J"
+    fi
 fi
 
 if [ "$KBUILD_EXTMOD" != "" ] ; then
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index bd83497..845eb9b 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -838,6 +838,7 @@
 	".cmem*",			/* EZchip */
 	".fmt_slot*",			/* EZchip */
 	".gnu.lto*",
+	".discard.*",
 	NULL
 };
 
diff --git a/scripts/module-common.lds b/scripts/module-common.lds
index 73a2c7d..9b6e246 100644
--- a/scripts/module-common.lds
+++ b/scripts/module-common.lds
@@ -4,7 +4,10 @@
  * combine them automatically.
  */
 SECTIONS {
-	/DISCARD/ : { *(.discard) }
+	/DISCARD/ : {
+		*(.discard)
+		*(.discard.*)
+	}
 
 	__ksymtab		0 : { *(SORT(___ksymtab+*)) }
 	__ksymtab_gpl		0 : { *(SORT(___ksymtab_gpl+*)) }
@@ -19,4 +22,6 @@
 
 	. = ALIGN(8);
 	.init_array		0 : { *(SORT(.init_array.*)) *(.init_array) }
+
+	__jump_table		0 : ALIGN(8) { KEEP(*(__jump_table)) }
 }
diff --git a/scripts/package/Makefile b/scripts/package/Makefile
index 71b4a8a..7badec3 100644
--- a/scripts/package/Makefile
+++ b/scripts/package/Makefile
@@ -39,10 +39,9 @@
 	false; \
 fi ; \
 $(srctree)/scripts/setlocalversion --save-scmversion; \
-ln -sf $(srctree) $(2); \
 tar -cz $(RCS_TAR_IGNORE) -f $(2).tar.gz \
-	$(addprefix $(2)/,$(TAR_CONTENT) $(3)); \
-rm -f $(2) $(objtree)/.scmversion
+	--transform 's:^:$(2)/:S' $(TAR_CONTENT) $(3); \
+rm -f $(objtree)/.scmversion
 
 # rpm-pkg
 # ---------------------------------------------------------------------------
diff --git a/security/Kconfig b/security/Kconfig
index 4415de2..638afc8 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -50,6 +50,16 @@
 	bool
 	default n
 
+config PAGE_TABLE_ISOLATION
+	bool "Remove the kernel mapping in user mode"
+	default y
+	depends on X86_64 && SMP
+	help
+	  This enforces a strict kernel and user space isolation, in order
+	  to close hardware side channels on kernel address information.
+
+	  If you are unsure how to answer this question, answer Y.
+
 config SECURITYFS
 	bool "Enable the securityfs filesystem"
 	help
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 0e87629..2b3def1 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -51,6 +51,8 @@
 			ima_hash_algo = HASH_ALGO_SHA1;
 		else if (strncmp(str, "md5", 3) == 0)
 			ima_hash_algo = HASH_ALGO_MD5;
+		else
+			return 1;
 		goto out;
 	}
 
@@ -60,6 +62,8 @@
 			break;
 		}
 	}
+	if (i == HASH_ALGO__LAST)
+		return 1;
 out:
 	hash_setup_done = 1;
 	return 1;
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 5030fcf..cb7f8f7 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -250,11 +250,12 @@
  * The keyring selected is returned with an extra reference upon it which the
  * caller must release.
  */
-static void construct_get_dest_keyring(struct key **_dest_keyring)
+static int construct_get_dest_keyring(struct key **_dest_keyring)
 {
 	struct request_key_auth *rka;
 	const struct cred *cred = current_cred();
 	struct key *dest_keyring = *_dest_keyring, *authkey;
+	int ret;
 
 	kenter("%p", dest_keyring);
 
@@ -263,6 +264,8 @@
 		/* the caller supplied one */
 		key_get(dest_keyring);
 	} else {
+		bool do_perm_check = true;
+
 		/* use a default keyring; falling through the cases until we
 		 * find one that we actually have */
 		switch (cred->jit_keyring) {
@@ -277,8 +280,10 @@
 					dest_keyring =
 						key_get(rka->dest_keyring);
 				up_read(&authkey->sem);
-				if (dest_keyring)
+				if (dest_keyring) {
+					do_perm_check = false;
 					break;
+				}
 			}
 
 		case KEY_REQKEY_DEFL_THREAD_KEYRING:
@@ -313,11 +318,29 @@
 		default:
 			BUG();
 		}
+
+		/*
+		 * Require Write permission on the keyring.  This is essential
+		 * because the default keyring may be the session keyring, and
+		 * joining a keyring only requires Search permission.
+		 *
+		 * However, this check is skipped for the "requestor keyring" so
+		 * that /sbin/request-key can itself use request_key() to add
+		 * keys to the original requestor's destination keyring.
+		 */
+		if (dest_keyring && do_perm_check) {
+			ret = key_permission(make_key_ref(dest_keyring, 1),
+					     KEY_NEED_WRITE);
+			if (ret) {
+				key_put(dest_keyring);
+				return ret;
+			}
+		}
 	}
 
 	*_dest_keyring = dest_keyring;
 	kleave(" [dk %d]", key_serial(dest_keyring));
-	return;
+	return 0;
 }
 
 /*
@@ -443,11 +466,15 @@
 	if (ctx->index_key.type == &key_type_keyring)
 		return ERR_PTR(-EPERM);
 
-	user = key_user_lookup(current_fsuid());
-	if (!user)
-		return ERR_PTR(-ENOMEM);
+	ret = construct_get_dest_keyring(&dest_keyring);
+	if (ret)
+		goto error;
 
-	construct_get_dest_keyring(&dest_keyring);
+	user = key_user_lookup(current_fsuid());
+	if (!user) {
+		ret = -ENOMEM;
+		goto error_put_dest_keyring;
+	}
 
 	ret = construct_alloc_key(ctx, dest_keyring, flags, user, &key);
 	key_user_put(user);
@@ -462,7 +489,7 @@
 	} else if (ret == -EINPROGRESS) {
 		ret = 0;
 	} else {
-		goto couldnt_alloc_key;
+		goto error_put_dest_keyring;
 	}
 
 	key_put(dest_keyring);
@@ -472,8 +499,9 @@
 construction_failed:
 	key_negate_and_link(key, key_negative_timeout, NULL, NULL);
 	key_put(key);
-couldnt_alloc_key:
+error_put_dest_keyring:
 	key_put(dest_keyring);
+error:
 	kleave(" = %d", ret);
 	return ERR_PTR(ret);
 }
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index e60c79d..52f3c55 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -348,27 +348,26 @@
 	struct avc_xperms_decision_node *xpd_node;
 	struct extended_perms_decision *xpd;
 
-	xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep,
-				GFP_ATOMIC | __GFP_NOMEMALLOC);
+	xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep, GFP_NOWAIT);
 	if (!xpd_node)
 		return NULL;
 
 	xpd = &xpd_node->xpd;
 	if (which & XPERMS_ALLOWED) {
 		xpd->allowed = kmem_cache_zalloc(avc_xperms_data_cachep,
-						GFP_ATOMIC | __GFP_NOMEMALLOC);
+						GFP_NOWAIT);
 		if (!xpd->allowed)
 			goto error;
 	}
 	if (which & XPERMS_AUDITALLOW) {
 		xpd->auditallow = kmem_cache_zalloc(avc_xperms_data_cachep,
-						GFP_ATOMIC | __GFP_NOMEMALLOC);
+						GFP_NOWAIT);
 		if (!xpd->auditallow)
 			goto error;
 	}
 	if (which & XPERMS_DONTAUDIT) {
 		xpd->dontaudit = kmem_cache_zalloc(avc_xperms_data_cachep,
-						GFP_ATOMIC | __GFP_NOMEMALLOC);
+						GFP_NOWAIT);
 		if (!xpd->dontaudit)
 			goto error;
 	}
@@ -396,8 +395,7 @@
 {
 	struct avc_xperms_node *xp_node;
 
-	xp_node = kmem_cache_zalloc(avc_xperms_cachep,
-				GFP_ATOMIC|__GFP_NOMEMALLOC);
+	xp_node = kmem_cache_zalloc(avc_xperms_cachep, GFP_NOWAIT);
 	if (!xp_node)
 		return xp_node;
 	INIT_LIST_HEAD(&xp_node->xpd_head);
@@ -550,7 +548,7 @@
 {
 	struct avc_node *node;
 
-	node = kmem_cache_zalloc(avc_node_cachep, GFP_ATOMIC|__GFP_NOMEMALLOC);
+	node = kmem_cache_zalloc(avc_node_cachep, GFP_NOWAIT);
 	if (!node)
 		goto out;
 
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index e26ecb0..e53e076 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -98,7 +98,7 @@
 static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
 
 #ifdef CONFIG_SECURITY_SELINUX_DEVELOP
-int selinux_enforcing;
+int selinux_enforcing __aligned(0x1000) __attribute__((section(".bss_rtic")));
 
 static int __init enforcing_setup(char *str)
 {
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index ebc9fdf..3321348 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -466,7 +466,6 @@
 		v = snd_pcm_hw_param_last(pcm, params, var, dir);
 	else
 		v = snd_pcm_hw_param_first(pcm, params, var, dir);
-	snd_BUG_ON(v < 0);
 	return v;
 }
 
@@ -1370,8 +1369,11 @@
 
 	if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
 		return tmp;
-	mutex_lock(&runtime->oss.params_lock);
 	while (bytes > 0) {
+		if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
+			tmp = -ERESTARTSYS;
+			break;
+		}
 		if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
 			tmp = bytes;
 			if (tmp + runtime->oss.buffer_used > runtime->oss.period_bytes)
@@ -1415,14 +1417,18 @@
 			xfer += tmp;
 			if ((substream->f_flags & O_NONBLOCK) != 0 &&
 			    tmp != runtime->oss.period_bytes)
-				break;
+				tmp = -EAGAIN;
 		}
-	}
-	mutex_unlock(&runtime->oss.params_lock);
-	return xfer;
-
  err:
-	mutex_unlock(&runtime->oss.params_lock);
+		mutex_unlock(&runtime->oss.params_lock);
+		if (tmp < 0)
+			break;
+		if (signal_pending(current)) {
+			tmp = -ERESTARTSYS;
+			break;
+		}
+		tmp = 0;
+	}
 	return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
 }
 
@@ -1470,8 +1476,11 @@
 
 	if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
 		return tmp;
-	mutex_lock(&runtime->oss.params_lock);
 	while (bytes > 0) {
+		if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
+			tmp = -ERESTARTSYS;
+			break;
+		}
 		if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
 			if (runtime->oss.buffer_used == 0) {
 				tmp = snd_pcm_oss_read2(substream, runtime->oss.buffer, runtime->oss.period_bytes, 1);
@@ -1502,12 +1511,16 @@
 			bytes -= tmp;
 			xfer += tmp;
 		}
-	}
-	mutex_unlock(&runtime->oss.params_lock);
-	return xfer;
-
  err:
-	mutex_unlock(&runtime->oss.params_lock);
+		mutex_unlock(&runtime->oss.params_lock);
+		if (tmp < 0)
+			break;
+		if (signal_pending(current)) {
+			tmp = -ERESTARTSYS;
+			break;
+		}
+		tmp = 0;
+	}
 	return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
 }
 
diff --git a/sound/core/oss/pcm_plugin.c b/sound/core/oss/pcm_plugin.c
index 727ac44..a84a1d3 100644
--- a/sound/core/oss/pcm_plugin.c
+++ b/sound/core/oss/pcm_plugin.c
@@ -591,18 +591,26 @@
 	snd_pcm_sframes_t frames = size;
 
 	plugin = snd_pcm_plug_first(plug);
-	while (plugin && frames > 0) {
+	while (plugin) {
+		if (frames <= 0)
+			return frames;
 		if ((next = plugin->next) != NULL) {
 			snd_pcm_sframes_t frames1 = frames;
-			if (plugin->dst_frames)
+			if (plugin->dst_frames) {
 				frames1 = plugin->dst_frames(plugin, frames);
+				if (frames1 <= 0)
+					return frames1;
+			}
 			if ((err = next->client_channels(next, frames1, &dst_channels)) < 0) {
 				return err;
 			}
 			if (err != frames1) {
 				frames = err;
-				if (plugin->src_frames)
+				if (plugin->src_frames) {
 					frames = plugin->src_frames(plugin, frames1);
+					if (frames <= 0)
+						return frames;
+				}
 			}
 		} else
 			dst_channels = NULL;
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index 4fc68b1..48f6aee 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -149,7 +149,9 @@
 				err = -ENXIO;
 				goto _error;
 			}
+			mutex_lock(&pcm->open_mutex);
 			err = snd_pcm_info_user(substream, info);
+			mutex_unlock(&pcm->open_mutex);
 		_error:
 			mutex_unlock(&register_mutex);
 			return err;
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index f8d0bd8..25deca44 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -267,8 +267,10 @@
 				runtime->rate);
 		*audio_tstamp = ns_to_timespec(audio_nsecs);
 	}
-	runtime->status->audio_tstamp = *audio_tstamp;
-	runtime->status->tstamp = *curr_tstamp;
+	if (!timespec_equal(&runtime->status->audio_tstamp, audio_tstamp)) {
+		runtime->status->audio_tstamp = *audio_tstamp;
+		runtime->status->tstamp = *curr_tstamp;
+	}
 
 	/*
 	 * re-take a driver timestamp to let apps detect if the reference tstamp
@@ -1666,7 +1668,7 @@
 		return changed;
 	if (params->rmask) {
 		int err = snd_pcm_hw_refine(pcm, params);
-		if (snd_BUG_ON(err < 0))
+		if (err < 0)
 			return err;
 	}
 	return snd_pcm_hw_param_value(params, var, dir);
@@ -1713,7 +1715,7 @@
 		return changed;
 	if (params->rmask) {
 		int err = snd_pcm_hw_refine(pcm, params);
-		if (snd_BUG_ON(err < 0))
+		if (err < 0)
 			return err;
 	}
 	return snd_pcm_hw_param_value(params, var, dir);
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index b450a27..16f8124 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -579,15 +579,14 @@
 	return 0;
 }
 
-int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info)
+static int __snd_rawmidi_info_select(struct snd_card *card,
+				     struct snd_rawmidi_info *info)
 {
 	struct snd_rawmidi *rmidi;
 	struct snd_rawmidi_str *pstr;
 	struct snd_rawmidi_substream *substream;
 
-	mutex_lock(&register_mutex);
 	rmidi = snd_rawmidi_search(card, info->device);
-	mutex_unlock(&register_mutex);
 	if (!rmidi)
 		return -ENXIO;
 	if (info->stream < 0 || info->stream > 1)
@@ -603,6 +602,16 @@
 	}
 	return -ENXIO;
 }
+
+int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info)
+{
+	int ret;
+
+	mutex_lock(&register_mutex);
+	ret = __snd_rawmidi_info_select(card, info);
+	mutex_unlock(&register_mutex);
+	return ret;
+}
 EXPORT_SYMBOL(snd_rawmidi_info_select);
 
 static int snd_rawmidi_info_select_user(struct snd_card *card,
diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
index 37d9cfb..b80985f 100644
--- a/sound/core/seq/seq_timer.c
+++ b/sound/core/seq/seq_timer.c
@@ -355,7 +355,7 @@
 	unsigned long freq;
 
 	t = tmr->timeri->timer;
-	if (snd_BUG_ON(!t))
+	if (!t)
 		return -EINVAL;
 
 	freq = tmr->preferred_resolution;
diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c
index 59127b6..e00f7e3 100644
--- a/sound/core/timer_compat.c
+++ b/sound/core/timer_compat.c
@@ -66,11 +66,11 @@
 	struct snd_timer *t;
 
 	tu = file->private_data;
-	if (snd_BUG_ON(!tu->timeri))
-		return -ENXIO;
+	if (!tu->timeri)
+		return -EBADFD;
 	t = tu->timeri->timer;
-	if (snd_BUG_ON(!t))
-		return -ENXIO;
+	if (!t)
+		return -EBADFD;
 	memset(&info, 0, sizeof(info));
 	info.card = t->card ? t->card->number : -1;
 	if (t->hw.flags & SNDRV_TIMER_HW_SLAVE)
@@ -99,8 +99,8 @@
 	struct snd_timer_status32 status;
 	
 	tu = file->private_data;
-	if (snd_BUG_ON(!tu->timeri))
-		return -ENXIO;
+	if (!tu->timeri)
+		return -EBADFD;
 	memset(&status, 0, sizeof(status));
 	status.tstamp.tv_sec = tu->tstamp.tv_sec;
 	status.tstamp.tv_nsec = tu->tstamp.tv_nsec;
diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
index 54f348a..cbd20cb 100644
--- a/sound/drivers/aloop.c
+++ b/sound/drivers/aloop.c
@@ -39,6 +39,7 @@
 #include <sound/core.h>
 #include <sound/control.h>
 #include <sound/pcm.h>
+#include <sound/pcm_params.h>
 #include <sound/info.h>
 #include <sound/initval.h>
 
@@ -305,19 +306,6 @@
 	return 0;
 }
 
-static void params_change_substream(struct loopback_pcm *dpcm,
-				    struct snd_pcm_runtime *runtime)
-{
-	struct snd_pcm_runtime *dst_runtime;
-
-	if (dpcm == NULL || dpcm->substream == NULL)
-		return;
-	dst_runtime = dpcm->substream->runtime;
-	if (dst_runtime == NULL)
-		return;
-	dst_runtime->hw = dpcm->cable->hw;
-}
-
 static void params_change(struct snd_pcm_substream *substream)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
@@ -329,10 +317,6 @@
 	cable->hw.rate_max = runtime->rate;
 	cable->hw.channels_min = runtime->channels;
 	cable->hw.channels_max = runtime->channels;
-	params_change_substream(cable->streams[SNDRV_PCM_STREAM_PLAYBACK],
-				runtime);
-	params_change_substream(cable->streams[SNDRV_PCM_STREAM_CAPTURE],
-				runtime);
 }
 
 static int loopback_prepare(struct snd_pcm_substream *substream)
@@ -620,26 +604,29 @@
 static int rule_format(struct snd_pcm_hw_params *params,
 		       struct snd_pcm_hw_rule *rule)
 {
+	struct loopback_pcm *dpcm = rule->private;
+	struct loopback_cable *cable = dpcm->cable;
+	struct snd_mask m;
 
-	struct snd_pcm_hardware *hw = rule->private;
-	struct snd_mask *maskp = hw_param_mask(params, rule->var);
-
-	maskp->bits[0] &= (u_int32_t)hw->formats;
-	maskp->bits[1] &= (u_int32_t)(hw->formats >> 32);
-	memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */
-	if (! maskp->bits[0] && ! maskp->bits[1])
-		return -EINVAL;
-	return 0;
+	snd_mask_none(&m);
+	mutex_lock(&dpcm->loopback->cable_lock);
+	m.bits[0] = (u_int32_t)cable->hw.formats;
+	m.bits[1] = (u_int32_t)(cable->hw.formats >> 32);
+	mutex_unlock(&dpcm->loopback->cable_lock);
+	return snd_mask_refine(hw_param_mask(params, rule->var), &m);
 }
 
 static int rule_rate(struct snd_pcm_hw_params *params,
 		     struct snd_pcm_hw_rule *rule)
 {
-	struct snd_pcm_hardware *hw = rule->private;
+	struct loopback_pcm *dpcm = rule->private;
+	struct loopback_cable *cable = dpcm->cable;
 	struct snd_interval t;
 
-        t.min = hw->rate_min;
-        t.max = hw->rate_max;
+	mutex_lock(&dpcm->loopback->cable_lock);
+	t.min = cable->hw.rate_min;
+	t.max = cable->hw.rate_max;
+	mutex_unlock(&dpcm->loopback->cable_lock);
         t.openmin = t.openmax = 0;
         t.integer = 0;
 	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
@@ -648,22 +635,44 @@
 static int rule_channels(struct snd_pcm_hw_params *params,
 			 struct snd_pcm_hw_rule *rule)
 {
-	struct snd_pcm_hardware *hw = rule->private;
+	struct loopback_pcm *dpcm = rule->private;
+	struct loopback_cable *cable = dpcm->cable;
 	struct snd_interval t;
 
-        t.min = hw->channels_min;
-        t.max = hw->channels_max;
+	mutex_lock(&dpcm->loopback->cable_lock);
+	t.min = cable->hw.channels_min;
+	t.max = cable->hw.channels_max;
+	mutex_unlock(&dpcm->loopback->cable_lock);
         t.openmin = t.openmax = 0;
         t.integer = 0;
 	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
 }
 
+static void free_cable(struct snd_pcm_substream *substream)
+{
+	struct loopback *loopback = substream->private_data;
+	int dev = get_cable_index(substream);
+	struct loopback_cable *cable;
+
+	cable = loopback->cables[substream->number][dev];
+	if (!cable)
+		return;
+	if (cable->streams[!substream->stream]) {
+		/* other stream is still alive */
+		cable->streams[substream->stream] = NULL;
+	} else {
+		/* free the cable */
+		loopback->cables[substream->number][dev] = NULL;
+		kfree(cable);
+	}
+}
+
 static int loopback_open(struct snd_pcm_substream *substream)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	struct loopback *loopback = substream->private_data;
 	struct loopback_pcm *dpcm;
-	struct loopback_cable *cable;
+	struct loopback_cable *cable = NULL;
 	int err = 0;
 	int dev = get_cable_index(substream);
 
@@ -682,7 +691,6 @@
 	if (!cable) {
 		cable = kzalloc(sizeof(*cable), GFP_KERNEL);
 		if (!cable) {
-			kfree(dpcm);
 			err = -ENOMEM;
 			goto unlock;
 		}
@@ -700,19 +708,19 @@
 	/* are cached -> they do not reflect the actual state */
 	err = snd_pcm_hw_rule_add(runtime, 0,
 				  SNDRV_PCM_HW_PARAM_FORMAT,
-				  rule_format, &runtime->hw,
+				  rule_format, dpcm,
 				  SNDRV_PCM_HW_PARAM_FORMAT, -1);
 	if (err < 0)
 		goto unlock;
 	err = snd_pcm_hw_rule_add(runtime, 0,
 				  SNDRV_PCM_HW_PARAM_RATE,
-				  rule_rate, &runtime->hw,
+				  rule_rate, dpcm,
 				  SNDRV_PCM_HW_PARAM_RATE, -1);
 	if (err < 0)
 		goto unlock;
 	err = snd_pcm_hw_rule_add(runtime, 0,
 				  SNDRV_PCM_HW_PARAM_CHANNELS,
-				  rule_channels, &runtime->hw,
+				  rule_channels, dpcm,
 				  SNDRV_PCM_HW_PARAM_CHANNELS, -1);
 	if (err < 0)
 		goto unlock;
@@ -724,6 +732,10 @@
 	else
 		runtime->hw = cable->hw;
  unlock:
+	if (err < 0) {
+		free_cable(substream);
+		kfree(dpcm);
+	}
 	mutex_unlock(&loopback->cable_lock);
 	return err;
 }
@@ -732,20 +744,10 @@
 {
 	struct loopback *loopback = substream->private_data;
 	struct loopback_pcm *dpcm = substream->runtime->private_data;
-	struct loopback_cable *cable;
-	int dev = get_cable_index(substream);
 
 	loopback_timer_stop(dpcm);
 	mutex_lock(&loopback->cable_lock);
-	cable = loopback->cables[substream->number][dev];
-	if (cable->streams[!substream->stream]) {
-		/* other stream is still alive */
-		cable->streams[substream->stream] = NULL;
-	} else {
-		/* free the cable */
-		loopback->cables[substream->number][dev] = NULL;
-		kfree(cable);
-	}
+	free_cable(substream);
 	mutex_unlock(&loopback->cable_lock);
 	return 0;
 }
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
index c9af022..47c3e97 100644
--- a/sound/hda/hdac_i915.c
+++ b/sound/hda/hdac_i915.c
@@ -319,7 +319,7 @@
  */
 int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *aops)
 {
-	if (WARN_ON(!hdac_acomp))
+	if (!hdac_acomp)
 		return -ENODEV;
 
 	hdac_acomp->audio_ops = aops;
diff --git a/sound/hda/hdmi_chmap.c b/sound/hda/hdmi_chmap.c
index 81acc20..f21633c 100644
--- a/sound/hda/hdmi_chmap.c
+++ b/sound/hda/hdmi_chmap.c
@@ -746,7 +746,7 @@
 	memset(pcm_chmap, 0, sizeof(pcm_chmap));
 	chmap->ops.get_chmap(chmap->hdac, pcm_idx, pcm_chmap);
 
-	for (i = 0; i < sizeof(chmap); i++)
+	for (i = 0; i < ARRAY_SIZE(pcm_chmap); i++)
 		ucontrol->value.integer.value[i] = pcm_chmap[i];
 
 	return 0;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 5cb7e04..293f3f2 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2305,6 +2305,9 @@
 	/* AMD Hudson */
 	{ PCI_DEVICE(0x1022, 0x780d),
 	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
+	/* AMD Raven */
+	{ PCI_DEVICE(0x1022, 0x15e3),
+	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
 	/* ATI HDMI */
 	{ PCI_DEVICE(0x1002, 0x0002),
 	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index f2e4e99..2c3065c 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -261,6 +261,7 @@
 	CXT_FIXUP_HP_530,
 	CXT_FIXUP_CAP_MIX_AMP_5047,
 	CXT_FIXUP_MUTE_LED_EAPD,
+	CXT_FIXUP_HP_DOCK,
 	CXT_FIXUP_HP_SPECTRE,
 	CXT_FIXUP_HP_GATE_MIC,
 };
@@ -778,6 +779,14 @@
 		.type = HDA_FIXUP_FUNC,
 		.v.func = cxt_fixup_mute_led_eapd,
 	},
+	[CXT_FIXUP_HP_DOCK] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{ 0x16, 0x21011020 }, /* line-out */
+			{ 0x18, 0x2181103f }, /* line-in */
+			{ }
+		}
+	},
 	[CXT_FIXUP_HP_SPECTRE] = {
 		.type = HDA_FIXUP_PINS,
 		.v.pins = (const struct hda_pintbl[]) {
@@ -839,6 +848,7 @@
 	SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC),
 	SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
 	SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC),
+	SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
 	SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
 	SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
 	SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
@@ -872,6 +882,7 @@
 	{ .id = CXT_PINCFG_LEMOTE_A1205, .name = "lemote-a1205" },
 	{ .id = CXT_FIXUP_OLPC_XO, .name = "olpc-xo" },
 	{ .id = CXT_FIXUP_MUTE_LED_EAPD, .name = "mute-led-eapd" },
+	{ .id = CXT_FIXUP_HP_DOCK, .name = "hp-dock" },
 	{}
 };
 
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 80c40a1..4ef3b00 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4419,7 +4419,7 @@
 static void alc_fixup_no_shutup(struct hda_codec *codec,
 				const struct hda_fixup *fix, int action)
 {
-	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+	if (action == HDA_FIXUP_ACT_PROBE) {
 		struct alc_spec *spec = codec->spec;
 		spec->shutup = alc_no_shutup;
 	}
@@ -4854,6 +4854,7 @@
 	ALC286_FIXUP_HP_GPIO_LED,
 	ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY,
 	ALC280_FIXUP_HP_DOCK_PINS,
+	ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED,
 	ALC280_FIXUP_HP_9480M,
 	ALC288_FIXUP_DELL_HEADSET_MODE,
 	ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
@@ -5394,6 +5395,16 @@
 		.chained = true,
 		.chain_id = ALC280_FIXUP_HP_GPIO4
 	},
+	[ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{ 0x1b, 0x21011020 }, /* line-out */
+			{ 0x18, 0x2181103f }, /* line-in */
+			{ },
+		},
+		.chained = true,
+		.chain_id = ALC269_FIXUP_HP_GPIO_MIC1_LED
+	},
 	[ALC280_FIXUP_HP_9480M] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc280_fixup_hp_9480m,
@@ -5646,7 +5657,7 @@
 	SND_PCI_QUIRK(0x103c, 0x2256, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
 	SND_PCI_QUIRK(0x103c, 0x2257, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
 	SND_PCI_QUIRK(0x103c, 0x2259, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
-	SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+	SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED),
 	SND_PCI_QUIRK(0x103c, 0x2260, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
 	SND_PCI_QUIRK(0x103c, 0x2263, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
 	SND_PCI_QUIRK(0x103c, 0x2264, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
@@ -5812,6 +5823,7 @@
 	{.id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC, .name = "headset-mode-no-hp-mic"},
 	{.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"},
 	{.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
+	{.id = ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED, .name = "hp-dock-gpio-mic1-led"},
 	{.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
 	{.id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, .name = "dell-headset-dock"},
 	{.id = ALC283_FIXUP_CHROME_BOOK, .name = "alc283-dac-wcaps"},
@@ -5960,6 +5972,11 @@
 		{0x1b, 0x01011020},
 		{0x21, 0x02211010}),
 	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+		{0x12, 0x90a60130},
+		{0x14, 0x90170110},
+		{0x1b, 0x01011020},
+		{0x21, 0x0221101f}),
+	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
 		{0x12, 0x90a60160},
 		{0x14, 0x90170120},
 		{0x21, 0x02211030}),
@@ -6272,7 +6289,7 @@
 	case 0x10ec0703:
 		spec->codec_variant = ALC269_TYPE_ALC700;
 		spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
-		alc_update_coef_idx(codec, 0x4a, 0, 1 << 15); /* Combo jack auto trigger control */
+		alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
 		break;
 
 	}
diff --git a/sound/soc/codecs/da7218.c b/sound/soc/codecs/da7218.c
index c69e976..f886324 100644
--- a/sound/soc/codecs/da7218.c
+++ b/sound/soc/codecs/da7218.c
@@ -2519,7 +2519,7 @@
 	}
 
 	if (da7218->dev_id == DA7218_DEV_ID) {
-		hpldet_np = of_find_node_by_name(np, "da7218_hpldet");
+		hpldet_np = of_get_child_by_name(np, "da7218_hpldet");
 		if (!hpldet_np)
 			return pdata;
 
diff --git a/sound/soc/codecs/tlv320aic31xx.h b/sound/soc/codecs/tlv320aic31xx.h
index 5acd5b6..f9b6c5a 100644
--- a/sound/soc/codecs/tlv320aic31xx.h
+++ b/sound/soc/codecs/tlv320aic31xx.h
@@ -115,7 +115,7 @@
 /* INT2 interrupt control */
 #define AIC31XX_INT2CTRL	AIC31XX_REG(0, 49)
 /* GPIO1 control */
-#define AIC31XX_GPIO1		AIC31XX_REG(0, 50)
+#define AIC31XX_GPIO1		AIC31XX_REG(0, 51)
 
 #define AIC31XX_DACPRB		AIC31XX_REG(0, 60)
 /* ADC Instruction Set Register */
diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c
index a2104d6..26fd6a6 100644
--- a/sound/soc/codecs/twl4030.c
+++ b/sound/soc/codecs/twl4030.c
@@ -232,7 +232,7 @@
 	struct twl4030_codec_data *pdata = dev_get_platdata(codec->dev);
 	struct device_node *twl4030_codec_node = NULL;
 
-	twl4030_codec_node = of_find_node_by_name(codec->dev->parent->of_node,
+	twl4030_codec_node = of_get_child_by_name(codec->dev->parent->of_node,
 						  "codec");
 
 	if (!pdata && twl4030_codec_node) {
@@ -241,9 +241,11 @@
 				     GFP_KERNEL);
 		if (!pdata) {
 			dev_err(codec->dev, "Can not allocate memory\n");
+			of_node_put(twl4030_codec_node);
 			return NULL;
 		}
 		twl4030_setup_pdata_of(pdata, twl4030_codec_node);
+		of_node_put(twl4030_codec_node);
 	}
 
 	return pdata;
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index 3bdd819..c03c9da 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -1365,7 +1365,7 @@
 	const struct wmfw_region *region;
 	const struct wm_adsp_region *mem;
 	const char *region_name;
-	char *file, *text;
+	char *file, *text = NULL;
 	struct wm_adsp_buf *buf;
 	unsigned int reg;
 	int regions = 0;
@@ -1465,7 +1465,7 @@
 		 le64_to_cpu(footer->timestamp));
 
 	while (pos < firmware->size &&
-	       pos - firmware->size > sizeof(*region)) {
+	       sizeof(*region) < firmware->size - pos) {
 		region = (void *)&(firmware->data[pos]);
 		region_name = "Unknown";
 		reg = 0;
@@ -1526,10 +1526,21 @@
 			 regions, le32_to_cpu(region->len), offset,
 			 region_name);
 
+		if (le32_to_cpu(region->len) >
+		    firmware->size - pos - sizeof(*region)) {
+			adsp_err(dsp,
+				 "%s.%d: %s region len %d bytes exceeds file length %zu\n",
+				 file, regions, region_name,
+				 le32_to_cpu(region->len), firmware->size);
+			ret = -EINVAL;
+			goto out_fw;
+		}
+
 		if (text) {
 			memcpy(text, region->data, le32_to_cpu(region->len));
 			adsp_info(dsp, "%s: %s\n", file, text);
 			kfree(text);
+			text = NULL;
 		}
 
 		if (reg) {
@@ -1574,6 +1585,7 @@
 	regmap_async_complete(regmap);
 	wm_adsp_buf_free(&buf_list);
 	release_firmware(firmware);
+	kfree(text);
 out:
 	kfree(file);
 
@@ -1980,7 +1992,7 @@
 
 	blocks = 0;
 	while (pos < firmware->size &&
-	       pos - firmware->size > sizeof(*blk)) {
+	       sizeof(*blk) < firmware->size - pos) {
 		blk = (void *)(&firmware->data[pos]);
 
 		type = le16_to_cpu(blk->type);
@@ -2054,6 +2066,17 @@
 		}
 
 		if (reg) {
+			if (le32_to_cpu(blk->len) >
+			    firmware->size - pos - sizeof(*blk)) {
+				adsp_err(dsp,
+					 "%s.%d: %s region len %d bytes exceeds file length %zu\n",
+					 file, blocks, region_name,
+					 le32_to_cpu(blk->len),
+					 firmware->size);
+				ret = -EINVAL;
+				goto out_fw;
+			}
+
 			buf = wm_adsp_buf_alloc(blk->data,
 						le32_to_cpu(blk->len),
 						&buf_list);
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index fde08660..1c03490 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -1467,12 +1467,6 @@
 				sizeof(fsl_ssi_ac97_dai));
 
 		fsl_ac97_data = ssi_private;
-
-		ret = snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev);
-		if (ret) {
-			dev_err(&pdev->dev, "could not set AC'97 ops\n");
-			return ret;
-		}
 	} else {
 		/* Initialize this copy of the CPU DAI driver structure */
 		memcpy(&ssi_private->cpu_dai_drv, &fsl_ssi_dai_template,
@@ -1583,6 +1577,14 @@
 			return ret;
 	}
 
+	if (fsl_ssi_is_ac97(ssi_private)) {
+		ret = snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev);
+		if (ret) {
+			dev_err(&pdev->dev, "could not set AC'97 ops\n");
+			goto error_ac97_ops;
+		}
+	}
+
 	ret = devm_snd_soc_register_component(&pdev->dev, &fsl_ssi_component,
 					      &ssi_private->cpu_dai_drv, 1);
 	if (ret) {
@@ -1666,6 +1668,10 @@
 	fsl_ssi_debugfs_remove(&ssi_private->dbg_stats);
 
 error_asoc_register:
+	if (fsl_ssi_is_ac97(ssi_private))
+		snd_soc_set_ac97_ops(NULL);
+
+error_ac97_ops:
 	if (ssi_private->soc->imx)
 		fsl_ssi_imx_clean(pdev, ssi_private);
 
diff --git a/sound/soc/img/img-parallel-out.c b/sound/soc/img/img-parallel-out.c
index c1610a0..3cf522d 100644
--- a/sound/soc/img/img-parallel-out.c
+++ b/sound/soc/img/img-parallel-out.c
@@ -166,9 +166,11 @@
 		return -EINVAL;
 	}
 
+	pm_runtime_get_sync(prl->dev);
 	reg = img_prl_out_readl(prl, IMG_PRL_OUT_CTL);
 	reg = (reg & ~IMG_PRL_OUT_CTL_EDGE_MASK) | control_set;
 	img_prl_out_writel(prl, reg, IMG_PRL_OUT_CTL);
+	pm_runtime_put(prl->dev);
 
 	return 0;
 }
diff --git a/sound/soc/intel/skylake/skl-sst-utils.c b/sound/soc/intel/skylake/skl-sst-utils.c
index ea162fb..d5adc04 100644
--- a/sound/soc/intel/skylake/skl-sst-utils.c
+++ b/sound/soc/intel/skylake/skl-sst-utils.c
@@ -295,6 +295,7 @@
 	struct uuid_module *module;
 	struct firmware stripped_fw;
 	unsigned int safe_file;
+	int ret = 0;
 
 	/* Get the FW pointer to derive ADSP header */
 	stripped_fw.data = fw->data;
@@ -343,8 +344,10 @@
 
 	for (i = 0; i < num_entry; i++, mod_entry++) {
 		module = kzalloc(sizeof(*module), GFP_KERNEL);
-		if (!module)
-			return -ENOMEM;
+		if (!module) {
+			ret = -ENOMEM;
+			goto free_uuid_list;
+		}
 
 		uuid_bin = (uuid_le *)mod_entry->uuid.id;
 		memcpy(&module->uuid, uuid_bin, sizeof(module->uuid));
@@ -355,8 +358,8 @@
 		size = sizeof(int) * mod_entry->instance_max_count;
 		module->instance_id = devm_kzalloc(ctx->dev, size, GFP_KERNEL);
 		if (!module->instance_id) {
-			kfree(module);
-			return -ENOMEM;
+			ret = -ENOMEM;
+			goto free_uuid_list;
 		}
 
 		list_add_tail(&module->list, &skl->uuid_list);
@@ -367,6 +370,10 @@
 	}
 
 	return 0;
+
+free_uuid_list:
+	skl_freeup_uuid_list(skl);
+	return ret;
 }
 
 void skl_freeup_uuid_list(struct skl_sst *ctx)
diff --git a/sound/soc/sh/rcar/cmd.c b/sound/soc/sh/rcar/cmd.c
index abb5eaa..7d92a24 100644
--- a/sound/soc/sh/rcar/cmd.c
+++ b/sound/soc/sh/rcar/cmd.c
@@ -31,23 +31,24 @@
 	struct rsnd_mod *mix = rsnd_io_to_mod_mix(io);
 	struct device *dev = rsnd_priv_to_dev(priv);
 	u32 data;
+	u32 path[] = {
+		[1] = 1 << 0,
+		[5] = 1 << 8,
+		[6] = 1 << 12,
+		[9] = 1 << 15,
+	};
 
 	if (!mix && !dvc)
 		return 0;
 
+	if (ARRAY_SIZE(path) < rsnd_mod_id(mod) + 1)
+		return -ENXIO;
+
 	if (mix) {
 		struct rsnd_dai *rdai;
 		struct rsnd_mod *src;
 		struct rsnd_dai_stream *tio;
 		int i;
-		u32 path[] = {
-			[0] = 0,
-			[1] = 1 << 0,
-			[2] = 0,
-			[3] = 0,
-			[4] = 0,
-			[5] = 1 << 8
-		};
 
 		/*
 		 * it is assuming that integrater is well understanding about
@@ -70,16 +71,19 @@
 	} else {
 		struct rsnd_mod *src = rsnd_io_to_mod_src(io);
 
-		u32 path[] = {
-			[0] = 0x30000,
-			[1] = 0x30001,
-			[2] = 0x40000,
-			[3] = 0x10000,
-			[4] = 0x20000,
-			[5] = 0x40100
+		u8 cmd_case[] = {
+			[0] = 0x3,
+			[1] = 0x3,
+			[2] = 0x4,
+			[3] = 0x1,
+			[4] = 0x2,
+			[5] = 0x4,
+			[6] = 0x1,
+			[9] = 0x2,
 		};
 
-		data = path[rsnd_mod_id(src)];
+		data = path[rsnd_mod_id(src)] |
+			cmd_case[rsnd_mod_id(src)] << 16;
 	}
 
 	dev_dbg(dev, "ctu/mix path = 0x%08x", data);
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index f181410..91b444d 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -978,10 +978,8 @@
 		return -ENOMEM;
 
 	ret = snd_ctl_add(card, kctrl);
-	if (ret < 0) {
-		snd_ctl_free_one(kctrl);
+	if (ret < 0)
 		return ret;
-	}
 
 	cfg->update = update;
 	cfg->card = card;
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
index 6bc93cb..edeb74a 100644
--- a/sound/soc/sh/rcar/dma.c
+++ b/sound/soc/sh/rcar/dma.c
@@ -361,6 +361,20 @@
 	return ioread32(rsnd_dmapp_addr(dmac, dma, reg));
 }
 
+static void rsnd_dmapp_bset(struct rsnd_dma *dma, u32 data, u32 mask, u32 reg)
+{
+	struct rsnd_mod *mod = rsnd_mod_get(dma);
+	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
+	volatile void __iomem *addr = rsnd_dmapp_addr(dmac, dma, reg);
+	u32 val = ioread32(addr);
+
+	val &= ~mask;
+	val |= (data & mask);
+
+	iowrite32(val, addr);
+}
+
 static int rsnd_dmapp_stop(struct rsnd_mod *mod,
 			   struct rsnd_dai_stream *io,
 			   struct rsnd_priv *priv)
@@ -368,10 +382,10 @@
 	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
 	int i;
 
-	rsnd_dmapp_write(dma, 0, PDMACHCR);
+	rsnd_dmapp_bset(dma, 0,  PDMACHCR_DE, PDMACHCR);
 
 	for (i = 0; i < 1024; i++) {
-		if (0 == rsnd_dmapp_read(dma, PDMACHCR))
+		if (0 == (rsnd_dmapp_read(dma, PDMACHCR) & PDMACHCR_DE))
 			return 0;
 		udelay(1);
 	}
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index 6cb6db0..560cf4b 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -172,10 +172,15 @@
 {
 	struct rsnd_mod *ssi_mod = rsnd_io_to_mod_ssi(io);
 	struct rsnd_mod *ssi_parent_mod = rsnd_io_to_mod_ssip(io);
+	u32 mods;
 
-	return rsnd_ssi_multi_slaves_runtime(io) |
-		1 << rsnd_mod_id(ssi_mod) |
-		1 << rsnd_mod_id(ssi_parent_mod);
+	mods = rsnd_ssi_multi_slaves_runtime(io) |
+		1 << rsnd_mod_id(ssi_mod);
+
+	if (ssi_parent_mod)
+		mods |= 1 << rsnd_mod_id(ssi_parent_mod);
+
+	return mods;
 }
 
 u32 rsnd_ssi_multi_slaves_runtime(struct rsnd_dai_stream *io)
diff --git a/sound/soc/sh/rcar/ssiu.c b/sound/soc/sh/rcar/ssiu.c
index 6f9b388..3f95d6b 100644
--- a/sound/soc/sh/rcar/ssiu.c
+++ b/sound/soc/sh/rcar/ssiu.c
@@ -44,7 +44,11 @@
 	mask1 = (1 << 4) | (1 << 20);	/* mask sync bit */
 	mask2 = (1 << 4);		/* mask sync bit */
 	val1  = val2  = 0;
-	if (rsnd_ssi_is_pin_sharing(io)) {
+	if (id == 8) {
+		/*
+		 * SSI8 pin is sharing with SSI7, nothing to do.
+		 */
+	} else if (rsnd_ssi_is_pin_sharing(io)) {
 		int shift = -1;
 
 		switch (id) {
diff --git a/sound/soc/sti/uniperif_reader.c b/sound/soc/sti/uniperif_reader.c
index 0e1c3ee..9735b4c 100644
--- a/sound/soc/sti/uniperif_reader.c
+++ b/sound/soc/sti/uniperif_reader.c
@@ -364,6 +364,8 @@
 	struct uniperif *reader = priv->dai_data.uni;
 	int ret;
 
+	reader->substream = substream;
+
 	if (!UNIPERIF_TYPE_IS_TDM(reader))
 		return 0;
 
@@ -393,6 +395,7 @@
 		/* Stop the reader */
 		uni_reader_stop(reader);
 	}
+	reader->substream = NULL;
 }
 
 static const struct snd_soc_dai_ops uni_reader_dai_ops = {
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index 8238180..09c6e29 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -43,7 +43,7 @@
 	while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra,
 					     ctrl_iface->extralen,
 					     cs, UAC2_CLOCK_SOURCE))) {
-		if (cs->bClockID == clock_id)
+		if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id)
 			return cs;
 	}
 
@@ -59,8 +59,11 @@
 	while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra,
 					     ctrl_iface->extralen,
 					     cs, UAC2_CLOCK_SELECTOR))) {
-		if (cs->bClockID == clock_id)
+		if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id) {
+			if (cs->bLength < 5 + cs->bNrInPins)
+				return NULL;
 			return cs;
+		}
 	}
 
 	return NULL;
@@ -75,7 +78,7 @@
 	while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra,
 					     ctrl_iface->extralen,
 					     cs, UAC2_CLOCK_MULTIPLIER))) {
-		if (cs->bClockID == clock_id)
+		if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id)
 			return cs;
 	}
 
diff --git a/sound/usb/helper.c b/sound/usb/helper.c
index 7712e2b..4783648 100644
--- a/sound/usb/helper.c
+++ b/sound/usb/helper.c
@@ -122,7 +122,7 @@
 	case USB_SPEED_SUPER:
 	case USB_SPEED_SUPER_PLUS:
 		if (get_endpoint(alts, 0)->bInterval >= 1 &&
-		    get_endpoint(alts, 0)->bInterval <= 4)
+		    get_endpoint(alts, 0)->bInterval <= 16)
 			return get_endpoint(alts, 0)->bInterval - 1;
 		break;
 	default:
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 3501ff9..98f879f 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -216,6 +216,11 @@
 				    int index, char *buf, int maxlen)
 {
 	int len = usb_string(state->chip->dev, index, buf, maxlen - 1);
+
+	if (len < 0)
+		return 0;
+
+	buf[len] = 0;
 	return len;
 }
 
@@ -1607,6 +1612,12 @@
 	__u8 *bmaControls;
 
 	if (state->mixer->protocol == UAC_VERSION_1) {
+		if (hdr->bLength < 7) {
+			usb_audio_err(state->chip,
+				      "unit %u: invalid UAC_FEATURE_UNIT descriptor\n",
+				      unitid);
+			return -EINVAL;
+		}
 		csize = hdr->bControlSize;
 		if (!csize) {
 			usb_audio_dbg(state->chip,
@@ -1624,6 +1635,12 @@
 		}
 	} else if (state->mixer->protocol == UAC_VERSION_2) {
 		struct uac2_feature_unit_descriptor *ftr = _ftr;
+		if (hdr->bLength < 6) {
+			usb_audio_err(state->chip,
+				      "unit %u: invalid UAC_FEATURE_UNIT descriptor\n",
+				      unitid);
+			return -EINVAL;
+		}
 		csize = 4;
 		channels = (hdr->bLength - 6) / 4 - 1;
 		bmaControls = ftr->bmaControls;
@@ -2344,7 +2361,8 @@
 	const struct usbmix_name_map *map;
 	char **namelist;
 
-	if (!desc->bNrInPins || desc->bLength < 5 + desc->bNrInPins) {
+	if (desc->bLength < 5 || !desc->bNrInPins ||
+	    desc->bLength < 5 + desc->bNrInPins) {
 		usb_audio_err(state->chip,
 			"invalid SELECTOR UNIT descriptor %d\n", unitid);
 		return -EINVAL;
@@ -2414,19 +2432,25 @@
 	kctl->private_value = (unsigned long)namelist;
 	kctl->private_free = usb_mixer_selector_elem_free;
 
-	nameid = uac_selector_unit_iSelector(desc);
+	/* check the static mapping table at first */
 	len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name));
-	if (len)
-		;
-	else if (nameid)
-		snd_usb_copy_string_desc(state, nameid, kctl->id.name,
-					 sizeof(kctl->id.name));
-	else {
-		len = get_term_name(state, &state->oterm,
+	if (!len) {
+		/* no mapping ? */
+		/* if iSelector is given, use it */
+		nameid = uac_selector_unit_iSelector(desc);
+		if (nameid)
+			len = snd_usb_copy_string_desc(state, nameid,
+						       kctl->id.name,
+						       sizeof(kctl->id.name));
+		/* ... or pick up the terminal name at next */
+		if (!len)
+			len = get_term_name(state, &state->oterm,
 				    kctl->id.name, sizeof(kctl->id.name), 0);
+		/* ... or use the fixed string "USB" as the last resort */
 		if (!len)
 			strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name));
 
+		/* and add the proper suffix */
 		if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR)
 			append_ctl_name(kctl, " Clock Source");
 		else if ((state->oterm.type & 0xff00) == 0x0100)
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 7613b9e..1cd7f8b 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1170,10 +1170,11 @@
 /* TEAC UD-501/UD-503/NT-503 USB DACs need a vendor cmd to switch
  * between PCM/DOP and native DSD mode
  */
-static bool is_teac_50X_dac(unsigned int id)
+static bool is_teac_dsd_dac(unsigned int id)
 {
 	switch (id) {
 	case USB_ID(0x0644, 0x8043): /* TEAC UD-501/UD-503/NT-503 */
+	case USB_ID(0x0644, 0x8044): /* Esoteric D-05X */
 		return true;
 	}
 	return false;
@@ -1206,7 +1207,7 @@
 			break;
 		}
 		mdelay(20);
-	} else if (is_teac_50X_dac(subs->stream->chip->usb_id)) {
+	} else if (is_teac_dsd_dac(subs->stream->chip->usb_id)) {
 		/* Vendor mode switch cmd is required. */
 		switch (fmt->altsetting) {
 		case 3: /* DSD mode (DSD_U32) requested */
@@ -1376,7 +1377,7 @@
 	}
 
 	/* TEAC devices with USB DAC functionality */
-	if (is_teac_50X_dac(chip->usb_id)) {
+	if (is_teac_dsd_dac(chip->usb_id)) {
 		if (fp->altsetting == 3)
 			return SNDRV_PCM_FMTBIT_DSD_U32_BE;
 	}
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index a396292..f79669a 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -197,6 +197,9 @@
 #define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */
 #define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */
 
+/* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */
+#define X86_FEATURE_KAISER	( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */
+
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW  ( 8*32+ 0) /* Intel TPR Shadow */
 #define X86_FEATURE_VNMI        ( 8*32+ 1) /* Intel Virtual NMI */
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index bc7adb8..60a94b3 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -193,11 +193,14 @@
 	for (;;) {
 		readp = &record[records_read];
 		records_read += fread(readp, sizeof(struct kvp_record),
-					ENTRIES_PER_BLOCK * num_blocks,
-					filep);
+				ENTRIES_PER_BLOCK * num_blocks - records_read,
+				filep);
 
 		if (ferror(filep)) {
-			syslog(LOG_ERR, "Failed to read file, pool: %d", pool);
+			syslog(LOG_ERR,
+				"Failed to read file, pool: %d; error: %d %s",
+				 pool, errno, strerror(errno));
+			kvp_release_lock(pool);
 			exit(EXIT_FAILURE);
 		}
 
@@ -210,6 +213,7 @@
 
 			if (record == NULL) {
 				syslog(LOG_ERR, "malloc failed");
+				kvp_release_lock(pool);
 				exit(EXIT_FAILURE);
 			}
 			continue;
@@ -224,15 +228,11 @@
 	fclose(filep);
 	kvp_release_lock(pool);
 }
+
 static int kvp_file_init(void)
 {
 	int  fd;
-	FILE *filep;
-	size_t records_read;
 	char *fname;
-	struct kvp_record *record;
-	struct kvp_record *readp;
-	int num_blocks;
 	int i;
 	int alloc_unit = sizeof(struct kvp_record) * ENTRIES_PER_BLOCK;
 
@@ -246,61 +246,19 @@
 
 	for (i = 0; i < KVP_POOL_COUNT; i++) {
 		fname = kvp_file_info[i].fname;
-		records_read = 0;
-		num_blocks = 1;
 		sprintf(fname, "%s/.kvp_pool_%d", KVP_CONFIG_LOC, i);
 		fd = open(fname, O_RDWR | O_CREAT | O_CLOEXEC, 0644 /* rw-r--r-- */);
 
 		if (fd == -1)
 			return 1;
 
-
-		filep = fopen(fname, "re");
-		if (!filep) {
-			close(fd);
-			return 1;
-		}
-
-		record = malloc(alloc_unit * num_blocks);
-		if (record == NULL) {
-			fclose(filep);
-			close(fd);
-			return 1;
-		}
-		for (;;) {
-			readp = &record[records_read];
-			records_read += fread(readp, sizeof(struct kvp_record),
-					ENTRIES_PER_BLOCK,
-					filep);
-
-			if (ferror(filep)) {
-				syslog(LOG_ERR, "Failed to read file, pool: %d",
-				       i);
-				exit(EXIT_FAILURE);
-			}
-
-			if (!feof(filep)) {
-				/*
-				 * We have more data to read.
-				 */
-				num_blocks++;
-				record = realloc(record, alloc_unit *
-						num_blocks);
-				if (record == NULL) {
-					fclose(filep);
-					close(fd);
-					return 1;
-				}
-				continue;
-			}
-			break;
-		}
 		kvp_file_info[i].fd = fd;
-		kvp_file_info[i].num_blocks = num_blocks;
-		kvp_file_info[i].records = record;
-		kvp_file_info[i].num_records = records_read;
-		fclose(filep);
-
+		kvp_file_info[i].num_blocks = 1;
+		kvp_file_info[i].records = malloc(alloc_unit);
+		if (kvp_file_info[i].records == NULL)
+			return 1;
+		kvp_file_info[i].num_records = 0;
+		kvp_update_mem_state(i);
 	}
 
 	return 0;
diff --git a/tools/include/linux/poison.h b/tools/include/linux/poison.h
index 51334ed..f306a76 100644
--- a/tools/include/linux/poison.h
+++ b/tools/include/linux/poison.h
@@ -14,6 +14,10 @@
 # define POISON_POINTER_DELTA 0
 #endif
 
+#ifdef __cplusplus
+#define LIST_POISON1  NULL
+#define LIST_POISON2  NULL
+#else
 /*
  * These are non-NULL pointers that will result in page faults
  * under normal circumstances, used to verify that nobody uses
@@ -21,6 +25,7 @@
  */
 #define LIST_POISON1  ((void *) 0x100 + POISON_POINTER_DELTA)
 #define LIST_POISON2  ((void *) 0x200 + POISON_POINTER_DELTA)
+#endif
 
 /********** include/linux/timer.h **********/
 /*
diff --git a/tools/objtool/arch/x86/insn/x86-opcode-map.txt b/tools/objtool/arch/x86/insn/x86-opcode-map.txt
index 767be7c..1754e09 100644
--- a/tools/objtool/arch/x86/insn/x86-opcode-map.txt
+++ b/tools/objtool/arch/x86/insn/x86-opcode-map.txt
@@ -896,7 +896,7 @@
 
 GrpTable: Grp3_1
 0: TEST Eb,Ib
-1:
+1: TEST Eb,Ib
 2: NOT Eb
 3: NEG Eb
 4: MUL AL,Eb
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
index b8dadb0..a688a85 100644
--- a/tools/objtool/builtin-check.c
+++ b/tools/objtool/builtin-check.c
@@ -51,7 +51,7 @@
 	unsigned int len, state;
 	unsigned char type;
 	unsigned long immediate;
-	bool alt_group, visited;
+	bool alt_group, visited, ignore_alts;
 	struct symbol *call_dest;
 	struct instruction *jump_dest;
 	struct list_head alts;
@@ -353,6 +353,40 @@
 }
 
 /*
+ * FIXME: For now, just ignore any alternatives which add retpolines.  This is
+ * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
+ * But it at least allows objtool to understand the control flow *around* the
+ * retpoline.
+ */
+static int add_nospec_ignores(struct objtool_file *file)
+{
+	struct section *sec;
+	struct rela *rela;
+	struct instruction *insn;
+
+	sec = find_section_by_name(file->elf, ".rela.discard.nospec");
+	if (!sec)
+		return 0;
+
+	list_for_each_entry(rela, &sec->rela_list, list) {
+		if (rela->sym->type != STT_SECTION) {
+			WARN("unexpected relocation symbol type in %s", sec->name);
+			return -1;
+		}
+
+		insn = find_insn(file, rela->sym->sec, rela->addend);
+		if (!insn) {
+			WARN("bad .discard.nospec entry");
+			return -1;
+		}
+
+		insn->ignore_alts = true;
+	}
+
+	return 0;
+}
+
+/*
  * Find the destination instructions for all jumps.
  */
 static int add_jump_destinations(struct objtool_file *file)
@@ -382,6 +416,13 @@
 		} else if (rela->sym->sec->idx) {
 			dest_sec = rela->sym->sec;
 			dest_off = rela->sym->sym.st_value + rela->addend + 4;
+		} else if (strstr(rela->sym->name, "_indirect_thunk_")) {
+			/*
+			 * Retpoline jumps are really dynamic jumps in
+			 * disguise, so convert them accordingly.
+			 */
+			insn->type = INSN_JUMP_DYNAMIC;
+			continue;
 		} else {
 			/* sibling call */
 			insn->jump_dest = 0;
@@ -428,11 +469,18 @@
 			dest_off = insn->offset + insn->len + insn->immediate;
 			insn->call_dest = find_symbol_by_offset(insn->sec,
 								dest_off);
+			/*
+			 * FIXME: Thanks to retpolines, it's now considered
+			 * normal for a function to call within itself.  So
+			 * disable this warning for now.
+			 */
+#if 0
 			if (!insn->call_dest) {
 				WARN_FUNC("can't find call dest symbol at offset 0x%lx",
 					  insn->sec, insn->offset, dest_off);
 				return -1;
 			}
+#endif
 		} else if (rela->sym->type == STT_SECTION) {
 			insn->call_dest = find_symbol_by_offset(rela->sym->sec,
 								rela->addend+4);
@@ -594,12 +642,6 @@
 		return ret;
 
 	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {
-		alt = malloc(sizeof(*alt));
-		if (!alt) {
-			WARN("malloc failed");
-			ret = -1;
-			goto out;
-		}
 
 		orig_insn = find_insn(file, special_alt->orig_sec,
 				      special_alt->orig_off);
@@ -610,6 +652,10 @@
 			goto out;
 		}
 
+		/* Ignore retpoline alternatives. */
+		if (orig_insn->ignore_alts)
+			continue;
+
 		new_insn = NULL;
 		if (!special_alt->group || special_alt->new_len) {
 			new_insn = find_insn(file, special_alt->new_sec,
@@ -635,6 +681,13 @@
 				goto out;
 		}
 
+		alt = malloc(sizeof(*alt));
+		if (!alt) {
+			WARN("malloc failed");
+			ret = -1;
+			goto out;
+		}
+
 		alt->insn = new_insn;
 		list_add_tail(&alt->list, &orig_insn->alts);
 
@@ -854,6 +907,10 @@
 
 	add_ignores(file);
 
+	ret = add_nospec_ignores(file);
+	if (ret)
+		return ret;
+
 	ret = add_jump_destinations(file);
 	if (ret)
 		return ret;
@@ -1173,6 +1230,14 @@
 
 	for_each_insn(file, insn) {
 		if (!insn->visited && insn->type == INSN_RETURN) {
+
+			/*
+			 * Don't warn about call instructions in unvisited
+			 * retpoline alternatives.
+			 */
+			if (!strcmp(insn->sec->name, ".altinstr_replacement"))
+				continue;
+
 			WARN_FUNC("return instruction outside of a callable function",
 				  insn->sec, insn->offset);
 			warnings++;
@@ -1229,7 +1294,7 @@
 
 	INIT_LIST_HEAD(&file.insn_list);
 	hash_init(file.insn_hash);
-	file.whitelist = find_section_by_name(file.elf, "__func_stack_frame_non_standard");
+	file.whitelist = find_section_by_name(file.elf, ".discard.func_stack_frame_non_standard");
 	file.rodata = find_section_by_name(file.elf, ".rodata");
 	file.ignore_unreachables = false;
 	file.c_file = find_section_by_name(file.elf, ".comment");
diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c
index 28d1605..b60a6fd 100644
--- a/tools/perf/tests/attr.c
+++ b/tools/perf/tests/attr.c
@@ -150,7 +150,7 @@
 	snprintf(cmd, 3*PATH_MAX, PYTHON " %s/attr.py -d %s/attr/ -p %s %.*s",
 		 d, d, perf, vcnt, v);
 
-	return system(cmd);
+	return system(cmd) ? TEST_FAIL : TEST_OK;
 }
 
 int test__attr(int subtest __maybe_unused)
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index f7b35e1..f199d5b 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -202,7 +202,7 @@
 
 	/* Last entry */
 	if (curr->end == curr->start)
-		curr->end = roundup(curr->start, 4096);
+		curr->end = roundup(curr->start, 4096) + 4096;
 }
 
 void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
diff --git a/tools/testing/selftests/powerpc/harness.c b/tools/testing/selftests/powerpc/harness.c
index 248a820..66d31de 100644
--- a/tools/testing/selftests/powerpc/harness.c
+++ b/tools/testing/selftests/powerpc/harness.c
@@ -114,9 +114,11 @@
 
 	rc = run_test(test_function, name);
 
-	if (rc == MAGIC_SKIP_RETURN_VALUE)
+	if (rc == MAGIC_SKIP_RETURN_VALUE) {
 		test_skip(name);
-	else
+		/* so that skipped test is not marked as failed */
+		rc = 0;
+	} else
 		test_finish(name, rc);
 
 	return rc;
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index bbab7f4..d116a19 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -1,5 +1,9 @@
 # Makefile for vm selftests
 
+ifndef OUTPUT
+  OUTPUT := $(shell pwd)
+endif
+
 CFLAGS = -Wall -I ../../../../usr/include $(EXTRA_CFLAGS)
 BINARIES = compaction_test
 BINARIES += hugepage-mmap
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index 6300c1a..4af37bf 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -6,7 +6,7 @@
 
 TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_syscall test_mremap_vdso \
 			check_initial_reg_state sigreturn ldt_gdt iopl mpx-mini-test \
-			protection_keys
+			protection_keys test_vsyscall
 TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \
 			test_FCMOV test_FCOMI test_FISTTP \
 			vdso_restorer
diff --git a/tools/testing/selftests/x86/fsgsbase.c b/tools/testing/selftests/x86/fsgsbase.c
index 9b4610c..f249e04 100644
--- a/tools/testing/selftests/x86/fsgsbase.c
+++ b/tools/testing/selftests/x86/fsgsbase.c
@@ -245,7 +245,7 @@
 		long ret;
 		asm volatile ("int $0x80"
 			      : "=a" (ret) : "a" (243), "b" (low_desc)
-			      : "flags");
+			      : "r8", "r9", "r10", "r11");
 		memcpy(&desc, low_desc, sizeof(desc));
 		munmap(low_desc, sizeof(desc));
 
diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c
index e717fed..ac1a7a3 100644
--- a/tools/testing/selftests/x86/ldt_gdt.c
+++ b/tools/testing/selftests/x86/ldt_gdt.c
@@ -45,6 +45,12 @@
 #define AR_DB			(1 << 22)
 #define AR_G			(1 << 23)
 
+#ifdef __x86_64__
+# define INT80_CLOBBERS "r8", "r9", "r10", "r11"
+#else
+# define INT80_CLOBBERS
+#endif
+
 static int nerrs;
 
 /* Points to an array of 1024 ints, each holding its own index. */
@@ -360,9 +366,24 @@
 	install_invalid(&desc, false);
 
 	desc.seg_not_present = 0;
-	desc.read_exec_only = 0;
 	desc.seg_32bit = 1;
+	desc.read_exec_only = 0;
+	desc.limit = 0xfffff;
+
 	install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S | AR_P | AR_DB);
+
+	desc.limit_in_pages = 1;
+
+	install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S | AR_P | AR_DB | AR_G);
+	desc.read_exec_only = 1;
+	install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA | AR_S | AR_P | AR_DB | AR_G);
+	desc.contents = 1;
+	desc.read_exec_only = 0;
+	install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA_EXPDOWN | AR_S | AR_P | AR_DB | AR_G);
+	desc.read_exec_only = 1;
+	install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA_EXPDOWN | AR_S | AR_P | AR_DB | AR_G);
+
+	desc.limit = 0;
 	install_invalid(&desc, true);
 }
 
@@ -634,7 +655,7 @@
 	asm volatile ("int $0x80"
 		      : "=a" (ret), "+m" (low_user_desc) :
 			"a" (243), "b" (low_user_desc)
-		      : "flags");
+		      : INT80_CLOBBERS);
 	return ret;
 }
 
@@ -703,7 +724,7 @@
 			"+a" (eax)
 		      : "m" (low_user_desc_clear),
 			[arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
-		      : "flags");
+		      : INT80_CLOBBERS);
 
 	if (sel != 0) {
 		result = "FAIL";
@@ -734,7 +755,7 @@
 			"+a" (eax)
 		      : "m" (low_user_desc_clear),
 			[arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
-		      : "flags");
+		      : INT80_CLOBBERS);
 
 	if (sel != 0) {
 		result = "FAIL";
@@ -767,7 +788,7 @@
 			"+a" (eax)
 		      : "m" (low_user_desc_clear),
 			[arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
-		      : "flags");
+		      : INT80_CLOBBERS);
 
 #ifdef __x86_64__
 	syscall(SYS_arch_prctl, ARCH_GET_FS, &new_base);
@@ -820,7 +841,7 @@
 			"+a" (eax)
 		      : "m" (low_user_desc_clear),
 			[arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
-		      : "flags");
+		      : INT80_CLOBBERS);
 
 #ifdef __x86_64__
 	syscall(SYS_arch_prctl, ARCH_GET_GS, &new_base);
diff --git a/tools/testing/selftests/x86/mpx-hw.h b/tools/testing/selftests/x86/mpx-hw.h
index 093c190..28b3c7c 100644
--- a/tools/testing/selftests/x86/mpx-hw.h
+++ b/tools/testing/selftests/x86/mpx-hw.h
@@ -51,14 +51,14 @@
 struct mpx_bd_entry {
 	union {
 		char x[MPX_BOUNDS_DIR_ENTRY_SIZE_BYTES];
-		void *contents[1];
+		void *contents[0];
 	};
 } __attribute__((packed));
 
 struct mpx_bt_entry {
 	union {
 		char x[MPX_BOUNDS_TABLE_ENTRY_SIZE_BYTES];
-		unsigned long contents[1];
+		unsigned long contents[0];
 	};
 } __attribute__((packed));
 
diff --git a/tools/testing/selftests/x86/ptrace_syscall.c b/tools/testing/selftests/x86/ptrace_syscall.c
index b037ce9c..eaea924 100644
--- a/tools/testing/selftests/x86/ptrace_syscall.c
+++ b/tools/testing/selftests/x86/ptrace_syscall.c
@@ -58,7 +58,8 @@
 	asm volatile ("int $0x80"
 		      : "+a" (args->nr),
 			"+b" (args->arg0), "+c" (args->arg1), "+d" (args->arg2),
-			"+S" (args->arg3), "+D" (args->arg4), "+r" (bp));
+			"+S" (args->arg3), "+D" (args->arg4), "+r" (bp)
+			: : "r8", "r9", "r10", "r11");
 	args->arg5 = bp;
 #else
 	sys32_helper(args, int80_and_ret);
diff --git a/tools/testing/selftests/x86/single_step_syscall.c b/tools/testing/selftests/x86/single_step_syscall.c
index 50c2635..a48da95 100644
--- a/tools/testing/selftests/x86/single_step_syscall.c
+++ b/tools/testing/selftests/x86/single_step_syscall.c
@@ -56,9 +56,11 @@
 #ifdef __x86_64__
 # define REG_IP REG_RIP
 # define WIDTH "q"
+# define INT80_CLOBBERS "r8", "r9", "r10", "r11"
 #else
 # define REG_IP REG_EIP
 # define WIDTH "l"
+# define INT80_CLOBBERS
 #endif
 
 static unsigned long get_eflags(void)
@@ -140,7 +142,8 @@
 
 	printf("[RUN]\tSet TF and check int80\n");
 	set_eflags(get_eflags() | X86_EFLAGS_TF);
-	asm volatile ("int $0x80" : "=a" (tmp) : "a" (SYS_getpid));
+	asm volatile ("int $0x80" : "=a" (tmp) : "a" (SYS_getpid)
+			: INT80_CLOBBERS);
 	check_result();
 
 	/*
diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c
new file mode 100644
index 0000000..6e0bd52
--- /dev/null
+++ b/tools/testing/selftests/x86/test_vsyscall.c
@@ -0,0 +1,500 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <sys/time.h>
+#include <time.h>
+#include <stdlib.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+#include <dlfcn.h>
+#include <string.h>
+#include <inttypes.h>
+#include <signal.h>
+#include <sys/ucontext.h>
+#include <errno.h>
+#include <err.h>
+#include <sched.h>
+#include <stdbool.h>
+#include <setjmp.h>
+
+#ifdef __x86_64__
+# define VSYS(x) (x)
+#else
+# define VSYS(x) 0
+#endif
+
+#ifndef SYS_getcpu
+# ifdef __x86_64__
+#  define SYS_getcpu 309
+# else
+#  define SYS_getcpu 318
+# endif
+#endif
+
+static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
+		       int flags)
+{
+	struct sigaction sa;
+	memset(&sa, 0, sizeof(sa));
+	sa.sa_sigaction = handler;
+	sa.sa_flags = SA_SIGINFO | flags;
+	sigemptyset(&sa.sa_mask);
+	if (sigaction(sig, &sa, 0))
+		err(1, "sigaction");
+}
+
+/* vsyscalls and vDSO */
+bool should_read_vsyscall = false;
+
+typedef long (*gtod_t)(struct timeval *tv, struct timezone *tz);
+gtod_t vgtod = (gtod_t)VSYS(0xffffffffff600000);
+gtod_t vdso_gtod;
+
+typedef int (*vgettime_t)(clockid_t, struct timespec *);
+vgettime_t vdso_gettime;
+
+typedef long (*time_func_t)(time_t *t);
+time_func_t vtime = (time_func_t)VSYS(0xffffffffff600400);
+time_func_t vdso_time;
+
+typedef long (*getcpu_t)(unsigned *, unsigned *, void *);
+getcpu_t vgetcpu = (getcpu_t)VSYS(0xffffffffff600800);
+getcpu_t vdso_getcpu;
+
+static void init_vdso(void)
+{
+	void *vdso = dlopen("linux-vdso.so.1", RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
+	if (!vdso)
+		vdso = dlopen("linux-gate.so.1", RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
+	if (!vdso) {
+		printf("[WARN]\tfailed to find vDSO\n");
+		return;
+	}
+
+	vdso_gtod = (gtod_t)dlsym(vdso, "__vdso_gettimeofday");
+	if (!vdso_gtod)
+		printf("[WARN]\tfailed to find gettimeofday in vDSO\n");
+
+	vdso_gettime = (vgettime_t)dlsym(vdso, "__vdso_clock_gettime");
+	if (!vdso_gettime)
+		printf("[WARN]\tfailed to find clock_gettime in vDSO\n");
+
+	vdso_time = (time_func_t)dlsym(vdso, "__vdso_time");
+	if (!vdso_time)
+		printf("[WARN]\tfailed to find time in vDSO\n");
+
+	vdso_getcpu = (getcpu_t)dlsym(vdso, "__vdso_getcpu");
+	if (!vdso_getcpu) {
+		/* getcpu() was never wired up in the 32-bit vDSO. */
+		printf("[%s]\tfailed to find getcpu in vDSO\n",
+		       sizeof(long) == 8 ? "WARN" : "NOTE");
+	}
+}
+
+static int init_vsys(void)
+{
+#ifdef __x86_64__
+	int nerrs = 0;
+	FILE *maps;
+	char line[128];
+	bool found = false;
+
+	maps = fopen("/proc/self/maps", "r");
+	if (!maps) {
+		printf("[WARN]\tCould not open /proc/self/maps -- assuming vsyscall is r-x\n");
+		should_read_vsyscall = true;
+		return 0;
+	}
+
+	while (fgets(line, sizeof(line), maps)) {
+		char r, x;
+		void *start, *end;
+		char name[128];
+		if (sscanf(line, "%p-%p %c-%cp %*x %*x:%*x %*u %s",
+			   &start, &end, &r, &x, name) != 5)
+			continue;
+
+		if (strcmp(name, "[vsyscall]"))
+			continue;
+
+		printf("\tvsyscall map: %s", line);
+
+		if (start != (void *)0xffffffffff600000 ||
+		    end != (void *)0xffffffffff601000) {
+			printf("[FAIL]\taddress range is nonsense\n");
+			nerrs++;
+		}
+
+		printf("\tvsyscall permissions are %c-%c\n", r, x);
+		should_read_vsyscall = (r == 'r');
+		if (x != 'x') {
+			vgtod = NULL;
+			vtime = NULL;
+			vgetcpu = NULL;
+		}
+
+		found = true;
+		break;
+	}
+
+	fclose(maps);
+
+	if (!found) {
+		printf("\tno vsyscall map in /proc/self/maps\n");
+		should_read_vsyscall = false;
+		vgtod = NULL;
+		vtime = NULL;
+		vgetcpu = NULL;
+	}
+
+	return nerrs;
+#else
+	return 0;
+#endif
+}
+
+/* syscalls */
+static inline long sys_gtod(struct timeval *tv, struct timezone *tz)
+{
+	return syscall(SYS_gettimeofday, tv, tz);
+}
+
+static inline int sys_clock_gettime(clockid_t id, struct timespec *ts)
+{
+	return syscall(SYS_clock_gettime, id, ts);
+}
+
+static inline long sys_time(time_t *t)
+{
+	return syscall(SYS_time, t);
+}
+
+static inline long sys_getcpu(unsigned * cpu, unsigned * node,
+			      void* cache)
+{
+	return syscall(SYS_getcpu, cpu, node, cache);
+}
+
+static jmp_buf jmpbuf;
+
+static void sigsegv(int sig, siginfo_t *info, void *ctx_void)
+{
+	siglongjmp(jmpbuf, 1);
+}
+
+static double tv_diff(const struct timeval *a, const struct timeval *b)
+{
+	return (double)(a->tv_sec - b->tv_sec) +
+		(double)((int)a->tv_usec - (int)b->tv_usec) * 1e-6;
+}
+
+static int check_gtod(const struct timeval *tv_sys1,
+		      const struct timeval *tv_sys2,
+		      const struct timezone *tz_sys,
+		      const char *which,
+		      const struct timeval *tv_other,
+		      const struct timezone *tz_other)
+{
+	int nerrs = 0;
+	double d1, d2;
+
+	if (tz_other && (tz_sys->tz_minuteswest != tz_other->tz_minuteswest || tz_sys->tz_dsttime != tz_other->tz_dsttime)) {
+		printf("[FAIL] %s tz mismatch\n", which);
+		nerrs++;
+	}
+
+	d1 = tv_diff(tv_other, tv_sys1);
+	d2 = tv_diff(tv_sys2, tv_other);
+	printf("\t%s time offsets: %lf %lf\n", which, d1, d2);
+
+	if (d1 < 0 || d2 < 0) {
+		printf("[FAIL]\t%s time was inconsistent with the syscall\n", which);
+		nerrs++;
+	} else {
+		printf("[OK]\t%s gettimeofday()'s timeval was okay\n", which);
+	}
+
+	return nerrs;
+}
+
+static int test_gtod(void)
+{
+	struct timeval tv_sys1, tv_sys2, tv_vdso, tv_vsys;
+	struct timezone tz_sys, tz_vdso, tz_vsys;
+	long ret_vdso = -1;
+	long ret_vsys = -1;
+	int nerrs = 0;
+
+	printf("[RUN]\ttest gettimeofday()\n");
+
+	if (sys_gtod(&tv_sys1, &tz_sys) != 0)
+		err(1, "syscall gettimeofday");
+	if (vdso_gtod)
+		ret_vdso = vdso_gtod(&tv_vdso, &tz_vdso);
+	if (vgtod)
+		ret_vsys = vgtod(&tv_vsys, &tz_vsys);
+	if (sys_gtod(&tv_sys2, &tz_sys) != 0)
+		err(1, "syscall gettimeofday");
+
+	if (vdso_gtod) {
+		if (ret_vdso == 0) {
+			nerrs += check_gtod(&tv_sys1, &tv_sys2, &tz_sys, "vDSO", &tv_vdso, &tz_vdso);
+		} else {
+			printf("[FAIL]\tvDSO gettimeofday() failed: %ld\n", ret_vdso);
+			nerrs++;
+		}
+	}
+
+	if (vgtod) {
+		if (ret_vsys == 0) {
+			nerrs += check_gtod(&tv_sys1, &tv_sys2, &tz_sys, "vsyscall", &tv_vsys, &tz_vsys);
+		} else {
+			printf("[FAIL]\tvsys gettimeofday() failed: %ld\n", ret_vsys);
+			nerrs++;
+		}
+	}
+
+	return nerrs;
+}
+
+static int test_time(void) {
+	int nerrs = 0;
+
+	printf("[RUN]\ttest time()\n");
+	long t_sys1, t_sys2, t_vdso = 0, t_vsys = 0;
+	long t2_sys1 = -1, t2_sys2 = -1, t2_vdso = -1, t2_vsys = -1;
+	t_sys1 = sys_time(&t2_sys1);
+	if (vdso_time)
+		t_vdso = vdso_time(&t2_vdso);
+	if (vtime)
+		t_vsys = vtime(&t2_vsys);
+	t_sys2 = sys_time(&t2_sys2);
+	if (t_sys1 < 0 || t_sys1 != t2_sys1 || t_sys2 < 0 || t_sys2 != t2_sys2) {
+		printf("[FAIL]\tsyscall failed (ret1:%ld output1:%ld ret2:%ld output2:%ld)\n", t_sys1, t2_sys1, t_sys2, t2_sys2);
+		nerrs++;
+		return nerrs;
+	}
+
+	if (vdso_time) {
+		if (t_vdso < 0 || t_vdso != t2_vdso) {
+			printf("[FAIL]\tvDSO failed (ret:%ld output:%ld)\n", t_vdso, t2_vdso);
+			nerrs++;
+		} else if (t_vdso < t_sys1 || t_vdso > t_sys2) {
+			printf("[FAIL]\tvDSO returned the wrong time (%ld %ld %ld)\n", t_sys1, t_vdso, t_sys2);
+			nerrs++;
+		} else {
+			printf("[OK]\tvDSO time() is okay\n");
+		}
+	}
+
+	if (vtime) {
+		if (t_vsys < 0 || t_vsys != t2_vsys) {
+			printf("[FAIL]\tvsyscall failed (ret:%ld output:%ld)\n", t_vsys, t2_vsys);
+			nerrs++;
+		} else if (t_vsys < t_sys1 || t_vsys > t_sys2) {
+			printf("[FAIL]\tvsyscall returned the wrong time (%ld %ld %ld)\n", t_sys1, t_vsys, t_sys2);
+			nerrs++;
+		} else {
+			printf("[OK]\tvsyscall time() is okay\n");
+		}
+	}
+
+	return nerrs;
+}
+
+static int test_getcpu(int cpu)
+{
+	int nerrs = 0;
+	long ret_sys, ret_vdso = -1, ret_vsys = -1;
+
+	printf("[RUN]\tgetcpu() on CPU %d\n", cpu);
+
+	cpu_set_t cpuset;
+	CPU_ZERO(&cpuset);
+	CPU_SET(cpu, &cpuset);
+	if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0) {
+		printf("[SKIP]\tfailed to force CPU %d\n", cpu);
+		return nerrs;
+	}
+
+	unsigned cpu_sys, cpu_vdso, cpu_vsys, node_sys, node_vdso, node_vsys;
+	unsigned node = 0;
+	bool have_node = false;
+	ret_sys = sys_getcpu(&cpu_sys, &node_sys, 0);
+	if (vdso_getcpu)
+		ret_vdso = vdso_getcpu(&cpu_vdso, &node_vdso, 0);
+	if (vgetcpu)
+		ret_vsys = vgetcpu(&cpu_vsys, &node_vsys, 0);
+
+	if (ret_sys == 0) {
+		if (cpu_sys != cpu) {
+			printf("[FAIL]\tsyscall reported CPU %hu but should be %d\n", cpu_sys, cpu);
+			nerrs++;
+		}
+
+		have_node = true;
+		node = node_sys;
+	}
+
+	if (vdso_getcpu) {
+		if (ret_vdso) {
+			printf("[FAIL]\tvDSO getcpu() failed\n");
+			nerrs++;
+		} else {
+			if (!have_node) {
+				have_node = true;
+				node = node_vdso;
+			}
+
+			if (cpu_vdso != cpu) {
+				printf("[FAIL]\tvDSO reported CPU %hu but should be %d\n", cpu_vdso, cpu);
+				nerrs++;
+			} else {
+				printf("[OK]\tvDSO reported correct CPU\n");
+			}
+
+			if (node_vdso != node) {
+				printf("[FAIL]\tvDSO reported node %hu but should be %hu\n", node_vdso, node);
+				nerrs++;
+			} else {
+				printf("[OK]\tvDSO reported correct node\n");
+			}
+		}
+	}
+
+	if (vgetcpu) {
+		if (ret_vsys) {
+			printf("[FAIL]\tvsyscall getcpu() failed\n");
+			nerrs++;
+		} else {
+			if (!have_node) {
+				have_node = true;
+				node = node_vsys;
+			}
+
+			if (cpu_vsys != cpu) {
+				printf("[FAIL]\tvsyscall reported CPU %hu but should be %d\n", cpu_vsys, cpu);
+				nerrs++;
+			} else {
+				printf("[OK]\tvsyscall reported correct CPU\n");
+			}
+
+			if (node_vsys != node) {
+				printf("[FAIL]\tvsyscall reported node %hu but should be %hu\n", node_vsys, node);
+				nerrs++;
+			} else {
+				printf("[OK]\tvsyscall reported correct node\n");
+			}
+		}
+	}
+
+	return nerrs;
+}
+
+static int test_vsys_r(void)
+{
+#ifdef __x86_64__
+	printf("[RUN]\tChecking read access to the vsyscall page\n");
+	bool can_read;
+	if (sigsetjmp(jmpbuf, 1) == 0) {
+		*(volatile int *)0xffffffffff600000;
+		can_read = true;
+	} else {
+		can_read = false;
+	}
+
+	if (can_read && !should_read_vsyscall) {
+		printf("[FAIL]\tWe have read access, but we shouldn't\n");
+		return 1;
+	} else if (!can_read && should_read_vsyscall) {
+		printf("[FAIL]\tWe don't have read access, but we should\n");
+		return 1;
+	} else {
+		printf("[OK]\tgot expected result\n");
+	}
+#endif
+
+	return 0;
+}
+
+
+#ifdef __x86_64__
+#define X86_EFLAGS_TF (1UL << 8)
+static volatile sig_atomic_t num_vsyscall_traps;
+
+static unsigned long get_eflags(void)
+{
+	unsigned long eflags;
+	asm volatile ("pushfq\n\tpopq %0" : "=rm" (eflags));
+	return eflags;
+}
+
+static void set_eflags(unsigned long eflags)
+{
+	asm volatile ("pushq %0\n\tpopfq" : : "rm" (eflags) : "flags");
+}
+
+static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
+{
+	ucontext_t *ctx = (ucontext_t *)ctx_void;
+	unsigned long ip = ctx->uc_mcontext.gregs[REG_RIP];
+
+	if (((ip ^ 0xffffffffff600000UL) & ~0xfffUL) == 0)
+		num_vsyscall_traps++;
+}
+
+static int test_native_vsyscall(void)
+{
+	time_t tmp;
+	bool is_native;
+
+	if (!vtime)
+		return 0;
+
+	printf("[RUN]\tchecking for native vsyscall\n");
+	sethandler(SIGTRAP, sigtrap, 0);
+	set_eflags(get_eflags() | X86_EFLAGS_TF);
+	vtime(&tmp);
+	set_eflags(get_eflags() & ~X86_EFLAGS_TF);
+
+	/*
+	 * If vsyscalls are emulated, we expect a single trap in the
+	 * vsyscall page -- the call instruction will trap with RIP
+	 * pointing to the entry point before emulation takes over.
+	 * In native mode, we expect two traps, since whatever code
+	 * the vsyscall page contains will be more than just a ret
+	 * instruction.
+	 */
+	is_native = (num_vsyscall_traps > 1);
+
+	printf("\tvsyscalls are %s (%d instructions in vsyscall page)\n",
+	       (is_native ? "native" : "emulated"),
+	       (int)num_vsyscall_traps);
+
+	return 0;
+}
+#endif
+
+int main(int argc, char **argv)
+{
+	int nerrs = 0;
+
+	init_vdso();
+	nerrs += init_vsys();
+
+	nerrs += test_gtod();
+	nerrs += test_time();
+	nerrs += test_getcpu(0);
+	nerrs += test_getcpu(1);
+
+	sethandler(SIGSEGV, sigsegv, 0);
+	nerrs += test_vsys_r();
+
+#ifdef __x86_64__
+	nerrs += test_native_vsyscall();
+#endif
+
+	return nerrs ? 1 : 0;
+}
diff --git a/tools/usb/usbip/Makefile.am b/tools/usb/usbip/Makefile.am
index 66f8bf0..45eaa70 100644
--- a/tools/usb/usbip/Makefile.am
+++ b/tools/usb/usbip/Makefile.am
@@ -1,6 +1,7 @@
 SUBDIRS := libsrc src
 includedir = @includedir@/usbip
 include_HEADERS := $(addprefix libsrc/, \
-		     usbip_common.h vhci_driver.h usbip_host_driver.h)
+		     usbip_common.h vhci_driver.h usbip_host_driver.h \
+		     list.h sysfs_utils.h usbip_host_common.h)
 
 dist_man_MANS := $(addprefix doc/, usbip.8 usbipd.8)
diff --git a/tools/usb/usbip/src/utils.c b/tools/usb/usbip/src/utils.c
index 2b3d6d2..3d7b42e 100644
--- a/tools/usb/usbip/src/utils.c
+++ b/tools/usb/usbip/src/utils.c
@@ -30,6 +30,7 @@
 	char command[SYSFS_BUS_ID_SIZE + 4];
 	char match_busid_attr_path[SYSFS_PATH_MAX];
 	int rc;
+	int cmd_size;
 
 	snprintf(match_busid_attr_path, sizeof(match_busid_attr_path),
 		 "%s/%s/%s/%s/%s/%s", SYSFS_MNT_PATH, SYSFS_BUS_NAME,
@@ -37,12 +38,14 @@
 		 attr_name);
 
 	if (add)
-		snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s", busid);
+		cmd_size = snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s",
+				    busid);
 	else
-		snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s", busid);
+		cmd_size = snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s",
+				    busid);
 
 	rc = write_sysfs_attribute(match_busid_attr_path, command,
-				   sizeof(command));
+				   cmd_size);
 	if (rc < 0) {
 		dbg("failed to write match_busid: %s", strerror(errno));
 		return -1;
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 27a1f63..7b49a13 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -89,9 +89,6 @@
 	struct kvm_vcpu *vcpu;
 
 	vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
-	vcpu->arch.timer_cpu.armed = false;
-
-	WARN_ON(!kvm_timer_should_fire(vcpu));
 
 	/*
 	 * If the vcpu is blocked we want to wake it up so that it will see
diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c
index c8aeb7b..9502124 100644
--- a/virt/kvm/arm/hyp/vgic-v2-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v2-sr.c
@@ -77,11 +77,7 @@
 	else
 		elrsr1 = 0;
 
-#ifdef CONFIG_CPU_BIG_ENDIAN
-	cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1;
-#else
 	cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
-#endif
 }
 
 static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
diff --git a/virt/kvm/arm/vgic/vgic-irqfd.c b/virt/kvm/arm/vgic/vgic-irqfd.c
index f138ed2..a26c677 100644
--- a/virt/kvm/arm/vgic/vgic-irqfd.c
+++ b/virt/kvm/arm/vgic/vgic-irqfd.c
@@ -112,8 +112,7 @@
 	u32 nr = dist->nr_spis;
 	int i, ret;
 
-	entries = kcalloc(nr, sizeof(struct kvm_kernel_irq_routing_entry),
-			  GFP_KERNEL);
+	entries = kcalloc(nr, sizeof(*entries), GFP_KERNEL);
 	if (!entries)
 		return -ENOMEM;
 
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 4660a7d..31f5625 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -322,6 +322,7 @@
 	int ret = 0;
 	u32 *intids;
 	int nr_irqs, i;
+	u8 pendmask;
 
 	nr_irqs = vgic_copy_lpi_list(vcpu->kvm, &intids);
 	if (nr_irqs < 0)
@@ -329,7 +330,6 @@
 
 	for (i = 0; i < nr_irqs; i++) {
 		int byte_offset, bit_nr;
-		u8 pendmask;
 
 		byte_offset = intids[i] / BITS_PER_BYTE;
 		bit_nr = intids[i] % BITS_PER_BYTE;
@@ -360,29 +360,6 @@
 	return ret;
 }
 
-static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu,
-					     struct vgic_its *its,
-					     gpa_t addr, unsigned int len)
-{
-	u32 reg = 0;
-
-	mutex_lock(&its->cmd_lock);
-	if (its->creadr == its->cwriter)
-		reg |= GITS_CTLR_QUIESCENT;
-	if (its->enabled)
-		reg |= GITS_CTLR_ENABLE;
-	mutex_unlock(&its->cmd_lock);
-
-	return reg;
-}
-
-static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
-				     gpa_t addr, unsigned int len,
-				     unsigned long val)
-{
-	its->enabled = !!(val & GITS_CTLR_ENABLE);
-}
-
 static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm,
 					      struct vgic_its *its,
 					      gpa_t addr, unsigned int len)
@@ -687,6 +664,8 @@
 		return E_ITS_MAPC_COLLECTION_OOR;
 
 	collection = kzalloc(sizeof(*collection), GFP_KERNEL);
+	if (!collection)
+		return -ENOMEM;
 
 	collection->collection_id = coll_id;
 	collection->target_addr = COLLECTION_NOT_MAPPED;
@@ -1160,33 +1139,16 @@
 #define ITS_CMD_SIZE			32
 #define ITS_CMD_OFFSET(reg)		((reg) & GENMASK(19, 5))
 
-/*
- * By writing to CWRITER the guest announces new commands to be processed.
- * To avoid any races in the first place, we take the its_cmd lock, which
- * protects our ring buffer variables, so that there is only one user
- * per ITS handling commands at a given time.
- */
-static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
-					gpa_t addr, unsigned int len,
-					unsigned long val)
+/* Must be called with the cmd_lock held. */
+static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
 {
 	gpa_t cbaser;
 	u64 cmd_buf[4];
-	u32 reg;
 
-	if (!its)
+	/* Commands are only processed when the ITS is enabled. */
+	if (!its->enabled)
 		return;
 
-	mutex_lock(&its->cmd_lock);
-
-	reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
-	reg = ITS_CMD_OFFSET(reg);
-	if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
-		mutex_unlock(&its->cmd_lock);
-		return;
-	}
-
-	its->cwriter = reg;
 	cbaser = CBASER_ADDRESS(its->cbaser);
 
 	while (its->cwriter != its->creadr) {
@@ -1206,6 +1168,34 @@
 		if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser))
 			its->creadr = 0;
 	}
+}
+
+/*
+ * By writing to CWRITER the guest announces new commands to be processed.
+ * To avoid any races in the first place, we take the its_cmd lock, which
+ * protects our ring buffer variables, so that there is only one user
+ * per ITS handling commands at a given time.
+ */
+static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
+					gpa_t addr, unsigned int len,
+					unsigned long val)
+{
+	u64 reg;
+
+	if (!its)
+		return;
+
+	mutex_lock(&its->cmd_lock);
+
+	reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
+	reg = ITS_CMD_OFFSET(reg);
+	if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
+		mutex_unlock(&its->cmd_lock);
+		return;
+	}
+	its->cwriter = reg;
+
+	vgic_its_process_commands(kvm, its);
 
 	mutex_unlock(&its->cmd_lock);
 }
@@ -1286,6 +1276,39 @@
 	*regptr = reg;
 }
 
+static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu,
+					     struct vgic_its *its,
+					     gpa_t addr, unsigned int len)
+{
+	u32 reg = 0;
+
+	mutex_lock(&its->cmd_lock);
+	if (its->creadr == its->cwriter)
+		reg |= GITS_CTLR_QUIESCENT;
+	if (its->enabled)
+		reg |= GITS_CTLR_ENABLE;
+	mutex_unlock(&its->cmd_lock);
+
+	return reg;
+}
+
+static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
+				     gpa_t addr, unsigned int len,
+				     unsigned long val)
+{
+	mutex_lock(&its->cmd_lock);
+
+	its->enabled = !!(val & GITS_CTLR_ENABLE);
+
+	/*
+	 * Try to process any pending commands. This function bails out early
+	 * if the ITS is disabled or no commands have been queued.
+	 */
+	vgic_its_process_commands(kvm, its);
+
+	mutex_unlock(&its->cmd_lock);
+}
+
 #define REGISTER_ITS_DESC(off, rd, wr, length, acc)		\
 {								\
 	.reg_offset = off,					\
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f4c6d4f..1b20768 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -125,6 +125,11 @@
 
 static bool largepages_enabled = true;
 
+__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+		unsigned long start, unsigned long end)
+{
+}
+
 bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
 {
 	if (pfn_valid(pfn))
@@ -361,6 +366,9 @@
 		kvm_flush_remote_tlbs(kvm);
 
 	spin_unlock(&kvm->mmu_lock);
+
+	kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
+
 	srcu_read_unlock(&kvm->srcu, idx);
 }
 
@@ -1052,7 +1060,7 @@
 	 * changes) is disallowed above, so any other attribute changes getting
 	 * here can be skipped.
 	 */
-	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
+	if (as_id == 0 && (change == KVM_MR_CREATE || change == KVM_MR_MOVE)) {
 		r = kvm_iommu_map_pages(kvm, &new);
 		return r;
 	}
@@ -3896,7 +3904,7 @@
 	if (!vcpu_align)
 		vcpu_align = __alignof__(struct kvm_vcpu);
 	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
-					   0, NULL);
+					   SLAB_ACCOUNT, NULL);
 	if (!kvm_vcpu_cache) {
 		r = -ENOMEM;
 		goto out_free_3;